diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/.idea/DAMN.iml b/.idea/DAMN.iml new file mode 100644 index 0000000..c956989 --- /dev/null +++ b/.idea/DAMN.iml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..889dfab --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..94a25f7 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/.idea/workspace.xml b/.idea/workspace.xml new file mode 100644 index 0000000..402018b --- /dev/null +++ b/.idea/workspace.xml @@ -0,0 +1,166 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + DEFINITION_ORDER + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/angular-src/src/app/admin/components/modal/modal.component.spec.ts b/angular-src/src/app/admin/components/modal/modal.component.spec.ts new file mode 100644 index 0000000..fc32a90 --- /dev/null +++ b/angular-src/src/app/admin/components/modal/modal.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { ModalComponent } from './modal.component'; + +describe('ModalComponent', () => { + let component: ModalComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ ModalComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(ModalComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/admin/components/modal/modal.component.ts b/angular-src/src/app/admin/components/modal/modal.component.ts new file mode 100644 index 0000000..e17a3b8 --- /dev/null +++ b/angular-src/src/app/admin/components/modal/modal.component.ts @@ -0,0 +1,82 @@ +import { Component, OnInit } from '@angular/core'; +import { AdminDataService } from '../../services/admin-data.service'; +import { ModalData } from '../../app.interfaces'; +import { Subscription } from 'rxjs/Subscription'; +import { FormGroup, FormControl } from '@angular/forms'; + +@Component({ + selector: 'admin-modal', + templateUrl: './modal.component.html', + styleUrls: ['./modal.component.css'] +}) +export class ModalComponent implements OnInit { + + modalActive: boolean; + modalData: ModalData; + subscription: Subscription; + ipForm: FormGroup; + ipValid: boolean; + ip: string; + + constructor( + private adminDataService: AdminDataService + ) { } + + ngOnInit() { + this.modalData = this.adminDataService.modalData; + this.subscription = this.adminDataService.getModalData().subscribe(() => { + this.modalData = this.adminDataService.modalData; + this.modalActive = true; + }); + this.ipForm = new FormGroup({ + ip: new FormControl() + }); + this.modalActive = false; + this.ip = ''; + } + + closeModal() { + this.modalActive = false; + this.ip = ''; + } + + revoke(id, index) { + this.adminDataService.revokeAccessPrivilege(id, index); + this.closeModal(); + } + + reject(id, index) { + this.adminDataService.rejectAccessRequest(id, index); + this.closeModal(); + } + + accept(ip, id, index) { + if(this.ipValid) { + this.adminDataService.acceptAccessRequest(ip, id, index); + 
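// hide the modal and clear the IP input once the request has been forwarded +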
this.closeModal(); + } else + this.adminDataService.sendAlertDanger('Enter a valid IP or domain'); + } + + ipInputChanged(value) { + if(this.validateIPAddress(value) || this.validateDomain(value)) + this.ipValid = true; + else + this.ipValid = false; + } + + validateIPAddress(ipaddress) { + if (/^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(ipaddress)) { + return true; + } + return false; + } + + validateDomain(domain) { + if (/^[a-zA-Z0-9][a-zA-Z0-9-]{0,61}[a-zA-Z0-9](?:\.[a-zA-Z]{2,})+$/.test(domain)) { + return true; + } + return false; + } + +} diff --git a/angular-src/src/app/admin/components/navbar/navbar.component.css b/angular-src/src/app/admin/components/navbar/navbar.component.css new file mode 100644 index 0000000..a93d36d --- /dev/null +++ b/angular-src/src/app/admin/components/navbar/navbar.component.css @@ -0,0 +1,137 @@ +#burger-container { + display: none; + position: absolute; + left: 20px; + top: 30px; +} + +#burger { + cursor: pointer; + display: block; +} + +#burger span { + background: #fff; + display: block; + width: 27px; + height: 3px; + margin-bottom: 6px; + position: relative; + top: 0; + transition: all ease-in-out 0.4s; + border-radius: 1px; +} + +#burger-container.open span:nth-child(2) { + width: 0; + opacity: 0; +} + +#burger-container.open span:nth-child(3) { + transform: rotate(45deg); + top: -9px; +} + +#burger-container.open span:nth-child(1) { + transform: rotate(-45deg); + top: 9px; +} + +@media only screen and (max-width: 1050px) { + + #burger-container { + display: block; + } + +} + +.nav { + display: flex; + justify-content: flex-start; + padding: 45px 45px 0 10%; + margin: 0 auto 50px; +} + +.nav-links { + padding: 10px 30px; + cursor: pointer; + color: #aaa; + border-bottom: solid #333; + font-size: 1.15rem; + margin-right: 5px; + font-weight: bold; +} + +.active { + border-bottom: solid #aaa; +} + +.logout-container { + display: flex; + align-items: center; + margin-left: auto; +} + +.logout-button { + padding: 10px 30px; + border-style: none; + background-color: #aaa; + color: #333; + font-size: 1.1rem; + border-radius: 2px; + cursor: pointer; +} + +.home-icon { + color: #bbb; + font-size: 2rem; + cursor: pointer; + margin: 10px 20px 0 0; +} + +.disabled { + display: none; +} + +.burger-no-network { + top: 70px !important; +} + +@media only screen and (max-width: 1050px) { + + .nav { + flex-direction: column; + justify-content: flex-end; + border-bottom: solid thin #333; + background-color: #333; + padding: 0 10%; + margin: 0; + overflow: hidden; + height: 330px; + transition: height 0.8s; + } + + .navClosed { + height: 0; + overflow: hidden; + transition: height 0.8s; + } + + .logout-button { + font-size: 1.15rem; + font-weight: bolder; + width: 125px; + } + + .logout-container { + min-height: 90px; + box-sizing: border-box; + padding: 20px 0 30px; + width: 100%; + } + + .home-icon { + margin: 0 20px 0 0; + } + +} diff --git a/angular-src/src/app/admin/components/navbar/navbar.component.html b/angular-src/src/app/admin/components/navbar/navbar.component.html new file mode 100644 index 0000000..74f2a9a --- /dev/null +++ b/angular-src/src/app/admin/components/navbar/navbar.component.html @@ -0,0 +1,17 @@ +
+
+   +   +   +
+
+ \ No newline at end of file diff --git a/angular-src/src/app/admin/components/navbar/navbar.component.spec.ts b/angular-src/src/app/admin/components/navbar/navbar.component.spec.ts new file mode 100644 index 0000000..9032ad2 --- /dev/null +++ b/angular-src/src/app/admin/components/navbar/navbar.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { NavbarComponent } from './navbar.component'; + +describe('NavbarComponent', () => { + let component: NavbarComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ NavbarComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(NavbarComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/admin/components/navbar/navbar.component.ts b/angular-src/src/app/admin/components/navbar/navbar.component.ts new file mode 100644 index 0000000..29510fa --- /dev/null +++ b/angular-src/src/app/admin/components/navbar/navbar.component.ts @@ -0,0 +1,41 @@ +import { Component, OnInit } from '@angular/core'; + +import { NetworkStatusService } from '../../services/network-status.service'; +import { Subscription } from 'rxjs/Subscription'; + +@Component({ + selector: 'admin-navbar', + templateUrl: './navbar.component.html', + styleUrls: ['./navbar.component.css'] +}) +export class NavbarComponent implements OnInit { + + navOpen: boolean; + online: boolean; + subscription: Subscription; + + constructor( + private networkStatusService: NetworkStatusService + ) { } + + ngOnInit() { + this.online = navigator.onLine; + this.navOpen = false; + this.subscription = this.networkStatusService.getStatus().subscribe(() => { + this.online = this.networkStatusService.online; + }); + } + + toggleNav() { + this.navOpen = !this.navOpen; + } + + closeNav() { + this.navOpen = false; + } + + openNav() { + this.navOpen = true; + } + +} \ No newline at end of file diff --git a/angular-src/src/app/admin/components/snackbar/snackbar.component.css b/angular-src/src/app/admin/components/snackbar/snackbar.component.css new file mode 100644 index 0000000..2582a3e --- /dev/null +++ b/angular-src/src/app/admin/components/snackbar/snackbar.component.css @@ -0,0 +1,49 @@ +#snackbar { + visibility: hidden; + min-width: 250px; + background-color: #AAAAAA; + color: #333333; + text-align: center; + border-radius: 2px; + padding: 16px; + position: fixed; + z-index: 3; + left: 50%; + transform: translateX(-50%); + -webkit-transform: translateX(-50%); + -moz-transform: translateX(-50%); + bottom: 30px; + font-size: 17px; +} + +#snackbar.danger { + background-color: #C9302C; + font-weight: bolder; + color: #eee; +} + +#snackbar.show { + visibility: visible; + -webkit-animation: fadein 0.5s, fadeout 0.5s 2.5s; + animation: fadein 0.5s, fadeout 0.5s 2.5s; +} + +@-webkit-keyframes fadein { + from {bottom: 0; opacity: 0;} + to {bottom: 30px; opacity: 1;} +} + +@keyframes fadein { + from {bottom: 0; opacity: 0;} + to {bottom: 30px; opacity: 1;} +} + +@-webkit-keyframes fadeout { + from {bottom: 30px; opacity: 1;} + to {bottom: 0; opacity: 0;} +} + +@keyframes fadeout { + from {bottom: 30px; opacity: 1;} + to {bottom: 0; opacity: 0;} +} \ No newline at end of file diff --git a/angular-src/src/app/admin/components/snackbar/snackbar.component.html 
b/angular-src/src/app/admin/components/snackbar/snackbar.component.html new file mode 100644 index 0000000..4c5d4db --- /dev/null +++ b/angular-src/src/app/admin/components/snackbar/snackbar.component.html @@ -0,0 +1 @@ +
<div id="snackbar" [class.show]="show" [class.danger]="!type">{{ message }}</div>
\ No newline at end of file diff --git a/angular-src/src/app/admin/components/snackbar/snackbar.component.spec.ts b/angular-src/src/app/admin/components/snackbar/snackbar.component.spec.ts new file mode 100644 index 0000000..664d327 --- /dev/null +++ b/angular-src/src/app/admin/components/snackbar/snackbar.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { SnackbarComponent } from './snackbar.component'; + +describe('SnackbarComponent', () => { + let component: SnackbarComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ SnackbarComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(SnackbarComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/admin/components/snackbar/snackbar.component.ts b/angular-src/src/app/admin/components/snackbar/snackbar.component.ts new file mode 100644 index 0000000..e29721e --- /dev/null +++ b/angular-src/src/app/admin/components/snackbar/snackbar.component.ts @@ -0,0 +1,37 @@ +import { Component, OnInit } from '@angular/core'; +import { AdminDataService } from '../../services/admin-data.service'; +import { Subscription } from 'rxjs/Subscription'; + +@Component({ + selector: 'admin-snackbar', + templateUrl: './snackbar.component.html', + styleUrls: ['./snackbar.component.css'] +}) +export class SnackbarComponent implements OnInit { + + message: string; + type: boolean; + subscription: Subscription; + show: boolean; + + constructor( + private adminDataService: AdminDataService + ) { } + + ngOnInit() { + this.subscription = this.adminDataService.getSnackbarData().subscribe(() => { + this.message = this.adminDataService.snackbarMessage; + this.type = this.adminDataService.snackbarType; + this.show = false; + this.showAlert(); + }); + } + + showAlert() { + this.show = true; + setTimeout(() => { + this.show = false; + }, 3000); + } + +} diff --git a/angular-src/src/app/admin/services/admin-data.service.spec.ts b/angular-src/src/app/admin/services/admin-data.service.spec.ts new file mode 100644 index 0000000..c84a3fb --- /dev/null +++ b/angular-src/src/app/admin/services/admin-data.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { AdminDataService } from './admin-data.service'; + +describe('AdminDataService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [AdminDataService] + }); + }); + + it('should be created', inject([AdminDataService], (service: AdminDataService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/angular-src/src/app/admin/services/admin-data.service.ts b/angular-src/src/app/admin/services/admin-data.service.ts new file mode 100644 index 0000000..70283e6 --- /dev/null +++ b/angular-src/src/app/admin/services/admin-data.service.ts @@ -0,0 +1,228 @@ +import { Injectable } from '@angular/core'; +import { Http, Headers } from '@angular/http'; +import { Access, Admin, AccessRequest, AdminRequest, ModalData } from '../app.interfaces'; +import { Observable } from 'rxjs'; +import { Subject } from 'rxjs/Subject'; +import 'rxjs/add/operator/map'; + +@Injectable() +export class AdminDataService { + + accesses: Access[]; + admins: Admin[]; + accessRequests: AccessRequest[]; + adminRequests: AdminRequest[]; + modalData: ModalData; + 
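// snackbar state read by SnackbarComponent: snackbarType is true for a normal alert, false for a danger alert +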
snackbarMessage: string; + snackbarType: boolean; + heading: string; + + private infoSubject = new Subject(); + private modalSubject = new Subject(); + private snackbarSubject = new Subject(); + private headingSubject = new Subject(); + + constructor(private http: Http) { } + + private sendInfo() { + this.infoSubject.next(); + } + + getInfo(): Observable { + return this.infoSubject.asObservable(); + } + + getAccesses() { + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + return this.http.get('/admin/getAccesses', { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.accesses = JSON.parse(data.message); + this.sendInfo(); + } else { + this.sendAlertDanger(data.message); + } + }); + } + + getAdmins() { + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + return this.http.get('/admin/getAdmins', { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.admins = JSON.parse(data.message); + this.sendInfo(); + } else { + this.sendAlertDanger(data.message); + } + }); + } + + getAccessRequests() { + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + return this.http.get('/admin/getAccessRequests', { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.accessRequests = JSON.parse(data.message); + this.sendInfo(); + } else { + this.sendAlertDanger(data.message); + } + }); + } + + getAdminRequests() { + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + return this.http.get('/admin/getAdminRequests', { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.adminRequests = JSON.parse(data.message); + this.sendInfo(); + } else { + this.sendAlertDanger(data.message); + } + }); + } + + changeHeading(heading) { + this.heading = heading; + this.sendHeadingData(); + } + + private sendHeadingData() { + this.headingSubject.next(); + } + + getHeadingData(): Observable { + return this.headingSubject.asObservable(); + } + + openModal(data, modalfor, index) { + this.modalData = { ...data, id: !!data.access_id ? 
data.access_id : data.access_request_id, modalfor, index}; + this.sendModalData(); + } + + private sendModalData() { + this.modalSubject.next(); + } + + getModalData(): Observable { + return this.modalSubject.asObservable(); + } + + sendAlert(message) { + this.snackbarMessage = message; + this.snackbarType = true; + this.sendSnackbarData(); + } + + sendAlertDanger(message) { + this.snackbarMessage = message; + this.snackbarType = false; + this.sendSnackbarData(); + } + + private sendSnackbarData() { + this.snackbarSubject.next(); + } + + getSnackbarData(): Observable { + return this.snackbarSubject.asObservable(); + } + + acceptAdminRequests(id, index) { + if(!navigator.onLine) + this.sendAlert('Request will be accepted'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + this.http.get('/admin/acceptAdminRequest/' + id, { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.adminRequests.splice(index, 1); + this.sendAlert(data.message); + } else + this.sendAlertDanger(data.message); + }); + } + + rejectAdminRequests(id, index) { + if(!navigator.onLine) + this.sendAlert('Request will be rejected'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + this.http.get('/admin/rejectAdminRequest/' + id, { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.adminRequests.splice(index, 1); + this.sendAlert(data.message); + } else + this.sendAlertDanger(data.message); + }); + } + + revokeAdminPrivilege(id, index) { + if(!navigator.onLine) + this.sendAlert('Privileges will be revoked'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + this.http.get('/admin/revokeAdminPrivilege/' + id, { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.admins.splice(index, 1); + this.sendAlert(data.message); + } else + this.sendAlertDanger(data.message); + }); + } + + revokeAccessPrivilege(id, index) { + if(!navigator.onLine) + this.sendAlert('Access will be revoked'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + this.http.get('/admin/revokeAccessPrivilege/' + id, { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + console.log('asd'); + this.accesses.splice(index, 1); + this.sendAlert(data.message); + } else + this.sendAlertDanger(data.message); + }); + } + + rejectAccessRequest(id, index) { + if(!navigator.onLine) + this.sendAlert('Request will be rejected'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + this.http.get('/admin/rejectAccessRequest/' + id, { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.accessRequests.splice(index, 1); + this.sendAlert(data.message); + } else + this.sendAlertDanger(data.message); + }); + } + + acceptAccessRequest(ip, id, index) { + if(!navigator.onLine) + this.sendAlert('Request will be accepted'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + this.http.post('/admin/acceptAccessRequest/' + id, ip, { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) { + this.accessRequests.splice(index, 1); + this.sendAlert(data.message); + } else + this.sendAlertDanger(data.message); + }); + } 
+ +} diff --git a/angular-src/src/app/admin/services/network-status.service.spec.ts b/angular-src/src/app/admin/services/network-status.service.spec.ts new file mode 100644 index 0000000..6e9a60e --- /dev/null +++ b/angular-src/src/app/admin/services/network-status.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { NetworkStatusService } from './network-status.service'; + +describe('NetworkStatusService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [NetworkStatusService] + }); + }); + + it('should be created', inject([NetworkStatusService], (service: NetworkStatusService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/angular-src/src/app/admin/services/network-status.service.ts b/angular-src/src/app/admin/services/network-status.service.ts new file mode 100644 index 0000000..f084184 --- /dev/null +++ b/angular-src/src/app/admin/services/network-status.service.ts @@ -0,0 +1,26 @@ +import { Injectable } from '@angular/core'; +import { Observable } from 'rxjs'; +import { Subject } from 'rxjs/Subject'; + +@Injectable() +export class NetworkStatusService { + + online: boolean; + private statusSubject = new Subject(); + + constructor() { } + + setStatus(status) { + this.online = status; + this.sendStatus(); + } + + private sendStatus() { + this.statusSubject.next(); + } + + getStatus(): Observable { + return this.statusSubject.asObservable(); + } + +} diff --git a/angular-src/src/app/user/app.component.css b/angular-src/src/app/user/app.component.css new file mode 100644 index 0000000..e69de29 diff --git a/angular-src/src/app/user/app.component.html b/angular-src/src/app/user/app.component.html new file mode 100644 index 0000000..84dddf2 --- /dev/null +++ b/angular-src/src/app/user/app.component.html @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/angular-src/src/app/user/app.component.spec.ts b/angular-src/src/app/user/app.component.spec.ts new file mode 100644 index 0000000..bcbdf36 --- /dev/null +++ b/angular-src/src/app/user/app.component.spec.ts @@ -0,0 +1,27 @@ +import { TestBed, async } from '@angular/core/testing'; +import { AppComponent } from './app.component'; +describe('AppComponent', () => { + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ + AppComponent + ], + }).compileComponents(); + })); + it('should create the app', async(() => { + const fixture = TestBed.createComponent(AppComponent); + const app = fixture.debugElement.componentInstance; + expect(app).toBeTruthy(); + })); + it(`should have as title 'app'`, async(() => { + const fixture = TestBed.createComponent(AppComponent); + const app = fixture.debugElement.componentInstance; + expect(app.title).toEqual('app'); + })); + it('should render title in a h1 tag', async(() => { + const fixture = TestBed.createComponent(AppComponent); + fixture.detectChanges(); + const compiled = fixture.debugElement.nativeElement; + expect(compiled.querySelector('h1').textContent).toContain('Welcome to app!'); + })); +}); diff --git a/angular-src/src/app/user/app.component.ts b/angular-src/src/app/user/app.component.ts new file mode 100644 index 0000000..8045ea9 --- /dev/null +++ b/angular-src/src/app/user/app.component.ts @@ -0,0 +1,10 @@ +import { Component } from '@angular/core'; + +@Component({ + selector: 'app-root', + templateUrl: './app.component.html', + styleUrls: ['./app.component.css'] +}) +export class AppComponent { + title = 'user'; +} diff --git a/angular-src/src/app/user/app.module.ts 
b/angular-src/src/app/user/app.module.ts new file mode 100644 index 0000000..51aff62 --- /dev/null +++ b/angular-src/src/app/user/app.module.ts @@ -0,0 +1,44 @@ +import { BrowserModule } from '@angular/platform-browser'; +import { NgModule } from '@angular/core'; +import { HttpModule } from '@angular/http'; +import { CookieService } from 'ngx-cookie-service'; +import { ReactiveFormsModule } from '@angular/forms'; + +import { routing } from './app.routing'; + +import { AppComponent } from './app.component'; +import { LoginComponent } from './components/login/login.component'; +import { DamnComponent } from './components/damn/damn.component'; +import { SnackbarComponent } from './components/snackbar/snackbar.component'; +import { IndicatorComponent } from './components/indicator/indicator.component'; + +import { AuthService } from './services/auth.service'; +import { UserDataService } from './services/user-data.service'; +import { NetworkStatusService } from './services/network-status.service'; + +import { AuthGuard } from './guards/auth.guard'; + +@NgModule({ + declarations: [ + AppComponent, + LoginComponent, + DamnComponent, + SnackbarComponent, + IndicatorComponent, + ], + imports: [ + routing, + BrowserModule, + HttpModule, + ReactiveFormsModule + ], + providers: [ + CookieService, + AuthService, + AuthGuard, + UserDataService, + NetworkStatusService + ], + bootstrap: [AppComponent] +}) +export class AppModule { } diff --git a/angular-src/src/app/user/app.routing.ts b/angular-src/src/app/user/app.routing.ts new file mode 100755 index 0000000..a48acdb --- /dev/null +++ b/angular-src/src/app/user/app.routing.ts @@ -0,0 +1,25 @@ +import { ModuleWithProviders } from '@angular/core'; +import { Routes, RouterModule } from '@angular/router'; + +import { LoginComponent } from './components/login/login.component'; +import { DamnComponent } from './components/damn/damn.component'; + +import { AuthGuard } from './guards/auth.guard'; + +const appRoutes: Routes = [ + { + path: '', + component: DamnComponent, + canActivate: [AuthGuard] + }, + { + path: 'login', + component: LoginComponent + }, + { + path: '**', + redirectTo: '' + } +] + +export const routing: ModuleWithProviders = RouterModule.forRoot(appRoutes); \ No newline at end of file diff --git a/angular-src/src/app/user/components/damn/damn.component.css b/angular-src/src/app/user/components/damn/damn.component.css new file mode 100644 index 0000000..f043e66 --- /dev/null +++ b/angular-src/src/app/user/components/damn/damn.component.css @@ -0,0 +1,75 @@ +.logout-btn { + position: absolute; + top: 30px; + right: 40px; + text-decoration: none; + color: #fff; + background-color: #2551F8; + padding: 12px 22px; + border-radius: 5px; + box-shadow: 0px 3px 10px 1px #ccc; + font-weight: bold; +} +.admin-btn { + position: absolute; + top: 30px; + left: 40px; + text-decoration: none; + color: #fff; + background-color: #333; + padding: 12px 22px; + border-radius: 5px; + box-shadow: 0px 3px 10px 1px #ccc; +} + +.form-container { + margin: 100px auto 0; + width: 700px; +} +.access-req-textarea { + width: 100%; + height: 100px; + margin-bottom: 20px; + outline: none; + border: solid thin #ddd; + box-shadow: 0px 3px 10px 1px #ccc; + padding: 10px; + border-radius: 3px; + font-size: 1rem; +} +#ssh-key { + height: 150px; +} +.btn { + border: none; + margin: 10px 5px; + border-radius: 3px; + color: #fff; + background-color: #222; + font-size: 1rem; + padding: 12px 22px; + cursor: pointer; + box-shadow: 0px 3px 10px 1px #ccc; +} + +.disabled { + display: none; 
+} + +.admin-btn-no-network { + top: 60px !important; +} + +@media only screen and (max-width: 700px) { + .form-container { + width: 95%; + } + .logout-btn { + top: 15px; + right: 10px; + } + .admin-btn { + top: 15px; + left: 10px; + } +} \ No newline at end of file diff --git a/angular-src/src/app/user/components/damn/damn.component.html b/angular-src/src/app/user/components/damn/damn.component.html new file mode 100644 index 0000000..d2d9e2f --- /dev/null +++ b/angular-src/src/app/user/components/damn/damn.component.html @@ -0,0 +1,14 @@ +Logout +Admin +
+
+
+ +
+ +
+ +
+ +
+
\ No newline at end of file diff --git a/angular-src/src/app/user/components/damn/damn.component.spec.ts b/angular-src/src/app/user/components/damn/damn.component.spec.ts new file mode 100644 index 0000000..f226e85 --- /dev/null +++ b/angular-src/src/app/user/components/damn/damn.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { DamnComponent } from './damn.component'; + +describe('DamnComponent', () => { + let component: DamnComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ DamnComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(DamnComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/user/components/damn/damn.component.ts b/angular-src/src/app/user/components/damn/damn.component.ts new file mode 100644 index 0000000..2c40431 --- /dev/null +++ b/angular-src/src/app/user/components/damn/damn.component.ts @@ -0,0 +1,62 @@ +import { Component, OnInit } from '@angular/core'; +import { FormGroup, FormControl, Validators } from '@angular/forms'; + +import { NetworkStatusService } from '../../services/network-status.service'; +import { UserDataService } from '../../services/user-data.service'; +import { Subscription } from 'rxjs/Subscription'; + +@Component({ + selector: 'app-damn', + templateUrl: './damn.component.html', + styleUrls: ['./damn.component.css'] +}) +export class DamnComponent implements OnInit { + + accessRequestForm: FormGroup; + message: string; + ssh_key: string; + online: boolean; + subscription: Subscription; + + constructor( + private userDataService: UserDataService, + private networkStatusService: NetworkStatusService + ) { } + + ngOnInit() { + this.online = navigator.onLine; + this.accessRequestForm = new FormGroup({ + message: new FormControl('', [Validators.maxLength(250)]), + ssh_key: new FormControl('', [Validators.required]) + }); + this.subscription = this.networkStatusService.getStatus().subscribe(() => { + this.online = this.networkStatusService.online; + }); + } + + makeAccessRequest(requestData) { + if(this.isValid(requestData.ssh_key)) + this.userDataService.makeAccessRequest(requestData).subscribe(data => { + if(data.success) { + this.message = ''; + this.ssh_key = ''; + this.userDataService.sendAlert(data.message); + } else + this.userDataService.sendAlertDanger(data.message); + }); + else + this.userDataService.sendAlertDanger('Invalid SSH key'); + } + + isValid(ssh_key) { + if(/^ssh-rsa AAAA[0-9A-Za-z+\/]+[=]{0,3}( [^@]+@[^@]+)?$/.test(ssh_key)) + return true; + else + return false; + } + + makeAdminRequest() { + this.userDataService.makeAdminRequest(); + } + +} diff --git a/angular-src/src/app/user/components/indicator/indicator.component.css b/angular-src/src/app/user/components/indicator/indicator.component.css new file mode 100644 index 0000000..e5db382 --- /dev/null +++ b/angular-src/src/app/user/components/indicator/indicator.component.css @@ -0,0 +1,5 @@ +.indicator { + background-color: #777; + color: #fff; + padding: 10px; +} \ No newline at end of file diff --git a/angular-src/src/app/user/components/indicator/indicator.component.html b/angular-src/src/app/user/components/indicator/indicator.component.html new file mode 100644 index 0000000..c7895ee --- /dev/null +++ 
b/angular-src/src/app/user/components/indicator/indicator.component.html @@ -0,0 +1 @@ +
<div class="indicator" *ngIf="!online">You are working offline</div>
\ No newline at end of file diff --git a/angular-src/src/app/user/components/indicator/indicator.component.spec.ts b/angular-src/src/app/user/components/indicator/indicator.component.spec.ts new file mode 100644 index 0000000..d62d699 --- /dev/null +++ b/angular-src/src/app/user/components/indicator/indicator.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { IndicatorComponent } from './indicator.component'; + +describe('IndicatorComponent', () => { + let component: IndicatorComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ IndicatorComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(IndicatorComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/user/components/indicator/indicator.component.ts b/angular-src/src/app/user/components/indicator/indicator.component.ts new file mode 100644 index 0000000..1037b81 --- /dev/null +++ b/angular-src/src/app/user/components/indicator/indicator.component.ts @@ -0,0 +1,35 @@ +import { Component, OnInit } from '@angular/core'; + +import { NetworkStatusService } from '../../services/network-status.service'; +import { Subscription } from 'rxjs/Subscription'; + +@Component({ + selector: 'app-indicator', + templateUrl: './indicator.component.html', + styleUrls: ['./indicator.component.css'] +}) +export class IndicatorComponent implements OnInit { + + online: boolean; + subscription: Subscription; + + constructor( + private networkStatusService: NetworkStatusService + ) { } + + ngOnInit() { + this.online = navigator.onLine; + window.addEventListener('online', () => { + this.networkStatusService.setStatus(true); + if(navigator.serviceWorker.controller) + navigator.serviceWorker.controller.postMessage({ + command: 'online' + }); + }); + window.addEventListener('offline', () => {this.networkStatusService.setStatus(false)}); + this.subscription = this.networkStatusService.getStatus().subscribe(() => { + this.online = this.networkStatusService.online; + }); + } + +} diff --git a/angular-src/src/app/user/components/login/login.component.css b/angular-src/src/app/user/components/login/login.component.css new file mode 100644 index 0000000..50e039c --- /dev/null +++ b/angular-src/src/app/user/components/login/login.component.css @@ -0,0 +1,40 @@ +.container { + width: 100%; + height: 100vh; + display: flex; + justify-content: center; + align-items: center; +} + +.login-btn { + font-size: 1.8rem; + padding: 12px 55px; + color: #fff; + border-radius: 3px; + background-color: #111; +} + +.title { + padding: 20px 30px; + font-size: 4.1rem; + font-weight: lighter; + margin: 0; +} + +.disabled { + display: none; +} + +.offline { + font-size: 25px; + font-weight: bold; + color: black; +} + +@media only screen and (max-width: 985px) { + + .container { + flex-direction: column; + } + +} \ No newline at end of file diff --git a/angular-src/src/app/user/components/login/login.component.html b/angular-src/src/app/user/components/login/login.component.html new file mode 100644 index 0000000..9f51bf2 --- /dev/null +++ b/angular-src/src/app/user/components/login/login.component.html @@ -0,0 +1,5 @@ +
+

<h1 class="title">DAMN</h1>

+ +<p class="offline" [class.disabled]="online">Damn it, you're offline</p> +
\ No newline at end of file diff --git a/angular-src/src/app/user/components/login/login.component.spec.ts b/angular-src/src/app/user/components/login/login.component.spec.ts new file mode 100644 index 0000000..d6d85a8 --- /dev/null +++ b/angular-src/src/app/user/components/login/login.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { LoginComponent } from './login.component'; + +describe('LoginComponent', () => { + let component: LoginComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ LoginComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(LoginComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/user/components/login/login.component.ts b/angular-src/src/app/user/components/login/login.component.ts new file mode 100644 index 0000000..c1bc79d --- /dev/null +++ b/angular-src/src/app/user/components/login/login.component.ts @@ -0,0 +1,27 @@ +import { Component, OnInit } from '@angular/core'; + +import { NetworkStatusService } from '../../services/network-status.service'; +import { Subscription } from 'rxjs/Subscription'; + +@Component({ + selector: 'app-login', + templateUrl: './login.component.html', + styleUrls: ['./login.component.css'] +}) +export class LoginComponent implements OnInit { + + online: boolean; + subscription: Subscription; + + constructor( + private networkStatusService: NetworkStatusService + ) { } + + ngOnInit() { + this.online = navigator.onLine; + this.subscription = this.networkStatusService.getStatus().subscribe(() => { + this.online = this.networkStatusService.online; + }); + } + +} diff --git a/angular-src/src/app/user/components/snackbar/snackbar.component.css b/angular-src/src/app/user/components/snackbar/snackbar.component.css new file mode 100644 index 0000000..2582a3e --- /dev/null +++ b/angular-src/src/app/user/components/snackbar/snackbar.component.css @@ -0,0 +1,49 @@ +#snackbar { + visibility: hidden; + min-width: 250px; + background-color: #AAAAAA; + color: #333333; + text-align: center; + border-radius: 2px; + padding: 16px; + position: fixed; + z-index: 3; + left: 50%; + transform: translateX(-50%); + -webkit-transform: translateX(-50%); + -moz-transform: translateX(-50%); + bottom: 30px; + font-size: 17px; +} + +#snackbar.danger { + background-color: #C9302C; + font-weight: bolder; + color: #eee; +} + +#snackbar.show { + visibility: visible; + -webkit-animation: fadein 0.5s, fadeout 0.5s 2.5s; + animation: fadein 0.5s, fadeout 0.5s 2.5s; +} + +@-webkit-keyframes fadein { + from {bottom: 0; opacity: 0;} + to {bottom: 30px; opacity: 1;} +} + +@keyframes fadein { + from {bottom: 0; opacity: 0;} + to {bottom: 30px; opacity: 1;} +} + +@-webkit-keyframes fadeout { + from {bottom: 30px; opacity: 1;} + to {bottom: 0; opacity: 0;} +} + +@keyframes fadeout { + from {bottom: 30px; opacity: 1;} + to {bottom: 0; opacity: 0;} +} \ No newline at end of file diff --git a/angular-src/src/app/user/components/snackbar/snackbar.component.html b/angular-src/src/app/user/components/snackbar/snackbar.component.html new file mode 100644 index 0000000..4c5d4db --- /dev/null +++ b/angular-src/src/app/user/components/snackbar/snackbar.component.html @@ -0,0 +1 @@ +
<div id="snackbar" [class.show]="show" [class.danger]="!type">{{ message }}</div>
\ No newline at end of file diff --git a/angular-src/src/app/user/components/snackbar/snackbar.component.spec.ts b/angular-src/src/app/user/components/snackbar/snackbar.component.spec.ts new file mode 100644 index 0000000..664d327 --- /dev/null +++ b/angular-src/src/app/user/components/snackbar/snackbar.component.spec.ts @@ -0,0 +1,25 @@ +import { async, ComponentFixture, TestBed } from '@angular/core/testing'; + +import { SnackbarComponent } from './snackbar.component'; + +describe('SnackbarComponent', () => { + let component: SnackbarComponent; + let fixture: ComponentFixture; + + beforeEach(async(() => { + TestBed.configureTestingModule({ + declarations: [ SnackbarComponent ] + }) + .compileComponents(); + })); + + beforeEach(() => { + fixture = TestBed.createComponent(SnackbarComponent); + component = fixture.componentInstance; + fixture.detectChanges(); + }); + + it('should create', () => { + expect(component).toBeTruthy(); + }); +}); diff --git a/angular-src/src/app/user/components/snackbar/snackbar.component.ts b/angular-src/src/app/user/components/snackbar/snackbar.component.ts new file mode 100644 index 0000000..02dca12 --- /dev/null +++ b/angular-src/src/app/user/components/snackbar/snackbar.component.ts @@ -0,0 +1,37 @@ +import { Component, OnInit } from '@angular/core'; +import { UserDataService } from '../../services/user-data.service'; +import { Subscription } from 'rxjs/Subscription'; + +@Component({ + selector: 'app-snackbar', + templateUrl: './snackbar.component.html', + styleUrls: ['./snackbar.component.css'] +}) +export class SnackbarComponent implements OnInit { + + message: string; + type: boolean; + subscription: Subscription; + show: boolean; + + constructor( + private userDataService: UserDataService + ) { } + + ngOnInit() { + this.subscription = this.userDataService.getSnackbarData().subscribe(() => { + this.message = this.userDataService.snackbarMessage; + this.type = this.userDataService.snackbarType; + this.show = false; + this.showAlert(); + }); + } + + showAlert() { + this.show = true; + setTimeout(() => { + this.show = false; + }, 3000); + } + +} diff --git a/angular-src/src/app/user/guards/auth.guard.ts b/angular-src/src/app/user/guards/auth.guard.ts new file mode 100644 index 0000000..86539a1 --- /dev/null +++ b/angular-src/src/app/user/guards/auth.guard.ts @@ -0,0 +1,19 @@ +import { Injectable } from '@angular/core'; +import { Router, CanActivate } from '@angular/router'; +import { AuthService } from '../services/auth.service'; + +@Injectable() +export class AuthGuard implements CanActivate { + + constructor (private authService: AuthService, private router: Router) { } + + canActivate() { + if(this.authService.loggedIn()) { + return true; + } else { + this.router.navigate(['/login']); + return false; + } + } + +} \ No newline at end of file diff --git a/angular-src/src/app/user/services/auth.service.spec.ts b/angular-src/src/app/user/services/auth.service.spec.ts new file mode 100644 index 0000000..bd98634 --- /dev/null +++ b/angular-src/src/app/user/services/auth.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { AuthService } from './auth.service'; + +describe('AuthService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [AuthService] + }); + }); + + it('should be created', inject([AuthService], (service: AuthService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/angular-src/src/app/user/services/auth.service.ts 
b/angular-src/src/app/user/services/auth.service.ts new file mode 100644 index 0000000..8c0c5da --- /dev/null +++ b/angular-src/src/app/user/services/auth.service.ts @@ -0,0 +1,22 @@ +import { Injectable } from '@angular/core'; +import { Http, Headers } from '@angular/http'; +import { CookieService } from 'ngx-cookie-service'; + +@Injectable() +export class AuthService { + + user: any; + + constructor( + private http: Http, + private cookieService: CookieService + ) { } + + loggedIn() { + if(this.cookieService.check('googCookie')) + return true; + else + return false; + } + +} \ No newline at end of file diff --git a/angular-src/src/app/user/services/network-status.service.spec.ts b/angular-src/src/app/user/services/network-status.service.spec.ts new file mode 100644 index 0000000..6e9a60e --- /dev/null +++ b/angular-src/src/app/user/services/network-status.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { NetworkStatusService } from './network-status.service'; + +describe('NetworkStatusService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [NetworkStatusService] + }); + }); + + it('should be created', inject([NetworkStatusService], (service: NetworkStatusService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/angular-src/src/app/user/services/network-status.service.ts b/angular-src/src/app/user/services/network-status.service.ts new file mode 100644 index 0000000..f084184 --- /dev/null +++ b/angular-src/src/app/user/services/network-status.service.ts @@ -0,0 +1,26 @@ +import { Injectable } from '@angular/core'; +import { Observable } from 'rxjs'; +import { Subject } from 'rxjs/Subject'; + +@Injectable() +export class NetworkStatusService { + + online: boolean; + private statusSubject = new Subject(); + + constructor() { } + + setStatus(status) { + this.online = status; + this.sendStatus(); + } + + private sendStatus() { + this.statusSubject.next(); + } + + getStatus(): Observable { + return this.statusSubject.asObservable(); + } + +} diff --git a/angular-src/src/app/user/services/user-data.service.spec.ts b/angular-src/src/app/user/services/user-data.service.spec.ts new file mode 100644 index 0000000..32c6c8a --- /dev/null +++ b/angular-src/src/app/user/services/user-data.service.spec.ts @@ -0,0 +1,15 @@ +import { TestBed, inject } from '@angular/core/testing'; + +import { UserDataService } from './user-data.service'; + +describe('UserDataService', () => { + beforeEach(() => { + TestBed.configureTestingModule({ + providers: [UserDataService] + }); + }); + + it('should be created', inject([UserDataService], (service: UserDataService) => { + expect(service).toBeTruthy(); + })); +}); diff --git a/angular-src/src/app/user/services/user-data.service.ts b/angular-src/src/app/user/services/user-data.service.ts new file mode 100644 index 0000000..5926518 --- /dev/null +++ b/angular-src/src/app/user/services/user-data.service.ts @@ -0,0 +1,60 @@ +import { Injectable } from '@angular/core'; +import { Http, Headers } from '@angular/http'; +import { Observable } from 'rxjs'; +import { Subject } from 'rxjs/Subject'; +import 'rxjs/add/operator/map'; + +@Injectable() +export class UserDataService { + + snackbarMessage: string; + snackbarType: boolean; + + private snackbarSubject = new Subject(); + + constructor(private http: Http) { } + + makeAdminRequest() { + if(!navigator.onLine) + this.sendAlert('Request will be sent'); + let headers = new Headers(); + headers.append('Content-type', 
'application/json'); + return this.http.get('/makeAdminRequest', { headers: headers, withCredentials: true }) + .map(res => res.json()).subscribe(data => { + if(data.success) + this.sendAlert(data.message); + else + this.sendAlertDanger(data.message); + }); + } + + makeAccessRequest(requestData) { + if(!navigator.onLine) + this.sendAlert('Request will be sent'); + let headers = new Headers(); + headers.append('Content-type', 'application/json'); + return this.http.post('/makeAccessRequest/', requestData, { headers: headers, withCredentials: true }) + .map(res => res.json()) + } + + sendAlert(message) { + this.snackbarMessage = message; + this.snackbarType = true; + this.sendSnackbarData(); + } + + sendAlertDanger(message) { + this.snackbarMessage = message; + this.snackbarType = false; + this.sendSnackbarData(); + } + + private sendSnackbarData() { + this.snackbarSubject.next(); + } + + getSnackbarData(): Observable { + return this.snackbarSubject.asObservable(); + } + +} diff --git a/angular-src/src/assets/.gitkeep b/angular-src/src/assets/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/angular-src/src/environments/environment.prod.ts b/angular-src/src/environments/environment.prod.ts new file mode 100644 index 0000000..3612073 --- /dev/null +++ b/angular-src/src/environments/environment.prod.ts @@ -0,0 +1,3 @@ +export const environment = { + production: true +}; diff --git a/angular-src/src/environments/environment.ts b/angular-src/src/environments/environment.ts new file mode 100644 index 0000000..b7f639a --- /dev/null +++ b/angular-src/src/environments/environment.ts @@ -0,0 +1,8 @@ +// The file contents for the current environment will overwrite these during build. +// The build system defaults to the dev environment which uses `environment.ts`, but if you do +// `ng build --env=prod` then `environment.prod.ts` will be used instead. +// The list of which env maps to which file can be found in `.angular-cli.json`. 
+ +export const environment = { + production: false +}; diff --git a/angular-src/src/favicon.ico b/angular-src/src/favicon.ico new file mode 100755 index 0000000..5482175 Binary files /dev/null and b/angular-src/src/favicon.ico differ diff --git a/angular-src/src/index.html b/angular-src/src/index.html new file mode 100644 index 0000000..700bb1b --- /dev/null +++ b/angular-src/src/index.html @@ -0,0 +1,18 @@ + + + + + + + + DAMN + + + + + + + + + + diff --git a/angular-src/src/main-admin.ts b/angular-src/src/main-admin.ts new file mode 100644 index 0000000..8bd75d2 --- /dev/null +++ b/angular-src/src/main-admin.ts @@ -0,0 +1,12 @@ +import { enableProdMode } from '@angular/core'; +import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'; + +import { AppModule } from './app/admin/app.module'; +import { environment } from './environments/environment'; + +if (environment.production) { + enableProdMode(); +} + +platformBrowserDynamic().bootstrapModule(AppModule) + .catch(err => console.log(err)); diff --git a/angular-src/src/main-user.ts b/angular-src/src/main-user.ts new file mode 100644 index 0000000..ec866a7 --- /dev/null +++ b/angular-src/src/main-user.ts @@ -0,0 +1,32 @@ +import { enableProdMode } from '@angular/core'; +import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'; + +import { AppModule } from './app/user/app.module'; +import { environment } from './environments/environment'; + +if (environment.production) { + enableProdMode(); +} + +platformBrowserDynamic().bootstrapModule(AppModule) + .then(() => { + registerServiceWorker('service-worker'); + }) + .catch(err => console.log(err)); + +declare const Notification: any; +function registerServiceWorker(swName: string) { + if ('serviceWorker' in navigator) { + navigator.serviceWorker + .register(`/${swName}.js`) + .then(reg => { + if ('Notification' in window && Notification.permission !== 'granted' && Notification.permission !== 'denied') + Notification.requestPermission() + }) + .catch(err => + console.error('[App] Service worker registration failed', err) + ); + } else { + console.error('[App] Service Worker API is not supported in current browser'); + } +} \ No newline at end of file diff --git a/angular-src/src/polyfills.ts b/angular-src/src/polyfills.ts new file mode 100644 index 0000000..20d4075 --- /dev/null +++ b/angular-src/src/polyfills.ts @@ -0,0 +1,76 @@ +/** + * This file includes polyfills needed by Angular and is loaded before the app. + * You can add your own extra polyfills to this file. + * + * This file is divided into 2 sections: + * 1. Browser polyfills. These are applied before loading ZoneJS and are sorted by browsers. + * 2. Application imports. Files imported after ZoneJS that should be loaded before your main + * file. + * + * The current setup is for so-called "evergreen" browsers; the last versions of browsers that + * automatically update themselves. This includes Safari >= 10, Chrome >= 55 (including Opera), + * Edge >= 13 on the desktop, and iOS 10 and Chrome on mobile. + * + * Learn more in https://angular.io/docs/ts/latest/guide/browser-support.html + */ + +/*************************************************************************************************** + * BROWSER POLYFILLS + */ + +/** IE9, IE10 and IE11 requires all of the following polyfills. 
**/ +// import 'core-js/es6/symbol'; +// import 'core-js/es6/object'; +// import 'core-js/es6/function'; +// import 'core-js/es6/parse-int'; +// import 'core-js/es6/parse-float'; +// import 'core-js/es6/number'; +// import 'core-js/es6/math'; +// import 'core-js/es6/string'; +// import 'core-js/es6/date'; +// import 'core-js/es6/array'; +// import 'core-js/es6/regexp'; +// import 'core-js/es6/map'; +// import 'core-js/es6/weak-map'; +// import 'core-js/es6/set'; + +/** IE10 and IE11 requires the following for NgClass support on SVG elements */ +// import 'classlist.js'; // Run `npm install --save classlist.js`. + +/** IE10 and IE11 requires the following for the Reflect API. */ +// import 'core-js/es6/reflect'; + + +/** Evergreen browsers require these. **/ +// Used for reflect-metadata in JIT. If you use AOT (and only Angular decorators), you can remove. +import 'core-js/es7/reflect'; + + +/** + * Required to support Web Animations `@angular/platform-browser/animations`. + * Needed for: All but Chrome, Firefox and Opera. http://caniuse.com/#feat=web-animation + **/ +// import 'web-animations-js'; // Run `npm install --save web-animations-js`. + + + +/*************************************************************************************************** + * Zone JS is required by Angular itself. + */ +import 'zone.js/dist/zone'; // Included with Angular CLI. + + + +/*************************************************************************************************** + * APPLICATION IMPORTS + */ + +/** + * Date, currency, decimal and percent pipes. + * Needed for: All but Chrome, Firefox, Edge, IE11 and Safari 10 + */ +// import 'intl'; // Run `npm install --save intl`. +/** + * Need to import at least one locale-data with intl. + */ +// import 'intl/locale-data/jsonp/en'; diff --git a/angular-src/src/service-worker.js b/angular-src/src/service-worker.js new file mode 100644 index 0000000..0148713 --- /dev/null +++ b/angular-src/src/service-worker.js @@ -0,0 +1,123 @@ +importScripts('workbox-sw.prod.v2.1.2.js'); +importScripts('workbox-background-sync.prod.v2.0.3.js'); + +const workboxSW = new WorkboxSW(); +workboxSW.precache([]); +workboxSW.router.registerRoute('/', workboxSW.strategies.cacheFirst()); +workboxSW.router.registerRoute('/login', workboxSW.strategies.cacheFirst()); +workboxSW.router.registerRoute('/admin', workboxSW.strategies.cacheFirst()); +workboxSW.router.registerRoute('/accesses', workboxSW.strategies.cacheFirst()); +workboxSW.router.registerRoute('/accessrequests', workboxSW.strategies.cacheFirst()); +workboxSW.router.registerRoute('/admins', workboxSW.strategies.cacheFirst()); +workboxSW.router.registerRoute('/adminrequests', workboxSW.strategies.cacheFirst()); + +workboxSW.router.registerRoute(new RegExp('/admin/getAccesses'), + workboxSW.strategies.networkFirst({cacheName: 'dynamic-cache'}) +); +workboxSW.router.registerRoute(new RegExp('/admin/getAccessRequests'), + workboxSW.strategies.networkFirst({cacheName: 'dynamic-cache'}) +); +workboxSW.router.registerRoute(new RegExp('/admin/getAdmins'), + workboxSW.strategies.networkFirst({cacheName: 'dynamic-cache'}) +); +workboxSW.router.registerRoute(new RegExp('/admin/getAdminRequests'), + workboxSW.strategies.networkFirst({cacheName: 'dynamic-cache'}) +); + +workboxSW.router.registerRoute(new RegExp('^https://fonts\\.googleapis\\.com'), + workboxSW.strategies.cacheFirst({ + cacheName: 'googleapis', + cacheExpiration: { + maxEntries: 20 + }, + cacheableResponse: {statuses: [0, 200]} + }) +); + 
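+// Font CDN assets are served cache-first; allowing status 0 lets opaque cross-origin responses be cached for offline use.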
+workboxSW.router.registerRoute(new RegExp('^https://cdnjs\\.cloudflare\\.com/ajax/libs/font-awesome'), + workboxSW.strategies.cacheFirst({ + cacheName: 'iconfonts', + cacheExpiration: { + maxEntries: 20 + }, + cacheableResponse: {statuses: [0, 200]} + }) +); + +workboxSW.router.registerRoute(new RegExp('^https://fonts\\.gstatic\\.com'), + workboxSW.strategies.cacheFirst({ + cacheName: 'googleapis', + cacheExpiration: { + maxEntries: 20 + }, + cacheableResponse: {statuses: [0, 200]} + }) +); + +let bgQueue = new workbox.backgroundSync.QueuePlugin({ + callbacks: { + replayDidSucceed: async(hash, res) => { + console.log(res); + if(res.url.indexOf('/makeAdminRequest') !== -1) + message = 'Administrative privileges request sent' + if(res.url.indexOf('/makeAccessRequest') !== -1) + message = 'Access request sent' + if(res.url.indexOf('/admin/acceptAdminRequest') !== -1) + message = 'You have accepted the admin request' + if(res.url.indexOf('/admin/rejectAdminRequest') !== -1) + message = 'You have rejected the admin request' + if(res.url.indexOf('/admin/revokeAdminPrivilege') !== -1) + message = 'You have revoked the admin privilege' + if(res.url.indexOf('/admin/acceptAccessRequest') !== -1) + message = 'You have accepted the access request' + if(res.url.indexOf('/admin/rejectAccessRequest') !== -1) + message = 'You have rejected the access request' + if(res.url.indexOf('/admin/revokeAccessPrivilege') !== -1) + message = 'You have revoked the access privileges' + if(Notification.permission === 'granted') + self.registration.showNotification('DAMN', { + body: message, + icon: './images/icons/icon-128x128.png', + vibrate: [100, 50, 100] + }); + } + } +}); + +workboxSW.router.registerRoute('/makeAdminRequest', + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'GET' +); + +workboxSW.router.registerRoute('/makeAccessRequest', + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'POST' +); + +workboxSW.router.registerRoute(new RegExp('/admin/acceptAdminRequest*'), + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'GET' +); + +workboxSW.router.registerRoute(new RegExp('/admin/rejectAdminRequest*'), + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'GET' +); + +workboxSW.router.registerRoute(new RegExp('/admin/revokeAdminPrivilege*'), + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'GET' +); + +workboxSW.router.registerRoute(new RegExp('/admin/acceptAccessRequest*'), + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'POST' +); + +workboxSW.router.registerRoute(new RegExp('/admin/rejectAccessRequest*'), + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'GET' +); + +workboxSW.router.registerRoute(new RegExp('/admin/revokeAccessPrivilege*'), + workboxSW.strategies.networkOnly({plugins: [bgQueue]}), 'GET' +); + +self.addEventListener('message', function(event) { + let data = event.data; + if(data.command == 'online') + bgQueue.replayRequests() +}); \ No newline at end of file diff --git a/angular-src/src/styles.css b/angular-src/src/styles.css new file mode 100644 index 0000000..4b28631 --- /dev/null +++ b/angular-src/src/styles.css @@ -0,0 +1,27 @@ +/* You can add global styles to this file, and also import other style files */ +* { + box-sizing: border-box; + outline: none; +} + +a { + text-decoration: none; +} + +body { + font-family: 'Quicksand', sans-serif; + margin: 0; + padding: 0; +} + +button, table { + font-family: 'Quicksand', sans-serif; +} + +button { + cursor: pointer; +} + +h4 { + margin: 10px 0; +} \ No newline at end of file diff --git 
diff --git a/angular-src/src/test.ts b/angular-src/src/test.ts
new file mode 100644
index 0000000..cd612ee
--- /dev/null
+++ b/angular-src/src/test.ts
@@ -0,0 +1,32 @@
+// This file is required by karma.conf.js and loads recursively all the .spec and framework files
+
+import 'zone.js/dist/long-stack-trace-zone';
+import 'zone.js/dist/proxy.js';
+import 'zone.js/dist/sync-test';
+import 'zone.js/dist/jasmine-patch';
+import 'zone.js/dist/async-test';
+import 'zone.js/dist/fake-async-test';
+import { getTestBed } from '@angular/core/testing';
+import {
+  BrowserDynamicTestingModule,
+  platformBrowserDynamicTesting
+} from '@angular/platform-browser-dynamic/testing';
+
+// Unfortunately there's no typing for the `__karma__` variable. Just declare it as any.
+declare const __karma__: any;
+declare const require: any;
+
+// Prevent Karma from running prematurely.
+__karma__.loaded = function () {};
+
+// First, initialize the Angular testing environment.
+getTestBed().initTestEnvironment(
+  BrowserDynamicTestingModule,
+  platformBrowserDynamicTesting()
+);
+// Then we find all the tests.
+const context = require.context('./', true, /\.spec\.ts$/);
+// And load the modules.
+context.keys().map(context);
+// Finally, start Karma to run the tests.
+__karma__.start();
diff --git a/angular-src/src/tsconfig.app.json b/angular-src/src/tsconfig.app.json
new file mode 100644
index 0000000..39ba8db
--- /dev/null
+++ b/angular-src/src/tsconfig.app.json
@@ -0,0 +1,13 @@
+{
+  "extends": "../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "../out-tsc/app",
+    "baseUrl": "./",
+    "module": "es2015",
+    "types": []
+  },
+  "exclude": [
+    "test.ts",
+    "**/*.spec.ts"
+  ]
+}
diff --git a/angular-src/src/tsconfig.spec.json b/angular-src/src/tsconfig.spec.json
new file mode 100644
index 0000000..63d89ff
--- /dev/null
+++ b/angular-src/src/tsconfig.spec.json
@@ -0,0 +1,20 @@
+{
+  "extends": "../tsconfig.json",
+  "compilerOptions": {
+    "outDir": "../out-tsc/spec",
+    "baseUrl": "./",
+    "module": "commonjs",
+    "target": "es5",
+    "types": [
+      "jasmine",
+      "node"
+    ]
+  },
+  "files": [
+    "test.ts"
+  ],
+  "include": [
+    "**/*.spec.ts",
+    "**/*.d.ts"
+  ]
+}
diff --git a/angular-src/src/typings.d.ts b/angular-src/src/typings.d.ts
new file mode 100644
index 0000000..ef5c7bd
--- /dev/null
+++ b/angular-src/src/typings.d.ts
@@ -0,0 +1,5 @@
+/* SystemJS module definition */
+declare var module: NodeModule;
+interface NodeModule {
+  id: string;
+}
diff --git a/angular-src/tsconfig.json b/angular-src/tsconfig.json
new file mode 100644
index 0000000..a6c016b
--- /dev/null
+++ b/angular-src/tsconfig.json
@@ -0,0 +1,19 @@
+{
+  "compileOnSave": false,
+  "compilerOptions": {
+    "outDir": "./dist/out-tsc",
+    "sourceMap": true,
+    "declaration": false,
+    "moduleResolution": "node",
+    "emitDecoratorMetadata": true,
+    "experimentalDecorators": true,
+    "target": "es5",
+    "typeRoots": [
+      "node_modules/@types"
+    ],
+    "lib": [
+      "es2017",
+      "dom"
+    ]
+  }
+}
diff --git a/angular-src/tslint.json b/angular-src/tslint.json
new file mode 100644
index 0000000..c24dc29
--- /dev/null
+++ b/angular-src/tslint.json
@@ -0,0 +1,141 @@
+{
+  "rulesDirectory": [
+    "node_modules/codelyzer"
+  ],
+  "rules": {
+    "arrow-return-shorthand": true,
+    "callable-types": true,
+    "class-name": true,
+    "comment-format": [
+      true,
+      "check-space"
+    ],
+    "curly": true,
+    "eofline": true,
+    "forin": true,
+    "import-blacklist": [
+      true,
+      "rxjs",
+      "rxjs/Rx"
+    ],
+    "import-spacing": true,
+    "indent": [
+      true,
+      "spaces"
+    ],
+    "interface-over-type-literal": true,
+    "label-position": true,
+    "max-line-length": [
+      true,
+      140
+    ],
+    "member-access": false,
+    "member-ordering": [
+      true,
+      {
+        "order": [
+          "static-field",
+          "instance-field",
+          "static-method",
+          "instance-method"
+        ]
+      }
+    ],
+    "no-arg": true,
+    "no-bitwise": true,
+    "no-console": [
+      true,
+      "debug",
+      "info",
+      "time",
+      "timeEnd",
+      "trace"
+    ],
+    "no-construct": true,
+    "no-debugger": true,
+    "no-duplicate-super": true,
+    "no-empty": false,
+    "no-empty-interface": true,
+    "no-eval": true,
+    "no-inferrable-types": [
+      true,
+      "ignore-params"
+    ],
+    "no-misused-new": true,
+    "no-non-null-assertion": true,
+    "no-shadowed-variable": true,
+    "no-string-literal": false,
+    "no-string-throw": true,
+    "no-switch-case-fall-through": true,
+    "no-trailing-whitespace": true,
+    "no-unnecessary-initializer": true,
+    "no-unused-expression": true,
+    "no-use-before-declare": true,
+    "no-var-keyword": true,
+    "object-literal-sort-keys": false,
+    "one-line": [
+      true,
+      "check-open-brace",
+      "check-catch",
+      "check-else",
+      "check-whitespace"
+    ],
+    "prefer-const": true,
+    "quotemark": [
+      true,
+      "single"
+    ],
+    "radix": true,
+    "semicolon": [
+      true,
+      "always"
+    ],
+    "triple-equals": [
+      true,
+      "allow-null-check"
+    ],
+    "typedef-whitespace": [
+      true,
+      {
+        "call-signature": "nospace",
+        "index-signature": "nospace",
+        "parameter": "nospace",
+        "property-declaration": "nospace",
+        "variable-declaration": "nospace"
+      }
+    ],
+    "typeof-compare": true,
+    "unified-signatures": true,
+    "variable-name": false,
+    "whitespace": [
+      true,
+      "check-branch",
+      "check-decl",
+      "check-operator",
+      "check-separator",
+      "check-type"
+    ],
+    "directive-selector": [
+      true,
+      "attribute",
+      "app",
+      "camelCase"
+    ],
+    "component-selector": [
+      true,
+      "element",
+      "app",
+      "kebab-case"
+    ],
+    "use-input-property-decorator": true,
+    "use-output-property-decorator": true,
+    "use-host-property-decorator": true,
+    "no-input-rename": true,
+    "no-output-rename": true,
+    "use-life-cycle-interface": true,
+    "use-pipe-transform-interface": true,
+    "component-class-suffix": true,
+    "directive-class-suffix": true,
+    "invoke-injectable": true
+  }
+}
diff --git a/angular-src/yarn.lock b/angular-src/yarn.lock
new file mode 100644
index 0000000..a25f308
--- /dev/null
+++ b/angular-src/yarn.lock
@@ -0,0 +1,7186 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1 + + +"@angular-devkit/build-optimizer@~0.0.28": + version "0.0.35" + resolved "https://registry.yarnpkg.com/@angular-devkit/build-optimizer/-/build-optimizer-0.0.35.tgz#3aadad1d7e9ffc7dcd106fda8a5670465936562c" + dependencies: + loader-utils "^1.1.0" + source-map "^0.5.6" + typescript "~2.6.1" + webpack-sources "^1.0.1" + +"@angular-devkit/core@0.0.20": + version "0.0.20" + resolved "https://registry.yarnpkg.com/@angular-devkit/core/-/core-0.0.20.tgz#2ad36dd210fccd0e156d01c6499082ad4cd8c2af" + dependencies: + source-map "^0.5.6" + +"@angular-devkit/core@0.0.22": + version "0.0.22" + resolved "https://registry.yarnpkg.com/@angular-devkit/core/-/core-0.0.22.tgz#e90f46bf7ff47d260a767959267bc65ffee39ef1" + dependencies: + source-map "^0.5.6" + +"@angular-devkit/schematics@~0.0.34": + version "0.0.41" + resolved "https://registry.yarnpkg.com/@angular-devkit/schematics/-/schematics-0.0.41.tgz#9a066e442bdf10de4a093d6dd33a58e600d1b101" + dependencies: + "@angular-devkit/core" "0.0.22" + "@ngtools/json-schema" "^1.1.0" + "@schematics/schematics" "0.0.10" + minimist "^1.2.0" + rxjs "^5.5.2" + +"@angular/animations@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/animations/-/animations-4.4.6.tgz#fa661899a8a4e38cb7c583c7a5c97ce65d592a35" + dependencies: + tslib "^1.7.1" + +"@angular/cli@1.4.9": + version "1.4.9" + resolved "https://registry.yarnpkg.com/@angular/cli/-/cli-1.4.9.tgz#cd5cd7a8f3bf3a1c28dfcdc523a011f0fcebfb96" + dependencies: + "@angular-devkit/build-optimizer" "~0.0.28" + "@angular-devkit/schematics" "~0.0.34" + "@ngtools/json-schema" "1.1.0" + "@ngtools/webpack" "1.7.4" + "@schematics/angular" "~0.0.46" + autoprefixer "^6.5.3" + chalk "^2.0.1" + circular-dependency-plugin "^3.0.0" + common-tags "^1.3.1" + copy-webpack-plugin "^4.1.1" + core-object "^3.1.0" + css-loader "^0.28.1" + cssnano "^3.10.0" + denodeify "^1.2.1" + ember-cli-string-utils "^1.0.0" + exports-loader "^0.6.3" + extract-text-webpack-plugin "3.0.0" + file-loader "^1.1.5" + fs-extra "^4.0.0" + glob "^7.0.3" + html-webpack-plugin "^2.29.0" + istanbul-instrumenter-loader "^2.0.0" + karma-source-map-support "^1.2.0" + less "^2.7.2" + less-loader "^4.0.5" + license-webpack-plugin "^1.0.0" + lodash "^4.11.1" + memory-fs "^0.4.1" + node-modules-path "^1.0.0" + nopt "^4.0.1" + opn "~5.1.0" + portfinder "~1.0.12" + postcss-loader "^1.3.3" + postcss-url "^5.1.2" + raw-loader "^0.5.1" + resolve "^1.1.7" + rxjs "^5.4.2" + sass-loader "^6.0.3" + semver "^5.1.0" + silent-error "^1.0.0" + source-map-loader "^0.2.0" + source-map-support "^0.4.1" + style-loader "^0.13.1" + stylus "^0.54.5" + stylus-loader "^3.0.1" + typescript ">=2.0.0 <2.6.0" + url-loader "^0.6.2" + webpack "~3.7.1" + webpack-concat-plugin "1.4.0" + webpack-dev-middleware "~1.12.0" + webpack-dev-server "~2.7.1" + webpack-merge "^4.1.0" + webpack-sources "^1.0.0" + zone.js "^0.8.14" + optionalDependencies: + node-sass "^4.3.0" + +"@angular/common@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/common/-/common-4.4.6.tgz#4b81420724e0828a0e839b95a55eb1a7e83918f2" + dependencies: + tslib "^1.7.1" + +"@angular/compiler-cli@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/compiler-cli/-/compiler-cli-4.4.6.tgz#bafd3d1e260e99087eb9a8cf7532dbd603abb9b1" + dependencies: + "@angular/tsc-wrapped" "4.4.6" + minimist "^1.2.0" + reflect-metadata "^0.1.2" + +"@angular/compiler@^4.2.4": + version "4.4.6" + resolved 
"https://registry.yarnpkg.com/@angular/compiler/-/compiler-4.4.6.tgz#2ee1faf25b757e1d128979074be7fae529b3bc20" + dependencies: + tslib "^1.7.1" + +"@angular/core@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/core/-/core-4.4.6.tgz#13031fd10dcfe438875419b38f21120958bc2354" + dependencies: + tslib "^1.7.1" + +"@angular/forms@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/forms/-/forms-4.4.6.tgz#fe64ace42435c1b80f49034b7c41ce8caf14a44a" + dependencies: + tslib "^1.7.1" + +"@angular/http@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/http/-/http-4.4.6.tgz#0af680c6710bdc026d940e225cfd0f6a5c005d0c" + dependencies: + tslib "^1.7.1" + +"@angular/language-service@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/language-service/-/language-service-4.4.6.tgz#498ece95c5f6066403bf9fd3c5831af42b45618b" + +"@angular/platform-browser-dynamic@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/platform-browser-dynamic/-/platform-browser-dynamic-4.4.6.tgz#4d3d9a6a7bf2cf3de4058a615ae059eff641fa36" + dependencies: + tslib "^1.7.1" + +"@angular/platform-browser@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/platform-browser/-/platform-browser-4.4.6.tgz#a9839c547e1b654fa1d24a89780c8ba6ab8dcce0" + dependencies: + tslib "^1.7.1" + +"@angular/router@^4.2.4": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/router/-/router-4.4.6.tgz#0f6ad29ae0ff8d2c9ea379bd320447217b7ec866" + dependencies: + tslib "^1.7.1" + +"@angular/tsc-wrapped@4.4.6": + version "4.4.6" + resolved "https://registry.yarnpkg.com/@angular/tsc-wrapped/-/tsc-wrapped-4.4.6.tgz#16787cbbf50bdc7e738123b19c32527f244e178d" + dependencies: + tsickle "^0.21.0" + +"@ngtools/json-schema@1.1.0", "@ngtools/json-schema@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@ngtools/json-schema/-/json-schema-1.1.0.tgz#c3a0c544d62392acc2813a42c8a0dc6f58f86922" + +"@ngtools/webpack@1.7.4": + version "1.7.4" + resolved "https://registry.yarnpkg.com/@ngtools/webpack/-/webpack-1.7.4.tgz#5015c47ebd339045dd89a1bef0497f4524d2c8ed" + dependencies: + enhanced-resolve "^3.1.0" + loader-utils "^1.0.2" + magic-string "^0.22.3" + source-map "^0.5.6" + +"@schematics/angular@~0.0.46": + version "0.0.49" + resolved "https://registry.yarnpkg.com/@schematics/angular/-/angular-0.0.49.tgz#c00ca573756d6a79ee518b05eab5fe6e7b73f341" + dependencies: + "@angular-devkit/core" "0.0.20" + +"@schematics/schematics@0.0.10": + version "0.0.10" + resolved "https://registry.yarnpkg.com/@schematics/schematics/-/schematics-0.0.10.tgz#b27b49a1a5482dc6c1c93fb3c20371c95874938b" + +"@types/jasmine@*": + version "2.8.2" + resolved "https://registry.yarnpkg.com/@types/jasmine/-/jasmine-2.8.2.tgz#6ae4d8740c0da5d5a627df725b4eed71b8e36668" + +"@types/jasmine@~2.5.53": + version "2.5.54" + resolved "https://registry.yarnpkg.com/@types/jasmine/-/jasmine-2.5.54.tgz#a6b5f2ae2afb6e0307774e8c7c608e037d491c63" + +"@types/jasminewd2@~2.0.2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@types/jasminewd2/-/jasminewd2-2.0.3.tgz#0d2886b0cbdae4c0eeba55e30792f584bf040a95" + dependencies: + "@types/jasmine" "*" + +"@types/node@^6.0.46", "@types/node@~6.0.60": + version "6.0.93" + resolved "https://registry.yarnpkg.com/@types/node/-/node-6.0.93.tgz#498b9461f4ec84a057c0aca80a54cce992805ff8" + +"@types/q@^0.0.32": + version "0.0.32" + resolved 
"https://registry.yarnpkg.com/@types/q/-/q-0.0.32.tgz#bd284e57c84f1325da702babfc82a5328190c0c5" + +"@types/selenium-webdriver@^2.53.35", "@types/selenium-webdriver@~2.53.39": + version "2.53.43" + resolved "https://registry.yarnpkg.com/@types/selenium-webdriver/-/selenium-webdriver-2.53.43.tgz#2de3d718819bc20165754c4a59afb7e9833f6707" + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + +accepts@1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca" + dependencies: + mime-types "~2.1.11" + negotiator "0.6.1" + +accepts@~1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.4.tgz#86246758c7dd6d21a6474ff084a4740ec05eb21f" + dependencies: + mime-types "~2.1.16" + negotiator "0.6.1" + +acorn-dynamic-import@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/acorn-dynamic-import/-/acorn-dynamic-import-2.0.2.tgz#c752bd210bef679501b6c6cb7fc84f8f47158cc4" + dependencies: + acorn "^4.0.3" + +acorn@^4.0.3: + version "4.0.13" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.13.tgz#105495ae5361d697bd195c825192e1ad7f253787" + +acorn@^5.0.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.2.1.tgz#317ac7821826c22c702d66189ab8359675f135d7" + +adm-zip@0.4.4: + version "0.4.4" + resolved "https://registry.yarnpkg.com/adm-zip/-/adm-zip-0.4.4.tgz#a61ed5ae6905c3aea58b3a657d25033091052736" + +adm-zip@^0.4.7: + version "0.4.7" + resolved "https://registry.yarnpkg.com/adm-zip/-/adm-zip-0.4.7.tgz#8606c2cbf1c426ce8c8ec00174447fd49b6eafc1" + +after@0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f" + +agent-base@2: + version "2.1.1" + resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-2.1.1.tgz#d6de10d5af6132d5bd692427d46fc538539094c7" + dependencies: + extend "~3.0.0" + semver "~5.0.1" + +ajv-keywords@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-2.1.1.tgz#617997fc5f60576894c435f940d819e135b80762" + +ajv@^4.9.1: + version "4.11.8" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-4.11.8.tgz#82ffb02b29e662ae53bdc20af15947706739c536" + dependencies: + co "^4.6.0" + json-stable-stringify "^1.0.1" + +ajv@^5.0.0, ajv@^5.1.0, ajv@^5.1.5: + version "5.5.1" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.5.1.tgz#b38bb8876d9e86bee994956a04e721e88b248eb2" + dependencies: + co "^4.6.0" + fast-deep-equal "^1.0.0" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.3.0" + +align-text@^0.1.1, align-text@^0.1.3: + version "0.1.4" + resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117" + dependencies: + kind-of "^3.0.2" + longest "^1.0.1" + repeat-string "^1.5.2" + +alphanum-sort@^1.0.1, alphanum-sort@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" + +amdefine@>=0.0.4: + version "1.0.1" + resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" + +ansi-gray@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/ansi-gray/-/ansi-gray-0.1.1.tgz#2962cf54ec9792c48510a3deb524436861ef7251" + dependencies: + ansi-wrap "0.1.0" + +ansi-html@0.0.7: + version "0.0.7" + resolved 
"https://registry.yarnpkg.com/ansi-html/-/ansi-html-0.0.7.tgz#813584021962a9e9e6fd039f940d12f56ca7859e" + +ansi-regex@^0.2.0, ansi-regex@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + +ansi-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" + +ansi-styles@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de" + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + +ansi-styles@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.0.tgz#c159b8d5be0f9e5a6f346dab94f16ce022161b88" + dependencies: + color-convert "^1.9.0" + +ansi-wrap@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/ansi-wrap/-/ansi-wrap-0.1.0.tgz#a82250ddb0015e9a27ca82e82ea603bbfa45efaf" + +anymatch@^1.3.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.2.tgz#553dcb8f91e3c889845dfdba34c77721b90b9d7a" + dependencies: + micromatch "^2.1.5" + normalize-path "^2.0.0" + +app-root-path@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/app-root-path/-/app-root-path-2.0.1.tgz#cd62dcf8e4fd5a417efc664d2e5b10653c651b46" + +append-transform@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/append-transform/-/append-transform-0.4.0.tgz#d76ebf8ca94d276e247a36bad44a4b74ab611991" + dependencies: + default-require-extensions "^1.0.0" + +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + +archy@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40" + +are-we-there-yet@~1.1.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d" + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +argparse@^1.0.7: + version "1.0.9" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86" + dependencies: + sprintf-js "~1.0.2" + +arr-diff@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf" + dependencies: + arr-flatten "^1.0.1" + +arr-diff@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" + +arr-flatten@^1.0.1, arr-flatten@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + +arr-union@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" + +array-differ@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/array-differ/-/array-differ-1.0.0.tgz#eff52e3758249d33be402b8bb8e564bb2b5d4031" + +array-each@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/array-each/-/array-each-1.0.1.tgz#a794af0c05ab1752846ee753a1f211a05ba0c44f" + +array-find-index@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1" + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + +array-flatten@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.1.tgz#426bb9da84090c1838d812c8150af20a8331e296" + +array-slice@^0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/array-slice/-/array-slice-0.2.3.tgz#dd3cfb80ed7973a75117cdac69b0b99ec86186f5" + +array-slice@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/array-slice/-/array-slice-1.1.0.tgz#e368ea15f89bc7069f7ffb89aec3a6c7d4ac22d4" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + dependencies: + array-uniq "^1.0.1" + +array-uniq@^1.0.1, array-uniq@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + +array-unique@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53" + +array-unique@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" + +arraybuffer.slice@0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca" + +arrify@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" + +asap@~2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + +asn1.js@^4.0.0: + version "4.9.2" + resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-4.9.2.tgz#8117ef4f7ed87cd8f89044b5bff97ac243a16c9a" + dependencies: + bn.js "^4.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +asn1@~0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + +assert-plus@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234" + +assert@^1.1.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/assert/-/assert-1.4.1.tgz#99912d591836b5a6f5b345c0f07eefc08fc65d91" + dependencies: + util "0.10.3" + +async-each@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.1.tgz#19d386a1d9edc6e7c1c85d388aedbcc56d33602d" + +async-foreach@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542" + +async@^1.4.0, async@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + +async@^2.1.2, async@^2.1.4, async@^2.1.5, async@^2.4.1, async@^2.5.0: + version "2.6.0" + 
resolved "https://registry.yarnpkg.com/async/-/async-2.6.0.tgz#61a29abb6fcc026fea77e56d1c6ec53a795951f4" + dependencies: + lodash "^4.14.0" + +async@~0.2.6: + version "0.2.10" + resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1" + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + +atob@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/atob/-/atob-2.0.3.tgz#19c7a760473774468f20b2d2d03372ad7d4cbf5d" + +autoprefixer@^6.3.1, autoprefixer@^6.5.3: + version "6.7.7" + resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-6.7.7.tgz#1dbd1c835658e35ce3f9984099db00585c782014" + dependencies: + browserslist "^1.7.6" + caniuse-db "^1.0.30000634" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + postcss "^5.2.16" + postcss-value-parser "^3.2.3" + +aws-sign2@~0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f" + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + +aws4@^1.2.1, aws4@^1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e" + +babel-code-frame@^6.11.0, babel-code-frame@^6.22.0, babel-code-frame@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + dependencies: + chalk "^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +babel-generator@^6.18.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.26.0.tgz#ac1ae20070b79f6e3ca1d3269613053774f20dc5" + dependencies: + babel-messages "^6.23.0" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + detect-indent "^4.0.0" + jsesc "^1.3.0" + lodash "^4.17.4" + source-map "^0.5.6" + trim-right "^1.0.1" + +babel-messages@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e" + dependencies: + babel-runtime "^6.22.0" + +babel-runtime@^6.22.0, babel-runtime@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.11.0" + +babel-template@^6.16.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.26.0.tgz#de03e2d16396b069f46dd9fff8521fb1a0e35e02" + dependencies: + babel-runtime "^6.26.0" + babel-traverse "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + lodash "^4.17.4" + +babel-traverse@^6.18.0, babel-traverse@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.26.0.tgz#46a9cbd7edcc62c8e5c064e2d2d8d0f4035766ee" + dependencies: + babel-code-frame "^6.26.0" + babel-messages "^6.23.0" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + debug "^2.6.8" + globals "^9.18.0" + invariant "^2.2.2" + lodash "^4.17.4" + +babel-types@^6.18.0, babel-types@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.26.0.tgz#a3b073f94ab49eb6fa55cd65227a334380632497" + dependencies: + babel-runtime "^6.26.0" + esutils "^2.0.2" + lodash "^4.17.4" + to-fast-properties "^1.0.3" + 
+babylon@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3" + +backo2@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947" + +balanced-match@^0.4.2: + version "0.4.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838" + +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" + +base64-arraybuffer@0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz#73926771923b5a19747ad666aa5cd4bf9c6e9ce8" + +base64-js@^1.0.2: + version "1.2.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.2.1.tgz#a91947da1f4a516ea38e5b4ec0ec3773675e0886" + +base64id@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/base64id/-/base64id-1.0.0.tgz#47688cb99bb6804f0e06d3e763b1c32e57d8e6b6" + +base@^0.11.1: + version "0.11.2" + resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" + dependencies: + cache-base "^1.0.1" + class-utils "^0.3.5" + component-emitter "^1.2.1" + define-property "^1.0.0" + isobject "^3.0.1" + mixin-deep "^1.2.0" + pascalcase "^0.1.1" + +batch@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" + +bcrypt-pbkdf@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d" + dependencies: + tweetnacl "^0.14.3" + +beeper@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/beeper/-/beeper-1.1.1.tgz#e6d5ea8c5dad001304a70b22638447f69cb2f809" + +better-assert@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522" + dependencies: + callsite "1.0.0" + +big.js@^3.1.3: + version "3.2.0" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-3.2.0.tgz#a5fc298b81b9e0dca2e458824784b65c52ba588e" + +binary-extensions@^1.0.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.11.0.tgz#46aa1751fb6a2f93ee5e689bb1087d4b14c6c205" + +blob@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921" + +block-stream@*: + version "0.0.9" + resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.9.tgz#13ebfe778a03205cfe03751481ebb4b3300c126a" + dependencies: + inherits "~2.0.0" + +blocking-proxy@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/blocking-proxy/-/blocking-proxy-0.0.5.tgz#462905e0dcfbea970f41aa37223dda9c07b1912b" + dependencies: + minimist "^1.2.0" + +bluebird@^3.3.0, bluebird@^3.4.7, bluebird@^3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.1.tgz#d9551f9de98f1fcda1e683d17ee91a0602ee2eb9" + +bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.1.1, bn.js@^4.4.0: + version "4.11.8" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" + +body-parser@1.18.2, body-parser@^1.16.1: + version "1.18.2" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.18.2.tgz#87678a19d84b47d859b83199bd59bce222b10454" + 
dependencies: + bytes "3.0.0" + content-type "~1.0.4" + debug "2.6.9" + depd "~1.1.1" + http-errors "~1.6.2" + iconv-lite "0.4.19" + on-finished "~2.3.0" + qs "6.5.1" + raw-body "2.3.2" + type-is "~1.6.15" + +bonjour@^3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/bonjour/-/bonjour-3.5.0.tgz#8e890a183d8ee9a2393b3844c691a42bcf7bc9f5" + dependencies: + array-flatten "^2.1.0" + deep-equal "^1.0.1" + dns-equal "^1.0.0" + dns-txt "^2.0.2" + multicast-dns "^6.0.1" + multicast-dns-service-types "^1.1.0" + +boolbase@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + +boom@2.x.x: + version "2.10.1" + resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f" + dependencies: + hoek "2.x.x" + +boom@4.x.x: + version "4.3.1" + resolved "https://registry.yarnpkg.com/boom/-/boom-4.3.1.tgz#4f8a3005cb4a7e3889f749030fd25b96e01d2e31" + dependencies: + hoek "4.x.x" + +boom@5.x.x: + version "5.2.0" + resolved "https://registry.yarnpkg.com/boom/-/boom-5.2.0.tgz#5dd9da6ee3a5f302077436290cb717d3f4a54e02" + dependencies: + hoek "4.x.x" + +brace-expansion@^1.0.0, brace-expansion@^1.1.7: + version "1.1.8" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.8.tgz#c07b211c7c952ec1f8efd51a77ef0d1d3990a292" + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^0.1.2: + version "0.1.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-0.1.5.tgz#c085711085291d8b75fdd74eab0f8597280711e6" + dependencies: + expand-range "^0.1.0" + +braces@^1.8.2: + version "1.8.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" + dependencies: + expand-range "^1.8.1" + preserve "^0.2.0" + repeat-element "^1.1.2" + +braces@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.0.tgz#a46941cb5fb492156b3d6a656e06c35364e3e66e" + dependencies: + arr-flatten "^1.1.0" + array-unique "^0.3.2" + define-property "^1.0.0" + extend-shallow "^2.0.1" + fill-range "^4.0.0" + isobject "^3.0.1" + repeat-element "^1.1.2" + snapdragon "^0.8.1" + snapdragon-node "^2.0.1" + split-string "^3.0.2" + to-regex "^3.0.1" + +brorand@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" + +browserify-aes@^1.0.0, browserify-aes@^1.0.4: + version "1.1.1" + resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.1.1.tgz#38b7ab55edb806ff2dcda1a7f1620773a477c49f" + dependencies: + buffer-xor "^1.0.3" + cipher-base "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.3" + inherits "^2.0.1" + safe-buffer "^5.0.1" + +browserify-cipher@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.0.tgz#9988244874bf5ed4e28da95666dcd66ac8fc363a" + dependencies: + browserify-aes "^1.0.4" + browserify-des "^1.0.0" + evp_bytestokey "^1.0.0" + +browserify-des@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.0.tgz#daa277717470922ed2fe18594118a175439721dd" + dependencies: + cipher-base "^1.0.1" + des.js "^1.0.0" + inherits "^2.0.1" + +browserify-rsa@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.0.1.tgz#21e0abfaf6f2029cf2fafb133567a701d4135524" + dependencies: + bn.js "^4.1.0" + randombytes "^2.0.1" + +browserify-sign@^4.0.0: + version 
"4.0.4" + resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.0.4.tgz#aa4eb68e5d7b658baa6bf6a57e630cbd7a93d298" + dependencies: + bn.js "^4.1.1" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.2" + elliptic "^6.0.0" + inherits "^2.0.1" + parse-asn1 "^5.0.0" + +browserify-zlib@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f" + dependencies: + pako "~1.0.5" + +browserslist@^1.3.6, browserslist@^1.5.2, browserslist@^1.7.6: + version "1.7.7" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-1.7.7.tgz#0bd76704258be829b2398bb50e4b62d1a166b0b9" + dependencies: + caniuse-db "^1.0.30000639" + electron-to-chromium "^1.2.7" + +buffer-indexof@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-indexof/-/buffer-indexof-1.1.1.tgz#52fabcc6a606d1a00302802648ef68f639da268c" + +buffer-xor@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" + +buffer@^4.3.0: + version "4.9.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.1.tgz#6d1bb601b07a4efced97094132093027c95bc298" + dependencies: + base64-js "^1.0.2" + ieee754 "^1.1.4" + isarray "^1.0.0" + +builtin-modules@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f" + +builtin-status-codes@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" + +bytes@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" + +cache-base@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" + dependencies: + collection-visit "^1.0.0" + component-emitter "^1.2.1" + get-value "^2.0.6" + has-value "^1.0.0" + isobject "^3.0.1" + set-value "^2.0.0" + to-object-path "^0.3.0" + union-value "^1.0.0" + unset-value "^1.0.0" + +callsite@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20" + +camel-case@3.0.x: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73" + dependencies: + no-case "^2.2.0" + upper-case "^1.1.1" + +camelcase-keys@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-2.1.0.tgz#308beeaffdf28119051efa1d932213c91b8f92e7" + dependencies: + camelcase "^2.0.0" + map-obj "^1.0.0" + +camelcase@^1.0.2: + version "1.2.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39" + +camelcase@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f" + +camelcase@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a" + +camelcase@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + +caniuse-api@^1.5.2: + version "1.6.1" + resolved 
"https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-1.6.1.tgz#b534e7c734c4f81ec5fbe8aca2ad24354b962c6c" + dependencies: + browserslist "^1.3.6" + caniuse-db "^1.0.30000529" + lodash.memoize "^4.1.2" + lodash.uniq "^4.5.0" + +caniuse-db@^1.0.30000529, caniuse-db@^1.0.30000634, caniuse-db@^1.0.30000639: + version "1.0.30000782" + resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000782.tgz#d8815bce1578c350aced1132507301205e0fab53" + +caseless@~0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + +center-align@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad" + dependencies: + align-text "^0.1.3" + lazy-cache "^1.0.3" + +chalk@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174" + dependencies: + ansi-styles "^1.1.0" + escape-string-regexp "^1.0.0" + has-ansi "^0.1.0" + strip-ansi "^0.3.0" + supports-color "^0.2.0" + +chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@^2.0.0, chalk@^2.0.1, chalk@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.3.0.tgz#b5ea48efc9c1793dccc9b4767c93914d3f2d52ba" + dependencies: + ansi-styles "^3.1.0" + escape-string-regexp "^1.0.5" + supports-color "^4.0.0" + +charenc@~0.0.1: + version "0.0.2" + resolved "https://registry.yarnpkg.com/charenc/-/charenc-0.0.2.tgz#c0a1d2f3a7092e03774bfa83f14c0fc5790a8667" + +chokidar@^1.4.1, chokidar@^1.6.0, chokidar@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-1.7.0.tgz#798e689778151c8076b4b360e5edd28cda2bb468" + dependencies: + anymatch "^1.3.0" + async-each "^1.0.0" + glob-parent "^2.0.0" + inherits "^2.0.1" + is-binary-path "^1.0.0" + is-glob "^2.0.0" + path-is-absolute "^1.0.0" + readdirp "^2.0.0" + optionalDependencies: + fsevents "^1.0.0" + +cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +circular-dependency-plugin@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/circular-dependency-plugin/-/circular-dependency-plugin-3.0.0.tgz#9b68692e35b0e3510998d0164b6ae5011bea5760" + +clap@^1.0.9: + version "1.2.3" + resolved "https://registry.yarnpkg.com/clap/-/clap-1.2.3.tgz#4f36745b32008492557f46412d66d50cb99bce51" + dependencies: + chalk "^1.1.3" + +class-utils@^0.3.5: + version "0.3.5" + resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.5.tgz#17e793103750f9627b2176ea34cfd1b565903c80" + dependencies: + arr-union "^3.1.0" + define-property "^0.2.5" + isobject "^3.0.0" + lazy-cache "^2.0.2" + static-extend "^0.1.1" + +clean-css@4.1.x: + version "4.1.9" + resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.1.9.tgz#35cee8ae7687a49b98034f70de00c4edd3826301" + dependencies: + source-map "0.5.x" + +cliui@^2.1.0: + 
version "2.1.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1" + dependencies: + center-align "^0.1.1" + right-align "^0.1.1" + wordwrap "0.0.2" + +cliui@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d" + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + wrap-ansi "^2.0.0" + +clone-deep@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-0.3.0.tgz#348c61ae9cdbe0edfe053d91ff4cc521d790ede8" + dependencies: + for-own "^1.0.0" + is-plain-object "^2.0.1" + kind-of "^3.2.2" + shallow-clone "^0.1.2" + +clone-stats@^0.0.1, clone-stats@~0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/clone-stats/-/clone-stats-0.0.1.tgz#b88f94a82cf38b8791d58046ea4029ad88ca99d1" + +clone@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f" + +clone@^1.0.0, clone@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.3.tgz#298d7e2231660f40c003c2ed3140decf3f53085f" + +clone@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb" + +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + +coa@~1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/coa/-/coa-1.0.4.tgz#a9ef153660d6a86a8bdec0289a5c684d217432fd" + dependencies: + q "^1.1.2" + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + +codelyzer@~3.2.0: + version "3.2.2" + resolved "https://registry.yarnpkg.com/codelyzer/-/codelyzer-3.2.2.tgz#abbd4e5956c435677740846e5858c915f89679c3" + dependencies: + app-root-path "^2.0.1" + css-selector-tokenizer "^0.7.0" + cssauron "^1.4.0" + semver-dsl "^1.0.1" + source-map "^0.5.6" + sprintf-js "^1.0.3" + +collection-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" + dependencies: + map-visit "^1.0.0" + object-visit "^1.0.0" + +color-convert@^1.3.0, color-convert@^1.9.0: + version "1.9.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.1.tgz#c1261107aeb2f294ebffec9ed9ecad529a6097ed" + dependencies: + color-name "^1.1.1" + +color-name@^1.0.0, color-name@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + +color-string@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/color-string/-/color-string-0.3.0.tgz#27d46fb67025c5c2fa25993bfbf579e47841b991" + dependencies: + color-name "^1.0.0" + +color-support@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-support/-/color-support-1.1.3.tgz#93834379a1cc9a0c61f82f52f0d04322251bd5a2" + +color@^0.11.0: + version "0.11.4" + resolved "https://registry.yarnpkg.com/color/-/color-0.11.4.tgz#6d7b5c74fb65e841cd48792ad1ed5e07b904d764" + dependencies: + clone "^1.0.2" + color-convert "^1.3.0" + color-string "^0.3.0" + +colormin@^1.0.5: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colormin/-/colormin-1.1.2.tgz#ea2f7420a72b96881a38aae59ec124a6f7298133" + dependencies: + color "^0.11.0" + css-color-names "0.0.4" + has "^1.0.1" + 
+colors@1.1.2, colors@^1.1.0, colors@^1.1.2, colors@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.1.2.tgz#168a4701756b6a7f51a12ce0c97bfa28c084ed63" + +combine-lists@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/combine-lists/-/combine-lists-1.0.1.tgz#458c07e09e0d900fc28b70a3fec2dacd1d2cb7f6" + dependencies: + lodash "^4.5.0" + +combined-stream@^1.0.5, combined-stream@~1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009" + dependencies: + delayed-stream "~1.0.0" + +commander@2.12.x, commander@^2.9.0, commander@~2.12.1: + version "2.12.2" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.12.2.tgz#0f5946c427ed9ec0d91a46bb9def53e54650e555" + +common-tags@^1.3.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.5.1.tgz#e2e39931a013cd02253defeed89a1ad615a27f07" + dependencies: + babel-runtime "^6.26.0" + +component-bind@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1" + +component-emitter@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3" + +component-emitter@1.2.1, component-emitter@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" + +component-inherit@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143" + +compressible@~2.0.11: + version "2.0.12" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.12.tgz#c59a5c99db76767e9876500e271ef63b3493bd66" + dependencies: + mime-db ">= 1.30.0 < 2" + +compression@^1.5.2: + version "1.7.1" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.1.tgz#eff2603efc2e22cf86f35d2eb93589f9875373db" + dependencies: + accepts "~1.3.4" + bytes "3.0.0" + compressible "~2.0.11" + debug "2.6.9" + on-headers "~1.0.1" + safe-buffer "5.1.1" + vary "~1.1.2" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + +connect-history-api-fallback@^1.3.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz#b06873934bc5e344fef611a196a6faae0aee015a" + +connect@^3.6.0: + version "3.6.5" + resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.5.tgz#fb8dde7ba0763877d0ec9df9dac0b4b40e72c7da" + dependencies: + debug "2.6.9" + finalhandler "1.0.6" + parseurl "~1.3.2" + utils-merge "1.0.1" + +console-browserify@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10" + dependencies: + date-now "^0.1.4" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + +constants-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" + +content-disposition@0.5.2: + version 
"0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" + +content-type@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + +convert-source-map@^1.3.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.1.tgz#b8278097b9bc229365de5c62cf5fcaed8b5599e5" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + +cookie@0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb" + +copy-descriptor@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" + +copy-webpack-plugin@^4.1.1: + version "4.2.3" + resolved "https://registry.yarnpkg.com/copy-webpack-plugin/-/copy-webpack-plugin-4.2.3.tgz#4a3c61089f3b635777f0f0af346c338b39d63755" + dependencies: + bluebird "^3.5.1" + glob "^7.1.2" + is-glob "^4.0.0" + loader-utils "^0.2.15" + lodash "^4.3.0" + minimatch "^3.0.4" + +core-js@^2.2.0, core-js@^2.4.0, core-js@^2.4.1: + version "2.5.3" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.5.3.tgz#8acc38345824f16d8365b7c9b4259168e8ed603e" + +core-object@^3.1.0: + version "3.1.5" + resolved "https://registry.yarnpkg.com/core-object/-/core-object-3.1.5.tgz#fa627b87502adc98045e44678e9a8ec3b9c0d2a9" + dependencies: + chalk "^2.0.0" + +core-util-is@1.0.2, core-util-is@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + +cosmiconfig@^2.1.0, cosmiconfig@^2.1.1: + version "2.2.2" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-2.2.2.tgz#6173cebd56fac042c1f4390edf7af6c07c7cb892" + dependencies: + is-directory "^0.3.1" + js-yaml "^3.4.3" + minimist "^1.2.0" + object-assign "^4.1.0" + os-homedir "^1.0.1" + parse-json "^2.2.0" + require-from-string "^1.1.0" + +create-ecdh@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.0.tgz#888c723596cdf7612f6498233eebd7a35301737d" + dependencies: + bn.js "^4.1.0" + elliptic "^6.0.0" + +create-hash@^1.1.0, create-hash@^1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.1.3.tgz#606042ac8b9262750f483caddab0f5819172d8fd" + dependencies: + cipher-base "^1.0.1" + inherits "^2.0.1" + ripemd160 "^2.0.0" + sha.js "^2.4.0" + +create-hmac@^1.1.0, create-hmac@^1.1.2, create-hmac@^1.1.4: + version "1.1.6" + resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.6.tgz#acb9e221a4e17bdb076e90657c42b93e3726cf06" + dependencies: + cipher-base "^1.0.3" + create-hash "^1.1.0" + inherits "^2.0.1" + ripemd160 "^2.0.0" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +cross-spawn@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-3.0.1.tgz#1256037ecb9f0c5f79e3d6ef135e30770184b982" + dependencies: + lru-cache "^4.0.1" + which "^1.2.9" + +cross-spawn@^5.0.1: + version "5.1.0" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + +crypt@~0.0.1: + version "0.0.2" + resolved 
"https://registry.yarnpkg.com/crypt/-/crypt-0.0.2.tgz#88d7ff7ec0dfb86f713dc87bbb42d044d3e6c41b" + +cryptiles@2.x.x: + version "2.0.5" + resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8" + dependencies: + boom "2.x.x" + +cryptiles@3.x.x: + version "3.1.2" + resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-3.1.2.tgz#a89fbb220f5ce25ec56e8c4aa8a4fd7b5b0d29fe" + dependencies: + boom "5.x.x" + +crypto-browserify@^3.11.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" + dependencies: + browserify-cipher "^1.0.0" + browserify-sign "^4.0.0" + create-ecdh "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.0" + diffie-hellman "^5.0.0" + inherits "^2.0.1" + pbkdf2 "^3.0.3" + public-encrypt "^4.0.0" + randombytes "^2.0.0" + randomfill "^1.0.3" + +css-color-names@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" + +css-loader@^0.28.1: + version "0.28.7" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-0.28.7.tgz#5f2ee989dd32edd907717f953317656160999c1b" + dependencies: + babel-code-frame "^6.11.0" + css-selector-tokenizer "^0.7.0" + cssnano ">=2.6.1 <4" + icss-utils "^2.1.0" + loader-utils "^1.0.2" + lodash.camelcase "^4.3.0" + object-assign "^4.0.1" + postcss "^5.0.6" + postcss-modules-extract-imports "^1.0.0" + postcss-modules-local-by-default "^1.0.1" + postcss-modules-scope "^1.0.0" + postcss-modules-values "^1.1.0" + postcss-value-parser "^3.3.0" + source-list-map "^2.0.0" + +css-parse@1.7.x: + version "1.7.0" + resolved "https://registry.yarnpkg.com/css-parse/-/css-parse-1.7.0.tgz#321f6cf73782a6ff751111390fc05e2c657d8c9b" + +css-select@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" + dependencies: + boolbase "~1.0.0" + css-what "2.1" + domutils "1.5.1" + nth-check "~1.0.1" + +css-selector-tokenizer@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/css-selector-tokenizer/-/css-selector-tokenizer-0.7.0.tgz#e6988474ae8c953477bf5e7efecfceccd9cf4c86" + dependencies: + cssesc "^0.1.0" + fastparse "^1.1.1" + regexpu-core "^1.0.0" + +css-what@2.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.0.tgz#9467d032c38cfaefb9f2d79501253062f87fa1bd" + +cssauron@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/cssauron/-/cssauron-1.4.0.tgz#a6602dff7e04a8306dc0db9a551e92e8b5662ad8" + dependencies: + through X.X.X + +cssesc@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-0.1.0.tgz#c814903e45623371a0477b40109aaafbeeaddbb4" + +"cssnano@>=2.6.1 <4", cssnano@^3.10.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-3.10.0.tgz#4f38f6cea2b9b17fa01490f23f1dc68ea65c1c38" + dependencies: + autoprefixer "^6.3.1" + decamelize "^1.1.2" + defined "^1.0.0" + has "^1.0.1" + object-assign "^4.0.1" + postcss "^5.0.14" + postcss-calc "^5.2.0" + postcss-colormin "^2.1.8" + postcss-convert-values "^2.3.4" + postcss-discard-comments "^2.0.4" + postcss-discard-duplicates "^2.0.1" + postcss-discard-empty "^2.0.1" + postcss-discard-overridden "^0.1.1" + postcss-discard-unused "^2.2.1" + postcss-filter-plugins "^2.0.0" + postcss-merge-idents "^2.1.5" + postcss-merge-longhand "^2.0.1" + postcss-merge-rules "^2.0.3" + 
postcss-minify-font-values "^1.0.2" + postcss-minify-gradients "^1.0.1" + postcss-minify-params "^1.0.4" + postcss-minify-selectors "^2.0.4" + postcss-normalize-charset "^1.1.0" + postcss-normalize-url "^3.0.7" + postcss-ordered-values "^2.1.0" + postcss-reduce-idents "^2.2.2" + postcss-reduce-initial "^1.0.0" + postcss-reduce-transforms "^1.0.3" + postcss-svgo "^2.1.1" + postcss-unique-selectors "^2.0.2" + postcss-value-parser "^3.2.3" + postcss-zindex "^2.0.1" + +csso@~2.3.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/csso/-/csso-2.3.2.tgz#ddd52c587033f49e94b71fc55569f252e8ff5f85" + dependencies: + clap "^1.0.9" + source-map "^0.5.3" + +currently-unhandled@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea" + dependencies: + array-find-index "^1.0.1" + +custom-event@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/custom-event/-/custom-event-1.0.1.tgz#5d02a46850adf1b4a317946a3928fccb5bfd0425" + +d@1: + version "1.0.0" + resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f" + dependencies: + es5-ext "^0.10.9" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + dependencies: + assert-plus "^1.0.0" + +date-now@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b" + +dateformat@^1.0.7-1.2.3: + version "1.0.12" + resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-1.0.12.tgz#9f124b67594c937ff706932e4a642cca8dbbfee9" + dependencies: + get-stdin "^4.0.1" + meow "^3.3.0" + +dateformat@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-2.2.0.tgz#4065e2013cf9fb916ddfd82efb506ad4c6769062" + +debug@*, debug@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + dependencies: + ms "2.0.0" + +debug@2, debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.6, debug@^2.6.8: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + dependencies: + ms "2.0.0" + +debug@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da" + dependencies: + ms "0.7.1" + +debug@2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.3.3.tgz#40c453e67e6e13c901ddec317af8986cda9eff8c" + dependencies: + ms "0.7.2" + +decamelize@^1.0.0, decamelize@^1.1.1, decamelize@^1.1.2: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + +deep-equal@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5" + +deep-extend@~0.4.0: + version "0.4.2" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.4.2.tgz#48b699c27e334bf89f10892be432f6e4c7d34a7f" + +default-require-extensions@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/default-require-extensions/-/default-require-extensions-1.0.0.tgz#f37ea15d3e13ffd9b437d33e1a75b5fb97874cb8" + dependencies: + strip-bom "^2.0.0" + +defaults@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d" + dependencies: + clone "^1.0.2" + +define-property@^0.2.5: + version "0.2.5" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" + dependencies: + is-descriptor "^0.1.0" + +define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" + dependencies: + is-descriptor "^1.0.0" + +defined@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693" + +del@^2.2.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/del/-/del-2.2.2.tgz#c12c981d067846c84bcaf862cff930d907ffd1a8" + dependencies: + globby "^5.0.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + rimraf "^2.2.8" + +del@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/del/-/del-3.0.0.tgz#53ecf699ffcbcb39637691ab13baf160819766e5" + dependencies: + globby "^6.1.0" + is-path-cwd "^1.0.0" + is-path-in-cwd "^1.0.0" + p-map "^1.1.1" + pify "^3.0.0" + rimraf "^2.2.8" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + +denodeify@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/denodeify/-/denodeify-1.2.1.tgz#3a36287f5034e699e7577901052c2e6c94251631" + +depd@1.1.1, depd@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.1.tgz#5783b4e1c459f06fa5ca27f991f3d06e7a310359" + +deprecated@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/deprecated/-/deprecated-0.0.1.tgz#f9c9af5464afa1e7a971458a8bdef2aa94d5bb19" + +des.js@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.0.tgz#c074d2e2aa6a8a9a07dbd61f9a15c2cd83ec8ecc" + dependencies: + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +destroy@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + +detect-file@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/detect-file/-/detect-file-1.0.0.tgz#f0d66d03672a825cb1b73bdb3fe62310c8e552b7" + +detect-indent@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208" + dependencies: + repeating "^2.0.0" + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + +detect-node@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.0.3.tgz#a2033c09cc8e158d37748fbde7507832bd6ce127" + +di@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/di/-/di-0.0.1.tgz#806649326ceaa7caa3306d75d985ea2748ba913c" + +diff@^3.1.0, diff@^3.2.0: + version "3.4.0" + resolved 
"https://registry.yarnpkg.com/diff/-/diff-3.4.0.tgz#b1d85507daf3964828de54b37d0d73ba67dda56c" + +diffie-hellman@^5.0.0: + version "5.0.2" + resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.2.tgz#b5835739270cfe26acf632099fded2a07f209e5e" + dependencies: + bn.js "^4.1.0" + miller-rabin "^4.0.0" + randombytes "^2.0.0" + +directory-encoder@^0.7.2: + version "0.7.2" + resolved "https://registry.yarnpkg.com/directory-encoder/-/directory-encoder-0.7.2.tgz#59b4e2aa4f25422f6c63b527b462f5e2d0dd2c58" + dependencies: + fs-extra "^0.23.1" + handlebars "^1.3.0" + img-stats "^0.5.2" + +dns-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" + +dns-packet@^1.0.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.2.2.tgz#a8a26bec7646438963fc86e06f8f8b16d6c8bf7a" + dependencies: + ip "^1.1.0" + safe-buffer "^5.0.1" + +dns-txt@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/dns-txt/-/dns-txt-2.0.2.tgz#b91d806f5d27188e4ab3e7d107d881a1cc4642b6" + dependencies: + buffer-indexof "^1.0.0" + +dom-converter@~0.1: + version "0.1.4" + resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.1.4.tgz#a45ef5727b890c9bffe6d7c876e7b19cb0e17f3b" + dependencies: + utila "~0.3" + +dom-serialize@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/dom-serialize/-/dom-serialize-2.2.1.tgz#562ae8999f44be5ea3076f5419dcd59eb43ac95b" + dependencies: + custom-event "~1.0.0" + ent "~2.2.0" + extend "^3.0.0" + void-elements "^2.0.0" + +dom-serializer@0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82" + dependencies: + domelementtype "~1.1.1" + entities "~1.1.1" + +domain-browser@^1.1.1: + version "1.1.7" + resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.1.7.tgz#867aa4b093faa05f1de08c06f4d7b21fdf8698bc" + +domelementtype@1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2" + +domelementtype@~1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b" + +domhandler@2.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.1.0.tgz#d2646f5e57f6c3bab11cf6cb05d3c0acf7412594" + dependencies: + domelementtype "1" + +domutils@1.1: + version "1.1.6" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.1.6.tgz#bddc3de099b9a2efacc51c623f28f416ecc57485" + dependencies: + domelementtype "1" + +domutils@1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" + dependencies: + dom-serializer "0" + domelementtype "1" + +duplexer2@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/duplexer2/-/duplexer2-0.0.2.tgz#c614dcf67e2fb14995a91711e5a617e8a60a31db" + dependencies: + readable-stream "~1.1.9" + +ecc-jsbn@~0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" + dependencies: + jsbn "~0.1.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + +ejs@^2.5.7: + version "2.5.7" + resolved 
"https://registry.yarnpkg.com/ejs/-/ejs-2.5.7.tgz#cc872c168880ae3c7189762fd5ffc00896c9518a" + +electron-to-chromium@^1.2.7: + version "1.3.28" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.28.tgz#8dd4e6458086644e9f9f0a1cf32e2a1f9dffd9ee" + +elliptic@^6.0.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.4.0.tgz#cac9af8762c85836187003c8dfe193e5e2eae5df" + dependencies: + bn.js "^4.4.0" + brorand "^1.0.1" + hash.js "^1.0.0" + hmac-drbg "^1.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.0" + +ember-cli-string-utils@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1" + +emojis-list@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389" + +encodeurl@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20" + +end-of-stream@~0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-0.1.5.tgz#8e177206c3c80837d85632e8b9359dfe8b2f6eaf" + dependencies: + once "~1.3.0" + +engine.io-client@1.8.3: + version "1.8.3" + resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-1.8.3.tgz#1798ed93451246453d4c6f635d7a201fe940d5ab" + dependencies: + component-emitter "1.2.1" + component-inherit "0.0.3" + debug "2.3.3" + engine.io-parser "1.3.2" + has-cors "1.1.0" + indexof "0.0.1" + parsejson "0.0.3" + parseqs "0.0.5" + parseuri "0.0.5" + ws "1.1.2" + xmlhttprequest-ssl "1.5.3" + yeast "0.1.2" + +engine.io-parser@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.3.2.tgz#937b079f0007d0893ec56d46cb220b8cb435220a" + dependencies: + after "0.8.2" + arraybuffer.slice "0.0.6" + base64-arraybuffer "0.1.5" + blob "0.0.4" + has-binary "0.1.7" + wtf-8 "1.0.0" + +engine.io@1.8.3: + version "1.8.3" + resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-1.8.3.tgz#8de7f97895d20d39b85f88eeee777b2bd42b13d4" + dependencies: + accepts "1.3.3" + base64id "1.0.0" + cookie "0.3.1" + debug "2.3.3" + engine.io-parser "1.3.2" + ws "1.1.2" + +enhanced-resolve@^3.1.0, enhanced-resolve@^3.4.0: + version "3.4.1" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-3.4.1.tgz#0421e339fd71419b3da13d129b3979040230476e" + dependencies: + graceful-fs "^4.1.2" + memory-fs "^0.4.0" + object-assign "^4.0.1" + tapable "^0.2.7" + +ent@~2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/ent/-/ent-2.2.0.tgz#e964219325a21d05f44466a2f686ed6ce5f5dd1d" + +entities@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0" + +errno@^0.1.1, errno@^0.1.3: + version "0.1.5" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.5.tgz#a563781a6052bc2c9ccd89e8cef0eb9506e0c321" + dependencies: + prr "~1.0.1" + +error-ex@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.1.tgz#f855a86ce61adc4e8621c3cda21e7a7612c3a8dc" + dependencies: + is-arrayish "^0.2.1" + +es5-ext@^0.10.14, es5-ext@^0.10.35, es5-ext@^0.10.9, es5-ext@~0.10.14: + version "0.10.37" + resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.37.tgz#0ee741d148b80069ba27d020393756af257defc3" + dependencies: + 
es6-iterator "~2.0.1" + es6-symbol "~3.1.1" + +es6-iterator@^2.0.1, es6-iterator@~2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.3.tgz#a7de889141a05a94b0854403b2d0a0fbfa98f3b7" + dependencies: + d "1" + es5-ext "^0.10.35" + es6-symbol "^3.1.1" + +es6-map@^0.1.3: + version "0.1.5" + resolved "https://registry.yarnpkg.com/es6-map/-/es6-map-0.1.5.tgz#9136e0503dcc06a301690f0bb14ff4e364e949f0" + dependencies: + d "1" + es5-ext "~0.10.14" + es6-iterator "~2.0.1" + es6-set "~0.1.5" + es6-symbol "~3.1.1" + event-emitter "~0.3.5" + +es6-set@~0.1.5: + version "0.1.5" + resolved "https://registry.yarnpkg.com/es6-set/-/es6-set-0.1.5.tgz#d2b3ec5d4d800ced818db538d28974db0a73ccb1" + dependencies: + d "1" + es5-ext "~0.10.14" + es6-iterator "~2.0.1" + es6-symbol "3.1.1" + event-emitter "~0.3.5" + +es6-symbol@3.1.1, es6-symbol@^3.1.1, es6-symbol@~3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77" + dependencies: + d "1" + es5-ext "~0.10.14" + +es6-weak-map@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-2.0.2.tgz#5e3ab32251ffd1538a1f8e5ffa1357772f92d96f" + dependencies: + d "1" + es5-ext "^0.10.14" + es6-iterator "^2.0.1" + es6-symbol "^3.1.1" + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + +escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + +escope@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/escope/-/escope-3.6.0.tgz#e01975e812781a163a6dadfdd80398dc64c889c3" + dependencies: + es6-map "^0.1.3" + es6-weak-map "^2.0.1" + esrecurse "^4.1.0" + estraverse "^4.1.1" + +esprima@^2.6.0: + version "2.7.3" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581" + +esprima@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + +esrecurse@^4.1.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.0.tgz#fa9568d98d3823f9a41d91e902dcab9ea6e5b163" + dependencies: + estraverse "^4.1.0" + object-assign "^4.0.1" + +estraverse@^4.1.0, estraverse@^4.1.1: + version "4.2.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.2.0.tgz#0dee3fed31fcd469618ce7342099fc1afa0bdb13" + +esutils@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b" + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + +event-emitter@~0.3.5: + version "0.3.5" + resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39" + dependencies: + d "1" + es5-ext "~0.10.14" + +eventemitter3@1.x.x: + version "1.2.0" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508" + +events@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/events/-/events-1.1.1.tgz#9ebdb7635ad099c70dcc4c2a1f5004288e8bd924" + +eventsource@0.1.6: + version "0.1.6" + resolved 
"https://registry.yarnpkg.com/eventsource/-/eventsource-0.1.6.tgz#0acede849ed7dd1ccc32c811bb11b944d4f29232" + dependencies: + original ">=0.0.5" + +evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" + dependencies: + md5.js "^1.3.4" + safe-buffer "^5.1.1" + +execa@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-0.7.0.tgz#944becd34cc41ee32a63a9faf27ad5a65fc59777" + dependencies: + cross-spawn "^5.0.1" + get-stream "^3.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +exit@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" + +expand-braces@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/expand-braces/-/expand-braces-0.1.2.tgz#488b1d1d2451cb3d3a6b192cfc030f44c5855fea" + dependencies: + array-slice "^0.2.3" + array-unique "^0.2.1" + braces "^0.1.2" + +expand-brackets@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b" + dependencies: + is-posix-bracket "^0.1.0" + +expand-brackets@^2.1.4: + version "2.1.4" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" + dependencies: + debug "^2.3.3" + define-property "^0.2.5" + extend-shallow "^2.0.1" + posix-character-classes "^0.1.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +expand-range@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-0.1.1.tgz#4cb8eda0993ca56fa4f41fc42f3cbb4ccadff044" + dependencies: + is-number "^0.1.1" + repeat-string "^0.2.2" + +expand-range@^1.8.1: + version "1.8.2" + resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337" + dependencies: + fill-range "^2.1.0" + +expand-tilde@^2.0.0, expand-tilde@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/expand-tilde/-/expand-tilde-2.0.2.tgz#97e801aa052df02454de46b02bf621642cdc8502" + dependencies: + homedir-polyfill "^1.0.1" + +exports-loader@^0.6.3: + version "0.6.4" + resolved "https://registry.yarnpkg.com/exports-loader/-/exports-loader-0.6.4.tgz#d70fc6121975b35fc12830cf52754be2740fc886" + dependencies: + loader-utils "^1.0.2" + source-map "0.5.x" + +express@^4.13.3: + version "4.16.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.16.2.tgz#e35c6dfe2d64b7dca0a5cd4f21781be3299e076c" + dependencies: + accepts "~1.3.4" + array-flatten "1.1.1" + body-parser "1.18.2" + content-disposition "0.5.2" + content-type "~1.0.4" + cookie "0.3.1" + cookie-signature "1.0.6" + debug "2.6.9" + depd "~1.1.1" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "1.1.0" + fresh "0.5.2" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "~2.3.0" + parseurl "~1.3.2" + path-to-regexp "0.1.7" + proxy-addr "~2.0.2" + qs "6.5.1" + range-parser "~1.2.0" + safe-buffer "5.1.1" + send "0.16.1" + serve-static "1.13.1" + setprototypeof "1.1.0" + statuses "~1.3.1" + type-is "~1.6.15" + utils-merge "1.0.1" + vary "~1.1.2" + +extend-shallow@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + dependencies: + is-extendable 
"^0.1.0" + +extend-shallow@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.1.tgz#4b6d8c49b147fee029dc9eb9484adb770f689844" + dependencies: + is-extendable "^1.0.1" + +extend@3, extend@^3.0.0, extend@~3.0.0, extend@~3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444" + +extglob@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1" + dependencies: + is-extglob "^1.0.0" + +extglob@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.2.tgz#3290f46208db1b2e8eb8be0c94ed9e6ad80edbe2" + dependencies: + array-unique "^0.3.2" + define-property "^1.0.0" + expand-brackets "^2.1.4" + extend-shallow "^2.0.1" + fragment-cache "^0.2.1" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +extract-text-webpack-plugin@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/extract-text-webpack-plugin/-/extract-text-webpack-plugin-3.0.0.tgz#90caa7907bc449f335005e3ac7532b41b00de612" + dependencies: + async "^2.4.1" + loader-utils "^1.1.0" + schema-utils "^0.3.0" + webpack-sources "^1.0.1" + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + +extsprintf@^1.2.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" + +fancy-log@^1.1.0: + version "1.3.2" + resolved "https://registry.yarnpkg.com/fancy-log/-/fancy-log-1.3.2.tgz#f41125e3d84f2e7d89a43d06d958c8f78be16be1" + dependencies: + ansi-gray "^0.1.1" + color-support "^1.1.3" + time-stamp "^1.0.0" + +fast-deep-equal@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-1.0.0.tgz#96256a3bc975595eb36d82e9929d060d893439ff" + +fast-json-stable-stringify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" + +fastparse@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/fastparse/-/fastparse-1.1.1.tgz#d1e2643b38a94d7583b479060e6c4affc94071f8" + +faye-websocket@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" + dependencies: + websocket-driver ">=0.5.1" + +faye-websocket@~0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.1.tgz#f0efe18c4f56e4f40afc7e06c719fd5ee6188f38" + dependencies: + websocket-driver ">=0.5.1" + +file-loader@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-1.1.5.tgz#91c25b6b6fbe56dae99f10a425fd64933b5c9daa" + dependencies: + loader-utils "^1.0.2" + schema-utils "^0.3.0" + +filename-regex@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" + +fileset@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/fileset/-/fileset-2.0.3.tgz#8e7548a96d3cc2327ee5e674168723a333bba2a0" + dependencies: + glob "^7.0.3" + minimatch "^3.0.3" + +fill-range@^2.1.0: + version "2.2.3" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723" + dependencies: + 
is-number "^2.1.0" + isobject "^2.0.0" + randomatic "^1.1.3" + repeat-element "^1.1.2" + repeat-string "^1.5.2" + +fill-range@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" + dependencies: + extend-shallow "^2.0.1" + is-number "^3.0.0" + repeat-string "^1.6.1" + to-regex-range "^2.1.0" + +finalhandler@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.6.tgz#007aea33d1a4d3e42017f624848ad58d212f814f" + dependencies: + debug "2.6.9" + encodeurl "~1.0.1" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.2" + statuses "~1.3.1" + unpipe "~1.0.0" + +finalhandler@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.0.tgz#ce0b6855b45853e791b2fcc680046d88253dd7f5" + dependencies: + debug "2.6.9" + encodeurl "~1.0.1" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.2" + statuses "~1.3.1" + unpipe "~1.0.0" + +find-index@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/find-index/-/find-index-0.1.1.tgz#675d358b2ca3892d795a1ab47232f8b6e2e0dde4" + +find-up@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f" + dependencies: + path-exists "^2.0.0" + pinkie-promise "^2.0.0" + +find-up@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + dependencies: + locate-path "^2.0.0" + +findup-sync@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-2.0.0.tgz#9326b1488c22d1a6088650a86901b2d9a90a2cbc" + dependencies: + detect-file "^1.0.0" + is-glob "^3.1.0" + micromatch "^3.0.4" + resolve-dir "^1.0.1" + +fined@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/fined/-/fined-1.1.0.tgz#b37dc844b76a2f5e7081e884f7c0ae344f153476" + dependencies: + expand-tilde "^2.0.2" + is-plain-object "^2.0.3" + object.defaults "^1.1.0" + object.pick "^1.2.0" + parse-filepath "^1.0.1" + +first-chunk-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/first-chunk-stream/-/first-chunk-stream-1.0.0.tgz#59bfb50cd905f60d7c394cd3d9acaab4e6ad934e" + +flagged-respawn@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/flagged-respawn/-/flagged-respawn-0.3.2.tgz#ff191eddcd7088a675b2610fffc976be9b8074b5" + +flatten@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782" + +for-in@^0.1.3: + version "0.1.8" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.8.tgz#d8773908e31256109952b1fdb9b3fa867d2775e1" + +for-in@^1.0.1, for-in@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + +for-own@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + dependencies: + for-in "^1.0.1" + +for-own@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-1.0.0.tgz#c63332f415cedc4b04dbfe70cf836494c53cb44b" + dependencies: + for-in "^1.0.1" + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + +form-data@~2.1.1: + version "2.1.4" + resolved 
"https://registry.yarnpkg.com/form-data/-/form-data-2.1.4.tgz#33c183acf193276ecaa98143a69e94bfee1750d1" + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.5" + mime-types "^2.1.12" + +form-data@~2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.1.tgz#6fb94fbd71885306d73d15cc497fe4cc4ecd44bf" + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.5" + mime-types "^2.1.12" + +forwarded@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.2.tgz#98c23dab1175657b8c0573e8ceccd91b0ff18c84" + +fragment-cache@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" + dependencies: + map-cache "^0.2.2" + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + +fs-access@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/fs-access/-/fs-access-1.0.1.tgz#d6a87f262271cefebec30c553407fb995da8777a" + dependencies: + null-check "^1.0.0" + +fs-extra@^0.23.1: + version "0.23.1" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.23.1.tgz#6611dba6adf2ab8dc9c69fab37cddf8818157e3d" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^2.1.0" + path-is-absolute "^1.0.0" + rimraf "^2.2.8" + +fs-extra@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-3.0.1.tgz#3794f378c58b342ea7dbbb23095109c4b3b62291" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^3.0.0" + universalify "^0.1.0" + +fs-extra@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94" + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + +fsevents@^1.0.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.1.3.tgz#11f82318f5fe7bb2cd22965a108e9306208216d8" + dependencies: + nan "^2.3.0" + node-pre-gyp "^0.6.39" + +fstream-ignore@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105" + dependencies: + fstream "^1.0.0" + inherits "2" + minimatch "^3.0.0" + +fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2: + version "1.0.11" + resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171" + dependencies: + graceful-fs "^4.1.2" + inherits "~2.0.0" + mkdirp ">=0.5 0" + rimraf "2" + +function-bind@^1.0.2: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +gaze@^0.5.1: + version "0.5.2" + resolved "https://registry.yarnpkg.com/gaze/-/gaze-0.5.2.tgz#40b709537d24d1d45767db5a908689dfe69ac44f" + dependencies: + globule "~0.1.0" + +gaze@^1.0.0: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/gaze/-/gaze-1.1.2.tgz#847224677adb8870d679257ed3388fdb61e40105" + dependencies: + globule "^1.0.0" + +generate-function@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74" + +generate-object-property@^1.1.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0" + dependencies: + is-property "^1.0.0" + +get-caller-file@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.2.tgz#f702e63127e7e231c160a80c1554acb70d5047e5" + +get-stdin@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe" + +get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + +get-value@^2.0.3, get-value@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + dependencies: + assert-plus "^1.0.0" + +glob-base@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4" + dependencies: + glob-parent "^2.0.0" + is-glob "^2.0.0" + +glob-parent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28" + dependencies: + is-glob "^2.0.0" + +glob-stream@^3.1.5: + version "3.1.18" + resolved "https://registry.yarnpkg.com/glob-stream/-/glob-stream-3.1.18.tgz#9170a5f12b790306fdfe598f313f8f7954fd143b" + dependencies: + glob "^4.3.1" + glob2base "^0.0.12" + minimatch "^2.0.1" + ordered-read-streams "^0.1.0" + through2 "^0.6.1" + unique-stream "^1.0.0" + +glob-watcher@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/glob-watcher/-/glob-watcher-0.0.6.tgz#b95b4a8df74b39c83298b0c05c978b4d9a3b710b" + dependencies: + gaze "^0.5.1" + +glob2base@^0.0.12: + version "0.0.12" + resolved "https://registry.yarnpkg.com/glob2base/-/glob2base-0.0.12.tgz#9d419b3e28f12e83a362164a277055922c9c0d56" + dependencies: + find-index "^0.1.1" + +glob@7.0.x: + version "7.0.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.0.6.tgz#211bafaf49e525b8cd93260d14ab136152b3f57a" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.2" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^4.3.1: + version "4.5.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-4.5.3.tgz#c6cb73d3226c1efef04de3c56d012f03377ee15f" + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "^2.0.1" + once "^1.3.0" + +glob@^6.0.4: + version "6.0.4" + resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22" + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.0.0, glob@^7.0.3, glob@^7.0.5, glob@^7.0.6, glob@^7.1.1, glob@^7.1.2, glob@~7.1.1: + version "7.1.2" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15" + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + 
minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@~3.1.21: + version "3.1.21" + resolved "https://registry.yarnpkg.com/glob/-/glob-3.1.21.tgz#d29e0a055dea5138f4d07ed40e8982e83c2066cd" + dependencies: + graceful-fs "~1.2.0" + inherits "1" + minimatch "~0.2.11" + +global-modules@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-1.0.0.tgz#6d770f0eb523ac78164d72b5e71a8877265cc3ea" + dependencies: + global-prefix "^1.0.1" + is-windows "^1.0.1" + resolve-dir "^1.0.0" + +global-prefix@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-1.0.2.tgz#dbf743c6c14992593c655568cb66ed32c0122ebe" + dependencies: + expand-tilde "^2.0.2" + homedir-polyfill "^1.0.1" + ini "^1.3.4" + is-windows "^1.0.1" + which "^1.2.14" + +globals@^9.18.0: + version "9.18.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-9.18.0.tgz#aa3896b3e69b487f17e31ed2143d69a8e30c2d8a" + +globby@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-5.0.0.tgz#ebd84667ca0dbb330b99bcfc68eac2bc54370e0d" + dependencies: + array-union "^1.0.1" + arrify "^1.0.0" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +globby@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" + dependencies: + array-union "^1.0.1" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +globule@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/globule/-/globule-1.2.0.tgz#1dc49c6822dd9e8a2fa00ba2a295006e8664bd09" + dependencies: + glob "~7.1.1" + lodash "~4.17.4" + minimatch "~3.0.2" + +globule@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/globule/-/globule-0.1.0.tgz#d9c8edde1da79d125a151b79533b978676346ae5" + dependencies: + glob "~3.1.21" + lodash "~1.0.1" + minimatch "~0.2.11" + +glogg@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/glogg/-/glogg-1.0.0.tgz#7fe0f199f57ac906cf512feead8f90ee4a284fc5" + dependencies: + sparkles "^1.0.0" + +graceful-fs@^3.0.0: + version "3.0.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818" + dependencies: + natives "^1.1.0" + +graceful-fs@^4.1.2, graceful-fs@^4.1.6: + version "4.1.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658" + +graceful-fs@~1.2.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-1.2.3.tgz#15a4806a57547cb2d2dbf27f42e89a8c3451b364" + +gulp-clean@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/gulp-clean/-/gulp-clean-0.3.2.tgz#a347d473acea40182f935587a451941671928102" + dependencies: + gulp-util "^2.2.14" + rimraf "^2.2.8" + through2 "^0.4.2" + +gulp-util@^2.2.14: + version "2.2.20" + resolved "https://registry.yarnpkg.com/gulp-util/-/gulp-util-2.2.20.tgz#d7146e5728910bd8f047a6b0b1e549bc22dbd64c" + dependencies: + chalk "^0.5.0" + dateformat "^1.0.7-1.2.3" + lodash._reinterpolate "^2.4.1" + lodash.template "^2.4.1" + minimist "^0.2.0" + multipipe "^0.1.0" + through2 "^0.5.0" + vinyl "^0.2.1" + +gulp-util@^3.0.0, gulp-util@^3.0.8: + version "3.0.8" + resolved "https://registry.yarnpkg.com/gulp-util/-/gulp-util-3.0.8.tgz#0054e1e744502e27c04c187c3ecc505dd54bbb4f" + dependencies: + array-differ "^1.0.0" + array-uniq "^1.0.2" + beeper "^1.0.0" + chalk "^1.0.0" + 
dateformat "^2.0.0" + fancy-log "^1.1.0" + gulplog "^1.0.0" + has-gulplog "^0.1.0" + lodash._reescape "^3.0.0" + lodash._reevaluate "^3.0.0" + lodash._reinterpolate "^3.0.0" + lodash.template "^3.0.0" + minimist "^1.1.0" + multipipe "^0.1.2" + object-assign "^3.0.0" + replace-ext "0.0.1" + through2 "^2.0.0" + vinyl "^0.5.0" + +gulp@^3.9.1: + version "3.9.1" + resolved "https://registry.yarnpkg.com/gulp/-/gulp-3.9.1.tgz#571ce45928dd40af6514fc4011866016c13845b4" + dependencies: + archy "^1.0.0" + chalk "^1.0.0" + deprecated "^0.0.1" + gulp-util "^3.0.0" + interpret "^1.0.0" + liftoff "^2.1.0" + minimist "^1.1.0" + orchestrator "^0.3.0" + pretty-hrtime "^1.0.0" + semver "^4.1.0" + tildify "^1.0.0" + v8flags "^2.0.2" + vinyl-fs "^0.3.0" + +gulplog@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/gulplog/-/gulplog-1.0.0.tgz#e28c4d45d05ecbbed818363ce8f9c5926229ffe5" + dependencies: + glogg "^1.0.0" + +handle-thing@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-1.2.5.tgz#fd7aad726bf1a5fd16dfc29b2f7a6601d27139c4" + +handlebars@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-1.3.0.tgz#9e9b130a93e389491322d975cf3ec1818c37ce34" + dependencies: + optimist "~0.3" + optionalDependencies: + uglify-js "~2.3" + +handlebars@^4.0.3: + version "4.0.11" + resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.0.11.tgz#630a35dfe0294bc281edae6ffc5d329fc7982dcc" + dependencies: + async "^1.4.0" + optimist "^0.6.1" + source-map "^0.4.4" + optionalDependencies: + uglify-js "^2.6" + +har-schema@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-1.0.5.tgz#d263135f43307c02c602afc8fe95970c0151369e" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + +har-validator@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d" + dependencies: + chalk "^1.1.1" + commander "^2.9.0" + is-my-json-valid "^2.12.4" + pinkie-promise "^2.0.0" + +har-validator@~4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-4.2.1.tgz#33481d0f1bbff600dd203d75812a6a5fba002e2a" + dependencies: + ajv "^4.9.1" + har-schema "^1.0.5" + +har-validator@~5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.0.3.tgz#ba402c266194f15956ef15e0fcf242993f6a7dfd" + dependencies: + ajv "^5.1.0" + har-schema "^2.0.0" + +has-ansi@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e" + dependencies: + ansi-regex "^0.2.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + dependencies: + ansi-regex "^2.0.0" + +has-binary@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.7.tgz#68e61eb16210c9545a0a5cce06a873912fe1e68c" + dependencies: + isarray "0.0.1" + +has-cors@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39" + +has-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa" + +has-flag@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51" + +has-gulplog@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/has-gulplog/-/has-gulplog-0.1.0.tgz#6414c82913697da51590397dafb12f22967811ce" + dependencies: + sparkles "^1.0.0" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + +has-value@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" + dependencies: + get-value "^2.0.3" + has-values "^0.1.4" + isobject "^2.0.0" + +has-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" + dependencies: + get-value "^2.0.6" + has-values "^1.0.0" + isobject "^3.0.0" + +has-values@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" + +has-values@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" + dependencies: + is-number "^3.0.0" + kind-of "^4.0.0" + +has@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.1.tgz#8461733f538b0837c9361e39a9ab9e9704dc2f28" + dependencies: + function-bind "^1.0.2" + +hash-base@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-2.0.2.tgz#66ea1d856db4e8a5470cadf6fce23ae5244ef2e1" + dependencies: + inherits "^2.0.1" + +hash-base@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +hash.js@^1.0.0, hash.js@^1.0.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.3.tgz#340dedbe6290187151c1ea1d777a3448935df846" + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.0" + +hawk@3.1.3, hawk@~3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4" + dependencies: + boom "2.x.x" + cryptiles "2.x.x" + hoek "2.x.x" + sntp "1.x.x" + +hawk@~6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/hawk/-/hawk-6.0.2.tgz#af4d914eb065f9b5ce4d9d11c1cb2126eecc3038" + dependencies: + boom "4.x.x" + cryptiles "3.x.x" + hoek "4.x.x" + sntp "2.x.x" + +he@1.1.x: + version "1.1.1" + resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd" + +hmac-drbg@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" + dependencies: + hash.js "^1.0.3" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.1" + +hoek@2.x.x: + version "2.16.3" + resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed" + +hoek@4.x.x: + version "4.2.0" + resolved "https://registry.yarnpkg.com/hoek/-/hoek-4.2.0.tgz#72d9d0754f7fe25ca2d01ad8f8f9a9449a89526d" + +homedir-polyfill@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/homedir-polyfill/-/homedir-polyfill-1.0.1.tgz#4c2bbc8a758998feebf5ed68580f76d46768b4bc" + dependencies: + parse-passwd "^1.0.0" + +hosted-git-info@^2.1.4: + version "2.5.0" + resolved 
"https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.5.0.tgz#6d60e34b3abbc8313062c3b798ef8d901a07af3c" + +hpack.js@^2.1.6: + version "2.1.6" + resolved "https://registry.yarnpkg.com/hpack.js/-/hpack.js-2.1.6.tgz#87774c0949e513f42e84575b3c45681fade2a0b2" + dependencies: + inherits "^2.0.1" + obuf "^1.0.0" + readable-stream "^2.0.1" + wbuf "^1.1.0" + +html-comment-regex@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.1.tgz#668b93776eaae55ebde8f3ad464b307a4963625e" + +html-entities@^1.2.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.2.1.tgz#0df29351f0721163515dfb9e5543e5f6eed5162f" + +html-minifier@^3.2.3: + version "3.5.7" + resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.5.7.tgz#511e69bb5a8e7677d1012ebe03819aa02ca06208" + dependencies: + camel-case "3.0.x" + clean-css "4.1.x" + commander "2.12.x" + he "1.1.x" + ncname "1.0.x" + param-case "2.1.x" + relateurl "0.2.x" + uglify-js "3.2.x" + +html-webpack-plugin@^2.29.0: + version "2.30.1" + resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-2.30.1.tgz#7f9c421b7ea91ec460f56527d78df484ee7537d5" + dependencies: + bluebird "^3.4.7" + html-minifier "^3.2.3" + loader-utils "^0.2.16" + lodash "^4.17.3" + pretty-error "^2.0.2" + toposort "^1.0.0" + +htmlparser2@~3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.3.0.tgz#cc70d05a59f6542e43f0e685c982e14c924a9efe" + dependencies: + domelementtype "1" + domhandler "2.1" + domutils "1.1" + readable-stream "1.0" + +http-deceiver@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/http-deceiver/-/http-deceiver-1.2.7.tgz#fa7168944ab9a519d337cb0bec7284dc3e723d87" + +http-errors@1.6.2, http-errors@~1.6.2: + version "1.6.2" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.2.tgz#0a002cc85707192a7e7946ceedc11155f60ec736" + dependencies: + depd "1.1.1" + inherits "2.0.3" + setprototypeof "1.0.3" + statuses ">= 1.3.1 < 2" + +http-parser-js@>=0.4.0: + version "0.4.9" + resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.4.9.tgz#ea1a04fb64adff0242e9974f297dd4c3cad271e1" + +http-proxy-middleware@~0.17.4: + version "0.17.4" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.17.4.tgz#642e8848851d66f09d4f124912846dbaeb41b833" + dependencies: + http-proxy "^1.16.2" + is-glob "^3.1.0" + lodash "^4.17.2" + micromatch "^2.3.11" + +http-proxy@^1.13.0, http-proxy@^1.16.2: + version "1.16.2" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.16.2.tgz#06dff292952bf64dbe8471fa9df73066d4f37742" + dependencies: + eventemitter3 "1.x.x" + requires-port "1.x.x" + +http-signature@~1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.1.1.tgz#df72e267066cd0ac67fb76adf8e134a8fbcf91bf" + dependencies: + assert-plus "^0.2.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +https-browserify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73" + +https-proxy-agent@^1.0.0: + version "1.0.0" + resolved 
"https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-1.0.0.tgz#35f7da6c48ce4ddbfa264891ac593ee5ff8671e6" + dependencies: + agent-base "2" + debug "2" + extend "3" + +iconv-lite@0.4.19: + version "0.4.19" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.19.tgz#f7468f60135f5e5dad3399c0a81be9a1603a082b" + +icss-replace-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded" + +icss-utils@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-2.1.0.tgz#83f0a0ec378bf3246178b6c2ad9136f135b1c962" + dependencies: + postcss "^6.0.1" + +ieee754@^1.1.4: + version "1.1.8" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.8.tgz#be33d40ac10ef1926701f6f08a2d86fbfd1ad3e4" + +image-size@~0.5.0: + version "0.5.5" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.5.tgz#09dfd4ab9d20e29eb1c3e80b8990378df9e3cb9c" + +img-stats@^0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/img-stats/-/img-stats-0.5.2.tgz#c203496c42f2d9eb2e5ab8232fa756bab32c9e2b" + dependencies: + xmldom "^0.1.19" + +in-publish@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/in-publish/-/in-publish-2.0.0.tgz#e20ff5e3a2afc2690320b6dc552682a9c7fadf51" + +indent-string@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-2.1.0.tgz#8e2d48348742121b4a8218b7a137e9a52049dc80" + dependencies: + repeating "^2.0.0" + +indexes-of@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" + +indexof@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d" + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-1.0.2.tgz#ca4309dadee6b54cc0b8d247e8d7c7a0975bdc9b" + +inherits@2, inherits@2.0.3, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.0, inherits@~2.0.1, inherits@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +inherits@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1" + +ini@^1.3.4, ini@~1.3.0: + version "1.3.5" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" + +internal-ip@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/internal-ip/-/internal-ip-1.2.0.tgz#ae9fbf93b984878785d50a8de1b356956058cf5c" + dependencies: + meow "^3.3.0" + +interpret@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.1.0.tgz#7ed1b1410c6a0e0f78cf95d3b8440c63f78b8614" + +invariant@^2.2.2: + version "2.2.2" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.2.tgz#9e1f56ac0acdb6bf303306f338be3b204ae60360" + dependencies: + loose-envify "^1.0.0" + +invert-kv@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" + +ip@^1.1.0, ip@^1.1.5: + version "1.1.5" + resolved 
"https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" + +ipaddr.js@1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.5.2.tgz#d4b505bde9946987ccf0fc58d9010ff9607e3fa0" + +is-absolute-url@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" + +is-absolute@^0.2.3: + version "0.2.6" + resolved "https://registry.yarnpkg.com/is-absolute/-/is-absolute-0.2.6.tgz#20de69f3db942ef2d87b9c2da36f172235b1b5eb" + dependencies: + is-relative "^0.2.1" + is-windows "^0.2.0" + +is-accessor-descriptor@^0.1.6: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" + dependencies: + kind-of "^3.0.2" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + +is-binary-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" + dependencies: + binary-extensions "^1.0.0" + +is-buffer@^1.0.2, is-buffer@^1.1.5, is-buffer@~1.1.1: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + +is-builtin-module@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-builtin-module/-/is-builtin-module-1.0.0.tgz#540572d34f7ac3119f8f76c30cbc1b1e037affbe" + dependencies: + builtin-modules "^1.0.0" + +is-data-descriptor@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" + dependencies: + kind-of "^3.0.2" + +is-descriptor@^0.1.0: + version "0.1.6" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" + dependencies: + is-accessor-descriptor "^0.1.6" + is-data-descriptor "^0.1.4" + kind-of "^5.0.0" + +is-descriptor@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.1.tgz#2c6023599bde2de9d5d2c8b9a9d94082036b6ef2" + dependencies: + is-accessor-descriptor "^0.1.6" + is-data-descriptor "^0.1.4" + kind-of "^5.0.0" + +is-directory@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" + +is-dotfile@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" + +is-equal-shallow@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534" + dependencies: + is-primitive "^2.0.0" + +is-extendable@^0.1.0, is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + +is-extendable@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" + dependencies: + is-plain-object "^2.0.4" + +is-extglob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0" + +is-extglob@^2.1.0, is-extglob@^2.1.1: + version "2.1.1" + resolved 
"https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + +is-finite@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.0.2.tgz#cc6677695602be550ef11e8b4aa6305342b6d0aa" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + +is-glob@^2.0.0, is-glob@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863" + dependencies: + is-extglob "^1.0.0" + +is-glob@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" + dependencies: + is-extglob "^2.1.0" + +is-glob@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.0.tgz#9521c76845cc2610a85203ddf080a958c2ffabc0" + dependencies: + is-extglob "^2.1.1" + +is-my-json-valid@^2.12.4: + version "2.16.1" + resolved "https://registry.yarnpkg.com/is-my-json-valid/-/is-my-json-valid-2.16.1.tgz#5a846777e2c2620d1e69104e5d3a03b1f6088f11" + dependencies: + generate-function "^2.0.0" + generate-object-property "^1.1.0" + jsonpointer "^4.0.0" + xtend "^4.0.0" + +is-number@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-0.1.1.tgz#69a7af116963d47206ec9bd9b48a14216f1e3806" + +is-number@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" + dependencies: + kind-of "^3.0.2" + +is-number@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" + dependencies: + kind-of "^3.0.2" + +is-odd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-odd/-/is-odd-1.0.0.tgz#3b8a932eb028b3775c39bb09e91767accdb69088" + dependencies: + is-number "^3.0.0" + +is-path-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" + +is-path-in-cwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.0.tgz#6477582b8214d602346094567003be8a9eac04dc" + dependencies: + is-path-inside "^1.0.0" + +is-path-inside@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" + dependencies: + path-is-inside "^1.0.1" + +is-plain-obj@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + +is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + dependencies: + isobject "^3.0.1" + +is-posix-bracket@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" + +is-primitive@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" + +is-property@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-property/-/is-property-1.0.2.tgz#57fe1c4e48474edd65b09911f26b1cd4095dda84" + +is-relative@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-relative/-/is-relative-0.2.1.tgz#d27f4c7d516d175fb610db84bbeef23c3bc97aa5" + dependencies: + is-unc-path "^0.1.1" + +is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + +is-svg@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-2.1.0.tgz#cf61090da0d9efbcab8722deba6f032208dbb0e9" + dependencies: + html-comment-regex "^1.1.0" + +is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + +is-unc-path@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/is-unc-path/-/is-unc-path-0.1.2.tgz#6ab053a72573c10250ff416a3814c35178af39b9" + dependencies: + unc-path-regex "^0.1.0" + +is-utf8@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72" + +is-windows@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-0.2.0.tgz#de1aa6d63ea29dd248737b69f1ff8b8002d2108c" + +is-windows@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.1.tgz#310db70f742d259a16a369202b51af84233310d9" + +is-wsl@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + +isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + +isbinaryfile@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/isbinaryfile/-/isbinaryfile-3.0.2.tgz#4a3e974ec0cba9004d3fc6cde7209ea69368a621" + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + dependencies: + isarray "1.0.0" + +isobject@^3.0.0, isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + +istanbul-api@^1.1.8: + version "1.2.1" + resolved "https://registry.yarnpkg.com/istanbul-api/-/istanbul-api-1.2.1.tgz#0c60a0515eb11c7d65c6b50bba2c6e999acd8620" + dependencies: + async "^2.1.4" + fileset "^2.0.2" + istanbul-lib-coverage "^1.1.1" + istanbul-lib-hook "^1.1.0" + istanbul-lib-instrument "^1.9.1" + istanbul-lib-report "^1.1.2" + istanbul-lib-source-maps "^1.2.2" + istanbul-reports "^1.1.3" + js-yaml "^3.7.0" + mkdirp "^0.5.1" + once "^1.4.0" + +istanbul-instrumenter-loader@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/istanbul-instrumenter-loader/-/istanbul-instrumenter-loader-2.0.0.tgz#e5492900ab0bba835efa8024cb00be9b3eea2700" + dependencies: + convert-source-map "^1.3.0" + istanbul-lib-instrument "^1.1.3" + loader-utils "^0.2.16" + object-assign "^4.1.0" + +istanbul-lib-coverage@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-1.1.1.tgz#73bfb998885299415c93d38a3e9adf784a77a9da" + +istanbul-lib-hook@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/istanbul-lib-hook/-/istanbul-lib-hook-1.1.0.tgz#8538d970372cb3716d53e55523dd54b557a8d89b" + dependencies: + append-transform "^0.4.0" + +istanbul-lib-instrument@^1.1.3, istanbul-lib-instrument@^1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-1.9.1.tgz#250b30b3531e5d3251299fdd64b0b2c9db6b558e" + dependencies: + babel-generator "^6.18.0" + babel-template "^6.16.0" + babel-traverse "^6.18.0" + babel-types "^6.18.0" + babylon "^6.18.0" + istanbul-lib-coverage "^1.1.1" + semver "^5.3.0" + +istanbul-lib-report@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-1.1.2.tgz#922be27c13b9511b979bd1587359f69798c1d425" + dependencies: + istanbul-lib-coverage "^1.1.1" + mkdirp "^0.5.1" + path-parse "^1.0.5" + supports-color "^3.1.2" + +istanbul-lib-source-maps@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-1.2.2.tgz#750578602435f28a0c04ee6d7d9e0f2960e62c1c" + dependencies: + debug "^3.1.0" + istanbul-lib-coverage "^1.1.1" + mkdirp "^0.5.1" + rimraf "^2.6.1" + source-map "^0.5.3" + +istanbul-reports@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-1.1.3.tgz#3b9e1e8defb6d18b1d425da8e8b32c5a163f2d10" + dependencies: + handlebars "^4.0.3" + +jasmine-core@~2.6.2: + version "2.6.4" + resolved "https://registry.yarnpkg.com/jasmine-core/-/jasmine-core-2.6.4.tgz#dec926cd0a9fa287fb6db5c755fa487e74cecac5" + +jasmine-core@~2.8.0: + version "2.8.0" + resolved "https://registry.yarnpkg.com/jasmine-core/-/jasmine-core-2.8.0.tgz#bcc979ae1f9fd05701e45e52e65d3a5d63f1a24e" + +jasmine-spec-reporter@~4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/jasmine-spec-reporter/-/jasmine-spec-reporter-4.1.1.tgz#5a6d58ab5d61bea7309fbc279239511756b1b588" + dependencies: + colors "1.1.2" + +jasmine@^2.5.3: + version "2.8.0" + resolved "https://registry.yarnpkg.com/jasmine/-/jasmine-2.8.0.tgz#6b089c0a11576b1f16df11b80146d91d4e8b8a3e" + dependencies: + exit "^0.1.2" + glob "^7.0.6" + jasmine-core "~2.8.0" + +jasminewd2@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/jasminewd2/-/jasminewd2-2.2.0.tgz#e37cf0b17f199cce23bea71b2039395246b4ec4e" + +js-base64@^2.1.5, js-base64@^2.1.8, js-base64@^2.1.9: + version "2.4.0" + resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-2.4.0.tgz#9e566fee624751a1d720c966cd6226d29d4025aa" + +js-tokens@^3.0.0, js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + +js-yaml@^3.4.3, js-yaml@^3.7.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.10.0.tgz#2e78441646bd4682e963f22b6e92823c309c62dc" + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@~3.7.0: + version "3.7.0" + resolved 
"https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.7.0.tgz#5c967ddd837a9bfdca5f2de84253abe8a1c03b80" + dependencies: + argparse "^1.0.7" + esprima "^2.6.0" + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + +jsesc@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b" + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + +json-loader@^0.5.4: + version "0.5.7" + resolved "https://registry.yarnpkg.com/json-loader/-/json-loader-0.5.7.tgz#dca14a70235ff82f0ac9a3abeb60d337a365185d" + +json-schema-traverse@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz#349a6d44c53a51de89b40805c5d5e59b417d3340" + +json-schema@0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" + +json-stable-stringify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" + dependencies: + jsonify "~0.0.0" + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + +json3@3.3.2, json3@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.2.tgz#3c0434743df93e2f5c42aee7b19bcb483575f4e1" + +json5@^0.5.0, json5@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + +jsonfile@^2.1.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonfile@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-3.0.1.tgz#a5ecc6f65f53f662c4415c7675a0331d0992ec66" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonify@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" + +jsonpointer@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/jsonpointer/-/jsonpointer-4.0.1.tgz#4fd92cb34e0e9db3c89c8622ecf51f9b978c6cb9" + +jsprim@^1.2.2: + version "1.4.1" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.2.3" + verror "1.10.0" + +karma-chrome-launcher@~2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/karma-chrome-launcher/-/karma-chrome-launcher-2.1.1.tgz#216879c68ac04d8d5140e99619ba04b59afd46cf" + dependencies: + fs-access "^1.0.0" + which "^1.2.1" + +karma-cli@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/karma-cli/-/karma-cli-1.0.1.tgz#ae6c3c58a313a1d00b45164c455b9b86ce17f960" + dependencies: + resolve "^1.1.6" + +karma-coverage-istanbul-reporter@^1.2.1: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/karma-coverage-istanbul-reporter/-/karma-coverage-istanbul-reporter-1.3.0.tgz#d142cd9c55731c9e363ef7374e8ef1a31bebfadb" + dependencies: + istanbul-api "^1.1.8" + minimatch "^3.0.4" + +karma-jasmine-html-reporter@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/karma-jasmine-html-reporter/-/karma-jasmine-html-reporter-0.2.2.tgz#48a8e5ef18807617ee2b5e33c1194c35b439524c" + dependencies: + karma-jasmine "^1.0.2" + +karma-jasmine@^1.0.2, karma-jasmine@~1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/karma-jasmine/-/karma-jasmine-1.1.1.tgz#6fe840e75a11600c9d91e84b33c458e1c46a3529" + +karma-source-map-support@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/karma-source-map-support/-/karma-source-map-support-1.2.0.tgz#1bf81e7bb4b089627ab352ec4179e117c406a540" + dependencies: + source-map-support "^0.4.1" + +karma@~1.7.0: + version "1.7.1" + resolved "https://registry.yarnpkg.com/karma/-/karma-1.7.1.tgz#85cc08e9e0a22d7ce9cca37c4a1be824f6a2b1ae" + dependencies: + bluebird "^3.3.0" + body-parser "^1.16.1" + chokidar "^1.4.1" + colors "^1.1.0" + combine-lists "^1.0.0" + connect "^3.6.0" + core-js "^2.2.0" + di "^0.0.1" + dom-serialize "^2.2.0" + expand-braces "^0.1.1" + glob "^7.1.1" + graceful-fs "^4.1.2" + http-proxy "^1.13.0" + isbinaryfile "^3.0.0" + lodash "^3.8.0" + log4js "^0.6.31" + mime "^1.3.4" + minimatch "^3.0.2" + optimist "^0.6.1" + qjobs "^1.1.4" + range-parser "^1.2.0" + rimraf "^2.6.0" + safe-buffer "^5.0.1" + socket.io "1.7.3" + source-map "^0.5.3" + tmp "0.0.31" + useragent "^2.1.12" + +kind-of@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-2.0.1.tgz#018ec7a4ce7e3a86cb9141be519d24c8faa981b5" + dependencies: + is-buffer "^1.0.2" + +kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.1.0, kind-of@^3.2.0, kind-of@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + dependencies: + is-buffer "^1.1.5" + +kind-of@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" + dependencies: + is-buffer "^1.1.5" + +kind-of@^5.0.0, kind-of@^5.0.2: + version "5.1.0" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" + +kind-of@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" + +lazy-cache@^0.2.3: + version "0.2.7" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-0.2.7.tgz#7feddf2dcb6edb77d11ef1d117ab5ffdf0ab1b65" + +lazy-cache@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" + +lazy-cache@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-2.0.2.tgz#b9190a4f913354694840859f8a8f7084d8822264" + dependencies: + set-getter "^0.1.0" + +lcid@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835" + dependencies: + invert-kv "^1.0.0" + +less-loader@^4.0.5: + version "4.0.5" + resolved "https://registry.yarnpkg.com/less-loader/-/less-loader-4.0.5.tgz#ae155a7406cac6acd293d785587fcff0f478c4dd" + dependencies: + clone "^2.1.1" + loader-utils "^1.1.0" + pify "^2.3.0" + +less@^2.7.2: + version "2.7.3" + resolved 
"https://registry.yarnpkg.com/less/-/less-2.7.3.tgz#cc1260f51c900a9ec0d91fb6998139e02507b63b" + optionalDependencies: + errno "^0.1.1" + graceful-fs "^4.1.2" + image-size "~0.5.0" + mime "^1.2.11" + mkdirp "^0.5.0" + promise "^7.1.1" + request "2.81.0" + source-map "^0.5.3" + +license-webpack-plugin@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/license-webpack-plugin/-/license-webpack-plugin-1.1.1.tgz#76b2cedccc78f139fd7877e576f756cfc141b8c2" + dependencies: + ejs "^2.5.7" + +liftoff@^2.1.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/liftoff/-/liftoff-2.4.0.tgz#1fb33e6eb28b027c7bd53ad22ebfc91a51e6ef9f" + dependencies: + extend "^3.0.0" + findup-sync "^2.0.0" + fined "^1.0.1" + flagged-respawn "^0.3.2" + is-plain-object "^2.0.4" + object.map "^1.0.0" + rechoir "^0.6.2" + resolve "^1.1.7" + +load-json-file@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + pinkie-promise "^2.0.0" + strip-bom "^2.0.0" + +load-json-file@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-2.0.0.tgz#7947e42149af80d696cbf797bcaabcfe1fe29ca8" + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + strip-bom "^3.0.0" + +loader-runner@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.3.0.tgz#f482aea82d543e07921700d5a46ef26fdac6b8a2" + +loader-utils@^0.2.15, loader-utils@^0.2.16, loader-utils@~0.2.2: + version "0.2.17" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-0.2.17.tgz#f86e6374d43205a6e6c60e9196f17c0299bfb348" + dependencies: + big.js "^3.1.3" + emojis-list "^2.0.0" + json5 "^0.5.0" + object-assign "^4.0.1" + +loader-utils@^1.0.1, loader-utils@^1.0.2, loader-utils@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.1.0.tgz#c98aef488bcceda2ffb5e2de646d6a754429f5cd" + dependencies: + big.js "^3.1.3" + emojis-list "^2.0.0" + json5 "^0.5.0" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +lodash._basecopy@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz#8da0e6a876cf344c0ad8a54882111dd3c5c7ca36" + +lodash._basetostring@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/lodash._basetostring/-/lodash._basetostring-3.0.1.tgz#d1861d877f824a52f669832dcaf3ee15566a07d5" + +lodash._basevalues@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._basevalues/-/lodash._basevalues-3.0.0.tgz#5b775762802bde3d3297503e26300820fdf661b7" + +lodash._escapehtmlchar@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._escapehtmlchar/-/lodash._escapehtmlchar-2.4.1.tgz#df67c3bb6b7e8e1e831ab48bfa0795b92afe899d" + dependencies: + lodash._htmlescapes "~2.4.1" + +lodash._escapestringchar@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._escapestringchar/-/lodash._escapestringchar-2.4.1.tgz#ecfe22618a2ade50bfeea43937e51df66f0edb72" + +lodash._getnative@^3.0.0: + version "3.9.1" + resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5" + 
+lodash._htmlescapes@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._htmlescapes/-/lodash._htmlescapes-2.4.1.tgz#32d14bf0844b6de6f8b62a051b4f67c228b624cb" + +lodash._isiterateecall@^3.0.0: + version "3.0.9" + resolved "https://registry.yarnpkg.com/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz#5203ad7ba425fae842460e696db9cf3e6aac057c" + +lodash._isnative@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._isnative/-/lodash._isnative-2.4.1.tgz#3ea6404b784a7be836c7b57580e1cdf79b14832c" + +lodash._objecttypes@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._objecttypes/-/lodash._objecttypes-2.4.1.tgz#7c0b7f69d98a1f76529f890b0cdb1b4dfec11c11" + +lodash._reescape@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reescape/-/lodash._reescape-3.0.0.tgz#2b1d6f5dfe07c8a355753e5f27fac7f1cde1616a" + +lodash._reevaluate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reevaluate/-/lodash._reevaluate-3.0.0.tgz#58bc74c40664953ae0b124d806996daca431e2ed" + +lodash._reinterpolate@^2.4.1, lodash._reinterpolate@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-2.4.1.tgz#4f1227aa5a8711fc632f5b07a1f4607aab8b3222" + +lodash._reinterpolate@^3.0.0, lodash._reinterpolate@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d" + +lodash._reunescapedhtml@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._reunescapedhtml/-/lodash._reunescapedhtml-2.4.1.tgz#747c4fc40103eb3bb8a0976e571f7a2659e93ba7" + dependencies: + lodash._htmlescapes "~2.4.1" + lodash.keys "~2.4.1" + +lodash._root@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/lodash._root/-/lodash._root-3.0.1.tgz#fba1c4524c19ee9a5f8136b4609f017cf4ded692" + +lodash._shimkeys@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash._shimkeys/-/lodash._shimkeys-2.4.1.tgz#6e9cc9666ff081f0b5a6c978b83e242e6949d203" + dependencies: + lodash._objecttypes "~2.4.1" + +lodash.assign@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7" + +lodash.camelcase@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz#b28aa6288a2b9fc651035c7711f65ab6190331a6" + +lodash.clonedeep@^4.3.2, lodash.clonedeep@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + +lodash.defaults@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.defaults/-/lodash.defaults-2.4.1.tgz#a7e8885f05e68851144b6e12a8f3678026bc4c54" + dependencies: + lodash._objecttypes "~2.4.1" + lodash.keys "~2.4.1" + +lodash.escape@^3.0.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-3.2.0.tgz#995ee0dc18c1b48cc92effae71a10aab5b487698" + dependencies: + lodash._root "^3.0.0" + +lodash.escape@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-2.4.1.tgz#2ce12c5e084db0a57dda5e5d1eeeb9f5d175a3b4" + dependencies: + lodash._escapehtmlchar "~2.4.1" + lodash._reunescapedhtml "~2.4.1" + lodash.keys "~2.4.1" + +lodash.isarguments@^3.0.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a" + +lodash.isarray@^3.0.0: + version "3.0.4" + resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55" + +lodash.isobject@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.isobject/-/lodash.isobject-2.4.1.tgz#5a2e47fe69953f1ee631a7eba1fe64d2d06558f5" + dependencies: + lodash._objecttypes "~2.4.1" + +lodash.keys@^3.0.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a" + dependencies: + lodash._getnative "^3.0.0" + lodash.isarguments "^3.0.0" + lodash.isarray "^3.0.0" + +lodash.keys@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-2.4.1.tgz#48dea46df8ff7632b10d706b8acb26591e2b3727" + dependencies: + lodash._isnative "~2.4.1" + lodash._shimkeys "~2.4.1" + lodash.isobject "~2.4.1" + +lodash.memoize@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" + +lodash.mergewith@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.mergewith/-/lodash.mergewith-4.6.0.tgz#150cf0a16791f5903b8891eab154609274bdea55" + +lodash.restparam@^3.0.0: + version "3.6.1" + resolved "https://registry.yarnpkg.com/lodash.restparam/-/lodash.restparam-3.6.1.tgz#936a4e309ef330a7645ed4145986c85ae5b20805" + +lodash.tail@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.tail/-/lodash.tail-4.1.1.tgz#d2333a36d9e7717c8ad2f7cacafec7c32b444664" + +lodash.template@^2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-2.4.1.tgz#9e611007edf629129a974ab3c48b817b3e1cf20d" + dependencies: + lodash._escapestringchar "~2.4.1" + lodash._reinterpolate "~2.4.1" + lodash.defaults "~2.4.1" + lodash.escape "~2.4.1" + lodash.keys "~2.4.1" + lodash.templatesettings "~2.4.1" + lodash.values "~2.4.1" + +lodash.template@^3.0.0: + version "3.6.2" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-3.6.2.tgz#f8cdecc6169a255be9098ae8b0c53d378931d14f" + dependencies: + lodash._basecopy "^3.0.0" + lodash._basetostring "^3.0.0" + lodash._basevalues "^3.0.0" + lodash._isiterateecall "^3.0.0" + lodash._reinterpolate "^3.0.0" + lodash.escape "^3.0.0" + lodash.keys "^3.0.0" + lodash.restparam "^3.0.0" + lodash.templatesettings "^3.0.0" + +lodash.template@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.4.0.tgz#e73a0385c8355591746e020b99679c690e68fba0" + dependencies: + lodash._reinterpolate "~3.0.0" + lodash.templatesettings "^4.0.0" + +lodash.templatesettings@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-3.1.1.tgz#fb307844753b66b9f1afa54e262c745307dba8e5" + dependencies: + lodash._reinterpolate "^3.0.0" + lodash.escape "^3.0.0" + +lodash.templatesettings@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.1.0.tgz#2b4d4e95ba440d915ff08bc899e4553666713316" + dependencies: + lodash._reinterpolate "~3.0.0" + +lodash.templatesettings@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-2.4.1.tgz#ea76c75d11eb86d4dbe89a83893bb861929ac699" + dependencies: + 
lodash._reinterpolate "~2.4.1" + lodash.escape "~2.4.1" + +lodash.uniq@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" + +lodash.values@~2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/lodash.values/-/lodash.values-2.4.1.tgz#abf514436b3cb705001627978cbcf30b1280eea4" + dependencies: + lodash.keys "~2.4.1" + +lodash@^3.8.0: + version "3.10.1" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-3.10.1.tgz#5bf45e8e49ba4189e17d482789dfd15bd140b7b6" + +lodash@^4.0.0, lodash@^4.11.1, lodash@^4.14.0, lodash@^4.17.2, lodash@^4.17.3, lodash@^4.17.4, lodash@^4.3.0, lodash@^4.5.0, lodash@~4.17.4: + version "4.17.4" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.4.tgz#78203a4d1c328ae1d86dca6460e369b57f4055ae" + +lodash@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-1.0.2.tgz#8f57560c83b59fc270bd3d561b690043430e2551" + +log4js@^0.6.31: + version "0.6.38" + resolved "https://registry.yarnpkg.com/log4js/-/log4js-0.6.38.tgz#2c494116695d6fb25480943d3fc872e662a522fd" + dependencies: + readable-stream "~1.0.2" + semver "~4.3.3" + +loglevel@^1.4.1: + version "1.6.0" + resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.0.tgz#ae0caa561111498c5ba13723d6fb631d24003934" + +longest@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/longest/-/longest-1.0.1.tgz#30a0b2da38f73770e8294a0d22e6625ed77d0097" + +loose-envify@^1.0.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.3.1.tgz#d1a8ad33fa9ce0e713d65fdd0ac8b748d478c848" + dependencies: + js-tokens "^3.0.0" + +loud-rejection@^1.0.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/loud-rejection/-/loud-rejection-1.6.0.tgz#5b46f80147edee578870f086d04821cf998e551f" + dependencies: + currently-unhandled "^0.4.1" + signal-exit "^3.0.0" + +lower-case@^1.1.1: + version "1.1.4" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac" + +lru-cache@2: + version "2.7.3" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952" + +lru-cache@2.2.x: + version "2.2.4" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.2.4.tgz#6c658619becf14031d0d0b594b16042ce4dc063d" + +lru-cache@^4.0.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.1.tgz#622e32e82488b49279114a4f9ecf45e7cd6bba55" + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +macaddress@^0.2.8: + version "0.2.8" + resolved "https://registry.yarnpkg.com/macaddress/-/macaddress-0.2.8.tgz#5904dc537c39ec6dbefeae902327135fa8511f12" + +magic-string@^0.22.3: + version "0.22.4" + resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.22.4.tgz#31039b4e40366395618c1d6cf8193c53917475ff" + dependencies: + vlq "^0.2.1" + +make-error@^1.1.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.0.tgz#52ad3a339ccf10ce62b4040b708fe707244b8b96" + +make-iterator@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/make-iterator/-/make-iterator-1.0.0.tgz#57bef5dc85d23923ba23767324d8e8f8f3d9694b" + dependencies: + kind-of "^3.1.0" + +map-cache@^0.2.0, map-cache@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" + +map-obj@^1.0.0, map-obj@^1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d" + +map-visit@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" + dependencies: + object-visit "^1.0.0" + +math-expression-evaluator@^1.2.14: + version "1.2.17" + resolved "https://registry.yarnpkg.com/math-expression-evaluator/-/math-expression-evaluator-1.2.17.tgz#de819fdbcd84dccd8fae59c6aeb79615b9d266ac" + +md5.js@^1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.4.tgz#e9bdbde94a20a5ac18b04340fc5764d5b09d901d" + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +md5@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/md5/-/md5-2.2.1.tgz#53ab38d5fe3c8891ba465329ea23fac0540126f9" + dependencies: + charenc "~0.0.1" + crypt "~0.0.1" + is-buffer "~1.1.1" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + +mem@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/mem/-/mem-1.1.0.tgz#5edd52b485ca1d900fe64895505399a0dfa45f76" + dependencies: + mimic-fn "^1.0.0" + +memory-fs@^0.4.0, memory-fs@^0.4.1, memory-fs@~0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552" + dependencies: + errno "^0.1.3" + readable-stream "^2.0.1" + +meow@^3.3.0, meow@^3.7.0: + version "3.7.0" + resolved "https://registry.yarnpkg.com/meow/-/meow-3.7.0.tgz#72cb668b425228290abbfa856892587308a801fb" + dependencies: + camelcase-keys "^2.0.0" + decamelize "^1.1.2" + loud-rejection "^1.0.0" + map-obj "^1.0.1" + minimist "^1.1.3" + normalize-package-data "^2.3.4" + object-assign "^4.0.1" + read-pkg-up "^1.0.1" + redent "^1.0.0" + trim-newlines "^1.0.0" + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + +micromatch@^2.1.5, micromatch@^2.3.11: + version "2.3.11" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565" + dependencies: + arr-diff "^2.0.0" + array-unique "^0.2.1" + braces "^1.8.2" + expand-brackets "^0.1.4" + extglob "^0.3.1" + filename-regex "^2.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.1" + kind-of "^3.0.2" + normalize-path "^2.0.1" + object.omit "^2.0.0" + parse-glob "^3.0.4" + regex-cache "^0.4.2" + +micromatch@^3.0.4: + version "3.1.4" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.4.tgz#bb812e741a41f982c854e42b421a7eac458796f4" + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + braces "^2.3.0" + define-property "^1.0.0" + extend-shallow "^2.0.1" + extglob "^2.0.2" + fragment-cache "^0.2.1" + kind-of "^6.0.0" + nanomatch "^1.2.5" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +miller-rabin@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" + dependencies: + bn.js "^4.0.0" + brorand "^1.0.1" + +"mime-db@>= 1.30.0 < 2": + version "1.32.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.32.0.tgz#485b3848b01a3cda5f968b4882c0771e58e09414" + 
+mime-db@~1.30.0: + version "1.30.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.30.0.tgz#74c643da2dd9d6a45399963465b26d5ca7d71f01" + +mime-types@^2.1.12, mime-types@~2.1.11, mime-types@~2.1.15, mime-types@~2.1.16, mime-types@~2.1.17, mime-types@~2.1.7: + version "2.1.17" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.17.tgz#09d7a393f03e995a79f8af857b70a9e0ab16557a" + dependencies: + mime-db "~1.30.0" + +mime@1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.4.1.tgz#121f9ebc49e3766f311a76e1fa1c8003c4b03aa6" + +mime@^1.2.11, mime@^1.3.4, mime@^1.4.1, mime@^1.5.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + +mimic-fn@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.1.0.tgz#e667783d92e89dbd342818b5230b9d62a672ad18" + +minimalistic-assert@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.0.tgz#702be2dda6b37f4836bcb3f5db56641b64a1d3d3" + +minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" + +"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@^3.0.3, minimatch@^3.0.4, minimatch@~3.0.2: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + dependencies: + brace-expansion "^1.1.7" + +minimatch@^2.0.1: + version "2.0.10" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-2.0.10.tgz#8d087c39c6b38c001b97fca7ce6d0e1e80afbac7" + dependencies: + brace-expansion "^1.0.0" + +minimatch@~0.2.11: + version "0.2.14" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-0.2.14.tgz#c74e780574f63c6f9a090e90efbe6ef53a6a756a" + dependencies: + lru-cache "2" + sigmund "~1.0.0" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + +minimist@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.2.0.tgz#4dffe525dae2b864c66c2e23c6271d7afdecefce" + +minimist@^1.1.0, minimist@^1.1.3, minimist@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + +minimist@~0.0.1: + version "0.0.10" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf" + +mixin-deep@^1.2.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.0.tgz#47a8732ba97799457c8c1eca28f95132d7e8150a" + dependencies: + for-in "^1.0.2" + is-extendable "^1.0.1" + +mixin-object@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/mixin-object/-/mixin-object-2.0.1.tgz#4fb949441dab182540f1fe035ba60e1947a5e57e" + dependencies: + for-in "^0.1.3" + is-extendable "^0.1.1" + +mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkdirp@~0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + dependencies: + minimist "0.0.8" + +ms@0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-0.7.1.tgz#9cd13c03adbff25b65effde7ce864ee952017098" + +ms@0.7.2: + version "0.7.2" + resolved 
"https://registry.yarnpkg.com/ms/-/ms-0.7.2.tgz#ae25cf2512b3885a1d95d7f037868d8431124765" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +multicast-dns-service-types@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz#899f11d9686e5e05cb91b35d5f0e63b773cfc901" + +multicast-dns@^6.0.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-6.2.1.tgz#c5035defa9219d30640558a49298067352098060" + dependencies: + dns-packet "^1.0.1" + thunky "^0.1.0" + +multipipe@^0.1.0, multipipe@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/multipipe/-/multipipe-0.1.2.tgz#2a8f2ddf70eed564dff2d57f1e1a137d9f05078b" + dependencies: + duplexer2 "0.0.2" + +nan@^2.3.0, nan@^2.3.2: + version "2.8.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.8.0.tgz#ed715f3fe9de02b57a5e6252d90a96675e1f085a" + +nanomatch@^1.2.5: + version "1.2.6" + resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.6.tgz#f27233e97c34a8706b7e781a4bc611c957a81625" + dependencies: + arr-diff "^4.0.0" + array-unique "^0.3.2" + define-property "^1.0.0" + extend-shallow "^2.0.1" + fragment-cache "^0.2.1" + is-odd "^1.0.0" + kind-of "^5.0.2" + object.pick "^1.3.0" + regex-not "^1.0.0" + snapdragon "^0.8.1" + to-regex "^3.0.1" + +natives@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/natives/-/natives-1.1.1.tgz#011acce1f7cbd87f7ba6b3093d6cd9392be1c574" + +ncname@1.0.x: + version "1.0.0" + resolved "https://registry.yarnpkg.com/ncname/-/ncname-1.0.0.tgz#5b57ad18b1ca092864ef62b0b1ed8194f383b71c" + dependencies: + xml-char-classes "^1.0.0" + +negotiator@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9" + +ngx-clipboard@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/ngx-clipboard/-/ngx-clipboard-9.0.0.tgz#a56e4a3d0488a491898ee5209980b81ddad8b659" + dependencies: + ngx-window-token "0.0.4" + +ngx-cookie-service@^1.0.9: + version "1.0.9" + resolved "https://registry.yarnpkg.com/ngx-cookie-service/-/ngx-cookie-service-1.0.9.tgz#b88ef198299c93d363608545036dd38ed767b0ec" + +ngx-window-token@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/ngx-window-token/-/ngx-window-token-0.0.4.tgz#47e7aaa465411c4ab5f7ba17601bc593c956c736" + +no-case@^2.2.0: + version "2.3.2" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" + dependencies: + lower-case "^1.1.1" + +node-forge@0.6.33: + version "0.6.33" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.6.33.tgz#463811879f573d45155ad6a9f43dc296e8e85ebc" + +node-gyp@^3.3.1: + version "3.6.2" + resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.6.2.tgz#9bfbe54562286284838e750eac05295853fa1c60" + dependencies: + fstream "^1.0.0" + glob "^7.0.3" + graceful-fs "^4.1.2" + minimatch "^3.0.2" + mkdirp "^0.5.0" + nopt "2 || 3" + npmlog "0 || 1 || 2 || 3 || 4" + osenv "0" + request "2" + rimraf "2" + semver "~5.3.0" + tar "^2.0.0" + which "1" + +node-libs-browser@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-2.1.0.tgz#5f94263d404f6e44767d726901fff05478d600df" + dependencies: + assert "^1.1.1" + browserify-zlib "^0.2.0" + buffer "^4.3.0" + console-browserify "^1.1.0" + constants-browserify "^1.0.0" + 
crypto-browserify "^3.11.0" + domain-browser "^1.1.1" + events "^1.0.0" + https-browserify "^1.0.0" + os-browserify "^0.3.0" + path-browserify "0.0.0" + process "^0.11.10" + punycode "^1.2.4" + querystring-es3 "^0.2.0" + readable-stream "^2.3.3" + stream-browserify "^2.0.1" + stream-http "^2.7.2" + string_decoder "^1.0.0" + timers-browserify "^2.0.4" + tty-browserify "0.0.0" + url "^0.11.0" + util "^0.10.3" + vm-browserify "0.0.4" + +node-modules-path@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/node-modules-path/-/node-modules-path-1.0.1.tgz#40096b08ce7ad0ea14680863af449c7c75a5d1c8" + +node-pre-gyp@^0.6.39: + version "0.6.39" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.6.39.tgz#c00e96860b23c0e1420ac7befc5044e1d78d8649" + dependencies: + detect-libc "^1.0.2" + hawk "3.1.3" + mkdirp "^0.5.1" + nopt "^4.0.1" + npmlog "^4.0.2" + rc "^1.1.7" + request "2.81.0" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^2.2.1" + tar-pack "^3.4.0" + +node-sass@^4.3.0: + version "4.7.2" + resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-4.7.2.tgz#9366778ba1469eb01438a9e8592f4262bcb6794e" + dependencies: + async-foreach "^0.1.3" + chalk "^1.1.1" + cross-spawn "^3.0.0" + gaze "^1.0.0" + get-stdin "^4.0.1" + glob "^7.0.3" + in-publish "^2.0.0" + lodash.assign "^4.2.0" + lodash.clonedeep "^4.3.2" + lodash.mergewith "^4.6.0" + meow "^3.7.0" + mkdirp "^0.5.1" + nan "^2.3.2" + node-gyp "^3.3.1" + npmlog "^4.0.0" + request "~2.79.0" + sass-graph "^2.2.4" + stdout-stream "^1.4.0" + "true-case-path" "^1.0.2" + +"nopt@2 || 3": + version "3.0.6" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-3.0.6.tgz#c6465dbf08abcd4db359317f79ac68a646b28ff9" + dependencies: + abbrev "1" + +nopt@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-package-data@^2.3.2, normalize-package-data@^2.3.4: + version "2.4.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.4.0.tgz#12f95a307d58352075a04907b84ac8be98ac012f" + dependencies: + hosted-git-info "^2.1.4" + is-builtin-module "^1.0.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.0.0, normalize-path@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + +normalize-url@^1.4.0: + version "1.9.1" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-1.9.1.tgz#2cc0d66b31ea23036458436e3620d85954c66c3c" + dependencies: + object-assign "^4.0.1" + prepend-http "^1.0.0" + query-string "^4.1.0" + sort-keys "^1.0.0" + +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + dependencies: + path-key "^2.0.0" + +"npmlog@0 || 1 || 2 || 3 || 4", npmlog@^4.0.0, npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +nth-check@~1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.1.tgz#9929acdf628fc2c41098deab82ac580cf149aae4" + dependencies: + boolbase "~1.0.0" + +null-check@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/null-check/-/null-check-1.0.0.tgz#977dffd7176012b9ec30d2a39db5cf72a0439edd" + +num2fraction@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + +oauth-sign@~0.8.1, oauth-sign@~0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.8.2.tgz#46a6ab7f0aead8deae9ec0565780b7d4efeb9d43" + +object-assign@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0" + +object-assign@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-3.0.0.tgz#9bedd5ca0897949bca47e7ff408062d549f587f2" + +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + +object-component@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/object-component/-/object-component-0.0.3.tgz#f0c69aa50efc95b866c186f400a33769cb2f1291" + +object-copy@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" + dependencies: + copy-descriptor "^0.1.0" + define-property "^0.2.5" + kind-of "^3.0.3" + +object-keys@~0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-0.4.0.tgz#28a6aae7428dd2c3a92f3d95f21335dd204e0336" + +object-visit@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" + dependencies: + isobject "^3.0.0" + +object.defaults@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/object.defaults/-/object.defaults-1.1.0.tgz#3a7f868334b407dea06da16d88d5cd29e435fecf" + dependencies: + array-each "^1.0.1" + array-slice "^1.0.0" + for-own "^1.0.0" + isobject "^3.0.0" + +object.map@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/object.map/-/object.map-1.0.0.tgz#92aef871cd6dcbced31fe29c0921db8395624597" + dependencies: + for-own "^0.1.4" + make-iterator "^1.0.0" + +object.omit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" + dependencies: + for-own "^0.1.4" + is-extendable "^0.1.1" + +object.pick@^1.2.0, object.pick@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" + dependencies: + isobject "^3.0.1" + +obuf@^1.0.0, obuf@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.1.tgz#104124b6c602c6796881a042541d36db43a5264e" + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + dependencies: + ee-first "1.1.1" + +on-headers@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.1.tgz#928f5d0f470d49342651ea6794b0857c100693f7" 
+ +once@^1.3.0, once@^1.3.3, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + dependencies: + wrappy "1" + +once@~1.3.0: + version "1.3.3" + resolved "https://registry.yarnpkg.com/once/-/once-1.3.3.tgz#b2e261557ce4c314ec8304f3fa82663e4297ca20" + dependencies: + wrappy "1" + +opn@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/opn/-/opn-4.0.2.tgz#7abc22e644dff63b0a96d5ab7f2790c0f01abc95" + dependencies: + object-assign "^4.0.1" + pinkie-promise "^2.0.0" + +opn@~5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/opn/-/opn-5.1.0.tgz#72ce2306a17dbea58ff1041853352b4a8fc77519" + dependencies: + is-wsl "^1.1.0" + +optimist@^0.6.1, optimist@~0.6.0: + version "0.6.1" + resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686" + dependencies: + minimist "~0.0.1" + wordwrap "~0.0.2" + +optimist@~0.3, optimist@~0.3.5: + version "0.3.7" + resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9" + dependencies: + wordwrap "~0.0.2" + +options@>=0.0.5: + version "0.0.6" + resolved "https://registry.yarnpkg.com/options/-/options-0.0.6.tgz#ec22d312806bb53e731773e7cdaefcf1c643128f" + +orchestrator@^0.3.0: + version "0.3.8" + resolved "https://registry.yarnpkg.com/orchestrator/-/orchestrator-0.3.8.tgz#14e7e9e2764f7315fbac184e506c7aa6df94ad7e" + dependencies: + end-of-stream "~0.1.5" + sequencify "~0.0.7" + stream-consume "~0.1.0" + +ordered-read-streams@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/ordered-read-streams/-/ordered-read-streams-0.1.0.tgz#fd565a9af8eb4473ba69b6ed8a34352cb552f126" + +original@>=0.0.5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/original/-/original-1.0.0.tgz#9147f93fa1696d04be61e01bd50baeaca656bd3b" + dependencies: + url-parse "1.0.x" + +os-browserify@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27" + +os-homedir@^1.0.0, os-homedir@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + +os-locale@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9" + dependencies: + lcid "^1.0.0" + +os-locale@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-2.1.0.tgz#42bc2900a6b5b8bd17376c8e882b65afccf24bf2" + dependencies: + execa "^0.7.0" + lcid "^1.0.0" + mem "^1.1.0" + +os-tmpdir@^1.0.0, os-tmpdir@~1.0.1, os-tmpdir@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + +osenv@0, osenv@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.4.tgz#42fe6d5953df06c8064be6f176c3d05aaaa34644" + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + +p-limit@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.1.0.tgz#b07ff2d9a5d88bec806035895a2bab66a27988bc" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" + dependencies: + 
p-limit "^1.1.0" + +p-map@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-1.2.0.tgz#e4e94f311eabbc8633a1e79908165fca26241b6b" + +pako@~1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.6.tgz#0101211baa70c4bca4a0f63f2206e97b7dfaf258" + +param-case@2.1.x: + version "2.1.1" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" + dependencies: + no-case "^2.2.0" + +parse-asn1@^5.0.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.0.tgz#37c4f9b7ed3ab65c74817b5f2480937fbf97c712" + dependencies: + asn1.js "^4.0.0" + browserify-aes "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.0" + pbkdf2 "^3.0.3" + +parse-filepath@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parse-filepath/-/parse-filepath-1.0.1.tgz#159d6155d43904d16c10ef698911da1e91969b73" + dependencies: + is-absolute "^0.2.3" + map-cache "^0.2.0" + path-root "^0.1.1" + +parse-glob@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" + dependencies: + glob-base "^0.3.0" + is-dotfile "^1.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.0" + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + dependencies: + error-ex "^1.2.0" + +parse-passwd@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/parse-passwd/-/parse-passwd-1.0.0.tgz#6d5b934a456993b23d37f40a382d6f1666a8e5c6" + +parsejson@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/parsejson/-/parsejson-0.0.3.tgz#ab7e3759f209ece99437973f7d0f1f64ae0e64ab" + dependencies: + better-assert "~1.0.0" + +parseqs@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.5.tgz#d5208a3738e46766e291ba2ea173684921a8b89d" + dependencies: + better-assert "~1.0.0" + +parseuri@0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.5.tgz#80204a50d4dbb779bfdc6ebe2778d90e4bce320a" + dependencies: + better-assert "~1.0.0" + +parseurl@~1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.2.tgz#fc289d4ed8993119460c156253262cdc8de65bf3" + +pascalcase@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" + +path-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.0.tgz#a0b870729aae214005b7d5032ec2cbbb0fb4451a" + +path-exists@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + dependencies: + pinkie-promise "^2.0.0" + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + +path-is-inside@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" + +path-key@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + 
+path-parse@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.5.tgz#3c1adf871ea9cd6c9431b6ea2bd74a0ff055c4c1" + +path-root-regex@^0.1.0: + version "0.1.2" + resolved "https://registry.yarnpkg.com/path-root-regex/-/path-root-regex-0.1.2.tgz#bfccdc8df5b12dc52c8b43ec38d18d72c04ba96d" + +path-root@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/path-root/-/path-root-0.1.1.tgz#9a4a6814cac1c0cd73360a95f32083c8ea4745b7" + dependencies: + path-root-regex "^0.1.0" + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + +path-type@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441" + dependencies: + graceful-fs "^4.1.2" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +path-type@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-2.0.0.tgz#f012ccb8415b7096fc2daa1054c3d72389594c73" + dependencies: + pify "^2.0.0" + +pbkdf2@^3.0.3: + version "3.0.14" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.0.14.tgz#a35e13c64799b06ce15320f459c230e68e73bade" + dependencies: + create-hash "^1.1.2" + create-hmac "^1.1.4" + ripemd160 "^2.0.1" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +performance-now@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-0.2.0.tgz#33ef30c5c77d4ea21c5a53869d91b56d8f2555e5" + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + +pify@^2.0.0, pify@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + +portfinder@^1.0.9, portfinder@~1.0.12: + version "1.0.13" + resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.13.tgz#bb32ecd87c27104ae6ee44b5a3ccbf0ebb1aede9" + dependencies: + async "^1.5.2" + debug "^2.2.0" + mkdirp "0.5.x" + +posix-character-classes@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" + +postcss-calc@^5.2.0: + version "5.3.1" + resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-5.3.1.tgz#77bae7ca928ad85716e2fda42f261bf7c1d65b5e" + dependencies: + postcss "^5.0.2" + postcss-message-helpers "^2.0.0" + reduce-css-calc "^1.2.6" + +postcss-colormin@^2.1.8: + version "2.2.2" + resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-2.2.2.tgz#6631417d5f0e909a3d7ec26b24c8a8d1e4f96e4b" + dependencies: + colormin "^1.0.5" + postcss "^5.0.13" + postcss-value-parser "^3.2.3" + +postcss-convert-values@^2.3.4: + version "2.6.1" + resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-2.6.1.tgz#bbd8593c5c1fd2e3d1c322bb925dcae8dae4d62d" + dependencies: + postcss 
"^5.0.11" + postcss-value-parser "^3.1.2" + +postcss-discard-comments@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-2.0.4.tgz#befe89fafd5b3dace5ccce51b76b81514be00e3d" + dependencies: + postcss "^5.0.14" + +postcss-discard-duplicates@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-2.1.0.tgz#b9abf27b88ac188158a5eb12abcae20263b91932" + dependencies: + postcss "^5.0.4" + +postcss-discard-empty@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-2.1.0.tgz#d2b4bd9d5ced5ebd8dcade7640c7d7cd7f4f92b5" + dependencies: + postcss "^5.0.14" + +postcss-discard-overridden@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-0.1.1.tgz#8b1eaf554f686fb288cd874c55667b0aa3668d58" + dependencies: + postcss "^5.0.16" + +postcss-discard-unused@^2.2.1: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-discard-unused/-/postcss-discard-unused-2.2.3.tgz#bce30b2cc591ffc634322b5fb3464b6d934f4433" + dependencies: + postcss "^5.0.14" + uniqs "^2.0.0" + +postcss-filter-plugins@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-filter-plugins/-/postcss-filter-plugins-2.0.2.tgz#6d85862534d735ac420e4a85806e1f5d4286d84c" + dependencies: + postcss "^5.0.4" + uniqid "^4.0.0" + +postcss-load-config@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-1.2.0.tgz#539e9afc9ddc8620121ebf9d8c3673e0ce50d28a" + dependencies: + cosmiconfig "^2.1.0" + object-assign "^4.1.0" + postcss-load-options "^1.2.0" + postcss-load-plugins "^2.3.0" + +postcss-load-options@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-load-options/-/postcss-load-options-1.2.0.tgz#b098b1559ddac2df04bc0bb375f99a5cfe2b6d8c" + dependencies: + cosmiconfig "^2.1.0" + object-assign "^4.1.0" + +postcss-load-plugins@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/postcss-load-plugins/-/postcss-load-plugins-2.3.0.tgz#745768116599aca2f009fad426b00175049d8d92" + dependencies: + cosmiconfig "^2.1.1" + object-assign "^4.1.0" + +postcss-loader@^1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-1.3.3.tgz#a621ea1fa29062a83972a46f54486771301916eb" + dependencies: + loader-utils "^1.0.2" + object-assign "^4.1.1" + postcss "^5.2.15" + postcss-load-config "^1.2.0" + +postcss-merge-idents@^2.1.5: + version "2.1.7" + resolved "https://registry.yarnpkg.com/postcss-merge-idents/-/postcss-merge-idents-2.1.7.tgz#4c5530313c08e1d5b3bbf3d2bbc747e278eea270" + dependencies: + has "^1.0.1" + postcss "^5.0.10" + postcss-value-parser "^3.1.1" + +postcss-merge-longhand@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-2.0.2.tgz#23d90cd127b0a77994915332739034a1a4f3d658" + dependencies: + postcss "^5.0.4" + +postcss-merge-rules@^2.0.3: + version "2.1.2" + resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-2.1.2.tgz#d1df5dfaa7b1acc3be553f0e9e10e87c61b5f721" + dependencies: + browserslist "^1.5.2" + caniuse-api "^1.5.2" + postcss "^5.0.4" + postcss-selector-parser "^2.2.2" + vendors "^1.0.0" + +postcss-message-helpers@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/postcss-message-helpers/-/postcss-message-helpers-2.0.0.tgz#a4f2f4fab6e4fe002f0aed000478cdf52f9ba60e" + +postcss-minify-font-values@^1.0.2: + version "1.0.5" + resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-1.0.5.tgz#4b58edb56641eba7c8474ab3526cafd7bbdecb69" + dependencies: + object-assign "^4.0.1" + postcss "^5.0.4" + postcss-value-parser "^3.0.2" + +postcss-minify-gradients@^1.0.1: + version "1.0.5" + resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-1.0.5.tgz#5dbda11373703f83cfb4a3ea3881d8d75ff5e6e1" + dependencies: + postcss "^5.0.12" + postcss-value-parser "^3.3.0" + +postcss-minify-params@^1.0.4: + version "1.2.2" + resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-1.2.2.tgz#ad2ce071373b943b3d930a3fa59a358c28d6f1f3" + dependencies: + alphanum-sort "^1.0.1" + postcss "^5.0.2" + postcss-value-parser "^3.0.2" + uniqs "^2.0.0" + +postcss-minify-selectors@^2.0.4: + version "2.1.1" + resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-2.1.1.tgz#b2c6a98c0072cf91b932d1a496508114311735bf" + dependencies: + alphanum-sort "^1.0.2" + has "^1.0.1" + postcss "^5.0.14" + postcss-selector-parser "^2.0.0" + +postcss-modules-extract-imports@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-1.1.0.tgz#b614c9720be6816eaee35fb3a5faa1dba6a05ddb" + dependencies: + postcss "^6.0.1" + +postcss-modules-local-by-default@^1.0.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-1.2.0.tgz#f7d80c398c5a393fa7964466bd19500a7d61c069" + dependencies: + css-selector-tokenizer "^0.7.0" + postcss "^6.0.1" + +postcss-modules-scope@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-1.1.0.tgz#d6ea64994c79f97b62a72b426fbe6056a194bb90" + dependencies: + css-selector-tokenizer "^0.7.0" + postcss "^6.0.1" + +postcss-modules-values@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-1.3.0.tgz#ecffa9d7e192518389f42ad0e83f72aec456ea20" + dependencies: + icss-replace-symbols "^1.1.0" + postcss "^6.0.1" + +postcss-normalize-charset@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-1.1.1.tgz#ef9ee71212d7fe759c78ed162f61ed62b5cb93f1" + dependencies: + postcss "^5.0.5" + +postcss-normalize-url@^3.0.7: + version "3.0.8" + resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-3.0.8.tgz#108f74b3f2fcdaf891a2ffa3ea4592279fc78222" + dependencies: + is-absolute-url "^2.0.0" + normalize-url "^1.4.0" + postcss "^5.0.14" + postcss-value-parser "^3.2.3" + +postcss-ordered-values@^2.1.0: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-2.2.3.tgz#eec6c2a67b6c412a8db2042e77fe8da43f95c11d" + dependencies: + postcss "^5.0.4" + postcss-value-parser "^3.0.1" + +postcss-reduce-idents@^2.2.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/postcss-reduce-idents/-/postcss-reduce-idents-2.4.0.tgz#c2c6d20cc958284f6abfbe63f7609bf409059ad3" + dependencies: + postcss "^5.0.4" + postcss-value-parser "^3.0.2" + +postcss-reduce-initial@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-1.0.1.tgz#68f80695f045d08263a879ad240df8dd64f644ea" + dependencies: + postcss "^5.0.4" + +postcss-reduce-transforms@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-1.0.4.tgz#ff76f4d8212437b31c298a42d2e1444025771ae1" + dependencies: + has "^1.0.1" + postcss "^5.0.8" + postcss-value-parser "^3.0.1" + +postcss-selector-parser@^2.0.0, postcss-selector-parser@^2.2.2: + version "2.2.3" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-2.2.3.tgz#f9437788606c3c9acee16ffe8d8b16297f27bb90" + dependencies: + flatten "^1.0.2" + indexes-of "^1.0.1" + uniq "^1.0.1" + +postcss-svgo@^2.1.1: + version "2.1.6" + resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-2.1.6.tgz#b6df18aa613b666e133f08adb5219c2684ac108d" + dependencies: + is-svg "^2.0.0" + postcss "^5.0.14" + postcss-value-parser "^3.2.3" + svgo "^0.7.0" + +postcss-unique-selectors@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-2.0.2.tgz#981d57d29ddcb33e7b1dfe1fd43b8649f933ca1d" + dependencies: + alphanum-sort "^1.0.1" + postcss "^5.0.4" + uniqs "^2.0.0" + +postcss-url@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/postcss-url/-/postcss-url-5.1.2.tgz#98b3165be8d592471cb0caadde2c0d1f832f133e" + dependencies: + directory-encoder "^0.7.2" + js-base64 "^2.1.5" + mime "^1.2.11" + minimatch "^3.0.0" + mkdirp "^0.5.0" + path-is-absolute "^1.0.0" + postcss "^5.0.0" + +postcss-value-parser@^3.0.1, postcss-value-parser@^3.0.2, postcss-value-parser@^3.1.1, postcss-value-parser@^3.1.2, postcss-value-parser@^3.2.3, postcss-value-parser@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.0.tgz#87f38f9f18f774a4ab4c8a232f5c5ce8872a9d15" + +postcss-zindex@^2.0.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/postcss-zindex/-/postcss-zindex-2.2.0.tgz#d2109ddc055b91af67fc4cb3b025946639d2af22" + dependencies: + has "^1.0.1" + postcss "^5.0.4" + uniqs "^2.0.0" + +postcss@^5.0.0, postcss@^5.0.10, postcss@^5.0.11, postcss@^5.0.12, postcss@^5.0.13, postcss@^5.0.14, postcss@^5.0.16, postcss@^5.0.2, postcss@^5.0.4, postcss@^5.0.5, postcss@^5.0.6, postcss@^5.0.8, postcss@^5.2.15, postcss@^5.2.16: + version "5.2.18" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-5.2.18.tgz#badfa1497d46244f6390f58b319830d9107853c5" + dependencies: + chalk "^1.1.3" + js-base64 "^2.1.9" + source-map "^0.5.6" + supports-color "^3.2.3" + +postcss@^6.0.1: + version "6.0.14" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-6.0.14.tgz#5534c72114739e75d0afcf017db853099f562885" + dependencies: + chalk "^2.3.0" + source-map "^0.6.1" + supports-color "^4.4.0" + +prepend-http@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + +preserve@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" + +pretty-error@^2.0.2: + version "2.1.1" + resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" + dependencies: + renderkid "^2.0.1" + utila "~0.4" + +pretty-hrtime@^1.0.0: + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz#b7e3ea42435a4c9b2759d99e0f201eb195802ee1" + +process-nextick-args@~1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3" + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + +promise@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" + dependencies: + asap "~2.0.3" + +protractor@~5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/protractor/-/protractor-5.1.2.tgz#9b221741709a4c62d5cd53c6aadd54a71137e95f" + dependencies: + "@types/node" "^6.0.46" + "@types/q" "^0.0.32" + "@types/selenium-webdriver" "~2.53.39" + blocking-proxy "0.0.5" + chalk "^1.1.3" + glob "^7.0.3" + jasmine "^2.5.3" + jasminewd2 "^2.1.0" + optimist "~0.6.0" + q "1.4.1" + saucelabs "~1.3.0" + selenium-webdriver "3.0.1" + source-map-support "~0.4.0" + webdriver-js-extender "^1.0.0" + webdriver-manager "^12.0.6" + +proxy-addr@~2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.2.tgz#6571504f47bb988ec8180253f85dd7e14952bdec" + dependencies: + forwarded "~0.1.2" + ipaddr.js "1.5.2" + +prr@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + +public-encrypt@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.0.tgz#39f699f3a46560dd5ebacbca693caf7c65c18cc6" + dependencies: + bn.js "^4.1.0" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + parse-asn1 "^5.0.0" + randombytes "^2.0.1" + +punycode@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" + +punycode@^1.2.4, punycode@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" + +q@1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/q/-/q-1.4.1.tgz#55705bcd93c5f3673530c2c2cbc0c2b3addc286e" + +q@^1.1.2, q@^1.4.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" + +qjobs@^1.1.4: + version "1.1.5" + resolved "https://registry.yarnpkg.com/qjobs/-/qjobs-1.1.5.tgz#659de9f2cf8dcc27a1481276f205377272382e73" + +qs@6.5.1, qs@~6.5.1: + version "6.5.1" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.1.tgz#349cdf6eef89ec45c12d7d5eb3fc0c870343a6d8" + +qs@~6.3.0: + version "6.3.2" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.3.2.tgz#e75bd5f6e268122a2a0e0bda630b2550c166502c" + +qs@~6.4.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233" + +query-string@^4.1.0: + version "4.3.4" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-4.3.4.tgz#bbb693b9ca915c232515b228b1a02b609043dbeb" + dependencies: + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +querystring-es3@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" + +querystring@0.2.0: + version "0.2.0" + 
resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + +querystringify@0.0.x: + version "0.0.4" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-0.0.4.tgz#0cf7f84f9463ff0ae51c4c4b142d95be37724d9c" + +querystringify@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-1.0.0.tgz#6286242112c5b712fa654e526652bf6a13ff05cb" + +randomatic@^1.1.3: + version "1.1.7" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-1.1.7.tgz#c7abe9cc8b87c0baa876b19fde83fd464797e38c" + dependencies: + is-number "^3.0.0" + kind-of "^4.0.0" + +randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.0.5.tgz#dc009a246b8d09a177b4b7a0ae77bc570f4b1b79" + dependencies: + safe-buffer "^5.1.0" + +randomfill@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.3.tgz#b96b7df587f01dd91726c418f30553b1418e3d62" + dependencies: + randombytes "^2.0.5" + safe-buffer "^5.1.0" + +range-parser@^1.0.3, range-parser@^1.2.0, range-parser@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + +raw-body@2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.3.2.tgz#bcd60c77d3eb93cde0050295c3f379389bc88f89" + dependencies: + bytes "3.0.0" + http-errors "1.6.2" + iconv-lite "0.4.19" + unpipe "1.0.0" + +raw-loader@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/raw-loader/-/raw-loader-0.5.1.tgz#0c3d0beaed8a01c966d9787bf778281252a979aa" + +rc@^1.1.7: + version "1.2.2" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.2.tgz#d8ce9cb57e8d64d9c7badd9876c7c34cbe3c7077" + dependencies: + deep-extend "~0.4.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +read-pkg-up@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02" + dependencies: + find-up "^1.0.0" + read-pkg "^1.0.0" + +read-pkg-up@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-2.0.0.tgz#6b72a8048984e0c41e79510fd5e9fa99b3b549be" + dependencies: + find-up "^2.0.0" + read-pkg "^2.0.0" + +read-pkg@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28" + dependencies: + load-json-file "^1.0.0" + normalize-package-data "^2.3.2" + path-type "^1.0.0" + +read-pkg@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-2.0.0.tgz#8ef1c0623c6a6db0dc6713c4bfac46332b2368f8" + dependencies: + load-json-file "^2.0.0" + normalize-package-data "^2.3.2" + path-type "^2.0.0" + +readable-stream@1.0, "readable-stream@>=1.0.33-1 <1.1.0-0", readable-stream@~1.0.17, readable-stream@~1.0.2: + version "1.0.34" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.1.4, readable-stream@^2.1.5, readable-stream@^2.2.6, readable-stream@^2.2.9, readable-stream@^2.3.3: + version "2.3.3" + resolved 
"https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.3.tgz#368f2512d79f9d46fdfc71349ae7878bbc1eb95c" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~1.0.6" + safe-buffer "~5.1.1" + string_decoder "~1.0.3" + util-deprecate "~1.0.1" + +readable-stream@~1.1.9: + version "1.1.14" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9" + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readdirp@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.1.0.tgz#4ed0ad060df3073300c48440373f72d1cc642d78" + dependencies: + graceful-fs "^4.1.2" + minimatch "^3.0.2" + readable-stream "^2.0.2" + set-immediate-shim "^1.0.1" + +rechoir@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" + dependencies: + resolve "^1.1.6" + +redent@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/redent/-/redent-1.0.0.tgz#cf916ab1fd5f1f16dfb20822dd6ec7f730c2afde" + dependencies: + indent-string "^2.1.0" + strip-indent "^1.0.1" + +reduce-css-calc@^1.2.6: + version "1.3.0" + resolved "https://registry.yarnpkg.com/reduce-css-calc/-/reduce-css-calc-1.3.0.tgz#747c914e049614a4c9cfbba629871ad1d2927716" + dependencies: + balanced-match "^0.4.2" + math-expression-evaluator "^1.2.14" + reduce-function-call "^1.0.1" + +reduce-function-call@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/reduce-function-call/-/reduce-function-call-1.0.2.tgz#5a200bf92e0e37751752fe45b0ab330fd4b6be99" + dependencies: + balanced-match "^0.4.2" + +reflect-metadata@^0.1.2: + version "0.1.10" + resolved "https://registry.yarnpkg.com/reflect-metadata/-/reflect-metadata-0.1.10.tgz#b4f83704416acad89988c9b15635d47e03b9344a" + +regenerate@^1.2.1: + version "1.3.3" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.3.3.tgz#0c336d3980553d755c39b586ae3b20aa49c82b7f" + +regenerator-runtime@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" + +regex-cache@^0.4.2: + version "0.4.4" + resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.4.tgz#75bdc58a2a1496cec48a12835bc54c8d562336dd" + dependencies: + is-equal-shallow "^0.1.3" + +regex-not@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.0.tgz#42f83e39771622df826b02af176525d6a5f157f9" + dependencies: + extend-shallow "^2.0.1" + +regexpu-core@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-1.0.0.tgz#86a763f58ee4d7c2f6b102e4764050de7ed90c6b" + dependencies: + regenerate "^1.2.1" + regjsgen "^0.2.0" + regjsparser "^0.1.4" + +regjsgen@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7" + +regjsparser@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c" + dependencies: + jsesc "~0.5.0" + +relateurl@0.2.x: + version "0.2.7" + resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + +renderkid@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.1.tgz#898cabfc8bede4b7b91135a3ffd323e58c0db319" + dependencies: + css-select "^1.1.0" + dom-converter "~0.1" + htmlparser2 "~3.3.0" + strip-ansi "^3.0.0" + utila "~0.3" + +repeat-element@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.2.tgz#ef089a178d1483baae4d93eb98b4f9e4e11d990a" + +repeat-string@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-0.2.2.tgz#c7a8d3236068362059a7e4651fc6884e8b1fb4ae" + +repeat-string@^1.5.2, repeat-string@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + +repeating@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda" + dependencies: + is-finite "^1.0.0" + +replace-ext@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-0.0.1.tgz#29bbd92078a739f0bcce2b4ee41e837953522924" + +request@2, request@^2.78.0: + version "2.83.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.83.0.tgz#ca0b65da02ed62935887808e6f510381034e3356" + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.6.0" + caseless "~0.12.0" + combined-stream "~1.0.5" + extend "~3.0.1" + forever-agent "~0.6.1" + form-data "~2.3.1" + har-validator "~5.0.3" + hawk "~6.0.2" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.17" + oauth-sign "~0.8.2" + performance-now "^2.1.0" + qs "~6.5.1" + safe-buffer "^5.1.1" + stringstream "~0.0.5" + tough-cookie "~2.3.3" + tunnel-agent "^0.6.0" + uuid "^3.1.0" + +request@2.81.0: + version "2.81.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.81.0.tgz#c6928946a0e06c5f8d6f8a9333469ffda46298a0" + dependencies: + aws-sign2 "~0.6.0" + aws4 "^1.2.1" + caseless "~0.12.0" + combined-stream "~1.0.5" + extend "~3.0.0" + forever-agent "~0.6.1" + form-data "~2.1.1" + har-validator "~4.2.1" + hawk "~3.1.3" + http-signature "~1.1.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.7" + oauth-sign "~0.8.1" + performance-now "^0.2.0" + qs "~6.4.0" + safe-buffer "^5.0.1" + stringstream "~0.0.4" + tough-cookie "~2.3.0" + tunnel-agent "^0.6.0" + uuid "^3.0.0" + +request@~2.79.0: + version "2.79.0" + resolved "https://registry.yarnpkg.com/request/-/request-2.79.0.tgz#4dfe5bf6be8b8cdc37fcf93e04b65577722710de" + dependencies: + aws-sign2 "~0.6.0" + aws4 "^1.2.1" + caseless "~0.11.0" + combined-stream "~1.0.5" + extend "~3.0.0" + forever-agent "~0.6.1" + form-data "~2.1.1" + har-validator "~2.0.6" + hawk "~3.1.3" + http-signature "~1.1.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.7" + oauth-sign "~0.8.1" + qs "~6.3.0" + stringstream "~0.0.4" + tough-cookie "~2.3.0" + tunnel-agent "~0.4.1" + uuid "^3.0.0" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + +require-from-string@^1.1.0: + version "1.2.1" + resolved 
"https://registry.yarnpkg.com/require-from-string/-/require-from-string-1.2.1.tgz#529c9ccef27380adfec9a2f965b649bbee636418" + +require-main-filename@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" + +requires-port@1.0.x, requires-port@1.x.x, requires-port@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + +resolve-dir@^1.0.0, resolve-dir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/resolve-dir/-/resolve-dir-1.0.1.tgz#79a40644c362be82f26effe739c9bb5382046f43" + dependencies: + expand-tilde "^2.0.0" + global-modules "^1.0.0" + +resolve-url@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" + +resolve@^1.1.6, resolve@^1.1.7, resolve@^1.3.2: + version "1.5.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.5.0.tgz#1f09acce796c9a762579f31b2c1cc4c3cddf9f36" + dependencies: + path-parse "^1.0.5" + +right-align@^0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/right-align/-/right-align-0.1.3.tgz#61339b722fe6a3515689210d24e14c96148613ef" + dependencies: + align-text "^0.1.1" + +rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.5.2, rimraf@^2.5.4, rimraf@^2.6.0, rimraf@^2.6.1: + version "2.6.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36" + dependencies: + glob "^7.0.5" + +ripemd160@^2.0.0, ripemd160@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.1.tgz#0f4584295c53a3628af7e6d79aca21ce57d1c6e7" + dependencies: + hash-base "^2.0.0" + inherits "^2.0.1" + +run-sequence@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/run-sequence/-/run-sequence-2.2.0.tgz#b3f8d42836db89d08b2fe704eaf0c93dfd8335e2" + dependencies: + chalk "^1.1.3" + gulp-util "^3.0.8" + +rxjs@^5.4.2, rxjs@^5.5.2: + version "5.5.5" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-5.5.5.tgz#e164f11d38eaf29f56f08c3447f74ff02dd84e97" + dependencies: + symbol-observable "1.0.1" + +safe-buffer@5.1.1, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.1.tgz#893312af69b2123def71f57889001671eeb2c853" + +sass-graph@^2.2.4: + version "2.2.4" + resolved "https://registry.yarnpkg.com/sass-graph/-/sass-graph-2.2.4.tgz#13fbd63cd1caf0908b9fd93476ad43a51d1e0b49" + dependencies: + glob "^7.0.0" + lodash "^4.0.0" + scss-tokenizer "^0.2.3" + yargs "^7.0.0" + +sass-loader@^6.0.3: + version "6.0.6" + resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-6.0.6.tgz#e9d5e6c1f155faa32a4b26d7a9b7107c225e40f9" + dependencies: + async "^2.1.5" + clone-deep "^0.3.0" + loader-utils "^1.0.1" + lodash.tail "^4.1.1" + pify "^3.0.0" + +saucelabs@~1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/saucelabs/-/saucelabs-1.3.0.tgz#d240e8009df7fa87306ec4578a69ba3b5c424fee" + dependencies: + https-proxy-agent "^1.0.0" + +sax@0.5.x: + version "0.5.8" + resolved "https://registry.yarnpkg.com/sax/-/sax-0.5.8.tgz#d472db228eb331c2506b0e8c15524adb939d12c1" + +sax@0.6.x: + version "0.6.1" + resolved "https://registry.yarnpkg.com/sax/-/sax-0.6.1.tgz#563b19c7c1de892e09bfc4f2fc30e3c27f0952b9" + +sax@>=0.6.0, sax@~1.2.1: + version "1.2.4" + 
resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + +schema-utils@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-0.3.0.tgz#f5877222ce3e931edae039f17eb3716e7137f8cf" + dependencies: + ajv "^5.0.0" + +scss-tokenizer@^0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/scss-tokenizer/-/scss-tokenizer-0.2.3.tgz#8eb06db9a9723333824d3f5530641149847ce5d1" + dependencies: + js-base64 "^2.1.8" + source-map "^0.4.2" + +select-hose@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" + +selenium-webdriver@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/selenium-webdriver/-/selenium-webdriver-3.0.1.tgz#a2dea5da4a97f6672e89e7ca7276cefa365147a7" + dependencies: + adm-zip "^0.4.7" + rimraf "^2.5.4" + tmp "0.0.30" + xml2js "^0.4.17" + +selenium-webdriver@^2.53.2: + version "2.53.3" + resolved "https://registry.yarnpkg.com/selenium-webdriver/-/selenium-webdriver-2.53.3.tgz#d29ff5a957dff1a1b49dc457756e4e4bfbdce085" + dependencies: + adm-zip "0.4.4" + rimraf "^2.2.8" + tmp "0.0.24" + ws "^1.0.1" + xml2js "0.4.4" + +selfsigned@^1.9.1: + version "1.10.1" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.1.tgz#bf8cb7b83256c4551e31347c6311778db99eec52" + dependencies: + node-forge "0.6.33" + +semver-dsl@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/semver-dsl/-/semver-dsl-1.0.1.tgz#d3678de5555e8a61f629eed025366ae5f27340a0" + dependencies: + semver "^5.3.0" + +"semver@2 || 3 || 4 || 5", semver@^5.1.0, semver@^5.3.0: + version "5.4.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.4.1.tgz#e059c09d8571f0540823733433505d3a2f00b18e" + +semver@^4.1.0, semver@~4.3.3: + version "4.3.6" + resolved "https://registry.yarnpkg.com/semver/-/semver-4.3.6.tgz#300bc6e0e86374f7ba61068b5b1ecd57fc6532da" + +semver@~5.0.1: + version "5.0.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.0.3.tgz#77466de589cd5d3c95f138aa78bc569a3cb5d27a" + +semver@~5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.3.0.tgz#9b2ce5d3de02d17c6012ad326aa6b4d0cf54f94f" + +send@0.16.1: + version "0.16.1" + resolved "https://registry.yarnpkg.com/send/-/send-0.16.1.tgz#a70e1ca21d1382c11d0d9f6231deb281080d7ab3" + dependencies: + debug "2.6.9" + depd "~1.1.1" + destroy "~1.0.4" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "~1.6.2" + mime "1.4.1" + ms "2.0.0" + on-finished "~2.3.0" + range-parser "~1.2.0" + statuses "~1.3.1" + +sequencify@~0.0.7: + version "0.0.7" + resolved "https://registry.yarnpkg.com/sequencify/-/sequencify-0.0.7.tgz#90cff19d02e07027fd767f5ead3e7b95d1e7380c" + +serve-index@^1.7.2: + version "1.9.1" + resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239" + dependencies: + accepts "~1.3.4" + batch "0.6.1" + debug "2.6.9" + escape-html "~1.0.3" + http-errors "~1.6.2" + mime-types "~2.1.17" + parseurl "~1.3.2" + +serve-static@1.13.1: + version "1.13.1" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.13.1.tgz#4c57d53404a761d8f2e7c1e8a18a47dbf278a719" + dependencies: + encodeurl "~1.0.1" + escape-html "~1.0.3" + parseurl "~1.3.2" + send "0.16.1" + +set-blocking@^2.0.0, set-blocking@~2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + +set-getter@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/set-getter/-/set-getter-0.1.0.tgz#d769c182c9d5a51f409145f2fba82e5e86e80376" + dependencies: + to-object-path "^0.3.0" + +set-immediate-shim@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" + +set-value@^0.4.3: + version "0.4.3" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-0.4.3.tgz#7db08f9d3d22dc7f78e53af3c3bf4666ecdfccf1" + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.1" + to-object-path "^0.3.0" + +set-value@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.0.tgz#71ae4a88f0feefbbf52d1ea604f3fb315ebb6274" + dependencies: + extend-shallow "^2.0.1" + is-extendable "^0.1.1" + is-plain-object "^2.0.3" + split-string "^3.0.1" + +setimmediate@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + +setprototypeof@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04" + +setprototypeof@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" + +sha.js@^2.4.0, sha.js@^2.4.8: + version "2.4.9" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.9.tgz#98f64880474b74f4a38b8da9d3c0f2d104633e7d" + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +shallow-clone@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-0.1.2.tgz#5909e874ba77106d73ac414cfec1ffca87d97060" + dependencies: + is-extendable "^0.1.1" + kind-of "^2.0.1" + lazy-cache "^0.2.3" + mixin-object "^2.0.1" + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + dependencies: + shebang-regex "^1.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + +sigmund@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/sigmund/-/sigmund-1.0.1.tgz#3ff21f198cad2175f9f3b781853fd94d0d19b590" + +signal-exit@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" + +silent-error@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.1.0.tgz#2209706f1c850a9f1d10d0d840918b46f26e1bc9" + dependencies: + debug "^2.2.0" + +snapdragon-node@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" + dependencies: + define-property "^1.0.0" + isobject "^3.0.0" + snapdragon-util "^3.0.1" + +snapdragon-util@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" + dependencies: + kind-of "^3.2.0" + +snapdragon@^0.8.1: + version "0.8.1" + resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.1.tgz#e12b5487faded3e3dea0ac91e9400bf75b401370" + dependencies: + 
base "^0.11.1" + debug "^2.2.0" + define-property "^0.2.5" + extend-shallow "^2.0.1" + map-cache "^0.2.2" + source-map "^0.5.6" + source-map-resolve "^0.5.0" + use "^2.0.0" + +sntp@1.x.x: + version "1.0.9" + resolved "https://registry.yarnpkg.com/sntp/-/sntp-1.0.9.tgz#6541184cc90aeea6c6e7b35e2659082443c66198" + dependencies: + hoek "2.x.x" + +sntp@2.x.x: + version "2.1.0" + resolved "https://registry.yarnpkg.com/sntp/-/sntp-2.1.0.tgz#2c6cec14fedc2222739caf9b5c3d85d1cc5a2cc8" + dependencies: + hoek "4.x.x" + +socket.io-adapter@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-0.5.0.tgz#cb6d4bb8bec81e1078b99677f9ced0046066bb8b" + dependencies: + debug "2.3.3" + socket.io-parser "2.3.1" + +socket.io-client@1.7.3: + version "1.7.3" + resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-1.7.3.tgz#b30e86aa10d5ef3546601c09cde4765e381da377" + dependencies: + backo2 "1.0.2" + component-bind "1.0.0" + component-emitter "1.2.1" + debug "2.3.3" + engine.io-client "1.8.3" + has-binary "0.1.7" + indexof "0.0.1" + object-component "0.0.3" + parseuri "0.0.5" + socket.io-parser "2.3.1" + to-array "0.1.4" + +socket.io-parser@2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-2.3.1.tgz#dd532025103ce429697326befd64005fcfe5b4a0" + dependencies: + component-emitter "1.1.2" + debug "2.2.0" + isarray "0.0.1" + json3 "3.3.2" + +socket.io@1.7.3: + version "1.7.3" + resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-1.7.3.tgz#b8af9caba00949e568e369f1327ea9be9ea2461b" + dependencies: + debug "2.3.3" + engine.io "1.8.3" + has-binary "0.1.7" + object-assign "4.1.0" + socket.io-adapter "0.5.0" + socket.io-client "1.7.3" + socket.io-parser "2.3.1" + +sockjs-client@1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.1.4.tgz#5babe386b775e4cf14e7520911452654016c8b12" + dependencies: + debug "^2.6.6" + eventsource "0.1.6" + faye-websocket "~0.11.0" + inherits "^2.0.1" + json3 "^3.3.2" + url-parse "^1.1.8" + +sockjs@0.3.18: + version "0.3.18" + resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.18.tgz#d9b289316ca7df77595ef299e075f0f937eb4207" + dependencies: + faye-websocket "^0.10.0" + uuid "^2.0.2" + +sort-keys@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-1.1.2.tgz#441b6d4d346798f1b4e49e8920adfba0e543f9ad" + dependencies: + is-plain-obj "^1.0.0" + +source-list-map@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.0.tgz#aaa47403f7b245a92fbc97ea08f250d6087ed085" + +source-map-loader@^0.2.0: + version "0.2.3" + resolved "https://registry.yarnpkg.com/source-map-loader/-/source-map-loader-0.2.3.tgz#d4b0c8cd47d54edce3e6bfa0f523f452b5b0e521" + dependencies: + async "^2.5.0" + loader-utils "~0.2.2" + source-map "~0.6.1" + +source-map-resolve@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.1.tgz#7ad0f593f2281598e854df80f19aae4b92d7a11a" + dependencies: + atob "^2.0.0" + decode-uri-component "^0.2.0" + resolve-url "^0.2.1" + source-map-url "^0.4.0" + urix "^0.1.0" + +source-map-support@^0.4.0, source-map-support@^0.4.1, source-map-support@^0.4.2, source-map-support@~0.4.0: + version "0.4.18" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.18.tgz#0286a6de8be42641338594e97ccea75f0a2c585f" + dependencies: + source-map "^0.5.6" + +source-map-url@^0.4.0: + 
version "0.4.0" + resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" + +source-map@0.1.x, source-map@~0.1.7: + version "0.1.43" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.1.43.tgz#c24bc146ca517c1471f5dacbe2571b2b7f9e3346" + dependencies: + amdefine ">=0.0.4" + +source-map@0.5.x, source-map@^0.5.3, source-map@^0.5.6, source-map@~0.5.1: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + +source-map@^0.4.2, source-map@^0.4.4: + version "0.4.4" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.4.4.tgz#eba4f5da9c0dc999de68032d8b4f76173652036b" + dependencies: + amdefine ">=0.0.4" + +source-map@^0.6.1, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + +sparkles@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/sparkles/-/sparkles-1.0.0.tgz#1acbbfb592436d10bbe8f785b7cc6f82815012c3" + +spdx-correct@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-1.0.2.tgz#4b3073d933ff51f3912f03ac5519498a4150db40" + dependencies: + spdx-license-ids "^1.0.2" + +spdx-expression-parse@~1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c" + +spdx-license-ids@^1.0.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57" + +spdy-transport@^2.0.18: + version "2.0.20" + resolved "https://registry.yarnpkg.com/spdy-transport/-/spdy-transport-2.0.20.tgz#735e72054c486b2354fe89e702256004a39ace4d" + dependencies: + debug "^2.6.8" + detect-node "^2.0.3" + hpack.js "^2.1.6" + obuf "^1.1.1" + readable-stream "^2.2.9" + safe-buffer "^5.0.1" + wbuf "^1.7.2" + +spdy@^3.4.1: + version "3.4.7" + resolved "https://registry.yarnpkg.com/spdy/-/spdy-3.4.7.tgz#42ff41ece5cc0f99a3a6c28aabb73f5c3b03acbc" + dependencies: + debug "^2.6.8" + handle-thing "^1.2.5" + http-deceiver "^1.2.7" + safe-buffer "^5.0.1" + select-hose "^2.0.0" + spdy-transport "^2.0.18" + +split-string@^3.0.1, split-string@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" + dependencies: + extend-shallow "^3.0.0" + +sprintf-js@^1.0.3: + version "1.1.1" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.1.1.tgz#36be78320afe5801f6cea3ee78b6e5aab940ea0c" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + +sshpk@^1.7.0: + version "1.13.1" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.13.1.tgz#512df6da6287144316dc4c18fe1cf1d940739be3" + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + dashdash "^1.12.0" + getpass "^0.1.1" + optionalDependencies: + bcrypt-pbkdf "^1.0.0" + ecc-jsbn "~0.1.1" + jsbn "~0.1.0" + tweetnacl "~0.14.0" + +static-extend@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" + dependencies: + define-property "^0.2.5" + object-copy "^0.1.0" + +"statuses@>= 1.3.1 < 2": + version "1.4.0" + resolved 
"https://registry.yarnpkg.com/statuses/-/statuses-1.4.0.tgz#bb73d446da2796106efcc1b601a253d6c46bd087" + +statuses@~1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e" + +stdout-stream@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/stdout-stream/-/stdout-stream-1.4.0.tgz#a2c7c8587e54d9427ea9edb3ac3f2cd522df378b" + dependencies: + readable-stream "^2.0.1" + +stream-browserify@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.1.tgz#66266ee5f9bdb9940a4e4514cafb43bb71e5c9db" + dependencies: + inherits "~2.0.1" + readable-stream "^2.0.2" + +stream-consume@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/stream-consume/-/stream-consume-0.1.0.tgz#a41ead1a6d6081ceb79f65b061901b6d8f3d1d0f" + +stream-http@^2.7.2: + version "2.7.2" + resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.7.2.tgz#40a050ec8dc3b53b33d9909415c02c0bf1abfbad" + dependencies: + builtin-status-codes "^3.0.0" + inherits "^2.0.1" + readable-stream "^2.2.6" + to-arraybuffer "^1.0.0" + xtend "^4.0.0" + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + +string-width@^1.0.1, string-width@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +string-width@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string_decoder@^1.0.0, string_decoder@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.0.3.tgz#0fc67d7c141825de94282dd536bec6b9bce860ab" + dependencies: + safe-buffer "~5.1.0" + +string_decoder@~0.10.x: + version "0.10.31" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + +stringstream@~0.0.4, stringstream@~0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878" + +strip-ansi@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220" + dependencies: + ansi-regex "^0.2.1" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + dependencies: + ansi-regex "^3.0.0" + +strip-bom@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-1.0.0.tgz#85b8862f3844b5a6d5ec8467a93598173a36f794" + dependencies: + first-chunk-stream "^1.0.0" + is-utf8 "^0.2.0" + +strip-bom@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" + dependencies: + is-utf8 "^0.2.0" + +strip-bom@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + +strip-indent@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-1.0.1.tgz#0c7962a6adefa7bbd4ac366460a638552ae1a0a2" + dependencies: + get-stdin "^4.0.1" + +strip-json-comments@^2.0.0, strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + +style-loader@^0.13.1: + version "0.13.2" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.13.2.tgz#74533384cf698c7104c7951150b49717adc2f3bb" + dependencies: + loader-utils "^1.0.2" + +stylus-loader@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/stylus-loader/-/stylus-loader-3.0.1.tgz#77f4b34fd030d25b2617bcf5513db5b0730c4089" + dependencies: + loader-utils "^1.0.2" + lodash.clonedeep "^4.5.0" + when "~3.6.x" + +stylus@^0.54.5: + version "0.54.5" + resolved "https://registry.yarnpkg.com/stylus/-/stylus-0.54.5.tgz#42b9560931ca7090ce8515a798ba9e6aa3d6dc79" + dependencies: + css-parse "1.7.x" + debug "*" + glob "7.0.x" + mkdirp "0.5.x" + sax "0.5.x" + source-map "0.1.x" + +supports-color@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + +supports-color@^3.1.1, supports-color@^3.1.2, supports-color@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-3.2.3.tgz#65ac0504b3954171d8a64946b2ae3cbb8a5f54f6" + dependencies: + has-flag "^1.0.0" + +supports-color@^4.0.0, supports-color@^4.2.1, supports-color@^4.4.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.5.0.tgz#be7a0de484dec5c5cddf8b3d59125044912f635b" + dependencies: + has-flag "^2.0.0" + +svgo@^0.7.0: + version "0.7.2" + resolved "https://registry.yarnpkg.com/svgo/-/svgo-0.7.2.tgz#9f5772413952135c6fefbf40afe6a4faa88b4bb5" + dependencies: + coa "~1.0.1" + colors "~1.1.2" + csso "~2.3.1" + js-yaml "~3.7.0" + mkdirp "~0.5.1" + sax "~1.2.1" + whet.extend "~0.9.9" + +symbol-observable@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-1.0.1.tgz#8340fc4702c3122df5d22288f88283f513d3fdd4" + +tapable@^0.2.7: + version "0.2.8" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-0.2.8.tgz#99372a5c999bf2df160afc0d74bed4f47948cd22" + +tar-pack@^3.4.0: + version "3.4.1" + resolved "https://registry.yarnpkg.com/tar-pack/-/tar-pack-3.4.1.tgz#e1dbc03a9b9d3ba07e896ad027317eb679a10a1f" + dependencies: + debug "^2.2.0" + fstream "^1.0.10" + fstream-ignore "^1.0.5" + once "^1.3.3" + readable-stream "^2.1.4" + rimraf "^2.5.1" + tar "^2.2.1" + uid-number "^0.0.6" + +tar@^2.0.0, tar@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/tar/-/tar-2.2.1.tgz#8e4d2a256c0e2185c6b18ad694aec968b83cb1d1" + dependencies: + block-stream "*" + fstream "^1.0.2" + inherits "2" + +through2@^0.4.2: + version "0.4.2" + resolved "https://registry.yarnpkg.com/through2/-/through2-0.4.2.tgz#dbf5866031151ec8352bb6c4db64a2292a840b9b" + dependencies: + 
readable-stream "~1.0.17" + xtend "~2.1.1" + +through2@^0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/through2/-/through2-0.5.1.tgz#dfdd012eb9c700e2323fd334f38ac622ab372da7" + dependencies: + readable-stream "~1.0.17" + xtend "~3.0.0" + +through2@^0.6.1: + version "0.6.5" + resolved "https://registry.yarnpkg.com/through2/-/through2-0.6.5.tgz#41ab9c67b29d57209071410e1d7a7a968cd3ad48" + dependencies: + readable-stream ">=1.0.33-1 <1.1.0-0" + xtend ">=4.0.0 <4.1.0-0" + +through2@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.3.tgz#0004569b37c7c74ba39c43f3ced78d1ad94140be" + dependencies: + readable-stream "^2.1.5" + xtend "~4.0.1" + +through@X.X.X: + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + +thunky@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/thunky/-/thunky-0.1.0.tgz#bf30146824e2b6e67b0f2d7a4ac8beb26908684e" + +tildify@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/tildify/-/tildify-1.2.0.tgz#dcec03f55dca9b7aa3e5b04f21817eb56e63588a" + dependencies: + os-homedir "^1.0.0" + +time-stamp@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/time-stamp/-/time-stamp-1.1.0.tgz#764a5a11af50561921b133f3b44e618687e0f5c3" + +time-stamp@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/time-stamp/-/time-stamp-2.0.0.tgz#95c6a44530e15ba8d6f4a3ecb8c3a3fac46da357" + +timers-browserify@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.4.tgz#96ca53f4b794a5e7c0e1bd7cc88a372298fa01e6" + dependencies: + setimmediate "^1.0.4" + +tmp@0.0.24: + version "0.0.24" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.24.tgz#d6a5e198d14a9835cc6f2d7c3d9e302428c8cf12" + +tmp@0.0.30: + version "0.0.30" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.30.tgz#72419d4a8be7d6ce75148fd8b324e593a711c2ed" + dependencies: + os-tmpdir "~1.0.1" + +tmp@0.0.31: + version "0.0.31" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.31.tgz#8f38ab9438e17315e5dbd8b3657e8bfb277ae4a7" + dependencies: + os-tmpdir "~1.0.1" + +tmp@0.0.x: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + dependencies: + os-tmpdir "~1.0.2" + +to-array@0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890" + +to-arraybuffer@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" + +to-fast-properties@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.3.tgz#b83571fa4d8c25b82e231b06e3a3055de4ca1a47" + +to-object-path@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" + dependencies: + kind-of "^3.0.2" + +to-regex-range@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" + dependencies: + is-number "^3.0.0" + repeat-string "^1.6.1" + +to-regex@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.1.tgz#15358bee4a2c83bd76377ba1dc049d0f18837aae" + dependencies: + define-property "^0.2.5" + extend-shallow "^2.0.1" + regex-not "^1.0.0" + 
+toposort@^1.0.0: + version "1.0.6" + resolved "https://registry.yarnpkg.com/toposort/-/toposort-1.0.6.tgz#c31748e55d210effc00fdcdc7d6e68d7d7bb9cec" + +tough-cookie@~2.3.0, tough-cookie@~2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.3.tgz#0b618a5565b6dea90bf3425d04d55edc475a7561" + dependencies: + punycode "^1.4.1" + +trim-newlines@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-1.0.0.tgz#5887966bb582a4503a41eb524f7d35011815a613" + +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" + +"true-case-path@^1.0.2": + version "1.0.2" + resolved "https://registry.yarnpkg.com/true-case-path/-/true-case-path-1.0.2.tgz#7ec91130924766c7f573be3020c34f8fdfd00d62" + dependencies: + glob "^6.0.4" + +ts-node@~3.2.0: + version "3.2.2" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-3.2.2.tgz#bbd28e38af4aaa3e96076c466e1b220197c1a3ce" + dependencies: + arrify "^1.0.0" + chalk "^2.0.0" + diff "^3.1.0" + make-error "^1.1.1" + minimist "^1.2.0" + mkdirp "^0.5.1" + source-map-support "^0.4.0" + tsconfig "^6.0.0" + v8flags "^3.0.0" + yn "^2.0.0" + +tsconfig@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/tsconfig/-/tsconfig-6.0.0.tgz#6b0e8376003d7af1864f8df8f89dd0059ffcd032" + dependencies: + strip-bom "^3.0.0" + strip-json-comments "^2.0.0" + +tsickle@^0.21.0: + version "0.21.6" + resolved "https://registry.yarnpkg.com/tsickle/-/tsickle-0.21.6.tgz#53b01b979c5c13fdb13afb3fb958177e5991588d" + dependencies: + minimist "^1.2.0" + mkdirp "^0.5.1" + source-map "^0.5.6" + source-map-support "^0.4.2" + +tslib@^1.7.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.8.1.tgz#6946af2d1d651a7b1863b531d6e5afa41aa44eac" + +tslint@~5.7.0: + version "5.7.0" + resolved "https://registry.yarnpkg.com/tslint/-/tslint-5.7.0.tgz#c25e0d0c92fa1201c2bc30e844e08e682b4f3552" + dependencies: + babel-code-frame "^6.22.0" + colors "^1.1.2" + commander "^2.9.0" + diff "^3.2.0" + glob "^7.1.1" + minimatch "^3.0.4" + resolve "^1.3.2" + semver "^5.3.0" + tslib "^1.7.1" + tsutils "^2.8.1" + +tsutils@^2.8.1: + version "2.13.0" + resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-2.13.0.tgz#0f52b6aabbc4216e72796b66db028c6cf173e144" + dependencies: + tslib "^1.7.1" + +tty-browserify@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + dependencies: + safe-buffer "^5.0.1" + +tunnel-agent@~0.4.1: + version "0.4.3" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.4.3.tgz#6373db76909fe570e08d73583365ed828a74eeeb" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + +type-is@~1.6.15: + version "1.6.15" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410" + dependencies: + media-typer "0.3.0" + mime-types "~2.1.15" + +"typescript@>=2.0.0 <2.6.0": + version "2.5.3" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-2.5.3.tgz#df3dcdc38f3beb800d4bc322646b04a3f6ca7f0d" + +typescript@~2.3.3: + version "2.3.4" + resolved 
"https://registry.yarnpkg.com/typescript/-/typescript-2.3.4.tgz#3d38321828231e434f287514959c37a82b629f42" + +typescript@~2.6.1: + version "2.6.2" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-2.6.2.tgz#3c5b6fd7f6de0914269027f03c0946758f7673a4" + +uglify-js@3.2.x: + version "3.2.2" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.2.2.tgz#870e4b34ed733d179284f9998efd3293f7fd73f6" + dependencies: + commander "~2.12.1" + source-map "~0.6.1" + +uglify-js@^2.6, uglify-js@^2.8.29: + version "2.8.29" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.8.29.tgz#29c5733148057bb4e1f75df35b7a9cb72e6a59dd" + dependencies: + source-map "~0.5.1" + yargs "~3.10.0" + optionalDependencies: + uglify-to-browserify "~1.0.0" + +uglify-js@~2.3: + version "2.3.6" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-2.3.6.tgz#fa0984770b428b7a9b2a8058f46355d14fef211a" + dependencies: + async "~0.2.6" + optimist "~0.3.5" + source-map "~0.1.7" + +uglify-to-browserify@~1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7" + +uglifyjs-webpack-plugin@^0.4.6: + version "0.4.6" + resolved "https://registry.yarnpkg.com/uglifyjs-webpack-plugin/-/uglifyjs-webpack-plugin-0.4.6.tgz#b951f4abb6bd617e66f63eb891498e391763e309" + dependencies: + source-map "^0.5.6" + uglify-js "^2.8.29" + webpack-sources "^1.0.1" + +uid-number@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/uid-number/-/uid-number-0.0.6.tgz#0ea10e8035e8eb5b8e4449f06da1c730663baa81" + +ultron@1.0.x: + version "1.0.2" + resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.0.2.tgz#ace116ab557cd197386a4e88f4685378c8b2e4fa" + +unc-path-regex@^0.1.0: + version "0.1.2" + resolved "https://registry.yarnpkg.com/unc-path-regex/-/unc-path-regex-0.1.2.tgz#e73dd3d7b0d7c5ed86fbac6b0ae7d8c6a69d50fa" + +union-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.0.tgz#5c71c34cb5bad5dcebe3ea0cd08207ba5aa1aea4" + dependencies: + arr-union "^3.1.0" + get-value "^2.0.6" + is-extendable "^0.1.1" + set-value "^0.4.3" + +uniq@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff" + +uniqid@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/uniqid/-/uniqid-4.1.1.tgz#89220ddf6b751ae52b5f72484863528596bb84c1" + dependencies: + macaddress "^0.2.8" + +uniqs@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02" + +unique-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unique-stream/-/unique-stream-1.0.0.tgz#d59a4a75427447d9aa6c91e70263f8d26a4b104b" + +universalify@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.1.tgz#fa71badd4437af4c148841e3b3b165f9e9e590b7" + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + +unset-value@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" + dependencies: + has-value "^0.3.1" + isobject "^3.0.0" + +upper-case@^1.1.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598" + +urix@^0.1.0: + version "0.1.0" + 
resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" + +url-loader@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-0.6.2.tgz#a007a7109620e9d988d14bce677a1decb9a993f7" + dependencies: + loader-utils "^1.0.2" + mime "^1.4.1" + schema-utils "^0.3.0" + +url-parse@1.0.x: + version "1.0.5" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.0.5.tgz#0854860422afdcfefeb6c965c662d4800169927b" + dependencies: + querystringify "0.0.x" + requires-port "1.0.x" + +url-parse@^1.1.8: + version "1.2.0" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.2.0.tgz#3a19e8aaa6d023ddd27dcc44cb4fc8f7fec23986" + dependencies: + querystringify "~1.0.0" + requires-port "~1.0.0" + +url@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" + dependencies: + punycode "1.3.2" + querystring "0.2.0" + +use@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/use/-/use-2.0.2.tgz#ae28a0d72f93bf22422a18a2e379993112dec8e8" + dependencies: + define-property "^0.2.5" + isobject "^3.0.0" + lazy-cache "^2.0.2" + +user-home@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/user-home/-/user-home-1.1.1.tgz#2b5be23a32b63a7c9deb8d0f28d485724a3df190" + +useragent@^2.1.12: + version "2.2.1" + resolved "https://registry.yarnpkg.com/useragent/-/useragent-2.2.1.tgz#cf593ef4f2d175875e8bb658ea92e18a4fd06d8e" + dependencies: + lru-cache "2.2.x" + tmp "0.0.x" + +util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + +util@0.10.3, util@^0.10.3: + version "0.10.3" + resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9" + dependencies: + inherits "2.0.1" + +utila@~0.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.3.3.tgz#d7e8e7d7e309107092b05f8d9688824d633a4226" + +utila@~0.4: + version "0.4.0" + resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + +uuid@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.3.tgz#67e2e863797215530dff318e5bf9dcebfd47b21a" + +uuid@^3.0.0, uuid@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.1.0.tgz#3dd3d3e790abc24d7b0d3a034ffababe28ebbc04" + +v8flags@^2.0.2: + version "2.1.1" + resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-2.1.1.tgz#aab1a1fa30d45f88dd321148875ac02c0b55e5b4" + dependencies: + user-home "^1.1.1" + +v8flags@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/v8flags/-/v8flags-3.0.1.tgz#dce8fc379c17d9f2c9e9ed78d89ce00052b1b76b" + dependencies: + homedir-polyfill "^1.0.1" + +validate-npm-package-license@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.1.tgz#2804babe712ad3379459acfbe24746ab2c303fbc" + dependencies: + spdx-correct "~1.0.0" + spdx-expression-parse "~1.0.0" + +vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + +vendors@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/vendors/-/vendors-1.0.1.tgz#37ad73c8ee417fb3d580e785312307d274847f22" + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +vinyl-fs@^0.3.0: + version "0.3.14" + resolved "https://registry.yarnpkg.com/vinyl-fs/-/vinyl-fs-0.3.14.tgz#9a6851ce1cac1c1cea5fe86c0931d620c2cfa9e6" + dependencies: + defaults "^1.0.0" + glob-stream "^3.1.5" + glob-watcher "^0.0.6" + graceful-fs "^3.0.0" + mkdirp "^0.5.0" + strip-bom "^1.0.0" + through2 "^0.6.1" + vinyl "^0.4.0" + +vinyl@^0.2.1: + version "0.2.3" + resolved "https://registry.yarnpkg.com/vinyl/-/vinyl-0.2.3.tgz#bca938209582ec5a49ad538a00fa1f125e513252" + dependencies: + clone-stats "~0.0.1" + +vinyl@^0.4.0: + version "0.4.6" + resolved "https://registry.yarnpkg.com/vinyl/-/vinyl-0.4.6.tgz#2f356c87a550a255461f36bbeb2a5ba8bf784847" + dependencies: + clone "^0.2.0" + clone-stats "^0.0.1" + +vinyl@^0.5.0: + version "0.5.3" + resolved "https://registry.yarnpkg.com/vinyl/-/vinyl-0.5.3.tgz#b0455b38fc5e0cf30d4325132e461970c2091cde" + dependencies: + clone "^1.0.0" + clone-stats "^0.0.1" + replace-ext "0.0.1" + +vlq@^0.2.1: + version "0.2.3" + resolved "https://registry.yarnpkg.com/vlq/-/vlq-0.2.3.tgz#8f3e4328cf63b1540c0d67e1b2778386f8975b26" + +vm-browserify@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-0.0.4.tgz#5d7ea45bbef9e4a6ff65f95438e0a87c357d5a73" + dependencies: + indexof "0.0.1" + +void-elements@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/void-elements/-/void-elements-2.0.1.tgz#c066afb582bb1cb4128d60ea92392e94d5e9dbec" + +watchpack@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.4.0.tgz#4a1472bcbb952bd0a9bb4036801f954dfb39faac" + dependencies: + async "^2.1.2" + chokidar "^1.7.0" + graceful-fs "^4.1.2" + +wbuf@^1.1.0, wbuf@^1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/wbuf/-/wbuf-1.7.2.tgz#d697b99f1f59512df2751be42769c1580b5801fe" + dependencies: + minimalistic-assert "^1.0.0" + +webdriver-js-extender@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/webdriver-js-extender/-/webdriver-js-extender-1.0.0.tgz#81c533a9e33d5bfb597b4e63e2cdb25b54777515" + dependencies: + "@types/selenium-webdriver" "^2.53.35" + selenium-webdriver "^2.53.2" + +webdriver-manager@^12.0.6: + version "12.0.6" + resolved "https://registry.yarnpkg.com/webdriver-manager/-/webdriver-manager-12.0.6.tgz#3df1a481977010b4cbf8c9d85c7a577828c0e70b" + dependencies: + adm-zip "^0.4.7" + chalk "^1.1.1" + del "^2.2.0" + glob "^7.0.3" + ini "^1.3.4" + minimist "^1.2.0" + q "^1.4.1" + request "^2.78.0" + rimraf "^2.5.2" + semver "^5.3.0" + xml2js "^0.4.17" + +webpack-concat-plugin@1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/webpack-concat-plugin/-/webpack-concat-plugin-1.4.0.tgz#a6eb3f0082d03c79d8ee2f1518c7f48e44ee12c5" + dependencies: + md5 "^2.2.1" + uglify-js "^2.8.29" + +webpack-dev-middleware@^1.11.0, webpack-dev-middleware@~1.12.0: + version "1.12.2" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-1.12.2.tgz#f8fc1120ce3b4fc5680ceecb43d777966b21105e" + dependencies: + memory-fs "~0.4.1" + mime "^1.5.0" + path-is-absolute "^1.0.0" + range-parser "^1.0.3" + time-stamp "^2.0.0" + +webpack-dev-server@~2.7.1: + version "2.7.1" + resolved 
"https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-2.7.1.tgz#21580f5a08cd065c71144cf6f61c345bca59a8b8" + dependencies: + ansi-html "0.0.7" + bonjour "^3.5.0" + chokidar "^1.6.0" + compression "^1.5.2" + connect-history-api-fallback "^1.3.0" + del "^3.0.0" + express "^4.13.3" + html-entities "^1.2.0" + http-proxy-middleware "~0.17.4" + internal-ip "^1.2.0" + ip "^1.1.5" + loglevel "^1.4.1" + opn "4.0.2" + portfinder "^1.0.9" + selfsigned "^1.9.1" + serve-index "^1.7.2" + sockjs "0.3.18" + sockjs-client "1.1.4" + spdy "^3.4.1" + strip-ansi "^3.0.0" + supports-color "^3.1.1" + webpack-dev-middleware "^1.11.0" + yargs "^6.0.0" + +webpack-merge@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/webpack-merge/-/webpack-merge-4.1.1.tgz#f1197a0a973e69c6fbeeb6d658219aa8c0c13555" + dependencies: + lodash "^4.17.4" + +webpack-sources@^1.0.0, webpack-sources@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.1.0.tgz#a101ebae59d6507354d71d8013950a3a8b7a5a54" + dependencies: + source-list-map "^2.0.0" + source-map "~0.6.1" + +webpack@~3.7.1: + version "3.7.1" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-3.7.1.tgz#6046b5c415ff7df7a0dc54c5b6b86098e8b952da" + dependencies: + acorn "^5.0.0" + acorn-dynamic-import "^2.0.0" + ajv "^5.1.5" + ajv-keywords "^2.0.0" + async "^2.1.2" + enhanced-resolve "^3.4.0" + escope "^3.6.0" + interpret "^1.0.0" + json-loader "^0.5.4" + json5 "^0.5.1" + loader-runner "^2.3.0" + loader-utils "^1.1.0" + memory-fs "~0.4.1" + mkdirp "~0.5.0" + node-libs-browser "^2.0.0" + source-map "^0.5.3" + supports-color "^4.2.1" + tapable "^0.2.7" + uglifyjs-webpack-plugin "^0.4.6" + watchpack "^1.4.0" + webpack-sources "^1.0.1" + yargs "^8.0.2" + +websocket-driver@>=0.5.1: + version "0.7.0" + resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.0.tgz#0caf9d2d755d93aee049d4bdd0d3fe2cca2a24eb" + dependencies: + http-parser-js ">=0.4.0" + websocket-extensions ">=0.1.1" + +websocket-extensions@>=0.1.1: + version "0.1.3" + resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29" + +when@~3.6.x: + version "3.6.4" + resolved "https://registry.yarnpkg.com/when/-/when-3.6.4.tgz#473b517ec159e2b85005497a13983f095412e34e" + +whet.extend@~0.9.9: + version "0.9.9" + resolved "https://registry.yarnpkg.com/whet.extend/-/whet.extend-0.9.9.tgz#f877d5bf648c97e5aa542fadc16d6a259b9c11a1" + +which-module@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f" + +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + +which@1, which@^1.2.1, which@^1.2.14, which@^1.2.9: + version "1.3.0" + resolved "https://registry.yarnpkg.com/which/-/which-1.3.0.tgz#ff04bdfc010ee547d780bec38e1ac1c2777d253a" + dependencies: + isexe "^2.0.0" + +wide-align@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.2.tgz#571e0f1b0604636ebc0dfc21b0339bbe31341710" + dependencies: + string-width "^1.0.2" + +window-size@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.1.0.tgz#5438cd2ea93b202efa3a19fe8887aee7c94f9c9d" + +wordwrap@0.0.2: + version "0.0.2" + resolved 
"https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.2.tgz#b79669bb42ecb409f83d583cad52ca17eaa1643f" + +wordwrap@~0.0.2: + version "0.0.3" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" + +workbox-background-sync@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/workbox-background-sync/-/workbox-background-sync-2.0.3.tgz#94c767e1d47dfb295c5fd1a0d3d3c8d849bddd46" + +workbox-build@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/workbox-build/-/workbox-build-2.1.2.tgz#5425094a83ac77c991b6060dd1df3d37984ef87e" + dependencies: + chalk "^1.1.3" + fs-extra "^3.0.1" + glob "^7.1.1" + lodash.template "^4.4.0" + mkdirp "^0.5.1" + workbox-sw "^2.1.2" + +workbox-sw@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/workbox-sw/-/workbox-sw-2.1.2.tgz#8f3a687c36f3b92ac2a66a08183bd3a163474b61" + +wrap-ansi@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + +ws@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.2.tgz#8a244fa052401e08c9886cf44a85189e1fd4067f" + dependencies: + options ">=0.0.5" + ultron "1.0.x" + +ws@^1.0.1: + version "1.1.5" + resolved "https://registry.yarnpkg.com/ws/-/ws-1.1.5.tgz#cbd9e6e75e09fc5d2c90015f21f0c40875e0dd51" + dependencies: + options ">=0.0.5" + ultron "1.0.x" + +wtf-8@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/wtf-8/-/wtf-8-1.0.0.tgz#392d8ba2d0f1c34d1ee2d630f15d0efb68e1048a" + +xml-char-classes@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/xml-char-classes/-/xml-char-classes-1.0.0.tgz#64657848a20ffc5df583a42ad8a277b4512bbc4d" + +xml2js@0.4.4: + version "0.4.4" + resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.4.4.tgz#3111010003008ae19240eba17497b57c729c555d" + dependencies: + sax "0.6.x" + xmlbuilder ">=1.0.0" + +xml2js@^0.4.17: + version "0.4.19" + resolved "https://registry.yarnpkg.com/xml2js/-/xml2js-0.4.19.tgz#686c20f213209e94abf0d1bcf1efaa291c7827a7" + dependencies: + sax ">=0.6.0" + xmlbuilder "~9.0.1" + +xmlbuilder@>=1.0.0, xmlbuilder@~9.0.1: + version "9.0.4" + resolved "https://registry.yarnpkg.com/xmlbuilder/-/xmlbuilder-9.0.4.tgz#519cb4ca686d005a8420d3496f3f0caeecca580f" + +xmldom@^0.1.19: + version "0.1.27" + resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.27.tgz#d501f97b3bdb403af8ef9ecc20573187aadac0e9" + +xmlhttprequest-ssl@1.5.3: + version "1.5.3" + resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.3.tgz#185a888c04eca46c3e4070d99f7b49de3528992d" + +"xtend@>=4.0.0 <4.1.0-0", xtend@^4.0.0, xtend@~4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af" + +xtend@~2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-2.1.2.tgz#6efecc2a4dad8e6962c4901b337ce7ba87b5d28b" + dependencies: + object-keys "~0.4.0" + +xtend@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-3.0.0.tgz#5cce7407baf642cba7becda568111c493f59665a" + +y18n@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41" + +yallist@^2.1.2: + version "2.1.2" + resolved 
"https://registry.yarnpkg.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + +yargs-parser@^4.2.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-4.2.1.tgz#29cceac0dc4f03c6c87b4a9f217dd18c9f74871c" + dependencies: + camelcase "^3.0.0" + +yargs-parser@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-5.0.0.tgz#275ecf0d7ffe05c77e64e7c86e4cd94bf0e1228a" + dependencies: + camelcase "^3.0.0" + +yargs-parser@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-7.0.0.tgz#8d0ac42f16ea55debd332caf4c4038b3e3f5dfd9" + dependencies: + camelcase "^4.1.0" + +yargs@^6.0.0: + version "6.6.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-6.6.0.tgz#782ec21ef403345f830a808ca3d513af56065208" + dependencies: + camelcase "^3.0.0" + cliui "^3.2.0" + decamelize "^1.1.1" + get-caller-file "^1.0.1" + os-locale "^1.4.0" + read-pkg-up "^1.0.1" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^1.0.2" + which-module "^1.0.0" + y18n "^3.2.1" + yargs-parser "^4.2.0" + +yargs@^7.0.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-7.1.0.tgz#6ba318eb16961727f5d284f8ea003e8d6154d0c8" + dependencies: + camelcase "^3.0.0" + cliui "^3.2.0" + decamelize "^1.1.1" + get-caller-file "^1.0.1" + os-locale "^1.4.0" + read-pkg-up "^1.0.1" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^1.0.2" + which-module "^1.0.0" + y18n "^3.2.1" + yargs-parser "^5.0.0" + +yargs@^8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-8.0.2.tgz#6299a9055b1cefc969ff7e79c1d918dceb22c360" + dependencies: + camelcase "^4.1.0" + cliui "^3.2.0" + decamelize "^1.1.1" + get-caller-file "^1.0.1" + os-locale "^2.0.0" + read-pkg-up "^2.0.0" + require-directory "^2.1.1" + require-main-filename "^1.0.1" + set-blocking "^2.0.0" + string-width "^2.0.0" + which-module "^2.0.0" + y18n "^3.2.1" + yargs-parser "^7.0.0" + +yargs@~3.10.0: + version "3.10.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-3.10.0.tgz#f7ee7bd857dd7c1d2d38c0e74efbd681d1431fd1" + dependencies: + camelcase "^1.0.2" + cliui "^2.1.0" + decamelize "^1.0.0" + window-size "0.1.0" + +yeast@0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419" + +yn@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/yn/-/yn-2.0.0.tgz#e5adabc8acf408f6385fc76495684c88e6af689a" + +zone.js@^0.8.14: + version "0.8.18" + resolved "https://registry.yarnpkg.com/zone.js/-/zone.js-0.8.18.tgz#8cecb3977fcd1b3090562ff4570e2847e752b48d" diff --git a/app/adminHandlers.go b/app/adminHandlers.go new file mode 100644 index 0000000..ec47b27 --- /dev/null +++ b/app/adminHandlers.go @@ -0,0 +1,361 @@ +package app + +import ( + "bytes" + "encoding/json" + "html/template" + "log" + "net/http" + + "github.com/codeskyblue/go-sh" + _ "github.com/jinzhu/gorm/dialects/sqlite" // Driver for sqlite + + "github.com/julienschmidt/httprouter" + +) + +// IsAdmin : Read cookie and check db if email exists in admins table +func IsAdmin(w http.ResponseWriter, r *http.Request) (isAdminFlag bool) { + + googTok := ReadCookieHandler(w, r) + var admin Admin + + if DB.db.Where("email = ?", googTok.Email).First(&admin).RecordNotFound() { + return false + } + + return true +} + +// AdminIndex : +func AdminIndex(w http.ResponseWriter, r 
*http.Request, _ httprouter.Params) {
+
+    if !IsAdmin(w, r) {
+        http.Redirect(w, r, "/", 302)
+    } else {
+        var adminTemplate *template.Template
+        adminTemplate = template.Must(template.ParseGlob("templates/admin.html"))
+        adminTemplate.Execute(w, nil)
+    }
+
+}
+
+// AcceptAdminRequest : Lets admin accept a request to give a user admin privilege
+func AcceptAdminRequest(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var adminRequest AdminRequest
+    var admin Admin
+
+    if IsAdmin(w, r) {
+
+        // gives admin privilege to user by adding entry to admins table
+        // while deleting entry from admin_requests table
+        if DB.db.Where("admin_request_id = ?", ps.ByName("id")).First(&adminRequest).RecordNotFound() {
+            response = Response{
+                false,
+                "Unable to accept, request does not exist",
+            }
+        } else {
+            DB.db.Where("admin_request_id = ?", ps.ByName("id")).First(&adminRequest)
+            admin = Admin{
+                Name:  adminRequest.Name,
+                Email: adminRequest.Email,
+            }
+            DB.db.Create(&admin)
+            DB.db.Delete(&adminRequest)
+
+            response = Response{
+                true,
+                "Request accepted, new admin created",
+            }
+            log.Printf("New admin created. Name: %v | Email: %v", admin.Name, admin.Email)
+        }
+
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// RejectAdminRequest : Lets admin reject a request to give a user admin privilege
+func RejectAdminRequest(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var adminRequest AdminRequest
+
+    if IsAdmin(w, r) {
+
+        // deletes entry from admin_requests table
+        if DB.db.Where("admin_request_id = ?", ps.ByName("id")).First(&adminRequest).RecordNotFound() {
+            response = Response{
+                false,
+                "Unable to delete, request does not exist",
+            }
+        } else {
+            DB.db.Where("admin_request_id = ?", ps.ByName("id")).Delete(&adminRequest)
+            response = Response{
+                true,
+                "Admin request successfully rejected",
+            }
+        }
+
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// RevokeAdminPrivilege : Lets admin revoke admin privileges of a fellow admin
+func RevokeAdminPrivilege(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var admin Admin
+
+    if IsAdmin(w, r) {
+
+        // deletes entry from admins table
+        if DB.db.Where("admin_id = ?", ps.ByName("id")).First(&admin).RecordNotFound() {
+            response = Response{
+                false,
+                "Unable to delete, admin does not exist",
+            }
+        } else {
+            log.Printf("An admin's privileges revoked. Name: %v | Email: %v", admin.Name, admin.Email)
+            DB.db.Where("admin_id = ?", ps.ByName("id")).Delete(&admin)
+            response = Response{
+                true,
+                "Admin privileges successfully revoked",
+            }
+        }
+
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// AcceptAccessRequest : Lets admin accept an access request
+// copies user's ssh key to destination server
+// request contains IP address of the server
+// to which access needs to be granted
+func AcceptAccessRequest(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    type Receive struct {
+        IP string `json:"ip"`
+    }
+
+    var receive Receive
+    var response Response
+    var accessRequest AccessRequest
+    var access Access
+
+    decoder := json.NewDecoder(r.Body)
+    err := decoder.Decode(&receive)
+    if err != nil {
+        http.Error(w, "Error reading the received request", http.StatusBadRequest)
+        log.Printf("Error reading the received request.\nError: %v\n", err.Error())
+    }
+    if IsAdmin(w, r) {
+
+        // gives access privilege to user by adding entry to accesses table
+        // executes shell script which copies user's ssh key to desired dest server over ssh
+        // while deleting entry from access_requests table
+        if DB.db.Where("access_request_id = ?", ps.ByName("id")).First(&accessRequest).RecordNotFound() {
+            response = Response{
+                false,
+                "Unable to accept, request does not exist",
+            }
+        } else {
+
+            // test ssh connection b/w source (DAMN server) and dest servers
+            status, _ := sh.Command("./scripts/test_connection.sh", receive.IP).Output()
+
+            if !bytes.Equal(status, []byte("ok\n")) {
+                response = Response{
+                    false,
+                    "Can't connect using ssh, check if destination server has been set up correctly",
+                }
+
+            } else {
+
+                DB.db.Where("access_request_id = ?", ps.ByName("id")).First(&accessRequest)
+
+                // execute shell script to copy ssh key to specified server over ssh
+                sh.Command("./scripts/copy_key_to_server.sh", receive.IP, accessRequest.SSHKey).Run()
+
+                access = Access{
+                    Name:   accessRequest.Name,
+                    Email:  accessRequest.Email,
+                    IP:     receive.IP,
+                    SSHKey: accessRequest.SSHKey,
+                }
+                DB.db.Create(&access)
+                DB.db.Delete(&accessRequest)
+
+                response = Response{
+                    true,
+                    "Request accepted, new access created",
+                }
+
+                log.Printf("New access granted. Name: %v | Email: %v | To: %v", access.Name, access.Email, access.IP)
+
+            }
+
+        }
+
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// RejectAccessRequest : Lets admin reject an access request
+func RejectAccessRequest(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var accessRequest AccessRequest
+
+    if IsAdmin(w, r) {
+
+        // deletes entry from access_requests table
+        if DB.db.Where("access_request_id = ?", ps.ByName("id")).First(&accessRequest).RecordNotFound() {
+            response = Response{
+                false,
+                "Unable to delete, request does not exist",
+            }
+        } else {
+            DB.db.Where("access_request_id = ?", ps.ByName("id")).Delete(&accessRequest)
+            response = Response{
+                true,
+                "Access request successfully rejected",
+            }
+        }
+
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// RevokeAccessPrivilege : Lets admin revoke access privileges of users
+func RevokeAccessPrivilege(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var access Access
+
+    if IsAdmin(w, r) {
+
+        // revokes access privilege of user by deleting entry from accesses table
+        // executes shell script which removes user's ssh key from intended dest server over ssh
+        if DB.db.Where("access_id = ?", ps.ByName("id")).First(&access).RecordNotFound() {
+            response = Response{
+                false,
+                "Unable to delete, access does not exist",
+            }
+        } else {
+
+            // test ssh connection b/w source (DAMN server) and dest servers
+            status, _ := sh.Command("./scripts/test_connection.sh", access.IP).Output()
+
+            if !bytes.Equal(status, []byte("ok\n")) {
+
+                response = Response{
+                    false,
+                    "Can't connect using ssh, check if destination server has been set up correctly",
+                }
+
+            } else {
+
+                log.Printf("Access privilege revoked. Name: %v | Email: %v | From: %v", access.Name, access.Email, access.IP)
+
+                DB.db.Where("access_id = ?", ps.ByName("id")).Delete(&access)
+                // execute shell script to remove ssh key from the specified server
+                sh.Command("./scripts/remove_key_from_server.sh", access.IP, access.SSHKey).Run()
+
+                response = Response{
+                    true,
+                    "Access privileges successfully revoked",
+                }
+            }
+        }
+
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
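Every handler above closes with the same marshal-and-write tail. A sketch of a helper that package app could share to absorb that repetition (hypothetical refactor, not part of this commit):

``` go
// writeJSON marshals a Response and writes it with the JSON content type.
// Hypothetical helper; the handlers in this diff inline this logic.
func writeJSON(w http.ResponseWriter, response Response) {
    json, err := json.Marshal(response)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    w.Write(json)
}
```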
diff --git a/app/app.go b/app/app.go
index f277a73..6c10d91 100644
--- a/app/app.go
+++ b/app/app.go
@@ -1,11 +1,59 @@
 package app
 
 import (
+    "log"
     "net/http"
+
+    _ "github.com/jinzhu/gorm/dialects/mysql" // Driver for MySQL
+    "github.com/julienschmidt/httprouter"
 )
 
+var err error
+
+// DB : Database struct
+var DB Database
+
+// Run : migrates and creates tables in the database,
+// registers handler functions with routes,
+// then opens up a port and serves the app
 func Run() {
-    http.HandleFunc("/", Index)
-    http.ListenAndServe(":8080", nil)
+    defer DB.db.Close()
+
+    DB.db.AutoMigrate(&AccessRequest{}, &Access{}, &AdminRequest{}, &Admin{})
+
+    router := httprouter.New()
+
+    router.GET("/", Index)
+    router.GET("/login", Index)
+    router.GET("/auth", AuthHandler)
+    router.GET("/logout", LogoutHandler)
+    router.GET("/options", Options)
+    router.POST("/makeAccessRequest", MakeAccessRequest)
+    router.GET("/makeAdminRequest", MakeAdminRequest)
+    router.GET("/admin", AdminIndex)
+
+    router.GET("/admin/acceptAdminRequest/:id", AcceptAdminRequest)
+    router.GET("/admin/rejectAdminRequest/:id", RejectAdminRequest)
+    router.GET("/admin/revokeAdminPrivilege/:id", RevokeAdminPrivilege)
+
+    router.POST("/admin/acceptAccessRequest/:id", AcceptAccessRequest)
+    router.GET("/admin/rejectAccessRequest/:id", RejectAccessRequest)
+    router.GET("/admin/revokeAccessPrivilege/:id", RevokeAccessPrivilege)
+
+    router.GET("/admin/getAccesses", AccessesHandler)
+    router.GET("/admin/getAccessRequests", AccessRequestsHandler)
+    router.GET("/admin/getAdmins", AdminsHandler)
+    router.GET("/admin/getAdminRequests", AdminRequestsHandler)
+
+    router.GET("/accesses", AdminIndex)
+    router.GET("/accessrequests", AdminIndex)
+    router.GET("/admins", AdminIndex)
+    router.GET("/adminrequests", AdminIndex)
+
+    router.NotFound = http.FileServer(http.Dir("./dist"))
+
+    log.Println("Server listening on port 8080")
+    log.Fatal(http.ListenAndServe(":8080", router))
+
 }
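No main package appears in this section of the diff; a minimal sketch of how Run would be invoked, with the import path as a stated assumption:

``` go
package main

// The import path "DAMN/app" is a placeholder; the real one depends on
// where this repository sits in the GOPATH.
import "DAMN/app"

func main() {
    // config.go's init() runs first: it reads credOAuth2.json and
    // credMysql.json and opens the MySQL connection that Run relies on.
    app.Run() // registers routes and serves on :8080 until a fatal error
}
```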
diff --git a/app/config.go b/app/config.go
new file mode 100644
index 0000000..14e10c1
--- /dev/null
+++ b/app/config.go
@@ -0,0 +1,69 @@
+package app
+
+import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "log"
+
+    "github.com/jinzhu/gorm"
+    _ "github.com/jinzhu/gorm/dialects/sqlite" // Driver for SQLite
+
+    "golang.org/x/oauth2"
+    "golang.org/x/oauth2/google"
+)
+
+/*
+** contains the init function that configures OAuth2 and MySQL
+ */
+
+var confOAuth2 *oauth2.Config
+
+func init() {
+
+    // doesn't need to be called explicitly
+    // configures OAuth2 and MySQL
+
+    var credOAuth2 CredentialOAuth2
+    var credMysql CredentialMysql
+
+    fileOAuth2, err := ioutil.ReadFile("./credOAuth2.json")
+    if err != nil {
+        log.Fatalf("Error while reading oauth2 config file.\nError: %v\n", err.Error())
+    }
+    json.Unmarshal(fileOAuth2, &credOAuth2)
+
+    confOAuth2 = &oauth2.Config{
+
+        // presently set through account nishchayparashar98@gmail.com
+        ClientID:     credOAuth2.Cid,
+        ClientSecret: credOAuth2.Csecret,
+
+        // change redirect on production server
+        RedirectURL: "http://127.0.0.1:8080/options",
+
+        Scopes: []string{
+            "https://www.googleapis.com/auth/userinfo.email",
+        },
+        Endpoint: google.Endpoint,
+    }
+
+    fileMysql, err := ioutil.ReadFile("./credMysql.json")
+    if err != nil {
+        log.Fatalf("Error while reading mysql config file.\nError: %v\n", err.Error())
+    }
+    json.Unmarshal(fileMysql, &credMysql)
+
+    connectionString := fmt.Sprintf("%s:%s@/%s?charset=utf8&parseTime=True&loc=Local",
+        credMysql.DBUsername,
+        credMysql.DBPassword,
+        credMysql.DBName)
+
+    DB.db, err = gorm.Open("mysql", connectionString)
+    if err != nil {
+        log.Fatal("Could not open database : ", err.Error())
+    }
+
+    state = randState()
+
+}
diff --git a/app/cookie.go b/app/cookie.go
new file mode 100644
index 0000000..ccd04d5
--- /dev/null
+++ b/app/cookie.go
@@ -0,0 +1,52 @@
+package app
+
+import (
+    "net/http"
+    "time"
+
+    "github.com/gorilla/securecookie"
+)
+
+/*
+** contains all middleware functions to interact with cookies
+ */
+
+var cookieHandler = securecookie.New(
+    securecookie.GenerateRandomKey(64),
+    securecookie.GenerateRandomKey(32))
+
+// SetCookieHandler : Set cookies when a user signs in
+func SetCookieHandler(googTok *GoogleToken, w http.ResponseWriter) {
+
+    if encoded, err := cookieHandler.Encode("googCookie", googTok); err == nil {
+        cookie := &http.Cookie{
+            Name:    "googCookie",
+            Value:   encoded,
+            Path:    "/",
+            Expires: time.Now().Add(12 * time.Hour),
+        }
+        http.SetCookie(w, cookie)
+    }
+}
+
+// ReadCookieHandler : Read the cookie
+func ReadCookieHandler(w http.ResponseWriter, r *http.Request) (googTok GoogleToken) {
+    if cookie, err := r.Cookie("googCookie"); err == nil {
+        if err = cookieHandler.Decode("googCookie", cookie.Value, &googTok); err != nil {
+            http.Error(w, err.Error(), http.StatusBadRequest)
+        }
+    }
+    return googTok
+}
+
+// ClearCookieHandler : Clear cookies
+func ClearCookieHandler(w http.ResponseWriter) {
+
+    cookie := &http.Cookie{
+        Name:   "googCookie",
+        Value:  "",
+        Path:   "/",
+        MaxAge: -1,
+    }
+    http.SetCookie(w, cookie)
+}
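The three helpers above are meant to work as a unit; a sketch of the round trip, written as if it lived in package app (hypothetical guard, not in this diff):

``` go
// requireLogin decodes the "googCookie", treats an empty email as signed
// out, and re-issues the cookie to extend its 12-hour expiry.
func requireLogin(w http.ResponseWriter, r *http.Request) (GoogleToken, bool) {
    googTok := ReadCookieHandler(w, r)
    if googTok.Email == "" {
        http.Redirect(w, r, "/login", 302)
        return googTok, false
    }
    SetCookieHandler(&googTok, w)
    return googTok, true
}
```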
cookieHandler.Decode("session", cookie.Value, &uid); err == nil { - } - } - return uid +var state string + +func randState() string { + + // generates random string + + b := make([]byte, 32) + rand.Read(b) + return base64.StdEncoding.EncodeToString(b) } -func isLoggedIn(r *http.Request) (flag bool) { - uid := getUID(r) - if uid != 0 { - flag = true - } else { - flag = false - } - return flag +// Index : +func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + + var indexTemplate *template.Template + indexTemplate = template.Must(template.ParseGlob("templates/index.html")) + indexTemplate.Execute(w, nil) + } -func setSession(uid uint, w http.ResponseWriter) { - // IMO here would be the code to - // create new entry in session table in db - /*var session Session - session = Session{UserID: uid, LoginTimeStamp: time.Now()} - db.Create(&session)*/ - - if encoded, err := cookieHandler.Encode("session", uid); err == nil { - cookie := &http.Cookie{ - Name: "session", - Value: encoded, - Path: "/", - Expires: time.Now().Add(12 * time.Hour), - } - http.SetCookie(w, cookie) - } +// AuthHandler : calls Google OAuth2 api endpoint on the provided configuration +func AuthHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + + http.Redirect(w, r, confOAuth2.AuthCodeURL(state), 302) } -func clearSession(w http.ResponseWriter) { - // IMO here would be the code to - // delete the session from the sessions table in db - cookie := &http.Cookie{ - Name: "session", - Value: "", - Path: "/", - MaxAge: -1, +// Options : Home route for user that is rendered when a user signs-in +// checks if the state appended to callback url is same as generated by app +// thus ensures the callback is not from a man in the middle +func Options(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + + if r.FormValue("state") != state { + http.Error(w, "Possibly malacious/fake callback redirect", http.StatusBadRequest) + log.Printf("Possibly malacious/fake callback redirect. 
Random state does not match\nError: %v\n", http.StatusBadRequest) + return } - http.SetCookie(w, cookie) -} -func loginHandler(w http.ResponseWriter, r *http.Request) { + // call Exchange on conf to get token from Google OAuth2 api + // pass code recieved as callback url parameter + tok, err := confOAuth2.Exchange(oauth2.NoContext, r.FormValue("code")) + if err != nil { + http.Error(w, "Could not exchange code for google token", http.StatusBadRequest) + log.Printf("Could not exchange code for google token.\nError: %v\n", err.Error()) + return + } - if r.Method != "POST" { + // authenticate the google token obtained by calling endpoint + // provided by google for token authentication + check, err := http.Get("https://www.googleapis.com/oauth2/v3/tokeninfo?access_token=" + tok.AccessToken) + if err != nil { + http.Error(w, "Google token not authentic", http.StatusBadRequest) + log.Printf("Google token not authentic.\nError: %v\n", err.Error()) return } + defer check.Body.Close() - type Receive struct { - Email string `json:"email"` - Password string `json:"password"` + // use token to get user info + client := confOAuth2.Client(oauth2.NoContext, tok) + info, err := client.Get("https://www.googleapis.com/oauth2/v3/userinfo") + if err != nil { + http.Error(w, "User info could not be obtained", http.StatusBadRequest) + log.Printf("User info could not be obtained.\nError: %v\n", err.Error()) + return } + defer info.Body.Close() + data, _ := ioutil.ReadAll(info.Body) - type Response struct { - Success bool `json:"success"` - Message string `json:"message"` + var googTok GoogleToken + + json.Unmarshal(data, &googTok) + + // check if user has a verified gmail account + if googTok.EmailVerified == false { + http.Redirect(w, r, "https://myaccount.google.com/", 302) + return } - /**** check credentials ****/ - var user User + // set google token as cookie + SetCookieHandler(&googTok, w) - decoder := json.NewDecoder(r.Body) - var receive Receive - err := decoder.Decode(&receive) - if err != nil { - panic(err) + log.Printf("%v logged in using account %v", googTok.Name, googTok.Email) + + var admin Admin + + if DB.db.Where("email = ?", googTok.Email).First(&admin).RecordNotFound() { + http.Redirect(w, r, "/", 302) + } else { + http.Redirect(w, r, "/admin", 302) } - // check for email - notFoundErr := db.Debug().Where("email = ?", receive.Email).First(&user).Error +} - if notFoundErr != nil { - //json for incorrect email or pw - response := &Response{ - false, - "Incorrect email or password", - } +// LogoutHandler : clears cookie logs out user +func LogoutHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - json, err := json.Marshal(response) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + ClearCookieHandler(w) + http.Redirect(w, r, "/", 302) + +} + +// MakeAccessRequest : make request to get access to server user wants to work on +func MakeAccessRequest(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - w.Header().Set("Content-Type", "application/json") - w.Write(json) + // request contains user's ssh key and message describing the project + // they are working on + type Receive struct { + SSHKey string `json:"ssh_key"` + Message string `json:"message"` + } + + var response Response + + decoder := json.NewDecoder(r.Body) + var receive Receive + err := decoder.Decode(&receive) + if err != nil { + http.Error(w, "Error reading the received request", http.StatusBadRequest) + log.Printf("Error reading the received request.\nError: 
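MakeAccessRequest expects a JSON body with `ssh_key` and `message`. A sketch of a client call with placeholder values; a real request must also carry the `googCookie` set at login, or the handler answers "User not authenticated":

``` go
package main

import (
    "bytes"
    "fmt"
    "net/http"
)

func main() {
    // Placeholder key and message; the endpoint is the one registered in app.go.
    body := []byte(`{"ssh_key": "ssh-rsa AAAA... dev@laptop", "message": "access for project X"}`)
    resp, err := http.Post("http://127.0.0.1:8080/makeAccessRequest",
        "application/json", bytes.NewReader(body))
    if err != nil {
        fmt.Println(err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status) // the JSON Response body reports success or failure
}
```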
diff --git a/app/models.go b/app/models.go
new file mode 100644
index 0000000..fd41414
--- /dev/null
+++ b/app/models.go
@@ -0,0 +1,37 @@
+package app
+
+/*
+** contains all models of the database
+ */
+
+// AccessRequest : model for storing all the requests that are made for access to a server
+type AccessRequest struct {
+    AccessRequestID uint   `json:"access_request_id" sql:"AUTO_INCREMENT" gorm:"primary_key"`
+    Name            string `json:"name" sql:"not null"`
+    Email           string `json:"email" sql:"not null;unique"`
+    Message         string `json:"message" sql:"not null"`
+    SSHKey          string `json:"ssh_key" sql:"size:2048;not null"`
+}
+
+// Access : model for storing the details of users that have access to a server
+type Access struct {
+    AccessID uint   `json:"access_id" sql:"AUTO_INCREMENT" gorm:"primary_key"`
+    Name     string `json:"name" sql:"not null"`
+    Email    string `json:"email" sql:"not null"`
+    IP       string `json:"ip" sql:"not null"`
+    SSHKey   string `json:"ssh_key" sql:"size:2048;not null"`
+}
+
+// AdminRequest : model for storing all the requests that are made for admin access
+type AdminRequest struct {
+    AdminRequestID uint   `json:"admin_request_id" sql:"AUTO_INCREMENT" gorm:"primary_key"`
+    Name           string `json:"name" sql:"not null"`
+    Email          string `json:"email" sql:"not null;unique"`
+}
+
+// Admin : model to store the details of each admin
+type Admin struct {
+    AdminID uint   `json:"admin_id" sql:"AUTO_INCREMENT" gorm:"primary_key"`
+    Name    string `json:"name" sql:"not null"`
+    Email   string `json:"email" sql:"not null;unique"`
+}
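gorm derives the pluralized snake_case table names that the comments throughout this diff refer to (AccessRequest becomes access_requests, Access becomes accesses, and so on). A sketch of a typical lookup, written as if in package app:

``` go
// Hypothetical query against the access_requests table that AutoMigrate
// creates from the AccessRequest struct; the email is a placeholder.
var pending AccessRequest
if DB.db.Where("email = ?", "dev@example.com").First(&pending).RecordNotFound() {
    // no pending request for this address
}
```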
diff --git a/app/populateViewHandlers.go b/app/populateViewHandlers.go
new file mode 100644
index 0000000..8ec727b
--- /dev/null
+++ b/app/populateViewHandlers.go
@@ -0,0 +1,151 @@
+package app
+
+import (
+    "encoding/json"
+    "net/http"
+
+    _ "github.com/jinzhu/gorm/dialects/sqlite" // Driver for SQLite
+    "github.com/julienschmidt/httprouter"
+)
+
+/*
+** contains handler functions which require an admin account
+** contains functions to populate views in the admin portal
+ */
+
+// AccessesHandler : fetches data from the accesses table in the db
+func AccessesHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var accesses []Access
+
+    if IsAdmin(w, r) {
+        DB.db.Find(&accesses)
+        json, err := json.Marshal(accesses)
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+        response = Response{
+            true,
+            string(json),
+        }
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// AccessRequestsHandler : fetches data from the access_requests table in the db
+func AccessRequestsHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var accessRequests []AccessRequest
+
+    if IsAdmin(w, r) {
+        DB.db.Find(&accessRequests)
+        json, err := json.Marshal(accessRequests)
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+        response = Response{
+            true,
+            string(json),
+        }
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// AdminsHandler : fetches data from the admins table in the db
+func AdminsHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var admins []Admin
+
+    if IsAdmin(w, r) {
+        DB.db.Find(&admins)
+        json, err := json.Marshal(admins)
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+        response = Response{
+            true,
+            string(json),
+        }
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
+
+// AdminRequestsHandler : fetches data from the admin_requests table in the db
+func AdminRequestsHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+
+    var response Response
+    var adminRequests []AdminRequest
+
+    if IsAdmin(w, r) {
+        DB.db.Find(&adminRequests)
+        json, err := json.Marshal(adminRequests)
+        if err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+            return
+        }
+        response = Response{
+            true,
+            string(json),
+        }
+    } else {
+        response = Response{
+            false,
+            "User not admin",
+        }
+    }
+    json, err := json.Marshal(response)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+        return
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    w.Write(json)
+
+}
diff --git a/app/structs.go b/app/structs.go
new file mode 100644
index 0000000..2e6d901
--- /dev/null
+++ b/app/structs.go
@@ -0,0 +1,47 @@
+package app
+
+import (
+    "github.com/jinzhu/gorm"
+    _ "github.com/jinzhu/gorm/dialects/mysql" // Driver for MySQL
+)
+
+/*
+** contains all structs used in the app
+ */
+
+// Database : Contains the gorm database object
+type Database struct {
+    db *gorm.DB
+}
+
+// CredentialOAuth2 : Stores the id and secret key for OAuth
+type CredentialOAuth2 struct {
+    Cid     string `json:"cid"`
+    Csecret string `json:"csecret"`
+}
+
+// CredentialMysql : Stores the credentials for the MySQL database
+type CredentialMysql struct {
+    DBUsername string `json:"db_username"`
+    DBPassword string `json:"db_password"`
+    DBName     string `json:"db_name"`
+}
+
+// Response : Response struct for each incoming request
+type Response struct {
+    Success bool   `json:"success"`
+    Message string `json:"message"`
+}
+
+// GoogleToken : Struct to store the information received from the OAuth2 callback
+type GoogleToken struct {
+    Sub           string `json:"sub"`
+    Name          string `json:"name"`
+    GivenName     string `json:"given_name"`
+    FamilyName    string `json:"family_name"`
+    Profile       string `json:"profile"`
+    Picture       string `json:"picture"`
+    Email         string `json:"email"`
+    EmailVerified bool   `json:"email_verified"`
+    Gender        string `json:"gender"`
+}
diff --git a/credMysql.json b/credMysql.json
new file mode 100644
index 0000000..1dbdee6
--- /dev/null
+++ b/credMysql.json
@@ -0,0 +1,5 @@
+{
+    "db_username": "damnUser",
+    "db_password": "damnPassword",
+    "db_name": "damnDb"
+}
diff --git a/credOAuth2.json b/credOAuth2.json
new file mode 100644
index 0000000..9526776
--- /dev/null
+++ b/credOAuth2.json
@@ -0,0 +1,4 @@
+{
+    "cid": "594126007806-ge1t791saiipqobard3t8b7rtl674d0l.apps.googleusercontent.com",
+    "csecret": "e-v6aQWg-PEyK58uh-CTyjC2"
+}
\ No newline at end of file
diff --git a/dist/admin-static/3rdpartylicenses.txt b/dist/admin-static/3rdpartylicenses.txt
new file mode 100644
index 0000000..e4dcf89
--- /dev/null
+++ b/dist/admin-static/3rdpartylicenses.txt
@@ -0,0 +1,140 @@
+core-js@2.5.3
+MIT
+Copyright (c) 2014-2017 Denis Pushkarev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all
copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +webpack@3.7.1 +MIT +Copyright JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +zone.js@0.8.18 +MIT +The MIT License + +Copyright (c) 2016 Google, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +@angular/core@4.4.6 +MIT +MIT + +ngx-clipboard@9.0.0 +MIT +The MIT License (MIT) + +Copyright (c) 2016 Sam Lin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +@angular/router@4.4.6 +MIT +MIT + +@angular/http@4.4.6 +MIT +MIT + +ngx-window-token@0.0.4 +MIT +The MIT License (MIT) + +Copyright (c) 2016 Sam Lin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +@angular/forms@4.4.6 +MIT +MIT + +@angular/platform-browser@4.4.6 +MIT +MIT + +@angular/common@4.4.6 +MIT +MIT \ No newline at end of file diff --git a/dist/admin-static/favicon.ico b/dist/admin-static/favicon.ico new file mode 100644 index 0000000..5482175 Binary files /dev/null and b/dist/admin-static/favicon.ico differ diff --git a/dist/admin-static/index.html b/dist/admin-static/index.html new file mode 100644 index 0000000..a8e7d2a --- /dev/null +++ b/dist/admin-static/index.html @@ -0,0 +1 @@ +DAMN \ No newline at end of file diff --git a/dist/admin-static/inline.4ce40e0fb249ef6c445e.bundle.js b/dist/admin-static/inline.4ce40e0fb249ef6c445e.bundle.js new file mode 100644 index 0000000..70214cd --- /dev/null +++ b/dist/admin-static/inline.4ce40e0fb249ef6c445e.bundle.js @@ -0,0 +1 @@ +!function(e){function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}var r=window.webpackJsonp;window.webpackJsonp=function(t,a,c){for(var i,u,f,s=0,l=[];s0,$._29(l,13).ngClassUntouched,$._29(l,13).ngClassTouched,$._29(l,13).ngClassPristine,$._29(l,13).ngClassDirty,$._29(l,13).ngClassValid,$._29(l,13).ngClassInvalid,$._29(l,13).ngClassPending)})}function R(n){return $._36(0,[(n()(),$._17(0,0,null,null,69,"div",[["class","modal-background"]],[[2,"modal-active",null]],null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(2,0,null,null,66,"div",[["class","modal"]],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(4,0,null,null,4,"div",[["class","close-modal-container"]],null,[[null,"click"]],function(n,l,t){var e=!0,u=n.component;if("click"===l){e=!1!==u.closeModal()&&e}return e},null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(6,0,null,null,1,"div",[["class","close-modal"]],null,null,null,null,null)),(n()(),$._35(-1,null,["\xd7"])),(n()(),$._35(-1,null,["\n 
"])),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(10,0,null,null,57,"div",[["class","modal-content"]],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(12,0,null,null,45,"div",[["class","modal-data"]],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(14,0,null,null,42,"table",[],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(16,0,null,null,40,"tbody",[],null,null,null,null,null)),(n()(),$._17(17,0,null,null,7,"tr",[],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(19,0,null,null,1,"td",[],null,null,null,null,null)),(n()(),$._35(-1,null,["Name"])),(n()(),$._17(21,0,null,null,2,"td",[],null,null,null,null,null)),(n()(),$._17(22,0,null,null,1,"span",[],null,null,null,null,null)),(n()(),$._35(23,null,["",""])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(26,0,null,null,7,"tr",[],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(28,0,null,null,1,"td",[],null,null,null,null,null)),(n()(),$._35(-1,null,["Email"])),(n()(),$._17(30,0,null,null,2,"td",[],null,null,null,null,null)),(n()(),$._17(31,0,null,null,1,"span",[],null,null,null,null,null)),(n()(),$._35(32,null,["",""])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(35,0,null,null,14,"tr",[],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(37,0,null,null,1,"td",[],null,null,null,null,null)),(n()(),$._35(-1,null,["SSH Key"])),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(40,0,null,null,8,"td",[],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(42,0,null,null,5,"div",[],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(44,0,null,null,2,"button",[["class","copy-btn"],["ngxClipboard",""]],null,[[null,"click"]],function(n,l,t){var e=!0;if("click"===l){e=!1!==$._29(n,45).onClick(t.target)&&e}return e},null,null)),$._15(45,212992,null,0,Rn.a,[Ln.b,$.M],{targetElm:[0,"targetElm"],cbContent:[1,"cbContent"]},null),(n()(),$._35(-1,null,["Copy key"])),(n()(),$._35(47,null,[" for ","\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._11(16777216,null,null,1,null,S)),$._15(52,16384,null,0,J.i,[$.Z,$.W],{ngIf:[0,"ngIf"]},null),(n()(),$._35(-1,null,["\n "])),(n()(),$._11(16777216,null,null,1,null,j)),$._15(55,16384,null,0,J.i,[$.Z,$.W],{ngIf:[0,"ngIf"]},null),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._17(59,0,null,null,7,"div",[["class","modal-buttons"]],null,null,null,null,null)),(n()(),$._35(-1,null,["\n "])),(n()(),$._11(16777216,null,null,1,null,q)),$._15(62,16384,null,0,J.i,[$.Z,$.W],{ngIf:[0,"ngIf"]},null),(n()(),$._35(-1,null,["\n "])),(n()(),$._11(16777216,null,null,1,null,I)),$._15(65,16384,null,0,J.i,[$.Z,$.W],{ngIf:[0,"ngIf"]},null),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n "])),(n()(),$._35(-1,null,["\n"]))],function(n,l){var t=l.component;n(l,45,0,"",null==t.modalData?null:t.modalData.ssh_key),n(l,52,0,"access"===(null==t.modalData?null:t.modalData.modalfor)),n(l,55,0,"access_request"===(null==t.modalData?null:t.modalData.modalfor)),n(l,62,0,"access"===(null==t.modalData?null:t.modalData.modalfor)),n(l,65,0,"access_request"===(null==t.modalData?null:t.modalData.modalfor))},function(n,l){var 
t=l.component;n(l,0,0,t.modalActive),n(l,23,0,null==t.modalData?null:t.modalData.name),n(l,32,0,null==t.modalData?null:t.modalData.email),n(l,47,0,null==t.modalData?null:t.modalData.ssh_key.split(" ")[2])})}function L(n){return $._36(0,[(n()(),$._17(0,0,null,null,1,"admin-modal",[],null,null,null,R,Nn)),$._15(1,114688,null,0,In,[Y],null,null)],function(n,l){n(l,1,0)},null)}function z(n){return $._36(0,[(n()(),$._17(0,0,null,null,1,"div",[["id","snackbar"]],[[2,"danger",null],[2,"show",null]],null,null,null,null)),(n()(),$._35(1,null,["",""]))],null,function(n,l){var t=l.component;n(l,0,0,!t.type,t.show),n(l,1,0,t.message)})}function N(n){return $._36(0,[(n()(),$._17(0,0,null,null,1,"admin-snackbar",[],null,null,null,z,Fn)),$._15(1,114688,null,0,Zn,[Y],null,null)],function(n,l){n(l,1,0)},null)}function W(n){return $._36(0,[(n()(),$._17(0,0,null,null,1,"admin-indicator",[],null,null,null,C,xn)),$._15(1,114688,null,0,vn,[bn],null,null),(n()(),$._35(-1,null,["\n"])),(n()(),$._17(3,0,null,null,1,"admin-navbar",[],null,null,null,P,Pn)),$._15(4,114688,null,0,wn,[bn],null,null),(n()(),$._35(-1,null,["\n"])),(n()(),$._17(6,0,null,null,1,"admin-heading",[],null,null,null,M,Sn)),$._15(7,114688,null,0,Mn,[Y],null,null),(n()(),$._35(-1,null,["\n"])),(n()(),$._17(9,0,null,null,2,"div",[["class","data-container"]],null,null,null,null,null)),(n()(),$._17(10,16777216,null,null,1,"router-outlet",[],null,null,null,null,null)),$._15(11,212992,null,0,Cn.o,[Cn.b,$.Z,$.k,[8,null],$.i],null,null),(n()(),$._35(-1,null,["\n"])),(n()(),$._17(13,0,null,null,1,"admin-modal",[],null,null,null,R,Nn)),$._15(14,114688,null,0,In,[Y],null,null),(n()(),$._35(-1,null,["\n"])),(n()(),$._17(16,0,null,null,1,"admin-snackbar",[],null,null,null,z,Fn)),$._15(17,114688,null,0,Zn,[Y],null,null)],function(n,l){n(l,1,0),n(l,4,0),n(l,7,0),n(l,11,0),n(l,14,0),n(l,17,0)},null)}function Z(n){return $._36(0,[(n()(),$._17(0,0,null,null,1,"app-root",[],null,null,null,W,Tn)),$._15(1,114688,null,0,T,[],null,null)],function(n,l){n(l,1,0)},null)}Object.defineProperty(l,"__esModule",{value:!0});var $=t("/oeL"),F={production:!0},H=function(){function n(){}return n}(),T=function(){function n(){}return n.prototype.ngOnInit=function(){},n.ctorParameters=function(){return[]},n}(),V=[""],E=[".data-container[_ngcontent-%COMP%]{width:80%;margin:0 auto}.data-item[_ngcontent-%COMP%]{padding:0 15px;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;-webkit-box-align:center;-ms-flex-align:center;align-items:center;font-size:1.2rem;color:#ddd;border-style:none none solid none;border-width:thin;border-color:#444;overflow:hidden;min-height:61px}.view-btn[_ngcontent-%COMP%]{margin:0 20px 0 0;padding:5px 15px;border-style:none;background-color:transparent;color:#ddd;border-radius:2px;font-size:1rem;cursor:pointer}.data[_ngcontent-%COMP%]{width:33%;margin:15px 15px 0 0;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.items-right[_ngcontent-%COMP%]{margin-left:auto;display:-webkit-box;display:-ms-flexbox;display:flex;margin:15px 0}.data-option-btn[_ngcontent-%COMP%]{-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;font-size:1rem;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.data-option-btn[_ngcontent-%COMP%] button[_ngcontent-%COMP%]{padding:5px 
15px;border-style:none;background-color:#444;color:#ddd;border-radius:2px;text-align:center;margin-right:20px}.no-content[_ngcontent-%COMP%]{color:#eee;height:calc(80vh - 100px);width:100%;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;font-weight:700}@media only screen and (max-width:750px){.data[_ngcontent-%COMP%]{width:100%}.items-right[_ngcontent-%COMP%]{-webkit-box-orient:horizontal;-webkit-box-direction:reverse;-ms-flex-direction:row-reverse;flex-direction:row-reverse}.view-btn[_ngcontent-%COMP%]{padding:5px 15px;border-style:none;background-color:#444;color:#ddd;border-radius:2px;text-align:center;font-size:.85rem}.data-container[_ngcontent-%COMP%]{width:95%}}"],J=t("qbdv"),X=t("CPp0"),G=t("TO51"),U=(t("GQSG"),this&&this.__assign||Object.assign||function(n){for(var l,t=1,e=arguments.length;t1?arguments[1]:void 0,3);n=n?n.n:this._f;)for(r(n.v,n.k,this);n&&n.r;)n=n.p},has:function(e){return!!y(d(this,t),e)}}),p&&r(l.prototype,"size",{get:function(){return d(this,t)[v]}}),l},def:function(e,t,n){var r,o,i=y(e,t);return i?i.v=n:(e._l=i={i:o=h(t,!0),k:t,v:n,p:r=e._l,n:void 0,r:!1},e._f||(e._f=i),r&&(r.n=i),e[v]++,"F"!==o&&(e._i[o]=i)),e},getEntry:y,setStrong:function(e,t,n){s(e,t,function(e,n){this._t=d(e,t),this._k=n,this._l=void 0},function(){for(var e=this,t=e._k,n=e._l;n&&n.r;)n=n.p;return e._t&&(e._l=n=n?n.n:e._t._f)?"keys"==t?l(0,n.k):"values"==t?l(0,n.v):l(0,[n.k,n.v]):(e._t=void 0,l(1))},n?"entries":"values",!n,!0),f(t)}}},"3r0D":function(e,t,n){var r=n("Iclu")("wks"),o=n("c09d"),i=n("ptrv").Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},"51pc":function(e,t,n){var r=n("+pQw"),o=n("ewdp"),i=n("a/Sk"),a=n("yIWP")("IE_PROTO"),c=function(){},u=function(){var e,t=n("BQSv")("iframe"),r=i.length;for(t.style.display="none",n("Ed9o").appendChild(t),t.src="javascript:",e=t.contentWindow.document,e.open(),e.write(" \ No newline at end of file diff --git a/dist/user-static/inline.d7e2f46eba80f01103f6.bundle.js b/dist/user-static/inline.d7e2f46eba80f01103f6.bundle.js new file mode 100644 index 0000000..1d53567 --- /dev/null +++ b/dist/user-static/inline.d7e2f46eba80f01103f6.bundle.js @@ -0,0 +1 @@ +!function(e){function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}var r=window.webpackJsonp;window.webpackJsonp=function(t,c,u){for(var a,i,f,s=0,l=[];s1?arguments[1]:void 0,3);n=n?n.n:this._f;)for(r(n.v,n.k,this);n&&n.r;)n=n.p},has:function(e){return!!y(d(this,t),e)}}),p&&r(l.prototype,"size",{get:function(){return d(this,t)[v]}}),l},def:function(e,t,n){var r,o,i=y(e,t);return i?i.v=n:(e._l=i={i:o=h(t,!0),k:t,v:n,p:r=e._l,n:void 0,r:!1},e._f||(e._f=i),r&&(r.n=i),e[v]++,"F"!==o&&(e._i[o]=i)),e},getEntry:y,setStrong:function(e,t,n){s(e,t,function(e,n){this._t=d(e,t),this._k=n,this._l=void 0},function(){for(var e=this,t=e._k,n=e._l;n&&n.r;)n=n.p;return e._t&&(e._l=n=n?n.n:e._t._f)?"keys"==t?l(0,n.k):"values"==t?l(0,n.v):l(0,[n.k,n.v]):(e._t=void 0,l(1))},n?"entries":"values",!n,!0),f(t)}}},"3r0D":function(e,t,n){var r=n("Iclu")("wks"),o=n("c09d"),i=n("ptrv").Symbol,a="function"==typeof i;(e.exports=function(e){return r[e]||(r[e]=a&&i[e]||(a?i:o)("Symbol."+e))}).store=r},"51pc":function(e,t,n){var r=n("+pQw"),o=n("ewdp"),i=n("a/Sk"),a=n("yIWP")("IE_PROTO"),c=function(){},u=function(){var 
e,t=n("BQSv")("iframe"),r=i.length;for(t.style.display="none",n("Ed9o").appendChild(t),t.src="javascript:",e=t.contentWindow.document,e.open(),e.write(" \ No newline at end of file diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..a8e7d2a --- /dev/null +++ b/templates/index.html @@ -0,0 +1 @@ +DAMN \ No newline at end of file diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml new file mode 100644 index 0000000..59594d4 --- /dev/null +++ b/vendor/cloud.google.com/go/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go +go: +- 1.6.x +- 1.7.x +- 1.8.x +- 1.9.x +install: +- go get -v cloud.google.com/go/... +script: +- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d +- tar xvf keys.tar +- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" + GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json" + GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests" + GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json" + ./run-tests.sh $TRAVIS_COMMIT +env: + matrix: + # The GCLOUD_TESTS_API_KEY environment variable. + secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 0000000..c364af1 --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md new file mode 100644 index 0000000..95c94a4 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -0,0 +1,152 @@ +# Contributing + +1. Sign one of the contributor license agreements below. +1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. + 1. You will need to ensure that your `GOBIN` directory (by default + `$GOPATH/bin`) is in your `PATH` so that git can find the command. + 1. If you would like, you may want to set up aliases for git-codereview, + such that `git codereview change` becomes `git change`. See the + [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. + 1. Should you run into issues with the git-codereview tool, please note + that all error messages will assume that you have set up these + aliases. +1. Get the cloud package by running `go get -d cloud.google.com/go`. + 1. If you have already checked out the source, make sure that the remote git + origin is https://code.googlesource.com/gocloud: + + git remote set-url origin https://code.googlesource.com/gocloud +1. Make sure your auth is configured correctly by visiting + https://code.googlesource.com, clicking "Generate Password", and following + the directions. +1. Make changes and create a change by running `git codereview change `, +provide a commit message, and use `git codereview mail` to create a Gerrit CL. +1. 
Keep amending to the change with `git codereview change` and mail as you receive
+feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.
+
+## Integration Tests
+
+In addition to the unit tests, you may run the integration test suite.
+
+To run the integration tests, you will need to create and configure a project
+in the Google Developers Console.
+
+After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
+Ensure the project-level **Owner**
+[IAM role](console.cloud.google.com/iam-admin/iam/project) is added to the
+service account. Alternatively, the account can be granted all of the following roles:
+- **Editor**
+- **Logs Configuration Writer**
+- **PubSub Admin**
+
+Once you create a project, set the following environment variables to be able to
+run the tests against the actual APIs.
+
+- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
+- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
+- **GCLOUD_TESTS_API_KEY**: Your API key.
+
+Firestore requires a different project and key:
+
+- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
+  supporting Firestore
+- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
+
+Install the [gcloud command-line tool][gcloudcli] on your machine and use it
+to create some resources used in integration tests.
+
+From the project's root directory:
+
+``` sh
+# Set the default project in your env.
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticate the gcloud tool with your account.
+$ gcloud auth login
+
+# Create the indexes used in the datastore integration tests.
+$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+
+# Create a Google Cloud storage bucket with the same name as your test project,
+# and with the Stackdriver Logging service account as owner, for the sink
+# integration tests in logging.
+$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Create a PubSub topic for integration tests of storage notifications.
+$ gcloud beta pubsub topics create go-storage-notification-test
+
+# Create a Spanner instance for the spanner integration tests.
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
+# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
+# the instance after testing with 'gcloud beta spanner instances delete'.
+```
+
+Once you've set the environment variables, you can run the integration tests by
+running:
+
+``` sh
+$ go test -v cloud.google.com/go/...
+```
+
+A short sketch of how a test can consume these variables appears after the CLA
+section below.
+
+## Contributor License Agreements
+
+Before we can accept your pull requests, you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your
+work**, then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
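+Returning to the integration tests above, here is a minimal, editor-added
+sketch (not part of the library) of how a test can consume the
+`GCLOUD_TESTS_GOLANG_PROJECT_ID` variable and skip itself when the variable
+is unset. The helper name `testProjectID` is hypothetical.
+
+```go
+package cloud_test
+
+import (
+	"os"
+	"testing"
+)
+
+// testProjectID returns the integration-test project ID, or skips the
+// calling test when the environment variable is not set.
+func testProjectID(t *testing.T) string {
+	id := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID")
+	if id == "" {
+		t.Skip("GCLOUD_TESTS_GOLANG_PROJECT_ID not set; skipping integration test")
+	}
+	return id
+}
+
+func TestSmoke(t *testing.T) {
+	projID := testProjectID(t)
+	_ = projID // A real test would create a client for projID here.
+}
+```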
+ +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 0000000..0d56acd --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,38 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. 
+ +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +Johan Euphrosine +Jonathan Amsterdam +Kunpei Sakai +Luna Duclos +Magnus Hiie +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 0000000..a4c5efd --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+                                 END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2014 Google Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/cloud.google.com/go/MIGRATION.md b/vendor/cloud.google.com/go/MIGRATION.md
new file mode 100644
index 0000000..791210d
--- /dev/null
+++ b/vendor/cloud.google.com/go/MIGRATION.md
@@ -0,0 +1,54 @@
+# Code Changes
+
+## v0.10.0
+
+- pubsub: Replace
+
+    ```
+    sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
+    ```
+
+  with
+
+    ```
+    sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
+        PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
+    })
+    ```
+
+- trace: `trace.GRPCServerInterceptor` will be provided from `*trace.Client`.
+Given an initialized `*trace.Client` named `tc`, instead of
+
+    ```
+    s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
+    ```
+
+  write
+
+    ```
+    s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
+    ```
+
+- trace: `trace.GRPCClientInterceptor` will also be provided from `*trace.Client`.
+Instead of
+
+    ```
+    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
+    ```
+
+  write
+
+    ```
+    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
+    ```
+
+- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
+interceptor as a dial option as shown below when initializing Cloud package
+clients:
+
+    ```
+    c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
+    if err != nil {
+        ...
+    }
+    ```
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
new file mode 100644
index 0000000..2f372a6
--- /dev/null
+++ b/vendor/cloud.google.com/go/README.md
@@ -0,0 +1,502 @@
+# Google Cloud Client Libraries for Go
+
+[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
+
+Go packages for [Google Cloud Platform](https://cloud.google.com) services.
+
+``` go
+import "cloud.google.com/go"
+```
+
+To install the packages on your system,
+
+```
+$ go get -u cloud.google.com/go/...
+```
+
+**NOTE:** Some of these packages are under development, and may occasionally
+make backwards-incompatible changes.
+
+**NOTE:** The GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+
+  * [News](#news)
+  * [Supported APIs](#supported-apis)
+  * [Go Versions Supported](#go-versions-supported)
+  * [Authorization](#authorization)
+  * [Cloud Datastore](#cloud-datastore-)
+  * [Cloud Storage](#cloud-storage-)
+  * [Cloud Pub/Sub](#cloud-pub-sub-)
+  * [Cloud BigQuery](#cloud-bigquery-)
+  * [Stackdriver Logging](#stackdriver-logging-)
+  * [Cloud Spanner](#cloud-spanner-)
+
+
+## News
+
+_December 11, 2017_
+
+*v0.17.0*
+
+- firestore BREAKING CHANGES:
+  - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
+    Change
+        `docref.UpdateMap(ctx, map[string]interface{}{"a.b": 1})`
+    to
+        `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
+
+    Change
+        `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
+    to
+        `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
+  - Rename MergePaths to Merge; require args to be FieldPaths
+  - A value stored as an integer can be read into a floating-point field, and vice versa.
+- bigtable/cmd/cbt:
+  - Support deleting a column.
+  - Add regex option for row read.
+- spanner: Mark stable.
+- storage:
+  - Add Reader.ContentEncoding method.
+  - Fix handling of SignedURL headers.
+- bigquery:
+  - If Uploader.Put is called with no rows, it returns nil without making a
+    call.
+  - Schema inference supports the "nullable" option in struct tags for
+    non-required fields.
+  - TimePartitioning supports "Field".
+
+
+_October 30, 2017_
+
+*v0.16.0*
+
+- Other bigquery changes:
+  - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
+  - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
+    Legacy SQL.
+  - Uploader.Put will generate a random insert ID if you do not provide one.
+  - Support time partitioning for load jobs.
+  - Support dry-run queries.
+  - A `Job` remembers its last retrieved status.
+  - Support retrieving job configuration.
+  - Support labels for jobs and tables.
+  - Support dataset access lists.
+  - Improve support for external data sources, including data from Bigtable and
+    Google Sheets, and tables with external data.
+  - Support updating a table's view configuration.
+  - Fix uploading civil times with nanoseconds.
+
+- storage:
+  - Support PubSub notifications.
+  - Support Requester Pays buckets.
+
+- profiler: Support goroutine and mutex profile types.
+
+
+_October 3, 2017_
+
+*v0.15.0*
+
+- firestore: beta release. See the
+  [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
+
+- errorreporting: The existing package has been redesigned.
+
+- errors: This package has been removed. Use errorreporting.
+ + +[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) + +## Supported APIs + +Google API | Status | Package +---------------------------------|--------------|----------------------------------------------------------- +[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] +[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref] +[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] +[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] +[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] +[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] +[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] +[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] +[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] +[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref] +[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] +[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref] +[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] +[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] + + +> **Alpha status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. There may be minor backwards-incompatible +> changes where necessary. +> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at +https://godoc.org/cloud.google.com/go + +Visit or join the +[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) +for updates on these packages. + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. You can see which versions are +currently supported by looking at the lines following `go:` in +[`.travis.yml`](.travis.yml). + +## Authorization + +By default, each API will use [Google Application Default Credentials][default-creds] +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +[snip]:# (auth) +```go +client, err := storage.NewClient(ctx) +``` + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) +to the `NewClient` function of the desired package. 
For example:
+
+[snip]:# (auth-JSON)
+```go
+client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
+```
+
+You can exert more control over authorization by using the
+[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
+create an `oauth2.TokenSource`. Then pass
+[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
+to the `NewClient` function:
+[snip]:# (auth-ts)
+```go
+tokenSource := ...
+client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
+```
+
+## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
+
+- [About Cloud Datastore][cloud-datastore]
+- [Activating the API for your project][cloud-datastore-activation]
+- [API documentation][cloud-datastore-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
+- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)
+
+### Example Usage
+
+First create a `datastore.Client` to use throughout your application:
+
+[snip]:# (datastore-1)
+```go
+client, err := datastore.NewClient(ctx, "my-project-id")
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+Then use that client to interact with the API:
+
+[snip]:# (datastore-2)
+```go
+type Post struct {
+	Title       string
+	Body        string `datastore:",noindex"`
+	PublishedAt time.Time
+}
+keys := []*datastore.Key{
+	datastore.NameKey("Post", "post1", nil),
+	datastore.NameKey("Post", "post2", nil),
+}
+posts := []*Post{
+	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+	log.Fatal(err)
+}
+```
+
+## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
+
+- [About Cloud Storage][cloud-storage]
+- [API documentation][cloud-storage-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
+
+### Example Usage
+
+First create a `storage.Client` to use throughout your application:
+
+[snip]:# (storage-1)
+```go
+client, err := storage.NewClient(ctx)
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+[snip]:# (storage-2)
+```go
+// Read object1 from the bucket.
+rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
+if err != nil {
+	log.Fatal(err)
+}
+defer rc.Close()
+body, err := ioutil.ReadAll(rc)
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
+
+- [About Cloud Pub/Sub][cloud-pubsub]
+- [API documentation][cloud-pubsub-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
+
+### Example Usage
+
+First create a `pubsub.Client` to use throughout your application:
+
+[snip]:# (pubsub-1)
+```go
+client, err := pubsub.NewClient(ctx, "project-id")
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+Then use the client to publish and subscribe:
+
+[snip]:# (pubsub-2)
+```go
+// Publish "hello world" on topic1.
+topic := client.Topic("topic1") +res := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), +}) +// The publish happens asynchronously. +// Later, you can get the result from res: +... +msgID, err := res.Get(ctx) +if err != nil { + log.Fatal(err) +} + +// Use a callback to receive messages via subscription1. +sub := client.Subscription("subscription1") +err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + fmt.Println(m.Data) + m.Ack() // Acknowledge that we've consumed the message. +}) +if err != nil { + log.Println(err) +} +``` + +## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) + +- [About Cloud BigQuery][cloud-bigquery] +- [API documentation][cloud-bigquery-docs] +- [Go client documentation][cloud-bigquery-ref] +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) + +### Example Usage + +First create a `bigquery.Client` to use throughout your application: +[snip]:# (bq-1) +```go +c, err := bigquery.NewClient(ctx, "my-project-ID") +if err != nil { + // TODO: Handle error. +} +``` + +Then use that client to interact with the API: +[snip]:# (bq-2) +```go +// Construct a query. +q := c.Query(` + SELECT year, SUM(number) + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year +`) +// Execute the query. +it, err := q.Read(ctx) +if err != nil { + // TODO: Handle error. +} +// Iterate through the results. +for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) +} +``` + + +## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) + +- [About Stackdriver Logging][cloud-logging] +- [API documentation][cloud-logging-docs] +- [Go client documentation][cloud-logging-ref] +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) + +### Example Usage + +First create a `logging.Client` to use throughout your application: +[snip]:# (logging-1) +```go +ctx := context.Background() +client, err := logging.NewClient(ctx, "my-project") +if err != nil { + // TODO: Handle error. +} +``` + +Usually, you'll want to add log entries to a buffer to be periodically flushed +(automatically and asynchronously) to the Stackdriver Logging service. +[snip]:# (logging-2) +```go +logger := client.Logger("my-log") +logger.Log(logging.Entry{Payload: "something happened!"}) +``` + +Close your client before your program exits, to flush any buffered log entries. +[snip]:# (logging-3) +```go +err = client.Close() +if err != nil { + // TODO: Handle error. 
+}
+```
+
+## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)
+
+- [About Cloud Spanner][cloud-spanner]
+- [API documentation][cloud-spanner-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)
+
+### Example Usage
+
+First create a `spanner.Client` to use throughout your application:
+
+[snip]:# (spanner-1)
+```go
+client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+[snip]:# (spanner-2)
+```go
+// Simple reads and writes.
+_, err = client.Apply(ctx, []*spanner.Mutation{
+	spanner.Insert("Users",
+		[]string{"name", "email"},
+		[]interface{}{"alice", "a@example.com"})})
+if err != nil {
+	log.Fatal(err)
+}
+row, err := client.Single().ReadRow(ctx, "Users",
+	spanner.Key{"alice"}, []string{"email"})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
+document for details. We're using Gerrit for our code reviews. Please don't open pull
+requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-firestore]: https://cloud.google.com/firestore/
+[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
+[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
+[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
+[cloud-storage-docs]: https://cloud.google.com/storage/docs
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
+
+[cloud-bigtable]: https://cloud.google.com/bigtable/
+[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
+
+[cloud-bigquery]: https://cloud.google.com/bigquery/
+[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
+[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
+
+[cloud-logging]: https://cloud.google.com/logging/
+[cloud-logging-docs]: https://cloud.google.com/logging/docs
+[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
+
+[cloud-monitoring]: https://cloud.google.com/monitoring/
+[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
+
+[cloud-vision]: https://cloud.google.com/vision
+[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
+
+[cloud-language]: https://cloud.google.com/natural-language
+[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
+
+[cloud-speech]: https://cloud.google.com/speech
+[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1 + +[cloud-spanner]: https://cloud.google.com/spanner/ +[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner +[cloud-spanner-docs]: https://cloud.google.com/spanner/docs + +[cloud-translation]: https://cloud.google.com/translation +[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation + +[cloud-trace]: https://cloud.google.com/trace/ +[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace + +[cloud-video]: https://cloud.google.com/video-intelligence/ +[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1 + +[cloud-errors]: https://cloud.google.com/error-reporting/ +[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting + +[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/appveyor.yml b/vendor/cloud.google.com/go/appveyor.yml new file mode 100644 index 0000000..e66cd00 --- /dev/null +++ b/vendor/cloud.google.com/go/appveyor.yml @@ -0,0 +1,32 @@ +# This file configures AppVeyor (http://www.appveyor.com), +# a Windows-based CI service similar to Travis. + +# Identifier for this run +version: "{build}" + +# Clone the repo into this path, which conforms to the standard +# Go workspace structure. +clone_folder: c:\gopath\src\cloud.google.com\go + +environment: + GOPATH: c:\gopath + GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 + GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json + KEYFILE_CONTENTS: + secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje + +install: + # Info for debugging. + - echo %PATH% + - go version + - go env + - go get -v -d -t ./... + + +# Provide a build script, or AppVeyor will call msbuild. +build_script: + - go install -v ./... 
- echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY%
+
+test_script:
+  - go test -v ./...
diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go
new file mode 100644
index 0000000..fe75467
--- /dev/null
+++ b/vendor/cloud.google.com/go/authexample_test.go
@@ -0,0 +1,49 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cloud_test
+
+import (
+	"cloud.google.com/go/datastore"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+)
+
+func Example_applicationDefaultCredentials() {
+	// Google Application Default Credentials is the recommended way to authorize
+	// and authenticate clients.
+	//
+	// See the following link on how to create and obtain Application Default Credentials:
+	// https://developers.google.com/identity/protocols/application-default-credentials.
+	client, err := datastore.NewClient(context.Background(), "project-id")
+	if err != nil {
+		// TODO: handle error.
+	}
+	_ = client // Use the client.
+}
+
+func Example_serviceAccountFile() {
+	// Use a JSON key file associated with a Google service account to
+	// authenticate and authorize. Service Account keys can be created and
+	// downloaded from https://console.developers.google.com/permissions/serviceaccounts.
+	//
+	// Note: This example uses the datastore client, but the same steps apply to
+	// the other client libraries underneath this package.
+	client, err := datastore.NewClient(context.Background(),
+		"project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
+	if err != nil {
+		// TODO: handle error.
+	}
+	_ = client // Use the client.
+}
diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/README.md b/vendor/cloud.google.com/go/bigquery/benchmarks/README.md
new file mode 100644
index 0000000..c97f9d8
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/benchmarks/README.md
@@ -0,0 +1,8 @@
+# BigQuery Benchmark
+This directory contains benchmarks for the BigQuery client.
+
+## Usage
+`go run bench.go -- <project ID> queries.json`
+
+The BigQuery service caches requests, so the benchmark should be run
+at least twice, disregarding the first result.
diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go b/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
new file mode 100644
index 0000000..56d80ec
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +//+build ignore + +package main + +import ( + "encoding/json" + "flag" + "io/ioutil" + "log" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func main() { + flag.Parse() + + ctx := context.Background() + c, err := bigquery.NewClient(ctx, flag.Arg(0)) + if err != nil { + log.Fatal(err) + } + + queriesJSON, err := ioutil.ReadFile(flag.Arg(1)) + if err != nil { + log.Fatal(err) + } + + var queries []string + if err := json.Unmarshal(queriesJSON, &queries); err != nil { + log.Fatal(err) + } + + for _, q := range queries { + doQuery(ctx, c, q) + } +} + +func doQuery(ctx context.Context, c *bigquery.Client, qt string) { + startTime := time.Now() + q := c.Query(qt) + it, err := q.Read(ctx) + if err != nil { + log.Fatal(err) + } + + numRows, numCols := 0, 0 + var firstByte time.Duration + + for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + if numRows == 0 { + numCols = len(values) + firstByte = time.Since(startTime) + } else if numCols != len(values) { + log.Fatalf("got %d columns, want %d", len(values), numCols) + } + numRows++ + } + log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec", + qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds()) +} diff --git a/vendor/cloud.google.com/go/bigquery/benchmarks/queries.json b/vendor/cloud.google.com/go/bigquery/benchmarks/queries.json new file mode 100644 index 0000000..13fed38 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/benchmarks/queries.json @@ -0,0 +1,10 @@ +[ + "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000", + "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000", + "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000", + "SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000", + "SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id", + "SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId", + "SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000", + "SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000" +] diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go new file mode 100644 index 0000000..324a379 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/bigquery.go @@ -0,0 +1,156 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "fmt" + "io" + "net/http" + "time" + + gax "github.com/googleapis/gax-go" + + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +const ( + prodAddr = "https://www.googleapis.com/bigquery/v2/" + Scope = "https://www.googleapis.com/auth/bigquery" + userAgent = "gcloud-golang-bigquery/20160429" +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client may be used to perform BigQuery operations. +type Client struct { + projectID string + bqs *bq.Service +} + +// NewClient constructs a new Client which can perform BigQuery operations. +// Operations performed via the client are billed to the specified GCP project. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(Scope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := htransport.NewClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("bigquery: dialing: %v", err) + } + bqs, err := bq.New(httpClient) + if err != nil { + return nil, fmt.Errorf("bigquery: constructing client: %v", err) + } + bqs.BasePath = endpoint + c := &Client{ + projectID: projectID, + bqs: bqs, + } + return c, nil +} + +// Close closes any resources held by the client. +// Close should be called when the client is no longer needed. +// It need not be called at program exit. +func (c *Client) Close() error { + return nil +} + +// Calls the Jobs.Insert RPC and returns a Job. +func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) { + call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx) + setClientHeader(call.Header()) + if media != nil { + call.Media(media) + } + var res *bq.Job + var err error + invoke := func() error { + res, err = call.Do() + return err + } + // A job with a client-generated ID can be retried; the presence of the + // ID makes the insert operation idempotent. + // We don't retry if there is media, because it is an io.Reader. We'd + // have to read the contents and keep it in memory, and that could be expensive. + // TODO(jba): Look into retrying if media != nil. + if job.JobReference != nil && media == nil { + err = runWithRetry(ctx, invoke) + } else { + err = invoke() + } + if err != nil { + return nil, err + } + return bqToJob(res, c) +} + +// Convert a number of milliseconds since the Unix epoch to a time.Time. +// Treat an input of zero specially: convert it to the zero time, +// rather than the start of the epoch. +func unixMillisToTime(m int64) time.Time { + if m == 0 { + return time.Time{} + } + return time.Unix(0, m*1e6) +} + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +// See the similar function in ../storage/invoke.go. The main difference is the +// reason for retrying. +func runWithRetry(ctx context.Context, call func() error) error { + // These parameters match the suggestions in https://cloud.google.com/bigquery/sla. 
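+	// Editor's note (assumes gax-go's standard backoff behavior): each retry
+	// sleeps for a jittered duration bounded by the current cap, and the cap
+	// doubles after every attempt: roughly 1s, 2s, 4s, ... up to the 32s maximum.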
+ backoff := gax.Backoff{ + Initial: 1 * time.Second, + Max: 32 * time.Second, + Multiplier: 2, + } + return internal.Retry(ctx, backoff, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + return !retryableError(err), err + }) +} + +// This is the correct definition of retryable according to the BigQuery team. +func retryableError(err error) bool { + e, ok := err.(*googleapi.Error) + if !ok { + return false + } + var reason string + if len(e.Errors) > 0 { + reason = e.Errors[0].Reason + } + return reason == "backendError" || reason == "rateLimitExceeded" +} diff --git a/vendor/cloud.google.com/go/bigquery/copy.go b/vendor/cloud.google.com/go/bigquery/copy.go new file mode 100644 index 0000000..33feabb --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy.go @@ -0,0 +1,101 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// CopyConfig holds the configuration for a copy job. +type CopyConfig struct { + // Srcs are the tables from which data will be copied. + Srcs []*Table + + // Dst is the table into which the data will be copied. + Dst *Table + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteEmpty. + WriteDisposition TableWriteDisposition + + // The labels associated with this job. + Labels map[string]string +} + +func (c *CopyConfig) toBQ() *bq.JobConfiguration { + var ts []*bq.TableReference + for _, t := range c.Srcs { + ts = append(ts, t.toBQ()) + } + return &bq.JobConfiguration{ + Labels: c.Labels, + Copy: &bq.JobConfigurationTableCopy{ + CreateDisposition: string(c.CreateDisposition), + WriteDisposition: string(c.WriteDisposition), + DestinationTable: c.Dst.toBQ(), + SourceTables: ts, + }, + } +} + +func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig { + cc := &CopyConfig{ + Labels: q.Labels, + CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition), + WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition), + Dst: bqToTable(q.Copy.DestinationTable, c), + } + for _, t := range q.Copy.SourceTables { + cc.Srcs = append(cc.Srcs, bqToTable(t, c)) + } + return cc +} + +// A Copier copies data into a BigQuery table from one or more BigQuery tables. +type Copier struct { + JobIDConfig + CopyConfig + c *Client +} + +// CopierFrom returns a Copier which can be used to copy data into a +// BigQuery table from one or more BigQuery tables. +// The returned Copier may optionally be further configured before its Run method is called. +func (t *Table) CopierFrom(srcs ...*Table) *Copier { + return &Copier{ + c: t.c, + CopyConfig: CopyConfig{ + Srcs: srcs, + Dst: t, + }, + } +} + +// Run initiates a copy job. 
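+// It returns once the job has been submitted; it does not wait for the copy
+// to finish. (Editor's note: callers typically block on the returned Job,
+// for example via its Wait method, to observe completion.)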
+func (c *Copier) Run(ctx context.Context) (*Job, error) { + return c.c.insertJob(ctx, c.newJob(), nil) +} + +func (c *Copier) newJob() *bq.Job { + return &bq.Job{ + JobReference: c.JobIDConfig.createJobRef(c.c.projectID), + Configuration: c.CopyConfig.toBQ(), + } +} diff --git a/vendor/cloud.google.com/go/bigquery/copy_test.go b/vendor/cloud.google.com/go/bigquery/copy_test.go new file mode 100644 index 0000000..3f29d18 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy_test.go @@ -0,0 +1,141 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "github.com/google/go-cmp/cmp/cmpopts" + + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultCopyJob() *bq.Job { + return &bq.Job{ + JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, + Configuration: &bq.JobConfiguration{ + Copy: &bq.JobConfigurationTableCopy{ + DestinationTable: &bq.TableReference{ + ProjectId: "d-project-id", + DatasetId: "d-dataset-id", + TableId: "d-table-id", + }, + SourceTables: []*bq.TableReference{ + { + ProjectId: "s-project-id", + DatasetId: "s-dataset-id", + TableId: "s-table-id", + }, + }, + }, + }, + } +} + +func TestCopy(t *testing.T) { + defer fixRandomID("RANDOM")() + testCases := []struct { + dst *Table + srcs []*Table + jobID string + config CopyConfig + want *bq.Job + }{ + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + want: defaultCopyJob(), + }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + config: CopyConfig{ + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + Labels: map[string]string{"a": "b"}, + }, + want: func() *bq.Job { + j := defaultCopyJob() + j.Configuration.Labels = map[string]string{"a": "b"} + j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" + j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" + return j + }(), + }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + jobID: "job-id", + want: func() *bq.Job { + j := defaultCopyJob() + j.JobReference.JobId = "job-id" + return j + }(), + }, + } + c := &Client{projectID: "client-project-id"} + for i, tc := range testCases { + tc.dst.c = c + copier := tc.dst.CopierFrom(tc.srcs...) 
+	copier.JobID = tc.jobID
+	tc.config.Srcs = tc.srcs
+	tc.config.Dst = tc.dst
+	copier.CopyConfig = tc.config
+	got := copier.newJob()
+	checkJob(t, i, got, tc.want)
+
+	jc, err := bqToJobConfig(got.Configuration, c)
+	if err != nil {
+		t.Fatalf("#%d: %v", i, err)
+	}
+	diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
+		cmpopts.IgnoreUnexported(Table{}))
+	if diff != "" {
+		t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
+	}
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigquery/dataset.go b/vendor/cloud.google.com/go/bigquery/dataset.go
new file mode 100644
index 0000000..b712dd6
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/dataset.go
@@ -0,0 +1,501 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/internal/optional"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/iterator"
+)
+
+// Dataset is a reference to a BigQuery dataset.
+type Dataset struct {
+	ProjectID string
+	DatasetID string
+	c         *Client
+}
+
+// DatasetMetadata contains information about a BigQuery dataset.
+type DatasetMetadata struct {
+	// These fields can be set when creating a dataset.
+	Name                   string            // The user-friendly name for this dataset.
+	Description            string            // The user-friendly description of this dataset.
+	Location               string            // The geo location of the dataset.
+	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
+	Labels                 map[string]string // User-provided labels.
+	Access                 []*AccessEntry    // Access permissions.
+
+	// These fields are read-only.
+	CreationTime     time.Time
+	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
+	FullID           string    // The full dataset ID in the form projectID:datasetID.
+
+	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
+	// ensure that the metadata hasn't changed since it was read.
+	ETag string
+}
+
+// DatasetMetadataToUpdate is used when updating a dataset's metadata.
+// Only non-nil fields will be updated.
+type DatasetMetadataToUpdate struct {
+	Description optional.String // The user-friendly description of this dataset.
+	Name        optional.String // The user-friendly name for this dataset.
+
+	// DefaultTableExpiration is the default expiration time for new tables.
+	// If set to time.Duration(0), new tables never expire.
+	DefaultTableExpiration optional.Duration
+
+	// The entire access list. It is not possible to replace individual entries.
+	Access []*AccessEntry
+
+	labelUpdater
+}
+
+// Dataset creates a handle to a BigQuery dataset in the client's project.
+func (c *Client) Dataset(id string) *Dataset {
+	return c.DatasetInProject(c.projectID, id)
+}
+
+// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
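+// For example (the project and dataset IDs are illustrative):
+//
+//	ds := client.DatasetInProject("other-project", "shared_dataset")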
+func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset { + return &Dataset{ + ProjectID: projectID, + DatasetID: datasetID, + c: c, + } +} + +// Create creates a dataset in the BigQuery service. An error will be returned if the +// dataset already exists. Pass in a DatasetMetadata value to configure the dataset. +func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error { + ds, err := md.toBQ() + if err != nil { + return err + } + ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID} + call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx) + setClientHeader(call.Header()) + _, err = call.Do() + return err +} + +func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) { + ds := &bq.Dataset{} + if dm == nil { + return ds, nil + } + ds.FriendlyName = dm.Name + ds.Description = dm.Description + ds.Location = dm.Location + ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond) + ds.Labels = dm.Labels + var err error + ds.Access, err = accessListToBQ(dm.Access) + if err != nil { + return nil, err + } + if !dm.CreationTime.IsZero() { + return nil, errors.New("bigquery: Dataset.CreationTime is not writable") + } + if !dm.LastModifiedTime.IsZero() { + return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable") + } + if dm.FullID != "" { + return nil, errors.New("bigquery: Dataset.FullID is not writable") + } + if dm.ETag != "" { + return nil, errors.New("bigquery: Dataset.ETag is not writable") + } + return ds, nil +} + +func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) { + var q []*bq.DatasetAccess + for _, e := range a { + a, err := e.toBQ() + if err != nil { + return nil, err + } + q = append(q, a) + } + return q, nil +} + +// Delete deletes the dataset. +func (d *Dataset) Delete(ctx context.Context) error { + call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx) + setClientHeader(call.Header()) + return call.Do() +} + +// Metadata fetches the metadata for the dataset. +func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) { + call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx) + setClientHeader(call.Header()) + var ds *bq.Dataset + if err := runWithRetry(ctx, func() (err error) { + ds, err = call.Do() + return err + }); err != nil { + return nil, err + } + return bqToDatasetMetadata(ds) +} + +func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) { + dm := &DatasetMetadata{ + CreationTime: unixMillisToTime(d.CreationTime), + LastModifiedTime: unixMillisToTime(d.LastModifiedTime), + DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond, + Description: d.Description, + Name: d.FriendlyName, + FullID: d.Id, + Location: d.Location, + Labels: d.Labels, + ETag: d.Etag, + } + for _, a := range d.Access { + e, err := bqToAccessEntry(a, nil) + if err != nil { + return nil, err + } + dm.Access = append(dm.Access, e) + } + return dm, nil +} + +// Update modifies specific Dataset metadata fields. +// To perform a read-modify-write that protects against intervening reads, +// set the etag argument to the DatasetMetadata.ETag field from the read. +// Pass the empty string for etag for a "blind write" that will always succeed. 
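+//
+// A read-modify-write sketch (the new description is illustrative; error
+// handling is elided):
+//
+//	md, _ := ds.Metadata(ctx)
+//	md2, err := ds.Update(ctx, DatasetMetadataToUpdate{Description: "new"}, md.ETag)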
+func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) { + ds, err := dm.toBQ() + if err != nil { + return nil, err + } + call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx) + setClientHeader(call.Header()) + if etag != "" { + call.Header().Set("If-Match", etag) + } + var ds2 *bq.Dataset + if err := runWithRetry(ctx, func() (err error) { + ds2, err = call.Do() + return err + }); err != nil { + return nil, err + } + return bqToDatasetMetadata(ds2) +} + +func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) { + ds := &bq.Dataset{} + forceSend := func(field string) { + ds.ForceSendFields = append(ds.ForceSendFields, field) + } + + if dm.Description != nil { + ds.Description = optional.ToString(dm.Description) + forceSend("Description") + } + if dm.Name != nil { + ds.FriendlyName = optional.ToString(dm.Name) + forceSend("FriendlyName") + } + if dm.DefaultTableExpiration != nil { + dur := optional.ToDuration(dm.DefaultTableExpiration) + if dur == 0 { + // Send a null to delete the field. + ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs") + } else { + ds.DefaultTableExpirationMs = int64(dur / time.Millisecond) + } + } + if dm.Access != nil { + var err error + ds.Access, err = accessListToBQ(dm.Access) + if err != nil { + return nil, err + } + if len(ds.Access) == 0 { + ds.NullFields = append(ds.NullFields, "Access") + } + } + labels, forces, nulls := dm.update() + ds.Labels = labels + ds.ForceSendFields = append(ds.ForceSendFields, forces...) + ds.NullFields = append(ds.NullFields, nulls...) + return ds, nil +} + +// Table creates a handle to a BigQuery table in the dataset. +// To determine if a table exists, call Table.Metadata. +// If the table does not already exist, use Table.Create to create it. +func (d *Dataset) Table(tableID string) *Table { + return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c} +} + +// Tables returns an iterator over the tables in the Dataset. +func (d *Dataset) Tables(ctx context.Context) *TableIterator { + it := &TableIterator{ + ctx: ctx, + dataset: d, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.tables) }, + func() interface{} { b := it.tables; it.tables = nil; return b }) + return it +} + +// A TableIterator is an iterator over Tables. +type TableIterator struct { + ctx context.Context + dataset *Dataset + tables []*Table + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *TableIterator) Next() (*Table, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + t := it.tables[0] + it.tables = it.tables[1:] + return t, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// for testing +var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) { + call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID). + PageToken(pageToken). 
+ Context(it.ctx) + setClientHeader(call.Header()) + if pageSize > 0 { + call.MaxResults(int64(pageSize)) + } + var res *bq.TableList + err := runWithRetry(it.ctx, func() (err error) { + res, err = call.Do() + return err + }) + return res, err +} + +func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) { + res, err := listTables(it, pageSize, pageToken) + if err != nil { + return "", err + } + for _, t := range res.Tables { + it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c)) + } + return res.NextPageToken, nil +} + +func bqToTable(tr *bq.TableReference, c *Client) *Table { + return &Table{ + ProjectID: tr.ProjectId, + DatasetID: tr.DatasetId, + TableID: tr.TableId, + c: c, + } +} + +// Datasets returns an iterator over the datasets in a project. +// The Client's project is used by default, but that can be +// changed by setting ProjectID on the returned iterator before calling Next. +func (c *Client) Datasets(ctx context.Context) *DatasetIterator { + return c.DatasetsInProject(ctx, c.projectID) +} + +// DatasetsInProject returns an iterator over the datasets in the provided project. +// +// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator. +func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator { + it := &DatasetIterator{ + ctx: ctx, + c: c, + ProjectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// DatasetIterator iterates over the datasets in a project. +type DatasetIterator struct { + // ListHidden causes hidden datasets to be listed when set to true. + // Set before the first call to Next. + ListHidden bool + + // Filter restricts the datasets returned by label. The filter syntax is described in + // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels + // Set before the first call to Next. + Filter string + + // The project ID of the listed datasets. + // Set before the first call to Next. + ProjectID string + + ctx context.Context + c *Client + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Dataset +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *DatasetIterator) Next() (*Dataset, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +// for testing +var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) { + call := it.c.bqs.Datasets.List(it.ProjectID). + Context(it.ctx). + PageToken(pageToken). 
+ All(it.ListHidden) + setClientHeader(call.Header()) + if pageSize > 0 { + call.MaxResults(int64(pageSize)) + } + if it.Filter != "" { + call.Filter(it.Filter) + } + var res *bq.DatasetList + err := runWithRetry(it.ctx, func() (err error) { + res, err = call.Do() + return err + }) + return res, err +} + +func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) { + res, err := listDatasets(it, pageSize, pageToken) + if err != nil { + return "", err + } + for _, d := range res.Datasets { + it.items = append(it.items, &Dataset{ + ProjectID: d.DatasetReference.ProjectId, + DatasetID: d.DatasetReference.DatasetId, + c: it.c, + }) + } + return res.NextPageToken, nil +} + +// An AccessEntry describes the permissions that an entity has on a dataset. +type AccessEntry struct { + Role AccessRole // The role of the entity + EntityType EntityType // The type of entity + Entity string // The entity (individual or group) granted access + View *Table // The view granted access (EntityType must be ViewEntity) +} + +// AccessRole is the level of access to grant to a dataset. +type AccessRole string + +const ( + OwnerRole AccessRole = "OWNER" + ReaderRole AccessRole = "READER" + WriterRole AccessRole = "WRITER" +) + +// EntityType is the type of entity in an AccessEntry. +type EntityType int + +const ( + // A domain (e.g. "example.com") + DomainEntity EntityType = iota + 1 + + // Email address of a Google Group + GroupEmailEntity + + // Email address of an individual user. + UserEmailEntity + + // A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers. + SpecialGroupEntity + + // A BigQuery view. + ViewEntity +) + +func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) { + q := &bq.DatasetAccess{Role: string(e.Role)} + switch e.EntityType { + case DomainEntity: + q.Domain = e.Entity + case GroupEmailEntity: + q.GroupByEmail = e.Entity + case UserEmailEntity: + q.UserByEmail = e.Entity + case SpecialGroupEntity: + q.SpecialGroup = e.Entity + case ViewEntity: + q.View = e.View.toBQ() + default: + return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType) + } + return q, nil +} + +func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) { + e := &AccessEntry{Role: AccessRole(q.Role)} + switch { + case q.Domain != "": + e.Entity = q.Domain + e.EntityType = DomainEntity + case q.GroupByEmail != "": + e.Entity = q.GroupByEmail + e.EntityType = GroupEmailEntity + case q.UserByEmail != "": + e.Entity = q.UserByEmail + e.EntityType = UserEmailEntity + case q.SpecialGroup != "": + e.Entity = q.SpecialGroup + e.EntityType = SpecialGroupEntity + case q.View != nil: + e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId) + e.EntityType = ViewEntity + default: + return nil, errors.New("bigquery: invalid access value") + } + return e, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/dataset_test.go b/vendor/cloud.google.com/go/bigquery/dataset_test.go new file mode 100644 index 0000000..a434530 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/dataset_test.go @@ -0,0 +1,328 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+
+	"cloud.google.com/go/internal/testutil"
+
+	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
+	itest "google.golang.org/api/iterator/testing"
+)
+
+// listTablesStub services table-listing requests by returning data from an in-memory list of values.
+type listTablesStub struct {
+	expectedProject, expectedDataset string
+	tables                           []*bq.TableListTables
+}
+
+func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
+	if it.dataset.ProjectID != s.expectedProject {
+		return nil, errors.New("wrong project id")
+	}
+	if it.dataset.DatasetID != s.expectedDataset {
+		return nil, errors.New("wrong dataset id")
+	}
+	const maxPageSize = 2
+	if pageSize <= 0 || pageSize > maxPageSize {
+		pageSize = maxPageSize
+	}
+	start := 0
+	if pageToken != "" {
+		var err error
+		start, err = strconv.Atoi(pageToken)
+		if err != nil {
+			return nil, err
+		}
+	}
+	end := start + pageSize
+	if end > len(s.tables) {
+		end = len(s.tables)
+	}
+	nextPageToken := ""
+	if end < len(s.tables) {
+		nextPageToken = strconv.Itoa(end)
+	}
+	return &bq.TableList{
+		Tables:        s.tables[start:end],
+		NextPageToken: nextPageToken,
+	}, nil
+}
+
+func TestTables(t *testing.T) {
+	c := &Client{projectID: "p1"}
+	inTables := []*bq.TableListTables{
+		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
+		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
+		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
+	}
+	outTables := []*Table{
+		{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
+		{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
+		{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
+	}
+
+	lts := &listTablesStub{
+		expectedProject: "p1",
+		expectedDataset: "d1",
+		tables:          inTables,
+	}
+	old := listTables
+	listTables = lts.listTables // cannot use t.Parallel with this test
+	defer func() { listTables = old }()
+
+	msg, ok := itest.TestIterator(outTables,
+		func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
+		func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
+	if !ok {
+		t.Error(msg)
+	}
+}
+
+type listDatasetsStub struct {
+	expectedProject string
+	datasets        []*bq.DatasetListDatasets
+	hidden          map[*bq.DatasetListDatasets]bool
+}
+
+func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
+	const maxPageSize = 2
+	if pageSize <= 0 || pageSize > maxPageSize {
+		pageSize = maxPageSize
+	}
+	if it.Filter != "" {
+		return nil, errors.New("filter not supported")
+	}
+	if it.ProjectID != s.expectedProject {
+		return nil, errors.New("bad project ID")
+	}
+	start := 0
+	if pageToken != "" {
+		var err error
+		start, err = strconv.Atoi(pageToken)
+		if err != nil {
+			return nil, err
+		}
+	}
+	var (
+		i             int
+		result        []*bq.DatasetListDatasets
+		nextPageToken string
+	)
+	for i = start; len(result) < pageSize &&
i < len(s.datasets); i++ { + if s.hidden[s.datasets[i]] && !it.ListHidden { + continue + } + result = append(result, s.datasets[i]) + } + if i < len(s.datasets) { + nextPageToken = strconv.Itoa(i) + } + return &bq.DatasetList{ + Datasets: result, + NextPageToken: nextPageToken, + }, nil +} + +func TestDatasets(t *testing.T) { + client := &Client{projectID: "p"} + inDatasets := []*bq.DatasetListDatasets{ + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}}, + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}}, + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}}, + {DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}}, + } + outDatasets := []*Dataset{ + {"p", "a", client}, + {"p", "b", client}, + {"p", "hidden", client}, + {"p", "c", client}, + } + lds := &listDatasetsStub{ + expectedProject: "p", + datasets: inDatasets, + hidden: map[*bq.DatasetListDatasets]bool{inDatasets[2]: true}, + } + old := listDatasets + listDatasets = lds.listDatasets // cannot use t.Parallel with this test + defer func() { listDatasets = old }() + + msg, ok := itest.TestIterator(outDatasets, + func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it }, + func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) + if !ok { + t.Fatalf("ListHidden=true: %s", msg) + } + + msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]}, + func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it }, + func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) + if !ok { + t.Fatalf("ListHidden=false: %s", msg) + } +} + +func TestDatasetToBQ(t *testing.T) { + for _, test := range []struct { + in *DatasetMetadata + want *bq.Dataset + }{ + {nil, &bq.Dataset{}}, + {&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}}, + {&DatasetMetadata{ + Name: "name", + Description: "desc", + DefaultTableExpiration: time.Hour, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}}, + }, &bq.Dataset{ + FriendlyName: "name", + Description: "desc", + DefaultTableExpirationMs: 60 * 60 * 1000, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}}, + }}, + } { + got, err := test.in.toBQ() + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want) + } + } + + // Check that non-writeable fields are unset. 
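+	// Each DatasetMetadata below sets exactly one read-only field, so toBQ
+	// should reject it with an error.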
+ aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + for _, dm := range []*DatasetMetadata{ + {CreationTime: aTime}, + {LastModifiedTime: aTime}, + {FullID: "x"}, + {ETag: "e"}, + } { + if _, err := dm.toBQ(); err == nil { + t.Errorf("%+v: got nil, want error", dm) + } + } +} + +func TestBQToDatasetMetadata(t *testing.T) { + cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + cMillis := cTime.UnixNano() / 1e6 + mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local) + mMillis := mTime.UnixNano() / 1e6 + q := &bq.Dataset{ + CreationTime: cMillis, + LastModifiedTime: mMillis, + FriendlyName: "name", + Description: "desc", + DefaultTableExpirationMs: 60 * 60 * 1000, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*bq.DatasetAccess{ + {Role: "READER", UserByEmail: "joe@example.com"}, + {Role: "WRITER", GroupByEmail: "users@example.com"}, + }, + Etag: "etag", + } + want := &DatasetMetadata{ + CreationTime: cTime, + LastModifiedTime: mTime, + Name: "name", + Description: "desc", + DefaultTableExpiration: time.Hour, + Location: "EU", + Labels: map[string]string{"x": "y"}, + Access: []*AccessEntry{ + {Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity}, + {Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity}, + }, + ETag: "etag", + } + got, err := bqToDatasetMetadata(q) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("-got, +want:\n%s", diff) + } +} + +func TestDatasetMetadataToUpdateToBQ(t *testing.T) { + dm := DatasetMetadataToUpdate{ + Description: "desc", + Name: "name", + DefaultTableExpiration: time.Hour, + } + dm.SetLabel("label", "value") + dm.DeleteLabel("del") + + got, err := dm.toBQ() + if err != nil { + t.Fatal(err) + } + want := &bq.Dataset{ + Description: "desc", + FriendlyName: "name", + DefaultTableExpirationMs: 60 * 60 * 1000, + Labels: map[string]string{"label": "value"}, + ForceSendFields: []string{"Description", "FriendlyName"}, + NullFields: []string{"Labels.del"}, + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("-got, +want:\n%s", diff) + } +} + +func TestConvertAccessEntry(t *testing.T) { + c := &Client{projectID: "pid"} + for _, e := range []*AccessEntry{ + {Role: ReaderRole, Entity: "e", EntityType: DomainEntity}, + {Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity}, + {Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity}, + {Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity}, + {Role: ReaderRole, EntityType: ViewEntity, + View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}}, + } { + q, err := e.toBQ() + if err != nil { + t.Fatal(err) + } + got, err := bqToAccessEntry(q, c) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } + } + + e := &AccessEntry{Role: ReaderRole, Entity: "e"} + if _, err := e.toBQ(); err == nil { + t.Error("got nil, want error") + } + if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil { + t.Error("got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go new file mode 100644 index 0000000..11eb831 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go @@ -0,0 +1,689 @@ +// Copyright 2017, Google LLC All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package datatransfer + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + GetDataSource []gax.CallOption + ListDataSources []gax.CallOption + CreateTransferConfig []gax.CallOption + UpdateTransferConfig []gax.CallOption + DeleteTransferConfig []gax.CallOption + GetTransferConfig []gax.CallOption + ListTransferConfigs []gax.CallOption + ScheduleTransferRuns []gax.CallOption + GetTransferRun []gax.CallOption + DeleteTransferRun []gax.CallOption + ListTransferRuns []gax.CallOption + ListTransferLogs []gax.CallOption + CheckValidCreds []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + GetDataSource: retry[[2]string{"default", "idempotent"}], + ListDataSources: retry[[2]string{"default", "idempotent"}], + CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}], + UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}], + DeleteTransferConfig: retry[[2]string{"default", "idempotent"}], + GetTransferConfig: retry[[2]string{"default", "idempotent"}], + ListTransferConfigs: retry[[2]string{"default", "idempotent"}], + ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}], + GetTransferRun: retry[[2]string{"default", "idempotent"}], + DeleteTransferRun: retry[[2]string{"default", "idempotent"}], + ListTransferRuns: retry[[2]string{"default", "idempotent"}], + ListTransferLogs: retry[[2]string{"default", "idempotent"}], + CheckValidCreds: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with BigQuery Data Transfer API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client datatransferpb.DataTransferServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new data transfer service client. 
+// +// The Google BigQuery Data Transfer Service API enables BigQuery users to +// configure the transfer of their data from other Google Products into BigQuery. +// This service contains methods that are end user exposed. It backs up the +// frontend. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: datatransferpb.NewDataTransferServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ProjectPath returns the path for the project resource. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// LocationPath returns the path for the location resource. +func LocationPath(project, location string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "" +} + +// LocationDataSourcePath returns the path for the location data source resource. +func LocationDataSourcePath(project, location, dataSource string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "/dataSources/" + + dataSource + + "" +} + +// LocationTransferConfigPath returns the path for the location transfer config resource. +func LocationTransferConfigPath(project, location, transferConfig string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "/transferConfigs/" + + transferConfig + + "" +} + +// LocationRunPath returns the path for the location run resource. +func LocationRunPath(project, location, transferConfig, run string) string { + return "" + + "projects/" + + project + + "/locations/" + + location + + "/transferConfigs/" + + transferConfig + + "/runs/" + + run + + "" +} + +// DataSourcePath returns the path for the data source resource. +func DataSourcePath(project, dataSource string) string { + return "" + + "projects/" + + project + + "/dataSources/" + + dataSource + + "" +} + +// TransferConfigPath returns the path for the transfer config resource. +func TransferConfigPath(project, transferConfig string) string { + return "" + + "projects/" + + project + + "/transferConfigs/" + + transferConfig + + "" +} + +// RunPath returns the path for the run resource. +func RunPath(project, transferConfig, run string) string { + return "" + + "projects/" + + project + + "/transferConfigs/" + + transferConfig + + "/runs/" + + run + + "" +} + +// GetDataSource retrieves a supported data source and returns its settings, +// which can be used for UI rendering. 
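+//
+// A hypothetical call (the project and data source IDs are placeholders):
+//
+//	req := &datatransferpb.GetDataSourceRequest{
+//		Name: DataSourcePath("my-project", "my-data-source"),
+//	}
+//	ds, err := c.GetDataSource(ctx, req)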
+func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...) + var resp *datatransferpb.DataSource + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDataSources lists supported data sources and returns their settings, +// which can be used for UI rendering. +func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...) + it := &DataSourceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) { + var resp *datatransferpb.ListDataSourcesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.DataSources, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateTransferConfig creates a new data transfer configuration. +func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...) + var resp *datatransferpb.TransferConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateTransferConfig updates a data transfer configuration. +// All fields must be set, even if they are not updated. +func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...) + var resp *datatransferpb.TransferConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) 
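+	// Surface any RPC error; otherwise return the updated transfer config.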
+ if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteTransferConfig deletes a data transfer configuration, +// including any associated transfer runs and logs. +func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetTransferConfig returns information about a data transfer config. +func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...) + var resp *datatransferpb.TransferConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTransferConfigs returns information about all data transfers in the project. +func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...) + it := &TransferConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) { + var resp *datatransferpb.ListTransferConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TransferConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time]. +// For each date - or whatever granularity the data source supports - in the +// range, one transfer run is created. +// Note that runs are created per UTC time in the time range. +func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...) 
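+	// The full slice expression above caps capacity, so appending the
+	// caller's options cannot overwrite the shared defaults.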
+ var resp *datatransferpb.ScheduleTransferRunsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTransferRun returns information about the particular transfer run. +func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...) + var resp *datatransferpb.TransferRun + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteTransferRun deletes the specified transfer run. +func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTransferRuns returns information about running and completed jobs. +func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...) + it := &TransferRunIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) { + var resp *datatransferpb.ListTransferRunsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TransferRuns, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListTransferLogs returns user facing log messages for the data transfer run. +func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...) 
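+	// Build a paged iterator: InternalFetch issues one ListTransferLogs RPC
+	// per page, and PageInfo drives the page-token bookkeeping.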
+	it := &TransferMessageIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
+		var resp *datatransferpb.ListTransferLogsResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.TransferMessages, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// CheckValidCreds returns true if valid credentials exist for the given data source and
+// requesting user.
+// Some data sources don't support service accounts, so we need to talk to
+// them on behalf of the end user. This API just checks whether we have an
+// OAuth token for the particular user, which is a prerequisite before the
+// user can create a transfer config.
+func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
+	var resp *datatransferpb.CheckValidCredsResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// DataSourceIterator manages a stream of *datatransferpb.DataSource.
+type DataSourceIterator struct {
+	items    []*datatransferpb.DataSource
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
+	var item *datatransferpb.DataSource
+	if err := it.nextFunc(); err != nil {
+		return item, err
+	}
+	item = it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *DataSourceIterator) bufLen() int {
+	return len(it.items)
+}
+
+func (it *DataSourceIterator) takeBuf() interface{} {
+	b := it.items
+	it.items = nil
+	return b
+}
+
+// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
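+//
+// A typical loop (the request fields and the parent variable are illustrative):
+//
+//	it := c.ListTransferConfigs(ctx, &datatransferpb.ListTransferConfigsRequest{Parent: parent})
+//	for {
+//		cfg, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		// handle err, then use cfg
+//	}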
+type TransferConfigIterator struct { + items []*datatransferpb.TransferConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) { + var item *datatransferpb.TransferConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TransferConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *TransferConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage. +type TransferMessageIterator struct { + items []*datatransferpb.TransferMessage + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) { + var item *datatransferpb.TransferMessage + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TransferMessageIterator) bufLen() int { + return len(it.items) +} + +func (it *TransferMessageIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TransferRunIterator manages a stream of *datatransferpb.TransferRun. +type TransferRunIterator struct { + items []*datatransferpb.TransferRun + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TransferRunIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) { + var item *datatransferpb.TransferRun + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TransferRunIterator) bufLen() int { + return len(it.items) +} + +func (it *TransferRunIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go new file mode 100644 index 0000000..fcd1806 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go @@ -0,0 +1,288 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package datatransfer_test + +import ( + "cloud.google.com/go/bigquery/datatransfer/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_GetDataSource() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.GetDataSourceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDataSource(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDataSources() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListDataSourcesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDataSources(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_CreateTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.CreateTransferConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.UpdateTransferConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.DeleteTransferConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_GetTransferConfig() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.GetTransferConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTransferConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListTransferConfigs() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListTransferConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTransferConfigs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ScheduleTransferRuns() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ScheduleTransferRunsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ScheduleTransferRuns(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetTransferRun() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.GetTransferRunRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTransferRun(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeleteTransferRun() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.DeleteTransferRunRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTransferRun(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ListTransferRuns() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListTransferRunsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTransferRuns(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListTransferLogs() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.ListTransferLogsRequest{ + // TODO: Fill request struct fields. 
+ } + it := c.ListTransferLogs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_CheckValidCreds() { + ctx := context.Background() + c, err := datatransfer.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &datatransferpb.CheckValidCredsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CheckValidCreds(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go new file mode 100644 index 0000000..9225220 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go @@ -0,0 +1,49 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package datatransfer is an auto-generated package for the +// BigQuery Data Transfer API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Transfers data from partner SaaS applications to Google BigQuery on a +// scheduled, managed basis. +package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + } +} diff --git a/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go new file mode 100644 index 0000000..1c5776e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go @@ -0,0 +1,1146 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
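+
+// The tests in this file follow the usual pattern for generated GAPIC
+// clients: TestMain (below) starts an in-process gRPC server backed by
+// mockDataTransferServer, and each test primes the mock with a canned
+// response (resps) or error (err), invokes the client method under test,
+// then checks both the request the mock recorded (reqs) and the value or
+// error returned to the caller. No real BigQuery service is contacted.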
+ +package datatransfer + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDataTransferServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + datatransferpb.DataTransferServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDataTransferServer) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest) (*datatransferpb.DataSource, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.DataSource), nil +} + +func (s *mockDataTransferServer) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest) (*datatransferpb.ListDataSourcesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListDataSourcesResponse), nil +} + +func (s *mockDataTransferServer) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest) (*datatransferpb.TransferConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferConfig), nil +} + +func (s *mockDataTransferServer) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest) (*datatransferpb.TransferConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferConfig), nil +} + +func (s *mockDataTransferServer) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", 
xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDataTransferServer) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest) (*datatransferpb.TransferConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferConfig), nil +} + +func (s *mockDataTransferServer) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest) (*datatransferpb.ListTransferConfigsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListTransferConfigsResponse), nil +} + +func (s *mockDataTransferServer) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest) (*datatransferpb.ScheduleTransferRunsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ScheduleTransferRunsResponse), nil +} + +func (s *mockDataTransferServer) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest) (*datatransferpb.TransferRun, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.TransferRun), nil +} + +func (s *mockDataTransferServer) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDataTransferServer) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest) (*datatransferpb.ListTransferRunsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListTransferRunsResponse), nil +} + +func (s *mockDataTransferServer) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest) (*datatransferpb.ListTransferLogsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return 
nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.ListTransferLogsResponse), nil +} + +func (s *mockDataTransferServer) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest) (*datatransferpb.CheckValidCredsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*datatransferpb.CheckValidCredsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockDataTransfer mockDataTransferServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + datatransferpb.RegisterDataTransferServiceServer(serv, &mockDataTransfer) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDataTransferServiceGetDataSource(t *testing.T) { + var name2 string = "name2-1052831874" + var dataSourceId string = "dataSourceId-1015796374" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var clientId string = "clientId-1904089585" + var supportsMultipleTransfers bool = true + var updateDeadlineSeconds int32 = 991471694 + var defaultSchedule string = "defaultSchedule-800168235" + var supportsCustomSchedule bool = true + var helpUrl string = "helpUrl-789431439" + var defaultDataRefreshWindowDays int32 = -1804935157 + var manualRunsDisabled bool = true + var expectedResponse = &datatransferpb.DataSource{ + Name: name2, + DataSourceId: dataSourceId, + DisplayName: displayName, + Description: description, + ClientId: clientId, + SupportsMultipleTransfers: supportsMultipleTransfers, + UpdateDeadlineSeconds: updateDeadlineSeconds, + DefaultSchedule: defaultSchedule, + SupportsCustomSchedule: supportsCustomSchedule, + HelpUrl: helpUrl, + DefaultDataRefreshWindowDays: defaultDataRefreshWindowDays, + ManualRunsDisabled: manualRunsDisabled, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var request = &datatransferpb.GetDataSourceRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDataSource(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceGetDataSourceError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var 
request = &datatransferpb.GetDataSourceRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDataSource(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceListDataSources(t *testing.T) { + var nextPageToken string = "" + var dataSourcesElement *datatransferpb.DataSource = &datatransferpb.DataSource{} + var dataSources = []*datatransferpb.DataSource{dataSourcesElement} + var expectedResponse = &datatransferpb.ListDataSourcesResponse{ + NextPageToken: nextPageToken, + DataSources: dataSources, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]") + var request = &datatransferpb.ListDataSourcesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDataSources(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.DataSources[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceListDataSourcesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]") + var request = &datatransferpb.ListDataSourcesRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDataSources(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceCreateTransferConfig(t *testing.T) { + var name string = "name3373707" + var destinationDatasetId string = "destinationDatasetId1541564179" + var displayName string = "displayName1615086568" + var dataSourceId string = "dataSourceId-1015796374" + var schedule string = "schedule-697920873" + var dataRefreshWindowDays int32 = 327632845 + var disabled bool = true + var userId int64 = -147132913 + var datasetRegion string = "datasetRegion959248539" + var expectedResponse = &datatransferpb.TransferConfig{ + Name: name, + DestinationDatasetId: destinationDatasetId, + DisplayName: displayName, + DataSourceId: dataSourceId, + Schedule: schedule, + DataRefreshWindowDays: dataRefreshWindowDays, + Disabled: disabled, + UserId: userId, + DatasetRegion: datasetRegion, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]") + var transferConfig *datatransferpb.TransferConfig = 
&datatransferpb.TransferConfig{} + var request = &datatransferpb.CreateTransferConfigRequest{ + Parent: formattedParent, + TransferConfig: transferConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceCreateTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]") + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var request = &datatransferpb.CreateTransferConfigRequest{ + Parent: formattedParent, + TransferConfig: transferConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceUpdateTransferConfig(t *testing.T) { + var name string = "name3373707" + var destinationDatasetId string = "destinationDatasetId1541564179" + var displayName string = "displayName1615086568" + var dataSourceId string = "dataSourceId-1015796374" + var schedule string = "schedule-697920873" + var dataRefreshWindowDays int32 = 327632845 + var disabled bool = true + var userId int64 = -147132913 + var datasetRegion string = "datasetRegion959248539" + var expectedResponse = &datatransferpb.TransferConfig{ + Name: name, + DestinationDatasetId: destinationDatasetId, + DisplayName: displayName, + DataSourceId: dataSourceId, + Schedule: schedule, + DataRefreshWindowDays: dataRefreshWindowDays, + Disabled: disabled, + UserId: userId, + DatasetRegion: datasetRegion, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &datatransferpb.UpdateTransferConfigRequest{ + TransferConfig: transferConfig, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceUpdateTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &datatransferpb.UpdateTransferConfigRequest{ + 
TransferConfig: transferConfig, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceDeleteTransferConfig(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.DeleteTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDataTransferServiceDeleteTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.DeleteTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDataTransferServiceGetTransferConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var destinationDatasetId string = "destinationDatasetId1541564179" + var displayName string = "displayName1615086568" + var dataSourceId string = "dataSourceId-1015796374" + var schedule string = "schedule-697920873" + var dataRefreshWindowDays int32 = 327632845 + var disabled bool = true + var userId int64 = -147132913 + var datasetRegion string = "datasetRegion959248539" + var expectedResponse = &datatransferpb.TransferConfig{ + Name: name2, + DestinationDatasetId: destinationDatasetId, + DisplayName: displayName, + DataSourceId: dataSourceId, + Schedule: schedule, + DataRefreshWindowDays: dataRefreshWindowDays, + Disabled: disabled, + UserId: userId, + DatasetRegion: datasetRegion, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.GetTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTransferConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", 
got, want) + } +} + +func TestDataTransferServiceGetTransferConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.GetTransferConfigRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTransferConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceListTransferConfigs(t *testing.T) { + var nextPageToken string = "" + var transferConfigsElement *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{} + var transferConfigs = []*datatransferpb.TransferConfig{transferConfigsElement} + var expectedResponse = &datatransferpb.ListTransferConfigsResponse{ + NextPageToken: nextPageToken, + TransferConfigs: transferConfigs, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]") + var request = &datatransferpb.ListTransferConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferConfigs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TransferConfigs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceListTransferConfigsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]") + var request = &datatransferpb.ListTransferConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferConfigs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceScheduleTransferRuns(t *testing.T) { + var expectedResponse *datatransferpb.ScheduleTransferRunsResponse = &datatransferpb.ScheduleTransferRunsResponse{} + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{} + var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{} + var request = &datatransferpb.ScheduleTransferRunsRequest{ + Parent: formattedParent, + StartTime: startTime, + EndTime: endTime, + } + + c, err
:= NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ScheduleTransferRuns(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceScheduleTransferRunsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{} + var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{} + var request = &datatransferpb.ScheduleTransferRunsRequest{ + Parent: formattedParent, + StartTime: startTime, + EndTime: endTime, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ScheduleTransferRuns(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceGetTransferRun(t *testing.T) { + var name2 string = "name2-1052831874" + var destinationDatasetId string = "destinationDatasetId1541564179" + var dataSourceId string = "dataSourceId-1015796374" + var userId int64 = -147132913 + var schedule string = "schedule-697920873" + var expectedResponse = &datatransferpb.TransferRun{ + Name: name2, + DestinationDatasetId: destinationDatasetId, + DataSourceId: dataSourceId, + UserId: userId, + Schedule: schedule, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.GetTransferRunRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTransferRun(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceGetTransferRunError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.GetTransferRunRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTransferRun(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceDeleteTransferRun(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDataTransfer.err = nil + mockDataTransfer.reqs
= nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.DeleteTransferRunRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferRun(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDataTransferServiceDeleteTransferRunError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.DeleteTransferRunRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTransferRun(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDataTransferServiceListTransferRuns(t *testing.T) { + var nextPageToken string = "" + var transferRunsElement *datatransferpb.TransferRun = &datatransferpb.TransferRun{} + var transferRuns = []*datatransferpb.TransferRun{transferRunsElement} + var expectedResponse = &datatransferpb.ListTransferRunsResponse{ + NextPageToken: nextPageToken, + TransferRuns: transferRuns, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.ListTransferRunsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferRuns(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TransferRuns[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceListTransferRunsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]") + var request = &datatransferpb.ListTransferRunsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferRuns(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceListTransferLogs(t *testing.T) { + var nextPageToken string = "" + var 
transferMessagesElement *datatransferpb.TransferMessage = &datatransferpb.TransferMessage{} + var transferMessages = []*datatransferpb.TransferMessage{transferMessagesElement} + var expectedResponse = &datatransferpb.ListTransferLogsResponse{ + NextPageToken: nextPageToken, + TransferMessages: transferMessages, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedParent string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.ListTransferLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferLogs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TransferMessages[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceListTransferLogsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedParent string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]") + var request = &datatransferpb.ListTransferLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTransferLogs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDataTransferServiceCheckValidCreds(t *testing.T) { + var hasValidCreds bool = false + var expectedResponse = &datatransferpb.CheckValidCredsResponse{ + HasValidCreds: hasValidCreds, + } + + mockDataTransfer.err = nil + mockDataTransfer.reqs = nil + + mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse) + + var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var request = &datatransferpb.CheckValidCredsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CheckValidCreds(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDataTransfer.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDataTransferServiceCheckValidCredsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDataTransfer.err = gstatus.Error(errCode, "test error") + + var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]") + var request = &datatransferpb.CheckValidCredsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CheckValidCreds(context.Background(), request) + + if 
st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go new file mode 100644 index 0000000..3eb7b93 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/doc.go @@ -0,0 +1,297 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package bigquery provides a client for the BigQuery service. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + +The following assumes a basic familiarity with BigQuery concepts. +See https://cloud.google.com/bigquery/docs. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, projectID) + if err != nil { + // TODO: Handle error. + } + +Querying + +To query existing tables, create a Query and call its Read method: + + q := client.Query(` + SELECT year, SUM(number) as num + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year + `) + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + +Then iterate through the resulting rows. You can store a row using +anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value. +A slice is simplest: + + for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) + } + +You can also use a struct whose exported fields match the query: + + type Count struct { + Year int + Num int + } + for { + var c Count + err := it.Next(&c) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(c) + } + +You can also start the query running and get the results later. +Create the query as above, but call Run instead of Read. This returns a Job, +which represents an asynchronous operation. + + job, err := q.Run(ctx) + if err != nil { + // TODO: Handle error. + } + +Get the job's ID, a printable string. You can save this string to retrieve +the results at a later time, even in another process. + + jobID := job.ID() + fmt.Printf("The job ID is %s\n", jobID) + +To retrieve the job's results from the ID, first look up the Job: + + job, err = client.JobFromID(ctx, jobID) + if err != nil { + // TODO: Handle error. + } + +Use the Job.Read method to obtain an iterator, and loop over the rows. +Query.Read is just a convenience method that combines Query.Run and Job.Read. + + it, err = job.Read(ctx) + if err != nil { + // TODO: Handle error. + } + // Proceed with iteration as above.
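+
+You can also have Next fill a map of bigquery.Value keyed by column name.
+The following is a minimal sketch reusing the iterator from the query above
+(it assumes the map form of Next mentioned earlier; use a fresh map for
+each row):
+
+    for {
+        row := map[string]bigquery.Value{}
+        err := it.Next(&row)
+        if err == iterator.Done {
+            break
+        }
+        if err != nil {
+            // TODO: Handle error.
+        }
+        fmt.Println(row["year"], row["num"])
+    }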
+ +Datasets and Tables + +You can refer to datasets in the client's project with the Dataset method, and +in other projects with the DatasetInProject method: + + myDataset := client.Dataset("my_dataset") + yourDataset := client.DatasetInProject("your-project-id", "your_dataset") + +These methods create references to datasets, not the datasets themselves. You can have +a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to +create a dataset from a reference: + + if err := myDataset.Create(ctx, nil); err != nil { + // TODO: Handle error. + } + +You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference +to an object in BigQuery that may or may not exist. + + table := myDataset.Table("my_table") + +You can create, delete and update the metadata of tables with methods on Table. +For instance, you could create a temporary table with: + + err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{ + ExpirationTime: time.Now().Add(1*time.Hour)}) + if err != nil { + // TODO: Handle error. + } + +We'll see how to create a table with a schema in the next section. + +Schemas + +There are two ways to construct schemas with this package. +You can build a schema by hand, like so: + + schema1 := bigquery.Schema{ + &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType}, + &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType}, + } + +Or you can infer the schema from a struct: + + type student struct { + Name string + Grades []int + } + schema2, err := bigquery.InferSchema(student{}) + if err != nil { + // TODO: Handle error. + } + // schema1 and schema2 are identical. + +Struct inference supports tags like those of the encoding/json package, +so you can change names, ignore fields, or mark a field as nullable (non-required): + + type student2 struct { + Name string `bigquery:"full_name"` + Grades []int + Secret string `bigquery:"-"` + Optional int `bigquery:",nullable"` + } + schema3, err := bigquery.InferSchema(student2{}) + if err != nil { + // TODO: Handle error. + } + // schema3 has required fields "full_name" and "Grades", and nullable field "Optional". + +Having constructed a schema, you can create a table with it like so: + + if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil { + // TODO: Handle error. + } + +Copying + +You can copy one or more tables to another table. Begin by constructing a Copier +describing the copy. Then set any desired copy options, and finally call Run to get a Job: + + copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src")) + copier.WriteDisposition = bigquery.WriteTruncate + job, err = copier.Run(ctx) + if err != nil { + // TODO: Handle error. + } + +You can chain the call to Run if you don't want to set options: + + job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + +You can wait for your job to complete: + + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + +Job.Wait polls with exponential backoff. You can also poll yourself, if you +wish: + + for { + status, err := job.Status(ctx) + if err != nil { + // TODO: Handle error.
+ } + if status.Done() { + if status.Err() != nil { + log.Fatalf("Job failed with error %v", status.Err()) + } + break + } + time.Sleep(pollInterval) + } + +Loading and Uploading + +There are two ways to populate a table with this package: load the data from a Google Cloud Storage +object, or upload rows directly from your program. + +For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure +it as well, and call its Run method. + + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.AllowJaggedRows = true + loader := myDataset.Table("dest").LoaderFrom(gcsRef) + loader.CreateDisposition = bigquery.CreateNever + job, err = loader.Run(ctx) + // Poll the job for completion if desired, as above. + +To upload, first define a type that implements the ValueSaver interface, which has a single method named Save. +Then create an Uploader, and call its Put method with a slice of values. + + u := table.Uploader() + // Item implements the ValueSaver interface. + items := []*Item{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items); err != nil { + // TODO: Handle error. + } + +You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type +to specify the schema and insert ID by hand, or just supply the struct or struct pointer +directly and the schema will be inferred: + + type Item2 struct { + Name string + Size float64 + Count int + } + // Item2 does not implement ValueSaver; its schema will be inferred. + items2 := []*Item2{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items2); err != nil { + // TODO: Handle error. + } + +Extracting + +If you've been following so far, extracting data from a BigQuery table +into a Google Cloud Storage object will feel familiar. First create an +Extractor, then optionally configure it, and lastly call its Run method. + + extractor := table.ExtractorTo(gcsRef) + extractor.DisableHeader = true + job, err = extractor.Run(ctx) + // Poll the job for completion if desired, as above. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package bigquery // import "cloud.google.com/go/bigquery" diff --git a/vendor/cloud.google.com/go/bigquery/error.go b/vendor/cloud.google.com/go/bigquery/error.go new file mode 100644 index 0000000..b5abd15 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error.go @@ -0,0 +1,82 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + bq "google.golang.org/api/bigquery/v2" +) + +// An Error contains detailed information about a failed bigquery operation.
+type Error struct { + // Mirrors bq.ErrorProto, but drops DebugInfo + Location, Message, Reason string +} + +func (e Error) Error() string { + return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) +} + +func bqToError(ep *bq.ErrorProto) *Error { + if ep == nil { + return nil + } + return &Error{ + Location: ep.Location, + Message: ep.Message, + Reason: ep.Reason, + } +} + +// A MultiError contains multiple related errors. +type MultiError []error + +func (m MultiError) Error() string { + switch len(m) { + case 0: + return "(0 errors)" + case 1: + return m[0].Error() + case 2: + return m[0].Error() + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1) +} + +// RowInsertionError contains all errors that occurred when attempting to insert a row. +type RowInsertionError struct { + InsertID string // The InsertID associated with the affected row. + RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. + Errors MultiError +} + +func (e *RowInsertionError) Error() string { + errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s" + return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error()) +} + +// PutMultiError contains an error for each row which was not successfully inserted +// into a BigQuery table. +type PutMultiError []RowInsertionError + +func (pme PutMultiError) Error() string { + plural := "s" + if len(pme) == 1 { + plural = "" + } + + return fmt.Sprintf("%v row insertion%s failed", len(pme), plural) +} diff --git a/vendor/cloud.google.com/go/bigquery/error_test.go b/vendor/cloud.google.com/go/bigquery/error_test.go new file mode 100644 index 0000000..1745eec --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error_test.go @@ -0,0 +1,110 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
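+
+// The tests below exercise the error types defined in error.go. For context,
+// application code typically unpacks a failed streaming insert along these
+// lines (a sketch only; u and items stand in for an Uploader and a batch of
+// rows, as in the doc.go examples):
+//
+//	if err := u.Put(ctx, items); err != nil {
+//		if pme, ok := err.(bigquery.PutMultiError); ok {
+//			for _, rowErr := range pme {
+//				log.Printf("row %d (insert ID %q): %v",
+//					rowErr.RowIndex, rowErr.InsertID, rowErr.Errors)
+//			}
+//		}
+//	}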
+ +package bigquery + +import ( + "errors" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func rowInsertionError(msg string) RowInsertionError { + return RowInsertionError{Errors: []error{errors.New(msg)}} +} + +func TestPutMultiErrorString(t *testing.T) { + testCases := []struct { + errs PutMultiError + want string + }{ + { + errs: PutMultiError{}, + want: "0 row insertions failed", + }, + { + errs: PutMultiError{rowInsertionError("a")}, + want: "1 row insertion failed", + }, + { + errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")}, + want: "2 row insertions failed", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestMultiErrorString(t *testing.T) { + testCases := []struct { + errs MultiError + want string + }{ + { + errs: MultiError{}, + want: "(0 errors)", + }, + { + errs: MultiError{errors.New("a")}, + want: "a", + }, + { + errs: MultiError{errors.New("a"), errors.New("b")}, + want: "a (and 1 other error)", + }, + { + errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, + want: "a (and 2 other errors)", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestErrorFromErrorProto(t *testing.T) { + for _, test := range []struct { + in *bq.ErrorProto + want *Error + }{ + {nil, nil}, + { + in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"}, + want: &Error{Location: "L", Message: "M", Reason: "R"}, + }, + } { + if got := bqToError(test.in); !testutil.Equal(got, test.want) { + t.Errorf("%v: got %v, want %v", test.in, got, test.want) + } + } +} + +func TestErrorString(t *testing.T) { + e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"} + got := e.Error() + if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") { + t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/examples_test.go b/vendor/cloud.google.com/go/bigquery/examples_test.go new file mode 100644 index 0000000..7ec5155 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/examples_test.go @@ -0,0 +1,759 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery_test + +import ( + "fmt" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +func ExampleClient_Dataset() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error.
+ } + ds := client.Dataset("my_dataset") + fmt.Println(ds) +} + +func ExampleClient_DatasetInProject() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.DatasetInProject("their-project-id", "their-dataset") + fmt.Println(ds) +} + +func ExampleClient_Datasets() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Datasets(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleClient_DatasetsInProject() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.DatasetsInProject(ctx, "their-project-id") + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func getJobID() string { return "" } + +func ExampleClient_JobFromID() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere. + job, err := client.JobFromID(ctx, jobID) + if err != nil { + // TODO: Handle error. + } + fmt.Println(job.LastStatus()) // Display the job's status. +} + +func ExampleClient_Jobs() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Jobs(ctx) + it.State = bigquery.Running // list only running jobs. + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleNewGCSReference() { + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + fmt.Println(gcsRef) +} + +func ExampleClient_Query() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + q.DefaultProjectID = "project-id" + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +func ExampleClient_Query_parameters() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select num from t1 where name = @user") + q.Parameters = []bigquery.QueryParameter{ + {Name: "user", Value: "Elizabeth"}, + } + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +func ExampleQuery_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + for { + var row []bigquery.Value + err := it.Next(&row) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(row) + } +} + +func ExampleRowIterator_Next_struct() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + + type score struct { + Name string + Num int + } + + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + for { + var s score + err := it.Next(&s) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) + } +} + +func ExampleJob_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + // Call Query.Run to get a Job, then call Read on the job. + // Note: Query.Read is a shorthand for this. + job, err := q.Run(ctx) + if err != nil { + // TODO: Handle error. + } + it, err := job.Read(ctx) + if err != nil { + // TODO: Handle error. + } + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleJob_Wait() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleJob_Config() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + jc, err := job.Config() + if err != nil { + // TODO: Handle error. + } + copyConfig := jc.(*bigquery.CopyConfig) + fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition) +} + +func ExampleDataset_Create() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil { + // TODO: Handle error. + } +} + +func ExampleDataset_Delete() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleDataset_Metadata() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + md, err := client.Dataset("my_dataset").Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +// This example illustrates how to perform a read-modify-write sequence on dataset +// metadata. Passing the metadata's ETag to the Update call ensures that the call +// will fail if the metadata was changed since the read. +func ExampleDataset_Update_readModifyWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + md, err := ds.Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + md2, err := ds.Update(ctx, + bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name}, + md.ETag) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md2) +} + +// To perform a blind write, ignoring the existing state (and possibly overwriting +// other updates), pass the empty string as the etag. 
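[Editor's note, not part of the vendored files; an aside before the blind-write example that follows.] The ETag-based read-modify-write shown above can be hardened into a retry loop: re-read the metadata whenever a concurrent writer invalidates the ETag. A minimal sketch, assuming a conflict surfaces as HTTP 412 (Precondition Failed) carried by *googleapi.Error (imports: net/http, golang.org/x/net/context, google.golang.org/api/googleapi); the helper name is hypothetical:

    func updateDatasetDescription(ctx context.Context, ds *bigquery.Dataset, desc string) error {
        for {
            md, err := ds.Metadata(ctx)
            if err != nil {
                return err
            }
            _, err = ds.Update(ctx, bigquery.DatasetMetadataToUpdate{Description: desc}, md.ETag)
            if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusPreconditionFailed {
                continue // a concurrent update won the race; re-read and retry
            }
            return err // nil on success, or a non-conflict error
        }
    }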
+func ExampleDataset_Update_blindWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "") + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +func ExampleDataset_Table() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // Table creates a reference to the table. It does not create the actual + // table in BigQuery; to do so, use Table.Create. + t := client.Dataset("my_dataset").Table("my_table") + fmt.Println(t) +} + +func ExampleDataset_Tables() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Tables(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleDatasetIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Datasets(ctx) + for { + ds, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(ds) + } +} + +func ExampleInferSchema() { + type Item struct { + Name string + Size float64 + Count int + } + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + fmt.Println(err) + // TODO: Handle error. + } + for _, fs := range schema { + fmt.Println(fs.Name, fs.Type) + } + // Output: + // Name STRING + // Size FLOAT + // Count INTEGER +} + +func ExampleInferSchema_tags() { + type Item struct { + Name string + Size float64 + Count int `bigquery:"number"` + Secret []byte `bigquery:"-"` + Optional bool `bigquery:",nullable"` + } + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + fmt.Println(err) + // TODO: Handle error. + } + for _, fs := range schema { + fmt.Println(fs.Name, fs.Type, fs.Required) + } + // Output: + // Name STRING true + // Size FLOAT true + // number INTEGER true + // Optional BOOLEAN false +} + +func ExampleTable_Create() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("new-table") + if err := t.Create(ctx, nil); err != nil { + // TODO: Handle error. + } +} + +// Initialize a new table by passing TableMetadata to Table.Create. +func ExampleTable_Create_initialize() { + ctx := context.Background() + // Infer table schema from a Go type. + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + // TODO: Handle error. + } + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("new-table") + if err := t.Create(ctx, + &bigquery.TableMetadata{ + Name: "My New Table", + Schema: schema, + ExpirationTime: time.Now().Add(24 * time.Hour), + }); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Delete() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Metadata() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+    }
+    md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    fmt.Println(md)
+}
+
+func ExampleTable_Uploader() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+    _ = u // TODO: Use u.
+}
+
+func ExampleTable_Uploader_options() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+    u.SkipInvalidRows = true
+    u.IgnoreUnknownValues = true
+    _ = u // TODO: Use u.
+}
+
+func ExampleTable_CopierFrom() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    ds := client.Dataset("my_dataset")
+    c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
+    c.WriteDisposition = bigquery.WriteTruncate
+    // TODO: set other options on the Copier.
+    job, err := c.Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    status, err := job.Wait(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    if status.Err() != nil {
+        // TODO: Handle error.
+    }
+}
+
+func ExampleTable_ExtractorTo() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
+    gcsRef.FieldDelimiter = ":"
+    // TODO: set other options on the GCSReference.
+    ds := client.Dataset("my_dataset")
+    extractor := ds.Table("my_table").ExtractorTo(gcsRef)
+    extractor.DisableHeader = true
+    // TODO: set other options on the Extractor.
+    job, err := extractor.Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    status, err := job.Wait(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    if status.Err() != nil {
+        // TODO: Handle error.
+    }
+}
+
+func ExampleTable_LoaderFrom() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
+    gcsRef.AllowJaggedRows = true
+    gcsRef.MaxBadRecords = 5
+    gcsRef.Schema = schema
+    // TODO: set other options on the GCSReference.
+    ds := client.Dataset("my_dataset")
+    loader := ds.Table("my_table").LoaderFrom(gcsRef)
+    loader.CreateDisposition = bigquery.CreateNever
+    // TODO: set other options on the Loader.
+    job, err := loader.Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    status, err := job.Wait(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    if status.Err() != nil {
+        // TODO: Handle error.
+    }
+}
+
+func ExampleTable_LoaderFrom_reader() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    f, err := os.Open("data.csv")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    rs := bigquery.NewReaderSource(f)
+    rs.AllowJaggedRows = true
+    rs.MaxBadRecords = 5
+    rs.Schema = schema
+    // TODO: set other options on the ReaderSource.
+    ds := client.Dataset("my_dataset")
+    loader := ds.Table("my_table").LoaderFrom(rs)
+    loader.CreateDisposition = bigquery.CreateNever
+    // TODO: set other options on the Loader.
+    job, err := loader.Run(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    status, err := job.Wait(ctx)
+    if err != nil {
+        // TODO: Handle error.
+ } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Table("my_table").Read(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +// This example illustrates how to perform a read-modify-write sequence on table +// metadata. Passing the metadata's ETag to the Update call ensures that the call +// will fail if the metadata was changed since the read. +func ExampleTable_Update_readModifyWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("my_table") + md, err := t.Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + md2, err := t.Update(ctx, + bigquery.TableMetadataToUpdate{Name: "new " + md.Name}, + md.ETag) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md2) +} + +// To perform a blind write, ignoring the existing state (and possibly overwriting +// other updates), pass the empty string as the etag. +func ExampleTable_Update_blindWrite() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("my_table") + tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{ + Description: "my favorite table", + }, "") + if err != nil { + // TODO: Handle error. + } + fmt.Println(tm) +} + +func ExampleTableIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Tables(ctx) + for { + t, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(t) + } +} + +type Item struct { + Name string + Size float64 + Count int +} + +// Save implements the ValueSaver interface. +func (i *Item) Save() (map[string]bigquery.Value, string, error) { + return map[string]bigquery.Value{ + "Name": i.Name, + "Size": i.Size, + "Count": i.Count, + }, "", nil +} + +func ExampleUploader_Put() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + // Item implements the ValueSaver interface. + items := []*Item{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items); err != nil { + // TODO: Handle error. + } +} + +var schema bigquery.Schema + +func ExampleUploader_Put_structSaver() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + + type score struct { + Name string + Num int + } + + // Assume schema holds the table's schema. + savers := []*bigquery.StructSaver{ + {Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"}, + {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"}, + {Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"}, + } + if err := u.Put(ctx, savers); err != nil { + // TODO: Handle error. 
+    }
+}
+
+func ExampleUploader_Put_struct() {
+    ctx := context.Background()
+    client, err := bigquery.NewClient(ctx, "project-id")
+    if err != nil {
+        // TODO: Handle error.
+    }
+    u := client.Dataset("my_dataset").Table("my_table").Uploader()
+
+    type score struct {
+        Name string
+        Num  int
+    }
+    scores := []score{
+        {Name: "n1", Num: 12},
+        {Name: "n2", Num: 31},
+        {Name: "n3", Num: 7},
+    }
+    // Schema is inferred from the score type.
+    if err := u.Put(ctx, scores); err != nil {
+        // TODO: Handle error.
+    }
+}
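[Editor's note] The next file, external.go, defines ExternalDataConfig. For orientation, a hedged usage sketch: an external config is typically attached to a query as a temporary table definition. This assumes the package's QueryConfig.TableDefinitions field and a client built as in the examples above; bucket, object, and column names are illustrative:

    edc := &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.CSV,
        SourceURIs:   []string{"gs://my-bucket/data*.csv"},
        Schema:       bigquery.Schema{{Name: "name", Type: bigquery.StringFieldType}},
        Options:      &bigquery.CSVOptions{SkipLeadingRows: 1},
    }
    q := client.Query("SELECT name FROM tmp")
    q.TableDefinitions = map[string]bigquery.ExternalData{"tmp": edc}
    // Run or Read q as in the query examples above.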
diff --git a/vendor/cloud.google.com/go/bigquery/external.go b/vendor/cloud.google.com/go/bigquery/external.go
new file mode 100644
index 0000000..36eb9d9
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/external.go
@@ -0,0 +1,398 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+    "encoding/base64"
+    "unicode/utf8"
+
+    bq "google.golang.org/api/bigquery/v2"
+)
+
+// DataFormat describes the format of BigQuery table data.
+type DataFormat string
+
+// Constants describing the format of BigQuery table data.
+const (
+    CSV             DataFormat = "CSV"
+    Avro            DataFormat = "AVRO"
+    JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
+    DatastoreBackup DataFormat = "DATASTORE_BACKUP"
+    GoogleSheets    DataFormat = "GOOGLE_SHEETS"
+    Bigtable        DataFormat = "BIGTABLE"
+)
+
+// ExternalData is a table which is stored outside of BigQuery. It is implemented by
+// *ExternalDataConfig.
+// GCSReference also implements it, for backwards compatibility.
+type ExternalData interface {
+    toBQ() bq.ExternalDataConfiguration
+}
+
+// ExternalDataConfig describes data external to BigQuery that can be used
+// in queries and to create external tables.
+type ExternalDataConfig struct {
+    // The format of the data. Required.
+    SourceFormat DataFormat
+
+    // The fully-qualified URIs that point to your
+    // data in Google Cloud. Required.
+    //
+    // For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
+    // and it must come after the 'bucket' name. Size limits related to load jobs
+    // apply to external data sources.
+    //
+    // For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
+    // a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
+    //
+    // For Google Cloud Datastore backups, exactly one URI can be specified. Also,
+    // the '*' wildcard character is not allowed.
+    SourceURIs []string
+
+    // The schema of the data. Required for CSV and JSON; disallowed for the
+    // other formats.
+    Schema Schema
+
+    // Try to detect schema and format options automatically.
+    // Any option specified explicitly will be honored.
+    AutoDetect bool
+
+    // The compression type of the data.
+    Compression Compression
+
+    // IgnoreUnknownValues causes values not matching the schema to be
+    // tolerated. Unknown values are ignored. For CSV this ignores extra values
+    // at the end of a line. For JSON this ignores named values that do not
+    // match any column name. If this field is not set, records containing
+    // unknown values are treated as bad records. The MaxBadRecords field can
+    // be used to customize how bad records are handled.
+    IgnoreUnknownValues bool
+
+    // MaxBadRecords is the maximum number of bad records that will be ignored
+    // when reading data.
+    MaxBadRecords int64
+
+    // Additional options for CSV, GoogleSheets and Bigtable formats.
+    Options ExternalDataConfigOptions
+}
+
+func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
+    q := bq.ExternalDataConfiguration{
+        SourceFormat:        string(e.SourceFormat),
+        SourceUris:          e.SourceURIs,
+        Autodetect:          e.AutoDetect,
+        Compression:         string(e.Compression),
+        IgnoreUnknownValues: e.IgnoreUnknownValues,
+        MaxBadRecords:       e.MaxBadRecords,
+    }
+    if e.Schema != nil {
+        q.Schema = e.Schema.toBQ()
+    }
+    if e.Options != nil {
+        e.Options.populateExternalDataConfig(&q)
+    }
+    return q
+}
+
+func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
+    e := &ExternalDataConfig{
+        SourceFormat:        DataFormat(q.SourceFormat),
+        SourceURIs:          q.SourceUris,
+        AutoDetect:          q.Autodetect,
+        Compression:         Compression(q.Compression),
+        IgnoreUnknownValues: q.IgnoreUnknownValues,
+        MaxBadRecords:       q.MaxBadRecords,
+        Schema:              bqToSchema(q.Schema),
+    }
+    switch {
+    case q.CsvOptions != nil:
+        e.Options = bqToCSVOptions(q.CsvOptions)
+    case q.GoogleSheetsOptions != nil:
+        e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
+    case q.BigtableOptions != nil:
+        var err error
+        e.Options, err = bqToBigtableOptions(q.BigtableOptions)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return e, nil
+}
+
+// ExternalDataConfigOptions are additional options for external data configurations.
+// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
+type ExternalDataConfigOptions interface {
+    populateExternalDataConfig(*bq.ExternalDataConfiguration)
+}
+
+// CSVOptions are additional options for CSV external data sources.
+type CSVOptions struct {
+    // AllowJaggedRows causes missing trailing optional columns to be tolerated
+    // when reading CSV data. Missing values are treated as nulls.
+    AllowJaggedRows bool
+
+    // AllowQuotedNewlines sets whether quoted data sections containing
+    // newlines are allowed when reading CSV data.
+    AllowQuotedNewlines bool
+
+    // Encoding is the character encoding of data to be read.
+    Encoding Encoding
+
+    // FieldDelimiter is the separator for fields in a CSV file, used when
+    // reading or exporting data. The default is ",".
+    FieldDelimiter string
+
+    // Quote is the value used to quote data sections in a CSV file. The
+    // default quotation character is the double quote ("), which is used if
+    // both Quote and ForceZeroQuote are unset.
+    // To specify that no character should be interpreted as a quotation
+    // character, set ForceZeroQuote to true.
+    // Only used when reading data.
+    Quote          string
+    ForceZeroQuote bool
+
+    // The number of rows at the top of a CSV file that BigQuery will skip when
+    // reading data.
+    SkipLeadingRows int64
+}
+
+func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+    c.CsvOptions = &bq.CsvOptions{
+        AllowJaggedRows:     o.AllowJaggedRows,
+        AllowQuotedNewlines: o.AllowQuotedNewlines,
+        Encoding:            string(o.Encoding),
+        FieldDelimiter:      o.FieldDelimiter,
+        Quote:               o.quote(),
+        SkipLeadingRows:     o.SkipLeadingRows,
+    }
+}
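[Editor's note] Quote and ForceZeroQuote together encode the three states of the API's *string quote field: nil (service default, the double quote), the empty string (no quote character at all), and an explicit character. The quote method below implements that mapping. A hedged usage sketch, with an illustrative bucket and object (the CSV options are promoted fields on GCSReference via the embedded FileConfig):

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/unquoted.csv")
    gcsRef.FieldDelimiter = "|"
    gcsRef.ForceZeroQuote = true // the data contains no quote character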
+
+// quote returns the CSV quote character, or nil if unset.
+func (o *CSVOptions) quote() *string {
+    if o.ForceZeroQuote {
+        quote := ""
+        return &quote
+    }
+    if o.Quote == "" {
+        return nil
+    }
+    return &o.Quote
+}
+
+func (o *CSVOptions) setQuote(ps *string) {
+    if ps != nil {
+        o.Quote = *ps
+        if o.Quote == "" {
+            o.ForceZeroQuote = true
+        }
+    }
+}
+
+func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
+    o := &CSVOptions{
+        AllowJaggedRows:     q.AllowJaggedRows,
+        AllowQuotedNewlines: q.AllowQuotedNewlines,
+        Encoding:            Encoding(q.Encoding),
+        FieldDelimiter:      q.FieldDelimiter,
+        SkipLeadingRows:     q.SkipLeadingRows,
+    }
+    o.setQuote(q.Quote)
+    return o
+}
+
+// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
+type GoogleSheetsOptions struct {
+    // The number of rows at the top of a sheet that BigQuery will skip when
+    // reading data.
+    SkipLeadingRows int64
+}
+
+func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+    c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
+        SkipLeadingRows: o.SkipLeadingRows,
+    }
+}
+
+func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
+    return &GoogleSheetsOptions{
+        SkipLeadingRows: q.SkipLeadingRows,
+    }
+}
+
+// BigtableOptions are additional options for Bigtable external data sources.
+type BigtableOptions struct {
+    // A list of column families to expose in the table schema along with their
+    // types. If omitted, all column families are present in the table schema and
+    // their values are read as BYTES.
+    ColumnFamilies []*BigtableColumnFamily
+
+    // If true, then the column families that are not specified in columnFamilies
+    // list are not exposed in the table schema. Otherwise, they are read with BYTES
+    // type values. The default is false.
+    IgnoreUnspecifiedColumnFamilies bool
+
+    // If true, then the rowkey column families will be read and converted to string.
+    // Otherwise they are read with BYTES type values and users need to manually cast
+    // them with CAST if necessary. The default is false.
+    ReadRowkeyAsString bool
+}
+
+func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
+    q := &bq.BigtableOptions{
+        IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
+        ReadRowkeyAsString:              o.ReadRowkeyAsString,
+    }
+    for _, f := range o.ColumnFamilies {
+        q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
+    }
+    c.BigtableOptions = q
+}
+
+func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
+    b := &BigtableOptions{
+        IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
+        ReadRowkeyAsString:              q.ReadRowkeyAsString,
+    }
+    for _, f := range q.ColumnFamilies {
+        f2, err := bqToBigtableColumnFamily(f)
+        if err != nil {
+            return nil, err
+        }
+        b.ColumnFamilies = append(b.ColumnFamilies, f2)
+    }
+    return b, nil
+}
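[Editor's note] A hedged sketch of the Bigtable variant defined below, with illustrative project, instance, and table IDs (Bigtable external sources take exactly one HTTPS URL, per the SourceURIs comment above):

    edc := &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.Bigtable,
        SourceURIs:   []string{"https://googleapis.com/bigtable/projects/p/instances/i/tables/t"},
        Options: &bigquery.BigtableOptions{
            ReadRowkeyAsString: true,
            ColumnFamilies: []*bigquery.BigtableColumnFamily{
                {FamilyID: "stats", Type: "INTEGER", OnlyReadLatest: true},
            },
        },
    }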
+
+// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
+type BigtableColumnFamily struct {
+    // Identifier of the column family.
+    FamilyID string
+
+    // Lists of columns that should be exposed as individual fields as opposed to a
+    // list of (column name, value) pairs. All columns whose qualifier matches a
+    // qualifier in this list can be accessed as <family field name>.<column field name>.
+    // Other columns can be accessed as a list through the <family field name>.Column field.
+    Columns []*BigtableColumn
+
+    // The encoding of the values when the type is not STRING. Acceptable encoding values are:
+    // - TEXT - indicates values are alphanumeric text strings.
+    // - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
+    // This can be overridden for a specific column by listing that column in 'columns' and
+    // specifying an encoding for it.
+    Encoding string
+
+    // If true, only the latest version of values are exposed for all columns in this
+    // column family. This can be overridden for a specific column by listing that
+    // column in 'columns' and specifying a different setting for that column.
+    OnlyReadLatest bool
+
+    // The type to convert the value in cells of this
+    // column family. The values are expected to be encoded using HBase
+    // Bytes.toBytes function when using the BINARY encoding value.
+    // Following BigQuery types are allowed (case-sensitive):
+    // BYTES STRING INTEGER FLOAT BOOLEAN.
+    // The default type is BYTES. This can be overridden for a specific column by
+    // listing that column in 'columns' and specifying a type for it.
+    Type string
+}
+
+func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
+    q := &bq.BigtableColumnFamily{
+        FamilyId:       b.FamilyID,
+        Encoding:       b.Encoding,
+        OnlyReadLatest: b.OnlyReadLatest,
+        Type:           b.Type,
+    }
+    for _, col := range b.Columns {
+        q.Columns = append(q.Columns, col.toBQ())
+    }
+    return q
+}
+
+func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
+    b := &BigtableColumnFamily{
+        FamilyID:       q.FamilyId,
+        Encoding:       q.Encoding,
+        OnlyReadLatest: q.OnlyReadLatest,
+        Type:           q.Type,
+    }
+    for _, col := range q.Columns {
+        c, err := bqToBigtableColumn(col)
+        if err != nil {
+            return nil, err
+        }
+        b.Columns = append(b.Columns, c)
+    }
+    return b, nil
+}
+
+// BigtableColumn describes how BigQuery should access a Bigtable column.
+type BigtableColumn struct {
+    // Qualifier of the column. Columns in the parent column family that have this
+    // exact qualifier are exposed as the <family field name>.<column field name> field.
+    // The column field name is the same as the column qualifier.
+    Qualifier string
+
+    // If the qualifier is not a valid BigQuery field identifier i.e. does not match
+    // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
+    // name and is used as field name in queries.
+    FieldName string
+
+    // If true, only the latest version of values are exposed for this column.
+    // See BigtableColumnFamily.OnlyReadLatest.
+    OnlyReadLatest bool
+
+    // The encoding of the values when the type is not STRING.
+    // See BigtableColumnFamily.Encoding.
+    Encoding string
+
+    // The type to convert the value in cells of this column.
+ // See BigtableColumnFamily.Type + Type string +} + +func (b *BigtableColumn) toBQ() *bq.BigtableColumn { + q := &bq.BigtableColumn{ + FieldName: b.FieldName, + OnlyReadLatest: b.OnlyReadLatest, + Encoding: b.Encoding, + Type: b.Type, + } + if utf8.ValidString(b.Qualifier) { + q.QualifierString = b.Qualifier + } else { + q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier)) + } + return q +} + +func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) { + b := &BigtableColumn{ + FieldName: q.FieldName, + OnlyReadLatest: q.OnlyReadLatest, + Encoding: q.Encoding, + Type: q.Type, + } + if q.QualifierString != "" { + b.Qualifier = q.QualifierString + } else { + bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded) + if err != nil { + return nil, err + } + b.Qualifier = string(bytes) + } + return b, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/external_test.go b/vendor/cloud.google.com/go/bigquery/external_test.go new file mode 100644 index 0000000..54aae2d --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/external_test.go @@ -0,0 +1,143 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" +) + +func TestExternalDataConfig(t *testing.T) { + // Round-trip of ExternalDataConfig to underlying representation. 
+    for i, want := range []*ExternalDataConfig{
+        {
+            SourceFormat:        CSV,
+            SourceURIs:          []string{"uri"},
+            Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
+            AutoDetect:          true,
+            Compression:         Gzip,
+            IgnoreUnknownValues: true,
+            MaxBadRecords:       17,
+            Options: &CSVOptions{
+                AllowJaggedRows:     true,
+                AllowQuotedNewlines: true,
+                Encoding:            UTF_8,
+                FieldDelimiter:      "f",
+                Quote:               "q",
+                SkipLeadingRows:     3,
+            },
+        },
+        {
+            SourceFormat: GoogleSheets,
+            Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
+        },
+        {
+            SourceFormat: Bigtable,
+            Options: &BigtableOptions{
+                IgnoreUnspecifiedColumnFamilies: true,
+                ReadRowkeyAsString:              true,
+                ColumnFamilies: []*BigtableColumnFamily{
+                    {
+                        FamilyID:       "f1",
+                        Encoding:       "TEXT",
+                        OnlyReadLatest: true,
+                        Type:           "FLOAT",
+                        Columns: []*BigtableColumn{
+                            {
+                                Qualifier:      "valid-utf-8",
+                                FieldName:      "fn",
+                                OnlyReadLatest: true,
+                                Encoding:       "BINARY",
+                                Type:           "STRING",
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    } {
+        q := want.toBQ()
+        got, err := bqToExternalDataConfig(&q)
+        if err != nil {
+            t.Fatal(err)
+        }
+        if diff := testutil.Diff(got, want); diff != "" {
+            t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
+        }
+    }
+}
+
+func TestQuote(t *testing.T) {
+    ptr := func(s string) *string { return &s }
+
+    for _, test := range []struct {
+        quote string
+        force bool
+        want  *string
+    }{
+        {"", false, nil},
+        {"", true, ptr("")},
+        {"-", false, ptr("-")},
+        {"-", true, ptr("")},
+    } {
+        o := CSVOptions{
+            Quote:          test.quote,
+            ForceZeroQuote: test.force,
+        }
+        got := o.quote()
+        if (got == nil) != (test.want == nil) {
+            t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
+        }
+        if got != nil && test.want != nil && *got != *test.want {
+            t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
+        }
+    }
+}
+
+func TestQualifier(t *testing.T) {
+    b := BigtableColumn{Qualifier: "a"}
+    q := b.toBQ()
+    if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
+        t.Errorf("got (%q, %q), want (%q, %q)",
+            q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
+    }
+    b2, err := bqToBigtableColumn(q)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if got, want := b2.Qualifier, b.Qualifier; got != want {
+        t.Errorf("got %q, want %q", got, want)
+    }
+
+    const (
+        invalidUTF8    = "\xDF\xFF"
+        invalidEncoded = "3/8"
+    )
+    b = BigtableColumn{Qualifier: invalidUTF8}
+    q = b.toBQ()
+    if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
+        t.Errorf("got (%q, %q), want (%q, %q)",
+            q.QualifierString, q.QualifierEncoded, "", invalidEncoded)
+    }
+    b2, err = bqToBigtableColumn(q)
+    if err != nil {
+        t.Fatal(err)
+    }
+    if got, want := b2.Qualifier, b.Qualifier; got != want {
+        t.Errorf("got %q, want %q", got, want)
+    }
+}
diff --git a/vendor/cloud.google.com/go/bigquery/extract.go b/vendor/cloud.google.com/go/bigquery/extract.go
new file mode 100644
index 0000000..e09736e
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/extract.go
@@ -0,0 +1,105 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// ExtractConfig holds the configuration for an extract job. +type ExtractConfig struct { + // Src is the table from which data will be extracted. + Src *Table + + // Dst is the destination into which the data will be extracted. + Dst *GCSReference + + // DisableHeader disables the printing of a header row in exported data. + DisableHeader bool + + // The labels associated with this job. + Labels map[string]string +} + +func (e *ExtractConfig) toBQ() *bq.JobConfiguration { + var printHeader *bool + if e.DisableHeader { + f := false + printHeader = &f + } + return &bq.JobConfiguration{ + Labels: e.Labels, + Extract: &bq.JobConfigurationExtract{ + DestinationUris: append([]string{}, e.Dst.URIs...), + Compression: string(e.Dst.Compression), + DestinationFormat: string(e.Dst.DestinationFormat), + FieldDelimiter: e.Dst.FieldDelimiter, + SourceTable: e.Src.toBQ(), + PrintHeader: printHeader, + }, + } +} + +func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig { + qe := q.Extract + return &ExtractConfig{ + Labels: q.Labels, + Dst: &GCSReference{ + URIs: qe.DestinationUris, + Compression: Compression(qe.Compression), + DestinationFormat: DataFormat(qe.DestinationFormat), + FileConfig: FileConfig{ + CSVOptions: CSVOptions{ + FieldDelimiter: qe.FieldDelimiter, + }, + }, + }, + DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader, + Src: bqToTable(qe.SourceTable, c), + } +} + +// An Extractor extracts data from a BigQuery table into Google Cloud Storage. +type Extractor struct { + JobIDConfig + ExtractConfig + c *Client +} + +// ExtractorTo returns an Extractor which can be used to extract data from a +// BigQuery table into Google Cloud Storage. +// The returned Extractor may optionally be further configured before its Run method is called. +func (t *Table) ExtractorTo(dst *GCSReference) *Extractor { + return &Extractor{ + c: t.c, + ExtractConfig: ExtractConfig{ + Src: t, + Dst: dst, + }, + } +} + +// Run initiates an extract job. +func (e *Extractor) Run(ctx context.Context) (*Job, error) { + return e.c.insertJob(ctx, e.newJob(), nil) +} + +func (e *Extractor) newJob() *bq.Job { + return &bq.Job{ + JobReference: e.JobIDConfig.createJobRef(e.c.projectID), + Configuration: e.ExtractConfig.toBQ(), + } +} diff --git a/vendor/cloud.google.com/go/bigquery/extract_test.go b/vendor/cloud.google.com/go/bigquery/extract_test.go new file mode 100644 index 0000000..95d5b79 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract_test.go @@ -0,0 +1,118 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
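[Editor's note] extract.go's Run (above) returns a *Job without waiting. The examples use Job.Wait, which blocks; a caller can also poll. A hedged sketch, assuming an extractor built as in ExampleTable_ExtractorTo (import: time):

    job, err := extractor.Run(ctx)
    if err != nil {
        return err
    }
    for {
        status, err := job.Status(ctx) // one API call per poll
        if err != nil {
            return err
        }
        if status.Done() {
            return status.Err() // nil if the extract job succeeded
        }
        time.Sleep(time.Second)
    }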
+
+package bigquery
+
+import (
+    "testing"
+
+    "github.com/google/go-cmp/cmp"
+
+    "cloud.google.com/go/internal/testutil"
+
+    bq "google.golang.org/api/bigquery/v2"
+)
+
+func defaultExtractJob() *bq.Job {
+    return &bq.Job{
+        JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
+        Configuration: &bq.JobConfiguration{
+            Extract: &bq.JobConfigurationExtract{
+                SourceTable: &bq.TableReference{
+                    ProjectId: "client-project-id",
+                    DatasetId: "dataset-id",
+                    TableId:   "table-id",
+                },
+                DestinationUris: []string{"uri"},
+            },
+        },
+    }
+}
+
+func defaultGCS() *GCSReference {
+    return &GCSReference{
+        URIs: []string{"uri"},
+    }
+}
+
+func TestExtract(t *testing.T) {
+    defer fixRandomID("RANDOM")()
+    c := &Client{
+        projectID: "client-project-id",
+    }
+
+    testCases := []struct {
+        dst    *GCSReference
+        src    *Table
+        config ExtractConfig
+        want   *bq.Job
+    }{
+        {
+            dst:  defaultGCS(),
+            src:  c.Dataset("dataset-id").Table("table-id"),
+            want: defaultExtractJob(),
+        },
+        {
+            dst: defaultGCS(),
+            src: c.Dataset("dataset-id").Table("table-id"),
+            config: ExtractConfig{
+                DisableHeader: true,
+                Labels:        map[string]string{"a": "b"},
+            },
+            want: func() *bq.Job {
+                j := defaultExtractJob()
+                j.Configuration.Labels = map[string]string{"a": "b"}
+                f := false
+                j.Configuration.Extract.PrintHeader = &f
+                return j
+            }(),
+        },
+        {
+            dst: func() *GCSReference {
+                g := NewGCSReference("uri")
+                g.Compression = Gzip
+                g.DestinationFormat = JSON
+                g.FieldDelimiter = "\t"
+                return g
+            }(),
+            src: c.Dataset("dataset-id").Table("table-id"),
+            want: func() *bq.Job {
+                j := defaultExtractJob()
+                j.Configuration.Extract.Compression = "GZIP"
+                j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
+                j.Configuration.Extract.FieldDelimiter = "\t"
+                return j
+            }(),
+        },
+    }
+
+    for i, tc := range testCases {
+        ext := tc.src.ExtractorTo(tc.dst)
+        tc.config.Src = ext.Src
+        tc.config.Dst = ext.Dst
+        ext.ExtractConfig = tc.config
+        got := ext.newJob()
+        checkJob(t, i, got, tc.want)
+
+        jc, err := bqToJobConfig(got.Configuration, c)
+        if err != nil {
+            t.Fatalf("#%d: %v", i, err)
+        }
+        diff := testutil.Diff(jc, &ext.ExtractConfig,
+            cmp.AllowUnexported(Table{}, Client{}))
+        if diff != "" {
+            t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
+        }
+    }
+}
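[Editor's note] file.go, next, defines ReaderSource and FileConfig. A hedged sketch of loading in-memory CSV through a ReaderSource (illustrative data; the options are promoted from CSVOptions via the embedded FileConfig; imports: bytes):

    buf := bytes.NewBufferString("name,num\nalice,12\nbob,31\n")
    rs := bigquery.NewReaderSource(buf)
    rs.SkipLeadingRows = 1 // skip the header row
    loader := client.Dataset("my_dataset").Table("my_table").LoaderFrom(rs)
    job, err := loader.Run(ctx) // then job.Wait(ctx) as in the examples above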
diff --git a/vendor/cloud.google.com/go/bigquery/file.go b/vendor/cloud.google.com/go/bigquery/file.go
new file mode 100644
index 0000000..c44c902
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/file.go
@@ -0,0 +1,135 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+    "io"
+
+    bq "google.golang.org/api/bigquery/v2"
+)
+
+// A ReaderSource is a source for a load operation that gets
+// data from an io.Reader.
+//
+// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
+// its internal io.Reader will be nil, so it cannot be used for a
+// subsequent load operation.
+type ReaderSource struct {
+    r io.Reader
+    FileConfig
+}
+
+// NewReaderSource creates a ReaderSource from an io.Reader. You may
+// optionally configure properties on the ReaderSource that describe the
+// data being read, before passing it to Table.LoaderFrom.
+func NewReaderSource(r io.Reader) *ReaderSource {
+    return &ReaderSource{r: r}
+}
+
+func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+    r.FileConfig.populateLoadConfig(lc)
+    return r.r
+}
+
+// FileConfig contains configuration options that pertain to files, typically
+// text files that require interpretation to be used as a BigQuery table. A
+// file may live in Google Cloud Storage (see GCSReference), or it may be
+// loaded into a table via Table.LoaderFrom with a ReaderSource.
+type FileConfig struct {
+    // SourceFormat is the format of the GCS data to be read.
+    // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
+    SourceFormat DataFormat
+
+    // Indicates if we should automatically infer the options and
+    // schema for CSV and JSON sources.
+    AutoDetect bool
+
+    // MaxBadRecords is the maximum number of bad records that will be ignored
+    // when reading data.
+    MaxBadRecords int64
+
+    // IgnoreUnknownValues causes values not matching the schema to be
+    // tolerated. Unknown values are ignored. For CSV this ignores extra values
+    // at the end of a line. For JSON this ignores named values that do not
+    // match any column name. If this field is not set, records containing
+    // unknown values are treated as bad records. The MaxBadRecords field can
+    // be used to customize how bad records are handled.
+    IgnoreUnknownValues bool
+
+    // Schema describes the data. It is required when reading CSV or JSON data,
+    // unless the data is being loaded into a table that already exists.
+    Schema Schema
+
+    // Additional options for CSV files.
+    CSVOptions
+}
+
+func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
+    conf.SkipLeadingRows = fc.SkipLeadingRows
+    conf.SourceFormat = string(fc.SourceFormat)
+    conf.Autodetect = fc.AutoDetect
+    conf.AllowJaggedRows = fc.AllowJaggedRows
+    conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
+    conf.Encoding = string(fc.Encoding)
+    conf.FieldDelimiter = fc.FieldDelimiter
+    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
+    conf.MaxBadRecords = fc.MaxBadRecords
+    if fc.Schema != nil {
+        conf.Schema = fc.Schema.toBQ()
+    }
+    conf.Quote = fc.quote()
+}
+
+func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
+    fc.SourceFormat = DataFormat(conf.SourceFormat)
+    fc.AutoDetect = conf.Autodetect
+    fc.MaxBadRecords = conf.MaxBadRecords
+    fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
+    fc.Schema = bqToSchema(conf.Schema)
+    fc.SkipLeadingRows = conf.SkipLeadingRows
+    fc.AllowJaggedRows = conf.AllowJaggedRows
+    fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
+    fc.Encoding = Encoding(conf.Encoding)
+    fc.FieldDelimiter = conf.FieldDelimiter
+    fc.CSVOptions.setQuote(conf.Quote)
+}
+
+func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
+    format := fc.SourceFormat
+    if format == "" {
+        // Format must be explicitly set for external data sources.
+ format = CSV + } + conf.Autodetect = fc.AutoDetect + conf.IgnoreUnknownValues = fc.IgnoreUnknownValues + conf.MaxBadRecords = fc.MaxBadRecords + conf.SourceFormat = string(format) + if fc.Schema != nil { + conf.Schema = fc.Schema.toBQ() + } + if format == CSV { + fc.CSVOptions.populateExternalDataConfig(conf) + } +} + +// Encoding specifies the character encoding of data to be loaded into BigQuery. +// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding +// for more details about how this is used. +type Encoding string + +const ( + UTF_8 Encoding = "UTF-8" + ISO_8859_1 Encoding = "ISO-8859-1" +) diff --git a/vendor/cloud.google.com/go/bigquery/file_test.go b/vendor/cloud.google.com/go/bigquery/file_test.go new file mode 100644 index 0000000..ad24415 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/file_test.go @@ -0,0 +1,98 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +var ( + hyphen = "-" + fc = FileConfig{ + SourceFormat: CSV, + AutoDetect: true, + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: Schema{ + stringFieldSchema(), + nestedFieldSchema(), + }, + CSVOptions: CSVOptions{ + Quote: hyphen, + FieldDelimiter: "\t", + SkipLeadingRows: 8, + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: UTF_8, + }, + } +) + +func TestFileConfigPopulateLoadConfig(t *testing.T) { + want := &bq.JobConfigurationLoad{ + SourceFormat: "CSV", + FieldDelimiter: "\t", + SkipLeadingRows: 8, + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Autodetect: true, + Encoding: "UTF-8", + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }}, + Quote: &hyphen, + } + got := &bq.JobConfigurationLoad{} + fc.populateLoadConfig(got) + if !testutil.Equal(got, want) { + t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want)) + } +} + +func TestFileConfigPopulateExternalDataConfig(t *testing.T) { + got := &bq.ExternalDataConfiguration{} + fc.populateExternalDataConfig(got) + + want := &bq.ExternalDataConfiguration{ + SourceFormat: "CSV", + Autodetect: true, + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }}, + CsvOptions: &bq.CsvOptions{ + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: "UTF-8", + FieldDelimiter: "\t", + Quote: &hyphen, + SkipLeadingRows: 8, + }, + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/gcs.go b/vendor/cloud.google.com/go/bigquery/gcs.go new file mode 100644 index 0000000..6936b4f --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/gcs.go 
@@ -0,0 +1,73 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+    "io"
+
+    bq "google.golang.org/api/bigquery/v2"
+)
+
+// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
+// an input or output to a BigQuery operation.
+type GCSReference struct {
+    // URIs refer to Google Cloud Storage objects.
+    URIs []string
+
+    FileConfig
+
+    // DestinationFormat is the format to use when writing exported files.
+    // Allowed values are: CSV, Avro, JSON. The default is CSV.
+    // CSV is not supported for tables with nested or repeated fields.
+    DestinationFormat DataFormat
+
+    // Compression specifies the type of compression to apply when writing data
+    // to Google Cloud Storage, or using this GCSReference as an ExternalData
+    // source with CSV or JSON SourceFormat. Default is None.
+    Compression Compression
+}
+
+// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
+// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
+// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
+// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
+// For more information about the treatment of wildcards and multiple URIs,
+// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
+func NewGCSReference(uri ...string) *GCSReference {
+    return &GCSReference{URIs: uri}
+}
+
+// Compression is the type of compression to apply when writing data to Google Cloud Storage.
+type Compression string
+
+const (
+    None Compression = "NONE"
+    Gzip Compression = "GZIP"
+)
+
+func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
+    lc.SourceUris = gcs.URIs
+    gcs.FileConfig.populateLoadConfig(lc)
+    return nil
+}
+
+func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
+    conf := bq.ExternalDataConfiguration{
+        Compression: string(gcs.Compression),
+        SourceUris:  append([]string{}, gcs.URIs...),
+    }
+    gcs.FileConfig.populateExternalDataConfig(&conf)
+    return conf
+}
diff --git a/vendor/cloud.google.com/go/bigquery/integration_test.go b/vendor/cloud.google.com/go/bigquery/integration_test.go
new file mode 100644
index 0000000..593aa2d
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/integration_test.go
@@ -0,0 +1,1578 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "flag" + "fmt" + "log" + "net/http" + "os" + "sort" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + gax "github.com/googleapis/gax-go" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var ( + client *Client + storageClient *storage.Client + dataset *Dataset + schema = Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "nums", Type: IntegerFieldType, Repeated: true}, + {Name: "rec", Type: RecordFieldType, Schema: Schema{ + {Name: "bool", Type: BooleanFieldType}, + }}, + } + testTableExpiration time.Time + // BigQuery does not accept hyphens in dataset or table IDs, so we create IDs + // with underscores. + datasetIDs = testutil.NewUIDSpaceSep("dataset", '_') + tableIDs = testutil.NewUIDSpaceSep("table", '_') +) + +func TestMain(m *testing.M) { + cleanup := initIntegrationTest() + r := m.Run() + cleanup() + os.Exit(r) +} + +func getClient(t *testing.T) *Client { + if client == nil { + t.Skip("Integration tests skipped") + } + return client +} + +// If integration tests will be run, create a unique bucket for them. +func initIntegrationTest() func() { + flag.Parse() // needed for testing.Short() + if testing.Short() { + return func() {} + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + log.Println("Integration tests skipped. See CONTRIBUTING.md for details") + return func() {} + } + projID := testutil.ProjID() + var err error + client, err = NewClient(ctx, projID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + storageClient, err = storage.NewClient(ctx, + option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl))) + if err != nil { + log.Fatalf("storage.NewClient: %v", err) + } + dataset = client.Dataset(datasetIDs.New()) + if err := dataset.Create(ctx, nil); err != nil { + log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err) + } + testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second) + return func() { + if err := deleteDataset(ctx, dataset); err != nil { + log.Printf("could not delete %s", dataset.DatasetID) + } + } +} + +func deleteDataset(ctx context.Context, ds *Dataset) error { + it := ds.Tables(ctx) + for { + tbl, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if err := tbl.Delete(ctx); err != nil { + return err + } + } + return ds.Delete(ctx) +} +func TestIntegration_TableCreate(t *testing.T) { + // Check that creating a record field with an empty schema is an error. 
+    if client == nil {
+        t.Skip("Integration tests skipped")
+    }
+    table := dataset.Table("t_bad")
+    schema := Schema{
+        {Name: "rec", Type: RecordFieldType, Schema: Schema{}},
+    }
+    err := table.Create(context.Background(), &TableMetadata{
+        Schema:         schema,
+        ExpirationTime: time.Now().Add(5 * time.Minute),
+    })
+    if err == nil {
+        t.Fatal("want error, got nil")
+    }
+    if !hasStatusCode(err, http.StatusBadRequest) {
+        t.Fatalf("want a 400 error, got %v", err)
+    }
+}
+
+func TestIntegration_TableCreateView(t *testing.T) {
+    if client == nil {
+        t.Skip("Integration tests skipped")
+    }
+    ctx := context.Background()
+    table := newTable(t, schema)
+    defer table.Delete(ctx)
+
+    // Test that standard SQL views work.
+    view := dataset.Table("t_view_standardsql")
+    query := fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`",
+        dataset.ProjectID, dataset.DatasetID, table.TableID)
+    err := view.Create(context.Background(), &TableMetadata{
+        ViewQuery:      query,
+        UseStandardSQL: true,
+    })
+    if err != nil {
+        t.Fatalf("table.create: Did not expect an error, got: %v", err)
+    }
+    view.Delete(ctx)
+}
+
+func TestIntegration_TableMetadata(t *testing.T) {
+    if client == nil {
+        t.Skip("Integration tests skipped")
+    }
+    ctx := context.Background()
+    table := newTable(t, schema)
+    defer table.Delete(ctx)
+    // Check table metadata.
+    md, err := table.Metadata(ctx)
+    if err != nil {
+        t.Fatal(err)
+    }
+    // TODO(jba): check md more thoroughly.
+    if got, want := md.FullID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
+        t.Errorf("metadata.FullID: got %q, want %q", got, want)
+    }
+    if got, want := md.Type, RegularTable; got != want {
+        t.Errorf("metadata.Type: got %v, want %v", got, want)
+    }
+    if got, want := md.ExpirationTime, testTableExpiration; !got.Equal(want) {
+        t.Errorf("metadata.ExpirationTime: got %v, want %v", got, want)
+    }
+
+    // Check that timePartitioning is nil by default
+    if md.TimePartitioning != nil {
+        t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil)
+    }
+
+    // Create tables that have time partitioning
+    partitionCases := []struct {
+        timePartitioning TimePartitioning
+        wantExpiration   time.Duration
+        wantField        string
+    }{
+        {TimePartitioning{}, time.Duration(0), ""},
+        {TimePartitioning{Expiration: time.Second}, time.Second, ""},
+        {
+            TimePartitioning{
+                Expiration: time.Second,
+                Field:      "date",
+            }, time.Second, "date"},
+    }
+
+    schema2 := Schema{
+        {Name: "name", Type: StringFieldType},
+        {Name: "date", Type: DateFieldType},
+    }
+
+    for i, c := range partitionCases {
+        table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
+        err = table.Create(context.Background(), &TableMetadata{
+            Schema:           schema2,
+            TimePartitioning: &c.timePartitioning,
+            ExpirationTime:   time.Now().Add(5 * time.Minute),
+        })
+        if err != nil {
+            t.Fatal(err)
+        }
+        defer table.Delete(ctx)
+        md, err = table.Metadata(ctx)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        got := md.TimePartitioning
+        want := &TimePartitioning{
+            Expiration: c.wantExpiration,
+            Field:      c.wantField,
+        }
+        if !testutil.Equal(got, want) {
+            t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
+        }
+    }
+}
+
+func TestIntegration_DatasetCreate(t *testing.T) {
+    if client == nil {
+        t.Skip("Integration tests skipped")
+    }
+    ctx := context.Background()
+    ds := client.Dataset(datasetIDs.New())
+    wmd := &DatasetMetadata{Name: "name", Location: "EU"}
+    err := ds.Create(ctx, wmd)
+    if err != nil {
+        t.Fatal(err)
+    }
+    gmd, err := ds.Metadata(ctx)
+    if err != nil {
+ t.Fatal(err) + } + if got, want := gmd.Name, wmd.Name; got != want { + t.Errorf("name: got %q, want %q", got, want) + } + if got, want := gmd.Location, wmd.Location; got != want { + t.Errorf("location: got %q, want %q", got, want) + } + if err := ds.Delete(ctx); err != nil { + t.Fatalf("deleting dataset %v: %v", ds, err) + } +} + +func TestIntegration_DatasetMetadata(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + if got, want := md.FullID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want { + t.Errorf("FullID: got %q, want %q", got, want) + } + jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC) + if md.CreationTime.Before(jan2016) { + t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime) + } + if md.LastModifiedTime.Before(jan2016) { + t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime) + } + + // Verify that we get a NotFound for a nonexistent dataset. + _, err = client.Dataset("does_not_exist").Metadata(ctx) + if err == nil || !hasStatusCode(err, http.StatusNotFound) { + t.Errorf("got %v, want NotFound error", err) + } +} + +func TestIntegration_DatasetDelete(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + ds := client.Dataset(datasetIDs.New()) + if err := ds.Create(ctx, nil); err != nil { + t.Fatalf("creating dataset %s: %v", ds.DatasetID, err) + } + if err := ds.Delete(ctx); err != nil { + t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err) + } +} + +func TestIntegration_DatasetUpdateETags(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + + check := func(md *DatasetMetadata, wantDesc, wantName string) { + if md.Description != wantDesc { + t.Errorf("description: got %q, want %q", md.Description, wantDesc) + } + if md.Name != wantName { + t.Errorf("name: got %q, want %q", md.Name, wantName) + } + } + + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + if md.ETag == "" { + t.Fatal("empty ETag") + } + // Write without ETag succeeds. + desc := md.Description + "d2" + name := md.Name + "n2" + md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "") + if err != nil { + t.Fatal(err) + } + check(md2, desc, name) + + // Write with original ETag fails because of intervening write. + _, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag) + if err == nil { + t.Fatal("got nil, want error") + } + + // Write with most recent ETag succeeds. + md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag) + if err != nil { + t.Fatal(err) + } + check(md3, "", "") +} + +func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + // Set the default expiration time. + md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "") + if err != nil { + t.Fatal(err) + } + if md.DefaultTableExpiration != time.Hour { + t.Fatalf("got %s, want 1h", md.DefaultTableExpiration) + } + // Omitting DefaultTableExpiration doesn't change it. 
+ md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "") + if err != nil { + t.Fatal(err) + } + if md.DefaultTableExpiration != time.Hour { + t.Fatalf("got %s, want 1h", md.DefaultTableExpiration) + } + // Setting it to 0 deletes it (which looks like a 0 duration). + md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "") + if err != nil { + t.Fatal(err) + } + if md.DefaultTableExpiration != 0 { + t.Fatalf("got %s, want 0", md.DefaultTableExpiration) + } +} + +func TestIntegration_DatasetUpdateAccess(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + origAccess := append([]*AccessEntry(nil), md.Access...) + newEntry := &AccessEntry{ + Role: ReaderRole, + Entity: "Joe@example.com", + EntityType: UserEmailEntity, + } + newAccess := append(md.Access, newEntry) + dm := DatasetMetadataToUpdate{Access: newAccess} + md, err = dataset.Update(ctx, dm, md.ETag) + if err != nil { + t.Fatal(err) + } + defer func() { + _, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag) + if err != nil { + t.Log("could not restore dataset access list") + } + }() + if diff := testutil.Diff(md.Access, newAccess); diff != "" { + t.Fatalf("got=-, want=+:\n%s", diff) + } +} + +func TestIntegration_DatasetUpdateLabels(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + var dm DatasetMetadataToUpdate + dm.SetLabel("label", "value") + md, err = dataset.Update(ctx, dm, "") + if err != nil { + t.Fatal(err) + } + if got, want := md.Labels["label"], "value"; got != want { + t.Errorf("got %q, want %q", got, want) + } + dm = DatasetMetadataToUpdate{} + dm.DeleteLabel("label") + md, err = dataset.Update(ctx, dm, "") + if err != nil { + t.Fatal(err) + } + if _, ok := md.Labels["label"]; ok { + t.Error("label still present after deletion") + } +} + +func TestIntegration_TableUpdateLabels(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + var tm TableMetadataToUpdate + tm.SetLabel("label", "value") + md, err := table.Update(ctx, tm, "") + if err != nil { + t.Fatal(err) + } + if got, want := md.Labels["label"], "value"; got != want { + t.Errorf("got %q, want %q", got, want) + } + tm = TableMetadataToUpdate{} + tm.DeleteLabel("label") + md, err = table.Update(ctx, tm, "") + if err != nil { + t.Fatal(err) + } + if _, ok := md.Labels["label"]; ok { + t.Error("label still present after deletion") + } +} + +func TestIntegration_Tables(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + wantName := table.FullyQualifiedName() + + // This test is flaky due to eventual consistency. + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + // Iterate over tables in the dataset. 
+ it := dataset.Tables(ctx) + var tableNames []string + for { + tbl, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return false, err + } + tableNames = append(tableNames, tbl.FullyQualifiedName()) + } + // Other tests may be running with this dataset, so there might be more + // than just our table in the list. So don't try for an exact match; just + // make sure that our table is there somewhere. + for _, tn := range tableNames { + if tn == wantName { + return true, nil + } + } + return false, fmt.Errorf("got %v\nwant %s in the list", tableNames, wantName) + }) + if err != nil { + t.Fatal(err) + } +} + +func TestIntegration_UploadAndRead(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Populate the table. + upl := table.Uploader() + var ( + wantRows [][]Value + saverRows []*ValuesSaver + ) + for i, name := range []string{"a", "b", "c"} { + row := []Value{name, []Value{int64(i)}, []Value{true}} + wantRows = append(wantRows, row) + saverRows = append(saverRows, &ValuesSaver{ + Schema: schema, + InsertID: name, + Row: row, + }) + } + if err := upl.Put(ctx, saverRows); err != nil { + t.Fatal(putError(err)) + } + + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Read the table. + checkRead(t, "upload", table.Read(ctx), wantRows) + + // Query the table. + q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID)) + q.DefaultProjectID = dataset.ProjectID + q.DefaultDatasetID = dataset.DatasetID + + rit, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "query", rit, wantRows) + + // Query the long way. + job1, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + if job1.LastStatus() == nil { + t.Error("no LastStatus") + } + job2, err := client.JobFromID(ctx, job1.ID()) + if err != nil { + t.Fatal(err) + } + if job2.LastStatus() == nil { + t.Error("no LastStatus") + } + rit, err = job2.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "job.Read", rit, wantRows) + + // Get statistics. + jobStatus, err := job2.Status(ctx) + if err != nil { + t.Fatal(err) + } + if jobStatus.Statistics == nil { + t.Fatal("jobStatus missing statistics") + } + if _, ok := jobStatus.Statistics.Details.(*QueryStatistics); !ok { + t.Errorf("expected QueryStatistics, got %T", jobStatus.Statistics.Details) + } + + // Test reading directly into a []Value. + valueLists, err := readAll(table.Read(ctx)) + if err != nil { + t.Fatal(err) + } + it := table.Read(ctx) + for i, vl := range valueLists { + var got []Value + if err := it.Next(&got); err != nil { + t.Fatal(err) + } + want := []Value(vl) + if !testutil.Equal(got, want) { + t.Errorf("%d: got %v, want %v", i, got, want) + } + } + + // Test reading into a map. + it = table.Read(ctx) + for _, vl := range valueLists { + var vm map[string]Value + if err := it.Next(&vm); err != nil { + t.Fatal(err) + } + if got, want := len(vm), len(vl); got != want { + t.Fatalf("valueMap len: got %d, want %d", got, want) + } + // With maps, structs become nested maps. 
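+ // For example, a "rec" RECORD column that arrives as []Value{true}
+ // in the *[]Value case comes back keyed by field name here:
+ //
+ // map[string]Value{"bool": true}
+ //
+ // so rewrite the expected row accordingly before comparing.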
+ vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]} + for i, v := range vl { + if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) { + t.Errorf("%d, name=%s: got %#v, want %#v", + i, schema[i].Name, got, want) + } + } + } +} + +type SubSubTestStruct struct { + Integer int64 +} + +type SubTestStruct struct { + String string + Record SubSubTestStruct + RecordArray []SubSubTestStruct +} + +type TestStruct struct { + Name string + Bytes []byte + Integer int64 + Float float64 + Boolean bool + Timestamp time.Time + Date civil.Date + Time civil.Time + DateTime civil.DateTime + + StringArray []string + IntegerArray []int64 + FloatArray []float64 + BooleanArray []bool + TimestampArray []time.Time + DateArray []civil.Date + TimeArray []civil.Time + DateTimeArray []civil.DateTime + + Record SubTestStruct + RecordArray []SubTestStruct +} + +func TestIntegration_UploadAndReadStructs(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + schema, err := InferSchema(TestStruct{}) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + d := civil.Date{2016, 3, 20} + tm := civil.Time{15, 4, 5, 6000} + ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC) + dtm := civil.DateTime{d, tm} + d2 := civil.Date{1994, 5, 15} + tm2 := civil.Time{1, 2, 4, 0} + ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC) + dtm2 := civil.DateTime{d2, tm2} + + // Populate the table. + upl := table.Uploader() + want := []*TestStruct{ + { + "a", + []byte("byte"), + 42, + 3.14, + true, + ts, + d, + tm, + dtm, + []string{"a", "b"}, + []int64{1, 2}, + []float64{1, 1.41}, + []bool{true, false}, + []time.Time{ts, ts2}, + []civil.Date{d, d2}, + []civil.Time{tm, tm2}, + []civil.DateTime{dtm, dtm2}, + SubTestStruct{ + "string", + SubSubTestStruct{24}, + []SubSubTestStruct{{1}, {2}}, + }, + []SubTestStruct{ + {String: "empty"}, + { + "full", + SubSubTestStruct{1}, + []SubSubTestStruct{{1}, {2}}, + }, + }, + }, + { + Name: "b", + Bytes: []byte("byte2"), + Integer: 24, + Float: 4.13, + Boolean: false, + Timestamp: ts, + Date: d, + Time: tm, + DateTime: dtm, + }, + } + var savers []*StructSaver + for _, s := range want { + savers = append(savers, &StructSaver{Schema: schema, Struct: s}) + } + if err := upl.Put(ctx, savers); err != nil { + t.Fatal(putError(err)) + } + + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Test iteration with structs. + it := table.Read(ctx) + var got []*TestStruct + for { + var g TestStruct + err := it.Next(&g) + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + got = append(got, &g) + } + sort.Sort(byName(got)) + + // Round times to the microsecond. + roundToMicros := cmp.Transformer("RoundToMicros", + func(t time.Time) time.Time { return t.Round(time.Microsecond) }) + // BigQuery does not elide nils. It reports an error for nil fields. 
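+ // (Aside on roundToMicros above: cmp.Transformer rewrites every
+ // time.Time on both sides before comparing, so timestamps that differ
+ // only below a microsecond, e.g. ts and ts.Add(time.Nanosecond),
+ // compare equal. BigQuery stores timestamps with microsecond
+ // precision, so finer differences do not survive a round trip.)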
+ for i, g := range got { + if i >= len(want) { + t.Errorf("%d: got %v, past end of want", i, pretty.Value(g)) + } else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" { + t.Errorf("%d: got=-, want=+:\n%s", i, diff) + } + } +} + +type byName []*TestStruct + +func (b byName) Len() int { return len(b) } +func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +func TestIntegration_TableUpdate(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Test Update of non-schema fields. + tm, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + wantDescription := tm.Description + "more" + wantName := tm.Name + "more" + wantExpiration := tm.ExpirationTime.Add(time.Hour * 24) + got, err := table.Update(ctx, TableMetadataToUpdate{ + Description: wantDescription, + Name: wantName, + ExpirationTime: wantExpiration, + }, tm.ETag) + if err != nil { + t.Fatal(err) + } + if got.Description != wantDescription { + t.Errorf("Description: got %q, want %q", got.Description, wantDescription) + } + if got.Name != wantName { + t.Errorf("Name: got %q, want %q", got.Name, wantName) + } + if got.ExpirationTime != wantExpiration { + t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration) + } + if !testutil.Equal(got.Schema, schema) { + t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema)) + } + + // Blind write succeeds. + _, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "") + if err != nil { + t.Fatal(err) + } + // Write with old etag fails. + _, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag) + if err == nil { + t.Fatal("Update with old ETag succeeded, wanted failure") + } + + // Test schema update. + // Columns can be added. schema2 is the same as schema, except for the + // added column in the middle. + nested := Schema{ + {Name: "nested", Type: BooleanFieldType}, + {Name: "other", Type: StringFieldType}, + } + schema2 := Schema{ + schema[0], + {Name: "rec2", Type: RecordFieldType, Schema: nested}, + schema[1], + schema[2], + } + + got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "") + if err != nil { + t.Fatal(err) + } + + // Wherever you add the column, it appears at the end. + schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]} + if !testutil.Equal(got.Schema, schema3) { + t.Errorf("add field:\ngot %v\nwant %v", + pretty.Value(got.Schema), pretty.Value(schema3)) + } + + // Updating with the empty schema succeeds, but is a no-op. + got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "") + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got.Schema, schema3) { + t.Errorf("empty schema:\ngot %v\nwant %v", + pretty.Value(got.Schema), pretty.Value(schema3)) + } + + // Error cases when updating schema. 
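+ // BigQuery only accepts loosening, additive changes: appending
+ // NULLABLE or REPEATED columns, or relaxing REQUIRED to NULLABLE.
+ // Each case below violates that and should be rejected with a 400.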
+ for _, test := range []struct { + desc string + fields []*FieldSchema + }{ + {"change from optional to required", []*FieldSchema{ + {Name: "name", Type: StringFieldType, Required: true}, + schema3[1], + schema3[2], + schema3[3], + }}, + {"add a required field", []*FieldSchema{ + schema3[0], schema3[1], schema3[2], schema3[3], + {Name: "req", Type: StringFieldType, Required: true}, + }}, + {"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}}, + {"remove a nested field", []*FieldSchema{ + schema3[0], schema3[1], schema3[2], + {Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}}, + {"remove all nested fields", []*FieldSchema{ + schema3[0], schema3[1], schema3[2], + {Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}}, + } { + _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "") + if err == nil { + t.Errorf("%s: want error, got nil", test.desc) + } else if !hasStatusCode(err, 400) { + t.Errorf("%s: want 400, got %v", test.desc, err) + } + } +} + +func TestIntegration_Load(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + // CSV data can't be loaded into a repeated field, so we use a different schema. + table := newTable(t, Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "nums", Type: IntegerFieldType}, + }) + defer table.Delete(ctx) + + // Load the table from a reader. + r := strings.NewReader("a,0\nb,1\nc,2\n") + wantRows := [][]Value{ + []Value{"a", int64(0)}, + []Value{"b", int64(1)}, + []Value{"c", int64(2)}, + } + rs := NewReaderSource(r) + loader := table.LoaderFrom(rs) + loader.WriteDisposition = WriteTruncate + loader.Labels = map[string]string{"test": "go"} + job, err := loader.Run(ctx) + if err != nil { + t.Fatal(err) + } + if job.LastStatus() == nil { + t.Error("no LastStatus") + } + conf, err := job.Config() + if err != nil { + t.Fatal(err) + } + config, ok := conf.(*LoadConfig) + if !ok { + t.Fatalf("got %T, want LoadConfig", conf) + } + diff := testutil.Diff(config, &loader.LoadConfig, + cmp.AllowUnexported(Table{}), + cmpopts.IgnoreUnexported(Client{}, ReaderSource{}), + // returned schema is at top level, not in the config + cmpopts.IgnoreFields(FileConfig{}, "Schema")) + if diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + checkRead(t, "reader load", table.Read(ctx), wantRows) + +} + +func TestIntegration_DML(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec) + VALUES ('a', [0], STRUCT(TRUE)), + ('b', [1], STRUCT(FALSE)), + ('c', [2], STRUCT(TRUE))`, + table.DatasetID, table.TableID) + if err := dmlInsert(ctx, sql); err != nil { + t.Fatal(err) + } + wantRows := [][]Value{ + []Value{"a", []Value{int64(0)}, []Value{true}}, + []Value{"b", []Value{int64(1)}, []Value{false}}, + []Value{"c", []Value{int64(2)}, []Value{true}}, + } + checkRead(t, "DML", table.Read(ctx), wantRows) +} + +func dmlInsert(ctx context.Context, sql string) error { + // Retry insert; sometimes it fails with INTERNAL. + return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) { + // Use DML to insert. 
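+ // internal.Retry calls this closure until it returns stop == true:
+ // (true, err) surfaces err immediately, while (false, err) backs off
+ // and retries. Hence 4xx errors below fail fast and 5xx/INTERNAL
+ // errors are retried.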
+ q := client.Query(sql) + job, err := q.Run(ctx) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code < 500 { + return true, err // fail on 4xx + } + return false, err + } + if err := wait(ctx, job); err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code < 500 { + return true, err // fail on 4xx + } + return false, err + } + return true, nil + }) +} + +func TestIntegration_TimeTypes(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + dtSchema := Schema{ + {Name: "d", Type: DateFieldType}, + {Name: "t", Type: TimeFieldType}, + {Name: "dt", Type: DateTimeFieldType}, + {Name: "ts", Type: TimestampFieldType}, + } + table := newTable(t, dtSchema) + defer table.Delete(ctx) + + d := civil.Date{2016, 3, 20} + tm := civil.Time{12, 30, 0, 6000} + dtm := civil.DateTime{d, tm} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + wantRows := [][]Value{ + []Value{d, tm, dtm, ts}, + } + upl := table.Uploader() + if err := upl.Put(ctx, []*ValuesSaver{ + {Schema: dtSchema, Row: wantRows[0]}, + }); err != nil { + t.Fatal(putError(err)) + } + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // SQL wants DATETIMEs with a space between date and time, but the service + // returns them in RFC3339 form, with a "T" between. + query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+ + "VALUES ('%s', '%s', '%s', '%s')", + table.DatasetID, table.TableID, + d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05")) + if err := dmlInsert(ctx, query); err != nil { + t.Fatal(err) + } + wantRows = append(wantRows, wantRows[0]) + checkRead(t, "TimeTypes", table.Read(ctx), wantRows) +} + +func TestIntegration_StandardQuery(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + d := civil.Date{2016, 3, 20} + tm := civil.Time{15, 04, 05, 0} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + dtm := ts.Format("2006-01-02 15:04:05") + + // Constructs Value slices made up of int64s. 
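+ // BigQuery returns every integer result as INT64, so expected values
+ // must be Go int64 even for small literals, e.g.
+ //
+ // ints(1, 2) == []Value{int64(1), int64(2)}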
+ ints := func(args ...int) []Value { + vals := make([]Value, len(args)) + for i, arg := range args { + vals[i] = int64(arg) + } + return vals + } + + testCases := []struct { + query string + wantRow []Value + }{ + {"SELECT 1", ints(1)}, + {"SELECT 1.3", []Value{1.3}}, + {"SELECT TRUE", []Value{true}}, + {"SELECT 'ABC'", []Value{"ABC"}}, + {"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}}, + {fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}}, + {fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}}, + {fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}}, + {fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{d, tm}}}, + {fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}}, + {fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}}, + {"SELECT (1, 2)", []Value{ints(1, 2)}}, + {"SELECT [1, 2, 3]", []Value{ints(1, 2, 3)}}, + {"SELECT ([1, 2], 3, [4, 5])", []Value{[]Value{ints(1, 2), int64(3), ints(4, 5)}}}, + {"SELECT [(1, 2, 3), (4, 5, 6)]", []Value{[]Value{ints(1, 2, 3), ints(4, 5, 6)}}}, + {"SELECT [([1, 2, 3], 4), ([5, 6], 7)]", []Value{[]Value{[]Value{ints(1, 2, 3), int64(4)}, []Value{ints(5, 6), int64(7)}}}}, + {"SELECT ARRAY(SELECT STRUCT([1, 2]))", []Value{[]Value{[]Value{ints(1, 2)}}}}, + } + for _, c := range testCases { + q := client.Query(c.query) + it, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "StandardQuery", it, [][]Value{c.wantRow}) + } +} + +func TestIntegration_LegacyQuery(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + dtm := ts.Format("2006-01-02 15:04:05") + + testCases := []struct { + query string + wantRow []Value + }{ + {"SELECT 1", []Value{int64(1)}}, + {"SELECT 1.3", []Value{1.3}}, + {"SELECT TRUE", []Value{true}}, + {"SELECT 'ABC'", []Value{"ABC"}}, + {"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}}, + {fmt.Sprintf("SELECT TIMESTAMP('%s')", dtm), []Value{ts}}, + {fmt.Sprintf("SELECT DATE(TIMESTAMP('%s'))", dtm), []Value{"2016-03-20"}}, + {fmt.Sprintf("SELECT TIME(TIMESTAMP('%s'))", dtm), []Value{"15:04:05"}}, + } + for _, c := range testCases { + q := client.Query(c.query) + q.UseLegacySQL = true + it, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "LegacyQuery", it, [][]Value{c.wantRow}) + } +} + +func TestIntegration_QueryParameters(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + d := civil.Date{2016, 3, 20} + tm := civil.Time{15, 04, 05, 0} + dtm := civil.DateTime{d, tm} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + + type ss struct { + String string + } + + type s struct { + Timestamp time.Time + StringArray []string + SubStruct ss + SubStructArray []ss + } + + testCases := []struct { + query string + parameters []QueryParameter + wantRow []Value + }{ + {"SELECT @val", []QueryParameter{{"val", 1}}, []Value{int64(1)}}, + {"SELECT @val", []QueryParameter{{"val", 1.3}}, []Value{1.3}}, + {"SELECT @val", []QueryParameter{{"val", true}}, []Value{true}}, + {"SELECT @val", []QueryParameter{{"val", "ABC"}}, []Value{"ABC"}}, + {"SELECT @val", []QueryParameter{{"val", []byte("foo")}}, []Value{[]byte("foo")}}, + {"SELECT @val", []QueryParameter{{"val", ts}}, []Value{ts}}, + {"SELECT @val", []QueryParameter{{"val", []time.Time{ts, ts}}}, []Value{[]Value{ts, ts}}}, + {"SELECT @val", 
[]QueryParameter{{"val", dtm}}, []Value{dtm}}, + {"SELECT @val", []QueryParameter{{"val", d}}, []Value{d}}, + {"SELECT @val", []QueryParameter{{"val", tm}}, []Value{tm}}, + {"SELECT @val", []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}}, + []Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}}, + {"SELECT @val.Timestamp, @val.SubStruct.String", []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, []Value{ts, "a"}}, + } + for _, c := range testCases { + q := client.Query(c.query) + q.Parameters = c.parameters + job, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + if job.LastStatus() == nil { + t.Error("no LastStatus") + } + it, err := job.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "QueryParameters", it, [][]Value{c.wantRow}) + } +} + +func TestIntegration_QueryDryRun(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + q := client.Query("SELECT word from " + stdName + " LIMIT 10") + q.DryRun = true + job, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + + s := job.LastStatus() + if s.State != Done { + t.Errorf("state is %v, expected Done", s.State) + } + if s.Statistics == nil { + t.Fatal("no statistics") + } + if s.Statistics.Details.(*QueryStatistics).Schema == nil { + t.Fatal("no schema") + } +} + +func TestIntegration_ExtractExternal(t *testing.T) { + // Create a table, extract it to GCS, then query it externally. + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + schema := Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "num", Type: IntegerFieldType}, + } + table := newTable(t, schema) + defer table.Delete(ctx) + + // Insert table data. + sql := fmt.Sprintf(`INSERT %s.%s (name, num) + VALUES ('a', 1), ('b', 2), ('c', 3)`, + table.DatasetID, table.TableID) + if err := dmlInsert(ctx, sql); err != nil { + t.Fatal(err) + } + // Extract to a GCS object as CSV. + bucketName := testutil.ProjID() + objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID) + uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName) + defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx) + gr := NewGCSReference(uri) + gr.DestinationFormat = CSV + e := table.ExtractorTo(gr) + job, err := e.Run(ctx) + if err != nil { + t.Fatal(err) + } + conf, err := job.Config() + if err != nil { + t.Fatal(err) + } + config, ok := conf.(*ExtractConfig) + if !ok { + t.Fatalf("got %T, want ExtractConfig", conf) + } + diff := testutil.Diff(config, &e.ExtractConfig, + cmp.AllowUnexported(Table{}), + cmpopts.IgnoreUnexported(Client{})) + if diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + + edc := &ExternalDataConfig{ + SourceFormat: CSV, + SourceURIs: []string{uri}, + Schema: schema, + Options: &CSVOptions{SkipLeadingRows: 1}, + } + // Query that CSV file directly. + q := client.Query("SELECT * FROM csv") + q.TableDefinitions = map[string]ExternalData{"csv": edc} + wantRows := [][]Value{ + []Value{"a", int64(1)}, + []Value{"b", int64(2)}, + []Value{"c", int64(3)}, + } + iter, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "external query", iter, wantRows) + + // Make a table pointing to the file, and query it. + // BigQuery does not allow a Table.Read on an external table. 
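+ // (Reads must instead go through a query job, as below: the SELECT
+ // consults the table's ExternalDataConfig and scans the GCS file.)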
+ table = dataset.Table(tableIDs.New()) + err = table.Create(context.Background(), &TableMetadata{ + Schema: schema, + ExpirationTime: testTableExpiration, + ExternalDataConfig: edc, + }) + if err != nil { + t.Fatal(err) + } + q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID)) + iter, err = q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "external table", iter, wantRows) + + // While we're here, check that the table metadata is correct. + md, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + // One difference: since BigQuery returns the schema as part of the ordinary + // table metadata, it does not populate ExternalDataConfig.Schema. + md.ExternalDataConfig.Schema = md.Schema + if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" { + t.Errorf("got=-, want=+\n%s", diff) + } +} + +func TestIntegration_ReadNullIntoStruct(t *testing.T) { + // Reading a null into a struct field should return an error (not panic). + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + upl := table.Uploader() + row := &ValuesSaver{ + Schema: schema, + Row: []Value{nil, []Value{}, []Value{nil}}, + } + if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil { + t.Fatal(putError(err)) + } + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + q := client.Query(fmt.Sprintf("select name from %s", table.TableID)) + q.DefaultProjectID = dataset.ProjectID + q.DefaultDatasetID = dataset.DatasetID + it, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + type S struct{ Name string } + var s S + if err := it.Next(&s); err == nil { + t.Fatal("got nil, want error") + } +} + +const ( + stdName = "`bigquery-public-data.samples.shakespeare`" + legacyName = "[bigquery-public-data:samples.shakespeare]" +) + +// These tests exploit the fact that the two SQL versions have different syntaxes for +// fully-qualified table names. +var useLegacySqlTests = []struct { + t string // name of table + std, legacy bool // use standard/legacy SQL + err bool // do we expect an error? +}{ + {t: legacyName, std: false, legacy: true, err: false}, + {t: legacyName, std: true, legacy: false, err: true}, + {t: legacyName, std: false, legacy: false, err: true}, // standard SQL is default + {t: legacyName, std: true, legacy: true, err: true}, + {t: stdName, std: false, legacy: true, err: true}, + {t: stdName, std: true, legacy: false, err: false}, + {t: stdName, std: false, legacy: false, err: false}, // standard SQL is default + {t: stdName, std: true, legacy: true, err: true}, +} + +func TestIntegration_QueryUseLegacySQL(t *testing.T) { + // Test the UseLegacySQL and UseStandardSQL options for queries. + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + for _, test := range useLegacySqlTests { + q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t)) + q.UseStandardSQL = test.std + q.UseLegacySQL = test.legacy + _, err := q.Read(ctx) + gotErr := err != nil + if gotErr && !test.err { + t.Errorf("%+v:\nunexpected error: %v", test, err) + } else if !gotErr && test.err { + t.Errorf("%+v:\nsucceeded, but want error", test) + } + } +} + +func TestIntegration_TableUseLegacySQL(t *testing.T) { + // Test UseLegacySQL and UseStandardSQL for Table.Create. 
+ if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + for i, test := range useLegacySqlTests { + view := dataset.Table(fmt.Sprintf("t_view_%d", i)) + tm := &TableMetadata{ + ViewQuery: fmt.Sprintf("SELECT word from %s", test.t), + UseStandardSQL: test.std, + UseLegacySQL: test.legacy, + } + err := view.Create(ctx, tm) + gotErr := err != nil + if gotErr && !test.err { + t.Errorf("%+v:\nunexpected error: %v", test, err) + } else if !gotErr && test.err { + t.Errorf("%+v:\nsucceeded, but want error", test) + } + view.Delete(ctx) + } +} + +func TestIntegration_ListJobs(t *testing.T) { + // It's difficult to test the list of jobs, because we can't easily + // control what's in it. Also, there are many jobs in the test project, + // and it takes considerable time to list them all. + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + + // About all we can do is list a few jobs. + const max = 20 + var jobs []*Job + it := client.Jobs(ctx) + for { + job, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + jobs = append(jobs, job) + if len(jobs) >= max { + break + } + } + // We expect that there is at least one job in the last few months. + if len(jobs) == 0 { + t.Fatal("did not get any jobs") + } +} + +// Creates a new, temporary table with a unique name and the given schema. +func newTable(t *testing.T, s Schema) *Table { + table := dataset.Table(tableIDs.New()) + err := table.Create(context.Background(), &TableMetadata{ + Schema: s, + ExpirationTime: testTableExpiration, + }) + if err != nil { + t.Fatal(err) + } + return table +} + +func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) { + if msg2, ok := compareRead(it, want); !ok { + t.Errorf("%s: %s", msg, msg2) + } +} + +func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) { + got, err := readAll(it) + if err != nil { + return err.Error(), false + } + if len(got) != len(want) { + return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false + } + sort.Sort(byCol0(got)) + for i, r := range got { + gotRow := []Value(r) + wantRow := want[i] + if !testutil.Equal(gotRow, wantRow) { + return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false + } + } + return "", true +} + +func readAll(it *RowIterator) ([][]Value, error) { + var rows [][]Value + for { + var vals []Value + err := it.Next(&vals) + if err == iterator.Done { + return rows, nil + } + if err != nil { + return nil, err + } + rows = append(rows, vals) + } +} + +type byCol0 [][]Value + +func (b byCol0) Len() int { return len(b) } +func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byCol0) Less(i, j int) bool { + switch a := b[i][0].(type) { + case string: + return a < b[j][0].(string) + case civil.Date: + return a.Before(b[j][0].(civil.Date)) + default: + panic("unknown type") + } +} + +func hasStatusCode(err error, code int) bool { + if e, ok := err.(*googleapi.Error); ok && e.Code == code { + return true + } + return false +} + +// wait polls the job until it is complete or an error is returned. 
+func wait(ctx context.Context, job *Job) error {
+ status, err := job.Wait(ctx)
+ if err != nil {
+ return err
+ }
+ if status.Err() != nil {
+ return fmt.Errorf("job status error: %#v", status.Err())
+ }
+ if status.Statistics == nil {
+ return errors.New("nil Statistics")
+ }
+ if status.Statistics.EndTime.IsZero() {
+ return errors.New("EndTime is zero")
+ }
+ if status.Statistics.Details == nil {
+ return errors.New("nil Statistics.Details")
+ }
+ return nil
+}
+
+// waitForRow polls the table until it contains a row.
+// TODO(jba): use internal.Retry.
+func waitForRow(ctx context.Context, table *Table) error {
+ for {
+ it := table.Read(ctx)
+ var v []Value
+ err := it.Next(&v)
+ if err == nil {
+ return nil
+ }
+ if err != iterator.Done {
+ return err
+ }
+ time.Sleep(1 * time.Second)
+ }
+}
+
+func putError(err error) string {
+ pme, ok := err.(PutMultiError)
+ if !ok {
+ return err.Error()
+ }
+ var msgs []string
+ for _, err := range pme {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "\n")
+}
diff --git a/vendor/cloud.google.com/go/bigquery/iterator.go b/vendor/cloud.google.com/go/bigquery/iterator.go
new file mode 100644
index 0000000..5a82edc
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/iterator.go
@@ -0,0 +1,206 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+ "fmt"
+ "reflect"
+
+ "golang.org/x/net/context"
+ bq "google.golang.org/api/bigquery/v2"
+ "google.golang.org/api/iterator"
+)
+
+func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
+ it := &RowIterator{
+ ctx: ctx,
+ table: t,
+ pf: pf,
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+ it.fetch,
+ func() int { return len(it.rows) },
+ func() interface{} { r := it.rows; it.rows = nil; return r })
+ return it
+}
+
+// A RowIterator provides access to the result of a BigQuery lookup.
+type RowIterator struct {
+ ctx context.Context
+ table *Table
+ pf pageFetcher
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // StartIndex can be set before the first call to Next. If PageInfo().Token
+ // is also set, StartIndex is ignored.
+ StartIndex uint64
+
+ rows [][]Value
+
+ schema Schema // populated on first call to fetch
+ structLoader structLoader // used to populate a pointer to a struct
+}
+
+// Next loads the next row into dst. Its return value is iterator.Done if there
+// are no more results. Once Next returns iterator.Done, all subsequent calls
+// will return iterator.Done.
+//
+// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
+//
+// If dst is a *[]Value, it will be set to a new []Value whose i'th element
+// will be populated with the i'th column of the row.
+//
+// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
+// for each schema column name, the map key of that name will be set to the column's
+// value. STRUCT types (RECORD types or nested schemas) become nested maps.
+//
+// If dst is a pointer to a struct, each column in the schema will be matched
+// with an exported field of the struct that has the same name, ignoring case.
+// Unmatched schema columns and struct fields will be ignored.
+//
+// Each BigQuery column type corresponds to one or more Go types; a matching struct
+// field must be of the correct type. The correspondences are:
+//
+// STRING string
+// BOOL bool
+// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
+// FLOAT float32, float64
+// BYTES []byte
+// TIMESTAMP time.Time
+// DATE civil.Date
+// TIME civil.Time
+// DATETIME civil.DateTime
+//
+// A repeated field corresponds to a slice or array of the element type. A STRUCT
+// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
+// All calls to Next on the same iterator must use the same struct type.
+//
+// It is an error to attempt to read a BigQuery NULL value into a struct field.
+// If your table contains NULLs, use a *[]Value or *map[string]Value.
+func (it *RowIterator) Next(dst interface{}) error {
+ var vl ValueLoader
+ switch dst := dst.(type) {
+ case ValueLoader:
+ vl = dst
+ case *[]Value:
+ vl = (*valueList)(dst)
+ case *map[string]Value:
+ vl = (*valueMap)(dst)
+ default:
+ if !isStructPtr(dst) {
+ return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
+ }
+ }
+ if err := it.nextFunc(); err != nil {
+ return err
+ }
+ row := it.rows[0]
+ it.rows = it.rows[1:]
+
+ if vl == nil {
+ // This can only happen if dst is a pointer to a struct. We couldn't
+ // set vl above because we need the schema.
+ if err := it.structLoader.set(dst, it.schema); err != nil {
+ return err
+ }
+ vl = &it.structLoader
+ }
+ return vl.Load(row, it.schema)
+}
+
+func isStructPtr(x interface{}) bool {
+ t := reflect.TypeOf(x)
+ return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
+ res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.rows = append(it.rows, res.rows...)
+ it.schema = res.schema
+ return res.pageToken, nil
+}
+
+// A pageFetcher returns a page of rows from a destination table.
+type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
+
+type fetchPageResult struct {
+ pageToken string
+ rows [][]Value
+ totalRows uint64
+ schema Schema
+}
+
+// fetchPage gets a page of rows from t.
+func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
+ // Fetch the table schema in the background, if necessary.
+ errc := make(chan error, 1)
+ if schema != nil {
+ errc <- nil
+ } else {
+ go func() {
+ var bqt *bq.Table
+ err := runWithRetry(ctx, func() (err error) {
+ bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
+ Fields("schema").
+ Context(ctx).
+ Do() + return err + }) + if err == nil && bqt.Schema != nil { + schema = bqToSchema(bqt.Schema) + } + errc <- err + }() + } + call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID) + setClientHeader(call.Header()) + if pageToken != "" { + call.PageToken(pageToken) + } else { + call.StartIndex(startIndex) + } + if pageSize > 0 { + call.MaxResults(pageSize) + } + var res *bq.TableDataList + err := runWithRetry(ctx, func() (err error) { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + err = <-errc + if err != nil { + return nil, err + } + rows, err := convertRows(res.Rows, schema) + if err != nil { + return nil, err + } + return &fetchPageResult{ + pageToken: res.PageToken, + rows: rows, + totalRows: uint64(res.TotalRows), + schema: schema, + }, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/iterator_test.go b/vendor/cloud.google.com/go/bigquery/iterator_test.go new file mode 100644 index 0000000..1ecfb53 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/iterator_test.go @@ -0,0 +1,361 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "testing" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +type fetchResponse struct { + result *fetchPageResult // The result to return. + err error // The error to return. +} + +// pageFetcherStub services fetch requests by returning data from an in-memory list of values. 
+type pageFetcherStub struct {
+ fetchResponses map[string]fetchResponse
+ err error
+}
+
+func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
+ call, ok := pf.fetchResponses[pageToken]
+ if !ok {
+ pf.err = fmt.Errorf("unexpected page token: %q", pageToken)
+ }
+ return call.result, call.err
+}
+
+func TestIterator(t *testing.T) {
+ var (
+ iiSchema = Schema{
+ {Type: IntegerFieldType},
+ {Type: IntegerFieldType},
+ }
+ siSchema = Schema{
+ {Type: StringFieldType},
+ {Type: IntegerFieldType},
+ }
+ )
+ fetchFailure := errors.New("fetch failure")
+
+ testCases := []struct {
+ desc string
+ pageToken string
+ fetchResponses map[string]fetchResponse
+ want [][]Value
+ wantErr error
+ wantSchema Schema
+ }{
+ {
+ desc: "Iteration over single empty page",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{},
+ schema: Schema{},
+ },
+ },
+ },
+ want: [][]Value{},
+ wantSchema: Schema{},
+ },
+ {
+ desc: "Iteration over single page",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ schema: iiSchema,
+ },
+ },
+ },
+ want: [][]Value{{1, 2}, {11, 12}},
+ wantSchema: iiSchema,
+ },
+ {
+ desc: "Iteration over single page with different schema",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{{"1", 2}, {"11", 12}},
+ schema: siSchema,
+ },
+ },
+ },
+ want: [][]Value{{"1", 2}, {"11", 12}},
+ wantSchema: siSchema,
+ },
+ {
+ desc: "Iteration over two pages",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "a",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ schema: iiSchema,
+ },
+ },
+ "a": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{{101, 102}, {111, 112}},
+ schema: iiSchema,
+ },
+ },
+ },
+ want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+ wantSchema: iiSchema,
+ },
+ {
+ desc: "Server response includes empty page",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "a",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ schema: iiSchema,
+ },
+ },
+ "a": {
+ result: &fetchPageResult{
+ pageToken: "b",
+ rows: [][]Value{},
+ schema: iiSchema,
+ },
+ },
+ "b": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{{101, 102}, {111, 112}},
+ schema: iiSchema,
+ },
+ },
+ },
+ want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+ wantSchema: iiSchema,
+ },
+ {
+ desc: "Fetch error",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "a",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ schema: iiSchema,
+ },
+ },
+ "a": {
+ // We return some data from this fetch, but also an error.
+ // So the end result should include only data from the previous fetch.
+ err: fetchFailure,
+ result: &fetchPageResult{
+ pageToken: "b",
+ rows: [][]Value{{101, 102}, {111, 112}},
+ schema: iiSchema,
+ },
+ },
+ },
+ want: [][]Value{{1, 2}, {11, 12}},
+ wantErr: fetchFailure,
+ wantSchema: iiSchema,
+ },
+
+ {
+ desc: "Skip over an entire page",
+ pageToken: "a",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "a",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ schema: iiSchema,
+ },
+ },
+ "a": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{{101, 102}, {111, 112}},
+ schema: iiSchema,
+ },
+ },
+ },
+ want: [][]Value{{101, 102}, {111, 112}},
+ wantSchema: iiSchema,
+ },
+
+ {
+ desc: "Skip beyond all data",
+ pageToken: "b",
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "a",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ schema: iiSchema,
+ },
+ },
+ "a": {
+ result: &fetchPageResult{
+ pageToken: "b",
+ rows: [][]Value{{101, 102}, {111, 112}},
+ schema: iiSchema,
+ },
+ },
+ "b": {
+ result: &fetchPageResult{},
+ },
+ },
+ // In this test case, Next returns iterator.Done on its first call,
+ // so we never read any values.
+ want: [][]Value{},
+ wantSchema: Schema{},
+ },
+ }
+
+ for _, tc := range testCases {
+ pf := &pageFetcherStub{
+ fetchResponses: tc.fetchResponses,
+ }
+ it := newRowIterator(context.Background(), nil, pf.fetchPage)
+ it.PageInfo().Token = tc.pageToken
+ values, schema, err := consumeRowIterator(it)
+ if err != tc.wantErr {
+ t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
+ }
+ if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
+ t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
+ }
+ if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
+ t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
+ }
+ }
+}
+
+type valueListWithSchema struct {
+ vals valueList
+ schema Schema
+}
+
+func (v *valueListWithSchema) Load(vs []Value, s Schema) error {
+ v.vals.Load(vs, s)
+ v.schema = s
+ return nil
+}
+
+// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
+func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
+ var got [][]Value
+ var schema Schema
+ for {
+ var vls valueListWithSchema
+ err := it.Next(&vls)
+ if err == iterator.Done {
+ return got, schema, nil
+ }
+ if err != nil {
+ return got, schema, err
+ }
+ got = append(got, vls.vals)
+ schema = vls.schema
+ }
+}
+
+func TestNextDuringErrorState(t *testing.T) {
+ pf := &pageFetcherStub{
+ fetchResponses: map[string]fetchResponse{
+ "": {err: errors.New("bang")},
+ },
+ }
+ it := newRowIterator(context.Background(), nil, pf.fetchPage)
+ var vals []Value
+ if err := it.Next(&vals); err == nil {
+ t.Errorf("Expected error after calling Next")
+ }
+ if err := it.Next(&vals); err == nil {
+ t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
+ }
+}
+
+func TestNextAfterFinished(t *testing.T) {
+ testCases := []struct {
+ fetchResponses map[string]fetchResponse
+ want [][]Value
+ }{
+ {
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{{1, 2}, {11, 12}},
+ },
+ },
+ },
+ want: [][]Value{{1, 2}, {11, 12}},
+ },
+ {
+ fetchResponses: map[string]fetchResponse{
+ "": {
+ result: &fetchPageResult{
+ pageToken: "",
+ rows: [][]Value{},
+ },
+ },
+ },
+ want: [][]Value{},
+ },
+ }
+
+ for _, tc := range testCases {
+ pf := &pageFetcherStub{
+ fetchResponses: tc.fetchResponses,
+ }
+ it := newRowIterator(context.Background(), nil, pf.fetchPage)
+
+ values, _, err := consumeRowIterator(it)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
+ t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
+ }
+ // Try calling Next again; the iterator is exhausted.
+ var vals []Value
+ if err := it.Next(&vals); err != iterator.Done {
+ t.Errorf("Expected Done calling Next when there are no more values")
+ }
+ }
+}
+
+func TestIteratorNextTypes(t *testing.T) {
+ it := newRowIterator(context.Background(), nil, nil)
+ for _, v := range []interface{}{3, "s", []int{}, &[]int{},
+ map[string]Value{}, &map[string]interface{}{},
+ struct{}{},
+ } {
+ if err := it.Next(v); err == nil {
+ t.Errorf("%v: want error, got nil", v)
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go
new file mode 100644
index 0000000..91040ed
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/job.go
@@ -0,0 +1,669 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+ "errors"
+ "fmt"
+ "math/rand"
+ "os"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/internal"
+ gax "github.com/googleapis/gax-go"
+ "golang.org/x/net/context"
+ bq "google.golang.org/api/bigquery/v2"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/api/iterator"
+)
+
+// A Job represents an operation which has been submitted to BigQuery for processing.
+type Job struct { + c *Client + projectID string + jobID string + + config *bq.JobConfiguration + lastStatus *JobStatus +} + +// JobFromID creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package. For example, the job may have +// been created in the BigQuery console. +func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { + bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics") + if err != nil { + return nil, err + } + return bqToJob(bqjob, c) +} + +// ID returns the job's ID. +func (j *Job) ID() string { + return j.jobID +} + +// State is one of a sequence of states that a Job progresses through as it is processed. +type State int + +const ( + StateUnspecified State = iota // used only as a default in JobIterator + Pending + Running + Done +) + +// JobStatus contains the current State of a job, and errors encountered while processing that job. +type JobStatus struct { + State State + + err error + + // All errors encountered during the running of the job. + // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. + Errors []*Error + + // Statistics about the job. + Statistics *JobStatistics +} + +// JobConfig contains configuration information for a job. It is implemented by +// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig. +type JobConfig interface { + isJobConfig() +} + +func (*CopyConfig) isJobConfig() {} +func (*ExtractConfig) isJobConfig() {} +func (*LoadConfig) isJobConfig() {} +func (*QueryConfig) isJobConfig() {} + +// Config returns the configuration information for j. +func (j *Job) Config() (JobConfig, error) { + return bqToJobConfig(j.config, j.c) +} + +func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) { + switch { + case q == nil: + return nil, nil + case q.Copy != nil: + return bqToCopyConfig(q, c), nil + case q.Extract != nil: + return bqToExtractConfig(q, c), nil + case q.Load != nil: + return bqToLoadConfig(q, c), nil + case q.Query != nil: + return bqToQueryConfig(q, c) + default: + return nil, nil + } +} + +// JobIDConfig describes how to create an ID for a job. +type JobIDConfig struct { + // JobID is the ID to use for the job. If empty, a random job ID will be generated. + JobID string + + // If AddJobIDSuffix is true, then a random string will be appended to JobID. + AddJobIDSuffix bool +} + +// createJobRef creates a JobReference. +// projectID must be non-empty. +func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference { + // We don't check whether projectID is empty; the server will return an + // error when it encounters the resulting JobReference. + jr := &bq.JobReference{ProjectId: projectID} + if j.JobID == "" { + jr.JobId = randomIDFn() + } else if j.AddJobIDSuffix { + jr.JobId = j.JobID + "-" + randomIDFn() + } else { + jr.JobId = j.JobID + } + return jr +} + +const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var ( + rngMu sync.Mutex + rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid()))) +) + +// For testing. +var randomIDFn = randomID + +// As of August 2017, the BigQuery service uses 27 alphanumeric characters for +// suffixes. +const randomIDLen = 27 + +func randomID() string { + // This is used for both job IDs and insert IDs. 
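+ // rand.Rand is not safe for concurrent use, so rngMu serializes
+ // access to the shared package-level rng.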
+ var b [randomIDLen]byte
+ rngMu.Lock()
+ for i := 0; i < len(b); i++ {
+ b[i] = alphanum[rng.Intn(len(alphanum))]
+ }
+ rngMu.Unlock()
+ return string(b[:])
+}
+
+// Done reports whether the job has completed.
+// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
+func (s *JobStatus) Done() bool {
+ return s.State == Done
+}
+
+// Err returns the error that caused the job to complete unsuccessfully (if any).
+func (s *JobStatus) Err() error {
+ return s.err
+}
+
+// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
+func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
+ bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
+ if err != nil {
+ return nil, err
+ }
+ if err := j.setStatus(bqjob.Status); err != nil {
+ return nil, err
+ }
+ j.setStatistics(bqjob.Statistics, j.c)
+ return j.lastStatus, nil
+}
+
+// LastStatus returns the most recently retrieved status of the job. The status is
+// retrieved when a new job is created, or when JobFromID or Job.Status is called.
+// Call Job.Status to get the most up-to-date information about a job.
+func (j *Job) LastStatus() *JobStatus {
+ return j.lastStatus
+}
+
+// Cancel requests that a job be cancelled. This method returns without waiting for
+// cancellation to take effect. To check whether the job has terminated, use Job.Status.
+// Cancelled jobs may still incur costs.
+func (j *Job) Cancel(ctx context.Context) error {
+ // Jobs.Cancel returns a job entity, but the only relevant piece of
+ // data it may contain (the status of the job) is unreliable. From the
+ // docs: "This call will return immediately, and the client will need
+ // to poll for the job status to see if the cancel completed
+ // successfully". So it would be misleading to return a status.
+ call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
+ Fields(). // We don't need any of the response data.
+ Context(ctx)
+ setClientHeader(call.Header())
+ return runWithRetry(ctx, func() error {
+ _, err := call.Do()
+ return err
+ })
+}
+
+// Wait blocks until the job or the context is done. It returns the final status
+// of the job.
+// If an error occurs while retrieving the status, Wait returns that error. But
+// Wait returns nil if the status was retrieved successfully, even if
+// status.Err() != nil. So callers must check both errors. See the example.
+func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
+ if j.isQuery() {
+ // We can avoid polling for query jobs.
+ if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
+ return nil, err
+ }
+ // Note: extra RPC even if you just want to wait for the query to finish.
+ js, err := j.Status(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return js, nil
+ }
+ // Non-query jobs must poll.
+ var js *JobStatus
+ err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
+ js, err = j.Status(ctx)
+ if err != nil {
+ return true, err
+ }
+ if js.Done() {
+ return true, nil
+ }
+ return false, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return js, nil
+}
+
+// Read fetches the results of a query job.
+// If j is not a query job, Read returns an error.
+func (j *Job) Read(ctx context.Context) (*RowIterator, error) { + return j.read(ctx, j.waitForQuery, fetchPage) +} + +func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) { + if !j.isQuery() { + return nil, errors.New("bigquery: cannot read from a non-query job") + } + destTable := j.config.Query.DestinationTable + // The destination table should only be nil if there was a query error. + if destTable == nil { + return nil, errors.New("bigquery: query job missing destination table") + } + projectID := destTable.ProjectId + schema, err := waitForQuery(ctx, projectID) + if err != nil { + return nil, err + } + dt := bqToTable(destTable, j.c) + it := newRowIterator(ctx, dt, pf) + it.schema = schema + return it, nil +} + +// waitForQuery waits for the query job to complete and returns its schema. +func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) { + // Use GetQueryResults only to wait for completion, not to read results. + call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0) + setClientHeader(call.Header()) + backoff := gax.Backoff{ + Initial: 1 * time.Second, + Multiplier: 2, + Max: 60 * time.Second, + } + var res *bq.GetQueryResultsResponse + err := internal.Retry(ctx, backoff, func() (stop bool, err error) { + res, err = call.Do() + if err != nil { + return !retryableError(err), err + } + if !res.JobComplete { // GetQueryResults may return early without error; retry. + return false, nil + } + return true, nil + }) + if err != nil { + return nil, err + } + return bqToSchema(res.Schema), nil +} + +// JobStatistics contains statistics about a job. +type JobStatistics struct { + CreationTime time.Time + StartTime time.Time + EndTime time.Time + TotalBytesProcessed int64 + + Details Statistics +} + +// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics. +type Statistics interface { + implementsStatistics() +} + +// ExtractStatistics contains statistics about an extract job. +type ExtractStatistics struct { + // The number of files per destination URI or URI pattern specified in the + // extract configuration. These values will be in the same order as the + // URIs specified in the 'destinationUris' field. + DestinationURIFileCounts []int64 +} + +// LoadStatistics contains statistics about a load job. +type LoadStatistics struct { + // The number of bytes of source data in a load job. + InputFileBytes int64 + + // The number of source files in a load job. + InputFiles int64 + + // Size of the loaded data in bytes. Note that while a load job is in the + // running state, this value may change. + OutputBytes int64 + + // The number of rows imported in a load job. Note that while an import job is + // in the running state, this value may change. + OutputRows int64 +} + +// QueryStatistics contains statistics about a query job. +type QueryStatistics struct { + // Billing tier for the job. + BillingTier int64 + + // Whether the query result was fetched from the query cache. + CacheHit bool + + // The type of query statement, if valid. + StatementType string + + // Total bytes billed for the job. + TotalBytesBilled int64 + + // Total bytes processed for the job. + TotalBytesProcessed int64 + + // Describes execution plan for the query. + QueryPlan []*ExplainQueryStage + + // The number of rows affected by a DML statement. Present only for DML + // statements INSERT, UPDATE or DELETE. 
+ NumDMLAffectedRows int64 + + // ReferencedTables: [Output-only, Experimental] Referenced tables for + // the job. Queries that reference more than 50 tables will not have a + // complete list. + ReferencedTables []*Table + + // The schema of the results. Present only for successful dry run of + // non-legacy SQL queries. + Schema Schema + + // Standard SQL: list of undeclared query parameter names detected during a + // dry run validation. + UndeclaredQueryParameterNames []string +} + +// ExplainQueryStage describes one stage of a query. +type ExplainQueryStage struct { + // Relative amount of the total time the average shard spent on CPU-bound tasks. + ComputeRatioAvg float64 + + // Relative amount of the total time the slowest shard spent on CPU-bound tasks. + ComputeRatioMax float64 + + // Unique ID for stage within plan. + ID int64 + + // Human-readable name for stage. + Name string + + // Relative amount of the total time the average shard spent reading input. + ReadRatioAvg float64 + + // Relative amount of the total time the slowest shard spent reading input. + ReadRatioMax float64 + + // Number of records read into the stage. + RecordsRead int64 + + // Number of records written by the stage. + RecordsWritten int64 + + // Current status for the stage. + Status string + + // List of operations within the stage in dependency order (approximately + // chronological). + Steps []*ExplainQueryStep + + // Relative amount of the total time the average shard spent waiting to be scheduled. + WaitRatioAvg float64 + + // Relative amount of the total time the slowest shard spent waiting to be scheduled. + WaitRatioMax float64 + + // Relative amount of the total time the average shard spent on writing output. + WriteRatioAvg float64 + + // Relative amount of the total time the slowest shard spent on writing output. + WriteRatioMax float64 +} + +// ExplainQueryStep describes one step of a query stage. +type ExplainQueryStep struct { + // Machine-readable operation type. + Kind string + + // Human-readable stage descriptions. + Substeps []string +} + +func (*ExtractStatistics) implementsStatistics() {} +func (*LoadStatistics) implementsStatistics() {} +func (*QueryStatistics) implementsStatistics() {} + +// Jobs lists jobs within a project. +func (c *Client) Jobs(ctx context.Context) *JobIterator { + it := &JobIterator{ + ctx: ctx, + c: c, + ProjectID: c.projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// JobIterator iterates over jobs in a project. +type JobIterator struct { + ProjectID string // Project ID of the jobs to list. Default is the client's project. + AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller. + State State // List only jobs in the given state. Defaults to all states. 
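+
+ // A listing sketch (assumes ctx and a *Client c); iteration ends when Next
+ // returns iterator.Done:
+ //
+ //   it := c.Jobs(ctx)
+ //   it.State = Done
+ //   for {
+ //     job, err := it.Next()
+ //     if err == iterator.Done {
+ //       break
+ //     }
+ //     if err != nil {
+ //       // handle error
+ //     }
+ //     _ = job.LastStatus() // inspect the listed job
+ //   }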
+ + ctx context.Context + c *Client + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Job +} + +func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *JobIterator) Next() (*Job, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) { + var st string + switch it.State { + case StateUnspecified: + st = "" + case Pending: + st = "pending" + case Running: + st = "running" + case Done: + st = "done" + default: + return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State) + } + + req := it.c.bqs.Jobs.List(it.ProjectID). + Context(it.ctx). + PageToken(pageToken). + Projection("full"). + AllUsers(it.AllUsers) + if st != "" { + req.StateFilter(st) + } + setClientHeader(req.Header()) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + res, err := req.Do() + if err != nil { + return "", err + } + for _, j := range res.Jobs { + job, err := convertListedJob(j, it.c) + if err != nil { + return "", err + } + it.items = append(it.items, job) + } + return res.NextPageToken, nil +} + +func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) { + return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c) +} + +func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) { + var job *bq.Job + call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx) + if len(fields) > 0 { + call = call.Fields(fields...) + } + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() (err error) { + job, err = call.Do() + return err + }) + if err != nil { + return nil, err + } + return job, nil +} + +func bqToJob(q *bq.Job, c *Client) (*Job, error) { + return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c) +} + +func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) { + j := &Job{ + projectID: qr.ProjectId, + jobID: qr.JobId, + c: c, + } + j.setConfig(qc) + if err := j.setStatus(qs); err != nil { + return nil, err + } + j.setStatistics(qt, c) + return j, nil +} + +func (j *Job) setConfig(config *bq.JobConfiguration) { + if config == nil { + return + } + j.config = config +} + +func (j *Job) isQuery() bool { + return j.config != nil && j.config.Query != nil +} + +var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} + +func (j *Job) setStatus(qs *bq.JobStatus) error { + if qs == nil { + return nil + } + state, ok := stateMap[qs.State] + if !ok { + return fmt.Errorf("unexpected job state: %v", qs.State) + } + j.lastStatus = &JobStatus{ + State: state, + err: nil, + } + if err := bqToError(qs.ErrorResult); state == Done && err != nil { + j.lastStatus.err = err + } + for _, ep := range qs.Errors { + j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep)) + } + return nil +} + +func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) { + if s == nil || j.lastStatus == nil { + return + } + js := &JobStatistics{ + CreationTime: unixMillisToTime(s.CreationTime), + StartTime: unixMillisToTime(s.StartTime), + EndTime: unixMillisToTime(s.EndTime), + TotalBytesProcessed: s.TotalBytesProcessed, + } + switch { + case s.Extract != nil: + js.Details = &ExtractStatistics{ + DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts), + } + case s.Load != nil: + js.Details = 
&LoadStatistics{ + InputFileBytes: s.Load.InputFileBytes, + InputFiles: s.Load.InputFiles, + OutputBytes: s.Load.OutputBytes, + OutputRows: s.Load.OutputRows, + } + case s.Query != nil: + var names []string + for _, qp := range s.Query.UndeclaredQueryParameters { + names = append(names, qp.Name) + } + var tables []*Table + for _, tr := range s.Query.ReferencedTables { + tables = append(tables, bqToTable(tr, c)) + } + js.Details = &QueryStatistics{ + BillingTier: s.Query.BillingTier, + CacheHit: s.Query.CacheHit, + StatementType: s.Query.StatementType, + TotalBytesBilled: s.Query.TotalBytesBilled, + TotalBytesProcessed: s.Query.TotalBytesProcessed, + NumDMLAffectedRows: s.Query.NumDmlAffectedRows, + QueryPlan: queryPlanFromProto(s.Query.QueryPlan), + Schema: bqToSchema(s.Query.Schema), + ReferencedTables: tables, + UndeclaredQueryParameterNames: names, + } + } + j.lastStatus.Statistics = js +} + +func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage { + var res []*ExplainQueryStage + for _, s := range stages { + var steps []*ExplainQueryStep + for _, p := range s.Steps { + steps = append(steps, &ExplainQueryStep{ + Kind: p.Kind, + Substeps: p.Substeps, + }) + } + res = append(res, &ExplainQueryStage{ + ComputeRatioAvg: s.ComputeRatioAvg, + ComputeRatioMax: s.ComputeRatioMax, + ID: s.Id, + Name: s.Name, + ReadRatioAvg: s.ReadRatioAvg, + ReadRatioMax: s.ReadRatioMax, + RecordsRead: s.RecordsRead, + RecordsWritten: s.RecordsWritten, + Status: s.Status, + Steps: steps, + WaitRatioAvg: s.WaitRatioAvg, + WaitRatioMax: s.WaitRatioMax, + WriteRatioAvg: s.WriteRatioAvg, + WriteRatioMax: s.WriteRatioMax, + }) + } + return res +} diff --git a/vendor/cloud.google.com/go/bigquery/job_test.go b/vendor/cloud.google.com/go/bigquery/job_test.go new file mode 100644 index 0000000..564c22a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/job_test.go @@ -0,0 +1,80 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
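+
+// JobStatistics.Details (populated by setStatistics in job.go above) is one of
+// *QueryStatistics, *LoadStatistics or *ExtractStatistics; a consumer sketch
+// (assumes a *JobStatus s whose Statistics field has been set):
+//
+//   if stats := s.Statistics; stats != nil {
+//     switch d := stats.Details.(type) {
+//     case *QueryStatistics:
+//       _ = d.TotalBytesBilled
+//     case *LoadStatistics:
+//       _ = d.OutputRows
+//     case *ExtractStatistics:
+//       _ = d.DestinationURIFileCounts
+//     }
+//   }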
+ +package bigquery + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +func TestCreateJobRef(t *testing.T) { + defer fixRandomID("RANDOM")() + for _, test := range []struct { + jobID string + addJobIDSuffix bool + want string + }{ + { + jobID: "foo", + addJobIDSuffix: false, + want: "foo", + }, + { + jobID: "", + addJobIDSuffix: false, + want: "RANDOM", + }, + { + jobID: "", + addJobIDSuffix: true, // irrelevant + want: "RANDOM", + }, + { + jobID: "foo", + addJobIDSuffix: true, + want: "foo-RANDOM", + }, + } { + jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix} + jr := jc.createJobRef("projectID") + got := jr.JobId + if got != test.want { + t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want) + } + } +} + +func fixRandomID(s string) func() { + prev := randomIDFn + randomIDFn = func() string { return s } + return func() { randomIDFn = prev } +} + +func checkJob(t *testing.T, i int, got, want *bq.Job) { + if got.JobReference == nil { + t.Errorf("#%d: empty job reference", i) + return + } + if got.JobReference.JobId == "" { + t.Errorf("#%d: empty job ID", i) + return + } + d := testutil.Diff(got, want) + if d != "" { + t.Errorf("#%d: (got=-, want=+) %s", i, d) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/load.go b/vendor/cloud.google.com/go/bigquery/load.go new file mode 100644 index 0000000..10493e1 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load.go @@ -0,0 +1,126 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "io" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// LoadConfig holds the configuration for a load job. +type LoadConfig struct { + // Src is the source from which data will be loaded. + Src LoadSource + + // Dst is the table into which the data will be loaded. + Dst *Table + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteAppend. + WriteDisposition TableWriteDisposition + + // The labels associated with this job. + Labels map[string]string + + // If non-nil, the destination table is partitioned by time. 
+ TimePartitioning *TimePartitioning +} + +func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) { + config := &bq.JobConfiguration{ + Labels: l.Labels, + Load: &bq.JobConfigurationLoad{ + CreateDisposition: string(l.CreateDisposition), + WriteDisposition: string(l.WriteDisposition), + DestinationTable: l.Dst.toBQ(), + TimePartitioning: l.TimePartitioning.toBQ(), + }, + } + media := l.Src.populateLoadConfig(config.Load) + return config, media +} + +func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig { + lc := &LoadConfig{ + Labels: q.Labels, + CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition), + WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition), + Dst: bqToTable(q.Load.DestinationTable, c), + TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning), + } + var fc *FileConfig + if len(q.Load.SourceUris) == 0 { + s := NewReaderSource(nil) + fc = &s.FileConfig + lc.Src = s + } else { + s := NewGCSReference(q.Load.SourceUris...) + fc = &s.FileConfig + lc.Src = s + } + bqPopulateFileConfig(q.Load, fc) + return lc +} + +// A Loader loads data from Google Cloud Storage into a BigQuery table. +type Loader struct { + JobIDConfig + LoadConfig + c *Client +} + +// A LoadSource represents a source of data that can be loaded into +// a BigQuery table. +// +// This package defines two LoadSources: GCSReference, for Google Cloud Storage +// objects, and ReaderSource, for data read from an io.Reader. +type LoadSource interface { + // populates config, returns media + populateLoadConfig(*bq.JobConfigurationLoad) io.Reader +} + +// LoaderFrom returns a Loader which can be used to load data into a BigQuery table. +// The returned Loader may optionally be further configured before its Run method is called. +// See GCSReference and ReaderSource for additional configuration options that +// affect loading. +func (t *Table) LoaderFrom(src LoadSource) *Loader { + return &Loader{ + c: t.c, + LoadConfig: LoadConfig{ + Src: src, + Dst: t, + }, + } +} + +// Run initiates a load job. +func (l *Loader) Run(ctx context.Context) (*Job, error) { + job, media := l.newJob() + return l.c.insertJob(ctx, job, media) +} + +func (l *Loader) newJob() (*bq.Job, io.Reader) { + config, media := l.LoadConfig.toBQ() + return &bq.Job{ + JobReference: l.JobIDConfig.createJobRef(l.c.projectID), + Configuration: config, + }, media +} diff --git a/vendor/cloud.google.com/go/bigquery/load_test.go b/vendor/cloud.google.com/go/bigquery/load_test.go new file mode 100644 index 0000000..44c14f2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load_test.go @@ -0,0 +1,244 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
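+
+// A typical use of the Loader defined in load.go above (a sketch; assumes ctx,
+// a *Client c, and hypothetical dataset, table and GCS object names):
+//
+//   gcsRef := NewGCSReference("gs://bucket/data.csv")
+//   loader := c.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
+//   loader.WriteDisposition = WriteTruncate
+//   job, err := loader.Run(ctx)
+//   // then Wait on the job and check both the returned error and status.Err()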
+ +package bigquery + +import ( + "strings" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultLoadJob() *bq.Job { + return &bq.Job{ + JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, + Configuration: &bq.JobConfiguration{ + Load: &bq.JobConfigurationLoad{ + DestinationTable: &bq.TableReference{ + ProjectId: "client-project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + SourceUris: []string{"uri"}, + }, + }, + } +} + +func stringFieldSchema() *FieldSchema { + return &FieldSchema{Name: "fieldname", Type: StringFieldType} +} + +func nestedFieldSchema() *FieldSchema { + return &FieldSchema{ + Name: "nested", + Type: RecordFieldType, + Schema: Schema{stringFieldSchema()}, + } +} + +func bqStringFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "fieldname", + Type: "STRING", + } +} + +func bqNestedFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "nested", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, + } +} + +func TestLoad(t *testing.T) { + defer fixRandomID("RANDOM")() + c := &Client{projectID: "client-project-id"} + + testCases := []struct { + dst *Table + src LoadSource + jobID string + config LoadConfig + want *bq.Job + }{ + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + want: defaultLoadJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + jobID: "ajob", + config: LoadConfig{ + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + Labels: map[string]string{"a": "b"}, + TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond}, + }, + src: NewGCSReference("uri"), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Labels = map[string]string{"a": "b"} + j.Configuration.Load.CreateDisposition = "CREATE_NEVER" + j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" + j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: 1234, + } + j.JobReference = &bq.JobReference{ + JobId: "ajob", + ProjectId: "client-project-id", + } + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.MaxBadRecords = 1 + g.AllowJaggedRows = true + g.AllowQuotedNewlines = true + g.IgnoreUnknownValues = true + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.MaxBadRecords = 1 + j.Configuration.Load.AllowJaggedRows = true + j.Configuration.Load.AllowQuotedNewlines = true + j.Configuration.Load.IgnoreUnknownValues = true + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.Schema = Schema{ + stringFieldSchema(), + nestedFieldSchema(), + } + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Schema = &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }} + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.SkipLeadingRows = 1 + g.SourceFormat = JSON + g.Encoding = UTF_8 + g.FieldDelimiter = "\t" + g.Quote = "-" + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SkipLeadingRows = 1 + 
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + want: func() *bq.Job { + j := defaultLoadJob() + // Quote is left unset in GCSReference, so should be nil here. + j.Configuration.Load.Quote = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.ForceZeroQuote = true + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + empty := "" + j.Configuration.Load.Quote = &empty + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *ReaderSource { + r := NewReaderSource(strings.NewReader("foo")) + r.SkipLeadingRows = 1 + r.SourceFormat = JSON + r.Encoding = UTF_8 + r.FieldDelimiter = "\t" + r.Quote = "-" + return r + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SourceUris = nil + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + } + + for i, tc := range testCases { + loader := tc.dst.LoaderFrom(tc.src) + loader.JobID = tc.jobID + tc.config.Src = tc.src + tc.config.Dst = tc.dst + loader.LoadConfig = tc.config + got, _ := loader.newJob() + checkJob(t, i, got, tc.want) + + jc, err := bqToJobConfig(got.Configuration, c) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig, + cmp.AllowUnexported(Table{}, Client{}), + cmpopts.IgnoreUnexported(ReaderSource{})) + if diff != "" { + t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go new file mode 100644 index 0000000..bcc859e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/params.go @@ -0,0 +1,349 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "errors" + "fmt" + "reflect" + "regexp" + "strings" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + + bq "google.golang.org/api/bigquery/v2" +) + +var ( + // See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type. 
+ timestampFormat = "2006-01-02 15:04:05.999999-07:00" + + // See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name + validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$") +) + +const nullableTagOption = "nullable" + +func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + name, keep, opts, err := fields.ParseStandardTag("bigquery", t) + if err != nil { + return "", false, nil, err + } + if name != "" && !validFieldName.MatchString(name) { + return "", false, nil, errInvalidFieldName + } + for _, opt := range opts { + if opt != nullableTagOption { + return "", false, nil, fmt.Errorf( + "bigquery: invalid tag option %q. The only valid option is %q", + opt, nullableTagOption) + } + } + return name, keep, opts, nil +} + +var fieldCache = fields.NewCache(bqTagParser, nil, nil) + +var ( + int64ParamType = &bq.QueryParameterType{Type: "INT64"} + float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"} + boolParamType = &bq.QueryParameterType{Type: "BOOL"} + stringParamType = &bq.QueryParameterType{Type: "STRING"} + bytesParamType = &bq.QueryParameterType{Type: "BYTES"} + dateParamType = &bq.QueryParameterType{Type: "DATE"} + timeParamType = &bq.QueryParameterType{Type: "TIME"} + dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"} + timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"} +) + +var ( + typeOfDate = reflect.TypeOf(civil.Date{}) + typeOfTime = reflect.TypeOf(civil.Time{}) + typeOfDateTime = reflect.TypeOf(civil.DateTime{}) + typeOfGoTime = reflect.TypeOf(time.Time{}) +) + +// A QueryParameter is a parameter to a query. +type QueryParameter struct { + // Name is used for named parameter mode. + // It must match the name in the query case-insensitively. + Name string + + // Value is the value of the parameter. + // + // When you create a QueryParameter to send to BigQuery, the following Go types + // are supported, with their corresponding Bigquery types: + // int, int8, int16, int32, int64, uint8, uint16, uint32: INT64 + // Note that uint, uint64 and uintptr are not supported, because + // they may contain values that cannot fit into a 64-bit signed integer. + // float32, float64: FLOAT64 + // bool: BOOL + // string: STRING + // []byte: BYTES + // time.Time: TIMESTAMP + // Arrays and slices of the above. + // Structs of the above. Only the exported fields are used. + // + // When a QueryParameter is returned inside a QueryConfig from a call to + // Job.Config: + // Integers are of type int64. + // Floating-point values are of type float64. + // Arrays are of type []interface{}, regardless of the array element type. + // Structs are of type map[string]interface{}. 
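+ //
+ // For example, to bind a named parameter (a sketch; assumes a *Client c and a
+ // hypothetical table):
+ //
+ //   q := c.Query("SELECT word FROM dataset.corpus WHERE word = @w")
+ //   q.Parameters = []QueryParameter{{Name: "w", Value: "hello"}}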
+ Value interface{} +} + +func (p QueryParameter) toBQ() (*bq.QueryParameter, error) { + pv, err := paramValue(reflect.ValueOf(p.Value)) + if err != nil { + return nil, err + } + pt, err := paramType(reflect.TypeOf(p.Value)) + if err != nil { + return nil, err + } + return &bq.QueryParameter{ + Name: p.Name, + ParameterValue: &pv, + ParameterType: pt, + }, nil +} + +func paramType(t reflect.Type) (*bq.QueryParameterType, error) { + if t == nil { + return nil, errors.New("bigquery: nil parameter") + } + switch t { + case typeOfDate: + return dateParamType, nil + case typeOfTime: + return timeParamType, nil + case typeOfDateTime: + return dateTimeParamType, nil + case typeOfGoTime: + return timestampParamType, nil + } + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64ParamType, nil + + case reflect.Float32, reflect.Float64: + return float64ParamType, nil + + case reflect.Bool: + return boolParamType, nil + + case reflect.String: + return stringParamType, nil + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return bytesParamType, nil + } + fallthrough + + case reflect.Array: + et, err := paramType(t.Elem()) + if err != nil { + return nil, err + } + return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil + + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + break + } + t = t.Elem() + fallthrough + + case reflect.Struct: + var fts []*bq.QueryParameterTypeStructTypes + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, err + } + for _, f := range fields { + pt, err := paramType(f.Type) + if err != nil { + return nil, err + } + fts = append(fts, &bq.QueryParameterTypeStructTypes{ + Name: f.Name, + Type: pt, + }) + } + return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil + } + return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t) +} + +func paramValue(v reflect.Value) (bq.QueryParameterValue, error) { + var res bq.QueryParameterValue + if !v.IsValid() { + return res, errors.New("bigquery: nil parameter") + } + t := v.Type() + switch t { + case typeOfDate: + res.Value = v.Interface().(civil.Date).String() + return res, nil + + case typeOfTime: + // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond. 
+ res.Value = CivilTimeString(v.Interface().(civil.Time)) + return res, nil + + case typeOfDateTime: + res.Value = CivilDateTimeString(v.Interface().(civil.DateTime)) + return res, nil + + case typeOfGoTime: + res.Value = v.Interface().(time.Time).Format(timestampFormat) + return res, nil + } + switch t.Kind() { + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte)) + return res, nil + } + fallthrough + + case reflect.Array: + var vals []*bq.QueryParameterValue + for i := 0; i < v.Len(); i++ { + val, err := paramValue(v.Index(i)) + if err != nil { + return bq.QueryParameterValue{}, err + } + vals = append(vals, &val) + } + return bq.QueryParameterValue{ArrayValues: vals}, nil + + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t) + } + t = t.Elem() + v = v.Elem() + if !v.IsValid() { + // nil pointer becomes empty value + return res, nil + } + fallthrough + + case reflect.Struct: + fields, err := fieldCache.Fields(t) + if err != nil { + return bq.QueryParameterValue{}, err + } + res.StructValues = map[string]bq.QueryParameterValue{} + for _, f := range fields { + fv := v.FieldByIndex(f.Index) + fp, err := paramValue(fv) + if err != nil { + return bq.QueryParameterValue{}, err + } + res.StructValues[f.Name] = fp + } + return res, nil + } + // None of the above: assume a scalar type. (If it's not a valid type, + // paramType will catch the error.) + res.Value = fmt.Sprint(v.Interface()) + return res, nil +} + +func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) { + p := QueryParameter{Name: q.Name} + val, err := convertParamValue(q.ParameterValue, q.ParameterType) + if err != nil { + return QueryParameter{}, err + } + p.Value = val + return p, nil +} + +var paramTypeToFieldType = map[string]FieldType{ + int64ParamType.Type: IntegerFieldType, + float64ParamType.Type: FloatFieldType, + boolParamType.Type: BooleanFieldType, + stringParamType.Type: StringFieldType, + bytesParamType.Type: BytesFieldType, + dateParamType.Type: DateFieldType, + timeParamType.Type: TimeFieldType, +} + +// Convert a parameter value from the service to a Go value. This is similar to, but +// not quite the same as, converting data values. +func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) { + switch qtype.Type { + case "ARRAY": + if qval == nil { + return []interface{}(nil), nil + } + return convertParamArray(qval.ArrayValues, qtype.ArrayType) + case "STRUCT": + if qval == nil { + return map[string]interface{}(nil), nil + } + return convertParamStruct(qval.StructValues, qtype.StructTypes) + case "TIMESTAMP": + return time.Parse(timestampFormat, qval.Value) + case "DATETIME": + parts := strings.Fields(qval.Value) + if len(parts) != 2 { + return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value) + } + return civil.ParseDateTime(parts[0] + "T" + parts[1]) + default: + return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type]) + } +} + +// convertParamArray converts a query parameter array value to a Go value. It +// always returns a []interface{}. 
+func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
+ var vals []interface{}
+ for _, el := range elVals {
+ val, err := convertParamValue(el, elType)
+ if err != nil {
+ return nil, err
+ }
+ vals = append(vals, val)
+ }
+ return vals, nil
+}
+
+// convertParamStruct converts a query parameter struct value into a Go value. It
+// always returns a map[string]interface{}.
+func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
+ vals := map[string]interface{}{}
+ for _, st := range sTypes {
+ if sv, ok := sVals[st.Name]; ok {
+ val, err := convertParamValue(&sv, st.Type)
+ if err != nil {
+ return nil, err
+ }
+ vals[st.Name] = val
+ } else {
+ vals[st.Name] = nil
+ }
+ }
+ return vals, nil
+}
diff --git a/vendor/cloud.google.com/go/bigquery/params_test.go b/vendor/cloud.google.com/go/bigquery/params_test.go
new file mode 100644
index 0000000..95fb9fd
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/params_test.go
@@ -0,0 +1,361 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+ "errors"
+ "math"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+
+ "cloud.google.com/go/civil"
+ "cloud.google.com/go/internal/testutil"
+ "golang.org/x/net/context"
+ bq "google.golang.org/api/bigquery/v2"
+)
+
+var scalarTests = []struct {
+ val interface{} // The Go value
+ wantVal string // paramValue's desired output
+ wantType *bq.QueryParameterType // paramType's desired output
+}{
+ {int64(0), "0", int64ParamType},
+ {3.14, "3.14", float64ParamType},
+ {3.14159e-87, "3.14159e-87", float64ParamType},
+ {true, "true", boolParamType},
+ {"string", "string", stringParamType},
+ {"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
+ {math.NaN(), "NaN", float64ParamType},
+ {[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
+ {time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
+ "2016-03-20 04:22:09.000005-01:02",
+ timestampParamType},
+ {civil.Date{2016, 3, 20}, "2016-03-20", dateParamType},
+ {civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType},
+ {civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}},
+ "2016-03-20 04:05:06.789000",
+ dateTimeParamType},
+}
+
+type (
+ S1 struct {
+ A int
+ B *S2
+ C bool
+ }
+ S2 struct {
+ D string
+ e int
+ }
+)
+
+var (
+ s1 = S1{
+ A: 1,
+ B: &S2{D: "s"},
+ C: true,
+ }
+
+ s1ParamType = &bq.QueryParameterType{
+ Type: "STRUCT",
+ StructTypes: []*bq.QueryParameterTypeStructTypes{
+ {Name: "A", Type: int64ParamType},
+ {Name: "B", Type: &bq.QueryParameterType{
+ Type: "STRUCT",
+ StructTypes: []*bq.QueryParameterTypeStructTypes{
+ {Name: "D", Type: stringParamType},
+ },
+ }},
+ {Name: "C", Type: boolParamType},
+ },
+ }
+
+ s1ParamValue = bq.QueryParameterValue{
+ StructValues: map[string]bq.QueryParameterValue{
+ "A":
sval("1"), + "B": bq.QueryParameterValue{ + StructValues: map[string]bq.QueryParameterValue{ + "D": sval("s"), + }, + }, + "C": sval("true"), + }, + } + + s1ParamReturnValue = map[string]interface{}{ + "A": int64(1), + "B": map[string]interface{}{"D": "s"}, + "C": true, + } +) + +func sval(s string) bq.QueryParameterValue { + return bq.QueryParameterValue{Value: s} +} + +func TestParamValueScalar(t *testing.T) { + for _, test := range scalarTests { + got, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Errorf("%v: got %v, want nil", test.val, err) + continue + } + want := sval(test.wantVal) + if !testutil.Equal(got, want) { + t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want) + } + } +} + +func TestParamValueArray(t *testing.T) { + qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{ + {Value: "1"}, + {Value: "2"}, + }, + } + for _, test := range []struct { + val interface{} + want bq.QueryParameterValue + }{ + {[]int(nil), bq.QueryParameterValue{}}, + {[]int{}, bq.QueryParameterValue{}}, + {[]int{1, 2}, qpv}, + {[2]int{1, 2}, qpv}, + } { + got, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want) + } + } +} + +func TestParamValueStruct(t *testing.T) { + got, err := paramValue(reflect.ValueOf(s1)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, s1ParamValue) { + t.Errorf("got %+v\nwant %+v", got, s1ParamValue) + } +} + +func TestParamValueErrors(t *testing.T) { + // paramValue lets a few invalid types through, but paramType catches them. + // Since we never call one without the other that's fine. + for _, val := range []interface{}{nil, new([]int)} { + _, err := paramValue(reflect.ValueOf(val)) + if err == nil { + t.Errorf("%v (%T): got nil, want error", val, val) + } + } +} + +func TestParamType(t *testing.T) { + for _, test := range scalarTests { + got, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.wantType) { + t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType) + } + } + for _, test := range []struct { + val interface{} + want *bq.QueryParameterType + }{ + {uint32(32767), int64ParamType}, + {[]byte("foo"), bytesParamType}, + {[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}}, + {[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}}, + {S1{}, s1ParamType}, + } { + got, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want) + } + } +} + +func TestParamTypeErrors(t *testing.T) { + for _, val := range []interface{}{ + nil, uint(0), new([]int), make(chan int), + } { + _, err := paramType(reflect.TypeOf(val)) + if err == nil { + t.Errorf("%v (%T): got nil, want error", val, val) + } + } +} + +func TestConvertParamValue(t *testing.T) { + // Scalars. + for _, test := range scalarTests { + pval, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Fatal(err) + } + ptype, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + got, err := convertParamValue(&pval, ptype) + if err != nil { + t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err) + } + if !testutil.Equal(got, test.val) { + t.Errorf("%#v: got %#v", test.val, got) + } + } + // Arrays. 
+ for _, test := range []struct { + pval *bq.QueryParameterValue + want []interface{} + }{ + { + &bq.QueryParameterValue{}, + nil, + }, + { + &bq.QueryParameterValue{ + ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}}, + }, + []interface{}{int64(1), int64(2)}, + }, + } { + ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType} + got, err := convertParamValue(test.pval, ptype) + if err != nil { + t.Fatalf("%+v: %v", test.pval, err) + } + if !testutil.Equal(got, test.want) { + t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want) + } + } + // Structs. + got, err := convertParamValue(&s1ParamValue, s1ParamType) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, s1ParamReturnValue) { + t.Errorf("got %+v, want %+v", got, s1ParamReturnValue) + } +} + +func TestIntegration_ScalarParam(t *testing.T) { + roundToMicros := cmp.Transformer("RoundToMicros", + func(t time.Time) time.Time { return t.Round(time.Microsecond) }) + c := getClient(t) + for _, test := range scalarTests { + gotData, gotParam, err := paramRoundTrip(c, test.val) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(gotData, test.val, roundToMicros) { + t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val) + } + if !testutil.Equal(gotParam, test.val, roundToMicros) { + t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val) + } + } +} + +func TestIntegration_OtherParam(t *testing.T) { + c := getClient(t) + for _, test := range []struct { + val interface{} + wantData interface{} + wantParam interface{} + }{ + {[]int(nil), []Value(nil), []interface{}(nil)}, + {[]int{}, []Value(nil), []interface{}(nil)}, + { + []int{1, 2}, + []Value{int64(1), int64(2)}, + []interface{}{int64(1), int64(2)}, + }, + { + [3]int{1, 2, 3}, + []Value{int64(1), int64(2), int64(3)}, + []interface{}{int64(1), int64(2), int64(3)}, + }, + { + S1{}, + []Value{int64(0), nil, false}, + map[string]interface{}{ + "A": int64(0), + "B": nil, + "C": false, + }, + }, + { + s1, + []Value{int64(1), []Value{"s"}, true}, + s1ParamReturnValue, + }, + } { + gotData, gotParam, err := paramRoundTrip(c, test.val) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(gotData, test.wantData) { + t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)", + test.val, gotData, gotData, test.wantData, test.wantData) + } + if !testutil.Equal(gotParam, test.wantParam) { + t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)", + test.val, gotParam, gotParam, test.wantParam, test.wantParam) + } + } +} + +// paramRoundTrip passes x as a query parameter to BigQuery. It returns +// the resulting data value from running the query and the parameter value from +// the returned job configuration. 
+func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) { + ctx := context.Background() + q := c.Query("select ?") + q.Parameters = []QueryParameter{{Value: x}} + job, err := q.Run(ctx) + if err != nil { + return nil, nil, err + } + it, err := job.Read(ctx) + if err != nil { + return nil, nil, err + } + var val []Value + err = it.Next(&val) + if err != nil { + return nil, nil, err + } + if len(val) != 1 { + return nil, nil, errors.New("wrong number of values") + } + conf, err := job.Config() + if err != nil { + return nil, nil, err + } + return val[0], conf.(*QueryConfig).Parameters[0].Value, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/query.go b/vendor/cloud.google.com/go/bigquery/query.go new file mode 100644 index 0000000..26e59ce --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query.go @@ -0,0 +1,284 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// QueryConfig holds the configuration for a query job. +type QueryConfig struct { + // Dst is the table into which the results of the query will be written. + // If this field is nil, a temporary table will be created. + Dst *Table + + // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. + Q string + + // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. + // If DefaultProjectID is set, DefaultDatasetID must also be set. + DefaultProjectID string + DefaultDatasetID string + + // TableDefinitions describes data sources outside of BigQuery. + // The map keys may be used as table names in the query string. + // + // When a QueryConfig is returned from Job.Config, the map values + // are always of type *ExternalDataConfig. + TableDefinitions map[string]ExternalData + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteEmpty. + WriteDisposition TableWriteDisposition + + // DisableQueryCache prevents results being fetched from the query cache. + // If this field is false, results are fetched from the cache if they are available. + // The query cache is a best-effort cache that is flushed whenever tables in the query are modified. + // Cached results are only available when TableID is unspecified in the query's destination Table. + // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching + DisableQueryCache bool + + // DisableFlattenedResults prevents results being flattened. + // If this field is false, results from nested and repeated fields are flattened. 
+ // DisableFlattenedResults implies AllowLargeResults + // For more information, see https://cloud.google.com/bigquery/docs/data#nested + DisableFlattenedResults bool + + // AllowLargeResults allows the query to produce arbitrarily large result tables. + // The destination must be a table. + // When using this option, queries will take longer to execute, even if the result set is small. + // For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults + AllowLargeResults bool + + // Priority specifies the priority with which to schedule the query. + // The default priority is InteractivePriority. + // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries + Priority QueryPriority + + // MaxBillingTier sets the maximum billing tier for a Query. + // Queries that have resource usage beyond this tier will fail (without + // incurring a charge). If this field is zero, the project default will be used. + MaxBillingTier int + + // MaxBytesBilled limits the number of bytes billed for + // this job. Queries that would exceed this limit will fail (without incurring + // a charge). + // If this field is less than 1, the project default will be + // used. + MaxBytesBilled int64 + + // UseStandardSQL causes the query to use standard SQL. The default. + // Deprecated: use UseLegacySQL. + UseStandardSQL bool + + // UseLegacySQL causes the query to use legacy SQL. + UseLegacySQL bool + + // Parameters is a list of query parameters. The presence of parameters + // implies the use of standard SQL. + // If the query uses positional syntax ("?"), then no parameter may have a name. + // If the query uses named syntax ("@p"), then all parameters must have names. + // It is illegal to mix positional and named syntax. + Parameters []QueryParameter + + // The labels associated with this job. + Labels map[string]string + + // If true, don't actually run this job. A valid query will return a mostly + // empty response with some processing statistics, while an invalid query will + // return the same error it would if it wasn't a dry run. + // + // Query.Read will fail with dry-run queries. Call Query.Run instead, and then + // call LastStatus on the returned job to get statistics. Calling Status on a + // dry-run job will fail. + DryRun bool +} + +func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) { + qconf := &bq.JobConfigurationQuery{ + Query: qc.Q, + CreateDisposition: string(qc.CreateDisposition), + WriteDisposition: string(qc.WriteDisposition), + AllowLargeResults: qc.AllowLargeResults, + Priority: string(qc.Priority), + MaximumBytesBilled: qc.MaxBytesBilled, + } + if len(qc.TableDefinitions) > 0 { + qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) + } + for name, data := range qc.TableDefinitions { + qconf.TableDefinitions[name] = data.toBQ() + } + if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" { + qconf.DefaultDataset = &bq.DatasetReference{ + DatasetId: qc.DefaultDatasetID, + ProjectId: qc.DefaultProjectID, + } + } + if tier := int64(qc.MaxBillingTier); tier > 0 { + qconf.MaximumBillingTier = &tier + } + f := false + if qc.DisableQueryCache { + qconf.UseQueryCache = &f + } + if qc.DisableFlattenedResults { + qconf.FlattenResults = &f + // DisableFlattenResults implies AllowLargeResults. 
+ qconf.AllowLargeResults = true + } + if qc.UseStandardSQL && qc.UseLegacySQL { + return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") + } + if len(qc.Parameters) > 0 && qc.UseLegacySQL { + return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL") + } + if qc.UseLegacySQL { + qconf.UseLegacySql = true + } else { + qconf.UseLegacySql = false + qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql") + } + if qc.Dst != nil && !qc.Dst.implicitTable() { + qconf.DestinationTable = qc.Dst.toBQ() + } + for _, p := range qc.Parameters { + qp, err := p.toBQ() + if err != nil { + return nil, err + } + qconf.QueryParameters = append(qconf.QueryParameters, qp) + } + return &bq.JobConfiguration{ + Labels: qc.Labels, + DryRun: qc.DryRun, + Query: qconf, + }, nil +} + +func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) { + qq := q.Query + qc := &QueryConfig{ + Labels: q.Labels, + DryRun: q.DryRun, + Q: qq.Query, + CreateDisposition: TableCreateDisposition(qq.CreateDisposition), + WriteDisposition: TableWriteDisposition(qq.WriteDisposition), + AllowLargeResults: qq.AllowLargeResults, + Priority: QueryPriority(qq.Priority), + MaxBytesBilled: qq.MaximumBytesBilled, + UseLegacySQL: qq.UseLegacySql, + UseStandardSQL: !qq.UseLegacySql, + } + if len(qq.TableDefinitions) > 0 { + qc.TableDefinitions = make(map[string]ExternalData) + } + for name, qedc := range qq.TableDefinitions { + edc, err := bqToExternalDataConfig(&qedc) + if err != nil { + return nil, err + } + qc.TableDefinitions[name] = edc + } + if qq.DefaultDataset != nil { + qc.DefaultProjectID = qq.DefaultDataset.ProjectId + qc.DefaultDatasetID = qq.DefaultDataset.DatasetId + } + if qq.MaximumBillingTier != nil { + qc.MaxBillingTier = int(*qq.MaximumBillingTier) + } + if qq.UseQueryCache != nil && !*qq.UseQueryCache { + qc.DisableQueryCache = true + } + if qq.FlattenResults != nil && !*qq.FlattenResults { + qc.DisableFlattenedResults = true + } + if qq.DestinationTable != nil { + qc.Dst = bqToTable(qq.DestinationTable, c) + } + for _, qp := range qq.QueryParameters { + p, err := bqToQueryParameter(qp) + if err != nil { + return nil, err + } + qc.Parameters = append(qc.Parameters, p) + } + return qc, nil +} + +// QueryPriority specifies a priority with which a query is to be executed. +type QueryPriority string + +const ( + BatchPriority QueryPriority = "BATCH" + InteractivePriority QueryPriority = "INTERACTIVE" +) + +// A Query queries data from a BigQuery table. Use Client.Query to create a Query. +type Query struct { + JobIDConfig + QueryConfig + client *Client +} + +// Query creates a query with string q. +// The returned Query may optionally be further configured before its Run method is called. +func (c *Client) Query(q string) *Query { + return &Query{ + client: c, + QueryConfig: QueryConfig{Q: q}, + } +} + +// Run initiates a query job. +func (q *Query) Run(ctx context.Context) (*Job, error) { + job, err := q.newJob() + if err != nil { + return nil, err + } + j, err := q.client.insertJob(ctx, job, nil) + if err != nil { + return nil, err + } + return j, nil +} + +func (q *Query) newJob() (*bq.Job, error) { + config, err := q.QueryConfig.toBQ() + if err != nil { + return nil, err + } + return &bq.Job{ + JobReference: q.JobIDConfig.createJobRef(q.client.projectID), + Configuration: config, + }, nil +} + +// Read submits a query for execution and returns the results via a RowIterator. 
+// It is a shorthand for Query.Run followed by Job.Read. +func (q *Query) Read(ctx context.Context) (*RowIterator, error) { + job, err := q.Run(ctx) + if err != nil { + return nil, err + } + return job.Read(ctx) +} diff --git a/vendor/cloud.google.com/go/bigquery/query_test.go b/vendor/cloud.google.com/go/bigquery/query_test.go new file mode 100644 index 0000000..3a5eaa1 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query_test.go @@ -0,0 +1,399 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultQueryJob() *bq.Job { + return &bq.Job{ + JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "client-project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + Query: "query string", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + } +} + +var defaultQuery = &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", +} + +func TestQuery(t *testing.T) { + defer fixRandomID("RANDOM")() + c := &Client{ + projectID: "client-project-id", + } + testCases := []struct { + dst *Table + src *QueryConfig + jobIDConfig JobIDConfig + want *bq.Job + }{ + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: defaultQuery, + want: defaultQueryJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + Labels: map[string]string{"a": "b"}, + DryRun: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Labels = map[string]string{"a": "b"} + j.Configuration.DryRun = true + j.Configuration.Query.DefaultDataset = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true}, + src: &QueryConfig{Q: "query string"}, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + j.JobReference.JobId = "jobID-RANDOM" + return j + }(), + }, + { + dst: &Table{}, + src: defaultQuery, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DestinationTable = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + TableDefinitions: map[string]ExternalData{ + "atable": func() *GCSReference { + g := NewGCSReference("uri") + g.AllowJaggedRows = true + g.AllowQuotedNewlines = true + g.Compression = Gzip + g.Encoding = UTF_8 + g.FieldDelimiter = ";" + g.IgnoreUnknownValues = true + g.MaxBadRecords = 1 + g.Quote = "'" + g.SkipLeadingRows = 2 + g.Schema = 
Schema([]*FieldSchema{
+ {Name: "name", Type: StringFieldType},
+ })
+ return g
+ }(),
+ },
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ j.Configuration.Query.DefaultDataset = nil
+ td := make(map[string]bq.ExternalDataConfiguration)
+ quote := "'"
+ td["atable"] = bq.ExternalDataConfiguration{
+ Compression: "GZIP",
+ IgnoreUnknownValues: true,
+ MaxBadRecords: 1,
+ SourceFormat: "CSV", // must be explicitly set.
+ SourceUris: []string{"uri"},
+ CsvOptions: &bq.CsvOptions{
+ AllowJaggedRows: true,
+ AllowQuotedNewlines: true,
+ Encoding: "UTF-8",
+ FieldDelimiter: ";",
+ SkipLeadingRows: 2,
+ Quote: &quote,
+ },
+ Schema: &bq.TableSchema{
+ Fields: []*bq.TableFieldSchema{
+ {Name: "name", Type: "STRING"},
+ },
+ },
+ }
+ j.Configuration.Query.TableDefinitions = td
+ return j
+ }(),
+ },
+ {
+ dst: &Table{
+ ProjectID: "project-id",
+ DatasetID: "dataset-id",
+ TableID: "table-id",
+ },
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ CreateDisposition: CreateNever,
+ WriteDisposition: WriteTruncate,
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ j.Configuration.Query.DestinationTable.ProjectId = "project-id"
+ j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
+ j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
+ return j
+ }(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ DisableQueryCache: true,
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ f := false
+ j.Configuration.Query.UseQueryCache = &f
+ return j
+ }(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ AllowLargeResults: true,
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ j.Configuration.Query.AllowLargeResults = true
+ return j
+ }(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ DisableFlattenedResults: true,
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ f := false
+ j.Configuration.Query.FlattenResults = &f
+ j.Configuration.Query.AllowLargeResults = true
+ return j
+ }(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ Priority: QueryPriority("low"),
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ j.Configuration.Query.Priority = "low"
+ return j
+ }(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ MaxBillingTier: 3,
+ MaxBytesBilled: 5,
+ },
+ want: func() *bq.Job {
+ j := defaultQueryJob()
+ tier := int64(3)
+ j.Configuration.Query.MaximumBillingTier = &tier
+ j.Configuration.Query.MaximumBytesBilled = 5
+ return j
+ }(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+ UseStandardSQL: true,
+ },
+ want: defaultQueryJob(),
+ },
+ {
+ dst: c.Dataset("dataset-id").Table("table-id"),
+ src: &QueryConfig{
+ Q: "query string",
+ DefaultProjectID: "def-project-id",
+ DefaultDatasetID: "def-dataset-id",
+
UseLegacySQL: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.UseLegacySql = true + j.Configuration.Query.ForceSendFields = nil + return j + }(), + }, + } + for i, tc := range testCases { + query := c.Query("") + query.JobIDConfig = tc.jobIDConfig + query.QueryConfig = *tc.src + query.Dst = tc.dst + got, err := query.newJob() + if err != nil { + t.Errorf("#%d: err calling query: %v", i, err) + continue + } + checkJob(t, i, got, tc.want) + + // Round-trip. + jc, err := bqToJobConfig(got.Configuration, c) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + wantConfig := query.QueryConfig + // We set AllowLargeResults to true when DisableFlattenedResults is true. + if wantConfig.DisableFlattenedResults { + wantConfig.AllowLargeResults = true + } + // A QueryConfig with neither UseXXXSQL field set is equivalent + // to one where UseStandardSQL = true. + if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL { + wantConfig.UseStandardSQL = true + } + // Treat nil and empty tables the same, and ignore the client. + tableEqual := func(t1, t2 *Table) bool { + if t1 == nil { + t1 = &Table{} + } + if t2 == nil { + t2 = &Table{} + } + return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID + } + // A table definition that is a GCSReference round-trips as an ExternalDataConfig. + // TODO(jba): see if there is a way to express this with a transformer. + gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig { + q := g.toBQ() + e, _ := bqToExternalDataConfig(&q) + return e + } + externalDataEqual := func(e1, e2 ExternalData) bool { + if r, ok := e1.(*GCSReference); ok { + e1 = gcsRefToEDC(r) + } + if r, ok := e2.(*GCSReference); ok { + e2 = gcsRefToEDC(r) + } + return cmp.Equal(e1, e2) + } + diff := testutil.Diff(jc.(*QueryConfig), &wantConfig, + cmp.Comparer(tableEqual), + cmp.Comparer(externalDataEqual), + ) + if diff != "" { + t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) + } + } +} + +func TestConfiguringQuery(t *testing.T) { + c := &Client{ + projectID: "project-id", + } + + query := c.Query("q") + query.JobID = "ajob" + query.DefaultProjectID = "def-project-id" + query.DefaultDatasetID = "def-dataset-id" + // Note: Other configuration fields are tested in other tests above. + // A lot of that can be consolidated once Client.Copy is gone. 
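+	// The expected job below also verifies that UseLegacySql is false and is
+	// listed in ForceSendFields, so the explicit false value reaches the server.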
+ + want := &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + Query: "q", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + JobReference: &bq.JobReference{ + JobId: "ajob", + ProjectId: "project-id", + }, + } + + got, err := query.newJob() + if err != nil { + t.Fatalf("err calling Query.newJob: %v", err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("querying: -got +want:\n%s", diff) + } +} + +func TestQueryLegacySQL(t *testing.T) { + c := &Client{projectID: "project-id"} + q := c.Query("q") + q.UseStandardSQL = true + q.UseLegacySQL = true + _, err := q.newJob() + if err == nil { + t.Error("UseStandardSQL and UseLegacySQL: got nil, want error") + } + q = c.Query("q") + q.Parameters = []QueryParameter{{Name: "p", Value: 3}} + q.UseLegacySQL = true + _, err = q.newJob() + if err == nil { + t.Error("Parameters and UseLegacySQL: got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/bigquery/read_test.go b/vendor/cloud.google.com/go/bigquery/read_test.go new file mode 100644 index 0000000..129348a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/read_test.go @@ -0,0 +1,235 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" + "google.golang.org/api/iterator" +) + +type pageFetcherArgs struct { + table *Table + schema Schema + startIndex uint64 + pageSize int64 + pageToken string +} + +// pageFetcherReadStub services read requests by returning data from an in-memory list of values. +type pageFetcherReadStub struct { + // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. + values [][][]Value // contains pages / rows / columns. + pageTokens map[string]string // maps incoming page token to returned page token. + + // arguments are recorded for later inspection. + calls []pageFetcherArgs +} + +func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) { + s.calls = append(s.calls, + pageFetcherArgs{t, schema, startIndex, pageSize, pageToken}) + result := &fetchPageResult{ + pageToken: s.pageTokens[pageToken], + rows: s.values[0], + } + s.values = s.values[1:] + return result, nil +} + +func waitForQueryStub(context.Context, string) (Schema, error) { + return nil, nil +} + +func TestRead(t *testing.T) { + // The data for the service stub to return is populated for each test case in the testCases for loop. 
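+	// Rows are read through two paths below: directly from a Table, and via a
+	// query Job's destination table; both are expected to yield the same values.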
+ ctx := context.Background() + c := &Client{projectID: "project-id"} + pf := &pageFetcherReadStub{} + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + c: c, + config: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + }, + }, + } + + for _, readFunc := range []func() *RowIterator{ + func() *RowIterator { + return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage) + }, + func() *RowIterator { + it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage) + if err != nil { + t.Fatal(err) + } + return it + }, + } { + testCases := []struct { + data [][][]Value + pageTokens map[string]string + want [][]Value + }{ + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": "a", "a": ""}, + want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}}, + }, + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": ""}, // no more pages after first one. + want: [][]Value{{1, 2}, {11, 12}}, + }, + } + for _, tc := range testCases { + pf.values = tc.data + pf.pageTokens = tc.pageTokens + if got, ok := collectValues(t, readFunc()); ok { + if !testutil.Equal(got, tc.want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) + } + } + } + } +} + +func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) { + var got [][]Value + for { + var vals []Value + err := it.Next(&vals) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("err calling Next: %v", err) + return nil, false + } + got = append(got, vals) + } + return got, true +} + +func TestNoMoreValues(t *testing.T) { + c := &Client{projectID: "project-id"} + pf := &pageFetcherReadStub{ + values: [][][]Value{{{1, 2}, {11, 12}}}, + } + it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage) + var vals []Value + // We expect to retrieve two values and then fail on the next attempt. + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if err := it.Next(&vals); err != iterator.Done { + t.Fatalf("Next: got: %v: want: iterator.Done", err) + } +} + +var errBang = errors.New("bang!") + +func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) { + return nil, errBang +} + +func TestReadError(t *testing.T) { + // test that service read errors are propagated back to the caller. + c := &Client{projectID: "project-id"} + it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage) + var vals []Value + if err := it.Next(&vals); err != errBang { + t.Fatalf("Get: got: %v: want: %v", err, errBang) + } +} + +func TestReadTabledataOptions(t *testing.T) { + // test that read options are propagated. 
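+	// The stub records each fetchPage call so the assertion below can verify
+	// that the iterator's MaxSize is forwarded as the page size.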
+ s := &pageFetcherReadStub{ + values: [][][]Value{{{1, 2}}}, + } + c := &Client{projectID: "project-id"} + tr := c.Dataset("dataset-id").Table("table-id") + it := tr.read(context.Background(), s.fetchPage) + it.PageInfo().MaxSize = 5 + var vals []Value + if err := it.Next(&vals); err != nil { + t.Fatal(err) + } + want := []pageFetcherArgs{{ + table: tr, + pageSize: 5, + pageToken: "", + }} + if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" { + t.Errorf("reading (got=-, want=+):\n%s", diff) + } +} + +func TestReadQueryOptions(t *testing.T) { + // test that read options are propagated. + c := &Client{projectID: "project-id"} + pf := &pageFetcherReadStub{ + values: [][][]Value{{{1, 2}}}, + } + tr := &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + } + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + c: c, + config: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{DestinationTable: tr}, + }, + } + it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage) + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + it.PageInfo().MaxSize = 5 + var vals []Value + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + + want := []pageFetcherArgs{{ + table: bqToTable(tr, c), + pageSize: 5, + pageToken: "", + }} + if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) { + t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go new file mode 100644 index 0000000..0faea33 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema.go @@ -0,0 +1,318 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + + "cloud.google.com/go/internal/atomiccache" + + bq "google.golang.org/api/bigquery/v2" +) + +// Schema describes the fields in a table or query result. +type Schema []*FieldSchema + +type FieldSchema struct { + // The field name. + // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), + // and must start with a letter or underscore. + // The maximum length is 128 characters. + Name string + + // A description of the field. The maximum length is 16,384 characters. + Description string + + // Whether the field may contain multiple values. + Repeated bool + // Whether the field is required. Ignored if Repeated is true. + Required bool + + // The field data type. If Type is Record, then this field contains a nested schema, + // which is described by Schema. + Type FieldType + // Describes the nested schema if Type is set to Record. 
+ Schema Schema +} + +func (fs *FieldSchema) toBQ() *bq.TableFieldSchema { + tfs := &bq.TableFieldSchema{ + Description: fs.Description, + Name: fs.Name, + Type: string(fs.Type), + } + + if fs.Repeated { + tfs.Mode = "REPEATED" + } else if fs.Required { + tfs.Mode = "REQUIRED" + } // else leave as default, which is interpreted as NULLABLE. + + for _, f := range fs.Schema { + tfs.Fields = append(tfs.Fields, f.toBQ()) + } + + return tfs +} + +func (s Schema) toBQ() *bq.TableSchema { + var fields []*bq.TableFieldSchema + for _, f := range s { + fields = append(fields, f.toBQ()) + } + return &bq.TableSchema{Fields: fields} +} + +func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { + fs := &FieldSchema{ + Description: tfs.Description, + Name: tfs.Name, + Repeated: tfs.Mode == "REPEATED", + Required: tfs.Mode == "REQUIRED", + Type: FieldType(tfs.Type), + } + + for _, f := range tfs.Fields { + fs.Schema = append(fs.Schema, bqToFieldSchema(f)) + } + return fs +} + +func bqToSchema(ts *bq.TableSchema) Schema { + if ts == nil { + return nil + } + var s Schema + for _, f := range ts.Fields { + s = append(s, bqToFieldSchema(f)) + } + return s +} + +type FieldType string + +const ( + StringFieldType FieldType = "STRING" + BytesFieldType FieldType = "BYTES" + IntegerFieldType FieldType = "INTEGER" + FloatFieldType FieldType = "FLOAT" + BooleanFieldType FieldType = "BOOLEAN" + TimestampFieldType FieldType = "TIMESTAMP" + RecordFieldType FieldType = "RECORD" + DateFieldType FieldType = "DATE" + TimeFieldType FieldType = "TIME" + DateTimeFieldType FieldType = "DATETIME" +) + +var ( + errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct") + errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct") + errInvalidFieldName = errors.New("bigquery: invalid name of field in struct") +) + +var typeOfByteSlice = reflect.TypeOf([]byte{}) + +// InferSchema tries to derive a BigQuery schema from the supplied struct value. +// NOTE: All fields in the returned Schema are configured to be required, +// unless the corresponding field in the supplied struct is a slice or array. +// +// It is considered an error if the struct (including nested structs) contains +// any exported fields that are pointers or one of the following types: +// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan. +// In these cases, an error will be returned. +// Future versions may handle these cases without error. +// +// Recursively defined structs are also disallowed. +func InferSchema(st interface{}) (Schema, error) { + return inferSchemaReflectCached(reflect.TypeOf(st)) +} + +// TODO(jba): replace with sync.Map for Go 1.9. 
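+
+// For illustration, a sketch of what InferSchema yields for a simple struct
+// (Person is a hypothetical type; scalar fields are inferred as required):
+//
+//	type Person struct {
+//		Name string
+//		Age  int
+//	}
+//	schema, _ := InferSchema(Person{})
+//	// schema[0]: {Name: "Name", Type: STRING, Required: true}
+//	// schema[1]: {Name: "Age", Type: INTEGER, Required: true}
+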
+var schemaCache atomiccache.Cache + +type cacheVal struct { + schema Schema + err error +} + +func inferSchemaReflectCached(t reflect.Type) (Schema, error) { + cv := schemaCache.Get(t, func() interface{} { + s, err := inferSchemaReflect(t) + return cacheVal{s, err} + }).(cacheVal) + return cv.schema, cv.err +} + +func inferSchemaReflect(t reflect.Type) (Schema, error) { + rec, err := hasRecursiveType(t, nil) + if err != nil { + return nil, err + } + if rec { + return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) + } + return inferStruct(t) +} + +func inferStruct(t reflect.Type) (Schema, error) { + switch t.Kind() { + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + return nil, errNoStruct + } + t = t.Elem() + fallthrough + + case reflect.Struct: + return inferFields(t) + default: + return nil, errNoStruct + } +} + +// inferFieldSchema infers the FieldSchema for a Go type +func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) { + switch rt { + case typeOfByteSlice: + return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil + case typeOfGoTime: + return &FieldSchema{Required: !nullable, Type: TimestampFieldType}, nil + case typeOfDate: + return &FieldSchema{Required: !nullable, Type: DateFieldType}, nil + case typeOfTime: + return &FieldSchema{Required: !nullable, Type: TimeFieldType}, nil + case typeOfDateTime: + return &FieldSchema{Required: !nullable, Type: DateTimeFieldType}, nil + } + if isSupportedIntType(rt) { + return &FieldSchema{Required: !nullable, Type: IntegerFieldType}, nil + } + switch rt.Kind() { + case reflect.Slice, reflect.Array: + et := rt.Elem() + if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) { + // Multi dimensional slices/arrays are not supported by BigQuery + return nil, errUnsupportedFieldType + } + + f, err := inferFieldSchema(et, false) + if err != nil { + return nil, err + } + f.Repeated = true + f.Required = false + return f, nil + case reflect.Struct, reflect.Ptr: + nested, err := inferStruct(rt) + if err != nil { + return nil, err + } + return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil + case reflect.String: + return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil + case reflect.Bool: + return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil + case reflect.Float32, reflect.Float64: + return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil + default: + return nil, errUnsupportedFieldType + } +} + +// inferFields extracts all exported field types from struct type. +func inferFields(rt reflect.Type) (Schema, error) { + var s Schema + fields, err := fieldCache.Fields(rt) + if err != nil { + return nil, err + } + for _, field := range fields { + var nullable bool + for _, opt := range field.ParsedTag.([]string) { + if opt == nullableTagOption { + nullable = true + break + } + } + f, err := inferFieldSchema(field.Type, nullable) + if err != nil { + return nil, err + } + f.Name = field.Name + s = append(s, f) + } + return s, nil +} + +// isSupportedIntType reports whether t can be properly represented by the +// BigQuery INTEGER/INT64 type. +func isSupportedIntType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int, + reflect.Uint8, reflect.Uint16, reflect.Uint32: + return true + default: + return false + } +} + +// typeList is a linked list of reflect.Types. 
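+// hasRecursiveType treats the list as an immutable "seen" set: each recursive
+// call prepends a node, so one field's traversal cannot affect a sibling's.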
+type typeList struct { + t reflect.Type + next *typeList +} + +func (l *typeList) has(t reflect.Type) bool { + for l != nil { + if l.t == t { + return true + } + l = l.next + } + return false +} + +// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly, +// via exported fields. (Schema inference ignores unexported fields.) +func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) { + for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return false, nil + } + if seen.has(t) { + return true, nil + } + fields, err := fieldCache.Fields(t) + if err != nil { + return false, err + } + seen = &typeList{t, seen} + // Because seen is a linked list, additions to it from one field's + // recursive call will not affect the value for subsequent fields' calls. + for _, field := range fields { + ok, err := hasRecursiveType(field.Type, seen) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + return false, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/schema_test.go b/vendor/cloud.google.com/go/bigquery/schema_test.go new file mode 100644 index 0000000..4418aca --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema_test.go @@ -0,0 +1,800 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "fmt" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func (fs *FieldSchema) GoString() string { + if fs == nil { + return "" + } + + return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}", + fs.Name, + fs.Description, + fs.Repeated, + fs.Required, + fs.Type, + fmt.Sprintf("%#v", fs.Schema), + ) +} + +func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Description: desc, + Name: name, + Mode: mode, + Type: typ, + } +} + +func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { + return &FieldSchema{ + Description: desc, + Name: name, + Repeated: repeated, + Required: required, + Type: FieldType(typ), + } +} + +func TestSchemaConversion(t *testing.T) { + testCases := []struct { + schema Schema + bqSchema *bq.TableSchema + }{ + { + // required + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, true), + }, + }, + { + // repeated + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", true, false), + }, + }, + { + // nullable, string + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, false), + }, + }, + { + // integer + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "INTEGER", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "INTEGER", false, false), + }, + }, + { + // float + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "FLOAT", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "FLOAT", false, false), + }, + }, + { + // boolean + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "BOOLEAN", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "BOOLEAN", false, false), + }, + }, + { + // timestamp + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "TIMESTAMP", false, false), + }, + }, + { + // civil times + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "f1", "TIME", ""), + bqTableFieldSchema("desc", "f2", "DATE", ""), + bqTableFieldSchema("desc", "f3", "DATETIME", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "f1", "TIME", false, false), + fieldSchema("desc", "f2", "DATE", false, false), + fieldSchema("desc", "f3", "DATETIME", false, false), + }, + }, + { + // nested + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + { + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Mode: "REQUIRED", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("inner field", "inner", "STRING", ""), + }, + }, + }, + }, + schema: Schema{ + &FieldSchema{ + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + 
Description: "inner field", + Name: "inner", + Type: "STRING", + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + bqSchema := tc.schema.toBQ() + if !testutil.Equal(bqSchema, tc.bqSchema) { + t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", + pretty.Value(bqSchema), pretty.Value(tc.bqSchema)) + } + schema := bqToSchema(tc.bqSchema) + if !testutil.Equal(schema, tc.schema) { + t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema) + } + } +} + +type allStrings struct { + String string + ByteSlice []byte +} + +type allSignedIntegers struct { + Int64 int64 + Int32 int32 + Int16 int16 + Int8 int8 + Int int +} + +type allUnsignedIntegers struct { + Uint32 uint32 + Uint16 uint16 + Uint8 uint8 +} + +type allFloat struct { + Float64 float64 + Float32 float32 + // NOTE: Complex32 and Complex64 are unsupported by BigQuery +} + +type allBoolean struct { + Bool bool +} + +type allTime struct { + Timestamp time.Time + Time civil.Time + Date civil.Date + DateTime civil.DateTime +} + +func reqField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Required: true, + } +} + +func TestSimpleInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: allSignedIntegers{}, + want: Schema{ + reqField("Int64", "INTEGER"), + reqField("Int32", "INTEGER"), + reqField("Int16", "INTEGER"), + reqField("Int8", "INTEGER"), + reqField("Int", "INTEGER"), + }, + }, + { + in: allUnsignedIntegers{}, + want: Schema{ + reqField("Uint32", "INTEGER"), + reqField("Uint16", "INTEGER"), + reqField("Uint8", "INTEGER"), + }, + }, + { + in: allFloat{}, + want: Schema{ + reqField("Float64", "FLOAT"), + reqField("Float32", "FLOAT"), + }, + }, + { + in: allBoolean{}, + want: Schema{ + reqField("Bool", "BOOLEAN"), + }, + }, + { + in: &allBoolean{}, + want: Schema{ + reqField("Bool", "BOOLEAN"), + }, + }, + { + in: allTime{}, + want: Schema{ + reqField("Timestamp", "TIMESTAMP"), + reqField("Time", "TIME"), + reqField("Date", "DATE"), + reqField("DateTime", "DATETIME"), + }, + }, + { + in: allStrings{}, + want: Schema{ + reqField("String", "STRING"), + reqField("ByteSlice", "BYTES"), + }, + }, + } + for _, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type containsNested struct { + hidden string + NotNested int + Nested struct { + Inside int + } +} + +type containsDoubleNested struct { + NotNested int + Nested struct { + InsideNested struct { + Inside int + } + } +} + +type ptrNested struct { + Ptr *struct{ Inside int } +} + +type dup struct { // more than one field of the same struct type + A, B allBoolean +} + +func TestNestedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: containsNested{}, + want: Schema{ + reqField("NotNested", "INTEGER"), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + { + in: containsDoubleNested{}, + want: Schema{ + reqField("NotNested", "INTEGER"), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: Schema{ + { + Name: "InsideNested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + }, + }, + { + in: ptrNested{}, + want: 
Schema{ + &FieldSchema{ + Name: "Ptr", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + { + in: dup{}, + want: Schema{ + &FieldSchema{ + Name: "A", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + &FieldSchema{ + Name: "B", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + }, + }, + } + + for _, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type repeated struct { + NotRepeated []byte + RepeatedByteSlice [][]byte + Slice []int + Array [5]bool +} + +type nestedRepeated struct { + NotRepeated int + Repeated []struct { + Inside int + } + RepeatedPtr []*struct{ Inside int } +} + +func repField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Repeated: true, + } +} + +func TestRepeatedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: repeated{}, + want: Schema{ + reqField("NotRepeated", "BYTES"), + repField("RepeatedByteSlice", "BYTES"), + repField("Slice", "INTEGER"), + repField("Array", "BOOLEAN"), + }, + }, + { + in: nestedRepeated{}, + want: Schema{ + reqField("NotRepeated", "INTEGER"), + { + Name: "Repeated", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + { + Name: "RepeatedPtr", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + } + + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type Embedded struct { + Embedded int +} + +type embedded struct { + Embedded2 int +} + +type nestedEmbedded struct { + Embedded + embedded +} + +func TestEmbeddedInference(t *testing.T) { + got, err := InferSchema(nestedEmbedded{}) + if err != nil { + t.Fatal(err) + } + want := Schema{ + reqField("Embedded", "INTEGER"), + reqField("Embedded2", "INTEGER"), + } + if !testutil.Equal(got, want) { + t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want)) + } +} + +func TestRecursiveInference(t *testing.T) { + type List struct { + Val int + Next *List + } + + _, err := InferSchema(List{}) + if err == nil { + t.Fatal("got nil, want error") + } +} + +type withTags struct { + NoTag int + ExcludeTag int `bigquery:"-"` + SimpleTag int `bigquery:"simple_tag"` + UnderscoreTag int `bigquery:"_id"` + MixedCase int `bigquery:"MIXEDcase"` + Nullable int `bigquery:",nullable"` +} + +type withTagsNested struct { + Nested withTags `bigquery:"nested"` + NestedAnonymous struct { + ExcludeTag int `bigquery:"-"` + Inside int `bigquery:"inside"` + } `bigquery:"anon"` +} + +type withTagsRepeated struct { + Repeated []withTags `bigquery:"repeated"` + RepeatedAnonymous []struct { + ExcludeTag int `bigquery:"-"` + Inside int `bigquery:"inside"` + } `bigquery:"anon"` +} + +type withTagsEmbedded struct { + withTags +} + +var withTagsSchema = Schema{ + reqField("NoTag", "INTEGER"), + reqField("simple_tag", "INTEGER"), + reqField("_id", "INTEGER"), + reqField("MIXEDcase", "INTEGER"), + {Name: "Nullable", 
Type: FieldType("INTEGER"), Required: false}, +} + +func TestTagInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: withTags{}, + want: withTagsSchema, + }, + { + in: withTagsNested{}, + want: Schema{ + &FieldSchema{ + Name: "nested", + Required: true, + Type: "RECORD", + Schema: withTagsSchema, + }, + &FieldSchema{ + Name: "anon", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("inside", "INTEGER")}, + }, + }, + }, + { + in: withTagsRepeated{}, + want: Schema{ + &FieldSchema{ + Name: "repeated", + Repeated: true, + Type: "RECORD", + Schema: withTagsSchema, + }, + &FieldSchema{ + Name: "anon", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("inside", "INTEGER")}, + }, + }, + }, + { + in: withTagsEmbedded{}, + want: withTagsSchema, + }, + } + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !testutil.Equal(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +func TestTagInferenceErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: struct { + LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupporedStartChar int `bigquery:"øab"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupportedEndChar int `bigquery:"abø"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupportedMiddleChar int `bigquery:"aøb"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + StartInt int `bigquery:"1abc"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + Hyphens int `bigquery:"a-b"` + }{}, + err: errInvalidFieldName, + }, + } + for i, tc := range testCases { + want := tc.err + _, got := InferSchema(tc.in) + if got != want { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want) + } + } + + _, err := InferSchema(struct { + X int `bigquery:",optional"` + }{}) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestSchemaErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: []byte{}, + err: errNoStruct, + }, + { + in: new(int), + err: errNoStruct, + }, + { + in: struct{ Uint uint }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Uint64 uint64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Uintptr uintptr }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Complex complex64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Map map[string]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Chan chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Ptr *int }{}, + err: errNoStruct, + }, + { + in: struct{ Interface interface{} }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][][]byte }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ ChanSlice []chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ NestedChan struct{ Chan []chan bool } }{}, + err: errUnsupportedFieldType, + }, + } + for _, tc := range testCases { + want := tc.err + _, got := InferSchema(tc.in) + if got != want { + t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, 
want)
+		}
+	}
+}
+
+func TestHasRecursiveType(t *testing.T) {
+	type (
+		nonStruct int
+		nonRec struct{ A string }
+		dup struct{ A, B nonRec }
+		rec struct {
+			A int
+			B *rec
+		}
+		recUnexported struct {
+			A int
+			b *rec
+		}
+		hasRec struct {
+			A int
+			R *rec
+		}
+		recSlicePointer struct {
+			A []*recSlicePointer
+		}
+	)
+	for _, test := range []struct {
+		in   interface{}
+		want bool
+	}{
+		{nonStruct(0), false},
+		{nonRec{}, false},
+		{dup{}, false},
+		{rec{}, true},
+		{recUnexported{}, false},
+		{hasRec{}, true},
+		{&recSlicePointer{}, true},
+	} {
+		got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if got != test.want {
+			t.Errorf("%T: got %t, want %t", test.in, got, test.want)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigquery/table.go b/vendor/cloud.google.com/go/bigquery/table.go
new file mode 100644
index 0000000..86dda67
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/table.go
@@ -0,0 +1,487 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"cloud.google.com/go/internal/optional"
+	bq "google.golang.org/api/bigquery/v2"
+)
+
+// A Table is a reference to a BigQuery table.
+type Table struct {
+	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
+	// In this case the result will be stored in an ephemeral table.
+	ProjectID string
+	DatasetID string
+	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
+	// The maximum length is 1,024 characters.
+	TableID string
+
+	c *Client
+}
+
+// TableMetadata contains information about a BigQuery table.
+type TableMetadata struct {
+	// The following fields can be set when creating a table.
+
+	// The user-friendly name for the table.
+	Name string
+
+	// The user-friendly description of the table.
+	Description string
+
+	// The table schema. If provided on create, ViewQuery must be empty.
+	Schema Schema
+
+	// The query to use for a view. If provided on create, Schema must be nil.
+	ViewQuery string
+
+	// Use Legacy SQL for the view query.
+	// At most one of UseLegacySQL and UseStandardSQL can be true.
+	UseLegacySQL bool
+
+	// Use Standard SQL for the view query. The default.
+	// At most one of UseLegacySQL and UseStandardSQL can be true.
+	// Deprecated: use UseLegacySQL.
+	UseStandardSQL bool
+
+	// If non-nil, the table is partitioned by time.
+	TimePartitioning *TimePartitioning
+
+	// The time when this table expires. If not set, the table will persist
+	// indefinitely. Expired tables will be deleted and their storage reclaimed.
+	ExpirationTime time.Time
+
+	// User-provided labels.
+	Labels map[string]string
+
+	// Information about a table stored outside of BigQuery.
+	ExternalDataConfig *ExternalDataConfig
+
+	// All the fields below are read-only.
+
+	FullID string // An opaque ID uniquely identifying the table.
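+
+	// The type of the table: regular, view, or external (see TableType).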
+ Type TableType + CreationTime time.Time + LastModifiedTime time.Time + + // The size of the table in bytes. + // This does not include data that is being buffered during a streaming insert. + NumBytes int64 + + // The number of rows of data in this table. + // This does not include data that is being buffered during a streaming insert. + NumRows uint64 + + // Contains information regarding this table's streaming buffer, if one is + // present. This field will be nil if the table is not being streamed to or if + // there is no data in the streaming buffer. + StreamingBuffer *StreamingBuffer + + // ETag is the ETag obtained when reading metadata. Pass it to Table.Update to + // ensure that the metadata hasn't changed since it was read. + ETag string +} + +// TableCreateDisposition specifies the circumstances under which destination table will be created. +// Default is CreateIfNeeded. +type TableCreateDisposition string + +const ( + // CreateIfNeeded will create the table if it does not already exist. + // Tables are created atomically on successful completion of a job. + CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED" + + // CreateNever ensures the table must already exist and will not be + // automatically created. + CreateNever TableCreateDisposition = "CREATE_NEVER" +) + +// TableWriteDisposition specifies how existing data in a destination table is treated. +// Default is WriteAppend. +type TableWriteDisposition string + +const ( + // WriteAppend will append to any existing data in the destination table. + // Data is appended atomically on successful completion of a job. + WriteAppend TableWriteDisposition = "WRITE_APPEND" + + // WriteTruncate overrides the existing data in the destination table. + // Data is overwritten atomically on successful completion of a job. + WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" + + // WriteEmpty fails writes if the destination table already contains data. + WriteEmpty TableWriteDisposition = "WRITE_EMPTY" +) + +// TableType is the type of table. +type TableType string + +const ( + RegularTable TableType = "TABLE" + ViewTable TableType = "VIEW" + ExternalTable TableType = "EXTERNAL" +) + +// TimePartitioning describes the time-based date partitioning on a table. +// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables. +type TimePartitioning struct { + // The amount of time to keep the storage for a partition. + // If the duration is empty (0), the data in the partitions do not expire. + Expiration time.Duration + + // If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the + // table is partitioned by this field. The field must be a top-level TIMESTAMP or + // DATE field. Its mode must be NULLABLE or REQUIRED. + Field string +} + +func (p *TimePartitioning) toBQ() *bq.TimePartitioning { + if p == nil { + return nil + } + return &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: int64(p.Expiration / time.Millisecond), + Field: p.Field, + } +} + +func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning { + if q == nil { + return nil + } + return &TimePartitioning{ + Expiration: time.Duration(q.ExpirationMs) * time.Millisecond, + Field: q.Field, + } +} + +// StreamingBuffer holds information about the streaming buffer. +type StreamingBuffer struct { + // A lower-bound estimate of the number of bytes currently in the streaming + // buffer. + EstimatedBytes uint64 + + // A lower-bound estimate of the number of rows currently in the streaming + // buffer. 
+ EstimatedRows uint64 + + // The time of the oldest entry in the streaming buffer. + OldestEntryTime time.Time +} + +func (t *Table) toBQ() *bq.TableReference { + return &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } +} + +// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. +func (t *Table) FullyQualifiedName() string { + return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) +} + +// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. +func (t *Table) implicitTable() bool { + return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" +} + +// Create creates a table in the BigQuery service. +// Pass in a TableMetadata value to configure the table. +// If tm.View.Query is non-empty, the created table will be of type VIEW. +// Expiration can only be set during table creation. +// After table creation, a view can be modified only if its table was initially created +// with a view. +func (t *Table) Create(ctx context.Context, tm *TableMetadata) error { + table, err := tm.toBQ() + if err != nil { + return err + } + table.TableReference = &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } + req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx) + setClientHeader(req.Header()) + _, err = req.Do() + return err +} + +func (tm *TableMetadata) toBQ() (*bq.Table, error) { + t := &bq.Table{} + if tm == nil { + return t, nil + } + if tm.Schema != nil && tm.ViewQuery != "" { + return nil, errors.New("bigquery: provide Schema or ViewQuery, not both") + } + t.FriendlyName = tm.Name + t.Description = tm.Description + t.Labels = tm.Labels + if tm.Schema != nil { + t.Schema = tm.Schema.toBQ() + } + if tm.ViewQuery != "" { + if tm.UseStandardSQL && tm.UseLegacySQL { + return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") + } + t.View = &bq.ViewDefinition{Query: tm.ViewQuery} + if tm.UseLegacySQL { + t.View.UseLegacySql = true + } else { + t.View.UseLegacySql = false + t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql") + } + } else if tm.UseLegacySQL || tm.UseStandardSQL { + return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery") + } + t.TimePartitioning = tm.TimePartitioning.toBQ() + if !tm.ExpirationTime.IsZero() { + t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 + } + if tm.ExternalDataConfig != nil { + edc := tm.ExternalDataConfig.toBQ() + t.ExternalDataConfiguration = &edc + } + if tm.FullID != "" { + return nil, errors.New("cannot set FullID on create") + } + if tm.Type != "" { + return nil, errors.New("cannot set Type on create") + } + if !tm.CreationTime.IsZero() { + return nil, errors.New("cannot set CreationTime on create") + } + if !tm.LastModifiedTime.IsZero() { + return nil, errors.New("cannot set LastModifiedTime on create") + } + if tm.NumBytes != 0 { + return nil, errors.New("cannot set NumBytes on create") + } + if tm.NumRows != 0 { + return nil, errors.New("cannot set NumRows on create") + } + if tm.StreamingBuffer != nil { + return nil, errors.New("cannot set StreamingBuffer on create") + } + if tm.ETag != "" { + return nil, errors.New("cannot set ETag on create") + } + return t, nil +} + +// Metadata fetches the metadata for the table. 
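+// A minimal usage sketch (assumes a configured *Client named client and a
+// context ctx):
+//
+//	md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	fmt.Println(md.NumRows, md.ETag)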
+func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) { + req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) + setClientHeader(req.Header()) + var table *bq.Table + err := runWithRetry(ctx, func() (err error) { + table, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return bqToTableMetadata(table) +} + +func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) { + md := &TableMetadata{ + Description: t.Description, + Name: t.FriendlyName, + Type: TableType(t.Type), + FullID: t.Id, + Labels: t.Labels, + NumBytes: t.NumBytes, + NumRows: t.NumRows, + ExpirationTime: unixMillisToTime(t.ExpirationTime), + CreationTime: unixMillisToTime(t.CreationTime), + LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)), + ETag: t.Etag, + } + if t.Schema != nil { + md.Schema = bqToSchema(t.Schema) + } + if t.View != nil { + md.ViewQuery = t.View.Query + md.UseLegacySQL = t.View.UseLegacySql + } + md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning) + if t.StreamingBuffer != nil { + md.StreamingBuffer = &StreamingBuffer{ + EstimatedBytes: t.StreamingBuffer.EstimatedBytes, + EstimatedRows: t.StreamingBuffer.EstimatedRows, + OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)), + } + } + if t.ExternalDataConfiguration != nil { + edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration) + if err != nil { + return nil, err + } + md.ExternalDataConfig = edc + } + return md, nil +} + +// Delete deletes the table. +func (t *Table) Delete(ctx context.Context) error { + req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) + setClientHeader(req.Header()) + return req.Do() +} + +// Read fetches the contents of the table. +func (t *Table) Read(ctx context.Context) *RowIterator { + return t.read(ctx, fetchPage) +} + +func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator { + return newRowIterator(ctx, t, pf) +} + +// Update modifies specific Table metadata fields. 
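+// A sketch of a guarded update, using the ETag from a prior Metadata call so
+// that the write fails if the metadata changed in between (table and ctx are
+// assumed to exist):
+//
+//	md, err := table.Metadata(ctx)
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	tm := TableMetadataToUpdate{Description: "nightly snapshot"}
+//	md, err = table.Update(ctx, tm, md.ETag)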
+func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) { + bqt := tm.toBQ() + call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx) + setClientHeader(call.Header()) + if etag != "" { + call.Header().Set("If-Match", etag) + } + var res *bq.Table + if err := runWithRetry(ctx, func() (err error) { + res, err = call.Do() + return err + }); err != nil { + return nil, err + } + return bqToTableMetadata(res) +} + +func (tm *TableMetadataToUpdate) toBQ() *bq.Table { + t := &bq.Table{} + forceSend := func(field string) { + t.ForceSendFields = append(t.ForceSendFields, field) + } + + if tm.Description != nil { + t.Description = optional.ToString(tm.Description) + forceSend("Description") + } + if tm.Name != nil { + t.FriendlyName = optional.ToString(tm.Name) + forceSend("FriendlyName") + } + if tm.Schema != nil { + t.Schema = tm.Schema.toBQ() + forceSend("Schema") + } + if !tm.ExpirationTime.IsZero() { + t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 + forceSend("ExpirationTime") + } + if tm.ViewQuery != nil { + t.View = &bq.ViewDefinition{ + Query: optional.ToString(tm.ViewQuery), + ForceSendFields: []string{"Query"}, + } + } + if tm.UseLegacySQL != nil { + if t.View == nil { + t.View = &bq.ViewDefinition{} + } + t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL) + t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql") + } + labels, forces, nulls := tm.update() + t.Labels = labels + t.ForceSendFields = append(t.ForceSendFields, forces...) + t.NullFields = append(t.NullFields, nulls...) + return t +} + +// TableMetadataToUpdate is used when updating a table's metadata. +// Only non-nil fields will be updated. +type TableMetadataToUpdate struct { + // The user-friendly description of this table. + Description optional.String + + // The user-friendly name for this table. + Name optional.String + + // The table's schema. + // When updating a schema, you can add columns but not remove them. + Schema Schema + + // The time when this table expires. + ExpirationTime time.Time + + // The query to use for a view. + ViewQuery optional.String + + // Use Legacy SQL for the view query. + UseLegacySQL optional.Bool + + labelUpdater +} + +// labelUpdater contains common code for updating labels. +type labelUpdater struct { + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified on a call to Update. +func (u *labelUpdater) SetLabel(name, value string) { + if u.setLabels == nil { + u.setLabels = map[string]string{} + } + u.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted on a call to Update. 
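+// For example, labels can be set and deleted in a single Update call; passing
+// an empty etag skips the If-Match precondition (table and ctx assumed):
+//
+//	var tm TableMetadataToUpdate
+//	tm.SetLabel("team", "analytics")
+//	tm.DeleteLabel("deprecated")
+//	_, err := table.Update(ctx, tm, "")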
+func (u *labelUpdater) DeleteLabel(name string) { + if u.deleteLabels == nil { + u.deleteLabels = map[string]bool{} + } + u.deleteLabels[name] = true +} + +func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) { + if u.setLabels == nil && u.deleteLabels == nil { + return nil, nil, nil + } + labels = map[string]string{} + for k, v := range u.setLabels { + labels[k] = v + } + if len(labels) == 0 && len(u.deleteLabels) > 0 { + forces = []string{"Labels"} + } + for l := range u.deleteLabels { + nulls = append(nulls, "Labels."+l) + } + return labels, forces, nulls +} diff --git a/vendor/cloud.google.com/go/bigquery/table_test.go b/vendor/cloud.google.com/go/bigquery/table_test.go new file mode 100644 index 0000000..4b1cedc --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/table_test.go @@ -0,0 +1,291 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +func TestBQToTableMetadata(t *testing.T) { + aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + aTimeMillis := aTime.UnixNano() / 1e6 + for _, test := range []struct { + in *bq.Table + want *TableMetadata + }{ + {&bq.Table{}, &TableMetadata{}}, // test minimal case + { + &bq.Table{ + CreationTime: aTimeMillis, + Description: "desc", + Etag: "etag", + ExpirationTime: aTimeMillis, + FriendlyName: "fname", + Id: "id", + LastModifiedTime: uint64(aTimeMillis), + Location: "loc", + NumBytes: 123, + NumLongTermBytes: 23, + NumRows: 7, + StreamingBuffer: &bq.Streamingbuffer{ + EstimatedBytes: 11, + EstimatedRows: 3, + OldestEntryTime: uint64(aTimeMillis), + }, + TimePartitioning: &bq.TimePartitioning{ + ExpirationMs: 7890, + Type: "DAY", + Field: "pfield", + }, + Type: "EXTERNAL", + View: &bq.ViewDefinition{Query: "view-query"}, + Labels: map[string]string{"a": "b"}, + ExternalDataConfiguration: &bq.ExternalDataConfiguration{ + SourceFormat: "GOOGLE_SHEETS", + }, + }, + &TableMetadata{ + Description: "desc", + Name: "fname", + ViewQuery: "view-query", + FullID: "id", + Type: ExternalTable, + Labels: map[string]string{"a": "b"}, + ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets}, + ExpirationTime: aTime.Truncate(time.Millisecond), + CreationTime: aTime.Truncate(time.Millisecond), + LastModifiedTime: aTime.Truncate(time.Millisecond), + NumBytes: 123, + NumRows: 7, + TimePartitioning: &TimePartitioning{ + Expiration: 7890 * time.Millisecond, + Field: "pfield", + }, + StreamingBuffer: &StreamingBuffer{ + EstimatedBytes: 11, + EstimatedRows: 3, + OldestEntryTime: aTime, + }, + ETag: "etag", + }, + }, + } { + got, err := bqToTableMetadata(test.in) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, test.want); diff != "" { + t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff) + } + } +} + +func TestTableMetadataToBQ(t *testing.T) { + aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, 
time.Local) + aTimeMillis := aTime.UnixNano() / 1e6 + sc := Schema{fieldSchema("desc", "name", "STRING", false, true)} + + for _, test := range []struct { + in *TableMetadata + want *bq.Table + }{ + {nil, &bq.Table{}}, + {&TableMetadata{}, &bq.Table{}}, + { + &TableMetadata{ + Name: "n", + Description: "d", + Schema: sc, + ExpirationTime: aTime, + Labels: map[string]string{"a": "b"}, + ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable}, + }, + &bq.Table{ + FriendlyName: "n", + Description: "d", + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + ExpirationTime: aTimeMillis, + Labels: map[string]string{"a": "b"}, + ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"}, + }, + }, + { + &TableMetadata{ViewQuery: "q"}, + &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + }, + }, + { + &TableMetadata{ + ViewQuery: "q", + UseLegacySQL: true, + TimePartitioning: &TimePartitioning{}, + }, + &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: true, + }, + TimePartitioning: &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: 0, + }, + }, + }, + { + &TableMetadata{ + ViewQuery: "q", + UseStandardSQL: true, + TimePartitioning: &TimePartitioning{ + Expiration: time.Second, + Field: "ofDreams", + }, + }, + &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: false, + ForceSendFields: []string{"UseLegacySql"}, + }, + TimePartitioning: &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: 1000, + Field: "ofDreams", + }, + }, + }, + } { + got, err := test.in.toBQ() + if err != nil { + t.Fatalf("%+v: %v", test.in, err) + } + if diff := testutil.Diff(got, test.want); diff != "" { + t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff) + } + } + + // Errors + for _, in := range []*TableMetadata{ + {Schema: sc, ViewQuery: "q"}, // can't have both schema and query + {UseLegacySQL: true}, // UseLegacySQL without query + {UseStandardSQL: true}, // UseStandardSQL without query + // read-only fields + {FullID: "x"}, + {Type: "x"}, + {CreationTime: aTime}, + {LastModifiedTime: aTime}, + {NumBytes: 1}, + {NumRows: 1}, + {StreamingBuffer: &StreamingBuffer{}}, + {ETag: "x"}, + } { + _, err := in.toBQ() + if err == nil { + t.Errorf("%+v: got nil, want error", in) + } + } +} + +func TestTableMetadataToUpdateToBQ(t *testing.T) { + aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local) + for _, test := range []struct { + tm TableMetadataToUpdate + want *bq.Table + }{ + { + tm: TableMetadataToUpdate{}, + want: &bq.Table{}, + }, + { + tm: TableMetadataToUpdate{ + Description: "d", + Name: "n", + }, + want: &bq.Table{ + Description: "d", + FriendlyName: "n", + ForceSendFields: []string{"Description", "FriendlyName"}, + }, + }, + { + tm: TableMetadataToUpdate{ + Schema: Schema{fieldSchema("desc", "name", "STRING", false, true)}, + ExpirationTime: aTime, + }, + want: &bq.Table{ + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + ExpirationTime: aTime.UnixNano() / 1e6, + ForceSendFields: []string{"Schema", "ExpirationTime"}, + }, + }, + { + tm: TableMetadataToUpdate{ViewQuery: "q"}, + want: &bq.Table{ + View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}}, + }, + }, + { + tm: TableMetadataToUpdate{UseLegacySQL: false}, + want: &bq.Table{ + View: &bq.ViewDefinition{ + UseLegacySql: false, + 
ForceSendFields: []string{"UseLegacySql"}, + }, + }, + }, + { + tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true}, + want: &bq.Table{ + View: &bq.ViewDefinition{ + Query: "q", + UseLegacySql: true, + ForceSendFields: []string{"Query", "UseLegacySql"}, + }, + }, + }, + { + tm: func() (tm TableMetadataToUpdate) { + tm.SetLabel("L", "V") + tm.DeleteLabel("D") + return tm + }(), + want: &bq.Table{ + Labels: map[string]string{"L": "V"}, + NullFields: []string{"Labels.D"}, + }, + }, + } { + got := test.tm.toBQ() + if !testutil.Equal(got, test.want) { + t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/uploader.go b/vendor/cloud.google.com/go/bigquery/uploader.go new file mode 100644 index 0000000..4d48e3e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/uploader.go @@ -0,0 +1,224 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// An Uploader does streaming inserts into a BigQuery table. +// It is safe for concurrent use. +type Uploader struct { + t *Table + + // SkipInvalidRows causes rows containing invalid data to be silently + // ignored. The default value is false, which causes the entire request to + // fail if there is an attempt to insert an invalid row. + SkipInvalidRows bool + + // IgnoreUnknownValues causes values not matching the schema to be ignored. + // The default value is false, which causes records containing such values + // to be treated as invalid records. + IgnoreUnknownValues bool + + // A TableTemplateSuffix allows Uploaders to create tables automatically. + // + // Experimental: this option is experimental and may be modified or removed in future versions, + // regardless of any other documented package stability guarantees. + // + // When you specify a suffix, the table you upload data to + // will be used as a template for creating a new table, with the same schema, + // called + . + // + // More information is available at + // https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables + TableTemplateSuffix string +} + +// Uploader returns an Uploader that can be used to append rows to t. +// The returned Uploader may optionally be further configured before its Put method is called. +func (t *Table) Uploader() *Uploader { + return &Uploader{t: t} +} + +// Put uploads one or more rows to the BigQuery service. +// +// If src is ValueSaver, then its Save method is called to produce a row for uploading. +// +// If src is a struct or pointer to a struct, then a schema is inferred from it +// and used to create a StructSaver. The InsertID of the StructSaver will be +// empty. +// +// If src is a slice of ValueSavers, structs, or struct pointers, then each +// element of the slice is treated as above, and multiple rows are uploaded. 
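+//
+// A minimal sketch, assuming a struct type Item whose fields match the
+// table's schema, plus a configured client and ctx:
+//
+//	u := client.Dataset("my_dataset").Table("my_table").Uploader()
+//	items := []*Item{{Name: "n1"}, {Name: "n2"}}
+//	if err := u.Put(ctx, items); err != nil {
+//		// TODO: handle error; it may be a PutMultiError.
+//	}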
+// +// Put returns a PutMultiError if one or more rows failed to be uploaded. +// The PutMultiError contains a RowInsertionError for each failed row. +// +// Put will retry on temporary errors (see +// https://cloud.google.com/bigquery/troubleshooting-errors). This can result +// in duplicate rows if you do not use insert IDs. Also, if the error persists, +// the call will run indefinitely. Pass a context with a timeout to prevent +// hanging calls. +func (u *Uploader) Put(ctx context.Context, src interface{}) error { + savers, err := valueSavers(src) + if err != nil { + return err + } + return u.putMulti(ctx, savers) +} + +func valueSavers(src interface{}) ([]ValueSaver, error) { + saver, ok, err := toValueSaver(src) + if err != nil { + return nil, err + } + if ok { + return []ValueSaver{saver}, nil + } + srcVal := reflect.ValueOf(src) + if srcVal.Kind() != reflect.Slice { + return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src) + + } + var savers []ValueSaver + for i := 0; i < srcVal.Len(); i++ { + s := srcVal.Index(i).Interface() + saver, ok, err := toValueSaver(s) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s) + } + savers = append(savers, saver) + } + return savers, nil +} + +// Make a ValueSaver from x, which must implement ValueSaver already +// or be a struct or pointer to struct. +func toValueSaver(x interface{}) (ValueSaver, bool, error) { + if _, ok := x.(StructSaver); ok { + return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver") + } + var insertID string + // Handle StructSavers specially so we can infer the schema if necessary. + if ss, ok := x.(*StructSaver); ok && ss.Schema == nil { + x = ss.Struct + insertID = ss.InsertID + // Fall through so we can infer the schema. + } + if saver, ok := x.(ValueSaver); ok { + return saver, ok, nil + } + v := reflect.ValueOf(x) + // Support Put with []interface{} + if v.Kind() == reflect.Interface { + v = v.Elem() + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, false, nil + } + schema, err := inferSchemaReflectCached(v.Type()) + if err != nil { + return nil, false, err + } + return &StructSaver{ + Struct: x, + InsertID: insertID, + Schema: schema, + }, true, nil +} + +func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error { + req, err := u.newInsertRequest(src) + if err != nil { + return err + } + if req == nil { + return nil + } + call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req) + call = call.Context(ctx) + setClientHeader(call.Header()) + var res *bq.TableDataInsertAllResponse + err = runWithRetry(ctx, func() (err error) { + res, err = call.Do() + return err + }) + if err != nil { + return err + } + return handleInsertErrors(res.InsertErrors, req.Rows) +} + +func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) { + if savers == nil { // If there are no rows, do nothing. 
+		return nil, nil
+	}
+	req := &bq.TableDataInsertAllRequest{
+		TemplateSuffix:      u.TableTemplateSuffix,
+		IgnoreUnknownValues: u.IgnoreUnknownValues,
+		SkipInvalidRows:     u.SkipInvalidRows,
+	}
+	for _, saver := range savers {
+		row, insertID, err := saver.Save()
+		if err != nil {
+			return nil, err
+		}
+		if insertID == "" {
+			insertID = randomIDFn()
+		}
+		m := make(map[string]bq.JsonValue)
+		for k, v := range row {
+			m[k] = bq.JsonValue(v)
+		}
+		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
+			InsertId: insertID,
+			Json:     m,
+		})
+	}
+	return req, nil
+}
+
+func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
+	if len(ierrs) == 0 {
+		return nil
+	}
+	var errs PutMultiError
+	for _, e := range ierrs {
+		// Guard against out-of-range indexes from the service: indexing
+		// rows[e.Index] below would panic. Note >= (not >), since an index
+		// equal to len(rows) is already out of range.
+		if int(e.Index) >= len(rows) {
+			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
+		}
+		rie := RowInsertionError{
+			InsertID: rows[e.Index].InsertId,
+			RowIndex: int(e.Index),
+		}
+		for _, errp := range e.Errors {
+			rie.Errors = append(rie.Errors, bqToError(errp))
+		}
+		errs = append(errs, rie)
+	}
+	return errs
+}
diff --git a/vendor/cloud.google.com/go/bigquery/uploader_test.go b/vendor/cloud.google.com/go/bigquery/uploader_test.go
new file mode 100644
index 0000000..d0fbab2
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/uploader_test.go
@@ -0,0 +1,211 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
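+
+// Illustrative sketch: Put reports per-row failures as a PutMultiError, so a
+// caller that needs row-level detail can type-assert the returned error
+// (assumes an Uploader u and rows items as in the Put documentation, plus the
+// standard library log package):
+//
+//	if err := u.Put(ctx, items); err != nil {
+//		if pme, ok := err.(PutMultiError); ok {
+//			for _, rie := range pme {
+//				log.Printf("row %d (insert ID %q): %v", rie.RowIndex, rie.InsertID, rie.Errors)
+//			}
+//		}
+//	}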
+ +package bigquery + +import ( + "errors" + "strconv" + "testing" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + bq "google.golang.org/api/bigquery/v2" +) + +type testSaver struct { + row map[string]Value + insertID string + err error +} + +func (ts testSaver) Save() (map[string]Value, string, error) { + return ts.row, ts.insertID, ts.err +} + +func TestNewInsertRequest(t *testing.T) { + prev := randomIDFn + n := 0 + randomIDFn = func() string { n++; return strconv.Itoa(n) } + defer func() { randomIDFn = prev }() + + tests := []struct { + ul *Uploader + savers []ValueSaver + req *bq.TableDataInsertAllRequest + }{ + { + ul: &Uploader{}, + savers: nil, + req: nil, + }, + { + ul: &Uploader{}, + savers: []ValueSaver{ + testSaver{row: map[string]Value{"one": 1}}, + testSaver{row: map[string]Value{"two": 2}}, + }, + req: &bq.TableDataInsertAllRequest{ + Rows: []*bq.TableDataInsertAllRequestRows{ + {InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}}, + {InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}}, + }, + }, + }, + { + ul: &Uploader{ + TableTemplateSuffix: "suffix", + IgnoreUnknownValues: true, + SkipInvalidRows: true, + }, + savers: []ValueSaver{ + testSaver{insertID: "a", row: map[string]Value{"one": 1}}, + testSaver{insertID: "", row: map[string]Value{"two": 2}}, + }, + req: &bq.TableDataInsertAllRequest{ + Rows: []*bq.TableDataInsertAllRequestRows{ + {InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}}, + {InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}}, + }, + TemplateSuffix: "suffix", + SkipInvalidRows: true, + IgnoreUnknownValues: true, + }, + }, + } + for i, tc := range tests { + got, err := tc.ul.newInsertRequest(tc.savers) + if err != nil { + t.Fatal(err) + } + want := tc.req + if !testutil.Equal(got, want) { + t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want) + } + } +} + +func TestNewInsertRequestErrors(t *testing.T) { + var u Uploader + _, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}}) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestHandleInsertErrors(t *testing.T) { + rows := []*bq.TableDataInsertAllRequestRows{ + {InsertId: "a"}, + {InsertId: "b"}, + } + for _, test := range []struct { + in []*bq.TableDataInsertAllResponseInsertErrors + want error + }{ + { + in: nil, + want: nil, + }, + { + in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}}, + want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}}, + }, + { + in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}}, + want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}}, + }, + { + in: []*bq.TableDataInsertAllResponseInsertErrors{ + {Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0}, + {Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1}, + }, + want: PutMultiError{ + RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}}, + RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}}, + }, + }, + } { + got := handleInsertErrors(test.in, rows) + if !testutil.Equal(got, test.want) { + t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want) + } + } +} + +func TestValueSavers(t *testing.T) { + ts := &testSaver{} + type T struct{ I int } + schema, err := InferSchema(T{}) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + in interface{} + want []ValueSaver + }{ + {[]interface{}(nil), nil}, + {[]interface{}{}, nil}, + {ts, []ValueSaver{ts}}, + {T{I: 1}, 
[]ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}}, + {[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}}, + {[]interface{}{ts, ts}, []ValueSaver{ts, ts}}, + {[]T{{I: 1}, {I: 2}}, []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 1}}, + &StructSaver{Schema: schema, Struct: T{I: 2}}, + }}, + {[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 1}}, + &StructSaver{Schema: schema, Struct: &T{I: 2}}, + }}, + {&StructSaver{Struct: T{I: 3}, InsertID: "foo"}, + []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"}, + }}, + } { + got, err := valueSavers(test.in) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) { + t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want)) + } + // Make sure Save is successful. + for i, vs := range got { + _, _, err := vs.Save() + if err != nil { + t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err) + } + } + } +} + +func TestValueSaversErrors(t *testing.T) { + inputs := []interface{}{ + nil, + 1, + []int{1, 2}, + []interface{}{ + testSaver{row: map[string]Value{"one": 1}, insertID: "a"}, + 1, + }, + StructSaver{}, + } + for _, in := range inputs { + if _, err := valueSavers(in); err == nil { + t.Errorf("%#v: got nil, want error", in) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go new file mode 100644 index 0000000..2c87545 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value.go @@ -0,0 +1,718 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "errors" + "fmt" + "reflect" + "strconv" + "time" + + "cloud.google.com/go/civil" + + bq "google.golang.org/api/bigquery/v2" +) + +// Value stores the contents of a single cell from a BigQuery result. +type Value interface{} + +// ValueLoader stores a slice of Values representing a result row from a Read operation. +// See RowIterator.Next for more information. +type ValueLoader interface { + Load(v []Value, s Schema) error +} + +// valueList converts a []Value to implement ValueLoader. +type valueList []Value + +// Load stores a sequence of values in a valueList. +// It resets the slice length to zero, then appends each value to it. +func (vs *valueList) Load(v []Value, _ Schema) error { + *vs = append((*vs)[:0], v...) + return nil +} + +// valueMap converts a map[string]Value to implement ValueLoader. +type valueMap map[string]Value + +// Load stores a sequence of values in a valueMap. 
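+// Nested records load as nested maps: a RECORD field "addr" with a subfield
+// "city" loads as map[string]Value{"addr": map[string]Value{"city": ...}}
+// (illustrative field names; see loadMap below).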
+func (vm *valueMap) Load(v []Value, s Schema) error { + if *vm == nil { + *vm = map[string]Value{} + } + loadMap(*vm, v, s) + return nil +} + +func loadMap(m map[string]Value, vals []Value, s Schema) { + for i, f := range s { + val := vals[i] + var v interface{} + switch { + case f.Schema == nil: + v = val + case !f.Repeated: + m2 := map[string]Value{} + loadMap(m2, val.([]Value), f.Schema) + v = m2 + default: // repeated and nested + sval := val.([]Value) + vs := make([]Value, len(sval)) + for j, e := range sval { + m2 := map[string]Value{} + loadMap(m2, e.([]Value), f.Schema) + vs[j] = m2 + } + v = vs + } + m[f.Name] = v + } +} + +type structLoader struct { + typ reflect.Type // type of struct + err error + ops []structLoaderOp + + vstructp reflect.Value // pointer to current struct value; changed by set +} + +// A setFunc is a function that sets a struct field or slice/array +// element to a value. +type setFunc func(v reflect.Value, val interface{}) error + +// A structLoaderOp instructs the loader to set a struct field to a row value. +type structLoaderOp struct { + fieldIndex []int + valueIndex int + setFunc setFunc + repeated bool +} + +var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs") + +func setAny(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.Set(reflect.ValueOf(x)) + return nil +} + +func setInt(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + xx := x.(int64) + if v.OverflowInt(xx) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetInt(xx) + return nil +} + +func setFloat(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + xx := x.(float64) + if v.OverflowFloat(xx) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetFloat(xx) + return nil +} + +func setBool(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.SetBool(x.(bool)) + return nil +} + +func setString(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.SetString(x.(string)) + return nil +} + +func setBytes(v reflect.Value, x interface{}) error { + if x == nil { + return errNoNulls + } + v.SetBytes(x.([]byte)) + return nil +} + +// set remembers a value for the next call to Load. The value must be +// a pointer to a struct. (This is checked in RowIterator.Next.) +func (sl *structLoader) set(structp interface{}, schema Schema) error { + if sl.err != nil { + return sl.err + } + sl.vstructp = reflect.ValueOf(structp) + typ := sl.vstructp.Type().Elem() + if sl.typ == nil { + // First call: remember the type and compile the schema. + sl.typ = typ + ops, err := compileToOps(typ, schema) + if err != nil { + sl.err = err + return err + } + sl.ops = ops + } else if sl.typ != typ { + return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ) + } + return nil +} + +// compileToOps produces a sequence of operations that will set the fields of a +// value of structType to the contents of a row with schema. 
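+// Struct fields are matched to schema fields by name, ignoring case (BigQuery
+// column names are case-insensitive), so a column "name" can populate a
+// struct field Name.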
+func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) { + var ops []structLoaderOp + fields, err := fieldCache.Fields(structType) + if err != nil { + return nil, err + } + for i, schemaField := range schema { + // Look for an exported struct field with the same name as the schema + // field, ignoring case (BigQuery column names are case-insensitive, + // and we want to act like encoding/json anyway). + structField := fields.Match(schemaField.Name) + if structField == nil { + // Ignore schema fields with no corresponding struct field. + continue + } + op := structLoaderOp{ + fieldIndex: structField.Index, + valueIndex: i, + } + t := structField.Type + if schemaField.Repeated { + if t.Kind() != reflect.Slice && t.Kind() != reflect.Array { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s", + schemaField.Name, structField.Name, t) + } + t = t.Elem() + op.repeated = true + } + if schemaField.Type == RecordFieldType { + // Field can be a struct or a pointer to a struct. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct", + structField.Name, structField.Type) + } + nested, err := compileToOps(t, schemaField.Schema) + if err != nil { + return nil, err + } + op.setFunc = func(v reflect.Value, val interface{}) error { + return setNested(nested, v, val.([]Value)) + } + } else { + op.setFunc = determineSetFunc(t, schemaField.Type) + if op.setFunc == nil { + return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s", + schemaField.Name, schemaField.Type, structField.Name, t) + } + } + ops = append(ops, op) + } + return ops, nil +} + +// determineSetFunc chooses the best function for setting a field of type ftype +// to a value whose schema field type is stype. It returns nil if stype +// is not assignable to ftype. +// determineSetFunc considers only basic types. See compileToOps for +// handling of repetition and nesting. +func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc { + switch stype { + case StringFieldType: + if ftype.Kind() == reflect.String { + return setString + } + + case BytesFieldType: + if ftype == typeOfByteSlice { + return setBytes + } + + case IntegerFieldType: + if isSupportedIntType(ftype) { + return setInt + } + + case FloatFieldType: + switch ftype.Kind() { + case reflect.Float32, reflect.Float64: + return setFloat + } + + case BooleanFieldType: + if ftype.Kind() == reflect.Bool { + return setBool + } + + case TimestampFieldType: + if ftype == typeOfGoTime { + return setAny + } + + case DateFieldType: + if ftype == typeOfDate { + return setAny + } + + case TimeFieldType: + if ftype == typeOfTime { + return setAny + } + + case DateTimeFieldType: + if ftype == typeOfDateTime { + return setAny + } + } + return nil +} + +func (sl *structLoader) Load(values []Value, _ Schema) error { + if sl.err != nil { + return sl.err + } + return runOps(sl.ops, sl.vstructp.Elem(), values) +} + +// runOps executes a sequence of ops, setting the fields of vstruct to the +// supplied values. 
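+// The ops are compiled once per struct type by structLoader.set, so repeated
+// Load calls on the same type skip recompilation.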
+func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error { + for _, op := range ops { + field := vstruct.FieldByIndex(op.fieldIndex) + var err error + if op.repeated { + err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc) + } else { + err = op.setFunc(field, values[op.valueIndex]) + } + if err != nil { + return err + } + } + return nil +} + +func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error { + // v is either a struct or a pointer to a struct. + if v.Kind() == reflect.Ptr { + // If the pointer is nil, set it to a zero struct value. + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return runOps(ops, v, vals) +} + +func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error { + vlen := len(vslice) + var flen int + switch field.Type().Kind() { + case reflect.Slice: + // Make a slice of the right size, avoiding allocation if possible. + switch { + case field.Len() < vlen: + field.Set(reflect.MakeSlice(field.Type(), vlen, vlen)) + case field.Len() > vlen: + field.SetLen(vlen) + } + flen = vlen + + case reflect.Array: + flen = field.Len() + if flen > vlen { + // Set extra elements to their zero value. + z := reflect.Zero(field.Type().Elem()) + for i := vlen; i < flen; i++ { + field.Index(i).Set(z) + } + } + default: + return fmt.Errorf("bigquery: impossible field type %s", field.Type()) + } + for i, val := range vslice { + if i < flen { // avoid writing past the end of a short array + if err := setElem(field.Index(i), val); err != nil { + return err + } + } + } + return nil +} + +// A ValueSaver returns a row of data to be inserted into a table. +type ValueSaver interface { + // Save returns a row to be inserted into a BigQuery table, represented + // as a map from field name to Value. + // If insertID is non-empty, BigQuery will use it to de-duplicate + // insertions of this row on a best-effort basis. + Save() (row map[string]Value, insertID string, err error) +} + +// ValuesSaver implements ValueSaver for a slice of Values. +type ValuesSaver struct { + Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + Row []Value +} + +// Save implements ValueSaver. +func (vls *ValuesSaver) Save() (map[string]Value, string, error) { + m, err := valuesToMap(vls.Row, vls.Schema) + return m, vls.InsertID, err +} + +func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) { + if len(vs) != len(schema) { + return nil, errors.New("Schema does not match length of row to be inserted") + } + + m := make(map[string]Value) + for i, fieldSchema := range schema { + if fieldSchema.Type != RecordFieldType { + m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema) + continue + } + // Nested record, possibly repeated. + vals, ok := vs[i].([]Value) + if !ok { + return nil, errors.New("nested record is not a []Value") + } + if !fieldSchema.Repeated { + value, err := valuesToMap(vals, fieldSchema.Schema) + if err != nil { + return nil, err + } + m[fieldSchema.Name] = value + continue + } + // A repeated nested field is converted into a slice of maps. 
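+		// e.g. []Value{map[string]Value{...}, map[string]Value{...}} for two
+		// repeated records (illustrative).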
+ var maps []Value + for _, v := range vals { + sv, ok := v.([]Value) + if !ok { + return nil, errors.New("nested record in slice is not a []Value") + } + value, err := valuesToMap(sv, fieldSchema.Schema) + if err != nil { + return nil, err + } + maps = append(maps, value) + } + m[fieldSchema.Name] = maps + } + return m, nil +} + +// StructSaver implements ValueSaver for a struct. +// The struct is converted to a map of values by using the values of struct +// fields corresponding to schema fields. Additional and missing +// fields are ignored, as are nested struct pointers that are nil. +type StructSaver struct { + // Schema determines what fields of the struct are uploaded. It should + // match the table's schema. + Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + // Struct should be a struct or a pointer to a struct. + Struct interface{} +} + +// Save implements ValueSaver. +func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) { + vstruct := reflect.ValueOf(ss.Struct) + row, err = structToMap(vstruct, ss.Schema) + if err != nil { + return nil, "", err + } + return row, ss.InsertID, nil +} + +func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) { + if vstruct.Kind() == reflect.Ptr { + vstruct = vstruct.Elem() + } + if !vstruct.IsValid() { + return nil, nil + } + m := map[string]Value{} + if vstruct.Kind() != reflect.Struct { + return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type()) + } + fields, err := fieldCache.Fields(vstruct.Type()) + if err != nil { + return nil, err + } + for _, schemaField := range schema { + // Look for an exported struct field with the same name as the schema + // field, ignoring case. + structField := fields.Match(schemaField.Name) + if structField == nil { + continue + } + val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField) + if err != nil { + return nil, err + } + // Add the value to the map, unless it is nil. + if val != nil { + m[schemaField.Name] = val + } + } + return m, nil +} + +// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using +// the schemaField as a guide. +// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its +// caller can easily identify a nil value. +func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { + if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", + schemaField.Name, vfield.Type()) + } + + // A non-nested field can be represented by its Go value, except for civil times. + if schemaField.Type != RecordFieldType { + return toUploadValueReflect(vfield, schemaField), nil + } + // A non-repeated nested field is converted into a map[string]Value. + if !schemaField.Repeated { + m, err := structToMap(vfield, schemaField.Schema) + if err != nil { + return nil, err + } + if m == nil { + return nil, nil + } + return m, nil + } + // A repeated nested field is converted into a slice of maps. 
+	if vfield.Len() == 0 {
+		return nil, nil
+	}
+	var vals []Value
+	for i := 0; i < vfield.Len(); i++ {
+		m, err := structToMap(vfield.Index(i), schemaField.Schema)
+		if err != nil {
+			return nil, err
+		}
+		vals = append(vals, m)
+	}
+	return vals, nil
+}
+
+func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
+	if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType {
+		return toUploadValueReflect(reflect.ValueOf(val), fs)
+	}
+	return val
+}
+
+func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
+	switch fs.Type {
+	case TimeFieldType:
+		return civilToUploadValue(v, fs, func(v reflect.Value) string {
+			return CivilTimeString(v.Interface().(civil.Time))
+		})
+	case DateTimeFieldType:
+		return civilToUploadValue(v, fs, func(v reflect.Value) string {
+			return CivilDateTimeString(v.Interface().(civil.DateTime))
+		})
+	default:
+		if !fs.Repeated || v.Len() > 0 {
+			return v.Interface()
+		}
+		// The service treats a null repeated field as an error. Return
+		// nil to omit the field entirely.
+		return nil
+	}
+}
+
+func civilToUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
+	if !fs.Repeated {
+		return cvt(v)
+	}
+	if v.Len() == 0 {
+		return nil
+	}
+	s := make([]string, v.Len())
+	for i := 0; i < v.Len(); i++ {
+		s[i] = cvt(v.Index(i))
+	}
+	return s
+}
+
+// CivilTimeString returns a string representing a civil.Time in a format compatible
+// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
+// string with six digits of sub-second precision.
+//
+// Use CivilTimeString when using civil.Time in DML, for example in INSERT
+// statements.
+func CivilTimeString(t civil.Time) string {
+	if t.Nanosecond == 0 {
+		return t.String()
+	}
+	micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
+	t.Nanosecond = 0
+	return t.String() + fmt.Sprintf(".%06d", micro)
+}
+
+// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
+// with BigQuery SQL. It separates the date and time with a space, and formats the time
+// with CivilTimeString.
+//
+// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
+// statements.
+func CivilDateTimeString(dt civil.DateTime) string {
+	return dt.Date.String() + " " + CivilTimeString(dt.Time)
+}
+
+// convertRows converts a series of TableRows into a series of Value slices.
+// schema is used to interpret the data from rows; its length must match the
+// length of each row.
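+// For example, a STRING cell "a" becomes Value("a") and an INTEGER cell "1"
+// becomes int64(1); see convertBasicType at the end of this file.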
+func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { + var rs [][]Value + for _, r := range rows { + row, err := convertRow(r, schema) + if err != nil { + return nil, err + } + rs = append(rs, row) + } + return rs, nil +} + +func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { + if len(schema) != len(r.F) { + return nil, errors.New("schema length does not match row length") + } + var values []Value + for i, cell := range r.F { + fs := schema[i] + v, err := convertValue(cell.V, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { + switch val := val.(type) { + case nil: + return nil, nil + case []interface{}: + return convertRepeatedRecord(val, typ, schema) + case map[string]interface{}: + return convertNestedRecord(val, schema) + case string: + return convertBasicType(val, typ) + default: + return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) + } +} + +func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { + var values []Value + for _, cell := range vals { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + v, err := convertValue(val, typ, schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { + // convertNestedRecord is similar to convertRow, as a record has the same structure as a row. + + // Nested records are wrapped in a map with a single key, "f". + record := val["f"].([]interface{}) + if len(record) != len(schema) { + return nil, errors.New("schema length does not match record length") + } + + var values []Value + for i, cell := range record { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + fs := schema[i] + v, err := convertValue(val, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +// convertBasicType returns val as an interface with a concrete type specified by typ. +func convertBasicType(val string, typ FieldType) (Value, error) { + switch typ { + case StringFieldType: + return val, nil + case BytesFieldType: + return base64.StdEncoding.DecodeString(val) + case IntegerFieldType: + return strconv.ParseInt(val, 10, 64) + case FloatFieldType: + return strconv.ParseFloat(val, 64) + case BooleanFieldType: + return strconv.ParseBool(val) + case TimestampFieldType: + f, err := strconv.ParseFloat(val, 64) + return Value(time.Unix(0, int64(f*1e9)).UTC()), err + case DateFieldType: + return civil.ParseDate(val) + case TimeFieldType: + return civil.ParseTime(val) + case DateTimeFieldType: + return civil.ParseDateTime(val) + default: + return nil, fmt.Errorf("unrecognized type: %s", typ) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value_test.go b/vendor/cloud.google.com/go/bigquery/value_test.go new file mode 100644 index 0000000..8aab758 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value_test.go @@ -0,0 +1,1065 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "fmt" + "math" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + + bq "google.golang.org/api/bigquery/v2" +) + +func TestConvertBasicValues(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + {Type: BytesFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + {V: base64.StdEncoding.EncodeToString([]byte("foo"))}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{"a", int64(1), 1.2, true, []byte("foo")} + if !testutil.Equal(got, want) { + t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestConvertTime(t *testing.T) { + schema := []*FieldSchema{ + {Type: TimestampFieldType}, + {Type: DateFieldType}, + {Type: TimeFieldType}, + {Type: DateTimeFieldType}, + } + ts := testTimestamp.Round(time.Millisecond) + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: fmt.Sprintf("%.10f", float64(ts.UnixNano())/1e9)}, + {V: testDate.String()}, + {V: testTime.String()}, + {V: testDateTime.String()}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{ts, testDate, testTime, testDateTime} + for i, g := range got { + w := want[i] + if !testutil.Equal(g, w) { + t.Errorf("#%d: got:\n%v\nwant:\n%v", i, g, w) + } + } + if got[0].(time.Time).Location() != time.UTC { + t.Errorf("expected time zone UTC: got:\n%v", got) + } +} + +func TestConvertNullValues(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: nil}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{nil} + if !testutil.Equal(got, want) { + t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestBasicRepetition(t *testing.T) { + schema := []*FieldSchema{ + {Type: IntegerFieldType, Repeated: true}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: []interface{}{ + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{int64(1), int64(2), int64(3)}} + if !testutil.Equal(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestNestedRecordContainingRepetition(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + 
map[string]interface{}{"v": "1"}, + map[string]interface{}{"v": "2"}, + map[string]interface{}{"v": "3"}, + }, + }, + }, + }, + }, + }, + } + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}} + if !testutil.Equal(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRepetition(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // only record (repeated ints) + "v": []interface{}{ // pointless wrapper. + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{ + "v": "4", + }, + map[string]interface{}{ + "v": "5", + }, + map[string]interface{}{ + "v": "6", + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // the record is a list of length 1, containing an entry for the repeated integer field. + []Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3. + }, + []Value{ // second record + []Value{int64(4), int64(5), int64(6)}, + }, + }, + } + if !testutil.Equal(got, want) { + t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRecord(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + { + Type: StringFieldType, + }, + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType}, + {Type: StringFieldType}, + }, + }, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // first record field (name) + "v": "first repeated record", + }, + map[string]interface{}{ // second record field (nested record). + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // nested record fields + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "two", + }, + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. 
+ "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "second repeated record", + }, + map[string]interface{}{ + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "3", + }, + map[string]interface{}{ + "v": "four", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + // TODO: test with flattenresults. + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // record contains a string followed by a nested record. + "first repeated record", + []Value{ + int64(1), + "two", + }, + }, + []Value{ // second record. + "second repeated record", + []Value{ + int64(3), + "four", + }, + }, + }, + } + if !testutil.Equal(got, want) { + t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want) + } +} + +func TestConvertRowErrors(t *testing.T) { + // mismatched lengths + if _, err := convertRow(&bq.TableRow{F: []*bq.TableCell{{V: ""}}}, Schema{}); err == nil { + t.Error("got nil, want error") + } + v3 := map[string]interface{}{"v": 3} + for _, test := range []struct { + value interface{} + fs FieldSchema + }{ + {3, FieldSchema{Type: IntegerFieldType}}, // not a string + {[]interface{}{v3}, // not a string, repeated + FieldSchema{Type: IntegerFieldType, Repeated: true}}, + {map[string]interface{}{"f": []interface{}{v3}}, // not a string, nested + FieldSchema{Type: RecordFieldType, Schema: Schema{{Type: IntegerFieldType}}}}, + {map[string]interface{}{"f": []interface{}{v3}}, // wrong length, nested + FieldSchema{Type: RecordFieldType, Schema: Schema{}}}, + } { + _, err := convertRow( + &bq.TableRow{F: []*bq.TableCell{{V: test.value}}}, + Schema{&test.fs}) + if err == nil { + t.Errorf("value %v, fs %v: got nil, want error", test.value, test.fs) + } + } + + // bad field type + if _, err := convertBasicType("", FieldType("BAD")); err == nil { + t.Error("got nil, want error") + } +} + +func TestValuesSaverConvertsToMap(t *testing.T) { + testCases := []struct { + vs ValuesSaver + wantInsertID string + wantRow map[string]Value + }{ + { + vs: ValuesSaver{ + Schema: []*FieldSchema{ + {Name: "intField", Type: IntegerFieldType}, + {Name: "strField", Type: StringFieldType}, + {Name: "dtField", Type: DateTimeFieldType}, + }, + InsertID: "iid", + Row: []Value{1, "a", + civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}}, + }, + wantInsertID: "iid", + wantRow: map[string]Value{"intField": 1, "strField": "a", + "dtField": "0001-02-03 04:05:06.000007"}, + }, + { + vs: ValuesSaver{ + Schema: []*FieldSchema{ + {Name: "intField", Type: IntegerFieldType}, + { + Name: "recordField", + Type: RecordFieldType, + Schema: []*FieldSchema{ + {Name: "nestedInt", Type: IntegerFieldType, Repeated: true}, + }, + }, + }, + InsertID: "iid", + Row: []Value{1, []Value{[]Value{2, 3}}}, + }, + wantInsertID: "iid", + wantRow: map[string]Value{ + "intField": 1, + "recordField": map[string]Value{ + "nestedInt": []Value{2, 3}, + }, + }, + }, + { // repeated nested field + vs: ValuesSaver{ + Schema: Schema{ + { + Name: "records", + Type: RecordFieldType, + Schema: Schema{ + {Name: "x", Type: IntegerFieldType}, + {Name: "y", Type: IntegerFieldType}, + }, + Repeated: true, + }, + }, + InsertID: "iid", + Row: []Value{ // a row is a []Value + []Value{ // repeated field's value is a []Value + 
[]Value{1, 2}, // first record of the repeated field + []Value{3, 4}, // second record + }, + }, + }, + wantInsertID: "iid", + wantRow: map[string]Value{ + "records": []Value{ + map[string]Value{"x": 1, "y": 2}, + map[string]Value{"x": 3, "y": 4}, + }, + }, + }, + } + for _, tc := range testCases { + gotRow, gotInsertID, err := tc.vs.Save() + if err != nil { + t.Errorf("Expected successful save; got: %v", err) + continue + } + if !testutil.Equal(gotRow, tc.wantRow) { + t.Errorf("%v row:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotRow, tc.wantRow) + } + if !testutil.Equal(gotInsertID, tc.wantInsertID) { + t.Errorf("%v ID:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotInsertID, tc.wantInsertID) + } + } +} + +func TestValuesToMapErrors(t *testing.T) { + for _, test := range []struct { + values []Value + schema Schema + }{ + { // mismatched length + []Value{1}, + Schema{}, + }, + { // nested record not a slice + []Value{1}, + Schema{{Type: RecordFieldType}}, + }, + { // nested record mismatched length + []Value{[]Value{1}}, + Schema{{Type: RecordFieldType}}, + }, + { // nested repeated record not a slice + []Value{[]Value{1}}, + Schema{{Type: RecordFieldType, Repeated: true}}, + }, + { // nested repeated record mismatched length + []Value{[]Value{[]Value{1}}}, + Schema{{Type: RecordFieldType, Repeated: true}}, + }, + } { + _, err := valuesToMap(test.values, test.schema) + if err == nil { + t.Errorf("%v, %v: got nil, want error", test.values, test.schema) + } + } +} + +func TestStructSaver(t *testing.T) { + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "r", Type: IntegerFieldType, Repeated: true}, + {Name: "t", Type: TimeFieldType}, + {Name: "tr", Type: TimeFieldType, Repeated: true}, + {Name: "nested", Type: RecordFieldType, Schema: Schema{ + {Name: "b", Type: BooleanFieldType}, + }}, + {Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{ + {Name: "b", Type: BooleanFieldType}, + }}, + } + + type ( + N struct{ B bool } + T struct { + S string + R []int + T civil.Time + TR []civil.Time + Nested *N + Rnested []*N + } + ) + + check := func(msg string, in interface{}, want map[string]Value) { + ss := StructSaver{ + Schema: schema, + InsertID: "iid", + Struct: in, + } + got, gotIID, err := ss.Save() + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + if wantIID := "iid"; gotIID != wantIID { + t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID) + } + if !testutil.Equal(got, want) { + t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want) + } + } + ct1 := civil.Time{1, 2, 3, 4000} + ct2 := civil.Time{5, 6, 7, 8000} + in := T{ + S: "x", + R: []int{1, 2}, + T: ct1, + TR: []civil.Time{ct1, ct2}, + Nested: &N{B: true}, + Rnested: []*N{{true}, {false}}, + } + want := map[string]Value{ + "s": "x", + "r": []int{1, 2}, + "t": "01:02:03.000004", + "tr": []string{"01:02:03.000004", "05:06:07.000008"}, + "nested": map[string]Value{"b": true}, + "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}}, + } + check("all values", in, want) + check("all values, ptr", &in, want) + check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00"}) + + // Missing and extra fields ignored. 
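+	// (Matching is by name and case-insensitive, as in compileToOps, so
+	// unmatched fields on either side are skipped rather than failing.)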
+ type T2 struct { + S string + // missing R, Nested, RNested + Extra int + } + check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"}) + + check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}}, + map[string]Value{ + "s": "", + "t": "00:00:00", + "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}}, + }) +} + +func TestStructSaverErrors(t *testing.T) { + type ( + badField struct { + I int `bigquery:"@"` + } + badR struct{ R int } + badRN struct{ R []int } + ) + + for i, test := range []struct { + struct_ interface{} + schema Schema + }{ + {0, nil}, // not a struct + {&badField{}, nil}, // bad field name + {&badR{}, Schema{{Name: "r", Repeated: true}}}, // repeated field has bad type + {&badR{}, Schema{{Name: "r", Type: RecordFieldType}}}, // nested field has bad type + {&badRN{[]int{0}}, // nested repeated field has bad type + Schema{{Name: "r", Type: RecordFieldType, Repeated: true}}}, + } { + ss := &StructSaver{Struct: test.struct_, Schema: test.schema} + _, _, err := ss.Save() + if err == nil { + t.Errorf("#%d, %v, %v: got nil, want error", i, test.struct_, test.schema) + } + } +} + +func TestConvertRows(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + } + rows := []*bq.TableRow{ + {F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + }}, + {F: []*bq.TableCell{ + {V: "b"}, + {V: "2"}, + {V: "2.2"}, + {V: "false"}, + }}, + } + want := [][]Value{ + {"a", int64(1), 1.2, true}, + {"b", int64(2), 2.2, false}, + } + got, err := convertRows(rows, schema) + if err != nil { + t.Fatalf("got %v, want nil", err) + } + if !testutil.Equal(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } + + rows[0].F[0].V = 1 + _, err = convertRows(rows, schema) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestValueList(t *testing.T) { + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + } + want := []Value{"x", 7, 3.14, true} + var got []Value + vl := (*valueList)(&got) + if err := vl.Load(want, schema); err != nil { + t.Fatal(err) + } + + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Load truncates, not appends. 
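+	// valueList.Load resets the slice length to zero before appending, so a
+	// second Load into the same slice must not double its contents.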
+ // https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437 + if err := vl.Load(want, schema); err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestValueMap(t *testing.T) { + ns := Schema{ + {Name: "x", Type: IntegerFieldType}, + {Name: "y", Type: IntegerFieldType}, + } + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + {Name: "n", Type: RecordFieldType, Schema: ns}, + {Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true}, + } + in := []Value{"x", 7, 3.14, true, + []Value{1, 2}, + []Value{[]Value{3, 4}, []Value{5, 6}}, + } + var vm valueMap + if err := vm.Load(in, schema); err != nil { + t.Fatal(err) + } + want := map[string]Value{ + "s": "x", + "i": 7, + "f": 3.14, + "b": true, + "n": map[string]Value{"x": 1, "y": 2}, + "rn": []Value{ + map[string]Value{"x": 3, "y": 4}, + map[string]Value{"x": 5, "y": 6}, + }, + } + if !testutil.Equal(vm, valueMap(want)) { + t.Errorf("got\n%+v\nwant\n%+v", vm, want) + } + +} + +var ( + // For testing StructLoader + schema2 = Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "s2", Type: StringFieldType}, + {Name: "by", Type: BytesFieldType}, + {Name: "I", Type: IntegerFieldType}, + {Name: "F", Type: FloatFieldType}, + {Name: "B", Type: BooleanFieldType}, + {Name: "TS", Type: TimestampFieldType}, + {Name: "D", Type: DateFieldType}, + {Name: "T", Type: TimeFieldType}, + {Name: "DT", Type: DateTimeFieldType}, + {Name: "nested", Type: RecordFieldType, Schema: Schema{ + {Name: "nestS", Type: StringFieldType}, + {Name: "nestI", Type: IntegerFieldType}, + }}, + {Name: "t", Type: StringFieldType}, + } + + testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC) + testDate = civil.Date{2016, 11, 5} + testTime = civil.Time{7, 50, 22, 8} + testDateTime = civil.DateTime{testDate, testTime} + + testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true, + testTimestamp, testDate, testTime, testDateTime, + []Value{"nested", int64(17)}, "z"} +) + +type testStruct1 struct { + B bool + I int + times + S string + S2 String + By []byte + s string + F float64 + Nested nested + Tagged string `bigquery:"t"` +} + +type String string + +type nested struct { + NestS string + NestI int +} + +type times struct { + TS time.Time + T civil.Time + D civil.Date + DT civil.DateTime +} + +func TestStructLoader(t *testing.T) { + var ts1 testStruct1 + if err := load(&ts1, schema2, testValues); err != nil { + t.Fatal(err) + } + // Note: the schema field named "s" gets matched to the exported struct + // field "S", not the unexported "s". + want := &testStruct1{ + B: true, + I: 7, + F: 3.14, + times: times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime}, + S: "x", + S2: "y", + By: []byte{1, 2, 3}, + Nested: nested{NestS: "nested", NestI: 17}, + Tagged: "z", + } + if !testutil.Equal(&ts1, want, cmp.AllowUnexported(testStruct1{})) { + t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want)) + d, _, err := pretty.Diff(*want, ts1) + if err == nil { + t.Logf("diff:\n%s", d) + } + } + + // Test pointers to nested structs. 
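+	// (setNested allocates a fresh struct when the pointer is nil.)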
+ type nestedPtr struct{ Nested *nested } + var np nestedPtr + if err := load(&np, schema2, testValues); err != nil { + t.Fatal(err) + } + want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}} + if !testutil.Equal(&np, want2) { + t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2)) + } + + // Existing values should be reused. + nst := &nested{NestS: "x", NestI: -10} + np = nestedPtr{Nested: nst} + if err := load(&np, schema2, testValues); err != nil { + t.Fatal(err) + } + if !testutil.Equal(&np, want2) { + t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2)) + } + if np.Nested != nst { + t.Error("nested struct pointers not equal") + } +} + +type repStruct struct { + Nums []int + ShortNums [2]int // to test truncation + LongNums [5]int // to test padding with zeroes + Nested []*nested +} + +var ( + repSchema = Schema{ + {Name: "nums", Type: IntegerFieldType, Repeated: true}, + {Name: "shortNums", Type: IntegerFieldType, Repeated: true}, + {Name: "longNums", Type: IntegerFieldType, Repeated: true}, + {Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{ + {Name: "nestS", Type: StringFieldType}, + {Name: "nestI", Type: IntegerFieldType}, + }}, + } + v123 = []Value{int64(1), int64(2), int64(3)} + repValues = []Value{v123, v123, v123, + []Value{ + []Value{"x", int64(1)}, + []Value{"y", int64(2)}, + }, + } +) + +func TestStructLoaderRepeated(t *testing.T) { + var r1 repStruct + if err := load(&r1, repSchema, repValues); err != nil { + t.Fatal(err) + } + want := repStruct{ + Nums: []int{1, 2, 3}, + ShortNums: [...]int{1, 2}, // extra values discarded + LongNums: [...]int{1, 2, 3, 0, 0}, + Nested: []*nested{{"x", 1}, {"y", 2}}, + } + if !testutil.Equal(r1, want) { + t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want)) + } + + r2 := repStruct{ + Nums: []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to + LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed + } + if err := load(&r2, repSchema, repValues); err != nil { + t.Fatal(err) + } + if !testutil.Equal(r2, want) { + t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want)) + } + if got, want := cap(r2.Nums), 5; got != want { + t.Errorf("cap(r2.Nums) = %d, want %d", got, want) + } + + // Short slice case. + r3 := repStruct{Nums: []int{-1}} + if err := load(&r3, repSchema, repValues); err != nil { + t.Fatal(err) + } + if !testutil.Equal(r3, want) { + t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want)) + } + if got, want := cap(r3.Nums), 3; got != want { + t.Errorf("cap(r3.Nums) = %d, want %d", got, want) + } + +} + +func TestStructLoaderOverflow(t *testing.T) { + type S struct { + I int16 + F float32 + } + schema := Schema{ + {Name: "I", Type: IntegerFieldType}, + {Name: "F", Type: FloatFieldType}, + } + var s S + if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil { + t.Error("int: got nil, want error") + } + if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil { + t.Error("float: got nil, want error") + } +} + +func TestStructLoaderFieldOverlap(t *testing.T) { + // It's OK if the struct has fields that the schema does not, and vice versa. 
+ type S1 struct { + I int + X [][]int // not in the schema; does not even correspond to a valid BigQuery type + // many schema fields missing + } + var s1 S1 + if err := load(&s1, schema2, testValues); err != nil { + t.Fatal(err) + } + want1 := S1{I: 7} + if !testutil.Equal(s1, want1) { + t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1)) + } + + // It's even valid to have no overlapping fields at all. + type S2 struct{ Z int } + + var s2 S2 + if err := load(&s2, schema2, testValues); err != nil { + t.Fatal(err) + } + want2 := S2{} + if !testutil.Equal(s2, want2) { + t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2)) + } +} + +func TestStructLoaderErrors(t *testing.T) { + check := func(sp interface{}) { + var sl structLoader + err := sl.set(sp, schema2) + if err == nil { + t.Errorf("%T: got nil, want error", sp) + } + } + + type bad1 struct{ F int32 } // wrong type for FLOAT column + check(&bad1{}) + + type bad2 struct{ I uint } // unsupported integer type + check(&bad2{}) + + type bad3 struct { + I int `bigquery:"@"` + } // bad field name + check(&bad3{}) + + type bad4 struct{ Nested int } // non-struct for nested field + check(&bad4{}) + + type bad5 struct{ Nested struct{ NestS int } } // bad nested struct + check(&bad5{}) + + bad6 := &struct{ Nums int }{} // non-slice for repeated field + sl := structLoader{} + err := sl.set(bad6, repSchema) + if err == nil { + t.Errorf("%T: got nil, want error", bad6) + } + + // sl.set's error is sticky, with even good input. + err2 := sl.set(&repStruct{}, repSchema) + if err2 != err { + t.Errorf("%v != %v, expected equal", err2, err) + } + // sl.Load is similarly sticky + err2 = sl.Load(nil, nil) + if err2 != err { + t.Errorf("%v != %v, expected equal", err2, err) + } + + // Null values. + schema := Schema{ + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + {Name: "s", Type: StringFieldType}, + {Name: "by", Type: BytesFieldType}, + {Name: "d", Type: DateFieldType}, + } + type s struct { + I int + F float64 + B bool + S string + By []byte + D civil.Date + } + vals := []Value{int64(0), 0.0, false, "", []byte{}, testDate} + if err := load(&s{}, schema, vals); err != nil { + t.Fatal(err) + } + for i, e := range vals { + vals[i] = nil + got := load(&s{}, schema, vals) + if got != errNoNulls { + t.Errorf("#%d: got %v, want %v", i, got, errNoNulls) + } + vals[i] = e + } + + // Using more than one struct type with the same structLoader. 
+ type different struct { + B bool + I int + times + S string + s string + Nums []int + } + + sl = structLoader{} + if err := sl.set(&testStruct1{}, schema2); err != nil { + t.Fatal(err) + } + err = sl.set(&different{}, schema2) + if err == nil { + t.Error("different struct types: got nil, want error") + } +} + +func load(pval interface{}, schema Schema, vals []Value) error { + var sl structLoader + if err := sl.set(pval, schema); err != nil { + return err + } + return sl.Load(vals, nil) +} + +func BenchmarkStructLoader_NoCompile(b *testing.B) { + benchmarkStructLoader(b, false) +} + +func BenchmarkStructLoader_Compile(b *testing.B) { + benchmarkStructLoader(b, true) +} + +func benchmarkStructLoader(b *testing.B, compile bool) { + var ts1 testStruct1 + for i := 0; i < b.N; i++ { + var sl structLoader + for j := 0; j < 10; j++ { + if err := load(&ts1, schema2, testValues); err != nil { + b.Fatal(err) + } + if !compile { + sl.typ = nil + } + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go new file mode 100644 index 0000000..cfc9106 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -0,0 +1,423 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "regexp" + "strings" + + btopt "cloud.google.com/go/bigtable/internal/option" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + "golang.org/x/net/context" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/codes" +) + +const adminAddr = "bigtableadmin.googleapis.com:443" + +// AdminClient is a client type for performing admin operations within a specific instance. +type AdminClient struct { + conn *grpc.ClientConn + tClient btapb.BigtableTableAdminClient + + project, instance string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewAdminClient creates a new AdminClient for a given project and instance. +func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) { + o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &AdminClient{ + conn: conn, + tClient: btapb.NewBigtableTableAdminClient(conn), + project: project, + instance: instance, + md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)), + }, nil +} + +// Close closes the AdminClient. 
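+//
+// A typical lifecycle, sketched for illustration (the project and instance
+// names are placeholders):
+//
+//	ac, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer ac.Close()
+//	tables, err := ac.Tables(ctx)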
+func (ac *AdminClient) Close() error { + return ac.conn.Close() +} + +func (ac *AdminClient) instancePrefix() string { + return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance) +} + +// Tables returns a list of the tables in the instance. +func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ListTablesRequest{ + Parent: prefix, + } + res, err := ac.tClient.ListTables(ctx, req) + if err != nil { + return nil, err + } + names := make([]string, 0, len(res.Tables)) + for _, tbl := range res.Tables { + names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) + } + return names, nil +} + +// TableConf contains all of the information necessary to create a table with column families. +type TableConf struct { + TableID string + SplitKeys []string + // Families is a map from family name to GCPolicy + Families map[string]GCPolicy +} + +// CreateTable creates a new table in the instance. +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { + return ac.CreateTableFromConf(ctx, &TableConf{TableID: table}) +} + +// CreatePresplitTable creates a new table in the instance. +// The list of row keys will be used to initially split the table into multiple tablets. +// Given two split keys, "s1" and "s2", three tablets will be created, +// spanning the key ranges: [, s1), [s1, s2), [s2, ). +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error { + return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys}) +} + +// CreateTableFromConf creates a new table in the instance from the given configuration. +func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error { + ctx = mergeOutgoingMetadata(ctx, ac.md) + var req_splits []*btapb.CreateTableRequest_Split + for _, split := range conf.SplitKeys { + req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)}) + } + var tbl btapb.Table + if conf.Families != nil { + tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily) + for fam, policy := range conf.Families { + tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()} + } + } + prefix := ac.instancePrefix() + req := &btapb.CreateTableRequest{ + Parent: prefix, + TableId: conf.TableID, + Table: &tbl, + InitialSplits: req_splits, + } + _, err := ac.tClient.CreateTable(ctx, req) + return err +} + +// CreateColumnFamily creates a new column family in a table. +func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { + // TODO(dsymonds): Permit specifying gcexpr and any other family settings. + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// DeleteTable deletes a table and all of its data. 
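+// Deletion is irreversible, so callers often pair it with table creation,
+// as the package's own tests do. A sketch, assuming ac is an *AdminClient
+// and "mytable" is a placeholder name:
+//
+//	if err := ac.CreateTable(ctx, "mytable"); err != nil {
+//		// handle err
+//	}
+//	defer ac.DeleteTable(ctx, "mytable")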
+func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.DeleteTableRequest{ + Name: prefix + "/tables/" + table, + } + _, err := ac.tClient.DeleteTable(ctx, req) + return err +} + +// DeleteColumnFamily deletes a column family in a table and all of its data. +func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true}, + }}, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// TableInfo represents information about a table. +type TableInfo struct { + // DEPRECATED - This field is deprecated. Please use FamilyInfos instead. + Families []string + FamilyInfos []FamilyInfo +} + +// FamilyInfo represents information about a column family. +type FamilyInfo struct { + Name string + GCPolicy string +} + +// TableInfo retrieves information about a table. +func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.GetTableRequest{ + Name: prefix + "/tables/" + table, + } + res, err := ac.tClient.GetTable(ctx, req) + if err != nil { + return nil, err + } + ti := &TableInfo{} + for name, fam := range res.ColumnFamilies { + ti.Families = append(ti.Families, name) + ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)}) + } + return ti, nil +} + +// SetGCPolicy specifies which cells in a column family should be garbage collected. +// GC executes opportunistically in the background; table reads may return data +// matching the GC policy. +func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}}, + }}, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// DropRowRange permanently deletes a row range from the specified table. +func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error { + ctx = mergeOutgoingMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.DropRowRangeRequest{ + Name: prefix + "/tables/" + table, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)}, + } + _, err := ac.tClient.DropRowRange(ctx, req) + return err +} + +const instanceAdminAddr = "bigtableadmin.googleapis.com:443" + +// InstanceAdminClient is a client type for performing admin operations on instances. +// These operations can be substantially more dangerous than those provided by AdminClient. +type InstanceAdminClient struct { + conn *grpc.ClientConn + iClient btapb.BigtableInstanceAdminClient + lroClient *lroauto.OperationsClient + + project string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewInstanceAdminClient creates a new InstanceAdminClient for a given project. 
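+// A minimal sketch (the project ID is a placeholder):
+//
+//	iac, err := bigtable.NewInstanceAdminClient(ctx, "my-project")
+//	if err != nil {
+//		// handle err
+//	}
+//	defer iac.Close()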
+func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) { + o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + + return &InstanceAdminClient{ + conn: conn, + iClient: btapb.NewBigtableInstanceAdminClient(conn), + lroClient: lroClient, + + project: project, + md: metadata.Pairs(resourcePrefixHeader, "projects/"+project), + }, nil +} + +// Close closes the InstanceAdminClient. +func (iac *InstanceAdminClient) Close() error { + return iac.conn.Close() +} + +// StorageType is the type of storage used for all tables in an instance +type StorageType int + +const ( + SSD StorageType = iota + HDD +) + +func (st StorageType) proto() btapb.StorageType { + if st == HDD { + return btapb.StorageType_HDD + } + return btapb.StorageType_SSD +} + +// InstanceType is the type of the instance +type InstanceType int32 + +const ( + PRODUCTION InstanceType = InstanceType(btapb.Instance_PRODUCTION) + DEVELOPMENT = InstanceType(btapb.Instance_DEVELOPMENT) +) + +// InstanceInfo represents information about an instance +type InstanceInfo struct { + Name string // name of the instance + DisplayName string // display name for UIs +} + +// InstanceConf contains the information necessary to create an Instance +type InstanceConf struct { + InstanceId, DisplayName, ClusterId, Zone string + // NumNodes must not be specified for DEVELOPMENT instance types + NumNodes int32 + StorageType StorageType + InstanceType InstanceType +} + +var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`) + +// CreateInstance creates a new instance in the project. +// This method will return when the instance has been created or when an error occurs. +func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.CreateInstanceRequest{ + Parent: "projects/" + iac.project, + InstanceId: conf.InstanceId, + Instance: &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)}, + Clusters: map[string]*btapb.Cluster{ + conf.ClusterId: { + ServeNodes: conf.NumNodes, + DefaultStorageType: conf.StorageType.proto(), + Location: "projects/" + iac.project + "/locations/" + conf.Zone, + }, + }, + } + + lro, err := iac.iClient.CreateInstance(ctx, req) + if err != nil { + return err + } + resp := btapb.Instance{} + return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp) +} + +// DeleteInstance deletes an instance from the project. 
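+// Deletion is permanent. A hedged create-then-clean-up sketch (all IDs and
+// the zone are placeholders):
+//
+//	conf := &bigtable.InstanceConf{
+//		InstanceId:   "my-instance",
+//		DisplayName:  "My Instance",
+//		ClusterId:    "my-cluster",
+//		Zone:         "us-central1-b",
+//		NumNodes:     3,
+//		StorageType:  bigtable.SSD,
+//		InstanceType: bigtable.PRODUCTION,
+//	}
+//	if err := iac.CreateInstance(ctx, conf); err != nil {
+//		// handle err
+//	}
+//	defer iac.DeleteInstance(ctx, "my-instance")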
+func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId} + _, err := iac.iClient.DeleteInstance(ctx, req) + return err +} + +// Instances returns a list of instances in the project. +func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.ListInstancesRequest{ + Parent: "projects/" + iac.project, + } + res, err := iac.iClient.ListInstances(ctx, req) + if err != nil { + return nil, err + } + if len(res.FailedLocations) > 0 { + // We don't have a good way to return a partial result in the face of some zones being unavailable. + // Fail the entire request. + return nil, status.Errorf(codes.Unavailable, "Failed locations: %v", res.FailedLocations) + } + + var is []*InstanceInfo + for _, i := range res.Instances { + m := instanceNameRegexp.FindStringSubmatch(i.Name) + if m == nil { + return nil, fmt.Errorf("malformed instance name %q", i.Name) + } + is = append(is, &InstanceInfo{ + Name: m[2], + DisplayName: i.DisplayName, + }) + } + return is, nil +} + +// InstanceInfo returns information about an instance. +func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) { + ctx = mergeOutgoingMetadata(ctx, iac.md) + req := &btapb.GetInstanceRequest{ + Name: "projects/" + iac.project + "/instances/" + instanceId, + } + res, err := iac.iClient.GetInstance(ctx, req) + if err != nil { + return nil, err + } + + m := instanceNameRegexp.FindStringSubmatch(res.Name) + if m == nil { + return nil, fmt.Errorf("malformed instance name %q", res.Name) + } + return &InstanceInfo{ + Name: m[2], + DisplayName: res.DisplayName, + }, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/admin_test.go b/vendor/cloud.google.com/go/bigtable/admin_test.go new file mode 100644 index 0000000..af5784f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin_test.go @@ -0,0 +1,180 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package bigtable
+
+import (
+	"sort"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/internal/testutil"
+
+	"fmt"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+func TestAdminIntegration(t *testing.T) {
+	testEnv, err := NewIntegrationEnv()
+	if err != nil {
+		t.Fatalf("IntegrationEnv: %v", err)
+	}
+	defer testEnv.Close()
+
+	timeout := 2 * time.Second
+	if testEnv.Config().UseProd {
+		timeout = 5 * time.Minute
+	}
+	ctx, _ := context.WithTimeout(context.Background(), timeout)
+
+	adminClient, err := testEnv.NewAdminClient()
+	if err != nil {
+		t.Fatalf("NewAdminClient: %v", err)
+	}
+	defer adminClient.Close()
+
+	iAdminClient, err := testEnv.NewInstanceAdminClient()
+	if err != nil {
+		t.Fatalf("NewInstanceAdminClient: %v", err)
+	}
+	if iAdminClient != nil {
+		defer iAdminClient.Close()
+
+		iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
+		if err != nil {
+			t.Errorf("InstanceInfo: %v", err)
+		}
+		if iInfo.Name != adminClient.instance {
+			t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
+		}
+	}
+
+	list := func() []string {
+		tbls, err := adminClient.Tables(ctx)
+		if err != nil {
+			t.Fatalf("Fetching list of tables: %v", err)
+		}
+		sort.Strings(tbls)
+		return tbls
+	}
+	containsAll := func(got, want []string) bool {
+		gotSet := make(map[string]bool)
+
+		for _, s := range got {
+			gotSet[s] = true
+		}
+		for _, s := range want {
+			if !gotSet[s] {
+				return false
+			}
+		}
+		return true
+	}
+
+	defer adminClient.DeleteTable(ctx, "mytable")
+
+	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	defer adminClient.DeleteTable(ctx, "myothertable")
+
+	if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
+		t.Fatalf("Creating table: %v", err)
+	}
+
+	if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
+		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
+	}
+	if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
+		t.Fatalf("Deleting table: %v", err)
+	}
+	tables := list()
+	if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
+		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
+	}
+	if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
+		t.Errorf("adminClient.Tables returned %#v, unwanted %#v", got, unwanted)
+	}
+
+	tblConf := TableConf{
+		TableID: "conftable",
+		Families: map[string]GCPolicy{
+			"fam1": MaxVersionsPolicy(1),
+			"fam2": MaxVersionsPolicy(2),
+		},
+	}
+	if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
+		t.Fatalf("Creating table from TableConf: %v", err)
+	}
+	defer adminClient.DeleteTable(ctx, tblConf.TableID)
+
+	tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
+	if err != nil {
+		t.Fatalf("Getting table info: %v", err)
+	}
+	sort.Strings(tblInfo.Families)
+	wantFams := []string{"fam1", "fam2"}
+	if !testutil.Equal(tblInfo.Families, wantFams) {
+		t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
+	}
+
+	// Populate mytable and drop row ranges
+	if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+
+	client, err := testEnv.NewClient()
+	if err != nil {
+		t.Fatalf("NewClient: %v", err)
+	}
+	defer client.Close()
+
+	tbl := client.Open("mytable")
+
+	prefixes := []string{"a", "b", "c"}
+	for _, prefix := range prefixes {
+		for i := 0; i < 5; i++ {
+			mut := NewMutation()
+			mut.Set("cf", "col", 0, []byte("1"))
+			if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil {
+				t.Fatalf("Mutating row: %v", err)
+			}
+		}
+	}
+
+	if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil {
+		t.Errorf("DropRowRange a: %v", err)
+	}
+	if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil {
+		t.Errorf("DropRowRange c: %v", err)
+	}
+	if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil {
+		t.Errorf("DropRowRange x: %v", err)
+	}
+
+	var gotRowCount int
+	tbl.ReadRows(ctx, RowRange{}, func(row Row) bool {
+		gotRowCount += 1
+		if !strings.HasPrefix(row.Key(), "b") {
+			t.Errorf("Invalid row after dropping range: %v", row)
+		}
+		return true
+	})
+	if gotRowCount != 5 {
+		t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go
new file mode 100644
index 0000000..df83b00
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bigtable.go
@@ -0,0 +1,792 @@
+/*
+Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bigtable // import "cloud.google.com/go/bigtable"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"cloud.google.com/go/bigtable/internal/gax"
+	btopt "cloud.google.com/go/bigtable/internal/option"
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	gtransport "google.golang.org/api/transport/grpc"
+	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+const prodAddr = "bigtable.googleapis.com:443"
+
+// Client is a client for reading and writing data to tables in an instance.
+// +// A Client is safe to use concurrently, except for its Close method. +type Client struct { + conn *grpc.ClientConn + client btpb.BigtableClient + project, instance string +} + +// NewClient creates a new Client for a given project and instance. +func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { + o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent) + if err != nil { + return nil, err + } + // Default to a small connection pool that can be overridden. + o = append(o, + option.WithGRPCConnectionPool(4), + // Set the max size to correspond to server-side limits. + option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))), + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. + option.WithGRPCDialOption(grpc.WithBlock())) + o = append(o, opts...) + conn, err := gtransport.Dial(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &Client{ + conn: conn, + client: btpb.NewBigtableClient(conn), + project: project, + instance: instance, + }, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + return c.conn.Close() +} + +var ( + idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted} + isIdempotentRetryCode = make(map[codes.Code]bool) + retryOptions = []gax.CallOption{ + gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2), + gax.WithRetryCodes(idempotentRetryCodes), + } +) + +func init() { + for _, code := range idempotentRetryCodes { + isIdempotentRetryCode[code] = true + } +} + +func (c *Client) fullTableName(table string) string { + return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table) +} + +// A Table refers to a table. +// +// A Table is safe to use concurrently. +type Table struct { + c *Client + table string + + // Metadata to be sent with each request. + md metadata.MD +} + +// Open opens a table. +func (c *Client) Open(table string) *Table { + return &Table{ + c: c, + table: table, + md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)), + } +} + +// TODO(dsymonds): Read method that returns a sequence of ReadItems. + +// ReadRows reads rows from a table. f is called for each row. +// If f returns false, the stream is shut down and ReadRows returns. +// f owns its argument, and f is called serially in order by row key. +// +// By default, the yielded rows will contain all values in all cells. +// Use RowFilter to limit the cells returned. +func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error { + ctx = mergeOutgoingMetadata(ctx, t.md) + + var prevRowKey string + err := gax.Invoke(ctx, func(ctx context.Context) error { + req := &btpb.ReadRowsRequest{ + TableName: t.c.fullTableName(t.table), + Rows: arg.proto(), + } + for _, opt := range opts { + opt.set(req) + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + defer cancel() + + stream, err := t.c.client.ReadRows(ctx, req) + if err != nil { + return err + } + cr := newChunkReader() + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // Reset arg for next Invoke call. 
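+				// (gax.Invoke re-runs this closure on retryable errors, so
+				// trimming arg to the rows strictly after prevRowKey lets the
+				// retry resume the scan rather than re-reading rows already
+				// delivered to f.)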
+ arg = arg.retainRowsAfter(prevRowKey) + return err + } + + for _, cc := range res.Chunks { + row, err := cr.Process(cc) + if err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + if row == nil { + continue + } + prevRowKey = row.Key() + if !f(row) { + // Cancel and drain stream. + cancel() + for { + if _, err := stream.Recv(); err != nil { + // The stream has ended. We don't return an error + // because the caller has intentionally interrupted the scan. + return nil + } + } + } + } + if err := cr.Close(); err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + } + return err + }, retryOptions...) + + return err +} + +// ReadRow is a convenience implementation of a single-row reader. +// A missing row will return a zero-length map and a nil error. +func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { + var r Row + err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { + r = rr + return true + }, opts...) + return r, err +} + +// decodeFamilyProto adds the cell data from f to the given row. +func decodeFamilyProto(r Row, row string, f *btpb.Family) { + fam := f.Name // does not have colon + for _, col := range f.Columns { + for _, cell := range col.Cells { + ri := ReadItem{ + Row: row, + Column: fam + ":" + string(col.Qualifier), + Timestamp: Timestamp(cell.TimestampMicros), + Value: cell.Value, + } + r[fam] = append(r[fam], ri) + } + } +} + +// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList. +// The serialized size of the RowSet must be no larger than 1MiB. +type RowSet interface { + proto() *btpb.RowSet + + // retainRowsAfter returns a new RowSet that does not include the + // given row key or any row key lexicographically less than it. + retainRowsAfter(lastRowKey string) RowSet + + // Valid reports whether this set can cover at least one row. + valid() bool +} + +// RowList is a sequence of row keys. +type RowList []string + +func (r RowList) proto() *btpb.RowSet { + keys := make([][]byte, len(r)) + for i, row := range r { + keys[i] = []byte(row) + } + return &btpb.RowSet{RowKeys: keys} +} + +func (r RowList) retainRowsAfter(lastRowKey string) RowSet { + var retryKeys RowList + for _, key := range r { + if key > lastRowKey { + retryKeys = append(retryKeys, key) + } + } + return retryKeys +} + +func (r RowList) valid() bool { + return len(r) > 0 +} + +// A RowRange is a half-open interval [Start, Limit) encompassing +// all the rows with keys at least as large as Start, and less than Limit. +// (Bigtable string comparison is the same as Go's.) +// A RowRange can be unbounded, encompassing all keys at least as large as Start. +type RowRange struct { + start string + limit string +} + +// NewRange returns the new RowRange [begin, end). +func NewRange(begin, end string) RowRange { + return RowRange{ + start: begin, + limit: end, + } +} + +// Unbounded tests whether a RowRange is unbounded. +func (r RowRange) Unbounded() bool { + return r.limit == "" +} + +// Contains says whether the RowRange contains the key. +func (r RowRange) Contains(row string) bool { + return r.start <= row && (r.limit == "" || r.limit > row) +} + +// String provides a printable description of a RowRange. 
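+// For example, NewRange("a", "b").String() is ["a","b") and
+// InfiniteRange("a").String() is ["a",∞).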
+func (r RowRange) String() string { + a := strconv.Quote(r.start) + if r.Unbounded() { + return fmt.Sprintf("[%s,∞)", a) + } + return fmt.Sprintf("[%s,%q)", a, r.limit) +} + +func (r RowRange) proto() *btpb.RowSet { + rr := &btpb.RowRange{ + StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)}, + } + if !r.Unbounded() { + rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)} + } + return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} +} + +func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { + if lastRowKey == "" || lastRowKey < r.start { + return r + } + // Set the beginning of the range to the row after the last scanned. + start := lastRowKey + "\x00" + if r.Unbounded() { + return InfiniteRange(start) + } + return NewRange(start, r.limit) +} + +func (r RowRange) valid() bool { + return r.start < r.limit +} + +// RowRangeList is a sequence of RowRanges representing the union of the ranges. +type RowRangeList []RowRange + +func (r RowRangeList) proto() *btpb.RowSet { + ranges := make([]*btpb.RowRange, len(r)) + for i, rr := range r { + // RowRange.proto() returns a RowSet with a single element RowRange array + ranges[i] = rr.proto().RowRanges[0] + } + return &btpb.RowSet{RowRanges: ranges} +} + +func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet { + if lastRowKey == "" { + return r + } + // Return a list of any range that has not yet been completely processed + var ranges RowRangeList + for _, rr := range r { + retained := rr.retainRowsAfter(lastRowKey) + if retained.valid() { + ranges = append(ranges, retained.(RowRange)) + } + } + return ranges +} + +func (r RowRangeList) valid() bool { + for _, rr := range r { + if rr.valid() { + return true + } + } + return false +} + +// SingleRow returns a RowSet for reading a single row. +func SingleRow(row string) RowSet { + return RowList{row} +} + +// PrefixRange returns a RowRange consisting of all keys starting with the prefix. +func PrefixRange(prefix string) RowRange { + return RowRange{ + start: prefix, + limit: prefixSuccessor(prefix), + } +} + +// InfiniteRange returns the RowRange consisting of all keys at least as +// large as start. +func InfiniteRange(start string) RowRange { + return RowRange{ + start: start, + limit: "", + } +} + +// prefixSuccessor returns the lexically smallest string greater than the +// prefix, if it exists, or "" otherwise. In either case, it is the string +// needed for the Limit of a RowRange. +func prefixSuccessor(prefix string) string { + if prefix == "" { + return "" // infinite range + } + n := len(prefix) + for n--; n >= 0 && prefix[n] == '\xff'; n-- { + } + if n == -1 { + return "" + } + ans := []byte(prefix[:n]) + ans = append(ans, prefix[n]+1) + return string(ans) +} + +// A ReadOption is an optional argument to ReadRows. +type ReadOption interface { + set(req *btpb.ReadRowsRequest) +} + +// RowFilter returns a ReadOption that applies f to the contents of read rows. +// +// If multiple RowFilters are provided, only the last is used. To combine filters, +// use ChainFilters or InterleaveFilters instead. +func RowFilter(f Filter) ReadOption { return rowFilter{f} } + +type rowFilter struct{ f Filter } + +func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() } + +// LimitRows returns a ReadOption that will limit the number of rows to be read. 
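+// A sketch of combining read options (the table, prefix, and filter choice
+// are placeholders):
+//
+//	err := tbl.ReadRows(ctx, bigtable.PrefixRange("user#"),
+//		func(r bigtable.Row) bool { return true },
+//		bigtable.RowFilter(bigtable.LatestNFilter(1)),
+//		bigtable.LimitRows(100))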
+func LimitRows(limit int64) ReadOption { return limitRows{limit} } + +type limitRows struct{ limit int64 } + +func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit } + +// mutationsAreRetryable returns true if all mutations are idempotent +// and therefore retryable. A mutation is idempotent iff all cell timestamps +// have an explicit timestamp set and do not rely on the timestamp being set on the server. +func mutationsAreRetryable(muts []*btpb.Mutation) bool { + serverTime := int64(ServerTime) + for _, mut := range muts { + setCell := mut.GetSetCell() + if setCell != nil && setCell.TimestampMicros == serverTime { + return false + } + } + return true +} + +// Apply applies a Mutation to a specific row. +func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { + ctx = mergeOutgoingMetadata(ctx, t.md) + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + var callOptions []gax.CallOption + if m.cond == nil { + req := &btpb.MutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Mutations: m.ops, + } + if mutationsAreRetryable(m.ops) { + callOptions = retryOptions + } + var res *btpb.MutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = t.c.client.MutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(res) + } + return err + } + + req := &btpb.CheckAndMutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + PredicateFilter: m.cond.proto(), + } + if m.mtrue != nil { + req.TrueMutations = m.mtrue.ops + } + if m.mfalse != nil { + req.FalseMutations = m.mfalse.ops + } + if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) { + callOptions = retryOptions + } + var cmRes *btpb.CheckAndMutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(cmRes) + } + return err +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption interface { + after(res proto.Message) +} + +type applyAfterFunc func(res proto.Message) + +func (a applyAfterFunc) after(res proto.Message) { a(res) } + +// GetCondMutationResult returns an ApplyOption that reports whether the conditional +// mutation's condition matched. +func GetCondMutationResult(matched *bool) ApplyOption { + return applyAfterFunc(func(res proto.Message) { + if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok { + *matched = res.PredicateMatched + } + }) +} + +// Mutation represents a set of changes for a single row of a table. +type Mutation struct { + ops []*btpb.Mutation + + // for conditional mutations + cond Filter + mtrue, mfalse *Mutation +} + +// NewMutation returns a new mutation. +func NewMutation() *Mutation { + return new(Mutation) +} + +// NewCondMutation returns a conditional mutation. +// The given row filter determines which mutation is applied: +// If the filter matches any cell in the row, mtrue is applied; +// otherwise, mfalse is applied. +// Either given mutation may be nil. +func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { + return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} +} + +// Set sets a value in a specified column, with the given timestamp. +// The timestamp will be truncated to millisecond granularity. +// A timestamp of ServerTime means to use the server timestamp. 
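+// A minimal sketch (the family, column, row key, and value are placeholders):
+//
+//	mut := bigtable.NewMutation()
+//	mut.Set("follows", "tjefferson", bigtable.Now(), []byte("1"))
+//	if err := tbl.Apply(ctx, "gwashington", mut); err != nil {
+//		// handle err
+//	}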
+func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimestampMicros: int64(ts.TruncateToMilliseconds()), + Value: value, + }}}) +} + +// DeleteCellsInColumn will delete all the cells whose columns are family:column. +func (m *Mutation) DeleteCellsInColumn(family, column string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + }}}) +} + +// DeleteTimestampRange deletes all cells whose columns are family:column +// and whose timestamps are in the half-open interval [start, end). +// If end is zero, it will be interpreted as infinity. +// The timestamps will be truncated to millisecond granularity. +func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimeRange: &btpb.TimestampRange{ + StartTimestampMicros: int64(start.TruncateToMilliseconds()), + EndTimestampMicros: int64(end.TruncateToMilliseconds()), + }, + }}}) +} + +// DeleteCellsInFamily will delete all the cells whose columns are family:*. +func (m *Mutation) DeleteCellsInFamily(family string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{ + FamilyName: family, + }}}) +} + +// DeleteRow deletes the entire row. +func (m *Mutation) DeleteRow() { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}}) +} + +// entryErr is a container that combines an entry with the error that was returned for it. +// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed. +type entryErr struct { + Entry *btpb.MutateRowsRequest_Entry + Err error +} + +// ApplyBulk applies multiple Mutations, up to a maximum of 100,000. +// Each mutation is individually applied atomically, +// but the set of mutations may be applied in any order. +// +// Two types of failures may occur. If the entire process +// fails, (nil, err) will be returned. If specific mutations +// fail to apply, ([]err, nil) will be returned, and the errors +// will correspond to the relevant rowKeys/muts arguments. +// +// Conditional mutations cannot be applied in bulk and providing one will result in an error. +func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) { + ctx = mergeOutgoingMetadata(ctx, t.md) + if len(rowKeys) != len(muts) { + return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts)) + } + + origEntries := make([]*entryErr, len(rowKeys)) + for i, key := range rowKeys { + mut := muts[i] + if mut.cond != nil { + return nil, errors.New("conditional mutations cannot be applied in bulk") + } + origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}} + } + + // entries will be reduced after each invocation to just what needs to be retried. + entries := make([]*entryErr, len(rowKeys)) + copy(entries, origEntries) + err := gax.Invoke(ctx, func(ctx context.Context) error { + err := t.doApplyBulk(ctx, entries, opts...) 
+ if err != nil { + // We want to retry the entire request with the current entries + return err + } + entries = t.getApplyBulkRetries(entries) + if len(entries) > 0 && len(idempotentRetryCodes) > 0 { + // We have at least one mutation that needs to be retried. + // Return an arbitrary error that is retryable according to callOptions. + return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") + } + return nil + }, retryOptions...) + + if err != nil { + return nil, err + } + + // Accumulate all of the errors into an array to return, interspersed with nils for successful + // entries. The absence of any errors means we should return nil. + var errs []error + var foundErr bool + for _, entry := range origEntries { + if entry.Err != nil { + foundErr = true + } + errs = append(errs, entry.Err) + } + if foundErr { + return errs, nil + } + return nil, nil +} + +// getApplyBulkRetries returns the entries that need to be retried +func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { + var retryEntries []*entryErr + for _, entry := range entries { + err := entry.Err + if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { + // There was an error and the entry is retryable. + retryEntries = append(retryEntries, entry) + } + } + return retryEntries +} + +// doApplyBulk does the work of a single ApplyBulk invocation +func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) + for i, entryErr := range entryErrs { + entries[i] = entryErr.Entry + } + req := &btpb.MutateRowsRequest{ + TableName: t.c.fullTableName(t.table), + Entries: entries, + } + stream, err := t.c.client.MutateRows(ctx, req) + if err != nil { + return err + } + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + + for i, entry := range res.Entries { + status := entry.Status + if status.Code == int32(codes.OK) { + entryErrs[i].Err = nil + } else { + entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message) + } + } + after(res) + } + return nil +} + +// Timestamp is in units of microseconds since 1 January 1970. +type Timestamp int64 + +// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. +// It indicates that the server's timestamp should be used. +const ServerTime Timestamp = -1 + +// Time converts a time.Time into a Timestamp. +func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } + +// Now returns the Timestamp representation of the current time on the client. +func Now() Timestamp { return Time(time.Now()) } + +// Time converts a Timestamp into a time.Time. +func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } + +// TruncateToMilliseconds truncates a Timestamp to millisecond granularity, +// which is currently the only granularity supported. +func (ts Timestamp) TruncateToMilliseconds() Timestamp { + if ts == ServerTime { + return ts + } + return ts - ts%1000 +} + +// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. +// It returns the newly written cells. 
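+// For example, a sketch of an atomic counter increment (the family, column,
+// and row key are placeholders):
+//
+//	rmw := bigtable.NewReadModifyWrite()
+//	rmw.Increment("counter", "likes", 1)
+//	row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", rmw)
+//	// row["counter"] holds the newly written cell.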
+func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { + ctx = mergeOutgoingMetadata(ctx, t.md) + req := &btpb.ReadModifyWriteRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Rules: m.ops, + } + res, err := t.c.client.ReadModifyWriteRow(ctx, req) + if err != nil { + return nil, err + } + if res.Row == nil { + return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil") + } + r := make(Row) + for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family + decodeFamilyProto(r, row, fam) + } + return r, nil +} + +// ReadModifyWrite represents a set of operations on a single row of a table. +// It is like Mutation but for non-idempotent changes. +// When applied, these operations operate on the latest values of the row's cells, +// and result in a new value being written to the relevant cell with a timestamp +// that is max(existing timestamp, current server time). +// +// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will +// be executed serially by the server. +type ReadModifyWrite struct { + ops []*btpb.ReadModifyWriteRule +} + +// NewReadModifyWrite returns a new ReadModifyWrite. +func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } + +// AppendValue appends a value to a specific cell's value. +// If the cell is unset, it will be treated as an empty value. +func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_AppendValue{v}, + }) +} + +// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, +// and adds a value to it. If the cell is unset, it will be treated as zero. +// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite +// operation will fail. +func (m *ReadModifyWrite) Increment(family, column string, delta int64) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta}, + }) +} + +// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata, +// if any, joined with internal metadata. +func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { + mdCopy, _ := metadata.FromOutgoingContext(ctx) + return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md)) +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable_test.go b/vendor/cloud.google.com/go/bigtable/bigtable_test.go new file mode 100644 index 0000000..4d7d8c8 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bigtable_test.go @@ -0,0 +1,937 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bigtable + +import ( + "fmt" + "math/rand" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" +) + +func TestPrefix(t *testing.T) { + tests := []struct { + prefix, succ string + }{ + {"", ""}, + {"\xff", ""}, // when used, "" means Infinity + {"x\xff", "y"}, + {"\xfe", "\xff"}, + } + for _, tc := range tests { + got := prefixSuccessor(tc.prefix) + if got != tc.succ { + t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) + continue + } + r := PrefixRange(tc.prefix) + if tc.succ == "" && r.limit != "" { + t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) + } + if tc.succ != "" && r.limit != tc.succ { + t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) + } + } +} + +func TestClientIntegration(t *testing.T) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + + var timeout time.Duration + if testEnv.Config().UseProd { + timeout = 5 * time.Minute + t.Logf("Running test against production") + } else { + timeout = 1 * time.Minute + t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint) + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + client, err := testEnv.NewClient() + if err != nil { + t.Fatalf("Client: %v", err) + } + defer client.Close() + checkpoint("dialed Client") + + adminClient, err := testEnv.NewAdminClient() + if err != nil { + t.Fatalf("AdminClient: %v", err) + } + defer adminClient.Close() + checkpoint("dialed AdminClient") + + table := testEnv.Config().Table + + // Delete the table at the end of the test. + // Do this even before creating the table so that if this is running + // against production and CreateTable fails there's a chance of cleaning it up. + defer adminClient.DeleteTable(ctx, table) + + if err := adminClient.CreateTable(ctx, table); err != nil { + t.Fatalf("Creating table: %v", err) + } + checkpoint("created table") + if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + checkpoint(`created "follows" column family`) + + tbl := client.Open(table) + + // Insert some data. + initialData := map[string][]string{ + "wmckinley": {"tjefferson"}, + "gwashington": {"jadams"}, + "tjefferson": {"gwashington", "jadams"}, // wmckinley set conditionally below + "jadams": {"gwashington", "tjefferson"}, + } + for row, ss := range initialData { + mut := NewMutation() + for _, name := range ss { + mut.Set("follows", name, 0, []byte("1")) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Mutating row %q: %v", row, err) + } + } + checkpoint("inserted initial data") + + // Do a conditional mutation with a complex filter. + mutTrue := NewMutation() + mutTrue.Set("follows", "wmckinley", 0, []byte("1")) + filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter(".")) + mut := NewCondMutation(filter, mutTrue, nil) + if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { + t.Errorf("Conditionally mutating row: %v", err) + } + // Do a second condition mutation with a filter that does not match, + // and thus no changes should be made. 
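+	// (As NewCondMutation documents, mtrue is applied only when the filter
+	// matches at least one cell in the row; with a nil mfalse, a non-matching
+	// filter is a no-op, which is exactly what this second mutation verifies.)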
+ mutTrue = NewMutation() + mutTrue.DeleteRow() + filter = ColumnFilter("snoop.dogg") + mut = NewCondMutation(filter, mutTrue, nil) + if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { + t.Errorf("Conditionally mutating row: %v", err) + } + checkpoint("did two conditional mutations") + + // Fetch a row. + row, err := tbl.ReadRow(ctx, "jadams") + if err != nil { + t.Fatalf("Reading a row: %v", err) + } + wantRow := Row{ + "follows": []ReadItem{ + {Row: "jadams", Column: "follows:gwashington", Value: []byte("1")}, + {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, + }, + } + if !testutil.Equal(row, wantRow) { + t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) + } + checkpoint("tested ReadRow") + + // Do a bunch of reads with filters. + readTests := []struct { + desc string + rr RowSet + filter Filter // may be nil + limit ReadOption // may be nil + + // We do the read, grab all the cells, turn them into "--", + // and join with a comma. + want string + }{ + { + desc: "read all, unfiltered", + rr: RowRange{}, + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InfiniteRange, unfiltered", + rr: InfiniteRange("tjefferson"), + want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with NewRange, unfiltered", + rr: NewRange("gargamel", "hubbard"), + want: "gwashington-jadams-1", + }, + { + desc: "read with PrefixRange, unfiltered", + rr: PrefixRange("jad"), + want: "jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with SingleRow, unfiltered", + rr: SingleRow("wmckinley"), + want: "wmckinley-tjefferson-1", + }, + { + desc: "read all, with ColumnFilter", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read range, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "h", "k"), + want: "gwashington-jadams-1,tjefferson-jadams-1", + }, + { + desc: "read range from empty, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "", "u"), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read range from start to empty, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "h", ""), + want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with RowKeyFilter", + rr: RowRange{}, + filter: RowKeyFilter(".*wash.*"), + want: "gwashington-jadams-1", + }, + { + desc: "read with RowKeyFilter, no matches", + rr: RowRange{}, + filter: RowKeyFilter(".*xxx.*"), + want: "", + }, + { + desc: "read with FamilyFilter, no matches", + rr: RowRange{}, + filter: FamilyFilter(".*xxx.*"), + want: "", + }, + { + desc: "read with ColumnFilter + row limit", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + limit: LimitRows(2), + want: "gwashington-jadams-1,jadams-tjefferson-1", + }, + { + desc: "read all, strip values", + rr: RowRange{}, + filter: StripValueFilter(), + want: 
"gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with ColumnFilter + row limit + strip values", + rr: RowRange{}, + filter: ChainFilters(ColumnFilter(".*j.*"), StripValueFilter()), // matches "jadams" and "tjefferson" + limit: LimitRows(2), + want: "gwashington-jadams-,jadams-tjefferson-", + }, + { + desc: "read with condition, strip values on true", + rr: RowRange{}, + filter: ConditionFilter(ColumnFilter(".*j.*"), StripValueFilter(), nil), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with condition, strip values on false", + rr: RowRange{}, + filter: ConditionFilter(ColumnFilter(".*xxx.*"), nil, StripValueFilter()), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with ValueRangeFilter + row limit", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("1"), []byte("5")), // matches our value of "1" + limit: LimitRows(2), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with ValueRangeFilter, no match on exclusive end", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("0"), []byte("1")), // no match + want: "", + }, + { + desc: "read with ValueRangeFilter, no matches", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing + want: "", + }, + { + desc: "read with InterleaveFilter, no matches on all filters", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")), + want: "", + }, + { + desc: "read with InterleaveFilter, no duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InterleaveFilter, with duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")), + want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1", + }, + { + desc: "read with a RowRangeList and no filter", + rr: RowRangeList{NewRange("gargamel", "hubbard"), InfiniteRange("wmckinley")}, + want: "gwashington-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "chain that excludes rows and matches nothing, in a condition", + rr: RowRange{}, + filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil), + want: "", + }, + { + desc: "chain that ends with an interleave that has no match. covers #804", + rr: RowRange{}, + filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*"))), StripValueFilter(), nil), + want: "", + }, + } + for _, tc := range readTests { + var opts []ReadOption + if tc.filter != nil { + opts = append(opts, RowFilter(tc.filter)) + } + if tc.limit != nil { + opts = append(opts, tc.limit) + } + var elt []string + err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }, opts...) 
+ if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if got := strings.Join(elt, ","); got != tc.want { + t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) + } + } + // Read a RowList + var elt []string + keys := RowList{"wmckinley", "gwashington", "jadams"} + want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1" + err = tbl.ReadRows(ctx, keys, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }) + if err != nil { + t.Errorf("read RowList: %v", err) + } + + if got := strings.Join(elt, ","); got != want { + t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want) + } + checkpoint("tested ReadRows in a few ways") + + // Do a scan and stop part way through. + // Verify that the ReadRows callback doesn't keep running. + stopped := false + err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { + if r.Key() < "h" { + return true + } + if !stopped { + stopped = true + return false + } + t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) + return false + }) + if err != nil { + t.Errorf("Partial ReadRows: %v", err) + } + checkpoint("did partial ReadRows test") + + // Delete a row and check it goes away. + mut = NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { + t.Errorf("Apply DeleteRow: %v", err) + } + row, err = tbl.ReadRow(ctx, "wmckinley") + if err != nil { + t.Fatalf("Reading a row after DeleteRow: %v", err) + } + if len(row) != 0 { + t.Fatalf("Read non-zero row after DeleteRow: %v", row) + } + checkpoint("exercised DeleteRow") + + // Check ReadModifyWrite. + + if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + + appendRMW := func(b []byte) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.AppendValue("counter", "likes", b) + return rmw + } + incRMW := func(n int64) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.Increment("counter", "likes", n) + return rmw + } + rmwSeq := []struct { + desc string + rmw *ReadModifyWrite + want []byte + }{ + { + desc: "append #1", + rmw: appendRMW([]byte{0, 0, 0}), + want: []byte{0, 0, 0}, + }, + { + desc: "append #2", + rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 + want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, + }, + { + desc: "increment", + rmw: incRMW(8), + want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, + }, + } + for _, step := range rmwSeq { + row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) + if err != nil { + t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) + } + // Make sure the modified cell returned by the RMW operation has a timestamp. + if row["counter"][0].Timestamp == 0 { + t.Errorf("RMW returned cell timestamp: got %v, want > 0", row["counter"][0].Timestamp) + } + clearTimestamps(row) + wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} + if !testutil.Equal(row, wantRow) { + t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) + } + } + + // Check for google-cloud-go/issues/723. RMWs that insert new rows should keep row order sorted in the emulator. 
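+	// (The two RMWs below deliberately insert keys in descending order: if
+	// new rows were appended rather than kept sorted by key, the
+	// ReadRow("issue-723-1") check afterwards would surface the wrong row.)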
+ row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0})) + if err != nil { + t.Fatalf("ApplyReadModifyWrite null string: %v", err) + } + row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0})) + if err != nil { + t.Fatalf("ApplyReadModifyWrite null string: %v", err) + } + // Get only the correct row back on read. + r, err := tbl.ReadRow(ctx, "issue-723-1") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if r.Key() != "issue-723-1" { + t.Errorf("ApplyReadModifyWrite: incorrect read after RMW,\n got %v\nwant %v", r.Key(), "issue-723-1") + } + checkpoint("tested ReadModifyWrite") + + // Test arbitrary timestamps more thoroughly. + if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + const numVersions = 4 + mut = NewMutation() + for i := 0; i < numVersions; i++ { + // Timestamps are used in thousands because the server + // only permits that granularity. + mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + mut.Set("ts", "col2", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + } + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + // These should be returned in descending timestamp order. + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) + } + // Do the same read, but filter to the latest two versions. 
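+	// (LatestNFilter(2) keeps the two newest cells per column, so each of
+	// ts:col and ts:col2 should report only timestamps 3000 and 2000 here.)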
+ r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) + } + // Check cell offset / limit + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowLimitFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow) + } + // Check timestamp range filtering (with truncation) + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow) + } + // Delete non-existing cells, no such column family in this row + // Should not delete anything + if err := 
adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+	mut = NewMutation()
+	mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval
+	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
+		t.Fatalf("Mutating row: %v", err)
+	}
+	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3)))
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Cell was deleted unexpectedly,\n got %v\nwant %v", r, wantRow)
+	}
+	// Delete non-existing cells; no such column in this column family.
+	// Should not delete anything.
+	mut = NewMutation()
+	mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval
+	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
+		t.Fatalf("Mutating row: %v", err)
+	}
+	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3)))
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Cell was deleted unexpectedly,\n got %v\nwant %v", r, wantRow)
+	}
+	// Delete the cell with timestamp 2000 and repeat the last read,
+	// checking that we get ts 3000 and ts 1000.
+	mut = NewMutation()
+	mut.DeleteTimestampRange("ts", "col", 2001, 3000) // half-open interval
+	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
+		t.Fatalf("Mutating row: %v", err)
+	}
+	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{"ts": []ReadItem{
+		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
+		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
+		{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
+		{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
+	}}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
+	}
+	checkpoint("tested multiple versions in a cell")
+
+	// Check DeleteCellsInFamily
+	if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil {
+		t.Fatalf("Creating column family: %v", err)
+	}
+
+	mut = NewMutation()
+	mut.Set("status", "start", 0, []byte("1"))
+	mut.Set("status", "end", 0, []byte("2"))
+	mut.Set("ts", "col", 0, []byte("3"))
+	if err := tbl.Apply(ctx, "row1", mut); err != nil {
+		t.Errorf("Mutating row: %v", err)
+	}
+	if err := tbl.Apply(ctx, "row2", mut); err != nil {
+		t.Errorf("Mutating row: %v", err)
+	}
+
+	mut = NewMutation()
+	mut.DeleteCellsInFamily("status")
+	if err := tbl.Apply(ctx, "row1", mut); err != nil {
+		t.Errorf("Delete cf: %v", err)
+	}
+
+	// ColumnFamily removed
+	r, err = tbl.ReadRow(ctx, "row1")
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{"ts": []ReadItem{
+		{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
+	}}
+	if !testutil.Equal(r, wantRow) {
+		t.Errorf("Column family was not deleted.\n got %v\n want %v", r, wantRow)
+	}
+
+	// ColumnFamily not removed
+	r, err = tbl.ReadRow(ctx, "row2")
+	if err != nil {
+		t.Fatalf("Reading row: %v", err)
+	}
+	wantRow = Row{
+		"ts": []ReadItem{
+			{Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
+		},
+		"status": []ReadItem{
+			{Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")},
+			{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
+		},
+	}
+	if !testutil.Equal(r, wantRow) {
+ t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow) + } + checkpoint("tested family delete") + + // Check DeleteCellsInColumn + mut = NewMutation() + mut.Set("status", "start", 0, []byte("1")) + mut.Set("status", "middle", 0, []byte("2")) + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "middle") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + {Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")}, + }, + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "start") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + }, + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "end") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if len(r) != 0 { + t.Errorf("Delete column: got %v, want empty row", r) + } + // Add same cell after delete + mut = NewMutation() + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !testutil.Equal(r, wantRow) { + t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow) + } + checkpoint("tested column delete") + + // Do highly concurrent reads/writes. + // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. + const maxConcurrency = 100 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + switch r := rand.Intn(100); { // r ∈ [0,100) + case 0 <= r && r < 30: + // Do a read. + _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) + if err != nil { + t.Errorf("Concurrent read: %v", err) + } + case 30 <= r && r < 100: + // Do a write. + mut := NewMutation() + mut.Set("ts", "col", 0, []byte("data")) + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Errorf("Concurrent write: %v", err) + } + } + }() + } + wg.Wait() + checkpoint("tested high concurrency") + + // Large reads, writes and scans. + bigBytes := make([]byte, 5<<20) // 5 MB is larger than current default gRPC max of 4 MB, but less than the max we set. 
+ nonsense := []byte("lorem ipsum dolor sit amet, ") + fill(bigBytes, nonsense) + mut = NewMutation() + mut.Set("ts", "col", 0, bigBytes) + if err := tbl.Apply(ctx, "bigrow", mut); err != nil { + t.Errorf("Big write: %v", err) + } + r, err = tbl.ReadRow(ctx, "bigrow") + if err != nil { + t.Errorf("Big read: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "bigrow", Column: "ts:col", Value: bigBytes}, + }} + if !testutil.Equal(r, wantRow) { + t.Errorf("Big read returned incorrect bytes: %v", r) + } + // Now write 1000 rows, each with 82 KB values, then scan them all. + medBytes := make([]byte, 82<<10) + fill(medBytes, nonsense) + sem := make(chan int, 50) // do up to 50 mutations at a time. + for i := 0; i < 1000; i++ { + mut := NewMutation() + mut.Set("ts", "big-scan", 0, medBytes) + row := fmt.Sprintf("row-%d", i) + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + sem <- 1 + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Preparing large scan: %v", err) + } + }() + } + wg.Wait() + n := 0 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + n += len(ri.Value) + } + } + return true + }, RowFilter(ColumnFilter("big-scan"))) + if err != nil { + t.Errorf("Doing large scan: %v", err) + } + if want := 1000 * len(medBytes); n != want { + t.Errorf("Large scan returned %d bytes, want %d", n, want) + } + // Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption. + rc := 0 + wantRc := 3 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + rc++ + return true + }, LimitRows(int64(wantRc))) + if rc != wantRc { + t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc) + } + checkpoint("tested big read/write/scan") + + // Test bulk mutations + if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + bulkData := map[string][]string{ + "red sox": {"2004", "2007", "2013"}, + "patriots": {"2001", "2003", "2004", "2014"}, + "celtics": {"1981", "1984", "1986", "2008"}, + } + var rowKeys []string + var muts []*Mutation + for row, ss := range bulkData { + mut := NewMutation() + for _, name := range ss { + mut.Set("bulk", name, 0, []byte("1")) + } + rowKeys = append(rowKeys, row) + muts = append(muts, mut) + } + status, err := tbl.ApplyBulk(ctx, rowKeys, muts) + if err != nil { + t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) + } + if status != nil { + t.Errorf("non-nil errors: %v", err) + } + checkpoint("inserted bulk data") + + // Read each row back + for rowKey, ss := range bulkData { + row, err := tbl.ReadRow(ctx, rowKey) + if err != nil { + t.Fatalf("Reading a bulk row: %v", err) + } + var wantItems []ReadItem + for _, val := range ss { + wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")}) + } + wantRow := Row{"bulk": wantItems} + if !testutil.Equal(row, wantRow) { + t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) + } + } + checkpoint("tested reading from bulk insert") + + // Test bulk write errors. + // Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error. 
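+	// (Why ServerTime matters here: the client treats mutations with explicit
+	// client-set timestamps as idempotent and may retry them transparently,
+	// while a server-assigned timestamp makes a mutation non-idempotent, so a
+	// failure surfaces immediately in the per-entry status checked below.)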
+	badMut := NewMutation()
+	badMut.Set("badfamily", "col", ServerTime, nil)
+	badMut2 := NewMutation()
+	badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1"))
+	status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2})
+	if err != nil {
+		t.Fatalf("Bulk mutating bad rows: %v", err)
+	}
+	if status == nil {
+		t.Errorf("No errors for bad bulk mutation")
+	} else if status[0] == nil || status[1] == nil {
+		t.Errorf("No error for bad bulk mutation")
+	}
+}
+
+func formatReadItem(ri ReadItem) string {
+	// Use the column qualifier only to make the test data briefer.
+	col := ri.Column[strings.Index(ri.Column, ":")+1:]
+	return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value)
+}
+
+// fill tiles sub across b. A final chunk of b no longer than sub is left
+// zeroed; the tests only compare the buffer against itself or its length.
+func fill(b, sub []byte) {
+	for len(b) > len(sub) {
+		n := copy(b, sub)
+		b = b[n:]
+	}
+}
+
+func clearTimestamps(r Row) {
+	for _, ris := range r {
+		for i := range ris {
+			ris[i].Timestamp = 0
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/example_test.go b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go
new file mode 100644
index 0000000..5cfc370
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package bttest_test
+
+import (
+	"fmt"
+	"log"
+
+	"cloud.google.com/go/bigtable"
+	"cloud.google.com/go/bigtable/bttest"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc"
+)
+
+func ExampleNewServer() {
+	srv, err := bttest.NewServer("127.0.0.1:0")
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	ctx := context.Background()
+
+	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	proj, instance := "proj", "instance"
+
+	adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	if err = adminClient.CreateTable(ctx, "example"); err != nil {
+		log.Fatalln(err)
+	}
+
+	if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
+		log.Fatalln(err)
+	}
+
+	client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
+	if err != nil {
+		log.Fatalln(err)
+	}
+	tbl := client.Open("example")
+
+	mut := bigtable.NewMutation()
+	mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
+	if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
+		log.Fatalln(err)
+	}
+
+	if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
+		log.Fatalln(err)
+	} else {
+		for _, column := range row["links"] {
+			fmt.Println(column.Column)
+			fmt.Println(string(column.Value))
+		}
+	}
+
+	// Output:
+	// links:golang.org
+	// Gophers!
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
new file mode 100644
index 0000000..29935f8
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go
@@ -0,0 +1,1273 @@
+/*
+Copyright 2015 Google Inc.
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bttest contains test helpers for working with the bigtable package. + +To use a Server, create it, and then connect to it with no security: +(The project/instance values are ignored.) + srv, err := bttest.NewServer("127.0.0.1:0") + ... + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + ... + client, err := bigtable.NewClient(ctx, proj, instance, + option.WithGRPCConn(conn)) + ... +*/ +package bttest // import "cloud.google.com/go/bigtable/bttest" + +import ( + "encoding/binary" + "fmt" + "log" + "math/rand" + "net" + "regexp" + "sort" + "strings" + "sync" + "time" + + "bytes" + + emptypb "github.com/golang/protobuf/ptypes/empty" + "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + statpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Server is an in-memory Cloud Bigtable fake. +// It is unauthenticated, and only a rough approximation. +type Server struct { + Addr string + + l net.Listener + srv *grpc.Server + s *server +} + +// server is the real implementation of the fake. +// It is a separate and unexported type so the API won't be cluttered with +// methods that are only relevant to the fake's implementation. +type server struct { + mu sync.Mutex + tables map[string]*table // keyed by fully qualified name + gcc chan int // set when gcloop starts, closed when server shuts down + + // Any unimplemented methods will cause a panic. + btapb.BigtableTableAdminServer + btpb.BigtableServer +} + +// NewServer creates a new Server. +// The Server will be listening for gRPC connections, without TLS, +// on the provided address. The resolved address is named by the Addr field. +func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + s := &Server{ + Addr: l.Addr().String(), + l: l, + srv: grpc.NewServer(opt...), + s: &server{ + tables: make(map[string]*table), + }, + } + btapb.RegisterBigtableTableAdminServer(s.srv, s.s) + btpb.RegisterBigtableServer(s.srv, s.s) + + go s.srv.Serve(s.l) + + return s, nil +} + +// Close shuts down the server. 
+func (s *Server) Close() {
+	s.s.mu.Lock()
+	if s.s.gcc != nil {
+		close(s.s.gcc)
+	}
+	s.s.mu.Unlock()
+
+	s.srv.Stop()
+	s.l.Close()
+}
+
+func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) {
+	tbl := req.Parent + "/tables/" + req.TableId
+
+	s.mu.Lock()
+	if _, ok := s.tables[tbl]; ok {
+		s.mu.Unlock()
+		return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
+	}
+	s.tables[tbl] = newTable(req)
+	s.mu.Unlock()
+
+	return &btapb.Table{Name: tbl}, nil
+}
+
+func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) {
+	res := &btapb.ListTablesResponse{}
+	prefix := req.Parent + "/tables/"
+
+	s.mu.Lock()
+	for tbl := range s.tables {
+		if strings.HasPrefix(tbl, prefix) {
+			res.Tables = append(res.Tables, &btapb.Table{Name: tbl})
+		}
+	}
+	s.mu.Unlock()
+
+	return res, nil
+}
+
+func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) {
+	tbl := req.Name
+
+	s.mu.Lock()
+	tblIns, ok := s.tables[tbl]
+	s.mu.Unlock()
+	if !ok {
+		return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl)
+	}
+
+	return &btapb.Table{
+		Name:           tbl,
+		ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()),
+	}, nil
+}
+
+func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if _, ok := s.tables[req.Name]; !ok {
+		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
+	}
+	delete(s.tables, req.Name)
+	return &emptypb.Empty{}, nil
+}
+
+func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) {
+	tblName := req.Name[strings.LastIndex(req.Name, "/")+1:]
+
+	s.mu.Lock()
+	tbl, ok := s.tables[req.Name]
+	s.mu.Unlock()
+	if !ok {
+		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
+	}
+
+	tbl.mu.Lock()
+	defer tbl.mu.Unlock()
+
+	for _, mod := range req.Modifications {
+		if create := mod.GetCreate(); create != nil {
+			if _, ok := tbl.families[mod.Id]; ok {
+				return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
+			}
+			newcf := &columnFamily{
+				name:   req.Name + "/columnFamilies/" + mod.Id,
+				order:  tbl.counter,
+				gcRule: create.GcRule,
+			}
+			tbl.counter++
+			tbl.families[mod.Id] = newcf
+		} else if mod.GetDrop() {
+			if _, ok := tbl.families[mod.Id]; !ok {
+				return nil, fmt.Errorf("can't delete unknown family %q", mod.Id)
+			}
+			delete(tbl.families, mod.Id)
+		} else if modify := mod.GetUpdate(); modify != nil {
+			if _, ok := tbl.families[mod.Id]; !ok {
+				return nil, fmt.Errorf("no such family %q", mod.Id)
+			}
+			newcf := &columnFamily{
+				name:   req.Name + "/columnFamilies/" + mod.Id,
+				gcRule: modify.GcRule,
+			}
+			// Assume that we ALWAYS want to replace the existing setting with
+			// the new one; partial updates may be needed later, though.
+			tbl.families[mod.Id] = newcf
+		}
+	}
+
+	s.needGC()
+	return &btapb.Table{
+		Name:           tblName,
+		ColumnFamilies: toColumnFamilies(tbl.families),
+	}, nil
+}
+
+func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeRequest) (*emptypb.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	tbl, ok := s.tables[req.Name]
+	if !ok {
+		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
+	}
+
+	if req.GetDeleteAllDataFromTable() {
+		tbl.rows = nil
+		tbl.rowIndex = make(map[string]*row)
+	} else {
+		// Delete rows by prefix
+		prefixBytes := req.GetRowKeyPrefix()
+		if prefixBytes == nil {
return nil, fmt.Errorf("missing row key prefix") + } + prefix := string(prefixBytes) + + start := -1 + end := 0 + for i, row := range tbl.rows { + match := strings.HasPrefix(row.key, prefix) + if match { + // Delete the mapping. Row will be deleted from sorted range below. + delete(tbl.rowIndex, row.key) + } + if match && start == -1 { + start = i + } else if !match && start != -1 { + break + } + end++ + } + if start != -1 { + // Delete the range, using method from https://github.com/golang/go/wiki/SliceTricks + copy(tbl.rows[start:], tbl.rows[end:]) + for k, n := len(tbl.rows)-end+start, len(tbl.rows); k < n; k++ { + tbl.rows[k] = nil + } + tbl.rows = tbl.rows[:len(tbl.rows)-end+start] + } + } + + return &emptypb.Empty{}, nil +} + +func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + // Rows to read can be specified by a set of row keys and/or a set of row ranges. + // Output is a stream of sorted, de-duped rows. + tbl.mu.RLock() + rowSet := make(map[string]*row) + if req.Rows != nil { + // Add the explicitly given keys + for _, key := range req.Rows.RowKeys { + start := string(key) + addRows(start, start+"\x00", tbl, rowSet) + } + + // Add keys from row ranges + for _, rr := range req.Rows.RowRanges { + var start, end string + switch sk := rr.StartKey.(type) { + case *btpb.RowRange_StartKeyClosed: + start = string(sk.StartKeyClosed) + case *btpb.RowRange_StartKeyOpen: + start = string(sk.StartKeyOpen) + "\x00" + } + switch ek := rr.EndKey.(type) { + case *btpb.RowRange_EndKeyClosed: + end = string(ek.EndKeyClosed) + "\x00" + case *btpb.RowRange_EndKeyOpen: + end = string(ek.EndKeyOpen) + } + + addRows(start, end, tbl, rowSet) + } + } else { + // Read all rows + addRows("", "", tbl, rowSet) + } + tbl.mu.RUnlock() + + rows := make([]*row, 0, len(rowSet)) + for _, r := range rowSet { + rows = append(rows, r) + } + sort.Sort(byRowKey(rows)) + + limit := int(req.RowsLimit) + count := 0 + for _, r := range rows { + if limit > 0 && count >= limit { + return nil + } + streamed, err := streamRow(stream, r, req.Filter) + if err != nil { + return err + } + if streamed { + count++ + } + } + return nil +} + +func addRows(start, end string, tbl *table, rowSet map[string]*row) { + si, ei := 0, len(tbl.rows) // half-open interval + if start != "" { + si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start }) + } + if end != "" { + ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end }) + } + if si < ei { + for _, row := range tbl.rows[si:ei] { + rowSet[row.key] = row + } + } +} + +// streamRow filters the given row and sends it via the given stream. +// Returns true if at least one cell matched the filter and was streamed, false otherwise. +func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) { + r.mu.Lock() + nr := r.copy() + r.mu.Unlock() + r = nr + + if !filterRow(f, r) { + return false, nil + } + + rrr := &btpb.ReadRowsResponse{} + families := r.sortedFamilies() + for _, fam := range families { + for _, colName := range fam.colNames { + cells := fam.cells[colName] + if len(cells) == 0 { + continue + } + // TODO(dsymonds): Apply transformers. 
+ for _, cell := range cells { + rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(r.key), + FamilyName: &wrappers.StringValue{Value: fam.name}, + Qualifier: &wrappers.BytesValue{Value: []byte(colName)}, + TimestampMicros: cell.ts, + Value: cell.value, + }) + } + } + } + // We can't have a cell with just COMMIT set, which would imply a new empty cell. + // So modify the last cell to have the COMMIT flag set. + if len(rrr.Chunks) > 0 { + rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true} + } + + return true, stream.Send(rrr) +} + +// filterRow modifies a row with the given filter. Returns true if at least one cell from the row matches, +// false otherwise. +func filterRow(f *btpb.RowFilter, r *row) bool { + if f == nil { + return true + } + // Handle filters that apply beyond just including/excluding cells. + switch f := f.Filter.(type) { + case *btpb.RowFilter_Chain_: + for _, sub := range f.Chain.Filters { + if !filterRow(sub, r) { + return false + } + } + return true + case *btpb.RowFilter_Interleave_: + srs := make([]*row, 0, len(f.Interleave.Filters)) + for _, sub := range f.Interleave.Filters { + sr := r.copy() + filterRow(sub, sr) + srs = append(srs, sr) + } + // merge + // TODO(dsymonds): is this correct? + r.families = make(map[string]*family) + for _, sr := range srs { + for _, fam := range sr.families { + f := r.getOrCreateFamily(fam.name, fam.order) + for colName, cs := range fam.cells { + f.cells[colName] = append(f.cellsByColumn(colName), cs...) + } + } + } + var count int + for _, fam := range r.families { + for _, cs := range fam.cells { + sort.Sort(byDescTS(cs)) + count += len(cs) + } + } + return count > 0 + case *btpb.RowFilter_CellsPerColumnLimitFilter: + lim := int(f.CellsPerColumnLimitFilter) + for _, fam := range r.families { + for col, cs := range fam.cells { + if len(cs) > lim { + fam.cells[col] = cs[:lim] + } + } + } + return true + case *btpb.RowFilter_Condition_: + if filterRow(f.Condition.PredicateFilter, r.copy()) { + if f.Condition.TrueFilter == nil { + return false + } + return filterRow(f.Condition.TrueFilter, r) + } + if f.Condition.FalseFilter == nil { + return false + } + return filterRow(f.Condition.FalseFilter, r) + case *btpb.RowFilter_RowKeyRegexFilter: + pat := string(f.RowKeyRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad rowkey_regex_filter pattern %q: %v", pat, err) + return false + } + if !rx.MatchString(r.key) { + return false + } + case *btpb.RowFilter_CellsPerRowLimitFilter: + // Grab the first n cells in the row. + lim := int(f.CellsPerRowLimitFilter) + for _, fam := range r.families { + for _, col := range fam.colNames { + cs := fam.cells[col] + if len(cs) > lim { + fam.cells[col] = cs[:lim] + lim = 0 + } else { + lim -= len(cs) + } + } + } + return true + case *btpb.RowFilter_CellsPerRowOffsetFilter: + // Skip the first n cells in the row. + offset := int(f.CellsPerRowOffsetFilter) + for _, fam := range r.families { + for _, col := range fam.colNames { + cs := fam.cells[col] + if len(cs) > offset { + fam.cells[col] = cs[offset:] + offset = 0 + return true + } else { + fam.cells[col] = cs[:0] + offset -= len(cs) + } + } + } + return true + } + + // Any other case, operate on a per-cell basis. 
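+	// (Each remaining filter kind is evaluated cell by cell via includeCell and
+	// modifyCell below; the row survives only if at least one cell does.)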
+ cellCount := 0 + for _, fam := range r.families { + for colName, cs := range fam.cells { + fam.cells[colName] = filterCells(f, fam.name, colName, cs) + cellCount += len(fam.cells[colName]) + } + } + return cellCount > 0 +} + +func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell { + var ret []cell + for _, cell := range cs { + if includeCell(f, fam, col, cell) { + cell = modifyCell(f, cell) + ret = append(ret, cell) + } + } + return ret +} + +func modifyCell(f *btpb.RowFilter, c cell) cell { + if f == nil { + return c + } + // Consider filters that may modify the cell contents + switch f.Filter.(type) { + case *btpb.RowFilter_StripValueTransformer: + return cell{ts: c.ts} + default: + return c + } +} + +func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool { + if f == nil { + return true + } + // TODO(dsymonds): Implement many more filters. + switch f := f.Filter.(type) { + case *btpb.RowFilter_CellsPerColumnLimitFilter: + // Don't log, row-level filter + return true + case *btpb.RowFilter_RowKeyRegexFilter: + // Don't log, row-level filter + return true + case *btpb.RowFilter_StripValueTransformer: + // Don't log, cell-modifying filter + return true + default: + log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) + return true + case *btpb.RowFilter_FamilyNameRegexFilter: + pat := string(f.FamilyNameRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(fam) + case *btpb.RowFilter_ColumnQualifierRegexFilter: + pat := string(f.ColumnQualifierRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(col) + case *btpb.RowFilter_ValueRegexFilter: + pat := string(f.ValueRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.Match(cell.value) + case *btpb.RowFilter_ColumnRangeFilter: + if fam != f.ColumnRangeFilter.FamilyName { + return false + } + // Start qualifier defaults to empty string closed + inRangeStart := func() bool { return col >= "" } + switch sq := f.ColumnRangeFilter.StartQualifier.(type) { + case *btpb.ColumnRange_StartQualifierOpen: + inRangeStart = func() bool { return col > string(sq.StartQualifierOpen) } + case *btpb.ColumnRange_StartQualifierClosed: + inRangeStart = func() bool { return col >= string(sq.StartQualifierClosed) } + } + // End qualifier defaults to no upper boundary + inRangeEnd := func() bool { return true } + switch eq := f.ColumnRangeFilter.EndQualifier.(type) { + case *btpb.ColumnRange_EndQualifierClosed: + inRangeEnd = func() bool { return col <= string(eq.EndQualifierClosed) } + case *btpb.ColumnRange_EndQualifierOpen: + inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) } + } + return inRangeStart() && inRangeEnd() + case *btpb.RowFilter_TimestampRangeFilter: + // Lower bound is inclusive and defaults to 0, upper bound is exclusive and defaults to infinity. 
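+		// For example, StartTimestampMicros: 1000 with EndTimestampMicros: 0
+		// keeps every cell with ts >= 1000, while 1000..3000 keeps ts 1000 and
+		// 2000 but not 3000.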
+ return cell.ts >= f.TimestampRangeFilter.StartTimestampMicros && + (f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros) + case *btpb.RowFilter_ValueRangeFilter: + v := cell.value + // Start value defaults to empty string closed + inRangeStart := func() bool { return bytes.Compare(v, []byte{}) >= 0 } + switch sv := f.ValueRangeFilter.StartValue.(type) { + case *btpb.ValueRange_StartValueOpen: + inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueOpen) > 0 } + case *btpb.ValueRange_StartValueClosed: + inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueClosed) >= 0 } + } + // End value defaults to no upper boundary + inRangeEnd := func() bool { return true } + switch ev := f.ValueRangeFilter.EndValue.(type) { + case *btpb.ValueRange_EndValueClosed: + inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueClosed) <= 0 } + case *btpb.ValueRange_EndValueOpen: + inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueOpen) < 0 } + } + return inRangeStart() && inRangeEnd() + } +} + +func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + fs := tbl.columnFamilies() + r, _ := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock + defer r.mu.Unlock() + if err := applyMutations(tbl, r, req.Mutations, fs); err != nil { + return nil, err + } + return &btpb.MutateRowResponse{}, nil +} + +func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} + + fs := tbl.columnFamilies() + + defer tbl.resortRowIndex() + for i, entry := range req.Entries { + r, _ := tbl.mutableRow(string(entry.RowKey)) + r.mu.Lock() + code, msg := int32(codes.OK), "" + if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil { + code = int32(codes.Internal) + msg = err.Error() + } + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &statpb.Status{Code: code, Message: msg}, + } + r.mu.Unlock() + } + stream.Send(res) + return nil +} + +func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + res := &btpb.CheckAndMutateRowResponse{} + + fs := tbl.columnFamilies() + + r, _ := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + // Figure out which mutation to apply. + whichMut := false + if req.PredicateFilter == nil { + // Use true_mutations iff row contains any cells. + whichMut = !r.isEmpty() + } else { + // Use true_mutations iff any cells in the row match the filter. + // TODO(dsymonds): This could be cheaper. 
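+		// (filterRow prunes the row it is given in place, so it must run on a
+		// copy here; only the "does anything match" result is used.)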
+		nr := r.copy()
+		filterRow(req.PredicateFilter, nr)
+		whichMut = !nr.isEmpty()
+	}
+	res.PredicateMatched = whichMut
+	muts := req.FalseMutations
+	if whichMut {
+		muts = req.TrueMutations
+	}
+
+	defer tbl.resortRowIndex()
+	if err := applyMutations(tbl, r, muts, fs); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// applyMutations applies a sequence of mutations to a row.
+// fs should be a snapshot of tbl.families.
+// It assumes r.mu is locked.
+func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*columnFamily) error {
+	for _, mut := range muts {
+		switch mut := mut.Mutation.(type) {
+		default:
+			return fmt.Errorf("can't handle mutation type %T", mut)
+		case *btpb.Mutation_SetCell_:
+			set := mut.SetCell
+			if _, ok := fs[set.FamilyName]; !ok {
+				return fmt.Errorf("unknown family %q", set.FamilyName)
+			}
+			ts := set.TimestampMicros
+			if ts == -1 { // bigtable.ServerTime
+				ts = newTimestamp()
+			}
+			if !tbl.validTimestamp(ts) {
+				return fmt.Errorf("invalid timestamp %d", ts)
+			}
+			fam := set.FamilyName
+			col := string(set.ColumnQualifier)
+
+			newCell := cell{ts: ts, value: set.Value}
+			f := r.getOrCreateFamily(fam, fs[fam].order)
+			f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell)
+		case *btpb.Mutation_DeleteFromColumn_:
+			del := mut.DeleteFromColumn
+			if _, ok := fs[del.FamilyName]; !ok {
+				return fmt.Errorf("unknown family %q", del.FamilyName)
+			}
+			fam := del.FamilyName
+			col := string(del.ColumnQualifier)
+			if _, ok := r.families[fam]; ok {
+				cs := r.families[fam].cells[col]
+				if del.TimeRange != nil {
+					tsr := del.TimeRange
+					if !tbl.validTimestamp(tsr.StartTimestampMicros) {
+						return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
+					}
+					if !tbl.validTimestamp(tsr.EndTimestampMicros) {
+						return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
+					}
+					// Find half-open interval to remove.
+					// Cells are in descending timestamp order,
+					// so the predicates to sort.Search are inverted.
+					si, ei := 0, len(cs)
+					if tsr.StartTimestampMicros > 0 {
+						ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros })
+					}
+					if tsr.EndTimestampMicros > 0 {
+						si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros })
+					}
+					if si < ei {
+						copy(cs[si:], cs[ei:])
+						cs = cs[:len(cs)-(ei-si)]
+					}
+				} else {
+					cs = nil
+				}
+				if len(cs) == 0 {
+					delete(r.families[fam].cells, col)
+					colNames := r.families[fam].colNames
+					i := sort.Search(len(colNames), func(i int) bool { return colNames[i] >= col })
+					if i < len(colNames) && colNames[i] == col {
+						r.families[fam].colNames = append(colNames[:i], colNames[i+1:]...)
+ } + if len(r.families[fam].cells) == 0 { + delete(r.families, fam) + } + } else { + r.families[fam].cells[col] = cs + } + } + case *btpb.Mutation_DeleteFromRow_: + r.families = make(map[string]*family) + case *btpb.Mutation_DeleteFromFamily_: + fampre := mut.DeleteFromFamily.FamilyName + delete(r.families, fampre) + } + } + return nil +} + +func maxTimestamp(x, y int64) int64 { + if x > y { + return x + } + return y +} + +func newTimestamp() int64 { + ts := time.Now().UnixNano() / 1e3 + ts -= ts % 1000 // round to millisecond granularity + return ts +} + +func appendOrReplaceCell(cs []cell, newCell cell) []cell { + replaced := false + for i, cell := range cs { + if cell.ts == newCell.ts { + cs[i] = newCell + replaced = true + break + } + } + if !replaced { + cs = append(cs, newCell) + } + sort.Sort(byDescTS(cs)) + return cs +} + +func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + updates := make(map[string]cell) // copy of updated cells; keyed by full column name + + fs := tbl.columnFamilies() + + rowKey := string(req.RowKey) + r, isNewRow := tbl.mutableRow(rowKey) + // This must be done before the row lock, acquired below, is released. + if isNewRow { + defer tbl.resortRowIndex() + } + r.mu.Lock() + defer r.mu.Unlock() + // Assume all mutations apply to the most recent version of the cell. + // TODO(dsymonds): Verify this assumption and document it in the proto. + for _, rule := range req.Rules { + if _, ok := fs[rule.FamilyName]; !ok { + return nil, fmt.Errorf("unknown family %q", rule.FamilyName) + } + + fam := rule.FamilyName + col := string(rule.ColumnQualifier) + isEmpty := false + f := r.getOrCreateFamily(fam, fs[fam].order) + cs := f.cells[col] + isEmpty = len(cs) == 0 + + ts := newTimestamp() + var newCell, prevCell cell + if !isEmpty { + cells := r.families[fam].cells[col] + prevCell = cells[0] + + // ts is the max of now or the prev cell's timestamp in case the + // prev cell is in the future + ts = maxTimestamp(ts, prevCell.ts) + } + + switch rule := rule.Rule.(type) { + default: + return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) + case *btpb.ReadModifyWriteRule_AppendValue: + newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)} + case *btpb.ReadModifyWriteRule_IncrementAmount: + var v int64 + if !isEmpty { + prevVal := prevCell.value + if len(prevVal) != 8 { + return nil, fmt.Errorf("increment on non-64-bit value") + } + v = int64(binary.BigEndian.Uint64(prevVal)) + } + v += rule.IncrementAmount + var val [8]byte + binary.BigEndian.PutUint64(val[:], uint64(v)) + newCell = cell{ts: ts, value: val[:]} + } + key := strings.Join([]string{fam, col}, ":") + updates[key] = newCell + f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) + } + + res := &btpb.Row{ + Key: req.RowKey, + } + for col, cell := range updates { + i := strings.Index(col, ":") + fam, qual := col[:i], col[i+1:] + var f *btpb.Family + for _, ff := range res.Families { + if ff.Name == fam { + f = ff + break + } + } + if f == nil { + f = &btpb.Family{Name: fam} + res.Families = append(res.Families, f) + } + f.Columns = append(f.Columns, &btpb.Column{ + Qualifier: []byte(qual), + Cells: []*btpb.Cell{{ + TimestampMicros: cell.ts, + Value: cell.value, + }}, + }) + } + return &btpb.ReadModifyWriteRowResponse{Row: res}, nil +} + 
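+// SampleRowKeys streams a pseudo-random sample of the table's row keys,
+// always including the final row key, along with cumulative byte offsets.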
+func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + tbl.mu.RLock() + defer tbl.mu.RUnlock() + + // The return value of SampleRowKeys is very loosely defined. Return at least the + // final row key in the table and choose other row keys randomly. + var offset int64 + for i, row := range tbl.rows { + if i == len(tbl.rows)-1 || rand.Int31n(100) == 0 { + resp := &btpb.SampleRowKeysResponse{ + RowKey: []byte(row.key), + OffsetBytes: offset, + } + err := stream.Send(resp) + if err != nil { + return err + } + } + offset += int64(row.size()) + } + return nil +} + +// needGC is invoked whenever the server needs gcloop running. +func (s *server) needGC() { + s.mu.Lock() + if s.gcc == nil { + s.gcc = make(chan int) + go s.gcloop(s.gcc) + } + s.mu.Unlock() +} + +func (s *server) gcloop(done <-chan int) { + const ( + minWait = 500 // ms + maxWait = 1500 // ms + ) + + for { + // Wait for a random time interval. + d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond + select { + case <-time.After(d): + case <-done: + return // server has been closed + } + + // Do a GC pass over all tables. + var tables []*table + s.mu.Lock() + for _, tbl := range s.tables { + tables = append(tables, tbl) + } + s.mu.Unlock() + for _, tbl := range tables { + tbl.gc() + } + } +} + +type table struct { + mu sync.RWMutex + counter uint64 // increment by 1 when a new family is created + families map[string]*columnFamily // keyed by plain family name + rows []*row // sorted by row key + rowIndex map[string]*row // indexed by row key +} + +func newTable(ctr *btapb.CreateTableRequest) *table { + fams := make(map[string]*columnFamily) + c := uint64(0) + if ctr.Table != nil { + for id, cf := range ctr.Table.ColumnFamilies { + fams[id] = &columnFamily{ + name: ctr.Parent + "/columnFamilies/" + id, + order: c, + gcRule: cf.GcRule, + } + c++ + } + } + return &table{ + families: fams, + counter: c, + rowIndex: make(map[string]*row), + } +} + +func (t *table) validTimestamp(ts int64) bool { + // Assume millisecond granularity is required. + return ts%1000 == 0 +} + +func (t *table) columnFamilies() map[string]*columnFamily { + cp := make(map[string]*columnFamily) + t.mu.RLock() + for fam, cf := range t.families { + cp[fam] = cf + } + t.mu.RUnlock() + return cp +} + +func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) { + // Try fast path first. + t.mu.RLock() + r := t.rowIndex[row] + t.mu.RUnlock() + if r != nil { + return r, false + } + + // We probably need to create the row. + t.mu.Lock() + r = t.rowIndex[row] + if r == nil { + r = newRow(row) + t.rowIndex[row] = r + t.rows = append(t.rows, r) + } + t.mu.Unlock() + return r, true +} + +func (t *table) resortRowIndex() { + t.mu.Lock() + sort.Sort(byRowKey(t.rows)) + t.mu.Unlock() +} + +func (t *table) gc() { + // This method doesn't add or remove rows, so we only need a read lock for the table. + t.mu.RLock() + defer t.mu.RUnlock() + + // Gather GC rules we'll apply. 
+	rules := make(map[string]*btapb.GcRule) // keyed by family name
+	for fam, cf := range t.families {
+		if cf.gcRule != nil {
+			rules[fam] = cf.gcRule
+		}
+	}
+	if len(rules) == 0 {
+		return
+	}
+
+	for _, r := range t.rows {
+		r.mu.Lock()
+		r.gc(rules)
+		r.mu.Unlock()
+	}
+}
+
+type byRowKey []*row
+
+func (b byRowKey) Len() int           { return len(b) }
+func (b byRowKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key }
+
+type row struct {
+	key string
+
+	mu       sync.Mutex
+	families map[string]*family // keyed by family name
+}
+
+func newRow(key string) *row {
+	return &row{
+		key:      key,
+		families: make(map[string]*family),
+	}
+}
+
+// copy returns a copy of the row.
+// Cell values are aliased.
+// r.mu should be held.
+func (r *row) copy() *row {
+	nr := newRow(r.key)
+	for _, fam := range r.families {
+		nr.families[fam.name] = &family{
+			name:     fam.name,
+			order:    fam.order,
+			colNames: fam.colNames,
+			cells:    make(map[string][]cell),
+		}
+		for col, cs := range fam.cells {
+			// Copy the []cell slice, but not the []byte inside each cell.
+			nr.families[fam.name].cells[col] = append([]cell(nil), cs...)
+		}
+	}
+	return nr
+}
+
+// isEmpty returns true if a row doesn't contain any cell
+func (r *row) isEmpty() bool {
+	for _, fam := range r.families {
+		for _, cs := range fam.cells {
+			if len(cs) > 0 {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// sortedFamilies returns a column family set
+// sorted in ascending creation order in a row.
+func (r *row) sortedFamilies() []*family {
+	var families []*family
+	for _, fam := range r.families {
+		families = append(families, fam)
+	}
+	sort.Sort(byCreationOrder(families))
+	return families
+}
+
+func (r *row) getOrCreateFamily(name string, order uint64) *family {
+	if _, ok := r.families[name]; !ok {
+		r.families[name] = &family{
+			name:  name,
+			order: order,
+			cells: make(map[string][]cell),
+		}
+	}
+	return r.families[name]
+}
+
+// gc applies the given GC rules to the row.
+// r.mu should be held.
+func (r *row) gc(rules map[string]*btapb.GcRule) {
+	for _, fam := range r.families {
+		rule, ok := rules[fam.name]
+		if !ok {
+			continue
+		}
+		for col, cs := range fam.cells {
+			r.families[fam.name].cells[col] = applyGC(cs, rule)
+		}
+	}
+}
+
+// size returns the total size of all cell values in the row.
+func (r *row) size() int {
+	size := 0
+	for _, fam := range r.families {
+		for _, cells := range fam.cells {
+			for _, cell := range cells {
+				size += len(cell.value)
+			}
+		}
+	}
+	return size
+}
+
+func (r *row) String() string {
+	return r.key
+}
+
+var gcTypeWarn sync.Once
+
+// applyGC applies the given GC rule to the cells.
+func applyGC(cells []cell, rule *btapb.GcRule) []cell {
+	switch rule := rule.Rule.(type) {
+	default:
+		// TODO(dsymonds): Support GcRule_Intersection_
+		gcTypeWarn.Do(func() {
+			log.Printf("Unsupported GC rule type %T", rule)
+		})
+	case *btapb.GcRule_Union_:
+		for _, sub := range rule.Union.Rules {
+			cells = applyGC(cells, sub)
+		}
+		return cells
+	case *btapb.GcRule_MaxAge:
+		// Timestamps are in microseconds.
+		cutoff := time.Now().UnixNano() / 1e3
+		cutoff -= rule.MaxAge.Seconds * 1e6
+		cutoff -= int64(rule.MaxAge.Nanos) / 1e3
+		// The slice of cells is in descending timestamp order.
+		// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff.
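+		// For example, with cell timestamps [3000, 2000, 1000] and cutoff 1500,
+		// si is 2 and cells[:si] keeps the cells at 3000 and 2000.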
+		si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff })
+		if si < len(cells) {
+			log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si)
+		}
+		return cells[:si]
+	case *btapb.GcRule_MaxNumVersions:
+		n := int(rule.MaxNumVersions)
+		if len(cells) > n {
+			cells = cells[:n]
+		}
+		return cells
+	}
+	return cells
+}
+
+type family struct {
+	name     string            // Column family name
+	order    uint64            // Creation order of column family
+	colNames []string          // Column names, sorted in lexicographical ascending order
+	cells    map[string][]cell // Keyed by column name; cells are in descending timestamp order
+}
+
+type byCreationOrder []*family
+
+func (b byCreationOrder) Len() int           { return len(b) }
+func (b byCreationOrder) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byCreationOrder) Less(i, j int) bool { return b[i].order < b[j].order }
+
+// cellsByColumn adds the column name to the colNames set if it is not already
+// present, and returns all cells within the column.
+func (f *family) cellsByColumn(name string) []cell {
+	if _, ok := f.cells[name]; !ok {
+		f.colNames = append(f.colNames, name)
+		sort.Strings(f.colNames)
+	}
+	return f.cells[name]
+}
+
+type cell struct {
+	ts    int64
+	value []byte
+}
+
+type byDescTS []cell
+
+func (b byDescTS) Len() int           { return len(b) }
+func (b byDescTS) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
+
+type columnFamily struct {
+	name   string
+	order  uint64 // Creation order of column family
+	gcRule *btapb.GcRule
+}
+
+func (c *columnFamily) proto() *btapb.ColumnFamily {
+	return &btapb.ColumnFamily{
+		GcRule: c.gcRule,
+	}
+}
+
+func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily {
+	fs := make(map[string]*btapb.ColumnFamily)
+	for k, v := range families {
+		fs[k] = v.proto()
+	}
+	return fs
+}
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
new file mode 100644
index 0000000..3b785de
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
@@ -0,0 +1,571 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package bttest + +import ( + "fmt" + "math/rand" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + "google.golang.org/grpc" +) + +func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + if _, err := s.CreateTable( + ctx, + &btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil { + t.Fatal(err) + } + const name = `cluster/tables/t` + tbl := s.tables[name] + req := &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + _, err := s.ModifyColumnFamilies(ctx, req) + if err != nil { + t.Fatal(err) + } + req = &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{ + GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}, + }}, + }}, + } + if _, err := s.ModifyColumnFamilies(ctx, req); err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + var ts int64 + ms := func() []*btpb.Mutation { + return []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte(`col`), + TimestampMicros: atomic.AddInt64(&ts, 1000), + }}, + }} + } + + rmw := func() *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + }}, + } + } + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + req := &btpb.MutateRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Mutations: ms(), + } + s.MutateRow(ctx, req) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + _, _ = s.ReadModifyWriteRow(ctx, rmw()) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + tbl.gc() + }() + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + t.Error("Concurrent mutations and GCs haven't completed after 1s") + } +} + +func TestCreateTableWithFamily(t *testing.T) { + // The Go client currently doesn't support creating a table with column families + // in one operation but it is allowed by the API. This must still be supported by the + // fake server so this test lives here instead of in the main bigtable + // integration test. 
+ s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}}, + "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}}, + }, + } + cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name}) + if err != nil { + t.Fatalf("Getting table: %v", err) + } + cf := tbl.ColumnFamilies["cf1"] + if cf == nil { + t.Fatalf("Missing col family cf1") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } + cf = tbl.ColumnFamilies["cf2"] + if cf == nil { + t.Fatalf("Missing col family cf2") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } +} + +type MockSampleRowKeysServer struct { + responses []*btpb.SampleRowKeysResponse + grpc.ServerStream +} + +func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error { + s.responses = append(s.responses, resp) + return nil +} + +func TestSampleRowKeys(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + // Populate the table + val := []byte("value") + rowCount := 1000 + for i := 0; i < rowCount; i++ { + req := &btpb.MutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-" + strconv.Itoa(i)), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: val, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + + mock := &MockSampleRowKeysServer{} + if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil { + t.Errorf("SampleRowKeys error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + // Make sure the offset of the final response is the offset of the final row + got := mock.responses[len(mock.responses)-1].OffsetBytes + want := int64((rowCount - 1) * len(val)) + if got != want { + t.Errorf("Invalid offset: got %d, want %d", got, want) + } +} + +func TestDropRowRange(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + tbl := s.tables[tblInfo.Name] + + // Populate the table + prefixes := []string{"AAA", "BBB", "CCC", "DDD"} + count := 3 + doWrite := func() { + for _, prefix := range prefixes { + for i := 0; i < count; i++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, 
+ RowKey: []byte(prefix + strconv.Itoa(i)), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + } + } + + doWrite() + tblSize := len(tbl.rows) + req := &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping first range: %v", err) + } + got, want := len(tbl.rows), tblSize-count + if got != want { + t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping second range: %v", err) + } + got, want = len(tbl.rows), tblSize-(2*count) + if got != want { + t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping invalid range: %v", err) + } + got, want = len(tbl.rows), tblSize-(2*count) + if got != want { + t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping all data: %v", err) + } + got, want = len(tbl.rows), 0 + if got != want { + t.Errorf("Row count after drop all: got %d, want %d", got, want) + } + + // Test that we can write rows, delete some and then write them again. 
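+	// With count = 1 each prefix writes exactly one row, so after each rewrite
+	// below the table should hold len(prefixes) rows.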
+ count = 1 + doWrite() + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping all data: %v", err) + } + got, want = len(tbl.rows), 0 + if got != want { + t.Errorf("Row count after drop all: got %d, want %d", got, want) + } + + doWrite() + got, want = len(tbl.rows), len(prefixes) + if got != want { + t.Errorf("Row count after rewrite: got %d, want %d", got, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping range: %v", err) + } + doWrite() + got, want = len(tbl.rows), len(prefixes) + if got != want { + t.Errorf("Row count after drop range: got %d, want %d", got, want) + } +} + +type MockReadRowsServer struct { + responses []*btpb.ReadRowsResponse + grpc.ServerStream +} + +func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error { + s.responses = append(s.responses, resp) + return nil +} + +func TestReadRowsOrder(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + count := 3 + mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { + return &btapb.ModifyColumnFamiliesRequest{ + Name: tblInfo.Name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf" + strconv.Itoa(i), + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + } + for i := 1; i <= count; i++ { + _, err = s.ModifyColumnFamilies(ctx, mcf(i)) + if err != nil { + t.Fatal(err) + } + } + // Populate the table + for fc := 0; fc < count; fc++ { + for cc := count; cc > 0; cc-- { + for tc := 0; tc < count; tc++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf" + strconv.Itoa(fc), + ColumnQualifier: []byte("col" + strconv.Itoa(cc)), + TimestampMicros: int64((tc + 1) * 1000), + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + } + } + req := &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock := &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 27 { + t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks)) + } + testOrder := func(ms *MockReadRowsServer) { + var prevFam, prevCol string + var prevTime int64 + for _, cc := range ms.responses[0].Chunks { + if prevFam == "" { + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + continue + } + if cc.FamilyName.Value < prevFam { + t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam) + } else if cc.FamilyName.Value == prevFam { + if string(cc.Qualifier.Value) < prevCol { + 
t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol) + } else if string(cc.Qualifier.Value) == prevCol { + if cc.TimestampMicros > prevTime { + t.Errorf("cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime) + } + } + } + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + } + } + testOrder(mock) + + // Read with interleave filter + inter := &btpb.RowFilter_Interleave{} + fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}} + cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}} + inter.Filters = append(inter.Filters, fnr, cqr) + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + Filter: &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{inter}, + }, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 18 { + t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks)) + } + testOrder(mock) + + // Check order after ReadModifyWriteRow + rmw := func(i int) *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf3", + ColumnQualifier: []byte("col" + strconv.Itoa(i)), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + }}, + } + } + for i := count; i > 0; i-- { + s.ReadModifyWriteRow(ctx, rmw(i)) + } + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 30 { + t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks)) + } + testOrder(mock) +} + +func TestCheckAndMutateRowWithoutPredicate(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + // Populate the table + val := []byte("value") + mrreq := &btpb.MutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-present"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: val, + }}, + }}, + } + if _, err := s.MutateRow(ctx, mrreq); err != nil { + t.Fatalf("Populating table: %v", err) + } + + req := &btpb.CheckAndMutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-not-present"), + } + if res, err := s.CheckAndMutateRow(ctx, req); err != nil { + t.Errorf("CheckAndMutateRow error: %v", err) + } else if got, want := res.PredicateMatched, false; got != want { + t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) + } + + req = &btpb.CheckAndMutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-present"), + } + if res, err := s.CheckAndMutateRow(ctx, req); 
err != nil { + t.Errorf("CheckAndMutateRow error: %v", err) + } else if got, want := res.PredicateMatched, true; got != want { + t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go new file mode 100644 index 0000000..67466f2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go @@ -0,0 +1,842 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// Command docs are in cbtdoc.go. + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var ( + oFlag = flag.String("o", "", "if set, redirect stdout to this file") + + config *cbtconfig.Config + client *bigtable.Client + adminClient *bigtable.AdminClient + instanceAdminClient *bigtable.InstanceAdminClient + + version = "" + revision = "" + revisionDate = "" +) + +func getCredentialOpts(opts []option.ClientOption) []option.ClientOption { + if ts := config.TokenSource; ts != nil { + opts = append(opts, option.WithTokenSource(ts)) + } + if tlsCreds := config.TLSCreds; tlsCreds != nil { + opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds))) + } + return opts +} + +func getClient() *bigtable.Client { + if client == nil { + var opts []option.ClientOption + if ep := config.DataEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + } + return client +} + +func getAdminClient() *bigtable.AdminClient { + if adminClient == nil { + var opts []option.ClientOption + if ep := config.AdminEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + } + return adminClient +} + +func getInstanceAdminClient() *bigtable.InstanceAdminClient { + if instanceAdminClient == nil { + var opts []option.ClientOption + if ep := config.AdminEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...) 
+ if err != nil { + log.Fatalf("Making bigtable.InstanceAdminClient: %v", err) + } + } + return instanceAdminClient +} + +func main() { + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Usage = func() { usage(os.Stderr) } + flag.Parse() + if flag.NArg() == 0 { + usage(os.Stderr) + os.Exit(1) + } + + if *oFlag != "" { + f, err := os.Create(*oFlag) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := f.Close(); err != nil { + log.Fatal(err) + } + }() + os.Stdout = f + } + + ctx := context.Background() + for _, cmd := range commands { + if cmd.Name == flag.Arg(0) { + if err := config.CheckFlags(cmd.Required); err != nil { + log.Fatal(err) + } + cmd.do(ctx, flag.Args()[1:]...) + return + } + } + log.Fatalf("Unknown command %q", flag.Arg(0)) +} + +func usage(w io.Writer) { + fmt.Fprintf(w, "Usage: %s [flags] ...\n", os.Args[0]) + flag.CommandLine.SetOutput(w) + flag.CommandLine.PrintDefaults() + fmt.Fprintf(w, "\n%s", cmdSummary) +} + +var cmdSummary string // generated in init, below + +func init() { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) + for _, cmd := range commands { + fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) + } + tw.Flush() + buf.WriteString(configHelp) + cmdSummary = buf.String() +} + +var configHelp = ` +For convenience, values of the -project, -instance, -creds, +-admin-endpoint and -data-endpoint flags may be specified in +` + cbtconfig.Filename() + ` in this format: + project = my-project-123 + instance = my-instance + creds = path-to-account-key.json + admin-endpoint = hostname:port + data-endpoint = hostname:port +All values are optional, and all will be overridden by flags. + +cbt ` + version + ` ` + revision + ` ` + revisionDate + ` +` + +var commands = []struct { + Name, Desc string + do func(context.Context, ...string) + Usage string + Required cbtconfig.RequiredFlags +}{ + { + Name: "count", + Desc: "Count rows in a table", + do: doCount, + Usage: "cbt count
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createfamily", + Desc: "Create a column family", + do: doCreateFamily, + Usage: "cbt createfamily
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createtable", + Desc: "Create a table", + do: doCreateTable, + Usage: "cbt createtable
 <table> [initial_splits...]\n" +
+			"  initial_splits=row		A row key to be used to initially split the table " +
+			"into multiple tablets. Can be repeated to create multiple splits.",
+		Required: cbtconfig.ProjectAndInstanceRequired,
+	},
+	{
+		Name:     "deletecolumn",
+		Desc:     "Delete all cells in a column",
+		do:       doDeleteColumn,
+		Usage:    "cbt deletecolumn <table> <row> <family> <column>
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletefamily", + Desc: "Delete a column family", + do: doDeleteFamily, + Usage: "cbt deletefamily
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deleterow", + Desc: "Delete a row", + do: doDeleteRow, + Usage: "cbt deleterow
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletetable", + Desc: "Delete a table", + do: doDeleteTable, + Usage: "cbt deletetable
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "doc", + Desc: "Print godoc-suitable documentation for cbt", + do: doDoc, + Usage: "cbt doc", + Required: cbtconfig.NoneRequired, + }, + { + Name: "help", + Desc: "Print help text", + do: doHelp, + Usage: "cbt help [command]", + Required: cbtconfig.NoneRequired, + }, + { + Name: "listinstances", + Desc: "List instances in a project", + do: doListInstances, + Usage: "cbt listinstances", + Required: cbtconfig.ProjectRequired, + }, + { + Name: "lookup", + Desc: "Read from a single row", + do: doLookup, + Usage: "cbt lookup
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "ls", + Desc: "List tables and column families", + do: doLS, + Usage: "cbt ls List tables\n" + + "cbt ls
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "mddoc", + Desc: "Print documentation for cbt in Markdown format", + do: doMDDoc, + Usage: "cbt mddoc", + Required: cbtconfig.NoneRequired, + }, + { + Name: "read", + Desc: "Read rows", + do: doRead, + Usage: "cbt read
 <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
+			" [regex=<regex>] [count=<n>]\n" +
+			"  start=<row>		Start reading at this row\n" +
+			"  end=<row>		Stop reading before this row\n" +
+			"  prefix=<prefix>	Read rows with this prefix\n" +
+			"  regex=<regex>	Read rows with keys matching this regex\n" +
+			"  count=<n>		Read only this many rows\n",
+		Required: cbtconfig.ProjectAndInstanceRequired,
+	},
+	{
+		Name: "set",
+		Desc: "Set value of a cell",
+		do:   doSet,
+		Usage: "cbt set <table> <row>
 family:column=val[@ts] ...\n" +
+			"  family:column=val[@ts] may be repeated to set multiple cells.\n" +
+			"\n" +
+			"  ts is an optional integer timestamp.\n" +
+			"  If it cannot be parsed, the `@ts` part will be\n" +
+			"  interpreted as part of the value.",
+		Required: cbtconfig.ProjectAndInstanceRequired,
+	},
+	{
+		Name:     "setgcpolicy",
+		Desc:     "Set the GC policy for a column family",
+		do:       doSetGCPolicy,
+		Usage:    "cbt setgcpolicy
 <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
+			"\n" +
+			`  maxage=<d>		Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
+			"  maxversions=<n>	Maximum number of versions to preserve",
+		Required: cbtconfig.ProjectAndInstanceRequired,
+	},
+	{
+		Name:     "version",
+		Desc:     "Print the current cbt version",
+		do:       doVersion,
+		Usage:    "cbt version",
+		Required: cbtconfig.NoneRequired,
+	},
+}
+
+func doCount(ctx context.Context, args ...string) {
+	if len(args) != 1 {
+		log.Fatal("usage: cbt count <table>
") + } + tbl := getClient().Open(args[0]) + + n := 0 + err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { + n++ + return true + }, bigtable.RowFilter(bigtable.StripValueFilter())) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } + fmt.Println(n) +} + +func doCreateFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt createfamily
") + } + err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Creating column family: %v", err) + } +} + +func doCreateTable(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatal("usage: cbt createtable
[initial_splits...]") + } + var err error + if len(args) > 1 { + splits := args[1:] + err = getAdminClient().CreatePresplitTable(ctx, args[0], splits) + } else { + err = getAdminClient().CreateTable(ctx, args[0]) + } + if err != nil { + log.Fatalf("Creating table: %v", err) + } +} + +func doDeleteColumn(ctx context.Context, args ...string) { + if len(args) != 4 { + log.Fatal("usage: cbt deletecolumn
") + } + tbl := getClient().Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteCellsInColumn(args[2], args[3]) + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting cells in column: %v", err) + } +} + +func doDeleteFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deletefamily
") + } + err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Deleting column family: %v", err) + } +} + +func doDeleteRow(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deleterow
") + } + tbl := getClient().Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting row: %v", err) + } +} + +func doDeleteTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("Can't do `cbt deletetable %s`", args) + } + err := getAdminClient().DeleteTable(ctx, args[0]) + if err != nil { + log.Fatalf("Deleting table: %v", err) + } +} + +// to break circular dependencies +var ( + doDocFn func(ctx context.Context, args ...string) + doHelpFn func(ctx context.Context, args ...string) + doMDDocFn func(ctx context.Context, args ...string) +) + +func init() { + doDocFn = doDocReal + doHelpFn = doHelpReal + doMDDocFn = doMDDocReal +} + +func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } +func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } +func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) } + +func docFlags() []*flag.Flag { + // Only include specific flags, in a specific order. + var flags []*flag.Flag + for _, name := range []string{"project", "instance", "creds"} { + f := flag.Lookup(name) + if f == nil { + log.Fatalf("Flag not linked: -%s", name) + } + flags = append(flags, f) + } + return flags +} + +func doDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + } + var buf bytes.Buffer + if err := docTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad doc template: %v", err) + } + out, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatalf("Bad doc output: %v", err) + } + os.Stdout.Write(out) +} + +func indentLines(s, ind string) string { + ss := strings.Split(s, "\n") + for i, p := range ss { + ss[i] = ind + p + } + return strings.Join(ss, "\n") +} + +var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to +install the cbt tool, see the +[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview). + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. 
+ +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{range .Commands}} +{{.Desc}} + +Usage: +{{indent .Usage "\t"}} + + + +{{end}} +*/ +package main +`)) + +func doHelpReal(ctx context.Context, args ...string) { + if len(args) == 0 { + usage(os.Stdout) + return + } + for _, cmd := range commands { + if cmd.Name == args[0] { + fmt.Println(cmd.Usage) + return + } + } + log.Fatalf("Don't know command %q", args[0]) +} + +func doListInstances(ctx context.Context, args ...string) { + if len(args) != 0 { + log.Fatalf("usage: cbt listinstances") + } + is, err := getInstanceAdminClient().Instances(ctx) + if err != nil { + log.Fatalf("Getting list of instances: %v", err) + } + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Instance Name\tInfo\n") + fmt.Fprintf(tw, "-------------\t----\n") + for _, i := range is { + fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName) + } + tw.Flush() +} + +func doLookup(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatalf("usage: cbt lookup
 <table> <row>
") + } + table, row := args[0], args[1] + tbl := getClient().Open(table) + r, err := tbl.ReadRow(ctx, row) + if err != nil { + log.Fatalf("Reading row: %v", err) + } + printRow(r) +} + +func printRow(r bigtable.Row) { + fmt.Println(strings.Repeat("-", 40)) + fmt.Println(r.Key()) + + var fams []string + for fam := range r { + fams = append(fams, fam) + } + sort.Strings(fams) + for _, fam := range fams { + ris := r[fam] + sort.Sort(byColumn(ris)) + for _, ri := range ris { + ts := time.Unix(0, int64(ri.Timestamp)*1e3) + fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) + fmt.Printf(" %q\n", ri.Value) + } + } +} + +type byColumn []bigtable.ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +type byFamilyName []bigtable.FamilyInfo + +func (b byFamilyName) Len() int { return len(b) } +func (b byFamilyName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byFamilyName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +func doLS(ctx context.Context, args ...string) { + switch len(args) { + default: + log.Fatalf("Can't do `cbt ls %s`", args) + case 0: + tables, err := getAdminClient().Tables(ctx) + if err != nil { + log.Fatalf("Getting list of tables: %v", err) + } + sort.Strings(tables) + for _, table := range tables { + fmt.Println(table) + } + case 1: + table := args[0] + ti, err := getAdminClient().TableInfo(ctx, table) + if err != nil { + log.Fatalf("Getting table info: %v", err) + } + sort.Sort(byFamilyName(ti.FamilyInfos)) + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Family Name\tGC Policy\n") + fmt.Fprintf(tw, "-----------\t---------\n") + for _, fam := range ti.FamilyInfos { + fmt.Fprintf(tw, "%s\t%s\n", fam.Name, fam.GCPolicy) + } + tw.Flush() + } +} + +func doMDDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + } + var buf bytes.Buffer + if err := mddocTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad mddoc template: %v", err) + } + io.Copy(os.Stdout, &buf) +} + +var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. + +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{range .Commands}} +## {{.Desc}} + +{{indent .Usage "\t"}} + + + +{{end}} +`)) + +func doRead(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatalf("usage: cbt read
[args ...]") + } + tbl := getClient().Open(args[0]) + + parsed := make(map[string]string) + for _, arg := range args[1:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "limit": + // Be nicer; we used to support this, but renamed it to "end". + log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end") + case "start", "end", "prefix", "count", "regex": + parsed[key] = val + } + } + if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" { + log.Fatal(`"start"/"end" may not be mixed with "prefix"`) + } + + var rr bigtable.RowRange + if start, end := parsed["start"], parsed["end"]; end != "" { + rr = bigtable.NewRange(start, end) + } else if start != "" { + rr = bigtable.InfiniteRange(start) + } + if prefix := parsed["prefix"]; prefix != "" { + rr = bigtable.PrefixRange(prefix) + } + + var opts []bigtable.ReadOption + if count := parsed["count"]; count != "" { + n, err := strconv.ParseInt(count, 0, 64) + if err != nil { + log.Fatalf("Bad count %q: %v", count, err) + } + opts = append(opts, bigtable.LimitRows(n)) + } + if regex := parsed["regex"]; regex != "" { + opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex))) + } + + // TODO(dsymonds): Support filters. + err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { + printRow(r) + return true + }, opts...) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } +} + +var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`) + +func doSet(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt set
 <table> <row>
family:[column]=val[@ts] ...") + } + tbl := getClient().Open(args[0]) + row := args[1] + mut := bigtable.NewMutation() + for _, arg := range args[2:] { + m := setArg.FindStringSubmatch(arg) + if m == nil { + log.Fatalf("Bad set arg %q", arg) + } + val := m[3] + ts := bigtable.Now() + if i := strings.LastIndex(val, "@"); i >= 0 { + // Try parsing a timestamp. + n, err := strconv.ParseInt(val[i+1:], 0, 64) + if err == nil { + val = val[:i] + ts = bigtable.Timestamp(n) + } + } + mut.Set(m[1], m[2], ts, []byte(val)) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + log.Fatalf("Applying mutation: %v", err) + } +} + +func doSetGCPolicy(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt setgcpolicy
 <table> <family>
( maxage= | maxversions= )") + } + table := args[0] + fam := args[1] + + var pol bigtable.GCPolicy + switch p := args[2]; { + case strings.HasPrefix(p, "maxage="): + d, err := parseDuration(p[7:]) + if err != nil { + log.Fatal(err) + } + pol = bigtable.MaxAgePolicy(d) + case strings.HasPrefix(p, "maxversions="): + n, err := strconv.ParseUint(p[12:], 10, 16) + if err != nil { + log.Fatal(err) + } + pol = bigtable.MaxVersionsPolicy(int(n)) + default: + log.Fatalf("Bad GC policy %q", p) + } + if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil { + log.Fatalf("Setting GC policy: %v", err) + } +} + +// parseDuration parses a duration string. +// It is similar to Go's time.ParseDuration, except with a different set of supported units, +// and only simple formats supported. +func parseDuration(s string) (time.Duration, error) { + // [0-9]+[a-z]+ + + // Split [0-9]+ from [a-z]+. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + } + ds, u := s[:i], s[i:] + if ds == "" || u == "" { + return 0, fmt.Errorf("invalid duration %q", s) + } + // Parse them. + d, err := strconv.ParseUint(ds, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid duration %q: %v", s, err) + } + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, s) + } + if d > uint64((1<<63-1)/unit) { + // overflow + return 0, fmt.Errorf("invalid duration %q overflows", s) + } + return time.Duration(d) * unit, nil +} + +var unitMap = map[string]time.Duration{ + "ms": time.Millisecond, + "s": time.Second, + "m": time.Minute, + "h": time.Hour, + "d": 24 * time.Hour, +} + +func doVersion(ctx context.Context, args ...string) { + fmt.Printf("%s %s %s\n", version, revision, revisionDate) +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go new file mode 100644 index 0000000..350e4f0 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "testing" + "time" +) + +func TestParseDuration(t *testing.T) { + tests := []struct { + in string + // out or fail are mutually exclusive + out time.Duration + fail bool + }{ + {in: "10ms", out: 10 * time.Millisecond}, + {in: "3s", out: 3 * time.Second}, + {in: "60m", out: 60 * time.Minute}, + {in: "12h", out: 12 * time.Hour}, + {in: "7d", out: 168 * time.Hour}, + + {in: "", fail: true}, + {in: "0", fail: true}, + {in: "7ns", fail: true}, + {in: "14mo", fail: true}, + {in: "3.5h", fail: true}, + {in: "106752d", fail: true}, // overflow + } + for _, tc := range tests { + got, err := parseDuration(tc.in) + if !tc.fail && err != nil { + t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) + continue + } + if tc.fail && err == nil { + t.Errorf("parseDuration(%q) did not fail", tc.in) + continue + } + if tc.fail { + continue + } + if got != tc.out { + t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go new file mode 100644 index 0000000..728021d --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go @@ -0,0 +1,213 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to +install the cbt tool, see the +[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview). + +Usage: + + cbt [options] command [arguments] + +The commands are: + + count Count rows in a table + createfamily Create a column family + createtable Create a table + deletecolumn Delete all cells in a column + deletefamily Delete a column family + deleterow Delete a row + deletetable Delete a table + doc Print godoc-suitable documentation for cbt + help Print help text + listinstances List instances in a project + lookup Read from a single row + ls List tables and column families + mddoc Print documentation for cbt in Markdown format + read Read rows + set Set value of a cell + setgcpolicy Set the GC policy for a column family + version Print the current cbt version + +Use "cbt help " for more information about a command. + +The options are: + + -project string + project ID, if unset uses gcloud configured project + -instance string + Cloud Bigtable instance + -creds string + if set, use application credentials in this file + + +Count rows in a table + +Usage: + cbt count
 <table>
+
+
+
+
+Create a column family
+
+Usage:
+	cbt createfamily <table> <family>
+ + + + +Create a table + +Usage: + cbt createtable
 <table> [initial_splits...]
+	initial_splits=row		A row key to be used to initially split the table into multiple tablets. Can be repeated to create multiple splits.
+
+
+
+
+Delete all cells in a column
+
+Usage:
+	cbt deletecolumn <table> <row> <family> <column>
+ + + + +Delete a column family + +Usage: + cbt deletefamily
 <table> <family>
+
+
+
+
+Delete a row
+
+Usage:
+	cbt deleterow <table> <row>
+ + + + +Delete a table + +Usage: + cbt deletetable
 <table>
+
+
+
+
+Print godoc-suitable documentation for cbt
+
+Usage:
+	cbt doc
+
+
+
+
+Print help text
+
+Usage:
+	cbt help [command]
+
+
+
+
+List instances in a project
+
+Usage:
+	cbt listinstances
+
+
+
+
+Read from a single row
+
+Usage:
+	cbt lookup <table> <row>
+ + + + +List tables and column families + +Usage: + cbt ls List tables + cbt ls
 <table>		List column families in <table>
+ + + + +Print documentation for cbt in Markdown format + +Usage: + cbt mddoc + + + + +Read rows + +Usage: + cbt read
 <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>]
+	start=<row>		Start reading at this row
+	end=<row>		Stop reading before this row
+	prefix=<prefix>	Read rows with this prefix
+	regex=<regex>		Read rows with keys matching this regex
+	count=<n>		Read only this many rows
+
+
+
+
+
+Set value of a cell
+
+Usage:
+	cbt set <table> <row>
 family:column=val[@ts] ...
+	family:column=val[@ts] may be repeated to set multiple cells.
+
+	ts is an optional integer timestamp.
+	If it cannot be parsed, the `@ts` part will be
+	interpreted as part of the value.
+
+
+
+
+Set the GC policy for a column family
+
+Usage:
+	cbt setgcpolicy <table> <family> 
( maxage= | maxversions= ) + + maxage= Maximum timestamp age to preserve (e.g. "1h", "4d") + maxversions= Maximum number of versions to preserve + + + + +Print the current cbt version + +Usage: + cbt version + + + + +*/ +package main diff --git a/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go new file mode 100644 index 0000000..f561c14 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +cbtemulator launches the in-memory Cloud Bigtable server on the given address. +*/ +package main + +import ( + "flag" + "fmt" + "log" + + "cloud.google.com/go/bigtable/bttest" + "google.golang.org/grpc" +) + +var ( + host = flag.String("host", "localhost", "the address to bind to on the local machine") + port = flag.Int("port", 9000, "the port number to bind to on the local machine") +) + +func main() { + grpc.EnableTracing = false + flag.Parse() + srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port)) + if err != nil { + log.Fatalf("failed to start emulator: %v", err) + } + + fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) + select {} +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go new file mode 100644 index 0000000..158f912 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go @@ -0,0 +1,204 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Loadtest does some load testing through the Go client library for Cloud Bigtable. +*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "os/signal" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, + "how long to run the load test for; 0 to run forever until SIGTERM") + scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") + csvOutput = flag.String("csv_output", "", + "output path for statistics in .csv format. 
If this file already exists it will be overwritten.") + poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client") + reqCount = flag.Int("req_count", 100, "number of concurrent requests") + + config *cbtconfig.Config + client *bigtable.Client + adminClient *bigtable.AdminClient +) + +func main() { + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 0 { + flag.Usage() + os.Exit(1) + } + + var options []option.ClientOption + if *poolSize > 1 { + options = append(options, + option.WithGRPCConnectionPool(*poolSize), + + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. + option.WithGRPCDialOption(grpc.WithBlock())) + } + + var csvFile *os.File + if *csvOutput != "" { + csvFile, err = os.Create(*csvOutput) + if err != nil { + log.Fatalf("creating csv output file: %v", err) + } + defer csvFile.Close() + log.Printf("Writing statistics to %q ...", *csvOutput) + } + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + defer adminClient.Close() + + // Create a scratch table. + log.Printf("Setting up scratch table...") + if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil { + log.Fatalf("Making scratch table %q: %v", *scratchTable, err) + } + if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil { + log.Fatalf("Making scratch table column family: %v", err) + } + // Upon a successful run, delete the table. Don't bother checking for errors. + defer adminClient.DeleteTable(context.Background(), *scratchTable) + + // Also delete the table on SIGTERM. + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + s := <-c + log.Printf("Caught %v, cleaning scratch table.", s) + adminClient.DeleteTable(context.Background(), *scratchTable) + os.Exit(1) + }() + + log.Printf("Starting load test... 
(run for %v)", *runFor) + tbl := client.Open(*scratchTable) + sem := make(chan int, *reqCount) // limit the number of requests happening at once + var reads, writes stats + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) || *runFor == 0 { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + var stats *stats + defer func() { + stats.Record(ok, time.Since(opStart)) + }() + + row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows + + switch rand.Intn(10) { + default: + // read + stats = &reads + _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + if err != nil { + log.Printf("Error doing read: %v", err) + ok = false + } + case 0, 1, 2, 3, 4: + // write + stats = &writes + mut := bigtable.NewMutation() + mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write + if err := tbl.Apply(context.Background(), row, mut); err != nil { + log.Printf("Error doing mutation: %v", err) + ok = false + } + } + }() + } + wg.Wait() + + readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok) + writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok) + log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg) + log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg) + + if csvFile != nil { + stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile) + } +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go new file mode 100644 index 0000000..72e3743 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go @@ -0,0 +1,155 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Scantest does scan-related load testing against Cloud Bigtable. The logic here +mimics a similar test written using the Java client. 
+*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "sync" + "sync/atomic" + "text/tabwriter" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") + numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans") + rowLimit = flag.Int("row_limit", 10000, "max number of records per scan") + + config *cbtconfig.Config + client *bigtable.Client +) + +func main() { + flag.Usage = func() { + fmt.Printf("Usage: scantest [options] \n\n") + flag.PrintDefaults() + } + + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 1 { + flag.Usage() + os.Exit(1) + } + + table := flag.Arg(0) + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + + log.Printf("Starting scan test... (run for %v)", *runFor) + tbl := client.Open(table) + sem := make(chan int, *numScans) // limit the number of requests happening at once + var scans stats + + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + defer func() { + scans.Record(ok, time.Since(opStart)) + }() + + // Start at a random row key + key := fmt.Sprintf("user%d", rand.Int63()) + limit := bigtable.LimitRows(int64(*rowLimit)) + noop := func(bigtable.Row) bool { return true } + if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil { + log.Printf("Error during scan: %v", err) + ok = false + } + }() + } + wg.Wait() + + agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok) + log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v", + scans.ok, scans.tries, agg, throughputString(agg)) +} + +func throughputString(agg *stat.Aggregate) string { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + rowLimitF := float64(*rowLimit) + fmt.Fprintf( + tw, + "min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n", + rowLimitF/agg.Max.Seconds(), + rowLimitF/agg.Median.Seconds(), + rowLimitF/agg.Min.Seconds()) + tw.Flush() + return buf.String() +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/doc.go b/vendor/cloud.google.com/go/bigtable/doc.go new file mode 100644 index 0000000..0d7706f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/doc.go @@ -0,0 +1,125 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bigtable is an API to Google Cloud Bigtable. + +See https://cloud.google.com/bigtable/docs/ for general product documentation. + +Setup and Credentials + +Use NewClient or NewAdminClient to create a client that can be used to access +the data or admin APIs respectively. Both require credentials that have permission +to access the Cloud Bigtable API. + +If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials +(https://developers.google.com/accounts/docs/application-default-credentials) +is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. + +To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. +For instance, you can use service account credentials by visiting +https://cloud.google.com/console/project/MYPROJECT/apiui/credential, +creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing + jsonKey, err := ioutil.ReadFile(pathToKeyFile) + ... + config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. + ... + client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) + ... +Here, `google` means the golang.org/x/oauth2/google package +and `option` means the google.golang.org/api/option package. + +Reading + +The principal way to read from a Bigtable is to use the ReadRows method on *Table. +A RowRange specifies a contiguous portion of a table. A Filter may be provided through +RowFilter to limit or transform the data that is returned. + tbl := client.Open("mytable") + ... + // Read all the rows starting with "com.google.", + // but only fetch the columns in the "links" family. + rr := bigtable.PrefixRange("com.google.") + err := tbl.ReadRows(ctx, rr, func(r Row) bool { + // do something with r + return true // keep going + }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) + ... + +To read a single row, use the ReadRow helper method. + r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key + ... + +Writing + +This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. +The former expresses idempotent operations. +The latter expresses non-idempotent operations and returns the new values of updated cells. +These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), +building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite +methods on a Table. + +For instance, to set a couple of cells in a table, + tbl := client.Open("mytable") + mut := bigtable.NewMutation() + mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) + mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) + err := tbl.Apply(ctx, "com.google.cloud", mut) + ... 
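+
+A Mutation can also be applied conditionally with NewCondMutation; a minimal
+sketch (the filter and row key here are illustrative only):
+	mut := bigtable.NewMutation()
+	mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
+	cond := bigtable.NewCondMutation(bigtable.FamilyFilter("links"), mut, nil)
+	err := tbl.Apply(ctx, "com.google.cloud", cond)
+	...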
+ +To increment an encoded value in one cell, + tbl := client.Open("mytable") + rmw := bigtable.NewReadModifyWrite() + rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" + r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) + ... + +Retries + +If a read or write operation encounters a transient error it will be retried until a successful +response, an unretryable error or the context deadline is reached. Non-idempotent writes (where +the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls +will not re-scan rows that have already been processed. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + +*/ +package bigtable // import "cloud.google.com/go/bigtable" + +// Scope constants for authentication credentials. +// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. +const ( + // Scope is the OAuth scope for Cloud Bigtable data operations. + Scope = "https://www.googleapis.com/auth/bigtable.data" + // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. + ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" + + // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. + AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" + + // InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations. + InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" +) + +// clientUserAgent identifies the version of this package. +// It should be bumped upon significant changes only. +const clientUserAgent = "cbt-go/20160628" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" diff --git a/vendor/cloud.google.com/go/bigtable/export_test.go b/vendor/cloud.google.com/go/bigtable/export_test.go new file mode 100644 index 0000000..978530b --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/export_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bigtable + +import ( + "errors" + "flag" + "fmt" + "strings" + "time" + + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var legacyUseProd string +var integrationConfig IntegrationTestConfig + +func init() { + c := &integrationConfig + + flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator") + flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port") + flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port") + flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test") + flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use") + flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use") + flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create") + + // Backwards compat + flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`) + +} + +// IntegrationTestConfig contains parameters to pick and setup a IntegrationEnv for testing +type IntegrationTestConfig struct { + UseProd bool + AdminEndpoint string + DataEndpoint string + Project string + Instance string + Cluster string + Table string +} + +// IntegrationEnv represents a testing environment. +// The environment can be implemented using production or an emulator +type IntegrationEnv interface { + Config() IntegrationTestConfig + NewAdminClient() (*AdminClient, error) + // NewInstanceAdminClient will return nil if instance administration is unsupported in this environment + NewInstanceAdminClient() (*InstanceAdminClient, error) + NewClient() (*Client, error) + Close() +} + +// NewIntegrationEnv creates a new environment based on the command line args +func NewIntegrationEnv() (IntegrationEnv, error) { + c := integrationConfig + + if legacyUseProd != "" { + fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*") + parts := strings.SplitN(legacyUseProd, ",", 3) + c.UseProd = true + c.Project = parts[0] + c.Instance = parts[1] + c.Table = parts[2] + } + + if integrationConfig.UseProd { + return NewProdEnv(c) + } else { + return NewEmulatedEnv(c) + } +} + +// EmulatedEnv encapsulates the state of an emulator +type EmulatedEnv struct { + config IntegrationTestConfig + server *bttest.Server +} + +// NewEmulatedEnv builds and starts the emulator based environment +func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) { + srv, err := bttest.NewServer("127.0.0.1:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20)) + if err != nil { + return nil, err + } + + if config.Project == "" { + config.Project = "project" + } + if config.Instance == "" { + config.Instance = "instance" + } + if config.Table == "" { + config.Table = "mytable" + } + config.AdminEndpoint = srv.Addr + config.DataEndpoint = srv.Addr + + env := &EmulatedEnv{ + config: config, + server: srv, + } + return env, nil +} + +// Close stops & cleans up the emulator +func (e *EmulatedEnv) Close() { + e.server.Close() +} + +// Config gets the config used to build this environment +func (e *EmulatedEnv) Config() IntegrationTestConfig { + return e.config +} + +// NewAdminClient builds a new connected admin client for this environment +func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + conn, err := 
grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, err + } + return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) +} + +// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented. +func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) { + return nil, nil +} + +// NewClient builds a new connected data client for this environment +func (e *EmulatedEnv) NewClient() (*Client, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))) + if err != nil { + return nil, err + } + return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) +} + +// ProdEnv encapsulates the state necessary to connect to the external Bigtable service +type ProdEnv struct { + config IntegrationTestConfig +} + +// NewProdEnv builds the environment representation +func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) { + if config.Project == "" { + return nil, errors.New("Project not set") + } + if config.Instance == "" { + return nil, errors.New("Instance not set") + } + if config.Table == "" { + return nil, errors.New("Table not set") + } + + return &ProdEnv{config}, nil +} + +// Close is a no-op for production environments +func (e *ProdEnv) Close() {} + +// Config gets the config used to build this environment +func (e *ProdEnv) Config() IntegrationTestConfig { + return e.config +} + +// NewAdminClient builds a new connected admin client for this environment +func (e *ProdEnv) NewAdminClient() (*AdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.AdminEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...) +} + +// NewInstanceAdminClient returns a new connected instance admin client for this environment +func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.AdminEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewInstanceAdminClient(ctx, e.config.Project, clientOpts...) +} + +// NewClient builds a connected data client for this environment +func (e *ProdEnv) NewClient() (*Client, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.DataEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...) +} diff --git a/vendor/cloud.google.com/go/bigtable/filter.go b/vendor/cloud.google.com/go/bigtable/filter.go new file mode 100644 index 0000000..b8e453b --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/filter.go @@ -0,0 +1,318 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Filter represents a row filter. +type Filter interface { + String() string + proto() *btpb.RowFilter +} + +// ChainFilters returns a filter that applies a sequence of filters. +func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } + +type chainFilter struct { + sub []Filter +} + +func (cf chainFilter) String() string { + var ss []string + for _, sf := range cf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " | ") + ")" +} + +func (cf chainFilter) proto() *btpb.RowFilter { + chain := &btpb.RowFilter_Chain{} + for _, sf := range cf.sub { + chain.Filters = append(chain.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Chain_{chain}, + } +} + +// InterleaveFilters returns a filter that applies a set of filters in parallel +// and interleaves the results. +func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } + +type interleaveFilter struct { + sub []Filter +} + +func (ilf interleaveFilter) String() string { + var ss []string + for _, sf := range ilf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " + ") + ")" +} + +func (ilf interleaveFilter) proto() *btpb.RowFilter { + inter := &btpb.RowFilter_Interleave{} + for _, sf := range ilf.sub { + inter.Filters = append(inter.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{inter}, + } +} + +// RowKeyFilter returns a filter that matches cells from rows whose +// key matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } + +type rowKeyFilter string + +func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } + +func (rkf rowKeyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} +} + +// FamilyFilter returns a filter that matches cells whose family name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } + +type familyFilter string + +func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } + +func (ff familyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}} +} + +// ColumnFilter returns a filter that matches cells whose column name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. 
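+// Note that "." is an RE2 metacharacter: to match a literal dot in a
+// qualifier such as "golang.org", escape it, e.g. ColumnFilter("golang[.]org")
+// (illustrative qualifier).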
+func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } + +type columnFilter string + +func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } + +func (cf columnFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} +} + +// ValueFilter returns a filter that matches cells whose value +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ValueFilter(pattern string) Filter { return valueFilter(pattern) } + +type valueFilter string + +func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } + +func (vf valueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}} +} + +// LatestNFilter returns a filter that matches the most recent N cells in each column. +func LatestNFilter(n int) Filter { return latestNFilter(n) } + +type latestNFilter int32 + +func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } + +func (lnf latestNFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} +} + +// StripValueFilter returns a filter that replaces each value with the empty string. +func StripValueFilter() Filter { return stripValueFilter{} } + +type stripValueFilter struct{} + +func (stripValueFilter) String() string { return "strip_value()" } +func (stripValueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}} +} + +// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero +// time means no bound. +// The timestamp will be truncated to millisecond granularity. +func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter { + trf := timestampRangeFilter{} + if !startTime.IsZero() { + trf.startTime = Time(startTime) + } + if !endTime.IsZero() { + trf.endTime = Time(endTime) + } + return trf +} + +// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds, +// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound. +// The timestamp will be truncated to millisecond granularity. +func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter { + return timestampRangeFilter{startTime, endTime} +} + +type timestampRangeFilter struct { + startTime Timestamp + endTime Timestamp +} + +func (trf timestampRangeFilter) String() string { + return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime) +} + +func (trf timestampRangeFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_TimestampRangeFilter{ + &btpb.TimestampRange{ + int64(trf.startTime.TruncateToMilliseconds()), + int64(trf.endTime.TruncateToMilliseconds()), + }, + }} +} + +// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single +// family, as specified by an inclusive start qualifier and exclusive end qualifier. 
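+// An empty start or end string leaves that end of the range unbounded.
+// For example, ColumnRangeFilter("links", "a", "m") matches qualifiers q in
+// family "links" with "a" <= q < "m" (illustrative values).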
+func ColumnRangeFilter(family, start, end string) Filter { + return columnRangeFilter{family, start, end} +} + +type columnRangeFilter struct { + family string + start string + end string +} + +func (crf columnRangeFilter) String() string { + return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end) +} + +func (crf columnRangeFilter) proto() *btpb.RowFilter { + r := &btpb.ColumnRange{FamilyName: crf.family} + if crf.start != "" { + r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)} + } + if crf.end != "" { + r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)} + } + return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}} +} + +// ValueRangeFilter returns a filter that matches cells with values that fall within +// the given range, as specified by an inclusive start value and exclusive end value. +func ValueRangeFilter(start, end []byte) Filter { + return valueRangeFilter{start, end} +} + +type valueRangeFilter struct { + start []byte + end []byte +} + +func (vrf valueRangeFilter) String() string { + return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end) +} + +func (vrf valueRangeFilter) proto() *btpb.RowFilter { + r := &btpb.ValueRange{} + if vrf.start != nil { + r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start} + } + if vrf.end != nil { + r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end} + } + return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}} +} + +// ConditionFilter returns a filter that evaluates to one of two possible filters depending +// on whether or not the given predicate filter matches at least one cell. +// If the matched filter is nil then no results will be returned. +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, condition filters have poor performance, especially +// when filters are set for the false condition. +func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter { + return conditionFilter{predicateFilter, trueFilter, falseFilter} +} + +type conditionFilter struct { + predicateFilter Filter + trueFilter Filter + falseFilter Filter +} + +func (cf conditionFilter) String() string { + return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter) +} + +func (cf conditionFilter) proto() *btpb.RowFilter { + var tf *btpb.RowFilter + var ff *btpb.RowFilter + if cf.trueFilter != nil { + tf = cf.trueFilter.proto() + } + if cf.falseFilter != nil { + ff = cf.falseFilter.proto() + } + return &btpb.RowFilter{ + &btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{ + cf.predicateFilter.proto(), + tf, + ff, + }}} +} + +// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells. +func CellsPerRowOffsetFilter(n int) Filter { + return cellsPerRowOffsetFilter(n) +} + +type cellsPerRowOffsetFilter int32 + +func (cof cellsPerRowOffsetFilter) String() string { + return fmt.Sprintf("cells_per_row_offset(%d)", cof) +} + +func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}} +} + +// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row. 
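+// For example, CellsPerRowLimitFilter(3) returns at most three cells per
+// row, counted across all of the row's columns and versions.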
+func CellsPerRowLimitFilter(n int) Filter { + return cellsPerRowLimitFilter(n) +} + +type cellsPerRowLimitFilter int32 + +func (clf cellsPerRowLimitFilter) String() string { + return fmt.Sprintf("cells_per_row_limit(%d)", clf) +} + +func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}} +} + +// TODO(dsymonds): More filters: sampling diff --git a/vendor/cloud.google.com/go/bigtable/gc.go b/vendor/cloud.google.com/go/bigtable/gc.go new file mode 100644 index 0000000..e7054ad --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/gc.go @@ -0,0 +1,158 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" + bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +) + +// A GCPolicy represents a rule that determines which cells are eligible for garbage collection. +type GCPolicy interface { + String() string + proto() *bttdpb.GcRule +} + +// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. +func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } + +type intersectionPolicy struct { + sub []GCPolicy +} + +func (ip intersectionPolicy) String() string { + var ss []string + for _, sp := range ip.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " && ") + ")" +} + +func (ip intersectionPolicy) proto() *bttdpb.GcRule { + inter := &bttdpb.GcRule_Intersection{} + for _, sp := range ip.sub { + inter.Rules = append(inter.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Intersection_{inter}, + } +} + +// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. +func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } + +type unionPolicy struct { + sub []GCPolicy +} + +func (up unionPolicy) String() string { + var ss []string + for _, sp := range up.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " || ") + ")" +} + +func (up unionPolicy) proto() *bttdpb.GcRule { + union := &bttdpb.GcRule_Union{} + for _, sp := range up.sub { + union.Rules = append(union.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Union_{union}, + } +} + +// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell +// except for the most recent n. +func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } + +type maxVersionsPolicy int + +func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } + +func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { + return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} +} + +// MaxAgePolicy returns a GC policy that applies to all cells +// older than the given age. 
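+// For example, MaxAgePolicy(7*24*time.Hour) marks cells older than one
+// week as eligible for garbage collection.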
+func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } + +type maxAgePolicy time.Duration + +var units = []struct { + d time.Duration + suffix string +}{ + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, +} + +func (ma maxAgePolicy) String() string { + d := time.Duration(ma) + for _, u := range units { + if d%u.d == 0 { + return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) + } + } + return fmt.Sprintf("age() > %d", d/time.Microsecond) +} + +func (ma maxAgePolicy) proto() *bttdpb.GcRule { + // This doesn't handle overflows, etc. + // Fix this if people care about GC policies over 290 years. + ns := time.Duration(ma).Nanoseconds() + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ + Seconds: ns / 1e9, + Nanos: int32(ns % 1e9), + }}, + } +} + +// GCRuleToString converts the given GcRule proto to a user-visible string. +func GCRuleToString(rule *bttdpb.GcRule) string { + if rule == nil { + return "" + } + switch r := rule.Rule.(type) { + case *bttdpb.GcRule_MaxNumVersions: + return MaxVersionsPolicy(int(r.MaxNumVersions)).String() + case *bttdpb.GcRule_MaxAge: + return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String() + case *bttdpb.GcRule_Intersection_: + return joinRules(r.Intersection.Rules, " && ") + case *bttdpb.GcRule_Union_: + return joinRules(r.Union.Rules, " || ") + default: + return "" + } +} + +func joinRules(rules []*bttdpb.GcRule, sep string) string { + var chunks []string + for _, r := range rules { + chunks = append(chunks, GCRuleToString(r)) + } + return "(" + strings.Join(chunks, sep) + ")" +} diff --git a/vendor/cloud.google.com/go/bigtable/gc_test.go b/vendor/cloud.google.com/go/bigtable/gc_test.go new file mode 100644 index 0000000..0c77958 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/gc_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package bigtable + +import ( + "testing" + "time" + + bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +) + +func TestGcRuleToString(t *testing.T) { + + intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour)) + + var tests = []struct { + proto *bttdpb.GcRule + want string + }{ + {MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"}, + {MaxVersionsPolicy(5).proto(), "versions() > 5"}, + {intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"}, + {UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(), + "((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"}, + } + + for _, test := range tests { + got := GCRuleToString(test.proto) + if got != test.want { + t.Errorf("got gc rule string: %v, wanted: %v", got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go b/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go new file mode 100644 index 0000000..073406f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go @@ -0,0 +1,246 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud. +package cbtconfig + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" + "google.golang.org/grpc/credentials" +) + +// Config represents a configuration. +type Config struct { + Project, Instance string // required + Creds string // optional + AdminEndpoint string // optional + DataEndpoint string // optional + CertFile string // optional + TokenSource oauth2.TokenSource // derived + TLSCreds credentials.TransportCredentials // derived +} + +type RequiredFlags uint + +const NoneRequired RequiredFlags = 0 +const ( + ProjectRequired RequiredFlags = 1 << iota + InstanceRequired +) +const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired + +// RegisterFlags registers a set of standard flags for this config. +// It should be called before flag.Parse. +func (c *Config) RegisterFlags() { + flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project") + flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance") + flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") + flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint") + flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint") + flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file") +} + +// CheckFlags checks that the required config values are set. 
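+// It also loads TLS certificates from -cert-file when that flag is set,
+// and consults gcloud for required values that are still unset.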
+func (c *Config) CheckFlags(required RequiredFlags) error { + var missing []string + if c.CertFile != "" { + b, err := ioutil.ReadFile(c.CertFile) + if err != nil { + return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err) + } + + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return fmt.Errorf("Failed to append certificates from %s", c.CertFile) + } + + c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp}) + } + if required != NoneRequired { + c.SetFromGcloud() + } + if required&ProjectRequired != 0 && c.Project == "" { + missing = append(missing, "-project") + } + if required&InstanceRequired != 0 && c.Instance == "" { + missing = append(missing, "-instance") + } + if len(missing) > 0 { + return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) + } + return nil +} + +// Filename returns the filename consulted for standard configuration. +func Filename() string { + // TODO(dsymonds): Might need tweaking for Windows. + return filepath.Join(os.Getenv("HOME"), ".cbtrc") +} + +// Load loads a .cbtrc file. +// If the file is not present, an empty config is returned. +func Load() (*Config, error) { + filename := Filename() + data, err := ioutil.ReadFile(filename) + if err != nil { + // silent fail if the file isn't there + if os.IsNotExist(err) { + return &Config{}, nil + } + return nil, fmt.Errorf("Reading %s: %v", filename, err) + } + c := new(Config) + s := bufio.NewScanner(bytes.NewReader(data)) + for s.Scan() { + line := s.Text() + i := strings.Index(line, "=") + if i < 0 { + return nil, fmt.Errorf("Bad line in %s: %q", filename, line) + } + key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) + switch key { + default: + return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) + case "project": + c.Project = val + case "instance": + c.Instance = val + case "creds": + c.Creds = val + case "admin-endpoint": + c.AdminEndpoint = val + case "data-endpoint": + c.DataEndpoint = val + } + + } + return c, s.Err() +} + +type GcloudCredential struct { + AccessToken string `json:"access_token"` + Expiry time.Time `json:"token_expiry"` +} + +func (cred *GcloudCredential) Token() *oauth2.Token { + return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry} +} + +type GcloudConfig struct { + Configuration struct { + Properties struct { + Core struct { + Project string `json:"project"` + } `json:"core"` + } `json:"properties"` + } `json:"configuration"` + Credential GcloudCredential `json:"credential"` +} + +type GcloudCmdTokenSource struct { + Command string + Args []string +} + +// Token implements the oauth2.TokenSource interface +func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) { + gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args) + if err != nil { + return nil, err + } + return gcloudConfig.Credential.Token(), nil +} + +// LoadGcloudConfig retrieves the gcloud configuration values we need use via the +// 'config-helper' command +func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) { + out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output() + if err != nil { + return nil, fmt.Errorf("Could not retrieve gcloud configuration") + } + + var gcloudConfig GcloudConfig + if err := json.Unmarshal(out, &gcloudConfig); err != nil { + return nil, fmt.Errorf("Could not parse gcloud configuration") + } + + return &gcloudConfig, nil +} + +// SetFromGcloud retrieves and sets any missing config values from the gcloud +// configuration if possible 
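+// by shelling out to the gcloud config-helper command.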
+func (c *Config) SetFromGcloud() error {
+
+	if c.Creds == "" {
+		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
+		if c.Creds == "" {
+			log.Printf("-creds flag unset, will use gcloud credential")
+		}
+	} else {
+		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
+	}
+
+	if c.Project == "" {
+		log.Printf("-project flag unset, will use gcloud active project")
+	}
+
+	if c.Creds != "" && c.Project != "" {
+		return nil
+	}
+
+	gcloudCmd := "gcloud"
+	if runtime.GOOS == "windows" {
+		gcloudCmd = gcloudCmd + ".cmd"
+	}
+
+	gcloudCmdArgs := []string{"config", "config-helper",
+		"--format=json(configuration.properties.core.project,credential)"}
+
+	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
+	if err != nil {
+		return err
+	}
+
+	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
+		log.Printf("gcloud active project is \"%s\"",
+			gcloudConfig.Configuration.Properties.Core.Project)
+		c.Project = gcloudConfig.Configuration.Properties.Core.Project
+	}
+
+	if c.Creds == "" {
+		c.TokenSource = oauth2.ReuseTokenSource(
+			gcloudConfig.Credential.Token(),
+			&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
+	}
+
+	return nil
+}
diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go
new file mode 100644
index 0000000..60a18be
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
+package gax
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type CallOption interface {
+	Resolve(*CallSettings)
+}
+
+type callOptions []CallOption
+
+func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
+	for _, opt := range opts {
+		opt.Resolve(s)
+	}
+	return s
+}
+
+// Encapsulates the call settings for a particular API call.
+type CallSettings struct {
+	Timeout       time.Duration
+	RetrySettings RetrySettings
+}
+
+// Per-call configurable settings for retrying upon transient failure.
+type RetrySettings struct {
+	RetryCodes      map[codes.Code]bool
+	BackoffSettings BackoffSettings
+}
+
+// Parameters to the exponential backoff algorithm for retrying.
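+// The retry delay starts at Initial, is multiplied by Multiplier after each
+// failed attempt, and is capped at Max (see invokeWithRetry in invoke.go).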
+type BackoffSettings struct {
+	DelayTimeoutSettings MultipliableDuration
+	RPCTimeoutSettings   MultipliableDuration
+}
+
+type MultipliableDuration struct {
+	Initial    time.Duration
+	Max        time.Duration
+	Multiplier float64
+}
+
+func (w CallSettings) Resolve(s *CallSettings) {
+	s.Timeout = w.Timeout
+	s.RetrySettings = w.RetrySettings
+
+	s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes))
+	for key, value := range w.RetrySettings.RetryCodes {
+		s.RetrySettings.RetryCodes[key] = value
+	}
+}
+
+type withRetryCodes []codes.Code
+
+func (w withRetryCodes) Resolve(s *CallSettings) {
+	s.RetrySettings.RetryCodes = make(map[codes.Code]bool)
+	for _, code := range w {
+		s.RetrySettings.RetryCodes[code] = true
+	}
+}
+
+// WithRetryCodes sets a list of Google API canonical error codes upon which a
+// retry should be attempted.
func WithRetryCodes(retryCodes []codes.Code) CallOption {
+	return withRetryCodes(retryCodes)
+}
+
+type withDelayTimeoutSettings MultipliableDuration
+
+func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
+	s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
+}
+
+// WithDelayTimeoutSettings specifies:
+// - The initial delay time between the completion of the
+//   first failed request and the initiation of the first retrying
+//   request.
+// - The multiplier by which to increase the delay time between the
+//   completion of failed requests, and the initiation of the subsequent
+//   retrying request.
+// - The maximum delay time between requests. When this
+//   value is reached, the multiplier will no longer be used to
+//   increase the delay time.
+func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
+	return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
+}
diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
new file mode 100644
index 0000000..b7be7d4
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2015 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
+package gax
+
+import (
+	"log"
+	"math/rand"
+	"os"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// A user defined call stub.
+type APICall func(context.Context) error
+
+// scaleDuration returns the product of a and mult.
+func scaleDuration(a time.Duration, mult float64) time.Duration {
+	ns := float64(a) * mult
+	return time.Duration(ns)
+}
+
+// invokeWithRetry calls stub using an exponential backoff retry mechanism
+// based on the values provided in callSettings.
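+// Each backoff sleep is a random duration of up to the current delay,
+// which helps avoid many callers retrying in lockstep.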
+func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error { + retrySettings := callSettings.RetrySettings + backoffSettings := callSettings.RetrySettings.BackoffSettings + delay := backoffSettings.DelayTimeoutSettings.Initial + for { + // If the deadline is exceeded... + if ctx.Err() != nil { + return ctx.Err() + } + err := stub(ctx) + code := grpc.Code(err) + if code == codes.OK { + return nil + } + + if !retrySettings.RetryCodes[code] { + return err + } + + // Sleep a random amount up to the current delay + d := time.Duration(rand.Int63n(int64(delay))) + delayCtx, _ := context.WithTimeout(ctx, delay) + logger.Printf("Retryable error: %v, retrying in %v", err, d) + <-delayCtx.Done() + + delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier) + if delay > backoffSettings.DelayTimeoutSettings.Max { + delay = backoffSettings.DelayTimeoutSettings.Max + } + } +} + +// Invoke calls stub with a child of context modified by the specified options. +func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error { + settings := &CallSettings{} + callOptions(opts).Resolve(settings) + if len(settings.RetrySettings.RetryCodes) > 0 { + return invokeWithRetry(ctx, stub, *settings) + } + return stub(ctx) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go new file mode 100644 index 0000000..f32e834 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package gax + +import ( + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestRandomizedDelays(t *testing.T) { + max := 200 * time.Millisecond + settings := []CallOption{ + WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}), + WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5), + } + + deadline := time.Now().Add(1 * time.Second) + ctx, _ := context.WithDeadline(context.Background(), deadline) + var invokeTime time.Time + Invoke(ctx, func(childCtx context.Context) error { + // Keep failing, make sure we never slept more than max (plus a fudge factor) + if !invokeTime.IsZero() { + if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) { + t.Logf("Slept too long. Got: %v, want: %v", got, max) + } + } + invokeTime = time.Now() + // Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 + errf := grpc.Errorf + return errf(codes.Unavailable, "") + }, settings...) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/option/option.go b/vendor/cloud.google.com/go/bigtable/internal/option/option.go new file mode 100644 index 0000000..3b9072e --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/option/option.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package option contains common code for dealing with client options. +package option + +import ( + "fmt" + "os" + + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// DefaultClientOptions returns the default client options to use for the +// client's gRPC connection. +func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) { + var o []option.ClientOption + // Check the environment variables for the bigtable emulator. + // Dial it directly and don't pass any credentials. + if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("emulator grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(endpoint), + option.WithScopes(scope), + option.WithUserAgent(userAgent), + } + } + return o, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go new file mode 100644 index 0000000..5fb047f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go @@ -0,0 +1,144 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stat + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "math" + "sort" + "strconv" + "text/tabwriter" + "time" +) + +type byDuration []time.Duration + +func (data byDuration) Len() int { return len(data) } +func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] } +func (data byDuration) Less(i, j int) bool { return data[i] < data[j] } + +// quantile returns a value representing the kth of q quantiles. +// May alter the order of data. 
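+// For example, quantile(data, 1, 2) yields the median, and
+// quantile(data, 95, 100) the 95th percentile (as used by NewAggregate).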
+func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) { + if len(data) < 1 { + return 0, false + } + if k > q { + return 0, false + } + if k < 0 || q < 1 { + return 0, false + } + + sort.Sort(byDuration(data)) + + if k == 0 { + return data[0], true + } + if k == q { + return data[len(data)-1], true + } + + bucketSize := float64(len(data)-1) / float64(q) + i := float64(k) * bucketSize + + lower := int(math.Trunc(i)) + var upper int + if i > float64(lower) && lower+1 < len(data) { + // If the quantile lies between two elements + upper = lower + 1 + } else { + upper = lower + } + weightUpper := i - float64(lower) + weightLower := 1 - weightUpper + return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true +} + +type Aggregate struct { + Name string + Count, Errors int + Min, Median, Max time.Duration + P75, P90, P95, P99 time.Duration // percentiles +} + +// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. +func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate { + agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount} + + if len(latencies) == 0 { + return nil + } + var ok bool + if agg.Min, ok = quantile(latencies, 0, 2); !ok { + return nil + } + if agg.Median, ok = quantile(latencies, 1, 2); !ok { + return nil + } + if agg.Max, ok = quantile(latencies, 2, 2); !ok { + return nil + } + if agg.P75, ok = quantile(latencies, 75, 100); !ok { + return nil + } + if agg.P90, ok = quantile(latencies, 90, 100); !ok { + return nil + } + if agg.P95, ok = quantile(latencies, 95, 100); !ok { + return nil + } + if agg.P99, ok = quantile(latencies, 99, 100); !ok { + return nil + } + return &agg +} + +func (agg *Aggregate) String() string { + if agg == nil { + return "no data" + } + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", + agg.Min, agg.Median, agg.Max, agg.P95, agg.P99) + tw.Flush() + return buf.String() +} + +// WriteCSV writes a csv file to the given Writer, +// with a header row and one row per aggregate. +func WriteCSV(aggs []*Aggregate, iow io.Writer) error { + w := csv.NewWriter(iow) + defer w.Flush() + err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"}) + if err != nil { + return err + } + for _, agg := range aggs { + err = w.Write([]string{ + agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors), + agg.Min.String(), agg.Median.String(), agg.Max.String(), + agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(), + }) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader.go b/vendor/cloud.google.com/go/bigtable/reader.go new file mode 100644 index 0000000..4af2f70 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader.go @@ -0,0 +1,250 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "bytes" + "fmt" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Row is returned by ReadRows. The map is keyed by column family (the prefix +// of the column name before the colon). The values are the returned ReadItems +// for that column family in the order returned by Read. +type Row map[string][]ReadItem + +// Key returns the row's key, or "" if the row is empty. +func (r Row) Key() string { + for _, items := range r { + if len(items) > 0 { + return items[0].Row + } + } + return "" +} + +// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. +type ReadItem struct { + Row, Column string + Timestamp Timestamp + Value []byte +} + +// The current state of the read rows state machine. +type rrState int64 + +const ( + newRow rrState = iota + rowInProgress + cellInProgress +) + +// chunkReader handles cell chunks from the read rows response and combines +// them into full Rows. +type chunkReader struct { + state rrState + curKey []byte + curFam string + curQual []byte + curTS int64 + curVal []byte + curRow Row + lastKey string +} + +// newChunkReader returns a new chunkReader for handling read rows responses. +func newChunkReader() *chunkReader { + return &chunkReader{state: newRow} +} + +// Process takes a cell chunk and returns a new Row if the given chunk +// completes a Row, or nil otherwise. +func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) { + var row Row + switch cr.state { + case newRow: + if err := cr.validateNewRow(cc); err != nil { + return nil, err + } + + cr.curRow = make(Row) + cr.curKey = cc.RowKey + cr.curFam = cc.FamilyName.Value + cr.curQual = cc.Qualifier.Value + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case rowInProgress: + if err := cr.validateRowInProgress(cc); err != nil { + return nil, err + } + + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + + if cc.FamilyName != nil { + cr.curFam = cc.FamilyName.Value + } + if cc.Qualifier != nil { + cr.curQual = cc.Qualifier.Value + } + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case cellInProgress: + if err := cr.validateCellInProgress(cc); err != nil { + return nil, err + } + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + row = cr.handleCellValue(cc) + } + + return row, nil +} + +// Close must be called after all cell chunks from the response +// have been processed. An error will be returned if the reader is +// in an invalid state, in which case the error should be propagated to the caller. +func (cr *chunkReader) Close() error { + if cr.state != newRow { + return fmt.Errorf("invalid state for end of stream %q", cr.state) + } + return nil +} + +// handleCellValue returns a Row if the cell value includes a commit, otherwise nil. +func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row { + if cc.ValueSize > 0 { + // ValueSize is specified so expect a split value of ValueSize bytes + if cr.curVal == nil { + cr.curVal = make([]byte, 0, cc.ValueSize) + } + cr.curVal = append(cr.curVal, cc.Value...) + cr.state = cellInProgress + } else { + // This cell is either the complete value or the last chunk of a split + if cr.curVal == nil { + cr.curVal = cc.Value + } else { + cr.curVal = append(cr.curVal, cc.Value...) 
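+			// The last chunk of a split value arrives with ValueSize == 0,
+			// so curVal now holds the complete cell value.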
+ } + cr.finishCell() + + if cc.GetCommitRow() { + return cr.commitRow() + } else { + cr.state = rowInProgress + } + } + + return nil +} + +func (cr *chunkReader) finishCell() { + ri := ReadItem{ + Row: string(cr.curKey), + Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual), + Timestamp: Timestamp(cr.curTS), + Value: cr.curVal, + } + cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri) + cr.curVal = nil +} + +func (cr *chunkReader) commitRow() Row { + row := cr.curRow + cr.lastKey = cr.curRow.Key() + cr.resetToNewRow() + return row +} + +func (cr *chunkReader) resetToNewRow() { + cr.curKey = nil + cr.curFam = "" + cr.curQual = nil + cr.curVal = nil + cr.curRow = nil + cr.curTS = 0 + cr.state = newRow +} + +func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error { + if cc.GetResetRow() { + return fmt.Errorf("reset_row not allowed between rows") + } + if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil { + return fmt.Errorf("missing key field for new row %v", cc) + } + if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) { + return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey)) + } + return nil +} + +func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) { + return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey) + } + if cc.FamilyName != nil && cc.Qualifier == nil { + return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName) + } + return nil +} + +func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cr.curVal == nil { + return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc) + } + if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) { + return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc) + } + return nil +} + +func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool { + return cc.RowKey != nil || + cc.FamilyName != nil || + cc.Qualifier != nil || + cc.TimestampMicros != 0 +} + +// Validate a RowStatus, commit or reset, if present. +func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error { + // Resets can't be specified with any other part of a cell + if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) || + cc.Value != nil || + cc.ValueSize != 0 || + cc.Labels != nil) { + return fmt.Errorf("reset must not be specified with other fields %v", cc) + } + if cc.GetCommitRow() && cc.ValueSize > 0 { + return fmt.Errorf("commit row found in between chunks in a cell") + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader_test.go b/vendor/cloud.google.com/go/bigtable/reader_test.go new file mode 100644 index 0000000..f202891 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader_test.go @@ -0,0 +1,344 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/wrappers" + btspb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// Indicates that a field in the proto should be omitted, rather than included +// as a wrapped empty string. +const nilStr = "<>" + +func TestSingleCell(t *testing.T) { + cr := newChunkReader() + + // All in one cell + row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + if len(row["fm"]) != 1 { + t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) + } + want := []ReadItem{ri("rk", "fm", "col", 1, "value")} + if !testutil.Equal(row["fm"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false)) + row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "val1"), + ri("rs", "fm1", "col1", 1, "val2"), + ri("rs", "fm1", "col2", 0, "val3"), + } + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + want = []ReadItem{ + ri("rs", "fm2", "col1", 0, "val4"), + ri("rs", "fm2", "col2", 1, "extralongval5"), + } + if !testutil.Equal(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestSplitCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false)) + cr.Process(ccData("world", 0, false)) + row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "hello world"), + ri("rs", "fm1", "col2", 0, "val2"), + } + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleRows(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !testutil.Equal(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err 
!= nil { + t.Fatalf("Close: %v", err) + } +} + +func TestBlankQualifier(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !testutil.Equal(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestReset(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(ccReset()) + row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !testutil.Equal(row["fm1"], want) { + t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestNewFamEmptyQualifier(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + _, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true)) + if err == nil { + t.Fatalf("Expected error on second chunk with no qualifier set") + } +} + +// The read rows acceptance test reads a json file specifying a number of tests, +// each consisting of one or more cell chunk text protos and one or more resulting +// cells or errors. +type AcceptanceTest struct { + Tests []TestCase `json:"tests"` +} + +type TestCase struct { + Name string `json:"name"` + Chunks []string `json:"chunks"` + Results []TestResult `json:"results"` +} + +type TestResult struct { + RK string `json:"rk"` + FM string `json:"fm"` + Qual string `json:"qual"` + TS int64 `json:"ts"` + Value string `json:"value"` + Error bool `json:"error"` // If true, expect an error. Ignore any other field. 
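+	// Expected and actual results are compared as unordered sets; see runTestCase.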
+} + +func TestAcceptance(t *testing.T) { + testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json") + if err != nil { + t.Fatalf("could not open acceptance test file %v", err) + } + + var accTest AcceptanceTest + err = json.Unmarshal(testJson, &accTest) + if err != nil { + t.Fatalf("could not parse acceptance test file: %v", err) + } + + for _, test := range accTest.Tests { + runTestCase(t, test) + } +} + +func runTestCase(t *testing.T, test TestCase) { + // Increment an index into the result array as we get results + cr := newChunkReader() + var results []TestResult + var seenErr bool + for _, chunkText := range test.Chunks { + // Parse and pass each cell chunk to the ChunkReader + cc := &btspb.ReadRowsResponse_CellChunk{} + err := proto.UnmarshalText(chunkText, cc) + if err != nil { + t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err) + return + } + row, err := cr.Process(cc) + if err != nil { + results = append(results, TestResult{Error: true}) + seenErr = true + break + } else { + // Turn the Row into TestResults + for fm, ris := range row { + for _, ri := range ris { + tr := TestResult{ + RK: ri.Row, + FM: fm, + Qual: strings.Split(ri.Column, ":")[1], + TS: int64(ri.Timestamp), + Value: string(ri.Value), + } + results = append(results, tr) + } + } + } + } + + // Only Close if we don't have an error yet, otherwise Close: is expected. + if !seenErr { + err := cr.Close() + if err != nil { + results = append(results, TestResult{Error: true}) + } + } + + got := toSet(results) + want := toSet(test.Results) + if !testutil.Equal(got, want) { + t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want) + } +} + +func toSet(res []TestResult) map[TestResult]bool { + set := make(map[TestResult]bool) + for _, tr := range res { + set[tr] = true + } + return set +} + +// ri returns a ReadItem for the given components +func ri(rk string, fm string, qual string, ts int64, val string) ReadItem { + return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)} +} + +// cc returns a CellChunk proto +func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { + // The components of the cell key are wrapped and can be null or empty + var rkWrapper []byte + if rk == nilStr { + rkWrapper = nil + } else { + rkWrapper = []byte(rk) + } + + var fmWrapper *wrappers.StringValue + if fm != nilStr { + fmWrapper = &wrappers.StringValue{Value: fm} + } else { + fmWrapper = nil + } + + var qualWrapper *wrappers.BytesValue + if qual != nilStr { + qualWrapper = &wrappers.BytesValue{Value: []byte(qual)} + } else { + qualWrapper = nil + } + + return &btspb.ReadRowsResponse_CellChunk{ + RowKey: rkWrapper, + FamilyName: fmWrapper, + Qualifier: qualWrapper, + TimestampMicros: ts, + Value: []byte(val), + ValueSize: size, + RowStatus: &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}} +} + +// ccData returns a CellChunk with only a value and size +func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { + return cc(nilStr, nilStr, nilStr, 0, val, size, commit) +} + +// ccReset returns a CellChunk with RestRow set to true +func ccReset() *btspb.ReadRowsResponse_CellChunk { + return &btspb.ReadRowsResponse_CellChunk{ + RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}} +} diff --git a/vendor/cloud.google.com/go/bigtable/retry_test.go b/vendor/cloud.google.com/go/bigtable/retry_test.go new file mode 100644 
index 0000000..cbeb8d1 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/retry_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package bigtable + +import ( + "strings" + "testing" + "time" + + "cloud.google.com/go/bigtable/bttest" + "cloud.google.com/go/internal/testutil" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + "google.golang.org/api/option" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + rpcpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) { + srv, err := bttest.NewServer("127.0.0.1:0", opt...) + if err != nil { + return nil, nil, err + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, nil, err + } + + client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) + if err != nil { + return nil, nil, err + } + + adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) + if err != nil { + return nil, nil, err + } + if err := adminClient.CreateTable(context.Background(), "table"); err != nil { + return nil, nil, err + } + if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil { + return nil, nil, err + } + t := client.Open("table") + + cleanupFunc := func() { + adminClient.Close() + client.Close() + srv.Close() + } + return t, cleanupFunc, nil +} + +func TestRetryApply(t *testing.T) { + ctx := context.Background() + + errCount := 0 + code := codes.Unavailable // Will be retried + // Intercept requests and return an error or defer to the underlying handler + errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 { + errCount++ + return nil, grpc.Errorf(code, "") + } + return handler(ctx, req) + } + tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector)) + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + defer cleanup() + + mut := NewMutation() + mut.Set("cf", "col", 1, []byte("val")) + if err := tbl.Apply(ctx, "row1", mut); err != nil { + t.Errorf("applying single mutation with retries: %v", err) + } + row, err := tbl.ReadRow(ctx, "row1") + if err != nil { + t.Errorf("reading single value with retries: %v", err) + } + if row == nil { + t.Errorf("applying single mutation with retries: could not read back row") + } + + code = codes.FailedPrecondition // Won't be retried + errCount = 0 + if err := tbl.Apply(ctx, "row", mut); err == nil { + t.Errorf("applying single mutation with no retries: no error") + } + + // Check and mutate + mutTrue := NewMutation() + mutTrue.DeleteRow() + mutFalse := NewMutation() + mutFalse.Set("cf", "col", 1, 
[]byte("val")) + condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse) + + errCount = 0 + code = codes.Unavailable // Will be retried + if err := tbl.Apply(ctx, "row1", condMut); err != nil { + t.Errorf("conditionally mutating row with retries: %v", err) + } + row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table + if err != nil { + t.Errorf("reading single value after conditional mutation: %v", err) + } + if row != nil { + t.Errorf("reading single value after conditional mutation: row not deleted") + } + + errCount = 0 + code = codes.FailedPrecondition // Won't be retried + if err := tbl.Apply(ctx, "row", condMut); err == nil { + t.Errorf("conditionally mutating row with no retries: no error") + } +} + +func TestRetryApplyBulk(t *testing.T) { + ctx := context.Background() + + // Intercept requests and delegate to an interceptor defined by the test case + errCount := 0 + var f func(grpc.ServerStream) error + errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if strings.HasSuffix(info.FullMethod, "MutateRows") { + return f(ss) + } + return handler(ctx, ss) + } + + tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + errCount = 0 + // Test overall request failure and retries + f = func(ss grpc.ServerStream) error { + if errCount < 3 { + errCount++ + return grpc.Errorf(codes.Aborted, "") + } + return nil + } + mut := NewMutation() + mut.Set("cf", "col", 1, []byte{}) + errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut}) + if errors != nil || err != nil { + t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err) + } + + // Test failures and retries in one request + errCount = 0 + m1 := NewMutation() + m1.Set("cf", "col", 1, []byte{}) + m2 := NewMutation() + m2.Set("cf", "col2", 1, []byte{}) + m3 := NewMutation() + m3.Set("cf", "col3", 1, []byte{}) + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.MutateRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Retryable request failure + err = grpc.Errorf(codes.Unavailable, "") + case 1: + // Two mutations fail + writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted) + err = nil + case 2: + // Two failures were retried. One will succeed. + if want, got := 2, len(req.Entries); want != got { + t.Errorf("2 bulk retries, got: %d, want %d", got, want) + } + writeMutateRowsResponse(ss, codes.OK, codes.Aborted) + err = nil + case 3: + // One failure was retried and will succeed. + if want, got := 1, len(req.Entries); want != got { + t.Errorf("1 bulk retry, got: %d, want %d", got, want) + } + writeMutateRowsResponse(ss, codes.OK) + err = nil + } + errCount++ + return err + } + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) + if errors != nil || err != nil { + t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err) + } + + // Test unretryable errors + niMut := NewMutation() + niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent + errCount = 0 + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.MutateRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Give non-idempotent mutation a retryable error code. + // Nothing should be retried. 
+ writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted) + err = nil + case 1: + t.Errorf("unretryable errors: got one retry, want no retries") + } + errCount++ + return err + } + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) + if err != nil { + t.Errorf("unretryable errors: request failed: %v", err) + } + want := []error{ + grpc.Errorf(codes.FailedPrecondition, ""), + grpc.Errorf(codes.Aborted, ""), + } + if !testutil.Equal(want, errors) { + t.Errorf("unretryable errors: got: %v, want: %v", errors, want) + } + + // Test individual errors and a deadline exceeded + f = func(ss grpc.ServerStream) error { + writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted) + return nil + } + ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond) + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) + wantErr := context.DeadlineExceeded + if wantErr != err { + t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr) + } + if errors != nil { + t.Errorf("deadline exceeded errors: got: %v, want: nil", errors) + } +} + +func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error { + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))} + for i, code := range codes { + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &rpcpb.Status{Code: int32(code), Message: ""}, + } + } + return ss.SendMsg(res) +} + +func TestRetainRowsAfter(t *testing.T) { + prevRowRange := NewRange("a", "z") + prevRowKey := "m" + want := NewRange("m\x00", "z") + got := prevRowRange.retainRowsAfter(prevRowKey) + if !testutil.Equal(want, got, cmp.AllowUnexported(RowRange{})) { + t.Errorf("range retry: got %v, want %v", got, want) + } + + prevRowRangeList := RowRangeList{NewRange("a", "d"), NewRange("e", "g"), NewRange("h", "l")} + prevRowKey = "f" + wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")} + got = prevRowRangeList.retainRowsAfter(prevRowKey) + if !testutil.Equal(wantRowRangeList, got, cmp.AllowUnexported(RowRange{})) { + t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList) + } + + prevRowList := RowList{"a", "b", "c", "d", "e", "f"} + prevRowKey = "b" + wantList := RowList{"c", "d", "e", "f"} + got = prevRowList.retainRowsAfter(prevRowKey) + if !testutil.Equal(wantList, got) { + t.Errorf("list retry: got %v, want %v", got, wantList) + } +} + +func TestRetryReadRows(t *testing.T) { + ctx := context.Background() + + // Intercept requests and delegate to an interceptor defined by the test case + errCount := 0 + var f func(grpc.ServerStream) error + errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if strings.HasSuffix(info.FullMethod, "ReadRows") { + return f(ss) + } + return handler(ctx, ss) + } + + tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + errCount = 0 + // Test overall request failure and retries + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.ReadRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Retryable request failure + err = grpc.Errorf(codes.Unavailable, "") + case 1: + // Write two rows then error + if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { + t.Errorf("first retry, no data received yet: got %q, want %q",
got, want) + } + writeReadRowsResponse(ss, "a", "b") + err = grpc.Errorf(codes.Unavailable, "") + case 2: + // Retryable request failure + if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { + t.Errorf("2 range retries: got %q, want %q", got, want) + } + err = grpc.Errorf(codes.Unavailable, "") + case 3: + // Write two more rows + writeReadRowsResponse(ss, "c", "d") + err = nil + } + errCount++ + return err + } + + var got []string + tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool { + got = append(got, r.Key()) + return true + }) + want := []string{"a", "b", "c", "d"} + if !testutil.Equal(got, want) { + t.Errorf("retry range integration: got %v, want %v", got, want) + } +} + +func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error { + var chunks []*btpb.ReadRowsResponse_CellChunk + for _, key := range rowKeys { + chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(key), + FamilyName: &wrappers.StringValue{Value: "fm"}, + Qualifier: &wrappers.BytesValue{Value: []byte("col")}, + RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}, + }) + } + return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks}) +} diff --git a/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json new file mode 100644 index 0000000..4973831 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: 
\u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 
10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": 
"multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, 
+ "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n value: 
\"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": 
"RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: 
\"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: 
\"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/civil/civil.go b/vendor/cloud.google.com/go/civil/civil.go new file mode 100644 index 0000000..1cb2675 --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil.go @@ -0,0 +1,277 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package civil + +import ( + "fmt" + "time" +) + +// A Date represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type Date struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. 
+} + +// DateOf returns the Date in which a time occurs in that time's location. +func DateOf(t time.Time) Date { + var d Date + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseDate(s string) (Date, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return Date{}, err + } + return DateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d Date) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d Date) IsValid() bool { + return DateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.Date, even when time.Date returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d Date) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d Date) AddDays(n int) Date { + return DateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d Date) DaysSince(s Date) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 Date) Before(d2 Date) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 Date) After(d2 Date) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseDate. +func (d *Date) UnmarshalText(data []byte) error { + var err error + *d, err = ParseDate(string(data)) + return err +} + +// A Time represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. +type Time struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// TimeOf returns the Time representing the time of day in which a time occurs +// in that time's location. It ignores the date. 
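+// For example, TimeOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)) is +// Time{Hour: 15, Minute: 8, Second: 43, Nanosecond: 1}.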
+func TimeOf(t time.Time) Time { + var tm Time + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseTime parses a string and returns the time value it represents. +// ParseTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseTime(s string) (Time, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return Time{}, err + } + return TimeOf(t), nil +} + +// String returns the time in the format described in ParseTime. If Nanosecond +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t Time) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t Time) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return TimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t Time) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseTime. +func (t *Time) UnmarshalText(data []byte) error { + var err error + *t, err = ParseTime(string(data)) + return err +} + +// A DateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type DateTime struct { + Date Date + Time Time +} + +// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. + +// DateTimeOf returns the DateTime in which a time occurs in that time's location. +func DateTimeOf(t time.Time) DateTime { + return DateTime{ + Date: DateOf(t), + Time: TimeOf(t), + } +} + +// ParseDateTime parses a string and returns the DateTime it represents. +// ParseDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. +func ParseDateTime(s string) (DateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return DateTime{}, err + } + } + return DateTimeOf(t), nil +} + +// String returns the date and time in the format described in ParseDateTime. +func (dt DateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt DateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the DateTime in the given location. +// +// If the time is missing or ambiguous at the location, In returns the same +// result as time.Date.
For example, if loc is America/Indiana/Vincennes, then +// both +// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.DateTime{ +// civil.Date{Year: 1955, Month: time.May, Day: 1}, +// civil.Time{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt DateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 DateTime) Before(dt2 DateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 DateTime) After(dt2 DateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt DateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The datetime is expected to be a string in a format accepted by ParseDateTime. +func (dt *DateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseDateTime(string(data)) + return err +} diff --git a/vendor/cloud.google.com/go/civil/civil_test.go b/vendor/cloud.google.com/go/civil/civil_test.go new file mode 100644 index 0000000..f50899c --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil_test.go @@ -0,0 +1,442 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package civil + +import ( + "encoding/json" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func TestDates(t *testing.T) { + for _, test := range []struct { + date Date + loc *time.Location + wantStr string + wantTime time.Time + }{ + { + date: Date{2014, 7, 29}, + loc: time.Local, + wantStr: "2014-07-29", + wantTime: time.Date(2014, time.July, 29, 0, 0, 0, 0, time.Local), + }, + { + date: DateOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)), + loc: time.UTC, + wantStr: "2014-08-20", + wantTime: time.Date(2014, 8, 20, 0, 0, 0, 0, time.UTC), + }, + { + date: DateOf(time.Date(999, time.January, 26, 0, 0, 0, 0, time.Local)), + loc: time.UTC, + wantStr: "0999-01-26", + wantTime: time.Date(999, 1, 26, 0, 0, 0, 0, time.UTC), + }, + } { + if got := test.date.String(); got != test.wantStr { + t.Errorf("%#v.String() = %q, want %q", test.date, got, test.wantStr) + } + if got := test.date.In(test.loc); !got.Equal(test.wantTime) { + t.Errorf("%#v.In(%v) = %v, want %v", test.date, test.loc, got, test.wantTime) + } + } +} + +func TestDateIsValid(t *testing.T) { + for _, test := range []struct { + date Date + want bool + }{ + {Date{2014, 7, 29}, true}, + {Date{2000, 2, 29}, true}, + {Date{10000, 12, 31}, true}, + {Date{1, 1, 1}, true}, + {Date{0, 1, 1}, true}, // year zero is OK + {Date{-1, 1, 1}, true}, // negative year is OK + {Date{1, 0, 1}, false}, + {Date{1, 1, 0}, false}, + {Date{2016, 1, 32}, false}, + {Date{2016, 13, 1}, false}, + {Date{1, -1, 1}, false}, + {Date{1, 1, -1}, false}, + } { + got := test.date.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.date, got, test.want) + } + } +} + +func TestParseDate(t *testing.T) { + for _, test := range []struct { + str string + want Date // if empty, expect an error + }{ + {"2016-01-02", Date{2016, 1, 2}}, + {"2016-12-31", Date{2016, 12, 31}}, + {"0003-02-04", Date{3, 2, 4}}, + {"999-01-26", Date{}}, + {"", Date{}}, + {"2016-01-02x", Date{}}, + } { + got, err := ParseDate(test.str) + if got != test.want { + t.Errorf("ParseDate(%q) = %+v, want %+v", test.str, got, test.want) + } + if err != nil && test.want != (Date{}) { + t.Errorf("Unexpected error %v from ParseDate(%q)", err, test.str) + } + } +} + +func TestDateArithmetic(t *testing.T) { + for _, test := range []struct { + desc string + start Date + end Date + days int + }{ + { + desc: "zero days noop", + start: Date{2014, 5, 9}, + end: Date{2014, 5, 9}, + days: 0, + }, + { + desc: "crossing a year boundary", + start: Date{2014, 12, 31}, + end: Date{2015, 1, 1}, + days: 1, + }, + { + desc: "negative number of days", + start: Date{2015, 1, 1}, + end: Date{2014, 12, 31}, + days: -1, + }, + { + desc: "full leap year", + start: Date{2004, 1, 1}, + end: Date{2005, 1, 1}, + days: 366, + }, + { + desc: "full non-leap year", + start: Date{2001, 1, 1}, + end: Date{2002, 1, 1}, + days: 365, + }, + { + desc: "crossing a leap second", + start: Date{1972, 6, 30}, + end: Date{1972, 7, 1}, + days: 1, + }, + { + desc: "dates before the unix epoch", + start: Date{101, 1, 1}, + end: Date{102, 1, 1}, + days: 365, + }, + } { + if got := test.start.AddDays(test.days); got != test.end { + t.Errorf("[%s] %#v.AddDays(%v) = %#v, want %#v", test.desc, test.start, test.days, got, test.end) + } + if got := test.end.DaysSince(test.start); got != test.days { + t.Errorf("[%s] %#v.Sub(%#v) = %v, want %v", test.desc, test.end, test.start, got, test.days) + } + } +} + +func TestDateBefore(t *testing.T) { + for _, test := range []struct { + d1, d2 Date + want bool + }{ + {Date{2016, 12, 
31}, Date{2017, 1, 1}, true}, + {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, + {Date{2016, 12, 30}, Date{2016, 12, 31}, true}, + } { + if got := test.d1.Before(test.d2); got != test.want { + t.Errorf("%v.Before(%v): got %t, want %t", test.d1, test.d2, got, test.want) + } + } +} + +func TestDateAfter(t *testing.T) { + for _, test := range []struct { + d1, d2 Date + want bool + }{ + {Date{2016, 12, 31}, Date{2017, 1, 1}, false}, + {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, + {Date{2016, 12, 30}, Date{2016, 12, 31}, false}, + } { + if got := test.d1.After(test.d2); got != test.want { + t.Errorf("%v.After(%v): got %t, want %t", test.d1, test.d2, got, test.want) + } + } +} + +func TestTimeToString(t *testing.T) { + for _, test := range []struct { + str string + time Time + roundTrip bool // ParseTime(str).String() == str? + }{ + {"13:26:33", Time{13, 26, 33, 0}, true}, + {"01:02:03.000023456", Time{1, 2, 3, 23456}, true}, + {"00:00:00.000000001", Time{0, 0, 0, 1}, true}, + {"13:26:03.1", Time{13, 26, 3, 100000000}, false}, + {"13:26:33.0000003", Time{13, 26, 33, 300}, false}, + } { + gotTime, err := ParseTime(test.str) + if err != nil { + t.Errorf("ParseTime(%q): got error: %v", test.str, err) + continue + } + if gotTime != test.time { + t.Errorf("ParseTime(%q) = %+v, want %+v", test.str, gotTime, test.time) + } + if test.roundTrip { + gotStr := test.time.String() + if gotStr != test.str { + t.Errorf("%#v.String() = %q, want %q", test.time, gotStr, test.str) + } + } + } +} + +func TestTimeOf(t *testing.T) { + for _, test := range []struct { + time time.Time + want Time + }{ + {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), Time{15, 8, 43, 1}}, + {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Time{0, 0, 0, 0}}, + } { + if got := TimeOf(test.time); got != test.want { + t.Errorf("TimeOf(%v) = %+v, want %+v", test.time, got, test.want) + } + } +} + +func TestTimeIsValid(t *testing.T) { + for _, test := range []struct { + time Time + want bool + }{ + {Time{0, 0, 0, 0}, true}, + {Time{23, 0, 0, 0}, true}, + {Time{23, 59, 59, 999999999}, true}, + {Time{24, 59, 59, 999999999}, false}, + {Time{23, 60, 59, 999999999}, false}, + {Time{23, 59, 60, 999999999}, false}, + {Time{23, 59, 59, 1000000000}, false}, + {Time{-1, 0, 0, 0}, false}, + {Time{0, -1, 0, 0}, false}, + {Time{0, 0, -1, 0}, false}, + {Time{0, 0, 0, -1}, false}, + } { + got := test.time.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.time, got, test.want) + } + } +} + +func TestDateTimeToString(t *testing.T) { + for _, test := range []struct { + str string + dateTime DateTime + roundTrip bool // ParseDateTime(str).String() == str? 
+ }{ + {"2016-03-22T13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, true}, + {"2016-03-22T13:26:33.000000600", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 600}}, true}, + {"2016-03-22t13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, false}, + } { + gotDateTime, err := ParseDateTime(test.str) + if err != nil { + t.Errorf("ParseDateTime(%q): got error: %v", test.str, err) + continue + } + if gotDateTime != test.dateTime { + t.Errorf("ParseDateTime(%q) = %+v, want %+v", test.str, gotDateTime, test.dateTime) + } + if test.roundTrip { + gotStr := test.dateTime.String() + if gotStr != test.str { + t.Errorf("%#v.String() = %q, want %q", test.dateTime, gotStr, test.str) + } + } + } +} + +func TestParseDateTimeErrors(t *testing.T) { + for _, str := range []string{ + "", + "2016-03-22", // just a date + "13:26:33", // just a time + "2016-03-22 13:26:33", // wrong separating character + "2016-03-22T13:26:33x", // extra at end + } { + if _, err := ParseDateTime(str); err == nil { + t.Errorf("ParseDateTime(%q) succeeded, want error", str) + } + } +} + +func TestDateTimeOf(t *testing.T) { + for _, test := range []struct { + time time.Time + want DateTime + }{ + {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), + DateTime{Date{2014, 8, 20}, Time{15, 8, 43, 1}}}, + {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), + DateTime{Date{1, 1, 1}, Time{0, 0, 0, 0}}}, + } { + if got := DateTimeOf(test.time); got != test.want { + t.Errorf("DateTimeOf(%v) = %+v, want %+v", test.time, got, test.want) + } + } +} + +func TestDateTimeIsValid(t *testing.T) { + // No need to be exhaustive here; it's just Date.IsValid && Time.IsValid. + for _, test := range []struct { + dt DateTime + want bool + }{ + {DateTime{Date{2016, 3, 20}, Time{0, 0, 0, 0}}, true}, + {DateTime{Date{2016, -3, 20}, Time{0, 0, 0, 0}}, false}, + {DateTime{Date{2016, 3, 20}, Time{24, 0, 0, 0}}, false}, + } { + got := test.dt.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.dt, got, test.want) + } + } +} + +func TestDateTimeIn(t *testing.T) { + dt := DateTime{Date{2016, 1, 2}, Time{3, 4, 5, 6}} + got := dt.In(time.UTC) + want := time.Date(2016, 1, 2, 3, 4, 5, 6, time.UTC) + if !got.Equal(want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDateTimeBefore(t *testing.T) { + d1 := Date{2016, 12, 31} + d2 := Date{2017, 1, 1} + t1 := Time{5, 6, 7, 8} + t2 := Time{5, 6, 7, 9} + for _, test := range []struct { + dt1, dt2 DateTime + want bool + }{ + {DateTime{d1, t1}, DateTime{d2, t1}, true}, + {DateTime{d1, t1}, DateTime{d1, t2}, true}, + {DateTime{d2, t1}, DateTime{d1, t1}, false}, + {DateTime{d2, t1}, DateTime{d2, t1}, false}, + } { + if got := test.dt1.Before(test.dt2); got != test.want { + t.Errorf("%v.Before(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) + } + } +} + +func TestDateTimeAfter(t *testing.T) { + d1 := Date{2016, 12, 31} + d2 := Date{2017, 1, 1} + t1 := Time{5, 6, 7, 8} + t2 := Time{5, 6, 7, 9} + for _, test := range []struct { + dt1, dt2 DateTime + want bool + }{ + {DateTime{d1, t1}, DateTime{d2, t1}, false}, + {DateTime{d1, t1}, DateTime{d1, t2}, false}, + {DateTime{d2, t1}, DateTime{d1, t1}, true}, + {DateTime{d2, t1}, DateTime{d2, t1}, false}, + } { + if got := test.dt1.After(test.dt2); got != test.want { + t.Errorf("%v.After(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) + } + } +} + +func TestMarshalJSON(t *testing.T) { + for _, test := range []struct { + value interface{} + want string + }{ + {Date{1987, 4, 15}, `"1987-04-15"`}, + {Time{18, 54, 2, 0}, 
`"18:54:02"`}, + {DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}, `"1987-04-15T18:54:02"`}, + } { + bgot, err := json.Marshal(test.value) + if err != nil { + t.Fatal(err) + } + if got := string(bgot); got != test.want { + t.Errorf("%#v: got %s, want %s", test.value, got, test.want) + } + } +} + +func TestUnmarshalJSON(t *testing.T) { + var d Date + var tm Time + var dt DateTime + for _, test := range []struct { + data string + ptr interface{} + want interface{} + }{ + {`"1987-04-15"`, &d, &Date{1987, 4, 15}}, + {`"1987-04-\u0031\u0035"`, &d, &Date{1987, 4, 15}}, + {`"18:54:02"`, &tm, &Time{18, 54, 2, 0}}, + {`"1987-04-15T18:54:02"`, &dt, &DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}}, + } { + if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil { + t.Fatalf("%s: %v", test.data, err) + } + if !cmp.Equal(test.ptr, test.want) { + t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want) + } + } + + for _, bad := range []string{"", `""`, `"bad"`, `"1987-04-15x"`, + `19870415`, // a JSON number + `11987-04-15x`, // not a JSON string + + } { + if json.Unmarshal([]byte(bad), &d) == nil { + t.Errorf("%q, Date: got nil, want error", bad) + } + if json.Unmarshal([]byte(bad), &tm) == nil { + t.Errorf("%q, Time: got nil, want error", bad) + } + if json.Unmarshal([]byte(bad), &dt) == nil { + t.Errorf("%q, DateTime: got nil, want error", bad) + } + } +} diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go new file mode 100644 index 0000000..6ba428d --- /dev/null +++ b/vendor/cloud.google.com/go/cloud.go @@ -0,0 +1,20 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cloud is the root of the packages used to access Google Cloud +// Services. See https://godoc.org/cloud.google.com/go for a full list +// of sub-packages. +// +// This package documents how to authorize and authenticate the sub packages. +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go new file mode 100644 index 0000000..cbed2be --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go @@ -0,0 +1,450 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux,go1.7 + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "math/rand" + "os" + "sync" + "time" + + "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints" + debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller" + "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector" + "cloud.google.com/go/compute/metadata" + "golang.org/x/debug" + "golang.org/x/debug/local" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + cd "google.golang.org/api/clouddebugger/v2" +) + +var ( + appModule = flag.String("appmodule", "", "Optional application module name.") + appVersion = flag.String("appversion", "", "Optional application module version name.") + sourceContextFile = flag.String("sourcecontext", "", "File containing JSON-encoded source context.") + verbose = flag.Bool("v", false, "Output verbose log messages.") + projectNumber = flag.String("projectnumber", "", "Project number."+ + " If this is not set, it is read from the GCP metadata server.") + projectID = flag.String("projectid", "", "Project ID."+ + " If this is not set, it is read from the GCP metadata server.") + serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.") +) + +const ( + maxCapturedStackFrames = 50 + maxCapturedVariables = 1000 +) + +func main() { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + // The user needs to supply the name of the executable to run. + flag.Usage() + return + } + if *projectNumber == "" { + var err error + *projectNumber, err = metadata.NumericProjectID() + if err != nil { + log.Print("Debuglet initialization: ", err) + } + } + if *projectID == "" { + var err error + *projectID, err = metadata.ProjectID() + if err != nil { + log.Print("Debuglet initialization: ", err) + } + } + sourceContexts, err := readSourceContextFile(*sourceContextFile) + if err != nil { + log.Print("Reading source context file: ", err) + } + var ts oauth2.TokenSource + ctx := context.Background() + if *serviceAccountFile != "" { + if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil { + log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err) + } + } else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil { + log.Print("Error getting application default credentials for Cloud Debugger:", err) + os.Exit(103) + } + c, err := debuglet.NewController(ctx, debuglet.Options{ + ProjectNumber: *projectNumber, + ProjectID: *projectID, + AppModule: *appModule, + AppVersion: *appVersion, + SourceContexts: sourceContexts, + Verbose: *verbose, + TokenSource: ts, + }) + if err != nil { + log.Fatal("Error connecting to Cloud Debugger: ", err) + } + prog, err := local.New(args[0]) + if err != nil { + log.Fatal("Error loading program: ", err) + } + // Load the program, but don't actually start it running yet. + if _, err = prog.Run(args[1:]...); err != nil { + log.Fatal("Error loading program: ", err) + } + bs := breakpoints.NewBreakpointStore(prog) + + // Seed the random number generator. + rand.Seed(time.Now().UnixNano()) + + // Now we want to do two things: run the user's program, and start sending + // List requests periodically to the Debuglet Controller to get breakpoints + // to set. 
+ // + // We want to give the Debuglet Controller a chance to give us breakpoints + // before we start the program, otherwise we would miss any breakpoint + // triggers that occur during program startup -- for example, a breakpoint on + // the first line of main. But if the Debuglet Controller is not responding or + // is returning errors, we don't want to delay starting the program + // indefinitely. + // + // We pass a channel to breakpointListLoop, which will close it when the first + // List call finishes. Then we wait until either the channel is closed or a + // 5-second timer has finished before starting the program. + ch := make(chan bool) + // Start a goroutine that sends List requests to the Debuglet Controller, and + // sets any breakpoints it gets back. + go breakpointListLoop(ctx, c, bs, ch) + // Wait until 5 seconds have passed or breakpointListLoop has closed ch. + select { + case <-time.After(5 * time.Second): + case <-ch: + } + // Run the debuggee. + programLoop(ctx, c, bs, prog) +} + +// usage prints a usage message to stderr and exits. +func usage() { + me := "a.out" + if len(os.Args) >= 1 { + me = os.Args[0] + } + fmt.Fprintf(os.Stderr, "Usage of %s:\n", me) + fmt.Fprintf(os.Stderr, "\t%s [flags...] -- args...\n", me) + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + "See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n") + os.Exit(2) +} + +// readSourceContextFile reads a JSON-encoded source context from the given file. +// It returns a non-empty slice on success. +func readSourceContextFile(filename string) ([]*cd.SourceContext, error) { + if filename == "" { + return nil, nil + } + scJSON, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("reading file %q: %v", filename, err) + } + var sc cd.SourceContext + if err = json.Unmarshal(scJSON, &sc); err != nil { + return nil, fmt.Errorf("parsing file %q: %v", filename, err) + } + return []*cd.SourceContext{&sc}, nil +} + +// breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and +// passes the results to the BreakpointStore so it can set and unset breakpoints +// in the program. +// +// After the first List call finishes, ch is closed. +func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) { + const ( + avgTimeBetweenCalls = time.Second + errorDelay = 5 * time.Second + ) + + // randomDuration returns a random duration with expected value avg. + randomDuration := func(avg time.Duration) time.Duration { + return time.Duration(rand.Int63n(int64(2*avg + 1))) + } + + var consecutiveFailures uint + + for { + callStart := time.Now() + resp, err := c.List(ctx) + if err != nil && err != debuglet.ErrListUnchanged { + log.Printf("Debuglet controller server error: %v", err) + } + if err == nil { + bs.ProcessBreakpointList(resp.Breakpoints) + } + + if first != nil { + // We've finished one call to List and set any breakpoints we received. + close(first) + first = nil + } + + // Asynchronously send updates for any breakpoints that caused an error when + // the BreakpointStore tried to process them. We don't wait for the update + // to finish before the program can exit, as we do for normal updates. 
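+	// (Clarifying note on the pacing below: randomDuration(avg) draws
+	// uniformly from [0, 2*avg], so its expected value is avg; successive
+	// List calls therefore start about avgTimeBetweenCalls apart on average.
+	// On consecutive failures the extra expected wait doubles, 5s, 10s, 20s,
+	// 40s, and is then capped at 16x errorDelay, i.e. 80s expected.)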
+ errorBps := bs.ErrorBreakpoints() + for _, bp := range errorBps { + go func(bp *cd.Breakpoint) { + if err := c.Update(ctx, bp.Id, bp); err != nil { + log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) + } + }(bp) + } + + // Make the next call not too soon after the one we just did. + delay := randomDuration(avgTimeBetweenCalls) + + // If the call returned an error other than ErrListUnchanged, wait longer. + if err != nil && err != debuglet.ErrListUnchanged { + // Wait twice as long after each consecutive failure, to a maximum of 16x. + delay += randomDuration(errorDelay * (1 << consecutiveFailures)) + if consecutiveFailures < 4 { + consecutiveFailures++ + } + } else { + consecutiveFailures = 0 + } + + // Sleep until we reach time callStart+delay. If we've already passed that + // time, time.Sleep will return immediately -- this should be the common + // case, since the server will delay responding to List for a while when + // there are no changes to report. + time.Sleep(callStart.Add(delay).Sub(time.Now())) + } +} + +// programLoop runs the program being debugged to completion. When a breakpoint's +// conditions are satisfied, it sends an Update RPC to the Debuglet Controller. +// The function returns when the program exits and all Update RPCs have finished. +func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) { + var wg sync.WaitGroup + for { + // Run the program until it hits a breakpoint or exits. + status, err := prog.Resume() + if err != nil { + break + } + + // Get the breakpoints at this address whose conditions were satisfied, + // and remove the ones that aren't logpoints. + bps := bs.BreakpointsAtPC(status.PC) + bps = bpsWithConditionSatisfied(bps, prog) + for _, bp := range bps { + if bp.Action != "LOG" { + bs.RemoveBreakpoint(bp) + } + } + + if len(bps) == 0 { + continue + } + + // Evaluate expressions and get the stack. + vc := valuecollector.NewCollector(prog, maxCapturedVariables) + needStackFrames := false + for _, bp := range bps { + // If evaluating bp's condition didn't return an error, evaluate bp's + // expressions, and later get the stack frames. + if bp.Status == nil { + bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc) + needStackFrames = true + } + } + var ( + stack []*cd.StackFrame + stackFramesStatusMessage *cd.StatusMessage + ) + if needStackFrames { + stack, stackFramesStatusMessage = stackFrames(prog, vc) + } + + // Read variable values from the program. + variableTable := vc.ReadValues() + + // Start a goroutine to send updates to the Debuglet Controller or write + // to logs, concurrently with resuming the program. + // TODO: retry Update on failure. + for _, bp := range bps { + wg.Add(1) + switch bp.Action { + case "LOG": + go func(format string, evaluatedExpressions []*cd.Variable) { + s := valuecollector.LogString(format, evaluatedExpressions, variableTable) + log.Print(s) + wg.Done() + }(bp.LogMessageFormat, bp.EvaluatedExpressions) + bp.Status = nil + bp.EvaluatedExpressions = nil + default: + go func(bp *cd.Breakpoint) { + defer wg.Done() + bp.IsFinalState = true + if bp.Status == nil { + // If evaluating bp's condition didn't return an error, include the + // stack frames, variable table, and any status message produced when + // getting the stack frames. 
+ bp.StackFrames = stack + bp.VariableTable = variableTable + bp.Status = stackFramesStatusMessage + } + if err := c.Update(ctx, bp.Id, bp); err != nil { + log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) + } + }(bp) + } + } + } + + // Wait for all updates to finish before returning. + wg.Wait() +} + +// bpsWithConditionSatisfied returns the breakpoints whose conditions are true +// (or that do not have a condition.) +func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint { + var bpsOut []*cd.Breakpoint + for _, bp := range bpsIn { + cond, err := condTruth(bp.Condition, prog) + if err != nil { + bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition) + // Include bp in the list to be updated when there's an error, so that + // the user gets a response. + bpsOut = append(bpsOut, bp) + } else if cond { + bpsOut = append(bpsOut, bp) + } + } + return bpsOut +} + +// condTruth evaluates a condition. +func condTruth(condition string, prog debug.Program) (bool, error) { + if condition == "" { + // A condition wasn't set. + return true, nil + } + val, err := prog.Evaluate(condition) + if err != nil { + return false, err + } + if v, ok := val.(bool); !ok { + return false, fmt.Errorf("condition expression has type %T, should be bool", val) + } else { + return v, nil + } +} + +// expressionValues evaluates a slice of expressions and returns a []*cd.Variable +// containing the results. +// If the result of an expression evaluation refers to values from the program's +// memory (e.g., the expression evaluates to a slice) a corresponding variable is +// added to the value collector, to be read later. +func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable { + evaluatedExpressions := make([]*cd.Variable, len(expressions)) + for i, exp := range expressions { + ee := &cd.Variable{Name: exp} + evaluatedExpressions[i] = ee + if val, err := prog.Evaluate(exp); err != nil { + ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression) + } else { + vc.FillValue(val, ee) + } + } + return evaluatedExpressions +} + +// stackFrames returns a stack trace for the program. It passes references to +// function parameters and local variables to the value collector, so it can read +// their values later. +func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) { + frames, err := prog.Frames(maxCapturedStackFrames) + if err != nil { + return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified) + } + stackFrames := make([]*cd.StackFrame, len(frames)) + for i, f := range frames { + frame := &cd.StackFrame{} + frame.Function = f.Function + for _, v := range f.Params { + frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v))) + } + for _, v := range f.Vars { + frame.Locals = append(frame.Locals, vc.AddVariable(v)) + } + frame.Location = &cd.SourceLocation{ + Path: f.File, + Line: int64(f.Line), + } + stackFrames[i] = frame + } + return stackFrames, nil +} + +// errorStatusMessage returns a *cd.StatusMessage indicating an error, +// with the given message and refersTo field. +func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage { + return &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, + IsError: true, + RefersTo: refersToString[refersTo], + } +} + +const ( + // RefersTo values for cd.StatusMessage. 
+ refersToUnspecified = iota + refersToBreakpointCondition + refersToBreakpointExpression +) + +// refersToString contains the strings for each refersTo value. +// See the definition of StatusMessage in the v2/clouddebugger package. +var refersToString = map[int]string{ + refersToUnspecified: "UNSPECIFIED", + refersToBreakpointCondition: "BREAKPOINT_CONDITION", + refersToBreakpointExpression: "BREAKPOINT_EXPRESSION", +} + +func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("cannot read service account file: %v", err) + } + cfg, err := google.JWTConfigFromJSON(data, scope...) + if err != nil { + return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) + } + return cfg.TokenSource(ctx), nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go new file mode 100644 index 0000000..afe07cb --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go @@ -0,0 +1,174 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package breakpoints handles breakpoint requests we get from the user through +// the Debuglet Controller, and manages corresponding breakpoints set in the code. +package breakpoints + +import ( + "log" + "sync" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +// BreakpointStore stores the set of breakpoints for a program. +type BreakpointStore struct { + mu sync.Mutex + // prog is the program being debugged. + prog debug.Program + // idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint. The + // map value is nil if the breakpoint is inactive. A breakpoint is active if: + // - We received it from the Debuglet Controller, and it was active at the time; + // - We were able to set code breakpoints for it; + // - We have not reached any of those code breakpoints while satisfying the + // breakpoint's conditions, or the breakpoint has action LOG; and + // - The Debuglet Controller hasn't informed us the breakpoint has become inactive. + idToBreakpoint map[string]*cd.Breakpoint + // pcToBps and bpToPCs store the many-to-many relationship between breakpoints we + // received from the Debuglet Controller and the code breakpoints we set for them. + pcToBps map[uint64][]*cd.Breakpoint + bpToPCs map[*cd.Breakpoint][]uint64 + // errors contains any breakpoints which couldn't be set because they caused an + // error. These are retrieved with ErrorBreakpoints, and the caller is + // expected to handle sending updates for them. + errors []*cd.Breakpoint +} + +// NewBreakpointStore returns a BreakpointStore for the given program. 
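+//
+// A minimal wiring sketch, mirroring what the agent's main does
+// (pathToBinary is a placeholder; local is golang.org/x/debug/local):
+//
+//	prog, _ := local.New(pathToBinary)
+//	bs := NewBreakpointStore(prog)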
+func NewBreakpointStore(prog debug.Program) *BreakpointStore { + return &BreakpointStore{ + idToBreakpoint: make(map[string]*cd.Breakpoint), + pcToBps: make(map[uint64][]*cd.Breakpoint), + bpToPCs: make(map[*cd.Breakpoint][]uint64), + prog: prog, + } +} + +// ProcessBreakpointList applies updates received from the Debuglet Controller through a List call. +func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) { + bs.mu.Lock() + defer bs.mu.Unlock() + for _, bp := range bps { + if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok { + if storedBp != nil && bp.IsFinalState { + // IsFinalState indicates that the breakpoint has been made inactive. + bs.removeBreakpointLocked(storedBp) + } + } else { + if bp.IsFinalState { + // The controller is notifying us that the breakpoint is no longer active, + // but we didn't know about it anyway. + continue + } + if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" { + bp.IsFinalState = true + bp.Status = &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "Action is not supported"}, + IsError: true, + } + bs.errors = append(bs.errors, bp) + // Note in idToBreakpoint that we've already seen this breakpoint, so that we + // don't try to report it as an error multiple times. + bs.idToBreakpoint[bp.Id] = nil + continue + } + pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line)) + if err != nil { + log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err) + } + if len(pcs) == 0 { + // We can't find a PC for this breakpoint's source line, so don't make it active. + // TODO: we could snap the line to a location where we can break, or report an error to the user. + bs.idToBreakpoint[bp.Id] = nil + } else { + bs.idToBreakpoint[bp.Id] = bp + for _, pc := range pcs { + bs.pcToBps[pc] = append(bs.pcToBps[pc], bp) + } + bs.bpToPCs[bp] = pcs + } + } + } +} + +// ErrorBreakpoints returns a slice of Breakpoints that caused errors when the +// BreakpointStore tried to process them, and resets the list of such +// breakpoints. +// The caller is expected to send updates to the server to indicate the errors. +func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint { + bs.mu.Lock() + defer bs.mu.Unlock() + bps := bs.errors + bs.errors = nil + return bps +} + +// BreakpointsAtPC returns all the breakpoints for which we set a code +// breakpoint at the given address. +func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint { + bs.mu.Lock() + defer bs.mu.Unlock() + return bs.pcToBps[pc] +} + +// RemoveBreakpoint makes the given breakpoint inactive. +// This is called when either the debugged program hits the breakpoint, or the Debuglet +// Controller informs us that the breakpoint is now inactive. +func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) { + bs.mu.Lock() + bs.removeBreakpointLocked(bp) + bs.mu.Unlock() +} + +func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) { + // Set the ID's corresponding breakpoint to nil, so that we won't activate it + // if we see it again. + // TODO: we could delete it after a few seconds. + bs.idToBreakpoint[bp.Id] = nil + + // Delete bp from the list of cd breakpoints at each of its corresponding + // code breakpoint locations, and delete any code breakpoints which no longer + // have a corresponding cd breakpoint. 
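+	// Worked example with illustrative PCs: if bp had code breakpoints at
+	// 0x1234 and 0x5678, and another cd breakpoint is still set at 0x5678,
+	// only the code breakpoint at 0x1234 is deleted below; 0x5678 stays
+	// until its last cd breakpoint is removed.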
+ var codeBreakpointsToDelete []uint64 + for _, pc := range bs.bpToPCs[bp] { + bps := remove(bs.pcToBps[pc], bp) + if len(bps) == 0 { + // bp was the last breakpoint set at this PC, so delete the code breakpoint. + codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc) + delete(bs.pcToBps, pc) + } else { + bs.pcToBps[pc] = bps + } + } + if len(codeBreakpointsToDelete) > 0 { + bs.prog.DeleteBreakpoints(codeBreakpointsToDelete) + } + delete(bs.bpToPCs, bp) +} + +// remove updates rs by removing r, then returns rs. +// The mutex in the BreakpointStore which contains rs should be held. +func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint { + for i := range rs { + if rs[i] == r { + rs[i] = rs[len(rs)-1] + rs = rs[0 : len(rs)-1] + return rs + } + } + // We shouldn't reach here. + return rs +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go new file mode 100644 index 0000000..089a3ba --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go @@ -0,0 +1,168 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package breakpoints + +import ( + "reflect" + "testing" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +var ( + testPC1 uint64 = 0x1234 + testPC2 uint64 = 0x5678 + testPC3 uint64 = 0x3333 + testFile = "foo.go" + testLine uint64 = 42 + testLine2 uint64 = 99 + testLogPC uint64 = 0x9abc + testLogLine uint64 = 43 + testBadPC uint64 = 0xdef0 + testBadLine uint64 = 44 + testBP = &cd.Breakpoint{ + Action: "CAPTURE", + Id: "TestBreakpoint", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testLine)}, + } + testBP2 = &cd.Breakpoint{ + Action: "CAPTURE", + Id: "TestBreakpoint2", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testLine2)}, + } + testLogBP = &cd.Breakpoint{ + Action: "LOG", + Id: "TestLogBreakpoint", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testLogLine)}, + } + testBadBP = &cd.Breakpoint{ + Action: "BEEP", + Id: "TestBadBreakpoint", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testBadLine)}, + } +) + +func TestBreakpointStore(t *testing.T) { + p := &Program{breakpointPCs: make(map[uint64]bool)} + bs := NewBreakpointStore(p) + checkPCs := func(expected map[uint64]bool) { + if !reflect.DeepEqual(p.breakpointPCs, expected) { + t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected) + } + } + bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP}) + checkPCs(map[uint64]bool{ + testPC1: true, + testPC2: true, + testPC3: true, + testLogPC: true, + }) + for _, test := range []struct { + pc uint64 + expected []*cd.Breakpoint + }{ + {testPC1, []*cd.Breakpoint{testBP}}, + {testPC2, []*cd.Breakpoint{testBP}}, + {testPC3, []*cd.Breakpoint{testBP2}}, + {testLogPC, []*cd.Breakpoint{testLogBP}}, + } { + if bps := bs.BreakpointsAtPC(test.pc); !reflect.DeepEqual(bps, test.expected) { + t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected) + } + } + testBP2.IsFinalState = true + bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP}) + checkPCs(map[uint64]bool{ + testPC1: true, + testPC2: true, + testPC3: false, + testLogPC: true, + }) + bs.RemoveBreakpoint(testBP) + checkPCs(map[uint64]bool{ + testPC1: false, + testPC2: false, + testPC3: false, + testLogPC: true, + }) + for _, pc := range []uint64{testPC1, testPC2, testPC3} { + if bps := bs.BreakpointsAtPC(pc); len(bps) != 0 { + t.Errorf("BreakpointsAtPC(%x): got %v want []", pc, bps) + } + } + // bs.ErrorBreakpoints should return testBadBP. + errorBps := bs.ErrorBreakpoints() + if len(errorBps) != 1 { + t.Errorf("ErrorBreakpoints: got %d want 1", len(errorBps)) + } else { + bp := errorBps[0] + if bp.Id != testBadBP.Id { + t.Errorf("ErrorBreakpoints: got id %q want 1", bp.Id) + } + if bp.Status == nil || !bp.Status.IsError { + t.Errorf("ErrorBreakpoints: got %v, want error", bp.Status) + } + } + // The error should have been removed by the last call to bs.ErrorBreakpoints. + errorBps = bs.ErrorBreakpoints() + if len(errorBps) != 0 { + t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps)) + } + // Even if testBadBP is sent in a new list, it should not be returned again. + bs.ProcessBreakpointList([]*cd.Breakpoint{testBadBP}) + errorBps = bs.ErrorBreakpoints() + if len(errorBps) != 0 { + t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps)) + } +} + +// Program implements the similarly-named interface in x/debug. 
+// ValueCollector should only call its BreakpointAtLine and DeleteBreakpoints methods. +type Program struct { + debug.Program + // breakpointPCs contains the state of code breakpoints -- true if the + // breakpoint is currently set, false if it has been deleted. + breakpointPCs map[uint64]bool +} + +func (p *Program) BreakpointAtLine(file string, line uint64) ([]uint64, error) { + var pcs []uint64 + switch { + case file == testFile && line == testLine: + pcs = []uint64{testPC1, testPC2} + case file == testFile && line == testLine2: + pcs = []uint64{testPC3} + case file == testFile && line == testLogLine: + pcs = []uint64{testLogPC} + default: + pcs = []uint64{0xbad} + } + for _, pc := range pcs { + p.breakpointPCs[pc] = true + } + return pcs, nil +} + +func (p *Program) DeleteBreakpoints(pcs []uint64) error { + for _, pc := range pcs { + p.breakpointPCs[pc] = false + } + return nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go new file mode 100644 index 0000000..2571583 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go @@ -0,0 +1,291 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package controller is a library for interacting with the Google Cloud Debugger's Debuglet Controller service. +package controller + +import ( + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "log" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + cd "google.golang.org/api/clouddebugger/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +const ( + // agentVersionString identifies the agent to the service. + agentVersionString = "google.com/go-gcp/v0.2" + // initWaitToken is the wait token sent in the first Update request to a server. + initWaitToken = "init" +) + +var ( + // ErrListUnchanged is returned by List if the server time limit is reached + // before the list of breakpoints changes. + ErrListUnchanged = errors.New("breakpoint list unchanged") + // ErrDebuggeeDisabled is returned by List or Update if the server has disabled + // this Debuggee. The caller can retry later. + ErrDebuggeeDisabled = errors.New("debuglet disabled by server") +) + +// Controller manages a connection to the Debuglet Controller service. +type Controller struct { + s serviceInterface + // waitToken is sent with List requests so the server knows which set of + // breakpoints this client has already seen. Each successful List request + // returns a new waitToken to send in the next request. + waitToken string + // verbose determines whether to do some logging + verbose bool + // options, uniquifier and description are used in register. + options Options + uniquifier string + description string + // labels are included when registering the debuggee. 
They should contain + // the module name, version and minorversion, and are used by the debug UI + // to label the correct version active for debugging. + labels map[string]string + // mu protects debuggeeID + mu sync.Mutex + // debuggeeID is returned from the server on registration, and is passed back + // to the server in List and Update requests. + debuggeeID string +} + +// Options controls how the Debuglet Controller client identifies itself to the server. +// See https://cloud.google.com/storage/docs/projects and +// https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine +// for further documentation of these parameters. +type Options struct { + ProjectNumber string // GCP Project Number. + ProjectID string // GCP Project ID. + AppModule string // Module name for the debugged program. + AppVersion string // Version number for this module. + SourceContexts []*cd.SourceContext // Description of source. + Verbose bool + TokenSource oauth2.TokenSource // Source of Credentials used for Stackdriver Debugger. +} + +type serviceInterface interface { + Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) + Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) + List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) +} + +var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) { + httpClient, endpoint, err := htransport.NewClient(ctx, option.WithTokenSource(tokenSource)) + if err != nil { + return nil, err + } + s, err := cd.New(httpClient) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return &service{s: s}, nil +} + +type service struct { + s *cd.Service +} + +func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + call := cd.NewControllerDebuggeesService(s.s).Register(req) + return call.Context(ctx).Do() +} + +func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req) + return call.Context(ctx).Do() +} + +func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID) + call.WaitToken(waitToken) + return call.Context(ctx).Do() +} + +// NewController connects to the Debuglet Controller server using the given options, +// and returns a Controller for that connection. +// Google Application Default Credentials are used to connect to the Debuglet Controller; +// see https://developers.google.com/identity/protocols/application-default-credentials +func NewController(ctx context.Context, o Options) (*Controller, error) { + // We build a JSON encoding of o.SourceContexts so we can hash it. + scJSON, err := json.Marshal(o.SourceContexts) + if err != nil { + scJSON = nil + o.SourceContexts = nil + } + const minorversion = "107157" // any arbitrary numeric string + + // Compute a uniquifier string by hashing the project number, app module name, + // app module version, debuglet version, and source context. + // The choice of hash function is arbitrary. 
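+	// (Note: the encoding below length-prefixes each field as "%d %s", so
+	// distinct field tuples cannot collide by mere concatenation, e.g.
+	// ("ab", "c") vs. ("a", "bc").)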
+ h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s %d %s", + len(o.ProjectNumber), o.ProjectNumber, + len(o.AppModule), o.AppModule, + len(o.AppVersion), o.AppVersion, + len(agentVersionString), agentVersionString, + len(scJSON), scJSON, + len(minorversion), minorversion))) + uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters + + description := o.ProjectID + if o.AppModule != "" { + description += "-" + o.AppModule + } + if o.AppVersion != "" { + description += "-" + o.AppVersion + } + + s, err := newService(ctx, o.TokenSource) + if err != nil { + return nil, err + } + + // Construct client. + c := &Controller{ + s: s, + waitToken: initWaitToken, + verbose: o.Verbose, + options: o, + uniquifier: uniquifier, + description: description, + labels: map[string]string{ + "module": o.AppModule, + "version": o.AppVersion, + "minorversion": minorversion, + }, + } + + return c, nil +} + +func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.debuggeeID != "" { + return c.debuggeeID, nil + } + // The debuglet hasn't been registered yet, or it is disabled and we should try registering again. + if err := c.register(ctx); err != nil { + return "", err + } + return c.debuggeeID, nil +} + +// List retrieves the current list of breakpoints from the server. +// If the set of breakpoints on the server is the same as the one returned in +// the previous call to List, the server can delay responding until it changes, +// and return an error instead if no change occurs before a time limit the +// server sets. List can't be called concurrently with itself. +func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) { + id, err := c.getDebuggeeID(ctx) + if err != nil { + return nil, err + } + resp, err := c.s.List(ctx, id, c.waitToken) + if err != nil { + if isAbortedError(err) { + return nil, ErrListUnchanged + } + // For other errors, the protocol requires that we attempt to re-register. + c.mu.Lock() + defer c.mu.Unlock() + if regError := c.register(ctx); regError != nil { + return nil, regError + } + return nil, err + } + if resp == nil { + return nil, errors.New("no response") + } + if c.verbose { + log.Printf("List response: %v", resp) + } + c.waitToken = resp.NextWaitToken + return resp, nil +} + +// isAbortedError tests if err is a *googleapi.Error, that it contains one error +// in Errors, and that that error's Reason is "aborted". +func isAbortedError(err error) bool { + e, _ := err.(*googleapi.Error) + if e == nil { + return false + } + if len(e.Errors) != 1 { + return false + } + return e.Errors[0].Reason == "aborted" +} + +// Update reports information to the server about a breakpoint that was hit. +// Update can be called concurrently with List and Update. +func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error { + req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp} + if c.verbose { + log.Printf("sending update for %s: %v", breakpointID, req) + } + id, err := c.getDebuggeeID(ctx) + if err != nil { + return err + } + _, err = c.s.Update(ctx, id, breakpointID, req) + return err +} + +// register calls the Debuglet Controller Register method, and sets c.debuggeeID. +// c.mu should be locked while calling this function. List and Update can't +// make progress until it returns. 
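+// If the server reports the debuggee as disabled, register clears
+// c.debuggeeID and returns ErrDebuggeeDisabled, so a later List or Update
+// will try to register again.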
+func (c *Controller) register(ctx context.Context) error { + req := cd.RegisterDebuggeeRequest{ + Debuggee: &cd.Debuggee{ + AgentVersion: agentVersionString, + Description: c.description, + Project: c.options.ProjectNumber, + SourceContexts: c.options.SourceContexts, + Uniquifier: c.uniquifier, + Labels: c.labels, + }, + } + resp, err := c.s.Register(ctx, &req) + if err != nil { + return err + } + if resp == nil { + return errors.New("register: no response") + } + if resp.Debuggee.IsDisabled { + // Setting c.debuggeeID to empty makes sure future List and Update calls + // will call register first. + c.debuggeeID = "" + } else { + c.debuggeeID = resp.Debuggee.Id + } + if c.debuggeeID == "" { + return ErrDebuggeeDisabled + } + return nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go new file mode 100644 index 0000000..fa06347 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go @@ -0,0 +1,254 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package controller + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "testing" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + + cd "google.golang.org/api/clouddebugger/v2" + "google.golang.org/api/googleapi" +) + +const ( + testDebuggeeID = "d12345" + testBreakpointID = "bp12345" +) + +var ( + // The sequence of wait tokens in List requests and responses. + expectedWaitToken = []string{"init", "token1", "token2", "token1", "token1"} + // The set of breakpoints returned from each List call. 
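+	// Entry i is what the (i+1)-th successful List call returns; nil means
+	// that call reports no breakpoints. (The mock's 4th and 5th calls return
+	// errors instead, exercising the re-register and aborted paths.)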
+ expectedBreakpoints = [][]*cd.Breakpoint{ + nil, + { + &cd.Breakpoint{ + Id: testBreakpointID, + IsFinalState: false, + Location: &cd.SourceLocation{Line: 42, Path: "foo.go"}, + }, + }, + nil, + } + abortedError error = &googleapi.Error{ + Code: 409, + Message: "Conflict", + Body: `{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "aborted", + "message": "Conflict" + } + ], + "code": 409, + "message": "Conflict" + } + }`, + Errors: []googleapi.ErrorItem{ + {Reason: "aborted", Message: "Conflict"}, + }, + } + backendError error = &googleapi.Error{ + Code: 503, + Message: "Backend Error", + Body: `{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "backendError", + "message": "Backend Error" + } + ], + "code": 503, + "message": "Backend Error" + } + }`, + Errors: []googleapi.ErrorItem{ + {Reason: "backendError", Message: "Backend Error"}, + }, + } +) + +type mockService struct { + t *testing.T + listCallsSeen int + registerCallsSeen int +} + +func (s *mockService) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + s.registerCallsSeen++ + if req.Debuggee == nil { + s.t.Errorf("missing debuggee") + return nil, nil + } + if req.Debuggee.AgentVersion == "" { + s.t.Errorf("missing agent version") + } + if req.Debuggee.Description == "" { + s.t.Errorf("missing debuglet description") + } + if req.Debuggee.Project == "" { + s.t.Errorf("missing project id") + } + if req.Debuggee.Uniquifier == "" { + s.t.Errorf("missing uniquifier") + } + return &cd.RegisterDebuggeeResponse{ + Debuggee: &cd.Debuggee{Id: testDebuggeeID}, + }, nil +} + +func (s *mockService) Update(ctx context.Context, id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + if id != testDebuggeeID { + s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) + } + if breakpointID != testBreakpointID { + s.t.Errorf("got breakpoint ID %s want %s", breakpointID, testBreakpointID) + } + if !req.Breakpoint.IsFinalState { + s.t.Errorf("got IsFinalState = false, want true") + } + return nil, nil +} + +func (s *mockService) List(ctx context.Context, id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + if id != testDebuggeeID { + s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) + } + if waitToken != expectedWaitToken[s.listCallsSeen] { + s.t.Errorf("got wait token %s want %s", waitToken, expectedWaitToken[s.listCallsSeen]) + } + s.listCallsSeen++ + if s.listCallsSeen == 4 { + return nil, backendError + } + if s.listCallsSeen == 5 { + return nil, abortedError + } + resp := &cd.ListActiveBreakpointsResponse{ + Breakpoints: expectedBreakpoints[s.listCallsSeen-1], + NextWaitToken: expectedWaitToken[s.listCallsSeen], + } + return resp, nil +} + +func TestDebugletControllerClientLibrary(t *testing.T) { + var ( + m *mockService + c *Controller + list *cd.ListActiveBreakpointsResponse + err error + ) + m = &mockService{t: t} + newService = func(context.Context, oauth2.TokenSource) (serviceInterface, error) { return m, nil } + opts := Options{ + ProjectNumber: "5", + ProjectID: "p1", + AppModule: "mod1", + AppVersion: "v1", + } + ctx := context.Background() + if c, err = NewController(ctx, opts); err != nil { + t.Fatal("Initializing Controller client:", err) + } + if err := validateLabels(c, opts); err != nil { + t.Fatalf("Invalid labels:\n%v", err) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if m.registerCallsSeen != 1 { + t.Errorf("saw %d Register 
calls, want 1", m.registerCallsSeen) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if len(list.Breakpoints) != 1 { + t.Fatalf("got %d breakpoints, want 1", len(list.Breakpoints)) + } + if err = c.Update(ctx, list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil { + t.Fatal("Update:", err) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if m.registerCallsSeen != 1 { + t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) + } + // The next List call produces an error that should cause a Register call. + if list, err = c.List(ctx); err == nil { + t.Fatal("List should have returned an error") + } + if m.registerCallsSeen != 2 { + t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) + } + // The next List call produces an error that should not cause a Register call. + if list, err = c.List(ctx); err == nil { + t.Fatal("List should have returned an error") + } + if m.registerCallsSeen != 2 { + t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) + } + if m.listCallsSeen != 5 { + t.Errorf("saw %d list calls, want 5", m.listCallsSeen) + } +} + +func validateLabels(c *Controller, o Options) error { + errMsg := new(bytes.Buffer) + if m, ok := c.labels["module"]; ok { + if m != o.AppModule { + errMsg.WriteString(fmt.Sprintf("label module: want %s, got %s\n", o.AppModule, m)) + } + } else { + errMsg.WriteString("Missing \"module\" label\n") + } + if v, ok := c.labels["version"]; ok { + if v != o.AppVersion { + errMsg.WriteString(fmt.Sprintf("label version: want %s, got %s\n", o.AppVersion, v)) + } + } else { + errMsg.WriteString("Missing \"version\" label\n") + } + if mv, ok := c.labels["minorversion"]; ok { + if _, err := strconv.Atoi(mv); err != nil { + errMsg.WriteString(fmt.Sprintln("label minorversion: not a numeric string:", mv)) + } + } else { + errMsg.WriteString("Missing \"minorversion\" label\n") + } + if errMsg.Len() != 0 { + return errors.New(errMsg.String()) + } + return nil +} + +func TestIsAbortedError(t *testing.T) { + if !isAbortedError(abortedError) { + t.Errorf("isAborted(%+v): got false, want true", abortedError) + } + if isAbortedError(backendError) { + t.Errorf("isAborted(%+v): got true, want false", backendError) + } +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go new file mode 100644 index 0000000..8dadc2f --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go @@ -0,0 +1,460 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package valuecollector is used to collect the values of variables in a program. 
+package valuecollector + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +const ( + maxArrayLength = 50 + maxMapLength = 20 +) + +// Collector is given references to variables from a program being debugged +// using AddVariable. Then when ReadValues is called, the Collector will fetch +// the values of those variables. Any variables referred to by those values +// will also be fetched; e.g. the targets of pointers, members of structs, +// elements of slices, etc. This continues iteratively, building a graph of +// values, until all the reachable values are fetched, or a size limit is +// reached. +// +// Variables are passed to the Collector as debug.Var, which is used by x/debug +// to represent references to variables. Values are returned as cd.Variable, +// which is used by the Debuglet Controller to represent the graph of values. +// +// For example, if the program has a struct variable: +// +// foo := SomeStruct{a:42, b:"xyz"} +// +// and we call AddVariable with a reference to foo, we will get back a result +// like: +// +// cd.Variable{Name:"foo", VarTableIndex:10} +// +// which denotes a variable named "foo" which will have its value stored in +// element 10 of the table that will later be returned by ReadValues. That +// element might be: +// +// out[10] = &cd.Variable{Members:{{Name:"a", VarTableIndex:11},{Name:"b", VarTableIndex:12}}} +// +// which denotes a struct with two members a and b, whose values are in elements +// 11 and 12 of the output table: +// +// out[11] = &cd.Variable{Value:"42"} +// out[12] = &cd.Variable{Value:"xyz"} +type Collector struct { + // prog is the program being debugged. + prog debug.Program + // limit is the maximum size of the output slice of values. + limit int + // index is a map from references (variables and map elements) to their + // locations in the table. + index map[reference]int + // table contains the references, including those given to the + // Collector directly and those the Collector itself found. + // If VarTableIndex is set to 0 in a cd.Variable, it is ignored, so the first entry + // of table can't be used. On initialization we put a dummy value there. + table []reference +} + +// reference represents a value which is in the queue to be read by the +// collector. It is either a debug.Var, or a mapElement. +type reference interface{} + +// mapElement represents an element of a map in the debugged program's memory. +type mapElement struct { + debug.Map + index uint64 +} + +// NewCollector returns a Collector for the given program and size limit. +// The limit is the maximum size of the slice of values returned by ReadValues. +func NewCollector(prog debug.Program, limit int) *Collector { + return &Collector{ + prog: prog, + limit: limit, + index: make(map[reference]int), + table: []reference{debug.Var{}}, + } +} + +// AddVariable adds another variable to be collected. +// The Collector doesn't get the value immediately; it returns a cd.Variable +// that contains an index into the table which will later be returned by +// ReadValues. +func (c *Collector) AddVariable(lv debug.LocalVar) *cd.Variable { + ret := &cd.Variable{Name: lv.Name} + if index, ok := c.add(lv.Var); !ok { + // If the add call failed, it's because we reached the size limit. + // The Debuglet Controller's convention is to pass it a "Not Captured" error + // in this case. 
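+		// (Illustration: the returned stub then carries only Name and this
+		// error Status; its VarTableIndex stays 0, which is ignored by
+		// convention, see the comment on the table field above.)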
+ ret.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + ret.VarTableIndex = int64(index) + } + return ret +} + +// add adds a reference to the set of values to be read from the +// program. It returns the index in the output table that will contain the +// corresponding value. It fails if the table has reached the size limit. +// It deduplicates references, so the index may be the same as one that was +// returned from an earlier add call. +func (c *Collector) add(r reference) (outputIndex int, ok bool) { + if i, ok := c.index[r]; ok { + return i, true + } + i := len(c.table) + if i >= c.limit { + return 0, false + } + c.index[r] = i + c.table = append(c.table, r) + return i, true +} + +func addMember(v *cd.Variable, name string) *cd.Variable { + v2 := &cd.Variable{Name: name} + v.Members = append(v.Members, v2) + return v2 +} + +// ReadValues fetches values of the variables that were passed to the Collector +// with AddVariable. The values of any new variables found are also fetched, +// e.g. the targets of pointers or the members of structs, until we reach the +// size limit or we run out of values to fetch. +// The results are output as a []*cd.Variable, which is the type we need to send +// to the Debuglet Controller after we trigger a breakpoint. +func (c *Collector) ReadValues() (out []*cd.Variable) { + for i := 0; i < len(c.table); i++ { + // Create a new cd.Variable for this value, and append it to the output. + dcv := new(cd.Variable) + out = append(out, dcv) + if i == 0 { + // The first element is unused. + continue + } + switch x := c.table[i].(type) { + case mapElement: + key, value, err := c.prog.MapElement(x.Map, x.index) + if err != nil { + dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) + continue + } + // Add a member for the key. + member := addMember(dcv, "key") + if index, ok := c.add(key); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + continue + } else { + member.VarTableIndex = int64(index) + } + // Add a member for the value. + member = addMember(dcv, "value") + if index, ok := c.add(value); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + member.VarTableIndex = int64(index) + } + case debug.Var: + if v, err := c.prog.Value(x); err != nil { + dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) + } else { + c.FillValue(v, dcv) + } + } + } + return out +} + +// indexable is an interface for arrays, slices and channels. +type indexable interface { + Len() uint64 + Element(uint64) debug.Var +} + +// channel implements indexable. +type channel struct { + debug.Channel +} + +func (c channel) Len() uint64 { + return c.Length +} + +var ( + _ indexable = debug.Array{} + _ indexable = debug.Slice{} + _ indexable = channel{} +) + +// FillValue copies a value into a cd.Variable. Any variables referred to by +// that value, e.g. struct members and pointer targets, are added to the +// collector's queue, to be fetched later by ReadValues. +func (c *Collector) FillValue(v debug.Value, dcv *cd.Variable) { + if c, ok := v.(debug.Channel); ok { + // Convert to channel, which implements indexable. + v = channel{c} + } + // Fill in dcv in a manner depending on the type of the value we got. 
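+	// Shape example (illustrative): a slice of length 2 comes out as
+	//	dcv.Value = "len = 2"
+	//	dcv.Members = [{Name: "[0]", VarTableIndex: i}, {Name: "[1]", VarTableIndex: j}]
+	// with table slots i and j filled in by a later ReadValues pass.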
+ switch val := v.(type) { + case int8, int16, int32, int64, bool, uint8, uint16, uint32, uint64, float32, float64, complex64, complex128: + // For simple types, we just print the value to dcv.Value. + dcv.Value = fmt.Sprint(val) + case string: + // Put double quotes around strings. + dcv.Value = strconv.Quote(val) + case debug.String: + if uint64(len(val.String)) < val.Length { + // This string value was truncated. + dcv.Value = strconv.Quote(val.String + "...") + } else { + dcv.Value = strconv.Quote(val.String) + } + case debug.Struct: + // For structs, we add an entry to dcv.Members for each field in the + // struct. + // Each member will contain the name of the field, and the index in the + // output table which will contain the value of that field. + for _, f := range val.Fields { + member := addMember(dcv, f.Name) + if index, ok := c.add(f.Var); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + member.VarTableIndex = int64(index) + } + } + case debug.Map: + dcv.Value = fmt.Sprintf("len = %d", val.Length) + for i := uint64(0); i < val.Length; i++ { + field := addMember(dcv, `⚫`) + if i == maxMapLength { + field.Name = "..." + field.Status = statusMessage(messageTruncated, true, refersToVariableName) + break + } + if index, ok := c.add(mapElement{val, i}); !ok { + // The value table is full; add a member to contain the error message. + field.Name = "..." + field.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + break + } else { + field.VarTableIndex = int64(index) + } + } + case debug.Pointer: + if val.Address == 0 { + dcv.Value = "" + } else if val.TypeID == 0 { + // We don't know the type of the pointer, so just output the address as + // the value. + dcv.Value = fmt.Sprintf("0x%X", val.Address) + dcv.Status = statusMessage(messageUnknownPointerType, false, refersToVariableName) + } else { + // Adds the pointed-to variable to the table, and links this value to + // that table entry through VarTableIndex. + dcv.Value = fmt.Sprintf("0x%X", val.Address) + target := addMember(dcv, "") + if index, ok := c.add(debug.Var(val)); !ok { + target.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + target.VarTableIndex = int64(index) + } + } + case indexable: + // Arrays, slices and channels. + dcv.Value = "len = " + fmt.Sprint(val.Len()) + for j := uint64(0); j < val.Len(); j++ { + field := addMember(dcv, fmt.Sprint(`[`, j, `]`)) + if j == maxArrayLength { + field.Name = "..." + field.Status = statusMessage(messageTruncated, true, refersToVariableName) + break + } + vr := val.Element(j) + if index, ok := c.add(vr); !ok { + // The value table is full; add a member to contain the error message. + field.Name = "..." + field.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + break + } else { + // Add a member with the index as the name. + field.VarTableIndex = int64(index) + } + } + default: + dcv.Status = statusMessage(messageUnknownType, false, refersToVariableName) + } +} + +// statusMessage returns a *cd.StatusMessage with the given message, IsError +// field and refersTo field. +func statusMessage(msg string, isError bool, refersTo int) *cd.StatusMessage { + return &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, + IsError: isError, + RefersTo: refersToString[refersTo], + } +} + +// LogString produces a string for a logpoint, substituting in variable values +// using evaluatedExpressions and varTable. 
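+//
+// An illustrative call (not taken from the tests in this diff):
+//
+//   exprs := []*cd.Variable{{Value: "42"}}
+//   s := LogString("x = $0, cost: $$5", exprs, nil)
+//   // s == "LOGPOINT: x = 42, cost: $5"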
+func LogString(s string, evaluatedExpressions []*cd.Variable, varTable []*cd.Variable) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "LOGPOINT: ") + seen := make(map[*cd.Variable]bool) + for i := 0; i < len(s); { + if s[i] == '$' { + i++ + if num, n, ok := parseToken(s[i:], len(evaluatedExpressions)-1); ok { + // This token is one of $0, $1, etc. Write the corresponding expression. + writeExpression(&buf, evaluatedExpressions[num], false, varTable, seen) + i += n + } else { + // Something else, like $$. + buf.WriteByte(s[i]) + i++ + } + } else { + buf.WriteByte(s[i]) + i++ + } + } + return buf.String() +} + +func parseToken(s string, max int) (num int, bytesRead int, ok bool) { + var i int + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + num, err := strconv.Atoi(s[:i]) + return num, i, err == nil && num <= max +} + +// writeExpression recursively writes variables to buf, in a format suitable +// for logging. If printName is true, writes the name of the variable. +func writeExpression(buf *bytes.Buffer, v *cd.Variable, printName bool, varTable []*cd.Variable, seen map[*cd.Variable]bool) { + if v == nil { + // Shouldn't happen. + return + } + name, value, status, members := v.Name, v.Value, v.Status, v.Members + + // If v.VarTableIndex is not zero, it refers to an element of varTable. + // We merge its fields with the fields we got from v. + var other *cd.Variable + if idx := int(v.VarTableIndex); idx > 0 && idx < len(varTable) { + other = varTable[idx] + } + if other != nil { + if name == "" { + name = other.Name + } + if value == "" { + value = other.Value + } + if status == nil { + status = other.Status + } + if len(members) == 0 { + members = other.Members + } + } + if printName && name != "" { + buf.WriteString(name) + buf.WriteByte(':') + } + + // If we have seen this value before, write "..." rather than repeating it. + if seen[v] { + buf.WriteString("...") + return + } + seen[v] = true + if other != nil { + if seen[other] { + buf.WriteString("...") + return + } + seen[other] = true + } + + if value != "" && !strings.HasPrefix(value, "len = ") { + // A plain value. + buf.WriteString(value) + } else if status != nil && status.Description != nil { + // An error. + for _, p := range status.Description.Parameters { + buf.WriteByte('(') + buf.WriteString(p) + buf.WriteByte(')') + } + } else if name == `⚫` { + // A map element. + first := true + for _, member := range members { + if first { + first = false + } else { + buf.WriteByte(':') + } + writeExpression(buf, member, false, varTable, seen) + } + } else { + // A map, array, slice, channel, or struct. + isStruct := value == "" + first := true + buf.WriteByte('{') + for _, member := range members { + if first { + first = false + } else { + buf.WriteString(", ") + } + writeExpression(buf, member, isStruct, varTable, seen) + } + buf.WriteByte('}') + } +} + +const ( + // Error messages for cd.StatusMessage + messageNotCaptured = "Not captured" + messageTruncated = "Truncated" + messageUnknownPointerType = "Unknown pointer type" + messageUnknownType = "Unknown type" + // RefersTo values for cd.StatusMessage. + refersToVariableName = iota + refersToVariableValue +) + +// refersToString contains the strings for each refersTo value. +// See the definition of StatusMessage in the v2/clouddebugger package. 
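+// Only the two variable-related values are used by this collector; the API
+// defines more RefersTo values (BREAKPOINT_CONDITION, for example) that are
+// never emitted here.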
+var refersToString = map[int]string{ + refersToVariableName: "VARIABLE_NAME", + refersToVariableValue: "VARIABLE_VALUE", +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go new file mode 100644 index 0000000..2bc97dc --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go @@ -0,0 +1,418 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package valuecollector + +import ( + "fmt" + "reflect" + "testing" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +const ( + // Some arbitrary type IDs for the test, for use in debug.Var's TypeID field. + // A TypeID of 0 means the type is unknown, so we start at 1. + int16Type = iota + 1 + stringType + structType + pointerType + arrayType + int32Type + debugStringType + mapType + channelType + sliceType +) + +func TestValueCollector(t *testing.T) { + // Construct the collector. + c := NewCollector(&Program{}, 26) + // Add some variables of various types, whose values we want the collector to read. + variablesToAdd := []debug.LocalVar{ + {Name: "a", Var: debug.Var{int16Type, 0x1}}, + {Name: "b", Var: debug.Var{stringType, 0x2}}, + {Name: "c", Var: debug.Var{structType, 0x3}}, + {Name: "d", Var: debug.Var{pointerType, 0x4}}, + {Name: "e", Var: debug.Var{arrayType, 0x5}}, + {Name: "f", Var: debug.Var{debugStringType, 0x6}}, + {Name: "g", Var: debug.Var{mapType, 0x7}}, + {Name: "h", Var: debug.Var{channelType, 0x8}}, + {Name: "i", Var: debug.Var{sliceType, 0x9}}, + } + expectedResults := []*cd.Variable{ + &cd.Variable{Name: "a", VarTableIndex: 1}, + &cd.Variable{Name: "b", VarTableIndex: 2}, + &cd.Variable{Name: "c", VarTableIndex: 3}, + &cd.Variable{Name: "d", VarTableIndex: 4}, + &cd.Variable{Name: "e", VarTableIndex: 5}, + &cd.Variable{Name: "f", VarTableIndex: 6}, + &cd.Variable{Name: "g", VarTableIndex: 7}, + &cd.Variable{Name: "h", VarTableIndex: 8}, + &cd.Variable{Name: "i", VarTableIndex: 9}, + } + for i, v := range variablesToAdd { + added := c.AddVariable(v) + if !reflect.DeepEqual(added, expectedResults[i]) { + t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i]) + } + } + // Read the values, compare the output to what we expect. 
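+ // The limit of 26 passed to NewCollector above is deliberate: table
+ // indices 0-25 are used up while reading the map elements, so the last
+ // map value (which would need index 26) is reported as "Not captured"
+ // in expectedValues below.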
+ v := c.ReadValues() + expectedValues := []*cd.Variable{ + &cd.Variable{}, + &cd.Variable{Value: "1"}, + &cd.Variable{Value: `"hello"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "x", VarTableIndex: 1}, + &cd.Variable{Name: "y", VarTableIndex: 2}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{VarTableIndex: 1}, + }, + Value: "0x1", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 10}, + &cd.Variable{Name: "[1]", VarTableIndex: 11}, + &cd.Variable{Name: "[2]", VarTableIndex: 12}, + &cd.Variable{Name: "[3]", VarTableIndex: 13}, + }, + Value: "len = 4", + }, + &cd.Variable{Value: `"world"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "⚫", VarTableIndex: 14}, + &cd.Variable{Name: "⚫", VarTableIndex: 15}, + &cd.Variable{Name: "⚫", VarTableIndex: 16}, + }, + Value: "len = 3", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 17}, + &cd.Variable{Name: "[1]", VarTableIndex: 18}, + }, + Value: "len = 2", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 19}, + &cd.Variable{Name: "[1]", VarTableIndex: 20}, + }, + Value: "len = 2", + }, + &cd.Variable{Value: "100"}, + &cd.Variable{Value: "104"}, + &cd.Variable{Value: "108"}, + &cd.Variable{Value: "112"}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 21}, + &cd.Variable{Name: "value", VarTableIndex: 22}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 23}, + &cd.Variable{Name: "value", VarTableIndex: 24}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 25}, + &cd.Variable{ + Name: "value", + Status: &cd.StatusMessage{ + Description: &cd.FormatMessage{ + Format: "$0", + Parameters: []string{"Not captured"}, + }, + IsError: true, + RefersTo: "VARIABLE_NAME", + }, + }, + }, + }, + &cd.Variable{Value: "246"}, + &cd.Variable{Value: "210"}, + &cd.Variable{Value: "300"}, + &cd.Variable{Value: "304"}, + &cd.Variable{Value: "400"}, + &cd.Variable{Value: "404"}, + &cd.Variable{Value: "1400"}, + &cd.Variable{Value: "1404"}, + &cd.Variable{Value: "2400"}, + } + if !reflect.DeepEqual(v, expectedValues) { + t.Errorf("ReadValues: got %v want %v", v, expectedValues) + // Do element-by-element comparisons, for more useful error messages. + for i := range v { + if i < len(expectedValues) && !reflect.DeepEqual(v[i], expectedValues[i]) { + t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i]) + } + } + } +} + +// Program implements the similarly-named interface in x/debug. +// ValueCollector should only call its Value and MapElement methods. +type Program struct { + debug.Program +} + +func (p *Program) Value(v debug.Var) (debug.Value, error) { + // We determine what to return using v.TypeID. + switch v.TypeID { + case int16Type: + // We use the address as the value, so that we're testing whether the right + // address was calculated. + return int16(v.Address), nil + case stringType: + // A string. + return "hello", nil + case structType: + // A struct with two elements. + return debug.Struct{ + Fields: []debug.StructField{ + { + Name: "x", + Var: debug.Var{int16Type, 0x1}, + }, + { + Name: "y", + Var: debug.Var{stringType, 0x2}, + }, + }, + }, nil + case pointerType: + // A pointer to the first variable above. + return debug.Pointer{int16Type, 0x1}, nil + case arrayType: + // An array of 4 32-bit-wide elements. 
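+ // Address 0x64 is 100 decimal; with a 32-bit stride the elements sit
+ // at addresses 100, 104, 108 and 112, which int32Type echoes back as
+ // the values TestValueCollector expects.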
+ return debug.Array{ + ElementTypeID: int32Type, + Address: 0x64, + Length: 4, + StrideBits: 32, + }, nil + case debugStringType: + return debug.String{ + Length: 5, + String: "world", + }, nil + case mapType: + return debug.Map{ + TypeID: 99, + Address: 0x100, + Length: 3, + }, nil + case channelType: + return debug.Channel{ + ElementTypeID: int32Type, + Address: 200, + Buffer: 210, + Length: 2, + Capacity: 10, + Stride: 4, + BufferStart: 9, + }, nil + case sliceType: + // A slice of 2 32-bit-wide elements. + return debug.Slice{ + Array: debug.Array{ + ElementTypeID: int32Type, + Address: 300, + Length: 2, + StrideBits: 32, + }, + Capacity: 50, + }, nil + case int32Type: + // We use the address as the value, so that we're testing whether the right + // address was calculated. + return int32(v.Address), nil + } + return nil, fmt.Errorf("unexpected Value request") +} + +func (p *Program) MapElement(m debug.Map, index uint64) (debug.Var, debug.Var, error) { + return debug.Var{TypeID: int16Type, Address: 1000*index + 400}, + debug.Var{TypeID: int32Type, Address: 1000*index + 404}, + nil +} + +func TestLogString(t *testing.T) { + bp := cd.Breakpoint{ + Action: "LOG", + LogMessageFormat: "$0 hello, $$7world! $1 $2 $3 $4 $5$6 $7 $8", + EvaluatedExpressions: []*cd.Variable{ + &cd.Variable{Name: "a", VarTableIndex: 1}, + &cd.Variable{Name: "b", VarTableIndex: 2}, + &cd.Variable{Name: "c", VarTableIndex: 3}, + &cd.Variable{Name: "d", VarTableIndex: 4}, + &cd.Variable{Name: "e", VarTableIndex: 5}, + &cd.Variable{Name: "f", VarTableIndex: 6}, + &cd.Variable{Name: "g", VarTableIndex: 7}, + &cd.Variable{Name: "h", VarTableIndex: 8}, + &cd.Variable{Name: "i", VarTableIndex: 9}, + }, + } + varTable := []*cd.Variable{ + &cd.Variable{}, + &cd.Variable{Value: "1"}, + &cd.Variable{Value: `"hello"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "x", Value: "1"}, + &cd.Variable{Name: "y", Value: `"hello"`}, + &cd.Variable{Name: "z", VarTableIndex: 3}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{VarTableIndex: 1}, + }, + Value: "0x1", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 10}, + &cd.Variable{Name: "[1]", VarTableIndex: 11}, + &cd.Variable{Name: "[2]", VarTableIndex: 12}, + &cd.Variable{Name: "[3]", VarTableIndex: 13}, + }, + Value: "len = 4", + }, + &cd.Variable{Value: `"world"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "⚫", VarTableIndex: 14}, + &cd.Variable{Name: "⚫", VarTableIndex: 15}, + &cd.Variable{Name: "⚫", VarTableIndex: 16}, + }, + Value: "len = 3", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 17}, + &cd.Variable{Name: "[1]", VarTableIndex: 18}, + }, + Value: "len = 2", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 19}, + &cd.Variable{Name: "[1]", VarTableIndex: 20}, + }, + Value: "len = 2", + }, + &cd.Variable{Value: "100"}, + &cd.Variable{Value: "104"}, + &cd.Variable{Value: "108"}, + &cd.Variable{Value: "112"}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 21}, + &cd.Variable{Name: "value", VarTableIndex: 22}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 23}, + &cd.Variable{Name: "value", VarTableIndex: 24}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 25}, + &cd.Variable{ + Name: "value", + Status: &cd.StatusMessage{ + Description: &cd.FormatMessage{ 
+ Format: "$0", + Parameters: []string{"Not captured"}, + }, + IsError: true, + RefersTo: "VARIABLE_NAME", + }, + }, + }, + }, + &cd.Variable{Value: "246"}, + &cd.Variable{Value: "210"}, + &cd.Variable{Value: "300"}, + &cd.Variable{Value: "304"}, + &cd.Variable{Value: "400"}, + &cd.Variable{Value: "404"}, + &cd.Variable{Value: "1400"}, + &cd.Variable{Value: "1404"}, + &cd.Variable{Value: "2400"}, + } + s := LogString(bp.LogMessageFormat, bp.EvaluatedExpressions, varTable) + expected := `LOGPOINT: 1 hello, $7world! "hello" {x:1, y:"hello", z:...} ` + + `0x1 {100, 104, 108, 112} "world"{400:404, 1400:1404, 2400:(Not captured)} ` + + `{246, 210} {300, 304}` + if s != expected { + t.Errorf("LogString: got %q want %q", s, expected) + } +} + +func TestParseToken(t *testing.T) { + for _, c := range []struct { + s string + max int + num int + n int + ok bool + }{ + {"", 0, 0, 0, false}, + {".", 0, 0, 0, false}, + {"0", 0, 0, 1, true}, + {"0", 1, 0, 1, true}, + {"00", 0, 0, 2, true}, + {"1.", 1, 1, 1, true}, + {"1.", 0, 0, 0, false}, + {"10", 10, 10, 2, true}, + {"10..", 10, 10, 2, true}, + {"10", 11, 10, 2, true}, + {"10..", 11, 10, 2, true}, + {"10", 9, 0, 0, false}, + {"10..", 9, 0, 0, false}, + {" 10", 10, 0, 0, false}, + {"010", 10, 10, 3, true}, + {"123456789", 123456789, 123456789, 9, true}, + {"123456789", 123456788, 0, 0, false}, + {"123456789123456789123456789", 999999999, 0, 0, false}, + } { + num, n, ok := parseToken(c.s, c.max) + if ok != c.ok { + t.Errorf("parseToken(%q, %d): got ok=%t want ok=%t", c.s, c.max, ok, c.ok) + continue + } + if !ok { + continue + } + if num != c.num || n != c.n { + t.Errorf("parseToken(%q, %d): got %d,%d,%t want %d,%d,%t", c.s, c.max, num, n, ok, c.num, c.n, c.ok) + } + } +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 0000000..e708c03 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,437 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. 
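+ // Setting it lets tests point this package at a fake metadata
+ // server, e.g. GCE_METADATA_HOST=127.0.0.1:8080 (the port here is
+ // only an example); TestOnGCE_Force below relies on this.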
+ metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + } + subscribeClient = &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. +func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, metaClient, req) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. 
+func systemInfoSuggestsGCE() bool {
+ if runtime.GOOS != "linux" {
+ // We don't have any non-Linux clues available, at least yet.
+ return false
+ }
+ slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+ name := strings.TrimSpace(string(slurp))
+ return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ const failedSubscribeSleep = time.Second * 5
+
+ // First check to see if the metadata value exists at all.
+ val, lastETag, err := getETag(subscribeClient, suffix)
+ if err != nil {
+ return err
+ }
+
+ if err := fn(val, true); err != nil {
+ return err
+ }
+
+ ok := true
+ if strings.ContainsRune(suffix, '?') {
+ suffix += "&wait_for_change=true&last_etag="
+ } else {
+ suffix += "?wait_for_change=true&last_etag="
+ }
+ for {
+ val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+ if err != nil {
+ if _, deleted := err.(NotDefinedError); !deleted {
+ time.Sleep(failedSubscribeSleep)
+ continue // Retry on other errors.
+ }
+ ok = false
+ }
+ lastETag = etag
+
+ if err := fn(val, ok); err != nil || !ok {
+ return err
+ }
+ }
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+ return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+ var s []string
+ j, err := Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+ return instID.get()
+}
+
+// InstanceName returns the current VM's instance name string.
+func InstanceName() (string, error) {
+ host, err := Hostname()
+ if err != nil {
+ return "", err
+ }
+ return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+ zone, err := getTrimmed("instance/zone")
+ // zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go new file mode 100644 index 0000000..9ac5926 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go @@ -0,0 +1,48 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "os" + "sync" + "testing" +) + +func TestOnGCE_Stress(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + var last bool + for i := 0; i < 100; i++ { + onGCEOnce = sync.Once{} + + now := OnGCE() + if i > 0 && now != last { + t.Errorf("%d. 
changed from %v to %v", i, last, now) + } + last = now + } + t.Logf("OnGCE() = %v", last) +} + +func TestOnGCE_Force(t *testing.T) { + onGCEOnce = sync.Once{} + old := os.Getenv(metadataHostEnv) + defer os.Setenv(metadataHostEnv, old) + os.Setenv(metadataHostEnv, "127.0.0.1") + if !OnGCE() { + t.Error("OnGCE() = false; want true") + } +} diff --git a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go new file mode 100644 index 0000000..55980c5 --- /dev/null +++ b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go @@ -0,0 +1,674 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package container + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + containerpb "google.golang.org/genproto/googleapis/container/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient. 
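+// Methods marked idempotent retry on DeadlineExceeded and Unavailable with
+// exponential backoff (100ms initial delay, 1.3x multiplier, 60s cap), as
+// configured in defaultClusterManagerCallOptions below.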
+type ClusterManagerCallOptions struct { + ListClusters []gax.CallOption + GetCluster []gax.CallOption + CreateCluster []gax.CallOption + UpdateCluster []gax.CallOption + UpdateNodePool []gax.CallOption + SetNodePoolAutoscaling []gax.CallOption + SetLoggingService []gax.CallOption + SetMonitoringService []gax.CallOption + SetAddonsConfig []gax.CallOption + SetLocations []gax.CallOption + UpdateMaster []gax.CallOption + SetMasterAuth []gax.CallOption + DeleteCluster []gax.CallOption + ListOperations []gax.CallOption + GetOperation []gax.CallOption + CancelOperation []gax.CallOption + GetServerConfig []gax.CallOption + ListNodePools []gax.CallOption + GetNodePool []gax.CallOption + CreateNodePool []gax.CallOption + DeleteNodePool []gax.CallOption + RollbackNodePoolUpgrade []gax.CallOption + SetNodePoolManagement []gax.CallOption + SetLabels []gax.CallOption + SetLegacyAbac []gax.CallOption + StartIPRotation []gax.CallOption + CompleteIPRotation []gax.CallOption + SetNodePoolSize []gax.CallOption + SetNetworkPolicy []gax.CallOption + SetMaintenancePolicy []gax.CallOption +} + +func defaultClusterManagerClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("container.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultClusterManagerCallOptions() *ClusterManagerCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ClusterManagerCallOptions{ + ListClusters: retry[[2]string{"default", "idempotent"}], + GetCluster: retry[[2]string{"default", "idempotent"}], + CreateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateCluster: retry[[2]string{"default", "non_idempotent"}], + UpdateNodePool: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolAutoscaling: retry[[2]string{"default", "non_idempotent"}], + SetLoggingService: retry[[2]string{"default", "non_idempotent"}], + SetMonitoringService: retry[[2]string{"default", "non_idempotent"}], + SetAddonsConfig: retry[[2]string{"default", "non_idempotent"}], + SetLocations: retry[[2]string{"default", "non_idempotent"}], + UpdateMaster: retry[[2]string{"default", "non_idempotent"}], + SetMasterAuth: retry[[2]string{"default", "non_idempotent"}], + DeleteCluster: retry[[2]string{"default", "idempotent"}], + ListOperations: retry[[2]string{"default", "idempotent"}], + GetOperation: retry[[2]string{"default", "idempotent"}], + CancelOperation: retry[[2]string{"default", "non_idempotent"}], + GetServerConfig: retry[[2]string{"default", "idempotent"}], + ListNodePools: retry[[2]string{"default", "idempotent"}], + GetNodePool: retry[[2]string{"default", "idempotent"}], + CreateNodePool: retry[[2]string{"default", "non_idempotent"}], + DeleteNodePool: retry[[2]string{"default", "idempotent"}], + RollbackNodePoolUpgrade: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolManagement: retry[[2]string{"default", "non_idempotent"}], + SetLabels: retry[[2]string{"default", "non_idempotent"}], + SetLegacyAbac: retry[[2]string{"default", "non_idempotent"}], + StartIPRotation: retry[[2]string{"default", "non_idempotent"}], + CompleteIPRotation: retry[[2]string{"default", "non_idempotent"}], + SetNodePoolSize: retry[[2]string{"default", "non_idempotent"}], + SetNetworkPolicy: 
retry[[2]string{"default", "non_idempotent"}], + SetMaintenancePolicy: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ClusterManagerClient is a client for interacting with Google Container Engine API. +type ClusterManagerClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + clusterManagerClient containerpb.ClusterManagerClient + + // The call options for this service. + CallOptions *ClusterManagerCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClusterManagerClient creates a new cluster manager client. +// +// Google Container Engine Cluster Manager v1 +func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClusterManagerClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ClusterManagerClient{ + conn: conn, + CallOptions: defaultClusterManagerCallOptions(), + + clusterManagerClient: containerpb.NewClusterManagerClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ClusterManagerClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ClusterManagerClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ListClusters lists all clusters owned by a project in either the specified zone or all +// zones. +func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...) + var resp *containerpb.ListClustersResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetCluster gets the details of a specific cluster. +func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...) + var resp *containerpb.Cluster + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateCluster creates a cluster, consisting of the specified number and type of Google +// Compute Engine instances. 
+// +// By default, the cluster is created in the project's +// default network (at /compute/docs/networks-and-firewalls#networks). +// +// One firewall is added for the cluster. After cluster creation, +// the cluster creates routes for each node to allow the containers +// on that node to communicate with all other instances in the +// cluster. +// +// Finally, an entry is added to the project's global metadata indicating +// which CIDR range is being used by the cluster. +func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateCluster updates the settings of a specific cluster. +func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateNodePool updates the version and/or image type of a specific node pool. +func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool. +func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLoggingService sets the logging service of a specific cluster. 
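+// The request's LoggingService field names the logging service the cluster
+// should use, e.g. "logging.googleapis.com" or "none" (per the v1 API docs).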
+func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetMonitoringService sets the monitoring service of a specific cluster. +func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetAddonsConfig sets the addons of a specific cluster. +func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLocations sets the locations of a specific cluster. +func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateMaster updates the master of a specific cluster. +func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// SetMasterAuth used to set master auth materials. Currently supports :- +// Changing the admin password of a specific cluster. +// This can be either via password generation or explicitly set the password. +func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker +// nodes. +// +// Firewalls and routes that were configured during cluster creation +// are also deleted. +// +// Other Google Compute Engine resources that might be in use by the cluster +// (e.g. load balancer resources) will not be deleted if they weren't present +// at the initial create time. +func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListOperations lists all operations in a project in a specific zone or all zones. +func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) + var resp *containerpb.ListOperationsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetOperation gets the specified operation. +func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CancelOperation cancels the specified operation. 
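+// Unlike the other wrappers in this file, it returns only an error: the
+// underlying RPC responds with an Empty message, which is discarded.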
+func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetServerConfig returns configuration info about the Container Engine service. +func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...) + var resp *containerpb.ServerConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListNodePools lists the node pools for a cluster. +func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...) + var resp *containerpb.ListNodePoolsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetNodePool retrieves the node pool requested. +func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...) + var resp *containerpb.NodePool + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateNodePool creates a node pool for a cluster. +func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteNodePool deletes a node pool from a cluster. 
+func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RollbackNodePoolUpgrade roll back the previously Aborted or Failed NodePool upgrade. +// This will be an no-op if the last upgrade successfully completed. +func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetNodePoolManagement sets the NodeManagement options for a node pool. +func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLabels sets labels on a cluster. +func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster. +func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...) + var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...) 
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// StartIPRotation starts master IP rotation.
+func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
+	var resp *containerpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// CompleteIPRotation completes master IP rotation.
+func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
+	var resp *containerpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SetNodePoolSize sets the size of a specific node pool.
+func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
+	var resp *containerpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SetNetworkPolicy enables or disables Network Policy for a cluster.
+func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
+	var resp *containerpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SetMaintenancePolicy sets the maintenance policy for a cluster.
+func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...)
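+	// The three-index slice expression in the append above caps the
+	// slice's capacity at its length, so appending the per-call opts
+	// allocates a fresh backing array instead of mutating the shared
+	// CallOptions slice.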
+ var resp *containerpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go new file mode 100644 index 0000000..2640175 --- /dev/null +++ b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go @@ -0,0 +1,571 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package container_test + +import ( + "cloud.google.com/go/container/apiv1" + "golang.org/x/net/context" + containerpb "google.golang.org/genproto/googleapis/container/v1" +) + +func ExampleNewClusterManagerClient() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClusterManagerClient_ListClusters() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.ListClustersRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListClusters(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_GetCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CreateCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CreateClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_UpdateCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.UpdateClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_UpdateNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.UpdateNodePoolRequest{ + // TODO: Fill request struct fields. 
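+		// For instance ProjectId, Zone, ClusterId, NodePoolId, NodeVersion
+		// and ImageType, as exercised by the mock tests later in this change.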
+ } + resp, err := c.UpdateNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNodePoolAutoscaling() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNodePoolAutoscalingRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNodePoolAutoscaling(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLoggingService() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLoggingServiceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLoggingService(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetMonitoringService() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetMonitoringServiceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetMonitoringService(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetAddonsConfig() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetAddonsConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetAddonsConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLocations() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLocationsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLocations(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_UpdateMaster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.UpdateMasterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateMaster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetMasterAuth() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetMasterAuthRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetMasterAuth(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_DeleteCluster() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.DeleteClusterRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteCluster(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_ListOperations() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. 
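+		// In a real program this would typically log the error or return
+		// it to the caller; the generated examples leave the choice open.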
+ } + + req := &containerpb.ListOperationsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListOperations(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_GetOperation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetOperationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CancelOperation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CancelOperationRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClusterManagerClient_GetServerConfig() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetServerConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServerConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_ListNodePools() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.ListNodePoolsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListNodePools(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_GetNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.GetNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CreateNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CreateNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_DeleteNodePool() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.DeleteNodePoolRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteNodePool(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_RollbackNodePoolUpgrade() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.RollbackNodePoolUpgradeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RollbackNodePoolUpgrade(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
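+	// resp is a containerpb.Operation describing the server-side rollback;
+	// callers typically poll GetOperation until it reports completion.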
+ _ = resp +} + +func ExampleClusterManagerClient_SetNodePoolManagement() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNodePoolManagementRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNodePoolManagement(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLabels() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLabelsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLabels(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetLegacyAbac() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetLegacyAbacRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetLegacyAbac(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_StartIPRotation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.StartIPRotationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.StartIPRotation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_CompleteIPRotation() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.CompleteIPRotationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CompleteIPRotation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNodePoolSize() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNodePoolSizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNodePoolSize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetNetworkPolicy() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetNetworkPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetNetworkPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClusterManagerClient_SetMaintenancePolicy() { + ctx := context.Background() + c, err := container.NewClusterManagerClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &containerpb.SetMaintenancePolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetMaintenancePolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/container/apiv1/doc.go b/vendor/cloud.google.com/go/container/apiv1/doc.go new file mode 100644 index 0000000..6d301f0 --- /dev/null +++ b/vendor/cloud.google.com/go/container/apiv1/doc.go @@ -0,0 +1,48 @@ +// Copyright 2017, Google LLC All rights reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package container is an auto-generated package for the
+// Google Container Engine API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// The Google Kubernetes Engine API is used for building and managing
+// container-based applications, powered by the open source Kubernetes
+// technology.
+package container // import "cloud.google.com/go/container/apiv1"
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+	out, _ := metadata.FromOutgoingContext(ctx)
+	out = out.Copy()
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+	return []string{
+		"https://www.googleapis.com/auth/cloud-platform",
+	}
+}
diff --git a/vendor/cloud.google.com/go/container/apiv1/mock_test.go b/vendor/cloud.google.com/go/container/apiv1/mock_test.go
new file mode 100644
index 0000000..aefcc33
--- /dev/null
+++ b/vendor/cloud.google.com/go/container/apiv1/mock_test.go
@@ -0,0 +1,2912 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package container
+
+import (
+	emptypb "github.com/golang/protobuf/ptypes/empty"
+	containerpb "google.golang.org/genproto/googleapis/container/v1"
+)
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	gstatus "google.golang.org/grpc/status"
+)
+
+var _ = io.EOF
+var _ = ptypes.MarshalAny
+var _ status.Status
+
+type mockClusterManagerServer struct {
+	// Embed for forward compatibility.
+	// Tests will keep working if more methods are added
+	// in the future.
+	containerpb.ClusterManagerServer
+
+	reqs []proto.Message
+
+	// If set, all calls return this error.
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockClusterManagerServer) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest) (*containerpb.ListClustersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ListClustersResponse), nil +} + +func (s *mockClusterManagerServer) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest) (*containerpb.Cluster, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Cluster), nil +} + +func (s *mockClusterManagerServer) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s 
*mockClusterManagerServer) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest) (*containerpb.ListOperationsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ListOperationsResponse), nil +} + +func (s *mockClusterManagerServer) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest) 
(*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockClusterManagerServer) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest) (*containerpb.ServerConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ServerConfig), nil +} + +func (s *mockClusterManagerServer) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest) (*containerpb.ListNodePoolsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.ListNodePoolsResponse), nil +} + +func (s *mockClusterManagerServer) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest) (*containerpb.NodePool, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.NodePool), nil +} + +func (s *mockClusterManagerServer) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := 
md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +func (s *mockClusterManagerServer) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest) (*containerpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*containerpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockClusterManager mockClusterManagerServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + containerpb.RegisterClusterManagerServer(serv, &mockClusterManager) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestClusterManagerListClusters(t *testing.T) { + var expectedResponse *containerpb.ListClustersResponse = &containerpb.ListClustersResponse{} + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var request = &containerpb.ListClustersRequest{ + ProjectId: projectId, + Zone: zone, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListClusters(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerListClustersError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var request = &containerpb.ListClustersRequest{ + ProjectId: projectId, + Zone: zone, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListClusters(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerGetCluster(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var initialNodeCount int32 = 1682564205 + var loggingService string = "loggingService-1700501035" + var monitoringService string = "monitoringService1469270462" + var network string = "network1843485230" + var clusterIpv4Cidr string = "clusterIpv4Cidr-141875831" + var subnetwork string = "subnetwork-1302785042" + var enableKubernetesAlpha bool = false + var labelFingerprint string = 
"labelFingerprint714995737" + var selfLink string = "selfLink-1691268851" + var zone2 string = "zone2-696322977" + var endpoint string = "endpoint1741102485" + var initialClusterVersion string = "initialClusterVersion-276373352" + var currentMasterVersion string = "currentMasterVersion-920953983" + var currentNodeVersion string = "currentNodeVersion-407476063" + var createTime string = "createTime-493574096" + var statusMessage string = "statusMessage-239442758" + var nodeIpv4CidrSize int32 = 1181176815 + var servicesIpv4Cidr string = "servicesIpv4Cidr1966438125" + var currentNodeCount int32 = 178977560 + var expireTime string = "expireTime-96179731" + var expectedResponse = &containerpb.Cluster{ + Name: name, + Description: description, + InitialNodeCount: initialNodeCount, + LoggingService: loggingService, + MonitoringService: monitoringService, + Network: network, + ClusterIpv4Cidr: clusterIpv4Cidr, + Subnetwork: subnetwork, + EnableKubernetesAlpha: enableKubernetesAlpha, + LabelFingerprint: labelFingerprint, + SelfLink: selfLink, + Zone: zone2, + Endpoint: endpoint, + InitialClusterVersion: initialClusterVersion, + CurrentMasterVersion: currentMasterVersion, + CurrentNodeVersion: currentNodeVersion, + CreateTime: createTime, + StatusMessage: statusMessage, + NodeIpv4CidrSize: nodeIpv4CidrSize, + ServicesIpv4Cidr: servicesIpv4Cidr, + CurrentNodeCount: currentNodeCount, + ExpireTime: expireTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.GetClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetCluster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerGetClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.GetClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetCluster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerCreateCluster(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, 
+ Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var cluster *containerpb.Cluster = &containerpb.Cluster{} + var request = &containerpb.CreateClusterRequest{ + ProjectId: projectId, + Zone: zone, + Cluster: cluster, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateCluster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerCreateClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var cluster *containerpb.Cluster = &containerpb.Cluster{} + var request = &containerpb.CreateClusterRequest{ + ProjectId: projectId, + Zone: zone, + Cluster: cluster, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateCluster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerUpdateCluster(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var update *containerpb.ClusterUpdate = &containerpb.ClusterUpdate{} + var request = &containerpb.UpdateClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Update: update, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateCluster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerUpdateClusterError(t *testing.T) { + errCode := codes.PermissionDenied + 
mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var update *containerpb.ClusterUpdate = &containerpb.ClusterUpdate{} + var request = &containerpb.UpdateClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Update: update, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateCluster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerUpdateNodePool(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeVersion string = "nodeVersion1790136219" + var imageType string = "imageType-1442758754" + var request = &containerpb.UpdateNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeVersion: nodeVersion, + ImageType: imageType, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNodePool(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerUpdateNodePoolError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeVersion string = "nodeVersion1790136219" + var imageType string = "imageType-1442758754" + var request = &containerpb.UpdateNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeVersion: nodeVersion, + ImageType: imageType, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateNodePool(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func 
TestClusterManagerSetNodePoolAutoscaling(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var autoscaling *containerpb.NodePoolAutoscaling = &containerpb.NodePoolAutoscaling{} + var request = &containerpb.SetNodePoolAutoscalingRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + Autoscaling: autoscaling, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolAutoscaling(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetNodePoolAutoscalingError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var autoscaling *containerpb.NodePoolAutoscaling = &containerpb.NodePoolAutoscaling{} + var request = &containerpb.SetNodePoolAutoscalingRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + Autoscaling: autoscaling, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolAutoscaling(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetLoggingService(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = 
"projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var loggingService string = "loggingService-1700501035" + var request = &containerpb.SetLoggingServiceRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + LoggingService: loggingService, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLoggingService(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetLoggingServiceError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var loggingService string = "loggingService-1700501035" + var request = &containerpb.SetLoggingServiceRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + LoggingService: loggingService, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLoggingService(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetMonitoringService(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var monitoringService string = "monitoringService1469270462" + var request = &containerpb.SetMonitoringServiceRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MonitoringService: monitoringService, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMonitoringService(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetMonitoringServiceError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + 
var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var monitoringService string = "monitoringService1469270462" + var request = &containerpb.SetMonitoringServiceRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MonitoringService: monitoringService, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMonitoringService(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetAddonsConfig(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var addonsConfig *containerpb.AddonsConfig = &containerpb.AddonsConfig{} + var request = &containerpb.SetAddonsConfigRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + AddonsConfig: addonsConfig, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetAddonsConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetAddonsConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var addonsConfig *containerpb.AddonsConfig = &containerpb.AddonsConfig{} + var request = &containerpb.SetAddonsConfigRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + AddonsConfig: addonsConfig, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetAddonsConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetLocations(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var 
startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var locations []string = nil + var request = &containerpb.SetLocationsRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Locations: locations, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLocations(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetLocationsError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var locations []string = nil + var request = &containerpb.SetLocationsRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Locations: locations, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLocations(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerUpdateMaster(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var masterVersion string = "masterVersion-2139460613" + var request = &containerpb.UpdateMasterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MasterVersion: masterVersion, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateMaster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + 
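+ // The mock records every request it receives in reqs; the check above asserts that + // the client forwarded the request unchanged, and the proto.Equal check below asserts + // that the canned response came back intact.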
+ if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerUpdateMasterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var masterVersion string = "masterVersion-2139460613" + var request = &containerpb.UpdateMasterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MasterVersion: masterVersion, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateMaster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetMasterAuth(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var action containerpb.SetMasterAuthRequest_Action = containerpb.SetMasterAuthRequest_UNKNOWN + var update *containerpb.MasterAuth = &containerpb.MasterAuth{} + var request = &containerpb.SetMasterAuthRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Action: action, + Update: update, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMasterAuth(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetMasterAuthError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var action containerpb.SetMasterAuthRequest_Action = containerpb.SetMasterAuthRequest_UNKNOWN + var update *containerpb.MasterAuth = &containerpb.MasterAuth{} + var request = &containerpb.SetMasterAuthRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Action: action, + Update: update, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMasterAuth(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + 
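+ // gstatus.FromError unwraps the gRPC *status.Status from the returned error; if the + // unwrap fails, the client surfaced something other than a gRPC error.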
t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerDeleteCluster(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.DeleteClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteCluster(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerDeleteClusterError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.DeleteClusterRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteCluster(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerListOperations(t *testing.T) { + var expectedResponse *containerpb.ListOperationsResponse = &containerpb.ListOperationsResponse{} + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var request = &containerpb.ListOperationsRequest{ + ProjectId: projectId, + Zone: zone, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListOperations(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerListOperationsError(t *testing.T) { + errCode := codes.PermissionDenied + 
mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var request = &containerpb.ListOperationsRequest{ + ProjectId: projectId, + Zone: zone, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListOperations(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerGetOperation(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var operationId string = "operationId-274116877" + var request = &containerpb.GetOperationRequest{ + ProjectId: projectId, + Zone: zone, + OperationId: operationId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerGetOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var operationId string = "operationId-274116877" + var request = &containerpb.GetOperationRequest{ + ProjectId: projectId, + Zone: zone, + OperationId: operationId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerCancelOperation(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var operationId string = "operationId-274116877" + var request = &containerpb.CancelOperationRequest{ + ProjectId: projectId, + Zone: zone, + OperationId: operationId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil 
{ + t.Fatal(err) + } + + err = c.CancelOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestClusterManagerCancelOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var operationId string = "operationId-274116877" + var request = &containerpb.CancelOperationRequest{ + ProjectId: projectId, + Zone: zone, + OperationId: operationId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestClusterManagerGetServerConfig(t *testing.T) { + var defaultClusterVersion string = "defaultClusterVersion111003029" + var defaultImageType string = "defaultImageType-918225828" + var expectedResponse = &containerpb.ServerConfig{ + DefaultClusterVersion: defaultClusterVersion, + DefaultImageType: defaultImageType, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var request = &containerpb.GetServerConfigRequest{ + ProjectId: projectId, + Zone: zone, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServerConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerGetServerConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var request = &containerpb.GetServerConfigRequest{ + ProjectId: projectId, + Zone: zone, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServerConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerListNodePools(t *testing.T) { + var expectedResponse *containerpb.ListNodePoolsResponse = &containerpb.ListNodePoolsResponse{} + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.ListNodePoolsRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != 
nil { + t.Fatal(err) + } + + resp, err := c.ListNodePools(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerListNodePoolsError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.ListNodePoolsRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListNodePools(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerGetNodePool(t *testing.T) { + var name string = "name3373707" + var initialNodeCount int32 = 1682564205 + var selfLink string = "selfLink-1691268851" + var version string = "version351608024" + var statusMessage string = "statusMessage-239442758" + var expectedResponse = &containerpb.NodePool{ + Name: name, + InitialNodeCount: initialNodeCount, + SelfLink: selfLink, + Version: version, + StatusMessage: statusMessage, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.GetNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNodePool(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerGetNodePoolError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.GetNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetNodePool(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerCreateNodePool(t *testing.T) { + var name string = "name3373707" + var zone2 
string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePool *containerpb.NodePool = &containerpb.NodePool{} + var request = &containerpb.CreateNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePool: nodePool, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateNodePool(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerCreateNodePoolError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePool *containerpb.NodePool = &containerpb.NodePool{} + var request = &containerpb.CreateNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePool: nodePool, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateNodePool(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerDeleteNodePool(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.DeleteNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), 
clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteNodePool(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerDeleteNodePoolError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.DeleteNodePoolRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteNodePool(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerRollbackNodePoolUpgrade(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.RollbackNodePoolUpgradeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RollbackNodePoolUpgrade(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerRollbackNodePoolUpgradeError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var request = &containerpb.RollbackNodePoolUpgradeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.RollbackNodePoolUpgrade(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetNodePoolManagement(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var management *containerpb.NodeManagement = &containerpb.NodeManagement{} + var request = &containerpb.SetNodePoolManagementRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + Management: management, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolManagement(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetNodePoolManagementError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var management *containerpb.NodeManagement = &containerpb.NodeManagement{} + var request = &containerpb.SetNodePoolManagementRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + Management: management, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolManagement(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetLabels(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: 
targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var resourceLabels map[string]string = nil + var labelFingerprint string = "labelFingerprint714995737" + var request = &containerpb.SetLabelsRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + ResourceLabels: resourceLabels, + LabelFingerprint: labelFingerprint, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLabels(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetLabelsError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var resourceLabels map[string]string = nil + var labelFingerprint string = "labelFingerprint714995737" + var request = &containerpb.SetLabelsRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + ResourceLabels: resourceLabels, + LabelFingerprint: labelFingerprint, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLabels(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetLegacyAbac(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var enabled bool = false + var request = &containerpb.SetLegacyAbacRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Enabled: enabled, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLegacyAbac(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetLegacyAbacError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var enabled bool = false + var request = &containerpb.SetLegacyAbacRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + Enabled: enabled, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetLegacyAbac(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerStartIPRotation(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.StartIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.StartIPRotation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerStartIPRotationError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.StartIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.StartIPRotation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerCompleteIPRotation(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = 
"targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.CompleteIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CompleteIPRotation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerCompleteIPRotationError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var request = &containerpb.CompleteIPRotationRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CompleteIPRotation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetNodePoolSize(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeCount int32 = 1539922066 + var request = &containerpb.SetNodePoolSizeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeCount: nodeCount, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolSize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + 
} + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetNodePoolSizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var nodePoolId string = "nodePoolId1043384033" + var nodeCount int32 = 1539922066 + var request = &containerpb.SetNodePoolSizeRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NodePoolId: nodePoolId, + NodeCount: nodeCount, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNodePoolSize(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetNetworkPolicy(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var networkPolicy *containerpb.NetworkPolicy = &containerpb.NetworkPolicy{} + var request = &containerpb.SetNetworkPolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NetworkPolicy: networkPolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNetworkPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetNetworkPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var networkPolicy *containerpb.NetworkPolicy = &containerpb.NetworkPolicy{} + var request = &containerpb.SetNetworkPolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + NetworkPolicy: networkPolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetNetworkPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got 
error code %q, want %q", c, errCode) + } + _ = resp +} +func TestClusterManagerSetMaintenancePolicy(t *testing.T) { + var name string = "name3373707" + var zone2 string = "zone2-696322977" + var detail string = "detail-1335224239" + var statusMessage string = "statusMessage-239442758" + var selfLink string = "selfLink-1691268851" + var targetLink string = "targetLink-2084812312" + var startTime string = "startTime-1573145462" + var endTime string = "endTime1725551537" + var expectedResponse = &containerpb.Operation{ + Name: name, + Zone: zone2, + Detail: detail, + StatusMessage: statusMessage, + SelfLink: selfLink, + TargetLink: targetLink, + StartTime: startTime, + EndTime: endTime, + } + + mockClusterManager.err = nil + mockClusterManager.reqs = nil + + mockClusterManager.resps = append(mockClusterManager.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var maintenancePolicy *containerpb.MaintenancePolicy = &containerpb.MaintenancePolicy{} + var request = &containerpb.SetMaintenancePolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MaintenancePolicy: maintenancePolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMaintenancePolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockClusterManager.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestClusterManagerSetMaintenancePolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockClusterManager.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var zone string = "zone3744684" + var clusterId string = "clusterId240280960" + var maintenancePolicy *containerpb.MaintenancePolicy = &containerpb.MaintenancePolicy{} + var request = &containerpb.SetMaintenancePolicyRequest{ + ProjectId: projectId, + Zone: zone, + ClusterId: clusterId, + MaintenancePolicy: maintenancePolicy, + } + + c, err := NewClusterManagerClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetMaintenancePolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/container/container.go b/vendor/cloud.google.com/go/container/container.go new file mode 100644 index 0000000..9d4074e --- /dev/null +++ b/vendor/cloud.google.com/go/container/container.go @@ -0,0 +1,272 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package container contains a deprecated Google Container Engine client. +// +// Deprecated: Use google.golang.org/api/container instead. +package container // import "cloud.google.com/go/container" + +import ( + "errors" + "fmt" + "time" + + "golang.org/x/net/context" + raw "google.golang.org/api/container/v1" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +type Type string + +const ( + TypeCreate = Type("createCluster") + TypeDelete = Type("deleteCluster") +) + +type Status string + +const ( + StatusDone = Status("done") + StatusPending = Status("pending") + StatusRunning = Status("running") + StatusError = Status("error") + StatusProvisioning = Status("provisioning") + StatusStopping = Status("stopping") +) + +const prodAddr = "https://container.googleapis.com/" +const userAgent = "gcloud-golang-container/20151008" + +// Client is a Google Container Engine client, which may be used to manage +// clusters with a project. It must be constructed via NewClient. +type Client struct { + projectID string + svc *raw.Service +} + +// NewClient creates a new Google Container Engine client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(raw.CloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := htransport.NewClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + svc, err := raw.New(httpClient) + if err != nil { + return nil, fmt.Errorf("constructing container client: %v", err) + } + svc.BasePath = endpoint + + c := &Client{ + projectID: projectID, + svc: svc, + } + + return c, nil +} + +// Resource is a Google Container Engine cluster resource. +type Resource struct { + // Name is the name of this cluster. The name must be unique + // within this project and zone, and can be up to 40 characters. + Name string + + // Description is the description of the cluster. Optional. + Description string + + // Zone is the Google Compute Engine zone in which the cluster resides. + Zone string + + // Status is the current status of the cluster. It could either be + // StatusError, StatusProvisioning, StatusRunning or StatusStopping. + Status Status + + // Num is the number of the nodes in this cluster resource. + Num int64 + + // APIVersion is the version of the Kubernetes master and kubelets running + // in this cluster. Allowed value is 0.4.2, or leave blank to + // pick up the latest stable release. + APIVersion string + + // Endpoint is the IP address of this cluster's Kubernetes master. + // The endpoint can be accessed at https://username:password@endpoint/. + // See Username and Password fields for the username and password information. + Endpoint string + + // Username is the username to use when accessing the Kubernetes master endpoint. + Username string + + // Password is the password to use when accessing the Kubernetes master endpoint. + Password string + + // ContainerIPv4CIDR is the IP addresses of the container pods in + // this cluster, in CIDR notation (e.g. 1.2.3.4/29). + ContainerIPv4CIDR string + + // ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this + // cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are + // always in the 10.0.0.0/16 range. + ServicesIPv4CIDR string + + // MachineType is a Google Compute Engine machine type (e.g. n1-standard-1). 
+ // If none set, the default type is used while creating a new cluster. + MachineType string + + // This field is ignored. It was removed from the underlying container API in v1. + SourceImage string + + // Created is the creation time of this cluster. + Created time.Time +} + +func resourceFromRaw(c *raw.Cluster) *Resource { + if c == nil { + return nil + } + r := &Resource{ + Name: c.Name, + Description: c.Description, + Zone: c.Zone, + Status: Status(c.Status), + Num: c.CurrentNodeCount, + APIVersion: c.InitialClusterVersion, + Endpoint: c.Endpoint, + Username: c.MasterAuth.Username, + Password: c.MasterAuth.Password, + ContainerIPv4CIDR: c.ClusterIpv4Cidr, + ServicesIPv4CIDR: c.ServicesIpv4Cidr, + MachineType: c.NodeConfig.MachineType, + } + r.Created, _ = time.Parse(time.RFC3339, c.CreateTime) + return r +} + +func resourcesFromRaw(c []*raw.Cluster) []*Resource { + r := make([]*Resource, len(c)) + for i, val := range c { + r[i] = resourceFromRaw(val) + } + return r +} + +// Op represents a Google Container Engine API operation. +type Op struct { + // Name is the name of the operation. + Name string + + // Zone is the Google Compute Engine zone. + Zone string + + // This field is ignored. It was removed from the underlying container API in v1. + TargetURL string + + // Type is the operation type. It can be either TypeCreate or TypeDelete. + Type Type + + // Status is the current status of this operation. It can be either + // StatusDone or StatusPending. + Status Status +} + +func opFromRaw(o *raw.Operation) *Op { + if o == nil { + return nil + } + return &Op{ + Name: o.Name, + Zone: o.Zone, + Type: Type(o.OperationType), + Status: Status(o.Status), + } +} + +func opsFromRaw(o []*raw.Operation) []*Op { + ops := make([]*Op, len(o)) + for i, val := range o { + ops[i] = opFromRaw(val) + } + return ops +} + +// Clusters returns a list of cluster resources from the specified zone. +// If no zone is specified, it returns all clusters under the user's project. +func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) { + if zone == "" { + zone = "-" + } + resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do() + if err != nil { + return nil, err + } + return resourcesFromRaw(resp.Clusters), nil +} + +// Cluster returns metadata about the specified cluster. +func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) { + resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do() + if err != nil { + return nil, err + } + return resourceFromRaw(resp), nil +} + +// CreateCluster creates a new cluster with the provided metadata +// in the specified zone. +func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) { + panic("not implemented") +} + +// DeleteCluster deletes a cluster. +func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error { + _, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do() + return err +} + +// Operations returns a list of operations from the specified zone. +// If no zone is specified, it lists all of the operations +// running under the user's project.
+func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) { + if zone == "" { + resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do() + if err != nil { + return nil, err + } + return opsFromRaw(resp.Operations), nil + } + resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do() + if err != nil { + return nil, err + } + return opsFromRaw(resp.Operations), nil +} + +// Operation returns an operation. +func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) { + resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do() + if err != nil { + return nil, err + } + if resp.StatusMessage != "" { + return nil, errors.New(resp.StatusMessage) + } + return opFromRaw(resp), nil +} diff --git a/vendor/cloud.google.com/go/datastore/client.go b/vendor/cloud.google.com/go/datastore/client.go new file mode 100644 index 0000000..940bfec --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/client.go @@ -0,0 +1,118 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + + gax "github.com/googleapis/gax-go" + + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/version" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC +// metadata to be sent in each request for server-side traffic management. +type datastoreClient struct { + // Embed so we still implement the DatastoreClient interface, + // if the interface adds more methods. + pb.DatastoreClient + + c pb.DatastoreClient + md metadata.MD +} + +func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient { + return &datastoreClient{ + c: pb.NewDatastoreClient(conn), + md: metadata.Pairs( + resourcePrefixHeader, "projects/"+projectID, + "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), + } +} + +func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.Lookup(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.RunQuery(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.BeginTransaction(ctx, in, opts...) 
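+ // Like the other thin wrappers in this file, this delegates to dc.invoke, which + // attaches the resource-prefix metadata and retries transient Unavailable and + // DeadlineExceeded errors (see shouldRetry below).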
+ return err + }) + return res, err +} + +func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.Commit(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (res *pb.RollbackResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.Rollback(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (res *pb.AllocateIdsResponse, err error) { + err = dc.invoke(ctx, func(ctx context.Context) error { + res, err = dc.c.AllocateIds(ctx, in, opts...) + return err + }) + return res, err +} + +func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error { + ctx = metadata.NewOutgoingContext(ctx, dc.md) + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = f(ctx) + return !shouldRetry(err), err + }) +} + +func shouldRetry(err error) bool { + if err == nil { + return false + } + s, ok := status.FromError(err) + if !ok { + return false + } + // See https://cloud.google.com/datastore/docs/concepts/errors. + return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded +} diff --git a/vendor/cloud.google.com/go/datastore/datastore.go b/vendor/cloud.google.com/go/datastore/datastore.go new file mode 100644 index 0000000..b35db87 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore.go @@ -0,0 +1,574 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "log" + "os" + "reflect" + + "golang.org/x/net/context" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +const ( + prodAddr = "datastore.googleapis.com:443" + userAgent = "gcloud-golang-datastore/20160401" +) + +// ScopeDatastore grants permissions to view and/or manage datastore entities +const ScopeDatastore = "https://www.googleapis.com/auth/datastore" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" + +// Client is a client for reading and writing data in a datastore dataset. +type Client struct { + conn *grpc.ClientConn + client pb.DatastoreClient + endpoint string + dataset string // Called dataset by the datastore API, synonym for project ID. +} + +// NewClient creates a new Client for a given dataset. +// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable. 
+// If the DATASTORE_EMULATOR_HOST environment variable is set, the client will
+// use its value to connect to a locally-running datastore emulator.
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+ var o []option.ClientOption
+ // Environment variables for gcd emulator:
+ // https://cloud.google.com/datastore/docs/tools/datastore-emulator
+ // If the emulator is available, dial it directly (and don't pass any credentials).
+ if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
+ conn, err := grpc.Dial(addr, grpc.WithInsecure())
+ if err != nil {
+ return nil, fmt.Errorf("grpc.Dial: %v", err)
+ }
+ o = []option.ClientOption{option.WithGRPCConn(conn)}
+ } else {
+ o = []option.ClientOption{
+ option.WithEndpoint(prodAddr),
+ option.WithScopes(ScopeDatastore),
+ option.WithUserAgent(userAgent),
+ }
+ }
+ // Warn if we see the legacy emulator environment variables.
+ if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
+ log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
+ }
+ if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
+ log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
+ }
+ if projectID == "" {
+ projectID = os.Getenv("DATASTORE_PROJECT_ID")
+ }
+ if projectID == "" {
+ return nil, errors.New("datastore: missing project/dataset id")
+ }
+ o = append(o, opts...)
+ conn, err := gtransport.Dial(ctx, o...)
+ if err != nil {
+ return nil, fmt.Errorf("dialing: %v", err)
+ }
+ return &Client{
+ conn: conn,
+ client: newDatastoreClient(conn, projectID),
+ dataset: projectID,
+ }, nil
+}
+
+var (
+ // ErrInvalidEntityType is returned when functions like Get or Next are
+ // passed a dst or src argument of invalid type.
+ ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+ // ErrInvalidKey is returned when an invalid key is presented.
+ ErrInvalidKey = errors.New("datastore: invalid key")
+ // ErrNoSuchEntity is returned when no entity was found for a given key.
+ ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypePropertyLoadSaver
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+ multiArgTypeInterface
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid reports whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
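+// For example, GeoPoint{Lat: 37.42, Lng: -122.08} is valid, while
+// GeoPoint{Lat: 1000, Lng: 34} is not.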
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+func keyToProto(k *Key) *pb.Key {
+ if k == nil {
+ return nil
+ }
+
+ var path []*pb.Key_PathElement
+ for {
+ el := &pb.Key_PathElement{Kind: k.Kind}
+ if k.ID != 0 {
+ el.IdType = &pb.Key_PathElement_Id{Id: k.ID}
+ } else if k.Name != "" {
+ el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
+ }
+ path = append(path, el)
+ if k.Parent == nil {
+ break
+ }
+ k = k.Parent
+ }
+
+ // The path should be in order [grandparent, parent, child].
+ // It was built backward above, so reverse it.
+ for i := 0; i < len(path)/2; i++ {
+ path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
+ }
+
+ key := &pb.Key{Path: path}
+ if k.Namespace != "" {
+ key.PartitionId = &pb.PartitionId{
+ NamespaceId: k.Namespace,
+ }
+ }
+ return key
+}
+
+// protoToKey decodes a protocol buffer representation of a key into an
+// equivalent *Key object. If the key is invalid, protoToKey will return the
+// invalid key along with ErrInvalidKey.
+func protoToKey(p *pb.Key) (*Key, error) {
+ var key *Key
+ var namespace string
+ if partition := p.PartitionId; partition != nil {
+ namespace = partition.NamespaceId
+ }
+ for _, el := range p.Path {
+ key = &Key{
+ Namespace: namespace,
+ Kind: el.Kind,
+ ID: el.GetId(),
+ Name: el.GetName(),
+ Parent: key,
+ }
+ }
+ if !key.valid() { // Also detects key == nil.
+ return key, ErrInvalidKey
+ }
+ return key, nil
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(keys []*Key) []*pb.Key {
+ ret := make([]*pb.Key, len(keys))
+ for i, k := range keys {
+ ret[i] = keyToProto(k)
+ }
+ return ret
+}
+
+// multiProtoToKey is a batch version of protoToKey.
+func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
+ hasErr := false
+ ret := make([]*Key, len(keys))
+ err := make(MultiError, len(keys))
+ for i, k := range keys {
+ ret[i], err[i] = protoToKey(k)
+ if err[i] != nil {
+ hasErr = true
+ }
+ }
+ if hasErr {
+ return nil, err
+ }
+ return ret, nil
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+ invalid := false
+ for _, k := range key {
+ if !k.valid() {
+ invalid = true
+ break
+ }
+ }
+ if !invalid {
+ return nil
+ }
+ err := make(MultiError, len(key))
+ for i, k := range key {
+ if !k.valid() {
+ err[i] = ErrInvalidKey
+ }
+ }
+ return err
+}
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+//
+// TODO(djd): multiArg is very confusing. Fold this logic into the
+// relevant Put/Get methods to make the logic less opaque.
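+//
+// For example, given a plain struct type Post (a hypothetical type used here
+// only for illustration), a []Post argument yields multiArgTypeStruct, a
+// []*Post argument yields multiArgTypeStructPtr, and a PropertyList yields
+// multiArgTypeInvalid.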
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ if v.Type() == typeOfPropertyList {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+ return multiArgTypePropertyLoadSaver, elemType
+ }
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Interface:
+ return multiArgTypeInterface, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+// Close closes the Client.
+func (c *Client) Close() error {
+ return c.conn.Close()
+}
+
+// Get loads the entity stored for key into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero-valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
+ if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
+ return ErrInvalidEntityType
+ }
+ err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
+ if me, ok := err.(MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If dst is a []I, each element must be a valid
+// dst for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
+ return c.get(ctx, keys, dst, nil)
+}
+
+func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
+ v := reflect.ValueOf(dst)
+ multiArgType, _ := checkMultiArg(v)
+
+ // Sanity checks
+ if multiArgType == multiArgTypeInvalid {
+ return errors.New("datastore: dst has invalid type")
+ }
+ if len(keys) != v.Len() {
+ return errors.New("datastore: keys and dst slices have different length")
+ }
+ if len(keys) == 0 {
+ return nil
+ }
+
+ // Go through the keys, validate them, and serialize them.
+ // Equal keys are deduped.
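+ // keyMap maps each key's string form to every index in keys that holds
+ // that key, so a single Lookup result can fill all duplicate slots.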
+ multiErr, any := make(MultiError, len(keys)), false + keyMap := make(map[string][]int, len(keys)) + pbKeys := make([]*pb.Key, 0, len(keys)) + for i, k := range keys { + if !k.valid() { + multiErr[i] = ErrInvalidKey + any = true + } else { + ks := k.String() + if _, ok := keyMap[ks]; !ok { + pbKeys = append(pbKeys, keyToProto(k)) + } + keyMap[ks] = append(keyMap[ks], i) + } + } + if any { + return multiErr + } + req := &pb.LookupRequest{ + ProjectId: c.dataset, + Keys: pbKeys, + ReadOptions: opts, + } + resp, err := c.client.Lookup(ctx, req) + if err != nil { + return err + } + found := resp.Found + missing := resp.Missing + // Upper bound 100 iterations to prevent infinite loop. + // We choose 100 iterations somewhat logically: + // Max number of Entities you can request from Datastore is 1,000. + // Max size for a Datastore Entity is 1 MiB. + // Max request size is 10 MiB, so we assume max response size is also 10 MiB. + // 1,000 / 10 = 100. + // Note that if ctx has a deadline, the deadline will probably + // be hit before we reach 100 iterations. + for i := 0; len(resp.Deferred) > 0 && i < 100; i++ { + req.Keys = resp.Deferred + resp, err = c.client.Lookup(ctx, req) + if err != nil { + return err + } + found = append(found, resp.Found...) + missing = append(missing, resp.Missing...) + } + + filled := 0 + for _, e := range found { + k, err := protoToKey(e.Entity.Key) + if err != nil { + return errors.New("datastore: internal error: server returned an invalid key") + } + filled += len(keyMap[k.String()]) + for _, index := range keyMap[k.String()] { + elem := v.Index(index) + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + if multiArgType == multiArgTypeStructPtr && elem.IsNil() { + elem.Set(reflect.New(elem.Type().Elem())) + } + if err := loadEntityProto(elem.Interface(), e.Entity); err != nil { + multiErr[index] = err + any = true + } + } + } + for _, e := range missing { + k, err := protoToKey(e.Entity.Key) + if err != nil { + return errors.New("datastore: internal error: server returned an invalid key") + } + filled += len(keyMap[k.String()]) + for _, index := range keyMap[k.String()] { + multiErr[index] = ErrNoSuchEntity + } + any = true + } + + if filled != len(keys) { + return errors.New("datastore: internal error: server returned the wrong number of entities") + } + + if any { + return multiErr + } + return nil +} + +// Put saves the entity src into the datastore with key k. src must be a struct +// pointer or implement PropertyLoadSaver; if a struct pointer then any +// unexported fields of that struct will be skipped. If k is an incomplete key, +// the returned key will be a unique key generated by the datastore. +func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { + k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return k[0], nil +} + +// PutMulti is a batch version of Put. +// +// src must satisfy the same conditions as the dst argument to GetMulti. +func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + + // Make the request. 
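+ // The commit is non-transactional: each mutation is applied independently,
+ // as an insert for an incomplete key or an upsert for a complete one
+ // (see putMutations).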
+ req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + resp, err := c.client.Commit(ctx, req) + if err != nil { + return nil, err + } + + // Copy any newly minted keys into the returned keys. + ret := make([]*Key, len(keys)) + for i, key := range keys { + if key.Incomplete() { + // This key is in the mutation results. + ret[i], err = protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } else { + ret[i] = key + } + } + return ret, nil +} + +func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(keys) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(keys) == 0 { + return nil, nil + } + if err := multiValid(keys); err != nil { + return nil, err + } + mutations := make([]*pb.Mutation, 0, len(keys)) + multiErr := make(MultiError, len(keys)) + hasErr := false + for i, k := range keys { + elem := v.Index(i) + // Two cases where we need to take the address: + // 1) multiArgTypePropertyLoadSaver => &elem implements PLS + // 2) multiArgTypeStruct => saveEntity needs *struct + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + p, err := saveEntity(k, elem.Interface()) + if err != nil { + multiErr[i] = err + hasErr = true + } + var mut *pb.Mutation + if k.Incomplete() { + mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}} + } else { + mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}} + } + mutations = append(mutations, mut) + } + if hasErr { + return nil, multiErr + } + return mutations, nil +} + +// Delete deletes the entity for the given key. +func (c *Client) Delete(ctx context.Context, key *Key) error { + err := c.DeleteMulti(ctx, []*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + _, err = c.client.Commit(ctx, req) + return err +} + +func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { + mutations := make([]*pb.Mutation, 0, len(keys)) + set := make(map[string]bool, len(keys)) + for _, k := range keys { + if k.Incomplete() { + return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) + } + ks := k.String() + if !set[ks] { + mutations = append(mutations, &pb.Mutation{ + Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}, + }) + } + set[ks] = true + } + return mutations, nil +} diff --git a/vendor/cloud.google.com/go/datastore/datastore_test.go b/vendor/cloud.google.com/go/datastore/datastore_test.go new file mode 100644 index 0000000..a3b7a6e --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore_test.go @@ -0,0 +1,3455 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return NameKey("kind", stringID, parent) +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) + testGeoPt0 = GeoPoint{Lat: 1.2, Lng: 3.4} + testGeoPt1 = GeoPoint{Lat: 5, Lng: 10} + testBadGeoPt = GeoPoint{Lat: 1000, Lng: 34} + + ts = time.Unix(1e9, 0).UTC() +) + +type B0 struct { + B []byte `datastore:",noindex"` +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob `datastore:",noindex"` +} + +type B3 struct { + B []myByte `datastore:",noindex"` +} + +type B4 struct { + B [][]byte +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type c4 struct { + C string +} + +type E struct{} + +type G0 struct { + G GeoPoint +} + +type G1 struct { + G []GeoPoint +} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type S struct { + St string +} + +type NoOmit struct { + A string + B int `datastore:"Bb"` + C bool `datastore:",noindex"` +} + +type OmitAll struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` +} + +type Omit struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` + S `datastore:",omitempty"` +} + +type NoOmits struct { + No []NoOmit `datastore:",omitempty"` + S `datastore:",omitempty"` + Ss S `datastore:",omitempty"` +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` + Green N1 `datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type N3 struct { + C3 `datastore:"red"` +} + +type N4 struct { + c4 +} + +type N5 struct { + c4 `datastore:"red"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct 
{ + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type InvalidTagged3 struct { + X string `datastore:"-,noindex"` +} + +type InvalidTagged4 struct { + X string `datastore:",garbage"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Inner5 struct { + WW int +} + +type Inner4 struct { + X Inner5 +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterFlatten struct { + A int16 + I []Inner1 `datastore:",flatten"` + J Inner2 `datastore:",flatten,noindex"` + Inner3 `datastore:",flatten"` + K Inner4 `datastore:",flatten"` +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S []struct { + J int + F []float64 + } `datastore:",flatten"` +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type EntityWithKey struct { + I int + S string + K *Key `datastore:"__key__"` +} + +type EntityWithKey2 EntityWithKey + +type WithNestedEntityWithKey struct { + N EntityWithKey +} + +type WithNonKeyField struct { + I int + K string `datastore:"__key__"` +} + +type NestedWithNonKeyField struct { + N WithNonKeyField +} + +type Basic struct { + A string +} + +type PtrToStructField struct { + B *Basic + C *Basic `datastore:"c,noindex"` + *Basic + D []*Basic +} + +var two int = 2 + +type PtrToInt struct { + I *int +} + +type EmbeddedTime struct { + time.Time +} + +type SpecialTime struct { + MyTime EmbeddedTime +} + +type Doubler struct { + S string + I int64 + B bool +} + +type Repeat struct { + Key string + Value []byte +} + +type Repeated struct { + Repeats []Repeat +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). + props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. 
+ list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I". + var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "geopoint", + &G0{G: testGeoPt0}, + &G0{G: testGeoPt0}, + "", + "", + }, + { + "geopoint invalid", + &G0{G: testBadGeoPt}, + &G0{}, + "invalid GeoPoint value", + "", + }, + { + "geopoint as props", + &G0{G: testGeoPt0}, + &PropertyList{ + Property{Name: "G", Value: testGeoPt0, NoIndex: false}, + }, + "", + "", + }, + { + "geopoint slice", + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + "", + "", + }, + { + "omit empty, all", + &OmitAll{}, + new(PropertyList), + "", + "", + }, + { + "omit empty", + &Omit{}, + &PropertyList{ + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false}, + Property{Name: "C", Value: true, NoIndex: true}, + Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + S: S{St: "string"}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false}, + Property{Name: "C", Value: true, NoIndex: true}, + Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, + Property{Name: "St", Value: "string", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty does not propagate", + &NoOmits{ + No: []NoOmit{ + NoOmit{}, + }, + S: S{}, + Ss: S{}, + }, + &PropertyList{ + Property{Name: "No", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "", NoIndex: false}, + Property{Name: "Bb", Value: int64(0), 
NoIndex: false}, + Property{Name: "C", Value: false, NoIndex: true}, + }, + }, + }, NoIndex: false}, + Property{Name: "Ss", Value: &Entity{ + Properties: []Property{ + Property{Name: "St", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + "type mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "slice of slices of bytes", + &Repeated{ + Repeats: []Repeat{ + { + Key: "key 1", + Value: []byte("value 1"), + }, + { + Key: "key 2", + Value: []byte("value 2"), + }, + }, + }, + &Repeated{ + Repeats: []Repeat{ + { + Key: "key 1", + Value: []byte("value 1"), + }, + { + Key: "key 2", + Value: []byte("value 2"), + }, + }, + }, + "", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 
1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "string must be noindex", + &PropertyList{ + Property{Name: "B", Value: strings.Repeat("x", 1501), NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "slice of []byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + []byte("short"), + makeUint8Slice(1501), + }, NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "slice of string must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + "short", + strings.Repeat("x", 1501), + }, NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. + // Order is sorted as per byName. + Property{Name: "C", Value: int64(3), NoIndex: true}, + Property{Name: "D", Value: int64(4), NoIndex: false}, + Property{Name: "E", Value: int64(5), NoIndex: false}, + Property{Name: "J", Value: int64(7), NoIndex: true}, + Property{Name: "a", Value: int64(1), NoIndex: true}, + Property{Name: "b", Value: []interface{}{int64(21), int64(22), int64(23)}, NoIndex: false}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{J: 2}, + "", + "", + }, + { + "invalid tagged3", + &InvalidTagged3{X: "hello"}, + &InvalidTagged3{}, + "struct tag has invalid property name: \"-\"", + "", + }, + { + "invalid tagged4", + &InvalidTagged4{X: "hello"}, + &InvalidTagged4{}, + "struct tag has invalid option: \"garbage\"", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false}, + Property{Name: "B", Value: nil, NoIndex: false}, + Property{Name: "S", Value: nil, NoIndex: false}, + Property{Name: "F", Value: nil, NoIndex: false}, + Property{Name: "K", Value: nil, NoIndex: false}, + Property{Name: "T", Value: nil, NoIndex: false}, + Property{Name: "J", Value: []interface{}{nil, int64(7), nil}, NoIndex: false}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 
7, 0}, + }, + "", + "", + }, + { + "save outer load props flatten", + &OuterFlatten{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + K: Inner4{ + X: Inner5{ + WW: 12, + }, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, + Property{Name: "K.X.WW", Value: int64(12), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "load outer props flatten", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterFlatten{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(10), NoIndex: false}, + Property{Name: "X", Value: "ten", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(20), NoIndex: false}, + Property{Name: "X", Value: "twenty", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(30), NoIndex: false}, + Property{Name: "X", Value: "thirty", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "J", Value: &Entity{ + Properties: []Property{ + Property{Name: "Y", Value: float64(3.14), NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2", Value: &Entity{ + Properties: []Property{ + Property{Name: "B3", Value: &Entity{ + Properties: []Property{ + Property{Name: "C4.C5", Value: int64(88), NoIndex: false}, + }, + }, NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2", Value: &Entity{ + Properties: []Property{ + Property{Name: "B3", Value: &Entity{ + Properties: []Property{ + Property{Name: "C4.C5", Value: 99, NoIndex: false}, + }, + }, NoIndex: false}, + 
}, + }, NoIndex: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + "", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "Blue", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu1", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu2", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", 
Value: "blu3", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "bleu", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "green", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde1", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde2", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "vert", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "red", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "rosso0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "rosso1", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "rouge", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "nested entity with key", + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + "", + "", + }, + { + "entity with key at top level", + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + "", + "", + }, + { + "entity with key at top level (key is populated on load)", + &EntityWithKey{ + I: 12, + S: "abc", + }, + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + "", + "", + }, + { + "__key__ field not a *Key", + &NestedWithNonKeyField{ + N: WithNonKeyField{ + I: 12, + K: "abcd", + }, + }, + &NestedWithNonKeyField{ + N: WithNonKeyField{ + I: 12, + K: "abcd", + }, + }, + "datastore: __key__ field on struct datastore.WithNonKeyField is not a *datastore.Key", + "", + }, + { + "save struct with ptr to struct fields", + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + &PropertyList{ + Property{Name: "A", Value: "anon", NoIndex: false}, + Property{Name: "B", Value: &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "b", NoIndex: false}, + }, + }}, + Property{Name: "D", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "slice0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "slice1", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "c", Value: &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "c", NoIndex: true}, + }, + }, NoIndex: true}, + }, + "", + "", + }, + { + "save and load struct with ptr to struct fields", + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + 
}, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + "", + "", + }, + { + "save struct with pointer to int field", + &PtrToInt{ + I: &two, + }, + &PtrToInt{}, + "unsupported struct field", + "", + }, + { + "struct with nil ptr to struct fields", + &PtrToStructField{ + nil, + nil, + nil, + nil, + }, + new(PropertyList), + "", + "", + }, + { + "nested load entity with key", + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + &PropertyList{ + Property{Name: "N", Value: &Entity{ + Key: testKey0, + Properties: []Property{ + Property{Name: "I", Value: int64(12), NoIndex: false}, + Property{Name: "S", Value: "abcd", NoIndex: false}, + }, + }, + NoIndex: false}, + }, + "", + "", + }, + { + "nested save entity with key", + &PropertyList{ + Property{Name: "N", Value: &Entity{ + Key: testKey0, + Properties: []Property{ + Property{Name: "I", Value: int64(12), NoIndex: false}, + Property{Name: "S", Value: "abcd", NoIndex: false}, + }, + }, NoIndex: false}, + }, + + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + "", + "", + }, + { + "anonymous field with tag", + &N3{ + C3: C3{C: "s"}, + }, + &PropertyList{ + Property{Name: "red", Value: &Entity{ + Properties: []Property{ + Property{Name: "C", Value: "s", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field", + &N4{ + c4: c4{C: "s"}, + }, + &PropertyList{ + Property{Name: "C", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field with tag", + &N5{ + c4: c4{C: "s"}, + }, + new(PropertyList), + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: []interface{}{int64(10), int64(11), int64(12), int64(13)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: []interface{}{int64(20), int64(21)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: []interface{}{"blau0", "blau1", "blau2"}, NoIndex: false}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + Y string + } + }{}, + &PropertyList{ + Property{Name: "A", Value: &Entity{ + Properties: []Property{ + Property{Name: "X", Value: "", NoIndex: true}, + Property{Name: "Y", Value: "", NoIndex: true}, + }, + }, NoIndex: true}, + Property{Name: "B", Value: &Entity{ + Properties: []Property{ + Property{Name: "X", Value: "", NoIndex: true}, + Property{Name: "Y", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo", Value: &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(0), NoIndex: false}, + Property{Name: "X", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "slice of 
slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + &Recursive{}, + &Recursive{}, + "", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + &MutuallyRecursive0{}, + "", + "", + }, + { + "non-exported struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, + { + "repeated property names", + &PropertyList{ + Property{Name: "A", Value: ""}, + Property{Name: "A", Value: ""}, + }, + nil, + "duplicate Property", + "", + }, + { + "embedded time field", + &SpecialTime{MyTime: EmbeddedTime{ts}}, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, + { + "embedded time load", + &PropertyList{ + Property{Name: "MyTime.Time", Value: ts}, + }, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, +} + +// checkErr returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. +func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntityProto(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + if pl, ok := got.(*PropertyList); ok { + // Sort by name to make sure we have a deterministic order. 
+ sortPL(*pl) + } + + if !testutil.Equal(got, tc.want, cmp.AllowUnexported(X0{}, X2{})) { + t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want) + continue + } + } +} + +type aPtrPLS struct { + Count int +} + +func (pls *aPtrPLS) Load([]Property) error { + pls.Count += 1 + return nil +} + +func (pls *aPtrPLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 4}}, nil +} + +type aValuePLS struct { + Count int +} + +func (pls aValuePLS) Load([]Property) error { + pls.Count += 2 + return nil +} + +func (pls aValuePLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 8}}, nil +} + +type aValuePtrPLS struct { + Count int +} + +func (pls *aValuePtrPLS) Load([]Property) error { + pls.Count = 11 + return nil +} + +func (pls *aValuePtrPLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 12}}, nil +} + +type aNotPLS struct { + Count int +} + +type plsString string + +func (s *plsString) Load([]Property) error { + *s = "LOADED" + return nil +} + +func (s *plsString) Save() ([]Property, error) { + return []Property{{Name: "SS", Value: "SAVED"}}, nil +} + +func ptrToplsString(s string) *plsString { + plsStr := plsString(s) + return &plsStr +} + +type aSubPLS struct { + Foo string + Bar *aPtrPLS + Baz aValuePtrPLS + S plsString +} + +type aSubNotPLS struct { + Foo string + Bar *aNotPLS +} + +type aSubPLSErr struct { + Foo string + Bar aValuePLS +} + +type aSubPLSNoErr struct { + Foo string + Bar aPtrPLS +} + +type GrandparentFlatten struct { + Parent Parent `datastore:",flatten"` +} + +type GrandparentOfPtrFlatten struct { + Parent ParentOfPtr `datastore:",flatten"` +} + +type GrandparentOfSlice struct { + Parent ParentOfSlice +} + +type GrandparentOfSlicePtrs struct { + Parent ParentOfSlicePtrs +} + +type GrandparentOfSliceFlatten struct { + Parent ParentOfSlice `datastore:",flatten"` +} + +type GrandparentOfSlicePtrsFlatten struct { + Parent ParentOfSlicePtrs `datastore:",flatten"` +} + +type Grandparent struct { + Parent Parent +} + +type Parent struct { + Child Child + String plsString +} + +type ParentOfPtr struct { + Child *Child + String *plsString +} + +type ParentOfSlice struct { + Children []Child + Strings []plsString +} + +type ParentOfSlicePtrs struct { + Children []*Child + Strings []*plsString +} + +type Child struct { + I int + Grandchild Grandchild +} + +type Grandchild struct { + S string +} + +func (c *Child) Load(props []Property) error { + for _, p := range props { + if p.Name == "I" { + c.I += 1 + } else if p.Name == "Grandchild.S" { + c.Grandchild.S = "grandchild loaded" + } + } + + return nil +} + +func (c *Child) Save() ([]Property, error) { + v := c.I + 1 + return []Property{ + {Name: "I", Value: v}, + {Name: "Grandchild.S", Value: fmt.Sprintf("grandchild saved %d", v)}, + }, nil +} + +func TestLoadSavePLS(t *testing.T) { + type testCase struct { + desc string + src interface{} + wantSave *pb.Entity + wantLoad interface{} + saveErr string + loadErr string + } + + testCases := []testCase{ + { + desc: "non-struct implements PLS (top-level)", + src: ptrToplsString("hello"), + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + wantLoad: ptrToplsString("LOADED"), + }, + { + desc: "substructs do implement PLS", + src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}, Baz: aValuePtrPLS{Count: 15}, S: "something"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: 
map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, + }, + }, + }}, + "Baz": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}, Baz: aValuePtrPLS{Count: 11}, S: "LOADED"}, + }, + { + desc: "substruct (ptr) does implement PLS, nil valued substruct", + src: &aSubPLS{Foo: "foo", S: "something"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Baz": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLS{Foo: "foo", Baz: aValuePtrPLS{Count: 11}, S: "LOADED"}, + }, + { + desc: "substruct (ptr) does not implement PLS", + src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}}, + }, + { + desc: "substruct (value) does implement PLS, error on save", + src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 2}}, + wantSave: (*pb.Entity)(nil), + wantLoad: &aSubPLSErr{}, + saveErr: "PropertyLoadSaver methods must be implemented on a pointer", + }, + { + desc: "substruct (value) does implement PLS, error on load", + src: &aSubPLSNoErr{Foo: "foo", Bar: aPtrPLS{Count: 2}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLSErr{}, + loadErr: "PropertyLoadSaver methods must be implemented on a pointer", + }, + + { + desc: "parent does not have flatten option, child impl PLS", + src: &Grandparent{ + Parent: Parent{ + Child: Child{ + I: 9, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + String: plsString("something"), + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Child": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild 
saved 10"}}, + }, + }, + }}, + "String": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + }}, + }, + }, + wantLoad: &Grandparent{ + Parent: Parent{ + Child: Child{ + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + String: "LOADED", + }, + }, + }, + { + desc: "parent has flatten option enabled, child impl PLS", + src: &GrandparentFlatten{ + Parent: Parent{ + Child: Child{ + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + String: plsString("something"), + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + "Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + wantLoad: &GrandparentFlatten{ + Parent: Parent{ + Child: Child{ + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + String: "LOADED", + }, + }, + }, + + { + desc: "parent has flatten option enabled, child (ptr to) impl PLS", + src: &GrandparentOfPtrFlatten{ + Parent: ParentOfPtr{ + Child: &Child{ + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + String: ptrToplsString("something"), + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + "Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + wantLoad: &GrandparentOfPtrFlatten{ + Parent: ParentOfPtr{ + Child: &Child{ + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + String: ptrToplsString("LOADED"), + }, + }, + }, + { + desc: "children (slice of) impl PLS", + src: &GrandparentOfSlice{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []plsString{ + "something1", + "something2", + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Children": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + }}, + }}, + "Strings": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: 
&pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }}, + }}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSlice{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []plsString{ + "LOADED", + "LOADED", + }, + }, + }, + }, + { + desc: "children (slice of ptrs) impl PLS", + src: &GrandparentOfSlicePtrs{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("something1"), + ptrToplsString("something2"), + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "Children": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + }}, + }}, + "Strings": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }}, + }}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSlicePtrs{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("LOADED"), + ptrToplsString("LOADED"), + }, + }, + }, + }, + { + desc: "parent has flatten option, children (slice of) impl PLS", + src: &GrandparentOfSliceFlatten{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []plsString{ + "something1", + "something2", + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + }, + }, + }}, + "Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + "Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: 
&pb.Value_StringValue{StringValue: "SAVED"}}, + {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSliceFlatten{ + Parent: ParentOfSlice{ + Children: []Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []plsString{ + "LOADED", + "LOADED", + }, + }, + }, + }, + { + desc: "parent has flatten option, children (slice of ptrs) impl PLS", + src: &GrandparentOfSlicePtrsFlatten{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 7, + Grandchild: Grandchild{ + S: "BAD", + }, + }, + { + I: 9, + Grandchild: Grandchild{ + S: "BAD2", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("something1"), + ptrToplsString("something1"), + }, + }, + }, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, + {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + }, + }, + }}, + "Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, + {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, + }, + }, + }}, + "Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, + }, + }, + }}, + }, + }, + wantLoad: &GrandparentOfSlicePtrsFlatten{ + Parent: ParentOfSlicePtrs{ + Children: []*Child{ + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + { + I: 1, + Grandchild: Grandchild{ + S: "grandchild loaded", + }, + }, + }, + Strings: []*plsString{ + ptrToplsString("LOADED"), + ptrToplsString("LOADED"), + }, + }, + }, + }, + } + + for _, tc := range testCases { + e, err := saveEntity(testKey0, tc.src) + if tc.saveErr == "" { // Want no error. + if err != nil { + t.Errorf("%s: save: %v", tc.desc, err) + continue + } + if !testutil.Equal(e, tc.wantSave) { + t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave) + continue + } + } else { // Want error. + if err == nil { + t.Errorf("%s: save: want err", tc.desc) + continue + } + if !strings.Contains(err.Error(), tc.saveErr) { + t.Errorf("%s: save: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.saveErr) + } + continue + } + + gota := reflect.New(reflect.TypeOf(tc.wantLoad).Elem()).Interface() + err = loadEntityProto(gota, e) + if tc.loadErr == "" { // Want no error. + if err != nil { + t.Errorf("%s: load: %v", tc.desc, err) + continue + } + if !testutil.Equal(gota, tc.wantLoad) { + t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad) + continue + } + } else { // Want error. + if err == nil { + t.Errorf("%s: load: want err", tc.desc) + continue + } + if !strings.Contains(err.Error(), tc.loadErr) { + t.Errorf("%s: load: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.loadErr) + } + } + } +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. 
+ q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. + q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. + q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. + q: NewQuery("Foo").Order("+bar"), + err: "invalid order", + }, + } + for i, test := range tests { + if test.q.err != nil { + got := test.q.err.Error() + if !strings.Contains(got, test.err) { + t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) + } + continue + } + if !testutil.Equal(test.q, test.exp, cmp.AllowUnexported(Query{})) { + t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) + } + } +} + +func TestPutMultiTypes(t *testing.T) { + ctx := context.Background() + type S struct { + A int + B string + } + + testCases := []struct { + desc string + src interface{} + wantErr bool + }{ + // Test cases to check each of the valid input types for src. + // Each case has the same elements. + { + desc: "type []struct", + src: []S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []*struct", + src: []*S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []interface{} with PLS elems", + src: []interface{}{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []interface{} with struct ptr elems", + src: []interface{}{ + &S{1, "one"}, &S{2, "two"}, + }, + }, + { + desc: "type []PropertyLoadSaver{}", + src: []PropertyLoadSaver{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []P (non-pointer, *P implements PropertyLoadSaver)", + src: []PropertyList{ + {Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + {Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + // Test some invalid cases. 
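+		// (Plain struct values, primitive element types, and non-slice
+		// arguments are all rejected.)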
+ { + desc: "type []interface{} with struct elems", + src: []interface{}{ + S{1, "one"}, S{2, "two"}, + }, + wantErr: true, + }, + { + desc: "PropertyList", + src: PropertyList{ + Property{Name: "A", Value: 1}, + Property{Name: "B", Value: "one"}, + }, + wantErr: true, + }, + { + desc: "type []int", + src: []int{1, 2}, + wantErr: true, + }, + { + desc: "not a slice", + src: S{1, "one"}, + wantErr: true, + }, + } + + // Use the same keys and expected entities for all tests. + keys := []*Key{ + NameKey("testKind", "first", nil), + NameKey("testKind", "second", nil), + } + want := []*pb.Mutation{ + {Operation: &pb.Mutation_Upsert{ + Upsert: &pb.Entity{ + Key: keyToProto(keys[0]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, + "B": {ValueType: &pb.Value_StringValue{StringValue: "one"}}, + }, + }}}, + {Operation: &pb.Mutation_Upsert{ + Upsert: &pb.Entity{ + Key: keyToProto(keys[1]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + "B": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + }, + }}}, + } + + for _, tt := range testCases { + // Set up a fake client which captures upserts. + var got []*pb.Mutation + client := &Client{ + client: &fakeClient{ + commitFn: func(req *pb.CommitRequest) (*pb.CommitResponse, error) { + got = req.Mutations + return &pb.CommitResponse{}, nil + }, + }, + } + + _, err := client.PutMulti(ctx, keys, tt.src) + if err != nil { + if !tt.wantErr { + t.Errorf("%s: error %v", tt.desc, err) + } + continue + } + if tt.wantErr { + t.Errorf("%s: wanted error, but none returned", tt.desc) + continue + } + if len(got) != len(want) { + t.Errorf("%s: got %d entities, want %d", tt.desc, len(got), len(want)) + continue + } + for i, e := range got { + if !proto.Equal(e, want[i]) { + t.Logf("%s: entity %d doesn't match\ngot: %v\nwant: %v", tt.desc, i, e, want[i]) + } + } + } +} + +func TestNoIndexOnSliceProperties(t *testing.T) { + // Check that ExcludeFromIndexes is set on the inner elements, + // rather than the top-level ArrayValue value. + pl := PropertyList{ + Property{ + Name: "repeated", + Value: []interface{}{ + 123, + false, + "short", + strings.Repeat("a", 1503), + }, + NoIndex: true, + }, + } + key := NameKey("dummy", "dummy", nil) + + entity, err := saveEntity(key, &pl) + if err != nil { + t.Fatalf("saveEntity: %v", err) + } + + want := &pb.Value{ + ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ + {ValueType: &pb.Value_IntegerValue{IntegerValue: 123}, ExcludeFromIndexes: true}, + {ValueType: &pb.Value_BooleanValue{BooleanValue: false}, ExcludeFromIndexes: true}, + {ValueType: &pb.Value_StringValue{StringValue: "short"}, ExcludeFromIndexes: true}, + {ValueType: &pb.Value_StringValue{StringValue: strings.Repeat("a", 1503)}, ExcludeFromIndexes: true}, + }}}, + } + if got := entity.Properties["repeated"]; !proto.Equal(got, want) { + t.Errorf("Entity proto differs\ngot: %v\nwant: %v", got, want) + } +} + +type byName PropertyList + +func (s byName) Len() int { return len(s) } +func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sortPL sorts the property list by property name, and +// recursively sorts any nested property lists, or nested slices of +// property lists. 
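+// Sorting makes the PropertyList comparisons in the tests above
+// insensitive to property order.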
+func sortPL(pl PropertyList) {
+	sort.Stable(byName(pl))
+	for _, p := range pl {
+		switch v := p.Value.(type) {
+		case *Entity:
+			sortPL(v.Properties)
+		case []interface{}:
+			for _, p2 := range v {
+				if nent, ok := p2.(*Entity); ok {
+					sortPL(nent.Properties)
+				}
+			}
+		}
+	}
+}
+
+func TestValidGeoPoint(t *testing.T) {
+	testCases := []struct {
+		desc string
+		pt   GeoPoint
+		want bool
+	}{
+		{
+			"valid",
+			GeoPoint{67.21, 13.37},
+			true,
+		},
+		{
+			"low lat",
+			GeoPoint{-90.01, 13.37},
+			false,
+		},
+		{
+			"high lat",
+			GeoPoint{90.01, 13.37},
+			false,
+		},
+		{
+			"high lng",
+			GeoPoint{67.21, 182},
+			false,
+		},
+		{
+			"low lng",
+			GeoPoint{67.21, -181},
+			false,
+		},
+	}
+
+	for _, tc := range testCases {
+		if got := tc.pt.Valid(); got != tc.want {
+			t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+		}
+	}
+}
+
+func TestPutInvalidEntity(t *testing.T) {
+	// Test that trying to put an invalid entity always returns the correct
+	// error type.
+
+	// Fake client that can pretend to start a transaction.
+	fakeClient := &fakeDatastoreClient{
+		beginTransaction: func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
+			return &pb.BeginTransactionResponse{
+				Transaction: []byte("deadbeef"),
+			}, nil
+		},
+	}
+	client := &Client{
+		client: fakeClient,
+	}
+
+	ctx := context.Background()
+	key := IncompleteKey("kind", nil)
+
+	_, err := client.Put(ctx, key, "invalid entity")
+	if err != ErrInvalidEntityType {
+		t.Errorf("client.Put returned err %v, want %v", err, ErrInvalidEntityType)
+	}
+
+	_, err = client.PutMulti(ctx, []*Key{key}, []interface{}{"invalid entity"})
+	if me, ok := err.(MultiError); !ok {
+		t.Errorf("client.PutMulti returned err %v, want MultiError type", err)
+	} else if len(me) != 1 || me[0] != ErrInvalidEntityType {
+		t.Errorf("client.PutMulti returned err %v, want MultiError{ErrInvalidEntityType}", err)
+	}
+
+	client.RunInTransaction(ctx, func(tx *Transaction) error {
+		_, err := tx.Put(key, "invalid entity")
+		if err != ErrInvalidEntityType {
+			t.Errorf("tx.Put returned err %v, want %v", err, ErrInvalidEntityType)
+		}
+
+		_, err = tx.PutMulti([]*Key{key}, []interface{}{"invalid entity"})
+		if me, ok := err.(MultiError); !ok {
+			t.Errorf("tx.PutMulti returned err %v, want MultiError type", err)
+		} else if len(me) != 1 || me[0] != ErrInvalidEntityType {
+			t.Errorf("tx.PutMulti returned err %v, want MultiError{ErrInvalidEntityType}", err)
+		}
+
+		return errors.New("bang!") // Return error: we don't actually want to commit.
+	})
+}
+
+func TestDeferred(t *testing.T) {
+	type Ent struct {
+		A int
+		B string
+	}
+
+	keys := []*Key{
+		NameKey("testKind", "first", nil),
+		NameKey("testKind", "second", nil),
+	}
+
+	entity1 := &pb.Entity{
+		Key: keyToProto(keys[0]),
+		Properties: map[string]*pb.Value{
+			"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}},
+			"B": {ValueType: &pb.Value_StringValue{StringValue: "one"}},
+		},
+	}
+	entity2 := &pb.Entity{
+		Key: keyToProto(keys[1]),
+		Properties: map[string]*pb.Value{
+			"A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
+			"B": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
+		},
+	}
+
+	// count keeps track of the number of times fakeClient.lookup has been
+	// called.
+	var count int
+	// Fake client that returns deferred keys in the response on the first
+	// call.
+	fakeClient := &fakeDatastoreClient{
+		lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) {
+			count++
+			// On the first call, we return deferred keys.
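+			// (GetMulti is then expected to retry the lookup for just the
+			// deferred keys; the count assertion below checks this.)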
+ if count == 1 { + return &pb.LookupResponse{ + Found: []*pb.EntityResult{ + { + Entity: entity1, + Version: 1, + }, + }, + Deferred: []*pb.Key{ + keyToProto(keys[1]), + }, + }, nil + } + + // On the second call, we do not return any more deferred keys. + return &pb.LookupResponse{ + Found: []*pb.EntityResult{ + { + Entity: entity2, + Version: 1, + }, + }, + }, nil + }, + } + client := &Client{ + client: fakeClient, + } + + ctx := context.Background() + + dst := make([]Ent, len(keys)) + err := client.GetMulti(ctx, keys, dst) + if err != nil { + t.Fatalf("client.Get: %v", err) + } + + if count != 2 { + t.Fatalf("expected client.lookup to be called 2 times. Got %d", count) + } + + if len(dst) != 2 { + t.Fatalf("expected 2 entities returned, got %d", len(dst)) + } + + for _, e := range dst { + if e.A == 1 { + if e.B != "one" { + t.Fatalf("unexpected entity %+v", e) + } + } else if e.A == 2 { + if e.B != "two" { + t.Fatalf("unexpected entity %+v", e) + } + } else { + t.Fatalf("unexpected entity %+v", e) + } + } + +} + +type KeyLoaderEnt struct { + A int + K *Key +} + +func (e *KeyLoaderEnt) Load(p []Property) error { + e.A = 2 + return nil +} + +func (e *KeyLoaderEnt) LoadKey(k *Key) error { + e.K = k + return nil +} + +func (e *KeyLoaderEnt) Save() ([]Property, error) { + return []Property{{Name: "A", Value: int64(3)}}, nil +} + +func TestKeyLoaderEndToEnd(t *testing.T) { + keys := []*Key{ + NameKey("testKind", "first", nil), + NameKey("testKind", "second", nil), + } + + entity1 := &pb.Entity{ + Key: keyToProto(keys[0]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, + "B": {ValueType: &pb.Value_StringValue{StringValue: "one"}}, + }, + } + entity2 := &pb.Entity{ + Key: keyToProto(keys[1]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + "B": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + }, + } + + fakeClient := &fakeDatastoreClient{ + lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { + return &pb.LookupResponse{ + Found: []*pb.EntityResult{ + { + Entity: entity1, + Version: 1, + }, + { + Entity: entity2, + Version: 1, + }, + }, + }, nil + }, + } + client := &Client{ + client: fakeClient, + } + + ctx := context.Background() + + dst := make([]*KeyLoaderEnt, len(keys)) + err := client.GetMulti(ctx, keys, dst) + if err != nil { + t.Fatalf("client.Get: %v", err) + } + + for i := range dst { + if !testutil.Equal(dst[i].K, keys[i]) { + t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K) + } + } +} + +func TestDeferredMissing(t *testing.T) { + type Ent struct { + A int + B string + } + + keys := []*Key{ + NameKey("testKind", "first", nil), + NameKey("testKind", "second", nil), + } + + entity1 := &pb.Entity{ + Key: keyToProto(keys[0]), + } + entity2 := &pb.Entity{ + Key: keyToProto(keys[1]), + } + + var count int + fakeClient := &fakeDatastoreClient{ + lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { + count++ + + if count == 1 { + return &pb.LookupResponse{ + Missing: []*pb.EntityResult{ + { + Entity: entity1, + Version: 1, + }, + }, + Deferred: []*pb.Key{ + keyToProto(keys[1]), + }, + }, nil + } + + return &pb.LookupResponse{ + Missing: []*pb.EntityResult{ + { + Entity: entity2, + Version: 1, + }, + }, + }, nil + }, + } + client := &Client{ + client: fakeClient, + } + + ctx := context.Background() + + dst := make([]Ent, len(keys)) + err := client.GetMulti(ctx, keys, dst) + errs, ok := err.(MultiError) + if !ok { + t.Fatalf("expected 
error to be MultiError; got %v", err)
+	}
+	if len(errs) != 2 {
+		t.Fatalf("expected 2 errors, got %d", len(errs))
+	}
+	if errs[0] != ErrNoSuchEntity {
+		t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[0])
+	}
+	if errs[1] != ErrNoSuchEntity {
+		t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[1])
+	}
+
+	if count != 2 {
+		t.Fatalf("expected client.lookup to be called 2 times. Got %d", count)
+	}
+
+	if len(dst) != 2 {
+		t.Fatalf("expected 2 entities returned, got %d", len(dst))
+	}
+
+	for _, e := range dst {
+		if e.A != 0 || e.B != "" {
+			t.Fatalf("unexpected entity %+v", e)
+		}
+	}
+}
+
+type fakeDatastoreClient struct {
+	pb.DatastoreClient
+
+	// Optional handlers for the datastore methods.
+	// Any handlers left undefined will return an error.
+	lookup           func(*pb.LookupRequest) (*pb.LookupResponse, error)
+	runQuery         func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error)
+	beginTransaction func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error)
+	commit           func(*pb.CommitRequest) (*pb.CommitResponse, error)
+	rollback         func(*pb.RollbackRequest) (*pb.RollbackResponse, error)
+	allocateIds      func(*pb.AllocateIdsRequest) (*pb.AllocateIdsResponse, error)
+}
+
+func (c *fakeDatastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
+	if c.lookup == nil {
+		return nil, errors.New("no lookup handler defined")
+	}
+	return c.lookup(in)
+}
+func (c *fakeDatastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
+	if c.runQuery == nil {
+		return nil, errors.New("no runQuery handler defined")
+	}
+	return c.runQuery(in)
+}
+func (c *fakeDatastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
+	if c.beginTransaction == nil {
+		return nil, errors.New("no beginTransaction handler defined")
+	}
+	return c.beginTransaction(in)
+}
+func (c *fakeDatastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
+	if c.commit == nil {
+		return nil, errors.New("no commit handler defined")
+	}
+	return c.commit(in)
+}
+func (c *fakeDatastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
+	if c.rollback == nil {
+		return nil, errors.New("no rollback handler defined")
+	}
+	return c.rollback(in)
+}
+func (c *fakeDatastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
+	if c.allocateIds == nil {
+		return nil, errors.New("no allocateIds handler defined")
+	}
+	return c.allocateIds(in)
+}
diff --git a/vendor/cloud.google.com/go/datastore/doc.go b/vendor/cloud.google.com/go/datastore/doc.go
new file mode 100644
index 0000000..df86cd2
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/doc.go
@@ -0,0 +1,454 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package datastore provides a client for Google Cloud Datastore. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Basic Operations + +Entities are the unit of storage and are associated with a key. A key +consists of an optional parent key, a string application ID, a string kind +(also known as an entity type), and either a StringID or an IntID. A +StringID is also known as an entity name or key name. + +It is valid to create a key with a zero StringID and a zero IntID; this is +called an incomplete key, and does not refer to any saved entity. Putting an +entity into the datastore under an incomplete key will cause a unique key +to be generated for that entity, with a non-zero IntID. + +An entity's contents are a mapping from case-sensitive field names to values. +Valid value types are: + - signed integers (int, int8, int16, int32 and int64), + - bool, + - string, + - float32 and float64, + - []byte (up to 1 megabyte in length), + - any type whose underlying type is one of the above predeclared types, + - *Key, + - GeoPoint, + - time.Time (stored with microsecond precision), + - structs whose fields are all valid value types, + - pointers to structs whose fields are all valid value types, + - slices of any of the above. + +Slices of structs are valid, as are structs that contain slices. + +The Get and Put functions load and save an entity's contents. An entity's +contents are typically represented by a struct pointer. + +Example code: + + type Entity struct { + Value string + } + + func main() { + ctx := context.Background() + + // Create a datastore client. In a typical application, you would create + // a single client which is reused for every datastore operation. + dsClient, err := datastore.NewClient(ctx, "my-project") + if err != nil { + // Handle error. + } + + k := datastore.NameKey("Entity", "stringID", nil) + e := new(Entity) + if err := dsClient.Get(ctx, k, e); err != nil { + // Handle error. + } + + old := e.Value + e.Value = "Hello World!" + + if _, err := dsClient.Put(ctx, k, e); err != nil { + // Handle error. + } + + fmt.Printf("Updated value from %q to %q\n", old, e.Value) + } + +GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and +Delete functions. They take a []*Key instead of a *Key, and may return a +datastore.MultiError when encountering partial failure. + + +Properties + +An entity's contents can be represented by a variety of types. These are +typically struct pointers, but can also be any type that implements the +PropertyLoadSaver interface. If using a struct pointer, you do not have to +explicitly implement the PropertyLoadSaver interface; the datastore will +automatically convert via reflection. If a struct pointer does implement that +interface then those methods will be used in preference to the default +behavior for struct pointers. Struct pointers are more strongly typed and are +easier to use; PropertyLoadSavers are more flexible. + +The actual types passed do not have to match between Get and Put calls or even +across different calls to datastore. It is valid to put a *PropertyList and +get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. +Conceptually, any entity is saved as a sequence of properties, and is loaded +into the destination value on a property-by-property basis. 
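+
+For example, it is valid to save using a struct type and load back into a
+PropertyList (a sketch, reusing the dsClient, ctx and k from the example
+above):
+
+	var pl datastore.PropertyList
+	if err := dsClient.Get(ctx, k, &pl); err != nil {
+		// Handle error.
+	}
+	// pl now holds one Property per stored property of the entity.
+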
+When loading into a struct pointer, an entity that cannot be completely
+represented (such as a missing field) will result in an ErrFieldMismatch
+error, but it is up to the caller whether this error is fatal, recoverable
+or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter).
+
+Fields may have a `datastore:"name,options"` tag. The tag name is the
+property name, which must be one or more valid Go identifiers joined by ".",
+but may start with a lower case letter. An empty tag name means to just use the
+field name. A "-" tag name means that the datastore will ignore that field.
+
+The only valid options are "omitempty", "noindex" and "flatten".
+
+If the options include "omitempty" and the value of the field is empty, then
+the field will be omitted on Save. The empty values are false, 0, any nil
+interface value, and any array, slice, map, or string of length zero. Struct
+field values will never be empty.
+
+If the options include "noindex" then the field will not be indexed. All
+fields are indexed by default. Strings or byte slices longer than 1500 bytes
+cannot be indexed; fields used to store long strings and byte slices must be
+tagged with "noindex" or they will cause Put operations to fail.
+
+For a nested struct field, the options may also include "flatten". This
+indicates that the immediate fields and any nested substruct fields of the
+nested struct should be flattened. See below for examples.
+
+To use multiple options together, separate them by a comma. The order does
+not matter. If the options string is empty, the comma may be omitted.
+
+Example code:
+
+	// A and B are renamed to a and b.
+	// A, C and J are not indexed.
+	// D's tag is equivalent to having no tag at all (E).
+	// I is ignored entirely by the datastore.
+	// J has tag information for both the datastore and json packages.
+	type TaggedStruct struct {
+		A int `datastore:"a,noindex"`
+		B int `datastore:"b"`
+		C int `datastore:",noindex"`
+		D int `datastore:""`
+		E int
+		I int `datastore:"-"`
+		J int `datastore:",noindex" json:"j"`
+	}
+
+
+Key Field
+
+If the struct contains a *datastore.Key field tagged with the name "__key__",
+its value will be ignored on Put. When reading the Entity back into the Go
+struct, the field will be populated with the *datastore.Key value used to
+query for the Entity.
+
+Example code:
+
+	type MyEntity struct {
+		A int
+		K *datastore.Key `datastore:"__key__"`
+	}
+
+	k := datastore.NameKey("Entity", "stringID", nil)
+	e := MyEntity{A: 12}
+	k, err := dsClient.Put(ctx, k, &e)
+	if err != nil {
+		// Handle error.
+	}
+
+	var entities []MyEntity
+	q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1)
+	_, err = dsClient.GetAll(ctx, q, &entities)
+	if err != nil {
+		// Handle error.
+	}
+
+	log.Println(entities[0])
+	// Prints {12 /Entity,stringID}
+
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are themselves saved as Entity values. For example, given these
+definitions:
+
+	type Inner struct {
+		W int32
+		X string
+	}
+
+	type Outer struct {
+		I Inner
+	}
+
+then an Outer would have one property, I, encoded as an Entity value.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
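+
+For example (a sketch of the rule above):
+
+	type Outer struct {
+		// Both of Inner's fields, W and X, are effectively "noindex".
+		I Inner `datastore:",noindex"`
+	}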
+ +If the Inner struct contains a *Key field with the name "__key__", like so: + + type Inner struct { + W int32 + X string + K *datastore.Key `datastore:"__key__"` + } + + type Outer struct { + I Inner + } + +then the value of K will be used as the Key for Inner, represented +as an Entity value in datastore. + +If any nested struct fields should be flattened, instead of encoded as +Entity values, the nested struct field should be tagged with the "flatten" +option. For example, given the following: + + type Inner1 struct { + W int32 + X string + } + + type Inner2 struct { + Y float64 + } + + type Inner3 struct { + Z bool + } + + type Inner4 struct { + WW int + } + + type Inner5 struct { + X Inner4 + } + + type Outer struct { + A int16 + I []Inner1 `datastore:",flatten"` + J Inner2 `datastore:",flatten"` + K Inner5 `datastore:",flatten"` + Inner3 `datastore:",flatten"` + } + +an Outer's properties would be equivalent to those of: + + type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + KDotXDotWW int `datastore:"K.X.WW"` + Z bool + } + +Note that the "flatten" option cannot be used for Entity value fields. +The server will reject any dotted field names for an Entity value. + + +The PropertyLoadSaver Interface + +An entity's contents can also be represented by any type that implements the +PropertyLoadSaver interface. This type may be a struct pointer, but it does +not have to be. The datastore package will call Load when getting the entity's +contents, and Save when putting the entity's contents. +Possible uses include deriving non-stored fields, verifying fields, or indexing +a field only if its value is positive. + +Example code: + + type CustomPropsExample struct { + I, J int + // Sum is not stored, but should always be equal to I + J. + Sum int `datastore:"-"` + } + + func (x *CustomPropsExample) Load(ps []datastore.Property) error { + // Load I and J as usual. + if err := datastore.LoadStruct(x, ps); err != nil { + return err + } + // Derive the Sum field. + x.Sum = x.I + x.J + return nil + } + + func (x *CustomPropsExample) Save() ([]datastore.Property, error) { + // Validate the Sum field. + if x.Sum != x.I + x.J { + return nil, errors.New("CustomPropsExample has inconsistent sum") + } + // Save I and J as usual. The code below is equivalent to calling + // "return datastore.SaveStruct(x)", but is done manually for + // demonstration purposes. + return []datastore.Property{ + { + Name: "I", + Value: int64(x.I), + }, + { + Name: "J", + Value: int64(x.J), + }, + }, nil + } + +The *PropertyList type implements PropertyLoadSaver, and can therefore hold an +arbitrary entity's contents. + +The KeyLoader Interface + +If a type implements the PropertyLoadSaver interface, it may +also want to implement the KeyLoader interface. +The KeyLoader interface exists to allow implementations of PropertyLoadSaver +to also load an Entity's Key into the Go type. This type may be a struct +pointer, but it does not have to be. The datastore package will call LoadKey +when getting the entity's contents, after calling Load. + +Example code: + + type WithKeyExample struct { + I int + Key *datastore.Key + } + + func (x *WithKeyExample) LoadKey(k *datastore.Key) error { + x.Key = k + return nil + } + + func (x *WithKeyExample) Load(ps []datastore.Property) error { + // Load I as usual. + return datastore.LoadStruct(x, ps) + } + + func (x *WithKeyExample) Save() ([]datastore.Property, error) { + // Save I as usual. 
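+		// (SaveStruct reflects over the exported fields and produces one
+		// Property per field.)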
+ return datastore.SaveStruct(x) + } + +To load a Key into a struct which does not implement the PropertyLoadSaver +interface, see the "Key Field" section above. + + +Queries + +Queries retrieve entities based on their properties or key's ancestry. Running +a query yields an iterator of results: either keys or (key, entity) pairs. +Queries are re-usable and it is safe to call Query.Run from concurrent +goroutines. Iterators are not safe for concurrent use. + +Queries are immutable, and are either created by calling NewQuery, or derived +from an existing query by calling a method like Filter or Order that returns a +new query value. A query is typically constructed by calling NewQuery followed +by a chain of zero or more such methods. These methods are: + - Ancestor and Filter constrain the entities returned by running a query. + - Order affects the order in which they are returned. + - Project constrains the fields returned. + - Distinct de-duplicates projected entities. + - KeysOnly makes the iterator return only keys, not (key, entity) pairs. + - Start, End, Offset and Limit define which sub-sequence of matching entities + to return. Start and End take cursors, Offset and Limit take integers. Start + and Offset affect the first result, End and Limit affect the last result. + If both Start and Offset are set, then the offset is relative to Start. + If both End and Limit are set, then the earliest constraint wins. Limit is + relative to Start+Offset, not relative to End. As a special case, a + negative limit means unlimited. + +Example code: + + type Widget struct { + Description string + Price int + } + + func printWidgets(ctx context.Context, client *datastore.Client) { + q := datastore.NewQuery("Widget"). + Filter("Price <", 1000). + Order("-Price") + for t := client.Run(ctx, q); ; { + var x Widget + key, err := t.Next(&x) + if err == iterator.Done { + break + } + if err != nil { + // Handle error. + } + fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x) + } + } + + +Transactions + +Client.RunInTransaction runs a function in a transaction. + +Example code: + + type Counter struct { + Count int + } + + func incCount(ctx context.Context, client *datastore.Client) { + var count int + key := datastore.NameKey("Counter", "singleton", nil) + _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var x Counter + if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { + return err + } + x.Count++ + if _, err := tx.Put(key, &x); err != nil { + return err + } + count = x.Count + return nil + }) + if err != nil { + // Handle error. + } + // The value of count is only valid once the transaction is successful + // (RunInTransaction has returned nil). + fmt.Printf("Count=%d\n", count) + } + +Google Cloud Datastore Emulator + +This package supports the Cloud Datastore emulator, which is useful for testing and +development. Environment variables are used to indicate that datastore traffic should be +directed to the emulator instead of the production Datastore service. + +To install and set up the emulator and its environment variables, see the documentation +at https://cloud.google.com/datastore/docs/tools/datastore-emulator. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. 
+ +*/ +package datastore // import "cloud.google.com/go/datastore" diff --git a/vendor/cloud.google.com/go/datastore/errors.go b/vendor/cloud.google.com/go/datastore/errors.go new file mode 100644 index 0000000..3077f80 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/errors.go @@ -0,0 +1,47 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file provides error functions for common API failure modes. + +package datastore + +import ( + "fmt" +) + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/cloud.google.com/go/datastore/example_test.go b/vendor/cloud.google.com/go/datastore/example_test.go new file mode 100644 index 0000000..c6f81e1 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/example_test.go @@ -0,0 +1,545 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore_test + +import ( + "fmt" + "log" + "time" + + "cloud.google.com/go/datastore" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +func ExampleClient_Get() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + key := datastore.NameKey("Article", "articled1", nil) + article := &Article{} + if err := client.Get(ctx, key, article); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Put() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + newKey := datastore.IncompleteKey("Article", nil) + _, err = client.Put(ctx, newKey, &Article{ + Title: "The title of the article", + Description: "The description of the article...", + Body: "...", + Author: datastore.NameKey("Author", "jbd", nil), + PublishedAt: time.Now(), + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Put_flatten() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Animal struct { + Name string + Type string + Breed string + } + + type Human struct { + Name string + Height int + Pet Animal `datastore:",flatten"` + } + + newKey := datastore.IncompleteKey("Human", nil) + _, err = client.Put(ctx, newKey, &Human{ + Name: "Susan", + Height: 67, + Pet: Animal{ + Name: "Fluffy", + Type: "Cat", + Breed: "Sphynx", + }, + }) + if err != nil { + log.Fatal(err) + } +} + +func ExampleClient_Delete() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + key := datastore.NameKey("Article", "articled1", nil) + if err := client.Delete(ctx, key); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var keys []*datastore.Key + for i := 1; i <= 10; i++ { + keys = append(keys, datastore.IDKey("Article", int64(i), nil)) + } + if err := client.DeleteMulti(ctx, keys); err != nil { + // TODO: Handle error. + } +} + +type Post struct { + Title string + PublishedAt time.Time + Comments int +} + +func ExampleClient_GetMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + datastore.NameKey("Post", "post3", nil), + } + posts := make([]Post, 3) + if err := client.GetMulti(ctx, keys, posts); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_PutMulti_slice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + } + + // PutMulti with a Post slice. + posts := []*Post{ + {Title: "Post 1", PublishedAt: time.Now()}, + {Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_PutMulti_interfaceSlice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + } + + // PutMulti with an empty interface slice. + posts := []interface{}{ + &Post{Title: "Post 1", PublishedAt: time.Now()}, + &Post{Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + // TODO: Handle error. + } +} + +func ExampleNewQuery() { + // Query for Post entities. + q := datastore.NewQuery("Post") + _ = q // TODO: Use the query with Client.Run. 
+}
+
+func ExampleNewQuery_options() {
+	// Query to order the posts by the number of comments they have received.
+	q := datastore.NewQuery("Post").Order("-Comments")
+	// Start listing from an offset and limit the results.
+	q = q.Offset(20).Limit(10)
+	_ = q // TODO: Use the query.
+}
+
+func ExampleClient_Count() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// Count the number of Post entities.
+	q := datastore.NewQuery("Post")
+	n, err := client.Count(ctx, q)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("There are %d posts.", n)
+}
+
+func ExampleClient_Run() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// List the posts published since yesterday.
+	yesterday := time.Now().Add(-24 * time.Hour)
+	q := datastore.NewQuery("Post").Filter("PublishedAt >", yesterday)
+	it := client.Run(ctx, q)
+	_ = it // TODO: Iterate using Next.
+}
+
+func ExampleClient_NewTransaction() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	const retries = 3
+
+	// Increment a counter.
+	// See https://cloud.google.com/appengine/articles/sharding_counters for
+	// a more scalable solution.
+	type Counter struct {
+		Count int
+	}
+
+	key := datastore.NameKey("counter", "CounterA", nil)
+	var tx *datastore.Transaction
+	for i := 0; i < retries; i++ {
+		tx, err = client.NewTransaction(ctx)
+		if err != nil {
+			break
+		}
+
+		var c Counter
+		if err = tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity {
+			break
+		}
+		c.Count++
+		if _, err = tx.Put(key, &c); err != nil {
+			break
+		}
+
+		// Attempt to commit the transaction. If there's a conflict, try again.
+		if _, err = tx.Commit(); err != datastore.ErrConcurrentTransaction {
+			break
+		}
+	}
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleClient_RunInTransaction() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	// Increment a counter.
+	// See https://cloud.google.com/appengine/articles/sharding_counters for
+	// a more scalable solution.
+	type Counter struct {
+		Count int
+	}
+
+	var count int
+	key := datastore.NameKey("Counter", "singleton", nil)
+	_, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
+		var x Counter
+		if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+			return err
+		}
+		x.Count++
+		if _, err := tx.Put(key, &x); err != nil {
+			return err
+		}
+		count = x.Count
+		return nil
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// The value of count is only valid once the transaction is successful
+	// (RunInTransaction has returned nil).
+	fmt.Printf("Count=%d\n", count)
+}
+
+func ExampleClient_AllocateIDs() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	var keys []*datastore.Key
+	for i := 0; i < 10; i++ {
+		keys = append(keys, datastore.IncompleteKey("Article", nil))
+	}
+	keys, err = client.AllocateIDs(ctx, keys)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_ = keys // TODO: Use keys.
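+	// At this point each key in keys has been assigned a unique numeric ID.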
+} + +func ExampleKey_Encode() { + key := datastore.IDKey("Article", 1, nil) + encoded := key.Encode() + fmt.Println(encoded) + // Output: EgsKB0FydGljbGUQAQ +} + +func ExampleDecodeKey() { + const encoded = "EgsKB0FydGljbGUQAQ" + key, err := datastore.DecodeKey(encoded) + if err != nil { + // TODO: Handle error. + } + fmt.Println(key) + // Output: /Article,1 +} + +func ExampleIDKey() { + // Key with numeric ID. + k := datastore.IDKey("Article", 1, nil) + _ = k // TODO: Use key. +} + +func ExampleNameKey() { + // Key with string ID. + k := datastore.NameKey("Article", "article8", nil) + _ = k // TODO: Use key. +} + +func ExampleIncompleteKey() { + k := datastore.IncompleteKey("Article", nil) + _ = k // TODO: Use incomplete key. +} + +func ExampleClient_GetAll() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var posts []*Post + keys, err := client.GetAll(ctx, datastore.NewQuery("Post"), &posts) + for i, key := range keys { + fmt.Println(key) + fmt.Println(posts[i]) + } +} + +func ExampleCommit_Key() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "") + if err != nil { + // TODO: Handle error. + } + var pk1, pk2 *datastore.PendingKey + // Create two posts in a single transaction. + commit, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var err error + pk1, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 1", PublishedAt: time.Now()}) + if err != nil { + return err + } + pk2, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 2", PublishedAt: time.Now()}) + if err != nil { + return err + } + return nil + }) + if err != nil { + // TODO: Handle error. + } + // Now pk1, pk2 are valid PendingKeys. Let's convert them into real keys + // using the Commit object. + k1 := commit.Key(pk1) + k2 := commit.Key(pk2) + fmt.Println(k1, k2) +} + +func ExampleIterator_Next() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post")) + for { + var p Post + key, err := it.Next(&p) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(key, p) + } +} + +func ExampleIterator_Cursor() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post")) + for { + var p Post + _, err := it.Next(&p) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(p) + cursor, err := it.Cursor() + if err != nil { + // TODO: Handle error. + } + // When printed, a cursor will display as a string that can be passed + // to datastore.NewCursor. + fmt.Printf("to resume with this post, use cursor %s\n", cursor) + } +} + +func ExampleDecodeCursor() { + // See Query.Start for a fuller example of DecodeCursor. + // getCursor represents a function that returns a cursor from a previous + // iteration in string form. + cursorString := getCursor() + cursor, err := datastore.DecodeCursor(cursorString) + if err != nil { + // TODO: Handle error. + } + _ = cursor // TODO: Use the cursor with Query.Start or Query.End. +} + +func getCursor() string { return "" } + +func ExampleQuery_Start() { + // This example demonstrates how to use cursors and Query.Start + // to resume an iteration. 
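+	// (A cursor is an opaque marker of a position in a result set; Start
+	// resumes the query from that position.)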
+ ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // getCursor represents a function that returns a cursor from a previous + // iteration in string form. + cursorString := getCursor() + cursor, err := datastore.DecodeCursor(cursorString) + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post").Start(cursor)) + _ = it // TODO: Use iterator. +} + +func ExampleLoadStruct() { + type Player struct { + User string + Score int + } + // Normally LoadStruct would only be used inside a custom implementation of + // PropertyLoadSaver; this is for illustrative purposes only. + props := []datastore.Property{ + {Name: "User", Value: "Alice"}, + {Name: "Score", Value: int64(97)}, + } + + var p Player + if err := datastore.LoadStruct(&p, props); err != nil { + // TODO: Handle error. + } + fmt.Println(p) + // Output: {Alice 97} +} + +func ExampleSaveStruct() { + type Player struct { + User string + Score int + } + + p := &Player{ + User: "Alice", + Score: 97, + } + props, err := datastore.SaveStruct(p) + if err != nil { + // TODO: Handle error. + } + fmt.Println(props) + // TODO(jba): make this output stable: Output: [{User Alice false} {Score 97 false}] +} diff --git a/vendor/cloud.google.com/go/datastore/integration_test.go b/vendor/cloud.google.com/go/datastore/integration_test.go new file mode 100644 index 0000000..0bde3fa --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/integration_test.go @@ -0,0 +1,1043 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +// TODO(djd): Make test entity clean up more robust: some test entities may +// be left behind if tests are aborted, the transport fails, etc. + +// suffix is a timestamp-based suffix which is appended to key names, +// particularly for the root keys of entity groups. This reduces flakiness +// when the tests are run in parallel. +var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano()) + +func newClient(ctx context.Context, t *testing.T) *Client { + ts := testutil.TokenSource(ctx, ScopeDatastore) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + return client +} + +func TestBasics(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx, _ := context.WithTimeout(context.Background(), time.Second*20) + client := newClient(ctx, t) + defer client.Close() + + type X struct { + I int + S string + T time.Time + } + + x0 := X{66, "99", time.Now().Truncate(time.Millisecond)} + k, err := client.Put(ctx, IncompleteKey("BasicsX", nil), &x0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + x1 := X{} + err = client.Get(ctx, k, &x1) + if err != nil { + t.Errorf("client.Get: %v", err) + } + err = client.Delete(ctx, k) + if err != nil { + t.Errorf("client.Delete: %v", err) + } + if !testutil.Equal(x0, x1) { + t.Errorf("compare: x0=%v, x1=%v", x0, x1) + } +} + +func TestTopLevelKeyLoaded(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx, _ := context.WithTimeout(context.Background(), time.Second*20) + client := newClient(ctx, t) + defer client.Close() + + completeKey := NameKey("EntityWithKey", "myent", nil) + + type EntityWithKey struct { + I int + S string + K *Key `datastore:"__key__"` + } + + in := &EntityWithKey{ + I: 12, + S: "abcd", + } + + k, err := client.Put(ctx, completeKey, in) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + + var e EntityWithKey + err = client.Get(ctx, k, &e) + if err != nil { + t.Fatalf("client.Get: %v", err) + } + + // The two keys should be absolutely identical. + if !testutil.Equal(e.K, k) { + t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k) + } + +} + +func TestListValues(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + p0 := PropertyList{ + {Name: "L", Value: []interface{}{int64(12), "string", true}}, + } + k, err := client.Put(ctx, IncompleteKey("ListValue", nil), &p0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + var p1 PropertyList + if err := client.Get(ctx, k, &p1); err != nil { + t.Errorf("client.Get: %v", err) + } + if !testutil.Equal(p0, p1) { + t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) + } + if err = client.Delete(ctx, k); err != nil { + t.Errorf("client.Delete: %v", err) + } +} + +func TestGetMulti(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type X struct { + I int + } + p := NameKey("X", "x"+suffix, nil) + + cases := []struct { + key *Key + put bool + }{ + {key: NameKey("X", "item1", p), put: true}, + {key: NameKey("X", "item2", p), put: false}, + {key: NameKey("X", "item3", p), put: false}, + {key: NameKey("X", "item3", p), put: false}, + {key: NameKey("X", "item4", p), put: true}, + } + + var src, dst []*X + var srcKeys, dstKeys []*Key + for _, c := range cases { + dst = append(dst, &X{}) + dstKeys = append(dstKeys, c.key) + if c.put { + src = append(src, &X{}) + srcKeys = append(srcKeys, c.key) + } + } + if _, err := client.PutMulti(ctx, srcKeys, src); err != nil { + t.Error(err) + } + err := client.GetMulti(ctx, dstKeys, dst) + if err == nil { + t.Errorf("client.GetMulti got %v, expected error", err) + } + e, ok := err.(MultiError) + if !ok { + t.Errorf("client.GetMulti got %T, expected MultiError", err) + } + for i, err := 
range e { + got, want := err, (error)(nil) + if !cases[i].put { + got, want = err, ErrNoSuchEntity + } + if got != want { + t.Errorf("MultiError[%d] == %v, want %v", i, got, want) + } + } +} + +type Z struct { + S string + T string `datastore:",noindex"` + P []byte + K []byte `datastore:",noindex"` +} + +func (z Z) String() string { + var lens []string + v := reflect.ValueOf(z) + for i := 0; i < v.NumField(); i++ { + if l := v.Field(i).Len(); l > 0 { + lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) + } + } + return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) +} + +func TestUnindexableValues(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + x1500 := strings.Repeat("x", 1500) + x1501 := strings.Repeat("x", 1501) + testCases := []struct { + in Z + wantErr bool + }{ + {in: Z{S: x1500}, wantErr: false}, + {in: Z{S: x1501}, wantErr: true}, + {in: Z{T: x1500}, wantErr: false}, + {in: Z{T: x1501}, wantErr: false}, + {in: Z{P: []byte(x1500)}, wantErr: false}, + {in: Z{P: []byte(x1501)}, wantErr: true}, + {in: Z{K: []byte(x1500)}, wantErr: false}, + {in: Z{K: []byte(x1501)}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, IncompleteKey("BasicsZ", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +func TestNilKey(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + testCases := []struct { + in K0 + wantErr bool + }{ + {in: K0{K: testKey0}, wantErr: false}, + {in: K0{}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, IncompleteKey("NilKey", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +type SQChild struct { + I, J int + T, U int64 +} + +type SQTestCase struct { + desc string + q *Query + wantCount int + wantSum int +} + +func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, + testCases []SQTestCase, extraTests ...func()) { + keys := make([]*Key, len(children)) + for i := range keys { + keys[i] = IncompleteKey("SQChild", parent) + } + keys, err := client.PutMulti(ctx, keys, children) + if err != nil { + t.Fatalf("client.PutMulti: %v", err) + } + defer func() { + err := client.DeleteMulti(ctx, keys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + + for _, tc := range testCases { + count, err := client.Count(ctx, tc.q) + if err != nil { + t.Errorf("Count %q: %v", tc.desc, err) + continue + } + if count != tc.wantCount { + t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) + continue + } + } + + for _, tc := range testCases { + var got []SQChild + _, err := client.GetAll(ctx, tc.q, &got) + if err != nil { + t.Errorf("client.GetAll %q: %v", tc.desc, err) + continue + } + sum := 0 + for _, c := range got { + sum += c.I + c.J + } + if sum != tc.wantSum { + t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) + continue + } + } + for _, x := range extraTests { + x() + } +} + +func TestFilters(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := 
NameKey("SQParent", "TestFilters"+suffix, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "I>1", + baseQuery.Filter("I>", 1), + 6, + 2 + 3 + 4 + 5 + 6 + 7, + }, + { + "I>2 AND I<=5", + baseQuery.Filter("I>", 2).Filter("I<=", 5), + 3, + 3 + 4 + 5, + }, + { + "I>=3 AND I<3", + baseQuery.Filter("I>=", 3).Filter("I<", 3), + 0, + 0, + }, + { + "I=4", + baseQuery.Filter("I=", 4), + 1, + 4, + }, + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !testutil.Equal(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 7, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 0, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !testutil.Equal(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }) +} + +func TestLargeQuery(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := NameKey("LQParent", "TestFilters"+suffix, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + + // Make a large number of children entities. + const n = 800 + children := make([]*SQChild, 0, n) + keys := make([]*Key, 0, n) + for i := 0; i < n; i++ { + children = append(children, &SQChild{I: i, T: now, U: now}) + keys = append(keys, IncompleteKey("SQChild", parent)) + } + + // Store using PutMulti in batches. + const batchSize = 500 + for i := 0; i < n; i = i + 500 { + j := i + batchSize + if j > n { + j = n + } + fullKeys, err := client.PutMulti(ctx, keys[i:j], children[i:j]) + if err != nil { + t.Fatalf("PutMulti(%d, %d): %v", i, j, err) + } + defer func() { + err := client.DeleteMulti(ctx, fullKeys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + } + + q := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Order("I") + + // Wait group to allow us to run query tests in parallel below. + var wg sync.WaitGroup + + // Check we get the expected count and results for various limits/offsets. + queryTests := []struct { + limit, offset, want int + }{ + // Just limit. + {limit: 0, want: 0}, + {limit: 100, want: 100}, + {limit: 501, want: 501}, + {limit: n, want: n}, + {limit: n * 2, want: n}, + {limit: -1, want: n}, + // Just offset. + {limit: -1, offset: 100, want: n - 100}, + {limit: -1, offset: 500, want: n - 500}, + {limit: -1, offset: n, want: 0}, + // Limit and offset. 
+ {limit: 100, offset: 100, want: 100}, + {limit: 1000, offset: 100, want: n - 100}, + {limit: 500, offset: 500, want: n - 500}, + } + for _, tt := range queryTests { + q := q.Limit(tt.limit).Offset(tt.offset) + wg.Add(1) + + go func(limit, offset, want int) { + defer wg.Done() + // Check Count returns the expected number of results. + count, err := client.Count(ctx, q) + if err != nil { + t.Errorf("client.Count(limit=%d offset=%d): %v", limit, offset, err) + return + } + if count != want { + t.Errorf("Count(limit=%d offset=%d) returned %d, want %d", limit, offset, count, want) + } + + var got []SQChild + _, err = client.GetAll(ctx, q, &got) + if err != nil { + t.Errorf("client.GetAll(limit=%d offset=%d): %v", limit, offset, err) + return + } + if len(got) != want { + t.Errorf("GetAll(limit=%d offset=%d) returned %d, want %d", limit, offset, len(got), want) + } + for i, child := range got { + if got, want := child.I, i+offset; got != want { + t.Errorf("GetAll(limit=%d offset=%d) got[%d].I == %d; want %d", limit, offset, i, got, want) + break + } + } + }(tt.limit, tt.offset, tt.want) + } + + // Also check iterator cursor behaviour. + cursorTests := []struct { + limit, offset int // Query limit and offset. + count int // The number of times to call "next" + want int // The I value of the desired element, -1 for "Done". + }{ + // No limits. + {count: 0, limit: -1, want: 0}, + {count: 5, limit: -1, want: 5}, + {count: 500, limit: -1, want: 500}, + {count: 1000, limit: -1, want: -1}, // No more results. + // Limits. + {count: 5, limit: 5, want: 5}, + {count: 500, limit: 5, want: 5}, + {count: 1000, limit: 1000, want: -1}, // No more results. + // Offsets. + {count: 0, offset: 5, limit: -1, want: 5}, + {count: 5, offset: 5, limit: -1, want: 10}, + {count: 200, offset: 500, limit: -1, want: 700}, + {count: 200, offset: 1000, limit: -1, want: -1}, // No more results. + } + for _, tt := range cursorTests { + wg.Add(1) + + go func(count, limit, offset, want int) { + defer wg.Done() + + // Run iterator through count calls to Next. + it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly()) + for i := 0; i < count; i++ { + _, err := it.Next(nil) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next failed at i=%d", count, limit, offset, i) + return + } + } + + // Grab the cursor. + cursor, err := it.Cursor() + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Cursor: %v", count, limit, offset, err) + return + } + + // Make a request for the next element. + it = client.Run(ctx, q.Limit(1).Start(cursor)) + var entity SQChild + _, err = it.Next(&entity) + switch { + case want == -1: + if err != iterator.Done { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor %v, want Done", count, limit, offset, err) + } + case err != nil: + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor: %v, want nil", count, limit, offset, err) + case entity.I != want: + t.Errorf("count=%d, limit=%d, offset=%d: got.I = %d, want %d", count, limit, offset, entity.I, want) + } + }(tt.count, tt.limit, tt.offset, tt.want) + } + + wg.Wait() +} + +func TestEventualConsistency(t *testing.T) { + // TODO(jba): either make this actually test eventual consistency, or + // delete it. Currently it behaves the same with or without the + // EventualConsistency call. 
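+	// Background: ancestor queries are strongly consistent by default;
+	// EventualConsistency relaxes that, which is why the check below only
+	// asserts that Count lands somewhere in [0,3] rather than exactly 3.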
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	parent := NameKey("SQParent", "TestEventualConsistency"+suffix, nil)
+	now := time.Now().Truncate(time.Millisecond).Unix()
+	children := []*SQChild{
+		{I: 0, T: now, U: now},
+		{I: 1, T: now, U: now},
+		{I: 2, T: now, U: now},
+	}
+	query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency()
+	testSmallQueries(t, ctx, client, parent, children, nil, func() {
+		got, err := client.Count(ctx, query)
+		if err != nil {
+			t.Fatalf("Count: %v", err)
+		}
+		if got < 0 || 3 < got {
+			t.Errorf("Count: got %d, want [0,3]", got)
+		}
+	})
+}
+
+func TestProjection(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	parent := NameKey("SQParent", "TestProjection"+suffix, nil)
+	now := time.Now().Truncate(time.Millisecond).Unix()
+	children := []*SQChild{
+		{I: 1 << 0, J: 100, T: now, U: now},
+		{I: 1 << 1, J: 100, T: now, U: now},
+		{I: 1 << 2, J: 200, T: now, U: now},
+		{I: 1 << 3, J: 300, T: now, U: now},
+		{I: 1 << 4, J: 300, T: now, U: now},
+	}
+	baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150)
+	testSmallQueries(t, ctx, client, parent, children, []SQTestCase{
+		{
+			"project",
+			baseQuery.Project("J"),
+			3,
+			200 + 300 + 300,
+		},
+		{
+			"distinct",
+			baseQuery.Project("J").Distinct(),
+			2,
+			200 + 300,
+		},
+		{
+			"distinct on",
+			baseQuery.Project("J").DistinctOn("J"),
+			2,
+			200 + 300,
+		},
+		{
+			"project on meaningful (GD_WHEN) field",
+			baseQuery.Project("U"),
+			3,
+			0,
+		},
+	})
+}
+
+func TestAllocateIDs(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	keys := make([]*Key, 5)
+	for i := range keys {
+		keys[i] = IncompleteKey("AllocID", nil)
+	}
+	keys, err := client.AllocateIDs(ctx, keys)
+	if err != nil {
+		t.Errorf("AllocateIDs failed: %v", err)
+	}
+	if got := len(keys); got != 5 {
+		t.Errorf("Expected to allocate 5 keys, got %d", got)
+	}
+	for _, k := range keys {
+		if k.Incomplete() {
+			t.Errorf("Unexpected incomplete key found: %v", k)
+		}
+	}
+}
+
+func TestGetAllWithFieldMismatch(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	type Fat struct {
+		X, Y int
+	}
+	type Thin struct {
+		X int
+	}
+
+	// Ancestor queries (those within an entity group) are strongly consistent
+	// by default, which prevents a test from being flaky.
+	// See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency
+	// for more information.
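+	// What this test exercises: when stored entities carry more fields than
+	// the destination struct (Fat vs. Thin), GetAll should still return the
+	// keys and every loadable field, alongside an *ErrFieldMismatch.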
+ parent := NameKey("SQParent", "TestGetAllWithFieldMismatch"+suffix, nil) + putKeys := make([]*Key, 3) + for i := range putKeys { + putKeys[i] = IDKey("GetAllThing", int64(10+i), parent) + _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + } + + var got []Thin + want := []Thin{ + {X: 20}, + {X: 21}, + {X: 22}, + } + getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) + if len(getKeys) != 3 && !testutil.Equal(getKeys, putKeys) { + t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) + } + if !testutil.Equal(got, want) { + t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) + } + if _, ok := err.(*ErrFieldMismatch); !ok { + t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) + } +} + +func TestKindlessQueries(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Dee struct { + I int + Why string + } + type Dum struct { + I int + Pling string + } + + parent := NameKey("Tweedle", "tweedle"+suffix, nil) + + keys := []*Key{ + NameKey("Dee", "dee0", parent), + NameKey("Dum", "dum1", parent), + NameKey("Dum", "dum2", parent), + NameKey("Dum", "dum3", parent), + } + src := []interface{}{ + &Dee{1, "binary0001"}, + &Dum{2, "binary0010"}, + &Dum{4, "binary0100"}, + &Dum{8, "binary1000"}, + } + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("put: %v", err) + } + + testCases := []struct { + desc string + query *Query + want []int + wantErr string + }{ + { + desc: "Dee", + query: NewQuery("Dee"), + want: []int{1}, + }, + { + desc: "Doh", + query: NewQuery("Doh"), + want: nil}, + { + desc: "Dum", + query: NewQuery("Dum"), + want: []int{2, 4, 8}, + }, + { + desc: "", + query: NewQuery(""), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless filter", + query: NewQuery("").Filter("__key__ =", keys[2]), + want: []int{4}, + }, + { + desc: "Kindless order", + query: NewQuery("").Order("__key__"), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless bad filter", + query: NewQuery("").Filter("I =", 4), + wantErr: "kind is required", + }, + { + desc: "Kindless bad order", + query: NewQuery("").Order("-__key__"), + wantErr: "kind is required for all orders except __key__ ascending", + }, + } +loop: + for _, tc := range testCases { + q := tc.query.Ancestor(parent) + gotCount, err := client.Count(ctx, q) + if err != nil { + if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) + } + continue + } + if tc.wantErr != "" { + t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) + continue + } + if gotCount != len(tc.want) { + t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) + continue + } + var got []int + for iter := client.Run(ctx, q); ; { + var dst struct { + I int + Why, Pling string + } + _, err := iter.Next(&dst) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("iter.Next %q: %v", tc.desc, err) + continue loop + } + got = append(got, dst.I) + } + sort.Ints(got) + if !testutil.Equal(got, tc.want) { + t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) + continue + } + } +} + +func TestTransaction(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer 
client.Close()
+
+	type Counter struct {
+		N int
+		T time.Time
+	}
+
+	bangErr := errors.New("bang")
+	tests := []struct {
+		desc          string
+		causeConflict []bool
+		retErr        []error
+		want          int
+		wantErr       error
+	}{
+		{
+			desc:          "3 attempts, no conflicts",
+			causeConflict: []bool{false},
+			retErr:        []error{nil},
+			want:          11,
+		},
+		{
+			desc:          "1 attempt, user error",
+			causeConflict: []bool{false},
+			retErr:        []error{bangErr},
+			wantErr:       bangErr,
+		},
+		{
+			desc:          "2 attempts, 1 conflict",
+			causeConflict: []bool{true, false},
+			retErr:        []error{nil, nil},
+			want:          13, // Each conflict increments by 2.
+		},
+		{
+			desc:          "3 attempts, 3 conflicts",
+			causeConflict: []bool{true, true, true},
+			retErr:        []error{nil, nil, nil},
+			wantErr:       ErrConcurrentTransaction,
+		},
+	}
+
+	for i, tt := range tests {
+		// Put a new counter.
+		c := &Counter{N: 10, T: time.Now()}
+		key, err := client.Put(ctx, IncompleteKey("TransCounter", nil), c)
+		if err != nil {
+			t.Errorf("%s: client.Put: %v", tt.desc, err)
+			continue
+		}
+		defer client.Delete(ctx, key)
+
+		// Increment the counter in a transaction.
+		// The test case can manually cause a conflict or return an
+		// error at each attempt.
+		var attempts int
+		_, err = client.RunInTransaction(ctx, func(tx *Transaction) error {
+			attempts++
+			if attempts > len(tt.causeConflict) {
+				return fmt.Errorf("too many attempts. Got %d, max %d", attempts, len(tt.causeConflict))
+			}
+
+			var c Counter
+			if err := tx.Get(key, &c); err != nil {
+				return err
+			}
+			c.N++
+			if _, err := tx.Put(key, &c); err != nil {
+				return err
+			}
+
+			if tt.causeConflict[attempts-1] {
+				c.N += 1
+				if _, err := client.Put(ctx, key, &c); err != nil {
+					return err
+				}
+			}
+
+			return tt.retErr[attempts-1]
+		}, MaxAttempts(i))
+
+		// Check the error returned by RunInTransaction.
+		if err != tt.wantErr {
+			t.Errorf("%s: got err %v, want %v", tt.desc, err, tt.wantErr)
+			continue
+		}
+		if err != nil {
+			continue
+		}
+
+		// Check the final value of the counter.
+		if err := client.Get(ctx, key, c); err != nil {
+			t.Errorf("%s: client.Get: %v", tt.desc, err)
+			continue
+		}
+		if c.N != tt.want {
+			t.Errorf("%s: counter N=%d, want N=%d", tt.desc, c.N, tt.want)
+		}
+	}
+}
+
+func TestNilPointers(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	client := newClient(ctx, t)
+	defer client.Close()
+
+	type X struct {
+		S string
+	}
+
+	src := []*X{{"zero"}, {"one"}}
+	keys := []*Key{IncompleteKey("NilX", nil), IncompleteKey("NilX", nil)}
+	keys, err := client.PutMulti(ctx, keys, src)
+	if err != nil {
+		t.Fatalf("PutMulti: %v", err)
+	}
+
+	// It's okay to store into a slice of nil *X.
+	xs := make([]*X, 2)
+	if err := client.GetMulti(ctx, keys, xs); err != nil {
+		t.Errorf("GetMulti: %v", err)
+	} else if !testutil.Equal(xs, src) {
+		t.Errorf("GetMulti fetched %v, want %v", xs, src)
+	}
+
+	// It isn't okay to store into a single nil *X.
+	var x0 *X
+	if err, want := client.Get(ctx, keys[0], x0), ErrInvalidEntityType; err != want {
+		t.Errorf("Get: err %v; want %v", err, want)
+	}
+
+	// Test that deleting with duplicate keys works.
+	keys = append(keys, keys...)
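+	// The duplicated keys above double-check that DeleteMulti tolerates the
+	// same key appearing more than once in a single call.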
+ if err := client.DeleteMulti(ctx, keys); err != nil { + t.Errorf("Delete: %v", err) + } +} + +func TestNestedRepeatedElementNoIndex(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Inner struct { + Name string + Value string `datastore:",noindex"` + } + type Outer struct { + Config []Inner + } + m := &Outer{ + Config: []Inner{ + {Name: "short", Value: "a"}, + {Name: "long", Value: strings.Repeat("a", 2000)}, + }, + } + + key := NameKey("Nested", "Nested"+suffix, nil) + if _, err := client.Put(ctx, key, m); err != nil { + t.Fatalf("client.Put: %v", err) + } + if err := client.Delete(ctx, key); err != nil { + t.Fatalf("client.Delete: %v", err) + } +} diff --git a/vendor/cloud.google.com/go/datastore/key.go b/vendor/cloud.google.com/go/datastore/key.go new file mode 100644 index 0000000..b9f2cf5 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/key.go @@ -0,0 +1,280 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "bytes" + "encoding/base64" + "encoding/gob" + "errors" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +// Key represents the datastore key for a stored entity. +type Key struct { + // Kind cannot be empty. + Kind string + // Either ID or Name must be zero for the Key to be valid. + // If both are zero, the Key is incomplete. + ID int64 + Name string + // Parent must either be a complete Key or nil. + Parent *Key + + // Namespace provides the ability to partition your data for multiple + // tenants. In most cases, it is not necessary to specify a namespace. + // See docs on datastore multitenancy for details: + // https://cloud.google.com/datastore/docs/concepts/multitenancy + Namespace string +} + +// Incomplete reports whether the key does not refer to a stored entity. +func (k *Key) Incomplete() bool { + return k.Name == "" && k.ID == 0 +} + +// valid returns whether the key is valid. +func (k *Key) valid() bool { + if k == nil { + return false + } + for ; k != nil; k = k.Parent { + if k.Kind == "" { + return false + } + if k.Name != "" && k.ID != 0 { + return false + } + if k.Parent != nil { + if k.Parent.Incomplete() { + return false + } + if k.Parent.Namespace != k.Namespace { + return false + } + } + } + return true +} + +// Equal reports whether two keys are equal. Two keys are equal if they are +// both nil, or if their kinds, IDs, names, namespaces and parents are equal. 
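+//
+// For example (illustrative only):
+//
+//	a := NameKey("Kind", "x", nil)
+//	b := NameKey("Kind", "x", nil)
+//	a.Equal(b) // true: same kind, name, namespace and (nil) parents
+//	b.Namespace = "other"
+//	a.Equal(b) // false: namespaces differ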
+func (k *Key) Equal(o *Key) bool {
+	for {
+		if k == nil || o == nil {
+			return k == o // if either is nil, both must be nil
+		}
+		if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind {
+			return false
+		}
+		if k.Parent == nil && o.Parent == nil {
+			return true
+		}
+		k = k.Parent
+		o = o.Parent
+	}
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+	if k.Parent != nil {
+		k.Parent.marshal(b)
+	}
+	b.WriteByte('/')
+	b.WriteString(k.Kind)
+	b.WriteByte(',')
+	if k.Name != "" {
+		b.WriteString(k.Name)
+	} else {
+		b.WriteString(strconv.FormatInt(k.ID, 10))
+	}
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+	if k == nil {
+		return ""
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 512))
+	k.marshal(b)
+	return b.String()
+}
+
+// Note: fields are not renamed compared to the appengine gobKey struct.
+// This ensures that gobs created by appengine can be read here, and vice versa.
+type gobKey struct {
+	Kind      string
+	StringID  string
+	IntID     int64
+	Parent    *gobKey
+	AppID     string
+	Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+	if k == nil {
+		return nil
+	}
+	return &gobKey{
+		Kind:      k.Kind,
+		StringID:  k.Name,
+		IntID:     k.ID,
+		Parent:    keyToGobKey(k.Parent),
+		Namespace: k.Namespace,
+	}
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+	if gk == nil {
+		return nil
+	}
+	return &Key{
+		Kind:      gk.Kind,
+		Name:      gk.StringID,
+		ID:        gk.IntID,
+		Parent:    gobKeyToKey(gk.Parent),
+		Namespace: gk.Namespace,
+	}
+}
+
+// GobEncode marshals the key into a sequence of bytes
+// using an encoding/gob.Encoder.
+func (k *Key) GobEncode() ([]byte, error) {
+	buf := new(bytes.Buffer)
+	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder.
+func (k *Key) GobDecode(buf []byte) error {
+	gk := new(gobKey)
+	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+		return err
+	}
+	*k = *gobKeyToKey(gk)
+	return nil
+}
+
+// MarshalJSON marshals the key into JSON.
+func (k *Key) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + k.Encode() + `"`), nil
+}
+
+// UnmarshalJSON unmarshals a key JSON object into a Key.
+func (k *Key) UnmarshalJSON(buf []byte) error {
+	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+		return errors.New("datastore: bad JSON key")
+	}
+	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+	if err != nil {
+		return err
+	}
+	*k = *k2
+	return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+	pKey := keyToProto(k)
+
+	b, err := proto.Marshal(pKey)
+	if err != nil {
+		panic(err)
+	}
+
+	// Trailing padding is stripped.
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+	// Re-add padding.
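+	// (Encode strips the trailing '=' padding, and base64.URLEncoding
+	// expects padded input, so the padding has to be restored before
+	// decoding.)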
+ if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + pKey := new(pb.Key) + if err := proto.Unmarshal(b, pKey); err != nil { + return nil, err + } + return protoToKey(pKey) +} + +// AllocateIDs accepts a slice of incomplete keys and returns a +// slice of complete keys that are guaranteed to be valid in the datastore. +func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { + if keys == nil { + return nil, nil + } + + req := &pb.AllocateIdsRequest{ + ProjectId: c.dataset, + Keys: multiKeyToProto(keys), + } + resp, err := c.client.AllocateIds(ctx, req) + if err != nil { + return nil, err + } + + return multiProtoToKey(resp.Keys) +} + +// IncompleteKey creates a new incomplete key. +// The supplied kind cannot be empty. +// The namespace of the new key is empty. +func IncompleteKey(kind string, parent *Key) *Key { + return &Key{ + Kind: kind, + Parent: parent, + } +} + +// NameKey creates a new key with a name. +// The supplied kind cannot be empty. +// The supplied parent must either be a complete key or nil. +// The namespace of the new key is empty. +func NameKey(kind, name string, parent *Key) *Key { + return &Key{ + Kind: kind, + Name: name, + Parent: parent, + } +} + +// IDKey creates a new key with an ID. +// The supplied kind cannot be empty. +// The supplied parent must either be a complete key or nil. +// The namespace of the new key is empty. +func IDKey(kind string, id int64, parent *Key) *Key { + return &Key{ + Kind: kind, + ID: id, + Parent: parent, + } +} diff --git a/vendor/cloud.google.com/go/datastore/key_test.go b/vendor/cloud.google.com/go/datastore/key_test.go new file mode 100644 index 0000000..5f2ddcb --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/key_test.go @@ -0,0 +1,210 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" +) + +func TestEqual(t *testing.T) { + testCases := []struct { + x, y *Key + equal bool + }{ + { + x: nil, + y: nil, + equal: true, + }, + { + x: &Key{Kind: "kindA"}, + y: &Key{Kind: "kindA"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameA"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindB", Name: "nameA"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameB"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", ID: 1337}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindY", Name: "nameX"}}, + equal: false, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337}, + equal: false, + }, + } + + for _, tt := range testCases { + if got := tt.x.Equal(tt.y); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) + } + if got := tt.y.Equal(tt.x); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) + } + } +} + +func TestEncoding(t *testing.T) { + testCases := []struct { + k *Key + valid bool + }{ + { + k: nil, + valid: false, + }, + { + k: &Key{}, + valid: false, + }, + { + k: &Key{Kind: "kindA"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Namespace: "gopherspace"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Name: "nameA"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", ID: 1337}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Name: "nameA", ID: 1337}, + valid: false, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB"}}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB"}}, + valid: false, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB", Namespace: "gopherspace"}}, + valid: false, + }, + } + + for _, tt := range testCases { + if got := tt.k.valid(); got != tt.valid { + t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) + } + + // Check encoding/decoding for valid keys. 
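+		// Each valid key is round-tripped three ways below: through
+		// Encode/DecodeKey, through JSON, and through gob.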
+		if !tt.valid {
+			continue
+		}
+		enc := tt.k.Encode()
+		dec, err := DecodeKey(enc)
+		if err != nil {
+			t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err)
+			continue
+		}
+		if !tt.k.Equal(dec) {
+			t.Logf("Proto: %s", keyToProto(tt.k))
+			t.Errorf("Decoded key %v not equal to %v", dec, tt.k)
+		}
+
+		b, err := json.Marshal(tt.k)
+		if err != nil {
+			t.Errorf("json.Marshal(%v): %v", tt.k, err)
+			continue
+		}
+		key := &Key{}
+		if err := json.Unmarshal(b, key); err != nil {
+			t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err)
+			continue
+		}
+		if !tt.k.Equal(key) {
+			t.Errorf("JSON decoded key %v not equal to %v", key, tt.k)
+		}
+
+		buf := &bytes.Buffer{}
+		gobEnc := gob.NewEncoder(buf)
+		if err := gobEnc.Encode(tt.k); err != nil {
+			t.Errorf("gobEnc.Encode(%v): %v", tt.k, err)
+			continue
+		}
+		gobDec := gob.NewDecoder(buf)
+		key = &Key{}
+		if err := gobDec.Decode(key); err != nil {
+			t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err)
+		}
+		if !tt.k.Equal(key) {
+			t.Errorf("gob decoded key %v not equal to %v", key, tt.k)
+		}
+	}
+}
+
+func TestInvalidKeyDecode(t *testing.T) {
+	// Check that decoding an invalid key returns an err and doesn't panic.
+	enc := NameKey("Kind", "Foo", nil).Encode()
+
+	invalid := []string{
+		"",
+		"Laboratorio",
+		enc + "Junk",
+		enc[:len(enc)-4],
+	}
+	for _, enc := range invalid {
+		key, err := DecodeKey(enc)
+		if err == nil || key != nil {
+			t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/load.go b/vendor/cloud.google.com/go/datastore/load.go
new file mode 100644
index 0000000..03bde81
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/load.go
@@ -0,0 +1,491 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/internal/fields"
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+)
+
+var (
+	typeOfByteSlice = reflect.TypeOf([]byte(nil))
+	typeOfTime      = reflect.TypeOf(time.Time{})
+	typeOfGeoPoint  = reflect.TypeOf(GeoPoint{})
+	typeOfKeyPtr    = reflect.TypeOf(&Key{})
+	typeOfEntityPtr = reflect.TypeOf(&Entity{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
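+// For example, loading an int64 property into a string struct field yields
+// "type mismatch: int versus string".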
+func typeMismatchReason(p Property, v reflect.Value) string { + entityType := "empty" + switch p.Value.(type) { + case int64: + entityType = "int" + case bool: + entityType = "bool" + case string: + entityType = "string" + case float64: + entityType = "float" + case *Key: + entityType = "*datastore.Key" + case *Entity: + entityType = "*datastore.Entity" + case GeoPoint: + entityType = "GeoPoint" + case time.Time: + entityType = "time.Time" + case []byte: + entityType = "[]byte" + } + + return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) +} + +type propertyLoader struct { + // m holds the number of times a substruct field like "Foo.Bar.Baz" has + // been seen so far. The map is constructed lazily. + m map[string]int +} + +func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string { + sl, ok := p.Value.([]interface{}) + if !ok { + return l.loadOneElement(codec, structValue, p, prev) + } + for _, val := range sl { + p.Value = val + if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" { + return errStr + } + } + return "" +} + +// loadOneElement loads the value of Property p into structValue based on the provided +// codec. codec is used to find the field in structValue into which p should be loaded. +// prev is the set of property names already seen for structValue. +func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string { + var sliceOk bool + var sliceIndex int + var v reflect.Value + + name := p.Name + fieldNames := strings.Split(name, ".") + + for len(fieldNames) > 0 { + var field *fields.Field + + // Start by trying to find a field with name. If none found, + // cut off the last field (delimited by ".") and find its parent + // in the codec. + // eg. for name "A.B.C.D", split off "A.B.C" and try to + // find a field in the codec with this name. + // Loop again with "A.B", etc. + for i := len(fieldNames); i > 0; i-- { + parent := strings.Join(fieldNames[:i], ".") + field = codec.Match(parent) + if field != nil { + fieldNames = fieldNames[i:] + break + } + } + + // If we never found a matching field in the codec, return + // error message. + if field == nil { + return "no such struct field" + } + + v = initField(structValue, field.Index) + if !v.IsValid() { + return "no such struct field" + } + if !v.CanSet() { + return "cannot set struct field" + } + + // If field implements PLS, we delegate loading to the PLS's Load early, + // and stop iterating through fields. + ok, err := plsFieldLoad(v, p, fieldNames) + if err != nil { + return err.Error() + } + if ok { + return "" + } + + if field.Type.Kind() == reflect.Struct { + codec, err = structCache.Fields(field.Type) + if err != nil { + return err.Error() + } + structValue = v + } + + // If the element is a slice, we need to accommodate it. + if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice { + if l.m == nil { + l.m = make(map[string]int) + } + sliceIndex = l.m[p.Name] + l.m[p.Name] = sliceIndex + 1 + for v.Len() <= sliceIndex { + v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) + } + structValue = v.Index(sliceIndex) + + // If structValue implements PLS, we delegate loading to the PLS's + // Load early, and stop iterating through fields. 
+			ok, err := plsFieldLoad(structValue, p, fieldNames)
+			if err != nil {
+				return err.Error()
+			}
+			if ok {
+				return ""
+			}
+
+			if structValue.Type().Kind() == reflect.Struct {
+				codec, err = structCache.Fields(structValue.Type())
+				if err != nil {
+					return err.Error()
+				}
+			}
+			sliceOk = true
+		}
+	}
+
+	var slice reflect.Value
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		slice = v
+		v = reflect.New(v.Type().Elem()).Elem()
+	} else if _, ok := prev[p.Name]; ok && !sliceOk {
+		// Zero the field back out that was set previously; the property
+		// turns out to be multiply-valued, and a non-slice field cannot hold it.
+		v.Set(reflect.Zero(v.Type()))
+		return "multiple-valued property requires a slice field type"
+	}
+
+	prev[p.Name] = struct{}{}
+
+	if errReason := setVal(v, p); errReason != "" {
+		// Set the slice back to its zero value.
+		if slice.IsValid() {
+			slice.Set(reflect.Zero(slice.Type()))
+		}
+		return errReason
+	}
+
+	if slice.IsValid() {
+		slice.Index(sliceIndex).Set(v)
+	}
+
+	return ""
+}
+
+// plsFieldLoad first tries to convert v's value to a PLS, then v's addressed
+// value to a PLS. If neither succeeds, plsFieldLoad returns false for the first
+// return value. Otherwise, the first return value will be true.
+// If v is successfully converted to a PLS, plsFieldLoad will then try to Load
+// the property p into v (by way of the PLS's Load method).
+//
+// If the field v has been flattened, the Property's name must be altered
+// before calling Load to reflect the field v.
+// For example, if our original field name was "A.B.C.D",
+// and at this point in iteration we had initialized the field
+// corresponding to "A" and have moved into the struct, so that now
+// v corresponds to the field named "B", then we want to let the
+// PLS handle this field (B)'s subfields ("C", "D"),
+// so we send the property to the PLS's Load, renamed to "C.D".
+//
+// If subfields are present, the field v has been flattened.
+func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) {
+	vpls, err := plsForLoad(v)
+	if err != nil {
+		return false, err
+	}
+
+	if vpls == nil {
+		return false, nil
+	}
+
+	// If Entity, load properties as well as key.
+	if e, ok := p.Value.(*Entity); ok {
+		err = loadEntity(vpls, e)
+		return true, err
+	}
+
+	// If flattened, we must alter the property's name to reflect
+	// the field v.
+	if len(subfields) > 0 {
+		p.Name = strings.Join(subfields, ".")
+	}
+
+	return true, vpls.Load([]Property{p})
+}
+
+// setVal sets 'v' to the value of the Property 'p'.
+func setVal(v reflect.Value, p Property) string {
+	pValue := p.Value
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		x, ok := pValue.(int64)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		if v.OverflowInt(x) {
+			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+		}
+		v.SetInt(x)
+	case reflect.Bool:
+		x, ok := pValue.(bool)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		v.SetBool(x)
+	case reflect.String:
+		x, ok := pValue.(string)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		v.SetString(x)
+	case reflect.Float32, reflect.Float64:
+		x, ok := pValue.(float64)
+		if !ok && pValue != nil {
+			return typeMismatchReason(p, v)
+		}
+		if v.OverflowFloat(x) {
+			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+		}
+		v.SetFloat(x)
+	case reflect.Ptr:
+		// v must be either a pointer to a Key or Entity.
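+		// (A *Key is stored as-is; any other pointer must point to a
+		// struct, which is then loaded as a nested entity.)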
+ if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct { + return typeMismatchReason(p, v) + } + + if pValue == nil { + // If v is populated already, set it to nil. + if !v.IsNil() { + v.Set(reflect.New(v.Type()).Elem()) + } + return "" + } + + switch x := pValue.(type) { + case *Key: + if _, ok := v.Interface().(*Key); !ok { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case *Entity: + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + err := loadEntity(v.Interface(), x) + if err != nil { + return err.Error() + } + + default: + return typeMismatchReason(p, v) + } + case reflect.Struct: + switch v.Type() { + case typeOfTime: + x, ok := pValue.(time.Time) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case typeOfGeoPoint: + x, ok := pValue.(GeoPoint) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + default: + ent, ok := pValue.(*Entity) + if !ok { + return typeMismatchReason(p, v) + } + err := loadEntity(v.Addr().Interface(), ent) + if err != nil { + return err.Error() + } + } + case reflect.Slice: + x, ok := pValue.([]byte) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.Type().Elem().Kind() != reflect.Uint8 { + return typeMismatchReason(p, v) + } + v.SetBytes(x) + default: + return typeMismatchReason(p, v) + } + return "" +} + +// initField is similar to reflect's Value.FieldByIndex, in that it +// returns the nested struct field corresponding to index, but it +// initialises any nil pointers encountered when traversing the structure. +func initField(val reflect.Value, index []int) reflect.Value { + for _, i := range index[:len(index)-1] { + val = val.Field(i) + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + } + return val.Field(index[len(index)-1]) +} + +// loadEntityProto loads an EntityProto into PropertyLoadSaver or struct pointer. +func loadEntityProto(dst interface{}, src *pb.Entity) error { + ent, err := protoToEntity(src) + if err != nil { + return err + } + return loadEntity(dst, ent) +} + +func loadEntity(dst interface{}, ent *Entity) error { + if pls, ok := dst.(PropertyLoadSaver); ok { + err := pls.Load(ent.Properties) + if err != nil { + return err + } + if e, ok := dst.(KeyLoader); ok { + err = e.LoadKey(ent.Key) + } + return err + } + return loadEntityToStruct(dst, ent) +} + +func loadEntityToStruct(dst interface{}, ent *Entity) error { + pls, err := newStructPLS(dst) + if err != nil { + return err + } + // Load properties. + err = pls.Load(ent.Properties) + if err != nil { + return err + } + // Load key. + keyField := pls.codec.Match(keyFieldName) + if keyField != nil && ent.Key != nil { + pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key)) + } + + return nil +} + +func (s structPLS) Load(props []Property) error { + var fieldName, errReason string + var l propertyLoader + + prev := make(map[string]struct{}) + for _, p := range props { + if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { + // We don't return early, as we try to load as many properties as possible. + // It is valid to load an entity into a struct that cannot fully represent it. + // That case returns an error, but the caller is free to ignore it. 
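+			// Only the most recent mismatch is recorded, so the returned
+			// ErrFieldMismatch describes the last property that failed.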
+ fieldName, errReason = p.Name, errStr + } + } + if errReason != "" { + return &ErrFieldMismatch{ + StructType: s.v.Type(), + FieldName: fieldName, + Reason: errReason, + } + } + return nil +} + +func protoToEntity(src *pb.Entity) (*Entity, error) { + props := make([]Property, 0, len(src.Properties)) + for name, val := range src.Properties { + v, err := propToValue(val) + if err != nil { + return nil, err + } + props = append(props, Property{ + Name: name, + Value: v, + NoIndex: val.ExcludeFromIndexes, + }) + } + var key *Key + if src.Key != nil { + // Ignore any error, since nested entity values + // are allowed to have an invalid key. + key, _ = protoToKey(src.Key) + } + + return &Entity{key, props}, nil +} + +// propToValue returns a Go value that represents the PropertyValue. For +// example, a TimestampValue becomes a time.Time. +func propToValue(v *pb.Value) (interface{}, error) { + switch v := v.ValueType.(type) { + case *pb.Value_NullValue: + return nil, nil + case *pb.Value_BooleanValue: + return v.BooleanValue, nil + case *pb.Value_IntegerValue: + return v.IntegerValue, nil + case *pb.Value_DoubleValue: + return v.DoubleValue, nil + case *pb.Value_TimestampValue: + return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil + case *pb.Value_KeyValue: + return protoToKey(v.KeyValue) + case *pb.Value_StringValue: + return v.StringValue, nil + case *pb.Value_BlobValue: + return []byte(v.BlobValue), nil + case *pb.Value_GeoPointValue: + return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil + case *pb.Value_EntityValue: + return protoToEntity(v.EntityValue) + case *pb.Value_ArrayValue: + arr := make([]interface{}, 0, len(v.ArrayValue.Values)) + for _, v := range v.ArrayValue.Values { + vv, err := propToValue(v) + if err != nil { + return nil, err + } + arr = append(arr, vv) + } + return arr, nil + default: + return nil, nil + } +} diff --git a/vendor/cloud.google.com/go/datastore/load_test.go b/vendor/cloud.google.com/go/datastore/load_test.go new file mode 100644 index 0000000..0bbabb5 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/load_test.go @@ -0,0 +1,757 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "reflect" + "testing" + + "cloud.google.com/go/internal/testutil" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type Simple struct { + I int64 +} + +type SimpleWithTag struct { + I int64 `datastore:"II"` +} + +type NestedSimpleWithTag struct { + A SimpleWithTag `datastore:"AA"` +} + +type NestedSliceOfSimple struct { + A []Simple +} + +type SimpleTwoFields struct { + S string + SS string +} + +type NestedSimpleAnonymous struct { + Simple + X string +} + +type NestedSimple struct { + A Simple + I int +} + +type NestedSimple1 struct { + A Simple + X string +} + +type NestedSimple2X struct { + AA NestedSimple + A SimpleTwoFields + S string +} + +type BDotB struct { + B string `datastore:"B.B"` +} + +type ABDotB struct { + A BDotB +} + +type MultiAnonymous struct { + Simple + SimpleTwoFields + X string +} + +func TestLoadEntityNestedLegacy(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + desc: "nested", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "A.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + want: &NestedSimple1{ + A: Simple{I: 2}, + X: "two", + }, + }, + { + desc: "nested with tag", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "AA.II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + want: &NestedSimpleWithTag{ + A: SimpleWithTag{I: 2}, + }, + }, + { + desc: "nested with anonymous struct field", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + want: &NestedSimpleAnonymous{ + Simple: Simple{I: 2}, + X: "two", + }, + }, + { + desc: "nested with dotted field tag", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A.B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}}, + }, + }, + want: &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + desc: "nested with multiple anonymous fields", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + "X": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + }, + }, + want: &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "s", + }, + }, + } + + for _, tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntityProto(dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` +} + +type NestedWithKey struct { + Y string + N WithKey +} + +var ( + incompleteKey = newKey("", nil) + invalidKey = newKey("s", incompleteKey) +) + +func TestLoadEntityNested(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + desc: "nested basic", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: 
map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, + }, + }, + want: &NestedSimple{ + A: Simple{I: 3}, + I: 10, + }, + }, + { + desc: "nested with struct tags", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, + }, + }, + }}, + }, + }, + want: &NestedSimpleWithTag{ + A: SimpleWithTag{I: 1}, + }, + }, + { + desc: "nested 2x", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, + }, + }, + }}, + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_StringValue{StringValue: "SS"}}, + }, + }, + want: &NestedSimple2X{ + AA: NestedSimple{ + A: Simple{I: 3}, + I: 1, + }, + A: SimpleTwoFields{S: "S", SS: "s"}, + S: "SS", + }, + }, + { + desc: "nested anonymous", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + "X": {ValueType: &pb.Value_StringValue{StringValue: "SomeX"}}, + }, + }, + want: &NestedSimpleAnonymous{ + Simple: Simple{I: 3}, + X: "SomeX", + }, + }, + { + desc: "nested simple with slice", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, + }, + }, + }}, + }, + }, + }}, + }, + }, + + want: &NestedSliceOfSimple{ + A: []Simple{Simple{I: 3}, Simple{I: 4}}, + }, + }, + { + desc: "nested with multiple anonymous fields", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, + "X": {ValueType: &pb.Value_StringValue{StringValue: "ss"}}, + }, + }, + want: &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "ss", + }, + }, + { + desc: "nested with dotted field tag", + src: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}}, + }, + }, + }}, + }, + }, + want: &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + desc: "nested entity with key", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: 
keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + want: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + }, + { + desc: "nested entity with invalid key", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: keyToProto(invalidKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + want: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: invalidKey, + }, + }, + }, + } + + for _, tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntityProto(dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type NestedStructPtrs struct { + *SimpleTwoFields + Nest *SimpleTwoFields + TwiceNest *NestedSimple2 + I int +} + +type NestedSimple2 struct { + A *Simple + I int +} + +func TestAlreadyPopulatedDst(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + dst interface{} + want interface{} + }{ + { + desc: "simple already populated, nil properties", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_NullValue{}}, + }, + }, + dst: &Simple{ + I: 12, + }, + want: &Simple{}, + }, + { + desc: "nested structs already populated", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{StringValue: "world"}}, + }, + }, + dst: &SimpleTwoFields{S: "hello" /* SS: "" */}, + want: &SimpleTwoFields{S: "hello", SS: "world"}, + }, + { + desc: "nested structs already populated, pValues nil", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_NullValue{}}, + "SS": {ValueType: &pb.Value_StringValue{StringValue: "ss hello"}}, + "Nest": {ValueType: &pb.Value_NullValue{}}, + "TwiceNest": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_NullValue{}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 5}}, + }, + }, + dst: &NestedStructPtrs{ + &SimpleTwoFields{S: "hello" /* SS: "" */}, + &SimpleTwoFields{ /* S: "" */ SS: "twice hello"}, + &NestedSimple2{ + A: &Simple{I: 2}, + /* I: 0 */ + }, + 0, + }, + want: &NestedStructPtrs{ + &SimpleTwoFields{ /* S: "" */ SS: "ss hello"}, + nil, + &NestedSimple2{ + /* A: nil, */ + I: 2, + }, + 5, + }, + }, + } + + for _, tc := range testCases { + err := loadEntityProto(tc.dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, tc.dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want) + } + } +} + +type PLS0 struct { + A string +} + +func (p *PLS0) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "A" { + p.A = pp.Value.(string) + } + } + return nil +} + +func (p *PLS0) 
Save() (props []Property, err error) { + return []Property{{Name: "A", Value: p.A}}, nil +} + +type KeyLoader1 struct { + A string + K *Key +} + +func (kl *KeyLoader1) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "A" { + kl.A = pp.Value.(string) + } + } + return nil +} + +func (kl *KeyLoader1) Save() (props []Property, err error) { + return []Property{{Name: "A", Value: kl.A}}, nil +} + +func (kl *KeyLoader1) LoadKey(k *Key) error { + kl.K = k + return nil +} + +type KeyLoader2 struct { + B int + Key *Key +} + +func (kl *KeyLoader2) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "B" { + kl.B = int(pp.Value.(int64)) + } + } + return nil +} + +func (kl *KeyLoader2) Save() (props []Property, err error) { + return []Property{{Name: "B", Value: int64(kl.B)}}, nil +} + +func (kl *KeyLoader2) LoadKey(k *Key) error { + kl.Key = k + return nil +} + +type KeyLoader3 struct { + C bool + K *Key +} + +func (kl *KeyLoader3) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "C" { + kl.C = pp.Value.(bool) + } + } + return nil +} + +func (kl *KeyLoader3) Save() (props []Property, err error) { + return []Property{{Name: "C", Value: kl.C}}, nil +} + +func (kl *KeyLoader3) LoadKey(k *Key) error { + kl.K = k + return nil +} + +type KeyLoader4 struct { + PLS0 + K *Key +} + +func (kl *KeyLoader4) LoadKey(k *Key) error { + kl.K = k + return nil +} + +type NotKeyLoader struct { + A string + K *Key +} + +func (p *NotKeyLoader) Load(props []Property) error { + for _, pp := range props { + if pp.Name == "A" { + p.A = pp.Value.(string) + } + } + return nil +} + +func (p *NotKeyLoader) Save() (props []Property, err error) { + return []Property{{Name: "A", Value: p.A}}, nil +} + +type NestedKeyLoaders struct { + Two *KeyLoader2 + Three []*KeyLoader3 + Four *KeyLoader4 + PLS *NotKeyLoader +} + +func TestKeyLoader(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + dst interface{} + want interface{} + }{ + { + desc: "simple key loader", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, + }, + }, + dst: &KeyLoader1{}, + want: &KeyLoader1{ + A: "hello", + K: testKey0, + }, + }, + { + desc: "embedded PLS key loader", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, + }, + }, + dst: &KeyLoader4{}, + want: &KeyLoader4{ + PLS0: PLS0{A: "hello"}, + K: testKey0, + }, + }, + { + desc: "nested key loaders", + src: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Two": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "B": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, + }, + Key: keyToProto(testKey1a), + }, + }}, + "Three": {ValueType: &pb.Value_ArrayValue{ + ArrayValue: &pb.ArrayValue{ + Values: []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "C": {ValueType: &pb.Value_BooleanValue{BooleanValue: true}}, + }, + Key: keyToProto(testKey1b), + }, + }}, + {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "C": {ValueType: &pb.Value_BooleanValue{BooleanValue: false}}, + }, + Key: keyToProto(testKey0), + }, + }}, + }, + }, + }}, + "Four": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ 
+ "A": {ValueType: &pb.Value_StringValue{StringValue: "testing"}}, + }, + Key: keyToProto(testKey2a), + }, + }}, + "PLS": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_StringValue{StringValue: "something"}}, + }, + + Key: keyToProto(testKey1a), + }, + }}, + }, + }, + dst: &NestedKeyLoaders{}, + want: &NestedKeyLoaders{ + Two: &KeyLoader2{B: 12, Key: testKey1a}, + Three: []*KeyLoader3{ + { + C: true, + K: testKey1b, + }, + { + C: false, + K: testKey0, + }, + }, + Four: &KeyLoader4{ + PLS0: PLS0{A: "testing"}, + K: testKey2a, + }, + PLS: &NotKeyLoader{A: "something"}, + }, + }, + } + + for _, tc := range testCases { + err := loadEntityProto(tc.dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, tc.dst) { + t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/prop.go b/vendor/cloud.google.com/go/datastore/prop.go new file mode 100644 index 0000000..628ccc9 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/prop.go @@ -0,0 +1,342 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "unicode" + + "cloud.google.com/go/internal/fields" +) + +// Entities with more than this many indexed properties will not be saved. +const maxIndexedProperties = 20000 + +// []byte fields more than 1 megabyte long will not be loaded or saved. +const maxBlobLen = 1 << 20 + +// Property is a name/value pair plus some metadata. A datastore entity's +// contents are loaded and saved as a sequence of Properties. Each property +// name must be unique within an entity. +type Property struct { + // Name is the property name. + Name string + // Value is the property value. The valid types are: + // - int64 + // - bool + // - string + // - float64 + // - *Key + // - time.Time + // - GeoPoint + // - []byte (up to 1 megabyte in length) + // - *Entity (representing a nested struct) + // Value can also be: + // - []interface{} where each element is one of the above types + // This set is smaller than the set of valid struct field types that the + // datastore can load and save. A Value's type must be explicitly on + // the list above; it is not sufficient for the underlying type to be + // on that list. For example, a Value of "type myInt64 int64" is + // invalid. Smaller-width integers and floats are also invalid. Again, + // this is more restrictive than the set of valid struct field types. + // + // A Value will have an opaque type when loading entities from an index, + // such as via a projection query. Load entities into a struct instead + // of a PropertyLoadSaver when using a projection query. + // + // A Value may also be the nil interface value; this is equivalent to + // Python's None but not directly representable by a Go struct. 
Loading + // a nil-valued property into a struct will set that field to the zero + // value. + Value interface{} + // NoIndex is whether the datastore cannot index this property. + // If NoIndex is set to false, []byte and string values are limited to + // 1500 bytes. + NoIndex bool +} + +// An Entity is the value type for a nested struct. +// This type is only used for a Property's Value. +type Entity struct { + Key *Key + Properties []Property +} + +// PropertyLoadSaver can be converted from and to a slice of Properties. +type PropertyLoadSaver interface { + Load([]Property) error + Save() ([]Property, error) +} + +// KeyLoader can store a Key. +type KeyLoader interface { + // PropertyLoadSaver is embedded because a KeyLoader + // must also always implement PropertyLoadSaver. + PropertyLoadSaver + LoadKey(k *Key) error +} + +// PropertyList converts a []Property to implement PropertyLoadSaver. +type PropertyList []Property + +var ( + typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() + typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) +) + +// Load loads all of the provided properties into l. +// It does not first reset *l to an empty slice. +func (l *PropertyList) Load(p []Property) error { + *l = append(*l, p...) + return nil +} + +// Save saves all of l's properties as a slice of Properties. +func (l *PropertyList) Save() ([]Property, error) { + return *l, nil +} + +// validPropertyName returns whether name consists of one or more valid Go +// identifiers joined by ".". +func validPropertyName(name string) bool { + if name == "" { + return false + } + for _, s := range strings.Split(name, ".") { + if s == "" { + return false + } + first := true + for _, c := range s { + if first { + first = false + if c != '_' && !unicode.IsLetter(c) { + return false + } + } else { + if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + } + return true +} + +// parseTag interprets datastore struct field tags +func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + s := t.Get("datastore") + parts := strings.Split(s, ",") + if parts[0] == "-" && len(parts) == 1 { + return "", false, nil, nil + } + if parts[0] != "" && !validPropertyName(parts[0]) { + err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0]) + return "", false, nil, err + } + + var opts saveOpts + if len(parts) > 1 { + for _, p := range parts[1:] { + switch p { + case "flatten": + opts.flatten = true + case "omitempty": + opts.omitEmpty = true + case "noindex": + opts.noIndex = true + default: + err = fmt.Errorf("datastore: struct tag has invalid option: %q", p) + return "", false, nil, err + } + } + other = opts + } + return parts[0], true, other, nil +} + +func validateType(t reflect.Type) error { + if t.Kind() != reflect.Struct { + return fmt.Errorf("datastore: validate called with non-struct type %s", t) + } + + return validateChildType(t, "", false, false, map[reflect.Type]bool{}) +} + +// validateChildType is a recursion helper func for validateType +func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error { + if prevTypes[t] { + return nil + } + prevTypes[t] = true + + switch t.Kind() { + case reflect.Slice: + if flatten && prevSlice { + return fmt.Errorf("datastore: flattening nested structs leads to a slice of slices: field %q", fieldName) + } + return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes) + case reflect.Struct: + 
if t == typeOfTime || t == typeOfGeoPoint { + return nil + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + // If a named field is unexported, ignore it. An anonymous + // unexported field is processed, because it may contain + // exported fields, which are visible. + exported := (f.PkgPath == "") + if !exported && !f.Anonymous { + continue + } + + _, keep, other, err := parseTag(f.Tag) + // Handle error from parseTag now instead of later (in cache.Fields call). + if err != nil { + return err + } + if !keep { + continue + } + if other != nil { + opts := other.(saveOpts) + flatten = flatten || opts.flatten + } + if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil { + return err + } + } + case reflect.Ptr: + if t == typeOfKeyPtr { + return nil + } + return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes) + } + return nil +} + +// isLeafType determines whether or not a type is a 'leaf type' +// and should not be recursed into, but considered one field. +func isLeafType(t reflect.Type) bool { + return t == typeOfTime || t == typeOfGeoPoint +} + +// structCache collects the structs whose fields have already been calculated. +var structCache = fields.NewCache(parseTag, validateType, isLeafType) + +// structPLS adapts a struct to be a PropertyLoadSaver. +type structPLS struct { + v reflect.Value + codec fields.List +} + +// newStructPLS returns a structPLS, which implements the +// PropertyLoadSaver interface, for the struct pointer p. +func newStructPLS(p interface{}) (*structPLS, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + f, err := structCache.Fields(v.Type()) + if err != nil { + return nil, err + } + return &structPLS{v, f}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +// +// The values of dst's unmatched struct fields are not modified, +// and matching slice-typed fields are not reset before appending to +// them. In particular, it is recommended to pass a pointer to a zero +// valued struct on each LoadStruct call. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} + +// plsForLoad tries to convert v to a PropertyLoadSaver. +// If successful, plsForLoad returns a settable v as a PropertyLoadSaver. +// +// plsForLoad is intended to be used with nested struct fields which +// may implement PropertyLoadSaver. +// +// v must be settable. +func plsForLoad(v reflect.Value) (PropertyLoadSaver, error) { + var nilPtr bool + if v.Kind() == reflect.Ptr && v.IsNil() { + nilPtr = true + v.Set(reflect.New(v.Type().Elem())) + } + + vpls, err := pls(v) + if nilPtr && (vpls == nil || err != nil) { + // unset v + v.Set(reflect.Zero(v.Type())) + } + + return vpls, err +} + +// plsForSave tries to convert v to a PropertyLoadSaver. +// If successful, plsForSave returns v as a PropertyLoadSaver. +// +// plsForSave is intended to be used with nested struct fields which +// may implement PropertyLoadSaver. +// +// v must be settable. 
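//
// For context, a minimal field type that these helpers detect might look
// like this (a hedged sketch; Custom is not part of this package):
//
//	type Custom struct{ S string }
//
//	func (c *Custom) Load(ps []Property) error {
//		for _, p := range ps {
//			if p.Name == "S" {
//				c.S = p.Value.(string)
//			}
//		}
//		return nil
//	}
//
//	func (c *Custom) Save() ([]Property, error) {
//		return []Property{{Name: "S", Value: c.S}}, nil
//	}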
+func plsForSave(v reflect.Value) (PropertyLoadSaver, error) { + switch v.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Chan, reflect.Func: + // If v is nil, return early. v contains no data to save. + if v.IsNil() { + return nil, nil + } + } + + return pls(v) +} + +func pls(v reflect.Value) (PropertyLoadSaver, error) { + if v.Kind() != reflect.Ptr { + if _, ok := v.Interface().(PropertyLoadSaver); ok { + return nil, fmt.Errorf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface()) + } + + v = v.Addr() + } + + vpls, _ := v.Interface().(PropertyLoadSaver) + return vpls, nil +} diff --git a/vendor/cloud.google.com/go/datastore/query.go b/vendor/cloud.google.com/go/datastore/query.go new file mode 100644 index 0000000..09f0830 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query.go @@ -0,0 +1,773 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type operator int + +const ( + lessThan operator = iota + 1 + lessEq + equal + greaterEq + greaterThan + + keyFieldName = "__key__" +) + +var operatorToProto = map[operator]pb.PropertyFilter_Operator{ + lessThan: pb.PropertyFilter_LESS_THAN, + lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL, + equal: pb.PropertyFilter_EQUAL, + greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL, + greaterThan: pb.PropertyFilter_GREATER_THAN, +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection bool + +const ( + ascending sortDirection = false + descending sortDirection = true +) + +var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{ + ascending: pb.PropertyOrder_ASCENDING, + descending: pb.PropertyOrder_DESCENDING, +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. 
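//
// Queries are immutable; each derivative method below returns a copy and
// leaves its receiver untouched. A rough sketch of typical chaining
// (illustrative only; k is an assumed ancestor key):
//
//	q := NewQuery("Gopher").
//		Ancestor(k).
//		Filter("Height >", 10).
//		Order("-Height").
//		Limit(5)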
+type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + distinctOn []string + keysOnly bool + eventual bool + limit int32 + offset int32 + start []byte + end []byte + + namespace string + + trans *Transaction + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. +func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Namespace returns a derivative query that is associated with the given +// namespace. +// +// A namespace may be used to partition data for multi-tenant applications. +// For details, see https://cloud.google.com/datastore/docs/concepts/multitenancy. +func (q *Query) Namespace(ns string) *Query { + q = q.clone() + q.namespace = ns + return q +} + +// Transaction returns a derivative query that is associated with the given +// transaction. +// +// All reads performed as part of the transaction will come from a single +// consistent snapshot. Furthermore, if the transaction is set to a +// serializable isolation level, another transaction cannot concurrently modify +// the data that is read or modified by this transaction. +func (q *Query) Transaction(t *Transaction) *Query { + q = q.clone() + q.trans = t + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". +// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +// Field names which contain spaces, quote marks, or operator characters +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if filterStr == "" { + q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + var err error + f.FieldName, err = unquote(f.FieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. 
The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +// Field names which contain spaces, quote marks, or the minus sign +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName, dir := strings.TrimSpace(fieldName), ascending + if strings.HasPrefix(fieldName, "-") { + fieldName, dir = strings.TrimSpace(fieldName[1:]), descending + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + fieldName, err := unquote(fieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) + return q + } + if fieldName == "" { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, order{ + Direction: dir, + FieldName: fieldName, + }) + return q +} + +// unquote optionally interprets s as a double-quoted or backquoted Go +// string literal if it begins with the relevant character. +func unquote(s string) (string, error) { + if s == "" || (s[0] != '`' && s[0] != '"') { + return s, nil + } + return strconv.Unquote(s) +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. Distinct cannot be used with DistinctOn. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// DistinctOn returns a derivative query that yields de-duplicated entities with +// respect to the set of the specified fields. It is only used for projection +// queries. The field list should be a subset of the projected field list. +// DistinctOn cannot be used with Distinct. +func (q *Query) DistinctOn(fieldNames ...string) *Query { + q = q.clone() + q.distinctOn = fieldNames + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. 
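//
// Together, Start and End bound a query by cursors. A paging sketch
// (illustrative only; assumes a *Client named client and a cursor string
// saved from an earlier run):
//
//	cursor, err := DecodeCursor(savedCursor)
//	if err == nil {
//		it := client.Run(ctx, NewQuery("Gopher").Start(cursor).Limit(10))
//		for {
//			var g Gopher
//			if _, err := it.Next(&g); err != nil {
//				break // iterator.Done once the page is exhausted
//			}
//		}
//	}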
+func (q *Query) End(c Cursor) *Query { + q = q.clone() + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. +func (q *Query) toProto(req *pb.RunQueryRequest) error { + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + if len(q.distinctOn) != 0 && q.distinct { + return errors.New("datastore: query cannot be both distinct and distinct-on") + } + dst := &pb.Query{} + if q.kind != "" { + dst.Kind = []*pb.KindExpression{{Name: q.kind}} + } + if q.projection != nil { + for _, propertyName := range q.projection { + dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}}) + } + + for _, propertyName := range q.distinctOn { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + + if q.distinct { + for _, propertyName := range q.projection { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + } + } + if q.keysOnly { + dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}} + } + + var filters []*pb.Filter + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false) + if err != nil { + return fmt.Errorf("datastore: bad query filter value type: %v", err) + } + op, ok := operatorToProto[qf.Op] + if !ok { + return errors.New("datastore: unknown query filter operator") + } + xf := &pb.PropertyFilter{ + Op: op, + Property: &pb.PropertyReference{Name: qf.FieldName}, + Value: v, + } + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{PropertyFilter: xf}, + }) + } + + if q.ancestor != nil { + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{PropertyFilter: &pb.PropertyFilter{ + Property: &pb.PropertyReference{Name: keyFieldName}, + Op: pb.PropertyFilter_HAS_ANCESTOR, + Value: &pb.Value{ValueType: &pb.Value_KeyValue{KeyValue: keyToProto(q.ancestor)}}, + }}}) + } + + if len(filters) == 1 { + dst.Filter = filters[0] + } else if len(filters) > 1 { + dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{CompositeFilter: &pb.CompositeFilter{ + Op: pb.CompositeFilter_AND, + Filters: filters, + }}} + } + + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.PropertyOrder{ + Property: &pb.PropertyReference{Name: qo.FieldName}, + Direction: sortDirectionToProto[qo.Direction], + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = &wrapperspb.Int32Value{Value: q.limit} + } + dst.Offset = q.offset + dst.StartCursor = q.start + dst.EndCursor = q.end + + if t := q.trans; t != nil { + if t.id == nil { + return errExpiredTransaction + } + if q.eventual { + return errors.New("datastore: cannot use EventualConsistency query in a transaction") + } + req.ReadOptions = &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, + } + } + + if q.eventual { + req.ReadOptions = &pb.ReadOptions{ConsistencyType: &pb.ReadOptions_ReadConsistency_{ReadConsistency: pb.ReadOptions_EVENTUAL}} + } + + req.QueryType = &pb.RunQueryRequest_Query{Query: dst} + return nil +} + +// Count returns the number of results for the given query. 
+//
+// The running time and number of API calls made by Count scale linearly
+// with the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise Count will
+// continue until it finishes counting or the provided context expires.
+func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
+	// Check that the query is well-formed.
+	if q.err != nil {
+		return 0, q.err
+	}
+
+	// Create a copy of the query, with keysOnly true (if we're not a projection,
+	// since the two are incompatible).
+	newQ := q.clone()
+	newQ.keysOnly = len(newQ.projection) == 0
+
+	// Create an iterator and use it to walk through the batches of results
+	// directly.
+	it := c.Run(ctx, newQ)
+	n := 0
+	for {
+		err := it.nextBatch()
+		if err == iterator.Done {
+			return n, nil
+		}
+		if err != nil {
+			return 0, err
+		}
+		n += len(it.results)
+	}
+}
+
+// GetAll runs the provided query in the given context and returns all keys
+// that match that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly
+// with the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
+func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) {
+	var (
+		dv               reflect.Value
+		mat              multiArgType
+		elemType         reflect.Type
+		errFieldMismatch error
+	)
+	if !q.keysOnly {
+		dv = reflect.ValueOf(dst)
+		if dv.Kind() != reflect.Ptr || dv.IsNil() {
+			return nil, ErrInvalidEntityType
+		}
+		dv = dv.Elem()
+		mat, elemType = checkMultiArg(dv)
+		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+			return nil, ErrInvalidEntityType
+		}
+	}
+
+	var keys []*Key
+	for t := c.Run(ctx, q); ; {
+		k, e, err := t.next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return keys, err
+		}
+		if !q.keysOnly {
+			ev := reflect.New(elemType)
+			if elemType.Kind() == reflect.Map {
+				// This is a special case. The zero values of a map type are
+				// not immediately useful; they have to be make'd.
+				//
+				// Funcs and channels are similar, in that a zero value is not useful,
+				// but even a freshly make'd channel isn't useful: there's no fixed
+				// channel buffer size that is always going to be large enough, and
+				// there's no goroutine to drain the other end. Theoretically, these
+				// types could be supported, for example by sniffing for a constructor
+				// method or requiring prior registration, but for now it's not a
+				// frequent enough concern to be worth it. Programmers can work around
+				// it by explicitly using Iterator.Next instead of the Query.GetAll
+				// convenience method.
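				// A concrete illustration (hedged, mirroring TestSimpleQuery in
				// query_test.go): with dst of type *[]PropertyMap, elemType is
				// PropertyMap, so ev.Elem() holds a nil map until the made map
				// below is set into it.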
+ x := reflect.MakeMap(elemType) + ev.Elem().Set(x) + } + if err = loadEntityProto(ev.Interface(), e); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return keys, err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + } + keys = append(keys, k) + } + return keys, errFieldMismatch +} + +// Run runs the given query in the given context. +func (c *Client) Run(ctx context.Context, q *Query) *Iterator { + if q.err != nil { + return &Iterator{err: q.err} + } + t := &Iterator{ + ctx: ctx, + client: c, + limit: q.limit, + offset: q.offset, + keysOnly: q.keysOnly, + pageCursor: q.start, + entityCursor: q.start, + req: &pb.RunQueryRequest{ + ProjectId: c.dataset, + }, + } + if q.namespace != "" { + t.req.PartitionId = &pb.PartitionId{ + NamespaceId: q.namespace, + } + } + + if err := q.toProto(t.req); err != nil { + t.err = err + } + return t +} + +// Iterator is the result of running a query. +type Iterator struct { + ctx context.Context + client *Client + err error + + // results is the list of EntityResults still to be iterated over from the + // most recent API call. It will be nil if no requests have yet been issued. + results []*pb.EntityResult + // req is the request to send. It may be modified and used multiple times. + req *pb.RunQueryRequest + + // limit is the limit on the number of results this iterator should return. + // The zero value is used to prevent further fetches from the server. + // A negative value means unlimited. + limit int32 + // offset is the number of results that still need to be skipped. + offset int32 + // keysOnly records whether the query was keys-only (skip entity loading). + keysOnly bool + + // pageCursor is the compiled cursor for the next batch/page of result. + // TODO(djd): Can we delete this in favour of paging with the last + // entityCursor from each batch? + pageCursor []byte + // entityCursor is the compiled cursor of the next result. + entityCursor []byte +} + +// Next returns the key of the next result. When there are no more results, +// iterator.Done is returned as the error. +// +// If the query is not keys only and dst is non-nil, it also loads the entity +// stored for that key into the struct pointer or PropertyLoadSaver dst, with +// the same semantics and possible errors as for the Get function. +func (t *Iterator) Next(dst interface{}) (*Key, error) { + k, e, err := t.next() + if err != nil { + return nil, err + } + if dst != nil && !t.keysOnly { + err = loadEntityProto(dst, e) + } + return k, err +} + +func (t *Iterator) next() (*Key, *pb.Entity, error) { + // Fetch additional batches while there are no more results. + for t.err == nil && len(t.results) == 0 { + t.err = t.nextBatch() + } + if t.err != nil { + return nil, nil, t.err + } + + // Extract the next result, update cursors, and parse the entity's key. + e := t.results[0] + t.results = t.results[1:] + t.entityCursor = e.Cursor + if len(t.results) == 0 { + t.entityCursor = t.pageCursor // At the end of the batch. 
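		// (The batch is now exhausted; a Cursor() call at this point will
		// therefore report the page cursor, where the next batch begins.)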
+	}
+	if e.Entity.Key == nil {
+		return nil, nil, errors.New("datastore: internal error: server did not return a key")
+	}
+	k, err := protoToKey(e.Entity.Key)
+	if err != nil || k.Incomplete() {
+		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
+	}
+
+	return k, e.Entity, nil
+}
+
+// nextBatch makes a single call to the server for a batch of results.
+func (t *Iterator) nextBatch() error {
+	if t.limit == 0 {
+		return iterator.Done // Short-circuits the zero-item response.
+	}
+
+	// Adjust the query with the latest start cursor, limit and offset.
+	q := t.req.GetQuery()
+	q.StartCursor = t.pageCursor
+	q.Offset = t.offset
+	if t.limit >= 0 {
+		q.Limit = &wrapperspb.Int32Value{Value: t.limit}
+	} else {
+		q.Limit = nil
+	}
+
+	// Run the query.
+	resp, err := t.client.client.RunQuery(t.ctx, t.req)
+	if err != nil {
+		return err
+	}
+
+	// Adjust any offset from skipped results.
+	skip := resp.Batch.SkippedResults
+	if skip < 0 {
+		return errors.New("datastore: internal error: negative number of skipped_results")
+	}
+	t.offset -= skip
+	if t.offset < 0 {
+		return errors.New("datastore: internal error: query skipped too many results")
+	}
+	if t.offset > 0 && len(resp.Batch.EntityResults) > 0 {
+		return errors.New("datastore: internal error: query returned results before requested offset")
+	}
+
+	// Adjust the limit.
+	if t.limit >= 0 {
+		t.limit -= int32(len(resp.Batch.EntityResults))
+		if t.limit < 0 {
+			return errors.New("datastore: internal error: query returned more results than the limit")
+		}
+	}
+
+	// If there are no more results available, set limit to zero to prevent
+	// further fetches. Otherwise, check that there is a next page cursor available.
+	if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED {
+		t.limit = 0
+	} else if resp.Batch.EndCursor == nil {
+		return errors.New("datastore: internal error: server did not return a cursor")
+	}
+
+	// Update cursors.
+	// If any results were skipped, use the SkippedCursor as the next entity cursor.
+	if skip > 0 {
+		t.entityCursor = resp.Batch.SkippedCursor
+	} else {
+		t.entityCursor = q.StartCursor
+	}
+	t.pageCursor = resp.Batch.EndCursor
+
+	t.results = resp.Batch.EntityResults
+	return nil
+}
+
+// Cursor returns a cursor for the iterator's current location.
+func (t *Iterator) Cursor() (Cursor, error) {
+	// If there is still an offset, we need to skip those results first.
+	for t.err == nil && t.offset > 0 {
+		t.err = t.nextBatch()
+	}
+
+	if t.err != nil && t.err != iterator.Done {
+		return Cursor{}, t.err
+	}
+
+	return Cursor{t.entityCursor}, nil
+}
+
+// Cursor is an iterator's position. It can be converted to and from an opaque
+// string. A cursor can be used from different HTTP requests, but only with a
+// query with the same kind, ancestor, filter and order constraints.
+//
+// The zero Cursor can be used to indicate that there is no start and/or end
+// constraint for a query.
+type Cursor struct {
+	cc []byte
+}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+	if c.cc == nil {
+		return ""
+	}
+
+	return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
+}
+
+// DecodeCursor decodes a cursor from its base-64 string representation.
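//
// A round-trip sketch (illustrative only):
//
//	s := c.String()            // opaque, URL-safe, padding stripped
//	c2, err := DecodeCursor(s) // c2 may be passed to Query.Start or End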
+func DecodeCursor(s string) (Cursor, error) { + if s == "" { + return Cursor{}, nil + } + if n := len(s) % 4; n != 0 { + s += strings.Repeat("=", 4-n) + } + b, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return Cursor{}, err + } + return Cursor{b}, nil +} diff --git a/vendor/cloud.google.com/go/datastore/query_test.go b/vendor/cloud.google.com/go/datastore/query_test.go new file mode 100644 index 0000000..795fa6d --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query_test.go @@ -0,0 +1,547 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "testing" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +var ( + key1 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{Id: 6}, + }, + }, + } + key2 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{Id: 6}, + }, + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{Id: 8}, + }, + }, + } +) + +type fakeClient struct { + pb.DatastoreClient + queryFn func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) + commitFn func(*pb.CommitRequest) (*pb.CommitResponse, error) +} + +func (c *fakeClient) RunQuery(_ context.Context, req *pb.RunQueryRequest, _ ...grpc.CallOption) (*pb.RunQueryResponse, error) { + return c.queryFn(req) +} + +func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.CallOption) (*pb.CommitResponse, error) { + return c.commitFn(req) +} + +func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + expectedIn := &pb.RunQueryRequest{ + QueryType: &pb.RunQueryRequest_Query{Query: &pb.Query{ + Kind: []*pb.KindExpression{{Name: "Gopher"}}, + }}, + } + if !proto.Equal(in, expectedIn) { + return nil, fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + return &pb.RunQueryResponse{ + Batch: &pb.QueryResultBatch{ + MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS, + EntityResultType: pb.EntityResult_FULL, + EntityResults: []*pb.EntityResult{ + { + Entity: &pb.Entity{ + Key: key1, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{StringValue: "George"}}, + "Height": {ValueType: &pb.Value_IntegerValue{IntegerValue: 32}}, + }, + }, + }, + { + Entity: &pb.Entity{ + Key: key2, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{StringValue: "Rufus"}}, + // No height for Rufus. 
+ }, + }, + }, + }, + }, + }, nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. + {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. + {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. + { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Height", + Value: int64(32), + }, + { + Name: "Name", + Value: "George", + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. 
+ {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. + {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. + {new(PropertyMap), nil}, + {new([]int), nil}, + {new([]map[int]int), nil}, + {new([]map[string]Property), nil}, + {new([]map[string]interface{}), nil}, + {new([]*int), nil}, + {new([]*map[int]int), nil}, + {new([]*map[string]Property), nil}, + {new([]*map[string]interface{}), nil}, + {new([]**Gopher), nil}, + {new([]*PropertyList), nil}, + {new([]*PropertyMap), nil}, + } + for _, tc := range testCases { + nCall := 0 + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + nCall++ + return fakeRunQuery(req) + }, + }, + } + ctx := context.Background() + + var ( + expectedErr error + expectedNCall int + ) + if tc.want == nil { + expectedErr = ErrInvalidEntityType + } else { + expectedNCall = 1 + } + keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) + if err != expectedErr { + t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) + continue + } + if nCall != expectedNCall { + t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) + continue + } + if err != nil { + continue + } + + key1 := IDKey("Gopher", 6, nil) + expectedKeys := []*Key{ + key1, + IDKey("Gopher", 8, key1), + } + if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { + t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) + continue + } + for i, key := range keys { + if !keysEqual(key, expectedKeys[i]) { + t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) + continue + } + } + + // Make sure we sort any PropertyList items (the order is not deterministic). + if pLists, ok := tc.dst.(*[]PropertyList); ok { + for _, p := range *pLists { + sort.Sort(byName(p)) + } + } + + if !testutil.Equal(tc.dst, tc.want) { + t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want) + continue + } + } +} + +// keysEqual is like (*Key).Equal, but ignores the App ID. +func keysEqual(a, b *Key) bool { + for a != nil && b != nil { + if a.Kind != b.Kind || a.Name != b.Name || a.ID != b.ID { + return false + } + a, b = a.Parent, b.Parent + } + return a == b +} + +func TestQueriesAreImmutable(t *testing.T) { + // Test that deriving q2 from q1 does not modify q1. + q0 := NewQuery("foo") + q1 := NewQuery("foo") + q2 := q1.Offset(2) + if !testutil.Equal(q0, q1, cmp.AllowUnexported(Query{})) { + t.Errorf("q0 and q1 were not equal") + } + if testutil.Equal(q1, q2, cmp.AllowUnexported(Query{})) { + t.Errorf("q1 and q2 were equal") + } + + // Test that deriving from q4 twice does not conflict, even though + // q4 has a long list of order clauses. This tests that the arrays + // backed by a query's slice of orders are not shared. + f := func() *Query { + q := NewQuery("bar") + // 47 is an ugly number that is unlikely to be near a re-allocation + // point in repeated append calls. For example, it's not near a power + // of 2 or a multiple of 10. 
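		// (The failure mode being guarded against: if clone shared the orders
		// backing array, q4.Order("y") and q4.Order("z") below could write to
		// the same slot, leaving q5 and q6 equal.)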
+ for i := 0; i < 47; i++ { + q = q.Order(fmt.Sprintf("x%d", i)) + } + return q + } + q3 := f().Order("y") + q4 := f() + q5 := q4.Order("y") + q6 := q4.Order("z") + if !testutil.Equal(q3, q5, cmp.AllowUnexported(Query{})) { + t.Errorf("q3 and q5 were not equal") + } + if testutil.Equal(q5, q6, cmp.AllowUnexported(Query{})) { + t.Errorf("q5 and q6 were equal") + } +} + +func TestFilterParser(t *testing.T) { + testCases := []struct { + filterStr string + wantOK bool + wantFieldName string + wantOp operator + }{ + // Supported ops. + {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + // Quoted and interesting field names. + {"x > y =", true, "x > y", equal}, + {"` x ` =", true, " x ", equal}, + {`" x " =`, true, " x ", equal}, + {`" \"x " =`, true, ` "x `, equal}, + {`" x =`, false, "", 0}, + {`" x ="`, false, "", 0}, + {"` x \" =", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestNamespaceQuery(t *testing.T) { + gotNamespace := make(chan string, 1) + ctx := context.Background() + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + if part := req.PartitionId; part != nil { + gotNamespace <- part.NamespaceId + } else { + gotNamespace <- "" + } + return nil, errors.New("not implemented") + }, + }, + } + + var gs []Gopher + + client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } + + const ns = "not_default" + client.GetAll(ctx, NewQuery("gopher").Namespace(ns), &gs) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher").Namespace(ns)) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } +} + +func TestReadOptions(t *testing.T) { + tid := []byte{1} + for _, test := range []struct { + q *Query + want *pb.ReadOptions + }{ + { + q: NewQuery(""), + want: nil, + }, + { + q: NewQuery("").Transaction(nil), + want: nil, + }, + { + q: NewQuery("").Transaction(&Transaction{id: tid}), + want: &pb.ReadOptions{ + ConsistencyType: 
&pb.ReadOptions_Transaction{
+					Transaction: tid,
+				},
+			},
+		},
+		{
+			q: NewQuery("").EventualConsistency(),
+			want: &pb.ReadOptions{
+				ConsistencyType: &pb.ReadOptions_ReadConsistency_{
+					ReadConsistency: pb.ReadOptions_EVENTUAL,
+				},
+			},
+		},
+	} {
+		req := &pb.RunQueryRequest{}
+		if err := test.q.toProto(req); err != nil {
+			t.Fatalf("%+v: got %v, want no error", test.q, err)
+		}
+		if got := req.ReadOptions; !proto.Equal(got, test.want) {
+			t.Errorf("%+v:\ngot %+v\nwant %+v", test.q, got, test.want)
+		}
+	}
+	// Test errors.
+	for _, q := range []*Query{
+		NewQuery("").Transaction(&Transaction{id: nil}),
+		NewQuery("").Transaction(&Transaction{id: tid}).EventualConsistency(),
+	} {
+		req := &pb.RunQueryRequest{}
+		if err := q.toProto(req); err == nil {
+			t.Errorf("%+v: got nil, wanted error", q)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/datastore/save.go b/vendor/cloud.google.com/go/datastore/save.go
new file mode 100644
index 0000000..5cc5577
--- /dev/null
+++ b/vendor/cloud.google.com/go/datastore/save.go
@@ -0,0 +1,425 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+	"unicode/utf8"
+
+	timepb "github.com/golang/protobuf/ptypes/timestamp"
+	pb "google.golang.org/genproto/googleapis/datastore/v1"
+	llpb "google.golang.org/genproto/googleapis/type/latlng"
+)
+
+type saveOpts struct {
+	noIndex   bool
+	flatten   bool
+	omitEmpty bool
+}
+
+// saveEntity converts a PropertyLoadSaver or struct pointer into a pb.Entity.
+func saveEntity(key *Key, src interface{}) (*pb.Entity, error) {
+	var err error
+	var props []Property
+	if e, ok := src.(PropertyLoadSaver); ok {
+		props, err = e.Save()
+	} else {
+		props, err = SaveStruct(src)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return propertiesToProto(key, props)
+}
+
+// TODO(djd): Convert this and below to return ([]Property, error).
+func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
+	p := Property{
+		Name:    name,
+		NoIndex: opts.noIndex,
+	}
+
+	if opts.omitEmpty && isEmptyValue(v) {
+		return nil
+	}
+
+	// First check if field type implements PLS. If so, use PLS to
+	// save.
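+	// (A type opts in by implementing the PropertyLoadSaver interface,
+	// i.e. Load([]Property) error and Save() ([]Property, error).)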
+	ok, err := plsFieldSave(props, p, name, opts, v)
+	if err != nil {
+		return err
+	}
+	if ok {
+		return nil
+	}
+
+	switch x := v.Interface().(type) {
+	case *Key, time.Time, GeoPoint:
+		p.Value = x
+	default:
+		switch v.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			p.Value = v.Int()
+		case reflect.Bool:
+			p.Value = v.Bool()
+		case reflect.String:
+			p.Value = v.String()
+		case reflect.Float32, reflect.Float64:
+			p.Value = v.Float()
+		case reflect.Slice:
+			if v.Type().Elem().Kind() == reflect.Uint8 {
+				p.Value = v.Bytes()
+			} else {
+				return saveSliceProperty(props, name, opts, v)
+			}
+		case reflect.Ptr:
+			if v.Type().Elem().Kind() != reflect.Struct {
+				return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type())
+			}
+			if v.IsNil() {
+				return nil
+			}
+			v = v.Elem()
+			fallthrough
+		case reflect.Struct:
+			if !v.CanAddr() {
+				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
+			}
+			vi := v.Addr().Interface()
+
+			sub, err := newStructPLS(vi)
+			if err != nil {
+				return fmt.Errorf("datastore: unsupported struct field: %v", err)
+			}
+
+			if opts.flatten {
+				return sub.save(props, opts, name+".")
+			}
+
+			var subProps []Property
+			err = sub.save(&subProps, opts, "")
+			if err != nil {
+				return err
+			}
+			subKey, err := sub.key(v)
+			if err != nil {
+				return err
+			}
+
+			p.Value = &Entity{
+				Key:        subKey,
+				Properties: subProps,
+			}
+		}
+	}
+	if p.Value == nil {
+		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
+	}
+	*props = append(*props, p)
+	return nil
+}
+
+// plsFieldSave first tries to convert v's value to a PLS, then v's addressed
+// value to a PLS. If neither succeeds, plsFieldSave returns false for its
+// first return value.
+// If v is successfully converted to a PLS, plsFieldSave will then add the
+// Value to property p by way of the PLS's Save method, and append it to props.
+//
+// If the flatten option is present in opts, name must be prepended to each
+// property's name before it is appended to props. E.g., if name were "A" and
+// a subproperty's name were "B", the resultant name of the property to be
+// appended to props would be "A.B".
+func plsFieldSave(props *[]Property, p Property, name string, opts saveOpts, v reflect.Value) (ok bool, err error) {
+	vpls, err := plsForSave(v)
+	if err != nil {
+		return false, err
+	}
+
+	if vpls == nil {
+		return false, nil
+	}
+
+	subProps, err := vpls.Save()
+	if err != nil {
+		return true, err
+	}
+
+	if opts.flatten {
+		for _, subp := range subProps {
+			subp.Name = name + "." + subp.Name
+			*props = append(*props, subp)
+		}
+		return true, nil
+	}
+
+	p.Value = &Entity{Properties: subProps}
+	*props = append(*props, p)
+
+	return true, nil
+}
+
+// key extracts the *Key struct field from struct v based on the structCodec of s.
+func (s structPLS) key(v reflect.Value) (*Key, error) {
+	if v.Kind() != reflect.Struct {
+		return nil, errors.New("datastore: cannot save key of non-struct type")
+	}
+
+	keyField := s.codec.Match(keyFieldName)
+
+	if keyField == nil {
+		return nil, nil
+	}
+
+	f := v.FieldByIndex(keyField.Index)
+	k, ok := f.Interface().(*Key)
+	if !ok {
+		return nil, fmt.Errorf("datastore: %s field on struct %T is not a *datastore.Key", keyFieldName, v.Interface())
+	}
+
+	return k, nil
+}
+
+func saveSliceProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
+	// Easy case: if the slice is empty, we're done.
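+	// (An empty slice yields no property at all, so nothing is appended.)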
+	if v.Len() == 0 {
+		return nil
+	}
+	// Work out the properties generated by the first element in the slice. This will
+	// usually be a single property, but will be more if this is a slice of structs.
+	var headProps []Property
+	if err := saveStructProperty(&headProps, name, opts, v.Index(0)); err != nil {
+		return err
+	}
+
+	// Convert the first element's properties into slice properties, and
+	// keep track of the values in a map.
+	values := make(map[string][]interface{}, len(headProps))
+	for _, p := range headProps {
+		values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value)
+	}
+
+	// Collect the values for the subsequent elements.
+	for i := 1; i < v.Len(); i++ {
+		elemProps := make([]Property, 0, len(headProps))
+		if err := saveStructProperty(&elemProps, name, opts, v.Index(i)); err != nil {
+			return err
+		}
+		for _, p := range elemProps {
+			v, ok := values[p.Name]
+			if !ok {
+				return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i)
+			}
+			values[p.Name] = append(v, p.Value)
+		}
+	}
+
+	// Convert to the final properties.
+	for _, p := range headProps {
+		p.Value = values[p.Name]
+		*props = append(*props, p)
+	}
+	return nil
+}
+
+func (s structPLS) Save() ([]Property, error) {
+	var props []Property
+	if err := s.save(&props, saveOpts{}, ""); err != nil {
+		return nil, err
+	}
+	return props, nil
+}
+
+func (s structPLS) save(props *[]Property, opts saveOpts, prefix string) error {
+	for _, f := range s.codec {
+		name := prefix + f.Name
+		v := getField(s.v, f.Index)
+		if !v.IsValid() || !v.CanSet() {
+			continue
+		}
+
+		var tagOpts saveOpts
+		if f.ParsedTag != nil {
+			tagOpts = f.ParsedTag.(saveOpts)
+		}
+
+		var opts1 saveOpts
+		opts1.noIndex = opts.noIndex || tagOpts.noIndex
+		opts1.flatten = opts.flatten || tagOpts.flatten
+		opts1.omitEmpty = tagOpts.omitEmpty // don't propagate
+		if err := saveStructProperty(props, name, opts1, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getField returns the field from v at the given index path.
+// If it encounters a nil-valued field in the path, getField
+// stops and returns a zero-valued reflect.Value, preventing the
+// panic that would have been caused by reflect's FieldByIndex.
+func getField(v reflect.Value, index []int) reflect.Value {
+	var zero reflect.Value
+	if v.Type().Kind() != reflect.Struct {
+		return zero
+	}
+
+	for _, i := range index {
+		if v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Struct {
+			if v.IsNil() {
+				return zero
+			}
+			v = v.Elem()
+		}
+		v = v.Field(i)
+	}
+	return v
+}
+
+func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {
+	e := &pb.Entity{
+		Key:        keyToProto(key),
+		Properties: map[string]*pb.Value{},
+	}
+	indexedProps := 0
+	for _, p := range props {
+		// Do not send a Key value as a field to datastore.
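+		// (The key already travels on the entity itself, via keyToProto above.)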
+		if p.Name == keyFieldName {
+			continue
+		}
+
+		val, err := interfaceToProto(p.Value, p.NoIndex)
+		if err != nil {
+			return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name)
+		}
+		if !p.NoIndex {
+			rVal := reflect.ValueOf(p.Value)
+			if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 {
+				indexedProps += rVal.Len()
+			} else {
+				indexedProps++
+			}
+		}
+		if indexedProps > maxIndexedProperties {
+			return nil, errors.New("datastore: too many indexed properties")
+		}
+
+		if _, ok := e.Properties[p.Name]; ok {
+			return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name)
+		}
+		e.Properties[p.Name] = val
+	}
+	return e, nil
+}
+
+func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
+	val := &pb.Value{ExcludeFromIndexes: noIndex}
+	switch v := iv.(type) {
+	case int:
+		val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
+	case int32:
+		val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
+	case int64:
+		val.ValueType = &pb.Value_IntegerValue{IntegerValue: v}
+	case bool:
+		val.ValueType = &pb.Value_BooleanValue{BooleanValue: v}
+	case string:
+		if len(v) > 1500 && !noIndex {
+			return nil, errors.New("string property too long to index")
+		}
+		if !utf8.ValidString(v) {
+			return nil, fmt.Errorf("string is not valid utf8: %q", v)
+		}
+		val.ValueType = &pb.Value_StringValue{StringValue: v}
+	case float32:
+		val.ValueType = &pb.Value_DoubleValue{DoubleValue: float64(v)}
+	case float64:
+		val.ValueType = &pb.Value_DoubleValue{DoubleValue: v}
+	case *Key:
+		if v == nil {
+			val.ValueType = &pb.Value_NullValue{}
+		} else {
+			val.ValueType = &pb.Value_KeyValue{KeyValue: keyToProto(v)}
+		}
+	case GeoPoint:
+		if !v.Valid() {
+			return nil, errors.New("invalid GeoPoint value")
+		}
+		val.ValueType = &pb.Value_GeoPointValue{GeoPointValue: &llpb.LatLng{
+			Latitude:  v.Lat,
+			Longitude: v.Lng,
+		}}
+	case time.Time:
+		if v.Before(minTime) || v.After(maxTime) {
+			return nil, errors.New("time value out of range")
+		}
+		val.ValueType = &pb.Value_TimestampValue{TimestampValue: &timepb.Timestamp{
+			Seconds: v.Unix(),
+			Nanos:   int32(v.Nanosecond()),
+		}}
+	case []byte:
+		if len(v) > 1500 && !noIndex {
+			return nil, errors.New("[]byte property too long to index")
+		}
+		val.ValueType = &pb.Value_BlobValue{BlobValue: v}
+	case *Entity:
+		e, err := propertiesToProto(v.Key, v.Properties)
+		if err != nil {
+			return nil, err
+		}
+		val.ValueType = &pb.Value_EntityValue{EntityValue: e}
+	case []interface{}:
+		arr := make([]*pb.Value, 0, len(v))
+		for i, v := range v {
+			elem, err := interfaceToProto(v, noIndex)
+			if err != nil {
+				return nil, fmt.Errorf("%v at index %d", err, i)
+			}
+			arr = append(arr, elem)
+		}
+		val.ValueType = &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: arr}}
+		// ArrayValues have ExcludeFromIndexes set on the individual items, rather
+		// than the top-level value.
+		val.ExcludeFromIndexes = false
+	default:
+		if iv != nil {
+			return nil, fmt.Errorf("invalid Value type %T", iv)
+		}
+		val.ValueType = &pb.Value_NullValue{}
+	}
+	// TODO(jbd): Support EntityValue.
+	return val, nil
+}
+
+// isEmptyValue is taken from the encoding/json package in the
+// standard library.
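+// It backs the ",omitempty" tag option: a field whose value is empty in this
+// sense produces no property on save. For example (illustrative only):
+//
+//	type Task struct {
+//		Note string `datastore:",omitempty"` // skipped when Note == ""
+//	}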
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/cloud.google.com/go/datastore/save_test.go b/vendor/cloud.google.com/go/datastore/save_test.go new file mode 100644 index 0000000..cd28a7c --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/save_test.go @@ -0,0 +1,195 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestInterfaceToProtoNilKey(t *testing.T) { + var iv *Key + pv, err := interfaceToProto(iv, false) + if err != nil { + t.Fatalf("nil key: interfaceToProto: %v", err) + } + + _, ok := pv.ValueType.(*pb.Value_NullValue) + if !ok { + t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{}) + } +} + +func TestSaveEntityNested(t *testing.T) { + type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` + } + + type NestedWithKey struct { + Y string + N WithKey + } + + type WithoutKey struct { + X string + I int + } + + type NestedWithoutKey struct { + Y string + N WithoutKey + } + + type a struct { + S string + } + + type UnexpAnonym struct { + a + } + + testCases := []struct { + desc string + src interface{} + key *Key + want *pb.Entity + }{ + { + desc: "nested entity with key", + src: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + }, + { + desc: "nested entity with incomplete key", + src: &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: incompleteKey, + }, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Key: keyToProto(incompleteKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + 
}, + }, + }, + { + desc: "nested entity without key", + src: &NestedWithoutKey{ + Y: "yyy", + N: WithoutKey{ + X: "two", + I: 2, + }, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + EntityValue: &pb.Entity{ + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, + }, + }, + }}, + }, + }, + }, + { + desc: "key at top level", + src: &WithKey{ + X: "three", + I: 3, + K: testKey0, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{StringValue: "three"}}, + "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, + }, + }, + }, + { + desc: "nested unexported anonymous struct field", + src: &UnexpAnonym{ + a{S: "hello"}, + }, + key: testKey0, + want: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, + }, + }, + }, + } + + for _, tc := range testCases { + got, err := saveEntity(tc.key, tc.src) + if err != nil { + t.Errorf("saveEntity: %s: %v", tc.desc, err) + continue + } + + if !testutil.Equal(tc.want, got) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/testdata/index.yaml b/vendor/cloud.google.com/go/datastore/testdata/index.yaml new file mode 100644 index 0000000..47bc9de --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/testdata/index.yaml @@ -0,0 +1,41 @@ +indexes: + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + direction: desc + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + - name: U \ No newline at end of file diff --git a/vendor/cloud.google.com/go/datastore/time.go b/vendor/cloud.google.com/go/datastore/time.go new file mode 100644 index 0000000..e7f6a19 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "math" + "time" +) + +var ( + minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) + maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) +) + +func toUnixMicro(t time.Time) int64 { + // We cannot use t.UnixNano() / 1e3 because we want to handle times more than + // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot + // be represented in the numerator of a single int64 divide. + return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) +} + +func fromUnixMicro(t int64) time.Time { + return time.Unix(t/1e6, (t%1e6)*1e3) +} diff --git a/vendor/cloud.google.com/go/datastore/time_test.go b/vendor/cloud.google.com/go/datastore/time_test.go new file mode 100644 index 0000000..5cc846c --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. + testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. 
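+	// (For example, time.Unix(0, 123) carries 123ns; toUnixMicro truncates
+	// that to 0µs, so the round trip yields time.Unix(0, 0).)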
+ t0 := time.Unix(0, 123) + t1 := fromUnixMicro(toUnixMicro(t0)) + if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { + t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) + } +} diff --git a/vendor/cloud.google.com/go/datastore/transaction.go b/vendor/cloud.google.com/go/datastore/transaction.go new file mode 100644 index 0000000..af5c427 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/transaction.go @@ -0,0 +1,310 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +// ErrConcurrentTransaction is returned when a transaction is rolled back due +// to a conflict with a concurrent transaction. +var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") + +var errExpiredTransaction = errors.New("datastore: transaction expired") + +type transactionSettings struct { + attempts int +} + +// newTransactionSettings creates a transactionSettings with a given TransactionOption slice. +// Unconfigured options will be set to default values. +func newTransactionSettings(opts []TransactionOption) *transactionSettings { + s := &transactionSettings{attempts: 3} + for _, o := range opts { + o.apply(s) + } + return s +} + +// TransactionOption configures the way a transaction is executed. +type TransactionOption interface { + apply(*transactionSettings) +} + +// MaxAttempts returns a TransactionOption that overrides the default 3 attempt times. +func MaxAttempts(attempts int) TransactionOption { + return maxAttempts(attempts) +} + +type maxAttempts int + +func (w maxAttempts) apply(s *transactionSettings) { + if w > 0 { + s.attempts = int(w) + } +} + +// Transaction represents a set of datastore operations to be committed atomically. +// +// Operations are enqueued by calling the Put and Delete methods on Transaction +// (or their Multi-equivalents). These operations are only committed when the +// Commit method is invoked. To ensure consistency, reads must be performed by +// using Transaction's Get method or by using the Transaction method when +// building a query. +// +// A Transaction must be committed or rolled back exactly once. +type Transaction struct { + id []byte + client *Client + ctx context.Context + mutations []*pb.Mutation // The mutations to apply. + pending map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion. +} + +// NewTransaction starts a new transaction. 
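+//
+// A minimal sketch of manual transaction use (illustrative only; error
+// handling elided, and ctx, client, key and the Account type are assumed):
+//
+//	tx, _ := client.NewTransaction(ctx)
+//	var acc Account
+//	_ = tx.Get(key, &acc)
+//	acc.Balance += 10
+//	_, _ = tx.Put(key, &acc)
+//	_, _ = tx.Commit()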
+func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { + for _, o := range opts { + if _, ok := o.(maxAttempts); ok { + return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option") + } + } + req := &pb.BeginTransactionRequest{ + ProjectId: c.dataset, + } + resp, err := c.client.BeginTransaction(ctx, req) + if err != nil { + return nil, err + } + + return &Transaction{ + id: resp.Transaction, + ctx: ctx, + client: c, + mutations: nil, + pending: make(map[int]*PendingKey), + }, nil +} + +// RunInTransaction runs f in a transaction. f is invoked with a Transaction +// that f should use for all the transaction's datastore operations. +// +// f must not call Commit or Rollback on the provided Transaction. +// +// If f returns nil, RunInTransaction commits the transaction, +// returning the Commit and a nil error if it succeeds. If the commit fails due +// to a conflicting transaction, RunInTransaction retries f with a new +// Transaction. It gives up and returns ErrConcurrentTransaction after three +// failed attempts (or as configured with MaxAttempts). +// +// If f returns non-nil, then the transaction will be rolled back and +// RunInTransaction will return the same error. The function f is not retried. +// +// Note that when f returns, the transaction is not committed. Calling code +// must not assume that any of f's changes have been committed until +// RunInTransaction returns nil. +// +// Since f may be called multiple times, f should usually be idempotent – that +// is, it should have the same result when called multiple times. Note that +// Transaction.Get will append when unmarshalling slice fields, so it is not +// necessarily idempotent. +func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) { + settings := newTransactionSettings(opts) + for n := 0; n < settings.attempts; n++ { + tx, err := c.NewTransaction(ctx) + if err != nil { + return nil, err + } + if err := f(tx); err != nil { + tx.Rollback() + return nil, err + } + if cmt, err := tx.Commit(); err != ErrConcurrentTransaction { + return cmt, err + } + } + return nil, ErrConcurrentTransaction +} + +// Commit applies the enqueued operations atomically. +func (t *Transaction) Commit() (*Commit, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + req := &pb.CommitRequest{ + ProjectId: t.client.dataset, + TransactionSelector: &pb.CommitRequest_Transaction{Transaction: t.id}, + Mutations: t.mutations, + Mode: pb.CommitRequest_TRANSACTIONAL, + } + t.id = nil + resp, err := t.client.client.Commit(t.ctx, req) + if err != nil { + if grpc.Code(err) == codes.Aborted { + return nil, ErrConcurrentTransaction + } + return nil, err + } + + // Copy any newly minted keys into the returned keys. + commit := &Commit{} + for i, p := range t.pending { + if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil { + return nil, errors.New("datastore: internal error: server returned the wrong mutation results") + } + key, err := protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + p.key = key + p.commit = commit + } + + return commit, nil +} + +// Rollback abandons a pending transaction. 
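+//
+// A common pattern (illustrative; doWork is a hypothetical helper) is to
+// roll back as soon as the transactional work fails:
+//
+//	if err := doWork(tx); err != nil {
+//		tx.Rollback()
+//		return err
+//	}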
+func (t *Transaction) Rollback() error { + if t.id == nil { + return errExpiredTransaction + } + id := t.id + t.id = nil + _, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{ + ProjectId: t.client.dataset, + Transaction: id, + }) + return err +} + +// Get is the transaction-specific version of the package function Get. +// All reads performed during the transaction will come from a single consistent +// snapshot. Furthermore, if the transaction is set to a serializable isolation +// level, another transaction cannot concurrently modify the data that is read +// or modified by this transaction. +func (t *Transaction) Get(key *Key, dst interface{}) error { + opts := &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, + } + err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { + if t.id == nil { + return errExpiredTransaction + } + opts := &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, + } + return t.client.get(t.ctx, keys, dst, opts) +} + +// Put is the transaction-specific version of the package function Put. +// +// Put returns a PendingKey which can be resolved into a Key using the +// return value from a successful Commit. If key is an incomplete key, the +// returned pending key will resolve to a unique key generated by the +// datastore. +func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { + h, err := t.PutMulti([]*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return h[0], nil +} + +// PutMulti is a batch version of Put. One PendingKey is returned for each +// element of src in the same order. +func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + origin := len(t.mutations) + t.mutations = append(t.mutations, mutations...) + + // Prepare the returned handles, pre-populating where possible. + ret := make([]*PendingKey, len(keys)) + for i, key := range keys { + p := &PendingKey{} + if key.Incomplete() { + // This key will be in the final commit result. + t.pending[origin+i] = p + } else { + p.key = key + } + ret[i] = p + } + + return ret, nil +} + +// Delete is the transaction-specific version of the package function Delete. +// Delete enqueues the deletion of the entity for the given key, to be +// committed atomically upon calling Commit. +func (t *Transaction) Delete(key *Key) error { + err := t.DeleteMulti([]*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (t *Transaction) DeleteMulti(keys []*Key) error { + if t.id == nil { + return errExpiredTransaction + } + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + t.mutations = append(t.mutations, mutations...) + return nil +} + +// Commit represents the result of a committed transaction. +type Commit struct{} + +// Key resolves a pending key handle into a final key. 
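+//
+// For example (an illustrative sketch; tx, ent and the entity kind are
+// assumed):
+//
+//	pk, _ := tx.Put(IncompleteKey("Kind", nil), &ent)
+//	commit, _ := tx.Commit()
+//	key := commit.Key(pk)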
+func (c *Commit) Key(p *PendingKey) *Key { + if c != p.commit { + panic("PendingKey was not created by corresponding transaction") + } + return p.key +} + +// PendingKey represents the key for newly-inserted entity. It can be +// resolved into a Key by calling the Key method of Commit. +type PendingKey struct { + key *Key + commit *Commit +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go new file mode 100644 index 0000000..0e51ed4 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go @@ -0,0 +1,215 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Controller2CallOptions contains the retry settings for each method of Controller2Client. +type Controller2CallOptions struct { + RegisterDebuggee []gax.CallOption + ListActiveBreakpoints []gax.CallOption + UpdateActiveBreakpoint []gax.CallOption +} + +func defaultController2ClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouddebugger.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultController2CallOptions() *Controller2CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &Controller2CallOptions{ + RegisterDebuggee: retry[[2]string{"default", "non_idempotent"}], + ListActiveBreakpoints: retry[[2]string{"default", "idempotent"}], + UpdateActiveBreakpoint: retry[[2]string{"default", "idempotent"}], + } +} + +// Controller2Client is a client for interacting with Stackdriver Debugger API. +type Controller2Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + controller2Client clouddebuggerpb.Controller2Client + + // The call options for this service. + CallOptions *Controller2CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewController2Client creates a new controller2 client. +// +// The Controller service provides the API for orchestrating a collection of +// debugger agents to perform debugging tasks. These agents are each attached +// to a process of an application which may include one or more replicas. 
+// +// The debugger agents register with the Controller to identify the application +// being debugged, the Debuggee. All agents that register with the same data, +// represent the same Debuggee, and are assigned the same debuggee_id. +// +// The debugger agents call the Controller to retrieve the list of active +// Breakpoints. Agents with the same debuggee_id get the same breakpoints +// list. An agent that can fulfill the breakpoint request updates the +// Controller with the breakpoint result. The controller selects the first +// result received and discards the rest of the results. +// Agents that poll again for active breakpoints will no longer have +// the completed breakpoint in the list and should remove that breakpoint from +// their attached process. +// +// The Controller service does not provide a way to retrieve the results of +// a completed breakpoint. This functionality is available using the Debugger +// service. +func NewController2Client(ctx context.Context, opts ...option.ClientOption) (*Controller2Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultController2ClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Controller2Client{ + conn: conn, + CallOptions: defaultController2CallOptions(), + + controller2Client: clouddebuggerpb.NewController2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Controller2Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Controller2Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// RegisterDebuggee registers the debuggee with the controller service. +// +// All agents attached to the same application must call this method with +// exactly the same request content to get back the same stable debuggee_id. +// Agents should call this method again whenever google.rpc.Code.NOT_FOUND +// is returned from any controller method. +// +// This protocol allows the controller service to disable debuggees, recover +// from data loss, or change the debuggee_id format. Agents must handle +// debuggee_id value changing upon re-registration. +func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...) + var resp *clouddebuggerpb.RegisterDebuggeeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.controller2Client.RegisterDebuggee(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee. 
+// +// The breakpoint specification (location, condition, and expressions +// fields) is semantically immutable, although the field values may +// change. For example, an agent may update the location line number +// to reflect the actual line where the breakpoint was set, but this +// doesn't change the breakpoint semantics. +// +// This means that an agent does not need to check if a breakpoint has changed +// when it encounters the same breakpoint on a successive call. +// Moreover, an agent should remember the breakpoints that are completed +// until the controller removes them from the active list to avoid +// setting those breakpoints again. +func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...) + var resp *clouddebuggerpb.ListActiveBreakpointsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateActiveBreakpoint updates the breakpoint state or mutable fields. +// The entire Breakpoint message must be sent back to the controller service. +// +// Updates to active breakpoint fields are only allowed if the new value +// does not change the breakpoint specification. Updates to the location, +// condition and expressions fields should not alter the breakpoint +// semantics. These may only make changes such as canonicalizing a value +// or snapping the location to the correct line of code. +func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...) + var resp *clouddebuggerpb.UpdateActiveBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go new file mode 100644 index 0000000..c3d3bcd --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go @@ -0,0 +1,87 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger_test + +import ( + "cloud.google.com/go/debugger/apiv2" + "golang.org/x/net/context" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +func ExampleNewController2Client() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleController2Client_RegisterDebuggee() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.RegisterDebuggeeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RegisterDebuggee(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleController2Client_ListActiveBreakpoints() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListActiveBreakpointsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListActiveBreakpoints(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleController2Client_UpdateActiveBreakpoint() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.UpdateActiveBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateActiveBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go new file mode 100644 index 0000000..a20d382 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go @@ -0,0 +1,211 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Debugger2CallOptions contains the retry settings for each method of Debugger2Client. 
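+// These defaults can be adjusted on a constructed client; for example
+// (illustrative), disabling retries for GetBreakpoint:
+//
+//	c, _ := NewDebugger2Client(ctx)
+//	c.CallOptions.GetBreakpoint = nil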
+type Debugger2CallOptions struct { + SetBreakpoint []gax.CallOption + GetBreakpoint []gax.CallOption + DeleteBreakpoint []gax.CallOption + ListBreakpoints []gax.CallOption + ListDebuggees []gax.CallOption +} + +func defaultDebugger2ClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouddebugger.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultDebugger2CallOptions() *Debugger2CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &Debugger2CallOptions{ + SetBreakpoint: retry[[2]string{"default", "non_idempotent"}], + GetBreakpoint: retry[[2]string{"default", "idempotent"}], + DeleteBreakpoint: retry[[2]string{"default", "idempotent"}], + ListBreakpoints: retry[[2]string{"default", "idempotent"}], + ListDebuggees: retry[[2]string{"default", "idempotent"}], + } +} + +// Debugger2Client is a client for interacting with Stackdriver Debugger API. +type Debugger2Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + debugger2Client clouddebuggerpb.Debugger2Client + + // The call options for this service. + CallOptions *Debugger2CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewDebugger2Client creates a new debugger2 client. +// +// The Debugger service provides the API that allows users to collect run-time +// information from a running application, without stopping or slowing it down +// and without modifying its state. An application may include one or +// more replicated processes performing the same work. +// +// A debugged application is represented using the Debuggee concept. The +// Debugger service provides a way to query for available debuggees, but does +// not provide a way to create one. A debuggee is created using the Controller +// service, usually by running a debugger agent with the application. +// +// The Debugger service enables the client to set one or more Breakpoints on a +// Debuggee and collect the results of the set Breakpoints. +func NewDebugger2Client(ctx context.Context, opts ...option.ClientOption) (*Debugger2Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultDebugger2ClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Debugger2Client{ + conn: conn, + CallOptions: defaultDebugger2CallOptions(), + + debugger2Client: clouddebuggerpb.NewDebugger2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Debugger2Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Debugger2Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// SetBreakpoint sets the breakpoint to the debuggee. +func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...) + var resp *clouddebuggerpb.SetBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.SetBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetBreakpoint gets breakpoint information. +func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...) + var resp *clouddebuggerpb.GetBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.GetBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteBreakpoint deletes the breakpoint from the debuggee. +func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.debugger2Client.DeleteBreakpoint(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListBreakpoints lists all breakpoints for the debuggee. +func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...) + var resp *clouddebuggerpb.ListBreakpointsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.ListBreakpoints(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDebuggees lists all the debuggees that the user has access to. +func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...) 
+ var resp *clouddebuggerpb.ListDebuggeesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.debugger2Client.ListDebuggees(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go new file mode 100644 index 0000000..0dede2f --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go @@ -0,0 +1,121 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger_test + +import ( + "cloud.google.com/go/debugger/apiv2" + "golang.org/x/net/context" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +func ExampleNewDebugger2Client() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleDebugger2Client_SetBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.SetBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_GetBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.GetBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_DeleteBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.DeleteBreakpointRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDebugger2Client_ListBreakpoints() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListBreakpointsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListBreakpoints(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_ListDebuggees() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListDebuggeesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListDebuggees(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/doc.go b/vendor/cloud.google.com/go/debugger/apiv2/doc.go new file mode 100644 index 0000000..fd283f6 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/doc.go @@ -0,0 +1,50 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package debugger is an auto-generated package for the +// Stackdriver Debugger API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Examines the call stack and variables of a running application +// without stopping or slowing it down. +// +// Use the client at cloud.google.com/go/cmd/go-cloud-debug-agent in preference to this. +package debugger // import "cloud.google.com/go/debugger/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger", + } +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go b/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go new file mode 100644 index 0000000..4cfd6c0 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go @@ -0,0 +1,693 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package debugger + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDebugger2Server struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouddebuggerpb.Debugger2Server + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDebugger2Server) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.SetBreakpointResponse), nil +} + +func (s *mockDebugger2Server) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.GetBreakpointResponse), nil +} + +func (s *mockDebugger2Server) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDebugger2Server) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListBreakpointsResponse), nil +} + +func (s *mockDebugger2Server) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListDebuggeesResponse), nil +} + 
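+// seedMockDebugger2 is an editorial sketch, not generated code: it names the
+// priming pattern the tests below repeat by hand (clear the injected error,
+// drop recorded requests, queue exactly one canned response).
+func seedMockDebugger2(s *mockDebugger2Server, resp proto.Message) {
+	s.err = nil
+	s.reqs = nil
+	s.resps = append(s.resps[:0], resp)
+}
+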
+type mockController2Server struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouddebuggerpb.Controller2Server + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockController2Server) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.RegisterDebuggeeResponse), nil +} + +func (s *mockController2Server) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListActiveBreakpointsResponse), nil +} + +func (s *mockController2Server) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.UpdateActiveBreakpointResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
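+// (Editorial note: TestMain below starts an in-process gRPC server on a
+// localhost listener and hands the insecure *grpc.ClientConn to the clients
+// via option.WithGRPCConn, so the tests need no credentials or network.)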
+var clientOpt option.ClientOption + +var ( + mockDebugger2 mockDebugger2Server + mockController2 mockController2Server +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + clouddebuggerpb.RegisterDebugger2Server(serv, &mockDebugger2) + clouddebuggerpb.RegisterController2Server(serv, &mockController2) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDebugger2SetBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.SetBreakpointResponse = &clouddebuggerpb.SetBreakpointResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.SetBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2SetBreakpointError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.SetBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2GetBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.GetBreakpointResponse = &clouddebuggerpb.GetBreakpointResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.GetBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { 
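+		// Editorial note: proto.Equal compares message contents field by
+		// field; comparing the two pointers with == would not work here.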
+ t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2GetBreakpointError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.GetBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2DeleteBreakpoint(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.DeleteBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDebugger2DeleteBreakpointError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.DeleteBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDebugger2ListBreakpoints(t *testing.T) { + var nextWaitToken string = "nextWaitToken1006864251" + var expectedResponse = &clouddebuggerpb.ListBreakpointsResponse{ + NextWaitToken: nextWaitToken, + } + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListBreakpointsRequest{ + DebuggeeId: debuggeeId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListBreakpoints(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := 
expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2ListBreakpointsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListBreakpointsRequest{ + DebuggeeId: debuggeeId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListBreakpoints(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2ListDebuggees(t *testing.T) { + var expectedResponse *clouddebuggerpb.ListDebuggeesResponse = &clouddebuggerpb.ListDebuggeesResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var project string = "project-309310695" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListDebuggeesRequest{ + Project: project, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDebuggees(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2ListDebuggeesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDebugger2.err = gstatus.Error(errCode, "test error") + + var project string = "project-309310695" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListDebuggeesRequest{ + Project: project, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDebuggees(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2RegisterDebuggee(t *testing.T) { + var expectedResponse *clouddebuggerpb.RegisterDebuggeeResponse = &clouddebuggerpb.RegisterDebuggeeResponse{} + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} + var request = &clouddebuggerpb.RegisterDebuggeeRequest{ + Debuggee: debuggee, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RegisterDebuggee(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestController2RegisterDebuggeeError(t *testing.T) { + errCode := codes.PermissionDenied + mockController2.err = gstatus.Error(errCode, "test error") + + var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} + var request = &clouddebuggerpb.RegisterDebuggeeRequest{ + Debuggee: debuggee, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RegisterDebuggee(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2ListActiveBreakpoints(t *testing.T) { + var nextWaitToken string = "nextWaitToken1006864251" + var waitExpired bool = false + var expectedResponse = &clouddebuggerpb.ListActiveBreakpointsResponse{ + NextWaitToken: nextWaitToken, + WaitExpired: waitExpired, + } + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ + DebuggeeId: debuggeeId, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListActiveBreakpoints(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2ListActiveBreakpointsError(t *testing.T) { + errCode := codes.PermissionDenied + mockController2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ + DebuggeeId: debuggeeId, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListActiveBreakpoints(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2UpdateActiveBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.UpdateActiveBreakpointResponse = &clouddebuggerpb.UpdateActiveBreakpointResponse{} + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateActiveBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2UpdateActiveBreakpointError(t *testing.T) 
{ + errCode := codes.PermissionDenied + mockController2.err = gstatus.Error(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateActiveBreakpoint(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go new file mode 100644 index 0000000..babaaac --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go @@ -0,0 +1,77 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestDlpServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var minLikelihood dlppb.Likelihood = dlppb.Likelihood_POSSIBLE + var inspectConfig = &dlppb.InspectConfig{ + MinLikelihood: minLikelihood, + } + var type_ string = "text/plain" + var value string = "my phone number is 215-512-1212" + var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.InspectContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + if _, err := c.InspectContent(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go new file mode 100644 index 0000000..a9d6401 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go @@ -0,0 +1,437 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + InspectContent []gax.CallOption + RedactContent []gax.CallOption + DeidentifyContent []gax.CallOption + AnalyzeDataSourceRisk []gax.CallOption + CreateInspectOperation []gax.CallOption + ListInspectFindings []gax.CallOption + ListInfoTypes []gax.CallOption + ListRootCategories []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("dlp.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + InspectContent: retry[[2]string{"default", "non_idempotent"}], + RedactContent: retry[[2]string{"default", "non_idempotent"}], + DeidentifyContent: retry[[2]string{"default", "idempotent"}], + AnalyzeDataSourceRisk: retry[[2]string{"default", "idempotent"}], + CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}], + ListInspectFindings: retry[[2]string{"default", "idempotent"}], + ListInfoTypes: retry[[2]string{"default", "idempotent"}], + ListRootCategories: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with DLP API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client dlppb.DlpServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new dlp service client. +// +// The DLP API is a service that allows clients +// to detect the presence of Personally Identifiable Information (PII) and other +// privacy-sensitive data in user-supplied, unstructured data streams, like text +// blocks or images. +// The service also includes methods for sensitive data redaction and +// scheduling of data scans on Google Cloud Platform based data sets. 
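+//
+// A minimal usage sketch (editorial, not generated):
+//
+//	ctx := context.Background()
+//	c, err := NewClient(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer c.Close()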
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: dlppb.NewDlpServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ResultPath returns the path for the result resource. +func ResultPath(result string) string { + return "" + + "inspect/results/" + + result + + "" +} + +// InspectContent finds potentially sensitive info in a list of strings. +// This method has limits on input size, processing time, and output size. +func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...) + var resp *dlppb.InspectContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.InspectContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// RedactContent redacts potentially sensitive info from a list of strings. +// This method has limits on input size, processing time, and output size. +func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...) + var resp *dlppb.RedactContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RedactContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeidentifyContent de-identifies potentially sensitive info from a list of strings. +// This method has limits on input size and output size. 
+func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...) + var resp *dlppb.DeidentifyContentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google +// Cloud Platform repository. +func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnalyzeDataSourceRiskOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data +// repository. +func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateInspectOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateInspectOperationHandle{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// ListInspectFindings returns list of results for given inspect operation result set id. +func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...) + var resp *dlppb.ListInspectFindingsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInspectFindings(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInfoTypes returns sensitive information types for given category. +func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...) 
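+	// Editorial note: the three-index slice above caps the slice's capacity,
+	// so this append copies into a fresh array instead of mutating the
+	// shared defaults stored in c.CallOptions.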
+ var resp *dlppb.ListInfoTypesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListRootCategories returns the list of root categories of sensitive information. +func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...) + var resp *dlppb.ListRootCategoriesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk. +type AnalyzeDataSourceRiskOperation struct { + lro *longrunning.Operation +} + +// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name. +// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process. +func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation { + return &AnalyzeDataSourceRiskOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) { + var resp dlppb.RiskAnalysisOperationResult + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) { + var resp dlppb.RiskAnalysisOperationResult + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
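+//
+// A hedged sketch of the Poll/Metadata handshake (editorial):
+//
+//	if _, err := op.Poll(ctx); err == nil {
+//		meta, _ := op.Metadata()
+//		_ = meta // may be nil if the server attached no metadata
+//	}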
+func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) { + var meta dlppb.RiskAnalysisOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnalyzeDataSourceRiskOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnalyzeDataSourceRiskOperation) Name() string { + return op.lro.Name() +} + +// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation. +type CreateInspectOperationHandle struct { + lro *longrunning.Operation +} + +// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name. +// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process. +func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle { + return &CreateInspectOperationHandle{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) { + var resp dlppb.InspectOperationResult + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) { + var resp dlppb.InspectOperationResult + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) { + var meta dlppb.InspectOperationMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateInspectOperationHandle) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. 
+// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateInspectOperationHandle) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go new file mode 100644 index 0000000..6777483 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go @@ -0,0 +1,187 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp_test + +import ( + "cloud.google.com/go/dlp/apiv2beta1" + "golang.org/x/net/context" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_InspectContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.InspectContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.InspectContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_RedactContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.RedactContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RedactContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_DeidentifyContent() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.DeidentifyContentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeidentifyContent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeDataSourceRisk() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.AnalyzeDataSourceRiskRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnalyzeDataSourceRisk(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_CreateInspectOperation() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.CreateInspectOperationRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateInspectOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
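+	// Editorial note: op.Wait above blocks, polling server-side, until the
+	// long-running operation completes; op.Poll is the non-blocking
+	// alternative (see dlp_client.go).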
+ _ = resp +} + +func ExampleClient_ListInspectFindings() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInspectFindingsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInspectFindings(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListInfoTypes() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListInfoTypesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListInfoTypes(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListRootCategories() { + ctx := context.Background() + c, err := dlp.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &dlppb.ListRootCategoriesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListRootCategories(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go new file mode 100644 index 0000000..2870835 --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go @@ -0,0 +1,48 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package dlp is an auto-generated package for the +// DLP API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// The Google Data Loss Prevention API provides methods for detection of +// privacy-sensitive fragments in text, images, and Google Cloud Platform +// storage repositories. +package dlp // import "cloud.google.com/go/dlp/apiv2beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go b/vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go new file mode 100644 index 0000000..0cf0d6c --- /dev/null +++ b/vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go @@ -0,0 +1,822 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package dlp + +import ( + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDlpServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + dlppb.DlpServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.InspectContentResponse), nil +} + +func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest) (*dlppb.RedactContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.RedactContentResponse), nil +} + +func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.DeidentifyContentResponse), nil +} + +func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInspectFindingsResponse), nil +} + +func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListInfoTypesResponse), nil +} + +func (s *mockDlpServer) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest) (*dlppb.ListRootCategoriesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*dlppb.ListRootCategoriesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockDlp mockDlpServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + dlppb.RegisterDlpServiceServer(serv, &mockDlp) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDlpServiceInspectContent(t *testing.T) { + var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." 
+ var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.InspectContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceInspectContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." + var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.InspectContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.InspectContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceRedactContent(t *testing.T) { + var expectedResponse *dlppb.RedactContentResponse = &dlppb.RedactContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." 
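+	// Editorial note: DataItem is a oneof; the ContentItem_Value wrapper
+	// selects the plain-string arm of dlppb.ContentItem.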
+ var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.RedactContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceRedactContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var type_ string = "text/plain" + var value string = "My email is example@example.com." + var itemsElement = &dlppb.ContentItem{ + Type: type_, + DataItem: &dlppb.ContentItem_Value{ + Value: value, + }, + } + var items = []*dlppb.ContentItem{itemsElement} + var request = &dlppb.RedactContentRequest{ + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RedactContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceDeidentifyContent(t *testing.T) { + var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{} + var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} + var items []*dlppb.ContentItem = nil + var request = &dlppb.DeidentifyContentRequest{ + DeidentifyConfig: deidentifyConfig, + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceDeidentifyContentError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{} + var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{} + var items []*dlppb.ContentItem = nil + var request = &dlppb.DeidentifyContentRequest{ + DeidentifyConfig: deidentifyConfig, + InspectConfig: inspectConfig, + Items: items, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeidentifyContent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, 
expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) { + var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{} + + mockDlp.err = nil + mockDlp.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{} + var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{} + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + PrivacyMetric: privacyMetric, + SourceTable: sourceTable, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = nil + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{} + var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{} + var request = &dlppb.AnalyzeDataSourceRiskRequest{ + PrivacyMetric: privacyMetric, + SourceTable: sourceTable, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceCreateInspectOperation(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &dlppb.InspectOperationResult{ + Name: name2, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var url string = "gs://example_bucket/example_file.png" + var fileSet = &dlppb.CloudStorageOptions_FileSet{ + Url: url, + } + var cloudStorageOptions = &dlppb.CloudStorageOptions{ + FileSet: fileSet, + } + var storageConfig = &dlppb.StorageConfig{ + Type: &dlppb.StorageConfig_CloudStorageOptions{ + CloudStorageOptions: cloudStorageOptions, + }, + } + var 
outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{} + var request = &dlppb.CreateInspectOperationRequest{ + InspectConfig: inspectConfig, + StorageConfig: storageConfig, + OutputConfig: outputConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInspectOperation(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceCreateInspectOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = nil + mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var name string = "EMAIL_ADDRESS" + var infoTypesElement = &dlppb.InfoType{ + Name: name, + } + var infoTypes = []*dlppb.InfoType{infoTypesElement} + var inspectConfig = &dlppb.InspectConfig{ + InfoTypes: infoTypes, + } + var url string = "gs://example_bucket/example_file.png" + var fileSet = &dlppb.CloudStorageOptions_FileSet{ + Url: url, + } + var cloudStorageOptions = &dlppb.CloudStorageOptions{ + FileSet: fileSet, + } + var storageConfig = &dlppb.StorageConfig{ + Type: &dlppb.StorageConfig_CloudStorageOptions{ + CloudStorageOptions: cloudStorageOptions, + }, + } + var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{} + var request = &dlppb.CreateInspectOperationRequest{ + InspectConfig: inspectConfig, + StorageConfig: storageConfig, + OutputConfig: outputConfig, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInspectOperation(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInspectFindings(t *testing.T) { + var nextPageToken string = "nextPageToken-1530815211" + var expectedResponse = &dlppb.ListInspectFindingsResponse{ + NextPageToken: nextPageToken, + } + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var formattedName string = ResultPath("[RESULT]") + var request = &dlppb.ListInspectFindingsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectFindings(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInspectFindingsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var formattedName string = ResultPath("[RESULT]") + var request = 
&dlppb.ListInspectFindingsRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInspectFindings(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListInfoTypes(t *testing.T) { + var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var category string = "PII" + var languageCode string = "en" + var request = &dlppb.ListInfoTypesRequest{ + Category: category, + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListInfoTypesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var category string = "PII" + var languageCode string = "en" + var request = &dlppb.ListInfoTypesRequest{ + Category: category, + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInfoTypes(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDlpServiceListRootCategories(t *testing.T) { + var expectedResponse *dlppb.ListRootCategoriesResponse = &dlppb.ListRootCategoriesResponse{} + + mockDlp.err = nil + mockDlp.reqs = nil + + mockDlp.resps = append(mockDlp.resps[:0], expectedResponse) + + var languageCode string = "en" + var request = &dlppb.ListRootCategoriesRequest{ + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListRootCategories(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDlpServiceListRootCategoriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDlp.err = gstatus.Error(errCode, "test error") + + var languageCode string = "en" + var request = &dlppb.ListRootCategoriesRequest{ + LanguageCode: languageCode, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListRootCategories(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git 
a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go new file mode 100644 index 0000000..7b35a84 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go @@ -0,0 +1,86 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestReportErrorsServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewReportErrorsClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedProjectName string = ReportErrorsProjectPath(projectId) + var message string = "[MESSAGE]" + var service string = "[SERVICE]" + var serviceContext = &clouderrorreportingpb.ServiceContext{ + Service: service, + } + var filePath string = "path/to/file.lang" + var lineNumber int32 = 42 + var functionName string = "meaningOfLife" + var reportLocation = &clouderrorreportingpb.SourceLocation{ + FilePath: filePath, + LineNumber: lineNumber, + FunctionName: functionName, + } + var context = &clouderrorreportingpb.ErrorContext{ + ReportLocation: reportLocation, + } + var event = &clouderrorreportingpb.ReportedErrorEvent{ + Message: message, + ServiceContext: serviceContext, + Context: context, + } + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + if _, err := c.ReportErrorEvent(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go new file mode 100644 index 0000000..a549228 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package errorreporting is an auto-generated package for the +// Stackdriver Error Reporting API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Stackdriver Error Reporting groups and counts similar errors from cloud +// services. The Stackdriver Error Reporting API provides a way to report new +// errors and read access to error groups and their associated errors. +// +// Use the client at cloud.google.com/go/errorreporting in preference to this. +package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go new file mode 100644 index 0000000..580dc2f --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go @@ -0,0 +1,161 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient. 
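+//
+// The defaults built below retry only codes.DeadlineExceeded and
+// codes.Unavailable, backing off exponentially from 100ms toward a 60s cap
+// with multiplier 1.3. A hedged sketch of the delay schedule gax produces
+// (gax also applies jitter, so real delays vary; shouldRetry is a
+// hypothetical stand-in for its retryable-code check):
+//
+//   delay := 100 * time.Millisecond
+//   for shouldRetry(err) {
+//       time.Sleep(delay)
+//       delay = time.Duration(float64(delay) * 1.3)
+//       if delay > 60*time.Second {
+//           delay = 60 * time.Second
+//       }
+//   }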
+type ErrorGroupCallOptions struct { + GetGroup []gax.CallOption + UpdateGroup []gax.CallOption +} + +func defaultErrorGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultErrorGroupCallOptions() *ErrorGroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ErrorGroupCallOptions{ + GetGroup: retry[[2]string{"default", "idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + } +} + +// ErrorGroupClient is a client for interacting with Stackdriver Error Reporting API. +type ErrorGroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + errorGroupClient clouderrorreportingpb.ErrorGroupServiceClient + + // The call options for this service. + CallOptions *ErrorGroupCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewErrorGroupClient creates a new error group service client. +// +// Service for retrieving and updating individual error groups. +func NewErrorGroupClient(ctx context.Context, opts ...option.ClientOption) (*ErrorGroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultErrorGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ErrorGroupClient{ + conn: conn, + CallOptions: defaultErrorGroupCallOptions(), + + errorGroupClient: clouderrorreportingpb.NewErrorGroupServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ErrorGroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ErrorGroupClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ErrorGroupGroupPath returns the path for the group resource. +func ErrorGroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" +} + +// GetGroup get the specified group. +func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *clouderrorreportingpb.ErrorGroup + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorGroupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup replace the data for the specified group. +// Fails if the group does not exist. +func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *clouderrorreportingpb.ErrorGroup + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorGroupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go new file mode 100644 index 0000000..686c344 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go @@ -0,0 +1,69 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewErrorGroupClient() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleErrorGroupClient_GetGroup() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.GetGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleErrorGroupClient_UpdateGroup() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.UpdateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go new file mode 100644 index 0000000..ac4e0a2 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go @@ -0,0 +1,301 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient. +type ErrorStatsCallOptions struct { + ListGroupStats []gax.CallOption + ListEvents []gax.CallOption + DeleteEvents []gax.CallOption +} + +func defaultErrorStatsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultErrorStatsCallOptions() *ErrorStatsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ErrorStatsCallOptions{ + ListGroupStats: retry[[2]string{"default", "idempotent"}], + ListEvents: retry[[2]string{"default", "idempotent"}], + DeleteEvents: retry[[2]string{"default", "idempotent"}], + } +} + +// ErrorStatsClient is a client for interacting with Stackdriver Error Reporting API. +type ErrorStatsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + errorStatsClient clouderrorreportingpb.ErrorStatsServiceClient + + // The call options for this service. + CallOptions *ErrorStatsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewErrorStatsClient creates a new error stats service client. +// +// An API for retrieving and managing error statistics as well as data for +// individual events. +func NewErrorStatsClient(ctx context.Context, opts ...option.ClientOption) (*ErrorStatsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultErrorStatsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ErrorStatsClient{ + conn: conn, + CallOptions: defaultErrorStatsCallOptions(), + + errorStatsClient: clouderrorreportingpb.NewErrorStatsServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ErrorStatsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ErrorStatsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. 
Intended for +// use by Google-written clients. +func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ErrorStatsProjectPath returns the path for the project resource. +func ErrorStatsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// ListGroupStats lists the specified groups. +func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...) + it := &ErrorGroupStatsIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) { + var resp *clouderrorreportingpb.ListGroupStatsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorStatsClient.ListGroupStats(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ErrorGroupStats, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListEvents lists the specified events. +func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...) + it := &ErrorEventIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) { + var resp *clouderrorreportingpb.ListEventsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorStatsClient.ListEvents(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ErrorEvents, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteEvents deletes all error events of a given project. 
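+// Unlike the two List methods above, it is a plain unary call: there is no
+// iterator, and the DeleteEventsResponse is returned directly once
+// gax.Invoke succeeds under the retry settings in CallOptions.DeleteEvents.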
+func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...) + var resp *clouderrorreportingpb.DeleteEventsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.errorStatsClient.DeleteEvents(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ErrorEventIterator manages a stream of *clouderrorreportingpb.ErrorEvent. +type ErrorEventIterator struct { + items []*clouderrorreportingpb.ErrorEvent + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorEvent, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ErrorEventIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ErrorEventIterator) Next() (*clouderrorreportingpb.ErrorEvent, error) { + var item *clouderrorreportingpb.ErrorEvent + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ErrorEventIterator) bufLen() int { + return len(it.items) +} + +func (it *ErrorEventIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ErrorGroupStatsIterator manages a stream of *clouderrorreportingpb.ErrorGroupStats. +type ErrorGroupStatsIterator struct { + items []*clouderrorreportingpb.ErrorGroupStats + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorGroupStats, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ErrorGroupStatsIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
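+//
+// Under the hood, nextFunc (built by iterator.NewPageInfo in
+// ListGroupStats) refills items via InternalFetch whenever the buffer is
+// empty; bufLen and takeBuf below expose that buffer to the iterator
+// package.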
+func (it *ErrorGroupStatsIterator) Next() (*clouderrorreportingpb.ErrorGroupStats, error) { + var item *clouderrorreportingpb.ErrorGroupStats + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ErrorGroupStatsIterator) bufLen() int { + return len(it.items) +} + +func (it *ErrorGroupStatsIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go new file mode 100644 index 0000000..48d2acc --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go @@ -0,0 +1,100 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewErrorStatsClient() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleErrorStatsClient_ListGroupStats() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ListGroupStatsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroupStats(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleErrorStatsClient_ListEvents() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ListEventsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListEvents(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleErrorStatsClient_DeleteEvents() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.DeleteEventsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteEvents(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go new file mode 100644 index 0000000..f33acd9 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go @@ -0,0 +1,587 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockErrorGroupServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ErrorGroupServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockErrorGroupServer) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil +} + +func (s *mockErrorGroupServer) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil +} + +type mockErrorStatsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ErrorStatsServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
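+ // (The *Error tests below rely on exactly this to exercise the clients'
+ // gstatus.FromError handling.)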
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockErrorStatsServer) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) (*clouderrorreportingpb.ListGroupStatsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ListGroupStatsResponse), nil +} + +func (s *mockErrorStatsServer) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) (*clouderrorreportingpb.ListEventsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ListEventsResponse), nil +} + +func (s *mockErrorStatsServer) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.DeleteEventsResponse), nil +} + +type mockReportErrorsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ReportErrorsServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockReportErrorsServer) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ReportErrorEventResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
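+//
+// TestMain below registers the three mocks on an in-process gRPC server
+// listening on localhost:0, dials it with grpc.WithInsecure, and wraps the
+// connection in option.WithGRPCConn, so the generated clients exercise
+// their real wire paths against the mocks over a loopback connection.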
+var clientOpt option.ClientOption + +var ( + mockErrorGroup mockErrorGroupServer + mockErrorStats mockErrorStatsServer + mockReportErrors mockReportErrorsServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + clouderrorreportingpb.RegisterErrorGroupServiceServer(serv, &mockErrorGroup) + clouderrorreportingpb.RegisterErrorStatsServiceServer(serv, &mockErrorStats) + clouderrorreportingpb.RegisterReportErrorsServiceServer(serv, &mockReportErrors) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestErrorGroupServiceGetGroup(t *testing.T) { + var name string = "name3373707" + var groupId string = "groupId506361563" + var expectedResponse = &clouderrorreportingpb.ErrorGroup{ + Name: name, + GroupId: groupId, + } + + mockErrorGroup.err = nil + mockErrorGroup.reqs = nil + + mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) + + var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]") + var request = &clouderrorreportingpb.GetGroupRequest{ + GroupName: formattedGroupName, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorGroupServiceGetGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorGroup.err = gstatus.Error(errCode, "test error") + + var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]") + var request = &clouderrorreportingpb.GetGroupRequest{ + GroupName: formattedGroupName, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorGroupServiceUpdateGroup(t *testing.T) { + var name string = "name3373707" + var groupId string = "groupId506361563" + var expectedResponse = &clouderrorreportingpb.ErrorGroup{ + Name: name, + GroupId: groupId, + } + + mockErrorGroup.err = nil + mockErrorGroup.reqs = nil + + mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) + + var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} + var request = &clouderrorreportingpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorGroupServiceUpdateGroupError(t *testing.T) { + errCode := 
codes.PermissionDenied + mockErrorGroup.err = gstatus.Error(errCode, "test error") + + var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} + var request = &clouderrorreportingpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceListGroupStats(t *testing.T) { + var nextPageToken string = "" + var errorGroupStatsElement *clouderrorreportingpb.ErrorGroupStats = &clouderrorreportingpb.ErrorGroupStats{} + var errorGroupStats = []*clouderrorreportingpb.ErrorGroupStats{errorGroupStatsElement} + var expectedResponse = &clouderrorreportingpb.ListGroupStatsResponse{ + NextPageToken: nextPageToken, + ErrorGroupStats: errorGroupStats, + } + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} + var request = &clouderrorreportingpb.ListGroupStatsRequest{ + ProjectName: formattedProjectName, + TimeRange: timeRange, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupStats(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ErrorGroupStats[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceListGroupStatsError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorStats.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} + var request = &clouderrorreportingpb.ListGroupStatsRequest{ + ProjectName: formattedProjectName, + TimeRange: timeRange, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupStats(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceListEvents(t *testing.T) { + var nextPageToken string = "" + var errorEventsElement *clouderrorreportingpb.ErrorEvent = &clouderrorreportingpb.ErrorEvent{} + var errorEvents = []*clouderrorreportingpb.ErrorEvent{errorEventsElement} + var expectedResponse = &clouderrorreportingpb.ListEventsResponse{ + NextPageToken: nextPageToken, + ErrorEvents: errorEvents, + } + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var 
formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var groupId string = "groupId506361563" + var request = &clouderrorreportingpb.ListEventsRequest{ + ProjectName: formattedProjectName, + GroupId: groupId, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListEvents(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ErrorEvents[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceListEventsError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorStats.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var groupId string = "groupId506361563" + var request = &clouderrorreportingpb.ListEventsRequest{ + ProjectName: formattedProjectName, + GroupId: groupId, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListEvents(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceDeleteEvents(t *testing.T) { + var expectedResponse *clouderrorreportingpb.DeleteEventsResponse = &clouderrorreportingpb.DeleteEventsResponse{} + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var request = &clouderrorreportingpb.DeleteEventsRequest{ + ProjectName: formattedProjectName, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteEvents(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceDeleteEventsError(t *testing.T) { + errCode := codes.PermissionDenied + mockErrorStats.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var request = &clouderrorreportingpb.DeleteEventsRequest{ + ProjectName: formattedProjectName, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteEvents(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestReportErrorsServiceReportErrorEvent(t *testing.T) { + var expectedResponse *clouderrorreportingpb.ReportErrorEventResponse = &clouderrorreportingpb.ReportErrorEventResponse{} + + mockReportErrors.err = nil + 
mockReportErrors.reqs = nil + + mockReportErrors.resps = append(mockReportErrors.resps[:0], expectedResponse) + + var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]") + var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + c, err := NewReportErrorsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReportErrorEvent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockReportErrors.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestReportErrorsServiceReportErrorEventError(t *testing.T) { + errCode := codes.PermissionDenied + mockReportErrors.err = gstatus.Error(errCode, "test error") + + var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]") + var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + c, err := NewReportErrorsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReportErrorEvent(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go new file mode 100644 index 0000000..46f5a8e --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go @@ -0,0 +1,130 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient. 
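+//
+// Note that defaultReportErrorsCallOptions below builds these from an empty
+// retry map, so ReportErrorEvent carries no retry policy by default: the
+// RPC is treated as non-idempotent, and a failed report is not resent
+// automatically.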
+type ReportErrorsCallOptions struct { + ReportErrorEvent []gax.CallOption +} + +func defaultReportErrorsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultReportErrorsCallOptions() *ReportErrorsCallOptions { + retry := map[[2]string][]gax.CallOption{} + return &ReportErrorsCallOptions{ + ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ReportErrorsClient is a client for interacting with Stackdriver Error Reporting API. +type ReportErrorsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + reportErrorsClient clouderrorreportingpb.ReportErrorsServiceClient + + // The call options for this service. + CallOptions *ReportErrorsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewReportErrorsClient creates a new report errors service client. +// +// An API for reporting error events. +func NewReportErrorsClient(ctx context.Context, opts ...option.ClientOption) (*ReportErrorsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultReportErrorsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ReportErrorsClient{ + conn: conn, + CallOptions: defaultReportErrorsCallOptions(), + + reportErrorsClient: clouderrorreportingpb.NewReportErrorsServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ReportErrorsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ReportErrorsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ReportErrorsProjectPath returns the path for the project resource. +func ReportErrorsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// ReportErrorEvent report an individual error event. +// +// This endpoint accepts either an OAuth token, +// or an +// API key +// for authentication. To use an API key, append it to the URL as the value of +// a key parameter. For example:
+//   POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
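+//
+// A minimal call sketch (the project ID below is illustrative):
+//
+//   req := &clouderrorreportingpb.ReportErrorEventRequest{
+//       ProjectName: ReportErrorsProjectPath("my-project"),
+//       Event:       &clouderrorreportingpb.ReportedErrorEvent{},
+//   }
+//   resp, err := c.ReportErrorEvent(ctx, req)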
+func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...) + var resp *clouderrorreportingpb.ReportErrorEventResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go new file mode 100644 index 0000000..53be406 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewReportErrorsClient() { + ctx := context.Background() + c, err := errorreporting.NewReportErrorsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleReportErrorsClient_ReportErrorEvent() { + ctx := context.Background() + c, err := errorreporting.NewReportErrorsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ReportErrorEventRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReportErrorEvent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/errors.go b/vendor/cloud.google.com/go/errorreporting/errors.go new file mode 100644 index 0000000..744057c --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/errors.go @@ -0,0 +1,230 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package errorreporting is a Google Stackdriver Error Reporting library. 
+// +// This package is still experimental and subject to change. +// +// See https://cloud.google.com/error-reporting/ for more information. +package errorreporting // import "cloud.google.com/go/errorreporting" + +import ( + "bytes" + "fmt" + "log" + "net/http" + "runtime" + "time" + + api "cloud.google.com/go/errorreporting/apiv1beta1" + "cloud.google.com/go/internal/version" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +const ( + userAgent = `gcloud-golang-errorreporting/20160701` +) + +// Config is additional configuration for Client. +type Config struct { + // ServiceName identifies the running program and is included in the error reports. + // Optional. + ServiceName string + + // ServiceVersion identifies the version of the running program and is + // included in the error reports. + // Optional. + ServiceVersion string + + // OnError is the function to call if any background + // tasks errored. By default, errors are logged. + OnError func(err error) +} + +// Entry holds information about the reported error. +type Entry struct { + Error error + Req *http.Request // if error is associated with a request. + Stack []byte // if user does not provide a stack trace, runtime.Stack will be called +} + +// Client represents a Google Cloud Error Reporting client. +type Client struct { + projectID string + apiClient client + serviceContext erpb.ServiceContext + bundler *bundler.Bundler + + onErrorFn func(err error) +} + +var newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) { + client, err := api.NewReportErrorsClient(ctx, opts...) + if err != nil { + return nil, err + } + client.SetGoogleClientInfo("gccl", version.Repo) + return client, nil +} + +// NewClient returns a new error reporting client. Generally you will want +// to create a client on program initialization and use it through the lifetime +// of the process. +func NewClient(ctx context.Context, projectID string, cfg Config, opts ...option.ClientOption) (*Client, error) { + if cfg.ServiceName == "" { + cfg.ServiceName = "goapp" + } + c, err := newClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("creating client: %v", err) + } + + client := &Client{ + apiClient: c, + projectID: "projects/" + projectID, + serviceContext: erpb.ServiceContext{ + Service: cfg.ServiceName, + Version: cfg.ServiceVersion, + }, + } + bundler := bundler.NewBundler((*erpb.ReportErrorEventRequest)(nil), func(bundle interface{}) { + reqs := bundle.([]*erpb.ReportErrorEventRequest) + for _, req := range reqs { + _, err = client.apiClient.ReportErrorEvent(ctx, req) + if err != nil { + client.onError(fmt.Errorf("failed to upload: %v", err)) + } + } + }) + // TODO(jbd): Optimize bundler limits. + bundler.DelayThreshold = 2 * time.Second + bundler.BundleCountThreshold = 100 + bundler.BundleByteThreshold = 1000 + bundler.BundleByteLimit = 1000 + bundler.BufferedByteLimit = 10000 + client.bundler = bundler + return client, nil +} + +func (c *Client) onError(err error) { + if c.onErrorFn != nil { + c.onErrorFn(err) + return + } + log.Println(err) +} + +// Close closes any resources held by the client. +// Close should be called when the client is no longer needed. +// It need not be called at program exit. 
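+// Note that Close does not flush reports buffered by Report; call Flush first
+// if buffered reports must be delivered before the client is closed.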
+func (c *Client) Close() error { + return c.apiClient.Close() +} + +// Report writes an error report. It doesn't block. Errors in +// writing the error report can be handled via Client.OnError. +func (c *Client) Report(e Entry) { + var stack string + if e.Stack != nil { + stack = string(e.Stack) + } + req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack) + c.bundler.Add(req, 1) +} + +// ReportSync writes an error report. It blocks until the entry is written. +func (c *Client) ReportSync(ctx context.Context, e Entry) error { + var stack string + if e.Stack != nil { + stack = string(e.Stack) + } + req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack) + _, err := c.apiClient.ReportErrorEvent(ctx, req) + return err +} + +// Flush blocks until all currently buffered error reports are sent. +// +// If any errors occurred since the last call to Flush, or the +// creation of the client if this is the first call, then Flush report the +// error via the (*Client).OnError handler. +func (c *Client) Flush() { + c.bundler.Flush() +} + +func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string, stack string) *erpb.ReportErrorEventRequest { + if stack == "" { + // limit the stack trace to 16k. + var buf [16 * 1024]byte + stack = chopStack(buf[0:runtime.Stack(buf[:], false)]) + } + message := msg + "\n" + stack + + var errorContext *erpb.ErrorContext + if r != nil { + errorContext = &erpb.ErrorContext{ + HttpRequest: &erpb.HttpRequestContext{ + Method: r.Method, + Url: r.Host + r.RequestURI, + UserAgent: r.UserAgent(), + Referrer: r.Referer(), + RemoteIp: r.RemoteAddr, + }, + } + } + return &erpb.ReportErrorEventRequest{ + ProjectName: c.projectID, + Event: &erpb.ReportedErrorEvent{ + EventTime: ptypes.TimestampNow(), + ServiceContext: &c.serviceContext, + Message: message, + Context: errorContext, + }, + } +} + +// chopStack trims a stack trace so that the function which panics or calls +// Report is first. +func chopStack(s []byte) string { + f := []byte("cloud.google.com/go/errorreporting.(*Client).Report") + + lfFirst := bytes.IndexByte(s, '\n') + if lfFirst == -1 { + return string(s) + } + stack := s[lfFirst:] + panicLine := bytes.Index(stack, f) + if panicLine == -1 { + return string(s) + } + stack = stack[panicLine+1:] + for i := 0; i < 2; i++ { + nextLine := bytes.IndexByte(stack, '\n') + if nextLine == -1 { + return string(s) + } + stack = stack[nextLine+1:] + } + return string(s[:lfFirst+1]) + string(stack) +} + +type client interface { + ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) + Close() error +} diff --git a/vendor/cloud.google.com/go/errorreporting/errors_test.go b/vendor/cloud.google.com/go/errorreporting/errors_test.go new file mode 100644 index 0000000..7673e76 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/errors_test.go @@ -0,0 +1,113 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package errorreporting + +import ( + "errors" + "strings" + "testing" + + "cloud.google.com/go/internal/testutil" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + "google.golang.org/api/option" + erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +type fakeReportErrorsClient struct { + req *erpb.ReportErrorEventRequest + fail bool + doneCh chan struct{} +} + +func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) { + defer func() { + close(c.doneCh) + }() + if c.fail { + return nil, errors.New("request failed") + } + c.req = req + return &erpb.ReportErrorEventResponse{}, nil +} + +func (c *fakeReportErrorsClient) Close() error { + return nil +} + +func newFakeReportErrorsClient() *fakeReportErrorsClient { + c := &fakeReportErrorsClient{} + c.doneCh = make(chan struct{}) + return c +} + +func newTestClient(c *fakeReportErrorsClient) *Client { + newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) { + return c, nil + } + t, err := NewClient(context.Background(), testutil.ProjID(), Config{ + ServiceName: "myservice", + ServiceVersion: "v1.0", + }) + if err != nil { + panic(err) + } + return t +} + +func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, fn string) { + if req.Event.ServiceContext.Service != "myservice" { + t.Errorf("error report didn't contain service name") + } + if req.Event.ServiceContext.Version != "v1.0" { + t.Errorf("error report didn't contain version name") + } + if !strings.Contains(req.Event.Message, "error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(req.Event.Message, fn) { + t.Errorf("error report didn't contain stack trace") + } +} + +func TestReport(t *testing.T) { + fc := newFakeReportErrorsClient() + c := newTestClient(fc) + c.Report(Entry{Error: errors.New("error")}) + + <-fc.doneCh + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errorreporting.TestReport") +} +func TestReportSync(t *testing.T) { + ctx := context.Background() + fc := newFakeReportErrorsClient() + c := newTestClient(fc) + if err := c.ReportSync(ctx, Entry{Error: errors.New("error")}); err != nil { + t.Fatalf("cannot upload errors: %v", err) + } + + <-fc.doneCh + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errorreporting.TestReport") +} diff --git a/vendor/cloud.google.com/go/errorreporting/example_test.go b/vendor/cloud.google.com/go/errorreporting/example_test.go new file mode 100644 index 0000000..0744558 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/example_test.go @@ -0,0 +1,49 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errorreporting_test + +import ( + "errors" + "log" + + "cloud.google.com/go/errorreporting" + "golang.org/x/net/context" +) + +func Example() { + // Create the client. + ctx := context.Background() + ec, err := errorreporting.NewClient(ctx, "my-gcp-project", errorreporting.Config{ + ServiceName: "myservice", + ServiceVersion: "v1.0", + }) + defer func() { + if err := ec.Close(); err != nil { + log.Printf("failed to report errors to Stackdriver: %v", err) + } + }() + + // Report an error. + err = doSomething() + if err != nil { + ec.Report(errorreporting.Entry{ + Error: err, + }) + } +} + +func doSomething() error { + return errors.New("something went wrong") +} diff --git a/vendor/cloud.google.com/go/errorreporting/stack_test.go b/vendor/cloud.google.com/go/errorreporting/stack_test.go new file mode 100644 index 0000000..92ece0b --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/stack_test.go @@ -0,0 +1,56 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errorreporting + +import "testing" + +func TestChopStack(t *testing.T) { + for _, test := range []struct { + name string + in []byte + expected string + }{ + { + name: "Report", + in: []byte(` goroutine 39 [running]: +runtime/debug.Stack() + /gopath/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errorreporting.(*Client).logInternal() + /gopath/cloud.google.com/go/errorreporting/errors.go:259 +0x18b +cloud.google.com/go/errorreporting.(*Client).Report() + /gopath/cloud.google.com/go/errorreporting/errors.go:248 +0x4ed +cloud.google.com/go/errorreporting.TestReport() + /gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`), + expected: ` goroutine 39 [running]: +cloud.google.com/go/errorreporting.TestReport() + /gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`, + }, + } { + out := chopStack(test.in) + if out != test.expected { + t.Errorf("case %q: chopStack(%q): got %q want %q", test.name, test.in, out, test.expected) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go new file mode 100644 index 0000000..9206333 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go @@ -0,0 +1,48 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package firestore is an auto-generated package for the +// Google Cloud Firestore API. +// +// NOTE: This package is in beta. It is not stable, and may be subject to changes. +// +// +// Use the client at cloud.google.com/go/firestore in preference to this. +package firestore // import "cloud.google.com/go/firestore/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore", + } +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go new file mode 100644 index 0000000..ae7e050 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go @@ -0,0 +1,544 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package firestore + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
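+// They may be overridden for a single call by passing extra gax.CallOption
+// values; a sketch (the retry code and backoff below are illustrative):
+//
+//   resp, err := c.GetDocument(ctx, req, gax.WithRetry(func() gax.Retryer {
+//       return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//           Initial: 100 * time.Millisecond,
+//       })
+//   }))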
+type CallOptions struct { + GetDocument []gax.CallOption + ListDocuments []gax.CallOption + CreateDocument []gax.CallOption + UpdateDocument []gax.CallOption + DeleteDocument []gax.CallOption + BatchGetDocuments []gax.CallOption + BeginTransaction []gax.CallOption + Commit []gax.CallOption + Rollback []gax.CallOption + RunQuery []gax.CallOption + Write []gax.CallOption + Listen []gax.CallOption + ListCollectionIds []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("firestore.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"streaming", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + GetDocument: retry[[2]string{"default", "idempotent"}], + ListDocuments: retry[[2]string{"default", "idempotent"}], + CreateDocument: retry[[2]string{"default", "non_idempotent"}], + UpdateDocument: retry[[2]string{"default", "non_idempotent"}], + DeleteDocument: retry[[2]string{"default", "idempotent"}], + BatchGetDocuments: retry[[2]string{"streaming", "idempotent"}], + BeginTransaction: retry[[2]string{"default", "idempotent"}], + Commit: retry[[2]string{"default", "non_idempotent"}], + Rollback: retry[[2]string{"default", "idempotent"}], + RunQuery: retry[[2]string{"default", "idempotent"}], + Write: retry[[2]string{"streaming", "non_idempotent"}], + Listen: retry[[2]string{"streaming", "idempotent"}], + ListCollectionIds: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Firestore API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client firestorepb.FirestoreClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new firestore client. +// +// The Cloud Firestore service. +// +// This service exposes several types of comparable timestamps: +// +// create_time - The time at which a document was created. Changes only +// when a document is deleted, then re-created. Increases in a strict +// monotonic fashion. +// +// update_time - The time at which a document was last updated. Changes +// every time a document is modified. Does not change when a write results +// in no modifications. Increases in a strict monotonic fashion. +// +// read_time - The time at which a particular state was observed. Used +// to denote a consistent snapshot of the database or the time at which a +// Document was observed to not exist. +// +// commit_time - The time at which the writes in a transaction were +// committed. Any read with an equal or greater read_time is guaranteed +// to see the effects of the transaction. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: firestorepb.NewFirestoreClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// DatabaseRootPath returns the path for the database root resource. +func DatabaseRootPath(project, database string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "" +} + +// DocumentRootPath returns the path for the document root resource. +func DocumentRootPath(project, database string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "/documents" + + "" +} + +// DocumentPathPath returns the path for the document path resource. +func DocumentPathPath(project, database, documentPath string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "/documents/" + + documentPath + + "" +} + +// AnyPathPath returns the path for the any path resource. +func AnyPathPath(project, database, document, anyPath string) string { + return "" + + "projects/" + + project + + "/databases/" + + database + + "/documents/" + + document + + "/" + + anyPath + + "" +} + +// GetDocument gets a single document. +func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDocument[0:len(c.CallOptions.GetDocument):len(c.CallOptions.GetDocument)], opts...) + var resp *firestorepb.Document + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDocuments lists documents. +func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...) + it := &DocumentIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) { + var resp *firestorepb.ListDocumentsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListDocuments(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, "", err + } + return resp.Documents, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateDocument creates a new document. +func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDocument[0:len(c.CallOptions.CreateDocument):len(c.CallOptions.CreateDocument)], opts...) + var resp *firestorepb.Document + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDocument updates or inserts a document. +func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateDocument[0:len(c.CallOptions.UpdateDocument):len(c.CallOptions.UpdateDocument)], opts...) + var resp *firestorepb.Document + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.UpdateDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteDocument deletes a document. +func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteDocument[0:len(c.CallOptions.DeleteDocument):len(c.CallOptions.DeleteDocument)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteDocument(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// BatchGetDocuments gets multiple documents. +// +// Documents returned by this method are not guaranteed to be returned in the +// same order that they were requested. +func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...) + var resp firestorepb.Firestore_BatchGetDocumentsClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BatchGetDocuments(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// BeginTransaction starts a new transaction. +func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...) 
+ var resp *firestorepb.BeginTransactionResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Commit commits a transaction, while optionally updating documents. +func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...) + var resp *firestorepb.CommitResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Commit(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Rollback rolls back a transaction. +func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.Rollback(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// RunQuery runs a query. +func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...) + var resp firestorepb.Firestore_RunQueryClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RunQuery(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Write streams batches of document updates and deletes, in order. +func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...) + var resp firestorepb.Firestore_WriteClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Write(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Listen listens to changes. +func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...) + var resp firestorepb.Firestore_ListenClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Listen(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListCollectionIds lists all the collection IDs underneath a document. 
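+//
+// Results are paged; a typical loop calls Next until iterator.Done (sketch,
+// request construction elided):
+//
+//   it := c.ListCollectionIds(ctx, req)
+//   for {
+//       id, err := it.Next()
+//       if err == iterator.Done {
+//           break
+//       }
+//       if err != nil {
+//           // handle the error
+//       }
+//       _ = id // use the collection ID
+//   }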
+func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *firestorepb.ListCollectionIdsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListCollectionIds(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.CollectionIds, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DocumentIterator manages a stream of *firestorepb.Document. +type DocumentIterator struct { + items []*firestorepb.Document + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*firestorepb.Document, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DocumentIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DocumentIterator) Next() (*firestorepb.Document, error) { + var item *firestorepb.Document + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DocumentIterator) bufLen() int { + return len(it.items) +} + +func (it *DocumentIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go new file mode 100644 index 0000000..52f219d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client_example_test.go @@ -0,0 +1,328 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package firestore_test + +import ( + "io" + + "cloud.google.com/go/firestore/apiv1beta1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_GetDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.GetDocumentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListDocuments() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.ListDocumentsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDocuments(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_CreateDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.CreateDocumentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_UpdateDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.UpdateDocumentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleClient_DeleteDocument() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.DeleteDocumentRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteDocument(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_BatchGetDocuments() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.BatchGetDocumentsRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.BatchGetDocuments(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_BeginTransaction() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.BeginTransactionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BeginTransaction(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Commit() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.CommitRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Commit(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Rollback() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.RollbackRequest{ + // TODO: Fill request struct fields. + } + err = c.Rollback(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_RunQuery() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.RunQueryRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.RunQuery(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_Write() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.Write(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*firestorepb.WriteRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_Listen() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.Listen(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*firestorepb.ListenRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleClient_ListCollectionIds() { + ctx := context.Background() + c, err := firestore.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &firestorepb.ListCollectionIdsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListCollectionIds(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go new file mode 100644 index 0000000..bf20650 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/apiv1beta1/mock_test.go @@ -0,0 +1,1153 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package firestore + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockFirestoreServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + firestorepb.FirestoreServer + + reqs []proto.Message + + // If set, all calls return this error. 
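+ // When err is nil, unary handlers reply with resps[0], and streaming
+ // handlers send every entry in resps.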
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockFirestoreServer) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest) (*firestorepb.Document, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.Document), nil +} + +func (s *mockFirestoreServer) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest) (*firestorepb.ListDocumentsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.ListDocumentsResponse), nil +} + +func (s *mockFirestoreServer) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest) (*firestorepb.Document, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.Document), nil +} + +func (s *mockFirestoreServer) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest) (*firestorepb.Document, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.Document), nil +} + +func (s *mockFirestoreServer) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockFirestoreServer) BatchGetDocuments(req *firestorepb.BatchGetDocumentsRequest, stream firestorepb.Firestore_BatchGetDocumentsServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.BatchGetDocumentsResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest) (*firestorepb.BeginTransactionResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return 
s.resps[0].(*firestorepb.BeginTransactionResponse), nil +} + +func (s *mockFirestoreServer) Commit(ctx context.Context, req *firestorepb.CommitRequest) (*firestorepb.CommitResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.CommitResponse), nil +} + +func (s *mockFirestoreServer) Rollback(ctx context.Context, req *firestorepb.RollbackRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockFirestoreServer) RunQuery(req *firestorepb.RunQueryRequest, stream firestorepb.Firestore_RunQueryServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.RunQueryResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) Write(stream firestorepb.Firestore_WriteServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.WriteResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) Listen(stream firestorepb.Firestore_ListenServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*firestorepb.ListenResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockFirestoreServer) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest) (*firestorepb.ListCollectionIdsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*firestorepb.ListCollectionIdsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
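+// TestMain below starts an in-process gRPC server on a localhost port,
+// registers the mock Firestore service on it, and dials it with
+// grpc.WithInsecure.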
+var clientOpt option.ClientOption + +var ( + mockFirestore mockFirestoreServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + firestorepb.RegisterFirestoreServer(serv, &mockFirestore) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestFirestoreGetDocument(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &firestorepb.Document{ + Name: name2, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.GetDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreGetDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.GetDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreListDocuments(t *testing.T) { + var nextPageToken string = "" + var documentsElement *firestorepb.Document = &firestorepb.Document{} + var documents = []*firestorepb.Document{documentsElement} + var expectedResponse = &firestorepb.ListDocumentsResponse{ + NextPageToken: nextPageToken, + Documents: documents, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var request = &firestorepb.ListDocumentsRequest{ + Parent: formattedParent, + CollectionId: collectionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDocuments(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Documents[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreListDocumentsError(t *testing.T) { + errCode 
:= codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var request = &firestorepb.ListDocumentsRequest{ + Parent: formattedParent, + CollectionId: collectionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDocuments(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreCreateDocument(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &firestorepb.Document{ + Name: name, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var documentId string = "documentId506676927" + var document *firestorepb.Document = &firestorepb.Document{} + var request = &firestorepb.CreateDocumentRequest{ + Parent: formattedParent, + CollectionId: collectionId, + DocumentId: documentId, + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreCreateDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var collectionId string = "collectionId-821242276" + var documentId string = "documentId506676927" + var document *firestorepb.Document = &firestorepb.Document{} + var request = &firestorepb.CreateDocumentRequest{ + Parent: formattedParent, + CollectionId: collectionId, + DocumentId: documentId, + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreUpdateDocument(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &firestorepb.Document{ + Name: name, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var document *firestorepb.Document = &firestorepb.Document{} + var updateMask *firestorepb.DocumentMask = &firestorepb.DocumentMask{} + var request = &firestorepb.UpdateDocumentRequest{ + Document: document, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDocument(context.Background(), request) + + if err != nil { + 
t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreUpdateDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var document *firestorepb.Document = &firestorepb.Document{} + var updateMask *firestorepb.DocumentMask = &firestorepb.DocumentMask{} + var request = &firestorepb.UpdateDocumentRequest{ + Document: document, + UpdateMask: updateMask, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreDeleteDocument(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.DeleteDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDocument(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestFirestoreDeleteDocumentError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedName string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.DeleteDocumentRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteDocument(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestFirestoreBatchGetDocuments(t *testing.T) { + var missing string = "missing1069449574" + var transaction []byte = []byte("-34") + var expectedResponse = &firestorepb.BatchGetDocumentsResponse{ + Result: &firestorepb.BatchGetDocumentsResponse_Missing{ + Missing: missing, + }, + Transaction: transaction, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var documents []string = nil + var request = &firestorepb.BatchGetDocumentsRequest{ + Database: formattedDatabase, + Documents: documents, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.BatchGetDocuments(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + 
if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreBatchGetDocumentsError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var documents []string = nil + var request = &firestorepb.BatchGetDocumentsRequest{ + Database: formattedDatabase, + Documents: documents, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.BatchGetDocuments(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreBeginTransaction(t *testing.T) { + var transaction []byte = []byte("-34") + var expectedResponse = &firestorepb.BeginTransactionResponse{ + Transaction: transaction, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var request = &firestorepb.BeginTransactionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreBeginTransactionError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var request = &firestorepb.BeginTransactionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreCommit(t *testing.T) { + var expectedResponse *firestorepb.CommitResponse = &firestorepb.CommitResponse{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var writes []*firestorepb.Write = nil + var request = &firestorepb.CommitRequest{ + Database: formattedDatabase, + Writes: writes, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestFirestoreCommitError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var writes []*firestorepb.Write = nil + var request = &firestorepb.CommitRequest{ + Database: formattedDatabase, + Writes: writes, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreRollback(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var transaction []byte = []byte("-34") + var request = &firestorepb.RollbackRequest{ + Database: formattedDatabase, + Transaction: transaction, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestFirestoreRollbackError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var transaction []byte = []byte("-34") + var request = &firestorepb.RollbackRequest{ + Database: formattedDatabase, + Transaction: transaction, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestFirestoreRunQuery(t *testing.T) { + var transaction []byte = []byte("-34") + var skippedResults int32 = 880286183 + var expectedResponse = &firestorepb.RunQueryResponse{ + Transaction: transaction, + SkippedResults: skippedResults, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.RunQueryRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.RunQuery(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreRunQueryError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var 
request = &firestorepb.RunQueryRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.RunQuery(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreWrite(t *testing.T) { + var streamId string = "streamId-315624902" + var streamToken []byte = []byte("122") + var expectedResponse = &firestorepb.WriteResponse{ + StreamId: streamId, + StreamToken: streamToken, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var request = &firestorepb.WriteRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Write(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreWriteError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var request = &firestorepb.WriteRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Write(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreListen(t *testing.T) { + var expectedResponse *firestorepb.ListenResponse = &firestorepb.ListenResponse{} + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var request = &firestorepb.ListenRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Listen(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want 
%q)", got, want) + } +} + +func TestFirestoreListenError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseRootPath("[PROJECT]", "[DATABASE]") + var request = &firestorepb.ListenRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.Listen(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestFirestoreListCollectionIds(t *testing.T) { + var nextPageToken string = "" + var collectionIdsElement string = "collectionIdsElement1368994900" + var collectionIds = []string{collectionIdsElement} + var expectedResponse = &firestorepb.ListCollectionIdsResponse{ + NextPageToken: nextPageToken, + CollectionIds: collectionIds, + } + + mockFirestore.err = nil + mockFirestore.reqs = nil + + mockFirestore.resps = append(mockFirestore.resps[:0], expectedResponse) + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.ListCollectionIdsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListCollectionIds(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockFirestore.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.CollectionIds[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestFirestoreListCollectionIdsError(t *testing.T) { + errCode := codes.PermissionDenied + mockFirestore.err = gstatus.Error(errCode, "test error") + + var formattedParent string = AnyPathPath("[PROJECT]", "[DATABASE]", "[DOCUMENT]", "[ANY_PATH]") + var request = &firestorepb.ListCollectionIdsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListCollectionIds(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/firestore/client.go b/vendor/cloud.google.com/go/firestore/client.go new file mode 100644 index 0000000..058491a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/client.go @@ -0,0 +1,261 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "io" + "strings" + "time" + + "google.golang.org/api/iterator" + + vkit "cloud.google.com/go/firestore/apiv1beta1" + + "cloud.google.com/go/internal/version" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc/metadata" +) + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" + +// A Client provides access to the Firestore service. +type Client struct { + c *vkit.Client + projectID string + databaseID string // A client is tied to a single database. +} + +// NewClient creates a new Firestore client that uses the given project. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + vc, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + vc.SetGoogleClientInfo("gccl", version.Repo) + c := &Client{ + c: vc, + projectID: projectID, + databaseID: "(default)", // always "(default)", for now + } + return c, nil + +} + +// Close closes any resources held by the client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + return c.c.Close() +} + +func (c *Client) path() string { + return fmt.Sprintf("projects/%s/databases/%s", c.projectID, c.databaseID) +} + +func withResourceHeader(ctx context.Context, resource string) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + md = md.Copy() + md[resourcePrefixHeader] = []string{resource} + return metadata.NewOutgoingContext(ctx, md) +} + +// Collection creates a reference to a collection with the given path. +// A path is a sequence of IDs separated by slashes. +// +// Collection returns nil if path contains an even number of IDs or any ID is empty. +func (c *Client) Collection(path string) *CollectionRef { + coll, _ := c.idsToRef(strings.Split(path, "/"), c.path()) + return coll +} + +// Doc creates a reference to a document with the given path. +// A path is a sequence of IDs separated by slashes. +// +// Doc returns nil if path contains an odd number of IDs or any ID is empty. +func (c *Client) Doc(path string) *DocumentRef { + _, doc := c.idsToRef(strings.Split(path, "/"), c.path()) + return doc +} + +func (c *Client) idsToRef(IDs []string, dbPath string) (*CollectionRef, *DocumentRef) { + if len(IDs) == 0 { + return nil, nil + } + for _, id := range IDs { + if id == "" { + return nil, nil + } + } + coll := newTopLevelCollRef(c, dbPath, IDs[0]) + i := 1 + for i < len(IDs) { + doc := newDocRef(coll, IDs[i]) + i++ + if i == len(IDs) { + return nil, doc + } + coll = newCollRefWithParent(c, doc, IDs[i]) + i++ + } + return coll, nil +} + +// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are +// returned in the order of the given DocumentRefs. +// +// If a document is not present, the corresponding DocumentSnapshot will be nil. 
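Given the contract stated in the comment above (snapshots returned in request order, nil for documents that do not exist), a caller-side sketch might look like the following; the client value, context, and document paths are illustrative and not part of the vendored file:

    docRefs := []*firestore.DocumentRef{
        client.Doc("States/Wisconsin"),
        client.Doc("States/Ohio"),
    }
    snaps, err := client.GetAll(ctx, docRefs)
    if err != nil {
        // TODO: Handle error.
    }
    for i, snap := range snaps {
        if snap == nil {
            // The document at docRefs[i] does not exist.
            continue
        }
        fmt.Println(snap.Data())
    }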
+func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*DocumentSnapshot, error) { + if err := checkTransaction(ctx); err != nil { + return nil, err + } + var docNames []string + for _, dr := range docRefs { + if dr == nil { + return nil, errNilDocRef + } + docNames = append(docNames, dr.Path) + } + req := &pb.BatchGetDocumentsRequest{ + Database: c.path(), + Documents: docNames, + } + streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req) + if err != nil { + return nil, err + } + + // Read results from the stream and add them to a map. + docMap := map[string]*pb.Document{} + for { + res, err := streamClient.Recv() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + switch x := res.Result.(type) { + case *pb.BatchGetDocumentsResponse_Found: + docMap[x.Found.Name] = x.Found + + case *pb.BatchGetDocumentsResponse_Missing: + if docMap[x.Missing] != nil { + return nil, fmt.Errorf("firestore: %q both missing and present", x.Missing) + } + docMap[x.Missing] = nil + default: + return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type") + } + } + + // Put the documents we've gathered in the same order as the requesting slice of + // DocumentRefs. + docs := make([]*DocumentSnapshot, len(docNames)) + for i, name := range docNames { + pbDoc, ok := docMap[name] + if !ok { + return nil, fmt.Errorf("firestore: passed %q to BatchGetDocuments but never saw response", name) + } + if pbDoc != nil { + doc, err := newDocumentSnapshot(docRefs[i], pbDoc, c) + if err != nil { + return nil, err + } + docs[i] = doc + } + } + return docs, nil +} + +// Collections returns an iterator over the top-level collections. +func (c *Client) Collections(ctx context.Context) *CollectionIterator { + it := &CollectionIterator{ + err: checkTransaction(ctx), + client: c, + it: c.c.ListCollectionIds( + withResourceHeader(ctx, c.path()), + &pb.ListCollectionIdsRequest{Parent: c.path()}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// Batch returns a WriteBatch. +func (c *Client) Batch() *WriteBatch { + return &WriteBatch{c: c} +} + +// commit calls the Commit RPC outside of a transaction. +func (c *Client) commit(ctx context.Context, ws []*pb.Write) ([]*WriteResult, error) { + if err := checkTransaction(ctx); err != nil { + return nil, err + } + req := &pb.CommitRequest{ + Database: c.path(), + Writes: ws, + } + res, err := c.c.Commit(withResourceHeader(ctx, req.Database), req) + if err != nil { + return nil, err + } + if len(res.WriteResults) == 0 { + return nil, errors.New("firestore: missing WriteResult") + } + var wrs []*WriteResult + for _, pwr := range res.WriteResults { + wr, err := writeResultFromProto(pwr) + if err != nil { + return nil, err + } + wrs = append(wrs, wr) + } + return wrs, nil +} + +func (c *Client) commit1(ctx context.Context, ws []*pb.Write) (*WriteResult, error) { + wrs, err := c.commit(ctx, ws) + if err != nil { + return nil, err + } + return wrs[0], nil +} + +// A WriteResult is returned by methods that write documents. +type WriteResult struct { + // The time at which the document was updated, or created if it did not + // previously exist. Writes that do not actually change the document do + // not change the update time. 
+ UpdateTime time.Time +} + +func writeResultFromProto(wr *pb.WriteResult) (*WriteResult, error) { + t, err := ptypes.Timestamp(wr.UpdateTime) + if err != nil { + t = time.Time{} + // TODO(jba): Follow up if Delete is supposed to return a nil timestamp. + } + return &WriteResult{UpdateTime: t}, nil +} diff --git a/vendor/cloud.google.com/go/firestore/client_test.go b/vendor/cloud.google.com/go/firestore/client_test.go new file mode 100644 index 0000000..23b492c --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/client_test.go @@ -0,0 +1,216 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "testing" + + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var testClient = &Client{ + projectID: "projectID", + databaseID: "(default)", +} + +func TestClientCollectionAndDoc(t *testing.T) { + coll1 := testClient.Collection("X") + db := "projects/projectID/databases/(default)" + wantc1 := &CollectionRef{ + c: testClient, + parentPath: db, + Parent: nil, + ID: "X", + Path: "projects/projectID/databases/(default)/documents/X", + Query: Query{c: testClient, collectionID: "X", parentPath: db}, + } + if !testEqual(coll1, wantc1) { + t.Fatalf("got\n%+v\nwant\n%+v", coll1, wantc1) + } + doc1 := testClient.Doc("X/a") + wantd1 := &DocumentRef{ + Parent: coll1, + ID: "a", + Path: "projects/projectID/databases/(default)/documents/X/a", + } + + if !testEqual(doc1, wantd1) { + t.Fatalf("got %+v, want %+v", doc1, wantd1) + } + coll2 := testClient.Collection("X/a/Y") + parentPath := "projects/projectID/databases/(default)/documents/X/a" + wantc2 := &CollectionRef{ + c: testClient, + parentPath: parentPath, + Parent: doc1, + ID: "Y", + Path: "projects/projectID/databases/(default)/documents/X/a/Y", + Query: Query{c: testClient, collectionID: "Y", parentPath: parentPath}, + } + if !testEqual(coll2, wantc2) { + t.Fatalf("\ngot %+v\nwant %+v", coll2, wantc2) + } + doc2 := testClient.Doc("X/a/Y/b") + wantd2 := &DocumentRef{ + Parent: coll2, + ID: "b", + Path: "projects/projectID/databases/(default)/documents/X/a/Y/b", + } + if !testEqual(doc2, wantd2) { + t.Fatalf("got %+v, want %+v", doc2, wantd2) + } +} + +func TestClientCollDocErrors(t *testing.T) { + for _, badColl := range []string{"", "/", "/a/", "/a/b", "a/b/", "a//b"} { + coll := testClient.Collection(badColl) + if coll != nil { + t.Errorf("coll path %q: got %+v, want nil", badColl, coll) + } + } + for _, badDoc := range []string{"", "a", "/", "/a", "a/", "a/b/c", "a//b/c"} { + doc := testClient.Doc(badDoc) + if doc != nil { + t.Errorf("doc path %q: got %+v, want nil", badDoc, doc) + } + } +} + +func TestGetAll(t *testing.T) { + ctx := context.Background() + const dbPath = "projects/projectID/databases/(default)" + c, srv := newMock(t) + defer c.Close() + wantPBDocs := []*pb.Document{ + { + Name: dbPath + "/documents/C/a", + CreateTime: aTimestamp, + 
UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(2)}, + }, + nil, + { + Name: dbPath + "/documents/C/c", + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(1)}, + }, + } + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{ + dbPath + "/documents/C/a", + dbPath + "/documents/C/b", + dbPath + "/documents/C/c", + }, + }, + []interface{}{ + // deliberately put these out of order + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]}, + }, + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]}, + }, + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"}, + }, + }, + ) + coll := c.Collection("C") + var docRefs []*DocumentRef + for _, name := range []string{"a", "b", "c"} { + docRefs = append(docRefs, coll.Doc(name)) + } + docs, err := c.GetAll(ctx, docRefs) + if err != nil { + t.Fatal(err) + } + if got, want := len(docs), len(wantPBDocs); got != want { + t.Errorf("got %d docs, wanted %d", got, want) + } + for i, got := range docs { + var want *DocumentSnapshot + if wantPBDocs[i] != nil { + want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c) + if err != nil { + t.Fatal(err) + } + } + if diff := testDiff(got, want); diff != "" { + t.Errorf("#%d: got=--, want==++\n%s", i, diff) + } + } +} + +func TestGetAllErrors(t *testing.T) { + ctx := context.Background() + const ( + dbPath = "projects/projectID/databases/(default)" + docPath = dbPath + "/documents/C/a" + ) + c, srv := newMock(t) + if _, err := c.GetAll(ctx, []*DocumentRef{nil}); err != errNilDocRef { + t.Errorf("got %v, want errNilDocRef", err) + } + + // Internal server error. + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{docPath}, + }, + []interface{}{grpc.Errorf(codes.Internal, "")}, + ) + _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}) + codeEq(t, "GetAll #1", codes.Internal, err) + + // Doc appears as both found and missing (server bug). + srv.reset() + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{docPath}, + }, + []interface{}{ + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}}, + }, + &pb.BatchGetDocumentsResponse{ + Result: &pb.BatchGetDocumentsResponse_Missing{docPath}, + }, + }, + ) + if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil { + t.Error("got nil, want error") + } + + // Doc never appears (server bug). + srv.reset() + srv.addRPC( + &pb.BatchGetDocumentsRequest{ + Database: dbPath, + Documents: []string{docPath}, + }, + []interface{}{}, + ) + if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil { + t.Error("got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/firestore/collref.go b/vendor/cloud.google.com/go/firestore/collref.go new file mode 100644 index 0000000..8459c66 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/collref.go @@ -0,0 +1,114 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
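The error cases in TestGetAllErrors above all follow the same srv.addRPC pattern against the in-process mock. One more hypothetical case in that style, simulating a transient failure (the status code and message are illustrative):

    // Server returns Unavailable.
    srv.reset()
    srv.addRPC(
        &pb.BatchGetDocumentsRequest{
            Database:  dbPath,
            Documents: []string{docPath},
        },
        []interface{}{grpc.Errorf(codes.Unavailable, "try again")},
    )
    _, err = c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")})
    codeEq(t, "GetAll Unavailable", codes.Unavailable, err)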
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "math/rand" + "os" + "sync" + "time" + + "golang.org/x/net/context" +) + +// A CollectionRef is a reference to a Firestore collection. +type CollectionRef struct { + c *Client + + // Typically Parent.Path, or c.path if Parent is nil. + // May be different if this CollectionRef was created from a stored reference + // to a different project/DB. + parentPath string + + // Parent is the document of which this collection is a part. It is + // nil for top-level collections. + Parent *DocumentRef + + // The full resource path of the collection: "projects/P/databases/D/documents..." + Path string + + // ID is the collection identifier. + ID string + + // Use the methods of Query on a CollectionRef to create and run queries. + Query +} + +func newTopLevelCollRef(c *Client, dbPath, id string) *CollectionRef { + return &CollectionRef{ + c: c, + ID: id, + parentPath: dbPath, + Path: dbPath + "/documents/" + id, + Query: Query{c: c, collectionID: id, parentPath: dbPath}, + } +} + +func newCollRefWithParent(c *Client, parent *DocumentRef, id string) *CollectionRef { + return &CollectionRef{ + c: c, + Parent: parent, + ID: id, + parentPath: parent.Path, + Path: parent.Path + "/" + id, + Query: Query{c: c, collectionID: id, parentPath: parent.Path}, + } +} + +// Doc returns a DocumentRef that refers to the document in the collection with the +// given identifier. +func (c *CollectionRef) Doc(id string) *DocumentRef { + if c == nil { + return nil + } + return newDocRef(c, id) +} + +// NewDoc returns a DocumentRef with a uniquely generated ID. +func (c *CollectionRef) NewDoc() *DocumentRef { + return c.Doc(uniqueID()) +} + +// Add generates a DocumentRef with a unique ID. It then creates the document +// with the given data, which can be a map[string]interface{}, a struct or a +// pointer to a struct. +// +// Add returns an error in the unlikely event that a document with the same ID +// already exists. +func (c *CollectionRef) Add(ctx context.Context, data interface{}) (*DocumentRef, *WriteResult, error) { + d := c.NewDoc() + wr, err := d.Create(ctx, data) + if err != nil { + return nil, nil, err + } + return d, wr, nil +} + +const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var ( + rngMu sync.Mutex + rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid()))) +) + +func uniqueID() string { + var b [20]byte + rngMu.Lock() + for i := 0; i < len(b); i++ { + b[i] = alphanum[rng.Intn(len(alphanum))] + } + rngMu.Unlock() + return string(b[:]) +} diff --git a/vendor/cloud.google.com/go/firestore/collref_test.go b/vendor/cloud.google.com/go/firestore/collref_test.go new file mode 100644 index 0000000..3d82be5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/collref_test.go @@ -0,0 +1,97 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
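uniqueID in collref.go above draws 20 characters from a mutex-guarded math/rand source seeded with the current time and PID. For contrast, a sketch of the same idea over crypto/rand (not the library's implementation; assumes "crypto/rand" imported as rand, and note the small modulo bias since 256 is not a multiple of 62):

    func cryptoUniqueID() string {
        const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
        var b [20]byte
        if _, err := rand.Read(b[:]); err != nil {
            panic(err) // reads from the OS entropy source should not fail
        }
        for i := range b {
            // 256 mod 62 = 8, so the first 8 characters are chosen slightly more often.
            b[i] = alphanum[int(b[i])%len(alphanum)]
        }
        return string(b[:])
    }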
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "golang.org/x/net/context" +) + +func TestDoc(t *testing.T) { + coll := testClient.Collection("C") + got := coll.Doc("d") + want := &DocumentRef{ + Parent: coll, + ID: "d", + Path: "projects/projectID/databases/(default)/documents/C/d", + } + if !testEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestNewDoc(t *testing.T) { + c := &Client{} + coll := c.Collection("C") + got := coll.NewDoc() + if got.Parent != coll { + t.Errorf("got %v, want %v", got.Parent, coll) + } + if len(got.ID) != 20 { + t.Errorf("got %d-char ID, wanted 20", len(got.ID)) + } + + got2 := coll.NewDoc() + if got.ID == got2.ID { + t.Error("got same ID") + } +} + +func TestAdd(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + wantReq := commitRequestForSet() + w := wantReq.Writes[0] + w.CurrentDocument = &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{false}, + } + srv.addRPCAdjust(wantReq, commitResponseForSet, func(gotReq proto.Message) { + // We can't know the doc ID before Add is called, so we take it from + // the request. + w.Operation.(*pb.Write_Update).Update.Name = gotReq.(*pb.CommitRequest).Writes[0].Operation.(*pb.Write_Update).Update.Name + }) + _, wr, err := c.Collection("C").Add(ctx, testData) + if err != nil { + t.Fatal(err) + } + if !testEqual(wr, writeResultForSet) { + t.Errorf("got %v, want %v", wr, writeResultForSet) + } +} + +func TestNilErrors(t *testing.T) { + ctx := context.Background() + c, _ := newMock(t) + // Test that a nil CollectionRef results in a nil DocumentRef and errors + // where possible. + coll := c.Collection("a/b") // nil because "a/b" denotes a doc. + if coll != nil { + t.Fatal("collection not nil") + } + if got := coll.Doc("d"); got != nil { + t.Fatalf("got %v, want nil", got) + } + if got := coll.NewDoc(); got != nil { + t.Fatalf("got %v, want nil", got) + } + if _, _, err := coll.Add(ctx, testData); err != errNilDocRef { + t.Fatalf("got <%v>, want <%v>", err, errNilDocRef) + } +} diff --git a/vendor/cloud.google.com/go/firestore/cross_language_test.go b/vendor/cloud.google.com/go/firestore/cross_language_test.go new file mode 100644 index 0000000..a077fa5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/cross_language_test.go @@ -0,0 +1,232 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A runner for the cross-language tests. 
+ +package firestore + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "path" + "path/filepath" + "strings" + "testing" + + pb "cloud.google.com/go/firestore/genproto" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + ts "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + fspb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +func TestCrossLanguageTests(t *testing.T) { + const dir = "testdata" + fis, err := ioutil.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + n := 0 + for _, fi := range fis { + if strings.HasSuffix(fi.Name(), ".textproto") { + // TODO(jba): use sub-tests. + runTestFromFile(t, filepath.Join(dir, fi.Name())) + n++ + } + } + t.Logf("ran %d cross-language tests", n) +} + +func runTestFromFile(t *testing.T, filename string) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + var test pb.Test + if err := proto.UnmarshalText(string(bytes), &test); err != nil { + t.Fatalf("unmarshalling %s: %v", filename, err) + } + msg := fmt.Sprintf("%s (file %s)", test.Description, filepath.Base(filename)) + runTest(t, msg, &test) +} + +func runTest(t *testing.T, msg string, test *pb.Test) { + check := func(gotErr error, wantErr bool) { + if wantErr && gotErr == nil { + t.Errorf("%s: got nil, want error", msg) + } else if !wantErr && gotErr != nil { + t.Errorf("%s: %v", msg, gotErr) + } + } + + ctx := context.Background() + c, srv := newMock(t) + + switch tt := test.Test.(type) { + case *pb.Test_Get: + srv.addRPC(tt.Get.Request, &fspb.Document{ + CreateTime: &ts.Timestamp{}, + UpdateTime: &ts.Timestamp{}, + }) + ref := docRefFromPath(tt.Get.DocRefPath, c) + _, err := ref.Get(ctx) + if err != nil { + t.Errorf("%s: %v", msg, err) + return + } + // Checking response would just be testing the function converting a Document + // proto to a DocumentSnapshot, hence uninteresting. + + case *pb.Test_Create: + srv.addRPC(tt.Create.Request, commitResponseForSet) + ref := docRefFromPath(tt.Create.DocRefPath, c) + data := convertData(tt.Create.JsonData) + _, err := ref.Create(ctx, data) + check(err, tt.Create.IsError) + + case *pb.Test_Set: + srv.addRPC(tt.Set.Request, commitResponseForSet) + ref := docRefFromPath(tt.Set.DocRefPath, c) + data := convertData(tt.Set.JsonData) + var opts []SetOption + if tt.Set.Option != nil { + opts = []SetOption{convertSetOption(tt.Set.Option)} + } + _, err := ref.Set(ctx, data, opts...) + check(err, tt.Set.IsError) + + case *pb.Test_Update: + // Ignore Update test because we only support UpdatePaths. + // Not to worry, every Update test has a corresponding UpdatePaths test. + + case *pb.Test_UpdatePaths: + srv.addRPC(tt.UpdatePaths.Request, commitResponseForSet) + ref := docRefFromPath(tt.UpdatePaths.DocRefPath, c) + preconds := convertPrecondition(t, tt.UpdatePaths.Precondition) + paths := convertFieldPaths(tt.UpdatePaths.FieldPaths) + var ups []Update + for i, path := range paths { + jsonValue := tt.UpdatePaths.JsonValues[i] + var val interface{} + if err := json.Unmarshal([]byte(jsonValue), &val); err != nil { + t.Fatalf("%s: %q: %v", msg, jsonValue, err) + } + ups = append(ups, Update{ + FieldPath: path, + Value: convertTestValue(val), + }) + } + _, err := ref.Update(ctx, ups, preconds...) + check(err, tt.UpdatePaths.IsError) + + case *pb.Test_Delete: + srv.addRPC(tt.Delete.Request, commitResponseForSet) + ref := docRefFromPath(tt.Delete.DocRefPath, c) + preconds := convertPrecondition(t, tt.Delete.Precondition) + _, err := ref.Delete(ctx, preconds...) 
+ check(err, tt.Delete.IsError) + + default: + t.Fatalf("unknown test type %T", tt) + } +} + +func docRefFromPath(p string, c *Client) *DocumentRef { + return &DocumentRef{ + Path: p, + ID: path.Base(p), + Parent: &CollectionRef{c: c}, + } +} + +func convertData(jsonData string) map[string]interface{} { + var m map[string]interface{} + if err := json.Unmarshal([]byte(jsonData), &m); err != nil { + log.Fatal(err) + } + return convertTestMap(m) +} + +func convertTestMap(m map[string]interface{}) map[string]interface{} { + for k, v := range m { + m[k] = convertTestValue(v) + } + return m +} + +func convertTestValue(v interface{}) interface{} { + switch v := v.(type) { + case string: + switch v { + case "ServerTimestamp": + return ServerTimestamp + case "Delete": + return Delete + default: + return v + } + case float64: + if v == float64(int(v)) { + return int(v) + } + return v + case []interface{}: + for i, e := range v { + v[i] = convertTestValue(e) + } + return v + case map[string]interface{}: + return convertTestMap(v) + default: + return v + } +} + +func convertSetOption(opt *pb.SetOption) SetOption { + if opt.All { + return MergeAll + } + return Merge(convertFieldPaths(opt.Fields)...) +} + +func convertFieldPaths(fps []*pb.FieldPath) []FieldPath { + var res []FieldPath + for _, fp := range fps { + res = append(res, fp.Field) + } + return res +} + +func convertPrecondition(t *testing.T, fp *fspb.Precondition) []Precondition { + if fp == nil { + return nil + } + var pc Precondition + switch fp := fp.ConditionType.(type) { + case *fspb.Precondition_Exists: + pc = exists(fp.Exists) + case *fspb.Precondition_UpdateTime: + tm, err := ptypes.Timestamp(fp.UpdateTime) + if err != nil { + t.Fatal(err) + } + pc = LastUpdateTime(tm) + default: + t.Fatalf("unknown precondition type %T", fp) + } + return []Precondition{pc} +} diff --git a/vendor/cloud.google.com/go/firestore/doc.go b/vendor/cloud.google.com/go/firestore/doc.go new file mode 100644 index 0000000..d0f45fa --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/doc.go @@ -0,0 +1,214 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal. + +/* +Package firestore provides a client for reading and writing to a Cloud Firestore +database. + +See https://cloud.google.com/firestore/docs for an introduction +to Cloud Firestore and additional help on using the Firestore API. + +Creating a Client + +To start working with this package, create a client with a project ID: + + ctx := context.Background() + client, err := firestore.NewClient(ctx, "projectID") + if err != nil { + // TODO: Handle error. + } + +CollectionRefs and DocumentRefs + +In Firestore, documents are sets of key-value pairs, and collections are groups of +documents. A Firestore database consists of a hierarchy of alternating collections +and documents, referred to by slash-separated paths like +"States/California/Cities/SanFrancisco". 
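To make the alternation concrete: in the path above, "States" and "Cities" are collections, while "California" and "SanFrancisco" are documents. A path with an odd number of components therefore names a collection and an even number names a document, and Collection and Doc return nil when given a path of the wrong parity (see client.go earlier in this diff). For illustration:

    states := client.Collection("States")                     // one ID: a collection
    sf := client.Doc("States/California/Cities/SanFrancisco") // four IDs: a document
    bad := client.Doc("States")                               // odd number of IDs: Doc returns nil
    if bad == nil {
        // TODO: Handle the invalid path.
    }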
+ +This client is built around references to collections and documents. CollectionRefs +and DocumentRefs are lightweight values that refer to the corresponding database +entities. Creating a ref does not involve any network traffic. + + states := client.Collection("States") + ny := states.Doc("NewYork") + // Or, in a single call: + ny = client.Doc("States/NewYork") + +Reading + +Use DocumentRef.Get to read a document. The result is a DocumentSnapshot. +Call its Data method to obtain the entire document contents as a map. + + docsnap, err := ny.Get(ctx) + if err != nil { + // TODO: Handle error. + } + dataMap := docsnap.Data() + fmt.Println(dataMap) + +You can also obtain a single field with DataAt, or extract the data into a struct +with DataTo. With the type definition + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + +we can extract the document's data into a value of type State: + + var nyData State + if err := docsnap.DataTo(&nyData); err != nil { + // TODO: Handle error. + } + +Note that this client supports struct tags beginning with "firestore:" that work like +the tags of the encoding/json package, letting you rename fields, ignore them, or +omit their values when empty. + +To retrieve multiple documents from their references in a single call, use +Client.GetAll. + + docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{ + states.Doc("Wisconsin"), states.Doc("Ohio"), + }) + if err != nil { + // TODO: Handle error. + } + for _, ds := range docsnaps { + _ = ds // TODO: Use ds. + } + + +Writing + +For writing individual documents, use the methods on DocumentReference. +Create creates a new document. + + wr, err := ny.Create(ctx, State{ + Capital: "Albany", + Population: 19.8, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr) + +The first return value is a WriteResult, which contains the time +at which the document was updated. + +Create fails if the document exists. Another method, Set, either replaces an existing +document or creates a new one. + + ca := states.Doc("California") + _, err = ca.Set(ctx, State{ + Capital: "Sacramento", + Population: 39.14, + }) + +To update some fields of an existing document, use Update. It takes a list of +paths to update and their corresponding values. + + _, err = ca.Update(ctx, []firestore.Update{{Path: "capital", Value: "Sacramento"}}) + +Use DocumentRef.Delete to delete a document. + + _, err = ny.Delete(ctx) + +Preconditions + +You can condition Deletes or Updates on when a document was last changed. Specify +these preconditions as an option to a Delete or Update method. The check and the +write happen atomically with a single RPC. + + docsnap, err = ca.Get(ctx) + if err != nil { + // TODO: Handle error. + } + _, err = ca.Update(ctx, + []firestore.Update{{Path: "capital", Value: "Sacramento"}}, + firestore.LastUpdateTime(docsnap.UpdateTime)) + +Here we update a doc only if it hasn't changed since we read it. +You could also do this with a transaction. + +To perform multiple writes at once, use a WriteBatch. Its methods chain +for convenience. + +WriteBatch.Commit sends the collected writes to the server, where they happen +atomically. + + writeResults, err := client.Batch(). + Create(ny, State{Capital: "Albany"}). + Update(ca, []firestore.Update{{Path: "capital", Value: "Sacramento"}}). + Delete(client.Doc("States/WestDakota")). + Commit(ctx) + +Queries + +You can use SQL to select documents from a collection. 
Begin with the collection, and +build up a query using Select, Where and other methods of Query. + + q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc) + +Call the Query's Documents method to get an iterator, and use it like +the other Google Cloud Client iterators. + + iter := q.Documents(ctx) + for { + doc, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(doc.Data()) + } + +To get all the documents in a collection, you can use the collection itself +as a query. + + iter = client.Collection("States").Documents(ctx) + +Transactions + +Use a transaction to execute reads and writes atomically. All reads must happen +before any writes. Transaction creation, commit, rollback and retry are handled for +you by the Client.RunTransaction method; just provide a function and use the +read and write methods of the Transaction passed to it. + + ny := client.Doc("States/NewYork") + err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error { + doc, err := tx.Get(ny) // tx.Get, NOT ny.Get! + if err != nil { + return err + } + pop, err := doc.DataAt("pop") + if err != nil { + return err + } + return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}}) + }) + if err != nil { + // TODO: Handle error. + } + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package firestore diff --git a/vendor/cloud.google.com/go/firestore/docref.go b/vendor/cloud.google.com/go/firestore/docref.go new file mode 100644 index 0000000..674fee8 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/docref.go @@ -0,0 +1,558 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "reflect" + "sort" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" + + vkit "cloud.google.com/go/firestore/apiv1beta1" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +var errNilDocRef = errors.New("firestore: nil DocumentRef") + +// A DocumentRef is a reference to a Firestore document. +type DocumentRef struct { + // The CollectionRef that this document is a part of. Never nil. + Parent *CollectionRef + + // The full resource path of the document: "projects/P/databases/D/documents..." + Path string + + // The ID of the document: the last component of the resource path. + ID string +} + +func newDocRef(parent *CollectionRef, id string) *DocumentRef { + return &DocumentRef{ + Parent: parent, + ID: id, + Path: parent.Path + "/" + id, + } +} + +// Collection returns a reference to sub-collection of this document. +func (d *DocumentRef) Collection(id string) *CollectionRef { + return newCollRefWithParent(d.Parent.c, d, id) +} + +// Get retrieves the document. It returns an error if the document does not exist. 
+func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) { + if err := checkTransaction(ctx); err != nil { + return nil, err + } + if d == nil { + return nil, errNilDocRef + } + doc, err := d.Parent.c.c.GetDocument(withResourceHeader(ctx, d.Parent.c.path()), + &pb.GetDocumentRequest{Name: d.Path}) + // TODO(jba): verify that GetDocument returns NOT_FOUND. + if err != nil { + return nil, err + } + return newDocumentSnapshot(d, doc, d.Parent.c) +} + +// Create creates the document with the given data. +// It returns an error if a document with the same ID already exists. +// +// The data argument can be a map with string keys, a struct, or a pointer to a +// struct. The map keys or exported struct fields become the fields of the firestore +// document. +// The values of data are converted to Firestore values as follows: +// +// - bool converts to Bool. +// - string converts to String. +// - int, int8, int16, int32 and int64 convert to Integer. +// - uint8, uint16 and uint32 convert to Integer. uint64 is disallowed, +// because it can represent values that cannot be represented in an int64, which +// is the underlying type of an Integer. +// - float32 and float64 convert to Double. +// - []byte converts to Bytes. +// - time.Time converts to Timestamp. +// - latlng.LatLng converts to GeoPoint. latlng is the package +// "google.golang.org/genproto/googleapis/type/latlng". +// - Slices convert to Array. +// - Maps and structs convert to Map. +// - nils of any type convert to Null. +// +// Pointers and interface{} are also permitted, and their elements processed +// recursively. +// +// Struct fields can have tags like those used by the encoding/json package. Tags +// begin with "firestore:" and are followed by "-", meaning "ignore this field," or +// an alternative name for the field. Following the name, these comma-separated +// options may be provided: +// +// - omitempty: Do not encode this field if it is empty. A value is empty +// if it is a zero value, or an array, slice or map of length zero. +// - serverTimestamp: The field must be of type time.Time. When writing, if +// the field has the zero value, the server will populate the stored document with +// the time that the request is processed. +func (d *DocumentRef) Create(ctx context.Context, data interface{}) (*WriteResult, error) { + ws, err := d.newCreateWrites(data) + if err != nil { + return nil, err + } + return d.Parent.c.commit1(ctx, ws) +} + +func (d *DocumentRef) newCreateWrites(data interface{}) ([]*pb.Write, error) { + if d == nil { + return nil, errNilDocRef + } + doc, serverTimestampPaths, err := toProtoDocument(data) + if err != nil { + return nil, err + } + doc.Name = d.Path + pc, err := exists(false).preconditionProto() + if err != nil { + return nil, err + } + return d.newUpdateWithTransform(doc, nil, pc, serverTimestampPaths, false), nil +} + +// Set creates or overwrites the document with the given data. See DocumentRef.Create +// for the acceptable values of data. Without options, Set overwrites the document +// completely. Specify one of the Merge options to preserve an existing document's +// fields. 
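The tag options described in the Create documentation above follow the encoding/json conventions. A hypothetical type exercising them, together with a Create call (the type, field names, and values are illustrative):

    type City struct {
        Name       string    `firestore:"name"`
        Population int64     `firestore:"pop,omitempty"`             // omitted when zero
        Scratch    string    `firestore:"-"`                         // never stored
        UpdatedAt  time.Time `firestore:"updatedAt,serverTimestamp"` // server fills this in if zero
    }

    _, err := client.Doc("Cities/SF").Create(ctx, City{Name: "San Francisco"})
    if err != nil {
        // TODO: Handle error. Create fails if the document already exists.
    }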
+func (d *DocumentRef) Set(ctx context.Context, data interface{}, opts ...SetOption) (*WriteResult, error) { + ws, err := d.newSetWrites(data, opts) + if err != nil { + return nil, err + } + return d.Parent.c.commit1(ctx, ws) +} + +func (d *DocumentRef) newSetWrites(data interface{}, opts []SetOption) ([]*pb.Write, error) { + if d == nil { + return nil, errNilDocRef + } + origFieldPaths, allPaths, err := processSetOptions(opts) + if err != nil { + return nil, err + } + doc, serverTimestampPaths, err := toProtoDocument(data) + if err != nil { + return nil, err + } + if len(origFieldPaths) > 0 { + // Keep only data fields corresponding to the given field paths. + doc.Fields = applyFieldPaths(doc.Fields, origFieldPaths, nil) + } + doc.Name = d.Path + + var fieldPaths []FieldPath + if allPaths { + // MergeAll was passed. Check that the data is a map, and extract its field paths. + v := reflect.ValueOf(data) + if v.Kind() != reflect.Map { + return nil, errors.New("firestore: MergeAll can only be specified with map data") + } + fieldPaths = fieldPathsFromMap(v, nil) + } else if len(origFieldPaths) > 0 { + // Remove server timestamp paths that are not in the list of paths to merge. + // Note: this is technically O(n^2), but it is unlikely that there is more + // than one server timestamp path. + serverTimestampPaths = removePathsIf(serverTimestampPaths, func(fp FieldPath) bool { + return !fp.in(origFieldPaths) + }) + // Remove server timestamp fields from fieldPaths. Those fields were removed + // from the document by toProtoDocument, so they should not be in the update + // mask. + // Note: this is technically O(n^2), but it is unlikely that there is + // more than one server timestamp path. + fieldPaths = removePathsIf(origFieldPaths, func(fp FieldPath) bool { + return fp.in(serverTimestampPaths) + }) + // Check that all the remaining field paths in the merge option are in the document. + for _, fp := range fieldPaths { + if _, err := valueAtPath(fp, doc.Fields); err != nil { + return nil, err + } + } + } + return d.newUpdateWithTransform(doc, fieldPaths, nil, serverTimestampPaths, len(opts) == 0), nil +} + +// Delete deletes the document. If the document doesn't exist, it does nothing +// and returns no error. +func (d *DocumentRef) Delete(ctx context.Context, preconds ...Precondition) (*WriteResult, error) { + ws, err := d.newDeleteWrites(preconds) + if err != nil { + return nil, err + } + return d.Parent.c.commit1(ctx, ws) +} + +// Create a new map that contains only the field paths in fps. +func applyFieldPaths(fields map[string]*pb.Value, fps []FieldPath, root FieldPath) map[string]*pb.Value { + r := map[string]*pb.Value{} + for k, v := range fields { + kpath := root.with(k) + if kpath.in(fps) { + r[k] = v + } else if mv := v.GetMapValue(); mv != nil { + if m2 := applyFieldPaths(mv.Fields, fps, kpath); m2 != nil { + r[k] = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m2}}} + } + } + } + if len(r) == 0 { + return nil + } + return r +} + +func fieldPathsFromMap(vmap reflect.Value, prefix FieldPath) []FieldPath { + // vmap is a map and its keys are strings. + // Each map key denotes a field; no splitting or escaping. + var fps []FieldPath + for _, k := range vmap.MapKeys() { + v := vmap.MapIndex(k) + fp := prefix.with(k.String()) + if vm := extractMap(v); vm.IsValid() { + fps = append(fps, fieldPathsFromMap(vm, fp)...) + } else if v.Interface() != ServerTimestamp { + // ServerTimestamp fields do not go into the update mask. 
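+			// Any other leaf value contributes its own field path to the
+			// mask (appended below).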
+ fps = append(fps, fp) + } + } + return fps +} + +func extractMap(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Map: + return v + case reflect.Interface: + return extractMap(v.Elem()) + default: + return reflect.Value{} + } +} + +// removePathsIf creates a new slice of FieldPaths that contains +// exactly those elements of fps for which pred returns false. +func removePathsIf(fps []FieldPath, pred func(FieldPath) bool) []FieldPath { + var result []FieldPath + for _, fp := range fps { + if !pred(fp) { + result = append(result, fp) + } + } + return result +} + +func (d *DocumentRef) newDeleteWrites(preconds []Precondition) ([]*pb.Write, error) { + if d == nil { + return nil, errNilDocRef + } + pc, err := processPreconditionsForDelete(preconds) + if err != nil { + return nil, err + } + return []*pb.Write{{ + Operation: &pb.Write_Delete{d.Path}, + CurrentDocument: pc, + }}, nil +} + +func (d *DocumentRef) newUpdatePathWrites(updates []Update, preconds []Precondition) ([]*pb.Write, error) { + if len(updates) == 0 { + return nil, errors.New("firestore: no paths to update") + } + var fps []FieldPath + var fpvs []fpv + for _, u := range updates { + v, err := u.process() + if err != nil { + return nil, err + } + fps = append(fps, v.fieldPath) + fpvs = append(fpvs, v) + } + if err := checkNoDupOrPrefix(fps); err != nil { + return nil, err + } + m := createMapFromUpdates(fpvs) + return d.newUpdateWrites(m, fps, preconds) +} + +// newUpdateWrites creates Write operations for an update. +func (d *DocumentRef) newUpdateWrites(data interface{}, fieldPaths []FieldPath, preconds []Precondition) ([]*pb.Write, error) { + if d == nil { + return nil, errNilDocRef + } + pc, err := processPreconditionsForUpdate(preconds) + if err != nil { + return nil, err + } + doc, serverTimestampPaths, err := toProtoDocument(data) + if err != nil { + return nil, err + } + doc.Name = d.Path + return d.newUpdateWithTransform(doc, fieldPaths, pc, serverTimestampPaths, false), nil +} + +var requestTimeTransform = &pb.DocumentTransform_FieldTransform_SetToServerValue{ + pb.DocumentTransform_FieldTransform_REQUEST_TIME, +} + +// newUpdateWithTransform constructs operations for a commit. Most generally, it +// returns an update operation followed by a transform. +// +// If there are no serverTimestampPaths, the transform is omitted. +// +// If doc.Fields is empty, there are no updatePaths, and there is no precondition, +// the update is omitted, unless updateOnEmpty is true. +func (d *DocumentRef) newUpdateWithTransform(doc *pb.Document, updatePaths []FieldPath, pc *pb.Precondition, serverTimestampPaths []FieldPath, updateOnEmpty bool) []*pb.Write { + // Remove server timestamp fields from updatePaths. Those fields were removed + // from the document by toProtoDocument, so they should not be in the update + // mask. + // Note: this is technically O(n^2), but it is unlikely that there is + // more than one server timestamp path. 
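+	// (Those fields are instead written by the transform constructed below
+	// from serverTimestampPaths.)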
+ updatePaths = removePathsIf(updatePaths, func(fp FieldPath) bool { + return fp.in(serverTimestampPaths) + }) + var ws []*pb.Write + if updateOnEmpty || len(doc.Fields) > 0 || + len(updatePaths) > 0 || (pc != nil && len(serverTimestampPaths) == 0) { + var mask *pb.DocumentMask + if len(updatePaths) > 0 { + sfps := toServiceFieldPaths(updatePaths) + sort.Strings(sfps) // TODO(jba): make tests pass without this + mask = &pb.DocumentMask{FieldPaths: sfps} + } + w := &pb.Write{ + Operation: &pb.Write_Update{doc}, + UpdateMask: mask, + CurrentDocument: pc, + } + ws = append(ws, w) + pc = nil // If the precondition is in the write, we don't need it in the transform. + } + if len(serverTimestampPaths) > 0 || pc != nil { + ws = append(ws, d.newTransform(serverTimestampPaths, pc)) + } + return ws +} + +func (d *DocumentRef) newTransform(serverTimestampFieldPaths []FieldPath, pc *pb.Precondition) *pb.Write { + sort.Sort(byPath(serverTimestampFieldPaths)) // TODO(jba): make tests pass without this + var fts []*pb.DocumentTransform_FieldTransform + for _, p := range serverTimestampFieldPaths { + fts = append(fts, &pb.DocumentTransform_FieldTransform{ + FieldPath: p.toServiceFieldPath(), + TransformType: requestTimeTransform, + }) + } + return &pb.Write{ + Operation: &pb.Write_Transform{ + &pb.DocumentTransform{ + Document: d.Path, + FieldTransforms: fts, + // TODO(jba): should the transform have the same preconditions as the write? + }, + }, + CurrentDocument: pc, + } +} + +type sentinel int + +const ( + // Delete is used as a value in a call to UpdateMap to indicate that the + // corresponding key should be deleted. + Delete sentinel = iota + + // ServerTimestamp is used as a value in a call to UpdateMap to indicate that the + // key's value should be set to the time at which the server processed + // the request. + ServerTimestamp +) + +func (s sentinel) String() string { + switch s { + case Delete: + return "Delete" + case ServerTimestamp: + return "ServerTimestamp" + default: + return "" + } +} + +func isStructOrStructPtr(x interface{}) bool { + v := reflect.ValueOf(x) + if v.Kind() == reflect.Struct { + return true + } + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + return true + } + return false +} + +// An Update describes an update to a value referred to by a path. +// An Update should have either a non-empty Path or a non-empty FieldPath, +// but not both. +// +// See DocumentRef.Create for acceptable values. +// To delete a field, specify firestore.Delete as the value. +type Update struct { + Path string // Will be split on dots, and must not contain any of "˜*/[]". + FieldPath FieldPath + Value interface{} +} + +// An fpv is a pair of validated FieldPath and value. +type fpv struct { + fieldPath FieldPath + value interface{} +} + +func (u *Update) process() (v fpv, err error) { + if (u.Path != "") == (u.FieldPath != nil) { + return v, fmt.Errorf("firestore: update %+v should have exactly one of Path or FieldPath", u) + } + fp := u.FieldPath + if fp == nil { + fp, err = parseDotSeparatedString(u.Path) + if err != nil { + return v, err + } + } + if err := fp.validate(); err != nil { + return v, err + } + return fpv{fp, u.Value}, nil +} + +// Update updates the document. The values at the given +// field paths are replaced, but other fields of the stored document are untouched. 
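+//
+// A sketch, including a field deletion (the field names are illustrative):
+//
+//	_, err := doc.Update(ctx, []firestore.Update{
+//		{Path: "pop", Value: 7.1},               // replace "pop"
+//		{Path: "temp", Value: firestore.Delete}, // remove "temp"
+//	})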
+func (d *DocumentRef) Update(ctx context.Context, updates []Update, preconds ...Precondition) (*WriteResult, error) {
+	ws, err := d.newUpdatePathWrites(updates, preconds)
+	if err != nil {
+		return nil, err
+	}
+	return d.Parent.c.commit1(ctx, ws)
+}
+
+// Collections returns an iterator over the immediate sub-collections of the document.
+func (d *DocumentRef) Collections(ctx context.Context) *CollectionIterator {
+	client := d.Parent.c
+	it := &CollectionIterator{
+		err:    checkTransaction(ctx),
+		client: client,
+		parent: d,
+		it: client.c.ListCollectionIds(
+			withResourceHeader(ctx, client.path()),
+			&pb.ListCollectionIdsRequest{Parent: d.Path}),
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.items) },
+		func() interface{} { b := it.items; it.items = nil; return b })
+	return it
+}
+
+// CollectionIterator is an iterator over sub-collections of a document.
+type CollectionIterator struct {
+	client   *Client
+	parent   *DocumentRef
+	it       *vkit.StringIterator
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+	items    []*CollectionRef
+	err      error
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *CollectionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+// Next returns the next result. Its second return value is iterator.Done if there
+// are no more results. Once Next returns Done, all subsequent calls will return
+// Done.
+func (it *CollectionIterator) Next() (*CollectionRef, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *CollectionIterator) fetch(pageSize int, pageToken string) (string, error) {
+	if it.err != nil {
+		return "", it.err
+	}
+	return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error {
+		id, err := it.it.Next()
+		if err != nil {
+			return err
+		}
+		var cr *CollectionRef
+		if it.parent == nil {
+			cr = newTopLevelCollRef(it.client, it.client.path(), id)
+		} else {
+			cr = newCollRefWithParent(it.client, it.parent, id)
+		}
+		it.items = append(it.items, cr)
+		return nil
+	})
+}
+
+// GetAll returns all the collections remaining from the iterator.
+func (it *CollectionIterator) GetAll() ([]*CollectionRef, error) {
+	var crs []*CollectionRef
+	for {
+		cr, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		crs = append(crs, cr)
+	}
+	return crs, nil
+}
+
+// Common fetch code for iterators that are backed by vkit iterators.
+// TODO(jba): dedup with same function in logging/logadmin.
+func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) {
+	pi.MaxSize = pageSize
+	pi.Token = pageToken
+	// Get one item, which will fill the buffer.
+	if err := next(); err != nil {
+		return "", err
+	}
+	// Collect the rest of the buffer.
+	for pi.Remaining() > 0 {
+		if err := next(); err != nil {
+			return "", err
+		}
+	}
+	return pi.Token, nil
+}
diff --git a/vendor/cloud.google.com/go/firestore/docref_test.go b/vendor/cloud.google.com/go/firestore/docref_test.go
new file mode 100644
index 0000000..5e51098
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/docref_test.go
@@ -0,0 +1,306 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "reflect" + "sort" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "golang.org/x/net/context" + "google.golang.org/genproto/googleapis/type/latlng" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + writeResultForSet = &WriteResult{UpdateTime: aTime} + commitResponseForSet = &pb.CommitResponse{ + WriteResults: []*pb.WriteResult{{UpdateTime: aTimestamp}}, + } +) + +func TestDocGet(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + path := "projects/projectID/databases/(default)/documents/C/a" + pdoc := &pb.Document{ + Name: path, + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(1)}, + } + srv.addRPC(&pb.GetDocumentRequest{Name: path}, pdoc) + ref := c.Collection("C").Doc("a") + gotDoc, err := ref.Get(ctx) + if err != nil { + t.Fatal(err) + } + wantDoc := &DocumentSnapshot{ + Ref: ref, + CreateTime: aTime, + UpdateTime: aTime, + proto: pdoc, + c: c, + } + if !testEqual(gotDoc, wantDoc) { + t.Fatalf("\ngot %+v\nwant %+v", gotDoc, wantDoc) + } + + srv.addRPC( + &pb.GetDocumentRequest{ + Name: "projects/projectID/databases/(default)/documents/C/b", + }, + grpc.Errorf(codes.NotFound, "not found"), + ) + _, err = c.Collection("C").Doc("b").Get(ctx) + if grpc.Code(err) != codes.NotFound { + t.Errorf("got %v, want NotFound", err) + } +} + +func TestDocSet(t *testing.T) { + // Most tests for Set are in the cross-language tests. + ctx := context.Background() + c, srv := newMock(t) + + doc := c.Collection("C").Doc("d") + // Merge with a struct and FieldPaths. + srv.addRPC(&pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + { + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/d", + Fields: map[string]*pb.Value{ + "*": mapval(map[string]*pb.Value{ + "~": boolval(true), + }), + }, + }, + }, + UpdateMask: &pb.DocumentMask{FieldPaths: []string{"`*`.`~`"}}, + }, + }, + }, commitResponseForSet) + data := struct { + A map[string]bool `firestore:"*"` + }{A: map[string]bool{"~": true}} + wr, err := doc.Set(ctx, data, Merge([]string{"*", "~"})) + if err != nil { + t.Fatal(err) + } + if !testEqual(wr, writeResultForSet) { + t.Errorf("got %v, want %v", wr, writeResultForSet) + } + + // MergeAll cannot be used with structs. + _, err = doc.Set(ctx, data, MergeAll) + if err == nil { + t.Errorf("got nil, want error") + } +} + +func TestDocCreate(t *testing.T) { + // Verify creation with structs. In particular, make sure zero values + // are handled well. + // Other tests for Create are handled by the cross-language tests. 
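+	// (Below: a zero time.Time, a nil []byte and a nil *latlng.LatLng are
+	// expected to encode as a zero Timestamp, empty Bytes and Null.)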
+ ctx := context.Background() + c, srv := newMock(t) + + type create struct { + Time time.Time + Bytes []byte + Geo *latlng.LatLng + } + srv.addRPC( + &pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + { + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/d", + Fields: map[string]*pb.Value{ + "Time": tsval(time.Time{}), + "Bytes": bytesval(nil), + "Geo": nullValue, + }, + }, + }, + CurrentDocument: &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{false}, + }, + }, + }, + }, + commitResponseForSet, + ) + _, err := c.Collection("C").Doc("d").Create(ctx, &create{}) + if err != nil { + t.Fatal(err) + } +} + +func TestDocDelete(t *testing.T) { + ctx := context.Background() + c, srv := newMock(t) + srv.addRPC( + &pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + {Operation: &pb.Write_Delete{"projects/projectID/databases/(default)/documents/C/d"}}, + }, + }, + &pb.CommitResponse{ + WriteResults: []*pb.WriteResult{{}}, + }) + wr, err := c.Collection("C").Doc("d").Delete(ctx) + if err != nil { + t.Fatal(err) + } + if !testEqual(wr, &WriteResult{}) { + t.Errorf("got %+v, want %+v", wr, writeResultForSet) + } +} + +var ( + testData = map[string]interface{}{"a": 1} + testFields = map[string]*pb.Value{"a": intval(1)} +) + +// Update is tested by the cross-language tests. + +func TestApplyFieldPaths(t *testing.T) { + submap := mapval(map[string]*pb.Value{ + "b": intval(1), + "c": intval(2), + }) + fields := map[string]*pb.Value{ + "a": submap, + "d": intval(3), + } + for _, test := range []struct { + fps []FieldPath + want map[string]*pb.Value + }{ + {nil, nil}, + {[]FieldPath{[]string{"z"}}, nil}, + {[]FieldPath{[]string{"a"}}, map[string]*pb.Value{"a": submap}}, + {[]FieldPath{[]string{"a", "b", "c"}}, nil}, + {[]FieldPath{[]string{"d"}}, map[string]*pb.Value{"d": intval(3)}}, + { + []FieldPath{[]string{"d"}, []string{"a", "c"}}, + map[string]*pb.Value{ + "a": mapval(map[string]*pb.Value{"c": intval(2)}), + "d": intval(3), + }, + }, + } { + got := applyFieldPaths(fields, test.fps, nil) + if !testEqual(got, test.want) { + t.Errorf("%v:\ngot %v\nwant \n%v", test.fps, got, test.want) + } + } +} + +func TestFieldPathsFromMap(t *testing.T) { + for _, test := range []struct { + in map[string]interface{} + want []string + }{ + {nil, nil}, + {map[string]interface{}{"a": 1}, []string{"a"}}, + {map[string]interface{}{ + "a": 1, + "b": map[string]interface{}{"c": 2}, + }, []string{"a", "b.c"}}, + } { + fps := fieldPathsFromMap(reflect.ValueOf(test.in), nil) + got := toServiceFieldPaths(fps) + sort.Strings(got) + if !testEqual(got, test.want) { + t.Errorf("%+v: got %v, want %v", test.in, got, test.want) + } + } +} + +func commitRequestForSet() *pb.CommitRequest { + return &pb.CommitRequest{ + Database: "projects/projectID/databases/(default)", + Writes: []*pb.Write{ + { + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/d", + Fields: testFields, + }, + }, + }, + }, + } +} + +func TestUpdateProcess(t *testing.T) { + for _, test := range []struct { + in Update + want fpv + wantErr bool + }{ + { + in: Update{Path: "a", Value: 1}, + want: fpv{fieldPath: []string{"a"}, value: 1}, + }, + { + in: Update{Path: "c.d", Value: Delete}, + want: fpv{fieldPath: []string{"c", "d"}, value: Delete}, + }, + { + in: Update{FieldPath: []string{"*", "~"}, Value: ServerTimestamp}, + want: fpv{fieldPath: 
[]string{"*", "~"}, value: ServerTimestamp},
+		},
+		{
+			in:      Update{Path: "*"},
+			wantErr: true, // bad rune in path
+		},
+		{
+			in:      Update{Path: "a", FieldPath: []string{"b"}},
+			wantErr: true, // both Path and FieldPath
+		},
+		{
+			in:      Update{Value: 1},
+			wantErr: true, // neither Path nor FieldPath
+		},
+		{
+			in:      Update{FieldPath: []string{"", "a"}},
+			wantErr: true, // empty FieldPath component
+		},
+	} {
+		got, err := test.in.process()
+		if test.wantErr {
+			if err == nil {
+				t.Errorf("%+v: got nil, want error", test.in)
+			}
+		} else if err != nil {
+			t.Errorf("%+v: got error %v, want nil", test.in, err)
+		} else if !testEqual(got, test.want) {
+			t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/document.go b/vendor/cloud.google.com/go/firestore/document.go
new file mode 100644
index 0000000..4a650f4
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/document.go
@@ -0,0 +1,261 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+
+	"github.com/golang/protobuf/ptypes"
+)
+
+// A DocumentSnapshot contains document data and metadata.
+type DocumentSnapshot struct {
+	// The DocumentRef for this document.
+	Ref *DocumentRef
+
+	// Read-only. The time at which the document was created.
+	// Increases monotonically when a document is deleted then
+	// recreated. It can also be compared to values from other documents and
+	// the read time of a query.
+	CreateTime time.Time
+
+	// Read-only. The time at which the document was last changed. This value
+	// is initially set to CreateTime then increases monotonically with each
+	// change to the document. It can also be compared to values from other
+	// documents and the read time of a query.
+	UpdateTime time.Time
+
+	c     *Client
+	proto *pb.Document
+}
+
+// Data returns the DocumentSnapshot's fields as a map.
+// It is equivalent to
+//     var m map[string]interface{}
+//     d.DataTo(&m)
+func (d *DocumentSnapshot) Data() map[string]interface{} {
+	m, err := createMapFromValueMap(d.proto.Fields, d.c)
+	// Any error here is a bug in the client.
+	if err != nil {
+		panic(fmt.Sprintf("firestore: %v", err))
+	}
+	return m
+}
+
+// DataTo uses the document's fields to populate p, which can be a pointer to a
+// map[string]interface{} or a pointer to a struct.
+//
+// Firestore field values are converted to Go values as follows:
+//   - Null converts to nil.
+//   - Bool converts to bool.
+//   - String converts to string.
+//   - Integer converts to int64. When setting a struct field, any signed or unsigned
+//     integer type is permitted except uint64. Overflow is detected and results in
+//     an error.
+//   - Double converts to float64. When setting a struct field, float32 is permitted.
+//     Overflow is detected and results in an error.
+//   - Bytes is converted to []byte.
+//   - Timestamp converts to time.Time.
+//   - GeoPoint converts to latlng.LatLng, where latlng is the package
+//     "google.golang.org/genproto/googleapis/type/latlng".
+//   - Arrays convert to []interface{}. When setting a struct field, the field
+//     may be a slice or array of any type and is populated recursively.
+//     Slices are resized to the incoming value's size, while arrays that are too
+//     long have excess elements filled with zero values. If the array is too short,
+//     excess incoming values will be dropped.
+//   - Maps convert to map[string]interface{}. When setting a struct field,
+//     maps of key type string and any value type are permitted, and are populated
+//     recursively.
+//   - References are converted to DocumentRefs.
+//
+// Field names given by struct field tags are observed, as described in
+// DocumentRef.Create.
+func (d *DocumentSnapshot) DataTo(p interface{}) error {
+	return setFromProtoValue(p, &pb.Value{&pb.Value_MapValue{&pb.MapValue{d.proto.Fields}}}, d.c)
+}
+
+// DataAt returns the data value denoted by path.
+//
+// The path argument can be a single field or a dot-separated sequence of
+// fields, and must not contain any of the runes "~*/[]". Use DataAtPath instead for
+// such a path.
+//
+// See DocumentSnapshot.DataTo for how Firestore values are converted to Go values.
+func (d *DocumentSnapshot) DataAt(path string) (interface{}, error) {
+	fp, err := parseDotSeparatedString(path)
+	if err != nil {
+		return nil, err
+	}
+	return d.DataAtPath(fp)
+}
+
+// DataAtPath returns the data value denoted by the FieldPath fp.
+func (d *DocumentSnapshot) DataAtPath(fp FieldPath) (interface{}, error) {
+	v, err := valueAtPath(fp, d.proto.Fields)
+	if err != nil {
+		return nil, err
+	}
+	return createFromProtoValue(v, d.c)
+}
+
+// valueAtPath returns the value of m referred to by fp.
+func valueAtPath(fp FieldPath, m map[string]*pb.Value) (*pb.Value, error) {
+	for _, k := range fp[:len(fp)-1] {
+		v := m[k]
+		if v == nil {
+			return nil, fmt.Errorf("firestore: no field %q", k)
+		}
+		mv := v.GetMapValue()
+		if mv == nil {
+			return nil, fmt.Errorf("firestore: value for field %q is not a map", k)
+		}
+		m = mv.Fields
+	}
+	k := fp[len(fp)-1]
+	v := m[k]
+	if v == nil {
+		return nil, fmt.Errorf("firestore: no field %q", k)
+	}
+	return v, nil
+}
+
+// toProtoDocument converts a Go value to a Document proto.
+// Valid values are: map[string]T, struct, or pointer to a valid value.
+// It also returns a list of field paths for DocumentTransform (server timestamp).
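+// For example, map[string]interface{}{"a": 1} becomes a Document whose Fields
+// map has "a" bound to an integer Value (see TestToProtoDocument).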
+func toProtoDocument(x interface{}) (*pb.Document, []FieldPath, error) {
+	if x == nil {
+		return nil, nil, errors.New("firestore: nil document contents")
+	}
+	v := reflect.ValueOf(x)
+	pv, sawTransform, err := toProtoValue(v)
+	if err != nil {
+		return nil, nil, err
+	}
+	var fieldPaths []FieldPath
+	if sawTransform {
+		fieldPaths, err = extractTransformPaths(v, nil)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	var fields map[string]*pb.Value
+	if pv != nil {
+		m := pv.GetMapValue()
+		if m == nil {
+			return nil, nil, fmt.Errorf("firestore: cannot convert value of type %T into a map", x)
+		}
+		fields = m.Fields
+	}
+	return &pb.Document{Fields: fields}, fieldPaths, nil
+}
+
+func extractTransformPaths(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
+	switch v.Kind() {
+	case reflect.Map:
+		return extractTransformPathsFromMap(v, prefix)
+	case reflect.Struct:
+		return extractTransformPathsFromStruct(v, prefix)
+	case reflect.Ptr:
+		if v.IsNil() {
+			return nil, nil
+		}
+		return extractTransformPaths(v.Elem(), prefix)
+	case reflect.Interface:
+		if v.NumMethod() == 0 { // empty interface: recurse on its contents
+			return extractTransformPaths(v.Elem(), prefix)
+		}
+		return nil, nil
+	default:
+		return nil, nil
+	}
+}
+
+func extractTransformPathsFromMap(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
+	var paths []FieldPath
+	for _, k := range v.MapKeys() {
+		sk := k.Interface().(string) // assume keys are strings; checked in toProtoValue
+		path := prefix.with(sk)
+		mi := v.MapIndex(k)
+		if mi.Interface() == ServerTimestamp {
+			paths = append(paths, path)
+		} else {
+			ps, err := extractTransformPaths(mi, path)
+			if err != nil {
+				return nil, err
+			}
+			paths = append(paths, ps...)
+		}
+	}
+	return paths, nil
+}
+
+func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
+	var paths []FieldPath
+	fields, err := fieldCache.Fields(v.Type())
+	if err != nil {
+		return nil, err
+	}
+	for _, f := range fields {
+		fv := v.FieldByIndex(f.Index)
+		path := prefix.with(f.Name)
+		opts := f.ParsedTag.(tagOptions)
+		if opts.serverTimestamp {
+			var isZero bool
+			switch f.Type {
+			case typeOfGoTime:
+				isZero = fv.Interface().(time.Time).IsZero()
+			case reflect.PtrTo(typeOfGoTime):
+				isZero = fv.IsNil() || fv.Elem().Interface().(time.Time).IsZero()
+			default:
+				return nil, fmt.Errorf("firestore: field %s of struct %s with serverTimestamp tag must be of type time.Time or *time.Time",
+					f.Name, v.Type())
+			}
+			if isZero {
+				paths = append(paths, path)
+			}
+		} else {
+			ps, err := extractTransformPaths(fv, path)
+			if err != nil {
+				return nil, err
+			}
+			paths = append(paths, ps...)
+		}
+	}
+	return paths, nil
+}
+
+func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*DocumentSnapshot, error) {
+	d := &DocumentSnapshot{
+		Ref:   ref,
+		c:     c,
+		proto: proto,
+	}
+	ts, err := ptypes.Timestamp(proto.CreateTime)
+	if err != nil {
+		return nil, err
+	}
+	d.CreateTime = ts
+	ts, err = ptypes.Timestamp(proto.UpdateTime)
+	if err != nil {
+		return nil, err
+	}
+	d.UpdateTime = ts
+	return d, nil
+}
diff --git a/vendor/cloud.google.com/go/firestore/document_test.go b/vendor/cloud.google.com/go/firestore/document_test.go
new file mode 100644
index 0000000..df3ffda
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/document_test.go
@@ -0,0 +1,238 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "reflect" + "sort" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +func TestToProtoDocument(t *testing.T) { + type s struct{ I int } + + for _, test := range []struct { + in interface{} + want *pb.Document + wantErr bool + }{ + {nil, nil, true}, + {[]int{1}, nil, true}, + {map[string]int{"a": 1}, + &pb.Document{Fields: map[string]*pb.Value{"a": intval(1)}}, + false}, + {s{2}, &pb.Document{Fields: map[string]*pb.Value{"I": intval(2)}}, false}, + {&s{3}, &pb.Document{Fields: map[string]*pb.Value{"I": intval(3)}}, false}, + } { + got, _, gotErr := toProtoDocument(test.in) + if (gotErr != nil) != test.wantErr { + t.Errorf("%v: got error %v, want %t", test.in, gotErr, test.wantErr) + } + if gotErr != nil { + continue + } + if !testEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.in, got, test.want) + } + } +} + +func TestNewDocumentSnapshot(t *testing.T) { + c := &Client{ + projectID: "projID", + databaseID: "(database)", + } + docRef := c.Doc("C/a") + in := &pb.Document{ + CreateTime: &tspb.Timestamp{Seconds: 10}, + UpdateTime: &tspb.Timestamp{Seconds: 20}, + Fields: map[string]*pb.Value{"a": intval(1)}, + } + want := &DocumentSnapshot{ + Ref: docRef, + CreateTime: time.Unix(10, 0).UTC(), + UpdateTime: time.Unix(20, 0).UTC(), + proto: in, + c: c, + } + got, err := newDocumentSnapshot(docRef, in, c) + if err != nil { + t.Fatal(err) + } + if !testEqual(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } +} + +func TestData(t *testing.T) { + doc := &DocumentSnapshot{ + proto: &pb.Document{ + Fields: map[string]*pb.Value{"a": intval(1), "b": strval("x")}, + }, + } + got := doc.Data() + want := map[string]interface{}{"a": int64(1), "b": "x"} + if !testEqual(got, want) { + t.Errorf("got %#v\nwant %#v", got, want) + } + var got2 map[string]interface{} + if err := doc.DataTo(&got2); err != nil { + t.Fatal(err) + } + if !testEqual(got2, want) { + t.Errorf("got %#v\nwant %#v", got2, want) + } + + type s struct { + A int + B string + } + var got3 s + if err := doc.DataTo(&got3); err != nil { + t.Fatal(err) + } + want2 := s{A: 1, B: "x"} + if !testEqual(got3, want2) { + t.Errorf("got %#v\nwant %#v", got3, want2) + } +} + +var testDoc = &DocumentSnapshot{ + proto: &pb.Document{ + Fields: map[string]*pb.Value{ + "a": intval(1), + "b": mapval(map[string]*pb.Value{ + "`": intval(2), + "~": mapval(map[string]*pb.Value{ + "x": intval(3), + }), + }), + }, + }, +} + +func TestDataAt(t *testing.T) { + for _, test := range []struct { + fieldPath string + want interface{} + }{ + {"a", int64(1)}, + {"b.`", int64(2)}, + } { + got, err := testDoc.DataAt(test.fieldPath) + if err != nil { + t.Errorf("%q: %v", test.fieldPath, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%q: got %v, want %v", test.fieldPath, got, test.want) + } + } + + for _, bad := range []string{ + "c.~.x", // bad field path 
+ "a.b", // "a" isn't a map + "z.b", // bad non-final key + "b.z", // bad final key + } { + _, err := testDoc.DataAt(bad) + if err == nil { + t.Errorf("%q: got nil, want error", bad) + } + } +} + +func TestDataAtPath(t *testing.T) { + for _, test := range []struct { + fieldPath FieldPath + want interface{} + }{ + {[]string{"a"}, int64(1)}, + {[]string{"b", "`"}, int64(2)}, + {[]string{"b", "~"}, map[string]interface{}{"x": int64(3)}}, + {[]string{"b", "~", "x"}, int64(3)}, + } { + got, err := testDoc.DataAtPath(test.fieldPath) + if err != nil { + t.Errorf("%v: %v", test.fieldPath, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.fieldPath, got, test.want) + } + } + + for _, bad := range []FieldPath{ + []string{"c", "", "x"}, // bad field path + []string{"a", "b"}, // "a" isn't a map + []string{"z", "~"}, // bad non-final key + []string{"b", "z"}, // bad final key + } { + _, err := testDoc.DataAtPath(bad) + if err == nil { + t.Errorf("%v: got nil, want error", bad) + } + } +} + +func TestExtractTransformPaths(t *testing.T) { + type S struct { + A time.Time `firestore:",serverTimestamp"` + B time.Time `firestore:",serverTimestamp"` + C *time.Time `firestore:",serverTimestamp"` + D *time.Time `firestore:"d.d,serverTimestamp"` + E *time.Time `firestore:",serverTimestamp"` + F time.Time + G int + } + + m := map[string]interface{}{ + "x": 1, + "y": &S{ + // A is a zero time: included + B: aTime, // not a zero time: excluded + // C is nil: included + D: &time.Time{}, // pointer to a zero time: included + E: &aTime, // pointer to a non-zero time: excluded + // F is a zero time, but does not have the right tag: excluded + G: 15, // not a time.Time + }, + "z": map[string]interface{}{"w": ServerTimestamp}, + } + got, err := extractTransformPaths(reflect.ValueOf(m), nil) + if err != nil { + t.Fatal(err) + } + sort.Sort(byPath(got)) + want := []FieldPath{{"y", "A"}, {"y", "C"}, {"y", "d.d"}, {"z", "w"}} + if !testEqual(got, want) { + t.Errorf("got %#v, want %#v", got, want) + } +} + +func TestExtractTransformPathsErrors(t *testing.T) { + type S struct { + A int `firestore:",serverTimestamp"` + } + _, err := extractTransformPaths(reflect.ValueOf(S{}), nil) + if err == nil { + t.Error("got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/firestore/examples_test.go b/vendor/cloud.google.com/go/firestore/examples_test.go new file mode 100644 index 0000000..2980159 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/examples_test.go @@ -0,0 +1,509 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): add Output comments to examples when feasible. + +package firestore_test + +import ( + "fmt" + + "cloud.google.com/go/firestore" + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
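+		// (For a short program, log.Fatal(err) would do.)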
+ } + defer client.Close() // Close client when done. + _ = client // TODO: Use client. +} + +func ExampleClient_Collection() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + coll1 := client.Collection("States") + coll2 := client.Collection("States/NewYork/Cities") + fmt.Println(coll1, coll2) +} + +func ExampleClient_Doc() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + doc1 := client.Doc("States/NewYork") + doc2 := client.Doc("States/NewYork/Cities/Albany") + fmt.Println(doc1, doc2) +} + +func ExampleClient_GetAll() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + docs, err := client.GetAll(ctx, []*firestore.DocumentRef{ + client.Doc("States/NorthCarolina"), + client.Doc("States/SouthCarolina"), + client.Doc("States/WestCarolina"), + client.Doc("States/EastCarolina"), + }) + if err != nil { + // TODO: Handle error. + } + // docs is a slice with four DocumentSnapshots, but the last two are + // nil because there is no West or East Carolina. + fmt.Println(docs) +} + +func ExampleClient_Batch() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + b := client.Batch() + _ = b // TODO: Use batch. +} + +func ExampleWriteBatch_Commit() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + ny := client.Doc("States/NewYork") + ca := client.Doc("States/California") + + writeResults, err := client.Batch(). + Create(ny, State{Capital: "Albany", Population: 19.8}). + Set(ca, State{Capital: "Sacramento", Population: 39.14}). + Delete(client.Doc("States/WestDakota")). + Commit(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(writeResults) +} + +func ExampleCollectionRef_Add() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + doc, wr, err := client.Collection("Users").Add(ctx, map[string]interface{}{ + "name": "Alice", + "email": "aj@example.com", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(doc, wr) +} + +func ExampleCollectionRef_Doc() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + fl := client.Collection("States").Doc("Florida") + ta := client.Collection("States").Doc("Florida/Cities/Tampa") + + fmt.Println(fl, ta) +} + +func ExampleCollectionRef_NewDoc() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + doc := client.Collection("Users").NewDoc() + + fmt.Println(doc) +} + +func ExampleDocumentRef_Collection() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + defer client.Close() + + mi := client.Collection("States").Doc("Michigan") + cities := mi.Collection("Cities") + + fmt.Println(cities) +} + +func ExampleDocumentRef_Create_map() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + wr, err := client.Doc("States/Colorado").Create(ctx, map[string]interface{}{ + "capital": "Denver", + "pop": 5.5, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Create_struct() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + wr, err := client.Doc("States/Colorado").Create(ctx, State{ + Capital: "Denver", + Population: 5.5, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Set() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + // Overwrite the document with the given data. Any other fields currently + // in the document will be removed. + wr, err := client.Doc("States/Alabama").Set(ctx, map[string]interface{}{ + "capital": "Montgomery", + "pop": 4.9, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Set_merge() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + // Overwrite only the fields in the map; preserve all others. + _, err = client.Doc("States/Alabama").Set(ctx, map[string]interface{}{ + "pop": 5.2, + }, firestore.MergeAll) + if err != nil { + // TODO: Handle error. + } + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + // To do a merging Set with struct data, specify the exact fields to overwrite. + // MergeAll is disallowed here, because it would probably be a mistake: the "capital" + // field would be overwritten with the empty string. + _, err = client.Doc("States/Alabama").Set(ctx, State{Population: 5.2}, firestore.Merge([]string{"pop"})) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDocumentRef_Update() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + tenn := client.Doc("States/Tennessee") + wr, err := tenn.Update(ctx, []firestore.Update{ + {Path: "pop", Value: 6.6}, + {FieldPath: []string{".", "*", "/"}, Value: "odd"}, + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(wr.UpdateTime) +} + +func ExampleDocumentRef_Delete() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + // Oops, Ontario is a Canadian province... + if _, err = client.Doc("States/Ontario").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleDocumentRef_Get() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + _ = docsnap // TODO: Use DocumentSnapshot. +} + +func ExampleDocumentSnapshot_Data() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + ohioMap := docsnap.Data() + fmt.Println(ohioMap["capital"]) +} + +func ExampleDocumentSnapshot_DataAt() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + cap, err := docsnap.DataAt("capital") + if err != nil { + // TODO: Handle error. + } + fmt.Println(cap) +} + +func ExampleDocumentSnapshot_DataAtPath() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + pop, err := docsnap.DataAtPath([]string{"capital", "population"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(pop) +} + +func ExampleDocumentSnapshot_DataTo() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + docsnap, err := client.Doc("States/Ohio").Get(ctx) + if err != nil { + // TODO: Handle error. + } + + type State struct { + Capital string `firestore:"capital"` + Population float64 `firestore:"pop"` // in millions + } + + var s State + if err := docsnap.DataTo(&s); err != nil { + // TODO: Handle error. + } + fmt.Println(s) +} + +func ExampleQuery_Documents() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("States").Select("pop"). + Where("pop", ">", 10). + OrderBy("pop", firestore.Desc). + Limit(10) + iter1 := q.Documents(ctx) + _ = iter1 // TODO: Use iter1. + + // You can call Documents directly on a CollectionRef as well. + iter2 := client.Collection("States").Documents(ctx) + _ = iter2 // TODO: Use iter2. +} + +// This example is just like the one above, but illustrates +// how to use the XXXPath methods of Query for field paths +// that can't be expressed as a dot-separated string. +func ExampleQuery_Documents_path_methods() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("Unusual").SelectPaths([]string{"*"}, []string{"[~]"}). + WherePath([]string{"/"}, ">", 10). + OrderByPath([]string{"/"}, firestore.Desc). + Limit(10) + iter1 := q.Documents(ctx) + _ = iter1 // TODO: Use iter1. + + // You can call Documents directly on a CollectionRef as well. + iter2 := client.Collection("States").Documents(ctx) + _ = iter2 // TODO: Use iter2. +} + +func ExampleDocumentIterator_Next() { + ctx := context.Background() + client, err := firestore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + + q := client.Collection("States"). + Where("pop", ">", 10). 
+		OrderBy("pop", firestore.Desc)
+	iter := q.Documents(ctx)
+	for {
+		doc, err := iter.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(doc.Data())
+	}
+}
+
+func ExampleDocumentIterator_GetAll() {
+	ctx := context.Background()
+	client, err := firestore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	defer client.Close()
+
+	q := client.Collection("States").
+		Where("pop", ">", 10).
+		OrderBy("pop", firestore.Desc).
+		Limit(10) // a good idea with GetAll, to avoid filling memory
+	docs, err := q.Documents(ctx).GetAll()
+	if err != nil {
+		// TODO: Handle error.
+	}
+	for _, doc := range docs {
+		fmt.Println(doc.Data())
+	}
+}
+
+func ExampleClient_RunTransaction() {
+	ctx := context.Background()
+	client, err := firestore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	defer client.Close()
+
+	nm := client.Doc("States/NewMexico")
+	err = client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
+		doc, err := tx.Get(nm) // tx.Get, NOT nm.Get!
+		if err != nil {
+			return err
+		}
+		pop, err := doc.DataAt("pop")
+		if err != nil {
+			return err
+		}
+		return tx.Update(nm, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}})
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/fieldpath.go b/vendor/cloud.google.com/go/firestore/fieldpath.go
new file mode 100644
index 0000000..ec7e088
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/fieldpath.go
@@ -0,0 +1,210 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+// A FieldPath is a non-empty sequence of non-empty fields that reference a value.
+//
+// A FieldPath value should only be necessary if one of the field names contains
+// one of the runes ".~*/[]". Most methods accept a simpler form of field path
+// as a string in which the individual fields are separated by dots.
+// For example,
+//     []string{"a", "b"}
+// is equivalent to the string form
+//     "a.b"
+// but
+//     []string{"*"}
+// has no equivalent string form.
+type FieldPath []string
+
+// parseDotSeparatedString constructs a FieldPath from a string that separates
+// path components with dots. Other than splitting at dots and checking for invalid
+// characters, it ignores everything else about the string,
+// including attempts to quote field path components. So "a.`b.c`.d" is parsed into
+// four parts, "a", "`b", "c`" and "d".
+func parseDotSeparatedString(s string) (FieldPath, error) {
+	const invalidRunes = "~*/[]"
+	if strings.ContainsAny(s, invalidRunes) {
+		return nil, fmt.Errorf("firestore: %q contains an invalid rune (one of %s)", s, invalidRunes)
+	}
+	fp := FieldPath(strings.Split(s, "."))
+	if err := fp.validate(); err != nil {
+		return nil, err
+	}
+	return fp, nil
+}
+
+func (fp1 FieldPath) equal(fp2 FieldPath) bool {
+	if len(fp1) != len(fp2) {
+		return false
+	}
+	for i, c1 := range fp1 {
+		if c1 != fp2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (fp1 FieldPath) prefixOf(fp2 FieldPath) bool {
+	return len(fp1) <= len(fp2) && fp1.equal(fp2[:len(fp1)])
+}
+
+// Lexicographic ordering.
+func (fp1 FieldPath) less(fp2 FieldPath) bool {
+	for i := range fp1 {
+		switch {
+		case i >= len(fp2):
+			return false
+		case fp1[i] < fp2[i]:
+			return true
+		case fp1[i] > fp2[i]:
+			return false
+		}
+	}
+	// fp1 and fp2 are equal up to len(fp1).
+	return len(fp1) < len(fp2)
+}
+
+// validate checks the validity of fp and returns an error if it is invalid.
+func (fp FieldPath) validate() error {
+	if len(fp) == 0 {
+		return errors.New("firestore: empty field path")
+	}
+	for _, c := range fp {
+		if len(c) == 0 {
+			return errors.New("firestore: empty component in field path")
+		}
+	}
+	return nil
+}
+
+// with creates a new FieldPath consisting of fp followed by k.
+func (fp FieldPath) with(k string) FieldPath {
+	r := make(FieldPath, len(fp), len(fp)+1)
+	copy(r, fp)
+	return append(r, k)
+}
+
+// in reports whether fp is equal to one of the fps.
+func (fp FieldPath) in(fps []FieldPath) bool {
+	for _, e := range fps {
+		if fp.equal(e) {
+			return true
+		}
+	}
+	return false
+}
+
+// checkNoDupOrPrefix checks whether any FieldPath is a prefix of (or equal to)
+// another.
+// It modifies the order of FieldPaths in its argument (via sorting).
+func checkNoDupOrPrefix(fps []FieldPath) error {
+	// Sort fps lexicographically.
+	sort.Sort(byPath(fps))
+	// Check adjacent pairs for prefix.
+	for i := 1; i < len(fps); i++ {
+		if fps[i-1].prefixOf(fps[i]) {
+			return fmt.Errorf("field path %v cannot be used in the same update as %v", fps[i-1], fps[i])
+		}
+	}
+	return nil
+}
+
+type byPath []FieldPath
+
+func (b byPath) Len() int           { return len(b) }
+func (b byPath) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byPath) Less(i, j int) bool { return b[i].less(b[j]) }
+
+// createMapFromUpdates uses a list of updates to construct a valid
+// Firestore data value in the form of a map. It assumes the FieldPaths in the updates
+// have already been validated and checked for prefixes. If any field path is associated
+// with the Delete value, it is not stored in the map.
+func createMapFromUpdates(fpvs []fpv) map[string]interface{} {
+	m := map[string]interface{}{}
+	for _, v := range fpvs {
+		if v.value != Delete {
+			setAtPath(m, v.fieldPath, v.value)
+		}
+	}
+	return m
+}
+
+// setAtPath sets val at the location in m specified by fp, creating sub-maps as
+// needed. m must not be nil. fp is assumed to be valid.
+func setAtPath(m map[string]interface{}, fp FieldPath, val interface{}) {
+	if len(fp) == 1 {
+		m[fp[0]] = val
+	} else {
+		v, ok := m[fp[0]]
+		if !ok {
+			v = map[string]interface{}{}
+			m[fp[0]] = v
+		}
+		// The type assertion below cannot fail, because setAtPath is only called
+		// with either an empty map or one filled by setAtPath itself, and the
+		// set of FieldPaths it is called with has been checked to make sure that
+		// no path is the prefix of any other.
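+		// For example, applying {"a","b"}:1 and then {"a","c"}:2 yields
+		// map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}
+		// (see TestCreateMapFromUpdates).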
+		setAtPath(v.(map[string]interface{}), fp[1:], val)
+	}
+}
+
+// toServiceFieldPath converts fp to the form required by the Firestore service.
+// It assumes fp has been validated.
+func (fp FieldPath) toServiceFieldPath() string {
+	cs := make([]string, len(fp))
+	for i, c := range fp {
+		cs[i] = toServiceFieldPathComponent(c)
+	}
+	return strings.Join(cs, ".")
+}
+
+func toServiceFieldPaths(fps []FieldPath) []string {
+	var sfps []string
+	for _, fp := range fps {
+		sfps = append(sfps, fp.toServiceFieldPath())
+	}
+	return sfps
+}
+
+// Google SQL syntax for an unquoted field.
+var unquotedFieldRegexp = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$")
+
+// toServiceFieldPathComponent returns a string that represents key and is a valid
+// field path component.
+func toServiceFieldPathComponent(key string) string {
+	if unquotedFieldRegexp.MatchString(key) {
+		return key
+	}
+	var buf bytes.Buffer
+	buf.WriteRune('`')
+	for _, r := range key {
+		if r == '`' || r == '\\' {
+			buf.WriteRune('\\')
+		}
+		buf.WriteRune(r)
+	}
+	buf.WriteRune('`')
+	return buf.String()
+}
diff --git a/vendor/cloud.google.com/go/firestore/fieldpath_test.go b/vendor/cloud.google.com/go/firestore/fieldpath_test.go
new file mode 100644
index 0000000..a9b3f0a
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/fieldpath_test.go
@@ -0,0 +1,147 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package firestore + +import ( + "strings" + "testing" +) + +func TestFieldPathValidate(t *testing.T) { + for _, in := range [][]string{nil, []string{}, []string{"a", "", "b"}} { + if err := FieldPath(in).validate(); err == nil { + t.Errorf("%v: want error, got nil", in) + } + } +} + +func TestFieldPathLess(t *testing.T) { + for _, test := range []struct { + in1, in2 string + want bool + }{ + {"a b", "a b", false}, + {"a", "b", true}, + {"b", "a", false}, + {"a", "a b", true}, + {"a b", "a", false}, + {"a b c", "a b d", true}, + {"a b d", "a b c", false}, + } { + fp1 := FieldPath(strings.Fields(test.in1)) + fp2 := FieldPath(strings.Fields(test.in2)) + got := fp1.less(fp2) + if got != test.want { + t.Errorf("%q.less(%q): got %t, want %t", test.in1, test.in2, got, test.want) + } + } +} + +func TestCheckForPrefix(t *testing.T) { + for _, test := range []struct { + in []string // field paths as space-separated strings + wantErr bool + }{ + {in: []string{"a", "b", "c"}, wantErr: false}, + {in: []string{"a b", "b", "c d"}, wantErr: false}, + {in: []string{"a b", "a c", "a d"}, wantErr: false}, + {in: []string{"a b", "b", "b d"}, wantErr: true}, + {in: []string{"a b", "b", "b d"}, wantErr: true}, + {in: []string{"b c d", "c d", "b c"}, wantErr: true}, + } { + var fps []FieldPath + for _, s := range test.in { + fps = append(fps, strings.Fields(s)) + } + err := checkNoDupOrPrefix(fps) + if got, want := (err != nil), test.wantErr; got != want { + t.Errorf("%#v: got '%v', want %t", test.in, err, want) + } + } +} + +func TestCreateMapFromUpdates(t *testing.T) { + type M map[string]interface{} + + for _, test := range []struct { + fpvs []fpv + want M + }{ + { + fpvs: nil, + want: M{}, + }, + { + fpvs: []fpv{{[]string{"a"}, 1}, {[]string{"b"}, 2}}, + want: M{"a": 1, "b": 2}, + }, + { + fpvs: []fpv{{[]string{"a", "b"}, 1}, {[]string{"c"}, 2}}, + want: M{"a": map[string]interface{}{"b": 1}, "c": 2}, + }, + { + fpvs: []fpv{{[]string{"a", "b"}, 1}, {[]string{"c", "d"}, 2}}, + want: M{ + "a": map[string]interface{}{"b": 1}, + "c": map[string]interface{}{"d": 2}, + }, + }, + { + fpvs: []fpv{{[]string{"a", "b"}, 1}, {[]string{"a", "c"}, 2}}, + want: M{"a": map[string]interface{}{"b": 1, "c": 2}}, + }, + } { + gotm := createMapFromUpdates(test.fpvs) + got := M(gotm) + if !testEqual(got, test.want) { + t.Errorf("%v: got %#v, want %#v", test.fpvs, got, test.want) + } + } +} + +func TestToServiceFieldPath(t *testing.T) { + for _, test := range []struct { + in FieldPath + want string + }{ + {[]string{"a"}, "a"}, + {[]string{"a", "b"}, "a.b"}, + {[]string{"a.", "[b*", "c2"}, "`a.`.`[b*`.c2"}, + {[]string{"`a", `b\`}, "`\\`a`.`b\\\\`"}, + } { + got := test.in.toServiceFieldPath() + if got != test.want { + t.Errorf("%v: got %s, want %s", test.in, got, test.want) + } + } +} + +func TestToServiceFieldPathComponent(t *testing.T) { + for _, test := range []struct { + in, want string + }{ + {"", "``"}, + {"clam_chowder23", "clam_chowder23"}, + {"23skidoo", "`23skidoo`"}, + {"bak`tik", "`bak\\`tik`"}, + {"a\\b", "`a\\\\b`"}, + {"dots.are.confusing", "`dots.are.confusing`"}, + } { + got := toServiceFieldPathComponent(test.in) + if got != test.want { + t.Errorf("%q: got %q, want %q", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/from_value.go b/vendor/cloud.google.com/go/firestore/from_value.go new file mode 100644 index 0000000..cc940b6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/from_value.go @@ -0,0 +1,423 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "reflect" + "strings" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" +) + +func setFromProtoValue(x interface{}, vproto *pb.Value, c *Client) error { + v := reflect.ValueOf(x) + if v.Kind() != reflect.Ptr || v.IsNil() { + return errors.New("firestore: nil or not a pointer") + } + return setReflectFromProtoValue(v.Elem(), vproto, c) +} + +// setReflectFromProtoValue sets v from a Firestore Value. +// v must be a settable value. +func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) error { + typeErr := func() error { + return fmt.Errorf("firestore: cannot set type %s to %s", v.Type(), typeString(vproto)) + } + + val := vproto.ValueType + // A Null value sets anything nullable to nil, and has no effect + // on anything else. + if _, ok := val.(*pb.Value_NullValue); ok { + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + } + return nil + } + + // Handle special types first. + switch v.Type() { + case typeOfByteSlice: + x, ok := val.(*pb.Value_BytesValue) + if !ok { + return typeErr() + } + v.SetBytes(x.BytesValue) + return nil + + case typeOfGoTime: + x, ok := val.(*pb.Value_TimestampValue) + if !ok { + return typeErr() + } + t, err := ptypes.Timestamp(x.TimestampValue) + if err != nil { + return err + } + v.Set(reflect.ValueOf(t)) + return nil + + case typeOfLatLng: + x, ok := val.(*pb.Value_GeoPointValue) + if !ok { + return typeErr() + } + v.Set(reflect.ValueOf(x.GeoPointValue)) + return nil + + case typeOfDocumentRef: + x, ok := val.(*pb.Value_ReferenceValue) + if !ok { + return typeErr() + } + dr, err := pathToDoc(x.ReferenceValue, c) + if err != nil { + return err + } + v.Set(reflect.ValueOf(dr)) + return nil + } + + switch v.Kind() { + case reflect.Bool: + x, ok := val.(*pb.Value_BooleanValue) + if !ok { + return typeErr() + } + v.SetBool(x.BooleanValue) + + case reflect.String: + x, ok := val.(*pb.Value_StringValue) + if !ok { + return typeErr() + } + v.SetString(x.StringValue) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + switch x := val.(type) { + case *pb.Value_IntegerValue: + i = x.IntegerValue + case *pb.Value_DoubleValue: + f := x.DoubleValue + i = int64(f) + if float64(i) != f { + return fmt.Errorf("firestore: float %f does not fit into %s", f, v.Type()) + } + default: + return typeErr() + } + if v.OverflowInt(i) { + return overflowErr(v, i) + } + v.SetInt(i) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + var u uint64 + switch x := val.(type) { + case *pb.Value_IntegerValue: + u = uint64(x.IntegerValue) + case *pb.Value_DoubleValue: + f := x.DoubleValue + u = uint64(f) + if float64(u) != f { + return fmt.Errorf("firestore: float %f does not fit into %s", f, v.Type()) + } + default: + return typeErr() + } + if v.OverflowUint(u) { + return overflowErr(v, u) + } + 
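+ // As in the signed case above, a DoubleValue is accepted here only if it
+ // round-trips through uint64 unchanged, and the OverflowUint check just
+ // above guards the destination width before the store below.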
v.SetUint(u) + + case reflect.Float32, reflect.Float64: + var f float64 + switch x := val.(type) { + case *pb.Value_DoubleValue: + f = x.DoubleValue + case *pb.Value_IntegerValue: + f = float64(x.IntegerValue) + if int64(f) != x.IntegerValue { + return overflowErr(v, x.IntegerValue) + } + default: + return typeErr() + } + if v.OverflowFloat(f) { + return overflowErr(v, f) + } + v.SetFloat(f) + + case reflect.Slice: + x, ok := val.(*pb.Value_ArrayValue) + if !ok { + return typeErr() + } + vals := x.ArrayValue.Values + vlen := v.Len() + xlen := len(vals) + // Make a slice of the right size, avoiding allocation if possible. + switch { + case vlen < xlen: + v.Set(reflect.MakeSlice(v.Type(), xlen, xlen)) + case vlen > xlen: + v.SetLen(xlen) + } + return populateRepeated(v, vals, xlen, c) + + case reflect.Array: + x, ok := val.(*pb.Value_ArrayValue) + if !ok { + return typeErr() + } + vals := x.ArrayValue.Values + xlen := len(vals) + vlen := v.Len() + minlen := vlen + // Set extra elements to their zero value. + if vlen > xlen { + z := reflect.Zero(v.Type().Elem()) + for i := xlen; i < vlen; i++ { + v.Index(i).Set(z) + } + minlen = xlen + } + return populateRepeated(v, vals, minlen, c) + + case reflect.Map: + x, ok := val.(*pb.Value_MapValue) + if !ok { + return typeErr() + } + return populateMap(v, x.MapValue.Fields, c) + + case reflect.Ptr: + // If the pointer is nil, set it to a zero value. + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return setReflectFromProtoValue(v.Elem(), vproto, c) + + case reflect.Struct: + x, ok := val.(*pb.Value_MapValue) + if !ok { + return typeErr() + } + return populateStruct(v, x.MapValue.Fields, c) + + case reflect.Interface: + if v.NumMethod() == 0 { // empty interface + // If v holds a pointer, set the pointer. + if !v.IsNil() && v.Elem().Kind() == reflect.Ptr { + return setReflectFromProtoValue(v.Elem(), vproto, c) + } + // Otherwise, create a fresh value. + x, err := createFromProtoValue(vproto, c) + if err != nil { + return err + } + v.Set(reflect.ValueOf(x)) + return nil + } + // Any other kind of interface is an error. + fallthrough + + default: + return fmt.Errorf("firestore: cannot set type %s", v.Type()) + } + return nil +} + +// populateRepeated sets the first n elements of vr, which must be a slice or +// array, to the corresponding elements of vals. +func populateRepeated(vr reflect.Value, vals []*pb.Value, n int, c *Client) error { + for i := 0; i < n; i++ { + if err := setReflectFromProtoValue(vr.Index(i), vals[i], c); err != nil { + return err + } + } + return nil +} + +// populateMap sets the elements of vm, which must be a map, from the +// corresponding elements of pm. +// +// Since a map value is not settable, this function always creates a new +// element for each corresponding map key. Existing values of vm are +// overwritten. This happens even if the map value is something like a pointer +// to a struct, where we could in theory populate the existing struct value +// instead of discarding it. This behavior matches encoding/json. 
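+// For example, decoding the Firestore map {"a": 1} into a map[string]*int
+// always stores a freshly allocated *int under "a", even if the Go map
+// already held a pointer for that key.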
+func populateMap(vm reflect.Value, pm map[string]*pb.Value, c *Client) error { + t := vm.Type() + if t.Key().Kind() != reflect.String { + return errors.New("firestore: map key type is not string") + } + if vm.IsNil() { + vm.Set(reflect.MakeMap(t)) + } + et := t.Elem() + for k, vproto := range pm { + el := reflect.New(et).Elem() + if err := setReflectFromProtoValue(el, vproto, c); err != nil { + return err + } + vm.SetMapIndex(reflect.ValueOf(k), el) + } + return nil +} + +// createMapFromValueMap creates a fresh map and populates it with pm. +func createMapFromValueMap(pm map[string]*pb.Value, c *Client) (map[string]interface{}, error) { + m := map[string]interface{}{} + for k, pv := range pm { + v, err := createFromProtoValue(pv, c) + if err != nil { + return nil, err + } + m[k] = v + } + return m, nil +} + +// populateStruct sets the fields of vs, which must be a struct, from +// the matching elements of pm. +func populateStruct(vs reflect.Value, pm map[string]*pb.Value, c *Client) error { + fields, err := fieldCache.Fields(vs.Type()) + if err != nil { + return err + } + for k, vproto := range pm { + f := fields.Match(k) + if f == nil { + continue + } + if err := setReflectFromProtoValue(vs.FieldByIndex(f.Index), vproto, c); err != nil { + return fmt.Errorf("%s.%s: %v", vs.Type(), f.Name, err) + } + } + return nil +} + +func createFromProtoValue(vproto *pb.Value, c *Client) (interface{}, error) { + switch v := vproto.ValueType.(type) { + case *pb.Value_NullValue: + return nil, nil + case *pb.Value_BooleanValue: + return v.BooleanValue, nil + case *pb.Value_IntegerValue: + return v.IntegerValue, nil + case *pb.Value_DoubleValue: + return v.DoubleValue, nil + case *pb.Value_TimestampValue: + return ptypes.Timestamp(v.TimestampValue) + case *pb.Value_StringValue: + return v.StringValue, nil + case *pb.Value_BytesValue: + return v.BytesValue, nil + case *pb.Value_ReferenceValue: + return pathToDoc(v.ReferenceValue, c) + case *pb.Value_GeoPointValue: + return v.GeoPointValue, nil + + case *pb.Value_ArrayValue: + vals := v.ArrayValue.Values + ret := make([]interface{}, len(vals)) + for i, v := range vals { + r, err := createFromProtoValue(v, c) + if err != nil { + return nil, err + } + ret[i] = r + } + return ret, nil + + case *pb.Value_MapValue: + fields := v.MapValue.Fields + ret := make(map[string]interface{}, len(fields)) + for k, v := range fields { + r, err := createFromProtoValue(v, c) + if err != nil { + return nil, err + } + ret[k] = r + } + return ret, nil + + default: + return nil, fmt.Errorf("firestore: unknown value type %T", v) + } +} + +// Convert a document path to a DocumentRef. +func pathToDoc(docPath string, c *Client) (*DocumentRef, error) { + projID, dbID, docIDs, err := parseDocumentPath(docPath) + if err != nil { + return nil, err + } + parentResourceName := fmt.Sprintf("projects/%s/databases/%s", projID, dbID) + _, doc := c.idsToRef(docIDs, parentResourceName) + return doc, nil +} + +// A document path should be of the form "projects/P/databases/D/documents/coll1/doc1/coll2/doc2/...". 
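+// For example, "projects/P/databases/D/documents/c1/d1/c2/d2" yields
+// projectID "P", databaseID "D", and docPath ["c1" "d1" "c2" "d2"]. An odd
+// number of components after "documents" names a collection rather than a
+// document, and is rejected.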
+func parseDocumentPath(path string) (projectID, databaseID string, docPath []string, err error) { + parts := strings.Split(path, "/") + if len(parts) < 6 || parts[0] != "projects" || parts[2] != "databases" || parts[4] != "documents" { + return "", "", nil, fmt.Errorf("firestore: malformed document path %q", path) + } + docp := parts[5:] + if len(docp)%2 != 0 { + return "", "", nil, fmt.Errorf("firestore: path %q refers to collection, not document", path) + } + return parts[1], parts[3], docp, nil +} + +func typeString(vproto *pb.Value) string { + switch vproto.ValueType.(type) { + case *pb.Value_NullValue: + return "null" + case *pb.Value_BooleanValue: + return "bool" + case *pb.Value_IntegerValue: + return "int" + case *pb.Value_DoubleValue: + return "float" + case *pb.Value_TimestampValue: + return "timestamp" + case *pb.Value_StringValue: + return "string" + case *pb.Value_BytesValue: + return "bytes" + case *pb.Value_ReferenceValue: + return "reference" + case *pb.Value_GeoPointValue: + return "GeoPoint" + case *pb.Value_MapValue: + return "map" + case *pb.Value_ArrayValue: + return "array" + default: + return "" + } +} + +func overflowErr(v reflect.Value, x interface{}) error { + return fmt.Errorf("firestore: value %v overflows type %s", x, v.Type()) +} diff --git a/vendor/cloud.google.com/go/firestore/from_value_test.go b/vendor/cloud.google.com/go/firestore/from_value_test.go new file mode 100644 index 0000000..c5a1b8b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/from_value_test.go @@ -0,0 +1,546 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
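
Before the tests, a short self-contained sketch of the numeric conversion rule they exercise: a Firestore double may populate a Go integer destination only when it has no fractional part. intFromFloat is a hypothetical helper for illustration, not part of the package; it mirrors the round-trip check in setReflectFromProtoValue.

package main

import "fmt"

// intFromFloat accepts a float for an integer destination only if the
// conversion round-trips exactly, matching the decoder's behavior.
func intFromFloat(f float64) (int64, error) {
	i := int64(f)
	if float64(i) != f {
		return 0, fmt.Errorf("float %f does not fit into int64", f)
	}
	return i, nil
}

func main() {
	fmt.Println(intFromFloat(1))   // 1 <nil>
	fmt.Println(intFromFloat(2.5)) // 0 and an error
}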
+ +package firestore + +import ( + "encoding/json" + "fmt" + "io" + "math" + "reflect" + "strings" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "google.golang.org/genproto/googleapis/type/latlng" +) + +var ( + tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC) + ll = &latlng.LatLng{Latitude: 20, Longitude: 30} +) + +func TestCreateFromProtoValue(t *testing.T) { + for _, test := range []struct { + in *pb.Value + want interface{} + }{ + {in: nullValue, want: nil}, + {in: boolval(true), want: true}, + {in: intval(3), want: int64(3)}, + {in: floatval(1.5), want: 1.5}, + {in: strval("str"), want: "str"}, + {in: tsval(tm), want: tm}, + { + in: bytesval([]byte{1, 2}), + want: []byte{1, 2}, + }, + { + in: &pb.Value{&pb.Value_GeoPointValue{ll}}, + want: ll, + }, + { + in: arrayval(intval(1), intval(2)), + want: []interface{}{int64(1), int64(2)}, + }, + { + in: arrayval(), + want: []interface{}{}, + }, + { + in: mapval(map[string]*pb.Value{"a": intval(1), "b": intval(2)}), + want: map[string]interface{}{"a": int64(1), "b": int64(2)}, + }, + { + in: mapval(map[string]*pb.Value{}), + want: map[string]interface{}{}, + }, + { + in: refval("projects/P/databases/D/documents/c/d"), + want: &DocumentRef{ + ID: "d", + Parent: &CollectionRef{ + ID: "c", + parentPath: "projects/P/databases/D", + Path: "projects/P/databases/D/documents/c", + Query: Query{collectionID: "c", parentPath: "projects/P/databases/D"}, + }, + Path: "projects/P/databases/D/documents/c/d", + }, + }, + } { + got, err := createFromProtoValue(test.in, nil) + if err != nil { + t.Errorf("%v: %v", test.in, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%+v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want) + } + } +} + +func TestSetFromProtoValue(t *testing.T) { + testSetFromProtoValue(t, "json", jsonTester{}) + testSetFromProtoValue(t, "firestore", protoTester{}) +} + +func testSetFromProtoValue(t *testing.T, prefix string, r tester) { + pi := newfloat(7) + s := []float64{7, 8} + ar1 := [1]float64{7} + ar2 := [2]float64{7, 8} + ar3 := [3]float64{7, 8, 9} + mf := map[string]float64{"a": 7} + + type T struct { + I **float64 + J float64 + } + + one := newfloat(1) + six := newfloat(6) + st := []*T{&T{I: &six}, nil, &T{I: &six, J: 7}} + vs := interface{}(T{J: 1}) + vm := interface{}(map[string]float64{"i": 1}) + var ( + i int + i8 int8 + i16 int16 + i32 int32 + i64 int64 + u8 uint8 + u16 uint16 + u32 uint32 + b bool + ll *latlng.LatLng + mi map[string]interface{} + ms map[string]T + ) + + for i, test := range []struct { + in interface{} + val interface{} + want interface{} + }{ + {&pi, r.Null(), (*float64)(nil)}, + {pi, r.Float(1), 1.0}, + {&s, r.Null(), ([]float64)(nil)}, + {&s, r.Array(r.Float(1), r.Float(2)), []float64{1, 2}}, + {&ar1, r.Array(r.Float(1), r.Float(2)), [1]float64{1}}, + {&ar2, r.Array(r.Float(1), r.Float(2)), [2]float64{1, 2}}, + {&ar3, r.Array(r.Float(1), r.Float(2)), [3]float64{1, 2, 0}}, + {&mf, r.Null(), (map[string]float64)(nil)}, + {&mf, r.Map("a", r.Float(1), "b", r.Float(2)), map[string]float64{"a": 1, "b": 2}}, + {&st, r.Array( + r.Null(), // overwrites st[0] with nil + r.Map("i", r.Float(1)), // sets st[1] to a new struct + r.Map("i", r.Float(2)), // modifies st[2] + ), + []*T{nil, &T{I: &one}, &T{I: &six, J: 7}}}, + {&mi, r.Map("a", r.Float(1), "b", r.Float(2)), map[string]interface{}{"a": 1.0, "b": 2.0}}, + {&ms, r.Map("a", r.Map("j", r.Float(1))), map[string]T{"a": T{J: 1}}}, + {&vs, r.Map("i", r.Float(2)), map[string]interface{}{"i": 2.0}}, + 
{&vm, r.Map("i", r.Float(2)), map[string]interface{}{"i": 2.0}}, + {&ll, r.Null(), (*latlng.LatLng)(nil)}, + {&i, r.Int(1), int(1)}, + {&i8, r.Int(1), int8(1)}, + {&i16, r.Int(1), int16(1)}, + {&i32, r.Int(1), int32(1)}, + {&i64, r.Int(1), int64(1)}, + {&u8, r.Int(1), uint8(1)}, + {&u16, r.Int(1), uint16(1)}, + {&u32, r.Int(1), uint32(1)}, + {&b, r.Bool(true), true}, + {&i, r.Float(1), int(1)}, // can put a float with no fractional part into an int + {pi, r.Int(1), float64(1)}, // can put an int into a float + } { + if err := r.Set(test.in, test.val); err != nil { + t.Errorf("%s: #%d: got error %v", prefix, i, err) + continue + } + got := reflect.ValueOf(test.in).Elem().Interface() + if !testEqual(got, test.want) { + t.Errorf("%s: #%d, %v:\ngot\n%+v (%T)\nwant\n%+v (%T)", + prefix, i, test.val, got, got, test.want, test.want) + } + } +} + +func TestSetFromProtoValueNoJSON(t *testing.T) { + // Test code paths that we cannot compare to JSON. + var ( + bs []byte + tmi time.Time + lli *latlng.LatLng + ) + bytes := []byte{1, 2, 3} + + for i, test := range []struct { + in interface{} + val *pb.Value + want interface{} + }{ + {&bs, bytesval(bytes), bytes}, + {&tmi, tsval(tm), tm}, + {&lli, geoval(ll), ll}, + } { + if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil { + t.Errorf("#%d: got error %v", i, err) + continue + } + got := reflect.ValueOf(test.in).Elem().Interface() + if !testEqual(got, test.want) { + t.Errorf("#%d, %v:\ngot\n%+v (%T)\nwant\n%+v (%T)", + i, test.val, got, got, test.want, test.want) + } + } +} + +func TestSetFromProtoValueErrors(t *testing.T) { + c := &Client{} + ival := intval(3) + for i, test := range []struct { + in interface{} + val *pb.Value + }{ + {3, ival}, // not a pointer + {new(int8), intval(128)}, // int overflow + {new(uint8), intval(256)}, // uint overflow + {new(float32), floatval(2 * math.MaxFloat32)}, // float overflow + {new(uint), ival}, // cannot set type + {new(uint64), ival}, // cannot set type + {new(io.Reader), ival}, // cannot set type + {new(map[int]int), + mapval(map[string]*pb.Value{"x": ival})}, // map key type is not string + // the rest are all type mismatches + {new(bool), ival}, + {new(*latlng.LatLng), ival}, + {new(time.Time), ival}, + {new(string), ival}, + {new([]byte), ival}, + {new([]int), ival}, + {new([1]int), ival}, + {new(map[string]int), ival}, + {new(*bool), ival}, + {new(struct{}), ival}, + {new(int), floatval(2.5)}, // float must be integral + {new(uint16), intval(-1)}, // uint cannot be negative + {new(int16), floatval(math.MaxFloat32)}, // doesn't fit + {new(uint16), floatval(math.MaxFloat32)}, // doesn't fit + {new(float32), + &pb.Value{&pb.Value_IntegerValue{math.MaxInt64}}}, // overflow + } { + err := setFromProtoValue(test.in, test.val, c) + if err == nil { + t.Errorf("#%d: %v, %v: got nil, want error", i, test.in, test.val) + } + } +} + +func TestSetFromProtoValuePointers(t *testing.T) { + // Verify that pointers are set, instead of being replaced. + // Confirm that the behavior matches encoding/json. + testSetPointer(t, "json", jsonTester{}) + testSetPointer(t, "firestore", protoTester{&Client{}}) +} + +func testSetPointer(t *testing.T, prefix string, r tester) { + // If an interface{} holds a pointer, the pointer is set. + + set := func(x, val interface{}) { + if err := r.Set(x, val); err != nil { + t.Fatalf("%s: set(%v, %v): %v", prefix, x, val, err) + } + } + p := new(float64) + var st struct { + I interface{} + } + + // A pointer in a slice of interface{} is set. 
+ s := []interface{}{p} + set(&s, r.Array(r.Float(1))) + if s[0] != p { + t.Errorf("%s: pointers not identical", prefix) + } + if *p != 1 { + t.Errorf("%s: got %f, want 1", prefix, *p) + } + // Setting a null will set the pointer to nil. + set(&s, r.Array(r.Null())) + if got := s[0]; got != nil { + t.Errorf("%s: got %v, want null", prefix, got) + } + + // It doesn't matter how deep the pointers nest. + p = new(float64) + p2 := &p + p3 := &p2 + s = []interface{}{p3} + set(&s, r.Array(r.Float(1))) + if s[0] != p3 { + t.Errorf("%s: pointers not identical", prefix) + } + if *p != 1 { + t.Errorf("%s: got %f, want 1", prefix, *p) + } + + // A pointer in an interface{} field is set. + p = new(float64) + st.I = p + set(&st, r.Map("i", r.Float(1))) + if st.I != p { + t.Errorf("%s: pointers not identical", prefix) + } + if *p != 1 { + t.Errorf("%s: got %f, want 1", prefix, *p) + } + // Setting a null will set the pointer to nil. + set(&st, r.Map("i", r.Null())) + if got := st.I; got != nil { + t.Errorf("%s: got %v, want null", prefix, got) + } + + // A pointer to a slice (instead of to float64) is set. + psi := &[]float64{7, 8, 9} + st.I = psi + set(&st, r.Map("i", r.Array(r.Float(1)))) + if st.I != psi { + t.Errorf("%s: pointers not identical", prefix) + } + // The slice itself should be truncated and filled, not replaced. + if got, want := cap(*psi), 3; got != want { + t.Errorf("cap: got %d, want %d", got, want) + } + if want := &[]float64{1}; !testEqual(st.I, want) { + t.Errorf("got %+v, want %+v", st.I, want) + } + + // A pointer to a map is set. + pmf := &map[string]float64{"a": 7, "b": 8} + st.I = pmf + set(&st, r.Map("i", r.Map("a", r.Float(1)))) + if st.I != pmf { + t.Errorf("%s: pointers not identical", prefix) + } + if want := map[string]float64{"a": 1, "b": 8}; !testEqual(*pmf, want) { + t.Errorf("%s: got %+v, want %+v", prefix, *pmf, want) + } + + // Maps are different: since the map values aren't addressable, they + // are always discarded, even if the map element type is not interface{}. + + // A map's values are discarded if the value type is a pointer type. + p = new(float64) + m := map[string]*float64{"i": p} + set(&m, r.Map("i", r.Float(1))) + if m["i"] == p { + t.Errorf("%s: pointers are identical", prefix) + } + if got, want := *m["i"], 1.0; got != want { + t.Errorf("%s: got %v, want %v", prefix, got, want) + } + // A map's values are discarded if the value type is interface{}. + p = new(float64) + m2 := map[string]interface{}{"i": p} + set(&m2, r.Map("i", r.Float(1))) + if m2["i"] == p { + t.Errorf("%s: pointers are identical", prefix) + } + if got, want := m2["i"].(float64), 1.0; got != want { + t.Errorf("%s: got %f, want %f", prefix, got, want) + } +} + +// An interface for setting and building values, to facilitate comparing firestore deserialization +// with encoding/json. 
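+// Each method returns a value in the tester's own encoding (a *pb.Value for
+// protoTester, a JSON string for jsonTester), so a single table of cases can
+// drive both decoders and the results can be compared.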
+type tester interface { + Set(x, val interface{}) error + Null() interface{} + Int(int) interface{} + Float(float64) interface{} + Bool(bool) interface{} + Array(...interface{}) interface{} + Map(keysvals ...interface{}) interface{} +} + +type protoTester struct { + c *Client +} + +func (p protoTester) Set(x, val interface{}) error { return setFromProtoValue(x, val.(*pb.Value), p.c) } +func (protoTester) Null() interface{} { return nullValue } +func (protoTester) Int(i int) interface{} { return intval(i) } +func (protoTester) Float(f float64) interface{} { return floatval(f) } +func (protoTester) Bool(b bool) interface{} { return boolval(b) } + +func (protoTester) Array(els ...interface{}) interface{} { + var s []*pb.Value + for _, el := range els { + s = append(s, el.(*pb.Value)) + } + return arrayval(s...) +} + +func (protoTester) Map(keysvals ...interface{}) interface{} { + m := map[string]*pb.Value{} + for i := 0; i < len(keysvals); i += 2 { + m[keysvals[i].(string)] = keysvals[i+1].(*pb.Value) + } + return mapval(m) +} + +type jsonTester struct{} + +func (jsonTester) Set(x, val interface{}) error { return json.Unmarshal([]byte(val.(string)), x) } +func (jsonTester) Null() interface{} { return "null" } +func (jsonTester) Int(i int) interface{} { return fmt.Sprint(i) } +func (jsonTester) Float(f float64) interface{} { return fmt.Sprint(f) } + +func (jsonTester) Bool(b bool) interface{} { + if b { + return "true" + } + return "false" +} + +func (jsonTester) Array(els ...interface{}) interface{} { + var s []string + for _, el := range els { + s = append(s, el.(string)) + } + return "[" + strings.Join(s, ", ") + "]" +} + +func (jsonTester) Map(keysvals ...interface{}) interface{} { + var s []string + for i := 0; i < len(keysvals); i += 2 { + s = append(s, fmt.Sprintf("%q: %v", keysvals[i], keysvals[i+1])) + } + return "{" + strings.Join(s, ", ") + "}" +} + +func newfloat(f float64) *float64 { + p := new(float64) + *p = f + return p +} + +func TestParseDocumentPath(t *testing.T) { + for _, test := range []struct { + in string + pid, dbid string + dpath []string + }{ + {"projects/foo-bar/databases/db2/documents/c1/d1", + "foo-bar", "db2", []string{"c1", "d1"}}, + {"projects/P/databases/D/documents/c1/d1/c2/d2", + "P", "D", []string{"c1", "d1", "c2", "d2"}}, + } { + gotPid, gotDbid, gotDpath, err := parseDocumentPath(test.in) + if err != nil { + t.Fatal(err) + } + if got, want := gotPid, test.pid; got != want { + t.Errorf("project ID: got %q, want %q", got, want) + } + if got, want := gotDbid, test.dbid; got != want { + t.Errorf("db ID: got %q, want %q", got, want) + } + if got, want := gotDpath, test.dpath; !testEqual(got, want) { + t.Errorf("doc path: got %q, want %q", got, want) + } + } +} + +func TestParseDocumentPathErrors(t *testing.T) { + for _, badPath := range []string{ + "projects/P/databases/D/documents/c", // collection path + "/projects/P/databases/D/documents/c/d", // initial slash + "projects/P/databases/D/c/d", // missing "documents" + "project/P/database/D/document/c/d", + } { + // Every prefix of a bad path is also bad. 
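+ // (The loop below checks every such prefix, down to the empty string.)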
+ for i := 0; i <= len(badPath); i++ { + in := badPath[:i] + _, _, _, err := parseDocumentPath(in) + if err == nil { + t.Errorf("%q: got nil, want error", in) + } + } + } +} + +func TestPathToDoc(t *testing.T) { + c := &Client{} + path := "projects/P/databases/D/documents/c1/d1/c2/d2" + got, err := pathToDoc(path, c) + if err != nil { + t.Fatal(err) + } + want := &DocumentRef{ + ID: "d2", + Path: "projects/P/databases/D/documents/c1/d1/c2/d2", + Parent: &CollectionRef{ + ID: "c2", + parentPath: "projects/P/databases/D/documents/c1/d1", + Path: "projects/P/databases/D/documents/c1/d1/c2", + c: c, + Query: Query{c: c, collectionID: "c2", parentPath: "projects/P/databases/D/documents/c1/d1"}, + Parent: &DocumentRef{ + ID: "d1", + Path: "projects/P/databases/D/documents/c1/d1", + Parent: &CollectionRef{ + ID: "c1", + c: c, + parentPath: "projects/P/databases/D", + Path: "projects/P/databases/D/documents/c1", + Parent: nil, + Query: Query{c: c, collectionID: "c1", parentPath: "projects/P/databases/D"}, + }, + }, + }, + } + if !testEqual(got, want) { + t.Errorf("\ngot %+v\nwant %+v", got, want) + } +} + +func TestTypeString(t *testing.T) { + for _, test := range []struct { + in *pb.Value + want string + }{ + {nullValue, "null"}, + {intval(1), "int"}, + {floatval(1), "float"}, + {boolval(true), "bool"}, + {strval(""), "string"}, + {tsval(tm), "timestamp"}, + {geoval(ll), "GeoPoint"}, + {bytesval(nil), "bytes"}, + {refval(""), "reference"}, + {arrayval(nil), "array"}, + {mapval(nil), "map"}, + } { + got := typeString(test.in) + if got != test.want { + t.Errorf("%+v: got %q, want %q", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/genproto/README.md b/vendor/cloud.google.com/go/firestore/genproto/README.md new file mode 100644 index 0000000..146e7f4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/genproto/README.md @@ -0,0 +1,3 @@ +The contents of this directory are copied from +github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/genproto. + diff --git a/vendor/cloud.google.com/go/firestore/genproto/test.pb.go b/vendor/cloud.google.com/go/firestore/genproto/test.pb.go new file mode 100644 index 0000000..39f35e4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/genproto/test.pb.go @@ -0,0 +1,662 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +/* +Package tests is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + Test + GetTest + CreateTest + SetTest + UpdateTest + UpdatePathsTest + DeleteTest + SetOption + FieldPath +*/ +package tests + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_firestore_v1beta14 "google.golang.org/genproto/googleapis/firestore/v1beta1" +import google_firestore_v1beta1 "google.golang.org/genproto/googleapis/firestore/v1beta1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Test describes a single client method call and its expected result. 
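+// Exactly one of the oneof fields (get, create, set, update, update_paths,
+// delete) is populated, identifying which client method the case exercises.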
+type Test struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // Types that are valid to be assigned to Test: + // *Test_Get + // *Test_Create + // *Test_Set + // *Test_Update + // *Test_UpdatePaths + // *Test_Delete + Test isTest_Test `protobuf_oneof:"test"` +} + +func (m *Test) Reset() { *m = Test{} } +func (m *Test) String() string { return proto.CompactTextString(m) } +func (*Test) ProtoMessage() {} +func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isTest_Test interface { + isTest_Test() +} + +type Test_Get struct { + Get *GetTest `protobuf:"bytes,2,opt,name=get,oneof"` +} +type Test_Create struct { + Create *CreateTest `protobuf:"bytes,3,opt,name=create,oneof"` +} +type Test_Set struct { + Set *SetTest `protobuf:"bytes,4,opt,name=set,oneof"` +} +type Test_Update struct { + Update *UpdateTest `protobuf:"bytes,5,opt,name=update,oneof"` +} +type Test_UpdatePaths struct { + UpdatePaths *UpdatePathsTest `protobuf:"bytes,6,opt,name=update_paths,json=updatePaths,oneof"` +} +type Test_Delete struct { + Delete *DeleteTest `protobuf:"bytes,7,opt,name=delete,oneof"` +} + +func (*Test_Get) isTest_Test() {} +func (*Test_Create) isTest_Test() {} +func (*Test_Set) isTest_Test() {} +func (*Test_Update) isTest_Test() {} +func (*Test_UpdatePaths) isTest_Test() {} +func (*Test_Delete) isTest_Test() {} + +func (m *Test) GetTest() isTest_Test { + if m != nil { + return m.Test + } + return nil +} + +func (m *Test) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Test) GetGet() *GetTest { + if x, ok := m.GetTest().(*Test_Get); ok { + return x.Get + } + return nil +} + +func (m *Test) GetCreate() *CreateTest { + if x, ok := m.GetTest().(*Test_Create); ok { + return x.Create + } + return nil +} + +func (m *Test) GetSet() *SetTest { + if x, ok := m.GetTest().(*Test_Set); ok { + return x.Set + } + return nil +} + +func (m *Test) GetUpdate() *UpdateTest { + if x, ok := m.GetTest().(*Test_Update); ok { + return x.Update + } + return nil +} + +func (m *Test) GetUpdatePaths() *UpdatePathsTest { + if x, ok := m.GetTest().(*Test_UpdatePaths); ok { + return x.UpdatePaths + } + return nil +} + +func (m *Test) GetDelete() *DeleteTest { + if x, ok := m.GetTest().(*Test_Delete); ok { + return x.Delete + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Test_OneofMarshaler, _Test_OneofUnmarshaler, _Test_OneofSizer, []interface{}{ + (*Test_Get)(nil), + (*Test_Create)(nil), + (*Test_Set)(nil), + (*Test_Update)(nil), + (*Test_UpdatePaths)(nil), + (*Test_Delete)(nil), + } +} + +func _Test_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Test) + // test + switch x := m.Test.(type) { + case *Test_Get: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Get); err != nil { + return err + } + case *Test_Create: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Create); err != nil { + return err + } + case *Test_Set: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Set); err != nil { + return err + } + case *Test_Update: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Update); err != nil { + return err + } + case *Test_UpdatePaths: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UpdatePaths); err != nil { + return err + } + case *Test_Delete: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Delete); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Test.Test has unexpected type %T", x) + } + return nil +} + +func _Test_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Test) + switch tag { + case 2: // test.get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GetTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Get{msg} + return true, err + case 3: // test.create + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CreateTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Create{msg} + return true, err + case 4: // test.set + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SetTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Set{msg} + return true, err + case 5: // test.update + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UpdateTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Update{msg} + return true, err + case 6: // test.update_paths + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UpdatePathsTest) + err := b.DecodeMessage(msg) + m.Test = &Test_UpdatePaths{msg} + return true, err + case 7: // test.delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeleteTest) + err := b.DecodeMessage(msg) + m.Test = &Test_Delete{msg} + return true, err + default: + return false, nil + } +} + +func _Test_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Test) + // test + switch x := m.Test.(type) { + case *Test_Get: + s := proto.Size(x.Get) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Create: + s := proto.Size(x.Create) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Set: + s := proto.Size(x.Set) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Update: + s := proto.Size(x.Update) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_UpdatePaths: + 
s := proto.Size(x.UpdatePaths) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Test_Delete: + s := proto.Size(x.Delete) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Call to the DocumentRef.Get method. +type GetTest struct { + // The path of the doc, e.g. "projects/projectID/databases/(default)/documents/C/d" + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + // The request that the call should send to the Firestore service. + Request *google_firestore_v1beta14.GetDocumentRequest `protobuf:"bytes,2,opt,name=request" json:"request,omitempty"` +} + +func (m *GetTest) Reset() { *m = GetTest{} } +func (m *GetTest) String() string { return proto.CompactTextString(m) } +func (*GetTest) ProtoMessage() {} +func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *GetTest) GetRequest() *google_firestore_v1beta14.GetDocumentRequest { + if m != nil { + return m.Request + } + return nil +} + +// Call to DocumentRef.Create. +type CreateTest struct { + // The path of the doc, e.g. "projects/projectID/databases/(default)/documents/C/d" + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + // The data passed to Create, as JSON. The strings "Delete" and "ServerTimestamp" + // denote the two special sentinel values. Values that could be interpreted as integers + // (i.e. digit strings) should be treated as integers. + JsonData string `protobuf:"bytes,2,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` + // The request that the call should generate. + Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"` + // If true, the call should result in an error without generating a request. + // If this is true, request should not be set. + IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *CreateTest) Reset() { *m = CreateTest{} } +func (m *CreateTest) String() string { return proto.CompactTextString(m) } +func (*CreateTest) ProtoMessage() {} +func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *CreateTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *CreateTest) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +func (m *CreateTest) GetRequest() *google_firestore_v1beta14.CommitRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *CreateTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +// A call to DocumentRef.Set. 
+type SetTest struct { + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + Option *SetOption `protobuf:"bytes,2,opt,name=option" json:"option,omitempty"` + JsonData string `protobuf:"bytes,3,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` + Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"` + IsError bool `protobuf:"varint,5,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *SetTest) Reset() { *m = SetTest{} } +func (m *SetTest) String() string { return proto.CompactTextString(m) } +func (*SetTest) ProtoMessage() {} +func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SetTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *SetTest) GetOption() *SetOption { + if m != nil { + return m.Option + } + return nil +} + +func (m *SetTest) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +func (m *SetTest) GetRequest() *google_firestore_v1beta14.CommitRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *SetTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +// A call to the form of DocumentRef.Update that represents the data as a map +// or dictionary. +type UpdateTest struct { + DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"` + Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"` + JsonData string `protobuf:"bytes,3,opt,name=json_data,json=jsonData" json:"json_data,omitempty"` + Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"` + IsError bool `protobuf:"varint,5,opt,name=is_error,json=isError" json:"is_error,omitempty"` +} + +func (m *UpdateTest) Reset() { *m = UpdateTest{} } +func (m *UpdateTest) String() string { return proto.CompactTextString(m) } +func (*UpdateTest) ProtoMessage() {} +func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *UpdateTest) GetDocRefPath() string { + if m != nil { + return m.DocRefPath + } + return "" +} + +func (m *UpdateTest) GetPrecondition() *google_firestore_v1beta1.Precondition { + if m != nil { + return m.Precondition + } + return nil +} + +func (m *UpdateTest) GetJsonData() string { + if m != nil { + return m.JsonData + } + return "" +} + +func (m *UpdateTest) GetRequest() *google_firestore_v1beta14.CommitRequest { + if m != nil { + return m.Request + } + return nil +} + +func (m *UpdateTest) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +// A call to the form of DocumentRef.Update that represents the data as a list +// of field paths and their values. 
+type UpdatePathsTest struct {
+ DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
+ Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
+ // parallel sequences: field_paths[i] corresponds to json_values[i]
+ FieldPaths []*FieldPath `protobuf:"bytes,3,rep,name=field_paths,json=fieldPaths" json:"field_paths,omitempty"`
+ JsonValues []string `protobuf:"bytes,4,rep,name=json_values,json=jsonValues" json:"json_values,omitempty"`
+ Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,5,opt,name=request" json:"request,omitempty"`
+ IsError bool `protobuf:"varint,6,opt,name=is_error,json=isError" json:"is_error,omitempty"`
+}
+
+func (m *UpdatePathsTest) Reset() { *m = UpdatePathsTest{} }
+func (m *UpdatePathsTest) String() string { return proto.CompactTextString(m) }
+func (*UpdatePathsTest) ProtoMessage() {}
+func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *UpdatePathsTest) GetDocRefPath() string {
+ if m != nil {
+ return m.DocRefPath
+ }
+ return ""
+}
+
+func (m *UpdatePathsTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
+ if m != nil {
+ return m.Precondition
+ }
+ return nil
+}
+
+func (m *UpdatePathsTest) GetFieldPaths() []*FieldPath {
+ if m != nil {
+ return m.FieldPaths
+ }
+ return nil
+}
+
+func (m *UpdatePathsTest) GetJsonValues() []string {
+ if m != nil {
+ return m.JsonValues
+ }
+ return nil
+}
+
+func (m *UpdatePathsTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *UpdatePathsTest) GetIsError() bool {
+ if m != nil {
+ return m.IsError
+ }
+ return false
+}
+
+// A call to DocumentRef.Delete
+type DeleteTest struct {
+ DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
+ Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
+ Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"`
+ IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"`
+}
+
+func (m *DeleteTest) Reset() { *m = DeleteTest{} }
+func (m *DeleteTest) String() string { return proto.CompactTextString(m) }
+func (*DeleteTest) ProtoMessage() {}
+func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *DeleteTest) GetDocRefPath() string {
+ if m != nil {
+ return m.DocRefPath
+ }
+ return ""
+}
+
+func (m *DeleteTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
+ if m != nil {
+ return m.Precondition
+ }
+ return nil
+}
+
+func (m *DeleteTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *DeleteTest) GetIsError() bool {
+ if m != nil {
+ return m.IsError
+ }
+ return false
+}
+
+// An option to the DocumentRef.Set call.
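+// (In the conformance tests, all appears to correspond to a MergeAll-style
+// Set, while fields lists the specific field paths for a Merge.)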
+type SetOption struct { + All bool `protobuf:"varint,1,opt,name=all" json:"all,omitempty"` + Fields []*FieldPath `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` +} + +func (m *SetOption) Reset() { *m = SetOption{} } +func (m *SetOption) String() string { return proto.CompactTextString(m) } +func (*SetOption) ProtoMessage() {} +func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *SetOption) GetAll() bool { + if m != nil { + return m.All + } + return false +} + +func (m *SetOption) GetFields() []*FieldPath { + if m != nil { + return m.Fields + } + return nil +} + +// A field path. +type FieldPath struct { + Field []string `protobuf:"bytes,1,rep,name=field" json:"field,omitempty"` +} + +func (m *FieldPath) Reset() { *m = FieldPath{} } +func (m *FieldPath) String() string { return proto.CompactTextString(m) } +func (*FieldPath) ProtoMessage() {} +func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *FieldPath) GetField() []string { + if m != nil { + return m.Field + } + return nil +} + +func init() { + proto.RegisterType((*Test)(nil), "tests.Test") + proto.RegisterType((*GetTest)(nil), "tests.GetTest") + proto.RegisterType((*CreateTest)(nil), "tests.CreateTest") + proto.RegisterType((*SetTest)(nil), "tests.SetTest") + proto.RegisterType((*UpdateTest)(nil), "tests.UpdateTest") + proto.RegisterType((*UpdatePathsTest)(nil), "tests.UpdatePathsTest") + proto.RegisterType((*DeleteTest)(nil), "tests.DeleteTest") + proto.RegisterType((*SetOption)(nil), "tests.SetOption") + proto.RegisterType((*FieldPath)(nil), "tests.FieldPath") +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 559 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0x71, 0xe2, 0x24, 0x93, 0x08, 0xca, 0x0a, 0x21, 0x53, 0x0e, 0x18, 0x4b, 0x40, 0x24, + 0x50, 0xaa, 0xc0, 0x91, 0x13, 0x34, 0xb4, 0x88, 0x0b, 0xd5, 0x16, 0xb8, 0x46, 0xae, 0x3d, 0x09, + 0x46, 0x8e, 0xd7, 0xec, 0xae, 0xfb, 0x9f, 0x38, 0x72, 0xe7, 0x47, 0x70, 0xe4, 0x8f, 0x70, 0x47, + 0xfb, 0xe1, 0xda, 0x06, 0x59, 0xca, 0xa1, 0xb4, 0xb7, 0xf5, 0x9b, 0x37, 0x1f, 0xef, 0xcd, 0x6e, + 0x02, 0x20, 0x51, 0xc8, 0x79, 0xc1, 0x99, 0x64, 0x64, 0xa0, 0xce, 0x62, 0x7f, 0xb6, 0x61, 0x6c, + 0x93, 0xe1, 0xc1, 0x3a, 0xe5, 0x28, 0x24, 0xe3, 0x78, 0x70, 0xbe, 0x38, 0x43, 0x19, 0x2d, 0x6a, + 0xc4, 0x24, 0xec, 0x3f, 0xea, 0x64, 0xc6, 0x6c, 0xbb, 0x65, 0xb9, 0xa1, 0x85, 0x3f, 0x7a, 0xd0, + 0xff, 0x80, 0x42, 0x92, 0x00, 0x26, 0x09, 0x8a, 0x98, 0xa7, 0x85, 0x4c, 0x59, 0xee, 0x3b, 0x81, + 0x33, 0x1b, 0xd3, 0x26, 0x44, 0x42, 0x70, 0x37, 0x28, 0xfd, 0x5e, 0xe0, 0xcc, 0x26, 0xcf, 0x6f, + 0xce, 0xf5, 0x40, 0xf3, 0x63, 0x94, 0x2a, 0xfd, 0xed, 0x0d, 0xaa, 0x82, 0xe4, 0x29, 0x78, 0x31, + 0xc7, 0x48, 0xa2, 0xef, 0x6a, 0xda, 0x6d, 0x4b, 0x3b, 0xd4, 0xa0, 0x65, 0x5a, 0x8a, 0x2a, 0x28, + 0x50, 0xfa, 0xfd, 0x56, 0xc1, 0xd3, 0xba, 0xa0, 0x30, 0x05, 0xcb, 0x22, 0x51, 0x05, 0x07, 0xad, + 0x82, 0x1f, 0x35, 0x58, 0x15, 0x34, 0x14, 0xf2, 0x12, 0xa6, 0xe6, 0xb4, 0x2a, 0x22, 0xf9, 0x59, + 0xf8, 0x9e, 0x4e, 0xb9, 0xdb, 0x4a, 0x39, 0x51, 0x11, 0x9b, 0x37, 0x29, 0x6b, 0x48, 0x75, 0x4a, + 0x30, 0x43, 0x89, 0xfe, 0xb0, 0xd5, 0x69, 0xa9, 0xc1, 0xaa, 0x93, 0xa1, 0xbc, 0xf6, 0xa0, 0xaf, + 0xa2, 0xa1, 0x80, 0xa1, 0x75, 0x80, 0x04, 0x30, 0x4d, 0x58, 0xbc, 0xe2, 0xb8, 0xd6, 0xdd, 0xad, + 0x83, 0x90, 0xb0, 0x98, 0xe2, 0x5a, 0xb5, 0x20, 0x47, 0x30, 0xe4, 0xf8, 0xb5, 0x44, 0x51, 
0x99, + 0xf8, 0x6c, 0x6e, 0x96, 0x34, 0xaf, 0x97, 0x67, 0x97, 0xa4, 0x7c, 0x5d, 0xb2, 0xb8, 0xdc, 0x62, + 0x2e, 0xa9, 0xc9, 0xa1, 0x55, 0x72, 0xf8, 0xcd, 0x01, 0xa8, 0x0d, 0xdd, 0xa1, 0xf1, 0x7d, 0x18, + 0x7f, 0x11, 0x2c, 0x5f, 0x25, 0x91, 0x8c, 0x74, 0xeb, 0x31, 0x1d, 0x29, 0x60, 0x19, 0xc9, 0x88, + 0xbc, 0xaa, 0xa7, 0x32, 0x3b, 0x7b, 0xd2, 0x3d, 0xd5, 0x21, 0xdb, 0x6e, 0xd3, 0x7f, 0x06, 0x22, + 0xf7, 0x60, 0x94, 0x8a, 0x15, 0x72, 0xce, 0xb8, 0xde, 0xe6, 0x88, 0x0e, 0x53, 0xf1, 0x46, 0x7d, + 0x86, 0x3f, 0x1d, 0x18, 0x9e, 0xee, 0xec, 0xd0, 0x0c, 0x3c, 0x66, 0xee, 0x9f, 0x31, 0x68, 0xaf, + 0xbe, 0x14, 0xef, 0x35, 0x4e, 0x6d, 0xbc, 0x2d, 0xc9, 0xed, 0x96, 0xd4, 0xbf, 0x04, 0x49, 0x83, + 0xb6, 0xa4, 0xdf, 0x0e, 0x40, 0x7d, 0xfd, 0x76, 0x50, 0xf5, 0x0e, 0xa6, 0x05, 0xc7, 0x98, 0xe5, + 0x49, 0xda, 0xd0, 0xf6, 0xb8, 0x7b, 0xa6, 0x93, 0x06, 0x9b, 0xb6, 0x72, 0xaf, 0x53, 0xf7, 0xf7, + 0x1e, 0xdc, 0xfa, 0xeb, 0x0d, 0x5d, 0xb1, 0xf8, 0x05, 0x4c, 0xd6, 0x29, 0x66, 0x89, 0x7d, 0xde, + 0x6e, 0xe0, 0x36, 0xee, 0xc8, 0x91, 0x8a, 0xa8, 0x96, 0x14, 0xd6, 0xd5, 0x51, 0x90, 0x07, 0x30, + 0xd1, 0x7e, 0x9d, 0x47, 0x59, 0x89, 0xc2, 0xef, 0x07, 0xae, 0x9a, 0x4f, 0x41, 0x9f, 0x34, 0xd2, + 0xf4, 0x6c, 0x70, 0x09, 0x9e, 0x79, 0x6d, 0xcf, 0x7e, 0x39, 0x00, 0xf5, 0x0f, 0xc8, 0x15, 0xdb, + 0xf5, 0x7f, 0x5f, 0xf6, 0x31, 0x8c, 0x2f, 0x9e, 0x25, 0xd9, 0x03, 0x37, 0xca, 0x32, 0xad, 0x67, + 0x44, 0xd5, 0x51, 0x3d, 0x65, 0xbd, 0x06, 0xe1, 0xf7, 0x3a, 0xd6, 0x64, 0xe3, 0xe1, 0x43, 0x18, + 0x5f, 0x80, 0xe4, 0x0e, 0x0c, 0x34, 0xec, 0x3b, 0x7a, 0x53, 0xe6, 0xe3, 0xcc, 0xd3, 0x7f, 0x56, + 0x2f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x8e, 0x38, 0xdd, 0x12, 0x07, 0x00, 0x00, +} diff --git a/vendor/cloud.google.com/go/firestore/integration_test.go b/vendor/cloud.google.com/go/firestore/integration_test.go new file mode 100644 index 0000000..bc34337 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/integration_test.go @@ -0,0 +1,854 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
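
An aside on the interceptor plumbing used in this file: the test installs gRPC unary and stream interceptors to verify outgoing metadata on every call. A minimal sketch of the unary form, with an illustrative name (logHeaders), follows; it matches the signature used by testInterceptor.interceptUnary below.

package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// logHeaders inspects outgoing metadata, then delegates to the real invoker.
func logHeaders(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		_ = md["google-cloud-resource-prefix"] // headers such as this one are visible here
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}

func main() {
	// Installed via a dial option when creating the client connection.
	_ = grpc.WithUnaryInterceptor(logHeaders)
}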
+ +package firestore + +import ( + "errors" + "flag" + "fmt" + "log" + "os" + "sort" + "testing" + "time" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/type/latlng" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +func TestMain(m *testing.M) { + initIntegrationTest() + status := m.Run() + cleanupIntegrationTest() + os.Exit(status) +} + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY" +) + +var ( + iClient *Client + iColl *CollectionRef + collectionIDs = testutil.NewUIDSpace("go-integration-test") +) + +func initIntegrationTest() { + flag.Parse() // needed for testing.Short() + if testing.Short() { + return + } + ctx := context.Background() + testProjectID := os.Getenv(envProjID) + if testProjectID == "" { + log.Println("Integration tests skipped. See CONTRIBUTING.md for details") + return + } + ts := testutil.TokenSourceEnv(ctx, envPrivateKey, + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore") + if ts == nil { + log.Fatal("The project key must be set. See CONTRIBUTING.md for details") + } + ti := &testInterceptor{dbPath: "projects/" + testProjectID + "/databases/(default)"} + c, err := NewClient(ctx, testProjectID, + option.WithTokenSource(ts), + option.WithGRPCDialOption(grpc.WithUnaryInterceptor(ti.interceptUnary)), + option.WithGRPCDialOption(grpc.WithStreamInterceptor(ti.interceptStream)), + ) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + iClient = c + iColl = c.Collection(collectionIDs.New()) + refDoc := iColl.NewDoc() + integrationTestMap["ref"] = refDoc + wantIntegrationTestMap["ref"] = refDoc + integrationTestStruct.Ref = refDoc +} + +type testInterceptor struct { + dbPath string +} + +func (ti *testInterceptor) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ti.checkMetadata(ctx, method) + return invoker(ctx, method, req, res, cc, opts...) +} + +func (ti *testInterceptor) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ti.checkMetadata(ctx, method) + return streamer(ctx, desc, cc, method, opts...) +} + +func (ti *testInterceptor) checkMetadata(ctx context.Context, method string) { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + log.Fatalf("method %s: bad metadata", method) + } + for _, h := range []string{"google-cloud-resource-prefix", "x-goog-api-client"} { + v, ok := md[h] + if !ok { + log.Fatalf("method %s, header %s missing", method, h) + } + if len(v) != 1 { + log.Fatalf("method %s, header %s: bad value %v", method, h, v) + } + } + v := md["google-cloud-resource-prefix"][0] + if v != ti.dbPath { + log.Fatalf("method %s: bad resource prefix header: %q", method, v) + } +} + +func cleanupIntegrationTest() { + if iClient == nil { + return + } + // TODO(jba): delete everything in integrationColl. + iClient.Close() +} + +// integrationClient should be called by integration tests to get a valid client. It will never +// return nil. If integrationClient returns, an integration test can proceed without +// further checks. 
+func integrationClient(t *testing.T) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + if iClient == nil { + t.SkipNow() // log message printed in initIntegrationTest + } + return iClient +} + +func integrationColl(t *testing.T) *CollectionRef { + _ = integrationClient(t) + return iColl +} + +type integrationTestStructType struct { + Int int + Str string + Bool bool + Float float32 + Null interface{} + Bytes []byte + Time time.Time + Geo, NilGeo *latlng.LatLng + Ref *DocumentRef +} + +var ( + integrationTime = time.Date(2017, 3, 20, 1, 2, 3, 456789, time.UTC) + // Firestore times are accurate only to microseconds. + wantIntegrationTime = time.Date(2017, 3, 20, 1, 2, 3, 456000, time.UTC) + + integrationGeo = &latlng.LatLng{Latitude: 30, Longitude: 70} + + // Use this when writing a doc. + integrationTestMap = map[string]interface{}{ + "int": 1, + "str": "two", + "bool": true, + "float": 3.14, + "null": nil, + "bytes": []byte("bytes"), + "*": map[string]interface{}{"`": 4}, + "time": integrationTime, + "geo": integrationGeo, + "ref": nil, // populated by initIntegrationTest + } + + // The returned data is slightly different. + wantIntegrationTestMap = map[string]interface{}{ + "int": int64(1), + "str": "two", + "bool": true, + "float": 3.14, + "null": nil, + "bytes": []byte("bytes"), + "*": map[string]interface{}{"`": int64(4)}, + "time": wantIntegrationTime, + "geo": integrationGeo, + "ref": nil, // populated by initIntegrationTest + } + + integrationTestStruct = integrationTestStructType{ + Int: 1, + Str: "two", + Bool: true, + Float: 3.14, + Null: nil, + Bytes: []byte("bytes"), + Time: integrationTime, + Geo: integrationGeo, + NilGeo: nil, + Ref: nil, // populated by initIntegrationTest + } +) + +func TestIntegration_Create(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + start := time.Now() + wr := mustCreate("Create #1", t, doc, integrationTestMap) + end := time.Now() + checkTimeBetween(t, wr.UpdateTime, start, end) + _, err := doc.Create(ctx, integrationTestMap) + codeEq(t, "Create on a present doc", codes.AlreadyExists, err) + // OK to create an empty document. 
+ _, err = integrationColl(t).NewDoc().Create(ctx, map[string]interface{}{})
+ codeEq(t, "Create empty doc", codes.OK, err)
+}
+
+func TestIntegration_Get(t *testing.T) {
+ ctx := context.Background()
+ doc := integrationColl(t).NewDoc()
+ mustCreate("Get #1", t, doc, integrationTestMap)
+ ds := mustGet("Get #1", t, doc)
+ if ds.CreateTime != ds.UpdateTime {
+ t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
+ }
+ got := ds.Data()
+ if want := wantIntegrationTestMap; !testEqual(got, want) {
+ t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
+ }
+
+ doc = integrationColl(t).NewDoc()
+ empty := map[string]interface{}{}
+ mustCreate("Get empty", t, doc, empty)
+ ds = mustGet("Get empty", t, doc)
+ if ds.CreateTime != ds.UpdateTime {
+ t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
+ }
+ if got, want := ds.Data(), empty; !testEqual(got, want) {
+ t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
+ }
+
+ _, err := integrationColl(t).NewDoc().Get(ctx)
+ codeEq(t, "Get on a missing doc", codes.NotFound, err)
+}
+
+func TestIntegration_GetAll(t *testing.T) {
+ type getAll struct{ N int }
+
+ coll := integrationColl(t)
+ ctx := context.Background()
+ var docRefs []*DocumentRef
+ for i := 0; i < 5; i++ {
+ doc := coll.NewDoc()
+ docRefs = append(docRefs, doc)
+ mustCreate("GetAll #1", t, doc, getAll{N: i})
+ }
+ docSnapshots, err := iClient.GetAll(ctx, docRefs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := len(docSnapshots), len(docRefs); got != want {
+ t.Fatalf("got %d snapshots, want %d", got, want)
+ }
+ for i, ds := range docSnapshots {
+ var got getAll
+ if err := ds.DataTo(&got); err != nil {
+ t.Fatal(err)
+ }
+ want := getAll{N: i}
+ if got != want {
+ t.Errorf("%d: got %+v, want %+v", i, got, want)
+ }
+ }
+}
+
+func TestIntegration_Add(t *testing.T) {
+ start := time.Now()
+ _, wr, err := integrationColl(t).Add(context.Background(), integrationTestMap)
+ if err != nil {
+ t.Fatal(err)
+ }
+ end := time.Now()
+ checkTimeBetween(t, wr.UpdateTime, start, end)
+}
+
+func TestIntegration_Set(t *testing.T) {
+ coll := integrationColl(t)
+ ctx := context.Background()
+
+ // Set should be able to create a new doc.
+ doc := coll.NewDoc()
+ wr1, err := doc.Set(ctx, integrationTestMap)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Calling Set on the doc completely replaces the contents.
+ // The update time should increase.
+ newData := map[string]interface{}{
+ "str": "change",
+ "x": "1",
+ }
+ wr2, err := doc.Set(ctx, newData)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !wr1.UpdateTime.Before(wr2.UpdateTime) {
+ t.Errorf("update time did not increase: old=%s, new=%s", wr1.UpdateTime, wr2.UpdateTime)
+ }
+ ds := mustGet("Set #1", t, doc)
+ if got := ds.Data(); !testEqual(got, newData) {
+ t.Errorf("got %v, want %v", got, newData)
+ }
+
+ newData = map[string]interface{}{
+ "str": "1",
+ "x": "2",
+ "y": "3",
+ }
+ // SetOptions:
+ // Only fields mentioned in the Merge option will be changed.
+ // In this case, "str" will not be changed to "1".
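+ // (Each argument to Merge names one field path, so this call merges only
+ // the top-level fields "x" and "y".)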
+ wr3, err := doc.Set(ctx, newData, Merge([]string{"x"}, []string{"y"})) + if err != nil { + t.Fatal(err) + } + ds = mustGet("Set #2", t, doc) + want := map[string]interface{}{ + "str": "change", + "x": "2", + "y": "3", + } + if got := ds.Data(); !testEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + if !wr2.UpdateTime.Before(wr3.UpdateTime) { + t.Errorf("update time did not increase: old=%s, new=%s", wr2.UpdateTime, wr3.UpdateTime) + } + + // Another way to change only x and y is to pass a map with only + // those keys, and use MergeAll. + wr4, err := doc.Set(ctx, map[string]interface{}{"x": "4", "y": "5"}, MergeAll) + if err != nil { + t.Fatal(err) + } + ds = mustGet("Set #3", t, doc) + want = map[string]interface{}{ + "str": "change", + "x": "4", + "y": "5", + } + if got := ds.Data(); !testEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + if !wr3.UpdateTime.Before(wr4.UpdateTime) { + t.Errorf("update time did not increase: old=%s, new=%s", wr3.UpdateTime, wr4.UpdateTime) + } +} + +func TestIntegration_Delete(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + mustCreate("Delete #1", t, doc, integrationTestMap) + wr, err := doc.Delete(ctx) + if err != nil { + t.Fatal(err) + } + // Confirm that doc doesn't exist. + if _, err := doc.Get(ctx); grpc.Code(err) != codes.NotFound { + t.Fatalf("got error <%v>, want NotFound", err) + } + + er := func(_ *WriteResult, err error) error { return err } + + codeEq(t, "Delete on a missing doc", codes.OK, + er(doc.Delete(ctx))) + // TODO(jba): confirm that the server should return InvalidArgument instead of + // FailedPrecondition. + wr = mustCreate("Delete #2", t, doc, integrationTestMap) + codeEq(t, "Delete with wrong LastUpdateTime", codes.FailedPrecondition, + er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond))))) + codeEq(t, "Delete with right LastUpdateTime", codes.OK, + er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime)))) +} + +func TestIntegration_Update(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + mustCreate("Update", t, doc, integrationTestMap) + fpus := []Update{ + {Path: "bool", Value: false}, + {Path: "time", Value: 17}, + {FieldPath: []string{"*", "`"}, Value: 18}, + {Path: "null", Value: Delete}, + {Path: "noSuchField", Value: Delete}, // deleting a non-existent field is a no-op + } + wr, err := doc.Update(ctx, fpus) + if err != nil { + t.Fatal(err) + } + ds := mustGet("Update", t, doc) + got := ds.Data() + want := copyMap(wantIntegrationTestMap) + want["bool"] = false + want["time"] = int64(17) + want["*"] = map[string]interface{}{"`": int64(18)} + delete(want, "null") + if !testEqual(got, want) { + t.Errorf("got\n%#v\nwant\n%#v", got, want) + } + + er := func(_ *WriteResult, err error) error { return err } + + codeEq(t, "Update on missing doc", codes.NotFound, + er(integrationColl(t).NewDoc().Update(ctx, fpus))) + codeEq(t, "Update with wrong LastUpdateTime", codes.FailedPrecondition, + er(doc.Update(ctx, fpus, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond))))) + codeEq(t, "Update with right LastUpdateTime", codes.OK, + er(doc.Update(ctx, fpus, LastUpdateTime(wr.UpdateTime)))) +} + +func TestIntegration_Collections(t *testing.T) { + ctx := context.Background() + c := integrationClient(t) + got, err := c.Collections(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + // There should be at least one collection. 
+ if len(got) == 0 { + t.Error("got 0 top-level collections, want at least one") + } + + doc := integrationColl(t).NewDoc() + got, err = doc.Collections(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d collections, want 0", len(got)) + } + var want []*CollectionRef + for i := 0; i < 3; i++ { + id := collectionIDs.New() + cr := doc.Collection(id) + want = append(want, cr) + mustCreate("Collections", t, cr.NewDoc(), integrationTestMap) + } + got, err = doc.Collections(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if !testEqual(got, want) { + t.Errorf("got\n%#v\nwant\n%#v", got, want) + } +} + +func TestIntegration_ServerTimestamp(t *testing.T) { + type S struct { + A int + B time.Time + C time.Time `firestore:"C.C,serverTimestamp"` + D map[string]interface{} + E time.Time `firestore:",omitempty,serverTimestamp"` + } + data := S{ + A: 1, + B: aTime, + // C is unset, so will get the server timestamp. + D: map[string]interface{}{"x": ServerTimestamp}, + // E is unset, so will get the server timestamp. + } + doc := integrationColl(t).NewDoc() + // Bound times of the RPC, with some slack for clock skew. + start := time.Now() + mustCreate("ServerTimestamp", t, doc, data) + end := time.Now() + ds := mustGet("ServerTimestamp", t, doc) + var got S + if err := ds.DataTo(&got); err != nil { + t.Fatal(err) + } + if !testEqual(got.B, aTime) { + t.Errorf("B: got %s, want %s", got.B, aTime) + } + checkTimeBetween(t, got.C, start, end) + if g, w := got.D["x"], got.C; !testEqual(g, w) { + t.Errorf(`D["x"] = %s, want equal to C (%s)`, g, w) + } + if g, w := got.E, got.C; !testEqual(g, w) { + t.Errorf(`E = %s, want equal to C (%s)`, g, w) + } +} + +func TestIntegration_MergeServerTimestamp(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + + // Create a doc with an ordinary field "a" and a ServerTimestamp field "b". + _, err := doc.Set(ctx, map[string]interface{}{ + "a": 1, + "b": ServerTimestamp}) + if err != nil { + t.Fatal(err) + } + docSnap := mustGet("MergeST #1", t, doc) + data1 := docSnap.Data() + // Merge with a document with a different value of "a". However, + // specify only "b" in the list of merge fields. + _, err = doc.Set(ctx, + map[string]interface{}{"a": 2, "b": ServerTimestamp}, + Merge([]string{"b"})) + if err != nil { + t.Fatal(err) + } + // The result should leave "a" unchanged, while "b" is updated. + docSnap = mustGet("MergeST #2", t, doc) + data2 := docSnap.Data() + if got, want := data2["a"], data1["a"]; got != want { + t.Errorf("got %v, want %v", got, want) + } + t1 := data1["b"].(time.Time) + t2 := data2["b"].(time.Time) + if !t1.Before(t2) { + t.Errorf("got t1=%s, t2=%s; want t1 before t2", t1, t2) + } +} + +func TestIntegration_MergeNestedServerTimestamp(t *testing.T) { + ctx := context.Background() + doc := integrationColl(t).NewDoc() + + // Create a doc with an ordinary field "a" a ServerTimestamp field "b", + // and a second ServerTimestamp field "c.d". + _, err := doc.Set(ctx, map[string]interface{}{ + "a": 1, + "b": ServerTimestamp, + "c": map[string]interface{}{"d": ServerTimestamp}, + }) + if err != nil { + t.Fatal(err) + } + data1 := mustGet("MergeNST #1", t, doc).Data() + // Merge with a document with a different value of "a". However, + // specify only "c.d" in the list of merge fields. 
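+	// (Note: the single path []string{"c", "d"} below denotes the nested
+	// field "c.d", not two separate top-level fields.)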
+ _, err = doc.Set(ctx, + map[string]interface{}{ + "a": 2, + "b": ServerTimestamp, + "c": map[string]interface{}{"d": ServerTimestamp}, + }, + Merge([]string{"c", "d"})) + if err != nil { + t.Fatal(err) + } + // The result should leave "a" and "b" unchanged, while "c.d" is updated. + data2 := mustGet("MergeNST #2", t, doc).Data() + if got, want := data2["a"], data1["a"]; got != want { + t.Errorf("a: got %v, want %v", got, want) + } + want := data1["b"].(time.Time) + got := data2["b"].(time.Time) + if !got.Equal(want) { + t.Errorf("b: got %s, want %s", got, want) + } + t1 := data1["c"].(map[string]interface{})["d"].(time.Time) + t2 := data2["c"].(map[string]interface{})["d"].(time.Time) + if !t1.Before(t2) { + t.Errorf("got t1=%s, t2=%s; want t1 before t2", t1, t2) + } +} + +func TestIntegration_WriteBatch(t *testing.T) { + ctx := context.Background() + b := integrationClient(t).Batch() + doc1 := iColl.NewDoc() + doc2 := iColl.NewDoc() + b.Create(doc1, integrationTestMap) + b.Set(doc2, integrationTestMap) + b.Update(doc1, []Update{{Path: "bool", Value: false}}) + b.Update(doc1, []Update{{Path: "str", Value: Delete}}) + + wrs, err := b.Commit(ctx) + if err != nil { + t.Fatal(err) + } + if got, want := len(wrs), 4; got != want { + t.Fatalf("got %d WriteResults, want %d", got, want) + } + got1 := mustGet("WriteBatch #1", t, doc1).Data() + want := copyMap(wantIntegrationTestMap) + want["bool"] = false + delete(want, "str") + if !testEqual(got1, want) { + t.Errorf("got\n%#v\nwant\n%#v", got1, want) + } + got2 := mustGet("WriteBatch #2", t, doc2).Data() + if !testEqual(got2, wantIntegrationTestMap) { + t.Errorf("got\n%#v\nwant\n%#v", got2, wantIntegrationTestMap) + } + // TODO(jba): test two updates to the same document when it is supported. + // TODO(jba): test verify when it is supported. +} + +func TestIntegration_Query(t *testing.T) { + ctx := context.Background() + coll := integrationColl(t) + var docs []*DocumentRef + var wants []map[string]interface{} + for i := 0; i < 3; i++ { + doc := coll.NewDoc() + docs = append(docs, doc) + // To support running this test in parallel with the others, use a field name + // that we don't use anywhere else. + mustCreate(fmt.Sprintf("Query #%d", i), t, doc, + map[string]interface{}{ + "q": i, + "x": 1, + }) + wants = append(wants, map[string]interface{}{"q": int64(i)}) + } + q := coll.Select("q").OrderBy("q", Asc) + for i, test := range []struct { + q Query + want []map[string]interface{} + }{ + {q, wants}, + {q.Where("q", ">", 1), wants[2:]}, + {q.WherePath([]string{"q"}, ">", 1), wants[2:]}, + {q.Offset(1).Limit(1), wants[1:2]}, + {q.StartAt(1), wants[1:]}, + {q.StartAfter(1), wants[2:]}, + {q.EndAt(1), wants[:2]}, + {q.EndBefore(1), wants[:1]}, + } { + gotDocs, err := test.q.Documents(ctx).GetAll() + if err != nil { + t.Errorf("#%d: %+v: %v", i, test.q, err) + continue + } + if len(gotDocs) != len(test.want) { + t.Errorf("#%d: %+v: got %d docs, want %d", i, test.q, len(gotDocs), len(test.want)) + continue + } + for j, g := range gotDocs { + if got, want := g.Data(), test.want[j]; !testEqual(got, want) { + t.Errorf("#%d: %+v, #%d: got\n%+v\nwant\n%+v", i, test.q, j, got, want) + } + } + } + _, err := coll.Select("q").Where("x", "==", 1).OrderBy("q", Asc).Documents(ctx).GetAll() + codeEq(t, "Where and OrderBy on different fields without an index", codes.FailedPrecondition, err) + + // Using the collection itself as the query should return the full documents. 
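+	// (This works because CollectionRef embeds Query, so the collection
+	// itself behaves as an unfiltered query over its documents.)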
+ allDocs, err := coll.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + seen := map[int64]bool{} // "q" values we see + for _, d := range allDocs { + data := d.Data() + q, ok := data["q"] + if !ok { + // A document from another test. + continue + } + if seen[q.(int64)] { + t.Errorf("%v: duplicate doc", data) + } + seen[q.(int64)] = true + if data["x"] != int64(1) { + t.Errorf("%v: wrong or missing 'x'", data) + } + if len(data) != 2 { + t.Errorf("%v: want two keys", data) + } + } + if got, want := len(seen), len(wants); got != want { + t.Errorf("got %d docs with 'q', want %d", len(seen), len(wants)) + } +} + +// Test the special DocumentID field in queries. +func TestIntegration_QueryName(t *testing.T) { + ctx := context.Background() + + checkIDs := func(q Query, wantIDs []string) { + gots, err := q.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if len(gots) != len(wantIDs) { + t.Fatalf("got %d, want %d", len(gots), len(wantIDs)) + } + for i, g := range gots { + if got, want := g.Ref.ID, wantIDs[i]; got != want { + t.Errorf("#%d: got %s, want %s", i, got, want) + } + } + } + + coll := integrationColl(t) + var wantIDs []string + for i := 0; i < 3; i++ { + doc := coll.NewDoc() + mustCreate(fmt.Sprintf("Query #%d", i), t, doc, map[string]interface{}{"nm": 1}) + wantIDs = append(wantIDs, doc.ID) + } + sort.Strings(wantIDs) + q := coll.Where("nm", "==", 1).OrderBy(DocumentID, Asc) + checkIDs(q, wantIDs) + + // Empty Select. + q = coll.Select().Where("nm", "==", 1).OrderBy(DocumentID, Asc) + checkIDs(q, wantIDs) + + // Test cursors with __name__. + checkIDs(q.StartAt(wantIDs[1]), wantIDs[1:]) + checkIDs(q.EndAt(wantIDs[1]), wantIDs[:2]) +} + +func TestIntegration_QueryNested(t *testing.T) { + ctx := context.Background() + coll1 := integrationColl(t) + doc1 := coll1.NewDoc() + coll2 := doc1.Collection(collectionIDs.New()) + doc2 := coll2.NewDoc() + wantData := map[string]interface{}{"x": int64(1)} + mustCreate("QueryNested", t, doc2, wantData) + q := coll2.Select("x") + got, err := q.Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if len(got) != 1 { + t.Fatalf("got %d docs, want 1", len(got)) + } + if gotData := got[0].Data(); !testEqual(gotData, wantData) { + t.Errorf("got\n%+v\nwant\n%+v", gotData, wantData) + } +} + +func TestIntegration_RunTransaction(t *testing.T) { + ctx := context.Background() + type Player struct { + Name string + Score int + Star bool `firestore:"*"` + } + pat := Player{Name: "Pat", Score: 3, Star: false} + client := integrationClient(t) + patDoc := iColl.Doc("pat") + var anError error + incPat := func(_ context.Context, tx *Transaction) error { + doc, err := tx.Get(patDoc) + if err != nil { + return err + } + score, err := doc.DataAt("Score") + if err != nil { + return err + } + // Since the Star field is called "*", we must use DataAtPath to get it. + star, err := doc.DataAtPath([]string{"*"}) + if err != nil { + return err + } + err = tx.Update(patDoc, []Update{{Path: "Score", Value: int(score.(int64) + 7)}}) + if err != nil { + return err + } + // Since the Star field is called "*", we must use Update to change it. 
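+		// (Concretely, the Update below must set FieldPath rather than Path,
+		// since "*" is not allowed in a dot-separated path string.)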
+		err = tx.Update(patDoc,
+			[]Update{{FieldPath: []string{"*"}, Value: !star.(bool)}})
+		if err != nil {
+			return err
+		}
+		return anError
+	}
+	mustCreate("RunTransaction", t, patDoc, pat)
+	err := client.RunTransaction(ctx, incPat)
+	if err != nil {
+		t.Fatal(err)
+	}
+	ds := mustGet("RunTransaction", t, patDoc)
+	var got Player
+	if err := ds.DataTo(&got); err != nil {
+		t.Fatal(err)
+	}
+	want := Player{Name: "Pat", Score: 10, Star: true}
+	if got != want {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+
+	// Function returns error, so transaction is rolled back and no writes happen.
+	anError = errors.New("bad")
+	err = client.RunTransaction(ctx, incPat)
+	if err != anError {
+		t.Fatalf("got %v, want %v", err, anError)
+	}
+	// Re-read the document: re-decoding the stale snapshot from before the
+	// failed transaction could not detect an unwanted write.
+	ds = mustGet("RunTransaction", t, patDoc)
+	if err := ds.DataTo(&got); err != nil {
+		t.Fatal(err)
+	}
+	// want is same as before.
+	if got != want {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+}
+
+func codeEq(t *testing.T, msg string, code codes.Code, err error) {
+	if grpc.Code(err) != code {
+		t.Fatalf("%s:\ngot <%v>\nwant code %s", msg, err, code)
+	}
+}
+
+func mustCreate(msg string, t *testing.T, doc *DocumentRef, data interface{}) *WriteResult {
+	wr, err := doc.Create(context.Background(), data)
+	if err != nil {
+		t.Fatalf("%s: creating: %v", msg, err)
+	}
+	return wr
+}
+
+func mustGet(msg string, t *testing.T, doc *DocumentRef) *DocumentSnapshot {
+	d, err := doc.Get(context.Background())
+	if err != nil {
+		t.Fatalf("%s: getting: %v", msg, err)
+	}
+	return d
+}
+
+func copyMap(m map[string]interface{}) map[string]interface{} {
+	c := map[string]interface{}{}
+	for k, v := range m {
+		c[k] = v
+	}
+	return c
+}
+
+func checkTimeBetween(t *testing.T, got, low, high time.Time) {
+	// Allow slack for clock skew.
+	const slack = 4 * time.Second
+	low = low.Add(-slack)
+	high = high.Add(slack)
+	if got.Before(low) || got.After(high) {
+		t.Fatalf("got %s, not in [%s, %s]", got, low, high)
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/internal/Makefile b/vendor/cloud.google.com/go/firestore/internal/Makefile
new file mode 100644
index 0000000..6769cb3
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/Makefile
@@ -0,0 +1,16 @@
+# Build doc.go from template and snippets.
+
+SHELL=/bin/bash
+
+../doc.go: build doc-snippets.go doc.template snipdoc.awk
+	@tmp=$$(mktemp) && \
+	awk -f snipdoc.awk doc-snippets.go doc.template > $$tmp && \
+	chmod +w $@ && \
+	mv $$tmp $@ && \
+	chmod -w $@
+	@echo "wrote $@"
+
+.PHONY: build
+
+build:
+	go build doc-snippets.go
diff --git a/vendor/cloud.google.com/go/firestore/internal/doc-snippets.go b/vendor/cloud.google.com/go/firestore/internal/doc-snippets.go
new file mode 100644
index 0000000..4657e4b
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/doc-snippets.go
@@ -0,0 +1,161 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
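+
+// doc-snippets.go holds the code snippets that snipdoc.awk (driven by the
+// Makefile above) merges into ../doc.go. The Makefile's build target compiles
+// this file, which keeps the snippets valid Go.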
+
+package internal
+
+import (
+	"fmt"
+
+	firestore "cloud.google.com/go/firestore"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+)
+
+const ELLIPSIS = 0
+
+//[ structDef
+type State struct {
+	Capital    string  `firestore:"capital"`
+	Population float64 `firestore:"pop"` // in millions
+}
+
+//]
+
+func f1() {
+	//[ NewClient
+	ctx := context.Background()
+	client, err := firestore.NewClient(ctx, "projectID")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	//]
+	//[ refs
+	states := client.Collection("States")
+	ny := states.Doc("NewYork")
+	// Or, in a single call:
+	ny = client.Doc("States/NewYork")
+	//]
+	//[ docref.Get
+	docsnap, err := ny.Get(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	dataMap := docsnap.Data()
+	fmt.Println(dataMap)
+	//]
+	//[ DataTo
+	var nyData State
+	if err := docsnap.DataTo(&nyData); err != nil {
+		// TODO: Handle error.
+	}
+	//]
+	//[ GetAll
+	docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{
+		states.Doc("Wisconsin"), states.Doc("Ohio"),
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	for _, ds := range docsnaps {
+		_ = ds // TODO: Use ds.
+	}
+	//]
+	//[ docref.Create
+	wr, err := ny.Create(ctx, State{
+		Capital:    "Albany",
+		Population: 19.8,
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Println(wr)
+	//]
+	//[ docref.Set
+	ca := states.Doc("California")
+	_, err = ca.Set(ctx, State{
+		Capital:    "Sacramento",
+		Population: 39.14,
+	})
+	//]
+
+	//[ docref.Update
+	_, err = ca.Update(ctx, []firestore.Update{{Path: "capital", Value: "Sacramento"}})
+	//]
+
+	//[ docref.Delete
+	_, err = ny.Delete(ctx)
+	//]
+
+	//[ LUT-precond
+	docsnap, err = ca.Get(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_, err = ca.Update(ctx,
+		[]firestore.Update{{Path: "capital", Value: "Sacramento"}},
+		firestore.LastUpdateTime(docsnap.UpdateTime))
+	//]
+
+	//[ WriteBatch
+	writeResults, err := client.Batch().
+		Create(ny, State{Capital: "Albany"}).
+		Update(ca, []firestore.Update{{Path: "capital", Value: "Sacramento"}}).
+		Delete(client.Doc("States/WestDakota")).
+		Commit(ctx)
+	//]
+	_ = writeResults
+
+	//[ Query
+	q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
+	//]
+	//[ Documents
+	iter := q.Documents(ctx)
+	for {
+		doc, err := iter.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(doc.Data())
+	}
+	//]
+
+	//[ CollQuery
+	iter = client.Collection("States").Documents(ctx)
+	//]
+}
+
+func txn() {
+	var ctx context.Context
+	var client *firestore.Client
+	//[ Transaction
+	ny := client.Doc("States/NewYork")
+	err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
+		doc, err := tx.Get(ny) // tx.Get, NOT ny.Get!
+		if err != nil {
+			return err
+		}
+		pop, err := doc.DataAt("pop")
+		if err != nil {
+			return err
+		}
+		return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}})
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	//]
+}
diff --git a/vendor/cloud.google.com/go/firestore/internal/doc.template b/vendor/cloud.google.com/go/firestore/internal/doc.template
new file mode 100644
index 0000000..705f6bf
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/doc.template
@@ -0,0 +1,142 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal.
+
+/*
+Package firestore provides a client for reading and writing to a Cloud Firestore
+database.
+
+See https://cloud.google.com/firestore/docs for an introduction
+to Cloud Firestore and additional help on using the Firestore API.
+
+Creating a Client
+
+To start working with this package, create a client with a project ID:
+
+[NewClient]
+
+CollectionRefs and DocumentRefs
+
+In Firestore, documents are sets of key-value pairs, and collections are groups of
+documents. A Firestore database consists of a hierarchy of alternating collections
+and documents, referred to by slash-separated paths like
+"States/California/Cities/SanFrancisco".
+
+This client is built around references to collections and documents. CollectionRefs
+and DocumentRefs are lightweight values that refer to the corresponding database
+entities. Creating a ref does not involve any network traffic.
+
+[refs]
+
+Reading
+
+Use DocumentRef.Get to read a document. The result is a DocumentSnapshot.
+Call its Data method to obtain the entire document contents as a map.
+
+[docref.Get]
+
+You can also obtain a single field with DataAt, or extract the data into a struct
+with DataTo. With the type definition
+
+[structDef]
+
+we can extract the document's data into a value of type State:
+
+[DataTo]
+
+Note that this client supports struct tags beginning with "firestore:" that work like
+the tags of the encoding/json package, letting you rename fields, ignore them, or
+omit their values when empty.
+
+To retrieve multiple documents from their references in a single call, use
+Client.GetAll.
+
+[GetAll]
+
+Writing
+
+For writing individual documents, use the methods on DocumentRef.
+Create creates a new document.
+
+[docref.Create]
+
+The first return value is a WriteResult, which contains the time
+at which the document was updated.
+
+Create fails if the document already exists. Another method, Set, either replaces
+an existing document or creates a new one.
+
+[docref.Set]
+
+To update some fields of an existing document, use Update. It takes a list of
+paths to update and their corresponding values.
+
+[docref.Update]
+
+Use DocumentRef.Delete to delete a document.
+
+[docref.Delete]
+
+Preconditions
+
+You can condition Deletes or Updates on when a document was last changed. Specify
+these preconditions as an option to a Delete or Update method. The check and the
+write happen atomically with a single RPC.
+
+[LUT-precond]
+
+Here we update a doc only if it hasn't changed since we read it.
+You could also do this with a transaction.
+
+To perform multiple writes at once, use a WriteBatch. Its methods chain
+for convenience.
+
+WriteBatch.Commit sends the collected writes to the server, where they happen
+atomically.
+
+[WriteBatch]
+
+Queries
+
+You can use queries to select documents from a collection. Begin with the collection, and
+build up a query using Select, Where and other methods of Query.
+
+[Query]
+
+Call the Query's Documents method to get an iterator, and use it like
+the other Google Cloud Client iterators.
+
+[Documents]
+
+To get all the documents in a collection, you can use the collection itself
+as a query.
+
+[CollQuery]
+
+Transactions
+
+Use a transaction to execute reads and writes atomically. All reads must happen
+before any writes. Transaction creation, commit, rollback and retry are handled for
+you by the Client.RunTransaction method; just provide a function and use the
+read and write methods of the Transaction passed to it.
+
+[Transaction]
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+*/
+package firestore
diff --git a/vendor/cloud.google.com/go/firestore/internal/snipdoc.awk b/vendor/cloud.google.com/go/firestore/internal/snipdoc.awk
new file mode 100644
index 0000000..ebb7e21
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/internal/snipdoc.awk
@@ -0,0 +1,116 @@
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# snipdoc merges code snippets from Go source files into a template to
+# produce another Go file (typically doc.go).
+#
+# Call with one or more .go files and a template file.
+#
+#    awk -f snipdoc.awk foo.go bar.go doc.template
+#
+# In the Go files, start a snippet with
+#    //[ NAME
+# and end it with
+#    //]
+#
+# In the template, write
+#    [NAME]
+# on a line by itself to insert the snippet NAME on that line.
+#
+# The following transformations are made to the Go code:
+# - Trailing blank lines are removed.
+# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
+
+
+/^[ \t]*\/\/\[/ { # start snippet in Go file
+	if (inGo()) {
+		if ($2 == "") {
+			die("missing snippet name")
+		}
+		curSnip = $2
+		next
+	}
+}
+
+/^[ \t]*\/\/]/ { # end snippet in Go file
+	if (inGo()) {
+		if (curSnip != "") {
+			# Remove all trailing newlines.
+			gsub(/[\t\n]+$/, "", snips[curSnip])
+			curSnip = ""
+			next
+		} else {
+			die("//] without corresponding //[")
+		}
+	}
+}
+
+ENDFILE {
+	if (curSnip != "") {
+		die("unclosed snippet: " curSnip)
+	}
+}
+
+/^\[.*\]$/ { # Snippet marker in template file.
+	if (inTemplate()) {
+		name = substr($1, 2, length($1)-2)
+		if (snips[name] == "") {
+			die("no snippet named " name)
+		}
+		printf("%s\n", snips[name])
+		afterSnip = 1
+		next
+	}
+}
+
+# Matches every line.
+{
+	if (curSnip != "") {
+		# If the first line in the snip has no indent, add the indent.
+		if (snips[curSnip] == "") {
+			if (index($0, "\t") == 1) {
+				extraIndent = ""
+			} else {
+				extraIndent = "\t"
+			}
+		}
+
+		line = $0
+		# Replace ELLIPSIS.
+		gsub(/_ = ELLIPSIS/, "...", line)
+		gsub(/ELLIPSIS/, "...", line)
+
+		snips[curSnip] = snips[curSnip] extraIndent line "\n"
+	} else if (inTemplate()) {
+		afterSnip = 0
+		# Copy to output.
+ print + } +} + + + +function inTemplate() { + return match(FILENAME, /\.template$/) +} + +function inGo() { + return match(FILENAME, /\.go$/) +} + + +function die(msg) { + printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr" + exit 1 +} diff --git a/vendor/cloud.google.com/go/firestore/mock_test.go b/vendor/cloud.google.com/go/firestore/mock_test.go new file mode 100644 index 0000000..d0cdba9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/mock_test.go @@ -0,0 +1,176 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +// A simple mock server. + +import ( + "fmt" + + "cloud.google.com/go/internal/testutil" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/empty" + "golang.org/x/net/context" +) + +type mockServer struct { + pb.FirestoreServer + + Addr string + + reqItems []reqItem + resps []interface{} +} + +type reqItem struct { + wantReq proto.Message + adjust func(gotReq proto.Message) +} + +func newMockServer() (*mockServer, error) { + srv, err := testutil.NewServer() + if err != nil { + return nil, err + } + mock := &mockServer{Addr: srv.Addr} + pb.RegisterFirestoreServer(srv.Gsrv, mock) + srv.Start() + return mock, nil +} + +// addRPC adds a (request, response) pair to the server's list of expected +// interactions. The server will compare the incoming request with wantReq +// using proto.Equal. +// +// Passing nil for wantReq disables the request check. +func (s *mockServer) addRPC(wantReq proto.Message, resp interface{}) { + s.addRPCAdjust(wantReq, resp, nil) +} + +// addRPCAdjust is like addRPC, but accepts a function that can be used +// to tweak the requests before comparison, for example to adjust for +// randomness. +func (s *mockServer) addRPCAdjust(wantReq proto.Message, resp interface{}, adjust func(proto.Message)) { + s.reqItems = append(s.reqItems, reqItem{wantReq, adjust}) + s.resps = append(s.resps, resp) +} + +// popRPC compares the request with the next expected (request, response) pair. +// It returns the response, or an error if the request doesn't match what +// was expected or there are no expected rpcs. 
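+// (When the expectation list is empty, popRPC panics instead of returning an
+// error.)
+//
+// A typical test seeds the server before using the client (a sketch; the
+// request and response values are illustrative):
+//
+//	srv.addRPC(
+//		&pb.BeginTransactionRequest{Database: "projects/P/databases/(default)"},
+//		&pb.BeginTransactionResponse{Transaction: []byte("tx")},
+//	)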
+func (s *mockServer) popRPC(gotReq proto.Message) (interface{}, error) { + if len(s.reqItems) == 0 { + panic("out of RPCs") + } + ri := s.reqItems[0] + s.reqItems = s.reqItems[1:] + if ri.wantReq != nil { + if ri.adjust != nil { + ri.adjust(gotReq) + } + if !proto.Equal(gotReq, ri.wantReq) { + return nil, fmt.Errorf("mockServer: bad request\ngot: %T\n%s\nwant: %T\n%s", + gotReq, proto.MarshalTextString(gotReq), + ri.wantReq, proto.MarshalTextString(ri.wantReq)) + } + } + resp := s.resps[0] + s.resps = s.resps[1:] + if err, ok := resp.(error); ok { + return nil, err + } + return resp, nil +} + +func (s *mockServer) reset() { + s.reqItems = nil + s.resps = nil +} + +func (s *mockServer) GetDocument(_ context.Context, req *pb.GetDocumentRequest) (*pb.Document, error) { + res, err := s.popRPC(req) + if err != nil { + return nil, err + } + return res.(*pb.Document), nil +} + +func (s *mockServer) Commit(_ context.Context, req *pb.CommitRequest) (*pb.CommitResponse, error) { + res, err := s.popRPC(req) + if err != nil { + return nil, err + } + return res.(*pb.CommitResponse), nil +} + +func (s *mockServer) BatchGetDocuments(req *pb.BatchGetDocumentsRequest, bs pb.Firestore_BatchGetDocumentsServer) error { + res, err := s.popRPC(req) + if err != nil { + return err + } + responses := res.([]interface{}) + for _, res := range responses { + switch res := res.(type) { + case *pb.BatchGetDocumentsResponse: + if err := bs.Send(res); err != nil { + return err + } + case error: + return res + default: + panic(fmt.Sprintf("bad response type in BatchGetDocuments: %+v", res)) + } + } + return nil +} + +func (s *mockServer) RunQuery(req *pb.RunQueryRequest, qs pb.Firestore_RunQueryServer) error { + res, err := s.popRPC(req) + if err != nil { + return err + } + responses := res.([]interface{}) + for _, res := range responses { + switch res := res.(type) { + case *pb.RunQueryResponse: + if err := qs.Send(res); err != nil { + return err + } + case error: + return res + default: + panic(fmt.Sprintf("bad response type in RunQuery: %+v", res)) + } + } + return nil +} + +func (s *mockServer) BeginTransaction(_ context.Context, req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) { + res, err := s.popRPC(req) + if err != nil { + return nil, err + } + return res.(*pb.BeginTransactionResponse), nil +} + +func (s *mockServer) Rollback(_ context.Context, req *pb.RollbackRequest) (*empty.Empty, error) { + res, err := s.popRPC(req) + if err != nil { + return nil, err + } + return res.(*empty.Empty), nil +} diff --git a/vendor/cloud.google.com/go/firestore/options.go b/vendor/cloud.google.com/go/firestore/options.go new file mode 100644 index 0000000..874253b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/options.go @@ -0,0 +1,177 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "errors" + "fmt" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" +) + +// A Precondition modifies a Firestore update or delete operation. +type Precondition interface { + // Returns the corresponding Precondition proto. + preconditionProto() (*pb.Precondition, error) +} + +// Exists is a Precondition that checks for the existence of a resource before +// writing to it. If the check fails, the write does not occur. +var Exists Precondition + +func init() { + // Initialize here so godoc doesn't show the internal value. + Exists = exists(true) +} + +type exists bool + +func (e exists) preconditionProto() (*pb.Precondition, error) { + return &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{bool(e)}, + }, nil +} + +func (e exists) String() string { + if e { + return "Exists" + } else { + return "DoesNotExist" + } +} + +// LastUpdateTime returns a Precondition that checks that a resource must exist and +// must have last been updated at the given time. If the check fails, the write +// does not occur. +func LastUpdateTime(t time.Time) Precondition { return lastUpdateTime(t) } + +type lastUpdateTime time.Time + +func (u lastUpdateTime) preconditionProto() (*pb.Precondition, error) { + ts, err := ptypes.TimestampProto(time.Time(u)) + if err != nil { + return nil, err + } + return &pb.Precondition{ + ConditionType: &pb.Precondition_UpdateTime{ts}, + }, nil +} + +func (u lastUpdateTime) String() string { return fmt.Sprintf("LastUpdateTime(%s)", time.Time(u)) } + +func processPreconditionsForDelete(preconds []Precondition) (*pb.Precondition, error) { + // At most one option permitted. + switch len(preconds) { + case 0: + return nil, nil + case 1: + return preconds[0].preconditionProto() + default: + return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds) + } +} + +func processPreconditionsForUpdate(preconds []Precondition) (*pb.Precondition, error) { + // At most one option permitted, and it cannot be Exists. + switch len(preconds) { + case 0: + // If the user doesn't provide any options, default to Exists(true). + return exists(true).preconditionProto() + case 1: + if _, ok := preconds[0].(exists); ok { + return nil, errors.New("Cannot use Exists with Update") + } + return preconds[0].preconditionProto() + default: + return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds) + } +} + +func processPreconditionsForVerify(preconds []Precondition) (*pb.Precondition, error) { + // At most one option permitted. + switch len(preconds) { + case 0: + return nil, nil + case 1: + return preconds[0].preconditionProto() + default: + return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds) + } +} + +// A SetOption modifies a Firestore set operation. +type SetOption interface { + fieldPaths() (fps []FieldPath, all bool, err error) +} + +// MergeAll is a SetOption that causes all the field paths given in the data argument +// to Set to be overwritten. It is not supported for struct data. +var MergeAll SetOption = merge{all: true} + +// Merge returns a SetOption that causes only the given field paths to be +// overwritten. Other fields on the existing document will be untouched. It is an +// error if a provided field path does not refer to a value in the data passed to +// Set. 
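+//
+// For example (an illustrative sketch; assumes a DocumentRef doc, a context
+// ctx, and a data map):
+//
+//	// Overwrite only the top-level field "a" and the nested field "b.c".
+//	_, err := doc.Set(ctx, data, Merge([]string{"a"}, []string{"b", "c"}))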
+func Merge(fps ...FieldPath) SetOption {
+	for _, fp := range fps {
+		if err := fp.validate(); err != nil {
+			return merge{err: err}
+		}
+	}
+	return merge{paths: fps}
+}
+
+type merge struct {
+	all   bool
+	paths []FieldPath
+	err   error
+}
+
+func (m merge) String() string {
+	if m.err != nil {
+		return fmt.Sprintf("<Merge error: %v>", m.err)
+	}
+	if m.all {
+		return "MergeAll"
+	}
+	return fmt.Sprintf("Merge(%+v)", m.paths)
+}
+
+func (m merge) fieldPaths() (fps []FieldPath, all bool, err error) {
+	if m.err != nil {
+		return nil, false, m.err
+	}
+	if err := checkNoDupOrPrefix(m.paths); err != nil {
+		return nil, false, err
+	}
+	if m.all {
+		return nil, true, nil
+	}
+	return m.paths, false, nil
+}
+
+func processSetOptions(opts []SetOption) (fps []FieldPath, all bool, err error) {
+	switch len(opts) {
+	case 0:
+		return nil, false, nil
+	case 1:
+		return opts[0].fieldPaths()
+	default:
+		return nil, false, fmt.Errorf("conflicting options: %+v", opts)
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/options_test.go b/vendor/cloud.google.com/go/firestore/options_test.go
new file mode 100644
index 0000000..211557b
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/options_test.go
@@ -0,0 +1,151 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package firestore + +import ( + "testing" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" +) + +func TestProcessPreconditionsForVerify(t *testing.T) { + for _, test := range []struct { + in []Precondition + want *pb.Precondition + wantErr bool + }{ + { + in: nil, + want: nil, + }, + { + in: []Precondition{}, + want: nil, + }, + { + in: []Precondition{Exists}, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + { + in: []Precondition{LastUpdateTime(aTime)}, + want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}}, + }, + { + in: []Precondition{Exists, LastUpdateTime(aTime)}, + wantErr: true, + }, + { + in: []Precondition{Exists, Exists}, + wantErr: true, + }, + } { + got, err := processPreconditionsForVerify(test.in) + switch { + case test.wantErr && err == nil: + t.Errorf("%v: got nil, want error", test.in) + case !test.wantErr && err != nil: + t.Errorf("%v: got <%v>, want no error", test.in, err) + case !test.wantErr && err == nil && !testEqual(got, test.want): + t.Errorf("%v: got %+v, want %v", test.in, got, test.want) + } + } +} + +func TestProcessPreconditionsForDelete(t *testing.T) { + for _, test := range []struct { + in []Precondition + want *pb.Precondition + wantErr bool + }{ + { + in: nil, + want: nil, + }, + { + in: []Precondition{}, + want: nil, + }, + { + in: []Precondition{Exists}, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + { + in: []Precondition{LastUpdateTime(aTime)}, + want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}}, + }, + { + in: []Precondition{Exists, LastUpdateTime(aTime)}, + wantErr: true, + }, + { + in: []Precondition{Exists, Exists}, + wantErr: true, + }, + } { + got, err := processPreconditionsForDelete(test.in) + switch { + case test.wantErr && err == nil: + t.Errorf("%v: got nil, want error", test.in) + case !test.wantErr && err != nil: + t.Errorf("%v: got <%v>, want no error", test.in, err) + case !test.wantErr && err == nil && !testEqual(got, test.want): + t.Errorf("%v: got %+v, want %v", test.in, got, test.want) + } + } +} + +func TestProcessPreconditionsForUpdate(t *testing.T) { + for _, test := range []struct { + in []Precondition + want *pb.Precondition + wantErr bool + }{ + { + in: nil, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + { + in: []Precondition{}, + want: &pb.Precondition{&pb.Precondition_Exists{true}}, + }, + + { + in: []Precondition{Exists}, + wantErr: true, + }, + { + in: []Precondition{LastUpdateTime(aTime)}, + want: &pb.Precondition{&pb.Precondition_UpdateTime{aTimestamp}}, + }, + { + in: []Precondition{Exists, LastUpdateTime(aTime)}, + wantErr: true, + }, + { + in: []Precondition{Exists, Exists}, + wantErr: true, + }, + } { + got, err := processPreconditionsForUpdate(test.in) + switch { + case test.wantErr && err == nil: + t.Errorf("%v: got nil, want error", test.in) + case !test.wantErr && err != nil: + t.Errorf("%v: got <%v>, want no error", test.in, err) + case !test.wantErr && err == nil && !testEqual(got, test.want): + t.Errorf("%v: got %+v, want %v", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/query.go b/vendor/cloud.google.com/go/firestore/query.go new file mode 100644 index 0000000..af9b7dc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/query.go @@ -0,0 +1,463 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/api/iterator" +) + +// Query represents a Firestore query. +// +// Query values are immutable. Each Query method creates +// a new Query; it does not modify the old. +type Query struct { + c *Client + parentPath string // path of the collection's parent + collectionID string + selection []FieldPath + filters []filter + orders []order + offset int32 + limit *wrappers.Int32Value + startVals, endVals []interface{} + startBefore, endBefore bool + err error +} + +// DocumentID is the special field name representing the ID of a document +// in queries. +const DocumentID = "__name__" + +// Select returns a new Query that specifies the paths +// to return from the result documents. +// Each path argument can be a single field or a dot-separated sequence of +// fields, and must not contain any of the runes "˜*/[]". +func (q Query) Select(paths ...string) Query { + var fps []FieldPath + for _, s := range paths { + fp, err := parseDotSeparatedString(s) + if err != nil { + q.err = err + return q + } + fps = append(fps, fp) + } + if fps == nil { + q.selection = []FieldPath{{DocumentID}} + } else { + q.selection = fps + } + return q +} + +// SelectPaths returns a new Query that specifies the field paths +// to return from the result documents. +func (q Query) SelectPaths(fieldPaths ...FieldPath) Query { + q.selection = fieldPaths + return q +} + +// Where returns a new Query that filters the set of results. +// A Query can have multiple filters. +// The path argument can be a single field or a dot-separated sequence of +// fields, and must not contain any of the runes "˜*/[]". +// The op argument must be one of "==", "<", "<=", ">" or ">=". +func (q Query) Where(path, op string, value interface{}) Query { + fp, err := parseDotSeparatedString(path) + if err != nil { + q.err = err + return q + } + q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value}) + return q +} + +// WherePath returns a new Query that filters the set of results. +// A Query can have multiple filters. +// The op argument must be one of "==", "<", "<=", ">" or ">=". +func (q Query) WherePath(fp FieldPath, op string, value interface{}) Query { + q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value}) + return q +} + +// Direction is the sort direction for result ordering. +type Direction int32 + +const ( + // Asc sorts results from smallest to largest. + Asc Direction = Direction(pb.StructuredQuery_ASCENDING) + + // Desc sorts results from largest to smallest. + Desc Direction = Direction(pb.StructuredQuery_DESCENDING) +) + +// OrderBy returns a new Query that specifies the order in which results are +// returned. A Query can have multiple OrderBy/OrderByPath specifications. OrderBy +// appends the specification to the list of existing ones. 
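+// A query with multiple orderings sorts results by the first one, using the
+// later orderings to break ties.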
+// +// The path argument can be a single field or a dot-separated sequence of +// fields, and must not contain any of the runes "˜*/[]". +// +// To order by document name, use the special field path DocumentID. +func (q Query) OrderBy(path string, dir Direction) Query { + fp, err := parseDotSeparatedString(path) + if err != nil { + q.err = err + return q + } + q.orders = append(append([]order(nil), q.orders...), order{fp, dir}) + return q +} + +// OrderByPath returns a new Query that specifies the order in which results are +// returned. A Query can have multiple OrderBy/OrderByPath specifications. +// OrderByPath appends the specification to the list of existing ones. +func (q Query) OrderByPath(fp FieldPath, dir Direction) Query { + q.orders = append(append([]order(nil), q.orders...), order{fp, dir}) + return q +} + +// Offset returns a new Query that specifies the number of initial results to skip. +// It must not be negative. +func (q Query) Offset(n int) Query { + q.offset = trunc32(n) + return q +} + +// Limit returns a new Query that specifies the maximum number of results to return. +// It must not be negative. +func (q Query) Limit(n int) Query { + q.limit = &wrappers.Int32Value{trunc32(n)} + return q +} + +// StartAt returns a new Query that specifies that results should start at +// the document with the given field values. The field path corresponding to +// each value is taken from the corresponding OrderBy call. For example, in +// q.OrderBy("X", Asc).OrderBy("Y", Desc).StartAt(1, 2) +// results will begin at the first document where X = 1 and Y = 2. +// +// If an OrderBy call uses the special DocumentID field path, the corresponding value +// should be the document ID relative to the query's collection. For example, to +// start at the document "NewYork" in the "States" collection, write +// +// client.Collection("States").OrderBy(DocumentID, firestore.Asc).StartAt("NewYork") +// +// Calling StartAt overrides a previous call to StartAt or StartAfter. +func (q Query) StartAt(fieldValues ...interface{}) Query { + q.startVals, q.startBefore = fieldValues, true + return q +} + +// StartAfter returns a new Query that specifies that results should start just after +// the document with the given field values. See Query.StartAt for more information. +// +// Calling StartAfter overrides a previous call to StartAt or StartAfter. +func (q Query) StartAfter(fieldValues ...interface{}) Query { + q.startVals, q.startBefore = fieldValues, false + return q +} + +// EndAt returns a new Query that specifies that results should end at the +// document with the given field values. See Query.StartAt for more information. +// +// Calling EndAt overrides a previous call to EndAt or EndBefore. +func (q Query) EndAt(fieldValues ...interface{}) Query { + q.endVals, q.endBefore = fieldValues, false + return q +} + +// EndBefore returns a new Query that specifies that results should end just before +// the document with the given field values. See Query.StartAt for more information. +// +// Calling EndBefore overrides a previous call to EndAt or EndBefore. 
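+//
+// For example (illustrative), to take scores in the half-open interval [10, 100):
+//
+//	q.OrderBy("Score", Asc).StartAt(10).EndBefore(100)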
+func (q Query) EndBefore(fieldValues ...interface{}) Query { + q.endVals, q.endBefore = fieldValues, true + return q +} + +func (q Query) query() *Query { return &q } + +func (q Query) toProto() (*pb.StructuredQuery, error) { + if q.err != nil { + return nil, q.err + } + if q.collectionID == "" { + return nil, errors.New("firestore: query created without CollectionRef") + } + p := &pb.StructuredQuery{ + From: []*pb.StructuredQuery_CollectionSelector{{CollectionId: q.collectionID}}, + Offset: q.offset, + Limit: q.limit, + } + if len(q.selection) > 0 { + p.Select = &pb.StructuredQuery_Projection{} + for _, fp := range q.selection { + if err := fp.validate(); err != nil { + return nil, err + } + p.Select.Fields = append(p.Select.Fields, fref(fp)) + } + } + // If there is only filter, use it directly. Otherwise, construct + // a CompositeFilter. + if len(q.filters) == 1 { + pf, err := q.filters[0].toProto() + if err != nil { + return nil, err + } + p.Where = pf + } else if len(q.filters) > 1 { + cf := &pb.StructuredQuery_CompositeFilter{ + Op: pb.StructuredQuery_CompositeFilter_AND, + } + p.Where = &pb.StructuredQuery_Filter{ + FilterType: &pb.StructuredQuery_Filter_CompositeFilter{cf}, + } + for _, f := range q.filters { + pf, err := f.toProto() + if err != nil { + return nil, err + } + cf.Filters = append(cf.Filters, pf) + } + } + for _, ord := range q.orders { + po, err := ord.toProto() + if err != nil { + return nil, err + } + p.OrderBy = append(p.OrderBy, po) + } + // StartAt and EndAt must have values that correspond exactly to the explicit order-by fields. + if len(q.startVals) != 0 { + vals, err := q.toPositionValues(q.startVals) + if err != nil { + return nil, err + } + p.StartAt = &pb.Cursor{Values: vals, Before: q.startBefore} + } + if len(q.endVals) != 0 { + vals, err := q.toPositionValues(q.endVals) + if err != nil { + return nil, err + } + p.EndAt = &pb.Cursor{Values: vals, Before: q.endBefore} + } + return p, nil +} + +// toPositionValues converts the field values to protos. 
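+// For an ordering on DocumentID, the corresponding value must be the
+// document's ID string, which is expanded into the document's full resource
+// name.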
+func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) { + if len(fieldValues) != len(q.orders) { + return nil, errors.New("firestore: number of field values in StartAt/StartAfter/EndAt/EndBefore does not match number of OrderBy fields") + } + vals := make([]*pb.Value, len(fieldValues)) + var err error + for i, ord := range q.orders { + fval := fieldValues[i] + if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID { + docID, ok := fval.(string) + if !ok { + return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval) + } + vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.parentPath + "/documents/" + q.collectionID + "/" + docID}} + } else { + var sawTransform bool + vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval)) + if err != nil { + return nil, err + } + if sawTransform { + return nil, errors.New("firestore: ServerTimestamp disallowed in query value") + } + } + } + return vals, nil +} + +type filter struct { + fieldPath FieldPath + op string + value interface{} +} + +func (f filter) toProto() (*pb.StructuredQuery_Filter, error) { + if err := f.fieldPath.validate(); err != nil { + return nil, err + } + var op pb.StructuredQuery_FieldFilter_Operator + switch f.op { + case "<": + op = pb.StructuredQuery_FieldFilter_LESS_THAN + case "<=": + op = pb.StructuredQuery_FieldFilter_LESS_THAN_OR_EQUAL + case ">": + op = pb.StructuredQuery_FieldFilter_GREATER_THAN + case ">=": + op = pb.StructuredQuery_FieldFilter_GREATER_THAN_OR_EQUAL + case "==": + op = pb.StructuredQuery_FieldFilter_EQUAL + default: + return nil, fmt.Errorf("firestore: invalid operator %q", f.op) + } + val, sawTransform, err := toProtoValue(reflect.ValueOf(f.value)) + if err != nil { + return nil, err + } + if sawTransform { + return nil, errors.New("firestore: ServerTimestamp disallowed in query value") + } + return &pb.StructuredQuery_Filter{ + FilterType: &pb.StructuredQuery_Filter_FieldFilter{ + &pb.StructuredQuery_FieldFilter{ + Field: fref(f.fieldPath), + Op: op, + Value: val, + }, + }, + }, nil +} + +type order struct { + fieldPath FieldPath + dir Direction +} + +func (r order) toProto() (*pb.StructuredQuery_Order, error) { + if err := r.fieldPath.validate(); err != nil { + return nil, err + } + return &pb.StructuredQuery_Order{ + Field: fref(r.fieldPath), + Direction: pb.StructuredQuery_Direction(r.dir), + }, nil +} + +func fref(fp FieldPath) *pb.StructuredQuery_FieldReference { + return &pb.StructuredQuery_FieldReference{fp.toServiceFieldPath()} +} + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +// Documents returns an iterator over the query's resulting documents. +func (q Query) Documents(ctx context.Context) *DocumentIterator { + return &DocumentIterator{ + ctx: withResourceHeader(ctx, q.c.path()), + q: &q, + err: checkTransaction(ctx), + } +} + +// DocumentIterator is an iterator over documents returned by a query. +type DocumentIterator struct { + ctx context.Context + q *Query + tid []byte // transaction ID, if any + streamClient pb.Firestore_RunQueryClient + err error +} + +// Next returns the next result. Its second return value is iterator.Done if there +// are no more results. Once Next returns Done, all subsequent calls will return +// Done. 
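+//
+// A typical loop (sketch):
+//
+//	it := q.Documents(ctx)
+//	for {
+//		doc, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = doc // TODO: Use doc.
+//	}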
+func (it *DocumentIterator) Next() (*DocumentSnapshot, error) { + if it.err != nil { + return nil, it.err + } + client := it.q.c + if it.streamClient == nil { + sq, err := it.q.toProto() + if err != nil { + it.err = err + return nil, err + } + req := &pb.RunQueryRequest{ + Parent: it.q.parentPath, + QueryType: &pb.RunQueryRequest_StructuredQuery{sq}, + } + if it.tid != nil { + req.ConsistencySelector = &pb.RunQueryRequest_Transaction{it.tid} + } + it.streamClient, it.err = client.c.RunQuery(it.ctx, req) + if it.err != nil { + return nil, it.err + } + } + var res *pb.RunQueryResponse + var err error + for { + res, err = it.streamClient.Recv() + if err == io.EOF { + err = iterator.Done + } + if err != nil { + it.err = err + return nil, it.err + } + if res.Document != nil { + break + } + // No document => partial progress; keep receiving. + } + docRef, err := pathToDoc(res.Document.Name, client) + if err != nil { + it.err = err + return nil, err + } + doc, err := newDocumentSnapshot(docRef, res.Document, client) + if err != nil { + it.err = err + return nil, err + } + return doc, nil +} + +// GetAll returns all the documents remaining from the iterator. +func (it *DocumentIterator) GetAll() ([]*DocumentSnapshot, error) { + var docs []*DocumentSnapshot + for { + doc, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, err + } + docs = append(docs, doc) + } + return docs, nil +} + +// TODO(jba): Does the iterator need a Stop or Close method? I don't think so-- +// I don't think the client can terminate a streaming receive except perhaps +// by cancelling the context, and the user can do that themselves if they wish. +// Find out for sure. diff --git a/vendor/cloud.google.com/go/firestore/query_test.go b/vendor/cloud.google.com/go/firestore/query_test.go new file mode 100644 index 0000000..0fea114 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/query_test.go @@ -0,0 +1,389 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "testing" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/pretty" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes/wrappers" +) + +func TestQueryToProto(t *testing.T) { + c := &Client{} + coll := c.Collection("C") + q := coll.Query + aFilter, err := filter{[]string{"a"}, ">", 5}.toProto() + if err != nil { + t.Fatal(err) + } + bFilter, err := filter{[]string{"b"}, "<", "foo"}.toProto() + if err != nil { + t.Fatal(err) + } + slashStarFilter, err := filter{[]string{"/", "*"}, ">", 5}.toProto() + if err != nil { + t.Fatal(err) + } + type S struct { + A int `firestore:"a"` + } + for _, test := range []struct { + in Query + want *pb.StructuredQuery + }{ + { + in: q.Select(), + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("__name__")}, + }, + }, + }, + { + in: q.Select("a", "b"), + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("a"), fref1("b")}, + }, + }, + }, + { + in: q.Select("a", "b").Select("c"), // last wins + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("c")}, + }, + }, + }, + { + in: q.SelectPaths([]string{"*"}, []string{"/"}), + want: &pb.StructuredQuery{ + Select: &pb.StructuredQuery_Projection{ + Fields: []*pb.StructuredQuery_FieldReference{fref1("*"), fref1("/")}, + }, + }, + }, + { + in: q.Where("a", ">", 5), + want: &pb.StructuredQuery{Where: aFilter}, + }, + { + in: q.Where("a", ">", 5).Where("b", "<", "foo"), + want: &pb.StructuredQuery{ + Where: &pb.StructuredQuery_Filter{ + &pb.StructuredQuery_Filter_CompositeFilter{ + &pb.StructuredQuery_CompositeFilter{ + Op: pb.StructuredQuery_CompositeFilter_AND, + Filters: []*pb.StructuredQuery_Filter{ + aFilter, bFilter, + }, + }, + }, + }, + }, + }, + { + in: q.WherePath([]string{"/", "*"}, ">", 5), + want: &pb.StructuredQuery{Where: slashStarFilter}, + }, + { + in: q.OrderBy("b", Asc).OrderBy("a", Desc).OrderByPath([]string{"~"}, Asc), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("b"), pb.StructuredQuery_ASCENDING}, + {fref1("a"), pb.StructuredQuery_DESCENDING}, + {fref1("~"), pb.StructuredQuery_ASCENDING}, + }, + }, + }, + { + in: q.Offset(2).Limit(3), + want: &pb.StructuredQuery{ + Offset: 2, + Limit: &wrappers.Int32Value{3}, + }, + }, + { + in: q.Offset(2).Limit(3).Limit(4).Offset(5), // last wins + want: &pb.StructuredQuery{ + Offset: 5, + Limit: &wrappers.Int32Value{4}, + }, + }, + { + in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7)}, + Before: true, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(9)}, + Before: true, + }, + }, + }, + { + in: q.OrderBy("a", Asc).StartAt(7).EndBefore(9), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7)}, + Before: true, + }, + EndAt: &pb.Cursor{ + Values: []*pb.Value{intval(9)}, + Before: true, + }, + }, + }, + { + in: q.OrderBy("a", Asc).StartAfter(7).EndAt(9), + want: &pb.StructuredQuery{ + OrderBy: []*pb.StructuredQuery_Order{ + {fref1("a"), pb.StructuredQuery_ASCENDING}, + }, + StartAt: &pb.Cursor{ + Values: []*pb.Value{intval(7)}, 
+
+func TestQueryToProtoErrors(t *testing.T) {
+	st := map[string]interface{}{"a": ServerTimestamp}
+	del := map[string]interface{}{"a": Delete}
+	q := (&Client{}).Collection("C").Query
+	for _, query := range []Query{
+		Query{},                                // no collection ID
+		q.Where("x", "!=", 1),                  // invalid operator
+		q.Where("~", ">", 1),                   // invalid path
+		q.WherePath([]string{"*", ""}, ">", 1), // invalid path
+		q.StartAt(1),                           // no OrderBy
+		q.StartAt(2).OrderBy("x", Asc).OrderBy("y", Desc), // wrong # OrderBy
+		q.Select("*"),                          // invalid path
+		q.SelectPaths([]string{"/", "", "~"}),  // invalid path
+		q.OrderBy("[", Asc),                    // invalid path
+		q.OrderByPath([]string{""}, Desc),      // invalid path
+		q.Where("x", "==", st),                 // ServerTimestamp in filter
+		q.OrderBy("a", Asc).StartAt(st),        // ServerTimestamp in Start
+		q.OrderBy("a", Asc).EndAt(st),          // ServerTimestamp in End
+		q.Where("x", "==", del),                // Delete in filter
+		q.OrderBy("a", Asc).StartAt(del),       // Delete in Start
+		q.OrderBy("a", Asc).EndAt(del),         // Delete in End
+	} {
+		_, err := query.toProto()
+		if err == nil {
+			t.Errorf("%+v: got nil, want error", query)
+		}
+	}
+}
+
+func TestQueryMethodsDoNotModifyReceiver(t *testing.T) {
+	var empty Query
+
+	q := Query{}
+	_ = q.Select("a", "b")
+	if !testEqual(q, empty) {
+		t.Errorf("got %+v, want empty", q)
+	}
+
+	q = Query{}
+	q1 := q.Where("a", ">", 3)
+	if !testEqual(q, empty) {
+		t.Errorf("got %+v, want empty", q)
+	}
+	// Extra check because Where appends to a slice.
+	q1before := q.Where("a", ">", 3) // same as q1
+	_ = q1.Where("b", "<", "foo")
+	if !testEqual(q1, q1before) {
+		t.Errorf("got %+v, want %+v", q1, q1before)
+	}
+
+	q = Query{}
+	q1 = q.OrderBy("a", Asc)
+	if !testEqual(q, empty) {
+		t.Errorf("got %+v, want empty", q)
+	}
+	// Extra check because OrderBy appends to a slice.
+ q1before = q.OrderBy("a", Asc) // same as q1 + _ = q1.OrderBy("b", Desc) + if !testEqual(q1, q1before) { + t.Errorf("got %+v, want %+v", q1, q1before) + } + + q = Query{} + _ = q.Offset(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.Limit(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.StartAt(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.StartAfter(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.EndAt(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } + + q = Query{} + _ = q.EndBefore(5) + if !testEqual(q, empty) { + t.Errorf("got %+v, want empty", q) + } +} + +func TestQueryFromCollectionRef(t *testing.T) { + c := &Client{} + coll := c.Collection("C") + got := coll.Select("x").Offset(8) + want := Query{ + c: c, + parentPath: c.path(), + collectionID: "C", + selection: []FieldPath{{"x"}}, + offset: 8, + } + if !testEqual(got, want) { + t.Fatalf("got %+v, want %+v", got, want) + } +} + +func TestQueryGetAll(t *testing.T) { + // This implicitly tests DocumentIterator as well. + const dbPath = "projects/projectID/databases/(default)" + ctx := context.Background() + c, srv := newMock(t) + docNames := []string{"C/a", "C/b"} + wantPBDocs := []*pb.Document{ + { + Name: dbPath + "/documents/" + docNames[0], + CreateTime: aTimestamp, + UpdateTime: aTimestamp, + Fields: map[string]*pb.Value{"f": intval(2)}, + }, + { + Name: dbPath + "/documents/" + docNames[1], + CreateTime: aTimestamp2, + UpdateTime: aTimestamp3, + Fields: map[string]*pb.Value{"f": intval(1)}, + }, + } + + srv.addRPC(nil, []interface{}{ + &pb.RunQueryResponse{Document: wantPBDocs[0]}, + &pb.RunQueryResponse{Document: wantPBDocs[1]}, + }) + gotDocs, err := c.Collection("C").Documents(ctx).GetAll() + if err != nil { + t.Fatal(err) + } + if got, want := len(gotDocs), len(wantPBDocs); got != want { + t.Errorf("got %d docs, wanted %d", got, want) + } + for i, got := range gotDocs { + want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c) + if err != nil { + t.Fatal(err) + } + if !testEqual(got, want) { + // avoid writing a cycle + got.c = nil + want.c = nil + t.Errorf("#%d: got %+v, want %+v", i, pretty.Value(got), pretty.Value(want)) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/testdata/Makefile b/vendor/cloud.google.com/go/firestore/testdata/Makefile new file mode 100644 index 0000000..499aa05 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/Makefile @@ -0,0 +1,11 @@ +# Copy textproto files in this directory from the source of truth. + +SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata + +.PHONY: refresh + +refresh: + -rm *.textproto + cp $(SRC)/*.textproto . 
+ openssl dgst -sha1 $(SRC)/tests.binprotos > VERSION + diff --git a/vendor/cloud.google.com/go/firestore/testdata/VERSION b/vendor/cloud.google.com/go/firestore/testdata/VERSION new file mode 100644 index 0000000..e1149bb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/VERSION @@ -0,0 +1 @@ +SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/tests.binprotos)= b0fbaaac8664945cb4f5667da092a6f9ededc57e diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto new file mode 100644 index 0000000..433ffda --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto @@ -0,0 +1,27 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. + +description: "create: basic" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto new file mode 100644 index 0000000..00a994e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto @@ -0,0 +1,61 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. + +description: "create: complex" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto new file mode 100644 index 0000000..60694e1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. 
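Illustrative sketch (editor's note, not part of the generated data): the error case that follows corresponds to a client call like the one below, which should fail on the client before any RPC is made. c and ctx are assumed as above.

	_, err := c.Doc("C/d").Create(ctx, map[string]interface{}{
		"a": []interface{}{1, map[string]interface{}{"b": firestore.Delete}},
	})
	if err == nil {
		// unexpected: firestore.Delete may not appear inside an array value
	}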
+ +description: "create: Delete cannot be anywhere inside an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto new file mode 100644 index 0000000..5731be1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "create: Delete cannot be in an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"Delete\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto new file mode 100644 index 0000000..2b6fec7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + + +description: "create: creating or setting an empty map" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto new file mode 100644 index 0000000..c878814 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel cannot be used in Create, or in Set without a Merge option. + +description: "create: Delete cannot appear in data" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto new file mode 100644 index 0000000..e9e1ee2 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not split on dots. 
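A sketch of the corresponding call (assuming c and ctx as above). Because Create takes map keys literally, this writes a single top-level field named "a.b" rather than a nested path:

	_, err := c.Doc("C/d").Create(ctx, map[string]interface{}{
		"a.b": map[string]interface{}{"c.d": 1},
		"e":   2,
	})
	if err != nil {
		// handle err; unlike Update, Create does not split "a.b" on the dot
	}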
+ +description: "create: don\342\200\231t split on dots" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a.b" + value: < + map_value: < + fields: < + key: "c.d" + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "e" + value: < + integer_value: 2 + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto new file mode 100644 index 0000000..3a7acd3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto @@ -0,0 +1,41 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not escape special +# characters. + +description: "create: non-alpha characters in map keys" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "." + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "~" + value: < + integer_value: 2 + > + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto new file mode 100644 index 0000000..9803a67 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto @@ -0,0 +1,26 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "create: ServerTimestamp alone" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: false + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto new file mode 100644 index 0000000..cb3db48 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-multi.textproto @@ -0,0 +1,41 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". 
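The split into one update write plus one transform write encoded below would come from a call like this sketch (assumptions as above):

	_, err := c.Doc("C/d").Create(ctx, map[string]interface{}{
		"a": 1,
		"b": firestore.ServerTimestamp,
		"c": map[string]interface{}{"d": firestore.ServerTimestamp},
	})
	if err != nil {
		// handle err; "a" lands in the update write, "b" and "c.d" become transforms
	}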
+ +description: "create: multiple ServerTimestamp fields" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto new file mode 100644 index 0000000..6bc03e8 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-nested.textproto @@ -0,0 +1,38 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. + +description: "create: nested ServerTimestamp field" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto new file mode 100644 index 0000000..0cec0ae --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray-nested.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. + +description: "create: ServerTimestamp cannot be anywhere inside an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto new file mode 100644 index 0000000..56d91c2 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st-noarray.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
+ +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "create: ServerTimestamp cannot be in an array value" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/create-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/create-st.textproto new file mode 100644 index 0000000..ddfc6a1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/create-st.textproto @@ -0,0 +1,39 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. + +description: "create: ServerTimestamp with data" +create: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + current_document: < + exists: false + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto new file mode 100644 index 0000000..c9cf2dd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/delete-exists-precond.textproto @@ -0,0 +1,21 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Delete supports an exists precondition. + +description: "delete: delete with exists precondition" +delete: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + exists: true + > + request: < + database: "projects/projectID/databases/(default)" + writes: < + delete: "projects/projectID/databases/(default)/documents/C/d" + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto new file mode 100644 index 0000000..a396cdb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/delete-no-precond.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# An ordinary Delete call. 
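For reference, a sketch of the Delete variants this group encodes (c and ctx assumed as above; a last-update-time precondition is also available via firestore.LastUpdateTime):

	if _, err := c.Doc("C/d").Delete(ctx); err != nil { // no precondition
		// handle err
	}
	if _, err := c.Doc("C/d").Delete(ctx, firestore.Exists); err != nil { // exists precondition
		// handle err
	}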
+ +description: "delete: delete without precondition" +delete: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + request: < + database: "projects/projectID/databases/(default)" + writes: < + delete: "projects/projectID/databases/(default)/documents/C/d" + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto new file mode 100644 index 0000000..5798f5f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/delete-time-precond.textproto @@ -0,0 +1,25 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Delete supports a last-update-time precondition. + +description: "delete: delete with last-update-time precondition" +delete: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + update_time: < + seconds: 42 + > + > + request: < + database: "projects/projectID/databases/(default)" + writes: < + delete: "projects/projectID/databases/(default)/documents/C/d" + current_document: < + update_time: < + seconds: 42 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto new file mode 100644 index 0000000..2a44816 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/get-basic.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to DocumentRef.Get. + +description: "get: get a document" +get: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + request: < + name: "projects/projectID/databases/(default)/documents/C/d" + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto new file mode 100644 index 0000000..e9b292e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-basic.textproto @@ -0,0 +1,24 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. + +description: "set: basic" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto new file mode 100644 index 0000000..6ec1950 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-complex.textproto @@ -0,0 +1,58 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. 
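The case that follows corresponds to a plain Set call with nested data; a sketch (assumptions as above):

	_, err := c.Doc("C/d").Set(ctx, map[string]interface{}{
		"a": []interface{}{1, 2.5},
		"b": map[string]interface{}{
			"c": []interface{}{"three", map[string]interface{}{"d": true}},
		},
	})
	if err != nil {
		// handle err; slices encode to array_value, maps to map_value
	}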
+ +description: "set: complex" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto new file mode 100644 index 0000000..bbf6a3d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray-nested.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "set: Delete cannot be anywhere inside an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto new file mode 100644 index 0000000..07fc649 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-noarray.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "set: Delete cannot be in an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"Delete\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto new file mode 100644 index 0000000..cb6ef4f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-del-nomerge.textproto @@ -0,0 +1,17 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The client signals an error if the Delete sentinel is in the input data, but not +# selected by a merge option, because this is most likely a programming bug. 
+ +description: "set-merge: Delete cannot appear in an unmerged field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + > + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto new file mode 100644 index 0000000..c2b73d3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-empty.textproto @@ -0,0 +1,17 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + + +description: "set: creating or setting an empty map" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto new file mode 100644 index 0000000..68690f6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-fp.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A merge with fields that use special characters. + +description: "set-merge: Merge with FieldPaths" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "*" + field: "~" + > + > + json_data: "{\"*\": {\"~\": true}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "~" + value: < + boolean_value: true + > + > + > + > + > + > + update_mask: < + field_paths: "`*`.`~`" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto new file mode 100644 index 0000000..0d12828 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nested.textproto @@ -0,0 +1,41 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A merge option where the field is not at top level. Only fields mentioned in the +# option are present in the update operation. 
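In current versions of the Go client a field-level merge is expressed with firestore.Merge and a FieldPath; the vendored code here may predate that exact signature, so treat this as a sketch only (c and ctx assumed):

	_, err := c.Doc("C/d").Set(ctx, map[string]interface{}{
		"h": map[string]interface{}{"g": 4, "f": 5},
	}, firestore.Merge(firestore.FieldPath{"h", "g"}))
	if err != nil {
		// handle err; only "h.g" is written, "h.f" is pruned from the update
	}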
+ +description: "set-merge: Merge with a nested field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + field: "g" + > + > + json_data: "{\"h\": {\"g\": 4, \"f\": 5}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "g" + value: < + integer_value: 4 + > + > + > + > + > + > + update_mask: < + field_paths: "h.g" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto new file mode 100644 index 0000000..d1990e7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nonleaf.textproto @@ -0,0 +1,46 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field path is in a merge option, the value at that path replaces the stored +# value. That is true even if the value is complex. + +description: "set-merge: Merge field is not a leaf" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "h" + > + > + json_data: "{\"h\": {\"g\": 5, \"f\": 6}, \"e\": 7}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "f" + value: < + integer_value: 6 + > + > + fields: < + key: "g" + value: < + integer_value: 5 + > + > + > + > + > + > + update_mask: < + field_paths: "h" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-nowrite.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nowrite.textproto new file mode 100644 index 0000000..3222230 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-nowrite.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If all the fields in the merge option have ServerTimestamp values, then no +# update operation is produced, only a transform. + +description: "set-merge: If no ordinary values in Merge, no write" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "b" + > + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto new file mode 100644 index 0000000..f6665de --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge-present.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The client signals an error if a merge option mentions a path that is not in the +# input data. 
+ +description: "set-merge: Merge fields must all be present in data" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "b" + > + fields: < + field: "a" + > + > + json_data: "{\"a\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto new file mode 100644 index 0000000..2791252 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-merge.textproto @@ -0,0 +1,32 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Fields in the input data but not in a merge option are pruned. + +description: "set-merge: Merge with a field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + > + json_data: "{\"a\": 1, \"b\": 2}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto new file mode 100644 index 0000000..1c6615b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-empty.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# It makes no sense to specify MergeAll and provide no data, so we disallow it on +# the client. + +description: "set: MergeAll cannot be specified with empty data." +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto new file mode 100644 index 0000000..1fbc697 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall-nested.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# MergeAll with nested fields results in an update mask that includes entries for +# all the leaf fields. 
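A sketch of the MergeAll form the next case encodes (assumptions as above):

	_, err := c.Doc("C/d").Set(ctx, map[string]interface{}{
		"h": map[string]interface{}{"g": 3, "f": 4},
	}, firestore.MergeAll)
	if err != nil {
		// handle err; the update mask lists the leaf paths "h.f" and "h.g"
	}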
+ +description: "set: MergeAll with nested fields" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"h\": { \"g\": 3, \"f\": 4 }}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "f" + value: < + integer_value: 4 + > + > + fields: < + key: "g" + value: < + integer_value: 3 + > + > + > + > + > + > + update_mask: < + field_paths: "h.f" + field_paths: "h.g" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto new file mode 100644 index 0000000..cb2ebc5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-mergeall.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The MergeAll option with a simple piece of data. + +description: "set: MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": 1, \"b\": 2}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + fields: < + key: "b" + value: < + integer_value: 2 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto new file mode 100644 index 0000000..0fb887d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-nodel.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel cannot be used in Create, or in Set without a Merge option. + +description: "set: Delete cannot appear in data" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto new file mode 100644 index 0000000..0ff3fad --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-nosplit.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not split on dots. 
+ +description: "set: don\342\200\231t split on dots" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a.b" + value: < + map_value: < + fields: < + key: "c.d" + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "e" + value: < + integer_value: 2 + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto new file mode 100644 index 0000000..f4122c9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-special-chars.textproto @@ -0,0 +1,38 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Create and Set treat their map keys literally. They do not escape special +# characters. + +description: "set: non-alpha characters in map keys" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "." + value: < + integer_value: 1 + > + > + > + > + > + fields: < + key: "~" + value: < + integer_value: 2 + > + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto new file mode 100644 index 0000000..16ce4cf --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone-mergeall.textproto @@ -0,0 +1,26 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "set: ServerTimestamp alone with MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto new file mode 100644 index 0000000..6ce46d7 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-alone.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then an update operation +# with an empty map should be produced. 
+ +description: "set: ServerTimestamp alone" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto new file mode 100644 index 0000000..5cc7bbc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-merge-both.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Just as when no merge option is specified, ServerTimestamp sentinel values are +# removed from the data in the update operation and become transforms. + +description: "set-merge: ServerTimestamp with Merge of both fields" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + fields: < + field: "b" + > + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto new file mode 100644 index 0000000..b8c53a5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-mergeall.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Just as when no merge option is specified, ServerTimestamp sentinel values are +# removed from the data in the update operation and become transforms. + +description: "set: ServerTimestamp with MergeAll" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + all: true + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto new file mode 100644 index 0000000..375ec18 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-multi.textproto @@ -0,0 +1,38 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". + +description: "set: multiple ServerTimestamp fields" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto new file mode 100644 index 0000000..abfd2e8 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-nested.textproto @@ -0,0 +1,35 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. + +description: "set: nested ServerTimestamp field" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto new file mode 100644 index 0000000..241d791 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray-nested.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. 
+ +description: "set: ServerTimestamp cannot be anywhere inside an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto new file mode 100644 index 0000000..591fb03 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-noarray.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "set: ServerTimestamp cannot be in an array value" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto new file mode 100644 index 0000000..20c0ae1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st-nomerge.textproto @@ -0,0 +1,33 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the ServerTimestamp value is not mentioned in a merge option, then it is +# pruned from the data but does not result in a transform. + +description: "set-merge: If is ServerTimestamp not in Merge, no transform" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + option: < + fields: < + field: "a" + > + > + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/set-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/set-st.textproto new file mode 100644 index 0000000..8bceddc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/set-st.textproto @@ -0,0 +1,36 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. 
+ +description: "set: ServerTimestamp with data" +set: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto new file mode 100644 index 0000000..656ff53 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-badchar.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The keys of the data given to Update are interpreted, unlike those of Create and +# Set. They cannot contain special characters. + +description: "update: invalid character" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a~b\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto new file mode 100644 index 0000000..9da316f --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-basic.textproto @@ -0,0 +1,30 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. + +description: "update: basic" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto new file mode 100644 index 0000000..1a6d9ef --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-complex.textproto @@ -0,0 +1,65 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. 
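For the update group that follows: in the current Go client, Update takes a slice of firestore.Update values, and dotted paths are interpreted (unlike Create and Set). A sketch, with the same assumptions as above:

	_, err := c.Doc("C/d").Update(ctx, []firestore.Update{
		{Path: "a", Value: 1},
		{Path: "b.c", Value: firestore.Delete}, // deletes the nested field b.c
	})
	if err != nil {
		// handle err; deleted fields appear only in the update mask
	}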
+ +description: "update: complex" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto new file mode 100644 index 0000000..8f55823 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-alone.textproto @@ -0,0 +1,25 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the input data consists solely of Deletes, then the update operation has no +# map, just an update mask. + +description: "update: Delete alone" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"Delete\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto new file mode 100644 index 0000000..c0ebdf6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-dot.textproto @@ -0,0 +1,46 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# After expanding top-level dotted fields, fields with Delete values are pruned +# from the output data, but appear in the update mask. + +description: "update: Delete with a dotted field" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b.c\": \"Delete\", \"b.d\": 2}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "d" + value: < + integer_value: 2 + > + > + > + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b.c" + field_paths: "b.d" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto new file mode 100644 index 0000000..ed10269 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-nested.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a top-level key. + +description: "update: Delete cannot be nested" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": {\"b\": \"Delete\"}}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto new file mode 100644 index 0000000..a2eec49 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray-nested.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update: Delete cannot be anywhere inside an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto new file mode 100644 index 0000000..a7eea87 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del-noarray.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update: Delete cannot be in an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"Delete\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-del.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-del.textproto new file mode 100644 index 0000000..ec443e6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-del.textproto @@ -0,0 +1,32 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field's value is the Delete sentinel, then it doesn't appear in the update +# data, but does in the mask. 
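+#
+# Editorial sketch (not generated): deleting one field while updating another,
+# assuming `ctx`, `client`, and the current []firestore.Update API:
+#
+#   _, err := client.Doc("C/d").Update(ctx, []firestore.Update{
+#       {Path: "a", Value: 1},
+#       {Path: "b", Value: firestore.Delete}, // no value sent; "b" is mask-only
+#   })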
+ +description: "update: Delete" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"Delete\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto new file mode 100644 index 0000000..3c6fef4 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-exists-precond.textproto @@ -0,0 +1,14 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method does not support an explicit exists precondition. + +description: "update: Exists precondition is invalid" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + exists: true + > + json_data: "{\"a\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto new file mode 100644 index 0000000..c3bceff --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-fp-empty-component.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Empty fields are not allowed. + +description: "update: empty field path component" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a..b\": 1}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto new file mode 100644 index 0000000..b524b74 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-no-paths.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# It is a client-side error to call Update with empty data. + +description: "update: no paths" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto new file mode 100644 index 0000000..515f29d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-basic.textproto @@ -0,0 +1,33 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A simple call, resulting in a single update operation. 
+ +description: "update-paths: basic" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "1" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto new file mode 100644 index 0000000..38a8322 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-complex.textproto @@ -0,0 +1,72 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A call to a write method with complicated input data. + +description: "update-paths: complex" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "[1, 2.5]" + json_values: "{\"c\": [\"three\", {\"d\": true}]}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + array_value: < + values: < + integer_value: 1 + > + values: < + double_value: 2.5 + > + > + > + > + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + array_value: < + values: < + string_value: "three" + > + values: < + map_value: < + fields: < + key: "d" + value: < + boolean_value: true + > + > + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto new file mode 100644 index 0000000..5dbb787 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-alone.textproto @@ -0,0 +1,28 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the input data consists solely of Deletes, then the update operation has no +# map, just an update mask. + +description: "update-paths: Delete alone" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "\"Delete\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto new file mode 100644 index 0000000..bdf65fb --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-nested.textproto @@ -0,0 +1,14 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a top-level key. + +description: "update-paths: Delete cannot be nested" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "{\"b\": \"Delete\"}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto new file mode 100644 index 0000000..d3da15d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray-nested.textproto @@ -0,0 +1,16 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update-paths: Delete cannot be anywhere inside an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, {\"b\": \"Delete\"}]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto new file mode 100644 index 0000000..9ebdd09 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del-noarray.textproto @@ -0,0 +1,16 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Delete sentinel must be the value of a field. Deletes are implemented by +# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not +# support array indexing. + +description: "update-paths: Delete cannot be in an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, 2, \"Delete\"]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto new file mode 100644 index 0000000..5197a78 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-del.textproto @@ -0,0 +1,39 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If a field's value is the Delete sentinel, then it doesn't appear in the update +# data, but does in the mask. 
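+#
+# Editorial sketch (not generated): a path-based Delete, assuming `ctx` and
+# `client`. A deleted field contributes only a mask entry, so an update made
+# up entirely of Deletes carries no field map at all:
+#
+#   _, err := client.Doc("C/d").Update(ctx, []firestore.Update{
+#       {FieldPath: firestore.FieldPath{"a"}, Value: firestore.Delete},
+#   })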
+ +description: "update-paths: Delete" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "1" + json_values: "\"Delete\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto new file mode 100644 index 0000000..084e077 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-exists-precond.textproto @@ -0,0 +1,17 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method does not support an explicit exists precondition. + +description: "update-paths: Exists precondition is invalid" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + exists: true + > + field_paths: < + field: "a" + > + json_values: "1" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto new file mode 100644 index 0000000..fedbd3a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-dup.textproto @@ -0,0 +1,22 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The same field cannot occur more than once. + +description: "update-paths: duplicate field path" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + field_paths: < + field: "a" + > + json_values: "1" + json_values: "2" + json_values: "3" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto new file mode 100644 index 0000000..7a5df25 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty-component.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Empty fields are not allowed. + +description: "update-paths: empty field path component" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "*" + field: "" + > + json_values: "1" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto new file mode 100644 index 0000000..311e309 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-empty.textproto @@ -0,0 +1,13 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A FieldPath of length zero is invalid. + +description: "update-paths: empty field path" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + > + json_values: "1" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto new file mode 100644 index 0000000..9ba41e3 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-multi.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The UpdatePaths or equivalent method takes a list of FieldPaths. Each FieldPath +# is a sequence of uninterpreted path components. + +description: "update-paths: multiple-element field path" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + field: "b" + > + json_values: "1" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + map_value: < + fields: < + key: "b" + value: < + integer_value: 1 + > + > + > + > + > + > + update_mask: < + field_paths: "a.b" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto new file mode 100644 index 0000000..5164952 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-fp-nosplit.textproto @@ -0,0 +1,48 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# FieldPath components are not split on dots. + +description: "update-paths: FieldPath elements are not split on dots" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a.b" + field: "f.g" + > + json_values: "{\"n.o\": 7}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a.b" + value: < + map_value: < + fields: < + key: "f.g" + value: < + map_value: < + fields: < + key: "n.o" + value: < + integer_value: 7 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "`a.b`.`f.g`" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto new file mode 100644 index 0000000..d9939dc --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-no-paths.textproto @@ -0,0 +1,10 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# It is a client-side error to call Update with empty data. 
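+#
+# Editorial note (not generated): the error surfaces before any RPC is made,
+# e.g. (assuming `ctx` and `client`) a call such as
+#
+#   _, err := client.Doc("C/d").Update(ctx, nil)
+#
+# fails immediately because there is nothing to update.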
+ +description: "update-paths: no paths" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto new file mode 100644 index 0000000..1710b91 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-1.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. + +description: "update-paths: prefix #1" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + field: "b" + > + field_paths: < + field: "a" + > + json_values: "1" + json_values: "2" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto new file mode 100644 index 0000000..be78ab5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-2.textproto @@ -0,0 +1,19 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. + +description: "update-paths: prefix #2" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "a" + field: "b" + > + json_values: "1" + json_values: "2" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto new file mode 100644 index 0000000..b8a84c9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-prefix-3.textproto @@ -0,0 +1,20 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another, even if the values +# could in principle be combined. + +description: "update-paths: prefix #3" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "a" + field: "d" + > + json_values: "{\"b\": 1}" + json_values: "2" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto new file mode 100644 index 0000000..51cb33b --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-special-chars.textproto @@ -0,0 +1,53 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# FieldPaths can contain special characters. 
+ +description: "update-paths: special characters" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "*" + field: "~" + > + field_paths: < + field: "*" + field: "`" + > + json_values: "1" + json_values: "2" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "*" + value: < + map_value: < + fields: < + key: "`" + value: < + integer_value: 2 + > + > + fields: < + key: "~" + value: < + integer_value: 1 + > + > + > + > + > + > + update_mask: < + field_paths: "`*`.`\\``" + field_paths: "`*`.`~`" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto new file mode 100644 index 0000000..abc44f5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-alone.textproto @@ -0,0 +1,29 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "update-paths: ServerTimestamp alone" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "\"ServerTimestamp\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto new file mode 100644 index 0000000..b0b7df1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-multi.textproto @@ -0,0 +1,56 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". 
+ +description: "update-paths: multiple ServerTimestamp fields" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + field_paths: < + field: "c" + > + json_values: "1" + json_values: "\"ServerTimestamp\"" + json_values: "{\"d\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "c" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto new file mode 100644 index 0000000..3077368 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-nested.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. + +description: "update-paths: nested ServerTimestamp field" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "1" + json_values: "{\"c\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto new file mode 100644 index 0000000..2c2cb89 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray-nested.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. 
+ +description: "update-paths: ServerTimestamp cannot be anywhere inside an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, {\"b\": \"ServerTimestamp\"}]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto new file mode 100644 index 0000000..a2baa66 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st-noarray.textproto @@ -0,0 +1,15 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "update-paths: ServerTimestamp cannot be in an array value" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + json_values: "[1, 2, \"ServerTimestamp\"]" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto new file mode 100644 index 0000000..40634c1 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-st.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. + +description: "update-paths: ServerTimestamp with data" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + field_paths: < + field: "a" + > + field_paths: < + field: "b" + > + json_values: "1" + json_values: "\"ServerTimestamp\"" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto new file mode 100644 index 0000000..7a15874 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-paths-uptime.textproto @@ -0,0 +1,40 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update call supports a last-update-time precondition. 
+ +description: "update-paths: last-update-time precondition" +update_paths: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + update_time: < + seconds: 42 + > + > + field_paths: < + field: "a" + > + json_values: "1" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + update_time: < + seconds: 42 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto new file mode 100644 index 0000000..e5c895e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-1.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. + +description: "update: prefix #1" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a.b\": 1, \"a\": 2}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto new file mode 100644 index 0000000..4870176 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-2.textproto @@ -0,0 +1,11 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another. + +description: "update: prefix #2" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"a.b\": 2}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto new file mode 100644 index 0000000..0c03b0d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-prefix-3.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In the input data, one field cannot be a prefix of another, even if the values +# could in principle be combined. + +description: "update: prefix #3" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": {\"b\": 1}, \"a.d\": 2}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto new file mode 100644 index 0000000..20e530a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-quoting.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# In a field path, any component beginning with a non-letter or underscore is +# quoted. 
+ +description: "update: non-letter starting chars are quoted, except underscore" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"_0.1.+2\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "_0" + value: < + map_value: < + fields: < + key: "1" + value: < + map_value: < + fields: < + key: "+2" + value: < + integer_value: 1 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "_0.`1`.`+2`" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto new file mode 100644 index 0000000..d1b0ca0 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-split-top-level.textproto @@ -0,0 +1,45 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method splits only top-level keys at dots. Keys at other levels are +# taken literally. + +description: "update: Split on dots for top-level keys only" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"h.g\": {\"j.k\": 6}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "h" + value: < + map_value: < + fields: < + key: "g" + value: < + map_value: < + fields: < + key: "j.k" + value: < + integer_value: 6 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "h.g" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-split.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-split.textproto new file mode 100644 index 0000000..b96fd6a --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-split.textproto @@ -0,0 +1,44 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update method splits top-level keys at dots. + +description: "update: split on dots" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a.b.c\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + map_value: < + fields: < + key: "b" + value: < + map_value: < + fields: < + key: "c" + value: < + integer_value: 1 + > + > + > + > + > + > + > + > + > + update_mask: < + field_paths: "a.b.c" + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto new file mode 100644 index 0000000..0d5ab6e --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-alone.textproto @@ -0,0 +1,26 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. 
+ +# If the only values in the input are ServerTimestamps, then no update operation +# should be produced. + +description: "update: ServerTimestamp alone" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto new file mode 100644 index 0000000..19d4d18 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-dot.textproto @@ -0,0 +1,27 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# Like other uses of ServerTimestamp, the data is pruned and the field does not +# appear in the update mask, because it is in the transform. In this case An +# update operation is produced just to hold the precondition. + +description: "update: ServerTimestamp with dotted field" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a.b.c\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "a.b.c" + set_to_server_value: REQUEST_TIME + > + > + current_document: < + exists: true + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto new file mode 100644 index 0000000..0434cb5 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-multi.textproto @@ -0,0 +1,49 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A document can have more than one ServerTimestamp field. Since all the +# ServerTimestamp fields are removed, the only field in the update is "a". + +# b is not in the mask because it will be set in the transform. c must be in the +# mask: it should be replaced entirely. The transform will set c.d to the +# timestamp, but the update will delete the rest of c. 
+ +description: "update: multiple ServerTimestamp fields" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\", \"c\": {\"d\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "c" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + field_transforms: < + field_path: "c.d" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto new file mode 100644 index 0000000..f79d9c6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-nested.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A ServerTimestamp value can occur at any depth. In this case, the transform +# applies to the field path "b.c". Since "c" is removed from the update, "b" +# becomes empty, so it is also removed from the update. + +description: "update: nested ServerTimestamp field" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": {\"c\": \"ServerTimestamp\"}}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + field_paths: "b" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b.c" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto new file mode 100644 index 0000000..2939dd6 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray-nested.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# There cannot be an array value anywhere on the path from the document root to +# the ServerTimestamp sentinel. Firestore transforms don't support array indexing. + +description: "update: ServerTimestamp cannot be anywhere inside an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, {\"b\": \"ServerTimestamp\"}]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto new file mode 100644 index 0000000..f3879cd --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st-noarray.textproto @@ -0,0 +1,12 @@ +# DO NOT MODIFY. 
This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The ServerTimestamp sentinel must be the value of a field. Firestore transforms +# don't support array indexing. + +description: "update: ServerTimestamp cannot be in an array value" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": [1, 2, \"ServerTimestamp\"]}" + is_error: true +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-st.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-st.textproto new file mode 100644 index 0000000..12045a9 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-st.textproto @@ -0,0 +1,42 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# A key with the special ServerTimestamp sentinel is removed from the data in the +# update operation. Instead it appears in a separate Transform operation. Note +# that in these tests, the string "ServerTimestamp" should be replaced with the +# special ServerTimestamp value. + +description: "update: ServerTimestamp with data" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + json_data: "{\"a\": 1, \"b\": \"ServerTimestamp\"}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + exists: true + > + > + writes: < + transform: < + document: "projects/projectID/databases/(default)/documents/C/d" + field_transforms: < + field_path: "b" + set_to_server_value: REQUEST_TIME + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto b/vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto new file mode 100644 index 0000000..66119ac --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/testdata/update-uptime.textproto @@ -0,0 +1,37 @@ +# DO NOT MODIFY. This file was generated by +# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go. + +# The Update call supports a last-update-time precondition. + +description: "update: last-update-time precondition" +update: < + doc_ref_path: "projects/projectID/databases/(default)/documents/C/d" + precondition: < + update_time: < + seconds: 42 + > + > + json_data: "{\"a\": 1}" + request: < + database: "projects/projectID/databases/(default)" + writes: < + update: < + name: "projects/projectID/databases/(default)/documents/C/d" + fields: < + key: "a" + value: < + integer_value: 1 + > + > + > + update_mask: < + field_paths: "a" + > + current_document: < + update_time: < + seconds: 42 + > + > + > + > +> diff --git a/vendor/cloud.google.com/go/firestore/to_value.go b/vendor/cloud.google.com/go/firestore/to_value.go new file mode 100644 index 0000000..0c6fe93 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/to_value.go @@ -0,0 +1,270 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "errors" + "fmt" + "reflect" + "time" + + "cloud.google.com/go/internal/fields" + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + "google.golang.org/genproto/googleapis/type/latlng" +) + +var nullValue = &pb.Value{&pb.Value_NullValue{}} + +var ( + typeOfByteSlice = reflect.TypeOf([]byte{}) + typeOfGoTime = reflect.TypeOf(time.Time{}) + typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil)) + typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil)) +) + +// toProtoValue converts a Go value to a Firestore Value protobuf. +// Some corner cases: +// - All nils (nil interface, nil slice, nil map, nil pointer) are converted to +// a NullValue (not a nil *pb.Value). toProtoValue never returns (nil, false, nil). +// It returns (nil, true, nil) if everything in the value is ServerTimestamp. +// - An error is returned for uintptr, uint and uint64, because Firestore uses +// an int64 to represent integral values, and those types can't be properly +// represented in an int64. +// - An error is returned for the special Delete value. +func toProtoValue(v reflect.Value) (pbv *pb.Value, sawServerTimestamp bool, err error) { + if !v.IsValid() { + return nullValue, false, nil + } + vi := v.Interface() + if vi == Delete { + return nil, false, errors.New("firestore: cannot use Delete in value") + } + if vi == ServerTimestamp { + return nil, false, errors.New("firestore: must use ServerTimestamp as a map value") + } + switch x := vi.(type) { + case []byte: + return &pb.Value{&pb.Value_BytesValue{x}}, false, nil + case time.Time: + ts, err := ptypes.TimestampProto(x) + if err != nil { + return nil, false, err + } + return &pb.Value{&pb.Value_TimestampValue{ts}}, false, nil + case *latlng.LatLng: + if x == nil { + // gRPC doesn't like nil oneofs. Use NullValue. + return nullValue, false, nil + } + return &pb.Value{&pb.Value_GeoPointValue{x}}, false, nil + case *DocumentRef: + if x == nil { + // gRPC doesn't like nil oneofs. Use NullValue. + return nullValue, false, nil + } + return &pb.Value{&pb.Value_ReferenceValue{x.Path}}, false, nil + // Do not add bool, string, int, etc. to this switch; leave them in the + // reflect-based switch below. Moving them here would drop support for + // types whose underlying types are those primitives. + // E.g. Given "type mybool bool", an ordinary type switch on bool will + // not catch a mybool, but the reflect.Kind of a mybool is reflect.Bool. 
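+	// (Editorial example, not in the original source: given "type mybool
+	// bool", a mybool value reaches the reflect.Bool case below and is
+	// encoded as a BooleanValue, which a "case bool:" arm in the type
+	// switch above would never see.)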
+ } + switch v.Kind() { + case reflect.Bool: + return &pb.Value{&pb.Value_BooleanValue{v.Bool()}}, false, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return &pb.Value{&pb.Value_IntegerValue{v.Int()}}, false, nil + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return &pb.Value{&pb.Value_IntegerValue{int64(v.Uint())}}, false, nil + case reflect.Float32, reflect.Float64: + return &pb.Value{&pb.Value_DoubleValue{v.Float()}}, false, nil + case reflect.String: + return &pb.Value{&pb.Value_StringValue{v.String()}}, false, nil + case reflect.Slice: + return sliceToProtoValue(v) + case reflect.Map: + return mapToProtoValue(v) + case reflect.Struct: + return structToProtoValue(v) + case reflect.Ptr: + if v.IsNil() { + return nullValue, false, nil + } + return toProtoValue(v.Elem()) + case reflect.Interface: + if v.NumMethod() == 0 { // empty interface: recurse on its contents + return toProtoValue(v.Elem()) + } + fallthrough // any other interface value is an error + + default: + return nil, false, fmt.Errorf("firestore: cannot convert type %s to value", v.Type()) + } +} + +func sliceToProtoValue(v reflect.Value) (*pb.Value, bool, error) { + // A nil slice is converted to a null value. + if v.IsNil() { + return nullValue, false, nil + } + vals := make([]*pb.Value, v.Len()) + for i := 0; i < v.Len(); i++ { + val, sawServerTimestamp, err := toProtoValue(v.Index(i)) + if err != nil { + return nil, false, err + } + if sawServerTimestamp { + return nil, false, errors.New("firestore: ServerTimestamp cannot occur in an array") + } + vals[i] = val + } + return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{vals}}}, false, nil +} + +func mapToProtoValue(v reflect.Value) (*pb.Value, bool, error) { + if v.Type().Key().Kind() != reflect.String { + return nil, false, errors.New("firestore: map key type must be string") + } + // A nil map is converted to a null value. + if v.IsNil() { + return nullValue, false, nil + } + m := map[string]*pb.Value{} + sawServerTimestamp := false + for _, k := range v.MapKeys() { + mi := v.MapIndex(k) + if mi.Interface() == ServerTimestamp { + sawServerTimestamp = true + continue + } + val, sst, err := toProtoValue(mi) + if err != nil { + return nil, false, err + } + if sst { + sawServerTimestamp = true + } + if val == nil { // value was a map with all ServerTimestamp values + continue + } + m[k.String()] = val + } + var pv *pb.Value + if len(m) == 0 && sawServerTimestamp { + // The entire map consisted of ServerTimestamp values. + pv = nil + } else { + pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}} + } + return pv, sawServerTimestamp, nil +} + +func structToProtoValue(v reflect.Value) (*pb.Value, bool, error) { + m := map[string]*pb.Value{} + fields, err := fieldCache.Fields(v.Type()) + if err != nil { + return nil, false, err + } + sawServerTimestamp := false + for _, f := range fields { + fv := v.FieldByIndex(f.Index) + opts := f.ParsedTag.(tagOptions) + if opts.serverTimestamp { + // TODO(jba): should we return a non-zero time? + sawServerTimestamp = true + continue + } + if opts.omitEmpty && isEmptyValue(fv) { + continue + } + val, sst, err := toProtoValue(fv) + if err != nil { + return nil, false, err + } + if sst { + sawServerTimestamp = true + } + if val == nil { // value was a map with all ServerTimestamp values + continue + } + m[f.Name] = val + } + var pv *pb.Value + if len(m) == 0 && sawServerTimestamp { + // The entire struct consisted of ServerTimestamp or omitempty values. 
+ pv = nil + } else { + pv = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}} + } + return pv, sawServerTimestamp, nil +} + +type tagOptions struct { + omitEmpty bool // do not marshal value if empty + serverTimestamp bool // set time.Time to server timestamp on write +} + +// parseTag interprets firestore struct field tags. +func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + name, keep, opts, err := fields.ParseStandardTag("firestore", t) + if err != nil { + return "", false, nil, fmt.Errorf("firestore: %v", err) + } + tagOpts := tagOptions{} + for _, opt := range opts { + switch opt { + case "omitempty": + tagOpts.omitEmpty = true + case "serverTimestamp": + tagOpts.serverTimestamp = true + default: + return "", false, nil, fmt.Errorf("firestore: unknown tag option: %q", opt) + } + } + return name, keep, tagOpts, nil +} + +// isLeafType determines whether or not a type is a 'leaf type' +// and should not be recursed into, but considered one field. +func isLeafType(t reflect.Type) bool { + return t == typeOfGoTime || t == typeOfLatLng +} + +var fieldCache = fields.NewCache(parseTag, nil, isLeafType) + +// isEmptyValue is taken from the encoding/json package in the +// standard library. +// TODO(jba): move to the fields package +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + if v.Type() == typeOfGoTime { + return v.Interface().(time.Time).IsZero() + } + return false +} diff --git a/vendor/cloud.google.com/go/firestore/to_value_test.go b/vendor/cloud.google.com/go/firestore/to_value_test.go new file mode 100644 index 0000000..ae58a13 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/to_value_test.go @@ -0,0 +1,268 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "fmt" + "reflect" + "testing" + "time" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "google.golang.org/genproto/googleapis/type/latlng" +) + +type testStruct1 struct { + B bool + I int + U uint32 + F float64 + S string + Y []byte + T time.Time + G *latlng.LatLng + L []int + M map[string]int + P *int +} + +var ( + p = new(int) + + testVal1 = testStruct1{ + B: true, + I: 1, + U: 2, + F: 3.0, + S: "four", + Y: []byte{5}, + T: tm, + G: ll, + L: []int{6}, + M: map[string]int{"a": 7}, + P: p, + } + + mapVal1 = mapval(map[string]*pb.Value{ + "B": boolval(true), + "I": intval(1), + "U": intval(2), + "F": floatval(3), + "S": &pb.Value{&pb.Value_StringValue{"four"}}, + "Y": bytesval([]byte{5}), + "T": tsval(tm), + "G": geoval(ll), + "L": arrayval(intval(6)), + "M": mapval(map[string]*pb.Value{"a": intval(7)}), + "P": intval(8), + }) +) + +func TestToProtoValue(t *testing.T) { + *p = 8 + for _, test := range []struct { + in interface{} + want *pb.Value + }{ + {nil, nullValue}, + {[]int(nil), nullValue}, + {map[string]int(nil), nullValue}, + {(*testStruct1)(nil), nullValue}, + {(*latlng.LatLng)(nil), nullValue}, + {(*DocumentRef)(nil), nullValue}, + {true, boolval(true)}, + {3, intval(3)}, + {uint32(3), intval(3)}, + {1.5, floatval(1.5)}, + {"str", strval("str")}, + {[]byte{1, 2}, bytesval([]byte{1, 2})}, + {tm, tsval(tm)}, + {ll, geoval(ll)}, + {[]int{1, 2}, arrayval(intval(1), intval(2))}, + {&[]int{1, 2}, arrayval(intval(1), intval(2))}, + {[]int{}, arrayval()}, + {map[string]int{"a": 1, "b": 2}, + mapval(map[string]*pb.Value{"a": intval(1), "b": intval(2)})}, + {map[string]int{}, mapval(map[string]*pb.Value{})}, + {p, intval(8)}, + {&p, intval(8)}, + {map[string]interface{}{"a": 1, "p": p, "s": "str"}, + mapval(map[string]*pb.Value{"a": intval(1), "p": intval(8), "s": strval("str")})}, + {map[string]fmt.Stringer{"a": tm}, + mapval(map[string]*pb.Value{"a": tsval(tm)})}, + {testVal1, mapVal1}, + { + &DocumentRef{ + ID: "d", + Path: "projects/P/databases/D/documents/c/d", + Parent: &CollectionRef{ + ID: "c", + parentPath: "projects/P/databases/D", + Path: "projects/P/databases/D/documents/c", + Query: Query{collectionID: "c", parentPath: "projects/P/databases/D"}, + }, + }, + refval("projects/P/databases/D/documents/c/d"), + }, + // ServerTimestamps are removed, possibly leaving nil. 
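+		// In each case below where every leaf value is ServerTimestamp,
+		// toProtoValue returns a nil *pb.Value, so want is nil; only the
+		// final case, which keeps the plain field "e", yields a map value.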
+ {map[string]interface{}{"a": ServerTimestamp}, nil}, + { + map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": ServerTimestamp, + }, + }, + }, + nil, + }, + { + map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": ServerTimestamp, + "d": ServerTimestamp, + }, + }, + }, + nil, + }, + { + map[string]interface{}{ + "a": map[string]interface{}{ + "b": map[string]interface{}{ + "c": ServerTimestamp, + "d": ServerTimestamp, + "e": 1, + }, + }, + }, + mapval(map[string]*pb.Value{ + "a": mapval(map[string]*pb.Value{ + "b": mapval(map[string]*pb.Value{"e": intval(1)}), + }), + }), + }, + } { + got, _, err := toProtoValue(reflect.ValueOf(test.in)) + if err != nil { + t.Errorf("%v (%T): %v", test.in, test.in, err) + continue + } + if !testEqual(got, test.want) { + t.Errorf("%+v (%T):\ngot\n%+v\nwant\n%+v", test.in, test.in, got, test.want) + } + } +} + +type stringy struct{} + +func (stringy) String() string { return "stringy" } + +func TestToProtoValueErrors(t *testing.T) { + for _, in := range []interface{}{ + uint64(0), // a bad fit for int64 + map[int]bool{}, // map key type is not string + make(chan int), // can't handle type + map[string]fmt.Stringer{"a": stringy{}}, // only empty interfaces + ServerTimestamp, // ServerTimestamp can only be a field value + []interface{}{ServerTimestamp}, + map[string]interface{}{"a": []interface{}{ServerTimestamp}}, + map[string]interface{}{"a": []interface{}{ + map[string]interface{}{"b": ServerTimestamp}, + }}, + Delete, // Delete should never appear + []interface{}{Delete}, + map[string]interface{}{"a": Delete}, + map[string]interface{}{"a": []interface{}{Delete}}, + } { + _, _, err := toProtoValue(reflect.ValueOf(in)) + if err == nil { + t.Errorf("%v: got nil, want error", in) + } + } +} + +type testStruct2 struct { + Ignore int `firestore:"-"` + Rename int `firestore:"a"` + OmitEmpty int `firestore:",omitempty"` + OmitEmptyTime time.Time `firestore:",omitempty"` +} + +func TestToProtoValueTags(t *testing.T) { + in := &testStruct2{ + Ignore: 1, + Rename: 2, + OmitEmpty: 3, + OmitEmptyTime: aTime, + } + got, _, err := toProtoValue(reflect.ValueOf(in)) + if err != nil { + t.Fatal(err) + } + want := mapval(map[string]*pb.Value{ + "a": intval(2), + "OmitEmpty": intval(3), + "OmitEmptyTime": tsval(aTime), + }) + if !testEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + got, _, err = toProtoValue(reflect.ValueOf(testStruct2{})) + if err != nil { + t.Fatal(err) + } + want = mapval(map[string]*pb.Value{"a": intval(0)}) + if !testEqual(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +func TestToProtoValueEmbedded(t *testing.T) { + // Embedded time.Time or LatLng should behave like non-embedded. 
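+	// Both are leaf types per isLeafType, so each embedded field is
+	// encoded as a single "Time" or "LatLng" value rather than being
+	// flattened into its exported subfields.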
+	type embed struct {
+		time.Time
+		*latlng.LatLng
+	}
+
+	got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll}))
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := mapval(map[string]*pb.Value{
+		"Time":   tsval(tm),
+		"LatLng": geoval(ll),
+	})
+	if !testEqual(got, want) {
+		t.Errorf("got %+v, want %+v", got, want)
+	}
+}
+
+func TestIsEmpty(t *testing.T) {
+	for _, e := range []interface{}{int(0), float32(0), false, "", []int{}, []int(nil), (*int)(nil)} {
+		if !isEmptyValue(reflect.ValueOf(e)) {
+			t.Errorf("%v (%T): want true, got false", e, e)
+		}
+	}
+	i := 3
+	for _, n := range []interface{}{int(1), float32(1), true, "x", []int{1}, &i} {
+		if isEmptyValue(reflect.ValueOf(n)) {
+			t.Errorf("%v (%T): want false, got true", n, n)
+		}
+	}
+}
diff --git a/vendor/cloud.google.com/go/firestore/transaction.go b/vendor/cloud.google.com/go/firestore/transaction.go
new file mode 100644
index 0000000..0fa7e7c
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/transaction.go
@@ -0,0 +1,267 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"errors"
+
+	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// Transaction represents a Firestore transaction.
+type Transaction struct {
+	c              *Client
+	ctx            context.Context
+	id             []byte
+	writes         []*pb.Write
+	maxAttempts    int
+	readOnly       bool
+	readAfterWrite bool
+}
+
+// A TransactionOption is an option passed to Client.RunTransaction.
+type TransactionOption interface {
+	config(t *Transaction)
+}
+
+// MaxAttempts is a TransactionOption that configures the maximum number of times to
+// try a transaction. It defaults to DefaultTransactionMaxAttempts.
+func MaxAttempts(n int) maxAttempts { return maxAttempts(n) }
+
+type maxAttempts int
+
+func (m maxAttempts) config(t *Transaction) { t.maxAttempts = int(m) }
+
+// DefaultTransactionMaxAttempts is the default number of times to attempt a transaction.
+const DefaultTransactionMaxAttempts = 5
+
+// ReadOnly is a TransactionOption that makes the transaction read-only. Read-only
+// transactions cannot issue write operations, but are more efficient.
+var ReadOnly = ro{}
+
+type ro struct{}
+
+func (ro) config(t *Transaction) { t.readOnly = true }
+
+var (
+	// ErrConcurrentTransaction is returned when a transaction is rolled back due
+	// to a conflict with a concurrent transaction.
+	ErrConcurrentTransaction = errors.New("firestore: concurrent transaction")
+
+	// Defined here for testing.
+ errReadAfterWrite = errors.New("firestore: read after write in transaction") + errWriteReadOnly = errors.New("firestore: write in read-only transaction") + errNonTransactionalOp = errors.New("firestore: non-transactional operation inside a transaction") + errNestedTransaction = errors.New("firestore: nested transaction") +) + +type transactionInProgressKey struct{} + +func checkTransaction(ctx context.Context) error { + if ctx.Value(transactionInProgressKey{}) != nil { + return errNonTransactionalOp + } + return nil +} + +// RunTransaction runs f in a transaction. f should use the transaction it is given +// for all Firestore operations. For any operation requiring a context, f should use +// the context it is passed, not the first argument to RunTransaction. +// +// f must not call Commit or Rollback on the provided Transaction. +// +// If f returns nil, RunTransaction commits the transaction. If the commit fails due +// to a conflicting transaction, RunTransaction retries f. It gives up and returns +// ErrConcurrentTransaction after a number of attempts that can be configured with +// the MaxAttempts option. If the commit succeeds, RunTransaction returns a nil error. +// +// If f returns non-nil, then the transaction will be rolled back and +// this method will return the same error. The function f is not retried. +// +// Note that when f returns, the transaction is not committed. Calling code +// must not assume that any of f's changes have been committed until +// RunTransaction returns nil. +// +// Since f may be called more than once, f should usually be idempotent – that is, it +// should have the same result when called multiple times. +func (c *Client) RunTransaction(ctx context.Context, f func(context.Context, *Transaction) error, opts ...TransactionOption) error { + if ctx.Value(transactionInProgressKey{}) != nil { + return errNestedTransaction + } + db := c.path() + t := &Transaction{ + c: c, + ctx: withResourceHeader(ctx, db), + maxAttempts: DefaultTransactionMaxAttempts, + } + for _, opt := range opts { + opt.config(t) + } + var txOpts *pb.TransactionOptions + if t.readOnly { + txOpts = &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{&pb.TransactionOptions_ReadOnly{}}, + } + } + var backoff gax.Backoff + // TODO(jba): use other than the standard backoff parameters? + // TODO(jba): get backoff time from gRPC trailer metadata? See extractRetryDelay in https://code.googlesource.com/gocloud/+/master/spanner/retry.go. + var err error + for i := 0; i < t.maxAttempts; i++ { + var res *pb.BeginTransactionResponse + res, err = t.c.c.BeginTransaction(t.ctx, &pb.BeginTransactionRequest{ + Database: db, + Options: txOpts, + }) + if err != nil { + return err + } + t.id = res.Transaction + err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t) + // Read after write can only be checked client-side, so we make sure to check + // even if the user does not. + if err == nil && t.readAfterWrite { + err = errReadAfterWrite + } + if err != nil { + t.rollback() + // Prefer f's returned error to rollback error. + return err + } + _, err = t.c.c.Commit(t.ctx, &pb.CommitRequest{ + Database: t.c.path(), + Writes: t.writes, + Transaction: t.id, + }) + // If a read-write transaction returns Aborted, retry. + // On success or other failures, return here. + if t.readOnly || grpc.Code(err) != codes.Aborted { + // According to the Firestore team, we should not roll back here + // if err != nil. But spanner does. 
+			// See https://code.googlesource.com/gocloud/+/master/spanner/transaction.go#740.
+			return err
+		}
+
+		if txOpts == nil {
+			// txOpts can only be nil if this is the first retry of a read-write transaction.
+			// (It is only set here and in the body of "if t.readOnly" above.)
+			// Mention the transaction ID in BeginTransaction so the service
+			// knows it is a retry.
+			txOpts = &pb.TransactionOptions{
+				Mode: &pb.TransactionOptions_ReadWrite_{
+					&pb.TransactionOptions_ReadWrite{RetryTransaction: t.id},
+				},
+			}
+		}
+		// Use exponential backoff to avoid contention with other running
+		// transactions.
+		if cerr := gax.Sleep(ctx, backoff.Pause()); cerr != nil {
+			err = cerr
+			break
+		}
+	}
+	// If we run out of retries, return the last error we saw (which should
+	// be the Aborted from Commit, or a context error).
+	if err != nil {
+		t.rollback()
+	}
+	return err
+}
+
+func (t *Transaction) rollback() {
+	_ = t.c.c.Rollback(t.ctx, &pb.RollbackRequest{
+		Database:    t.c.path(),
+		Transaction: t.id,
+	})
+	// Ignore the rollback error.
+	// TODO(jba): Log it?
+	// Note: Rollback is idempotent so it will be retried by the gapic layer.
+}
+
+// Get gets the document in the context of the transaction.
+func (t *Transaction) Get(dr *DocumentRef) (*DocumentSnapshot, error) {
+	if len(t.writes) > 0 {
+		t.readAfterWrite = true
+		return nil, errReadAfterWrite
+	}
+	docProto, err := t.c.c.GetDocument(t.ctx, &pb.GetDocumentRequest{
+		Name:                dr.Path,
+		ConsistencySelector: &pb.GetDocumentRequest_Transaction{t.id},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return newDocumentSnapshot(dr, docProto, t.c)
+}
+
+// A Queryer is a Query or a CollectionRef. CollectionRefs act as queries whose
+// results are all the documents in the collection.
+type Queryer interface {
+	query() *Query
+}
+
+// Documents returns a DocumentIterator based on the given Query or CollectionRef. The
+// results will be in the context of the transaction.
+func (t *Transaction) Documents(q Queryer) *DocumentIterator {
+	if len(t.writes) > 0 {
+		t.readAfterWrite = true
+		return &DocumentIterator{err: errReadAfterWrite}
+	}
+	return &DocumentIterator{
+		ctx: t.ctx,
+		q:   q.query(),
+		tid: t.id,
+	}
+}
+
+// Create adds a Create operation to the Transaction.
+// See DocumentRef.Create for details.
+func (t *Transaction) Create(dr *DocumentRef, data interface{}) error {
+	return t.addWrites(dr.newCreateWrites(data))
+}
+
+// Set adds a Set operation to the Transaction.
+// See DocumentRef.Set for details.
+func (t *Transaction) Set(dr *DocumentRef, data interface{}, opts ...SetOption) error {
+	return t.addWrites(dr.newSetWrites(data, opts))
+}
+
+// Delete adds a Delete operation to the Transaction.
+// See DocumentRef.Delete for details.
+func (t *Transaction) Delete(dr *DocumentRef, opts ...Precondition) error {
+	return t.addWrites(dr.newDeleteWrites(opts))
+}
+
+// Update adds an Update operation to the Transaction.
+// See DocumentRef.Update for details.
+func (t *Transaction) Update(dr *DocumentRef, data []Update, opts ...Precondition) error {
+	return t.addWrites(dr.newUpdatePathWrites(data, opts))
+}
+
+func (t *Transaction) addWrites(ws []*pb.Write, err error) error {
+	if t.readOnly {
+		return errWriteReadOnly
+	}
+	if err != nil {
+		return err
+	}
+	t.writes = append(t.writes, ws...)
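+	// The writes are only buffered here; RunTransaction sends them in a
+	// single CommitRequest at the end, which is why reads that follow
+	// writes must be rejected client-side by Get and Documents.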
+ return nil +} diff --git a/vendor/cloud.google.com/go/firestore/transaction_test.go b/vendor/cloud.google.com/go/firestore/transaction_test.go new file mode 100644 index 0000000..fad528d --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/transaction_test.go @@ -0,0 +1,345 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package firestore + +import ( + "testing" + + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes/empty" + "google.golang.org/api/iterator" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestRunTransaction(t *testing.T) { + ctx := context.Background() + const db = "projects/projectID/databases/(default)" + tid := []byte{1} + c, srv := newMock(t) + beginReq := &pb.BeginTransactionRequest{Database: db} + beginRes := &pb.BeginTransactionResponse{Transaction: tid} + commitReq := &pb.CommitRequest{Database: db, Transaction: tid} + // Empty transaction. + srv.addRPC(beginReq, beginRes) + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp}) + err := c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }) + if err != nil { + t.Fatal(err) + } + + // Transaction with read and write. 
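+	// The callback exercised below is the usual read-modify-write shape,
+	// roughly (assuming client and docref already exist):
+	//
+	//	client.RunTransaction(ctx, func(ctx context.Context, tx *Transaction) error {
+	//		doc, err := tx.Get(docref)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		count, err := doc.DataAt("count")
+	//		if err != nil {
+	//			return err
+	//		}
+	//		return tx.Update(docref, []Update{{Path: "count", Value: count.(int64) + 1}})
+	//	})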
+ srv.reset() + srv.addRPC(beginReq, beginRes) + aDoc := &pb.Document{ + Name: db + "/documents/C/a", + CreateTime: aTimestamp, + UpdateTime: aTimestamp2, + Fields: map[string]*pb.Value{"count": intval(1)}, + } + srv.addRPC( + &pb.GetDocumentRequest{ + Name: db + "/documents/C/a", + ConsistencySelector: &pb.GetDocumentRequest_Transaction{tid}, + }, + aDoc, + ) + aDoc2 := &pb.Document{ + Name: aDoc.Name, + Fields: map[string]*pb.Value{"count": intval(2)}, + } + srv.addRPC( + &pb.CommitRequest{ + Database: db, + Transaction: tid, + Writes: []*pb.Write{{ + Operation: &pb.Write_Update{aDoc2}, + UpdateMask: &pb.DocumentMask{FieldPaths: []string{"count"}}, + CurrentDocument: &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{true}, + }, + }}, + }, + &pb.CommitResponse{CommitTime: aTimestamp3}, + ) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + docref := c.Collection("C").Doc("a") + doc, err := tx.Get(docref) + if err != nil { + return err + } + count, err := doc.DataAt("count") + if err != nil { + return err + } + tx.Update(docref, []Update{{Path: "count", Value: count.(int64) + 1}}) + return nil + }) + if err != nil { + t.Fatal(err) + } + + // Query + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC( + &pb.RunQueryRequest{ + Parent: db, + QueryType: &pb.RunQueryRequest_StructuredQuery{ + &pb.StructuredQuery{ + From: []*pb.StructuredQuery_CollectionSelector{{CollectionId: "C"}}, + }, + }, + ConsistencySelector: &pb.RunQueryRequest_Transaction{tid}, + }, + []interface{}{}, + ) + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp3}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + it := tx.Documents(c.Collection("C")) + _, err := it.Next() + if err != iterator.Done { + return err + } + return nil + }) + if err != nil { + t.Fatal(err) + } + + // Retry entire transaction. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, "")) + srv.addRPC( + &pb.BeginTransactionRequest{ + Database: db, + Options: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ + &pb.TransactionOptions_ReadWrite{tid}, + }, + }, + }, + beginRes, + ) + srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { return nil }) + if err != nil { + t.Fatal(err) + } +} + +func TestTransactionErrors(t *testing.T) { + ctx := context.Background() + const db = "projects/projectID/databases/(default)" + c, srv := newMock(t) + var ( + tid = []byte{1} + internalErr = grpc.Errorf(codes.Internal, "so sad") + beginReq = &pb.BeginTransactionRequest{ + Database: db, + } + beginRes = &pb.BeginTransactionResponse{Transaction: tid} + getReq = &pb.GetDocumentRequest{ + Name: db + "/documents/C/a", + ConsistencySelector: &pb.GetDocumentRequest_Transaction{tid}, + } + rollbackReq = &pb.RollbackRequest{Database: db, Transaction: tid} + commitReq = &pb.CommitRequest{Database: db, Transaction: tid} + ) + + // BeginTransaction has a permanent error. + srv.addRPC(beginReq, internalErr) + err := c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Get has a permanent error. 
+ get := func(_ context.Context, tx *Transaction) error { + _, err := tx.Get(c.Doc("C/a")) + return err + } + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(getReq, internalErr) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, get) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Get has a permanent error, but the rollback fails. We still + // return Get's error. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(getReq, internalErr) + srv.addRPC(rollbackReq, grpc.Errorf(codes.FailedPrecondition, "")) + err = c.RunTransaction(ctx, get) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Commit has a permanent error. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(getReq, &pb.Document{ + Name: "projects/projectID/databases/(default)/documents/C/a", + CreateTime: aTimestamp, + UpdateTime: aTimestamp2, + }) + srv.addRPC(commitReq, internalErr) + err = c.RunTransaction(ctx, get) + if grpc.Code(err) != codes.Internal { + t.Errorf("got <%v>, want Internal", err) + } + + // Read after write. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + tx.Delete(c.Doc("C/a")) + if _, err := tx.Get(c.Doc("C/a")); err != nil { + return err + } + return nil + }) + if err != errReadAfterWrite { + t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite) + } + + // Read after write, with query. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + tx.Delete(c.Doc("C/a")) + it := tx.Documents(c.Collection("C").Select("x")) + if _, err := it.Next(); err != iterator.Done { + return err + } + return nil + }) + if err != errReadAfterWrite { + t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite) + } + + // Read after write fails even if the user ignores the read's error. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + tx.Delete(c.Doc("C/a")) + tx.Get(c.Doc("C/a")) + return nil + }) + if err != errReadAfterWrite { + t.Errorf("got <%v>, want <%v>", err, errReadAfterWrite) + } + + // Write in read-only transaction. + srv.reset() + srv.addRPC( + &pb.BeginTransactionRequest{ + Database: db, + Options: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadOnly_{&pb.TransactionOptions_ReadOnly{}}, + }, + }, + beginRes, + ) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error { + return tx.Delete(c.Doc("C/a")) + }, ReadOnly) + if err != errWriteReadOnly { + t.Errorf("got <%v>, want <%v>", err, errWriteReadOnly) + } + + // Too many retries. + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, "")) + srv.addRPC( + &pb.BeginTransactionRequest{ + Database: db, + Options: &pb.TransactionOptions{ + Mode: &pb.TransactionOptions_ReadWrite_{ + &pb.TransactionOptions_ReadWrite{tid}, + }, + }, + }, + beginRes, + ) + srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, "")) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }, + MaxAttempts(2)) + if grpc.Code(err) != codes.Aborted { + t.Errorf("got <%v>, want Aborted", err) + } + + // Nested transaction. 
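+	// The inner RunTransaction below receives the callback's context, on
+	// which transactionInProgressKey is already set, so it fails with
+	// errNestedTransaction before issuing any RPC.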
+ srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(ctx context.Context, tx *Transaction) error { + return c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil }) + }) + if got, want := err, errNestedTransaction; got != want { + t.Errorf("got <%v>, want <%v>", got, want) + } + + // Non-transactional operation. + dr := c.Doc("C/d") + + for i, op := range []func(ctx context.Context) error{ + func(ctx context.Context) error { _, err := c.GetAll(ctx, []*DocumentRef{dr}); return err }, + func(ctx context.Context) error { _, _, err := c.Collection("C").Add(ctx, testData); return err }, + func(ctx context.Context) error { _, err := dr.Get(ctx); return err }, + func(ctx context.Context) error { _, err := dr.Create(ctx, testData); return err }, + func(ctx context.Context) error { _, err := dr.Set(ctx, testData); return err }, + func(ctx context.Context) error { _, err := dr.Delete(ctx); return err }, + func(ctx context.Context) error { + _, err := dr.Update(ctx, []Update{{FieldPath: []string{"*"}, Value: 1}}) + return err + }, + func(ctx context.Context) error { it := c.Collections(ctx); _, err := it.Next(); return err }, + func(ctx context.Context) error { it := dr.Collections(ctx); _, err := it.Next(); return err }, + func(ctx context.Context) error { + _, err := c.Batch().Set(dr, testData).Commit(ctx) + return err + }, + func(ctx context.Context) error { + it := c.Collection("C").Documents(ctx) + _, err := it.Next() + return err + }, + } { + srv.reset() + srv.addRPC(beginReq, beginRes) + srv.addRPC(rollbackReq, &empty.Empty{}) + err = c.RunTransaction(ctx, func(ctx context.Context, _ *Transaction) error { + return op(ctx) + }) + if got, want := err, errNonTransactionalOp; got != want { + t.Errorf("#%d: got <%v>, want <%v>", i, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/firestore/util_test.go b/vendor/cloud.google.com/go/firestore/util_test.go new file mode 100644 index 0000000..db8e151 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/util_test.go @@ -0,0 +1,146 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package firestore + +import ( + "fmt" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "github.com/golang/protobuf/ptypes" + tspb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/type/latlng" + "google.golang.org/grpc" +) + +var ( + aTime = time.Date(2017, 1, 26, 0, 0, 0, 0, time.UTC) + aTime2 = time.Date(2017, 2, 5, 0, 0, 0, 0, time.UTC) + aTime3 = time.Date(2017, 3, 20, 0, 0, 0, 0, time.UTC) + aTimestamp = mustTimestampProto(aTime) + aTimestamp2 = mustTimestampProto(aTime2) + aTimestamp3 = mustTimestampProto(aTime3) +) + +func mustTimestampProto(t time.Time) *tspb.Timestamp { + ts, err := ptypes.TimestampProto(t) + if err != nil { + panic(err) + } + return ts +} + +var cmpOpts = []cmp.Option{ + cmp.AllowUnexported(DocumentRef{}, CollectionRef{}, DocumentSnapshot{}, + Query{}, filter{}, order{}, fpv{}), + cmpopts.IgnoreTypes(Client{}), +} + +// testEqual implements equality for Firestore tests. +func testEqual(a, b interface{}) bool { + return testutil.Equal(a, b, cmpOpts...) +} + +func testDiff(a, b interface{}) string { + return testutil.Diff(a, b, cmpOpts...) +} + +func TestTestEqual(t *testing.T) { + for _, test := range []struct { + a, b interface{} + want bool + }{ + {nil, nil, true}, + {([]int)(nil), nil, false}, + {nil, ([]int)(nil), false}, + {([]int)(nil), ([]int)(nil), true}, + } { + if got := testEqual(test.a, test.b); got != test.want { + t.Errorf("testEqual(%#v, %#v) == %t, want %t", test.a, test.b, got, test.want) + } + } +} + +func newMock(t *testing.T) (*Client, *mockServer) { + srv, err := newMockServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + t.Fatal(err) + } + client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + return client, srv +} + +func intval(i int) *pb.Value { + return &pb.Value{&pb.Value_IntegerValue{int64(i)}} +} + +func boolval(b bool) *pb.Value { + return &pb.Value{&pb.Value_BooleanValue{b}} +} + +func floatval(f float64) *pb.Value { + return &pb.Value{&pb.Value_DoubleValue{f}} +} + +func strval(s string) *pb.Value { + return &pb.Value{&pb.Value_StringValue{s}} +} + +func bytesval(b []byte) *pb.Value { + return &pb.Value{&pb.Value_BytesValue{b}} +} + +func tsval(t time.Time) *pb.Value { + ts, err := ptypes.TimestampProto(t) + if err != nil { + panic(fmt.Sprintf("bad time %s in test: %v", t, err)) + } + return &pb.Value{&pb.Value_TimestampValue{ts}} +} + +func geoval(ll *latlng.LatLng) *pb.Value { + return &pb.Value{&pb.Value_GeoPointValue{ll}} +} + +func arrayval(s ...*pb.Value) *pb.Value { + if s == nil { + s = []*pb.Value{} + } + return &pb.Value{&pb.Value_ArrayValue{&pb.ArrayValue{s}}} +} + +func mapval(m map[string]*pb.Value) *pb.Value { + return &pb.Value{&pb.Value_MapValue{&pb.MapValue{m}}} +} + +func refval(path string) *pb.Value { + return &pb.Value{&pb.Value_ReferenceValue{path}} +} diff --git a/vendor/cloud.google.com/go/firestore/writebatch.go b/vendor/cloud.google.com/go/firestore/writebatch.go new file mode 100644 index 0000000..46b2a59 --- /dev/null +++ b/vendor/cloud.google.com/go/firestore/writebatch.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package firestore
+
+import (
+	"errors"
+
+	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
+
+	"golang.org/x/net/context"
+)
+
+// A WriteBatch holds multiple database updates. Build a batch with the Create, Set,
+// Update and Delete methods, then run it with the Commit method. Errors in Create,
+// Set, Update or Delete are recorded instead of being returned immediately. The
+// first such error is returned by Commit.
+type WriteBatch struct {
+	c      *Client
+	err    error
+	writes []*pb.Write
+}
+
+func (b *WriteBatch) add(ws []*pb.Write, err error) *WriteBatch {
+	if b.err != nil {
+		return b
+	}
+	if err != nil {
+		b.err = err
+		return b
+	}
+	b.writes = append(b.writes, ws...)
+	return b
+}
+
+// Create adds a Create operation to the batch.
+// See DocumentRef.Create for details.
+func (b *WriteBatch) Create(dr *DocumentRef, data interface{}) *WriteBatch {
+	return b.add(dr.newCreateWrites(data))
+}
+
+// Set adds a Set operation to the batch.
+// See DocumentRef.Set for details.
+func (b *WriteBatch) Set(dr *DocumentRef, data interface{}, opts ...SetOption) *WriteBatch {
+	return b.add(dr.newSetWrites(data, opts))
+}
+
+// Delete adds a Delete operation to the batch.
+// See DocumentRef.Delete for details.
+func (b *WriteBatch) Delete(dr *DocumentRef, opts ...Precondition) *WriteBatch {
+	return b.add(dr.newDeleteWrites(opts))
+}
+
+// Update adds an Update operation to the batch.
+// See DocumentRef.Update for details.
+func (b *WriteBatch) Update(dr *DocumentRef, data []Update, opts ...Precondition) *WriteBatch {
+	return b.add(dr.newUpdatePathWrites(data, opts))
+}
+
+// Commit applies all the writes in the batch to the database atomically. Commit
+// returns an error if there are no writes in the batch, if any errors occurred in
+// constructing the writes, or if the Commit operation fails.
+func (b *WriteBatch) Commit(ctx context.Context) ([]*WriteResult, error) {
+	if b.err != nil {
+		return nil, b.err
+	}
+	if len(b.writes) == 0 {
+		return nil, errors.New("firestore: cannot commit empty WriteBatch")
+	}
+	return b.c.commit(ctx, b.writes)
+}
diff --git a/vendor/cloud.google.com/go/firestore/writebatch_test.go b/vendor/cloud.google.com/go/firestore/writebatch_test.go
new file mode 100644
index 0000000..db38d03
--- /dev/null
+++ b/vendor/cloud.google.com/go/firestore/writebatch_test.go
@@ -0,0 +1,119 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
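+
+// The tests below build batches of roughly this shape (assuming c is an
+// existing *Client):
+//
+//	c.Batch().
+//		Create(c.Doc("C/a"), data).
+//		Set(c.Doc("C/b"), data).
+//		Delete(c.Doc("C/c")).
+//		Commit(ctx)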
+ +package firestore + +import ( + "testing" + + pb "google.golang.org/genproto/googleapis/firestore/v1beta1" + + "golang.org/x/net/context" +) + +func TestWriteBatch(t *testing.T) { + type update struct{ A int } + + c, srv := newMock(t) + docPrefix := c.Collection("C").Path + "/" + srv.addRPC( + &pb.CommitRequest{ + Database: c.path(), + Writes: []*pb.Write{ + { // Create + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: docPrefix + "a", + Fields: testFields, + }, + }, + CurrentDocument: &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{false}, + }, + }, + { // Set + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: docPrefix + "b", + Fields: testFields, + }, + }, + }, + { // Delete + Operation: &pb.Write_Delete{ + Delete: docPrefix + "c", + }, + }, + { // Update + Operation: &pb.Write_Update{ + Update: &pb.Document{ + Name: docPrefix + "f", + Fields: map[string]*pb.Value{"*": intval(3)}, + }, + }, + UpdateMask: &pb.DocumentMask{[]string{"`*`"}}, + CurrentDocument: &pb.Precondition{ + ConditionType: &pb.Precondition_Exists{true}, + }, + }, + }, + }, + &pb.CommitResponse{ + WriteResults: []*pb.WriteResult{ + {UpdateTime: aTimestamp}, + {UpdateTime: aTimestamp2}, + {UpdateTime: aTimestamp3}, + }, + }, + ) + gotWRs, err := c.Batch(). + Create(c.Doc("C/a"), testData). + Set(c.Doc("C/b"), testData). + Delete(c.Doc("C/c")). + Update(c.Doc("C/f"), []Update{{FieldPath: []string{"*"}, Value: 3}}). + Commit(context.Background()) + if err != nil { + t.Fatal(err) + } + wantWRs := []*WriteResult{{aTime}, {aTime2}, {aTime3}} + if !testEqual(gotWRs, wantWRs) { + t.Errorf("got %+v\nwant %+v", gotWRs, wantWRs) + } +} + +func TestWriteBatchErrors(t *testing.T) { + ctx := context.Background() + c, _ := newMock(t) + for _, test := range []struct { + desc string + batch *WriteBatch + }{ + { + "empty batch", + c.Batch(), + }, + { + "bad doc reference", + c.Batch().Create(c.Doc("a"), testData), + }, + { + "bad data", + c.Batch().Create(c.Doc("a/b"), 3), + }, + } { + if _, err := test.batch.Commit(ctx); err == nil { + t.Errorf("%s: got nil, want error", test.desc) + } + } +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go new file mode 100644 index 0000000..e366fed --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go @@ -0,0 +1,43 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package admin is an experimental, auto-generated package for the +// Google Identity and Access Management (IAM) API. +// +// Manages identity and access control for Google Cloud Platform resources, +// including the creation of service accounts, which you can use to +// authenticate to Google and make API calls. 
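+//
+// A minimal sketch of creating a client, mirroring the examples in this
+// package (error handling elided):
+//
+//	c, err := admin.NewIamClient(ctx)
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	defer c.Close()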
+package admin // import "cloud.google.com/go/iam/admin/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val []string) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = val + return metadata.NewOutgoingContext(ctx, md) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/iam", + } +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go new file mode 100644 index 0000000..32ca067 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go @@ -0,0 +1,478 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// IamCallOptions contains the retry settings for each method of IamClient. 
+type IamCallOptions struct {
+	ListServiceAccounts     []gax.CallOption
+	GetServiceAccount       []gax.CallOption
+	CreateServiceAccount    []gax.CallOption
+	UpdateServiceAccount    []gax.CallOption
+	DeleteServiceAccount    []gax.CallOption
+	ListServiceAccountKeys  []gax.CallOption
+	GetServiceAccountKey    []gax.CallOption
+	CreateServiceAccountKey []gax.CallOption
+	DeleteServiceAccountKey []gax.CallOption
+	SignBlob                []gax.CallOption
+	GetIamPolicy            []gax.CallOption
+	SetIamPolicy            []gax.CallOption
+	TestIamPermissions      []gax.CallOption
+	QueryGrantableRoles     []gax.CallOption
+}
+
+func defaultIamClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		option.WithEndpoint("iam.googleapis.com:443"),
+		option.WithScopes(DefaultAuthScopes()...),
+	}
+}
+
+func defaultIamCallOptions() *IamCallOptions {
+	retry := map[[2]string][]gax.CallOption{
+		{"default", "idempotent"}: {
+			gax.WithRetry(func() gax.Retryer {
+				return gax.OnCodes([]codes.Code{
+					codes.DeadlineExceeded,
+					codes.Unavailable,
+				}, gax.Backoff{
+					Initial:    100 * time.Millisecond,
+					Max:        60000 * time.Millisecond,
+					Multiplier: 1.3,
+				})
+			}),
+		},
+	}
+	return &IamCallOptions{
+		ListServiceAccounts:     retry[[2]string{"default", "idempotent"}],
+		GetServiceAccount:       retry[[2]string{"default", "idempotent"}],
+		CreateServiceAccount:    retry[[2]string{"default", "non_idempotent"}],
+		UpdateServiceAccount:    retry[[2]string{"default", "idempotent"}],
+		DeleteServiceAccount:    retry[[2]string{"default", "idempotent"}],
+		ListServiceAccountKeys:  retry[[2]string{"default", "idempotent"}],
+		GetServiceAccountKey:    retry[[2]string{"default", "idempotent"}],
+		CreateServiceAccountKey: retry[[2]string{"default", "non_idempotent"}],
+		DeleteServiceAccountKey: retry[[2]string{"default", "idempotent"}],
+		SignBlob:                retry[[2]string{"default", "non_idempotent"}],
+		GetIamPolicy:            retry[[2]string{"default", "non_idempotent"}],
+		SetIamPolicy:            retry[[2]string{"default", "non_idempotent"}],
+		TestIamPermissions:      retry[[2]string{"default", "non_idempotent"}],
+		QueryGrantableRoles:     retry[[2]string{"default", "non_idempotent"}],
+	}
+}
+
+// IamClient is a client for interacting with the Google Identity and Access
+// Management (IAM) API.
+type IamClient struct {
+	// The connection to the service.
+	conn *grpc.ClientConn
+
+	// The gRPC API client.
+	iamClient adminpb.IAMClient
+
+	// The call options for this service.
+	CallOptions *IamCallOptions
+
+	// The metadata to be sent with each request.
+	xGoogHeader []string
+}
+
+// NewIamClient creates a new IAM client.
+//
+// Creates and manages service account objects.
+//
+// A service account is an account that belongs to your project instead
+// of to an individual end user. It is used to authenticate calls
+// to a Google API.
+//
+// To create a service account, specify the project_id and account_id
+// for the account. The account_id is unique within the project, and is used
+// to generate the service account email address and a stable
+// unique_id.
+//
+// All other methods can identify accounts using the format
+// projects/{project}/serviceAccounts/{account}.
+// Using - as a wildcard for the project will infer the project from
+// the account. The account value can be the email address or the
+// unique_id of the service account.
+func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) {
+	conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...)
+ if err != nil { + return nil, err + } + c := &IamClient{ + conn: conn, + CallOptions: defaultIamCallOptions(), + + iamClient: adminpb.NewIAMClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *IamClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *IamClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *IamClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} +} + +// IamProjectPath returns the path for the project resource. +func IamProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// IamServiceAccountPath returns the path for the service account resource. +func IamServiceAccountPath(project, serviceAccount string) string { + return "" + + "projects/" + + project + + "/serviceAccounts/" + + serviceAccount + + "" +} + +// IamKeyPath returns the path for the key resource. +func IamKeyPath(project, serviceAccount, key string) string { + return "" + + "projects/" + + project + + "/serviceAccounts/" + + serviceAccount + + "/keys/" + + key + + "" +} + +// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. +func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest, opts ...gax.CallOption) *ServiceAccountIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListServiceAccounts[0:len(c.CallOptions.ListServiceAccounts):len(c.CallOptions.ListServiceAccounts)], opts...) + it := &ServiceAccountIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*adminpb.ServiceAccount, string, error) { + var resp *adminpb.ListServiceAccountsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.ListServiceAccounts(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Accounts, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetServiceAccount gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetServiceAccount[0:len(c.CallOptions.GetServiceAccount):len(c.CallOptions.GetServiceAccount)], opts...) 
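+	// The full slice expression above caps opts at its own length, so the
+	// append is forced to copy rather than write caller-supplied options
+	// into the backing array shared with c.CallOptions. Equivalently:
+	//
+	//	base := c.CallOptions.GetServiceAccount
+	//	opts = append(base[:len(base):len(base)], opts...)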
+ var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.GetServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateServiceAccount creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] +// and returns it. +func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateServiceAccount[0:len(c.CallOptions.CreateServiceAccount):len(c.CallOptions.CreateServiceAccount)], opts...) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.CreateServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +// +// Currently, only the following fields are updatable: +// display_name . +// The etag is mandatory. +func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.UpdateServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceAccount deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteServiceAccount[0:len(c.CallOptions.DeleteServiceAccount):len(c.CallOptions.DeleteServiceAccount)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.iamClient.DeleteServiceAccount(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListServiceAccountKeys lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. +func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest, opts ...gax.CallOption) (*adminpb.ListServiceAccountKeysResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListServiceAccountKeys[0:len(c.CallOptions.ListServiceAccountKeys):len(c.CallOptions.ListServiceAccountKeys)], opts...) + var resp *adminpb.ListServiceAccountKeysResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.ListServiceAccountKeys(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetServiceAccountKey gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] +// by key id. 
+func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetServiceAccountKey[0:len(c.CallOptions.GetServiceAccountKey):len(c.CallOptions.GetServiceAccountKey)], opts...) + var resp *adminpb.ServiceAccountKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.GetServiceAccountKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateServiceAccountKey creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] +// and returns it. +func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateServiceAccountKey[0:len(c.CallOptions.CreateServiceAccountKey):len(c.CallOptions.CreateServiceAccountKey)], opts...) + var resp *adminpb.ServiceAccountKey + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.CreateServiceAccountKey(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceAccountKey deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. +func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteServiceAccountKey[0:len(c.CallOptions.DeleteServiceAccountKey):len(c.CallOptions.DeleteServiceAccountKey)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.iamClient.DeleteServiceAccountKey(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// SignBlob signs a blob using a service account's system-managed private key. +func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest, opts ...gax.CallOption) (*adminpb.SignBlobResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.SignBlob[0:len(c.CallOptions.SignBlob):len(c.CallOptions.SignBlob)], opts...) + var resp *adminpb.SignBlobResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.SignBlob(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// getIamPolicy returns the IAM access control policy for a +// [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// setIamPolicy sets the IAM access control policy for a +// [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions tests the specified permissions against the IAM access control policy +// for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// QueryGrantableRoles queries roles that can be granted on a particular resource. +// A role is grantable if it can be used as the role in a binding for a policy +// for that resource. +func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest, opts ...gax.CallOption) (*adminpb.QueryGrantableRolesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.QueryGrantableRoles[0:len(c.CallOptions.QueryGrantableRoles):len(c.CallOptions.QueryGrantableRoles)], opts...) + var resp *adminpb.QueryGrantableRolesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.iamClient.QueryGrantableRoles(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ServiceAccountIterator manages a stream of *adminpb.ServiceAccount. +type ServiceAccountIterator struct { + items []*adminpb.ServiceAccount + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*adminpb.ServiceAccount, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceAccountIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
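+//
+// A typical loop, assuming it came from ListServiceAccounts:
+//
+//	for {
+//		sa, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = sa
+//	}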
+func (it *ServiceAccountIterator) Next() (*adminpb.ServiceAccount, error) { + var item *adminpb.ServiceAccount + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceAccountIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceAccountIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go new file mode 100644 index 0000000..901fcb9 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go @@ -0,0 +1,253 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin_test + +import ( + "cloud.google.com/go/iam/admin/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +func ExampleNewIamClient() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleIamClient_ListServiceAccounts() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ListServiceAccountsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListServiceAccounts(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleIamClient_GetServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.GetServiceAccountRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_CreateServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.CreateServiceAccountRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_UpdateServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ServiceAccount{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
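+ // resp is the updated *adminpb.ServiceAccount; its Etag, for example,
+ // identifies the new revision used for optimistic concurrency control.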
+ _ = resp +} + +func ExampleIamClient_DeleteServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.DeleteServiceAccountRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleIamClient_ListServiceAccountKeys() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ListServiceAccountKeysRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListServiceAccountKeys(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_GetServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.GetServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_CreateServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.CreateServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_DeleteServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.DeleteServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleIamClient_SignBlob() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.SignBlobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SignBlob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_TestIamPermissions() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_QueryGrantableRoles() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.QueryGrantableRolesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.QueryGrantableRoles(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go new file mode 100644 index 0000000..43ff01b --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go @@ -0,0 +1,1143 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockIamServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + adminpb.IAMServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockIamServer) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) (*adminpb.ListServiceAccountsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ListServiceAccountsResponse), nil +} + +func (s *mockIamServer) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) (*emptypb.Empty, 
error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockIamServer) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ListServiceAccountKeysResponse), nil +} + +func (s *mockIamServer) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccountKey), nil +} + +func (s *mockIamServer) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccountKey), nil +} + +func (s *mockIamServer) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockIamServer) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.SignBlobResponse), nil +} + +func (s *mockIamServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = 
%v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +func (s *mockIamServer) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.QueryGrantableRolesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockIam mockIamServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + adminpb.RegisterIAMServer(serv, &mockIam) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestIamListServiceAccounts(t *testing.T) { + var nextPageToken string = "" + var accountsElement *adminpb.ServiceAccount = &adminpb.ServiceAccount{} + var accounts = []*adminpb.ServiceAccount{accountsElement} + var expectedResponse = &adminpb.ListServiceAccountsResponse{ + NextPageToken: nextPageToken, + Accounts: accounts, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamProjectPath("[PROJECT]") + var request = &adminpb.ListServiceAccountsRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccounts(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Accounts[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamListServiceAccountsError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamProjectPath("[PROJECT]") + var request = &adminpb.ListServiceAccountsRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccounts(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c 
:= st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetServiceAccount(t *testing.T) { + var name2 string = "name2-1052831874" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag []byte = []byte("21") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name2, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.GetServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.GetServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamCreateServiceAccount(t *testing.T) { + var name2 string = "name2-1052831874" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag []byte = []byte("21") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name2, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamProjectPath("[PROJECT]") + var accountId string = "accountId-803333011" + var request = &adminpb.CreateServiceAccountRequest{ + Name: formattedName, + AccountId: accountId, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamCreateServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = 
gstatus.Error(errCode, "test error") + + var formattedName string = IamProjectPath("[PROJECT]") + var accountId string = "accountId-803333011" + var request = &adminpb.CreateServiceAccountRequest{ + Name: formattedName, + AccountId: accountId, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamUpdateServiceAccount(t *testing.T) { + var name string = "name3373707" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag2 []byte = []byte("-120") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag2, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var etag []byte = []byte("21") + var request = &adminpb.ServiceAccount{ + Etag: etag, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamUpdateServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var etag []byte = []byte("21") + var request = &adminpb.ServiceAccount{ + Etag: etag, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamDeleteServiceAccount(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.DeleteServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestIamDeleteServiceAccountError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.DeleteServiceAccountRequest{ + Name: formattedName, + } + + c, err := 
NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccount(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestIamListServiceAccountKeys(t *testing.T) { + var expectedResponse *adminpb.ListServiceAccountKeysResponse = &adminpb.ListServiceAccountKeysResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.ListServiceAccountKeysRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccountKeys(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamListServiceAccountKeysError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.ListServiceAccountKeysRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccountKeys(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetServiceAccountKey(t *testing.T) { + var name2 string = "name2-1052831874" + var privateKeyData []byte = []byte("-58") + var publicKeyData []byte = []byte("-96") + var expectedResponse = &adminpb.ServiceAccountKey{ + Name: name2, + PrivateKeyData: privateKeyData, + PublicKeyData: publicKeyData, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.GetServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetServiceAccountKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.GetServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccountKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error 
%v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamCreateServiceAccountKey(t *testing.T) { + var name2 string = "name2-1052831874" + var privateKeyData []byte = []byte("-58") + var publicKeyData []byte = []byte("-96") + var expectedResponse = &adminpb.ServiceAccountKey{ + Name: name2, + PrivateKeyData: privateKeyData, + PublicKeyData: publicKeyData, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.CreateServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamCreateServiceAccountKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.CreateServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccountKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamDeleteServiceAccountKey(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.DeleteServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestIamDeleteServiceAccountKeyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.DeleteServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccountKey(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestIamSignBlob(t *testing.T) { + var keyId string = "keyId-1134673157" + var signature []byte = []byte("-72") + var expectedResponse = &adminpb.SignBlobResponse{ + KeyId: keyId, + Signature: signature, + } + + mockIam.err = nil 
+ mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var bytesToSign []byte = []byte("45") + var request = &adminpb.SignBlobRequest{ + Name: formattedName, + BytesToSign: bytesToSign, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SignBlob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamSignBlobError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var bytesToSign []byte = []byte("45") + var request = &adminpb.SignBlobRequest{ + Name: formattedName, + BytesToSign: bytesToSign, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SignBlob(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.getIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.getIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var policy *iampb.Policy = &iampb.Policy{} + var request = 
&iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.setIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamSetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.setIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamTestIamPermissionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamQueryGrantableRoles(t *testing.T) { + var expectedResponse *adminpb.QueryGrantableRolesResponse = &adminpb.QueryGrantableRolesResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var fullResourceName string = "fullResourceName1300993644" + var request = &adminpb.QueryGrantableRolesRequest{ + FullResourceName: fullResourceName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.QueryGrantableRoles(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamQueryGrantableRolesError(t *testing.T) { + errCode := codes.PermissionDenied + mockIam.err = gstatus.Error(errCode, "test error") + + var fullResourceName string = "fullResourceName1300993644" + var request = &adminpb.QueryGrantableRolesRequest{ + FullResourceName: fullResourceName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.QueryGrantableRoles(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go b/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go new file mode 100644 index 0000000..a3ff72e --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This is handwritten code. These methods are implemented by hand so they can use +// the iam.Policy type. + +package admin + +import ( + "cloud.google.com/go/iam" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// GetIamPolicy returns the IAM access control policy for a ServiceAccount. +func (c *IamClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iam.Policy, error) { + policy, err := c.getIamPolicy(ctx, req) + if err != nil { + return nil, err + } + return &iam.Policy{InternalProto: policy}, nil +} + +// SetIamPolicyRequest is the request type for the SetIamPolicy method. +type SetIamPolicyRequest struct { + Resource string + Policy *iam.Policy +} + +// SetIamPolicy sets the IAM access control policy for a ServiceAccount. +func (c *IamClient) SetIamPolicy(ctx context.Context, req *SetIamPolicyRequest) (*iam.Policy, error) { + preq := &iampb.SetIamPolicyRequest{ + Resource: req.Resource, + Policy: req.Policy.InternalProto, + } + policy, err := c.setIamPolicy(ctx, preq) + if err != nil { + return nil, err + } + return &iam.Policy{InternalProto: policy}, nil +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 0000000..8722ee8 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,256 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. +type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. +type grpcClient struct { + c pb.IAMPolicyClient +} + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + if err != nil { + return nil, err + } + return proto, nil +} +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. +func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. 
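+//
+// A typical read-modify-write cycle from a caller's point of view (the
+// member string is a placeholder) is sketched below:
+//
+//   policy, err := handle.Policy(ctx)
+//   if err != nil {
+//           // TODO: Handle error.
+//   }
+//   policy.Add("user:admin@example.com", iam.Viewer)
+//   if err := handle.SetPolicy(ctx, policy); err != nil {
+//           // TODO: Handle error.
+//   }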
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
+ return h.c.Set(ctx, h.resource, policy.InternalProto)
+}
+
+// TestPermissions returns the subset of permissions that the caller has on the resource.
+func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
+ return h.c.Test(ctx, h.resource, permissions)
+}
+
+// A RoleName is a name representing a collection of permissions.
+type RoleName string
+
+// Common role names.
+const (
+ Owner RoleName = "roles/owner"
+ Editor RoleName = "roles/editor"
+ Viewer RoleName = "roles/viewer"
+)
+
+const (
+ // AllUsers is a special member that denotes all users, even unauthenticated ones.
+ AllUsers = "allUsers"
+
+ // AllAuthenticatedUsers is a special member that denotes all authenticated users.
+ AllAuthenticatedUsers = "allAuthenticatedUsers"
+)
+
+// A Policy is a list of Bindings representing roles
+// granted to members.
+//
+// The zero Policy is a valid policy with no bindings.
+type Policy struct {
+ // TODO(jba): when type aliases are available, put Policy into an internal package
+ // and provide an exported alias here.
+
+ // This field is exported for use by the Google Cloud Libraries only.
+ // It may become unexported in a future release.
+ InternalProto *pb.Policy
+}
+
+// Members returns the list of members with the supplied role.
+// The return value should not be modified. Use Add and Remove
+// to modify the members of a role.
+func (p *Policy) Members(r RoleName) []string {
+ b := p.binding(r)
+ if b == nil {
+ return nil
+ }
+ return b.Members
+}
+
+// HasRole reports whether member has role r.
+func (p *Policy) HasRole(member string, r RoleName) bool {
+ return memberIndex(member, p.binding(r)) >= 0
+}
+
+// Add adds member member to role r if it is not already present.
+// A new binding is created if there is no binding for the role.
+func (p *Policy) Add(member string, r RoleName) {
+ b := p.binding(r)
+ if b == nil {
+ if p.InternalProto == nil {
+ p.InternalProto = &pb.Policy{}
+ }
+ p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
+ Role: string(r),
+ Members: []string{member},
+ })
+ return
+ }
+ if memberIndex(member, b) < 0 {
+ b.Members = append(b.Members, member)
+ return
+ }
+}
+
+// Remove removes member from role r if it is present.
+func (p *Policy) Remove(member string, r RoleName) {
+ bi := p.bindingIndex(r)
+ if bi < 0 {
+ return
+ }
+ bindings := p.InternalProto.Bindings
+ b := bindings[bi]
+ mi := memberIndex(member, b)
+ if mi < 0 {
+ return
+ }
+ // Order doesn't matter for bindings or members, so to remove, move the last item
+ // into the removed spot and shrink the slice.
+ if len(b.Members) == 1 {
+ // Remove binding.
+ last := len(bindings) - 1
+ bindings[bi] = bindings[last]
+ bindings[last] = nil
+ p.InternalProto.Bindings = bindings[:last]
+ return
+ }
+ // Remove member.
+ // TODO(jba): worry about multiple copies of m?
+ last := len(b.Members) - 1
+ b.Members[mi] = b.Members[last]
+ b.Members[last] = ""
+ b.Members = b.Members[:last]
+}
+
+// Roles returns the names of all the roles that appear in the Policy.
+func (p *Policy) Roles() []RoleName {
+ if p.InternalProto == nil {
+ return nil
+ }
+ var rns []RoleName
+ for _, b := range p.InternalProto.Bindings {
+ rns = append(rns, RoleName(b.Role))
+ }
+ return rns
+}
+
+// binding returns the Binding for the supplied role, or nil if there isn't one.
+func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. +func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} diff --git a/vendor/cloud.google.com/go/iam/iam_test.go b/vendor/cloud.google.com/go/iam/iam_test.go new file mode 100644 index 0000000..f29caa8 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam_test.go @@ -0,0 +1,87 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iam + +import ( + "fmt" + "sort" + "testing" + + "cloud.google.com/go/internal/testutil" +) + +func TestPolicy(t *testing.T) { + p := &Policy{} + + add := func(member string, role RoleName) { + p.Add(member, role) + } + remove := func(member string, role RoleName) { + p.Remove(member, role) + } + + if msg, ok := checkMembers(p, Owner, nil); !ok { + t.Fatal(msg) + } + add("m1", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m1"}); !ok { + t.Fatal(msg) + } + add("m2", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { + t.Fatal(msg) + } + add("m1", Owner) // duplicate adds ignored + if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { + t.Fatal(msg) + } + // No other roles populated yet. + if msg, ok := checkMembers(p, Viewer, nil); !ok { + t.Fatal(msg) + } + remove("m1", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { + t.Fatal(msg) + } + if msg, ok := checkMembers(p, Viewer, nil); !ok { + t.Fatal(msg) + } + remove("m3", Owner) // OK to remove non-existent member. + if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { + t.Fatal(msg) + } + remove("m2", Owner) + if msg, ok := checkMembers(p, Owner, nil); !ok { + t.Fatal(msg) + } + if got, want := p.Roles(), []RoleName(nil); !testutil.Equal(got, want) { + t.Fatalf("roles: got %v, want %v", got, want) + } +} + +func checkMembers(p *Policy, role RoleName, wantMembers []string) (string, bool) { + gotMembers := p.Members(role) + sort.Strings(gotMembers) + sort.Strings(wantMembers) + if !testutil.Equal(gotMembers, wantMembers) { + return fmt.Sprintf("got %v, want %v", gotMembers, wantMembers), false + } + for _, m := range wantMembers { + if !p.HasRole(m, role) { + return fmt.Sprintf("member %q should have role %s but does not", m, role), false + } + } + return "", true +} diff --git a/vendor/cloud.google.com/go/import_test.go b/vendor/cloud.google.com/go/import_test.go new file mode 100644 index 0000000..839fae4 --- /dev/null +++ b/vendor/cloud.google.com/go/import_test.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "go/parser" + "go/token" + "os" + "path/filepath" + "strconv" + "testing" +) + +func TestContextImport(t *testing.T) { + t.Parallel() + + whiteList := map[string]bool{ + "storage/go17.go": true, + } + + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if filepath.Ext(path) != ".go" || whiteList[path] { + return nil + } + + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, path, nil, parser.ImportsOnly) + if err != nil { + return err + } + + for _, imp := range file.Imports { + impPath, err := strconv.Unquote(imp.Path.Value) + if err != nil { + return err + } + if impPath == "context" { + t.Errorf(`file %q import "context", want "golang.org/x/net/context"`, path) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go new file mode 100644 index 0000000..797809a --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/status" +) + +// Annotate prepends msg to the error message in err, attempting +// to preserve other information in err, like an error code. +// +// Annotate panics if err is nil. +// +// Annotate knows about these error types: +// - "google.golang.org/grpc/status".Status +// - "google.golang.org/api/googleapi".Error +// If the error is not one of these types, Annotate behaves +// like +// fmt.Errorf("%s: %v", msg, err) +func Annotate(err error, msg string) error { + if err == nil { + panic("Annotate called with nil") + } + if s, ok := status.FromError(err); ok { + p := s.Proto() + p.Message = msg + ": " + p.Message + return status.ErrorProto(p) + } + if g, ok := err.(*googleapi.Error); ok { + g.Message = msg + ": " + g.Message + return g + } + return fmt.Errorf("%s: %v", msg, err) +} + +// Annotatef uses format and args to format a string, then calls Annotate. 
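+//
+// For example (a sketch; loadObject and bucket are hypothetical):
+//
+//   obj, err := loadObject(ctx, bucket)
+//   if err != nil {
+//           return nil, Annotatef(err, "loading object from bucket %q", bucket)
+//   }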
+func Annotatef(err error, format string, args ...interface{}) error { + return Annotate(err, fmt.Sprintf(format, args...)) +} diff --git a/vendor/cloud.google.com/go/internal/annotate_test.go b/vendor/cloud.google.com/go/internal/annotate_test.go new file mode 100644 index 0000000..36cea5c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate_test.go @@ -0,0 +1,65 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + "testing" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const wantMessage = "prefix: msg" + +func TestAnnotateGRPC(t *testing.T) { + // grpc Status error + err := status.Error(codes.NotFound, "msg") + err = Annotate(err, "prefix") + got, ok := status.FromError(err) + if !ok { + t.Fatalf("got %T, wanted a status", got) + } + if g, w := got.Code(), codes.NotFound; g != w { + t.Errorf("got code %v, want %v", g, w) + } + if g, w := got.Message(), wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} + +func TestAnnotateGoogleapi(t *testing.T) { + // googleapi error + var err error = &googleapi.Error{Code: 403, Message: "msg"} + err = Annotate(err, "prefix") + got2, ok := err.(*googleapi.Error) + if !ok { + t.Fatalf("got %T, wanted a googleapi.Error", got2) + } + if g, w := got2.Code, 403; g != w { + t.Errorf("got code %d, want %d", g, w) + } + if g, w := got2.Message, wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} + +func TestAnnotateUnknownError(t *testing.T) { + err := Annotate(errors.New("msg"), "prefix") + if g, w := err.Error(), wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} diff --git a/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go new file mode 100644 index 0000000..2bea8a1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go @@ -0,0 +1,58 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package atomiccache provides a map-based cache that supports very fast +// reads. +package atomiccache + +import ( + "sync" + "sync/atomic" +) + +type mapType map[interface{}]interface{} + +// Cache is a map-based cache that supports fast reads via use of atomics. +// Writes are slow, requiring a copy of the entire cache. 
+// The zero Cache is an empty cache, ready for use. +type Cache struct { + val atomic.Value // mapType + mu sync.Mutex // used only by writers +} + +// Get returns the value of the cache at key. If there is no value, +// getter is called to provide one, and the cache is updated. +// The getter function may be called concurrently. It should be pure, +// returning the same value for every call. +func (c *Cache) Get(key interface{}, getter func() interface{}) interface{} { + mp, _ := c.val.Load().(mapType) + if v, ok := mp[key]; ok { + return v + } + + // Compute value without lock. + // Might duplicate effort but won't hold other computations back. + newV := getter() + + c.mu.Lock() + mp, _ = c.val.Load().(mapType) + newM := make(mapType, len(mp)+1) + for k, v := range mp { + newM[k] = v + } + newM[key] = newV + c.val.Store(newM) + c.mu.Unlock() + return newV +} diff --git a/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go new file mode 100644 index 0000000..33105b3 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package atomiccache + +import ( + "fmt" + "testing" +) + +func TestGet(t *testing.T) { + var c Cache + called := false + get := func(k interface{}) interface{} { + return c.Get(k, func() interface{} { + called = true + return fmt.Sprintf("v%d", k) + }) + } + got := get(1) + if want := "v1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if !called { + t.Error("getter not called, expected a call") + } + called = false + got = get(1) + if want := "v1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if called { + t.Error("getter unexpectedly called") + } +} diff --git a/vendor/cloud.google.com/go/internal/fields/fields.go b/vendor/cloud.google.com/go/internal/fields/fields.go new file mode 100644 index 0000000..882820f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fields.go @@ -0,0 +1,468 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fields provides a view of the fields of a struct that follows the Go +// rules, amended to consider tags and case insensitivity. 
+// +// Usage +// +// First define a function that interprets tags: +// +// func parseTag(st reflect.StructTag) (name string, keep bool, other interface{}, err error) { ... } +// +// The function's return values describe whether to ignore the field +// completely or provide an alternate name, as well as other data from the +// parse that is stored to avoid re-parsing. +// +// Then define a function to validate the type: +// +// func validate(t reflect.Type) error { ... } +// +// Then, if necessary, define a function to specify leaf types - types +// which should be considered one field and not be recursed into: +// +// func isLeafType(t reflect.Type) bool { ... } +// +// eg: +// +// func isLeafType(t reflect.Type) bool { +// return t == reflect.TypeOf(time.Time{}) +// } +// +// Next, construct a Cache, passing your functions. As its name suggests, a +// Cache remembers validation and field information for a type, so subsequent +// calls with the same type are very fast. +// +// cache := fields.NewCache(parseTag, validate, isLeafType) +// +// To get the fields of a struct type as determined by the above rules, call +// the Fields method: +// +// fields, err := cache.Fields(reflect.TypeOf(MyStruct{})) +// +// The return value can be treated as a slice of Fields. +// +// Given a string, such as a key or column name obtained during unmarshalling, +// call Match on the list of fields to find a field whose name is the best +// match: +// +// field := fields.Match(name) +// +// Match looks for an exact match first, then falls back to a case-insensitive +// comparison. +package fields + +import ( + "bytes" + "errors" + "reflect" + "sort" + "strings" + + "cloud.google.com/go/internal/atomiccache" +) + +// A Field records information about a struct field. +type Field struct { + Name string // effective field name + NameFromTag bool // did Name come from a tag? + Type reflect.Type // field type + Index []int // index sequence, for reflect.Value.FieldByIndex + ParsedTag interface{} // third return value of the parseTag function + + nameBytes []byte + equalFold func(s, t []byte) bool +} + +type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interface{}, err error) + +type ValidateFunc func(reflect.Type) error + +type LeafTypesFunc func(reflect.Type) bool + +// A Cache records information about the fields of struct types. +// +// A Cache is safe for use by multiple goroutines. +type Cache struct { + parseTag ParseTagFunc + validate ValidateFunc + leafTypes LeafTypesFunc + cache atomiccache.Cache // from reflect.Type to cacheValue +} + +// NewCache constructs a Cache. +// +// Its first argument should be a function that accepts +// a struct tag and returns four values: an alternative name for the field +// extracted from the tag, a boolean saying whether to keep the field or ignore +// it, additional data that is stored with the field information to avoid +// having to parse the tag again, and an error. +// +// Its second argument should be a function that accepts a reflect.Type and +// returns an error if the struct type is invalid in any way. For example, it +// may check that all of the struct field tags are valid, or that all fields +// are of an appropriate type. 
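+//
+// Its third argument, if non-nil, should be a function that reports whether
+// a type is a "leaf type", one that Fields records as a single field rather
+// than recursing into it (for example, time.Time); see the package
+// documentation above.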
+func NewCache(parseTag ParseTagFunc, validate ValidateFunc, leafTypes LeafTypesFunc) *Cache {
+	if parseTag == nil {
+		parseTag = func(reflect.StructTag) (string, bool, interface{}, error) {
+			return "", true, nil, nil
+		}
+	}
+	if validate == nil {
+		validate = func(reflect.Type) error {
+			return nil
+		}
+	}
+	if leafTypes == nil {
+		leafTypes = func(reflect.Type) bool {
+			return false
+		}
+	}
+
+	return &Cache{
+		parseTag:  parseTag,
+		validate:  validate,
+		leafTypes: leafTypes,
+	}
+}
+
+// A fieldScan represents an item on the fieldByNameFunc scan work list.
+type fieldScan struct {
+	typ   reflect.Type
+	index []int
+}
+
+// Fields returns all the exported fields of t, which must be a struct type. It
+// follows the standard Go rules for embedded fields, modified by the presence
+// of tags. The result is sorted lexicographically by index.
+//
+// These rules apply in the absence of tags:
+// Anonymous struct fields are treated as if their inner exported fields were
+// fields in the outer struct (embedding). The result includes all fields that
+// aren't shadowed by fields at a higher level of embedding. If more than one
+// field with the same name exists at the same level of embedding, it is
+// excluded. An anonymous field that is not of struct type is treated as having
+// its type as its name.
+//
+// Tags modify these rules as follows:
+// A field's tag is used as its name.
+// An anonymous struct field with a name given in its tag is treated as
+// a field having that name, rather than an embedded struct (the struct's
+// fields will not be returned).
+// If more than one field with the same name exists at the same level of embedding,
+// but exactly one of them is tagged, then the tagged field is reported and the others
+// are ignored.
+func (c *Cache) Fields(t reflect.Type) (List, error) {
+	if t.Kind() != reflect.Struct {
+		panic("fields: Fields of non-struct type")
+	}
+	return c.cachedTypeFields(t)
+}
+
+// A List is a list of Fields.
+type List []Field
+
+// Match returns the field in the list whose name best matches the supplied
+// name, or nil if no field does. If there is a field with the exact name, it
+// is returned. Otherwise the first field (sorted by index) whose name matches
+// case-insensitively is returned.
+func (l List) Match(name string) *Field {
+	return l.MatchBytes([]byte(name))
+}
+
+// MatchBytes is identical to Match, except that the argument is a byte slice.
+func (l List) MatchBytes(name []byte) *Field {
+	var f *Field
+	for i := range l {
+		ff := &l[i]
+		if bytes.Equal(ff.nameBytes, name) {
+			return ff
+		}
+		if f == nil && ff.equalFold(ff.nameBytes, name) {
+			f = ff
+		}
+	}
+	return f
+}
+
+type cacheValue struct {
+	fields List
+	err    error
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+// This code has been copied and modified from
+// https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/encode.go.
+func (c *Cache) cachedTypeFields(t reflect.Type) (List, error) {
+	cv := c.cache.Get(t, func() interface{} {
+		if err := c.validate(t); err != nil {
+			return cacheValue{nil, err}
+		}
+		f, err := c.typeFields(t)
+		return cacheValue{List(f), err}
+	}).(cacheValue)
+	return cv.fields, cv.err
+}
+
+func (c *Cache) typeFields(t reflect.Type) ([]Field, error) {
+	fields, err := c.listFields(t)
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(byName(fields))
+	// Delete all fields that are hidden by the Go rules for embedded fields.
+ + // The fields are sorted in primary order of name, secondary order of field + // index length. So the first field with a given name is the dominant one. + var out []Field + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.Name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.Name != name { + break + } + } + // Find the dominant field, if any, out of all fields that have the same name. + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + sort.Sort(byIndex(out)) + return out, nil +} + +func (c *Cache) listFields(t reflect.Type) ([]Field, error) { + // This uses the same condition that the Go language does: there must be a unique instance + // of the match at a given depth level. If there are multiple instances of a match at the + // same depth, they annihilate each other and inhibit any possible match at a lower level. + // The algorithm is breadth first search, one depth level at a time. + + // The current and next slices are work queues: + // current lists the fields to visit on this depth level, + // and next lists the fields on the next lower level. + current := []fieldScan{} + next := []fieldScan{{typ: t}} + + // nextCount records the number of times an embedded type has been + // encountered and considered for queueing in the 'next' slice. + // We only queue the first one, but we increment the count on each. + // If a struct type T can be reached more than once at a given depth level, + // then it annihilates itself and need not be considered at all when we + // process that next depth level. + var nextCount map[reflect.Type]int + + // visited records the structs that have been considered already. + // Embedded pointer fields can create cycles in the graph of + // reachable embedded types; visited avoids following those cycles. + // It also avoids duplicated effort: if we didn't find the field in an + // embedded type T at level 2, we won't find it in one at level 4 either. + visited := map[reflect.Type]bool{} + + var fields []Field // Fields found. + + for len(next) > 0 { + current, next = next, current[:0] + count := nextCount + nextCount = nil + + // Process all the fields at this depth, now listed in 'current'. + // The loop queues embedded fields found in 'next', for processing during the next + // iteration. The multiplicity of the 'current' field counts is recorded + // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. + for _, scan := range current { + t := scan.typ + if visited[t] { + // We've looked through this type before, at a higher level. + // That higher level would shadow the lower level we're now at, + // so this one can't be useful to us. Ignore it. + continue + } + visited[t] = true + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + exported := (f.PkgPath == "") + + // If a named field is unexported, ignore it. An anonymous + // unexported field is processed, because it may contain + // exported fields, which are visible. + if !exported && !f.Anonymous { + continue + } + + // Examine the tag. 
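+				// For instance, with a json-style parser the tag json:"f,omitempty"
+				// yields tagName "f", keep true, and options ["omitempty"] in other.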
+ tagName, keep, other, err := c.parseTag(f.Tag) + if err != nil { + return nil, err + } + if !keep { + continue + } + if c.leafTypes(f.Type) { + fields = append(fields, newField(f, tagName, other, scan.index, i)) + continue + } + + var ntyp reflect.Type + if f.Anonymous { + // Anonymous field of type T or *T. + ntyp = f.Type + if ntyp.Kind() == reflect.Ptr { + ntyp = ntyp.Elem() + } + } + + // Record fields with a tag name, non-anonymous fields, or + // anonymous non-struct fields. + if tagName != "" || ntyp == nil || ntyp.Kind() != reflect.Struct { + if !exported { + continue + } + fields = append(fields, newField(f, tagName, other, scan.index, i)) + if count[t] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Queue embedded struct fields for processing with next level, + // but only if the embedded types haven't already been queued. + if nextCount[ntyp] > 0 { + nextCount[ntyp] = 2 // exact multiple doesn't matter + continue + } + if nextCount == nil { + nextCount = map[reflect.Type]int{} + } + nextCount[ntyp] = 1 + if count[t] > 1 { + nextCount[ntyp] = 2 // exact multiple doesn't matter + } + var index []int + index = append(index, scan.index...) + index = append(index, i) + next = append(next, fieldScan{ntyp, index}) + } + } + } + return fields, nil +} + +func newField(f reflect.StructField, tagName string, other interface{}, index []int, i int) Field { + name := tagName + if name == "" { + name = f.Name + } + sf := Field{ + Name: name, + NameFromTag: tagName != "", + Type: f.Type, + ParsedTag: other, + nameBytes: []byte(name), + } + sf.equalFold = foldFunc(sf.nameBytes) + sf.Index = append(sf.Index, index...) + sf.Index = append(sf.Index, i) + return sf +} + +// byName sorts fields using the following criteria, in order: +// 1. name +// 2. embedding depth +// 3. tag presence (preferring a tagged field) +// 4. index sequence. +type byName []Field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].Name != x[j].Name { + return x[i].Name < x[j].Name + } + if len(x[i].Index) != len(x[j].Index) { + return len(x[i].Index) < len(x[j].Index) + } + if x[i].NameFromTag != x[j].NameFromTag { + return x[i].NameFromTag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []Field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + xi := x[i].Index + xj := x[j].Index + ln := len(xi) + if l := len(xj); l < ln { + ln = l + } + for k := 0; k < ln; k++ { + if xi[k] != xj[k] { + return xi[k] < xj[k] + } + } + return len(xi) < len(xj) +} + +// dominantField looks through the fields, all of which are known to have the +// same name, to find the single field that dominates the others using Go's +// embedding rules, modified by the presence of tags. If there are multiple +// top-level fields, the boolean will be false: This condition is an error in +// Go and we skip all the fields. +func dominantField(fs []Field) (Field, bool) { + // The fields are sorted in increasing index-length order, then by presence of tag. + // That means that the first field is the dominant one. We need only check + // for error cases: two fields at top level, either both tagged or neither tagged. 
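+// For example, two untagged fields with the same name at the same (top)
+// level annihilate each other: neither is reported, and the name matches
+// no field at all.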
+	if len(fs) > 1 && len(fs[0].Index) == len(fs[1].Index) && fs[0].NameFromTag == fs[1].NameFromTag {
+		return Field{}, false
+	}
+	return fs[0], true
+}
+
+// ParseStandardTag extracts the sub-tag named by key, then parses it using the
+// de facto standard format introduced in encoding/json:
+//   "-" means "ignore this tag". It must occur by itself. (ParseStandardTag returns an error
+//   in this case, whereas encoding/json accepts the "-" even if it is not alone.)
+//   "<name>" provides an alternative name for the field
+//   "<name>,opt1,opt2,..." specifies options after the name.
+// The options are returned as a []string.
+func ParseStandardTag(key string, t reflect.StructTag) (name string, keep bool, options []string, err error) {
+	s := t.Get(key)
+	parts := strings.Split(s, ",")
+	if parts[0] == "-" {
+		if len(parts) > 1 {
+			return "", false, nil, errors.New(`"-" field tag with options`)
+		}
+		return "", false, nil, nil
+	}
+	if len(parts) > 1 {
+		options = parts[1:]
+	}
+	return parts[0], true, options, nil
+}
diff --git a/vendor/cloud.google.com/go/internal/fields/fields_test.go b/vendor/cloud.google.com/go/internal/fields/fields_test.go
new file mode 100644
index 0000000..925765d
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/fields/fields_test.go
@@ -0,0 +1,563 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
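As a usage illustration, here is a minimal sketch of a caller of this package. It is not part of the vendored code: the "db" tag key, the Record type, and the printed values are all invented, and a real caller would have to live inside cloud.google.com/go, since the package is internal.

package fieldsdemo

import (
	"fmt"
	"reflect"

	"cloud.google.com/go/internal/fields"
)

// Record is a hypothetical client type using an invented "db" tag key.
type Record struct {
	ID    int    `db:"id"`
	Name  string `db:"name,omitempty"`
	Score int    // untagged: the Go field name is used
}

func Demo() error {
	// Interpret "db" tags with the de facto standard format handled by
	// ParseStandardTag.
	parseTag := func(t reflect.StructTag) (string, bool, interface{}, error) {
		return fields.ParseStandardTag("db", t)
	}
	cache := fields.NewCache(parseTag, nil, nil)

	fs, err := cache.Fields(reflect.TypeOf(Record{}))
	if err != nil {
		return err
	}
	for _, f := range fs {
		fmt.Println(f.Name, f.Index) // id [0], name [1], Score [2]
	}

	// Match prefers an exact name, then falls back to case-insensitive.
	if f := fs.Match("NAME"); f != nil {
		fmt.Println("matched:", f.Name) // matched: name
	}
	return nil
}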
+ +package fields + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/testutil" +) + +type embed1 struct { + Em1 int + Dup int // annihilates with embed2.Dup + Shadow int + embed3 +} + +type embed2 struct { + Dup int + embed3 + embed4 +} + +type embed3 struct { + Em3 int // annihilated because embed3 is in both embed1 and embed2 + embed5 +} + +type embed4 struct { + Em4 int + Dup int // annihilation of Dup in embed1, embed2 hides this Dup + *embed1 // ignored because it occurs at a higher level +} + +type embed5 struct { + x int +} + +type Anonymous int + +type S1 struct { + Exported int + unexported int + Shadow int // shadows S1.Shadow + embed1 + *embed2 + Anonymous +} + +type Time struct { + time.Time +} + +var intType = reflect.TypeOf(int(0)) + +func field(name string, tval interface{}, index ...int) *Field { + return &Field{ + Name: name, + Type: reflect.TypeOf(tval), + Index: index, + ParsedTag: []string(nil), + } +} + +func tfield(name string, tval interface{}, index ...int) *Field { + return &Field{ + Name: name, + Type: reflect.TypeOf(tval), + Index: index, + NameFromTag: true, + ParsedTag: []string(nil), + } +} + +func TestFieldsNoTags(t *testing.T) { + c := NewCache(nil, nil, nil) + got, err := c.Fields(reflect.TypeOf(S1{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + field("Exported", int(0), 0), + field("Shadow", int(0), 2), + field("Em1", int(0), 3, 0), + field("Em4", int(0), 4, 2, 0), + field("Anonymous", Anonymous(0), 5), + } + for _, f := range want { + f.ParsedTag = nil + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestAgainstJSONEncodingNoTags(t *testing.T) { + // Demonstrates that this package produces the same set of fields as encoding/json. + s1 := S1{ + Exported: 1, + unexported: 2, + Shadow: 3, + embed1: embed1{ + Em1: 4, + Dup: 5, + Shadow: 6, + embed3: embed3{ + Em3: 7, + embed5: embed5{x: 8}, + }, + }, + embed2: &embed2{ + Dup: 9, + embed3: embed3{ + Em3: 10, + embed5: embed5{x: 11}, + }, + embed4: embed4{ + Em4: 12, + Dup: 13, + embed1: &embed1{Em1: 14}, + }, + }, + Anonymous: Anonymous(15), + } + var want S1 + want.embed2 = &embed2{} // need this because reflection won't create it + jsonRoundTrip(t, s1, &want) + var got S1 + got.embed2 = &embed2{} + fields, err := NewCache(nil, nil, nil).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, s1) + if !testutil.Equal(got, want, + cmp.AllowUnexported(S1{}, embed1{}, embed2{}, embed3{}, embed4{}, embed5{})) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +// Tests use of LeafTypes parameter to NewCache +func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) { + timeLeafFn := func(t reflect.Type) bool { + return t == reflect.TypeOf(time.Time{}) + } + // Demonstrates that this package can produce the same set of + // fields as encoding/json for a struct with an embedded time.Time. 
+ now := time.Now().UTC() + myt := Time{ + now, + } + var want Time + jsonRoundTrip(t, myt, &want) + var got Time + fields, err := NewCache(nil, nil, timeLeafFn).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, myt) + if !testutil.Equal(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +type S2 struct { + NoTag int + XXX int `json:"tag"` // tag name takes precedence + Anonymous `json:"anon"` // anonymous non-structs also get their name from the tag + unexported int `json:"tag"` + Embed `json:"em"` // embedded structs with tags become fields + Tag int + YYY int `json:"Tag"` // tag takes precedence over untagged field of the same name + Empty int `json:""` // empty tag is noop + tEmbed1 + tEmbed2 +} + +type Embed struct { + Em int +} + +type tEmbed1 struct { + Dup int + X int `json:"Dup2"` +} + +type tEmbed2 struct { + Y int `json:"Dup"` // takes precedence over tEmbed1.Dup because it is tagged + Z int `json:"Dup2"` // same name as tEmbed1.X and both tagged, so ignored +} + +func jsonTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + return ParseStandardTag("json", t) +} + +func validateFunc(t reflect.Type) (err error) { + if t.Kind() != reflect.Struct { + return errors.New("non-struct type used") + } + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() == reflect.Slice { + return fmt.Errorf("slice field found at field %s on struct %s", t.Field(i).Name, t.Name()) + } + } + + return nil +} + +func TestFieldsWithTags(t *testing.T) { + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + field("NoTag", int(0), 0), + tfield("tag", int(0), 1), + tfield("anon", Anonymous(0), 2), + tfield("em", Embed{}, 4), + tfield("Tag", int(0), 6), + field("Empty", int(0), 7), + tfield("Dup", int(0), 8, 0), + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestAgainstJSONEncodingWithTags(t *testing.T) { + // Demonstrates that this package produces the same set of fields as encoding/json. + s2 := S2{ + NoTag: 1, + XXX: 2, + Anonymous: 3, + Embed: Embed{ + Em: 4, + }, + tEmbed1: tEmbed1{ + Dup: 5, + X: 6, + }, + tEmbed2: tEmbed2{ + Y: 7, + Z: 8, + }, + } + var want S2 + jsonRoundTrip(t, s2, &want) + var got S2 + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, s2) + if !testutil.Equal(got, want, cmp.AllowUnexported(S2{})) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +func TestUnexportedAnonymousNonStruct(t *testing.T) { + // An unexported anonymous non-struct field should not be recorded. + // This is currently a bug in encoding/json. + // https://github.com/golang/go/issues/18009 + type ( + u int + v int + S struct { + u + v `json:"x"` + int + } + ) + + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestUnexportedAnonymousStruct(t *testing.T) { + // An unexported anonymous struct with a tag is ignored. + // This is currently a bug in encoding/json. 
+ // https://github.com/golang/go/issues/18009 + type ( + s1 struct{ X int } + S2 struct { + s1 `json:"Y"` + } + ) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestDominantField(t *testing.T) { + // With fields sorted by index length and then by tag presence, + // the dominant field is always the first. Make sure all error + // cases are caught. + for _, test := range []struct { + fields []Field + wantOK bool + }{ + // A single field is OK. + {[]Field{{Index: []int{0}}}, true}, + {[]Field{{Index: []int{0}, NameFromTag: true}}, true}, + // A single field at top level is OK. + {[]Field{{Index: []int{0}}, {Index: []int{1, 0}}}, true}, + {[]Field{{Index: []int{0}}, {Index: []int{1, 0}, NameFromTag: true}}, true}, + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1, 0}, NameFromTag: true}}, true}, + // A single tagged field is OK. + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}}}, true}, + // Two untagged fields at the same level is an error. + {[]Field{{Index: []int{0}}, {Index: []int{1}}}, false}, + // Two tagged fields at the same level is an error. + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}, NameFromTag: true}}, false}, + } { + _, gotOK := dominantField(test.fields) + if gotOK != test.wantOK { + t.Errorf("%v: got %t, want %t", test.fields, gotOK, test.wantOK) + } + } +} + +func TestIgnore(t *testing.T) { + type S struct { + X int `json:"-"` + } + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestParsedTag(t *testing.T) { + type S struct { + X int `json:"name,omitempty"` + } + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + {Name: "name", NameFromTag: true, Type: intType, + Index: []int{0}, ParsedTag: []string{"omitempty"}}, + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestValidateFunc(t *testing.T) { + type MyInvalidStruct struct { + A string + B []int + } + + _, err := NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyInvalidStruct{})) + if err == nil { + t.Fatal("expected error, got nil") + } + + type MyValidStruct struct { + A string + B int + } + _, err = NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyValidStruct{})) + if err != nil { + t.Fatalf("expected nil, got error: %s\n", err) + } +} + +func compareFields(got []Field, want []*Field) (msg string, ok bool) { + if len(got) != len(want) { + return fmt.Sprintf("got %d fields, want %d", len(got), len(want)), false + } + for i, g := range got { + w := *want[i] + if !fieldsEqual(&g, &w) { + return fmt.Sprintf("got\n%+v\nwant\n%+v", g, w), false + } + } + return "", true +} + +// Need this because Field contains a function, which cannot be compared even +// by testutil.Equal. +func fieldsEqual(f1, f2 *Field) bool { + if f1 == nil || f2 == nil { + return f1 == f2 + } + return f1.Name == f2.Name && + f1.NameFromTag == f2.NameFromTag && + f1.Type == f2.Type && + testutil.Equal(f1.ParsedTag, f2.ParsedTag) +} + +// Set the fields of dst from those of src. +// dst must be a pointer to a struct value. +// src must be a struct value. 
+func setFields(fields []Field, dst, src interface{}) { + vsrc := reflect.ValueOf(src) + vdst := reflect.ValueOf(dst).Elem() + for _, f := range fields { + fdst := vdst.FieldByIndex(f.Index) + fsrc := vsrc.FieldByIndex(f.Index) + fdst.Set(fsrc) + } +} + +func jsonRoundTrip(t *testing.T, in, out interface{}) { + bytes, err := json.Marshal(in) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(bytes, out); err != nil { + t.Fatal(err) + } +} + +type S3 struct { + S4 + Abc int + AbC int + Tag int + X int `json:"Tag"` + unexported int +} + +type S4 struct { + ABc int + Y int `json:"Abc"` // ignored because of top-level Abc +} + +func TestMatchingField(t *testing.T) { + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + name string + want *Field + }{ + // Exact match wins. + {"Abc", field("Abc", int(0), 1)}, + {"AbC", field("AbC", int(0), 2)}, + {"ABc", field("ABc", int(0), 0, 0)}, + // If there are multiple matches but no exact match or tag, + // the first field wins, lexicographically by index. + // Here, "ABc" is at a deeper embedding level, but since S4 appears + // first in S3, its index precedes the other fields of S3. + {"abc", field("ABc", int(0), 0, 0)}, + // Tag name takes precedence over untagged field of the same name. + {"Tag", tfield("Tag", int(0), 4)}, + // Unexported fields disappear. + {"unexported", nil}, + // Untagged embedded structs disappear. + {"S4", nil}, + } { + if got := fields.Match(test.name); !fieldsEqual(got, test.want) { + t.Errorf("match %q:\ngot %+v\nwant %+v", test.name, got, test.want) + } + } +} + +func TestAgainstJSONMatchingField(t *testing.T) { + s3 := S3{ + S4: S4{ABc: 1, Y: 2}, + Abc: 3, + AbC: 4, + Tag: 5, + X: 6, + unexported: 7, + } + var want S3 + jsonRoundTrip(t, s3, &want) + v := reflect.ValueOf(want) + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + name string + got int + }{ + {"Abc", 3}, + {"AbC", 4}, + {"ABc", 1}, + {"abc", 1}, + {"Tag", 6}, + } { + f := fields.Match(test.name) + if f == nil { + t.Fatalf("%s: no match", test.name) + } + w := v.FieldByIndex(f.Index).Interface() + if test.got != w { + t.Errorf("%s: got %d, want %d", test.name, test.got, w) + } + } +} + +func TestTagErrors(t *testing.T) { + called := false + c := NewCache(func(t reflect.StructTag) (string, bool, interface{}, error) { + called = true + s := t.Get("f") + if s == "bad" { + return "", false, nil, errors.New("error") + } + return s, true, nil, nil + }, nil, nil) + + type T struct { + X int `f:"ok"` + Y int `f:"bad"` + } + + _, err := c.Fields(reflect.TypeOf(T{})) + if !called { + t.Fatal("tag parser not called") + } + if err == nil { + t.Error("want error, got nil") + } + // Second time, we should cache the error. + called = false + _, err = c.Fields(reflect.TypeOf(T{})) + if called { + t.Fatal("tag parser called on second time") + } + if err == nil { + t.Error("want error, got nil") + } +} diff --git a/vendor/cloud.google.com/go/internal/fields/fold.go b/vendor/cloud.google.com/go/internal/fields/fold.go new file mode 100644 index 0000000..10a6818 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fold.go @@ -0,0 +1,156 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fields + +// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold.go. +// Only the license and package were changed. + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See https://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. 
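+// For instance, it treats "a_B" and "A_b" as equal, but not "aa@" and "aa`":
+// 0x40 ('@') and 0x60 ('`') differ only in the case bit yet are not letters.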
+func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/vendor/cloud.google.com/go/internal/fields/fold_test.go b/vendor/cloud.google.com/go/internal/fields/fold_test.go new file mode 100644 index 0000000..eadded1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fold_test.go @@ -0,0 +1,129 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fields + +// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold_test.go. +// Only the license and package were changed. + +import ( + "bytes" + "strings" + "testing" + "unicode/utf8" +) + +var foldTests = []struct { + fn func(s, t []byte) bool + s, t string + want bool +}{ + {equalFoldRight, "", "", true}, + {equalFoldRight, "a", "a", true}, + {equalFoldRight, "", "a", false}, + {equalFoldRight, "a", "", false}, + {equalFoldRight, "a", "A", true}, + {equalFoldRight, "AB", "ab", true}, + {equalFoldRight, "AB", "ac", false}, + {equalFoldRight, "sbkKc", "ſbKKc", true}, + {equalFoldRight, "SbKkc", "ſbKKc", true}, + {equalFoldRight, "SbKkc", "ſbKK", false}, + {equalFoldRight, "e", "é", false}, + {equalFoldRight, "s", "S", true}, + + {simpleLetterEqualFold, "", "", true}, + {simpleLetterEqualFold, "abc", "abc", true}, + {simpleLetterEqualFold, "abc", "ABC", true}, + {simpleLetterEqualFold, "abc", "ABCD", false}, + {simpleLetterEqualFold, "abc", "xxx", false}, + + {asciiEqualFold, "a_B", "A_b", true}, + {asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent +} + +func TestFold(t *testing.T) { + for i, tt := range foldTests { + if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want { + t.Errorf("%d. 
%q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want) + } + truth := strings.EqualFold(tt.s, tt.t) + if truth != tt.want { + t.Errorf("strings.EqualFold doesn't agree with case %d", i) + } + } +} + +func TestFoldAgainstUnicode(t *testing.T) { + const bufSize = 5 + buf1 := make([]byte, 0, bufSize) + buf2 := make([]byte, 0, bufSize) + var runes []rune + for i := 0x20; i <= 0x7f; i++ { + runes = append(runes, rune(i)) + } + runes = append(runes, kelvin, smallLongEss) + + funcs := []struct { + name string + fold func(s, t []byte) bool + letter bool // must be ASCII letter + simple bool // must be simple ASCII letter (not 'S' or 'K') + }{ + { + name: "equalFoldRight", + fold: equalFoldRight, + }, + { + name: "asciiEqualFold", + fold: asciiEqualFold, + simple: true, + }, + { + name: "simpleLetterEqualFold", + fold: simpleLetterEqualFold, + simple: true, + letter: true, + }, + } + + for _, ff := range funcs { + for _, r := range runes { + if r >= utf8.RuneSelf { + continue + } + if ff.letter && !isASCIILetter(byte(r)) { + continue + } + if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') { + continue + } + for _, r2 := range runes { + buf1 := append(buf1[:0], 'x') + buf2 := append(buf2[:0], 'x') + buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)] + buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)] + buf1 = append(buf1, 'x') + buf2 = append(buf2, 'x') + want := bytes.EqualFold(buf1, buf2) + if got := ff.fold(buf1, buf2); got != want { + t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want) + } + } + } + } +} + +func isASCIILetter(b byte) bool { + return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z') +} diff --git a/vendor/cloud.google.com/go/internal/kokoro/build.sh b/vendor/cloud.google.com/go/internal/kokoro/build.sh new file mode 100755 index 0000000..869be0d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/kokoro/build.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Fail on any error +set -eo pipefail + +# Display commands being run +set -x + +# cd to project dir on Kokoro instance +cd git/gocloud + +go version + +# Set $GOPATH +export GOPATH="$HOME/go" +GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go +mkdir -p $GOCLOUD_HOME + +# Move code into $GOPATH and get dependencies +cp -R ./* $GOCLOUD_HOME +cd $GOCLOUD_HOME +go get -v ./... + +# # Don't run integration tests until we can protect against code from +# # untrusted forks reading and storing our service account key. +# cd internal/kokoro +# # Don't print out encryption keys, etc +# set +x +# key=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_key) +# iv=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_iv) +# pass=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_pass) + +# openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d +# set -x + +# export GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" +# export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" +# cd $GOCLOUD_HOME + +# Run tests and tee output to log file, to be pushed to GCS as artifact. +go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_CHANGE_NUMBER.txt + +# Make sure README.md is up to date. 
+make -C internal/readme test diff diff --git a/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc b/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc new file mode 100644 index 0000000..b238854 Binary files /dev/null and b/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc differ diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go new file mode 100644 index 0000000..4c15410 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/optional/optional.go @@ -0,0 +1,108 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package optional provides versions of primitive types that can +// be nil. These are useful in methods that update some of an API object's +// fields. +package optional + +import ( + "fmt" + "strings" + "time" +) + +type ( + // Bool is either a bool or nil. + Bool interface{} + + // String is either a string or nil. + String interface{} + + // Int is either an int or nil. + Int interface{} + + // Uint is either a uint or nil. + Uint interface{} + + // Float64 is either a float64 or nil. + Float64 interface{} + + // Duration is either a time.Duration or nil. + Duration interface{} +) + +// ToBool returns its argument as a bool. +// It panics if its argument is nil or not a bool. +func ToBool(v Bool) bool { + x, ok := v.(bool) + if !ok { + doPanic("Bool", v) + } + return x +} + +// ToString returns its argument as a string. +// It panics if its argument is nil or not a string. +func ToString(v String) string { + x, ok := v.(string) + if !ok { + doPanic("String", v) + } + return x +} + +// ToInt returns its argument as an int. +// It panics if its argument is nil or not an int. +func ToInt(v Int) int { + x, ok := v.(int) + if !ok { + doPanic("Int", v) + } + return x +} + +// ToUint returns its argument as a uint. +// It panics if its argument is nil or not a uint. +func ToUint(v Uint) uint { + x, ok := v.(uint) + if !ok { + doPanic("Uint", v) + } + return x +} + +// ToFloat64 returns its argument as a float64. +// It panics if its argument is nil or not a float64. +func ToFloat64(v Float64) float64 { + x, ok := v.(float64) + if !ok { + doPanic("Float64", v) + } + return x +} + +// ToDuration returns its argument as a time.Duration. +// It panics if its argument is nil or not a time.Duration. +func ToDuration(v Duration) time.Duration { + x, ok := v.(time.Duration) + if !ok { + doPanic("Duration", v) + } + return x +} + +func doPanic(capType string, v interface{}) { + panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) +} diff --git a/vendor/cloud.google.com/go/internal/optional/optional_test.go b/vendor/cloud.google.com/go/internal/optional/optional_test.go new file mode 100644 index 0000000..6fe6d6e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/optional/optional_test.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
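As a usage illustration for the optional package above (a hedged sketch, not vendored code; the attrs type and field names are invented), the nil/non-nil distinction lets an update method tell "leave unchanged" apart from "set to the zero value":

package optionaldemo

import (
	"fmt"

	"cloud.google.com/go/internal/optional"
)

// attrsToUpdate is an invented type; a nil field means "leave unchanged".
type attrsToUpdate struct {
	VersioningEnabled optional.Bool   // nil, or a bool
	Description       optional.String // nil, or a string
}

func apply(u attrsToUpdate) {
	if u.VersioningEnabled != nil {
		fmt.Println("set versioning to", optional.ToBool(u.VersioningEnabled))
	}
	if u.Description != nil {
		fmt.Println("set description to", optional.ToString(u.Description))
	}
}

func Demo() {
	// Clears the description but leaves versioning untouched.
	apply(attrsToUpdate{Description: ""})
}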
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package optional + +import "testing" + +func TestConvertSuccess(t *testing.T) { + if got, want := ToBool(false), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := ToString(""), ""; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := ToInt(0), 0; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := ToUint(uint(0)), uint(0); got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := ToFloat64(0.0), 0.0; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestConvertFailure(t *testing.T) { + for _, f := range []func(){ + func() { ToBool(nil) }, + func() { ToBool(3) }, + func() { ToString(nil) }, + func() { ToString(3) }, + func() { ToInt(nil) }, + func() { ToInt("") }, + func() { ToUint(nil) }, + func() { ToUint("") }, + func() { ToFloat64(nil) }, + func() { ToFloat64("") }, + } { + if !panics(f) { + t.Error("got no panic, want panic") + } + } +} + +func panics(f func()) (b bool) { + defer func() { + if recover() != nil { + b = true + } + }() + f() + return false +} diff --git a/vendor/cloud.google.com/go/internal/pretty/diff.go b/vendor/cloud.google.com/go/internal/pretty/diff.go new file mode 100644 index 0000000..38a7cf1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pretty/diff.go @@ -0,0 +1,78 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "syscall" +) + +// Diff compares the pretty-printed representation of two values. The second +// return value reports whether the two values' representations are identical. +// If it is false, the first return value contains the diffs. +// +// The output labels the first value "want" and the second "got". +// +// Diff works by invoking the "diff" command. It will only succeed in +// environments where "diff" is on the shell path. 
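As a usage illustration (a sketch, not vendored code; the test name and values are invented, and the "diff" binary must be on the shell path):

package prettydemo_test

import (
	"testing"

	"cloud.google.com/go/internal/pretty"
)

func TestValues(t *testing.T) {
	want := []int{1, 2, 3}
	got := []int{1, 0, 3} // stand-in for a computed result
	diff, same, err := pretty.Diff(want, got)
	if err != nil {
		t.Fatal(err) // e.g. "diff" not installed
	}
	if !same {
		t.Errorf("unexpected result:\n%s", diff)
	}
}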
+func Diff(want, got interface{}) (string, bool, error) { + fname1, err := writeToTemp(want) + if err != nil { + return "", false, err + } + defer os.Remove(fname1) + + fname2, err := writeToTemp(got) + if err != nil { + return "", false, err + } + defer os.Remove(fname2) + + cmd := exec.Command("diff", "-u", "--label=want", "--label=got", fname1, fname2) + out, err := cmd.Output() + if err == nil { + return string(out), true, nil + } + eerr, ok := err.(*exec.ExitError) + if !ok { + return "", false, err + } + ws, ok := eerr.Sys().(syscall.WaitStatus) + if !ok { + return "", false, err + } + if ws.ExitStatus() != 1 { + return "", false, err + } + // Exit status of 1 means no error, but diffs were found. + return string(out), false, nil +} + +func writeToTemp(v interface{}) (string, error) { + f, err := ioutil.TempFile("", "prettyDiff") + if err != nil { + return "", err + } + if _, err := fmt.Fprintf(f, "%+v\n", Value(v)); err != nil { + return "", err + } + if err := f.Close(); err != nil { + return "", err + } + return f.Name(), nil +} diff --git a/vendor/cloud.google.com/go/internal/pretty/diff_test.go b/vendor/cloud.google.com/go/internal/pretty/diff_test.go new file mode 100644 index 0000000..739cc50 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pretty/diff_test.go @@ -0,0 +1,50 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pretty + +import "testing" + +func TestDiff(t *testing.T) { + for _, test := range []struct { + v1, v2 interface{} + ok bool + want string + }{ + {5, 5, true, ""}, + {"foo", "foo", true, ""}, + {[]int{1, 2, 3}, []int{1, 0, 3}, false, `--- want ++++ got +@@ -1,5 +1,5 @@ + []int{ + 1, +- 2, ++ 0, + 3, + } +`}, + } { + got, ok, err := Diff(test.v1, test.v2) + if err != nil { + t.Errorf("%v vs. %v: %v", test.v1, test.v2, err) + continue + } + if ok != test.ok { + t.Errorf("%v vs. %v: got %t, want %t", test.v1, test.v2, ok, test.ok) + } + if got != test.want { + t.Errorf("%v vs. %v: got:\n%q\nwant:\n%q", test.v1, test.v2, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/internal/pretty/pretty.go b/vendor/cloud.google.com/go/internal/pretty/pretty.go new file mode 100644 index 0000000..e5866aa --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pretty/pretty.go @@ -0,0 +1,254 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pretty implements a simple pretty-printer. 
It is intended for +// debugging the output of tests. +// +// It follows pointers and produces multi-line output for complex values like +// slices, maps and structs. +package pretty + +import ( + "fmt" + "io" + "reflect" + "sort" + "strings" + "time" +) + +// Indent is the string output at each level of indentation. +var Indent = " " + +// Value returns a value that will print prettily when used as an +// argument for the %v or %s format specifiers. +// With no flags, struct fields and map keys with default values are omitted. +// With the '+' or '#' flags, all values are displayed. +// +// This package does not detect cycles. Attempting to print a Value that +// contains cycles will result in unbounded recursion. +func Value(v interface{}) val { return val{v: v} } + +type val struct{ v interface{} } + +// Format implements the fmt.Formatter interface. +func (v val) Format(s fmt.State, c rune) { + if c == 'v' || c == 's' { + fprint(s, reflect.ValueOf(v.v), state{ + defaults: s.Flag('+') || s.Flag('#'), + }) + } else { + fmt.Fprintf(s, "%%!%c(pretty.Val)", c) + } +} + +type state struct { + level int + prefix, suffix string + defaults bool +} + +const maxLevel = 100 + +var typeOfTime = reflect.TypeOf(time.Time{}) + +func fprint(w io.Writer, v reflect.Value, s state) { + if s.level > maxLevel { + fmt.Fprintln(w, "pretty: max nested depth exceeded") + return + } + indent := strings.Repeat(Indent, s.level) + fmt.Fprintf(w, "%s%s", indent, s.prefix) + if isNil(v) { + fmt.Fprintf(w, "nil%s", s.suffix) + return + } + if v.Type().Kind() == reflect.Interface { + v = v.Elem() + } + if v.Type() == typeOfTime { + fmt.Fprintf(w, "%s%s", v.Interface(), s.suffix) + return + } + for v.Type().Kind() == reflect.Ptr { + fmt.Fprintf(w, "&") + v = v.Elem() + } + switch v.Type().Kind() { + default: + fmt.Fprintf(w, "%s%s", short(v), s.suffix) + + case reflect.Array: + fmt.Fprintf(w, "%s{\n", v.Type()) + for i := 0; i < v.Len(); i++ { + fprint(w, v.Index(i), state{ + level: s.level + 1, + prefix: "", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + fmt.Fprintf(w, "%s}", indent) + + case reflect.Slice: + fmt.Fprintf(w, "%s{", v.Type()) + if v.Len() > 0 { + fmt.Fprintln(w) + for i := 0; i < v.Len(); i++ { + fprint(w, v.Index(i), state{ + level: s.level + 1, + prefix: "", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + + case reflect.Map: + fmt.Fprintf(w, "%s{", v.Type()) + if v.Len() > 0 { + fmt.Fprintln(w) + keys := v.MapKeys() + maybeSort(keys, v.Type().Key()) + for _, key := range keys { + val := v.MapIndex(key) + if s.defaults || !isDefault(val) { + fprint(w, val, state{ + level: s.level + 1, + prefix: short(key) + ": ", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + + case reflect.Struct: + t := v.Type() + fmt.Fprintf(w, "%s{\n", t) + for i := 0; i < t.NumField(); i++ { + f := v.Field(i) + if s.defaults || !isDefault(f) { + fprint(w, f, state{ + level: s.level + 1, + prefix: t.Field(i).Name + ": ", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + } +} + +func isNil(v reflect.Value) bool { + if !v.IsValid() { + return true + } + switch v.Type().Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + default: + return false + } +} + +func isDefault(v reflect.Value) bool { + if !v.IsValid() { + return true + } + t := 
v.Type() + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + default: + if !v.CanInterface() { + return false + } + return t.Comparable() && v.Interface() == reflect.Zero(t).Interface() + } +} + +// short returns a short, one-line string for v. +func short(v reflect.Value) string { + if !v.IsValid() { + return "nil" + } + if v.Type().Kind() == reflect.String { + return fmt.Sprintf("%q", v) + } + return fmt.Sprintf("%v", v) +} + +func indent(w io.Writer, level int) { + for i := 0; i < level; i++ { + io.WriteString(w, Indent) // ignore errors + } +} + +func maybeSort(vs []reflect.Value, t reflect.Type) { + if less := lessFunc(t); less != nil { + sort.Sort(&sorter{vs, less}) + } +} + +// lessFunc returns a function that implements the "<" operator +// for the given type, or nil if the type doesn't support "<" . +func lessFunc(t reflect.Type) func(v1, v2 interface{}) bool { + switch t.Kind() { + case reflect.String: + return func(v1, v2 interface{}) bool { return v1.(string) < v2.(string) } + case reflect.Int: + return func(v1, v2 interface{}) bool { return v1.(int) < v2.(int) } + case reflect.Int8: + return func(v1, v2 interface{}) bool { return v1.(int8) < v2.(int8) } + case reflect.Int16: + return func(v1, v2 interface{}) bool { return v1.(int16) < v2.(int16) } + case reflect.Int32: + return func(v1, v2 interface{}) bool { return v1.(int32) < v2.(int32) } + case reflect.Int64: + return func(v1, v2 interface{}) bool { return v1.(int64) < v2.(int64) } + case reflect.Uint: + return func(v1, v2 interface{}) bool { return v1.(uint) < v2.(uint) } + case reflect.Uint8: + return func(v1, v2 interface{}) bool { return v1.(uint8) < v2.(uint8) } + case reflect.Uint16: + return func(v1, v2 interface{}) bool { return v1.(uint16) < v2.(uint16) } + case reflect.Uint32: + return func(v1, v2 interface{}) bool { return v1.(uint32) < v2.(uint32) } + case reflect.Uint64: + return func(v1, v2 interface{}) bool { return v1.(uint64) < v2.(uint64) } + case reflect.Float32: + return func(v1, v2 interface{}) bool { return v1.(float32) < v2.(float32) } + case reflect.Float64: + return func(v1, v2 interface{}) bool { return v1.(float64) < v2.(float64) } + default: + return nil + } +} + +type sorter struct { + vs []reflect.Value + less func(v1, v2 interface{}) bool +} + +func (s *sorter) Len() int { return len(s.vs) } +func (s *sorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s *sorter) Less(i, j int) bool { return s.less(s.vs[i].Interface(), s.vs[j].Interface()) } diff --git a/vendor/cloud.google.com/go/internal/pretty/pretty_test.go b/vendor/cloud.google.com/go/internal/pretty/pretty_test.go new file mode 100644 index 0000000..6e2ff8b --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pretty/pretty_test.go @@ -0,0 +1,105 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
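As a usage illustration for Value (a sketch, not vendored code; the point type and values are invented):

package prettydemo

import (
	"fmt"

	"cloud.google.com/go/internal/pretty"
)

type point struct{ X, Y int }

func Demo() {
	v := map[string][]point{"a": {{1, 2}}, "b": nil}
	fmt.Printf("%v\n", pretty.Value(v))  // omits the zero-valued "b" entry
	fmt.Printf("%+v\n", pretty.Value(v)) // includes it
}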
+ +package pretty + +import ( + "fmt" + "strings" + "testing" +) + +type S struct { + X int + Y bool + z *string +} + +func TestSprint(t *testing.T) { + Indent = "~" + i := 17 + + for _, test := range []struct { + value interface{} + want string + }{ + // primitives and pointer + {nil, "nil"}, + {3, "3"}, + {9.8, "9.8"}, + {true, "true"}, + {"foo", `"foo"`}, + {&i, "&17"}, + // array and slice + {[3]int{1, 2, 3}, "[3]int{\n~1,\n~2,\n~3,\n}"}, + {[]int{1, 2, 3}, "[]int{\n~1,\n~2,\n~3,\n}"}, + {[]int{}, "[]int{}"}, + {[]string{"foo"}, "[]string{\n~\"foo\",\n}"}, + // map + {map[int]bool{}, "map[int]bool{}"}, + {map[int]bool{1: true, 2: false, 3: true}, + "map[int]bool{\n~1: true,\n~3: true,\n}"}, + // struct + {S{}, "pretty.S{\n}"}, + {S{3, true, ptr("foo")}, + "pretty.S{\n~X: 3,\n~Y: true,\n~z: &\"foo\",\n}"}, + // interface + {[]interface{}{&i}, "[]interface {}{\n~&17,\n}"}, + // nesting + {[]S{{1, false, ptr("a")}, {2, true, ptr("b")}}, + `[]pretty.S{ +~pretty.S{ +~~X: 1, +~~z: &"a", +~}, +~pretty.S{ +~~X: 2, +~~Y: true, +~~z: &"b", +~}, +}`}, + } { + got := fmt.Sprintf("%v", Value(test.value)) + if got != test.want { + t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) + } + } +} + +func TestWithDefaults(t *testing.T) { + Indent = "~" + for _, test := range []struct { + value interface{} + want string + }{ + {map[int]bool{1: true, 2: false, 3: true}, + "map[int]bool{\n~1: true,\n~2: false,\n~3: true,\n}"}, + {S{}, "pretty.S{\n~X: 0,\n~Y: false,\n~z: nil,\n}"}, + } { + got := fmt.Sprintf("%+v", Value(test.value)) + if got != test.want { + t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) + } + } +} + +func TestBadVerb(t *testing.T) { + got := fmt.Sprintf("%d", Value(8)) + want := "%!d(" + if !strings.HasPrefix(got, want) { + t.Errorf("got %q, want prefix %q", got, want) + } +} + +func ptr(s string) *string { return &s } diff --git a/vendor/cloud.google.com/go/internal/readme/Makefile b/vendor/cloud.google.com/go/internal/readme/Makefile new file mode 100644 index 0000000..1c1f4e1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/Makefile @@ -0,0 +1,48 @@ +# Rebuild the README.md file at repo root by inserting code samples +# from compilable go files. + +SHELL=/bin/bash + +GOCLOUD_HOME=$(GOPATH)/src/cloud.google.com/go +README=$(GOCLOUD_HOME)/README.md + +.PHONY: readme test test-good test-bad-go test-bad-md + +readme: + @tmp=$$(mktemp); \ + awk -f snipmd.awk snippets.go $(README) > $$tmp; \ + mv $$tmp $(README) + +diff: + diff $(README) <(awk -f snipmd.awk snippets.go $(README)) + +test: test-good test-bad-go test-bad-md + @echo PASS + +test-good: + @echo testdata/good.md + @cd testdata >& /dev/null; \ + diff -u want.md <(awk -f ../snipmd.awk snips.go good.md) + @echo "testdata/want.md (round trip)" + @cd testdata >& /dev/null; \ + diff -u want.md <(awk -f ../snipmd.awk snips.go want.md) + +test-bad-go: + @for f in testdata/bad-*.go; do \ + echo $$f; \ + if awk -f snipmd.awk $$f >& /dev/null; then \ + echo "$f succeeded, want failure"; \ + exit 1; \ + fi; \ + done + +test-bad-md: + @for f in testdata/bad-*.md; do \ + echo $$f; \ + if awk -f snipmd.awk testdata/snips.go $$f >& /dev/null; then \ + echo "$f succeeded, want failure"; \ + exit 1; \ + fi; \ + done + + diff --git a/vendor/cloud.google.com/go/internal/readme/snipmd.awk b/vendor/cloud.google.com/go/internal/readme/snipmd.awk new file mode 100644 index 0000000..3a52383 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/snipmd.awk @@ -0,0 +1,123 @@ +# Copyright 2017 Google Inc. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# snipmd inserts code snippets from Go source files into a markdown file. +# +# Call with one or more .go files and a .md file: +# +# awk -f snipmd.awk foo.go bar.go template.md +# +# In the Go files, start a snippet with +# //[ NAME +# and end it with +# //] +# +# In the markdown, write +# [snip]:# NAME +# to insert the snippet NAME just below that line. +# If there is already a code block after the [snip]:# line, it will be +# replaced, so a previous output can be used as input. +# +# The following transformations are made to the Go code: +# - The first tab of each line is removed. +# - Trailing blank lines are removed. +# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...` + + +/^[ \t]*\/\/\[/ { # start snippet in Go file + if (inGo()) { + if ($2 == "") { + die("missing snippet name") + } + curSnip = $2 + next + } +} + +/^[ \t]*\/\/]/ { # end snippet in Go file + if (inGo()) { + if (curSnip != "") { + # Remove all but one trailing newline. + gsub(/\n+$/, "\n", snips[curSnip]) + curSnip = "" + next + } else { + die("//] without corresponding //[") + } + } +} + +ENDFILE { + if (curSnip != "") { + die("unclosed snippet: " curSnip) + } +} + +# Skip code blocks in the input that immediately follow [snip]:# lines, +# because we just inserted the snippet. Supports round-tripping. +/^```go$/,/^```$/ { + if (inMarkdown() && afterSnip) { + next + } +} + +# Matches every line. +{ + if (curSnip != "") { + line = $0 + # Remove initial tab, if any. + if (line ~ /^\t/) { + line = substr(line, 2) + } + # Replace ELLIPSIS. + gsub(/_ = ELLIPSIS/, "...", line) + gsub(/ELLIPSIS/, "...", line) + + snips[curSnip] = snips[curSnip] line "\n" + } else if (inMarkdown()) { + afterSnip = 0 + # Copy .md to output. + print + } +} + +$1 ~ /\[snip\]:#/ { # Snippet marker in .md file. + if (inMarkdown()) { + # We expect '[snip]:#' to be followed by '(NAME)' + if ($2 !~ /\(.*\)/) { + die("bad snip spec: " $0) + } + name = substr($2, 2, length($2)-2) + if (snips[name] == "") { + die("no snippet named " name) + } + printf("```go\n%s```\n", snips[name]) + afterSnip = 1 + } +} + + +function inMarkdown() { + return match(FILENAME, /\.md$/) +} + +function inGo() { + return match(FILENAME, /\.go$/) +} + + +function die(msg) { + printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr" + exit 1 +} diff --git a/vendor/cloud.google.com/go/internal/readme/snippets.go b/vendor/cloud.google.com/go/internal/readme/snippets.go new file mode 100644 index 0000000..049710d --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/snippets.go @@ -0,0 +1,241 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file holds samples that are embedded into README.md. + +// This file has to compile, but need not execute. +// If it fails to compile, fix it, then run `make` to regenerate README.md. + +package readme + +import ( + "fmt" + "io/ioutil" + "log" + "time" + + "cloud.google.com/go/bigquery" + "cloud.google.com/go/datastore" + "cloud.google.com/go/logging" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/spanner" + "cloud.google.com/go/storage" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var ctx context.Context + +const END = 0 + +func auth() { + //[ auth + client, err := storage.NewClient(ctx) + //] + _ = client + _ = err +} + +func auth2() { + //[ auth-JSON + client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) + //] + _ = client + _ = err +} + +func auth3() { + var ELLIPSIS oauth2.TokenSource + //[ auth-ts + tokenSource := ELLIPSIS + client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) + //] + _ = client + _ = err +} + +func datastoreSnippets() { + //[ datastore-1 + client, err := datastore.NewClient(ctx, "my-project-id") + if err != nil { + log.Fatal(err) + } + //] + + //[ datastore-2 + type Post struct { + Title string + Body string `datastore:",noindex"` + PublishedAt time.Time + } + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + } + posts := []*Post{ + {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, + {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) + } + //] +} + +func storageSnippets() { + //[ storage-1 + client, err := storage.NewClient(ctx) + if err != nil { + log.Fatal(err) + } + //] + + //[ storage-2 + // Read the object1 from bucket. + rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) + if err != nil { + log.Fatal(err) + } + defer rc.Close() + body, err := ioutil.ReadAll(rc) + if err != nil { + log.Fatal(err) + } + //] + _ = body +} + +func pubsubSnippets() { + //[ pubsub-1 + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + //] + + const ELLIPSIS = 0 + + //[ pubsub-2 + // Publish "hello world" on topic1. + topic := client.Topic("topic1") + res := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), + }) + // The publish happens asynchronously. + // Later, you can get the result from res: + _ = ELLIPSIS + msgID, err := res.Get(ctx) + if err != nil { + log.Fatal(err) + } + + // Use a callback to receive messages via subscription1. + sub := client.Subscription("subscription1") + err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + fmt.Println(m.Data) + m.Ack() // Acknowledge that we've consumed the message. + }) + if err != nil { + log.Println(err) + } + //] + _ = msgID +} + +func bqSnippets() { + //[ bq-1 + c, err := bigquery.NewClient(ctx, "my-project-ID") + if err != nil { + // TODO: Handle error. + } + //] + + //[ bq-2 + // Construct a query. 
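+	// (The bracketed table reference below is BigQuery legacy SQL syntax;
+	// Read executes the query and returns a row iterator that is drained
+	// with Next until it reports iterator.Done, as the loop below shows.)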
+ q := c.Query(` + SELECT year, SUM(number) + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year +`) + // Execute the query. + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + // Iterate through the results. + for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) + } + //] +} + +func loggingSnippets() { + //[ logging-1 + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + //] + //[ logging-2 + logger := client.Logger("my-log") + logger.Log(logging.Entry{Payload: "something happened!"}) + //] + + //[ logging-3 + err = client.Close() + if err != nil { + // TODO: Handle error. + } + //] +} + +func spannerSnippets() { + //[ spanner-1 + client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") + if err != nil { + log.Fatal(err) + } + //] + + //[ spanner-2 + // Simple Reads And Writes + _, err = client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) + if err != nil { + log.Fatal(err) + } + row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) + if err != nil { + log.Fatal(err) + } + //] + _ = row +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go new file mode 100644 index 0000000..5b96a50 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-name.go @@ -0,0 +1,23 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package readme + +import "fmt" + +func f() { + //[ + fmt.Println() + //] +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go new file mode 100644 index 0000000..d69fbcb --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-no-open.go @@ -0,0 +1,19 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package readme + +func f() { + //] +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md b/vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md new file mode 100644 index 0000000..e3a1a3c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-nosnip.md @@ -0,0 +1,2 @@ +[snip]:# (unknown) + diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md b/vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md new file mode 100644 index 0000000..2f46ad8 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-spec.md @@ -0,0 +1 @@ +[snip]:# missing-parens diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go b/vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go new file mode 100644 index 0000000..486ce42 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/bad-unclosed.go @@ -0,0 +1,21 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package readme + +// unclosed snippet + +func f() { + //[ X +} diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/good.md b/vendor/cloud.google.com/go/internal/readme/testdata/good.md new file mode 100644 index 0000000..c300afe --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/good.md @@ -0,0 +1,18 @@ +This template is for testing snipmd.awk. + +Put the first snippet here. + +[snip]:# (first) + +And now the second. +[snip]:# (second) + +A top-level snippet. + +[snip]:# (top-level) + +```go +// A code block that is not included. +``` + +And we're done. diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/snips.go b/vendor/cloud.google.com/go/internal/readme/testdata/snips.go new file mode 100644 index 0000000..cddb5f9 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/snips.go @@ -0,0 +1,39 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
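+
+// For illustration (the snippet name is hypothetical): a snippet authored in
+// a Go file as
+//
+//	//[ hello
+//	fmt.Println("hello")
+//	//]
+//
+// is inserted into the markdown wherever a line of the form
+//
+//	[snip]:# (hello)
+//
+// appears; snipmd.awk replaces that marker's code block on each run, and
+// ELLIPSIS in the Go source becomes ... in the output. The file below
+// exercises exactly these rules.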
+ +package readme + +import ( + "errors" + "fmt" +) + +func f() { + ELLIPSIS := 3 + //[ first + fmt.Println("hello") + x := ELLIPSIS + //] + + //[ second + if x > 2 { + _ = ELLIPSIS + } + //] +} + +//[ top-level +var ErrBad = errors.New("bad") + +//] diff --git a/vendor/cloud.google.com/go/internal/readme/testdata/want.md b/vendor/cloud.google.com/go/internal/readme/testdata/want.md new file mode 100644 index 0000000..176bd06 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/readme/testdata/want.md @@ -0,0 +1,30 @@ +This template is for testing snipmd.awk. + +Put the first snippet here. + +[snip]:# (first) +```go +fmt.Println("hello") +x := ... +``` + +And now the second. +[snip]:# (second) +```go +if x > 2 { + ... +} +``` + +A top-level snippet. + +[snip]:# (top-level) +```go +var ErrBad = errors.New("bad") +``` + +```go +// A code block that is not included. +``` + +And we're done. diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 0000000..e1f9aaa --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,55 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. + if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return Annotatef(lastErr, "retry failed with %v; last error", cerr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/internal/retry_test.go b/vendor/cloud.google.com/go/internal/retry_test.go new file mode 100644 index 0000000..534c0fe --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry_test.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
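+//
+// For reference, a sketch of how callers use Retry (doRPC and isRetryable
+// are hypothetical helpers):
+//
+//	bo := gax.Backoff{Initial: time.Second, Max: 30 * time.Second, Multiplier: 2}
+//	err := Retry(ctx, bo, func() (bool, error) {
+//		err := doRPC()
+//		if err != nil && isRetryable(err) {
+//			return false, err // pause, then try again
+//		}
+//		return true, err // stop and return err
+//	})
+//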
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + "fmt" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestRetry(t *testing.T) { + ctx := context.Background() + // Without a context deadline, retry will run until the function + // says not to retry any more. + n := 0 + endRetry := errors.New("end retry") + err := retry(ctx, gax.Backoff{}, + func() (bool, error) { + n++ + if n < 10 { + return false, nil + } + return true, endRetry + }, + func(context.Context, time.Duration) error { return nil }) + if got, want := err, endRetry; got != want { + t.Errorf("got %v, want %v", err, endRetry) + } + if n != 10 { + t.Errorf("n: got %d, want %d", n, 10) + } + + // If the context has a deadline, sleep will return an error + // and end the function. + n = 0 + err = retry(ctx, gax.Backoff{}, + func() (bool, error) { return false, nil }, + func(context.Context, time.Duration) error { + n++ + if n < 10 { + return nil + } + return context.DeadlineExceeded + }) + if err == nil { + t.Error("got nil, want error") + } +} + +func TestRetryPreserveError(t *testing.T) { + // Retry tries to preserve the type and other information from + // the last error returned by the function. + err := retry(context.Background(), gax.Backoff{}, + func() (bool, error) { + return false, status.Error(codes.NotFound, "not found") + }, + func(context.Context, time.Duration) error { + return context.DeadlineExceeded + }) + got, ok := status.FromError(err) + if !ok { + t.Fatalf("got %T, wanted a status", got) + } + if g, w := got.Code(), codes.NotFound; g != w { + t.Errorf("got code %v, want %v", g, w) + } + wantMessage := fmt.Sprintf("retry failed with %v; last error: not found", context.DeadlineExceeded) + if g, w := got.Message(), wantMessage; g != w { + t.Errorf("got message %q, want %q", g, w) + } +} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/Makefile b/vendor/cloud.google.com/go/internal/rpcreplay/Makefile new file mode 100644 index 0000000..f41293f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/Makefile @@ -0,0 +1,32 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Makefile for building Go files from protos. + +# Change these to match your environment. 
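+# Typical values (illustrative only): PROTOC is the protoc binary,
+# PROTOC_GO_PLUGIN_DIR is the directory containing protoc-gen-go, and
+# PROTOBUF_REPO is a checkout of the protobuf sources (used for -I imports).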
+PROTOC=$(HOME)/bin/protoc +PROTOC_GO_PLUGIN_DIR=$(GOPATH)/bin +PROTOBUF_REPO=$(HOME)/git-repos/protobuf + +gen-protos: sync-protobuf + for d in proto/*; do \ + PATH=$(PATH):$(PROTOC_GO_PLUGIN_DIR) \ + $(PROTOC) --go_out=plugins=grpc:$$d \ + -I $$d -I $(PROTOBUF_REPO)/src $$d/*.proto; \ + done + + +sync-protobuf: + cd $(PROTOBUF_REPO); git pull + diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/doc.go b/vendor/cloud.google.com/go/internal/rpcreplay/doc.go new file mode 100644 index 0000000..91bb79a --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/doc.go @@ -0,0 +1,106 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package rpcreplay supports the capture and replay of gRPC calls. Its main goal is +to improve testing. Once one captures the calls of a test that runs against a real +service, one has an "automatic mock" that can be replayed against the same test, +yielding a unit test that is fast and flake-free. + + +Recording + +To record a sequence of gRPC calls to a file, create a Recorder and pass its +DialOptions to grpc.Dial: + + rec, err := rpcreplay.NewRecorder("service.replay", nil) + if err != nil { ... } + defer func() { + if err := rec.Close(); err != nil { ... } + }() + conn, err := grpc.Dial(serverAddress, rec.DialOptions()...) + +It's essential to close the Recorder when the interaction is finished. + +There is also a NewRecorderWriter function for capturing to an arbitrary +io.Writer. + + +Replaying + +Replaying a captured file looks almost identical: create a Replayer and use +its DialOptions. (Since we're reading the file and not writing it, we don't +have to be as careful about the error returned from Close). + + rep, err := rpcreplay.NewReplayer("service.replay") + if err != nil { ... } + defer rep.Close() + conn, err := grpc.Dial(serverAddress, rep.DialOptions()...) + + +Initial State + +A test might use random or time-sensitive values, for instance to create unique +resources for isolation from other tests. The test therefore has initial values -- +the current time, a random seed -- that differ from run to run. You must record this +initial state and re-establish it on replay. + +To record the initial state, serialize it into a []byte and pass it as the second +argument to NewRecorder: + + timeNow := time.Now() + b, err := timeNow.MarshalBinary() + if err != nil { ... } + rec, err := rpcreplay.NewRecorder("service.replay", b) + +On replay, get the bytes from Replayer.Initial: + + rep, err := rpcreplay.NewReplayer("service.replay") + if err != nil { ... } + defer rep.Close() + err = timeNow.UnmarshalBinary(rep.Initial()) + if err != nil { ... } + + +Nondeterminism + +A nondeterministic program may invoke RPCs in a different order each time +it is run. The order in which RPCs are called during recording may differ +from the order during replay. 
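+
+For example, a test that issues RPCs from two goroutines may record the calls
+in one interleaving and, on a later run, issue them in another.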
+ +The replayer matches incoming to recorded requests by method name and request +contents, so nondeterminism is only a concern for identical requests that result +in different responses. A nondeterministic program whose behavior differs +depending on the order of such RPCs probably has a race condition: since both the +recorded sequence of RPCs and the sequence during replay are valid orderings, the +program should behave the same under both. + + +Other Replayer Differences + +Besides the differences in replay mentioned above, other differences may cause issues +for some programs. We list them here. + +The Replayer delivers a response to an RPC immediately, without waiting for other +incoming RPCs. This can violate causality. For example, in a Pub/Sub program where +one goroutine publishes and another subscribes, during replay the Subscribe call may +finish before the Publish call begins. + +For streaming RPCs, the Replayer delivers the result of Send and Recv calls in +the order they were recorded. No attempt is made to match message contents. + +At present, this package does not record or replay stream headers and trailers, or +the result of the CloseSend method. +*/ +package rpcreplay // import "cloud.google.com/go/internal/rpcreplay" diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/example_test.go b/vendor/cloud.google.com/go/internal/rpcreplay/example_test.go new file mode 100644 index 0000000..747ba50 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/example_test.go @@ -0,0 +1,47 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcreplay_test + +var serverAddress string + +// func Example_NewRecorder() { +// rec, err := rpcreplay.NewRecorder("service.replay", nil) +// if err != nil { +// // TODO: Handle error. +// } +// defer func() { +// if err := rec.Close(); err != nil { +// // TODO: Handle error. +// } +// }() +// conn, err := grpc.Dial(serverAddress, rec.DialOptions()...) +// if err != nil { +// // TODO: Handle error. +// } +// _ = conn // TODO: use connection +// } + +// func Example_NewReplayer() { +// rep, err := rpcreplay.NewReplayer("service.replay") +// if err != nil { +// // TODO: Handle error. +// } +// defer rep.Close() +// conn, err := grpc.Dial(serverAddress, rep.DialOptions()...) +// if err != nil { +// // TODO: Handle error. +// } +// _ = conn // TODO: use connection +// } diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/fake_test.go b/vendor/cloud.google.com/go/internal/rpcreplay/fake_test.go new file mode 100644 index 0000000..8f41fdf --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/fake_test.go @@ -0,0 +1,121 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
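+//
+// For reference, a test would typically stand up this fake as follows
+// (hypothetical sketch):
+//
+//	srv := newIntStoreServer()
+//	defer srv.stop()
+//	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
+//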
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcreplay + +import ( + "io" + "log" + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + pb "cloud.google.com/go/internal/rpcreplay/proto/intstore" +) + +// intStoreServer is an in-memory implementation of IntStore. +type intStoreServer struct { + pb.IntStoreServer + + Addr string + l net.Listener + gsrv *grpc.Server + + items map[string]int32 +} + +func newIntStoreServer() *intStoreServer { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + log.Fatal(err) + } + s := &intStoreServer{ + Addr: l.Addr().String(), + l: l, + gsrv: grpc.NewServer(), + } + pb.RegisterIntStoreServer(s.gsrv, s) + go s.gsrv.Serve(s.l) + return s +} + +func (s *intStoreServer) stop() { + s.gsrv.Stop() + s.l.Close() +} + +func (s *intStoreServer) Set(_ context.Context, item *pb.Item) (*pb.SetResponse, error) { + old := s.setItem(item) + return &pb.SetResponse{PrevValue: old}, nil +} + +func (s *intStoreServer) setItem(item *pb.Item) int32 { + if s.items == nil { + s.items = map[string]int32{} + } + old := s.items[item.Name] + s.items[item.Name] = item.Value + return old +} + +func (s *intStoreServer) Get(_ context.Context, req *pb.GetRequest) (*pb.Item, error) { + val, ok := s.items[req.Name] + if !ok { + return nil, grpc.Errorf(codes.NotFound, "%q", req.Name) + } + return &pb.Item{Name: req.Name, Value: val}, nil +} + +func (s *intStoreServer) ListItems(_ *pb.ListItemsRequest, ss pb.IntStore_ListItemsServer) error { + for name, val := range s.items { + if err := ss.Send(&pb.Item{Name: name, Value: val}); err != nil { + return err + } + } + return nil +} + +func (s *intStoreServer) SetStream(ss pb.IntStore_SetStreamServer) error { + n := 0 + for { + item, err := ss.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + s.setItem(item) + n++ + } + return ss.SendAndClose(&pb.Summary{Count: int32(n)}) +} + +func (s *intStoreServer) StreamChat(ss pb.IntStore_StreamChatServer) error { + for { + item, err := ss.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := ss.Send(item); err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/proto/intstore/intstore.pb.go b/vendor/cloud.google.com/go/internal/rpcreplay/proto/intstore/intstore.pb.go new file mode 100644 index 0000000..657ac2f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/proto/intstore/intstore.pb.go @@ -0,0 +1,454 @@ +// Code generated by protoc-gen-go. +// source: intstore.proto +// DO NOT EDIT! + +/* +Package intstore is a generated protocol buffer package. + +It is generated from these files: + intstore.proto + +It has these top-level messages: + Item + SetResponse + GetRequest + Summary + ListItemsRequest +*/ +package intstore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Item struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Item) Reset() { *m = Item{} } +func (m *Item) String() string { return proto.CompactTextString(m) } +func (*Item) ProtoMessage() {} +func (*Item) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Item) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Item) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +type SetResponse struct { + PrevValue int32 `protobuf:"varint,1,opt,name=prev_value,json=prevValue" json:"prev_value,omitempty"` +} + +func (m *SetResponse) Reset() { *m = SetResponse{} } +func (m *SetResponse) String() string { return proto.CompactTextString(m) } +func (*SetResponse) ProtoMessage() {} +func (*SetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SetResponse) GetPrevValue() int32 { + if m != nil { + return m.PrevValue + } + return 0 +} + +type GetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type Summary struct { + Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Summary) GetCount() int32 { + if m != nil { + return m.Count + } + return 0 +} + +type ListItemsRequest struct { +} + +func (m *ListItemsRequest) Reset() { *m = ListItemsRequest{} } +func (m *ListItemsRequest) String() string { return proto.CompactTextString(m) } +func (*ListItemsRequest) ProtoMessage() {} +func (*ListItemsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func init() { + proto.RegisterType((*Item)(nil), "intstore.Item") + proto.RegisterType((*SetResponse)(nil), "intstore.SetResponse") + proto.RegisterType((*GetRequest)(nil), "intstore.GetRequest") + proto.RegisterType((*Summary)(nil), "intstore.Summary") + proto.RegisterType((*ListItemsRequest)(nil), "intstore.ListItemsRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for IntStore service + +type IntStoreClient interface { + Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) + // A server-to-client streaming RPC. + ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) + // A client-to-server streaming RPC. + SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) + // A Bidirectional streaming RPC. + StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) +} + +type intStoreClient struct { + cc *grpc.ClientConn +} + +func NewIntStoreClient(cc *grpc.ClientConn) IntStoreClient { + return &intStoreClient{cc} +} + +func (c *intStoreClient) Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) { + out := new(SetResponse) + err := grpc.Invoke(ctx, "/intstore.IntStore/Set", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *intStoreClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) { + out := new(Item) + err := grpc.Invoke(ctx, "/intstore.IntStore/Get", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *intStoreClient) ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[0], c.cc, "/intstore.IntStore/ListItems", opts...) + if err != nil { + return nil, err + } + x := &intStoreListItemsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type IntStore_ListItemsClient interface { + Recv() (*Item, error) + grpc.ClientStream +} + +type intStoreListItemsClient struct { + grpc.ClientStream +} + +func (x *intStoreListItemsClient) Recv() (*Item, error) { + m := new(Item) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *intStoreClient) SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[1], c.cc, "/intstore.IntStore/SetStream", opts...) + if err != nil { + return nil, err + } + x := &intStoreSetStreamClient{stream} + return x, nil +} + +type IntStore_SetStreamClient interface { + Send(*Item) error + CloseAndRecv() (*Summary, error) + grpc.ClientStream +} + +type intStoreSetStreamClient struct { + grpc.ClientStream +} + +func (x *intStoreSetStreamClient) Send(m *Item) error { + return x.ClientStream.SendMsg(m) +} + +func (x *intStoreSetStreamClient) CloseAndRecv() (*Summary, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Summary) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *intStoreClient) StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[2], c.cc, "/intstore.IntStore/StreamChat", opts...) 
+ if err != nil { + return nil, err + } + x := &intStoreStreamChatClient{stream} + return x, nil +} + +type IntStore_StreamChatClient interface { + Send(*Item) error + Recv() (*Item, error) + grpc.ClientStream +} + +type intStoreStreamChatClient struct { + grpc.ClientStream +} + +func (x *intStoreStreamChatClient) Send(m *Item) error { + return x.ClientStream.SendMsg(m) +} + +func (x *intStoreStreamChatClient) Recv() (*Item, error) { + m := new(Item) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for IntStore service + +type IntStoreServer interface { + Set(context.Context, *Item) (*SetResponse, error) + Get(context.Context, *GetRequest) (*Item, error) + // A server-to-client streaming RPC. + ListItems(*ListItemsRequest, IntStore_ListItemsServer) error + // A client-to-server streaming RPC. + SetStream(IntStore_SetStreamServer) error + // A Bidirectional streaming RPC. + StreamChat(IntStore_StreamChatServer) error +} + +func RegisterIntStoreServer(s *grpc.Server, srv IntStoreServer) { + s.RegisterService(&_IntStore_serviceDesc, srv) +} + +func _IntStore_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Item) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntStoreServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/intstore.IntStore/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntStoreServer).Set(ctx, req.(*Item)) + } + return interceptor(ctx, in, info, handler) +} + +func _IntStore_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntStoreServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/intstore.IntStore/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntStoreServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _IntStore_ListItems_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListItemsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(IntStoreServer).ListItems(m, &intStoreListItemsServer{stream}) +} + +type IntStore_ListItemsServer interface { + Send(*Item) error + grpc.ServerStream +} + +type intStoreListItemsServer struct { + grpc.ServerStream +} + +func (x *intStoreListItemsServer) Send(m *Item) error { + return x.ServerStream.SendMsg(m) +} + +func _IntStore_SetStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IntStoreServer).SetStream(&intStoreSetStreamServer{stream}) +} + +type IntStore_SetStreamServer interface { + SendAndClose(*Summary) error + Recv() (*Item, error) + grpc.ServerStream +} + +type intStoreSetStreamServer struct { + grpc.ServerStream +} + +func (x *intStoreSetStreamServer) SendAndClose(m *Summary) error { + return x.ServerStream.SendMsg(m) +} + +func (x *intStoreSetStreamServer) Recv() (*Item, error) { + m := new(Item) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _IntStore_StreamChat_Handler(srv interface{}, stream grpc.ServerStream) error { + return 
srv.(IntStoreServer).StreamChat(&intStoreStreamChatServer{stream}) +} + +type IntStore_StreamChatServer interface { + Send(*Item) error + Recv() (*Item, error) + grpc.ServerStream +} + +type intStoreStreamChatServer struct { + grpc.ServerStream +} + +func (x *intStoreStreamChatServer) Send(m *Item) error { + return x.ServerStream.SendMsg(m) +} + +func (x *intStoreStreamChatServer) Recv() (*Item, error) { + m := new(Item) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _IntStore_serviceDesc = grpc.ServiceDesc{ + ServiceName: "intstore.IntStore", + HandlerType: (*IntStoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _IntStore_Set_Handler, + }, + { + MethodName: "Get", + Handler: _IntStore_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListItems", + Handler: _IntStore_ListItems_Handler, + ServerStreams: true, + }, + { + StreamName: "SetStream", + Handler: _IntStore_SetStream_Handler, + ClientStreams: true, + }, + { + StreamName: "StreamChat", + Handler: _IntStore_StreamChat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "intstore.proto", +} + +func init() { proto.RegisterFile("intstore.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 273 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xc3, 0x40, + 0x10, 0xc5, 0xb3, 0xfd, 0xa3, 0xcd, 0x08, 0x45, 0x87, 0x0a, 0x25, 0x20, 0x86, 0x3d, 0xe5, 0xa0, + 0x21, 0xd4, 0xa3, 0x47, 0x0f, 0xa5, 0xe0, 0x29, 0x0b, 0x5e, 0x25, 0xca, 0x80, 0x05, 0xb3, 0x1b, + 0x77, 0x27, 0x05, 0xbf, 0x84, 0x9f, 0x59, 0x36, 0x5b, 0x9b, 0xd2, 0x78, 0xdb, 0xb7, 0xf3, 0x66, + 0xde, 0x6f, 0x76, 0x61, 0xbe, 0xd5, 0xec, 0xd8, 0x58, 0xca, 0x1b, 0x6b, 0xd8, 0xe0, 0xec, 0x4f, + 0xcb, 0x02, 0x26, 0x1b, 0xa6, 0x1a, 0x11, 0x26, 0xba, 0xaa, 0x69, 0x29, 0x52, 0x91, 0xc5, 0x65, + 0x77, 0xc6, 0x05, 0x4c, 0x77, 0xd5, 0x67, 0x4b, 0xcb, 0x51, 0x2a, 0xb2, 0x69, 0x19, 0x84, 0xbc, + 0x83, 0x0b, 0x45, 0x5c, 0x92, 0x6b, 0x8c, 0x76, 0x84, 0x37, 0x00, 0x8d, 0xa5, 0xdd, 0x6b, 0x70, + 0x8a, 0xce, 0x19, 0xfb, 0x9b, 0x97, 0xce, 0x9d, 0x02, 0xac, 0xbd, 0xfb, 0xab, 0x25, 0xc7, 0xff, + 0xa5, 0xc8, 0x5b, 0x38, 0x57, 0x6d, 0x5d, 0x57, 0xf6, 0xdb, 0x07, 0xbe, 0x9b, 0x56, 0xf3, 0x7e, + 0x4c, 0x10, 0x12, 0xe1, 0xf2, 0x79, 0xeb, 0xd8, 0x63, 0xba, 0xfd, 0xa0, 0xd5, 0xcf, 0x08, 0x66, + 0x1b, 0xcd, 0xca, 0xef, 0x80, 0x39, 0x8c, 0x15, 0x31, 0xce, 0xf3, 0xc3, 0x96, 0xde, 0x9b, 0x5c, + 0xf7, 0xfa, 0x08, 0x58, 0x46, 0x78, 0x0f, 0xe3, 0x35, 0x31, 0x2e, 0xfa, 0x7a, 0x8f, 0x98, 0x9c, + 0x4c, 0x91, 0x11, 0x3e, 0x42, 0x7c, 0xc8, 0xc7, 0xa4, 0x2f, 0x9f, 0x42, 0x0d, 0x5b, 0x0b, 0x81, + 0x2b, 0x88, 0x15, 0xb1, 0x62, 0x4b, 0x55, 0x3d, 0x20, 0xbc, 0x3a, 0x22, 0x0c, 0x4f, 0x20, 0xa3, + 0xcc, 0xf7, 0x40, 0x68, 0x78, 0xfa, 0xa8, 0x86, 0x6b, 0x0d, 0x52, 0x32, 0x51, 0x88, 0xb7, 0xb3, + 0xee, 0x63, 0x1f, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x22, 0x28, 0xa0, 0x49, 0xea, 0x01, 0x00, + 0x00, +} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/proto/intstore/intstore.proto b/vendor/cloud.google.com/go/internal/rpcreplay/proto/intstore/intstore.proto new file mode 100644 index 0000000..9d987cb --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/proto/intstore/intstore.proto @@ -0,0 +1,54 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
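+//
+// For reference, the generated Go client is used roughly like this
+// (hypothetical sketch):
+//
+//	client := intstore.NewIntStoreClient(conn)
+//	_, err := client.Set(ctx, &intstore.Item{Name: "x", Value: 1})
+//	item, err := client.Get(ctx, &intstore.GetRequest{Name: "x"})
+//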
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// IntStore is a service for testing the rpcreplay package. +// It is a simple key-value store for integers. + +syntax = "proto3"; + +package intstore; + +service IntStore { + rpc Set(Item) returns (SetResponse) {} + + rpc Get(GetRequest) returns (Item) {} + + // A server-to-client streaming RPC. + rpc ListItems(ListItemsRequest) returns (stream Item) {} + + // A client-to-server streaming RPC. + rpc SetStream(stream Item) returns (Summary) {} + + // A Bidirectional streaming RPC. + rpc StreamChat(stream Item) returns (stream Item) {} +} + +message Item { + string name = 1; + int32 value = 2; +} + +message SetResponse { + int32 prev_value = 1; +} + +message GetRequest { + string name = 1; +} + +message Summary { + int32 count = 1; +} + +message ListItemsRequest {} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/proto/rpcreplay/rpcreplay.pb.go b/vendor/cloud.google.com/go/internal/rpcreplay/proto/rpcreplay/rpcreplay.pb.go new file mode 100644 index 0000000..8e76a39 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/proto/rpcreplay/rpcreplay.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. +// source: rpcreplay.proto +// DO NOT EDIT! + +/* +Package rpcreplay is a generated protocol buffer package. + +It is generated from these files: + rpcreplay.proto + +It has these top-level messages: + Entry +*/ +package rpcreplay + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Entry_Kind int32 + +const ( + Entry_TYPE_UNSPECIFIED Entry_Kind = 0 + // A unary request. + // method: the full name of the method + // message: the request proto + // is_error: false + // ref_index: 0 + Entry_REQUEST Entry_Kind = 1 + // A unary response. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: the response proto + // ref_index: index in the sequence of Entries of matching request (1-based) + Entry_RESPONSE Entry_Kind = 2 + // A method that creates a stream. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: nil + // ref_index: 0 + Entry_CREATE_STREAM Entry_Kind = 3 + // A call to Send on the client returned by a stream-creating method. + // method: unset + // message: the proto being sent + // is_error: false + // ref_index: index of matching CREATE_STREAM entry (1-based) + Entry_SEND Entry_Kind = 4 + // A call to Recv on the client returned by a stream-creating method. 
+ // method: unset + // message: + // if is_error: a google.rpc.Status proto, or nil on EOF + // else: the received message + // ref_index: index of matching CREATE_STREAM entry + Entry_RECV Entry_Kind = 5 +) + +var Entry_Kind_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "REQUEST", + 2: "RESPONSE", + 3: "CREATE_STREAM", + 4: "SEND", + 5: "RECV", +} +var Entry_Kind_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "REQUEST": 1, + "RESPONSE": 2, + "CREATE_STREAM": 3, + "SEND": 4, + "RECV": 5, +} + +func (x Entry_Kind) String() string { + return proto.EnumName(Entry_Kind_name, int32(x)) +} +func (Entry_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +// An Entry represents a single RPC activity, typically a request or response. +type Entry struct { + Kind Entry_Kind `protobuf:"varint,1,opt,name=kind,enum=rpcreplay.Entry_Kind" json:"kind,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"` + Message *google_protobuf.Any `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` + RefIndex int32 `protobuf:"varint,5,opt,name=ref_index,json=refIndex" json:"ref_index,omitempty"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Entry) GetKind() Entry_Kind { + if m != nil { + return m.Kind + } + return Entry_TYPE_UNSPECIFIED +} + +func (m *Entry) GetMethod() string { + if m != nil { + return m.Method + } + return "" +} + +func (m *Entry) GetMessage() *google_protobuf.Any { + if m != nil { + return m.Message + } + return nil +} + +func (m *Entry) GetIsError() bool { + if m != nil { + return m.IsError + } + return false +} + +func (m *Entry) GetRefIndex() int32 { + if m != nil { + return m.RefIndex + } + return 0 +} + +func init() { + proto.RegisterType((*Entry)(nil), "rpcreplay.Entry") + proto.RegisterEnum("rpcreplay.Entry_Kind", Entry_Kind_name, Entry_Kind_value) +} + +func init() { proto.RegisterFile("rpcreplay.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 289 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x8e, 0xdf, 0x4e, 0xc2, 0x30, + 0x14, 0xc6, 0x2d, 0x6c, 0x30, 0x0e, 0xfe, 0xa9, 0x0d, 0x9a, 0xa1, 0x37, 0x0b, 0x57, 0xf3, 0xa6, + 0x24, 0xf8, 0x04, 0x04, 0x8e, 0x09, 0x31, 0x22, 0xb6, 0xc3, 0xc4, 0x1b, 0x17, 0x70, 0x05, 0x17, + 0xa1, 0x25, 0xdd, 0x4c, 0xdc, 0x6b, 0xf8, 0xc4, 0x66, 0x13, 0xf4, 0xae, 0xbf, 0x7e, 0xbf, 0x9c, + 0xef, 0x83, 0x33, 0xbb, 0x7b, 0xb3, 0x6a, 0xb7, 0x59, 0x14, 0x7c, 0x67, 0x4d, 0x6e, 0x58, 0xeb, + 0xef, 0xe3, 0xaa, 0xbb, 0x36, 0x66, 0xbd, 0x51, 0xfd, 0x2a, 0x58, 0x7e, 0xae, 0xfa, 0x0b, 0xbd, + 0xb7, 0x7a, 0xdf, 0x35, 0x70, 0x51, 0xe7, 0xb6, 0x60, 0x37, 0xe0, 0x7c, 0xa4, 0x3a, 0xf1, 0x49, + 0x40, 0xc2, 0xd3, 0xc1, 0x05, 0xff, 0xbf, 0x57, 0xe5, 0xfc, 0x3e, 0xd5, 0x89, 0xa8, 0x14, 0x76, + 0x09, 0x8d, 0xad, 0xca, 0xdf, 0x4d, 0xe2, 0xd7, 0x02, 0x12, 0xb6, 0xc4, 0x9e, 0x18, 0x87, 0xe6, + 0x56, 0x65, 0xd9, 0x62, 0xad, 0xfc, 0x7a, 0x40, 0xc2, 0xf6, 0xa0, 0xc3, 0x7f, 0x9b, 0xf9, 0xa1, + 0x99, 0x0f, 0x75, 0x21, 0x0e, 0x12, 0xeb, 0x82, 0x97, 0x66, 0xb1, 0xb2, 0xd6, 0x58, 0xdf, 0x09, + 0x48, 0xe8, 0x89, 0x66, 0x9a, 0x61, 0x89, 0xec, 0x1a, 0x5a, 0x56, 0xad, 0xe2, 0x54, 0x27, 0xea, + 0xcb, 0x77, 0x03, 0x12, 0xba, 0xc2, 0xb3, 0x6a, 0x35, 0x29, 
0xb9, 0xf7, 0x0a, 0x4e, 0xb9, 0x86, + 0x75, 0x80, 0x46, 0x2f, 0x33, 0x8c, 0xe7, 0x53, 0x39, 0xc3, 0xd1, 0xe4, 0x6e, 0x82, 0x63, 0x7a, + 0xc4, 0xda, 0xd0, 0x14, 0xf8, 0x34, 0x47, 0x19, 0x51, 0xc2, 0x8e, 0xc1, 0x13, 0x28, 0x67, 0x8f, + 0x53, 0x89, 0xb4, 0xc6, 0xce, 0xe1, 0x64, 0x24, 0x70, 0x18, 0x61, 0x2c, 0x23, 0x81, 0xc3, 0x07, + 0x5a, 0x67, 0x1e, 0x38, 0x12, 0xa7, 0x63, 0xea, 0x94, 0x2f, 0x81, 0xa3, 0x67, 0xea, 0x2e, 0x1b, + 0xd5, 0xdc, 0xdb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x9b, 0x9d, 0x4f, 0x54, 0x01, 0x00, + 0x00, +} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/proto/rpcreplay/rpcreplay.proto b/vendor/cloud.google.com/go/internal/rpcreplay/proto/rpcreplay/rpcreplay.proto new file mode 100644 index 0000000..8475f33 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/proto/rpcreplay/rpcreplay.proto @@ -0,0 +1,71 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package rpcreplay; + +import "google/protobuf/any.proto"; + +// An Entry represents a single RPC activity, typically a request or response. +message Entry { + enum Kind { + TYPE_UNSPECIFIED = 0; + + // A unary request. + // method: the full name of the method + // message: the request proto + // is_error: false + // ref_index: 0 + REQUEST = 1; + + // A unary response. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: the response proto + // ref_index: index in the sequence of Entries of matching request (1-based) + RESPONSE = 2; + + // A method that creates a stream. + // method: the full name of the method + // message: + // if is_error: a google.rpc.Status proto + // else: nil + // ref_index: 0 + CREATE_STREAM = 3; + + // A call to Send on the client returned by a stream-creating method. + // method: unset + // message: the proto being sent + // is_error: false + // ref_index: index of matching CREATE_STREAM entry (1-based) + SEND = 4; // message sent on stream + + // A call to Recv on the client returned by a stream-creating method. + // method: unset + // message: + // if is_error: a google.rpc.Status proto, or nil on EOF + // else: the received message + // ref_index: index of matching CREATE_STREAM entry + RECV = 5; // message received from stream + } + + Kind kind = 1; + string method = 2; // method name + google.protobuf.Any message = 3; // request, response or error status + bool is_error = 4; // was response an error? + int32 ref_index = 5; // for RESPONSE, index of matching request; + // for SEND/RECV, index of CREATE_STREAM +} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/rpcreplay.go b/vendor/cloud.google.com/go/internal/rpcreplay/rpcreplay.go new file mode 100644 index 0000000..b3823dc --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/rpcreplay.go @@ -0,0 +1,689 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcreplay + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "os" + "sync" + + "golang.org/x/net/context" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + pb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/any" + spb "google.golang.org/genproto/googleapis/rpc/status" +) + +// A Recorder records RPCs for later playback. +type Recorder struct { + mu sync.Mutex + w *bufio.Writer + f *os.File + next int + err error +} + +// NewRecorder creates a recorder that writes to filename. The file will +// also store the initial bytes for retrieval during replay. +// +// You must call Close on the Recorder to ensure that all data is written. +func NewRecorder(filename string, initial []byte) (*Recorder, error) { + f, err := os.Create(filename) + if err != nil { + return nil, err + } + rec, err := NewRecorderWriter(f, initial) + if err != nil { + _ = f.Close() + return nil, err + } + rec.f = f + return rec, nil +} + +// NewRecorderWriter creates a recorder that writes to w. The initial +// bytes will also be written to w for retrieval during replay. +// +// You must call Close on the Recorder to ensure that all data is written. +func NewRecorderWriter(w io.Writer, initial []byte) (*Recorder, error) { + bw := bufio.NewWriter(w) + if err := writeHeader(bw, initial); err != nil { + return nil, err + } + return &Recorder{w: bw, next: 1}, nil +} + +// DialOptions returns the options that must be passed to grpc.Dial +// to enable recording. +func (r *Recorder) DialOptions() []grpc.DialOption { + return []grpc.DialOption{ + grpc.WithUnaryInterceptor(r.interceptUnary), + grpc.WithStreamInterceptor(r.interceptStream), + } +} + +// Close saves any unwritten information. +func (r *Recorder) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.err != nil { + return r.err + } + err := r.w.Flush() + if r.f != nil { + if err2 := r.f.Close(); err == nil { + err = err2 + } + } + return err +} + +// Intercepts all unary (non-stream) RPCs. +func (r *Recorder) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ereq := &entry{ + kind: pb.Entry_REQUEST, + method: method, + msg: message{msg: req.(proto.Message)}, + } + + refIndex, err := r.writeEntry(ereq) + if err != nil { + return err + } + ierr := invoker(ctx, method, req, res, cc, opts...) + eres := &entry{ + kind: pb.Entry_RESPONSE, + refIndex: refIndex, + } + // If the error is not a gRPC status, then something more + // serious is wrong. More significantly, we have no way + // of serializing an arbitrary error. So just return it + // without recording the response. 
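+	// (A recording truncated this way will also fail to load in the
+	// Replayer, which reports an error for any request that has no
+	// matching response.)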
+	if _, ok := status.FromError(ierr); !ok {
+		r.mu.Lock()
+		r.err = fmt.Errorf("saw non-status error in %s response: %v (%T)", method, ierr, ierr)
+		r.mu.Unlock()
+		return ierr
+	}
+	eres.msg.set(res, ierr)
+	if _, err := r.writeEntry(eres); err != nil {
+		return err
+	}
+	return ierr
+}
+
+func (r *Recorder) writeEntry(e *entry) (int, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.err != nil {
+		return 0, r.err
+	}
+	err := writeEntry(r.w, e)
+	if err != nil {
+		r.err = err
+		return 0, err
+	}
+	n := r.next
+	r.next++
+	return n, nil
+}
+
+func (r *Recorder) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	cstream, serr := streamer(ctx, desc, cc, method, opts...)
+	e := &entry{
+		kind:   pb.Entry_CREATE_STREAM,
+		method: method,
+	}
+	e.msg.set(nil, serr)
+	refIndex, err := r.writeEntry(e)
+	if err != nil {
+		return nil, err
+	}
+	return &recClientStream{
+		ctx:      ctx,
+		rec:      r,
+		cstream:  cstream,
+		refIndex: refIndex,
+	}, serr
+}
+
+// A recClientStream implements the grpc.ClientStream interface.
+// It behaves exactly like the default ClientStream, but also
+// records all messages sent and received.
+type recClientStream struct {
+	ctx      context.Context
+	rec      *Recorder
+	cstream  grpc.ClientStream
+	refIndex int
+}
+
+func (rcs *recClientStream) Context() context.Context { return rcs.ctx }
+
+func (rcs *recClientStream) SendMsg(m interface{}) error {
+	serr := rcs.cstream.SendMsg(m)
+	e := &entry{
+		kind:     pb.Entry_SEND,
+		refIndex: rcs.refIndex,
+	}
+	e.msg.set(m, serr)
+	if _, err := rcs.rec.writeEntry(e); err != nil {
+		return err
+	}
+	return serr
+}
+
+func (rcs *recClientStream) RecvMsg(m interface{}) error {
+	serr := rcs.cstream.RecvMsg(m)
+	e := &entry{
+		kind:     pb.Entry_RECV,
+		refIndex: rcs.refIndex,
+	}
+	e.msg.set(m, serr)
+	if _, err := rcs.rec.writeEntry(e); err != nil {
+		return err
+	}
+	return serr
+}
+
+func (rcs *recClientStream) Header() (metadata.MD, error) {
+	// TODO(jba): record.
+	return rcs.cstream.Header()
+}
+
+func (rcs *recClientStream) Trailer() metadata.MD {
+	// TODO(jba): record.
+	return rcs.cstream.Trailer()
+}
+
+func (rcs *recClientStream) CloseSend() error {
+	// TODO(jba): record.
+	return rcs.cstream.CloseSend()
+}
+
+// A Replayer replays a set of RPCs saved by a Recorder.
+type Replayer struct {
+	initial []byte                                // initial state
+	log     func(format string, v ...interface{}) // for debugging
+
+	mu      sync.Mutex
+	calls   []*call
+	streams []*stream
+}
+
+// A call represents a unary RPC, with a request and response (or error).
+type call struct {
+	method   string
+	request  proto.Message
+	response message
+}
+
+// A stream represents a gRPC stream, with an initial create-stream call, followed by
+// zero or more sends and/or receives.
+type stream struct {
+	method      string
+	createIndex int
+	createErr   error // error from create call
+	sends       []message
+	recvs       []message
+}
+
+// NewReplayer creates a Replayer that reads from filename.
+func NewReplayer(filename string) (*Replayer, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return NewReplayerReader(f)
+}
+
+// NewReplayerReader creates a Replayer that reads from r.
+func NewReplayerReader(r io.Reader) (*Replayer, error) {
+	rep := &Replayer{
+		log: func(string, ...interface{}) {},
+	}
+	if err := rep.read(r); err != nil {
+		return nil, err
+	}
+	return rep, nil
+}
+
+// read reads the stream of recorded entries.
+// It matches requests with responses, with each pair grouped +// into a call struct. +func (rep *Replayer) read(r io.Reader) error { + r = bufio.NewReader(r) + bytes, err := readHeader(r) + if err != nil { + return err + } + rep.initial = bytes + + callsByIndex := map[int]*call{} + streamsByIndex := map[int]*stream{} + for i := 1; ; i++ { + e, err := readEntry(r) + if err != nil { + return err + } + if e == nil { + break + } + switch e.kind { + case pb.Entry_REQUEST: + callsByIndex[i] = &call{ + method: e.method, + request: e.msg.msg, + } + + case pb.Entry_RESPONSE: + call := callsByIndex[e.refIndex] + if call == nil { + return fmt.Errorf("replayer: no request for response #%d", i) + } + delete(callsByIndex, e.refIndex) + call.response = e.msg + rep.calls = append(rep.calls, call) + + case pb.Entry_CREATE_STREAM: + s := &stream{method: e.method, createIndex: i} + s.createErr = e.msg.err + streamsByIndex[i] = s + rep.streams = append(rep.streams, s) + + case pb.Entry_SEND: + s := streamsByIndex[e.refIndex] + if s == nil { + return fmt.Errorf("replayer: no stream for send #%d", i) + } + s.sends = append(s.sends, e.msg) + + case pb.Entry_RECV: + s := streamsByIndex[e.refIndex] + if s == nil { + return fmt.Errorf("replayer: no stream for recv #%d", i) + } + s.recvs = append(s.recvs, e.msg) + + default: + return fmt.Errorf("replayer: unknown kind %s", e.kind) + } + } + if len(callsByIndex) > 0 { + return fmt.Errorf("replayer: %d unmatched requests", len(callsByIndex)) + } + return nil +} + +// DialOptions returns the options that must be passed to grpc.Dial +// to enable replaying. +func (r *Replayer) DialOptions() []grpc.DialOption { + return []grpc.DialOption{ + // On replay, we make no RPCs, which means the connection may be closed + // before the normally async Dial completes. Making the Dial synchronous + // fixes that. + grpc.WithBlock(), + grpc.WithUnaryInterceptor(r.interceptUnary), + grpc.WithStreamInterceptor(r.interceptStream), + } +} + +// Initial returns the initial state saved by the Recorder. +func (r *Replayer) Initial() []byte { return r.initial } + +// SetLogFunc sets a function to be used for debug logging. The function +// should be safe to be called from multiple goroutines. +func (r *Replayer) SetLogFunc(f func(format string, v ...interface{})) { + r.log = f +} + +// Close closes the Replayer. 
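+// It is currently a no-op: NewReplayerReader consumes the entire stream
+// up front, so there is nothing left to release.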
+func (r *Replayer) Close() error { + return nil +} + +func (r *Replayer) interceptUnary(_ context.Context, method string, req, res interface{}, _ *grpc.ClientConn, _ grpc.UnaryInvoker, _ ...grpc.CallOption) error { + mreq := req.(proto.Message) + r.log("request %s (%s)", method, req) + call := r.extractCall(method, mreq) + if call == nil { + return fmt.Errorf("replayer: request not found: %s", mreq) + } + r.log("returning %v", call.response) + if call.response.err != nil { + return call.response.err + } + proto.Merge(res.(proto.Message), call.response.msg) // copy msg into res + return nil +} + +func (r *Replayer) interceptStream(ctx context.Context, _ *grpc.StreamDesc, _ *grpc.ClientConn, method string, _ grpc.Streamer, _ ...grpc.CallOption) (grpc.ClientStream, error) { + r.log("create-stream %s", method) + str := r.extractStream(method) + if str == nil { + return nil, fmt.Errorf("replayer: stream not found for method %s", method) + } + if str.createErr != nil { + return nil, str.createErr + } + return &repClientStream{ctx: ctx, str: str}, nil +} + +type repClientStream struct { + ctx context.Context + str *stream +} + +func (rcs *repClientStream) Context() context.Context { return rcs.ctx } + +func (rcs *repClientStream) SendMsg(m interface{}) error { + if len(rcs.str.sends) == 0 { + return fmt.Errorf("replayer: no more sends for stream %s, created at index %d", + rcs.str.method, rcs.str.createIndex) + } + // TODO(jba): Do not assume that the sends happen in the same order on replay. + msg := rcs.str.sends[0] + rcs.str.sends = rcs.str.sends[1:] + return msg.err +} + +func (rcs *repClientStream) RecvMsg(m interface{}) error { + if len(rcs.str.recvs) == 0 { + return fmt.Errorf("replayer: no more receives for stream %s, created at index %d", + rcs.str.method, rcs.str.createIndex) + } + msg := rcs.str.recvs[0] + rcs.str.recvs = rcs.str.recvs[1:] + if msg.err != nil { + return msg.err + } + proto.Merge(m.(proto.Message), msg.msg) // copy msg into m + return nil +} + +func (rcs *repClientStream) Header() (metadata.MD, error) { + log.Printf("replay: stream metadata not supported") + return nil, nil +} + +func (rcs *repClientStream) Trailer() metadata.MD { + log.Printf("replay: stream metadata not supported") + return nil +} + +func (rcs *repClientStream) CloseSend() error { + return nil +} + +// extractCall finds the first call in the list with the same method +// and request. It returns nil if it can't find such a call. +func (r *Replayer) extractCall(method string, req proto.Message) *call { + r.mu.Lock() + defer r.mu.Unlock() + for i, call := range r.calls { + if call == nil { + continue + } + if method == call.method && proto.Equal(req, call.request) { + r.calls[i] = nil // nil out this call so we don't reuse it + return call + } + } + return nil +} + +func (r *Replayer) extractStream(method string) *stream { + r.mu.Lock() + defer r.mu.Unlock() + for i, stream := range r.streams { + if stream == nil { + continue + } + if method == stream.method { + r.streams[i] = nil + return stream + } + } + return nil +} + +// Fprint reads the entries from filename and writes them to w in human-readable form. +// It is intended for debugging. +func Fprint(w io.Writer, filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + return FprintReader(w, f) +} + +// FprintReader reads the entries from r and writes them to w in human-readable form. +// It is intended for debugging. 
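+// The output begins with the initial state, followed by one "#n: kind: ..."
+// header per entry and then the entry's message in proto text form, or its
+// error.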
+func FprintReader(w io.Writer, r io.Reader) error { + initial, err := readHeader(r) + if err != nil { + return err + } + fmt.Fprintf(w, "initial state: %q\n", string(initial)) + for i := 1; ; i++ { + e, err := readEntry(r) + if err != nil { + return err + } + if e == nil { + return nil + } + + s := "message" + if e.msg.err != nil { + s = "error" + } + fmt.Fprintf(w, "#%d: kind: %s, method: %s, ref index: %d, %s:\n", + i, e.kind, e.method, e.refIndex, s) + if e.msg.err == nil { + if err := proto.MarshalText(w, e.msg.msg); err != nil { + return err + } + } else { + fmt.Fprintf(w, "%v\n", e.msg.err) + } + } +} + +// An entry holds one gRPC action (request, response, etc.). +type entry struct { + kind pb.Entry_Kind + method string + msg message + refIndex int // index of corresponding request or create-stream +} + +func (e1 *entry) equal(e2 *entry) bool { + if e1 == nil && e2 == nil { + return true + } + if e1 == nil || e2 == nil { + return false + } + return e1.kind == e2.kind && + e1.method == e2.method && + proto.Equal(e1.msg.msg, e2.msg.msg) && + errEqual(e1.msg.err, e2.msg.err) && + e1.refIndex == e2.refIndex +} + +func errEqual(e1, e2 error) bool { + if e1 == e2 { + return true + } + s1, ok1 := status.FromError(e1) + s2, ok2 := status.FromError(e2) + if !ok1 || !ok2 { + return false + } + return proto.Equal(s1.Proto(), s2.Proto()) +} + +// message holds either a single proto.Message or an error. +type message struct { + msg proto.Message + err error +} + +func (m *message) set(msg interface{}, err error) { + m.err = err + if err != io.EOF && msg != nil { + m.msg = msg.(proto.Message) + } +} + +// File format: +// header +// sequence of Entry protos +// +// Header format: +// magic string +// a record containing the bytes of the initial state + +const magic = "RPCReplay" + +func writeHeader(w io.Writer, initial []byte) error { + if _, err := io.WriteString(w, magic); err != nil { + return err + } + return writeRecord(w, initial) +} + +func readHeader(r io.Reader) ([]byte, error) { + var buf [len(magic)]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + if err == io.EOF { + err = errors.New("rpcreplay: empty replay file") + } + return nil, err + } + if string(buf[:]) != magic { + return nil, errors.New("rpcreplay: not a replay file (does not begin with magic string)") + } + bytes, err := readRecord(r) + if err == io.EOF { + err = errors.New("rpcreplay: missing initial state") + } + return bytes, err +} + +func writeEntry(w io.Writer, e *entry) error { + var m proto.Message + if e.msg.err != nil && e.msg.err != io.EOF { + s, ok := status.FromError(e.msg.err) + if !ok { + return fmt.Errorf("rpcreplay: error %v is not a Status", e.msg.err) + } + m = s.Proto() + } else { + m = e.msg.msg + } + var a *any.Any + var err error + if m != nil { + a, err = ptypes.MarshalAny(m) + if err != nil { + return err + } + } + pe := &pb.Entry{ + Kind: e.kind, + Method: e.method, + Message: a, + IsError: e.msg.err != nil, + RefIndex: int32(e.refIndex), + } + bytes, err := proto.Marshal(pe) + if err != nil { + return err + } + return writeRecord(w, bytes) +} + +func readEntry(r io.Reader) (*entry, error) { + buf, err := readRecord(r) + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + var pe pb.Entry + if err := proto.Unmarshal(buf, &pe); err != nil { + return nil, err + } + var msg message + if pe.Message != nil { + var any ptypes.DynamicAny + if err := ptypes.UnmarshalAny(pe.Message, &any); err != nil { + return nil, err + } + if pe.IsError { + msg.err = 
status.ErrorProto(any.Message.(*spb.Status)) + } else { + msg.msg = any.Message + } + } else if pe.IsError { + msg.err = io.EOF + } else if pe.Kind != pb.Entry_CREATE_STREAM { + return nil, errors.New("rpcreplay: entry with nil message and false is_error") + } + return &entry{ + kind: pe.Kind, + method: pe.Method, + msg: msg, + refIndex: int(pe.RefIndex), + }, nil +} + +// A record consists of an unsigned 32-bit little-endian length L followed by L +// bytes. + +func writeRecord(w io.Writer, data []byte) error { + if err := binary.Write(w, binary.LittleEndian, uint32(len(data))); err != nil { + return err + } + _, err := w.Write(data) + return err +} + +func readRecord(r io.Reader) ([]byte, error) { + var size uint32 + if err := binary.Read(r, binary.LittleEndian, &size); err != nil { + return nil, err + } + buf := make([]byte, size) + if _, err := io.ReadFull(r, buf); err != nil { + return nil, err + } + return buf, nil +} diff --git a/vendor/cloud.google.com/go/internal/rpcreplay/rpcreplay_test.go b/vendor/cloud.google.com/go/internal/rpcreplay/rpcreplay_test.go new file mode 100644 index 0000000..46cc279 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/rpcreplay/rpcreplay_test.go @@ -0,0 +1,362 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
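+
+// For orientation, a record-then-replay cycle looks roughly like the sketch
+// below; "service.replay", addr and initialState are placeholders, and error
+// handling is elided:
+//
+//	rec, _ := NewRecorder("service.replay", initialState)
+//	conn, _ := grpc.Dial(addr,
+//		append([]grpc.DialOption{grpc.WithInsecure()}, rec.DialOptions()...)...)
+//	// ... issue RPCs on conn ...
+//	rec.Close()
+//
+//	rep, _ := NewReplayer("service.replay")
+//	conn, _ = grpc.Dial(addr,
+//		append([]grpc.DialOption{grpc.WithInsecure()}, rep.DialOptions()...)...)
+//	// ... the same RPCs are now served from the recording ...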
+ +package rpcreplay + +import ( + "bytes" + "io" + "testing" + + ipb "cloud.google.com/go/internal/rpcreplay/proto/intstore" + rpb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay" + "cloud.google.com/go/internal/testutil" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestRecordIO(t *testing.T) { + buf := &bytes.Buffer{} + want := []byte{1, 2, 3} + if err := writeRecord(buf, want); err != nil { + t.Fatal(err) + } + got, err := readRecord(buf) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestHeaderIO(t *testing.T) { + buf := &bytes.Buffer{} + want := []byte{1, 2, 3} + if err := writeHeader(buf, want); err != nil { + t.Fatal(err) + } + got, err := readHeader(buf) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(got, want) { + t.Errorf("got %v, want %v", got, want) + } + + // readHeader errors + for _, contents := range []string{"", "badmagic", "gRPCReplay"} { + if _, err := readHeader(bytes.NewBufferString(contents)); err == nil { + t.Errorf("%q: got nil, want error", contents) + } + } +} + +func TestEntryIO(t *testing.T) { + for i, want := range []*entry{ + { + kind: rpb.Entry_REQUEST, + method: "method", + msg: message{msg: &rpb.Entry{}}, + refIndex: 7, + }, + { + kind: rpb.Entry_RESPONSE, + method: "method", + msg: message{err: status.Error(codes.NotFound, "not found")}, + refIndex: 8, + }, + { + kind: rpb.Entry_RECV, + method: "method", + msg: message{err: io.EOF}, + refIndex: 3, + }, + } { + buf := &bytes.Buffer{} + if err := writeEntry(buf, want); err != nil { + t.Fatal(err) + } + got, err := readEntry(buf) + if err != nil { + t.Fatal(err) + } + if !got.equal(want) { + t.Errorf("#%d: got %v, want %v", i, got, want) + } + } +} + +var initialState = []byte{1, 2, 3} + +func TestRecord(t *testing.T) { + srv := newIntStoreServer() + defer srv.stop() + buf := record(t, srv) + + gotIstate, err := readHeader(buf) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(gotIstate, initialState) { + t.Fatalf("got %v, want %v", gotIstate, initialState) + } + item := &ipb.Item{Name: "a", Value: 1} + wantEntries := []*entry{ + // Set + { + kind: rpb.Entry_REQUEST, + method: "/intstore.IntStore/Set", + msg: message{msg: item}, + }, + { + kind: rpb.Entry_RESPONSE, + msg: message{msg: &ipb.SetResponse{PrevValue: 0}}, + refIndex: 1, + }, + // Get + { + kind: rpb.Entry_REQUEST, + method: "/intstore.IntStore/Get", + msg: message{msg: &ipb.GetRequest{Name: "a"}}, + }, + { + kind: rpb.Entry_RESPONSE, + msg: message{msg: item}, + refIndex: 3, + }, + { + kind: rpb.Entry_REQUEST, + method: "/intstore.IntStore/Get", + msg: message{msg: &ipb.GetRequest{Name: "x"}}, + }, + { + kind: rpb.Entry_RESPONSE, + msg: message{err: status.Error(codes.NotFound, `"x"`)}, + refIndex: 5, + }, + // ListItems + { // entry #7 + kind: rpb.Entry_CREATE_STREAM, + method: "/intstore.IntStore/ListItems", + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.ListItemsRequest{}}, + refIndex: 7, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: item}, + refIndex: 7, + }, + { + kind: rpb.Entry_RECV, + msg: message{err: io.EOF}, + refIndex: 7, + }, + // SetStream + { // entry #11 + kind: rpb.Entry_CREATE_STREAM, + method: "/intstore.IntStore/SetStream", + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "b", Value: 2}}, + refIndex: 11, + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: 
&ipb.Item{Name: "c", Value: 3}}, + refIndex: 11, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: &ipb.Summary{Count: 2}}, + refIndex: 11, + }, + + // StreamChat + { // entry #15 + kind: rpb.Entry_CREATE_STREAM, + method: "/intstore.IntStore/StreamChat", + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "d", Value: 4}}, + refIndex: 15, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: &ipb.Item{Name: "d", Value: 4}}, + refIndex: 15, + }, + { + kind: rpb.Entry_SEND, + msg: message{msg: &ipb.Item{Name: "e", Value: 5}}, + refIndex: 15, + }, + { + kind: rpb.Entry_RECV, + msg: message{msg: &ipb.Item{Name: "e", Value: 5}}, + refIndex: 15, + }, + { + kind: rpb.Entry_RECV, + msg: message{err: io.EOF}, + refIndex: 15, + }, + } + for i, w := range wantEntries { + g, err := readEntry(buf) + if err != nil { + t.Fatalf("#%d: %v", i+1, err) + } + if !g.equal(w) { + t.Errorf("#%d:\ngot %+v\nwant %+v", i+1, g, w) + } + } + g, err := readEntry(buf) + if err != nil { + t.Fatal(err) + } + if g != nil { + t.Errorf("\ngot %+v\nwant nil", g) + } +} + +func TestReplay(t *testing.T) { + srv := newIntStoreServer() + defer srv.stop() + + buf := record(t, srv) + rep, err := NewReplayerReader(buf) + if err != nil { + t.Fatal(err) + } + if got, want := rep.Initial(), initialState; !testutil.Equal(got, want) { + t.Fatalf("got %v, want %v", got, want) + } + // Replay the test. + testService(t, srv.Addr, rep.DialOptions()) +} + +func record(t *testing.T, srv *intStoreServer) *bytes.Buffer { + buf := &bytes.Buffer{} + rec, err := NewRecorderWriter(buf, initialState) + if err != nil { + t.Fatal(err) + } + testService(t, srv.Addr, rec.DialOptions()) + if err := rec.Close(); err != nil { + t.Fatal(err) + } + return buf +} + +func testService(t *testing.T, addr string, opts []grpc.DialOption) { + conn, err := grpc.Dial(addr, + append([]grpc.DialOption{grpc.WithInsecure()}, opts...)...) 
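+	// (opts is rec.DialOptions() when recording and rep.DialOptions() when
+	// replaying; see record and TestReplay above.)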
+ if err != nil { + t.Fatal(err) + } + defer conn.Close() + client := ipb.NewIntStoreClient(conn) + ctx := context.Background() + item := &ipb.Item{Name: "a", Value: 1} + res, err := client.Set(ctx, item) + if err != nil { + t.Fatal(err) + } + if res.PrevValue != 0 { + t.Errorf("got %d, want 0", res.PrevValue) + } + got, err := client.Get(ctx, &ipb.GetRequest{Name: "a"}) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, item) { + t.Errorf("got %v, want %v", got, item) + } + _, err = client.Get(ctx, &ipb.GetRequest{Name: "x"}) + if err == nil { + t.Fatal("got nil, want error") + } + if _, ok := status.FromError(err); !ok { + t.Errorf("got error type %T, want a grpc/status.Status", err) + } + + wantItems := []*ipb.Item{item} + lic, err := client.ListItems(ctx, &ipb.ListItemsRequest{}) + if err != nil { + t.Fatal(err) + } + for i := 0; ; i++ { + item, err := lic.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if i >= len(wantItems) || !proto.Equal(item, wantItems[i]) { + t.Fatalf("%d: bad item", i) + } + } + + ssc, err := client.SetStream(ctx) + if err != nil { + t.Fatal(err) + } + + must := func(err error) { + if err != nil { + t.Fatal(err) + } + } + + for i, name := range []string{"b", "c"} { + must(ssc.Send(&ipb.Item{Name: name, Value: int32(i + 2)})) + } + summary, err := ssc.CloseAndRecv() + if err != nil { + t.Fatal(err) + } + if got, want := summary.Count, int32(2); got != want { + t.Fatalf("got %d, want %d", got, want) + } + + chatc, err := client.StreamChat(ctx) + if err != nil { + t.Fatal(err) + } + for i, name := range []string{"d", "e"} { + item := &ipb.Item{Name: name, Value: int32(i + 4)} + must(chatc.Send(item)) + got, err := chatc.Recv() + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, item) { + t.Errorf("got %v, want %v", got, item) + } + } + must(chatc.CloseSend()) + if _, err := chatc.Recv(); err != io.EOF { + t.Fatalf("got %v, want EOF", err) + } +} diff --git a/vendor/cloud.google.com/go/internal/snipdoc/README.md b/vendor/cloud.google.com/go/internal/snipdoc/README.md new file mode 100644 index 0000000..5d28429 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/snipdoc/README.md @@ -0,0 +1,29 @@ +# Snipdoc + +Snipdoc is a simple tool for maintaining package documentation that contains +code samples. + +1. Create a subdirectory of your package to hold the following files. "internal" + is a good name. + +2. Write a template file (for example, "doc.template") with the text of your package documentation. The file +should look exactly like you want your doc.go file to look, except for code +snippets. +Instead of embedding a code snippet, write a line consisting solely of + + [NAME] + + for your choice of NAME. + +3. Write a snippets file (for example, "doc-snippets.go") as a valid Go source + file. Begin each snippet you'd like to appear in your package docs with + `//[ NAME` and end it with `//]`. + +4. Construct your doc.go file with the command + ``` + awk -f snipdoc.awk doc-snippets.go doc.template + ``` + The file "sample-makefile" in this directory verifies that the + snippets file compiles and safely constructs a doc.go file. + + diff --git a/vendor/cloud.google.com/go/internal/snipdoc/sample-makefile b/vendor/cloud.google.com/go/internal/snipdoc/sample-makefile new file mode 100644 index 0000000..6769cb3 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/snipdoc/sample-makefile @@ -0,0 +1,16 @@ +# Build doc.go from template and snippets. 
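+#
+# Running make with this file first compiles doc-snippets.go (the "build"
+# prerequisite) and then regenerates ../doc.go with snipdoc.awk.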
+
+SHELL=/bin/bash
+
+../doc.go: build doc-snippets.go doc.template snipdoc.awk
+	@tmp=$$(mktemp) && \
+	awk -f snipdoc.awk doc-snippets.go doc.template > $$tmp && \
+	chmod +w $@ && \
+	mv $$tmp $@ && \
+	chmod -w $@
+	@echo "wrote $@"
+
+.PHONY: build
+
+build:
+	go build doc-snippets.go
diff --git a/vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk b/vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk
new file mode 100644
index 0000000..ebb7e21
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/snipdoc/snipdoc.awk
@@ -0,0 +1,116 @@
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# snipdoc merges code snippets from Go source files into a template to
+# produce another Go file (typically doc.go).
+#
+# Call with one or more .go files and a template file.
+#
+#   awk -f snipdoc.awk foo.go bar.go doc.template
+#
+# In the Go files, start a snippet with
+#   //[ NAME
+# and end it with
+#   //]
+#
+# In the template, write
+#   [NAME]
+# on a line by itself to insert the snippet NAME on that line.
+#
+# The following transformations are made to the Go code:
+# - Trailing blank lines are removed.
+# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
+
+
+/^[ \t]*\/\/\[/ { # start snippet in Go file
+  if (inGo()) {
+    if ($2 == "") {
+      die("missing snippet name")
+    }
+    curSnip = $2
+    next
+  }
+}
+
+/^[ \t]*\/\/]/ { # end snippet in Go file
+  if (inGo()) {
+    if (curSnip != "") {
+      # Remove all trailing newlines.
+      gsub(/[\t\n]+$/, "", snips[curSnip])
+      curSnip = ""
+      next
+    } else {
+      die("//] without corresponding //[")
+    }
+  }
+}
+
+ENDFILE {
+  if (curSnip != "") {
+    die("unclosed snippet: " curSnip)
+  }
+}
+
+/^\[.*\]$/ { # Snippet marker in template file.
+  if (inTemplate()) {
+    name = substr($1, 2, length($1)-2)
+    if (snips[name] == "") {
+      die("no snippet named " name)
+    }
+    printf("%s\n", snips[name])
+    afterSnip = 1
+    next
+  }
+}
+
+# Matches every line.
+{
+  if (curSnip != "") {
+    # If the first line in the snip has no indent, add the indent.
+    if (snips[curSnip] == "") {
+      if (index($0, "\t") == 1) {
+        extraIndent = ""
+      } else {
+        extraIndent = "\t"
+      }
+    }
+
+    line = $0
+    # Replace ELLIPSIS.
+    gsub(/_ = ELLIPSIS/, "...", line)
+    gsub(/ELLIPSIS/, "...", line)
+
+    snips[curSnip] = snips[curSnip] extraIndent line "\n"
+  } else if (inTemplate()) {
+    afterSnip = 0
+    # Copy to output.
+    print
+  }
+}
+
+
+
+function inTemplate() {
+  return match(FILENAME, /\.template$/)
+}
+
+function inGo() {
+  return match(FILENAME, /\.go$/)
+}
+
+
+function die(msg) {
+  printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr"
+  exit 1
+}
diff --git a/vendor/cloud.google.com/go/internal/testutil/cmp.go b/vendor/cloud.google.com/go/internal/testutil/cmp.go
new file mode 100644
index 0000000..376f05e
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/testutil/cmp.go
@@ -0,0 +1,99 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + "math" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" +) + +var ( + alwaysEqual = cmp.Comparer(func(_, _ interface{}) bool { return true }) + + defaultCmpOptions = []cmp.Option{ + // Use proto.Equal for protobufs + cmp.Comparer(proto.Equal), + // NaNs compare equal + cmp.FilterValues(func(x, y float64) bool { + return math.IsNaN(x) && math.IsNaN(y) + }, alwaysEqual), + cmp.FilterValues(func(x, y float32) bool { + return math.IsNaN(float64(x)) && math.IsNaN(float64(y)) + }, alwaysEqual), + } +) + +// Equal tests two values for equality. +func Equal(x, y interface{}, opts ...cmp.Option) bool { + // Put default options at the end. Order doesn't matter. + opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...) + return cmp.Equal(x, y, opts...) +} + +// Diff reports the differences between two values. +// Diff(x, y) == "" iff Equal(x, y). +func Diff(x, y interface{}, opts ...cmp.Option) string { + // Put default options at the end. Order doesn't matter. + opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...) + return cmp.Diff(x, y, opts...) +} + +// TODO(jba): remove the code below when cmpopts becomes available. + +// IgnoreUnexported returns an Option that only ignores the immediate unexported +// fields of a struct, including anonymous fields of unexported types. +// In particular, unexported fields within the struct's exported fields +// of struct types, including anonymous fields, will not be ignored unless the +// type of the field itself is also passed to IgnoreUnexported. +func IgnoreUnexported(typs ...interface{}) cmp.Option { + ux := newUnexportedFilter(typs...) + return cmp.FilterPath(ux.filter, cmp.Ignore()) +} + +type unexportedFilter struct{ m map[reflect.Type]bool } + +func newUnexportedFilter(typs ...interface{}) unexportedFilter { + ux := unexportedFilter{m: make(map[reflect.Type]bool)} + for _, typ := range typs { + t := reflect.TypeOf(typ) + if t == nil || t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + ux.m[t] = true + } + return ux +} +func (xf unexportedFilter) filter(p cmp.Path) bool { + if len(p) < 2 { + return false + } + sf, ok := p[len(p)-1].(cmp.StructField) + if !ok { + return false + } + return xf.m[p[len(p)-2].Type()] && !isExported(sf.Name()) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go new file mode 100644 index 0000000..4dd89bb --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -0,0 +1,95 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
+package testutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"golang.org/x/oauth2/jwt"
+)
+
+const (
+	envProjID     = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
+	envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
+)
+
+// ProjID returns the project ID to use in integration tests, or the empty
+// string if none is configured.
+func ProjID() string {
+	return os.Getenv(envProjID)
+}
+
+// TokenSource returns the OAuth2 token source to use in integration tests,
+// or nil if none is configured. It uses the standard environment variable
+// for tests in this repo.
+func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource {
+	return TokenSourceEnv(ctx, envPrivateKey, scopes...)
+}
+
+// TokenSourceEnv returns the OAuth2 token source to use in integration tests, or nil
+// if none is configured. It tries to get credentials from the filename in the
+// environment variable envVar. If the environment variable is unset, TokenSourceEnv
+// will try to find 'Application Default Credentials'; if those are also unavailable,
+// it returns nil. TokenSourceEnv will log.Fatal if the token source is specified but
+// missing or invalid.
+func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource {
+	key := os.Getenv(envVar)
+	if key == "" { // Try for application default credentials.
+		ts, err := google.DefaultTokenSource(ctx, scopes...)
+		if err != nil {
+			log.Println("No 'Application Default Credentials' found.")
+			return nil
+		}
+		return ts
+	}
+	conf, err := jwtConfigFromFile(key, scopes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	return conf.TokenSource(ctx)
+}
+
+// JWTConfig reads the JSON private key file whose name is in the default
+// environment variable, and returns the jwt.Config it contains. It ignores
+// scopes.
+// If the environment variable is empty, it returns (nil, nil).
+func JWTConfig() (*jwt.Config, error) {
+	return jwtConfigFromFile(os.Getenv(envPrivateKey), nil)
+}
+
+// jwtConfigFromFile reads the given JSON private key file, and returns the
+// jwt.Config it contains.
+// If the filename is empty, it returns (nil, nil).
+func jwtConfigFromFile(filename string, scopes []string) (*jwt.Config, error) {
+	if filename == "" {
+		return nil, nil
+	}
+	jsonKey, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("Cannot read the JSON key file, err: %v", err)
+	}
+	conf, err := google.JWTConfigFromJSON(jsonKey, scopes...)
+	if err != nil {
+		return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
+	}
+	return conf, nil
+}
diff --git a/vendor/cloud.google.com/go/internal/testutil/server.go b/vendor/cloud.google.com/go/internal/testutil/server.go
new file mode 100644
index 0000000..e6b7b97
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/testutil/server.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2016 Google Inc.
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "net" + + grpc "google.golang.org/grpc" +) + +// A Server is an in-process gRPC server, listening on a system-chosen port on +// the local loopback interface. Servers are for testing only and are not +// intended to be used in production code. +// +// To create a server, make a new Server, register your handlers, then call +// Start: +// +// srv, err := NewServer() +// ... +// mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler) +// .... +// srv.Start() +// +// Clients should connect to the server with no security: +// +// conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) +// ... +type Server struct { + Addr string + l net.Listener + Gsrv *grpc.Server +} + +// NewServer creates a new Server. The Server will be listening for gRPC connections +// at the address named by the Addr field, without TLS. +func NewServer(opts ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + s := &Server{ + Addr: l.Addr().String(), + l: l, + Gsrv: grpc.NewServer(opts...), + } + return s, nil +} + +// Start causes the server to start accepting incoming connections. +// Call Start after registering handlers. +func (s *Server) Start() { + go s.Gsrv.Serve(s.l) +} + +// Close shuts down the server. +func (s *Server) Close() { + s.Gsrv.Stop() + s.l.Close() +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server_test.go b/vendor/cloud.google.com/go/internal/testutil/server_test.go new file mode 100644 index 0000000..817ce4e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "testing" + + grpc "google.golang.org/grpc" +) + +func TestNewServer(t *testing.T) { + srv, err := NewServer() + if err != nil { + t.Fatal(err) + } + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + conn.Close() + srv.Close() +} diff --git a/vendor/cloud.google.com/go/internal/testutil/unique.go b/vendor/cloud.google.com/go/internal/testutil/unique.go new file mode 100644 index 0000000..ff0de4a --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/unique.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file supports generating unique IDs so that multiple test executions
+// don't interfere with each other, and cleaning up old entities that may
+// remain if tests exit early.
+
+package testutil
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"sync"
+	"time"
+)
+
+var startTime = time.Now().UTC()
+
+// A UIDSpace manages a set of unique IDs distinguished by a prefix.
+type UIDSpace struct {
+	Prefix string
+	Sep    rune
+	re     *regexp.Regexp
+	mu     sync.Mutex
+	count  int
+}
+
+func NewUIDSpace(prefix string) *UIDSpace {
+	return NewUIDSpaceSep(prefix, '-')
+}
+
+func NewUIDSpaceSep(prefix string, sep rune) *UIDSpace {
+	re := fmt.Sprintf(`^%s%[2]c(\d{4})(\d{2})(\d{2})%[2]c(\d+)%[2]c\d+$`,
+		regexp.QuoteMeta(prefix), sep)
+	return &UIDSpace{
+		Prefix: prefix,
+		Sep:    sep,
+		re:     regexp.MustCompile(re),
+	}
+}
+
+// New generates a new unique ID. The ID consists of the UIDSpace's prefix, a
+// timestamp, and a counter value. All unique IDs generated in the same test
+// execution will have the same timestamp.
+//
+// Aside from the characters in the prefix, IDs contain only letters, numbers
+// and sep.
+func (s *UIDSpace) New() string { return s.newID(startTime) }
+
+func (s *UIDSpace) newID(t time.Time) string {
+	s.mu.Lock()
+	c := s.count
+	s.count++
+	s.mu.Unlock()
+	// Write the time as a date followed by nanoseconds from midnight of that date.
+	// That makes it easier to see the approximate time of the ID when it is displayed.
+	y, m, d := t.Date()
+	ns := t.Sub(time.Date(y, m, d, 0, 0, 0, 0, time.UTC))
+	// Zero-pad the counter for lexical sort order for IDs with the same timestamp.
+	return fmt.Sprintf("%s%c%04d%02d%02d%c%d%c%04d",
+		s.Prefix, s.Sep, y, m, d, s.Sep, ns, s.Sep, c)
+}
+
+// Timestamp extracts the timestamp of uid, which must have been generated by
+// s. The second return value is true on success, false if there was a problem.
+func (s *UIDSpace) Timestamp(uid string) (time.Time, bool) {
+	subs := s.re.FindStringSubmatch(uid)
+	if subs == nil {
+		return time.Time{}, false
+	}
+	y, err1 := strconv.Atoi(subs[1])
+	m, err2 := strconv.Atoi(subs[2])
+	d, err3 := strconv.Atoi(subs[3])
+	ns, err4 := strconv.Atoi(subs[4])
+	if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
+		return time.Time{}, false
+	}
+	return time.Date(y, time.Month(m), d, 0, 0, 0, ns, time.UTC), true
+}
+
+// Older reports whether uid was created by s and has a timestamp older than
+// the current time by at least d.
+func (s *UIDSpace) Older(uid string, d time.Duration) bool {
+	ts, ok := s.Timestamp(uid)
+	if !ok {
+		return false
+	}
+	return time.Since(ts) > d
+}
diff --git a/vendor/cloud.google.com/go/internal/testutil/unique_test.go b/vendor/cloud.google.com/go/internal/testutil/unique_test.go
new file mode 100644
index 0000000..5a39833
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/testutil/unique_test.go
@@ -0,0 +1,69 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "testing" + "time" +) + +func TestNew(t *testing.T) { + s := NewUIDSpace("prefix") + tm := time.Date(2017, 1, 6, 0, 0, 0, 21, time.UTC) + got := s.newID(tm) + want := "prefix-20170106-21-0000" + if got != want { + t.Errorf("got %q, want %q", got, want) + } + + s2 := NewUIDSpaceSep("prefix2", '_') + got = s2.newID(tm) + want = "prefix2_20170106_21_0000" + if got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestTimestamp(t *testing.T) { + s := NewUIDSpace("unique-ID") + uid := s.New() + got, ok := s.Timestamp(uid) + if !ok { + t.Fatal("got ok = false, want true") + } + if !startTime.Equal(got) { + t.Errorf("got %s, want %s", got, startTime) + } + + got, ok = s.Timestamp("unique-ID-20160308-123-8") + if !ok { + t.Fatal("got false, want true") + } + if want := time.Date(2016, 3, 8, 0, 0, 0, 123, time.UTC); !want.Equal(got) { + t.Errorf("got %s, want %s", got, want) + } + if _, ok = s.Timestamp("invalid-time-1234"); ok { + t.Error("got true, want false") + } +} + +func TestOlder(t *testing.T) { + s := NewUIDSpace("uid") + // A non-matching ID returns false. + id2 := NewUIDSpace("different-prefix").New() + if got, want := s.Older(id2, time.Second), false; got != want { + t.Errorf("got %t, want %t", got, want) + } +} diff --git a/vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go new file mode 100644 index 0000000..bfc77ba --- /dev/null +++ b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext.go @@ -0,0 +1,83 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracecontext provides encoders and decoders for Stackdriver Trace contexts. +package tracecontext + +import "encoding/binary" + +const ( + versionID = 0 + traceIDField = 0 + spanIDField = 1 + optsField = 2 + + traceIDLen = 16 + spanIDLen = 8 + optsLen = 1 + + // Len represents the length of trace context. + Len = 1 + 1 + traceIDLen + 1 + spanIDLen + 1 + optsLen +) + +// Encode encodes trace ID, span ID and options into dst. The number of bytes +// written will be returned. If len(dst) isn't big enough to fit the trace context, +// a negative number is returned. 
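+//
+// The encoded layout is one version byte (0) followed by each field as a
+// one-byte field ID and its payload: 16 trace ID bytes, 8 little-endian
+// span ID bytes, and a single options byte, for Len bytes in total.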
+func Encode(dst []byte, traceID []byte, spanID uint64, opts byte) (n int) { + if len(dst) < Len { + return -1 + } + var offset = 0 + putByte := func(b byte) { dst[offset] = b; offset++ } + putUint64 := func(u uint64) { binary.LittleEndian.PutUint64(dst[offset:], u); offset += 8 } + + putByte(versionID) + putByte(traceIDField) + for _, b := range traceID { + putByte(b) + } + putByte(spanIDField) + putUint64(spanID) + putByte(optsField) + putByte(opts) + + return offset +} + +// Decode decodes the src into a trace ID, span ID and options. If src doesn't +// contain a valid trace context, ok = false is returned. +func Decode(src []byte) (traceID []byte, spanID uint64, opts byte, ok bool) { + if len(src) < Len { + return traceID, spanID, 0, false + } + var offset = 0 + readByte := func() byte { b := src[offset]; offset++; return b } + readUint64 := func() uint64 { v := binary.LittleEndian.Uint64(src[offset:]); offset += 8; return v } + + if readByte() != versionID { + return traceID, spanID, 0, false + } + for offset < len(src) { + switch readByte() { + case traceIDField: + traceID = src[offset : offset+traceIDLen] + offset += traceIDLen + case spanIDField: + spanID = readUint64() + case optsField: + opts = readByte() + } + } + return traceID, spanID, opts, true +} diff --git a/vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go new file mode 100644 index 0000000..95d3b88 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/tracecontext/tracecontext_test.go @@ -0,0 +1,136 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
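+
+// A round-trip sketch of the package API; traceID, spanID and opts here are
+// illustrative locals, not exports of this package:
+//
+//	buf := make([]byte, Len)
+//	n := Encode(buf, traceID, spanID, opts) // n == Len on success
+//	tid, sid, o, ok := Decode(buf[:n])      // ok reports a well-formed context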
+ +package tracecontext + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" +) + +var validData = []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1} + +func TestDecode(t *testing.T) { + tests := []struct { + name string + data []byte + wantTraceID []byte + wantSpanID uint64 + wantOpts byte + wantOk bool + }{ + { + name: "nil data", + data: nil, + wantTraceID: nil, + wantSpanID: 0, + wantOpts: 0, + wantOk: false, + }, + { + name: "short data", + data: []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, + wantTraceID: nil, + wantSpanID: 0, + wantOpts: 0, + wantOk: false, + }, + { + name: "wrong field number", + data: []byte{0, 1, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, + wantTraceID: nil, + wantSpanID: 0, + wantOpts: 0, + wantOk: false, + }, + { + name: "valid data", + data: validData, + wantTraceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, + wantSpanID: 0x6867666564636261, + wantOpts: 1, + wantOk: true, + }, + } + for _, tt := range tests { + gotTraceID, gotSpanID, gotOpts, gotOk := Decode(tt.data) + if !testutil.Equal(gotTraceID, tt.wantTraceID) { + t.Errorf("%s: Decode() gotTraceID = %v, want %v", tt.name, gotTraceID, tt.wantTraceID) + } + if gotSpanID != tt.wantSpanID { + t.Errorf("%s: Decode() gotSpanID = %v, want %v", tt.name, gotSpanID, tt.wantSpanID) + } + if gotOpts != tt.wantOpts { + t.Errorf("%s: Decode() gotOpts = %v, want %v", tt.name, gotOpts, tt.wantOpts) + } + if gotOk != tt.wantOk { + t.Errorf("%s: Decode() gotOk = %v, want %v", tt.name, gotOk, tt.wantOk) + } + } +} + +func TestEncode(t *testing.T) { + tests := []struct { + name string + dst []byte + traceID []byte + spanID uint64 + opts byte + wantN int + wantData []byte + }{ + { + name: "short data", + dst: make([]byte, 0), + traceID: []byte("00112233445566"), + spanID: 0x6867666564636261, + opts: 1, + wantN: -1, + wantData: make([]byte, 0), + }, + { + name: "valid data", + dst: make([]byte, Len), + traceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, + spanID: 0x6867666564636261, + opts: 1, + wantN: Len, + wantData: validData, + }, + } + for _, tt := range tests { + gotN := Encode(tt.dst, tt.traceID, tt.spanID, tt.opts) + if gotN != tt.wantN { + t.Errorf("%s: n = %v, want %v", tt.name, gotN, tt.wantN) + } + if gotData := tt.dst; !testutil.Equal(gotData, tt.wantData) { + t.Errorf("%s: dst = %v, want %v", tt.name, gotData, tt.wantData) + } + } +} + +func BenchmarkDecode(b *testing.B) { + for i := 0; i < b.N; i++ { + Decode(validData) + } +} + +func BenchmarkEncode(b *testing.B) { + for i := 0; i < b.N; i++ { + traceID := make([]byte, 16) + var opts byte + Encode(validData, traceID, 0, opts) + } +} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100755 index 0000000..fecf1f0 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 0000000..39f924e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20171211" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 +} diff --git a/vendor/cloud.google.com/go/internal/version/version_test.go b/vendor/cloud.google.com/go/internal/version/version_test.go new file mode 100644 index 0000000..7e032f0 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
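+
+// goVer normalizes runtime.Version() output to a semver-like string: for
+// example, "go1.8" becomes "1.8.0", "go1.8rc2" becomes "1.8.0-rc2", and a
+// devel build reduces to its changelist hash. The table below exercises
+// each branch.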
+ +package version + +import "testing" + +func TestGoVer(t *testing.T) { + for _, tst := range []struct { + in, want string + }{ + {"go1.8", "1.8.0"}, + {"go1.7.3", "1.7.3"}, + {"go1.8.typealias", "1.8.0-typealias"}, + {"go1.8beta1", "1.8.0-beta1"}, + {"go1.8rc2", "1.8.0-rc2"}, + {"devel +824f981dd4b7 Tue Apr 29 21:41:54 2014 -0400", "824f981dd4b7"}, + {"foo bar zipzap", ""}, + } { + if got := goVer(tst.in); got != tst.want { + t.Errorf("goVer(%q) = %q, want %q", tst.in, got, tst.want) + } + } +} diff --git a/vendor/cloud.google.com/go/keys.tar.enc b/vendor/cloud.google.com/go/keys.tar.enc new file mode 100644 index 0000000..c54408c Binary files /dev/null and b/vendor/cloud.google.com/go/keys.tar.enc differ diff --git a/vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go b/vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go new file mode 100644 index 0000000..066f04e --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/AnalyzeSentiment_smoke_test.go @@ -0,0 +1,71 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestLanguageServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var content string = "Hello, world!" + var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT + var document = &languagepb.Document{ + Source: &languagepb.Document_Content{ + Content: content, + }, + Type: type_, + } + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + if _, err := c.AnalyzeSentiment(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1/doc.go b/vendor/cloud.google.com/go/language/apiv1/doc.go new file mode 100644 index 0000000..11eb179 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package language is an auto-generated package for the +// Google Cloud Natural Language API. + +// +// Google Cloud Natural Language API provides natural language understanding +// technologies to developers. Examples include sentiment analysis, entity +// recognition, and text annotations. +package language // import "cloud.google.com/go/language/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client.go b/vendor/cloud.google.com/go/language/apiv1/language_client.go new file mode 100644 index 0000000..ae19546 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/language_client.go @@ -0,0 +1,229 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
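+//
+// Per-call options passed by the caller are appended after these defaults,
+// so later options take precedence. A minimal sketch of overriding one
+// method's retry policy, using only the gax helpers this file already
+// imports (not part of the generated surface):
+//
+//	c, err := NewClient(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	c.CallOptions.AnalyzeSentiment = []gax.CallOption{
+//		gax.WithRetry(func() gax.Retryer {
+//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//				Initial:    50 * time.Millisecond,
+//				Max:        5 * time.Second,
+//				Multiplier: 2,
+//			})
+//		}),
+//	}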
+type CallOptions struct { + AnalyzeSentiment []gax.CallOption + AnalyzeEntities []gax.CallOption + AnalyzeEntitySentiment []gax.CallOption + AnalyzeSyntax []gax.CallOption + ClassifyText []gax.CallOption + AnnotateText []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("language.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeEntities: retry[[2]string{"default", "idempotent"}], + AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], + ClassifyText: retry[[2]string{"default", "idempotent"}], + AnnotateText: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Natural Language API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client languagepb.LanguageServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new language service client. +// +// Provides text analysis operations such as sentiment analysis and entity +// recognition. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: languagepb.NewLanguageServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnalyzeSentiment analyzes the sentiment of the provided text. +func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...) + var resp *languagepb.AnalyzeSentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSentiment(ctx, req, settings.GRPC...) 
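+		// Returning the error (rather than handling it here) lets
+		// gax.Invoke consult the retry policy in opts and re-run this
+		// closure when the gRPC status code is retryable.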
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntities finds named entities (currently proper names and common nouns) in the text +// along with entity types, salience, mentions for each entity, and +// other properties. +func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...) + var resp *languagepb.AnalyzeEntitiesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntities(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes +// sentiment associated with each entity and its mentions. +func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...) + var resp *languagepb.AnalyzeEntitySentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntitySentiment(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and +// tokenization along with part of speech tags, dependency trees, and other +// properties. +func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...) + var resp *languagepb.AnalyzeSyntaxResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSyntax(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ClassifyText classifies a document into categories. +func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest, opts ...gax.CallOption) (*languagepb.ClassifyTextResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ClassifyText[0:len(c.CallOptions.ClassifyText):len(c.CallOptions.ClassifyText)], opts...) + var resp *languagepb.ClassifyTextResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ClassifyText(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnnotateText a convenience method that provides all the features that analyzeSentiment, +// analyzeEntities, and analyzeSyntax provide in one call. 
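+//
+// A sketch of selecting features, not part of the generated file; the
+// Extract* field names are assumed from the v1 proto:
+//
+//	req := &languagepb.AnnotateTextRequest{
+//		Document: doc,
+//		Features: &languagepb.AnnotateTextRequest_Features{
+//			ExtractSyntax:            true,
+//			ExtractEntities:          true,
+//			ExtractDocumentSentiment: true,
+//		},
+//	}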
+func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...) + var resp *languagepb.AnnotateTextResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateText(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go new file mode 100644 index 0000000..6ca4151 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go @@ -0,0 +1,141 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language_test + +import ( + "cloud.google.com/go/language/apiv1" + "golang.org/x/net/context" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnalyzeSentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntities() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitiesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntities(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntitySentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitySentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntitySentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeSyntax() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSyntaxRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSyntax(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
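+	// For instance (a sketch; accessor names assumed from the generated
+	// v1 proto types):
+	//
+	//	for _, tok := range resp.Tokens {
+	//		fmt.Println(tok.GetText().GetContent(), tok.GetPartOfSpeech().GetTag())
+	//	}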
+ _ = resp +} + +func ExampleClient_ClassifyText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.ClassifyTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ClassifyText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnnotateText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnnotateTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnnotateText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/language/apiv1/mock_test.go b/vendor/cloud.google.com/go/language/apiv1/mock_test.go new file mode 100644 index 0000000..b1dd541 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/mock_test.go @@ -0,0 +1,518 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLanguageServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + languagepb.LanguageServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
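+	// Setting it lets a test simulate a server-side failure (for
+	// example codes.PermissionDenied) without touching resps.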
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLanguageServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest) (*languagepb.AnalyzeEntitySentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitySentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil +} + +func (s *mockLanguageServer) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest) (*languagepb.ClassifyTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.ClassifyTextResponse), nil +} + +func (s *mockLanguageServer) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnnotateTextResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
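+// TestMain (below) starts the mock server on a loopback listener, dials it
+// with grpc.WithInsecure(), and hands the resulting connection to every
+// test via option.WithGRPCConn, so the full generated client stack is
+// exercised against in-memory fakes instead of the live API.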
+var clientOpt option.ClientOption + +var ( + mockLanguage mockLanguageServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + languagepb.RegisterLanguageServiceServer(serv, &mockLanguage) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestLanguageServiceAnalyzeSentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSentimentError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeEntities(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitiesResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error 
%v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitySentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitySentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntitySentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitySentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntitySentiment(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeSyntax(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSyntaxResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceClassifyText(t *testing.T) { + var expectedResponse *languagepb.ClassifyTextResponse = &languagepb.ClassifyTextResponse{} + + mockLanguage.err = nil + 
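+	// Reset the shared mock between tests so a previous test's error,
+	// recorded requests, or canned responses cannot leak into this one.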
mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.ClassifyTextRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ClassifyText(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceClassifyTextError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.ClassifyTextRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ClassifyText(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnnotateText(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnnotateTextResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} + var request = &languagepb.AnnotateTextRequest{ + Document: document, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnnotateText(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnnotateTextError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} + var request = &languagepb.AnnotateTextRequest{ + Document: document, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnnotateText(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go b/vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go new file mode 100644 index 0000000..2c5fa5d --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/AnalyzeSentiment_smoke_test.go @@ -0,0 +1,71 @@ +// Copyright 2017, Google LLC All rights 
reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestLanguageServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var content string = "Hello, world!" + var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT + var document = &languagepb.Document{ + Source: &languagepb.Document_Content{ + Content: content, + }, + Type: type_, + } + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + if _, err := c.AnalyzeSentiment(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/doc.go b/vendor/cloud.google.com/go/language/apiv1beta2/doc.go new file mode 100644 index 0000000..3f03683 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/doc.go @@ -0,0 +1,48 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package language is an auto-generated package for the +// Google Cloud Natural Language API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Natural Language API provides natural language understanding +// technologies to developers. Examples include sentiment analysis, entity +// recognition, and text annotations. +package language // import "cloud.google.com/go/language/apiv1beta2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) 
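+			// Appending (rather than assigning) merges caller-supplied
+			// metadata with any values already on the outgoing context
+			// instead of clobbering them.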
+ } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/language_client.go b/vendor/cloud.google.com/go/language/apiv1beta2/language_client.go new file mode 100644 index 0000000..fa360eb --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/language_client.go @@ -0,0 +1,229 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnalyzeSentiment []gax.CallOption + AnalyzeEntities []gax.CallOption + AnalyzeEntitySentiment []gax.CallOption + AnalyzeSyntax []gax.CallOption + ClassifyText []gax.CallOption + AnnotateText []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("language.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeEntities: retry[[2]string{"default", "idempotent"}], + AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], + ClassifyText: retry[[2]string{"default", "idempotent"}], + AnnotateText: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Natural Language API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client languagepb.LanguageServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new language service client. +// +// Provides text analysis operations such as sentiment analysis and entity +// recognition. 
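+//
+// Typical construction and cleanup (a minimal sketch):
+//
+//	c, err := NewClient(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()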
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: languagepb.NewLanguageServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnalyzeSentiment analyzes the sentiment of the provided text. +func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...) + var resp *languagepb.AnalyzeSentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSentiment(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntities finds named entities (currently proper names and common nouns) in the text +// along with entity types, salience, mentions for each entity, and +// other properties. +func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...) + var resp *languagepb.AnalyzeEntitiesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntities(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes +// sentiment associated with each entity and its mentions. +func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...) + var resp *languagepb.AnalyzeEntitySentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeEntitySentiment(ctx, req, settings.GRPC...) 
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and +// tokenization along with part of speech tags, dependency trees, and other +// properties. +func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...) + var resp *languagepb.AnalyzeSyntaxResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnalyzeSyntax(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ClassifyText classifies a document into categories. +func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest, opts ...gax.CallOption) (*languagepb.ClassifyTextResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ClassifyText[0:len(c.CallOptions.ClassifyText):len(c.CallOptions.ClassifyText)], opts...) + var resp *languagepb.ClassifyTextResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ClassifyText(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnnotateText a convenience method that provides all syntax, sentiment, entity, and +// classification features in one call. +func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...) + var resp *languagepb.AnnotateTextResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateText(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go new file mode 100644 index 0000000..fb2b53c --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/language_client_example_test.go @@ -0,0 +1,141 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
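+
+// None of the Example functions below declare an Output comment, so go test
+// compiles them but does not execute them; each TODO marks where a caller
+// supplies real request fields and error handling.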
+ +package language_test + +import ( + "cloud.google.com/go/language/apiv1beta2" + "golang.org/x/net/context" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnalyzeSentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntities() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitiesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntities(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntitySentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitySentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntitySentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeSyntax() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSyntaxRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSyntax(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ClassifyText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.ClassifyTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ClassifyText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnnotateText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnnotateTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnnotateText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go b/vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go new file mode 100644 index 0000000..e75c221 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1beta2/mock_test.go @@ -0,0 +1,518 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
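+
+// Every mock method in this file first asserts that the incoming context
+// carries an x-goog-api-client header containing a gl-go token; that check
+// verifies, end to end, the metadata attached by setGoogleClientInfo.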
+ +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLanguageServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + languagepb.LanguageServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLanguageServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest) (*languagepb.AnalyzeEntitySentimentResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitySentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil +} + +func (s *mockLanguageServer) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest) (*languagepb.ClassifyTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.ClassifyTextResponse), nil +} + +func (s *mockLanguageServer) AnnotateText(ctx 
context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnnotateTextResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockLanguage mockLanguageServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + languagepb.RegisterLanguageServiceServer(serv, &mockLanguage) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestLanguageServiceAnalyzeSentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSentimentError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeEntities(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitiesResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response 
%q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitySentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitySentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntitySentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeEntitySentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntitySentiment(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeSyntax(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSyntaxResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { + errCode := codes.PermissionDenied + mockLanguage.err = gstatus.Error(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + } 
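+	// The empty Document above is deliberate: this test only verifies
+	// that the injected status code round-trips through the client, not
+	// the request payload.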
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnalyzeSyntax(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceClassifyText(t *testing.T) {
+	var expectedResponse *languagepb.ClassifyTextResponse = &languagepb.ClassifyTextResponse{}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.ClassifyTextRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ClassifyText(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceClassifyTextError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var request = &languagepb.ClassifyTextRequest{
+		Document: document,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.ClassifyText(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
+		t.Errorf("got error %v, expected grpc error", err)
+	} else if c := st.Code(); c != errCode {
+		t.Errorf("got error code %q, want %q", c, errCode)
+	}
+	_ = resp
+}
+func TestLanguageServiceAnnotateText(t *testing.T) {
+	var language string = "language-1613589672"
+	var expectedResponse = &languagepb.AnnotateTextResponse{
+		Language: language,
+	}
+
+	mockLanguage.err = nil
+	mockLanguage.reqs = nil
+
+	mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
+	var request = &languagepb.AnnotateTextRequest{
+		Document: document,
+		Features: features,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnnotateText(context.Background(), request)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
+		t.Errorf("wrong request %q, want %q", got, want)
+	}
+
+	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
+		t.Errorf("wrong response %q, want %q", got, want)
+	}
+}
+
+func TestLanguageServiceAnnotateTextError(t *testing.T) {
+	errCode := codes.PermissionDenied
+	mockLanguage.err = gstatus.Error(errCode, "test error")
+
+	var document *languagepb.Document = &languagepb.Document{}
+	var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
+	var request = &languagepb.AnnotateTextRequest{
+		Document: document,
+		Features: features,
+	}
+
+	c, err := NewClient(context.Background(), clientOpt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.AnnotateText(context.Background(), request)
+
+	if st, ok := gstatus.FromError(err); !ok {
{ + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/license_test.go b/vendor/cloud.google.com/go/license_test.go new file mode 100644 index 0000000..f93e9e0 --- /dev/null +++ b/vendor/cloud.google.com/go/license_test.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var sentinels = []string{ + "Copyright", + "Google", + `Licensed under the Apache License, Version 2.0 (the "License");`, +} + +func TestLicense(t *testing.T) { + t.Parallel() + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" { + return nil + } + if strings.HasSuffix(path, ".pb.go") { + // .pb.go files are generated from the proto files. + // .proto files must have license headers. + return nil + } + if path == "bigtable/cmd/cbt/cbtdoc.go" { + // Automatically generated. + return nil + } + + src, err := ioutil.ReadFile(path) + if err != nil { + return nil + } + src = src[:140] // Ensure all of the sentinel values are at the top of the file. + + // Find license + for _, sentinel := range sentinels { + if !bytes.Contains(src, []byte(sentinel)) { + t.Errorf("%v: license header not present. want %q", path, sentinel) + return nil + } + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md new file mode 100644 index 0000000..d2d9a17 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/README.md @@ -0,0 +1,11 @@ +Auto-generated logging v2 clients +================================= + +This package includes auto-generated clients for the logging v2 API. + +Use the handwritten logging client (in the parent directory, +cloud.google.com/go/logging) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. + + diff --git a/vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go b/vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go new file mode 100644 index 0000000..6aab33a --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/WriteLogEntries_smoke_test.go @@ -0,0 +1,66 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestLoggingServiceV2Smoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var entries []*loggingpb.LogEntry = nil + var formattedLogName string = LogPath(projectId, "test-"+strconv.FormatInt(time.Now().UnixNano(), 10)+"") + var request = &loggingpb.WriteLogEntriesRequest{ + Entries: entries, + LogName: formattedLogName, + } + + if _, err := c.WriteLogEntries(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go new file mode 100644 index 0000000..f2a13e0 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -0,0 +1,449 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ConfigCallOptions contains the retry settings for each method of ConfigClient. 
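+// Each field holds the default gax retry policy for one RPC; callers may
+// also override it per call. A minimal sketch (assuming an existing
+// ConfigClient c, a context ctx, and a prepared req; it reuses only the gax
+// helpers already imported in this file):
+//
+//	opt := gax.WithRetry(func() gax.Retryer {
+//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//			Initial:    100 * time.Millisecond,
+//			Max:        1000 * time.Millisecond,
+//			Multiplier: 1.2,
+//		})
+//	})
+//	sink, err := c.GetSink(ctx, req, opt)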
+type ConfigCallOptions struct { + ListSinks []gax.CallOption + GetSink []gax.CallOption + CreateSink []gax.CallOption + UpdateSink []gax.CallOption + DeleteSink []gax.CallOption + ListExclusions []gax.CallOption + GetExclusion []gax.CallOption + CreateExclusion []gax.CallOption + UpdateExclusion []gax.CallOption + DeleteExclusion []gax.CallOption +} + +func defaultConfigClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultConfigCallOptions() *ConfigCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &ConfigCallOptions{ + ListSinks: retry[[2]string{"default", "idempotent"}], + GetSink: retry[[2]string{"default", "idempotent"}], + CreateSink: retry[[2]string{"default", "non_idempotent"}], + UpdateSink: retry[[2]string{"default", "non_idempotent"}], + DeleteSink: retry[[2]string{"default", "idempotent"}], + ListExclusions: retry[[2]string{"default", "idempotent"}], + GetExclusion: retry[[2]string{"default", "idempotent"}], + CreateExclusion: retry[[2]string{"default", "non_idempotent"}], + UpdateExclusion: retry[[2]string{"default", "non_idempotent"}], + DeleteExclusion: retry[[2]string{"default", "idempotent"}], + } +} + +// ConfigClient is a client for interacting with Stackdriver Logging API. +type ConfigClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + configClient loggingpb.ConfigServiceV2Client + + // The call options for this service. + CallOptions *ConfigCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewConfigClient creates a new config service v2 client. +// +// Service for configuring sinks used to export log entries outside of +// Stackdriver Logging. +func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ConfigClient{ + conn: conn, + CallOptions: defaultConfigCallOptions(), + + configClient: loggingpb.NewConfigServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ConfigClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ConfigClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ConfigProjectPath returns the path for the project resource. 
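+// For example, ConfigProjectPath("my-project") returns "projects/my-project".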
+func ConfigProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// ConfigSinkPath returns the path for the sink resource. +func ConfigSinkPath(project, sink string) string { + return "" + + "projects/" + + project + + "/sinks/" + + sink + + "" +} + +// ConfigExclusionPath returns the path for the exclusion resource. +func ConfigExclusionPath(project, exclusion string) string { + return "" + + "projects/" + + project + + "/exclusions/" + + exclusion + + "" +} + +// ListSinks lists sinks. +func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...) + it := &LogSinkIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { + var resp *loggingpb.ListSinksResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.ListSinks(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Sinks, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetSink gets a sink. +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.GetSink(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateSink creates a sink that exports specified log entries to a destination. The +// export of newly-ingested log entries begins immediately, unless the sink's +// writer_identity is not permitted to write to the destination. A sink can +// export log entries only from the resource owning the sink. +func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.CreateSink(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSink updates a sink. This method replaces the following fields in the existing +// sink with values from the new sink: destination, and filter. +// The updated sink might also have a new writer_identity; see the +// unique_writer_identity field. 
+func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.UpdateSink(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that +// service account is also deleted. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.configClient.DeleteSink(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListExclusions lists all the exclusions in a parent resource. +func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListExclusions[0:len(c.CallOptions.ListExclusions):len(c.CallOptions.ListExclusions)], opts...) + it := &LogExclusionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogExclusion, string, error) { + var resp *loggingpb.ListExclusionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.ListExclusions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Exclusions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetExclusion gets the description of an exclusion. +func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...) + var resp *loggingpb.LogExclusion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.GetExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateExclusion creates a new exclusion in a specified parent resource. +// Only log entries belonging to that resource can be excluded. +// You can have up to 10 exclusions in a resource. 
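+// The exclusion's resource name can later be built with ConfigExclusionPath;
+// for example, ConfigExclusionPath("my-project", "no-debug") returns
+// "projects/my-project/exclusions/no-debug" (the names here are illustrative).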
+func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...) + var resp *loggingpb.LogExclusion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.CreateExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateExclusion changes one or more properties of an existing exclusion. +func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...) + var resp *loggingpb.LogExclusion + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.UpdateExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteExclusion deletes an exclusion. +func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.configClient.DeleteExclusion(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// LogExclusionIterator manages a stream of *loggingpb.LogExclusion. +type LogExclusionIterator struct { + items []*loggingpb.LogExclusion + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogExclusion, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogExclusionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogExclusionIterator) Next() (*loggingpb.LogExclusion, error) { + var item *loggingpb.LogExclusion + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogExclusionIterator) bufLen() int { + return len(it.items) +} + +func (it *LogExclusionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// LogSinkIterator manages a stream of *loggingpb.LogSink. 
+type LogSinkIterator struct { + items []*loggingpb.LogSink + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogSinkIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { + var item *loggingpb.LogSink + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogSinkIterator) bufLen() int { + return len(it.items) +} + +func (it *LogSinkIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go new file mode 100644 index 0000000..909bc12 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go @@ -0,0 +1,222 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewConfigClient() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleConfigClient_ListSinks() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListSinksRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSinks(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleConfigClient_GetSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleConfigClient_CreateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_UpdateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_DeleteSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteSinkRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSink(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleConfigClient_ListExclusions() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListExclusionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListExclusions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleConfigClient_GetExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetExclusionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_CreateExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateExclusionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_UpdateExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateExclusionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_DeleteExclusion() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteExclusionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteExclusion(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go new file mode 100644 index 0000000..207138d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -0,0 +1,52 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package logging is an auto-generated package for the +// Stackdriver Logging API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Writes log entries and manages your Stackdriver Logging configuration. +// +// Use the client at cloud.google.com/go/logging in preference to this. +package logging // import "cloud.google.com/go/logging/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go new file mode 100644 index 0000000..8199abd --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -0,0 +1,431 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + DeleteLog []gax.CallOption + WriteLogEntries []gax.CallOption + ListLogEntries []gax.CallOption + ListMonitoredResourceDescriptors []gax.CallOption + ListLogs []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"list", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + DeleteLog: retry[[2]string{"default", "idempotent"}], + WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], + ListLogEntries: retry[[2]string{"list", "idempotent"}], + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + ListLogs: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Logging API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.LoggingServiceV2Client + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new logging service v2 client. +// +// Service for ingesting and querying logs. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: loggingpb.NewLoggingServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ProjectPath returns the path for the project resource. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// LogPath returns the path for the log resource. +func LogPath(project, log string) string { + return "" + + "projects/" + + project + + "/logs/" + + log + + "" +} + +// DeleteLog deletes all the log entries in a log. +// The log reappears if it receives new entries. 
+// Log entries written shortly before the delete operation might not be
+// deleted.
+func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.DeleteLog(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// WriteLogEntries writes log entries to Stackdriver Logging. This API method
+// is the only way to send log entries to Stackdriver Logging. This method
+// is used, directly or indirectly, by the Stackdriver Logging agent
+// (fluentd) and all logging libraries configured to use Stackdriver
+// Logging.
+func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...)
+	var resp *loggingpb.WriteLogEntriesResponse
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.WriteLogEntries(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListLogEntries lists log entries. Use this method to retrieve log entries from
+// Stackdriver Logging. For ways to export log entries, see
+// Exporting Logs (at /logging/docs/export).
+func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)
+	it := &LogEntryIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) {
+		var resp *loggingpb.ListLogEntriesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListLogEntries(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Entries, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver
+// Logging.
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
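+	// Note: the three-index slice expression above caps the copied slice's
+	// capacity at its length, so this append allocates a new backing array
+	// instead of writing into the shared per-client default options.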
+ it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *loggingpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListLogs lists the logs in projects, organizations, folders, or billing accounts. +// Only logs that have entries are listed. +func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *loggingpb.ListLogsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListLogs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.LogNames, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// LogEntryIterator manages a stream of *loggingpb.LogEntry. +type LogEntryIterator struct { + items []*loggingpb.LogEntry + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogEntryIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
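+// A typical consumption loop, mirroring the generated examples elsewhere in
+// this package:
+//
+//	for {
+//		entry, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = entry
+//	}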
+func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { + var item *loggingpb.LogEntry + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogEntryIterator) bufLen() int { + return len(it.items) +} + +func (it *LogEntryIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go new file mode 100644 index 0000000..759462a --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go @@ -0,0 +1,140 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLog(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_WriteLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.WriteLogEntriesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.WriteLogEntries(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogEntriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogEntries(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListLogs() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogsRequest{ + // TODO: Fill request struct fields. 
+ } + it := c.ListLogs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go new file mode 100644 index 0000000..ad8cbf1 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -0,0 +1,282 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricsCallOptions contains the retry settings for each method of MetricsClient. +type MetricsCallOptions struct { + ListLogMetrics []gax.CallOption + GetLogMetric []gax.CallOption + CreateLogMetric []gax.CallOption + UpdateLogMetric []gax.CallOption + DeleteLogMetric []gax.CallOption +} + +func defaultMetricsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultMetricsCallOptions() *MetricsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &MetricsCallOptions{ + ListLogMetrics: retry[[2]string{"default", "idempotent"}], + GetLogMetric: retry[[2]string{"default", "idempotent"}], + CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], + UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], + DeleteLogMetric: retry[[2]string{"default", "idempotent"}], + } +} + +// MetricsClient is a client for interacting with Stackdriver Logging API. +type MetricsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricsClient loggingpb.MetricsServiceV2Client + + // The call options for this service. + CallOptions *MetricsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricsClient creates a new metrics service v2 client. +// +// Service for configuring logs-based metrics. +func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &MetricsClient{ + conn: conn, + CallOptions: defaultMetricsCallOptions(), + + metricsClient: loggingpb.NewMetricsServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// MetricsProjectPath returns the path for the project resource. +func MetricsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricsMetricPath returns the path for the metric resource. +func MetricsMetricPath(project, metric string) string { + return "" + + "projects/" + + project + + "/metrics/" + + metric + + "" +} + +// ListLogMetrics lists logs-based metrics. +func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...) + it := &LogMetricIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { + var resp *loggingpb.ListLogMetricsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.ListLogMetrics(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Metrics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetLogMetric gets a logs-based metric. +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.GetLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateLogMetric creates a logs-based metric. 
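+// A minimal sketch (the Parent and Metric field names follow loggingpb's
+// CreateLogMetricRequest; the values are illustrative):
+//
+//	req := &loggingpb.CreateLogMetricRequest{
+//		Parent: MetricsProjectPath("my-project"),
+//		Metric: &loggingpb.LogMetric{
+//			// TODO: Fill metric fields.
+//		},
+//	}
+//	metric, err := c.CreateLogMetric(ctx, req)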
+func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.CreateLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateLogMetric creates or updates a logs-based metric. +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricsClient.UpdateLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteLogMetric deletes a logs-based metric. +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricsClient.DeleteLogMetric(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// LogMetricIterator manages a stream of *loggingpb.LogMetric. +type LogMetricIterator struct { + items []*loggingpb.LogMetric + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogMetricIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
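+// Typical usage is a for loop that calls Next until it returns iterator.Done,
+// as shown in the ListLogMetrics example for this package.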
+func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { + var item *loggingpb.LogMetric + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogMetricIterator) bufLen() int { + return len(it.items) +} + +func (it *LogMetricIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go new file mode 100644 index 0000000..4f61106 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go @@ -0,0 +1,128 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewMetricsClient() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleMetricsClient_ListLogMetrics() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogMetricsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogMetrics(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricsClient_GetLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_CreateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_UpdateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_DeleteLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &loggingpb.DeleteLogMetricRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/mock_test.go b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go new file mode 100644 index 0000000..a5345c5 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go @@ -0,0 +1,1677 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLoggingServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + loggingpb.LoggingServiceV2Server + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLoggingServer) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockLoggingServer) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.WriteLogEntriesResponse), nil +} + +func (s *mockLoggingServer) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) (*loggingpb.ListLogEntriesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogEntriesResponse), nil +} + +func (s *mockLoggingServer) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) (*loggingpb.ListMonitoredResourceDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListMonitoredResourceDescriptorsResponse), nil +} + +func (s *mockLoggingServer) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest) (*loggingpb.ListLogsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogsResponse), nil +} + +type mockConfigServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + loggingpb.ConfigServiceV2Server + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockConfigServer) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) (*loggingpb.ListSinksResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListSinksResponse), nil +} + +func (s *mockConfigServer) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogSink), nil +} + +func (s *mockConfigServer) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogSink), nil +} + +func (s *mockConfigServer) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogSink), nil +} + +func (s *mockConfigServer) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockConfigServer) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest) (*loggingpb.ListExclusionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListExclusionsResponse), nil +} + +func (s *mockConfigServer) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest) (*loggingpb.LogExclusion, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogExclusion), nil +} + +func (s *mockConfigServer) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest) (*loggingpb.LogExclusion, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogExclusion), nil +} + +func (s *mockConfigServer) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest) (*loggingpb.LogExclusion, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogExclusion), nil +} + +func (s *mockConfigServer) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +type mockMetricsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + loggingpb.MetricsServiceV2Server + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricsServer) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) (*loggingpb.ListLogMetricsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogMetricsResponse), nil +} + +func (s *mockMetricsServer) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s 
*mockMetricsServer) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockLogging mockLoggingServer + mockConfig mockConfigServer + mockMetrics mockMetricsServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + loggingpb.RegisterLoggingServiceV2Server(serv, &mockLogging) + loggingpb.RegisterConfigServiceV2Server(serv, &mockConfig) + loggingpb.RegisterMetricsServiceV2Server(serv, &mockMetrics) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestLoggingServiceV2DeleteLog(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var formattedLogName string = LogPath("[PROJECT]", "[LOG]") + var request = &loggingpb.DeleteLogRequest{ + LogName: formattedLogName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLog(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestLoggingServiceV2DeleteLogError(t *testing.T) { + errCode := codes.PermissionDenied + mockLogging.err = gstatus.Error(errCode, "test error") + + var formattedLogName string = LogPath("[PROJECT]", "[LOG]") + var request = &loggingpb.DeleteLogRequest{ + LogName: formattedLogName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLog(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestLoggingServiceV2WriteLogEntries(t *testing.T) { + var expectedResponse *loggingpb.WriteLogEntriesResponse = &loggingpb.WriteLogEntriesResponse{} + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var entries []*loggingpb.LogEntry = nil + var request = &loggingpb.WriteLogEntriesRequest{ + Entries: entries, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.WriteLogEntries(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2WriteLogEntriesError(t *testing.T) { + errCode := codes.PermissionDenied 
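+	// Make every RPC on the shared mock fail with this code; the assertions
+	// below check that the client surfaces the same gRPC status to the caller.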
+ mockLogging.err = gstatus.Error(errCode, "test error") + + var entries []*loggingpb.LogEntry = nil + var request = &loggingpb.WriteLogEntriesRequest{ + Entries: entries, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.WriteLogEntries(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLoggingServiceV2ListLogEntries(t *testing.T) { + var nextPageToken string = "" + var entriesElement *loggingpb.LogEntry = &loggingpb.LogEntry{} + var entries = []*loggingpb.LogEntry{entriesElement} + var expectedResponse = &loggingpb.ListLogEntriesResponse{ + NextPageToken: nextPageToken, + Entries: entries, + } + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var resourceNames []string = nil + var request = &loggingpb.ListLogEntriesRequest{ + ResourceNames: resourceNames, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogEntries(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Entries[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2ListLogEntriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockLogging.err = gstatus.Error(errCode, "test error") + + var resourceNames []string = nil + var request = &loggingpb.ListLogEntriesRequest{ + ResourceNames: resourceNames, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogEntries(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLoggingServiceV2ListMonitoredResourceDescriptors(t *testing.T) { + var nextPageToken string = "" + var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} + var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} + var expectedResponse = &loggingpb.ListMonitoredResourceDescriptorsResponse{ + NextPageToken: nextPageToken, + ResourceDescriptors: resourceDescriptors, + } + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := 
(interface{})(expectedResponse.ResourceDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2ListMonitoredResourceDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockLogging.err = gstatus.Error(errCode, "test error") + + var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLoggingServiceV2ListLogs(t *testing.T) { + var nextPageToken string = "" + var logNamesElement string = "logNamesElement-1079688374" + var logNames = []string{logNamesElement} + var expectedResponse = &loggingpb.ListLogsResponse{ + NextPageToken: nextPageToken, + LogNames: logNames, + } + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var formattedParent string = ProjectPath("[PROJECT]") + var request = &loggingpb.ListLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.LogNames[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2ListLogsError(t *testing.T) { + errCode := codes.PermissionDenied + mockLogging.err = gstatus.Error(errCode, "test error") + + var formattedParent string = ProjectPath("[PROJECT]") + var request = &loggingpb.ListLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2ListSinks(t *testing.T) { + var nextPageToken string = "" + var sinksElement *loggingpb.LogSink = &loggingpb.LogSink{} + var sinks = []*loggingpb.LogSink{sinksElement} + var expectedResponse = &loggingpb.ListSinksResponse{ + NextPageToken: nextPageToken, + Sinks: sinks, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var request = &loggingpb.ListSinksRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.ListSinks(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Sinks[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2ListSinksError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var request = &loggingpb.ListSinksRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSinks(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2GetSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var includeChildren bool = true + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + IncludeChildren: includeChildren, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.GetSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2GetSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.GetSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2CreateSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var includeChildren bool = true + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + IncludeChildren: includeChildren, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + 
mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.CreateSinkRequest{ + Parent: formattedParent, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2CreateSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.CreateSinkRequest{ + Parent: formattedParent, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2UpdateSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var includeChildren bool = true + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + IncludeChildren: includeChildren, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.UpdateSinkRequest{ + SinkName: formattedSinkName, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2UpdateSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.UpdateSinkRequest{ + SinkName: formattedSinkName, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2DeleteSink(t *testing.T) { + var expectedResponse *emptypb.Empty 
= &emptypb.Empty{} + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.DeleteSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestConfigServiceV2DeleteSinkError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.DeleteSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSink(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestConfigServiceV2ListExclusions(t *testing.T) { + var nextPageToken string = "" + var exclusionsElement *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var exclusions = []*loggingpb.LogExclusion{exclusionsElement} + var expectedResponse = &loggingpb.ListExclusionsResponse{ + NextPageToken: nextPageToken, + Exclusions: exclusions, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var request = &loggingpb.ListExclusionsRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListExclusions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Exclusions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2ListExclusionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var request = &loggingpb.ListExclusionsRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListExclusions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2GetExclusion(t *testing.T) { + var name2 string = "name2-1052831874" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var disabled bool = true + var expectedResponse = &loggingpb.LogExclusion{ + Name: name2, + Description: description, + Filter: 
filter, + Disabled: disabled, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.GetExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2GetExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.GetExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2CreateExclusion(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var disabled bool = true + var expectedResponse = &loggingpb.LogExclusion{ + Name: name, + Description: description, + Filter: filter, + Disabled: disabled, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var request = &loggingpb.CreateExclusionRequest{ + Parent: formattedParent, + Exclusion: exclusion, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2CreateExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var request = &loggingpb.CreateExclusionRequest{ + Parent: formattedParent, + Exclusion: exclusion, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2UpdateExclusion(t *testing.T) { + var name2 string = "name2-1052831874" + var description string = "description-1724546052" + var 
filter string = "filter-1274492040" + var disabled bool = true + var expectedResponse = &loggingpb.LogExclusion{ + Name: name2, + Description: description, + Filter: filter, + Disabled: disabled, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &loggingpb.UpdateExclusionRequest{ + Name: formattedName, + Exclusion: exclusion, + UpdateMask: updateMask, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2UpdateExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]") + var exclusion *loggingpb.LogExclusion = &loggingpb.LogExclusion{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &loggingpb.UpdateExclusionRequest{ + Name: formattedName, + Exclusion: exclusion, + UpdateMask: updateMask, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2DeleteExclusion(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.DeleteExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteExclusion(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestConfigServiceV2DeleteExclusionError(t *testing.T) { + errCode := codes.PermissionDenied + mockConfig.err = gstatus.Error(errCode, "test error") + + var formattedName string = ConfigExclusionPath("[PROJECT]", "[EXCLUSION]") + var request = &loggingpb.DeleteExclusionRequest{ + Name: formattedName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteExclusion(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestMetricsServiceV2ListLogMetrics(t *testing.T) { + var nextPageToken string = "" + var metricsElement *loggingpb.LogMetric = 
&loggingpb.LogMetric{} + var metrics = []*loggingpb.LogMetric{metricsElement} + var expectedResponse = &loggingpb.ListLogMetricsResponse{ + NextPageToken: nextPageToken, + Metrics: metrics, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var request = &loggingpb.ListLogMetricsRequest{ + Parent: formattedParent, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogMetrics(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Metrics[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2ListLogMetricsError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var request = &loggingpb.ListLogMetricsRequest{ + Parent: formattedParent, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogMetrics(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2GetLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var valueExtractor string = "valueExtractor2047672534" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + ValueExtractor: valueExtractor, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.GetLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2GetLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.GetLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); 
c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2CreateLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var valueExtractor string = "valueExtractor2047672534" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + ValueExtractor: valueExtractor, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.CreateLogMetricRequest{ + Parent: formattedParent, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2CreateLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.CreateLogMetricRequest{ + Parent: formattedParent, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2UpdateLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var valueExtractor string = "valueExtractor2047672534" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + ValueExtractor: valueExtractor, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.UpdateLogMetricRequest{ + MetricName: formattedMetricName, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2UpdateLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var metric *loggingpb.LogMetric = 
&loggingpb.LogMetric{} + var request = &loggingpb.UpdateLogMetricRequest{ + MetricName: formattedMetricName, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2DeleteLogMetric(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.DeleteLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricsServiceV2DeleteLogMetricError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetrics.err = gstatus.Error(errCode, "test error") + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.DeleteLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLogMetric(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go new file mode 100644 index 0000000..32ca717 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package logging contains a Stackdriver Logging client suitable for writing logs. +For reading logs, and working with sinks, metrics and monitored resources, +see package cloud.google.com/go/logging/logadmin. + +This client uses Logging API v2. +See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. + + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Creating a Client + +Use a Client to interact with the Stackdriver Logging API. + + // Create a Client + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. 
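+		// For example, a minimal handler: log.Fatal(err)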
+ } + + +Basic Usage + +For most use-cases, you'll want to add log entries to a buffer to be periodically +flushed (automatically and asynchronously) to the Stackdriver Logging service. + + // Initialize a logger + lg := client.Logger("my-log") + + // Add entry to log buffer + lg.Log(logging.Entry{Payload: "something happened!"}) + + +Closing your Client + +You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service. + + // Close the client when finished. + err = client.Close() + if err != nil { + // TODO: Handle error. + } + + +Synchronous Logging + +For critical errors, you may want to send your log entries immediately. +LogSync is slow and will block until the log entry has been sent, so it is +not recommended for basic use. + + lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"}) + + +The Standard Logger Interface + +You may want use a standard log.Logger in your program. + + // stdlg implements log.Logger + stdlg := lg.StandardLogger(logging.Info) + stdlg.Println("some info") + + +Log Levels + +An Entry may have one of a number of severity levels associated with it. + + logging.Entry{ + Payload: "something terrible happened!", + Severity: logging.Critical, + } + +*/ +package logging // import "cloud.google.com/go/logging" diff --git a/vendor/cloud.google.com/go/logging/examples_test.go b/vendor/cloud.google.com/go/logging/examples_test.go new file mode 100644 index 0000000..10ceee9 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/examples_test.go @@ -0,0 +1,136 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "fmt" + "os" + + "cloud.google.com/go/logging" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Ping() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.Ping(ctx); err != nil { + // TODO: Handle error. + } +} + +// Although Logger.Flush and Client.Close both return errors, they don't tell you +// whether the errors were frequent or significant. For most programs, it doesn't +// matter if there were a few errors while writing logs, although if those few errors +// indicated a bug in your program, you might want to know about them. The best way +// to handle errors is by setting the OnError function. If it runs quickly, it will +// see every error generated during logging. +func ExampleNewClient_errorFunc() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. 
+ } + // Print all errors to stdout, and count them. Multiple calls to the OnError + // function never happen concurrently, so there is no need for locking nErrs, + // provided you don't read it until after the logging client is closed. + var nErrs int + client.OnError = func(e error) { + fmt.Fprintf(os.Stdout, "logging: %v", e) + nErrs++ + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } + fmt.Printf("saw %d errors\n", nErrs) +} + +func ExampleClient_Logger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + _ = lg // TODO: use the Logger. +} + +func ExampleLogger_LogSync() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + err = lg.LogSync(ctx, logging.Entry{Payload: "red alert"}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleLogger_Log() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) +} + +func ExampleLogger_Flush() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) + lg.Flush() +} + +func ExampleLogger_StandardLogger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + slg := lg.StandardLogger(logging.Info) + slg.Println("an informative message") +} + +func ExampleParseSeverity() { + sev := logging.ParseSeverity("ALERT") + fmt.Println(sev) + // Output: Alert +} diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go new file mode 100644 index 0000000..38cfbb5 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/common.go @@ -0,0 +1,39 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
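+
+// A hedged sketch of the escaping round trip implemented by LogPath and
+// LogIDFromPath below (the parent and log ID are illustrative):
+//
+//	LogPath("projects/p", "a/b")                         // "projects/p/logs/a%2Fb"
+//	LogIDFromPath("projects/p", "projects/p/logs/a%2Fb") // "a/b"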
+ +package internal + +import ( + "fmt" + "strings" +) + +const ( + ProdAddr = "logging.googleapis.com:443" + Version = "0.2.0" +) + +func LogPath(parent, logID string) string { + logID = strings.Replace(logID, "/", "%2F", -1) + return fmt.Sprintf("%s/logs/%s", parent, logID) +} + +func LogIDFromPath(parent, path string) string { + start := len(parent) + len("/logs/") + if len(path) < start { + return "" + } + logID := path[start:] + return strings.Replace(logID, "%2F", "/", -1) +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/equal.go b/vendor/cloud.google.com/go/logging/internal/testing/equal.go new file mode 100644 index 0000000..c95d199 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/equal.go @@ -0,0 +1,42 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + + "github.com/golang/protobuf/proto" +) + +// Compare two payloads, assuming they are both proto.Messages +// or both strings. +func PayloadEqual(a, b interface{}) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + switch a := a.(type) { + case proto.Message: + return proto.Equal(a, b.(proto.Message)) + case string: + return a == b.(string) + default: + panic(fmt.Sprintf("payloadEqual: unexpected type %T", a)) + } +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/fake.go b/vendor/cloud.google.com/go/logging/internal/testing/fake.go new file mode 100644 index 0000000..c76974d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/fake.go @@ -0,0 +1,418 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing provides support for testing the logging client. 
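+//
+// A minimal sketch of wiring a test to this fake, modeled on the callers in
+// this repository (logadmin_test dials the returned address and passes the
+// connection via option.WithGRPCConn; WriteLogEntries only accepts the
+// project ID "PROJECT_ID"):
+//
+//	addr, err := NewServer()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	conn, err := grpc.Dial(addr, grpc.WithInsecure())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client, err := logadmin.NewClient(ctx, "PROJECT_ID", option.WithGRPCConn(conn))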
+package testing + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + emptypb "github.com/golang/protobuf/ptypes/empty" + tspb "github.com/golang/protobuf/ptypes/timestamp" + + "cloud.google.com/go/internal/testutil" + context "golang.org/x/net/context" + lpb "google.golang.org/genproto/googleapis/api/label" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +type loggingHandler struct { + logpb.LoggingServiceV2Server + + mu sync.Mutex + logs map[string][]*logpb.LogEntry // indexed by log name +} + +type configHandler struct { + logpb.ConfigServiceV2Server + + mu sync.Mutex + sinks map[string]*logpb.LogSink // indexed by (full) sink name +} + +type metricHandler struct { + logpb.MetricsServiceV2Server + + mu sync.Mutex + metrics map[string]*logpb.LogMetric // indexed by (full) metric name +} + +// NewServer creates a new in-memory fake server implementing the logging service. +// It returns the address of the server. +func NewServer() (string, error) { + srv, err := testutil.NewServer() + if err != nil { + return "", err + } + logpb.RegisterLoggingServiceV2Server(srv.Gsrv, &loggingHandler{ + logs: make(map[string][]*logpb.LogEntry), + }) + logpb.RegisterConfigServiceV2Server(srv.Gsrv, &configHandler{ + sinks: make(map[string]*logpb.LogSink), + }) + logpb.RegisterMetricsServiceV2Server(srv.Gsrv, &metricHandler{ + metrics: make(map[string]*logpb.LogMetric), + }) + srv.Start() + return srv.Addr, nil +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it +// receives new entries. +func (h *loggingHandler) DeleteLog(_ context.Context, req *logpb.DeleteLogRequest) (*emptypb.Empty, error) { + // TODO(jba): return NotFound if log isn't there? + h.mu.Lock() + defer h.mu.Unlock() + delete(h.logs, req.LogName) + return &emptypb.Empty{}, nil +} + +// The only project ID that WriteLogEntries will accept. +// Important for testing Ping. +const validProjectID = "PROJECT_ID" + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries in +// Stackdriver Logging are written by this method. +func (h *loggingHandler) WriteLogEntries(_ context.Context, req *logpb.WriteLogEntriesRequest) (*logpb.WriteLogEntriesResponse, error) { + if !strings.HasPrefix(req.LogName, "projects/"+validProjectID+"/") { + return nil, fmt.Errorf("bad project ID: %q", req.LogName) + } + // TODO(jba): support insertId? + h.mu.Lock() + defer h.mu.Unlock() + for _, e := range req.Entries { + // Assign timestamp if missing. + if e.Timestamp == nil { + e.Timestamp = &tspb.Timestamp{Seconds: time.Now().Unix(), Nanos: 0} + } + // Fill from common fields in request. + if e.LogName == "" { + e.LogName = req.LogName + } + if e.Resource == nil { + // TODO(jba): use a global one if nil? + e.Resource = req.Resource + } + for k, v := range req.Labels { + if _, ok := e.Labels[k]; !ok { + e.Labels[k] = v + } + } + + // Store by log name. + h.logs[e.LogName] = append(h.logs[e.LogName], e) + } + return &logpb.WriteLogEntriesResponse{}, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries +// from Stackdriver Logging. +// +// This fake implementation ignores project IDs. It does not support full filtering, only +// expressions of the form "logName = NAME". 
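+//
+// Forms accepted by parseFilter below (the log name is illustrative; the
+// surrounding quotes are optional):
+//
+//	logName = "projects/PROJECT_ID/logs/LOG_ID"
+//	logName = projects/PROJECT_ID/logs/LOG_ID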
+func (h *loggingHandler) ListLogEntries(_ context.Context, req *logpb.ListLogEntriesRequest) (*logpb.ListLogEntriesResponse, error) { + h.mu.Lock() + defer h.mu.Unlock() + entries, err := h.filterEntries(req.Filter) + if err != nil { + return nil, err + } + if err = sortEntries(entries, req.OrderBy); err != nil { + return nil, err + } + + from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(entries)) + if err != nil { + return nil, err + } + return &logpb.ListLogEntriesResponse{ + Entries: entries[from:to], + NextPageToken: nextPageToken, + }, nil +} + +// getPage converts an incoming page size and token from an RPC request into +// slice bounds and the outgoing next-page token. +// +// getPage assumes that the complete, unpaginated list of items exists as a +// single slice. In addition to the page size and token, getPage needs the +// length of that slice. +// +// getPage's first two return values should be used to construct a sub-slice of +// the complete, unpaginated slice. E.g. if the complete slice is s, then +// s[from:to] is the desired page. Its third return value should be set as the +// NextPageToken field of the RPC response. +func getPage(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) { + from, to = 0, length + if pageToken != "" { + from, err = strconv.Atoi(pageToken) + if err != nil { + return 0, 0, "", invalidArgument("bad page token") + } + if from >= length { + return length, length, "", nil + } + } + if pageSize > 0 && from+pageSize < length { + to = from + pageSize + nextPageToken = strconv.Itoa(to) + } + return from, to, nextPageToken, nil +} + +func (h *loggingHandler) filterEntries(filter string) ([]*logpb.LogEntry, error) { + logName, err := parseFilter(filter) + if err != nil { + return nil, err + } + if logName != "" { + return h.logs[logName], nil + } + var entries []*logpb.LogEntry + for _, es := range h.logs { + entries = append(entries, es...) + } + return entries, nil +} + +var filterRegexp = regexp.MustCompile(`^logName\s*=\s*"?([-_/.%\w]+)"?$`) + +// returns the log name, or "" for the empty filter +func parseFilter(filter string) (string, error) { + if filter == "" { + return "", nil + } + subs := filterRegexp.FindStringSubmatch(filter) + if subs == nil { + return "", invalidArgument("bad filter") + } + return subs[1], nil // cannot panic by construction of regexp +} + +func sortEntries(entries []*logpb.LogEntry, orderBy string) error { + switch orderBy { + case "", "timestamp asc": + sort.Sort(byTimestamp(entries)) + return nil + + case "timestamp desc": + sort.Sort(sort.Reverse(byTimestamp(entries))) + return nil + + default: + return invalidArgument("bad order_by") + } +} + +type byTimestamp []*logpb.LogEntry + +func (s byTimestamp) Len() int { return len(s) } +func (s byTimestamp) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTimestamp) Less(i, j int) bool { + c := compareTimestamps(s[i].Timestamp, s[j].Timestamp) + switch { + case c < 0: + return true + case c > 0: + return false + default: + return s[i].InsertId < s[j].InsertId + } +} + +func compareTimestamps(ts1, ts2 *tspb.Timestamp) int64 { + if ts1.Seconds != ts2.Seconds { + return ts1.Seconds - ts2.Seconds + } + return int64(ts1.Nanos - ts2.Nanos) +} + +// Lists monitored resource descriptors that are used by Stackdriver Logging. 
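+// Unlike the real service, this fake returns a single fixed "global"
+// descriptor, which is enough for the client tests in this repository.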
+func (h *loggingHandler) ListMonitoredResourceDescriptors(context.Context, *logpb.ListMonitoredResourceDescriptorsRequest) (*logpb.ListMonitoredResourceDescriptorsResponse, error) { + return &logpb.ListMonitoredResourceDescriptorsResponse{ + ResourceDescriptors: []*mrpb.MonitoredResourceDescriptor{ + { + Type: "global", + DisplayName: "Global", + Description: "... a log is not associated with any specific resource.", + Labels: []*lpb.LabelDescriptor{ + {Key: "project_id", Description: "The identifier of the GCP project..."}, + }, + }, + }, + }, nil +} + +// Lists logs. +func (h *loggingHandler) ListLogs(_ context.Context, req *logpb.ListLogsRequest) (*logpb.ListLogsResponse, error) { + // Return fixed, fake response. + logNames := []string{"a", "b", "c"} + from, to, npt, err := getPage(int(req.PageSize), req.PageToken, len(logNames)) + if err != nil { + return nil, err + } + return &logpb.ListLogsResponse{ + LogNames: logNames[from:to], + NextPageToken: npt, + }, nil +} + +// Gets a sink. +func (h *configHandler) GetSink(_ context.Context, req *logpb.GetSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + if s, ok := h.sinks[req.SinkName]; ok { + return s, nil + } + // TODO(jba): use error codes + return nil, fmt.Errorf("sink %q not found", req.SinkName) +} + +// Creates a sink. +func (h *configHandler) CreateSink(_ context.Context, req *logpb.CreateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/sinks/%s", req.Parent, req.Sink.Name) + if _, ok := h.sinks[fullName]; ok { + return nil, fmt.Errorf("sink with name %q already exists", fullName) + } + h.sinks[fullName] = req.Sink + return req.Sink, nil +} + +// Creates or updates a sink. +func (h *configHandler) UpdateSink(_ context.Context, req *logpb.UpdateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent sink will create it. + h.sinks[req.SinkName] = req.Sink + return req.Sink, nil +} + +// Deletes a sink. +func (h *configHandler) DeleteSink(_ context.Context, req *logpb.DeleteSinkRequest) (*emptypb.Empty, error) { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.sinks, req.SinkName) + return &emptypb.Empty{}, nil +} + +// Lists sinks. This fake implementation ignores the Parent field of +// ListSinksRequest. All sinks are listed, regardless of their project. +func (h *configHandler) ListSinks(_ context.Context, req *logpb.ListSinksRequest) (*logpb.ListSinksResponse, error) { + h.mu.Lock() + var sinks []*logpb.LogSink + for _, s := range h.sinks { + sinks = append(sinks, s) + } + h.mu.Unlock() // safe because no *logpb.LogSink is ever modified + // Since map iteration varies, sort the sinks. + sort.Sort(sinksByName(sinks)) + from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(sinks)) + if err != nil { + return nil, err + } + return &logpb.ListSinksResponse{ + Sinks: sinks[from:to], + NextPageToken: nextPageToken, + }, nil +} + +type sinksByName []*logpb.LogSink + +func (s sinksByName) Len() int { return len(s) } +func (s sinksByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sinksByName) Less(i, j int) bool { return s[i].Name < s[j].Name } + +// Gets a metric. 
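+// Metric names are full resource names of the form
+// "projects/PROJECT_ID/metrics/METRIC_ID", matching the keys that
+// CreateLogMetric stores below.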
+func (h *metricHandler) GetLogMetric(_ context.Context, req *logpb.GetLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + if s, ok := h.metrics[req.MetricName]; ok { + return s, nil + } + // TODO(jba): use error codes + return nil, fmt.Errorf("metric %q not found", req.MetricName) +} + +// Creates a metric. +func (h *metricHandler) CreateLogMetric(_ context.Context, req *logpb.CreateLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/metrics/%s", req.Parent, req.Metric.Name) + if _, ok := h.metrics[fullName]; ok { + return nil, fmt.Errorf("metric with name %q already exists", fullName) + } + h.metrics[fullName] = req.Metric + return req.Metric, nil +} + +// Creates or updates a metric. +func (h *metricHandler) UpdateLogMetric(_ context.Context, req *logpb.UpdateLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent metric will create it. + h.metrics[req.MetricName] = req.Metric + return req.Metric, nil +} + +// Deletes a metric. +func (h *metricHandler) DeleteLogMetric(_ context.Context, req *logpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.metrics, req.MetricName) + return &emptypb.Empty{}, nil +} + +// Lists metrics. This fake implementation ignores the Parent field of +// ListMetricsRequest. All metrics are listed, regardless of their project. +func (h *metricHandler) ListLogMetrics(_ context.Context, req *logpb.ListLogMetricsRequest) (*logpb.ListLogMetricsResponse, error) { + h.mu.Lock() + var metrics []*logpb.LogMetric + for _, s := range h.metrics { + metrics = append(metrics, s) + } + h.mu.Unlock() // safe because no *logpb.LogMetric is ever modified + // Since map iteration varies, sort the metrics. + sort.Sort(metricsByName(metrics)) + from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(metrics)) + if err != nil { + return nil, err + } + return &logpb.ListLogMetricsResponse{ + Metrics: metrics[from:to], + NextPageToken: nextPageToken, + }, nil +} + +type metricsByName []*logpb.LogMetric + +func (s metricsByName) Len() int { return len(s) } +func (s metricsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s metricsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } + +func invalidArgument(msg string) error { + // TODO(jba): status codes + return errors.New(msg) +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go b/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go new file mode 100644 index 0000000..cd6e5c2 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains only basic checks. The fake is effectively tested by the +// logging client unit tests. 
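+// (NewServer, parseFilter, and sortEntries each get a direct test below.)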
+ +package testing + +import ( + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" + logpb "google.golang.org/genproto/googleapis/logging/v2" + grpc "google.golang.org/grpc" +) + +func TestNewServer(t *testing.T) { + // Confirm that we can create and use a working gRPC server. + addr, err := NewServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + // Avoid "connection is closing; please retry" message from gRPC. + time.Sleep(300 * time.Millisecond) + conn.Close() +} + +func TestParseFilter(t *testing.T) { + for _, test := range []struct { + filter string + want string + wantErr bool + }{ + {"", "", false}, + {"logName = syslog", "syslog", false}, + {"logname = syslog", "", true}, + {"logName = 'syslog'", "", true}, + {"logName == syslog", "", true}, + } { + got, err := parseFilter(test.filter) + if err != nil { + if !test.wantErr { + t.Errorf("%q: got %v, want no error", test.filter, err) + } + continue + } + if test.wantErr { + t.Errorf("%q: got no error, want one", test.filter) + continue + } + if got != test.want { + t.Errorf("%q: got %q, want %q", test.filter, got, test.want) + } + } +} + +func TestSortEntries(t *testing.T) { + entries := []*logpb.LogEntry{ + /* 0 */ {Timestamp: &tspb.Timestamp{Seconds: 30}}, + /* 1 */ {Timestamp: &tspb.Timestamp{Seconds: 10}}, + /* 2 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "b"}, + /* 3 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "a"}, + /* 4 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "c"}, + } + for _, test := range []struct { + orderBy string + want []int // slice of index into entries; nil == error + }{ + {"", []int{1, 3, 2, 4, 0}}, + {"timestamp asc", []int{1, 3, 2, 4, 0}}, + {"timestamp desc", []int{0, 4, 2, 3, 1}}, + {"something else", nil}, + } { + got := make([]*logpb.LogEntry, len(entries)) + copy(got, entries) + err := sortEntries(got, test.orderBy) + if err != nil { + if test.want != nil { + t.Errorf("%q: got %v, want nil error", test.orderBy, err) + } + continue + } + want := make([]*logpb.LogEntry, len(entries)) + for i, j := range test.want { + want[i] = entries[j] + } + if !logEntriesEqual(got, want) { + t.Errorf("%q: got %v, want %v", test.orderBy, got, want) + } + } +} + +func logEntriesEqual(a, b []*logpb.LogEntry) bool { + if len(a) != len(b) { + return false + } + for i, aa := range a { + if !proto.Equal(aa, b[i]) { + return false + } + } + return true +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go new file mode 100644 index 0000000..39e6f57 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go @@ -0,0 +1,66 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logadmin_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Entries() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx, logadmin.Filter(`logName = "projects/my-project/logs/my-log"`)) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleFilter_timestamp() { + // This example demonstrates how to list the last 24 hours of log entries. + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + oneDayAgo := time.Now().Add(-24 * time.Hour) + t := oneDayAgo.Format(time.RFC3339) // Logging API wants timestamps in RFC 3339 format. + it := client.Entries(ctx, logadmin.Filter(fmt.Sprintf(`timestamp > "%s"`, t))) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleEntryIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx) + for { + entry, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(entry) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go new file mode 100644 index 0000000..2e876e9 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Metrics() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleMetricIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + for { + metric, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(metric) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go new file mode 100644 index 0000000..036eeeb --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "log" + "net/http" + + "cloud.google.com/go/logging" + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var ( + client *logadmin.Client + projectID = flag.String("project-id", "", "ID of the project to use") +) + +func ExampleClient_Entries_pagination() { + // This example demonstrates how to iterate through items a page at a time + // even if each successive page is fetched by a different process. It is a + // complete web server that displays pages of log entries. To run it as a + // standalone program, rename both the package and this function to "main". + ctx := context.Background() + flag.Parse() + if *projectID == "" { + log.Fatal("-project-id missing") + } + var err error + client, err = logadmin.NewClient(ctx, *projectID) + if err != nil { + log.Fatalf("creating logging client: %v", err) + } + + http.HandleFunc("/entries", handleEntries) + log.Print("listening on 8080") + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +var pageTemplate = template.Must(template.New("").Parse(` +
+<table>
+  {{range .Entries}}
+    <tr><td>{{.}}</td></tr>
+  {{end}}
+</table>
+{{if .Next}}
+  <a href="/entries?pageToken={{.Next}}">Next Page</a>
+{{end}}
+`))
+
+func handleEntries(w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+	filter := fmt.Sprintf(`logName = "projects/%s/logs/testlog"`, *projectID)
+	it := client.Entries(ctx, logadmin.Filter(filter))
+	var entries []*logging.Entry
+	nextTok, err := iterator.NewPager(it, 5, r.URL.Query().Get("pageToken")).NextPage(&entries)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("problem getting the next page: %v", err), http.StatusInternalServerError)
+		return
+	}
+	data := struct {
+		Entries []*logging.Entry
+		Next    string
+	}{
+		entries,
+		nextTok,
+	}
+	var buf bytes.Buffer
+	if err := pageTemplate.Execute(&buf, data); err != nil {
+		http.Error(w, fmt.Sprintf("problem executing page template: %v", err), http.StatusInternalServerError)
+	}
+	if _, err := buf.WriteTo(w); err != nil {
+		log.Printf("writing response: %v", err)
+	}
+}
diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go
new file mode 100644
index 0000000..fe67e23
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go
@@ -0,0 +1,52 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Sinks() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleSinkIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/examples_test.go b/vendor/cloud.google.com/go/logging/logadmin/examples_test.go new file mode 100644 index 0000000..0926dd5 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/examples_test.go @@ -0,0 +1,161 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.DeleteLog(ctx, "my-log") + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.CreateMetric(ctx, &logadmin.Metric{ + ID: "severe-errors", + Description: "entries at ERROR or higher severities", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteMetric(ctx, "severe-errors"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Metric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + m, err := client.Metric(ctx, "severe-errors") + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(m) +} + +func ExampleClient_UpdateMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.UpdateMetric(ctx, &logadmin.Metric{ + ID: "severe-errors", + Description: "entries at high severities", + Filter: "severity > ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + sink, err := client.CreateSink(ctx, &logadmin.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} + +func ExampleClient_DeleteSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteSink(ctx, "severe-errors-to-gcs"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Sink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + s, err := client.Sink(ctx, "severe-errors-to-gcs") + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) +} + +func ExampleClient_UpdateSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + sink, err := client.UpdateSink(ctx, &logadmin.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-other-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go new file mode 100644 index 0000000..4097ea7 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go @@ -0,0 +1,404 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +// Package logadmin contains a Stackdriver Logging client that can be used +// for reading logs and working with sinks, metrics and monitored resources. +// For a client that can write logs, see package cloud.google.com/go/logging. +// +// The client uses Logging API v2. +// See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. +// +// Note: This package is in beta. Some backwards-incompatible changes may occur. 
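+//
+// A minimal sketch of typical use, drawn from the examples in this package
+// ("my-project" and the log name are placeholders):
+//
+//	client, err := logadmin.NewClient(ctx, "my-project")
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	it := client.Entries(ctx, logadmin.Filter(`logName = "projects/my-project/logs/my-log"`))
+//	for {
+//		e, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		fmt.Println(e)
+//	}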
+package logadmin // import "cloud.google.com/go/logging/logadmin" + +import ( + "errors" + "fmt" + "math" + "net/http" + "net/url" + "strings" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/logging" + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc/codes" + + // Import the following so EntryIterator can unmarshal log protos. + _ "google.golang.org/genproto/googleapis/appengine/logging/v1" + _ "google.golang.org/genproto/googleapis/cloud/audit" +) + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + lClient *vkit.Client // logging client + sClient *vkit.ConfigClient // sink client + mClient *vkit.MetricsClient // metric client + projectID string + closed bool +} + +// NewClient returns a new logging client associated with the provided project ID. +// +// By default NewClient uses AdminScope. To use a different scope, call +// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + // Check for '/' in project ID to reserve the ability to support various owning resources, + // in the form "{Collection}/{Name}", for instance "organizations/my-org". + if strings.ContainsRune(projectID, '/') { + return nil, errors.New("logging: project ID contains '/'") + } + opts = append([]option.ClientOption{ + option.WithEndpoint(internal.ProdAddr), + option.WithScopes(logging.AdminScope), + }, opts...) + lc, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + // TODO(jba): pass along any client options that should be provided to all clients. + sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection())) + if err != nil { + return nil, err + } + mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection())) + if err != nil { + return nil, err + } + // Retry some non-idempotent methods on INTERNAL, because it happens sometimes + // and in all observed cases the operation did not complete. + retryerOnInternal := func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Internal, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + } + mc.CallOptions.CreateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} + mc.CallOptions.UpdateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} + + lc.SetGoogleClientInfo("gccl", version.Repo) + sc.SetGoogleClientInfo("gccl", version.Repo) + mc.SetGoogleClientInfo("gccl", version.Repo) + client := &Client{ + lClient: lc, + sClient: sc, + mClient: mc, + projectID: projectID, + } + return client, nil +} + +// parent returns the string used in many RPCs to denote the parent resource of the log. +func (c *Client) parent() string { + return "projects/" + c.projectID +} + +// Close closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + // Return only the first error. Since all clients share an underlying connection, + // Closes after the first always report a "connection is closing" error. 
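+	// (NewClient created sClient and mClient with option.WithGRPCConn on
+	// lClient's connection, so closing lClient tears down the shared
+	// transport for all three clients.)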
+ err := c.lClient.Close() + _ = c.sClient.Close() + _ = c.mClient.Close() + c.closed = true + return err +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it receives new entries. +// logID identifies the log within the project. An example log ID is "syslog". Requires AdminScope. +func (c *Client) DeleteLog(ctx context.Context, logID string) error { + return c.lClient.DeleteLog(ctx, &logpb.DeleteLogRequest{ + LogName: internal.LogPath(c.parent(), logID), + }) +} + +func toHTTPRequest(p *logtypepb.HttpRequest) (*logging.HTTPRequest, error) { + if p == nil { + return nil, nil + } + u, err := url.Parse(p.RequestUrl) + if err != nil { + return nil, err + } + var dur time.Duration + if p.Latency != nil { + dur, err = ptypes.Duration(p.Latency) + if err != nil { + return nil, err + } + } + hr := &http.Request{ + Method: p.RequestMethod, + URL: u, + Header: map[string][]string{}, + } + if p.UserAgent != "" { + hr.Header.Set("User-Agent", p.UserAgent) + } + if p.Referer != "" { + hr.Header.Set("Referer", p.Referer) + } + return &logging.HTTPRequest{ + Request: hr, + RequestSize: p.RequestSize, + Status: int(p.Status), + ResponseSize: p.ResponseSize, + Latency: dur, + RemoteIP: p.RemoteIp, + CacheHit: p.CacheHit, + CacheValidatedWithOriginServer: p.CacheValidatedWithOriginServer, + }, nil +} + +// An EntriesOption is an option for listing log entries. +type EntriesOption interface { + set(*logpb.ListLogEntriesRequest) +} + +// ProjectIDs sets the project IDs or project numbers from which to retrieve +// log entries. Examples of a project ID: "my-project-1A", "1234567890". +func ProjectIDs(pids []string) EntriesOption { return projectIDs(pids) } + +type projectIDs []string + +func (p projectIDs) set(r *logpb.ListLogEntriesRequest) { + r.ResourceNames = make([]string, len(p)) + for i, v := range p { + r.ResourceNames[i] = fmt.Sprintf("projects/%s", v) + } +} + +// Filter sets an advanced logs filter for listing log entries (see +// https://cloud.google.com/logging/docs/view/advanced_filters). The filter is +// compared against all log entries in the projects specified by ProjectIDs. +// Only entries that match the filter are retrieved. An empty filter (the +// default) matches all log entries. +// +// In the filter string, log names must be written in their full form, as +// "projects/PROJECT-ID/logs/LOG-ID". Forward slashes in LOG-ID must be +// replaced by %2F before calling Filter. +// +// Timestamps in the filter string must be written in RFC 3339 format. See the +// timestamp example. +func Filter(f string) EntriesOption { return filter(f) } + +type filter string + +func (f filter) set(r *logpb.ListLogEntriesRequest) { r.Filter = string(f) } + +// NewestFirst causes log entries to be listed from most recent (newest) to +// least recent (oldest). By default, they are listed from oldest to newest. +func NewestFirst() EntriesOption { return newestFirst{} } + +type newestFirst struct{} + +func (newestFirst) set(r *logpb.ListLogEntriesRequest) { r.OrderBy = "timestamp desc" } + +// Entries returns an EntryIterator for iterating over log entries. By default, +// the log entries will be restricted to those from the project passed to +// NewClient. This may be overridden by passing a ProjectIDs option. Requires ReadScope or AdminScope. 
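+//
+// Options are applied in order, so if the same option is given more than
+// once, the last one wins (see TestListLogEntriesRequest). For example:
+//
+//	it := client.Entries(ctx, logadmin.NewestFirst(), logadmin.Filter(filter))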
+func (c *Client) Entries(ctx context.Context, opts ...EntriesOption) *EntryIterator { + it := &EntryIterator{ + it: c.lClient.ListLogEntries(ctx, listLogEntriesRequest(c.projectID, opts)), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +func listLogEntriesRequest(projectID string, opts []EntriesOption) *logpb.ListLogEntriesRequest { + req := &logpb.ListLogEntriesRequest{ + ResourceNames: []string{"projects/" + projectID}, + } + for _, opt := range opts { + opt.set(req) + } + return req +} + +// An EntryIterator iterates over log entries. +type EntryIterator struct { + it *vkit.LogEntryIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*logging.Entry +} + +// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. +func (it *EntryIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done +// (https://godoc.org/google.golang.org/api/iterator) if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *EntryIterator) Next() (*logging.Entry, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *EntryIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + e, err := fromLogEntry(item) + if err != nil { + return err + } + it.items = append(it.items, e) + return nil + }) +} + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +var slashUnescaper = strings.NewReplacer("%2F", "/", "%2f", "/") + +func fromLogEntry(le *logpb.LogEntry) (*logging.Entry, error) { + time, err := ptypes.Timestamp(le.Timestamp) + if err != nil { + return nil, err + } + var payload interface{} + switch x := le.Payload.(type) { + case *logpb.LogEntry_TextPayload: + payload = x.TextPayload + + case *logpb.LogEntry_ProtoPayload: + var d ptypes.DynamicAny + if err := ptypes.UnmarshalAny(x.ProtoPayload, &d); err != nil { + return nil, fmt.Errorf("logging: unmarshalling proto payload: %v", err) + } + payload = d.Message + + case *logpb.LogEntry_JsonPayload: + // Leave this as a Struct. + // TODO(jba): convert to map[string]interface{}? + payload = x.JsonPayload + + default: + return nil, fmt.Errorf("logging: unknown payload type: %T", le.Payload) + } + hr, err := toHTTPRequest(le.HttpRequest) + if err != nil { + return nil, err + } + return &logging.Entry{ + Timestamp: time, + Severity: logging.Severity(le.Severity), + Payload: payload, + Labels: le.Labels, + InsertID: le.InsertId, + HTTPRequest: hr, + Operation: le.Operation, + LogName: slashUnescaper.Replace(le.LogName), + Resource: le.Resource, + Trace: le.Trace, + }, nil +} + +// Logs lists the logs owned by the parent resource of the client. +func (c *Client) Logs(ctx context.Context) *LogIterator { + it := &LogIterator{ + parentResource: c.parent(), + it: c.lClient.ListLogs(ctx, &logpb.ListLogsRequest{Parent: c.parent()}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A LogIterator iterates over logs. 
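+// Each item is a short log ID such as "syslog", not the full resource name;
+// fetch strips the parent prefix using internal.LogIDFromPath.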
+type LogIterator struct { + parentResource string + it *vkit.StringIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []string +} + +// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. +func (it *LogIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done +// (https://godoc.org/google.golang.org/api/iterator) if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogIterator) Next() (string, error) { + if err := it.nextFunc(); err != nil { + return "", err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + logPath, err := it.it.Next() + if err != nil { + return err + } + logID := internal.LogIDFromPath(it.parentResource, logPath) + it.items = append(it.items, logID) + return nil + }) +} + +// Common fetch code for iterators that are backed by vkit iterators. +func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) { + pi.MaxSize = pageSize + pi.Token = pageToken + // Get one item, which will fill the buffer. + if err := next(); err != nil { + return "", err + } + // Collect the rest of the buffer. + for pi.Remaining() > 0 { + if err := next(); err != nil { + return "", err + } + } + return pi.Token, nil +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go new file mode 100644 index 0000000..1124485 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go @@ -0,0 +1,265 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): test that OnError is getting called appropriately. + +package logadmin + +import ( + "flag" + "log" + "net/http" + "net/url" + "os" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/logging" + ltesting "cloud.google.com/go/logging/internal/testing" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + structpb "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/net/context" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + audit "google.golang.org/genproto/googleapis/cloud/audit" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" +) + +var ( + client *Client + testProjectID string +) + +var ( + // If true, this test is using the production service, not a fake. 
+ integrationTest bool + + newClient func(ctx context.Context, projectID string) *Client +) + +func TestMain(m *testing.M) { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + testProjectID = testutil.ProjID() + if testProjectID == "" || testing.Short() { + integrationTest = false + if testProjectID != "" { + log.Print("Integration tests skipped in short mode (using fake instead)") + } + testProjectID = "PROJECT_ID" + addr, err := ltesting.NewServer() + if err != nil { + log.Fatalf("creating fake server: %v", err) + } + newClient = func(ctx context.Context, projectID string) *Client { + conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + log.Fatalf("dialing %q: %v", addr, err) + } + c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + return c + } + } else { + integrationTest = true + ts := testutil.TokenSource(ctx, logging.AdminScope) + if ts == nil { + log.Fatal("The project key must be set. See CONTRIBUTING.md for details") + } + log.Printf("running integration tests with project %s", testProjectID) + newClient = func(ctx context.Context, projectID string) *Client { + c, err := NewClient(ctx, projectID, option.WithTokenSource(ts), + option.WithGRPCDialOption(grpc.WithBlock())) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + return c + } + } + client = newClient(ctx, testProjectID) + initMetrics(ctx) + cleanup := initSinks(ctx) + exit := m.Run() + cleanup() + client.Close() + os.Exit(exit) +} + +// EntryIterator and DeleteLog are tested in the logging package. + +func TestClientClose(t *testing.T) { + c := newClient(context.Background(), testProjectID) + if err := c.Close(); err != nil { + t.Errorf("want got %v, want nil", err) + } +} + +func TestFromLogEntry(t *testing.T) { + now := time.Now() + res := &mrpb.MonitoredResource{Type: "global"} + ts, err := ptypes.TimestampProto(now) + if err != nil { + t.Fatal(err) + } + logEntry := logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Payload: &logpb.LogEntry_TextPayload{TextPayload: "hello"}, + Timestamp: ts, + Severity: logtypepb.LogSeverity_INFO, + InsertId: "123", + HttpRequest: &logtypepb.HttpRequest{ + RequestMethod: "GET", + RequestUrl: "http:://example.com/path?q=1", + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: &durpb.Duration{Seconds: 100}, + UserAgent: "user-agent", + RemoteIp: "127.0.0.1", + Referer: "referer", + CacheHit: true, + CacheValidatedWithOriginServer: true, + }, + Labels: map[string]string{ + "a": "1", + "b": "two", + "c": "true", + }, + } + u, err := url.Parse("http:://example.com/path?q=1") + if err != nil { + t.Fatal(err) + } + want := &logging.Entry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: now.In(time.UTC), + Severity: logging.Info, + Payload: "hello", + Labels: map[string]string{ + "a": "1", + "b": "two", + "c": "true", + }, + InsertID: "123", + HTTPRequest: &logging.HTTPRequest{ + Request: &http.Request{ + Method: "GET", + URL: u, + Header: map[string][]string{ + "User-Agent": []string{"user-agent"}, + "Referer": []string{"referer"}, + }, + }, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: 100 * time.Second, + RemoteIP: "127.0.0.1", + CacheHit: true, + CacheValidatedWithOriginServer: true, + }, + } + got, err := fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want, 
testutil.IgnoreUnexported(http.Request{})); diff != "" { + t.Errorf("FullEntry:\n%s", diff) + } + + // Proto payload. + alog := &audit.AuditLog{ + ServiceName: "svc", + MethodName: "method", + ResourceName: "shelves/S/books/B", + } + any, err := ptypes.MarshalAny(alog) + if err != nil { + t.Fatal(err) + } + logEntry = logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: ts, + Payload: &logpb.LogEntry_ProtoPayload{ProtoPayload: any}, + } + got, err = fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if !ltesting.PayloadEqual(got.Payload, alog) { + t.Errorf("got %+v, want %+v", got.Payload, alog) + } + + // JSON payload. + jstruct := &structpb.Struct{Fields: map[string]*structpb.Value{ + "f": &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: 3.1}}, + }} + logEntry = logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: ts, + Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jstruct}, + } + got, err = fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if !ltesting.PayloadEqual(got.Payload, jstruct) { + t.Errorf("got %+v, want %+v", got.Payload, jstruct) + } +} + +func TestListLogEntriesRequest(t *testing.T) { + for _, test := range []struct { + opts []EntriesOption + projectIDs []string + filter string + orderBy string + }{ + // Default is client's project ID, empty filter and orderBy. + {nil, + []string{"PROJECT_ID"}, "", ""}, + {[]EntriesOption{NewestFirst(), Filter("f")}, + []string{"PROJECT_ID"}, "f", "timestamp desc"}, + {[]EntriesOption{ProjectIDs([]string{"foo"})}, + []string{"foo"}, "", ""}, + {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, + []string{"foo"}, "f", "timestamp desc"}, + {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, + []string{"foo"}, "f", "timestamp desc"}, + // If there are repeats, last one wins. + {[]EntriesOption{NewestFirst(), Filter("no"), ProjectIDs([]string{"foo"}), Filter("f")}, + []string{"foo"}, "f", "timestamp desc"}, + } { + got := listLogEntriesRequest("PROJECT_ID", test.opts) + want := &logpb.ListLogEntriesRequest{ + ResourceNames: []string{"projects/" + test.projectIDs[0]}, + Filter: test.filter, + OrderBy: test.orderBy, + } + if !proto.Equal(got, want) { + t.Errorf("%v:\ngot %v\nwant %v", test.opts, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/metrics.go b/vendor/cloud.google.com/go/logging/logadmin/metrics.go new file mode 100644 index 0000000..9374ac4 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/metrics.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Metric describes a logs-based metric. 
The value of the metric is the +// number of log entries that match a logs filter. +// +// Metrics are a feature of Stackdriver Monitoring. +// See https://cloud.google.com/monitoring/api/v3/metrics for more about them. +type Metric struct { + // ID is a client-assigned metric identifier. Example: + // "severe_errors". Metric identifiers are limited to 1000 + // characters and can include only the following characters: A-Z, + // a-z, 0-9, and the special characters _-.,+!*',()%/\. The + // forward-slash character (/) denotes a hierarchy of name pieces, + // and it cannot be the first character of the name. + ID string + + // Description describes this metric. It is used in documentation. + Description string + + // Filter is an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters). + // Example: "logName:syslog AND severity>=ERROR". + Filter string +} + +// CreateMetric creates a logs-based metric. +func (c *Client) CreateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.CreateLogMetric(ctx, &logpb.CreateLogMetricRequest{ + Parent: c.parent(), + Metric: toLogMetric(m), + }) + return err +} + +// DeleteMetric deletes a log-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +func (c *Client) DeleteMetric(ctx context.Context, metricID string) error { + return c.mClient.DeleteLogMetric(ctx, &logpb.DeleteLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) +} + +// Metric gets a logs-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +// Requires ReadScope or AdminScope. +func (c *Client) Metric(ctx context.Context, metricID string) (*Metric, error) { + lm, err := c.mClient.GetLogMetric(ctx, &logpb.GetLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) + if err != nil { + return nil, err + } + return fromLogMetric(lm), nil +} + +// UpdateMetric creates a logs-based metric if it does not exist, or updates an +// existing one. +func (c *Client) UpdateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.UpdateLogMetric(ctx, &logpb.UpdateLogMetricRequest{ + MetricName: c.metricPath(m.ID), + Metric: toLogMetric(m), + }) + return err +} + +func (c *Client) metricPath(metricID string) string { + return fmt.Sprintf("%s/metrics/%s", c.parent(), metricID) +} + +// Metrics returns a MetricIterator for iterating over all Metrics in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Metrics(ctx context.Context) *MetricIterator { + it := &MetricIterator{ + it: c.mClient.ListLogMetrics(ctx, &logpb.ListLogMetricsRequest{Parent: c.parent()}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A MetricIterator iterates over Metrics. +type MetricIterator struct { + it *vkit.LogMetricIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Metric +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. 
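// (Illustrative sketch, not code from this change.) Tying the metric CRUD
// methods above together, with error handling elided and a constructed
// logadmin client assumed:
//
//	m := &logadmin.Metric{
//		ID:          "severe_errors",
//		Description: "entries at severity ERROR or above",
//		Filter:      "severity>=ERROR",
//	}
//	if err := client.CreateMetric(ctx, m); err != nil {
//		// handle err
//	}
//	it := client.Metrics(ctx)
//	for {
//		got, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// handle err
//		}
//		fmt.Println(got.ID, got.Filter)
//	}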
+func (it *MetricIterator) Next() (*Metric, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, fromLogMetric(item)) + return nil + }) +} + +func toLogMetric(m *Metric) *logpb.LogMetric { + return &logpb.LogMetric{ + Name: m.ID, + Description: m.Description, + Filter: m.Filter, + } +} + +func fromLogMetric(lm *logpb.LogMetric) *Metric { + return &Metric{ + ID: lm.Name, + Description: lm.Description, + Filter: lm.Filter, + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go b/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go new file mode 100644 index 0000000..3a70358 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "log" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var metricIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-METRIC") + +// Initializes the tests before they run. +func initMetrics(ctx context.Context) { + // Clean up from aborted tests. + it := client.Metrics(ctx) +loop: + for { + m, err := it.Next() + switch err { + case nil: + if metricIDs.Older(m.ID, 24*time.Hour) { + client.DeleteMetric(ctx, m.ID) + } + case iterator.Done: + break loop + default: + log.Printf("cleanupMetrics: %v", err) + return + } + } +} + +func TestCreateDeleteMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: metricIDs.New(), + Description: "DESC", + Filter: "FILTER", + } + if err := client.CreateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + if err := client.DeleteMetric(ctx, metric.ID); err != nil { + t.Fatal(err) + } + + if _, err := client.Metric(ctx, metric.ID); err == nil { + t.Fatal("got no error, expected one") + } +} + +func TestUpdateMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: metricIDs.New(), + Description: "DESC", + Filter: "FILTER", + } + + // Updating a non-existent metric creates a new one. + if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Updating an existing metric changes it. 
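	// (Sketch, not part of the original test.) UpdateMetric is effectively an
	// upsert, which is what the two halves of this test exercise: the call
	// above created the metric because it did not exist, and the call below
	// modifies it in place. A caller can therefore converge on a desired
	// state with a single call:
	//
	//	_ = client.UpdateMetric(ctx, &Metric{ID: "severe_errors", Filter: "severity>=ERROR"})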
+ metric.Description = "CHANGED" + if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + got, err = client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListMetrics(t *testing.T) { + ctx := context.Background() + + var metrics []*Metric + want := map[string]*Metric{} + for i := 0; i < 10; i++ { + m := &Metric{ + ID: metricIDs.New(), + Description: "DESC", + Filter: "FILTER", + } + metrics = append(metrics, m) + want[m.ID] = m + } + for _, m := range metrics { + if err := client.CreateMetric(ctx, m); err != nil { + t.Fatalf("Create(%q): %v", m.ID, err) + } + defer client.DeleteMetric(ctx, m.ID) + } + + got := map[string]*Metric{} + it := client.Metrics(ctx) + for { + m, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + // If tests run simultaneously, we may have more metrics than we + // created. So only check for our own. + if _, ok := want[m.ID]; ok { + got[m.ID] = m + } + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/resources.go b/vendor/cloud.google.com/go/logging/logadmin/resources.go new file mode 100644 index 0000000..79e8fdb --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/resources.go @@ -0,0 +1,74 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// ResourceDescriptors returns a ResourceDescriptorIterator +// for iterating over MonitoredResourceDescriptors. Requires ReadScope or AdminScope. +// See https://cloud.google.com/logging/docs/api/v2/#monitored-resources for an explanation of +// monitored resources. +// See https://cloud.google.com/logging/docs/api/v2/resource-list for a list of monitored resources. +func (c *Client) ResourceDescriptors(ctx context.Context) *ResourceDescriptorIterator { + it := &ResourceDescriptorIterator{ + it: c.lClient.ListMonitoredResourceDescriptors(ctx, + &logpb.ListMonitoredResourceDescriptorsRequest{}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// ResourceDescriptorIterator is an iterator over MonitoredResourceDescriptors. +type ResourceDescriptorIterator struct { + it *vkit.MonitoredResourceDescriptorIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*mrpb.MonitoredResourceDescriptor +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
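// (Illustrative sketch, not code from this change.) Monitored-resource
// descriptors are read-only, so the only operation is listing. Assuming a
// constructed client:
//
//	it := client.ResourceDescriptors(ctx)
//	for {
//		d, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		fmt.Println(d.Type, d.DisplayName) // e.g. "gce_instance"
//	}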
+func (it *ResourceDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *ResourceDescriptorIterator) Next() (*mrpb.MonitoredResourceDescriptor, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ResourceDescriptorIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, item) + return nil + }) +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/resources_test.go b/vendor/cloud.google.com/go/logging/logadmin/resources_test.go new file mode 100644 index 0000000..067d3d7 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/resources_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func TestMonitoredResourceDescriptors(t *testing.T) { + // We can't create MonitoredResourceDescriptors, and there is no guarantee + // about what the service will return. So we just check that the result is + // non-empty. + it := client.ResourceDescriptors(context.Background()) + n := 0 +loop: + for { + _, err := it.Next() + switch err { + case nil: + n++ + case iterator.Done: + break loop + default: + t.Fatal(err) + } + } + if n == 0 { + t.Fatal("Next: got no MetricResourceDescriptors, expected at least one") + } + // TODO(jba) test pagination. +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/sinks.go b/vendor/cloud.google.com/go/logging/logadmin/sinks.go new file mode 100644 index 0000000..12aa54c --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/sinks.go @@ -0,0 +1,168 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Sink describes a sink used to export log entries outside Stackdriver +// Logging. 
Incoming log entries matching a filter are exported to a +// destination (a Cloud Storage bucket, BigQuery dataset or Cloud Pub/Sub +// topic). +// +// For more information, see https://cloud.google.com/logging/docs/export/using_exported_logs. +// (The Sinks in this package are what the documentation refers to as "project sinks".) +type Sink struct { + // ID is a client-assigned sink identifier. Example: + // "my-severe-errors-to-pubsub". + // Sink identifiers are limited to 1000 characters + // and can include only the following characters: A-Z, a-z, + // 0-9, and the special characters "_-.". + ID string + + // Destination is the export destination. See + // https://cloud.google.com/logging/docs/api/tasks/exporting-logs. + // Examples: "storage.googleapis.com/a-bucket", + // "bigquery.googleapis.com/projects/a-project-id/datasets/a-dataset". + Destination string + + // Filter optionally specifies an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters) that + // defines the log entries to be exported. Example: "logName:syslog AND + // severity>=ERROR". If omitted, all entries are returned. + Filter string +} + +// CreateSink creates a Sink. It returns an error if the Sink already exists. +// Requires AdminScope. +func (c *Client) CreateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.CreateSink(ctx, &logpb.CreateSinkRequest{ + Parent: c.parent(), + Sink: toLogSink(sink), + }) + if err != nil { + fmt.Printf("Sink: %+v\n", toLogSink(sink)) + return nil, err + } + return fromLogSink(ls), nil +} + +// DeleteSink deletes a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires AdminScope. +func (c *Client) DeleteSink(ctx context.Context, sinkID string) error { + return c.sClient.DeleteSink(ctx, &logpb.DeleteSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) +} + +// Sink gets a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires ReadScope or AdminScope. +func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) { + ls, err := c.sClient.GetSink(ctx, &logpb.GetSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), nil +} + +// UpdateSink updates an existing Sink. Requires AdminScope. +func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{ + SinkName: c.sinkPath(sink.ID), + Sink: toLogSink(sink), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), err +} + +func (c *Client) sinkPath(sinkID string) string { + return fmt.Sprintf("%s/sinks/%s", c.parent(), sinkID) +} + +// Sinks returns a SinkIterator for iterating over all Sinks in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Sinks(ctx context.Context) *SinkIterator { + it := &SinkIterator{ + it: c.sClient.ListSinks(ctx, &logpb.ListSinksRequest{Parent: c.parent()}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A SinkIterator iterates over Sinks. +type SinkIterator struct { + it *vkit.LogSinkIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Sink +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
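// (Illustrative sketch, not code from this change.) A minimal create/read
// round trip for the sink API above. The bucket name is a placeholder, and
// per the test notes in this change the cloud-logs@google.com group needs
// owner access to the destination bucket:
//
//	s, err := client.CreateSink(ctx, &logadmin.Sink{
//		ID:          "severe-errors-to-gcs",
//		Destination: "storage.googleapis.com/my-log-bucket",
//		Filter:      "severity>=ERROR",
//	})
//	if err != nil {
//		// handle err
//	}
//	s, err = client.Sink(ctx, s.ID) // read it back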
+func (it *SinkIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *SinkIterator) Next() (*Sink, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SinkIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, fromLogSink(item)) + return nil + }) +} + +func toLogSink(s *Sink) *logpb.LogSink { + return &logpb.LogSink{ + Name: s.ID, + Destination: s.Destination, + Filter: s.Filter, + OutputVersionFormat: logpb.LogSink_V2, + } +} + +func fromLogSink(ls *logpb.LogSink) *Sink { + return &Sink{ + ID: ls.Name, + Destination: ls.Destination, + Filter: ls.Filter, + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go b/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go new file mode 100644 index 0000000..20f4b7c --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go @@ -0,0 +1,227 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): document in CONTRIBUTING.md that service account must be given "Logs Configuration Writer" IAM role for sink tests to pass. +// TODO(jba): [cont] (1) From top left menu, go to IAM & Admin. (2) In Roles dropdown for acct, select Logging > Logs Configuration Writer. (3) Save. +// TODO(jba): Also, cloud-logs@google.com must have Owner permission on the GCS bucket named for the test project. + +package logadmin + +import ( + "log" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var sinkIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-SINK") + +const testFilter = "" + +var testSinkDestination string + +// Called just before TestMain calls m.Run. +// Returns a cleanup function to be called after the tests finish. +func initSinks(ctx context.Context) func() { + // Create a unique GCS bucket so concurrent tests don't interfere with each other. + bucketIDs := testutil.NewUIDSpace(testProjectID + "-log-sink") + testBucket := bucketIDs.New() + testSinkDestination = "storage.googleapis.com/" + testBucket + var storageClient *storage.Client + if integrationTest { + // Create a unique bucket as a sink destination, and give the cloud logging account + // owner right. 
+ ts := testutil.TokenSource(ctx, storage.ScopeFullControl) + var err error + storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("new storage client: %v", err) + } + bucket := storageClient.Bucket(testBucket) + if err := bucket.Create(ctx, testProjectID, nil); err != nil { + log.Fatalf("creating storage bucket %q: %v", testBucket, err) + } + if err := bucket.ACL().Set(ctx, "group-cloud-logs@google.com", storage.RoleOwner); err != nil { + log.Fatalf("setting owner role: %v", err) + } + } + // Clean up from aborted tests. + it := client.Sinks(ctx) + for { + s, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Printf("listing sinks: %v", err) + break + } + if sinkIDs.Older(s.ID, 24*time.Hour) { + client.DeleteSink(ctx, s.ID) // ignore error + } + } + if integrationTest { + for _, bn := range bucketNames(ctx, storageClient) { + if bucketIDs.Older(bn, 24*time.Hour) { + storageClient.Bucket(bn).Delete(ctx) // ignore error + } + } + return func() { + if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil { + log.Printf("deleting %q: %v", testBucket, err) + } + storageClient.Close() + } + } + return func() {} +} + +// Collect the name of all buckets for the test project. +func bucketNames(ctx context.Context, client *storage.Client) []string { + var names []string + it := client.Buckets(ctx, testProjectID) +loop: + for { + b, err := it.Next() + switch err { + case nil: + names = append(names, b.Name) + case iterator.Done: + break loop + default: + log.Printf("listing buckets: %v", err) + break loop + } + } + return names +} + +func TestCreateDeleteSink(t *testing.T) { + ctx := context.Background() + sink := &Sink{ + ID: sinkIDs.New(), + Destination: testSinkDestination, + Filter: testFilter, + } + got, err := client.CreateSink(ctx, sink) + if err != nil { + t.Fatal(err) + } + defer client.DeleteSink(ctx, sink.ID) + if want := sink; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + if err := client.DeleteSink(ctx, sink.ID); err != nil { + t.Fatal(err) + } + + if _, err := client.Sink(ctx, sink.ID); err == nil { + t.Fatal("got no error, expected one") + } +} + +func TestUpdateSink(t *testing.T) { + ctx := context.Background() + sink := &Sink{ + ID: sinkIDs.New(), + Destination: testSinkDestination, + Filter: testFilter, + } + + if _, err := client.CreateSink(ctx, sink); err != nil { + t.Fatal(err) + } + got, err := client.UpdateSink(ctx, sink) + if err != nil { + t.Fatal(err) + } + defer client.DeleteSink(ctx, sink.ID) + if want := sink; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Updating an existing sink changes it. 
+ sink.Filter = "" + if _, err := client.UpdateSink(ctx, sink); err != nil { + t.Fatal(err) + } + got, err = client.Sink(ctx, sink.ID) + if err != nil { + t.Fatal(err) + } + if want := sink; !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListSinks(t *testing.T) { + ctx := context.Background() + var sinks []*Sink + want := map[string]*Sink{} + for i := 0; i < 4; i++ { + s := &Sink{ + ID: sinkIDs.New(), + Destination: testSinkDestination, + Filter: testFilter, + } + sinks = append(sinks, s) + want[s.ID] = s + } + for _, s := range sinks { + if _, err := client.CreateSink(ctx, s); err != nil { + t.Fatalf("Create(%q): %v", s.ID, err) + } + defer client.DeleteSink(ctx, s.ID) + } + + got := map[string]*Sink{} + it := client.Sinks(ctx) + for { + s, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + // If tests run simultaneously, we may have more sinks than we + // created. So only check for our own. + if _, ok := want[s.ID]; ok { + got[s.ID] = s + } + } + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go new file mode 100644 index 0000000..5738adc --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -0,0 +1,771 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// API/gRPC features intentionally missing from this client: +// - You cannot have the server pick the time of the entry. This client +// always sends a time. +// - There is no way to provide a protocol buffer payload. +// - No support for the "partial success" feature when writing log entries. + +// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded. +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +package logging + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +const ( + // Scope for reading from the logging service. + ReadScope = "https://www.googleapis.com/auth/logging.read" + + // Scope for writing to the logging service. 
+ WriteScope = "https://www.googleapis.com/auth/logging.write" + + // Scope for administrative actions on the logging service. + AdminScope = "https://www.googleapis.com/auth/logging.admin" +) + +const ( + // defaultErrorCapacity is the capacity of the channel used to deliver + // errors to the OnError function. + defaultErrorCapacity = 10 + + // DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption. + DefaultDelayThreshold = time.Second + + // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption. + DefaultEntryCountThreshold = 1000 + + // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. + DefaultEntryByteThreshold = 1 << 20 // 1MiB + + // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. + DefaultBufferedByteLimit = 1 << 30 // 1GiB +) + +// For testing: +var now = time.Now + +// ErrOverflow signals that the number of buffered entries for a Logger +// exceeds its BufferLimit. +var ErrOverflow = bundler.ErrOverflow + +// ErrOversizedEntry signals that an entry's size exceeds the maximum number of +// bytes that will be sent in a single call to the logging service. +var ErrOversizedEntry = bundler.ErrOversizedItem + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + client *vkit.Client // client for the logging service + projectID string + errc chan error // should be buffered to minimize dropped errors + donec chan struct{} // closed on Client.Close to close Logger bundlers + loggers sync.WaitGroup // so we can wait for loggers to close + closed bool + + mu sync.Mutex + nErrs int // number of errors we saw + lastErr error // last error we saw + + // OnError is called when an error occurs in a call to Log or Flush. The + // error may be due to an invalid Entry, an overflow because BufferLimit + // was reached (in which case the error will be ErrOverflow) or an error + // communicating with the logging service. OnError is called with errors + // from all Loggers. It is never called concurrently. OnError is expected + // to return quickly; if errors occur while OnError is running, some may + // not be reported. The default behavior is to call log.Printf. + // + // This field should be set only once, before any method of Client is called. + OnError func(err error) +} + +// NewClient returns a new logging client associated with the provided project ID. +// +// By default NewClient uses WriteScope. To use a different scope, call +// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + // Check for '/' in project ID to reserve the ability to support various owning resources, + // in the form "{Collection}/{Name}", for instance "organizations/my-org". + if strings.ContainsRune(projectID, '/') { + return nil, errors.New("logging: project ID contains '/'") + } + opts = append([]option.ClientOption{ + option.WithEndpoint(internal.ProdAddr), + option.WithScopes(WriteScope), + }, opts...) + c, err := vkit.NewClient(ctx, opts...) 
+ if err != nil { + return nil, err + } + c.SetGoogleClientInfo("gccl", version.Repo) + client := &Client{ + client: c, + projectID: projectID, + errc: make(chan error, defaultErrorCapacity), // create a small buffer for errors + donec: make(chan struct{}), + OnError: func(e error) { log.Printf("logging client: %v", e) }, + } + // Call the user's function synchronously, to make life easier for them. + go func() { + for err := range client.errc { + // This reference to OnError is memory-safe if the user sets OnError before + // calling any client methods. The reference happens before the first read from + // client.errc, which happens before the first write to client.errc, which + // happens before any call, which happens before the user sets OnError. + if fn := client.OnError; fn != nil { + fn(err) + } else { + log.Printf("logging (project ID %q): %v", projectID, err) + } + } + }() + return client, nil +} + +// parent returns the string used in many RPCs to denote the parent resource of the log. +func (c *Client) parent() string { + return "projects/" + c.projectID +} + +var unixZeroTimestamp *tspb.Timestamp + +func init() { + var err error + unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0)) + if err != nil { + panic(err) + } +} + +// Ping reports whether the client's connection to the logging service and the +// authentication configuration are valid. To accomplish this, Ping writes a +// log entry "ping" to a log named "ping". +func (c *Client) Ping(ctx context.Context) error { + ent := &logpb.LogEntry{ + Payload: &logpb.LogEntry_TextPayload{TextPayload: "ping"}, + Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both + InsertId: "ping", // necessary for the service to dedup these entries. + } + _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: internal.LogPath(c.parent(), "ping"), + Resource: globalResource(c.projectID), + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// error puts the error on the client's error channel +// without blocking, and records summary error info. +func (c *Client) error(err error) { + select { + case c.errc <- err: + default: + } + c.mu.Lock() + c.lastErr = err + c.nErrs++ + c.mu.Unlock() +} + +func (c *Client) extractErrorInfo() error { + var err error + c.mu.Lock() + if c.lastErr != nil { + err = fmt.Errorf("saw %d errors; last: %v", c.nErrs, c.lastErr) + c.nErrs = 0 + c.lastErr = nil + } + c.mu.Unlock() + return err +} + +// A Logger is used to write log messages to a single log. It can be configured +// with a log ID, common monitored resource, and a set of common labels. +type Logger struct { + client *Client + logName string // "projects/{projectID}/logs/{logID}" + stdLoggers map[Severity]*log.Logger + bundler *bundler.Bundler + + // Options + commonResource *mrpb.MonitoredResource + commonLabels map[string]string +} + +// A LoggerOption is a configuration option for a Logger. +type LoggerOption interface { + set(*Logger) +} + +// CommonResource sets the monitored resource associated with all log entries +// written from a Logger. If not provided, the resource is automatically +// detected based on the running environment. This value can be overridden +// per-entry by setting an Entry's Resource field. 
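// (Illustrative sketch, not code from this change.) The Logger options in
// this file are applied at construction time via Client.Logger. A hedged
// end-to-end example, with the project ID as a placeholder:
//
//	client, err := logging.NewClient(ctx, "my-project")
//	if err != nil {
//		// handle err
//	}
//	client.OnError = func(e error) { log.Println("logging:", e) }
//	lg := client.Logger("syslog",
//		logging.CommonLabels(map[string]string{"env": "prod"}),
//		logging.DelayThreshold(5*time.Second),
//	)
//	lg.Log(logging.Entry{Severity: logging.Info, Payload: "hello"})
//	defer client.Close() // flushes any buffered entries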
+func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } + +type commonResource struct{ *mrpb.MonitoredResource } + +func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } + +var detectedResource struct { + pb *mrpb.MonitoredResource + once sync.Once +} + +func detectResource() *mrpb.MonitoredResource { + detectedResource.once.Do(func() { + if !metadata.OnGCE() { + return + } + projectID, err := metadata.ProjectID() + if err != nil { + return + } + id, err := metadata.InstanceID() + if err != nil { + return + } + zone, err := metadata.Zone() + if err != nil { + return + } + detectedResource.pb = &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "project_id": projectID, + "instance_id": id, + "zone": zone, + }, + } + }) + return detectedResource.pb +} + +func globalResource(projectID string) *mrpb.MonitoredResource { + return &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": projectID, + }, + } +} + +// CommonLabels are labels that apply to all log entries written from a Logger, +// so that you don't have to repeat them in each log entry's Labels field. If +// any of the log entries contains a (key, value) with the same key that is in +// CommonLabels, then the entry's (key, value) overrides the one in +// CommonLabels. +func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } + +type commonLabels map[string]string + +func (c commonLabels) set(l *Logger) { l.commonLabels = c } + +// DelayThreshold is the maximum amount of time that an entry should remain +// buffered in memory before a call to the logging service is triggered. Larger +// values of DelayThreshold will generally result in fewer calls to the logging +// service, while increasing the risk that log entries will be lost if the +// process crashes. +// The default is DefaultDelayThreshold. +func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } + +type delayThreshold time.Duration + +func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } + +// EntryCountThreshold is the maximum number of entries that will be buffered +// in memory before a call to the logging service is triggered. Larger values +// will generally result in fewer calls to the logging service, while +// increasing both memory consumption and the risk that log entries will be +// lost if the process crashes. +// The default is DefaultEntryCountThreshold. +func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } + +type entryCountThreshold int + +func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } + +// EntryByteThreshold is the maximum number of bytes of entries that will be +// buffered in memory before a call to the logging service is triggered. See +// EntryCountThreshold for a discussion of the tradeoffs involved in setting +// this option. +// The default is DefaultEntryByteThreshold. +func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } + +type entryByteThreshold int + +func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } + +// EntryByteLimit is the maximum number of bytes of entries that will be sent +// in a single call to the logging service. ErrOversizedEntry is returned if an +// entry exceeds EntryByteLimit. This option limits the size of a single RPC +// payload, to account for network or service issues with large RPCs. 
If +// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect. +// The default is zero, meaning there is no limit. +func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } + +type entryByteLimit int + +func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } + +// BufferedByteLimit is the maximum number of bytes that the Logger will keep +// in memory before returning ErrOverflow. This option limits the total memory +// consumption of the Logger (but note that each Logger has its own, separate +// limit). It is possible to reach BufferedByteLimit even if it is larger than +// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter +// two options may be enqueued (and hence occupying memory) while new log +// entries are being added. +// The default is DefaultBufferedByteLimit. +func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } + +type bufferedByteLimit int + +func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } + +// Logger returns a Logger that will write entries with the given log ID, such as +// "syslog". A log ID must be less than 512 characters long and can only +// include the following characters: upper and lower case alphanumeric +// characters: [A-Za-z0-9]; and punctuation characters: forward-slash, +// underscore, hyphen, and period. +func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { + r := detectResource() + if r == nil { + r = globalResource(c.projectID) + } + l := &Logger{ + client: c, + logName: internal.LogPath(c.parent(), logID), + commonResource: r, + } + // TODO(jba): determine the right context for the bundle handler. + ctx := context.TODO() + l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { + l.writeLogEntries(ctx, entries.([]*logpb.LogEntry)) + }) + l.bundler.DelayThreshold = DefaultDelayThreshold + l.bundler.BundleCountThreshold = DefaultEntryCountThreshold + l.bundler.BundleByteThreshold = DefaultEntryByteThreshold + l.bundler.BufferedByteLimit = DefaultBufferedByteLimit + for _, opt := range opts { + opt.set(l) + } + + l.stdLoggers = map[Severity]*log.Logger{} + for s := range severityName { + l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) + } + c.loggers.Add(1) + go func() { + defer c.loggers.Done() + <-c.donec + l.bundler.Flush() + }() + return l +} + +type severityWriter struct { + l *Logger + s Severity +} + +func (w severityWriter) Write(p []byte) (n int, err error) { + w.l.Log(Entry{ + Severity: w.s, + Payload: string(p), + }) + return len(p), nil +} + +// Close waits for all opened loggers to be flushed and closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + close(c.donec) // close Logger bundlers + c.loggers.Wait() // wait for all bundlers to flush and close + // Now there can be no more errors. + close(c.errc) // terminate error goroutine + // Prefer logging errors to close errors. + err := c.extractErrorInfo() + err2 := c.client.Close() + if err == nil { + err = err2 + } + c.closed = true + return err +} + +// Severity is the severity of the event described in a log entry. These +// guideline severity levels are ordered, with numerically smaller levels +// treated as less severe than numerically larger levels. +type Severity int + +const ( + // Default means the log entry has no assigned severity level. + Default = Severity(logtypepb.LogSeverity_DEFAULT) + // Debug means debug or trace information. 
+ Debug = Severity(logtypepb.LogSeverity_DEBUG) + // Info means routine information, such as ongoing status or performance. + Info = Severity(logtypepb.LogSeverity_INFO) + // Notice means normal but significant events, such as start up, shut down, or configuration. + Notice = Severity(logtypepb.LogSeverity_NOTICE) + // Warning means events that might cause problems. + Warning = Severity(logtypepb.LogSeverity_WARNING) + // Error means events that are likely to cause problems. + Error = Severity(logtypepb.LogSeverity_ERROR) + // Critical means events that cause more severe problems or brief outages. + Critical = Severity(logtypepb.LogSeverity_CRITICAL) + // Alert means a person must take an action immediately. + Alert = Severity(logtypepb.LogSeverity_ALERT) + // Emergency means one or more systems are unusable. + Emergency = Severity(logtypepb.LogSeverity_EMERGENCY) +) + +var severityName = map[Severity]string{ + Default: "Default", + Debug: "Debug", + Info: "Info", + Notice: "Notice", + Warning: "Warning", + Error: "Error", + Critical: "Critical", + Alert: "Alert", + Emergency: "Emergency", +} + +// String converts a severity level to a string. +func (v Severity) String() string { + // same as proto.EnumName + s, ok := severityName[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// ParseSeverity returns the Severity whose name equals s, ignoring case. It +// returns Default if no Severity matches. +func ParseSeverity(s string) Severity { + sl := strings.ToLower(s) + for sev, name := range severityName { + if strings.ToLower(name) == sl { + return sev + } + } + return Default +} + +// Entry is a log entry. +// See https://cloud.google.com/logging/docs/view/logs_index for more about entries. +type Entry struct { + // Timestamp is the time of the entry. If zero, the current time is used. + Timestamp time.Time + + // Severity is the entry's severity level. + // The zero value is Default. + Severity Severity + + // Payload must be either a string or something that + // marshals via the encoding/json package to a JSON object + // (and not any other type of JSON value). + Payload interface{} + + // Labels optionally specifies key/value labels for the log entry. + // The Logger.Log method takes ownership of this map. See Logger.CommonLabels + // for more about labels. + Labels map[string]string + + // InsertID is a unique ID for the log entry. If you provide this field, + // the logging service considers other log entries in the same log with the + // same ID as duplicates which can be removed. If omitted, the logging + // service will generate a unique ID for this log entry. Note that because + // this client retries RPCs automatically, it is possible (though unlikely) + // that an Entry without an InsertID will be written more than once. + InsertID string + + // HTTPRequest optionally specifies metadata about the HTTP request + // associated with this log entry, if applicable. It is optional. + HTTPRequest *HTTPRequest + + // Operation optionally provides information about an operation associated + // with the log entry, if applicable. + Operation *logpb.LogEntryOperation + + // LogName is the full log name, in the form + // "projects/{ProjectID}/logs/{LogID}". It is set by the client when + // reading entries. It is an error to set it when writing entries. + LogName string + + // Resource is the monitored resource associated with the entry. It is set + // by the client when reading entries. It is an error to set it when + // writing entries. 
+ Resource *mrpb.MonitoredResource + + // Trace is the resource name of the trace associated with the log entry, + // if any. If it contains a relative resource name, the name is assumed to + // be relative to //tracing.googleapis.com. + Trace string +} + +// HTTPRequest contains an http.Request as well as additional +// information about the request and its response. +type HTTPRequest struct { + // Request is the http.Request passed to the handler. + Request *http.Request + + // RequestSize is the size of the HTTP request message in bytes, including + // the request headers and the request body. + RequestSize int64 + + // Status is the response code indicating the status of the response. + // Examples: 200, 404. + Status int + + // ResponseSize is the size of the HTTP response message sent back to the client, in bytes, + // including the response headers and the response body. + ResponseSize int64 + + // Latency is the request processing latency on the server, from the time the request was + // received until the response was sent. + Latency time.Duration + + // LocalIP is the IP address (IPv4 or IPv6) of the origin server that the request + // was sent to. + LocalIP string + + // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the + // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". + RemoteIP string + + // CacheHit reports whether an entity was served from cache (with or without + // validation). + CacheHit bool + + // CacheValidatedWithOriginServer reports whether the response was + // validated with the origin server before being served from cache. This + // field is only meaningful if CacheHit is true. + CacheValidatedWithOriginServer bool +} + +func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { + if r == nil { + return nil + } + if r.Request == nil { + panic("HTTPRequest must have a non-nil Request") + } + u := *r.Request.URL + u.Fragment = "" + pb := &logtypepb.HttpRequest{ + RequestMethod: r.Request.Method, + RequestUrl: u.String(), + RequestSize: r.RequestSize, + Status: int32(r.Status), + ResponseSize: r.ResponseSize, + UserAgent: r.Request.UserAgent(), + ServerIp: r.LocalIP, + RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? + Referer: r.Request.Referer(), + CacheHit: r.CacheHit, + CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, + } + if r.Latency != 0 { + pb.Latency = ptypes.DurationProto(r.Latency) + } + return pb +} + +// toProtoStruct converts v, which must marshal into a JSON object, +// into a Google Struct proto. +func toProtoStruct(v interface{}) (*structpb.Struct, error) { + // Fast path: if v is already a *structpb.Struct, nothing to do. + if s, ok := v.(*structpb.Struct); ok { + return s, nil + } + // v is a Go struct that supports JSON marshalling. We want a Struct + // protobuf. Some day we may have a more direct way to get there, but right + // now the only way is to marshal the Go struct to JSON, unmarshal into a + // map, and then build the Struct proto from the map. 
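	// (Illustration, not part of the original.) Concretely: a value like
	// struct{ N int; S string }{3, "x"} marshals to {"N":3,"S":"x"}, which
	// unmarshals into map[string]interface{}{"N": float64(3), "S": "x"}
	// (encoding/json decodes every JSON number as float64), and
	// jsonMapToProtoStruct below then emits a NumberValue for "N" and a
	// StringValue for "S".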
+ jb, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("logging: json.Marshal: %v", err) + } + var m map[string]interface{} + err = json.Unmarshal(jb, &m) + if err != nil { + return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) + } + return jsonMapToProtoStruct(m), nil +} + +func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { + fields := map[string]*structpb.Value{} + for k, v := range m { + fields[k] = jsonValueToStructValue(v) + } + return &structpb.Struct{Fields: fields} +} + +func jsonValueToStructValue(v interface{}) *structpb.Value { + switch x := v.(type) { + case bool: + return &structpb.Value{Kind: &structpb.Value_BoolValue{BoolValue: x}} + case float64: + return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: x}} + case string: + return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: x}} + case nil: + return &structpb.Value{Kind: &structpb.Value_NullValue{}} + case map[string]interface{}: + return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: jsonMapToProtoStruct(x)}} + case []interface{}: + var vals []*structpb.Value + for _, e := range x { + vals = append(vals, jsonValueToStructValue(e)) + } + return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}} + default: + panic(fmt.Sprintf("bad type %T for JSON value", v)) + } +} + +// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow +// and will block, it is intended primarily for debugging or critical errors. +// Prefer Log for most uses. +// TODO(jba): come up with a better name (LogNow?) or eliminate. +func (l *Logger) LogSync(ctx context.Context, e Entry) error { + ent, err := toLogEntry(e) + if err != nil { + return err + } + _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// Log buffers the Entry for output to the logging service. It never blocks. +func (l *Logger) Log(e Entry) { + ent, err := toLogEntry(e) + if err != nil { + l.client.error(err) + return + } + if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { + l.client.error(err) + } +} + +// Flush blocks until all currently buffered log entries are sent. +// +// If any errors occurred since the last call to Flush from any Logger, or the +// creation of the client if this is the first call, then Flush returns a non-nil +// error with summary information about the errors. This information is unlikely to +// be actionable. For more accurate error reporting, set Client.OnError. +func (l *Logger) Flush() error { + l.bundler.Flush() + return l.client.extractErrorInfo() +} + +func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) { + req := &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: entries, + } + _, err := l.client.client.WriteLogEntries(ctx, req) + if err != nil { + l.client.error(err) + } +} + +// StandardLogger returns a *log.Logger for the provided severity. +// +// This method is cheap. A single log.Logger is pre-allocated for each +// severity level in each Logger. Callers may mutate the returned log.Logger +// (for example by calling SetFlags or SetPrefix). 
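// (Illustrative sketch, not code from this change.) Each Write on the
// returned *log.Logger is buffered as one Entry at the chosen severity, so
// existing log call sites can be pointed at the service unchanged. Assuming
// lg is a *Logger obtained from Client.Logger:
//
//	errlog := lg.StandardLogger(logging.Error)
//	errlog.Println("disk almost full") // one buffered Error-severity entry
//	if err := lg.Flush(); err != nil {
//		// summary of errors seen since the last Flush
//	}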
+func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] } + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +func toLogEntry(e Entry) (*logpb.LogEntry, error) { + if e.LogName != "" { + return nil, errors.New("logging: Entry.LogName should be not be set when writing") + } + t := e.Timestamp + if t.IsZero() { + t = now() + } + ts, err := ptypes.TimestampProto(t) + if err != nil { + return nil, err + } + ent := &logpb.LogEntry{ + Timestamp: ts, + Severity: logtypepb.LogSeverity(e.Severity), + InsertId: e.InsertID, + HttpRequest: fromHTTPRequest(e.HTTPRequest), + Operation: e.Operation, + Labels: e.Labels, + Trace: e.Trace, + } + + switch p := e.Payload.(type) { + case string: + ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p} + default: + s, err := toProtoStruct(p) + if err != nil { + return nil, err + } + ent.Payload = &logpb.LogEntry_JsonPayload{JsonPayload: s} + } + return ent, nil +} diff --git a/vendor/cloud.google.com/go/logging/logging_test.go b/vendor/cloud.google.com/go/logging/logging_test.go new file mode 100644 index 0000000..da59a9d --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging_test.go @@ -0,0 +1,484 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): test that OnError is getting called appropriately. + +package logging_test + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + + cinternal "cloud.google.com/go/internal" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/logging" + ltesting "cloud.google.com/go/logging/internal/testing" + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" +) + +const testLogIDPrefix = "GO-LOGGING-CLIENT/TEST-LOG" + +var uids = testutil.NewUIDSpace(testLogIDPrefix) + +var ( + client *logging.Client + aclient *logadmin.Client + testProjectID string + testLogID string + testFilter string + errorc chan error + ctx context.Context + + // Adjust the fields of a FullEntry received from the production service + // before comparing it with the expected result. We can't correctly + // compare certain fields, like times or server-generated IDs. + clean func(*logging.Entry) + + // Create a new client with the given project ID. + newClients func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) +) + +func testNow() time.Time { + return time.Unix(1000, 0) +} + +// If true, this test is using the production service, not a fake. 
+var integrationTest bool + +func TestMain(m *testing.M) { + flag.Parse() // needed for testing.Short() + ctx = context.Background() + testProjectID = testutil.ProjID() + errorc = make(chan error, 100) + if testProjectID == "" || testing.Short() { + integrationTest = false + if testProjectID != "" { + log.Print("Integration tests skipped in short mode (using fake instead)") + } + testProjectID = "PROJECT_ID" + clean = func(e *logging.Entry) { + // Remove the insert ID for consistency with the integration test. + e.InsertID = "" + } + + addr, err := ltesting.NewServer() + if err != nil { + log.Fatalf("creating fake server: %v", err) + } + logging.SetNow(testNow) + + newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + log.Fatalf("dialing %q: %v", addr, err) + } + c, err := logging.NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + ac, err := logadmin.NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + return c, ac + } + + } else { + integrationTest = true + clean = func(e *logging.Entry) { + // We cannot compare timestamps, so set them to the test time. + // Also, remove the insert ID added by the service. + e.Timestamp = testNow().UTC() + e.InsertID = "" + } + ts := testutil.TokenSource(ctx, logging.AdminScope) + if ts == nil { + log.Fatal("The project key must be set. See CONTRIBUTING.md for details") + } + log.Printf("running integration tests with project %s", testProjectID) + newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) { + c, err := logging.NewClient(ctx, projectID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + ac, err := logadmin.NewClient(ctx, projectID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + return c, ac + } + + } + client, aclient = newClients(ctx, testProjectID) + client.OnError = func(e error) { errorc <- e } + + exit := m.Run() + client.Close() + os.Exit(exit) +} + +func initLogs(ctx context.Context) { + testLogID = uids.New() + testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID, + strings.Replace(testLogID, "/", "%2F", -1)) +} + +// Testing of Logger.Log is done in logadmin_test.go, TestEntries. + +func TestLogSync(t *testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + lg := client.Logger(testLogID) + err := lg.LogSync(ctx, logging.Entry{Payload: "hello"}) + if err != nil { + t.Fatal(err) + } + err = lg.LogSync(ctx, logging.Entry{Payload: "goodbye"}) + if err != nil { + t.Fatal(err) + } + // Allow overriding the MonitoredResource. 
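+ // An Entry.Resource set here should take precedence over the logger's
+ // common resource; that is what this call checks.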
+ err = lg.LogSync(ctx, logging.Entry{Payload: "mr", Resource: &mrpb.MonitoredResource{Type: "global"}})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := []*logging.Entry{
+ entryForTesting("hello"),
+ entryForTesting("goodbye"),
+ entryForTesting("mr"),
+ }
+ var got []*logging.Entry
+ ok := waitFor(func() bool {
+ got, err = allTestLogEntries(ctx)
+ if err != nil {
+ t.Log("fetching log entries: ", err)
+ return false
+ }
+ return len(got) == len(want)
+ })
+ if !ok {
+ t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want))
+ }
+ if msg, ok := compareEntries(got, want); !ok {
+ t.Error(msg)
+ }
+}
+
+func TestLogAndEntries(t *testing.T) {
+ initLogs(ctx) // Generate new testLogID
+ ctx := context.Background()
+ payloads := []string{"p1", "p2", "p3", "p4", "p5"}
+ lg := client.Logger(testLogID)
+ for _, p := range payloads {
+ // Use the insert ID to guarantee iteration order.
+ lg.Log(logging.Entry{Payload: p, InsertID: p})
+ }
+ lg.Flush()
+ var want []*logging.Entry
+ for _, p := range payloads {
+ want = append(want, entryForTesting(p))
+ }
+ var got []*logging.Entry
+ ok := waitFor(func() bool {
+ var err error
+ got, err = allTestLogEntries(ctx)
+ if err != nil {
+ t.Log("fetching log entries: ", err)
+ return false
+ }
+ return len(got) == len(want)
+ })
+ if !ok {
+ t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want))
+ }
+ if msg, ok := compareEntries(got, want); !ok {
+ t.Error(msg)
+ }
+}
+
+// compareEntries compares the list of got Entries against the expected list,
+// checking most fields. compareEntries does not compare:
+// - HTTPRequest
+// - Operation
+// - Resource
+func compareEntries(got, want []*logging.Entry) (string, bool) {
+ if len(got) != len(want) {
+ return fmt.Sprintf("got %d entries, want %d", len(got), len(want)), false
+ }
+ for i := range got {
+ if !compareEntry(got[i], want[i]) {
+ return fmt.Sprintf("#%d:\ngot %+v\nwant %+v", i, got[i], want[i]), false
+ }
+ }
+ return "", true
+}
+
+func compareEntry(got, want *logging.Entry) bool {
+ if got.Timestamp.Unix() != want.Timestamp.Unix() {
+ return false
+ }
+
+ if got.Severity != want.Severity {
+ return false
+ }
+
+ if !ltesting.PayloadEqual(got.Payload, want.Payload) {
+ return false
+ }
+ if !testutil.Equal(got.Labels, want.Labels) {
+ return false
+ }
+
+ if got.InsertID != want.InsertID {
+ return false
+ }
+
+ if got.LogName != want.LogName {
+ return false
+ }
+
+ return true
+}
+
+func entryForTesting(payload interface{}) *logging.Entry {
+ return &logging.Entry{
+ Timestamp: testNow().UTC(),
+ Payload: payload,
+ LogName: "projects/" + testProjectID + "/logs/" + testLogID,
+ Resource: &mrpb.MonitoredResource{Type: "global", Labels: map[string]string{"project_id": testProjectID}},
+ }
+}
+
+func countLogEntries(ctx context.Context, filter string) int {
+ it := aclient.Entries(ctx, logadmin.Filter(filter))
+ n := 0
+ for {
+ _, err := it.Next()
+ if err == iterator.Done {
+ return n
+ }
+ if err != nil {
+ log.Fatalf("counting log entries: %v", err)
+ }
+ n++
+ }
+}
+
+func allTestLogEntries(ctx context.Context) ([]*logging.Entry, error) {
+ var es []*logging.Entry
+ it := aclient.Entries(ctx, logadmin.Filter(testFilter))
+ for {
+ e, err := cleanNext(it)
+ switch err {
+ case nil:
+ es = append(es, e)
+ case iterator.Done:
+ return es, nil
+ default:
+ return nil, err
+ }
+ }
+}
+
+func cleanNext(it *logadmin.EntryIterator) (*logging.Entry, error) {
+ e, err := it.Next()
+ if err != nil {
+ return nil, err
+ }
+ clean(e)
+ return e, nil
+}
+
+func TestStandardLogger(t *testing.T) {
+ initLogs(ctx) //
Generate new testLogID + ctx := context.Background() + lg := client.Logger(testLogID) + slg := lg.StandardLogger(logging.Info) + + if slg != lg.StandardLogger(logging.Info) { + t.Error("There should be only one standard logger at each severity.") + } + if slg == lg.StandardLogger(logging.Debug) { + t.Error("There should be a different standard logger for each severity.") + } + + slg.Print("info") + lg.Flush() + var got []*logging.Entry + ok := waitFor(func() bool { + var err error + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == 1 + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), 1) + } + if len(got) != 1 { + t.Fatalf("expected non-nil request with one entry; got:\n%+v", got) + } + if got, want := got[0].Payload.(string), "info\n"; got != want { + t.Errorf("payload: got %q, want %q", got, want) + } + if got, want := logging.Severity(got[0].Severity), logging.Info; got != want { + t.Errorf("severity: got %s, want %s", got, want) + } +} + +func TestSeverity(t *testing.T) { + if got, want := logging.Info.String(), "Info"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if got, want := logging.Severity(-99).String(), "-99"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestParseSeverity(t *testing.T) { + for _, test := range []struct { + in string + want logging.Severity + }{ + {"", logging.Default}, + {"whatever", logging.Default}, + {"Default", logging.Default}, + {"ERROR", logging.Error}, + {"Error", logging.Error}, + {"error", logging.Error}, + } { + got := logging.ParseSeverity(test.in) + if got != test.want { + t.Errorf("%q: got %s, want %s\n", test.in, got, test.want) + } + } +} + +func TestErrors(t *testing.T) { + initLogs(ctx) // Generate new testLogID + // Drain errors already seen. +loop: + for { + select { + case <-errorc: + default: + break loop + } + } + // Try to log something that can't be JSON-marshalled. + lg := client.Logger(testLogID) + lg.Log(logging.Entry{Payload: func() {}}) + // Expect an error from Flush. + err := lg.Flush() + if err == nil { + t.Fatal("expected error, got nil") + } +} + +type badTokenSource struct{} + +func (badTokenSource) Token() (*oauth2.Token, error) { + return &oauth2.Token{}, nil +} + +func TestPing(t *testing.T) { + // Ping twice, in case the service's InsertID logic messes with the error code. + ctx := context.Background() + // The global client should be valid. + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s: got %v, expected nil", testProjectID, err) + } + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err) + } + // nonexistent project + c, _ := newClients(ctx, testProjectID+"-BAD") + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project, #2: want error pinging logging api, got nil") + } + + // Bad creds. We cannot test this with the fake, since it doesn't do auth. 
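+ // badTokenSource (defined above) returns an empty oauth2.Token, so the
+ // production service should reject requests authenticated with it.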
+ if integrationTest { + c, err := logging.NewClient(ctx, testProjectID, option.WithTokenSource(badTokenSource{})) + if err != nil { + t.Fatal(err) + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds, #2: want error pinging logging api, got nil") + } + if err := c.Close(); err != nil { + t.Fatalf("error closing client: %v", err) + } + } +} + +func TestLogsAndDelete(t *testing.T) { + // This function tests both the Logs and DeleteLog methods. We only try to + // delete those logs that we can observe and that were generated by this + // test. This may not include the logs generated from the current test run, + // because the logging service is only eventually consistent. It's + // therefore possible that on some runs, this test will do nothing. + ctx := context.Background() + it := aclient.Logs(ctx) + nDeleted := 0 + for { + logID, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + if strings.HasPrefix(logID, testLogIDPrefix) { + if err := aclient.DeleteLog(ctx, logID); err != nil { + t.Fatalf("deleting %q: %v", logID, err) + } + nDeleted++ + } + } + t.Logf("deleted %d logs", nDeleted) +} + +// waitFor calls f repeatedly with exponential backoff, blocking until it returns true. +// It returns false after a while (if it times out). +func waitFor(f func() bool) bool { + // TODO(shadams): Find a better way to deflake these tests. + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + err := cinternal.Retry(ctx, + gax.Backoff{Initial: time.Second, Multiplier: 2}, + func() (bool, error) { return f(), nil }) + return err == nil +} diff --git a/vendor/cloud.google.com/go/logging/logging_unexported_test.go b/vendor/cloud.google.com/go/logging/logging_unexported_test.go new file mode 100644 index 0000000..9be566e --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging_unexported_test.go @@ -0,0 +1,229 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Tests that require access to unexported names of the logging package. 
+ +package logging + +import ( + "net/http" + "net/url" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" + structpb "github.com/golang/protobuf/ptypes/struct" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" +) + +func TestLoggerCreation(t *testing.T) { + const logID = "testing" + c := &Client{projectID: "PROJECT_ID"} + customResource := &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": "ANOTHER_PROJECT", + }, + } + defaultBundler := &bundler.Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultEntryCountThreshold, + BundleByteThreshold: DefaultEntryByteThreshold, + BundleByteLimit: 0, + BufferedByteLimit: DefaultBufferedByteLimit, + } + for _, test := range []struct { + options []LoggerOption + wantLogger *Logger + defaultResource bool + wantBundler *bundler.Bundler + }{ + { + options: nil, + wantLogger: &Logger{}, + defaultResource: true, + wantBundler: defaultBundler, + }, + { + options: []LoggerOption{ + CommonResource(nil), + CommonLabels(map[string]string{"a": "1"}), + }, + wantLogger: &Logger{ + commonResource: nil, + commonLabels: map[string]string{"a": "1"}, + }, + wantBundler: defaultBundler, + }, + { + options: []LoggerOption{CommonResource(customResource)}, + wantLogger: &Logger{commonResource: customResource}, + wantBundler: defaultBundler, + }, + { + options: []LoggerOption{ + DelayThreshold(time.Minute), + EntryCountThreshold(99), + EntryByteThreshold(17), + EntryByteLimit(18), + BufferedByteLimit(19), + }, + wantLogger: &Logger{}, + defaultResource: true, + wantBundler: &bundler.Bundler{ + DelayThreshold: time.Minute, + BundleCountThreshold: 99, + BundleByteThreshold: 17, + BundleByteLimit: 18, + BufferedByteLimit: 19, + }, + }, + } { + gotLogger := c.Logger(logID, test.options...) 
+ if got, want := gotLogger.commonResource, test.wantLogger.commonResource; !test.defaultResource && !proto.Equal(got, want) {
+ t.Errorf("%v: resource: got %v, want %v", test.options, got, want)
+ }
+ if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !testutil.Equal(got, want) {
+ t.Errorf("%v: commonLabels: got %v, want %v", test.options, got, want)
+ }
+ if got, want := gotLogger.bundler.DelayThreshold, test.wantBundler.DelayThreshold; got != want {
+ t.Errorf("%v: DelayThreshold: got %v, want %v", test.options, got, want)
+ }
+ if got, want := gotLogger.bundler.BundleCountThreshold, test.wantBundler.BundleCountThreshold; got != want {
+ t.Errorf("%v: BundleCountThreshold: got %v, want %v", test.options, got, want)
+ }
+ if got, want := gotLogger.bundler.BundleByteThreshold, test.wantBundler.BundleByteThreshold; got != want {
+ t.Errorf("%v: BundleByteThreshold: got %v, want %v", test.options, got, want)
+ }
+ if got, want := gotLogger.bundler.BundleByteLimit, test.wantBundler.BundleByteLimit; got != want {
+ t.Errorf("%v: BundleByteLimit: got %v, want %v", test.options, got, want)
+ }
+ if got, want := gotLogger.bundler.BufferedByteLimit, test.wantBundler.BufferedByteLimit; got != want {
+ t.Errorf("%v: BufferedByteLimit: got %v, want %v", test.options, got, want)
+ }
+ }
+}
+
+func TestToProtoStruct(t *testing.T) {
+ v := struct {
+ Foo string `json:"foo"`
+ Bar int `json:"bar,omitempty"`
+ Baz []float64 `json:"baz"`
+ Moo map[string]interface{} `json:"moo"`
+ }{
+ Foo: "foovalue",
+ Baz: []float64{1.1},
+ Moo: map[string]interface{}{
+ "a": 1,
+ "b": "two",
+ "c": true,
+ },
+ }
+
+ got, err := toProtoStruct(v)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &structpb.Struct{
+ Fields: map[string]*structpb.Value{
+ "foo": {Kind: &structpb.Value_StringValue{StringValue: v.Foo}},
+ "baz": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: []*structpb.Value{
+ {Kind: &structpb.Value_NumberValue{NumberValue: 1.1}},
+ }}}},
+ "moo": {Kind: &structpb.Value_StructValue{
+ StructValue: &structpb.Struct{
+ Fields: map[string]*structpb.Value{
+ "a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
+ "b": {Kind: &structpb.Value_StringValue{StringValue: "two"}},
+ "c": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
+ },
+ },
+ }},
+ },
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("got %+v\nwant %+v", got, want)
+ }
+
+ // Non-structs should fail to convert.
+ for _, v := range []interface{}{3, "foo", []int{1, 2, 3}} {
+ _, err := toProtoStruct(v)
+ if err == nil {
+ t.Errorf("%v: got nil, want error", v)
+ }
+ }
+
+ // Test fast path.
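+ // A value that is already a *structpb.Struct should be returned as-is;
+ // the identity check below confirms the JSON round trip was skipped.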
+ got, err = toProtoStruct(want)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != want {
+ t.Error("got and want should be identical, but are not")
+ }
+}
+
+func TestFromHTTPRequest(t *testing.T) {
+ const testURL = "http://example.com/path?q=1"
+ u, err := url.Parse(testURL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req := &HTTPRequest{
+ Request: &http.Request{
+ Method: "GET",
+ URL: u,
+ Header: map[string][]string{
+ "User-Agent": []string{"user-agent"},
+ "Referer": []string{"referer"},
+ },
+ },
+ RequestSize: 100,
+ Status: 200,
+ ResponseSize: 25,
+ Latency: 100 * time.Second,
+ LocalIP: "127.0.0.1",
+ RemoteIP: "10.0.1.1",
+ CacheHit: true,
+ CacheValidatedWithOriginServer: true,
+ }
+ got := fromHTTPRequest(req)
+ want := &logtypepb.HttpRequest{
+ RequestMethod: "GET",
+ RequestUrl: testURL,
+ RequestSize: 100,
+ Status: 200,
+ ResponseSize: 25,
+ Latency: &durpb.Duration{Seconds: 100},
+ UserAgent: "user-agent",
+ ServerIp: "127.0.0.1",
+ RemoteIp: "10.0.1.1",
+ Referer: "referer",
+ CacheHit: true,
+ CacheValidatedWithOriginServer: true,
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("got %+v\nwant %+v", got, want)
+ }
+}
+
+// Used by the tests in logging_test.
+func SetNow(f func() time.Time) {
+ now = f
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go
new file mode 100644
index 0000000..684d911
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package longrunning is an auto-generated package for the
+// Google Long Running Operations API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+//
+// Use the client at cloud.google.com/go/longrunning in preference to this.
+package longrunning // import "cloud.google.com/go/longrunning/autogen"
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{}
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
new file mode 100644
index 0000000..07fe43b
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
@@ -0,0 +1,34 @@
+// Copyright 2017, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package longrunning
+
+import (
+ longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+ "google.golang.org/grpc"
+)
+
+// InternalFromConn is for use by the Google Cloud Libraries only.
+//
+// InternalFromConn creates an OperationsClient from an available connection.
+func InternalFromConn(conn *grpc.ClientConn) *OperationsClient {
+ c := &OperationsClient{
+ conn: conn,
+ CallOptions: defaultOperationsCallOptions(),
+
+ operationsClient: longrunningpb.NewOperationsClient(conn),
+ }
+ c.SetGoogleClientInfo()
+ return c
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/mock_test.go b/vendor/cloud.google.com/go/longrunning/autogen/mock_test.go
new file mode 100644
index 0000000..ce7641c
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/mock_test.go
@@ -0,0 +1,381 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package longrunning
+
+import (
+ emptypb "github.com/golang/protobuf/ptypes/empty"
+ longrunningpb "google.golang.org/genproto/googleapis/longrunning"
+)
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "golang.org/x/net/context"
+ "google.golang.org/api/option"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ gstatus "google.golang.org/grpc/status"
+)
+
+var _ = io.EOF
+var _ = ptypes.MarshalAny
+var _ status.Status
+
+type mockOperationsServer struct {
+ // Embed for forward compatibility.
+ // Tests will keep working if more methods are added
+ // in the future.
+ longrunningpb.OperationsServer
+
+ reqs []proto.Message
+
+ // If set, all calls return this error.
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockOperationsServer) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest) (*longrunningpb.ListOperationsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.ListOperationsResponse), nil +} + +func (s *mockOperationsServer) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockOperationsServer) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockOperationsServer) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockOperations mockOperationsServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + longrunningpb.RegisterOperationsServer(serv, &mockOperations) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestOperationsGetOperation(t *testing.T) { + var name2 string = "name2-1052831874" + var done bool = true + var expectedResponse = &longrunningpb.Operation{ + Name: name2, + Done: done, + } + + mockOperations.err = nil + mockOperations.reqs = nil + + mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &longrunningpb.GetOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestOperationsGetOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockOperations.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &longrunningpb.GetOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestOperationsListOperations(t *testing.T) { + var nextPageToken string = "" + var operationsElement *longrunningpb.Operation = &longrunningpb.Operation{} + var operations = []*longrunningpb.Operation{operationsElement} + var expectedResponse = &longrunningpb.ListOperationsResponse{ + NextPageToken: nextPageToken, + Operations: operations, + } + + mockOperations.err = nil + mockOperations.reqs = nil + + mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) + + var name string = "name3373707" + var filter string = "filter-1274492040" + var request = &longrunningpb.ListOperationsRequest{ + Name: name, + Filter: filter, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListOperations(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Operations[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestOperationsListOperationsError(t *testing.T) { + errCode := codes.PermissionDenied + mockOperations.err = gstatus.Error(errCode, "test error") + + var name string = 
"name3373707" + var filter string = "filter-1274492040" + var request = &longrunningpb.ListOperationsRequest{ + Name: name, + Filter: filter, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListOperations(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestOperationsCancelOperation(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockOperations.err = nil + mockOperations.reqs = nil + + mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &longrunningpb.CancelOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestOperationsCancelOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockOperations.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &longrunningpb.CancelOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CancelOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestOperationsDeleteOperation(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockOperations.err = nil + mockOperations.reqs = nil + + mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) + + var name string = "name3373707" + var request = &longrunningpb.DeleteOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteOperation(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestOperationsDeleteOperationError(t *testing.T) { + errCode := codes.PermissionDenied + mockOperations.err = gstatus.Error(errCode, "test error") + + var name string = "name3373707" + var request = &longrunningpb.DeleteOperationRequest{ + Name: name, + } + + c, err := NewOperationsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteOperation(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go new file mode 100644 index 0000000..caa1b0b --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -0,0 +1,267 @@ +// Copyright 2017, Google LLC All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package longrunning + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// OperationsCallOptions contains the retry settings for each method of OperationsClient. +type OperationsCallOptions struct { + GetOperation []gax.CallOption + ListOperations []gax.CallOption + CancelOperation []gax.CallOption + DeleteOperation []gax.CallOption +} + +func defaultOperationsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("longrunning.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultOperationsCallOptions() *OperationsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &OperationsCallOptions{ + GetOperation: retry[[2]string{"default", "idempotent"}], + ListOperations: retry[[2]string{"default", "idempotent"}], + CancelOperation: retry[[2]string{"default", "idempotent"}], + DeleteOperation: retry[[2]string{"default", "idempotent"}], + } +} + +// OperationsClient is a client for interacting with Google Long Running Operations API. +type OperationsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + operationsClient longrunningpb.OperationsClient + + // The call options for this service. + CallOptions *OperationsCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewOperationsClient creates a new operations client. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return [Operation][google.longrunning.Operation] to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &OperationsClient{ + conn: conn, + CallOptions: defaultOperationsCallOptions(), + + operationsClient: longrunningpb.NewOperationsClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *OperationsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *OperationsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. +func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn't support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding below allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. +func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) + it := &OperationIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + var resp *longrunningpb.ListOperationsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Operations, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. 
The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, +// corresponding to Code.CANCELLED. +func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn't support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// OperationIterator manages a stream of *longrunningpb.Operation. +type OperationIterator struct { + items []*longrunningpb.Operation + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *OperationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
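+//
+// A typical loop (a sketch mirroring the package example for ListOperations):
+//
+//	for {
+//		op, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = op // TODO: Use op.
+//	}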
+func (it *OperationIterator) Next() (*longrunningpb.Operation, error) { + var item *longrunningpb.Operation + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *OperationIterator) bufLen() int { + return len(it.items) +} + +func (it *OperationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go new file mode 100644 index 0000000..f3180d5 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client_example_test.go @@ -0,0 +1,108 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package longrunning_test + +import ( + "cloud.google.com/go/longrunning/autogen" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +func ExampleNewOperationsClient() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleOperationsClient_GetOperation() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.GetOperationRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleOperationsClient_ListOperations() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.ListOperationsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListOperations(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleOperationsClient_CancelOperation() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.CancelOperationRequest{ + // TODO: Fill request struct fields. + } + err = c.CancelOperation(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleOperationsClient_DeleteOperation() { + ctx := context.Background() + c, err := longrunning.NewOperationsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &longrunningpb.DeleteOperationRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteOperation(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } +} diff --git a/vendor/cloud.google.com/go/longrunning/example_test.go b/vendor/cloud.google.com/go/longrunning/example_test.go new file mode 100644 index 0000000..c7b52ac --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/example_test.go @@ -0,0 +1,116 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +import ( + "fmt" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/longrunning" +) + +func bestMomentInHistory() (*Operation, error) { + t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2009-11-10 23:00:00 +0000 UTC") + if err != nil { + return nil, err + } + resp, err := ptypes.TimestampProto(t) + if err != nil { + return nil, err + } + respAny, err := ptypes.MarshalAny(resp) + if err != nil { + return nil, err + } + metaAny, err := ptypes.MarshalAny(ptypes.DurationProto(1 * time.Hour)) + return &Operation{ + proto: &pb.Operation{ + Name: "best-moment", + Done: true, + Metadata: metaAny, + Result: &pb.Operation_Response{ + Response: respAny, + }, + }, + }, err +} + +func ExampleOperation_Wait() { + // Complex computation, might take a long time. + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + var ts timestamp.Timestamp + err = op.Wait(context.TODO(), &ts) + if err != nil && !op.Done() { + fmt.Println("failed to fetch operation status", err) + } else if err != nil && op.Done() { + fmt.Println("operation completed with error", err) + } else { + fmt.Println(ptypes.TimestampString(&ts)) + } + // Output: + // 2009-11-10T23:00:00Z +} + +func ExampleOperation_Metadata() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + + // The operation might contain metadata. + // In this example, the metadata contains the estimated length of time + // the operation might take to complete. + var meta duration.Duration + if err := op.Metadata(&meta); err != nil { + // TODO: Handle err. + } + d, err := ptypes.Duration(&meta) + if err == ErrNoMetadata { + fmt.Println("no metadata") + } else if err != nil { + // TODO: Handle err. + } else { + fmt.Println(d) + } + // Output: + // 1h0m0s +} + +func ExampleOperation_Cancel() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + if err := op.Cancel(context.Background()); err != nil { + // TODO: Handle err. + } +} + +func ExampleOperation_Delete() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + if err := op.Delete(context.Background()); err != nil { + // TODO: Handle err. + } +} diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go new file mode 100644 index 0000000..3054d23 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/longrunning.go @@ -0,0 +1,181 @@ +// Copyright 2016 Google Inc. 
All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
+// See google.golang.org/genproto/googleapis/longrunning for its service definition.
+//
+// Users of the Google Cloud Libraries will typically not use this package directly.
+// Instead they will call functions returning Operations and call their methods.
+//
+// This package is still experimental and subject to change.
+package longrunning // import "cloud.google.com/go/longrunning"
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ gax "github.com/googleapis/gax-go"
+
+ "golang.org/x/net/context"
+
+ autogen "cloud.google.com/go/longrunning/autogen"
+ pb "google.golang.org/genproto/googleapis/longrunning"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata.
+var ErrNoMetadata = errors.New("operation contains no metadata")
+
+// Operation represents the result of an API call that may not be ready yet.
+type Operation struct {
+ c operationsClient
+ proto *pb.Operation
+}
+
+type operationsClient interface {
+ GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error)
+ CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error
+ DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error
+}
+
+// InternalNewOperation is for use by the Google Cloud Libraries only.
+//
+// InternalNewOperation returns a long-running operation, abstracting the raw pb.Operation.
+// The inner client must be connected to the server from which proto was received.
+func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation {
+ return &Operation{
+ c: inner,
+ proto: proto,
+ }
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service
+// from which the operation is created.
+func (op *Operation) Name() string {
+ return op.proto.Name
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *Operation) Done() bool {
+ return op.proto.Done
+}
+
+// Metadata unmarshals op's metadata into meta.
+// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified.
+func (op *Operation) Metadata(meta proto.Message) error {
+ if m := op.proto.Metadata; m != nil {
+ return ptypes.UnmarshalAny(m, meta)
+ }
+ return ErrNoMetadata
+}
+
+// Poll fetches the latest state of a long-running operation.
+//
+// If Poll fails, the error is returned and op is unmodified.
+// If Poll succeeds and the operation has completed with failure,
+// the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true; if resp != nil, the response of the operation
+// is stored in resp.
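+//
+// A minimal polling sketch (the response type mypb.Response is hypothetical;
+// use the proto.Message your API documents for the operation):
+//
+//	var resp mypb.Response // hypothetical response message
+//	if err := op.Poll(ctx, &resp); err != nil {
+//		// RPC failure, or the operation completed with an error.
+//	} else if op.Done() {
+//		// The operation succeeded and resp is populated.
+//	}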
+func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { + if !op.Done() { + p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...) + if err != nil { + return err + } + op.proto = p + } + if !op.Done() { + return nil + } + + switch r := op.proto.Result.(type) { + case *pb.Operation_Error: + // TODO (pongad): r.Details may contain further information + return grpc.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message) + case *pb.Operation_Response: + if resp == nil { + return nil + } + return ptypes.UnmarshalAny(r.Response, resp) + default: + return fmt.Errorf("unsupported result type %[1]T: %[1]v", r) + } +} + +// DefaultWaitInterval is the polling interval used by Operation.Wait. +const DefaultWaitInterval = 60 * time.Second + +// Wait is equivalent to WaitWithInterval using DefaultWaitInterval. +func (op *Operation) Wait(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { + return op.WaitWithInterval(ctx, resp, DefaultWaitInterval, opts...) +} + +// WaitWithInterval blocks until the operation is completed. +// If resp != nil, Wait stores the response in resp. +// WaitWithInterval polls every interval, except initially +// when it polls using exponential backoff. +// +// See documentation of Poll for error-handling information. +func (op *Operation) WaitWithInterval(ctx context.Context, resp proto.Message, interval time.Duration, opts ...gax.CallOption) error { + bo := gax.Backoff{ + Initial: 1 * time.Second, + Max: interval, + } + if bo.Max < bo.Initial { + bo.Max = bo.Initial + } + return op.wait(ctx, resp, &bo, gax.Sleep, opts...) +} + +type sleeper func(context.Context, time.Duration) error + +// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing. +func (op *Operation) wait(ctx context.Context, resp proto.Message, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error { + for { + if err := op.Poll(ctx, resp, opts...); err != nil { + return err + } + if op.Done() { + return nil + } + if err := sl(ctx, bo.Pause()); err != nil { + return err + } + } +} + +// Cancel starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// grpc.Code(error) == codes.Unimplemented. Clients can use +// Poll or other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, op.Poll returns an error +// with code Canceled. +func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error { + return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...) +} + +// Delete deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn't support this method, grpc.Code(error) == codes.Unimplemented. +func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...) 
+}
diff --git a/vendor/cloud.google.com/go/longrunning/longrunning_test.go b/vendor/cloud.google.com/go/longrunning/longrunning_test.go
new file mode 100644
index 0000000..3116c3d
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/longrunning_test.go
@@ -0,0 +1,215 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
+//
+// This package is still experimental and subject to change.
+package longrunning
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/golang/protobuf/ptypes/duration"
+ gax "github.com/googleapis/gax-go"
+
+ "golang.org/x/net/context"
+
+ pb "google.golang.org/genproto/googleapis/longrunning"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+type getterService struct {
+ operationsClient
+
+ // clock represents the fake current time of the service.
+ // It is the running sum of the durations we have slept.
+ clock time.Duration
+
+ // getTimes records the times at which GetOperation is called.
+ getTimes []time.Duration
+
+ // results are the fake results that GetOperation should return.
+ results []*pb.Operation +} + +func (s *getterService) GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error) { + i := len(s.getTimes) + s.getTimes = append(s.getTimes, s.clock) + if i >= len(s.results) { + return nil, errors.New("unexpected call") + } + return s.results[i], nil +} + +func (s *getterService) sleeper() sleeper { + return func(_ context.Context, d time.Duration) error { + s.clock += d + return nil + } +} + +func TestWait(t *testing.T) { + responseDur := ptypes.DurationProto(42 * time.Second) + responseAny, err := ptypes.MarshalAny(responseDur) + if err != nil { + t.Fatal(err) + } + + s := &getterService{ + results: []*pb.Operation{ + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + { + Name: "foo", + Done: true, + Result: &pb.Operation_Response{ + Response: responseAny, + }, + }, + }, + } + op := &Operation{ + c: s, + proto: &pb.Operation{Name: "foo"}, + } + if op.Done() { + t.Fatal("operation should not have completed yet") + } + + var resp duration.Duration + bo := gax.Backoff{ + Initial: 1 * time.Second, + Max: 3 * time.Second, + } + if err := op.wait(context.Background(), &resp, &bo, s.sleeper()); err != nil { + t.Fatal(err) + } + if !proto.Equal(&resp, responseDur) { + t.Errorf("response, got %v, want %v", resp, responseDur) + } + if !op.Done() { + t.Errorf("operation should have completed") + } + + maxWait := []time.Duration{ + 1 * time.Second, + 2 * time.Second, + 3 * time.Second, + 3 * time.Second, + 3 * time.Second, + } + for i := 0; i < len(s.getTimes)-1; i++ { + w := s.getTimes[i+1] - s.getTimes[i] + if mw := maxWait[i]; w > mw { + t.Errorf("backoff, waited %s, max %s", w, mw) + } + } +} + +func TestPollRequestError(t *testing.T) { + const opName = "foo" + + // All calls error. 
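+    // With no scripted results, every GetOperation call fails with "unexpected call".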
+    s := &getterService{}
+    op := &Operation{
+        c:     s,
+        proto: &pb.Operation{Name: opName},
+    }
+    if err := op.Poll(context.Background(), nil); err == nil {
+        t.Fatalf("Poll should error")
+    }
+    if n := op.Name(); n != opName {
+        t.Errorf("operation name, got %q, want %q", n, opName)
+    }
+    if op.Done() {
+        t.Errorf("operation should not have completed; we failed to fetch state")
+    }
+}
+
+func TestPollErrorResult(t *testing.T) {
+    const (
+        errCode = codes.NotFound
+        errMsg  = "my error"
+    )
+    op := &Operation{
+        proto: &pb.Operation{
+            Name: "foo",
+            Done: true,
+            Result: &pb.Operation_Error{
+                Error: &status.Status{
+                    Code:    int32(errCode),
+                    Message: errMsg,
+                },
+            },
+        },
+    }
+    err := op.Poll(context.Background(), nil)
+    if got := grpc.Code(err); got != errCode {
+        t.Errorf("error code, want %s, got %s", errCode, got)
+    }
+    if got := grpc.ErrorDesc(err); got != errMsg {
+        t.Errorf("error message, want %s, got %s", errMsg, got)
+    }
+    if !op.Done() {
+        t.Errorf("operation should have completed")
+    }
+}
+
+type errService struct {
+    operationsClient
+    errCancel, errDelete error
+}
+
+func (s *errService) CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error {
+    return s.errCancel
+}
+
+func (s *errService) DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error {
+    return s.errDelete
+}
+
+func TestCancelReturnsError(t *testing.T) {
+    s := &errService{
+        errCancel: errors.New("cancel error"),
+    }
+    op := &Operation{
+        c:     s,
+        proto: &pb.Operation{Name: "foo"},
+    }
+    if got, want := op.Cancel(context.Background()), s.errCancel; got != want {
+        t.Errorf("cancel, got error %s, want %s", got, want)
+    }
+}
+
+func TestDeleteReturnsError(t *testing.T) {
+    s := &errService{
+        errDelete: errors.New("delete error"),
+    }
+    op := &Operation{
+        c:     s,
+        proto: &pb.Operation{Name: "foo"},
+    }
+    if got, want := op.Delete(context.Background()), s.errDelete; got != want {
+        t.Errorf("delete, got error %s, want %s", got, want)
+    }
+} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go new file mode 100644 index 0000000..7e8b55a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go @@ -0,0 +1,65 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
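+
+// This smoke test exercises the live API end to end; it is skipped in short
+// mode and whenever integration-test credentials are not configured.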
+ +package monitoring + +import ( + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestMetricServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewMetricClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedName string = MetricProjectPath(projectId) + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + iter := c.ListMonitoredResourceDescriptors(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go new file mode 100644 index 0000000..1cdb03a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package monitoring is an auto-generated package for the +// Stackdriver Monitoring API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Manages your Stackdriver Monitoring data and configurations. Most projects +// must be associated with a Stackdriver account, with a few exceptions as +// noted on the individual method pages. +package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go new file mode 100644 index 0000000..7b21d7f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -0,0 +1,373 @@ +// Copyright 2017, Google LLC All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// GroupCallOptions contains the retry settings for each method of GroupClient. +type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &GroupCallOptions{ + ListGroups: retry[[2]string{"default", "idempotent"}], + GetGroup: retry[[2]string{"default", "idempotent"}], + CreateGroup: retry[[2]string{"default", "non_idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + DeleteGroup: retry[[2]string{"default", "idempotent"}], + ListGroupMembers: retry[[2]string{"default", "idempotent"}], + } +} + +// GroupClient is a client for interacting with Stackdriver Monitoring API. +type GroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The call options for this service. + CallOptions *GroupCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewGroupClient creates a new group service client. +// +// The Group API lets you inspect and manage your +// groups (at google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. 
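+//
+// A minimal usage sketch (hedged; it assumes default application credentials
+// are available in the environment):
+//
+//	ctx := context.Background()
+//	client, err := monitoring.NewGroupClient(ctx)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer client.Close()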
+func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &GroupClient{ + conn: conn, + CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// GroupProjectPath returns the path for the project resource. +func GroupProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// GroupGroupPath returns the path for the group resource. +func GroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) + it := &GroupIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + var resp *monitoringpb.ListGroupsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateGroup creates a new group. 
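+// A hedged request sketch follows; "my-project", the display name, and the
+// filter are placeholder values, not part of the API:
+//
+//	req := &monitoringpb.CreateGroupRequest{
+//		Name: GroupProjectPath("my-project"),
+//		Group: &monitoringpb.Group{
+//			DisplayName: "GCE instances",
+//			Filter:      `resource.type = "gce_instance"`,
+//		},
+//	}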
+func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) + it := &MonitoredResourceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + var resp *monitoringpb.ListGroupMembersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. 
+ // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go new file mode 100644 index 0000000..b656258 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go @@ -0,0 +1,152 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
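+
+// The Example* functions below follow Go's testable-example convention: they
+// compile alongside the package's tests but are documentation skeletons, with
+// TODO markers where real request values and error handling belong.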
+ +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewGroupClient() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleGroupClient_ListGroups() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListGroupsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroups(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleGroupClient_GetGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_CreateGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_UpdateGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_DeleteGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteGroupRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleGroupClient_ListGroupMembers() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListGroupMembersRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroupMembers(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go new file mode 100644 index 0000000..433318f --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -0,0 +1,472 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// MetricCallOptions contains the retry settings for each method of MetricClient. +type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption +} + +func defaultMetricClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], + ListMetricDescriptors: retry[[2]string{"default", "idempotent"}], + GetMetricDescriptor: retry[[2]string{"default", "idempotent"}], + CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}], + DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}], + ListTimeSeries: retry[[2]string{"default", "idempotent"}], + CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}], + } +} + +// MetricClient is a client for interacting with Stackdriver Monitoring API. +type MetricClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The call options for this service. + CallOptions *MetricCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewMetricClient creates a new metric service client. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...) 
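+    // Caller-supplied opts are appended after the defaults, so later options
+    // can override the default endpoint and scopes.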
+ if err != nil { + return nil, err + } + c := &MetricClient{ + conn: conn, + CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// MetricProjectPath returns the path for the project resource. +func MetricProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + return "" + + "projects/" + + project + + "/metricDescriptors/" + + metricDescriptor + + "" +} + +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. +func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + return "" + + "projects/" + + project + + "/monitoredResourceDescriptors/" + + monitoredResourceDescriptor + + "" +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) + it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. 
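+//
+// A hedged request sketch ("my-project" and "gce_instance" are placeholders):
+//
+//	req := &monitoringpb.GetMonitoredResourceDescriptorRequest{
+//		Name: MetricMonitoredResourceDescriptorPath("my-project", "gce_instance"),
+//	}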
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) + it := &MetricDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + var resp *monitoringpb.ListMetricDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateMetricDescriptor creates a new metric descriptor. +// User-created metric descriptors define +// custom metrics (at /monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) 
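+    // The three-index slice caps capacity at length, so the append above
+    // allocates a fresh backing array instead of writing into the shared
+    // CallOptions slice.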
+ var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// custom metrics (at /monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) + it := &TimeSeriesIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + var resp *monitoringpb.ListTimeSeriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. 
+ // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
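+//
+// For example, the page size can be bounded before iterating (a sketch):
+//
+//	it.PageInfo().MaxSize = 50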
+func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go new file mode 100644 index 0000000..188ba8a --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go @@ -0,0 +1,192 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewMetricClient() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleMetricClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_GetMonitoredResourceDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetMonitoredResourceDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetMonitoredResourceDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_ListMetricDescriptors() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListMetricDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMetricDescriptors(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleMetricClient_GetMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_CreateMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_DeleteMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleMetricClient_ListTimeSeries() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListTimeSeriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTimeSeries(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_CreateTimeSeries() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateTimeSeriesRequest{ + // TODO: Fill request struct fields. + } + err = c.CreateTimeSeries(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go new file mode 100644 index 0000000..2b1058e --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go @@ -0,0 +1,1221 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
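+
+// The mock servers below implement the generated service interfaces
+// in-process: each records incoming requests and replays canned responses,
+// so client behavior can be exercised without a live monitoring endpoint.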
+ +package monitoring + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockGroupServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.GroupServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockGroupServer) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest) (*monitoringpb.ListGroupsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListGroupsResponse), nil +} + +func (s *mockGroupServer) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockGroupServer) ListGroupMembers(ctx context.Context, req 
*monitoringpb.ListGroupMembersRequest) (*monitoringpb.ListGroupMembersResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListGroupMembersResponse), nil +} + +type mockMetricServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.MetricServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricServer) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) (*monitoringpb.ListMonitoredResourceDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListMonitoredResourceDescriptorsResponse), nil +} + +func (s *mockMetricServer) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoredrespb.MonitoredResourceDescriptor), nil +} + +func (s *mockMetricServer) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (*monitoringpb.ListMetricDescriptorsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListMetricDescriptorsResponse), nil +} + +func (s *mockMetricServer) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*metricpb.MetricDescriptor), nil +} + +func (s *mockMetricServer) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*metricpb.MetricDescriptor), nil +} + +func (s *mockMetricServer) DeleteMetricDescriptor(ctx context.Context, req 
*monitoringpb.DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockMetricServer) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (*monitoringpb.ListTimeSeriesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListTimeSeriesResponse), nil +} + +func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockGroup mockGroupServer + mockMetric mockMetricServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + monitoringpb.RegisterGroupServiceServer(serv, &mockGroup) + monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestGroupServiceListGroups(t *testing.T) { + var nextPageToken string = "" + var groupElement *monitoringpb.Group = &monitoringpb.Group{} + var group = []*monitoringpb.Group{groupElement} + var expectedResponse = &monitoringpb.ListGroupsResponse{ + NextPageToken: nextPageToken, + Group: group, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupProjectPath("[PROJECT]") + var request = &monitoringpb.ListGroupsRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroups(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Group[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceListGroupsError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = GroupProjectPath("[PROJECT]") + var request = &monitoringpb.ListGroupsRequest{ + Name: 
formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroups(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceGetGroup(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name2, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.GetGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceGetGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.GetGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceCreateGroup(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name2, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupProjectPath("[PROJECT]") + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.CreateGroupRequest{ + Name: formattedName, + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceCreateGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + 
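+	// Note: when s.err is non-nil the mock returns it from every method, so
+	// the CreateGroup call below is expected to fail with this status code.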
var formattedName string = GroupProjectPath("[PROJECT]") + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.CreateGroupRequest{ + Name: formattedName, + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceUpdateGroup(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceUpdateGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceDeleteGroup(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.DeleteGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestGroupServiceDeleteGroupError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.DeleteGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteGroup(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + 
t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestGroupServiceListGroupMembers(t *testing.T) { + var nextPageToken string = "" + var totalSize int32 = -705419236 + var membersElement *monitoredrespb.MonitoredResource = &monitoredrespb.MonitoredResource{} + var members = []*monitoredrespb.MonitoredResource{membersElement} + var expectedResponse = &monitoringpb.ListGroupMembersResponse{ + NextPageToken: nextPageToken, + TotalSize: totalSize, + Members: members, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.ListGroupMembersRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupMembers(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Members[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceListGroupMembersError(t *testing.T) { + errCode := codes.PermissionDenied + mockGroup.err = gstatus.Error(errCode, "test error") + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.ListGroupMembersRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupMembers(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceListMonitoredResourceDescriptors(t *testing.T) { + var nextPageToken string = "" + var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} + var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} + var expectedResponse = &monitoringpb.ListMonitoredResourceDescriptorsResponse{ + NextPageToken: nextPageToken, + ResourceDescriptors: resourceDescriptors, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ResourceDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want 
== got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListMonitoredResourceDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceGetMonitoredResourceDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoredrespb.MonitoredResourceDescriptor{ + Name: name2, + Type: type_, + DisplayName: displayName, + Description: description, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricMonitoredResourceDescriptorPath("[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") + var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceGetMonitoredResourceDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricMonitoredResourceDescriptorPath("[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") + var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceListMetricDescriptors(t *testing.T) { + var nextPageToken string = "" + var metricDescriptorsElement *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var metricDescriptors = []*metricpb.MetricDescriptor{metricDescriptorsElement} + var expectedResponse = &monitoringpb.ListMetricDescriptorsResponse{ + NextPageToken: nextPageToken, + MetricDescriptors: metricDescriptors, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMetricDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) 
+ if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMetricDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.MetricDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListMetricDescriptorsError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMetricDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMetricDescriptors(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceGetMetricDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var unit string = "unit3594628" + var description string = "description-1724546052" + var displayName string = "displayName1615086568" + var expectedResponse = &metricpb.MetricDescriptor{ + Name: name2, + Type: type_, + Unit: unit, + Description: description, + DisplayName: displayName, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.GetMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceGetMetricDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.GetMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMetricDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceCreateMetricDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var unit string = "unit3594628" + var description string = "description-1724546052" + var displayName string = "displayName1615086568" + var expectedResponse = &metricpb.MetricDescriptor{ + Name: 
name2, + Type: type_, + Unit: unit, + Description: description, + DisplayName: displayName, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var request = &monitoringpb.CreateMetricDescriptorRequest{ + Name: formattedName, + MetricDescriptor: metricDescriptor, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceCreateMetricDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var request = &monitoringpb.CreateMetricDescriptorRequest{ + Name: formattedName, + MetricDescriptor: metricDescriptor, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateMetricDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceDeleteMetricDescriptor(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.DeleteMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricServiceDeleteMetricDescriptorError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.DeleteMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteMetricDescriptor(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestMetricServiceListTimeSeries(t *testing.T) { + var nextPageToken string = "" + var timeSeriesElement *monitoringpb.TimeSeries = &monitoringpb.TimeSeries{} + var timeSeries = []*monitoringpb.TimeSeries{timeSeriesElement} + var expectedResponse = &monitoringpb.ListTimeSeriesResponse{ + NextPageToken: 
nextPageToken, + TimeSeries: timeSeries, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var filter string = "filter-1274492040" + var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL + var request = &monitoringpb.ListTimeSeriesRequest{ + Name: formattedName, + Filter: filter, + Interval: interval, + View: view, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTimeSeries(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TimeSeries[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListTimeSeriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var filter string = "filter-1274492040" + var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL + var request = &monitoringpb.ListTimeSeriesRequest{ + Name: formattedName, + Filter: filter, + Interval: interval, + View: view, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTimeSeries(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceCreateTimeSeries(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var timeSeries []*monitoringpb.TimeSeries = nil + var request = &monitoringpb.CreateTimeSeriesRequest{ + Name: formattedName, + TimeSeries: timeSeries, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CreateTimeSeries(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricServiceCreateTimeSeriesError(t *testing.T) { + errCode := codes.PermissionDenied + mockMetric.err = gstatus.Error(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var timeSeries []*monitoringpb.TimeSeries = nil + var request = &monitoringpb.CreateTimeSeriesRequest{ + Name: formattedName, + TimeSeries: timeSeries, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CreateTimeSeries(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + 
t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/old-news.md b/vendor/cloud.google.com/go/old-news.md new file mode 100644 index 0000000..0c8a224 --- /dev/null +++ b/vendor/cloud.google.com/go/old-news.md @@ -0,0 +1,557 @@ +_September 28, 2017_ + +*v0.14.0* + +- bigquery BREAKING CHANGES: + - Standard SQL is the default for queries and views. + - `Table.Create` takes `TableMetadata` as a second argument, instead of + options. + - `Dataset.Create` takes `DatasetMetadata` as a second argument. + - `DatasetMetadata` field `ID` renamed to `FullID` + - `TableMetadata` field `ID` renamed to `FullID` + +- Other bigquery changes: + - The client will append a random suffix to a provided job ID if you set + `AddJobIDSuffix` to true in a job config. + - Listing jobs is supported. + - Better retry logic. + +- vision, language, speech: clients are now stable + +- monitoring: client is now beta + +- profiler: + - Rename InstanceName to Instance, ZoneName to Zone + - Auto-detect service name and version on AppEngine. + +_September 8, 2017_ + +*v0.13.0* + +- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these + options to continue using Legacy SQL after the client switches its default + to Standard SQL. + +- bigquery: Support for updating dataset labels. + +- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other + than the client's. DatasetsInProject is no longer needed and is deprecated. + +- bigtable: Fail ListInstances when any zones fail. + +- spanner: support decoding of slices of basic types (e.g. []string, []int64, + etc.) + +- logging/logadmin: UpdateSink no longer creates a sink if it is missing + (actually a change to the underlying service, not the client) + +- profiler: Service and ServiceVersion replace Target in Config. + +_August 22, 2017_ + +*v0.12.0* + +- pubsub: Subscription.Receive now uses streaming pull. + +- pubsub: add Client.TopicInProject to access topics in a different project + than the client. + +- errors: renamed errorreporting. The errors package will be removed shortly. + +- datastore: improved retry behavior. + +- bigquery: support updates to dataset metadata, with etags. + +- bigquery: add etag support to Table.Update (BREAKING: etag argument added). + +- bigquery: generate all job IDs on the client. + +- storage: support bucket lifecycle configurations. + + +_July 31, 2017_ + +*v0.11.0* + +- Clients for spanner, pubsub and video are now in beta. + +- New client for DLP. + +- spanner: performance and testing improvements. + +- storage: requester-pays buckets are supported. + +- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. + +- pubsub: bug fixes and other minor improvements + +_June 17, 2017_ + + +*v0.10.0* + +- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. + +- pubsub: Subscription.Receive now runs concurrently for higher throughput. + +- vision: cloud.google.com/go/vision is deprecated. Use +cloud.google.com/go/vision/apiv1 instead. + +- translation: now stable. + +- trace: several changes to the surface. See the link below. + +[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md) + + +_March 17, 2017_ + +Breaking Pubsub changes. +* Publish is now asynchronous +([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). 
+* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
+* Message.Done replaced with Message.Ack and Message.Nack.
+
+_February 14, 2017_
+
+Release of a client library for Spanner. See
+the
+[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
+
+Note that although the Spanner service is beta, the Go client library is alpha.
+
+_December 12, 2016_
+
+Beta release of BigQuery, Datastore, Logging and Storage. See the
+[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
+
+Also, BigQuery now supports structs. Read a row directly into a struct with
+`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
+You can also use field tags. See the
+[package documentation](https://godoc.org/cloud.google.com/go/bigquery)
+for details.
+
+_December 5, 2016_
+
+More changes to BigQuery:
+
+* The `ValueList` type was removed. It is no longer necessary. Instead of
+  ```go
+  var v ValueList
+  ... it.Next(&v) ...
+  ```
+  use
+
+  ```go
+  var v []Value
+  ... it.Next(&v) ...
+  ```
+
+* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
+  `ValueList` would append to the slice. Now each call resets the size to zero first.
+
+* Schema inference will infer the SQL type BYTES for a struct field of
+  type []byte. Previously it inferred STRING.
+
+* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
+  inference. BigQuery's integer type is INT64, and those types may hold values
+  that are not correctly represented in a 64-bit signed integer.
+
+* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
+  the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
+  package.
+
+_November 17, 2016_
+
+Change to BigQuery: values from INTEGER columns will now be returned as int64,
+not int. This will avoid errors arising from large values on 32-bit systems.
+
+_November 8, 2016_
+
+New datastore feature: datastore now encodes your nested Go structs as Entity values,
+instead of a flattened list of the embedded struct's fields.
+This means that you may now have twice-nested slices, e.g.
+```go
+type State struct {
+  Cities []struct{
+    Populations []int
+  }
+}
+```
+
+See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
+more details.
+
+_November 8, 2016_
+
+Breaking changes to datastore: contexts no longer hold namespaces; instead you
+must set a key's namespace explicitly. Also, key functions have been changed
+and renamed.
+
+* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
+  ```go
+  q := datastore.NewQuery("Kind").Namespace("ns")
+  ```
+
+* All the fields of Key are exported. That means you can construct any Key with a struct literal:
+  ```go
+  k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
+  ```
+
+* As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.
+
+* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
+  ```go
+  NewIncompleteKey(ctx, kind, parent)
+  ```
+  with
+  ```go
+  IncompleteKey(kind, parent)
+  ```
+  and if you do use namespaces, make sure you set the namespace on the returned key.
+
+* `NewKey` has been removed, replaced by `NameKey` and `IDKey`.
+  Replace
+  ```go
+  NewKey(ctx, kind, name, 0, parent)
+  NewKey(ctx, kind, "", id, parent)
+  ```
+  with
+  ```go
+  NameKey(kind, name, parent)
+  IDKey(kind, id, parent)
+  ```
+  and if you do use namespaces, make sure you set the namespace on the returned key.
+
+* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
+
+* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
+
+See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
+more details.
+
+_October 27, 2016_
+
+Breaking change to bigquery: `NewGCSReference` is now a function,
+not a method on `Client`.
+
+New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
+loading data into a table from a file or any `io.Reader`.
+
+_October 21, 2016_
+
+Breaking change to pubsub: removed `pubsub.Done`.
+
+Use `iterator.Done` instead, where `iterator` is the package
+`google.golang.org/api/iterator`.
+
+_October 19, 2016_
+
+Breaking changes to cloud.google.com/go/bigquery:
+
+* Client.Table and Client.OpenTable have been removed.
+  Replace
+  ```go
+  client.OpenTable("project", "dataset", "table")
+  ```
+  with
+  ```go
+  client.DatasetInProject("project", "dataset").Table("table")
+  ```
+
+* Client.CreateTable has been removed.
+  Replace
+  ```go
+  client.CreateTable(ctx, "project", "dataset", "table")
+  ```
+  with
+  ```go
+  client.DatasetInProject("project", "dataset").Table("table").Create(ctx)
+  ```
+
+* Dataset.ListTables has been replaced with Dataset.Tables.
+  Replace
+  ```go
+  tables, err := ds.ListTables(ctx)
+  ```
+  with
+  ```go
+  it := ds.Tables(ctx)
+  for {
+    table, err := it.Next()
+    if err == iterator.Done {
+      break
+    }
+    if err != nil {
+      // TODO: Handle error.
+    }
+    // TODO: use table.
+  }
+  ```
+
+* Client.Read has been replaced with Job.Read, Table.Read and Query.Read.
+  Replace
+  ```go
+  it, err := client.Read(ctx, job)
+  ```
+  with
+  ```go
+  it, err := job.Read(ctx)
+  ```
+  and similarly for reading from tables or queries.
+
+* The iterator returned from the Read methods is now named RowIterator. Its
+  behavior is closer to the other iterators in these libraries. It no longer
+  supports the Schema method; see the next item.
+  Replace
+  ```go
+  for it.Next(ctx) {
+    var vals ValueList
+    if err := it.Get(&vals); err != nil {
+      // TODO: Handle error.
+    }
+    // TODO: use vals.
+  }
+  if err := it.Err(); err != nil {
+    // TODO: Handle error.
+  }
+  ```
+  with
+  ```go
+  for {
+    var vals ValueList
+    err := it.Next(&vals)
+    if err == iterator.Done {
+      break
+    }
+    if err != nil {
+      // TODO: Handle error.
+    }
+    // TODO: use vals.
+  }
+  ```
+  Instead of the `RecordsPerRequest(n)` option, write
+  ```go
+  it.PageInfo().MaxSize = n
+  ```
+  Instead of the `StartIndex(i)` option, write
+  ```go
+  it.StartIndex = i
+  ```
+
+* ValueLoader.Load now takes a Schema in addition to a slice of Values.
+  Replace
+  ```go
+  func (vl *myValueLoader) Load(v []bigquery.Value)
+  ```
+  with
+  ```go
+  func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
+  ```
+
+* Table.Patch is replaced by Table.Update.
+  Replace
+  ```go
+  p := table.Patch()
+  p.Description("new description")
+  metadata, err := p.Apply(ctx)
+  ```
+  with
+  ```go
+  metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
+    Description: "new description",
+  })
+  ```
+
+* Client.Copy is replaced by separate methods for each of its four functions.
+  All options have been replaced by struct fields.
+
+  * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
+
+    Replace
+    ```go
+    client.Copy(ctx, table, gcsRef)
+    ```
+    with
+    ```go
+    table.LoaderFrom(gcsRef).Run(ctx)
+    ```
+    Instead of passing options to Copy, set fields on the Loader:
+    ```go
+    loader := table.LoaderFrom(gcsRef)
+    loader.WriteDisposition = bigquery.WriteTruncate
+    ```
+
+  * To extract data from a table into Google Cloud Storage, use
+    Table.ExtractorTo. Set fields on the returned Extractor instead of
+    passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, gcsRef, table)
+    ```
+    with
+    ```go
+    table.ExtractorTo(gcsRef).Run(ctx)
+    ```
+
+  * To copy data into a table from one or more other tables, use
+    Table.CopierFrom. Set fields on the returned Copier instead of passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, dstTable, srcTable)
+    ```
+    with
+    ```go
+    dstTable.CopierFrom(srcTable).Run(ctx)
+    ```
+
+  * To start a query job, create a Query and call its Run method. Set fields
+    on the query instead of passing options.
+
+    Replace
+    ```go
+    client.Copy(ctx, table, query)
+    ```
+    with
+    ```go
+    query.Run(ctx)
+    ```
+
+* Table.NewUploader has been renamed to Table.Uploader. Instead of options,
+  configure an Uploader by setting its fields.
+  Replace
+  ```go
+  u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
+  ```
+  with
+  ```go
+  u := table.Uploader()
+  u.IgnoreUnknownValues = true
+  ```
+
+_October 10, 2016_
+
+Breaking changes to cloud.google.com/go/storage:
+
+* AdminClient replaced by methods on Client.
+  Replace
+  ```go
+  adminClient.CreateBucket(ctx, bucketName, attrs)
+  ```
+  with
+  ```go
+  client.Bucket(bucketName).Create(ctx, projectID, attrs)
+  ```
+
+* BucketHandle.List replaced by BucketHandle.Objects.
+  Replace
+  ```go
+  for query != nil {
+    objs, err := bucket.List(d.ctx, query)
+    if err != nil { ... }
+    query = objs.Next
+    for _, obj := range objs.Results {
+      fmt.Println(obj)
+    }
+  }
+  ```
+  with
+  ```go
+  iter := bucket.Objects(d.ctx, query)
+  for {
+    obj, err := iter.Next()
+    if err == iterator.Done {
+      break
+    }
+    if err != nil { ... }
+    fmt.Println(obj)
+  }
+  ```
+  (The `iterator` package is at `google.golang.org/api/iterator`.)
+
+  Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
+
+  Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
+
+* ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
+  Replace
+  ```go
+  attrs, err := src.CopyTo(ctx, dst, nil)
+  ```
+  with
+  ```go
+  attrs, err := dst.CopierFrom(src).Run(ctx)
+  ```
+
+  Replace
+  ```go
+  attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
+  ```
+  with
+  ```go
+  c := dst.CopierFrom(src)
+  c.ContentType = "text/html"
+  attrs, err := c.Run(ctx)
+  ```
+
+* ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
+  Replace
+  ```go
+  attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
+  ```
+  with
+  ```go
+  attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
+  ```
+
+* ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
+  Replace
+  ```go
+  attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
+  ```
+  with
+  ```go
+  attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
+  ```
+
+* ObjectHandle.WithConditions replaced by ObjectHandle.If.
+  Replace
+  ```go
+  obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
+  ```
+  with
+  ```go
+  obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
+  ```
+
+  Replace
+  ```go
+  obj.WithConditions(storage.IfGenerationMatch(0))
+  ```
+  with
+  ```go
+  obj.If(storage.Conditions{DoesNotExist: true})
+  ```
+
+* `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
+
+_October 6, 2016_
+
+Package preview/logging deleted. Use logging instead.
+
+_September 27, 2016_
+
+Logging client replaced with preview version (see below).
+
+_September 8, 2016_
+
+* New clients for some of Google's Machine Learning APIs: Vision, Speech, and
+Natural Language.
+
+* Preview version of a new [Stackdriver Logging](https://cloud.google.com/logging/) client in
+[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
+This client uses gRPC as its transport layer, and supports log reading, sinks
+and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
+
diff --git a/vendor/cloud.google.com/go/profiler/busybench/busybench.go b/vendor/cloud.google.com/go/profiler/busybench/busybench.go
new file mode 100644
index 0000000..03b9589
--- /dev/null
+++ b/vendor/cloud.google.com/go/profiler/busybench/busybench.go
@@ -0,0 +1,100 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"cloud.google.com/go/profiler"
+	"compress/gzip"
+	"flag"
+	"log"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	service        = flag.String("service", "", "service name")
+	mutexProfiling = flag.Bool("mutex_profiling", false, "enable mutex profiling")
+)
+
+const duration = time.Minute * 10
+
+// busywork continuously generates 1MiB of random data and compresses it,
+// throwing away the result.
+func busywork(mu *sync.Mutex) {
+	ticker := time.NewTicker(duration)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			return
+		default:
+			mu.Lock()
+			busyworkOnce()
+			mu.Unlock()
+		}
+	}
+}
+
+func busyworkOnce() {
+	data := make([]byte, 1024*1024)
+	rand.Read(data)
+
+	var b bytes.Buffer
+	gz := gzip.NewWriter(&b)
+	if _, err := gz.Write(data); err != nil {
+		log.Printf("Failed to write to gzip stream: %v", err)
+		return
+	}
+	if err := gz.Flush(); err != nil {
+		log.Printf("Failed to flush to gzip stream: %v", err)
+		return
+	}
+	if err := gz.Close(); err != nil {
+		log.Printf("Failed to close gzip stream: %v", err)
+	}
+	// Throw away the result.
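+	// Note: random data is essentially incompressible, so each iteration
+	// forces gzip to do a full 1MiB of CPU work while busywork holds the
+	// mutex; only the resulting CPU and contention profile samples matter.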
+} + +func main() { + flag.Parse() + + if *service == "" { + log.Print("Service name must be configured using --service flag.") + } else if err := profiler.Start( + profiler.Config{ + Service: *service, + MutexProfiling: *mutexProfiling, + DebugLogging: true, + }); err != nil { + log.Printf("Failed to start the profiler: %v", err) + } else { + mu := new(sync.Mutex) + var wg sync.WaitGroup + wg.Add(5) + for i := 0; i < 5; i++ { + go func() { + defer wg.Done() + busywork(mu) + }() + } + wg.Wait() + } + + log.Printf("busybench finished profiling.") + // Do not exit, since the pod in the GKE test is set to always restart. + select {} +} diff --git a/vendor/cloud.google.com/go/profiler/integration-test.sh b/vendor/cloud.google.com/go/profiler/integration-test.sh new file mode 100644 index 0000000..928a825 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/integration-test.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Fail on any error. +set -eo pipefail + +# Display commands being run. +set -x + +cd git/gocloud + +# Run test only if profiler directory is touched. +profiler_test=false +for f in $(git diff-tree --no-commit-id --name-only -r HEAD); do + if [[ "$(dirname $f)" == "profiler" ]]; then + profiler_test=true + fi +done + +if [[ "$profiler_test" = false ]]; then + exit 0 +fi + +COMMIT=$(git rev-parse HEAD) + +# Set $GOPATH +export GOPATH="$HOME/go" +GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go +mkdir -p $GOCLOUD_HOME + +# Move code into $GOPATH and get dependencies +cp -R ./* $GOCLOUD_HOME +cd $GOCLOUD_HOME/internal/kokoro +# Don't print out encryption keys, etc +set +x +key=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_key") +iv=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_iv") +pass=$(cat "$KOKORO_ARTIFACTS_DIR/keystore/72523_encrypted_ba2d6f7723ed_pass") + +openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d +set -x + +export GOOGLE_APPLICATION_CREDENTIALS="$(pwd)/key.json" +export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" +export GCLOUD_TESTS_GOLANG_ZONE="us-west1-a" +export GCLOUD_TESTS_GOLANG_BUCKET="dulcet-port-762-go-cloud-profiler-test" + +cd $GOCLOUD_HOME/profiler +go get -t -tags=integration . +go test -timeout=60m -parallel=5 -tags=integration -run TestAgentIntegration -commit="$COMMIT" diff --git a/vendor/cloud.google.com/go/profiler/integration_test.go b/vendor/cloud.google.com/go/profiler/integration_test.go new file mode 100644 index 0000000..d874a0e --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/integration_test.go @@ -0,0 +1,700 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
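+
+// Note: this file is built only with the "integration" tag on Go 1.7+ (see
+// the build constraint below). integration-test.sh above runs it via:
+//
+//	go test -timeout=60m -parallel=5 -tags=integration -run TestAgentIntegration -commit="$COMMIT"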
+
+// +build integration,go1.7
+
+package profiler
+
+import (
+	"archive/zip"
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"text/template"
+	"time"
+
+	"cloud.google.com/go/storage"
+	"golang.org/x/build/kubernetes"
+	k8sapi "golang.org/x/build/kubernetes/api"
+	"golang.org/x/build/kubernetes/gke"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/google"
+	cloudbuild "google.golang.org/api/cloudbuild/v1"
+	compute "google.golang.org/api/compute/v1"
+	container "google.golang.org/api/container/v1"
+	"google.golang.org/api/googleapi"
+)
+
+var (
+	commit = flag.String("commit", "", "git commit to test")
+	runID  = time.Now().Unix()
+)
+
+const (
+	cloudScope        = "https://www.googleapis.com/auth/cloud-platform"
+	monitorWriteScope = "https://www.googleapis.com/auth/monitoring.write"
+	storageReadScope  = "https://www.googleapis.com/auth/devstorage.read_only"
+	// benchFinishString should be kept in sync with the finish string in busybench.
+	benchFinishString = "busybench finished profiling"
+)
+
+const startupTemplate = `
+#! /bin/bash
+
+# Fail on any error.
+set -eo pipefail
+
+# Display commands being run.
+set -x
+
+# Install git
+sudo apt-get update
+sudo apt-get -y -q install git-all
+
+# Install desired Go version
+mkdir -p /tmp/bin
+curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+chmod +x /tmp/bin/gimme
+export PATH=$PATH:/tmp/bin
+
+eval "$(gimme {{.GoVersion}})"
+
+# Set $GOPATH
+export GOPATH="$HOME/go"
+
+export GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go
+mkdir -p $GOCLOUD_HOME
+
+# Install agent
+git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME
+
+cd $GOCLOUD_HOME/profiler/busybench
+git reset --hard {{.Commit}}
+go get -v
+
+# Run benchmark with agent
+go run busybench.go --service="{{.Service}}" --mutex_profiling="{{.MutexProfiling}}"
+`
+
+const dockerfileFmt = `FROM golang
+RUN git clone https://code.googlesource.com/gocloud /go/src/cloud.google.com/go \
+    && cd /go/src/cloud.google.com/go/profiler/busybench && git reset --hard %s \
+    && go get -v && go install -v
+CMD ["busybench", "--service", "%s"]
+ `
+
+type testRunner struct {
+	client           *http.Client
+	startupTemplate  *template.Template
+	containerService *container.Service
+	computeService   *compute.Service
+	storageClient    *storage.Client
+}
+
+type profileResponse struct {
+	Profile     profileData   `json:"profile"`
+	NumProfiles int32         `json:"numProfiles"`
+	Deployments []interface{} `json:"deployments"`
+}
+
+type profileData struct {
+	Samples           []int32       `json:"samples"`
+	SampleMetrics     interface{}   `json:"sampleMetrics"`
+	DefaultMetricType string        `json:"defaultMetricType"`
+	TreeNodes         interface{}   `json:"treeNodes"`
+	Functions         functionArray `json:"functions"`
+	SourceFiles       interface{}   `json:"sourceFiles"`
+}
+
+type functionArray struct {
+	Name       []string `json:"name"`
+	Sourcefile []int32  `json:"sourceFile"`
+}
+
+func validateProfileData(rawData []byte, wantFunctionName string) error {
+	var pr profileResponse
+	if err := json.Unmarshal(rawData, &pr); err != nil {
+		return err
+	}
+
+	if pr.NumProfiles == 0 {
+		return fmt.Errorf("profile response contains zero profiles: %v", pr)
+	}
+
+	if len(pr.Deployments) == 0 {
+		return fmt.Errorf("profile response contains zero deployments: %v", pr)
+	}
+
+	if len(pr.Profile.Functions.Name) == 0 {
+		return fmt.Errorf("profile does not have function data")
+	}
+
+	for _, name := range pr.Profile.Functions.Name {
+		if strings.Contains(name, wantFunctionName) {
return nil + } + } + return fmt.Errorf("wanted function name %s not found in profile", wantFunctionName) +} + +type instanceConfig struct { + name string + service string + goVersion string + mutexProfiling bool +} + +func newInstanceConfigs() []instanceConfig { + return []instanceConfig{ + { + name: fmt.Sprintf("profiler-test-go19-%d", runID), + service: fmt.Sprintf("profiler-test-go19-%d-gce", runID), + goVersion: "1.9", + mutexProfiling: true, + }, + { + name: fmt.Sprintf("profiler-test-go18-%d", runID), + service: fmt.Sprintf("profiler-test-go18-%d-gce", runID), + goVersion: "1.8", + mutexProfiling: true, + }, + { + name: fmt.Sprintf("profiler-test-go17-%d", runID), + service: fmt.Sprintf("profiler-test-go17-%d-gce", runID), + goVersion: "1.7", + }, + { + name: fmt.Sprintf("profiler-test-go16-%d", runID), + service: fmt.Sprintf("profiler-test-go16-%d-gce", runID), + goVersion: "1.6", + }, + } +} + +type clusterConfig struct { + clusterName string + podName string + imageSourceName string + imageName string + service string +} + +func newClusterConfig(projectID string) clusterConfig { + return clusterConfig{ + clusterName: fmt.Sprintf("profiler-test-cluster-%d", runID), + podName: fmt.Sprintf("profiler-test-pod-%d", runID), + imageSourceName: fmt.Sprintf("profiler-test/%d/Dockerfile.zip", runID), + imageName: fmt.Sprintf("%s/profiler-test-%d", projectID, runID), + service: fmt.Sprintf("profiler-test-%d-gke", runID), + } +} + +func renderStartupScript(template *template.Template, inst instanceConfig) (string, error) { + var buf bytes.Buffer + err := template.Execute(&buf, + struct { + Service string + GoVersion string + Commit string + MutexProfiling bool + }{ + Service: inst.service, + GoVersion: inst.goVersion, + Commit: *commit, + MutexProfiling: inst.mutexProfiling, + }) + if err != nil { + return "", fmt.Errorf("failed to render startup script for %s: %v", inst.name, err) + } + + return buf.String(), nil +} + +func (tr *testRunner) startInstance(ctx context.Context, inst instanceConfig, projectID, zone string) error { + img, err := tr.computeService.Images.GetFromFamily("debian-cloud", "debian-9").Context(ctx).Do() + if err != nil { + return err + } + + startupScript, err := renderStartupScript(tr.startupTemplate, inst) + if err != nil { + return err + } + + _, err = tr.computeService.Instances.Insert(projectID, zone, &compute.Instance{ + MachineType: fmt.Sprintf("zones/%s/machineTypes/n1-standard-1", zone), + Name: inst.name, + Disks: []*compute.AttachedDisk{{ + AutoDelete: true, // delete the disk when the VM is deleted. 
+				Boot:       true,
+				Type:       "PERSISTENT",
+				Mode:       "READ_WRITE",
+				InitializeParams: &compute.AttachedDiskInitializeParams{
+					SourceImage: img.SelfLink,
+					DiskType:    fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/pd-standard", projectID, zone),
+				},
+			}},
+			NetworkInterfaces: []*compute.NetworkInterface{{
+				Network: fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/default", projectID),
+				AccessConfigs: []*compute.AccessConfig{{
+					Name: "External NAT",
+				}},
+			}},
+			Metadata: &compute.Metadata{
+				Items: []*compute.MetadataItems{{
+					Key:   "startup-script",
+					Value: googleapi.String(startupScript),
+				}},
+			},
+			ServiceAccounts: []*compute.ServiceAccount{{
+				Email: "default",
+				Scopes: []string{
+					monitorWriteScope,
+				},
+			}},
+		}).Do()
+
+	return err
+}
+
+func (tr *testRunner) pollForSerialOutput(ctx context.Context, projectID, zone, instanceName string) error {
+	var output string
+	defer func() {
+		log.Printf("Serial port output for %s:\n%s", instanceName, output)
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for profiling to finish on instance %s", instanceName)
+
+		case <-time.After(20 * time.Second):
+			resp, err := tr.computeService.Instances.GetSerialPortOutput(projectID, zone, instanceName).Context(ctx).Do()
+			if err != nil {
+				// Transient failure.
+				log.Printf("Transient error getting serial port output from instance %s (will retry): %v", instanceName, err)
+				continue
+			}
+
+			if output = resp.Contents; strings.Contains(output, benchFinishString) {
+				return nil
+			}
+		}
+	}
+}
+
+func (tr *testRunner) queryAndCheckProfile(service, startTime, endTime, profileType, projectID string) error {
+	queryURL := fmt.Sprintf("https://cloudprofiler.googleapis.com/v2/projects/%s/profiles:query", projectID)
+	const queryJsonFmt = `{"endTime": "%s", "profileType": "%s","startTime": "%s", "target": "%s"}`
+
+	queryRequest := fmt.Sprintf(queryJsonFmt, endTime, profileType, startTime, service)
+
+	resp, err := tr.client.Post(queryURL, "application/json", strings.NewReader(queryRequest))
+	if err != nil {
+		return fmt.Errorf("failed to query API: %v", err)
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return fmt.Errorf("failed to read response body: %v", err)
+	}
+
+	if err := validateProfileData(body, "busywork"); err != nil {
+		return fmt.Errorf("failed to validate profile: %v", err)
+	}
+
+	return nil
+}
+
+func (tr *testRunner) runTestOnGCE(ctx context.Context, t *testing.T, inst instanceConfig, projectID, zone string) {
+	if err := tr.startInstance(ctx, inst, projectID, zone); err != nil {
+		t.Fatalf("startInstance(%s) got error: %v", inst.name, err)
+	}
+	defer func() {
+		if _, err := tr.computeService.Instances.Delete(projectID, zone, inst.name).Context(ctx).Do(); err != nil {
+			t.Errorf("Instances.Delete(%s) got error: %v", inst.name, err)
+		}
+	}()
+
+	timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25)
+	defer cancel()
+	if err := tr.pollForSerialOutput(timeoutCtx, projectID, zone, inst.name); err != nil {
+		t.Fatalf("pollForSerialOutput(%s) got error: %v", inst.name, err)
+	}
+
+	timeNow := time.Now()
+	endTime := timeNow.Format(time.RFC3339)
+	startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
+
+	profileTypes := []string{"CPU", "HEAP", "THREADS"}
+	if inst.mutexProfiling {
+		profileTypes = append(profileTypes, "CONTENTION")
+	}
+	for _, pType := range profileTypes {
+		if err := tr.queryAndCheckProfile(inst.service, startTime,
+			endTime, pType, projectID); err != nil {
+			t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", inst.service, startTime, endTime, pType, err)
+		}
+	}
+}
+
+// createAndPublishDockerImage creates a docker image from source code in a GCS
+// bucket and pushes the image to Google Container Registry.
+func (tr *testRunner) createAndPublishDockerImage(ctx context.Context, projectID, sourceBucket, sourceObject, imageName string) error {
+	cloudbuildService, err := cloudbuild.New(tr.client)
+	if err != nil {
+		return fmt.Errorf("failed to create cloudbuild service: %v", err)
+	}
+
+	build := &cloudbuild.Build{
+		Source: &cloudbuild.Source{
+			StorageSource: &cloudbuild.StorageSource{
+				Bucket: sourceBucket,
+				Object: sourceObject,
+			},
+		},
+		Steps: []*cloudbuild.BuildStep{
+			{
+				Name: "gcr.io/cloud-builders/docker",
+				Args: []string{"build", "-t", imageName, "."},
+			},
+		},
+		Images: []string{imageName},
+	}
+
+	op, err := cloudbuildService.Projects.Builds.Create(projectID, build).Context(ctx).Do()
+	if err != nil {
+		return fmt.Errorf("failed to create image: %v", err)
+	}
+	opID := op.Name
+
+	// Wait for the image build to finish.
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for image creation")
+
+		case <-time.After(10 * time.Second):
+			op, err := cloudbuildService.Operations.Get(opID).Context(ctx).Do()
+			if err != nil {
+				log.Printf("Transient error getting operation (will retry): %v", err)
+				break
+			}
+			if op.Done {
+				log.Printf("Published image %s to Google Container Registry.", imageName)
+				return nil
+			}
+		}
+	}
+}
+
+type imageResponse struct {
+	Manifest map[string]interface{} `json:"manifest"`
+	Name     string                 `json:"name"`
+	Tags     []string               `json:"tags"`
+}
+
+// deleteDockerImage deletes a docker image from Google Container Registry.
+func (tr *testRunner) deleteDockerImage(ctx context.Context, imageName string) []error {
+	queryImageURL := fmt.Sprintf("https://gcr.io/v2/%s/tags/list", imageName)
+	resp, err := tr.client.Get(queryImageURL)
+	if err != nil {
+		return []error{fmt.Errorf("failed to list tags: %v", err)}
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return []error{err}
+	}
+	var ir imageResponse
+	if err := json.Unmarshal(body, &ir); err != nil {
+		return []error{err}
+	}
+
+	const deleteImageURLFmt = "https://gcr.io/v2/%s/manifests/%s"
+	var errs []error
+	for _, tag := range ir.Tags {
+		if err := deleteDockerImageResource(tr.client, fmt.Sprintf(deleteImageURLFmt, imageName, tag)); err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete tag %s: %v", tag, err))
+		}
+	}
+
+	for manifest := range ir.Manifest {
+		if err := deleteDockerImageResource(tr.client, fmt.Sprintf(deleteImageURLFmt, imageName, manifest)); err != nil {
+			errs = append(errs, fmt.Errorf("failed to delete manifest %s: %v", manifest, err))
+		}
+	}
+	return errs
+}
+
+func deleteDockerImageResource(client *http.Client, url string) error {
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return fmt.Errorf("failed to create request: %v", err)
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to delete resource: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted {
+		return fmt.Errorf("failed to delete resource: status code = %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func (tr *testRunner) createCluster(ctx context.Context, client *http.Client, projectID, zone, clusterName string) error {
+	request := &container.CreateClusterRequest{Cluster: &container.Cluster{
+		Name: clusterName,
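+		// Note: a small three-node cluster; nodes get devstorage.read_only
+		// so they can pull the test image that Cloud Build pushed to gcr.io.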
+		InitialNodeCount: 3,
+		NodeConfig: &container.NodeConfig{
+			OauthScopes: []string{
+				storageReadScope,
+			},
+		},
+	}}
+	op, err := tr.containerService.Projects.Zones.Clusters.Create(projectID, zone, request).Context(ctx).Do()
+	if err != nil {
+		return fmt.Errorf("failed to create cluster %s: %v", clusterName, err)
+	}
+	opID := op.Name
+
+	// Wait for the cluster to be created.
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for cluster creation")
+
+		case <-time.After(10 * time.Second):
+			op, err := tr.containerService.Projects.Zones.Operations.Get(projectID, zone, opID).Context(ctx).Do()
+			if err != nil {
+				log.Printf("Transient error getting operation (will retry): %v", err)
+				break
+			}
+			if op.Status == "DONE" {
+				log.Printf("Created cluster %s.", clusterName)
+				return nil
+			}
+			if op.Status == "ABORTING" {
+				return fmt.Errorf("create cluster operation was aborted")
+			}
+		}
+	}
+}
+
+func (tr *testRunner) deployContainer(ctx context.Context, kubernetesClient *kubernetes.Client, podName, imageName string) error {
+	pod := &k8sapi.Pod{
+		ObjectMeta: k8sapi.ObjectMeta{
+			Name: podName,
+		},
+		Spec: k8sapi.PodSpec{
+			Containers: []k8sapi.Container{
+				{
+					Name:  "profiler-test",
+					Image: fmt.Sprintf("gcr.io/%s:latest", imageName),
+				},
+			},
+		},
+	}
+	if _, err := kubernetesClient.RunLongLivedPod(ctx, pod); err != nil {
+		return fmt.Errorf("failed to run pod %s: %v", podName, err)
+	}
+	return nil
+}
+
+func (tr *testRunner) pollPodLog(ctx context.Context, kubernetesClient *kubernetes.Client, podName string) error {
+	var output string
+	defer func() {
+		log.Printf("Log for pod %s:\n%s", podName, output)
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timed out waiting for profiling to finish on container")
+
+		case <-time.After(20 * time.Second):
+			var err error
+			output, err = kubernetesClient.PodLog(ctx, podName)
+			if err != nil {
+				// Transient failure.
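+				// Pod logs can be unavailable for a while, e.g. before the
+				// container has started, so keep polling.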
+ log.Printf("Transient error getting log (will retry): %v", err) + continue + } + if strings.Contains(output, benchFinishString) { + return nil + } + } + } +} + +func (tr *testRunner) runTestOnGKE(ctx context.Context, t *testing.T, cfg clusterConfig, projectID, zone, bucket string) { + if err := tr.uploadImageSource(ctx, bucket, cfg.imageSourceName, *commit, cfg.service); err != nil { + t.Fatalf("uploadImageSource() got error: %v", err) + } + defer func() { + if err := tr.storageClient.Bucket(bucket).Object(cfg.imageSourceName).Delete(ctx); err != nil { + t.Errorf("Bucket(%s).Object(%s).Delete() got error: %v", bucket, cfg.imageSourceName, err) + } + }() + + createImageCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + if err := tr.createAndPublishDockerImage(createImageCtx, projectID, bucket, cfg.imageSourceName, fmt.Sprintf("gcr.io/%s", cfg.imageName)); err != nil { + t.Fatalf("createAndPublishDockerImage(%s) got error: %v", cfg.imageName, err) + } + defer func() { + for _, err := range tr.deleteDockerImage(ctx, cfg.imageName) { + t.Errorf("deleteDockerImage(%s) got error: %v", cfg.imageName, err) + } + }() + + createClusterCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + if err := tr.createCluster(createClusterCtx, tr.client, projectID, zone, cfg.clusterName); err != nil { + t.Fatalf("createCluster(%s) got error: %v", cfg.clusterName, err) + } + defer func() { + if _, err := tr.containerService.Projects.Zones.Clusters.Delete(projectID, zone, cfg.clusterName).Context(ctx).Do(); err != nil { + t.Errorf("Clusters.Delete(%s) got error: %v", cfg.clusterName, err) + } + }() + + kubernetesClient, err := gke.NewClient(ctx, cfg.clusterName, gke.OptZone(zone), gke.OptProject(projectID)) + if err != nil { + t.Fatalf("gke.NewClient() got error: %v", err) + } + + deployContainerCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + if err := tr.deployContainer(deployContainerCtx, kubernetesClient, cfg.podName, cfg.imageName); err != nil { + t.Fatalf("deployContainer(%s, %s) got error: %v", cfg.podName, cfg.imageName, err) + } + + pollLogCtx, cancel := context.WithTimeout(ctx, 20*time.Minute) + defer cancel() + if err := tr.pollPodLog(pollLogCtx, kubernetesClient, cfg.podName); err != nil { + t.Fatalf("pollPodLog(%s) got error: %v", cfg.podName, err) + } + + timeNow := time.Now() + endTime := timeNow.Format(time.RFC3339) + startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339) + for _, pType := range []string{"CPU", "HEAP", "THREADS"} { + if err := tr.queryAndCheckProfile(cfg.service, startTime, endTime, pType, projectID); err != nil { + t.Errorf("queryAndCheckProfile(%s, %s, %s, %s) got error: %v", cfg.service, startTime, endTime, pType, err) + } + } +} + +// uploadImageSource uploads source code for building docker image to GCS. 
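+// The object is a zip archive whose only entry is a Dockerfile generated
+// from dockerfileFmt with the commit under test and the service name.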
+func (tr *testRunner) uploadImageSource(ctx context.Context, bucket, objectName, commit, service string) error { + zipBuf := new(bytes.Buffer) + z := zip.NewWriter(zipBuf) + f, err := z.Create("Dockerfile") + if err != nil { + return err + } + + dockerfile := fmt.Sprintf(dockerfileFmt, commit, service) + if _, err := f.Write([]byte(dockerfile)); err != nil { + return err + } + + if err := z.Close(); err != nil { + return err + } + wc := tr.storageClient.Bucket(bucket).Object(objectName).NewWriter(ctx) + wc.ContentType = "application/zip" + wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} + if _, err := wc.Write(zipBuf.Bytes()); err != nil { + return err + } + return wc.Close() +} + +func TestAgentIntegration(t *testing.T) { + projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID") + if projectID == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_PROJECT_ID) got empty string") + } + + zone := os.Getenv("GCLOUD_TESTS_GOLANG_ZONE") + if zone == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_ZONE) got empty string") + } + + bucket := os.Getenv("GCLOUD_TESTS_GOLANG_BUCKET") + if bucket == "" { + t.Fatalf("Getenv(GCLOUD_TESTS_GOLANG_BUCKET) got empty string") + } + + if *commit == "" { + t.Fatal("commit flag is not set") + } + + ctx := context.Background() + + client, err := google.DefaultClient(ctx, cloudScope) + if err != nil { + t.Fatalf("failed to get default client: %v", err) + } + + storageClient, err := storage.NewClient(ctx) + if err != nil { + t.Fatalf("storage.NewClient() error: %v", err) + } + + computeService, err := compute.New(client) + if err != nil { + t.Fatalf("failed to initialize compute service: %v", err) + } + + containerService, err := container.New(client) + if err != nil { + t.Fatalf("failed to create container client: %v", err) + } + + template, err := template.New("startupScript").Parse(startupTemplate) + if err != nil { + t.Fatalf("failed to parse startup script template: %v", err) + } + tr := testRunner{ + computeService: computeService, + client: client, + startupTemplate: template, + containerService: containerService, + storageClient: storageClient, + } + + cluster := newClusterConfig(projectID) + t.Run(cluster.service, func(t *testing.T) { + t.Parallel() + tr.runTestOnGKE(ctx, t, cluster, projectID, zone, bucket) + }) + + instances := newInstanceConfigs() + for _, instance := range instances { + inst := instance // capture range variable + t.Run(inst.service, func(t *testing.T) { + t.Parallel() + tr.runTestOnGCE(ctx, t, inst, projectID, zone) + }) + } +} diff --git a/vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go b/vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go new file mode 100644 index 0000000..13e9e77 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/mocks/mock_profiler_client.go @@ -0,0 +1,78 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Automatically generated by MockGen. DO NOT EDIT! 
+// Source: google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2 (interfaces: ProfilerServiceClient) + +package mocks + +import ( + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + v2 "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" + grpc "google.golang.org/grpc" +) + +// Mock of ProfilerServiceClient interface +type MockProfilerServiceClient struct { + ctrl *gomock.Controller + recorder *_MockProfilerServiceClientRecorder +} + +// Recorder for MockProfilerServiceClient (not exported) +type _MockProfilerServiceClientRecorder struct { + mock *MockProfilerServiceClient +} + +func NewMockProfilerServiceClient(ctrl *gomock.Controller) *MockProfilerServiceClient { + mock := &MockProfilerServiceClient{ctrl: ctrl} + mock.recorder = &_MockProfilerServiceClientRecorder{mock} + return mock +} + +func (_m *MockProfilerServiceClient) EXPECT() *_MockProfilerServiceClientRecorder { + return _m.recorder +} + +func (_m *MockProfilerServiceClient) CreateProfile(_param0 context.Context, _param1 *v2.CreateProfileRequest, _param2 ...grpc.CallOption) (*v2.Profile, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "CreateProfile", _s...) + ret0, _ := ret[0].(*v2.Profile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockProfilerServiceClientRecorder) CreateProfile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateProfile", _s...) +} + +func (_m *MockProfilerServiceClient) UpdateProfile(_param0 context.Context, _param1 *v2.UpdateProfileRequest, _param2 ...grpc.CallOption) (*v2.Profile, error) { + _s := []interface{}{_param0, _param1} + for _, _x := range _param2 { + _s = append(_s, _x) + } + ret := _m.ctrl.Call(_m, "UpdateProfile", _s...) + ret0, _ := ret[0].(*v2.Profile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockProfilerServiceClientRecorder) UpdateProfile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + _s := append([]interface{}{arg0, arg1}, arg2...) + return _mr.mock.ctrl.RecordCall(_mr.mock, "UpdateProfile", _s...) +} diff --git a/vendor/cloud.google.com/go/profiler/mutex.go b/vendor/cloud.google.com/go/profiler/mutex.go new file mode 100644 index 0000000..84e92de --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/mutex.go @@ -0,0 +1,25 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package profiler + +import "runtime" + +func enableMutexProfiling() bool { + // One percent of mutex contention events are profiled. 
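+	// runtime.SetMutexProfileFraction(n) samples on average 1/n of the
+	// contention events, so n=100 corresponds to one percent.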
+ runtime.SetMutexProfileFraction(100) + return true +} diff --git a/vendor/cloud.google.com/go/profiler/mutex_go17.go b/vendor/cloud.google.com/go/profiler/mutex_go17.go new file mode 100644 index 0000000..4c7a7c0 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/mutex_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package profiler + +func enableMutexProfiling() bool { + return false +} diff --git a/vendor/cloud.google.com/go/profiler/profiler.go b/vendor/cloud.google.com/go/profiler/profiler.go new file mode 100644 index 0000000..5ea2bff --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/profiler.go @@ -0,0 +1,481 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profiler is a client for the Stackdriver Profiler service. +// +// This package is still experimental and subject to change. +// +// Usage example: +// +// import "cloud.google.com/go/profiler" +// ... +// if err := profiler.Start(profiler.Config{Service: "my-service"}); err != nil { +// // TODO: Handle error. +// } +// +// Calling Start will start a goroutine to collect profiles and upload to +// the profiler server, at the rhythm specified by the server. +// +// The caller must provide the service string in the config, and may provide +// other information as well. See Config for details. +// +// Profiler has CPU, heap and goroutine profiling enabled by default. Mutex +// profiling can be enabled in the config. Note that goroutine and mutex +// profiles are shown as "threads" and "contention" profiles in the profiler +// UI. 
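+//
+// For example, mutex profiling (reported as "contention" profiles in the
+// UI) can be enabled on Go 1.8 and later by setting MutexProfiling in the
+// config:
+//
+//   if err := profiler.Start(profiler.Config{Service: "my-service", MutexProfiling: true}); err != nil {
+//     // TODO: Handle error.
+//   }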
+package profiler
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"runtime"
+	"runtime/pprof"
+	"sync"
+	"time"
+
+	gcemd "cloud.google.com/go/compute/metadata"
+	"cloud.google.com/go/internal/version"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/google/pprof/profile"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	gtransport "google.golang.org/api/transport/grpc"
+	pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2"
+	edpb "google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpcmd "google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	config       Config
+	startOnce    sync.Once
+	mutexEnabled bool
+	// The functions below are stubbed to be overrideable for testing.
+	getProjectID     = gcemd.ProjectID
+	getInstanceName  = gcemd.InstanceName
+	getZone          = gcemd.Zone
+	startCPUProfile  = pprof.StartCPUProfile
+	stopCPUProfile   = pprof.StopCPUProfile
+	writeHeapProfile = pprof.WriteHeapProfile
+	sleep            = gax.Sleep
+	dialGRPC         = gtransport.Dial
+	onGCE            = gcemd.OnGCE
+)
+
+const (
+	apiAddress       = "cloudprofiler.googleapis.com:443"
+	xGoogAPIMetadata = "x-goog-api-client"
+	zoneNameLabel    = "zone"
+	versionLabel     = "version"
+	instanceLabel    = "instance"
+	scope            = "https://www.googleapis.com/auth/monitoring.write"
+
+	initialBackoff = time.Second
+	// Ensure the agent will recover within 1 hour.
+	maxBackoff        = time.Hour
+	backoffMultiplier = 1.3 // Backoff envelope increases by this factor on each retry.
+	retryInfoMetadata = "google.rpc.retryinfo-bin"
+)
+
+// Config is the profiler configuration.
+type Config struct {
+	// Service (or deprecated Target) must be provided to start the profiler.
+	// It specifies the name of the service under which the profiled data
+	// will be recorded and exposed at the Profiler UI for the project.
+	// You can specify an arbitrary string, but see Deployment.target at
+	// https://github.com/googleapis/googleapis/blob/master/google/devtools/cloudprofiler/v2/profiler.proto
+	// for restrictions.
+	// NOTE: The string should be the same across different replicas of
+	// your service so that the globally constant profiling rate is
+	// maintained. Do not put things like PID or unique pod ID in the name.
+	Service string
+
+	// ServiceVersion is an optional field specifying the version of the
+	// service. It can be an arbitrary string. Profiler profiles
+	// once per minute for each version of each service in each zone.
+	// ServiceVersion defaults to an empty string.
+	ServiceVersion string
+
+	// DebugLogging enables detailed debug logging from profiler. It
+	// defaults to false.
+	DebugLogging bool
+
+	// MutexProfiling enables mutex profiling. It defaults to false.
+	// Note that mutex profiling is not supported by Go versions older
+	// than Go 1.8.
+	MutexProfiling bool
+
+	// ProjectID is the Cloud Console project ID to use instead of
+	// the one read from the VM metadata server.
+	//
+	// Set this if you are running the agent in your local environment
+	// or anywhere else outside of Google Cloud Platform.
+	ProjectID string
+
+	// APIAddr is the endpoint to use to connect to the profiler
+	// agent API. Defaults to the production environment, overridable
+	// for testing.
+	APIAddr string
+
+	// Target is deprecated, use Service instead.
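+	// If both Service and Target are set, Service takes precedence
+	// (see initializeConfig).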
+	Target string
+
+	instance string
+	zone     string
+}
+
+// startError is the error that occurred during the
+// initialization and starting of the agent.
+var startError error
+
+// Start starts a goroutine to collect and upload profiles. The
+// caller must provide the service string in the config. See
+// Config for details. Start should only be called once. Any
+// additional calls will be ignored.
+func Start(cfg Config, options ...option.ClientOption) error {
+	startOnce.Do(func() {
+		startError = start(cfg, options...)
+	})
+	return startError
+}
+
+func start(cfg Config, options ...option.ClientOption) error {
+	if err := initializeConfig(cfg); err != nil {
+		debugLog("failed to initialize config: %v", err)
+		return err
+	}
+	if config.MutexProfiling {
+		if mutexEnabled = enableMutexProfiling(); !mutexEnabled {
+			return fmt.Errorf("mutex profiling is not supported by %s, requires Go 1.8 or later", runtime.Version())
+		}
+	}
+
+	ctx := context.Background()
+
+	opts := []option.ClientOption{
+		option.WithEndpoint(config.APIAddr),
+		option.WithScopes(scope),
+	}
+	opts = append(opts, options...)
+
+	conn, err := dialGRPC(ctx, opts...)
+	if err != nil {
+		debugLog("failed to dial gRPC: %v", err)
+		return err
+	}
+
+	a := initializeAgent(pb.NewProfilerServiceClient(conn))
+	go pollProfilerService(withXGoogHeader(ctx), a)
+	return nil
+}
+
+func debugLog(format string, e ...interface{}) {
+	if config.DebugLogging {
+		log.Printf(format, e...)
+	}
+}
+
+// agent polls the profiler server for instructions on behalf of a task,
+// and collects and uploads profiles as requested.
+type agent struct {
+	client        pb.ProfilerServiceClient
+	deployment    *pb.Deployment
+	profileLabels map[string]string
+	profileTypes  []pb.ProfileType
+}
+
+// abortedBackoffDuration retrieves the retry duration from gRPC trailing
+// metadata, which is set by the profiler server.
+func abortedBackoffDuration(md grpcmd.MD) (time.Duration, error) {
+	elem := md[retryInfoMetadata]
+	if len(elem) == 0 {
+		return 0, errors.New("no retry info")
+	}
+
+	var retryInfo edpb.RetryInfo
+	if err := proto.Unmarshal([]byte(elem[0]), &retryInfo); err != nil {
+		return 0, err
+	} else if d, err := ptypes.Duration(retryInfo.RetryDelay); err != nil {
+		return 0, err
+	} else {
+		if d < 0 {
+			return 0, errors.New("negative retry duration")
+		}
+		return d, nil
+	}
+}
+
+type retryer struct {
+	backoff gax.Backoff
+	md      grpcmd.MD
+}
+
+func (r *retryer) Retry(err error) (time.Duration, bool) {
+	st, _ := status.FromError(err)
+	if st != nil && st.Code() == codes.Aborted {
+		dur, err := abortedBackoffDuration(r.md)
+		if err == nil {
+			return dur, true
+		}
+		debugLog("failed to get backoff duration: %v", err)
+	}
+	return r.backoff.Pause(), true
+}
+
+// createProfile talks to the profiler server to create a profile. In
+// case of error, the goroutine will sleep and retry. The sleep duration
+// may be specified by the server; otherwise it is an exponentially
+// increasing value, bounded by maxBackoff.
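+// Note that the retryer above never reports a permanent failure, so this
+// call blocks until the server returns a profile.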
+func (a *agent) createProfile(ctx context.Context) *pb.Profile { + req := pb.CreateProfileRequest{ + Deployment: a.deployment, + ProfileType: a.profileTypes, + } + + var p *pb.Profile + md := grpcmd.New(map[string]string{}) + + gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + p, err = a.client.CreateProfile(ctx, &req, grpc.Trailer(&md)) + return err + }, gax.WithRetry(func() gax.Retryer { + return &retryer{ + backoff: gax.Backoff{ + Initial: initialBackoff, + Max: maxBackoff, + Multiplier: backoffMultiplier, + }, + md: md, + } + })) + + debugLog("successfully created profile %v", p.GetProfileType()) + return p +} + +func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) { + var prof bytes.Buffer + pt := p.GetProfileType() + + switch pt { + case pb.ProfileType_CPU: + duration, err := ptypes.Duration(p.Duration) + if err != nil { + debugLog("failed to get profile duration: %v", err) + return + } + if err := startCPUProfile(&prof); err != nil { + debugLog("failed to start CPU profile: %v", err) + return + } + sleep(ctx, duration) + stopCPUProfile() + case pb.ProfileType_HEAP: + if err := writeHeapProfile(&prof); err != nil { + debugLog("failed to write heap profile: %v", err) + return + } + case pb.ProfileType_THREADS: + if err := pprof.Lookup("goroutine").WriteTo(&prof, 0); err != nil { + debugLog("failed to create goroutine profile: %v", err) + return + } + case pb.ProfileType_CONTENTION: + duration, err := ptypes.Duration(p.Duration) + if err != nil { + debugLog("failed to get profile duration: %v", err) + return + } + if err := deltaMutexProfile(ctx, duration, &prof); err != nil { + debugLog("failed to create mutex profile: %v", err) + return + } + default: + debugLog("unexpected profile type: %v", pt) + return + } + + // Starting Go 1.9 the profiles are symbolized by runtime/pprof. + // TODO(jianqiaoli): Remove the symbolization code when we decide to + // stop supporting Go 1.8. + if !shouldAssumeSymbolized && pt != pb.ProfileType_CONTENTION { + if err := parseAndSymbolize(&prof); err != nil { + debugLog("failed to symbolize profile: %v", err) + } + } + + p.ProfileBytes = prof.Bytes() + p.Labels = a.profileLabels + req := pb.UpdateProfileRequest{Profile: p} + + // Upload profile, discard profile in case of error. + debugLog("start uploading profile") + if _, err := a.client.UpdateProfile(ctx, &req); err != nil { + debugLog("failed to upload profile: %v", err) + } +} + +// deltaMutexProfile writes mutex profile changes over a time period specified +// with 'duration' to 'prof'. +func deltaMutexProfile(ctx context.Context, duration time.Duration, prof *bytes.Buffer) error { + if !mutexEnabled { + return errors.New("mutex profiling is not enabled") + } + p0, err := mutexProfile() + if err != nil { + return err + } + sleep(ctx, duration) + p, err := mutexProfile() + if err != nil { + return err + } + + // TODO(jianqiaoli): Remove this check when github.com/google/pprof/issues/242 + // is fixed. + if len(p0.Mapping) > 0 { + p0.Scale(-1) + p, err = profile.Merge([]*profile.Profile{p0, p}) + if err != nil { + return err + } + } + + // The mutex profile is not symbolized by runtime.pprof until + // golang.org/issue/21474 is fixed in go1.10. 
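+	// Symbolize here so that contention profiles still carry function names
+	// on those older versions.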
+ symbolize(p) + return p.Write(prof) +} + +func mutexProfile() (*profile.Profile, error) { + p := pprof.Lookup("mutex") + if p == nil { + return nil, errors.New("mutex profiling is not supported") + } + var buf bytes.Buffer + if err := p.WriteTo(&buf, 0); err != nil { + return nil, err + } + return profile.Parse(&buf) +} + +// withXGoogHeader sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func withXGoogHeader(ctx context.Context, keyval ...string) context.Context { + kv := append([]string{"gl-go", version.Go(), "gccl", version.Repo}, keyval...) + kv = append(kv, "gax", gax.Version, "grpc", grpc.Version) + + md, _ := grpcmd.FromOutgoingContext(ctx) + md = md.Copy() + md[xGoogAPIMetadata] = []string{gax.XGoogHeader(kv...)} + return grpcmd.NewOutgoingContext(ctx, md) +} + +func initializeAgent(c pb.ProfilerServiceClient) *agent { + labels := map[string]string{} + if config.zone != "" { + labels[zoneNameLabel] = config.zone + } + if config.ServiceVersion != "" { + labels[versionLabel] = config.ServiceVersion + } + d := &pb.Deployment{ + ProjectId: config.ProjectID, + Target: config.Target, + Labels: labels, + } + + profileLabels := map[string]string{} + + if config.instance != "" { + profileLabels[instanceLabel] = config.instance + } + + profileTypes := []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS} + if mutexEnabled { + profileTypes = append(profileTypes, pb.ProfileType_CONTENTION) + } + + return &agent{ + client: c, + deployment: d, + profileLabels: profileLabels, + profileTypes: profileTypes, + } +} + +func initializeConfig(cfg Config) error { + config = cfg + + switch { + case config.Service != "": + config.Target = config.Service + case config.Target == "": + config.Target = os.Getenv("GAE_SERVICE") + } + + if config.Target == "" { + return errors.New("service name must be specified in the configuration") + } + + if config.ServiceVersion == "" { + config.ServiceVersion = os.Getenv("GAE_VERSION") + } + + if onGCE() { + var err error + if config.ProjectID == "" { + if config.ProjectID, err = getProjectID(); err != nil { + return fmt.Errorf("failed to get the project ID from Compute Engine: %v", err) + } + } + + if config.zone, err = getZone(); err != nil { + return fmt.Errorf("failed to get zone from Compute Engine: %v", err) + } + + if config.instance, err = getInstanceName(); err != nil { + return fmt.Errorf("failed to get instance from Compute Engine: %v", err) + } + + } else { + if config.ProjectID == "" { + return fmt.Errorf("project ID must be specified in the configuration if running outside of GCP") + } + } + + if config.APIAddr == "" { + config.APIAddr = apiAddress + } + return nil +} + +// pollProfilerService starts an endless loop to poll the profiler +// server for instructions, and collects and uploads profiles as +// requested. +func pollProfilerService(ctx context.Context, a *agent) { + for { + p := a.createProfile(ctx) + a.profileAndUpload(ctx, p) + } +} diff --git a/vendor/cloud.google.com/go/profiler/profiler_example_test.go b/vendor/cloud.google.com/go/profiler/profiler_example_test.go new file mode 100644 index 0000000..e961ee6 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/profiler_example_test.go @@ -0,0 +1,25 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profiler_test + +import ( + "cloud.google.com/go/profiler" +) + +func ExampleStart() { + if err := profiler.Start(profiler.Config{Service: "my-service", ServiceVersion: "v1"}); err != nil { + //TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/profiler/profiler_test.go b/vendor/cloud.google.com/go/profiler/profiler_test.go new file mode 100644 index 0000000..c506b0b --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/profiler_test.go @@ -0,0 +1,812 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profiler + +import ( + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "log" + "math/rand" + "os" + "runtime" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/profiler/mocks" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/google/pprof/profile" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + grpcmd "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + testProjectID = "test-project-ID" + testInstance = "test-instance" + testZone = "test-zone" + testTarget = "test-target" + testService = "test-service" + testSvcVersion = "test-service-version" + testProfileDuration = time.Second * 10 + testServerTimeout = time.Second * 15 +) + +func createTestDeployment() *pb.Deployment { + labels := map[string]string{ + zoneNameLabel: testZone, + versionLabel: testSvcVersion, + } + return &pb.Deployment{ + ProjectId: testProjectID, + Target: testService, + Labels: labels, + } +} + +func createTestAgent(psc pb.ProfilerServiceClient) *agent { + return &agent{ + client: psc, + deployment: createTestDeployment(), + profileLabels: map[string]string{instanceLabel: testInstance}, + profileTypes: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS}, + } +} + +func createTrailers(dur time.Duration) map[string]string { + b, _ := proto.Marshal(&edpb.RetryInfo{ + RetryDelay: ptypes.DurationProto(dur), + }) + return map[string]string{ + retryInfoMetadata: string(b), + } +} + +func TestCreateProfile(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mpc := mocks.NewMockProfilerServiceClient(ctrl) + a := createTestAgent(mpc) + p 
:= &pb.Profile{Name: "test_profile"} + wantRequest := pb.CreateProfileRequest{ + Deployment: a.deployment, + ProfileType: a.profileTypes, + } + + mpc.EXPECT().CreateProfile(ctx, gomock.Eq(&wantRequest), gomock.Any()).Times(1).Return(p, nil) + + gotP := a.createProfile(ctx) + + if !testutil.Equal(gotP, p) { + t.Errorf("CreateProfile() got wrong profile, got %v, want %v", gotP, p) + } +} + +func TestProfileAndUpload(t *testing.T) { + oldStartCPUProfile, oldStopCPUProfile, oldWriteHeapProfile, oldSleep := startCPUProfile, stopCPUProfile, writeHeapProfile, sleep + defer func() { + startCPUProfile, stopCPUProfile, writeHeapProfile, sleep = oldStartCPUProfile, oldStopCPUProfile, oldWriteHeapProfile, oldSleep + }() + + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + errFunc := func(io.Writer) error { return errors.New("") } + testDuration := time.Second * 5 + tests := []struct { + profileType pb.ProfileType + duration *time.Duration + startCPUProfileFunc func(io.Writer) error + writeHeapProfileFunc func(io.Writer) error + wantBytes []byte + }{ + { + profileType: pb.ProfileType_CPU, + duration: &testDuration, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{1}) + return nil + }, + writeHeapProfileFunc: errFunc, + wantBytes: []byte{1}, + }, + { + profileType: pb.ProfileType_CPU, + startCPUProfileFunc: errFunc, + writeHeapProfileFunc: errFunc, + }, + { + profileType: pb.ProfileType_CPU, + duration: &testDuration, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{2}) + return nil + }, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{3}) + return nil + }, + wantBytes: []byte{2}, + }, + { + profileType: pb.ProfileType_HEAP, + startCPUProfileFunc: errFunc, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{4}) + return nil + }, + wantBytes: []byte{4}, + }, + { + profileType: pb.ProfileType_HEAP, + startCPUProfileFunc: errFunc, + writeHeapProfileFunc: errFunc, + }, + { + profileType: pb.ProfileType_HEAP, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{5}) + return nil + }, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{6}) + return nil + }, + wantBytes: []byte{6}, + }, + { + profileType: pb.ProfileType_PROFILE_TYPE_UNSPECIFIED, + startCPUProfileFunc: func(w io.Writer) error { + w.Write([]byte{7}) + return nil + }, + writeHeapProfileFunc: func(w io.Writer) error { + w.Write([]byte{8}) + return nil + }, + }, + } + + for _, tt := range tests { + mpc := mocks.NewMockProfilerServiceClient(ctrl) + a := createTestAgent(mpc) + startCPUProfile = tt.startCPUProfileFunc + stopCPUProfile = func() {} + writeHeapProfile = tt.writeHeapProfileFunc + var gotSleep *time.Duration + sleep = func(ctx context.Context, d time.Duration) error { + gotSleep = &d + return nil + } + p := &pb.Profile{ProfileType: tt.profileType} + if tt.duration != nil { + p.Duration = ptypes.DurationProto(*tt.duration) + } + if tt.wantBytes != nil { + wantProfile := &pb.Profile{ + ProfileType: p.ProfileType, + Duration: p.Duration, + ProfileBytes: tt.wantBytes, + Labels: a.profileLabels, + } + wantRequest := pb.UpdateProfileRequest{ + Profile: wantProfile, + } + mpc.EXPECT().UpdateProfile(ctx, gomock.Eq(&wantRequest)).Times(1) + } else { + mpc.EXPECT().UpdateProfile(gomock.Any(), gomock.Any()).MaxTimes(0) + } + + a.profileAndUpload(ctx, p) + + if tt.duration == nil { + if gotSleep != nil { + t.Errorf("profileAndUpload(%v) slept for: %v, want no sleep", p, gotSleep) + } + } else { + if gotSleep == nil { + 
t.Errorf("profileAndUpload(%v) didn't sleep, want sleep for: %v", p, tt.duration) + } else if *gotSleep != *tt.duration { + t.Errorf("profileAndUpload(%v) slept for wrong duration, got: %v, want: %v", p, gotSleep, tt.duration) + } + } + } +} + +func TestRetry(t *testing.T) { + normalDuration := time.Second * 3 + negativeDuration := time.Second * -3 + + tests := []struct { + trailers map[string]string + wantPause *time.Duration + }{ + { + createTrailers(normalDuration), + &normalDuration, + }, + { + createTrailers(negativeDuration), + nil, + }, + { + map[string]string{retryInfoMetadata: "wrong format"}, + nil, + }, + { + map[string]string{}, + nil, + }, + } + + for _, tt := range tests { + md := grpcmd.New(tt.trailers) + r := &retryer{ + backoff: gax.Backoff{ + Initial: initialBackoff, + Max: maxBackoff, + Multiplier: backoffMultiplier, + }, + md: md, + } + + pause, shouldRetry := r.Retry(status.Error(codes.Aborted, "")) + + if !shouldRetry { + t.Error("retryer.Retry() returned shouldRetry false, want true") + } + + if tt.wantPause != nil { + if pause != *tt.wantPause { + t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: %v", pause, tt.wantPause) + } + } else { + if pause > initialBackoff { + t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: < %v", pause, initialBackoff) + } + } + } + + md := grpcmd.New(map[string]string{}) + + r := &retryer{ + backoff: gax.Backoff{ + Initial: initialBackoff, + Max: maxBackoff, + Multiplier: backoffMultiplier, + }, + md: md, + } + for i := 0; i < 100; i++ { + pause, shouldRetry := r.Retry(errors.New("")) + if !shouldRetry { + t.Errorf("retryer.Retry() called %v times, returned shouldRetry false, want true", i) + } + if pause > maxBackoff { + t.Errorf("retryer.Retry() called %v times, returned wrong pause, got: %v, want: < %v", i, pause, maxBackoff) + } + } +} + +func TestWithXGoogHeader(t *testing.T) { + ctx := withXGoogHeader(context.Background()) + md, _ := grpcmd.FromOutgoingContext(ctx) + + if xg := md[xGoogAPIMetadata]; len(xg) == 0 { + t.Errorf("withXGoogHeader() sets empty xGoogHeader") + } else { + if !strings.Contains(xg[0], "gl-go/") { + t.Errorf("withXGoogHeader() got: %v, want gl-go key", xg[0]) + } + if !strings.Contains(xg[0], "gccl/") { + t.Errorf("withXGoogHeader() got: %v, want gccl key", xg[0]) + } + if !strings.Contains(xg[0], "gax/") { + t.Errorf("withXGoogHeader() got: %v, want gax key", xg[0]) + } + if !strings.Contains(xg[0], "grpc/") { + t.Errorf("withXGoogHeader() got: %v, want grpc key", xg[0]) + } + } +} + +func TestInitializeAgent(t *testing.T) { + oldConfig, oldMutexEnabled := config, mutexEnabled + defer func() { + config, mutexEnabled = oldConfig, oldMutexEnabled + }() + + for _, tt := range []struct { + config Config + enableMutex bool + wantDeploymentLabels map[string]string + wantProfileLabels map[string]string + }{ + { + config: Config{ServiceVersion: testSvcVersion, zone: testZone}, + wantDeploymentLabels: map[string]string{zoneNameLabel: testZone, versionLabel: testSvcVersion}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{zone: testZone}, + wantDeploymentLabels: map[string]string{zoneNameLabel: testZone}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{ServiceVersion: testSvcVersion}, + wantDeploymentLabels: map[string]string{versionLabel: testSvcVersion}, + wantProfileLabels: map[string]string{}, + }, + { + config: Config{instance: testInstance}, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{instanceLabel: 
testInstance}, + }, + { + config: Config{instance: testInstance}, + enableMutex: true, + wantDeploymentLabels: map[string]string{}, + wantProfileLabels: map[string]string{instanceLabel: testInstance}, + }, + } { + + config = tt.config + config.ProjectID = testProjectID + config.Target = testTarget + mutexEnabled = tt.enableMutex + a := initializeAgent(nil) + + wantDeployment := &pb.Deployment{ + ProjectId: testProjectID, + Target: testTarget, + Labels: tt.wantDeploymentLabels, + } + if !testutil.Equal(a.deployment, wantDeployment) { + t.Errorf("initializeAgent() got deployment: %v, want %v", a.deployment, wantDeployment) + } + + if !testutil.Equal(a.profileLabels, tt.wantProfileLabels) { + t.Errorf("initializeAgent() got profile labels: %v, want %v", a.profileLabels, tt.wantProfileLabels) + } + + wantProfileTypes := []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP, pb.ProfileType_THREADS} + if tt.enableMutex { + wantProfileTypes = append(wantProfileTypes, pb.ProfileType_CONTENTION) + } + if !testutil.Equal(a.profileTypes, wantProfileTypes) { + t.Errorf("initializeAgent() got profile types: %v, want %v", a.profileTypes, wantProfileTypes) + } + + } +} + +func TestInitializeConfig(t *testing.T) { + oldConfig, oldService, oldVersion, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE := config, os.Getenv("GAE_SERVICE"), os.Getenv("GAE_VERSION"), getProjectID, getInstanceName, getZone, onGCE + defer func() { + config, getProjectID, getInstanceName, getZone, onGCE = oldConfig, oldGetProjectID, oldGetInstanceName, oldGetZone, oldOnGCE + if err := os.Setenv("GAE_SERVICE", oldService); err != nil { + t.Fatal(err) + } + if err := os.Setenv("GAE_VERSION", oldVersion); err != nil { + t.Fatal(err) + } + }() + testGAEService := "test-gae-service" + testGAEVersion := "test-gae-version" + testGCEProjectID := "test-gce-project-id" + for _, tt := range []struct { + config Config + wantConfig Config + wantErrorString string + onGAE bool + onGCE bool + }{ + { + Config{Service: testService}, + Config{Target: testService, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + }, + { + Config{Target: testTarget}, + Config{Target: testTarget, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + }, + { + Config{}, + Config{}, + "service name must be specified in the configuration", + false, + true, + }, + { + Config{Service: testService}, + Config{Target: testService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + }, + { + Config{Target: testTarget}, + Config{Target: testTarget, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + }, + { + Config{}, + Config{Target: testGAEService, ServiceVersion: testGAEVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + }, + { + Config{Service: testService, ServiceVersion: testSvcVersion}, + Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + false, + true, + }, + { + Config{Service: testService, ServiceVersion: testSvcVersion}, + Config{Target: testService, ServiceVersion: testSvcVersion, ProjectID: testGCEProjectID, zone: testZone, instance: testInstance}, + "", + true, + true, + }, + { + Config{Service: testService, ProjectID: testProjectID}, + Config{Target: testService, ProjectID: testProjectID, 
zone: testZone, instance: testInstance}, + "", + false, + true, + }, + { + Config{Service: testService}, + Config{Target: testService}, + "project ID must be specified in the configuration if running outside of GCP", + false, + false, + }, + } { + envService, envVersion := "", "" + if tt.onGAE { + envService, envVersion = testGAEService, testGAEVersion + } + if err := os.Setenv("GAE_SERVICE", envService); err != nil { + t.Fatal(err) + } + if err := os.Setenv("GAE_VERSION", envVersion); err != nil { + t.Fatal(err) + } + if tt.onGCE { + onGCE = func() bool { return true } + getProjectID = func() (string, error) { return testGCEProjectID, nil } + getZone = func() (string, error) { return testZone, nil } + getInstanceName = func() (string, error) { return testInstance, nil } + } else { + onGCE = func() bool { return false } + getProjectID = func() (string, error) { return "", fmt.Errorf("test get project id error") } + getZone = func() (string, error) { return "", fmt.Errorf("test get zone error") } + getInstanceName = func() (string, error) { return "", fmt.Errorf("test get instance error") } + } + + errorString := "" + if err := initializeConfig(tt.config); err != nil { + errorString = err.Error() + } + + if !strings.Contains(errorString, tt.wantErrorString) { + t.Errorf("initializeConfig(%v) got error: %v, want contain %v", tt.config, errorString, tt.wantErrorString) + } + + if tt.wantErrorString == "" { + tt.wantConfig.APIAddr = apiAddress + } + tt.wantConfig.Service = tt.config.Service + if config != tt.wantConfig { + t.Errorf("initializeConfig(%v) got: %v, want %v", tt.config, config, tt.wantConfig) + } + } + + for _, tt := range []struct { + wantErrorString string + getProjectIDError bool + getZoneError bool + getInstanceError bool + }{ + { + wantErrorString: "failed to get the project ID from Compute Engine:", + getProjectIDError: true, + }, + { + wantErrorString: "failed to get zone from Compute Engine:", + getZoneError: true, + }, + { + wantErrorString: "failed to get instance from Compute Engine:", + getInstanceError: true, + }, + } { + onGCE = func() bool { return true } + if tt.getProjectIDError { + getProjectID = func() (string, error) { return "", fmt.Errorf("test get project ID error") } + } else { + getProjectID = func() (string, error) { return testGCEProjectID, nil } + } + + if tt.getZoneError { + getZone = func() (string, error) { return "", fmt.Errorf("test get zone error") } + } else { + getZone = func() (string, error) { return testZone, nil } + } + + if tt.getInstanceError { + getInstanceName = func() (string, error) { return "", fmt.Errorf("test get instance error") } + } else { + getInstanceName = func() (string, error) { return testInstance, nil } + } + errorString := "" + if err := initializeConfig(Config{Service: testService}); err != nil { + errorString = err.Error() + } + + if !strings.Contains(errorString, tt.wantErrorString) { + t.Errorf("initializeConfig() got error: %v, want contain %v", errorString, tt.wantErrorString) + } + } +} + +type fakeProfilerServer struct { + pb.ProfilerServiceServer + count int + gotProfiles map[string][]byte + done chan bool +} + +func (fs *fakeProfilerServer) CreateProfile(ctx context.Context, in *pb.CreateProfileRequest) (*pb.Profile, error) { + fs.count++ + switch fs.count { + case 1: + return &pb.Profile{Name: "testCPU", ProfileType: pb.ProfileType_CPU, Duration: ptypes.DurationProto(testProfileDuration)}, nil + case 2: + return &pb.Profile{Name: "testHeap", ProfileType: pb.ProfileType_HEAP}, nil + default: + select {} + } +} + 
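+// UpdateProfile records the uploaded profile bytes by type and signals
+// completion once the heap profile has been received.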
+func (fs *fakeProfilerServer) UpdateProfile(ctx context.Context, in *pb.UpdateProfileRequest) (*pb.Profile, error) { + switch in.Profile.ProfileType { + case pb.ProfileType_CPU: + fs.gotProfiles["CPU"] = in.Profile.ProfileBytes + case pb.ProfileType_HEAP: + fs.gotProfiles["HEAP"] = in.Profile.ProfileBytes + fs.done <- true + } + + return in.Profile, nil +} + +func profileeLoop(quit chan bool) { + for { + select { + case <-quit: + return + default: + profileeWork() + } + } +} + +func profileeWork() { + data := make([]byte, 1024*1024) + rand.Read(data) + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write(data); err != nil { + log.Println("failed to write to gzip stream", err) + return + } + if err := gz.Flush(); err != nil { + log.Println("failed to flush to gzip stream", err) + return + } + if err := gz.Close(); err != nil { + log.Println("failed to close gzip stream", err) + } +} + +func validateProfile(rawData []byte, wantFunctionName string) error { + p, err := profile.ParseData(rawData) + if err != nil { + return fmt.Errorf("ParseData failed: %v", err) + } + + if len(p.Sample) == 0 { + return fmt.Errorf("profile contains zero samples: %v", p) + } + + if len(p.Location) == 0 { + return fmt.Errorf("profile contains zero locations: %v", p) + } + + if len(p.Function) == 0 { + return fmt.Errorf("profile contains zero functions: %v", p) + } + + for _, l := range p.Location { + if len(l.Line) > 0 && l.Line[0].Function != nil && strings.Contains(l.Line[0].Function.Name, wantFunctionName) { + return nil + } + } + return fmt.Errorf("wanted function name %s not found in the profile", wantFunctionName) +} + +func TestDeltaMutexProfile(t *testing.T) { + oldMutexEnabled, oldMaxProcs := mutexEnabled, runtime.GOMAXPROCS(10) + defer func() { + mutexEnabled = oldMutexEnabled + runtime.GOMAXPROCS(oldMaxProcs) + }() + if mutexEnabled = enableMutexProfiling(); !mutexEnabled { + t.Skip("Go too old - mutex profiling not supported.") + } + + hog(time.Second, mutexHog) + go func() { + hog(2*time.Second, backgroundHog) + }() + + var prof bytes.Buffer + if err := deltaMutexProfile(context.Background(), time.Second, &prof); err != nil { + t.Fatalf("deltaMutexProfile() got error: %v", err) + } + p, err := profile.Parse(&prof) + if err != nil { + t.Fatalf("profile.Parse() got error: %v", err) + } + + if s := sum(p, "mutexHog"); s != 0 { + t.Errorf("mutexHog found in the delta mutex profile (sum=%d):\n%s", s, p) + } + if s := sum(p, "backgroundHog"); s <= 0 { + t.Errorf("backgroundHog not in the delta mutex profile (sum=%d):\n%s", s, p) + } +} + +// sum returns the sum of all mutex counts from the samples whose +// stacks include the specified function name. +func sum(p *profile.Profile, fname string) int64 { + locIDs := map[*profile.Location]bool{} + for _, loc := range p.Location { + for _, l := range loc.Line { + if strings.Contains(l.Function.Name, fname) { + locIDs[loc] = true + break + } + } + } + var s int64 + for _, sample := range p.Sample { + for _, loc := range sample.Location { + if locIDs[loc] { + s += sample.Value[0] + break + } + } + } + return s +} + +func mutexHog(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) { + for time.Since(start) < dt { + mu1.Lock() + runtime.Gosched() + mu2.Lock() + mu1.Unlock() + mu2.Unlock() + } +} + +// backgroundHog is identical to mutexHog. We keep them separate +// in order to distinguish them with function names in the stack trace. 
+func backgroundHog(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration) { + for time.Since(start) < dt { + mu1.Lock() + runtime.Gosched() + mu2.Lock() + mu1.Unlock() + mu2.Unlock() + } +} + +func hog(dt time.Duration, hogger func(mu1, mu2 *sync.Mutex, start time.Time, dt time.Duration)) { + start := time.Now() + mu1 := new(sync.Mutex) + mu2 := new(sync.Mutex) + var wg sync.WaitGroup + wg.Add(10) + for i := 0; i < 10; i++ { + go func() { + defer wg.Done() + hogger(mu1, mu2, start, dt) + }() + } + wg.Wait() +} + +func TestAgentWithServer(t *testing.T) { + oldDialGRPC, oldConfig := dialGRPC, config + defer func() { + dialGRPC, config = oldDialGRPC, oldConfig + }() + + srv, err := testutil.NewServer() + if err != nil { + t.Fatalf("testutil.NewServer(): %v", err) + } + fakeServer := &fakeProfilerServer{gotProfiles: map[string][]byte{}, done: make(chan bool)} + pb.RegisterProfilerServiceServer(srv.Gsrv, fakeServer) + + srv.Start() + + dialGRPC = gtransport.DialInsecure + if err := Start(Config{ + Target: testTarget, + ProjectID: testProjectID, + APIAddr: srv.Addr, + instance: testInstance, + zone: testZone, + }); err != nil { + t.Fatalf("Start(): %v", err) + } + + quitProfilee := make(chan bool) + go profileeLoop(quitProfilee) + + select { + case <-fakeServer.done: + case <-time.After(testServerTimeout): + t.Errorf("got timeout after %v, want fake server done", testServerTimeout) + } + quitProfilee <- true + + for _, pType := range []string{"CPU", "HEAP"} { + if profile, ok := fakeServer.gotProfiles[pType]; !ok { + t.Errorf("fakeServer.gotProfiles[%s] got no profile, want profile", pType) + } else if err := validateProfile(profile, "profilee"); err != nil { + t.Errorf("validateProfile(%s) got error: %v", pType, err) + } + } +} diff --git a/vendor/cloud.google.com/go/profiler/symbolizer.go b/vendor/cloud.google.com/go/profiler/symbolizer.go new file mode 100644 index 0000000..15e0b45 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/symbolizer.go @@ -0,0 +1,143 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profiler + +import ( + "bytes" + "regexp" + "runtime" + "strings" + + "github.com/google/pprof/profile" +) + +var shouldAssumeSymbolized = isSymbolizedGoVersion(runtime.Version()) + +type function interface { + Name() string + FileLine(pc uintptr) (string, int) +} + +// funcForPC is a wrapper for runtime.FuncForPC. Defined as var for testing. +var funcForPC = func(pc uintptr) function { + if f := runtime.FuncForPC(pc); f != nil { + return f + } + return nil +} + +// parseAndSymbolize parses a profile from a buffer, symbolizes it +// if it's not yet symbolized, and writes the profile back as a +// gzip-compressed marshaled protobuf. +func parseAndSymbolize(data *bytes.Buffer) error { + p, err := profile.ParseData(data.Bytes()) + if err != nil { + return err + } + + // Do nothing if the profile is already symbolized. 
+ if symbolized(p) { + return nil + } + // Clear the profile functions to avoid creating duplicates. + p.Function = nil + symbolize(p) + data.Reset() + return p.Write(data) +} + +// isSymbolizedGoVersion returns true if Go version equals to or is +// higher than Go 1.9. Starting Go 1.9 the profiles are symbolized +// by runtime/pprof. +func isSymbolizedGoVersion(goVersion string) bool { + r, err := regexp.Compile(`go(1\.9|1\.[1-9][0-9]|[2-9]).*`) + if err == nil && r.MatchString(goVersion) { + return true + } + return false +} + +// symbolized checks if all locations have symbolized function +// information. +func symbolized(p *profile.Profile) bool { + for _, l := range p.Location { + if len(l.Line) == 0 || l.Line[0].Function == nil { + return false + } + } + return true +} + +func symbolize(p *profile.Profile) { + fns := profileFunctionMap{} + for _, l := range p.Location { + pc := uintptr(l.Address) + f := funcForPC(pc) + if f == nil { + continue + } + file, lineno := f.FileLine(pc) + l.Line = []profile.Line{ + { + Function: fns.findOrAddFunction(f.Name(), file, p), + Line: int64(lineno), + }, + } + } + // Trim runtime functions. Always hide runtime.goexit. Other runtime + // functions are only hidden for heap profile when they appear at the beginning. + isHeapProfile := p.PeriodType != nil && p.PeriodType.Type == "space" + for _, s := range p.Sample { + show := !isHeapProfile + var i int + for _, l := range s.Location { + if len(l.Line) > 0 && l.Line[0].Function != nil { + name := l.Line[0].Function.Name + if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") { + continue + } + } + show = true + s.Location[i] = l + i++ + } + // If all locations of a sample are trimmed, keep the root location. + if i == 0 && len(s.Location) > 0 { + s.Location[0] = s.Location[len(s.Location)-1] + i = 1 + } + s.Location = s.Location[:i] + } +} + +type profileFunctionMap map[profile.Function]*profile.Function + +func (fns profileFunctionMap) findOrAddFunction(name, filename string, p *profile.Profile) *profile.Function { + f := profile.Function{ + Name: name, + SystemName: name, + Filename: filename, + } + if fp := fns[f]; fp != nil { + return fp + } + fp := new(profile.Function) + fns[f] = fp + + *fp = f + fp.ID = uint64(len(p.Function) + 1) + p.Function = append(p.Function, fp) + return fp +} diff --git a/vendor/cloud.google.com/go/profiler/symbolizer_test.go b/vendor/cloud.google.com/go/profiler/symbolizer_test.go new file mode 100644 index 0000000..779f7f3 --- /dev/null +++ b/vendor/cloud.google.com/go/profiler/symbolizer_test.go @@ -0,0 +1,229 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package profiler + +import ( + "bytes" + "testing" + + "cloud.google.com/go/internal/testutil" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/pprof/profile" +) + +type fakeFunc struct { + name string + file string + lineno int +} + +func (f *fakeFunc) Name() string { + return f.name +} +func (f *fakeFunc) FileLine(_ uintptr) (string, int) { + return f.file, f.lineno +} + +var cmpOpt = cmpopts.IgnoreUnexported(profile.Profile{}, profile.Function{}, + profile.Line{}, profile.Location{}, profile.Sample{}, profile.ValueType{}) + +// TestRuntimeFunctionTrimming tests if symbolize trims runtime functions as intended. +func TestRuntimeFunctionTrimming(t *testing.T) { + fakeFuncMap := map[uintptr]*fakeFunc{ + 0x10: &fakeFunc{"runtime.goexit", "runtime.go", 10}, + 0x20: &fakeFunc{"runtime.other", "runtime.go", 20}, + 0x30: &fakeFunc{"foo", "foo.go", 30}, + 0x40: &fakeFunc{"bar", "bar.go", 40}, + } + backupFuncForPC := funcForPC + funcForPC = func(pc uintptr) function { + return fakeFuncMap[pc] + } + defer func() { + funcForPC = backupFuncForPC + }() + testLoc := []*profile.Location{ + {ID: 1, Address: 0x10}, + {ID: 2, Address: 0x20}, + {ID: 3, Address: 0x30}, + {ID: 4, Address: 0x40}, + } + testProfile := &profile.Profile{ + Sample: []*profile.Sample{ + {Location: []*profile.Location{testLoc[0], testLoc[1], testLoc[3], testLoc[2]}}, + {Location: []*profile.Location{testLoc[1], testLoc[3], testLoc[2]}}, + {Location: []*profile.Location{testLoc[3], testLoc[2], testLoc[1]}}, + {Location: []*profile.Location{testLoc[3], testLoc[2], testLoc[0]}}, + {Location: []*profile.Location{testLoc[0], testLoc[1], testLoc[3], testLoc[0]}}, + {Location: []*profile.Location{testLoc[1], testLoc[0]}}, + }, + Location: testLoc, + } + testProfiles := make([]*profile.Profile, 2) + testProfiles[0] = testProfile.Copy() + testProfiles[1] = testProfile.Copy() + // Test case for CPU profile. + testProfiles[0].PeriodType = &profile.ValueType{Type: "cpu", Unit: "nanoseconds"} + // Test case for heap profile. 
+ testProfiles[1].PeriodType = &profile.ValueType{Type: "space", Unit: "bytes"} + wantFunc := []*profile.Function{ + {ID: 1, Name: "runtime.goexit", SystemName: "runtime.goexit", Filename: "runtime.go"}, + {ID: 2, Name: "runtime.other", SystemName: "runtime.other", Filename: "runtime.go"}, + {ID: 3, Name: "foo", SystemName: "foo", Filename: "foo.go"}, + {ID: 4, Name: "bar", SystemName: "bar", Filename: "bar.go"}, + } + wantLoc := []*profile.Location{ + {ID: 1, Address: 0x10, Line: []profile.Line{{Function: wantFunc[0], Line: 10}}}, + {ID: 2, Address: 0x20, Line: []profile.Line{{Function: wantFunc[1], Line: 20}}}, + {ID: 3, Address: 0x30, Line: []profile.Line{{Function: wantFunc[2], Line: 30}}}, + {ID: 4, Address: 0x40, Line: []profile.Line{{Function: wantFunc[3], Line: 40}}}, + } + wantProfiles := []*profile.Profile{ + { + PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{wantLoc[1], wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[1], wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2], wantLoc[1]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[1], wantLoc[3]}}, + {Location: []*profile.Location{wantLoc[1]}}, + }, + Location: wantLoc, + Function: wantFunc, + }, + { + PeriodType: &profile.ValueType{Type: "space", Unit: "bytes"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2], wantLoc[1]}}, + {Location: []*profile.Location{wantLoc[3], wantLoc[2]}}, + {Location: []*profile.Location{wantLoc[3]}}, + {Location: []*profile.Location{wantLoc[0]}}, + }, + Location: wantLoc, + Function: wantFunc, + }, + } + for i := 0; i < 2; i++ { + symbolize(testProfiles[i]) + if !testutil.Equal(testProfiles[i], wantProfiles[i], cmpOpt) { + t.Errorf("incorrect trimming (testcase = %d): got {%v}, want {%v}", i, testProfiles[i], wantProfiles[i]) + } + } +} + +// TestParseAndSymbolize tests if parseAndSymbolize parses and symbolizes +// profiles as intended. 
+func TestParseAndSymbolize(t *testing.T) { + fakeFuncMap := map[uintptr]*fakeFunc{ + 0x10: &fakeFunc{"foo", "foo.go", 10}, + 0x20: &fakeFunc{"bar", "bar.go", 20}, + } + backupFuncForPC := funcForPC + funcForPC = func(pc uintptr) function { + return fakeFuncMap[pc] + } + defer func() { + funcForPC = backupFuncForPC + }() + + testLoc := []*profile.Location{ + {ID: 1, Address: 0x10}, + {ID: 2, Address: 0x20}, + } + testProfile := &profile.Profile{ + SampleType: []*profile.ValueType{ + &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + }, + PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{testLoc[0], testLoc[1]}, Value: []int64{1}}, + {Location: []*profile.Location{testLoc[1]}, Value: []int64{1}}, + }, + Location: testLoc, + } + testProfiles := make([]*profile.Profile, 2) + testProfiles[0] = testProfile.Copy() + testProfiles[1] = testProfile.Copy() + + wantFunc := []*profile.Function{ + {ID: 1, Name: "foo", SystemName: "foo", Filename: "foo.go"}, + {ID: 2, Name: "bar", SystemName: "bar", Filename: "bar.go"}, + } + wantLoc := []*profile.Location{ + {ID: 1, Address: 0x10, Line: []profile.Line{{Function: wantFunc[0], Line: 10}}}, + {ID: 2, Address: 0x20, Line: []profile.Line{{Function: wantFunc[1], Line: 20}}}, + } + wantProfile := &profile.Profile{ + SampleType: []*profile.ValueType{ + &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + }, + PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}, + Sample: []*profile.Sample{ + {Location: []*profile.Location{wantLoc[0], wantLoc[1]}, Value: []int64{1}}, + {Location: []*profile.Location{wantLoc[1]}, Value: []int64{1}}, + }, + Location: wantLoc, + Function: wantFunc, + } + + // Profile already symbolized. + testProfiles[1].Location = []*profile.Location{ + {ID: 1, Address: 0x10, Line: []profile.Line{{Function: wantFunc[0], Line: 10}}}, + {ID: 2, Address: 0x20, Line: []profile.Line{{Function: wantFunc[1], Line: 20}}}, + } + testProfiles[1].Function = []*profile.Function{ + {ID: 1, Name: "foo", SystemName: "foo", Filename: "foo.go"}, + {ID: 2, Name: "bar", SystemName: "bar", Filename: "bar.go"}, + } + for i := 0; i < 2; i++ { + var prof bytes.Buffer + testProfiles[i].Write(&prof) + + parseAndSymbolize(&prof) + gotProfile, err := profile.ParseData(prof.Bytes()) + if err != nil { + t.Errorf("parsing symbolized profile (testcase = %d) got err: %v, want no error", i, err) + } + if !testutil.Equal(gotProfile, wantProfile, cmpOpt) { + t.Errorf("incorrect symbolization (testcase = %d): got {%v}, want {%v}", i, gotProfile, wantProfile) + } + } +} + +func TestIsSymbolizedGoVersion(t *testing.T) { + for _, tc := range []struct { + input string + want bool + }{ + {"go1.9beta2", true}, + {"go1.9", true}, + {"go1.9.1", true}, + {"go1.10", true}, + {"go1.10.1", true}, + {"go2.0", true}, + {"go3.1", true}, + {"go1.8", false}, + {"go1.8.1", false}, + {"go1.7", false}, + {"devel ", false}, + } { + if got := isSymbolizedGoVersion(tc.input); got != tc.want { + t.Errorf("isSymbolizedGoVersion(%v) got %v, want %v", tc.input, got, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go new file mode 100644 index 0000000..03e0862 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/ListTopics_smoke_test.go @@ -0,0 +1,65 @@ +// Copyright 2017, Google LLC All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestPublisherSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewPublisherClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var formattedProject string = PublisherProjectPath(projectId) + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + iter := c.ListTopics(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/README.md b/vendor/cloud.google.com/go/pubsub/apiv1/README.md new file mode 100644 index 0000000..b5967ab --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/README.md @@ -0,0 +1,9 @@ +Auto-generated pubsub v1 clients +================================= + +This package includes auto-generated clients for the pubsub v1 API. + +Use the handwritten client (in the parent directory, +cloud.google.com/go/pubsub) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go new file mode 100644 index 0000000..fdfe25a --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package pubsub is an auto-generated package for the +// Google Cloud Pub/Sub API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Provides reliable, many-to-many, asynchronous messaging between +// applications. +// +// Use the client at cloud.google.com/go/pubsub in preference to this. 
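+//
+// A minimal usage sketch (illustrative only: "my-project" is a placeholder
+// project ID, error handling is elided, and pubsubpb / iterator refer to
+// google.golang.org/genproto/googleapis/pubsub/v1 and
+// google.golang.org/api/iterator, as used in this package's tests):
+//
+//	ctx := context.Background()
+//	c, err := NewPublisherClient(ctx)
+//	if err != nil {
+//		// TODO: handle error
+//	}
+//	it := c.ListTopics(ctx, &pubsubpb.ListTopicsRequest{
+//		Project: PublisherProjectPath("my-project"),
+//	})
+//	for {
+//		topic, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: handle error
+//		}
+//		_ = topic // use topic
+//	}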
+package pubsub // import "cloud.google.com/go/pubsub/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go new file mode 100644 index 0000000..5465670 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go @@ -0,0 +1,1878 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + iampb "google.golang.org/genproto/googleapis/iam/v1" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockPublisherServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + pubsubpb.PublisherServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockPublisherServer) CreateTopic(ctx context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest) (*pubsubpb.Topic, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.PublishResponse), nil +} + +func (s *mockPublisherServer) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest) (*pubsubpb.ListTopicsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListTopicsResponse), nil +} + +func (s *mockPublisherServer) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) (*pubsubpb.ListTopicSubscriptionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListTopicSubscriptionsResponse), nil +} + +func (s *mockPublisherServer) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +type mockIamPolicyServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. 
+ iampb.IAMPolicyServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockIamPolicyServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamPolicyServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamPolicyServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +type mockSubscriberServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + pubsubpb.SubscriberServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSubscriberServer) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest) (*pubsubpb.Subscription, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) (*pubsubpb.ListSubscriptionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListSubscriptionsResponse), nil +} + +func (s *mockSubscriberServer) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) Pull(ctx context.Context, req *pubsubpb.PullRequest) 
(*pubsubpb.PullResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.PullResponse), nil +} + +func (s *mockSubscriberServer) StreamingPull(stream pubsubpb.Subscriber_StreamingPullServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*pubsubpb.StreamingPullResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockSubscriberServer) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest) (*pubsubpb.ListSnapshotsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListSnapshotsResponse), nil +} + +func (s *mockSubscriberServer) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest) (*pubsubpb.Snapshot, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Snapshot), nil +} + +func (s *mockSubscriberServer) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest) (*pubsubpb.Snapshot, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Snapshot), nil +} + +func (s *mockSubscriberServer) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSubscriberServer) Seek(ctx context.Context, req *pubsubpb.SeekRequest) (*pubsubpb.SeekResponse, error) { + md, _ := 
metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.SeekResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockPublisher mockPublisherServer + mockIamPolicy mockIamPolicyServer + mockSubscriber mockSubscriberServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + pubsubpb.RegisterPublisherServer(serv, &mockPublisher) + iampb.RegisterIAMPolicyServer(serv, &mockIamPolicy) + pubsubpb.RegisterSubscriberServer(serv, &mockSubscriber) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestPublisherCreateTopic(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &pubsubpb.Topic{ + Name: name2, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedName string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Topic{ + Name: formattedName, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherCreateTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedName string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Topic{ + Name: formattedName, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherUpdateTopic(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &pubsubpb.Topic{ + Name: name, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var topic *pubsubpb.Topic = &pubsubpb.Topic{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &pubsubpb.UpdateTopicRequest{ + Topic: topic, + UpdateMask: updateMask, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, 
got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherUpdateTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var topic *pubsubpb.Topic = &pubsubpb.Topic{} + var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &pubsubpb.UpdateTopicRequest{ + Topic: topic, + UpdateMask: updateMask, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherPublish(t *testing.T) { + var messageIdsElement string = "messageIdsElement-744837059" + var messageIds = []string{messageIdsElement} + var expectedResponse = &pubsubpb.PublishResponse{ + MessageIds: messageIds, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var data []byte = []byte("-86") + var messagesElement = &pubsubpb.PubsubMessage{ + Data: data, + } + var messages = []*pubsubpb.PubsubMessage{messagesElement} + var request = &pubsubpb.PublishRequest{ + Topic: formattedTopic, + Messages: messages, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Publish(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherPublishError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var data []byte = []byte("-86") + var messagesElement = &pubsubpb.PubsubMessage{ + Data: data, + } + var messages = []*pubsubpb.PubsubMessage{messagesElement} + var request = &pubsubpb.PublishRequest{ + Topic: formattedTopic, + Messages: messages, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Publish(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherGetTopic(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &pubsubpb.Topic{ + Name: name, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.GetTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, 
want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherGetTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.GetTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherListTopics(t *testing.T) { + var nextPageToken string = "" + var topicsElement *pubsubpb.Topic = &pubsubpb.Topic{} + var topics = []*pubsubpb.Topic{topicsElement} + var expectedResponse = &pubsubpb.ListTopicsResponse{ + NextPageToken: nextPageToken, + Topics: topics, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedProject string = PublisherProjectPath("[PROJECT]") + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopics(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Topics[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherListTopicsError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedProject string = PublisherProjectPath("[PROJECT]") + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopics(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherListTopicSubscriptions(t *testing.T) { + var nextPageToken string = "" + var subscriptionsElement string = "subscriptionsElement1698708147" + var subscriptions = []string{subscriptionsElement} + var expectedResponse = &pubsubpb.ListTopicSubscriptionsResponse{ + NextPageToken: nextPageToken, + Subscriptions: subscriptions, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.ListTopicSubscriptionsRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() + + if err != nil { + 
t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Subscriptions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherListTopicSubscriptionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.ListTopicSubscriptionsRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherDeleteTopic(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.DeleteTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestPublisherDeleteTopicError(t *testing.T) { + errCode := codes.PermissionDenied + mockPublisher.err = gstatus.Error(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.DeleteTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTopic(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberCreateSubscription(t *testing.T) { + var name2 string = "name2-1052831874" + var topic2 string = "topic2-1139259102" + var ackDeadlineSeconds int32 = 2135351438 + var retainAckedMessages bool = false + var expectedResponse = &pubsubpb.Subscription{ + Name: name2, + Topic: topic2, + AckDeadlineSeconds: ackDeadlineSeconds, + RetainAckedMessages: retainAckedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedName string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var formattedTopic string = SubscriberTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Subscription{ + Name: formattedName, + Topic: formattedTopic, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + 
+ if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberCreateSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedName string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var formattedTopic string = SubscriberTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Subscription{ + Name: formattedName, + Topic: formattedTopic, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberGetSubscription(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var ackDeadlineSeconds int32 = 2135351438 + var retainAckedMessages bool = false + var expectedResponse = &pubsubpb.Subscription{ + Name: name, + Topic: topic, + AckDeadlineSeconds: ackDeadlineSeconds, + RetainAckedMessages: retainAckedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.GetSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberGetSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.GetSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberUpdateSubscription(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var ackDeadlineSeconds2 int32 = -921632575 + var retainAckedMessages bool = false + var expectedResponse = &pubsubpb.Subscription{ + Name: name, + Topic: topic, + AckDeadlineSeconds: ackDeadlineSeconds2, + RetainAckedMessages: retainAckedMessages, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var ackDeadlineSeconds int32 = 42 + var subscription = 
&pubsubpb.Subscription{ + AckDeadlineSeconds: ackDeadlineSeconds, + } + var pathsElement string = "ack_deadline_seconds" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSubscriptionRequest{ + Subscription: subscription, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberUpdateSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var ackDeadlineSeconds int32 = 42 + var subscription = &pubsubpb.Subscription{ + AckDeadlineSeconds: ackDeadlineSeconds, + } + var pathsElement string = "ack_deadline_seconds" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSubscriptionRequest{ + Subscription: subscription, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberListSubscriptions(t *testing.T) { + var nextPageToken string = "" + var subscriptionsElement *pubsubpb.Subscription = &pubsubpb.Subscription{} + var subscriptions = []*pubsubpb.Subscription{subscriptionsElement} + var expectedResponse = &pubsubpb.ListSubscriptionsResponse{ + NextPageToken: nextPageToken, + Subscriptions: subscriptions, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedProject string = SubscriberProjectPath("[PROJECT]") + var request = &pubsubpb.ListSubscriptionsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSubscriptions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Subscriptions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberListSubscriptionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedProject string = SubscriberProjectPath("[PROJECT]") + var request = &pubsubpb.ListSubscriptionsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSubscriptions(context.Background(), request).Next() 
+ + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberDeleteSubscription(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.DeleteSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberDeleteSubscriptionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.DeleteSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSubscription(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberModifyAckDeadline(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var ackDeadlineSeconds int32 = 2135351438 + var request = &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyAckDeadline(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberModifyAckDeadlineError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var ackDeadlineSeconds int32 = 2135351438 + var request = &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyAckDeadline(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberAcknowledge(t *testing.T) { + var expectedResponse 
*emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var request = &pubsubpb.AcknowledgeRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Acknowledge(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberAcknowledgeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var request = &pubsubpb.AcknowledgeRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Acknowledge(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberPull(t *testing.T) { + var expectedResponse *pubsubpb.PullResponse = &pubsubpb.PullResponse{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var maxMessages int32 = 496131527 + var request = &pubsubpb.PullRequest{ + Subscription: formattedSubscription, + MaxMessages: maxMessages, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Pull(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberPullError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var maxMessages int32 = 496131527 + var request = &pubsubpb.PullRequest{ + Subscription: formattedSubscription, + MaxMessages: maxMessages, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Pull(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberStreamingPull(t *testing.T) { + var receivedMessagesElement *pubsubpb.ReceivedMessage = &pubsubpb.ReceivedMessage{} + var receivedMessages = []*pubsubpb.ReceivedMessage{receivedMessagesElement} + var expectedResponse = &pubsubpb.StreamingPullResponse{ + ReceivedMessages: receivedMessages, + } + + mockSubscriber.err = nil + 
mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var streamAckDeadlineSeconds int32 = 1875467245 + var request = &pubsubpb.StreamingPullRequest{ + Subscription: formattedSubscription, + StreamAckDeadlineSeconds: streamAckDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberStreamingPullError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var streamAckDeadlineSeconds int32 = 1875467245 + var request = &pubsubpb.StreamingPullRequest{ + Subscription: formattedSubscription, + StreamAckDeadlineSeconds: streamAckDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberModifyPushConfig(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} + var request = &pubsubpb.ModifyPushConfigRequest{ + Subscription: formattedSubscription, + PushConfig: pushConfig, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyPushConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberModifyPushConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} + var request = &pubsubpb.ModifyPushConfigRequest{ + Subscription: formattedSubscription, + PushConfig: pushConfig, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyPushConfig(context.Background(), request) + + if st, ok 
:= gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberListSnapshots(t *testing.T) { + var nextPageToken string = "" + var snapshotsElement *pubsubpb.Snapshot = &pubsubpb.Snapshot{} + var snapshots = []*pubsubpb.Snapshot{snapshotsElement} + var expectedResponse = &pubsubpb.ListSnapshotsResponse{ + NextPageToken: nextPageToken, + Snapshots: snapshots, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedProject string = SubscriberProjectPath("[PROJECT]") + var request = &pubsubpb.ListSnapshotsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSnapshots(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Snapshots[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberListSnapshotsError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedProject string = SubscriberProjectPath("[PROJECT]") + var request = &pubsubpb.ListSnapshotsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSnapshots(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberCreateSnapshot(t *testing.T) { + var name2 string = "name2-1052831874" + var topic string = "topic110546223" + var expectedResponse = &pubsubpb.Snapshot{ + Name: name2, + Topic: topic, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedName string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.CreateSnapshotRequest{ + Name: formattedName, + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSnapshot(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberCreateSnapshotError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedName string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") + var formattedSubscription string = 
SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.CreateSnapshotRequest{ + Name: formattedName, + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSnapshot(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberUpdateSnapshot(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var expectedResponse = &pubsubpb.Snapshot{ + Name: name, + Topic: topic, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var seconds int64 = 123456 + var expireTime = ×tamppb.Timestamp{ + Seconds: seconds, + } + var snapshot = &pubsubpb.Snapshot{ + ExpireTime: expireTime, + } + var pathsElement string = "expire_time" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSnapshotRequest{ + Snapshot: snapshot, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSnapshot(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberUpdateSnapshotError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var seconds int64 = 123456 + var expireTime = ×tamppb.Timestamp{ + Seconds: seconds, + } + var snapshot = &pubsubpb.Snapshot{ + ExpireTime: expireTime, + } + var pathsElement string = "expire_time" + var paths = []string{pathsElement} + var updateMask = &field_maskpb.FieldMask{ + Paths: paths, + } + var request = &pubsubpb.UpdateSnapshotRequest{ + Snapshot: snapshot, + UpdateMask: updateMask, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSnapshot(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberDeleteSnapshot(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSnapshot string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") + var request = &pubsubpb.DeleteSnapshotRequest{ + Snapshot: formattedSnapshot, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSnapshot(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberDeleteSnapshotError(t *testing.T) { + errCode := 
codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSnapshot string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") + var request = &pubsubpb.DeleteSnapshotRequest{ + Snapshot: formattedSnapshot, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSnapshot(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberSeek(t *testing.T) { + var expectedResponse *pubsubpb.SeekResponse = &pubsubpb.SeekResponse{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.SeekRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Seek(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberSeekError(t *testing.T) { + errCode := codes.PermissionDenied + mockSubscriber.err = gstatus.Error(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.SeekRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Seek(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go new file mode 100644 index 0000000..fe71358 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -0,0 +1,416 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package pubsub + +import ( + "math" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// PublisherCallOptions contains the retry settings for each method of PublisherClient. +type PublisherCallOptions struct { + CreateTopic []gax.CallOption + UpdateTopic []gax.CallOption + Publish []gax.CallOption + GetTopic []gax.CallOption + ListTopics []gax.CallOption + ListTopicSubscriptions []gax.CallOption + DeleteTopic []gax.CallOption +} + +func defaultPublisherClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultPublisherCallOptions() *PublisherCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"messaging", "one_plus_delivery"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Aborted, + codes.Canceled, + codes.DeadlineExceeded, + codes.Internal, + codes.ResourceExhausted, + codes.Unavailable, + codes.Unknown, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &PublisherCallOptions{ + CreateTopic: retry[[2]string{"default", "idempotent"}], + UpdateTopic: retry[[2]string{"default", "idempotent"}], + Publish: retry[[2]string{"messaging", "one_plus_delivery"}], + GetTopic: retry[[2]string{"default", "idempotent"}], + ListTopics: retry[[2]string{"default", "idempotent"}], + ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteTopic: retry[[2]string{"default", "idempotent"}], + } +} + +// PublisherClient is a client for interacting with Google Cloud Pub/Sub API. +type PublisherClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + publisherClient pubsubpb.PublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewPublisherClient creates a new publisher client. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. +func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &PublisherClient{ + conn: conn, + CallOptions: defaultPublisherCallOptions(), + + publisherClient: pubsubpb.NewPublisherClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
+func (c *PublisherClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// PublisherProjectPath returns the path for the project resource. +func PublisherProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// PublisherTopicPath returns the path for the topic resource. +func PublisherTopicPath(project, topic string) string { + return "" + + "projects/" + + project + + "/topics/" + + topic + + "" +} + +func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +// CreateTopic creates the given topic with the given name. +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateTopic updates an existing topic. Note that certain properties of a topic are not +// modifiable. Options settings follow the style guide: +// NOTE: The style guide requires body: "topic" instead of body: "*". +// Keeping the latter for internal consistency in V1, however it should be +// corrected in V2. See +// https://cloud.google.com/apis/design/standard_methods#update for details. +func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic +// does not exist. The message payload must not be empty; it must contain +// either a non-empty data field, or at least one attribute. +func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) 
+ var resp *pubsubpb.PublishResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTopic gets the configuration of a topic. +func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...) + it := &TopicIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { + var resp *pubsubpb.ListTopicsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Topics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListTopicSubscriptions lists the name of the subscriptions for this topic. +func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *pubsubpb.ListTopicSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. Existing subscriptions to this topic are +// not deleted, but their topic field is set to _deleted-topic_. +func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TopicIterator manages a stream of *pubsubpb.Topic. +type TopicIterator struct { + items []*pubsubpb.Topic + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TopicIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { + var item *pubsubpb.Topic + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TopicIterator) bufLen() int { + return len(it.items) +} + +func (it *TopicIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go new file mode 100644 index 0000000..7f0dff5 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go @@ -0,0 +1,204 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub_test + +import ( + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewPublisherClient() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExamplePublisherClient_SubscriptionIAM() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + subscription := &pubsubpb.Subscription{} + h := c.SubscriptionIAM(subscription) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExamplePublisherClient_TopicIAM() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + topic := &pubsubpb.Topic{} + h := c.TopicIAM(topic) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExamplePublisherClient_CreateTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Topic{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_UpdateTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.UpdateTopicRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_Publish() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PublishRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Publish(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExamplePublisherClient_GetTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetTopicRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_ListTopics() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopics(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_ListTopicSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopicSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_DeleteTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteTopicRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go new file mode 100644 index 0000000..f94a212 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -0,0 +1,631 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + "math" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// SubscriberCallOptions contains the retry settings for each method of SubscriberClient. 
+type SubscriberCallOptions struct { + CreateSubscription []gax.CallOption + GetSubscription []gax.CallOption + UpdateSubscription []gax.CallOption + ListSubscriptions []gax.CallOption + DeleteSubscription []gax.CallOption + ModifyAckDeadline []gax.CallOption + Acknowledge []gax.CallOption + Pull []gax.CallOption + StreamingPull []gax.CallOption + ModifyPushConfig []gax.CallOption + ListSnapshots []gax.CallOption + CreateSnapshot []gax.CallOption + UpdateSnapshot []gax.CallOption + DeleteSnapshot []gax.CallOption + Seek []gax.CallOption +} + +func defaultSubscriberClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultSubscriberCallOptions() *SubscriberCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"messaging", "pull"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Canceled, + codes.DeadlineExceeded, + codes.Internal, + codes.ResourceExhausted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"streaming_messaging", "pull"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Canceled, + codes.DeadlineExceeded, + codes.Internal, + codes.ResourceExhausted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &SubscriberCallOptions{ + CreateSubscription: retry[[2]string{"default", "idempotent"}], + GetSubscription: retry[[2]string{"default", "idempotent"}], + UpdateSubscription: retry[[2]string{"default", "idempotent"}], + ListSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteSubscription: retry[[2]string{"default", "idempotent"}], + ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], + Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], + Pull: retry[[2]string{"messaging", "pull"}], + StreamingPull: retry[[2]string{"streaming_messaging", "pull"}], + ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], + ListSnapshots: retry[[2]string{"default", "idempotent"}], + CreateSnapshot: retry[[2]string{"default", "idempotent"}], + UpdateSnapshot: retry[[2]string{"default", "idempotent"}], + DeleteSnapshot: retry[[2]string{"default", "idempotent"}], + Seek: retry[[2]string{"default", "non_idempotent"}], + } +} + +// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API. +type SubscriberClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + subscriberClient pubsubpb.SubscriberClient + + // The call options for this service. + CallOptions *SubscriberCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewSubscriberClient creates a new subscriber client. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the Pull method. 
+func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &SubscriberClient{ + conn: conn, + CallOptions: defaultSubscriberCallOptions(), + + subscriberClient: pubsubpb.NewSubscriberClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SubscriberClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// SubscriberProjectPath returns the path for the project resource. +func SubscriberProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SubscriberSnapshotPath returns the path for the snapshot resource. +func SubscriberSnapshotPath(project, snapshot string) string { + return "" + + "projects/" + + project + + "/snapshots/" + + snapshot + + "" +} + +// SubscriberSubscriptionPath returns the path for the subscription resource. +func SubscriberSubscriptionPath(project, subscription string) string { + return "" + + "projects/" + + project + + "/subscriptions/" + + subscription + + "" +} + +// SubscriberTopicPath returns the path for the topic resource. +func SubscriberTopicPath(project, topic string) string { + return "" + + "projects/" + + project + + "/topics/" + + topic + + "" +} + +func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +// CreateSubscription creates a subscription to a given topic. +// If the subscription already exists, returns ALREADY_EXISTS. +// If the corresponding topic doesn't exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic, conforming +// to the +// resource name format (at https://cloud.google.com/pubsub/docs/overview#names). +// The generated name is populated in the returned Subscription object. +// Note that for REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...) 
+ return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSubscription gets the configuration details of a subscription. +func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSubscription updates an existing subscription. Note that certain properties of a +// subscription, such as its topic, are not modifiable. +// NOTE: The style guide requires body: "subscription" instead of body: "*". +// Keeping the latter for internal consistency in V1, however it should be +// corrected in V2. See +// https://cloud.google.com/apis/design/standard_methods#update for details. +func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...) + it := &SubscriptionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { + var resp *pubsubpb.ListSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteSubscription deletes an existing subscription. All messages retained in the subscription +// are immediately dropped. Calls to Pull after deletion will return +// NOT_FOUND. 
After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription or its topic unless the same topic is specified. +func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. Note that this does not modify the +// subscription-level ackDeadlineSeconds used for subsequent messages. +func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Acknowledge acknowledges the messages associated with the ack_ids in the +// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages +// from the subscription. +// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Pull pulls messages from the server. Returns an empty list if there are no +// messages available in the backlog. The server may return UNAVAILABLE if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) + var resp *pubsubpb.PullResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// StreamingPull (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will +// respond with UNIMPLEMENTED errors unless you have been invited to test +// this feature. 
Contact cloud-pubsub@google.com with any questions. +// +// Establishes a stream with the server, which sends messages down to the +// client. The client streams acknowledgements and ack deadline modifications +// back to the server. The server will close the stream and return the status +// on any error. The server may close the stream with status OK to reassign +// server-side resources, in which case, the client should re-establish the +// stream. UNAVAILABLE may also be returned in the case of a transient error +// (e.g., a server restart). These should also be retried by the client. Flow +// control can be achieved by configuring the underlying RPC channel. +func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) + var resp pubsubpb.Subscriber_StreamingPullClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ModifyPushConfig modifies the PushConfig for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty PushConfig) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the PushConfig. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ListSnapshots lists the existing snapshots. +func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) + it := &SnapshotIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { + var resp *pubsubpb.ListSnapshotsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Snapshots, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateSnapshot creates a snapshot from the requested subscription. +// If the snapshot already exists, returns ALREADY_EXISTS. +// If the requested subscription doesn't exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this snapshot on the same project as the subscription, conforming +// to the +// resource name format (at https://cloud.google.com/pubsub/docs/overview#names). +// The generated name is populated in the returned Snapshot object. +// Note that for REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSnapshot updates an existing snapshot. Note that certain properties of a snapshot +// are not modifiable. +// NOTE: The style guide requires body: "snapshot" instead of body: "*". +// Keeping the latter for internal consistency in V1, however it should be +// corrected in V2. See +// https://cloud.google.com/apis/design/standard_methods#update for details. +func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot +// are immediately dropped. After a snapshot is deleted, a new one may be +// created with the same name, but the new one has no association with the old +// snapshot or its subscription, unless the same subscription is specified. +func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Seek seeks an existing subscription to a point in time or to a given snapshot, +// whichever is provided in the request. 
+func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) + var resp *pubsubpb.SeekResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SnapshotIterator manages a stream of *pubsubpb.Snapshot. +type SnapshotIterator struct { + items []*pubsubpb.Snapshot + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnapshotIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) { + var item *pubsubpb.Snapshot + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnapshotIterator) bufLen() int { + return len(it.items) +} + +func (it *SnapshotIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SubscriptionIterator manages a stream of *pubsubpb.Subscription. +type SubscriptionIterator struct { + items []*pubsubpb.Subscription + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { + var item *pubsubpb.Subscription + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SubscriptionIterator) bufLen() int { + return len(it.items) +} + +func (it *SubscriptionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go new file mode 100644 index 0000000..6241eb3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go @@ -0,0 +1,358 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub_test + +import ( + "io" + + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewSubscriberClient() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleSubscriberClient_SubscriptionIAM() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + subscription := &pubsubpb.Subscription{} + h := c.SubscriptionIAM(subscription) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExampleSubscriberClient_TopicIAM() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + topic := &pubsubpb.Topic{} + h := c.TopicIAM(topic) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExampleSubscriberClient_CreateSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Subscription{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_GetSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetSubscriptionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_UpdateSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &pubsubpb.UpdateSubscriptionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_ListSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_DeleteSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteSubscriptionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_ModifyAckDeadline() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyAckDeadlineRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyAckDeadline(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Acknowledge() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.AcknowledgeRequest{ + // TODO: Fill request struct fields. + } + err = c.Acknowledge(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Pull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PullRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Pull(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_StreamingPull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingPull(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*pubsubpb.StreamingPullRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_ModifyPushConfig() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyPushConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyPushConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_ListSnapshots() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListSnapshotsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSnapshots(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp
+ }
+}
+
+func ExampleSubscriberClient_CreateSnapshot() {
+ ctx := context.Background()
+ c, err := pubsub.NewSubscriberClient(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ req := &pubsubpb.CreateSnapshotRequest{
+ // TODO: Fill request struct fields.
+ }
+ resp, err := c.CreateSnapshot(ctx, req)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: Use resp.
+ _ = resp
+}
+
+func ExampleSubscriberClient_UpdateSnapshot() {
+ ctx := context.Background()
+ c, err := pubsub.NewSubscriberClient(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ req := &pubsubpb.UpdateSnapshotRequest{
+ // TODO: Fill request struct fields.
+ }
+ resp, err := c.UpdateSnapshot(ctx, req)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: Use resp.
+ _ = resp
+}
+
+func ExampleSubscriberClient_DeleteSnapshot() {
+ ctx := context.Background()
+ c, err := pubsub.NewSubscriberClient(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ req := &pubsubpb.DeleteSnapshotRequest{
+ // TODO: Fill request struct fields.
+ }
+ err = c.DeleteSnapshot(ctx, req)
+ if err != nil {
+ // TODO: Handle error.
+ }
+}
+
+func ExampleSubscriberClient_Seek() {
+ ctx := context.Background()
+ c, err := pubsub.NewSubscriberClient(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ req := &pubsubpb.SeekRequest{
+ // TODO: Fill request struct fields.
+ }
+ resp, err := c.Seek(ctx, req)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: Use resp.
+ _ = resp
+}
diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go
new file mode 100644
index 0000000..b00f364
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/doc.go
@@ -0,0 +1,120 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
+messages, hiding the details of the underlying server RPCs. Google Cloud
+Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
+and receivers.
+
+Note: This package is in beta. Some backwards-incompatible changes may occur.
+
+More information about Google Cloud Pub/Sub is available at
+https://cloud.google.com/pubsub/docs
+
+Publishing
+
+Google Cloud Pub/Sub messages are published to topics. Topics may be created
+using the pubsub package like so:
+
+ topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name")
+
+Messages may then be published to a topic:
+
+ res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
+
+Publish queues the message for publishing and returns immediately. When enough
+messages have accumulated, or enough time has elapsed, the batch of messages is
+sent to the Pub/Sub service.
+
+Publish returns a PublishResult, which behaves like a future: its Get method
+blocks until the message has been sent to the service.
+
+The first time you call Publish on a topic, goroutines are started in the
+background. To clean up these goroutines, call Stop:
+
+ topic.Stop()
+
+Receiving
+
+To receive messages published to a topic, clients create subscriptions
+to the topic. There may be more than one subscription per topic; each message
+that is published to the topic will be delivered to all of its subscriptions.
+
+Subscriptions may be created like so:
+
+ sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
+ pubsub.SubscriptionConfig{Topic: topic})
+
+Messages are then consumed from a subscription via callback.
+
+ err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) {
+ log.Printf("Got message: %s", m.Data)
+ m.Ack()
+ })
+ if err != nil {
+ // Handle error.
+ }
+
+The callback is invoked concurrently by multiple goroutines, maximizing
+throughput. To terminate a call to Receive, cancel its context.
+
+Once client code has processed the message, it must call Message.Ack, otherwise
+the message will eventually be redelivered. As an optimization, if the client
+cannot or doesn't want to process the message, it can call Message.Nack to
+speed redelivery. For more information and configuration options, see
+"Deadlines" below.
+
+Note: It is possible for Messages to be redelivered, even if Message.Ack has
+been called. Client code must be robust to multiple deliveries of messages.
+
+Deadlines
+
+The default pubsub deadlines are suitable for most use cases, but may be
+overridden. This section describes the tradeoffs that should be considered
+when overriding the defaults.
+
+Behind the scenes, each message returned by the Pub/Sub server has an
+associated lease, known as an "ACK deadline". Unless a message is acknowledged
+within the ACK deadline, or the client requests that the ACK deadline be
+extended, the message will become eligible for redelivery.
+As a convenience, the pubsub package will automatically extend deadlines until
+either:
+ * Message.Ack or Message.Nack is called, or
+ * the "MaxExtension" period elapses from the time the message is fetched from the server.
+
+The initial ACK deadline given to each message defaults to 10 seconds, but may
+be overridden during subscription creation. Selecting an ACK deadline is a
+tradeoff between message redelivery latency and RPC volume. If the pubsub
+package fails to acknowledge or extend a message (e.g. due to unexpected
+termination of the process), a shorter ACK deadline will generally result in
+faster message redelivery by the Pub/Sub system. However, a short ACK deadline
+may also increase the number of deadline extension RPCs that the pubsub package
+sends to the server.
+
+The default max extension period is DefaultReceiveSettings.MaxExtension, and can
+be overridden by setting Subscription.ReceiveSettings.MaxExtension. Selecting a
+max extension period is a tradeoff between the speed at which client code must
+process messages, and the redelivery delay if messages fail to be acknowledged
+(e.g. because client code neglects to do so). Using a large MaxExtension
+increases the available time for client code to process messages. However, if
+the client code neglects to call Message.Ack/Nack, a large MaxExtension will
+increase the delay before the message is redelivered.
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
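+
+As a closing illustration, the deadline options described above can be combined
+when creating a subscription (a sketch only; the values are arbitrary and the
+variable names follow the earlier snippets):
+
+ sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
+ pubsub.SubscriptionConfig{Topic: topic, AckDeadline: 20 * time.Second})
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub.ReceiveSettings.MaxExtension = 2 * time.Minute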
+*/ +package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/endtoend_test.go b/vendor/cloud.google.com/go/pubsub/endtoend_test.go new file mode 100644 index 0000000..a30bdfb --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/endtoend_test.go @@ -0,0 +1,244 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "bytes" + "fmt" + "log" + "math/rand" + "os" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/option" +) + +const timeout = time.Minute * 10 +const ackDeadline = time.Second * 10 + +const nMessages = 1e4 + +// Buffer log messages to debug failures. +var logBuf bytes.Buffer + +// TestEndToEnd pumps many messages into a topic and tests that they are all +// delivered to each subscription for the topic. It also tests that messages +// are not unexpectedly redelivered. +func TestEndToEnd(t *testing.T) { + t.Parallel() + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + log.SetOutput(&logBuf) + ctx := context.Background() + ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + now := time.Now() + topicName := fmt.Sprintf("endtoend-%d", now.UnixNano()) + subPrefix := fmt.Sprintf("endtoend-%d", now.UnixNano()) + + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("Creating client error: %v", err) + } + + var topic *Topic + if topic, err = client.CreateTopic(ctx, topicName); err != nil { + t.Fatalf("CreateTopic error: %v", err) + } + defer topic.Delete(ctx) + + // Two subscriptions to the same topic. + var subs [2]*Subscription + for i := 0; i < len(subs); i++ { + subs[i], err = client.CreateSubscription(ctx, fmt.Sprintf("%s-%d", subPrefix, i), SubscriptionConfig{ + Topic: topic, + AckDeadline: ackDeadline, + }) + if err != nil { + t.Fatalf("CreateSub error: %v", err) + } + defer subs[i].Delete(ctx) + } + + ids, err := publish(ctx, topic, nMessages) + topic.Stop() + if err != nil { + t.Fatalf("publish: %v", err) + } + wantCounts := make(map[string]int) + for _, id := range ids { + wantCounts[id] = 1 + } + + // recv provides an indication that messages are still arriving. + recv := make(chan struct{}) + // We have two subscriptions to our topic. + // Each subscription will get a copy of each published message. 
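+ // The two consumers created below behave differently: the first receives
+ // over one long interval, while the second receives over several short
+ // intervals around the ack deadline, exercising redelivery of messages that
+ // were in flight when a Receive call was canceled.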
+ var wg sync.WaitGroup + cctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + consumers := []*consumer{ + {counts: make(map[string]int), recv: recv, durations: []time.Duration{time.Hour}}, + {counts: make(map[string]int), recv: recv, + durations: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2, time.Hour}}, + } + for i, con := range consumers { + con := con + sub := subs[i] + wg.Add(1) + go func() { + defer wg.Done() + con.consume(t, cctx, sub) + }() + } + // Wait for a while after the last message before declaring quiescence. + // We wait a multiple of the ack deadline, for two reasons: + // 1. To detect if messages are redelivered after having their ack + // deadline extended. + // 2. To wait for redelivery of messages that were en route when a Receive + // is canceled. This can take considerably longer than the ack deadline. + quiescenceDur := ackDeadline * 6 + quiescenceTimer := time.NewTimer(quiescenceDur) + +loop: + for { + select { + case <-recv: + // Reset timer so we wait quiescenceDur after the last message. + // See https://godoc.org/time#Timer.Reset for why the Stop + // and channel drain are necessary. + if !quiescenceTimer.Stop() { + <-quiescenceTimer.C + } + quiescenceTimer.Reset(quiescenceDur) + + case <-quiescenceTimer.C: + cancel() + log.Println("quiesced") + break loop + + case <-cctx.Done(): + t.Fatal("timed out") + } + } + wg.Wait() + ok := true + for i, con := range consumers { + if got, want := con.counts, wantCounts; !testutil.Equal(got, want) { + t.Errorf("%d: message counts: %v\n", i, diff(got, want)) + ok = false + } + } + if !ok { + logBuf.WriteTo(os.Stdout) + } +} + +// publish publishes n messages to topic, and returns the published message IDs. +func publish(ctx context.Context, topic *Topic, n int) ([]string, error) { + var rs []*PublishResult + for i := 0; i < n; i++ { + m := &Message{Data: []byte(fmt.Sprintf("msg %d", i))} + rs = append(rs, topic.Publish(ctx, m)) + } + var ids []string + for _, r := range rs { + id, err := r.Get(ctx) + if err != nil { + return nil, err + } + ids = append(ids, id) + } + return ids, nil +} + +// consumer consumes messages according to its configuration. +type consumer struct { + durations []time.Duration + + // A value is sent to recv each time Inc is called. + recv chan struct{} + + mu sync.Mutex + counts map[string]int + total int +} + +// consume reads messages from a subscription, and keeps track of what it receives in mc. +// After consume returns, the caller should wait on wg to ensure that no more updates to mc will be made. +func (c *consumer) consume(t *testing.T, ctx context.Context, sub *Subscription) { + for _, dur := range c.durations { + ctx2, cancel := context.WithTimeout(ctx, dur) + defer cancel() + id := sub.name[len(sub.name)-2:] + log.Printf("%s: start receive", id) + prev := c.total + err := sub.Receive(ctx2, c.process) + log.Printf("%s: end receive; read %d", id, c.total-prev) + if err != nil { + t.Errorf("error from Receive: %v", err) + return + } + select { + case <-ctx.Done(): + return + default: + } + } +} + +// process handles a message and records it in mc. +func (c *consumer) process(_ context.Context, m *Message) { + c.mu.Lock() + c.counts[m.ID] += 1 + c.total++ + c.mu.Unlock() + c.recv <- struct{}{} + // Simulate time taken to process m, while continuing to process more messages. + // Some messages will need to have their ack deadline extended due to this delay. 
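+ // The delay below is uniform over [0, 3*ackDeadline), so roughly two thirds
+ // of the messages outlive their initial deadline and exercise the
+ // keep-alive path.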
+ delay := rand.Intn(int(ackDeadline * 3))
+ time.AfterFunc(time.Duration(delay), m.Ack)
+}
+
+// diff returns counts of the differences between got and want.
+func diff(got, want map[string]int) map[string]int {
+ ids := make(map[string]struct{})
+ for k := range got {
+ ids[k] = struct{}{}
+ }
+ for k := range want {
+ ids[k] = struct{}{}
+ }
+
+ gotWantCount := make(map[string]int)
+ for k := range ids {
+ if got[k] == want[k] {
+ continue
+ }
+ desc := fmt.Sprintf("<got: %v ; want: %v>", got[k], want[k])
+ gotWantCount[desc] += 1
+ }
+ return gotWantCount
+}
diff --git a/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go
new file mode 100644
index 0000000..da74b1b
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go
@@ -0,0 +1,54 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub_test
+
+import (
+ "fmt"
+
+ "cloud.google.com/go/pubsub"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+)
+
+func ExampleClient_Subscriptions() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // List all subscriptions of the project.
+ it := client.Subscriptions(ctx)
+ _ = it // TODO: iterate using Next.
+}
+
+func ExampleSubscriptionIterator_Next() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // List all subscriptions of the project.
+ it := client.Subscriptions(ctx)
+ for {
+ sub, err := it.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(sub)
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/example_test.go b/vendor/cloud.google.com/go/pubsub/example_test.go
new file mode 100644
index 0000000..4e8e1a5
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/example_test.go
@@ -0,0 +1,281 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub_test
+
+import (
+ "fmt"
+ "time"
+
+ "cloud.google.com/go/pubsub"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+)
+
+func ExampleNewClient() {
+ ctx := context.Background()
+ _, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+ // See the other examples to learn how to use the Client.
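+ // A longer-lived program would keep the returned client and release its
+ // resources when finished, e.g. with defer client.Close().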
+} + +func ExampleClient_CreateTopic() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + _ = topic // TODO: use the topic. +} + +// Use TopicInProject to refer to a topic that is not in the client's project, such +// as a public topic. +func ExampleClient_TopicInProject() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + topic := client.TopicInProject("topicName", "another-project-id") + _ = topic // TODO: use the topic. +} + +func ExampleClient_CreateSubscription() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + // Create a new subscription to the previously created topic + // with the given name. + sub, err := client.CreateSubscription(ctx, "subName", pubsub.SubscriptionConfig{ + Topic: topic, + AckDeadline: 10 * time.Second, + }) + if err != nil { + // TODO: Handle error. + } + + _ = sub // TODO: use the subscription. +} + +func ExampleTopic_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + if err := topic.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTopic_Exists() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + ok, err := topic.Exists(ctx) + if err != nil { + // TODO: Handle error. + } + if !ok { + // Topic doesn't exist. + } +} + +func ExampleTopic_Publish() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + defer topic.Stop() + var results []*pubsub.PublishResult + r := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), + }) + results = append(results, r) + // Do other work ... + for _, r := range results { + id, err := r.Get(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("Published a message with a message ID: %s\n", id) + } +} + +func ExampleTopic_Subscriptions() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + topic := client.Topic("topic-name") + // List all subscriptions of the topic (maybe of multiple projects). + for subs := topic.Subscriptions(ctx); ; { + sub, err := subs.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + _ = sub // TODO: use the subscription. + } +} + +func ExampleSubscription_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + sub := client.Subscription("subName") + if err := sub.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscription_Exists() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ }
+
+ sub := client.Subscription("subName")
+ ok, err := sub.Exists(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ if !ok {
+ // Subscription doesn't exist.
+ }
+}
+
+func ExampleSubscription_Config() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ config, err := sub.Config(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(config)
+}
+
+func ExampleSubscription_Receive() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
+ // TODO: Handle message.
+ // NOTE: May be called concurrently; synchronize access to shared memory.
+ m.Ack()
+ })
+ if err != context.Canceled {
+ // TODO: Handle error.
+ }
+}
+
+// This example shows how to configure keepalive so that unacknowledged messages
+// expire quickly, allowing other subscribers to take them.
+func ExampleSubscription_Receive_maxExtension() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ // This program is expected to process and acknowledge messages in 30 seconds. If
+ // not, the Pub/Sub API will assume the message is not acknowledged.
+ sub.ReceiveSettings.MaxExtension = 30 * time.Second
+ err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
+ // TODO: Handle message.
+ m.Ack()
+ })
+ if err != context.Canceled {
+ // TODO: Handle error.
+ }
+}
+
+// This example shows how to throttle Subscription.Receive, which aims for high
+// throughput by default. By limiting the number of messages and/or bytes being
+// processed at once, you can bound your program's resource consumption.
+func ExampleSubscription_Receive_maxOutstanding() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ sub.ReceiveSettings.MaxOutstandingMessages = 5
+ sub.ReceiveSettings.MaxOutstandingBytes = 10e6
+ err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
+ // TODO: Handle message.
+ m.Ack()
+ })
+ if err != context.Canceled {
+ // TODO: Handle error.
+ }
+}
+
+func ExampleSubscription_Update() {
+ ctx := context.Background()
+ client, err := pubsub.NewClient(ctx, "project-id")
+ if err != nil {
+ // TODO: Handle error.
+ }
+ sub := client.Subscription("subName")
+ subConfig, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
+ PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
+ })
+ if err != nil {
+ // TODO: Handle error.
+ }
+ _ = subConfig // TODO: Use SubscriptionConfig.
+}
diff --git a/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go
new file mode 100644
index 0000000..0c227ed
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Topics() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Topics(ctx) + _ = it // TODO: iterate using Next. +} + +func ExampleTopicIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all topics. + it := client.Topics(ctx) + for { + t, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(t) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/fake_test.go b/vendor/cloud.google.com/go/pubsub/fake_test.go new file mode 100644 index 0000000..693b151 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/fake_test.go @@ -0,0 +1,147 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// This file provides a fake/mock in-memory pubsub server. +// (Really just a mock at the moment, but we hope to turn it into +// more of a fake.) + +import ( + "io" + "sync" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +type fakeServer struct { + pb.PublisherServer + pb.SubscriberServer + + Addr string + + mu sync.Mutex + Acked map[string]bool // acked message IDs + Deadlines map[string]int32 // deadlines by message ID + pullResponses []*pullResponse + wg sync.WaitGroup +} + +type pullResponse struct { + msgs []*pb.ReceivedMessage + err error +} + +func newFakeServer() (*fakeServer, error) { + srv, err := testutil.NewServer() + if err != nil { + return nil, err + } + fake := &fakeServer{ + Addr: srv.Addr, + Acked: map[string]bool{}, + Deadlines: map[string]int32{}, + } + pb.RegisterPublisherServer(srv.Gsrv, fake) + pb.RegisterSubscriberServer(srv.Gsrv, fake) + srv.Start() + return fake, nil +} + +// Each call to addStreamingPullMessages results in one StreamingPullResponse. 
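+// For example, a test might queue one batch of messages and then end the
+// stream cleanly (a sketch; srv is a *fakeServer from newFakeServer and msgs
+// a []*pb.ReceivedMessage):
+//
+// srv.addStreamingPullMessages(msgs)
+// srv.addStreamingPullError(io.EOF)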
+func (s *fakeServer) addStreamingPullMessages(msgs []*pb.ReceivedMessage) { + s.pullResponses = append(s.pullResponses, &pullResponse{msgs, nil}) +} + +func (s *fakeServer) addStreamingPullError(err error) { + s.pullResponses = append(s.pullResponses, &pullResponse{nil, err}) +} + +func (s *fakeServer) wait() { + s.wg.Wait() +} + +func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) error { + s.wg.Add(1) + defer s.wg.Done() + errc := make(chan error, 1) + s.wg.Add(1) + go func() { + defer s.wg.Done() + for { + req, err := stream.Recv() + if err != nil { + errc <- err + return + } + s.mu.Lock() + for _, id := range req.AckIds { + s.Acked[id] = true + } + for i, id := range req.ModifyDeadlineAckIds { + s.Deadlines[id] = req.ModifyDeadlineSeconds[i] + } + s.mu.Unlock() + } + }() + // Send responses. + for { + s.mu.Lock() + if len(s.pullResponses) == 0 { + s.mu.Unlock() + // Nothing to send, so wait for the client to shut down the stream. + err := <-errc // a real error, or at least EOF + if err == io.EOF { + return nil + } + return err + } + pr := s.pullResponses[0] + s.pullResponses = s.pullResponses[1:] + s.mu.Unlock() + if pr.err != nil { + // Add a slight delay to ensure the server receives any + // messages en route from the client before shutting down the stream. + // This reduces flakiness of tests involving retry. + time.Sleep(200 * time.Millisecond) + } + if pr.err == io.EOF { + return nil + } + if pr.err != nil { + return pr.err + } + // Return any error from Recv. + select { + case err := <-errc: + return err + default: + } + res := &pb.StreamingPullResponse{ReceivedMessages: pr.msgs} + if err := stream.Send(res); err != nil { + return err + } + } +} + +func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { + return &pb.Subscription{ + Name: req.Subscription, + AckDeadlineSeconds: 10, + PushConfig: &pb.PushConfig{}, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go new file mode 100644 index 0000000..0fd7bd6 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go @@ -0,0 +1,106 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "golang.org/x/net/context" + "golang.org/x/sync/semaphore" +) + +// flowController implements flow control for Subscription.Receive. +type flowController struct { + maxSize int // max total size of messages + semCount, semSize *semaphore.Weighted // enforces max number and size of messages +} + +// newFlowController creates a new flowController that ensures no more than +// maxCount messages or maxSize bytes are outstanding at once. If maxCount or +// maxSize is < 1, then an unlimited number of messages or bytes is permitted, +// respectively. 
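+//
+// For example (illustrative values): newFlowController(100, 1e8) admits at
+// most 100 messages and 100MB outstanding at once, while newFlowController(0, 0)
+// imposes no limit on either and never blocks.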
+func newFlowController(maxCount, maxSize int) *flowController { + fc := &flowController{ + maxSize: maxSize, + semCount: nil, + semSize: nil, + } + if maxCount > 0 { + fc.semCount = semaphore.NewWeighted(int64(maxCount)) + } + if maxSize > 0 { + fc.semSize = semaphore.NewWeighted(int64(maxSize)) + } + return fc +} + +// acquire blocks until one message of size bytes can proceed or ctx is done. +// It returns nil in the first case, or ctx.Err() in the second. +// +// acquire allows large messages to proceed by treating a size greater than maxSize +// as if it were equal to maxSize. +func (f *flowController) acquire(ctx context.Context, size int) error { + if f.semCount != nil { + if err := f.semCount.Acquire(ctx, 1); err != nil { + return err + } + } + if f.semSize != nil { + if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { + if f.semCount != nil { + f.semCount.Release(1) + } + return err + } + } + return nil +} + +// tryAcquire returns false if acquire would block. Otherwise, it behaves like +// acquire and returns true. +// +// tryAcquire allows large messages to proceed by treating a size greater than +// maxSize as if it were equal to maxSize. +func (f *flowController) tryAcquire(size int) bool { + if f.semCount != nil { + if !f.semCount.TryAcquire(1) { + return false + } + } + if f.semSize != nil { + if !f.semSize.TryAcquire(f.bound(size)) { + if f.semCount != nil { + f.semCount.Release(1) + } + return false + } + } + return true +} + +// release notes that one message of size bytes is no longer outstanding. +func (f *flowController) release(size int) { + if f.semCount != nil { + f.semCount.Release(1) + } + if f.semSize != nil { + f.semSize.Release(f.bound(size)) + } +} + +func (f *flowController) bound(size int) int64 { + if size > f.maxSize { + return int64(f.maxSize) + } + return int64(size) +} diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller_test.go b/vendor/cloud.google.com/go/pubsub/flow_controller_test.go new file mode 100644 index 0000000..a0f0260 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/flow_controller_test.go @@ -0,0 +1,236 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "fmt" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +func TestFlowControllerCancel(t *testing.T) { + // Test canceling a flow controller's context. + t.Parallel() + fc := newFlowController(3, 10) + if err := fc.acquire(context.Background(), 5); err != nil { + t.Fatal(err) + } + // Experiment: a context that times out should always return an error. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond) + defer cancel() + if err := fc.acquire(ctx, 6); err != context.DeadlineExceeded { + t.Fatalf("got %v, expected DeadlineExceeded", err) + } + // Control: a context that is not done should always return nil. 
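+ // Release the 5 bytes acquired earlier in the background, so the acquire(6)
+ // below can fit within maxSize (10) and return nil.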
+ go func() { + time.Sleep(5 * time.Millisecond) + fc.release(5) + }() + if err := fc.acquire(context.Background(), 6); err != nil { + t.Errorf("got %v, expected nil", err) + } +} + +func TestFlowControllerLargeRequest(t *testing.T) { + // Large requests succeed, consuming the entire allotment. + t.Parallel() + fc := newFlowController(3, 10) + err := fc.acquire(context.Background(), 11) + if err != nil { + t.Fatal(err) + } +} + +func TestFlowControllerNoStarve(t *testing.T) { + // A large request won't starve, because the flowController is + // (best-effort) FIFO. + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + fc := newFlowController(10, 10) + first := make(chan int) + for i := 0; i < 20; i++ { + go func() { + for { + if err := fc.acquire(ctx, 1); err != nil { + if err != context.Canceled { + t.Error(err) + } + return + } + select { + case first <- 1: + default: + } + fc.release(1) + } + }() + } + <-first // Wait until the flowController's state is non-zero. + if err := fc.acquire(ctx, 11); err != nil { + t.Errorf("got %v, want nil", err) + } +} + +func TestFlowControllerSaturation(t *testing.T) { + t.Parallel() + const ( + maxCount = 6 + maxSize = 10 + ) + for _, test := range []struct { + acquireSize int + wantCount, wantSize int64 + }{ + { + // Many small acquires cause the flow controller to reach its max count. + acquireSize: 1, + wantCount: 6, + wantSize: 6, + }, + { + // Five acquires of size 2 will cause the flow controller to reach its max size, + // but not its max count. + acquireSize: 2, + wantCount: 5, + wantSize: 10, + }, + { + // If the requests are the right size (relatively prime to maxSize), + // the flow controller will not saturate on size. (In this case, not on count either.) + acquireSize: 3, + wantCount: 3, + wantSize: 9, + }, + } { + fc := newFlowController(maxCount, maxSize) + // Atomically track flow controller state. + var curCount, curSize int64 + success := errors.New("") + // Time out if wantSize or wantCount is never reached. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + g, ctx := errgroup.WithContext(ctx) + for i := 0; i < 10; i++ { + g.Go(func() error { + var hitCount, hitSize bool + // Run at least until we hit the expected values, and at least + // for enough iterations to exceed them if the flow controller + // is broken. + for i := 0; i < 100 || !hitCount || !hitSize; i++ { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := fc.acquire(ctx, test.acquireSize); err != nil { + return err + } + c := atomic.AddInt64(&curCount, 1) + if c > test.wantCount { + return fmt.Errorf("count %d exceeds want %d", c, test.wantCount) + } + if c == test.wantCount { + hitCount = true + } + s := atomic.AddInt64(&curSize, int64(test.acquireSize)) + if s > test.wantSize { + return fmt.Errorf("size %d exceeds want %d", s, test.wantSize) + } + if s == test.wantSize { + hitSize = true + } + time.Sleep(5 * time.Millisecond) // Let other goroutines make progress. + if atomic.AddInt64(&curCount, -1) < 0 { + return errors.New("negative count") + } + if atomic.AddInt64(&curSize, -int64(test.acquireSize)) < 0 { + return errors.New("negative size") + } + fc.release(test.acquireSize) + } + return success + }) + } + if err := g.Wait(); err != success { + t.Errorf("%+v: %v", test, err) + continue + } + } +} + +func TestFlowControllerTryAcquire(t *testing.T) { + fc := newFlowController(3, 10) + + // Successfully tryAcquire 4 bytes. 
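+ // fc was created with maxCount=3 and maxSize=10, so the size limit drives
+ // the three attempts below: 4 fits, 4+7 would exceed 10, and 4+6 is exactly 10.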
+ if !fc.tryAcquire(4) { + t.Error("got false, wanted true") + } + + // Fail to tryAcquire 7 bytes. + if fc.tryAcquire(7) { + t.Error("got true, wanted false") + } + + // Successfully tryAcquire 6 byte. + if !fc.tryAcquire(6) { + t.Error("got false, wanted true") + } +} + +func TestFlowControllerUnboundedCount(t *testing.T) { + ctx := context.Background() + fc := newFlowController(0, 10) + + // Successfully acquire 4 bytes. + if err := fc.acquire(ctx, 4); err != nil { + t.Errorf("got %v, wanted no error", err) + } + + // Successfully tryAcquire 4 bytes. + if !fc.tryAcquire(4) { + t.Error("got false, wanted true") + } + + // Fail to tryAcquire 3 bytes. + if fc.tryAcquire(3) { + t.Error("got true, wanted false") + } +} + +func TestFlowControllerUnboundedBytes(t *testing.T) { + ctx := context.Background() + fc := newFlowController(2, 0) + + // Successfully acquire 4GB. + if err := fc.acquire(ctx, 4e9); err != nil { + t.Errorf("got %v, wanted no error", err) + } + + // Successfully tryAcquire 4GB bytes. + if !fc.tryAcquire(4e9) { + t.Error("got false, wanted true") + } + + // Fail to tryAcquire a third message. + if fc.tryAcquire(3) { + t.Error("got true, wanted false") + } +} diff --git a/vendor/cloud.google.com/go/pubsub/integration_test.go b/vendor/cloud.google.com/go/pubsub/integration_test.go new file mode 100644 index 0000000..d94eba1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/integration_test.go @@ -0,0 +1,362 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var ( + topicIDs = testutil.NewUIDSpace("topic") + subIDs = testutil.NewUIDSpace("sub") +) + +// messageData is used to hold the contents of a message so that it can be compared against the contents +// of another message without regard to irrelevant fields. +type messageData struct { + ID string + Data []byte + Attributes map[string]string +} + +func extractMessageData(m *Message) *messageData { + return &messageData{ + ID: m.ID, + Data: m.Data, + Attributes: m.Attributes, + } +} + +func integrationTestClient(t *testing.T, ctx context.Context) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + projID := testutil.ProjID() + if projID == "" { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, projID, option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("Creating client error: %v", err) + } + return client +} + +func TestAll(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + topic, err := client.CreateTopic(ctx, topicIDs.New()) + if err != nil { + t.Errorf("CreateTopic error: %v", err) + } + defer topic.Stop() + + var sub *Subscription + if sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}); err != nil { + t.Errorf("CreateSub error: %v", err) + } + + exists, err := topic.Exists(ctx) + if err != nil { + t.Fatalf("TopicExists error: %v", err) + } + if !exists { + t.Errorf("topic %v should exist, but it doesn't", topic) + } + + exists, err = sub.Exists(ctx) + if err != nil { + t.Fatalf("SubExists error: %v", err) + } + if !exists { + t.Errorf("subscription %s should exist, but it doesn't", sub.ID()) + } + + var msgs []*Message + for i := 0; i < 10; i++ { + text := fmt.Sprintf("a message with an index %d", i) + attrs := make(map[string]string) + attrs["foo"] = "bar" + msgs = append(msgs, &Message{ + Data: []byte(text), + Attributes: attrs, + }) + } + + // Publish the messages. + type pubResult struct { + m *Message + r *PublishResult + } + var rs []pubResult + for _, m := range msgs { + r := topic.Publish(ctx, m) + rs = append(rs, pubResult{m, r}) + } + want := make(map[string]*messageData) + for _, res := range rs { + id, err := res.r.Get(ctx) + if err != nil { + t.Fatal(err) + } + md := extractMessageData(res.m) + md.ID = id + want[md.ID] = md + } + + // Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available. 
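+ // (The CancelFunc is discarded here; the context is released when its
+ // deadline fires.)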
+ timeoutCtx, _ := context.WithTimeout(ctx, time.Minute) + gotMsgs, err := pullN(timeoutCtx, sub, len(want), func(ctx context.Context, m *Message) { + m.Ack() + }) + if err != nil { + t.Fatalf("Pull: %v", err) + } + got := make(map[string]*messageData) + for _, m := range gotMsgs { + md := extractMessageData(m) + got[md.ID] = md + } + if !testutil.Equal(got, want) { + t.Errorf("messages: got: %v ; want: %v", got, want) + } + + if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok { + t.Errorf("topic IAM: %s", msg) + } + if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok { + t.Errorf("sub IAM: %s", msg) + } + + snap, err := sub.createSnapshot(ctx, "") + if err != nil { + t.Fatalf("CreateSnapshot error: %v", err) + } + + timeoutCtx, _ = context.WithTimeout(ctx, time.Minute) + err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { + snapIt := client.snapshots(timeoutCtx) + for { + s, err := snapIt.Next() + if err == nil && s.name == snap.name { + return true, nil + } + if err == iterator.Done { + return false, fmt.Errorf("cannot find snapshot: %q", snap.name) + } + if err != nil { + return false, err + } + } + }) + if err != nil { + t.Error(err) + } + + err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { + err := sub.seekToSnapshot(timeoutCtx, snap.snapshot) + return err == nil, err + }) + if err != nil { + t.Error(err) + } + + err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { + err := sub.seekToTime(timeoutCtx, time.Now()) + return err == nil, err + }) + if err != nil { + t.Error(err) + } + + err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { + snapHandle := client.snapshot(snap.ID()) + err := snapHandle.delete(timeoutCtx) + return err == nil, err + }) + if err != nil { + t.Error(err) + } + + if err := sub.Delete(ctx); err != nil { + t.Errorf("DeleteSub error: %v", err) + } + + if err := topic.Delete(ctx); err != nil { + t.Errorf("DeleteTopic error: %v", err) + } +} + +// IAM tests. +// NOTE: for these to succeed, the test runner identity must have the Pub/Sub Admin or Owner roles. +// To set, visit https://console.developers.google.com, select "IAM & Admin" from the top-left +// menu, choose the account, click the Roles dropdown, and select "Pub/Sub > Pub/Sub Admin". +// TODO(jba): move this to a testing package within cloud.google.com/iam, so we can re-use it. +func testIAM(ctx context.Context, h *iam.Handle, permission string) (msg string, ok bool) { + // Attempting to add an non-existent identity (e.g. "alice@example.com") causes the service + // to return an internal error, so use a real identity. + const member = "domain:google.com" + + var policy *iam.Policy + var err error + + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + // The resource is new, so the policy should be empty. + if got := policy.Roles(); len(got) > 0 { + return fmt.Sprintf("initially: got roles %v, want none", got), false + } + // Add a member, set the policy, then check that the member is present. 
+ policy.Add(member, iam.Viewer) + if err := h.SetPolicy(ctx, policy); err != nil { + return fmt.Sprintf("SetPolicy: %v", err), false + } + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + if got, want := policy.Members(iam.Viewer), []string{member}; !testutil.Equal(got, want) { + return fmt.Sprintf("after Add: got %v, want %v", got, want), false + } + // Now remove that member, set the policy, and check that it's empty again. + policy.Remove(member, iam.Viewer) + if err := h.SetPolicy(ctx, policy); err != nil { + return fmt.Sprintf("SetPolicy: %v", err), false + } + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + if got := policy.Roles(); len(got) > 0 { + return fmt.Sprintf("after Remove: got roles %v, want none", got), false + } + // Call TestPermissions. + // Because this user is an admin, it has all the permissions on the + // resource type. Note: the service fails if we ask for inapplicable + // permissions (e.g. a subscription permission on a topic, or a topic + // create permission on a topic rather than its parent). + wantPerms := []string{permission} + gotPerms, err := h.TestPermissions(ctx, wantPerms) + if err != nil { + return fmt.Sprintf("TestPermissions: %v", err), false + } + if !testutil.Equal(gotPerms, wantPerms) { + return fmt.Sprintf("TestPermissions: got %v, want %v", gotPerms, wantPerms), false + } + return "", true +} + +func TestSubscriptionUpdate(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + topic, err := client.CreateTopic(ctx, topicIDs.New()) + if err != nil { + t.Fatalf("CreateTopic error: %v", err) + } + defer topic.Stop() + defer topic.Delete(ctx) + + var sub *Subscription + if sub, err = client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{Topic: topic}); err != nil { + t.Fatalf("CreateSub error: %v", err) + } + defer sub.Delete(ctx) + + sc, err := sub.Config(ctx) + if err != nil { + t.Fatal(err) + } + if !testutil.Equal(sc.PushConfig, PushConfig{}) { + t.Fatalf("got %+v, want empty PushConfig", sc.PushConfig) + } + // Add a PushConfig. + projID := testutil.ProjID() + pc := PushConfig{ + Endpoint: "https://" + projID + ".appspot.com/_ah/push-handlers/push", + Attributes: map[string]string{"x-goog-version": "v1"}, + } + sc, err = sub.Update(ctx, SubscriptionConfigToUpdate{PushConfig: &pc}) + if err != nil { + t.Fatal(err) + } + // Despite the docs which say that Get always returns a valid "x-goog-version" + // attribute, none is returned. See + // https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PushConfig + pc.Attributes = nil + if got, want := sc.PushConfig, pc; !testutil.Equal(got, want) { + t.Fatalf("setting push config: got\n%+v\nwant\n%+v", got, want) + } + // Remove the PushConfig, turning the subscription back into pull mode. + pc = PushConfig{} + sc, err = sub.Update(ctx, SubscriptionConfigToUpdate{PushConfig: &pc}) + if err != nil { + t.Fatal(err) + } + if got, want := sc.PushConfig, pc; !testutil.Equal(got, want) { + t.Fatalf("removing push config: got\n%+v\nwant %+v", got, want) + } + + // If nothing changes, our client returns an error. 
+ _, err = sub.Update(ctx, SubscriptionConfigToUpdate{}) + if err == nil { + t.Fatal("got nil, wanted error") + } +} + +func TestPublicTopic(t *testing.T) { + t.Parallel() + ctx := context.Background() + client := integrationTestClient(t, ctx) + defer client.Close() + + sub, err := client.CreateSubscription(ctx, subIDs.New(), SubscriptionConfig{ + Topic: client.TopicInProject("taxirides-realtime", "pubsub-public-data"), + }) + if err != nil { + t.Fatal(err) + } + defer sub.Delete(ctx) + // Confirm that Receive works. It doesn't matter if we actually get any + // messages. + ctxt, cancel := context.WithTimeout(ctx, 5*time.Second) + err = sub.Receive(ctxt, func(_ context.Context, msg *Message) { + msg.Ack() + cancel() + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go new file mode 100644 index 0000000..c13fd63 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go @@ -0,0 +1,70 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package distribution + +import ( + "log" + "math" + "sort" + "sync/atomic" +) + +// D is a distribution. Methods of D can be called concurrently by multiple +// goroutines. +type D struct { + buckets []uint64 +} + +// New creates a new distribution capable of holding values from 0 to n-1. +func New(n int) *D { + return &D{ + buckets: make([]uint64, n), + } +} + +// Record records value v to the distribution. +// To help with distributions with long tails, if v is larger than the maximum value, +// Record records the maximum value instead. +// If v is negative, Record panics. +func (d *D) Record(v int) { + if v < 0 { + log.Panicf("Record: value out of range: %d", v) + } else if v >= len(d.buckets) { + v = len(d.buckets) - 1 + } + atomic.AddUint64(&d.buckets[v], 1) +} + +// Percentile computes the p-th percentile of the distribution where +// p is between 0 and 1. +func (d *D) Percentile(p float64) int { + // NOTE: This implementation uses the nearest-rank method. + // https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + + if p < 0 || p > 1 { + log.Panicf("Percentile: percentile out of range: %f", p) + } + + bucketSums := make([]uint64, len(d.buckets)) + var sum uint64 + for i := range bucketSums { + sum += atomic.LoadUint64(&d.buckets[i]) + bucketSums[i] = sum + } + + total := bucketSums[len(bucketSums)-1] + target := uint64(math.Ceil(float64(total) * p)) + return sort.Search(len(bucketSums), func(i int) bool { return bucketSums[i] >= target }) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go new file mode 100644 index 0000000..552dd90 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution_test.go @@ -0,0 +1,94 @@ +// Copyright 2017 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package distribution + +import ( + "sync" + "testing" +) + +func TestDistribution(t *testing.T) { + // These tests come from examples in https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + tests := []struct { + // values in distribution + vals []int + + // percentiles and expected percentile values + pp []float64 + vv []int + }{ + { + vals: []int{15, 20, 35, 40, 50}, + pp: []float64{0.05, 0.3, 0.4, 0.5, 1}, + vv: []int{15, 20, 20, 35, 50}, + }, + { + vals: []int{3, 6, 7, 8, 8, 10, 13, 15, 16, 20}, + pp: []float64{0.25, 0.5, 0.75, 1}, + vv: []int{7, 8, 15, 20}, + }, + { + vals: []int{3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 20}, + pp: []float64{0.25, 0.5, 0.75, 1}, + vv: []int{7, 9, 15, 20}, + }, + } + + maxVal := 0 + for _, tst := range tests { + for _, v := range tst.vals { + if maxVal < v { + maxVal = v + } + } + } + + for _, tst := range tests { + d := New(maxVal + 1) + for _, v := range tst.vals { + d.Record(v) + } + for i, p := range tst.pp { + got, want := d.Percentile(p), tst.vv[i] + if got != want { + t.Errorf("d=%v, d.Percentile(%f)=%d, want %d", d, p, got, want) + } + } + } +} + +func TestRace(t *testing.T) { + const N int = 1e3 + const parallel = 2 + + d := New(N) + + var wg sync.WaitGroup + wg.Add(parallel) + for i := 0; i < parallel; i++ { + go func() { + for i := 0; i < N; i++ { + d.Record(i) + } + wg.Done() + }() + } + + for i := 0; i < N; i++ { + if p := d.Percentile(0.5); p > N { + t.Fatalf("d.Percentile(0.5)=%d, expected to be at most %d", p, N) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go new file mode 100644 index 0000000..8c579f0 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -0,0 +1,270 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + "time" + + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// newMessageIterator starts a new streamingMessageIterator. Stop must be called on the messageIterator +// when it is no longer needed. +// subName is the full name of the subscription to pull messages from. +// ctx is the context to use for acking messages and extending message deadlines. 
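+// po supplies the pull options, notably ackDeadline and maxExtension, that
+// govern deadline extension below. A typical call is sketched as:
+//
+// it := newMessageIterator(ctx, s, "projects/p/subscriptions/s", po)
+// defer it.stop()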
+func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *streamingMessageIterator {
+ sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds()))
+ _ = sp.open() // error stored in sp
+ return newStreamingMessageIterator(ctx, sp, po)
+}
+
+type streamingMessageIterator struct {
+ ctx context.Context
+ po *pullOptions
+ sp *streamingPuller
+ kaTicker *time.Ticker // keep-alive (deadline extensions)
+ ackTicker *time.Ticker // message acks
+ nackTicker *time.Ticker // message nacks (more frequent than acks)
+ failed chan struct{} // closed on stream error
+ stopped chan struct{} // closed when Stop is called
+ drained chan struct{} // closed when stopped && no more pending messages
+ wg sync.WaitGroup
+
+ mu sync.Mutex
+ keepAliveDeadlines map[string]time.Time
+ pendingReq *pb.StreamingPullRequest
+ pendingModAcks map[string]int32 // ack IDs whose ack deadline is to be modified
+ err error // error from stream failure
+}
+
+func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *pullOptions) *streamingMessageIterator {
+ // TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a
+ // reasonable default for now, because the minimum ack period is 10s. This
+ // gives us 5s grace.
+ keepAlivePeriod := po.ackDeadline - 5*time.Second
+ kaTicker := time.NewTicker(keepAlivePeriod)
+
+ // Ack promptly so users don't lose work if client crashes.
+ ackTicker := time.NewTicker(100 * time.Millisecond)
+ nackTicker := time.NewTicker(100 * time.Millisecond)
+ it := &streamingMessageIterator{
+ ctx: ctx,
+ sp: sp,
+ po: po,
+ kaTicker: kaTicker,
+ ackTicker: ackTicker,
+ nackTicker: nackTicker,
+ failed: make(chan struct{}),
+ stopped: make(chan struct{}),
+ drained: make(chan struct{}),
+ keepAliveDeadlines: map[string]time.Time{},
+ pendingReq: &pb.StreamingPullRequest{},
+ pendingModAcks: map[string]int32{},
+ }
+ it.wg.Add(1)
+ go it.sender()
+ return it
+}
+
+// Subscription.receive will call stop on its messageIterator when finished with it.
+// Stop will block until Done has been called on all Messages that have been
+// returned by Next, or until the context with which the messageIterator was created
+// is cancelled or exceeds its deadline.
+func (it *streamingMessageIterator) stop() {
+ it.mu.Lock()
+ select {
+ case <-it.stopped:
+ default:
+ close(it.stopped)
+ }
+ it.checkDrained()
+ it.mu.Unlock()
+ it.wg.Wait()
+}
+
+// checkDrained closes the drained channel if the iterator has been stopped and all
+// pending messages have either been n/acked or expired.
+//
+// Called with the lock held.
+func (it *streamingMessageIterator) checkDrained() {
+ select {
+ case <-it.drained:
+ return
+ default:
+ }
+ select {
+ case <-it.stopped:
+ if len(it.keepAliveDeadlines) == 0 {
+ close(it.drained)
+ }
+ default:
+ }
+}
+
+// Called when a message is acked/nacked.
+func (it *streamingMessageIterator) done(ackID string, ack bool) {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+ delete(it.keepAliveDeadlines, ackID)
+ if ack {
+ it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID)
+ } else {
+ it.pendingModAcks[ackID] = 0 // Nack indicated by modifying the deadline to zero.
+ }
+ it.checkDrained()
+}
+
+// fail is called when a stream method returns a permanent error.
+func (it *streamingMessageIterator) fail(err error) {
+ it.mu.Lock()
+ if it.err == nil {
+ it.err = err
+ close(it.failed)
+ }
+ it.mu.Unlock()
+}
+
+// receive makes a call to the stream's Recv method and returns
+// its messages.
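+// It returns an error once the iterator's context is done or the underlying
+// stream has failed, and may otherwise block while waiting for messages.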
+func (it *streamingMessageIterator) receive() ([]*Message, error) { + // Stop retrieving messages if the context is done, the stream + // failed, or the iterator's Stop method was called. + select { + case <-it.ctx.Done(): + return nil, it.ctx.Err() + default: + } + it.mu.Lock() + err := it.err + it.mu.Unlock() + if err != nil { + return nil, err + } + // Receive messages from stream. This may block indefinitely. + msgs, err := it.sp.fetchMessages() + // The streamingPuller handles retries, so any error here + // is fatal. + if err != nil { + it.fail(err) + return nil, err + } + // We received some messages. Remember them so we can keep them alive. Also, + // arrange for a receipt mod-ack (which will occur at the next firing of + // nackTicker). + maxExt := time.Now().Add(it.po.maxExtension) + deadline := trunc32(int64(it.po.ackDeadline.Seconds())) + it.mu.Lock() + for _, m := range msgs { + m.doneFunc = it.done + it.keepAliveDeadlines[m.ackID] = maxExt + // The receipt mod-ack uses the subscription's configured ack deadline. Don't + // change the mod-ack if one is already pending. This is possible if there + // are retries. + if _, ok := it.pendingModAcks[m.ackID]; !ok { + it.pendingModAcks[m.ackID] = deadline + } + } + it.mu.Unlock() + return msgs, nil +} + +// sender runs in a goroutine and handles all sends to the stream. +func (it *streamingMessageIterator) sender() { + defer it.wg.Done() + defer it.kaTicker.Stop() + defer it.ackTicker.Stop() + defer it.nackTicker.Stop() + defer it.sp.closeSend() + + done := false + for !done { + send := false + select { + case <-it.ctx.Done(): + // Context canceled or timed out: stop immediately, without + // another RPC. + return + + case <-it.failed: + // Stream failed: nothing to do, so stop immediately. + return + + case <-it.drained: + // All outstanding messages have been marked done: + // nothing left to do except send the final request. + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingModAcks) > 0) + done = true + + case <-it.kaTicker.C: + it.mu.Lock() + it.handleKeepAlives() + send = (len(it.pendingModAcks) > 0) + + case <-it.nackTicker.C: + it.mu.Lock() + send = (len(it.pendingModAcks) > 0) + + case <-it.ackTicker.C: + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0) + } + // Lock is held here. + if send { + req := it.pendingReq + it.pendingReq = &pb.StreamingPullRequest{} + modAcks := it.pendingModAcks + it.pendingModAcks = map[string]int32{} + it.mu.Unlock() + for id, s := range modAcks { + req.ModifyDeadlineAckIds = append(req.ModifyDeadlineAckIds, id) + req.ModifyDeadlineSeconds = append(req.ModifyDeadlineSeconds, s) + } + err := it.sp.send(req) + if err != nil { + // The streamingPuller handles retries, so any error here + // is fatal to the iterator. + it.fail(err) + return + } + } else { + it.mu.Unlock() + } + } +} + +// handleKeepAlives modifies the pending request to include deadline extensions +// for live messages. It also purges expired messages. +// +// Called with the lock held. +func (it *streamingMessageIterator) handleKeepAlives() { + now := time.Now() + dl := trunc32(int64(it.po.ackDeadline.Seconds())) + for id, expiry := range it.keepAliveDeadlines { + if expiry.Before(now) { + // This delete will not result in skipping any map items, as implied by + // the spec at https://golang.org/ref/spec#For_statements, "For + // statements with range clause", note 3, and stated explicitly at + // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ. 
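+			// In other words, a loop of the following shape is well-defined:
+			//
+			//	for k := range m {
+			//		delete(m, k) // safe: remaining entries are still visited
+			//	}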
+ delete(it.keepAliveDeadlines, id) + } else { + // This will not overwrite a nack, because nacking removes the ID from keepAliveDeadlines. + it.pendingModAcks[id] = dl + } + } + it.checkDrained() +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go b/vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go new file mode 100644 index 0000000..ff695ea --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/benchmark_test.go @@ -0,0 +1,176 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package loadtest + +// Performance benchmarks for pubsub. +// Run with +// go test -bench . -cpu 1 + +import ( + "log" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/pubsub" + gtransport "google.golang.org/api/transport/grpc" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// These constants are designed to match the "throughput" test in +// https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/run.py +// and +// https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/src/main/java/com/google/pubsub/clients/experimental/CPSPublisherTask.java + +const ( + nMessages = 1e5 + messageSize = 10000 // size of msg data in bytes + batchSize = 10 + batchDuration = 50 * time.Millisecond + serverDelay = 200 * time.Millisecond + maxOutstandingPublishes = 1600 // max_outstanding_messages in run.py +) + +func BenchmarkPublishThroughput(b *testing.B) { + b.SetBytes(nMessages * messageSize) + client := perfClient(serverDelay, 1, b) + + lts := &PubServer{ID: "xxx"} + lts.init(client, "t", messageSize, batchSize, batchDuration) + b.ResetTimer() + for i := 0; i < b.N; i++ { + runOnce(lts) + } +} + +func runOnce(lts *PubServer) { + nRequests := int64(nMessages / batchSize) + var nPublished int64 + var wg sync.WaitGroup + // The Java loadtest framework is rate-limited to 1 billion Execute calls a + // second (each Execute call corresponding to a publishBatch call here), + // but we can ignore this because of the following. + // The framework runs 10,000 threads, each calling Execute in a loop, but + // we can ignore this too. + // The framework caps the number of outstanding calls to Execute at + // maxOutstandingPublishes. That is what we simulate here. 
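+	// Each goroutine below claims a batch by atomically decrementing
+	// nRequests, so exactly nMessages/batchSize batches are published in
+	// total across all goroutines.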
+ for i := 0; i < maxOutstandingPublishes; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for atomic.AddInt64(&nRequests, -1) >= 0 { + latencies, err := lts.publishBatch() + if err != nil { + log.Fatalf("publishBatch: %v", err) + } + atomic.AddInt64(&nPublished, int64(len(latencies))) + } + }() + } + wg.Wait() + sent := atomic.LoadInt64(&nPublished) + if sent != nMessages { + log.Fatalf("sent %d messages, expected %d", sent, int(nMessages)) + } +} + +func perfClient(pubDelay time.Duration, nConns int, f interface { + Fatal(...interface{}) +}) *pubsub.Client { + ctx := context.Background() + srv, err := newPerfServer(pubDelay) + if err != nil { + f.Fatal(err) + } + conn, err := gtransport.DialInsecure(ctx, + option.WithEndpoint(srv.Addr), + option.WithGRPCConnectionPool(nConns), + + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. + option.WithGRPCDialOption(grpc.WithBlock())) + if err != nil { + f.Fatal(err) + } + client, err := pubsub.NewClient(ctx, "projectID", option.WithGRPCConn(conn)) + if err != nil { + f.Fatal(err) + } + return client +} + +type perfServer struct { + pb.PublisherServer + pb.SubscriberServer + + Addr string + pubDelay time.Duration + + mu sync.Mutex + activePubs int + maxActivePubs int +} + +func newPerfServer(pubDelay time.Duration) (*perfServer, error) { + srv, err := testutil.NewServer(grpc.MaxMsgSize(pubsub.MaxPublishRequestBytes)) + if err != nil { + return nil, err + } + perf := &perfServer{Addr: srv.Addr, pubDelay: pubDelay} + pb.RegisterPublisherServer(srv.Gsrv, perf) + pb.RegisterSubscriberServer(srv.Gsrv, perf) + srv.Start() + return perf, nil +} + +var doLog = false + +func (p *perfServer) incActivePubs(n int) (int, bool) { + p.mu.Lock() + defer p.mu.Unlock() + p.activePubs += n + newMax := false + if p.activePubs > p.maxActivePubs { + p.maxActivePubs = p.activePubs + newMax = true + } + return p.activePubs, newMax +} + +func (p *perfServer) Publish(ctx context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { + a, newMax := p.incActivePubs(1) + defer p.incActivePubs(-1) + if newMax && doLog { + log.Printf("max %d active publish calls", a) + } + if doLog { + log.Printf("%p -> Publish %d", p, len(req.Messages)) + } + res := &pb.PublishResponse{MessageIds: make([]string, len(req.Messages))} + for i := range res.MessageIds { + res.MessageIds[i] = "x" + } + time.Sleep(p.pubDelay) + if doLog { + log.Printf("%p <- Publish %d", p, len(req.Messages)) + } + return res, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go b/vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go new file mode 100644 index 0000000..fa75eb3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/cmd/loadtest.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
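+//
+// Illustrative invocation of this worker binary (binary name assumed; the
+// flags are defined in main below):
+//
+//	loadtest -worker_port 6000 -r pub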
+ +package main + +import ( + "flag" + "fmt" + "log" + "net" + "strconv" + + "math/rand" + + "cloud.google.com/go/pubsub/loadtest" + pb "cloud.google.com/go/pubsub/loadtest/pb" + "google.golang.org/grpc" +) + +func main() { + port := flag.Uint("worker_port", 6000, "port to bind worker to") + role := flag.String("r", "", "role: pub/sub") + flag.Parse() + + var lts pb.LoadtestWorkerServer + switch *role { + case "pub": + lts = &loadtest.PubServer{ID: strconv.Itoa(rand.Int())} + case "sub": + lts = &loadtest.SubServer{} + default: + log.Fatalf("unknown role: %q", *role) + } + + serv := grpc.NewServer() + pb.RegisterLoadtestWorkerServer(serv, lts) + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + serv.Serve(lis) +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go b/vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go new file mode 100644 index 0000000..19a7a60 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/loadtest.go @@ -0,0 +1,215 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package loadtest implements load testing for pubsub, +// following the interface defined in https://github.com/GoogleCloudPlatform/pubsub/tree/master/load-test-framework/ . +// +// This package is experimental. 
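+//
+// A worker implements the LoadtestWorker gRPC service defined in the pb
+// package: Start configures the test (project, topic, message size,
+// batching) and Execute runs one command, returning the latencies of its
+// operations.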
+package loadtest + +import ( + "bytes" + "errors" + "log" + "runtime" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + + "github.com/golang/protobuf/ptypes" + + "cloud.google.com/go/pubsub" + pb "cloud.google.com/go/pubsub/loadtest/pb" +) + +type pubServerConfig struct { + topic *pubsub.Topic + msgData []byte + batchSize int32 +} + +type PubServer struct { + ID string + + cfg atomic.Value + seqNum int32 +} + +func (l *PubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) { + log.Println("received start") + c, err := pubsub.NewClient(ctx, req.Project) + if err != nil { + return nil, err + } + dur, err := ptypes.Duration(req.PublishBatchDuration) + if err != nil { + return nil, err + } + l.init(c, req.Topic, req.MessageSize, req.PublishBatchSize, dur) + log.Println("started") + return &pb.StartResponse{}, nil +} + +func (l *PubServer) init(c *pubsub.Client, topicName string, msgSize, batchSize int32, batchDur time.Duration) { + topic := c.Topic(topicName) + topic.PublishSettings = pubsub.PublishSettings{ + DelayThreshold: batchDur, + CountThreshold: 950, + ByteThreshold: 9500000, + } + + l.cfg.Store(pubServerConfig{ + topic: topic, + msgData: bytes.Repeat([]byte{'A'}, int(msgSize)), + batchSize: batchSize, + }) +} + +func (l *PubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) { + latencies, err := l.publishBatch() + if err != nil { + log.Printf("error: %v", err) + return nil, err + } + return &pb.ExecuteResponse{Latencies: latencies}, nil +} + +func (l *PubServer) publishBatch() ([]int64, error) { + var cfg pubServerConfig + if c, ok := l.cfg.Load().(pubServerConfig); ok { + cfg = c + } else { + return nil, errors.New("config not loaded") + } + + start := time.Now() + latencies := make([]int64, cfg.batchSize) + startStr := strconv.FormatInt(start.UnixNano()/1e6, 10) + seqNum := atomic.AddInt32(&l.seqNum, cfg.batchSize) - cfg.batchSize + + rs := make([]*pubsub.PublishResult, cfg.batchSize) + for i := int32(0); i < cfg.batchSize; i++ { + rs[i] = cfg.topic.Publish(context.TODO(), &pubsub.Message{ + Data: cfg.msgData, + Attributes: map[string]string{ + "sendTime": startStr, + "clientId": l.ID, + "sequenceNumber": strconv.Itoa(int(seqNum + i)), + }, + }) + } + for i, r := range rs { + _, err := r.Get(context.Background()) + if err != nil { + return nil, err + } + // TODO(jba,pongad): fix latencies + // Later values will be skewed by earlier ones, since we wait for the + // results in order. (On the other hand, it may not matter much, since + // messages are added to bundles in order and bundles get sent more or + // less in order.) If we want more accurate values, we can either start + // a goroutine for each result (similar to the original code using a + // callback), or call reflect.Select with the Ready channels of the + // results. + latencies[i] = time.Since(start).Nanoseconds() / 1e6 + } + return latencies, nil +} + +type SubServer struct { + lim *rate.Limiter + + mu sync.Mutex + idents []*pb.MessageIdentifier + latencies []int64 +} + +func (s *SubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) { + log.Println("received start") + s.lim = rate.NewLimiter(rate.Every(time.Second), 1) + + c, err := pubsub.NewClient(ctx, req.Project) + if err != nil { + return nil, err + } + + // Load test API doesn't define any way to stop right now. 
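+	// Receive blocks until its context is done or a fatal error occurs, so
+	// it runs in a background goroutine for the life of the process.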
+ go func() { + sub := c.Subscription(req.GetPubsubOptions().Subscription) + sub.ReceiveSettings.NumGoroutines = 10 * runtime.GOMAXPROCS(0) + err := sub.Receive(context.Background(), s.callback) + log.Fatal(err) + }() + + log.Println("started") + return &pb.StartResponse{}, nil +} + +func (s *SubServer) callback(_ context.Context, m *pubsub.Message) { + id, err := strconv.ParseInt(m.Attributes["clientId"], 10, 64) + if err != nil { + log.Println(err) + m.Nack() + return + } + + seqNum, err := strconv.ParseInt(m.Attributes["sequenceNumber"], 10, 32) + if err != nil { + log.Println(err) + m.Nack() + return + } + + sendTimeMillis, err := strconv.ParseInt(m.Attributes["sendTime"], 10, 64) + if err != nil { + log.Println(err) + m.Nack() + return + } + + latency := time.Now().UnixNano()/1e6 - sendTimeMillis + ident := &pb.MessageIdentifier{ + PublisherClientId: id, + SequenceNumber: int32(seqNum), + } + + s.mu.Lock() + s.idents = append(s.idents, ident) + s.latencies = append(s.latencies, latency) + s.mu.Unlock() + m.Ack() +} + +func (s *SubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) { + // Throttle so the load tester doesn't spam us and consume all our CPU. + if err := s.lim.Wait(ctx); err != nil { + return nil, err + } + + s.mu.Lock() + idents := s.idents + s.idents = nil + latencies := s.latencies + s.latencies = nil + s.mu.Unlock() + + return &pb.ExecuteResponse{ + Latencies: latencies, + ReceivedMessages: idents, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go b/vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go new file mode 100644 index 0000000..12ea8f0 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/loadtest/pb/loadtest.pb.go @@ -0,0 +1,792 @@ +// Code generated by protoc-gen-go. +// source: loadtest.proto +// DO NOT EDIT! + +/* +Package google_pubsub_loadtest is a generated protocol buffer package. + +It is generated from these files: + loadtest.proto + +It has these top-level messages: + StartRequest + StartResponse + PubsubOptions + KafkaOptions + MessageIdentifier + CheckRequest + CheckResponse + ExecuteRequest + ExecuteResponse +*/ +package google_pubsub_loadtest + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/duration" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StartRequest struct { + // The GCP project. This must be set even for Kafka, as we use it to export metrics. + Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` + // The Pub/Sub or Kafka topic name. + Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"` + // The number of requests that can be made, each second, per client. 
+ RequestRate int32 `protobuf:"varint,3,opt,name=request_rate,json=requestRate" json:"request_rate,omitempty"` + // The size of each user message to publish + MessageSize int32 `protobuf:"varint,4,opt,name=message_size,json=messageSize" json:"message_size,omitempty"` + // The maximum outstanding requests, per client. + MaxOutstandingRequests int32 `protobuf:"varint,5,opt,name=max_outstanding_requests,json=maxOutstandingRequests" json:"max_outstanding_requests,omitempty"` + // The time at which the load test should start. If this is less than the current time, we start immediately. + StartTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The burn-in duration, before which results should not be reported. + BurnInDuration *google_protobuf.Duration `protobuf:"bytes,12,opt,name=burn_in_duration,json=burnInDuration" json:"burn_in_duration,omitempty"` + // The number of user messages of size message_size to publish together. + PublishBatchSize int32 `protobuf:"varint,11,opt,name=publish_batch_size,json=publishBatchSize" json:"publish_batch_size,omitempty"` + // The max duration for coalescing a batch of published messages. + PublishBatchDuration *google_protobuf.Duration `protobuf:"bytes,13,opt,name=publish_batch_duration,json=publishBatchDuration" json:"publish_batch_duration,omitempty"` + // Types that are valid to be assigned to StopConditions: + // *StartRequest_TestDuration + // *StartRequest_NumberOfMessages + StopConditions isStartRequest_StopConditions `protobuf_oneof:"stop_conditions"` + // Types that are valid to be assigned to Options: + // *StartRequest_PubsubOptions + // *StartRequest_KafkaOptions + Options isStartRequest_Options `protobuf_oneof:"options"` +} + +func (m *StartRequest) Reset() { *m = StartRequest{} } +func (m *StartRequest) String() string { return proto.CompactTextString(m) } +func (*StartRequest) ProtoMessage() {} +func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isStartRequest_StopConditions interface { + isStartRequest_StopConditions() +} +type isStartRequest_Options interface { + isStartRequest_Options() +} + +type StartRequest_TestDuration struct { + TestDuration *google_protobuf.Duration `protobuf:"bytes,7,opt,name=test_duration,json=testDuration,oneof"` +} +type StartRequest_NumberOfMessages struct { + NumberOfMessages int32 `protobuf:"varint,8,opt,name=number_of_messages,json=numberOfMessages,oneof"` +} +type StartRequest_PubsubOptions struct { + PubsubOptions *PubsubOptions `protobuf:"bytes,9,opt,name=pubsub_options,json=pubsubOptions,oneof"` +} +type StartRequest_KafkaOptions struct { + KafkaOptions *KafkaOptions `protobuf:"bytes,10,opt,name=kafka_options,json=kafkaOptions,oneof"` +} + +func (*StartRequest_TestDuration) isStartRequest_StopConditions() {} +func (*StartRequest_NumberOfMessages) isStartRequest_StopConditions() {} +func (*StartRequest_PubsubOptions) isStartRequest_Options() {} +func (*StartRequest_KafkaOptions) isStartRequest_Options() {} + +func (m *StartRequest) GetStopConditions() isStartRequest_StopConditions { + if m != nil { + return m.StopConditions + } + return nil +} +func (m *StartRequest) GetOptions() isStartRequest_Options { + if m != nil { + return m.Options + } + return nil +} + +func (m *StartRequest) GetProject() string { + if m != nil { + return m.Project + } + return "" +} + +func (m *StartRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *StartRequest) GetRequestRate() 
int32 { + if m != nil { + return m.RequestRate + } + return 0 +} + +func (m *StartRequest) GetMessageSize() int32 { + if m != nil { + return m.MessageSize + } + return 0 +} + +func (m *StartRequest) GetMaxOutstandingRequests() int32 { + if m != nil { + return m.MaxOutstandingRequests + } + return 0 +} + +func (m *StartRequest) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *StartRequest) GetBurnInDuration() *google_protobuf.Duration { + if m != nil { + return m.BurnInDuration + } + return nil +} + +func (m *StartRequest) GetPublishBatchSize() int32 { + if m != nil { + return m.PublishBatchSize + } + return 0 +} + +func (m *StartRequest) GetPublishBatchDuration() *google_protobuf.Duration { + if m != nil { + return m.PublishBatchDuration + } + return nil +} + +func (m *StartRequest) GetTestDuration() *google_protobuf.Duration { + if x, ok := m.GetStopConditions().(*StartRequest_TestDuration); ok { + return x.TestDuration + } + return nil +} + +func (m *StartRequest) GetNumberOfMessages() int32 { + if x, ok := m.GetStopConditions().(*StartRequest_NumberOfMessages); ok { + return x.NumberOfMessages + } + return 0 +} + +func (m *StartRequest) GetPubsubOptions() *PubsubOptions { + if x, ok := m.GetOptions().(*StartRequest_PubsubOptions); ok { + return x.PubsubOptions + } + return nil +} + +func (m *StartRequest) GetKafkaOptions() *KafkaOptions { + if x, ok := m.GetOptions().(*StartRequest_KafkaOptions); ok { + return x.KafkaOptions + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*StartRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StartRequest_OneofMarshaler, _StartRequest_OneofUnmarshaler, _StartRequest_OneofSizer, []interface{}{ + (*StartRequest_TestDuration)(nil), + (*StartRequest_NumberOfMessages)(nil), + (*StartRequest_PubsubOptions)(nil), + (*StartRequest_KafkaOptions)(nil), + } +} + +func _StartRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StartRequest) + // stop_conditions + switch x := m.StopConditions.(type) { + case *StartRequest_TestDuration: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TestDuration); err != nil { + return err + } + case *StartRequest_NumberOfMessages: + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NumberOfMessages)) + case nil: + default: + return fmt.Errorf("StartRequest.StopConditions has unexpected type %T", x) + } + // options + switch x := m.Options.(type) { + case *StartRequest_PubsubOptions: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PubsubOptions); err != nil { + return err + } + case *StartRequest_KafkaOptions: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KafkaOptions); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StartRequest.Options has unexpected type %T", x) + } + return nil +} + +func _StartRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StartRequest) + switch tag { + case 7: // stop_conditions.test_duration + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf.Duration) + err := b.DecodeMessage(msg) + m.StopConditions = &StartRequest_TestDuration{msg} + return true, err + case 8: // 
stop_conditions.number_of_messages + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.StopConditions = &StartRequest_NumberOfMessages{int32(x)} + return true, err + case 9: // options.pubsub_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PubsubOptions) + err := b.DecodeMessage(msg) + m.Options = &StartRequest_PubsubOptions{msg} + return true, err + case 10: // options.kafka_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(KafkaOptions) + err := b.DecodeMessage(msg) + m.Options = &StartRequest_KafkaOptions{msg} + return true, err + default: + return false, nil + } +} + +func _StartRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StartRequest) + // stop_conditions + switch x := m.StopConditions.(type) { + case *StartRequest_TestDuration: + s := proto.Size(x.TestDuration) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StartRequest_NumberOfMessages: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NumberOfMessages)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // options + switch x := m.Options.(type) { + case *StartRequest_PubsubOptions: + s := proto.Size(x.PubsubOptions) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StartRequest_KafkaOptions: + s := proto.Size(x.KafkaOptions) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type StartResponse struct { +} + +func (m *StartResponse) Reset() { *m = StartResponse{} } +func (m *StartResponse) String() string { return proto.CompactTextString(m) } +func (*StartResponse) ProtoMessage() {} +func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type PubsubOptions struct { + // The Cloud Pub/Sub subscription name + Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` + // The maximum number of messages to pull which each request. + MaxMessagesPerPull int32 `protobuf:"varint,2,opt,name=max_messages_per_pull,json=maxMessagesPerPull" json:"max_messages_per_pull,omitempty"` +} + +func (m *PubsubOptions) Reset() { *m = PubsubOptions{} } +func (m *PubsubOptions) String() string { return proto.CompactTextString(m) } +func (*PubsubOptions) ProtoMessage() {} +func (*PubsubOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *PubsubOptions) GetSubscription() string { + if m != nil { + return m.Subscription + } + return "" +} + +func (m *PubsubOptions) GetMaxMessagesPerPull() int32 { + if m != nil { + return m.MaxMessagesPerPull + } + return 0 +} + +type KafkaOptions struct { + // The network address of the Kafka broker. + Broker string `protobuf:"bytes,1,opt,name=broker" json:"broker,omitempty"` + // The length of time to poll for. 
+ PollDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=poll_duration,json=pollDuration" json:"poll_duration,omitempty"` +} + +func (m *KafkaOptions) Reset() { *m = KafkaOptions{} } +func (m *KafkaOptions) String() string { return proto.CompactTextString(m) } +func (*KafkaOptions) ProtoMessage() {} +func (*KafkaOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *KafkaOptions) GetBroker() string { + if m != nil { + return m.Broker + } + return "" +} + +func (m *KafkaOptions) GetPollDuration() *google_protobuf.Duration { + if m != nil { + return m.PollDuration + } + return nil +} + +type MessageIdentifier struct { + // The unique id of the client that published the message. + PublisherClientId int64 `protobuf:"varint,1,opt,name=publisher_client_id,json=publisherClientId" json:"publisher_client_id,omitempty"` + // Sequence number of the published message with the given publish_client_id. + SequenceNumber int32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` +} + +func (m *MessageIdentifier) Reset() { *m = MessageIdentifier{} } +func (m *MessageIdentifier) String() string { return proto.CompactTextString(m) } +func (*MessageIdentifier) ProtoMessage() {} +func (*MessageIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *MessageIdentifier) GetPublisherClientId() int64 { + if m != nil { + return m.PublisherClientId + } + return 0 +} + +func (m *MessageIdentifier) GetSequenceNumber() int32 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +type CheckRequest struct { + // Duplicate messages that should not be reported for throughput and latency. + Duplicates []*MessageIdentifier `protobuf:"bytes,1,rep,name=duplicates" json:"duplicates,omitempty"` +} + +func (m *CheckRequest) Reset() { *m = CheckRequest{} } +func (m *CheckRequest) String() string { return proto.CompactTextString(m) } +func (*CheckRequest) ProtoMessage() {} +func (*CheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *CheckRequest) GetDuplicates() []*MessageIdentifier { + if m != nil { + return m.Duplicates + } + return nil +} + +type CheckResponse struct { + // Histogram of latencies, each one a delta from the previous CheckResponse sent. + BucketValues []int64 `protobuf:"varint,1,rep,packed,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` + // The duration from the start of the loadtest to its completion or now if is_finished is false. + RunningDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=running_duration,json=runningDuration" json:"running_duration,omitempty"` + // True if the load test has finished running. 
+ IsFinished bool `protobuf:"varint,3,opt,name=is_finished,json=isFinished" json:"is_finished,omitempty"` + // MessageIdentifiers of all received messages since the last Check + ReceivedMessages []*MessageIdentifier `protobuf:"bytes,4,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *CheckResponse) Reset() { *m = CheckResponse{} } +func (m *CheckResponse) String() string { return proto.CompactTextString(m) } +func (*CheckResponse) ProtoMessage() {} +func (*CheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *CheckResponse) GetBucketValues() []int64 { + if m != nil { + return m.BucketValues + } + return nil +} + +func (m *CheckResponse) GetRunningDuration() *google_protobuf.Duration { + if m != nil { + return m.RunningDuration + } + return nil +} + +func (m *CheckResponse) GetIsFinished() bool { + if m != nil { + return m.IsFinished + } + return false +} + +func (m *CheckResponse) GetReceivedMessages() []*MessageIdentifier { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +type ExecuteRequest struct { +} + +func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } +func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*ExecuteRequest) ProtoMessage() {} +func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type ExecuteResponse struct { + // Latencies of the completed operations + Latencies []int64 `protobuf:"varint,1,rep,packed,name=latencies" json:"latencies,omitempty"` + // MessageIdentifiers of all received messages since the last Execute + ReceivedMessages []*MessageIdentifier `protobuf:"bytes,2,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` +} + +func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } +func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*ExecuteResponse) ProtoMessage() {} +func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ExecuteResponse) GetLatencies() []int64 { + if m != nil { + return m.Latencies + } + return nil +} + +func (m *ExecuteResponse) GetReceivedMessages() []*MessageIdentifier { + if m != nil { + return m.ReceivedMessages + } + return nil +} + +func init() { + proto.RegisterType((*StartRequest)(nil), "google.pubsub.loadtest.StartRequest") + proto.RegisterType((*StartResponse)(nil), "google.pubsub.loadtest.StartResponse") + proto.RegisterType((*PubsubOptions)(nil), "google.pubsub.loadtest.PubsubOptions") + proto.RegisterType((*KafkaOptions)(nil), "google.pubsub.loadtest.KafkaOptions") + proto.RegisterType((*MessageIdentifier)(nil), "google.pubsub.loadtest.MessageIdentifier") + proto.RegisterType((*CheckRequest)(nil), "google.pubsub.loadtest.CheckRequest") + proto.RegisterType((*CheckResponse)(nil), "google.pubsub.loadtest.CheckResponse") + proto.RegisterType((*ExecuteRequest)(nil), "google.pubsub.loadtest.ExecuteRequest") + proto.RegisterType((*ExecuteResponse)(nil), "google.pubsub.loadtest.ExecuteResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Loadtest service + +type LoadtestClient interface { + // Starts a load test + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Checks the status of a load test + Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) +} + +type loadtestClient struct { + cc *grpc.ClientConn +} + +func NewLoadtestClient(cc *grpc.ClientConn) LoadtestClient { + return &loadtestClient{cc} +} + +func (c *loadtestClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Start", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loadtestClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { + out := new(CheckResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Loadtest service + +type LoadtestServer interface { + // Starts a load test + Start(context.Context, *StartRequest) (*StartResponse, error) + // Checks the status of a load test + Check(context.Context, *CheckRequest) (*CheckResponse, error) +} + +func RegisterLoadtestServer(s *grpc.Server, srv LoadtestServer) { + s.RegisterService(&_Loadtest_serviceDesc, srv) +} + +func _Loadtest_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.Loadtest/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Loadtest_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.Loadtest/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestServer).Check(ctx, req.(*CheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Loadtest_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.loadtest.Loadtest", + HandlerType: (*LoadtestServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Start", + Handler: _Loadtest_Start_Handler, + }, + { + MethodName: "Check", + Handler: _Loadtest_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "loadtest.proto", +} + +// Client API for LoadtestWorker service + +type LoadtestWorkerClient interface { + // Starts a worker + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Executes a command on the worker, returning the latencies of the operations. Since some + // commands consist of multiple operations (i.e. 
pulls contain many received messages with + // different end to end latencies) a single command can have multiple latencies returned. + Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) +} + +type loadtestWorkerClient struct { + cc *grpc.ClientConn +} + +func NewLoadtestWorkerClient(cc *grpc.ClientConn) LoadtestWorkerClient { + return &loadtestWorkerClient{cc} +} + +func (c *loadtestWorkerClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Start", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *loadtestWorkerClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) { + out := new(ExecuteResponse) + err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Execute", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for LoadtestWorker service + +type LoadtestWorkerServer interface { + // Starts a worker + Start(context.Context, *StartRequest) (*StartResponse, error) + // Executes a command on the worker, returning the latencies of the operations. Since some + // commands consist of multiple operations (i.e. pulls contain many received messages with + // different end to end latencies) a single command can have multiple latencies returned. + Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error) +} + +func RegisterLoadtestWorkerServer(s *grpc.Server, srv LoadtestWorkerServer) { + s.RegisterService(&_LoadtestWorker_serviceDesc, srv) +} + +func _LoadtestWorker_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestWorkerServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestWorkerServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LoadtestWorker_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecuteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoadtestWorkerServer).Execute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Execute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoadtestWorkerServer).Execute(ctx, req.(*ExecuteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LoadtestWorker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.loadtest.LoadtestWorker", + HandlerType: (*LoadtestWorkerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Start", + Handler: _LoadtestWorker_Start_Handler, + }, + { + MethodName: "Execute", + Handler: _LoadtestWorker_Execute_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "loadtest.proto", +} + +func init() { proto.RegisterFile("loadtest.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 847 bytes of 
a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xdd, 0x6e, 0xdc, 0x44, + 0x14, 0xae, 0x93, 0x6e, 0x92, 0x3d, 0x6b, 0xef, 0x6e, 0x86, 0x12, 0x99, 0x15, 0xd0, 0x60, 0x28, + 0x0d, 0x12, 0x72, 0x45, 0xb8, 0x81, 0x1b, 0x84, 0x92, 0x82, 0x12, 0x15, 0x9a, 0xc8, 0x8d, 0x8a, + 0xe0, 0x66, 0x34, 0xb6, 0x67, 0x93, 0x61, 0xed, 0x19, 0x33, 0x3f, 0x55, 0xd4, 0x17, 0xe0, 0x8d, + 0x78, 0x00, 0x1e, 0x87, 0x5b, 0x5e, 0x00, 0xcd, 0x78, 0xbc, 0x3f, 0x6d, 0x57, 0x0b, 0x42, 0xbd, + 0x3c, 0xdf, 0xf9, 0xce, 0x37, 0xe7, 0xd7, 0x86, 0x61, 0x25, 0x48, 0xa9, 0xa9, 0xd2, 0x69, 0x23, + 0x85, 0x16, 0xe8, 0xe0, 0x5a, 0x88, 0xeb, 0x8a, 0xa6, 0x8d, 0xc9, 0x95, 0xc9, 0xd3, 0xce, 0x3b, + 0xf9, 0xb0, 0xc5, 0x1f, 0x39, 0x56, 0x6e, 0xa6, 0x8f, 0x4a, 0x23, 0x89, 0x66, 0x82, 0xb7, 0x71, + 0x93, 0xfb, 0xaf, 0xfa, 0x35, 0xab, 0xa9, 0xd2, 0xa4, 0x6e, 0x5a, 0x42, 0xf2, 0x57, 0x0f, 0xc2, + 0x67, 0x9a, 0x48, 0x9d, 0xd1, 0xdf, 0x0c, 0x55, 0x1a, 0xc5, 0xb0, 0xdb, 0x48, 0xf1, 0x2b, 0x2d, + 0x74, 0x1c, 0x1c, 0x06, 0x47, 0xfd, 0xac, 0x33, 0xd1, 0x3d, 0xe8, 0x69, 0xd1, 0xb0, 0x22, 0xde, + 0x72, 0x78, 0x6b, 0xa0, 0x8f, 0x20, 0x94, 0x6d, 0x28, 0x96, 0x44, 0xd3, 0x78, 0xfb, 0x30, 0x38, + 0xea, 0x65, 0x03, 0x8f, 0x65, 0x44, 0x53, 0x4b, 0xa9, 0xa9, 0x52, 0xe4, 0x9a, 0x62, 0xc5, 0x5e, + 0xd2, 0xf8, 0x6e, 0x4b, 0xf1, 0xd8, 0x33, 0xf6, 0x92, 0xa2, 0xaf, 0x20, 0xae, 0xc9, 0x2d, 0x16, + 0x46, 0x2b, 0x4d, 0x78, 0xc9, 0xf8, 0x35, 0xf6, 0x0a, 0x2a, 0xee, 0x39, 0xfa, 0x41, 0x4d, 0x6e, + 0x2f, 0x16, 0x6e, 0x9f, 0xae, 0x42, 0x5f, 0x03, 0x28, 0x9b, 0x3f, 0xb6, 0x95, 0xc5, 0x3b, 0x87, + 0xc1, 0xd1, 0xe0, 0x78, 0x92, 0x76, 0xed, 0xf2, 0x65, 0xa7, 0x57, 0x5d, 0xd9, 0x59, 0xdf, 0xb1, + 0xad, 0x8d, 0x4e, 0x61, 0x9c, 0x1b, 0xc9, 0x31, 0xe3, 0xb8, 0x6b, 0x5b, 0x1c, 0x3a, 0x81, 0xf7, + 0x5e, 0x13, 0x78, 0xec, 0x09, 0xd9, 0xd0, 0x86, 0x9c, 0xf3, 0xce, 0x46, 0x9f, 0x03, 0x6a, 0x4c, + 0x5e, 0x31, 0x75, 0x83, 0x73, 0xa2, 0x8b, 0x9b, 0xb6, 0xc4, 0x81, 0xcb, 0x79, 0xec, 0x3d, 0x27, + 0xd6, 0xe1, 0xea, 0xbc, 0x80, 0x83, 0x55, 0xf6, 0xfc, 0xe1, 0x68, 0xd3, 0xc3, 0xf7, 0x96, 0xc5, + 0xe6, 0xcf, 0x7f, 0x0b, 0x91, 0x5d, 0x84, 0x85, 0xce, 0xee, 0x06, 0x9d, 0xb3, 0x3b, 0x59, 0x68, + 0x23, 0xe6, 0x0a, 0x29, 0x20, 0x6e, 0xea, 0x9c, 0x4a, 0x2c, 0xa6, 0xd8, 0xcf, 0x44, 0xc5, 0x7b, + 0xb6, 0x80, 0xb3, 0x3b, 0xd9, 0xb8, 0xf5, 0x5d, 0x4c, 0x7f, 0xf4, 0x1e, 0xf4, 0x14, 0x86, 0xed, + 0x16, 0x62, 0xd1, 0x58, 0x01, 0x15, 0xf7, 0xdd, 0x93, 0x0f, 0xd2, 0x37, 0xef, 0x68, 0x7a, 0xe9, + 0xec, 0x8b, 0x96, 0x7c, 0x16, 0x64, 0x51, 0xb3, 0x0c, 0xa0, 0x27, 0x10, 0xcd, 0xc8, 0x74, 0x46, + 0xe6, 0x72, 0xe0, 0xe4, 0x3e, 0x59, 0x27, 0xf7, 0xc4, 0x92, 0x17, 0x6a, 0xe1, 0x6c, 0xc9, 0x3e, + 0xd9, 0x87, 0x91, 0xd2, 0xa2, 0xc1, 0x85, 0xe0, 0x25, 0x6b, 0xa1, 0x3e, 0xec, 0x7a, 0xe5, 0x64, + 0x04, 0x91, 0xdf, 0x75, 0xd5, 0x08, 0xae, 0x68, 0x32, 0x85, 0x68, 0x25, 0x3b, 0x94, 0x40, 0xa8, + 0x4c, 0xae, 0x0a, 0xc9, 0x1c, 0xe0, 0x4f, 0x60, 0x05, 0x43, 0x5f, 0xc0, 0xbb, 0x76, 0x57, 0xbb, + 0x56, 0xe1, 0x86, 0x4a, 0xdc, 0x98, 0xaa, 0x72, 0x77, 0xd1, 0xcb, 0x50, 0x4d, 0x6e, 0xbb, 0x66, + 0x5d, 0x52, 0x79, 0x69, 0xaa, 0x2a, 0x99, 0x42, 0xb8, 0x9c, 0x36, 0x3a, 0x80, 0x9d, 0x5c, 0x8a, + 0x19, 0x95, 0xfe, 0x01, 0x6f, 0xa1, 0x6f, 0x20, 0x6a, 0x44, 0x55, 0x2d, 0xa6, 0xb9, 0xb5, 0x69, + 0x2b, 0x42, 0xcb, 0xef, 0xac, 0xa4, 0x82, 0x7d, 0xff, 0xf4, 0x79, 0x49, 0xb9, 0x66, 0x53, 0x46, + 0x25, 0x4a, 0xe1, 0x1d, 0xbf, 0x3a, 0x54, 0xe2, 0xa2, 0x62, 0x94, 0x6b, 0xcc, 0x4a, 0xf7, 0xf2, + 0x76, 0xb6, 0x3f, 0x77, 0x9d, 0x3a, 0xcf, 0x79, 0x89, 0x1e, 0xc2, 0x48, 0xd9, 0xeb, 0xe2, 
0x05, + 0xc5, 0xed, 0xf4, 0x7d, 0x65, 0xc3, 0x0e, 0x7e, 0xea, 0xd0, 0xe4, 0x67, 0x08, 0x4f, 0x6f, 0x68, + 0x31, 0xeb, 0x3e, 0x1d, 0xe7, 0x00, 0xa5, 0x69, 0x2a, 0x56, 0x10, 0x4d, 0x55, 0x1c, 0x1c, 0x6e, + 0x1f, 0x0d, 0x8e, 0x3f, 0x5b, 0x37, 0xc6, 0xd7, 0xf2, 0xcc, 0x96, 0x82, 0x93, 0xbf, 0x03, 0x88, + 0xbc, 0x76, 0x3b, 0x2a, 0xf4, 0x31, 0x44, 0xb9, 0x29, 0x66, 0x54, 0xe3, 0x17, 0xa4, 0x32, 0x5e, + 0x7f, 0x3b, 0x0b, 0x5b, 0xf0, 0xb9, 0xc3, 0xd0, 0x63, 0x18, 0x4b, 0xc3, 0xb9, 0xfd, 0x7c, 0xfc, + 0xfb, 0x16, 0x8e, 0x7c, 0xc8, 0xfc, 0x22, 0xee, 0xc3, 0x80, 0x29, 0x3c, 0x65, 0xdc, 0xf6, 0xa5, + 0x74, 0x5f, 0xb4, 0xbd, 0x0c, 0x98, 0xfa, 0xde, 0x23, 0xe8, 0x39, 0xec, 0x4b, 0x5a, 0x50, 0xf6, + 0x82, 0x96, 0x8b, 0x8b, 0xb9, 0xfb, 0x5f, 0xeb, 0x1d, 0x77, 0x1a, 0xdd, 0xb6, 0x24, 0x63, 0x18, + 0x7e, 0x77, 0x4b, 0x0b, 0xa3, 0xa9, 0x6f, 0x69, 0xf2, 0x7b, 0x00, 0xa3, 0x39, 0xe4, 0x3b, 0xf1, + 0x3e, 0xf4, 0x2b, 0xa2, 0x29, 0x2f, 0xd8, 0xbc, 0x0b, 0x0b, 0xe0, 0xcd, 0xb9, 0x6d, 0xfd, 0xef, + 0xdc, 0x8e, 0xff, 0x08, 0x60, 0xef, 0x07, 0x1f, 0x80, 0xae, 0xa0, 0xe7, 0x0e, 0x09, 0xad, 0xbd, + 0xd2, 0xe5, 0x7f, 0xca, 0xe4, 0xc1, 0x06, 0x96, 0x2f, 0xec, 0x0a, 0x7a, 0x6e, 0xe6, 0xeb, 0x55, + 0x97, 0xd7, 0x6d, 0xbd, 0xea, 0xca, 0xe2, 0x1c, 0xff, 0x19, 0xc0, 0xb0, 0x4b, 0xfc, 0x27, 0x21, + 0xed, 0x99, 0xbd, 0x9d, 0xf4, 0x7f, 0x81, 0x5d, 0x3f, 0x2a, 0xf4, 0xe9, 0xba, 0x88, 0xd5, 0xf1, + 0x4e, 0x1e, 0x6e, 0xe4, 0xb5, 0xda, 0x27, 0x29, 0x7c, 0x50, 0x88, 0xfa, 0x15, 0xf6, 0xb4, 0x62, + 0x45, 0x5a, 0x88, 0xba, 0x16, 0xfc, 0x24, 0xea, 0x4a, 0xbc, 0x74, 0xfb, 0xbd, 0xe3, 0xd6, 0xfc, + 0xcb, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xfc, 0xdc, 0x27, 0x48, 0x08, 0x00, 0x00, +} diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go new file mode 100644 index 0000000..f6bb5e0 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -0,0 +1,97 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "time" + + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. + // This ID is assigned by the server and is populated for Messages obtained from a subscription. + // This field is read-only. + ID string + + // Data is the actual data in the message. + Data []byte + + // Attributes represents the key-value pairs the current message + // is labelled with. + Attributes map[string]string + + // ackID is the identifier to acknowledge this message. + ackID string + + // The time at which the message was published. + // This is populated by the server for Messages obtained from a subscription. + // This field is read-only. + PublishTime time.Time + + // size is the approximate size of the message's data and attributes. + size int + + calledDone bool + + // The done method of the iterator that created this Message. 
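+	// It receives the message's ackID and true for an ack or false for a
+	// nack; see Ack and Nack below.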
+ doneFunc func(string, bool) +} + +func toMessage(resp *pb.ReceivedMessage) (*Message, error) { + if resp.Message == nil { + return &Message{ackID: resp.AckId}, nil + } + + pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) + if err != nil { + return nil, err + } + return &Message{ + ackID: resp.AckId, + Data: resp.Message.Data, + Attributes: resp.Message.Attributes, + ID: resp.Message.MessageId, + PublishTime: pubTime, + }, nil +} + +// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// If message acknowledgement fails, the Message will be redelivered. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Ack() { + m.done(true) +} + +// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Nack() { + m.done(false) +} + +func (m *Message) done(ack bool) { + if m.calledDone { + return + } + m.calledDone = true + m.doneFunc(m.ackID, ack) +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go new file mode 100644 index 0000000..ffccf66 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -0,0 +1,151 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub // import "cloud.google.com/go/pubsub" + +import ( + "fmt" + "os" + "runtime" + "time" + + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + "golang.org/x/net/context" +) + +const ( + // ScopePubSub grants permissions to view and manage Pub/Sub + // topics and subscriptions. + ScopePubSub = "https://www.googleapis.com/auth/pubsub" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +const prodAddr = "https://pubsub.googleapis.com/" + +// Client is a Google Pub/Sub client scoped to a single project. +// +// Clients should be reused rather than being created as needed. +// A Client may be shared by multiple goroutines. +type Client struct { + projectID string + s service +} + +// NewClient creates a new PubSub client. 
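+//
+// Minimal illustrative use ("my-project" is a placeholder project ID):
+//
+//	client, err := pubsub.NewClient(ctx, "my-project")
+//	if err != nil {
+//		// TODO: handle error.
+//	}
+//	defer client.Close()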
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + var o []option.ClientOption + // Environment variables for gcloud emulator: + // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ + if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + // Create multiple connections to increase throughput. + option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)), + + // TODO(grpc/grpc-go#1388) using connection pool without WithBlock + // can cause RPCs to fail randomly. We can delete this after the issue is fixed. + option.WithGRPCDialOption(grpc.WithBlock()), + + option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + })), + } + } + o = append(o, opts...) + s, err := newPubSubService(ctx, o) + if err != nil { + return nil, fmt.Errorf("constructing pubsub client: %v", err) + } + + c := &Client{ + projectID: projectID, + s: s, + } + + return c, nil +} + +// Close releases any resources held by the client, +// such as memory and goroutines. +// +// If the client is available for the lifetime of the program, then Close need not be +// called at exit. +func (c *Client) Close() error { + return c.s.close() +} + +func (c *Client) fullyQualifiedProjectName() string { + return fmt.Sprintf("projects/%s", c.projectID) +} + +// pageToken stores the next page token for a server response which is split over multiple pages. +type pageToken struct { + tok string + explicit bool +} + +func (pt *pageToken) set(tok string) { + pt.tok = tok + pt.explicit = true +} + +func (pt *pageToken) get() string { + return pt.tok +} + +// more returns whether further pages should be fetched from the server. +func (pt *pageToken) more() bool { + return pt.tok != "" || !pt.explicit +} + +// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings. +type stringsIterator struct { + ctx context.Context + strings []string + token pageToken + fetch func(ctx context.Context, tok string) (*stringsPage, error) +} + +// Next returns the next string. If there are no more strings, iterator.Done will be returned. +func (si *stringsIterator) Next() (string, error) { + for len(si.strings) == 0 && si.token.more() { + page, err := si.fetch(si.ctx, si.token.get()) + if err != nil { + return "", err + } + si.token.set(page.tok) + si.strings = page.strings + } + + if len(si.strings) == 0 { + return "", iterator.Done + } + + s := si.strings[0] + si.strings = si.strings[1:] + + return s, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go new file mode 100644 index 0000000..4d57d1b --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -0,0 +1,598 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/ptypes" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/pubsub/apiv1" + durpb "github.com/golang/protobuf/ptypes/duration" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type nextStringFunc func() (string, error) +type nextSnapshotFunc func() (*snapshotConfig, error) + +// service provides an internal abstraction to isolate the generated +// PubSub API; most of this package uses this interface instead. +// The single implementation, *apiService, contains all the knowledge +// of the generated PubSub API (except for that present in legacy code). +type service interface { + createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error + getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error) + listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc + deleteSubscription(ctx context.Context, name string) error + subscriptionExists(ctx context.Context, name string) (bool, error) + modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error + + createTopic(ctx context.Context, name string) error + deleteTopic(ctx context.Context, name string) error + topicExists(ctx context.Context, name string) (bool, error) + listProjectTopics(ctx context.Context, projName string) nextStringFunc + listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc + + modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error + fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) + publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) + + // splitAckIDs divides ackIDs into + // * a batch of a size which is suitable for passing to acknowledge or + // modifyAckDeadline, and + // * the rest. + splitAckIDs(ackIDs []string) ([]string, []string) + + // acknowledge ACKs the IDs in ackIDs. + acknowledge(ctx context.Context, subName string, ackIDs []string) error + + iamHandle(resourceName string) *iam.Handle + + newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller + + createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error) + deleteSnapshot(ctx context.Context, snapName string) error + listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc + + // TODO(pongad): Raw proto returns an empty SeekResponse; figure out if we want to return it before GA. + seekToTime(ctx context.Context, subName string, t time.Time) error + seekToSnapshot(ctx context.Context, subName, snapName string) error + + close() error +} + +type apiService struct { + pubc *vkit.PublisherClient + subc *vkit.SubscriberClient +} + +func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiService, error) { + pubc, err := vkit.NewPublisherClient(ctx, opts...) 
+ if err != nil { + return nil, err + } + subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) + if err != nil { + _ = pubc.Close() // ignore error + return nil, err + } + pubc.SetGoogleClientInfo("gccl", version.Repo) + subc.SetGoogleClientInfo("gccl", version.Repo) + return &apiService{pubc: pubc, subc: subc}, nil +} + +func (s *apiService) close() error { + // Return the first error, because the first call closes the connection. + err := s.pubc.Close() + _ = s.subc.Close() + return err +} + +func (s *apiService) createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error { + var rawPushConfig *pb.PushConfig + if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 { + rawPushConfig = &pb.PushConfig{ + Attributes: cfg.PushConfig.Attributes, + PushEndpoint: cfg.PushConfig.Endpoint, + } + } + var retentionDuration *durpb.Duration + if cfg.retentionDuration != 0 { + retentionDuration = ptypes.DurationProto(cfg.retentionDuration) + } + + _, err := s.subc.CreateSubscription(ctx, &pb.Subscription{ + Name: subName, + Topic: cfg.Topic.name, + PushConfig: rawPushConfig, + AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), + RetainAckedMessages: cfg.retainAckedMessages, + MessageRetentionDuration: retentionDuration, + }) + return err +} + +func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error) { + rawSub, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: subName}) + if err != nil { + return SubscriptionConfig{}, "", err + } + var rd time.Duration + // TODO(pongad): Remove nil-check after white list is removed. + if rawSub.MessageRetentionDuration != nil { + if rd, err = ptypes.Duration(rawSub.MessageRetentionDuration); err != nil { + return SubscriptionConfig{}, "", err + } + } + sub := SubscriptionConfig{ + AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds), + PushConfig: PushConfig{ + Endpoint: rawSub.PushConfig.PushEndpoint, + Attributes: rawSub.PushConfig.Attributes, + }, + retainAckedMessages: rawSub.RetainAckedMessages, + retentionDuration: rd, + } + return sub, rawSub.Topic, nil +} + +// stringsPage contains a list of strings and a token for fetching the next page. 
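// Editor's aside: stringsPage and pageToken together implement manual
// pagination for the list calls not wrapped by generated iterators. An unset
// token (explicit == false) means "first page", so more() reports true until
// the server returns an empty next-page token.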
+type stringsPage struct {
+ strings []string
+ tok string
+}
+
+func (s *apiService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc {
+ it := s.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{
+ Project: projName,
+ })
+ return func() (string, error) {
+ sub, err := it.Next()
+ if err != nil {
+ return "", err
+ }
+ return sub.Name, nil
+ }
+}
+
+func (s *apiService) deleteSubscription(ctx context.Context, name string) error {
+ return s.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: name})
+}
+
+func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) {
+ _, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: name})
+ if err == nil {
+ return true, nil
+ }
+ if grpc.Code(err) == codes.NotFound {
+ return false, nil
+ }
+ return false, err
+}
+
+func (s *apiService) createTopic(ctx context.Context, name string) error {
+ _, err := s.pubc.CreateTopic(ctx, &pb.Topic{Name: name})
+ return err
+}
+
+func (s *apiService) listProjectTopics(ctx context.Context, projName string) nextStringFunc {
+ it := s.pubc.ListTopics(ctx, &pb.ListTopicsRequest{
+ Project: projName,
+ })
+ return func() (string, error) {
+ topic, err := it.Next()
+ if err != nil {
+ return "", err
+ }
+ return topic.Name, nil
+ }
+}
+
+func (s *apiService) deleteTopic(ctx context.Context, name string) error {
+ return s.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: name})
+}
+
+func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) {
+ _, err := s.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: name})
+ if err == nil {
+ return true, nil
+ }
+ if grpc.Code(err) == codes.NotFound {
+ return false, nil
+ }
+ return false, err
+}
+
+func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc {
+ it := s.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{
+ Topic: topicName,
+ })
+ return it.Next
+}
+
+func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+ return s.subc.ModifyAckDeadline(ctx, &pb.ModifyAckDeadlineRequest{
+ Subscription: subName,
+ AckIds: ackIDs,
+ AckDeadlineSeconds: trunc32(int64(deadline.Seconds())),
+ })
+}
+
+// maxPayload is the maximum number of bytes to devote to actual ids in
+// acknowledgement or modifyAckDeadline requests. A serialized
+// AcknowledgeRequest proto has a small constant overhead, plus the size of the
+// subscription name, plus 3 bytes per ID (a tag byte and two size bytes). A
+// ModifyAckDeadlineRequest has an additional few bytes for the deadline. We
+// don't know the subscription name here, so we just assume the size exclusive
+// of ids is 100 bytes.
+//
+// With gRPC there is no way for the client to know the server's max message size (it is
+// configurable on the server). We know from experience that it
+// is 512K.
+const (
+ maxPayload = 512 * 1024
+ reqFixedOverhead = 100
+ overheadPerID = 3
+ maxSendRecvBytes = 20 * 1024 * 1024 // 20M
+)
+
+// splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data.
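// Editor's illustration of the arithmetic above: with reqFixedOverhead = 100
// and overheadPerID = 3, a batch of one hundred 16-byte ack IDs costs
// 100 + 100*(16+3) = 2000 bytes — far under maxPayload (512K) — so it is sent
// whole; only very large ack backlogs get split across multiple requests.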
+func (s *apiService) splitAckIDs(ids []string) ([]string, []string) { + total := reqFixedOverhead + for i, id := range ids { + total += len(id) + overheadPerID + if total > maxPayload { + return ids[:i], ids[i:] + } + } + return ids, nil +} + +func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { + return s.subc.Acknowledge(ctx, &pb.AcknowledgeRequest{ + Subscription: subName, + AckIds: ackIDs, + }) +} + +func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { + resp, err := s.subc.Pull(ctx, &pb.PullRequest{ + Subscription: subName, + MaxMessages: maxMessages, + }, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + if err != nil { + return nil, err + } + return convertMessages(resp.ReceivedMessages) +} + +func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { + msgs := make([]*Message, 0, len(rms)) + for i, m := range rms { + msg, err := toMessage(m) + if err != nil { + return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) + } + msgs = append(msgs, msg) + } + return msgs, nil +} + +func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) { + rawMsgs := make([]*pb.PubsubMessage, len(msgs)) + for i, msg := range msgs { + rawMsgs[i] = &pb.PubsubMessage{ + Data: msg.Data, + Attributes: msg.Attributes, + } + } + resp, err := s.pubc.Publish(ctx, &pb.PublishRequest{ + Topic: topicName, + Messages: rawMsgs, + }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes))) + if err != nil { + return nil, err + } + return resp.MessageIds, nil +} + +func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error { + return s.subc.ModifyPushConfig(ctx, &pb.ModifyPushConfigRequest{ + Subscription: subName, + PushConfig: &pb.PushConfig{ + Attributes: conf.Attributes, + PushEndpoint: conf.Endpoint, + }, + }) +} + +func (s *apiService) iamHandle(resourceName string) *iam.Handle { + return iam.InternalNewHandle(s.pubc.Connection(), resourceName) +} + +func trunc32(i int64) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +func (s *apiService) newStreamingPuller(ctx context.Context, subName string, ackDeadlineSecs int32) *streamingPuller { + p := &streamingPuller{ + ctx: ctx, + subName: subName, + ackDeadlineSecs: ackDeadlineSecs, + subc: s.subc, + } + p.c = sync.NewCond(&p.mu) + return p +} + +type streamingPuller struct { + ctx context.Context + subName string + ackDeadlineSecs int32 + subc *vkit.SubscriberClient + + mu sync.Mutex + c *sync.Cond + inFlight bool + closed bool // set after CloseSend called + spc pb.Subscriber_StreamingPullClient + err error +} + +// open establishes (or re-establishes) a stream for pulling messages. +// It takes care that only one RPC is in flight at a time. +func (p *streamingPuller) open() error { + p.c.L.Lock() + defer p.c.L.Unlock() + p.openLocked() + return p.err +} + +func (p *streamingPuller) openLocked() { + if p.inFlight { + // Another goroutine is opening; wait for it. + for p.inFlight { + p.c.Wait() + } + return + } + // No opens in flight; start one. + // Keep the lock held, to avoid a race where we + // close the old stream while opening a new one. 
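// Editor's note: openLocked is a single-flight guard built on sync.Cond.
// The first caller marks inFlight and performs the StreamingPull RPC plus
// the initial Send; concurrent callers wait in the loop above, and the
// Broadcast at the end wakes them so all observe the same p.spc and p.err.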
+ p.inFlight = true + spc, err := p.subc.StreamingPull(p.ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + if err == nil { + err = spc.Send(&pb.StreamingPullRequest{ + Subscription: p.subName, + StreamAckDeadlineSeconds: p.ackDeadlineSecs, + }) + } + p.spc = spc + p.err = err + p.inFlight = false + p.c.Broadcast() +} + +func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error { + p.c.L.Lock() + defer p.c.L.Unlock() + // Wait for an open in flight. + for p.inFlight { + p.c.Wait() + } + var err error + var bo gax.Backoff + for { + select { + case <-p.ctx.Done(): + p.err = p.ctx.Err() + default: + } + if p.err != nil { + return p.err + } + spc := p.spc + // Do not call f with the lock held. Only one goroutine calls Send + // (streamingMessageIterator.sender) and only one calls Recv + // (streamingMessageIterator.receiver). If we locked, then a + // blocked Recv would prevent a Send from happening. + p.c.L.Unlock() + err = f(spc) + p.c.L.Lock() + if !p.closed && err != nil && isRetryable(err) { + // Sleep with exponential backoff. Normally we wouldn't hold the lock while sleeping, + // but here it can't do any harm, since the stream is broken anyway. + gax.Sleep(p.ctx, bo.Pause()) + p.openLocked() + continue + } + // Not an error, or not a retryable error; stop retrying. + p.err = err + return err + } +} + +// Logic from https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java. +func isRetryable(err error) bool { + s, ok := status.FromError(err) + if !ok { // includes io.EOF, normal stream close, which causes us to reopen + return true + } + switch s.Code() { + case codes.DeadlineExceeded, codes.Internal, codes.Canceled, codes.ResourceExhausted: + return true + case codes.Unavailable: + return !strings.Contains(s.Message(), "Server shutdownNow invoked") + default: + return false + } +} + +func (p *streamingPuller) fetchMessages() ([]*Message, error) { + var res *pb.StreamingPullResponse + err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { + var err error + res, err = spc.Recv() + return err + }) + if err != nil { + return nil, err + } + return convertMessages(res.ReceivedMessages) +} + +func (p *streamingPuller) send(req *pb.StreamingPullRequest) error { + // Note: len(modAckIDs) == len(modSecs) + var rest *pb.StreamingPullRequest + for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 { + req, rest = splitRequest(req, maxPayload) + err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { + x := spc.Send(req) + return x + }) + if err != nil { + return err + } + req = rest + } + return nil +} + +func (p *streamingPuller) closeSend() { + p.mu.Lock() + p.closed = true + p.spc.CloseSend() + p.mu.Unlock() +} + +// Split req into a prefix that is smaller than maxSize, and a remainder. +func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) { + const int32Bytes = 4 + + // Copy all fields before splitting the variable-sized ones. + remainder = &pb.StreamingPullRequest{} + *remainder = *req + // Split message so it isn't too big. 
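// Editor's note: the loop below walks both ID slices in parallel, charging
// overheadPerID + len(id) per ack ID plus int32Bytes for each modify-deadline
// entry's seconds value, and stops once the running size reaches or exceeds
// maxSize; the split index j is then backed off by one if the last iteration
// overshot, so the prefix request stays within the limit.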
+ size := reqFixedOverhead + i := 0 + for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) { + if i < len(req.AckIds) { + size += overheadPerID + len(req.AckIds[i]) + } + if i < len(req.ModifyDeadlineAckIds) { + size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes + } + i++ + } + + min := func(a, b int) int { + if a < b { + return a + } + return b + } + + j := i + if size > maxSize { + j-- + } + k := min(j, len(req.AckIds)) + remainder.AckIds = req.AckIds[k:] + req.AckIds = req.AckIds[:k] + k = min(j, len(req.ModifyDeadlineAckIds)) + remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:] + remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:] + req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k] + req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k] + return req, remainder +} + +func (s *apiService) createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error) { + snap, err := s.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{ + Name: snapName, + Subscription: subName, + }) + if err != nil { + return nil, err + } + return s.toSnapshotConfig(snap) +} + +func (s *apiService) deleteSnapshot(ctx context.Context, snapName string) error { + return s.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: snapName}) +} + +func (s *apiService) listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc { + it := s.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{ + Project: projName, + }) + return func() (*snapshotConfig, error) { + snap, err := it.Next() + if err != nil { + return nil, err + } + return s.toSnapshotConfig(snap) + } +} + +func (s *apiService) toSnapshotConfig(snap *pb.Snapshot) (*snapshotConfig, error) { + exp, err := ptypes.Timestamp(snap.ExpireTime) + if err != nil { + return nil, err + } + return &snapshotConfig{ + snapshot: &snapshot{ + s: s, + name: snap.Name, + }, + Topic: newTopic(s, snap.Topic), + Expiration: exp, + }, nil +} + +func (s *apiService) seekToTime(ctx context.Context, subName string, t time.Time) error { + ts, err := ptypes.TimestampProto(t) + if err != nil { + return err + } + _, err = s.subc.Seek(ctx, &pb.SeekRequest{ + Subscription: subName, + Target: &pb.SeekRequest_Time{ts}, + }) + return err +} + +func (s *apiService) seekToSnapshot(ctx context.Context, subName, snapName string) error { + _, err := s.subc.Seek(ctx, &pb.SeekRequest{ + Subscription: subName, + Target: &pb.SeekRequest_Snapshot{snapName}, + }) + return err +} diff --git a/vendor/cloud.google.com/go/pubsub/service_test.go b/vendor/cloud.google.com/go/pubsub/service_test.go new file mode 100644 index 0000000..1139d0f --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service_test.go @@ -0,0 +1,69 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func TestSplitRequest(t *testing.T) { + split := func(a []string, i int) ([]string, []string) { + if len(a) < i { + return a, nil + } + return a[:i], a[i:] + } + ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"} + modDeadlines := []int32{1, 2, 3, 4, 5} + for i, test := range []struct { + ackIDs []string + modAckIDs []string + splitIndex int + }{ + {ackIDs, ackIDs, 2}, + {nil, ackIDs, 3}, + {ackIDs, nil, 5}, + {nil, ackIDs[:1], 1}, + } { + req := &pb.StreamingPullRequest{ + AckIds: test.ackIDs, + ModifyDeadlineAckIds: test.modAckIDs, + ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)], + } + a1, a2 := split(test.ackIDs, test.splitIndex) + m1, m2 := split(test.modAckIDs, test.splitIndex) + want1 := &pb.StreamingPullRequest{ + AckIds: a1, + ModifyDeadlineAckIds: m1, + ModifyDeadlineSeconds: modDeadlines[:len(m1)], + } + want2 := &pb.StreamingPullRequest{ + AckIds: a2, + ModifyDeadlineAckIds: m2, + ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)], + } + got1, got2 := splitRequest(req, reqFixedOverhead+40) + if !testutil.Equal(got1, want1) { + t.Errorf("#%d: first:\ngot %+v\nwant %+v", i, got1, want1) + } + if !testutil.Equal(got2, want2) { + t.Errorf("#%d: second:\ngot %+v\nwant %+v", i, got2, want2) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go new file mode 100644 index 0000000..77115b5 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/snapshot.go @@ -0,0 +1,119 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "strings" + "time" + + vkit "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" +) + +// Snapshot is a reference to a PubSub snapshot. +type snapshot struct { + s service + + // The fully qualified identifier for the snapshot, in the format "projects//snapshots/" + name string +} + +// ID returns the unique identifier of the snapshot within its project. +func (s *snapshot) ID() string { + slash := strings.LastIndex(s.name, "/") + if slash == -1 { + // name is not a fully-qualified name. + panic("bad snapshot name") + } + return s.name[slash+1:] +} + +// SnapshotConfig contains the details of a Snapshot. +type snapshotConfig struct { + *snapshot + Topic *Topic + Expiration time.Time +} + +// Snapshot creates a reference to a snapshot. +func (c *Client) snapshot(id string) *snapshot { + return &snapshot{ + s: c.s, + name: vkit.SubscriberSnapshotPath(c.projectID, id), + } +} + +// Snapshots returns an iterator which returns snapshots for this project. +func (c *Client) snapshots(ctx context.Context) *snapshotConfigIterator { + return &snapshotConfigIterator{ + next: c.s.listProjectSnapshots(ctx, c.fullyQualifiedProjectName()), + } +} + +// SnapshotConfigIterator is an iterator that returns a series of snapshots. 
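// Editor's sketch of the usual iteration pattern in this package (error
// handling abbreviated; this follows the google.golang.org/api/iterator
// convention):
//
//	for {
//		cfg, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		_ = cfg // use the snapshot config
//	}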
+type snapshotConfigIterator struct {
+ next nextSnapshotFunc
+}
+
+// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results.
+// Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
+func (snaps *snapshotConfigIterator) Next() (*snapshotConfig, error) {
+ return snaps.next()
+}
+
+// Delete deletes a snapshot.
+func (snap *snapshot) delete(ctx context.Context) error {
+ return snap.s.deleteSnapshot(ctx, snap.name)
+}
+
+// SeekTime seeks the subscription to a point in time.
+//
+// Messages retained in the subscription that were published before this
+// time are marked as acknowledged, and messages retained in the
+// subscription that were published after this time are marked as
+// unacknowledged. Note that this operation affects only those messages
+// retained in the subscription (configured by SnapshotConfig). For example,
+// if `time` corresponds to a point before the message retention
+// window (or to a point before the system's notion of the subscription
+// creation time), only retained messages will be marked as unacknowledged,
+// and already-expunged messages will not be restored.
+func (s *Subscription) seekToTime(ctx context.Context, t time.Time) error {
+ return s.s.seekToTime(ctx, s.name, t)
+}
+
+// Snapshot creates a new snapshot from this subscription.
+// The snapshot will be for the topic this subscription is subscribed to.
+// If name is the empty string, a unique name is assigned.
+//
+// The created snapshot is guaranteed to retain:
+// (a) The existing backlog on the subscription. More precisely, this is
+// defined as the messages in the subscription's backlog that are
+// unacknowledged when Snapshot returns without error.
+// (b) Any messages published to the subscription's topic following
+// Snapshot returning without error.
+func (s *Subscription) createSnapshot(ctx context.Context, name string) (*snapshotConfig, error) {
+ if name != "" {
+ name = vkit.SubscriberSnapshotPath(strings.Split(s.name, "/")[1], name)
+ }
+ return s.s.createSnapshot(ctx, name, s.name)
+}
+
+// SeekSnapshot seeks the subscription to a snapshot.
+//
+// The snapshot need not be created from this subscription,
+// but the snapshot must be for the topic this subscription is subscribed to.
+func (s *Subscription) seekToSnapshot(ctx context.Context, snap *snapshot) error {
+ return s.s.seekToSnapshot(ctx, s.name, snap.name)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go
new file mode 100644
index 0000000..5500809
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go
@@ -0,0 +1,314 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+// TODO(jba): test keepalive
+// TODO(jba): test that expired messages are not kept alive
+// TODO(jba): test that when all messages expire, Stop returns.
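// Editor's note: the tests below run Receive against the in-process fake
// gRPC server constructed by newFake (defined near the bottom of this file),
// which allows scripted responses and injected failures via
// addStreamingPullMessages and addStreamingPullError.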
+ +import ( + "io" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/net/context" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + timestamp = &tspb.Timestamp{} + testMessages = []*pb.ReceivedMessage{ + {AckId: "0", Message: &pb.PubsubMessage{Data: []byte{1}, PublishTime: timestamp}}, + {AckId: "1", Message: &pb.PubsubMessage{Data: []byte{2}, PublishTime: timestamp}}, + {AckId: "2", Message: &pb.PubsubMessage{Data: []byte{3}, PublishTime: timestamp}}, + } +) + +func TestStreamingPullBasic(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullMultipleFetches(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullMessages(testMessages[1:]) + testStreamingPullIteration(t, client, server, testMessages) +} + +func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) { + sub := client.Subscription("s") + gotMsgs, err := pullN(context.Background(), sub, len(msgs), func(_ context.Context, m *Message) { + id, err := strconv.Atoi(m.ackID) + if err != nil { + panic(err) + } + // ack evens, nack odds + if id%2 == 0 { + m.Ack() + } else { + m.Nack() + } + }) + if err != nil { + t.Fatalf("Pull: %v", err) + } + gotMap := map[string]*Message{} + for _, m := range gotMsgs { + gotMap[m.ackID] = m + } + for i, msg := range msgs { + want, err := toMessage(msg) + if err != nil { + t.Fatal(err) + } + want.calledDone = true + got := gotMap[want.ackID] + if got == nil { + t.Errorf("%d: no message for ackID %q", i, want.ackID) + continue + } + if !testutil.Equal(got, want, cmp.AllowUnexported(Message{}), cmpopts.IgnoreTypes(func(string, bool) {})) { + t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want) + } + } + server.wait() + for i := 0; i < len(msgs); i++ { + id := msgs[i].AckId + if i%2 == 0 { + if !server.Acked[id] { + t.Errorf("msg %q should have been acked but wasn't", id) + } + } else { + if dl, ok := server.Deadlines[id]; !ok || dl != 0 { + t.Errorf("msg %q should have been nacked but wasn't", id) + } + } + } +} + +func TestStreamingPullError(t *testing.T) { + // If an RPC to the service returns a non-retryable error, Pull should + // return after all callbacks return, without waiting for messages to be + // acked. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullError(grpc.Errorf(codes.Unknown, "")) + sub := client.Subscription("s") + // Use only one goroutine, since the fake server is configured to + // return only one error. 
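// Editor's note: the callback below parks on ctx.Done() and closes
// callbackDone when it returns; the select after Receive then verifies that
// the non-retryable Unknown error was surfaced only after the outstanding
// callback had finished.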
+ sub.ReceiveSettings.NumGoroutines = 1 + callbackDone := make(chan struct{}) + ctx, _ := context.WithTimeout(context.Background(), time.Second) + err := sub.Receive(ctx, func(ctx context.Context, m *Message) { + defer close(callbackDone) + select { + case <-ctx.Done(): + return + } + }) + select { + case <-callbackDone: + default: + t.Fatal("Receive returned but callback was not done") + } + if want := codes.Unknown; grpc.Code(err) != want { + t.Fatalf("got <%v>, want code %v", err, want) + } +} + +func TestStreamingPullCancel(t *testing.T) { + // If Receive's context is canceled, it should return after all callbacks + // return and all messages have been acked. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := client.Subscription("s") + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + var n int32 + err := sub.Receive(ctx, func(ctx2 context.Context, m *Message) { + atomic.AddInt32(&n, 1) + defer atomic.AddInt32(&n, -1) + cancel() + m.Ack() + }) + if got := atomic.LoadInt32(&n); got != 0 { + t.Errorf("Receive returned with %d callbacks still running", got) + } + if err != nil { + t.Fatalf("Receive got <%v>, want nil", err) + } +} + +func TestStreamingPullRetry(t *testing.T) { + // Check that we retry on io.EOF or Unavailable. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullError(io.EOF) + server.addStreamingPullError(io.EOF) + server.addStreamingPullMessages(testMessages[1:2]) + server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullMessages(testMessages[2:]) + + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullOneActive(t *testing.T) { + // Only one call to Pull can be active at a time. + client, srv := newFake(t) + srv.addStreamingPullMessages(testMessages[:1]) + sub := client.Subscription("s") + ctx, cancel := context.WithCancel(context.Background()) + err := sub.Receive(ctx, func(ctx context.Context, m *Message) { + m.Ack() + err := sub.Receive(ctx, func(context.Context, *Message) {}) + if err != errReceiveInProgress { + t.Errorf("got <%v>, want <%v>", err, errReceiveInProgress) + } + cancel() + }) + if err != nil { + t.Fatalf("got <%v>, want nil", err) + } +} + +func TestStreamingPullConcurrent(t *testing.T) { + newMsg := func(i int) *pb.ReceivedMessage { + return &pb.ReceivedMessage{ + AckId: strconv.Itoa(i), + Message: &pb.PubsubMessage{Data: []byte{byte(i)}, PublishTime: timestamp}, + } + } + + // Multiple goroutines should be able to read from the same iterator. + client, server := newFake(t) + // Add a lot of messages, a few at a time, to make sure both threads get a chance. 
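// Editor's note: queueing the messages two at a time gives the fake server
// natural interleaving points across the pulling goroutines; the seen map at
// the end then asserts that no ack ID was delivered to more than one callback.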
+ nMessages := 100 + for i := 0; i < nMessages; i += 2 { + server.addStreamingPullMessages([]*pb.ReceivedMessage{newMsg(i), newMsg(i + 1)}) + } + sub := client.Subscription("s") + ctx, _ := context.WithTimeout(context.Background(), time.Second) + gotMsgs, err := pullN(ctx, sub, nMessages, func(ctx context.Context, m *Message) { + m.Ack() + }) + if err != nil { + t.Fatalf("Receive: %v", err) + } + seen := map[string]bool{} + for _, gm := range gotMsgs { + if seen[gm.ackID] { + t.Fatalf("duplicate ID %q", gm.ackID) + } + seen[gm.ackID] = true + } + if len(seen) != nMessages { + t.Fatalf("got %d messages, want %d", len(seen), nMessages) + } +} + +func TestStreamingPullFlowControl(t *testing.T) { + // Callback invocations should not occur if flow control limits are exceeded. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := client.Subscription("s") + sub.ReceiveSettings.MaxOutstandingMessages = 2 + ctx, cancel := context.WithCancel(context.Background()) + activec := make(chan int) + waitc := make(chan int) + errc := make(chan error) + go func() { + errc <- sub.Receive(ctx, func(_ context.Context, m *Message) { + activec <- 1 + <-waitc + m.Ack() + }) + }() + // Here, two callbacks are active. Receive should be blocked in the flow + // control acquire method on the third message. + <-activec + <-activec + select { + case <-activec: + t.Fatal("third callback in progress") + case <-time.After(100 * time.Millisecond): + } + cancel() + // Receive still has not returned, because both callbacks are still blocked on waitc. + select { + case err := <-errc: + t.Fatalf("Receive returned early with error %v", err) + case <-time.After(100 * time.Millisecond): + } + // Let both callbacks proceed. + waitc <- 1 + waitc <- 1 + // The third callback will never run, because acquire returned a non-nil + // error, causing Receive to return. So now Receive should end. + if err := <-errc; err != nil { + t.Fatalf("got %v from Receive, want nil", err) + } +} + +func newFake(t *testing.T) (*Client, *fakeServer) { + srv, err := newFakeServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + return client, srv +} + +// pullN calls sub.Receive until at least n messages are received. +func pullN(ctx context.Context, sub *Subscription, n int, f func(context.Context, *Message)) ([]*Message, error) { + var ( + mu sync.Mutex + msgs []*Message + ) + cctx, cancel := context.WithCancel(ctx) + err := sub.Receive(cctx, func(ctx context.Context, m *Message) { + mu.Lock() + msgs = append(msgs, m) + nSeen := len(msgs) + mu.Unlock() + f(ctx, m) + if nSeen >= n { + cancel() + } + }) + if err != nil { + return nil, err + } + return msgs, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go new file mode 100644 index 0000000..2144125 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -0,0 +1,405 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/iam"
+ "golang.org/x/net/context"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+// Subscription is a reference to a PubSub subscription.
+type Subscription struct {
+ s service
+
+ // The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<subid>"
+ name string
+
+ // Settings for pulling messages. Configure these before calling Receive.
+ ReceiveSettings ReceiveSettings
+
+ mu sync.Mutex
+ receiveActive bool
+}
+
+// Subscription creates a reference to a subscription.
+func (c *Client) Subscription(id string) *Subscription {
+ return newSubscription(c.s, fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id))
+}
+
+func newSubscription(s service, name string) *Subscription {
+ return &Subscription{
+ s: s,
+ name: name,
+ }
+}
+
+// String returns the globally unique printable name of the subscription.
+func (s *Subscription) String() string {
+ return s.name
+}
+
+// ID returns the unique identifier of the subscription within its project.
+func (s *Subscription) ID() string {
+ slash := strings.LastIndex(s.name, "/")
+ if slash == -1 {
+ // name is not a fully-qualified name.
+ panic("bad subscription name")
+ }
+ return s.name[slash+1:]
+}
+
+// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
+func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
+ return &SubscriptionIterator{
+ s: c.s,
+ next: c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName()),
+ }
+}
+
+// SubscriptionIterator is an iterator that returns a series of subscriptions.
+type SubscriptionIterator struct {
+ s service
+ next nextStringFunc
+}
+
+// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned.
+func (subs *SubscriptionIterator) Next() (*Subscription, error) {
+ subName, err := subs.next()
+ if err != nil {
+ return nil, err
+ }
+ return newSubscription(subs.s, subName), nil
+}
+
+// PushConfig contains configuration for subscriptions that operate in push mode.
+type PushConfig struct {
+ // A URL locating the endpoint to which messages should be pushed.
+ Endpoint string
+
+ // Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
+ Attributes map[string]string
+}
+
+// SubscriptionConfig contains the configuration of a subscription.
+type SubscriptionConfig struct {
+ Topic *Topic
+ PushConfig PushConfig
+
+ // The default maximum time after a subscriber receives a message before
+ // the subscriber should acknowledge the message. Note: messages which are
+ // obtained via Subscription.Receive need not be acknowledged within this
+ // deadline, as the deadline will be automatically extended.
+ AckDeadline time.Duration
+
+ // Whether to retain acknowledged messages. If true, acknowledged messages
+ // will not be expunged until they fall out of the RetentionDuration window.
+ retainAckedMessages bool + + // How long to retain messages in backlog, from the time of publish. If RetainAckedMessages is true, + // this duration affects the retention of acknowledged messages, + // otherwise only unacknowledged messages are retained. + // Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes. + retentionDuration time.Duration +} + +// ReceiveSettings configure the Receive method. +// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings. +type ReceiveSettings struct { + // MaxExtension is the maximum period for which the Subscription should + // automatically extend the ack deadline for each message. + // + // The Subscription will automatically extend the ack deadline of all + // fetched Messages for the duration specified. Automatic deadline + // extension may be disabled by specifying a duration less than 1. + MaxExtension time.Duration + + // MaxOutstandingMessages is the maximum number of unprocessed messages + // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it + // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages. + // If the value is negative, then there will be no limit on the number of + // unprocessed messages. + MaxOutstandingMessages int + + // MaxOutstandingBytes is the maximum size of unprocessed messages + // (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will + // be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If + // the value is negative, then there will be no limit on the number of bytes + // for unprocessed messages. + MaxOutstandingBytes int + + // NumGoroutines is the number of goroutines Receive will spawn to pull + // messages concurrently. If NumGoroutines is less than 1, it will be treated + // as if it were DefaultReceiveSettings.NumGoroutines. + // + // NumGoroutines does not limit the number of messages that can be processed + // concurrently. Even with one goroutine, many messages might be processed at + // once, because that goroutine may continually receive messages and invoke the + // function passed to Receive on them. To limit the number of messages being + // processed concurrently, set MaxOutstandingMessages. + NumGoroutines int +} + +// DefaultReceiveSettings holds the default values for ReceiveSettings. +var DefaultReceiveSettings = ReceiveSettings{ + MaxExtension: 10 * time.Minute, + MaxOutstandingMessages: 1000, + MaxOutstandingBytes: 1e9, // 1G + NumGoroutines: 1, +} + +// Delete deletes the subscription. +func (s *Subscription) Delete(ctx context.Context) error { + return s.s.deleteSubscription(ctx, s.name) +} + +// Exists reports whether the subscription exists on the server. +func (s *Subscription) Exists(ctx context.Context) (bool, error) { + return s.s.subscriptionExists(ctx, s.name) +} + +// Config fetches the current configuration for the subscription. +func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { + conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name) + if err != nil { + return SubscriptionConfig{}, err + } + conf.Topic = &Topic{ + s: s.s, + name: topicName, + } + return conf, nil +} + +// SubscriptionConfigToUpdate describes how to update a subscription. +type SubscriptionConfigToUpdate struct { + // If non-nil, the push config is changed. + PushConfig *PushConfig +} + +// Update changes an existing subscription according to the fields set in cfg. +// It returns the new SubscriptionConfig. 
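// Editor's sketch of an Update call against the API above (the push endpoint
// shown is hypothetical):
//
//	cfg, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
//		PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
//	})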
+// +// Update returns an error if no fields were modified. +func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) { + if cfg.PushConfig == nil { + return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update") + } + if err := s.s.modifyPushConfig(ctx, s.name, *cfg.PushConfig); err != nil { + return SubscriptionConfig{}, err + } + return s.Config(ctx) +} + +func (s *Subscription) IAM() *iam.Handle { + return s.s.iamHandle(s.name) +} + +// CreateSubscription creates a new subscription on a topic. +// +// id is the name of the subscription to create. It must start with a letter, +// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), +// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It +// must be between 3 and 255 characters in length, and must not start with +// "goog". +// +// cfg.Topic is the topic from which the subscription should receive messages. It +// need not belong to the same project as the subscription. This field is required. +// +// cfg.AckDeadline is the maximum time after a subscriber receives a message before +// the subscriber should acknowledge the message. It must be between 10 and 600 +// seconds (inclusive), and is rounded down to the nearest second. If the +// provided ackDeadline is 0, then the default value of 10 seconds is used. +// Note: messages which are obtained via Subscription.Receive need not be +// acknowledged within this deadline, as the deadline will be automatically +// extended. +// +// cfg.PushConfig may be set to configure this subscription for push delivery. +// +// If the subscription already exists an error will be returned. +func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) { + if cfg.Topic == nil { + return nil, errors.New("pubsub: require non-nil Topic") + } + if cfg.AckDeadline == 0 { + cfg.AckDeadline = 10 * time.Second + } + if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second { + return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) + } + + sub := c.Subscription(id) + err := c.s.createSubscription(ctx, sub.name, cfg) + return sub, err +} + +var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription") + +// Receive calls f with the outstanding messages from the subscription. +// It blocks until ctx is done, or the service returns a non-retryable error. +// +// The standard way to terminate a Receive is to cancel its context: +// +// cctx, cancel := context.WithCancel(ctx) +// err := sub.Receive(cctx, callback) +// // Call cancel from callback, or another goroutine. +// +// If the service returns a non-retryable error, Receive returns that error after +// all of the outstanding calls to f have returned. If ctx is done, Receive +// returns nil after all of the outstanding calls to f have returned and +// all messages have been acknowledged or have expired. +// +// Receive calls f concurrently from multiple goroutines. It is encouraged to +// process messages synchronously in f, even if that processing is relatively +// time-consuming; Receive will spawn new goroutines for incoming messages, +// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings. +// +// The context passed to f will be canceled when ctx is Done or there is a +// fatal service error. 
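// Editor's sketch of a bounded Receive (the limits are arbitrary and process
// is a hypothetical handler):
//
//	sub.ReceiveSettings.MaxOutstandingMessages = 10
//	sub.ReceiveSettings.NumGoroutines = 2
//	err := sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
//		process(m)
//		m.Ack()
//	})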
+// +// Receive will automatically extend the ack deadline of all fetched Messages for the +// period specified by s.ReceiveSettings.MaxExtension. +// +// Each Subscription may have only one invocation of Receive active at a time. +func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error { + s.mu.Lock() + if s.receiveActive { + s.mu.Unlock() + return errReceiveInProgress + } + s.receiveActive = true + s.mu.Unlock() + defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }() + + config, err := s.Config(ctx) + if err != nil { + if grpc.Code(err) == codes.Canceled { + return nil + } + return err + } + maxCount := s.ReceiveSettings.MaxOutstandingMessages + if maxCount == 0 { + maxCount = DefaultReceiveSettings.MaxOutstandingMessages + } + maxBytes := s.ReceiveSettings.MaxOutstandingBytes + if maxBytes == 0 { + maxBytes = DefaultReceiveSettings.MaxOutstandingBytes + } + maxExt := s.ReceiveSettings.MaxExtension + if maxExt == 0 { + maxExt = DefaultReceiveSettings.MaxExtension + } else if maxExt < 0 { + // If MaxExtension is negative, disable automatic extension. + maxExt = 0 + } + numGoroutines := s.ReceiveSettings.NumGoroutines + if numGoroutines < 1 { + numGoroutines = DefaultReceiveSettings.NumGoroutines + } + // TODO(jba): add tests that verify that ReceiveSettings are correctly processed. + po := &pullOptions{ + maxExtension: maxExt, + maxPrefetch: trunc32(int64(maxCount)), + ackDeadline: config.AckDeadline, + } + fc := newFlowController(maxCount, maxBytes) + + // Wait for all goroutines started by Receive to return, so instead of an + // obscure goroutine leak we have an obvious blocked call to Receive. + group, gctx := errgroup.WithContext(ctx) + for i := 0; i < numGoroutines; i++ { + group.Go(func() error { + return s.receive(gctx, po, fc, f) + }) + } + return group.Wait() +} + +func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error { + // Cancel a sub-context when we return, to kick the context-aware callbacks + // and the goroutine below. + ctx2, cancel := context.WithCancel(ctx) + // Call stop when Receive's context is done. + // Stop will block until all outstanding messages have been acknowledged + // or there was a fatal service error. + // The iterator does not use the context passed to Receive. If it did, canceling + // that context would immediately stop the iterator without waiting for unacked + // messages. + iter := newMessageIterator(context.Background(), s.s, s.name, po) + + // We cannot use errgroup from Receive here. Receive might already be calling group.Wait, + // and group.Wait cannot be called concurrently with group.Go. We give each receive() its + // own WaitGroup instead. + // Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed + // to be called after all Adds. + var wg sync.WaitGroup + wg.Add(1) + go func() { + <-ctx2.Done() + iter.stop() + wg.Done() + }() + defer wg.Wait() + + defer cancel() + for { + msgs, err := iter.receive() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + for i, msg := range msgs { + msg := msg + // TODO(jba): call acquire closer to when the message is allocated. + if err := fc.acquire(ctx, len(msg.Data)); err != nil { + // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. + for _, m := range msgs[i:] { + m.Nack() + } + return nil + } + wg.Add(1) + go func() { + // TODO(jba): call release when the message is available for GC. 
+ // This considers the message to be released when + // f is finished, but f may ack early or not at all. + defer wg.Done() + defer fc.release(len(msg.Data)) + f(ctx2, msg) + }() + } + } +} + +// TODO(jba): remove when we delete messageIterator. +type pullOptions struct { + maxExtension time.Duration + maxPrefetch int32 + // ackDeadline is the default ack deadline for the subscription. Not + // configurable. + ackDeadline time.Duration +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription_test.go b/vendor/cloud.google.com/go/pubsub/subscription_test.go new file mode 100644 index 0000000..06ccf17 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription_test.go @@ -0,0 +1,150 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +type subListService struct { + service + subs []string + err error + + t *testing.T // for error logging. +} + +func (s *subListService) newNextStringFunc() nextStringFunc { + return func() (string, error) { + if len(s.subs) == 0 { + return "", iterator.Done + } + sn := s.subs[0] + s.subs = s.subs[1:] + return sn, s.err + } +} + +func (s *subListService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc { + if projName != "projects/projid" { + s.t.Fatalf("unexpected call: projName: %q", projName) + return nil + } + return s.newNextStringFunc() +} + +func (s *subListService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc { + if topicName != "projects/projid/topics/topic" { + s.t.Fatalf("unexpected call: topicName: %q", topicName) + return nil + } + return s.newNextStringFunc() +} + +// All returns the remaining subscriptions from this iterator. 
+func slurpSubs(it *SubscriptionIterator) ([]*Subscription, error) { + var subs []*Subscription + for { + switch sub, err := it.Next(); err { + case nil: + subs = append(subs, sub) + case iterator.Done: + return subs, nil + default: + return nil, err + } + } +} + +func TestSubscriptionID(t *testing.T) { + const id = "id" + serv := &subListService{ + subs: []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2"}, + t: t, + } + c := &Client{projectID: "projid", s: serv} + s := c.Subscription(id) + if got, want := s.ID(), id; got != want { + t.Errorf("Subscription.ID() = %q; want %q", got, want) + } + want := []string{"s1", "s2"} + subs, err := slurpSubs(c.Subscriptions(context.Background())) + if err != nil { + t.Errorf("error listing subscriptions: %v", err) + } + for i, s := range subs { + if got, want := s.ID(), want[i]; got != want { + t.Errorf("Subscription.ID() = %q; want %q", got, want) + } + } +} + +func TestListProjectSubscriptions(t *testing.T) { + snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + s := &subListService{subs: snames, t: t} + c := &Client{projectID: "projid", s: s} + subs, err := slurpSubs(c.Subscriptions(context.Background())) + if err != nil { + t.Errorf("error listing subscriptions: %v", err) + } + got := subNames(subs) + want := []string{ + "projects/projid/subscriptions/s1", + "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + if !testutil.Equal(got, want) { + t.Errorf("sub list: got: %v, want: %v", got, want) + } + if len(s.subs) != 0 { + t.Errorf("outstanding subs: %v", s.subs) + } +} + +func TestListTopicSubscriptions(t *testing.T) { + snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + s := &subListService{subs: snames, t: t} + c := &Client{projectID: "projid", s: s} + subs, err := slurpSubs(c.Topic("topic").Subscriptions(context.Background())) + if err != nil { + t.Errorf("error listing subscriptions: %v", err) + } + got := subNames(subs) + want := []string{ + "projects/projid/subscriptions/s1", + "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + if !testutil.Equal(got, want) { + t.Errorf("sub list: got: %v, want: %v", got, want) + } + if len(s.subs) != 0 { + t.Errorf("outstanding subs: %v", s.subs) + } +} + +func subNames(subs []*Subscription) []string { + var names []string + for _, sub := range subs { + names = append(names, sub.name) + } + return names +} diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go new file mode 100644 index 0000000..274f54d --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -0,0 +1,368 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
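// Editor's sketch of the publish flow defined in this file (project and topic
// IDs are hypothetical; error handling abbreviated):
//
//	client, _ := pubsub.NewClient(ctx, "my-project")
//	topic := client.Topic("my-topic")
//	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
//	id, err := res.Get(ctx) // blocks until the batch containing the message is sent
//	topic.Stop()            // flush pending bundles and stop background goroutines
//
// Publish itself never blocks; batching is governed by PublishSettings below.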
+ +package pubsub + +import ( + "errors" + "fmt" + "runtime" + "strings" + "sync" + "time" + + "cloud.google.com/go/iam" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/api/support/bundler" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +const ( + // The maximum number of messages that can be in a single publish request, as + // determined by the PubSub service. + MaxPublishRequestCount = 1000 + + // The maximum size of a single publish request in bytes, as determined by the PubSub service. + MaxPublishRequestBytes = 1e7 + + maxInt = int(^uint(0) >> 1) +) + +// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes. +var ErrOversizedMessage = bundler.ErrOversizedItem + +// Topic is a reference to a PubSub topic. +// +// The methods of Topic are safe for use by multiple goroutines. +type Topic struct { + s service + // The fully qualified identifier for the topic, in the format "projects//topics/" + name string + + // Settings for publishing messages. All changes must be made before the + // first call to Publish. The default is DefaultPublishSettings. + PublishSettings PublishSettings + + mu sync.RWMutex + stopped bool + bundler *bundler.Bundler + + wg sync.WaitGroup + + // Channel for message bundles to be published. Close to indicate that Stop was called. + bundlec chan []*bundledMessage +} + +// PublishSettings control the bundling of published messages. +type PublishSettings struct { + + // Publish a non-empty batch after this delay has passed. + DelayThreshold time.Duration + + // Publish a batch when it has this many messages. The maximum is + // MaxPublishRequestCount. + CountThreshold int + + // Publish a batch when its size in bytes reaches this value. + ByteThreshold int + + // The number of goroutines that invoke the Publish RPC concurrently. + // Defaults to a multiple of GOMAXPROCS. + NumGoroutines int + + // The maximum time that the client will attempt to publish a bundle of messages. + Timeout time.Duration +} + +// DefaultPublishSettings holds the default values for topics' PublishSettings. +var DefaultPublishSettings = PublishSettings{ + DelayThreshold: 1 * time.Millisecond, + CountThreshold: 100, + ByteThreshold: 1e6, + Timeout: 60 * time.Second, +} + +// CreateTopic creates a new topic. +// The specified topic ID must start with a letter, and contain only letters +// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), +// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 +// characters in length, and must not start with "goog". +// If the topic already exists an error will be returned. +func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) { + t := c.Topic(id) + err := c.s.createTopic(ctx, t.name) + return t, err +} + +// Topic creates a reference to a topic in the client's project. +// +// If a Topic's Publish method is called, it has background goroutines +// associated with it. Clean them up by calling Topic.Stop. +// +// Avoid creating many Topic instances if you use them to publish. +func (c *Client) Topic(id string) *Topic { + return c.TopicInProject(id, c.projectID) +} + +// TopicInProject creates a reference to a topic in the given project. +// +// If a Topic's Publish method is called, it has background goroutines +// associated with it. Clean them up by calling Topic.Stop. +// +// Avoid creating many Topic instances if you use them to publish. 
+func (c *Client) TopicInProject(id, projectID string) *Topic {
+ return newTopic(c.s, fmt.Sprintf("projects/%s/topics/%s", projectID, id))
+}
+
+func newTopic(s service, name string) *Topic {
+ // bundlec is unbuffered. A buffer would occupy memory not
+ // accounted for by the bundler, so BufferedByteLimit would be a lie:
+ // the actual memory consumed would be higher.
+ return &Topic{
+ s: s,
+ name: name,
+ PublishSettings: DefaultPublishSettings,
+ bundlec: make(chan []*bundledMessage),
+ }
+}
+
+// Topics returns an iterator which returns all of the topics for the client's project.
+func (c *Client) Topics(ctx context.Context) *TopicIterator {
+ return &TopicIterator{
+ s: c.s,
+ next: c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName()),
+ }
+}
+
+// TopicIterator is an iterator that returns a series of topics.
+type TopicIterator struct {
+ s service
+ next nextStringFunc
+}
+
+// Next returns the next topic. If there are no more topics, iterator.Done will be returned.
+func (tps *TopicIterator) Next() (*Topic, error) {
+ topicName, err := tps.next()
+ if err != nil {
+ return nil, err
+ }
+ return newTopic(tps.s, topicName), nil
+}
+
+// ID returns the unique identifier of the topic within its project.
+func (t *Topic) ID() string {
+ slash := strings.LastIndex(t.name, "/")
+ if slash == -1 {
+ // name is not a fully-qualified name.
+ panic("bad topic name")
+ }
+ return t.name[slash+1:]
+}
+
+// String returns the printable globally unique name for the topic.
+func (t *Topic) String() string {
+ return t.name
+}
+
+// Delete deletes the topic.
+func (t *Topic) Delete(ctx context.Context) error {
+ return t.s.deleteTopic(ctx, t.name)
+}
+
+// Exists reports whether the topic exists on the server.
+func (t *Topic) Exists(ctx context.Context) (bool, error) {
+ if t.name == "_deleted-topic_" {
+ return false, nil
+ }
+
+ return t.s.topicExists(ctx, t.name)
+}
+
+func (t *Topic) IAM() *iam.Handle {
+ return t.s.iamHandle(t.name)
+}
+
+// Subscriptions returns an iterator which returns the subscriptions for this topic.
+func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
+ // NOTE: zero or more Subscriptions that are ultimately returned by this
+ // Subscriptions iterator may belong to a different project to t.
+ return &SubscriptionIterator{
+ s: t.s,
+ next: t.s.listTopicSubscriptions(ctx, t.name),
+ }
+}
+
+var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")
+
+// Publish publishes msg to the topic asynchronously. Messages are batched and
+// sent according to the topic's PublishSettings. Publish never blocks.
+//
+// Publish returns a non-nil PublishResult which will be ready when the
+// message has been sent (or has failed to be sent) to the server.
+//
+// Publish creates goroutines for batching and sending messages. These goroutines
+// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
+// will immediately return a PublishResult with an error.
+func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
+ // TODO(jba): if this turns out to take significant time, try to approximate it.
+ // Or, convert the messages to protos in Publish, instead of in the service.
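// Editor's note: msg.size computed below is the serialized proto size, and it
// is the weight passed to bundler.Add, so batches are cut by wire size
// (against ByteThreshold and BundleByteLimit) rather than by len(msg.Data)
// alone.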
+ msg.size = proto.Size(&pb.PubsubMessage{ + Data: msg.Data, + Attributes: msg.Attributes, + }) + r := &PublishResult{ready: make(chan struct{})} + t.initBundler() + t.mu.RLock() + defer t.mu.RUnlock() + // TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here + if t.stopped { + r.set("", errTopicStopped) + return r + } + + // TODO(jba) [from bcmills] consider using a shared channel per bundle + // (requires Bundler API changes; would reduce allocations) + // The call to Add should never return an error because the bundler's + // BufferedByteLimit is set to maxInt; we do not perform any flow + // control in the client. + err := t.bundler.Add(&bundledMessage{msg, r}, msg.size) + if err != nil { + r.set("", err) + } + return r +} + +// Send all remaining published messages and stop goroutines created for handling +// publishing. Returns once all outstanding messages have been sent or have +// failed to be sent. +func (t *Topic) Stop() { + t.mu.Lock() + noop := t.stopped || t.bundler == nil + t.stopped = true + t.mu.Unlock() + if noop { + return + } + t.bundler.Flush() + // At this point, all pending bundles have been published and the bundler's + // goroutines have exited, so it is OK for this goroutine to close bundlec. + close(t.bundlec) + t.wg.Wait() +} + +// A PublishResult holds the result from a call to Publish. +type PublishResult struct { + ready chan struct{} + serverID string + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *PublishResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the server-generated message ID and/or error result of a Publish call. +// Get blocks until the Publish call completes or the context is done. +func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { + // If the result is already ready, return it even if the context is done. + select { + case <-r.Ready(): + return r.serverID, r.err + default: + } + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-r.Ready(): + return r.serverID, r.err + } +} + +func (r *PublishResult) set(sid string, err error) { + r.serverID = sid + r.err = err + close(r.ready) +} + +type bundledMessage struct { + msg *Message + res *PublishResult +} + +func (t *Topic) initBundler() { + t.mu.RLock() + noop := t.stopped || t.bundler != nil + t.mu.RUnlock() + if noop { + return + } + t.mu.Lock() + defer t.mu.Unlock() + // Must re-check, since we released the lock. + if t.stopped || t.bundler != nil { + return + } + + // TODO(jba): use a context detached from the one passed to NewClient. + ctx := context.TODO() + // Unless overridden, run several goroutines per CPU to call the Publish RPC. 
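+ // (The default below is intentionally much larger than GOMAXPROCS:
+ // each goroutine spends most of its time blocked on a network RPC,
+ // so many more of them can run usefully than there are CPUs.)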
+ n := t.PublishSettings.NumGoroutines + if n <= 0 { + n = 25 * runtime.GOMAXPROCS(0) + } + timeout := t.PublishSettings.Timeout + t.wg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer t.wg.Done() + for b := range t.bundlec { + bctx := ctx + cancel := func() {} + if timeout != 0 { + bctx, cancel = context.WithTimeout(ctx, timeout) + } + t.publishMessageBundle(bctx, b) + cancel() + } + }() + } + t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) { + t.bundlec <- items.([]*bundledMessage) + + }) + t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold + t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold + if t.bundler.BundleCountThreshold > MaxPublishRequestCount { + t.bundler.BundleCountThreshold = MaxPublishRequestCount + } + t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold + t.bundler.BufferedByteLimit = maxInt + t.bundler.BundleByteLimit = MaxPublishRequestBytes +} + +func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) { + msgs := make([]*Message, len(bms)) + for i, bm := range bms { + msgs[i], bm.msg = bm.msg, nil // release bm.msg for GC + } + ids, err := t.s.publishMessages(ctx, t.name, msgs) + for i, bm := range bms { + if err != nil { + bm.res.set("", err) + } else { + bm.res.set(ids[i], nil) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/topic_test.go b/vendor/cloud.google.com/go/pubsub/topic_test.go new file mode 100644 index 0000000..0f6c52d --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic_test.go @@ -0,0 +1,188 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "net" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type topicListService struct { + service + topics []string + err error + t *testing.T // for error logging. 
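+
+ // The service interface above is embedded but left nil: any method the
+ // tests do not override will panic, making unexpected calls easy to spot.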
+}
+
+func (s *topicListService) newNextStringFunc() nextStringFunc {
+ return func() (string, error) {
+ if len(s.topics) == 0 {
+ return "", iterator.Done
+ }
+ tn := s.topics[0]
+ s.topics = s.topics[1:]
+ return tn, s.err
+ }
+}
+
+func (s *topicListService) listProjectTopics(ctx context.Context, projName string) nextStringFunc {
+ if projName != "projects/projid" {
+ s.t.Fatalf("unexpected call: projName: %q", projName)
+ return nil
+ }
+ return s.newNextStringFunc()
+}
+
+func checkTopicListing(t *testing.T, want []string) {
+ s := &topicListService{topics: want, t: t}
+ c := &Client{projectID: "projid", s: s}
+ topics, err := slurpTopics(c.Topics(context.Background()))
+ if err != nil {
+ t.Errorf("error listing topics: %v", err)
+ }
+ got := topicNames(topics)
+ if !testutil.Equal(got, want) {
+ t.Errorf("topic list: got: %v, want: %v", got, want)
+ }
+ if len(s.topics) != 0 {
+ t.Errorf("outstanding topics: %v", s.topics)
+ }
+}
+
+// slurpTopics returns the remaining topics from this iterator.
+func slurpTopics(it *TopicIterator) ([]*Topic, error) {
+ var topics []*Topic
+ for {
+ switch topic, err := it.Next(); err {
+ case nil:
+ topics = append(topics, topic)
+ case iterator.Done:
+ return topics, nil
+ default:
+ return nil, err
+ }
+ }
+}
+
+func TestTopicID(t *testing.T) {
+ const id = "id"
+ serv := &topicListService{
+ topics: []string{"projects/projid/topics/t1", "projects/projid/topics/t2"},
+ t: t,
+ }
+ c := &Client{projectID: "projid", s: serv}
+ s := c.Topic(id)
+ if got, want := s.ID(), id; got != want {
+ t.Errorf("Topic.ID() = %q; want %q", got, want)
+ }
+ want := []string{"t1", "t2"}
+ topics, err := slurpTopics(c.Topics(context.Background()))
+ if err != nil {
+ t.Errorf("error listing topics: %v", err)
+ }
+ for i, topic := range topics {
+ if got, want := topic.ID(), want[i]; got != want {
+ t.Errorf("Topic.ID() = %q; want %q", got, want)
+ }
+ }
+}
+
+func TestListTopics(t *testing.T) {
+ checkTopicListing(t, []string{
+ "projects/projid/topics/t1",
+ "projects/projid/topics/t2",
+ "projects/projid/topics/t3",
+ "projects/projid/topics/t4"})
+}
+
+func TestListCompletelyEmptyTopics(t *testing.T) {
+ var want []string
+ checkTopicListing(t, want)
+}
+
+func TestStopPublishOrder(t *testing.T) {
+ // Check that Stop doesn't panic if called before Publish.
+ // Also that Publish after Stop returns the right error.
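+ // (The Client below deliberately has no underlying service: neither Stop
+ // nor Publish should touch the network once the topic is stopped.)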
+ ctx := context.Background()
+ c := &Client{projectID: "projid"}
+ topic := c.Topic("t")
+ topic.Stop()
+ r := topic.Publish(ctx, &Message{})
+ _, err := r.Get(ctx)
+ if err != errTopicStopped {
+ t.Errorf("got %v, want errTopicStopped", err)
+ }
+}
+
+func TestPublishTimeout(t *testing.T) {
+ ctx := context.Background()
+ serv := grpc.NewServer()
+ pubsubpb.RegisterPublisherServer(serv, &alwaysFailPublish{})
+ lis, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ go serv.Serve(lis)
+ conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
+ if err != nil {
+ t.Fatal(err)
+ }
+ s, err := newPubSubService(context.Background(), []option.ClientOption{option.WithGRPCConn(conn)})
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := &Client{s: s}
+ topic := c.Topic("t")
+ topic.PublishSettings.Timeout = 3 * time.Second
+ r := topic.Publish(ctx, &Message{})
+ defer topic.Stop()
+ select {
+ case <-r.Ready():
+ _, err = r.Get(ctx)
+ if err != context.DeadlineExceeded {
+ t.Fatalf("got %v, want context.DeadlineExceeded", err)
+ }
+ case <-time.After(2 * topic.PublishSettings.Timeout):
+ t.Fatal("timed out")
+ }
+}
+
+type alwaysFailPublish struct {
+ pubsubpb.PublisherServer
+}
+
+func (s *alwaysFailPublish) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) {
+ return nil, grpc.Errorf(codes.Unavailable, "try again")
+}
+
+func topicNames(topics []*Topic) []string {
+ var names []string
+
+ for _, topic := range topics {
+ names = append(names, topic.name)
+
+ }
+ return names
+}
diff --git a/vendor/cloud.google.com/go/pubsub/utils_test.go b/vendor/cloud.google.com/go/pubsub/utils_test.go
new file mode 100644
index 0000000..d0b5420
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/utils_test.go
@@ -0,0 +1,63 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+type modDeadlineCall struct {
+ subName string
+ deadline time.Duration
+ ackIDs []string
+}
+
+type acknowledgeCall struct {
+ subName string
+ ackIDs []string
+}
+
+type testService struct {
+ service
+
+ // The arguments of each call to modifyAckDeadline are written to this channel.
+ modDeadlineCalled chan modDeadlineCall
+
+ // The arguments of each call to acknowledge are written to this channel.
+ acknowledgeCalled chan acknowledgeCall
+}
+
+func (s *testService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+ s.modDeadlineCalled <- modDeadlineCall{
+ subName: subName,
+ deadline: deadline,
+ ackIDs: ackIDs,
+ }
+ return nil
+}
+
+func (s *testService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
+ s.acknowledgeCalled <- acknowledgeCall{
+ subName: subName,
+ ackIDs: ackIDs,
+ }
+ return nil
+}
+
+func (s *testService) splitAckIDs(ids []string) ([]string, []string) {
+ return ids, nil
+}
diff --git a/vendor/cloud.google.com/go/run-tests.sh b/vendor/cloud.google.com/go/run-tests.sh
new file mode 100755
index 0000000..f47ff50
--- /dev/null
+++ b/vendor/cloud.google.com/go/run-tests.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+# Selectively run tests for this repo, based on what has changed
+# in a commit. Runs short tests for the whole repo, and full tests
+# for changed directories.
+
+set -e
+
+prefix=cloud.google.com/go
+
+dryrun=false
+if [[ $1 == "-n" ]]; then
+ dryrun=true
+ shift
+fi
+
+if [[ $1 == "" ]]; then
+ echo >&2 "usage: $0 [-n] COMMIT"
+ exit 1
+fi
+
+# Files or directories that cause all tests to run if modified.
+declare -A run_all
+run_all=([.travis.yml]=1 [run-tests.sh]=1)
+
+function run {
+ if $dryrun; then
+ echo $*
+ else
+ (set -x; $*)
+ fi
+}
+
+
+# Find all the packages that have changed in this commit.
+declare -A changed_packages
+
+for f in $(git diff-tree --no-commit-id --name-only -r $1); do
+ if [[ ${run_all[$f]} == 1 ]]; then
+ # This change requires a full test. Do it and exit.
+ run go test -race -v $prefix/...
+ exit
+ fi
+ # Map, e.g., "spanner/client.go" to "$prefix/spanner".
+ d=$(dirname $f)
+ if [[ $d == "." ]]; then
+ pkg=$prefix
+ else
+ pkg=$prefix/$d
+ fi
+ changed_packages[$pkg]=1
+done
+
+echo "changed packages: ${!changed_packages[*]}"
+
+
+# Reports whether its argument, a package name, depends (recursively)
+# on a changed package.
+function depends_on_changed_package {
+ # According to go list, a package does not depend on itself, so
+ # we test that separately.
+ if [[ ${changed_packages[$1]} == 1 ]]; then
+ return 0
+ fi
+ for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do
+ if [[ ${changed_packages[$dep]} == 1 ]]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Collect the packages into two separate lists. (It is faster to run "go test"
+# on a list of packages than to run it on each package individually.)
+
+shorts=
+fulls=
+for pkg in $(go list $prefix/...); do # for each package in the repo
+ if depends_on_changed_package $pkg; then # if it depends on a changed package
+ fulls="$fulls $pkg" # run the full test
+ else # otherwise
+ shorts="$shorts $pkg" # run the short test
+ fi
+done
+run go test -race -v -short $shorts
+if [[ $fulls != "" ]]; then
+ run go test -race -v $fulls
+fi
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
new file mode 100644
index 0000000..e63692e
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
@@ -0,0 +1,538 @@
+// Copyright 2017, Google LLC All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient. +type DatabaseAdminCallOptions struct { + ListDatabases []gax.CallOption + CreateDatabase []gax.CallOption + GetDatabase []gax.CallOption + UpdateDatabaseDdl []gax.CallOption + DropDatabase []gax.CallOption + GetDatabaseDdl []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultDatabaseAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &DatabaseAdminCallOptions{ + ListDatabases: retry[[2]string{"default", "idempotent"}], + CreateDatabase: retry[[2]string{"default", "non_idempotent"}], + GetDatabase: retry[[2]string{"default", "idempotent"}], + UpdateDatabaseDdl: retry[[2]string{"default", "idempotent"}], + DropDatabase: retry[[2]string{"default", "idempotent"}], + GetDatabaseDdl: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// DatabaseAdminClient is a client for interacting with Cloud Spanner Database Admin API. +type DatabaseAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + databaseAdminClient databasepb.DatabaseAdminClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *DatabaseAdminCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewDatabaseAdminClient creates a new database admin client. 
+// +// Cloud Spanner Database Admin API +// +// The Cloud Spanner Database Admin API can be used to create, drop, and +// list databases. It also enables updating the schema of pre-existing +// databases. +func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultDatabaseAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &DatabaseAdminClient{ + conn: conn, + CallOptions: defaultDatabaseAdminCallOptions(), + + databaseAdminClient: databasepb.NewDatabaseAdminClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *DatabaseAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *DatabaseAdminClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *DatabaseAdminClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// DatabaseAdminInstancePath returns the path for the instance resource. +func DatabaseAdminInstancePath(project, instance string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "" +} + +// DatabaseAdminDatabasePath returns the path for the database resource. +func DatabaseAdminDatabasePath(project, instance, database string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "/databases/" + + database + + "" +} + +// ListDatabases lists Cloud Spanner databases. +func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListDatabases[0:len(c.CallOptions.ListDatabases):len(c.CallOptions.ListDatabases)], opts...) + it := &DatabaseIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) { + var resp *databasepb.ListDatabasesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.ListDatabases(ctx, req, settings.GRPC...) + return err + }, opts...) 
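+ // gax.Invoke applies the retry options appended above; for ListDatabases
+ // the defaults retry DeadlineExceeded and Unavailable with exponential
+ // backoff (see defaultDatabaseAdminCallOptions).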
+ if err != nil { + return nil, "", err + } + return resp.Databases, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving. +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format /operations/ and +// can be used to track preparation of the database. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The +// [response][google.longrunning.Operation.response] field type is +// [Database][google.spanner.admin.database.v1.Database], if successful. +func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateDatabase[0:len(c.CallOptions.CreateDatabase):len(c.CallOptions.CreateDatabase)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.CreateDatabase(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateDatabaseOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// GetDatabase gets the state of a Cloud Spanner database. +func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDatabase[0:len(c.CallOptions.GetDatabase):len(c.CallOptions.GetDatabase)], opts...) + var resp *databasepb.Database + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.GetDatabase(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by +// creating/altering/dropping tables, columns, indexes, etc. The returned +// [long-running operation][google.longrunning.Operation] will have a name of +// the format /operations/ and can be used to +// track execution of the schema change(s). The +// [metadata][google.longrunning.Operation.metadata] field type is +// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. +func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateDatabaseDdl[0:len(c.CallOptions.UpdateDatabaseDdl):len(c.CallOptions.UpdateDatabaseDdl)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req, settings.GRPC...) + return err + }, opts...) 
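+ // The raw google.longrunning.Operation is wrapped below so callers can
+ // Wait or Poll on the schema change instead of driving the LRO directly.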
+ if err != nil { + return nil, err + } + return &UpdateDatabaseDdlOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// DropDatabase drops (aka deletes) a Cloud Spanner database. +func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DropDatabase[0:len(c.CallOptions.DropDatabase):len(c.CallOptions.DropDatabase)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.databaseAdminClient.DropDatabase(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted +// DDL statements. This method does not show pending schema updates, those may +// be queried using the [Operations][google.longrunning.Operations] API. +func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetDatabaseDdl[0:len(c.CallOptions.GetDatabaseDdl):len(c.CallOptions.GetDatabaseDdl)], opts...) + var resp *databasepb.GetDatabaseDdlResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on a database resource. Replaces any +// existing policy. +// +// Authorization requires spanner.databases.setIamPolicy permission on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for a database resource. Returns an empty +// policy if a database exists but does not have a policy set. +// +// Authorization requires spanner.databases.getIamPolicy permission on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified database resource. 
+// +// Attempting this RPC on a non-existent Cloud Spanner database will result in +// a NOT_FOUND error if the user has spanner.databases.list permission on +// the containing Cloud Spanner instance. Otherwise returns an empty set of +// permissions. +func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DatabaseIterator manages a stream of *databasepb.Database. +type DatabaseIterator struct { + items []*databasepb.Database + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DatabaseIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DatabaseIterator) Next() (*databasepb.Database, error) { + var item *databasepb.Database + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DatabaseIterator) bufLen() int { + return len(it.items) +} + +func (it *DatabaseIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CreateDatabaseOperation manages a long-running operation from CreateDatabase. +type CreateDatabaseOperation struct { + lro *longrunning.Operation +} + +// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name. +// The name must be that of a previously created CreateDatabaseOperation, possibly from a different process. +func (c *DatabaseAdminClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation { + return &CreateDatabaseOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. 
+// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CreateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateDatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) { + var meta databasepb.CreateDatabaseMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateDatabaseOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateDatabaseOperation) Name() string { + return op.lro.Name() +} + +// UpdateDatabaseDdlOperation manages a long-running operation from UpdateDatabaseDdl. +type UpdateDatabaseDdlOperation struct { + lro *longrunning.Operation +} + +// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name. +// The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process. +func (c *DatabaseAdminClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation { + return &UpdateDatabaseDdlOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning any error encountered. +// +// See documentation of Poll for error-handling information. +func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.WaitWithInterval(ctx, nil, 45000*time.Millisecond, opts...) +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, op.Done will return true. +func (op *UpdateDatabaseDdlOperation) Poll(ctx context.Context, opts ...gax.CallOption) error { + return op.lro.Poll(ctx, nil, opts...) +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
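+//
+// A typical polling loop (an illustrative sketch; the interval and error
+// handling are the caller's choice):
+//
+//   for !op.Done() {
+//     if err := op.Poll(ctx); err != nil {
+//       // handle err
+//     }
+//     time.Sleep(10 * time.Second)
+//   }
+//   meta, err := op.Metadata() // latest progress info, if any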
+func (op *UpdateDatabaseDdlOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) { + var meta databasepb.UpdateDatabaseDdlMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *UpdateDatabaseDdlOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *UpdateDatabaseDdlOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go new file mode 100644 index 0000000..a13ffb2 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go @@ -0,0 +1,207 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database_test + +import ( + "cloud.google.com/go/spanner/admin/database/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func ExampleNewDatabaseAdminClient() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleDatabaseAdminClient_ListDatabases() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.ListDatabasesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDatabases(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleDatabaseAdminClient_CreateDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.CreateDatabaseRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleDatabaseAdminClient_UpdateDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.UpdateDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + + err = op.Wait(ctx) + // TODO: Handle error. +} + +func ExampleDatabaseAdminClient_DropDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.DropDatabaseRequest{ + // TODO: Fill request struct fields. + } + err = c.DropDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDatabaseAdminClient_GetDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go new file mode 100644 index 0000000..3e8bc2d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package database is an auto-generated package for the +// Cloud Spanner Database Admin API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. 
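+//
+// A minimal connection sketch (credential discovery is environment
+// dependent; see the generated examples for fuller versions):
+//
+//   ctx := context.Background()
+//   c, err := database.NewDatabaseAdminClient(ctx)
+//   if err != nil {
+//     // handle err
+//   }
+//   defer c.Close()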
+// +package database // import "cloud.google.com/go/spanner/admin/database/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + } +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go new file mode 100644 index 0000000..36b5de6 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go @@ -0,0 +1,798 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDatabaseAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + databasepb.DatabaseAdminServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDatabaseAdminServer) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest) (*databasepb.ListDatabasesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.ListDatabasesResponse), nil +} + +func (s *mockDatabaseAdminServer) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.Database), nil +} + +func (s *mockDatabaseAdminServer) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockDatabaseAdminServer) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.GetDatabaseDdlResponse), nil +} + +func (s *mockDatabaseAdminServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) GetIamPolicy(ctx 
context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockDatabaseAdmin mockDatabaseAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + databasepb.RegisterDatabaseAdminServer(serv, &mockDatabaseAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDatabaseAdminListDatabases(t *testing.T) { + var nextPageToken string = "" + var databasesElement *databasepb.Database = &databasepb.Database{} + var databases = []*databasepb.Database{databasesElement} + var expectedResponse = &databasepb.ListDatabasesResponse{ + NextPageToken: nextPageToken, + Databases: databases, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Databases[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminListDatabasesError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, 
errCode) + } + _ = resp +} +func TestDatabaseAdminCreateDatabase(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &databasepb.Database{ + Name: name, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminCreateDatabaseError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetDatabase(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &databasepb.Database{ + Name: name2, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseError(t *testing.T) { + errCode := codes.PermissionDenied + 
mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminUpdateDatabaseDdl(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminUpdateDatabaseDdlError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminDropDatabase(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, 
mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminDropDatabaseError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminGetDatabaseDdl(t *testing.T) { + var expectedResponse *databasepb.GetDatabaseDdlResponse = &databasepb.GetDatabaseDdlResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseDdlError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, 
want %q)", got, want) + } +} + +func TestDatabaseAdminSetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, 
want) + } +} + +func TestDatabaseAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go new file mode 100644 index 0000000..6cf4f9c --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package instance is an auto-generated package for the +// Cloud Spanner Instance Admin API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + } +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go new file mode 100644 index 0000000..e25ab56 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go @@ -0,0 +1,728 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient. +type InstanceAdminCallOptions struct { + ListInstanceConfigs []gax.CallOption + GetInstanceConfig []gax.CallOption + ListInstances []gax.CallOption + GetInstance []gax.CallOption + CreateInstance []gax.CallOption + UpdateInstance []gax.CallOption + DeleteInstance []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultInstanceAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &InstanceAdminCallOptions{ + ListInstanceConfigs: retry[[2]string{"default", "idempotent"}], + GetInstanceConfig: retry[[2]string{"default", "idempotent"}], + ListInstances: retry[[2]string{"default", "idempotent"}], + GetInstance: retry[[2]string{"default", "idempotent"}], + CreateInstance: retry[[2]string{"default", "non_idempotent"}], + UpdateInstance: retry[[2]string{"default", "non_idempotent"}], + DeleteInstance: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API. +type InstanceAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + instanceAdminClient instancepb.InstanceAdminClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *InstanceAdminCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewInstanceAdminClient creates a new instance admin client. +// +// Cloud Spanner Instance Admin API +// +// The Cloud Spanner Instance Admin API can be used to create, delete, +// modify and list instances. Instances are dedicated Cloud Spanner serving +// and storage resources to be used by Cloud Spanner databases. 
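+// +// A minimal construction sketch (error handling elided; the package's +// example tests show the full pattern): +// +//   ctx := context.Background() +//   c, err := instance.NewInstanceAdminClient(ctx) +//   if err != nil { +//     // TODO: Handle error. +//   } +//   defer c.Close()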
+// +// Each instance has a "configuration", which dictates where the +// serving resources for the Cloud Spanner instance are located (e.g., +// US-central, Europe). Configurations are created by Google based on +// resource availability. +// +// Cloud Spanner billing is based on the instances that exist and their +// sizes. After an instance exists, there are no additional +// per-database or per-operation charges for use of the instance +// (though there may be additional network bandwidth charges). +// Instances offer isolation: problems with databases in one instance +// will not affect other instances. However, within an instance +// databases can affect each other. For example, if one database in an +// instance receives a lot of requests and consumes most of the +// instance resources, fewer resources are available for other +// databases in that instance, and their performance may suffer. +func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultInstanceAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &InstanceAdminClient{ + conn: conn, + CallOptions: defaultInstanceAdminCallOptions(), + + instanceAdminClient: instancepb.NewInstanceAdminClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *InstanceAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *InstanceAdminClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *InstanceAdminClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// InstanceAdminProjectPath returns the path for the project resource. +func InstanceAdminProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// InstanceAdminInstanceConfigPath returns the path for the instance config resource. +func InstanceAdminInstanceConfigPath(project, instanceConfig string) string { + return "" + + "projects/" + + project + + "/instanceConfigs/" + + instanceConfig + + "" +} + +// InstanceAdminInstancePath returns the path for the instance resource. +func InstanceAdminInstancePath(project, instance string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "" +} + +// ListInstanceConfigs lists the supported instance configurations for a given project. 
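+// +// A sketch of draining the returned iterator (bracketed names are +// placeholders; this mirrors the package example): +// +//   it := c.ListInstanceConfigs(ctx, &instancepb.ListInstanceConfigsRequest{ +//     Parent: InstanceAdminProjectPath("[PROJECT]"), +//   }) +//   for { +//     cfg, err := it.Next() +//     if err == iterator.Done { +//       break +//     } +//     if err != nil { +//       // TODO: Handle error. +//     } +//     _ = cfg // TODO: Use cfg. +//   }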
+func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInstanceConfigs[0:len(c.CallOptions.ListInstanceConfigs):len(c.CallOptions.ListInstanceConfigs)], opts...) + it := &InstanceConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) { + var resp *instancepb.ListInstanceConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.InstanceConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstanceConfig gets information about a particular instance configuration. +func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInstanceConfig[0:len(c.CallOptions.GetInstanceConfig):len(c.CallOptions.GetInstanceConfig)], opts...) + var resp *instancepb.InstanceConfig + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInstances lists all instances in the given project. +func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListInstances[0:len(c.CallOptions.ListInstances):len(c.CallOptions.ListInstances)], opts...) + it := &InstanceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) { + var resp *instancepb.ListInstancesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.ListInstances(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Instances, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstance gets information about a particular instance. 
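+// +// A usage sketch (bracketed resource-name parts are placeholders): +// +//   inst, err := c.GetInstance(ctx, &instancepb.GetInstanceRequest{ +//     Name: InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]"), +//   }) +//   if err != nil { +//     // TODO: Handle error. +//   } +//   _ = inst // TODO: Use inst.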
+func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetInstance[0:len(c.CallOptions.GetInstance):len(c.CallOptions.GetInstance)], opts...) + var resp *instancepb.Instance + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.GetInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInstance creates an instance and begins preparing it to begin serving. The +// returned [long-running operation][google.longrunning.Operation] +// can be used to track the progress of preparing the new +// instance. The instance name is assigned by the caller. If the +// named instance already exists, CreateInstance returns +// ALREADY_EXISTS. +// +// Immediately upon completion of this request: +// +// The instance is readable via the API, with all requested attributes +// but no allocated resources. Its state is CREATING. +// +// Until completion of the returned operation: +// +// Cancelling the operation renders the instance immediately unreadable +// via the API. +// +// The instance can be deleted. +// +// All other attempts to modify the instance are rejected. +// +// Upon completion of the returned operation: +// +// Billing for all successfully-allocated resources begins (some types +// may have lower than the requested levels). +// +// Databases can be created in the instance. +// +// The instance's allocated resource levels are readable via the API. +// +// The instance's state becomes READY. +// +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format /operations/ and +// can be used to track creation of the instance. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. +// The [response][google.longrunning.Operation.response] field type is +// [Instance][google.spanner.admin.instance.v1.Instance], if successful. +func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateInstance[0:len(c.CallOptions.CreateInstance):len(c.CallOptions.CreateInstance)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.CreateInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CreateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// UpdateInstance updates an instance, and begins allocating or releasing resources +// as requested. The returned [long-running +// operation][google.longrunning.Operation] can be used to track the +// progress of updating the instance. If the named instance does not +// exist, returns NOT_FOUND. +// +// Immediately upon completion of this request: +// +// For resource types for which a decrease in the instance's allocation +// has been requested, billing is based on the newly-requested level. 
+// +// Until completion of the returned operation: +// +// Cancelling the operation sets its metadata's +// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins +// restoring resources to their pre-request values. The operation +// is guaranteed to succeed at undoing all resource changes, +// after which point it terminates with a CANCELLED status. +// +// All other attempts to modify the instance are rejected. +// +// Reading the instance via the API continues to give the pre-request +// resource levels. +// +// Upon completion of the returned operation: +// +// Billing begins for all successfully-allocated resources (some types +// may have lower than the requested levels). +// +// All newly-reserved resources are available for serving the instance's +// tables. +// +// The instance's new resource levels are readable via the API. +// +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format /operations/ and +// can be used to track the instance modification. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. +// The [response][google.longrunning.Operation.response] field type is +// [Instance][google.spanner.admin.instance.v1.Instance], if successful. +// +// Authorization requires spanner.instances.update permission on +// resource [name][google.spanner.admin.instance.v1.Instance.name]. +func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.UpdateInstance[0:len(c.CallOptions.UpdateInstance):len(c.CallOptions.UpdateInstance)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.UpdateInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &UpdateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// DeleteInstance deletes an instance. +// +// Immediately upon completion of the request: +// +// Billing ceases for all of the instance's reserved resources. +// +// Soon afterward: +// +// The instance and all of its databases immediately and +// irrevocably disappear from the API. All data in the databases +// is permanently deleted. +func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteInstance[0:len(c.CallOptions.DeleteInstance):len(c.CallOptions.DeleteInstance)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.instanceAdminClient.DeleteInstance(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// SetIamPolicy sets the access control policy on an instance resource. Replaces any +// existing policy. +// +// Authorization requires spanner.instances.setIamPolicy on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. 
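+// +// A sketch of replacing the policy on an instance (the empty policy here is +// only a placeholder, not a recommended configuration): +// +//   policy, err := c.SetIamPolicy(ctx, &iampb.SetIamPolicyRequest{ +//     Resource: InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]"), +//     Policy:   &iampb.Policy{}, +//   }) +//   if err != nil { +//     // TODO: Handle error. +//   } +//   _ = policy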
+func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for an instance resource. Returns an empty +// policy if an instance exists but does not have a policy set. +// +// Authorization requires spanner.instances.getIamPolicy on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified instance resource. +// +// Attempting this RPC on a non-existent Cloud Spanner instance resource will +// result in a NOT_FOUND error if the user has spanner.instances.list +// permission on the containing Google Cloud Project. Otherwise returns an +// empty set of permissions. +func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// InstanceConfigIterator manages a stream of *instancepb.InstanceConfig. +type InstanceConfigIterator struct { + items []*instancepb.InstanceConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) { + var item *instancepb.InstanceConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InstanceIterator manages a stream of *instancepb.Instance. +type InstanceIterator struct { + items []*instancepb.Instance + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceIterator) Next() (*instancepb.Instance, error) { + var item *instancepb.Instance + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// CreateInstanceOperation manages a long-running operation from CreateInstance. +type CreateInstanceOperation struct { + lro *longrunning.Operation +} + +// CreateInstanceOperation returns a new CreateInstanceOperation from a given name. +// The name must be that of a previously created CreateInstanceOperation, possibly from a different process. +func (c *InstanceAdminClient) CreateInstanceOperation(name string) *CreateInstanceOperation { + return &CreateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
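+// +// A polling sketch, resuming an operation by name from another process (the +// operation name is a placeholder): +// +//   op := c.CreateInstanceOperation("[OPERATION_NAME]") +//   inst, err := op.Poll(ctx) +//   if err != nil { +//     // TODO: Handle error; op.Done() distinguishes an RPC failure +//     // from an operation that completed with failure. +//   } +//   if inst != nil { +//     // The operation completed successfully; inst is the created Instance. +//   }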
+func (op *CreateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CreateInstanceOperation) Metadata() (*instancepb.CreateInstanceMetadata, error) { + var meta instancepb.CreateInstanceMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *CreateInstanceOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CreateInstanceOperation) Name() string { + return op.lro.Name() +} + +// UpdateInstanceOperation manages a long-running operation from UpdateInstance. +type UpdateInstanceOperation struct { + lro *longrunning.Operation +} + +// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name. +// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process. +func (c *InstanceAdminClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation { + return &UpdateInstanceOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *UpdateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
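+// +// A sketch of inspecting progress after a successful Poll (the CancelTime +// check is illustrative; see UpdateInstanceMetadata for the full field set): +// +//   meta, err := op.Metadata() +//   if err != nil { +//     // TODO: Handle error. +//   } +//   if meta != nil && meta.CancelTime != nil { +//     // The operation was cancelled and is restoring pre-request values. +//   }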
+func (op *UpdateInstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) { + var meta instancepb.UpdateInstanceMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *UpdateInstanceOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *UpdateInstanceOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go new file mode 100644 index 0000000..a0f4e02 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go @@ -0,0 +1,235 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance_test + +import ( + "cloud.google.com/go/spanner/admin/instance/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" +) + +func ExampleNewInstanceAdminClient() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleInstanceAdminClient_ListInstanceConfigs() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstanceConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstanceConfigs(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstanceConfig() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstanceConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_ListInstances() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstancesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstances(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_CreateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.CreateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_UpdateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.UpdateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_DeleteInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.DeleteInstanceRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInstanceAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go new file mode 100644 index 0000000..b3b513d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go @@ -0,0 +1,917 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package instance + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockInstanceAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + instancepb.InstanceAdminServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockInstanceAdminServer) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest) (*instancepb.ListInstanceConfigsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstanceConfigsResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.InstanceConfig), nil +} + +func (s *mockInstanceAdminServer) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest) (*instancepb.ListInstancesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstancesResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = 
append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.Instance), nil +} + +func (s *mockInstanceAdminServer) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockInstanceAdminServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockInstanceAdmin mockInstanceAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + instancepb.RegisterInstanceAdminServer(serv, &mockInstanceAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestInstanceAdminListInstanceConfigs(t *testing.T) { + var nextPageToken string = "" + var instanceConfigsElement *instancepb.InstanceConfig = &instancepb.InstanceConfig{} + var instanceConfigs = []*instancepb.InstanceConfig{instanceConfigsElement} + var expectedResponse = &instancepb.ListInstanceConfigsResponse{ + NextPageToken: nextPageToken, + InstanceConfigs: instanceConfigs, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InstanceConfigs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstanceConfigsError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstanceConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &instancepb.InstanceConfig{ + Name: name2, + DisplayName: displayName, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := 
expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceConfigError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminListInstances(t *testing.T) { + var nextPageToken string = "" + var instancesElement *instancepb.Instance = &instancepb.Instance{} + var instances = []*instancepb.Instance{instancesElement} + var expectedResponse = &instancepb.ListInstancesResponse{ + NextPageToken: nextPageToken, + Instances: instances, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Instances[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstancesError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstance(t *testing.T) { + var name2 string = "name2-1052831874" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name2, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := 
NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminCreateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminCreateInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + 
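+	// The mock was primed above with an already-completed Operation whose
+	// Result is an Operation_Error, so the CreateInstance call itself is
+	// expected to succeed; the PermissionDenied status should only surface
+	// from the Wait on the returned LRO handle below.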
respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminUpdateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminUpdateInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminDeleteInstance(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if 
err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestInstanceAdminDeleteInstanceError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestInstanceAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminSetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, 
got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetIamPolicyError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockInstanceAdmin.err = gstatus.Error(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/apiv1/doc.go new file mode 100644 index 0000000..aff7ca9 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/doc.go @@ -0,0 +1,50 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +// Package spanner is an auto-generated package for the +// Cloud Spanner API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Cloud Spanner is a managed, mission-critical, globally consistent and +// scalable relational database service. +// +// Use the client at cloud.google.com/go/spanner in preference to this. +package spanner // import "cloud.google.com/go/spanner/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.data", + } +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go new file mode 100644 index 0000000..02e0e03 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/mock_test.go @@ -0,0 +1,937 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package spanner + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + spannerpb "google.golang.org/genproto/googleapis/spanner/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpannerServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + spannerpb.SpannerServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpannerServer) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest) (*spannerpb.Session, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.Session), nil +} + +func (s *mockSpannerServer) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest) (*spannerpb.Session, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.Session), nil +} + +func (s *mockSpannerServer) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest) (*spannerpb.ListSessionsResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.ListSessionsResponse), nil +} + +func (s *mockSpannerServer) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockSpannerServer) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest) (*spannerpb.ResultSet, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.ResultSet), nil +} + +func (s *mockSpannerServer) ExecuteStreamingSql(req *spannerpb.ExecuteSqlRequest, stream spannerpb.Spanner_ExecuteStreamingSqlServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*spannerpb.PartialResultSet)); err != nil { + return err + } + } + return nil +} + +func (s *mockSpannerServer) Read(ctx context.Context, req *spannerpb.ReadRequest) (*spannerpb.ResultSet, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.ResultSet), nil +} + +func (s *mockSpannerServer) StreamingRead(req *spannerpb.ReadRequest, stream 
spannerpb.Spanner_StreamingReadServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*spannerpb.PartialResultSet)); err != nil { + return err + } + } + return nil +} + +func (s *mockSpannerServer) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest) (*spannerpb.Transaction, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.Transaction), nil +} + +func (s *mockSpannerServer) Commit(ctx context.Context, req *spannerpb.CommitRequest) (*spannerpb.CommitResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*spannerpb.CommitResponse), nil +} + +func (s *mockSpannerServer) Rollback(ctx context.Context, req *spannerpb.RollbackRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
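+// The gRPC server is started in-process on an ephemeral port (localhost:0),
+// so these tests exercise the real wire path without any external network
+// dependency, and each mock handler records the request it received for
+// later assertions against s.reqs.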
+var clientOpt option.ClientOption + +var ( + mockSpanner mockSpannerServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + spannerpb.RegisterSpannerServer(serv, &mockSpanner) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpannerCreateSession(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &spannerpb.Session{ + Name: name, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedDatabase string = DatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.CreateSessionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSession(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerCreateSessionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.CreateSessionRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSession(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerGetSession(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &spannerpb.Session{ + Name: name2, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.GetSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSession(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerGetSessionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.GetSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSession(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } 
else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerListSessions(t *testing.T) { + var nextPageToken string = "" + var sessionsElement *spannerpb.Session = &spannerpb.Session{} + var sessions = []*spannerpb.Session{sessionsElement} + var expectedResponse = &spannerpb.ListSessionsResponse{ + NextPageToken: nextPageToken, + Sessions: sessions, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedDatabase string = DatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.ListSessionsRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSessions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Sessions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerListSessionsError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedDatabase string = DatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &spannerpb.ListSessionsRequest{ + Database: formattedDatabase, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSessions(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerDeleteSession(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.DeleteSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSession(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSpannerDeleteSessionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedName string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var request = &spannerpb.DeleteSessionRequest{ + Name: formattedName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSession(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSpannerExecuteSql(t *testing.T) { + var expectedResponse *spannerpb.ResultSet = 
&spannerpb.ResultSet{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ExecuteSql(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerExecuteSqlError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ExecuteSql(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerExecuteStreamingSql(t *testing.T) { + var chunkedValue bool = true + var resumeToken []byte = []byte("103") + var expectedResponse = &spannerpb.PartialResultSet{ + ChunkedValue: chunkedValue, + ResumeToken: resumeToken, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.ExecuteStreamingSql(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerExecuteStreamingSqlError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var sql string = "sql114126" + var request = &spannerpb.ExecuteSqlRequest{ + Session: formattedSession, + Sql: sql, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.ExecuteStreamingSql(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerRead(t *testing.T) { + var 
expectedResponse *spannerpb.ResultSet = &spannerpb.ResultSet{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Read(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerReadError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Read(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerStreamingRead(t *testing.T) { + var chunkedValue bool = true + var resumeToken []byte = []byte("103") + var expectedResponse = &spannerpb.PartialResultSet{ + ChunkedValue: chunkedValue, + ResumeToken: resumeToken, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRead(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerStreamingReadError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var table string = "table110115790" + var columns []string = nil + var keySet *spannerpb.KeySet = &spannerpb.KeySet{} + var request = &spannerpb.ReadRequest{ + Session: formattedSession, + Table: table, + Columns: columns, + KeySet: keySet, + } 
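+	// The mock's StreamingRead handler returns s.err before sending any
+	// messages, so the expected failure mode is a stream that opens
+	// successfully and then yields the PermissionDenied status from the
+	// first Recv below.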
+ + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRead(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerBeginTransaction(t *testing.T) { + var id []byte = []byte("27") + var expectedResponse = &spannerpb.Transaction{ + Id: id, + } + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var options *spannerpb.TransactionOptions = &spannerpb.TransactionOptions{} + var request = &spannerpb.BeginTransactionRequest{ + Session: formattedSession, + Options: options, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerBeginTransactionError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var options *spannerpb.TransactionOptions = &spannerpb.TransactionOptions{} + var request = &spannerpb.BeginTransactionRequest{ + Session: formattedSession, + Options: options, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BeginTransaction(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerCommit(t *testing.T) { + var expectedResponse *spannerpb.CommitResponse = &spannerpb.CommitResponse{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var mutations []*spannerpb.Mutation = nil + var request = &spannerpb.CommitRequest{ + Session: formattedSession, + Mutations: mutations, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpannerCommitError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var mutations []*spannerpb.Mutation = nil + var request = &spannerpb.CommitRequest{ + Session: formattedSession, + 
Mutations: mutations, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Commit(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpannerRollback(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockSpanner.err = nil + mockSpanner.reqs = nil + + mockSpanner.resps = append(mockSpanner.resps[:0], expectedResponse) + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var transactionId []byte = []byte("28") + var request = &spannerpb.RollbackRequest{ + Session: formattedSession, + TransactionId: transactionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpanner.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSpannerRollbackError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpanner.err = gstatus.Error(errCode, "test error") + + var formattedSession string = SessionPath("[PROJECT]", "[INSTANCE]", "[DATABASE]", "[SESSION]") + var transactionId []byte = []byte("28") + var request = &spannerpb.RollbackRequest{ + Session: formattedSession, + TransactionId: transactionId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Rollback(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go new file mode 100644 index 0000000..b7f915a --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go @@ -0,0 +1,474 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package spanner + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + spannerpb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + CreateSession []gax.CallOption + GetSession []gax.CallOption + ListSessions []gax.CallOption + DeleteSession []gax.CallOption + ExecuteSql []gax.CallOption + ExecuteStreamingSql []gax.CallOption + Read []gax.CallOption + StreamingRead []gax.CallOption + BeginTransaction []gax.CallOption + Commit []gax.CallOption + Rollback []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"long_running", "long_running"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + CreateSession: retry[[2]string{"default", "idempotent"}], + GetSession: retry[[2]string{"default", "idempotent"}], + ListSessions: retry[[2]string{"default", "idempotent"}], + DeleteSession: retry[[2]string{"default", "idempotent"}], + ExecuteSql: retry[[2]string{"default", "idempotent"}], + ExecuteStreamingSql: retry[[2]string{"default", "non_idempotent"}], + Read: retry[[2]string{"default", "idempotent"}], + StreamingRead: retry[[2]string{"default", "non_idempotent"}], + BeginTransaction: retry[[2]string{"default", "idempotent"}], + Commit: retry[[2]string{"long_running", "long_running"}], + Rollback: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Cloud Spanner API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client spannerpb.SpannerClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new spanner client. +// +// Cloud Spanner API +// +// The Cloud Spanner API can be used to manage sessions and execute +// transactions on data stored in Cloud Spanner databases. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: spannerpb.NewSpannerClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
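+	// The assembled header advertises the Go, GAPIC, gax and gRPC versions
+	// as "key/value" pairs; the mock servers in mock_test.go assert that a
+	// "gl-go/" entry is present on every request.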
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// DatabasePath returns the path for the database resource. +func DatabasePath(project, instance, database string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "/databases/" + + database + + "" +} + +// SessionPath returns the path for the session resource. +func SessionPath(project, instance, database, session string) string { + return "" + + "projects/" + + project + + "/instances/" + + instance + + "/databases/" + + database + + "/sessions/" + + session + + "" +} + +// CreateSession creates a new session. A session can be used to perform +// transactions that read and/or modify data in a Cloud Spanner database. +// Sessions are meant to be reused for many consecutive +// transactions. +// +// Sessions can only execute one transaction at a time. To execute +// multiple concurrent read-write/write-only transactions, create +// multiple sessions. Note that standalone reads and queries use a +// transaction internally, and count toward the one transaction +// limit. +// +// Cloud Spanner limits the number of sessions that can exist at any given +// time; thus, it is a good idea to delete idle and/or unneeded sessions. +// Aside from explicit deletes, Cloud Spanner can delete sessions for which no +// operations are sent for more than an hour. If a session is deleted, +// requests to it return NOT_FOUND. +// +// Idle sessions can be kept alive by sending a trivial SQL query +// periodically, e.g., "SELECT 1". +func (c *Client) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSession[0:len(c.CallOptions.CreateSession):len(c.CallOptions.CreateSession)], opts...) + var resp *spannerpb.Session + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSession(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSession gets a session. Returns NOT_FOUND if the session does not exist. +// This is mainly useful for determining whether a session is still +// alive. +func (c *Client) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.GetSession[0:len(c.CallOptions.GetSession):len(c.CallOptions.GetSession)], opts...) + var resp *spannerpb.Session + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.GetSession(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSessions lists all sessions in a given database. +func (c *Client) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest, opts ...gax.CallOption) *SessionIterator { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ListSessions[0:len(c.CallOptions.ListSessions):len(c.CallOptions.ListSessions)], opts...) 
+ it := &SessionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*spannerpb.Session, string, error) { + var resp *spannerpb.ListSessionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListSessions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.Sessions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteSession ends a session, releasing server resources associated with it. +func (c *Client) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.DeleteSession[0:len(c.CallOptions.DeleteSession):len(c.CallOptions.DeleteSession)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.DeleteSession(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ExecuteSql executes an SQL query, returning all rows in a single reply. This +// method cannot be used to return a result set larger than 10 MiB; +// if the query yields more data than that, the query fails with +// a FAILED_PRECONDITION error. +// +// Queries inside read-write transactions might return ABORTED. If +// this occurs, the application should restart the transaction from +// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. +// +// Larger result sets can be fetched in streaming fashion by calling +// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. +func (c *Client) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ExecuteSql[0:len(c.CallOptions.ExecuteSql):len(c.CallOptions.ExecuteSql)], opts...) + var resp *spannerpb.ResultSet + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ExecuteSql(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ExecuteStreamingSql like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result +// set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there +// is no limit on the size of the returned result set. However, no +// individual row in the result set can exceed 100 MiB, and no +// column value can exceed 10 MiB. +func (c *Client) ExecuteStreamingSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.ExecuteStreamingSql[0:len(c.CallOptions.ExecuteStreamingSql):len(c.CallOptions.ExecuteStreamingSql)], opts...) 
+ var resp spannerpb.Spanner_ExecuteStreamingSqlClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ExecuteStreamingSql(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Read reads rows from the database using key lookups and scans, as a +// simple key/value style alternative to +// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to +// return a result set larger than 10 MiB; if the read matches more +// data than that, the read fails with a FAILED_PRECONDITION +// error. +// +// Reads inside read-write transactions might return ABORTED. If +// this occurs, the application should restart the transaction from +// the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. +// +// Larger result sets can be yielded in streaming fashion by calling +// [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. +func (c *Client) Read(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Read[0:len(c.CallOptions.Read):len(c.CallOptions.Read)], opts...) + var resp *spannerpb.ResultSet + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Read(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// StreamingRead like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a +// stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the +// size of the returned result set. However, no individual row in +// the result set can exceed 100 MiB, and no column value can exceed +// 10 MiB. +func (c *Client) StreamingRead(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingRead[0:len(c.CallOptions.StreamingRead):len(c.CallOptions.StreamingRead)], opts...) + var resp spannerpb.Spanner_StreamingReadClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StreamingRead(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// BeginTransaction begins a new transaction. This step can often be skipped: +// [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and +// [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a +// side-effect. +func (c *Client) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest, opts ...gax.CallOption) (*spannerpb.Transaction, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...) + var resp *spannerpb.Transaction + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Commit commits a transaction. The request includes the mutations to be +// applied to rows in the database. 
+// +// Commit might return an ABORTED error. This can occur at any time; +// commonly, the cause is conflicts with concurrent +// transactions. However, it can also happen for a variety of other +// reasons. If Commit returns ABORTED, the caller should re-attempt +// the transaction from the beginning, re-using the same session. +func (c *Client) Commit(ctx context.Context, req *spannerpb.CommitRequest, opts ...gax.CallOption) (*spannerpb.CommitResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...) + var resp *spannerpb.CommitResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Commit(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Rollback rolls back a transaction, releasing any locks it holds. It is a good +// idea to call this for any transaction that includes one or more +// [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and +// ultimately decides not to commit. +// +// Rollback returns OK if it successfully aborts the transaction, the +// transaction was already aborted, or the transaction is not +// found. Rollback never returns ABORTED. +func (c *Client) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.Rollback(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// SessionIterator manages a stream of *spannerpb.Session. +type SessionIterator struct { + items []*spannerpb.Session + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*spannerpb.Session, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SessionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
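+//
+// A typical loop looks like this (a sketch; ExampleClient_ListSessions in the
+// example file shows the full pattern):
+//
+//	for {
+//		session, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		_ = session
+//	}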
+func (it *SessionIterator) Next() (*spannerpb.Session, error) { + var item *spannerpb.Session + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SessionIterator) bufLen() int { + return len(it.items) +} + +func (it *SessionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go new file mode 100644 index 0000000..d1b6613 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_example_test.go @@ -0,0 +1,254 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package spanner_test + +import ( + "io" + + "cloud.google.com/go/spanner/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + spannerpb "google.golang.org/genproto/googleapis/spanner/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_CreateSession() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.CreateSessionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSession(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_GetSession() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.GetSessionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSession(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListSessions() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ListSessionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSessions(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_DeleteSession() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.DeleteSessionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSession(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_ExecuteSql() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ExecuteSqlRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ExecuteSql(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ExecuteStreamingSql() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ExecuteSqlRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.ExecuteStreamingSql(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_Read() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ReadRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Read(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRead() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.ReadRequest{ + // TODO: Fill request struct fields. + } + stream, err := c.StreamingRead(ctx, req) + if err != nil { + // TODO: Handle error. + } + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_BeginTransaction() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.BeginTransactionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BeginTransaction(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Commit() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.CommitRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Commit(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_Rollback() { + ctx := context.Background() + c, err := spanner.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &spannerpb.RollbackRequest{ + // TODO: Fill request struct fields. + } + err = c.Rollback(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/spanner/backoff.go b/vendor/cloud.google.com/go/spanner/backoff.go new file mode 100644 index 0000000..d387238 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math/rand" + "time" +) + +const ( + // minBackoff is the minimum backoff used by default. + minBackoff = 1 * time.Second + // maxBackoff is the maximum backoff used by default. + maxBackoff = 32 * time.Second + // jitter is the jitter factor. + jitter = 0.4 + // rate is the rate of exponential increase in the backoff. 
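+	// For illustration, ignoring jitter: with minBackoff = 1s and rate = 1.3,
+	// the pre-jitter delays grow roughly as 1.3s, 1.69s, 2.20s, ... until they
+	// are capped at maxBackoff; delay() below then subtracts up to 40% jitter.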
+ rate = 1.3 +) + +var defaultBackoff = exponentialBackoff{minBackoff, maxBackoff} + +type exponentialBackoff struct { + min, max time.Duration +} + +// delay calculates the delay that should happen at n-th +// exponential backoff in a series. +func (b exponentialBackoff) delay(retries int) time.Duration { + min, max := float64(b.min), float64(b.max) + delay := min + for delay < max && retries > 0 { + delay *= rate + retries-- + } + if delay > max { + delay = max + } + delay -= delay * jitter * rand.Float64() + if delay < min { + delay = min + } + return time.Duration(delay) +} diff --git a/vendor/cloud.google.com/go/spanner/backoff_test.go b/vendor/cloud.google.com/go/spanner/backoff_test.go new file mode 100644 index 0000000..7a0314e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "time" + + "testing" +) + +// Test if exponential backoff helper can produce correct series of +// retry delays. +func TestBackoff(t *testing.T) { + b := exponentialBackoff{minBackoff, maxBackoff} + tests := []struct { + retries int + min time.Duration + max time.Duration + }{ + { + retries: 0, + min: minBackoff, + max: minBackoff, + }, + { + retries: 1, + min: minBackoff, + max: time.Duration(rate * float64(minBackoff)), + }, + { + retries: 3, + min: time.Duration(math.Pow(rate, 3) * (1 - jitter) * float64(minBackoff)), + max: time.Duration(math.Pow(rate, 3) * float64(minBackoff)), + }, + { + retries: 1000, + min: time.Duration((1 - jitter) * float64(maxBackoff)), + max: maxBackoff, + }, + } + for _, test := range tests { + got := b.delay(test.retries) + if float64(got) < float64(test.min) || float64(got) > float64(test.max) { + t.Errorf("delay(%v) = %v, want in range [%v, %v]", test.retries, got, test.min, test.max) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go new file mode 100644 index 0000000..cbc2623 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/client.go @@ -0,0 +1,329 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "fmt" + "regexp" + "sync/atomic" + "time" + + "cloud.google.com/go/internal/version" + "golang.org/x/net/context" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const ( + prodAddr = "spanner.googleapis.com:443" + + // resourcePrefixHeader is the name of the metadata header used to indicate + // the resource being operated on. + resourcePrefixHeader = "google-cloud-resource-prefix" + // xGoogHeaderKey is the name of the metadata header used to indicate client + // information. + xGoogHeaderKey = "x-goog-api-client" + + // numChannels is the default value for NumChannels of client + numChannels = 4 +) + +const ( + // Scope is the scope for Cloud Spanner Data API. + Scope = "https://www.googleapis.com/auth/spanner.data" + + // AdminScope is the scope for Cloud Spanner Admin APIs. + AdminScope = "https://www.googleapis.com/auth/spanner.admin" +) + +var ( + validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$") + xGoogHeaderVal = fmt.Sprintf("gl-go/%s gccl/%s grpc/%s", version.Go(), version.Repo, grpc.Version) +) + +func validDatabaseName(db string) error { + if matched := validDBPattern.MatchString(db); !matched { + return fmt.Errorf("database name %q should conform to pattern %q", + db, validDBPattern.String()) + } + return nil +} + +// Client is a client for reading and writing data to a Cloud Spanner database. A +// client is safe to use concurrently, except for its Close method. +type Client struct { + // rr must be accessed through atomic operations. + rr uint32 + conns []*grpc.ClientConn + clients []sppb.SpannerClient + database string + // Metadata to be sent with each request. + md metadata.MD + idleSessions *sessionPool +} + +// ClientConfig has configurations for the client. +type ClientConfig struct { + // NumChannels is the number of GRPC channels. + // If zero, numChannels is used. + NumChannels int + co []option.ClientOption + // SessionPoolConfig is the configuration for session pool. + SessionPoolConfig +} + +// errDial returns error for dialing to Cloud Spanner. +func errDial(ci int, err error) error { + e := toSpannerError(err).(*Error) + e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci)) + return e +} + +func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { + existing, ok := metadata.FromOutgoingContext(ctx) + if ok { + md = metadata.Join(existing, md) + } + return metadata.NewOutgoingContext(ctx, md) +} + +// NewClient creates a client to a database. A valid database name has the +// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default +// configuration. +func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) { + return NewClientWithConfig(ctx, database, ClientConfig{}, opts...) +} + +// NewClientWithConfig creates a client to a database. A valid database name has the +// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. +func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (*Client, error) { + // Validate database path. 
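+	// A valid name looks like "projects/P/instances/I/databases/D"; the IDs
+	// here are placeholders.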
+ if err := validDatabaseName(database); err != nil { + return nil, err + } + c := &Client{ + database: database, + md: metadata.Pairs( + resourcePrefixHeader, database, + xGoogHeaderKey, xGoogHeaderVal), + } + allOpts := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(Scope), + option.WithGRPCDialOption( + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(100<<20), + grpc.MaxCallRecvMsgSize(100<<20), + ), + ), + } + allOpts = append(allOpts, openCensusOptions()...) + allOpts = append(allOpts, opts...) + // Prepare gRPC channels. + if config.NumChannels == 0 { + config.NumChannels = numChannels + } + // Default MaxOpened sessions + if config.MaxOpened == 0 { + config.MaxOpened = uint64(config.NumChannels * 100) + } + if config.MaxBurst == 0 { + config.MaxBurst = 10 + } + for i := 0; i < config.NumChannels; i++ { + conn, err := gtransport.Dial(ctx, allOpts...) + if err != nil { + return nil, errDial(i, err) + } + c.conns = append(c.conns, conn) + c.clients = append(c.clients, sppb.NewSpannerClient(conn)) + } + // Prepare session pool. + config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) { + // TODO: support more loadbalancing options. + return c.rrNext(), nil + } + sp, err := newSessionPool(database, config.SessionPoolConfig, c.md) + if err != nil { + c.Close() + return nil, err + } + c.idleSessions = sp + return c, nil +} + +// rrNext returns the next available Cloud Spanner RPC client in a round-robin manner. +func (c *Client) rrNext() sppb.SpannerClient { + return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))] +} + +// Close closes the client. +func (c *Client) Close() { + if c.idleSessions != nil { + c.idleSessions.close() + } + for _, conn := range c.conns { + conn.Close() + } +} + +// Single provides a read-only snapshot transaction optimized for the case +// where only a single read or query is needed. This is more efficient than +// using ReadOnlyTransaction() for a single read or query. +// +// Single will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) Single() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions} + t.txReadOnly.txReadEnv = t + return t +} + +// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for +// multiple reads from the database. You must call Close() when the +// ReadOnlyTransaction is no longer needed to release resources on the server. +// +// ReadOnlyTransaction will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. 
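+//
+// A minimal sketch of the intended pattern (stmt1 is illustrative):
+//
+//	txn := client.ReadOnlyTransaction()
+//	defer txn.Close()
+//	iter := txn.Query(ctx, stmt1)
+//	// ... consume iter, then issue further reads or queries on txn.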
+func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{ + singleUse: false, + sp: c.idleSessions, + txReadyOrClosed: make(chan struct{}), + } + t.txReadOnly.txReadEnv = t + return t +} + +type transactionInProgressKey struct{} + +func checkNestedTxn(ctx context.Context) error { + if ctx.Value(transactionInProgressKey{}) != nil { + return spannerErrorf(codes.FailedPrecondition, "Cloud Spanner does not support nested transactions") + } + return nil +} + +// ReadWriteTransaction executes a read-write transaction, with retries as +// necessary. +// +// The function f will be called one or more times. It must not maintain +// any state between calls. +// +// If the transaction cannot be committed or if f returns an IsAborted error, +// ReadWriteTransaction will call f again. It will continue to call f until the +// transaction can be committed or the Context times out or is cancelled. If f +// returns an error other than IsAborted, ReadWriteTransaction will abort the +// transaction and return the error. +// +// To limit the number of retries, set a deadline on the Context rather than +// using a fixed limit on the number of attempts. ReadWriteTransaction will +// retry as needed until that deadline is met. +func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (time.Time, error) { + if err := checkNestedTxn(ctx); err != nil { + return time.Time{}, err + } + var ( + ts time.Time + sh *sessionHandle + ) + err := runRetryableNoWrap(ctx, func(ctx context.Context) error { + var ( + err error + t *ReadWriteTransaction + ) + if sh == nil || sh.getID() == "" || sh.getClient() == nil { + // Session handle hasn't been allocated or has been destroyed. + sh, err = c.idleSessions.takeWriteSession(ctx) + if err != nil { + // If session retrieval fails, just fail the transaction. + return err + } + t = &ReadWriteTransaction{ + sh: sh, + tx: sh.getTransactionID(), + } + } else { + t = &ReadWriteTransaction{ + sh: sh, + } + } + t.txReadOnly.txReadEnv = t + if err = t.begin(ctx); err != nil { + // Mask error from begin operation as retryable error. + return errRetry(err) + } + ts, err = t.runInTransaction(ctx, f) + return err + }) + if sh != nil { + sh.recycle() + } + return ts, err +} + +// applyOption controls the behavior of Client.Apply. +type applyOption struct { + // If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once. + atLeastOnce bool +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption func(*applyOption) + +// ApplyAtLeastOnce returns an ApplyOption that removes replay protection. +// +// With this option, Apply may attempt to apply mutations more than once; if +// the mutations are not idempotent, this may lead to a failure being reported +// when the mutation was applied more than once. For example, an insert may +// fail with ALREADY_EXISTS even though the row did not exist before Apply was +// called. For this reason, most users of the library will prefer not to use +// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas +// Apply's default replay protection may require an additional RPC. So this +// option may be appropriate for latency sensitive and/or high throughput blind +// writing. +func ApplyAtLeastOnce() ApplyOption { + return func(ao *applyOption) { + ao.atLeastOnce = true + } +} + +// Apply applies a list of mutations atomically to the database. 
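+//
+// A minimal sketch of a blind write (table and column names are illustrative):
+//
+//	m := spanner.Insert("Users", []string{"name"}, []interface{}{"alice"})
+//	_, err := client.Apply(ctx, []*spanner.Mutation{m})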
+func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error) {
+	ao := &applyOption{}
+	for _, opt := range opts {
+		opt(ao)
+	}
+	if !ao.atLeastOnce {
+		return c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
+			// Return BufferWrite's error so invalid mutations fail the transaction.
+			return t.BufferWrite(ms)
+		})
+	}
+	t := &writeOnlyTransaction{c.idleSessions}
+	return t.applyAtLeastOnce(ctx, ms...)
+}
diff --git a/vendor/cloud.google.com/go/spanner/client_test.go b/vendor/cloud.google.com/go/spanner/client_test.go
new file mode 100644
index 0000000..951d958
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/client_test.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"strings"
+	"testing"
+)
+
+// Test validDatabaseName()
+func TestValidDatabaseName(t *testing.T) {
+	validDbURI := "projects/spanner-cloud-test/instances/foo/databases/foodb"
+	invalidDbUris := []string{
+		// Completely wrong DB URI.
+		"foobarDB",
+		// Project ID contains "/".
+		"projects/spanner-cloud/test/instances/foo/databases/foodb",
+		// No instance ID.
+		"projects/spanner-cloud-test/instances//databases/foodb",
+	}
+	if err := validDatabaseName(validDbURI); err != nil {
+		t.Errorf("validDatabaseName(%q) = %v, want nil", validDbURI, err)
+	}
+	for _, d := range invalidDbUris {
+		if err, wantErr := validDatabaseName(d), "should conform to pattern"; !strings.Contains(err.Error(), wantErr) {
+			t.Errorf("validDatabaseName(%q) = %q, want error pattern %q", d, err, wantErr)
+		}
+	}
+}
+
+func TestReadOnlyTransactionClose(t *testing.T) {
+	// Closing a ReadOnlyTransaction shouldn't panic.
+	c := &Client{}
+	tx := c.ReadOnlyTransaction()
+	tx.Close()
+}
diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go
new file mode 100644
index 0000000..2d0105e
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/doc.go
@@ -0,0 +1,310 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package spanner provides a client for reading and writing to Cloud Spanner
+databases. See the packages under admin for clients that operate on databases
+and instances.
+
+Note: This package is in beta. Some backwards-incompatible changes may occur.
+
+See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction
+to Cloud Spanner and additional help on using this API.
+ +Creating a Client + +To start working with this package, create a client that refers to the database +of interest: + + ctx := context.Background() + client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") + if err != nil { + // TODO: Handle error. + } + defer client.Close() + +Remember to close the client after use to free up the sessions in the session +pool. + + +Simple Reads and Writes + +Two Client methods, Apply and Single, work well for simple reads and writes. As +a quick introduction, here we write a new row to the database and read it back: + + _, err := client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) + if err != nil { + // TODO: Handle error. + } + +All the methods used above are discussed in more detail below. + + +Keys + +Every Cloud Spanner row has a unique key, composed of one or more columns. +Construct keys with a literal of type Key: + + key1 := spanner.Key{"alice"} + + +KeyRanges + +The keys of a Cloud Spanner table are ordered. You can specify ranges of keys +using the KeyRange type: + + kr1 := spanner.KeyRange{Start: key1, End: key2} + +By default, a KeyRange includes its start key but not its end key. Use +the Kind field to specify other boundary conditions: + + // include both keys + kr2 := spanner.KeyRange{Start: key1, End: key2, Kind: spanner.ClosedClosed} + + +KeySets + +A KeySet represents a set of keys. A single Key or KeyRange can act as a KeySet. Use +the KeySets function to build the union of several KeySets: + + ks1 := spanner.KeySets(key1, key2, kr1, kr2) + +AllKeys returns a KeySet that refers to all the keys in a table: + + ks2 := spanner.AllKeys() + + +Transactions + +All Cloud Spanner reads and writes occur inside transactions. There are two +types of transactions, read-only and read-write. Read-only transactions cannot +change the database, do not acquire locks, and may access either the current +database state or states in the past. Read-write transactions can read the +database before writing to it, and always apply to the most recent database +state. + + +Single Reads + +The simplest and fastest transaction is a ReadOnlyTransaction that supports a +single read operation. Use Client.Single to create such a transaction. You can +chain the call to Single with a call to a Read method. + +When you only want one row whose key you know, use ReadRow. Provide the table +name, key, and the columns you want to read: + + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + +Read multiple rows with the Read method. It takes a table name, KeySet, and list +of columns: + + iter := client.Single().Read(ctx, "Accounts", keyset1, columns) + +Read returns a RowIterator. You can call the Do method on the iterator and pass +a callback: + + err := iter.Do(func(row *Row) error { + // TODO: use row + return nil + }) + +RowIterator also follows the standard pattern for the Google +Cloud Client Libraries: + + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use row + } + +Always call Stop when you finish using an iterator this way, whether or not you +iterate to the end. (Failing to call Stop could lead you to exhaust the +database's session quota.) 
+
+To read rows using a secondary index, use ReadUsingIndex.
+
+Statements
+
+The most general form of reading uses SQL statements. Construct a Statement
+with NewStatement, setting any parameters using the Statement's Params map:
+
+	stmt := spanner.NewStatement("SELECT First, Last FROM SINGERS WHERE Last >= @start")
+	stmt.Params["start"] = "Dylan"
+
+You can also construct a Statement directly with a struct literal, providing
+your own map of parameters.
+
+Use the Query method to run the statement and obtain an iterator:
+
+	iter := client.Single().Query(ctx, stmt)
+
+
+Rows
+
+Once you have a Row, via an iterator or a call to ReadRow, you can extract
+column values in several ways. Pass in a pointer to a Go variable of the
+appropriate type when you extract a value.
+
+You can extract by column position or name:
+
+	err := row.Column(0, &name)
+	err = row.ColumnByName("balance", &balance)
+
+You can extract all the columns at once:
+
+	err = row.Columns(&name, &balance)
+
+Or you can define a Go struct that corresponds to your columns, and extract
+into that:
+
+	var s struct { Name string; Balance int64 }
+	err = row.ToStruct(&s)
+
+
+For Cloud Spanner columns that may contain NULL, use one of the NullXXX types,
+like NullString:
+
+	var ns spanner.NullString
+	if err := row.Column(0, &ns); err != nil {
+		// TODO: Handle error.
+	}
+	if ns.Valid {
+		fmt.Println(ns.StringVal)
+	} else {
+		fmt.Println("column is NULL")
+	}
+
+
+Multiple Reads
+
+To perform more than one read in a transaction, use ReadOnlyTransaction:
+
+	txn := client.ReadOnlyTransaction()
+	defer txn.Close()
+	iter := txn.Query(ctx, stmt1)
+	// ...
+	iter = txn.Query(ctx, stmt2)
+	// ...
+
+You must call Close when you are done with the transaction.
+
+
+Timestamps and Timestamp Bounds
+
+Cloud Spanner read-only transactions conceptually perform all their reads at a
+single moment in time, called the transaction's read timestamp. Once a read has
+started, you can call ReadOnlyTransaction's Timestamp method to obtain the read
+timestamp.
+
+By default, a transaction will pick the most recent time (a time where all
+previously committed transactions are visible) for its reads. This provides the
+freshest data, but may involve some delay. You can often get a quicker response
+if you are willing to tolerate "stale" data. You can control the read timestamp
+selected by a transaction by calling the WithTimestampBound method on the
+transaction before using it. For example, to perform a query on data that is at
+most one minute stale, use
+
+	client.Single().
+		WithTimestampBound(spanner.MaxStaleness(1*time.Minute)).
+		Query(ctx, stmt)
+
+See the documentation of TimestampBound for more details.
+
+
+Mutations
+
+To write values to a Cloud Spanner database, construct a Mutation. The spanner
+package has functions for inserting, updating and deleting rows. Except for the
+Delete methods, which take a Key or KeyRange, each mutation-building function
+comes in three varieties.
+ +One takes lists of columns and values along with the table name: + + m1 := spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"}) + +One takes a map from column names to values: + + m2 := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + +And the third accepts a struct value, and determines the columns from the +struct field names: + + type User struct { Name, Email string } + u := User{Name: "alice", Email: "a@example.com"} + m3, err := spanner.InsertStruct("Users", u) + + +Writes + +To apply a list of mutations to the database, use Apply: + + _, err := client.Apply(ctx, []*spanner.Mutation{m1, m2, m3}) + +If you need to read before writing in a single transaction, use a +ReadWriteTransaction. ReadWriteTransactions may abort and need to be retried. +You pass in a function to ReadWriteTransaction, and the client will handle the +retries automatically. Use the transaction's BufferWrite method to buffer +mutations, which will all be executed at the end of the transaction: + + _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an IsAborted error. + return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + txn.BufferWrite([]*spanner.Mutation{m}) + + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. + return nil + }) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package spanner // import "cloud.google.com/go/spanner" diff --git a/vendor/cloud.google.com/go/spanner/errors.go b/vendor/cloud.google.com/go/spanner/errors.go new file mode 100644 index 0000000..13106f2 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/errors.go @@ -0,0 +1,108 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Error is the structured error returned by Cloud Spanner client. +type Error struct { + // Code is the canonical error code for describing the nature of a + // particular error. + Code codes.Code + // Desc explains more details of the error. + Desc string + // trailers are the trailers returned in the response, if any. + trailers metadata.MD +} + +// Error implements error.Error. 
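+// The returned string has the form `spanner: code = %q, desc = %q`; callers
+// that need to branch on the failure should use ErrCode rather than parsing
+// this message.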
+func (e *Error) Error() string {
+	if e == nil {
+		return "spanner: OK"
+	}
+	return fmt.Sprintf("spanner: code = %q, desc = %q", e.Code, e.Desc)
+}
+
+// decorate decorates an existing spanner.Error with more information.
+func (e *Error) decorate(info string) {
+	e.Desc = fmt.Sprintf("%v, %v", info, e.Desc)
+}
+
+// spannerErrorf generates a *spanner.Error with the given error code and
+// description.
+func spannerErrorf(ec codes.Code, format string, args ...interface{}) error {
+	return &Error{
+		Code: ec,
+		Desc: fmt.Sprintf(format, args...),
+	}
+}
+
+// toSpannerError converts general Go error to *spanner.Error.
+func toSpannerError(err error) error {
+	return toSpannerErrorWithMetadata(err, nil)
+}
+
+// toSpannerErrorWithMetadata converts general Go error and grpc trailers to *spanner.Error.
+// Note: this modifies the original error if trailers aren't nil.
+func toSpannerErrorWithMetadata(err error, trailers metadata.MD) error {
+	if err == nil {
+		return nil
+	}
+	if se, ok := err.(*Error); ok {
+		if trailers != nil {
+			se.trailers = metadata.Join(se.trailers, trailers)
+		}
+		return se
+	}
+	if grpc.Code(err) == codes.Unknown {
+		return &Error{codes.Unknown, err.Error(), trailers}
+	}
+	return &Error{grpc.Code(err), grpc.ErrorDesc(err), trailers}
+}
+
+// ErrCode extracts the canonical error code from a Go error.
+func ErrCode(err error) codes.Code {
+	se, ok := toSpannerError(err).(*Error)
+	if !ok {
+		return codes.Unknown
+	}
+	return se.Code
+}
+
+// ErrDesc extracts the Cloud Spanner error description from a Go error.
+func ErrDesc(err error) string {
+	se, ok := toSpannerError(err).(*Error)
+	if !ok {
+		return err.Error()
+	}
+	return se.Desc
+}
+
+// errTrailers extracts the grpc trailers if present from a Go error.
+func errTrailers(err error) metadata.MD {
+	se, ok := err.(*Error)
+	if !ok {
+		return nil
+	}
+	return se.trailers
+}
diff --git a/vendor/cloud.google.com/go/spanner/examples_test.go b/vendor/cloud.google.com/go/spanner/examples_test.go
new file mode 100644
index 0000000..7f0d606
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/examples_test.go
@@ -0,0 +1,536 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner_test
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"cloud.google.com/go/spanner"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	const myDB = "projects/my-project/instances/my-instance/databases/my-db"
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_ = client // TODO: Use client.
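+	// Close the client when done to release its session pool.
+	defer client.Close()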
+}
+
+const myDB = "projects/my-project/instances/my-instance/databases/my-db"
+
+func ExampleNewClientWithConfig() {
+	ctx := context.Background()
+	const myDB = "projects/my-project/instances/my-instance/databases/my-db"
+	client, err := spanner.NewClientWithConfig(ctx, myDB, spanner.ClientConfig{
+		NumChannels: 10,
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_ = client     // TODO: Use client.
+	client.Close() // Close client when done.
+}
+
+func ExampleClient_Single() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers"))
+	_ = iter // TODO: iterate using Next or Do.
+}
+
+func ExampleClient_ReadOnlyTransaction() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	t := client.ReadOnlyTransaction()
+	defer t.Close()
+	// TODO: Read with t using Read, ReadRow, ReadUsingIndex, or Query.
+}
+
+func ExampleClient_ReadWriteTransaction() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
+		var balance int64
+		row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"})
+		if err != nil {
+			// This function will be called again if this is an
+			// IsAborted error.
+			return err
+		}
+		if err := row.Column(0, &balance); err != nil {
+			return err
+		}
+
+		if balance <= 10 {
+			return errors.New("insufficient funds in account")
+		}
+		balance -= 10
+		m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance})
+		// The buffered mutation will be committed. If the commit
+		// fails with an IsAborted error, this function will be called
+		// again.
+		return txn.BufferWrite([]*spanner.Mutation{m})
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleUpdate() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
+		row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"})
+		if err != nil {
+			return err
+		}
+		var balance int64
+		if err := row.Column(0, &balance); err != nil {
+			return err
+		}
+		return txn.BufferWrite([]*spanner.Mutation{
+			spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance + 10}),
+		})
+	})
+	if err != nil {
+		// TODO: Handle error.
+	}
+}
+
+// This example is the same as the one for Update, except for the use of UpdateMap.
+func ExampleUpdateMap() {
+	ctx := context.Background()
+	client, err := spanner.NewClient(ctx, myDB)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	_, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
+		row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"})
+		if err != nil {
+			return err
+		}
+		var balance int64
+		if err := row.Column(0, &balance); err != nil {
+			return err
+		}
+		return txn.BufferWrite([]*spanner.Mutation{
+			spanner.UpdateMap("Accounts", map[string]interface{}{
+				"user":    "alice",
+				"balance": balance + 10,
+			}),
+		})
+	})
+	if err != nil {
+		// TODO: Handle error.
+ } +} + +// This example is the same as the one for Update, except for the use of UpdateStruct. +func ExampleUpdateStruct() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + type account struct { + User string `spanner:"user"` + Balance int64 `spanner:"balance"` + } + _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + return err + } + var balance int64 + if err := row.Column(0, &balance); err != nil { + return err + } + m, err := spanner.UpdateStruct("Accounts", account{ + User: "alice", + Balance: balance + 10, + }) + if err != nil { + return err + } + return txn.BufferWrite([]*spanner.Mutation{m}) + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Apply() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + m := spanner.Update("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _, err = client.Apply(ctx, []*spanner.Mutation{m}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInsert() { + m := spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertMap() { + m := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertStruct() { + type User struct { + Name, Email string + } + u := User{Name: "alice", Email: "a@example.com"} + m, err := spanner.InsertStruct("Users", u) + if err != nil { + // TODO: Handle error. + } + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDelete() { + m := spanner.Delete("Users", spanner.Key{"alice"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDelete_keyRange() { + m := spanner.Delete("Users", spanner.KeyRange{ + Start: spanner.Key{"alice"}, + End: spanner.Key{"bob"}, + Kind: spanner.ClosedClosed, + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + var firstName string + if err := row.Column(0, &firstName); err != nil { + // TODO: Handle error. + } + fmt.Println(firstName) + } +} + +func ExampleRowIterator_Do() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + err = iter.Do(func(r *spanner.Row) error { + var firstName string + if err := r.Column(0, &firstName); err != nil { + return err + } + fmt.Println(firstName) + return nil + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleRow_Size() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. 
+ } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.Size()) // size is 2 +} + +func ExampleRow_ColumnName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnName(1)) // prints "balance" +} + +func ExampleRow_ColumnIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + index, err := row.ColumnIndex("balance") + if err != nil { + // TODO: Handle error. + } + fmt.Println(index) +} + +func ExampleRow_ColumnNames() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnNames()) +} + +func ExampleRow_ColumnByName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var balance int64 + if err := row.ColumnByName("balance", &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(balance) +} + +func ExampleRow_Columns() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var name string + var balance int64 + if err := row.Columns(&name, &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(name, balance) +} + +func ExampleRow_ToStruct() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + + type Account struct { + Name string + Balance int64 + } + + var acct Account + if err := row.ToStruct(&acct); err != nil { + // TODO: Handle error. + } + fmt.Println(acct) +} + +func ExampleReadOnlyTransaction_Read() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Read(ctx, "Users", + spanner.KeySets(spanner.Key{"alice"}, spanner.Key{"bob"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadUsingIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().ReadUsingIndex(ctx, "Users", + "UsersByEmail", + spanner.KeySets(spanner.Key{"a@example.com"}, spanner.Key{"b@example.com"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. 
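+	// Note that with ReadUsingIndex the key set addresses index keys (here,
+	// email addresses) rather than the table's primary keys, and rows come
+	// back in index order.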
+} + +func ExampleReadOnlyTransaction_ReadRow() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_Query() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleNewStatement() { + stmt := spanner.NewStatement("SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start") + stmt.Params["start"] = "Dylan" + // TODO: Use stmt in Query. +} + +func ExampleNewStatement_structLiteral() { + stmt := spanner.Statement{ + SQL: "SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start", + Params: map[string]interface{}{"start": "Dylan"}, + } + _ = stmt // TODO: Use stmt in Query. +} + +func ExampleReadOnlyTransaction_Timestamp() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single() + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_WithTimestampBound() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single().WithTimestampBound(spanner.MaxStaleness(30 * time.Second)) + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) +} + +func ExampleGenericColumnValue_Decode() { + // In real applications, rows can be retrieved by methods like client.Single().ReadRow(). + row, err := spanner.NewRow([]string{"intCol", "strCol"}, []interface{}{42, "my-text"}) + if err != nil { + // TODO: Handle error. + } + for i := 0; i < row.Size(); i++ { + var col spanner.GenericColumnValue + if err := row.Column(i, &col); err != nil { + // TODO: Handle error. + } + switch col.Type.Code { + case sppb.TypeCode_INT64: + var v int64 + if err := col.Decode(&v); err != nil { + // TODO: Handle error. + } + fmt.Println("int", v) + case sppb.TypeCode_STRING: + var v string + if err := col.Decode(&v); err != nil { + // TODO: Handle error. + } + fmt.Println("string", v) + } + } + // Output: + // int 42 + // string my-text +} diff --git a/vendor/cloud.google.com/go/spanner/go18.go b/vendor/cloud.google.com/go/spanner/go18.go new file mode 100644 index 0000000..8aebfbb --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/go18.go @@ -0,0 +1,29 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package spanner + +import ( + ocgrpc "go.opencensus.io/plugin/grpc" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func openCensusOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithGRPCDialOption(grpc.WithStatsHandler(ocgrpc.NewClientStatsHandler())), + } +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go new file mode 100644 index 0000000..45f5da1 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go @@ -0,0 +1,370 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "errors" + "fmt" + "reflect" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Action is a mocked RPC activity that MockCloudSpannerClient will take. +type Action struct { + Method string + Err error +} + +// MockCloudSpannerClient is a mock implementation of sppb.SpannerClient. +type MockCloudSpannerClient struct { + sppb.SpannerClient + + mu sync.Mutex + t *testing.T + // Live sessions on the client. + sessions map[string]bool + // Expected set of actions that will be executed by the client. + actions []Action + // Session ping history. + pings []string + // Injected error, will be returned by all APIs. + injErr map[string]error + // Client will not fail on any request. + nice bool + // Client will stall on any requests. + freezed chan struct{} +} + +// NewMockCloudSpannerClient creates new MockCloudSpannerClient instance. +func NewMockCloudSpannerClient(t *testing.T, acts ...Action) *MockCloudSpannerClient { + mc := &MockCloudSpannerClient{t: t, sessions: map[string]bool{}, injErr: map[string]error{}} + mc.SetActions(acts...) + // Produce a closed channel, so the default action of ready is to not block. + mc.Freeze() + mc.Unfreeze() + return mc +} + +// MakeNice makes this a nice mock which will not fail on any request. +func (m *MockCloudSpannerClient) MakeNice() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = true +} + +// MakeStrict makes this a strict mock which will fail on any unexpected request. 
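+// Strict is the zero-value behavior (nice defaults to false), so MakeStrict
+// is mainly useful for undoing an earlier MakeNice.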
+func (m *MockCloudSpannerClient) MakeStrict() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = false +} + +// InjectError injects a global error that will be returned by all calls to method +// regardless of the actions array. +func (m *MockCloudSpannerClient) InjectError(method string, err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.injErr[method] = err +} + +// SetActions sets the new set of expected actions to MockCloudSpannerClient. +func (m *MockCloudSpannerClient) SetActions(acts ...Action) { + m.mu.Lock() + defer m.mu.Unlock() + m.actions = nil + for _, act := range acts { + m.actions = append(m.actions, act) + } +} + +// DumpPings dumps the ping history. +func (m *MockCloudSpannerClient) DumpPings() []string { + m.mu.Lock() + defer m.mu.Unlock() + return append([]string(nil), m.pings...) +} + +// DumpSessions dumps the internal session table. +func (m *MockCloudSpannerClient) DumpSessions() map[string]bool { + m.mu.Lock() + defer m.mu.Unlock() + st := map[string]bool{} + for s, v := range m.sessions { + st[s] = v + } + return st +} + +// CreateSession is a placeholder for SpannerClient.CreateSession. +func (m *MockCloudSpannerClient) CreateSession(c context.Context, r *sppb.CreateSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["CreateSession"]; err != nil { + return nil, err + } + s := &sppb.Session{} + if r.Database != "mockdb" { + // Reject other databases + return s, grpc.Errorf(codes.NotFound, fmt.Sprintf("database not found: %v", r.Database)) + } + // Generate & record session name. + s.Name = fmt.Sprintf("mockdb-%v", time.Now().UnixNano()) + m.sessions[s.Name] = true + return s, nil +} + +// GetSession is a placeholder for SpannerClient.GetSession. +func (m *MockCloudSpannerClient) GetSession(c context.Context, r *sppb.GetSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["GetSession"]; err != nil { + return nil, err + } + m.pings = append(m.pings, r.Name) + if _, ok := m.sessions[r.Name]; !ok { + return nil, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + } + return &sppb.Session{Name: r.Name}, nil +} + +// DeleteSession is a placeholder for SpannerClient.DeleteSession. +func (m *MockCloudSpannerClient) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["DeleteSession"]; err != nil { + return nil, err + } + if _, ok := m.sessions[r.Name]; !ok { + // Session not found. + return &empty.Empty{}, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + } + // Delete session from in-memory table. + delete(m.sessions, r.Name) + return &empty.Empty{}, nil +} + +// ExecuteStreamingSql is a mock implementation of SpannerClient.ExecuteStreamingSql. 
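+// The mock only validates the request: it compares the incoming
+// ExecuteSqlRequest against a fixed expected request (session "mocksession",
+// SQL "mockquery") and never returns a usable stream.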
+func (m *MockCloudSpannerClient) ExecuteStreamingSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (sppb.Spanner_ExecuteStreamingSqlClient, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + act, err := m.expectAction("ExecuteStreamingSql") + if err != nil { + return nil, err + } + wantReq := &sppb.ExecuteSqlRequest{ + Session: "mocksession", + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: &sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + }, + ReturnReadTimestamp: false, + }, + }, + }, + }, + }, + Sql: "mockquery", + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{"var1": &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "abc"}}}, + }, + ParamTypes: map[string]*sppb.Type{"var1": &sppb.Type{Code: sppb.TypeCode_STRING}}, + } + if !reflect.DeepEqual(r, wantReq) { + return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) + } + if act.Err != nil { + return nil, act.Err + } + return nil, errors.New("query never succeeds on mock client") +} + +// StreamingRead is a placeholder for SpannerClient.StreamingRead. +func (m *MockCloudSpannerClient) StreamingRead(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (sppb.Spanner_StreamingReadClient, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + act, err := m.expectAction("StreamingRead", "StreamingReadIndex") + if err != nil { + return nil, err + } + wantReq := &sppb.ReadRequest{ + Session: "mocksession", + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: &sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + }, + ReturnReadTimestamp: false, + }, + }, + }, + }, + }, + Table: "t_mock", + Columns: []string{"col1", "col2"}, + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{ + &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + Ranges: []*sppb.KeyRange{}, + All: false, + }, + } + if act.Method == "StreamingIndexRead" { + wantReq.Index = "idx1" + } + if !reflect.DeepEqual(r, wantReq) { + return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) + } + if act.Err != nil { + return nil, act.Err + } + return nil, errors.New("read never succeeds on mock client") +} + +// BeginTransaction is a placeholder for SpannerClient.BeginTransaction. +func (m *MockCloudSpannerClient) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest, opts ...grpc.CallOption) (*sppb.Transaction, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + act, err := m.expectAction("BeginTransaction") + if err != nil { + return nil, err + } + if act.Err != nil { + return nil, act.Err + } + } + resp := &sppb.Transaction{Id: []byte("transaction-1")} + if _, ok := r.Options.Mode.(*sppb.TransactionOptions_ReadOnly_); ok { + resp.ReadTimestamp = &pbt.Timestamp{Seconds: 3, Nanos: 4} + } + return resp, nil +} + +// Commit is a placeholder for SpannerClient.Commit. 
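+// In strict mode the call must match a pending "Commit" Action; on success a
+// fixed commit timestamp of {Seconds: 1, Nanos: 2} is returned.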
+func (m *MockCloudSpannerClient) Commit(c context.Context, r *sppb.CommitRequest, opts ...grpc.CallOption) (*sppb.CommitResponse, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + act, err := m.expectAction("Commit") + if err != nil { + return nil, err + } + if act.Err != nil { + return nil, act.Err + } + } + return &sppb.CommitResponse{CommitTimestamp: &pbt.Timestamp{Seconds: 1, Nanos: 2}}, nil +} + +// Rollback is a placeholder for SpannerClient.Rollback. +func (m *MockCloudSpannerClient) Rollback(c context.Context, r *sppb.RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.ready() + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + act, err := m.expectAction("Rollback") + if err != nil { + return nil, err + } + if act.Err != nil { + return nil, act.Err + } + } + return nil, nil +} + +func (m *MockCloudSpannerClient) expectAction(methods ...string) (Action, error) { + for _, me := range methods { + if err := m.injErr[me]; err != nil { + return Action{}, err + } + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected %v executed", methods) + } + act := m.actions[0] + m.actions = m.actions[1:] + for _, me := range methods { + if me == act.Method { + return act, nil + } + } + m.t.Fatalf("unexpected call of one of %v, want method %s", methods, act.Method) + return Action{}, nil +} + +// Freeze stalls all requests. +func (m *MockCloudSpannerClient) Freeze() { + m.mu.Lock() + defer m.mu.Unlock() + m.freezed = make(chan struct{}) +} + +// Unfreeze restores processing requests. +func (m *MockCloudSpannerClient) Unfreeze() { + m.mu.Lock() + defer m.mu.Unlock() + close(m.freezed) +} + +// CheckActionsConsumed checks that all actions have been consumed. +func (m *MockCloudSpannerClient) CheckActionsConsumed() { + if len(m.actions) != 0 { + m.t.Fatalf("unconsumed mock client actions: %v", m.actions) + } +} + +// ready checks conditions before executing requests +// TODO: add checks for injected errors, actions +func (m *MockCloudSpannerClient) ready() { + m.mu.Lock() + freezed := m.freezed + m.mu.Unlock() + // check if client should be freezed + <-freezed +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go new file mode 100644 index 0000000..c47a5d5 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go @@ -0,0 +1,257 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // KvMeta is the Metadata for mocked KV table. 
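+// It describes two STRING columns named "Key" and "Value".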
+ KvMeta = sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{Code: sppb.TypeCode_STRING}, + }, + { + Name: "Value", + Type: &sppb.Type{Code: sppb.TypeCode_STRING}, + }, + }, + }, + } +) + +// MockCtlMsg encapsulates PartialResultSet/error that might be sent to +// client +type MockCtlMsg struct { + // If ResumeToken == true, mock server will generate a row with + // resume token. + ResumeToken bool + // If Err != nil, mock server will return error in RPC response. + Err error +} + +// MockCloudSpanner is a mock implementation of SpannerServer interface. +// TODO: make MockCloudSpanner a full-fleged Cloud Spanner implementation. +type MockCloudSpanner struct { + sppb.SpannerServer + + s *grpc.Server + t *testing.T + addr string + msgs chan MockCtlMsg + readTs time.Time + next int +} + +// Addr returns the listening address of mock server. +func (m *MockCloudSpanner) Addr() string { + return m.addr +} + +// AddMsg generates a new mocked row which can be received by client. +func (m *MockCloudSpanner) AddMsg(err error, resumeToken bool) { + msg := MockCtlMsg{ + ResumeToken: resumeToken, + Err: err, + } + if err == io.EOF { + close(m.msgs) + } else { + m.msgs <- msg + } +} + +// Done signals an end to a mocked stream. +func (m *MockCloudSpanner) Done() { + close(m.msgs) +} + +// CreateSession is a placeholder for SpannerServer.CreateSession. +func (m *MockCloudSpanner) CreateSession(c context.Context, r *sppb.CreateSessionRequest) (*sppb.Session, error) { + m.t.Fatalf("CreateSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// GetSession is a placeholder for SpannerServer.GetSession. +func (m *MockCloudSpanner) GetSession(c context.Context, r *sppb.GetSessionRequest) (*sppb.Session, error) { + m.t.Fatalf("GetSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// DeleteSession is a placeholder for SpannerServer.DeleteSession. +func (m *MockCloudSpanner) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest) (*empty.Empty, error) { + m.t.Fatalf("DeleteSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// ExecuteSql is a placeholder for SpannerServer.ExecuteSql. +func (m *MockCloudSpanner) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest) (*sppb.ResultSet, error) { + m.t.Fatalf("ExecuteSql is unimplemented") + return nil, errors.New("Unimplemented") +} + +// EncodeResumeToken return mock resume token encoding for an uint64 integer. +func EncodeResumeToken(t uint64) []byte { + rt := make([]byte, 16) + binary.PutUvarint(rt, t) + return rt +} + +// DecodeResumeToken decodes a mock resume token into an uint64 integer. +func DecodeResumeToken(t []byte) (uint64, error) { + s, n := binary.Uvarint(t) + if n <= 0 { + return 0, fmt.Errorf("invalid resume token: %v", t) + } + return s, nil +} + +// ExecuteStreamingSql is a mock implementation of SpannerServer.ExecuteStreamingSql. 
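+// It recognizes exactly two queries: "SELECT * from t_unavailable", which
+// fails with codes.Unavailable, and the t_mock key/value query, which streams
+// rows from the msgs channel. A resume token in the request repositions the
+// stream to the row after the encoded position.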
+func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb.Spanner_ExecuteStreamingSqlServer) error { + switch r.Sql { + case "SELECT * from t_unavailable": + return grpc.Errorf(codes.Unavailable, "mock table unavailable") + case "SELECT t.key key, t.value value FROM t_mock t": + if r.ResumeToken != nil { + s, err := DecodeResumeToken(r.ResumeToken) + if err != nil { + return err + } + m.next = int(s) + 1 + } + for { + msg, more := <-m.msgs + if !more { + break + } + if msg.Err == nil { + var rt []byte + if msg.ResumeToken { + rt = EncodeResumeToken(uint64(m.next)) + } + meta := KvMeta + meta.Transaction = &sppb.Transaction{ + ReadTimestamp: &pbt.Timestamp{ + Seconds: m.readTs.Unix(), + Nanos: int32(m.readTs.Nanosecond()), + }, + } + err := s.Send(&sppb.PartialResultSet{ + Metadata: &meta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("foo-%02d", m.next)}}, + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("bar-%02d", m.next)}}, + }, + ResumeToken: rt, + }) + m.next = m.next + 1 + if err != nil { + return err + } + continue + } + return msg.Err + } + return nil + default: + return fmt.Errorf("unsupported SQL: %v", r.Sql) + } +} + +// Read is a placeholder for SpannerServer.Read. +func (m *MockCloudSpanner) Read(c context.Context, r *sppb.ReadRequest) (*sppb.ResultSet, error) { + m.t.Fatalf("Read is unimplemented") + return nil, errors.New("Unimplemented") +} + +// StreamingRead is a placeholder for SpannerServer.StreamingRead. +func (m *MockCloudSpanner) StreamingRead(r *sppb.ReadRequest, s sppb.Spanner_StreamingReadServer) error { + m.t.Fatalf("StreamingRead is unimplemented") + return errors.New("Unimplemented") +} + +// BeginTransaction is a placeholder for SpannerServer.BeginTransaction. +func (m *MockCloudSpanner) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest) (*sppb.Transaction, error) { + m.t.Fatalf("BeginTransaction is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Commit is a placeholder for SpannerServer.Commit. +func (m *MockCloudSpanner) Commit(c context.Context, r *sppb.CommitRequest) (*sppb.CommitResponse, error) { + m.t.Fatalf("Commit is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Rollback is a placeholder for SpannerServer.Rollback. +func (m *MockCloudSpanner) Rollback(c context.Context, r *sppb.RollbackRequest) (*empty.Empty, error) { + m.t.Fatalf("Rollback is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Serve runs a MockCloudSpanner listening on a random localhost address. +func (m *MockCloudSpanner) Serve() { + m.s = grpc.NewServer() + if m.addr == "" { + m.addr = "localhost:0" + } + lis, err := net.Listen("tcp", m.addr) + if err != nil { + m.t.Fatalf("Failed to listen: %v", err) + } + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + m.t.Fatalf("Failed to parse listener address: %v", err) + } + sppb.RegisterSpannerServer(m.s, m) + m.addr = "localhost:" + port + go m.s.Serve(lis) +} + +// Stop terminates MockCloudSpanner and closes the serving port. +func (m *MockCloudSpanner) Stop() { + m.s.Stop() +} + +// NewMockCloudSpanner creates a new MockCloudSpanner instance. 
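+// The mocked read timestamp of streamed rows is ts; the control-message
+// channel is buffered to 1000 entries. Call Serve to start listening and Stop
+// to shut the server down.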
+func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner { + mcs := &MockCloudSpanner{ + t: t, + msgs: make(chan MockCtlMsg, 1000), + readTs: ts, + } + return mcs +} diff --git a/vendor/cloud.google.com/go/spanner/key.go b/vendor/cloud.google.com/go/spanner/key.go new file mode 100644 index 0000000..5b332f4 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key.go @@ -0,0 +1,400 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "fmt" + "time" + + "google.golang.org/grpc/codes" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// A Key can be either a Cloud Spanner row's primary key or a secondary index key. +// It is essentially an interface{} array, which represents a set of Cloud Spanner +// columns. A Key type has the following usages: +// +// - Used as primary key which uniquely identifies a Cloud Spanner row. +// - Used as secondary index key which maps to a set of Cloud Spanner rows +// indexed under it. +// - Used as endpoints of primary key/secondary index ranges, +// see also the KeyRange type. +// +// Rows that are identified by the Key type are outputs of read operation or targets of +// delete operation in a mutation. Note that for Insert/Update/InsertOrUpdate/Update +// mutation types, although they don't require a primary key explicitly, the column list +// provided must contain enough columns that can comprise a primary key. +// +// Keys are easy to construct. For example, suppose you have a table with a +// primary key of username and product ID. To make a key for this table: +// +// key := spanner.Key{"john", 16} +// +// See the description of Row and Mutation types for how Go types are +// mapped to Cloud Spanner types. For convenience, Key type supports a wide range +// of Go types: +// - int, int8, int16, int32, int64, and NullInt64 are mapped to Cloud Spanner's INT64 type. +// - uint8, uint16 and uint32 are also mapped to Cloud Spanner's INT64 type. +// - float32, float64, NullFloat64 are mapped to Cloud Spanner's FLOAT64 type. +// - bool and NullBool are mapped to Cloud Spanner's BOOL type. +// - []byte is mapped to Cloud Spanner's BYTES type. +// - string and NullString are mapped to Cloud Spanner's STRING type. +// - time.Time and NullTime are mapped to Cloud Spanner's TIMESTAMP type. +// - civil.Date and NullDate are mapped to Cloud Spanner's DATE type. +type Key []interface{} + +// errInvdKeyPartType returns error for unsupported key part type. +func errInvdKeyPartType(part interface{}) error { + return spannerErrorf(codes.InvalidArgument, "key part has unsupported type %T", part) +} + +// keyPartValue converts a part of the Key (which is a valid Cloud Spanner type) +// into a proto3.Value. Used for encoding Key type into protobuf. 
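+// Integer types narrower than int64 (and the unsigned types up to uint32) are
+// widened to int64, and float32 to float64; the remaining supported types are
+// encoded as-is.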
+func keyPartValue(part interface{}) (pb *proto3.Value, err error) { + switch v := part.(type) { + case int: + pb, _, err = encodeValue(int64(v)) + case int8: + pb, _, err = encodeValue(int64(v)) + case int16: + pb, _, err = encodeValue(int64(v)) + case int32: + pb, _, err = encodeValue(int64(v)) + case uint8: + pb, _, err = encodeValue(int64(v)) + case uint16: + pb, _, err = encodeValue(int64(v)) + case uint32: + pb, _, err = encodeValue(int64(v)) + case float32: + pb, _, err = encodeValue(float64(v)) + case int64, float64, NullInt64, NullFloat64, bool, NullBool, []byte, string, NullString, time.Time, civil.Date, NullTime, NullDate: + pb, _, err = encodeValue(v) + default: + return nil, errInvdKeyPartType(v) + } + return pb, err +} + +// proto converts a spanner.Key into a proto3.ListValue. +func (key Key) proto() (*proto3.ListValue, error) { + lv := &proto3.ListValue{} + lv.Values = make([]*proto3.Value, 0, len(key)) + for _, part := range key { + v, err := keyPartValue(part) + if err != nil { + return nil, err + } + lv.Values = append(lv.Values, v) + } + return lv, nil +} + +// keySetProto lets a single Key act as a KeySet. +func (key Key) keySetProto() (*sppb.KeySet, error) { + kp, err := key.proto() + if err != nil { + return nil, err + } + return &sppb.KeySet{Keys: []*proto3.ListValue{kp}}, nil +} + +// String implements fmt.Stringer for Key. For string, []byte and NullString, it +// prints the uninterpreted bytes of their contents, leaving caller with the +// opportunity to escape the output. +func (key Key) String() string { + b := &bytes.Buffer{} + fmt.Fprint(b, "(") + for i, part := range []interface{}(key) { + if i != 0 { + fmt.Fprint(b, ",") + } + switch v := part.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool: + // Use %v to print numeric types and bool. + fmt.Fprintf(b, "%v", v) + case string: + fmt.Fprintf(b, "%q", v) + case []byte: + if v != nil { + fmt.Fprintf(b, "%q", v) + } else { + fmt.Fprint(b, "") + } + case NullInt64, NullFloat64, NullBool, NullString, NullTime, NullDate: + // The above types implement fmt.Stringer. + fmt.Fprintf(b, "%s", v) + case civil.Date: + fmt.Fprintf(b, "%q", v) + case time.Time: + fmt.Fprintf(b, "%q", v.Format(time.RFC3339Nano)) + default: + fmt.Fprintf(b, "%v", v) + } + } + fmt.Fprint(b, ")") + return b.String() +} + +// AsPrefix returns a KeyRange for all keys where k is the prefix. +func (key Key) AsPrefix() KeyRange { + return KeyRange{ + Start: key, + End: key, + Kind: ClosedClosed, + } +} + +// KeyRangeKind describes the kind of interval represented by a KeyRange: +// whether it is open or closed on the left and right. +type KeyRangeKind int + +const ( + // ClosedOpen is closed on the left and open on the right: the Start + // key is included, the End key is excluded. + ClosedOpen KeyRangeKind = iota + + // ClosedClosed is closed on the left and the right: both keys are included. + ClosedClosed + + // OpenClosed is open on the left and closed on the right: the Start + // key is excluded, the End key is included. + OpenClosed + + // OpenOpen is open on the left and the right: neither key is included. + OpenOpen +) + +// A KeyRange represents a range of rows in a table or index. +// +// A range has a Start key and an End key. IncludeStart and IncludeEnd +// indicate whether the Start and End keys are included in the range. 
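+// (Note: in this implementation the inclusion of Start and End is expressed
+// by the Kind field of type KeyRangeKind rather than by separate
+// IncludeStart/IncludeEnd fields; for example, ClosedOpen includes Start and
+// excludes End.)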
+// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10), +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// spanner.Key{"Bob", "2014-09-23"} +// spanner.Key{"Alfred", "2015-06-12"} +// +// Since the UserEvents table's PRIMARY KEY clause names two columns, each +// UserEvents key has two elements; the first is the UserName, and the second +// is the EventDate. +// +// Key ranges with multiple components are interpreted lexicographically by +// component using the table or index key's declared sort order. For example, +// the following range returns all events for user "Bob" that occurred in the +// year 2015: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob", "2015-01-01"}, +// End: spanner.Key{"Bob", "2015-12-31"}, +// Kind: ClosedClosed, +// } +// +// Start and end keys can omit trailing key components. This affects the +// inclusion and exclusion of rows that exactly match the provided key +// components: if IncludeStart is true, then rows that exactly match the +// provided components of the Start key are included; if IncludeStart is false +// then rows that exactly match are not included. IncludeEnd and End key +// behave in the same fashion. +// +// For example, the following range includes all events for "Bob" that occurred +// during and after the year 2000: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob", "2000-01-01"}, +// End: spanner.Key{"Bob"}, +// Kind: ClosedClosed, +// } +// +// The next example retrieves all events for "Bob": +// +// spanner.Key{"Bob"}.AsPrefix() +// +// To retrieve events before the year 2000: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob"}, +// End: spanner.Key{"Bob", "2000-01-01"}, +// Kind: ClosedOpen, +// } +// +// Although we specified a Kind for this KeyRange, we didn't need to, because +// the default is ClosedOpen. In later examples we'll omit Kind if it is +// ClosedOpen. +// +// The following range includes all rows in a table or under a +// index: +// +// spanner.AllKeys() +// +// This range returns all users whose UserName begins with any +// character from A to C: +// +// spanner.KeyRange{ +// Start: spanner.Key{"A"}, +// End: spanner.Key{"D"}, +// } +// +// This range returns all users whose UserName begins with B: +// +// spanner.KeyRange{ +// Start: spanner.Key{"B"}, +// End: spanner.Key{"C"}, +// } +// +// Key ranges honor column sort order. For example, suppose a table is defined +// as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 and 100 +// inclusive: +// +// spanner.KeyRange{ +// Start: spanner.Key{100}, +// End: spanner.Key{1}, +// Kind: ClosedClosed, +// } +// +// Note that 100 is passed as the start, and 1 is passed as the end, because +// Key is a descending column in the schema. +type KeyRange struct { + // Start specifies the left boundary of the key range; End specifies + // the right boundary of the key range. + Start, End Key + + // Kind describes whether the boundaries of the key range include + // their keys. + Kind KeyRangeKind +} + +// String implements fmt.Stringer for KeyRange type. 
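+// For example, a ClosedOpen range from Key{1} to Key{10} is rendered as:
+//
+//	[(1),(10))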
+func (r KeyRange) String() string { + var left, right string + switch r.Kind { + case ClosedClosed: + left, right = "[", "]" + case ClosedOpen: + left, right = "[", ")" + case OpenClosed: + left, right = "(", "]" + case OpenOpen: + left, right = "(", ")" + default: + left, right = "?", "?" + } + return fmt.Sprintf("%s%s,%s%s", left, r.Start, r.End, right) +} + +// proto converts KeyRange into sppb.KeyRange. +func (r KeyRange) proto() (*sppb.KeyRange, error) { + var err error + var start, end *proto3.ListValue + pb := &sppb.KeyRange{} + if start, err = r.Start.proto(); err != nil { + return nil, err + } + if end, err = r.End.proto(); err != nil { + return nil, err + } + if r.Kind == ClosedClosed || r.Kind == ClosedOpen { + pb.StartKeyType = &sppb.KeyRange_StartClosed{StartClosed: start} + } else { + pb.StartKeyType = &sppb.KeyRange_StartOpen{StartOpen: start} + } + if r.Kind == ClosedClosed || r.Kind == OpenClosed { + pb.EndKeyType = &sppb.KeyRange_EndClosed{EndClosed: end} + } else { + pb.EndKeyType = &sppb.KeyRange_EndOpen{EndOpen: end} + } + return pb, nil +} + +// keySetProto lets a KeyRange act as a KeySet. +func (r KeyRange) keySetProto() (*sppb.KeySet, error) { + rp, err := r.proto() + if err != nil { + return nil, err + } + return &sppb.KeySet{Ranges: []*sppb.KeyRange{rp}}, nil +} + +// A KeySet defines a collection of Cloud Spanner keys and/or key ranges. All the +// keys are expected to be in the same table or index. The keys need not be sorted in +// any particular way. +// +// An individual Key can act as a KeySet, as can a KeyRange. Use the KeySets function +// to create a KeySet consisting of multiple Keys and KeyRanges. To obtain an empty +// KeySet, call KeySets with no arguments. +// +// If the same key is specified multiple times in the set (for example if two +// ranges, two keys, or a key and a range overlap), the Cloud Spanner backend behaves +// as if the key were only specified once. +type KeySet interface { + keySetProto() (*sppb.KeySet, error) +} + +// AllKeys returns a KeySet that represents all Keys of a table or a index. +func AllKeys() KeySet { + return all{} +} + +type all struct{} + +func (all) keySetProto() (*sppb.KeySet, error) { + return &sppb.KeySet{All: true}, nil +} + +// KeySets returns the union of the KeySets. If any of the KeySets is AllKeys, then +// the resulting KeySet will be equivalent to AllKeys. +func KeySets(keySets ...KeySet) KeySet { + u := make(union, len(keySets)) + copy(u, keySets) + return u +} + +type union []KeySet + +func (u union) keySetProto() (*sppb.KeySet, error) { + upb := &sppb.KeySet{} + for _, ks := range u { + pb, err := ks.keySetProto() + if err != nil { + return nil, err + } + if pb.All { + return pb, nil + } + upb.Keys = append(upb.Keys, pb.Keys...) + upb.Ranges = append(upb.Ranges, pb.Ranges...) + } + return upb, nil +} diff --git a/vendor/cloud.google.com/go/spanner/key_test.go b/vendor/cloud.google.com/go/spanner/key_test.go new file mode 100644 index 0000000..aee47f9 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key_test.go @@ -0,0 +1,373 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Key.String() and Key.proto(). +func TestKey(t *testing.T) { + tm, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + dt, _ := civil.ParseDate("2016-11-15") + for _, test := range []struct { + k Key + wantProto *proto3.ListValue + wantStr string + }{ + { + k: Key{int(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int64(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{true}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{float32(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{float64(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{"value"}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{[]byte(nil)}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{[]byte{}}, + wantProto: listValueProto(stringProto("")), + wantStr: `("")`, + }, + { + k: Key{tm}, + wantProto: listValueProto(stringProto("2016-11-15T15:04:05.999999999Z")), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + {k: Key{dt}, + wantProto: listValueProto(stringProto("2016-11-15")), + wantStr: `("2016-11-15")`, + }, + { + k: Key{[]byte("value")}, + wantProto: listValueProto(bytesProto([]byte("value"))), + wantStr: `("value")`, + }, + { + k: Key{NullInt64{1, true}}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{NullInt64{2, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullFloat64{1.5, true}}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{NullFloat64{2.0, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullBool{true, true}}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{NullBool{true, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullString{"value", true}}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{NullString{"value", false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullTime{tm, true}}, + wantProto: listValueProto(timeProto(tm)), + wantStr: 
`("2016-11-15T15:04:05.999999999Z")`, + }, + + { + k: Key{NullTime{time.Now(), false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullDate{dt, true}}, + wantProto: listValueProto(dateProto(dt)), + wantStr: `("2016-11-15")`, + }, + { + k: Key{NullDate{civil.Date{}, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{int(1), NullString{"value", false}, "value", 1.5, true}, + wantProto: listValueProto(stringProto("1"), nullProto(), stringProto("value"), floatProto(1.5), boolProto(true)), + wantStr: `(1,,"value",1.5,true)`, + }, + } { + if got := test.k.String(); got != test.wantStr { + t.Errorf("%v.String() = %v, want %v", test.k, got, test.wantStr) + } + gotProto, err := test.k.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.k, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.k, gotProto, test.wantProto) + } + } +} + +// Test KeyRange.String() and KeyRange.proto(). +func TestKeyRange(t *testing.T) { + for _, test := range []struct { + kr KeyRange + wantProto *sppb.KeyRange + wantStr string + }{ + { + kr: KeyRange{Key{"A"}, Key{"D"}, OpenOpen}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(stringProto("A"))}, + EndKeyType: &sppb.KeyRange_EndOpen{EndOpen: listValueProto(stringProto("D"))}, + }, + wantStr: `(("A"),("D"))`, + }, + { + kr: KeyRange{Key{1}, Key{10}, OpenClosed}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(stringProto("1"))}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(stringProto("10"))}, + }, + wantStr: "((1),(10)]", + }, + { + kr: KeyRange{Key{1.5, 2.1, 0.2}, Key{1.9, 0.7}, ClosedOpen}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(floatProto(1.5), floatProto(2.1), floatProto(0.2))}, + EndKeyType: &sppb.KeyRange_EndOpen{EndOpen: listValueProto(floatProto(1.9), floatProto(0.7))}, + }, + wantStr: "[(1.5,2.1,0.2),(1.9,0.7))", + }, + { + kr: KeyRange{Key{NullInt64{1, true}}, Key{10}, ClosedClosed}, + wantProto: &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(stringProto("1"))}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(stringProto("10"))}, + }, + wantStr: "[(1),(10)]", + }, + } { + if got := test.kr.String(); got != test.wantStr { + t.Errorf("%v.String() = %v, want %v", test.kr, got, test.wantStr) + } + gotProto, err := test.kr.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.kr, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.kr, gotProto.String(), test.wantProto.String()) + } + } +} + +func TestPrefixRange(t *testing.T) { + got := Key{1}.AsPrefix() + want := KeyRange{Start: Key{1}, End: Key{1}, Kind: ClosedClosed} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestKeySets(t *testing.T) { + int1 := intProto(1) + int2 := intProto(2) + int3 := intProto(3) + int4 := intProto(4) + for i, test := range []struct { + ks KeySet + wantProto *sppb.KeySet + }{ + { + KeySets(), + &sppb.KeySet{}, + }, + { + Key{4}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(int4)}, + }, + }, + { + AllKeys(), + &sppb.KeySet{All: true}, + }, + { + KeySets(Key{1, 2}, Key{3, 4}), + &sppb.KeySet{ + Keys: []*proto3.ListValue{ + listValueProto(int1, int2), 
+ listValueProto(int3, int4), + }, + }, + }, + { + KeyRange{Key{1}, Key{2}, ClosedOpen}, + &sppb.KeySet{Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int1)}, + EndKeyType: &sppb.KeyRange_EndOpen{EndOpen: listValueProto(int2)}, + }, + }}, + }, + { + Key{2}.AsPrefix(), + &sppb.KeySet{Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int2)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int2)}, + }, + }}, + }, + { + KeySets( + KeyRange{Key{1}, Key{2}, ClosedClosed}, + KeyRange{Key{3}, Key{4}, OpenClosed}, + ), + &sppb.KeySet{ + Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int1)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int2)}, + }, + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(int3)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int4)}, + }, + }, + }, + }, + { + KeySets( + Key{1}, + KeyRange{Key{2}, Key{3}, ClosedClosed}, + KeyRange{Key{4}, Key{5}, OpenClosed}, + KeySets(), + Key{6}), + &sppb.KeySet{ + Keys: []*proto3.ListValue{ + listValueProto(int1), + listValueProto(intProto(6)), + }, + Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartClosed{StartClosed: listValueProto(int2)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(int3)}, + }, + &sppb.KeyRange{ + StartKeyType: &sppb.KeyRange_StartOpen{StartOpen: listValueProto(int4)}, + EndKeyType: &sppb.KeyRange_EndClosed{EndClosed: listValueProto(intProto(5))}, + }, + }, + }, + }, + { + KeySets( + Key{1}, + KeyRange{Key{2}, Key{3}, ClosedClosed}, + AllKeys(), + KeyRange{Key{4}, Key{5}, OpenClosed}, + Key{6}), + &sppb.KeySet{All: true}, + }, + } { + gotProto, err := test.ks.keySetProto() + if err != nil { + t.Errorf("#%d: %v.proto() returns error %v; want nil error", i, test.ks, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("#%d: %v.proto() = \n%v\nwant:\n%v", i, test.ks, gotProto.String(), test.wantProto.String()) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/mutation.go b/vendor/cloud.google.com/go/spanner/mutation.go new file mode 100644 index 0000000..4eac915 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation.go @@ -0,0 +1,431 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// op is the mutation operation. +type op int + +const ( + // opDelete removes a row from a table. Succeeds whether or not the + // key was present. + opDelete op = iota + // opInsert inserts a row into a table. If the row already exists, the + // write or transaction fails. + opInsert + // opInsertOrUpdate inserts a row into a table. 
If the row already + // exists, it updates it instead. Any column values not explicitly + // written are preserved. + opInsertOrUpdate + // opReplace inserts a row into a table, deleting any existing row. + // Unlike InsertOrUpdate, this means any values not explicitly written + // become NULL. + opReplace + // opUpdate updates a row in a table. If the row does not already + // exist, the write or transaction fails. + opUpdate +) + +// A Mutation describes a modification to one or more Cloud Spanner rows. The +// mutation represents an insert, update, delete, etc on a table. +// +// Many mutations can be applied in a single atomic commit. For purposes of +// constraint checking (such as foreign key constraints), the operations can be +// viewed as applying in same order as the mutations are supplied in (so that +// e.g., a row and its logical "child" can be inserted in the same commit). +// +// - The Apply function applies series of mutations. +// - A ReadWriteTransaction applies a series of mutations as part of an +// atomic read-modify-write operation. +// Example: +// +// m := spanner.Insert("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// In this example, we insert a new row into the User table. The primary key +// for the new row is UserID (presuming that "user_id" has been declared as the +// primary key of the "User" table). +// +// Updating a row +// +// Changing the values of columns in an existing row is very similar to +// inserting a new row: +// +// m := spanner.Update("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// Deleting a row +// +// To delete a row, use spanner.Delete: +// +// m := spanner.Delete("User", spanner.Key{UserId}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// spanner.Delete accepts a KeySet, so you can also pass in a KeyRange, or use the +// spanner.KeySets function to build any combination of Keys and KeyRanges. +// +// Note that deleting a row in a table may also delete rows from other tables +// if cascading deletes are specified in those tables' schemas. Delete does +// nothing if the named row does not exist (does not yield an error). +// +// Deleting a field +// +// To delete/clear a field within a row, use spanner.Update with the value nil: +// +// m := spanner.Update("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, nil}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// The valid Go types and their corresponding Cloud Spanner types that can be +// used in the Insert/Update/InsertOrUpdate functions are: +// +// string, NullString - STRING +// []string, []NullString - STRING ARRAY +// []byte - BYTES +// [][]byte - BYTES ARRAY +// int, int64, NullInt64 - INT64 +// []int, []int64, []NullInt64 - INT64 ARRAY +// bool, NullBool - BOOL +// []bool, []NullBool - BOOL ARRAY +// float64, NullFloat64 - FLOAT64 +// []float64, []NullFloat64 - FLOAT64 ARRAY +// time.Time, NullTime - TIMESTAMP +// []time.Time, []NullTime - TIMESTAMP ARRAY +// Date, NullDate - DATE +// []Date, []NullDate - DATE ARRAY +// +// To compare two Mutations for testing purposes, use reflect.DeepEqual. +type Mutation struct { + // op is the operation type of the mutation. + // See documentation for spanner.op for more details. + op op + // Table is the name of the target table to be modified. 
+ table string + // keySet is a set of primary keys that names the rows + // in a delete operation. + keySet KeySet + // columns names the set of columns that are going to be + // modified by Insert, InsertOrUpdate, Replace or Update + // operations. + columns []string + // values specifies the new values for the target columns + // named by Columns. + values []interface{} +} + +// mapToMutationParams converts Go map into mutation parameters. +func mapToMutationParams(in map[string]interface{}) ([]string, []interface{}) { + cols := []string{} + vals := []interface{}{} + for k, v := range in { + cols = append(cols, k) + vals = append(vals, v) + } + return cols, vals +} + +// errNotStruct returns error for not getting a go struct type. +func errNotStruct(in interface{}) error { + return spannerErrorf(codes.InvalidArgument, "%T is not a go struct type", in) +} + +// structToMutationParams converts Go struct into mutation parameters. +// If the input is not a valid Go struct type, structToMutationParams +// returns error. +func structToMutationParams(in interface{}) ([]string, []interface{}, error) { + if in == nil { + return nil, nil, errNotStruct(in) + } + v := reflect.ValueOf(in) + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + // t is a pointer to a struct. + if v.IsNil() { + // Return empty results. + return nil, nil, nil + } + // Get the struct value that in points to. + v = v.Elem() + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, nil, errNotStruct(in) + } + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, nil, toSpannerError(err) + } + var cols []string + var vals []interface{} + for _, f := range fields { + cols = append(cols, f.Name) + vals = append(vals, v.FieldByIndex(f.Index).Interface()) + } + return cols, vals, nil +} + +// Insert returns a Mutation to insert a row into a table. If the row already +// exists, the write or transaction fails. +func Insert(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsert, + table: table, + columns: cols, + values: vals, + } +} + +// InsertMap returns a Mutation to insert a row into a table, specified by +// a map of column name to value. If the row already exists, the write or +// transaction fails. +func InsertMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Insert(table, cols, vals) +} + +// InsertStruct returns a Mutation to insert a row into a table, specified by +// a Go struct. If the row already exists, the write or transaction fails. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func InsertStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Insert(table, cols, vals), nil +} + +// Update returns a Mutation to update a row in a table. If the row does not +// already exist, the write or transaction fails. +func Update(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// UpdateMap returns a Mutation to update a row in a table, specified by +// a map of column to value. If the row does not already exist, the write or +// transaction fails. 
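+//
+// A sketch of typical usage, with ctx, client, UserID and profile as in the
+// package examples above:
+//
+//	m := spanner.UpdateMap("User", map[string]interface{}{
+//		"user_id": UserID,
+//		"profile": profile,
+//	})
+//	_, err := client.Apply(ctx, []*spanner.Mutation{m})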
+func UpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Update(table, cols, vals) +} + +// UpdateStruct returns a Mutation to update a row in a table, specified by a Go +// struct. If the row does not already exist, the write or transaction fails. +func UpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Update(table, cols, vals), nil +} + +// InsertOrUpdate returns a Mutation to insert a row into a table. If the row +// already exists, it updates it instead. Any column values not explicitly +// written are preserved. +// +// For a similar example, See Update. +func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsertOrUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// InsertOrUpdateMap returns a Mutation to insert a row into a table, +// specified by a map of column to value. If the row already exists, it +// updates it instead. Any column values not explicitly written are preserved. +// +// For a similar example, See UpdateMap. +func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return InsertOrUpdate(table, cols, vals) +} + +// InsertOrUpdateStruct returns a Mutation to insert a row into a table, +// specified by a Go struct. If the row already exists, it updates it instead. +// Any column values not explicitly written are preserved. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +// +// For a similar example, See UpdateStruct. +func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return InsertOrUpdate(table, cols, vals), nil +} + +// Replace returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdate, this means any values not explicitly +// written become NULL. +// +// For a similar example, See Update. +func Replace(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opReplace, + table: table, + columns: cols, + values: vals, + } +} + +// ReplaceMap returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a map of column to value. +// +// For a similar example, See UpdateMap. +func ReplaceMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Replace(table, cols, vals) +} + +// ReplaceStruct returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a Go struct. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +// +// For a similar example, See UpdateStruct. 
+func ReplaceStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Replace(table, cols, vals), nil +} + +// Delete removes the rows described by the KeySet from the table. It succeeds +// whether or not the keys were present. +func Delete(table string, ks KeySet) *Mutation { + return &Mutation{ + op: opDelete, + table: table, + keySet: ks, + } +} + +// prepareWrite generates sppb.Mutation_Write from table name, column names +// and new column values. +func prepareWrite(table string, columns []string, vals []interface{}) (*sppb.Mutation_Write, error) { + v, err := encodeValueArray(vals) + if err != nil { + return nil, err + } + return &sppb.Mutation_Write{ + Table: table, + Columns: columns, + Values: []*proto3.ListValue{v}, + }, nil +} + +// errInvdMutationOp returns error for unrecognized mutation operation. +func errInvdMutationOp(m Mutation) error { + return spannerErrorf(codes.InvalidArgument, "Unknown op type: %d", m.op) +} + +// proto converts spanner.Mutation to sppb.Mutation, in preparation to send +// RPCs. +func (m Mutation) proto() (*sppb.Mutation, error) { + var pb *sppb.Mutation + switch m.op { + case opDelete: + var kp *sppb.KeySet + if m.keySet != nil { + var err error + kp, err = m.keySet.keySetProto() + if err != nil { + return nil, err + } + } + pb = &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: m.table, + KeySet: kp, + }, + }, + } + case opInsert: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Insert{Insert: w}} + case opInsertOrUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_InsertOrUpdate{InsertOrUpdate: w}} + case opReplace: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Replace{Replace: w}} + case opUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Update{Update: w}} + default: + return nil, errInvdMutationOp(m) + } + return pb, nil +} + +// mutationsProto turns a spanner.Mutation array into a sppb.Mutation array, +// it is convenient for sending batch mutations to Cloud Spanner. +func mutationsProto(ms []*Mutation) ([]*sppb.Mutation, error) { + l := make([]*sppb.Mutation, 0, len(ms)) + for _, m := range ms { + pb, err := m.proto() + if err != nil { + return nil, err + } + l = append(l, pb) + } + return l, nil +} diff --git a/vendor/cloud.google.com/go/spanner/mutation_test.go b/vendor/cloud.google.com/go/spanner/mutation_test.go new file mode 100644 index 0000000..7a0a941 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation_test.go @@ -0,0 +1,571 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "sort" + "strings" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// keysetProto returns protobuf encoding of valid spanner.KeySet. +func keysetProto(t *testing.T, ks KeySet) *sppb.KeySet { + k, err := ks.keySetProto() + if err != nil { + t.Fatalf("cannot convert keyset %v to protobuf: %v", ks, err) + } + return k +} + +// Test encoding from spanner.Mutation to protobuf. +func TestMutationToProto(t *testing.T) { + for i, test := range []struct { + m *Mutation + want *sppb.Mutation + }{ + // Delete Mutation + { + &Mutation{opDelete, "t_foo", Key{"foo"}, nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_foo", + KeySet: keysetProto(t, Key{"foo"}), + }, + }, + }, + }, + // Insert Mutation + { + &Mutation{opInsert, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{intProto(1), intProto(2)}, + }, + }, + }, + }, + }, + }, + // InsertOrUpdate Mutation + { + &Mutation{opInsertOrUpdate, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{floatProto(1.0), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Replace Mutation + { + &Mutation{opReplace, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Update Mutation + { + &Mutation{opUpdate, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), nullProto()}, + }, + }, + }, + }, + }, + }, + } { + if got, err := test.m.proto(); err != nil || !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: (%#v).proto() = (%v, %v), want (%v, nil)", i, test.m, got, err, test.want) + } + } +} + +// mutationColumnSorter implements sort.Interface for sorting column-value pairs in a Mutation by column names. +type mutationColumnSorter struct { + Mutation +} + +// newMutationColumnSorter creates new instance of mutationColumnSorter by duplicating the input Mutation so that +// sorting won't change the input Mutation. +func newMutationColumnSorter(m *Mutation) *mutationColumnSorter { + return &mutationColumnSorter{ + Mutation{ + m.op, + m.table, + m.keySet, + append([]string(nil), m.columns...), + append([]interface{}(nil), m.values...), + }, + } +} + +// Len implements sort.Interface.Len. +func (ms *mutationColumnSorter) Len() int { + return len(ms.columns) +} + +// Swap implements sort.Interface.Swap. 
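+// Column names and their paired values are swapped together so the pairing
+// stays intact.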
+func (ms *mutationColumnSorter) Swap(i, j int) { + ms.columns[i], ms.columns[j] = ms.columns[j], ms.columns[i] + ms.values[i], ms.values[j] = ms.values[j], ms.values[i] +} + +// Less implements sort.Interface.Less. +func (ms *mutationColumnSorter) Less(i, j int) bool { + return strings.Compare(ms.columns[i], ms.columns[j]) < 0 +} + +// mutationEqual returns true if two mutations in question are equal +// to each other. +func mutationEqual(t *testing.T, m1, m2 Mutation) bool { + // Two mutations are considered to be equal even if their column values have different + // orders. + ms1 := newMutationColumnSorter(&m1) + ms2 := newMutationColumnSorter(&m2) + sort.Sort(ms1) + sort.Sort(ms2) + return reflect.DeepEqual(ms1, ms2) +} + +// Test helper functions which help to generate spanner.Mutation. +func TestMutationHelpers(t *testing.T) { + for _, test := range []struct { + m string + got *Mutation + want *Mutation + }{ + { + "Insert", + Insert("t_foo", []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}), + &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertMap", + InsertMap("t_foo", map[string]interface{}{"col1": int64(1), "col2": int64(2)}), + &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertStruct", + func() *Mutation { + m, err := InsertStruct( + "t_foo", + struct { + notCol bool + Col1 int64 `spanner:"col1"` + Col2 int64 `spanner:"col2"` + }{false, int64(1), int64(2)}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "Update", + Update("t_foo", []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}), + &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateMap", + UpdateMap("t_foo", map[string]interface{}{"col1": "one", "col2": []byte(nil)}), + &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateStruct", + func() *Mutation { + m, err := UpdateStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + notCol int + Col2 []byte `spanner:"col2"` + }{"one", 1, nil}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "InsertOrUpdate", + InsertOrUpdate("t_foo", []string{"col1", "col2"}, []interface{}{1.0, 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateMap", + InsertOrUpdateMap("t_foo", map[string]interface{}{"col1": 1.0, "col2": 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateStruct", + func() *Mutation { + m, err := InsertOrUpdateStruct( + "t_foo", + struct { + Col1 float64 `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol float64 + }{1.0, 2.0, 3.0}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "Replace", + Replace("t_foo", []string{"col1", "col2"}, []interface{}{"one", 2.0}), + &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + 
"ReplaceMap", + ReplaceMap("t_foo", map[string]interface{}{"col1": "one", "col2": 2.0}), + &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceStruct", + func() *Mutation { + m, err := ReplaceStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol string + }{"one", 2.0, "foo"}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "Delete", + Delete("t_foo", Key{"foo"}), + &Mutation{opDelete, "t_foo", Key{"foo"}, nil, nil}, + }, + { + "DeleteRange", + Delete("t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), + &Mutation{opDelete, "t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}, nil, nil}, + }, + } { + if !mutationEqual(t, *test.got, *test.want) { + t.Errorf("%v: got Mutation %v, want %v", test.m, test.got, test.want) + } + } +} + +// Test encoding non-struct types by using *Struct helpers. +func TestBadStructs(t *testing.T) { + val := "i_am_not_a_struct" + wantErr := errNotStruct(val) + if _, gotErr := InsertStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("InsertStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := InsertOrUpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("InsertOrUpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := UpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("UpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := ReplaceStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("ReplaceStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } +} + +func TestStructToMutationParams(t *testing.T) { + // Tests cases not covered elsewhere. + type S struct{ F int } + + for _, test := range []struct { + in interface{} + wantCols []string + wantVals []interface{} + wantErr error + }{ + {nil, nil, nil, errNotStruct(nil)}, + {3, nil, nil, errNotStruct(3)}, + {(*S)(nil), nil, nil, nil}, + {&S{F: 1}, []string{"F"}, []interface{}{1}, nil}, + } { + gotCols, gotVals, gotErr := structToMutationParams(test.in) + if !reflect.DeepEqual(gotCols, test.wantCols) { + t.Errorf("%#v: got cols %v, want %v", test.in, gotCols, test.wantCols) + } + if !reflect.DeepEqual(gotVals, test.wantVals) { + t.Errorf("%#v: got vals %v, want %v", test.in, gotVals, test.wantVals) + } + if !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%#v: got err %v, want %v", test.in, gotErr, test.wantErr) + } + } +} + +// Test encoding Mutation into proto. 
+func TestEncodeMutation(t *testing.T) { + for _, test := range []struct { + name string + mutation Mutation + wantProto *sppb.Mutation + wantErr error + }{ + { + "OpDelete", + Mutation{opDelete, "t_test", Key{1}, nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(intProto(1))}, + }, + }, + }, + }, + nil, + }, + { + "OpDelete - Key error", + Mutation{opDelete, "t_test", Key{struct{}{}}, nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{}, + }, + }, + }, + errInvdKeyPartType(struct{}{}), + }, + { + "OpInsert", + Mutation{opInsert, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsert - Value Type Error", + Mutation{opInsert, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpInsertOrUpdate", + Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsertOrUpdate - Value Type Error", + Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpReplace", + Mutation{opReplace, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpReplace - Value Type Error", + Mutation{opReplace, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpUpdate", + Mutation{opUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpUpdate - Value Type Error", + Mutation{opUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpKnown - Unknown Mutation Operation Code", + Mutation{op(100), "t_test", nil, nil, nil}, + &sppb.Mutation{}, + errInvdMutationOp(Mutation{op(100), "t_test", nil, nil, nil}), + }, + } { + gotProto, gotErr := test.mutation.proto() + if gotErr != nil { + if 
!reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%s: %v.proto() returns error %v, want %v", test.name, test.mutation, gotErr, test.wantErr) + } + continue + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%s: %v.proto() = (%v, nil), want (%v, nil)", test.name, test.mutation, gotProto, test.wantProto) + } + } +} + +// Test Encoding an array of mutations. +func TestEncodeMutationArray(t *testing.T) { + for _, test := range []struct { + name string + ms []*Mutation + want []*sppb.Mutation + wantErr error + }{ + { + "Multiple Mutations", + []*Mutation{ + &Mutation{opDelete, "t_test", Key{"bar"}, nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, + }, + []*sppb.Mutation{ + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(stringProto("bar"))}, + }, + }, + }, + }, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + }, + nil, + }, + { + "Multiple Mutations - Bad Mutation", + []*Mutation{ + &Mutation{opDelete, "t_test", Key{"bar"}, nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", struct{}{}}}, + }, + []*sppb.Mutation{}, + errEncoderUnsupportedType(struct{}{}), + }, + } { + gotProto, gotErr := mutationsProto(test.ms) + if gotErr != nil { + if !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: mutationsProto(%v) returns error %v, want %v", test.name, test.ms, gotErr, test.wantErr) + } + continue + } + if !reflect.DeepEqual(gotProto, test.want) { + t.Errorf("%v: mutationsProto(%v) = (%v, nil), want (%v, nil)", test.name, test.ms, gotProto, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/not_go18.go b/vendor/cloud.google.com/go/spanner/not_go18.go new file mode 100644 index 0000000..5db0f03 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/not_go18.go @@ -0,0 +1,23 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.8 + +package spanner + +import "google.golang.org/api/option" + +// OpenCensus only supports go 1.8 and higher. + +func openCensusOptions() []option.ClientOption { return nil } diff --git a/vendor/cloud.google.com/go/spanner/protoutils.go b/vendor/cloud.google.com/go/spanner/protoutils.go new file mode 100644 index 0000000..a6fcdd7 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/protoutils.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "strconv" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Helpers to generate protobuf values and Cloud Spanner types. + +func stringProto(s string) *proto3.Value { + return &proto3.Value{Kind: stringKind(s)} +} + +func stringKind(s string) *proto3.Value_StringValue { + return &proto3.Value_StringValue{StringValue: s} +} + +func stringType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRING} +} + +func boolProto(b bool) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: b}} +} + +func boolType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BOOL} +} + +func intProto(n int64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(n, 10)}} +} + +func intType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_INT64} +} + +func floatProto(n float64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: n}} +} + +func floatType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_FLOAT64} +} + +func bytesProto(b []byte) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}} +} + +func bytesType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BYTES} +} + +func timeProto(t time.Time) *proto3.Value { + return stringProto(t.UTC().Format(time.RFC3339Nano)) +} + +func timeType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_TIMESTAMP} +} + +func dateProto(d civil.Date) *proto3.Value { + return stringProto(d.String()) +} + +func dateType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_DATE} +} + +func listProto(p ...*proto3.Value) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{Values: p}}} +} + +func listValueProto(p ...*proto3.Value) *proto3.ListValue { + return &proto3.ListValue{Values: p} +} + +func listType(t *sppb.Type) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_ARRAY, ArrayElementType: t} +} + +func mkField(n string, t *sppb.Type) *sppb.StructType_Field { + return &sppb.StructType_Field{Name: n, Type: t} +} + +func structType(fields ...*sppb.StructType_Field) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{Fields: fields}} +} + +func nullProto() *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}} +} diff --git a/vendor/cloud.google.com/go/spanner/read.go b/vendor/cloud.google.com/go/spanner/read.go new file mode 100644 index 0000000..5d733f1 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/read.go @@ -0,0 +1,685 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "io" + "sync/atomic" + "time" + + log "github.com/golang/glog" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// streamingReceiver is the interface for receiving data from a client side +// stream. +type streamingReceiver interface { + Recv() (*sppb.PartialResultSet, error) +} + +// errEarlyReadEnd returns error for read finishes when gRPC stream is still active. +func errEarlyReadEnd() error { + return spannerErrorf(codes.FailedPrecondition, "read completed with active stream") +} + +// stream is the internal fault tolerant method for streaming data from +// Cloud Spanner. +func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), setTimestamp func(time.Time), release func(error)) *RowIterator { + ctx, cancel := context.WithCancel(ctx) + return &RowIterator{ + streamd: newResumableStreamDecoder(ctx, rpc), + rowd: &partialResultSetDecoder{}, + setTimestamp: setTimestamp, + release: release, + cancel: cancel, + } +} + +// RowIterator is an iterator over Rows. +type RowIterator struct { + streamd *resumableStreamDecoder + rowd *partialResultSetDecoder + setTimestamp func(time.Time) + release func(error) + cancel func() + err error + rows []*Row +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns Done, all subsequent calls +// will return Done. +func (r *RowIterator) Next() (*Row, error) { + if r.err != nil { + return nil, r.err + } + for len(r.rows) == 0 && r.streamd.next() { + r.rows, r.err = r.rowd.add(r.streamd.get()) + if r.err != nil { + return nil, r.err + } + if !r.rowd.ts.IsZero() && r.setTimestamp != nil { + r.setTimestamp(r.rowd.ts) + r.setTimestamp = nil + } + } + if len(r.rows) > 0 { + row := r.rows[0] + r.rows = r.rows[1:] + return row, nil + } + if err := r.streamd.lastErr(); err != nil { + r.err = toSpannerError(err) + } else if !r.rowd.done() { + r.err = errEarlyReadEnd() + } else { + r.err = iterator.Done + } + return nil, r.err +} + +// Do calls the provided function once in sequence for each row in the iteration. If the +// function returns a non-nil error, Do immediately returns that error. +// +// If there are no rows in the iterator, Do will return nil without calling the +// provided function. +// +// Do always calls Stop on the iterator. +func (r *RowIterator) Do(f func(r *Row) error) error { + defer r.Stop() + for { + row, err := r.Next() + switch err { + case iterator.Done: + return nil + case nil: + if err = f(row); err != nil { + return err + } + default: + return err + } + } +} + +// Stop terminates the iteration. It should be called after every iteration. 
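+//
+// A typical iteration, sketched for context (iter would come from a read or
+// query call elsewhere in this package; this mirrors what Do implements above):
+//
+//    defer iter.Stop()
+//    for {
+//        row, err := iter.Next()
+//        if err == iterator.Done {
+//            break
+//        }
+//        if err != nil {
+//            return err
+//        }
+//        // ... consume row ...
+//    }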
+func (r *RowIterator) Stop() {
+    if r.cancel != nil {
+        r.cancel()
+    }
+    if r.release != nil {
+        r.release(r.err)
+        if r.err == nil {
+            r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop")
+        }
+        r.release = nil
+    }
+}
+
+// partialResultQueue implements a simple FIFO queue. The zero value is a
+// valid queue.
+type partialResultQueue struct {
+    q     []*sppb.PartialResultSet
+    first int
+    last  int
+    n     int // number of elements in queue
+}
+
+// empty reports whether the partialResultQueue is empty.
+func (q *partialResultQueue) empty() bool {
+    return q.n == 0
+}
+
+// errEmptyQueue returns the error for dequeuing an empty queue.
+func errEmptyQueue() error {
+    return spannerErrorf(codes.OutOfRange, "empty partialResultQueue")
+}
+
+// peekLast returns the last item in partialResultQueue; if the queue
+// is empty, it returns an error.
+func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) {
+    if q.empty() {
+        return nil, errEmptyQueue()
+    }
+    return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil
+}
+
+// push adds an item to the tail of partialResultQueue.
+func (q *partialResultQueue) push(r *sppb.PartialResultSet) {
+    if q.q == nil {
+        q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */)
+    }
+    if q.n == cap(q.q) {
+        buf := make([]*sppb.PartialResultSet, cap(q.q)*2)
+        for i := 0; i < q.n; i++ {
+            buf[i] = q.q[(q.first+i)%cap(q.q)]
+        }
+        q.q = buf
+        q.first = 0
+        q.last = q.n
+    }
+    q.q[q.last] = r
+    q.last = (q.last + 1) % cap(q.q)
+    q.n++
+}
+
+// pop removes an item from the head of partialResultQueue and returns
+// it.
+func (q *partialResultQueue) pop() *sppb.PartialResultSet {
+    if q.n == 0 {
+        return nil
+    }
+    r := q.q[q.first]
+    q.q[q.first] = nil
+    q.first = (q.first + 1) % cap(q.q)
+    q.n--
+    return r
+}
+
+// clear empties partialResultQueue.
+func (q *partialResultQueue) clear() {
+    *q = partialResultQueue{}
+}
+
+// dump retrieves all items from partialResultQueue and returns them in a
+// slice. It is used only in tests.
+func (q *partialResultQueue) dump() []*sppb.PartialResultSet {
+    var dq []*sppb.PartialResultSet
+    for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) {
+        dq = append(dq, q.q[i])
+    }
+    return dq
+}
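+
+// For orientation, a short sketch of how this ring-buffered queue behaves;
+// the resume tokens are arbitrary illustrative values:
+//
+//    var q partialResultQueue
+//    q.push(&sppb.PartialResultSet{ResumeToken: []byte("t1")})
+//    q.push(&sppb.PartialResultSet{ResumeToken: []byte("t2")})
+//    last, _ := q.peekLast() // inspects the tail ("t2") without removing it
+//    head := q.pop()         // removes and returns the head ("t1")
+//    q.clear()               // resets the queue to its zero value
+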
+// resumableStreamDecoderState encodes resumableStreamDecoder's status.
+// See also the comments for resumableStreamDecoder.next.
+type resumableStreamDecoderState int
+
+const (
+    unConnected         resumableStreamDecoderState = iota // 0
+    queueingRetryable                                      // 1
+    queueingUnretryable                                    // 2
+    aborted                                                // 3
+    finished                                               // 4
+)
+
+// resumableStreamDecoder provides a resumable interface for receiving
+// sppb.PartialResultSet(s) from a given query wrapped by
+// resumableStreamDecoder.rpc().
+type resumableStreamDecoder struct {
+    // state is the current status of resumableStreamDecoder; see also
+    // the comments for resumableStreamDecoder.next.
+    state resumableStreamDecoderState
+    // stateWitness, when non-nil, is called to observe state changes;
+    // used for testing.
+    stateWitness func(resumableStreamDecoderState)
+    // ctx is the caller's context, used to cancel or time out next().
+    ctx context.Context
+    // rpc is a factory of streamingReceiver, which might resume
+    // a previous stream from the point encoded in restartToken.
+    // rpc is always a wrapper of a Cloud Spanner query which is
+    // resumable.
+    rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error)
+    // stream is the current RPC streaming receiver.
+    stream streamingReceiver
+    // q buffers received yet undecoded partial results.
+    q partialResultQueue
+    // bytesBetweenResumeTokens is a proxy for the byte size of the PartialResultSets
+    // queued between two resume tokens. Once bytesBetweenResumeTokens is greater than
+    // maxBytesBetweenResumeTokens, resumableStreamDecoder goes into queueingUnretryable state.
+    bytesBetweenResumeTokens int32
+    // maxBytesBetweenResumeTokens is the maximum number of bytes that can be buffered
+    // between two resume tokens. It is always copied from the global maxBytesBetweenResumeTokens
+    // atomically.
+    maxBytesBetweenResumeTokens int32
+    // np is the next sppb.PartialResultSet ready to be returned
+    // to the caller of resumableStreamDecoder.get().
+    np *sppb.PartialResultSet
+    // resumeToken stores the resume token that resumableStreamDecoder has
+    // last revealed to the caller.
+    resumeToken []byte
+    // retryCount is the number of retries that have been carried out so far.
+    retryCount int
+    // err is the last error resumableStreamDecoder has encountered so far.
+    err error
+    // backoff computes the delays between retries.
+    backoff exponentialBackoff
+}
+
+// newResumableStreamDecoder creates a new resumableStreamDecoder instance.
+// Parameter rpc should be a function that creates a new stream, beginning
+// at the restartToken if non-nil.
+func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder {
+    return &resumableStreamDecoder{
+        ctx: ctx,
+        rpc: rpc,
+        maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens),
+        backoff: defaultBackoff,
+    }
+}
+
+// changeState carries out a state transition for resumableStreamDecoder.
+func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) {
+    if d.state == queueingRetryable && d.state != target {
+        // Reset bytesBetweenResumeTokens because it is only meaningful/changed under
+        // queueingRetryable state.
+        d.bytesBetweenResumeTokens = 0
+    }
+    d.state = target
+    if d.stateWitness != nil {
+        d.stateWitness(target)
+    }
+}
+
+// isNewResumeToken reports whether the observed resume token differs from
+// the one the server returned last time.
+func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool {
+    if rt == nil {
+        return false
+    }
+    if bytes.Compare(rt, d.resumeToken) == 0 {
+        return false
+    }
+    return true
+}
+
+// next advances to the next available partial result set. It returns false
+// on error or end of stream; call lastErr to distinguish the two.
+// The following diagram illustrates the state machine of resumableStreamDecoder
+// that next() implements. Note that state transitions can be triggered only by
+// RPC activity.
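+//
+// As a concrete illustration of how resumption is wired up: the rpc factory is
+// re-invoked with the last observed resume token whenever the stream must be
+// re-established. A sketch of such a factory, mirroring the pattern in this
+// package's tests (the client value and SQL text are assumptions):
+//
+//    rpc := func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
+//        return client.ExecuteStreamingSql(ctx, &sppb.ExecuteSqlRequest{
+//            Sql:         "SELECT ...", // some resumable query
+//            ResumeToken: resumeToken,  // nil on the first attempt
+//        })
+//    }
+//    d := newResumableStreamDecoder(ctx, rpc)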
+/* + rpc() fails retryable + +---------+ + | | rpc() fails unretryable/ctx timeouts or cancelled + | | +------------------------------------------------+ + | | | | + | v | v + | +---+---+---+ +--------+ +------+--+ + +-----+unConnected| |finished| | aborted |<----+ + | | ++-----+-+ +------+--+ | + +---+----+--+ ^ ^ ^ | + | ^ | | | | + | | | | recv() fails | + | | | | | | + | |recv() fails retryable | | | | + | |with valid ctx | | | | + | | | | | | + rpc() succeeds | +-----------------------+ | | | + | | | recv EOF recv EOF | | + | | | | | | + v | | Queue size exceeds | | | + +---+----+---+----+threshold +-------+-----------+ | | ++---------->+ +--------------->+ +-+ | +| |queueingRetryable| |queueingUnretryable| | +| | +<---------------+ | | +| +---+----------+--+ pop() returns +--+----+-----------+ | +| | | resume token | ^ | +| | | | | | +| | | | | | ++---------------+ | | | | + recv() succeeds | +----+ | + | recv() succeeds | + | | + | | + | | + | | + | | + +--------------------------------------------------+ + recv() fails unretryable + +*/ +var ( + // maxBytesBetweenResumeTokens is the maximum amount of bytes that resumableStreamDecoder + // in queueingRetryable state can use to queue PartialResultSets before getting + // into queueingUnretryable state. + maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024) +) + +func (d *resumableStreamDecoder) next() bool { + for { + select { + case <-d.ctx.Done(): + // Do context check here so that even gRPC failed to do + // so, resumableStreamDecoder can still break the loop + // as expected. + d.err = errContextCanceled(d.ctx, d.err) + d.changeState(aborted) + default: + } + switch d.state { + case unConnected: + // If no gRPC stream is available, try to initiate one. + if d.stream, d.err = d.rpc(d.ctx, d.resumeToken); d.err != nil { + if isRetryable(d.err) { + d.doBackOff() + // Be explicit about state transition, although the + // state doesn't actually change. State transition + // will be triggered only by RPC activity, regardless of + // whether there is an actual state change or not. + d.changeState(unConnected) + continue + } + d.changeState(aborted) + continue + } + d.resetBackOff() + d.changeState(queueingRetryable) + continue + case queueingRetryable: + fallthrough + case queueingUnretryable: + // Receiving queue is not empty. + last, err := d.q.peekLast() + if err != nil { + // Only the case that receiving queue is empty could cause peekLast to + // return error and in such case, we should try to receive from stream. + d.tryRecv() + continue + } + if d.isNewResumeToken(last.ResumeToken) { + // Got new resume token, return buffered sppb.PartialResultSets to caller. + d.np = d.q.pop() + if d.q.empty() { + d.bytesBetweenResumeTokens = 0 + // The new resume token was just popped out from queue, record it. + d.resumeToken = d.np.ResumeToken + d.changeState(queueingRetryable) + } + return true + } + if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable { + d.changeState(queueingUnretryable) + continue + } + if d.state == queueingUnretryable { + // When there is no resume token observed, + // only yield sppb.PartialResultSets to caller under + // queueingUnretryable state. + d.np = d.q.pop() + return true + } + // Needs to receive more from gRPC stream till a new resume token + // is observed. + d.tryRecv() + continue + case aborted: + // Discard all pending items because none of them + // should be yield to caller. 
+            d.q.clear()
+            return false
+        case finished:
+            // If the query has finished, check whether there are still buffered messages.
+            if d.q.empty() {
+                // No buffered PartialResultSet.
+                return false
+            }
+            // Although the query has finished, there are still buffered PartialResultSets.
+            d.np = d.q.pop()
+            return true
+        default:
+            log.Errorf("Unexpected resumableStreamDecoder.state: %v", d.state)
+            return false
+        }
+    }
+}
+
+// tryRecv attempts to receive a PartialResultSet from the gRPC stream.
+func (d *resumableStreamDecoder) tryRecv() {
+    var res *sppb.PartialResultSet
+    if res, d.err = d.stream.Recv(); d.err != nil {
+        if d.err == io.EOF {
+            d.err = nil
+            d.changeState(finished)
+            return
+        }
+        if isRetryable(d.err) && d.state == queueingRetryable {
+            d.err = nil
+            // Discard all queue items (none have resume tokens).
+            d.q.clear()
+            d.stream = nil
+            d.changeState(unConnected)
+            d.doBackOff()
+            return
+        }
+        d.changeState(aborted)
+        return
+    }
+    d.q.push(res)
+    if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) {
+        // Adjust d.bytesBetweenResumeTokens.
+        d.bytesBetweenResumeTokens += int32(proto.Size(res))
+    }
+    d.resetBackOff()
+    d.changeState(d.state)
+}
+
+// resetBackOff clears the internal retry counter of
+// resumableStreamDecoder so that the next exponential
+// backoff will start at a fresh state.
+func (d *resumableStreamDecoder) resetBackOff() {
+    d.retryCount = 0
+}
+
+// doBackOff performs an exponential backoff sleep.
+func (d *resumableStreamDecoder) doBackOff() {
+    ticker := time.NewTicker(d.backoff.delay(d.retryCount))
+    defer ticker.Stop()
+    d.retryCount++
+    select {
+    case <-d.ctx.Done():
+    case <-ticker.C:
+    }
+}
+
+// get returns the most recent PartialResultSet generated by a call to next.
+func (d *resumableStreamDecoder) get() *sppb.PartialResultSet {
+    return d.np
+}
+
+// lastErr returns the last non-EOF error encountered.
+func (d *resumableStreamDecoder) lastErr() error {
+    return d.err
+}
+
+// partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner
+// Rows.
+type partialResultSetDecoder struct {
+    row     Row
+    tx      *sppb.Transaction
+    chunked bool      // if true, the next value should be merged with the last values entry.
+    ts      time.Time // read timestamp
+}
+
+// yield checks whether we have a complete row, and if so returns it. A row is
+// not complete if it doesn't have enough columns, or if this is a chunked
+// response and there are no further values to process.
+func (p *partialResultSetDecoder) yield(chunked, last bool) *Row {
+    if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) {
+        // When partialResultSetDecoder has received enough column
+        // values, there are two cases in which a new Row should be
+        // yielded:
+        //  1. The incoming PartialResultSet is not chunked;
+        //  2. The incoming PartialResultSet is chunked, but the
+        //     proto3.Value being merged is not the last one in
+        //     the PartialResultSet.
+        //
+        // Use a fresh Row to simplify clients that want to use yielded results
+        // after the next row is retrieved. Note that fields is never changed
+        // so it doesn't need to be copied.
+        fresh := Row{
+            fields: p.row.fields,
+            vals:   make([]*proto3.Value, len(p.row.vals)),
+        }
+        copy(fresh.vals, p.row.vals)
+        p.row.vals = p.row.vals[:0] // empty and reuse slice
+        return &fresh
+    }
+    return nil
+}
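+
+// To make the chunked-value handling concrete before the decoding helpers
+// below: two PartialResultSets carrying a split string value are reassembled
+// roughly like this (md stands in for single-STRING-column metadata and is an
+// assumption of the sketch):
+//
+//    p := &partialResultSetDecoder{}
+//    // The first result ends mid-value; ChunkedValue marks its last value as a prefix.
+//    rows, _ := p.add(&sppb.PartialResultSet{
+//        Metadata:     md,
+//        Values:       []*proto3.Value{stringProto("Hel")},
+//        ChunkedValue: true,
+//    }) // rows is empty; "Hel" awaits completion
+//    // The second result completes the value: "Hel" + "lo" merge into "Hello".
+//    rows, _ = p.add(&sppb.PartialResultSet{
+//        Values: []*proto3.Value{stringProto("lo")},
+//    }) // rows now holds one complete Row containing "Hello"
+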
+// errChunkedEmptyRow returns the error for a chunked PartialResultSet that
+// arrives while the row buffer is empty.
+func errChunkedEmptyRow() error {
+    return spannerErrorf(codes.FailedPrecondition, "got invalid chunked PartialResultSet with empty Row")
+}
+
+// add tries to merge a new PartialResultSet into the buffered Row. It returns
+// any rows that have been completed as a result.
+func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, error) {
+    var rows []*Row
+    if r.Metadata != nil {
+        // Metadata should only be returned in the first result.
+        if p.row.fields == nil {
+            p.row.fields = r.Metadata.RowType.Fields
+        }
+        if p.tx == nil && r.Metadata.Transaction != nil {
+            p.tx = r.Metadata.Transaction
+            if p.tx.ReadTimestamp != nil {
+                p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos))
+            }
+        }
+    }
+    if len(r.Values) == 0 {
+        return nil, nil
+    }
+    if p.chunked {
+        p.chunked = false
+        // Try to merge the first value in r.Values into the
+        // incomplete row.
+        last := len(p.row.vals) - 1
+        if last < 0 { // sanity check
+            return nil, errChunkedEmptyRow()
+        }
+        var err error
+        // If p is chunked, then we should always try to merge p.last with r.first.
+        if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil {
+            return nil, err
+        }
+        r.Values = r.Values[1:]
+        // The merge is done; try to yield a complete Row.
+        if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil {
+            rows = append(rows, row)
+        }
+    }
+    for i, v := range r.Values {
+        // The remaining values in r can be appended to p directly.
+        p.row.vals = append(p.row.vals, v)
+        // Again, check whether a complete Row can be yielded because of
+        // the newly added value.
+        if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil {
+            rows = append(rows, row)
+        }
+    }
+    if r.ChunkedValue {
+        // After dealing with all values in r: if r is chunked, then p must
+        // also be chunked.
+        p.chunked = true
+    }
+    return rows, nil
+}
+
+// isMergeable reports whether a protobuf Value can potentially be merged with
+// other protobuf Values.
+func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool {
+    switch a.Kind.(type) {
+    case *proto3.Value_StringValue:
+        return true
+    case *proto3.Value_ListValue:
+        return true
+    default:
+        return false
+    }
+}
+
+// errIncompatibleMergeTypes returns the error for incompatible protobuf types
+// that cannot be merged by partialResultSetDecoder.
+func errIncompatibleMergeTypes(a, b *proto3.Value) error {
+    return spannerErrorf(codes.FailedPrecondition, "incompatible type in chunked PartialResultSet. expected (%T), got (%T)", a.Kind, b.Kind)
+}
+
+// errUnsupportedMergeType returns the error for a protobuf type that cannot be
+// merged with other protobuf values.
+func errUnsupportedMergeType(a *proto3.Value) error {
+    return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind)
+}
+
+// merge tries to combine two protobuf Values if possible.
+func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) {
+    var err error
+    typeErr := errIncompatibleMergeTypes(a, b)
+    switch t := a.Kind.(type) {
+    case *proto3.Value_StringValue:
+        s, ok := b.Kind.(*proto3.Value_StringValue)
+        if !ok {
+            return nil, typeErr
+        }
+        return &proto3.Value{
+            Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue},
+        }, nil
+    case *proto3.Value_ListValue:
+        l, ok := b.Kind.(*proto3.Value_ListValue)
+        if !ok {
+            return nil, typeErr
+        }
+        if l.ListValue == nil || len(l.ListValue.Values) <= 0 {
+            // b is an empty list, just return a.
+            return a, nil
+        }
+        if t.ListValue == nil || len(t.ListValue.Values) <= 0 {
+            // a is an empty list, just return b.
+            return b, nil
+        }
+        if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) {
+            // When the last item in a is of type String, List or
+            // Struct (encoded into List by Cloud Spanner), try to
+            // merge the last item in a with the first item in b.
+            t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0])
+            if err != nil {
+                return nil, err
+            }
+            l.ListValue.Values = l.ListValue.Values[1:]
+        }
+        return &proto3.Value{
+            Kind: &proto3.Value_ListValue{
+                ListValue: &proto3.ListValue{
+                    Values: append(t.ListValue.Values, l.ListValue.Values...),
+                },
+            },
+        }, nil
+    default:
+        return nil, errUnsupportedMergeType(a)
+    }
+}
+
+// done reports whether partialResultSetDecoder has finished with all buffered
+// values.
+func (p *partialResultSetDecoder) done() bool {
+    // There is no explicit end-of-stream marker, but ending partway through
+    // a row, or with the last column still awaiting completion, is clearly
+    // bad.
+    return len(p.row.vals) == 0 && !p.chunked
+}
diff --git a/vendor/cloud.google.com/go/spanner/read_test.go b/vendor/cloud.google.com/go/spanner/read_test.go
new file mode 100644
index 0000000..9818a0c
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/read_test.go
@@ -0,0 +1,1733 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "reflect"
+    "sync/atomic"
+    "testing"
+    "time"
+
+    "golang.org/x/net/context"
+
+    proto "github.com/golang/protobuf/proto"
+    proto3 "github.com/golang/protobuf/ptypes/struct"
+
+    "cloud.google.com/go/spanner/internal/testutil"
+    "google.golang.org/api/iterator"
+    sppb "google.golang.org/genproto/googleapis/spanner/v1"
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/codes"
+)
+
+var (
+    // Mocked transaction timestamp.
+    trxTs = time.Unix(1, 2)
+    // Metadata for the mocked KV table; its rows are returned by SingleUse transactions.
+    kvMeta = func() *sppb.ResultSetMetadata {
+        meta := testutil.KvMeta
+        meta.Transaction = &sppb.Transaction{
+            ReadTimestamp: timestampProto(trxTs),
+        }
+        return &meta
+    }()
+    // Metadata for the mocked ListKV table, which uses List for its key and value.
+    // Its rows are returned by snapshot read-only transactions, as indicated in the transaction metadata.
+ kvListMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + { + Name: "Value", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{5, 6, 7, 8, 9}, + ReadTimestamp: timestampProto(trxTs), + }, + } + // Metadata for mocked schema of a query result set, which has two struct + // columns named "Col1" and "Col2", the struct's schema is like the + // following: + // + // STRUCT { + // INT + // LIST + // } + // + // Its rows are returned in readwrite transaction, as indicated in the transaction metadata. + kvObjectMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Col1", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "foo-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "foo-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + { + Name: "Col2", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "bar-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "bar-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{1, 2, 3, 4, 5}, + }, + } +) + +// String implements fmt.stringer. +func (r *Row) String() string { + return fmt.Sprintf("{fields: %s, val: %s}", r.fields, r.vals) +} + +func describeRows(l []*Row) string { + // generate a nice test failure description + var s = "[" + for i, r := range l { + if i != 0 { + s += ",\n " + } + s += fmt.Sprint(r) + } + s += "]" + return s +} + +// Helper for generating proto3 Value_ListValue instances, making +// test code shorter and readable. +func genProtoListValue(v ...string) *proto3.Value_ListValue { + r := &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{}, + }, + } + for _, e := range v { + r.ListValue.Values = append( + r.ListValue.Values, + &proto3.Value{ + Kind: &proto3.Value_StringValue{StringValue: e}, + }, + ) + } + return r +} + +// Test Row generation logics of partialResultSetDecoder. +func TestPartialResultSetDecoder(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + var tests = []struct { + input []*sppb.PartialResultSet + wantF []*Row + wantTxID transactionID + wantTs time.Time + wantD bool + }{ + { + // Empty input. + wantD: true, + }, + // String merging examples. + { + // Single KV result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Incomplete partial result. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + wantTs: trxTs, + wantD: false, + }, + { + // Complete splitted result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Multi-row example with splitted row in the middle. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Merging example in result_set.proto. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orl"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "d"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // More complex example showing completing a merge and + // starting a new merge in the same partialResultSet. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, // start split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orld"}}, // complete value + {Kind: &proto3.Value_StringValue{StringValue: "i"}}, // start split in key + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "s"}}, // complete key + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "qu"}}, // split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "estion"}}, // complete value + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "is"}}, + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "question"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + // List merging examples. + { + // Non-splitting Lists. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Simple List merge case: splitted string element. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-"), + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Struct merging is also implemented by List merging. Note that + // Cloud Spanner uses proto.ListValue to encode Structs as well. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvObjectMeta, + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "fo")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("o-2", "f")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("oo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvObjectMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "foo-2", "foo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantTxID: transactionID{1, 2, 3, 4, 5}, + wantD: true, + }, + } + +nextTest: + for i, test := range tests { + var rows []*Row + p := &partialResultSetDecoder{} + for j, v := range test.input { + rs, err := p.add(v) + if err != nil { + t.Errorf("test %d.%d: partialResultSetDecoder.add(%v) = %v; want nil", i, j, v, err) + continue nextTest + } + rows = append(rows, rs...) + } + if !reflect.DeepEqual(p.ts, test.wantTs) { + t.Errorf("got transaction(%v), want %v", p.ts, test.wantTs) + } + if !reflect.DeepEqual(rows, test.wantF) { + t.Errorf("test %d: rows=\n%v\n; want\n%v\n; p.row:\n%v\n", i, describeRows(rows), describeRows(test.wantF), p.row) + } + if got := p.done(); got != test.wantD { + t.Errorf("test %d: partialResultSetDecoder.done() = %v", i, got) + } + } +} + +const ( + maxBuffers = 16 // max number of PartialResultSets that will be buffered in tests. +) + +// setMaxBytesBetweenResumeTokens sets the global maxBytesBetweenResumeTokens to a smaller +// value more suitable for tests. It returns a function which should be called to restore +// the maxBytesBetweenResumeTokens to its old value +func setMaxBytesBetweenResumeTokens() func() { + o := atomic.LoadInt32(&maxBytesBetweenResumeTokens) + atomic.StoreInt32(&maxBytesBetweenResumeTokens, int32(maxBuffers*proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }))) + return func() { + atomic.StoreInt32(&maxBytesBetweenResumeTokens, o) + } +} + +// keyStr generates key string for kvMeta schema. +func keyStr(i int) string { + return fmt.Sprintf("foo-%02d", i) +} + +// valStr generates value string for kvMeta schema. +func valStr(i int) string { + return fmt.Sprintf("bar-%02d", i) +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a non-blocking state(resumableStreamDecoder.Next returns +// on non-blocking state). 
+func TestRsdNonblockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected->queueingRetryable->finished + name: "unConnected->queueingRetryable->finished", + msgs: []testutil.MockCtlMsg{ + {}, + {}, + {Err: io.EOF, ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + finished, // got EOF + }, + }, + { + // unConnected->queueingRetryable->aborted + name: "unConnected->queueingRetryable->aborted", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {}, + {Err: errors.New("I quit"), ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + aborted, // got error + }, + wantErr: grpc.Errorf(codes.Unknown, "I quit"), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+1; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // the internal queue of resumableStreamDecoder fills up + } + // the first item fills up the queue and triggers state 
transition; + // the second item is received under queueingUnretryable state. + s = append(s, queueingUnretryable) + s = append(s, queueingUnretryable) + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->aborted + name: "unConnected->queueingRetryable->queueingUnretryable->aborted", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: errors.New("Just Abort It"), ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // the last row triggers state change + s = append(s, aborted) // Error happens + return s + }(), + wantErr: grpc.Errorf(codes.Unknown, "Just Abort It"), + }, + } +nextTest: + for _, test := range tests { + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + mc := sppb.NewSpannerClient(dialMock(t, ms)) + if test.rpc == nil { + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: test.sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + st := []resumableStreamDecoderState{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal by setting stateDone = true. + stateDone := false + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + if !stateDone { + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + stateDone = true + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + for { + select { + case <-ctx.Done(): + t.Errorf("context cancelled or timeout during test") + continue nextTest + default: + } + if stateDone { + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !reflect.DeepEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !reflect.DeepEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + var q []*sppb.PartialResultSet + for { + item := r.q.pop() + if item == nil { + break + } + q = append(q, item) + } + if !reflect.DeepEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. 
+ if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !reflect.DeepEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + // Proceed to next test + continue nextTest + } + // Receive next decoded item. + if r.next() { + rs = append(rs, r.get()) + } + } + } +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a blocking state(resumableStreamDecoder.Next blocks +// on blocking state). +func TestRsdBlockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected -> unConnected + name: "unConnected -> unConnected", + rpc: func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return nil, grpc.Errorf(codes.Unavailable, "trust me: server is unavailable") + }, + sql: "SELECT * from t_whatever", + stateHistory: []resumableStreamDecoderState{unConnected, unConnected, unConnected}, + wantErr: grpc.Errorf(codes.Unavailable, "trust me: server is unavailable"), + }, + { + // unConnected -> queueingRetryable + name: "unConnected -> queueingRetryable", + sql: "SELECT t.key key, t.value value FROM t_mock t", + stateHistory: []resumableStreamDecoderState{queueingRetryable}, + }, + { + // unConnected->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingRetryable", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {Err: nil, ResumeToken: true}, + {}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + ResumeToken: testutil.EncodeResumeToken(2), + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(3)}}, + }, + }, + }, + resumeToken: testutil.EncodeResumeToken(2), + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + queueingRetryable, // foo-02, resume token + queueingRetryable, // got foo-03 + }, + }, + { + // 
unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: nil, ResumeToken: true}) + m = append(m, testutil.MockCtlMsg{}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+2; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + s[maxBuffers+1].ResumeToken = testutil.EncodeResumeToken(maxBuffers + 1) + return s + }(), + resumeToken: testutil.EncodeResumeToken(maxBuffers + 1), + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 2)}}, + }, + }, + }, + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder filles up + } + for i := maxBuffers - 1; i < maxBuffers+1; i++ { + // the first item fills up the queue and triggers state change; + // the second item is received under queueingUnretryable state. + s = append(s, queueingUnretryable) + } + s = append(s, queueingUnretryable) // got (maxBuffers+1)th row under Unretryable state + s = append(s, queueingRetryable) // (maxBuffers+1)th row has resume token + s = append(s, queueingRetryable) // (maxBuffers+2)th row has no resume token + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->finished + name: "unConnected->queueingRetryable->queueingUnretryable->finished", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: io.EOF, ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // last row triggers state change + s = append(s, finished) // query finishes + return s + }(), + }, + } + for _, test := range tests { + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + cc := dialMock(t, ms) + mc := sppb.NewSpannerClient(cc) + if test.rpc == nil { + // Avoid using test.sql directly in closure because for loop changes test. 
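+			// The range variable `test` is a single variable reused on every
+			// iteration, and the decoder may invoke this closure from another
+			// goroutine after the loop has moved on, e.g.:
+			//
+			//	for _, test := range tests {
+			//		go func() { _ = test.sql }() // may observe a later test's sql
+			//	}
+			//
+			// Copying into a local variable first pins this iteration's value.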
+ sql := test.sql + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + // Override backoff to make the test run faster. + r.backoff = exponentialBackoff{1 * time.Nanosecond, 1 * time.Nanosecond} + // st is the set of observed state transitions. + st := []resumableStreamDecoderState{} + // q is the content of the decoder's partial result queue when expected number of state transitions are done. + q := []*sppb.PartialResultSet{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal to channel stateDone. + stateDone := make(chan int) + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + select { + case <-stateDone: + // Noop after expected number of state transitions + default: + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + q = r.q.dump() + close(stateDone) + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + go func() { + for { + if !r.next() { + // Note that r.Next also exits on context cancel/timeout. + return + } + rs = append(rs, r.get()) + } + }() + // Verify that resumableStreamDecoder reaches expected state. + select { + case <-stateDone: // Note that at this point, receiver is still blocking on r.next(). + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !reflect.DeepEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !reflect.DeepEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + if !reflect.DeepEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. + if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !reflect.DeepEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + case <-time.After(1 * time.Second): + t.Errorf("%v: Timeout in waiting for state change", test.name) + } + ms.Stop() + cc.Close() + } +} + +// sReceiver signals every receiving attempt through a channel, +// used by TestResumeToken to determine if the receiving of a certain +// PartialResultSet will be attempted next. +type sReceiver struct { + c chan int + rpcReceiver sppb.Spanner_ExecuteStreamingSqlClient +} + +// Recv() implements streamingReceiver.Recv for sReceiver. +func (sr *sReceiver) Recv() (*sppb.PartialResultSet, error) { + sr.c <- 1 + return sr.rpcReceiver.Recv() +} + +// waitn waits for nth receiving attempt from now on, until +// the signal for nth Recv() attempts is received or timeout. 
+// Note that because of the way stream() works, the signal for the
+// nth Recv() means that the previous n - 1 PartialResultSets
+// have already been returned to the caller or queued, if no error happened.
+func (sr *sReceiver) waitn(n int) error {
+	for i := 0; i < n; i++ {
+		select {
+		case <-sr.c:
+		case <-time.After(10 * time.Second):
+			return fmt.Errorf("timeout in waiting for %v-th Recv()", i+1)
+		}
+	}
+	return nil
+}
+
+// Test the handling of resumableStreamDecoder.bytesBetweenResumeTokens.
+func TestQueueBytes(t *testing.T) {
+	restore := setMaxBytesBetweenResumeTokens()
+	defer restore()
+	ms := testutil.NewMockCloudSpanner(t, trxTs)
+	ms.Serve()
+	defer ms.Stop()
+	cc := dialMock(t, ms)
+	defer cc.Close()
+	mc := sppb.NewSpannerClient(cc)
+	sr := &sReceiver{
+		c: make(chan int, 1000), // will never block in this test
+	}
+	wantQueueBytes := 0
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	r := newResumableStreamDecoder(
+		ctx,
+		func(ct context.Context, resumeToken []byte) (streamingReceiver, error) {
+			r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{
+				Sql:         "SELECT t.key key, t.value value FROM t_mock t",
+				ResumeToken: resumeToken,
+			})
+			sr.rpcReceiver = r
+			return sr, err
+		},
+	)
+	go func() {
+		for r.next() {
+		}
+	}()
+	// Let server send maxBuffers / 2 rows.
+	for i := 0; i < maxBuffers/2; i++ {
+		wantQueueBytes += proto.Size(&sppb.PartialResultSet{
+			Metadata: kvMeta,
+			Values: []*proto3.Value{
+				{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
+				{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
+			},
+		})
+		ms.AddMsg(nil, false)
+	}
+	if err := sr.waitn(maxBuffers/2 + 1); err != nil {
+		t.Fatalf("failed to wait for the first %v recv() calls: %v", maxBuffers/2+1, err)
+	}
+	if int32(wantQueueBytes) != r.bytesBetweenResumeTokens {
+		t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes)
+	}
+	// Now send a resume token to drain the queue.
+	ms.AddMsg(nil, true)
+	// Wait for all rows to be processed.
+	if err := sr.waitn(1); err != nil {
+		t.Fatalf("failed to wait for rows to be processed: %v", err)
+	}
+	if r.bytesBetweenResumeTokens != 0 {
+		t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens)
+	}
+	// Let server send maxBuffers - 1 rows.
+	wantQueueBytes = 0
+	for i := 0; i < maxBuffers-1; i++ {
+		wantQueueBytes += proto.Size(&sppb.PartialResultSet{
+			Metadata: kvMeta,
+			Values: []*proto3.Value{
+				{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
+				{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
+			},
+		})
+		ms.AddMsg(nil, false)
+	}
+	if err := sr.waitn(maxBuffers - 1); err != nil {
+		t.Fatalf("failed to wait for %v rows to be processed: %v", maxBuffers-1, err)
+	}
+	if int32(wantQueueBytes) != r.bytesBetweenResumeTokens {
+		t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes)
+	}
+	// Trigger a state transition: queueingRetryable -> queueingUnretryable.
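+	// Queueing a row beyond maxBuffers without an intervening resume token is
+	// what pushes the decoder out of queueingRetryable; on that transition it
+	// flushes the buffered rows and, as checked below, resets
+	// bytesBetweenResumeTokens to 0.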
+ ms.AddMsg(nil, false) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for state transition: %v", err) + } + if r.bytesBetweenResumeTokens != 0 { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } +} + +// Verify that client can deal with resume token correctly +func TestResumeToken(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + sr := &sReceiver{ + c: make(chan int, 1000), // will never block in this test + } + rows := []*Row{} + done := make(chan error) + streaming := func() { + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + sr.rpcReceiver = r + return sr, err + }, + nil, + func(error) {}) + defer iter.Stop() + var err error + for { + var row *Row + row, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + rows = append(rows, row) + } + done <- err + } + go streaming() + // Server streaming row 0 - 2, only row 1 has resume token. + // Client will receive row 0 - 2, so it will try receiving for + // 4 times (the last recv will block), and only row 0 - 1 will + // be yielded. + for i := 0; i < 3; i++ { + if i == 1 { + ms.AddMsg(nil, true) + } else { + ms.AddMsg(nil, false) + } + } + // Wait for 4 receive attempts, as explained above. + if err := sr.waitn(4); err != nil { + t.Fatalf("failed to wait for row 0 - 2: %v", err) + } + want := []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } + // Inject resumable failure. + ms.AddMsg( + grpc.Errorf(codes.Unavailable, "mock server unavailable"), + false, + ) + // Test if client detects the resumable failure and retries. + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for client to retry: %v", err) + } + // Client has resumed the query, now server resend row 2. + ms.AddMsg(nil, true) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for resending row 2: %v", err) + } + // Now client should have received row 0 - 2. + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + }) + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n, want\n%v\n", rows, want) + } + // Sending 3rd - (maxBuffers+1)th rows without resume tokens, client should buffer them. + for i := 3; i < maxBuffers+2; i++ { + ms.AddMsg(nil, false) + } + if err := sr.waitn(maxBuffers - 1); err != nil { + t.Fatalf("failed to wait for row 3-%v: %v", maxBuffers+1, err) + } + // Received rows should be unchanged. 
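+	// Rows that arrive without resume tokens stay in the decoder's internal
+	// queue until a token or EOF shows up, so nothing new has been yielded to
+	// the application yet.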
+ if !reflect.DeepEqual(rows, want) { + t.Errorf("receive rows: \n%v\n, want\n%v\n", rows, want) + } + // Send (maxBuffers+2)th row to trigger state change of resumableStreamDecoder: + // queueingRetryable -> queueingUnretryable + ms.AddMsg(nil, false) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for row %v: %v", maxBuffers+2, err) + } + // Client should yield row 3rd - (maxBuffers+2)th to application. Therefore, application should + // see row 0 - (maxBuffers+2)th so far. + for i := 3; i < maxBuffers+3; i++ { + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; want\n%v\n", rows, want) + } + // Inject resumable error, but since resumableStreamDecoder is already at queueingUnretryable + // state, query will just fail. + ms.AddMsg( + grpc.Errorf(codes.Unavailable, "mock server wants some sleep"), + false, + ) + var gotErr error + select { + case gotErr = <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return.") + } + if wantErr := toSpannerError(grpc.Errorf(codes.Unavailable, "mock server wants some sleep")); !reflect.DeepEqual(gotErr, wantErr) { + t.Fatalf("stream() returns error: %v, but want error: %v", gotErr, wantErr) + } + + // Reconnect to mock Cloud Spanner. + rows = []*Row{} + go streaming() + // Let server send two rows without resume token. + for i := maxBuffers + 3; i < maxBuffers+5; i++ { + ms.AddMsg(nil, false) + } + if err := sr.waitn(3); err != nil { + t.Fatalf("failed to wait for row %v - %v: %v", maxBuffers+3, maxBuffers+5, err) + } + if len(rows) > 0 { + t.Errorf("client received some rows unexpectedly: %v, want nothing", rows) + } + // Let server end the query. + ms.AddMsg(io.EOF, false) + select { + case gotErr = <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return") + } + if gotErr != nil { + t.Fatalf("stream() returns unexpected error: %v, but want no error", gotErr) + } + // Verify if a normal server side EOF flushes all queued rows. + want = []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 3)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 4)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 4)}}, + }, + }, + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } +} + +// Verify that streaming query get retried upon real gRPC server transport failures. +func TestGrpcReconnect(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + retry := make(chan int) + row := make(chan int) + var err error + go func() { + r := 0 + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + if r > 0 { + // This RPC attempt is a retry, signal it. 
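+					// (r counts RPC attempts: attempt 0 is the initial stream, so
+					// any r > 0 reaching this point means the decoder reconnected
+					// after a transport failure.)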
+ retry <- r + } + r++ + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + + }, + nil, + func(error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + row <- 0 + } + }() + // Add a message and wait for the receipt. + ms.AddMsg(nil, true) + select { + case <-row: + case <-time.After(10 * time.Second): + t.Fatalf("expect stream to be established within 10 seconds, but it didn't") + } + // Error injection: force server to close all connections. + ms.Stop() + // Test to see if client respond to the real RPC failure correctly by + // retrying RPC. + select { + case r, ok := <-retry: + if ok && r == 1 { + break + } + t.Errorf("retry count = %v, want 1", r) + case <-time.After(10 * time.Second): + t.Errorf("client library failed to respond after 10 seconds, aborting") + return + } +} + +// Test cancel/timeout for client operations. +func TestCancelTimeout(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + done := make(chan int) + go func() { + for { + ms.AddMsg(nil, true) + } + }() + // Test cancelling query. + ctx, cancel := context.WithCancel(context.Background()) + var err error + go func() { + // Establish a stream to mock cloud spanner server. + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + break + } + if err != nil { + done <- 0 + break + } + } + }() + cancel() + select { + case <-done: + if ErrCode(err) != codes.Canceled { + t.Errorf("streaming query is canceled and returns error %v, want error code %v", err, codes.Canceled) + } + case <-time.After(1 * time.Second): + t.Errorf("query doesn't exit timely after being cancelled") + } + // Test query timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + go func() { + // Establish a stream to mock cloud spanner server. 
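+		// This stream inherits the 1-second timeout context created above,
+		// so iter.Next() should eventually fail with DeadlineExceeded rather
+		// than an explicit cancellation.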
+ iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + } + done <- 0 + }() + select { + case <-done: + if wantErr := codes.DeadlineExceeded; ErrCode(err) != wantErr { + t.Errorf("streaming query timeout returns error %v, want error code %v", err, wantErr) + } + case <-time.After(2 * time.Second): + t.Errorf("query doesn't timeout as expected") + } +} + +func TestRowIteratorDo(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + for i := 0; i < 3; i++ { + ms.AddMsg(nil, false) + } + ms.AddMsg(io.EOF, true) + nRows := 0 + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + err := iter.Do(func(r *Row) error { nRows++; return nil }) + if err != nil { + t.Errorf("Using Do: %v", err) + } + if nRows != 3 { + t.Errorf("got %d rows, want 3", nRows) + } +} + +func TestRowIteratorDoWithError(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + for i := 0; i < 3; i++ { + ms.AddMsg(nil, false) + } + ms.AddMsg(io.EOF, true) + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + injected := errors.New("Failed iterator") + err := iter.Do(func(r *Row) error { return injected }) + if err != injected { + t.Errorf("got <%v>, want <%v>", err, injected) + } +} + +func TestIteratorStopEarly(t *testing.T) { + ctx := context.Background() + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc := dialMock(t, ms) + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + ms.AddMsg(nil, false) + ms.AddMsg(nil, false) + ms.AddMsg(io.EOF, true) + + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + nil, + func(error) {}) + _, err := iter.Next() + if err != nil { + t.Fatalf("before Stop: %v", err) + } + iter.Stop() + // Stop sets r.err to the FailedPrecondition error "Next called after Stop". + // Override that here so this test can observe the Canceled error from the stream. 
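+	// (Stop cancels the context backing the stream; the decoder records that
+	// cancellation as its lastErr, which is what the assertion below inspects.)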
+	iter.err = nil
+	iter.Next()
+	if ErrCode(iter.streamd.lastErr()) != codes.Canceled {
+		t.Errorf("after Stop: got %v, wanted Canceled", iter.streamd.lastErr())
+	}
+}
+
+func TestIteratorWithError(t *testing.T) {
+	injected := errors.New("Failed iterator")
+	iter := RowIterator{err: injected}
+	defer iter.Stop()
+	if _, err := iter.Next(); err != injected {
+		t.Fatalf("Expected error: %v, got %v", injected, err)
+	}
+}
+
+func dialMock(t *testing.T, ms *testutil.MockCloudSpanner) *grpc.ClientConn {
+	cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure(), grpc.WithBlock())
+	if err != nil {
+		t.Fatalf("Dial(%q) = %v", ms.Addr(), err)
+	}
+	return cc
+}
diff --git a/vendor/cloud.google.com/go/spanner/retry.go b/vendor/cloud.google.com/go/spanner/retry.go
new file mode 100644
index 0000000..37fb3b2
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/retry.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"golang.org/x/net/context"
+	edpb "google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	retryInfoKey = "google.rpc.retryinfo-bin"
+)
+
+// errRetry returns an Unavailable error. It is a generic retryable error
+// that is used to mask and recover unretryable errors in a retry loop.
+func errRetry(err error) error {
+	if se, ok := err.(*Error); ok {
+		return &Error{codes.Unavailable, fmt.Sprintf("generic Cloud Spanner retryable error: { %v }", se.Error()), se.trailers}
+	}
+	return spannerErrorf(codes.Unavailable, "generic Cloud Spanner retryable error: { %v }", err.Error())
+}
+
+// isErrorClosing reports whether the error is generated by the gRPC layer talking to a closed server.
+func isErrorClosing(err error) bool {
+	if err == nil {
+		return false
+	}
+	if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "transport is closing") {
+		// Handle the case when connection is closed unexpectedly.
+		// TODO: once gRPC is able to categorize
+		// this as retryable error, we should stop parsing the
+		// error message here.
+		return true
+	}
+	return false
+}
+
+// isErrorRST reports whether the error is generated by the gRPC client receiving an RST_STREAM frame from the server.
+func isErrorRST(err error) bool {
+	if err == nil {
+		return false
+	}
+	if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "stream terminated by RST_STREAM") {
+		// TODO: once gRPC is able to categorize this error as "go away" or "retryable",
+		// we should stop parsing the error message.
+		return true
+	}
+	return false
+}
+
+// isErrorUnexpectedEOF returns true if the error is generated by the gRPC layer
+// receiving io.EOF unexpectedly.
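+// A typical raw gRPC error matched here would look roughly like
+//
+//	rpc error: code = Unknown desc = unexpected EOF
+//
+// though the exact text depends on the gRPC version in use.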
+func isErrorUnexpectedEOF(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") { + // Unexpected EOF is an transport layer issue that + // could be recovered by retries. The most likely + // scenario is a flaky RecvMsg() call due to network + // issues. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorUnavailable returns true if the error is about server being unavailable. +func isErrorUnavailable(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unavailable { + return true + } + return false +} + +// isRetryable returns true if the Cloud Spanner error being checked is a retryable error. +func isRetryable(err error) bool { + if isErrorClosing(err) { + return true + } + if isErrorUnexpectedEOF(err) { + return true + } + if isErrorRST(err) { + return true + } + if isErrorUnavailable(err) { + return true + } + return false +} + +// errContextCanceled returns *spanner.Error for canceled context. +func errContextCanceled(ctx context.Context, lastErr error) error { + if ctx.Err() == context.DeadlineExceeded { + return spannerErrorf(codes.DeadlineExceeded, "%v, lastErr is <%v>", ctx.Err(), lastErr) + } + return spannerErrorf(codes.Canceled, "%v, lastErr is <%v>", ctx.Err(), lastErr) +} + +// extractRetryDelay extracts retry backoff if present. +func extractRetryDelay(err error) (time.Duration, bool) { + trailers := errTrailers(err) + if trailers == nil { + return 0, false + } + elem, ok := trailers[retryInfoKey] + if !ok || len(elem) <= 0 { + return 0, false + } + _, b, err := metadata.DecodeKeyValue(retryInfoKey, elem[0]) + if err != nil { + return 0, false + } + var retryInfo edpb.RetryInfo + if proto.Unmarshal([]byte(b), &retryInfo) != nil { + return 0, false + } + delay, err := ptypes.Duration(retryInfo.RetryDelay) + if err != nil { + return 0, false + } + return delay, true +} + +// runRetryable keeps attempting to run f until one of the following happens: +// 1) f returns nil error or an unretryable error; +// 2) context is cancelled or timeout. +// TODO: consider using https://github.com/googleapis/gax-go once it +// becomes available internally. +func runRetryable(ctx context.Context, f func(context.Context) error) error { + return toSpannerError(runRetryableNoWrap(ctx, f)) +} + +// Like runRetryable, but doesn't wrap the returned error in a spanner.Error. +func runRetryableNoWrap(ctx context.Context, f func(context.Context) error) error { + var funcErr error + retryCount := 0 + for { + select { + case <-ctx.Done(): + // Do context check here so that even f() failed to do + // so (for example, gRPC implementation bug), the loop + // can still have a chance to exit as expected. + return errContextCanceled(ctx, funcErr) + default: + } + funcErr = f(ctx) + if funcErr == nil { + return nil + } + if isRetryable(funcErr) { + // Error is retryable, do exponential backoff and continue. + b, ok := extractRetryDelay(funcErr) + if !ok { + b = defaultBackoff.delay(retryCount) + } + select { + case <-ctx.Done(): + return errContextCanceled(ctx, funcErr) + case <-time.After(b): + } + retryCount++ + continue + } + // Error isn't retryable / no error, return immediately. 
+ return funcErr + } +} diff --git a/vendor/cloud.google.com/go/spanner/retry_test.go b/vendor/cloud.google.com/go/spanner/retry_test.go new file mode 100644 index 0000000..49c5051 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Test if runRetryable loop deals with various errors correctly. +func TestRetry(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + responses := []error{ + grpc.Errorf(codes.Internal, "transport is closing"), + grpc.Errorf(codes.Unknown, "unexpected EOF"), + grpc.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), + grpc.Errorf(codes.Unavailable, "service is currently unavailable"), + errRetry(fmt.Errorf("just retry it")), + } + err := runRetryable(context.Background(), func(ct context.Context) error { + var r error + if len(responses) > 0 { + r = responses[0] + responses = responses[1:] + } + return r + }) + if err != nil { + t.Errorf("runRetryable should be able to survive all retryable errors, but it returns %v", err) + } + // Unretryable errors + injErr := errors.New("this is unretryable") + err = runRetryable(context.Background(), func(ct context.Context) error { + return injErr + }) + if wantErr := toSpannerError(injErr); !reflect.DeepEqual(err, wantErr) { + t.Errorf("runRetryable returns error %v, want %v", err, wantErr) + } + // Timeout + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + retryErr := errRetry(fmt.Errorf("still retrying")) + err = runRetryable(ctx, func(ct context.Context) error { + // Expect to trigger timeout in retryable runner after 10 executions. + <-time.After(100 * time.Millisecond) + // Let retryable runner to retry so that timeout will eventually happen. 
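+		// (Each attempt blocks for 100ms and always returns a retryable error,
+		// so the 1-second context fires after roughly 10 attempts and the
+		// runner surfaces DeadlineExceeded, as checked below.)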
+		return retryErr
+	})
+	// Check error code and error message.
+	if wantErrCode, wantErr := codes.DeadlineExceeded, errContextCanceled(ctx, retryErr); ErrCode(err) != wantErrCode || !reflect.DeepEqual(err, wantErr) {
+		t.Errorf("<error code, error> =\n<%v, %v>, want:\n<%v, %v>", ErrCode(err), err, wantErrCode, wantErr)
+	}
+	// Cancellation
+	ctx, cancel = context.WithCancel(context.Background())
+	retries := 3
+	retryErr = errRetry(fmt.Errorf("retry before cancel"))
+	err = runRetryable(ctx, func(ct context.Context) error {
+		retries--
+		if retries == 0 {
+			cancel()
+		}
+		return retryErr
+	})
+	// Check error code, error message, retry count.
+	if wantErrCode, wantErr := codes.Canceled, errContextCanceled(ctx, retryErr); ErrCode(err) != wantErrCode || !reflect.DeepEqual(err, wantErr) || retries != 0 {
+		t.Errorf("<error code, error, retries> =\n<%v, %v, %v>, want:\n<%v, %v, %v>", ErrCode(err), err, retries, wantErrCode, wantErr, 0)
+	}
+}
+
+func TestRetryInfo(t *testing.T) {
+	b, _ := proto.Marshal(&edpb.RetryInfo{
+		RetryDelay: ptypes.DurationProto(time.Second),
+	})
+	trailers := map[string]string{
+		retryInfoKey: string(b),
+	}
+	gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(grpc.Errorf(codes.Aborted, ""), metadata.New(trailers))))
+	if !ok || !reflect.DeepEqual(time.Second, gotDelay) {
+		t.Errorf("<ok, retryDelay> = <%t, %v>, want <true, %v>", ok, gotDelay, time.Second)
+	}
+}
diff --git a/vendor/cloud.google.com/go/spanner/row.go b/vendor/cloud.google.com/go/spanner/row.go
new file mode 100644
index 0000000..200fda5
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/row.go
@@ -0,0 +1,303 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"fmt"
+	"reflect"
+
+	proto3 "github.com/golang/protobuf/ptypes/struct"
+
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
+)
+
+// A Row is a view of a row of data returned by a Cloud Spanner read.
+// It consists of a number of columns; the number depends on the columns
+// used to construct the read.
+//
+// The column values can be accessed by index. For instance, if the read specified
+// []string{"photo_id", "caption"}, then each row will contain two
+// columns: "photo_id" with index 0, and "caption" with index 1.
+//
+// Column values are decoded by using one of the Column, ColumnByName, or
+// Columns methods. The valid values passed to these methods depend on the
+// column type. For example:
+//
+//	var photoID int64
+//	err := row.Column(0, &photoID) // Decode column 0 as an integer.
+//
+//	var caption string
+//	err := row.Column(1, &caption) // Decode column 1 as a string.
+//
+//	// Decode all the columns.
+// err := row.Columns(&photoID, &caption) +// +// Supported types and their corresponding Cloud Spanner column type(s) are: +// +// *string(not NULL), *NullString - STRING +// *[]string, *[]NullString - STRING ARRAY +// *[]byte - BYTES +// *[][]byte - BYTES ARRAY +// *int64(not NULL), *NullInt64 - INT64 +// *[]int64, *[]NullInt64 - INT64 ARRAY +// *bool(not NULL), *NullBool - BOOL +// *[]bool, *[]NullBool - BOOL ARRAY +// *float64(not NULL), *NullFloat64 - FLOAT64 +// *[]float64, *[]NullFloat64 - FLOAT64 ARRAY +// *time.Time(not NULL), *NullTime - TIMESTAMP +// *[]time.Time, *[]NullTime - TIMESTAMP ARRAY +// *Date(not NULL), *NullDate - DATE +// *[]civil.Date, *[]NullDate - DATE ARRAY +// *[]*some_go_struct, *[]NullRow - STRUCT ARRAY +// *GenericColumnValue - any Cloud Spanner type +// +// For TIMESTAMP columns, the returned time.Time object will be in UTC. +// +// To fetch an array of BYTES, pass a *[][]byte. To fetch an array of (sub)rows, pass +// a *[]spanner.NullRow or a *[]*some_go_struct where some_go_struct holds all +// information of the subrow, see spanner.Row.ToStruct for the mapping between a +// Cloud Spanner row and a Go struct. To fetch an array of other types, pass a +// *[]spanner.NullXXX type of the appropriate type. Use GenericColumnValue when you +// don't know in advance what column type to expect. +// +// Row decodes the row contents lazily; as a result, each call to a getter has +// a chance of returning an error. +// +// A column value may be NULL if the corresponding value is not present in +// Cloud Spanner. The spanner.NullXXX types (spanner.NullInt64 et al.) allow fetching +// values that may be null. A NULL BYTES can be fetched into a *[]byte as nil. +// It is an error to fetch a NULL value into any other type. +type Row struct { + fields []*sppb.StructType_Field + vals []*proto3.Value // keep decoded for now +} + +// errNamesValuesMismatch returns error for when columnNames count is not equal +// to columnValues count. +func errNamesValuesMismatch(columnNames []string, columnValues []interface{}) error { + return spannerErrorf(codes.FailedPrecondition, + "different number of names(%v) and values(%v)", len(columnNames), len(columnValues)) +} + +// NewRow returns a Row containing the supplied data. This can be useful for +// mocking Cloud Spanner Read and Query responses for unit testing. +func NewRow(columnNames []string, columnValues []interface{}) (*Row, error) { + if len(columnValues) != len(columnNames) { + return nil, errNamesValuesMismatch(columnNames, columnValues) + } + r := Row{ + fields: make([]*sppb.StructType_Field, len(columnValues)), + vals: make([]*proto3.Value, len(columnValues)), + } + for i := range columnValues { + val, typ, err := encodeValue(columnValues[i]) + if err != nil { + return nil, err + } + r.fields[i] = &sppb.StructType_Field{ + Name: columnNames[i], + Type: typ, + } + r.vals[i] = val + } + return &r, nil +} + +// Size is the number of columns in the row. +func (r *Row) Size() int { + return len(r.fields) +} + +// ColumnName returns the name of column i, or empty string for invalid column. +func (r *Row) ColumnName(i int) string { + if i < 0 || i >= len(r.fields) { + return "" + } + return r.fields[i].Name +} + +// ColumnIndex returns the index of the column with the given name. The +// comparison is case-sensitive. 
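+// For example, given the two-column row from the package documentation:
+//
+//	i, err := row.ColumnIndex("caption") // i == 1
+//	_, err = row.ColumnIndex("Caption")  // NotFound: names are case-sensitive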
+func (r *Row) ColumnIndex(name string) (int, error) { + found := false + var index int + if len(r.vals) != len(r.fields) { + return 0, errFieldsMismatchVals(r) + } + for i, f := range r.fields { + if f == nil { + return 0, errNilColType(i) + } + if name == f.Name { + if found { + return 0, errDupColName(name) + } + found = true + index = i + } + } + if !found { + return 0, errColNotFound(name) + } + return index, nil +} + +// ColumnNames returns all column names of the row. +func (r *Row) ColumnNames() []string { + var n []string + for _, c := range r.fields { + n = append(n, c.Name) + } + return n +} + +// errColIdxOutOfRange returns error for requested column index is out of the +// range of the target Row's columns. +func errColIdxOutOfRange(i int, r *Row) error { + return spannerErrorf(codes.OutOfRange, "column index %d out of range [0,%d)", i, len(r.vals)) +} + +// errDecodeColumn returns error for not being able to decode a indexed column. +func errDecodeColumn(i int, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to decode column %v, error = <%v>", i, err) + } + se.decorate(fmt.Sprintf("failed to decode column %v", i)) + return se +} + +// errFieldsMismatchVals returns error for field count isn't equal to value count in a Row. +func errFieldsMismatchVals(r *Row) error { + return spannerErrorf(codes.FailedPrecondition, "row has different number of fields(%v) and values(%v)", + len(r.fields), len(r.vals)) +} + +// errNilColType returns error for column type for column i being nil in the row. +func errNilColType(i int) error { + return spannerErrorf(codes.FailedPrecondition, "column(%v)'s type is nil", i) +} + +// Column fetches the value from the ith column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +// see Client.ReadWriteTransaction for an example. +func (r *Row) Column(i int, ptr interface{}) error { + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + if i < 0 || i >= len(r.fields) { + return errColIdxOutOfRange(i, r) + } + if r.fields[i] == nil { + return errNilColType(i) + } + if err := decodeValue(r.vals[i], r.fields[i].Type, ptr); err != nil { + return errDecodeColumn(i, err) + } + return nil +} + +// errDupColName returns error for duplicated column name in the same row. +func errDupColName(n string) error { + return spannerErrorf(codes.FailedPrecondition, "ambiguous column name %q", n) +} + +// errColNotFound returns error for not being able to find a named column. +func errColNotFound(n string) error { + return spannerErrorf(codes.NotFound, "column %q not found", n) +} + +// ColumnByName fetches the value from the named column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +func (r *Row) ColumnByName(name string, ptr interface{}) error { + index, err := r.ColumnIndex(name) + if err != nil { + return err + } + return r.Column(index, ptr) +} + +// errNumOfColValue returns error for providing wrong number of values to Columns. +func errNumOfColValue(n int, r *Row) error { + return spannerErrorf(codes.InvalidArgument, + "Columns(): number of arguments (%d) does not match row size (%d)", n, len(r.vals)) +} + +// Columns fetches all the columns in the row at once. +// +// The value of the kth column will be decoded into the kth argument to Columns. See +// Row for the list of acceptable argument types. 
The number of arguments must be +// equal to the number of columns. Pass nil to specify that a column should be +// ignored. +func (r *Row) Columns(ptrs ...interface{}) error { + if len(ptrs) != len(r.vals) { + return errNumOfColValue(len(ptrs), r) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + for i, p := range ptrs { + if p == nil { + continue + } + if err := r.Column(i, p); err != nil { + return err + } + } + return nil +} + +// errToStructArgType returns error for p not having the correct data type(pointer to Go struct) to +// be the argument of Row.ToStruct. +func errToStructArgType(p interface{}) error { + return spannerErrorf(codes.InvalidArgument, "ToStruct(): type %T is not a valid pointer to Go struct", p) +} + +// ToStruct fetches the columns in a row into the fields of a struct. +// The rules for mapping a row's columns into a struct's exported fields +// are as the following: +// 1. If a field has a `spanner: "column_name"` tag, then decode column +// 'column_name' into the field. A special case is the `spanner: "-"` +// tag, which instructs ToStruct to ignore the field during decoding. +// 2. Otherwise, if the name of a field matches the name of a column (ignoring case), +// decode the column into the field. +// +// The fields of the destination struct can be of any type that is acceptable +// to spanner.Row.Column. +// +// Slice and pointer fields will be set to nil if the source column is NULL, and a +// non-nil value if the column is not NULL. To decode NULL values of other types, use +// one of the spanner.NullXXX types as the type of the destination field. +func (r *Row) ToStruct(p interface{}) error { + // Check if p is a pointer to a struct + if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return errToStructArgType(p) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + // Call decodeStruct directly to decode the row as a typed proto.ListValue. + return decodeStruct( + &sppb.StructType{Fields: r.fields}, + &proto3.ListValue{Values: r.vals}, + p, + ) +} diff --git a/vendor/cloud.google.com/go/spanner/row_test.go b/vendor/cloud.google.com/go/spanner/row_test.go new file mode 100644 index 0000000..d566955 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/row_test.go @@ -0,0 +1,1784 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "cloud.google.com/go/civil" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + tm = time.Date(2016, 11, 15, 0, 0, 0, 0, time.UTC) + dt, _ = civil.ParseDate("2016-11-15") + // row contains a column for each unique Cloud Spanner type. 
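+	// Each type appears in four flavors: a concrete value, a NULL, an array,
+	// and a NULL array. fields[i] is paired positionally with vals[i], e.g.
+	// {"STRING", stringType()} corresponds to stringProto("value") below.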
+ row = Row{ + []*sppb.StructType_Field{ + // STRING / STRING ARRAY + {"STRING", stringType()}, + {"NULL_STRING", stringType()}, + {"STRING_ARRAY", listType(stringType())}, + {"NULL_STRING_ARRAY", listType(stringType())}, + // BYTES / BYTES ARRAY + {"BYTES", bytesType()}, + {"NULL_BYTES", bytesType()}, + {"BYTES_ARRAY", listType(bytesType())}, + {"NULL_BYTES_ARRAY", listType(bytesType())}, + // INT64 / INT64 ARRAY + {"INT64", intType()}, + {"NULL_INT64", intType()}, + {"INT64_ARRAY", listType(intType())}, + {"NULL_INT64_ARRAY", listType(intType())}, + // BOOL / BOOL ARRAY + {"BOOL", boolType()}, + {"NULL_BOOL", boolType()}, + {"BOOL_ARRAY", listType(boolType())}, + {"NULL_BOOL_ARRAY", listType(boolType())}, + // FLOAT64 / FLOAT64 ARRAY + {"FLOAT64", floatType()}, + {"NULL_FLOAT64", floatType()}, + {"FLOAT64_ARRAY", listType(floatType())}, + {"NULL_FLOAT64_ARRAY", listType(floatType())}, + // TIMESTAMP / TIMESTAMP ARRAY + {"TIMESTAMP", timeType()}, + {"NULL_TIMESTAMP", timeType()}, + {"TIMESTAMP_ARRAY", listType(timeType())}, + {"NULL_TIMESTAMP_ARRAY", listType(timeType())}, + // DATE / DATE ARRAY + {"DATE", dateType()}, + {"NULL_DATE", dateType()}, + {"DATE_ARRAY", listType(dateType())}, + {"NULL_DATE_ARRAY", listType(dateType())}, + + // STRUCT ARRAY + { + "STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + { + "NULL_STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{ + // STRING / STRING ARRAY + stringProto("value"), + nullProto(), + listProto(stringProto("value1"), nullProto(), stringProto("value3")), + nullProto(), + // BYTES / BYTES ARRAY + bytesProto([]byte("value")), + nullProto(), + listProto(bytesProto([]byte("value1")), nullProto(), bytesProto([]byte("value3"))), + nullProto(), + // INT64 / INT64 ARRAY + intProto(17), + nullProto(), + listProto(intProto(1), intProto(2), nullProto()), + nullProto(), + // BOOL / BOOL ARRAY + boolProto(true), + nullProto(), + listProto(nullProto(), boolProto(true), boolProto(false)), + nullProto(), + // FLOAT64 / FLOAT64 ARRAY + floatProto(1.7), + nullProto(), + listProto(nullProto(), nullProto(), floatProto(1.7)), + nullProto(), + // TIMESTAMP / TIMESTAMP ARRAY + timeProto(tm), + nullProto(), + listProto(nullProto(), timeProto(tm)), + nullProto(), + // DATE / DATE ARRAY + dateProto(dt), + nullProto(), + listProto(nullProto(), dateProto(dt)), + nullProto(), + // STRUCT ARRAY + listProto( + nullProto(), + listProto(intProto(3), floatProto(33.3), stringProto("three")), + nullProto(), + ), + nullProto(), + }, + } +) + +// Test helpers for getting column values. +func TestColumnValues(t *testing.T) { + vals := []interface{}{} + wantVals := []interface{}{} + // Test getting column values. 
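+	// Each inner slice in the table below lists every Go destination type
+	// that should decode column i successfully; the j == 0 entry of each
+	// group is also collected into vals/wantVals so that row.Columns can be
+	// exercised over all columns at the end.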
+ for i, wants := range [][]interface{}{ + // STRING / STRING ARRAY + {"value", NullString{"value", true}}, + {NullString{}}, + {[]NullString{{"value1", true}, {}, {"value3", true}}}, + {[]NullString(nil)}, + // BYTES / BYTES ARRAY + {[]byte("value")}, + {[]byte(nil)}, + {[][]byte{[]byte("value1"), nil, []byte("value3")}}, + {[][]byte(nil)}, + // INT64 / INT64 ARRAY + {int64(17), NullInt64{17, true}}, + {NullInt64{}}, + {[]NullInt64{{1, true}, {2, true}, {}}}, + {[]NullInt64(nil)}, + // BOOL / BOOL ARRAY + {true, NullBool{true, true}}, + {NullBool{}}, + {[]NullBool{{}, {true, true}, {false, true}}}, + {[]NullBool(nil)}, + // FLOAT64 / FLOAT64 ARRAY + {1.7, NullFloat64{1.7, true}}, + {NullFloat64{}}, + {[]NullFloat64{{}, {}, {1.7, true}}}, + {[]NullFloat64(nil)}, + // TIMESTAMP / TIMESTAMP ARRAY + {tm, NullTime{tm, true}}, + {NullTime{}}, + {[]NullTime{{}, {tm, true}}}, + {[]NullTime(nil)}, + // DATE / DATE ARRAY + {dt, NullDate{dt, true}}, + {NullDate{}}, + {[]NullDate{{}, {dt, true}}}, + {[]NullDate(nil)}, + // STRUCT ARRAY + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + nil, + &struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + NullInt64{3, true}, + NullFloat64{33.3, true}, + "three", + }, + nil, + }, + []NullRow{ + {}, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + vals: []*proto3.Value{ + intProto(3), + floatProto(33.3), + stringProto("three"), + }, + }, + Valid: true, + }, + {}, + }, + }, + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }(nil), + []NullRow(nil), + }, + } { + for j, want := range wants { + // Prepare Value vector to test Row.Columns. + if j == 0 { + vals = append(vals, reflect.New(reflect.TypeOf(want)).Interface()) + wantVals = append(wantVals, want) + } + // Column + gotp := reflect.New(reflect.TypeOf(want)) + err := row.Column(i, gotp.Interface()) + if err != nil { + t.Errorf("\t row.Column(%v, %T) returns error: %v, want nil", i, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t row.Column(%v, %T) retrives %v, want %v", i, gotp.Interface(), got, want) + } + // ColumnByName + gotp = reflect.New(reflect.TypeOf(want)) + err = row.ColumnByName(row.fields[i].Name, gotp.Interface()) + if err != nil { + t.Errorf("\t row.ColumnByName(%v, %T) returns error: %v, want nil", row.fields[i].Name, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t row.ColumnByName(%v, %T) retrives %v, want %v", row.fields[i].Name, gotp.Interface(), got, want) + } + } + } + // Test Row.Columns. + if err := row.Columns(vals...); err != nil { + t.Errorf("row.Columns() returns error: %v, want nil", err) + } + for i, want := range wantVals { + if got := reflect.Indirect(reflect.ValueOf(vals[i])).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t got %v(%T) for column[%v], want %v(%T)", got, got, row.fields[i].Name, want, want) + } + } +} + +// Test decoding into nil destination. 
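+// Both untyped nil and typed nil pointers must be rejected: for example,
+// row.Column(0, (*string)(nil)) should fail with errNilDst instead of
+// panicking on a nil dereference.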
+func TestNilDst(t *testing.T) { + for i, test := range []struct { + r *Row + dst interface{} + wantErr error + structDst interface{} + wantToStructErr error + }{ + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + nil, + errDecodeColumn(0, errNilDst(nil)), + nil, + errToStructArgType(nil), + }, + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + (*string)(nil), + errDecodeColumn(0, errNilDst((*string)(nil))), + (*struct{ STRING string })(nil), + errNilDst((*struct{ STRING string })(nil)), + }, + { + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + ), + ), + }, + }, + []*proto3.Value{listProto( + listProto(intProto(3), floatProto(33.3)), + )}, + }, + (*[]*struct { + Col1 int + Col2 float64 + })(nil), + errDecodeColumn(0, errNilDst((*[]*struct { + Col1 int + Col2 float64 + })(nil))), + (*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil), + errNilDst((*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil)), + }, + } { + if gotErr := test.r.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.Column() returns error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.r.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.ColumnByName() returns error %v, want %v", i, gotErr, test.wantErr) + } + // Row.Columns(T) should return nil on T == nil, otherwise, it should return test.wantErr. + wantColumnsErr := test.wantErr + if test.dst == nil { + wantColumnsErr = nil + } + if gotErr := test.r.Columns(test.dst); !reflect.DeepEqual(gotErr, wantColumnsErr) { + t.Errorf("%v: test.r.Columns() returns error %v, want %v", i, gotErr, wantColumnsErr) + } + if gotErr := test.r.ToStruct(test.structDst); !reflect.DeepEqual(gotErr, test.wantToStructErr) { + t.Errorf("%v: test.r.ToStruct() returns error %v, want %v", i, gotErr, test.wantToStructErr) + } + } +} + +// Test decoding NULL columns using Go types that don't support NULL. +func TestNullTypeErr(t *testing.T) { + var tm time.Time + ntoi := func(n string) int { + for i, f := range row.fields { + if f.Name == n { + return i + } + } + t.Errorf("cannot find column name %q in row", n) + return 0 + } + for _, test := range []struct { + colName string + dst interface{} + }{ + { + "NULL_STRING", + proto.String(""), + }, + { + "NULL_INT64", + proto.Int64(0), + }, + { + "NULL_BOOL", + proto.Bool(false), + }, + { + "NULL_FLOAT64", + proto.Float64(0.0), + }, + { + "NULL_TIMESTAMP", + &tm, + }, + { + "NULL_DATE", + &dt, + }, + } { + wantErr := errDecodeColumn(ntoi(test.colName), errDstNotForNull(test.dst)) + if gotErr := row.ColumnByName(test.colName, test.dst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("row.ColumnByName(%v) returns error %v, want %v", test.colName, gotErr, wantErr) + } + } +} + +// Test using wrong destination type in column decoders. +func TestColumnTypeErr(t *testing.T) { + // badDst cannot hold any of the column values. + badDst := &struct{}{} + for i, f := range row.fields { // For each of the columns, try to decode it into badDst. 
+		tc := f.Type.Code
+		var etc sppb.TypeCode
+		if strings.Contains(f.Name, "ARRAY") {
+			etc = f.Type.ArrayElementType.Code
+		}
+		wantErr := errDecodeColumn(i, errTypeMismatch(tc, etc, badDst))
+		if gotErr := row.Column(i, badDst); !reflect.DeepEqual(gotErr, wantErr) {
+			t.Errorf("Column(%v): decoding into destination with wrong type %T returns error %v, want %v",
+				i, badDst, gotErr, wantErr)
+		}
+		if gotErr := row.ColumnByName(f.Name, badDst); !reflect.DeepEqual(gotErr, wantErr) {
+			t.Errorf("ColumnByName(%v): decoding into destination with wrong type %T returns error %v, want %v",
+				f.Name, badDst, gotErr, wantErr)
+		}
+	}
+	wantErr := errDecodeColumn(1, errTypeMismatch(sppb.TypeCode_STRING, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, badDst))
+	// badDst is used to receive column 1.
+	vals := []interface{}{nil, badDst} // Row.Column() is expected to fail at column 1.
+	// Skip decoding the remaining columns by providing nils as the destinations.
+	for i := 2; i < len(row.fields); i++ {
+		vals = append(vals, nil)
+	}
+	if gotErr := row.Columns(vals...); !reflect.DeepEqual(gotErr, wantErr) {
+		t.Errorf("Columns(): decoding column 1 with wrong type %T returns error %v, want %v",
+			badDst, gotErr, wantErr)
+	}
+}
+
+// Test the handling of invalid column decoding requests which cannot be mapped to correct column(s).
+func TestInvalidColumnRequest(t *testing.T) {
+	for _, test := range []struct {
+		desc    string
+		f       func() error
+		wantErr error
+	}{
+		{
+			"Request column index is out of range",
+			func() error {
+				return row.Column(10000, &struct{}{})
+			},
+			errColIdxOutOfRange(10000, &row),
+		},
+		{
+			"Cannot find the named column",
+			func() error {
+				return row.ColumnByName("string", &struct{}{})
+			},
+			errColNotFound("string"),
+		},
+		{
+			"Not enough arguments to call row.Columns()",
+			func() error {
+				return row.Columns(nil, nil)
+			},
+			errNumOfColValue(2, &row),
+		},
+		{
+			"Call ColumnByName on row with duplicated column names",
+			func() error {
+				var s string
+				r := &Row{
+					[]*sppb.StructType_Field{
+						{"Val", stringType()},
+						{"Val", stringType()},
+					},
+					[]*proto3.Value{stringProto("value1"), stringProto("value2")},
+				}
+				return r.ColumnByName("Val", &s)
+			},
+			errDupColName("Val"),
+		},
+		{
+			"Call ToStruct on row with duplicated column names",
+			func() error {
+				s := &struct {
+					Val string
+				}{}
+				r := &Row{
+					[]*sppb.StructType_Field{
+						{"Val", stringType()},
+						{"Val", stringType()},
+					},
+					[]*proto3.Value{stringProto("value1"), stringProto("value2")},
+				}
+				return r.ToStruct(s)
+			},
+			errDupSpannerField("Val", &sppb.StructType{
+				Fields: []*sppb.StructType_Field{
+					{"Val", stringType()},
+					{"Val", stringType()},
+				},
+			}),
+		},
+		{
+			"Call ToStruct on a row with unnamed field",
+			func() error {
+				s := &struct {
+					Val string
+				}{}
+				r := &Row{
+					[]*sppb.StructType_Field{
+						{"", stringType()},
+					},
+					[]*proto3.Value{stringProto("value1")},
+				}
+				return r.ToStruct(s)
+			},
+			errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{{"", stringType()}}}, 0),
+		},
+	} {
+		if gotErr := test.f(); !reflect.DeepEqual(gotErr, test.wantErr) {
+			t.Errorf("%v: test.f() returns error %v, want %v", test.desc, gotErr, test.wantErr)
+		}
+	}
+}
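+
+// For reference, ToStruct matches columns to struct fields by name, with the
+// `spanner` field tag taking precedence over the field's own name, which is
+// what the cases below rely on. A sketch of a typical destination struct
+// (field and column names are illustrative):
+//
+//	type account struct {
+//		ID   int64  `spanner:"ACCOUNT_ID"`
+//		Name string `spanner:"NAME"`
+//	}
+//	var a account
+//	if err := row.ToStruct(&a); err != nil {
+//		// handle the decode error
+//	}
+
+// Test decoding the row with row.ToStruct into an invalid destination.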
+func TestToStructInvalidDst(t *testing.T) { + for _, test := range []struct { + desc string + dst interface{} + wantErr error + }{ + { + "Decode row as STRUCT into int32", + proto.Int(1), + errToStructArgType(proto.Int(1)), + }, + { + "Decode row as STRUCT to nil Go struct", + (*struct{})(nil), + errNilDst((*struct{})(nil)), + }, + { + "Decode row as STRUCT to Go struct with duplicated fields for the PK column", + &struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with no field for the PK column", + &struct { + PK1 string `spanner:"_STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"_STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with wrong type for the PK column", + &struct { + PK1 int64 `spanner:"STRING"` + }{}, + errDecodeStructField(&sppb.StructType{Fields: row.fields}, "STRING", + errTypeMismatch(sppb.TypeCode_STRING, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, proto.Int64(0))), + }, + } { + if gotErr := row.ToStruct(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: decoding:\ngot %v\nwant %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding a broken row. +func TestBrokenRow(t *testing.T) { + for i, test := range []struct { + row *Row + dst interface{} + wantErr error + }{ + { + // A row with no field. + &Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errFieldsMismatchVals(&Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }), + }, + { + // A row with nil field. + &Row{ + []*sppb.StructType_Field{nil}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errNilColType(0), + }, + { + // Field is not nil, but its type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + nil, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilSpannerType()), + }, + { + // Field is not nil, field type is not nil, but it is an array and its array element type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + }, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilArrElemType(&sppb.Type{Code: sppb.TypeCode_ARRAY})), + }, + { + // Field specifies valid type, value is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{nil}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errNilSrc()), + }, + { + // Field specifies INT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies INT64 type, but value is for Number type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "String")), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + proto.Int64(0), + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + &NullInt64{}, + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies STRING type, but value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies STRING type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{listProto(stringProto("value"))}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(listProto(stringProto("value")), "String")), + }, + { + // Field specifies FLOAT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_NumberValue)(nil)}}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_NumberValue)(nil)}, "Number")), + }, + { + // Field specifies FLOAT64 type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(boolProto(true), "Number")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + &NullFloat64{}, + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + proto.Float64(0), + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies BYTES type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies BYTES type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies BYTES type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{stringProto("&&")}, + }, + &[]byte{}, + errDecodeColumn(0, errBadEncoding(stringProto("&&"), func() error { + _, err := base64.StdEncoding.DecodeString("&&") + return err + }())), + }, + { + // Field specifies BOOL type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_BoolValue)(nil)}}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_BoolValue)(nil)}, "Bool")), + }, + { + // Field specifies BOOL type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{stringProto("false")}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(stringProto("false"), "Bool")), + }, + { + // Field specifies TIMESTAMP type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies TIMESTAMP type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies TIMESTAMP type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := time.Parse(time.RFC3339Nano, "junk") + return err + }())), + }, + { + // Field specifies DATE type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies DATE type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies DATE type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := civil.ParseDate("junk") + return err + }())), + }, + + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errNilListValue("INT64")), + }, + { + // Field specifies ARRAY type, but value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "INT64", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilListValue("STRING")), + }, + { + // Field specifies ARRAY type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(boolProto(true), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullString{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "STRING", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errNilListValue("FLOAT64")), + }, + { + // Field specifies ARRAY type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{stringProto("value")}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(stringProto("value"), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "FLOAT64", errSrcVal(boolProto(true), "Number"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[][]byte{}, + errDecodeColumn(0, errNilListValue("BYTES")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[][]byte{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BYTES", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errNilListValue("BOOL")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullBool{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BOOL", errSrcVal(floatProto(1.0), "Bool"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errNilListValue("TIMESTAMP")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullTime{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "TIMESTAMP", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errNilListValue("DATE")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullDate{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "DATE", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNotStructElement(0, bytesProto([]byte("value")))), + }, + { + // Field specifies ARRAY type, value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, bytesProto([]byte("value")), + "STRUCT", errSrcVal(bytesProto([]byte("value")), "List"))), + }, + { + // Field specifies ARRAY, but is having nil StructType. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + &sppb.Type{Code: sppb.TypeCode_STRUCT}, + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), floatProto(2.0), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, listProto(intProto(1), floatProto(2.0), stringProto("3")), + "STRUCT", errNilSpannerStructType())), + }, + { + // Field specifies ARRAY, but the second struct value is for BOOL type instead of FLOAT64. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), boolProto(true), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn( + 0, + errDecodeArrayElement( + 0, listProto(intProto(1), boolProto(true), stringProto("3")), "STRUCT", + errDecodeStructField( + &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + }, + "Col2", + errSrcVal(boolProto(true), "Number"), + ), + ), + ), + }, + } { + if gotErr := test.row.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Column(0) got error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.row.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.ColumnByName(%q) got error %v, want %v", i, "Col0", gotErr, test.wantErr) + } + if gotErr := test.row.Columns(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Columns(%T) got error %v, want %v", i, test.dst, gotErr, test.wantErr) + } + } +} + +// Test Row.ToStruct(). 
+func TestToStruct(t *testing.T) { + s := []struct { + // STRING / STRING ARRAY + PrimaryKey string `spanner:"STRING"` + NullString NullString `spanner:"NULL_STRING"` + StringArray []NullString `spanner:"STRING_ARRAY"` + NullStringArray []NullString `spanner:"NULL_STRING_ARRAY"` + // BYTES / BYTES ARRAY + Bytes []byte `spanner:"BYTES"` + NullBytes []byte `spanner:"NULL_BYTES"` + BytesArray [][]byte `spanner:"BYTES_ARRAY"` + NullBytesArray [][]byte `spanner:"NULL_BYTES_ARRAY"` + // INT64 / INT64 ARRAY + Int64 int64 `spanner:"INT64"` + NullInt64 NullInt64 `spanner:"NULL_INT64"` + Int64Array []NullInt64 `spanner:"INT64_ARRAY"` + NullInt64Array []NullInt64 `spanner:"NULL_INT64_ARRAY"` + // BOOL / BOOL ARRAY + Bool bool `spanner:"BOOL"` + NullBool NullBool `spanner:"NULL_BOOL"` + BoolArray []NullBool `spanner:"BOOL_ARRAY"` + NullBoolArray []NullBool `spanner:"NULL_BOOL_ARRAY"` + // FLOAT64 / FLOAT64 ARRAY + Float64 float64 `spanner:"FLOAT64"` + NullFloat64 NullFloat64 `spanner:"NULL_FLOAT64"` + Float64Array []NullFloat64 `spanner:"FLOAT64_ARRAY"` + NullFloat64Array []NullFloat64 `spanner:"NULL_FLOAT64_ARRAY"` + // TIMESTAMP / TIMESTAMP ARRAY + Timestamp time.Time `spanner:"TIMESTAMP"` + NullTimestamp NullTime `spanner:"NULL_TIMESTAMP"` + TimestampArray []NullTime `spanner:"TIMESTAMP_ARRAY"` + NullTimestampArray []NullTime `spanner:"NULL_TIMESTAMP_ARRAY"` + // DATE / DATE ARRAY + Date civil.Date `spanner:"DATE"` + NullDate NullDate `spanner:"NULL_DATE"` + DateArray []NullDate `spanner:"DATE_ARRAY"` + NullDateArray []NullDate `spanner:"NULL_DATE_ARRAY"` + + // STRUCT ARRAY + StructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"STRUCT_ARRAY"` + NullStructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"NULL_STRUCT_ARRAY"` + }{ + {}, // got + { + // STRING / STRING ARRAY + "value", + NullString{}, + []NullString{{"value1", true}, {}, {"value3", true}}, + []NullString(nil), + // BYTES / BYTES ARRAY + []byte("value"), + []byte(nil), + [][]byte{[]byte("value1"), nil, []byte("value3")}, + [][]byte(nil), + // INT64 / INT64 ARRAY + int64(17), + NullInt64{}, + []NullInt64{{int64(1), true}, {int64(2), true}, {}}, + []NullInt64(nil), + // BOOL / BOOL ARRAY + true, + NullBool{}, + []NullBool{{}, {true, true}, {false, true}}, + []NullBool(nil), + // FLOAT64 / FLOAT64 ARRAY + 1.7, + NullFloat64{}, + []NullFloat64{{}, {}, {1.7, true}}, + []NullFloat64(nil), + // TIMESTAMP / TIMESTAMP ARRAY + tm, + NullTime{}, + []NullTime{{}, {tm, true}}, + []NullTime(nil), + // DATE / DATE ARRAY + dt, + NullDate{}, + []NullDate{{}, {dt, true}}, + []NullDate(nil), + // STRUCT ARRAY + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }{ + nil, + &struct { + Col1 int64 + Col2 float64 + Col3 string + }{3, 33.3, "three"}, + nil, + }, + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }(nil), + }, // want + } + err := row.ToStruct(&s[0]) + if err != nil { + t.Errorf("row.ToStruct() returns error: %v, want nil", err) + } + if !reflect.DeepEqual(s[0], s[1]) { + t.Errorf("row.ToStruct() fetches struct %v, want %v", s[0], s[1]) + } +} + +// Test helpers for getting column names. +func TestColumnNameAndIndex(t *testing.T) { + // Test Row.Size(). + if rs := row.Size(); rs != len(row.fields) { + t.Errorf("row.Size() returns %v, want %v", rs, len(row.fields)) + } + // Test Row.Size() on empty Row. 
+ if rs := (&Row{}).Size(); rs != 0 { + t.Errorf("empty_row.Size() returns %v, want %v", rs, 0) + } + // Test Row.ColumnName() + for i, col := range row.fields { + if cn := row.ColumnName(i); cn != col.Name { + t.Errorf("row.ColumnName(%v) returns %q, want %q", i, cn, col.Name) + } + goti, err := row.ColumnIndex(col.Name) + if err != nil { + t.Errorf("ColumnIndex(%q) error %v", col.Name, err) + continue + } + if goti != i { + t.Errorf("ColumnIndex(%q) = %d, want %d", col.Name, goti, i) + } + } + // Test Row.ColumnName on empty Row. + if cn := (&Row{}).ColumnName(0); cn != "" { + t.Errorf("empty_row.ColumnName(%v) returns %q, want %q", 0, cn, "") + } + // Test Row.ColumnIndex on empty Row. + if _, err := (&Row{}).ColumnIndex(""); err == nil { + t.Error("empty_row.ColumnIndex returns nil, want error") + } +} + +func TestNewRow(t *testing.T) { + for _, test := range []struct { + names []string + values []interface{} + want *Row + wantErr error + }{ + { + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{}, + values: []interface{}{}, + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{"a", "b"}, + values: []interface{}{}, + want: nil, + wantErr: errNamesValuesMismatch([]string{"a", "b"}, []interface{}{}), + }, + { + names: []string{"a", "b", "c"}, + values: []interface{}{5, "abc", GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}}, + want: &Row{ + []*sppb.StructType_Field{ + {"a", intType()}, + {"b", stringType()}, + {"c", listType(intType())}, + }, + []*proto3.Value{ + intProto(5), + stringProto("abc"), + listProto(intProto(91), nullProto(), intProto(87)), + }, + }, + }, + } { + got, err := NewRow(test.names, test.values) + if !reflect.DeepEqual(err, test.wantErr) { + t.Errorf("NewRow(%v,%v).err = %s, want %s", test.names, test.values, err, test.wantErr) + continue + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("NewRow(%v,%v) = %s, want %s", test.names, test.values, got, test.want) + continue + } + } +} + +func BenchmarkColumn(b *testing.B) { + var s string + for i := 0; i < b.N; i++ { + if err := row.Column(0, &s); err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/session.go b/vendor/cloud.google.com/go/spanner/session.go new file mode 100644 index 0000000..6676213 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/session.go @@ -0,0 +1,1054 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "container/heap" + "container/list" + "fmt" + "math/rand" + "strings" + "sync" + "time" + + log "github.com/golang/glog" + "golang.org/x/net/context" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// sessionHandle is an interface for transactions to access Cloud Spanner sessions safely. It is generated by sessionPool.take(). 
+type sessionHandle struct { + // mu guarantees that the inner session object is returned / destroyed only once. + mu sync.Mutex + // session is a pointer to a session object. Transactions never need to access it directly. + session *session +} + +// recycle gives the inner session object back to its home session pool. It is safe to call recycle multiple times but only the first one would take effect. +func (sh *sessionHandle) recycle() { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled. + return + } + sh.session.recycle() + sh.session = nil +} + +// getID gets the Cloud Spanner session ID from the internal session object. getID returns empty string if the sessionHandle is nil or the inner session +// object has been released by recycle / destroy. +func (sh *sessionHandle) getID() string { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled/destroyed. + return "" + } + return sh.session.getID() +} + +// getClient gets the Cloud Spanner RPC client associated with the session ID in sessionHandle. +func (sh *sessionHandle) getClient() sppb.SpannerClient { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.client +} + +// getMetadata returns the metadata associated with the session in sessionHandle. +func (sh *sessionHandle) getMetadata() metadata.MD { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.md +} + +// getTransactionID returns the transaction id in the session if available. +func (sh *sessionHandle) getTransactionID() transactionID { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.tx +} + +// destroy destroys the inner session object. It is safe to call destroy multiple times and only the first call would attempt to +// destroy the inner session object. +func (sh *sessionHandle) destroy() { + sh.mu.Lock() + s := sh.session + sh.session = nil + sh.mu.Unlock() + if s == nil { + // sessionHandle has already been destroyed. + return + } + s.destroy(false) +} + +// session wraps a Cloud Spanner session ID through which transactions are created and executed. +type session struct { + // client is the RPC channel to Cloud Spanner. It is set only once during session's creation. + client sppb.SpannerClient + // id is the unique id of the session in Cloud Spanner. It is set only once during session's creation. + id string + // pool is the session's home session pool where it was created. It is set only once during session's creation. + pool *sessionPool + // createTime is the timestamp of the session's creation. It is set only once during session's creation. + createTime time.Time + + // mu protects the following fields from concurrent access: both healthcheck workers and transactions can modify them. + mu sync.Mutex + // valid marks the validity of a session. + valid bool + // hcIndex is the index of the session inside the global healthcheck queue. If hcIndex < 0, session has been unregistered from the queue. + hcIndex int + // idleList is the linkedlist node which links the session to its home session pool's idle list. If idleList == nil, the + // session is not in idle list. + idleList *list.Element + // nextCheck is the timestamp of next scheduled healthcheck of the session. It is maintained by the global health checker. 
+	nextCheck time.Time
+	// checkingHealth is true if currently this session is being processed by a health checker. Must be modified under the health checker lock.
+	checkingHealth bool
+	// md is the metadata to be sent with each request.
+	md metadata.MD
+	// tx contains the transaction id if the session has been prepared for write.
+	tx transactionID
+}
+
+// isValid returns true if the session is still valid for use.
+func (s *session) isValid() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.valid
+}
+
+// isWritePrepared returns true if the session is prepared for write.
+func (s *session) isWritePrepared() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tx != nil
+}
+
+// String implements fmt.Stringer for session.
+func (s *session) String() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return fmt.Sprintf("<id=%v, hcIdx=%v, idleList=%p, valid=%v, create=%v, nextcheck=%v>",
+		s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck)
+}
+
+// ping verifies if the session is still alive in Cloud Spanner.
+func (s *session) ping() error {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	return runRetryable(ctx, func(ctx context.Context) error {
+		_, err := s.client.GetSession(contextWithOutgoingMetadata(ctx, s.pool.md), &sppb.GetSessionRequest{Name: s.getID()}) // s.getID is safe even when s is invalid.
+		return err
+	})
+}
+
+// setHcIndex atomically sets the session's index in the healthcheck queue and returns the old index.
+func (s *session) setHcIndex(i int) int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	oi := s.hcIndex
+	s.hcIndex = i
+	return oi
+}
+
+// setIdleList atomically sets the session's idle list link and returns the old link.
+func (s *session) setIdleList(le *list.Element) *list.Element {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	old := s.idleList
+	s.idleList = le
+	return old
+}
+
+// invalidate marks a session as invalid and returns the old validity.
+func (s *session) invalidate() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	ov := s.valid
+	s.valid = false
+	return ov
+}
+
+// setNextCheck sets the timestamp for the next healthcheck on the session.
+func (s *session) setNextCheck(t time.Time) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.nextCheck = t
+}
+
+// setTransactionID sets the transaction id in the session.
+func (s *session) setTransactionID(tx transactionID) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.tx = tx
+}
+
+// getID returns the session ID which uniquely identifies the session in Cloud Spanner.
+func (s *session) getID() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.id
+}
+
+// getHcIndex returns the session's index into the global healthcheck priority queue.
+func (s *session) getHcIndex() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.hcIndex
+}
+
+// getIdleList returns the session's link in its home session pool's idle list.
+func (s *session) getIdleList() *list.Element {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.idleList
+}
+
+// getNextCheck returns the timestamp for the next healthcheck on the session.
+func (s *session) getNextCheck() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.nextCheck
+}
+
+// recycle turns the session back to its home session pool.
+func (s *session) recycle() {
+	s.setTransactionID(nil)
+	if !s.pool.recycle(s) {
+		// s is rejected by its home session pool because it expired and the session pool currently has enough open sessions.
+		s.destroy(false)
+	}
+}
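+
+// Taken together, the accessors above support the typical lifecycle: a
+// transaction takes a sessionHandle from the pool, issues RPCs through the
+// wrapped session, and recycles it when done. A minimal sketch (assuming a
+// pool p; error handling elided):
+//
+//	sh, err := p.take(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer sh.recycle() // return the session to the pool's idle list
+//	client := sh.getClient()
+//	_ = client // issue RPCs against the session named by sh.getID()
+
+// destroy removes the session from its home session pool, healthcheck queue and Cloud Spanner service.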
+func (s *session) destroy(isExpire bool) bool {
+	// Remove s from session pool.
+	if !s.pool.remove(s, isExpire) {
+		return false
+	}
+	// Unregister s from healthcheck queue.
+	s.pool.hc.unregister(s)
+	// Remove s from Cloud Spanner service.
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	// Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session,
+	// it will be eventually garbage collected by Cloud Spanner.
+	err := runRetryable(ctx, func(ctx context.Context) error {
+		_, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()})
+		return e
+	})
+	if err != nil && log.V(2) {
+		log.Warningf("Failed to delete session %v. Error: %v", s.getID(), err)
+	}
+	return true
+}
+
+// prepareForWrite prepares the session for write if it is not already in that state.
+func (s *session) prepareForWrite(ctx context.Context) error {
+	if s.isWritePrepared() {
+		return nil
+	}
+	tx, err := beginTransaction(ctx, s.getID(), s.client)
+	if err != nil {
+		return err
+	}
+	s.setTransactionID(tx)
+	return nil
+}
+
+// SessionPoolConfig stores configurations of a session pool.
+type SessionPoolConfig struct {
+	// getRPCClient is the caller-supplied method for getting a gRPC client to Cloud Spanner; it allows the session pool to take advantage of client pooling.
+	getRPCClient func() (sppb.SpannerClient, error)
+	// MaxOpened is the maximum number of opened sessions allowed by the
+	// session pool. Defaults to NumChannels * 100.
+	MaxOpened uint64
+	// MinOpened is the minimum number of opened sessions that the session pool
+	// tries to maintain. The session pool won't continue to expire sessions if the
+	// number of opened sessions drops below MinOpened. However, if a session is
+	// found to be broken, it will still be evicted from the session pool, so it is
+	// possible that the number of opened sessions drops below MinOpened.
+	MinOpened uint64
+	// MaxIdle is the maximum number of idle sessions the pool is allowed to keep. Defaults to 0.
+	MaxIdle uint64
+	// MaxBurst is the maximum number of concurrent session creation requests. Defaults to 10.
+	MaxBurst uint64
+	// WriteSessions is the fraction of sessions we try to keep prepared for write.
+	WriteSessions float64
+	// HealthCheckWorkers is the number of workers used by the health checker for this pool.
+	HealthCheckWorkers int
+	// HealthCheckInterval is how often the health checker pings a session. Defaults to 5 min.
+	HealthCheckInterval time.Duration
+	// healthCheckSampleInterval is how often the health checker samples live sessions (for use in maintaining session pool size). Defaults to 1 min.
+	healthCheckSampleInterval time.Duration
+}
+
+// errNoRPCGetter returns an error for a SessionPoolConfig that is missing the getRPCClient method.
+func errNoRPCGetter() error {
+	return spannerErrorf(codes.InvalidArgument, "require SessionPoolConfig.getRPCClient != nil, got nil")
+}
+
+// errMinOpenedGTMaxOpened returns an error when SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened and SessionPoolConfig.MaxOpened is set.
+func errMinOpenedGTMaxOpened(spc *SessionPoolConfig) error {
+	return spannerErrorf(codes.InvalidArgument,
+		"require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", spc.MaxOpened, spc.MinOpened)
+}
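+
+// As an illustration, a fully specified configuration might look like the
+// sketch below (the values are arbitrary examples, not recommendations):
+//
+//	cfg := SessionPoolConfig{
+//		MinOpened:           10,  // keep at least 10 sessions open
+//		MaxOpened:           400, // hard cap on open sessions
+//		MaxIdle:             50,  // allow up to 50 idle sessions
+//		MaxBurst:            10,  // at most 10 concurrent CreateSession calls
+//		WriteSessions:       0.2, // aim to keep ~20% of sessions write-prepared
+//		HealthCheckWorkers:  4,
+//		HealthCheckInterval: 5 * time.Minute,
+//	}
+
+// validate verifies that the SessionPoolConfig is good for use.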
+func (spc *SessionPoolConfig) validate() error {
+	if spc.getRPCClient == nil {
+		return errNoRPCGetter()
+	}
+	if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 {
+		return errMinOpenedGTMaxOpened(spc)
+	}
+	return nil
+}
+
+// sessionPool creates and caches Cloud Spanner sessions.
+type sessionPool struct {
+	// mu protects sessionPool from concurrent access.
+	mu sync.Mutex
+	// valid marks the validity of the session pool.
+	valid bool
+	// db is the database name that all sessions in the pool are associated with.
+	db string
+	// idleList caches idle session IDs. Session IDs in this list can be allocated for use.
+	idleList list.List
+	// idleWriteList caches idle sessions which have been prepared for write.
+	idleWriteList list.List
+	// mayGetSession is for broadcasting that session retrieval/creation may proceed.
+	mayGetSession chan struct{}
+	// numOpened is the total number of open sessions from the session pool.
+	numOpened uint64
+	// createReqs is the number of ongoing session creation requests.
+	createReqs uint64
+	// prepareReqs is the number of ongoing session preparation requests.
+	prepareReqs uint64
+	// SessionPoolConfig is the configuration of the session pool.
+	SessionPoolConfig
+	// md is the metadata to be sent with each request.
+	md metadata.MD
+	// hc is the health checker.
+	hc *healthChecker
+}
+
+// newSessionPool creates a new session pool.
+func newSessionPool(db string, config SessionPoolConfig, md metadata.MD) (*sessionPool, error) {
+	if err := config.validate(); err != nil {
+		return nil, err
+	}
+	pool := &sessionPool{
+		db:                db,
+		valid:             true,
+		mayGetSession:     make(chan struct{}),
+		SessionPoolConfig: config,
+		md:                md,
+	}
+	if config.HealthCheckWorkers == 0 {
+		// With 10 workers and assuming an average latency of 5 ms for BeginTransaction, we will be able to
+		// prepare 2000 tx/sec in advance. If the rate of takeWriteSession is more than that, it will
+		// degrade to doing BeginTransaction inline.
+		// TODO: consider resizing the worker pool dynamically according to the load.
+		config.HealthCheckWorkers = 10
+	}
+	if config.HealthCheckInterval == 0 {
+		config.HealthCheckInterval = 5 * time.Minute
+	}
+	if config.healthCheckSampleInterval == 0 {
+		config.healthCheckSampleInterval = time.Minute
+	}
+	// On a GCE VM within the same region, a healthcheck ping takes on average 10ms to finish. Given a 5 minute interval and
+	// 10 healthcheck workers, a healthChecker can effectively maintain 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions.
+	pool.hc = newHealthChecker(config.HealthCheckInterval, config.HealthCheckWorkers, config.healthCheckSampleInterval, pool)
+	close(pool.hc.ready)
+	return pool, nil
+}
+
+// isValid checks if the session pool is still valid.
+func (p *sessionPool) isValid() bool {
+	if p == nil {
+		return false
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.valid
+}
+
+// close marks the session pool as closed.
+func (p *sessionPool) close() {
+	if p == nil {
+		return
+	}
+	p.mu.Lock()
+	if !p.valid {
+		p.mu.Unlock()
+		return
+	}
+	p.valid = false
+	p.mu.Unlock()
+	p.hc.close()
+	// Destroy all the sessions.
+	p.hc.mu.Lock()
+	allSessions := make([]*session, len(p.hc.queue.sessions))
+	copy(allSessions, p.hc.queue.sessions)
+	p.hc.mu.Unlock()
+	for _, s := range allSessions {
+		s.destroy(false)
+	}
+}
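+
+// The mayGetSession field implements a broadcast: waiters block on the current
+// channel, and any state change closes it and installs a fresh one, waking all
+// waiters at once. The idiom in isolation (a sketch of the pattern used by
+// take and takeWriteSession below):
+//
+//	p.mu.Lock()
+//	mayGetSession := p.mayGetSession // capture the current generation
+//	p.mu.Unlock()
+//	select {
+//	case <-ctx.Done():
+//		// timed out or canceled while waiting
+//	case <-mayGetSession:
+//		// pool state changed; re-evaluate and retry
+//	}
+
+// errInvalidSessionPool returns error for using an invalid session pool.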
+func errInvalidSessionPool() error { + return spannerErrorf(codes.InvalidArgument, "invalid session pool") +} + +// errGetSessionTimeout returns error for context timeout during sessionPool.take(). +func errGetSessionTimeout() error { + return spannerErrorf(codes.Canceled, "timeout / context canceled during getting session") +} + +// shouldPrepareWrite returns true if we should prepare more sessions for write. +func (p *sessionPool) shouldPrepareWrite() bool { + return float64(p.numOpened)*p.WriteSessions > float64(p.idleWriteList.Len()+int(p.prepareReqs)) +} + +func (p *sessionPool) createSession(ctx context.Context) (*session, error) { + doneCreate := func(done bool) { + p.mu.Lock() + if !done { + // Session creation failed, give budget back. + p.numOpened-- + } + p.createReqs-- + // Notify other waiters blocking on session creation. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + p.mu.Unlock() + } + sc, err := p.getRPCClient() + if err != nil { + doneCreate(false) + return nil, err + } + var s *session + err = runRetryable(ctx, func(ctx context.Context) error { + sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: p.db}) + if e != nil { + return e + } + // If no error, construct the new session. + s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md} + p.hc.register(s) + return nil + }) + if err != nil { + doneCreate(false) + // Should return error directly because of the previous retries on CreateSession RPC. + return nil, err + } + doneCreate(true) + return s, nil +} + +func (p *sessionPool) isHealthy(s *session) bool { + if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) { + // TODO: figure out if we need to schedule a new healthcheck worker here. + if err := s.ping(); shouldDropSession(err) { + // The session is already bad, continue to fetch/create a new one. + s.destroy(false) + return false + } + p.hc.scheduledHC(s) + } + return true +} + +// take returns a cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned by take should be used for read operations. +func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) { + ctx = contextWithOutgoingMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. + s = p.idleList.Remove(p.idleList.Front()).(*session) + } else if p.idleWriteList.Len() > 0 { + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + return &sessionHandle{session: s}, nil + } + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. 
+ if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + select { + case <-ctx.Done(): + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + return nil, toSpannerError(err) + } + return &sessionHandle{session: s}, nil + } +} + +// takeWriteSession returns a write prepared cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned should be used for read write transactions. +func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, error) { + ctx = contextWithOutgoingMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleWriteList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + } else if p.idleList.Len() > 0 { + s = p.idleList.Remove(p.idleList.Front()).(*session) + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + } else { + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. + if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + select { + case <-ctx.Done(): + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + return nil, toSpannerError(err) + } + } + if !s.isWritePrepared() { + if err = s.prepareForWrite(ctx); err != nil { + s.recycle() + return nil, toSpannerError(err) + } + } + return &sessionHandle{session: s}, nil + } +} + +// recycle puts session s back to the session pool's idle list, it returns true if the session pool successfully recycles session s. +func (p *sessionPool) recycle(s *session) bool { + p.mu.Lock() + defer p.mu.Unlock() + if !s.isValid() || !p.valid { + // Reject the session if session is invalid or pool itself is invalid. + return false + } + // Put session at the back of the list to round robin for load balancing across channels. + if s.isWritePrepared() { + s.setIdleList(p.idleWriteList.PushBack(s)) + } else { + s.setIdleList(p.idleList.PushBack(s)) + } + // Broadcast that a session has been returned to idle list. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + return true +} + +// remove atomically removes session s from the session pool and invalidates s. +// If isExpire == true, the removal is triggered by session expiration and in such cases, only idle sessions can be removed. 
+func (p *sessionPool) remove(s *session, isExpire bool) bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if isExpire && (p.numOpened <= p.MinOpened || s.getIdleList() == nil) {
+		// Don't expire the session if it is not in the idle list (in use), or if the number of open sessions would drop below p.MinOpened.
+		return false
+	}
+	ol := s.setIdleList(nil)
+	// If the session is in the idle list, remove it.
+	if ol != nil {
+		// Remove from whichever list it is in.
+		p.idleList.Remove(ol)
+		p.idleWriteList.Remove(ol)
+	}
+	if s.invalidate() {
+		// Decrease the number of opened sessions.
+		p.numOpened--
+		// Broadcast that a session has been destroyed.
+		close(p.mayGetSession)
+		p.mayGetSession = make(chan struct{})
+		return true
+	}
+	return false
+}
+
+// hcHeap implements heap.Interface. It is used to create the priority queue for session healthchecks.
+type hcHeap struct {
+	sessions []*session
+}
+
+// Len implements heap.Interface.Len.
+func (h hcHeap) Len() int {
+	return len(h.sessions)
+}
+
+// Less implements heap.Interface.Less.
+func (h hcHeap) Less(i, j int) bool {
+	return h.sessions[i].getNextCheck().Before(h.sessions[j].getNextCheck())
+}
+
+// Swap implements heap.Interface.Swap.
+func (h hcHeap) Swap(i, j int) {
+	h.sessions[i], h.sessions[j] = h.sessions[j], h.sessions[i]
+	h.sessions[i].setHcIndex(i)
+	h.sessions[j].setHcIndex(j)
+}
+
+// Push implements heap.Interface.Push.
+func (h *hcHeap) Push(s interface{}) {
+	ns := s.(*session)
+	ns.setHcIndex(len(h.sessions))
+	h.sessions = append(h.sessions, ns)
+}
+
+// Pop implements heap.Interface.Pop.
+func (h *hcHeap) Pop() interface{} {
+	old := h.sessions
+	n := len(old)
+	s := old[n-1]
+	h.sessions = old[:n-1]
+	s.setHcIndex(-1)
+	return s
+}
+
+// healthChecker performs periodical healthchecks on registered sessions.
+type healthChecker struct {
+	// mu protects concurrent access to the healthcheck queue.
+	mu sync.Mutex
+	// queue is the priority queue for session healthchecks. Sessions with lower nextCheck rank higher in the queue.
+	queue hcHeap
+	// interval is the average interval between two healthchecks on a session.
+	interval time.Duration
+	// workers is the number of concurrent healthcheck workers.
+	workers int
+	// waitWorkers waits for all healthcheck workers to exit.
+	waitWorkers sync.WaitGroup
+	// pool is the underlying session pool.
+	pool *sessionPool
+	// sampleInterval is the interval of sampling by the maintainer.
+	sampleInterval time.Duration
+	// ready is used to signal that the maintainer can start running.
+	ready chan struct{}
+	// done is used to signal that the health checker should be closed.
+	done chan struct{}
+	// once is used for closing channel done only once.
+	once sync.Once
+}
+
+// newHealthChecker initializes a new instance of healthChecker.
+func newHealthChecker(interval time.Duration, workers int, sampleInterval time.Duration, pool *sessionPool) *healthChecker {
+	if workers <= 0 {
+		workers = 1
+	}
+	hc := &healthChecker{
+		interval:       interval,
+		workers:        workers,
+		pool:           pool,
+		sampleInterval: sampleInterval,
+		ready:          make(chan struct{}),
+		done:           make(chan struct{}),
+	}
+	hc.waitWorkers.Add(1)
+	go hc.maintainer()
+	for i := 1; i <= hc.workers; i++ {
+		hc.waitWorkers.Add(1)
+		go hc.worker(i)
+	}
+	return hc
+}
+
+// close closes the healthChecker and waits for all healthcheck workers to exit.
+func (hc *healthChecker) close() {
+	hc.once.Do(func() { close(hc.done) })
+	hc.waitWorkers.Wait()
+}
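+
+// hcHeap above is a min-heap keyed on each session's nextCheck time, so the
+// session that is due soonest is always at queue.sessions[0]. A toy sketch of
+// the ordering (s1 and s2 are hypothetical sessions with nextCheck one and two
+// minutes from now, respectively):
+//
+//	var q hcHeap
+//	heap.Push(&q, s2)                // due in 2m
+//	heap.Push(&q, s1)                // due in 1m
+//	first := heap.Pop(&q).(*session) // first == s1, it is due sooner
+
+// isClosing checks if a healthChecker is already closing.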
+func (hc *healthChecker) isClosing() bool { + select { + case <-hc.done: + return true + default: + return false + } +} + +// getInterval gets the healthcheck interval. +func (hc *healthChecker) getInterval() time.Duration { + hc.mu.Lock() + defer hc.mu.Unlock() + return hc.interval +} + +// scheduledHCLocked schedules next healthcheck on session s with the assumption that hc.mu is being held. +func (hc *healthChecker) scheduledHCLocked(s *session) { + // The next healthcheck will be scheduled after [interval*0.5, interval*1.5) nanoseconds. + nsFromNow := rand.Int63n(int64(hc.interval)) + int64(hc.interval)/2 + s.setNextCheck(time.Now().Add(time.Duration(nsFromNow))) + if hi := s.getHcIndex(); hi != -1 { + // Session is still being tracked by healthcheck workers. + heap.Fix(&hc.queue, hi) + } +} + +// scheduledHC schedules next healthcheck on session s. It is safe to be called concurrently. +func (hc *healthChecker) scheduledHC(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + hc.scheduledHCLocked(s) +} + +// register registers a session with healthChecker for periodical healthcheck. +func (hc *healthChecker) register(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + hc.scheduledHCLocked(s) + heap.Push(&hc.queue, s) +} + +// unregister unregisters a session from healthcheck queue. +func (hc *healthChecker) unregister(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + oi := s.setHcIndex(-1) + if oi >= 0 { + heap.Remove(&hc.queue, oi) + } +} + +// markDone marks that health check for session has been performed. +func (hc *healthChecker) markDone(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + s.checkingHealth = false +} + +// healthCheck checks the health of the session and pings it if needed. +func (hc *healthChecker) healthCheck(s *session) { + defer hc.markDone(s) + if !s.pool.isValid() { + // Session pool is closed, perform a garbage collection. + s.destroy(false) + return + } + if err := s.ping(); shouldDropSession(err) { + // Ping failed, destroy the session. + s.destroy(false) + } +} + +// worker performs the healthcheck on sessions in healthChecker's priority queue. +func (hc *healthChecker) worker(i int) { + // Returns a session which we should ping to keep it alive. + getNextForPing := func() *session { + hc.pool.mu.Lock() + defer hc.pool.mu.Unlock() + hc.mu.Lock() + defer hc.mu.Unlock() + if hc.queue.Len() <= 0 { + // Queue is empty. + return nil + } + s := hc.queue.sessions[0] + if s.getNextCheck().After(time.Now()) && hc.pool.valid { + // All sessions have been checked recently. + return nil + } + hc.scheduledHCLocked(s) + if !s.checkingHealth { + s.checkingHealth = true + return s + } + return nil + } + + // Returns a session which we should prepare for write. + getNextForTx := func() *session { + hc.pool.mu.Lock() + defer hc.pool.mu.Unlock() + if hc.pool.shouldPrepareWrite() { + if hc.pool.idleList.Len() > 0 && hc.pool.valid { + hc.mu.Lock() + defer hc.mu.Unlock() + if hc.pool.idleList.Front().Value.(*session).checkingHealth { + return nil + } + session := hc.pool.idleList.Remove(hc.pool.idleList.Front()).(*session) + session.checkingHealth = true + hc.pool.prepareReqs++ + return session + } + } + return nil + } + + for { + if hc.isClosing() { + // Exit when the pool has been closed and all sessions have been destroyed + // or when health checker has been closed. 
+			hc.waitWorkers.Done()
+			return
+		}
+		ws := getNextForTx()
+		if ws != nil {
+			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			err := ws.prepareForWrite(contextWithOutgoingMetadata(ctx, hc.pool.md))
+			cancel()
+			if err != nil {
+				// Skip handling the prepare error; the session can be prepared in the next cycle.
+				log.Warningf("Failed to prepare session, error: %v", toSpannerError(err))
+			}
+			hc.pool.recycle(ws)
+			hc.pool.mu.Lock()
+			hc.pool.prepareReqs--
+			hc.pool.mu.Unlock()
+			hc.markDone(ws)
+		}
+		rs := getNextForPing()
+		if rs == nil {
+			if ws == nil {
+				// No work to be done, so sleep to avoid burning CPU.
+				pause := int64(100 * time.Millisecond)
+				if pause > int64(hc.interval) {
+					pause = int64(hc.interval)
+				}
+				select {
+				case <-time.After(time.Duration(rand.Int63n(pause) + pause/2)):
+					break
+				case <-hc.done:
+					break
+				}
+
+			}
+			continue
+		}
+		hc.healthCheck(rs)
+	}
+}
+
+// maintainer maintains maxSessionsInUse over a window of windowSize * sampleInterval.
+// Based on this information, it replenishes the pool when more sessions are needed
+// and shrinks it again once a usage surge has passed.
+func (hc *healthChecker) maintainer() {
+	// Wait so that the pool is ready.
+	<-hc.ready
+
+	var (
+		windowSize uint64 = 10
+		iteration  uint64
+		timeout    <-chan time.Time
+	)
+
+	// replenishPool is run if numOpened is less than sessionsToKeep; it times out after sampleInterval.
+	replenishPool := func(sessionsToKeep uint64) {
+		ctx, cancelCtx := context.WithTimeout(context.Background(), hc.sampleInterval)
+		defer cancelCtx()
+		for {
+			select {
+			case <-timeout:
+				return
+			default:
+				break
+			}
+
+			p := hc.pool
+			p.mu.Lock()
+			// Take budget before the actual session creation.
+			if sessionsToKeep <= p.numOpened {
+				p.mu.Unlock()
+				break
+			}
+			p.numOpened++
+			p.createReqs++
+			shouldPrepareWrite := p.shouldPrepareWrite()
+			p.mu.Unlock()
+			var (
+				s   *session
+				err error
+			)
+			if s, err = p.createSession(ctx); err != nil {
+				log.Warningf("Failed to create session, error: %v", toSpannerError(err))
+				continue
+			}
+			if shouldPrepareWrite {
+				if err = s.prepareForWrite(ctx); err != nil {
+					p.recycle(s)
+					log.Warningf("Failed to prepare session, error: %v", toSpannerError(err))
+					continue
+				}
+			}
+			p.recycle(s)
+		}
+	}
+
+	// shrinkPool scales down the session pool.
+	shrinkPool := func(sessionsToKeep uint64) {
+		for {
+			select {
+			case <-timeout:
+				return
+			default:
+				break
+			}
+
+			p := hc.pool
+			p.mu.Lock()
+
+			if sessionsToKeep >= p.numOpened {
+				p.mu.Unlock()
+				break
+			}
+
+			var s *session
+			if p.idleList.Len() > 0 {
+				s = p.idleList.Front().Value.(*session)
+			} else if p.idleWriteList.Len() > 0 {
+				s = p.idleWriteList.Front().Value.(*session)
+			}
+			p.mu.Unlock()
+			if s != nil {
+				// Destroy the session as expired.
+				s.destroy(true)
+			} else {
+				break
+			}
+		}
+	}
+
+	// maxSessionsInUse is the maximum number of sessions in use concurrently observed
+	// during the current window. It is declared outside the loop so that it persists
+	// across iterations, and is reset at the start of each window so the pool can
+	// shrink again after a usage surge has passed.
+	var maxSessionsInUse uint64
+
+	for {
+		if hc.isClosing() {
+			hc.waitWorkers.Done()
+			return
+		}
+
+		// Update the metrics.
+		hc.pool.mu.Lock()
+		currSessionsInUse := hc.pool.numOpened - uint64(hc.pool.idleList.Len()) - uint64(hc.pool.idleWriteList.Len())
+		currSessionsOpened := hc.pool.numOpened
+		hc.pool.mu.Unlock()
+
+		hc.mu.Lock()
+		if iteration%windowSize == 0 || maxSessionsInUse < currSessionsInUse {
+			maxSessionsInUse = currSessionsInUse
+		}
+		sessionsToKeep := maxUint64(hc.pool.MinOpened,
+			minUint64(currSessionsOpened, hc.pool.MaxIdle+maxSessionsInUse))
+		hc.mu.Unlock()
+
+		timeout = time.After(hc.sampleInterval)
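+		// Replenish or shrink the pool if needed. For example, with MinOpened=5,
+		// MaxIdle=4, 10 sessions currently open and a windowed maximum of 3 sessions
+		// in use, sessionsToKeep = max(5, min(10, 4+3)) = 7, so this iteration
+		// shrinks the pool from 10 towards 7 sessions.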
+		// Note: we only need to sample the sessions currently in use; pending session
+		// creation requests don't matter here, because replenishPool and shrinkPool
+		// will not try to create extra sessions or delete sessions that are still being created.
+		if sessionsToKeep > currSessionsOpened {
+			replenishPool(sessionsToKeep)
+		} else {
+			shrinkPool(sessionsToKeep)
+		}
+
+		select {
+		case <-timeout:
+			break
+		case <-hc.done:
+			break
+		}
+		iteration++
+	}
+}
+
+// shouldDropSession returns true if a particular error leads to the removal of a session.
+func shouldDropSession(err error) bool {
+	if err == nil {
+		return false
+	}
+	// If Cloud Spanner can no longer locate the session (for example, if the session has been
+	// garbage collected), then the caller should not try to return the session to the pool.
+	// TODO: once gRPC can return auxiliary error information, stop parsing the error message.
+	if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") {
+		return true
+	}
+	return false
+}
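Classifying errors by gRPC status code plus message inspection, as shouldDropSession does, is a common stopgap until structured error details are available. A self-contained sketch of the same pattern written against the grpc status package directly (a standalone example, not the library's own helpers):

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isSessionNotFound reports whether err is a NotFound error whose message
// indicates a missing Spanner session. Message matching is fragile by nature,
// which is exactly why the library's TODO wants structured error details instead.
func isSessionNotFound(err error) bool {
	s, ok := status.FromError(err)
	if !ok {
		return false
	}
	return s.Code() == codes.NotFound && strings.Contains(s.Message(), "Session not found:")
}

func main() {
	err := status.Error(codes.NotFound, "Session not found: projects/p/instances/i/databases/d/sessions/s")
	fmt.Println(isSessionNotFound(err))                                           // true
	fmt.Println(isSessionNotFound(status.Error(codes.Unavailable, "try later"))) // false
}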
diff --git a/vendor/cloud.google.com/go/spanner/session_test.go b/vendor/cloud.google.com/go/spanner/session_test.go
new file mode 100644
index 0000000..c8311fa
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/session_test.go
@@ -0,0 +1,843 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"container/heap"
+	"math/rand"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"cloud.google.com/go/spanner/internal/testutil"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// setup prepares the test environment for regular session pool tests.
+func setup(t *testing.T, spc SessionPoolConfig) (sp *sessionPool, sc *testutil.MockCloudSpannerClient, cancel func()) {
+	sc = testutil.NewMockCloudSpannerClient(t)
+	spc.getRPCClient = func() (sppb.SpannerClient, error) {
+		return sc, nil
+	}
+	if spc.HealthCheckInterval == 0 {
+		spc.HealthCheckInterval = 50 * time.Millisecond
+	}
+	if spc.healthCheckSampleInterval == 0 {
+		spc.healthCheckSampleInterval = 10 * time.Millisecond
+	}
+	sp, err := newSessionPool("mockdb", spc, nil)
+	if err != nil {
+		t.Fatalf("cannot create session pool: %v", err)
+	}
+	cancel = func() {
+		sp.close()
+	}
+	return
+}
+
+// TestSessionCreation tests session creation during sessionPool.Take().
+func TestSessionCreation(t *testing.T) {
+	t.Parallel()
+	sp, sc, cancel := setup(t, SessionPoolConfig{})
+	defer cancel()
+	// Take three sessions from the session pool; this should trigger the pool to create three new sessions.
+	shs := make([]*sessionHandle, 3)
+	// gotDs holds the unique sessions taken from the session pool.
+	gotDs := map[string]bool{}
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+		gotDs[shs[i].getID()] = true
+	}
+	if len(gotDs) != len(shs) {
+		t.Errorf("session pool created %v sessions, want %v", len(gotDs), len(shs))
+	}
+	if wantDs := sc.DumpSessions(); !reflect.DeepEqual(gotDs, wantDs) {
+		t.Errorf("session pool creates sessions %v, want %v", gotDs, wantDs)
+	}
+	// Verify that created sessions are recorded correctly in the session pool.
+	sp.mu.Lock()
+	if int(sp.numOpened) != len(shs) {
+		t.Errorf("session pool reports %v open sessions, want %v", sp.numOpened, len(shs))
+	}
+	if sp.createReqs != 0 {
+		t.Errorf("session pool reports %v session create requests, want 0", int(sp.createReqs))
+	}
+	sp.mu.Unlock()
+	// Verify that created sessions are tracked correctly by the healthcheck queue.
+	hc := sp.hc
+	hc.mu.Lock()
+	if hc.queue.Len() != len(shs) {
+		t.Errorf("healthcheck queue length = %v, want %v", hc.queue.Len(), len(shs))
+	}
+	for _, s := range hc.queue.sessions {
+		if !gotDs[s.getID()] {
+			t.Errorf("session %v is in healthcheck queue, but it is not created by session pool", s.getID())
+		}
+	}
+	hc.mu.Unlock()
+}
+
+// TestTakeFromIdleList tests taking sessions from the session pool's idle list.
+func TestTakeFromIdleList(t *testing.T) {
+	t.Parallel()
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 10}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+	// Take ten sessions from the session pool and recycle them.
+	shs := make([]*sessionHandle, 10)
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+	}
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	for i := 0; i < len(shs); i++ {
+		shs[i].recycle()
+	}
+	// Further session requests from the session pool won't cause the mock client to create more sessions.
+	wantSessions := sc.DumpSessions()
+	// Take ten sessions from the session pool again; this time all sessions should come from the idle list.
+	gotSessions := map[string]bool{}
+	for i := 0; i < len(shs); i++ {
+		sh, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot take session from session pool: %v", err)
+		}
+		gotSessions[sh.getID()] = true
+	}
+	if len(gotSessions) != 10 {
+		t.Errorf("got %v unique sessions, want 10", len(gotSessions))
+	}
+	if !reflect.DeepEqual(gotSessions, wantSessions) {
+		t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions)
+	}
+}
+
+// TestTakeWriteSessionFromIdleList tests taking write sessions from the session pool's idle list.
+func TestTakeWriteSessionFromIdleList(t *testing.T) {
+	t.Parallel()
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 20}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+
+	acts := make([]testutil.Action, 20)
+	for i := 0; i < len(acts); i++ {
+		acts[i] = testutil.Action{"BeginTransaction", nil}
+	}
+	sc.SetActions(acts...)
+	// Take ten sessions from the session pool and recycle them.
+	shs := make([]*sessionHandle, 10)
+	for i := 0; i < len(shs); i++ {
+		var err error
+		shs[i], err = sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+	}
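+	// All ten handles are held concurrently at this point, which is what forces the
+	// pool to create ten distinct sessions; once they are recycled below, the second
+	// round of takes must be served entirely from the idle list.
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.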
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	for i := 0; i < len(shs); i++ {
+		shs[i].recycle()
+	}
+	// Further session requests from the session pool won't cause the mock client to create more sessions.
+	wantSessions := sc.DumpSessions()
+	// Take ten sessions from the session pool again; this time all sessions should come from the idle list.
+	gotSessions := map[string]bool{}
+	for i := 0; i < len(shs); i++ {
+		sh, err := sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("cannot take session from session pool: %v", err)
+		}
+		gotSessions[sh.getID()] = true
+	}
+	if len(gotSessions) != 10 {
+		t.Errorf("got %v unique sessions, want 10", len(gotSessions))
+	}
+	if !reflect.DeepEqual(gotSessions, wantSessions) {
+		t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions)
+	}
+}
+
+// TestTakeFromIdleListChecked tests taking sessions from the session pool's idle list, but with an extra ping check.
+func TestTakeFromIdleListChecked(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 1}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+	// Stop healthcheck workers to simulate slow pings.
+	sp.hc.close()
+	// Create a session and recycle it.
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	wantSid := sh.getID()
+	sh.recycle()
+	<-time.After(time.Second)
+	// Two back-to-back session requests; both of them should return the same session created before and
+	// none of them should trigger a session ping.
+	for i := 0; i < 2; i++ {
+		// Take the session from the idle list and recycle it.
+		sh, err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("%v - failed to get session: %v", i, err)
+		}
+		if gotSid := sh.getID(); gotSid != wantSid {
+			t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid)
+		}
+		// The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take
+		// reschedules the next healthcheck.
+		if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) {
+			t.Errorf("%v - got ping session requests: %v, want %v", i, got, want)
+		}
+		sh.recycle()
+	}
+	// Inject a session error into the mock client and take the session from the session pool;
+	// the old session should be destroyed and the session pool should create a new session.
+	sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:"))
+	// Delay to trigger sessionPool.Take to ping the session.
+	<-time.After(time.Second)
+	sh, err = sp.take(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	ds := sc.DumpSessions()
+	if len(ds) != 1 {
+		t.Errorf("dumped sessions from mock client: %v, want %v", ds, sh.getID())
+	}
+	if sh.getID() == wantSid {
+		t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid)
+	}
+}
+
+// TestTakeFromIdleWriteListChecked tests taking sessions from the session pool's idle list, but with an extra ping check.
+func TestTakeFromIdleWriteListChecked(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxIdle: 1}) // make sure maintainer keeps the idle sessions
+	defer cancel()
+	sc.MakeNice()
+	// Stop healthcheck workers to simulate slow pings.
+	sp.hc.close()
+	// Create a session and recycle it.
+	sh, err := sp.takeWriteSession(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	wantSid := sh.getID()
+	// Make sure it's sampled once before recycling, otherwise it will be cleaned up.
+	<-time.After(sp.SessionPoolConfig.healthCheckSampleInterval)
+	sh.recycle()
+	<-time.After(time.Second)
+	// Two back-to-back session requests; both of them should return the same session created before and
+	// none of them should trigger a session ping.
+	for i := 0; i < 2; i++ {
+		// Take the session from the idle list and recycle it.
+		sh, err = sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("%v - failed to get session: %v", i, err)
+		}
+		if gotSid := sh.getID(); gotSid != wantSid {
+			t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid)
+		}
+		// The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take
+		// reschedules the next healthcheck.
+		if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) {
+			t.Errorf("%v - got ping session requests: %v, want %v", i, got, want)
+		}
+		sh.recycle()
+	}
+	// Inject a session error into the mock client and take the session from the session pool;
+	// the old session should be destroyed and the session pool should create a new session.
+	sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:"))
+	// Delay to trigger sessionPool.Take to ping the session.
+	<-time.After(time.Second)
+	sh, err = sp.takeWriteSession(context.Background())
+	if err != nil {
+		t.Errorf("failed to get session: %v", err)
+	}
+	ds := sc.DumpSessions()
+	if len(ds) != 1 {
+		t.Errorf("dumped sessions from mock client: %v, want %v", ds, sh.getID())
+	}
+	if sh.getID() == wantSid {
+		t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid)
+	}
+}
+
+// TestMaxOpenedSessions tests the max open sessions constraint.
+func TestMaxOpenedSessions(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, _, cancel := setup(t, SessionPoolConfig{MaxOpened: 1})
+	defer cancel()
+	sh1, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot take session from session pool: %v", err)
+	}
+	ctx, cancel2 := context.WithTimeout(context.Background(), time.Second)
+	defer cancel2()
+	// The session request will time out due to the max open sessions constraint.
+	sh2, gotErr := sp.take(ctx)
+	if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) {
+		t.Errorf("the second session retrieval returns error %v, want %v", gotErr, wantErr)
+	}
+	go func() {
+		<-time.After(time.Second)
+		// Destroy the first session to allow the next session request to proceed.
+		sh1.destroy()
+	}()
+	// Now the session request can be processed because the first session will be destroyed.
+	sh2, err = sp.take(context.Background())
+	if err != nil {
+		t.Errorf("after the first session is destroyed, session retrieval still returns error %v, want nil", err)
+	}
+	if !sh2.session.isValid() || sh2.getID() == "" {
+		t.Errorf("got invalid session: %v", sh2.session)
+	}
+}
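The pattern TestMaxOpenedSessions relies on, bounding a blocking acquisition with a caller-supplied context, works for any pool-like API. A minimal, self-contained sketch under the same imports the test file uses; the take function and errTimeout here are hypothetical stand-ins for sessionPool.take and errGetSessionTimeout:

package main

import (
	"errors"
	"fmt"
	"time"

	"golang.org/x/net/context"
)

var errTimeout = errors.New("timeout waiting for resource")

// take blocks until a token is available on avail or until ctx expires,
// mirroring how sessionPool.take waits on mayGetSession under MaxOpened.
func take(ctx context.Context, avail chan struct{}) error {
	select {
	case <-avail:
		return nil
	case <-ctx.Done():
		return errTimeout
	}
}

func main() {
	avail := make(chan struct{}, 1)
	avail <- struct{}{} // one resource, like MaxOpened: 1

	// The first acquisition succeeds immediately.
	fmt.Println(take(context.Background(), avail)) // <nil>

	// The second acquisition is bounded by a timeout instead of blocking forever.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(take(ctx, avail)) // timeout waiting for resource
}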
+// TestMinOpenedSessions tests the min open sessions constraint.
+func TestMinOpenedSessions(t *testing.T) {
+	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1})
+	defer cancel()
+	// Take ten sessions from the session pool, holding all of them at once so that
+	// ten distinct sessions are created, then recycle them.
+	var ss []*session
+	var shs []*sessionHandle
+	for i := 0; i < 10; i++ {
+		sh, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("failed to get session(%v): %v", i, err)
+		}
+		ss = append(ss, sh.session)
+		shs = append(shs, sh)
+	}
+	for _, sh := range shs {
+		sh.recycle()
+	}
+	// Simulate session expiration.
+	for _, s := range ss {
+		s.destroy(true)
+	}
+	sp.mu.Lock()
+	defer sp.mu.Unlock()
+	// There should still be one session left in the idle list due to the min open sessions constraint.
+	if sp.idleList.Len() != 1 {
+		t.Errorf("got %v sessions in idle list, want 1 (numOpened: %d)", sp.idleList.Len(), sp.numOpened)
+	}
+}
+
+// TestMaxBurst tests the max burst constraint.
+func TestMaxBurst(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{MaxBurst: 1})
+	defer cancel()
+	// Will cause the session creation RPC to be retried forever.
+	sc.InjectError("CreateSession", grpc.Errorf(codes.Unavailable, "try later"))
+	// This session request will never finish until the injected error is cleared.
+	go sp.take(context.Background())
+	// Poll for the execution of the first session request.
+	for {
+		sp.mu.Lock()
+		cr := sp.createReqs
+		sp.mu.Unlock()
+		if cr == 0 {
+			<-time.After(time.Second)
+			continue
+		}
+		// The first session request is being executed.
+		break
+	}
+	ctx, cancel2 := context.WithTimeout(context.Background(), time.Second)
+	defer cancel2()
+	sh, gotErr := sp.take(ctx)
+	// Since MaxBurst == 1, the second session request should block.
+	if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) {
+		t.Errorf("session retrieval returns error %v, want %v", gotErr, wantErr)
+	}
+	// Let the first session request succeed.
+	sc.InjectError("CreateSession", nil)
+	// Now the new session request can proceed because the first session request will eventually succeed.
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("session retrieval returns error %v, want nil", err)
+	}
+	if !sh.session.isValid() || sh.getID() == "" {
+		t.Errorf("got invalid session: %v", sh.session)
+	}
+}
+
+// TestSessionRecycle tests recycling sessions.
+func TestSessionRecycle(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	// Set MaxIdle to ensure shs[0] is not destroyed from scale down.
+	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1, MaxIdle: 2})
+	defer cancel()
+
+	// Test that a session is correctly recycled and reused.
+	for i := 0; i < 20; i++ {
+		s, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get the session %v: %v", i, err)
+		}
+		s.recycle()
+	}
+	if sp.numOpened != 1 {
+		t.Errorf("expect session pool size %d, got %d", 1, sp.numOpened)
+	}
+}
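TestSessionDestroy below exercises the two destroy modes: an expiration-mode destroy respects the MinOpened floor and only removes idle sessions, while a forced destroy always removes the session. A self-contained sketch of that guard; the pool type and its fields are simplified stand-ins for sessionPool's bookkeeping, not the library's implementation:

package main

import "fmt"

// pool is a hypothetical, stripped-down stand-in for sessionPool's counters.
type pool struct {
	numOpened uint64
	minOpened uint64
}

// canRemove mirrors sessionPool.remove's guard: an expiration-mode removal is
// refused when the session is still in use or when it would drop the pool below
// its MinOpened floor; a forced removal (isExpire == false) always proceeds.
func (p *pool) canRemove(isExpire, idle bool) bool {
	if isExpire && (p.numOpened <= p.minOpened || !idle) {
		return false
	}
	return true
}

func main() {
	p := &pool{numOpened: 1, minOpened: 1}
	fmt.Println(p.canRemove(true, true))  // false: expiring would violate MinOpened
	fmt.Println(p.canRemove(false, true)) // true: a forced destroy ignores the floor
}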
+// TestSessionDestroy tests destroying sessions.
+func TestSessionDestroy(t *testing.T) {
+	t.Parallel()
+	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1})
+	defer cancel()
+	// The maintainer will create one session; wait for it to do so, to avoid flakiness in the test.
+	<-time.After(10 * time.Millisecond)
+	sh, err := sp.take(context.Background())
+	if err != nil {
+		t.Errorf("cannot get session from session pool: %v", err)
+	}
+	s := sh.session
+	sh.recycle()
+	if d := s.destroy(true); d || !s.isValid() {
+		// The session should remain alive because of the min open sessions constraint.
+		t.Errorf("session %v invalid, want it to stay alive. (destroy in expiration mode, success: %v)", s, d)
+	}
+	if d := s.destroy(false); !d || s.isValid() {
+		// The session should be destroyed.
+		t.Errorf("failed to destroy session %v. (destroy in default mode, success: %v)", s, d)
+	}
+}
+
+// TestHcHeap tests heap operations on hcHeap.
+func TestHcHeap(t *testing.T) {
+	in := []*session{
+		&session{nextCheck: time.Unix(10, 0)},
+		&session{nextCheck: time.Unix(0, 5)},
+		&session{nextCheck: time.Unix(1, 8)},
+		&session{nextCheck: time.Unix(11, 7)},
+		&session{nextCheck: time.Unix(6, 3)},
+	}
+	want := []*session{
+		&session{nextCheck: time.Unix(1, 8), hcIndex: 0},
+		&session{nextCheck: time.Unix(6, 3), hcIndex: 1},
+		&session{nextCheck: time.Unix(8, 2), hcIndex: 2},
+		&session{nextCheck: time.Unix(10, 0), hcIndex: 3},
+		&session{nextCheck: time.Unix(11, 7), hcIndex: 4},
+	}
+	hh := hcHeap{}
+	for _, s := range in {
+		heap.Push(&hh, s)
+	}
+	// Change the top of the heap and do an adjustment.
+	hh.sessions[0].nextCheck = time.Unix(8, 2)
+	heap.Fix(&hh, 0)
+	for idx := 0; hh.Len() > 0; idx++ {
+		got := heap.Pop(&hh).(*session)
+		want[idx].hcIndex = -1
+		if !reflect.DeepEqual(got, want[idx]) {
+			t.Errorf("%v: heap.Pop returns %v, want %v", idx, got, want[idx])
+		}
+	}
+}
+
+// TestHealthCheckScheduler tests whether healthcheck workers can schedule and perform healthchecks properly.
+func TestHealthCheckScheduler(t *testing.T) {
+	t.Parallel()
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{})
+	defer cancel()
+	// Create 50 sessions.
+	ss := []string{}
+	for i := 0; i < 50; i++ {
+		sh, err := sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+		ss = append(ss, sh.getID())
+	}
+	// Sleep for 1s, allowing healthcheck workers to perform some session pings.
+	<-time.After(time.Second)
+	dp := sc.DumpPings()
+	gotPings := map[string]int64{}
+	for _, p := range dp {
+		gotPings[p]++
+	}
+	for _, s := range ss {
+		// The average ping interval is 50ms.
+		want := int64(time.Second) / int64(50*time.Millisecond)
+		if got := gotPings[s]; got < want/2 || got > want+want/2 {
+			t.Errorf("got %v healthchecks on session %v, want it between (%v, %v)", got, s, want/2, want+want/2)
+		}
+	}
+}
+
+// TestWriteSessionsPrepared tests that a fraction of the sessions is prepared for write by the health checker.
+func TestWriteSessionsPrepared(t *testing.T) {
+	if testing.Short() {
+		t.SkipNow()
+	}
+	sp, sc, cancel := setup(t, SessionPoolConfig{WriteSessions: 0.5, MaxIdle: 20})
+	sc.MakeNice()
+	defer cancel()
+	shs := make([]*sessionHandle, 10)
+	var err error
+	for i := 0; i < 10; i++ {
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+	}
+	// Now there are 10 sessions in the pool. Release them.
+	for _, sh := range shs {
+		sh.recycle()
+	}
+	// Sleep for 1s, allowing healthcheck workers to invoke begin transaction.
+	<-time.After(time.Second)
+	wshs := make([]*sessionHandle, 5)
+	for i := 0; i < 5; i++ {
+		wshs[i], err = sp.takeWriteSession(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+		if wshs[i].getTransactionID() == nil {
+			t.Errorf("got nil transaction id from session pool")
+		}
+	}
+	for _, sh := range wshs {
+		sh.recycle()
+	}
+	<-time.After(time.Second)
+	// Now force creation of 10 more sessions.
+	shs = make([]*sessionHandle, 20)
+	for i := 0; i < 20; i++ {
+		shs[i], err = sp.take(context.Background())
+		if err != nil {
+			t.Errorf("cannot get session from session pool: %v", err)
+		}
+	}
+	// Now there are 20 sessions in the pool. Release them.
+ for _, sh := range shs { + sh.recycle() + } + <-time.After(time.Second) + if sp.idleWriteList.Len() != 10 { + t.Errorf("Expect 10 write prepared session, got: %d", sp.idleWriteList.Len()) + } +} + +// TestTakeFromWriteQueue tests that sessionPool.take() returns write prepared sessions as well. +func TestTakeFromWriteQueue(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{MaxOpened: 1, WriteSessions: 1.0, MaxIdle: 1}) + sc.MakeNice() + defer cancel() + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sh.recycle() + <-time.After(time.Second) + // The session should now be in write queue but take should also return it. + if sp.idleWriteList.Len() == 0 { + t.Errorf("write queue unexpectedly empty") + } + if sp.idleList.Len() != 0 { + t.Errorf("read queue not empty") + } + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sh.recycle() +} + +// TestSessionHealthCheck tests healthchecking cases. +func TestSessionHealthCheck(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Test pinging sessions. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + <-time.After(time.Second) + pings := sc.DumpPings() + if len(pings) == 0 || pings[0] != sh.getID() { + t.Errorf("healthchecker didn't send any ping to session %v", sh.getID()) + } + // Test broken session detection. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Wait for healthcheck workers to find the broken session and tear it down. + <-time.After(1 * time.Second) + s := sh.session + if sh.session.isValid() { + t.Errorf("session(%v) is still alive, want it to be dropped by healthcheck workers", s) + } + sc.InjectError("GetSession", nil) + // Test garbage collection. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sp.close() + if sh.session.isValid() { + t.Errorf("session(%v) is still alive, want it to be garbage collected", s) + } +} + +// TestStressSessionPool does stress test on session pool by the following concurrent operations: +// 1) Test worker gets a session from the pool. +// 2) Test worker turns a session back into the pool. +// 3) Test worker destroys a session got from the pool. +// 4) Healthcheck destroys a broken session (because a worker has already destroyed it). +// 5) Test worker closes the session pool. +// +// During the test, the session pool maintainer maintains the number of sessions, +// and it is expected that all sessions that are taken from session pool remains valid. +// When all test workers and healthcheck workers exit, mockclient, session pool +// and healthchecker should be in consistent state. + +func TestStressSessionPool(t *testing.T) { + t.Parallel() + // Use concurrent workers to test different session pool built from different configurations. 
+ if testing.Short() { + t.SkipNow() + } + for ti, cfg := range []SessionPoolConfig{ + SessionPoolConfig{}, + SessionPoolConfig{MinOpened: 10, MaxOpened: 100}, + SessionPoolConfig{MaxBurst: 50}, + SessionPoolConfig{MinOpened: 10, MaxOpened: 200, MaxBurst: 5}, + SessionPoolConfig{MinOpened: 10, MaxOpened: 200, MaxBurst: 5, WriteSessions: 0.2}, + } { + var wg sync.WaitGroup + // Create a more aggressive session healthchecker to increase test concurrency. + cfg.HealthCheckInterval = 50 * time.Millisecond + cfg.healthCheckSampleInterval = 10 * time.Millisecond + cfg.HealthCheckWorkers = 50 + sc := testutil.NewMockCloudSpannerClient(t) + sc.MakeNice() + cfg.getRPCClient = func() (sppb.SpannerClient, error) { + return sc, nil + } + sp, _ := newSessionPool("mockdb", cfg, nil) + for i := 0; i < 100; i++ { + wg.Add(1) + // Schedule a test worker. + go func(idx int, pool *sessionPool, client sppb.SpannerClient) { + defer wg.Done() + // Test worker iterates 1K times and tries different session / session pool operations. + for j := 0; j < 1000; j++ { + if idx%10 == 0 && j >= 900 { + // Close the pool in selected set of workers during the middle of the test. + pool.close() + } + // Take a write sessions ~ 20% of the times. + takeWrite := rand.Intn(5) == 4 + var ( + sh *sessionHandle + gotErr error + ) + if takeWrite { + sh, gotErr = pool.takeWriteSession(context.Background()) + } else { + sh, gotErr = pool.take(context.Background()) + } + if gotErr != nil { + if pool.isValid() { + t.Errorf("%v.%v: pool.take returns error when pool is still valid: %v", ti, idx, gotErr) + } + if wantErr := errInvalidSessionPool(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("%v.%v: got error when pool is closed: %v, want %v", ti, idx, gotErr, wantErr) + } + continue + } + // Verify if session is valid when session pool is valid. Note that if session pool is invalid after sh is taken, + // then sh might be invalidated by healthcheck workers. + if (sh.getID() == "" || sh.session == nil || !sh.session.isValid()) && pool.isValid() { + t.Errorf("%v.%v.%v: pool.take returns invalid session %v", ti, idx, takeWrite, sh.session) + } + if takeWrite && sh.getTransactionID() == nil { + t.Errorf("%v.%v: pool.takeWriteSession returns session %v without transaction", ti, idx, sh.session) + } + if rand.Intn(100) < idx { + // Random sleep before destroying/recycling the session, to give healthcheck worker a chance to step in. + <-time.After(time.Duration(rand.Int63n(int64(cfg.HealthCheckInterval)))) + } + if rand.Intn(100) < idx { + // destroy the session. + sh.destroy() + continue + } + // recycle the session. + sh.recycle() + } + }(i, sp, sc) + } + wg.Wait() + sp.hc.close() + // Here the states of healthchecker, session pool and mockclient are stable. + idleSessions := map[string]bool{} + hcSessions := map[string]bool{} + mockSessions := sc.DumpSessions() + // Dump session pool's idle list. 
+ for sl := sp.idleList.Front(); sl != nil; sl = sl.Next() { + s := sl.Value.(*session) + if idleSessions[s.getID()] { + t.Errorf("%v: found duplicated session in idle list: %v", ti, s.getID()) + } + idleSessions[s.getID()] = true + } + for sl := sp.idleWriteList.Front(); sl != nil; sl = sl.Next() { + s := sl.Value.(*session) + if idleSessions[s.getID()] { + t.Errorf("%v: found duplicated session in idle write list: %v", ti, s.getID()) + } + idleSessions[s.getID()] = true + } + sp.mu.Lock() + if int(sp.numOpened) != len(idleSessions) { + t.Errorf("%v: number of opened sessions (%v) != number of idle sessions (%v)", ti, sp.numOpened, len(idleSessions)) + } + if sp.createReqs != 0 { + t.Errorf("%v: number of pending session creations = %v, want 0", ti, sp.createReqs) + } + // Dump healthcheck queue. + for _, s := range sp.hc.queue.sessions { + if hcSessions[s.getID()] { + t.Errorf("%v: found duplicated session in healthcheck queue: %v", ti, s.getID()) + } + hcSessions[s.getID()] = true + } + sp.mu.Unlock() + + // Verify that idleSessions == hcSessions == mockSessions. + if !reflect.DeepEqual(idleSessions, hcSessions) { + t.Errorf("%v: sessions in idle list (%v) != sessions in healthcheck queue (%v)", ti, idleSessions, hcSessions) + } + if !reflect.DeepEqual(hcSessions, mockSessions) { + t.Errorf("%v: sessions in healthcheck queue (%v) != sessions in mockclient (%v)", ti, hcSessions, mockSessions) + } + sp.close() + mockSessions = sc.DumpSessions() + if len(mockSessions) != 0 { + t.Errorf("Found live sessions: %v", mockSessions) + } + } +} + +// TestMaintainer checks the session pool maintainer maintains the number of sessions in the following cases +// 1. On initialization of session pool, replenish session pool to meet MinOpened or MaxIdle. +// 2. On increased session usage, provision extra MaxIdle sessions. +// 3. After the surge passes, scale down the session pool accordingly. +func TestMaintainer(t *testing.T) { + t.Parallel() + if testing.Short() { + t.SkipNow() + } + var ( + minOpened uint64 = 5 + maxIdle uint64 = 4 + ) + sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: minOpened, MaxIdle: maxIdle}) + sampleInterval := sp.SessionPoolConfig.healthCheckSampleInterval + hcInterval := sp.SessionPoolConfig.HealthCheckInterval + defer cancel() + + <-time.After(sampleInterval * 1) + sp.mu.Lock() + if sp.numOpened != 5 { + t.Errorf("Replenish. Expect %d open, got %d", sp.MinOpened, sp.numOpened) + } + sp.mu.Unlock() + + // To save test time, we are not creating many sessions, because the time to create sessions will have impact on the decision on sessionsToKeep. We also parallelize the take and recycle process. + shs := make([]*sessionHandle, 10) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + } + sp.mu.Lock() + if sp.numOpened != 10 { + t.Errorf("Scale out from normal use. Expect %d open, got %d", 10, sp.numOpened) + } + sp.mu.Unlock() + + <-time.After(sampleInterval) + for _, sh := range shs[:7] { + sh.recycle() + } + + <-time.After(sampleInterval * 2) + sp.mu.Lock() + if sp.numOpened != 7 { + t.Errorf("Keep extra MaxIdle sessions. Expect %d open, got %d", 7, sp.numOpened) + } + sp.mu.Unlock() + + for _, sh := range shs[7:] { + sh.recycle() + } + <-time.After(sampleInterval*10 + hcInterval) + sp.mu.Lock() + if sp.numOpened != minOpened { + t.Errorf("Scale down. 
Expect %d open, got %d", minOpened, sp.numOpened)
+	}
+	sp.mu.Unlock()
+}
diff --git a/vendor/cloud.google.com/go/spanner/spanner_test.go b/vendor/cloud.google.com/go/spanner/spanner_test.go
new file mode 100644
index 0000000..8fbe4c5
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spanner_test.go
@@ -0,0 +1,1506 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"math"
+	"os"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/civil"
+	"cloud.google.com/go/internal/testutil"
+	database "cloud.google.com/go/spanner/admin/database/apiv1"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc/codes"
+
+	adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
+)
+
+var (
+	// testProjectID specifies the project used for testing.
+	// It can be changed by setting environment variable GCLOUD_TESTS_GOLANG_PROJECT_ID.
+	testProjectID = testutil.ProjID()
+	// testInstanceID specifies the Cloud Spanner instance used for testing.
+	testInstanceID = "go-integration-test"
+
+	// admin is a spanner.DatabaseAdminClient.
+	admin *database.DatabaseAdminClient
+)
+
+var (
+	singerDBStatements = []string{
+		`CREATE TABLE Singers (
+			SingerId	INT64 NOT NULL,
+			FirstName	STRING(1024),
+			LastName	STRING(1024),
+			SingerInfo	BYTES(MAX)
+		) PRIMARY KEY (SingerId)`,
+		`CREATE INDEX SingerByName ON Singers(FirstName, LastName)`,
+		`CREATE TABLE Accounts (
+			AccountId	INT64 NOT NULL,
+			Nickname	STRING(100),
+			Balance		INT64 NOT NULL,
+		) PRIMARY KEY (AccountId)`,
+		`CREATE INDEX AccountByNickname ON Accounts(Nickname) STORING (Balance)`,
+		`CREATE TABLE Types (
+			RowID		INT64 NOT NULL,
+			String		STRING(MAX),
+			StringArray	ARRAY<STRING(MAX)>,
+			Bytes		BYTES(MAX),
+			BytesArray	ARRAY<BYTES(MAX)>,
+			Int64a		INT64,
+			Int64Array	ARRAY<INT64>,
+			Bool		BOOL,
+			BoolArray	ARRAY<BOOL>,
+			Float64		FLOAT64,
+			Float64Array	ARRAY<FLOAT64>,
+			Date		DATE,
+			DateArray	ARRAY<DATE>,
+			Timestamp	TIMESTAMP,
+			TimestampArray	ARRAY<TIMESTAMP>,
+		) PRIMARY KEY (RowID)`,
+	}
+
+	readDBStatements = []string{
+		`CREATE TABLE TestTable (
+			Key		STRING(MAX) NOT NULL,
+			StringValue	STRING(MAX)
+		) PRIMARY KEY (Key)`,
+		`CREATE INDEX TestTableByValue ON TestTable(StringValue)`,
+		`CREATE INDEX TestTableByValueDesc ON TestTable(StringValue DESC)`,
+	}
+)
+
+type testTableRow struct{ Key, StringValue string }
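The reconstructed Types schema pairs each scalar column with an ARRAY<...> counterpart. In this client, array columns map naturally to Go slices when writing rows; a short sketch of a mutation populating a few of those columns (a hypothetical helper, not part of the test file, with an abbreviated column list and illustrative values):

// applyTypesRow shows how the array columns of the Types table above map to Go
// slices: ARRAY<STRING(MAX)> to []string, ARRAY<INT64> to []int64, ARRAY<BOOL> to []bool.
func applyTypesRow(ctx context.Context, client *Client) error {
	m := InsertOrUpdate("Types",
		[]string{"RowID", "String", "StringArray", "Int64a", "Int64Array", "Bool", "BoolArray"},
		[]interface{}{
			int64(1),            // INT64
			"hello",             // STRING(MAX)
			[]string{"a", "b"},  // ARRAY<STRING(MAX)>
			int64(42),           // INT64
			[]int64{1, 2, 3},    // ARRAY<INT64>
			true,                // BOOL
			[]bool{true, false}, // ARRAY<BOOL>
		})
	_, err := client.Apply(ctx, []*Mutation{m})
	return err
}

+
+func TestMain(m *testing.M) {
+	initIntegrationTest()
+	os.Exit(m.Run())
+}
+
+func initIntegrationTest() {
+	flag.Parse() // needed for testing.Short()
+	if testing.Short() {
+		return
+	}
+	if testProjectID == "" {
+		log.Print("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing")
+		return
+	}
+	ctx := context.Background()
+	ts := testutil.TokenSource(ctx, AdminScope, Scope)
+	if ts == nil {
+		log.Printf("Integration test skipped: cannot get service account credential from environment variable %v", "GCLOUD_TESTS_GOLANG_KEY")
+		return
+	}
+	var err error
+	// Create Admin client 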
and Data client. + // TODO: Remove the EndPoint option once this is the default. + admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint("spanner.googleapis.com:443")) + if err != nil { + log.Fatalf("cannot create admin client: %v", err) + } +} + +var ( + mu sync.Mutex + count int + now = time.Now() +) + +// prepare initializes Cloud Spanner testing DB and clients. +func prepare(ctx context.Context, t *testing.T, statements []string) (client *Client, dbPath string, tearDown func()) { + if admin == nil { + t.Skip("Integration tests skipped") + } + // Construct a unique test DB name. + mu.Lock() + dbName := fmt.Sprintf("gotest_%d_%d", now.UnixNano(), count) + count++ + mu.Unlock() + + dbPath = fmt.Sprintf("projects/%v/instances/%v/databases/%v", testProjectID, testInstanceID, dbName) + // Create database and tables. + op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ + Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), + CreateStatement: "CREATE DATABASE " + dbName, + ExtraStatements: statements, + }) + if err != nil { + t.Fatalf("cannot create testing DB %v: %v", dbPath, err) + } + if _, err := op.Wait(ctx); err != nil { + t.Fatalf("cannot create testing DB %v: %v", dbPath, err) + } + client, err = NewClientWithConfig(ctx, dbPath, ClientConfig{ + SessionPoolConfig: SessionPoolConfig{WriteSessions: 0.2}, + }, option.WithTokenSource(testutil.TokenSource(ctx, Scope))) + if err != nil { + t.Fatalf("cannot create data client on DB %v: %v", dbPath, err) + } + return client, dbPath, func() { + if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{dbPath}); err != nil { + t.Logf("failed to drop database %s (error %v), might need a manual removal", + dbPath, err) + } + client.Close() + } +} + +// Test SingleUse transaction. +func TestSingleUse(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + // Set up testing environment. + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + writes := []struct { + row []interface{} + ts time.Time + }{ + {row: []interface{}{1, "Marc", "Foo"}}, + {row: []interface{}{2, "Tars", "Bar"}}, + {row: []interface{}{3, "Alpha", "Beta"}}, + {row: []interface{}{4, "Last", "End"}}, + } + // Try to write four rows through the Apply API. + for i, w := range writes { + var err error + m := InsertOrUpdate("Singers", + []string{"SingerId", "FirstName", "LastName"}, + w.row) + if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + } + + // For testing timestamp bound staleness. + <-time.After(time.Second) + + // Test reading rows with different timestamp bounds. + for i, test := range []struct { + want [][]interface{} + tb TimestampBound + checkTs func(time.Time) error + }{ + { + // strong + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + StrongRead(), + func(ts time.Time) error { + // writes[3] is the last write, all subsequent strong read should have a timestamp larger than that. 
+ if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // min_read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + MinReadTimestamp(writes[3].ts), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // max_staleness + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + MaxStaleness(time.Second), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}}, + ReadTimestamp(writes[2].ts), + func(ts time.Time) error { + if ts != writes[2].ts { + return fmt.Errorf("read got timestamp %v, want %v", ts, writes[2].ts) + } + return nil + }, + }, + { + // exact_staleness + nil, + // Specify a staleness which should be already before this test because + // context timeout is set to be 10s. + ExactStaleness(11 * time.Second), + func(ts time.Time) error { + if ts.After(writes[0].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts) + } + return nil + }, + }, + } { + // SingleUse.Query + su := client.Single().WithTimestampBound(test.tb) + got, err := readAll(su.Query( + ctx, + Statement{ + "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)", + map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)}, + })) + if err != nil { + t.Errorf("%d: SingleUse.Query returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from SingleUse.Query: %v, want %v", i, got, test.want) + } + rts, err := su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.Query doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.Query doesn't return expected timestamp: %v", i, err) + } + // SingleUse.Read + su = client.Single().WithTimestampBound(test.tb) + got, err = readAll(su.Read(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: SingleUse.Read returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from SingleUse.Read: %v, want %v", i, got, test.want) + } + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.Read doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.Read doesn't return expected timestamp: %v", i, err) + } + // SingleUse.ReadRow + got = nil + for _, k := range []Key{Key{1}, Key{3}, Key{4}} { + su = client.Single().WithTimestampBound(test.tb) + r, err := su.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"}) + if err != nil { + continue + } + v, err := rowToValues(r) + if err != nil { + continue + } + got = append(got, v) + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return expected 
timestamp: %v", i, k, err) + } + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected results from SingleUse.ReadRow: %v, want %v", i, got, test.want) + } + // SingleUse.ReadUsingIndex + su = client.Single().WithTimestampBound(test.tb) + got, err = readAll(su.ReadUsingIndex(ctx, "Singers", "SingerByName", KeySets(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex returns error %v, want nil", i, err) + } + // The results from ReadUsingIndex is sorted by the index rather than primary key. + if len(got) != len(test.want) { + t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want) + } + for j, g := range got { + if j > 0 { + prev := got[j-1][1].(string) + got[j-1][2].(string) + curr := got[j][1].(string) + got[j][2].(string) + if strings.Compare(prev, curr) > 0 { + t.Errorf("%d: SingleUse.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j]) + } + } + found := false + for _, w := range test.want { + if reflect.DeepEqual(g, w) { + found = true + } + } + if !found { + t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want) + break + } + } + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return expected timestamp: %v", i, err) + } + } +} + +// Test ReadOnlyTransaction. The testsuite is mostly like SingleUse, except it +// also tests for a single timestamp across multiple reads. +func TestReadOnlyTransaction(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + // Set up testing environment. + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + writes := []struct { + row []interface{} + ts time.Time + }{ + {row: []interface{}{1, "Marc", "Foo"}}, + {row: []interface{}{2, "Tars", "Bar"}}, + {row: []interface{}{3, "Alpha", "Beta"}}, + {row: []interface{}{4, "Last", "End"}}, + } + // Try to write four rows through the Apply API. + for i, w := range writes { + var err error + m := InsertOrUpdate("Singers", + []string{"SingerId", "FirstName", "LastName"}, + w.row) + if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + } + + // For testing timestamp bound staleness. + <-time.After(time.Second) + + // Test reading rows with different timestamp bounds. + for i, test := range []struct { + want [][]interface{} + tb TimestampBound + checkTs func(time.Time) error + }{ + // Note: min_read_timestamp and max_staleness are not supported by ReadOnlyTransaction. See + // API document for more details. 
+ { + // strong + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + StrongRead(), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}}, + ReadTimestamp(writes[2].ts), + func(ts time.Time) error { + if ts != writes[2].ts { + return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts) + } + return nil + }, + }, + { + // exact_staleness + nil, + // Specify a staleness which should be already before this test because + // context timeout is set to be 10s. + ExactStaleness(11 * time.Second), + func(ts time.Time) error { + if ts.After(writes[0].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts) + } + return nil + }, + }, + } { + // ReadOnlyTransaction.Query + ro := client.ReadOnlyTransaction().WithTimestampBound(test.tb) + got, err := readAll(ro.Query( + ctx, + Statement{ + "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)", + map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)}, + })) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Query: %v, want %v", i, got, test.want) + } + rts, err := ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query doesn't return expected timestamp: %v", i, err) + } + roTs := rts + // ReadOnlyTransaction.Read + got, err = readAll(ro.Read(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Read: %v, want %v", i, got, test.want) + } + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read doesn't return expected timestamp: %v", i, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + // ReadOnlyTransaction.ReadRow + got = nil + for _, k := range []Key{Key{1}, Key{3}, Key{4}} { + r, err := ro.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"}) + if err != nil { + continue + } + v, err := rowToValues(r) + if err != nil { + continue + } + got = append(got, v) + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected results from ReadOnlyTransaction.ReadRow: %v, want 
%v", i, got, test.want)
+		}
+		// ReadOnlyTransaction.ReadUsingIndex
+		got, err = readAll(ro.ReadUsingIndex(ctx, "Singers", "SingerByName", KeySets(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"}))
+		if err != nil {
+			t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex returns error %v, want nil", i, err)
+		}
+		// The results from ReadUsingIndex are sorted by the index rather than the primary key.
+		if len(got) != len(test.want) {
+			t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want)
+		}
+		for j, g := range got {
+			if j > 0 {
+				prev := got[j-1][1].(string) + got[j-1][2].(string)
+				curr := got[j][1].(string) + got[j][2].(string)
+				if strings.Compare(prev, curr) > 0 {
+					t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j])
+				}
+			}
+			found := false
+			for _, w := range test.want {
+				if reflect.DeepEqual(g, w) {
+					found = true
+				}
+			}
+			if !found {
+				t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want)
+				break
+			}
+		}
+		rts, err = ro.Timestamp()
+		if err != nil {
+			t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return a timestamp, error: %v", i, err)
+		}
+		if err := test.checkTs(rts); err != nil {
+			t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return expected timestamp: %v", i, err)
+		}
+		if roTs != rts {
+			t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to always return the same read timestamp", i, roTs, rts)
+		}
+		ro.Close()
+	}
+}
+
+// TestUpdateDuringRead tests ReadOnlyTransaction with different timestamp bounds while an update happens concurrently.
+func TestUpdateDuringRead(t *testing.T) {
+	t.Parallel()
+	ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
+	defer cancel()
+	client, _, tearDown := prepare(ctx, t, singerDBStatements)
+	defer tearDown()
+
+	for i, tb := range []TimestampBound{
+		StrongRead(),
+		ReadTimestamp(time.Now().Add(-time.Minute * 30)), // version GC is 1 hour
+		ExactStaleness(time.Minute * 30),
+	} {
+		ro := client.ReadOnlyTransaction().WithTimestampBound(tb)
+		_, err := ro.ReadRow(ctx, "Singers", Key{i}, []string{"SingerId"})
+		if ErrCode(err) != codes.NotFound {
+			t.Errorf("%d: ReadOnlyTransaction.ReadRow before write returns error: %v, want NotFound", i, err)
+		}
+
+		m := InsertOrUpdate("Singers", []string{"SingerId"}, []interface{}{i})
+		if _, err := client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
+			t.Fatal(err)
+		}
+
+		_, err = ro.ReadRow(ctx, "Singers", Key{i}, []string{"SingerId"})
+		if ErrCode(err) != codes.NotFound {
+			t.Errorf("%d: ReadOnlyTransaction.ReadRow after write returns error: %v, want NotFound", i, err)
+		}
+	}
+}
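The read-write transaction shape the next test exercises is: read through the transaction, stage writes with BufferWrite, and let the client commit the buffered mutations, rerunning the closure if the transaction aborts. A minimal sketch of that shape, assuming a hypothetical Counters(Name, Value) table that is not part of this test schema:

// incrementCounter is a hypothetical helper showing the closure shape used by
// ReadWriteTransaction: all reads happen via tx, all writes are buffered, and the
// closure must be safe to rerun because the client retries aborted transactions.
func incrementCounter(ctx context.Context, client *Client, name string) error {
	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
		row, err := tx.ReadRow(ctx, "Counters", Key{name}, []string{"Value"})
		if err != nil {
			return err
		}
		var v int64
		if err := row.Column(0, &v); err != nil {
			return err
		}
		// Writes are buffered locally and applied atomically at commit time.
		return tx.BufferWrite([]*Mutation{
			Update("Counters", []string{"Name", "Value"}, []interface{}{name, v + 1}),
		})
	})
	return err
}

+
+// Test ReadWriteTransaction.
+func TestReadWriteTransaction(t *testing.T) {
+	t.Parallel()
+	// Give a longer deadline because of transaction backoffs.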
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + // Set up two accounts + accounts := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + wg := sync.WaitGroup{} + + readBalance := func(iter *RowIterator) (int64, error) { + defer iter.Stop() + var bal int64 + for { + row, err := iter.Next() + if err == iterator.Done { + return bal, nil + } + if err != nil { + return 0, err + } + if err := row.Column(0, &bal); err != nil { + return 0, err + } + } + } + + for i := 0; i < 20; i++ { + wg.Add(1) + go func(iter int) { + defer wg.Done() + _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + // Query Foo's balance and Bar's balance. + bf, e := readBalance(tx.Query(ctx, + Statement{"SELECT Balance FROM Accounts WHERE AccountId = @id", map[string]interface{}{"id": int64(1)}})) + if e != nil { + return e + } + bb, e := readBalance(tx.Read(ctx, "Accounts", KeySets(Key{int64(2)}), []string{"Balance"})) + if e != nil { + return e + } + if bf <= 0 { + return nil + } + bf-- + bb++ + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), bf}), + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), bb}), + }) + return nil + }) + if err != nil { + t.Fatalf("%d: failed to execute transaction: %v", iter, err) + } + }(i) + } + // Because of context timeout, all goroutines will eventually return. + wg.Wait() + _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + var bf, bb int64 + r, e := tx.ReadRow(ctx, "Accounts", Key{int64(1)}, []string{"Balance"}) + if e != nil { + return e + } + if ce := r.Column(0, &bf); ce != nil { + return ce + } + bb, e = readBalance(tx.ReadUsingIndex(ctx, "Accounts", "AccountByNickname", KeySets(Key{"Bar"}), []string{"Balance"})) + if e != nil { + return e + } + if bf != 30 || bb != 21 { + t.Errorf("Foo's balance is now %v and Bar's balance is now %v, want %v and %v", bf, bb, 30, 21) + } + return nil + }) + if err != nil { + t.Errorf("failed to check balances: %v", err) + } +} + +const ( + testTable = "TestTable" + testTableIndex = "TestTableByValue" +) + +var testTableColumns = []string{"Key", "StringValue"} + +func TestReads(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + // Set up testing environment. + client, _, tearDown := prepare(ctx, t, readDBStatements) + defer tearDown() + + // Includes k0..k14. Strings sort lexically, eg "k1" < "k10" < "k2". + var ms []*Mutation + for i := 0; i < 15; i++ { + ms = append(ms, InsertOrUpdate(testTable, + testTableColumns, + []interface{}{fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i)})) + } + // Don't use ApplyAtLeastOnce, so we can test the other code path. + if _, err := client.Apply(ctx, ms); err != nil { + t.Fatal(err) + } + + // Empty read. 
+ rows, err := readAllTestTable(client.Single().Read(ctx, testTable, + KeyRange{Start: Key{"k99"}, End: Key{"z"}}, testTableColumns)) + if err != nil { + t.Fatal(err) + } + if got, want := len(rows), 0; got != want { + t.Errorf("got %d, want %d", got, want) + } + + // Index empty read. + rows, err = readAllTestTable(client.Single().ReadUsingIndex(ctx, testTable, testTableIndex, + KeyRange{Start: Key{"v99"}, End: Key{"z"}}, testTableColumns)) + if err != nil { + t.Fatal(err) + } + if got, want := len(rows), 0; got != want { + t.Errorf("got %d, want %d", got, want) + } + + // Point read. + row, err := client.Single().ReadRow(ctx, testTable, Key{"k1"}, testTableColumns) + if err != nil { + t.Fatal(err) + } + var got testTableRow + if err := row.ToStruct(&got); err != nil { + t.Fatal(err) + } + if want := (testTableRow{"k1", "v1"}); got != want { + t.Errorf("got %v, want %v", got, want) + } + + // Point read not found. + _, err = client.Single().ReadRow(ctx, testTable, Key{"k999"}, testTableColumns) + if ErrCode(err) != codes.NotFound { + t.Fatalf("got %v, want NotFound", err) + } + + // No index point read not found, because Go does not have ReadRowUsingIndex. + + rangeReads(ctx, t, client) + indexRangeReads(ctx, t, client) +} + +func rangeReads(ctx context.Context, t *testing.T, client *Client) { + checkRange := func(ks KeySet, wantNums ...int) { + if msg, ok := compareRows(client.Single().Read(ctx, testTable, ks, testTableColumns), wantNums); !ok { + t.Errorf("key set %+v: %s", ks, msg) + } + } + + checkRange(Key{"k1"}, 1) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, ClosedOpen}, 3, 4) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, ClosedClosed}, 3, 4, 5) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, OpenClosed}, 4, 5) + checkRange(KeyRange{Key{"k3"}, Key{"k5"}, OpenOpen}, 4) + + // Partial key specification. + checkRange(KeyRange{Key{"k7"}, Key{}, ClosedClosed}, 7, 8, 9) + checkRange(KeyRange{Key{"k7"}, Key{}, OpenClosed}, 8, 9) + checkRange(KeyRange{Key{}, Key{"k11"}, ClosedOpen}, 0, 1, 10) + checkRange(KeyRange{Key{}, Key{"k11"}, ClosedClosed}, 0, 1, 10, 11) + + // The following produce empty ranges. + // TODO(jba): Consider a multi-part key to illustrate partial key behavior. + // checkRange(KeyRange{Key{"k7"}, Key{}, ClosedOpen}) + // checkRange(KeyRange{Key{"k7"}, Key{}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"k11"}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"k11"}, OpenClosed}) + + // Prefix is component-wise, not string prefix. + checkRange(Key{"k1"}.AsPrefix(), 1) + checkRange(KeyRange{Key{"k1"}, Key{"k2"}, ClosedOpen}, 1, 10, 11, 12, 13, 14) + + checkRange(AllKeys(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) +} + +func indexRangeReads(ctx context.Context, t *testing.T, client *Client) { + checkRange := func(ks KeySet, wantNums ...int) { + if msg, ok := compareRows(client.Single().ReadUsingIndex(ctx, testTable, testTableIndex, ks, testTableColumns), + wantNums); !ok { + t.Errorf("key set %+v: %s", ks, msg) + } + } + + checkRange(Key{"v1"}, 1) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, ClosedOpen}, 3, 4) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, ClosedClosed}, 3, 4, 5) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, OpenClosed}, 4, 5) + checkRange(KeyRange{Key{"v3"}, Key{"v5"}, OpenOpen}, 4) + + // // Partial key specification. 
+ checkRange(KeyRange{Key{"v7"}, Key{}, ClosedClosed}, 7, 8, 9) + checkRange(KeyRange{Key{"v7"}, Key{}, OpenClosed}, 8, 9) + checkRange(KeyRange{Key{}, Key{"v11"}, ClosedOpen}, 0, 1, 10) + checkRange(KeyRange{Key{}, Key{"v11"}, ClosedClosed}, 0, 1, 10, 11) + + // // The following produce empty ranges. + // checkRange(KeyRange{Key{"v7"}, Key{}, ClosedOpen}) + // checkRange(KeyRange{Key{"v7"}, Key{}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"v11"}, OpenOpen}) + // checkRange(KeyRange{Key{}, Key{"v11"}, OpenClosed}) + + // // Prefix is component-wise, not string prefix. + checkRange(Key{"v1"}.AsPrefix(), 1) + checkRange(KeyRange{Key{"v1"}, Key{"v2"}, ClosedOpen}, 1, 10, 11, 12, 13, 14) + checkRange(AllKeys(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14) + + // Read from an index with DESC ordering. + wantNums := []int{14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} + if msg, ok := compareRows(client.Single().ReadUsingIndex(ctx, testTable, "TestTableByValueDesc", AllKeys(), testTableColumns), + wantNums); !ok { + t.Errorf("desc: %s", msg) + } +} + +func compareRows(iter *RowIterator, wantNums []int) (string, bool) { + rows, err := readAllTestTable(iter) + if err != nil { + return err.Error(), false + } + want := map[string]string{} + for _, n := range wantNums { + want[fmt.Sprintf("k%d", n)] = fmt.Sprintf("v%d", n) + } + got := map[string]string{} + for _, r := range rows { + got[r.Key] = r.StringValue + } + if !reflect.DeepEqual(got, want) { + return fmt.Sprintf("got %v, want %v", got, want), false + } + return "", true +} + +func TestEarlyTimestamp(t *testing.T) { + t.Parallel() + // Test that we can get the timestamp from a read-only transaction as + // soon as we have read at least one row. + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + // Set up testing environment. + client, _, tearDown := prepare(ctx, t, readDBStatements) + defer tearDown() + + var ms []*Mutation + for i := 0; i < 3; i++ { + ms = append(ms, InsertOrUpdate(testTable, + testTableColumns, + []interface{}{fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i)})) + } + if _, err := client.Apply(ctx, ms, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + + txn := client.Single() + iter := txn.Read(ctx, testTable, AllKeys(), testTableColumns) + defer iter.Stop() + // In single-use transaction, we should get an error before reading anything. + if _, err := txn.Timestamp(); err == nil { + t.Error("wanted error, got nil") + } + // After reading one row, the timestamp should be available. + _, err := iter.Next() + if err != nil { + t.Fatal(err) + } + if _, err := txn.Timestamp(); err != nil { + t.Errorf("got %v, want nil", err) + } + + txn = client.ReadOnlyTransaction() + defer txn.Close() + iter = txn.Read(ctx, testTable, AllKeys(), testTableColumns) + defer iter.Stop() + // In an ordinary read-only transaction, the timestamp should be + // available immediately. + if _, err := txn.Timestamp(); err != nil { + t.Errorf("got %v, want nil", err) + } +} + +func TestNestedTransaction(t *testing.T) { + t.Parallel() + // You cannot use a transaction from inside a read-write transaction. 
+ ctx := context.Background()
+ client, _, tearDown := prepare(ctx, t, singerDBStatements)
+ defer tearDown()
+ client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
+ _, err := client.ReadWriteTransaction(ctx,
+ func(context.Context, *ReadWriteTransaction) error { return nil })
+ if ErrCode(err) != codes.FailedPrecondition {
+ t.Fatalf("got %v, want FailedPrecondition", err)
+ }
+ _, err = client.Single().ReadRow(ctx, "Singers", Key{1}, []string{"SingerId"})
+ if ErrCode(err) != codes.FailedPrecondition {
+ t.Fatalf("got %v, want FailedPrecondition", err)
+ }
+ rot := client.ReadOnlyTransaction()
+ defer rot.Close()
+ _, err = rot.ReadRow(ctx, "Singers", Key{1}, []string{"SingerId"})
+ if ErrCode(err) != codes.FailedPrecondition {
+ t.Fatalf("got %v, want FailedPrecondition", err)
+ }
+ return nil
+ })
+}
+
+// Test client recovery on database recreation.
+func TestDbRemovalRecovery(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
+ defer cancel()
+ client, dbPath, tearDown := prepare(ctx, t, singerDBStatements)
+ defer tearDown()
+
+ // Drop the testing database.
+ if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{Database: dbPath}); err != nil {
+ t.Fatalf("failed to drop testing database %v: %v", dbPath, err)
+ }
+
+ // Now, send the query.
+ iter := client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"})
+ defer iter.Stop()
+ if _, err := iter.Next(); err == nil {
+ t.Errorf("query to removed database succeeded, want it to fail")
+ }
+
+ // Recreate database and table.
+ dbName := dbPath[strings.LastIndex(dbPath, "/")+1:]
+ op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{
+ Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID),
+ CreateStatement: "CREATE DATABASE " + dbName,
+ ExtraStatements: []string{
+ `CREATE TABLE Singers (
+ SingerId INT64 NOT NULL,
+ FirstName STRING(1024),
+ LastName STRING(1024),
+ SingerInfo BYTES(MAX)
+ ) PRIMARY KEY (SingerId)`,
+ },
+ })
+ if err != nil {
+ t.Fatalf("cannot recreate testing DB %v: %v", dbPath, err)
+ }
+ if _, err := op.Wait(ctx); err != nil {
+ t.Errorf("cannot recreate testing DB %v: %v", dbPath, err)
+ }
+
+ // Now, send the query again.
+ iter = client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"})
+ defer iter.Stop()
+ _, err = iter.Next()
+ if err != nil && err != iterator.Done {
+ t.Fatalf("failed to send query to database %v: %v", dbPath, err)
+ }
+}
+
+// Test encoding/decoding non-struct Cloud Spanner types.
+func TestBasicTypes(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + t1, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + // Boundaries + t2, _ := time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z") + t3, _ := time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z") + d1, _ := civil.ParseDate("2016-11-15") + // Boundaries + d2, _ := civil.ParseDate("0001-01-01") + d3, _ := civil.ParseDate("9999-12-31") + + tests := []struct { + col string + val interface{} + want interface{} + }{ + {col: "String", val: ""}, + {col: "String", val: "", want: NullString{"", true}}, + {col: "String", val: "foo"}, + {col: "String", val: "foo", want: NullString{"foo", true}}, + {col: "String", val: NullString{"bar", true}, want: "bar"}, + {col: "String", val: NullString{"bar", false}, want: NullString{"", false}}, + {col: "StringArray", val: []string(nil), want: []NullString(nil)}, + {col: "StringArray", val: []string{}, want: []NullString{}}, + {col: "StringArray", val: []string{"foo", "bar"}, want: []NullString{{"foo", true}, {"bar", true}}}, + {col: "StringArray", val: []NullString(nil)}, + {col: "StringArray", val: []NullString{}}, + {col: "StringArray", val: []NullString{{"foo", true}, {}}}, + {col: "Bytes", val: []byte{}}, + {col: "Bytes", val: []byte{1, 2, 3}}, + {col: "Bytes", val: []byte(nil)}, + {col: "BytesArray", val: [][]byte(nil)}, + {col: "BytesArray", val: [][]byte{}}, + {col: "BytesArray", val: [][]byte{[]byte{1}, []byte{2, 3}}}, + {col: "Int64a", val: 0, want: int64(0)}, + {col: "Int64a", val: -1, want: int64(-1)}, + {col: "Int64a", val: 2, want: int64(2)}, + {col: "Int64a", val: int64(3)}, + {col: "Int64a", val: 4, want: NullInt64{4, true}}, + {col: "Int64a", val: NullInt64{5, true}, want: int64(5)}, + {col: "Int64a", val: NullInt64{6, true}, want: int64(6)}, + {col: "Int64a", val: NullInt64{7, false}, want: NullInt64{0, false}}, + {col: "Int64Array", val: []int(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []int64(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int64{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int64{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []NullInt64(nil)}, + {col: "Int64Array", val: []NullInt64{}}, + {col: "Int64Array", val: []NullInt64{{1, true}, {}}}, + {col: "Bool", val: false}, + {col: "Bool", val: true}, + {col: "Bool", val: false, want: NullBool{false, true}}, + {col: "Bool", val: true, want: NullBool{true, true}}, + {col: "Bool", val: NullBool{true, true}}, + {col: "Bool", val: NullBool{false, false}}, + {col: "BoolArray", val: []bool(nil), want: []NullBool(nil)}, + {col: "BoolArray", val: []bool{}, want: []NullBool{}}, + {col: "BoolArray", val: []bool{true, false}, want: []NullBool{{true, true}, {false, true}}}, + {col: "BoolArray", val: []NullBool(nil)}, + {col: "BoolArray", val: []NullBool{}}, + {col: "BoolArray", val: []NullBool{{false, true}, {true, true}, {}}}, + {col: "Float64", val: 0.0}, + {col: "Float64", val: 3.14}, + {col: "Float64", val: math.NaN()}, + {col: "Float64", val: math.Inf(1)}, + {col: "Float64", val: math.Inf(-1)}, + {col: "Float64", val: 2.78, want: NullFloat64{2.78, true}}, + {col: "Float64", val: NullFloat64{2.71, true}, want: 2.71}, + {col: 
"Float64", val: NullFloat64{1.41, true}, want: NullFloat64{1.41, true}}, + {col: "Float64", val: NullFloat64{0, false}}, + {col: "Float64Array", val: []float64(nil), want: []NullFloat64(nil)}, + {col: "Float64Array", val: []float64{}, want: []NullFloat64{}}, + {col: "Float64Array", val: []float64{2.72, 3.14, math.Inf(1)}, want: []NullFloat64{{2.72, true}, {3.14, true}, {math.Inf(1), true}}}, + {col: "Float64Array", val: []NullFloat64(nil)}, + {col: "Float64Array", val: []NullFloat64{}}, + {col: "Float64Array", val: []NullFloat64{{2.72, true}, {math.Inf(1), true}, {}}}, + {col: "Date", val: d1}, + {col: "Date", val: d1, want: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}, want: d1}, + {col: "Date", val: NullDate{civil.Date{}, false}}, + {col: "DateArray", val: []civil.Date(nil), want: []NullDate(nil)}, + {col: "DateArray", val: []civil.Date{}, want: []NullDate{}}, + {col: "DateArray", val: []civil.Date{d1, d2, d3}, want: []NullDate{{d1, true}, {d2, true}, {d3, true}}}, + {col: "Timestamp", val: t1}, + {col: "Timestamp", val: t1, want: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}, want: t1}, + {col: "Timestamp", val: NullTime{}}, + {col: "TimestampArray", val: []time.Time(nil), want: []NullTime(nil)}, + {col: "TimestampArray", val: []time.Time{}, want: []NullTime{}}, + {col: "TimestampArray", val: []time.Time{t1, t2, t3}, want: []NullTime{{t1, true}, {t2, true}, {t3, true}}}, + } + + // Write rows into table first. + var muts []*Mutation + for i, test := range tests { + muts = append(muts, InsertOrUpdate("Types", []string{"RowID", test.col}, []interface{}{i, test.val})) + } + if _, err := client.Apply(ctx, muts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + + for i, test := range tests { + row, err := client.Single().ReadRow(ctx, "Types", []interface{}{i}, []string{test.col}) + if err != nil { + t.Fatalf("Unable to fetch row %v: %v", i, err) + } + // Create new instance of type of test.want. + want := test.want + if want == nil { + want = test.val + } + gotp := reflect.New(reflect.TypeOf(want)) + if err := row.Column(0, gotp.Interface()); err != nil { + t.Errorf("%d: col:%v val:%#v, %v", i, test.col, test.val, err) + continue + } + got := reflect.Indirect(gotp).Interface() + + // One of the test cases is checking NaN handling. Given + // NaN!=NaN, we can't use reflect to test for it. + if isNaN(got) && isNaN(want) { + continue + } + + // Check non-NaN cases. + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: col:%v val:%#v, got %#v, want %#v", i, test.col, test.val, got, want) + continue + } + } +} + +// Test decoding Cloud Spanner STRUCT type. +func TestStructTypes(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + client, _, tearDown := prepare(ctx, t, singerDBStatements) + defer tearDown() + + tests := []struct { + q Statement + want func(r *Row) error + }{ + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1, 2))`}, + want: func(r *Row) error { + // Test STRUCT ARRAY decoding to []NullRow. 
+ var rows []NullRow + if err := r.Column(0, &rows); err != nil { + return err + } + if len(rows) != 1 { + return fmt.Errorf("len(rows) = %d; want 1", len(rows)) + } + if !rows[0].Valid { + return fmt.Errorf("rows[0] is NULL") + } + var i, j int64 + if err := rows[0].Row.Columns(&i, &j); err != nil { + return err + } + if i != 1 || j != 2 { + return fmt.Errorf("got (%d,%d), want (1,2)", i, j) + } + return nil + }, + }, + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1 as foo, 2 as bar)) as col1`}, + want: func(r *Row) error { + // Test Row.ToStruct. + s := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{} + if err := r.ToStruct(&s); err != nil { + return err + } + want := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{ + Col1: []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + }{ + { + Foo: 1, + Bar: 2, + }, + }, + } + if !reflect.DeepEqual(want, s) { + return fmt.Errorf("unexpected decoding result: %v, want %v", s, want) + } + return nil + }, + }, + } + for i, test := range tests { + iter := client.Single().Query(ctx, test.q) + defer iter.Stop() + row, err := iter.Next() + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + if err := test.want(row); err != nil { + t.Errorf("%d: %v", i, err) + continue + } + } +} + +// Test queries of the form "SELECT expr". +func TestQueryExpressions(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, _, tearDown := prepare(ctx, t, nil) + defer tearDown() + + newRow := func(vals []interface{}) *Row { + row, err := NewRow(make([]string, len(vals)), vals) + if err != nil { + t.Fatal(err) + } + return row + } + + tests := []struct { + expr string + want interface{} + }{ + {"1", int64(1)}, + {"[1, 2, 3]", []NullInt64{{1, true}, {2, true}, {3, true}}}, + {"[1, NULL, 3]", []NullInt64{{1, true}, {0, false}, {3, true}}}, + {"IEEE_DIVIDE(1, 0)", math.Inf(1)}, + {"IEEE_DIVIDE(-1, 0)", math.Inf(-1)}, + {"IEEE_DIVIDE(0, 0)", math.NaN()}, + // TODO(jba): add IEEE_DIVIDE(0, 0) to the following array when we have a better equality predicate. + {"[IEEE_DIVIDE(1, 0), IEEE_DIVIDE(-1, 0)]", []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}}}, + {"ARRAY(SELECT AS STRUCT * FROM (SELECT 'a', 1) WHERE 0 = 1)", []NullRow{}}, + {"ARRAY(SELECT STRUCT(1, 2))", []NullRow{{Row: *newRow([]interface{}{1, 2}), Valid: true}}}, + } + for _, test := range tests { + iter := client.Single().Query(ctx, Statement{SQL: "SELECT " + test.expr}) + defer iter.Stop() + row, err := iter.Next() + if err != nil { + t.Errorf("%q: %v", test.expr, err) + continue + } + // Create new instance of type of test.want. + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := row.Column(0, gotp.Interface()); err != nil { + t.Errorf("%q: Column returned error %v", test.expr, err) + continue + } + got := reflect.Indirect(gotp).Interface() + // TODO(jba): remove isNaN special case when we have a better equality predicate. 
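+ // reflect.DeepEqual reports NaN as unequal to itself (NaN != NaN
+ // under IEEE 754), so expressions that evaluate to NaN need this
+ // special case.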
+ if isNaN(got) && isNaN(test.want) { + continue + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%q\n got %#v\nwant %#v", test.expr, got, test.want) + } + } +} + +func isNaN(x interface{}) bool { + f, ok := x.(float64) + if !ok { + return false + } + return math.IsNaN(f) +} + +func TestInvalidDatabase(t *testing.T) { + t.Parallel() + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + if testProjectID == "" { + t.Skip("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + t.Skip("Integration test skipped: cannot get service account credential from environment variable %v", "GCLOUD_TESTS_GOLANG_KEY") + } + db := fmt.Sprintf("projects/%v/instances/%v/databases/invalid", testProjectID, testInstanceID) + c, err := NewClient(ctx, db, option.WithTokenSource(ts)) + // Client creation should succeed even if the database is invalid. + if err != nil { + t.Fatal(err) + } + _, err = c.Single().ReadRow(ctx, "TestTable", Key{1}, []string{"col1"}) + if msg, ok := matchError(err, codes.NotFound, ""); !ok { + t.Fatal(msg) + } +} + +func TestReadErrors(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, _, tearDown := prepare(ctx, t, readDBStatements) + defer tearDown() + + // Read over invalid table fails + _, err := client.Single().ReadRow(ctx, "badTable", Key{1}, []string{"StringValue"}) + if msg, ok := matchError(err, codes.NotFound, "badTable"); !ok { + t.Error(msg) + } + // Read over invalid column fails + _, err = client.Single().ReadRow(ctx, "TestTable", Key{1}, []string{"badcol"}) + if msg, ok := matchError(err, codes.NotFound, "badcol"); !ok { + t.Error(msg) + } + + // Invalid query fails + iter := client.Single().Query(ctx, Statement{SQL: "SELECT Apples AND Oranges"}) + defer iter.Stop() + _, err = iter.Next() + if msg, ok := matchError(err, codes.InvalidArgument, "unrecognized name"); !ok { + t.Error(msg) + } + + // Read should fail on cancellation. + cctx, cancel := context.WithCancel(ctx) + cancel() + _, err = client.Single().ReadRow(cctx, "TestTable", Key{1}, []string{"StringValue"}) + if msg, ok := matchError(err, codes.Canceled, ""); !ok { + t.Error(msg) + } + // Read should fail if deadline exceeded. 
+ dctx, dcancel := context.WithTimeout(ctx, time.Nanosecond)
+ defer dcancel()
+ <-dctx.Done()
+ _, err = client.Single().ReadRow(dctx, "TestTable", Key{1}, []string{"StringValue"})
+ if msg, ok := matchError(err, codes.DeadlineExceeded, ""); !ok {
+ t.Error(msg)
+ }
+}
+
+func matchError(got error, wantCode codes.Code, wantMsgPart string) (string, bool) {
+ if ErrCode(got) != wantCode || !strings.Contains(strings.ToLower(ErrDesc(got)), strings.ToLower(wantMsgPart)) {
+ return fmt.Sprintf("got error <%v>\nwant <code = %q, message containing %q>", got, wantCode, wantMsgPart), false
+ }
+ return "", true
+}
+
+func rowToValues(r *Row) ([]interface{}, error) {
+ var x int64
+ var y, z string
+ if err := r.Column(0, &x); err != nil {
+ return nil, err
+ }
+ if err := r.Column(1, &y); err != nil {
+ return nil, err
+ }
+ if err := r.Column(2, &z); err != nil {
+ return nil, err
+ }
+ return []interface{}{x, y, z}, nil
+}
+
+func readAll(iter *RowIterator) ([][]interface{}, error) {
+ defer iter.Stop()
+ var vals [][]interface{}
+ for {
+ row, err := iter.Next()
+ if err == iterator.Done {
+ return vals, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ v, err := rowToValues(row)
+ if err != nil {
+ return nil, err
+ }
+ vals = append(vals, v)
+ }
+}
+
+func readAllTestTable(iter *RowIterator) ([]testTableRow, error) {
+ defer iter.Stop()
+ var vals []testTableRow
+ for {
+ row, err := iter.Next()
+ if err == iterator.Done {
+ return vals, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ var ttr testTableRow
+ if err := row.ToStruct(&ttr); err != nil {
+ return nil, err
+ }
+ vals = append(vals, ttr)
+ }
+}
+
+// Test TransactionRunner. Test that transactions are aborted and retried as expected.
+func TestTransactionRunner(t *testing.T) {
+ t.Parallel()
+ ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
+ defer cancel()
+ client, _, tearDown := prepare(ctx, t, singerDBStatements)
+ defer tearDown()
+
+ // Test 1: User error should abort the transaction.
+ _, _ = client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
+ tx.BufferWrite([]*Mutation{
+ Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)})})
+ return errors.New("user error")
+ })
+ // Empty read.
+ rows, err := readAllTestTable(client.Single().Read(ctx, "Accounts", Key{1}, []string{"AccountId", "Nickname", "Balance"}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := len(rows), 0; got != want {
+ t.Errorf("Empty read, got %d, want %d.", got, want)
+ }
+
+ // Test 2: Expect abort and retry.
+ // We run two ReadWriteTransactions concurrently and make txn1 abort txn2 by committing writes to the column txn2 has read,
+ // expecting txn2's subsequent read to abort and txn2 to retry.
+
+ // Set up two accounts.
+ accounts := []*Mutation{
+ Insert("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), int64(0)}),
+ Insert("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), int64(1)}),
+ }
+ if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil {
+ t.Fatal(err)
+ }
+
+ var (
+ cTxn1Start = make(chan struct{})
+ cTxn1Commit = make(chan struct{})
+ cTxn2Start = make(chan struct{})
+ wg sync.WaitGroup
+ )
+
+ // readBalance reads an account balance, returning any error; when an abort
+ // is expected, a non-abort error is reported as a test failure.
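+ // It accepts an anonymous interface containing only ReadRow, which both
+ // *ReadWriteTransaction and the single-use transaction returned by
+ // client.Single() satisfy, so the final verification below can reuse it.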
+ readBalance := func(tx interface { + ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) + }, key int64, expectAbort bool) (int64, error) { + var b int64 + r, e := tx.ReadRow(ctx, "Accounts", Key{int64(key)}, []string{"Balance"}) + if e != nil { + if expectAbort && !isAbortErr(e) { + t.Errorf("ReadRow got %v, want Abort error.", e) + } + return b, e + } + if ce := r.Column(0, &b); ce != nil { + return b, ce + } + return b, nil + } + + wg.Add(2) + // Txn 1 + go func() { + defer wg.Done() + var once sync.Once + _, e := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + b, e := readBalance(tx, 1, false) + if e != nil { + return e + } + // txn 1 can abort, in that case we skip closing the channel on retry. + once.Do(func() { close(cTxn1Start) }) + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), int64(b + 1)})}) + // Wait for second transaction. + <-cTxn2Start + return nil + }) + close(cTxn1Commit) + if e != nil { + t.Errorf("Transaction 1 commit, got %v, want nil.", e) + } + }() + // Txn 2 + go func() { + // Wait until txn 1 starts. + <-cTxn1Start + defer wg.Done() + var ( + once sync.Once + b1 int64 + b2 int64 + e error + ) + _, e = client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { + if b1, e = readBalance(tx, 1, false); e != nil { + return e + } + // Skip closing channel on retry. + once.Do(func() { close(cTxn2Start) }) + // Wait until txn 1 successfully commits. + <-cTxn1Commit + // Txn1 has committed and written a balance to the account. + // Now this transaction (txn2) reads and re-writes the balance. + // The first time through, it will abort because it overlaps with txn1. + // Then it will retry after txn1 commits, and succeed. + if b2, e = readBalance(tx, 2, true); e != nil { + return e + } + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), int64(b1 + b2)})}) + return nil + }) + if e != nil { + t.Errorf("Transaction 2 commit, got %v, want nil.", e) + } + }() + wg.Wait() + // Check that both transactions' effects are visible. + for i := int64(1); i <= int64(2); i++ { + if b, e := readBalance(client.Single(), i, false); e != nil { + t.Fatalf("ReadBalance for key %d error %v.", i, e) + } else if b != i { + t.Errorf("Balance for key %d, got %d, want %d.", i, b, i) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/statement.go b/vendor/cloud.google.com/go/spanner/statement.go new file mode 100644 index 0000000..d04c200 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement.go @@ -0,0 +1,90 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// A Statement is a SQL query with named parameters. 
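+//
+// For example, a minimal sketch (the table and the surrounding client and
+// ctx are illustrative, not part of this file):
+//
+//	stmt := Statement{
+//		SQL:    "SELECT FirstName FROM Singers WHERE SingerId = @id",
+//		Params: map[string]interface{}{"id": int64(1)},
+//	}
+//	iter := client.Single().Query(ctx, stmt)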
+// +// A parameter placeholder consists of '@' followed by the parameter name. +// Parameter names consist of any combination of letters, numbers, and +// underscores. Names may be entirely numeric (e.g., "WHERE m.id = @5"). +// Parameters may appear anywhere that a literal value is expected. The same +// parameter name may be used more than once. It is an error to execute a +// statement with unbound parameters. On the other hand, it is allowable to +// bind parameter names that are not used. +// +// See the documentation of the Row type for how Go types are mapped to Cloud +// Spanner types. +type Statement struct { + SQL string + Params map[string]interface{} +} + +// NewStatement returns a Statement with the given SQL and an empty Params map. +func NewStatement(sql string) Statement { + return Statement{SQL: sql, Params: map[string]interface{}{}} +} + +// errBindParam returns error for not being able to bind parameter to query request. +func errBindParam(k string, v interface{}, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to bind query parameter(name: %q, value: %v), error = <%v>", k, v, err) + } + se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %v)", k, v)) + return se +} + +var ( + errNilParam = errors.New("use T(nil), not nil") + errNoType = errors.New("no type information") +) + +// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest. +func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error { + r.Params = &proto3.Struct{ + Fields: map[string]*proto3.Value{}, + } + r.ParamTypes = map[string]*sppb.Type{} + for k, v := range s.Params { + if v == nil { + return errBindParam(k, v, errNilParam) + } + val, t, err := encodeValue(v) + if err != nil { + return errBindParam(k, v, err) + } + if t == nil { // should not happen, because of nil check above + return errBindParam(k, v, errNoType) + } + r.Params.Fields[k] = val + r.ParamTypes[k] = t + } + return nil +} diff --git a/vendor/cloud.google.com/go/spanner/statement_test.go b/vendor/cloud.google.com/go/spanner/statement_test.go new file mode 100644 index 0000000..afe0700 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement_test.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + + "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Statement.bindParams. +func TestBindParams(t *testing.T) { + // Verify Statement.bindParams generates correct values and types. 
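+ // Each case below swaps a new value into st.Params["var"] and compares
+ // the generated ExecuteSqlRequest against hand-built proto values.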
+ st := Statement{ + SQL: "SELECT id from t_foo WHERE col = @var", + Params: map[string]interface{}{"var": nil}, + } + want := &sppb.ExecuteSqlRequest{ + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{"var": nil}, + }, + ParamTypes: map[string]*sppb.Type{"var": nil}, + } + var ( + t1, _ = time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + // Boundaries + t2, _ = time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z") + t3, _ = time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z") + d1, _ = civil.ParseDate("2016-11-15") + // Boundaries + d2, _ = civil.ParseDate("0001-01-01") + d3, _ = civil.ParseDate("9999-12-31") + ) + for i, test := range []struct { + val interface{} + wantField *proto3.Value + wantType *sppb.Type + }{ + // bool + {true, boolProto(true), boolType()}, + {NullBool{true, true}, boolProto(true), boolType()}, + {NullBool{true, false}, nullProto(), boolType()}, + {[]bool(nil), nullProto(), listType(boolType())}, + {[]bool{}, listProto(), listType(boolType())}, + {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(boolType())}, + {[]NullBool(nil), nullProto(), listType(boolType())}, + {[]NullBool{}, listProto(), listType(boolType())}, + {[]NullBool{{true, true}, {}}, listProto(boolProto(true), nullProto()), listType(boolType())}, + // int + {int(1), intProto(1), intType()}, + {[]int(nil), nullProto(), listType(intType())}, + {[]int{}, listProto(), listType(intType())}, + {[]int{1, 2}, listProto(intProto(1), intProto(2)), listType(intType())}, + // int64 + {int64(1), intProto(1), intType()}, + {NullInt64{5, true}, intProto(5), intType()}, + {NullInt64{5, false}, nullProto(), intType()}, + {[]int64(nil), nullProto(), listType(intType())}, + {[]int64{}, listProto(), listType(intType())}, + {[]int64{1, 2}, listProto(intProto(1), intProto(2)), listType(intType())}, + {[]NullInt64(nil), nullProto(), listType(intType())}, + {[]NullInt64{}, listProto(), listType(intType())}, + {[]NullInt64{{1, true}, {}}, listProto(intProto(1), nullProto()), listType(intType())}, + // float64 + {0.0, floatProto(0.0), floatType()}, + {math.Inf(1), floatProto(math.Inf(1)), floatType()}, + {math.Inf(-1), floatProto(math.Inf(-1)), floatType()}, + {math.NaN(), floatProto(math.NaN()), floatType()}, + {NullFloat64{2.71, true}, floatProto(2.71), floatType()}, + {NullFloat64{1.41, false}, nullProto(), floatType()}, + {[]float64(nil), nullProto(), listType(floatType())}, + {[]float64{}, listProto(), listType(floatType())}, + {[]float64{2.72, math.Inf(1)}, listProto(floatProto(2.72), floatProto(math.Inf(1))), listType(floatType())}, + {[]NullFloat64(nil), nullProto(), listType(floatType())}, + {[]NullFloat64{}, listProto(), listType(floatType())}, + {[]NullFloat64{{2.72, true}, {}}, listProto(floatProto(2.72), nullProto()), listType(floatType())}, + // string + {"", stringProto(""), stringType()}, + {"foo", stringProto("foo"), stringType()}, + {NullString{"bar", true}, stringProto("bar"), stringType()}, + {NullString{"bar", false}, nullProto(), stringType()}, + {[]string(nil), nullProto(), listType(stringType())}, + {[]string{}, listProto(), listType(stringType())}, + {[]string{"foo", "bar"}, listProto(stringProto("foo"), stringProto("bar")), listType(stringType())}, + {[]NullString(nil), nullProto(), listType(stringType())}, + {[]NullString{}, listProto(), listType(stringType())}, + {[]NullString{{"foo", true}, {}}, listProto(stringProto("foo"), nullProto()), listType(stringType())}, + // bytes + {[]byte{}, bytesProto([]byte{}), bytesType()}, + 
{[]byte{1, 2, 3}, bytesProto([]byte{1, 2, 3}), bytesType()}, + {[]byte(nil), nullProto(), bytesType()}, + {[][]byte(nil), nullProto(), listType(bytesType())}, + {[][]byte{}, listProto(), listType(bytesType())}, + {[][]byte{[]byte{1}, []byte(nil)}, listProto(bytesProto([]byte{1}), nullProto()), listType(bytesType())}, + // date + {d1, dateProto(d1), dateType()}, + {NullDate{civil.Date{}, false}, nullProto(), dateType()}, + {[]civil.Date(nil), nullProto(), listType(dateType())}, + {[]civil.Date{}, listProto(), listType(dateType())}, + {[]civil.Date{d1, d2, d3}, listProto(dateProto(d1), dateProto(d2), dateProto(d3)), listType(dateType())}, + {[]NullDate{NullDate{d2, true}, NullDate{}}, listProto(dateProto(d2), nullProto()), listType(dateType())}, + // timestamp + {t1, timeProto(t1), timeType()}, + {NullTime{}, nullProto(), timeType()}, + {[]time.Time(nil), nullProto(), listType(timeType())}, + {[]time.Time{}, listProto(), listType(timeType())}, + {[]time.Time{t1, t2, t3}, listProto(timeProto(t1), timeProto(t2), timeProto(t3)), listType(timeType())}, + {[]NullTime{NullTime{t2, true}, NullTime{}}, listProto(timeProto(t2), nullProto()), listType(timeType())}, + } { + st.Params["var"] = test.val + want.Params.Fields["var"] = test.wantField + want.ParamTypes["var"] = test.wantType + got := &sppb.ExecuteSqlRequest{} + if err := st.bindParams(got); err != nil || !proto.Equal(got, want) { + // handle NaN + if test.wantType.Code == floatType().Code && proto.MarshalTextString(got) == proto.MarshalTextString(want) { + continue + } + t.Errorf("#%d: bind result: \n(%v, %v)\nwant\n(%v, %v)\n", i, got, err, want, nil) + } + } + + // Verify type error reporting. + for _, test := range []struct { + val interface{} + wantErr error + }{ + { + struct{}{}, + errBindParam("var", struct{}{}, errEncoderUnsupportedType(struct{}{})), + }, + { + nil, + errBindParam("var", nil, errNilParam), + }, + } { + st.Params["var"] = test.val + var got sppb.ExecuteSqlRequest + if err := st.bindParams(&got); !reflect.DeepEqual(err, test.wantErr) { + t.Errorf("value %#v:\ngot: %v\nwant: %v", test.val, err, test.wantErr) + } + } +} + +func TestNewStatement(t *testing.T) { + s := NewStatement("query") + if got, want := s.SQL, "query"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound.go b/vendor/cloud.google.com/go/spanner/timestampbound.go new file mode 100644 index 0000000..068d966 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound.go @@ -0,0 +1,245 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// timestampBoundType specifies the timestamp bound mode. 
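+// It is the internal discriminator stored in TimestampBound; one constant
+// per supported mode is defined below.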
+type timestampBoundType int + +const ( + strong timestampBoundType = iota // strong reads + exactStaleness // read with exact staleness + maxStaleness // read with max staleness + minReadTimestamp // read with min freshness + readTimestamp // read data at exact timestamp +) + +// TimestampBound defines how Cloud Spanner will choose a timestamp for a single +// read/query or read-only transaction. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, stale +// read-only transactions can execute more quickly than strong or read-write +// transactions, because they are able to execute far from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. A TimestampBound +// can be specified when creating transactions, see the documentation of +// spanner.Client for an example. +// +// Strong reads +// +// Strong reads are guaranteed to see the effects of all transactions that have +// committed before the start of the read. Furthermore, all rows yielded by a +// single read are consistent with each other - if any part of the read +// observes a transaction, all parts of the read see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are concurrent +// writes. If consistency across reads is required, the reads should be +// executed within a transaction or at an exact read timestamp. +// +// Use StrongRead() to create a bound of this type. +// +// Exact staleness +// +// These timestamp bounds execute reads at a user-specified timestamp. Reads at +// a timestamp are guaranteed to see a consistent prefix of the global +// transaction history: they observe modifications done by all transactions +// with a commit timestamp less than or equal to the read timestamp, and +// observe none of the modifications done by transactions with a larger commit +// timestamp. They will block until all conflicting transactions that may be +// assigned commit timestamps less than or equal to the read timestamp have +// finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a timestamp. As a +// result, they execute slightly faster than the equivalent boundedly stale +// concurrency modes. On the other hand, boundedly stale reads usually return +// fresher results. +// +// Use ReadTimestamp() and ExactStaleness() to create a bound of this type. +// +// Bounded staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to +// a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within +// the staleness bound that allows execution of the reads at the closest +// available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of the read +// observes a transaction, all parts of the read see the transaction. Boundedly +// stale reads are not repeatable: two stale reads, even if they use the same +// staleness bound, can execute at different timestamps and thus return +// inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase negotiates a +// timestamp among all replicas needed to serve the read. 
In the second phase,
+// reads are executed at the negotiated timestamp.
+//
+// As a result of the two-phase execution, bounded staleness reads are usually
+// a little slower than comparable exact staleness reads. However, they are
+// typically able to return fresher results, and are more likely to execute at
+// the closest replica.
+//
+// Because the timestamp negotiation requires up-front knowledge of which rows
+// will be read, it can only be used with single-use reads and single-use
+// read-only transactions.
+//
+// Use MinReadTimestamp() and MaxStaleness() to create a bound of this type.
+//
+// Old read timestamps and garbage collection
+//
+// Cloud Spanner continuously garbage collects deleted and overwritten data in the
+// background to reclaim storage space. This process is known as "version
+// GC". By default, version GC reclaims versions after they are four hours
+// old. Because of this, Cloud Spanner cannot perform reads at read timestamps more
+// than four hours in the past. This restriction also applies to in-progress
+// reads and/or SQL queries whose timestamps become too old while
+// executing. Reads and SQL queries with too-old read timestamps fail with the
+// error ErrorCode.FAILED_PRECONDITION.
+type TimestampBound struct {
+ mode timestampBoundType
+ d time.Duration
+ t time.Time
+}
+
+// StrongRead returns a TimestampBound that will perform reads and queries at a
+// timestamp where all previously committed transactions are visible.
+func StrongRead() TimestampBound {
+ return TimestampBound{mode: strong}
+}
+
+// ExactStaleness returns a TimestampBound that will perform reads and queries
+// at an exact staleness.
+func ExactStaleness(d time.Duration) TimestampBound {
+ return TimestampBound{
+ mode: exactStaleness,
+ d: d,
+ }
+}
+
+// MaxStaleness returns a TimestampBound that will perform reads and queries at
+// a time chosen to be at most "d" stale.
+func MaxStaleness(d time.Duration) TimestampBound {
+ return TimestampBound{
+ mode: maxStaleness,
+ d: d,
+ }
+}
+
+// MinReadTimestamp returns a TimestampBound that will perform reads
+// and queries at a time chosen to be at least "t".
+func MinReadTimestamp(t time.Time) TimestampBound {
+ return TimestampBound{
+ mode: minReadTimestamp,
+ t: t,
+ }
+}
+
+// ReadTimestamp returns a TimestampBound that will perform reads and queries at
+// the given time.
+func ReadTimestamp(t time.Time) TimestampBound {
+ return TimestampBound{
+ mode: readTimestamp,
+ t: t,
+ }
+}
+
+// String implements fmt.Stringer.
+func (tb TimestampBound) String() string {
+ switch tb.mode {
+ case strong:
+ return "(strong)"
+ case exactStaleness:
+ return fmt.Sprintf("(exactStaleness: %s)", tb.d)
+ case maxStaleness:
+ return fmt.Sprintf("(maxStaleness: %s)", tb.d)
+ case minReadTimestamp:
+ return fmt.Sprintf("(minReadTimestamp: %s)", tb.t)
+ case readTimestamp:
+ return fmt.Sprintf("(readTimestamp: %s)", tb.t)
+ default:
+ return fmt.Sprintf("{mode=%v, d=%v, t=%v}", tb.mode, tb.d, tb.t)
+ }
+}
+
+// durationProto takes a time.Duration and converts it into pbd.Duration for
+// calling gRPC APIs.
+func durationProto(d time.Duration) *pbd.Duration {
+ n := d.Nanoseconds()
+ return &pbd.Duration{
+ Seconds: n / int64(time.Second),
+ Nanos: int32(n % int64(time.Second)),
+ }
+}
+
+// timestampProto takes a time.Time and converts it into pbt.Timestamp for calling
+// gRPC APIs.
+func timestampProto(t time.Time) *pbt.Timestamp { + return &pbt.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// buildTransactionOptionsReadOnly converts a spanner.TimestampBound into a sppb.TransactionOptions_ReadOnly +// transaction option, which is then used in transactional reads. +func buildTransactionOptionsReadOnly(tb TimestampBound, returnReadTimestamp bool) *sppb.TransactionOptions_ReadOnly { + pb := &sppb.TransactionOptions_ReadOnly{ + ReturnReadTimestamp: returnReadTimestamp, + } + switch tb.mode { + case strong: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + } + case exactStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: durationProto(tb.d), + } + case maxStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: durationProto(tb.d), + } + case minReadTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: timestampProto(tb.t), + } + case readTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: timestampProto(tb.t), + } + default: + panic(fmt.Sprintf("buildTransactionOptionsReadOnly(%v,%v)", tb, returnReadTimestamp)) + } + return pb +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound_test.go b/vendor/cloud.google.com/go/spanner/timestampbound_test.go new file mode 100644 index 0000000..47fb481 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound_test.go @@ -0,0 +1,208 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test generating TimestampBound for strong reads. +func TestStrong(t *testing.T) { + got := StrongRead() + want := TimestampBound{mode: strong} + if !reflect.DeepEqual(got, want) { + t.Errorf("Strong() = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with exact staleness. +func TestExactStaleness(t *testing.T) { + got := ExactStaleness(10 * time.Second) + want := TimestampBound{mode: exactStaleness, d: 10 * time.Second} + if !reflect.DeepEqual(got, want) { + t.Errorf("ExactStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with max staleness. +func TestMaxStaleness(t *testing.T) { + got := MaxStaleness(10 * time.Second) + want := TimestampBound{mode: maxStaleness, d: 10 * time.Second} + if !reflect.DeepEqual(got, want) { + t.Errorf("MaxStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with minimum freshness requirement. 
+func TestMinReadTimestamp(t *testing.T) {
+ ts := time.Now()
+ got := MinReadTimestamp(ts)
+ want := TimestampBound{mode: minReadTimestamp, t: ts}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("MinReadTimestamp(%v) = %v; want %v", ts, got, want)
+ }
+}
+
+// Test generating TimestampBound for reads requesting data at an exact timestamp.
+func TestReadTimestamp(t *testing.T) {
+ ts := time.Now()
+ got := ReadTimestamp(ts)
+ want := TimestampBound{mode: readTimestamp, t: ts}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("ReadTimestamp(%v) = %v; want %v", ts, got, want)
+ }
+}
+
+// Test TimestampBound.String.
+func TestTimestampBoundString(t *testing.T) {
+ ts := time.Unix(1136239445, 0).UTC()
+ var tests = []struct {
+ tb TimestampBound
+ want string
+ }{
+ {
+ tb: TimestampBound{mode: strong},
+ want: "(strong)",
+ },
+ {
+ tb: TimestampBound{mode: exactStaleness, d: 10 * time.Second},
+ want: "(exactStaleness: 10s)",
+ },
+ {
+ tb: TimestampBound{mode: maxStaleness, d: 10 * time.Second},
+ want: "(maxStaleness: 10s)",
+ },
+ {
+ tb: TimestampBound{mode: minReadTimestamp, t: ts},
+ want: "(minReadTimestamp: 2006-01-02 22:04:05 +0000 UTC)",
+ },
+ {
+ tb: TimestampBound{mode: readTimestamp, t: ts},
+ want: "(readTimestamp: 2006-01-02 22:04:05 +0000 UTC)",
+ },
+ }
+ for _, test := range tests {
+ got := test.tb.String()
+ if got != test.want {
+ t.Errorf("%#v.String():\ngot %q\nwant %q", test.tb, got, test.want)
+ }
+ }
+}
+
+// Test time.Duration to pbd.Duration conversion.
+func TestDurationProto(t *testing.T) {
+ var tests = []struct {
+ d time.Duration
+ want pbd.Duration
+ }{
+ {time.Duration(0), pbd.Duration{Seconds: 0, Nanos: 0}},
+ {time.Second, pbd.Duration{Seconds: 1, Nanos: 0}},
+ {time.Millisecond, pbd.Duration{Seconds: 0, Nanos: 1e6}},
+ {15 * time.Nanosecond, pbd.Duration{Seconds: 0, Nanos: 15}},
+ {42 * time.Hour, pbd.Duration{Seconds: 151200}},
+ {-(1*time.Hour + 4*time.Millisecond), pbd.Duration{Seconds: -3600, Nanos: -4e6}},
+ }
+ for _, test := range tests {
+ got := durationProto(test.d)
+ if !reflect.DeepEqual(got, &test.want) {
+ t.Errorf("durationProto(%v) = %v; want %v", test.d, got, test.want)
+ }
+ }
+}
+
+// Test time.Time to pbt.Timestamp conversion.
+func TestTimeProto(t *testing.T) {
+ var tests = []struct {
+ t time.Time
+ want pbt.Timestamp
+ }{
+ {time.Unix(0, 0), pbt.Timestamp{}},
+ {time.Unix(1136239445, 12345), pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}},
+ {time.Unix(-1000, 12345), pbt.Timestamp{Seconds: -1000, Nanos: 12345}},
+ }
+ for _, test := range tests {
+ got := timestampProto(test.t)
+ if !reflect.DeepEqual(got, &test.want) {
+ t.Errorf("timestampProto(%v) = %v; want %v", test.t, got, test.want)
+ }
+ }
+}
+
+// Test read-only transaction option builder.
+func TestBuildTransactionOptionsReadOnly(t *testing.T) { + ts := time.Unix(1136239445, 12345) + var tests = []struct { + tb TimestampBound + ts bool + want sppb.TransactionOptions_ReadOnly + }{ + { + StrongRead(), false, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true}, + ReturnReadTimestamp: false, + }, + }, + { + ExactStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + { + MaxStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + + { + MinReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + { + ReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + } + for _, test := range tests { + got := buildTransactionOptionsReadOnly(test.tb, test.ts) + if !reflect.DeepEqual(got, &test.want) { + t.Errorf("buildTransactionOptionsReadOnly(%v,%v) = %v; want %v", test.tb, test.ts, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go new file mode 100644 index 0000000..ffdbcb9 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/transaction.go @@ -0,0 +1,821 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "sync" + "time" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// transactionID stores a transaction ID which uniquely identifies a transaction in Cloud Spanner. +type transactionID []byte + +// txReadEnv manages a read-transaction environment consisting of a session handle and a transaction selector. +type txReadEnv interface { + // acquire returns a read-transaction environment that can be used to perform a transactional read. + acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) + // sets the transaction's read timestamp + setTimestamp(time.Time) + // release should be called at the end of every transactional read to deal with session recycling. + release(error) +} + +// txReadOnly contains methods for doing transactional reads. +type txReadOnly struct { + // read-transaction environment for performing transactional read operations. 
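+ // Embedding the txReadEnv interface lets read-only and read-write
+ // transactions share these read methods while supplying their own
+ // acquire/release behavior.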
+ txReadEnv
+}
+
+// errSessionClosed returns an error for using a recycled/destroyed session.
+func errSessionClosed(sh *sessionHandle) error {
+ return spannerErrorf(codes.FailedPrecondition,
+ "session is already recycled / destroyed: session_id = %q, rpc_client = %v", sh.getID(), sh.getClient())
+}
+
+// Read returns a RowIterator for reading multiple rows from the database.
+func (t *txReadOnly) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator {
+ // ReadUsingIndex uses the primary index if an empty index name is provided.
+ return t.ReadUsingIndex(ctx, table, "", keys, columns)
+}
+
+// ReadUsingIndex returns a RowIterator for reading multiple rows from the database
+// using an index.
+//
+// Currently, this function can only read columns that are part of the index
+// key, part of the primary key, or stored in the index due to a STORING clause
+// in the index definition.
+func (t *txReadOnly) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator {
+ var (
+ sh *sessionHandle
+ ts *sppb.TransactionSelector
+ err error
+ )
+ kset, err := keys.keySetProto()
+ if err != nil {
+ return &RowIterator{err: err}
+ }
+ if sh, ts, err = t.acquire(ctx); err != nil {
+ return &RowIterator{err: err}
+ }
+ // Cloud Spanner will return "Session not found" on bad sessions.
+ sid, client := sh.getID(), sh.getClient()
+ if sid == "" || client == nil {
+ // Might happen if transaction is closed in the middle of an API call.
+ return &RowIterator{err: errSessionClosed(sh)}
+ }
+ return stream(
+ contextWithOutgoingMetadata(ctx, sh.getMetadata()),
+ func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
+ return client.StreamingRead(ctx,
+ &sppb.ReadRequest{
+ Session: sid,
+ Transaction: ts,
+ Table: table,
+ Index: index,
+ Columns: columns,
+ KeySet: kset,
+ ResumeToken: resumeToken,
+ })
+ },
+ t.setTimestamp,
+ t.release,
+ )
+}
+
+// errRowNotFound returns an error for not being able to read the row identified by key.
+func errRowNotFound(table string, key Key) error {
+ return spannerErrorf(codes.NotFound, "row not found(Table: %v, PrimaryKey: %v)", table, key)
+}
+
+// ReadRow reads a single row from the database.
+//
+// If no row is present with the given key, then ReadRow returns an error where
+// spanner.ErrCode(err) is codes.NotFound.
+func (t *txReadOnly) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) {
+ iter := t.Read(ctx, table, key, columns)
+ defer iter.Stop()
+ row, err := iter.Next()
+ switch err {
+ case iterator.Done:
+ return nil, errRowNotFound(table, key)
+ case nil:
+ return row, nil
+ default:
+ return nil, err
+ }
+}
+
+// Query executes a query against the database. It returns a RowIterator
+// for retrieving the resulting rows.
+func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterator {
+ var (
+ sh *sessionHandle
+ ts *sppb.TransactionSelector
+ err error
+ )
+ if sh, ts, err = t.acquire(ctx); err != nil {
+ return &RowIterator{err: err}
+ }
+ // Cloud Spanner will return "Session not found" on bad sessions.
+ sid, client := sh.getID(), sh.getClient()
+ if sid == "" || client == nil {
+ // Might happen if transaction is closed in the middle of an API call.
+ return &RowIterator{err: errSessionClosed(sh)} + } + req := &sppb.ExecuteSqlRequest{ + Session: sid, + Transaction: ts, + Sql: statement.SQL, + } + if err := statement.bindParams(req); err != nil { + return &RowIterator{err: err} + } + return stream( + contextWithOutgoingMetadata(ctx, sh.getMetadata()), + func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + req.ResumeToken = resumeToken + return client.ExecuteStreamingSql(ctx, req) + }, + t.setTimestamp, + t.release) +} + +// txState is the status of a transaction. +type txState int + +const ( + // transaction is new, waiting to be initialized. + txNew txState = iota + // transaction is being initialized. + txInit + // transaction is active and can perform read/write. + txActive + // transaction is closed, cannot be used anymore. + txClosed +) + +// errRtsUnavailable returns error for read transaction's read timestamp being unavailable. +func errRtsUnavailable() error { + return spannerErrorf(codes.Internal, "read timestamp is unavailable") +} + +// errTxNotInitialized returns error for using an uninitialized transaction. +func errTxNotInitialized() error { + return spannerErrorf(codes.InvalidArgument, "cannot use a uninitialized transaction") +} + +// errTxClosed returns error for using a closed transaction. +func errTxClosed() error { + return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction") +} + +// errUnexpectedTxState returns error for transaction enters an unexpected state. +func errUnexpectedTxState(ts txState) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected transaction state: %v", ts) +} + +// ReadOnlyTransaction provides a snapshot transaction with guaranteed +// consistency across reads, but does not allow writes. Read-only +// transactions can be configured to read at timestamps in the past. +// +// Read-only transactions do not take locks. Instead, they work by choosing a +// Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do +// not acquire locks, they do not block concurrent read-write transactions. +// +// Unlike locking read-write transactions, read-only transactions never +// abort. They can fail if the chosen read timestamp is garbage collected; +// however, the default garbage collection policy is generous enough that most +// applications do not need to worry about this in practice. See the +// documentation of TimestampBound for more details. +// +// A ReadOnlyTransaction consumes resources on the server until Close() is +// called. +type ReadOnlyTransaction struct { + // txReadOnly contains methods for performing transactional reads. + txReadOnly + + // singleUse indicates that the transaction can be used for only one read. + singleUse bool + + // sp is the session pool for allocating a session to execute the read-only transaction. It is set only once during initialization of the ReadOnlyTransaction. + sp *sessionPool + // mu protects concurrent access to the internal states of ReadOnlyTransaction. + mu sync.Mutex + // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadOnlyTransaction. + tx transactionID + // txReadyOrClosed is for broadcasting that transaction ID has been returned by Cloud Spanner or that transaction is closed. + txReadyOrClosed chan struct{} + // state is the current transaction status of the ReadOnly transaction. + state txState + // sh is the sessionHandle allocated from sp. + sh *sessionHandle + // rts is the read timestamp returned by transactional reads. 
+	rts time.Time
+	// tb is the read staleness bound specification for transactional reads.
+	tb TimestampBound
+}
+
+// errTxInitTimeout returns error for timing out while waiting for the transaction's initialization.
+func errTxInitTimeout() error {
+	return spannerErrorf(codes.Canceled, "timeout/context canceled while waiting for transaction's initialization")
+}
+
+// getTimestampBound returns the read staleness bound specified for the ReadOnlyTransaction.
+func (t *ReadOnlyTransaction) getTimestampBound() TimestampBound {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.tb
+}
+
+// begin starts a snapshot read-only Transaction on Cloud Spanner.
+func (t *ReadOnlyTransaction) begin(ctx context.Context) error {
+	var (
+		locked bool
+		tx     transactionID
+		rts    time.Time
+		sh     *sessionHandle
+		err    error
+	)
+	defer func() {
+		if !locked {
+			t.mu.Lock()
+			// Not necessary, just to make it clear that t.mu is being held when locked == true.
+			locked = true
+		}
+		if t.state != txClosed {
+			// Signal other initialization routines.
+			close(t.txReadyOrClosed)
+			t.txReadyOrClosed = make(chan struct{})
+		}
+		t.mu.Unlock()
+		if err != nil && sh != nil {
+			// Got a valid session handle, but failed to initialize the transaction on Cloud Spanner.
+			if shouldDropSession(err) {
+				sh.destroy()
+			}
+			// If sh.destroy was already executed, this becomes a noop.
+			sh.recycle()
+		}
+	}()
+	sh, err = t.sp.take(ctx)
+	if err != nil {
+		return err
+	}
+	err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error {
+		res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{
+			Session: sh.getID(),
+			Options: &sppb.TransactionOptions{
+				Mode: &sppb.TransactionOptions_ReadOnly_{
+					ReadOnly: buildTransactionOptionsReadOnly(t.getTimestampBound(), true),
+				},
+			},
+		})
+		if e != nil {
+			return e
+		}
+		tx = res.Id
+		if res.ReadTimestamp != nil {
+			rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
+		}
+		return nil
+	})
+	t.mu.Lock()
+	locked = true          // The deferred function will be executed with t.mu being held.
+	if t.state == txClosed { // During the execution of t.begin(), t.Close() was invoked.
+		return errSessionClosed(sh)
+	}
+	// If begin() fails, this allows other queries to take over the initialization.
+	t.tx = nil
+	if err == nil {
+		t.tx = tx
+		t.rts = rts
+		t.sh = sh
+		// Transition to txActive.
+		t.state = txActive
+	}
+	return err
+}
+
+// acquire implements txReadEnv.acquire.
+func (t *ReadOnlyTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	if err := checkNestedTxn(ctx); err != nil {
+		return nil, nil, err
+	}
+	if t.singleUse {
+		return t.acquireSingleUse(ctx)
+	}
+	return t.acquireMultiUse(ctx)
+}
+
+func (t *ReadOnlyTransaction) acquireSingleUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	switch t.state {
+	case txClosed:
+		// A closed single-use transaction can never be reused.
+		return nil, nil, errTxClosed()
+	case txNew:
+		t.state = txClosed
+		ts := &sppb.TransactionSelector{
+			Selector: &sppb.TransactionSelector_SingleUse{
+				SingleUse: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadOnly_{
+						ReadOnly: buildTransactionOptionsReadOnly(t.tb, true),
+					},
+				},
+			},
+		}
+		sh, err := t.sp.take(ctx)
+		if err != nil {
+			return nil, nil, err
+		}
+		// Install the session handle into t; it can be used for read-only operations later.
+		t.sh = sh
+		return sh, ts, nil
+	}
+	us := t.state
+	// A single-use transaction should only be in the txNew or txClosed state.
+	return nil, nil, errUnexpectedTxState(us)
+}
+
+func (t *ReadOnlyTransaction) acquireMultiUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	for {
+		t.mu.Lock()
+		switch t.state {
+		case txClosed:
+			t.mu.Unlock()
+			return nil, nil, errTxClosed()
+		case txNew:
+			// Transition to txInit so that no further TimestampBound change is accepted.
+			t.state = txInit
+			t.mu.Unlock()
+			continue
+		case txInit:
+			if t.tx != nil {
+				// Wait for the transaction ID to become ready.
+				txReadyOrClosed := t.txReadyOrClosed
+				t.mu.Unlock()
+				select {
+				case <-txReadyOrClosed:
+					// Need to check the transaction state again.
+					continue
+				case <-ctx.Done():
+					// Timed out or canceled while waiting for initialization; return the error directly.
+					return nil, nil, errTxInitTimeout()
+				}
+			}
+			// Take ownership of initializing the transaction.
+			t.tx = transactionID{}
+			t.mu.Unlock()
+			// Begin a read-only transaction.
+			// TODO: consider adding a transaction option which allows queries to initiate transactions by themselves. Note that this option might not
+			// always be a good idea because the ID of the new transaction won't be ready till the query returns some data or completes.
+			if err := t.begin(ctx); err != nil {
+				return nil, nil, err
+			}
+			// If t.begin() succeeded, t.state should have been changed to txActive, so we can just continue here.
+			continue
+		case txActive:
+			sh := t.sh
+			ts := &sppb.TransactionSelector{
+				Selector: &sppb.TransactionSelector_Id{
+					Id: t.tx,
+				},
+			}
+			t.mu.Unlock()
+			return sh, ts, nil
+		}
+		state := t.state
+		t.mu.Unlock()
+		return nil, nil, errUnexpectedTxState(state)
+	}
+}
+
+func (t *ReadOnlyTransaction) setTimestamp(ts time.Time) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.rts.IsZero() {
+		t.rts = ts
+	}
+}
+
+// release implements txReadEnv.release.
+func (t *ReadOnlyTransaction) release(err error) {
+	t.mu.Lock()
+	sh := t.sh
+	t.mu.Unlock()
+	if sh != nil { // sh could be nil if t.acquire() fails.
+		if shouldDropSession(err) {
+			sh.destroy()
+		}
+		if t.singleUse {
+			// If the session handle is already destroyed, this becomes a noop.
+			sh.recycle()
+		}
+	}
+}
+
+// Close closes a ReadOnlyTransaction; the transaction cannot perform any reads after being closed.
+func (t *ReadOnlyTransaction) Close() {
+	if t.singleUse {
+		return
+	}
+	t.mu.Lock()
+	if t.state != txClosed {
+		t.state = txClosed
+		close(t.txReadyOrClosed)
+	}
+	sh := t.sh
+	t.mu.Unlock()
+	if sh == nil {
+		return
+	}
+	// If the session handle is already destroyed, this becomes a noop.
+	// If there are still active queries and the recycled session is reused before they complete, Cloud Spanner will cancel them
+	// on behalf of the new transaction on the session.
+	if sh != nil {
+		sh.recycle()
+	}
+}
+
+// Timestamp returns the timestamp chosen to perform reads and
+// queries in this transaction. The value can only be read after some
+// read or query has either returned some data or completed without
+// returning any data.
+func (t *ReadOnlyTransaction) Timestamp() (time.Time, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.rts.IsZero() {
+		return t.rts, errRtsUnavailable()
+	}
+	return t.rts, nil
+}
+
+// WithTimestampBound specifies the TimestampBound to use for read or query.
+// This can only be used before the first read or query is invoked.
Note: +// bounded staleness is not available with general ReadOnlyTransactions; use a +// single-use ReadOnlyTransaction instead. +// +// The returned value is the ReadOnlyTransaction so calls can be chained. +func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction { + t.mu.Lock() + defer t.mu.Unlock() + if t.state == txNew { + // Only allow to set TimestampBound before the first query. + t.tb = tb + } + return t +} + +// ReadWriteTransaction provides a locking read-write transaction. +// +// This type of transaction is the only way to write data into Cloud Spanner; +// (*Client).Apply and (*Client).ApplyAtLeastOnce use transactions +// internally. These transactions rely on pessimistic locking and, if +// necessary, two-phase commit. Locking read-write transactions may abort, +// requiring the application to retry. However, the interface exposed by +// (*Client).ReadWriteTransaction eliminates the need for applications to write +// retry loops explicitly. +// +// Locking transactions may be used to atomically read-modify-write data +// anywhere in a database. This type of transaction is externally consistent. +// +// Clients should attempt to minimize the amount of time a transaction is +// active. Faster transactions commit with higher probability and cause less +// contention. Cloud Spanner attempts to keep read locks active as long as the +// transaction continues to do reads. Long periods of inactivity at the client +// may cause Cloud Spanner to release a transaction's locks and abort it. +// +// Reads performed within a transaction acquire locks on the data being +// read. Writes can only be done at commit time, after all reads have been +// completed. Conceptually, a read-write transaction consists of zero or more +// reads or SQL queries followed by a commit. +// +// See (*Client).ReadWriteTransaction for an example. +// +// Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired are still +// valid at commit time, and it is able to acquire write locks for all +// writes. Cloud Spanner can abort the transaction for any reason. If a commit +// attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not +// modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about how long +// the transaction's locks were held for. It is an error to use Cloud Spanner locks +// for any sort of mutual exclusion other than between Cloud Spanner transactions +// themselves. +// +// Aborted transactions +// +// Application code does not need to retry explicitly; RunInTransaction will +// automatically retry a transaction if an attempt results in an abort. The +// lock priority of a transaction increases after each prior aborted +// transaction, meaning that the next attempt has a slightly better chance of +// success than before. +// +// Under some circumstances (e.g., many transactions attempting to modify the +// same row(s)), a transaction can abort many times in a short period before +// successfully committing. Thus, it is not a good idea to cap the number of +// retries a transaction can attempt; instead, it is better to limit the total +// amount of wall time spent retrying. +// +// Idle transactions +// +// A transaction is considered idle if it has no outstanding reads or SQL +// queries and has not started a read or SQL query within the last 10 +// seconds. 
Idle transactions can be aborted by Cloud Spanner so that they don't hold
+// on to locks indefinitely. In that case, the commit will fail with error
+// ABORTED.
+//
+// If this behavior is undesirable, periodically executing a simple SQL query
+// in the transaction (e.g., SELECT 1) prevents the transaction from becoming
+// idle.
+type ReadWriteTransaction struct {
+	// txReadOnly contains methods for performing transactional reads.
+	txReadOnly
+	// sh is the sessionHandle allocated from sp. It is set only once during the initialization of ReadWriteTransaction.
+	sh *sessionHandle
+	// tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadWriteTransaction.
+	// It is set only once in ReadWriteTransaction.begin() during the initialization of ReadWriteTransaction.
+	tx transactionID
+	// mu protects concurrent access to the internal states of ReadWriteTransaction.
+	mu sync.Mutex
+	// state is the current transaction status of the read-write transaction.
+	state txState
+	// wb is the set of buffered mutations waiting to be committed.
+	wb []*Mutation
+}
+
+// BufferWrite adds a list of mutations to the set of updates that will be
+// applied when the transaction is committed. It does not actually apply the
+// write until the transaction is committed, so the operation does not
+// block. The effects of the write won't be visible to any reads (including
+// reads done in the same transaction) until the transaction commits.
+//
+// See the example for Client.ReadWriteTransaction.
+func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.state == txClosed {
+		return errTxClosed()
+	}
+	if t.state != txActive {
+		return errUnexpectedTxState(t.state)
+	}
+	t.wb = append(t.wb, ms...)
+	return nil
+}
+
+// acquire implements txReadEnv.acquire.
+func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
+	ts := &sppb.TransactionSelector{
+		Selector: &sppb.TransactionSelector_Id{
+			Id: t.tx,
+		},
+	}
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	switch t.state {
+	case txClosed:
+		return nil, nil, errTxClosed()
+	case txActive:
+		return t.sh, ts, nil
+	}
+	return nil, nil, errUnexpectedTxState(t.state)
+}
+
+// release implements txReadEnv.release.
+func (t *ReadWriteTransaction) release(err error) {
+	t.mu.Lock()
+	sh := t.sh
+	t.mu.Unlock()
+	if sh != nil && shouldDropSession(err) {
+		sh.destroy()
+	}
+}
+
+func beginTransaction(ctx context.Context, sid string, client sppb.SpannerClient) (transactionID, error) {
+	var tx transactionID
+	err := runRetryable(ctx, func(ctx context.Context) error {
+		res, e := client.BeginTransaction(ctx, &sppb.BeginTransactionRequest{
+			Session: sid,
+			Options: &sppb.TransactionOptions{
+				Mode: &sppb.TransactionOptions_ReadWrite_{
+					ReadWrite: &sppb.TransactionOptions_ReadWrite{},
+				},
+			},
+		})
+		if e != nil {
+			return e
+		}
+		tx = res.Id
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return tx, nil
+}
+
+// begin starts a read-write transaction on Cloud Spanner; it is always called before any of the public APIs.
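+//
+// A sketch of how this is reached in practice (assuming a *Client named
+// client; the "Accounts" table and its columns are illustrative, not part of
+// this package): callers normally go through (*Client).ReadWriteTransaction,
+// which drives begin, the retry-on-abort loop, and commit:
+//
+//	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *ReadWriteTransaction) error {
+//		row, err := txn.ReadRow(ctx, "Accounts", Key{1}, []string{"Balance"})
+//		if err != nil {
+//			return err
+//		}
+//		var balance int64
+//		if err := row.Column(0, &balance); err != nil {
+//			return err
+//		}
+//		// Writes are buffered and applied atomically at commit time.
+//		return txn.BufferWrite([]*Mutation{
+//			Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), balance - 10}),
+//		})
+//	})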
+func (t *ReadWriteTransaction) begin(ctx context.Context) error {
+	if t.tx != nil {
+		t.state = txActive
+		return nil
+	}
+	tx, err := beginTransaction(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), t.sh.getID(), t.sh.getClient())
+	if err == nil {
+		t.tx = tx
+		t.state = txActive
+		return nil
+	}
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return err
+}
+
+// commit tries to commit a read-write transaction to Cloud Spanner. It also returns the commit timestamp for the transaction.
+func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) {
+	var ts time.Time
+	t.mu.Lock()
+	t.state = txClosed // No further operations after commit.
+	mPb, err := mutationsProto(t.wb)
+	t.mu.Unlock()
+	if err != nil {
+		return ts, err
+	}
+	// In case the sessionHandle was destroyed but the transaction body failed to report it.
+	sid, client := t.sh.getID(), t.sh.getClient()
+	if sid == "" || client == nil {
+		return ts, errSessionClosed(t.sh)
+	}
+	err = runRetryable(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error {
+		var trailer metadata.MD
+		res, e := client.Commit(ctx, &sppb.CommitRequest{
+			Session: sid,
+			Transaction: &sppb.CommitRequest_TransactionId{
+				TransactionId: t.tx,
+			},
+			Mutations: mPb,
+		}, grpc.Trailer(&trailer))
+		if e != nil {
+			return toSpannerErrorWithMetadata(e, trailer)
+		}
+		if tstamp := res.GetCommitTimestamp(); tstamp != nil {
+			ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
+		}
+		return nil
+	})
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return ts, err
+}
+
+// rollback is called when the commit is aborted or the transaction body runs into an error.
+func (t *ReadWriteTransaction) rollback(ctx context.Context) {
+	t.mu.Lock()
+	// Forbid further operations on the rolled-back transaction.
+	t.state = txClosed
+	t.mu.Unlock()
+	// In case the sessionHandle was destroyed but the transaction body failed to report it.
+	sid, client := t.sh.getID(), t.sh.getClient()
+	if sid == "" || client == nil {
+		return
+	}
+	err := runRetryable(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error {
+		_, e := client.Rollback(ctx, &sppb.RollbackRequest{
+			Session:       sid,
+			TransactionId: t.tx,
+		})
+		return e
+	})
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return
+}
+
+// runInTransaction executes f under a read-write transaction context.
+func (t *ReadWriteTransaction) runInTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (time.Time, error) {
+	var (
+		ts  time.Time
+		err error
+	)
+	if err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t); err == nil {
+		// Try to commit if the transaction body returns no error.
+		ts, err = t.commit(ctx)
+	}
+	if err != nil {
+		if isAbortErr(err) {
+			// Retry the transaction using the same session on ABORT error.
+			// Cloud Spanner will create the new transaction with the previous one's wound-wait priority.
+			err = errRetry(err)
+			return ts, err
+		}
+		// Not going to commit; per the API spec, the transaction should be rolled back.
+		t.rollback(ctx)
+		return ts, err
+	}
+	// err == nil; return the commit timestamp.
+	return ts, nil
+}
+
+// writeOnlyTransaction provides the most efficient way of doing write-only transactions. It essentially does blind writes to Cloud Spanner.
+type writeOnlyTransaction struct {
+	// sp is the session pool which writeOnlyTransaction uses to get Cloud Spanner sessions for blind writes.
+	sp *sessionPool
+}
+
+// applyAtLeastOnce commits a list of mutations to Cloud Spanner at least once, unless one of the following happens:
+// 1) The context times out.
+// 2) An unretryable error (e.g. database not found) occurs.
+// 3) There is a malformed Mutation object.
+func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) {
+	var (
+		ts time.Time
+		sh *sessionHandle
+	)
+	mPb, err := mutationsProto(ms)
+	if err != nil {
+		// Malformed mutation found, just return the error.
+		return ts, err
+	}
+	err = runRetryable(ctx, func(ct context.Context) error {
+		var e error
+		var trailers metadata.MD
+		if sh == nil || sh.getID() == "" || sh.getClient() == nil {
+			// No usable session for doing the commit, take one from the pool.
+			sh, e = t.sp.take(ctx)
+			if e != nil {
+				// sessionPool.take already retries session creation/retrieval.
+				return e
+			}
+		}
+		res, e := sh.getClient().Commit(contextWithOutgoingMetadata(ctx, sh.getMetadata()), &sppb.CommitRequest{
+			Session: sh.getID(),
+			Transaction: &sppb.CommitRequest_SingleUseTransaction{
+				SingleUseTransaction: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadWrite_{
+						ReadWrite: &sppb.TransactionOptions_ReadWrite{},
+					},
+				},
+			},
+			Mutations: mPb,
+		}, grpc.Trailer(&trailers))
+		if e != nil {
+			if isAbortErr(e) {
+				// Mask the ABORT error as retryable, because aborted transactions are allowed to be retried.
+				return errRetry(toSpannerErrorWithMetadata(e, trailers))
+			}
+			if shouldDropSession(e) {
+				// Discard the bad session.
+				sh.destroy()
+			}
+			return e
+		}
+		if tstamp := res.GetCommitTimestamp(); tstamp != nil {
+			ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
+		}
+		return nil
+	})
+	if sh != nil {
+		sh.recycle()
+	}
+	return ts, err
+}
+
+// isAbortErr returns true if the error indicates that a gRPC call was aborted on the server side.
+func isAbortErr(err error) bool {
+	if err == nil {
+		return false
+	}
+	if ErrCode(err) == codes.Aborted {
+		return true
+	}
+	return false
+}
diff --git a/vendor/cloud.google.com/go/spanner/transaction_test.go b/vendor/cloud.google.com/go/spanner/transaction_test.go
new file mode 100644
index 0000000..f2172ed
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/transaction_test.go
@@ -0,0 +1,223 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spanner
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"cloud.google.com/go/spanner/internal/testutil"
+
+	"golang.org/x/net/context"
+	sppb "google.golang.org/genproto/googleapis/spanner/v1"
+	"google.golang.org/grpc/codes"
+)
+
+var (
+	errAbrt = spannerErrorf(codes.Aborted, "")
+	errUsr  = errors.New("error")
+)
+
+// mockClient sets up a session pool, a mock Cloud Spanner client and a Client for testing.
+func mockClient(t *testing.T) (*sessionPool, *testutil.MockCloudSpannerClient, *Client) {
+	var (
+		mc       = testutil.NewMockCloudSpannerClient(t)
+		spc      = SessionPoolConfig{}
+		database = "mockdb"
+	)
+	spc.getRPCClient = func() (sppb.SpannerClient, error) {
+		return mc, nil
+	}
+	sp, err := newSessionPool(database, spc, nil)
+	if err != nil {
+		t.Fatalf("cannot create session pool: %v", err)
+	}
+	return sp, mc, &Client{
+		database:     database,
+		idleSessions: sp,
+	}
+}
+
+// TestReadOnlyAcquire tests acquire for ReadOnlyTransaction.
+func TestReadOnlyAcquire(t *testing.T) {
+	t.Parallel()
+	_, mc, client := mockClient(t)
+	defer client.Close()
+	mc.SetActions(
+		testutil.Action{"BeginTransaction", errUsr},
+		testutil.Action{"BeginTransaction", nil},
+		testutil.Action{"BeginTransaction", nil},
+	)
+
+	// A single-use transaction should only be used once.
+	txn := client.Single()
+	defer txn.Close()
+	_, _, e := txn.acquire(context.Background())
+	if e != nil {
+		t.Errorf("Acquire for single use, got %v, want nil.", e)
+	}
+	_, _, e = txn.acquire(context.Background())
+	if wantErr := errTxClosed(); !reflect.DeepEqual(e, wantErr) {
+		t.Errorf("Second acquire for single use, got %v, want %v.", e, wantErr)
+	}
+	// A multi-use transaction can recover from an acquire failure.
+	txn = client.ReadOnlyTransaction()
+	_, _, e = txn.acquire(context.Background())
+	if wantErr := toSpannerError(errUsr); !reflect.DeepEqual(e, wantErr) {
+		t.Errorf("Acquire for multi use, got %v, want %v.", e, wantErr)
+	}
+	_, _, e = txn.acquire(context.Background())
+	if e != nil {
+		t.Errorf("Acquire for multi use, got %v, want nil.", e)
+	}
+	txn.Close()
+	// A multi-use transaction cannot be used after Close.
+	_, _, e = txn.acquire(context.Background())
+	if wantErr := errTxClosed(); !reflect.DeepEqual(e, wantErr) {
+		t.Errorf("Second acquire for multi use, got %v, want %v.", e, wantErr)
+	}
+	// A multi-use transaction can be acquired concurrently.
+	txn = client.ReadOnlyTransaction()
+	defer txn.Close()
+	mc.Freeze()
+	var (
+		sh1 *sessionHandle
+		sh2 *sessionHandle
+		ts1 *sppb.TransactionSelector
+		ts2 *sppb.TransactionSelector
+		wg  = sync.WaitGroup{}
+	)
+	acquire := func(sh **sessionHandle, ts **sppb.TransactionSelector) {
+		defer wg.Done()
+		var e error
+		*sh, *ts, e = txn.acquire(context.Background())
+		if e != nil {
+			t.Errorf("Concurrent acquire for multiuse, got %v, expect nil.", e)
+		}
+	}
+	wg.Add(2)
+	go acquire(&sh1, &ts1)
+	go acquire(&sh2, &ts2)
+	<-time.After(100 * time.Millisecond)
+	mc.Unfreeze()
+	wg.Wait()
+	if !reflect.DeepEqual(sh1, sh2) {
+		t.Errorf("Expect acquire to get same session handle, got %v and %v.", sh1, sh2)
+	}
+	if !reflect.DeepEqual(ts1, ts2) {
+		t.Errorf("Expect acquire to get same transaction selector, got %v and %v.", ts1, ts2)
+	}
+}
+
+// TestRetryOnAbort tests transaction retries on abort.
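+//
+// The mock used below queues one testutil.Action per expected RPC, pairing an
+// RPC name with the error it should return (nil for success); a retry is
+// therefore scripted as a failing action followed by a succeeding one, e.g.:
+//
+//	mc.SetActions(
+//		testutil.Action{"Commit", errAbrt}, // first attempt aborts
+//		testutil.Action{"Commit", nil},     // retry succeeds
+//	)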
+func TestRetryOnAbort(t *testing.T) { + t.Parallel() + _, mc, client := mockClient(t) + defer client.Close() + // commit in writeOnlyTransaction + mc.SetActions( + testutil.Action{"Commit", errAbrt}, // abort on first commit + testutil.Action{"Commit", nil}, + ) + + ms := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, e := client.Apply(context.Background(), ms, ApplyAtLeastOnce()); e != nil { + t.Errorf("applyAtLeastOnce retry on abort, got %v, want nil.", e) + } + // begin and commit in ReadWriteTransaction + mc.SetActions( + testutil.Action{"BeginTransaction", nil}, // let takeWriteSession succeed and get a session handle + testutil.Action{"Commit", errAbrt}, // let first commit fail and retry will begin new transaction + testutil.Action{"BeginTransaction", errAbrt}, // this time we can fail the begin attempt + testutil.Action{"BeginTransaction", nil}, + testutil.Action{"Commit", nil}, + ) + + if _, e := client.Apply(context.Background(), ms); e != nil { + t.Errorf("ReadWriteTransaction retry on abort, got %v, want nil.", e) + } +} + +// TestBadSession tests bad session (session not found error). +// TODO: session closed from transaction close +func TestBadSession(t *testing.T) { + t.Parallel() + ctx := context.Background() + sp, mc, client := mockClient(t) + defer client.Close() + var sid string + // Prepare a session, get the session id for use in testing. + if s, e := sp.take(ctx); e != nil { + t.Fatal("Prepare session failed.") + } else { + sid = s.getID() + s.recycle() + } + + wantErr := spannerErrorf(codes.NotFound, "Session not found: %v", sid) + // ReadOnlyTransaction + mc.SetActions( + testutil.Action{"BeginTransaction", wantErr}, + testutil.Action{"BeginTransaction", wantErr}, + testutil.Action{"BeginTransaction", wantErr}, + ) + txn := client.ReadOnlyTransaction() + defer txn.Close() + if _, _, got := txn.acquire(ctx); !reflect.DeepEqual(wantErr, got) { + t.Errorf("Expect acquire to fail, got %v, want %v.", got, wantErr) + } + // The failure should recycle the session, we expect it to be used in following requests. 
+ if got := txn.Query(ctx, NewStatement("SELECT 1")); !reflect.DeepEqual(wantErr, got.err) { + t.Errorf("Expect Query to fail, got %v, want %v.", got.err, wantErr) + } + if got := txn.Read(ctx, "Users", KeySets(Key{"alice"}, Key{"bob"}), []string{"name", "email"}); !reflect.DeepEqual(wantErr, got.err) { + t.Errorf("Expect Read to fail, got %v, want %v.", got.err, wantErr) + } + // writeOnlyTransaction + ms := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + mc.SetActions(testutil.Action{"Commit", wantErr}) + if _, got := client.Apply(context.Background(), ms, ApplyAtLeastOnce()); !reflect.DeepEqual(wantErr, got) { + t.Errorf("Expect applyAtLeastOnce to fail, got %v, want %v.", got, wantErr) + } +} + +func TestFunctionErrorReturned(t *testing.T) { + t.Parallel() + _, mc, client := mockClient(t) + defer client.Close() + mc.SetActions( + testutil.Action{"BeginTransaction", nil}, + testutil.Action{"Rollback", nil}, + ) + + want := errors.New("an error") + _, got := client.ReadWriteTransaction(context.Background(), + func(context.Context, *ReadWriteTransaction) error { return want }) + if got != want { + t.Errorf("got <%v>, want <%v>", got, want) + } + mc.CheckActionsConsumed() +} diff --git a/vendor/cloud.google.com/go/spanner/util.go b/vendor/cloud.google.com/go/spanner/util.go new file mode 100644 index 0000000..d35fec2 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/util.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +// maxUint64 returns the maximum of two uint64 +func maxUint64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// minUint64 returns the minimum of two uint64 +func minUint64(a, b uint64) uint64 { + if a > b { + return b + } + return a +} diff --git a/vendor/cloud.google.com/go/spanner/value.go b/vendor/cloud.google.com/go/spanner/value.go new file mode 100644 index 0000000..86d9f71 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value.go @@ -0,0 +1,1425 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// NullInt64 represents a Cloud Spanner INT64 that may be NULL. +type NullInt64 struct { + Int64 int64 + Valid bool // Valid is true if Int64 is not NULL. +} + +// String implements Stringer.String for NullInt64 +func (n NullInt64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Int64) +} + +// NullString represents a Cloud Spanner STRING that may be NULL. +type NullString struct { + StringVal string + Valid bool // Valid is true if StringVal is not NULL. +} + +// String implements Stringer.String for NullString +func (n NullString) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%q", n.StringVal) +} + +// NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL. +type NullFloat64 struct { + Float64 float64 + Valid bool // Valid is true if Float64 is not NULL. +} + +// String implements Stringer.String for NullFloat64 +func (n NullFloat64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Float64) +} + +// NullBool represents a Cloud Spanner BOOL that may be NULL. +type NullBool struct { + Bool bool + Valid bool // Valid is true if Bool is not NULL. +} + +// String implements Stringer.String for NullBool +func (n NullBool) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Bool) +} + +// NullTime represents a Cloud Spanner TIMESTAMP that may be null. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL. +} + +// String implements Stringer.String for NullTime +func (n NullTime) String() string { + if !n.Valid { + return fmt.Sprintf("%s", "") + } + return fmt.Sprintf("%q", n.Time.Format(time.RFC3339Nano)) +} + +// NullDate represents a Cloud Spanner DATE that may be null. +type NullDate struct { + Date civil.Date + Valid bool // Valid is true if Date is not NULL. +} + +// String implements Stringer.String for NullDate +func (n NullDate) String() string { + if !n.Valid { + return fmt.Sprintf("%s", "") + } + return fmt.Sprintf("%q", n.Date) +} + +// NullRow represents a Cloud Spanner STRUCT that may be NULL. +// See also the document for Row. +// Note that NullRow is not a valid Cloud Spanner column Type. +type NullRow struct { + Row Row + Valid bool // Valid is true if Row is not NULL. +} + +// GenericColumnValue represents the generic encoded value and type of the +// column. See google.spanner.v1.ResultSet proto for details. This can be +// useful for proxying query results when the result types are not known in +// advance. +// +// If you populate a GenericColumnValue from a row using Row.Column or related +// methods, do not modify the contents of Type and Value. +type GenericColumnValue struct { + Type *sppb.Type + Value *proto3.Value +} + +// Decode decodes a GenericColumnValue. The ptr argument should be a pointer +// to a Go value that can accept v. +func (v GenericColumnValue) Decode(ptr interface{}) error { + return decodeValue(v.Value, v.Type, ptr) +} + +// NewGenericColumnValue creates a GenericColumnValue from Go value that is +// valid for Cloud Spanner. 
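+//
+// A sketch of the round trip (assuming a *Row named row; error handling
+// elided): a column can be captured without knowing its type up front and
+// decoded once the desired Go type is known:
+//
+//	var gcv GenericColumnValue
+//	_ = row.Column(0, &gcv)
+//	// ... later, when the schema/type is known:
+//	var ns NullString
+//	_ = gcv.Decode(&ns)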
+func newGenericColumnValue(v interface{}) (*GenericColumnValue, error) {
+	value, typ, err := encodeValue(v)
+	if err != nil {
+		return nil, err
+	}
+	return &GenericColumnValue{Value: value, Type: typ}, nil
+}
+
+// errTypeMismatch returns error for the destination not having a compatible type
+// with the source Cloud Spanner type.
+func errTypeMismatch(srcCode, elCode sppb.TypeCode, dst interface{}) error {
+	s := srcCode.String()
+	if srcCode == sppb.TypeCode_ARRAY {
+		s = fmt.Sprintf("%v[%v]", srcCode, elCode)
+	}
+	return spannerErrorf(codes.InvalidArgument, "type %T cannot be used for decoding %s", dst, s)
+}
+
+// errNilSpannerType returns error for nil Cloud Spanner type in decoding.
+func errNilSpannerType() error {
+	return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner data type in decoding")
+}
+
+// errNilSrc returns error for decoding from nil proto value.
+func errNilSrc() error {
+	return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner value in decoding")
+}
+
+// errNilDst returns error for decoding into nil interface{}.
+func errNilDst(dst interface{}) error {
+	return spannerErrorf(codes.InvalidArgument, "cannot decode into nil type %T", dst)
+}
+
+// errNilArrElemType returns error for the input Cloud Spanner data type being an ARRAY
+// without a non-nil array element type.
+func errNilArrElemType(t *sppb.Type) error {
+	return spannerErrorf(codes.FailedPrecondition, "array type %v has a nil array element type", t)
+}
+
+// errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't
+// support NULL values.
+func errDstNotForNull(dst interface{}) error {
+	return spannerErrorf(codes.InvalidArgument, "destination %T cannot support NULL SQL values", dst)
+}
+
+// errBadEncoding returns error for decoding wrongly encoded types.
+func errBadEncoding(v *proto3.Value, err error) error {
+	return spannerErrorf(codes.FailedPrecondition, "%v wasn't correctly encoded: <%v>", v, err)
+}
+
+func parseNullTime(v *proto3.Value, p *NullTime, code sppb.TypeCode, isNull bool) error {
+	if p == nil {
+		return errNilDst(p)
+	}
+	if code != sppb.TypeCode_TIMESTAMP {
+		return errTypeMismatch(code, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, p)
+	}
+	if isNull {
+		*p = NullTime{}
+		return nil
+	}
+	x, err := getStringValue(v)
+	if err != nil {
+		return err
+	}
+	y, err := time.Parse(time.RFC3339Nano, x)
+	if err != nil {
+		return errBadEncoding(v, err)
+	}
+	p.Valid = true
+	p.Time = y
+	return nil
+}
+
+// decodeValue decodes a protobuf Value into a pointer to a Go value, as
+// specified by sppb.Type.
+func decodeValue(v *proto3.Value, t *sppb.Type, ptr interface{}) error {
+	if v == nil {
+		return errNilSrc()
+	}
+	if t == nil {
+		return errNilSpannerType()
+	}
+	code := t.Code
+	acode := sppb.TypeCode_TYPE_CODE_UNSPECIFIED
+	if code == sppb.TypeCode_ARRAY {
+		if t.ArrayElementType == nil {
+			return errNilArrElemType(t)
+		}
+		acode = t.ArrayElementType.Code
+	}
+	_, isNull := v.Kind.(*proto3.Value_NullValue)
+
+	// Do the decoding based on the type of ptr.
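+	// For example (a sketch mirroring the cases below): a STRING value decoded
+	// into *string fails with errDstNotForNull when the value is NULL, while
+	// *NullString records the NULL instead:
+	//
+	//	var s string
+	//	err1 := decodeValue(v, stringType(), &s)  // error if v is NULL
+	//	var ns NullString
+	//	err2 := decodeValue(v, stringType(), &ns) // ns.Valid == false if v is NULL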
+ switch p := ptr.(type) { + case nil: + return errNilDst(nil) + case *string: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + return errDstNotForNull(ptr) + } + x, err := getStringValue(v) + if err != nil { + return err + } + *p = x + case *NullString: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = NullString{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + p.Valid = true + p.StringVal = x + case *[]NullString: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeNullStringArray(x) + if err != nil { + return err + } + *p = y + case *[]string: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRING { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeStringArray(x) + if err != nil { + return err + } + *p = y + case *[]byte: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BYTES { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := base64.StdEncoding.DecodeString(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *[][]byte: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_BYTES { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeByteArray(x) + if err != nil { + return err + } + *p = y + case *int64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + return errDstNotForNull(ptr) + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullInt64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = NullInt64{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Int64 = y + case *[]NullInt64: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_INT64 { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeNullInt64Array(x) + if err != nil { + return err + } + *p = y + case *[]int64: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_INT64 { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeInt64Array(x) + if err != nil { + return err + } + *p = y + case *bool: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BOOL { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + return errDstNotForNull(ptr) + } + x, err := getBoolValue(v) + if err != nil { + 
return err
+		}
+		*p = x
+	case *NullBool:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if code != sppb.TypeCode_BOOL {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = NullBool{}
+			break
+		}
+		x, err := getBoolValue(v)
+		if err != nil {
+			return err
+		}
+		p.Valid = true
+		p.Bool = x
+	case *[]NullBool:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if acode != sppb.TypeCode_BOOL {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = nil
+			break
+		}
+		x, err := getListValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := decodeNullBoolArray(x)
+		if err != nil {
+			return err
+		}
+		*p = y
+	case *[]bool:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if acode != sppb.TypeCode_BOOL {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = nil
+			break
+		}
+		x, err := getListValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := decodeBoolArray(x)
+		if err != nil {
+			return err
+		}
+		*p = y
+	case *float64:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if code != sppb.TypeCode_FLOAT64 {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			return errDstNotForNull(ptr)
+		}
+		x, err := getFloat64Value(v)
+		if err != nil {
+			return err
+		}
+		*p = x
+	case *NullFloat64:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if code != sppb.TypeCode_FLOAT64 {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = NullFloat64{}
+			break
+		}
+		x, err := getFloat64Value(v)
+		if err != nil {
+			return err
+		}
+		p.Valid = true
+		p.Float64 = x
+	case *[]NullFloat64:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if acode != sppb.TypeCode_FLOAT64 {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = nil
+			break
+		}
+		x, err := getListValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := decodeNullFloat64Array(x)
+		if err != nil {
+			return err
+		}
+		*p = y
+	case *[]float64:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if acode != sppb.TypeCode_FLOAT64 {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = nil
+			break
+		}
+		x, err := getListValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := decodeFloat64Array(x)
+		if err != nil {
+			return err
+		}
+		*p = y
+	case *time.Time:
+		var nt NullTime
+		if isNull {
+			return errDstNotForNull(ptr)
+		}
+		err := parseNullTime(v, &nt, code, isNull)
+		if err != nil {
+			return err
+		}
+		*p = nt.Time
+	case *NullTime:
+		err := parseNullTime(v, p, code, isNull)
+		if err != nil {
+			return err
+		}
+	case *[]NullTime:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if acode != sppb.TypeCode_TIMESTAMP {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = nil
+			break
+		}
+		x, err := getListValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := decodeNullTimeArray(x)
+		if err != nil {
+			return err
+		}
+		*p = y
+	case *[]time.Time:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if acode != sppb.TypeCode_TIMESTAMP {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			*p = nil
+			break
+		}
+		x, err := getListValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := decodeTimeArray(x)
+		if err != nil {
+			return err
+		}
+		*p = y
+	case *civil.Date:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if code != sppb.TypeCode_DATE {
+			return errTypeMismatch(code, acode, ptr)
+		}
+		if isNull {
+			return errDstNotForNull(ptr)
+		}
+		x, err := getStringValue(v)
+		if err != nil {
+			return err
+		}
+		y, err := civil.ParseDate(x)
+		if err != nil {
+			return errBadEncoding(v, err)
+		}
+		*p = y
+	case *NullDate:
+		if p == nil {
+			return errNilDst(p)
+		}
+		if code != sppb.TypeCode_DATE {
+			return
errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = NullDate{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Date = y + case *[]NullDate: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_DATE { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeNullDateArray(x) + if err != nil { + return err + } + *p = y + case *[]civil.Date: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_DATE { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeDateArray(x) + if err != nil { + return err + } + *p = y + case *[]NullRow: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRUCT { + return errTypeMismatch(code, acode, ptr) + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeRowArray(t.ArrayElementType.StructType, x) + if err != nil { + return err + } + *p = y + case *GenericColumnValue: + *p = GenericColumnValue{Type: t, Value: v} + default: + // Check if the proto encoding is for an array of structs. + if !(code == sppb.TypeCode_ARRAY && acode == sppb.TypeCode_STRUCT) { + return errTypeMismatch(code, acode, ptr) + } + vp := reflect.ValueOf(p) + if !vp.IsValid() { + return errNilDst(p) + } + if !isPtrStructPtrSlice(vp.Type()) { + // The container is not a pointer to a struct pointer slice. + return errTypeMismatch(code, acode, ptr) + } + // Only use reflection for nil detection on slow path. + // Also, IsNil panics on many types, so check it after the type check. + if vp.IsNil() { + return errNilDst(p) + } + if isNull { + // The proto Value is encoding NULL, set the pointer to struct + // slice to nil as well. + vp.Elem().Set(reflect.Zero(vp.Elem().Type())) + break + } + x, err := getListValue(v) + if err != nil { + return err + } + if err = decodeStructArray(t.ArrayElementType.StructType, x, p); err != nil { + return err + } + } + return nil +} + +// errSrvVal returns an error for getting a wrong source protobuf value in decoding. +func errSrcVal(v *proto3.Value, want string) error { + return spannerErrorf(codes.FailedPrecondition, "cannot use %v(Kind: %T) as %s Value", + v, v.GetKind(), want) +} + +// getStringValue returns the string value encoded in proto3.Value v whose +// kind is proto3.Value_StringValue. +func getStringValue(v *proto3.Value) (string, error) { + if x, ok := v.GetKind().(*proto3.Value_StringValue); ok && x != nil { + return x.StringValue, nil + } + return "", errSrcVal(v, "String") +} + +// getBoolValue returns the bool value encoded in proto3.Value v whose +// kind is proto3.Value_BoolValue. +func getBoolValue(v *proto3.Value) (bool, error) { + if x, ok := v.GetKind().(*proto3.Value_BoolValue); ok && x != nil { + return x.BoolValue, nil + } + return false, errSrcVal(v, "Bool") +} + +// getListValue returns the proto3.ListValue contained in proto3.Value v whose +// kind is proto3.Value_ListValue. 
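+//
+// For reference, a sketch of the wire shape handled here: an ARRAY<INT64>
+// value [1, 2] arrives as
+//
+//	&proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{
+//		Values: []*proto3.Value{
+//			{Kind: &proto3.Value_StringValue{StringValue: "1"}},
+//			{Kind: &proto3.Value_StringValue{StringValue: "2"}},
+//		},
+//	}}},
+//
+// since Cloud Spanner encodes INT64 elements as decimal strings.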
+func getListValue(v *proto3.Value) (*proto3.ListValue, error) {
+	if x, ok := v.GetKind().(*proto3.Value_ListValue); ok && x != nil {
+		return x.ListValue, nil
+	}
+	return nil, errSrcVal(v, "List")
+}
+
+// errUnexpectedNumStr returns error for the decoder getting an unexpected string
+// representation of a special float value.
+func errUnexpectedNumStr(s string) error {
+	return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for number", s)
+}
+
+// getFloat64Value returns the float64 value encoded in proto3.Value v whose
+// kind is proto3.Value_NumberValue / proto3.Value_StringValue.
+// Cloud Spanner uses strings to encode NaN, Infinity and -Infinity.
+func getFloat64Value(v *proto3.Value) (float64, error) {
+	switch x := v.GetKind().(type) {
+	case *proto3.Value_NumberValue:
+		if x == nil {
+			break
+		}
+		return x.NumberValue, nil
+	case *proto3.Value_StringValue:
+		if x == nil {
+			break
+		}
+		switch x.StringValue {
+		case "NaN":
+			return math.NaN(), nil
+		case "Infinity":
+			return math.Inf(1), nil
+		case "-Infinity":
+			return math.Inf(-1), nil
+		default:
+			return 0, errUnexpectedNumStr(x.StringValue)
+		}
+	}
+	return 0, errSrcVal(v, "Number")
+}
+
+// errNilListValue returns error for unexpected nil ListValue in decoding Cloud Spanner ARRAYs.
+func errNilListValue(sqlType string) error {
+	return spannerErrorf(codes.FailedPrecondition, "unexpected nil ListValue in decoding %v array", sqlType)
+}
+
+// errDecodeArrayElement returns error for failure in decoding a single array element.
+func errDecodeArrayElement(i int, v proto.Message, sqlType string, err error) error {
+	se, ok := toSpannerError(err).(*Error)
+	if !ok {
+		return spannerErrorf(codes.Unknown,
+			"cannot decode %v(array element %v) as %v, error = <%v>", v, i, sqlType, err)
+	}
+	se.decorate(fmt.Sprintf("cannot decode %v(array element %v) as %v", v, i, sqlType))
+	return se
+}
+
+// decodeNullStringArray decodes proto3.ListValue pb into a NullString slice.
+func decodeNullStringArray(pb *proto3.ListValue) ([]NullString, error) {
+	if pb == nil {
+		return nil, errNilListValue("STRING")
+	}
+	a := make([]NullString, len(pb.Values))
+	for i, v := range pb.Values {
+		if err := decodeValue(v, stringType(), &a[i]); err != nil {
+			return nil, errDecodeArrayElement(i, v, "STRING", err)
+		}
+	}
+	return a, nil
+}
+
+// decodeStringArray decodes proto3.ListValue pb into a string slice.
+func decodeStringArray(pb *proto3.ListValue) ([]string, error) {
+	if pb == nil {
+		return nil, errNilListValue("STRING")
+	}
+	a := make([]string, len(pb.Values))
+	st := stringType()
+	for i, v := range pb.Values {
+		if err := decodeValue(v, st, &a[i]); err != nil {
+			return nil, errDecodeArrayElement(i, v, "STRING", err)
+		}
+	}
+	return a, nil
+}
+
+// decodeNullInt64Array decodes proto3.ListValue pb into a NullInt64 slice.
+func decodeNullInt64Array(pb *proto3.ListValue) ([]NullInt64, error) {
+	if pb == nil {
+		return nil, errNilListValue("INT64")
+	}
+	a := make([]NullInt64, len(pb.Values))
+	for i, v := range pb.Values {
+		if err := decodeValue(v, intType(), &a[i]); err != nil {
+			return nil, errDecodeArrayElement(i, v, "INT64", err)
+		}
+	}
+	return a, nil
+}
+
+// decodeInt64Array decodes proto3.ListValue pb into an int64 slice.
+func decodeInt64Array(pb *proto3.ListValue) ([]int64, error) { + if pb == nil { + return nil, errNilListValue("INT64") + } + a := make([]int64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, intType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "INT64", err) + } + } + return a, nil +} + +// decodeNullBoolArray decodes proto3.ListValue pb into a NullBool slice. +func decodeNullBoolArray(pb *proto3.ListValue) ([]NullBool, error) { + if pb == nil { + return nil, errNilListValue("BOOL") + } + a := make([]NullBool, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, boolType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BOOL", err) + } + } + return a, nil +} + +// decodeBoolArray decodes proto3.ListValue pb into a bool slice. +func decodeBoolArray(pb *proto3.ListValue) ([]bool, error) { + if pb == nil { + return nil, errNilListValue("BOOL") + } + a := make([]bool, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, boolType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BOOL", err) + } + } + return a, nil +} + +// decodeNullFloat64Array decodes proto3.ListValue pb into a NullFloat64 slice. +func decodeNullFloat64Array(pb *proto3.ListValue) ([]NullFloat64, error) { + if pb == nil { + return nil, errNilListValue("FLOAT64") + } + a := make([]NullFloat64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, floatType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "FLOAT64", err) + } + } + return a, nil +} + +// decodeFloat64Array decodes proto3.ListValue pb into a float64 slice. +func decodeFloat64Array(pb *proto3.ListValue) ([]float64, error) { + if pb == nil { + return nil, errNilListValue("FLOAT64") + } + a := make([]float64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, floatType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "FLOAT64", err) + } + } + return a, nil +} + +// decodeByteArray decodes proto3.ListValue pb into a slice of byte slice. +func decodeByteArray(pb *proto3.ListValue) ([][]byte, error) { + if pb == nil { + return nil, errNilListValue("BYTES") + } + a := make([][]byte, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, bytesType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BYTES", err) + } + } + return a, nil +} + +// decodeNullTimeArray decodes proto3.ListValue pb into a NullTime slice. +func decodeNullTimeArray(pb *proto3.ListValue) ([]NullTime, error) { + if pb == nil { + return nil, errNilListValue("TIMESTAMP") + } + a := make([]NullTime, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, timeType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err) + } + } + return a, nil +} + +// decodeTimeArray decodes proto3.ListValue pb into a time.Time slice. +func decodeTimeArray(pb *proto3.ListValue) ([]time.Time, error) { + if pb == nil { + return nil, errNilListValue("TIMESTAMP") + } + a := make([]time.Time, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, timeType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err) + } + } + return a, nil +} + +// decodeNullDateArray decodes proto3.ListValue pb into a NullDate slice. 
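+// DATE and TIMESTAMP values both travel as strings; a sketch of the element
+// shapes decoded by these helpers:
+//
+//	{Kind: &proto3.Value_StringValue{StringValue: "2017-01-02"}}           // DATE (civil date)
+//	{Kind: &proto3.Value_StringValue{StringValue: "2017-01-02T15:04:05Z"}} // TIMESTAMP (RFC3339Nano)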
+func decodeNullDateArray(pb *proto3.ListValue) ([]NullDate, error) {
+	if pb == nil {
+		return nil, errNilListValue("DATE")
+	}
+	a := make([]NullDate, len(pb.Values))
+	for i, v := range pb.Values {
+		if err := decodeValue(v, dateType(), &a[i]); err != nil {
+			return nil, errDecodeArrayElement(i, v, "DATE", err)
+		}
+	}
+	return a, nil
+}
+
+// decodeDateArray decodes proto3.ListValue pb into a civil.Date slice.
+func decodeDateArray(pb *proto3.ListValue) ([]civil.Date, error) {
+	if pb == nil {
+		return nil, errNilListValue("DATE")
+	}
+	a := make([]civil.Date, len(pb.Values))
+	for i, v := range pb.Values {
+		if err := decodeValue(v, dateType(), &a[i]); err != nil {
+			return nil, errDecodeArrayElement(i, v, "DATE", err)
+		}
+	}
+	return a, nil
+}
+
+func errNotStructElement(i int, v *proto3.Value) error {
+	return errDecodeArrayElement(i, v, "STRUCT",
+		spannerErrorf(codes.FailedPrecondition, "%v(type: %T) doesn't encode Cloud Spanner STRUCT", v, v))
+}
+
+// decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to
+// the structural information given in sppb.StructType ty.
+func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) {
+	if pb == nil {
+		return nil, errNilListValue("STRUCT")
+	}
+	a := make([]NullRow, len(pb.Values))
+	for i := range pb.Values {
+		switch v := pb.Values[i].GetKind().(type) {
+		case *proto3.Value_ListValue:
+			a[i] = NullRow{
+				Row: Row{
+					fields: ty.Fields,
+					vals:   v.ListValue.Values,
+				},
+				Valid: true,
+			}
+		// Null elements not currently supported by the server, see
+		// https://cloud.google.com/spanner/docs/query-syntax#using-structs-with-select
+		case *proto3.Value_NullValue:
+			// no-op, a[i] is NullRow{} already
+		default:
+			return nil, errNotStructElement(i, pb.Values[i])
+		}
+	}
+	return a, nil
+}
+
+// errNilSpannerStructType returns error for unexpected nil Cloud Spanner STRUCT schema type in decoding.
+func errNilSpannerStructType() error {
+	return spannerErrorf(codes.FailedPrecondition, "unexpected nil StructType in decoding Cloud Spanner STRUCT")
+}
+
+// errUnnamedField returns error for decoding a Cloud Spanner STRUCT with an unnamed field into a Go struct.
+func errUnnamedField(ty *sppb.StructType, i int) error {
+	return spannerErrorf(codes.InvalidArgument, "unnamed field %v in Cloud Spanner STRUCT %+v", i, ty)
+}
+
+// errNoOrDupGoField returns error for decoding a Cloud Spanner
+// STRUCT into a Go struct which is either missing a field, or has duplicate fields.
+func errNoOrDupGoField(s interface{}, f string) error {
+	return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has no or duplicate fields for Cloud Spanner STRUCT field %v", s, s, f)
+}
+
+// errDupSpannerField returns error for duplicated Cloud Spanner STRUCT field names found in decoding a Cloud Spanner STRUCT into a Go struct.
+func errDupSpannerField(f string, ty *sppb.StructType) error {
+	return spannerErrorf(codes.InvalidArgument, "duplicated field name %q in Cloud Spanner STRUCT %+v", f, ty)
+}
+
+// errDecodeStructField returns error for failure in decoding a single field of a Cloud Spanner STRUCT.
+func errDecodeStructField(ty *sppb.StructType, f string, err error) error {
+	se, ok := toSpannerError(err).(*Error)
+	if !ok {
+		return spannerErrorf(codes.Unknown,
+			"cannot decode field %v of Cloud Spanner STRUCT %+v, error = <%v>", f, ty, err)
+	}
+	se.decorate(fmt.Sprintf("cannot decode field %v of Cloud Spanner STRUCT %+v", f, ty))
+	return se
+}
+
+// decodeStruct decodes proto3.ListValue pb into the struct referenced by pointer ptr, according to
+// the structural information given in sppb.StructType ty.
+func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
+	if reflect.ValueOf(ptr).IsNil() {
+		return errNilDst(ptr)
+	}
+	if ty == nil {
+		return errNilSpannerStructType()
+	}
+	// t holds the structural information of ptr.
+	t := reflect.TypeOf(ptr).Elem()
+	// v is the actual value that ptr points to.
+	v := reflect.ValueOf(ptr).Elem()
+
+	fields, err := fieldCache.Fields(t)
+	if err != nil {
+		return toSpannerError(err)
+	}
+	seen := map[string]bool{}
+	for i, f := range ty.Fields {
+		if f.Name == "" {
+			return errUnnamedField(ty, i)
+		}
+		sf := fields.Match(f.Name)
+		if sf == nil {
+			return errNoOrDupGoField(ptr, f.Name)
+		}
+		if seen[f.Name] {
+			// We don't allow duplicated field names.
+			return errDupSpannerField(f.Name, ty)
+		}
+		// Try to decode a single field.
+		if err := decodeValue(pb.Values[i], f.Type, v.FieldByIndex(sf.Index).Addr().Interface()); err != nil {
+			return errDecodeStructField(ty, f.Name, err)
+		}
+		// Mark field f.Name as processed.
+		seen[f.Name] = true
+	}
+	return nil
+}
+
+// isPtrStructPtrSlice returns true if ptr is a pointer to a slice of struct pointers.
+func isPtrStructPtrSlice(t reflect.Type) bool {
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice {
+		// t is not a pointer to a slice.
+		return false
+	}
+	if t = t.Elem(); t.Elem().Kind() != reflect.Ptr || t.Elem().Elem().Kind() != reflect.Struct {
+		// The slice that t points to is not a slice of struct pointers.
+		return false
+	}
+	return true
+}
+
+// decodeStructArray decodes proto3.ListValue pb into the struct slice referenced by pointer ptr, according to the
+// structural information given in a sppb.StructType.
+func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
+	if pb == nil {
+		return errNilListValue("STRUCT")
+	}
+	// Type of the struct pointers stored in the slice that ptr points to.
+	ts := reflect.TypeOf(ptr).Elem().Elem()
+	// The slice that ptr points to might be nil at this point.
+	v := reflect.ValueOf(ptr).Elem()
+	// Allocate an empty slice.
+	v.Set(reflect.MakeSlice(v.Type(), 0, len(pb.Values)))
+	// Decode every struct in pb.Values.
+	for i, pv := range pb.Values {
+		// Check if pv is a NULL value.
+		if _, isNull := pv.Kind.(*proto3.Value_NullValue); isNull {
+			// Append a nil pointer to the slice.
+			v.Set(reflect.Append(v, reflect.New(ts).Elem()))
+			continue
+		}
+		// Allocate an empty struct.
+		s := reflect.New(ts.Elem())
+		// Get proto3.ListValue l from proto3.Value pv.
+		l, err := getListValue(pv)
+		if err != nil {
+			return errDecodeArrayElement(i, pv, "STRUCT", err)
+		}
+		// Decode proto3.ListValue l into the struct referenced by s.Interface().
+		if err = decodeStruct(ty, l, s.Interface()); err != nil {
+			return errDecodeArrayElement(i, pv, "STRUCT", err)
+		}
+		// Append the decoded struct back into the slice.
+		v.Set(reflect.Append(v, s))
+	}
+	return nil
+}
+
+// errEncoderUnsupportedType returns error for not being able to encode a value of a certain type.
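+//
+// encodeValue (below) accepts only the Go types enumerated in its switch;
+// anything else is rejected with this error. A sketch of the mapping it
+// applies (each type also has NullXxx and slice variants):
+//
+//	string     -> StringValue               (STRING)
+//	int, int64 -> StringValue, decimal      (INT64)
+//	float64    -> NumberValue               (FLOAT64)
+//	bool       -> BoolValue                 (BOOL)
+//	[]byte     -> StringValue, base64       (BYTES)
+//	time.Time  -> StringValue, RFC3339Nano  (TIMESTAMP)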
+func errEncoderUnsupportedType(v interface{}) error { + return spannerErrorf(codes.InvalidArgument, "client doesn't support type %T", v) +} + +// encodeValue encodes a Go native type into a proto3.Value. +func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) { + pb := &proto3.Value{ + Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}, + } + var pt *sppb.Type + var err error + switch v := v.(type) { + case nil: + case string: + pb.Kind = stringKind(v) + pt = stringType() + case NullString: + if v.Valid { + return encodeValue(v.StringVal) + } + pt = stringType() + case []string: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(stringType()) + case []NullString: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(stringType()) + case []byte: + if v != nil { + pb.Kind = stringKind(base64.StdEncoding.EncodeToString(v)) + } + pt = bytesType() + case [][]byte: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(bytesType()) + case int: + pb.Kind = stringKind(strconv.FormatInt(int64(v), 10)) + pt = intType() + case []int: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(intType()) + case int64: + pb.Kind = stringKind(strconv.FormatInt(v, 10)) + pt = intType() + case []int64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(intType()) + case NullInt64: + if v.Valid { + return encodeValue(v.Int64) + } + pt = intType() + case []NullInt64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(intType()) + case bool: + pb.Kind = &proto3.Value_BoolValue{BoolValue: v} + pt = boolType() + case []bool: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(boolType()) + case NullBool: + if v.Valid { + return encodeValue(v.Bool) + } + pt = boolType() + case []NullBool: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(boolType()) + case float64: + pb.Kind = &proto3.Value_NumberValue{NumberValue: v} + pt = floatType() + case []float64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(floatType()) + case NullFloat64: + if v.Valid { + return encodeValue(v.Float64) + } + pt = floatType() + case []NullFloat64: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(floatType()) + case time.Time: + pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano)) + pt = timeType() + case []time.Time: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(timeType()) + case NullTime: + if v.Valid { + return encodeValue(v.Time) + } + pt = timeType() + case []NullTime: + if v != nil { + pb, 
err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(timeType()) + case civil.Date: + pb.Kind = stringKind(v.String()) + pt = dateType() + case []civil.Date: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(dateType()) + case NullDate: + if v.Valid { + return encodeValue(v.Date) + } + pt = dateType() + case []NullDate: + if v != nil { + pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) + if err != nil { + return nil, nil, err + } + } + pt = listType(dateType()) + case GenericColumnValue: + // Deep clone to ensure subsequent changes to v before + // transmission don't affect our encoded value. + pb = proto.Clone(v.Value).(*proto3.Value) + pt = proto.Clone(v.Type).(*sppb.Type) + default: + return nil, nil, errEncoderUnsupportedType(v) + } + return pb, pt, nil +} + +// encodeValueArray encodes a Value array into a proto3.ListValue. +func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) { + lv := &proto3.ListValue{} + lv.Values = make([]*proto3.Value, 0, len(vs)) + for _, v := range vs { + pb, _, err := encodeValue(v) + if err != nil { + return nil, err + } + lv.Values = append(lv.Values, pb) + } + return lv, nil +} + +// encodeArray assumes that all values of the array element type encode without error. +func encodeArray(len int, at func(int) interface{}) (*proto3.Value, error) { + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(at(i)) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} + +func spannerTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + if s := t.Get("spanner"); s != "" { + if s == "-" { + return "", false, nil, nil + } + return s, true, nil, nil + } + return "", true, nil, nil +} + +var fieldCache = fields.NewCache(spannerTagParser, nil, nil) diff --git a/vendor/cloud.google.com/go/spanner/value_benchmarks_test.go b/vendor/cloud.google.com/go/spanner/value_benchmarks_test.go new file mode 100644 index 0000000..0d95ab9 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value_benchmarks_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
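Review note (editorial, not part of the patch): a quick caller-side sketch of the encoders defined in value.go above; the input values are made up. Each element goes through the encodeValue type switch: strings and int64s become string kinds (INT64 travels as a decimal string), bools become bool kinds, an invalid NullString becomes a typed NULL, and []byte is base64-encoded:

    in := []interface{}{"id-7", int64(42), true, NullString{}, []byte("blob")}
    lv, err := encodeValueArray(in)
    if err != nil {
        // Only a type outside the switch lands here (errEncoderUnsupportedType).
    }
    _ = lv // five proto3.Values; the per-element types are discarded by encodeValueArray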
+ +// +build go1.7 + +package spanner + +import ( + "reflect" + "strconv" + "testing" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +func BenchmarkEncodeIntArray(b *testing.B) { + for _, s := range []struct { + name string + f func(a []int) (*proto3.Value, *sppb.Type, error) + }{ + {"Orig", encodeIntArrayOrig}, + {"Func", encodeIntArrayFunc}, + {"Reflect", encodeIntArrayReflect}, + } { + b.Run(s.name, func(b *testing.B) { + for _, size := range []int{1, 10, 100, 1000} { + a := make([]int, size) + b.Run(strconv.Itoa(size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + s.f(a) + } + }) + } + }) + } +} + +func encodeIntArrayOrig(a []int) (*proto3.Value, *sppb.Type, error) { + vs := make([]*proto3.Value, len(a)) + var err error + for i := range a { + vs[i], _, err = encodeValue(a[i]) + if err != nil { + return nil, nil, err + } + } + return listProto(vs...), listType(intType()), nil +} + +func encodeIntArrayFunc(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArray(len(a), func(i int) interface{} { return a[i] }) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeIntArrayReflect(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArrayReflect(a) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeArrayReflect(a interface{}) (*proto3.Value, error) { + va := reflect.ValueOf(a) + len := va.Len() + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(va.Index(i).Interface()) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} + +func BenchmarkDecodeGeneric(b *testing.B) { + v := stringProto("test") + t := stringType() + var g GenericColumnValue + b.ResetTimer() + for i := 0; i < b.N; i++ { + decodeValue(v, t, &g) + } +} + +func BenchmarkDecodeArray(b *testing.B) { + for _, size := range []int{1, 10, 100, 1000} { + vals := make([]*proto3.Value, size) + for i := 0; i < size; i++ { + vals[i] = dateProto(d1) + } + lv := &proto3.ListValue{Values: vals} + b.Run(strconv.Itoa(size), func(b *testing.B) { + for _, s := range []struct { + name string + decode func(*proto3.ListValue) + }{ + {"DateDirect", decodeArray_Date_direct}, + {"DateFunc", decodeArray_Date_func}, + {"DateReflect", decodeArray_Date_reflect}, + {"StringDecodeStringArray", decodeStringArrayWrap}, + {"StringDirect", decodeArray_String_direct}, + {"StringFunc", decodeArray_String_func}, + {"StringReflect", decodeArray_String_reflect}, + } { + b.Run(s.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + s.decode(lv) + } + }) + } + }) + + } +} + +func decodeArray_Date_direct(pb *proto3.ListValue) { + a := make([]civil.Date, len(pb.Values)) + t := dateType() + for i, v := range pb.Values { + if err := decodeValue(v, t, &a[i]); err != nil { + panic(err) + } + } +} + +func decodeArray_Date_func(pb *proto3.ListValue) { + a := make([]civil.Date, len(pb.Values)) + if err := decodeArray_func(pb, "DATE", dateType(), func(i int) interface{} { return &a[i] }); err != nil { + panic(err) + } +} + +func decodeArray_Date_reflect(pb *proto3.ListValue) { + var a []civil.Date + if err := decodeArray_reflect(pb, "DATE", dateType(), &a); err != nil { + panic(err) + } +} + +func decodeStringArrayWrap(pb *proto3.ListValue) { + if _, err := decodeStringArray(pb); err != nil { + panic(err) + } +} + +func decodeArray_String_direct(pb *proto3.ListValue) { + a 
:= make([]string, len(pb.Values)) + t := stringType() + for i, v := range pb.Values { + if err := decodeValue(v, t, &a[i]); err != nil { + panic(err) + } + } +} + +func decodeArray_String_func(pb *proto3.ListValue) { + + a := make([]string, len(pb.Values)) + if err := decodeArray_func(pb, "STRING", stringType(), func(i int) interface{} { return &a[i] }); err != nil { + panic(err) + } +} + +func decodeArray_String_reflect(pb *proto3.ListValue) { + var a []string + if err := decodeArray_reflect(pb, "STRING", stringType(), &a); err != nil { + panic(err) + } +} + +func decodeArray_func(pb *proto3.ListValue, name string, typ *sppb.Type, elptr func(int) interface{}) error { + if pb == nil { + return errNilListValue(name) + } + for i, v := range pb.Values { + if err := decodeValue(v, typ, elptr(i)); err != nil { + return errDecodeArrayElement(i, v, name, err) + } + } + return nil +} + +func decodeArray_reflect(pb *proto3.ListValue, name string, typ *sppb.Type, aptr interface{}) error { + if pb == nil { + return errNilListValue(name) + } + av := reflect.ValueOf(aptr).Elem() + av.Set(reflect.MakeSlice(av.Type(), len(pb.Values), len(pb.Values))) + for i, v := range pb.Values { + if err := decodeValue(v, typ, av.Index(i).Addr().Interface()); err != nil { + av.Set(reflect.Zero(av.Type())) // reset slice to nil + return errDecodeArrayElement(i, v, name, err) + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/spanner/value_test.go b/vendor/cloud.google.com/go/spanner/value_test.go new file mode 100644 index 0000000..7fca3eb --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value_test.go @@ -0,0 +1,520 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + t1 = mustParseTime("2016-11-15T15:04:05.999999999Z") + // Boundaries + t2 = mustParseTime("0000-01-01T00:00:00.000000000Z") + t3 = mustParseTime("9999-12-31T23:59:59.999999999Z") + // Local timezone + t4 = time.Now() + d1 = mustParseDate("2016-11-15") + d2 = mustParseDate("1678-01-01") +) + +func mustParseTime(s string) time.Time { + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + panic(err) + } + return t +} + +func mustParseDate(s string) civil.Date { + d, err := civil.ParseDate(s) + if err != nil { + panic(err) + } + return d +} + +// Test encoding Values. 
+func TestEncodeValue(t *testing.T) { + var ( + tString = stringType() + tInt = intType() + tBool = boolType() + tFloat = floatType() + tBytes = bytesType() + tTime = timeType() + tDate = dateType() + ) + for i, test := range []struct { + in interface{} + want *proto3.Value + wantType *sppb.Type + }{ + // STRING / STRING ARRAY + {"abc", stringProto("abc"), tString}, + {NullString{"abc", true}, stringProto("abc"), tString}, + {NullString{"abc", false}, nullProto(), tString}, + {[]string(nil), nullProto(), listType(tString)}, + {[]string{"abc", "bcd"}, listProto(stringProto("abc"), stringProto("bcd")), listType(tString)}, + {[]NullString{{"abcd", true}, {"xyz", false}}, listProto(stringProto("abcd"), nullProto()), listType(tString)}, + // BYTES / BYTES ARRAY + {[]byte("foo"), bytesProto([]byte("foo")), tBytes}, + {[]byte(nil), nullProto(), tBytes}, + {[][]byte{nil, []byte("ab")}, listProto(nullProto(), bytesProto([]byte("ab"))), listType(tBytes)}, + {[][]byte(nil), nullProto(), listType(tBytes)}, + // INT64 / INT64 ARRAY + {7, intProto(7), tInt}, + {[]int(nil), nullProto(), listType(tInt)}, + {[]int{31, 127}, listProto(intProto(31), intProto(127)), listType(tInt)}, + {int64(81), intProto(81), tInt}, + {[]int64(nil), nullProto(), listType(tInt)}, + {[]int64{33, 129}, listProto(intProto(33), intProto(129)), listType(tInt)}, + {NullInt64{11, true}, intProto(11), tInt}, + {NullInt64{11, false}, nullProto(), tInt}, + {[]NullInt64{{35, true}, {131, false}}, listProto(intProto(35), nullProto()), listType(tInt)}, + // BOOL / BOOL ARRAY + {true, boolProto(true), tBool}, + {NullBool{true, true}, boolProto(true), tBool}, + {NullBool{true, false}, nullProto(), tBool}, + {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(tBool)}, + {[]NullBool{{true, true}, {true, false}}, listProto(boolProto(true), nullProto()), listType(tBool)}, + // FLOAT64 / FLOAT64 ARRAY + {3.14, floatProto(3.14), tFloat}, + {NullFloat64{3.1415, true}, floatProto(3.1415), tFloat}, + {NullFloat64{math.Inf(1), true}, floatProto(math.Inf(1)), tFloat}, + {NullFloat64{3.14159, false}, nullProto(), tFloat}, + {[]float64(nil), nullProto(), listType(tFloat)}, + {[]float64{3.141, 0.618, math.Inf(-1)}, listProto(floatProto(3.141), floatProto(0.618), floatProto(math.Inf(-1))), listType(tFloat)}, + {[]NullFloat64{{3.141, true}, {0.618, false}}, listProto(floatProto(3.141), nullProto()), listType(tFloat)}, + // TIMESTAMP / TIMESTAMP ARRAY + {t1, timeProto(t1), tTime}, + {NullTime{t1, true}, timeProto(t1), tTime}, + {NullTime{t1, false}, nullProto(), tTime}, + {[]time.Time(nil), nullProto(), listType(tTime)}, + {[]time.Time{t1, t2, t3, t4}, listProto(timeProto(t1), timeProto(t2), timeProto(t3), timeProto(t4)), listType(tTime)}, + {[]NullTime{{t1, true}, {t1, false}}, listProto(timeProto(t1), nullProto()), listType(tTime)}, + // DATE / DATE ARRAY + {d1, dateProto(d1), tDate}, + {NullDate{d1, true}, dateProto(d1), tDate}, + {NullDate{civil.Date{}, false}, nullProto(), tDate}, + {[]civil.Date(nil), nullProto(), listType(tDate)}, + {[]civil.Date{d1, d2}, listProto(dateProto(d1), dateProto(d2)), listType(tDate)}, + {[]NullDate{{d1, true}, {civil.Date{}, false}}, listProto(dateProto(d1), nullProto()), listType(tDate)}, + // GenericColumnValue + {GenericColumnValue{tString, stringProto("abc")}, stringProto("abc"), tString}, + {GenericColumnValue{tString, nullProto()}, nullProto(), tString}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. 
+ { + GenericColumnValue{ + Type: listType(tInt), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + listProto(intProto(5), nullProto(), stringProto("bcd")), + listType(tInt), + }, + } { + got, gotType, err := encodeValue(test.in) + if err != nil { + t.Fatalf("#%d: got error during encoding: %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("#%d: got encode result: %v, want %v", i, got, test.want) + } + if !reflect.DeepEqual(gotType, test.wantType) { + t.Errorf("#%d: got encode type: %v, want %v", i, gotType, test.wantType) + } + } +} + +// Test decoding Values. +func TestDecodeValue(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + want interface{} + fail bool + }{ + // STRING + {stringProto("abc"), stringType(), "abc", false}, + {nullProto(), stringType(), "abc", true}, + {stringProto("abc"), stringType(), NullString{"abc", true}, false}, + {nullProto(), stringType(), NullString{}, false}, + // STRING ARRAY with []NullString + { + listProto(stringProto("abc"), nullProto(), stringProto("bcd")), + listType(stringType()), + []NullString{{"abc", true}, {}, {"bcd", true}}, + false, + }, + {nullProto(), listType(stringType()), []NullString(nil), false}, + // STRING ARRAY with []string + { + listProto(stringProto("abc"), stringProto("bcd")), + listType(stringType()), + []string{"abc", "bcd"}, + false, + }, + // BYTES + {bytesProto([]byte("ab")), bytesType(), []byte("ab"), false}, + {nullProto(), bytesType(), []byte(nil), false}, + // BYTES ARRAY + {listProto(bytesProto([]byte("ab")), nullProto()), listType(bytesType()), [][]byte{[]byte("ab"), nil}, false}, + {nullProto(), listType(bytesType()), [][]byte(nil), false}, + //INT64 + {intProto(15), intType(), int64(15), false}, + {nullProto(), intType(), int64(0), true}, + {intProto(15), intType(), NullInt64{15, true}, false}, + {nullProto(), intType(), NullInt64{}, false}, + // INT64 ARRAY with []NullInt64 + {listProto(intProto(91), nullProto(), intProto(87)), listType(intType()), []NullInt64{{91, true}, {}, {87, true}}, false}, + {nullProto(), listType(intType()), []NullInt64(nil), false}, + // INT64 ARRAY with []int64 + {listProto(intProto(91), intProto(87)), listType(intType()), []int64{91, 87}, false}, + // BOOL + {boolProto(true), boolType(), true, false}, + {nullProto(), boolType(), true, true}, + {boolProto(true), boolType(), NullBool{true, true}, false}, + {nullProto(), boolType(), NullBool{}, false}, + // BOOL ARRAY with []NullBool + {listProto(boolProto(true), boolProto(false), nullProto()), listType(boolType()), []NullBool{{true, true}, {false, true}, {}}, false}, + {nullProto(), listType(boolType()), []NullBool(nil), false}, + // BOOL ARRAY with []bool + {listProto(boolProto(true), boolProto(false)), listType(boolType()), []bool{true, false}, false}, + // FLOAT64 + {floatProto(3.14), floatType(), 3.14, false}, + {nullProto(), floatType(), 0.00, true}, + {floatProto(3.14), floatType(), NullFloat64{3.14, true}, false}, + {nullProto(), floatType(), NullFloat64{}, false}, + // FLOAT64 ARRAY with []NullFloat64 + { + listProto(floatProto(math.Inf(1)), floatProto(math.Inf(-1)), nullProto(), floatProto(3.1)), + listType(floatType()), + []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}, {}, {3.1, true}}, + false, + }, + {nullProto(), listType(floatType()), []NullFloat64(nil), false}, + // FLOAT64 ARRAY with []float64 + { + listProto(floatProto(math.Inf(1)), floatProto(math.Inf(-1)), floatProto(3.1)), + listType(floatType()), + []float64{math.Inf(1), 
math.Inf(-1), 3.1}, + false, + }, + // TIMESTAMP + {timeProto(t1), timeType(), t1, false}, + {timeProto(t1), timeType(), NullTime{t1, true}, false}, + {nullProto(), timeType(), NullTime{}, false}, + // TIMESTAMP ARRAY with []NullTime + {listProto(timeProto(t1), timeProto(t2), timeProto(t3), nullProto()), listType(timeType()), []NullTime{{t1, true}, {t2, true}, {t3, true}, {}}, false}, + {nullProto(), listType(timeType()), []NullTime(nil), false}, + // TIMESTAMP ARRAY with []time.Time + {listProto(timeProto(t1), timeProto(t2), timeProto(t3)), listType(timeType()), []time.Time{t1, t2, t3}, false}, + // DATE + {dateProto(d1), dateType(), d1, false}, + {dateProto(d1), dateType(), NullDate{d1, true}, false}, + {nullProto(), dateType(), NullDate{}, false}, + // DATE ARRAY with []NullDate + {listProto(dateProto(d1), dateProto(d2), nullProto()), listType(dateType()), []NullDate{{d1, true}, {d2, true}, {}}, false}, + {nullProto(), listType(dateType()), []NullDate(nil), false}, + // DATE ARRAY with []civil.Date + {listProto(dateProto(d1), dateProto(d2)), listType(dateType()), []civil.Date{d1, d2}, false}, + // STRUCT ARRAY + // STRUCT schema is equal to the following Go struct: + // type s struct { + // Col1 NullInt64 + // Col2 []struct { + // SubCol1 float64 + // SubCol2 string + // } + // } + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []NullRow{ + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + }, + }, + Valid: true, + }, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + nullProto(), + nullProto(), + }, + }, + Valid: true, + }, + {}, + }, + fail: false, + }, + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []*struct { + Col1 NullInt64 + StructCol []*struct { + SubCol1 NullFloat64 + SubCol2 string + } `spanner:"Col2"` + }{ + { + Col1: NullInt64{3, true}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }{ + { + SubCol1: NullFloat64{3.14, true}, + SubCol2: "this", + }, + { + SubCol1: NullFloat64{0.57, true}, + SubCol2: "siht", + }, + }, + }, + { + Col1: NullInt64{}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }(nil), + }, + nil, + }, + fail: false, + }, + // GenericColumnValue + {stringProto("abc"), stringType(), 
GenericColumnValue{stringType(), stringProto("abc")}, false}, + {nullProto(), stringType(), GenericColumnValue{stringType(), nullProto()}, false}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. + { + in: listProto(intProto(5), nullProto(), stringProto("bcd")), + t: listType(intType()), + want: GenericColumnValue{ + Type: listType(intType()), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + fail: false, + }, + } { + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := decodeValue(test.in, test.t, gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("%d: cannot decode %v(%v): %v", i, test.in, test.t, err) + } + continue + } + if test.fail { + t.Errorf("%d: decoding %v(%v) succeeds unexpectedly, want error", i, test.in, test.t) + continue + } + got := reflect.Indirect(gotp).Interface() + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: unexpected decoding result - got %v, want %v", i, got, test.want) + continue + } + } +} + +// Test error cases for decodeValue. +func TestDecodeValueErrors(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + v interface{} + }{ + {nullProto(), stringType(), nil}, + {nullProto(), stringType(), 1}, + } { + err := decodeValue(test.in, test.t, test.v) + if err == nil { + t.Errorf("#%d: want error, got nil", i) + } + } +} + +// Test NaN encoding/decoding. +func TestNaN(t *testing.T) { + // Decode NaN value. + f := 0.0 + nf := NullFloat64{} + // To float64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &f); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(f) { + t.Errorf("f = %v, want %v", f, math.NaN()) + } + // To NullFloat64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &nf); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(nf.Float64) || !nf.Valid { + t.Errorf("f = %v, want %v", f, NullFloat64{math.NaN(), true}) + } + // Encode NaN value + // From float64 + v, _, err := encodeValue(math.NaN()) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok := v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } + // From NullFloat64 + v, _, err = encodeValue(NullFloat64{math.NaN(), true}) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok = v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } +} + +func TestGenericColumnValue(t *testing.T) { + for _, test := range []struct { + in GenericColumnValue + want interface{} + fail bool + }{ + {GenericColumnValue{stringType(), stringProto("abc")}, "abc", false}, + {GenericColumnValue{stringType(), stringProto("abc")}, 5, true}, + {GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}, []NullInt64{{91, true}, {}, {87, true}}, false}, + {GenericColumnValue{intType(), intProto(42)}, GenericColumnValue{intType(), intProto(42)}, false}, // trippy! 
:-) + } { + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := test.in.Decode(gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("cannot decode %v to %v: %v", test.in, test.want, err) + } + continue + } + if test.fail { + t.Errorf("decoding %v to %v succeeds unexpectedly", test.in, test.want) + } + + // Test we can go backwards as well. + v, err := newGenericColumnValue(test.want) + if err != nil { + t.Errorf("NewGenericColumnValue failed: %v", err) + continue + } + if !reflect.DeepEqual(*v, test.in) { + t.Errorf("unexpected encode result - got %v, want %v", v, test.in) + } + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go b/vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go new file mode 100644 index 0000000..def2eb5 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/Recognize_smoke_test.go @@ -0,0 +1,78 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestSpeechSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var languageCode string = "en-US" + var sampleRateHertz int32 = 44100 + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var config = &speechpb.RecognitionConfig{ + LanguageCode: languageCode, + SampleRateHertz: sampleRateHertz, + Encoding: encoding, + } + var uri string = "gs://gapic-toolkit/hello.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.RecognizeRequest{ + Config: config, + Audio: audio, + } + + if _, err := c.Recognize(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/doc.go b/vendor/cloud.google.com/go/speech/apiv1/doc.go new file mode 100644 index 0000000..4209d3c --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/doc.go @@ -0,0 +1,45 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package speech is an auto-generated package for the +// Google Cloud Speech API. + +// +// Google Cloud Speech API. +package speech // import "cloud.google.com/go/speech/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/mock_test.go b/vendor/cloud.google.com/go/speech/apiv1/mock_test.go new file mode 100644 index 0000000..1c949b2 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/mock_test.go @@ -0,0 +1,405 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpeechServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + speechpb.SpeechServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpeechServer) Recognize(ctx context.Context, req *speechpb.RecognizeRequest) (*speechpb.RecognizeResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*speechpb.RecognizeResponse), nil +} + +func (s *mockSpeechServer) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { + return err + } + } + return nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockSpeech mockSpeechServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + speechpb.RegisterSpeechServer(serv, &mockSpeech) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpeechRecognize(t *testing.T) { + var expectedResponse *speechpb.RecognizeResponse = &speechpb.RecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.RecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Recognize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.RecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Recognize(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechLongRunningRecognize(t *testing.T) { + var expectedResponse *speechpb.LongRunningRecognizeResponse = &speechpb.LongRunningRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: 
languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.LongRunningRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.LongRunningRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechLongRunningRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = nil + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRateHertz int32 = 44100 + var languageCode string = "en-US" + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRateHertz: sampleRateHertz, + LanguageCode: languageCode, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.LongRunningRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.LongRunningRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechStreamingRecognize(t *testing.T) { + var expectedResponse *speechpb.StreamingRecognizeResponse = &speechpb.StreamingRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechStreamingRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + 
t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1/speech_client.go new file mode 100644 index 0000000..77e4ee1 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/speech_client.go @@ -0,0 +1,263 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + Recognize []gax.CallOption + LongRunningRecognize []gax.CallOption + StreamingRecognize []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("speech.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + Recognize: retry[[2]string{"default", "idempotent"}], + LongRunningRecognize: retry[[2]string{"default", "non_idempotent"}], + StreamingRecognize: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Speech API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client speechpb.SpeechClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. 
+ xGoogMetadata metadata.MD +} + +// NewClient creates a new speech client. +// +// Service that implements Google Cloud Speech API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: speechpb.NewSpeechClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Recognize performs synchronous speech recognition: receive results after all audio +// has been sent and processed. +func (c *Client) Recognize(ctx context.Context, req *speechpb.RecognizeRequest, opts ...gax.CallOption) (*speechpb.RecognizeResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.Recognize[0:len(c.CallOptions.Recognize):len(c.CallOptions.Recognize)], opts...) + var resp *speechpb.RecognizeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.Recognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// LongRunningRecognize performs asynchronous speech recognition: receive results via the +// google.longrunning.Operations interface. Returns either an +// Operation.error or an Operation.response which contains +// a LongRunningRecognizeResponse message. +func (c *Client) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest, opts ...gax.CallOption) (*LongRunningRecognizeOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.LongRunningRecognize[0:len(c.CallOptions.LongRunningRecognize):len(c.CallOptions.LongRunningRecognize)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.LongRunningRecognize(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return &LongRunningRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// StreamingRecognize performs bidirectional streaming speech recognition: receive results while +// sending audio. This method is only available via the gRPC API (not REST). +func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...) + var resp speechpb.Speech_StreamingRecognizeClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// LongRunningRecognizeOperation manages a long-running operation from LongRunningRecognize. +type LongRunningRecognizeOperation struct { + lro *longrunning.Operation +} + +// LongRunningRecognizeOperation returns a new LongRunningRecognizeOperation from a given name. +// The name must be that of a previously created LongRunningRecognizeOperation, possibly from a different process. +func (c *Client) LongRunningRecognizeOperation(name string) *LongRunningRecognizeOperation { + return &LongRunningRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *LongRunningRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) { + var resp speechpb.LongRunningRecognizeResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *LongRunningRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) { + var resp speechpb.LongRunningRecognizeResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
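Review note (editorial, not part of the patch): Wait, Poll, and Metadata here follow the standard cloud.google.com/go long-running operation pattern, and an operation can be resumed by its server-assigned name, possibly from a different process. A small usage sketch, assuming a Client c and context ctx as in the example tests in this change; opName is a hypothetical, previously recorded operation name:

    op := c.LongRunningRecognizeOperation(opName) // resume by name
    resp, err := op.Wait(ctx)                     // blocks, polling on a 45s interval
    if err != nil {
        // Operation-level error or RPC failure.
    }
    _ = resp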
+func (op *LongRunningRecognizeOperation) Metadata() (*speechpb.LongRunningRecognizeMetadata, error) { + var meta speechpb.LongRunningRecognizeMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *LongRunningRecognizeOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *LongRunningRecognizeOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go new file mode 100644 index 0000000..a89eea4 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1/speech_client_example_test.go @@ -0,0 +1,110 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech_test + +import ( + "io" + + "cloud.google.com/go/speech/apiv1" + "golang.org/x/net/context" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_Recognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.RecognizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Recognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_LongRunningRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.LongRunningRecognizeRequest{ + // TODO: Fill request struct fields. + } + op, err := c.LongRunningRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingRecognize(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*speechpb.StreamingRecognizeRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go new file mode 100644 index 0000000..d42504f --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/SyncRecognize_smoke_test.go @@ -0,0 +1,78 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestSpeechSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var languageCode string = "en-US" + var sampleRate int32 = 44100 + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var config = &speechpb.RecognitionConfig{ + LanguageCode: languageCode, + SampleRate: sampleRate, + Encoding: encoding, + } + var uri string = "gs://gapic-toolkit/hello.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + if _, err := c.SyncRecognize(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go new file mode 100644 index 0000000..50f13b6 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package speech is an auto-generated package for the +// Google Cloud Speech API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Speech API. 
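Review note (editorial, not part of the patch): this v1beta1 surface mirrors apiv1 but is alpha, and the request schema differs slightly: per the two smoke tests in this change, v1beta1's RecognitionConfig uses SampleRate where v1 uses SampleRateHertz, and the unary call is SyncRecognize rather than Recognize. A config sketch against the v1beta1 types:

    cfg := &speechpb.RecognitionConfig{
        LanguageCode: "en-US",
        SampleRate:   44100, // renamed SampleRateHertz in v1
        Encoding:     speechpb.RecognitionConfig_FLAC,
    }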
+package speech // import "cloud.google.com/go/speech/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go new file mode 100644 index 0000000..8e2f6bd --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go @@ -0,0 +1,400 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpeechServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + speechpb.SpeechServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpeechServer) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*speechpb.SyncRecognizeResponse), nil +} + +func (s *mockSpeechServer) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { + md, _ := metadata.FromIncomingContext(stream.Context()) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { + return err + } + } + return nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
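+//
+// Connecting through option.WithGRPCConn to the in-process server started in
+// TestMain (rather than the real endpoint) is what keeps these mock tests
+// hermetic.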
+var clientOpt option.ClientOption + +var ( + mockSpeech mockSpeechServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + speechpb.RegisterSpeechServer(serv, &mockSpeech) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpeechSyncRecognize(t *testing.T) { + var expectedResponse *speechpb.SyncRecognizeResponse = &speechpb.SyncRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SyncRecognize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechSyncRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SyncRecognize(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechAsyncRecognize(t *testing.T) { + var expectedResponse *speechpb.AsyncRecognizeResponse = &speechpb.AsyncRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.AsyncRecognizeRequest{ + 
Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AsyncRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechAsyncRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = nil + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.AsyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AsyncRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechStreamingRecognize(t *testing.T) { + var resultIndex int32 = 520358448 + var expectedResponse = &speechpb.StreamingRecognizeResponse{ + ResultIndex: resultIndex, + } + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechStreamingRecognizeError(t *testing.T) { + errCode := codes.PermissionDenied + mockSpeech.err = gstatus.Error(errCode, "test error") + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if st, ok := 
gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go new file mode 100644 index 0000000..e2ab734 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go @@ -0,0 +1,265 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + SyncRecognize []gax.CallOption + AsyncRecognize []gax.CallOption + StreamingRecognize []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("speech.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + SyncRecognize: retry[[2]string{"default", "idempotent"}], + AsyncRecognize: retry[[2]string{"default", "idempotent"}], + StreamingRecognize: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Speech API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client speechpb.SpeechClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new speech client. +// +// Service that implements Google Cloud Speech API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: speechpb.NewSpeechClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// SyncRecognize performs synchronous speech recognition: receive results after all audio +// has been sent and processed. +func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest, opts ...gax.CallOption) (*speechpb.SyncRecognizeResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.SyncRecognize[0:len(c.CallOptions.SyncRecognize):len(c.CallOptions.SyncRecognize)], opts...) + var resp *speechpb.SyncRecognizeResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.SyncRecognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsyncRecognize performs asynchronous speech recognition: receive results via the +// [google.longrunning.Operations] +// (/speech/reference/rest/v1beta1/operations#Operation) +// interface. Returns either an +// Operation.error or an Operation.response which contains +// an AsyncRecognizeResponse message. +func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest, opts ...gax.CallOption) (*AsyncRecognizeOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AsyncRecognize[0:len(c.CallOptions.AsyncRecognize):len(c.CallOptions.AsyncRecognize)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AsyncRecognize(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AsyncRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// StreamingRecognize performs bidirectional streaming speech recognition: receive results while +// sending audio. This method is only available via the gRPC API (not REST). 
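+//
+// A usage sketch (not generated code; request construction and error
+// handling are elided), mirroring the pattern in the example tests:
+//
+//   stream, err := c.StreamingRecognize(ctx)
+//   go func() {
+//           for _, req := range reqs {
+//                   if err := stream.Send(req); err != nil {
+//                           // TODO: Handle error.
+//                   }
+//           }
+//           stream.CloseSend()
+//   }()
+//   for {
+//           resp, err := stream.Recv()
+//           if err == io.EOF {
+//                   break
+//           }
+//           // TODO: Handle other errors; use resp.
+//   }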
+func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...) + var resp speechpb.Speech_StreamingRecognizeClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsyncRecognizeOperation manages a long-running operation from AsyncRecognize. +type AsyncRecognizeOperation struct { + lro *longrunning.Operation +} + +// AsyncRecognizeOperation returns a new AsyncRecognizeOperation from a given name. +// The name must be that of a previously created AsyncRecognizeOperation, possibly from a different process. +func (c *Client) AsyncRecognizeOperation(name string) *AsyncRecognizeOperation { + return &AsyncRecognizeOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AsyncRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.AsyncRecognizeResponse, error) { + var resp speechpb.AsyncRecognizeResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AsyncRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.AsyncRecognizeResponse, error) { + var resp speechpb.AsyncRecognizeResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AsyncRecognizeOperation) Metadata() (*speechpb.AsyncRecognizeMetadata, error) { + var meta speechpb.AsyncRecognizeMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AsyncRecognizeOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. 
+func (op *AsyncRecognizeOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go new file mode 100644 index 0000000..9b86527 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go @@ -0,0 +1,110 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech_test + +import ( + "io" + + "cloud.google.com/go/speech/apiv1beta1" + "golang.org/x/net/context" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_SyncRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.SyncRecognizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SyncRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AsyncRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.AsyncRecognizeRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AsyncRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingRecognize(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*speechpb.StreamingRecognizeRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 0000000..24f90c9 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,235 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"net/http"
+	"reflect"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the level of access to grant.
+type ACLRole string
+
+const (
+	RoleOwner  ACLRole = "OWNER"
+	RoleReader ACLRole = "READER"
+	RoleWriter ACLRole = "WRITER"
+)
+
+// ACLEntity refers to a user or group.
+// They are sometimes referred to as grantees.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+	AllUsers              ACLEntity = "allUsers"
+	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+type ACLRule struct {
+	Entity ACLEntity
+	Role   ACLRole
+}
+
+// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
+type ACLHandle struct {
+	c           *Client
+	bucket      string
+	object      string
+	isDefault   bool
+	userProject string // for requester-pays buckets
+}
+
+// Delete permanently deletes the ACL entry for the given entity.
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
+	if a.object != "" {
+		return a.objectDelete(ctx, entity)
+	}
+	if a.isDefault {
+		return a.bucketDefaultDelete(ctx, entity)
+	}
+	return a.bucketDelete(ctx, entity)
+}
+
+// Set sets the permission level for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
+	if a.object != "" {
+		return a.objectSet(ctx, entity, role, false)
+	}
+	if a.isDefault {
+		return a.objectSet(ctx, entity, role, true)
+	}
+	return a.bucketSet(ctx, entity, role)
+}
+
+// List retrieves ACL entries.
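+//
+// For example (a sketch, assuming an authenticated *Client named client):
+//
+//   rules, err := client.Bucket("my-bucket").ACL().List(ctx)
+//   if err != nil {
+//           // TODO: Handle error.
+//   }
+//   for _, r := range rules {
+//           fmt.Println(r.Entity, r.Role)
+//   }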
+func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) + a.configureCall(req, ctx) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(req, ctx) + return req.Do() + }) +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.List(a.bucket) + a.configureCall(req, ctx) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) + a.configureCall(req, ctx) + _, err := req.Do() + return err + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(req, ctx) + return req.Do() + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) + a.configureCall(req, ctx) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + if isBucketDefault { + req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) + } else { + req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + } + a.configureCall(req, ctx) + return runWithRetry(ctx, func() error { + _, err := req.Do() + return err + }) +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) + a.configureCall(req, ctx) + return req.Do() + }) +} + +func (a *ACLHandle) configureCall(call interface { + Header() http.Header +}, ctx context.Context) { + vc := reflect.ValueOf(call) + 
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
+	if a.userProject != "" {
+		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
+	}
+	setClientHeader(call.Header())
+}
+
+func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
+	r := make([]ACLRule, 0, len(items))
+	for _, item := range items {
+		r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
+	}
+	return r
+}
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
new file mode 100644
index 0000000..fcaa59d
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -0,0 +1,767 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"time"
+
+	"cloud.google.com/go/internal/optional"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/iterator"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// BucketHandle provides operations on a Google Cloud Storage bucket.
+// Use Client.Bucket to get a handle.
+type BucketHandle struct {
+	c                *Client
+	name             string
+	acl              ACLHandle
+	defaultObjectACL ACLHandle
+	conds            *BucketConditions
+	userProject      string // project for Requester Pays buckets
+}
+
+// Bucket returns a BucketHandle, which provides operations on the named bucket.
+// This call does not perform any network operations.
+//
+// The supplied name must contain only lowercase letters, numbers, dashes,
+// underscores, and dots. The full specification for valid bucket names can be
+// found at:
+// https://cloud.google.com/storage/docs/bucket-naming
+func (c *Client) Bucket(name string) *BucketHandle {
+	return &BucketHandle{
+		c:    c,
+		name: name,
+		acl: ACLHandle{
+			c:      c,
+			bucket: name,
+		},
+		defaultObjectACL: ACLHandle{
+			c:         c,
+			bucket:    name,
+			isDefault: true,
+		},
+	}
+}
+
+// Create creates the Bucket in the project.
+// If attrs is nil the API defaults will be used.
+func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
+	var bkt *raw.Bucket
+	if attrs != nil {
+		bkt = attrs.toRawBucket()
+	} else {
+		bkt = &raw.Bucket{}
+	}
+	bkt.Name = b.name
+	// If there is lifecycle information but no location, explicitly set
+	// the location. This is a GCS quirk/bug.
+	if bkt.Location == "" && bkt.Lifecycle != nil {
+		bkt.Location = "US"
+	}
+	req := b.c.raw.Buckets.Insert(projectID, bkt)
+	setClientHeader(req.Header())
+	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
+}
+
+// Delete deletes the Bucket.
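+//
+// A sketch (assuming an authenticated *Client named client; note that the
+// service rejects deletion of a non-empty bucket):
+//
+//   if err := client.Bucket("my-bucket").Delete(ctx); err != nil {
+//           // TODO: Handle error.
+//   }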
+func (b *BucketHandle) Delete(ctx context.Context) error { + req, err := b.newDeleteCall() + if err != nil { + return err + } + return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) +} + +func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { + req := b.c.raw.Buckets.Delete(b.name) + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. +func (b *BucketHandle) DefaultObjectACL() *ACLHandle { + return &b.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + userProject: b.userProject, + }, + gen: -1, + userProject: b.userProject, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { + req, err := b.newGetCall() + if err != nil { + return nil, err + } + var resp *raw.Bucket + err = runWithRetry(ctx, func() error { + resp, err = req.Context(ctx).Do() + return err + }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { + req := b.c.raw.Buckets.Get(b.name).Projection("full") + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) { + req, err := b.newPatchCall(&uattrs) + if err != nil { + return nil, err + } + // TODO(jba): retry iff metagen is set? + rb, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + return newBucket(rb), nil +} + +func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { + rb := uattrs.toRawBucket() + req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +// Read-only fields are ignored by BucketHandle.Create. +type BucketAttrs struct { + // Name is the name of the bucket. 
+	// This field is read-only.
+	Name string
+
+	// ACL is the list of access control rules on the bucket.
+	ACL []ACLRule
+
+	// DefaultObjectACL is the list of access controls to
+	// apply to new objects when no object ACL is provided.
+	DefaultObjectACL []ACLRule
+
+	// Location is the location of the bucket. It defaults to "US".
+	Location string
+
+	// MetaGeneration is the metadata generation of the bucket.
+	// This field is read-only.
+	MetaGeneration int64
+
+	// StorageClass is the default storage class of the bucket. This defines
+	// how objects in the bucket are stored and determines the SLA
+	// and the cost of storage. Typical values are "MULTI_REGIONAL",
+	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
+	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
+	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
+	// the bucket's location settings.
+	StorageClass string
+
+	// Created is the creation time of the bucket.
+	// This field is read-only.
+	Created time.Time
+
+	// VersioningEnabled reports whether this bucket has versioning enabled.
+	VersioningEnabled bool
+
+	// Labels are the bucket's labels.
+	Labels map[string]string
+
+	// RequesterPays reports whether the bucket is a Requester Pays bucket.
+	// Clients performing operations on Requester Pays buckets must provide
+	// a user project (see BucketHandle.UserProject), which will be billed
+	// for the operations.
+	RequesterPays bool
+	// Lifecycle is the lifecycle configuration for objects in the bucket.
+	Lifecycle Lifecycle
+}
+
+// Lifecycle is the lifecycle configuration for objects in the bucket.
+type Lifecycle struct {
+	Rules []LifecycleRule
+}
+
+const (
+	// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
+	rfc3339Date = "2006-01-02"
+
+	// DeleteAction is a lifecycle action that deletes live and/or archived
+	// objects. Takes precedence over SetStorageClass actions.
+	DeleteAction = "Delete"
+
+	// SetStorageClassAction changes the storage class of live and/or archived
+	// objects.
+	SetStorageClassAction = "SetStorageClass"
+)
+
+// LifecycleRule is a lifecycle configuration rule.
+//
+// When all the configured conditions are met by an object in the bucket, the
+// configured action will automatically be taken on that object.
+type LifecycleRule struct {
+	// Action is the action to take when all of the associated conditions are
+	// met.
+	Action LifecycleAction
+
+	// Condition is the set of conditions that must be met for the associated
+	// action to be taken.
+	Condition LifecycleCondition
+}
+
+// LifecycleAction is a lifecycle configuration action.
+type LifecycleAction struct {
+	// Type is the type of action to take on matching objects.
+	//
+	// Acceptable values are "Delete" to delete matching objects and
+	// "SetStorageClass" to set the storage class defined in StorageClass on
+	// matching objects.
+	Type string
+
+	// StorageClass is the storage class to set on matching objects if the Action
+	// is "SetStorageClass".
+	StorageClass string
+}
+
+// Liveness specifies whether the object is live or not.
+type Liveness int
+
+const (
+	// LiveAndArchived includes both live and archived objects.
+	LiveAndArchived Liveness = iota
+	// Live specifies that the object is still live.
+	Live
+	// Archived specifies that the object is archived.
+	Archived
+)
+
+// LifecycleCondition is a set of conditions used to match objects and take an
+// action automatically.
+// +// All configured conditions must be met for the associated action to be taken. +type LifecycleCondition struct { + // AgeInDays is the age of the object in days. + AgeInDays int64 + + // CreatedBefore is the time the object was created. + // + // This condition is satisfied when an object is created before midnight of + // the specified date in UTC. + CreatedBefore time.Time + + // Liveness specifies the object's liveness. Relevant only for versioned objects + Liveness Liveness + + // MatchesStorageClasses is the condition matching the object's storage + // class. + // + // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", + // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". + MatchesStorageClasses []string + + // NumNewerVersions is the condition matching objects with a number of newer versions. + // + // If the value is N, this condition is satisfied when there are at least N + // versions (including the live version) newer than this version of the + // object. + NumNewerVersions int64 +} + +func newBucket(b *raw.Bucket) *BucketAttrs { + if b == nil { + return nil + } + bucket := &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, + Lifecycle: toLifecycle(b.Lifecycle), + } + acl := make([]ACLRule, len(b.Acl)) + for i, rule := range b.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.ACL = acl + objACL := make([]ACLRule, len(b.DefaultObjectAcl)) + for i, rule := range b.DefaultObjectAcl { + objACL[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.DefaultObjectACL = objACL + return bucket +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + var acl []*raw.BucketAccessControl + if len(b.ACL) > 0 { + acl = make([]*raw.BucketAccessControl, len(b.ACL)) + for i, rule := range b.ACL { + acl[i] = &raw.BucketAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + dACL := toRawObjectACL(b.DefaultObjectACL) + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + // Ignore VersioningEnabled if it is false. This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. + var v *raw.BucketVersioning + if b.VersioningEnabled { + v = &raw.BucketVersioning{Enabled: true} + } + var bb *raw.BucketBilling + if b.RequesterPays { + bb = &raw.BucketBilling{RequesterPays: true} + } + return &raw.Bucket{ + Name: b.Name, + DefaultObjectAcl: dACL, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: acl, + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + } +} + +type BucketAttrsToUpdate struct { + // VersioningEnabled, if set, updates whether the bucket uses versioning. + VersioningEnabled optional.Bool + + // RequesterPays, if set, updates whether the bucket is a Requester Pays bucket. + RequesterPays optional.Bool + + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified when ua is used +// in a call to Bucket.Update. 
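+//
+// For example (a sketch; b is assumed to be a *BucketHandle):
+//
+//   var ua BucketAttrsToUpdate
+//   ua.SetLabel("env", "prod")
+//   ua.DeleteLabel("stale")
+//   attrs, err := b.Update(ctx, ua)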
+func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { + if ua.setLabels == nil { + ua.setLabels = map[string]string{} + } + ua.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted when ua is used in a +// call to Bucket.Update. +func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { + if ua.deleteLabels == nil { + ua.deleteLabels = map[string]bool{} + } + ua.deleteLabels[name] = true +} + +func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { + rb := &raw.Bucket{} + if ua.VersioningEnabled != nil { + rb.Versioning = &raw.BucketVersioning{ + Enabled: optional.ToBool(ua.VersioningEnabled), + ForceSendFields: []string{"Enabled"}, + } + } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } + if ua.setLabels != nil || ua.deleteLabels != nil { + rb.Labels = map[string]string{} + for k, v := range ua.setLabels { + rb.Labels[k] = v + } + if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { + rb.ForceSendFields = append(rb.ForceSendFields, "Labels") + } + for l := range ua.deleteLabels { + rb.NullFields = append(rb.NullFields, "Labels."+l) + } + } + return rb +} + +// If returns a new BucketHandle that applies a set of preconditions. +// Preconditions already set on the BucketHandle are ignored. +// Operations on the new handle will only occur if the preconditions are +// satisfied. The only valid preconditions for buckets are MetagenerationMatch +// and MetagenerationNotMatch. +func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { + b2 := *b + b2.conds = &conds + return &b2 +} + +// BucketConditions constrain bucket methods to act on specific metagenerations. +// +// The zero value is an empty set of constraints. +type BucketConditions struct { + // MetagenerationMatch specifies that the bucket must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the bucket must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *BucketConditions) validate(method string) error { + if *c == (BucketConditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + +// applyBucketConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
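+// Because each raw method has its own *Call type, the condition fields are
+// set reflectively (via setConditionField) instead of through a shared
+// interface.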
+func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + cval := reflect.ValueOf(call) + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { + var rl raw.BucketLifecycle + if len(l.Rules) == 0 { + return nil + } + for _, r := range l.Rules { + rr := &raw.BucketLifecycleRule{ + Action: &raw.BucketLifecycleRuleAction{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: r.Condition.AgeInDays, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + NumNewerVersions: r.Condition.NumNewerVersions, + }, + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = googleapi.Bool(true) + case Archived: + rr.Condition.IsLive = googleapi.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.Rule { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.Action.Type, + StorageClass: rr.Action.StorageClass, + }, + Condition: LifecycleCondition{ + AgeInDays: rr.Condition.Age, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + NumNewerVersions: rr.Condition.NumNewerVersions, + }, + } + + switch { + case rr.Condition.IsLive == nil: + r.Condition.Liveness = LiveAndArchived + case *rr.Condition.IsLive == true: + r.Condition.Liveness = Live + case *rr.Condition.IsLive == false: + r.Condition.Liveness = Archived + } + + if rr.Condition.CreatedBefore != "" { + r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) + } + l.Rules = append(l.Rules, r) + } + return l +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. 
+// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. +func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + setClientHeader(req.Header()) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if it.bucket.userProject != "" { + req.UserProject(it.bucket.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// TODO(jbd): Add storage.buckets.update. + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
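+//
+// A typical iteration sketch over this iterator (error handling elided;
+// c is a *Client):
+//
+//   it := c.Buckets(ctx, projectID)
+//   for {
+//           battrs, err := it.Next()
+//           if err == iterator.Done {
+//                   break
+//           }
+//           if err != nil {
+//                   // TODO: Handle error.
+//           }
+//           fmt.Println(battrs.Name)
+//   }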
+func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.client.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + it.buckets = append(it.buckets, newBucket(item)) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/bucket_test.go b/vendor/cloud.google.com/go/storage/bucket_test.go new file mode 100644 index 0000000..8355f69 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket_test.go @@ -0,0 +1,288 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "net/http" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +func TestBucketAttrsToRawBucket(t *testing.T) { + t.Parallel() + attrs := &BucketAttrs{ + Name: "name", + ACL: []ACLRule{{Entity: "bob@example.com", Role: RoleOwner}}, + DefaultObjectACL: []ACLRule{{Entity: AllUsers, Role: RoleReader}}, + Location: "loc", + StorageClass: "class", + VersioningEnabled: false, + // should be ignored: + MetaGeneration: 39, + Created: time.Now(), + Labels: map[string]string{"label": "value"}, + } + got := attrs.toRawBucket() + want := &raw.Bucket{ + Name: "name", + Acl: []*raw.BucketAccessControl{ + {Entity: "bob@example.com", Role: "OWNER"}, + }, + DefaultObjectAcl: []*raw.ObjectAccessControl{ + {Entity: "allUsers", Role: "READER"}, + }, + Location: "loc", + StorageClass: "class", + Versioning: nil, // ignore VersioningEnabled if false + Labels: map[string]string{"label": "value"}, + } + msg, ok, err := pretty.Diff(want, got) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Error(msg) + } + + attrs.VersioningEnabled = true + attrs.RequesterPays = true + got = attrs.toRawBucket() + want.Versioning = &raw.BucketVersioning{Enabled: true} + want.Billing = &raw.BucketBilling{RequesterPays: true} + msg, ok, err = pretty.Diff(want, got) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Error(msg) + } +} + +func TestBucketAttrsToUpdateToRawBucket(t *testing.T) { + t.Parallel() + au := &BucketAttrsToUpdate{ + VersioningEnabled: false, + RequesterPays: false, + } + au.SetLabel("a", "foo") + au.DeleteLabel("b") + au.SetLabel("c", "") + got := au.toRawBucket() + want := &raw.Bucket{ + Versioning: &raw.BucketVersioning{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + }, + Labels: map[string]string{ + "a": "foo", + "c": "", + }, + Billing: &raw.BucketBilling{ + 
RequesterPays: false, + ForceSendFields: []string{"RequesterPays"}, + }, + NullFields: []string{"Labels.b"}, + } + msg, ok, err := pretty.Diff(want, got) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Error(msg) + } + + var au2 BucketAttrsToUpdate + au2.DeleteLabel("b") + got = au2.toRawBucket() + want = &raw.Bucket{ + Labels: map[string]string{}, + ForceSendFields: []string{"Labels"}, + NullFields: []string{"Labels.b"}, + } + msg, ok, err = pretty.Diff(want, got) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Error(msg) + } + +} + +func TestCallBuilders(t *testing.T) { + rc, err := raw.New(&http.Client{}) + if err != nil { + t.Fatal(err) + } + c := &Client{raw: rc} + const metagen = 17 + + b := c.Bucket("name") + bm := b.If(BucketConditions{MetagenerationMatch: metagen}).UserProject("p") + + equal := func(x, y interface{}) bool { + return testutil.Equal(x, y, + cmp.AllowUnexported( + raw.BucketsGetCall{}, + raw.BucketsDeleteCall{}, + raw.BucketsPatchCall{}, + ), + cmp.FilterPath(func(p cmp.Path) bool { + return p[len(p)-1].Type() == reflect.TypeOf(&raw.Service{}) + }, cmp.Ignore()), + ) + } + + for i, test := range []struct { + callFunc func(*BucketHandle) (interface{}, error) + want interface { + Header() http.Header + } + metagenFunc func(interface{}) + }{ + { + func(b *BucketHandle) (interface{}, error) { return b.newGetCall() }, + rc.Buckets.Get("name").Projection("full"), + func(req interface{}) { req.(*raw.BucketsGetCall).IfMetagenerationMatch(metagen).UserProject("p") }, + }, + { + func(b *BucketHandle) (interface{}, error) { return b.newDeleteCall() }, + rc.Buckets.Delete("name"), + func(req interface{}) { req.(*raw.BucketsDeleteCall).IfMetagenerationMatch(metagen).UserProject("p") }, + }, + { + func(b *BucketHandle) (interface{}, error) { + return b.newPatchCall(&BucketAttrsToUpdate{ + VersioningEnabled: false, + RequesterPays: false, + }) + }, + rc.Buckets.Patch("name", &raw.Bucket{ + Versioning: &raw.BucketVersioning{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + }, + Billing: &raw.BucketBilling{ + RequesterPays: false, + ForceSendFields: []string{"RequesterPays"}, + }, + }).Projection("full"), + func(req interface{}) { req.(*raw.BucketsPatchCall).IfMetagenerationMatch(metagen).UserProject("p") }, + }, + } { + got, err := test.callFunc(b) + if err != nil { + t.Fatal(err) + } + setClientHeader(test.want.Header()) + if !equal(got, test.want) { + t.Errorf("#%d: got %#v, want %#v", i, got, test.want) + } + got, err = test.callFunc(bm) + if err != nil { + t.Fatal(err) + } + test.metagenFunc(test.want) + if !equal(got, test.want) { + t.Errorf("#%d:\ngot %#v\nwant %#v", i, got, test.want) + } + } + + // Error. 
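+	// A match and a not-match metageneration condition cannot both hold at
+	// once, so each call constructor below should reject the conditions.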
+ bm = b.If(BucketConditions{MetagenerationMatch: 1, MetagenerationNotMatch: 2}) + if _, err := bm.newGetCall(); err == nil { + t.Errorf("got nil, want error") + } + if _, err := bm.newDeleteCall(); err == nil { + t.Errorf("got nil, want error") + } + if _, err := bm.newPatchCall(&BucketAttrsToUpdate{}); err == nil { + t.Errorf("got nil, want error") + } +} + +func TestNewBucket(t *testing.T) { + labels := map[string]string{"a": "b"} + matchClasses := []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"} + rb := &raw.Bucket{ + Name: "name", + Location: "loc", + Metageneration: 3, + StorageClass: "sc", + TimeCreated: "2017-10-23T04:05:06Z", + Versioning: &raw.BucketVersioning{Enabled: true}, + Labels: labels, + Billing: &raw.BucketBilling{RequesterPays: true}, + Lifecycle: &raw.BucketLifecycle{ + Rule: []*raw.BucketLifecycleRule{{ + Action: &raw.BucketLifecycleRuleAction{ + Type: "SetStorageClass", + StorageClass: "NEARLINE", + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: 10, + IsLive: googleapi.Bool(true), + CreatedBefore: "2017-01-02", + MatchesStorageClass: matchClasses, + NumNewerVersions: 3, + }, + }}, + }, + Acl: []*raw.BucketAccessControl{ + {Bucket: "name", Role: "READER", Email: "joe@example.com", Entity: "allUsers"}, + }, + } + want := &BucketAttrs{ + Name: "name", + Location: "loc", + MetaGeneration: 3, + StorageClass: "sc", + Created: time.Date(2017, 10, 23, 4, 5, 6, 0, time.UTC), + VersioningEnabled: true, + Labels: labels, + RequesterPays: true, + Lifecycle: Lifecycle{ + Rules: []LifecycleRule{ + { + Action: LifecycleAction{ + Type: SetStorageClassAction, + StorageClass: "NEARLINE", + }, + Condition: LifecycleCondition{ + AgeInDays: 10, + Liveness: Live, + CreatedBefore: time.Date(2017, 1, 2, 0, 0, 0, 0, time.UTC), + MatchesStorageClasses: matchClasses, + NumNewerVersions: 3, + }, + }, + }, + }, + ACL: []ACLRule{{Entity: "allUsers", Role: RoleReader}}, + DefaultObjectACL: []ACLRule{}, + } + got := newBucket(rb) + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 0000000..d0a999c --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,201 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. 
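+// Obtain one from ObjectHandle.CopierFrom; its fields may be set before
+// calling Run.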
+type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and copying requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(copiedBytes, totalBytes uint64) + + dst, src *ObjectHandle +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + } + if res.Done { // Finished successfully. + return newObject(res.Resource), nil + } + } +} + +func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } else if c.src.userProject != "" { + call.UserProject(c.src.userProject) + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. 
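+// All source objects must reside in the same bucket as the destination;
+// Run reports an error otherwise.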
+// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. +// +// For Requester Pays buckets, the user project of dst is billed. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. +func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 0000000..06961b6 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,168 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. 
+
+More information about Google Cloud Storage is available at
+https://cloud.google.com/storage/docs.
+
+All of the methods of this package use exponential backoff to retry calls
+that fail with certain errors, as described in
+https://cloud.google.com/storage/docs/exponential-backoff.
+
+Note: This package is in beta. Some backwards-incompatible changes may occur.
+
+
+Creating a Client
+
+To start working with this package, create a client:
+
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+The client will use your default application credentials.
+
+If you only wish to access public data, you can create
+an unauthenticated client with
+
+	client, err := storage.NewClient(ctx, option.WithoutAuthentication())
+
+Buckets
+
+A Google Cloud Storage bucket is a collection of objects. To work with a
+bucket, make a bucket handle:
+
+	bkt := client.Bucket(bucketName)
+
+A handle is a reference to a bucket. You can have a handle even if the
+bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
+call Create on the handle:
+
+	if err := bkt.Create(ctx, projectID, nil); err != nil {
+		// TODO: Handle error.
+	}
+
+Note that although buckets are associated with projects, bucket names are
+global across all projects.
+
+Each bucket has associated metadata, represented in this package by
+BucketAttrs. The third argument to BucketHandle.Create allows you to set
+the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
+Attrs:
+
+	attrs, err := bkt.Attrs(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
+		attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
+
+Objects
+
+An object holds arbitrary data as a sequence of bytes, like a file. You
+refer to objects using a handle, just as with buckets. You can use the
+standard Go io.Reader and io.Writer interfaces to read and write
+object data:
+
+	obj := bkt.Object("data")
+	// Write something to obj.
+	// w implements io.Writer.
+	w := obj.NewWriter(ctx)
+	// Write some text to obj. This will overwrite whatever is there.
+	if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
+		// TODO: Handle error.
+	}
+	// Close, just like writing a file.
+	if err := w.Close(); err != nil {
+		// TODO: Handle error.
+	}
+
+	// Read it back.
+	r, err := obj.NewReader(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	defer r.Close()
+	if _, err := io.Copy(os.Stdout, r); err != nil {
+		// TODO: Handle error.
+	}
+	// Prints "This object contains text."
+
+Objects also have attributes, which you can fetch with Attrs:
+
+	objAttrs, err := obj.Attrs(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("object %s has size %d and can be read using %s\n",
+		objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
+
+ACLs
+
+Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
+ACLRules, each of which specifies the role of a user, group or project. ACLs
+are suitable for fine-grained control, but you may prefer using IAM to control
+access at the project level (see
+https://cloud.google.com/storage/docs/access-control/iam).
+
+To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
+
+	acls, err := obj.ACL().List(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	for _, rule := range acls {
+		fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
+	}
+
+You can also set and delete ACLs.
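+
+For example, to let everyone on the Internet read an object, and later
+revoke that access (a minimal sketch, reusing the obj handle from above):
+
+	if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
+		// TODO: Handle error.
+	}
+	// ... and when public access is no longer wanted:
+	if err := obj.ACL().Delete(ctx, storage.AllUsers); err != nil {
+		// TODO: Handle error.
+	}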
+
+Conditions
+
+Every object has a generation and a metageneration. The generation changes
+whenever the content changes, and the metageneration changes whenever the
+metadata changes. Conditions let you check these values before an operation;
+the operation only executes if the conditions match. You can use conditions to
+prevent race conditions in read-modify-write operations.
+
+For example, say you've read an object's metadata into objAttrs. Now
+you want to write to that object, but only if its contents haven't changed
+since you read it. Here is how to express that:
+
+	w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
+	// Proceed with writing as above.
+
+Signed URLs
+
+You can obtain a URL that lets anyone read or write an object for a limited time.
+You don't need to create a client to do this. See the documentation of
+SignedURL for details.
+
+	url, err := storage.SignedURL(bucketName, "shared-object", opts)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Println(url)
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+*/
+package storage // import "cloud.google.com/go/storage"
diff --git a/vendor/cloud.google.com/go/storage/example_test.go b/vendor/cloud.google.com/go/storage/example_test.go
new file mode 100644
index 0000000..c5afe31
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/example_test.go
@@ -0,0 +1,622 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage_test
+
+import (
+	"fmt"
+	"hash/crc32"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"time"
+
+	"cloud.google.com/go/storage"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+)
+
+func ExampleNewClient() {
+	ctx := context.Background()
+	// Use Google Application Default Credentials to authorize and authenticate the client.
+	// More information about Application Default Credentials and how to enable them is at
+	// https://developers.google.com/identity/protocols/application-default-credentials.
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Use the client.
+
+	// Close the client when finished.
+	if err := client.Close(); err != nil {
+		// TODO: handle error.
+	}
+}
+
+// This example shows how to create an unauthenticated client, which
+// can be used to access public data.
+func ExampleNewClient_unauthenticated() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx, option.WithoutAuthentication())
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Use the client.
+
+	// Close the client when finished.
+	if err := client.Close(); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleBucketHandle_Create() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleBucketHandle_Delete() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	if err := client.Bucket("my-bucket").Delete(ctx); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleBucketHandle_Attrs() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+func ExampleBucketHandle_Update() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Enable versioning in the bucket, regardless of its previous value.
+	attrs, err := client.Bucket("my-bucket").Update(ctx,
+		storage.BucketAttrsToUpdate{VersioningEnabled: true})
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+// If your update is based on the bucket's previous attributes, match the
+// metageneration number to make sure the bucket hasn't changed since you read it.
+func ExampleBucketHandle_Update_readModifyWrite() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	var au storage.BucketAttrsToUpdate
+	au.SetLabel("lab", attrs.Labels["lab"]+"-more")
+	if attrs.Labels["delete-me"] == "yes" {
+		au.DeleteLabel("delete-me")
+	}
+	attrs, err = b.
+		If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).
+		Update(ctx, au)
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+func ExampleClient_Buckets() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Buckets(ctx, "my-project")
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleBucketIterator_Next() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Buckets(ctx, "my-project")
+	for {
+		bucketAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(bucketAttrs)
+	}
+}
+
+func ExampleBucketHandle_Objects() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Bucket("my-bucket").Objects(ctx, nil)
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleBucketHandle_AddNotification() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	n, err := b.AddNotification(ctx, &storage.Notification{
+		TopicProjectID: "my-project",
+		TopicID:        "my-topic",
+		PayloadFormat:  storage.JSONPayload,
+	})
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(n.ID)
+}
+
+func ExampleBucketHandle_Notifications() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	b := client.Bucket("my-bucket")
+	ns, err := b.Notifications(ctx)
+	if err != nil {
+		// TODO: handle error.
+ } + for id, n := range ns { + fmt.Printf("%s: %+v\n", id, n) + } +} + +var notificationID string + +func ExampleBucketHandle_DeleteNotification() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + b := client.Bucket("my-bucket") + // TODO: Obtain notificationID from BucketHandle.AddNotification + // or BucketHandle.Notifications. + err = b.DeleteNotification(ctx, notificationID) + if err != nil { + // TODO: handle error. + } +} + +func ExampleObjectIterator_Next() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + it := client.Bucket("my-bucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(objAttrs) + } +} + +func ExampleSignedURL() { + pkey, err := ioutil.ReadFile("my-private-key.pem") + if err != nil { + // TODO: handle error. + } + url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{ + GoogleAccessID: "xxx@developer.gserviceaccount.com", + PrivateKey: pkey, + Method: "GET", + Expires: time.Now().Add(48 * time.Hour), + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(url) +} + +func ExampleObjectHandle_Attrs() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_Attrs_withConditions() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Read the object. + objAttrs1, err := obj.Attrs(ctx) + if err != nil { + // TODO: handle error. + } + // Do something else for a while. + time.Sleep(5 * time.Minute) + // Now read the same contents, even if the object has been written since the last read. + objAttrs2, err := obj.Generation(objAttrs1.Generation).Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs1, objAttrs2) +} + +func ExampleObjectHandle_Update() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Change only the content type of the object. + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, storage.ObjectAttrsToUpdate{ + ContentType: "text/html", + ContentDisposition: "", // delete ContentDisposition + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_NewReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. + } + fmt.Println("file contents:", slurp) +} + +func ExampleObjectHandle_NewRangeReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Read only the first 64K. + rc, err := client.Bucket("bucketname").Object("filename1").NewRangeReader(ctx, 0, 64*1024) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. 
+	}
+	fmt.Println("first 64K of file contents:", slurp)
+}
+
+func ExampleObjectHandle_NewWriter() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	_ = wc // TODO: Use the Writer.
+}
+
+func ExampleWriter_Write() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	wc.ContentType = "text/plain"
+	wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}}
+	if _, err := wc.Write([]byte("hello world")); err != nil {
+		// TODO: handle error.
+		// Note that Write may return nil in some error situations,
+		// so always check the error from Close.
+	}
+	if err := wc.Close(); err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println("updated object:", wc.Attrs())
+}
+
+// To make sure the data you write is uncorrupted, use an MD5 or CRC32c
+// checksum. This example illustrates CRC32c.
+func ExampleWriter_Write_checksum() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	data := []byte("verify me")
+	wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+	wc.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
+	wc.SendCRC32C = true
+	if _, err := wc.Write(data); err != nil {
+		// TODO: handle error.
+		// Note that Write may return nil in some error situations,
+		// so always check the error from Close.
+	}
+	if err := wc.Close(); err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println("updated object:", wc.Attrs())
+}
+
+func ExampleObjectHandle_Delete() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// To delete multiple objects in a bucket, list them with an
+	// ObjectIterator, then Delete them.
+
+	// If you are using this package on the App Engine Flex runtime,
+	// you can init a bucket client with your app's default bucket name.
+	// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName.
+	bucket := client.Bucket("my-bucket")
+	it := bucket.Objects(ctx, nil)
+	for {
+		objAttrs, err := it.Next()
+		if err != nil && err != iterator.Done {
+			// TODO: Handle error.
+		}
+		if err == iterator.Done {
+			break
+		}
+		if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil {
+			// TODO: Handle error.
+		}
+	}
+	fmt.Println("deleted all objects in the specified bucket.")
+}
+
+func ExampleACLHandle_Delete() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// No longer grant access to the bucket to everyone on the Internet.
+	if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleACLHandle_Set() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Let any authenticated user read my-bucket/my-object.
+	obj := client.Bucket("my-bucket").Object("my-object")
+	if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil {
+		// TODO: handle error.
+	}
+}
+
+func ExampleACLHandle_List() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// List the default object ACLs for my-bucket.
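+	// DefaultObjectACL addresses the ACL rules that newly created objects
+	// in the bucket inherit by default.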
+ aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(aclRules) +} + +func ExampleCopier_Run() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + // Copy content and modify metadata. + copier := dst.CopierFrom(src) + copier.ContentType = "text/plain" + attrs, err := copier.Run(ctx) + if err != nil { + // TODO: Handle error, possibly resuming with copier.RewriteToken. + } + fmt.Println(attrs) + + // Just copy content. + attrs, err = dst.CopierFrom(src).Run(ctx) + if err != nil { + // TODO: Handle error. No way to resume. + } + fmt.Println(attrs) +} + +func ExampleCopier_Run_progress() { + // Display progress across multiple rewrite RPCs. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + copier := dst.CopierFrom(src) + copier.ProgressFunc = func(copiedBytes, totalBytes uint64) { + log.Printf("copy %.1f%% done", float64(copiedBytes)/float64(totalBytes)*100) + } + if _, err := copier.Run(ctx); err != nil { + // TODO: handle error. + } +} + +var key1, key2 []byte + +func ExampleObjectHandle_CopierFrom_rotateEncryptionKeys() { + // To rotate the encryption key on an object, copy it onto itself. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("bucketname").Object("obj") + // Assume obj is encrypted with key1, and we want to change to key2. + _, err = obj.Key(key2).CopierFrom(obj.Key(key1)).Run(ctx) + if err != nil { + // TODO: handle error. + } +} + +func ExampleComposer_Run() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + bkt := client.Bucket("bucketname") + src1 := bkt.Object("o1") + src2 := bkt.Object("o2") + dst := bkt.Object("o3") + // Compose and modify metadata. + c := dst.ComposerFrom(src1, src2) + c.ContentType = "text/plain" + attrs, err := c.Run(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(attrs) + // Just compose. + attrs, err = dst.ComposerFrom(src1, src2).Run(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(attrs) +} + +var gen int64 + +func ExampleObjectHandle_Generation() { + // Read an object's contents from generation gen, regardless of the + // current generation of the object. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + rc, err := obj.Generation(gen).NewReader(ctx) + if err != nil { + // TODO: handle error. + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + // TODO: handle error. + } +} + +func ExampleObjectHandle_If() { + // Read from an object only if the current generation is gen. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + rc, err := obj.If(storage.Conditions{GenerationMatch: gen}).NewReader(ctx) + if err != nil { + // TODO: handle error. + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + // TODO: handle error. 
+ } +} + +var secretKey []byte + +func ExampleObjectHandle_Key() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Encrypt the object's contents. + w := obj.Key(secretKey).NewWriter(ctx) + if _, err := w.Write([]byte("top secret")); err != nil { + // TODO: handle error. + } + if err := w.Close(); err != nil { + // TODO: handle error. + } +} diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go new file mode 100644 index 0000000..b85e8c3 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -0,0 +1,30 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.10 + +package storage + +import "google.golang.org/api/googleapi" + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/go17.go b/vendor/cloud.google.com/go/storage/go17.go new file mode 100644 index 0000000..982db4e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go17.go @@ -0,0 +1,26 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package storage + +import ( + "context" + "net/http" +) + +func withContext(r *http.Request, ctx context.Context) *http.Request { + return r.WithContext(ctx) +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go new file mode 100644 index 0000000..9365509 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -0,0 +1,121 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "cloud.google.com/go/iam" + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{ + raw: b.c.raw, + userProject: b.userProject, + }, b.name) +} + +// iamClient implements the iam.client interface. +type iamClient struct { + raw *raw.Service + userProject string +} + +func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { + call := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var rp *raw.Policy + var err error + err = runWithRetry(ctx, func() error { + rp, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { + rp := iamToStoragePolicy(p) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + return runWithRetry(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + call := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var res *raw.TestIamPermissionsResponse + var err error + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs = append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + }) + } + return rbs +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + }) + } + return ibs +} diff --git a/vendor/cloud.google.com/go/storage/integration_test.go b/vendor/cloud.google.com/go/storage/integration_test.go new file mode 100644 index 0000000..e584b1a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/integration_test.go @@ -0,0 +1,1866 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "flag" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "log" + "math/rand" + "net/http" + "os" + "sort" + "strconv" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + itesting "google.golang.org/api/iterator/testing" + "google.golang.org/api/option" +) + +const testPrefix = "-go-test" + +// suffix is a timestamp-based suffix which is added to all buckets created by +// tests. This reduces flakiness when the tests are run in parallel and allows +// automatic cleaning up of artifacts left when tests fail. +var suffix = fmt.Sprintf("%s-%d", testPrefix, time.Now().UnixNano()) + +func TestMain(m *testing.M) { + integrationTest := initIntegrationTest() + exit := m.Run() + if integrationTest { + if err := cleanup(); err != nil { + // No need to be loud if cleanup() fails; we'll get + // any undeleted buckets next time. + log.Printf("Post-test cleanup failed: %v\n", err) + } + } + os.Exit(exit) +} + +// If integration tests will be run, create a unique bucket for them. +func initIntegrationTest() bool { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + if testing.Short() { + return false + } + client, bucket := config(ctx) + if client == nil { + return false + } + defer client.Close() + if err := client.Bucket(bucket).Create(ctx, testutil.ProjID(), nil); err != nil { + log.Fatalf("creating bucket %q: %v", bucket, err) + } + return true +} + +// testConfig returns the Client used to access GCS and the default bucket +// name to use. testConfig skips the current test if credentials are not +// available or when being run in Short mode. +func testConfig(ctx context.Context, t *testing.T) (*Client, string) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + client, bucket := config(ctx) + if client == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + return client, bucket +} + +// config is like testConfig, but it doesn't need a *testing.T. +func config(ctx context.Context) (*Client, string) { + ts := testutil.TokenSource(ctx, ScopeFullControl) + if ts == nil { + return nil, "" + } + p := testutil.ProjID() + if p == "" { + log.Fatal("The project ID must be set. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + return client, p + suffix +} + +func TestIntegration_BucketMethods(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + projectID := testutil.ProjID() + newBucket := bucket + "-new" + b := client.Bucket(newBucket) + // Test Create and Delete. 
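+	// Create with nil attrs and rely on the service defaults, which the
+	// assertions below expect to be the STANDARD class with versioning off.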
+	if err := b.Create(ctx, projectID, nil); err != nil {
+		t.Fatalf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, nil, err)
+	}
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		t.Error(err)
+	} else {
+		if got, want := attrs.MetaGeneration, int64(1); got != want {
+			t.Errorf("got metagen %d, want %d", got, want)
+		}
+		if got, want := attrs.StorageClass, "STANDARD"; got != want {
+			t.Errorf("got storage class %q, want %q", got, want)
+		}
+		if attrs.VersioningEnabled {
+			t.Error("got versioning enabled, wanted it disabled")
+		}
+	}
+	if err := client.Bucket(newBucket).Delete(ctx); err != nil {
+		t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err)
+	}
+
+	// Test Create and Delete with attributes.
+	labels := map[string]string{
+		"l1":    "v1",
+		"empty": "",
+	}
+	attrs = &BucketAttrs{
+		StorageClass:      "NEARLINE",
+		VersioningEnabled: true,
+		Labels:            labels,
+		Lifecycle: Lifecycle{
+			Rules: []LifecycleRule{{
+				Action: LifecycleAction{
+					Type:         SetStorageClassAction,
+					StorageClass: "NEARLINE",
+				},
+				Condition: LifecycleCondition{
+					AgeInDays:             10,
+					Liveness:              Archived,
+					CreatedBefore:         time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
+					MatchesStorageClasses: []string{"MULTI_REGIONAL", "STANDARD"},
+					NumNewerVersions:      3,
+				},
+			}, {
+				Action: LifecycleAction{
+					Type: DeleteAction,
+				},
+				Condition: LifecycleCondition{
+					AgeInDays:             30,
+					Liveness:              Live,
+					CreatedBefore:         time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC),
+					MatchesStorageClasses: []string{"NEARLINE"},
+					NumNewerVersions:      10,
+				},
+			}},
+		},
+	}
+	if err := client.Bucket(newBucket).Create(ctx, projectID, attrs); err != nil {
+		t.Fatalf("Bucket(%v).Create(%v, %+v) failed: %v", newBucket, projectID, attrs, err)
+	}
+	attrs, err = b.Attrs(ctx)
+	if err != nil {
+		t.Error(err)
+	} else {
+		if got, want := attrs.MetaGeneration, int64(1); got != want {
+			t.Errorf("got metagen %d, want %d", got, want)
+		}
+		if got, want := attrs.StorageClass, "NEARLINE"; got != want {
+			t.Errorf("got storage class %q, want %q", got, want)
+		}
+		if !attrs.VersioningEnabled {
+			t.Error("got versioning disabled, wanted it enabled")
+		}
+		if got, want := attrs.Labels, labels; !testutil.Equal(got, want) {
+			t.Errorf("labels: got %v, want %v", got, want)
+		}
+	}
+	if err := client.Bucket(newBucket).Delete(ctx); err != nil {
+		t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err)
+	}
+}
+
+func TestIntegration_BucketUpdate(t *testing.T) {
+	ctx := context.Background()
+	client, bucket := testConfig(ctx, t)
+	defer client.Close()
+
+	b := client.Bucket(bucket)
+	attrs, err := b.Attrs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if attrs.VersioningEnabled {
+		t.Fatal("bucket should not have versioning by default")
+	}
+	if len(attrs.Labels) > 0 {
+		t.Fatal("bucket should not have labels initially")
+	}
+
+	// Using an empty BucketAttrsToUpdate should be a no-op.
+	attrs, err = b.Update(ctx, BucketAttrsToUpdate{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if attrs.VersioningEnabled {
+		t.Fatal("should not have versioning")
+	}
+	if len(attrs.Labels) > 0 {
+		t.Fatal("should not have labels")
+	}
+
+	// Turn on versioning, add some labels.
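+	// SetLabel only records the change; nothing is sent until Update is
+	// called with ua.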
+ ua := BucketAttrsToUpdate{VersioningEnabled: true} + ua.SetLabel("l1", "v1") + ua.SetLabel("empty", "") + attrs, err = b.Update(ctx, ua) + if err != nil { + t.Fatal(err) + } + if !attrs.VersioningEnabled { + t.Fatal("should have versioning now") + } + wantLabels := map[string]string{ + "l1": "v1", + "empty": "", + } + if !testutil.Equal(attrs.Labels, wantLabels) { + t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) + } + + // Turn off versioning again; add and remove some more labels. + ua = BucketAttrsToUpdate{VersioningEnabled: false} + ua.SetLabel("l1", "v2") // update + ua.SetLabel("new", "new") // create + ua.DeleteLabel("empty") // delete + ua.DeleteLabel("absent") // delete non-existent + attrs, err = b.Update(ctx, ua) + if err != nil { + t.Fatal(err) + } + if attrs.VersioningEnabled { + t.Fatal("should have versioning off") + } + wantLabels = map[string]string{ + "l1": "v2", + "new": "new", + } + if !testutil.Equal(attrs.Labels, wantLabels) { + t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) + } +} + +func TestIntegration_ConditionalDelete(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + o := client.Bucket(bucket).Object("conddel") + + wc := o.NewWriter(ctx) + wc.ContentType = "text/plain" + if _, err := wc.Write([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := wc.Close(); err != nil { + t.Fatal(err) + } + + gen := wc.Attrs().Generation + metaGen := wc.Attrs().Metageneration + + if err := o.Generation(gen - 1).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with Generation") + } + if err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch") + } + if err := o.If(Conditions{MetagenerationNotMatch: metaGen}).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch") + } + if err := o.Generation(gen).Delete(ctx); err != nil { + t.Fatalf("final delete failed: %v", err) + } +} + +func TestIntegration_Objects(t *testing.T) { + // TODO(jba): Use subtests (Go 1.7). + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + const defaultType = "text/plain" + + // Populate object names and make a map for their contents. + objects := []string{ + "obj1", + "obj2", + "obj/with/slashes", + } + contents := make(map[string][]byte) + + // Test Writer. + for _, obj := range objects { + c := randomContents() + if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + contents[obj] = c + } + + testObjectIterator(t, bkt, objects) + + // Test Reader. 
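+	// Read each object back in full and check its contents and attributes
+	// against what was written above.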
+ for _, obj := range objects { + rc, err := bkt.Object(obj).NewReader(ctx) + if err != nil { + t.Errorf("Can't create a reader for %v, errored with %v", obj, err) + continue + } + if !rc.checkCRC { + t.Errorf("%v: not checking CRC", obj) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%q) = %q; want %q", obj, got, want) + } + if got, want := rc.Size(), len(contents[obj]); got != int64(want) { + t.Errorf("Size (%q) = %d; want %d", obj, got, want) + } + if got, want := rc.ContentType(), "text/plain"; got != want { + t.Errorf("ContentType (%q) = %q; want %q", obj, got, want) + } + if got, want := rc.CacheControl(), "public, max-age=60"; got != want { + t.Errorf("CacheControl (%q) = %q; want %q", obj, got, want) + } + rc.Close() + + // Check early close. + buf := make([]byte, 1) + rc, err = bkt.Object(obj).NewReader(ctx) + if err != nil { + t.Fatalf("%v: %v", obj, err) + } + _, err = rc.Read(buf) + if err != nil { + t.Fatalf("%v: %v", obj, err) + } + if got, want := buf, contents[obj][:1]; !bytes.Equal(got, want) { + t.Errorf("Contents[0] (%q) = %q; want %q", obj, got, want) + } + if err := rc.Close(); err != nil { + t.Errorf("%v Close: %v", obj, err) + } + } + + obj := objects[0] + objlen := int64(len(contents[obj])) + // Test Range Reader. + for i, r := range []struct { + offset, length, want int64 + }{ + {0, objlen, objlen}, + {0, objlen / 2, objlen / 2}, + {objlen / 2, objlen, objlen / 2}, + {0, 0, 0}, + {objlen / 2, 0, 0}, + {objlen / 2, -1, objlen / 2}, + {0, objlen * 2, objlen}, + } { + rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length) + if err != nil { + t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err) + continue + } + if rc.Size() != objlen { + t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen) + } + if rc.Remain() != r.want { + t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err) + continue + } + if len(slurp) != int(r.want) { + t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want) + continue + } + if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) { + t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want) + } + rc.Close() + } + + // Test content encoding + const zeroCount = 20 << 20 + w := bkt.Object("gzip-test").NewWriter(ctx) + w.ContentEncoding = "gzip" + gw := gzip.NewWriter(w) + if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil { + t.Fatalf("io.Copy, upload: %v", err) + } + if err := gw.Close(); err != nil { + t.Errorf("gzip.Close(): %v", err) + } + if err := w.Close(); err != nil { + t.Errorf("w.Close(): %v", err) + } + r, err := bkt.Object("gzip-test").NewReader(ctx) + if err != nil { + t.Fatalf("NewReader(gzip-test): %v", err) + } + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + t.Errorf("io.Copy, download: %v", err) + } + if n != zeroCount { + t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount) + } + + // Test NotFound. 
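+	// A missing object must surface as ErrObjectNotExist, not as a raw
+	// googleapi error.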
+ _, err = bkt.Object("obj-not-exists").NewReader(ctx) + if err != ErrObjectNotExist { + t.Errorf("Object should not exist, err found to be %v", err) + } + + objName := objects[0] + + // Test NewReader googleapi.Error. + // Since a 429 or 5xx is hard to cause, we trigger a 416. + realLen := len(contents[objName]) + _, err = bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10) + if err, ok := err.(*googleapi.Error); !ok { + t.Error("NewRangeReader did not return a googleapi.Error") + } else { + if err.Code != 416 { + t.Errorf("Code = %d; want %d", err.Code, 416) + } + if len(err.Header) == 0 { + t.Error("Missing googleapi.Error.Header") + } + if len(err.Body) == 0 { + t.Error("Missing googleapi.Error.Body") + } + } + + // Test StatObject. + o, err := bkt.Object(objName).Attrs(ctx) + if err != nil { + t.Error(err) + } + if got, want := o.Name, objName; got != want { + t.Errorf("Name (%v) = %q; want %q", objName, got, want) + } + if got, want := o.ContentType, defaultType; got != want { + t.Errorf("ContentType (%v) = %q; want %q", objName, got, want) + } + created := o.Created + // Check that the object is newer than its containing bucket. + bAttrs, err := bkt.Attrs(ctx) + if err != nil { + t.Error(err) + } + if o.Created.Before(bAttrs.Created) { + t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs) + } + + // Test object copy. + copyName := "copy-" + objName + copyObj, err := bkt.Object(copyName).CopierFrom(bkt.Object(objName)).Run(ctx) + if err != nil { + t.Errorf("Copier.Run failed with %v", err) + } else if !namesEqual(copyObj, bucket, copyName) { + t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", + copyObj.Bucket, copyObj.Name, bucket, copyName) + } + + // Copying with attributes. + const contentEncoding = "identity" + copier := bkt.Object(copyName).CopierFrom(bkt.Object(objName)) + copier.ContentEncoding = contentEncoding + copyObj, err = copier.Run(ctx) + if err != nil { + t.Errorf("Copier.Run failed with %v", err) + } else { + if !namesEqual(copyObj, bucket, copyName) { + t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", + copyObj.Bucket, copyObj.Name, bucket, copyName) + } + if copyObj.ContentEncoding != contentEncoding { + t.Errorf("Copy ContentEncoding: got %q, want %q", copyObj.ContentEncoding, contentEncoding) + } + } + + // Test UpdateAttrs. + metadata := map[string]string{"key": "value"} + updated, err := bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ + ContentType: "text/html", + ContentLanguage: "en", + Metadata: metadata, + ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } else { + if got, want := updated.ContentType, "text/html"; got != want { + t.Errorf("updated.ContentType == %q; want %q", got, want) + } + if got, want := updated.ContentLanguage, "en"; got != want { + t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) + } + if got, want := updated.Metadata, metadata; !testutil.Equal(got, want) { + t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want) + } + if got, want := updated.Created, created; got != want { + t.Errorf("updated.Created == %q; want %q", got, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + } + // Delete ContentType and ContentLanguage. 
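+	// Setting a string field to "" clears it on the server, while leaving
+	// the field unset would keep its current value.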
+ updated, err = bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ + ContentType: "", + ContentLanguage: "", + Metadata: map[string]string{}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } else { + if got, want := updated.ContentType, ""; got != want { + t.Errorf("updated.ContentType == %q; want %q", got, want) + } + if got, want := updated.ContentLanguage, ""; got != want { + t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) + } + if updated.Metadata != nil { + t.Errorf("updated.Metadata == %+v; want nil", updated.Metadata) + } + if got, want := updated.Created, created; got != want { + t.Errorf("updated.Created == %q; want %q", got, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + } + + // Test checksums. + checksumCases := []struct { + name string + contents [][]byte + size int64 + md5 string + crc32c uint32 + }{ + { + name: "checksum-object", + contents: [][]byte{[]byte("hello"), []byte("world")}, + size: 10, + md5: "fc5e038d38a57032085441e7fe7010b0", + crc32c: 1456190592, + }, + { + name: "zero-object", + contents: [][]byte{}, + size: 0, + md5: "d41d8cd98f00b204e9800998ecf8427e", + crc32c: 0, + }, + } + for _, c := range checksumCases { + wc := bkt.Object(c.name).NewWriter(ctx) + for _, data := range c.contents { + if _, err := wc.Write(data); err != nil { + t.Errorf("Write(%q) failed with %q", data, err) + } + } + if err = wc.Close(); err != nil { + t.Errorf("%q: close failed with %q", c.name, err) + } + obj := wc.Attrs() + if got, want := obj.Size, c.size; got != want { + t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want) + } + if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want { + t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want) + } + if got, want := obj.CRC32C, c.crc32c; got != want { + t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want) + } + } + + // Test public ACL. + publicObj := objects[0] + if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil { + t.Errorf("PutACLEntry failed with %v", err) + } + publicClient, err := NewClient(ctx, option.WithHTTPClient(http.DefaultClient)) + if err != nil { + t.Fatal(err) + } + + slurp, err := readObject(ctx, publicClient.Bucket(bucket).Object(publicObj)) + if err != nil { + t.Errorf("readObject failed with %v", err) + } else if !bytes.Equal(slurp, contents[publicObj]) { + t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj]) + } + + // Test writer error handling. + wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx) + if _, err := wc.Write([]byte("hello")); err != nil { + t.Errorf("Write unexpectedly failed with %v", err) + } + if err = wc.Close(); err == nil { + t.Error("Close expected an error, found none") + } + + // Test deleting the copy object. + if err := bkt.Object(copyName).Delete(ctx); err != nil { + t.Errorf("Deletion of %v failed with %v", copyName, err) + } + // Deleting it a second time should return ErrObjectNotExist. + if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist { + t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err) + } + _, err = bkt.Object(copyName).Attrs(ctx) + if err != ErrObjectNotExist { + t.Errorf("Copy is expected to be deleted, stat errored with %v", err) + } + + // Test object composition. 
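+	// Concatenating the source objects must produce exactly the bytes of
+	// the individual contents, in order.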
+ var compSrcs []*ObjectHandle + var wantContents []byte + for _, obj := range objects { + compSrcs = append(compSrcs, bkt.Object(obj)) + wantContents = append(wantContents, contents[obj]...) + } + checkCompose := func(obj *ObjectHandle, wantContentType string) { + rc, err := obj.NewReader(ctx) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + slurp, err = ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("ioutil.ReadAll: %v", err) + } + defer rc.Close() + if !bytes.Equal(slurp, wantContents) { + t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents) + } + if got := rc.ContentType(); got != wantContentType { + t.Errorf("Composed object content-type = %q, want %q", got, wantContentType) + } + } + + // Compose should work even if the user sets no destination attributes. + compDst := bkt.Object("composed1") + c := compDst.ComposerFrom(compSrcs...) + if _, err := c.Run(ctx); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + checkCompose(compDst, "application/octet-stream") + + // It should also work if we do. + compDst = bkt.Object("composed2") + c = compDst.ComposerFrom(compSrcs...) + c.ContentType = "text/json" + if _, err := c.Run(ctx); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + checkCompose(compDst, "text/json") +} + +func namesEqual(obj *ObjectAttrs, bucketName, objectName string) bool { + return obj.Bucket == bucketName && obj.Name == objectName +} + +func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) { + ctx := context.Background() + // Collect the list of items we expect: ObjectAttrs in lexical order by name. + names := make([]string, len(objects)) + copy(names, objects) + sort.Strings(names) + var attrs []*ObjectAttrs + for _, name := range names { + attr, err := bkt.Object(name).Attrs(ctx) + if err != nil { + t.Errorf("Object(%q).Attrs: %v", name, err) + return + } + attrs = append(attrs, attr) + } + msg, ok := itesting.TestIterator(attrs, + func() interface{} { return bkt.Objects(ctx, &Query{Prefix: "obj"}) }, + func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) + if !ok { + t.Errorf("ObjectIterator.Next: %s", msg) + } + // TODO(jba): test query.Delimiter != "" +} + +func TestIntegration_SignedURL(t *testing.T) { + // To test SignedURL, we need a real user email and private key. Extract them + // from the JSON key file. 
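+ // (testutil.JWTConfig is an internal test helper that parses the configured
+ // service-account JSON key file into a *jwt.Config; when no key file is
+ // present it returns nil with a nil error, and the test skips below.)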
+ jwtConf, err := testutil.JWTConfig() + if err != nil { + t.Fatal(err) + } + if jwtConf == nil { + t.Skip("JSON key file is not present") + } + + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + obj := "signedURL" + contents := []byte("This is a test of SignedURL.\n") + md5 := "Jyxvgwm9n2MsrGTMPbMeYA==" // base64-encoded MD5 of contents + if err := writeObject(ctx, bkt.Object(obj), "text/plain", contents); err != nil { + t.Fatalf("writing: %v", err) + } + for _, test := range []struct { + desc string + opts SignedURLOptions + headers map[string][]string + fail bool + }{ + { + desc: "basic", + }, + { + desc: "MD5 sent and matches", + opts: SignedURLOptions{MD5: md5}, + headers: map[string][]string{"Content-MD5": {md5}}, + }, + { + desc: "MD5 not sent", + opts: SignedURLOptions{MD5: md5}, + fail: true, + }, + { + desc: "Content-Type sent and matches", + opts: SignedURLOptions{ContentType: "text/plain"}, + headers: map[string][]string{"Content-Type": {"text/plain"}}, + }, + { + desc: "Content-Type sent but does not match", + opts: SignedURLOptions{ContentType: "text/plain"}, + headers: map[string][]string{"Content-Type": {"application/json"}}, + fail: true, + }, + { + desc: "Canonical headers sent and match", + opts: SignedURLOptions{Headers: []string{ + " X-Goog-Foo: Bar baz ", + "X-Goog-Novalue", // ignored: no value + "X-Google-Foo", // ignored: wrong prefix + }}, + headers: map[string][]string{"X-Goog-foo": {"Bar baz "}}, + }, + { + desc: "Canonical headers sent but don't match", + opts: SignedURLOptions{Headers: []string{" X-Goog-Foo: Bar baz"}}, + headers: map[string][]string{"X-Goog-Foo": {"bar baz"}}, + fail: true, + }, + } { + opts := test.opts + opts.GoogleAccessID = jwtConf.Email + opts.PrivateKey = jwtConf.PrivateKey + opts.Method = "GET" + opts.Expires = time.Now().Add(time.Hour) + u, err := SignedURL(bucket, obj, &opts) + if err != nil { + t.Errorf("%s: SignedURL: %v", test.desc, err) + continue + } + got, err := getURL(u, test.headers) + if err != nil && !test.fail { + t.Errorf("%s: getURL %q: %v", test.desc, u, err) + } else if err == nil && !bytes.Equal(got, contents) { + t.Errorf("%s: got %q, want %q", test.desc, got, contents) + } + } +} + +// Make a GET request to a URL using an unauthenticated client, and return its contents. 
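+// The headers supplied here must match whatever was signed into the URL
+// (Content-MD5, Content-Type, and canonical x-goog- headers); a mismatch
+// invalidates the signature and the server rejects the request, as the
+// failing cases above demonstrate.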
+func getURL(url string, headers map[string][]string) ([]byte, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header = headers + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + bytes, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + return nil, fmt.Errorf("code=%d, body=%s", res.StatusCode, string(bytes)) + } + return bytes, nil +} + +func TestIntegration_ACL(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + entity := ACLEntity("domain-google.com") + rule := ACLRule{Entity: entity, Role: RoleReader} + if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil { + t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) + } + acl, err := bkt.DefaultObjectACL().List(ctx) + if err != nil { + t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucket, err) + } else if !hasRule(acl, rule) { + t.Errorf("default ACL missing %#v", rule) + } + aclObjects := []string{"acl1", "acl2"} + for _, obj := range aclObjects { + c := randomContents() + if err := writeObject(ctx, bkt.Object(obj), "", c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + } + name := aclObjects[0] + o := bkt.Object(name) + acl, err = o.ACL().List(ctx) + if err != nil { + t.Errorf("Can't retrieve ACL of %v", name) + } else if !hasRule(acl, rule) { + t.Errorf("object ACL missing %+v", rule) + } + if err := o.ACL().Delete(ctx, entity); err != nil { + t.Errorf("object ACL: could not delete entity %s", entity) + } + // Delete the default ACL rule. We can't move this code earlier in the + // test, because the test depends on the fact that the object ACL inherits + // it. + if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil { + t.Errorf("default ACL: could not delete entity %s", entity) + } + + entity2 := ACLEntity("user-jbd@google.com") + rule2 := ACLRule{Entity: entity2, Role: RoleReader} + if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil { + t.Errorf("Error while putting bucket ACL rule: %v", err) + } + bACL, err := bkt.ACL().List(ctx) + if err != nil { + t.Errorf("Error while getting the ACL of the bucket: %v", err) + } else if !hasRule(bACL, rule2) { + t.Errorf("bucket ACL missing %+v", rule2) + } + if err := bkt.ACL().Delete(ctx, entity2); err != nil { + t.Errorf("Error while deleting bucket ACL rule: %v", err) + } + +} + +func hasRule(acl []ACLRule, rule ACLRule) bool { + for _, r := range acl { + if r == rule { + return true + } + } + return false +} + +func TestIntegration_ValidObjectNames(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + validNames := []string{ + "gopher", + "Гоферови", + "a", + strings.Repeat("a", 1024), + } + for _, name := range validNames { + if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { + t.Errorf("Object %q write failed: %v. Want success", name, err) + continue + } + defer bkt.Object(name).Delete(ctx) + } + + invalidNames := []string{ + "", // Too short. + strings.Repeat("a", 1025), // Too long. + "new\nlines", + "bad\xffunicode", + } + for _, name := range invalidNames { + // Invalid object names will either cause failure during Write or Close. 
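+ // (The writer buffers data and may not contact the server until Close
+ // flushes the upload, which is why the rejection can surface at either call.)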
+ if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { + continue + } + defer bkt.Object(name).Delete(ctx) + t.Errorf("%q should have failed. Didn't", name) + } +} + +func TestIntegration_WriterContentType(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + obj := client.Bucket(bucket).Object("content") + testCases := []struct { + content string + setType, wantType string + }{ + { + content: "It was the best of times, it was the worst of times.", + wantType: "text/plain; charset=utf-8", + }, + { + content: "My first page", + wantType: "text/html; charset=utf-8", + }, + { + content: "My first page", + setType: "text/html", + wantType: "text/html", + }, + { + content: "My first page", + setType: "image/jpeg", + wantType: "image/jpeg", + }, + } + for i, tt := range testCases { + if err := writeObject(ctx, obj, tt.setType, []byte(tt.content)); err != nil { + t.Errorf("writing #%d: %v", i, err) + } + attrs, err := obj.Attrs(ctx) + if err != nil { + t.Errorf("obj.Attrs: %v", err) + continue + } + if got := attrs.ContentType; got != tt.wantType { + t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType) + } + } +} + +func TestIntegration_ZeroSizedObject(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + obj := client.Bucket(bucket).Object("zero") + + // Check writing it works as expected. + w := obj.NewWriter(ctx) + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + defer obj.Delete(ctx) + + // Check we can read it too. + body, err := readObject(ctx, obj) + if err != nil { + t.Fatalf("readObject: %v", err) + } + if len(body) != 0 { + t.Errorf("Body is %v, want empty []byte{}", body) + } +} + +func TestIntegration_Encryption(t *testing.T) { + // This function tests customer-supplied encryption keys for all operations + // involving objects. Bucket and ACL operations aren't tested because they + // aren't affected by customer encryption. Neither is deletion. + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + obj := client.Bucket(bucket).Object("customer-encryption") + key := []byte("my-secret-AES-256-encryption-key") + keyHash := sha256.Sum256(key) + keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:]) + key2 := []byte("My-Secret-AES-256-Encryption-Key") + contents := "top secret." + + checkMetadataCall := func(msg string, f func(o *ObjectHandle) (*ObjectAttrs, error)) { + // Performing a metadata operation without the key should succeed. + attrs, err := f(obj) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + // The key hash should match... + if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { + t.Errorf("%s: key hash: got %q, want %q", msg, got, want) + } + // ...but CRC and MD5 should not be present. + if attrs.CRC32C != 0 { + t.Errorf("%s: CRC: got %v, want 0", msg, attrs.CRC32C) + } + if len(attrs.MD5) > 0 { + t.Errorf("%s: MD5: got %v, want len == 0", msg, attrs.MD5) + } + + // Performing a metadata operation with the key should succeed. + attrs, err = f(obj.Key(key)) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + // Check the key and content hashes. 
+ if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { + t.Errorf("%s: key hash: got %q, want %q", msg, got, want) + } + if attrs.CRC32C == 0 { + t.Errorf("%s: CRC: got 0, want non-zero", msg) + } + if len(attrs.MD5) == 0 { + t.Errorf("%s: MD5: got len == 0, want len > 0", msg) + } + } + + checkRead := func(msg string, o *ObjectHandle, k []byte, wantContents string) { + // Reading the object without the key should fail. + if _, err := readObject(ctx, o); err == nil { + t.Errorf("%s: reading without key: want error, got nil", msg) + } + // Reading the object with the key should succeed. + got, err := readObject(ctx, o.Key(k)) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + gotContents := string(got) + // And the contents should match what we wrote. + if gotContents != wantContents { + t.Errorf("%s: contents: got %q, want %q", msg, gotContents, wantContents) + } + } + + checkReadUnencrypted := func(msg string, obj *ObjectHandle, wantContents string) { + got, err := readObject(ctx, obj) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + gotContents := string(got) + if gotContents != wantContents { + t.Errorf("%s: got %q, want %q", msg, gotContents, wantContents) + } + } + + // Write to obj using our own encryption key, which is a valid 32-byte + // AES-256 key. + w := obj.Key(key).NewWriter(ctx) + w.Write([]byte(contents)) + if err := w.Close(); err != nil { + t.Fatal(err) + } + + checkMetadataCall("Attrs", func(o *ObjectHandle) (*ObjectAttrs, error) { + return o.Attrs(ctx) + }) + + checkMetadataCall("Update", func(o *ObjectHandle) (*ObjectAttrs, error) { + return o.Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"}) + }) + + checkRead("first object", obj, key, contents) + + obj2 := client.Bucket(bucket).Object("customer-encryption-2") + // Copying an object without the key should fail. + if _, err := obj2.CopierFrom(obj).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // Copying an object with the key should succeed. + if _, err := obj2.CopierFrom(obj.Key(key)).Run(ctx); err != nil { + t.Fatal(err) + } + // The destination object is not encrypted; we can read it without a key. + checkReadUnencrypted("copy dest", obj2, contents) + + // Providing a key on the destination but not the source should fail, + // since the source is encrypted. + if _, err := obj2.Key(key2).CopierFrom(obj).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + + // But copying with keys for both source and destination should succeed. + if _, err := obj2.Key(key2).CopierFrom(obj.Key(key)).Run(ctx); err != nil { + t.Fatal(err) + } + // And the destination should be encrypted, meaning we can only read it + // with a key. + checkRead("copy destination", obj2, key2, contents) + + // Change obj2's key to prepare for compose, where all objects must have + // the same key. Also illustrates key rotation: copy an object to itself + // with a different key. + if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil { + t.Fatal(err) + } + obj3 := client.Bucket(bucket).Object("customer-encryption-3") + // Composing without keys should fail. + if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // Keys on the source objects result in an error. + if _, err := obj3.ComposerFrom(obj.Key(key), obj2).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // A key on the destination object both decrypts the source objects + // and encrypts the destination. 
+ if _, err := obj3.Key(key).ComposerFrom(obj, obj2).Run(ctx); err != nil {
+ t.Fatalf("got %v, want nil", err)
+ }
+ // Check that the destination is encrypted.
+ checkRead("compose destination", obj3, key, contents+contents)
+
+ // You can't compose one or more unencrypted source objects into an
+ // encrypted destination object.
+ _, err := obj2.CopierFrom(obj2.Key(key)).Run(ctx) // remove obj2's encryption
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := obj3.Key(key).ComposerFrom(obj2).Run(ctx); err == nil {
+ t.Fatal("got nil, want error")
+ }
+}
+
+func TestIntegration_NonexistentBucket(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ client, bucket := testConfig(ctx, t)
+ defer client.Close()
+
+ bkt := client.Bucket(bucket + "-nonexistent")
+ if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist {
+ t.Errorf("Attrs: got %v, want ErrBucketNotExist", err)
+ }
+ it := bkt.Objects(ctx, nil)
+ if _, err := it.Next(); err != ErrBucketNotExist {
+ t.Errorf("Objects: got %v, want ErrBucketNotExist", err)
+ }
+}
+
+func TestIntegration_PerObjectStorageClass(t *testing.T) {
+ const (
+ defaultStorageClass = "STANDARD"
+ newStorageClass = "MULTI_REGIONAL"
+ )
+ ctx := context.Background()
+ client, bucket := testConfig(ctx, t)
+ defer client.Close()
+
+ bkt := client.Bucket(bucket)
+
+ // The bucket should have the default storage class.
+ battrs, err := bkt.Attrs(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if battrs.StorageClass != defaultStorageClass {
+ t.Fatalf("bucket storage class: got %q, want %q",
+ battrs.StorageClass, defaultStorageClass)
+ }
+ // Write an object; it should start with the bucket's storage class.
+ obj := bkt.Object("posc")
+ if err := writeObject(ctx, obj, "", []byte("foo")); err != nil {
+ t.Fatal(err)
+ }
+ oattrs, err := obj.Attrs(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if oattrs.StorageClass != defaultStorageClass {
+ t.Fatalf("object storage class: got %q, want %q",
+ oattrs.StorageClass, defaultStorageClass)
+ }
+ // Now use Copy to change the storage class.
+ copier := obj.CopierFrom(obj)
+ copier.StorageClass = newStorageClass
+ oattrs2, err := copier.Run(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if oattrs2.StorageClass != newStorageClass {
+ t.Fatalf("new object storage class: got %q, want %q",
+ oattrs2.StorageClass, newStorageClass)
+ }
+
+ // We can also write a new object using a non-default storage class.
+ obj2 := bkt.Object("posc2")
+ w := obj2.NewWriter(ctx)
+ w.StorageClass = newStorageClass
+ if _, err := w.Write([]byte("xxx")); err != nil {
+ t.Fatal(err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if w.Attrs().StorageClass != newStorageClass {
+ t.Fatalf("new object storage class: got %q, want %q",
+ w.Attrs().StorageClass, newStorageClass)
+ }
+}
+
+func TestIntegration_BucketInCopyAttrs(t *testing.T) {
+ // Confirm that if bucket is included in the object attributes of a rewrite
+ // call, but object name and content-type aren't, then we get an error. See
+ // the comment in Copier.Run.
+ ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + obj := bkt.Object("bucketInCopyAttrs") + if err := writeObject(ctx, obj, "", []byte("foo")); err != nil { + t.Fatal(err) + } + copier := obj.CopierFrom(obj) + rawObject := copier.ObjectAttrs.toRawObject(bucket) + _, err := copier.callRewrite(ctx, rawObject) + if err == nil { + t.Errorf("got nil, want error") + } +} + +func TestIntegration_NoUnicodeNormalization(t *testing.T) { + t.Parallel() + ctx := context.Background() + client, _ := testConfig(ctx, t) + defer client.Close() + bkt := client.Bucket("storage-library-test-bucket") + + for _, tst := range []struct { + nameQuoted, content string + }{ + {`"Caf\u00e9"`, "Normalization Form C"}, + {`"Cafe\u0301"`, "Normalization Form D"}, + } { + name, err := strconv.Unquote(tst.nameQuoted) + if err != nil { + t.Fatalf("invalid name: %s: %v", tst.nameQuoted, err) + } + got, err := readObject(ctx, bkt.Object(name)) + if err != nil { + t.Fatal(err) + } + if g := string(got); g != tst.content { + t.Errorf("content of %s is %q, want %q", tst.nameQuoted, g, tst.content) + } + } +} + +func TestIntegration_HashesOnUpload(t *testing.T) { + // Check that the user can provide hashes on upload, and that these are checked. + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client, bucket := testConfig(ctx, t) + if client == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + defer client.Close() + obj := client.Bucket(bucket).Object("hashesOnUpload-1") + data := []byte("I can't wait to be verified") + + write := func(w *Writer) error { + if _, err := w.Write(data); err != nil { + w.Close() + return err + } + return w.Close() + } + + crc32c := crc32.Checksum(data, crc32cTable) + // The correct CRC should succeed. + w := obj.NewWriter(ctx) + w.CRC32C = crc32c + w.SendCRC32C = true + if err := write(w); err != nil { + t.Fatal(err) + } + + // If we change the CRC, validation should fail. + w = obj.NewWriter(ctx) + w.CRC32C = crc32c + 1 + w.SendCRC32C = true + if err := write(w); err == nil { + t.Fatal("write with bad CRC32c: want error, got nil") + } + + // If we have the wrong CRC but forget to send it, we succeed. + w = obj.NewWriter(ctx) + w.CRC32C = crc32c + 1 + if err := write(w); err != nil { + t.Fatal(err) + } + + // MD5 + md5 := md5.Sum(data) + // The correct MD5 should succeed. + w = obj.NewWriter(ctx) + w.MD5 = md5[:] + if err := write(w); err != nil { + t.Fatal(err) + } + + // If we change the MD5, validation should fail. + w = obj.NewWriter(ctx) + w.MD5 = append([]byte(nil), md5[:]...) + w.MD5[0]++ + if err := write(w); err == nil { + t.Fatal("write with bad MD5: want error, got nil") + } +} + +func TestIntegration_BucketIAM(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + // This bucket is unique to this test run. So we don't have + // to worry about other runs interfering with our IAM policy + // changes. + + member := "projectViewer:" + testutil.ProjID() + role := iam.RoleName("roles/storage.objectViewer") + // Get the bucket's IAM policy. + policy, err := bkt.IAM().Policy(ctx) + if err != nil { + t.Fatalf("Getting policy: %v", err) + } + // The member should not have the role. + if policy.HasRole(member, role) { + t.Errorf("member %q has role %q", member, role) + } + // Change the policy. 
+ policy.Add(member, role) + if err := bkt.IAM().SetPolicy(ctx, policy); err != nil { + t.Fatalf("SetPolicy: %v", err) + } + // Confirm that the binding was added. + policy, err = bkt.IAM().Policy(ctx) + if err != nil { + t.Fatalf("Getting policy: %v", err) + } + if !policy.HasRole(member, role) { + t.Errorf("member %q does not have role %q", member, role) + } + + // Check TestPermissions. + // This client should have all these permissions (and more). + perms := []string{"storage.buckets.get", "storage.buckets.delete"} + got, err := bkt.IAM().TestPermissions(ctx, perms) + if err != nil { + t.Fatalf("TestPermissions: %v", err) + } + sort.Strings(perms) + sort.Strings(got) + if !testutil.Equal(got, perms) { + t.Errorf("got %v, want %v", got, perms) + } +} + +func TestIntegration_RequesterPays(t *testing.T) { + // This test needs a second project and user (token source) to test + // all possibilities. Since we need these things for Firestore already, + // we use them here. + // + // There are up to three entities involved in a requester-pays call: + // + // 1. The user making the request. Here, we use + // a. The account used to create the token source used for all our + // integration tests (see testutil.TokenSource). + // b. The account used for the Firestore tests. + // 2. The project that owns the requester-pays bucket. Here, that + // is the test project ID (see testutil.ProjID). + // 3. The project provided as the userProject parameter of the request; + // the project to be billed. This test uses: + // a. The project that owns the requester-pays bucket (same as (2)) + // b. Another project (the Firestore project). + // + // The following must hold for this test to work: + // - (1a) must have resourcemanager.projects.createBillingAssignment permission + // (Owner role) on (2) (the project, not the bucket). + // - (1b) must NOT have that permission on (2). + // - (1b) must have serviceusage.services.use permission (Editor role) on (3b). + // - (1b) must NOT have that permission on (3a). + // - (1a) must NOT have that permission on (3b). + const wantErrorCode = 400 + + ctx := context.Background() + client, bucketName := testConfig(ctx, t) + defer client.Close() + bucketName += "-rp" + b := client.Bucket(bucketName) + projID := testutil.ProjID() + // Use Firestore project as a project that does not contain the bucket. + otherProjID := os.Getenv(envFirestoreProjID) + if otherProjID == "" { + t.Fatalf("need a second project (env var %s)", envFirestoreProjID) + } + ts := testutil.TokenSourceEnv(ctx, envFirestorePrivateKey, ScopeFullControl) + if ts == nil { + t.Fatalf("need a second account (env var %s)", envFirestorePrivateKey) + } + otherClient, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + defer otherClient.Close() + ob := otherClient.Bucket(bucketName) + user, err := keyFileEmail(os.Getenv("GCLOUD_TESTS_GOLANG_KEY")) + if err != nil { + t.Fatal(err) + } + otherUser, err := keyFileEmail(os.Getenv(envFirestorePrivateKey)) + if err != nil { + t.Fatal(err) + } + + // Create a requester-pays bucket. The bucket is contained in the project projID. + if err := b.Create(ctx, projID, &BucketAttrs{RequesterPays: true}); err != nil { + t.Fatal(err) + } + if err := b.ACL().Set(ctx, ACLEntity("user-"+otherUser), RoleOwner); err != nil { + t.Fatal(err) + } + + // Extract the error code from err if it's a googleapi.Error. 
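+ // A nil error maps to 0 and a non-API error to -1, so the call matrix below
+ // can compare outcomes against expected HTTP status codes as plain ints.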
+ errCode := func(err error) int { + if err == nil { + return 0 + } + if err, ok := err.(*googleapi.Error); ok { + return err.Code + } + return -1 + } + + // Call f under various conditions. + // Here b and ob refer to the same bucket, but b is bound to client, + // while ob is bound to otherClient. The clients differ in their credentials, + // i.e. the identity of the user making the RPC: b's user is an Owner on the + // bucket's containing project, ob's is not. + call := func(msg string, f func(*BucketHandle) error) { + // user: an Owner on the containing project + // userProject: absent + // result: success, by the rule permitting access by owners of the containing bucket. + if err := f(b); err != nil { + t.Errorf("%s: %v, want nil\n"+ + "confirm that %s is an Owner on %s", + msg, err, user, projID) + } + // user: an Owner on the containing project + // userProject: containing project + // result: success, by the same rule as above; userProject is unnecessary but allowed. + if err := f(b.UserProject(projID)); err != nil { + t.Errorf("%s: got %v, want nil", msg, err) + } + // user: not an Owner on the containing project + // userProject: absent + // result: failure, by the standard requester-pays rule + err := f(ob) + if got, want := errCode(err), wantErrorCode; got != want { + t.Errorf("%s: got error %s, want code %d\n"+ + "confirm that %s is NOT an Owner on %s", + msg, err, want, otherUser, projID) + } + // user: not an Owner on the containing project + // userProject: not the containing one, but user has Editor role on it + // result: success, by the standard requester-pays rule + if err := f(ob.UserProject(otherProjID)); err != nil { + t.Errorf("%s: got %v, want nil\n"+ + "confirm that %s is an Editor on %s and that that project has billing enabled", + msg, err, otherUser, otherProjID) + } + // user: not an Owner on the containing project + // userProject: the containing one, on which the user does NOT have Editor permission. + // result: failure + err = f(ob.UserProject("veener-jba")) + if got, want := errCode(err), 403; got != want { + t.Errorf("%s: got error %s, want code %d\n"+ + "confirm that %s is NOT an Editor on %s", + msg, err, want, otherUser, "veener-jba") + } + } + + // Getting its attributes requires a user project. + var attrs *BucketAttrs + call("Bucket attrs", func(b *BucketHandle) error { + a, err := b.Attrs(ctx) + if a != nil { + attrs = a + } + return err + }) + if attrs != nil { + if got, want := attrs.RequesterPays, true; got != want { + t.Fatalf("attr.RequesterPays = %t, want %t", got, want) + } + } + // Object operations. + call("write object", func(b *BucketHandle) error { + return writeObject(ctx, b.Object("foo"), "text/plain", []byte("hello")) + }) + call("read object", func(b *BucketHandle) error { + _, err := readObject(ctx, b.Object("foo")) + return err + }) + call("object attrs", func(b *BucketHandle) error { + _, err := b.Object("foo").Attrs(ctx) + return err + }) + call("update object", func(b *BucketHandle) error { + _, err := b.Object("foo").Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"}) + return err + }) + + // ACL operations. 
+ entity := ACLEntity("domain-google.com") + call("bucket acl set", func(b *BucketHandle) error { + return b.ACL().Set(ctx, entity, RoleReader) + }) + call("bucket acl list", func(b *BucketHandle) error { + _, err := b.ACL().List(ctx) + return err + }) + call("bucket acl delete", func(b *BucketHandle) error { + err := b.ACL().Delete(ctx, entity) + if errCode(err) == 404 { + // Since we call the function multiple times, it will + // fail with NotFound for all but the first. + return nil + } + return err + }) + call("default object acl set", func(b *BucketHandle) error { + return b.DefaultObjectACL().Set(ctx, entity, RoleReader) + }) + call("default object acl list", func(b *BucketHandle) error { + _, err := b.DefaultObjectACL().List(ctx) + return err + }) + call("default object acl delete", func(b *BucketHandle) error { + err := b.DefaultObjectACL().Delete(ctx, entity) + if errCode(err) == 404 { + return nil + } + return err + }) + call("object acl set", func(b *BucketHandle) error { + return b.Object("foo").ACL().Set(ctx, entity, RoleReader) + }) + call("object acl list", func(b *BucketHandle) error { + _, err := b.Object("foo").ACL().List(ctx) + return err + }) + call("object acl delete", func(b *BucketHandle) error { + err := b.Object("foo").ACL().Delete(ctx, entity) + if errCode(err) == 404 { + return nil + } + return err + }) + + // Copy and compose. + call("copy", func(b *BucketHandle) error { + _, err := b.Object("copy").CopierFrom(b.Object("foo")).Run(ctx) + return err + }) + call("compose", func(b *BucketHandle) error { + _, err := b.Object("compose").ComposerFrom(b.Object("foo"), b.Object("copy")).Run(ctx) + return err + }) + + // Deletion. + call("delete object", func(b *BucketHandle) error { + err := b.Object("foo").Delete(ctx) + if err == ErrObjectNotExist { + return nil + } + return err + }) + for _, obj := range []string{"copy", "compose"} { + if err := b.UserProject(projID).Object(obj).Delete(ctx); err != nil { + t.Fatalf("could not delete %q: %v", obj, err) + } + } + if err := b.Delete(ctx); err != nil { + t.Fatalf("deleting bucket: %v", err) + } +} + +// TODO(jba): move to testutil, factor out from firestore/integration_test.go. 
+const ( + envFirestoreProjID = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID" + envFirestorePrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY" +) + +func keyFileEmail(filename string) (string, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + var v struct { + ClientEmail string `json:"client_email"` + } + if err := json.Unmarshal(bytes, &v); err != nil { + return "", err + } + return v.ClientEmail, nil +} + +func TestNotifications(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + bkt := client.Bucket(bucket) + + checkNotifications := func(msg string, want map[string]*Notification) { + got, err := bkt.Notifications(ctx) + if err != nil { + t.Fatal(err) + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("%s: got=-, want=+:\n%s", msg, diff) + } + } + checkNotifications("initial", map[string]*Notification{}) + + nArg := &Notification{ + TopicProjectID: testutil.ProjID(), + TopicID: "go-storage-notification-test", + PayloadFormat: NoPayload, + } + n, err := bkt.AddNotification(ctx, nArg) + if err != nil { + t.Fatal(err) + } + nArg.ID = n.ID + if !testutil.Equal(n, nArg) { + t.Errorf("got %+v, want %+v", n, nArg) + } + checkNotifications("after add", map[string]*Notification{n.ID: n}) + + if err := bkt.DeleteNotification(ctx, n.ID); err != nil { + t.Fatal(err) + } + checkNotifications("after delete", map[string]*Notification{}) +} + +func TestIntegration_Public(t *testing.T) { + // Confirm that an unauthenticated client can access a public bucket. + + // See https://cloud.google.com/storage/docs/public-datasets/landsat + const landsatBucket = "gcp-public-data-landsat" + const landsatPrefix = "LC08/PRE/044/034/LC80440342016259LGN00/" + const landsatObject = landsatPrefix + "LC80440342016259LGN00_MTL.txt" + + // Create an unauthenticated client. + ctx := context.Background() + client, err := NewClient(ctx, option.WithoutAuthentication()) + if err != nil { + t.Fatal(err) + } + defer client.Close() + bkt := client.Bucket(landsatBucket) + obj := bkt.Object(landsatObject) + + // Read a public object. + bytes, err := readObject(ctx, obj) + if err != nil { + t.Fatal(err) + } + if got, want := len(bytes), 7903; got != want { + t.Errorf("len(bytes) = %d, want %d", got, want) + } + + // List objects in a public bucket. + iter := bkt.Objects(ctx, &Query{Prefix: landsatPrefix}) + gotCount := 0 + for { + _, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + gotCount++ + } + if wantCount := 13; gotCount != wantCount { + t.Errorf("object count: got %d, want %d", gotCount, wantCount) + } + + errCode := func(err error) int { + if err, ok := err.(*googleapi.Error); !ok { + return -1 + } else { + return err.Code + } + } + + // Reading from or writing to a non-public bucket fails. + c, bucketName := testConfig(ctx, t) + defer c.Close() + nonPublicObj := client.Bucket(bucketName).Object("noauth") + // Oddly, reading returns 403 but writing returns 401. 
+ _, err = readObject(ctx, nonPublicObj) + if got, want := errCode(err), 403; got != want { + t.Errorf("got code %d; want %d\nerror: %v", got, want, err) + } + err = writeObject(ctx, nonPublicObj, "text/plain", []byte("b")) + if got, want := errCode(err), 401; got != want { + t.Errorf("got code %d; want %d\nerror: %v", got, want, err) + } +} + +func writeObject(ctx context.Context, obj *ObjectHandle, contentType string, contents []byte) error { + w := obj.NewWriter(ctx) + w.ContentType = contentType + w.CacheControl = "public, max-age=60" + if contents != nil { + if _, err := w.Write(contents); err != nil { + _ = w.Close() + return err + } + } + return w.Close() +} + +func readObject(ctx context.Context, obj *ObjectHandle) ([]byte, error) { + r, err := obj.NewReader(ctx) + if err != nil { + return nil, err + } + defer r.Close() + return ioutil.ReadAll(r) +} + +// cleanup deletes the bucket used for testing, as well as old +// testing buckets that weren't cleaned previously. +func cleanup() error { + if testing.Short() { + return nil // Don't clean up in short mode. + } + ctx := context.Background() + client, bucket := config(ctx) + if client == nil { + return nil // Don't cleanup if we're not configured correctly. + } + defer client.Close() + if err := killBucket(ctx, client, bucket); err != nil { + return err + } + + // Delete buckets whose name begins with our test prefix, and which were + // created a while ago. (Unfortunately GCS doesn't provide last-modified + // time, which would be a better way to check for staleness.) + const expireAge = 24 * time.Hour + projectID := testutil.ProjID() + it := client.Buckets(ctx, projectID) + it.Prefix = projectID + testPrefix + for { + bktAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if time.Since(bktAttrs.Created) > expireAge { + log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge) + if err := killBucket(ctx, client, bktAttrs.Name); err != nil { + return err + } + } + } + return nil +} + +// killBucket deletes a bucket and all its objects. +func killBucket(ctx context.Context, client *Client, bucketName string) error { + bkt := client.Bucket(bucketName) + // Bucket must be empty to delete. + it := bkt.Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if err := bkt.Object(objAttrs.Name).Delete(ctx); err != nil { + return fmt.Errorf("deleting %q: %v", bucketName+"/"+objAttrs.Name, err) + } + } + // GCS is eventually consistent, so this delete may fail because the + // replica still sees an object in the bucket. We log the error and expect + // a later test run to delete the bucket. + if err := bkt.Delete(ctx); err != nil { + log.Printf("deleting %q: %v", bucketName, err) + } + return nil +} + +func randomContents() []byte { + h := md5.New() + io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000))) + return h.Sum(nil) +} + +type zeros struct{} + +func (zeros) Read(p []byte) (int, error) { return len(p), nil } diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 0000000..46423a8 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + if shouldRetry(err) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/storage/invoke_test.go b/vendor/cloud.google.com/go/storage/invoke_test.go new file mode 100644 index 0000000..f7a5807 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke_test.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" +) + +func TestInvoke(t *testing.T) { + t.Parallel() + ctx := context.Background() + // Time-based tests are flaky. We just make sure that invoke eventually + // returns with the right error. + + for _, test := range []struct { + count int // number of times to return retryable error + retryCode int // error code for retryable error + err error // error to return after count returns of retryCode + }{ + {0, 0, nil}, + {0, 0, errors.New("foo")}, + {1, 429, nil}, + {1, 429, errors.New("bar")}, + {2, 518, nil}, + {2, 599, &googleapi.Error{Code: 428}}, + } { + counter := 0 + call := func() error { + counter++ + if counter <= test.count { + return &googleapi.Error{Code: test.retryCode} + } + return test.err + } + got := runWithRetry(ctx, call) + if got != test.err { + t.Errorf("%v: got %v, want %v", test, got, test.err) + } + } +} diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go new file mode 100644 index 0000000..c354e74 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go110.go @@ -0,0 +1,40 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.10 + +package storage + +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry on REFUSED_STREAM. + // Unfortunately the error type is unexported, so we resort to string + // matching. + return strings.Contains(e.Error(), "REFUSED_STREAM") + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/not_go17.go b/vendor/cloud.google.com/go/storage/not_go17.go new file mode 100644 index 0000000..1f6f7ae --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go17.go @@ -0,0 +1,26 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.7 + +package storage + +import ( + "net/http" +) + +func withContext(r *http.Request, _ interface{}) *http.Request { + // In Go 1.6 and below, ignore the context. + return r +} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go new file mode 100644 index 0000000..b95dd45 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -0,0 +1,179 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + "regexp" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// A Notification describes how to send Cloud PubSub messages when certain +// events occur in a bucket. +type Notification struct { + //The ID of the notification. + ID string + + // The ID of the topic to which this subscription publishes. + TopicID string + + // The ID of the project to which the topic belongs. + TopicProjectID string + + // Only send notifications about listed event types. If empty, send notifications + // for all event types. + // See https://cloud.google.com/storage/docs/pubsub-notifications#events. + EventTypes []string + + // If present, only apply this notification configuration to object names that + // begin with this prefix. 
+ ObjectNamePrefix string
+
+ // An optional list of additional attributes to attach to each Cloud PubSub
+ // message published for this notification subscription.
+ CustomAttributes map[string]string
+
+ // The contents of the message payload.
+ // See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
+ PayloadFormat string
+}
+
+// Values for Notification.PayloadFormat.
+const (
+ // Send no payload with notification messages.
+ NoPayload = "NONE"
+
+ // Send object metadata as JSON with notification messages.
+ JSONPayload = "JSON_API_V1"
+)
+
+// Values for Notification.EventTypes.
+const (
+ // Event that occurs when an object is successfully created.
+ ObjectFinalizeEvent = "OBJECT_FINALIZE"
+
+ // Event that occurs when the metadata of an existing object changes.
+ ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
+
+ // Event that occurs when an object is permanently deleted.
+ ObjectDeleteEvent = "OBJECT_DELETE"
+
+ // Event that occurs when the live version of an object becomes an
+ // archived version.
+ ObjectArchiveEvent = "OBJECT_ARCHIVE"
+)
+
+func toNotification(rn *raw.Notification) *Notification {
+ n := &Notification{
+ ID: rn.Id,
+ EventTypes: rn.EventTypes,
+ ObjectNamePrefix: rn.ObjectNamePrefix,
+ CustomAttributes: rn.CustomAttributes,
+ PayloadFormat: rn.PayloadFormat,
+ }
+ n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
+ return n
+}
+
+var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
+
+// parseNotificationTopic extracts the project and topic IDs from the full
+// resource name returned by the service. If the name is malformed, it returns
+// "?" for both IDs.
+func parseNotificationTopic(nt string) (projectID, topicID string) {
+ matches := topicRE.FindStringSubmatch(nt)
+ if matches == nil {
+ return "?", "?"
+ }
+ return matches[1], matches[2]
+}
+
+func toRawNotification(n *Notification) *raw.Notification {
+ return &raw.Notification{
+ Id: n.ID,
+ Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
+ n.TopicProjectID, n.TopicID),
+ EventTypes: n.EventTypes,
+ ObjectNamePrefix: n.ObjectNamePrefix,
+ CustomAttributes: n.CustomAttributes,
+ PayloadFormat: string(n.PayloadFormat),
+ }
+}
+
+// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
+// and PayloadFormat, and must not set its ID. The other fields are all optional. The
+// returned Notification's ID can be used to refer to it.
+func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (*Notification, error) {
+ if n.ID != "" {
+ return nil, errors.New("storage: AddNotification: ID must not be set")
+ }
+ if n.TopicProjectID == "" {
+ return nil, errors.New("storage: AddNotification: missing TopicProjectID")
+ }
+ if n.TopicID == "" {
+ return nil, errors.New("storage: AddNotification: missing TopicID")
+ }
+ call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
+ setClientHeader(call.Header())
+ if b.userProject != "" {
+ call.UserProject(b.userProject)
+ }
+ rn, err := call.Context(ctx).Do()
+ if err != nil {
+ return nil, err
+ }
+ return toNotification(rn), nil
+}
+
+// Notifications returns all the Notifications configured for this bucket, as a map
+// indexed by notification ID.
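+//
+// A minimal usage sketch (bkt is a hypothetical *BucketHandle):
+//
+//	ns, err := bkt.Notifications(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for id, n := range ns {
+//		log.Printf("notification %s publishes to projects/%s/topics/%s",
+//			id, n.TopicProjectID, n.TopicID)
+//	}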
+func (b *BucketHandle) Notifications(ctx context.Context) (map[string]*Notification, error) { + call := b.c.raw.Notifications.List(b.name) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + var res *raw.Notifications + var err error + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func notificationsToMap(rns []*raw.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, rn := range rns { + m[rn.Id] = toNotification(rn) + } + return m +} + +// DeleteNotification deletes the notification with the given ID. +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) error { + call := b.c.raw.Notifications.Delete(b.name, id) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + return call.Context(ctx).Do() +} diff --git a/vendor/cloud.google.com/go/storage/notifications_test.go b/vendor/cloud.google.com/go/storage/notifications_test.go new file mode 100644 index 0000000..d4dcf6b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications_test.go @@ -0,0 +1,97 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "testing" + + "cloud.google.com/go/internal/testutil" + raw "google.golang.org/api/storage/v1" +) + +func TestParseNotificationTopic(t *testing.T) { + for _, test := range []struct { + in string + wantProjectID string + wantTopicID string + }{ + {"", "?", "?"}, + {"foobar", "?", "?"}, + {"//pubsub.googleapis.com/projects/foo", "?", "?"}, + {"//pubsub.googleapis.com/projects/my-project/topics/my-topic", + "my-project", "my-topic"}, + } { + gotProjectID, gotTopicID := parseNotificationTopic(test.in) + if gotProjectID != test.wantProjectID || gotTopicID != test.wantTopicID { + t.Errorf("%q: got (%q, %q), want (%q, %q)", + test.in, gotProjectID, gotTopicID, test.wantProjectID, test.wantTopicID) + } + } + +} + +func TestConvertNotification(t *testing.T) { + want := &Notification{ + ID: "id", + TopicProjectID: "my-project", + TopicID: "my-topic", + EventTypes: []string{ObjectFinalizeEvent}, + ObjectNamePrefix: "prefix", + CustomAttributes: map[string]string{"a": "b"}, + PayloadFormat: JSONPayload, + } + got := toNotification(toRawNotification(want)) + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} + +func TestNotificationsToMap(t *testing.T) { + got := notificationsToMap(nil) + want := map[string]*Notification{} + if !testutil.Equal(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + in := []*raw.Notification{ + {Id: "a", Topic: "//pubsub.googleapis.com/projects/P1/topics/T1"}, + {Id: "b", Topic: "//pubsub.googleapis.com/projects/P2/topics/T2"}, + {Id: "c", Topic: "//pubsub.googleapis.com/projects/P3/topics/T3"}, + } + got = notificationsToMap(in) + want = map[string]*Notification{ + "a": &Notification{ID: "a", TopicProjectID: "P1", TopicID: "T1"}, + "b": &Notification{ID: "b", TopicProjectID: "P2", TopicID: "T2"}, + "c": &Notification{ID: "c", TopicProjectID: "P3", TopicID: "T3"}, + } + if diff := testutil.Diff(got, want); diff != "" { + t.Errorf("got=-, want=+:\n%s", diff) + } +} + +func TestAddNotificationsErrors(t *testing.T) { + c := &Client{} + b := c.Bucket("b") + for _, n := range []*Notification{ + {ID: "foo", TopicProjectID: "p", TopicID: "t"}, // has ID + {TopicProjectID: "p"}, // missing TopicID + {TopicID: "t"}, // missing TopicProjectID + } { + _, err := b.AddNotification(nil, n) + if err == nil { + t.Errorf("%+v: got nil, want error", n) + } + } +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 0000000..ef964f0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,86 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "hash/crc32" + "io" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. 
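+//
+// A typical read, as a sketch (obj is a hypothetical *ObjectHandle):
+//
+//	r, err := obj.NewReader(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer r.Close()
+//	data, err := ioutil.ReadAll(r)
+//
+// When the whole object is read and the server supplied a CRC32c checksum,
+// Read verifies the accumulated checksum against it, as the code below shows.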
+type Reader struct { + body io.ReadCloser + remain, size int64 + contentType string + contentEncoding string + cacheControl string + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.body.Read(p) + if r.remain != -1 { + r.remain -= int64(n) + } + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if r.remain == 0 && r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + return n, err +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +func (r *Reader) Size() int64 { + return r.size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +func (r *Reader) ContentType() string { + return r.contentType +} + +// ContentEncoding returns the content encoding of the object. +func (r *Reader) ContentEncoding() string { + return r.contentEncoding +} + +// CacheControl returns the cache control of the object. +func (r *Reader) CacheControl() string { + return r.cacheControl +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 0000000..0723f07 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1201 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/version" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. 
+ ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + // Set fields to nil so that subsequent uses + // will panic. + c.hc = nil + c.raw = nil + return nil +} + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. + // If your application is running on Google App Engine, you can use appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. 
It must be + // a datetime in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extension headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 string +} + +var ( + canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) + excludedCanonicalHeaders = map[string]bool{ + "x-goog-encryption-key": true, + "x-goog-encryption-key-sha256": true, + } +) + +// sanitizeHeaders applies the specifications for canonical extension headers at +// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +func sanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + // Only keep canonical headers, discard any others. + headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) + if len(headerMatches) == 0 { + continue + } + + header := strings.ToLower(strings.TrimSpace(headerMatches[1])) + if excludedCanonicalHeaders[headerMatches[1]] { + // Do not keep any deliberately excluded canonical headers when signing. + continue + } + value := strings.TrimSpace(headerMatches[2]) + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[header] = append(headerMap[header], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the + // header name from the header value or around the values + // themselves. The values should be separated by commas. + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases + // anyway and we should assume that there will be no + // canonical headers using empty values. Any such headers + // are discarded at the regexp stage above. + sanitizedHeaders = append( + sanitizedHeaders, + fmt.Sprintf("%s:%s", header, strings.Join(values, ",")), + ) + } + sort.Strings(sanitizedHeaders) + return sanitizedHeaders +} + +// SignedURL returns a URL for the specified object. Signed URLs allow +// the users access to a restricted resource for a limited time without having a +// Google account or signing in. For more information about the signed +// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. 
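+//
+// A sketch of typical usage (identifiers such as pemBytes are illustrative):
+//
+//   url, err := SignedURL("my-bucket", "my-object", &SignedURLOptions{
+//       GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//       PrivateKey:     pemBytes, // contents of the service account key file
+//       Method:         "GET",
+//       Expires:        time.Now().Add(15 * time.Minute),
+//   })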
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + if opts == nil { + return "", errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return "", errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Method == "" { + return "", errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return "", errors.New("storage: missing required expires option") + } + if opts.MD5 != "" { + md5, err := base64.StdEncoding.DecodeString(opts.MD5) + if err != nil || len(md5) != 16 { + return "", errors.New("storage: invalid MD5 checksum") + } + } + opts.Headers = sanitizeHeaders(opts.Headers) + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + if len(opts.Headers) > 0 { + fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) + } + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = "storage.googleapis.com" + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + acl ACLHandle + gen int64 // a negative value indicates latest + conds *Conditions + encryptionKey []byte // AES-256 key + userProject string // for requester-pays buckets + readCompressed bool // Accept-Encoding: gzip +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. +func (o *ObjectHandle) ACL() *ACLHandle { + return &o.acl +} + +// Generation returns a new ObjectHandle that operates on a specific generation +// of the object. +// By default, the handle operates on the latest generation. Not +// all operations work when given a specific generation; check the API +// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. +func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. +// Operations on the new handle will only occur if the preconditions are +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. 
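+//
+// For example (a sketch):
+//
+//   attrs, err := obj.If(Conditions{GenerationMatch: gen}).Attrs(ctx)
+//
+// fails with a precondition error if the object's generation is no longer gen.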
+func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. +// See https://cloud.google.com/storage/docs/encryption for details. +func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. 
+ forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// Set a field to its zero value to delete it. +// +// For example, to change ContentType and delete ContentEncoding and +// Metadata, use +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } +type ObjectAttrsToUpdate struct { + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + Metadata map[string]string // set to map[string]string{} to delete + ACL []ACLRule +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.gen, o.conds, call); err != nil { + return err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + // Encryption doesn't apply to Delete. + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() error { return call.Do() }) + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// ReadCompressed when true causes the read to happen without decompressing. +func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { + o2 := *o + o2.readCompressed = compressed + return &o2 +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. 
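+//
+// For example, to read at most the first 64 KiB of an object (a sketch):
+//
+//   r, err := obj.NewRangeReader(ctx, 0, 64*1024)
+//   if err != nil {
+//       // TODO: handle error.
+//   }
+//   defer r.Close()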
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) { + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + RawQuery: conditionsQuery(o.gen, o.conds), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = withContext(req, ctx) + if length < 0 && offset > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } else if length > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) + } + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } + if o.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + return nil + }) + if err != nil { + return nil, err + } + + var size int64 // total size of object, even if a range was requested. + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + } else { + size = res.ContentLength + } + + remain := res.ContentLength + body := res.Body + if length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var ( + checkCRC bool + crc uint32 + ) + // Even if there is a CRC header, we can't compute the hash on partial data. + if remain == size { + crc, checkCRC = parseCRC32c(res) + } + return &Reader{ + body: body, + size: size, + remain: remain, + contentType: res.Header.Get("Content-Type"), + contentEncoding: res.Header.Get("Content-Encoding"), + cacheControl: res.Header.Get("Cache-Control"), + wantCRC: crc, + checkCRC: checkCRC, + }, nil +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. 
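+// A minimal write sketch (error handling elided):
+//
+//   w := obj.NewWriter(ctx)
+//   if _, err := io.Copy(w, src); err != nil { /* ... */ }
+//   if err := w.Close(); err != nil { /* ... */ }
+//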
+// The object will not be available (and any previous object will remain) +// until Close has been called. +// +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. If no ContentType +// attribute is specified, the content type will be automatically sniffed +// using net/http.DetectContentType. +// +// It is the caller's responsibility to call Close when writing is done. +func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + return &Writer{ + ctx: ctx, + o: o, + donec: make(chan struct{}), + ObjectAttrs: ObjectAttrs{Name: o.object}, + ChunkSize: googleapi.DefaultUploadChunkSize, + } +} + +func (o *ObjectHandle) validate() error { + if o.bucket == "" { + return errors.New("storage: bucket name is empty") + } + if o.object == "" { + return errors.New("storage: object name is empty") + } + if !utf8.ValidString(o.object) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + return nil +} + +// parseKey converts the binary contents of a private key file to an +// *rsa.PrivateKey. It detects whether the private key is in a PEM container or +// not. If so, it extracts the private key from PEM container before +// conversion. It only supports PEM containers with no passphrase. +func parseKey(key []byte) (*rsa.PrivateKey, error) { + if block, _ := pem.Decode(key); block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, err + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("oauth2: private key is invalid") + } + return parsed, nil +} + +func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { + var acl []*raw.ObjectAccessControl + if len(oldACL) > 0 { + acl = make([]*raw.ObjectAccessControl, len(oldACL)) + for i, rule := range oldACL { + acl[i] = &raw.ObjectAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + return acl +} + +// toRawObject copies the editable attributes from o to the raw library's Object type. +func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { + acl := toRawObjectACL(o.ACL) + return &raw.Object{ + Bucket: bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: acl, + Metadata: o.Metadata, + } +} + +// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. +type ObjectAttrs struct { + // Bucket is the name of the bucket containing this GCS object. + // This field is read-only. + Bucket string + + // Name is the name of the object within the bucket. + // This field is read-only. + Name string + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentLanguage is the content language of the object's content. + ContentLanguage string + + // CacheControl is the Cache-Control header to be sent in the response + // headers when serving the object data. + CacheControl string + + // ACL is the list of access control rules for the object. + ACL []ACLRule + + // Owner is the owner of the object. This field is read-only. + // + // If non-zero, it is in the form of "user-". + Owner string + + // Size is the length of the object's content. This field is read-only. 
+ Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. + ContentDisposition string + + // MD5 is the MD5 hash of the object's content. This field is read-only, + // except when used from a Writer. If set on a Writer, the uploaded + // data is rejected if its MD5 hash does not match this field. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using + // the Castagnoli93 polynomial. This field is read-only, except when + // used from a Writer. If set on a Writer and Writer.SendCRC32C + // is true, the uploaded data is rejected if its CRC32c hash does not + // match this field. + CRC32C uint32 + + // MediaLink is an URL to the object's content. This field is read-only. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + Metadata map[string]string + + // Generation is the generation number of the object's content. + // This field is read-only. + Generation int64 + + // Metageneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. This field is read-only. + Metageneration int64 + + // StorageClass is the storage class of the object. + // This value defines how objects in the bucket are stored and + // determines the SLA and the cost of storage. Typical values are + // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" + // and "DURABLE_REDUCED_AVAILABILITY". + // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" + // or "REGIONAL" depending on the bucket's location settings. + StorageClass string + + // Created is the time the object was created. This field is read-only. + Created time.Time + + // Deleted is the time the object was deleted. + // If not deleted, it is the zero value. This field is read-only. + Deleted time.Time + + // Updated is the creation or modification time of the object. + // For buckets with versioning enabled, changing an object's + // metadata does not change this property. This field is read-only. + Updated time.Time + + // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the + // customer-supplied encryption key for the object. It is empty if there is + // no customer-supplied encryption key. + // See // https://cloud.google.com/storage/docs/encryption for more about + // encryption in Google Cloud Storage. + CustomerKeySHA256 string + + // Prefix is set only for ObjectAttrs which represent synthetic "directory + // entries" when iterating over buckets using Query.Delimiter. See + // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be + // populated. + Prefix string +} + +// convertTime converts a time in RFC3339 format to time.Time. +// If any error occurs in parsing, the zero-value time.Time is silently returned. 
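+//
+// For example, convertTime("2017-01-02T15:04:05Z") parses successfully,
+// while convertTime("not-a-time") silently yields the zero time.Time.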
+func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func newObject(o *raw.Object) *ObjectAttrs { + if o == nil { + return nil + } + acl := make([]ACLRule, len(o.Acl)) + for i, rule := range o.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + crc32c, _ := decodeUint32(o.Crc32c) + var sha256 string + if o.CustomerEncryption != nil { + sha256 = o.CustomerEncryption.KeySha256 + } + return &ObjectAttrs{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ACL: acl, + Owner: owner, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + CustomerKeySHA256: sha256, + Created: convertTime(o.TimeCreated), + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + } +} + +// Decode a uint32 encoded in Base64 in big-endian byte order. +func decodeUint32(b64 string) (uint32, error) { + d, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return 0, err + } + if len(d) != 4 { + return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) + } + return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil +} + +// Encode a uint32 as Base64 in big-endian byte order. +func encodeUint32(u uint32) string { + b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} + return base64.StdEncoding.EncodeToString(b) +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. + Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool +} + +// contentTyper implements ContentTyper to enable an +// io.ReadCloser to specify its MIME type. +type contentTyper struct { + io.Reader + t string +} + +func (c *contentTyper) ContentType() string { + return c.t +} + +// Conditions constrain methods to act on specific generations of +// objects. +// +// The zero value is an empty set of constraints. Not all conditions or +// combinations of conditions are applicable to all methods. +// See https://cloud.google.com/storage/docs/generations-preconditions +// for details on how these operate. +type Conditions struct { + // Generation constraints. + // At most one of the following can be set to a non-zero value. + + // GenerationMatch specifies that the object must have the given generation + // for the operation to occur. + // If GenerationMatch is zero, it has no effect. + // Use DoesNotExist to specify that the object does not exist in the bucket. 
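+ //
+ // For example, If(Conditions{GenerationMatch: attrs.Generation}) makes an
+ // operation fail if the object was overwritten after attrs was read (sketch).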
+ GenerationMatch int64 + + // GenerationNotMatch specifies that the object must not have the given + // generation for the operation to occur. + // If GenerationNotMatch is zero, it has no effect. + GenerationNotMatch int64 + + // DoesNotExist specifies that the object must not exist in the bucket for + // the operation to occur. + // If DoesNotExist is false, it has no effect. + DoesNotExist bool + + // Metadata generation constraints. + // At most one of the following can be set to a non-zero value. + + // MetagenerationMatch specifies that the object must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the object must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *Conditions) validate(method string) error { + if *c == (Conditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if !c.isGenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) + } + if !c.isMetagenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +func (c *Conditions) isGenerationValid() bool { + n := 0 + if c.GenerationMatch != 0 { + n++ + } + if c.GenerationNotMatch != 0 { + n++ + } + if c.DoesNotExist { + n++ + } + return n <= 1 +} + +func (c *Conditions) isMetagenerationValid() bool { + return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
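+// For example, given a *raw.ObjectsGetCall it may invoke
+// call.IfGenerationMatch(n) by name via reflection; see setConditionField
+// below for the mechanism (a sketch of the behavior, not an exhaustive list).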
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setConditionField(cval, "Generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionField(cval, "IfGenerationMatch", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +// setConditionField sets a field on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +func setConditionField(call reflect.Value, name string, value interface{}) bool { + m := call.MethodByName(name) + if !m.IsValid() { + return false + } + m.Call([]reflect.Value{reflect.ValueOf(value)}) + return true +} + +// conditionsQuery returns the generation and conditions as a URL query +// string suitable for URL.RawQuery. It assumes that the conditions +// have been validated. +func conditionsQuery(gen int64, conds *Conditions) string { + // URL escapes are elided because integer strings are URL-safe. + var buf []byte + + appendParam := func(s string, n int64) { + if len(buf) > 0 { + buf = append(buf, '&') + } + buf = append(buf, s...) 
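+ // buf now ends with the "name=" prefix; append the decimal value.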
+ buf = strconv.AppendInt(buf, n, 10) + } + + if gen >= 0 { + appendParam("generation=", gen) + } + if conds == nil { + return string(buf) + } + switch { + case conds.GenerationMatch != 0: + appendParam("ifGenerationMatch=", conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) + case conds.DoesNotExist: + appendParam("ifGenerationMatch=", 0) + } + switch { + case conds.MetagenerationMatch != 0: + appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) + } + return string(buf) +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { + if key == nil { + return nil + } + // TODO(jbd): Ask the API team to return a more user-friendly error + // and avoid doing this check at the client level. + if len(key) != 32 { + return errors.New("storage: not a 32-byte AES-256 key") + } + var cs string + if copySource { + cs = "copy-source-" + } + headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) + keyHash := sha256.Sum256(key) + headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) + return nil +} + +// TODO(jbd): Add storage.objects.watch. diff --git a/vendor/cloud.google.com/go/storage/storage_test.go b/vendor/cloud.google.com/go/storage/storage_test.go new file mode 100644 index 0000000..23622a0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage_test.go @@ -0,0 +1,912 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "regexp" + "strings" + "testing" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" +) + +func TestHeaderSanitization(t *testing.T) { + t.Parallel() + var tests = []struct { + desc string + in []string + want []string + }{ + { + desc: "already sanitized headers should not be modified", + in: []string{"x-goog-header1:true", "x-goog-header2:0"}, + want: []string{"x-goog-header1:true", "x-goog-header2:0"}, + }, + { + desc: "sanitized headers should be sorted", + in: []string{"x-goog-header2:0", "x-goog-header1:true"}, + want: []string{"x-goog-header1:true", "x-goog-header2:0"}, + }, + { + desc: "non-canonical headers should be removed", + in: []string{"x-goog-header1:true", "x-goog-no-value", "non-canonical-header:not-of-use"}, + want: []string{"x-goog-header1:true"}, + }, + { + desc: "excluded canonical headers should be removed", + in: []string{"x-goog-header1:true", "x-goog-encryption-key:my_key", "x-goog-encryption-key-sha256:my_sha256"}, + want: []string{"x-goog-header1:true"}, + }, + { + desc: "dirty headers should be formatted correctly", + in: []string{" x-goog-header1 : \textra-spaces ", "X-Goog-Header2:CamelCaseValue"}, + want: []string{"x-goog-header1:extra-spaces", "x-goog-header2:CamelCaseValue"}, + }, + { + desc: "duplicate headers should be merged", + in: []string{"x-goog-header1:value1", "X-Goog-Header1:value2"}, + want: []string{"x-goog-header1:value1,value2"}, + }, + } + for _, test := range tests { + got := sanitizeHeaders(test.in) + if !testutil.Equal(got, test.want) { + t.Errorf("%s: got %v, want %v", test.desc, got, test.want) + } + } +} + +func TestSignedURL(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" 
+ + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "RfsHlPtbB2JUYjzCgNr2Mi%2BjggdEuL1V7E6N9o6aaqwVLBDuTv3I0%2B9" + + "x94E6rmmr%2FVgnmZigkIUxX%2Blfl7LgKf30uPGLt0mjKGH2p7r9ey1ONJ" + + "%2BhVec23FnTRcSgopglvHPuCMWU2oNJE%2F1y8EwWE27baHrG1RhRHbLVF" + + "bPpLZ9xTRFK20pluIkfHV00JGljB1imqQHXM%2B2XPWqBngLr%2FwqxLN7i" + + "FcUiqR8xQEOHF%2F2e7fbkTHPNq4TazaLZ8X0eZ3eFdJ55A5QmNi8atlN4W" + + "5q7Hvs0jcxElG3yqIbx439A995BkspLiAcA%2Fo4%2BxAwEMkGLICdbvakq" + + "3eEprNCojw%3D%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_PEMPrivateKey(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "TiyKD%2FgGb6Kh0kkb2iF%2FfF%2BnTx7L0J4YiZua8AcTmnidutePEGIU5" + + "NULYlrGl6l52gz4zqFb3VFfIRTcPXMdXnnFdMCDhz2QuJBUpsU1Ai9zlyTQ" + + "dkb6ShG03xz9%2BEXWAUQO4GBybJw%2FULASuv37xA00SwLdkqj8YdyS5II" + + "1lro%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_SignBytes(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + SignBytes: func(b []byte) ([]byte, error) { + return []byte("signed"), nil + }, + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "c2lnbmVk" // base64('signed') == 'c2lnbmVk' + if url != want { + t.Fatalf("Unexpected signed URL\ngot: %q\nwant: %q", url, want) + } +} + +func TestSignedURL_URLUnsafeObjectName(t *testing.T) { + t.Parallel() + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-goog-header1:true", "x-goog-header2:false"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C?" 
+ + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=bxVH1%2Bl%2" + + "BSxpnj3XuqKz6mOFk6M94Y%2B4w85J6FCmJan%2FNhGSpndP6fAw1uLHlOn%2F8xUaY%2F" + + "SfZ5GzcQ%2BbxOL1WA37yIwZ7xgLYlO%2ByAi3GuqMUmHZiNCai28emODXQ8RtWHvgv6dE" + + "SQ%2F0KpDMIWW7rYCaUa63UkUyeSQsKhrVqkIA%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_MissingOptions(t *testing.T) { + t.Parallel() + pk := dummyKey("rsa") + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + var tests = []struct { + opts *SignedURLOptions + errMsg string + }{ + { + &SignedURLOptions{}, + "missing required GoogleAccessID", + }, + { + &SignedURLOptions{GoogleAccessID: "access_id"}, + "exactly one of PrivateKey or SignedBytes must be set", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + SignBytes: func(b []byte) ([]byte, error) { return b, nil }, + PrivateKey: pk, + }, + "exactly one of PrivateKey or SignedBytes must be set", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + SignBytes: func(b []byte) ([]byte, error) { return b, nil }, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + Method: "PUT", + }, + "missing required expires", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + Method: "PUT", + Expires: expires, + MD5: "invalid", + }, + "invalid MD5 checksum", + }, + } + for _, test := range tests { + _, err := SignedURL("bucket", "name", test.opts) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected err: %v, found: %v", test.errMsg, err) + } + } +} + +func dummyKey(kind string) []byte { + slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind)) + if err != nil { + log.Fatal(err) + } + return slurp +} + +func TestCopyToMissingFields(t *testing.T) { + t.Parallel() + var tests = []struct { + srcBucket, srcName, destBucket, destName string + errMsg string + }{ + { + "mybucket", "", "mybucket", "destname", + "name is empty", + }, + { + "mybucket", "srcname", "mybucket", "", + "name is empty", + }, + { + "", "srcfile", "mybucket", "destname", + "name is empty", + }, + { + "mybucket", "srcfile", "", "destname", + "name is empty", + }, + } + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}})) + if err != nil { + panic(err) + } + for i, test := range tests { + src := client.Bucket(test.srcBucket).Object(test.srcName) + dst := client.Bucket(test.destBucket).Object(test.destName) + _, err := dst.CopierFrom(src).Run(ctx) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("CopyTo test #%v:\ngot err %q\nwant err %q", i, err, test.errMsg) + } + } +} + +func TestObjectNames(t *testing.T) { + t.Parallel() + // Naming requirements: https://cloud.google.com/storage/docs/bucket-naming + const maxLegalLength = 1024 + + type testT struct { + name, want string + } + tests := []testT{ + // Embedded characters important in URLs. + {"foo % bar", "foo%20%25%20bar"}, + {"foo ? 
bar", "foo%20%3F%20bar"}, + {"foo / bar", "foo%20/%20bar"}, + {"foo %?/ bar", "foo%20%25%3F/%20bar"}, + + // Non-Roman scripts + {"타코", "%ED%83%80%EC%BD%94"}, + {"世界", "%E4%B8%96%E7%95%8C"}, + + // Longest legal name + {strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)}, + + // Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode + {"foo \u000b bar", "foo%20%0B%20bar"}, + {"foo \u000c bar", "foo%20%0C%20bar"}, + {"foo \u0085 bar", "foo%20%C2%85%20bar"}, + {"foo \u2028 bar", "foo%20%E2%80%A8%20bar"}, + {"foo \u2029 bar", "foo%20%E2%80%A9%20bar"}, + + // Null byte. + {"foo \u0000 bar", "foo%20%00%20bar"}, + + // Non-control characters that are discouraged, but not forbidden, according to the documentation. + {"foo # bar", "foo%20%23%20bar"}, + {"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"}, + + // Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/ + {"foo \u212b bar", "foo%20%E2%84%AB%20bar"}, + {"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"}, + {"foo \u00c5 bar", "foo%20%C3%85%20bar"}, + + // Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10) + {"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"}, + {"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"}, + {"foo \uac00 bar", "foo%20%EA%B0%80%20bar"}, + } + + // C0 control characters not forbidden by the docs. + var runes []rune + for r := rune(0x01); r <= rune(0x1f); r++ { + if r != '\u000a' && r != '\u000d' { + runes = append(runes, r) + } + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"}) + + // C1 control characters, plus DEL. + runes = nil + for r := rune(0x7f); r <= rune(0x9f); r++ { + runes = append(runes, r) + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"}) + + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: "ICy5YqxZB1uWSwcVLSNLcA==", + Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-goog-header1", "x-goog-header2"}, + } + + for _, test := range tests { + g, err := SignedURL("bucket-name", test.name, opts) + if err != nil { + t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err) + } + if w := "/bucket-name/" + test.want; !strings.Contains(g, w) { + t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w) + } + } +} + +func TestCondition(t *testing.T) { + t.Parallel() + gotReq := make(chan *http.Request, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + gotReq <- r + w.WriteHeader(200) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + obj := c.Bucket("buck").Object("obj") + dst := c.Bucket("dstbuck").Object("dst") + tests := []struct { + fn func() + want string + }{ + { + func() { obj.Generation(1234).NewReader(ctx) }, + "GET /buck/obj?generation=1234", + }, + { + func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifGenerationMatch=1234", + }, + { + func() { obj.If(Conditions{GenerationNotMatch: 
1234}).NewReader(ctx) }, + "GET /buck/obj?ifGenerationNotMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationNotMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) }, + "GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full", + }, + + { + func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) }, + "PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full", + }, + { + func() { obj.Generation(1234).Delete(ctx) }, + "DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234", + }, + { + func() { + w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart", + }, + { + func() { + w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart", + }, + { + func() { + dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx) + }, + "POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full", + }, + } + + for i, tt := range tests { + tt.fn() + select { + case r := <-gotReq: + got := r.Method + " " + r.RequestURI + if got != tt.want { + t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want) + } + case <-time.After(5 * time.Second): + t.Fatalf("%d. timeout", i) + } + if err != nil { + t.Fatal(err) + } + } + + // Test an error, too: + err = obj.Generation(1234).NewWriter(ctx).Close() + if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") { + t.Errorf("want error about unsupported generation; got %v", err) + } +} + +func TestConditionErrors(t *testing.T) { + t.Parallel() + for _, conds := range []Conditions{ + {GenerationMatch: 0}, + {DoesNotExist: false}, // same as above, actually + {GenerationMatch: 1, GenerationNotMatch: 2}, + {GenerationNotMatch: 2, DoesNotExist: true}, + {MetagenerationMatch: 1, MetagenerationNotMatch: 2}, + } { + if err := conds.validate(""); err == nil { + t.Errorf("%+v: got nil, want error", conds) + } + } +} + +// Test object compose. 
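+// Compose concatenates up to 32 source objects into a single destination
+// object; these cases exercise request construction and validation.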
+func TestObjectCompose(t *testing.T) { + t.Parallel() + gotURL := make(chan string, 1) + gotBody := make(chan []byte, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + body, _ := ioutil.ReadAll(r.Body) + gotURL <- r.URL.String() + gotBody <- body + w.Write([]byte("{}")) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + desc string + dst *ObjectHandle + srcs []*ObjectHandle + attrs *ObjectAttrs + wantReq raw.ComposeRequest + wantURL string + wantErr bool + }{ + { + desc: "basic case", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{Bucket: "foo"}, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with object attrs", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + attrs: &ObjectAttrs{ + Name: "not-bar", + ContentType: "application/json", + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{ + Bucket: "foo", + Name: "not-bar", + ContentType: "application/json", + }, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with conditions", + dst: c.Bucket("foo").Object("bar").If(Conditions{ + GenerationMatch: 12, + MetagenerationMatch: 34, + }), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz").Generation(56), + c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{Bucket: "foo"}, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + { + Name: "baz", + Generation: 56, + }, + { + Name: "quux", + ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: 78, + }, + }, + }, + }, + }, + { + desc: "no sources", + dst: c.Bucket("foo").Object("bar"), + wantErr: true, + }, + { + desc: "destination, no bucket", + dst: c.Bucket("").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "destination, no object", + dst: c.Bucket("foo").Object(""), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, different bucket", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("otherbucket").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, no object", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object(""), + }, + wantErr: true, + }, + { + desc: "destination, bad condition", + dst: c.Bucket("foo").Object("bar").Generation(12), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, bad condition", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}), + }, + wantErr: true, + }, + } + + for _, tt := range testCases { + composer := tt.dst.ComposerFrom(tt.srcs...) 
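+ // Copy any optional destination attributes onto the composer.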
+ if tt.attrs != nil { + composer.ObjectAttrs = *tt.attrs + } + _, err := composer.Run(ctx) + if gotErr := err != nil; gotErr != tt.wantErr { + t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr) + continue + } + if tt.wantErr { + continue + } + url, body := <-gotURL, <-gotBody + if url != tt.wantURL { + t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL) + } + var req raw.ComposeRequest + if err := json.Unmarshal(body, &req); err != nil { + t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body) + } + if !testutil.Equal(req, tt.wantReq) { + // Print to JSON. + wantReq, _ := json.Marshal(tt.wantReq) + t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq) + } + } +} + +// Test that ObjectIterator's Next and NextPage methods correctly terminate +// if there is nothing to iterate over. +func TestEmptyObjectIterator(t *testing.T) { + t.Parallel() + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Bucket("b").Objects(ctx, nil) + _, err = it.Next() + if err != iterator.Done { + t.Errorf("got %v, want Done", err) + } +} + +// Test that BucketIterator's Next method correctly terminates if there is +// nothing to iterate over. +func TestEmptyBucketIterator(t *testing.T) { + t.Parallel() + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Buckets(ctx, "project") + _, err = it.Next() + if err != iterator.Done { + t.Errorf("got %v, want Done", err) + } + +} + +func TestCodecUint32(t *testing.T) { + t.Parallel() + for _, u := range []uint32{0, 1, 256, 0xFFFFFFFF} { + s := encodeUint32(u) + d, err := decodeUint32(s) + if err != nil { + t.Fatal(err) + } + if d != u { + t.Errorf("got %d, want input %d", d, u) + } + } +} + +func TestBucketAttrs(t *testing.T) { + for _, c := range []struct { + attrs BucketAttrs + raw raw.Bucket + }{{ + attrs: BucketAttrs{ + Lifecycle: Lifecycle{ + Rules: []LifecycleRule{{ + Action: LifecycleAction{ + Type: SetStorageClassAction, + StorageClass: "NEARLINE", + }, + Condition: LifecycleCondition{ + AgeInDays: 10, + Liveness: Live, + CreatedBefore: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + MatchesStorageClasses: []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"}, + NumNewerVersions: 3, + }, + }, { + Action: LifecycleAction{ + Type: DeleteAction, + }, + Condition: LifecycleCondition{ + AgeInDays: 30, + Liveness: Live, + CreatedBefore: time.Date(2017, 1, 2, 3, 4, 5, 6, time.UTC), + MatchesStorageClasses: []string{"NEARLINE"}, + NumNewerVersions: 10, + }, + }, { + Action: LifecycleAction{ + Type: DeleteAction, + }, + Condition: LifecycleCondition{ + Liveness: Archived, + }, + }}, + }, + }, + raw: raw.Bucket{ + Lifecycle: &raw.BucketLifecycle{ + Rule: []*raw.BucketLifecycleRule{{ + Action: &raw.BucketLifecycleRuleAction{ + Type: SetStorageClassAction, + StorageClass: "NEARLINE", + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: 10, + IsLive: googleapi.Bool(true), + CreatedBefore: "2017-01-02", + MatchesStorageClass: []string{"MULTI_REGIONAL", "REGIONAL", "STANDARD"}, + NumNewerVersions: 3, + }, + }, { + Action: 
&raw.BucketLifecycleRuleAction{ + Type: DeleteAction, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: 30, + IsLive: googleapi.Bool(true), + CreatedBefore: "2017-01-02", + MatchesStorageClass: []string{"NEARLINE"}, + NumNewerVersions: 10, + }, + }, { + Action: &raw.BucketLifecycleRuleAction{ + Type: DeleteAction, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + IsLive: googleapi.Bool(false), + }, + }}, + }, + }, + }} { + if got := c.attrs.toRawBucket(); !testutil.Equal(*got, c.raw) { + t.Errorf("toRawBucket: got %v, want %v", *got, c.raw) + } + } +} + +func TestUserProject(t *testing.T) { + // Verify that the userProject query param is sent. + t.Parallel() + ctx := context.Background() + gotURL := make(chan *url.URL, 1) + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + gotURL <- r.URL + if strings.Contains(r.URL.String(), "/rewriteTo/") { + res := &raw.RewriteResponse{Done: true} + bytes, err := res.MarshalJSON() + if err != nil { + t.Fatal(err) + } + w.Write(bytes) + } else { + fmt.Fprintf(w, "{}") + } + }) + defer close() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + + re := regexp.MustCompile(`\buserProject=p\b`) + b := client.Bucket("b").UserProject("p") + o := b.Object("o") + + check := func(msg string, f func()) { + f() + select { + case u := <-gotURL: + if !re.MatchString(u.RawQuery) { + t.Errorf("%s: query string %q does not contain userProject", msg, u.RawQuery) + } + case <-time.After(2 * time.Second): + t.Errorf("%s: timed out", msg) + } + } + + check("buckets.delete", func() { b.Delete(ctx) }) + check("buckets.get", func() { b.Attrs(ctx) }) + check("buckets.patch", func() { b.Update(ctx, BucketAttrsToUpdate{}) }) + check("storage.objects.compose", func() { o.ComposerFrom(b.Object("x")).Run(ctx) }) + check("storage.objects.delete", func() { o.Delete(ctx) }) + check("storage.objects.get", func() { o.Attrs(ctx) }) + check("storage.objects.insert", func() { o.NewWriter(ctx).Close() }) + check("storage.objects.list", func() { b.Objects(ctx, nil).Next() }) + check("storage.objects.patch", func() { o.Update(ctx, ObjectAttrsToUpdate{}) }) + check("storage.objects.rewrite", func() { o.CopierFrom(b.Object("x")).Run(ctx) }) + check("storage.objectAccessControls.list", func() { o.ACL().List(ctx) }) + check("storage.objectAccessControls.update", func() { o.ACL().Set(ctx, "", "") }) + check("storage.objectAccessControls.delete", func() { o.ACL().Delete(ctx, "") }) + check("storage.bucketAccessControls.list", func() { b.ACL().List(ctx) }) + check("storage.bucketAccessControls.update", func() { b.ACL().Set(ctx, "", "") }) + check("storage.bucketAccessControls.delete", func() { b.ACL().Delete(ctx, "") }) + check("storage.defaultObjectAccessControls.list", + func() { b.DefaultObjectACL().List(ctx) }) + check("storage.defaultObjectAccessControls.update", + func() { b.DefaultObjectACL().Set(ctx, "", "") }) + check("storage.defaultObjectAccessControls.delete", + func() { b.DefaultObjectACL().Delete(ctx, "") }) + check("buckets.getIamPolicy", func() { b.IAM().Policy(ctx) }) + check("buckets.setIamPolicy", func() { + p := &iam.Policy{} + p.Add("m", iam.Owner) + b.IAM().SetPolicy(ctx, p) + }) + check("buckets.testIamPermissions", func() { b.IAM().TestPermissions(ctx, nil) }) + check("storage.notifications.insert", func() { + b.AddNotification(ctx, &Notification{TopicProjectID: "p", TopicID: "t"}) + }) + check("storage.notifications.delete", func() { 
b.DeleteNotification(ctx, "n") }) + check("storage.notifications.list", func() { b.Notifications(ctx) }) +} + +func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { + ts := httptest.NewTLSServer(http.HandlerFunc(handler)) + tlsConf := &tls.Config{InsecureSkipVerify: true} + tr := &http.Transport{ + TLSClientConfig: tlsConf, + DialTLS: func(netw, addr string) (net.Conn, error) { + return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) + }, + } + return &http.Client{Transport: tr}, func() { + tr.CloseIdleConnections() + ts.Close() + } +} diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_pem b/vendor/cloud.google.com/go/storage/testdata/dummy_pem new file mode 100644 index 0000000..3428d44 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_pem @@ -0,0 +1,39 @@ +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +Key Attributes: +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx +64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY +kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB +AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs +KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ +7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i +Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ +5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G +ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ +EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS +NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI +/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 +TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C +-----END RSA PRIVATE KEY----- +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +-----BEGIN CERTIFICATE----- +MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE +AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex +MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y +NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 +czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU +ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp +gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx +6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ +BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB +ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T +lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv +qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 +-----END CERTIFICATE----- diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_rsa b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa new file mode 100644 index 0000000..4ce6678 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK 
+1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
+k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
+/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
+3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
+2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
+nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
+6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
+5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
+DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
+M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
+z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
+1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
+J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
+f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
+QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
+cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
+Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
+5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
new file mode 100644
index 0000000..28eb74a
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -0,0 +1,201 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"unicode/utf8"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+	// ObjectAttrs are optional attributes to set on the object. Any attributes
+	// must be initialized before the first Write call. Nil or zero-valued
+	// attributes are ignored.
+	ObjectAttrs
+
+	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
+	// to true in addition to setting the Writer's CRC32C field, because zero
+	// is a valid CRC and normally a zero would not be transmitted.
+	// If a CRC32C is sent, and the data written does not match the checksum,
+	// the write will be rejected.
+	SendCRC32C bool
+
+	// ChunkSize controls the maximum number of bytes of the object that the
+	// Writer will attempt to send to the server in a single request. Objects
+	// smaller than the size will be sent in a single request, while larger
+	// objects will be split over multiple requests. The size will be rounded up
+	// to the nearest multiple of 256K. If zero, chunking will be disabled and
+	// the object will be uploaded in a single request.
+	//
+	// ChunkSize will default to a reasonable value. Any custom configuration
+	// must be done before the first Write call.
+	ChunkSize int
+
+	// ProgressFunc can be used to monitor the progress of a large write
+	// operation. If ProgressFunc is not nil and writing requires multiple
+	// calls to the underlying service (see
+	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
+	// then ProgressFunc will be invoked after each call with the number of bytes of
+	// content copied so far.
+	//
+	// ProgressFunc should return quickly without blocking.
+	ProgressFunc func(int64)
+
+	ctx context.Context
+	o   *ObjectHandle
+
+	opened bool
+	pw     *io.PipeWriter
+
+	donec chan struct{} // closed after err and obj are set.
+	err   error
+	obj   *ObjectAttrs
+}
+
+func (w *Writer) open() error {
+	attrs := w.ObjectAttrs
+	// Check that the developer didn't change the object Name (this is unfortunate, but
+	// we don't want to store an object under the wrong name).
+	if attrs.Name != w.o.object {
+		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+	}
+	if !utf8.ValidString(attrs.Name) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+	}
+	pr, pw := io.Pipe()
+	w.pw = pw
+	w.opened = true
+
+	if w.ChunkSize < 0 {
+		return errors.New("storage: Writer.ChunkSize must be non-negative")
+	}
+	mediaOpts := []googleapi.MediaOption{
+		googleapi.ChunkSize(w.ChunkSize),
+	}
+	if c := attrs.ContentType; c != "" {
+		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+	}
+
+	go func() {
+		defer close(w.donec)
+
+		rawObj := attrs.toRawObject(w.o.bucket)
+		if w.SendCRC32C {
+			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
+		}
+		if w.MD5 != nil {
+			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
+		}
+		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
+			Media(pr, mediaOpts...).
+			Projection("full").
+			Context(w.ctx)
+		if w.ProgressFunc != nil {
+			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
+		}
+		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
+			w.err = err
+			pr.CloseWithError(w.err)
+			return
+		}
+		var resp *raw.Object
+		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
+		if err == nil {
+			if w.o.userProject != "" {
+				call.UserProject(w.o.userProject)
+			}
+			setClientHeader(call.Header())
+			// If the chunk size is zero, then no chunking is done on the Reader,
+			// which means we cannot retry: the first call will read the data, and if
+			// it fails, there is no way to re-read.
+			if w.ChunkSize == 0 {
+				resp, err = call.Do()
+			} else {
+				// We will only retry here if the initial POST, which obtains a URI for
+				// the resumable upload, fails with a retryable error. The upload itself
+				// has its own retry logic.
+				err = runWithRetry(w.ctx, func() error {
+					var err2 error
+					resp, err2 = call.Do()
+					return err2
+				})
+			}
+		}
+		if err != nil {
+			w.err = err
+			pr.CloseWithError(w.err)
+			return
+		}
+		w.obj = newObject(resp)
+	}()
+	return nil
+}
+
+// Write appends to w. It implements the io.Writer interface.
+//
+// Since writes happen asynchronously, Write may return a nil
+// error even though the write failed (or will fail). Always
+// use the error returned from Writer.Close to determine if
+// the upload was successful.
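+//
+// A minimal usage sketch, assuming a *Client named client and a context ctx
+// are already in scope ("my-bucket" and "obj" are placeholder names):
+//
+//	w := client.Bucket("my-bucket").Object("obj").NewWriter(ctx)
+//	if _, err := w.Write([]byte("hello world")); err != nil {
+//		// handle err; the real failure may only surface at Close
+//	}
+//	if err := w.Close(); err != nil {
+//		// handle the failed upload
+//	}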
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return 0, err
+		}
+	}
+	return w.pw.Write(p)
+}
+
+// Close completes the write operation and flushes any buffered data.
+// If Close doesn't return an error, metadata about the written object
+// can be retrieved by calling Attrs.
+func (w *Writer) Close() error {
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return err
+		}
+	}
+	if err := w.pw.Close(); err != nil {
+		return err
+	}
+	<-w.donec
+	return w.err
+}
+
+// CloseWithError aborts the write operation with the provided error.
+// CloseWithError always returns nil.
+func (w *Writer) CloseWithError(err error) error {
+	if !w.opened {
+		return nil
+	}
+	return w.pw.CloseWithError(err)
+}
+
+// Attrs returns metadata about a successfully-written object.
+// It's only valid to call it after Close returns nil.
+func (w *Writer) Attrs() *ObjectAttrs {
+	return w.obj
+}
diff --git a/vendor/cloud.google.com/go/storage/writer_test.go b/vendor/cloud.google.com/go/storage/writer_test.go
new file mode 100644
index 0000000..c04b4a5
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/writer_test.go
@@ -0,0 +1,146 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"cloud.google.com/go/internal/testutil"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/api/option"
+)
+
+type fakeTransport struct {
+	gotReq  *http.Request
+	gotBody []byte
+	results []transportResult
+}
+
+type transportResult struct {
+	res *http.Response
+	err error
+}
+
+func (t *fakeTransport) addResult(res *http.Response, err error) {
+	t.results = append(t.results, transportResult{res, err})
+}
+
+func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	t.gotReq = req
+	t.gotBody = nil
+	if req.Body != nil {
+		bytes, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			return nil, err
+		}
+		t.gotBody = bytes
+	}
+	if len(t.results) == 0 {
+		return nil, fmt.Errorf("error handling request")
+	}
+	result := t.results[0]
+	t.results = t.results[1:]
+	return result.res, result.err
+}
+
+func TestErrorOnObjectsInsertCall(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	const contents = "hello world"
+
+	doWrite := func(hc *http.Client) *Writer {
+		client, err := NewClient(ctx, option.WithHTTPClient(hc))
+		if err != nil {
+			t.Fatalf("error when creating client: %v", err)
+		}
+		wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx)
+		wc.ContentType = "text/plain"
+
+		// We can't check that the Write fails, since it depends on the write to the
+		// underlying fakeTransport failing, which is racy.
+ wc.Write([]byte(contents)) + return wc + } + + wc := doWrite(&http.Client{Transport: &fakeTransport{}}) + // Close must always return an error though since it waits for the transport to + // have closed. + if err := wc.Close(); err == nil { + t.Errorf("expected error on close, got nil") + } + + // Retry on 5xx + ft := &fakeTransport{} + ft.addResult(&http.Response{ + StatusCode: 503, + Body: ioutil.NopCloser(&bytes.Buffer{}), + }, nil) + ft.addResult(&http.Response{ + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + }, nil) + wc = doWrite(&http.Client{Transport: ft}) + if err := wc.Close(); err != nil { + t.Errorf("got %v, want nil", err) + } + got := string(ft.gotBody) + if !strings.Contains(got, contents) { + t.Errorf("got body %q, which does not contain %q", got, contents) + } +} + +func TestEncryption(t *testing.T) { + t.Parallel() + ctx := context.Background() + ft := &fakeTransport{} + hc := &http.Client{Transport: ft} + client, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatalf("error when creating client: %v", err) + } + obj := client.Bucket("bucketname").Object("filename1") + key := []byte("secret-key-that-is-32-bytes-long") + wc := obj.Key(key).NewWriter(ctx) + // TODO(jba): use something other than fakeTransport, which always returns error. + wc.Write([]byte("hello world")) + wc.Close() + if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want { + t.Errorf("algorithm: got %q, want %q", got, want) + } + gotKey, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key")) + if err != nil { + t.Fatalf("decoding key: %v", err) + } + if !testutil.Equal(gotKey, key) { + t.Errorf("key: got %v, want %v", gotKey, key) + } + wantHash := sha256.Sum256(key) + gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256")) + if err != nil { + t.Fatalf("decoding hash: %v", err) + } + if !testutil.Equal(gotHash, wantHash[:]) { // wantHash is an array + t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash) + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go b/vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go new file mode 100644 index 0000000..3e90449 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/ListTraces_smoke_test.go @@ -0,0 +1,65 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package trace + +import ( + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestTraceServiceSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var projectId2 string = projectId + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId2, + } + + iter := c.ListTraces(ctx, request) + if _, err := iter.Next(); err != nil && err != iterator.Done { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/doc.go b/vendor/cloud.google.com/go/trace/apiv1/doc.go new file mode 100644 index 0000000..a56e206 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/doc.go @@ -0,0 +1,54 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package trace is an auto-generated package for the +// Stackdriver Trace API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Sends application trace data to Stackdriver Trace for viewing. Trace data +// is +// collected for all App Engine applications by default. Trace data from +// other +// applications can be provided using this API. +// +// Use the client at cloud.google.com/go/trace in preference to this. +package trace // import "cloud.google.com/go/trace/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/trace.readonly", + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/mock_test.go b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go new file mode 100644 index 0000000..a8b6765 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go @@ -0,0 +1,321 @@ +// Copyright 2017, Google LLC All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockTraceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + cloudtracepb.TraceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockTraceServer) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest) (*cloudtracepb.ListTracesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.ListTracesResponse), nil +} + +func (s *mockTraceServer) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest) (*cloudtracepb.Trace, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.Trace), nil +} + +func (s *mockTraceServer) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockTrace mockTraceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + cloudtracepb.RegisterTraceServiceServer(serv, &mockTrace) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestTraceServicePatchTraces(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} + var request = &cloudtracepb.PatchTracesRequest{ + ProjectId: projectId, + Traces: traces, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.PatchTraces(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestTraceServicePatchTracesError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} + var request = &cloudtracepb.PatchTracesRequest{ + ProjectId: projectId, + Traces: traces, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.PatchTraces(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestTraceServiceGetTrace(t *testing.T) { + var projectId2 string = "projectId2939242356" + var traceId2 string = "traceId2987826376" + var expectedResponse = &cloudtracepb.Trace{ + ProjectId: projectId2, + TraceId: traceId2, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var traceId string = "traceId1270300245" + var request = &cloudtracepb.GetTraceRequest{ + ProjectId: projectId, + TraceId: traceId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTrace(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceGetTraceError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var traceId string = "traceId1270300245" + var request = &cloudtracepb.GetTraceRequest{ + ProjectId: projectId, + TraceId: traceId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTrace(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c 
!= errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestTraceServiceListTraces(t *testing.T) { + var nextPageToken string = "" + var tracesElement *cloudtracepb.Trace = &cloudtracepb.Trace{} + var traces = []*cloudtracepb.Trace{tracesElement} + var expectedResponse = &cloudtracepb.ListTracesResponse{ + NextPageToken: nextPageToken, + Traces: traces, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTraces(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Traces[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceListTracesError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var projectId string = "projectId-1969970175" + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTraces(context.Background(), request).Next() + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go new file mode 100644 index 0000000..f7d4dad --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go @@ -0,0 +1,235 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
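+//
+// As a hedged sketch, the defaults can be replaced after NewClient using the
+// same gax helpers this file already imports (the backoff values here are
+// illustrative, not recommendations):
+//
+//	c.CallOptions.ListTraces = []gax.CallOption{
+//		gax.WithRetry(func() gax.Retryer {
+//			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//				Initial:    200 * time.Millisecond,
+//				Max:        5 * time.Second,
+//				Multiplier: 1.5,
+//			})
+//		}),
+//	}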
+type CallOptions struct { + PatchTraces []gax.CallOption + GetTrace []gax.CallOption + ListTraces []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + PatchTraces: retry[[2]string{"default", "idempotent"}], + GetTrace: retry[[2]string{"default", "idempotent"}], + ListTraces: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. Spans for a single trace +// may span multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// PatchTraces sends new traces to Stackdriver Trace or updates existing traces. If the ID +// of a trace that you send matches that of an existing trace, any fields +// in the existing trace and its spans are overwritten by the provided values, +// and any new fields provided are merged with the existing trace data. If the +// ID does not match, a new trace is created. +func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.PatchTraces[0:len(c.CallOptions.PatchTraces):len(c.CallOptions.PatchTraces)], opts...) 
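+	// Note: the three-index slice expression above caps the slice's capacity
+	// at its length, so the append copies into a fresh backing array rather
+	// than mutating the shared defaults stored in c.CallOptions.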
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		_, err = c.client.PatchTraces(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+// GetTrace gets a single trace by its ID.
+func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest, opts ...gax.CallOption) (*cloudtracepb.Trace, error) {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.GetTrace[0:len(c.CallOptions.GetTrace):len(c.CallOptions.GetTrace)], opts...)
+	var resp *cloudtracepb.Trace
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.client.GetTrace(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ListTraces returns a list of traces that match the specified filter conditions.
+func (c *Client) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest, opts ...gax.CallOption) *TraceIterator {
+	ctx = insertMetadata(ctx, c.xGoogMetadata)
+	opts = append(c.CallOptions.ListTraces[0:len(c.CallOptions.ListTraces):len(c.CallOptions.ListTraces)], opts...)
+	it := &TraceIterator{}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtracepb.Trace, string, error) {
+		var resp *cloudtracepb.ListTracesResponse
+		req.PageToken = pageToken
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.client.ListTraces(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+		return resp.Traces, resp.NextPageToken, nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	return it
+}
+
+// TraceIterator manages a stream of *cloudtracepb.Trace.
+type TraceIterator struct {
+	items    []*cloudtracepb.Trace
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+
+	// InternalFetch is for use by the Google Cloud Libraries only.
+	// It is not part of the stable interface of this package.
+	//
+	// InternalFetch returns results from a single call to the underlying RPC.
+	// The number of results is no greater than pageSize.
+	// If there are no more results, nextPageToken is empty and err is nil.
+	InternalFetch func(pageSize int, pageToken string) (results []*cloudtracepb.Trace, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TraceIterator) PageInfo() *iterator.PageInfo {
+	return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
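+//
+// The usual consumption loop, mirroring the package example tests that
+// appear further below:
+//
+//	for {
+//		tr, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// handle the error
+//		}
+//		_ = tr // use the trace
+//	}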
+func (it *TraceIterator) Next() (*cloudtracepb.Trace, error) { + var item *cloudtracepb.Trace + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TraceIterator) bufLen() int { + return len(it.items) +} + +func (it *TraceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go new file mode 100644 index 0000000..54a2455 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go @@ -0,0 +1,92 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace_test + +import ( + "cloud.google.com/go/trace/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_PatchTraces() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.PatchTracesRequest{ + // TODO: Fill request struct fields. + } + err = c.PatchTraces(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_GetTrace() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.GetTraceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTrace(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListTraces() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.ListTracesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTraces(ctx, req) + for { + resp, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go new file mode 100644 index 0000000..2bb7371 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package trace is an auto-generated package for the +// Stackdriver Trace API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Sends application trace data to Stackdriver Trace for viewing. Trace data +// is +// collected for all App Engine applications by default. Trace data from +// other +// applications can be provided using this API. +package trace // import "cloud.google.com/go/trace/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append", + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/mock_test.go b/vendor/cloud.google.com/go/trace/apiv2/mock_test.go new file mode 100644 index 0000000..a4bc7b8 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/mock_test.go @@ -0,0 +1,252 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + emptypb "github.com/golang/protobuf/ptypes/empty" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockTraceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + cloudtracepb.TraceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockTraceServer) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func (s *mockTraceServer) CreateSpan(ctx context.Context, req *cloudtracepb.Span) (*cloudtracepb.Span, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.Span), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockTrace mockTraceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + cloudtracepb.RegisterTraceServiceServer(serv, &mockTrace) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestTraceServiceBatchWriteSpans(t *testing.T) { + var expectedResponse *emptypb.Empty = &emptypb.Empty{} + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var formattedName string = ProjectPath("[PROJECT]") + var spans []*cloudtracepb.Span = nil + var request = &cloudtracepb.BatchWriteSpansRequest{ + Name: formattedName, + Spans: spans, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.BatchWriteSpans(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestTraceServiceBatchWriteSpansError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var formattedName string = ProjectPath("[PROJECT]") + var spans []*cloudtracepb.Span = nil + var request = &cloudtracepb.BatchWriteSpansRequest{ + Name: formattedName, + Spans: spans, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.BatchWriteSpans(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestTraceServiceCreateSpan(t *testing.T) { + var name2 string = "name2-1052831874" + var spanId2 string = "spanId2-643891741" + var parentSpanId string = "parentSpanId-1757797477" + var expectedResponse = &cloudtracepb.Span{ + Name: name2, + SpanId: spanId2, + ParentSpanId: parentSpanId, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var formattedName string = SpanPath("[PROJECT]", "[TRACE]", "[SPAN]") + var spanId 
string = "spanId-2011840976" + var displayName *cloudtracepb.TruncatableString = &cloudtracepb.TruncatableString{} + var startTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var endTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var request = &cloudtracepb.Span{ + Name: formattedName, + SpanId: spanId, + DisplayName: displayName, + StartTime: startTime, + EndTime: endTime, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSpan(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceCreateSpanError(t *testing.T) { + errCode := codes.PermissionDenied + mockTrace.err = gstatus.Error(errCode, "test error") + + var formattedName string = SpanPath("[PROJECT]", "[TRACE]", "[SPAN]") + var spanId string = "spanId-2011840976" + var displayName *cloudtracepb.TruncatableString = &cloudtracepb.TruncatableString{} + var startTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var endTime *timestamppb.Timestamp = ×tamppb.Timestamp{} + var request = &cloudtracepb.Span{ + Name: formattedName, + SpanId: spanId, + DisplayName: displayName, + StartTime: startTime, + EndTime: endTime, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSpan(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go new file mode 100644 index 0000000..0e3457d --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go @@ -0,0 +1,171 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
+type CallOptions struct { + BatchWriteSpans []gax.CallOption + CreateSpan []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + BatchWriteSpans: retry[[2]string{"default", "non_idempotent"}], + CreateSpan: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. A single trace may +// contain span(s) from multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// ProjectPath returns the path for the project resource. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SpanPath returns the path for the span resource. +func SpanPath(project, trace, span string) string { + return "" + + "projects/" + + project + + "/traces/" + + trace + + "/spans/" + + span + + "" +} + +// BatchWriteSpans sends new spans to new or existing traces. You cannot update +// existing spans. +func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...) 
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// CreateSpan creates a new span. +func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...) + var resp *cloudtracepb.Span + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go new file mode 100644 index 0000000..cbde0ba --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client_example_test.go @@ -0,0 +1,67 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace_test + +import ( + "cloud.google.com/go/trace/apiv2" + "golang.org/x/net/context" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_BatchWriteSpans() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.BatchWriteSpansRequest{ + // TODO: Fill request struct fields. + } + err = c.BatchWriteSpans(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateSpan() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.Span{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSpan(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/grpc.go b/vendor/cloud.google.com/go/trace/grpc.go new file mode 100644 index 0000000..e78f4a2 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/grpc.go @@ -0,0 +1,108 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/hex" + "fmt" + + "cloud.google.com/go/internal/tracecontext" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const grpcMetadataKey = "grpc-trace-bin" + +// GRPCClientInterceptor returns a grpc.UnaryClientInterceptor that traces all outgoing requests from a gRPC client. +// The calling context should already have a *trace.Span; a child span will be +// created for the outgoing gRPC call. If the calling context doesn't have a span, +// the call will not be traced. If the client is nil, then the interceptor just +// passes through the request. +// +// The functionality in gRPC that this feature relies on is currently experimental. +func (c *Client) GRPCClientInterceptor() grpc.UnaryClientInterceptor { + if c == nil { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return invoker(ctx, method, req, reply, cc, opts...) + } + } + return grpc.UnaryClientInterceptor(c.grpcUnaryInterceptor) +} + +func (c *Client) grpcUnaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + // TODO: also intercept streams. + span := FromContext(ctx).NewChild(method) + if span == nil { + span = c.NewSpan(method) + } + defer span.Finish() + + traceContext := make([]byte, tracecontext.Len) + // traceID is a hex-encoded 128-bit value. + // TODO(jbd): Decode trace IDs upon arrival and + // represent trace IDs with 16 bytes internally. + tid, err := hex.DecodeString(span.trace.traceID) + if err != nil { + return invoker(ctx, method, req, reply, cc, opts...) + } + tracecontext.Encode(traceContext, tid, span.span.SpanId, byte(span.trace.globalOptions)) + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.Pairs(grpcMetadataKey, string(traceContext)) + } else { + md = md.Copy() // metadata is immutable, copy. + md[grpcMetadataKey] = []string{string(traceContext)} + } + ctx = metadata.NewOutgoingContext(ctx, md) + + err = invoker(ctx, method, req, reply, cc, opts...) + if err != nil { + // TODO: standardize gRPC label names? + span.SetLabel("error", err.Error()) + } + return err +} + +// GRPCServerInterceptor returns a grpc.UnaryServerInterceptor that enables the tracing of the incoming +// gRPC calls. Incoming call's context can be used to extract the span on servers that enabled this option: +// +// span := trace.FromContext(ctx) +// +// If the client is nil, then the interceptor just invokes the handler. +// +// The functionality in gRPC that this feature relies on is currently experimental. +func (c *Client) GRPCServerInterceptor() grpc.UnaryServerInterceptor { + if c == nil { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return handler(ctx, req) + } + } + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + md, _ := metadata.FromIncomingContext(ctx) + var traceHeader string + if header, ok := md[grpcMetadataKey]; ok { + traceID, spanID, opts, ok := tracecontext.Decode([]byte(header[0])) + if ok { + // TODO(jbd): Generate a span directly from string(traceID), spanID and opts. 
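+				// Re-encode the decoded binary trace context in the textual
+				// X-Cloud-Trace-Context form, "<hex trace ID>/<span ID>;o=<options>",
+				// so that SpanFromHeader below can parse it.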
+ traceHeader = fmt.Sprintf("%x/%d;o=%d", traceID, spanID, opts) + } + } + span := c.SpanFromHeader(info.FullMethod, traceHeader) + defer span.Finish() + ctx = NewContext(ctx, span) + return handler(ctx, req) + } +} diff --git a/vendor/cloud.google.com/go/trace/grpc_test.go b/vendor/cloud.google.com/go/trace/grpc_test.go new file mode 100644 index 0000000..bf4733c --- /dev/null +++ b/vendor/cloud.google.com/go/trace/grpc_test.go @@ -0,0 +1,180 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "io/ioutil" + "log" + "net" + "net/http" + "strings" + "testing" + + pb "cloud.google.com/go/trace/testdata/helloworld" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func TestGRPCInterceptors(t *testing.T) { + t.Skip("hangs forever for go < 1.9") + + tc := newTestClient(&noopTransport{}) + + // default sampling with global=1. + parent := tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=1") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if !in.Traced() { + t.Errorf("incoming span is not traced; want traced") + } + }) + + // default sampling with global=0. + parent = tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=0") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if in.Traced() { + t.Errorf("incoming span is traced; want not traced") + } + }) + + // sampling all with global=1. + all, _ := NewLimitedSampler(1.0, 1<<32) + tc.SetSamplingPolicy(all) + parent = tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=1") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if !in.Traced() { + t.Errorf("incoming span is not traced; want traced") + } + }) + + // sampling none with global=1. 
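+	// A zero-fraction, zero-qps sampler returns a zero Decision for every
+	// request, so tracing is disabled locally even though the incoming header
+	// carries o=1.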
+ none, _ := NewLimitedSampler(0, 0) + tc.SetSamplingPolicy(none) + parent = tc.SpanFromHeader("parent", "7f27601f17b7a2873739efd18ff83872/123;o=1") + testGRPCInterceptor(t, tc, parent, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if got, want := in.TraceID(), out.TraceID(); got != want { + t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) + } + if in.Traced() { + t.Errorf("incoming span is traced; want not traced") + } + }) + + // sampling all with no parent span. + tc.SetSamplingPolicy(all) + testGRPCInterceptor(t, tc, nil, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if in.TraceID() == "" { + t.Errorf("incoming call TraceID is empty") + } + if !in.Traced() { + t.Errorf("incoming span is not traced; want traced") + } + }) + + // sampling none with no parent span. + tc.SetSamplingPolicy(none) + testGRPCInterceptor(t, tc, nil, func(t *testing.T, out, in *Span) { + if in == nil { + t.Fatalf("missing span in the incoming context") + } + if in.TraceID() == "" { + t.Errorf("incoming call TraceID is empty") + } + if in.Traced() { + t.Errorf("incoming span is traced; want not traced") + } + }) +} + +func testGRPCInterceptor(t *testing.T, tc *Client, parent *Span, assert func(t *testing.T, out, in *Span)) { + incomingCh := make(chan *Span, 1) + addrCh := make(chan net.Addr, 1) + go func() { + lis, err := net.Listen("tcp", "") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + addrCh <- lis.Addr() + + s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) + pb.RegisterGreeterServer(s, &grpcServer{ + fn: func(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + incomingCh <- FromContext(ctx) + return &pb.HelloReply{}, nil + }, + }) + if err := s.Serve(lis); err != nil { + t.Fatalf("Failed to serve: %v", err) + } + }() + + addr := <-addrCh + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) + if err != nil { + t.Fatalf("Did not connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + outgoingCtx := NewContext(context.Background(), parent) + _, err = c.SayHello(outgoingCtx, &pb.HelloRequest{}) + if err != nil { + log.Fatalf("Could not SayHello: %v", err) + } + + assert(t, parent, <-incomingCh) +} + +type noopTransport struct{} + +func (rt *noopTransport) RoundTrip(req *http.Request) (*http.Response, error) { + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +type grpcServer struct { + fn func(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) +} + +func (s *grpcServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + return s.fn(ctx, in) +} diff --git a/vendor/cloud.google.com/go/trace/http.go b/vendor/cloud.google.com/go/trace/http.go new file mode 100644 index 0000000..290d139 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/http.go @@ -0,0 +1,107 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package trace + +import ( + "net/http" +) + +// Transport is an http.RoundTripper that traces the outgoing requests. +// +// Transport is safe for concurrent usage. +type Transport struct { + // Base is the base http.RoundTripper to be used to do the actual request. + // + // Optional. If nil, http.DefaultTransport is used. + Base http.RoundTripper +} + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t Transport) RoundTrip(req *http.Request) (*http.Response, error) { + span := FromContext(req.Context()).NewRemoteChild(req) + resp, err := t.base().RoundTrip(req) + + // TODO(jbd): Is it possible to defer the span.Finish? + // In cases where RoundTrip panics, we still can finish the span. + span.Finish(WithResponse(resp)) + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} + +func (t Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// HTTPHandler returns a http.Handler from the given handler +// that is aware of the incoming request's span. +// The span can be extracted from the incoming request in handler +// functions from incoming request's context: +// +// span := trace.FromContext(r.Context()) +// +// The span will be auto finished by the handler. +func (c *Client) HTTPHandler(h http.Handler) http.Handler { + if c == nil { + return h + } + return &handler{traceClient: c, handler: h} +} + +type handler struct { + traceClient *Client + handler http.Handler +} + +func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + traceID, parentSpanID, options, optionsOk, ok := traceInfoFromHeader(r.Header.Get(httpHeader)) + if !ok { + traceID = nextTraceID() + } + t := &trace{ + traceID: traceID, + client: h.traceClient, + globalOptions: options, + localOptions: options, + } + span := startNewChildWithRequest(r, t, parentSpanID) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, h.traceClient.policy, ok) + defer span.Finish() + + r = r.WithContext(NewContext(r.Context(), span)) + if ok && !optionsOk { + // Inject the trace context back to the response with the sampling options. + // TODO(jbd): Remove when there is a better way to report the client's sampling. + w.Header().Set(httpHeader, spanHeader(traceID, parentSpanID, span.trace.localOptions)) + } + h.handler.ServeHTTP(w, r) +} diff --git a/vendor/cloud.google.com/go/trace/http_test.go b/vendor/cloud.google.com/go/trace/http_test.go new file mode 100644 index 0000000..54c2e17 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/http_test.go @@ -0,0 +1,151 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package trace + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +type recorderTransport struct { + ch chan *http.Request +} + +func (rt *recorderTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt.ch <- req + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +func TestNewHTTPClient(t *testing.T) { + rt := &recorderTransport{ + ch: make(chan *http.Request, 1), + } + + tc := newTestClient(&noopTransport{}) + client := &http.Client{ + Transport: &Transport{ + Base: rt, + }, + } + req, _ := http.NewRequest("GET", "http://example.com", nil) + + t.Run("NoTrace", func(t *testing.T) { + _, err := client.Do(req) + if err != nil { + t.Error(err) + } + outgoing := <-rt.ch + if got, want := outgoing.Header.Get(httpHeader), ""; want != got { + t.Errorf("got trace header = %q; want none", got) + } + }) + + t.Run("Trace", func(t *testing.T) { + span := tc.NewSpan("/foo") + + req = req.WithContext(NewContext(req.Context(), span)) + _, err := client.Do(req) + if err != nil { + t.Error(err) + } + outgoing := <-rt.ch + + s := tc.SpanFromHeader("/foo", outgoing.Header.Get(httpHeader)) + if got, want := s.TraceID(), span.TraceID(); got != want { + t.Errorf("trace ID = %q; want %q", got, want) + } + }) +} + +func TestHTTPHandlerNoTrace(t *testing.T) { + tc := newTestClient(&noopTransport{}) + client := &http.Client{ + Transport: &Transport{}, + } + handler := tc.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + span := FromContext(r.Context()) + if span == nil { + t.Errorf("span is nil; want non-nil span") + } + })) + + ts := httptest.NewServer(handler) + defer ts.Close() + + req, _ := http.NewRequest("GET", ts.URL, nil) + _, err := client.Do(req) + if err != nil { + t.Fatal(err) + } +} + +func TestHTTPHandler_response(t *testing.T) { + tc := newTestClient(&noopTransport{}) + p, _ := NewLimitedSampler(1, 1<<32) // all + tc.SetSamplingPolicy(p) + handler := tc.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + ts := httptest.NewServer(handler) + defer ts.Close() + + tests := []struct { + name string + traceHeader string + wantTraceHeader string + }{ + { + name: "no global", + traceHeader: "0123456789ABCDEF0123456789ABCDEF/123", + wantTraceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=1", + }, + { + name: "global=1", + traceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=1", + wantTraceHeader: "", + }, + { + name: "global=0", + traceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=0", + wantTraceHeader: "", + }, + { + name: "no trace context", + traceHeader: "", + wantTraceHeader: "", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", ts.URL, nil) + req.Header.Set(httpHeader, tt.traceHeader) + + res, err := http.DefaultClient.Do(req) + if err != nil { + t.Errorf("failed to request: %v", err) + } + if got, 
want := res.Header.Get(httpHeader), tt.wantTraceHeader; got != want { + t.Errorf("%v: response context header = %q; want %q", tt.name, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/trace/httpexample_test.go b/vendor/cloud.google.com/go/trace/httpexample_test.go new file mode 100644 index 0000000..4c29036 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/httpexample_test.go @@ -0,0 +1,57 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +package trace_test + +import ( + "log" + "net/http" + + "cloud.google.com/go/trace" +) + +var traceClient *trace.Client + +func ExampleHTTPClient_Do() { + client := http.Client{ + Transport: &trace.Transport{}, + } + span := traceClient.NewSpan("/foo") // traceClient is a *trace.Client + + req, _ := http.NewRequest("GET", "https://metadata/users", nil) + req = req.WithContext(trace.NewContext(req.Context(), span)) + + if _, err := client.Do(req); err != nil { + log.Fatal(err) + } +} + +func ExampleClient_HTTPHandler() { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + client := http.Client{ + Transport: &trace.Transport{}, + } + + req, _ := http.NewRequest("GET", "https://metadata/users", nil) + req = req.WithContext(r.Context()) + + // The outgoing request will be traced with r's trace ID. + if _, err := client.Do(req); err != nil { + log.Fatal(err) + } + }) + http.Handle("/foo", traceClient.HTTPHandler(handler)) // traceClient is a *trace.Client +} diff --git a/vendor/cloud.google.com/go/trace/sampling.go b/vendor/cloud.google.com/go/trace/sampling.go new file mode 100644 index 0000000..d609290 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/sampling.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type SamplingPolicy interface { + // Sample returns a Decision. + // If Trace is false in the returned Decision, then the Decision should be + // the zero value. + Sample(p Parameters) Decision +} + +// Parameters contains the values passed to a SamplingPolicy's Sample method. +type Parameters struct { + HasTraceHeader bool // whether the incoming request has a valid X-Cloud-Trace-Context header. +} + +// Decision is the value returned by a call to a SamplingPolicy's Sample method. 
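+// The zero Decision means the request will not be traced.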
+type Decision struct {
+	Trace  bool    // Whether to trace the request.
+	Sample bool    // Whether the trace is included in the random sample.
+	Policy string  // Name of the sampling policy.
+	Weight float64 // Sample weight to be used in statistical calculations.
+}
+
+type sampler struct {
+	fraction float64
+	skipped  float64
+	*rate.Limiter
+	*rand.Rand
+	sync.Mutex
+}
+
+func (s *sampler) Sample(p Parameters) Decision {
+	s.Lock()
+	x := s.Float64()
+	d := s.sample(p, time.Now(), x)
+	s.Unlock()
+	return d
+}
+
+// sample contains the deterministic, time-independent logic of Sample.
+func (s *sampler) sample(p Parameters, now time.Time, x float64) (d Decision) {
+	d.Sample = x < s.fraction
+	d.Trace = p.HasTraceHeader || d.Sample
+	if !d.Trace {
+		// We have no reason to trace this request.
+		return Decision{}
+	}
+	// We test separately that the rate limit is not tiny before calling AllowN,
+	// because of overflow problems in x/time/rate.
+	if s.Limit() < 1e-9 || !s.AllowN(now, 1) {
+		// Rejected by the rate limit.
+		if d.Sample {
+			s.skipped++
+		}
+		return Decision{}
+	}
+	if d.Sample {
+		d.Policy, d.Weight = "default", (1.0+s.skipped)/s.fraction
+		s.skipped = 0.0
+	}
+	return
+}
+
+// NewLimitedSampler returns a sampling policy that randomly samples a given
+// fraction of requests. It also enforces a limit on the number of traces per
+// second. It tries to trace every request with a trace header, but will not
+// exceed the qps limit to do it.
+func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) {
+	if !(fraction >= 0) {
+		return nil, fmt.Errorf("invalid fraction %f", fraction)
+	}
+	if !(maxqps >= 0) {
+		return nil, fmt.Errorf("invalid maxqps %f", maxqps)
+	}
+	// Set a limit on the number of accumulated "tokens", to limit bursts of
+	// traced requests. Use one more than a second's worth of tokens, or 100,
+	// whichever is smaller.
+	// See https://godoc.org/golang.org/x/time/rate#NewLimiter.
+	maxTokens := 100
+	if maxqps < 99.0 {
+		maxTokens = 1 + int(maxqps)
+	}
+	var seed int64
+	if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil {
+		seed = time.Now().UnixNano()
+	}
+	s := sampler{
+		fraction: fraction,
+		Limiter:  rate.NewLimiter(rate.Limit(maxqps), maxTokens),
+		Rand:     rand.New(rand.NewSource(seed)),
+	}
+	return &s, nil
+}
diff --git a/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go
new file mode 100644
index 0000000..5fb40e3
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.pb.go
@@ -0,0 +1,161 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package helloworld is a generated protocol buffer package.
+ +It is generated from these files: + helloworld.proto + +It has these top-level messages: + HelloRequest + HelloReply +*/ +package helloworld + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The request message containing the user's name. +type HelloRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *HelloRequest) Reset() { *m = HelloRequest{} } +func (m *HelloRequest) String() string { return proto.CompactTextString(m) } +func (*HelloRequest) ProtoMessage() {} +func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// The response message containing the greetings +type HelloReply struct { + Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *HelloReply) Reset() { *m = HelloReply{} } +func (m *HelloReply) String() string { return proto.CompactTextString(m) } +func (*HelloReply) ProtoMessage() {} +func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func init() { + proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") + proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Greeter service + +type GreeterClient interface { + // Sends a greeting + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) +} + +type greeterClient struct { + cc *grpc.ClientConn +} + +func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Greeter service + +type GreeterServer interface { + // Sends a greeting + SayHello(context.Context, *HelloRequest) (*HelloReply, error) +} + +func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { + s.RegisterService(&_Greeter_serviceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Greeter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "helloworld.proto", +} + +func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, + 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, + 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, + 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, + 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, + 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, + 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, + 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x00, 0xad, 0x50, 0x62, 0x70, 0x32, 0xe0, 0x92, 0xce, 0xcc, 0xd7, + 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0x46, 0x52, 0xeb, + 0xc4, 0x0f, 0x56, 0x1c, 0x0e, 0x62, 0x07, 0x80, 0xbc, 0x14, 0xc0, 0x98, 0xc4, 0x06, 0xf6, 0x9b, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, +} diff --git a/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto new file mode 100644 index 0000000..5be9f5b --- /dev/null +++ b/vendor/cloud.google.com/go/trace/testdata/helloworld/helloworld.proto @@ -0,0 +1,37 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "io.grpc.examples.helloworld";
+option java_outer_classname = "HelloWorldProto";
+
+package helloworld;
+
+// The greeting service definition.
+service Greeter {
+  // Sends a greeting
+  rpc SayHello (HelloRequest) returns (HelloReply) {}
+}
+
+// The request message containing the user's name.
+message HelloRequest {
+  string name = 1;
+}
+
+// The response message containing the greetings
+message HelloReply {
+  string message = 1;
+}
diff --git a/vendor/cloud.google.com/go/trace/trace.go b/vendor/cloud.google.com/go/trace/trace.go
new file mode 100644
index 0000000..9f000a2
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/trace.go
@@ -0,0 +1,842 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package trace is a Google Stackdriver Trace library.
+//
+// This package is still experimental and subject to change.
+//
+// See https://cloud.google.com/trace/api/#data_model for a discussion of traces
+// and spans.
+//
+// To initialize a client that connects to the Stackdriver Trace server, use the
+// NewClient function. Generally you will want to do this on program
+// initialization.
+//
+//	import "cloud.google.com/go/trace"
+//	...
+//	traceClient, err = trace.NewClient(ctx, projectID)
+//
+// Calling SpanFromRequest will create a new trace span for an incoming HTTP
+// request. If the request contains a trace context header, it is used to
+// determine the trace ID. Otherwise, a new trace ID is created.
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		span := traceClient.SpanFromRequest(r)
+//		defer span.Finish()
+//		...
+//	}
+//
+// SpanFromRequest and NewSpan return nil if the *Client is nil, so you can disable
+// tracing by not initializing your *Client variable. All of the exported
+// functions on *Span do nothing when the *Span is nil.
+//
+// If you need to start traces that don't correspond to an incoming HTTP request,
+// you can use NewSpan to create a root-level span.
+//
+//	span := traceClient.NewSpan("span name")
+//	defer span.Finish()
+//
+// Although a trace span object is created for every request, only a subset of
+// traces are uploaded to the server, for efficiency. By default, the requests
+// that are traced are those with the tracing bit set in the options field of
+// the trace context header. Ideally, you should override this behaviour by
+// calling SetSamplingPolicy. NewLimitedSampler returns an implementation of
+// SamplingPolicy which traces requests that have the tracing bit set, and also
+// randomly traces a specified fraction of requests. Additionally, it sets a
+// limit on the number of requests traced per second. The following example
+// traces one in every thousand requests, up to a limit of 5 per second.
+//
+//	p, err := trace.NewLimitedSampler(0.001, 5)
+//	traceClient.SetSamplingPolicy(p)
+//
+// You can create a new span as a child of an existing span with NewChild.
+//
+//	childSpan := span.NewChild(name)
+//	...
+//	childSpan.Finish()
+//
+// When sending an HTTP request to another server, NewRemoteChild will create
+// a span to represent the time the current program waits for the request to
+// complete, and attach a header to the outgoing request so that the trace will
+// be propagated to the destination server.
+//
+//	childSpan := span.NewRemoteChild(&httpRequest)
+//	...
+//	childSpan.Finish()
+//
+// Alternatively, if you have access to the X-Cloud-Trace-Context header value
+// but not the underlying HTTP request (this can happen if you are using a
+// different transport or messaging protocol, such as gRPC), you can use
+// SpanFromHeader instead of SpanFromRequest. In that case, you will need to
+// specify the span name explicitly, since it cannot be constructed from the
+// HTTP request's URL and method.
+//
+//	func handler(r *somepkg.Request) {
+//		span := traceClient.SpanFromHeader("span name", r.TraceContext())
+//		defer span.Finish()
+//		...
+//	}
+//
+// Spans can contain a map from keys to values that have useful information
+// about the span. The elements of this map are called labels. Some labels,
+// whose keys all begin with the string "trace.cloud.google.com/", are set
+// automatically in the following ways:
+//
+// - SpanFromRequest sets some labels to data about the incoming request.
+//
+// - NewRemoteChild sets some labels to data about the outgoing request.
+//
+// - Finish sets a label to a stack trace, if the stack trace option is enabled
+// in the incoming trace header.
+//
+// - The WithResponse option sets some labels to data about a response.
+// You can also set labels using SetLabel. If a label is given a value
+// automatically and by SetLabel, the automatically-set value is used.
+//
+//	span.SetLabel(key, value)
+//
+// The WithResponse option can be used when Finish is called.
+//
+//	childSpan := span.NewRemoteChild(outgoingReq)
+//	resp, err := http.DefaultClient.Do(outgoingReq)
+//	...
+//	childSpan.Finish(trace.WithResponse(resp))
+//
+// When a span created by SpanFromRequest or SpanFromHeader is finished, the
+// finished spans in the corresponding trace -- the span itself and its
+// descendants -- are uploaded to the Stackdriver Trace server using the
+// *Client that created the span. Finish returns immediately, and uploading
+// occurs asynchronously. You can use the FinishWait function instead to wait
+// until uploading has finished.
+//
+//	err := span.FinishWait()
+//
+// Using contexts to pass *trace.Span objects through your program will often
+// be a better approach than passing them around explicitly. This allows trace
+// spans, and other request-scoped or part-of-request-scoped values, to be
+// easily passed through API boundaries. Various Google Cloud libraries will
+// retrieve trace spans from contexts and automatically create child spans for
+// API requests.
+// See https://blog.golang.org/context for more discussion of contexts.
+// A derived context containing a trace span can be created using NewContext.
+//
+//	span := traceClient.SpanFromRequest(r)
+//	ctx = trace.NewContext(ctx, span)
+//
+// The span can be retrieved from a context elsewhere in the program using
+// FromContext.
+//
+//	func foo(ctx context.Context) {
+//		span := trace.FromContext(ctx).NewChild("in foo")
+//		defer span.Finish()
+//		...
+// } +// +package trace // import "cloud.google.com/go/trace" + +import ( + "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "log" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + "google.golang.org/api/gensupport" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + htransport "google.golang.org/api/transport/http" +) + +const ( + httpHeader = `X-Cloud-Trace-Context` + userAgent = `gcloud-golang-trace/20160501` + cloudPlatformScope = `https://www.googleapis.com/auth/cloud-platform` + spanKindClient = `RPC_CLIENT` + spanKindServer = `RPC_SERVER` + spanKindUnspecified = `SPAN_KIND_UNSPECIFIED` + maxStackFrames = 20 +) + +// Stackdriver Trace API predefined labels. +const ( + LabelAgent = `trace.cloud.google.com/agent` + LabelComponent = `trace.cloud.google.com/component` + LabelErrorMessage = `trace.cloud.google.com/error/message` + LabelErrorName = `trace.cloud.google.com/error/name` + LabelHTTPClientCity = `trace.cloud.google.com/http/client_city` + LabelHTTPClientCountry = `trace.cloud.google.com/http/client_country` + LabelHTTPClientProtocol = `trace.cloud.google.com/http/client_protocol` + LabelHTTPClientRegion = `trace.cloud.google.com/http/client_region` + LabelHTTPHost = `trace.cloud.google.com/http/host` + LabelHTTPMethod = `trace.cloud.google.com/http/method` + LabelHTTPRedirectedURL = `trace.cloud.google.com/http/redirected_url` + LabelHTTPRequestSize = `trace.cloud.google.com/http/request/size` + LabelHTTPResponseSize = `trace.cloud.google.com/http/response/size` + LabelHTTPStatusCode = `trace.cloud.google.com/http/status_code` + LabelHTTPURL = `trace.cloud.google.com/http/url` + LabelHTTPUserAgent = `trace.cloud.google.com/http/user_agent` + LabelPID = `trace.cloud.google.com/pid` + LabelSamplingPolicy = `trace.cloud.google.com/sampling_policy` + LabelSamplingWeight = `trace.cloud.google.com/sampling_weight` + LabelStackTrace = `trace.cloud.google.com/stacktrace` + LabelTID = `trace.cloud.google.com/tid` +) + +const ( + // ScopeTraceAppend grants permissions to write trace data for a project. + ScopeTraceAppend = "https://www.googleapis.com/auth/trace.append" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +type contextKey struct{} + +type stackLabelValue struct { + Frames []stackFrame `json:"stack_frame"` +} + +type stackFrame struct { + Class string `json:"class_name,omitempty"` + Method string `json:"method_name"` + Filename string `json:"file_name"` + Line int64 `json:"line_number"` +} + +var ( + spanIDCounter uint64 + spanIDIncrement uint64 +) + +func init() { + // Set spanIDCounter and spanIDIncrement to random values. nextSpanID will + // return an arithmetic progression using these values, skipping zero. We set + // the LSB of spanIDIncrement to 1, so that the cycle length is 2^64. + binary.Read(rand.Reader, binary.LittleEndian, &spanIDCounter) + binary.Read(rand.Reader, binary.LittleEndian, &spanIDIncrement) + spanIDIncrement |= 1 + // Attach hook for autogenerated Google API calls. This will automatically + // create trace spans for API calls if there is a trace in the context. 
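+	// The hook returns a per-request callback that finishes the span when the
+	// response arrives; see requestHook below.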
+ gensupport.RegisterHook(requestHook) +} + +func requestHook(ctx context.Context, req *http.Request) func(resp *http.Response) { + span := FromContext(ctx) + if span == nil || req == nil { + return nil + } + span = span.NewRemoteChild(req) + return func(resp *http.Response) { + if resp != nil { + span.Finish(WithResponse(resp)) + } else { + span.Finish() + } + } +} + +// nextSpanID returns a new span ID. It will never return zero. +func nextSpanID() uint64 { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&spanIDCounter, spanIDIncrement) + } + return id +} + +// nextTraceID returns a new trace ID. +func nextTraceID() string { + id1 := nextSpanID() + id2 := nextSpanID() + return fmt.Sprintf("%016x%016x", id1, id2) +} + +// Client is a client for uploading traces to the Google Stackdriver Trace service. +// A nil Client will no-op for all of its methods. +type Client struct { + service *api.Service + projectID string + policy SamplingPolicy + bundler *bundler.Bundler +} + +// NewClient creates a new Google Stackdriver Trace client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(cloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + hc, basePath, err := htransport.NewClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("creating HTTP client for Google Stackdriver Trace API: %v", err) + } + apiService, err := api.New(hc) + if err != nil { + return nil, fmt.Errorf("creating Google Stackdriver Trace API client: %v", err) + } + if basePath != "" { + // An option set a basepath, so override api.New's default. + apiService.BasePath = basePath + } + c := &Client{ + service: apiService, + projectID: projectID, + } + bundler := bundler.NewBundler((*api.Trace)(nil), func(bundle interface{}) { + traces := bundle.([]*api.Trace) + err := c.upload(traces) + if err != nil { + log.Printf("failed to upload %d traces to the Cloud Trace server: %v", len(traces), err) + } + }) + bundler.DelayThreshold = 2 * time.Second + bundler.BundleCountThreshold = 100 + // We're not measuring bytes here, we're counting traces and spans as one "byte" each. + bundler.BundleByteThreshold = 1000 + bundler.BundleByteLimit = 1000 + bundler.BufferedByteLimit = 10000 + c.bundler = bundler + return c, nil +} + +// SetSamplingPolicy sets the SamplingPolicy that determines how often traces +// are initiated by this client. +func (c *Client) SetSamplingPolicy(p SamplingPolicy) { + if c != nil { + c.policy = p + } +} + +// SpanFromHeader returns a new trace span based on a provided request header +// value or nil iff the client is nil. +// +// The trace information and identifiers will be read from the header value. +// Otherwise, a new trace ID is made and the parent span ID is zero. +// For the exact format of the header value, see +// https://cloud.google.com/trace/docs/support#how_do_i_force_a_request_to_be_traced +// +// The name of the new span is provided as an argument. +// +// If a non-nil sampling policy has been set in the client, it can override +// the options set in the header and choose whether to trace the request. +// +// If the header doesn't have existing tracing information, then a *Span is +// returned anyway, but it will not be uploaded to the server, just as when +// calling SpanFromRequest on an untraced request. 
+//
+// Most users using HTTP should use SpanFromRequest, rather than
+// SpanFromHeader, since it provides additional functionality for HTTP
+// requests. In particular, it will set various pieces of request information
+// as labels on the *Span, which is not available from the header alone.
+func (c *Client) SpanFromHeader(name string, header string) *Span {
+	if c == nil {
+		return nil
+	}
+	traceID, parentSpanID, options, _, ok := traceInfoFromHeader(header)
+	if !ok {
+		traceID = nextTraceID()
+	}
+	t := &trace{
+		traceID:       traceID,
+		client:        c,
+		globalOptions: options,
+		localOptions:  options,
+	}
+	span := startNewChild(name, t, parentSpanID)
+	span.span.Kind = spanKindServer
+	span.rootSpan = true
+	configureSpanFromPolicy(span, c.policy, ok)
+	return span
+}
+
+// SpanFromRequest returns a new trace span for an HTTP request or nil
+// iff the client is nil.
+//
+// If the incoming HTTP request contains a trace context header, the trace ID,
+// parent span ID, and tracing options will be read from that header.
+// Otherwise, a new trace ID is made and the parent span ID is zero.
+//
+// If a non-nil sampling policy has been set in the client, it can override the
+// options set in the header and choose whether to trace the request.
+//
+// If the request is not being traced, then a *Span is returned anyway, but it
+// will not be uploaded to the server -- it is only useful for propagating
+// trace context to child requests and for getting the TraceID. All its
+// methods can still be called -- the Finish, FinishWait, and SetLabel methods
+// do nothing. NewChild does nothing, and returns the same *Span. TraceID
+// works as usual.
+func (c *Client) SpanFromRequest(r *http.Request) *Span {
+	if c == nil {
+		return nil
+	}
+	traceID, parentSpanID, options, _, ok := traceInfoFromHeader(r.Header.Get(httpHeader))
+	if !ok {
+		traceID = nextTraceID()
+	}
+	t := &trace{
+		traceID:       traceID,
+		client:        c,
+		globalOptions: options,
+		localOptions:  options,
+	}
+	span := startNewChildWithRequest(r, t, parentSpanID)
+	span.span.Kind = spanKindServer
+	span.rootSpan = true
+	configureSpanFromPolicy(span, c.policy, ok)
+	return span
+}
+
+// NewSpan returns a new trace span with the given name or nil iff the
+// client is nil.
+//
+// A new trace and span ID is generated to trace the span.
+// The returned span needs to be finished by calling Finish or FinishWait.
+func (c *Client) NewSpan(name string) *Span {
+	if c == nil {
+		return nil
+	}
+	t := &trace{
+		traceID:       nextTraceID(),
+		client:        c,
+		localOptions:  optionTrace,
+		globalOptions: optionTrace,
+	}
+	span := startNewChild(name, t, 0)
+	span.span.Kind = spanKindUnspecified
+	span.rootSpan = true
+	configureSpanFromPolicy(span, c.policy, false)
+	return span
+}
+
+func configureSpanFromPolicy(s *Span, p SamplingPolicy, ok bool) {
+	if p == nil {
+		return
+	}
+	d := p.Sample(Parameters{HasTraceHeader: ok})
+	if d.Trace {
+		// Turn on tracing locally, and in child requests.
+		s.trace.localOptions |= optionTrace
+		s.trace.globalOptions |= optionTrace
+	} else {
+		// Turn off tracing locally.
+		s.trace.localOptions = 0
+		return
+	}
+	if d.Sample {
+		// This trace is in the random sample, so set the labels.
+		s.SetLabel(LabelSamplingPolicy, d.Policy)
+		s.SetLabel(LabelSamplingWeight, fmt.Sprint(d.Weight))
+	}
+}
+
+// NewContext returns a derived context containing the span.
+func NewContext(ctx context.Context, s *Span) context.Context {
+	if s == nil {
+		return ctx
+	}
+	return context.WithValue(ctx, contextKey{}, s)
+}
+
+// FromContext returns the span contained in the context, or nil.
+func FromContext(ctx context.Context) *Span {
+	s, _ := ctx.Value(contextKey{}).(*Span)
+	return s
+}
+
+func traceInfoFromHeader(h string) (traceID string, spanID uint64, options optionFlags, optionsOk bool, ok bool) {
+	// See https://cloud.google.com/trace/docs/faq for the header format.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > 200 {
+		return "", 0, 0, false, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return "", 0, 0, false, false
+	}
+	traceID, h = h[:slash], h[slash+1:]
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	spanID, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return "", 0, 0, false, false
+	}
+
+	// Parse the options field, which is optional.
+	if !strings.HasPrefix(h, "o=") {
+		return traceID, spanID, 0, false, true
+	}
+	o, err := strconv.ParseUint(h[2:], 10, 64)
+	if err != nil {
+		return "", 0, 0, false, false
+	}
+	options = optionFlags(o)
+	return traceID, spanID, options, true, true
+}
+
+type optionFlags uint32
+
+const (
+	optionTrace optionFlags = 1 << iota
+	optionStack
+)
+
+type trace struct {
+	mu            sync.Mutex
+	client        *Client
+	traceID       string
+	globalOptions optionFlags // options that will be passed to any child requests
+	localOptions  optionFlags // options applied in this server
+	spans         []*Span     // finished spans for this trace.
+}
+
+// finish appends s to t.spans. If s is the root span, uploads the trace to the
+// server.
+func (t *trace) finish(s *Span, wait bool, opts ...FinishOption) error {
+	for _, o := range opts {
+		o.modifySpan(s)
+	}
+	s.end = time.Now()
+	t.mu.Lock()
+	t.spans = append(t.spans, s)
+	spans := t.spans
+	t.mu.Unlock()
+	if s.rootSpan {
+		if wait {
+			return t.client.upload([]*api.Trace{t.constructTrace(spans)})
+		}
+		go func() {
+			tr := t.constructTrace(spans)
+			err := t.client.bundler.Add(tr, 1+len(spans))
+			if err == bundler.ErrOversizedItem {
+				err = t.client.upload([]*api.Trace{tr})
+			}
+			if err != nil {
+				log.Println("error uploading trace:", err)
+			}
+		}()
+	}
+	return nil
+}
+
+func (t *trace) constructTrace(spans []*Span) *api.Trace {
+	apiSpans := make([]*api.TraceSpan, len(spans))
+	for i, sp := range spans {
+		sp.span.StartTime = sp.start.In(time.UTC).Format(time.RFC3339Nano)
+		sp.span.EndTime = sp.end.In(time.UTC).Format(time.RFC3339Nano)
+		if t.localOptions&optionStack != 0 {
+			sp.setStackLabel()
+		}
+		if sp.host != "" {
+			sp.SetLabel(LabelHTTPHost, sp.host)
+		}
+		if sp.url != "" {
+			sp.SetLabel(LabelHTTPURL, sp.url)
+		}
+		if sp.method != "" {
+			sp.SetLabel(LabelHTTPMethod, sp.method)
+		}
+		if sp.statusCode != 0 {
+			sp.SetLabel(LabelHTTPStatusCode, strconv.Itoa(sp.statusCode))
+		}
+		apiSpans[i] = &sp.span
+	}
+
+	return &api.Trace{
+		ProjectId: t.client.projectID,
+		TraceId:   t.traceID,
+		Spans:     apiSpans,
+	}
+}
+
+func (c *Client) upload(traces []*api.Trace) error {
+	_, err := c.service.Projects.PatchTraces(c.projectID, &api.Traces{Traces: traces}).Do()
+	return err
+}
+
+// Span contains information about one span of a trace.
+type Span struct { + trace *trace + + spanMu sync.Mutex // guards span.Labels + span api.TraceSpan + + start time.Time + end time.Time + rootSpan bool + stack [maxStackFrames]uintptr + host string + method string + url string + statusCode int +} + +// Traced reports whether the current span is sampled to be traced. +func (s *Span) Traced() bool { + if s == nil { + return false + } + return s.trace.localOptions&optionTrace != 0 +} + +// NewChild creates a new span with the given name as a child of s. +// If s is nil, does nothing and returns nil. +func (s *Span) NewChild(name string) *Span { + if s == nil { + return nil + } + if !s.Traced() { + // TODO(jbd): Document this behavior in godoc here and elsewhere. + return s + } + return startNewChild(name, s.trace, s.span.SpanId) +} + +// NewRemoteChild creates a new span as a child of s. +// +// Some labels in the span are set from the outgoing *http.Request r. +// +// A header is set in r so that the trace context is propagated to the +// destination. The parent span ID in that header is set as follows: +// - If the request is being traced, then the ID of s is used. +// - If the request is not being traced, but there was a trace context header +// in the incoming request for this trace (the request passed to +// SpanFromRequest), the parent span ID in that header is used. +// - Otherwise, the parent span ID is zero. +// The tracing bit in the options is set if tracing is enabled, or if it was +// set in the incoming request. +// +// If s is nil, does nothing and returns nil. +func (s *Span) NewRemoteChild(r *http.Request) *Span { + if s == nil { + return nil + } + if !s.Traced() { + r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, s.span.ParentSpanId, s.trace.globalOptions)} + return s + } + newSpan := startNewChildWithRequest(r, s.trace, s.span.SpanId) + r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, newSpan.span.SpanId, s.trace.globalOptions)} + return newSpan +} + +// Header returns the value of the X-Cloud-Trace-Context header that +// should be used to propagate the span. This is the inverse of +// SpanFromHeader. +// +// Most users should use NewRemoteChild unless they have specific +// propagation needs or want to control the naming of their span. +// Header() does not create a new span. +func (s *Span) Header() string { + if s == nil { + return "" + } + return spanHeader(s.trace.traceID, s.span.SpanId, s.trace.globalOptions) +} + +func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanID uint64) *Span { + name := r.URL.Host + r.URL.Path // drop scheme and query params + newSpan := startNewChild(name, trace, parentSpanID) + if r.Host == "" { + newSpan.host = r.URL.Host + } else { + newSpan.host = r.Host + } + newSpan.method = r.Method + newSpan.url = r.URL.String() + return newSpan +} + +func startNewChild(name string, trace *trace, parentSpanID uint64) *Span { + spanID := nextSpanID() + for spanID == parentSpanID { + spanID = nextSpanID() + } + newSpan := &Span{ + trace: trace, + span: api.TraceSpan{ + Kind: spanKindClient, + Name: name, + ParentSpanId: parentSpanID, + SpanId: spanID, + }, + start: time.Now(), + } + if trace.localOptions&optionStack != 0 { + _ = runtime.Callers(1, newSpan.stack[:]) + } + return newSpan +} + +// TraceID returns the ID of the trace to which s belongs. +func (s *Span) TraceID() string { + if s == nil { + return "" + } + return s.trace.traceID +} + +// SetLabel sets the label for the given key to the given value. 
+// If the value is empty, the label for that key is deleted. +// If a label is given a value automatically and by SetLabel, the +// automatically-set value is used. +// If s is nil, does nothing. +// +// SetLabel shouldn't be called after Finish or FinishWait. +func (s *Span) SetLabel(key, value string) { + if s == nil { + return + } + if !s.Traced() { + return + } + s.spanMu.Lock() + defer s.spanMu.Unlock() + + if value == "" { + if s.span.Labels != nil { + delete(s.span.Labels, key) + } + return + } + if s.span.Labels == nil { + s.span.Labels = make(map[string]string) + } + s.span.Labels[key] = value +} + +type FinishOption interface { + modifySpan(s *Span) +} + +type withResponse struct { + *http.Response +} + +// WithResponse returns an option that can be passed to Finish that indicates +// that some labels for the span should be set using the given *http.Response. +func WithResponse(resp *http.Response) FinishOption { + return withResponse{resp} +} +func (u withResponse) modifySpan(s *Span) { + if u.Response != nil { + s.statusCode = u.StatusCode + } +} + +// Finish declares that the span has finished. +// +// If s is nil, Finish does nothing and returns nil. +// +// If the option trace.WithResponse(resp) is passed, then some labels are set +// for s using information in the given *http.Response. This is useful when the +// span is for an outgoing http request; s will typically have been created by +// NewRemoteChild in this case. +// +// If s is a root span (one created by SpanFromRequest) then s, and all its +// descendant spans that have finished, are uploaded to the Google Stackdriver +// Trace server asynchronously. +func (s *Span) Finish(opts ...FinishOption) { + if s == nil { + return + } + if !s.Traced() { + return + } + s.trace.finish(s, false, opts...) +} + +// FinishWait is like Finish, but if s is a root span, it waits until uploading +// is finished, then returns an error if one occurred. +func (s *Span) FinishWait(opts ...FinishOption) error { + if s == nil { + return nil + } + if !s.Traced() { + return nil + } + return s.trace.finish(s, true, opts...) +} + +func spanHeader(traceID string, spanID uint64, options optionFlags) string { + // See https://cloud.google.com/trace/docs/faq for the header format. + return fmt.Sprintf("%s/%d;o=%d", traceID, spanID, options) +} + +func (s *Span) setStackLabel() { + var stack stackLabelValue + lastSigPanic, inTraceLibrary := false, true + for _, pc := range s.stack { + if pc == 0 { + break + } + if !lastSigPanic { + pc-- + } + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + // Name has one of the following forms: + // path/to/package.Foo + // path/to/package.(Type).Foo + // For the first form, we store the whole name in the Method field of the + // stack frame. For the second form, we set the Method field to "Foo" and + // the Class field to "path/to/package.(Type)". 
+ name := fn.Name() + if inTraceLibrary && !strings.HasPrefix(name, "cloud.google.com/go/trace.") { + inTraceLibrary = false + } + var class string + if i := strings.Index(name, ")."); i != -1 { + class, name = name[:i+1], name[i+2:] + } + frame := stackFrame{ + Class: class, + Method: name, + Filename: file, + Line: int64(line), + } + if inTraceLibrary && len(stack.Frames) == 1 { + stack.Frames[0] = frame + } else { + stack.Frames = append(stack.Frames, frame) + } + lastSigPanic = fn.Name() == "runtime.sigpanic" + } + if label, err := json.Marshal(stack); err == nil { + s.SetLabel(LabelStackTrace, string(label)) + } +} diff --git a/vendor/cloud.google.com/go/trace/trace_test.go b/vendor/cloud.google.com/go/trace/trace_test.go new file mode 100644 index 0000000..805184c --- /dev/null +++ b/vendor/cloud.google.com/go/trace/trace_test.go @@ -0,0 +1,969 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "regexp" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/datastore" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + dspb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +const testProjectID = "testproject" + +type fakeRoundTripper struct { + reqc chan *http.Request +} + +func newFakeRoundTripper() *fakeRoundTripper { + return &fakeRoundTripper{reqc: make(chan *http.Request)} +} + +func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + rt.reqc <- r + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +func newTestClient(rt http.RoundTripper) *Client { + t, err := NewClient(context.Background(), testProjectID, option.WithHTTPClient(&http.Client{Transport: rt})) + if err != nil { + panic(err) + } + return t +} + +type fakeDatastoreServer struct { + dspb.DatastoreServer + fail bool +} + +func (f *fakeDatastoreServer) Lookup(ctx context.Context, req *dspb.LookupRequest) (*dspb.LookupResponse, error) { + if f.fail { + return nil, errors.New("lookup failed") + } + return &dspb.LookupResponse{}, nil +} + +// makeRequests makes some requests. +// span is the root span. rt is the trace client's http client's transport. +// This is used to retrieve the trace uploaded by the client, if any. If +// expectTrace is true, we expect a trace will be uploaded. If synchronous is +// true, the call to Finish is expected not to return before the client has +// uploaded any traces. 
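+// makeRequests returns the *http.Request that carried the uploaded trace, or
+// nil if no upload was expected.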
+func makeRequests(t *testing.T, span *Span, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { + ctx := NewContext(context.Background(), span) + tc := newTestClient(&noopTransport{}) + + // An HTTP request. + { + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + resp := &http.Response{StatusCode: 200} + s := span.NewRemoteChild(req2) + s.Finish(WithResponse(resp)) + } + + // An autogenerated API call. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + computeClient, err := compute.New(hc) + if err != nil { + t.Fatal(err) + } + _, err = computeClient.Zones.List(testProjectID).Context(ctx).Do() + if err != nil { + t.Fatal(err) + } + } + + // A cloud library call that uses the autogenerated API. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + var objAttrsList []*storage.ObjectAttrs + it := storageClient.Bucket("testbucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != iterator.Done { + t.Fatal(err) + } + if err == iterator.Done { + break + } + objAttrsList = append(objAttrsList, objAttrs) + } + } + + // A cloud library call that uses grpc internally. + for _, fail := range []bool{false, true} { + srv, err := testutil.NewServer() + if err != nil { + t.Fatalf("creating test datastore server: %v", err) + } + dspb.RegisterDatastoreServer(srv.Gsrv, &fakeDatastoreServer{fail: fail}) + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) + if err != nil { + t.Fatalf("connecting to test datastore server: %v", err) + } + datastoreClient, err := datastore.NewClient(ctx, testProjectID, option.WithGRPCConn(conn)) + if err != nil { + t.Fatalf("creating datastore client: %v", err) + } + k := datastore.NameKey("Entity", "stringID", nil) + e := new(datastore.Entity) + datastoreClient.Get(ctx, k, e) + } + + done := make(chan struct{}) + go func() { + if synchronous { + err := span.FinishWait() + if err != nil { + t.Errorf("Unexpected error from span.FinishWait: %v", err) + } + } else { + span.Finish() + } + done <- struct{}{} + }() + if !expectTrace { + <-done + select { + case <-rt.reqc: + t.Errorf("Got a trace, expected none.") + case <-time.After(5 * time.Millisecond): + } + return nil + } else if !synchronous { + <-done + return <-rt.reqc + } else { + select { + case <-done: + t.Errorf("Synchronous Finish didn't wait for trace upload.") + return <-rt.reqc + case <-time.After(5 * time.Millisecond): + r := <-rt.reqc + <-done + return r + } + } +} + +func TestHeader(t *testing.T) { + tests := []struct { + header string + wantTraceID string + wantSpanID uint64 + wantOpts optionFlags + wantOK bool + }{ + { + header: "0123456789ABCDEF0123456789ABCDEF/1;o=1", + wantTraceID: "0123456789ABCDEF0123456789ABCDEF", + wantSpanID: 1, + wantOpts: 1, + wantOK: true, + }, + { + header: "0123456789ABCDEF0123456789ABCDEF/1;o=0", + wantTraceID: "0123456789ABCDEF0123456789ABCDEF", + wantSpanID: 1, + wantOpts: 0, + wantOK: true, + }, + { + header: "0123456789ABCDEF0123456789ABCDEF/1", + wantTraceID: "0123456789ABCDEF0123456789ABCDEF", + wantSpanID: 1, + wantOpts: 0, + wantOK: true, + }, + { + header: "", + wantTraceID: "", + wantSpanID: 0, + wantOpts: 0, + wantOK: false, + }, + } + for _, tt := range tests { + 
traceID, parentSpanID, opts, _, ok := traceInfoFromHeader(tt.header)
+ if got, want := traceID, tt.wantTraceID; got != want {
+ t.Errorf("TraceID(%v) = %q; want %q", tt.header, got, want)
+ }
+ if got, want := parentSpanID, tt.wantSpanID; got != want {
+ t.Errorf("SpanID(%v) = %v; want %v", tt.header, got, want)
+ }
+ if got, want := opts, tt.wantOpts; got != want {
+ t.Errorf("Options(%v) = %v; want %v", tt.header, got, want)
+ }
+ if got, want := ok, tt.wantOK; got != want {
+ t.Errorf("Header exists (%v) = %v; want %v", tt.header, got, want)
+ }
+ }
+}
+
+func TestOutgoingReqHeader(t *testing.T) {
+ all, _ := NewLimitedSampler(1, 1<<16) // trace every request
+
+ tests := []struct {
+ desc string
+ traceHeader string
+ samplingPolicy SamplingPolicy
+
+ wantHeaderRe *regexp.Regexp
+ }{
+ {
+ desc: "Parent span without sampling options, client samples all",
+ traceHeader: "0123456789ABCDEF0123456789ABCDEF/1",
+ samplingPolicy: all,
+ wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=1"),
+ },
+ {
+ desc: "Parent span without sampling options, without client sampling",
+ traceHeader: "0123456789ABCDEF0123456789ABCDEF/1",
+ samplingPolicy: nil,
+ wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=0"),
+ },
+ {
+ desc: "Parent span with o=1, client samples none",
+ traceHeader: "0123456789ABCDEF0123456789ABCDEF/1;o=1",
+ samplingPolicy: nil,
+ wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=1"),
+ },
+ {
+ desc: "Parent span with o=0, without client sampling",
+ traceHeader: "0123456789ABCDEF0123456789ABCDEF/1;o=0",
+ samplingPolicy: nil,
+ wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=0"),
+ },
+ }
+
+ tc := newTestClient(nil)
+ for _, tt := range tests {
+ tc.SetSamplingPolicy(tt.samplingPolicy)
+ span := tc.SpanFromHeader("/foo", tt.traceHeader)
+
+ req, _ := http.NewRequest("GET", "http://localhost", nil)
+ span.NewRemoteChild(req)
+
+ if got, re := req.Header.Get(httpHeader), tt.wantHeaderRe; !re.MatchString(got) {
+ t.Errorf("%v (parent=%q): got header %q; want in format %q", tt.desc, tt.traceHeader, got, re)
+ }
+ }
+}
+
+func TestTrace(t *testing.T) {
+ t.Parallel()
+ testTrace(t, false, true)
+}
+
+func TestTraceWithWait(t *testing.T) {
+ testTrace(t, true, true)
+}
+
+func TestTraceFromHeader(t *testing.T) {
+ t.Parallel()
+ testTrace(t, false, false)
+}
+
+func TestTraceFromHeaderWithWait(t *testing.T) {
+ testTrace(t, true, false)
+}
+
+func TestNewSpan(t *testing.T) {
+ t.Skip("flaky")
+ const traceID = "0123456789ABCDEF0123456789ABCDEF"
+
+ rt := newFakeRoundTripper()
+ traceClient := newTestClient(rt)
+ span := traceClient.NewSpan("/foo")
+ span.trace.traceID = traceID
+
+ uploaded := makeRequests(t, span, rt, true, true)
+
+ if uploaded == nil {
+ t.Fatalf("No trace uploaded, expected one.")
+ }
+
+ expected := api.Traces{
+ Traces: []*api.Trace{
+ {
+ ProjectId: testProjectID,
+ Spans: []*api.TraceSpan{
+ {
+ Kind: "RPC_CLIENT",
+ Labels: map[string]string{
+ "trace.cloud.google.com/http/host": "example.com",
+ "trace.cloud.google.com/http/method": "GET",
+ "trace.cloud.google.com/http/status_code": "200",
+ "trace.cloud.google.com/http/url": "http://example.com/bar",
+ },
+ Name: "example.com/bar",
+ },
+ {
+ Kind: "RPC_CLIENT",
+ Labels: map[string]string{
+ "trace.cloud.google.com/http/host": "www.googleapis.com",
+ "trace.cloud.google.com/http/method": "GET",
+ "trace.cloud.google.com/http/status_code": "200",
+ "trace.cloud.google.com/http/url": 
"https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "SPAN_KIND_UNSPECIFIED", + Labels: map[string]string{}, + Name: "/foo", + }, + }, + TraceId: traceID, + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + checkTraces(t, patch, expected) + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 0 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 0) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !testutil.Equal(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s want %s", got, want) + } +} + +func testTrace(t *testing.T, synchronous bool, fromRequest bool) { + t.Skip("flaky") + const header = `0123456789ABCDEF0123456789ABCDEF/42;o=3` + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + + span := traceClient.SpanFromHeader("/foo", header) + headerOrReqLabels := map[string]string{} + headerOrReqName := "/foo" + + if fromRequest 
{ + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("X-Cloud-Trace-Context", header) + span = traceClient.SpanFromRequest(req) + headerOrReqLabels = map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/url": "http://example.com/foo", + } + headerOrReqName = "example.com/foo" + } + + uploaded := makeRequests(t, span, rt, synchronous, true) + if uploaded == nil { + t.Fatalf("No trace uploaded, expected one.") + } + + expected := api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: testProjectID, + Spans: []*api.TraceSpan{ + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "http://example.com/bar", + }, + Name: "example.com/bar", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "RPC_SERVER", + Labels: headerOrReqLabels, + Name: headerOrReqName, + }, + }, + TraceId: "0123456789ABCDEF0123456789ABCDEF", + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + checkTraces(t, patch, expected) + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 42 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 42) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + 
for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !testutil.Equal(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s \n\n want %s", got, want) + } +} + +func TestNoTrace(t *testing.T) { + testNoTrace(t, false, true) +} + +func TestNoTraceWithWait(t *testing.T) { + testNoTrace(t, true, true) +} + +func TestNoTraceFromHeader(t *testing.T) { + testNoTrace(t, false, false) +} + +func TestNoTraceFromHeaderWithWait(t *testing.T) { + testNoTrace(t, true, false) +} + +func testNoTrace(t *testing.T, synchronous bool, fromRequest bool) { + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42`, + `0123456789ABCDEF0123456789ABCDEF`, + ``, + } { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + var span *Span + if fromRequest { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if header != "" { + req.Header.Set("X-Cloud-Trace-Context", header) + } + if err != nil { + t.Fatal(err) + } + span = traceClient.SpanFromRequest(req) + } else { + span = traceClient.SpanFromHeader("/foo", header) + } + uploaded := makeRequests(t, span, rt, synchronous, false) + if uploaded != nil { + t.Errorf("Got a trace, expected none.") + } + } +} + +func TestSample(t *testing.T) { + // A deterministic test of the sampler logic. + type testCase struct { + rate float64 + maxqps float64 + want int + } + const delta = 25 * time.Millisecond + for _, test := range []testCase{ + // qps won't matter, so we will sample half of the 79 calls + {0.50, 100, 40}, + // with 1 qps and a burst of 2, we will sample twice in second #1, once in the partial second #2 + {0.50, 1, 3}, + } { + sp, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + sampled := 0 + tm := time.Now() + for i := 0; i < 80; i++ { + if s.sample(Parameters{}, tm, float64(i%2)).Sample { + sampled++ + } + tm = tm.Add(delta) + } + if sampled != test.want { + t.Errorf("rate=%f, maxqps=%f: got %d samples, want %d", test.rate, test.maxqps, sampled, test.want) + } + } +} + +func TestSampling(t *testing.T) { + t.Parallel() + // This scope tests sampling in a larger context, with real time and randomness. + wg := sync.WaitGroup{} + type testCase struct { + rate float64 + maxqps float64 + expectedRange [2]int + } + for _, test := range []testCase{ + {0, 5, [2]int{0, 0}}, + {5, 0, [2]int{0, 0}}, + {0.50, 100, [2]int{20, 60}}, + {0.50, 1, [2]int{3, 4}}, // Windows, with its less precise clock, sometimes gives 4. 
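+ // The 79 requests in the goroutine below arrive ~25ms apart, i.e. over
+ // roughly two seconds, so with maxqps=1 (and a burst of 2) only about
+ // 3-4 can be sampled no matter what the 50% rate would otherwise allow.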
+ } { + wg.Add(1) + go func(test testCase) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + traceClient.bundler.BundleByteLimit = 1 + p, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + ticker := time.NewTicker(25 * time.Millisecond) + sampled := 0 + for i := 0; i < 79; i++ { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + select { + case <-rt.reqc: + <-ticker.C + sampled++ + case <-ticker.C: + } + } + ticker.Stop() + if test.expectedRange[0] > sampled || sampled > test.expectedRange[1] { + t.Errorf("rate=%f, maxqps=%f: got %d samples want ∈ %v", test.rate, test.maxqps, sampled, test.expectedRange) + } + wg.Done() + }(test) + } + wg.Wait() +} + +func TestBundling(t *testing.T) { + t.Parallel() + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + traceClient.bundler.DelayThreshold = time.Second / 2 + traceClient.bundler.BundleCountThreshold = 10 + p, err := NewLimitedSampler(1, 99) // sample every request. + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + + for i := 0; i < 35; i++ { + go func() { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + }() + } + + // Read the first three bundles. + <-rt.reqc + <-rt.reqc + <-rt.reqc + + // Test that the fourth bundle isn't sent early. + select { + case <-rt.reqc: + t.Errorf("bundle sent too early") + case <-time.After(time.Second / 4): + <-rt.reqc + } + + // Test that there aren't extra bundles. + select { + case <-rt.reqc: + t.Errorf("too many bundles sent") + case <-time.After(time.Second): + } +} + +func TestWeights(t *testing.T) { + const ( + expectedNumTraced = 10100 + numTracedEpsilon = 100 + expectedTotalWeight = 50000 + totalWeightEpsilon = 5000 + ) + rng := rand.New(rand.NewSource(1)) + const delta = 2 * time.Millisecond + for _, headerRate := range []float64{0.0, 0.5, 1.0} { + // Simulate 10 seconds of requests arriving at 500qps. + // + // The sampling policy tries to sample 25% of them, but has a qps limit of + // 100, so it will not be able to. The returned weight should be higher + // for some sampled requests to compensate. + // + // headerRate is the fraction of incoming requests that have a trace header + // set. The qps limit should not be exceeded, even if headerRate is high. 
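+ // Concretely: 50000 requests spaced 2ms apart span ~100s, so the 100 qps
+ // cap admits roughly 10000 traced requests (expectedNumTraced), and the
+ // compensating weights should sum back to about 50000 (expectedTotalWeight).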
+ sp, err := NewLimitedSampler(0.25, 100) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + tm := time.Now() + totalWeight := 0.0 + numTraced := 0 + seenLargeWeight := false + for i := 0; i < 50000; i++ { + d := s.sample(Parameters{HasTraceHeader: rng.Float64() < headerRate}, tm, rng.Float64()) + if d.Trace { + numTraced++ + } + if d.Sample { + totalWeight += d.Weight + if x := int(d.Weight) / 4; x <= 0 || x >= 100 || d.Weight != float64(x)*4.0 { + t.Errorf("weight: got %f, want a small positive multiple of 4", d.Weight) + } + if d.Weight > 4 { + seenLargeWeight = true + } + } + tm = tm.Add(delta) + } + if !seenLargeWeight { + t.Errorf("headerRate %f: never saw sample weight higher than 4.", headerRate) + } + if numTraced < expectedNumTraced-numTracedEpsilon || expectedNumTraced+numTracedEpsilon < numTraced { + t.Errorf("headerRate %f: got %d traced requests, want ∈ [%d, %d]", headerRate, numTraced, expectedNumTraced-numTracedEpsilon, expectedNumTraced+numTracedEpsilon) + } + if totalWeight < expectedTotalWeight-totalWeightEpsilon || expectedTotalWeight+totalWeightEpsilon < totalWeight { + t.Errorf("headerRate %f: got total weight %f want ∈ [%d, %d]", headerRate, totalWeight, expectedTotalWeight-totalWeightEpsilon, expectedTotalWeight+totalWeightEpsilon) + } + } +} + +type alwaysTrace struct{} + +func (a alwaysTrace) Sample(p Parameters) Decision { + return Decision{Trace: true} +} + +type neverTrace struct{} + +func (a neverTrace) Sample(p Parameters) Decision { + return Decision{Trace: false} +} + +func TestPropagation(t *testing.T) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42;o=1`, + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=3`, + `0123456789ABCDEF0123456789ABCDEF/0;o=0`, + `0123456789ABCDEF0123456789ABCDEF/0;o=1`, + `0123456789ABCDEF0123456789ABCDEF/0;o=2`, + `0123456789ABCDEF0123456789ABCDEF/0;o=3`, + ``, + } { + for _, policy := range []SamplingPolicy{ + nil, + alwaysTrace{}, + neverTrace{}, + } { + traceClient.SetSamplingPolicy(policy) + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + if header != "" { + req.Header.Set("X-Cloud-Trace-Context", header) + } + + span := traceClient.SpanFromRequest(req) + + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + req3, err := http.NewRequest("GET", "http://example.com/baz", nil) + if err != nil { + t.Fatal(err) + } + span.NewRemoteChild(req2) + span.NewRemoteChild(req3) + + var ( + t1, t2, t3 string + s1, s2, s3 uint64 + o1, o2, o3 uint64 + ) + fmt.Sscanf(header, "%32s/%d;o=%d", &t1, &s1, &o1) + fmt.Sscanf(req2.Header.Get("X-Cloud-Trace-Context"), "%32s/%d;o=%d", &t2, &s2, &o2) + fmt.Sscanf(req3.Header.Get("X-Cloud-Trace-Context"), "%32s/%d;o=%d", &t3, &s3, &o3) + + if header == "" { + if t2 != t3 { + t.Errorf("expected the same trace ID in child requests, got %q %q", t2, t3) + } + } else { + if t2 != t1 || t3 != t1 { + t.Errorf("trace IDs should be passed to child requests") + } + } + trace := policy == alwaysTrace{} || policy == nil && (o1&1) != 0 + if header == "" { + if trace && (s2 == 0 || s3 == 0) { + t.Errorf("got span IDs %d %d in child requests, want nonzero", s2, s3) + } + if trace && s2 == s3 { + t.Errorf("got span IDs %d %d in child requests, should be different", s2, s3) + } + if !trace && (s2 != 0 || s3 != 0) { + t.Errorf("got 
span IDs %d %d in child requests, want zero", s2, s3) + } + } else { + if trace && (s2 == s1 || s3 == s1 || s2 == s3) { + t.Errorf("parent span IDs in input and outputs should be all different, got %d %d %d", s1, s2, s3) + } + if !trace && (s2 != s1 || s3 != s1) { + t.Errorf("parent span ID in input, %d, should have been equal to parent span IDs in output: %d %d", s1, s2, s3) + } + } + expectTraceOption := policy == alwaysTrace{} || (o1&1) != 0 + if expectTraceOption != ((o2&1) != 0) || expectTraceOption != ((o3&1) != 0) { + t.Errorf("tracing flag in child requests should be %t, got options %d %d", expectTraceOption, o2, o3) + } + } + } +} + +func BenchmarkSpanFromHeader(b *testing.B) { + const header = `0123456789ABCDEF0123456789ABCDEF/42;o=0` + const name = "/foo" + + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + for n := 0; n < b.N; n++ { + traceClient.SpanFromHeader(name, header) + } +} + +func checkTraces(t *testing.T, patch, expected api.Traces) { + if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { + diff := testutil.Diff(patch.Traces, expected.Traces) + t.Logf("diff:\n%s", diff) + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Fatalf("PatchTraces request: got %s want %s", got, want) + } +} diff --git a/vendor/cloud.google.com/go/translate/examples_test.go b/vendor/cloud.google.com/go/translate/examples_test.go new file mode 100644 index 0000000..8746bd0 --- /dev/null +++ b/vendor/cloud.google.com/go/translate/examples_test.go @@ -0,0 +1,81 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package translate_test + +import ( + "fmt" + + "cloud.google.com/go/translate" + "golang.org/x/net/context" + "golang.org/x/text/language" +) + +func Example_NewClient() { + ctx := context.Background() + client, err := translate.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +func Example_Translate() { + ctx := context.Background() + client, err := translate.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + translations, err := client.Translate(ctx, + []string{"Le singe est sur la branche"}, language.English, + &translate.Options{ + Source: language.French, + Format: translate.Text, + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(translations[0].Text) +} + +func Example_DetectLanguage() { + ctx := context.Background() + client, err := translate.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + ds, err := client.DetectLanguage(ctx, []string{"Today is Monday"}) + if err != nil { + // TODO: handle error. + } + fmt.Println(ds) +} + +func Example_SupportedLanguages() { + ctx := context.Background() + client, err := translate.NewClient(ctx) + if err != nil { + // TODO: handle error. 
+ } + langs, err := client.SupportedLanguages(ctx, language.English) + if err != nil { + // TODO: handle error. + } + fmt.Println(langs) +} diff --git a/vendor/cloud.google.com/go/translate/internal/translate/v2/README b/vendor/cloud.google.com/go/translate/internal/translate/v2/README new file mode 100644 index 0000000..a4f22c6 --- /dev/null +++ b/vendor/cloud.google.com/go/translate/internal/translate/v2/README @@ -0,0 +1,12 @@ +translate-nov2016-api.json is a hand-modified version of translate-api.json. +It correctly reflects the API as of 2016-11-15. + +Differences: + +- Change to base URL +- Addition of OAuth scopes + +To generate: + + + diff --git a/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh b/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh new file mode 100755 index 0000000..3aec1d8 --- /dev/null +++ b/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh @@ -0,0 +1,29 @@ +#!/bin/bash -e + + +(cd $GOPATH/src/google.golang.org/api; make generator) + +$GOPATH/bin/google-api-go-generator \ + -api_json_file translate-nov2016-api.json \ + -api_pkg_base cloud.google.com/go/translate/internal \ + -output translate-nov2016-gen.nolicense + +cat - translate-nov2016-gen.nolicense > translate-nov2016-gen.go <" + s + "" + } + tr = translate(htmlify(test.input), test.target, nil) + if got, want := tr.Text, htmlify(test.output); got != want { + t.Errorf("html: got %q, want %q", got, want) + } + // Using the HTML format behaves the same. + tr = translate(htmlify(test.input), test.target, &Options{Format: HTML}) + if got, want := tr.Text, htmlify(test.output); got != want { + t.Errorf("html: got %q, want %q", got, want) + } + } +} + +// This tests the beta "nmt" model. +func TestTranslateModel(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + trs, err := c.Translate(ctx, []string{"Hello"}, language.French, &Options{Model: "nmt"}) + if err != nil { + t.Fatal(err) + } + if len(trs) != 1 { + t.Fatalf("wanted one Translation, got %d", len(trs)) + } + tr := trs[0] + if got, want := tr.Text, "Bonjour"; got != want { + t.Errorf("text: got %q, want %q", got, want) + } + if got, want := tr.Model, "nmt"; got != want { + t.Errorf("model: got %q, want %q", got, want) + } +} + +func TestTranslateMultipleInputs(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + inputs := []string{ + "When you're a Jet, you're a Jet all the way", + "From your first cigarette to your last dying day", + "When you're a Jet if the spit hits the fan", + "You got brothers around, you're a family man", + } + ts, err := c.Translate(ctx, inputs, language.French, nil) + if err != nil { + t.Fatal(err) + } + if got, want := len(ts), len(inputs); got != want { + t.Fatalf("got %d Translations, wanted %d", got, want) + } +} + +func TestTranslateErrors(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + for _, test := range []struct { + ctx context.Context + target language.Tag + inputs []string + opts *Options + }{ + {ctx, language.English, nil, nil}, + {ctx, language.Und, []string{"input"}, nil}, + {ctx, language.English, []string{}, nil}, + {ctx, language.English, []string{"input"}, &Options{Format: "random"}}, + } { + _, err := c.Translate(test.ctx, test.inputs, test.target, test.opts) + if err == nil { + t.Errorf("%+v: got nil, want error", test) + } + } +} + +func TestDetectLanguage(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer 
c.Close() + ds, err := c.DetectLanguage(ctx, []string{ + "Today is Monday", + "Aujourd'hui est lundi", + }) + if err != nil { + t.Fatal(err) + } + if len(ds) != 2 { + t.Fatalf("got %d detection lists, want 2", len(ds)) + } + checkDetections(t, ds[0], language.English) + checkDetections(t, ds[1], language.French) +} + +func checkDetections(t *testing.T, ds []Detection, want language.Tag) { + for _, d := range ds { + if d.Language == want { + return + } + } + t.Errorf("%v: missing %s", ds, want) +} + +// A small subset of the supported languages. +var supportedLangs = []Language{ + {Name: "Danish", Tag: language.Danish}, + {Name: "English", Tag: language.English}, + {Name: "French", Tag: language.French}, + {Name: "German", Tag: language.German}, + {Name: "Greek", Tag: language.Greek}, + {Name: "Hindi", Tag: language.Hindi}, + {Name: "Hungarian", Tag: language.Hungarian}, + {Name: "Italian", Tag: language.Italian}, + {Name: "Russian", Tag: language.Russian}, + {Name: "Turkish", Tag: language.Turkish}, +} + +func TestSupportedLanguages(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + got, err := c.SupportedLanguages(ctx, language.English) + if err != nil { + t.Fatal(err) + } + want := map[language.Tag]Language{} + for _, sl := range supportedLangs { + want[sl.Tag] = sl + } + for _, g := range got { + w, ok := want[g.Tag] + if !ok { + continue + } + if g != w { + t.Errorf("got %+v, want %+v", g, w) + } + delete(want, g.Tag) + } + if len(want) > 0 { + t.Errorf("missing: %+v", want) + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/doc.go b/vendor/cloud.google.com/go/videointelligence/apiv1/doc.go new file mode 100644 index 0000000..15652bf --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package videointelligence is an auto-generated package for the +// Cloud Video Intelligence API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to be subject to changes. +// +// Cloud Video Intelligence API. +package videointelligence // import "cloud.google.com/go/videointelligence/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. 
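+// They are suitable for passing to option.WithScopes, for example (an
+// illustrative sketch; NewClient already applies these scopes by default):
+//
+//   c, err := videointelligence.NewClient(ctx, option.WithScopes(DefaultAuthScopes()...))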
+func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go new file mode 100644 index 0000000..b18f31e --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/mock_test.go @@ -0,0 +1,180 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockVideoIntelligenceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + videointelligencepb.VideoIntelligenceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
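+// A typical test constructs its client against the in-process mock as
+// (sketch):
+//
+//   c, err := NewClient(context.Background(), clientOpt)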
+var clientOpt option.ClientOption + +var ( + mockVideoIntelligence mockVideoIntelligenceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestVideoIntelligenceServiceAnnotateVideo(t *testing.T) { + var expectedResponse *videointelligencepb.AnnotateVideoResponse = &videointelligencepb.AnnotateVideoResponse{} + + mockVideoIntelligence.err = nil + mockVideoIntelligence.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var request *videointelligencepb.AnnotateVideoRequest = &videointelligencepb.AnnotateVideoRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockVideoIntelligence.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestVideoIntelligenceServiceAnnotateVideoError(t *testing.T) { + errCode := codes.PermissionDenied + mockVideoIntelligence.err = nil + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var request *videointelligencepb.AnnotateVideoRequest = &videointelligencepb.AnnotateVideoRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go new file mode 100644 index 0000000..5393001 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client.go @@ -0,0 +1,225 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnnotateVideo []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("videointelligence.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 120000 * time.Millisecond, + Multiplier: 2.5, + }) + }), + }, + } + return &CallOptions{ + AnnotateVideo: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Cloud Video Intelligence API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client videointelligencepb.VideoIntelligenceServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + Metadata metadata.MD +} + +// NewClient creates a new video intelligence service client. +// +// Service that implements Google Cloud Video Intelligence API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. 
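+// The returned connection is the same one used internally by LROClient, so
+// callers should release it via Close on the Client rather than closing it
+// directly.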
+func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.Metadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnnotateVideo performs asynchronous video annotation. Progress and results can be +// retrieved through the google.longrunning.Operations interface. +// Operation.metadata contains AnnotateVideoProgress (progress). +// Operation.response contains AnnotateVideoResponse (results). +func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { + ctx = insertMetadata(ctx, c.Metadata) + opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// AnnotateVideoOperation manages a long-running operation from AnnotateVideo. +type AnnotateVideoOperation struct { + lro *longrunning.Operation +} + +// AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. +// The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. +func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
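+//
+// A caller preferring manual polling over Wait might loop as follows (an
+// illustrative sketch; the 45s interval mirrors the one Wait uses):
+//
+//   for {
+//       resp, err := op.Poll(ctx)
+//       if err != nil {
+//           break // Poll failed, or the operation completed with an error.
+//       }
+//       if op.Done() {
+//           _ = resp // The operation succeeded; use the response.
+//           break
+//       }
+//       time.Sleep(45 * time.Second)
+//   }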
+func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { + var meta videointelligencepb.AnnotateVideoProgress + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnnotateVideoOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnnotateVideoOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go new file mode 100644 index 0000000..296ea3d --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1/video_intelligence_client_example_test.go @@ -0,0 +1,56 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence_test + +import ( + "cloud.google.com/go/videointelligence/apiv1" + "golang.org/x/net/context" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnnotateVideo() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &videointelligencepb.AnnotateVideoRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnnotateVideo(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go new file mode 100644 index 0000000..55e9643 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package videointelligence is an auto-generated package for the +// Google Cloud Video Intelligence API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Video Intelligence API. +package videointelligence // import "cloud.google.com/go/videointelligence/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go new file mode 100644 index 0000000..6dbce6e --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/mock_test.go @@ -0,0 +1,192 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package videointelligence + +import ( + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockVideoIntelligenceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + videointelligencepb.VideoIntelligenceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockVideoIntelligence mockVideoIntelligenceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestVideoIntelligenceServiceAnnotateVideo(t *testing.T) { + var expectedResponse *videointelligencepb.AnnotateVideoResponse = &videointelligencepb.AnnotateVideoResponse{} + + mockVideoIntelligence.err = nil + mockVideoIntelligence.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var inputUri string = "gs://demomaker/cat.mp4" + var featuresElement videointelligencepb.Feature = videointelligencepb.Feature_LABEL_DETECTION + var features = []videointelligencepb.Feature{featuresElement} + var request = &videointelligencepb.AnnotateVideoRequest{ + InputUri: inputUri, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockVideoIntelligence.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestVideoIntelligenceServiceAnnotateVideoError(t *testing.T) { + errCode := codes.PermissionDenied + mockVideoIntelligence.err = nil + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var inputUri string = "gs://demomaker/cat.mp4" + var featuresElement videointelligencepb.Feature = videointelligencepb.Feature_LABEL_DETECTION + var features = []videointelligencepb.Feature{featuresElement} + var request = &videointelligencepb.AnnotateVideoRequest{ + InputUri: inputUri, + Features: features, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go new file mode 100644 index 0000000..50b738f --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client.go @@ -0,0 +1,225 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. 
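+// By default, AnnotateVideo retries on DeadlineExceeded and Unavailable with
+// exponential backoff: delays start at 1s and grow by a factor of 2.5
+// (1s, 2.5s, 6.25s, ...), capped at 120s, per defaultCallOptions below.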
+type CallOptions struct { + AnnotateVideo []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("videointelligence.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 120000 * time.Millisecond, + Multiplier: 2.5, + }) + }), + }, + } + return &CallOptions{ + AnnotateVideo: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Video Intelligence API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client videointelligencepb.VideoIntelligenceServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new video intelligence service client. +// +// Service that implements Google Cloud Video Intelligence API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnnotateVideo performs asynchronous video annotation. Progress and results can be +// retrieved through the google.longrunning.Operations interface. +// Operation.metadata contains AnnotateVideoProgress (progress). +// Operation.response contains AnnotateVideoResponse (results). 
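+//
+// A typical call, mirroring the example test added later in this change:
+//
+//    op, err := c.AnnotateVideo(ctx, req)
+//    if err != nil {
+//        // TODO: Handle error.
+//    }
+//    resp, err := op.Wait(ctx)
+//    // TODO: Use resp.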
+func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// AnnotateVideoOperation manages a long-running operation from AnnotateVideo. +type AnnotateVideoOperation struct { + lro *longrunning.Operation +} + +// AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. +// The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. +func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { + var meta videointelligencepb.AnnotateVideoProgress + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnnotateVideoOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. 
+// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnnotateVideoOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go new file mode 100644 index 0000000..7904f48 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta1/video_intelligence_client_example_test.go @@ -0,0 +1,56 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence_test + +import ( + "cloud.google.com/go/videointelligence/apiv1beta1" + "golang.org/x/net/context" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnnotateVideo() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &videointelligencepb.AnnotateVideoRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnnotateVideo(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go new file mode 100644 index 0000000..4a88722 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/doc.go @@ -0,0 +1,46 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package videointelligence is an auto-generated package for the +// Google Cloud Video Intelligence API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// Google Cloud Video Intelligence API. 
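+//
+// Construction follows the same shape as the v1beta1 client earlier in this
+// change; a minimal sketch:
+//
+//    ctx := context.Background()
+//    c, err := videointelligence.NewClient(ctx)
+//    if err != nil {
+//        // TODO: Handle error.
+//    }
+//    defer c.Close()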
+package videointelligence // import "cloud.google.com/go/videointelligence/apiv1beta2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + } +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go new file mode 100644 index 0000000..c782329 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/mock_test.go @@ -0,0 +1,180 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockVideoIntelligenceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + videointelligencepb.VideoIntelligenceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
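+// Tests in this file dial the in-process mock server through it, e.g.:
+//
+//    c, err := NewClient(context.Background(), clientOpt)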
+var clientOpt option.ClientOption + +var ( + mockVideoIntelligence mockVideoIntelligenceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestVideoIntelligenceServiceAnnotateVideo(t *testing.T) { + var expectedResponse *videointelligencepb.AnnotateVideoResponse = &videointelligencepb.AnnotateVideoResponse{} + + mockVideoIntelligence.err = nil + mockVideoIntelligence.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var request *videointelligencepb.AnnotateVideoRequest = &videointelligencepb.AnnotateVideoRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockVideoIntelligence.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestVideoIntelligenceServiceAnnotateVideoError(t *testing.T) { + errCode := codes.PermissionDenied + mockVideoIntelligence.err = nil + mockVideoIntelligence.resps = append(mockVideoIntelligence.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var request *videointelligencepb.AnnotateVideoRequest = &videointelligencepb.AnnotateVideoRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AnnotateVideo(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go new file mode 100644 index 0000000..b527932 --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client.go @@ -0,0 +1,225 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnnotateVideo []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("videointelligence.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 120000 * time.Millisecond, + Multiplier: 2.5, + }) + }), + }, + } + return &CallOptions{ + AnnotateVideo: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Video Intelligence API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client videointelligencepb.VideoIntelligenceServiceClient + + // LROClient is used internally to handle longrunning operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient *lroauto.OperationsClient + + // The call options for this service. + CallOptions *CallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewClient creates a new video intelligence service client. +// +// Service that implements Google Cloud Video Intelligence API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), + } + c.setGoogleClientInfo() + + c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) + if err != nil { + // This error "should not happen", since we are just reusing old connection + // and never actually need to dial. + // If this does happen, we could leak conn. However, we cannot close conn: + // If the user invoked the function with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO(pongad): investigate error conditions. + return nil, err + } + return c, nil +} + +// Connection returns the client's connection to the API service. 
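+// The connection can be shared with another client via option.WithGRPCConn,
+// just as NewClient does for the operations client above; a sketch:
+//
+//    lro, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(c.Connection()))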
+func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// AnnotateVideo performs asynchronous video annotation. Progress and results can be +// retrieved through the google.longrunning.Operations interface. +// Operation.metadata contains AnnotateVideoProgress (progress). +// Operation.response contains AnnotateVideoResponse (results). +func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, resp), + }, nil +} + +// AnnotateVideoOperation manages a long-running operation from AnnotateVideo. +type AnnotateVideoOperation struct { + lro *longrunning.Operation +} + +// AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. +// The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. +func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { + return &AnnotateVideoOperation{ + lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
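+//
+// A hand-rolled loop over these semantics might look like the sketch below;
+// Wait is usually preferable, and handleResponse is a placeholder:
+//
+//    for {
+//        resp, err := op.Poll(ctx)
+//        if err != nil {
+//            return err // Poll failed, or the operation completed with an error.
+//        }
+//        if op.Done() {
+//            return handleResponse(resp)
+//        }
+//        time.Sleep(45 * time.Second) // Wait's default polling interval.
+//    }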
+func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { + var resp videointelligencepb.AnnotateVideoResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { + var meta videointelligencepb.AnnotateVideoProgress + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AnnotateVideoOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AnnotateVideoOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go new file mode 100644 index 0000000..b34446f --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/video_intelligence_client_example_test.go @@ -0,0 +1,56 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package videointelligence_test + +import ( + "cloud.google.com/go/videointelligence/apiv1beta2" + "golang.org/x/net/context" + videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnnotateVideo() { + ctx := context.Background() + c, err := videointelligence.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &videointelligencepb.AnnotateVideoRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AnnotateVideo(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go new file mode 100644 index 0000000..d0e057e --- /dev/null +++ b/vendor/cloud.google.com/go/videointelligence/apiv1beta2/whitelist.go @@ -0,0 +1,16 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS API IS CURRENTLY UNDER WHITELIST. +package videointelligence diff --git a/vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go b/vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go new file mode 100644 index 0000000..79e8ce4 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/BatchAnnotateImages_smoke_test.go @@ -0,0 +1,80 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestImageAnnotatorSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewImageAnnotatorClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var gcsImageUri string = "gs://gapic-toolkit/President_Barack_Obama.jpg" + var source = &visionpb.ImageSource{ + GcsImageUri: gcsImageUri, + } + var image = &visionpb.Image{ + Source: source, + } + var type_ visionpb.Feature_Type = visionpb.Feature_FACE_DETECTION + var featuresElement = &visionpb.Feature{ + Type: type_, + } + var features = []*visionpb.Feature{featuresElement} + var requestsElement = &visionpb.AnnotateImageRequest{ + Image: image, + Features: features, + } + var requests = []*visionpb.AnnotateImageRequest{requestsElement} + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + if _, err := c.BatchAnnotateImages(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/README.md b/vendor/cloud.google.com/go/vision/apiv1/README.md new file mode 100644 index 0000000..bcfa08d --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/README.md @@ -0,0 +1,9 @@ +Auto-generated vision v1 clients +================================= + +This package includes auto-generated clients for the vision v1 API. + +Use the handwritten client (in the parent directory, +cloud.google.com/go/vision) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. diff --git a/vendor/cloud.google.com/go/vision/apiv1/client.go b/vendor/cloud.google.com/go/vision/apiv1/client.go new file mode 100644 index 0000000..cc91032 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/client.go @@ -0,0 +1,151 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// AnnotateImage runs image detection and annotation for a single image. +func (c *ImageAnnotatorClient) AnnotateImage(ctx context.Context, req *pb.AnnotateImageRequest, opts ...gax.CallOption) (*pb.AnnotateImageResponse, error) { + res, err := c.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{ + Requests: []*pb.AnnotateImageRequest{req}, + }, opts...) + if err != nil { + return nil, err + } + return res.Responses[0], nil +} + +// Called for a single image and a single feature. +func (c *ImageAnnotatorClient) annotateOne(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, ftype pb.Feature_Type, maxResults int, opts []gax.CallOption) (*pb.AnnotateImageResponse, error) { + res, err := c.AnnotateImage(ctx, &pb.AnnotateImageRequest{ + Image: img, + ImageContext: ictx, + Features: []*pb.Feature{{Type: ftype, MaxResults: int32(maxResults)}}, + }, opts...) 
+ if err != nil { + return nil, err + } + // When there is only one image and one feature, the response's Error field is + // unambiguously about that one detection, so we "promote" it to the error return + // value. + // res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC + // error because it preserves the code as a separate field. + // TODO(jba): preserve the details field. + if res.Error != nil { + return nil, grpc.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) + } + return res, nil +} + +// DetectFaces performs face detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectFaces(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.FaceAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_FACE_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.FaceAnnotations, nil +} + +// DetectLandmarks performs landmark detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectLandmarks(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LANDMARK_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.LandmarkAnnotations, nil +} + +// DetectLogos performs logo detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectLogos(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LOGO_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.LogoAnnotations, nil +} + +// DetectLabels performs label detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectLabels(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LABEL_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.LabelAnnotations, nil +} + +// DetectTexts performs text detection on the image. +// At most maxResults results are returned. +func (c *ImageAnnotatorClient) DetectTexts(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_TEXT_DETECTION, maxResults, opts) + if err != nil { + return nil, err + } + return res.TextAnnotations, nil +} + +// DetectDocumentText performs full text (OCR) detection on the image. +func (c *ImageAnnotatorClient) DetectDocumentText(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.TextAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_DOCUMENT_TEXT_DETECTION, 0, opts) + if err != nil { + return nil, err + } + return res.FullTextAnnotation, nil +} + +// DetectSafeSearch performs safe-search detection on the image. 
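+// Like the other Detect helpers above, it delegates to annotateOne with a
+// single feature; a sketch of a call from client code:
+//
+//    ann, err := c.DetectSafeSearch(ctx, vision.NewImageFromURI("gs://my-bucket/my-image.png"), nil)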
+func (c *ImageAnnotatorClient) DetectSafeSearch(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.SafeSearchAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_SAFE_SEARCH_DETECTION, 0, opts) + if err != nil { + return nil, err + } + return res.SafeSearchAnnotation, nil +} + +// DetectImageProperties computes properties of the image. +func (c *ImageAnnotatorClient) DetectImageProperties(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.ImageProperties, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_IMAGE_PROPERTIES, 0, opts) + if err != nil { + return nil, err + } + return res.ImagePropertiesAnnotation, nil +} + +// DetectWeb computes a web annotation on the image. +func (c *ImageAnnotatorClient) DetectWeb(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.WebDetection, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_WEB_DETECTION, 0, opts) + if err != nil { + return nil, err + } + return res.WebDetection, nil +} + +// CropHints computes crop hints for the image. +func (c *ImageAnnotatorClient) CropHints(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.CropHintsAnnotation, error) { + res, err := c.annotateOne(ctx, img, ictx, pb.Feature_CROP_HINTS, 0, opts) + if err != nil { + return nil, err + } + return res.CropHintsAnnotation, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/client_test.go b/vendor/cloud.google.com/go/vision/apiv1/client_test.go new file mode 100644 index 0000000..c6f0ba0 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/client_test.go @@ -0,0 +1,200 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vision + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var batchResponse = &pb.BatchAnnotateImagesResponse{ + Responses: []*pb.AnnotateImageResponse{{ + FaceAnnotations: []*pb.FaceAnnotation{ + {RollAngle: 1}, {RollAngle: 2}}, + LandmarkAnnotations: []*pb.EntityAnnotation{{Mid: "landmark"}}, + LogoAnnotations: []*pb.EntityAnnotation{{Mid: "logo"}}, + LabelAnnotations: []*pb.EntityAnnotation{{Mid: "label"}}, + TextAnnotations: []*pb.EntityAnnotation{{Mid: "text"}}, + FullTextAnnotation: &pb.TextAnnotation{Text: "full"}, + SafeSearchAnnotation: &pb.SafeSearchAnnotation{Spoof: pb.Likelihood_POSSIBLE}, + ImagePropertiesAnnotation: &pb.ImageProperties{DominantColors: &pb.DominantColorsAnnotation{}}, + CropHintsAnnotation: &pb.CropHintsAnnotation{CropHints: []*pb.CropHint{{Confidence: 0.5}}}, + WebDetection: &pb.WebDetection{WebEntities: []*pb.WebDetection_WebEntity{{EntityId: "web"}}}, + }}, +} + +// Verify that all the "shortcut" methods use the underlying +// BatchAnnotateImages RPC correctly. +func TestClientMethods(t *testing.T) { + ctx := context.Background() + c, err := NewImageAnnotatorClient(ctx, clientOpt) + if err != nil { + t.Fatal(err) + } + + mockImageAnnotator.resps = []proto.Message{batchResponse} + img := &pb.Image{Source: &pb.ImageSource{ImageUri: "http://foo.jpg"}} + ictx := &pb.ImageContext{LanguageHints: []string{"en", "fr"}} + req := &pb.AnnotateImageRequest{ + Image: img, + ImageContext: ictx, + Features: []*pb.Feature{ + {Type: pb.Feature_LABEL_DETECTION, MaxResults: 3}, + {Type: pb.Feature_FACE_DETECTION, MaxResults: 4}, + }, + } + + for i, test := range []struct { + call func() (interface{}, error) + wantFeatures []*pb.Feature + wantRes interface{} + }{ + { + func() (interface{}, error) { return c.AnnotateImage(ctx, req) }, + req.Features, batchResponse.Responses[0], + }, + { + func() (interface{}, error) { return c.DetectFaces(ctx, img, ictx, 2) }, + []*pb.Feature{{pb.Feature_FACE_DETECTION, 2}}, + batchResponse.Responses[0].FaceAnnotations, + }, + { + func() (interface{}, error) { return c.DetectLandmarks(ctx, img, ictx, 2) }, + []*pb.Feature{{pb.Feature_LANDMARK_DETECTION, 2}}, + batchResponse.Responses[0].LandmarkAnnotations, + }, + { + func() (interface{}, error) { return c.DetectLogos(ctx, img, ictx, 2) }, + []*pb.Feature{{pb.Feature_LOGO_DETECTION, 2}}, + batchResponse.Responses[0].LogoAnnotations, + }, + { + func() (interface{}, error) { return c.DetectLabels(ctx, img, ictx, 2) }, + []*pb.Feature{{pb.Feature_LABEL_DETECTION, 2}}, + batchResponse.Responses[0].LabelAnnotations, + }, + { + func() (interface{}, error) { return c.DetectTexts(ctx, img, ictx, 2) }, + []*pb.Feature{{pb.Feature_TEXT_DETECTION, 2}}, + batchResponse.Responses[0].TextAnnotations, + }, + { + func() (interface{}, error) { return c.DetectDocumentText(ctx, img, ictx) }, + []*pb.Feature{{pb.Feature_DOCUMENT_TEXT_DETECTION, 0}}, + batchResponse.Responses[0].FullTextAnnotation, + }, + { + func() (interface{}, error) { return c.DetectSafeSearch(ctx, img, ictx) }, + []*pb.Feature{{pb.Feature_SAFE_SEARCH_DETECTION, 0}}, + batchResponse.Responses[0].SafeSearchAnnotation, + }, + { + func() (interface{}, error) { return c.DetectImageProperties(ctx, img, ictx) }, + []*pb.Feature{{pb.Feature_IMAGE_PROPERTIES, 0}}, + 
batchResponse.Responses[0].ImagePropertiesAnnotation, + }, + { + func() (interface{}, error) { return c.DetectWeb(ctx, img, ictx) }, + []*pb.Feature{{pb.Feature_WEB_DETECTION, 0}}, + batchResponse.Responses[0].WebDetection, + }, + { + func() (interface{}, error) { return c.CropHints(ctx, img, ictx) }, + []*pb.Feature{{pb.Feature_CROP_HINTS, 0}}, + batchResponse.Responses[0].CropHintsAnnotation, + }, + } { + mockImageAnnotator.reqs = nil + res, err := test.call() + if err != nil { + t.Fatal(err) + } + got := mockImageAnnotator.reqs[0] + want := &pb.BatchAnnotateImagesRequest{ + Requests: []*pb.AnnotateImageRequest{{ + Image: img, + ImageContext: ictx, + Features: test.wantFeatures, + }}, + } + if !testEqual(got, want) { + t.Errorf("#%d:\ngot %v\nwant %v", i, got, want) + } + if got, want := res, test.wantRes; !testEqual(got, want) { + t.Errorf("#%d:\ngot %v\nwant %v", i, got, want) + } + } + +} + +func testEqual(a, b interface{}) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + t := reflect.TypeOf(a) + if t != reflect.TypeOf(b) { + return false + } + if am, ok := a.(proto.Message); ok { + return proto.Equal(am, b.(proto.Message)) + } + if t.Kind() != reflect.Slice { + panic(fmt.Sprintf("testEqual can only handle proto.Message and slices, got %s", t)) + } + va := reflect.ValueOf(a) + vb := reflect.ValueOf(b) + if va.Len() != vb.Len() { + return false + } + for i := 0; i < va.Len(); i++ { + if !testEqual(va.Index(i).Interface(), vb.Index(i).Interface()) { + return false + } + } + return true +} + +func TestAnnotateOneError(t *testing.T) { + ctx := context.Background() + c, err := NewImageAnnotatorClient(ctx, clientOpt) + if err != nil { + t.Fatal(err) + } + mockImageAnnotator.resps = []proto.Message{ + &pb.BatchAnnotateImagesResponse{ + Responses: []*pb.AnnotateImageResponse{{ + Error: &status.Status{Code: int32(codes.NotFound), Message: "not found"}, + }}, + }, + } + + _, err = c.annotateOne(ctx, + &pb.Image{Source: &pb.ImageSource{ImageUri: "http://foo.jpg"}}, + nil, pb.Feature_LOGO_DETECTION, 1, nil) + if c := grpc.Code(err); c != codes.NotFound { + t.Errorf("got %v, want NotFound", c) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/doc.go b/vendor/cloud.google.com/go/vision/apiv1/doc.go new file mode 100644 index 0000000..883f5c7 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/doc.go @@ -0,0 +1,49 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package vision is an auto-generated package for the +// Google Cloud Vision API. + +// +// Integrates Google Vision features, including image labeling, face, logo, +// and +// landmark detection, optical character recognition (OCR), and detection of +// explicit content, into applications. 
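+//
+// A minimal label-detection call using the helpers added in this change:
+//
+//    c, err := vision.NewImageAnnotatorClient(ctx)
+//    if err != nil {
+//        // TODO: Handle error.
+//    }
+//    labels, err := c.DetectLabels(ctx, vision.NewImageFromURI("gs://my-bucket/my-image.png"), nil, 3)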
+package vision // import "cloud.google.com/go/vision/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-vision", + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/examples_test.go b/vendor/cloud.google.com/go/vision/apiv1/examples_test.go new file mode 100644 index 0000000..84b3ad8 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/examples_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision_test + +import ( + "fmt" + "os" + + vision "cloud.google.com/go/vision/apiv1" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func Example_NewImageFromReader() { + f, err := os.Open("path/to/image.jpg") + if err != nil { + // TODO: handle error. + } + img, err := vision.NewImageFromReader(f) + if err != nil { + // TODO: handle error. + } + fmt.Println(img) +} + +func Example_NewImageFromURI() { + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") + fmt.Println(img) +} + +func ExampleImageAnnotatorClient_AnnotateImage() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + res, err := c.AnnotateImage(ctx, &pb.AnnotateImageRequest{ + Image: vision.NewImageFromURI("gs://my-bucket/my-image.png"), + Features: []*pb.Feature{ + {Type: pb.Feature_LANDMARK_DETECTION, MaxResults: 5}, + {Type: pb.Feature_LABEL_DETECTION, MaxResults: 3}, + }, + }) + if err != nil { + // TODO: Handle error. + } + // TODO: Use res. + _ = res +} + +func Example_FaceFromLandmarks() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + resp, err := c.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{ + Requests: []*pb.AnnotateImageRequest{ + { + Image: vision.NewImageFromURI("gs://bucket/image.jpg"), + Features: []*pb.Feature{{ + Type: pb.Feature_FACE_DETECTION, + MaxResults: 5, + }}, + }, + }, + }) + if err != nil { + // TODO: Handle error. + } + res := resp.Responses[0] + if res.Error != nil { + // TODO: Handle error. 
+ } + for _, a := range res.FaceAnnotations { + face := vision.FaceFromLandmarks(a.Landmarks) + fmt.Println(face.Nose.Tip) + fmt.Println(face.Eyes.Left.Pupil) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/face.go b/vendor/cloud.google.com/go/vision/apiv1/face.go new file mode 100644 index 0000000..943d49f --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/face.go @@ -0,0 +1,153 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "log" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// FaceLandmarks contains the positions of facial features detected by the service. +type FaceLandmarks struct { + Eyebrows Eyebrows + Eyes Eyes + Ears Ears + Nose Nose + Mouth Mouth + Chin Chin + Forehead *pb.Position +} + +// Eyebrows represents a face's eyebrows. +type Eyebrows struct { + Left, Right Eyebrow +} + +// Eyebrow represents a face's eyebrow. +type Eyebrow struct { + Top, Left, Right *pb.Position +} + +// Eyes represents a face's eyes. +type Eyes struct { + Left, Right Eye +} + +// Eye represents a face's eye. +type Eye struct { + Left, Right, Top, Bottom, Center, Pupil *pb.Position +} + +// Ears represents a face's ears. +type Ears struct { + Left, Right *pb.Position +} + +// Nose represents a face's nose. +type Nose struct { + Left, Right, Top, Bottom, Tip *pb.Position +} + +// Mouth represents a face's mouth. +type Mouth struct { + Left, Center, Right, UpperLip, LowerLip *pb.Position +} + +// Chin represents a face's chin. +type Chin struct { + Left, Center, Right *pb.Position +} + +// FaceFromLandmarks converts the list of face landmarks returned by the service +// to a FaceLandmarks struct. 
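+// Typical use iterates over the annotations returned by face detection, as
+// in the example test in this change:
+//
+//    for _, a := range res.FaceAnnotations {
+//        face := vision.FaceFromLandmarks(a.Landmarks)
+//        fmt.Println(face.Nose.Tip)
+//        fmt.Println(face.Eyes.Left.Pupil)
+//    }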
+func FaceFromLandmarks(landmarks []*pb.FaceAnnotation_Landmark) *FaceLandmarks { + face := &FaceLandmarks{} + for _, lm := range landmarks { + switch lm.Type { + case pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Right = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Right = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Left.Top = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Right.Top = lm.Position + case pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES: + face.Nose.Top = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_TIP: + face.Nose.Tip = lm.Position + case pb.FaceAnnotation_Landmark_UPPER_LIP: + face.Mouth.UpperLip = lm.Position + case pb.FaceAnnotation_Landmark_LOWER_LIP: + face.Mouth.LowerLip = lm.Position + case pb.FaceAnnotation_Landmark_MOUTH_LEFT: + face.Mouth.Left = lm.Position + case pb.FaceAnnotation_Landmark_MOUTH_RIGHT: + face.Mouth.Right = lm.Position + case pb.FaceAnnotation_Landmark_MOUTH_CENTER: + face.Mouth.Center = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT: + face.Nose.Right = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT: + face.Nose.Left = lm.Position + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER: + face.Nose.Bottom = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE: + face.Eyes.Left.Center = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE: + face.Eyes.Right.Center = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY: + face.Eyes.Left.Top = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER: + face.Eyes.Left.Right = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Left.Bottom = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER: + face.Eyes.Left.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY: + face.Eyes.Right.Top = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER: + face.Eyes.Right.Right = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Right.Bottom = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER: + face.Eyes.Right.Left = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL: + face.Eyes.Left.Pupil = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL: + face.Eyes.Right.Pupil = lm.Position + case pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION: + face.Ears.Left = lm.Position + case pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION: + face.Ears.Right = lm.Position + case pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA: + face.Forehead = lm.Position + case pb.FaceAnnotation_Landmark_CHIN_GNATHION: + face.Chin.Center = lm.Position + case pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION: + face.Chin.Left = lm.Position + case pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION: + face.Chin.Right = lm.Position + default: + log.Printf("vision: ignoring unknown face annotation landmark %s", lm.Type) + } + } + return face +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/face_test.go b/vendor/cloud.google.com/go/vision/apiv1/face_test.go new file mode 100644 index 0000000..f857e57 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/face_test.go @@ -0,0 +1,228 @@ +// 
Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "testing" + + "cloud.google.com/go/internal/pretty" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func TestFaceFromLandmarks(t *testing.T) { + landmarks := []*pb.FaceAnnotation_Landmark{ + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE, + Position: &pb.Position{X: 1192, Y: 575, Z: 0}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE, + Position: &pb.Position{X: 1479, Y: 571, Z: -9}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW, + Position: &pb.Position{X: 1097, Y: 522, Z: 27}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW, + Position: &pb.Position{X: 1266, Y: 521, Z: -61}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW, + Position: &pb.Position{X: 1402, Y: 520, Z: -66}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW, + Position: &pb.Position{X: 1571, Y: 519, Z: 10}, + }, + { + Type: pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES, + Position: &pb.Position{X: 1331, Y: 566, Z: -66}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_TIP, + Position: &pb.Position{X: 1329, Y: 743, Z: -137}, + }, + { + Type: pb.FaceAnnotation_Landmark_UPPER_LIP, + Position: &pb.Position{X: 1330, Y: 836, Z: -66}, + }, + { + Type: pb.FaceAnnotation_Landmark_LOWER_LIP, + Position: &pb.Position{X: 1334, Y: 954, Z: -36}, + }, + { + Type: pb.FaceAnnotation_Landmark_MOUTH_LEFT, + Position: &pb.Position{X: 1186, Y: 867, Z: 27}, + }, + { + Type: pb.FaceAnnotation_Landmark_MOUTH_RIGHT, + Position: &pb.Position{X: 1484, Y: 857, Z: 19}, + }, + { + Type: pb.FaceAnnotation_Landmark_MOUTH_CENTER, + Position: &pb.Position{X: 1332, Y: 894, Z: -41}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT, + Position: &pb.Position{X: 1432, Y: 750, Z: -26}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT, + Position: &pb.Position{X: 1236, Y: 755, Z: -20}, + }, + { + Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER, + Position: &pb.Position{X: 1332, Y: 783, Z: -70}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY, + Position: &pb.Position{X: 1193, Y: 561, Z: -20}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER, + Position: &pb.Position{X: 1252, Y: 581, Z: -1}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY, + Position: &pb.Position{X: 1190, Y: 593, Z: -1}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER, + Position: &pb.Position{X: 1133, Y: 584, Z: 28}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL, + Position: &pb.Position{X: 1189, Y: 580, Z: -8}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY, + Position: &pb.Position{X: 1474, Y: 561, Z: -30}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER, + Position: &pb.Position{X: 1536, Y: 581, Z: 15}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY, + Position: &pb.Position{X: 1481, Y: 
590, Z: -11}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER, + Position: &pb.Position{X: 1424, Y: 579, Z: -6}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL, + Position: &pb.Position{X: 1478, Y: 580, Z: -18}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT, + Position: &pb.Position{X: 1181, Y: 482, Z: -40}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT, + Position: &pb.Position{X: 1485, Y: 482, Z: -50}, + }, + { + Type: pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION, + Position: &pb.Position{X: 1027, Y: 696, Z: 361}, + }, + { + Type: pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION, + Position: &pb.Position{X: 1666, Y: 695, Z: 339}, + }, + { + Type: pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA, + Position: &pb.Position{X: 1332, Y: 514, Z: -75}, + }, + { + Type: pb.FaceAnnotation_Landmark_CHIN_GNATHION, + Position: &pb.Position{X: 1335, Y: 1058, Z: 6}, + }, + { + Type: pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION, + Position: &pb.Position{X: 1055, Y: 882, Z: 257}, + }, + { + Type: pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION, + Position: &pb.Position{X: 1631, Y: 881, Z: 238}, + }, + } + want := &FaceLandmarks{ + Eyebrows: Eyebrows{ + Left: Eyebrow{ + Top: &pb.Position{X: 1181, Y: 482, Z: -40}, + Left: &pb.Position{X: 1097, Y: 522, Z: 27}, + Right: &pb.Position{X: 1266, Y: 521, Z: -61}, + }, + Right: Eyebrow{ + Top: &pb.Position{X: 1485, Y: 482, Z: -50}, + Left: &pb.Position{X: 1402, Y: 520, Z: -66}, + Right: &pb.Position{X: 1571, Y: 519, Z: 10}, + }, + }, + Eyes: Eyes{ + Left: Eye{ + Left: &pb.Position{X: 1133, Y: 584, Z: 28}, + Right: &pb.Position{X: 1252, Y: 581, Z: -1}, + Top: &pb.Position{X: 1193, Y: 561, Z: -20}, + Bottom: &pb.Position{X: 1190, Y: 593, Z: -1}, + Center: &pb.Position{X: 1192, Y: 575, Z: 0}, + Pupil: &pb.Position{X: 1189, Y: 580, Z: -8}, + }, + Right: Eye{ + Left: &pb.Position{X: 1424, Y: 579, Z: -6}, + Right: &pb.Position{X: 1536, Y: 581, Z: 15}, + Top: &pb.Position{X: 1474, Y: 561, Z: -30}, + Bottom: &pb.Position{X: 1481, Y: 590, Z: -11}, + Center: &pb.Position{X: 1479, Y: 571, Z: -9}, + Pupil: &pb.Position{X: 1478, Y: 580, Z: -18}, + }, + }, + Ears: Ears{ + Left: &pb.Position{X: 1027, Y: 696, Z: 361}, + Right: &pb.Position{X: 1666, Y: 695, Z: 339}, + }, + Nose: Nose{ + Left: &pb.Position{X: 1236, Y: 755, Z: -20}, + Right: &pb.Position{X: 1432, Y: 750, Z: -26}, + Top: &pb.Position{X: 1331, Y: 566, Z: -66}, + Bottom: &pb.Position{X: 1332, Y: 783, Z: -70}, + Tip: &pb.Position{X: 1329, Y: 743, Z: -137}, + }, + Mouth: Mouth{ + Left: &pb.Position{X: 1186, Y: 867, Z: 27}, + Center: &pb.Position{X: 1332, Y: 894, Z: -41}, + Right: &pb.Position{X: 1484, Y: 857, Z: 19}, + UpperLip: &pb.Position{X: 1330, Y: 836, Z: -66}, + LowerLip: &pb.Position{X: 1334, Y: 954, Z: -36}, + }, + Chin: Chin{ + Left: &pb.Position{X: 1055, Y: 882, Z: 257}, + Center: &pb.Position{X: 1335, Y: 1058, Z: 6}, + Right: &pb.Position{X: 1631, Y: 881, Z: 238}, + }, + Forehead: &pb.Position{X: 1332, Y: 514, Z: -75}, + } + + got := FaceFromLandmarks(landmarks) + msg, ok, err := pretty.Diff(want, got) + if err != nil { + t.Fatal(err) + } + if !ok { + t.Error(msg) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image.go b/vendor/cloud.google.com/go/vision/apiv1/image.go new file mode 100644 index 0000000..7981366 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image.go @@ -0,0 +1,37 @@ +// Copyright 2017, Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "io" + "io/ioutil" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// NewImageFromReader reads the bytes of an image from r. +func NewImageFromReader(r io.Reader) (*pb.Image, error) { + bytes, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return &pb.Image{Content: bytes}, nil +} + +// NewImageFromURI returns an image that refers to an object in Google Cloud Storage +// (when the uri is of the form "gs://BUCKET/OBJECT") or at a public URL. +func NewImageFromURI(uri string) *pb.Image { + return &pb.Image{Source: &pb.ImageSource{ImageUri: uri}} +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go new file mode 100644 index 0000000..95c8f08 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go @@ -0,0 +1,134 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. +type ImageAnnotatorCallOptions struct { + BatchAnnotateImages []gax.CallOption +} + +func defaultImageAnnotatorClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("vision.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultImageAnnotatorCallOptions() *ImageAnnotatorCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ImageAnnotatorCallOptions{ + BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], + } +} + +// ImageAnnotatorClient is a client for interacting with Google Cloud Vision API. 
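+//
+// A minimal sketch of typical use (error handling elided; the empty request
+// is only a placeholder, see the example and mock tests in this package for
+// full request construction):
+//
+//	ctx := context.Background()
+//	c, _ := NewImageAnnotatorClient(ctx)
+//	defer c.Close()
+//	resp, _ := c.BatchAnnotateImages(ctx, &visionpb.BatchAnnotateImagesRequest{})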
+type ImageAnnotatorClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + imageAnnotatorClient visionpb.ImageAnnotatorClient + + // The call options for this service. + CallOptions *ImageAnnotatorCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewImageAnnotatorClient creates a new image annotator client. +// +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. +func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ImageAnnotatorClient{ + conn: conn, + CallOptions: defaultImageAnnotatorCallOptions(), + + imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ImageAnnotatorClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ImageAnnotatorClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ImageAnnotatorClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchAnnotateImages run image detection and annotation for a batch of images. +func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest, opts ...gax.CallOption) (*visionpb.BatchAnnotateImagesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchAnnotateImages[0:len(c.CallOptions.BatchAnnotateImages):len(c.CallOptions.BatchAnnotateImages)], opts...) + var resp *visionpb.BatchAnnotateImagesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.imageAnnotatorClient.BatchAnnotateImages(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go new file mode 100644 index 0000000..215636f --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision_test + +import ( + "cloud.google.com/go/vision/apiv1" + "golang.org/x/net/context" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func ExampleNewImageAnnotatorClient() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleImageAnnotatorClient_BatchAnnotateImages() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &visionpb.BatchAnnotateImagesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BatchAnnotateImages(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/mock_test.go b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go new file mode 100644 index 0000000..d34c786 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go @@ -0,0 +1,159 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockImageAnnotatorServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + visionpb.ImageAnnotatorServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockImageAnnotatorServer) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockImageAnnotator mockImageAnnotatorServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + visionpb.RegisterImageAnnotatorServer(serv, &mockImageAnnotator) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestImageAnnotatorBatchAnnotateImages(t *testing.T) { + var expectedResponse *visionpb.BatchAnnotateImagesResponse = &visionpb.BatchAnnotateImagesResponse{} + + mockImageAnnotator.err = nil + mockImageAnnotator.reqs = nil + + mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], expectedResponse) + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestImageAnnotatorBatchAnnotateImagesError(t *testing.T) { + errCode := codes.PermissionDenied + mockImageAnnotator.err = gstatus.Error(errCode, "test error") + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go new file mode 100644 index 0000000..6bd43fa --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/BatchAnnotateImages_smoke_test.go @@ -0,0 +1,80 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" +) + +import ( + "strconv" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var _ = iterator.Done +var _ = strconv.FormatUint +var _ = time.Now + +func TestImageAnnotatorSmoke(t *testing.T) { + if testing.Short() { + t.Skip("skipping smoke test in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + + projectId := testutil.ProjID() + _ = projectId + + c, err := NewImageAnnotatorClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + + var gcsImageUri string = "gs://gapic-toolkit/President_Barack_Obama.jpg" + var source = &visionpb.ImageSource{ + GcsImageUri: gcsImageUri, + } + var image = &visionpb.Image{ + Source: source, + } + var type_ visionpb.Feature_Type = visionpb.Feature_FACE_DETECTION + var featuresElement = &visionpb.Feature{ + Type: type_, + } + var features = []*visionpb.Feature{featuresElement} + var requestsElement = &visionpb.AnnotateImageRequest{ + Image: image, + Features: features, + } + var requests = []*visionpb.AnnotateImageRequest{requestsElement} + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + if _, err := c.BatchAnnotateImages(ctx, request); err != nil { + t.Error(err) + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go new file mode 100644 index 0000000..8fd08bb --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/doc.go @@ -0,0 +1,49 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package vision is an auto-generated package for the +// Google Cloud Vision API. + +// +// Integrates Google Vision features, including image labeling, face, logo, +// and +// landmark detection, optical character recognition (OCR), and detection of +// explicit content, into applications. 
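+//
+// A minimal sketch of typical use, mirroring the smoke test in this package
+// (error handling elided; the GCS URI is a placeholder):
+//
+//	c, _ := NewImageAnnotatorClient(ctx)
+//	defer c.Close()
+//	req := &visionpb.BatchAnnotateImagesRequest{
+//		Requests: []*visionpb.AnnotateImageRequest{{
+//			Image:    &visionpb.Image{Source: &visionpb.ImageSource{GcsImageUri: "gs://bucket/object.jpg"}},
+//			Features: []*visionpb.Feature{{Type: visionpb.Feature_FACE_DETECTION}},
+//		}},
+//	}
+//	resp, _ := c.BatchAnnotateImages(ctx, req)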
+package vision // import "cloud.google.com/go/vision/apiv1p1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-vision", + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go new file mode 100644 index 0000000..6157605 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client.go @@ -0,0 +1,134 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. +type ImageAnnotatorCallOptions struct { + BatchAnnotateImages []gax.CallOption +} + +func defaultImageAnnotatorClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("vision.googleapis.com:443"), + option.WithScopes(DefaultAuthScopes()...), + } +} + +func defaultImageAnnotatorCallOptions() *ImageAnnotatorCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ImageAnnotatorCallOptions{ + BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], + } +} + +// ImageAnnotatorClient is a client for interacting with Google Cloud Vision API. +type ImageAnnotatorClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + imageAnnotatorClient visionpb.ImageAnnotatorClient + + // The call options for this service. + CallOptions *ImageAnnotatorCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewImageAnnotatorClient creates a new image annotator client. 
+// +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. +func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ImageAnnotatorClient{ + conn: conn, + CallOptions: defaultImageAnnotatorCallOptions(), + + imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), + } + c.setGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ImageAnnotatorClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ImageAnnotatorClient) Close() error { + return c.conn.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ImageAnnotatorClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// BatchAnnotateImages run image detection and annotation for a batch of images. +func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest, opts ...gax.CallOption) (*visionpb.BatchAnnotateImagesResponse, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.BatchAnnotateImages[0:len(c.CallOptions.BatchAnnotateImages):len(c.CallOptions.BatchAnnotateImages)], opts...) + var resp *visionpb.BatchAnnotateImagesResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.imageAnnotatorClient.BatchAnnotateImages(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go new file mode 100644 index 0000000..923e7e4 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/image_annotator_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package vision_test + +import ( + "cloud.google.com/go/vision/apiv1p1beta1" + "golang.org/x/net/context" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" +) + +func ExampleNewImageAnnotatorClient() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleImageAnnotatorClient_BatchAnnotateImages() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &visionpb.BatchAnnotateImagesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BatchAnnotateImages(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go b/vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go new file mode 100644 index 0000000..9c9c028 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1p1beta1/mock_test.go @@ -0,0 +1,159 @@ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1p1beta1" +) + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + gstatus "google.golang.org/grpc/status" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockImageAnnotatorServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + visionpb.ImageAnnotatorServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockImageAnnotatorServer) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
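+// A sketch of how the tests below consume it (mirrors the calls they make):
+//
+//	c, err := NewImageAnnotatorClient(context.Background(), clientOpt)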
+var clientOpt option.ClientOption + +var ( + mockImageAnnotator mockImageAnnotatorServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + visionpb.RegisterImageAnnotatorServer(serv, &mockImageAnnotator) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestImageAnnotatorBatchAnnotateImages(t *testing.T) { + var expectedResponse *visionpb.BatchAnnotateImagesResponse = &visionpb.BatchAnnotateImagesResponse{} + + mockImageAnnotator.err = nil + mockImageAnnotator.reqs = nil + + mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], expectedResponse) + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestImageAnnotatorBatchAnnotateImagesError(t *testing.T) { + errCode := codes.PermissionDenied + mockImageAnnotator.err = gstatus.Error(errCode, "test error") + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if st, ok := gstatus.FromError(err); !ok { + t.Errorf("got error %v, expected grpc error", err) + } else if c := st.Code(); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/github.com/codegangsta/inject/.gitignore b/vendor/github.com/codegangsta/inject/.gitignore new file mode 100644 index 0000000..df3df8a --- /dev/null +++ b/vendor/github.com/codegangsta/inject/.gitignore @@ -0,0 +1,2 @@ +inject +inject.test diff --git a/vendor/github.com/codegangsta/inject/LICENSE b/vendor/github.com/codegangsta/inject/LICENSE new file mode 100644 index 0000000..eb68a0e --- /dev/null +++ b/vendor/github.com/codegangsta/inject/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/codegangsta/inject/README.md b/vendor/github.com/codegangsta/inject/README.md
new file mode 100644
index 0000000..1721ab4
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/README.md
@@ -0,0 +1,4 @@
+inject
+======
+
+Dependency injection for Go
diff --git a/vendor/github.com/codegangsta/inject/inject.go b/vendor/github.com/codegangsta/inject/inject.go
new file mode 100644
index 0000000..3ff713c
--- /dev/null
+++ b/vendor/github.com/codegangsta/inject/inject.go
@@ -0,0 +1,187 @@
+// Package inject provides utilities for mapping and injecting dependencies in various ways.
+package inject
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Injector represents an interface for mapping and injecting dependencies into structs
+// and function arguments.
+type Injector interface {
+	Applicator
+	Invoker
+	TypeMapper
+	// SetParent sets the parent of the injector. If the injector cannot find a
+	// dependency in its Type map it will check its parent before returning an
+	// error.
+	SetParent(Injector)
+}
+
+// Applicator represents an interface for mapping dependencies to a struct.
+type Applicator interface {
+	// Maps dependencies in the Type map to each field in the struct
+	// that is tagged with 'inject'. Returns an error if the injection
+	// fails.
+	Apply(interface{}) error
+}
+
+// Invoker represents an interface for calling functions via reflection.
+type Invoker interface {
+	// Invoke attempts to call the interface{} provided as a function,
+	// providing dependencies for function arguments based on Type. Returns
+	// a slice of reflect.Value representing the returned values of the function.
+	// Returns an error if the injection fails.
+	Invoke(interface{}) ([]reflect.Value, error)
+}
+
+// TypeMapper represents an interface for mapping interface{} values based on type.
+type TypeMapper interface {
+	// Maps the interface{} value based on its immediate type from reflect.TypeOf.
+	Map(interface{}) TypeMapper
+	// Maps the interface{} value based on the pointer of an Interface provided.
+	// This is really only useful for mapping a value as an interface, as interfaces
+	// cannot at this time be referenced directly without a pointer.
+	MapTo(interface{}, interface{}) TypeMapper
+	// Provides a possibility to directly insert a mapping based on type and value.
+	// This makes it possible to directly map type arguments that cannot be
+	// instantiated with reflect, such as unidirectional channels.
+	Set(reflect.Type, reflect.Value) TypeMapper
+	// Returns the Value that is mapped to the current type. Returns a zeroed Value if
+	// the Type has not been mapped.
+	Get(reflect.Type) reflect.Value
+}
+
+type injector struct {
+	values map[reflect.Type]reflect.Value
+	parent Injector
+}
+
+// InterfaceOf dereferences a pointer to an Interface type.
+// It panics if value is not a pointer to an interface, e.g. (*MyInterface)(nil).
+func InterfaceOf(value interface{}) reflect.Type {
+	t := reflect.TypeOf(value)
+
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	if t.Kind() != reflect.Interface {
+		panic("Called inject.InterfaceOf with a value that is not a pointer to an interface. (*MyInterface)(nil)")
+	}
+
+	return t
+}
+
+// New returns a new Injector.
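+//
+// A minimal usage sketch (the dependency and function here are illustrative
+// only, not part of the package):
+//
+//	inj := New()
+//	inj.Map("a string dependency")
+//	_, err := inj.Invoke(func(s string) {
+//		fmt.Println(s) // s receives "a string dependency"
+//	})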
+func New() Injector {
+	return &injector{
+		values: make(map[reflect.Type]reflect.Value),
+	}
+}
+
+// Invoke attempts to call the interface{} provided as a function,
+// providing dependencies for function arguments based on Type.
+// Returns a slice of reflect.Value representing the returned values of the function.
+// Returns an error if the injection fails.
+// It panics if f is not a function.
+func (inj *injector) Invoke(f interface{}) ([]reflect.Value, error) {
+	t := reflect.TypeOf(f)
+
+	var in = make([]reflect.Value, t.NumIn()) // panics if t is not of kind reflect.Func
+	for i := 0; i < t.NumIn(); i++ {
+		argType := t.In(i)
+		val := inj.Get(argType)
+		if !val.IsValid() {
+			return nil, fmt.Errorf("Value not found for type %v", argType)
+		}
+
+		in[i] = val
+	}
+
+	return reflect.ValueOf(f).Call(in), nil
+}
+
+// Maps dependencies in the Type map to each field in the struct
+// that is tagged with 'inject'.
+// Returns an error if the injection fails.
+func (inj *injector) Apply(val interface{}) error {
+	v := reflect.ValueOf(val)
+
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	if v.Kind() != reflect.Struct {
+		return nil // non-structs are silently ignored rather than treated as errors
+	}
+
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := v.Field(i)
+		structField := t.Field(i)
+		if f.CanSet() && (structField.Tag == "inject" || structField.Tag.Get("inject") != "") {
+			ft := f.Type()
+			v := inj.Get(ft)
+			if !v.IsValid() {
+				return fmt.Errorf("Value not found for type %v", ft)
+			}
+
+			f.Set(v)
+		}
+
+	}
+
+	return nil
+}
+
+// Maps the concrete value of val to its dynamic type using reflect.TypeOf,
+// and returns the TypeMapper it was registered in.
+func (i *injector) Map(val interface{}) TypeMapper {
+	i.values[reflect.TypeOf(val)] = reflect.ValueOf(val)
+	return i
+}
+
+// MapTo maps val to the interface type pointed to by ifacePtr and returns
+// the TypeMapper it was registered in.
+func (i *injector) MapTo(val interface{}, ifacePtr interface{}) TypeMapper {
+	i.values[InterfaceOf(ifacePtr)] = reflect.ValueOf(val)
+	return i
+}
+
+// Maps the given reflect.Type to the given reflect.Value and returns
+// the TypeMapper the mapping has been registered in.
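+//
+// A sketch of the unidirectional-channel case mentioned in the TypeMapper
+// docs (mirrors Test_InjectorSet later in this package):
+//
+//	typ := reflect.TypeOf("string")
+//	ch := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0)
+//	inj.Set(reflect.ChanOf(reflect.RecvDir, typ), ch)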
+func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper { + i.values[typ] = val + return i +} + +func (i *injector) Get(t reflect.Type) reflect.Value { + val := i.values[t] + + if val.IsValid() { + return val + } + + // no concrete types found, try to find implementors + // if t is an interface + if t.Kind() == reflect.Interface { + for k, v := range i.values { + if k.Implements(t) { + val = v + break + } + } + } + + // Still no type found, try to look it up on the parent + if !val.IsValid() && i.parent != nil { + val = i.parent.Get(t) + } + + return val + +} + +func (i *injector) SetParent(parent Injector) { + i.parent = parent +} diff --git a/vendor/github.com/codegangsta/inject/inject_test.go b/vendor/github.com/codegangsta/inject/inject_test.go new file mode 100644 index 0000000..b436dd0 --- /dev/null +++ b/vendor/github.com/codegangsta/inject/inject_test.go @@ -0,0 +1,159 @@ +package inject_test + +import ( + "fmt" + "github.com/codegangsta/inject" + "reflect" + "testing" +) + +type SpecialString interface { +} + +type TestStruct struct { + Dep1 string `inject:"t" json:"-"` + Dep2 SpecialString `inject` + Dep3 string +} + +type Greeter struct { + Name string +} + +func (g *Greeter) String() string { + return "Hello, My name is" + g.Name +} + +/* Test Helpers */ +func expect(t *testing.T, a interface{}, b interface{}) { + if a != b { + t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func refute(t *testing.T, a interface{}, b interface{}) { + if a == b { + t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) + } +} + +func Test_InjectorInvoke(t *testing.T) { + injector := inject.New() + expect(t, injector == nil, false) + + dep := "some dependency" + injector.Map(dep) + dep2 := "another dep" + injector.MapTo(dep2, (*SpecialString)(nil)) + dep3 := make(chan *SpecialString) + dep4 := make(chan *SpecialString) + typRecv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(dep3).Elem()) + typSend := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(dep4).Elem()) + injector.Set(typRecv, reflect.ValueOf(dep3)) + injector.Set(typSend, reflect.ValueOf(dep4)) + + _, err := injector.Invoke(func(d1 string, d2 SpecialString, d3 <-chan *SpecialString, d4 chan<- *SpecialString) { + expect(t, d1, dep) + expect(t, d2, dep2) + expect(t, reflect.TypeOf(d3).Elem(), reflect.TypeOf(dep3).Elem()) + expect(t, reflect.TypeOf(d4).Elem(), reflect.TypeOf(dep4).Elem()) + expect(t, reflect.TypeOf(d3).ChanDir(), reflect.RecvDir) + expect(t, reflect.TypeOf(d4).ChanDir(), reflect.SendDir) + }) + + expect(t, err, nil) +} + +func Test_InjectorInvokeReturnValues(t *testing.T) { + injector := inject.New() + expect(t, injector == nil, false) + + dep := "some dependency" + injector.Map(dep) + dep2 := "another dep" + injector.MapTo(dep2, (*SpecialString)(nil)) + + result, err := injector.Invoke(func(d1 string, d2 SpecialString) string { + expect(t, d1, dep) + expect(t, d2, dep2) + return "Hello world" + }) + + expect(t, result[0].String(), "Hello world") + expect(t, err, nil) +} + +func Test_InjectorApply(t *testing.T) { + injector := inject.New() + + injector.Map("a dep").MapTo("another dep", (*SpecialString)(nil)) + + s := TestStruct{} + err := injector.Apply(&s) + expect(t, err, nil) + + expect(t, s.Dep1, "a dep") + expect(t, s.Dep2, "another dep") +} + +func Test_InterfaceOf(t *testing.T) { + iType := inject.InterfaceOf((*SpecialString)(nil)) + expect(t, iType.Kind(), reflect.Interface) + + iType = 
inject.InterfaceOf((**SpecialString)(nil)) + expect(t, iType.Kind(), reflect.Interface) + + // Expecting nil + defer func() { + rec := recover() + refute(t, rec, nil) + }() + iType = inject.InterfaceOf((*testing.T)(nil)) +} + +func Test_InjectorSet(t *testing.T) { + injector := inject.New() + typ := reflect.TypeOf("string") + typSend := reflect.ChanOf(reflect.SendDir, typ) + typRecv := reflect.ChanOf(reflect.RecvDir, typ) + + // instantiating unidirectional channels is not possible using reflect + // http://golang.org/src/pkg/reflect/value.go?s=60463:60504#L2064 + chanRecv := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0) + chanSend := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0) + + injector.Set(typSend, chanSend) + injector.Set(typRecv, chanRecv) + + expect(t, injector.Get(typSend).IsValid(), true) + expect(t, injector.Get(typRecv).IsValid(), true) + expect(t, injector.Get(chanSend.Type()).IsValid(), false) +} + + +func Test_InjectorGet(t *testing.T) { + injector := inject.New() + + injector.Map("some dependency") + + expect(t, injector.Get(reflect.TypeOf("string")).IsValid(), true) + expect(t, injector.Get(reflect.TypeOf(11)).IsValid(), false) +} + +func Test_InjectorSetParent(t *testing.T) { + injector := inject.New() + injector.MapTo("another dep", (*SpecialString)(nil)) + + injector2 := inject.New() + injector2.SetParent(injector) + + expect(t, injector2.Get(inject.InterfaceOf((*SpecialString)(nil))).IsValid(), true) +} + +func TestInjectImplementors(t *testing.T) { + injector := inject.New() + g := &Greeter{"Jeremy"} + injector.Map(g) + + expect(t, injector.Get(inject.InterfaceOf((*fmt.Stringer)(nil))).IsValid(), true) +} diff --git a/vendor/github.com/codeskyblue/go-sh/LICENSE b/vendor/github.com/codeskyblue/go-sh/LICENSE new file mode 100644 index 0000000..e06d208 --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/codeskyblue/go-sh/OLD_README.md b/vendor/github.com/codeskyblue/go-sh/OLD_README.md new file mode 100644 index 0000000..7e89940 --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/OLD_README.md @@ -0,0 +1,69 @@ +## OLD README +First give you a full example, I will explain every command below. 
+
+    session := sh.NewSession()
+    session.Env["PATH"] = "/usr/bin:/bin"
+    session.Stdout = os.Stdout
+    session.Stderr = os.Stderr
+    session.Alias("ll", "ls", "-l")
+    session.ShowCMD = true // enable for debugging
+    var err error
+    err = session.Call("ll", "/")
+    if err != nil {
+        log.Fatal(err)
+    }
+    ret, err := session.Capture("pwd", sh.Dir("/home")) // wrapper of session.Call
+    if err != nil {
+        log.Fatal(err)
+    }
+    // ret is "/home\n"
+    fmt.Println(ret)
+
+create a new Session
+
+    session := sh.NewSession()
+
+use alias like this
+
+    session.Alias("ll", "ls", "-l") // like: alias ll='ls -l'
+
+set the current env like this
+
+    session.Env["BUILD_ID"] = "123" // like: export BUILD_ID=123
+
+set the current directory
+
+    session.Set(sh.Dir("/")) // like: cd /
+
+pipes are also supported
+
+    session.Command("echo", "hello\tworld").Command("cut", "-f2")
+    // output should be "world"
+    session.Run()
+
+test, the built-in command, is supported
+
+    session.Test("d", "dir")  // test dir
+    session.Test("f", "file") // test regular file
+
+With `Alias Env Set Call Capture Command`, shell scripts can easily be converted into a Go program. Below is a shell script:
+
+    #!/bin/bash -
+    #
+    export PATH=/usr/bin:/bin
+    alias ll='ls -l'
+    cd /usr
+    if test -d "local"
+    then
+        ll local | awk '{print $1, $NF}'
+    fi
+
+Converted to Go, it becomes:
+
+    s := sh.NewSession()
+    s.Env["PATH"] = "/usr/bin:/bin"
+    s.Set(sh.Dir("/usr"))
+    s.Alias("ll", "ls", "-l")
+    if s.Test("d", "local") {
+        s.Command("ll", "local").Command("awk", "{print $1, $NF}").Run()
+    }
diff --git a/vendor/github.com/codeskyblue/go-sh/README.md b/vendor/github.com/codeskyblue/go-sh/README.md
new file mode 100644
index 0000000..19d41d4
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/README.md
@@ -0,0 +1,88 @@
+## go-sh
+[![wercker status](https://app.wercker.com/status/009acbd4f00ccc6de7e2554e12a50d84/s "wercker status")](https://app.wercker.com/project/bykey/009acbd4f00ccc6de7e2554e12a50d84)
+[![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/codeskyblue/go-sh)
+
+*If you depend on the old API, see tag: v.0.1*
+
+install: `go get github.com/codeskyblue/go-sh`
+
+Pipe example:
+
+    package main
+
+    import "github.com/codeskyblue/go-sh"
+
+    func main() {
+        sh.Command("echo", "hello\tworld").Command("cut", "-f2").Run()
+    }
+
+Because I like os/exec, `go-sh` is very much modelled after it. However, `go-sh` provides a better experience.
+
+These are some of its features:
+
+* keep the variable environment (e.g. export)
+* alias support (e.g. alias in shell)
+* remember current dir
+* pipe command
+* shell built-in commands echo & test
+* timeout support
+
+Examples are important:
+
+    sh: echo hello
+    go: sh.Command("echo", "hello").Run()
+
+    sh: export BUILD_ID=123
+    go: s = sh.NewSession().SetEnv("BUILD_ID", "123")
+
+    sh: alias ll='ls -l'
+    go: s = sh.NewSession().Alias("ll", "ls", "-l")
+
+    sh: (cd /; pwd)
+    go: sh.Command("pwd", sh.Dir("/")).Run()
+
+    sh: test -d data || mkdir data
+If you need to keep the environment and the working directory across commands, it is better to create a session:
+
+    session := sh.NewSession()
+    session.SetEnv("BUILD_ID", "123")
+    session.SetDir("/")
+    // then run commands
+    session.Command("echo", "hello").Run()
+    // set ShowCMD to true for easier debugging
+    session.ShowCMD = true
+
+For more information, see the docs:
+[![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/codeskyblue/go-sh)
+
+### contribute
+If you love this project, starring it will encourage the coder. Pull requests are welcome.
+
+support the author: [alipay](https://me.alipay.com/goskyblue)
+
+### thanks
+this project is based on . thanks to the author.
+
+# the reason to use a Go shell
+Sometimes we need to write shell scripts, but shell scripts do not work well across platforms, while Go does. Is there a good way to write shell-like scripts in Go? With go-sh, there is.
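+
+As a final example, a sketch combining a timeout with the shell-style `test` (the `data` directory is illustrative):
+
+    // like: timeout 1s sleep 3
+    _, err := sh.Command("sleep", "3").SetTimeout(time.Second).Output()
+    if err == sh.ErrExecTimeout {
+        fmt.Println("killed after 1s")
+    }
+
+    // like: test -d data || mkdir data
+    if !sh.Test("dir", "data") {
+        sh.Command("mkdir", "data").Run()
+    }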
diff --git a/vendor/github.com/codeskyblue/go-sh/example/example1.go b/vendor/github.com/codeskyblue/go-sh/example/example1.go
new file mode 100644
index 0000000..cdc7f6c
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/example/example1.go
@@ -0,0 +1,41 @@
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/codeskyblue/go-sh"
+)
+
+func main() {
+	sh.Command("echo", "hello").Run()
+	out, err := sh.Command("echo", "hello").Output()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("output is", string(out))
+
+	var a int
+	sh.Command("echo", "2").UnmarshalJSON(&a)
+	fmt.Println("a =", a)
+
+	s := sh.NewSession()
+	s.Alias("hi", "echo", "hi")
+	s.Command("hi", "boy").Run()
+
+	fmt.Print("pwd = ")
+	s.Command("pwd", sh.Dir("/")).Run()
+
+	if !sh.Test("dir", "data") {
+		sh.Command("echo", "mkdir", "data").Run()
+	}
+
+	sh.Command("echo", "hello", "world").
+		Command("awk", `{print "second arg is "$2}`).Run()
+	s.ShowCMD = true
+	s.Command("echo", "hello", "world").
+		Command("awk", `{print "second arg is "$2}`).Run()
+
+	s.SetEnv("BUILD_ID", "123").Command("bash", "-c", "echo $BUILD_ID").Run()
+	s.Command("bash", "-c", "echo current shell is $SHELL").Run()
+}
diff --git a/vendor/github.com/codeskyblue/go-sh/example/less/less.go b/vendor/github.com/codeskyblue/go-sh/example/less/less.go
new file mode 100644
index 0000000..ffced2e
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/example/less/less.go
@@ -0,0 +1,7 @@
+package main
+
+import "github.com/codeskyblue/go-sh"
+
+func main() {
+	sh.Command("less", "less.go").Run()
+}
diff --git a/vendor/github.com/codeskyblue/go-sh/example/tail/tailf.go b/vendor/github.com/codeskyblue/go-sh/example/tail/tailf.go
new file mode 100644
index 0000000..b7ecd3b
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/example/tail/tailf.go
@@ -0,0 +1,17 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+
+	"github.com/codeskyblue/go-sh"
+)
+
+func main() {
+	flag.Parse()
+	if flag.NArg() != 1 {
+		fmt.Println("Usage: PROGRAM <file>")
+		return
+	}
+	sh.Command("tail", "-f", flag.Arg(0)).Run()
+}
diff --git a/vendor/github.com/codeskyblue/go-sh/example/timeout/timeout.go b/vendor/github.com/codeskyblue/go-sh/example/timeout/timeout.go
new file mode 100644
index 0000000..852908a
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/example/timeout/timeout.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	"fmt"
+	"time"
+
+	sh "github.com/codeskyblue/go-sh"
+)
+
+func main() {
+	c := sh.Command("sleep", "3")
+	c.Start()
+	err := c.WaitTimeout(time.Second * 1)
+	if err != nil {
+		fmt.Printf("a timeout should happen: %v\n", err)
+	}
+	// a timeout can also be set on the session itself
+	out, err := sh.Command("sleep", "2").SetTimeout(time.Second).Output()
+	fmt.Printf("output:(%s), err(%v)\n", string(out), err)
+
+	out, err = sh.Command("echo", "hello").SetTimeout(time.Second).Output()
+	fmt.Printf("output:(%s), err(%v)\n", string(out), err)
+}
diff --git a/vendor/github.com/codeskyblue/go-sh/example_test.go b/vendor/github.com/codeskyblue/go-sh/example_test.go
new file mode 100644
index 0000000..5abfed6
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/example_test.go
@@ -0,0 +1,28 @@
+package sh_test
+
+import (
+	"fmt"
+
+	"github.com/codeskyblue/go-sh"
+)
+
+func ExampleCommand() {
+	out, err := sh.Command("echo", "hello").Output()
+	fmt.Println(string(out), err)
+}
+
+func ExampleCommandPipe() {
+	out, err := sh.Command("echo", "-n", "hi").Command("wc", "-c").Output()
+	fmt.Println(string(out), err)
+}
+
+func ExampleCommandSetDir() {
+	out, err := sh.Command("pwd", sh.Dir("/")).Output()
+	fmt.Println(string(out), err)
+}
+
+func ExampleTest() {
+	if sh.Test("dir", "mydir") {
+		fmt.Println("mydir exists")
+	}
+}
diff --git a/vendor/github.com/codeskyblue/go-sh/pipe.go b/vendor/github.com/codeskyblue/go-sh/pipe.go
new file mode 100644
index 0000000..ee7b8da
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/pipe.go
@@ -0,0 +1,163 @@
+package sh
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"errors"
+	"io"
+	"os"
+	"strings"
+	"syscall"
+	"time"
+)
+
+var ErrExecTimeout = errors.New("execute timeout")
+
+// UnmarshalJSON runs the session and decodes its stdout as JSON
+func (s *Session) UnmarshalJSON(data interface{}) (err error) {
+	bufrw := bytes.NewBuffer(nil)
+	s.Stdout = bufrw
+	if err = s.Run(); err != nil {
+		return
+	}
+	return json.NewDecoder(bufrw).Decode(data)
+}
+
+// UnmarshalXML runs the session and decodes its stdout as XML
+func (s *Session) UnmarshalXML(data interface{}) (err error) {
+	bufrw := bytes.NewBuffer(nil)
+	s.Stdout = bufrw
+	if err = s.Run();
err != nil { + return + } + return xml.NewDecoder(bufrw).Decode(data) +} + +// start command +func (s *Session) Start() (err error) { + s.started = true + var rd *io.PipeReader + var wr *io.PipeWriter + var length = len(s.cmds) + if s.ShowCMD { + var cmds = make([]string, 0, 4) + for _, cmd := range s.cmds { + cmds = append(cmds, strings.Join(cmd.Args, " ")) + } + s.writePrompt(strings.Join(cmds, " | ")) + } + for index, cmd := range s.cmds { + if index == 0 { + cmd.Stdin = s.Stdin + } else { + cmd.Stdin = rd + } + if index != length { + rd, wr = io.Pipe() // create pipe + cmd.Stdout = wr + cmd.Stderr = os.Stderr + } + if index == length-1 { + cmd.Stdout = s.Stdout + cmd.Stderr = s.Stderr + } + err = cmd.Start() + if err != nil { + return + } + } + return +} + +// Should be call after Start() +// only catch the last command error +func (s *Session) Wait() (err error) { + for _, cmd := range s.cmds { + err = cmd.Wait() + wr, ok := cmd.Stdout.(*io.PipeWriter) + if ok { + wr.Close() + } + } + return err +} + +func (s *Session) Kill(sig os.Signal) { + for _, cmd := range s.cmds { + if cmd.Process != nil { + cmd.Process.Signal(sig) + } + } +} + +func (s *Session) WaitTimeout(timeout time.Duration) (err error) { + select { + case <-time.After(timeout): + s.Kill(syscall.SIGKILL) + return ErrExecTimeout + case err = <-Go(s.Wait): + return err + } +} + +func Go(f func() error) chan error { + ch := make(chan error) + go func() { + ch <- f() + }() + return ch +} + +func (s *Session) Run() (err error) { + if err = s.Start(); err != nil { + return + } + if s.timeout != time.Duration(0) { + return s.WaitTimeout(s.timeout) + } + return s.Wait() +} + +func (s *Session) Output() (out []byte, err error) { + oldout := s.Stdout + defer func() { + s.Stdout = oldout + }() + stdout := bytes.NewBuffer(nil) + s.Stdout = stdout + err = s.Run() + out = stdout.Bytes() + return +} + +func (s *Session) WriteStdout(f string) error { + oldout := s.Stdout + defer func() { + s.Stdout = oldout + }() + + out, err := os.Create(f) + if err != nil { + return err + } + defer out.Close() + s.Stdout = out + return s.Run() +} + +func (s *Session) CombinedOutput() (out []byte, err error) { + oldout := s.Stdout + olderr := s.Stderr + defer func() { + s.Stdout = oldout + s.Stderr = olderr + }() + stdout := bytes.NewBuffer(nil) + s.Stdout = stdout + s.Stderr = stdout + + err = s.Run() + out = stdout.Bytes() + return +} diff --git a/vendor/github.com/codeskyblue/go-sh/pipe_test.go b/vendor/github.com/codeskyblue/go-sh/pipe_test.go new file mode 100644 index 0000000..8d0e22c --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/pipe_test.go @@ -0,0 +1,126 @@ +package sh + +import ( + "encoding/xml" + "io" + "os" + "os/exec" + "strings" + "testing" + "time" +) + +func TestUnmarshalJSON(t *testing.T) { + var a int + s := NewSession() + s.ShowCMD = true + err := s.Command("echo", []string{"1"}).UnmarshalJSON(&a) + if err != nil { + t.Error(err) + } + if a != 1 { + t.Errorf("expect a tobe 1, but got %d", a) + } +} + +func TestUnmarshalXML(t *testing.T) { + s := NewSession() + xmlSample := ` +` + type server struct { + XMLName xml.Name `xml:"server"` + Version string `xml:"version,attr"` + } + data := &server{} + s.Command("echo", xmlSample).UnmarshalXML(data) + if data.Version != "1" { + t.Error(data) + } +} + +func TestPipe(t *testing.T) { + s := NewSession() + s.ShowCMD = true + s.Call("echo", "hello") + err := s.Command("echo", "hi").Command("cat", "-n").Start() + if err != nil { + t.Error(err) + } + err = s.Wait() + if err != nil { + 
t.Error(err)
+	}
+	out, err := s.Command("echo", []string{"hello"}).Output()
+	if err != nil {
+		t.Error(err)
+	}
+	if string(out) != "hello\n" {
+		t.Error("capture wrong output:", out)
+	}
+	s.Command("echo", []string{"hello\tworld"}).Command("cut", []string{"-f2"}).Run()
+}
+
+func TestPipeCommand(t *testing.T) {
+	c1 := exec.Command("echo", "good")
+	rd, wr := io.Pipe()
+	c1.Stdout = wr
+	c2 := exec.Command("cat", "-n")
+	c2.Stdout = os.Stdout
+	c2.Stdin = rd
+	c1.Start()
+	c2.Start()
+
+	c1.Wait()
+	wc, ok := c1.Stdout.(io.WriteCloser)
+	if ok {
+		wc.Close()
+	}
+	c2.Wait()
+}
+
+func TestPipeInput(t *testing.T) {
+	s := NewSession()
+	s.ShowCMD = true
+	s.SetInput("first line\nsecond line\n")
+	out, err := s.Command("grep", "second").Output()
+	if err != nil {
+		t.Error(err)
+	}
+	if string(out) != "second line\n" {
+		t.Error("capture wrong output:", out)
+	}
+}
+
+func TestTimeout(t *testing.T) {
+	s := NewSession()
+	err := s.Command("sleep", "2").Start()
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = s.WaitTimeout(time.Second)
+	if err != ErrExecTimeout {
+		t.Fatal(err)
+	}
+}
+
+func TestSetTimeout(t *testing.T) {
+	s := NewSession()
+	s.SetTimeout(time.Second)
+	defer s.SetTimeout(0)
+	err := s.Command("sleep", "2").Run()
+	if err != ErrExecTimeout {
+		t.Fatal(err)
+	}
+}
+
+func TestCombinedOutput(t *testing.T) {
+	s := NewSession()
+	bytes, err := s.Command("sh", "-c", "echo stderr >&2 ; echo stdout").CombinedOutput()
+	if err != nil {
+		t.Error(err)
+	}
+	stringOutput := string(bytes)
+	if !(strings.Contains(stringOutput, "stdout") && strings.Contains(stringOutput, "stderr")) {
+		t.Errorf("expect output from both output streams, got '%s'", strings.TrimSpace(stringOutput))
+	}
+}
diff --git a/vendor/github.com/codeskyblue/go-sh/sh.go b/vendor/github.com/codeskyblue/go-sh/sh.go
new file mode 100644
index 0000000..15f1b85
--- /dev/null
+++ b/vendor/github.com/codeskyblue/go-sh/sh.go
@@ -0,0 +1,200 @@
+/*
+Package sh is intended to make shell calls from Go easier.
+Some of its usage mirrors os/exec, e.g. Run(), Output(), Command(name, args...).
+
+On top of these similar functions, pipes are added in, and the package also has shell-session support.
+
+I love Go because its usage is simple while its power is unlimited. This package aims for the same style.
+
+	// just like os/exec
+	sh.Command("echo", "hello").Run()
+
+	// pipes are supported
+	sh.Command("echo", "hello").Command("wc", "-c").Run()
+
+	// create a session to store the dir and env
+	sh.NewSession().SetDir("/").Command("pwd")
+
+	// the shell built-in command "test"
+	sh.Test("dir", "mydir")
+
+	// like the shell call: (cd /; pwd)
+	sh.Command("pwd", sh.Dir("/")) // same as sh.Command(sh.Dir("/"), "pwd")
+
+	// unmarshal output as JSON (or XML) easily
+	v := map[string]int{}
+	err = sh.Command("echo", `{"number": 1}`).UnmarshalJSON(&v)
+*/
+package sh
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/codegangsta/inject"
+)
+
+type Dir string
+
+type Session struct {
+	inj     inject.Injector
+	alias   map[string][]string
+	cmds    []*exec.Cmd
+	dir     Dir
+	started bool
+	Env     map[string]string
+	Stdin   io.Reader
+	Stdout  io.Writer
+	Stderr  io.Writer
+	ShowCMD bool // enable for debugging
+	timeout time.Duration
+}
+
+func (s *Session) writePrompt(args ...interface{}) {
+	var ps1 = fmt.Sprintf("[golang-sh]$")
+	args = append([]interface{}{ps1}, args...)
+	fmt.Fprintln(s.Stderr, args...)
+} + +func NewSession() *Session { + env := make(map[string]string) + for _, key := range []string{"PATH"} { + env[key] = os.Getenv(key) + } + s := &Session{ + inj: inject.New(), + alias: make(map[string][]string), + dir: Dir(""), + Stdin: strings.NewReader(""), + Stdout: os.Stdout, + Stderr: os.Stderr, + Env: env, + } + return s +} + +func InteractiveSession() *Session { + s := NewSession() + s.SetStdin(os.Stdin) + return s +} + +func Command(name string, a ...interface{}) *Session { + s := NewSession() + return s.Command(name, a...) +} + +func Echo(in string) *Session { + s := NewSession() + return s.SetInput(in) +} + +func (s *Session) Alias(alias, cmd string, args ...string) { + v := []string{cmd} + v = append(v, args...) + s.alias[alias] = v +} + +func (s *Session) Command(name string, a ...interface{}) *Session { + var args = make([]string, 0) + var sType = reflect.TypeOf("") + + // init cmd, args, dir, envs + // if not init, program may panic + s.inj.Map(name).Map(args).Map(s.dir).Map(map[string]string{}) + for _, v := range a { + switch reflect.TypeOf(v) { + case sType: + args = append(args, v.(string)) + default: + s.inj.Map(v) + } + } + if len(args) != 0 { + s.inj.Map(args) + } + s.inj.Invoke(s.appendCmd) + return s +} + +// combine Command and Run +func (s *Session) Call(name string, a ...interface{}) error { + return s.Command(name, a...).Run() +} + +/* +func (s *Session) Exec(cmd string, args ...string) error { + return s.Call(cmd, args) +} +*/ + +func (s *Session) SetEnv(key, value string) *Session { + s.Env[key] = value + return s +} + +func (s *Session) SetDir(dir string) *Session { + s.dir = Dir(dir) + return s +} + +func (s *Session) SetInput(in string) *Session { + s.Stdin = strings.NewReader(in) + return s +} + +func (s *Session) SetStdin(r io.Reader) *Session { + s.Stdin = r + return s +} + +func (s *Session) SetTimeout(d time.Duration) *Session { + s.timeout = d + return s +} + +func newEnviron(env map[string]string, inherit bool) []string { //map[string]string { + environ := make([]string, 0, len(env)) + if inherit { + for _, line := range os.Environ() { + for k, _ := range env { + if strings.HasPrefix(line, k+"=") { + goto CONTINUE + } + } + environ = append(environ, line) + CONTINUE: + } + } + for k, v := range env { + environ = append(environ, k+"="+v) + } + return environ +} + +func (s *Session) appendCmd(cmd string, args []string, cwd Dir, env map[string]string) { + if s.started { + s.started = false + s.cmds = make([]*exec.Cmd, 0) + } + for k, v := range s.Env { + if _, ok := env[k]; !ok { + env[k] = v + } + } + environ := newEnviron(s.Env, true) // true: inherit sys-env + v, ok := s.alias[cmd] + if ok { + cmd = v[0] + args = append(v[1:], args...) + } + c := exec.Command(cmd, args...) 
+ c.Env = environ + c.Dir = string(cwd) + s.cmds = append(s.cmds, c) +} diff --git a/vendor/github.com/codeskyblue/go-sh/sh_test.go b/vendor/github.com/codeskyblue/go-sh/sh_test.go new file mode 100644 index 0000000..be5c279 --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/sh_test.go @@ -0,0 +1,106 @@ +package sh + +import ( + "fmt" + "log" + "runtime" + "strings" + "testing" +) + +func TestAlias(t *testing.T) { + s := NewSession() + s.Alias("gr", "echo", "hi") + out, err := s.Command("gr", "sky").Output() + if err != nil { + t.Error(err) + } + if string(out) != "hi sky\n" { + t.Errorf("expect 'hi sky' but got:%s", string(out)) + } +} + +func ExampleSession_Command() { + s := NewSession() + out, err := s.Command("echo", "hello").Output() + if err != nil { + log.Fatal(err) + } + fmt.Println(string(out)) + // Output: hello +} + +func ExampleSession_Command_pipe() { + s := NewSession() + out, err := s.Command("echo", "hello", "world").Command("awk", "{print $2}").Output() + if err != nil { + log.Fatal(err) + } + fmt.Println(string(out)) + // Output: world +} + +func ExampleSession_Alias() { + s := NewSession() + s.Alias("alias_echo_hello", "echo", "hello") + out, err := s.Command("alias_echo_hello", "world").Output() + if err != nil { + log.Fatal(err) + } + fmt.Println(string(out)) + // Output: hello world +} + +func TestEcho(t *testing.T) { + out, err := Echo("one two three").Command("wc", "-w").Output() + if err != nil { + t.Error(err) + } + if strings.TrimSpace(string(out)) != "3" { + t.Errorf("expect '3' but got:%s", string(out)) + } +} + +func TestSession(t *testing.T) { + if runtime.GOOS == "windows" { + t.Log("ignore test on windows") + return + } + session := NewSession() + session.ShowCMD = true + err := session.Call("pwd") + if err != nil { + t.Error(err) + } + out, err := session.SetDir("/").Command("pwd").Output() + if err != nil { + t.Error(err) + } + if string(out) != "/\n" { + t.Errorf("expect /, but got %s", string(out)) + } +} + +/* + #!/bin/bash - + # + export PATH=/usr/bin:/bin + alias ll='ls -l' + cd /usr + if test -d "local" + then + ll local | awk '{print $1, $NF}' | grep bin + fi +*/ +func Example(t *testing.T) { + s := NewSession() + //s.ShowCMD = true + s.Env["PATH"] = "/usr/bin:/bin" + s.SetDir("/bin") + s.Alias("ll", "ls", "-l") + + if s.Test("d", "local") { + //s.Command("ll", []string{"local"}).Command("awk", []string{"{print $1, $NF}"}).Command("grep", []string{"bin"}).Run() + s.Command("ll", "local").Command("awk", "{print $1, $NF}").Command("grep", "bin").Run() + } +} diff --git a/vendor/github.com/codeskyblue/go-sh/test.go b/vendor/github.com/codeskyblue/go-sh/test.go new file mode 100644 index 0000000..b9d3105 --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/test.go @@ -0,0 +1,64 @@ +package sh + +import ( + "os" + "path/filepath" +) + +func filetest(name string, modemask os.FileMode) (match bool, err error) { + fi, err := os.Stat(name) + if err != nil { + return + } + match = (fi.Mode() & modemask) == modemask + return +} + +func (s *Session) pwd() string { + dir := string(s.dir) + if dir == "" { + dir, _ = os.Getwd() + } + return dir +} + +func (s *Session) abspath(name string) string { + if filepath.IsAbs(name) { + return name + } + return filepath.Join(s.pwd(), name) +} + +func init() { + //log.SetFlags(log.Lshortfile | log.LstdFlags) +} + +// expression can be dir, file, link +func (s *Session) Test(expression string, argument string) bool { + var err error + var fi os.FileInfo + fi, err = os.Lstat(s.abspath(argument)) + switch expression { 
+ case "d", "dir": + return err == nil && fi.IsDir() + case "f", "file": + return err == nil && fi.Mode().IsRegular() + case "x", "executable": + /* + fmt.Println(expression, argument) + if err == nil { + fmt.Println(fi.Mode()) + } + */ + return err == nil && fi.Mode()&os.FileMode(0100) != 0 + case "L", "link": + return err == nil && fi.Mode()&os.ModeSymlink != 0 + } + return false +} + +// expression can be d,dir, f,file, link +func Test(exp string, arg string) bool { + s := NewSession() + return s.Test(exp, arg) +} diff --git a/vendor/github.com/codeskyblue/go-sh/test_test.go b/vendor/github.com/codeskyblue/go-sh/test_test.go new file mode 100644 index 0000000..15225aa --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/test_test.go @@ -0,0 +1,58 @@ +package sh_test + +import ( + "testing" + + "github.com/codeskyblue/go-sh" +) + +var s = sh.NewSession() + +type T struct{ *testing.T } + +func NewT(t *testing.T) *T { + return &T{t} +} + +func (t *T) checkTest(exp string, arg string, result bool) { + r := s.Test(exp, arg) + if r != result { + t.Errorf("test -%s %s, %v != %v", exp, arg, r, result) + } +} + +func TestTest(i *testing.T) { + t := NewT(i) + t.checkTest("d", "../go-sh", true) + t.checkTest("d", "./yymm", false) + + // file test + t.checkTest("f", "testdata/hello.txt", true) + t.checkTest("f", "testdata/xxxxx", false) + t.checkTest("f", "testdata/yymm", false) + + // link test + t.checkTest("link", "testdata/linkfile", true) + t.checkTest("link", "testdata/xxxxxlinkfile", false) + t.checkTest("link", "testdata/hello.txt", false) + + // executable test + t.checkTest("x", "testdata/executable", true) + t.checkTest("x", "testdata/xxxxx", false) + t.checkTest("x", "testdata/hello.txt", false) +} + +func ExampleShellTest(t *testing.T) { + // test -L + sh.Test("link", "testdata/linkfile") + sh.Test("L", "testdata/linkfile") + // test -f + sh.Test("file", "testdata/file") + sh.Test("f", "testdata/file") + // test -x + sh.Test("executable", "testdata/binfile") + sh.Test("x", "testdata/binfile") + // test -d + sh.Test("dir", "testdata/dir") + sh.Test("d", "testdata/dir") +} diff --git a/vendor/github.com/codeskyblue/go-sh/testdata/executable b/vendor/github.com/codeskyblue/go-sh/testdata/executable new file mode 100755 index 0000000..e69de29 diff --git a/vendor/github.com/codeskyblue/go-sh/testdata/hello.txt b/vendor/github.com/codeskyblue/go-sh/testdata/hello.txt new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/codeskyblue/go-sh/testdata/linkfile b/vendor/github.com/codeskyblue/go-sh/testdata/linkfile new file mode 120000 index 0000000..a5162f8 --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/testdata/linkfile @@ -0,0 +1 @@ +hello.txt \ No newline at end of file diff --git a/vendor/github.com/codeskyblue/go-sh/wercker.yml b/vendor/github.com/codeskyblue/go-sh/wercker.yml new file mode 100644 index 0000000..72c4702 --- /dev/null +++ b/vendor/github.com/codeskyblue/go-sh/wercker.yml @@ -0,0 +1,28 @@ +box: wercker/golang +# Build definition +build: + # The steps that will be executed on build + steps: + # Sets the go workspace and places you package + # at the right place in the workspace tree + - setup-go-workspace + + # Gets the dependencies + - script: + name: go get + code: | + cd $WERCKER_SOURCE_DIR + go version + go get -t . + + # Build the project + - script: + name: go build + code: | + go build . + + # Test the project + - script: + name: go test + code: | + go test -v ./... 
diff --git a/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..d9771f1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,21 @@ +### Issue description +Tell us what should happen and what happens instead + +### Example code +```go +If possible, please enter some example code here to reproduce the issue. +``` + +### Error log +``` +If you have an error log, please paste it here. +``` + +### Configuration +*Driver version (or git SHA):* + +*Go version:* run `go version` in your console + +*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20 + +*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10 diff --git a/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..6f5c7eb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ +### Description +Please explain the changes you made here. + +### Checklist +- [ ] Code compiles correctly +- [ ] Created tests which fail without the change (if possible) +- [ ] All tests passing +- [ ] Extended the README / documentation, if necessary +- [ ] Added myself / the copyright holder to the AUTHORS file diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 0000000..ba8e0cb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? +ehthumbs.db +Thumbs.db diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 0000000..c1cc10a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,13 @@ +sudo: false +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +before_script: + - mysql -e 'create database gotest;' diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 0000000..692c186 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,56 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Arne Hormann +Carlos Nieto +Chris Moos +Daniel Nichter +Daniël van Eeden +DisposaBoy +Frederick Mayle +Gustavo Kristic +Hanno Braun +Henri Yandell +Hirotaka Yamamoto +INADA Naoki +James Harr +Jian Zhen +Joshua Prunier +Julien Lefevre +Julien Schmidt +Kamil Dziedzic +Kevin Malachowski +Lennart Rudolph +Leonardo YongUk Kim +Luca Looz +Lucas Liu +Luke Scott +Michael Woolnough +Nicola Peduzzi +Olivier Mengué +Paul Bonser +Runrioter Wung +Soroush Pour +Stan Putrya +Stanley Gunawan +Xiangyu Hu +Xiaobing Jiang +Xiuming Chen +Zhenye Xie + +# Organizations + +Barracuda Networks, Inc. +Google Inc. +Stripe Inc. 
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 0000000..6bcad7e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,119 @@ +## Version 1.3 (2016-12-01) + +Changes: + + - Go 1.1 is no longer supported + - Use decimals fields in MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + - Refactoring (#400, #410, #437) + - Adjusted documentation for second generation CloudSQL (#485) + - Documented DSN system var quoting rules (#502) + - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) + +New Features: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Support for returning table alias on Columns() (#289, #359, #382) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) + - Support for uint64 parameters with high bit set (#332, #345) + - Cleartext authentication plugin support (#327) + - Exported ParseDSN function and the Config struct (#403, #419, #429) + - Read / Write timeouts (#401) + - Support for JSON field type (#414) + - Support for multi-statements and multi-results (#411, #431) + - DSN parameter to set the driver-side max_allowed_packet value manually (#489) + - Native password authentication plugin support (#494, #524) + +Bugfixes: + + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + - Handle ERR packets while reading rows (#321) + - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) + - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) + - Actually zero out bytes in handshake response (#378) + - Fixed race condition in registering LOAD DATA INFILE handler (#383) + - Fixed tests with MySQL 5.7.9+ (#380) + - QueryUnescape TLS config names (#397) + - Fixed "broken pipe" error by writing to closed socket (#390) + - Fixed LOAD LOCAL DATA INFILE buffering (#424) + - Fixed parsing of floats into float64 when placeholders are used (#434) + - Fixed DSN tests with Go 1.7+ (#459) + - Handle ERR packets while waiting for EOF (#473) + - Invalidate connection on error while discarding additional results (#513) + - Allow terminating packets of length 0 (#516) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. 
This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 0000000..8fe16bc --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Code Review + +Everyone is invited to review and comment on pull requests. +If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. 
+ +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 0000000..14e2f77 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. 
For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 0000000..a16012f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,443 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+---------------------------------------
+  * [Features](#features)
+  * [Requirements](#requirements)
+  * [Installation](#installation)
+  * [Usage](#usage)
+    * [DSN (Data Source Name)](#dsn-data-source-name)
+      * [Password](#password)
+      * [Protocol](#protocol)
+      * [Address](#address)
+      * [Parameters](#parameters)
+      * [Examples](#examples)
+    * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+    * [time.Time support](#timetime-support)
+    * [Unicode support](#unicode-support)
+  * [Testing / Development](#testing--development)
+  * [License](#license)
+
+---------------------------------------
+
+## Features
+  * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+  * Native Go implementation. No C-bindings, just pure Go
+  * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+  * Automatic handling of broken connections
+  * Automatic Connection Pooling *(by database/sql package)*
+  * Supports queries larger than 16MB
+  * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support
+  * Intelligent `LONG DATA` handling in prepared statements
+  * Secure `LOAD DATA LOCAL INFILE` support with file whitelisting and `io.Reader` support
+  * Optional `time.Time` parsing
+  * Optional placeholder interpolation
+
+## Requirements
+  * Go 1.2 or higher
+  * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can then use the full [`database/sql`](http://golang.org/pkg/database/sql) API.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+```go
+import "database/sql"
+import _ "github.com/go-sql-driver/mysql"
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
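+
+A minimal end-to-end sketch (the DSN credentials and address are placeholders; it assumes a reachable MySQL server):
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	// placeholder DSN; adjust user, password, address and database
+	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname")
+	if err != nil {
+		log.Fatal(err) // sql.Open only fails on an invalid DSN
+	}
+	defer db.Close()
+
+	var version string
+	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("connected to MySQL", version)
+}
+```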
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like the one used by e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts marked by squared brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct, as the sketch below shows.
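+
+For example, a short sketch using only the basic `Config` fields (all values are placeholders):
+
+```go
+import "github.com/go-sql-driver/mysql"
+
+cfg := mysql.Config{
+	User:   "user",
+	Passwd: "password",
+	Net:    "tcp",
+	Addr:   "localhost:3306",
+	DBName: "dbname",
+}
+dsn := cfg.FormatDSN() // "user:password@tcp(localhost:3306)/dbname"
+```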
##### `maxAllowedPacket`
```
Type:           decimal number
Default:        0
```

Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from the server.

##### `multiStatements`

```
Type:           bool
Valid Values:   true, false
Default:        false
```

Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned; all other results are silently discarded.

When `multiStatements` is used, `?` parameters must only be used in the first statement.

##### `parseTime`

```
Type:           bool
Valid Values:   true, false
Default:        false
```

`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.


##### `readTimeout`

```
Type:           decimal number
Default:        0
```

I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.

##### `strict`

```
Type:           bool
Valid Values:   true, false
Default:        false
```

`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.

A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.

By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.

##### `timeout`

```
Type:           decimal number
Default:        OS default
```

*Driver*-side connection timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server-side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).

##### `tls`

```
Type:           bool / string
Valid Values:   true, false, skip-verify, <name>
Default:        false
```

`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).

##### `writeTimeout`

```
Type:           decimal number
Default:        0
```

I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.


##### System Variables

Any other parameters are interpreted as system variables:
  * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
  * `<enum_var>=<value>`: `SET <enum_var>=<value>`
  * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`

Rules:
* The values for string variables must be quoted with `'`.
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
  (which implies values of string variables must be wrapped with `%27`)

Examples:
  * `autocommit=1`: `SET autocommit=1`
  * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
  * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
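To confirm that a system variable set through the DSN actually took effect, it can be read back with a `SELECT @@...` query. A sketch, assuming a reachable server and the illustrative credentials shown:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// time_zone is a string variable, so its value is quoted and escaped:
	// '+00:00' becomes %27%2B00%3A00%27.
	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname?time_zone=%27%2B00%3A00%27")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var tz string
	if err := db.QueryRow("SELECT @@time_zone").Scan(&tz); err != nil {
		panic(err)
	}
	fmt.Println(tz) // +00:00
}
```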
#### Examples
```
user@unix(/path/to/socket)/dbname
```

```
root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
```

```
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
```

Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
```
user:password@/dbname?sql_mode=TRADITIONAL
```

TCP via IPv6:
```
user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
```

TCP on a remote host, e.g. Amazon RDS:
```
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
```

Google Cloud SQL on App Engine (First Generation MySQL Server):
```
user@cloudsql(project-id:instance-name)/dbname
```

Google Cloud SQL on App Engine (Second Generation MySQL Server):
```
user@cloudsql(project-id:regionname:instance-name)/dbname
```

TCP using the default port (3306) on localhost:
```
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
```

Use the default protocol (tcp) and host (localhost:3306):
```
user:password@/dbname
```

No database preselected:
```
user:password@/
```

### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
import "github.com/go-sql-driver/mysql"
```

Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended), or the whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).

To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)`, which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers, and call `DeregisterReaderHandler` when you don't need a handler anymore.

See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.


### `time.Time` support
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.

However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.

**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).

Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
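A short sketch of both scan styles; the `events` table and its `DATETIME` columns are hypothetical:

```go
package main

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes DATE/DATETIME values scan directly into time.Time.
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var created time.Time
	if err := db.QueryRow("SELECT created_at FROM events LIMIT 1").Scan(&created); err != nil {
		panic(err)
	}
	fmt.Println(created)

	// mysql.NullTime also handles NULL values, with or without parseTime.
	var maybe mysql.NullTime
	if err := db.QueryRow("SELECT updated_at FROM events LIMIT 1").Scan(&maybe); err == nil && maybe.Valid {
		fmt.Println(maybe.Time)
	}
}
```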
### Unicode support
Since version 1.1, Go-MySQL-Driver uses the collation `utf8_general_ci` by default.

Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.

Version 1.0 of the driver recommended adding `&charset=utf8` (an alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.

See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.


## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.

Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private and commercially + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** + +Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. + +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 0000000..565614e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build appengine + +package mysql + +import ( + "appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 0000000..7da833a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,246 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + if w, ok := err.(MySQLWarnings); ok { + b.Logf("warning on %q: %v", query, w) + } else { + b.Fatalf("error on %q: %v", query, err) + } + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT 
?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkInterpolation(b *testing.B) { + mc := &mysqlConn{ + cfg: &Config{ + InterpolateParams: true, + Loc: time.UTC, + }, + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + buf: newBuffer(nil), + } + + args := []driver.Value{ + int64(42424242), + float64(math.Pi), + false, + time.Unix(1423411542, 807015000), + []byte("bytes containing special chars ' \" \a \x00"), + "string containing special chars ' \" \a \x00", + } + q := "SELECT ?, ?, ?, ?, ?, ?" + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := mc.interpolateParams(q, args) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 0000000..2001fea --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,147 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "io" + "net" + "time" +) + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. +type buffer struct { + buf []byte + nc net.Conn + idx int + length int + timeout time.Duration +} + +func newBuffer(nc net.Conn) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + nc: nc, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? + if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + if b.timeout > 0 { + if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { + return err + } + } + + nn, err := b.nc.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. 
+// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 0000000..82079cf --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,250 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation = "utf8_general_ci" + +// A list of available collations mapped to the internal ID. 
+// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + 
"ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + "utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. +var unsafeCollations = map[string]bool{ + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 0000000..d82c728 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,377 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
package mysql

import (
	"database/sql/driver"
	"net"
	"strconv"
	"strings"
	"time"
)

type mysqlConn struct {
	buf              buffer
	netConn          net.Conn
	affectedRows     uint64
	insertId         uint64
	cfg              *Config
	maxAllowedPacket int
	maxWriteSize     int
	writeTimeout     time.Duration
	flags            clientFlag
	status           statusFlag
	sequence         uint8
	parseTime        bool
	strict           bool
}

// Handles parameters set in the DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
	for param, val := range mc.cfg.Params {
		switch param {
		// Charset
		case "charset":
			charsets := strings.Split(val, ",")
			for i := range charsets {
				// ignore errors here - a charset may not exist
				err = mc.exec("SET NAMES " + charsets[i])
				if err == nil {
					break
				}
			}
			if err != nil {
				return
			}

		// System Vars
		default:
			err = mc.exec("SET " + param + "=" + val)
			if err != nil {
				return
			}
		}
	}

	return
}

func (mc *mysqlConn) Begin() (driver.Tx, error) {
	if mc.netConn == nil {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
	err := mc.exec("START TRANSACTION")
	if err == nil {
		return &mysqlTx{mc}, err
	}

	return nil, err
}

func (mc *mysqlConn) Close() (err error) {
	// Makes Close idempotent
	if mc.netConn != nil {
		err = mc.writeCommandPacket(comQuit)
	}

	mc.cleanup()

	return
}

// Closes the network connection and unsets internal variables. Do not call
// this function after successful authentication; call Close instead. This
// function is called before auth or on auth failure because MySQL will have
// already closed the network connection.
func (mc *mysqlConn) cleanup() {
	// Makes cleanup idempotent
	if mc.netConn != nil {
		if err := mc.netConn.Close(); err != nil {
			errLog.Print(err)
		}
		mc.netConn = nil
	}
	mc.cfg = nil
	mc.buf.nc = nil
}

func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
	if mc.netConn == nil {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
	// Send command
	err := mc.writeCommandPacketStr(comStmtPrepare, query)
	if err != nil {
		return nil, err
	}

	stmt := &mysqlStmt{
		mc: mc,
	}

	// Read Result
	columnCount, err := stmt.readPrepareResultPacket()
	if err == nil {
		if stmt.paramCount > 0 {
			if err = mc.readUntilEOF(); err != nil {
				return nil, err
			}
		}

		if columnCount > 0 {
			err = mc.readUntilEOF()
		}
	}

	return stmt, err
}

func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
	// The number of ? placeholders must equal len(args)
	if strings.Count(query, "?") != len(args) {
		return "", driver.ErrSkip
	}

	buf := mc.buf.takeCompleteBuffer()
	if buf == nil {
		// can not take the buffer. Something must be wrong with the connection
		errLog.Print(ErrBusyBuffer)
		return "", driver.ErrBadConn
	}
	buf = buf[:0]
	argPos := 0

	for i := 0; i < len(query); i++ {
		q := strings.IndexByte(query[i:], '?')
		if q == -1 {
			buf = append(buf, query[i:]...)
			break
		}
		buf = append(buf, query[i:i+q]...)
		i += q

		arg := args[argPos]
		argPos++

		if arg == nil {
			buf = append(buf, "NULL"...)
			continue
		}

		switch v := arg.(type) {
		case int64:
			buf = strconv.AppendInt(buf, v, 10)
		case float64:
			buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
		case bool:
			if v {
				buf = append(buf, '1')
			} else {
				buf = append(buf, '0')
			}
		case time.Time:
			if v.IsZero() {
				buf = append(buf, "'0000-00-00'"...)
+ } else { + v := v.In(mc.cfg.Loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxAllowedPacket { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, err +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err != nil { + return err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil && resLen > 0 { + if err = mc.readUntilEOF(); err != nil { + return err + } + + err = mc.readUntilEOF() + } + + return err +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + // no columns, no more data + return emptyRows{}, nil + } + // Columns + rows.columns, err = 
mc.readColumns(resLen) + return rows, err + } + } + return nil, err +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}} + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/go-sql-driver/mysql/connection_test.go new file mode 100644 index 0000000..65325f1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection_test.go @@ -0,0 +1,67 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "testing" +) + +func TestInterpolateParams(t *testing.T) { + mc := &mysqlConn{ + buf: newBuffer(nil), + maxAllowedPacket: maxPacketSize, + cfg: &Config{ + InterpolateParams: true, + }, + } + + q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"}) + if err != nil { + t.Errorf("Expected err=nil, got %#v", err) + return + } + expected := `SELECT 42+'gopher'` + if q != expected { + t.Errorf("Expected: %q\nGot: %q", expected, q) + } +} + +func TestInterpolateParamsTooManyPlaceholders(t *testing.T) { + mc := &mysqlConn{ + buf: newBuffer(nil), + maxAllowedPacket: maxPacketSize, + cfg: &Config{ + InterpolateParams: true, + }, + } + + q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)}) + if err != driver.ErrSkip { + t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q) + } +} + +// We don't support placeholder in string literal for now. +// https://github.com/go-sql-driver/mysql/pull/490 +func TestInterpolateParamsPlaceholderInString(t *testing.T) { + mc := &mysqlConn{ + buf: newBuffer(nil), + maxAllowedPacket: maxPacketSize, + cfg: &Config{ + InterpolateParams: true, + }, + } + + q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)}) + // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42` + if err != driver.ErrSkip { + t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q) + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 0000000..88cfff3 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,163 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + minProtocolVersion byte = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +const ( + fieldTypeDecimal byte = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeJSON byte = iota + 0xf5 + fieldTypeNewDecimal + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 0000000..0022d1f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,183 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
// Package mysql provides a MySQL driver for Go's database/sql package
//
// The driver should be used via the database/sql package:
//
//  import "database/sql"
//  import _ "github.com/go-sql-driver/mysql"
//
//  db, err := sql.Open("mysql", "user:password@/dbname")
//
// See https://github.com/go-sql-driver/mysql#usage for details
package mysql

import (
	"database/sql"
	"database/sql/driver"
	"net"
)

// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}

// DialFunc is a function which can be used to establish the network connection.
// Custom dial functions must be registered with RegisterDial
type DialFunc func(addr string) (net.Conn, error)

var dials map[string]DialFunc

// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
func RegisterDial(net string, dial DialFunc) {
	if dials == nil {
		dials = make(map[string]DialFunc)
	}
	dials[net] = dial
}

// Open opens a new connection.
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
// the DSN string is formatted
func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
	var err error

	// New mysqlConn
	mc := &mysqlConn{
		maxAllowedPacket: maxPacketSize,
		maxWriteSize:     maxPacketSize - 1,
	}
	mc.cfg, err = ParseDSN(dsn)
	if err != nil {
		return nil, err
	}
	mc.parseTime = mc.cfg.ParseTime
	mc.strict = mc.cfg.Strict

	// Connect to Server
	if dial, ok := dials[mc.cfg.Net]; ok {
		mc.netConn, err = dial(mc.cfg.Addr)
	} else {
		nd := net.Dialer{Timeout: mc.cfg.Timeout}
		mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
	}
	if err != nil {
		return nil, err
	}

	// Enable TCP Keepalives on TCP connections
	if tc, ok := mc.netConn.(*net.TCPConn); ok {
		if err := tc.SetKeepAlive(true); err != nil {
			// Don't send COM_QUIT before handshake.
			mc.netConn.Close()
			mc.netConn = nil
			return nil, err
		}
	}

	mc.buf = newBuffer(mc.netConn)

	// Set I/O timeouts
	mc.buf.timeout = mc.cfg.ReadTimeout
	mc.writeTimeout = mc.cfg.WriteTimeout

	// Reading Handshake Initialization Packet
	cipher, err := mc.readInitPacket()
	if err != nil {
		mc.cleanup()
		return nil, err
	}

	// Send Client Authentication Packet
	if err = mc.writeAuthPacket(cipher); err != nil {
		mc.cleanup()
		return nil, err
	}

	// Handle response to auth packet, switch methods if possible
	if err = handleAuthResult(mc, cipher); err != nil {
		// Authentication failed and MySQL has already closed the connection
		// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
		// Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func handleAuthResult(mc *mysqlConn, oldCipher []byte) error { + // Read Result Packet + cipher, err := mc.readResultOK() + if err == nil { + return nil // auth successful + } + + if mc.cfg == nil { + return err // auth failed and retry not possible + } + + // Retry auth if configured to do so. + if mc.cfg.AllowOldPasswords && err == ErrOldPassword { + // Retry with old authentication method. Note: there are edge cases + // where this should work but doesn't; this is currently "wontfix": + // https://github.com/go-sql-driver/mysql/issues/184 + + // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is + // sent and we have to keep using the cipher sent in the init packet. + if cipher == nil { + cipher = oldCipher + } + + if err = mc.writeOldAuthPacket(cipher); err != nil { + return err + } + _, err = mc.readResultOK() + } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword { + // Retry with clear text password for + // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html + // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html + if err = mc.writeClearAuthPacket(); err != nil { + return err + } + _, err = mc.readResultOK() + } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword { + if err = mc.writeNativeAuthPacket(cipher); err != nil { + return err + } + _, err = mc.readResultOK() + } + return err +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 0000000..78e68f5 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1904 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/url" + "os" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + user string + pass string + prot string + addr string + dbname string + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + // get environment variables + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user = env("MYSQL_TEST_USER", "root") + pass = env("MYSQL_TEST_PASS", "") + prot = env("MYSQL_TEST_PROT", "tcp") + addr = env("MYSQL_TEST_ADDR", "localhost:3306") + dbname = env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + dsn += "&multiStatements=true" + var db *sql.DB + if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation { + db, err = sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + } + + dbt := &DBTest{t, db} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + } +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dsn2 := dsn + "&interpolateParams=true" + var db2 *sql.DB + if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation { + db2, err = sql.Open("mysql", dsn2) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db2.Close() + } + + dsn3 := dsn + "&multiStatements=true" + var db3 *sql.DB + if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation { + db3, err = sql.Open("mysql", dsn3) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db3.Close() + } + + dbt := &DBTest{t, db} + dbt2 := &DBTest{t, db2} + dbt3 := &DBTest{t, db3} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + if db2 != nil { + test(dbt2) + dbt2.db.Exec("DROP TABLE IF EXISTS test") + } + if db3 != nil { + test(dbt3) + dbt3.db.Exec("DROP TABLE IF EXISTS test") + } + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) 
+ if err != nil { + dbt.fail("exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("query", query, err) + } + return rows +} + +func TestEmptyQuery(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // just a comment, no query + rows := dbt.mustQuery("--") + // will hang before #255 + if rows.Next() { + dbt.Errorf("next on rows must be false") + } + }) +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("expected InsertId 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("expected 0 affected row, got %d", count) + } + }) +} + +func TestMultiQuery(t *testing.T) { + runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ") + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1, 1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Update + res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Read + var out int + rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;") 
+ if rows.Next() { + rows.Scan(&out) + if 5 != out { + dbt.Errorf("5 != %d", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat32(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat64(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + var expected float64 = 42.23 + var out float64 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (42.23)") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if expected != out { + dbt.Errorf("%s: %g != %g", v, expected, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat64Placeholder(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + var expected float64 = 42.23 + var out float64 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (id int, value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (1, 42.23)") + rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1) + if rows.Next() { + rows.Scan(&out) + if expected != out { + dbt.Errorf("%s: %g != %g", v, expected, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s 
!= %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." + dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +type timeTests struct { + dbtype string + tlayout string + tests []timeTest +} + +type timeTest struct { + s string // leading "!": do not use t as value in queries + t time.Time +} + +type timeMode byte + +func (t timeMode) String() string { + switch t { + case binaryString: + return "binary:string" + case binaryTime: + return "binary:time.Time" + case textString: + return "text:string" + } + panic("unsupported timeMode") +} + +func (t timeMode) Binary() bool { + switch t { + case binaryString, binaryTime: + return true + } + return false +} + +const ( + binaryString timeMode = iota + binaryTime + textString +) + +func (t timeTest) genQuery(dbtype string, mode timeMode) string { + var inner string + if mode.Binary() { + inner = "?" 
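+ // the two binary modes bind the value as a statement parameter below;
+ // the text mode interpolates the formatted value into the query string in run()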
+ } else { + inner = `"%s"` + } + return `SELECT cast(` + inner + ` as ` + dbtype + `)` +} + +func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { + var rows *sql.Rows + query := t.genQuery(dbtype, mode) + switch mode { + case binaryString: + rows = dbt.mustQuery(query, t.s) + case binaryTime: + rows = dbt.mustQuery(query, t.t) + case textString: + query = fmt.Sprintf(query, t.s) + rows = dbt.mustQuery(query) + default: + panic("unsupported mode") + } + defer rows.Close() + var err error + if !rows.Next() { + err = rows.Err() + if err == nil { + err = fmt.Errorf("no data") + } + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + var dst interface{} + err = rows.Scan(&dst) + if err != nil { + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + switch val := dst.(type) { + case []uint8: + str := string(val) + if str == t.s { + return + } + if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { + // a fix mainly for TravisCI: + // accept full microsecond resolution in result for DATETIME columns + // where the binary protocol was used + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, str, + ) + case time.Time: + if val == t.t { + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, val.Format(tlayout), + ) + default: + fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) + dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", + dbtype, mode, + val, val, + ) + } +} + +func TestDateTime(t *testing.T) { + afterTime := func(t time.Time, d string) time.Time { + dur, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return t.Add(dur) + } + // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests + format := "2006-01-02 15:04:05.999999" + t0 := time.Time{} + tstr0 := "0000-00-00 00:00:00.000000" + testcases := []timeTests{ + {"DATE", format[:10], []timeTest{ + {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)}, + {t: t0, s: tstr0[:10]}, + }}, + {"DATETIME", format[:19], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(0)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(1)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, + {t: t0, s: tstr0[:21]}, + }}, + {"DATETIME(6)", format, []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)}, + {t: t0, s: tstr0}, + }}, + {"TIME", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(0)", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(1)", format[11:21], []timeTest{ + {t: afterTime(t0, "12345600ms")}, + {s: "!-12:34:56.7"}, + {s: "!-838:59:58.9"}, + {s: "!838:59:58.9"}, + {t: t0, s: tstr0[11:21]}, + }}, + {"TIME(6)", format[11:], []timeTest{ + {t: afterTime(t0, "1234567890123000ns")}, + {s: "!-12:34:56.789012"}, + {s: "!-838:59:58.999999"}, + {s: "!838:59:58.999999"}, + {t: t0, s: tstr0[11:]}, + }}, + } + dsns := []string{ + dsn + "&parseTime=true", + dsn + "&parseTime=false", + } + for _, testdsn := range dsns { + runTests(t, testdsn, func(dbt *DBTest) { + microsecsSupported := false + zeroDateSupported := false + var rows *sql.Rows + var err 
error
+ rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`)
+ if err == nil {
+ if rows.Next() {
+ rows.Scan(&microsecsSupported)
+ }
+ rows.Close()
+ }
+ rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`)
+ if err == nil {
+ if rows.Next() {
+ rows.Scan(&zeroDateSupported)
+ }
+ rows.Close()
+ }
+ for _, setups := range testcases {
+ if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" {
+ // skip fractional second tests if unsupported by server
+ continue
+ }
+ for _, setup := range setups.tests {
+ allowBinTime := true
+ if setup.s == "" {
+ // fill the time string wherever Go can reliably produce it
+ setup.s = setup.t.Format(setups.tlayout)
+ } else if setup.s[0] == '!' {
+ // skip tests using setup.t as source in queries
+ allowBinTime = false
+ // fix setup.s - remove the "!"
+ setup.s = setup.s[1:]
+ }
+ if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] {
+ // skip disallowed 0000-00-00 date
+ continue
+ }
+ setup.run(dbt, setups.dbtype, setups.tlayout, textString)
+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryString)
+ if allowBinTime {
+ setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime)
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestTimestampMicros(t *testing.T) {
+ format := "2006-01-02 15:04:05.999999"
+ f0 := format[:19]
+ f1 := format[:21]
+ f6 := format[:26]
+ runTests(t, dsn, func(dbt *DBTest) {
+ // check if microseconds are supported.
+ // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width
+ // and not precision.
+ // See the last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html
+ microsecsSupported := false
+ if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil {
+ if rows.Next() {
+ rows.Scan(&microsecsSupported)
+ }
+ rows.Close()
+ }
+ if !microsecsSupported {
+ // skip test
+ return
+ }
+ _, err := dbt.db.Exec(`
+ CREATE TABLE test (
+ value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `',
+ value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `',
+ value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `'
+ )`,
+ )
+ if err != nil {
+ dbt.Error(err)
+ }
+ defer dbt.mustExec("DROP TABLE IF EXISTS test")
+ dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6)
+ var res0, res1, res6 string
+ rows := dbt.mustQuery("SELECT * FROM test")
+ if !rows.Next() {
+ dbt.Errorf("test contained no selectable values")
+ }
+ err = rows.Scan(&res0, &res1, &res6)
+ if err != nil {
+ dbt.Error(err)
+ }
+ if res0 != f0 {
+ dbt.Errorf("expected %q, got %q", f0, res0)
+ }
+ if res1 != f1 {
+ dbt.Errorf("expected %q, got %q", f1, res1)
+ }
+ if res6 != f6 {
+ dbt.Errorf("expected %q, got %q", f6, res6)
+ }
+ })
+}
+
+func TestNULL(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ nullStmt, err := dbt.db.Prepare("SELECT NULL")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer nullStmt.Close()
+
+ nonNullStmt, err := dbt.db.Prepare("SELECT 1")
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer nonNullStmt.Close()
+
+ // NullBool
+ var nb sql.NullBool
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&nb); err != nil {
+ dbt.Fatal(err)
+ }
+ if nb.Valid {
+ dbt.Error("valid NullBool which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&nb); err != nil {
+ dbt.Fatal(err)
+ }
+ if !nb.Valid {
+ dbt.Error("invalid NullBool which should be valid")
+ } else if nb.Bool != true {
+ dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool)
+ }
+
+ // NullFloat64
+ var nf sql.NullFloat64
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&nf); err != nil {
+ dbt.Fatal(err)
+ }
+ if nf.Valid {
+ dbt.Error("valid NullFloat64 which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&nf); err != nil {
+ dbt.Fatal(err)
+ }
+ if !nf.Valid {
+ dbt.Error("invalid NullFloat64 which should be valid")
+ } else if nf.Float64 != float64(1) {
+ dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64)
+ }
+
+ // NullInt64
+ var ni sql.NullInt64
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&ni); err != nil {
+ dbt.Fatal(err)
+ }
+ if ni.Valid {
+ dbt.Error("valid NullInt64 which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&ni); err != nil {
+ dbt.Fatal(err)
+ }
+ if !ni.Valid {
+ dbt.Error("invalid NullInt64 which should be valid")
+ } else if ni.Int64 != int64(1) {
+ dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64)
+ }
+
+ // NullString
+ var ns sql.NullString
+ // Invalid
+ if err = nullStmt.QueryRow().Scan(&ns); err != nil {
+ dbt.Fatal(err)
+ }
+ if ns.Valid {
+ dbt.Error("valid NullString which should be invalid")
+ }
+ // Valid
+ if err = nonNullStmt.QueryRow().Scan(&ns); err != nil {
+ dbt.Fatal(err)
+ }
+ if !ns.Valid {
+ dbt.Error("invalid NullString which should be valid")
+ } else if ns.String != `1` {
+ dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)")
+ }
+
+ // nil-bytes
+ var b []byte
+ // Read nil
+ if err = nullStmt.QueryRow().Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b != nil {
+ dbt.Error("non-nil []byte which should be nil")
+ }
+ // Read non-nil
+ if err = nonNullStmt.QueryRow().Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b == nil {
+ dbt.Error("nil []byte which should be non-nil")
+ }
+ // Insert nil
+ b = nil
+ success := false
+ if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil {
+ dbt.Fatal(err)
+ }
+ if !success {
+ dbt.Error("inserting []byte(nil) as NULL failed")
+ }
+ // Check input==output with input==nil
+ b = nil
+ if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b != nil {
+ dbt.Error("non-nil echo from nil input")
+ }
+ // Check input==output with input!=nil
+ b = []byte("")
+ if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil {
+ dbt.Fatal(err)
+ }
+ if b == nil {
+ dbt.Error("nil echo from non-nil input")
+ }
+
+ // Insert NULL
+ dbt.mustExec("CREATE TABLE test (dummy1 int, value int, dummy2 int)")
+
+ dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2)
+
+ var out interface{}
+ rows := dbt.mustQuery("SELECT * FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if out != nil {
+ dbt.Errorf("%v != nil", out)
+ }
+ } else {
+ dbt.Error("no data")
+ }
+ })
+}
+
+func TestUint64(t *testing.T) {
+ const (
+ u0 = uint64(0)
+ uall = ^u0
+ uhigh = uall >> 1
+ utop = ^uhigh
+ s0 = int64(0)
+ sall = ^s0
+ shigh = int64(uhigh)
+ stop = ^shigh
+ )
+ runTests(t, dsn, func(dbt *DBTest) {
+ stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? ,?, ?, ?, ?, ?`)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer stmt.Close()
+ row := stmt.QueryRow(
+ u0, uhigh, utop, uall,
+ s0, shigh, stop, sall,
+ )
+
+ var ua, ub, uc, ud uint64
+ var sa, sb, sc, sd int64
+
+ err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ switch {
+ case ua != u0,
+ ub != uhigh,
+ uc != utop,
+ ud != uall,
+ sa != s0,
+ sb != shigh,
+ sc != stop,
+ sd != sall:
+ dbt.Fatal("unexpected result value")
+ }
+ })
+}
+
+func TestLongData(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ var maxAllowedPacketSize int
+ err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ maxAllowedPacketSize--
+
+ // don't get too ambitious
+ if maxAllowedPacketSize > 1<<25 {
+ maxAllowedPacketSize = 1 << 25
+ }
+
+ dbt.mustExec("CREATE TABLE test (value LONGBLOB)")
+
+ in := strings.Repeat(`a`, maxAllowedPacketSize+1)
+ var out string
+ var rows *sql.Rows
+
+ // Long text data
+ const nonDataQueryLen = 28 // length query w/o value
+ inS := in[:maxAllowedPacketSize-nonDataQueryLen]
+ dbt.mustExec("INSERT INTO test VALUES('" + inS + "')")
+ rows = dbt.mustQuery("SELECT value FROM test")
+ if rows.Next() {
+ rows.Scan(&out)
+ if inS != out {
+ dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out))
+ }
+ if rows.Next() {
+ dbt.Error("LONGBLOB: unexpected row")
+ }
+ } else {
+ dbt.Fatalf("LONGBLOB: no data")
+ }
+
+ // Empty table
+ dbt.mustExec("TRUNCATE TABLE test")
+
+ // Long binary data
+ dbt.mustExec("INSERT INTO test VALUES(?)", in)
+ rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1)
+ if rows.Next() {
+ rows.Scan(&out)
+ if in != out {
+ dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out))
+ }
+ if rows.Next() {
+ dbt.Error("LONGBLOB: unexpected row")
+ }
+ } else {
+ if err = rows.Err(); err != nil {
+ dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error())
+ } else {
+ dbt.Fatal("LONGBLOB: no data (err: <nil>)")
+ }
+ }
+ })
+}
+
+func TestLoadData(t *testing.T) {
+ runTests(t, dsn, func(dbt *DBTest) {
+ verifyLoadDataResult := func() {
+ rows, err := dbt.db.Query("SELECT * FROM test")
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+
+ i := 0
+ values := [4]string{
+ "a string",
+ "a string containing a \t",
+ "a string containing a \n",
+ "a string containing both \t\n",
+ }
+
+ var id int
+ var value string
+
+ for rows.Next() {
+ i++
+ err = rows.Scan(&id, &value)
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+ if i != id {
+ dbt.Fatalf("%d != %d", i, id)
+ }
+ if values[i-1] != value {
+ dbt.Fatalf("%q != %q", values[i-1], value)
+ }
+ }
+ err = rows.Err()
+ if err != nil {
+ dbt.Fatal(err.Error())
+ }
+
+ if i != 4 {
+ dbt.Fatalf("rows count mismatch. 
Got %d, want 4", i) + } + } + file, err := ioutil.TempFile("", "gotest") + defer os.Remove(file.Name()) + if err != nil { + dbt.Fatal(err) + } + file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") + file.Close() + + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") + + // Local File + RegisterLocalFile(file.Name()) + dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name())) + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("load non-existent file didn't fail") + } else if err.Error() != "local file 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Reader + RegisterReaderHandler("test", func() io.Reader { + file, err = os.Open(file.Name()) + if err != nil { + dbt.Fatal(err) + } + return file + }) + dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("load non-existent Reader didn't fail") + } else if err.Error() != "Reader 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + }) +} + +func TestFoundRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + }) + runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 matched rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode='ALLOW_INVALID_DATES,NO_AUTO_CREATE_USER'" + // make sure the MySQL version is recent enough with a separate connection + // before running the test + conn, err := MySQLDriver{}.Open(relaxedDsn) + if conn != nil { + conn.Close() + } + if me, ok := err.(*MySQLError); ok && me.Number == 1231 { + // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES' + // 
=> skip test, MySQL server version is too old + return + } + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != len(queries[idx].codes) { + dbt.Errorf("unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == ErrNoTLS { + dbt.Skip("server does not support TLS") + } else { + dbt.Fatalf("error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("no Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + md := &MySQLDriver{} + conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + stmt, err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("error closing connection: %s", err.Error()) + } + + defer func() { + if err := recover(); err != nil { + t.Errorf("panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + 
t.Skipf("MySQL server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... + _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("connection must not succeed without a valid charset") + } + }) +} + +func TestCollation(t *testing.T) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + defaultCollation := "utf8_general_ci" + testCollations := []string{ + "", // do not set + defaultCollation, // driver default + "latin1_general_ci", + "binary", + "utf8_unicode_ci", + "cp1257_bin", + } + + for _, collation := range testCollations { + var expected, tdsn string + if collation != "" { + tdsn = dsn + "&collation=" + collation + expected = collation + } else { + tdsn = dsn + expected = defaultCollation + } + + runTests(t, tdsn, func(dbt *DBTest) { + var got string + if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { + dbt.Fatal(err) + } + + if got != expected { + dbt.Fatalf("expected connection collation %s but got %s", expected, got) + } + }) + } +} + +func TestColumnsWithAlias(t *testing.T) { + runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT 1 AS A") + defer rows.Close() + cols, _ := rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A" { + t.Fatalf("expected column name \"A\", got \"%s\"", cols[0]) + } + rows.Close() + + rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A") + cols, _ = rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A.one" { + t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0]) + } + }) +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got none") + } + var result sql.RawBytes + rows.Scan(&result) + if expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", reftime) + + // 
Retrieve time from DB + rows := dbt.mustQuery("SELECT ts FROM test") + if !rows.Next() { + dbt.Fatal("did not get any rows out") + } + + var dbTime time.Time + err := rows.Scan(&dbTime) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if reftime.Unix() != dbTime.Unix() { + dbt.Errorf("times do not match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) + dbt.Errorf(" Now(UTC)=%v\n", dbTime) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("getting row failed") + } else { + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("first rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("first rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("second rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("second rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("unexpected row on rows2") + } + err = 
rows2.Close()
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ }
+ })
+}
+
+// Regression test for
+// * more than 32 NULL parameters (issue 209)
+// * more parameters than fit into the buffer (issue 201)
+func TestPreparedManyCols(t *testing.T) {
+ const numParams = defaultBufSize
+ runTests(t, dsn, func(dbt *DBTest) {
+ query := "SELECT ?" + strings.Repeat(",?", numParams-1)
+ stmt, err := dbt.db.Prepare(query)
+ if err != nil {
+ dbt.Fatal(err)
+ }
+ defer stmt.Close()
+ // create more parameters than fit into the buffer
+ // which will take nil-values
+ params := make([]interface{}, numParams)
+ rows, err := stmt.Query(params...)
+ if err != nil {
+ stmt.Close()
+ dbt.Fatal(err)
+ }
+ defer rows.Close()
+ })
+}
+
+func TestConcurrent(t *testing.T) {
+ if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled {
+ t.Skip("MYSQL_TEST_CONCURRENT env var not set")
+ }
+
+ runTests(t, dsn, func(dbt *DBTest) {
+ var max int
+ err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max)
+ if err != nil {
+ dbt.Fatalf("%s", err.Error())
+ }
+ dbt.Logf("testing up to %d concurrent connections \r\n", max)
+
+ var remaining, succeeded int32 = int32(max), 0
+
+ var wg sync.WaitGroup
+ wg.Add(max)
+
+ var fatalError string
+ var once sync.Once
+ fatalf := func(s string, vals ...interface{}) {
+ once.Do(func() {
+ fatalError = fmt.Sprintf(s, vals...)
+ })
+ }
+
+ for i := 0; i < max; i++ {
+ go func(id int) {
+ defer wg.Done()
+
+ tx, err := dbt.db.Begin()
+ atomic.AddInt32(&remaining, -1)
+
+ if err != nil {
+ if err.Error() != "Error 1040: Too many connections" {
+ fatalf("error on conn %d: %s", id, err.Error())
+ }
+ return
+ }
+
+ // keep the connection busy until all connections are open
+ for remaining > 0 {
+ if _, err = tx.Exec("DO 1"); err != nil {
+ fatalf("error on conn %d: %s", id, err.Error())
+ return
+ }
+ }
+
+ if err = tx.Commit(); err != nil {
+ fatalf("error on conn %d: %s", id, err.Error())
+ return
+ }
+
+ // everything went fine with this connection
+ atomic.AddInt32(&succeeded, 1)
+ }(i)
+ }
+
+ // wait until all connections are open
+ wg.Wait()
+
+ if fatalError != "" {
+ dbt.Fatal(fatalError)
+ }
+
+ dbt.Logf("reached %d concurrent connections\r\n", succeeded)
+ })
+}
+
+// Tests custom dial functions
+func TestCustomDial(t *testing.T) {
+ if !available {
+ t.Skipf("MySQL server not running on %s", netAddr)
+ }
+
+ // our custom dial function which just wraps net.Dial here
+ RegisterDial("mydial", func(addr string) (net.Conn, error) {
+ return net.Dial(prot, addr)
+ })
+
+ db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname))
+ if err != nil {
+ t.Fatalf("error connecting: %s", err.Error())
+ }
+ defer db.Close()
+
+ if _, err = db.Exec("DO 1"); err != nil {
+ t.Fatalf("connection failed: %s", err.Error())
+ }
+}
+
+func TestSQLInjection(t *testing.T) {
+ createTest := func(arg string) func(dbt *DBTest) {
+ return func(dbt *DBTest) {
+ dbt.mustExec("CREATE TABLE test (v INTEGER)")
+ dbt.mustExec("INSERT INTO test VALUES (?)", 1)
+
+ var v int
+ // NULL can't be equal to anything; the idea here is to inject a query so it returns a row
+ // This test verifies that escapeQuotes and escapeBackslash are working properly
+ err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v)
+ if err == sql.ErrNoRows {
+ return // success, sql injection failed
+ } else if err == nil {
+ dbt.Errorf("sql injection successful with arg: %s", arg)
+ } else {
+ dbt.Errorf("error running query with arg: %s; err: %s", arg, 
err.Error()) + } + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'", + } + for _, testdsn := range dsns { + runTests(t, testdsn, createTest("1 OR 1=1")) + runTests(t, testdsn, createTest("' OR '1'='1")) + } +} + +// Test if inserted data is correctly retrieved after being escaped +func TestInsertRetrieveEscapedData(t *testing.T) { + testData := func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v VARCHAR(255))") + + // All sequences that are escaped by escapeQuotes and escapeBackslash + v := "foo \x00\n\r\x1a\"'\\" + dbt.mustExec("INSERT INTO test VALUES (?)", v) + + var out string + err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + + if out != v { + dbt.Errorf("%q != %q", out, v) + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'", + } + for _, testdsn := range dsns { + runTests(t, testdsn, testData) + } +} + +func TestUnixSocketAuthFail(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Save the current logger so we can restore it. + oldLogger := errLog + + // Set a new logger so we can capture its output. + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + newLogger := log.New(buffer, "prefix: ", 0) + SetLogger(newLogger) + + // Restore the logger. + defer SetLogger(oldLogger) + + // Make a new DSN that uses the MySQL socket file and a bad password, which + // we can make by simply appending any character to the real password. + badPass := pass + "x" + socket := "" + if prot == "unix" { + socket = addr + } else { + // Get socket file from MySQL. + err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket) + if err != nil { + t.Fatalf("error on SELECT @@socket: %s", err.Error()) + } + } + t.Logf("socket: %s", socket) + badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s&strict=true", user, badPass, socket, dbname) + db, err := sql.Open("mysql", badDSN) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + // Connect to MySQL for real. This will cause an auth failure. + err = db.Ping() + if err == nil { + t.Error("expected Ping() to return an error") + } + + // The driver should not log anything. 
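+ // The auth failure is already returned to the caller by Ping above;
+ // nothing should reach the critical-error logger.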
+ if actual := buffer.String(); actual != "" { + t.Errorf("expected no output, got %q", actual) + } + }) +} + +// See Issue #422 +func TestInterruptBySignal(t *testing.T) { + runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) { + dbt.mustExec(` + DROP PROCEDURE IF EXISTS test_signal; + CREATE PROCEDURE test_signal(ret INT) + BEGIN + SELECT ret; + SIGNAL SQLSTATE + '45001' + SET + MESSAGE_TEXT = "an error", + MYSQL_ERRNO = 45001; + END + `) + defer dbt.mustExec("DROP PROCEDURE test_signal") + + var val int + + // text protocol + rows, err := dbt.db.Query("CALL test_signal(42)") + if err != nil { + dbt.Fatalf("error on text query: %s", err.Error()) + } + for rows.Next() { + if err := rows.Scan(&val); err != nil { + dbt.Error(err) + } else if val != 42 { + dbt.Errorf("expected val to be 42") + } + } + + // binary protocol + rows, err = dbt.db.Query("CALL test_signal(?)", 42) + if err != nil { + dbt.Fatalf("error on binary query: %s", err.Error()) + } + for rows.Next() { + if err := rows.Scan(&val); err != nil { + dbt.Error(err) + } else if val != 42 { + dbt.Errorf("expected val to be 42") + } + } + }) +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go new file mode 100644 index 0000000..ac00dce --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -0,0 +1,548 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") +) + +// Config is a configuration parsed from a DSN string +type Config struct { + User string // Username + Passwd string // Password (requires User) + Net string // Network type + Addr string // Network address (requires Net) + DBName string // Database name + Params map[string]string // Connection parameters + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + TLSConfig string // TLS configuration name + tls *tls.Config // TLS configuration + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + + AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE + AllowCleartextPasswords bool // Allows the cleartext client side plugin + AllowNativePasswords bool // Allows the native password authentication method + AllowOldPasswords bool // Allows the old insecure password method + ClientFoundRows bool // Return number of matching rows instead of rows changed + ColumnsWithAlias bool // Prepend table alias to column names + InterpolateParams bool // Interpolate placeholders into query string + MultiStatements bool // Allow multiple statements in one query + ParseTime bool // Parse time 
values to time.Time
+ Strict bool // Return warnings as errors
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...&paramN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ if hasParam {
+ buf.WriteString("&allowCleartextPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowCleartextPasswords=true")
+ }
+ }
+
+ if cfg.AllowNativePasswords {
+ if hasParam {
+ buf.WriteString("&allowNativePasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowNativePasswords=true")
+ }
+ }
+
+ if cfg.AllowOldPasswords {
+ if hasParam {
+ buf.WriteString("&allowOldPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowOldPasswords=true")
+ }
+ }
+
+ if cfg.ClientFoundRows {
+ if hasParam {
+ buf.WriteString("&clientFoundRows=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?clientFoundRows=true")
+ }
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if hasParam {
+ buf.WriteString("&collation=")
+ } else {
+ hasParam = true
+ buf.WriteString("?collation=")
+ }
+ buf.WriteString(col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ if hasParam {
+ buf.WriteString("&columnsWithAlias=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?columnsWithAlias=true")
+ }
+ }
+
+ if cfg.InterpolateParams {
+ if hasParam {
+ buf.WriteString("&interpolateParams=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?interpolateParams=true")
+ }
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ if hasParam {
+ buf.WriteString("&loc=")
+ } else {
+ hasParam = true
+ buf.WriteString("?loc=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ if hasParam {
+ buf.WriteString("&multiStatements=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?multiStatements=true")
+ }
+ }
+
+ if cfg.ParseTime {
+ if hasParam {
+ buf.WriteString("&parseTime=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?parseTime=true")
+ }
+ }
+
+ if cfg.ReadTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&readTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?readTimeout=")
+ }
+ buf.WriteString(cfg.ReadTimeout.String())
+ }
+
+ if cfg.Strict {
+ if hasParam {
+ buf.WriteString("&strict=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?strict=true")
+ }
+ }
+
+ if cfg.Timeout > 0 {
+ if hasParam {
+ buf.WriteString("&timeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?timeout=")
+ }
+ buf.WriteString(cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ if hasParam {
+ buf.WriteString("&tls=")
+ } else {
+ hasParam = true
+ buf.WriteString("?tls=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&writeTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?writeTimeout=")
+ }
+ buf.WriteString(cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket > 0 {
+ if hasParam {
+ buf.WriteString("&maxAllowedPacket=")
+ } else {
+ hasParam = true
+ buf.WriteString("?maxAllowedPacket=")
+ }
+ buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+ }
+
+ // other params
+ if cfg.Params != nil {
+ for param, value := range cfg.Params {
+ if hasParam {
+ buf.WriteByte('&')
+ } else {
+ hasParam = true
+ buf.WriteByte('?')
+ }
+
+ buf.WriteString(param)
+ buf.WriteByte('=')
+ buf.WriteString(url.QueryEscape(value))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = &Config{
+ Loc: time.UTC,
+ Collation: defaultCollation,
+ }
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...&paramN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.DBName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return nil, errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return nil, errors.New("default addr for network '" + cfg.Net + "' unknown") + } + + } + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.AllowAllFiles, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.AllowCleartextPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use native password authentication + case "allowNativePasswords": + var isBool bool + cfg.AllowNativePasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.AllowOldPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.ClientFoundRows, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Collation + case "collation": + cfg.Collation = value + break + + case "columnsWithAlias": + var isBool bool + cfg.ColumnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Compression + case "compress": + return errors.New("compression not implemented yet") + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.InterpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.Loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // multiple statements in one query + case "multiStatements": + var isBool bool + cfg.MultiStatements, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // time.Time parsing + case "parseTime": + var isBool bool + cfg.ParseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // I/O read Timeout + case "readTimeout": + cfg.ReadTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // Strict mode + case "strict": + var isBool bool + cfg.Strict, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Dial Timeout + case "timeout": + cfg.Timeout, err = 
time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.TLSConfig = "true" + cfg.tls = &tls.Config{} + } else { + cfg.TLSConfig = "false" + } + } else if vl := strings.ToLower(value); vl == "skip-verify" { + cfg.TLSConfig = vl + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else { + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for TLS config name: %v", err) + } + + if tlsConfig, ok := tlsConfigRegister[name]; ok { + if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + tlsConfig.ServerName = host + } + } + + cfg.TLSConfig = name + cfg.tls = tlsConfig + } else { + return errors.New("invalid value / unknown config name: " + name) + } + } + + // I/O write Timeout + case "writeTimeout": + cfg.WriteTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + case "maxAllowedPacket": + cfg.MaxAllowedPacket, err = strconv.Atoi(value) + if err != nil { + return + } + default: + // lazy init + if cfg.Params == nil { + cfg.Params = make(map[string]string) + } + + if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go new file mode 100644 index 0000000..0693192 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn_test.go @@ -0,0 +1,231 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
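+
+// The tests below exercise the ParseDSN/FormatDSN pair from dsn.go.
+// A minimal caller-side sketch (assuming the usual import path
+// github.com/go-sql-driver/mysql):
+//
+// cfg, err := mysql.ParseDSN("user:pass@tcp(127.0.0.1:3306)/db?parseTime=true")
+// if err != nil {
+// ... // invalid DSN
+// }
+// cfg.Collation = "utf8mb4_unicode_ci"
+// dsn := cfg.FormatDSN() // serializes the modified Config back to a DSN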
+ +package mysql + +import ( + "crypto/tls" + "fmt" + "net/url" + "reflect" + "testing" + "time" +) + +var testDSNs = []struct { + in string + out *Config +}{{ + "username:password@protocol(address)/dbname?param=value", + &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", + &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true}, +}, { + "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true", + &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true, MultiStatements: true}, +}, { + "user@unix(/path/to/socket)/dbname?charset=utf8", + &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", + &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "true"}, +}, { + "user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", + &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "skip-verify"}, +}, { + "user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216", + &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216}, +}, { + "user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", + &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local}, +}, { + "/dbname", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "@/", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "/", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "user:p@/ssword@/", + &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "unix/?arg=%2Fsome%2Fpath.ext", + &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC}, +}} + +func TestDSNParser(t *testing.T) { + for i, tst := range testDSNs { + cfg, err := ParseDSN(tst.in) + if err != nil { + 
t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + if !reflect.DeepEqual(cfg, tst.out) { + t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + "User:pass@tcp(1.2.3.4:3306)", // no trailing slash + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := ParseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) + } + } +} + +func TestDSNReformat(t *testing.T) { + for i, tst := range testDSNs { + dsn1 := tst.in + cfg1, err := ParseDSN(dsn1) + if err != nil { + t.Error(err.Error()) + continue + } + cfg1.tls = nil // pointer not static + res1 := fmt.Sprintf("%+v", cfg1) + + dsn2 := cfg1.FormatDSN() + cfg2, err := ParseDSN(dsn2) + if err != nil { + t.Error(err.Error()) + continue + } + cfg2.tls = nil // pointer not static + res2 := fmt.Sprintf("%+v", cfg2) + + if res1 != res2 { + t.Errorf("%d. %q does not match %q", i, res2, res1) + } + } +} + +func TestDSNWithCustomTLS(t *testing.T) { + baseDSN := "User:password@tcp(localhost:5555)/dbname?tls=" + tlsCfg := tls.Config{} + + RegisterTLSConfig("utils_test", &tlsCfg) + + // Custom TLS is missing + tst := baseDSN + "invalid_tls" + cfg, err := ParseDSN(tst) + if err == nil { + t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg) + } + + tst = baseDSN + "utils_test" + + // Custom TLS with a server name + name := "foohost" + tlsCfg.ServerName = name + cfg, err = ParseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) + } + + // Custom TLS without a server name + name = "localhost" + tlsCfg.ServerName = "" + cfg, err = ParseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) + } + + DeregisterTLSConfig("utils_test") +} + +func TestDSNWithCustomTLSQueryEscape(t *testing.T) { + const configKey = "&%!:" + dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey) + name := "foohost" + tlsCfg := tls.Config{ServerName: name} + + RegisterTLSConfig(configKey, &tlsCfg) + + cfg, err := ParseDSN(dsn) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn) + } +} + +func TestDSNUnsafeCollation(t *testing.T) { + _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") + if err != errInvalidDSNUnsafeCollation { + t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err) + } + + _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=gbk_chinese_ci") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = 
ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := ParseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 0000000..857854e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,132 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "errors" + "fmt" + "io" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. +var ( + ErrInvalidConn = errors.New("invalid connection") + ErrMalformPkt = errors.New("malformed packet") + ErrNoTLS = errors.New("TLS requested but server does not support TLS") + ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") + ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrUnknownPlugin = errors.New("this authentication plugin is not supported") + ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") + ErrPktSync = errors.New("commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrBusyBuffer = errors.New("busy buffer") +) + +var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. 
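+//
+// A minimal usage sketch, mirroring the default logger defined above:
+//
+// mysql.SetLogger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime))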
+func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} + +// MySQLWarnings is an error type which represents a group of one or more MySQL +// warnings +type MySQLWarnings []MySQLWarning + +func (mws MySQLWarnings) Error() string { + var msg string + for i, warning := range mws { + if i > 0 { + msg += "\r\n" + } + msg += fmt.Sprintf( + "%s %s: %s", + warning.Level, + warning.Code, + warning.Message, + ) + } + return msg +} + +// MySQLWarning is an error type which represents a single MySQL warning. +// Warnings are returned in groups only. See MySQLWarnings +type MySQLWarning struct { + Level string + Code string + Message string +} + +func (mc *mysqlConn) getWarnings() (err error) { + rows, err := mc.Query("SHOW WARNINGS", nil) + if err != nil { + return + } + + var warnings = MySQLWarnings{} + var values = make([]driver.Value, 3) + + for { + err = rows.Next(values) + switch err { + case nil: + warning := MySQLWarning{} + + if raw, ok := values[0].([]byte); ok { + warning.Level = string(raw) + } else { + warning.Level = fmt.Sprintf("%s", values[0]) + } + if raw, ok := values[1].([]byte); ok { + warning.Code = string(raw) + } else { + warning.Code = fmt.Sprintf("%s", values[1]) + } + if raw, ok := values[2].([]byte); ok { + warning.Message = string(raw) + } else { + warning.Message = fmt.Sprintf("%s", values[2]) + } + + warnings = append(warnings, warning) + + case io.EOF: + return warnings + + default: + rows.Close() + return + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/go-sql-driver/mysql/errors_test.go new file mode 100644 index 0000000..96f9126 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors_test.go @@ -0,0 +1,42 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "log" + "testing" +) + +func TestErrorsSetLogger(t *testing.T) { + previous := errLog + defer func() { + errLog = previous + }() + + // set up logger + const expected = "prefix: test\n" + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + logger := log.New(buffer, "prefix: ", 0) + + // print + SetLogger(logger) + errLog.Print("test") + + // check result + if actual := buffer.String(); actual != expected { + t.Errorf("expected %q, got %q", expected, actual) + } +} + +func TestErrorsStrictIgnoreNotes(t *testing.T) { + runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { + dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") + }) +} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 0000000..547357c --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0.
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "fmt" + "io" + "os" + "strings" + "sync" +) + +var ( + fileRegister map[string]bool + fileRegisterLock sync.RWMutex + readerRegister map[string]func() io.Reader + readerRegisterLock sync.RWMutex +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + fileRegisterLock.Lock() + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true + fileRegisterLock.Unlock() +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + fileRegisterLock.Lock() + delete(fileRegister, strings.Trim(filePath, `"`)) + fileRegisterLock.Unlock() +} + +// RegisterReaderHandler registers a handler function which is used +// to receive an io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>". +// If the handler returns an io.ReadCloser, Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegisterLock.Lock() + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler + readerRegisterLock.Unlock() +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + readerRegisterLock.Lock() + delete(readerRegister, name) + readerRegisterLock.Unlock() +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP + if mc.maxWriteSize < packetSize { + packetSize = mc.maxWriteSize + } + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an absolute path. See issue #355.
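+ // For illustration (hypothetical value, not from the driver): a name of + // "/var/tmp/Reader::data" is handled the same as "Reader::data", since + // the marker may follow a path separator.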
+ name = name[idx+8:] + + readerRegisterLock.RLock() + handler, inMap := readerRegister[name] + readerRegisterLock.RUnlock() + + if inMap { + rdr = handler() + if rdr != nil { + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is <nil>", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + fileRegisterLock.RLock() + fr := fileRegister[name] + fileRegisterLock.RUnlock() + if mc.cfg.AllowAllFiles || fr { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize < packetSize { + packetSize = fileSize + } + } + } + } else { + err = fmt.Errorf("local file '%s' is not registered", name) + } + } + + // send content packets + if err == nil { + data := make([]byte, 4+packetSize) + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + _, err = mc.readResultOK() + return err + } + + mc.readPacket() + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 0000000..aafe979 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1287 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/.
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var prevData []byte + for { + // read packet header + data, err := mc.buf.readNext(4) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // packet length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + // check packet sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } + return nil, ErrPktSync + } + mc.sequence++ + + // packets with length 0 terminate a previous packet which is a + // multiple of (2^24)−1 bytes long + if pktLen == 0 { + // there was no previous packet + if prevData == nil { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, driver.ErrBadConn + } + + return prevData, nil + } + + // read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // return data if this was the last packet + if pktLen < maxPacketSize { + // zero allocations for non-split packets + if prevData == nil { + return data, nil + } + + return append(prevData, data...), nil + } + + prevData = append(prevData, data...) + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxAllowedPacket { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + if mc.writeTimeout > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { + return err + } + } + + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(ErrMalformPkt) + } else { + errLog.Print(err) + } + return driver.ErrBadConn + } +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "unsupported protocol version %d. 
Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [minimum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + cipher = append(cipher, data[pos:pos+12]...) + + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return ErrMalformPkt + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], cipher) + return b[:], nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], cipher) + return b[:], nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + clientMultiResults | + mc.flags&clientLongFlag + + if mc.cfg.ClientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + if mc.cfg.MultiStatements { + clientFlags |= clientMultiStatements + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.DBName); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer.
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + var found bool + data[12], found = collations[mc.cfg.Collation] + if !found { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + return errors.New("unknown collation") + } + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.nc = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + for ; pos < 13+23; pos++ { + data[pos] = 0 + } + + // User [null terminated string] + if len(mc.cfg.User) > 0 { + pos += copy(data[pos:], mc.cfg.User) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.DBName) > 0 { + pos += copy(data[pos:], mc.cfg.DBName) + data[pos] = 0x00 + pos++ + } + + // Assume native client during response + pos += copy(data[pos:], "mysql_native_password") + data[pos] = 0x00 + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User password + scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Client clear text authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeClearAuthPacket() error { + // Calculate the packet length and add a tailing 0 + pktLen := len(mc.cfg.Passwd) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the clear password [null terminated string] + copy(data[4:], mc.cfg.Passwd) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Native password authentication method +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error { + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scramble + copy(data[4:], scrambleBuff) + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() ([]byte, error) { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return nil, mc.handleOkPacket(data) + + case iEOF: + if len(data) > 1 { + pluginEndIndex := bytes.IndexByte(data, 0x00) + plugin := string(data[1:pluginEndIndex]) + cipher := data[pluginEndIndex+1 : len(data)-1] + + if plugin == "mysql_old_password" { + // using old_passwords + return cipher, ErrOldPassword + } else if plugin == "mysql_clear_password" { + // using clear text password + return cipher, ErrCleartextPassword + } else if plugin == "mysql_native_password" { + // using mysql default authentication method + return cipher, ErrNativePassword + } else { + return cipher, ErrUnknownPlugin + } + } else { + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest + return nil, ErrOldPassword + } + + default: // Error otherwise + return nil, mc.handleErrorPacket(data) + } + } + return nil, err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +func readStatus(b []byte) statusFlag { + return statusFlag(b[0]) | statusFlag(b[1])<<8 +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = readStatus(data[1+n+m : 1+n+m+2]) + if err := mc.discardResults(); err != nil { + return err + } + + // warning count [2 bytes] + if !mc.strict { + return nil + } + + pos := 1 + n + m + 2 + if binary.LittleEndian.Uint16(data[pos:pos+2]) 
> 0 { + return mc.getWarnings() + } + return nil +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.ColumnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [uint8] + // Charset [charset, collation uint8] + // Length [uint32] + pos += n + 1 + 2 + 4 + + // Field type [uint8] + columns[i].fieldType = data[pos] + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + // server_status [2 bytes] + rows.mc.status = readStatus(data[3:]) + err = rows.mc.discardResults() + if err == nil { + err = io.EOF + } else { + // connection unusable + rows.mc.Close() + } + rows.mc = nil + return err + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.Loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. 
Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + if err != nil { + return err + } + + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + if !stmt.mc.strict { + return columnCount, nil + } + + // Check for warnings count > 0, only available in MySQL > 4.1 + if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { + return columnCount, stmt.mc.getWarnings() + } + return columnCount, nil + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxAllowedPacket - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "argument count mismatch (got: %d; has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = fieldTypeDouble + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) 
+ } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + var val []byte + if v.IsZero() { + val = []byte("0000-00-00") + } else { + val = []byte(v.In(mc.cfg.Loc).Format(timeFormat)) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(val)), + ) + paramValues = append(paramValues, val...) + + default: + return fmt.Errorf("can not convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +func (mc *mysqlConn) discardResults() error { + for mc.status&statusMoreResultsExists != 0 { + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } else { + mc.status &^= statusMoreResultsExists + } + } + return nil +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc.status = readStatus(data[3:]) + err = rows.mc.discardResults() + if err == nil { + err = io.EOF + } else { + // connection unusable + rows.mc.Close() + } + rows.mc = nil + return err + } + mc := rows.mc + rows.mc = nil + + // Error otherwise + return mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 +
continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) + default: + var dstlen uint8 + if rows.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType) + } + } + + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go new file mode 100644 index 0000000..9840458 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets_test.go @@ -0,0 +1,282 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql/driver" + "errors" + "net" + "testing" + "time" +) + +var ( + errConnClosed = errors.New("connection is closed") + errConnTooManyReads = errors.New("too many reads") + errConnTooManyWrites = errors.New("too many writes") +) + +// struct to mock a net.Conn for testing purposes +type mockConn struct { + laddr net.Addr + raddr net.Addr + data []byte + closed bool + read int + written int + reads int + writes int + maxReads int + maxWrites int +} + +func (m *mockConn) Read(b []byte) (n int, err error) { + if m.closed { + return 0, errConnClosed + } + + m.reads++ + if m.maxReads > 0 && m.reads > m.maxReads { + return 0, errConnTooManyReads + } + + n = copy(b, m.data) + m.read += n + m.data = m.data[n:] + return +} +func (m *mockConn) Write(b []byte) (n int, err error) { + if m.closed { + return 0, errConnClosed + } + + m.writes++ + if m.maxWrites > 0 && m.writes > m.maxWrites { + return 0, errConnTooManyWrites + } + + n = len(b) + m.written += n + return +} +func (m *mockConn) Close() error { + m.closed = true + return nil +} +func (m *mockConn) LocalAddr() net.Addr { + return m.laddr +} +func (m *mockConn) RemoteAddr() net.Addr { + return m.raddr +} +func (m *mockConn) SetDeadline(t time.Time) error { + return nil +} +func (m *mockConn) SetReadDeadline(t time.Time) error { + return nil +} +func (m *mockConn) SetWriteDeadline(t time.Time) error { + return nil +} + +// make sure mockConn implements the net.Conn interface +var _ net.Conn = new(mockConn) + +func TestReadPacketSingleByte(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff} + conn.maxReads = 1 + packet, err := mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != 1 { + t.Fatalf("unexpected packet length: expected %d, got %d", 1, len(packet)) + } + if packet[0] != 0xff { + t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0]) + } +} + +func TestReadPacketWrongSequenceID(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + // too low sequence id + conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff} + conn.maxReads = 1 + mc.sequence = 1 + _, err := mc.readPacket() + if err != ErrPktSync { + t.Errorf("expected ErrPktSync, got %v", err) + } + + // reset + conn.reads = 0 + mc.sequence = 0 + mc.buf = newBuffer(conn) + + // too high sequence id + conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff} + _, err = mc.readPacket() + if err != ErrPktSyncMul { + t.Errorf("expected ErrPktSyncMul, got %v", err) + } +} + +func TestReadPacketSplit(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + data := make([]byte, maxPacketSize*2+4*3) + const pkt2ofs = maxPacketSize + 4 + const pkt3ofs = 2 * (maxPacketSize + 4) + + // case 1: payload has length maxPacketSize + data = data[:pkt2ofs+4] + + // 1st packet has maxPacketSize length and sequence id 0 + // ff ff ff 00 ...
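+ // (header layout: 3-byte little-endian payload length, then a 1-byte + // sequence id; a length of 0xffffff == maxPacketSize marks a split packet)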
+ data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + + // mark the payload start and end of 1st packet so that we can check if the + // content was correctly appended + data[4] = 0x11 + data[maxPacketSize+3] = 0x22 + + // 2nd packet has payload length 0 and sequence id 1 + // 00 00 00 01 + data[pkt2ofs+3] = 0x01 + + conn.data = data + conn.maxReads = 3 + packet, err := mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != maxPacketSize { + t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize, len(packet)) + } + if packet[0] != 0x11 { + t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0]) + } + if packet[maxPacketSize-1] != 0x22 { + t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1]) + } + + // case 2: payload has length which is a multiple of maxPacketSize + data = data[:cap(data)] + + // 2nd packet now has maxPacketSize length + data[pkt2ofs] = 0xff + data[pkt2ofs+1] = 0xff + data[pkt2ofs+2] = 0xff + + // mark the payload start and end of the 2nd packet + data[pkt2ofs+4] = 0x33 + data[pkt2ofs+maxPacketSize+3] = 0x44 + + // 3rd packet has payload length 0 and sequence id 2 + // 00 00 00 02 + data[pkt3ofs+3] = 0x02 + + conn.data = data + conn.reads = 0 + conn.maxReads = 5 + mc.sequence = 0 + packet, err = mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != 2*maxPacketSize { + t.Fatalf("unexpected packet length: expected %d, got %d", 2*maxPacketSize, len(packet)) + } + if packet[0] != 0x11 { + t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0]) + } + if packet[2*maxPacketSize-1] != 0x44 { + t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1]) + } + + // case 3: payload has a length larger than maxPacketSize, which is not an + // exact multiple of it + data = data[:pkt2ofs+4+42] + data[pkt2ofs] = 0x2a + data[pkt2ofs+1] = 0x00 + data[pkt2ofs+2] = 0x00 + data[pkt2ofs+4+41] = 0x44 + + conn.data = data + conn.reads = 0 + conn.maxReads = 4 + mc.sequence = 0 + packet, err = mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != maxPacketSize+42 { + t.Fatalf("unexpected packet length: expected %d, got %d", maxPacketSize+42, len(packet)) + } + if packet[0] != 0x11 { + t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0]) + } + if packet[maxPacketSize+41] != 0x44 { + t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41]) + } +} + +func TestReadPacketFail(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + // illegal empty (stand-alone) packet + conn.data = []byte{0x00, 0x00, 0x00, 0x00} + conn.maxReads = 1 + _, err := mc.readPacket() + if err != driver.ErrBadConn { + t.Errorf("expected ErrBadConn, got %v", err) + } + + // reset + conn.reads = 0 + mc.sequence = 0 + mc.buf = newBuffer(conn) + + // fail to read header + conn.closed = true + _, err = mc.readPacket() + if err != driver.ErrBadConn { + t.Errorf("expected ErrBadConn, got %v", err) + } + + // reset + conn.closed = false + conn.reads = 0 + mc.sequence = 0 + mc.buf = newBuffer(conn) + + // fail to read body + conn.maxReads = 1 + _, err = mc.readPacket() + if err != driver.ErrBadConn { + t.Errorf("expected ErrBadConn, got %v", err) + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 0000000..c6438d0 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22
@@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 0000000..c08255e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,112 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" +) + +type mysqlField struct { + tableName string + name string + flags fieldFlag + fieldType byte + decimals byte +} + +type mysqlRows struct { + mc *mysqlConn + columns []mysqlField +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +type emptyRows struct{} + +func (rows *mysqlRows) Columns() []string { + columns := make([]string, len(rows.columns)) + if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { + for i := range columns { + if tableName := rows.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.columns[i].name + } else { + columns[i] = rows.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.columns[i].name + } + } + return columns +} + +func (rows *mysqlRows) Close() error { + mc := rows.mc + if mc == nil { + return nil + } + if mc.netConn == nil { + return ErrInvalidConn + } + + // Remove unread packets from stream + err := mc.readUntilEOF() + if err == nil { + if err = mc.discardResults(); err != nil { + return err + } + } + + rows.mc = nil + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows emptyRows) Columns() []string { + return nil +} + +func (rows emptyRows) Close() error { + return nil +} + +func (rows emptyRows) Next(dest []driver.Value) error { + return io.EOF +} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 0000000..7f9b045 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,153 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "reflect" + "strconv" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int + columns []mysqlField // cached from the first query +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.netConn == nil { + // driver.Stmt.Close can be called more than once, thus this function + // has to be idempotent. + // See also Issue #450 and golang/go#16019. + //errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + if resLen > 0 { + // Columns + err = mc.readUntilEOF() + if err != nil { + return nil, err + } + + // Rows + err = mc.readUntilEOF() + } + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil + } + } + + return nil, err +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + + if resLen > 0 { + rows.mc = mc + // Columns + // If not cached, read them and cache them + if stmt.columns == nil { + rows.columns, err = mc.readColumns(resLen) + stmt.columns = rows.columns + } else { + rows.columns = stmt.columns + err = mc.readUntilEOF() + } + } + + return rows, err +} + +type converter struct{} + +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } + return c.ConvertValue(rv.Elem().Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return strconv.FormatUint(u64, 10), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 0000000..33c749b --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 0000000..d523b7f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,740 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "strings" + "time" +) + +var ( + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs +) + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("key '%s' is reserved", key) + } + + if tlsConfigRegister == nil { + tlsConfigRegister = make(map[string]*tls.Config) + } + + tlsConfigRegister[key] = config + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + if tlsConfigRegister != nil { + delete(tlsConfigRegister, key) + } +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... 
+// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("invalid time string: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("invalid DATETIME packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. 
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + if justTime { + return zeroDateTime[11 : 11+length], nil + } + return zeroDateTime[:length], nil + } + var dst []byte // return value + var pt, p1, p2, p3 byte // current digit pair + var zOffs byte // offset of value in zeroDateTime + if justTime { + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + if src[1] != 0 { + hour := uint16(src[1])*24 + uint16(src[5]) + pt = byte(hour / 100) + p1 = byte(hour - 100*uint16(pt)) + dst = append(dst, digits01[pt]) + } else { + p1 = src[5] + } + zOffs = 11 + src = src[6:] + } else { + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt = byte(year / 100) + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + } + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + if length <= byte(len(dst)) { + return dst, nil + } + src = src[2:] + if len(src) == 0 { + return append(dst, zeroDateTime[19:zOffs+length]...), nil + } + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 = byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 = byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 = byte(microsecs) + switch decimals := zOffs + length - 20; decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ), nil + case 1: + return append(dst, '.', + digits10[p1], + ), nil + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ), nil + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ), nil + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ), nil + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], 
+ ), nil
+ }
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a bytes slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+ switch b[0] {
+
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, a new buffer is allocated.
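+// The new capacity is len(buf)*2 + appendSize, so a sequence of appends
+// through this helper grows the buffer geometrically instead of
+// reallocating on every call.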
+func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 0000000..0d6c668 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,197 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + "time" +) + +func TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} + +func TestLengthEncodedInteger(t *testing.T) { + var integerTests = []struct { + num uint64 + encoded []byte + }{ + {0x0000000000000000, []byte{0x00}}, + {0x0000000000000012, []byte{0x12}}, + {0x00000000000000fa, []byte{0xfa}}, + {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, + {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, + {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, + {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, + {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}}, + {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, + {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, + {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, + {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + + for _, tst := range integerTests { + num, isNull, numLen := readLengthEncodedInteger(tst.encoded) + if isNull { + t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) + } + if num != tst.num { + t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) + } + if numLen != len(tst.encoded) { + t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) + } + encoded := appendLengthEncodedInteger(nil, num) + if !bytes.Equal(encoded, tst.encoded) { + t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) + } + } +} + +func TestOldPass(t *testing.T) { + scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} + vectors := []struct { + pass string + out string + }{ + {" pass", "47575c5a435b4251"}, + {"pass ", "47575c5a435b4251"}, + {"123\t456", "575c47505b5b5559"}, + {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, + } + for _, tuple := range vectors { + ours := scrambleOldPassword(scramble, []byte(tuple.pass)) + if tuple.out != fmt.Sprintf("%x", ours) { + t.Errorf("Failed old password %q", tuple.pass) + } + } +} + +func TestFormatBinaryDateTime(t *testing.T) { + rawDate := [11]byte{} + binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years + rawDate[2] = 12 // months + rawDate[3] = 30 // days + rawDate[4] = 15 // hours + rawDate[5] = 46 // minutes + rawDate[6] = 23 // seconds + binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds + expect := func(expected string, inlen, outlen uint8) { + 
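+ // Render the first inlen bytes of rawDate at display length outlen
+ // and compare the result against expected.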
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) + bytes, ok := actual.([]byte) + if !ok { + t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) + } + if string(bytes) != expected { + t.Errorf( + "expected %q, got %q for length in %d, out %d", + bytes, actual, inlen, outlen, + ) + } + } + expect("0000-00-00", 0, 10) + expect("0000-00-00 00:00:00", 0, 19) + expect("1978-12-30", 4, 10) + expect("1978-12-30 15:46:23", 7, 19) + expect("1978-12-30 15:46:23.987654", 11, 26) +} + +func TestEscapeBackslash(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesBackslash([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringBackslash([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\\0bar", "foo\x00bar") + expect("foo\\nbar", "foo\nbar") + expect("foo\\rbar", "foo\rbar") + expect("foo\\Zbar", "foo\x1abar") + expect("foo\\\"bar", "foo\"bar") + expect("foo\\\\bar", "foo\\bar") + expect("foo\\'bar", "foo'bar") +} + +func TestEscapeQuotes(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesQuotes([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringQuotes([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\x00bar", "foo\x00bar") // not affected + expect("foo\nbar", "foo\nbar") // not affected + expect("foo\rbar", "foo\rbar") // not affected + expect("foo\x1abar", "foo\x1abar") // not affected + expect("foo''bar", "foo'bar") // affected + expect("foo\"bar", "foo\"bar") // not affected +} diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore new file mode 100644 index 0000000..8f5b596 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.gitignore @@ -0,0 +1,16 @@ +.DS_Store +*.[568ao] +*.ao +*.so +*.pyc +._* +.nfs.* +[568a].out +*~ +*.orig +core +_obj +_test +_testmain.go +protoc-gen-go/testdata/multi/*.pb.go +_conformance/_conformance diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml new file mode 100644 index 0000000..93c6780 --- /dev/null +++ b/vendor/github.com/golang/protobuf/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +go: +- 1.6.x +- 1.7.x +- 1.8.x +- 1.9.x + +install: + - go get -v -d -t github.com/golang/protobuf/... + - curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip + - unzip /tmp/protoc.zip -d $HOME/protoc + +env: + - PATH=$HOME/protoc/bin:$PATH + +script: + - make all test diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. 
+# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 0000000..1b1b192 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf new file mode 100644 index 0000000..15071de --- /dev/null +++ b/vendor/github.com/golang/protobuf/Make.protobuf @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Includable Makefile to add a rule for generating .pb.go files from .proto files +# (Google protocol buffer descriptions). +# Typical use if myproto.proto is a file in package mypackage in this directory: +# +# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf + +%.pb.go: %.proto + protoc --go_out=. $< + diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile new file mode 100644 index 0000000..a1421d8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/Makefile @@ -0,0 +1,55 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +all: install + +install: + go install ./proto ./jsonpb ./ptypes + go install ./protoc-gen-go + +test: + go test ./proto ./jsonpb ./ptypes + make -C protoc-gen-go/testdata test + +clean: + go clean ./... + +nuke: + go clean -i ./... 
+
+regenerate:
+ make -C protoc-gen-go/descriptor regenerate
+ make -C protoc-gen-go/plugin regenerate
+ make -C protoc-gen-go/testdata regenerate
+ make -C proto/testdata regenerate
+ make -C jsonpb/jsonpb_test_proto regenerate
+ make -C _conformance regenerate
diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md
new file mode 100644
index 0000000..9c4c815
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/README.md
@@ -0,0 +1,244 @@
+# Go support for Protocol Buffers
+
+[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
+[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
+
+Google's data interchange format.
+Copyright 2010 The Go Authors.
+https://github.com/golang/protobuf
+
+This package and the code it generates require at least Go 1.4.
+
+This software implements Go bindings for protocol buffers. For
+information about protocol buffers themselves, see
+ https://developers.google.com/protocol-buffers/
+
+## Installation ##
+
+To use this software, you must:
+- Install the standard C++ implementation of protocol buffers from
+ https://developers.google.com/protocol-buffers/
+- Of course, install the Go compiler and tools from
+ https://golang.org/
+ See
+ https://golang.org/doc/install
+ for details or, if you are using gccgo, follow the instructions at
+ https://golang.org/doc/install/gccgo
+- Grab the code from the repository and install the proto package.
+ The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
+ The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
+ defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
+ compiler, protoc, to find it.
+
+This software has two parts: a 'protocol compiler plugin' that
+generates Go source files that, once compiled, can access and manage
+protocol buffers; and a library that implements run-time support for
+encoding (marshaling), decoding (unmarshaling), and accessing protocol
+buffers.
+
+There is support for gRPC in Go using protocol buffers.
+See the note at the bottom of this file for details.
+
+There are no insertion points in the plugin.
+
+
+## Using protocol buffers with Go ##
+
+Once the software is installed, there are two steps to using it.
+First you must compile the protocol buffer definitions and then import
+them, with the support library, into your program.
+
+To compile the protocol buffer definition, run protoc with the --go_out
+parameter set to the directory you want to output the Go code to.
+
+ protoc --go_out=. *.proto
+
+The generated files will be suffixed .pb.go. See the Test code below
+for an example using such a file.
+
+
+The package comment for the proto library contains text describing
+the interface provided in Go for protocol buffers. Here is an edited
+version.
+
+==========
+
+The proto package converts data structures to and from the
+wire format of protocol buffers. It works in concert with the
+Go source code generated for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+ a String method, and an Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+Consider file test.proto, containing
+
+```proto
+ syntax = "proto2";
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+```
+
+To create and play with a Test object from the example package,
+
+```go
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ "path/to/example"
+ )
+
+ func main() {
+ test := &example.Test {
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &example.Test_OptionalGroup {
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &example.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+```
+
+## Parameters ##
+
+To pass extra parameters to the plugin, use a comma-separated
+parameter list separated from the output directory by a colon:
+
+
+ protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto
+
+
+- `import_prefix=xxx` - a prefix that is added onto the beginning of
+ all imports. Useful for things like generating protos in a
+ subdirectory, or regenerating vendored protobufs in-place.
+- `import_path=foo/bar` - used as the package if no input files
+ declare `go_package`.
If it contains slashes, everything up to the + rightmost slash is ignored. +- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. + +## gRPC Support ## + +If a proto file specifies RPC services, protoc-gen-go can be instructed to +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into +the --go_out argument to protoc: + + protoc --go_out=plugins=grpc:. *.proto + +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. 
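+
+As a concrete illustration of the parameter syntax described in the
+Parameters section above (reusing the hypothetical names from that section,
+plus a hypothetical `other.proto`), several parameters can be combined in a
+single `--go_out` list:
+
+ protoc --go_out=import_path=mypackage,Mfoo/bar.proto=quux/shme:. other.proto
+
+This compiles `other.proto` with `mypackage` as its fallback package name,
+while generated references to messages defined in `foo/bar.proto` import the
+Go package `quux/shme`.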
diff --git a/vendor/github.com/golang/protobuf/_conformance/Makefile b/vendor/github.com/golang/protobuf/_conformance/Makefile new file mode 100644 index 0000000..89800e2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/Makefile @@ -0,0 +1,33 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2016 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,Mgoogle/protobuf/field_mask.proto=google.golang.org/genproto/protobuf:. conformance_proto/conformance.proto diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance.go b/vendor/github.com/golang/protobuf/_conformance/conformance.go new file mode 100644 index 0000000..c54212c --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/conformance.go @@ -0,0 +1,161 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// conformance implements the conformance test subprocess protocol as +// documented in conformance.proto. +package main + +import ( + "encoding/binary" + "fmt" + "io" + "os" + + pb "github.com/golang/protobuf/_conformance/conformance_proto" + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" +) + +func main() { + var sizeBuf [4]byte + inbuf := make([]byte, 0, 4096) + outbuf := proto.NewBuffer(nil) + for { + if _, err := io.ReadFull(os.Stdin, sizeBuf[:]); err == io.EOF { + break + } else if err != nil { + fmt.Fprintln(os.Stderr, "go conformance: read request:", err) + os.Exit(1) + } + size := binary.LittleEndian.Uint32(sizeBuf[:]) + if int(size) > cap(inbuf) { + inbuf = make([]byte, size) + } + inbuf = inbuf[:size] + if _, err := io.ReadFull(os.Stdin, inbuf); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: read request:", err) + os.Exit(1) + } + + req := new(pb.ConformanceRequest) + if err := proto.Unmarshal(inbuf, req); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: parse request:", err) + os.Exit(1) + } + res := handle(req) + + if err := outbuf.Marshal(res); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: marshal response:", err) + os.Exit(1) + } + binary.LittleEndian.PutUint32(sizeBuf[:], uint32(len(outbuf.Bytes()))) + if _, err := os.Stdout.Write(sizeBuf[:]); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: write response:", err) + os.Exit(1) + } + if _, err := os.Stdout.Write(outbuf.Bytes()); err != nil { + fmt.Fprintln(os.Stderr, "go conformance: write response:", err) + os.Exit(1) + } + outbuf.Reset() + } +} + +var jsonMarshaler = jsonpb.Marshaler{ + OrigName: true, +} + +func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse { + var err error + var msg pb.TestAllTypes + switch p := req.Payload.(type) { + case *pb.ConformanceRequest_ProtobufPayload: + err = proto.Unmarshal(p.ProtobufPayload, &msg) + case *pb.ConformanceRequest_JsonPayload: + err = jsonpb.UnmarshalString(p.JsonPayload, &msg) + if err != nil && err.Error() == "unmarshaling Any not supported yet" { + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_Skipped{ + Skipped: err.Error(), + }, + } + } + default: + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_RuntimeError{ + RuntimeError: "unknown request payload type", + }, + } + } + if err != nil { + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_ParseError{ + ParseError: err.Error(), + }, + } + } + switch req.RequestedOutputFormat { + case pb.WireFormat_PROTOBUF: + p, err := proto.Marshal(&msg) + if err != nil { + 
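+ // Report the marshaling failure to the conformance runner as a
+ // serialize_error result rather than terminating the subprocess.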
return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_SerializeError{ + SerializeError: err.Error(), + }, + } + } + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_ProtobufPayload{ + ProtobufPayload: p, + }, + } + case pb.WireFormat_JSON: + p, err := jsonMarshaler.MarshalToString(&msg) + if err != nil { + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_SerializeError{ + SerializeError: err.Error(), + }, + } + } + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_JsonPayload{ + JsonPayload: p, + }, + } + default: + return &pb.ConformanceResponse{ + Result: &pb.ConformanceResponse_RuntimeError{ + RuntimeError: "unknown output format", + }, + } + } +} diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go new file mode 100644 index 0000000..ec354ea --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go @@ -0,0 +1,1885 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: conformance_proto/conformance.proto + +/* +Package conformance is a generated protocol buffer package. + +It is generated from these files: + conformance_proto/conformance.proto + +It has these top-level messages: + ConformanceRequest + ConformanceResponse + TestAllTypes + ForeignMessage +*/ +package conformance + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "google.golang.org/genproto/protobuf" +import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WireFormat int32 + +const ( + WireFormat_UNSPECIFIED WireFormat = 0 + WireFormat_PROTOBUF WireFormat = 1 + WireFormat_JSON WireFormat = 2 +) + +var WireFormat_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "PROTOBUF", + 2: "JSON", +} +var WireFormat_value = map[string]int32{ + "UNSPECIFIED": 0, + "PROTOBUF": 1, + "JSON": 2, +} + +func (x WireFormat) String() string { + return proto.EnumName(WireFormat_name, int32(x)) +} +func (WireFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type ForeignEnum int32 + +const ( + ForeignEnum_FOREIGN_FOO ForeignEnum = 0 + ForeignEnum_FOREIGN_BAR ForeignEnum = 1 + ForeignEnum_FOREIGN_BAZ ForeignEnum = 2 +) + +var ForeignEnum_name = map[int32]string{ + 0: "FOREIGN_FOO", + 1: "FOREIGN_BAR", + 2: "FOREIGN_BAZ", +} +var ForeignEnum_value = map[string]int32{ + "FOREIGN_FOO": 0, + "FOREIGN_BAR": 1, + "FOREIGN_BAZ": 2, +} + +func (x ForeignEnum) String() string { + return proto.EnumName(ForeignEnum_name, int32(x)) +} +func (ForeignEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type TestAllTypes_NestedEnum int32 + +const ( + TestAllTypes_FOO TestAllTypes_NestedEnum = 0 + TestAllTypes_BAR TestAllTypes_NestedEnum = 1 + TestAllTypes_BAZ TestAllTypes_NestedEnum = 2 + TestAllTypes_NEG TestAllTypes_NestedEnum = -1 +) + +var TestAllTypes_NestedEnum_name = map[int32]string{ + 0: "FOO", + 1: "BAR", + 2: "BAZ", + -1: "NEG", +} +var TestAllTypes_NestedEnum_value = map[string]int32{ + "FOO": 0, + "BAR": 1, + "BAZ": 2, + "NEG": -1, +} + +func (x TestAllTypes_NestedEnum) String() string { + return proto.EnumName(TestAllTypes_NestedEnum_name, int32(x)) +} +func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// Represents a single test case's input. The testee should: +// +// 1. parse this proto (which should always succeed) +// 2. parse the protobuf or JSON payload in "payload" (which may fail) +// 3. if the parse succeeded, serialize the message in the requested format. +type ConformanceRequest struct { + // The payload (whether protobuf of JSON) is always for a TestAllTypes proto + // (see below). + // + // Types that are valid to be assigned to Payload: + // *ConformanceRequest_ProtobufPayload + // *ConformanceRequest_JsonPayload + Payload isConformanceRequest_Payload `protobuf_oneof:"payload"` + // Which format should the testee serialize its message to? 
+ RequestedOutputFormat WireFormat `protobuf:"varint,3,opt,name=requested_output_format,json=requestedOutputFormat,enum=conformance.WireFormat" json:"requested_output_format,omitempty"` +} + +func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} } +func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) } +func (*ConformanceRequest) ProtoMessage() {} +func (*ConformanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isConformanceRequest_Payload interface { + isConformanceRequest_Payload() +} + +type ConformanceRequest_ProtobufPayload struct { + ProtobufPayload []byte `protobuf:"bytes,1,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` +} +type ConformanceRequest_JsonPayload struct { + JsonPayload string `protobuf:"bytes,2,opt,name=json_payload,json=jsonPayload,oneof"` +} + +func (*ConformanceRequest_ProtobufPayload) isConformanceRequest_Payload() {} +func (*ConformanceRequest_JsonPayload) isConformanceRequest_Payload() {} + +func (m *ConformanceRequest) GetPayload() isConformanceRequest_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *ConformanceRequest) GetProtobufPayload() []byte { + if x, ok := m.GetPayload().(*ConformanceRequest_ProtobufPayload); ok { + return x.ProtobufPayload + } + return nil +} + +func (m *ConformanceRequest) GetJsonPayload() string { + if x, ok := m.GetPayload().(*ConformanceRequest_JsonPayload); ok { + return x.JsonPayload + } + return "" +} + +func (m *ConformanceRequest) GetRequestedOutputFormat() WireFormat { + if m != nil { + return m.RequestedOutputFormat + } + return WireFormat_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ConformanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConformanceRequest_OneofMarshaler, _ConformanceRequest_OneofUnmarshaler, _ConformanceRequest_OneofSizer, []interface{}{ + (*ConformanceRequest_ProtobufPayload)(nil), + (*ConformanceRequest_JsonPayload)(nil), + } +} + +func _ConformanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConformanceRequest) + // payload + switch x := m.Payload.(type) { + case *ConformanceRequest_ProtobufPayload: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ProtobufPayload) + case *ConformanceRequest_JsonPayload: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.JsonPayload) + case nil: + default: + return fmt.Errorf("ConformanceRequest.Payload has unexpected type %T", x) + } + return nil +} + +func _ConformanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConformanceRequest) + switch tag { + case 1: // payload.protobuf_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Payload = &ConformanceRequest_ProtobufPayload{x} + return true, err + case 2: // payload.json_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Payload = &ConformanceRequest_JsonPayload{x} + return true, err + default: + return false, nil + } +} + +func _ConformanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConformanceRequest) + // payload + switch x := m.Payload.(type) { + case *ConformanceRequest_ProtobufPayload: + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) + n += len(x.ProtobufPayload) + case *ConformanceRequest_JsonPayload: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.JsonPayload))) + n += len(x.JsonPayload) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a single test case's output. +type ConformanceResponse struct { + // Types that are valid to be assigned to Result: + // *ConformanceResponse_ParseError + // *ConformanceResponse_SerializeError + // *ConformanceResponse_RuntimeError + // *ConformanceResponse_ProtobufPayload + // *ConformanceResponse_JsonPayload + // *ConformanceResponse_Skipped + Result isConformanceResponse_Result `protobuf_oneof:"result"` +} + +func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} } +func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) } +func (*ConformanceResponse) ProtoMessage() {} +func (*ConformanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type isConformanceResponse_Result interface { + isConformanceResponse_Result() +} + +type ConformanceResponse_ParseError struct { + ParseError string `protobuf:"bytes,1,opt,name=parse_error,json=parseError,oneof"` +} +type ConformanceResponse_SerializeError struct { + SerializeError string `protobuf:"bytes,6,opt,name=serialize_error,json=serializeError,oneof"` +} +type ConformanceResponse_RuntimeError struct { + RuntimeError string `protobuf:"bytes,2,opt,name=runtime_error,json=runtimeError,oneof"` +} +type ConformanceResponse_ProtobufPayload struct { + ProtobufPayload []byte `protobuf:"bytes,3,opt,name=protobuf_payload,json=protobufPayload,proto3,oneof"` +} +type ConformanceResponse_JsonPayload struct { + JsonPayload string `protobuf:"bytes,4,opt,name=json_payload,json=jsonPayload,oneof"` +} +type ConformanceResponse_Skipped struct { + Skipped string `protobuf:"bytes,5,opt,name=skipped,oneof"` +} + +func (*ConformanceResponse_ParseError) isConformanceResponse_Result() {} +func (*ConformanceResponse_SerializeError) isConformanceResponse_Result() {} +func (*ConformanceResponse_RuntimeError) isConformanceResponse_Result() {} +func (*ConformanceResponse_ProtobufPayload) isConformanceResponse_Result() {} +func (*ConformanceResponse_JsonPayload) isConformanceResponse_Result() {} +func (*ConformanceResponse_Skipped) isConformanceResponse_Result() {} + +func (m *ConformanceResponse) GetResult() isConformanceResponse_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *ConformanceResponse) GetParseError() string { + if x, ok := m.GetResult().(*ConformanceResponse_ParseError); ok { + return x.ParseError + } + return "" +} + +func (m *ConformanceResponse) GetSerializeError() string { + if x, ok := m.GetResult().(*ConformanceResponse_SerializeError); ok { + return x.SerializeError + } + return "" +} + +func (m *ConformanceResponse) GetRuntimeError() string { + if x, ok := m.GetResult().(*ConformanceResponse_RuntimeError); ok { + return x.RuntimeError + } + return "" +} + +func (m *ConformanceResponse) GetProtobufPayload() []byte { + if x, ok := m.GetResult().(*ConformanceResponse_ProtobufPayload); ok { + return x.ProtobufPayload + } + return nil +} + +func (m *ConformanceResponse) GetJsonPayload() string { + if x, ok := m.GetResult().(*ConformanceResponse_JsonPayload); ok { + return x.JsonPayload + } + return "" +} + +func (m *ConformanceResponse) GetSkipped() string { + if x, ok := 
m.GetResult().(*ConformanceResponse_Skipped); ok { + return x.Skipped + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ConformanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConformanceResponse_OneofMarshaler, _ConformanceResponse_OneofUnmarshaler, _ConformanceResponse_OneofSizer, []interface{}{ + (*ConformanceResponse_ParseError)(nil), + (*ConformanceResponse_SerializeError)(nil), + (*ConformanceResponse_RuntimeError)(nil), + (*ConformanceResponse_ProtobufPayload)(nil), + (*ConformanceResponse_JsonPayload)(nil), + (*ConformanceResponse_Skipped)(nil), + } +} + +func _ConformanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConformanceResponse) + // result + switch x := m.Result.(type) { + case *ConformanceResponse_ParseError: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ParseError) + case *ConformanceResponse_SerializeError: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.SerializeError) + case *ConformanceResponse_RuntimeError: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.RuntimeError) + case *ConformanceResponse_ProtobufPayload: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeRawBytes(x.ProtobufPayload) + case *ConformanceResponse_JsonPayload: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.JsonPayload) + case *ConformanceResponse_Skipped: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Skipped) + case nil: + default: + return fmt.Errorf("ConformanceResponse.Result has unexpected type %T", x) + } + return nil +} + +func _ConformanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConformanceResponse) + switch tag { + case 1: // result.parse_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_ParseError{x} + return true, err + case 6: // result.serialize_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_SerializeError{x} + return true, err + case 2: // result.runtime_error + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_RuntimeError{x} + return true, err + case 3: // result.protobuf_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Result = &ConformanceResponse_ProtobufPayload{x} + return true, err + case 4: // result.json_payload + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_JsonPayload{x} + return true, err + case 5: // result.skipped + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Result = &ConformanceResponse_Skipped{x} + return true, err + default: + return false, nil + } +} + +func _ConformanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConformanceResponse) + // result + switch x := m.Result.(type) { + case *ConformanceResponse_ParseError: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ParseError))) + n 
+= len(x.ParseError) + case *ConformanceResponse_SerializeError: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.SerializeError))) + n += len(x.SerializeError) + case *ConformanceResponse_RuntimeError: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RuntimeError))) + n += len(x.RuntimeError) + case *ConformanceResponse_ProtobufPayload: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) + n += len(x.ProtobufPayload) + case *ConformanceResponse_JsonPayload: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.JsonPayload))) + n += len(x.JsonPayload) + case *ConformanceResponse_Skipped: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Skipped))) + n += len(x.Skipped) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// This proto includes every type of field in both singular and repeated +// forms. +type TestAllTypes struct { + // Singular + OptionalInt32 int32 `protobuf:"varint,1,opt,name=optional_int32,json=optionalInt32" json:"optional_int32,omitempty"` + OptionalInt64 int64 `protobuf:"varint,2,opt,name=optional_int64,json=optionalInt64" json:"optional_int64,omitempty"` + OptionalUint32 uint32 `protobuf:"varint,3,opt,name=optional_uint32,json=optionalUint32" json:"optional_uint32,omitempty"` + OptionalUint64 uint64 `protobuf:"varint,4,opt,name=optional_uint64,json=optionalUint64" json:"optional_uint64,omitempty"` + OptionalSint32 int32 `protobuf:"zigzag32,5,opt,name=optional_sint32,json=optionalSint32" json:"optional_sint32,omitempty"` + OptionalSint64 int64 `protobuf:"zigzag64,6,opt,name=optional_sint64,json=optionalSint64" json:"optional_sint64,omitempty"` + OptionalFixed32 uint32 `protobuf:"fixed32,7,opt,name=optional_fixed32,json=optionalFixed32" json:"optional_fixed32,omitempty"` + OptionalFixed64 uint64 `protobuf:"fixed64,8,opt,name=optional_fixed64,json=optionalFixed64" json:"optional_fixed64,omitempty"` + OptionalSfixed32 int32 `protobuf:"fixed32,9,opt,name=optional_sfixed32,json=optionalSfixed32" json:"optional_sfixed32,omitempty"` + OptionalSfixed64 int64 `protobuf:"fixed64,10,opt,name=optional_sfixed64,json=optionalSfixed64" json:"optional_sfixed64,omitempty"` + OptionalFloat float32 `protobuf:"fixed32,11,opt,name=optional_float,json=optionalFloat" json:"optional_float,omitempty"` + OptionalDouble float64 `protobuf:"fixed64,12,opt,name=optional_double,json=optionalDouble" json:"optional_double,omitempty"` + OptionalBool bool `protobuf:"varint,13,opt,name=optional_bool,json=optionalBool" json:"optional_bool,omitempty"` + OptionalString string `protobuf:"bytes,14,opt,name=optional_string,json=optionalString" json:"optional_string,omitempty"` + OptionalBytes []byte `protobuf:"bytes,15,opt,name=optional_bytes,json=optionalBytes,proto3" json:"optional_bytes,omitempty"` + OptionalNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,18,opt,name=optional_nested_message,json=optionalNestedMessage" json:"optional_nested_message,omitempty"` + OptionalForeignMessage *ForeignMessage `protobuf:"bytes,19,opt,name=optional_foreign_message,json=optionalForeignMessage" json:"optional_foreign_message,omitempty"` + OptionalNestedEnum TestAllTypes_NestedEnum `protobuf:"varint,21,opt,name=optional_nested_enum,json=optionalNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"optional_nested_enum,omitempty"` + OptionalForeignEnum ForeignEnum 
`protobuf:"varint,22,opt,name=optional_foreign_enum,json=optionalForeignEnum,enum=conformance.ForeignEnum" json:"optional_foreign_enum,omitempty"` + OptionalStringPiece string `protobuf:"bytes,24,opt,name=optional_string_piece,json=optionalStringPiece" json:"optional_string_piece,omitempty"` + OptionalCord string `protobuf:"bytes,25,opt,name=optional_cord,json=optionalCord" json:"optional_cord,omitempty"` + RecursiveMessage *TestAllTypes `protobuf:"bytes,27,opt,name=recursive_message,json=recursiveMessage" json:"recursive_message,omitempty"` + // Repeated + RepeatedInt32 []int32 `protobuf:"varint,31,rep,packed,name=repeated_int32,json=repeatedInt32" json:"repeated_int32,omitempty"` + RepeatedInt64 []int64 `protobuf:"varint,32,rep,packed,name=repeated_int64,json=repeatedInt64" json:"repeated_int64,omitempty"` + RepeatedUint32 []uint32 `protobuf:"varint,33,rep,packed,name=repeated_uint32,json=repeatedUint32" json:"repeated_uint32,omitempty"` + RepeatedUint64 []uint64 `protobuf:"varint,34,rep,packed,name=repeated_uint64,json=repeatedUint64" json:"repeated_uint64,omitempty"` + RepeatedSint32 []int32 `protobuf:"zigzag32,35,rep,packed,name=repeated_sint32,json=repeatedSint32" json:"repeated_sint32,omitempty"` + RepeatedSint64 []int64 `protobuf:"zigzag64,36,rep,packed,name=repeated_sint64,json=repeatedSint64" json:"repeated_sint64,omitempty"` + RepeatedFixed32 []uint32 `protobuf:"fixed32,37,rep,packed,name=repeated_fixed32,json=repeatedFixed32" json:"repeated_fixed32,omitempty"` + RepeatedFixed64 []uint64 `protobuf:"fixed64,38,rep,packed,name=repeated_fixed64,json=repeatedFixed64" json:"repeated_fixed64,omitempty"` + RepeatedSfixed32 []int32 `protobuf:"fixed32,39,rep,packed,name=repeated_sfixed32,json=repeatedSfixed32" json:"repeated_sfixed32,omitempty"` + RepeatedSfixed64 []int64 `protobuf:"fixed64,40,rep,packed,name=repeated_sfixed64,json=repeatedSfixed64" json:"repeated_sfixed64,omitempty"` + RepeatedFloat []float32 `protobuf:"fixed32,41,rep,packed,name=repeated_float,json=repeatedFloat" json:"repeated_float,omitempty"` + RepeatedDouble []float64 `protobuf:"fixed64,42,rep,packed,name=repeated_double,json=repeatedDouble" json:"repeated_double,omitempty"` + RepeatedBool []bool `protobuf:"varint,43,rep,packed,name=repeated_bool,json=repeatedBool" json:"repeated_bool,omitempty"` + RepeatedString []string `protobuf:"bytes,44,rep,name=repeated_string,json=repeatedString" json:"repeated_string,omitempty"` + RepeatedBytes [][]byte `protobuf:"bytes,45,rep,name=repeated_bytes,json=repeatedBytes,proto3" json:"repeated_bytes,omitempty"` + RepeatedNestedMessage []*TestAllTypes_NestedMessage `protobuf:"bytes,48,rep,name=repeated_nested_message,json=repeatedNestedMessage" json:"repeated_nested_message,omitempty"` + RepeatedForeignMessage []*ForeignMessage `protobuf:"bytes,49,rep,name=repeated_foreign_message,json=repeatedForeignMessage" json:"repeated_foreign_message,omitempty"` + RepeatedNestedEnum []TestAllTypes_NestedEnum `protobuf:"varint,51,rep,packed,name=repeated_nested_enum,json=repeatedNestedEnum,enum=conformance.TestAllTypes_NestedEnum" json:"repeated_nested_enum,omitempty"` + RepeatedForeignEnum []ForeignEnum `protobuf:"varint,52,rep,packed,name=repeated_foreign_enum,json=repeatedForeignEnum,enum=conformance.ForeignEnum" json:"repeated_foreign_enum,omitempty"` + RepeatedStringPiece []string `protobuf:"bytes,54,rep,name=repeated_string_piece,json=repeatedStringPiece" json:"repeated_string_piece,omitempty"` + RepeatedCord []string `protobuf:"bytes,55,rep,name=repeated_cord,json=repeatedCord" 
json:"repeated_cord,omitempty"` + // Map + MapInt32Int32 map[int32]int32 `protobuf:"bytes,56,rep,name=map_int32_int32,json=mapInt32Int32" json:"map_int32_int32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapInt64Int64 map[int64]int64 `protobuf:"bytes,57,rep,name=map_int64_int64,json=mapInt64Int64" json:"map_int64_int64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapUint32Uint32 map[uint32]uint32 `protobuf:"bytes,58,rep,name=map_uint32_uint32,json=mapUint32Uint32" json:"map_uint32_uint32,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapUint64Uint64 map[uint64]uint64 `protobuf:"bytes,59,rep,name=map_uint64_uint64,json=mapUint64Uint64" json:"map_uint64_uint64,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapSint32Sint32 map[int32]int32 `protobuf:"bytes,60,rep,name=map_sint32_sint32,json=mapSint32Sint32" json:"map_sint32_sint32,omitempty" protobuf_key:"zigzag32,1,opt,name=key" protobuf_val:"zigzag32,2,opt,name=value"` + MapSint64Sint64 map[int64]int64 `protobuf:"bytes,61,rep,name=map_sint64_sint64,json=mapSint64Sint64" json:"map_sint64_sint64,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"zigzag64,2,opt,name=value"` + MapFixed32Fixed32 map[uint32]uint32 `protobuf:"bytes,62,rep,name=map_fixed32_fixed32,json=mapFixed32Fixed32" json:"map_fixed32_fixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` + MapFixed64Fixed64 map[uint64]uint64 `protobuf:"bytes,63,rep,name=map_fixed64_fixed64,json=mapFixed64Fixed64" json:"map_fixed64_fixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` + MapSfixed32Sfixed32 map[int32]int32 `protobuf:"bytes,64,rep,name=map_sfixed32_sfixed32,json=mapSfixed32Sfixed32" json:"map_sfixed32_sfixed32,omitempty" protobuf_key:"fixed32,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` + MapSfixed64Sfixed64 map[int64]int64 `protobuf:"bytes,65,rep,name=map_sfixed64_sfixed64,json=mapSfixed64Sfixed64" json:"map_sfixed64_sfixed64,omitempty" protobuf_key:"fixed64,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` + MapInt32Float map[int32]float32 `protobuf:"bytes,66,rep,name=map_int32_float,json=mapInt32Float" json:"map_int32_float,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed32,2,opt,name=value"` + MapInt32Double map[int32]float64 `protobuf:"bytes,67,rep,name=map_int32_double,json=mapInt32Double" json:"map_int32_double,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value"` + MapBoolBool map[bool]bool `protobuf:"bytes,68,rep,name=map_bool_bool,json=mapBoolBool" json:"map_bool_bool,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MapStringString map[string]string `protobuf:"bytes,69,rep,name=map_string_string,json=mapStringString" json:"map_string_string,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MapStringBytes map[string][]byte `protobuf:"bytes,70,rep,name=map_string_bytes,json=mapStringBytes" json:"map_string_bytes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + MapStringNestedMessage map[string]*TestAllTypes_NestedMessage `protobuf:"bytes,71,rep,name=map_string_nested_message,json=mapStringNestedMessage" json:"map_string_nested_message,omitempty" protobuf_key:"bytes,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` + MapStringForeignMessage map[string]*ForeignMessage `protobuf:"bytes,72,rep,name=map_string_foreign_message,json=mapStringForeignMessage" json:"map_string_foreign_message,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MapStringNestedEnum map[string]TestAllTypes_NestedEnum `protobuf:"bytes,73,rep,name=map_string_nested_enum,json=mapStringNestedEnum" json:"map_string_nested_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.TestAllTypes_NestedEnum"` + MapStringForeignEnum map[string]ForeignEnum `protobuf:"bytes,74,rep,name=map_string_foreign_enum,json=mapStringForeignEnum" json:"map_string_foreign_enum,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=conformance.ForeignEnum"` + // Types that are valid to be assigned to OneofField: + // *TestAllTypes_OneofUint32 + // *TestAllTypes_OneofNestedMessage + // *TestAllTypes_OneofString + // *TestAllTypes_OneofBytes + // *TestAllTypes_OneofBool + // *TestAllTypes_OneofUint64 + // *TestAllTypes_OneofFloat + // *TestAllTypes_OneofDouble + // *TestAllTypes_OneofEnum + OneofField isTestAllTypes_OneofField `protobuf_oneof:"oneof_field"` + // Well-known types + OptionalBoolWrapper *google_protobuf5.BoolValue `protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper" json:"optional_bool_wrapper,omitempty"` + OptionalInt32Wrapper *google_protobuf5.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper" json:"optional_int32_wrapper,omitempty"` + OptionalInt64Wrapper *google_protobuf5.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper" json:"optional_int64_wrapper,omitempty"` + OptionalUint32Wrapper *google_protobuf5.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper" json:"optional_uint32_wrapper,omitempty"` + OptionalUint64Wrapper *google_protobuf5.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper" json:"optional_uint64_wrapper,omitempty"` + OptionalFloatWrapper *google_protobuf5.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper" json:"optional_float_wrapper,omitempty"` + OptionalDoubleWrapper *google_protobuf5.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper" json:"optional_double_wrapper,omitempty"` + OptionalStringWrapper *google_protobuf5.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper" json:"optional_string_wrapper,omitempty"` + OptionalBytesWrapper *google_protobuf5.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper" json:"optional_bytes_wrapper,omitempty"` + RepeatedBoolWrapper []*google_protobuf5.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper" json:"repeated_bool_wrapper,omitempty"` + RepeatedInt32Wrapper []*google_protobuf5.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper" json:"repeated_int32_wrapper,omitempty"` + RepeatedInt64Wrapper []*google_protobuf5.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper" json:"repeated_int64_wrapper,omitempty"` + RepeatedUint32Wrapper []*google_protobuf5.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper" 
json:"repeated_uint32_wrapper,omitempty"` + RepeatedUint64Wrapper []*google_protobuf5.UInt64Value `protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper" json:"repeated_uint64_wrapper,omitempty"` + RepeatedFloatWrapper []*google_protobuf5.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper" json:"repeated_float_wrapper,omitempty"` + RepeatedDoubleWrapper []*google_protobuf5.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper" json:"repeated_double_wrapper,omitempty"` + RepeatedStringWrapper []*google_protobuf5.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper" json:"repeated_string_wrapper,omitempty"` + RepeatedBytesWrapper []*google_protobuf5.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper" json:"repeated_bytes_wrapper,omitempty"` + OptionalDuration *google_protobuf1.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration" json:"optional_duration,omitempty"` + OptionalTimestamp *google_protobuf4.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp" json:"optional_timestamp,omitempty"` + OptionalFieldMask *google_protobuf2.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask" json:"optional_field_mask,omitempty"` + OptionalStruct *google_protobuf3.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct" json:"optional_struct,omitempty"` + OptionalAny *google_protobuf.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny" json:"optional_any,omitempty"` + OptionalValue *google_protobuf3.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue" json:"optional_value,omitempty"` + RepeatedDuration []*google_protobuf1.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration" json:"repeated_duration,omitempty"` + RepeatedTimestamp []*google_protobuf4.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp" json:"repeated_timestamp,omitempty"` + RepeatedFieldmask []*google_protobuf2.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask" json:"repeated_fieldmask,omitempty"` + RepeatedStruct []*google_protobuf3.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct" json:"repeated_struct,omitempty"` + RepeatedAny []*google_protobuf.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny" json:"repeated_any,omitempty"` + RepeatedValue []*google_protobuf3.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` + // Test field-name-to-JSON-name convention. + // (protobuf says names can be any valid C/C++ identifier.) 
+ Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1" json:"fieldname1,omitempty"` + FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2" json:"field_name2,omitempty"` + XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=FieldName3" json:"_field_name3,omitempty"` + Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4" json:"field__name4_,omitempty"` + Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5" json:"field0name5,omitempty"` + Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6" json:"field_0_name6,omitempty"` + FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7" json:"fieldName7,omitempty"` + FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8" json:"FieldName8,omitempty"` + Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9" json:"field_Name9,omitempty"` + Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=FieldName10" json:"Field_Name10,omitempty"` + FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=FIELDNAME11" json:"FIELD_NAME11,omitempty"` + FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=FIELDName12" json:"FIELD_name12,omitempty"` + XFieldName13 int32 `protobuf:"varint,413,opt,name=__field_name13,json=FieldName13" json:"__field_name13,omitempty"` + X_FieldName14 int32 `protobuf:"varint,414,opt,name=__Field_name14,json=FieldName14" json:"__Field_name14,omitempty"` + Field_Name15 int32 `protobuf:"varint,415,opt,name=field__name15,json=fieldName15" json:"field__name15,omitempty"` + Field__Name16 int32 `protobuf:"varint,416,opt,name=field__Name16,json=fieldName16" json:"field__Name16,omitempty"` + FieldName17__ int32 `protobuf:"varint,417,opt,name=field_name17__,json=fieldName17" json:"field_name17__,omitempty"` + FieldName18__ int32 `protobuf:"varint,418,opt,name=Field_name18__,json=FieldName18" json:"Field_name18__,omitempty"` +} + +func (m *TestAllTypes) Reset() { *m = TestAllTypes{} } +func (m *TestAllTypes) String() string { return proto.CompactTextString(m) } +func (*TestAllTypes) ProtoMessage() {} +func (*TestAllTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isTestAllTypes_OneofField interface { + isTestAllTypes_OneofField() +} + +type TestAllTypes_OneofUint32 struct { + OneofUint32 uint32 `protobuf:"varint,111,opt,name=oneof_uint32,json=oneofUint32,oneof"` +} +type TestAllTypes_OneofNestedMessage struct { + OneofNestedMessage *TestAllTypes_NestedMessage `protobuf:"bytes,112,opt,name=oneof_nested_message,json=oneofNestedMessage,oneof"` +} +type TestAllTypes_OneofString struct { + OneofString string `protobuf:"bytes,113,opt,name=oneof_string,json=oneofString,oneof"` +} +type TestAllTypes_OneofBytes struct { + OneofBytes []byte `protobuf:"bytes,114,opt,name=oneof_bytes,json=oneofBytes,proto3,oneof"` +} +type TestAllTypes_OneofBool struct { + OneofBool bool `protobuf:"varint,115,opt,name=oneof_bool,json=oneofBool,oneof"` +} +type TestAllTypes_OneofUint64 struct { + OneofUint64 uint64 `protobuf:"varint,116,opt,name=oneof_uint64,json=oneofUint64,oneof"` +} +type TestAllTypes_OneofFloat struct { + OneofFloat float32 `protobuf:"fixed32,117,opt,name=oneof_float,json=oneofFloat,oneof"` +} +type TestAllTypes_OneofDouble struct { + OneofDouble float64 `protobuf:"fixed64,118,opt,name=oneof_double,json=oneofDouble,oneof"` +} +type TestAllTypes_OneofEnum struct { + OneofEnum TestAllTypes_NestedEnum 
`protobuf:"varint,119,opt,name=oneof_enum,json=oneofEnum,enum=conformance.TestAllTypes_NestedEnum,oneof"` +} + +func (*TestAllTypes_OneofUint32) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofNestedMessage) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofString) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofBytes) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofBool) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofUint64) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofFloat) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofDouble) isTestAllTypes_OneofField() {} +func (*TestAllTypes_OneofEnum) isTestAllTypes_OneofField() {} + +func (m *TestAllTypes) GetOneofField() isTestAllTypes_OneofField { + if m != nil { + return m.OneofField + } + return nil +} + +func (m *TestAllTypes) GetOptionalInt32() int32 { + if m != nil { + return m.OptionalInt32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalInt64() int64 { + if m != nil { + return m.OptionalInt64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalUint32() uint32 { + if m != nil { + return m.OptionalUint32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalUint64() uint64 { + if m != nil { + return m.OptionalUint64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSint32() int32 { + if m != nil { + return m.OptionalSint32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSint64() int64 { + if m != nil { + return m.OptionalSint64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalFixed32() uint32 { + if m != nil { + return m.OptionalFixed32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalFixed64() uint64 { + if m != nil { + return m.OptionalFixed64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSfixed32() int32 { + if m != nil { + return m.OptionalSfixed32 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalSfixed64() int64 { + if m != nil { + return m.OptionalSfixed64 + } + return 0 +} + +func (m *TestAllTypes) GetOptionalFloat() float32 { + if m != nil { + return m.OptionalFloat + } + return 0 +} + +func (m *TestAllTypes) GetOptionalDouble() float64 { + if m != nil { + return m.OptionalDouble + } + return 0 +} + +func (m *TestAllTypes) GetOptionalBool() bool { + if m != nil { + return m.OptionalBool + } + return false +} + +func (m *TestAllTypes) GetOptionalString() string { + if m != nil { + return m.OptionalString + } + return "" +} + +func (m *TestAllTypes) GetOptionalBytes() []byte { + if m != nil { + return m.OptionalBytes + } + return nil +} + +func (m *TestAllTypes) GetOptionalNestedMessage() *TestAllTypes_NestedMessage { + if m != nil { + return m.OptionalNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetOptionalForeignMessage() *ForeignMessage { + if m != nil { + return m.OptionalForeignMessage + } + return nil +} + +func (m *TestAllTypes) GetOptionalNestedEnum() TestAllTypes_NestedEnum { + if m != nil { + return m.OptionalNestedEnum + } + return TestAllTypes_FOO +} + +func (m *TestAllTypes) GetOptionalForeignEnum() ForeignEnum { + if m != nil { + return m.OptionalForeignEnum + } + return ForeignEnum_FOREIGN_FOO +} + +func (m *TestAllTypes) GetOptionalStringPiece() string { + if m != nil { + return m.OptionalStringPiece + } + return "" +} + +func (m *TestAllTypes) GetOptionalCord() string { + if m != nil { + return m.OptionalCord + } + return "" +} + +func (m *TestAllTypes) GetRecursiveMessage() *TestAllTypes { + if m != nil { + return m.RecursiveMessage + } + return nil +} + +func (m *TestAllTypes) 
GetRepeatedInt32() []int32 { + if m != nil { + return m.RepeatedInt32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedInt64() []int64 { + if m != nil { + return m.RepeatedInt64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint32() []uint32 { + if m != nil { + return m.RepeatedUint32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint64() []uint64 { + if m != nil { + return m.RepeatedUint64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSint32() []int32 { + if m != nil { + return m.RepeatedSint32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSint64() []int64 { + if m != nil { + return m.RepeatedSint64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFixed32() []uint32 { + if m != nil { + return m.RepeatedFixed32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFixed64() []uint64 { + if m != nil { + return m.RepeatedFixed64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSfixed32() []int32 { + if m != nil { + return m.RepeatedSfixed32 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedSfixed64() []int64 { + if m != nil { + return m.RepeatedSfixed64 + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFloat() []float32 { + if m != nil { + return m.RepeatedFloat + } + return nil +} + +func (m *TestAllTypes) GetRepeatedDouble() []float64 { + if m != nil { + return m.RepeatedDouble + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBool() []bool { + if m != nil { + return m.RepeatedBool + } + return nil +} + +func (m *TestAllTypes) GetRepeatedString() []string { + if m != nil { + return m.RepeatedString + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBytes() [][]byte { + if m != nil { + return m.RepeatedBytes + } + return nil +} + +func (m *TestAllTypes) GetRepeatedNestedMessage() []*TestAllTypes_NestedMessage { + if m != nil { + return m.RepeatedNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetRepeatedForeignMessage() []*ForeignMessage { + if m != nil { + return m.RepeatedForeignMessage + } + return nil +} + +func (m *TestAllTypes) GetRepeatedNestedEnum() []TestAllTypes_NestedEnum { + if m != nil { + return m.RepeatedNestedEnum + } + return nil +} + +func (m *TestAllTypes) GetRepeatedForeignEnum() []ForeignEnum { + if m != nil { + return m.RepeatedForeignEnum + } + return nil +} + +func (m *TestAllTypes) GetRepeatedStringPiece() []string { + if m != nil { + return m.RepeatedStringPiece + } + return nil +} + +func (m *TestAllTypes) GetRepeatedCord() []string { + if m != nil { + return m.RepeatedCord + } + return nil +} + +func (m *TestAllTypes) GetMapInt32Int32() map[int32]int32 { + if m != nil { + return m.MapInt32Int32 + } + return nil +} + +func (m *TestAllTypes) GetMapInt64Int64() map[int64]int64 { + if m != nil { + return m.MapInt64Int64 + } + return nil +} + +func (m *TestAllTypes) GetMapUint32Uint32() map[uint32]uint32 { + if m != nil { + return m.MapUint32Uint32 + } + return nil +} + +func (m *TestAllTypes) GetMapUint64Uint64() map[uint64]uint64 { + if m != nil { + return m.MapUint64Uint64 + } + return nil +} + +func (m *TestAllTypes) GetMapSint32Sint32() map[int32]int32 { + if m != nil { + return m.MapSint32Sint32 + } + return nil +} + +func (m *TestAllTypes) GetMapSint64Sint64() map[int64]int64 { + if m != nil { + return m.MapSint64Sint64 + } + return nil +} + +func (m *TestAllTypes) GetMapFixed32Fixed32() map[uint32]uint32 { + if m != nil { + return m.MapFixed32Fixed32 + } + return nil +} + +func (m *TestAllTypes) GetMapFixed64Fixed64() map[uint64]uint64 { + if m != nil 
{ + return m.MapFixed64Fixed64 + } + return nil +} + +func (m *TestAllTypes) GetMapSfixed32Sfixed32() map[int32]int32 { + if m != nil { + return m.MapSfixed32Sfixed32 + } + return nil +} + +func (m *TestAllTypes) GetMapSfixed64Sfixed64() map[int64]int64 { + if m != nil { + return m.MapSfixed64Sfixed64 + } + return nil +} + +func (m *TestAllTypes) GetMapInt32Float() map[int32]float32 { + if m != nil { + return m.MapInt32Float + } + return nil +} + +func (m *TestAllTypes) GetMapInt32Double() map[int32]float64 { + if m != nil { + return m.MapInt32Double + } + return nil +} + +func (m *TestAllTypes) GetMapBoolBool() map[bool]bool { + if m != nil { + return m.MapBoolBool + } + return nil +} + +func (m *TestAllTypes) GetMapStringString() map[string]string { + if m != nil { + return m.MapStringString + } + return nil +} + +func (m *TestAllTypes) GetMapStringBytes() map[string][]byte { + if m != nil { + return m.MapStringBytes + } + return nil +} + +func (m *TestAllTypes) GetMapStringNestedMessage() map[string]*TestAllTypes_NestedMessage { + if m != nil { + return m.MapStringNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetMapStringForeignMessage() map[string]*ForeignMessage { + if m != nil { + return m.MapStringForeignMessage + } + return nil +} + +func (m *TestAllTypes) GetMapStringNestedEnum() map[string]TestAllTypes_NestedEnum { + if m != nil { + return m.MapStringNestedEnum + } + return nil +} + +func (m *TestAllTypes) GetMapStringForeignEnum() map[string]ForeignEnum { + if m != nil { + return m.MapStringForeignEnum + } + return nil +} + +func (m *TestAllTypes) GetOneofUint32() uint32 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint32); ok { + return x.OneofUint32 + } + return 0 +} + +func (m *TestAllTypes) GetOneofNestedMessage() *TestAllTypes_NestedMessage { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofNestedMessage); ok { + return x.OneofNestedMessage + } + return nil +} + +func (m *TestAllTypes) GetOneofString() string { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofString); ok { + return x.OneofString + } + return "" +} + +func (m *TestAllTypes) GetOneofBytes() []byte { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofBytes); ok { + return x.OneofBytes + } + return nil +} + +func (m *TestAllTypes) GetOneofBool() bool { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofBool); ok { + return x.OneofBool + } + return false +} + +func (m *TestAllTypes) GetOneofUint64() uint64 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint64); ok { + return x.OneofUint64 + } + return 0 +} + +func (m *TestAllTypes) GetOneofFloat() float32 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofFloat); ok { + return x.OneofFloat + } + return 0 +} + +func (m *TestAllTypes) GetOneofDouble() float64 { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofDouble); ok { + return x.OneofDouble + } + return 0 +} + +func (m *TestAllTypes) GetOneofEnum() TestAllTypes_NestedEnum { + if x, ok := m.GetOneofField().(*TestAllTypes_OneofEnum); ok { + return x.OneofEnum + } + return TestAllTypes_FOO +} + +func (m *TestAllTypes) GetOptionalBoolWrapper() *google_protobuf5.BoolValue { + if m != nil { + return m.OptionalBoolWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalInt32Wrapper() *google_protobuf5.Int32Value { + if m != nil { + return m.OptionalInt32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalInt64Wrapper() *google_protobuf5.Int64Value { + if m != nil { + return m.OptionalInt64Wrapper + } + return nil +} + +func (m *TestAllTypes) 
GetOptionalUint32Wrapper() *google_protobuf5.UInt32Value { + if m != nil { + return m.OptionalUint32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalUint64Wrapper() *google_protobuf5.UInt64Value { + if m != nil { + return m.OptionalUint64Wrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalFloatWrapper() *google_protobuf5.FloatValue { + if m != nil { + return m.OptionalFloatWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalDoubleWrapper() *google_protobuf5.DoubleValue { + if m != nil { + return m.OptionalDoubleWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalStringWrapper() *google_protobuf5.StringValue { + if m != nil { + return m.OptionalStringWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalBytesWrapper() *google_protobuf5.BytesValue { + if m != nil { + return m.OptionalBytesWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBoolWrapper() []*google_protobuf5.BoolValue { + if m != nil { + return m.RepeatedBoolWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*google_protobuf5.Int32Value { + if m != nil { + return m.RepeatedInt32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*google_protobuf5.Int64Value { + if m != nil { + return m.RepeatedInt64Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*google_protobuf5.UInt32Value { + if m != nil { + return m.RepeatedUint32Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*google_protobuf5.UInt64Value { + if m != nil { + return m.RepeatedUint64Wrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFloatWrapper() []*google_protobuf5.FloatValue { + if m != nil { + return m.RepeatedFloatWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*google_protobuf5.DoubleValue { + if m != nil { + return m.RepeatedDoubleWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedStringWrapper() []*google_protobuf5.StringValue { + if m != nil { + return m.RepeatedStringWrapper + } + return nil +} + +func (m *TestAllTypes) GetRepeatedBytesWrapper() []*google_protobuf5.BytesValue { + if m != nil { + return m.RepeatedBytesWrapper + } + return nil +} + +func (m *TestAllTypes) GetOptionalDuration() *google_protobuf1.Duration { + if m != nil { + return m.OptionalDuration + } + return nil +} + +func (m *TestAllTypes) GetOptionalTimestamp() *google_protobuf4.Timestamp { + if m != nil { + return m.OptionalTimestamp + } + return nil +} + +func (m *TestAllTypes) GetOptionalFieldMask() *google_protobuf2.FieldMask { + if m != nil { + return m.OptionalFieldMask + } + return nil +} + +func (m *TestAllTypes) GetOptionalStruct() *google_protobuf3.Struct { + if m != nil { + return m.OptionalStruct + } + return nil +} + +func (m *TestAllTypes) GetOptionalAny() *google_protobuf.Any { + if m != nil { + return m.OptionalAny + } + return nil +} + +func (m *TestAllTypes) GetOptionalValue() *google_protobuf3.Value { + if m != nil { + return m.OptionalValue + } + return nil +} + +func (m *TestAllTypes) GetRepeatedDuration() []*google_protobuf1.Duration { + if m != nil { + return m.RepeatedDuration + } + return nil +} + +func (m *TestAllTypes) GetRepeatedTimestamp() []*google_protobuf4.Timestamp { + if m != nil { + return m.RepeatedTimestamp + } + return nil +} + +func (m *TestAllTypes) GetRepeatedFieldmask() []*google_protobuf2.FieldMask { + if m != nil { + return m.RepeatedFieldmask + } + return nil +} + +func (m 
*TestAllTypes) GetRepeatedStruct() []*google_protobuf3.Struct { + if m != nil { + return m.RepeatedStruct + } + return nil +} + +func (m *TestAllTypes) GetRepeatedAny() []*google_protobuf.Any { + if m != nil { + return m.RepeatedAny + } + return nil +} + +func (m *TestAllTypes) GetRepeatedValue() []*google_protobuf3.Value { + if m != nil { + return m.RepeatedValue + } + return nil +} + +func (m *TestAllTypes) GetFieldname1() int32 { + if m != nil { + return m.Fieldname1 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName2() int32 { + if m != nil { + return m.FieldName2 + } + return 0 +} + +func (m *TestAllTypes) GetXFieldName3() int32 { + if m != nil { + return m.XFieldName3 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name4_() int32 { + if m != nil { + return m.Field_Name4_ + } + return 0 +} + +func (m *TestAllTypes) GetField0Name5() int32 { + if m != nil { + return m.Field0Name5 + } + return 0 +} + +func (m *TestAllTypes) GetField_0Name6() int32 { + if m != nil { + return m.Field_0Name6 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName7() int32 { + if m != nil { + return m.FieldName7 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName8() int32 { + if m != nil { + return m.FieldName8 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name9() int32 { + if m != nil { + return m.Field_Name9 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name10() int32 { + if m != nil { + return m.Field_Name10 + } + return 0 +} + +func (m *TestAllTypes) GetFIELD_NAME11() int32 { + if m != nil { + return m.FIELD_NAME11 + } + return 0 +} + +func (m *TestAllTypes) GetFIELDName12() int32 { + if m != nil { + return m.FIELDName12 + } + return 0 +} + +func (m *TestAllTypes) GetXFieldName13() int32 { + if m != nil { + return m.XFieldName13 + } + return 0 +} + +func (m *TestAllTypes) GetX_FieldName14() int32 { + if m != nil { + return m.X_FieldName14 + } + return 0 +} + +func (m *TestAllTypes) GetField_Name15() int32 { + if m != nil { + return m.Field_Name15 + } + return 0 +} + +func (m *TestAllTypes) GetField__Name16() int32 { + if m != nil { + return m.Field__Name16 + } + return 0 +} + +func (m *TestAllTypes) GetFieldName17__() int32 { + if m != nil { + return m.FieldName17__ + } + return 0 +} + +func (m *TestAllTypes) GetFieldName18__() int32 { + if m != nil { + return m.FieldName18__ + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TestAllTypes_OneofMarshaler, _TestAllTypes_OneofUnmarshaler, _TestAllTypes_OneofSizer, []interface{}{ + (*TestAllTypes_OneofUint32)(nil), + (*TestAllTypes_OneofNestedMessage)(nil), + (*TestAllTypes_OneofString)(nil), + (*TestAllTypes_OneofBytes)(nil), + (*TestAllTypes_OneofBool)(nil), + (*TestAllTypes_OneofUint64)(nil), + (*TestAllTypes_OneofFloat)(nil), + (*TestAllTypes_OneofDouble)(nil), + (*TestAllTypes_OneofEnum)(nil), + } +} + +func _TestAllTypes_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TestAllTypes) + // oneof_field + switch x := m.OneofField.(type) { + case *TestAllTypes_OneofUint32: + b.EncodeVarint(111<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.OneofUint32)) + case *TestAllTypes_OneofNestedMessage: + b.EncodeVarint(112<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.OneofNestedMessage); err != nil { + return err + } + case *TestAllTypes_OneofString: + b.EncodeVarint(113<<3 | proto.WireBytes) + b.EncodeStringBytes(x.OneofString) + case *TestAllTypes_OneofBytes: + b.EncodeVarint(114<<3 | proto.WireBytes) + b.EncodeRawBytes(x.OneofBytes) + case *TestAllTypes_OneofBool: + t := uint64(0) + if x.OneofBool { + t = 1 + } + b.EncodeVarint(115<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *TestAllTypes_OneofUint64: + b.EncodeVarint(116<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.OneofUint64)) + case *TestAllTypes_OneofFloat: + b.EncodeVarint(117<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.OneofFloat))) + case *TestAllTypes_OneofDouble: + b.EncodeVarint(118<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.OneofDouble)) + case *TestAllTypes_OneofEnum: + b.EncodeVarint(119<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.OneofEnum)) + case nil: + default: + return fmt.Errorf("TestAllTypes.OneofField has unexpected type %T", x) + } + return nil +} + +func _TestAllTypes_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TestAllTypes) + switch tag { + case 111: // oneof_field.oneof_uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = &TestAllTypes_OneofUint32{uint32(x)} + return true, err + case 112: // oneof_field.oneof_nested_message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TestAllTypes_NestedMessage) + err := b.DecodeMessage(msg) + m.OneofField = &TestAllTypes_OneofNestedMessage{msg} + return true, err + case 113: // oneof_field.oneof_string + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.OneofField = &TestAllTypes_OneofString{x} + return true, err + case 114: // oneof_field.oneof_bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.OneofField = &TestAllTypes_OneofBytes{x} + return true, err + case 115: // oneof_field.oneof_bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = &TestAllTypes_OneofBool{x != 0} + return true, err + case 116: // oneof_field.oneof_uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = 
&TestAllTypes_OneofUint64{x} + return true, err + case 117: // oneof_field.oneof_float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.OneofField = &TestAllTypes_OneofFloat{math.Float32frombits(uint32(x))} + return true, err + case 118: // oneof_field.oneof_double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.OneofField = &TestAllTypes_OneofDouble{math.Float64frombits(x)} + return true, err + case 119: // oneof_field.oneof_enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.OneofField = &TestAllTypes_OneofEnum{TestAllTypes_NestedEnum(x)} + return true, err + default: + return false, nil + } +} + +func _TestAllTypes_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TestAllTypes) + // oneof_field + switch x := m.OneofField.(type) { + case *TestAllTypes_OneofUint32: + n += proto.SizeVarint(111<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.OneofUint32)) + case *TestAllTypes_OneofNestedMessage: + s := proto.Size(x.OneofNestedMessage) + n += proto.SizeVarint(112<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TestAllTypes_OneofString: + n += proto.SizeVarint(113<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.OneofString))) + n += len(x.OneofString) + case *TestAllTypes_OneofBytes: + n += proto.SizeVarint(114<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.OneofBytes))) + n += len(x.OneofBytes) + case *TestAllTypes_OneofBool: + n += proto.SizeVarint(115<<3 | proto.WireVarint) + n += 1 + case *TestAllTypes_OneofUint64: + n += proto.SizeVarint(116<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.OneofUint64)) + case *TestAllTypes_OneofFloat: + n += proto.SizeVarint(117<<3 | proto.WireFixed32) + n += 4 + case *TestAllTypes_OneofDouble: + n += proto.SizeVarint(118<<3 | proto.WireFixed64) + n += 8 + case *TestAllTypes_OneofEnum: + n += proto.SizeVarint(119<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.OneofEnum)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type TestAllTypes_NestedMessage struct { + A int32 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` + Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive" json:"corecursive,omitempty"` +} + +func (m *TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} } +func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) } +func (*TestAllTypes_NestedMessage) ProtoMessage() {} +func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *TestAllTypes_NestedMessage) GetA() int32 { + if m != nil { + return m.A + } + return 0 +} + +func (m *TestAllTypes_NestedMessage) GetCorecursive() *TestAllTypes { + if m != nil { + return m.Corecursive + } + return nil +} + +type ForeignMessage struct { + C int32 `protobuf:"varint,1,opt,name=c" json:"c,omitempty"` +} + +func (m *ForeignMessage) Reset() { *m = ForeignMessage{} } +func (m *ForeignMessage) String() string { return proto.CompactTextString(m) } +func (*ForeignMessage) ProtoMessage() {} +func (*ForeignMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ForeignMessage) GetC() int32 { + if m != nil { + return m.C + } + return 0 +} + +func init() { + proto.RegisterType((*ConformanceRequest)(nil), 
"conformance.ConformanceRequest") + proto.RegisterType((*ConformanceResponse)(nil), "conformance.ConformanceResponse") + proto.RegisterType((*TestAllTypes)(nil), "conformance.TestAllTypes") + proto.RegisterType((*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.NestedMessage") + proto.RegisterType((*ForeignMessage)(nil), "conformance.ForeignMessage") + proto.RegisterEnum("conformance.WireFormat", WireFormat_name, WireFormat_value) + proto.RegisterEnum("conformance.ForeignEnum", ForeignEnum_name, ForeignEnum_value) + proto.RegisterEnum("conformance.TestAllTypes_NestedEnum", TestAllTypes_NestedEnum_name, TestAllTypes_NestedEnum_value) +} + +func init() { proto.RegisterFile("conformance_proto/conformance.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2737 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xd9, 0x72, 0xdb, 0xc8, + 0xd5, 0x16, 0x08, 0x59, 0x4b, 0x93, 0x92, 0xa8, 0xd6, 0xd6, 0x96, 0x5d, 0x63, 0x58, 0xb2, 0x7f, + 0xd3, 0xf6, 0x8c, 0xac, 0x05, 0x86, 0x65, 0xcf, 0x3f, 0x8e, 0x45, 0x9b, 0xb4, 0xe4, 0x8c, 0x25, + 0x17, 0x64, 0x8d, 0xab, 0x9c, 0x0b, 0x06, 0xa6, 0x20, 0x15, 0xc7, 0x24, 0xc1, 0x01, 0x48, 0x4f, + 0x94, 0xcb, 0xbc, 0x41, 0xf6, 0x7d, 0xbd, 0xcf, 0x7a, 0x93, 0xa4, 0x92, 0xab, 0x54, 0x6e, 0xb2, + 0x27, 0x95, 0x3d, 0x79, 0x85, 0xbc, 0x43, 0x52, 0xbd, 0xa2, 0xbb, 0x01, 0x50, 0xf4, 0x54, 0x0d, + 0x25, 0x1e, 0x7c, 0xfd, 0x9d, 0xd3, 0xe7, 0x1c, 0x7c, 0x2d, 0x1c, 0x18, 0x2c, 0xd7, 0x83, 0xf6, + 0x51, 0x10, 0xb6, 0xbc, 0x76, 0xdd, 0xaf, 0x75, 0xc2, 0xa0, 0x1b, 0xdc, 0x90, 0x2c, 0x2b, 0xc4, + 0x02, 0xf3, 0x92, 0x69, 0xf1, 0xec, 0x71, 0x10, 0x1c, 0x37, 0xfd, 0x1b, 0xe4, 0xd2, 0x8b, 0xde, + 0xd1, 0x0d, 0xaf, 0x7d, 0x42, 0x71, 0x8b, 0x6f, 0xe8, 0x97, 0x0e, 0x7b, 0xa1, 0xd7, 0x6d, 0x04, + 0x6d, 0x76, 0xdd, 0xd2, 0xaf, 0x1f, 0x35, 0xfc, 0xe6, 0x61, 0xad, 0xe5, 0x45, 0x2f, 0x19, 0xe2, + 0xbc, 0x8e, 0x88, 0xba, 0x61, 0xaf, 0xde, 0x65, 0x57, 0x2f, 0xe8, 0x57, 0xbb, 0x8d, 0x96, 0x1f, + 0x75, 0xbd, 0x56, 0x27, 0x2b, 0x80, 0x0f, 0x43, 0xaf, 0xd3, 0xf1, 0xc3, 0x88, 0x5e, 0x5f, 0xfa, + 0x85, 0x01, 0xe0, 0xfd, 0x78, 0x2f, 0xae, 0xff, 0x41, 0xcf, 0x8f, 0xba, 0xf0, 0x3a, 0x28, 0xf2, + 0x15, 0xb5, 0x8e, 0x77, 0xd2, 0x0c, 0xbc, 0x43, 0x64, 0x58, 0x46, 0xa9, 0xb0, 0x3d, 0xe4, 0x4e, + 0xf1, 0x2b, 0x4f, 0xe8, 0x05, 0xb8, 0x0c, 0x0a, 0xef, 0x47, 0x41, 0x5b, 0x00, 0x73, 0x96, 0x51, + 0x1a, 0xdf, 0x1e, 0x72, 0xf3, 0xd8, 0xca, 0x41, 0x7b, 0x60, 0x21, 0xa4, 0xe4, 0xfe, 0x61, 0x2d, + 0xe8, 0x75, 0x3b, 0xbd, 0x6e, 0x8d, 0x78, 0xed, 0x22, 0xd3, 0x32, 0x4a, 0x93, 0xeb, 0x0b, 0x2b, + 0x72, 0x9a, 0x9f, 0x35, 0x42, 0xbf, 0x4a, 0x2e, 0xbb, 0x73, 0x62, 0xdd, 0x1e, 0x59, 0x46, 0xcd, + 0xe5, 0x71, 0x30, 0xca, 0x1c, 0x2e, 0x7d, 0x2a, 0x07, 0x66, 0x94, 0x4d, 0x44, 0x9d, 0xa0, 0x1d, + 0xf9, 0xf0, 0x22, 0xc8, 0x77, 0xbc, 0x30, 0xf2, 0x6b, 0x7e, 0x18, 0x06, 0x21, 0xd9, 0x00, 0x8e, + 0x0b, 0x10, 0x63, 0x05, 0xdb, 0xe0, 0x55, 0x30, 0x15, 0xf9, 0x61, 0xc3, 0x6b, 0x36, 0x3e, 0xc9, + 0x61, 0x23, 0x0c, 0x36, 0x29, 0x2e, 0x50, 0xe8, 0x65, 0x30, 0x11, 0xf6, 0xda, 0x38, 0xc1, 0x0c, + 0xc8, 0xf7, 0x59, 0x60, 0x66, 0x0a, 0x4b, 0x4b, 0x9d, 0x39, 0x68, 0xea, 0x86, 0xd3, 0x52, 0xb7, + 0x08, 0x46, 0xa3, 0x97, 0x8d, 0x4e, 0xc7, 0x3f, 0x44, 0x67, 0xd8, 0x75, 0x6e, 0x28, 0x8f, 0x81, + 0x91, 0xd0, 0x8f, 0x7a, 0xcd, 0xee, 0xd2, 0x7f, 0xaa, 0xa0, 0xf0, 0xd4, 0x8f, 0xba, 0x5b, 0xcd, + 0xe6, 0xd3, 0x93, 0x8e, 0x1f, 0xc1, 0xcb, 0x60, 0x32, 0xe8, 0xe0, 0x5e, 0xf3, 0x9a, 0xb5, 0x46, + 0xbb, 0xbb, 0xb1, 0x4e, 0x12, 0x70, 0xc6, 0x9d, 0xe0, 0xd6, 0x1d, 0x6c, 
0xd4, 0x61, 0x8e, 0x4d, + 0xf6, 0x65, 0x2a, 0x30, 0xc7, 0x86, 0x57, 0xc0, 0x94, 0x80, 0xf5, 0x28, 0x1d, 0xde, 0xd5, 0x84, + 0x2b, 0x56, 0x1f, 0x10, 0x6b, 0x02, 0xe8, 0xd8, 0x64, 0x57, 0xc3, 0x2a, 0x50, 0x63, 0x8c, 0x28, + 0x23, 0xde, 0xde, 0x74, 0x0c, 0xdc, 0x4f, 0x32, 0x46, 0x94, 0x11, 0xd7, 0x08, 0xaa, 0x40, 0xc7, + 0x86, 0x57, 0x41, 0x51, 0x00, 0x8f, 0x1a, 0x9f, 0xf0, 0x0f, 0x37, 0xd6, 0xd1, 0xa8, 0x65, 0x94, + 0x46, 0x5d, 0x41, 0x50, 0xa5, 0xe6, 0x24, 0xd4, 0xb1, 0xd1, 0x98, 0x65, 0x94, 0x46, 0x34, 0xa8, + 0x63, 0xc3, 0xeb, 0x60, 0x3a, 0x76, 0xcf, 0x69, 0xc7, 0x2d, 0xa3, 0x34, 0xe5, 0x0a, 0x8e, 0x7d, + 0x66, 0x4f, 0x01, 0x3b, 0x36, 0x02, 0x96, 0x51, 0x2a, 0xea, 0x60, 0xc7, 0x56, 0x52, 0x7f, 0xd4, + 0x0c, 0xbc, 0x2e, 0xca, 0x5b, 0x46, 0x29, 0x17, 0xa7, 0xbe, 0x8a, 0x8d, 0xca, 0xfe, 0x0f, 0x83, + 0xde, 0x8b, 0xa6, 0x8f, 0x0a, 0x96, 0x51, 0x32, 0xe2, 0xfd, 0x3f, 0x20, 0x56, 0xb8, 0x0c, 0xc4, + 0xca, 0xda, 0x8b, 0x20, 0x68, 0xa2, 0x09, 0xcb, 0x28, 0x8d, 0xb9, 0x05, 0x6e, 0x2c, 0x07, 0x41, + 0x53, 0xcd, 0x66, 0x37, 0x6c, 0xb4, 0x8f, 0xd1, 0x24, 0xee, 0x2a, 0x29, 0x9b, 0xc4, 0xaa, 0x44, + 0xf7, 0xe2, 0xa4, 0xeb, 0x47, 0x68, 0x0a, 0xb7, 0x71, 0x1c, 0x5d, 0x19, 0x1b, 0x61, 0x0d, 0x2c, + 0x08, 0x58, 0x9b, 0xde, 0xde, 0x2d, 0x3f, 0x8a, 0xbc, 0x63, 0x1f, 0x41, 0xcb, 0x28, 0xe5, 0xd7, + 0xaf, 0x28, 0x37, 0xb6, 0xdc, 0xa2, 0x2b, 0xbb, 0x04, 0xff, 0x98, 0xc2, 0xdd, 0x39, 0xce, 0xa3, + 0x98, 0xe1, 0x01, 0x40, 0x71, 0x96, 0x82, 0xd0, 0x6f, 0x1c, 0xb7, 0x85, 0x87, 0x19, 0xe2, 0xe1, + 0x9c, 0xe2, 0xa1, 0x4a, 0x31, 0x9c, 0x75, 0x5e, 0x24, 0x53, 0xb1, 0xc3, 0xf7, 0xc0, 0xac, 0x1e, + 0xb7, 0xdf, 0xee, 0xb5, 0xd0, 0x1c, 0x51, 0xa3, 0x4b, 0xa7, 0x05, 0x5d, 0x69, 0xf7, 0x5a, 0x2e, + 0x54, 0x23, 0xc6, 0x36, 0xf8, 0x2e, 0x98, 0x4b, 0x84, 0x4b, 0x88, 0xe7, 0x09, 0x31, 0x4a, 0x8b, + 0x95, 0x90, 0xcd, 0x68, 0x81, 0x12, 0x36, 0x47, 0x62, 0xa3, 0xd5, 0xaa, 0x75, 0x1a, 0x7e, 0xdd, + 0x47, 0x08, 0xd7, 0xac, 0x9c, 0x1b, 0xcb, 0xc5, 0xeb, 0x68, 0xdd, 0x9e, 0xe0, 0xcb, 0xf0, 0x8a, + 0xd4, 0x0a, 0xf5, 0x20, 0x3c, 0x44, 0x67, 0x19, 0xde, 0x88, 0xdb, 0xe1, 0x7e, 0x10, 0x1e, 0xc2, + 0x2a, 0x98, 0x0e, 0xfd, 0x7a, 0x2f, 0x8c, 0x1a, 0xaf, 0x7c, 0x91, 0xd6, 0x73, 0x24, 0xad, 0x67, + 0x33, 0x73, 0xe0, 0x16, 0xc5, 0x1a, 0x9e, 0xce, 0xcb, 0x60, 0x32, 0xf4, 0x3b, 0xbe, 0x87, 0xf3, + 0x48, 0x6f, 0xe6, 0x0b, 0x96, 0x89, 0xd5, 0x86, 0x5b, 0x85, 0xda, 0xc8, 0x30, 0xc7, 0x46, 0x96, + 0x65, 0x62, 0xb5, 0x91, 0x60, 0x54, 0x1b, 0x04, 0x8c, 0xa9, 0xcd, 0x45, 0xcb, 0xc4, 0x6a, 0xc3, + 0xcd, 0xb1, 0xda, 0x28, 0x40, 0xc7, 0x46, 0x4b, 0x96, 0x89, 0xd5, 0x46, 0x06, 0x6a, 0x8c, 0x4c, + 0x6d, 0x96, 0x2d, 0x13, 0xab, 0x0d, 0x37, 0xef, 0x27, 0x19, 0x99, 0xda, 0x5c, 0xb2, 0x4c, 0xac, + 0x36, 0x32, 0x90, 0xaa, 0x8d, 0x00, 0x72, 0x59, 0xb8, 0x6c, 0x99, 0x58, 0x6d, 0xb8, 0x5d, 0x52, + 0x1b, 0x15, 0xea, 0xd8, 0xe8, 0xff, 0x2c, 0x13, 0xab, 0x8d, 0x02, 0xa5, 0x6a, 0x13, 0xbb, 0xe7, + 0xb4, 0x57, 0x2c, 0x13, 0xab, 0x8d, 0x08, 0x40, 0x52, 0x1b, 0x0d, 0xec, 0xd8, 0xa8, 0x64, 0x99, + 0x58, 0x6d, 0x54, 0x30, 0x55, 0x9b, 0x38, 0x08, 0xa2, 0x36, 0x57, 0x2d, 0x13, 0xab, 0x8d, 0x08, + 0x81, 0xab, 0x8d, 0x80, 0x31, 0xb5, 0xb9, 0x66, 0x99, 0x58, 0x6d, 0xb8, 0x39, 0x56, 0x1b, 0x01, + 0x24, 0x6a, 0x73, 0xdd, 0x32, 0xb1, 0xda, 0x70, 0x23, 0x57, 0x9b, 0x38, 0x42, 0xaa, 0x36, 0x6f, + 0x5a, 0x26, 0x56, 0x1b, 0x11, 0x9f, 0x50, 0x9b, 0x98, 0x8d, 0xa8, 0xcd, 0x5b, 0x96, 0x89, 0xd5, + 0x46, 0xd0, 0x71, 0xb5, 0x11, 0x30, 0x4d, 0x6d, 0x56, 0x2d, 0xf3, 0xb5, 0xd4, 0x86, 0xf3, 0x24, + 0xd4, 0x26, 0xce, 0x92, 0xa6, 0x36, 0x6b, 0xc4, 0x43, 0x7f, 0xb5, 0x11, 0xc9, 0x4c, 0xa8, 0x8d, + 
0x1e, 0x37, 0x11, 0x85, 0x0d, 0xcb, 0x1c, 0x5c, 0x6d, 0xd4, 0x88, 0xb9, 0xda, 0x24, 0xc2, 0x25, + 0xc4, 0x36, 0x21, 0xee, 0xa3, 0x36, 0x5a, 0xa0, 0x5c, 0x6d, 0xb4, 0x6a, 0x31, 0xb5, 0x71, 0x70, + 0xcd, 0xa8, 0xda, 0xa8, 0x75, 0x13, 0x6a, 0x23, 0xd6, 0x11, 0xb5, 0xb9, 0xc5, 0xf0, 0x46, 0xdc, + 0x0e, 0x44, 0x6d, 0x9e, 0x82, 0xa9, 0x96, 0xd7, 0xa1, 0x02, 0xc1, 0x64, 0x62, 0x93, 0x24, 0xf5, + 0xcd, 0xec, 0x0c, 0x3c, 0xf6, 0x3a, 0x44, 0x3b, 0xc8, 0x47, 0xa5, 0xdd, 0x0d, 0x4f, 0xdc, 0x89, + 0x96, 0x6c, 0x93, 0x58, 0x1d, 0x9b, 0xa9, 0xca, 0xed, 0xc1, 0x58, 0x1d, 0x9b, 0x7c, 0x28, 0xac, + 0xcc, 0x06, 0x9f, 0x83, 0x69, 0xcc, 0x4a, 0xe5, 0x87, 0xab, 0xd0, 0x1d, 0xc2, 0xbb, 0xd2, 0x97, + 0x97, 0x4a, 0x13, 0xfd, 0xa4, 0xcc, 0x38, 0x3c, 0xd9, 0x2a, 0x73, 0x3b, 0x36, 0x17, 0xae, 0xb7, + 0x07, 0xe4, 0x76, 0x6c, 0xfa, 0xa9, 0x72, 0x73, 0x2b, 0xe7, 0xa6, 0x22, 0xc7, 0xb5, 0xee, 0xff, + 0x07, 0xe0, 0xa6, 0x02, 0xb8, 0xaf, 0xc5, 0x2d, 0x5b, 0x65, 0x6e, 0xc7, 0xe6, 0xf2, 0xf8, 0xce, + 0x80, 0xdc, 0x8e, 0xbd, 0xaf, 0xc5, 0x2d, 0x5b, 0xe1, 0xc7, 0xc1, 0x0c, 0xe6, 0x66, 0xda, 0x26, + 0x24, 0xf5, 0x2e, 0x61, 0x5f, 0xed, 0xcb, 0xce, 0x74, 0x96, 0xfd, 0xa0, 0xfc, 0x38, 0x50, 0xd5, + 0xae, 0x78, 0x70, 0x6c, 0xa1, 0xc4, 0x1f, 0x19, 0xd4, 0x83, 0x63, 0xb3, 0x1f, 0x9a, 0x07, 0x61, + 0x87, 0x47, 0x60, 0x8e, 0xe4, 0x87, 0x6f, 0x42, 0x28, 0xf8, 0x3d, 0xe2, 0x63, 0xbd, 0x7f, 0x8e, + 0x18, 0x98, 0xff, 0xa4, 0x5e, 0x70, 0xc8, 0xfa, 0x15, 0xd5, 0x0f, 0xae, 0x04, 0xdf, 0xcb, 0xd6, + 0xc0, 0x7e, 0x1c, 0x9b, 0xff, 0xd4, 0xfd, 0xc4, 0x57, 0xd4, 0xfb, 0x95, 0x1e, 0x1a, 0xe5, 0x41, + 0xef, 0x57, 0x72, 0x9c, 0x68, 0xf7, 0x2b, 0x3d, 0x62, 0x9e, 0x81, 0x62, 0xcc, 0xca, 0xce, 0x98, + 0xfb, 0x84, 0xf6, 0xad, 0xd3, 0x69, 0xe9, 0xe9, 0x43, 0x79, 0x27, 0x5b, 0x8a, 0x11, 0xee, 0x02, + 0xec, 0x89, 0x9c, 0x46, 0xf4, 0x48, 0x7a, 0x40, 0x58, 0xaf, 0xf5, 0x65, 0xc5, 0xe7, 0x14, 0xfe, + 0x9f, 0x52, 0xe6, 0x5b, 0xb1, 0x45, 0xb4, 0x3b, 0x95, 0x42, 0x76, 0x7e, 0x55, 0x06, 0x69, 0x77, + 0x02, 0xa5, 0x9f, 0x52, 0xbb, 0x4b, 0x56, 0x9e, 0x04, 0xc6, 0x4d, 0x8f, 0xbc, 0xea, 0x00, 0x49, + 0xa0, 0xcb, 0xc9, 0x69, 0x18, 0x27, 0x41, 0x32, 0xc2, 0x0e, 0x38, 0x2b, 0x11, 0x6b, 0x87, 0xe4, + 0x43, 0xe2, 0xe1, 0xe6, 0x00, 0x1e, 0x94, 0x63, 0x91, 0x7a, 0x9a, 0x6f, 0xa5, 0x5e, 0x84, 0x11, + 0x58, 0x94, 0x3c, 0xea, 0xa7, 0xe6, 0x36, 0x71, 0xe9, 0x0c, 0xe0, 0x52, 0x3d, 0x33, 0xa9, 0xcf, + 0x85, 0x56, 0xfa, 0x55, 0x78, 0x0c, 0xe6, 0x93, 0xdb, 0x24, 0x47, 0xdf, 0xce, 0x20, 0xf7, 0x80, + 0xb4, 0x0d, 0x7c, 0xf4, 0x49, 0xf7, 0x80, 0x76, 0x05, 0xbe, 0x0f, 0x16, 0x52, 0x76, 0x47, 0x3c, + 0x3d, 0x22, 0x9e, 0x36, 0x06, 0xdf, 0x5a, 0xec, 0x6a, 0xb6, 0x95, 0x72, 0x09, 0x2e, 0x83, 0x42, + 0xd0, 0xf6, 0x83, 0x23, 0x7e, 0xdc, 0x04, 0xf8, 0x11, 0x7b, 0x7b, 0xc8, 0xcd, 0x13, 0x2b, 0x3b, + 0x3c, 0x3e, 0x06, 0x66, 0x29, 0x48, 0xab, 0x6d, 0xe7, 0xb5, 0x1e, 0xb7, 0xb6, 0x87, 0x5c, 0x48, + 0x68, 0xd4, 0x5a, 0x8a, 0x08, 0x58, 0xb7, 0x7f, 0xc0, 0x27, 0x12, 0xc4, 0xca, 0x7a, 0xf7, 0x22, + 0xa0, 0x5f, 0x59, 0xdb, 0x86, 0x6c, 0xbc, 0x01, 0x88, 0x91, 0x76, 0xe1, 0x05, 0x00, 0x18, 0x04, + 0xdf, 0x87, 0x11, 0x7e, 0x10, 0xdd, 0x1e, 0x72, 0xc7, 0x29, 0x02, 0xdf, 0x5b, 0xca, 0x56, 0x1d, + 0x1b, 0x75, 0x2d, 0xa3, 0x34, 0xac, 0x6c, 0xd5, 0xb1, 0x63, 0x47, 0x54, 0x7b, 0x7a, 0xf8, 0xf1, + 0x58, 0x38, 0xa2, 0x62, 0x22, 0x78, 0x98, 0x90, 0xbc, 0xc2, 0x8f, 0xc6, 0x82, 0x87, 0x09, 0x43, + 0x85, 0x47, 0x43, 0xca, 0xf6, 0xe1, 0xe0, 0x8f, 0x78, 0x22, 0x66, 0x52, 0x9e, 0x3d, 0xe9, 0x69, + 0x8c, 0x88, 0x0c, 0x9b, 0xa6, 0xa1, 0x5f, 0x19, 0x24, 0xf7, 0x8b, 0x2b, 0x74, 0xdc, 0xb6, 0xc2, + 0xe7, 0x3c, 0x2b, 0x78, 
0xab, 0xef, 0x79, 0xcd, 0x9e, 0x1f, 0x3f, 0xa6, 0x61, 0xd3, 0x33, 0xba, + 0x0e, 0xba, 0x60, 0x5e, 0x9d, 0xd1, 0x08, 0xc6, 0x5f, 0x1b, 0xec, 0xd1, 0x56, 0x67, 0x24, 0x7a, + 0x47, 0x29, 0x67, 0x95, 0x49, 0x4e, 0x06, 0xa7, 0x63, 0x0b, 0xce, 0xdf, 0xf4, 0xe1, 0x74, 0xec, + 0x24, 0xa7, 0x63, 0x73, 0xce, 0x03, 0xe9, 0x21, 0xbf, 0xa7, 0x06, 0xfa, 0x5b, 0x4a, 0x7a, 0x3e, + 0x41, 0x7a, 0x20, 0x45, 0x3a, 0xa7, 0x0e, 0x89, 0xb2, 0x68, 0xa5, 0x58, 0x7f, 0xd7, 0x8f, 0x96, + 0x07, 0x3b, 0xa7, 0x8e, 0x94, 0xd2, 0x32, 0x40, 0x1a, 0x47, 0xb0, 0xfe, 0x3e, 0x2b, 0x03, 0xa4, + 0x97, 0xb4, 0x0c, 0x10, 0x5b, 0x5a, 0xa8, 0xb4, 0xd3, 0x04, 0xe9, 0x1f, 0xb2, 0x42, 0xa5, 0xcd, + 0xa7, 0x85, 0x4a, 0x8d, 0x69, 0xb4, 0x4c, 0x61, 0x38, 0xed, 0x1f, 0xb3, 0x68, 0xe9, 0x4d, 0xa8, + 0xd1, 0x52, 0x63, 0x5a, 0x06, 0xc8, 0x3d, 0x2a, 0x58, 0xff, 0x94, 0x95, 0x01, 0x72, 0xdb, 0x6a, + 0x19, 0x20, 0x36, 0xce, 0xb9, 0x27, 0x3d, 0x1c, 0x28, 0xcd, 0xff, 0x67, 0x83, 0xc8, 0x60, 0xdf, + 0xe6, 0x97, 0x1f, 0x0a, 0xa5, 0x20, 0xd5, 0x91, 0x81, 0x60, 0xfc, 0x8b, 0xc1, 0x9e, 0xb4, 0xfa, + 0x35, 0xbf, 0x32, 0x58, 0xc8, 0xe0, 0x94, 0x1a, 0xea, 0xaf, 0x7d, 0x38, 0x45, 0xf3, 0x2b, 0x53, + 0x08, 0xa9, 0x46, 0xda, 0x30, 0x42, 0x90, 0xfe, 0x8d, 0x92, 0x9e, 0xd2, 0xfc, 0xea, 0xcc, 0x22, + 0x8b, 0x56, 0x8a, 0xf5, 0xef, 0xfd, 0x68, 0x45, 0xf3, 0xab, 0x13, 0x8e, 0xb4, 0x0c, 0xa8, 0xcd, + 0xff, 0x8f, 0xac, 0x0c, 0xc8, 0xcd, 0xaf, 0x0c, 0x03, 0xd2, 0x42, 0xd5, 0x9a, 0xff, 0x9f, 0x59, + 0xa1, 0x2a, 0xcd, 0xaf, 0x8e, 0x0e, 0xd2, 0x68, 0xb5, 0xe6, 0xff, 0x57, 0x16, 0xad, 0xd2, 0xfc, + 0xea, 0xb3, 0x68, 0x5a, 0x06, 0xd4, 0xe6, 0xff, 0x77, 0x56, 0x06, 0xe4, 0xe6, 0x57, 0x06, 0x0e, + 0x9c, 0xf3, 0xa1, 0x34, 0xd7, 0xe5, 0xef, 0x70, 0xd0, 0x77, 0x73, 0x6c, 0x4e, 0x96, 0xd8, 0x3b, + 0x43, 0xc4, 0x33, 0x5f, 0x6e, 0x81, 0x8f, 0x80, 0x18, 0x1a, 0xd6, 0xc4, 0xcb, 0x1a, 0xf4, 0xbd, + 0x5c, 0xc6, 0xf9, 0xf1, 0x94, 0x43, 0x5c, 0xe1, 0x5f, 0x98, 0xe0, 0x47, 0xc1, 0x8c, 0x34, 0xc4, + 0xe6, 0x2f, 0x8e, 0xd0, 0xf7, 0xb3, 0xc8, 0xaa, 0x18, 0xf3, 0xd8, 0x8b, 0x5e, 0xc6, 0x64, 0xc2, + 0x04, 0xb7, 0xd4, 0xb9, 0x70, 0xaf, 0xde, 0x45, 0x3f, 0xa0, 0x44, 0x0b, 0x69, 0x45, 0xe8, 0xd5, + 0xbb, 0xca, 0xc4, 0xb8, 0x57, 0xef, 0xc2, 0x4d, 0x20, 0x66, 0x8b, 0x35, 0xaf, 0x7d, 0x82, 0x7e, + 0x48, 0xd7, 0xcf, 0x26, 0xd6, 0x6f, 0xb5, 0x4f, 0xdc, 0x3c, 0x87, 0x6e, 0xb5, 0x4f, 0xe0, 0x5d, + 0x69, 0xd6, 0xfc, 0x0a, 0x97, 0x01, 0xfd, 0x88, 0xae, 0x9d, 0x4f, 0xac, 0xa5, 0x55, 0x12, 0xd3, + 0x4d, 0xf2, 0x15, 0x97, 0x27, 0x6e, 0x50, 0x5e, 0x9e, 0x1f, 0xe7, 0x48, 0xb5, 0xfb, 0x95, 0x47, + 0xf4, 0xa5, 0x54, 0x1e, 0x41, 0x14, 0x97, 0xe7, 0x27, 0xb9, 0x0c, 0x85, 0x93, 0xca, 0xc3, 0x97, + 0xc5, 0xe5, 0x91, 0xb9, 0x48, 0x79, 0x48, 0x75, 0x7e, 0x9a, 0xc5, 0x25, 0x55, 0x27, 0x1e, 0x0a, + 0xb2, 0x55, 0xb8, 0x3a, 0xf2, 0xad, 0x82, 0xab, 0xf3, 0x4b, 0x4a, 0x94, 0x5d, 0x1d, 0xe9, 0xee, + 0x60, 0xd5, 0x11, 0x14, 0xb8, 0x3a, 0x3f, 0xa3, 0xeb, 0x33, 0xaa, 0xc3, 0xa1, 0xac, 0x3a, 0x62, + 0x25, 0xad, 0xce, 0xcf, 0xe9, 0xda, 0xcc, 0xea, 0x70, 0x38, 0xad, 0xce, 0x05, 0x00, 0xc8, 0xfe, + 0xdb, 0x5e, 0xcb, 0x5f, 0x43, 0x9f, 0x36, 0xc9, 0x6b, 0x28, 0xc9, 0x04, 0x2d, 0x90, 0xa7, 0xfd, + 0x8b, 0xbf, 0xae, 0xa3, 0xcf, 0xc8, 0x88, 0x5d, 0x6c, 0x82, 0x17, 0x41, 0xa1, 0x16, 0x43, 0x36, + 0xd0, 0x67, 0x19, 0xa4, 0xca, 0x21, 0x1b, 0x70, 0x09, 0x4c, 0x50, 0x04, 0x81, 0xd8, 0x35, 0xf4, + 0x39, 0x9d, 0x86, 0xfc, 0x3d, 0x49, 0xbe, 0xad, 0x62, 0xc8, 0x4d, 0xf4, 0x79, 0x8a, 0x90, 0x6d, + 0x70, 0x99, 0xd3, 0xac, 0x12, 0x1e, 0x07, 0x7d, 0x41, 0x01, 0x61, 0x1e, 0x47, 0xec, 0x08, 0x7f, + 0xbb, 0x85, 0xbe, 0xa8, 0x3b, 0xba, 0x85, 0x01, 
0x22, 0xb4, 0x4d, 0xf4, 0x25, 0x3d, 0xda, 0xcd, + 0x78, 0xcb, 0xf8, 0xeb, 0x6d, 0xf4, 0x65, 0x9d, 0xe2, 0x36, 0x5c, 0x02, 0x85, 0xaa, 0x40, 0xac, + 0xad, 0xa2, 0xaf, 0xb0, 0x38, 0x04, 0xc9, 0xda, 0x2a, 0xc1, 0xec, 0x54, 0xde, 0x7d, 0x50, 0xdb, + 0xdd, 0x7a, 0x5c, 0x59, 0x5b, 0x43, 0x5f, 0xe5, 0x18, 0x6c, 0xa4, 0xb6, 0x18, 0x43, 0x72, 0xbd, + 0x8e, 0xbe, 0xa6, 0x60, 0x88, 0x0d, 0x5e, 0x02, 0x93, 0x35, 0x29, 0xbf, 0x6b, 0x1b, 0xe8, 0xeb, + 0x09, 0x6f, 0x1b, 0x14, 0x55, 0x8d, 0x51, 0x36, 0xfa, 0x46, 0x02, 0x65, 0xc7, 0x09, 0xa4, 0xa0, + 0x9b, 0xe8, 0x9b, 0x72, 0x02, 0x09, 0x48, 0xca, 0x32, 0xdd, 0x9d, 0x83, 0xbe, 0x95, 0x00, 0x39, + 0xd8, 0x9f, 0x14, 0xd3, 0xad, 0x5a, 0x0d, 0x7d, 0x3b, 0x81, 0xba, 0x85, 0x51, 0x52, 0x4c, 0x9b, + 0xb5, 0x1a, 0xfa, 0x4e, 0x22, 0xaa, 0xcd, 0xc5, 0xe7, 0x60, 0x42, 0x7d, 0xd0, 0x29, 0x00, 0xc3, + 0x63, 0x6f, 0x44, 0x0d, 0x0f, 0xbe, 0x0d, 0xf2, 0xf5, 0x40, 0xbc, 0xd4, 0x40, 0xb9, 0xd3, 0x5e, + 0x80, 0xc8, 0xe8, 0xc5, 0x7b, 0x00, 0x26, 0x87, 0x94, 0xb0, 0x08, 0xcc, 0x97, 0xfe, 0x09, 0x73, + 0x81, 0x7f, 0x85, 0xb3, 0xe0, 0x0c, 0xbd, 0x7d, 0x72, 0xc4, 0x46, 0xbf, 0xdc, 0xc9, 0x6d, 0x1a, + 0x31, 0x83, 0x3c, 0x90, 0x94, 0x19, 0xcc, 0x14, 0x06, 0x53, 0x66, 0x28, 0x83, 0xd9, 0xb4, 0xd1, + 0xa3, 0xcc, 0x31, 0x91, 0xc2, 0x31, 0x91, 0xce, 0xa1, 0x8c, 0x18, 0x65, 0x8e, 0xe1, 0x14, 0x8e, + 0xe1, 0x24, 0x47, 0x62, 0x94, 0x28, 0x73, 0x4c, 0xa7, 0x70, 0x4c, 0xa7, 0x73, 0x28, 0x23, 0x43, + 0x99, 0x03, 0xa6, 0x70, 0x40, 0x99, 0xe3, 0x01, 0x98, 0x4f, 0x1f, 0x0c, 0xca, 0x2c, 0xa3, 0x29, + 0x2c, 0xa3, 0x19, 0x2c, 0xea, 0xf0, 0x4f, 0x66, 0x19, 0x49, 0x61, 0x19, 0x91, 0x59, 0xaa, 0x00, + 0x65, 0x8d, 0xf7, 0x64, 0x9e, 0xa9, 0x14, 0x9e, 0xa9, 0x2c, 0x1e, 0x6d, 0x7c, 0x27, 0xf3, 0x14, + 0x53, 0x78, 0x8a, 0xa9, 0xdd, 0x26, 0x0f, 0xe9, 0x4e, 0xeb, 0xd7, 0x9c, 0xcc, 0xb0, 0x05, 0x66, + 0x52, 0xe6, 0x71, 0xa7, 0x51, 0x18, 0x32, 0xc5, 0x5d, 0x50, 0xd4, 0x87, 0x6f, 0xf2, 0xfa, 0xb1, + 0x94, 0xf5, 0x63, 0x29, 0x4d, 0xa2, 0x0f, 0xda, 0x64, 0x8e, 0xf1, 0x14, 0x8e, 0xf1, 0xe4, 0x36, + 0xf4, 0x89, 0xda, 0x69, 0x14, 0x05, 0x99, 0x22, 0x04, 0xe7, 0xfa, 0x8c, 0xcc, 0x52, 0xa8, 0xde, + 0x91, 0xa9, 0x5e, 0xe3, 0x7d, 0x95, 0xe4, 0xf3, 0x18, 0x9c, 0xef, 0x37, 0x33, 0x4b, 0x71, 0xba, + 0xa6, 0x3a, 0xed, 0xfb, 0x0a, 0x4b, 0x72, 0xd4, 0xa4, 0x0d, 0x97, 0x36, 0x2b, 0x4b, 0x71, 0x72, + 0x47, 0x76, 0x32, 0xe8, 0x4b, 0x2d, 0xc9, 0x9b, 0x07, 0xce, 0x66, 0xce, 0xcb, 0x52, 0xdc, 0xad, + 0xa8, 0xee, 0xb2, 0x5f, 0x75, 0xc5, 0x2e, 0x96, 0x6e, 0x03, 0x20, 0x4d, 0xf6, 0x46, 0x81, 0x59, + 0xdd, 0xdb, 0x2b, 0x0e, 0xe1, 0x5f, 0xca, 0x5b, 0x6e, 0xd1, 0xa0, 0xbf, 0x3c, 0x2f, 0xe6, 0xb0, + 0xbb, 0xdd, 0xca, 0xc3, 0xe2, 0x7f, 0xf9, 0x7f, 0x46, 0x79, 0x42, 0x8c, 0xa2, 0xf0, 0xa9, 0xb2, + 0xf4, 0x06, 0x98, 0xd4, 0x06, 0x92, 0x05, 0x60, 0xd4, 0xf9, 0x81, 0x52, 0xbf, 0x76, 0x13, 0x80, + 0xf8, 0xdf, 0x30, 0xc1, 0x29, 0x90, 0x3f, 0xd8, 0xdd, 0x7f, 0x52, 0xb9, 0xbf, 0x53, 0xdd, 0xa9, + 0x3c, 0x28, 0x0e, 0xc1, 0x02, 0x18, 0x7b, 0xe2, 0xee, 0x3d, 0xdd, 0x2b, 0x1f, 0x54, 0x8b, 0x06, + 0x1c, 0x03, 0xc3, 0x8f, 0xf6, 0xf7, 0x76, 0x8b, 0xb9, 0x6b, 0xf7, 0x40, 0x5e, 0x9e, 0x07, 0x4e, + 0x81, 0x7c, 0x75, 0xcf, 0xad, 0xec, 0x3c, 0xdc, 0xad, 0xd1, 0x48, 0x25, 0x03, 0x8d, 0x58, 0x31, + 0x3c, 0x2f, 0xe6, 0xca, 0x17, 0xc1, 0x85, 0x7a, 0xd0, 0x4a, 0xfc, 0x61, 0x26, 0x25, 0xe7, 0xc5, + 0x08, 0xb1, 0x6e, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x33, 0xc2, 0x0c, 0xb6, 0xeb, 0x26, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto 
b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto new file mode 100644 index 0000000..95a8fd1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto @@ -0,0 +1,285 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package conformance; +option java_package = "com.google.protobuf.conformance"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +// This defines the conformance testing protocol. This protocol exists between +// the conformance test suite itself and the code being tested. For each test, +// the suite will send a ConformanceRequest message and expect a +// ConformanceResponse message. +// +// You can run the tests in either of two ways: +// +// 1. in-process (using the interface in conformance_test.h). +// +// 2. as a sub-process communicating over a pipe. Information about how to +// do this is in conformance_test_runner.cc. +// +// Pros/cons of the two approaches: +// +// - running as a sub-process is much simpler for languages other than C/C++. +// +// - running as a sub-process may be trickier in unusual environments like +// iOS apps, where fork/stdin/stdout are not available. + +enum WireFormat { + UNSPECIFIED = 0; + PROTOBUF = 1; + JSON = 2; +} + +// Represents a single test case's input. The testee should: +// +// 1. parse this proto (which should always succeed) +// 2. parse the protobuf or JSON payload in "payload" (which may fail) +// 3. if the parse succeeded, serialize the message in the requested format. +message ConformanceRequest { + // The payload (whether protobuf or JSON) is always for a TestAllTypes proto + // (see below).
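For a concrete sense of the exchange described above, a sketch in proto text format (the field values and payload bytes here are illustrative, not taken from the suite): a runner requesting protobuf output for a JSON payload would send

    ConformanceRequest {
      json_payload: "{\"optionalInt32\": 17}"   # hypothetical payload
      requested_output_format: PROTOBUF
    }

and a conforming testee would answer ConformanceResponse { protobuf_payload: "\x08\x11" } (field 1 encoded as a varint), or set parse_error if it could not parse the payload.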
+ oneof payload { + bytes protobuf_payload = 1; + string json_payload = 2; + } + + // Which format should the testee serialize its message to? + WireFormat requested_output_format = 3; +} + +// Represents a single test case's output. +message ConformanceResponse { + oneof result { + // This string should be set to indicate parsing failed. The string can + // provide more information about the parse error if it is available. + // + // Setting this string does not necessarily mean the testee failed the + // test. Some of the test cases are intentionally invalid input. + string parse_error = 1; + + // If the input was successfully parsed but errors occurred when + // serializing it to the requested output format, set the error message in + // this field. + string serialize_error = 6; + + // This should be set if some other error occurred. This will always + // indicate that the test failed. The string can provide more information + // about the failure. + string runtime_error = 2; + + // If the input was successfully parsed and the requested output was + // protobuf, serialize it to protobuf and set it in this field. + bytes protobuf_payload = 3; + + // If the input was successfully parsed and the requested output was JSON, + // serialize to JSON and set it in this field. + string json_payload = 4; + + // For when the testee skipped the test, likely because a certain feature + // wasn't supported, like JSON input/output. + string skipped = 5; + } +} + +// This proto includes every type of field in both singular and repeated +// forms. +message TestAllTypes { + message NestedMessage { + int32 a = 1; + TestAllTypes corecursive = 2; + } + + enum NestedEnum { + FOO = 0; + BAR = 1; + BAZ = 2; + NEG = -1; // Intentionally negative. + } + + // Singular + int32 optional_int32 = 1; + int64 optional_int64 = 2; + uint32 optional_uint32 = 3; + uint64 optional_uint64 = 4; + sint32 optional_sint32 = 5; + sint64 optional_sint64 = 6; + fixed32 optional_fixed32 = 7; + fixed64 optional_fixed64 = 8; + sfixed32 optional_sfixed32 = 9; + sfixed64 optional_sfixed64 = 10; + float optional_float = 11; + double optional_double = 12; + bool optional_bool = 13; + string optional_string = 14; + bytes optional_bytes = 15; + + NestedMessage optional_nested_message = 18; + ForeignMessage optional_foreign_message = 19; + + NestedEnum optional_nested_enum = 21; + ForeignEnum optional_foreign_enum = 22; + + string optional_string_piece = 24 [ctype=STRING_PIECE]; + string optional_cord = 25 [ctype=CORD]; + + TestAllTypes recursive_message = 27; + + // Repeated + repeated int32 repeated_int32 = 31; + repeated int64 repeated_int64 = 32; + repeated uint32 repeated_uint32 = 33; + repeated uint64 repeated_uint64 = 34; + repeated sint32 repeated_sint32 = 35; + repeated sint64 repeated_sint64 = 36; + repeated fixed32 repeated_fixed32 = 37; + repeated fixed64 repeated_fixed64 = 38; + repeated sfixed32 repeated_sfixed32 = 39; + repeated sfixed64 repeated_sfixed64 = 40; + repeated float repeated_float = 41; + repeated double repeated_double = 42; + repeated bool repeated_bool = 43; + repeated string repeated_string = 44; + repeated bytes repeated_bytes = 45; + + repeated NestedMessage repeated_nested_message = 48; + repeated ForeignMessage repeated_foreign_message = 49; + + repeated NestedEnum repeated_nested_enum = 51; + repeated ForeignEnum repeated_foreign_enum = 52; + + repeated string repeated_string_piece = 54 [ctype=STRING_PIECE]; + repeated string repeated_cord = 55 [ctype=CORD]; + + // Map + map < int32, int32> map_int32_int32 = 
56; + map<int64, int64> map_int64_int64 = 57; + map<uint32, uint32> map_uint32_uint32 = 58; + map<uint64, uint64> map_uint64_uint64 = 59; + map<sint32, sint32> map_sint32_sint32 = 60; + map<sint64, sint64> map_sint64_sint64 = 61; + map<fixed32, fixed32> map_fixed32_fixed32 = 62; + map<fixed64, fixed64> map_fixed64_fixed64 = 63; + map<sfixed32, sfixed32> map_sfixed32_sfixed32 = 64; + map<sfixed64, sfixed64> map_sfixed64_sfixed64 = 65; + map<int32, float> map_int32_float = 66; + map<int32, double> map_int32_double = 67; + map<bool, bool> map_bool_bool = 68; + map<string, string> map_string_string = 69; + map<string, bytes> map_string_bytes = 70; + map<string, NestedMessage> map_string_nested_message = 71; + map<string, ForeignMessage> map_string_foreign_message = 72; + map<string, NestedEnum> map_string_nested_enum = 73; + map<string, ForeignEnum> map_string_foreign_enum = 74; + + oneof oneof_field { + uint32 oneof_uint32 = 111; + NestedMessage oneof_nested_message = 112; + string oneof_string = 113; + bytes oneof_bytes = 114; + bool oneof_bool = 115; + uint64 oneof_uint64 = 116; + float oneof_float = 117; + double oneof_double = 118; + NestedEnum oneof_enum = 119; + } + + // Well-known types + google.protobuf.BoolValue optional_bool_wrapper = 201; + google.protobuf.Int32Value optional_int32_wrapper = 202; + google.protobuf.Int64Value optional_int64_wrapper = 203; + google.protobuf.UInt32Value optional_uint32_wrapper = 204; + google.protobuf.UInt64Value optional_uint64_wrapper = 205; + google.protobuf.FloatValue optional_float_wrapper = 206; + google.protobuf.DoubleValue optional_double_wrapper = 207; + google.protobuf.StringValue optional_string_wrapper = 208; + google.protobuf.BytesValue optional_bytes_wrapper = 209; + + repeated google.protobuf.BoolValue repeated_bool_wrapper = 211; + repeated google.protobuf.Int32Value repeated_int32_wrapper = 212; + repeated google.protobuf.Int64Value repeated_int64_wrapper = 213; + repeated google.protobuf.UInt32Value repeated_uint32_wrapper = 214; + repeated google.protobuf.UInt64Value repeated_uint64_wrapper = 215; + repeated google.protobuf.FloatValue repeated_float_wrapper = 216; + repeated google.protobuf.DoubleValue repeated_double_wrapper = 217; + repeated google.protobuf.StringValue repeated_string_wrapper = 218; + repeated google.protobuf.BytesValue repeated_bytes_wrapper = 219; + + google.protobuf.Duration optional_duration = 301; + google.protobuf.Timestamp optional_timestamp = 302; + google.protobuf.FieldMask optional_field_mask = 303; + google.protobuf.Struct optional_struct = 304; + google.protobuf.Any optional_any = 305; + google.protobuf.Value optional_value = 306; + + repeated google.protobuf.Duration repeated_duration = 311; + repeated google.protobuf.Timestamp repeated_timestamp = 312; + repeated google.protobuf.FieldMask repeated_fieldmask = 313; + repeated google.protobuf.Struct repeated_struct = 324; + repeated google.protobuf.Any repeated_any = 315; + repeated google.protobuf.Value repeated_value = 316; + + // Test field-name-to-JSON-name convention. + // (protobuf says names can be any valid C/C++ identifier.)
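As a sketch of the convention the fields below exercise (proto3 JSON derives the JSON name by dropping each underscore and capitalizing the letter that follows it; these two mappings are the unambiguous cases):

    fieldname1  -> "fieldname1"
    field_name2 -> "fieldName2"

A parser is expected to accept both the original field name and the derived camelCase name, which is exactly what jsonpb's consumeField does later in this diff.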
+ int32 fieldname1 = 401; + int32 field_name2 = 402; + int32 _field_name3 = 403; + int32 field__name4_ = 404; + int32 field0name5 = 405; + int32 field_0_name6 = 406; + int32 fieldName7 = 407; + int32 FieldName8 = 408; + int32 field_Name9 = 409; + int32 Field_Name10 = 410; + int32 FIELD_NAME11 = 411; + int32 FIELD_name12 = 412; + int32 __field_name13 = 413; + int32 __Field_name14 = 414; + int32 field__name15 = 415; + int32 field__Name16 = 416; + int32 field_name17__ = 417; + int32 Field_name18__ = 418; +} + +message ForeignMessage { + int32 c = 1; +} + +enum ForeignEnum { + FOREIGN_FOO = 0; + FOREIGN_BAR = 1; + FOREIGN_BAZ = 2; +} diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go new file mode 100644 index 0000000..ac7e51b --- /dev/null +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -0,0 +1,93 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/golang/protobuf/proto" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. 
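A gzip'd descriptor like this comes from generated .pb.go code, which embeds it as a byte slice (the fileDescriptor hex blob earlier in this diff is one such) and exposes it through each message's Descriptor() method. A minimal sketch of feeding it to extractFile, using only names defined in this package:

    gz, path := msg.Descriptor() // msg is any generated message (see ForMessage below)
    fd, err := extractFile(gz)   // fd is a *protobuf.FileDescriptorProto
    _ = path                     // path indexes the message's DescriptorProto within fd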
+func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(protobuf.FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go new file mode 100644 index 0000000..27b0729 --- /dev/null +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go @@ -0,0 +1,32 @@ +package descriptor_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/descriptor" + tpb "github.com/golang/protobuf/proto/testdata" + protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +func TestMessage(t *testing.T) { + var msg *protobuf.DescriptorProto + fd, md := descriptor.ForMessage(msg) + if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want { + t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want) + } + if name, want := md.GetName(), "DescriptorProto"; name != want { + t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want) + } +} + +func Example_Options() { + var msg *tpb.MyMessageSet + _, md := descriptor.ForMessage(msg) + if md.GetOptions().GetMessageSetWireFormat() { + fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName()) + } + + // Output: + // MyMessageSet uses option message_set_wire_format. +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 0000000..dfdfc5b --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1082 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON.
Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + x := fmt.Sprintf("%d.%09d", s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. 
+ kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
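Concretely, both shapes appear in this package's own tests (jsonpb_test.go later in this diff): a regular message gets its fields inlined next to "@type", while a well-known type nests its special JSON mapping under "value":

    {"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}
    {"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}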
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + var err error + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages.
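Nested messages recurse into marshalObject with the indent grown by one level; that recursion is what produces nested objects such as (from complexObjectJSON in the tests below)

    "simple":{"oInt32":-32}

inside the parent object.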
+ if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. 
This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := strconv.Unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); 
err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. + if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. 
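So with AllowUnknownFields left false, input carrying a stray key, say {"title":"x","bogus":1} against a message that has no "bogus" field (the key name is made up for illustration), fails with an error of roughly the form

    unknown field "bogus" in mypkg.MyMessage

instead of the key being silently dropped.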
+ var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := u.unmarshalValue(v, raw, valprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // 64-bit integers can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. 
+// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go new file mode 100644 index 0000000..4fdbde1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go @@ -0,0 +1,897 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package jsonpb + +import ( + "bytes" + "encoding/json" + "io" + "math" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/jsonpb/jsonpb_test_proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + "github.com/golang/protobuf/ptypes" + anypb "github.com/golang/protobuf/ptypes/any" + durpb "github.com/golang/protobuf/ptypes/duration" + stpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + wpb "github.com/golang/protobuf/ptypes/wrappers" +) + +var ( + marshaler = Marshaler{} + + marshalerAllOptions = Marshaler{ + Indent: " ", + } + + simpleObject = &pb.Simple{ + OInt32: proto.Int32(-32), + OInt64: proto.Int64(-6400000000), + OUint32: proto.Uint32(32), + OUint64: proto.Uint64(6400000000), + OSint32: proto.Int32(-13), + OSint64: proto.Int64(-2600000000), + OFloat: proto.Float32(3.14), + ODouble: proto.Float64(6.02214179e23), + OBool: proto.Bool(true), + OString: proto.String("hello \"there\""), + OBytes: []byte("beep boop"), + } + + simpleObjectJSON = `{` + + `"oBool":true,` + + `"oInt32":-32,` + + `"oInt64":"-6400000000",` + + `"oUint32":32,` + + `"oUint64":"6400000000",` + + `"oSint32":-13,` + + `"oSint64":"-2600000000",` + + `"oFloat":3.14,` + + `"oDouble":6.02214179e+23,` + + `"oString":"hello \"there\"",` + + `"oBytes":"YmVlcCBib29w"` + + `}` + + simpleObjectPrettyJSON = `{ + "oBool": true, + "oInt32": -32, + "oInt64": "-6400000000", + "oUint32": 32, + "oUint64": "6400000000", + "oSint32": -13, + "oSint64": "-2600000000", + "oFloat": 3.14, + "oDouble": 6.02214179e+23, + "oString": "hello \"there\"", + "oBytes": "YmVlcCBib29w" +}` + + repeatsObject = &pb.Repeats{ + RBool: []bool{true, false, true}, + RInt32: []int32{-3, -4, -5}, + RInt64: []int64{-123456789, -987654321}, + RUint32: []uint32{1, 2, 3}, + RUint64: []uint64{6789012345, 3456789012}, + RSint32: []int32{-1, -2, -3}, + RSint64: []int64{-6789012345, -3456789012}, + RFloat: []float32{3.14, 6.28}, + RDouble: []float64{299792458 * 1e20, 6.62606957e-34}, + RString: []string{"happy", "days"}, + RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")}, + } + + repeatsObjectJSON = `{` + + `"rBool":[true,false,true],` + + `"rInt32":[-3,-4,-5],` + + `"rInt64":["-123456789","-987654321"],` + + `"rUint32":[1,2,3],` + + `"rUint64":["6789012345","3456789012"],` + + `"rSint32":[-1,-2,-3],` + + `"rSint64":["-6789012345","-3456789012"],` + + `"rFloat":[3.14,6.28],` + + `"rDouble":[2.99792458e+28,6.62606957e-34],` + + `"rString":["happy","days"],` + + `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + + `}` + + repeatsObjectPrettyJSON = `{ + "rBool": [ + true, + false, + true + ], + "rInt32": [ + -3, + -4, + -5 + ], + "rInt64": [ + "-123456789", + "-987654321" + ], + "rUint32": [ + 1, + 2, + 3 + ], + "rUint64": [ + "6789012345", + "3456789012" + ], + "rSint32": [ + -1, + -2, + -3 + ], + "rSint64": [ + "-6789012345", + "-3456789012" + ], + "rFloat": [ + 3.14, + 6.28 + ], + "rDouble": [ + 2.99792458e+28, + 6.62606957e-34 + ], + "rString": [ + "happy", + "days" + ], + "rBytes": [ + "c2tpdHRsZXM=", + "bSZtJ3M=" + ] +}` + + innerSimple = &pb.Simple{OInt32: proto.Int32(-32)} + innerSimple2 = &pb.Simple{OInt64: proto.Int64(25)} + innerRepeats = &pb.Repeats{RString: []string{"roses", "red"}} + innerRepeats2 = &pb.Repeats{RString: []string{"violets", "blue"}} + complexObject = &pb.Widget{ + Color: pb.Widget_GREEN.Enum(), + RColor: []pb.Widget_Color{pb.Widget_RED, pb.Widget_GREEN, pb.Widget_BLUE}, + Simple: innerSimple, + RSimple: 
[]*pb.Simple{innerSimple, innerSimple2}, + Repeats: innerRepeats, + RRepeats: []*pb.Repeats{innerRepeats, innerRepeats2}, + } + + complexObjectJSON = `{"color":"GREEN",` + + `"rColor":["RED","GREEN","BLUE"],` + + `"simple":{"oInt32":-32},` + + `"rSimple":[{"oInt32":-32},{"oInt64":"25"}],` + + `"repeats":{"rString":["roses","red"]},` + + `"rRepeats":[{"rString":["roses","red"]},{"rString":["violets","blue"]}]` + + `}` + + complexObjectPrettyJSON = `{ + "color": "GREEN", + "rColor": [ + "RED", + "GREEN", + "BLUE" + ], + "simple": { + "oInt32": -32 + }, + "rSimple": [ + { + "oInt32": -32 + }, + { + "oInt64": "25" + } + ], + "repeats": { + "rString": [ + "roses", + "red" + ] + }, + "rRepeats": [ + { + "rString": [ + "roses", + "red" + ] + }, + { + "rString": [ + "violets", + "blue" + ] + } + ] +}` + + colorPrettyJSON = `{ + "color": 2 +}` + + colorListPrettyJSON = `{ + "color": 1000, + "rColor": [ + "RED" + ] +}` + + nummyPrettyJSON = `{ + "nummy": { + "1": 2, + "3": 4 + } +}` + + objjyPrettyJSON = `{ + "objjy": { + "1": { + "dub": 1 + } + } +}` + realNumber = &pb.Real{Value: proto.Float64(3.14159265359)} + realNumberName = "Pi" + complexNumber = &pb.Complex{Imaginary: proto.Float64(0.5772156649)} + realNumberJSON = `{` + + `"value":3.14159265359,` + + `"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` + + `"[jsonpb.name]":"Pi"` + + `}` + + anySimple = &pb.KnownTypes{ + An: &anypb.Any{ + TypeUrl: "something.example.com/jsonpb.Simple", + Value: []byte{ + // &pb.Simple{OBool:true} + 1 << 3, 1, + }, + }, + } + anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}` + anySimplePrettyJSON = `{ + "an": { + "@type": "something.example.com/jsonpb.Simple", + "oBool": true + } +}` + + anyWellKnown = &pb.KnownTypes{ + An: &anypb.Any{ + TypeUrl: "type.googleapis.com/google.protobuf.Duration", + Value: []byte{ + // &durpb.Duration{Seconds: 1, Nanos: 212000000 } + 1 << 3, 1, // seconds + 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos + }, + }, + } + anyWellKnownJSON = `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}` + anyWellKnownPrettyJSON = `{ + "an": { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } +}` + + nonFinites = &pb.NonFinites{ + FNan: proto.Float32(float32(math.NaN())), + FPinf: proto.Float32(float32(math.Inf(1))), + FNinf: proto.Float32(float32(math.Inf(-1))), + DNan: proto.Float64(float64(math.NaN())), + DPinf: proto.Float64(float64(math.Inf(1))), + DNinf: proto.Float64(float64(math.Inf(-1))), + } + nonFinitesJSON = `{` + + `"fNan":"NaN",` + + `"fPinf":"Infinity",` + + `"fNinf":"-Infinity",` + + `"dNan":"NaN",` + + `"dPinf":"Infinity",` + + `"dNinf":"-Infinity"` + + `}` +) + +func init() { + if err := proto.SetExtension(realNumber, pb.E_Name, &realNumberName); err != nil { + panic(err) + } + if err := proto.SetExtension(realNumber, pb.E_Complex_RealExtension, complexNumber); err != nil { + panic(err) + } +} + +var marshalingTests = []struct { + desc string + marshaler Marshaler + pb proto.Message + json string +}{ + {"simple flat object", marshaler, simpleObject, simpleObjectJSON}, + {"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON}, + {"non-finite floats fields object", marshaler, nonFinites, nonFinitesJSON}, + {"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON}, + {"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON}, + {"nested message/enum flat object", marshaler, complexObject, 
complexObjectJSON}, + {"nested message/enum pretty object", marshalerAllOptions, complexObject, complexObjectPrettyJSON}, + {"enum-string flat object", Marshaler{}, + &pb.Widget{Color: pb.Widget_BLUE.Enum()}, `{"color":"BLUE"}`}, + {"enum-value pretty object", Marshaler{EnumsAsInts: true, Indent: " "}, + &pb.Widget{Color: pb.Widget_BLUE.Enum()}, colorPrettyJSON}, + {"unknown enum value object", marshalerAllOptions, + &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}, colorListPrettyJSON}, + {"repeated proto3 enum", Marshaler{}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":["PUNS","SLAPSTICK"]}`}, + {"repeated proto3 enum as int", Marshaler{EnumsAsInts: true}, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}, + `{"rFunny":[1,2]}`}, + {"empty value", marshaler, &pb.Simple3{}, `{}`}, + {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`}, + {"empty repeated emitted", Marshaler{EmitDefaults: true}, &pb.SimpleSlice3{}, `{"slices":[]}`}, + {"empty map emitted", Marshaler{EmitDefaults: true}, &pb.SimpleMap3{}, `{"stringy":{}}`}, + {"nested struct null", Marshaler{EmitDefaults: true}, &pb.SimpleNull3{}, `{"simple":null}`}, + {"map", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, `{"nummy":{"1":2,"3":4}}`}, + {"map", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON}, + {"map", marshaler, + &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}, + `{"strry":{"\"one\"":"two","three":"four"}}`}, + {"map", marshaler, + &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, `{"objjy":{"1":{"dub":1}}}`}, + {"map", marshalerAllOptions, + &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}, objjyPrettyJSON}, + {"map", marshaler, &pb.Mappy{Buggy: map[int64]string{1234: "yup"}}, + `{"buggy":{"1234":"yup"}}`}, + {"map", marshaler, &pb.Mappy{Booly: map[bool]bool{false: true}}, `{"booly":{"false":true}}`}, + // TODO: This is broken. 
+ //{"map", marshaler, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":"ROMAN"}`}, + {"map", Marshaler{EnumsAsInts: true}, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}, `{"enumy":{"XIV":2}}`}, + {"map", marshaler, &pb.Mappy{S32Booly: map[int32]bool{1: true, 3: false, 10: true, 12: false}}, `{"s32booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{S64Booly: map[int64]bool{1: true, 3: false, 10: true, 12: false}}, `{"s64booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{U32Booly: map[uint32]bool{1: true, 3: false, 10: true, 12: false}}, `{"u32booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"map", marshaler, &pb.Mappy{U64Booly: map[uint64]bool{1: true, 3: false, 10: true, 12: false}}, `{"u64booly":{"1":true,"3":false,"10":true,"12":false}}`}, + {"proto2 map", marshaler, &pb.Maps{MInt64Str: map[int64]string{213: "cat"}}, + `{"mInt64Str":{"213":"cat"}}`}, + {"proto2 map", marshaler, + &pb.Maps{MBoolSimple: map[bool]*pb.Simple{true: {OInt32: proto.Int32(1)}}}, + `{"mBoolSimple":{"true":{"oInt32":1}}}`}, + {"oneof, not set", marshaler, &pb.MsgWithOneof{}, `{}`}, + {"oneof, set", marshaler, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Title{"Grand Poobah"}}, `{"title":"Grand Poobah"}`}, + {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)}, + `{"o_int32":4}`}, + {"proto2 extension", marshaler, realNumber, realNumberJSON}, + {"Any with message", marshaler, anySimple, anySimpleJSON}, + {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, + {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, + {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`}, + {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ + Fields: map[string]*stpb.Value{ + "one": {Kind: &stpb.Value_StringValue{"loneliest number"}}, + "two": {Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}, + }, + }}, `{"st":{"one":"loneliest number","two":null}}`}, + {"empty ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{}}, `{"lv":[]}`}, + {"basic ListValue", marshaler, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_NullValue{}}, + {Kind: &stpb.Value_NumberValue{3}}, + {Kind: &stpb.Value_BoolValue{true}}, + }}}, `{"lv":["x",null,3,true]}`}, + {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`}, + {"number Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}, `{"val":1}`}, + {"null Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}, `{"val":null}`}, + {"string number value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}, `{"val":"9223372036854775807"}`}, + {"list of lists Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{ + Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{{Kind: 
&stpb.Value_StringValue{"y"}}}, + }}}, + {Kind: &stpb.Value_StringValue{"z"}}, + }, + }}}, + }, + }}, + }}, `{"val":["x",[["y"],"z"]]}`}, + + {"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`}, + {"FloatValue", marshaler, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}, `{"flt":1.2}`}, + {"Int64Value", marshaler, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}, `{"i64":"-3"}`}, + {"UInt64Value", marshaler, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}, `{"u64":"3"}`}, + {"Int32Value", marshaler, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}, `{"i32":-4}`}, + {"UInt32Value", marshaler, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}, `{"u32":4}`}, + {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`}, + {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`}, + {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, +} + +func TestMarshaling(t *testing.T) { + for _, tt := range marshalingTests { + json, err := tt.marshaler.MarshalToString(tt.pb) + if err != nil { + t.Errorf("%s: marshaling error: %v", tt.desc, err) + } else if tt.json != json { + t.Errorf("%s: got [%v] want [%v]", tt.desc, json, tt.json) + } + } +} + +func TestMarshalJSONPBMarshaler(t *testing.T) { + rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` + msg := dynamicMessage{rawJson: rawJson} + str, err := new(Marshaler).MarshalToString(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling JSONPBMarshaler: %v", err) + } + if str != rawJson { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, rawJson) + } +} + +func TestMarshalAnyJSONPBMarshaler(t *testing.T) { + msg := dynamicMessage{rawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`} + a, err := ptypes.MarshalAny(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling to Any: %v", err) + } + str, err := new(Marshaler).MarshalToString(a) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err) + } + // after custom marshaling, it's round-tripped through JSON decoding/encoding already, + // so the keys are sorted, whitespace is compacted, and "@type" key has been added + expected := `{"@type":"type.googleapis.com/` + dynamicMessageName + `","baz":[0,1,2,3],"foo":"bar"}` + if str != expected { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected) + } +} + +var unmarshalingTests = []struct { + desc string + unmarshaler Unmarshaler + json string + pb proto.Message +}{ + {"simple flat object", Unmarshaler{}, simpleObjectJSON, simpleObject}, + {"simple pretty object", Unmarshaler{}, simpleObjectPrettyJSON, simpleObject}, + {"repeated fields flat object", Unmarshaler{}, repeatsObjectJSON, repeatsObject}, + {"repeated fields pretty object", Unmarshaler{}, repeatsObjectPrettyJSON, repeatsObject}, + {"nested message/enum flat object", Unmarshaler{}, complexObjectJSON, complexObject}, + {"nested message/enum pretty object", Unmarshaler{}, complexObjectPrettyJSON, complexObject}, + {"enum-string object", Unmarshaler{}, `{"color":"BLUE"}`, &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"enum-value object", Unmarshaler{}, "{\n \"color\": 2\n}", &pb.Widget{Color: pb.Widget_BLUE.Enum()}}, + {"unknown field with allowed option", Unmarshaler{AllowUnknownFields: true}, `{"unknown": "foo"}`, new(pb.Simple)}, + {"proto3 enum string", Unmarshaler{}, 
`{"hilarity":"PUNS"}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 enum value", Unmarshaler{}, `{"hilarity":1}`, &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"unknown enum value object", + Unmarshaler{}, + "{\n \"color\": 1000,\n \"r_color\": [\n \"RED\"\n ]\n}", + &pb.Widget{Color: pb.Widget_Color(1000).Enum(), RColor: []pb.Widget_Color{pb.Widget_RED}}}, + {"repeated proto3 enum", Unmarshaler{}, `{"rFunny":["PUNS","SLAPSTICK"]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as int", Unmarshaler{}, `{"rFunny":[1,2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"repeated proto3 enum as mix of strings and ints", Unmarshaler{}, `{"rFunny":["PUNS",2]}`, + &proto3pb.Message{RFunny: []proto3pb.Message_Humour{ + proto3pb.Message_PUNS, + proto3pb.Message_SLAPSTICK, + }}}, + {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, + {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, + {"NaN", Unmarshaler{}, `{"oDouble":"NaN"}`, &pb.Simple{ODouble: proto.Float64(math.NaN())}}, + {"Inf", Unmarshaler{}, `{"oFloat":"Infinity"}`, &pb.Simple{OFloat: proto.Float32(float32(math.Inf(1)))}}, + {"-Inf", Unmarshaler{}, `{"oDouble":"-Infinity"}`, &pb.Simple{ODouble: proto.Float64(math.Inf(-1))}}, + {"map", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, + {"map", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, + {"map", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}}, + {"proto2 extension", Unmarshaler{}, realNumberJSON, realNumber}, + {"Any with message", Unmarshaler{}, anySimpleJSON, anySimple}, + {"Any with message and indent", Unmarshaler{}, anySimplePrettyJSON, anySimple}, + {"Any with WKT", Unmarshaler{}, anyWellKnownJSON, anyWellKnown}, + {"Any with WKT and indent", Unmarshaler{}, anyWellKnownPrettyJSON, anyWellKnown}, + // TODO: This is broken. 
+ //{"map", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"map", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, + {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{31000}}}, + {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{"Australia"}}}, + {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, + {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{"Australia"}}}, + {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + + {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, + {"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}}, + {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + {"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}}, + {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}}, + {"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}}, + {"null Struct", Unmarshaler{}, `{"st": null}`, &pb.KnownTypes{St: nil}}, + {"empty Struct", Unmarshaler{}, `{"st": {}}`, &pb.KnownTypes{St: &stpb.Struct{}}}, + {"basic Struct", Unmarshaler{}, `{"st": {"a": "x", "b": null, "c": 3, "d": true}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{ + "a": {Kind: &stpb.Value_StringValue{"x"}}, + "b": {Kind: &stpb.Value_NullValue{}}, + "c": {Kind: &stpb.Value_NumberValue{3}}, + "d": {Kind: &stpb.Value_BoolValue{true}}, + }}}}, + {"nested Struct", Unmarshaler{}, `{"st": {"a": {"b": 1, "c": [{"d": true}, "f"]}}}`, &pb.KnownTypes{St: &stpb.Struct{Fields: map[string]*stpb.Value{ + "a": {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{ + "b": {Kind: &stpb.Value_NumberValue{1}}, + "c": {Kind: &stpb.Value_ListValue{&stpb.ListValue{Values: []*stpb.Value{ + {Kind: &stpb.Value_StructValue{&stpb.Struct{Fields: map[string]*stpb.Value{"d": {Kind: &stpb.Value_BoolValue{true}}}}}}, + {Kind: &stpb.Value_StringValue{"f"}}, + }}}}, + }}}}, + }}}}, + {"null ListValue", Unmarshaler{}, `{"lv": null}`, &pb.KnownTypes{Lv: nil}}, + {"empty ListValue", Unmarshaler{}, `{"lv": []}`, &pb.KnownTypes{Lv: &stpb.ListValue{}}}, + {"basic ListValue", Unmarshaler{}, `{"lv": ["x", null, 3, true]}`, &pb.KnownTypes{Lv: &stpb.ListValue{Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_NullValue{}}, + {Kind: &stpb.Value_NumberValue{3}}, + {Kind: &stpb.Value_BoolValue{true}}, + }}}}, + {"number Value", Unmarshaler{}, `{"val":1}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}}, + {"null Value", Unmarshaler{}, `{"val":null}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}}, + {"bool Value", Unmarshaler{}, `{"val":true}`, 
&pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_BoolValue{true}}}}, + {"string Value", Unmarshaler{}, `{"val":"x"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"x"}}}}, + {"string number value", Unmarshaler{}, `{"val":"9223372036854775807"}`, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}}, + {"list of lists Value", Unmarshaler{}, `{"val":["x", [["y"], "z"]]}`, &pb.KnownTypes{Val: &stpb.Value{ + Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_StringValue{"x"}}, + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{ + {Kind: &stpb.Value_ListValue{&stpb.ListValue{ + Values: []*stpb.Value{{Kind: &stpb.Value_StringValue{"y"}}}, + }}}, + {Kind: &stpb.Value_StringValue{"z"}}, + }, + }}}, + }, + }}}}}, + + {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &wpb.DoubleValue{Value: 1.2}}}, + {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &wpb.FloatValue{Value: 1.2}}}, + {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &wpb.Int64Value{Value: -3}}}, + {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &wpb.UInt64Value{Value: 3}}}, + {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &wpb.Int32Value{Value: -4}}}, + {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, + {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, + {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, + {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, + + // Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct. + {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: nil}}, + {"null FloatValue", Unmarshaler{}, `{"flt":null}`, &pb.KnownTypes{Flt: nil}}, + {"null Int64Value", Unmarshaler{}, `{"i64":null}`, &pb.KnownTypes{I64: nil}}, + {"null UInt64Value", Unmarshaler{}, `{"u64":null}`, &pb.KnownTypes{U64: nil}}, + {"null Int32Value", Unmarshaler{}, `{"i32":null}`, &pb.KnownTypes{I32: nil}}, + {"null UInt32Value", Unmarshaler{}, `{"u32":null}`, &pb.KnownTypes{U32: nil}}, + {"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}}, + {"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}}, + {"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}}, +} + +func TestUnmarshaling(t *testing.T) { + for _, tt := range unmarshalingTests { + // Make a new instance of the type of our expected object. + p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := tt.unmarshaler.Unmarshal(strings.NewReader(tt.json), p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. 
+ exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } +} + +func TestUnmarshalNullArray(t *testing.T) { + var repeats pb.Repeats + if err := UnmarshalString(`{"rBool":null}`, &repeats); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(repeats, pb.Repeats{}) { + t.Errorf("got non-nil fields in [%#v]", repeats) + } +} + +func TestUnmarshalNullObject(t *testing.T) { + var maps pb.Maps + if err := UnmarshalString(`{"mInt64Str":null}`, &maps); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(maps, pb.Maps{}) { + t.Errorf("got non-nil fields in [%#v]", maps) + } +} + +func TestUnmarshalNext(t *testing.T) { + // We only need to check against a few, not all of them. + tests := unmarshalingTests[:5] + + // Create a buffer with many concatenated JSON objects. + var b bytes.Buffer + for _, tt := range tests { + b.WriteString(tt.json) + } + + dec := json.NewDecoder(&b) + for _, tt := range tests { + // Make a new instance of the type of our expected object. + p := reflect.New(reflect.TypeOf(tt.pb).Elem()).Interface().(proto.Message) + + err := tt.unmarshaler.UnmarshalNext(dec, p) + if err != nil { + t.Errorf("%s: %v", tt.desc, err) + continue + } + + // For easier diffs, compare text strings of the protos. + exp := proto.MarshalTextString(tt.pb) + act := proto.MarshalTextString(p) + if string(exp) != string(act) { + t.Errorf("%s: got [%s] want [%s]", tt.desc, act, exp) + } + } + + p := &pb.Simple{} + err := new(Unmarshaler).UnmarshalNext(dec, p) + if err != io.EOF { + t.Errorf("eof: got %v, expected io.EOF", err) + } +} + +var unmarshalingShouldError = []struct { + desc string + in string + pb proto.Message +}{ + {"a value", "666", new(pb.Simple)}, + {"gibberish", "{adskja123;l23=-=", new(pb.Simple)}, + {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)}, + {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)}, +} + +func TestUnmarshalingBadInput(t *testing.T) { + for _, tt := range unmarshalingShouldError { + err := UnmarshalString(tt.in, tt.pb) + if err == nil { + t.Errorf("an error was expected when parsing %q instead of an object", tt.desc) + } + } +} + +type funcResolver func(turl string) (proto.Message, error) + +func (fn funcResolver) Resolve(turl string) (proto.Message, error) { + return fn(turl) +} + +func TestAnyWithCustomResolver(t *testing.T) { + var resolvedTypeUrls []string + resolver := funcResolver(func(turl string) (proto.Message, error) { + resolvedTypeUrls = append(resolvedTypeUrls, turl) + return new(pb.Simple), nil + }) + msg := &pb.Simple{ + OBytes: []byte{1, 2, 3, 4}, + OBool: proto.Bool(true), + OString: proto.String("foobar"), + OInt64: proto.Int64(1020304), + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshaling message: %v", err) + } + // make an Any with a type URL that won't resolve w/out custom resolver + any := &anypb.Any{ + TypeUrl: "https://foobar.com/some.random.MessageKind", + Value: msgBytes, + } + + m := Marshaler{AnyResolver: resolver} + js, err := m.MarshalToString(any) + if err != nil { + t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err) + } + if len(resolvedTypeUrls) != 1 { + t.Errorf("custom resolver was not invoked during marshaling") + } else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" { + t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], 
"https://foobar.com/some.random.MessageKind") + } + wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}` + if js != wanted { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted) + } + + u := Unmarshaler{AnyResolver: resolver} + roundTrip := &anypb.Any{} + err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip) + if err != nil { + t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err) + } + if len(resolvedTypeUrls) != 2 { + t.Errorf("custom resolver was not invoked during marshaling") + } else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" { + t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind") + } + if !proto.Equal(any, roundTrip) { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any) + } +} + +func TestUnmarshalJSONPBUnmarshaler(t *testing.T) { + rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` + var msg dynamicMessage + if err := Unmarshal(strings.NewReader(rawJson), &msg); err != nil { + t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) + } + if msg.rawJson != rawJson { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", msg.rawJson, rawJson) + } +} + +func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) { + rawJson := `{"stringField":null}` + var ptrFieldMsg ptrFieldMessage + if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil { + t.Errorf("unmarshal error: %v", err) + } + + want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}} + if !proto.Equal(&ptrFieldMsg, &want) { + t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want) + } +} + +func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) { + rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }` + var got anypb.Any + if err := Unmarshal(strings.NewReader(rawJson), &got); err != nil { + t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) + } + + dm := &dynamicMessage{rawJson: `{"baz":[0,1,2,3],"foo":"bar"}`} + var want anypb.Any + if b, err := proto.Marshal(dm); err != nil { + t.Errorf("an unexpected error occurred when marshaling message: %v", err) + } else { + want.TypeUrl = "blah.com/" + dynamicMessageName + want.Value = b + } + + if !proto.Equal(&got, &want) { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", got, want) + } +} + +const ( + dynamicMessageName = "google.protobuf.jsonpb.testing.dynamicMessage" +) + +func init() { + // we register the custom type below so that we can use it in Any types + proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName) +} + +type ptrFieldMessage struct { + StringField *stringField `protobuf:"bytes,1,opt,name=stringField"` +} + +func (m *ptrFieldMessage) Reset() { +} + +func (m *ptrFieldMessage) String() string { + return m.StringField.StringValue +} + +func (m *ptrFieldMessage) ProtoMessage() { +} + +type stringField struct { + IsSet bool `protobuf:"varint,1,opt,name=isSet"` + StringValue string `protobuf:"bytes,2,opt,name=stringValue"` +} + +func (s *stringField) Reset() { +} + +func (s *stringField) String() string { + return s.StringValue +} + +func (s *stringField) ProtoMessage() { +} + +func (s 
*stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { + s.IsSet = true + s.StringValue = string(js) + return nil +} + +// dynamicMessage implements protobuf.Message but is not a normal generated message type. +// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support. +type dynamicMessage struct { + rawJson string `protobuf:"bytes,1,opt,name=rawJson"` +} + +func (m *dynamicMessage) Reset() { + m.rawJson = "{}" +} + +func (m *dynamicMessage) String() string { + return m.rawJson +} + +func (m *dynamicMessage) ProtoMessage() { +} + +func (m *dynamicMessage) MarshalJSONPB(jm *Marshaler) ([]byte, error) { + return []byte(m.rawJson), nil +} + +func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { + m.rawJson = string(js) + return nil +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile new file mode 100644 index 0000000..eeda8ae --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile @@ -0,0 +1,33 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2015 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go new file mode 100644 index 0000000..ebb180e --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: more_test_objects.proto + +/* +Package jsonpb is a generated protocol buffer package. + +It is generated from these files: + more_test_objects.proto + test_objects.proto + +It has these top-level messages: + Simple3 + SimpleSlice3 + SimpleMap3 + SimpleNull3 + Mappy + Simple + NonFinites + Repeats + Widget + Maps + MsgWithOneof + Real + Complex + KnownTypes +*/ +package jsonpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Numeral int32 + +const ( + Numeral_UNKNOWN Numeral = 0 + Numeral_ARABIC Numeral = 1 + Numeral_ROMAN Numeral = 2 +) + +var Numeral_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARABIC", + 2: "ROMAN", +} +var Numeral_value = map[string]int32{ + "UNKNOWN": 0, + "ARABIC": 1, + "ROMAN": 2, +} + +func (x Numeral) String() string { + return proto.EnumName(Numeral_name, int32(x)) +} +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Simple3 struct { + Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Simple3) GetDub() float64 { + if m != nil { + return m.Dub + } + return 0 +} + +type SimpleSlice3 struct { + Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"` +} + +func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} } +func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } +func (*SimpleSlice3) ProtoMessage() {} +func (*SimpleSlice3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *SimpleSlice3) GetSlices() []string { + if m != nil { + return m.Slices + } + return nil +} + +type SimpleMap3 struct { + Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } +func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } +func (*SimpleMap3) ProtoMessage() {} +func (*SimpleMap3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *SimpleMap3) GetStringy() map[string]string { + if m != nil { + return m.Stringy + } + return nil +} + +type SimpleNull3 struct { + Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"` +} + +func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } +func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } +func (*SimpleNull3) ProtoMessage() {} +func (*SimpleNull3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *SimpleNull3) GetSimple() *Simple3 { + if m != nil { + return m.Simple + } + return nil +} + +type Mappy struct { + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + 
Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` + S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *Mappy) GetNummy() map[int64]int32 { + if m != nil { + return m.Nummy + } + return nil +} + +func (m *Mappy) GetStrry() map[string]string { + if m != nil { + return m.Strry + } + return nil +} + +func (m *Mappy) GetObjjy() map[int32]*Simple3 { + if m != nil { + return m.Objjy + } + return nil +} + +func (m *Mappy) GetBuggy() map[int64]string { + if m != nil { + return m.Buggy + } + return nil +} + +func (m *Mappy) GetBooly() map[bool]bool { + if m != nil { + return m.Booly + } + return nil +} + +func (m *Mappy) GetEnumy() map[string]Numeral { + if m != nil { + return m.Enumy + } + return nil +} + +func (m *Mappy) GetS32Booly() map[int32]bool { + if m != nil { + return m.S32Booly + } + return nil +} + +func (m *Mappy) GetS64Booly() map[int64]bool { + if m != nil { + return m.S64Booly + } + return nil +} + +func (m *Mappy) GetU32Booly() map[uint32]bool { + if m != nil { + return m.U32Booly + } + return nil +} + +func (m *Mappy) GetU64Booly() map[uint64]bool { + if m != nil { + return m.U64Booly + } + return nil +} + +func init() { + proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") + proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3") + proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3") + proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3") + proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) +} + +func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c, + 0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0, + 0xad, 0x0c, 0xe6, 0x8b, 0x78, 0x74, 0x5d, 0x77, 
0x95, 0x8e, 0x5e, 0x94, 0x11, 0x07, 0x1c, 0xc2, + 0x2e, 0x4b, 0xdc, 0x99, 0x90, 0xcc, 0x5f, 0xd8, 0xd6, 0xc0, 0xd7, 0xfb, 0xbb, 0x07, 0xe3, 0x48, + 0x72, 0x2d, 0x07, 0x85, 0x6c, 0x77, 0x52, 0x7e, 0xcf, 0xe3, 0x73, 0x24, 0x1d, 0x02, 0x2f, 0xd3, + 0xbc, 0x8c, 0x1f, 0xea, 0xb8, 0xaa, 0x1f, 0xf2, 0x68, 0x17, 0x3f, 0xd6, 0x95, 0x57, 0x94, 0x79, + 0x9d, 0x13, 0x63, 0x57, 0xe5, 0x59, 0x11, 0xb9, 0xe7, 0x60, 0x2e, 0xb7, 0x69, 0x91, 0xc4, 0x3e, + 0x19, 0xc3, 0xf0, 0x3b, 0x8d, 0x1c, 0xed, 0x42, 0xbb, 0xd4, 0x42, 0x5c, 0xba, 0x6f, 0xe0, 0x94, + 0x87, 0xcb, 0x64, 0xfb, 0x18, 0xfb, 0xe4, 0x05, 0x18, 0x15, 0xae, 0x2a, 0x47, 0xbb, 0x18, 0x5e, + 0xda, 0xa1, 0xd8, 0xb9, 0xbf, 0x34, 0x00, 0x0e, 0xce, 0xd7, 0x85, 0x4f, 0x3e, 0x81, 0x59, 0xd5, + 0xe5, 0x36, 0xdb, 0x34, 0x8c, 0x1b, 0x4d, 0x5f, 0x79, 0xbc, 0x9a, 0xd7, 0x41, 0xde, 0x92, 0x13, + 0x77, 0x59, 0x5d, 0x36, 0x61, 0xcb, 0x4f, 0x6e, 0xe0, 0x54, 0x0e, 0xb0, 0xa7, 0x1f, 0x71, 0xc3, + 0x7a, 0xb2, 0x43, 0x5c, 0x92, 0xe7, 0xa0, 0xff, 0x5c, 0x27, 0x34, 0x76, 0x06, 0xec, 0x37, 0xbe, + 0xb9, 0x19, 0x5c, 0x6b, 0xee, 0x15, 0x8c, 0xf8, 0xf7, 0x03, 0x9a, 0x24, 0x3e, 0x79, 0x0b, 0x46, + 0xc5, 0xb6, 0xcc, 0x1e, 0x4d, 0x9f, 0xf5, 0x9b, 0xf0, 0x43, 0x11, 0xbb, 0xbf, 0x2d, 0xd0, 0xe7, + 0xeb, 0xa2, 0x68, 0x88, 0x07, 0x7a, 0x46, 0xd3, 0xb4, 0x6d, 0xdb, 0x69, 0x0d, 0x96, 0x7a, 0x01, + 0x46, 0xbc, 0x5f, 0x8e, 0x21, 0x5f, 0xd5, 0x65, 0xd9, 0x38, 0x03, 0x15, 0xbf, 0xc4, 0x48, 0xf0, + 0x0c, 0x43, 0x3e, 0x8f, 0x76, 0xbb, 0xc6, 0x19, 0xaa, 0xf8, 0x05, 0x46, 0x82, 0x67, 0x18, 0xf2, + 0x11, 0xdd, 0x6c, 0x1a, 0xe7, 0x44, 0xc5, 0xdf, 0x62, 0x24, 0x78, 0x86, 0x31, 0x3e, 0xcf, 0x93, + 0xc6, 0xd1, 0x95, 0x3c, 0x46, 0x2d, 0x8f, 0x6b, 0xe4, 0xe3, 0x8c, 0xa6, 0x8d, 0x63, 0xa8, 0xf8, + 0x3b, 0x8c, 0x04, 0xcf, 0x30, 0xf2, 0x11, 0xac, 0xca, 0x9f, 0xf2, 0x12, 0x26, 0x53, 0xce, 0xf7, + 0x8e, 0x2c, 0x52, 0x6e, 0x3d, 0xc1, 0x4c, 0xbc, 0xfa, 0xc0, 0x45, 0x4b, 0x29, 0x8a, 0xb4, 0x15, + 0xc5, 0x16, 0x45, 0xda, 0x56, 0xb4, 0x55, 0xe2, 0xaa, 0x5f, 0x91, 0x4a, 0x15, 0x69, 0x5b, 0x11, + 0x94, 0x62, 0xbf, 0x62, 0x0b, 0x4f, 0xae, 0x01, 0xba, 0x87, 0x96, 0xe7, 0x6f, 0xa8, 0x98, 0x3f, + 0x5d, 0x9a, 0x3f, 0x34, 0xbb, 0x27, 0xff, 0x97, 0xc9, 0x9d, 0xdc, 0x03, 0x74, 0x8f, 0x2f, 0x9b, + 0x3a, 0x37, 0x5f, 0xcb, 0xa6, 0x62, 0x92, 0xfb, 0x4d, 0x74, 0x73, 0x71, 0xac, 0x7d, 0x7b, 0xdf, + 0x7c, 0xba, 0x10, 0xd9, 0xb4, 0x14, 0xa6, 0xb5, 0xd7, 0x7e, 0x37, 0x2b, 0x8a, 0x83, 0xf7, 0xda, + 0xff, 0xbf, 0x6b, 0x3f, 0xa0, 0x69, 0x5c, 0xae, 0x13, 0xf9, 0x53, 0x9f, 0xe1, 0xac, 0x37, 0x43, + 0x8a, 0xcb, 0x38, 0xdc, 0x07, 0xca, 0xf2, 0xab, 0x1e, 0x3b, 0xfe, 0xbe, 0xbc, 0x3a, 0x54, 0xf9, + 0xec, 0x6f, 0xe4, 0x43, 0x95, 0x4f, 0x8e, 0xc8, 0xef, 0xde, 0x83, 0x29, 0x6e, 0x82, 0x8c, 0xc0, + 0x5c, 0x05, 0x5f, 0x83, 0xc5, 0xb7, 0x60, 0xfc, 0x1f, 0x01, 0x30, 0x66, 0xe1, 0xec, 0xf6, 0xfe, + 0xcb, 0x58, 0x23, 0x36, 0xe8, 0xe1, 0x62, 0x3e, 0x0b, 0xc6, 0x83, 0xc8, 0x60, 0x7f, 0xe0, 0xfe, + 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x34, 0xaf, 0xdb, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto new file mode 100644 index 0000000..d254fa5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.proto @@ -0,0 +1,69 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package jsonpb;
+
+message Simple3 {
+  double dub = 1;
+}
+
+message SimpleSlice3 {
+  repeated string slices = 1;
+}
+
+message SimpleMap3 {
+  map<string, string> stringy = 1;
+}
+
+message SimpleNull3 {
+  Simple3 simple = 1;
+}
+
+enum Numeral {
+  UNKNOWN = 0;
+  ARABIC = 1;
+  ROMAN = 2;
+}
+
+message Mappy {
+  map<int64, int32> nummy = 1;
+  map<string, string> strry = 2;
+  map<int32, Simple3> objjy = 3;
+  map<int64, string> buggy = 4;
+  map<bool, bool> booly = 5;
+  map<string, Numeral> enumy = 6;
+  map<int32, bool> s32booly = 7;
+  map<int64, bool> s64booly = 8;
+  map<uint32, bool> u32booly = 9;
+  map<uint64, bool> u64booly = 10;
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
new file mode 100644
index 0000000..d413d74
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
@@ -0,0 +1,852 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: test_objects.proto
+
+package jsonpb
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/duration"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/struct"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp"
+import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers"
+
+// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Widget_Color int32 + +const ( + Widget_RED Widget_Color = 0 + Widget_GREEN Widget_Color = 1 + Widget_BLUE Widget_Color = 2 +) + +var Widget_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Widget_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Widget_Color) Enum() *Widget_Color { + p := new(Widget_Color) + *p = x + return p +} +func (x Widget_Color) String() string { + return proto.EnumName(Widget_Color_name, int32(x)) +} +func (x *Widget_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color") + if err != nil { + return err + } + *x = Widget_Color(value) + return nil +} +func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } + +// Test message for holding primitive types. +type Simple struct { + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Simple) GetOBool() bool { + if m != nil && m.OBool != nil { + return *m.OBool + } + return false +} + +func (m *Simple) GetOInt32() int32 { + if m != nil && m.OInt32 != nil { + return *m.OInt32 + } + return 0 +} + +func (m *Simple) GetOInt64() int64 { + if m != nil && m.OInt64 != nil { + return *m.OInt64 + } + return 0 +} + +func (m *Simple) GetOUint32() uint32 { + if m != nil && m.OUint32 != nil { + return *m.OUint32 + } + return 0 +} + +func (m *Simple) GetOUint64() uint64 { + if m != nil && m.OUint64 != nil { + return *m.OUint64 + } + return 0 +} + +func (m *Simple) GetOSint32() int32 { + if m != nil && m.OSint32 != nil { + return *m.OSint32 + } + return 0 +} + +func (m *Simple) GetOSint64() int64 { + if m != nil && m.OSint64 != nil { + return *m.OSint64 + } + return 0 +} + +func (m *Simple) GetOFloat() float32 { + if m != nil && m.OFloat != nil { + return *m.OFloat + } + return 0 +} + +func (m *Simple) GetODouble() float64 { + if m != nil && m.ODouble != nil { + return *m.ODouble + } + return 0 +} + +func (m *Simple) GetOString() string { + if m != nil && m.OString != nil { + return *m.OString + } + return "" +} + +func (m *Simple) GetOBytes() []byte { + if m != nil { + return m.OBytes + } + return nil +} + +// Test message for holding special non-finites 
primitives. +type NonFinites struct { + FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` + FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` + FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` + DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` + DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` + DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonFinites) Reset() { *m = NonFinites{} } +func (m *NonFinites) String() string { return proto.CompactTextString(m) } +func (*NonFinites) ProtoMessage() {} +func (*NonFinites) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *NonFinites) GetFNan() float32 { + if m != nil && m.FNan != nil { + return *m.FNan + } + return 0 +} + +func (m *NonFinites) GetFPinf() float32 { + if m != nil && m.FPinf != nil { + return *m.FPinf + } + return 0 +} + +func (m *NonFinites) GetFNinf() float32 { + if m != nil && m.FNinf != nil { + return *m.FNinf + } + return 0 +} + +func (m *NonFinites) GetDNan() float64 { + if m != nil && m.DNan != nil { + return *m.DNan + } + return 0 +} + +func (m *NonFinites) GetDPinf() float64 { + if m != nil && m.DPinf != nil { + return *m.DPinf + } + return 0 +} + +func (m *NonFinites) GetDNinf() float64 { + if m != nil && m.DNinf != nil { + return *m.DNinf + } + return 0 +} + +// Test message for holding repeated primitives. +type Repeats struct { + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Repeats) GetRBool() []bool { + if m != nil { + return m.RBool + } + return nil +} + +func (m *Repeats) GetRInt32() []int32 { + if m != nil { + return m.RInt32 + } + return nil +} + +func (m *Repeats) GetRInt64() []int64 { + if m != nil { + return m.RInt64 + } + return nil +} + +func (m *Repeats) GetRUint32() []uint32 { + if m != nil { + return m.RUint32 + } + return nil +} + +func (m *Repeats) GetRUint64() []uint64 { + if m != nil { + return m.RUint64 + } + return nil +} + +func (m *Repeats) GetRSint32() []int32 { + if m != nil { + return m.RSint32 + } + return nil +} + 
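+// Note: each generated getter in this file checks for a nil receiver before
+// dereferencing, so chained access (e.g., w.GetRepeats().GetRString() for a
+// possibly-nil *Widget w -- an illustrative variable, not one defined here)
+// is safe even when intermediate messages are unset.
+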
+func (m *Repeats) GetRSint64() []int64 { + if m != nil { + return m.RSint64 + } + return nil +} + +func (m *Repeats) GetRFloat() []float32 { + if m != nil { + return m.RFloat + } + return nil +} + +func (m *Repeats) GetRDouble() []float64 { + if m != nil { + return m.RDouble + } + return nil +} + +func (m *Repeats) GetRString() []string { + if m != nil { + return m.RString + } + return nil +} + +func (m *Repeats) GetRBytes() [][]byte { + if m != nil { + return m.RBytes + } + return nil +} + +// Test message for holding enums and nested messages. +type Widget struct { + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Widget) GetColor() Widget_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Widget_RED +} + +func (m *Widget) GetRColor() []Widget_Color { + if m != nil { + return m.RColor + } + return nil +} + +func (m *Widget) GetSimple() *Simple { + if m != nil { + return m.Simple + } + return nil +} + +func (m *Widget) GetRSimple() []*Simple { + if m != nil { + return m.RSimple + } + return nil +} + +func (m *Widget) GetRepeats() *Repeats { + if m != nil { + return m.Repeats + } + return nil +} + +func (m *Widget) GetRRepeats() []*Repeats { + if m != nil { + return m.RRepeats + } + return nil +} + +type Maps struct { + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *Maps) GetMInt64Str() map[int64]string { + if m != nil { + return m.MInt64Str + } + return nil +} + +func (m *Maps) GetMBoolSimple() map[bool]*Simple { + if m != nil { + return m.MBoolSimple + } + return nil +} + +type MsgWithOneof struct { + // Types that are valid to be assigned to Union: + // *MsgWithOneof_Title + // *MsgWithOneof_Salary + // *MsgWithOneof_Country + // *MsgWithOneof_HomeAddress + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +type isMsgWithOneof_Union interface 
{ + isMsgWithOneof_Union() +} + +type MsgWithOneof_Title struct { + Title string `protobuf:"bytes,1,opt,name=title,oneof"` +} +type MsgWithOneof_Salary struct { + Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` +} +type MsgWithOneof_Country struct { + Country string `protobuf:"bytes,3,opt,name=Country,oneof"` +} +type MsgWithOneof_HomeAddress struct { + HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` +} + +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} +func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} + +func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *MsgWithOneof) GetTitle() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok { + return x.Title + } + return "" +} + +func (m *MsgWithOneof) GetSalary() int64 { + if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok { + return x.Salary + } + return 0 +} + +func (m *MsgWithOneof) GetCountry() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { + return x.Country + } + return "" +} + +func (m *MsgWithOneof) GetHomeAddress() string { + if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok { + return x.HomeAddress + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ + (*MsgWithOneof_Title)(nil), + (*MsgWithOneof_Salary)(nil), + (*MsgWithOneof_Country)(nil), + (*MsgWithOneof_HomeAddress)(nil), + } +} + +func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Title) + case *MsgWithOneof_Salary: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Country) + case *MsgWithOneof_HomeAddress: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.HomeAddress) + case nil: + default: + return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) + } + return nil +} + +func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*MsgWithOneof) + switch tag { + case 1: // union.title + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Title{x} + return true, err + case 2: // union.salary + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &MsgWithOneof_Salary{int64(x)} + return true, err + case 3: // union.Country + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Country{x} + return true, err + case 4: // union.home_address + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_HomeAddress{x} + return true, err + default: + return false, nil + } +} + 
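+// Note: the _MsgWithOneof_OneofMarshaler and _MsgWithOneof_OneofUnmarshaler
+// helpers above frame each oneof case with the standard protobuf tag varint
+// (field_number<<3 | wire_type) before its payload; the sizer below accounts
+// for those same tag and payload bytes.
+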
+func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Title))) + n += len(x.Title) + case *MsgWithOneof_Salary: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Country))) + n += len(x.Country) + case *MsgWithOneof_HomeAddress: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.HomeAddress))) + n += len(x.HomeAddress) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Real struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +var extRange_Real = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Real) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Real +} + +func (m *Real) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Complex struct { + Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +var extRange_Complex = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Complex +} + +func (m *Complex) GetImaginary() float64 { + if m != nil && m.Imaginary != nil { + return *m.Imaginary + } + return 0 +} + +var E_Complex_RealExtension = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*Complex)(nil), + Field: 123, + Name: "jsonpb.Complex.real_extension", + Tag: "bytes,123,opt,name=real_extension,json=realExtension", + Filename: "test_objects.proto", +} + +type KnownTypes struct { + An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Lv *google_protobuf2.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` + Val *google_protobuf2.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` + Dbl *google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" 
json:"u32,omitempty"` + Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *KnownTypes) GetAn() *google_protobuf.Any { + if m != nil { + return m.An + } + return nil +} + +func (m *KnownTypes) GetDur() *google_protobuf1.Duration { + if m != nil { + return m.Dur + } + return nil +} + +func (m *KnownTypes) GetSt() *google_protobuf2.Struct { + if m != nil { + return m.St + } + return nil +} + +func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { + if m != nil { + return m.Ts + } + return nil +} + +func (m *KnownTypes) GetLv() *google_protobuf2.ListValue { + if m != nil { + return m.Lv + } + return nil +} + +func (m *KnownTypes) GetVal() *google_protobuf2.Value { + if m != nil { + return m.Val + } + return nil +} + +func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { + if m != nil { + return m.Dbl + } + return nil +} + +func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { + if m != nil { + return m.Flt + } + return nil +} + +func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { + if m != nil { + return m.I64 + } + return nil +} + +func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { + if m != nil { + return m.U64 + } + return nil +} + +func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { + if m != nil { + return m.I32 + } + return nil +} + +func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { + if m != nil { + return m.U32 + } + return nil +} + +func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { + if m != nil { + return m.Bool + } + return nil +} + +func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { + if m != nil { + return m.Str + } + return nil +} + +func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { + if m != nil { + return m.Bytes + } + return nil +} + +var E_Name = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*string)(nil), + Field: 124, + Name: "jsonpb.name", + Tag: "bytes,124,opt,name=name", + Filename: "test_objects.proto", +} + +func init() { + proto.RegisterType((*Simple)(nil), "jsonpb.Simple") + proto.RegisterType((*NonFinites)(nil), "jsonpb.NonFinites") + proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") + proto.RegisterType((*Widget)(nil), "jsonpb.Widget") + proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") + proto.RegisterType((*Real)(nil), "jsonpb.Real") + proto.RegisterType((*Complex)(nil), "jsonpb.Complex") + proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") + proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) + proto.RegisterExtension(E_Complex_RealExtension) + proto.RegisterExtension(E_Name) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1160 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0x41, 0x73, 0xdb, 0x44, + 0x14, 0xc7, 0x23, 0xc9, 0x92, 0xed, 0x75, 0x92, 0x9a, 0x6d, 0xda, 0x2a, 0x26, 0x80, 
0xc6, 0x94, + 0x22, 0x0a, 0x75, 0x07, 0xc7, 0xe3, 0x61, 0x0a, 0x97, 0xa4, 0x71, 0x29, 0x43, 0x13, 0x98, 0x4d, + 0x43, 0x8f, 0x1e, 0x39, 0x5a, 0xbb, 0x2a, 0xf2, 0xae, 0x67, 0x77, 0x95, 0xd4, 0x03, 0x87, 0x9c, + 0x39, 0x32, 0x7c, 0x05, 0xf8, 0x08, 0x1c, 0xf8, 0x74, 0xcc, 0xdb, 0x95, 0xac, 0xc4, 0x8e, 0x4f, + 0xf1, 0x7b, 0xef, 0xff, 0xfe, 0x59, 0xed, 0x6f, 0x77, 0x1f, 0xc2, 0x8a, 0x4a, 0x35, 0xe4, 0xa3, + 0x77, 0xf4, 0x5c, 0xc9, 0xce, 0x4c, 0x70, 0xc5, 0xb1, 0xf7, 0x4e, 0x72, 0x36, 0x1b, 0xb5, 0x76, + 0x27, 0x9c, 0x4f, 0x52, 0xfa, 0x54, 0x67, 0x47, 0xd9, 0xf8, 0x69, 0xc4, 0xe6, 0x46, 0xd2, 0xfa, + 0x78, 0xb9, 0x14, 0x67, 0x22, 0x52, 0x09, 0x67, 0x79, 0x7d, 0x6f, 0xb9, 0x2e, 0x95, 0xc8, 0xce, + 0x55, 0x5e, 0xfd, 0x64, 0xb9, 0xaa, 0x92, 0x29, 0x95, 0x2a, 0x9a, 0xce, 0xd6, 0xd9, 0x5f, 0x8a, + 0x68, 0x36, 0xa3, 0x22, 0x5f, 0x61, 0xfb, 0x6f, 0x1b, 0x79, 0xa7, 0xc9, 0x74, 0x96, 0x52, 0x7c, + 0x0f, 0x79, 0x7c, 0x38, 0xe2, 0x3c, 0xf5, 0xad, 0xc0, 0x0a, 0x6b, 0xc4, 0xe5, 0x87, 0x9c, 0xa7, + 0xf8, 0x01, 0xaa, 0xf2, 0x61, 0xc2, 0xd4, 0x7e, 0xd7, 0xb7, 0x03, 0x2b, 0x74, 0x89, 0xc7, 0x7f, + 0x80, 0x68, 0x51, 0xe8, 0xf7, 0x7c, 0x27, 0xb0, 0x42, 0xc7, 0x14, 0xfa, 0x3d, 0xbc, 0x8b, 0x6a, + 0x7c, 0x98, 0x99, 0x96, 0x4a, 0x60, 0x85, 0x5b, 0xa4, 0xca, 0xcf, 0x74, 0x58, 0x96, 0xfa, 0x3d, + 0xdf, 0x0d, 0xac, 0xb0, 0x92, 0x97, 0x8a, 0x2e, 0x69, 0xba, 0xbc, 0xc0, 0x0a, 0x3f, 0x20, 0x55, + 0x7e, 0x7a, 0xad, 0x4b, 0x9a, 0xae, 0x6a, 0x60, 0x85, 0x38, 0x2f, 0xf5, 0x7b, 0x66, 0x11, 0xe3, + 0x94, 0x47, 0xca, 0xaf, 0x05, 0x56, 0x68, 0x13, 0x8f, 0xbf, 0x80, 0xc8, 0xf4, 0xc4, 0x3c, 0x1b, + 0xa5, 0xd4, 0xaf, 0x07, 0x56, 0x68, 0x91, 0x2a, 0x3f, 0xd2, 0x61, 0x6e, 0xa7, 0x44, 0xc2, 0x26, + 0x3e, 0x0a, 0xac, 0xb0, 0x0e, 0x76, 0x3a, 0x34, 0x76, 0xa3, 0xb9, 0xa2, 0xd2, 0x6f, 0x04, 0x56, + 0xb8, 0x49, 0x3c, 0x7e, 0x08, 0x51, 0xfb, 0x4f, 0x0b, 0xa1, 0x13, 0xce, 0x5e, 0x24, 0x2c, 0x51, + 0x54, 0xe2, 0xbb, 0xc8, 0x1d, 0x0f, 0x59, 0xc4, 0xf4, 0x56, 0xd9, 0xa4, 0x32, 0x3e, 0x89, 0x18, + 0x6c, 0xe0, 0x78, 0x38, 0x4b, 0xd8, 0x58, 0x6f, 0x94, 0x4d, 0xdc, 0xf1, 0xcf, 0x09, 0x1b, 0x9b, + 0x34, 0x83, 0xb4, 0x93, 0xa7, 0x4f, 0x20, 0x7d, 0x17, 0xb9, 0xb1, 0xb6, 0xa8, 0xe8, 0xd5, 0x55, + 0xe2, 0xdc, 0x22, 0x36, 0x16, 0xae, 0xce, 0xba, 0x71, 0x61, 0x11, 0x1b, 0x0b, 0x2f, 0x4f, 0x83, + 0x45, 0xfb, 0x1f, 0x1b, 0x55, 0x09, 0x9d, 0xd1, 0x48, 0x49, 0x90, 0x88, 0x82, 0x9e, 0x03, 0xf4, + 0x44, 0x41, 0x4f, 0x2c, 0xe8, 0x39, 0x40, 0x4f, 0x2c, 0xe8, 0x89, 0x05, 0x3d, 0x07, 0xe8, 0x89, + 0x05, 0x3d, 0x51, 0xd2, 0x73, 0x80, 0x9e, 0x28, 0xe9, 0x89, 0x92, 0x9e, 0x03, 0xf4, 0x44, 0x49, + 0x4f, 0x94, 0xf4, 0x1c, 0xa0, 0x27, 0x4e, 0xaf, 0x75, 0x2d, 0xe8, 0x39, 0x40, 0x4f, 0x94, 0xf4, + 0xc4, 0x82, 0x9e, 0x03, 0xf4, 0xc4, 0x82, 0x9e, 0x28, 0xe9, 0x39, 0x40, 0x4f, 0x94, 0xf4, 0x44, + 0x49, 0xcf, 0x01, 0x7a, 0xa2, 0xa4, 0x27, 0x16, 0xf4, 0x1c, 0xa0, 0x27, 0x0c, 0xbd, 0x7f, 0x6d, + 0xe4, 0xbd, 0x49, 0xe2, 0x09, 0x55, 0xf8, 0x31, 0x72, 0xcf, 0x79, 0xca, 0x85, 0x26, 0xb7, 0xdd, + 0xdd, 0xe9, 0x98, 0x2b, 0xda, 0x31, 0xe5, 0xce, 0x73, 0xa8, 0x11, 0x23, 0xc1, 0x4f, 0xc0, 0xcf, + 0xa8, 0x61, 0xf3, 0xd6, 0xa9, 0x3d, 0xa1, 0xff, 0xe2, 0x47, 0xc8, 0x93, 0xfa, 0x2a, 0xe9, 0x53, + 0xd5, 0xe8, 0x6e, 0x17, 0x6a, 0x73, 0xc1, 0x48, 0x5e, 0xc5, 0x5f, 0x98, 0x0d, 0xd1, 0x4a, 0x58, + 0xe7, 0xaa, 0x12, 0x36, 0x28, 0x97, 0x56, 0x85, 0x01, 0xec, 0xef, 0x68, 0xcf, 0x3b, 0x85, 0x32, + 0xe7, 0x4e, 0x8a, 0x3a, 0xfe, 0x0a, 0xd5, 0xc5, 0xb0, 0x10, 0xdf, 0xd3, 0xb6, 0x2b, 0xe2, 0x9a, + 0xc8, 0x7f, 0xb5, 0x3f, 0x43, 0xae, 0x59, 0x74, 0x15, 0x39, 0x64, 0x70, 0xd4, 0xdc, 0xc0, 0x75, + 0xe4, 0x7e, 
0x4f, 0x06, 0x83, 0x93, 0xa6, 0x85, 0x6b, 0xa8, 0x72, 0xf8, 0xea, 0x6c, 0xd0, 0xb4, + 0xdb, 0x7f, 0xd9, 0xa8, 0x72, 0x1c, 0xcd, 0x24, 0xfe, 0x16, 0x35, 0xa6, 0xe6, 0xb8, 0xc0, 0xde, + 0xeb, 0x33, 0xd6, 0xe8, 0x7e, 0x58, 0xf8, 0x83, 0xa4, 0x73, 0xac, 0xcf, 0xcf, 0xa9, 0x12, 0x03, + 0xa6, 0xc4, 0x9c, 0xd4, 0xa7, 0x45, 0x8c, 0x0f, 0xd0, 0xd6, 0x54, 0x9f, 0xcd, 0xe2, 0xab, 0x6d, + 0xdd, 0xfe, 0xd1, 0xcd, 0x76, 0x38, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x63, 0x5a, 0x66, 0x5a, 0xdf, + 0xa1, 0xed, 0x9b, 0xfe, 0xb8, 0x89, 0x9c, 0x5f, 0xe9, 0x5c, 0x63, 0x74, 0x08, 0xfc, 0xc4, 0x3b, + 0xc8, 0xbd, 0x88, 0xd2, 0x8c, 0xea, 0xeb, 0x57, 0x27, 0x26, 0x78, 0x66, 0x7f, 0x63, 0xb5, 0x4e, + 0x50, 0x73, 0xd9, 0xfe, 0x7a, 0x7f, 0xcd, 0xf4, 0x3f, 0xbc, 0xde, 0xbf, 0x0a, 0xa5, 0xf4, 0x6b, + 0xff, 0x61, 0xa1, 0xcd, 0x63, 0x39, 0x79, 0x93, 0xa8, 0xb7, 0x3f, 0x31, 0xca, 0xc7, 0xf8, 0x3e, + 0x72, 0x55, 0xa2, 0x52, 0xaa, 0xed, 0xea, 0x2f, 0x37, 0x88, 0x09, 0xb1, 0x8f, 0x3c, 0x19, 0xa5, + 0x91, 0x98, 0x6b, 0x4f, 0xe7, 0xe5, 0x06, 0xc9, 0x63, 0xdc, 0x42, 0xd5, 0xe7, 0x3c, 0x83, 0x95, + 0xe8, 0x67, 0x01, 0x7a, 0x8a, 0x04, 0xfe, 0x14, 0x6d, 0xbe, 0xe5, 0x53, 0x3a, 0x8c, 0xe2, 0x58, + 0x50, 0x29, 0xf5, 0x0b, 0x01, 0x82, 0x06, 0x64, 0x0f, 0x4c, 0xf2, 0xb0, 0x8a, 0xdc, 0x8c, 0x25, + 0x9c, 0xb5, 0x1f, 0xa1, 0x0a, 0xa1, 0x51, 0x5a, 0x7e, 0xbe, 0x65, 0xde, 0x08, 0x1d, 0x3c, 0xae, + 0xd5, 0xe2, 0xe6, 0xd5, 0xd5, 0xd5, 0x95, 0xdd, 0xbe, 0x84, 0xff, 0x08, 0x5f, 0xf2, 0x1e, 0xef, + 0xa1, 0x7a, 0x32, 0x8d, 0x26, 0x09, 0x83, 0x95, 0x19, 0x79, 0x99, 0x28, 0x5b, 0xba, 0x47, 0x68, + 0x5b, 0xd0, 0x28, 0x1d, 0xd2, 0xf7, 0x8a, 0x32, 0x99, 0x70, 0x86, 0x37, 0xcb, 0x23, 0x15, 0xa5, + 0xfe, 0x6f, 0x37, 0xcf, 0x64, 0x6e, 0x4f, 0xb6, 0xa0, 0x69, 0x50, 0xf4, 0xb4, 0xff, 0x73, 0x11, + 0xfa, 0x91, 0xf1, 0x4b, 0xf6, 0x7a, 0x3e, 0xa3, 0x12, 0x3f, 0x44, 0x76, 0xc4, 0xfc, 0x6d, 0xdd, + 0xba, 0xd3, 0x31, 0xf3, 0xa9, 0x53, 0xcc, 0xa7, 0xce, 0x01, 0x9b, 0x13, 0x3b, 0x62, 0xf8, 0x4b, + 0xe4, 0xc4, 0x99, 0xb9, 0xa5, 0x8d, 0xee, 0xee, 0x8a, 0xec, 0x28, 0x9f, 0x92, 0x04, 0x54, 0xf8, + 0x73, 0x64, 0x4b, 0xe5, 0x6f, 0x6a, 0xed, 0x83, 0x15, 0xed, 0xa9, 0x9e, 0x98, 0xc4, 0x96, 0x70, + 0xfb, 0x6d, 0x25, 0x73, 0xbe, 0xad, 0x15, 0xe1, 0xeb, 0x62, 0x78, 0x12, 0x5b, 0x49, 0xd0, 0xa6, + 0x17, 0xfe, 0x9d, 0x35, 0xda, 0x57, 0x89, 0x54, 0xbf, 0xc0, 0x0e, 0x13, 0x3b, 0xbd, 0xc0, 0x21, + 0x72, 0x2e, 0xa2, 0xd4, 0x6f, 0x6a, 0xf1, 0xfd, 0x15, 0xb1, 0x11, 0x82, 0x04, 0x77, 0x90, 0x13, + 0x8f, 0x52, 0xcd, 0xbc, 0xd1, 0xdd, 0x5b, 0xfd, 0x2e, 0xfd, 0xc8, 0xe5, 0xfa, 0x78, 0x94, 0xe2, + 0x27, 0xc8, 0x19, 0xa7, 0x4a, 0x1f, 0x01, 0xb8, 0x70, 0xcb, 0x7a, 0xfd, 0x5c, 0xe6, 0xf2, 0x71, + 0xaa, 0x40, 0x9e, 0xe4, 0xb3, 0xf5, 0x36, 0xb9, 0xbe, 0x42, 0xb9, 0x3c, 0xe9, 0xf7, 0x60, 0x35, + 0x59, 0xbf, 0xa7, 0xa7, 0xca, 0x6d, 0xab, 0x39, 0xbb, 0xae, 0xcf, 0xfa, 0x3d, 0x6d, 0xbf, 0xdf, + 0xd5, 0x43, 0x78, 0x8d, 0xfd, 0x7e, 0xb7, 0xb0, 0xdf, 0xef, 0x6a, 0xfb, 0xfd, 0xae, 0x9e, 0xcc, + 0xeb, 0xec, 0x17, 0xfa, 0x4c, 0xeb, 0x2b, 0x7a, 0x84, 0xd5, 0xd7, 0x6c, 0x3a, 0xdc, 0x61, 0x23, + 0xd7, 0x3a, 0xf0, 0x87, 0xd7, 0x08, 0xad, 0xf1, 0x37, 0x63, 0x21, 0xf7, 0x97, 0x4a, 0xe0, 0xaf, + 0x91, 0x5b, 0x0e, 0xf7, 0xdb, 0x3e, 0x40, 0x8f, 0x0b, 0xd3, 0x60, 0x94, 0xcf, 0x02, 0x54, 0x61, + 0xd1, 0x94, 0x2e, 0x1d, 0xfc, 0xdf, 0xf5, 0x0b, 0xa3, 0x2b, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, + 0xd5, 0x39, 0x32, 0x09, 0xf9, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto new file 
mode 100644 index 0000000..0d2fc1f --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto @@ -0,0 +1,147 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +package jsonpb; + +// Test message for holding primitive types. +message Simple { + optional bool o_bool = 1; + optional int32 o_int32 = 2; + optional int64 o_int64 = 3; + optional uint32 o_uint32 = 4; + optional uint64 o_uint64 = 5; + optional sint32 o_sint32 = 6; + optional sint64 o_sint64 = 7; + optional float o_float = 8; + optional double o_double = 9; + optional string o_string = 10; + optional bytes o_bytes = 11; +} + +// Test message for holding special non-finites primitives. +message NonFinites { + optional float f_nan = 1; + optional float f_pinf = 2; + optional float f_ninf = 3; + optional double d_nan = 4; + optional double d_pinf = 5; + optional double d_ninf = 6; +} + +// Test message for holding repeated primitives. +message Repeats { + repeated bool r_bool = 1; + repeated int32 r_int32 = 2; + repeated int64 r_int64 = 3; + repeated uint32 r_uint32 = 4; + repeated uint64 r_uint64 = 5; + repeated sint32 r_sint32 = 6; + repeated sint64 r_sint64 = 7; + repeated float r_float = 8; + repeated double r_double = 9; + repeated string r_string = 10; + repeated bytes r_bytes = 11; +} + +// Test message for holding enums and nested messages. 
+message Widget { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color color = 1; + repeated Color r_color = 2; + + optional Simple simple = 10; + repeated Simple r_simple = 11; + + optional Repeats repeats = 20; + repeated Repeats r_repeats = 21; +} + +message Maps { + map m_int64_str = 1; + map m_bool_simple = 2; +} + +message MsgWithOneof { + oneof union { + string title = 1; + int64 salary = 2; + string Country = 3; + string home_address = 4; + } +} + +message Real { + optional double value = 1; + extensions 100 to max; +} + +extend Real { + optional string name = 124; +} + +message Complex { + extend Real { + optional Complex real_extension = 123; + } + optional double imaginary = 1; + extensions 100 to max; +} + +message KnownTypes { + optional google.protobuf.Any an = 14; + optional google.protobuf.Duration dur = 1; + optional google.protobuf.Struct st = 12; + optional google.protobuf.Timestamp ts = 2; + optional google.protobuf.ListValue lv = 15; + optional google.protobuf.Value val = 16; + + optional google.protobuf.DoubleValue dbl = 3; + optional google.protobuf.FloatValue flt = 4; + optional google.protobuf.Int64Value i64 = 5; + optional google.protobuf.UInt64Value u64 = 6; + optional google.protobuf.Int32Value i32 = 7; + optional google.protobuf.UInt32Value u32 = 8; + optional google.protobuf.BoolValue bool = 9; + optional google.protobuf.StringValue str = 10; + optional google.protobuf.BytesValue bytes = 11; +} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 0000000..e2e0651 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
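A usage sketch for the targets that follow (assuming protoc and the protoc-gen-go plugin are installed and on PATH):

    # make test              -> runs `install` and `generate-test-pbs`, then `go test`
    # make generate-test-pbs -> regenerates the test .pb.go files via protoc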
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto + make diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go new file mode 100644 index 0000000..41451a4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -0,0 +1,2278 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "testing" + "time" + + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +var globalO *Buffer + +func old() *Buffer { + if globalO == nil { + globalO = NewBuffer(nil) + } + globalO.Reset() + return globalO +} + +func equalbytes(b1, b2 []byte, t *testing.T) { + if len(b1) != len(b2) { + t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) + return + } + for i := 0; i < len(b1); i++ { + if b1[i] != b2[i] { + t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) + } + } +} + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. +// (It's remarkable that required, optional, and repeated all have +// 8 letters.) 
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = initGoTest_RequiredGroup() + + return pb +} + +func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { + data := b.Bytes() + ld := len(data) + ls := len(s) / 2 + + fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) + + // find the interesting spot - n + n := ls + if ld < ls { + n = ld + } + j := 0 + for i := 0; i < n; i++ { + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + if data[i] == bs { + continue + } + n = i + break + } + l := n - 10 + if l < 0 { + l = 0 + } + h := n + 10 + + // find the interesting spot - n + fmt.Printf("is[%d]:", l) + for i := l; i < h; i++ { + if i >= ld { + fmt.Printf(" --") + continue + } + fmt.Printf(" %.2x", data[i]) + } + fmt.Printf("\n") + + fmt.Printf("sb[%d]:", l) + for i := l; i < h; i++ { + if i >= ls { + fmt.Printf(" --") + continue + } + bs := hex(s[j])*16 + hex(s[j+1]) + j += 2 + fmt.Printf(" %.2x", bs) + } + fmt.Printf("\n") + + t.Fail() + + // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) + // Print the output in a partially-decoded format; can + // be helpful when updating the test. It produces the output + // that is pasted, with minor edits, into the argument to verify(). 
+ // data := b.Bytes() + // nesting := 0 + // for b.Len() > 0 { + // start := len(data) - b.Len() + // var u uint64 + // u, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // wire := u & 0x7 + // tag := u >> 3 + // switch wire { + // case WireVarint: + // v, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on varint:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed32: + // v, err := DecodeFixed32(b) + // if err != nil { + // fmt.Printf("decode error on fixed32:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireFixed64: + // v, err := DecodeFixed64(b) + // if err != nil { + // fmt.Printf("decode error on fixed64:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", + // data[start:len(data)-b.Len()], tag, wire, v) + // case WireBytes: + // nb, err := DecodeVarint(b) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // after_tag := len(data) - b.Len() + // str := make([]byte, nb) + // _, err = b.Read(str) + // if err != nil { + // fmt.Printf("decode error on bytes:", err) + // return + // } + // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", + // data[start:after_tag], str, tag, wire) + // case WireStartGroup: + // nesting++ + // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // case WireEndGroup: + // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", + // data[start:len(data)-b.Len()], tag, nesting) + // nesting-- + // default: + // fmt.Printf("unrecognized wire type %d\n", wire) + // return + // } + // } +} + +func hex(c uint8) uint8 { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return 10 + c - 'a' + } + if 'A' <= c && c <= 'F' { + return 10 + c - 'A' + } + return 0 +} + +func equal(b []byte, s string, t *testing.T) bool { + if 2*len(b) != len(s) { + // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) + fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) + return false + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { + x := hex(s[j])*16 + hex(s[j+1]) + if b[i] != x { + // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) + fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) + return false + } + } + return true +} + +func overify(t *testing.T, pb *GoTest, expected string) { + o := old() + err := o.Marshal(pb) + if err != nil { + fmt.Printf("overify marshal-1 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 1", o.Bytes()) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by recreating the original buffer. 
+ pbd := new(GoTest) + err = o.Unmarshal(pbd) + if err != nil { + t.Fatalf("overify unmarshal err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + o.Reset() + err = o.Marshal(pbd) + if err != nil { + t.Errorf("overify marshal-2 err = %v", err) + o.DebugPrint("", o.Bytes()) + t.Fatalf("string = %s", expected) + } + if !equal(o.Bytes(), expected, t) { + o.DebugPrint("overify neq 2", o.Bytes()) + t.Fatalf("string = %s", expected) + } +} + +// Simple tests for numeric encode/decode primitives (varint, etc.) +func TestNumericPrimitives(t *testing.T) { + for i := uint64(0); i < 1e6; i += 111 { + o := old() + if o.EncodeVarint(i) != nil { + t.Error("EncodeVarint") + break + } + x, e := o.DecodeVarint() + if e != nil { + t.Fatal("DecodeVarint") + } + if x != i { + t.Fatal("varint decode fail:", i, x) + } + + o = old() + if o.EncodeFixed32(i) != nil { + t.Fatal("encFixed32") + } + x, e = o.DecodeFixed32() + if e != nil { + t.Fatal("decFixed32") + } + if x != i { + t.Fatal("fixed32 decode fail:", i, x) + } + + o = old() + if o.EncodeFixed64(i*1234567) != nil { + t.Error("encFixed64") + break + } + x, e = o.DecodeFixed64() + if e != nil { + t.Error("decFixed64") + break + } + if x != i*1234567 { + t.Error("fixed64 decode fail:", i*1234567, x) + break + } + + o = old() + i32 := int32(i - 12345) + if o.EncodeZigzag32(uint64(i32)) != nil { + t.Fatal("EncodeZigzag32") + } + x, e = o.DecodeZigzag32() + if e != nil { + t.Fatal("DecodeZigzag32") + } + if x != uint64(uint32(i32)) { + t.Fatal("zigzag32 decode fail:", i32, x) + } + + o = old() + i64 := int64(i - 12345) + if o.EncodeZigzag64(uint64(i64)) != nil { + t.Fatal("EncodeZigzag64") + } + x, e = o.DecodeZigzag64() + if e != nil { + t.Fatal("DecodeZigzag64") + } + if x != uint64(i64) { + t.Fatal("zigzag64 decode fail:", i64, x) + } + } +} + +// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. +type fakeMarshaler struct { + b []byte + err error +} + +func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err } +func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) } +func (f *fakeMarshaler) ProtoMessage() {} +func (f *fakeMarshaler) Reset() {} + +type msgWithFakeMarshaler struct { + M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"` +} + +func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) } +func (m *msgWithFakeMarshaler) ProtoMessage() {} +func (m *msgWithFakeMarshaler) Reset() {} + +// Simple tests for proto messages that implement the Marshaler interface. +func TestMarshalerEncoding(t *testing.T) { + tests := []struct { + name string + m Message + want []byte + errType reflect.Type + }{ + { + name: "Marshaler that fails", + m: &fakeMarshaler{ + err: errors.New("some marshal err"), + b: []byte{5, 6, 7}, + }, + // Since the Marshal method returned bytes, they should be written to the + // buffer. (For efficiency, we assume that Marshal implementations are + // always correct w.r.t. RequiredNotSetError and output.) + want: []byte{5, 6, 7}, + errType: reflect.TypeOf(errors.New("some marshal err")), + }, + { + name: "Marshaler that fails with RequiredNotSetError", + m: &msgWithFakeMarshaler{ + M: &fakeMarshaler{ + err: &RequiredNotSetError{}, + b: []byte{5, 6, 7}, + }, + }, + // Since there's an error that can be continued after, + // the buffer should be written. 
+ want: []byte{ + 10, 3, // for &msgWithFakeMarshaler + 5, 6, 7, // for &fakeMarshaler + }, + errType: reflect.TypeOf(&RequiredNotSetError{}), + }, + { + name: "Marshaler that succeeds", + m: &fakeMarshaler{ + b: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + want: []byte{0, 1, 2, 3, 4, 127, 255}, + }, + } + for _, test := range tests { + b := NewBuffer(nil) + err := b.Marshal(test.m) + if reflect.TypeOf(err) != test.errType { + t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) + } + if !reflect.DeepEqual(test.want, b.Bytes()) { + t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) + } + if size := Size(test.m); size != len(b.Bytes()) { + t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) + } + + m, mErr := Marshal(test.m) + if !bytes.Equal(b.Bytes(), m) { + t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) + } + if !reflect.DeepEqual(err, mErr) { + t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", + test.name, fmt.Sprint(mErr), fmt.Sprint(err)) + } + } +} + +// Simple tests for bytes +func TestBytesPrimitives(t *testing.T) { + o := old() + bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'} + if o.EncodeRawBytes(bytes) != nil { + t.Error("EncodeRawBytes") + } + decb, e := o.DecodeRawBytes(false) + if e != nil { + t.Error("DecodeRawBytes") + } + equalbytes(bytes, decb, t) +} + +// Simple tests for strings +func TestStringPrimitives(t *testing.T) { + o := old() + s := "now is the time" + if o.EncodeStringBytes(s) != nil { + t.Error("enc_string") + } + decs, e := o.DecodeStringBytes() + if e != nil { + t.Error("dec_string") + } + if s != decs { + t.Error("string encode/decode fail:", s, decs) + } +} + +// Do we catch the "required bit not set" case? +func TestRequiredBit(t *testing.T) { + o := old() + pb := new(GoTest) + err := o.Marshal(pb) + if err == nil { + t.Error("did not catch missing required fields") + } else if strings.Index(err.Error(), "Kind") < 0 { + t.Error("wrong error type:", err) + } +} + +// Check that all fields are nil. +// Clearly silly, and a residue from a more interesting test with an earlier, +// different initialization property, but it once caught a compiler bug so +// it lives. 
+func checkInitialized(pb *GoTest, t *testing.T) { + if pb.F_BoolDefaulted != nil { + t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted) + } + if pb.F_Int32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted) + } + if pb.F_Int64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted) + } + if pb.F_Fixed32Defaulted != nil { + t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted) + } + if pb.F_Fixed64Defaulted != nil { + t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted) + } + if pb.F_Uint32Defaulted != nil { + t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted) + } + if pb.F_Uint64Defaulted != nil { + t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted) + } + if pb.F_FloatDefaulted != nil { + t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted) + } + if pb.F_DoubleDefaulted != nil { + t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted) + } + if pb.F_StringDefaulted != nil { + t.Error("New or Reset did not set string:", *pb.F_StringDefaulted) + } + if pb.F_BytesDefaulted != nil { + t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted)) + } + if pb.F_Sint32Defaulted != nil { + t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted) + } + if pb.F_Sint64Defaulted != nil { + t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted) + } +} + +// Does Reset() reset? +func TestReset(t *testing.T) { + pb := initGoTest(true) + // muck with some values + pb.F_BoolDefaulted = Bool(false) + pb.F_Int32Defaulted = Int32(237) + pb.F_Int64Defaulted = Int64(12346) + pb.F_Fixed32Defaulted = Uint32(32000) + pb.F_Fixed64Defaulted = Uint64(666) + pb.F_Uint32Defaulted = Uint32(323232) + pb.F_Uint64Defaulted = nil + pb.F_FloatDefaulted = nil + pb.F_DoubleDefaulted = Float64(0) + pb.F_StringDefaulted = String("gotcha") + pb.F_BytesDefaulted = []byte("asdfasdf") + pb.F_Sint32Defaulted = Int32(123) + pb.F_Sint64Defaulted = Int64(789) + pb.Reset() + checkInitialized(pb, t) +} + +// All required fields set, no defaults provided. +func TestEncodeDecode1(t *testing.T) { + pb := initGoTest(false) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 0x20 + "714000000000000000"+ // field 14, encoding 1, value 0x40 + "78a019"+ // field 15, encoding 0, value 0xca0 = 3232 + "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string" + "b304"+ // field 70, encoding 3, start group + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // field 70, encoding 4, end group + "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f") // field 103, encoding 0, 0x7f zigzag64 +} + +// All required fields set, defaults provided. 
+func TestEncodeDecode2(t *testing.T) { + pb := initGoTest(true) + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All default fields set to their default value by hand +func TestEncodeDecode3(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolDefaulted = Bool(true) + pb.F_Int32Defaulted = Int32(32) + pb.F_Int64Defaulted = Int64(64) + pb.F_Fixed32Defaulted = Uint32(320) + pb.F_Fixed64Defaulted = Uint64(640) + pb.F_Uint32Defaulted = Uint32(3200) + pb.F_Uint64Defaulted = Uint64(6400) + pb.F_FloatDefaulted = Float32(314159) + pb.F_DoubleDefaulted = Float64(271828) + pb.F_StringDefaulted = String("hello, \"world!\"\n") + pb.F_BytesDefaulted = []byte("Bignose") + pb.F_Sint32Defaulted = Int32(-32) + pb.F_Sint64Defaulted = Int64(-64) + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 
271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all non-defaulted optional fields have values. +func TestEncodeDecode4(t *testing.T) { + pb := initGoTest(true) + pb.Table = String("hello") + pb.Param = Int32(7) + pb.OptionalField = initGoTestField() + pb.F_BoolOptional = Bool(true) + pb.F_Int32Optional = Int32(32) + pb.F_Int64Optional = Int64(64) + pb.F_Fixed32Optional = Uint32(3232) + pb.F_Fixed64Optional = Uint64(6464) + pb.F_Uint32Optional = Uint32(323232) + pb.F_Uint64Optional = Uint64(646464) + pb.F_FloatOptional = Float32(32.) + pb.F_DoubleOptional = Float64(64.) + pb.F_StringOptional = String("hello") + pb.F_BytesOptional = []byte("Bignose") + pb.F_Sint32Optional = Int32(-32) + pb.F_Sint64Optional = Int64(-64) + pb.Optionalgroup = initGoTest_OptionalGroup() + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" + "1807"+ // field 3, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "f00101"+ // field 30, encoding 0, value 1 + "f80120"+ // field 31, encoding 0, value 32 + "800240"+ // field 32, encoding 0, value 64 + "8d02a00c0000"+ // field 33, encoding 5, value 3232 + "91024019000000000000"+ // field 34, encoding 1, value 6464 + "9802a0dd13"+ // field 35, encoding 0, value 323232 + "a002c0ba27"+ // field 36, encoding 0, value 646464 + "ad0200000042"+ // field 37, encoding 5, value 32.0 + "b1020000000000005040"+ // field 38, encoding 1, value 64.0 + "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "d305"+ // start group field 90 level 1 + 
"da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" + "d405"+ // end group field 90 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" + "f0123f"+ // field 302, encoding 0, value 63 + "f8127f"+ // field 303, encoding 0, value 127 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestEncodeDecode5(t *testing.T) { + pb := initGoTest(true) + pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} + pb.F_BoolRepeated = []bool{false, true} + pb.F_Int32Repeated = []int32{32, 33} + pb.F_Int64Repeated = []int64{64, 65} + pb.F_Fixed32Repeated = []uint32{3232, 3333} + pb.F_Fixed64Repeated = []uint64{6464, 6565} + pb.F_Uint32Repeated = []uint32{323232, 333333} + pb.F_Uint64Repeated = []uint64{646464, 656565} + pb.F_FloatRepeated = []float32{32., 33.} + pb.F_DoubleRepeated = []float64{64., 65.} + pb.F_StringRepeated = []string{"hello", "sailor"} + pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} + pb.F_Sint32Repeated = []int32{32, -32} + pb.F_Sint64Repeated = []int64{64, -64} + pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "a00100"+ // field 20, encoding 0, value 0 + "a00101"+ // field 20, encoding 0, value 1 + "a80120"+ // field 21, encoding 0, value 32 + "a80121"+ // field 21, encoding 0, value 33 + "b00140"+ // field 22, encoding 0, value 64 + "b00141"+ // field 22, encoding 0, value 65 + "bd01a00c0000"+ // field 23, encoding 5, value 3232 + "bd01050d0000"+ // field 23, encoding 5, value 3333 + "c1014019000000000000"+ // field 24, encoding 1, value 6464 + "c101a519000000000000"+ // field 24, encoding 1, value 6565 + "c801a0dd13"+ // field 25, encoding 0, value 323232 + "c80195ac14"+ // field 25, encoding 0, value 333333 + "d001c0ba27"+ // field 26, encoding 0, value 646464 + "d001b58928"+ // field 26, encoding 0, value 656565 + "dd0100000042"+ // field 27, encoding 5, value 32.0 + "dd0100000442"+ // field 27, encoding 5, value 33.0 + "e1010000000000005040"+ // field 28, encoding 1, value 64.0 + "e1010000000000405040"+ // field 28, encoding 1, value 65.0 + "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" + "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" + "c00201"+ // field 40, encoding 0, value 1 + "c80220"+ // field 41, encoding 0, value 32 + "d00240"+ // field 
42, encoding 0, value 64 + "dd0240010000"+ // field 43, encoding 5, value 320 + "e1028002000000000000"+ // field 44, encoding 1, value 640 + "e8028019"+ // field 45, encoding 0, value 3200 + "f0028032"+ // field 46, encoding 0, value 6400 + "fd02e0659948"+ // field 47, encoding 5, value 314159.0 + "81030000000050971041"+ // field 48, encoding 1, value 271828.0 + "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "8305"+ // start group field 80 level 1 + "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" + "8405"+ // end group field 80 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "ca0c03"+"626967"+ // field 201, encoding 2, string "big" + "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" + "d00c40"+ // field 202, encoding 0, value 32 + "d00c3f"+ // field 202, encoding 0, value -32 + "d80c8001"+ // field 203, encoding 0, value 64 + "d80c7f"+ // field 203, encoding 0, value -64 + "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" + "90193f"+ // field 402, encoding 0, value 63 + "98197f") // field 403, encoding 0, value 127 + +} + +// All required fields set, all packed repeated fields given two values. +func TestEncodeDecode6(t *testing.T) { + pb := initGoTest(false) + pb.F_BoolRepeatedPacked = []bool{false, true} + pb.F_Int32RepeatedPacked = []int32{32, 33} + pb.F_Int64RepeatedPacked = []int64{64, 65} + pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} + pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} + pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} + pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} + pb.F_FloatRepeatedPacked = []float32{32., 33.} + pb.F_DoubleRepeatedPacked = []float64{64., 65.} + pb.F_Sint32RepeatedPacked = []int32{32, -32} + pb.F_Sint64RepeatedPacked = []int64{64, -64} + + overify(t, pb, + "0807"+ // field 1, encoding 0, value 7 + "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) + "5001"+ // field 10, encoding 0, value 1 + "5803"+ // field 11, encoding 0, value 3 + "6006"+ // field 12, encoding 0, value 6 + "6d20000000"+ // field 13, encoding 5, value 32 + "714000000000000000"+ // field 14, encoding 1, value 64 + "78a019"+ // field 15, encoding 0, value 3232 + "8001c032"+ // field 16, encoding 0, value 6464 + "8d0100004a45"+ // field 17, encoding 5, value 3232.0 + "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 + "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" + "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 + "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 + "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 + "aa0308"+ // field 53, encoding 2, 8 bytes + "a00c0000050d0000"+ // value 3232, value 3333 + "b20310"+ // field 54, encoding 2, 16 bytes + "4019000000000000a519000000000000"+ // value 6464, value 6565 + "ba0306"+ // field 55, encoding 2, 6 bytes + "a0dd1395ac14"+ // value 323232, value 333333 + "c20306"+ // field 56, encoding 2, 6 bytes + "c0ba27b58928"+ // value 646464, value 656565 + "ca0308"+ // field 57, 
encoding 2, 8 bytes + "0000004200000442"+ // value 32.0, value 33.0 + "d20310"+ // field 58, encoding 2, 16 bytes + "00000000000050400000000000405040"+ // value 64.0, value 65.0 + "b304"+ // start group field 70 level 1 + "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" + "b404"+ // end group field 70 level 1 + "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" + "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "b21f02"+ // field 502, encoding 2, 2 bytes + "403f"+ // value 32, value -32 + "ba1f03"+ // field 503, encoding 2, 3 bytes + "80017f") // value 64, value -64 +} + +// Test that we can encode empty bytes fields. +func TestEncodeDecodeBytes1(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRequired = []byte{} + pb.F_BytesRepeated = [][]byte{{}} + pb.F_BytesOptional = []byte{} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { + t.Error("required empty bytes field is incorrect") + } + if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { + t.Error("repeated empty bytes field is incorrect") + } + if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { + t.Error("optional empty bytes field is incorrect") + } +} + +// Test that we encode nil-valued fields of a repeated bytes field correctly. +// Since entries in a repeated field cannot be nil, nil must mean empty value. +func TestEncodeDecodeBytes2(t *testing.T) { + pb := initGoTest(false) + + // Create our bytes + pb.F_BytesRepeated = [][]byte{nil} + + d, err := Marshal(pb) + if err != nil { + t.Error(err) + } + + pbd := new(GoTest) + if err := Unmarshal(d, pbd); err != nil { + t.Error(err) + } + + if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil { + t.Error("Unexpected value for repeated bytes field") + } +} + +// All required fields set, defaults provided, all repeated fields given two values. +func TestSkippingUnrecognizedFields(t *testing.T) { + o := old() + pb := initGoTestField() + + // Marshal it normally. + o.Marshal(pb) + + // Now new a GoSkipTest record. + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + // Marshal it into same buffer. + o.Marshal(skip) + + pbd := new(GoTestField) + o.Unmarshal(pbd) + + // The __unrecognized field should be a marshaling of GoSkipTest + skipd := new(GoSkipTest) + + o.SetBuf(pbd.XXX_unrecognized) + o.Unmarshal(skipd) + + if *skipd.SkipInt32 != *skip.SkipInt32 { + t.Error("skip int32", skipd.SkipInt32) + } + if *skipd.SkipFixed32 != *skip.SkipFixed32 { + t.Error("skip fixed32", skipd.SkipFixed32) + } + if *skipd.SkipFixed64 != *skip.SkipFixed64 { + t.Error("skip fixed64", skipd.SkipFixed64) + } + if *skipd.SkipString != *skip.SkipString { + t.Error("skip string", *skipd.SkipString) + } + if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 { + t.Error("skip group int32", skipd.Skipgroup.GroupInt32) + } + if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString { + t.Error("skip group string", *skipd.Skipgroup.GroupString) + } +} + +// Check that unrecognized fields of a submessage are preserved. 
+func TestSubmessageUnrecognizedFields(t *testing.T) { + nm := &NewMessage{ + Nested: &NewMessage_Nested{ + Name: String("Nigel"), + FoodGroup: String("carbs"), + }, + } + b, err := Marshal(nm) + if err != nil { + t.Fatalf("Marshal of NewMessage: %v", err) + } + + // Unmarshal into an OldMessage. + om := new(OldMessage) + if err := Unmarshal(b, om); err != nil { + t.Fatalf("Unmarshal to OldMessage: %v", err) + } + exp := &OldMessage{ + Nested: &OldMessage_Nested{ + Name: String("Nigel"), + // normal protocol buffer users should not do this + XXX_unrecognized: []byte("\x12\x05carbs"), + }, + } + if !Equal(om, exp) { + t.Errorf("om = %v, want %v", om, exp) + } + + // Clone the OldMessage. + om = Clone(om).(*OldMessage) + if !Equal(om, exp) { + t.Errorf("Clone(om) = %v, want %v", om, exp) + } + + // Marshal the OldMessage, then unmarshal it into an empty NewMessage. + if b, err = Marshal(om); err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + t.Logf("Marshal(%v) -> %q", om, b) + nm2 := new(NewMessage) + if err := Unmarshal(b, nm2); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + if !Equal(nm, nm2) { + t.Errorf("NewMessage round-trip: %v => %v", nm, nm2) + } +} + +// Check that an int32 field can be upgraded to an int64 field. +func TestNegativeInt32(t *testing.T) { + om := &OldMessage{ + Num: Int32(-1), + } + b, err := Marshal(om) + if err != nil { + t.Fatalf("Marshal of OldMessage: %v", err) + } + + // Check the size. It should be 11 bytes; + // 1 for the field/wire type, and 10 for the negative number. + if len(b) != 11 { + t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b) + } + + // Unmarshal into a NewMessage. + nm := new(NewMessage) + if err := Unmarshal(b, nm); err != nil { + t.Fatalf("Unmarshal to NewMessage: %v", err) + } + want := &NewMessage{ + Num: Int64(-1), + } + if !Equal(nm, want) { + t.Errorf("nm = %v, want %v", nm, want) + } +} + +// Check that we can grow an array (repeated field) to have many elements. +// This test doesn't depend only on our encoding; for variety, it makes sure +// we create, encode, and decode the correct contents explicitly. It's therefore +// a bit messier. +// This test also uses (and hence tests) the Marshal/Unmarshal functions +// instead of the methods. +func TestBigRepeated(t *testing.T) { + pb := initGoTest(true) + + // Create the arrays + const N = 50 // Internally the library starts much smaller. + pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N) + pb.F_Sint64Repeated = make([]int64, N) + pb.F_Sint32Repeated = make([]int32, N) + pb.F_BytesRepeated = make([][]byte, N) + pb.F_StringRepeated = make([]string, N) + pb.F_DoubleRepeated = make([]float64, N) + pb.F_FloatRepeated = make([]float32, N) + pb.F_Uint64Repeated = make([]uint64, N) + pb.F_Uint32Repeated = make([]uint32, N) + pb.F_Fixed64Repeated = make([]uint64, N) + pb.F_Fixed32Repeated = make([]uint32, N) + pb.F_Int64Repeated = make([]int64, N) + pb.F_Int32Repeated = make([]int32, N) + pb.F_BoolRepeated = make([]bool, N) + pb.RepeatedField = make([]*GoTestField, N) + + // Fill in the arrays with checkable values. 
+ igtf := initGoTestField() + igtrg := initGoTest_RepeatedGroup() + for i := 0; i < N; i++ { + pb.Repeatedgroup[i] = igtrg + pb.F_Sint64Repeated[i] = int64(i) + pb.F_Sint32Repeated[i] = int32(i) + s := fmt.Sprint(i) + pb.F_BytesRepeated[i] = []byte(s) + pb.F_StringRepeated[i] = s + pb.F_DoubleRepeated[i] = float64(i) + pb.F_FloatRepeated[i] = float32(i) + pb.F_Uint64Repeated[i] = uint64(i) + pb.F_Uint32Repeated[i] = uint32(i) + pb.F_Fixed64Repeated[i] = uint64(i) + pb.F_Fixed32Repeated[i] = uint32(i) + pb.F_Int64Repeated[i] = int64(i) + pb.F_Int32Repeated[i] = int32(i) + pb.F_BoolRepeated[i] = i%2 == 0 + pb.RepeatedField[i] = igtf + } + + // Marshal. + buf, _ := Marshal(pb) + + // Now test Unmarshal by recreating the original buffer. + pbd := new(GoTest) + Unmarshal(buf, pbd) + + // Check the checkable values + for i := uint64(0); i < N; i++ { + if pbd.Repeatedgroup[i] == nil { // TODO: more checking? + t.Error("pbd.Repeatedgroup bad") + } + var x uint64 + x = uint64(pbd.F_Sint64Repeated[i]) + if x != i { + t.Error("pbd.F_Sint64Repeated bad", x, i) + } + x = uint64(pbd.F_Sint32Repeated[i]) + if x != i { + t.Error("pbd.F_Sint32Repeated bad", x, i) + } + s := fmt.Sprint(i) + equalbytes(pbd.F_BytesRepeated[i], []byte(s), t) + if pbd.F_StringRepeated[i] != s { + t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) + } + x = uint64(pbd.F_DoubleRepeated[i]) + if x != i { + t.Error("pbd.F_DoubleRepeated bad", x, i) + } + x = uint64(pbd.F_FloatRepeated[i]) + if x != i { + t.Error("pbd.F_FloatRepeated bad", x, i) + } + x = pbd.F_Uint64Repeated[i] + if x != i { + t.Error("pbd.F_Uint64Repeated bad", x, i) + } + x = uint64(pbd.F_Uint32Repeated[i]) + if x != i { + t.Error("pbd.F_Uint32Repeated bad", x, i) + } + x = pbd.F_Fixed64Repeated[i] + if x != i { + t.Error("pbd.F_Fixed64Repeated bad", x, i) + } + x = uint64(pbd.F_Fixed32Repeated[i]) + if x != i { + t.Error("pbd.F_Fixed32Repeated bad", x, i) + } + x = uint64(pbd.F_Int64Repeated[i]) + if x != i { + t.Error("pbd.F_Int64Repeated bad", x, i) + } + x = uint64(pbd.F_Int32Repeated[i]) + if x != i { + t.Error("pbd.F_Int32Repeated bad", x, i) + } + if pbd.F_BoolRepeated[i] != (i%2 == 0) { + t.Error("pbd.F_BoolRepeated bad", x, i) + } + if pbd.RepeatedField[i] == nil { // TODO: more checking? + t.Error("pbd.RepeatedField bad") + } + } +} + +// Verify we give a useful message when decoding to the wrong structure type. +func TestTypeMismatch(t *testing.T) { + pb1 := initGoTest(true) + + // Marshal + o := old() + o.Marshal(pb1) + + // Now Unmarshal it to the wrong type. 
+ pb2 := initGoTestField() + err := o.Unmarshal(pb2) + if err == nil { + t.Error("expected error, got no error") + } else if !strings.Contains(err.Error(), "bad wiretype") { + t.Error("expected bad wiretype error, got", err) + } +} + +func encodeDecode(t *testing.T, in, out Message, msg string) { + buf, err := Marshal(in) + if err != nil { + t.Fatalf("failed marshaling %v: %v", msg, err) + } + if err := Unmarshal(buf, out); err != nil { + t.Fatalf("failed unmarshaling %v: %v", msg, err) + } +} + +func TestPackedNonPackedDecoderSwitching(t *testing.T) { + np, p := new(NonPackedTest), new(PackedTest) + + // non-packed -> packed + np.A = []int32{0, 1, 1, 2, 3, 5} + encodeDecode(t, np, p, "non-packed -> packed") + if !reflect.DeepEqual(np.A, p.B) { + t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B) + } + + // packed -> non-packed + np.Reset() + p.B = []int32{3, 1, 4, 1, 5, 9} + encodeDecode(t, p, np, "packed -> non-packed") + if !reflect.DeepEqual(p.B, np.A) { + t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A) + } +} + +func TestProto1RepeatedGroup(t *testing.T) { + pb := &MessageList{ + Message: []*MessageList_Message{ + { + Name: String("blah"), + Count: Int32(7), + }, + // NOTE: pb.Message[1] is a nil + nil, + }, + } + + o := old() + err := o.Marshal(pb) + if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") { + t.Fatalf("unexpected or no error when marshaling: %v", err) + } +} + +// Test that enums work. Checks for a bug introduced by making enums +// named types instead of int32: newInt32FromUint64 would crash with +// a type mismatch in reflect.PointTo. +func TestEnum(t *testing.T) { + pb := new(GoEnum) + pb.Foo = FOO_FOO1.Enum() + o := old() + if err := o.Marshal(pb); err != nil { + t.Fatal("error encoding enum:", err) + } + pb1 := new(GoEnum) + if err := o.Unmarshal(pb1); err != nil { + t.Fatal("error decoding enum:", err) + } + if *pb1.Foo != FOO_FOO1 { + t.Error("expected 7 but got ", *pb1.Foo) + } +} + +// Enum types have String methods. Check that enum fields can be printed. +// We don't care what the value actually is, just as long as it doesn't crash. +func TestPrintingNilEnumFields(t *testing.T) { + pb := new(GoEnum) + _ = fmt.Sprintf("%+v", pb) +} + +// Verify that absent required fields cause Marshal/Unmarshal to return errors. +func TestRequiredFieldEnforcement(t *testing.T) { + pb := new(GoTestField) + _, err := Marshal(pb) + if err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") { + t.Errorf("marshal: bad error type: %v", err) + } + + // A slightly sneaky, yet valid, proto. It encodes the same required field twice, + // so simply counting the required fields is insufficient. + // field 1, encoding 2, value "hi" + buf := []byte("\x0A\x02hi\x0A\x02hi") + err = Unmarshal(buf, pb) + if err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors. 
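// An aside on TestPackedNonPackedDecoderSwitching above: a non-packed
// repeated int32 repeats the key for every element, while the packed form
// writes the key once with wire type 2 followed by a length-delimited run
// of varints. A minimal hand-encoded sketch for field number 1 with the
// values {3, 270}:
//
//	nonPacked := []byte{0x08, 0x03, 0x08, 0x8E, 0x02} // key 1<<3|0 before each element
//	packed := []byte{0x0A, 0x03, 0x03, 0x8E, 0x02}    // key 1<<3|2, then length 3
//
// Both decode to the same []int32, which is why the decoder can switch
// between the two encodings freely. The next test applies the same
// required-field enforcement to groups.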
+func TestRequiredFieldEnforcementGroups(t *testing.T) { + pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}} + if _, err := Marshal(pb); err == nil { + t.Error("marshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") { + t.Errorf("marshal: bad error type: %v", err) + } + + buf := []byte{11, 12} + if err := Unmarshal(buf, pb); err == nil { + t.Error("unmarshal: expected error, got nil") + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { + t.Errorf("unmarshal: bad error type: %v", err) + } +} + +func TestTypedNilMarshal(t *testing.T) { + // A typed nil should return ErrNil and not crash. + { + var m *GoEnum + if _, err := Marshal(m); err != ErrNil { + t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err) + } + } + + { + m := &Communique{Union: &Communique_Msg{nil}} + if _, err := Marshal(m); err == nil || err == ErrNil { + t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) + } + } +} + +// A type that implements the Marshaler interface, but is not nillable. +type nonNillableInt uint64 + +func (nni nonNillableInt) Marshal() ([]byte, error) { + return EncodeVarint(uint64(nni)), nil +} + +type NNIMessage struct { + nni nonNillableInt +} + +func (*NNIMessage) Reset() {} +func (*NNIMessage) String() string { return "" } +func (*NNIMessage) ProtoMessage() {} + +// A type that implements the Marshaler interface and is nillable. +type nillableMessage struct { + x uint64 +} + +func (nm *nillableMessage) Marshal() ([]byte, error) { + return EncodeVarint(nm.x), nil +} + +type NMMessage struct { + nm *nillableMessage +} + +func (*NMMessage) Reset() {} +func (*NMMessage) String() string { return "" } +func (*NMMessage) ProtoMessage() {} + +// Verify a type that uses the Marshaler interface, but has a nil pointer. +func TestNilMarshaler(t *testing.T) { + // Try a struct with a Marshaler field that is nil. + // It should be directly marshable. + nmm := new(NMMessage) + if _, err := Marshal(nmm); err != nil { + t.Error("unexpected error marshaling nmm: ", err) + } + + // Try a struct with a Marshaler field that is not nillable. + nnim := new(NNIMessage) + nnim.nni = 7 + var _ Marshaler = nnim.nni // verify it is truly a Marshaler + if _, err := Marshal(nnim); err != nil { + t.Error("unexpected error marshaling nnim: ", err) + } +} + +func TestAllSetDefaults(t *testing.T) { + // Exercise SetDefaults with all scalar field types. + m := &Defaults{ + // NaN != NaN, so override that here. + F_Nan: Float32(1.7), + } + expected := &Defaults{ + F_Bool: Bool(true), + F_Int32: Int32(32), + F_Int64: Int64(64), + F_Fixed32: Uint32(320), + F_Fixed64: Uint64(640), + F_Uint32: Uint32(3200), + F_Uint64: Uint64(6400), + F_Float: Float32(314159), + F_Double: Float64(271828), + F_String: String(`hello, "world!"` + "\n"), + F_Bytes: []byte("Bignose"), + F_Sint32: Int32(-32), + F_Sint64: Int64(-64), + F_Enum: Defaults_GREEN.Enum(), + F_Pinf: Float32(float32(math.Inf(1))), + F_Ninf: Float32(float32(math.Inf(-1))), + F_Nan: Float32(1.7), + StrZero: String(""), + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithSetField(t *testing.T) { + // Check that a set value is not overridden. 
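// SetDefaults only fills in scalar pointer fields that are nil; F_Int32 is
// already set to 12 here, so the default of 32 declared for it (see
// TestAllSetDefaults above) must be left untouched.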
+ m := &Defaults{ + F_Int32: Int32(12), + } + SetDefaults(m) + if v := m.GetF_Int32(); v != 12 { + t.Errorf("m.FInt32 = %v, want 12", v) + } +} + +func TestSetDefaultsWithSubMessage(t *testing.T) { + m := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + }, + } + expected := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("gopher"), + Port: Int32(4000), + }, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { + m := &MyMessage{ + RepInner: []*InnerMessage{{}}, + } + expected := &MyMessage{ + RepInner: []*InnerMessage{{ + Port: Int32(4000), + }}, + } + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { + m := &MyMessage{ + Pet: []string{"turtle", "wombat"}, + } + expected := Clone(m) + SetDefaults(m) + if !Equal(m, expected) { + t.Errorf("\n got %v\nwant %v", m, expected) + } +} + +func TestMaximumTagNumber(t *testing.T) { + m := &MaxTag{ + LastField: String("natural goat essence"), + } + buf, err := Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal failed: %v", err) + } + m2 := new(MaxTag) + if err := Unmarshal(buf, m2); err != nil { + t.Fatalf("proto.Unmarshal failed: %v", err) + } + if got, want := m2.GetLastField(), *m.LastField; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestJSON(t *testing.T) { + m := &MyMessage{ + Count: Int32(4), + Pet: []string{"bunny", "kitty"}, + Inner: &InnerMessage{ + Host: String("cauchy"), + }, + Bikeshed: MyMessage_GREEN.Enum(), + } + const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` + + b, err := json.Marshal(m) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + s := string(b) + if s != expected { + t.Errorf("got %s\nwant %s", s, expected) + } + + received := new(MyMessage) + if err := json.Unmarshal(b, received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } + + // Test unmarshalling of JSON with symbolic enum name. + const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` + received.Reset() + if err := json.Unmarshal([]byte(old), received); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !Equal(received, m) { + t.Fatalf("got %s, want %s", received, m) + } +} + +func TestBadWireType(t *testing.T) { + b := []byte{7<<3 | 6} // field 7, wire type 6 + pb := new(OtherMessage) + if err := Unmarshal(b, pb); err == nil { + t.Errorf("Unmarshal did not fail") + } else if !strings.Contains(err.Error(), "unknown wire type") { + t.Errorf("wrong error: %v", err) + } +} + +func TestBytesWithInvalidLength(t *testing.T) { + // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. + b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} + Unmarshal(b, new(MyMessage)) +} + +func TestLengthOverflow(t *testing.T) { + // Overflowing a length should not panic. + b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} + Unmarshal(b, new(MyMessage)) +} + +func TestVarintOverflow(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
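// In the input below, field 3 carries a length varint made of ten 0x80
// continuation bytes. Ten bytes exhaust the 64-bit range, so the decoder
// must report an overflow error instead of reading the trailing 0x01.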
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + +func TestUnmarshalFuzz(t *testing.T) { + const N = 1000 + seed := time.Now().UnixNano() + t.Logf("RNG seed is %d", seed) + rng := rand.New(rand.NewSource(seed)) + buf := make([]byte, 20) + for i := 0; i < N; i++ { + for j := range buf { + buf[j] = byte(rng.Intn(256)) + } + fuzzUnmarshal(t, buf) + } +} + +func TestMergeMessages(t *testing.T) { + pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} + data, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + pb1 := new(MessageList) + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("first Unmarshal: %v", err) + } + if err := Unmarshal(data, pb1); err != nil { + t.Fatalf("second Unmarshal: %v", err) + } + if len(pb1.Message) != 1 { + t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) + } + + pb2 := new(MessageList) + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("first UnmarshalMerge: %v", err) + } + if err := UnmarshalMerge(data, pb2); err != nil { + t.Fatalf("second UnmarshalMerge: %v", err) + } + if len(pb2.Message) != 2 { + t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) + } +} + +func TestExtensionMarshalOrder(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { + t.Fatalf("SetExtension: %v", err) + } + if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { + t.Fatalf("SetExtension: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. + var orig []byte + for i := 0; i < 100; i++ { + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if i == 0 { + orig = b + continue + } + if !bytes.Equal(b, orig) { + t.Errorf("Bytes differ on attempt #%d", i) + } + } +} + +// Many extensions, because small maps might not iterate differently on each iteration. +var exts = []*ExtensionDesc{ + E_X201, + E_X202, + E_X203, + E_X204, + E_X205, + E_X206, + E_X207, + E_X208, + E_X209, + E_X210, + E_X211, + E_X212, + E_X213, + E_X214, + E_X215, + E_X216, + E_X217, + E_X218, + E_X219, + E_X220, + E_X221, + E_X222, + E_X223, + E_X224, + E_X225, + E_X226, + E_X227, + E_X228, + E_X229, + E_X230, + E_X231, + E_X232, + E_X233, + E_X234, + E_X235, + E_X236, + E_X237, + E_X238, + E_X239, + E_X240, + E_X241, + E_X242, + E_X243, + E_X244, + E_X245, + E_X246, + E_X247, + E_X248, + E_X249, + E_X250, +} + +func TestMessageSetMarshalOrder(t *testing.T) { + m := &MyMessageSet{} + for _, x := range exts { + if err := SetExtension(m, x, &Empty{}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + } + + buf, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // Serialize m several times, and check we get the same bytes each time. 
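// Extensions are stored in a map, and Go randomizes map iteration order,
// so a naive encoder could emit a different byte order on every Marshal;
// the encoder must impose a deterministic order of its own. The fifty
// extensions set above make an accidentally stable ordering very unlikely.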
+ for i := 0; i < 10; i++ { + b1, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(b1, buf) { + t.Errorf("Bytes differ on re-Marshal #%d", i) + } + + m2 := &MyMessageSet{} + if err := Unmarshal(buf, m2); err != nil { + t.Errorf("Unmarshal: %v", err) + } + b2, err := Marshal(m2) + if err != nil { + t.Errorf("re-Marshal: %v", err) + } + if !bytes.Equal(b2, buf) { + t.Errorf("Bytes differ on round-trip #%d", i) + } + } +} + +func TestUnmarshalMergesMessages(t *testing.T) { + // If a nested message occurs twice in the input, + // the fields should be merged when decoding. + a := &OtherMessage{ + Key: Int64(123), + Inner: &InnerMessage{ + Host: String("polhode"), + Port: Int32(1234), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &OtherMessage{ + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Connected: Bool(true), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &OtherMessage{ + Key: Int64(123), + Weight: Float32(1.2), + Inner: &InnerMessage{ + Host: String("herpolhode"), + Port: Int32(1234), + Connected: Bool(true), + }, + } + got := new(OtherMessage) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + +func TestEncodingSizes(t *testing.T) { + tests := []struct { + m Message + n int + }{ + {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, + {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, + {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, + {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, + } + for _, test := range tests { + b, err := Marshal(test.m) + if err != nil { + t.Errorf("Marshal(%v): %v", test.m, err) + continue + } + if len(b) != test.n { + t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) + } + } +} + +func TestRequiredNotSetError(t *testing.T) { + pb := initGoTest(false) + pb.RequiredField.Label = nil + pb.F_Int32Required = nil + pb.F_Int64Required = nil + + expected := "0807" + // field 1, encoding 0, value 7 + "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) + "5001" + // field 10, encoding 0, value 1 + "6d20000000" + // field 13, encoding 5, value 0x20 + "714000000000000000" + // field 14, encoding 1, value 0x40 + "78a019" + // field 15, encoding 0, value 0xca0 = 3232 + "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 + "8d0100004a45" + // field 17, encoding 5, value 3232.0 + "9101000000000040b940" + // field 18, encoding 1, value 6464.0 + "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" + "b304" + // field 70, encoding 3, start group + "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" + "b404" + // field 70, encoding 4, end group + "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" + "b0063f" + // field 102, encoding 0, 0x3f zigzag32 + "b8067f" // field 103, encoding 0, 0x7f zigzag64 + + o := old() + bytes, err := Marshal(pb) + if _, ok := err.(*RequiredNotSetError); !ok { + fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("expected = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-1 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 1", bytes) + t.Fatalf("expected = %s", expected) + } + + // Now test Unmarshal by 
recreating the original buffer. + pbd := new(GoTest) + err = Unmarshal(bytes, pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + t.Errorf("unmarshal wrong err msg: %v", err) + } + bytes, err = Marshal(pbd) + if _, ok := err.(*RequiredNotSetError); !ok { + t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) + o.DebugPrint("", bytes) + t.Fatalf("string = %s", expected) + } + if strings.Index(err.Error(), "RequiredField.Label") < 0 { + t.Errorf("marshal-2 wrong err msg: %v", err) + } + if !equal(bytes, expected, t) { + o.DebugPrint("neq 2", bytes) + t.Fatalf("string = %s", expected) + } +} + +func fuzzUnmarshal(t *testing.T, data []byte) { + defer func() { + if e := recover(); e != nil { + t.Errorf("These bytes caused a panic: %+v", data) + t.Logf("Stack:\n%s", debug.Stack()) + t.FailNow() + } + }() + + pb := new(MyMessage) + Unmarshal(data, pb) +} + +func TestMapFieldMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + // b should be the concatenation of these three byte sequences in some order. + parts := []string{ + "\n\a\b\x01\x12\x03Rob", + "\n\a\b\x04\x12\x03Ian", + "\n\b\b\x08\x12\x04Dave", + } + ok := false + for i := range parts { + for j := range parts { + if j == i { + continue + } + for k := range parts { + if k == i || k == j { + continue + } + try := parts[i] + parts[j] + parts[k] + if bytes.Equal(b, []byte(try)) { + ok = true + break + } + } + } + } + if !ok { + t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) + } + t.Logf("FYI b: %q", b) + + (new(Buffer)).DebugPrint("Dump of b", b) +} + +func TestMapFieldRoundTrips(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + MsgMapping: map[int64]*FloatingPoint{ + 0x7001: &FloatingPoint{F: Float64(2.0)}, + }, + ByteMapping: map[bool][]byte{ + false: []byte("that's not right!"), + true: []byte("aye, 'tis true!"), + }, + } + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("FYI b: %q", b) + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + for _, pair := range [][2]interface{}{ + {m.NameMapping, m2.NameMapping}, + {m.MsgMapping, m2.MsgMapping}, + {m.ByteMapping, m2.ByteMapping}, + } { + if !reflect.DeepEqual(pair[0], pair[1]) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) + } + } +} + +func TestMapFieldWithNil(t *testing.T) { + m1 := &MessageWithMap{ + MsgMapping: map[int64]*FloatingPoint{ + 1: nil, + }, + } + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.MsgMapping[1]; !ok { + t.Error("msg_mapping[1] not present") + } else if v != nil { + t.Errorf("msg_mapping[1] not nil: %v", v) + } +} + +func TestMapFieldWithNilBytes(t *testing.T) { + m1 := &MessageWithMap{ + ByteMapping: map[bool][]byte{ + false: []byte{}, + true: nil, + }, + } + n := Size(m1) + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if n != len(b) { 
+ t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.ByteMapping[false]; !ok { + t.Error("byte_mapping[false] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[false] not empty: %#v", v) + } + if v, ok := m2.ByteMapping[true]; !ok { + t.Error("byte_mapping[true] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[true] not empty: %#v", v) + } +} + +func TestDecodeMapFieldMissingKey(t *testing.T) { + b := []byte{ + 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes + // no key + 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m" + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing key: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want) + } +} + +func TestDecodeMapFieldMissingValue(t *testing.T) { + b := []byte{ + 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes + 0x08, 0x01, // varint key, value 1 + // no value + } + got := &MessageWithMap{} + err := Unmarshal(b, got) + if err != nil { + t.Fatalf("failed to marshal map with missing value: %v", err) + } + want := &MessageWithMap{NameMapping: map[int32]string{1: ""}} + if !Equal(got, want) { + t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want) + } +} + +func TestOneof(t *testing.T) { + m := &Communique{} + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal of empty message with oneof: %v", err) + } + if len(b) != 0 { + t.Errorf("Marshal of empty message yielded too many bytes: %v", b) + } + + m = &Communique{ + Union: &Communique_Name{"Barry"}, + } + + // Round-trip. + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof: %v", err) + } + if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5) + t.Errorf("Incorrect marshal of message with oneof: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof: %v", err) + } + if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" { + t.Errorf("After round trip, Union = %+v", m.Union) + } + if name := m.GetName(); name != "Barry" { + t.Errorf("After round trip, GetName = %q, want %q", name, "Barry") + } + + // Let's try with a message in the oneof. + m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} + b, err = Marshal(m) + if err != nil { + t.Fatalf("Marshal of message with oneof set to message: %v", err) + } + if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16) + t.Errorf("Incorrect marshal of message with oneof set to message: %v", b) + } + m.Reset() + if err := Unmarshal(b, m); err != nil { + t.Fatalf("Unmarshal of message with oneof set to message: %v", err) + } + ss, ok := m.Union.(*Communique_Msg) + if !ok || ss.Msg.GetStringField() != "deep deep string" { + t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union) + } +} + +func TestInefficientPackedBool(t *testing.T) { + // https://github.com/golang/protobuf/issues/76 + inp := []byte{ + 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes + // Usually a bool should take a single byte, + // but it is permitted to be any varint. 
+ 0xb9, 0x30, + } + if err := Unmarshal(inp, new(MoreRepeated)); err != nil { + t.Error(err) + } +} + +// Benchmarks + +func testMsg() *GoTest { + pb := initGoTest(true) + const N = 1000 // Internally the library starts much smaller. + pb.F_Int32Repeated = make([]int32, N) + pb.F_DoubleRepeated = make([]float64, N) + for i := 0; i < N; i++ { + pb.F_Int32Repeated[i] = int32(i) + pb.F_DoubleRepeated[i] = float64(i) + } + return pb +} + +func bytesMsg() *GoTest { + pb := initGoTest(true) + buf := make([]byte, 4000) + for i := range buf { + buf[i] = byte(i) + } + pb.F_BytesDefaulted = buf + return pb +} + +func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { + d, _ := marshal(pb) + b.SetBytes(int64(len(d))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + marshal(pb) + } +} + +func benchmarkBufferMarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + p.Reset() + err := p.Marshal(pb0) + return p.Bytes(), err + }) +} + +func benchmarkSize(b *testing.B, pb Message) { + benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { + Size(pb) + return nil, nil + }) +} + +func newOf(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + return reflect.New(in.Type().Elem()).Interface().(Message) +} + +func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { + d, _ := Marshal(pb) + b.SetBytes(int64(len(d))) + pbd := newOf(pb) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + unmarshal(d, pbd) + } +} + +func benchmarkBufferUnmarshal(b *testing.B, pb Message) { + p := NewBuffer(nil) + benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { + p.SetBuf(d) + return p.Unmarshal(pb0) + }) +} + +// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} + +func BenchmarkMarshal(b *testing.B) { + benchmarkMarshal(b, testMsg(), Marshal) +} + +func BenchmarkBufferMarshal(b *testing.B) { + benchmarkBufferMarshal(b, testMsg()) +} + +func BenchmarkSize(b *testing.B) { + benchmarkSize(b, testMsg()) +} + +func BenchmarkUnmarshal(b *testing.B) { + benchmarkUnmarshal(b, testMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshal(b *testing.B) { + benchmarkBufferUnmarshal(b, testMsg()) +} + +func BenchmarkMarshalBytes(b *testing.B) { + benchmarkMarshal(b, bytesMsg(), Marshal) +} + +func BenchmarkBufferMarshalBytes(b *testing.B) { + benchmarkBufferMarshal(b, bytesMsg()) +} + +func BenchmarkSizeBytes(b *testing.B) { + benchmarkSize(b, bytesMsg()) +} + +func BenchmarkUnmarshalBytes(b *testing.B) { + benchmarkUnmarshal(b, bytesMsg(), Unmarshal) +} + +func BenchmarkBufferUnmarshalBytes(b *testing.B) { + benchmarkBufferUnmarshal(b, bytesMsg()) +} + +func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { + b.StopTimer() + pb := initGoTestField() + skip := &GoSkipTest{ + SkipInt32: Int32(32), + SkipFixed32: Uint32(3232), + SkipFixed64: Uint64(6464), + SkipString: String("skipper"), + Skipgroup: &GoSkipTest_SkipGroup{ + GroupInt32: Int32(75), + GroupString: String("wxyz"), + }, + } + + pbd := new(GoTestField) + p := NewBuffer(nil) + p.Marshal(pb) + p.Marshal(skip) + p2 := NewBuffer(nil) + + b.StartTimer() + for i := 0; i < b.N; i++ { + p2.SetBuf(p.Bytes()) + p2.Unmarshal(pbd) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go new file mode 100644 index 0000000..1a3c22e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/any_test.go @@ -0,0 +1,300 @@ +// Go 
support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + pb "github.com/golang/protobuf/proto/proto3_proto" + testpb "github.com/golang/protobuf/proto/testdata" + anypb "github.com/golang/protobuf/ptypes/any" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. 
+ if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*anypb.Any{ + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", 
strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarshalUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &anypb.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func TestAmbiguousAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + type_url: "ttt/proto3_proto.Nested" + value: "\n\x05Monty" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} + +func TestUnmarshalOverwriteAny(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 7: Any message unpacked multiple times, or "type_url" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} + +func TestUnmarshalAnyMixAndMatch(t *testing.T) { + pb := &anypb.Any{} + err := proto.UnmarshalText(` + value: "\n\x05Monty" + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 5: Any message unpacked multiple times, or "value" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 0000000..e392575 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
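// In brief: scalar fields set in src overwrite dst, repeated fields
// append, map entries replace per key, and oneof fields are assigned
// wholesale. A small usage sketch, with MyMessage standing in for any
// generated message type:
//
//	dst := &MyMessage{Name: String("dave"), Pet: []string{"bunny"}}
//	src := &MyMessage{Count: Int32(7), Pet: []string{"kitty"}}
//	Merge(dst, src)
//	// dst now has Name "dave", Count 7, and Pet ["bunny", "kitty"]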
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
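// For a proto2 bytes field, an empty-but-non-nil slice means "present and
// empty" while nil means "unset", so the copy below must preserve that
// distinction.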
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go new file mode 100644 index 0000000..f607ff4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone_test.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var cloneTestMessage = &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + Connected: proto.Bool(true), + }, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, +} + +func init() { + ext := &pb.Ext{ + Data: proto.String("extension"), + } + if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { + panic("SetExtension: " + err.Error()) + } +} + +func TestClone(t *testing.T) { + m := proto.Clone(cloneTestMessage).(*pb.MyMessage) + if !proto.Equal(m, cloneTestMessage) { + t.Errorf("Clone(%v) = %v", cloneTestMessage, m) + } + + // Verify it was a deep copy. + *m.Inner.Port++ + if proto.Equal(m, cloneTestMessage) { + t.Error("Mutating clone changed the original") + } + // Byte fields and repeated fields should be copied. + if &m.Pet[0] == &cloneTestMessage.Pet[0] { + t.Error("Pet: repeated field not copied") + } + if &m.Others[0] == &cloneTestMessage.Others[0] { + t.Error("Others: repeated field not copied") + } + if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { + t.Error("Others[0].Value: bytes field not copied") + } + if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { + t.Error("RepBytes: repeated field not copied") + } + if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { + t.Error("RepBytes[0]: bytes field not copied") + } +} + +func TestCloneNil(t *testing.T) { + var m *pb.MyMessage + if c := proto.Clone(m); !proto.Equal(m, c) { + t.Errorf("Clone(%v) = %v", m, c) + } +} + +var mergeTests = []struct { + src, dst, want proto.Message +}{ + { + src: &pb.MyMessage{ + Count: proto.Int32(42), + }, + dst: &pb.MyMessage{ + Name: proto.String("Dave"), + }, + want: &pb.MyMessage{ + Count: proto.Int32(42), + Name: proto.String("Dave"), + }, + }, + { + src: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + }, + Pet: []string{"horsey"}, + Others: []*pb.OtherMessage{ + { + Value: []byte("some bytes"), + }, + }, + }, + dst: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("niles"), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + { + // Explicitly test a src=nil field + Inner: nil, + }, + }, + }, + want: &pb.MyMessage{ + Inner: &pb.InnerMessage{ + Host: proto.String("hey"), + Connected: proto.Bool(true), + Port: proto.Int32(9099), + }, + Pet: []string{"bunny", "kitty", "horsey"}, + Others: []*pb.OtherMessage{ + { + Key: proto.Int64(31415926535), + }, + {}, + { + Value: []byte("some bytes"), + }, + }, + }, + }, + { + src: &pb.MyMessage{ + RepBytes: [][]byte{[]byte("wow")}, + }, + dst: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham")}, + }, + want: &pb.MyMessage{ + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + }, + RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, + }, + }, + // Check that a scalar bytes field replaces rather than appends. 
+ { + src: &pb.OtherMessage{Value: []byte("foo")}, + dst: &pb.OtherMessage{Value: []byte("bar")}, + want: &pb.OtherMessage{Value: []byte("foo")}, + }, + { + src: &pb.MessageWithMap{ + NameMapping: map[int32]string{6: "Nigel"}, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + dst: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Bruce", // should be overwritten + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(3.0), + Exact: proto.Bool(true), + }, // the entire message should be overwritten + }, + }, + want: &pb.MessageWithMap{ + NameMapping: map[int32]string{ + 6: "Nigel", + 7: "Andrew", + }, + MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, + 0x4002: &pb.FloatingPoint{ + F: proto.Float64(2.0), + }, + }, + ByteMapping: map[bool][]byte{true: []byte("wowsa")}, + }, + }, + // proto3 shouldn't merge zero values, + // in the same way that proto2 shouldn't merge nils. + { + src: &proto3pb.Message{ + Name: "Aaron", + Data: []byte(""), // zero value, but not nil + }, + dst: &proto3pb.Message{ + HeightInCm: 176, + Data: []byte("texas!"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + HeightInCm: 176, + Data: []byte("texas!"), + }, + }, + // Oneof fields should merge by assignment. + { + src: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Number{41}, + }, + }, + // Oneof nil is the same as not set. + { + src: &pb.Communique{}, + dst: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + want: &pb.Communique{ + Union: &pb.Communique_Name{"Bobby Tables"}, + }, + }, + { + src: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, // replace + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert + }, + }, + dst: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep + }, + }, + want: &proto3pb.Message{ + Terrain: map[string]*proto3pb.Nested{ + "kay_a": &proto3pb.Nested{Cute: true}, + "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, + "kay_c": &proto3pb.Nested{Bunny: "bunny"}, + }, + }, + }, +} + +func TestMerge(t *testing.T) { + for _, m := range mergeTests { + got := proto.Clone(m.dst) + proto.Merge(got, m.src) + if !proto.Equal(got, m.want) { + t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 0000000..aa20729 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,970 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
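// For example, the bytes {0xAC, 0x02} decode as 0x2C | 0x02<<7 = 44 + 256
// = 300, consuming two bytes.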
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
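// For example, {0x05, 'h', 'e', 'l', 'l', 'o'} is the five-byte payload
// "hello" preceded by its varint length.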
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. 
+ if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. 
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. 
+func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. 
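A sketch of the packed layout handled by dec_slice_packed_int32 above (editorial, with error handling elided): one length-delimited payload holds back-to-back varints with no per-element tags, so the decoder simply consumes the payload until it is exhausted.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Packed body for the values [3, 270]: length 3, then 0x03 and 0x8E 0x02.
	b := proto.NewBuffer([]byte{0x03, 0x03, 0x8E, 0x02})
	raw, _ := b.DecodeRawBytes(false) // the length-delimited body
	inner := proto.NewBuffer(raw)
	for {
		v, err := inner.DecodeVarint()
		if err != nil {
			break // io.ErrUnexpectedEOF once the body is consumed
		}
		fmt.Println(v) // 3, then 270
	}
}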
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. 
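The restricted wire format that dec_new_map parses (and enc_new_map emits) can be built by hand with the Buffer primitives from this package; a sketch with illustrative key and value choices, not taken from the patch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// One map entry: field 1 is the key, field 2 is the value.
	entry := proto.NewBuffer(nil)
	entry.EncodeVarint(1<<3 | 0) // tag: field 1, wire type 0 (varint)
	entry.EncodeVarint(42)
	entry.EncodeVarint(2<<3 | 2) // tag: field 2, wire type 2 (bytes)
	entry.EncodeStringBytes("answer")
	fmt.Printf("% x\n", entry.Bytes()) // 08 2a 12 06 61 6e 73 77 65 72
}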
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go new file mode 100644 index 0000000..2c4c31d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode_test.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.7 + +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + tpb "github.com/golang/protobuf/proto/proto3_proto" +) + +var ( + bytesBlackhole []byte + msgBlackhole = new(tpb.Message) +) + +// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and +// 2 bytes long). +func BenchmarkVarint32ArraySmall(b *testing.B) { + for i := uint(1); i <= 10; i++ { + dist := genInt32Dist([7]int{0, 3, 1}, 1<2GB. 
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. 
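Marshal's fast path above defers to any type implementing Marshaler; a toy implementation (purely illustrative, assuming nothing beyond the interfaces shown in this file) that returns canned bytes:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// rawMsg satisfies proto.Message and proto.Marshaler with a fixed payload.
type rawMsg struct{ data []byte }

func (m *rawMsg) Reset()                   {}
func (m *rawMsg) String() string           { return "rawMsg" }
func (m *rawMsg) ProtoMessage()            {}
func (m *rawMsg) Marshal() ([]byte, error) { return m.data, nil }

func main() {
	// 08 96 01 is field 1 (varint) = 150; Marshal returns it untouched.
	b, err := proto.Marshal(&rawMsg{data: []byte{0x08, 0x96, 0x01}})
	fmt.Printf("% x %v\n", b, err) // 08 96 01 <nil>
}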
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) 
+ v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). 
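For the packed encoders above, the saving is that the field tag is paid once per field rather than once per element; a back-of-the-envelope check (assuming a hypothetical field with a one-byte tag) using the exported SizeVarint:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	vals := []uint64{1, 2, 3, 300, 70000}
	body := 0
	for _, v := range vals {
		body += proto.SizeVarint(v) // 1+1+1+2+3 = 8 bytes of varints
	}
	packed := 1 + proto.SizeVarint(uint64(body)) + body // tag + length + payload
	expanded := 0
	for _, v := range vals {
		expanded += 1 + proto.SizeVarint(v) // tag repeated per element
	}
	fmt.Println(packed, expanded) // 10 13
}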
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). 
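The embedded-message encoders above all reduce to the length-delimited helpers; a minimal round trip (an editorial sketch, errors ignored for brevity) pairing EncodeRawBytes with DecodeRawBytes:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	b.EncodeRawBytes([]byte("hello"))
	b.EncodeRawBytes([]byte("world"))

	r := proto.NewBuffer(b.Bytes())
	for {
		chunk, err := r.DecodeRawBytes(true) // alloc=true copies the body
		if err != nil {
			break
		}
		fmt.Printf("%s\n", chunk) // hello, then world
	}
}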
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? + + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) 
+ if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) 
+ } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/encode_test.go b/vendor/github.com/golang/protobuf/proto/encode_test.go new file mode 100644 index 0000000..a720947 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode_test.go @@ -0,0 +1,85 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.7 + +package proto_test + +import ( + "strconv" + "testing" + + "github.com/golang/protobuf/proto" + tpb "github.com/golang/protobuf/proto/proto3_proto" + "github.com/golang/protobuf/ptypes" +) + +var ( + blackhole []byte +) + +// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the +// same. +func BenchmarkAny(b *testing.B) { + data := make([]byte, 1<<20) + quantum := 1 << 10 + for i := uint(0); i <= 10; i++ { + b.Run(strconv.Itoa(quantum<= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. 
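A hedged usage sketch for the extension API above, borrowing the testdata types that extensions_test.go below also imports (the Data field of pb.Ext is assumed from those tests):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(4)}
	if err := proto.SetExtension(msg, pb.E_Ext_More, &pb.Ext{Data: proto.String("abc")}); err != nil {
		panic(err)
	}
	// Extensions survive a marshal/unmarshal round trip as raw bytes until read.
	buf, _ := proto.Marshal(msg)
	out := &pb.MyMessage{}
	_ = proto.Unmarshal(buf, out)
	ext, err := proto.GetExtension(out, pb.E_Ext_More)
	fmt.Println(ext.(*pb.Ext).GetData(), err) // abc <nil>
}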
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go new file mode 100644 index 0000000..b6d9114 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go @@ -0,0 +1,536 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/proto/testdata" + "golang.org/x/sync/errgroup" +) + +func TestGetExtensionsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ + pb.E_Ext_More, + pb.E_Ext_Text, + }) + if err != nil { + t.Fatalf("GetExtensions() failed: %s", err) + } + if exts[0] != ext1 { + t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) + } + if exts[1] != nil { + t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) + } +} + +func TestExtensionDescsWithMissingExtensions(t *testing.T) { + msg := &pb.MyMessage{Count: proto.Int32(0)} + extdesc1 := pb.E_Ext_More + if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { + t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err) + } + + ext1 := &pb.Ext{} + if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + extdesc2 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 123456789, + Name: "a.b", + Tag: "varint,123456789,opt", + } + ext2 := proto.Bool(false) + if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { + t.Fatalf("Could not set ext2: %s", err) + } + + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Could not marshal msg: %v", err) + } + if err := proto.Unmarshal(b, msg); err != nil { + t.Fatalf("Could not unmarshal into msg: %v", err) + } + + descs, err := proto.ExtensionDescs(msg) + if err != nil { + t.Fatalf("proto.ExtensionDescs: got error %v", err) + } + sortExtDescs(descs) + wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}} + if !reflect.DeepEqual(descs, wantDescs) { + t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) + } +} + +type ExtensionDescSlice []*proto.ExtensionDesc + +func (s ExtensionDescSlice) Len() int { return len(s) } +func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field } +func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func sortExtDescs(s []*proto.ExtensionDesc) { + sort.Sort(ExtensionDescSlice(s)) +} + +func TestGetExtensionStability(t *testing.T) { + check := func(m *pb.MyMessage) bool { + ext1, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + ext2, err := proto.GetExtension(m, pb.E_Ext_More) + if err != nil { + t.Fatalf("GetExtension() failed: %s", err) + } + return ext1 == ext2 + } + msg := &pb.MyMessage{Count: proto.Int32(4)} + ext0 := &pb.Ext{} + if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { + t.Fatalf("Could not 
set ext1: %s", ext0) + } + if !check(msg) { + t.Errorf("GetExtension() not stable before marshaling") + } + bb, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Marshal() failed: %s", err) + } + msg1 := &pb.MyMessage{} + err = proto.Unmarshal(bb, msg1) + if err != nil { + t.Fatalf("Unmarshal() failed: %s", err) + } + if !check(msg1) { + t.Errorf("GetExtension() not stable after unmarshaling") + } +} + +func TestGetExtensionDefaults(t *testing.T) { + var setFloat64 float64 = 1 + var setFloat32 float32 = 2 + var setInt32 int32 = 3 + var setInt64 int64 = 4 + var setUint32 uint32 = 5 + var setUint64 uint64 = 6 + var setBool = true + var setBool2 = false + var setString = "Goodnight string" + var setBytes = []byte("Goodnight bytes") + var setEnum = pb.DefaultsMessage_TWO + + type testcase struct { + ext *proto.ExtensionDesc // Extension we are testing. + want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail). + def interface{} // Expected value of extension after ClearExtension(). + } + tests := []testcase{ + {pb.E_NoDefaultDouble, setFloat64, nil}, + {pb.E_NoDefaultFloat, setFloat32, nil}, + {pb.E_NoDefaultInt32, setInt32, nil}, + {pb.E_NoDefaultInt64, setInt64, nil}, + {pb.E_NoDefaultUint32, setUint32, nil}, + {pb.E_NoDefaultUint64, setUint64, nil}, + {pb.E_NoDefaultSint32, setInt32, nil}, + {pb.E_NoDefaultSint64, setInt64, nil}, + {pb.E_NoDefaultFixed32, setUint32, nil}, + {pb.E_NoDefaultFixed64, setUint64, nil}, + {pb.E_NoDefaultSfixed32, setInt32, nil}, + {pb.E_NoDefaultSfixed64, setInt64, nil}, + {pb.E_NoDefaultBool, setBool, nil}, + {pb.E_NoDefaultBool, setBool2, nil}, + {pb.E_NoDefaultString, setString, nil}, + {pb.E_NoDefaultBytes, setBytes, nil}, + {pb.E_NoDefaultEnum, setEnum, nil}, + {pb.E_DefaultDouble, setFloat64, float64(3.1415)}, + {pb.E_DefaultFloat, setFloat32, float32(3.14)}, + {pb.E_DefaultInt32, setInt32, int32(42)}, + {pb.E_DefaultInt64, setInt64, int64(43)}, + {pb.E_DefaultUint32, setUint32, uint32(44)}, + {pb.E_DefaultUint64, setUint64, uint64(45)}, + {pb.E_DefaultSint32, setInt32, int32(46)}, + {pb.E_DefaultSint64, setInt64, int64(47)}, + {pb.E_DefaultFixed32, setUint32, uint32(48)}, + {pb.E_DefaultFixed64, setUint64, uint64(49)}, + {pb.E_DefaultSfixed32, setInt32, int32(50)}, + {pb.E_DefaultSfixed64, setInt64, int64(51)}, + {pb.E_DefaultBool, setBool, true}, + {pb.E_DefaultBool, setBool2, true}, + {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, + {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, + } + + checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { + val, err := proto.GetExtension(msg, test.ext) + if err != nil { + if valWant != nil { + return fmt.Errorf("GetExtension(): %s", err) + } + if want := proto.ErrMissingExtension; err != want { + return fmt.Errorf("Unexpected error: got %v, want %v", err, want) + } + return nil + } + + // All proto2 extension values are either a pointer to a value or a slice of values. + ty := reflect.TypeOf(val) + tyWant := reflect.TypeOf(test.ext.ExtensionType) + if got, want := ty, tyWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) + } + tye := ty.Elem() + tyeWant := tyWant.Elem() + if got, want := tye, tyeWant; got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) + } + + // Check the name of the type of the value. + // If it is an enum it will be type int32 with the name of the enum. 
+ if got, want := tye.Name(), tye.Name(); got != want { + return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) + } + + // Check that value is what we expect. + // If we have a pointer in val, get the value it points to. + valExp := val + if ty.Kind() == reflect.Ptr { + valExp = reflect.ValueOf(val).Elem().Interface() + } + if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { + return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) + } + + return nil + } + + setTo := func(test testcase) interface{} { + setTo := reflect.ValueOf(test.want) + if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { + setTo = reflect.New(typ).Elem() + setTo.Set(reflect.New(setTo.Type().Elem())) + setTo.Elem().Set(reflect.ValueOf(test.want)) + } + return setTo.Interface() + } + + for _, test := range tests { + msg := &pb.DefaultsMessage{} + name := test.ext.Name + + // Check the initial value. + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + + // Set the per-type value and check value. + name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) + if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { + t.Errorf("%s: SetExtension(): %v", name, err) + continue + } + if err := checkVal(test, msg, test.want); err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Set and check the value. + name += " (cleared)" + proto.ClearExtension(msg, test.ext) + if err := checkVal(test, msg, test.def); err != nil { + t.Errorf("%s: %v", name, err) + } + } +} + +func TestExtensionsRoundTrip(t *testing.T) { + msg := &pb.MyMessage{} + ext1 := &pb.Ext{ + Data: proto.String("hi"), + } + ext2 := &pb.Ext{ + Data: proto.String("there"), + } + exists := proto.HasExtension(msg, pb.E_Ext_More) + if exists { + t.Error("Extension More present unexpectedly") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { + t.Error(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { + t.Error(err) + } + e, err := proto.GetExtension(msg, pb.E_Ext_More) + if err != nil { + t.Error(err) + } + x, ok := e.(*pb.Ext) + if !ok { + t.Errorf("e has type %T, expected testdata.Ext", e) + } else if *x.Data != "there" { + t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) + } + proto.ClearExtension(msg, pb.E_Ext_More) + if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { + t.Errorf("got %v, expected ErrMissingExtension", e) + } + if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { + t.Error("expected bad extension error, got nil") + } + if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { + t.Error("expected extension err") + } + if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { + t.Error("expected some sort of type mismatch error, got nil") + } +} + +func TestNilExtension(t *testing.T) { + msg := &pb.MyMessage{ + Count: proto.Int32(1), + } + if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { + t.Fatal(err) + } + if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { + t.Error("expected SetExtension to fail due to a nil extension") + } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + t.Errorf("expected error %v, got %v", want, err) + } + // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update + // this test to 
verify that E_Ext_Text is properly propagated through marshal->unmarshal. +} + +func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { + // Add a repeated extension to the result. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + // Marshal message with a repeated extension. + msg1 := new(pb.OtherMessage) + err := proto.SetExtension(msg1, pb.E_RComplex, test.ext) + if err != nil { + t.Fatalf("[%s] Error setting extension: %v", test.name, err) + } + b, err := proto.Marshal(msg1) + if err != nil { + t.Fatalf("[%s] Error marshaling message: %v", test.name, err) + } + + // Unmarshal and read the merged proto. + msg2 := new(pb.OtherMessage) + err = proto.Unmarshal(b, msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_RComplex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.([]*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(ext, test.ext) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) + } + } +} + +func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { + // We may see multiple instances of the same extension in the wire + // format. For example, the proto compiler may encode custom options in + // this way. Here, we verify that we merge the extensions together. + tests := []struct { + name string + ext []*pb.ComplexExtension + }{ + { + "two fields", + []*pb.ComplexExtension{ + {First: proto.Int32(7)}, + {Second: proto.Int32(11)}, + }, + }, + { + "repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {Third: []int32{2000}}, + }, + }, + { + "two fields and repeated field", + []*pb.ComplexExtension{ + {Third: []int32{1000}}, + {First: proto.Int32(9)}, + {Second: proto.Int32(21)}, + {Third: []int32{2000}}, + }, + }, + } + for _, test := range tests { + var buf bytes.Buffer + var want pb.ComplexExtension + + // Generate a serialized representation of a repeated extension + // by catenating bytes together. + for i, e := range test.ext { + // Merge to create the wanted proto. + proto.Merge(&want, e) + + // serialize the message + msg := new(pb.OtherMessage) + err := proto.SetExtension(msg, pb.E_Complex, e) + if err != nil { + t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err) + } + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err) + } + buf.Write(b) + } + + // Unmarshal and read the merged proto. 
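+		// Every chunk written above carried one occurrence of the E_Complex
+		// extension, so unmarshaling the concatenation must merge those
+		// occurrences into a single value.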
+ msg2 := new(pb.OtherMessage) + err := proto.Unmarshal(buf.Bytes(), msg2) + if err != nil { + t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err) + } + e, err := proto.GetExtension(msg2, pb.E_Complex) + if err != nil { + t.Fatalf("[%s] Error getting extension: %v", test.name, err) + } + ext := e.(*pb.ComplexExtension) + if ext == nil { + t.Fatalf("[%s] Invalid extension", test.name) + } + if !reflect.DeepEqual(*ext, want) { + t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want) + } + } +} + +func TestClearAllExtensions(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + m := &pb.MyMessage{} + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + if !proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m)) + } + proto.ClearAllExtensions(m) + if proto.HasExtension(m, desc) { + t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m)) + } +} + +func TestMarshalRace(t *testing.T) { + // unregistered extension + desc := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 101010100, + Name: "emptyextension", + Tag: "varint,0,opt", + } + + m := &pb.MyMessage{Count: proto.Int32(4)} + if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { + t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + } + + var g errgroup.Group + for n := 3; n > 0; n-- { + g.Go(func() error { + _, err := proto.Marshal(m) + return err + }) + } + if err := g.Wait(); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 0000000..1c22550 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,897 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + 
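+			// A oneof is set by assigning one of its wrapper types.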
} + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
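+// For instance, an optional int64 field F on a generated message m can be set
+// with m.F = proto.Int64(42).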
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
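+// A typical call (sketch; data being any wire-format encoding):
+//
+//	var p proto.Buffer
+//	p.DebugPrint("request", data)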
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
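+		// Allocate a fresh pointer for the field and fill it with the
+		// declared default; dv == nil here means zeros was set, so the
+		// kind's zero value is used instead.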
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
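+// For example, the proto2 declaration
+//
+//	optional int32 type = 2 [default=77];
+//
+// (a *int32 field in Go) yields &scalarField{kind: reflect.Int32, value: int32(77)}.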
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
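+	// All keys in a Go map share one type, so inspecting the first key's
+	// kind is enough to pick the comparator.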
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go new file mode 100644 index 0000000..313e879 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/map_test.go @@ -0,0 +1,46 @@ +package proto_test + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + ppb "github.com/golang/protobuf/proto/proto3_proto" +) + +func marshalled() []byte { + m := &ppb.IntMaps{} + for i := 0; i < 1000; i++ { + m.Maps = append(m.Maps, &ppb.IntMap{ + Rtt: map[int32]int32{1: 2}, + }) + } + b, err := proto.Marshal(m) + if err != nil { + panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) + } + return b +} + +func BenchmarkConcurrentMapUnmarshal(b *testing.B) { + in := marshalled() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } + }) +} + +func BenchmarkSequentialMapUnmarshal(b *testing.B) { + in := marshalled() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 0000000..fd982de --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
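+// The result maps each registered extension's message name to its JSON
+// encoding, for instance (name illustrative):
+//
+//	{"[my.pkg.MyExtension]": {"field": 1}}
+//
+// Extensions without a registered message set type are skipped.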
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go new file mode 100644 index 0000000..353a3ea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go @@ -0,0 +1,66 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "testing" +) + +func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { + // Check that a repeated message set entry will be concatenated. + in := &messageSet{ + Item: []*_MessageSet_Item{ + {TypeId: Int32(12345), Message: []byte("hoo")}, + {TypeId: Int32(12345), Message: []byte("hah")}, + }, + } + b, err := Marshal(in) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + t.Logf("Marshaled bytes: %q", b) + + var extensions XXX_InternalExtensions + if err := UnmarshalMessageSet(b, &extensions); err != nil { + t.Fatalf("UnmarshalMessageSet: %v", err) + } + ext, ok := extensions.p.extensionMap[12345] + if !ok { + t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap) + } + // Skip wire type/field number and length varints. + got := skipVarint(skipVarint(ext.enc)) + if want := []byte("hoohah"); !bytes.Equal(got, want) { + t.Errorf("Combined extension is %q, want %q", got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..fb512e2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. 
+func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
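+// Floats are handled as their IEEE-754 bit patterns: word32_Set stores
+// math.Float32frombits(x) and word32_Get returns math.Float32bits(...), so a
+// float32 round-trips exactly with no numeric conversion.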
+func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. +type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..6b5567d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. 
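+// Like all of the accessors below, it is plain pointer arithmetic:
+//
+//	(*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+//
+// i.e. the struct's base address plus the field's byte offset, with no
+// reflection on the hot path.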
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. 
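+// The Buffer amortizes these allocations: it keeps a small pool in o.uint32s
+// and hands out one element per call, so decoding many optional scalar fields
+// costs roughly one allocation per uint32PoolSize values rather than one per
+// field.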
+func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 0000000..ec2289c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
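+//
+// Concretely: put(3, fi) lands in the fastTags slice and get(3) is a bounds
+// check plus an indexed load, while put(100000, fi) falls back to the slowTags
+// map because 100000 >= tagMapFastLimit.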
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+	def_uint64 uint64
+
+	enc           encoder
+	valEnc        valueEncoder // set for bool and numeric types only
+	field         field
+	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+	tagbuf        [8]byte
+	stype         reflect.Type      // set for struct types only
+	sprop         *StructProperties // set for struct types only
+	isMarshaler   bool
+	isUnmarshaler bool
+
+	mtype    reflect.Type // set for map types only
+	mkeyprop *Properties  // set for map types only
+	mvalprop *Properties  // set for map types only
+
+	size    sizer
+	valSize valueSizer // set for bool and numeric types only
+
+	dec    decoder
+	valDec valueDecoder // set for bool and numeric types only
+
+	// If this is a packable field, this will be the decoder for the packed version of the field.
+	packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
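+	// Illustrative example (hypothetical field, not from this package): a
+	// generated field such as
+	//	Name *string `protobuf:"bytes,1,opt,name=name,json=myName,def=abc"`
+	// arrives here as s = "bytes,1,opt,name=name,json=myName,def=abc" and
+	// splits into ["bytes" "1" "opt" "name=name" "json=myName" "def=abc"];
+	// the switch below maps "bytes" to WireBytes, and the loop after it
+	// consumes the remaining comma-separated options.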
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
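+// For each Go kind it installs a matching enc/dec/size triple; for example a
+// *int32 field gets (*Buffer).enc_int32, (*Buffer).dec_int32 and size_int32,
+// while a map field gets the enc_new_map/dec_new_map/size_new_map trio plus
+// separately initialized key and value sub-Properties.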
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 
reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. 
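+			// Instead, OrigName is taken from the protobuf_oneof tag here, and
+			// the per-variant Properties are parsed later from the wrapper
+			// types returned by XXX_OneofFuncs (see below).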
+ p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
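+//
+// Illustrative usage, mirroring what generated code emits in its init
+// function (MyMessage is hypothetical):
+//
+//	func init() { proto.RegisterType((*MyMessage)(nil), "mypkg.MyMessage") }
+//
+// after which MessageType("mypkg.MyMessage") returns the *MyMessage type and
+// MessageName on an instance resolves the reverse mapping.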
+var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 0000000..cc4d048 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap + IntMap + IntMaps +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
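+// Concretely: ProtoPackageIsVersion2 is only declared in sufficiently new
+// copies of the proto package, so this otherwise-unused constant turns a
+// version mismatch into a compile error rather than a subtle runtime failure.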
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"` + ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` + Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` + Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Message) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Message) GetHilarity() Message_Humour { + if m != nil { + return m.Hilarity + } + return Message_UNKNOWN +} + +func (m *Message) GetHeightInCm() uint32 { + if m != nil { + return m.HeightInCm + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Message) GetResultCount() int64 { + if m != nil { + return m.ResultCount + } + return 0 +} + +func (m *Message) GetTrueScotsman() bool { + if m != nil { + return 
m.TrueScotsman + } + return false +} + +func (m *Message) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +func (m *Message) GetKey() []uint64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Message) GetShortKey() []int32 { + if m != nil { + return m.ShortKey + } + return nil +} + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetRFunny() []Message_Humour { + if m != nil { + return m.RFunny + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +func (m *Message) GetAnything() *google_protobuf.Any { + if m != nil { + return m.Anything + } + return nil +} + +func (m *Message) GetManyThings() []*google_protobuf.Any { + if m != nil { + return m.ManyThings + } + return nil +} + +func (m *Message) GetSubmessage() *Message { + if m != nil { + return m.Submessage + } + return nil +} + +func (m *Message) GetChildren() []*Message { + if m != nil { + return m.Children + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Nested) GetBunny() string { + if m != nil { + return m.Bunny + } + return "" +} + +func (m *Nested) GetCute() bool { + if m != nil { + return m.Cute + } + return false +} + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +type IntMap struct { + Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *IntMap) Reset() { *m = IntMap{} } +func (m *IntMap) String() string { return proto.CompactTextString(m) } +func (*IntMap) ProtoMessage() {} +func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *IntMap) GetRtt() map[int32]int32 { + if m != nil { + return m.Rtt + } + return nil +} + +type IntMaps struct { + Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"` +} + +func (m *IntMaps) Reset() { *m = IntMaps{} } +func (m *IntMaps) String() string { return proto.CompactTextString(m) } +func (*IntMaps) ProtoMessage() {} +func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *IntMaps) GetMaps() []*IntMap { + if m != nil { + return m.Maps + } + return nil +} + +func init() { + 
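+	// Register the generated message types and the enum by their
+	// fully-qualified proto names so they can be looked up at runtime
+	// (see RegisterType and RegisterEnum in properties.go above).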
proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap") + proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps") + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} + +func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 733 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34, + 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a, + 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98, + 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f, + 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3, + 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8, + 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef, + 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7, + 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35, + 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc, + 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5, + 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c, + 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58, + 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61, + 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8, + 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71, + 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a, + 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88, + 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35, + 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63, + 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a, + 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78, + 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2, + 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62, + 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f, + 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11, + 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97, + 0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76, + 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8, + 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19, + 0x46, 0x52, 0xea, 0x8d, 0xa2, 
0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3, + 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0, + 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d, + 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1, + 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9, + 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c, + 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca, + 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51, + 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0, + 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7, + 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05, + 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09, + 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47, + 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91, + 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde, + 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto new file mode 100644 index 0000000..2048655 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto @@ -0,0 +1,87 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+  enum Humour {
+    UNKNOWN = 0;
+    PUNS = 1;
+    SLAPSTICK = 2;
+    BILL_BAILEY = 3;
+  }
+
+  string name = 1;
+  Humour hilarity = 2;
+  uint32 height_in_cm = 3;
+  bytes data = 4;
+  int64 result_count = 7;
+  bool true_scotsman = 8;
+  float score = 9;
+
+  repeated uint64 key = 5;
+  repeated int32 short_key = 19;
+  Nested nested = 6;
+  repeated Humour r_funny = 16;
+
+  map<string, Nested> terrain = 10;
+  testdata.SubDefaults proto2_field = 11;
+  map<string, testdata.SubDefaults> proto2_value = 13;
+
+  google.protobuf.Any anything = 14;
+  repeated google.protobuf.Any many_things = 15;
+
+  Message submessage = 17;
+  repeated Message children = 18;
+}
+
+message Nested {
+  string bunny = 1;
+  bool cute = 2;
+}
+
+message MessageWithMap {
+  map<bool, bytes> byte_mapping = 1;
+}
+
+
+message IntMap {
+  map<int32, int32> rtt = 1;
+}
+
+message IntMaps {
+  repeated IntMap maps = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 0000000..735837f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,135 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
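A minimal, self-contained sketch (not part of the vendored files; it assumes the vendored import paths used throughout this change and uses only APIs that appear in it: proto.Marshal, proto.Unmarshal, proto.Equal) of how the proto3 map fields declared above round-trip through this package:

// roundtrip_maps.go - illustrative sketch only, not part of the vendored diff.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto"
)

func main() {
	// Populate the map<string, Nested> field "terrain" from proto3.proto.
	m := &pb.Message{
		Name:    "example",
		Terrain: map[string]*pb.Nested{"meadow": {Bunny: "Monty", Cute: true}},
	}

	// On the wire, each map entry is encoded as a small submessage holding
	// the key (field 1) and the value (field 2).
	b, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}

	m2 := new(pb.Message)
	if err := proto.Unmarshal(b, m2); err != nil {
		panic(err)
	}

	// Compare semantically, as the tests below do; map entry order on the
	// wire is unspecified, so a byte-level comparison would be too strict.
	fmt.Println(proto.Equal(m, m2)) // true
}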
+
+package proto_test
+
+import (
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	pb "github.com/golang/protobuf/proto/proto3_proto"
+	tpb "github.com/golang/protobuf/proto/testdata"
+)
+
+func TestProto3ZeroValues(t *testing.T) {
+	tests := []struct {
+		desc string
+		m    proto.Message
+	}{
+		{"zero message", &pb.Message{}},
+		{"empty bytes field", &pb.Message{Data: []byte{}}},
+	}
+	for _, test := range tests {
+		b, err := proto.Marshal(test.m)
+		if err != nil {
+			t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+			continue
+		}
+		if len(b) > 0 {
+			t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+		}
+	}
+}
+
+func TestRoundTripProto3(t *testing.T) {
+	m := &pb.Message{
+		Name:         "David",          // (2 | 1<<3): 0x0a 0x05 "David"
+		Hilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01
+		HeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01
+		Data:         []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto"
+		ResultCount:  47,               // (0 | 7<<3): 0x38 0x2f
+		TrueScotsman: true,             // (0 | 8<<3): 0x40 0x01
+		Score:        8.1,              // (5 | 9<<3): 0x4d <8.1>
+
+		Key: []uint64{1, 0xdeadbeef},
+		Nested: &pb.Nested{
+			Bunny: "Monty",
+		},
+	}
+	t.Logf(" m: %v", m)
+
+	b, err := proto.Marshal(m)
+	if err != nil {
+		t.Fatalf("proto.Marshal: %v", err)
+	}
+	t.Logf(" b: %q", b)
+
+	m2 := new(pb.Message)
+	if err := proto.Unmarshal(b, m2); err != nil {
+		t.Fatalf("proto.Unmarshal: %v", err)
+	}
+	t.Logf("m2: %v", m2)
+
+	if !proto.Equal(m, m2) {
+		t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+	}
+}
+
+func TestGettersForBasicTypesExist(t *testing.T) {
+	var m pb.Message
+	if got := m.GetNested().GetBunny(); got != "" {
+		t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
+	}
+	if got := m.GetNested().GetCute(); got {
+		t.Errorf("m.GetNested().GetCute() = %t, want false", got)
+	}
+}
+
+func TestProto3SetDefaults(t *testing.T) {
+	in := &pb.Message{
+		Terrain: map[string]*pb.Nested{
+			"meadow": new(pb.Nested),
+		},
+		Proto2Field: new(tpb.SubDefaults),
+		Proto2Value: map[string]*tpb.SubDefaults{
+			"badlands": new(tpb.SubDefaults),
+		},
+	}
+
+	got := proto.Clone(in).(*pb.Message)
+	proto.SetDefaults(got)
+
+	// There are no defaults in proto3. Everything should be the zero value, but
+	// we need to remember to set defaults for nested proto2 messages.
+	want := &pb.Message{
+		Terrain: map[string]*pb.Nested{
+			"meadow": new(pb.Nested),
+		},
+		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+		Proto2Value: map[string]*tpb.SubDefaults{
+			"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+		},
+	}
+
+	if !proto.Equal(got, want) {
+		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 0000000..a2729c3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "testing" +) + +// This is a separate file and package from size_test.go because that one uses +// generated messages and thus may not be in package proto without having a circular +// dependency, whereas this file tests unexported details of size.go. + +func TestVarintSize(t *testing.T) { + // Check the edge cases carefully. + testCases := []struct { + n uint64 + size int + }{ + {0, 1}, + {1, 1}, + {127, 1}, + {128, 2}, + {16383, 2}, + {16384, 3}, + {1<<63 - 1, 9}, + {1 << 63, 10}, + } + for _, tc := range testCases { + size := sizeVarint(tc.n) + if size != tc.size { + t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go new file mode 100644 index 0000000..af1034d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/size_test.go @@ -0,0 +1,164 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "log" + "strings" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/testdata" +) + +var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} + +// messageWithExtension2 is in equal_test.go. +var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} + +func init() { + if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { + log.Panicf("SetExtension: %v", err) + } + + // Force messageWithExtension3 to have the extension encoded. + Marshal(messageWithExtension3) + +} + +var SizeTests = []struct { + desc string + pb Message +}{ + {"empty", &pb.OtherMessage{}}, + // Basic types. + {"bool", &pb.Defaults{F_Bool: Bool(true)}}, + {"int32", &pb.Defaults{F_Int32: Int32(12)}}, + {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, + {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, + {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, + {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, + {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, + {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, + {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, + {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, + {"float", &pb.Defaults{F_Float: Float32(12.6)}}, + {"double", &pb.Defaults{F_Double: Float64(13.9)}}, + {"string", &pb.Defaults{F_String: String("niles")}}, + {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, + {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, + {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, + {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, + {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, + // Repeated. + {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, + {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, + {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, + {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, + {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, + {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ + // Need enough large numbers to verify that the header is counting the number of bytes + // for the field, not the number of elements. + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, + }}}, + {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, + {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, + // Nested. 
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, + {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, + // Other things. + {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, + {"extension (unencoded)", messageWithExtension1}, + {"extension (encoded)", messageWithExtension3}, + // proto3 message + {"proto3 empty", &proto3pb.Message{}}, + {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, + {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, + {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, + {"proto3 float", &proto3pb.Message{Score: 12.6}}, + {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, + {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, + {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, + {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, + {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, + + {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, + {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, + {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, + {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, + + {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, + {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, + {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, + + {"oneof not set", &pb.Oneof{}}, + {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, + {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, + {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, + {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, + {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, + {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, + {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, + {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, + {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, + {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, + {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, + {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}}, + {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, + {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, + {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, + {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, + {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, + {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, + {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, + {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, +} + +func TestSize(t *testing.T) { + for _, tc := range SizeTests { + size := Size(tc.pb) + b, err := Marshal(tc.pb) + if err != nil { + t.Errorf("%v: Marshal failed: %v", tc.desc, err) + continue + } + if size != len(b) { + t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, 
len(b)) + t.Logf("%v: bytes: %#v", tc.desc, b) + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/Makefile b/vendor/github.com/golang/protobuf/proto/testdata/Makefile new file mode 100644 index 0000000..fc28862 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/Makefile @@ -0,0 +1,50 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +include ../../Make.protobuf + +all: regenerate + +regenerate: + rm -f test.pb.go + make test.pb.go + +# The following rules are just aids to development. Not needed for typical testing. + +diff: regenerate + git diff test.pb.go + +restore: + cp test.pb.go.golden test.pb.go + +preserve: + cp test.pb.go test.pb.go.golden diff --git a/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go new file mode 100644 index 0000000..7172d0e --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go @@ -0,0 +1,86 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Verify that the compiler output for test.proto is unchanged. + +package testdata + +import ( + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) + +// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. +func sum(t *testing.T, name string) string { + data, err := ioutil.ReadFile(name) + if err != nil { + t.Fatal(err) + } + t.Logf("sum(%q): length is %d", name, len(data)) + hash := sha1.New() + _, err = hash.Write(data) + if err != nil { + t.Fatal(err) + } + return fmt.Sprintf("% x", hash.Sum(nil)) +} + +func run(t *testing.T, name string, args ...string) { + cmd := exec.Command(name, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + t.Fatal(err) + } +} + +func TestGolden(t *testing.T) { + // Compute the original checksum. + goldenSum := sum(t, "test.pb.go") + // Run the proto compiler. + run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") + newFile := filepath.Join(os.TempDir(), "test.pb.go") + defer os.Remove(newFile) + // Compute the new checksum. + newSum := sum(t, newFile) + // Verify + if newSum != goldenSum { + run(t, "diff", "-u", "test.pb.go", newFile) + t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + } +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go new file mode 100644 index 0000000..e980d1a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go @@ -0,0 +1,4147 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: test.proto + +/* +Package testdata is a generated protocol buffer package. + +It is generated from these files: + test.proto + +It has these top-level messages: + GoEnum + GoTestField + GoTest + GoTestRequiredGroupField + GoSkipTest + NonPackedTest + PackedTest + MaxTag + OldMessage + NewMessage + InnerMessage + OtherMessage + RequiredInnerMessage + MyMessage + Ext + ComplexExtension + DefaultsMessage + MyMessageSet + Empty + MessageList + Strings + Defaults + SubDefaults + RepeatedEnum + MoreRepeated + GroupOld + GroupNew + FloatingPoint + MessageWithMap + Oneof + Communique +*/ +package testdata + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} +func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// An enum, for completeness. +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := 
new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{16, 0} +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} } + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" 
json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` + F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated 
[]string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 
`protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). + F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +const Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const 
Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m 
*GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 { + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return 
*m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) +} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing a group containing a required field. +type GoTestRequiredGroupField struct { + Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } +func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField) ProtoMessage() {} +func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { + if m != nil { + return m.Group + } + return nil +} + +type GoTestRequiredGroupField_Group struct { + Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } +func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField_Group) ProtoMessage() {} +func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 0} +} + +func (m *GoTestRequiredGroupField_Group) GetField() int32 { + if m != nil && m.Field != nil { + return *m.Field + } + return 0 +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. 
+type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. 
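+//
+// Illustrative sketch (not part of the generated file): the packed and
+// non-packed forms decode interchangeably, so either type accepts bytes
+// produced by the other:
+//
+//	buf, _ := proto.Marshal(&PackedTest{B: []int32{1, 2, 3}})
+//	np := new(NonPackedTest)
+//	if err := proto.Unmarshal(buf, np); err == nil {
+//		_ = np.GetA() // []int32{1, 2, 3}
+//	}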
+type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. + LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. 
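+	// Widening it is wire-safe because int32 and int64 share the varint
+	// encoding; e.g. (illustrative sketch, not generated code):
+	//
+	//	old, _ := proto.Marshal(&OldMessage{Num: proto.Int32(7)})
+	//	n := new(NewMessage)
+	//	_ = proto.Unmarshal(old, n) // n.GetNum() == 7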
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_OtherMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return *m.Weight 
+ } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
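+	// i.e. a slice of byte slices (illustrative sketch, not generated code):
+	//
+	//	m := &MyMessage{RepBytes: [][]byte{[]byte("ab"), []byte("cd")}}
+	//	_ = len(m.GetRepBytes()) // 2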
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_MyMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "testdata.Ext.more", + Tag: "bytes,103,opt,name=more", + Filename: "test.proto", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "testdata.Ext.text", + Tag: "bytes,104,opt,name=text", + 
Filename: "test.proto", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "testdata.Ext.number", + Tag: "varint,105,opt,name=number", + Filename: "test.proto", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil +} + +type DefaultsMessage struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} + +type MyMessageSet struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *MyMessageSet) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*MyMessageSet)(nil) +var _ proto.Unmarshaler = (*MyMessageSet)(nil) + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} + +type Empty struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() 
string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. 
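+	// When unset, the getters hand these defaults back, including the
+	// non-finite floats; e.g. (illustrative sketch, not generated code):
+	//
+	//	d := new(Defaults)
+	//	_ = math.IsInf(float64(d.GetF_Pinf()), 1) // true
+	//	_ = math.IsNaN(float64(d.GetF_Nan()))     // true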
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. + StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() 
[]int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} } + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +func (m *FloatingPoint) GetExact() bool { + if m != nil && m.Exact != nil { + return *m.Exact + } + return false +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" 
protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // *Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +type isOneof_Union interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` 
+} +type Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func (m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) 
GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case *Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) 
{ + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // 
union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 + case *Oneof_F_Int32: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += proto.SizeVarint(4<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Fixed64: + n += proto.SizeVarint(5<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_Uint32: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += proto.SizeVarint(7<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += proto.SizeVarint(8<<3 | proto.WireFixed32) + n += 4 + case *Oneof_F_Double: + n += proto.SizeVarint(9<<3 | proto.WireFixed64) + n += 8 + case *Oneof_F_String: + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += proto.SizeVarint(13<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += proto.SizeVarint(14<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += proto.SizeVarint(16<<3 | proto.WireStartGroup) + n += proto.Size(x.FGroup) + n += proto.SizeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + n += proto.SizeVarint(536870911<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += proto.SizeVarint(100<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} } + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type Communique struct { + MakeMeCry *bool 
`protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Col: + n += proto.SizeVarint(9<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(10<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "testdata.greeting", + Tag: "bytes,106,rep,name=greeting", + Filename: "test.proto", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "testdata.complex", + Tag: "bytes,200,opt,name=complex", + Filename: "test.proto", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "testdata.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", + Filename: "test.proto", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "testdata.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", + Filename: "test.proto", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "testdata.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", + Filename: "test.proto", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "testdata.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", + Filename: "test.proto", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "testdata.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", + Filename: "test.proto", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "testdata.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", + Filename: "test.proto", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "testdata.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", + Filename: "test.proto", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "testdata.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", + Filename: "test.proto", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "testdata.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", + Filename: "test.proto", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "testdata.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", + Filename: "test.proto", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: "testdata.no_default_fixed64", + Tag: 
"fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", + Filename: "test.proto", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "testdata.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", + Filename: "test.proto", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "testdata.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", + Filename: "test.proto", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "testdata.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", + Filename: "test.proto", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "testdata.no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", + Filename: "test.proto", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "testdata.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", + Filename: "test.proto", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "testdata.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", + Filename: "test.proto", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "testdata.default_double", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", + Filename: "test.proto", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "testdata.default_float", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", + Filename: "test.proto", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "testdata.default_int32", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", + Filename: "test.proto", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "testdata.default_int64", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", + Filename: "test.proto", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "testdata.default_uint32", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", + Filename: "test.proto", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "testdata.default_uint64", + Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", + Filename: "test.proto", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: 
"testdata.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", + Filename: "test.proto", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "testdata.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", + Filename: "test.proto", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "testdata.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", + Filename: "test.proto", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "testdata.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", + Filename: "test.proto", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "testdata.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", + Filename: "test.proto", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "testdata.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", + Filename: "test.proto", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "testdata.default_bool", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", + Filename: "test.proto", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "testdata.default_string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", + Filename: "test.proto", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "testdata.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", + Filename: "test.proto", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "testdata.default_enum", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", + Filename: "test.proto", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "testdata.x201", + Tag: "bytes,201,opt,name=x201", + Filename: "test.proto", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "testdata.x202", + Tag: "bytes,202,opt,name=x202", + Filename: "test.proto", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 203, + Name: "testdata.x203", + Tag: "bytes,203,opt,name=x203", + Filename: "test.proto", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "testdata.x204", + Tag: "bytes,204,opt,name=x204", + Filename: "test.proto", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "testdata.x205", + Tag: "bytes,205,opt,name=x205", + Filename: "test.proto", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "testdata.x206", + Tag: "bytes,206,opt,name=x206", + Filename: "test.proto", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "testdata.x207", + Tag: "bytes,207,opt,name=x207", + Filename: "test.proto", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "testdata.x208", + Tag: "bytes,208,opt,name=x208", + Filename: "test.proto", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "testdata.x209", + Tag: "bytes,209,opt,name=x209", + Filename: "test.proto", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "testdata.x210", + Tag: "bytes,210,opt,name=x210", + Filename: "test.proto", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "testdata.x211", + Tag: "bytes,211,opt,name=x211", + Filename: "test.proto", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "testdata.x212", + Tag: "bytes,212,opt,name=x212", + Filename: "test.proto", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "testdata.x213", + Tag: "bytes,213,opt,name=x213", + Filename: "test.proto", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "testdata.x214", + Tag: "bytes,214,opt,name=x214", + Filename: "test.proto", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "testdata.x215", + Tag: "bytes,215,opt,name=x215", + Filename: "test.proto", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "testdata.x216", + Tag: "bytes,216,opt,name=x216", + Filename: "test.proto", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "testdata.x217", + Tag: "bytes,217,opt,name=x217", + Filename: "test.proto", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "testdata.x218", + Tag: "bytes,218,opt,name=x218", + Filename: "test.proto", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "testdata.x219", + Tag: "bytes,219,opt,name=x219", + Filename: "test.proto", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "testdata.x220", + Tag: "bytes,220,opt,name=x220", + Filename: "test.proto", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "testdata.x221", + Tag: "bytes,221,opt,name=x221", + Filename: "test.proto", +} + +var E_X222 = &proto.ExtensionDesc{ 
+ ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "testdata.x222", + Tag: "bytes,222,opt,name=x222", + Filename: "test.proto", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "testdata.x223", + Tag: "bytes,223,opt,name=x223", + Filename: "test.proto", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "testdata.x224", + Tag: "bytes,224,opt,name=x224", + Filename: "test.proto", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "testdata.x225", + Tag: "bytes,225,opt,name=x225", + Filename: "test.proto", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "testdata.x226", + Tag: "bytes,226,opt,name=x226", + Filename: "test.proto", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "testdata.x227", + Tag: "bytes,227,opt,name=x227", + Filename: "test.proto", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "testdata.x228", + Tag: "bytes,228,opt,name=x228", + Filename: "test.proto", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "testdata.x229", + Tag: "bytes,229,opt,name=x229", + Filename: "test.proto", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "testdata.x230", + Tag: "bytes,230,opt,name=x230", + Filename: "test.proto", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "testdata.x231", + Tag: "bytes,231,opt,name=x231", + Filename: "test.proto", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "testdata.x232", + Tag: "bytes,232,opt,name=x232", + Filename: "test.proto", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "testdata.x233", + Tag: "bytes,233,opt,name=x233", + Filename: "test.proto", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "testdata.x234", + Tag: "bytes,234,opt,name=x234", + Filename: "test.proto", +} + +var E_X235 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "testdata.x235", + Tag: "bytes,235,opt,name=x235", + Filename: "test.proto", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "testdata.x236", + Tag: "bytes,236,opt,name=x236", + Filename: "test.proto", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "testdata.x237", + Tag: "bytes,237,opt,name=x237", + Filename: "test.proto", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "testdata.x238", + Tag: "bytes,238,opt,name=x238", + Filename: "test.proto", +} + +var E_X239 = 
&proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "testdata.x239", + Tag: "bytes,239,opt,name=x239", + Filename: "test.proto", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "testdata.x240", + Tag: "bytes,240,opt,name=x240", + Filename: "test.proto", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "testdata.x241", + Tag: "bytes,241,opt,name=x241", + Filename: "test.proto", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "testdata.x242", + Tag: "bytes,242,opt,name=x242", + Filename: "test.proto", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "testdata.x243", + Tag: "bytes,243,opt,name=x243", + Filename: "test.proto", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "testdata.x244", + Tag: "bytes,244,opt,name=x244", + Filename: "test.proto", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "testdata.x245", + Tag: "bytes,245,opt,name=x245", + Filename: "test.proto", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "testdata.x246", + Tag: "bytes,246,opt,name=x246", + Filename: "test.proto", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "testdata.x247", + Tag: "bytes,247,opt,name=x247", + Filename: "test.proto", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "testdata.x248", + Tag: "bytes,248,opt,name=x248", + Filename: "test.proto", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "testdata.x249", + Tag: "bytes,249,opt,name=x249", + Filename: "test.proto", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "testdata.x250", + Tag: "bytes,250,opt,name=x250", + Filename: "test.proto", +} + +func init() { + proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") + proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") + proto.RegisterType((*GoTest)(nil), "testdata.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") + proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField") + proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group") + proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") + proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") + proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") + proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") + 
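+	// Each RegisterType call maps a fully-qualified proto name such as
+	// "testdata.OldMessage" to its generated Go type, so reflection-driven
+	// code can recover the type from the name alone. A minimal lookup
+	// sketch using the proto package's MessageType accessor (the message
+	// chosen here is only illustrative):
+	//
+	//   t := proto.MessageType("testdata.OldMessage") // reflect.Type of *OldMessage
+	//   m := reflect.New(t.Elem()).Interface().(proto.Message)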
proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "testdata.Ext") + proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension") + proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet") + proto.RegisterType((*Empty)(nil), "testdata.Empty") + proto.RegisterType((*MessageList)(nil), "testdata.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message") + proto.RegisterType((*Strings)(nil), "testdata.Strings") + proto.RegisterType((*Defaults)(nil), "testdata.Defaults") + proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap") + proto.RegisterType((*Oneof)(nil), "testdata.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "testdata.Communique") + proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value) + proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + proto.RegisterExtension(E_RComplex) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + 
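+	// RegisterExtension records each ExtensionDesc so the runtime can
+	// type-check extension access on the extended message. A minimal usage
+	// sketch, assuming only the proto package API (the extension and value
+	// chosen here are illustrative):
+	//
+	//   m := &DefaultsMessage{}
+	//   if err := proto.SetExtension(m, E_NoDefaultInt32, proto.Int32(7)); err != nil {
+	//       // handle the type-mismatch error
+	//   }
+	//   v, _ := proto.GetExtension(m, E_NoDefaultInt32) // v has type *int32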
proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 4453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48, + 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e, + 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xca, 0x7a, 0x63, 0x89, 0x0a, + 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x44, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, + 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9, + 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf, + 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab, + 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0x3b, + 0x4e, 0xa7, 0x7c, 0x1d, 0xd2, 0x1b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0x38, 0xb4, 0xed, + 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x34, 0x5b, 0x2d, 0x83, 0xb6, 0x94, + 0xef, 0x40, 0x7e, 0xc3, 0xde, 0x33, 0x27, 0x4e, 0xb3, 0x6f, 0x0e, 0xba, 0x64, 0x11, 0x52, 0x4f, + 0x3b, 0xfb, 0xe6, 
0x00, 0x19, 0x39, 0x83, 0x15, 0x08, 0x81, 0xe4, 0xde, 0xc9, 0xc8, 0x2c, 0xc9, + 0x58, 0x89, 0xbf, 0xcb, 0xbf, 0x72, 0x85, 0x76, 0x42, 0x99, 0xe4, 0x3a, 0x24, 0xbf, 0xdc, 0xb7, + 0xba, 0xbc, 0x97, 0xd7, 0xfc, 0x5e, 0x58, 0xfb, 0xca, 0x97, 0x37, 0xb7, 0x1f, 0x1b, 0x08, 0xa1, + 0xf6, 0xf7, 0x3a, 0xfb, 0x03, 0x6a, 0x4a, 0xa2, 0xf6, 0xb1, 0x40, 0x6b, 0x77, 0x3a, 0xe3, 0xce, + 0xb0, 0x94, 0x50, 0xa5, 0x4a, 0xca, 0x60, 0x05, 0x72, 0x1f, 0xe6, 0x0c, 0xf3, 0xc5, 0x51, 0x7f, + 0x6c, 0x76, 0x71, 0x70, 0xa5, 0xa4, 0x2a, 0x57, 0xf2, 0xd3, 0xf6, 0xb1, 0xd1, 0x08, 0x62, 0x19, + 0x79, 0x64, 0x76, 0x1c, 0x97, 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xad, 0x91, 0xd3, + 0xb7, 0xad, 0xce, 0x80, 0x91, 0xd3, 0xaa, 0x14, 0x43, 0x0e, 0x60, 0xc9, 0x9b, 0x50, 0x6c, 0xb6, + 0x1f, 0xda, 0xf6, 0xa0, 0x3d, 0xe6, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x93, 0xd6, + 0xba, 0xc3, 0x24, 0x15, 0x50, 0x9a, 0xed, 0x4d, 0xcb, 0xa9, 0x6a, 0x3e, 0x30, 0xaf, 0xca, 0x95, + 0x94, 0x31, 0xdf, 0xc4, 0xea, 0x29, 0x64, 0x4d, 0xf7, 0x91, 0x05, 0x55, 0xae, 0x24, 0x18, 0xb2, + 0xa6, 0x7b, 0xc8, 0x5b, 0x40, 0x9a, 0xed, 0x66, 0xff, 0xd8, 0xec, 0x8a, 0x56, 0xe7, 0x54, 0xb9, + 0x92, 0x31, 0x94, 0x26, 0x6f, 0x98, 0x81, 0x16, 0x2d, 0xcf, 0xab, 0x72, 0x25, 0xed, 0xa2, 0x05, + 0xdb, 0x37, 0x60, 0xa1, 0xd9, 0x7e, 0xb7, 0x1f, 0x1c, 0x70, 0x51, 0x95, 0x2b, 0x73, 0x46, 0xb1, + 0xc9, 0xea, 0xa7, 0xb1, 0xa2, 0x61, 0x45, 0x95, 0x2b, 0x49, 0x8e, 0x15, 0xec, 0xe2, 0xec, 0x9a, + 0x03, 0xbb, 0xe3, 0xf8, 0xd0, 0x05, 0x55, 0xae, 0xc8, 0xc6, 0x7c, 0x13, 0xab, 0x83, 0x56, 0x1f, + 0xdb, 0x47, 0xfb, 0x03, 0xd3, 0x87, 0x12, 0x55, 0xae, 0x48, 0x46, 0xb1, 0xc9, 0xea, 0x83, 0xd8, + 0x5d, 0x67, 0xdc, 0xb7, 0x7a, 0x3e, 0xf6, 0x3c, 0xea, 0xb7, 0xd8, 0x64, 0xf5, 0xc1, 0x11, 0x3c, + 0x3c, 0x71, 0xcc, 0x89, 0x0f, 0x35, 0x55, 0xb9, 0x52, 0x30, 0xe6, 0x9b, 0x58, 0x1d, 0xb2, 0x1a, + 0x5a, 0x83, 0x43, 0x55, 0xae, 0x2c, 0x50, 0xab, 0x33, 0xd6, 0x60, 0x37, 0xb4, 0x06, 0x3d, 0x55, + 0xae, 0x10, 0x8e, 0x15, 0xd6, 0x40, 0xd4, 0x0c, 0x13, 0x62, 0x69, 0x51, 0x4d, 0x08, 0x9a, 0x61, + 0x95, 0x41, 0xcd, 0x70, 0xe0, 0x6b, 0x6a, 0x42, 0xd4, 0x4c, 0x08, 0x89, 0x9d, 0x73, 0xe4, 0x05, + 0x35, 0x21, 0x6a, 0x86, 0x23, 0x43, 0x9a, 0xe1, 0xd8, 0xd7, 0xd5, 0x44, 0x50, 0x33, 0x53, 0x68, + 0xd1, 0x72, 0x49, 0x4d, 0x04, 0x35, 0xc3, 0xd1, 0x41, 0xcd, 0x70, 0xf0, 0x45, 0x35, 0x11, 0xd0, + 0x4c, 0x18, 0x2b, 0x1a, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0xb3, 0x73, 0x35, 0xc3, 0xa1, 0xcb, + 0x6a, 0x42, 0xd4, 0x8c, 0x68, 0xd5, 0xd3, 0x0c, 0x87, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0x58, + 0x4f, 0x33, 0x1c, 0x7b, 0x59, 0x4d, 0x04, 0x34, 0xc3, 0xb1, 0xd7, 0x45, 0xcd, 0x70, 0xe8, 0xc7, + 0x92, 0x9a, 0x10, 0x45, 0xc3, 0xa1, 0x37, 0x03, 0xa2, 0xe1, 0xd8, 0x4f, 0x28, 0x56, 0x54, 0x4d, + 0x18, 0x2c, 0xae, 0xc2, 0xa7, 0x14, 0x2c, 0xca, 0x86, 0x83, 0x7d, 0xd9, 0xd8, 0xdc, 0x05, 0x95, + 0xae, 0xa8, 0x92, 0x27, 0x1b, 0xd7, 0x2f, 0x89, 0xb2, 0xf1, 0x80, 0x57, 0xd1, 0xd5, 0x72, 0xd9, + 0x4c, 0x21, 0x6b, 0xba, 0x8f, 0x54, 0x55, 0xc9, 0x97, 0x8d, 0x87, 0x0c, 0xc8, 0xc6, 0xc3, 0x5e, + 0x53, 0x25, 0x51, 0x36, 0x33, 0xd0, 0xa2, 0xe5, 0xb2, 0x2a, 0x89, 0xb2, 0xf1, 0xd0, 0xa2, 0x6c, + 0x3c, 0xf0, 0x17, 0x54, 0x49, 0x90, 0xcd, 0x34, 0x56, 0x34, 0xfc, 0x45, 0x55, 0x12, 0x64, 0x13, + 0x9c, 0x1d, 0x93, 0x8d, 0x07, 0x7d, 0x43, 0x95, 0x7c, 0xd9, 0x04, 0xad, 0x72, 0xd9, 0x78, 0xd0, + 0x37, 0x55, 0x49, 0x90, 0x4d, 0x10, 0xcb, 0x65, 0xe3, 0x61, 0xdf, 0xc2, 0xf8, 0xe6, 0xca, 0xc6, + 0xc3, 0x0a, 0xb2, 0xf1, 0xa0, 0xbf, 0x43, 0x63, 0xa1, 0x27, 0x1b, 0x0f, 0x2a, 0xca, 0xc6, 0xc3, + 0xfe, 0x2e, 0xc5, 0xfa, 0xb2, 0x99, 0x06, 
0x8b, 0xab, 0xf0, 0x7b, 0x14, 0xec, 0xcb, 0xc6, 0x03, + 0xaf, 0xe0, 0x20, 0xa8, 0x6c, 0xba, 0xe6, 0x61, 0xe7, 0x68, 0x40, 0x25, 0x56, 0xa1, 0xba, 0xa9, + 0x27, 0x9d, 0xf1, 0x91, 0x49, 0x47, 0x62, 0xdb, 0x83, 0xc7, 0x6e, 0x1b, 0x59, 0xa1, 0xc6, 0x99, + 0x7c, 0x7c, 0xc2, 0x75, 0xaa, 0x9f, 0xba, 0x5c, 0xd5, 0x8c, 0x22, 0xd3, 0xd0, 0x34, 0xbe, 0xa6, + 0x0b, 0xf8, 0x1b, 0x54, 0x45, 0x75, 0xb9, 0xa6, 0x33, 0x7c, 0x4d, 0xf7, 0xf1, 0x55, 0x38, 0xef, + 0x4b, 0xc9, 0x67, 0xdc, 0xa4, 0x5a, 0xaa, 0x27, 0xaa, 0xda, 0xaa, 0xb1, 0xe0, 0x0a, 0x6a, 0x16, + 0x29, 0xd0, 0xcd, 0x2d, 0x2a, 0xa9, 0x7a, 0xa2, 0xa6, 0x7b, 0x24, 0xb1, 0x27, 0x8d, 0xca, 0x90, + 0x0b, 0xcb, 0xe7, 0xdc, 0xa6, 0xca, 0xaa, 0x27, 0xab, 0xda, 0xea, 0xaa, 0xa1, 0x70, 0x7d, 0xcd, + 0xe0, 0x04, 0xfa, 0x59, 0xa1, 0x0a, 0xab, 0x27, 0x6b, 0xba, 0xc7, 0x09, 0xf6, 0xb3, 0xe0, 0x0a, + 0xcd, 0xa7, 0x7c, 0x89, 0x2a, 0xad, 0x9e, 0xae, 0xae, 0xe9, 0x6b, 0xeb, 0xf7, 0x8c, 0x22, 0x53, + 0x9c, 0xcf, 0xd1, 0x69, 0x3f, 0x5c, 0x72, 0x3e, 0x69, 0x95, 0x6a, 0xae, 0x9e, 0xd6, 0xee, 0xac, + 0xdd, 0xd5, 0xee, 0x1a, 0x0a, 0xd7, 0x9e, 0xcf, 0x7a, 0x87, 0xb2, 0xb8, 0xf8, 0x7c, 0xd6, 0x1a, + 0x55, 0x5f, 0x5d, 0x79, 0x66, 0x0e, 0x06, 0xf6, 0x2d, 0xb5, 0xfc, 0xd2, 0x1e, 0x0f, 0xba, 0xd7, + 0xca, 0x60, 0x28, 0x5c, 0x8f, 0x62, 0xaf, 0x0b, 0xae, 0x20, 0x7d, 0xfa, 0xaf, 0xd1, 0x7b, 0x58, + 0xa1, 0x9e, 0x79, 0xd8, 0xef, 0x59, 0xf6, 0xc4, 0x34, 0x8a, 0x4c, 0x9a, 0xa1, 0x35, 0xd9, 0x0d, + 0xaf, 0xe3, 0xaf, 0x53, 0xda, 0x42, 0x3d, 0x71, 0xbb, 0xaa, 0xd1, 0x9e, 0x66, 0xad, 0xe3, 0x6e, + 0x78, 0x1d, 0x7f, 0x83, 0x72, 0x48, 0x3d, 0x71, 0xbb, 0xa6, 0x73, 0x8e, 0xb8, 0x8e, 0x77, 0xe0, + 0x42, 0x28, 0x2e, 0xb6, 0x47, 0x9d, 0x83, 0xe7, 0x66, 0xb7, 0xa4, 0xd1, 0xf0, 0xf8, 0x50, 0x56, + 0x24, 0xe3, 0x7c, 0x20, 0x44, 0xee, 0x60, 0x33, 0xb9, 0x07, 0xaf, 0x87, 0x03, 0xa5, 0xcb, 0xac, + 0xd2, 0x78, 0x89, 0xcc, 0xc5, 0x60, 0xcc, 0x0c, 0x51, 0x05, 0x07, 0xec, 0x52, 0x75, 0x1a, 0x40, + 0x7d, 0xaa, 0xef, 0x89, 0x39, 0xf5, 0x67, 0xe0, 0xe2, 0x74, 0x28, 0x75, 0xc9, 0xeb, 0x34, 0xa2, + 0x22, 0xf9, 0x42, 0x38, 0xaa, 0x4e, 0xd1, 0x67, 0xf4, 0x5d, 0xa3, 0x21, 0x56, 0xa4, 0x4f, 0xf5, + 0x7e, 0x1f, 0x4a, 0x53, 0xc1, 0xd6, 0x65, 0xdf, 0xa1, 0x31, 0x17, 0xd9, 0xaf, 0x85, 0xe2, 0x6e, + 0x98, 0x3c, 0xa3, 0xeb, 0xbb, 0x34, 0x08, 0x0b, 0xe4, 0xa9, 0x9e, 0x71, 0xc9, 0x82, 0xe1, 0xd8, + 0xe5, 0xde, 0xa3, 0x51, 0x99, 0x2f, 0x59, 0x20, 0x32, 0x8b, 0xfd, 0x86, 0xe2, 0xb3, 0xcb, 0xad, + 0xd3, 0x30, 0xcd, 0xfb, 0x0d, 0x86, 0x6a, 0x4e, 0x7e, 0x9b, 0x92, 0x77, 0x67, 0xcf, 0xf8, 0xc7, + 0x09, 0x1a, 0x60, 0x39, 0x7b, 0x77, 0xd6, 0x94, 0x3d, 0xf6, 0x8c, 0x29, 0xff, 0x84, 0xb2, 0x89, + 0xc0, 0x9e, 0x9a, 0xf3, 0x63, 0x98, 0x73, 0x6f, 0x75, 0xbd, 0xb1, 0x7d, 0x34, 0x2a, 0x35, 0x55, + 0xb9, 0x02, 0xda, 0x95, 0xa9, 0xec, 0xc7, 0xbd, 0xe4, 0x6d, 0x50, 0x94, 0x11, 0x24, 0x31, 0x2b, + 0xcc, 0x2e, 0xb3, 0xb2, 0xa3, 0x26, 0x22, 0xac, 0x30, 0x94, 0x67, 0x45, 0x20, 0x51, 0x2b, 0xae, + 0xd3, 0x67, 0x56, 0x3e, 0x50, 0xa5, 0x99, 0x56, 0xdc, 0x10, 0xc0, 0xad, 0x04, 0x48, 0x4b, 0xeb, + 0x7e, 0xbe, 0x85, 0xed, 0xe4, 0x8b, 0xe1, 0x04, 0x6c, 0x03, 0xef, 0xcf, 0xc1, 0x4a, 0x46, 0x13, + 0x06, 0x37, 0x4d, 0xfb, 0xd9, 0x08, 0x5a, 0x60, 0x34, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, + 0xa6, 0x04, 0x49, 0x9a, 0x4f, 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6d, 0x3e, 0x56, 0xce, 0xd1, 0x5f, + 0x0f, 0x5b, 0xad, 0xa7, 0x8a, 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xbd, 0xc6, 0xae, 0x22, 0x93, + 0x22, 0xe4, 0x9b, 0x9b, 0xdb, 0x1b, 0x0d, 0x63, 0xc7, 0xd8, 0xdc, 0xde, 0x53, 0x12, 0xb4, 0xad, + 0xf9, 0xb4, 0xf5, 0x60, 0x4f, 0x49, 0x92, 0x0c, 0x24, 0x68, 0x5d, 
0x8a, 0x00, 0xa4, 0x77, 0xf7, + 0x8c, 0xcd, 0xed, 0x0d, 0x25, 0x4d, 0xad, 0xec, 0x6d, 0x6e, 0x35, 0x94, 0x0c, 0x45, 0xee, 0xbd, + 0xbb, 0xf3, 0xb4, 0xa1, 0x64, 0xe9, 0xcf, 0x07, 0x86, 0xf1, 0xe0, 0x2b, 0x4a, 0x8e, 0x92, 0xb6, + 0x1e, 0xec, 0x28, 0x80, 0xcd, 0x0f, 0x1e, 0x3e, 0x6d, 0x28, 0x79, 0x52, 0x80, 0x6c, 0xf3, 0xdd, + 0xed, 0x47, 0x7b, 0x9b, 0xad, 0x6d, 0xa5, 0x50, 0x3e, 0x81, 0x12, 0x5b, 0xe6, 0xc0, 0x2a, 0xb2, + 0xa4, 0xf0, 0x1d, 0x48, 0xb1, 0x9d, 0x91, 0x50, 0x25, 0x95, 0xf0, 0xce, 0x4c, 0x53, 0x56, 0xd8, + 0x1e, 0x31, 0xda, 0xd2, 0x65, 0x48, 0xb1, 0x55, 0x5a, 0x84, 0x14, 0x5b, 0x1d, 0x19, 0x53, 0x45, + 0x56, 0x28, 0xff, 0x96, 0x0c, 0xb0, 0x61, 0xef, 0x3e, 0xef, 0x8f, 0x30, 0x21, 0xbf, 0x0c, 0x30, + 0x79, 0xde, 0x1f, 0xb5, 0x51, 0xf5, 0x3c, 0xa9, 0xcc, 0xd1, 0x1a, 0xf4, 0x77, 0xe4, 0x1a, 0x14, + 0xb0, 0xf9, 0x90, 0x79, 0x21, 0xcc, 0x25, 0x33, 0x46, 0x9e, 0xd6, 0x71, 0xc7, 0x14, 0x84, 0xd4, + 0x74, 0x4c, 0x21, 0xd3, 0x02, 0xa4, 0xa6, 0x93, 0xab, 0x80, 0xc5, 0xf6, 0x04, 0x23, 0x0a, 0xa6, + 0x8d, 0x39, 0x03, 0xfb, 0x65, 0x31, 0x86, 0xbc, 0x0d, 0xd8, 0x27, 0x9b, 0x77, 0x71, 0xfa, 0x74, + 0xb8, 0xc3, 0x5d, 0xa1, 0x3f, 0xd8, 0x6c, 0x7d, 0xc2, 0x52, 0x0b, 0x72, 0x5e, 0x3d, 0xed, 0x0b, + 0x6b, 0xf9, 0x8c, 0x14, 0x9c, 0x11, 0x60, 0x95, 0x37, 0x25, 0x06, 0xe0, 0xa3, 0x59, 0xc0, 0xd1, + 0x30, 0x12, 0x1b, 0x4e, 0xf9, 0x32, 0xcc, 0x6d, 0xdb, 0x16, 0x3b, 0xbd, 0xb8, 0x4a, 0x05, 0x90, + 0x3a, 0x25, 0x09, 0xb3, 0x27, 0xa9, 0x53, 0xbe, 0x02, 0x20, 0xb4, 0x29, 0x20, 0xed, 0xb3, 0x36, + 0xf4, 0x01, 0xd2, 0x7e, 0xf9, 0x26, 0xa4, 0xb7, 0x3a, 0xc7, 0x7b, 0x9d, 0x1e, 0xb9, 0x06, 0x30, + 0xe8, 0x4c, 0x9c, 0xf6, 0x21, 0xee, 0xc3, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c, + 0xad, 0x65, 0xfb, 0xf1, 0x02, 0xa0, 0x35, 0xe8, 0x6e, 0x99, 0x93, 0x49, 0xa7, 0x67, 0x92, 0x2a, + 0xa4, 0x2d, 0x73, 0x42, 0xa3, 0x9d, 0x84, 0xef, 0x08, 0xcb, 0xfe, 0x2a, 0xf8, 0xa8, 0x95, 0x6d, + 0x84, 0x18, 0x1c, 0x4a, 0x14, 0x48, 0x58, 0x47, 0x43, 0x7c, 0x27, 0x49, 0x19, 0xf4, 0xe7, 0xd2, + 0x25, 0x48, 0x33, 0x0c, 0x21, 0x90, 0xb4, 0x3a, 0x43, 0xb3, 0xc4, 0xfa, 0xc5, 0xdf, 0xe5, 0x5f, + 0x95, 0x00, 0xb6, 0xcd, 0x97, 0x67, 0xe8, 0xd3, 0x47, 0xc5, 0xf4, 0x99, 0x60, 0x7d, 0xde, 0x8f, + 0xeb, 0x93, 0xea, 0xec, 0xd0, 0xb6, 0xbb, 0x6d, 0xb6, 0xc5, 0xec, 0x49, 0x27, 0x47, 0x6b, 0x70, + 0xd7, 0xca, 0x1f, 0x40, 0x61, 0xd3, 0xb2, 0xcc, 0xb1, 0x3b, 0x26, 0x02, 0xc9, 0x67, 0xf6, 0xc4, + 0xe1, 0x6f, 0x4b, 0xf8, 0x9b, 0x94, 0x20, 0x39, 0xb2, 0xc7, 0x0e, 0x9b, 0x67, 0x3d, 0xa9, 0xaf, + 0xae, 0xae, 0x1a, 0x58, 0x43, 0x2e, 0x41, 0xee, 0xc0, 0xb6, 0x2c, 0xf3, 0x80, 0x4e, 0x22, 0x81, + 0x69, 0x8d, 0x5f, 0x51, 0xfe, 0x65, 0x09, 0x0a, 0x2d, 0xe7, 0x99, 0x6f, 0x5c, 0x81, 0xc4, 0x73, + 0xf3, 0x04, 0x87, 0x97, 0x30, 0xe8, 0x4f, 0x7a, 0x54, 0x7e, 0xbe, 0x33, 0x38, 0x62, 0x6f, 0x4d, + 0x05, 0x83, 0x15, 0xc8, 0x05, 0x48, 0xbf, 0x34, 0xfb, 0xbd, 0x67, 0x0e, 0xda, 0x94, 0x0d, 0x5e, + 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0x83, 0x2d, 0x25, 0x71, 0xbd, 0x2e, 0xf8, 0xeb, 0x25, 0xce, 0xc1, + 0x60, 0xa0, 0x1b, 0xd9, 0x6c, 0x57, 0xf9, 0xe8, 0xa3, 0x8f, 0x3e, 0x92, 0xcb, 0x87, 0xb0, 0xe8, + 0x1e, 0xde, 0xc0, 0x64, 0xb7, 0xa1, 0x34, 0x30, 0xed, 0xf6, 0x61, 0xdf, 0xea, 0x0c, 0x06, 0x27, + 0xed, 0x97, 0xb6, 0xd5, 0xee, 0x58, 0x6d, 0x7b, 0x72, 0xd0, 0x19, 0xe3, 0x02, 0x44, 0x77, 0xb1, + 0x38, 0x30, 0xed, 0x26, 0xa3, 0xbd, 0x6f, 0x5b, 0x0f, 0xac, 0x16, 0xe5, 0x94, 0xff, 0x20, 0x09, + 0xb9, 0xad, 0x13, 0xd7, 0xfa, 0x22, 0xa4, 0x0e, 0xec, 0x23, 0x8b, 0xad, 0x65, 0xca, 0x60, 0x05, + 0x6f, 0x8f, 0x64, 0x61, 0x8f, 0x16, 0x21, 0xf5, 0xe2, 0xc8, 0x76, 0x4c, 0x9c, 0x6e, 0xce, 
0x60, + 0x05, 0xba, 0x5a, 0x23, 0xd3, 0x29, 0x25, 0x31, 0xb9, 0xa5, 0x3f, 0xfd, 0xf9, 0xa7, 0xce, 0x30, + 0x7f, 0xb2, 0x02, 0x69, 0x9b, 0xae, 0xfe, 0xa4, 0x94, 0xc6, 0x77, 0x35, 0x01, 0x2e, 0xee, 0x8a, + 0xc1, 0x51, 0x64, 0x13, 0x16, 0x5e, 0x9a, 0xed, 0xe1, 0xd1, 0xc4, 0x69, 0xf7, 0xec, 0x76, 0xd7, + 0x34, 0x47, 0xe6, 0xb8, 0x34, 0x87, 0x3d, 0x09, 0x3e, 0x61, 0xd6, 0x42, 0x1a, 0xf3, 0x2f, 0xcd, + 0xad, 0xa3, 0x89, 0xb3, 0x61, 0x3f, 0x46, 0x16, 0xa9, 0x42, 0x6e, 0x6c, 0x52, 0x4f, 0x40, 0x07, + 0x5b, 0x08, 0xf7, 0x1e, 0xa0, 0x66, 0xc7, 0xe6, 0x08, 0x2b, 0xc8, 0x3a, 0x64, 0xf7, 0xfb, 0xcf, + 0xcd, 0xc9, 0x33, 0xb3, 0x5b, 0xca, 0xa8, 0x52, 0x65, 0x5e, 0xbb, 0xe8, 0x73, 0xbc, 0x65, 0x5d, + 0x79, 0x64, 0x0f, 0xec, 0xb1, 0xe1, 0x41, 0xc9, 0x7d, 0xc8, 0x4d, 0xec, 0xa1, 0xc9, 0xf4, 0x9d, + 0xc5, 0xa0, 0x7a, 0x79, 0x16, 0x6f, 0xd7, 0x1e, 0x9a, 0xae, 0x07, 0x73, 0xf1, 0x64, 0x99, 0x0d, + 0x74, 0x9f, 0x5e, 0x9d, 0x4b, 0x80, 0x4f, 0x03, 0x74, 0x40, 0x78, 0x95, 0x26, 0x4b, 0x74, 0x40, + 0xbd, 0x43, 0x7a, 0x23, 0x2a, 0xe5, 0x31, 0xaf, 0xf4, 0xca, 0x4b, 0xb7, 0x20, 0xe7, 0x19, 0xf4, + 0x5d, 0x1f, 0x73, 0x37, 0x39, 0xf4, 0x07, 0xcc, 0xf5, 0x31, 0x5f, 0xf3, 0x06, 0xa4, 0x70, 0xd8, + 0x34, 0x42, 0x19, 0x0d, 0x1a, 0x10, 0x73, 0x90, 0xda, 0x30, 0x1a, 0x8d, 0x6d, 0x45, 0xc2, 0xd8, + 0xf8, 0xf4, 0xdd, 0x86, 0x22, 0x0b, 0x8a, 0xfd, 0x6d, 0x09, 0x12, 0x8d, 0x63, 0x54, 0x0b, 0x9d, + 0x86, 0x7b, 0xa2, 0xe9, 0x6f, 0xad, 0x06, 0xc9, 0xa1, 0x3d, 0x36, 0xc9, 0xf9, 0x19, 0xb3, 0x2c, + 0xf5, 0x70, 0xbf, 0x84, 0x57, 0xe4, 0xc6, 0xb1, 0x63, 0x20, 0x5e, 0x7b, 0x0b, 0x92, 0x8e, 0x79, + 0xec, 0xcc, 0xe6, 0x3d, 0x63, 0x1d, 0x50, 0x80, 0x76, 0x13, 0xd2, 0xd6, 0xd1, 0x70, 0xdf, 0x1c, + 0xcf, 0x86, 0xf6, 0x71, 0x7a, 0x1c, 0x52, 0x7e, 0x0f, 0x94, 0x47, 0xf6, 0x70, 0x34, 0x30, 0x8f, + 0x1b, 0xc7, 0x8e, 0x69, 0x4d, 0xfa, 0xb6, 0x45, 0xf5, 0x7c, 0xd8, 0x1f, 0xa3, 0x17, 0xc1, 0xb7, + 0x62, 0x2c, 0xd0, 0x53, 0x3d, 0x31, 0x0f, 0x6c, 0xab, 0xcb, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3, + 0xac, 0x3f, 0xa6, 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x40, 0x91, 0xe7, 0x18, 0x13, 0xde, + 0x71, 0xf9, 0x06, 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x52, 0xce, + 0xd1, 0x65, 0x6d, 0x6d, 0x37, 0x14, 0x89, 0xfe, 0xd8, 0x7b, 0xbf, 0x15, 0x58, 0xca, 0x4b, 0x50, + 0xf0, 0xc6, 0xbe, 0x6b, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52, + 0x8d, 0xe1, 0xc8, 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90, + 0x19, 0xf2, 0xf9, 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa, + 0x42, 0x46, 0xf0, 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2, + 0x16, 0x64, 0x58, 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab, + 0x63, 0x17, 0x95, 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf, + 0x9f, 0x82, 0xac, 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a, + 0x33, 0x32, 0xb2, 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e, + 0xaf, 0xb1, 0xa6, 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e, + 0x85, 0xfe, 0x98, 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7, + 0xcf, 0x36, 0xfd, 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff, + 0xf1, 0x01, 0x35, 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62, + 0xce, 0x82, 0x47, 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0, + 0xb9, 0xf4, 0xf3, 
0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4, + 0x3c, 0xc3, 0x93, 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b, + 0xc0, 0xc9, 0x15, 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc, + 0xb2, 0x79, 0x89, 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e, + 0xb0, 0xe4, 0x0b, 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x26, 0x9e, 0x86, + 0x25, 0xca, 0xdb, 0xe9, 0x5b, 0x87, 0xa5, 0x22, 0xae, 0x44, 0xa2, 0x6f, 0x1d, 0x1a, 0xa9, 0x26, + 0xad, 0x61, 0x1a, 0xd8, 0xa6, 0x6d, 0x0a, 0xb6, 0x25, 0x6f, 0xb3, 0x46, 0x5a, 0x45, 0x4a, 0x90, + 0x6a, 0xb6, 0xb7, 0x3b, 0x56, 0x69, 0x81, 0xf1, 0xac, 0x8e, 0x65, 0x24, 0x9b, 0xdb, 0x1d, 0x8b, + 0xbc, 0x05, 0x89, 0xc9, 0xd1, 0x7e, 0x89, 0x84, 0xbf, 0xac, 0xec, 0x1e, 0xed, 0xbb, 0x43, 0x31, + 0x28, 0x82, 0x2c, 0x43, 0x76, 0xe2, 0x8c, 0xdb, 0xbf, 0x60, 0x8e, 0xed, 0xd2, 0x79, 0x5c, 0xc2, + 0x73, 0x46, 0x66, 0xe2, 0x8c, 0x3f, 0x30, 0xc7, 0xf6, 0x19, 0x9d, 0x5f, 0xf9, 0x0a, 0xe4, 0x05, + 0xbb, 0xa4, 0x08, 0x92, 0xc5, 0x6e, 0x0a, 0x75, 0xe9, 0x8e, 0x21, 0x59, 0xe5, 0x3d, 0x28, 0xb8, + 0x39, 0x0c, 0xce, 0x57, 0xa3, 0x27, 0x69, 0x60, 0x8f, 0xf1, 0x7c, 0xce, 0x6b, 0x97, 0xc4, 0x10, + 0xe5, 0xc3, 0x78, 0xb8, 0x60, 0xd0, 0xb2, 0x12, 0x1a, 0x8a, 0x54, 0xfe, 0xa1, 0x04, 0x85, 0x2d, + 0x7b, 0xec, 0x3f, 0x30, 0x2f, 0x42, 0x6a, 0xdf, 0xb6, 0x07, 0x13, 0x34, 0x9b, 0x35, 0x58, 0x81, + 0xbc, 0x01, 0x05, 0xfc, 0xe1, 0xe6, 0x9e, 0xb2, 0xf7, 0xb4, 0x91, 0xc7, 0x7a, 0x9e, 0x70, 0x12, + 0x48, 0xf6, 0x2d, 0x67, 0xc2, 0x3d, 0x19, 0xfe, 0x26, 0x5f, 0x80, 0x3c, 0xfd, 0xeb, 0x32, 0x93, + 0xde, 0x85, 0x15, 0x68, 0x35, 0x27, 0xbe, 0x05, 0x73, 0xb8, 0xfb, 0x1e, 0x2c, 0xe3, 0x3d, 0x63, + 0x14, 0x58, 0x03, 0x07, 0x96, 0x20, 0xc3, 0x5c, 0xc1, 0x04, 0xbf, 0x96, 0xe5, 0x0c, 0xb7, 0x48, + 0xdd, 0x2b, 0x66, 0x02, 0x2c, 0xdc, 0x67, 0x0c, 0x5e, 0x2a, 0x3f, 0x80, 0x2c, 0x46, 0xa9, 0xd6, + 0xa0, 0x4b, 0xca, 0x20, 0xf5, 0x4a, 0x26, 0xc6, 0xc8, 0x45, 0xe1, 0x9a, 0xcf, 0x9b, 0x57, 0x36, + 0x0c, 0xa9, 0xb7, 0xb4, 0x00, 0xd2, 0x06, 0xbd, 0x77, 0x1f, 0x73, 0x37, 0x2d, 0x1d, 0x97, 0x5b, + 0xdc, 0xc4, 0xb6, 0xf9, 0x32, 0xce, 0xc4, 0xb6, 0xf9, 0x92, 0x99, 0xb8, 0x3a, 0x65, 0x82, 0x96, + 0x4e, 0xf8, 0xa7, 0x43, 0xe9, 0xa4, 0x5c, 0x85, 0x39, 0x3c, 0x9e, 0x7d, 0xab, 0xb7, 0x63, 0xf7, + 0x2d, 0xbc, 0xe7, 0x1f, 0xe2, 0x3d, 0x49, 0x32, 0xa4, 0x43, 0xba, 0x07, 0xe6, 0x71, 0xe7, 0x80, + 0xdd, 0x38, 0xb3, 0x06, 0x2b, 0x94, 0x3f, 0x4b, 0xc2, 0x3c, 0x77, 0xad, 0xef, 0xf7, 0x9d, 0x67, + 0x5b, 0x9d, 0x11, 0x79, 0x0a, 0x05, 0xea, 0x55, 0xdb, 0xc3, 0xce, 0x68, 0x44, 0x8f, 0xaf, 0x84, + 0x57, 0x8d, 0xeb, 0x53, 0xae, 0x9a, 0xe3, 0x57, 0xb6, 0x3b, 0x43, 0x73, 0x8b, 0x61, 0x1b, 0x96, + 0x33, 0x3e, 0x31, 0xf2, 0x96, 0x5f, 0x43, 0x36, 0x21, 0x3f, 0x9c, 0xf4, 0x3c, 0x63, 0x32, 0x1a, + 0xab, 0x44, 0x1a, 0xdb, 0x9a, 0xf4, 0x02, 0xb6, 0x60, 0xe8, 0x55, 0xd0, 0x81, 0x51, 0x7f, 0xec, + 0xd9, 0x4a, 0x9c, 0x32, 0x30, 0xea, 0x3a, 0x82, 0x03, 0xdb, 0xf7, 0x6b, 0xc8, 0x63, 0x00, 0x7a, + 0xbc, 0x1c, 0x9b, 0xa6, 0x4e, 0xa8, 0xa0, 0xbc, 0xf6, 0x66, 0xa4, 0xad, 0x5d, 0x67, 0xbc, 0x67, + 0xef, 0x3a, 0x63, 0x66, 0x88, 0x1e, 0x4c, 0x2c, 0x2e, 0xbd, 0x03, 0x4a, 0x78, 0xfe, 0xe2, 0x8d, + 0x3c, 0x35, 0xe3, 0x46, 0x9e, 0xe3, 0x37, 0xf2, 0xba, 0x7c, 0x57, 0x5a, 0x7a, 0x0f, 0x8a, 0xa1, + 0x29, 0x8b, 0x74, 0xc2, 0xe8, 0xb7, 0x45, 0x7a, 0x5e, 0x7b, 0x5d, 0xf8, 0x9c, 0x2d, 0x6e, 0xb8, + 0x68, 0xf7, 0x1d, 0x50, 0xc2, 0xd3, 0x17, 0x0d, 0x67, 0x63, 0x32, 0x05, 0xe4, 0xdf, 0x87, 0xb9, + 0xc0, 0x94, 0x45, 0x72, 0xee, 0x94, 0x49, 
0x95, 0x7f, 0x29, 0x05, 0xa9, 0x96, 0x65, 0xda, 0x87, + 0xe4, 0xf5, 0x60, 0x9c, 0x7c, 0x72, 0xce, 0x8d, 0x91, 0x17, 0x43, 0x31, 0xf2, 0xc9, 0x39, 0x2f, + 0x42, 0x5e, 0x0c, 0x45, 0x48, 0xb7, 0xa9, 0xa6, 0x93, 0xcb, 0x53, 0xf1, 0xf1, 0xc9, 0x39, 0x21, + 0x38, 0x5e, 0x9e, 0x0a, 0x8e, 0x7e, 0x73, 0x4d, 0xa7, 0x0e, 0x35, 0x18, 0x19, 0x9f, 0x9c, 0xf3, + 0xa3, 0xe2, 0x72, 0x38, 0x2a, 0x7a, 0x8d, 0x35, 0x9d, 0x0d, 0x49, 0x88, 0x88, 0x38, 0x24, 0x16, + 0x0b, 0x97, 0xc3, 0xb1, 0x10, 0x79, 0x3c, 0x0a, 0x2e, 0x87, 0xa3, 0x20, 0x36, 0xf2, 0xa8, 0x77, + 0x31, 0x14, 0xf5, 0xd0, 0x28, 0x0b, 0x77, 0xcb, 0xe1, 0x70, 0xc7, 0x78, 0xc2, 0x48, 0xc5, 0x58, + 0xe7, 0x35, 0xd6, 0x74, 0xa2, 0x85, 0x02, 0x5d, 0xf4, 0x6d, 0x1f, 0xf7, 0x02, 0x9d, 0xbe, 0x4e, + 0x97, 0xcd, 0xbd, 0x88, 0x16, 0x63, 0xbe, 0xf8, 0xe3, 0x6a, 0xba, 0x17, 0x31, 0x0d, 0x32, 0x87, + 0x3c, 0x01, 0x56, 0xd0, 0x73, 0x09, 0xb2, 0xc4, 0xcd, 0x5f, 0x69, 0xb6, 0xd1, 0x83, 0xd1, 0x79, + 0x1d, 0xb2, 0x3b, 0x7d, 0x05, 0xe6, 0x9a, 0xed, 0xa7, 0x9d, 0x71, 0xcf, 0x9c, 0x38, 0xed, 0xbd, + 0x4e, 0xcf, 0x7b, 0x44, 0xa0, 0xfb, 0x9f, 0x6f, 0xf2, 0x96, 0xbd, 0x4e, 0x8f, 0x5c, 0x70, 0xc5, + 0xd5, 0xc5, 0x56, 0x89, 0xcb, 0x6b, 0xe9, 0x75, 0xba, 0x68, 0xcc, 0x18, 0xfa, 0xc2, 0x05, 0xee, + 0x0b, 0x1f, 0x66, 0x20, 0x75, 0x64, 0xf5, 0x6d, 0xeb, 0x61, 0x0e, 0x32, 0x8e, 0x3d, 0x1e, 0x76, + 0x1c, 0xbb, 0xfc, 0x23, 0x09, 0xe0, 0x91, 0x3d, 0x1c, 0x1e, 0x59, 0xfd, 0x17, 0x47, 0x26, 0xb9, + 0x02, 0xf9, 0x61, 0xe7, 0xb9, 0xd9, 0x1e, 0x9a, 0xed, 0x83, 0xb1, 0x7b, 0x0e, 0x72, 0xb4, 0x6a, + 0xcb, 0x7c, 0x34, 0x3e, 0x21, 0x25, 0xf7, 0x8a, 0x8e, 0xda, 0x41, 0x49, 0xf2, 0x2b, 0xfb, 0x22, + 0xbf, 0x74, 0xa6, 0xf9, 0x1e, 0xba, 0xd7, 0x4e, 0x96, 0x47, 0x64, 0xf8, 0xee, 0x61, 0x89, 0x4a, + 0xde, 0x31, 0x87, 0xa3, 0xf6, 0x01, 0x4a, 0x85, 0xca, 0x21, 0x45, 0xcb, 0x8f, 0xc8, 0x6d, 0x48, + 0x1c, 0xd8, 0x03, 0x14, 0xc9, 0x29, 0xfb, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, 0x27, 0x4c, 0x36, + 0x79, 0x6d, 0x41, 0xb8, 0x27, 0xb0, 0xd0, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, 0xf7, 0x8d, 0x22, + 0x24, 0x9a, 0xad, 0x16, 0x8d, 0xfd, 0xcd, 0x56, 0x6b, 0x4d, 0x91, 0xea, 0x5f, 0x82, 0x6c, 0x6f, + 0x6c, 0x9a, 0xd4, 0x3d, 0xcc, 0xce, 0x39, 0x3e, 0xc4, 0x58, 0xe7, 0x81, 0xea, 0x5b, 0x90, 0x39, + 0x60, 0x59, 0x07, 0x89, 0x48, 0x6b, 0x4b, 0x7f, 0xc8, 0x1e, 0x55, 0x96, 0xfc, 0xe6, 0x70, 0x9e, + 0x62, 0xb8, 0x36, 0xea, 0x3b, 0x90, 0x1b, 0xb7, 0x4f, 0x33, 0xf8, 0x31, 0x8b, 0x2e, 0x71, 0x06, + 0xb3, 0x63, 0x5e, 0x55, 0x6f, 0xc0, 0x82, 0x65, 0xbb, 0xdf, 0x50, 0xda, 0x5d, 0x76, 0xc6, 0x2e, + 0x4e, 0x5f, 0xe5, 0x5c, 0xe3, 0x26, 0xfb, 0x6e, 0x69, 0xd9, 0xbc, 0x81, 0x9d, 0xca, 0xfa, 0x23, + 0x50, 0x04, 0x33, 0x98, 0x7a, 0xc6, 0x59, 0x39, 0x64, 0x1f, 0x4a, 0x3d, 0x2b, 0x78, 0xee, 0x43, + 0x46, 0xd8, 0xc9, 0x8c, 0x31, 0xd2, 0x63, 0x5f, 0x9d, 0x3d, 0x23, 0xe8, 0xea, 0xa6, 0x8d, 0x50, + 0x5f, 0x13, 0x6d, 0xe4, 0x19, 0xfb, 0x20, 0x2d, 0x1a, 0xa9, 0xe9, 0xa1, 0x55, 0x39, 0x3a, 0x75, + 0x28, 0x7d, 0xf6, 0x3d, 0xd9, 0xb3, 0xc2, 0x1c, 0xe0, 0x0c, 0x33, 0xf1, 0x83, 0xf9, 0x90, 0x7d, + 0x6a, 0x0e, 0x98, 0x99, 0x1a, 0xcd, 0xe4, 0xd4, 0xd1, 0x3c, 0x67, 0xdf, 0x75, 0x3d, 0x33, 0xbb, + 0xb3, 0x46, 0x33, 0x39, 0x75, 0x34, 0x03, 0xf6, 0xc5, 0x37, 0x60, 0xa6, 0xa6, 0xd7, 0x37, 0x80, + 0x88, 0x5b, 0xcd, 0xe3, 0x44, 0x8c, 0x9d, 0x21, 0xfb, 0x8e, 0xef, 0x6f, 0x36, 0xa3, 0xcc, 0x32, + 0x14, 0x3f, 0x20, 0x8b, 0x7d, 0xe2, 0x0f, 0x1a, 0xaa, 0xe9, 0xf5, 0x4d, 0x38, 0x2f, 0x4e, 0xec, + 0x0c, 0x43, 0xb2, 0x55, 0xa9, 0x52, 0x34, 0x16, 0xfc, 0xa9, 0x71, 0xce, 0x4c, 0x53, 0xf1, 0x83, + 0x1a, 0xa9, 0x52, 0x45, 0x99, 0x32, 0x55, 0xd3, 0xeb, 0x0f, 0xa0, 
0x28, 0x98, 0xda, 0xc7, 0x08, + 0x1d, 0x6d, 0xe6, 0x05, 0xfb, 0x5f, 0x0b, 0xcf, 0x0c, 0x8d, 0xe8, 0xe1, 0x1d, 0xe3, 0x31, 0x2e, + 0xda, 0xc8, 0x98, 0xfd, 0xa3, 0x80, 0x3f, 0x16, 0x64, 0x84, 0x8e, 0x04, 0xe6, 0xdf, 0x71, 0x56, + 0x26, 0xec, 0x5f, 0x08, 0xfc, 0xa1, 0x50, 0x42, 0xbd, 0x1f, 0x98, 0x8e, 0x49, 0x83, 0x5c, 0x8c, + 0x0d, 0x07, 0x3d, 0xf2, 0x9b, 0x91, 0x80, 0x15, 0xf1, 0x81, 0x44, 0x98, 0x36, 0x2d, 0xd6, 0x37, + 0x61, 0xfe, 0xec, 0x0e, 0xe9, 0x63, 0x89, 0x65, 0xcb, 0xd5, 0x15, 0x9a, 0x50, 0x1b, 0x73, 0xdd, + 0x80, 0x5f, 0x6a, 0xc0, 0xdc, 0x99, 0x9d, 0xd2, 0x27, 0x12, 0xcb, 0x39, 0xa9, 0x25, 0xa3, 0xd0, + 0x0d, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, 0xa5, 0x4f, 0x25, 0xf6, 0x40, 0xa1, 0x6b, 0x9e, 0x11, 0xd7, + 0x33, 0xcd, 0x9d, 0xd9, 0x2d, 0x7d, 0x95, 0x65, 0x94, 0xb2, 0x5e, 0x15, 0x8d, 0xa0, 0x2f, 0x98, + 0x3f, 0xbb, 0x5b, 0xfa, 0x9a, 0x84, 0x8f, 0x15, 0xb2, 0xae, 0x7b, 0xeb, 0xe2, 0x79, 0xa6, 0xf9, + 0xb3, 0xbb, 0xa5, 0xaf, 0x4b, 0xf8, 0xa4, 0x21, 0xeb, 0xeb, 0x01, 0x33, 0xc1, 0xd1, 0x9c, 0xee, + 0x96, 0xbe, 0x21, 0xe1, 0x2b, 0x83, 0xac, 0xd7, 0x3c, 0x33, 0xbb, 0x53, 0xa3, 0x39, 0xdd, 0x2d, + 0x7d, 0x13, 0x6f, 0xf1, 0x75, 0x59, 0xbf, 0x13, 0x30, 0x83, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9, + 0x5b, 0x12, 0x3e, 0x06, 0xc9, 0xfa, 0x5d, 0xc3, 0xed, 0xdd, 0xf7, 0x4c, 0xc5, 0x57, 0x70, 0x4b, + 0x9f, 0x49, 0xf8, 0x66, 0x24, 0xeb, 0xf7, 0x82, 0x86, 0xd0, 0x33, 0x29, 0xaf, 0xe2, 0x96, 0xbe, + 0x4d, 0x2d, 0x15, 0xeb, 0xf2, 0xfa, 0xaa, 0xe1, 0x0e, 0x40, 0xf0, 0x4c, 0xca, 0xab, 0xb8, 0xa5, + 0xef, 0x50, 0x53, 0x4a, 0x5d, 0x5e, 0x5f, 0x0b, 0x99, 0xaa, 0xe9, 0xf5, 0x47, 0x50, 0x38, 0xab, + 0x5b, 0xfa, 0xae, 0xf8, 0x16, 0x97, 0xef, 0x0a, 0xbe, 0x69, 0x47, 0xd8, 0xb3, 0x53, 0x1d, 0xd3, + 0xf7, 0x30, 0xc7, 0xa9, 0xcf, 0x3d, 0x61, 0xef, 0x55, 0x8c, 0xe0, 0x6f, 0x1f, 0x73, 0x53, 0x5b, + 0xfe, 0xf9, 0x38, 0xd5, 0x47, 0x7d, 0x5f, 0xc2, 0x47, 0xad, 0x02, 0x37, 0x88, 0x78, 0xef, 0xa4, + 0x30, 0x87, 0xf5, 0xa1, 0x3f, 0xcb, 0xd3, 0xbc, 0xd5, 0x0f, 0xa4, 0x57, 0x71, 0x57, 0xf5, 0x44, + 0x6b, 0xbb, 0xe1, 0x2d, 0x06, 0xd6, 0xbc, 0x0d, 0xc9, 0x63, 0x6d, 0x75, 0x4d, 0xbc, 0x92, 0x89, + 0x6f, 0xb9, 0xcc, 0x49, 0xe5, 0xb5, 0xa2, 0xf0, 0xdc, 0x3d, 0x1c, 0x39, 0x27, 0x06, 0xb2, 0x38, + 0x5b, 0x8b, 0x64, 0x7f, 0x12, 0xc3, 0xd6, 0x38, 0xbb, 0x1a, 0xc9, 0xfe, 0x34, 0x86, 0x5d, 0xe5, + 0x6c, 0x3d, 0x92, 0xfd, 0xd5, 0x18, 0xb6, 0xce, 0xd9, 0xeb, 0x91, 0xec, 0xaf, 0xc5, 0xb0, 0xd7, + 0x39, 0xbb, 0x16, 0xc9, 0xfe, 0x7a, 0x0c, 0xbb, 0xc6, 0xd9, 0x77, 0x22, 0xd9, 0xdf, 0x88, 0x61, + 0xdf, 0xe1, 0xec, 0xbb, 0x91, 0xec, 0x6f, 0xc6, 0xb0, 0xef, 0x72, 0xf6, 0xbd, 0x48, 0xf6, 0xb7, + 0x62, 0xd8, 0xf7, 0x18, 0x7b, 0x6d, 0x35, 0x92, 0xfd, 0x59, 0x34, 0x7b, 0x6d, 0x95, 0xb3, 0xa3, + 0xb5, 0xf6, 0xed, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x13, 0xc3, 0xe6, 0x5a, 0x5b, + 0x8b, 0xd6, 0xda, 0x77, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x5e, 0x0c, 0x9b, 0x6b, + 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8f, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x07, 0x31, 0x6c, + 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x51, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x7f, 0x1c, + 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x9f, 0xc4, 0xb0, 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, + 0x69, 0x34, 0x5b, 0xe3, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x67, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, + 0x6b, 0x7f, 0x1e, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0xc3, 0x18, 0x36, 0xd7, 0x9a, 0x16, + 0xad, 0xb5, 0xbf, 0x88, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xcb, 0x18, 0x36, 0xd7, 0x9a, + 0x16, 0xad, 0xb5, 0xbf, 0x8a, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xeb, 0x18, 0x36, 
0xd7, + 0x9a, 0x16, 0xad, 0xb5, 0xbf, 0x89, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xdb, 0x18, 0x36, + 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0x7f, 0x17, 0xcd, 0xae, 0x72, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xf7, + 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x21, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, + 0x3f, 0xc6, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0x51, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, + 0xb5, 0x7f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xcf, 0x31, 0x6c, 0xae, 0xb5, 0x6a, + 0xb4, 0xd6, 0xfe, 0x25, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc6, 0xb0, 0xb9, 0xd6, + 0xaa, 0xd1, 0x5a, 0xfb, 0xb7, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0x7f, 0x8f, 0x66, 0xeb, + 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x23, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0x3f, 0x63, + 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x2b, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0xbf, + 0x63, 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x27, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, + 0xc7, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0x3f, 0x89, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, + 0xfb, 0xdf, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0xff, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x47, + 0x6b, 0xed, 0xff, 0xa3, 0xd9, 0xeb, 0xab, 0x3f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x00, 0xcd, + 0x32, 0x57, 0x39, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/github.com/golang/protobuf/proto/testdata/test.proto new file mode 100644 index 0000000..70e3cfc --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/testdata/test.proto @@ -0,0 +1,548 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A feature-rich test file for the protocol compiler and libraries. 
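+// The bindings in test.pb.go above are generated from this file; a
+// typical invocation, assuming protoc and protoc-gen-go are installed
+// (the exact flags are illustrative):
+//
+//   protoc --go_out=. test.proto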
+ +syntax = "proto2"; + +package testdata; + +enum FOO { FOO1 = 1; }; + +message GoEnum { + required FOO foo = 1; +} + +message GoTestField { + required string Label = 1; + required string Type = 2; +} + +message GoTest { + // An enum, for completeness. + enum KIND { + VOID = 0; + + // Basic types + BOOL = 1; + BYTES = 2; + FINGERPRINT = 3; + FLOAT = 4; + INT = 5; + STRING = 6; + TIME = 7; + + // Groupings + TUPLE = 8; + ARRAY = 9; + MAP = 10; + + // Table types + TABLE = 11; + + // Functions + FUNCTION = 12; // last tag + }; + + // Some typical parameters + required KIND Kind = 1; + optional string Table = 2; + optional int32 Param = 3; + + // Required, repeated and optional foreign fields. + required GoTestField RequiredField = 4; + repeated GoTestField RepeatedField = 5; + optional GoTestField OptionalField = 6; + + // Required fields of all basic types + required bool F_Bool_required = 10; + required int32 F_Int32_required = 11; + required int64 F_Int64_required = 12; + required fixed32 F_Fixed32_required = 13; + required fixed64 F_Fixed64_required = 14; + required uint32 F_Uint32_required = 15; + required uint64 F_Uint64_required = 16; + required float F_Float_required = 17; + required double F_Double_required = 18; + required string F_String_required = 19; + required bytes F_Bytes_required = 101; + required sint32 F_Sint32_required = 102; + required sint64 F_Sint64_required = 103; + + // Repeated fields of all basic types + repeated bool F_Bool_repeated = 20; + repeated int32 F_Int32_repeated = 21; + repeated int64 F_Int64_repeated = 22; + repeated fixed32 F_Fixed32_repeated = 23; + repeated fixed64 F_Fixed64_repeated = 24; + repeated uint32 F_Uint32_repeated = 25; + repeated uint64 F_Uint64_repeated = 26; + repeated float F_Float_repeated = 27; + repeated double F_Double_repeated = 28; + repeated string F_String_repeated = 29; + repeated bytes F_Bytes_repeated = 201; + repeated sint32 F_Sint32_repeated = 202; + repeated sint64 F_Sint64_repeated = 203; + + // Optional fields of all basic types + optional bool F_Bool_optional = 30; + optional int32 F_Int32_optional = 31; + optional int64 F_Int64_optional = 32; + optional fixed32 F_Fixed32_optional = 33; + optional fixed64 F_Fixed64_optional = 34; + optional uint32 F_Uint32_optional = 35; + optional uint64 F_Uint64_optional = 36; + optional float F_Float_optional = 37; + optional double F_Double_optional = 38; + optional string F_String_optional = 39; + optional bytes F_Bytes_optional = 301; + optional sint32 F_Sint32_optional = 302; + optional sint64 F_Sint64_optional = 303; + + // Default-valued fields of all basic types + optional bool F_Bool_defaulted = 40 [default=true]; + optional int32 F_Int32_defaulted = 41 [default=32]; + optional int64 F_Int64_defaulted = 42 [default=64]; + optional fixed32 F_Fixed32_defaulted = 43 [default=320]; + optional fixed64 F_Fixed64_defaulted = 44 [default=640]; + optional uint32 F_Uint32_defaulted = 45 [default=3200]; + optional uint64 F_Uint64_defaulted = 46 [default=6400]; + optional float F_Float_defaulted = 47 [default=314159.]; + optional double F_Double_defaulted = 48 [default=271828.]; + optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; + optional sint32 F_Sint32_defaulted = 402 [default = -32]; + optional sint64 F_Sint64_defaulted = 403 [default = -64]; + + // Packed repeated fields (no string or bytes). 
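+  // With [packed=true] a repeated scalar is written as one
+  // length-delimited record (key = field_number<<3 | 2) holding the
+  // concatenated element payloads, instead of one key per element.
+  // As a worked example, field 50 holding {true, false} encodes as
+  //   0x92 0x03 0x02 0x01 0x00
+  // (varint key for 50<<3|2 = 402, then length 2, then the two bools).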
+ repeated bool F_Bool_repeated_packed = 50 [packed=true]; + repeated int32 F_Int32_repeated_packed = 51 [packed=true]; + repeated int64 F_Int64_repeated_packed = 52 [packed=true]; + repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; + repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; + repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; + repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; + repeated float F_Float_repeated_packed = 57 [packed=true]; + repeated double F_Double_repeated_packed = 58 [packed=true]; + repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; + repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; + + // Required, repeated, and optional groups. + required group RequiredGroup = 70 { + required string RequiredField = 71; + }; + + repeated group RepeatedGroup = 80 { + required string RequiredField = 81; + }; + + optional group OptionalGroup = 90 { + required string RequiredField = 91; + }; +} + +// For testing a group containing a required field. +message GoTestRequiredGroupField { + required group Group = 1 { + required int32 Field = 2; + }; +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +message GoSkipTest { + required int32 skip_int32 = 11; + required fixed32 skip_fixed32 = 12; + required fixed64 skip_fixed64 = 13; + required string skip_string = 14; + required group SkipGroup = 15 { + required int32 group_int32 = 16; + required string group_string = 17; + } +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +message NonPackedTest { + repeated int32 a = 1; +} + +message PackedTest { + repeated int32 b = 1 [packed=true]; +} + +message MaxTag { + // Maximum possible tag number. + optional string last_field = 536870911; +} + +message OldMessage { + message Nested { + optional string name = 1; + } + optional Nested nested = 1; + + optional int32 num = 2; +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +message NewMessage { + message Nested { + optional string name = 1; + optional string food_group = 2; + } + optional Nested nested = 1; + + // This is an int32 in OldMessage. + optional int64 num = 2; +} + +// Smaller tests for ASCII formatting. + +message InnerMessage { + required string host = 1; + optional int32 port = 2 [default=4000]; + optional bool connected = 3; +} + +message OtherMessage { + optional int64 key = 1; + optional bytes value = 2; + optional float weight = 3; + optional InnerMessage inner = 4; + + extensions 100 to max; +} + +message RequiredInnerMessage { + required InnerMessage leo_finally_won_an_oscar = 1; +} + +message MyMessage { + required int32 count = 1; + optional string name = 2; + optional string quote = 3; + repeated string pet = 4; + optional InnerMessage inner = 5; + repeated OtherMessage others = 6; + optional RequiredInnerMessage we_must_go_deeper = 13; + repeated InnerMessage rep_inner = 12; + + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + }; + optional Color bikeshed = 7; + + optional group SomeGroup = 8 { + optional int32 group_field = 9; + } + + // This field becomes [][]byte in the generated code. 
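+  // Each bytes element maps to a Go []byte, so the Go field is a slice
+  // of byte slices, e.g. m.RepBytes = [][]byte{[]byte("a"), []byte("b")}.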
+ repeated bytes rep_bytes = 10; + + optional double bigfloat = 11; + + extensions 100 to max; +} + +message Ext { + extend MyMessage { + optional Ext more = 103; + optional string text = 104; + optional int32 number = 105; + } + + optional string data = 1; +} + +extend MyMessage { + repeated string greeting = 106; +} + +message ComplexExtension { + optional int32 first = 1; + optional int32 second = 2; + repeated int32 third = 3; +} + +extend OtherMessage { + optional ComplexExtension complex = 200; + repeated ComplexExtension r_complex = 201; +} + +message DefaultsMessage { + enum DefaultsEnum { + ZERO = 0; + ONE = 1; + TWO = 2; + }; + extensions 100 to max; +} + +extend DefaultsMessage { + optional double no_default_double = 101; + optional float no_default_float = 102; + optional int32 no_default_int32 = 103; + optional int64 no_default_int64 = 104; + optional uint32 no_default_uint32 = 105; + optional uint64 no_default_uint64 = 106; + optional sint32 no_default_sint32 = 107; + optional sint64 no_default_sint64 = 108; + optional fixed32 no_default_fixed32 = 109; + optional fixed64 no_default_fixed64 = 110; + optional sfixed32 no_default_sfixed32 = 111; + optional sfixed64 no_default_sfixed64 = 112; + optional bool no_default_bool = 113; + optional string no_default_string = 114; + optional bytes no_default_bytes = 115; + optional DefaultsMessage.DefaultsEnum no_default_enum = 116; + + optional double default_double = 201 [default = 3.1415]; + optional float default_float = 202 [default = 3.14]; + optional int32 default_int32 = 203 [default = 42]; + optional int64 default_int64 = 204 [default = 43]; + optional uint32 default_uint32 = 205 [default = 44]; + optional uint64 default_uint64 = 206 [default = 45]; + optional sint32 default_sint32 = 207 [default = 46]; + optional sint64 default_sint64 = 208 [default = 47]; + optional fixed32 default_fixed32 = 209 [default = 48]; + optional fixed64 default_fixed64 = 210 [default = 49]; + optional sfixed32 default_sfixed32 = 211 [default = 50]; + optional sfixed64 default_sfixed64 = 212 [default = 51]; + optional bool default_bool = 213 [default = true]; + optional string default_string = 214 [default = "Hello, string"]; + optional bytes default_bytes = 215 [default = "Hello, bytes"]; + optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; +} + +message MyMessageSet { + option message_set_wire_format = true; + extensions 100 to max; +} + +message Empty { +} + +extend MyMessageSet { + optional Empty x201 = 201; + optional Empty x202 = 202; + optional Empty x203 = 203; + optional Empty x204 = 204; + optional Empty x205 = 205; + optional Empty x206 = 206; + optional Empty x207 = 207; + optional Empty x208 = 208; + optional Empty x209 = 209; + optional Empty x210 = 210; + optional Empty x211 = 211; + optional Empty x212 = 212; + optional Empty x213 = 213; + optional Empty x214 = 214; + optional Empty x215 = 215; + optional Empty x216 = 216; + optional Empty x217 = 217; + optional Empty x218 = 218; + optional Empty x219 = 219; + optional Empty x220 = 220; + optional Empty x221 = 221; + optional Empty x222 = 222; + optional Empty x223 = 223; + optional Empty x224 = 224; + optional Empty x225 = 225; + optional Empty x226 = 226; + optional Empty x227 = 227; + optional Empty x228 = 228; + optional Empty x229 = 229; + optional Empty x230 = 230; + optional Empty x231 = 231; + optional Empty x232 = 232; + optional Empty x233 = 233; + optional Empty x234 = 234; + optional Empty x235 = 235; + optional Empty x236 = 236; + optional Empty x237 
= 237; + optional Empty x238 = 238; + optional Empty x239 = 239; + optional Empty x240 = 240; + optional Empty x241 = 241; + optional Empty x242 = 242; + optional Empty x243 = 243; + optional Empty x244 = 244; + optional Empty x245 = 245; + optional Empty x246 = 246; + optional Empty x247 = 247; + optional Empty x248 = 248; + optional Empty x249 = 249; + optional Empty x250 = 250; +} + +message MessageList { + repeated group Message = 1 { + required string name = 2; + required int32 count = 3; + } +} + +message Strings { + optional string string_field = 1; + optional bytes bytes_field = 2; +} + +message Defaults { + enum Color { + RED = 0; + GREEN = 1; + BLUE = 2; + } + + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. + optional bool F_Bool = 1 [default=true]; + optional int32 F_Int32 = 2 [default=32]; + optional int64 F_Int64 = 3 [default=64]; + optional fixed32 F_Fixed32 = 4 [default=320]; + optional fixed64 F_Fixed64 = 5 [default=640]; + optional uint32 F_Uint32 = 6 [default=3200]; + optional uint64 F_Uint64 = 7 [default=6400]; + optional float F_Float = 8 [default=314159.]; + optional double F_Double = 9 [default=271828.]; + optional string F_String = 10 [default="hello, \"world!\"\n"]; + optional bytes F_Bytes = 11 [default="Bignose"]; + optional sint32 F_Sint32 = 12 [default=-32]; + optional sint64 F_Sint64 = 13 [default=-64]; + optional Color F_Enum = 14 [default=GREEN]; + + // More fields with crazy defaults. + optional float F_Pinf = 15 [default=inf]; + optional float F_Ninf = 16 [default=-inf]; + optional float F_Nan = 17 [default=nan]; + + // Sub-message. + optional SubDefaults sub = 18; + + // Redundant but explicit defaults. + optional string str_zero = 19 [default=""]; +} + +message SubDefaults { + optional int64 n = 1 [default=7]; +} + +message RepeatedEnum { + enum Color { + RED = 1; + } + repeated Color color = 1; +} + +message MoreRepeated { + repeated bool bools = 1; + repeated bool bools_packed = 2 [packed=true]; + repeated int32 ints = 3; + repeated int32 ints_packed = 4 [packed=true]; + repeated int64 int64s_packed = 7 [packed=true]; + repeated string strings = 5; + repeated fixed32 fixeds = 6; +} + +// GroupOld and GroupNew have the same wire format. +// GroupNew has a new field inside a group. + +message GroupOld { + optional group G = 101 { + optional int32 x = 2; + } +} + +message GroupNew { + optional group G = 101 { + optional int32 x = 2; + optional int32 y = 3; + } +} + +message FloatingPoint { + required double f = 1; + optional bool exact = 2; +} + +message MessageWithMap { + map<int32, string> name_mapping = 1; + map<sint64, FloatingPoint> msg_mapping = 2; + map<bool, bytes> byte_mapping = 3; + map<string, string> str_to_str = 4; +} + +message Oneof { + oneof union { + bool F_Bool = 1; + int32 F_Int32 = 2; + int64 F_Int64 = 3; + fixed32 F_Fixed32 = 4; + fixed64 F_Fixed64 = 5; + uint32 F_Uint32 = 6; + uint64 F_Uint64 = 7; + float F_Float = 8; + double F_Double = 9; + string F_String = 10; + bytes F_Bytes = 11; + sint32 F_Sint32 = 12; + sint64 F_Sint64 = 13; + MyMessage.Color F_Enum = 14; + GoTestField F_Message = 15; + group F_Group = 16 { + optional int32 x = 17; + } + int32 F_Largest_Tag = 536870911; + } + + oneof tormato { + int32 value = 100; + } +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union".
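// At most one member of a oneof may be set at a time; assigning one member
// clears the others. A rough sketch of how this surfaces in the generated Go
// code (wrapper type names assume the usual MessageName_FieldName convention):
//
//   c := &Communique{Union: &Communique_Number{Number: 5}}
//   switch u := c.Union.(type) {
//   case *Communique_Number:
//       _ = u.Number
//   case *Communique_Name:
//       _ = u.Name
//   }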
+ oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + MyMessage.Color col = 9; + Strings msg = 10; + } +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 0000000..965876b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,854 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
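// For example, assuming testdata.MyMessage is linked in and registered, an
// Any wrapping that message would be expanded along these lines instead of
// being printed as opaque type_url/value fields:
//
//   [type.googleapis.com/testdata.MyMessage]: <
//     count: 42
//   >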
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
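// e.g. a MessageWithMap with NameMapping = map[int32]string{1: "Rob"} would
// come out (non-compact, roughly) as:
//
//   name_mapping: <
//     key: 1
//     value: "Rob"
//   >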
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
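// A raw message has no schema to guide printing, so its bytes are decoded
// generically by writeUnknownStruct below; e.g. a length-delimited field 1
// holding "abc" would render roughly as:
//
//   <
//     /* 5 unknown bytes */
//     1: "abc"
//   >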
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
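// For example, the byte sequence "caf\xc3\xa9\n" would be written as the
// quoted text "caf\303\251\n": recognized control characters keep their
// mnemonic escapes, and every other non-printable byte falls back to a
// three-digit octal escape.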
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
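// e.g. a repeated string extension holding two elements is printed as one
// line per element, roughly:
//
//   [testdata.greeting]: "bula"
//   [testdata.greeting]: "hola"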
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("<nil>")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 0000000..5e14513 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,895 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + 
switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
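// The lookup below relies on sprops.decoderOrigNames, which maps original
// .proto field names (e.g. "string_field") to the index of the matching
// generated Go struct field (e.g. StringField), so text input can refer to
// fields by their proto names.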
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
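// At this point extName is a plain extension name such as
// "testdata.Ext.more" (the slash-containing Any form was consumed above),
// so the scan matches input like, roughly:
//
//   [testdata.Ext.more]: <data: "Hello, world!">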
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
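// e.g. `weight: 17.0f` (as emitted by C++) parses as 17.0, while `inf` and
// `-inf` are left intact because strconv.ParseFloat already accepts them.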
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go new file mode 100644 index 0000000..8f7cb4d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go @@ -0,0 +1,673 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto_test + +import ( + "math" + "reflect" + "testing" + + . "github.com/golang/protobuf/proto" + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + . "github.com/golang/protobuf/proto/testdata" +) + +type UnmarshalTextTest struct { + in string + err string // if "", no error expected + out *MyMessage +} + +func buildExtStructTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_More, &Ext{ + Data: String("Hello, world!"), + }) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtDataTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + SetExtension(msg, E_Ext_Text, String("Hello, world!")) + SetExtension(msg, E_Ext_Number, Int32(1729)) + return UnmarshalTextTest{in: text, out: msg} +} + +func buildExtRepStringTest(text string) UnmarshalTextTest { + msg := &MyMessage{ + Count: Int32(42), + } + if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { + panic(err) + } + return UnmarshalTextTest{in: text, out: msg} +} + +var unMarshalTextTests = []UnmarshalTextTest{ + // Basic + { + in: " count:42\n name:\"Dave\" ", + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + }, + }, + + // Empty quoted string + { + in: `count:42 name:""`, + out: &MyMessage{ + Count: Int32(42), + Name: String(""), + }, + }, + + // Quoted string concatenation with double quotes + { + in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenation with single quotes + { + in: "count:42 name: 'My name is '\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string concatenations with mixed quotes + { + in: "count:42 name: 'My name is '\n\"elsewhere\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + { + in: "count:42 name: \"My name is \"\n'elsewhere'", + out: &MyMessage{ + Count: Int32(42), + Name: String("My name is elsewhere"), + }, + }, + + // Quoted string with escaped apostrophe + { + in: `count:42 name: "HOLIDAY - New Year\'s Day"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("HOLIDAY - New Year's Day"), + }, + }, + + // Quoted string with single quote + { + in: `count:42 name: 'Roger "The Ramster" Ramjet'`, + out: &MyMessage{ + Count: Int32(42), + Name: String(`Roger "The Ramster" Ramjet`), + }, + }, + + // Quoted string with all the accepted special characters from the C++ test + { + in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", + out: &MyMessage{ + Count: Int32(42), + Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), + }, + }, + + // Quoted string with quoted backslash + { + in: `count:42 name: "\\'xyz"`, + out: &MyMessage{ + Count: 
Int32(42), + Name: String(`\'xyz`), + }, + }, + + // Quoted string with UTF-8 bytes. + { + in: "count:42 name: '\303\277\302\201\xAB'", + out: &MyMessage{ + Count: Int32(42), + Name: String("\303\277\302\201\xAB"), + }, + }, + + // Bad quoted string + { + in: `inner: < host: "\0" >` + "\n", + err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, + }, + + // Number too large for int64 + { + in: "count: 1 others { key: 123456789012345678901 }", + err: "line 1.23: invalid int64: 123456789012345678901", + }, + + // Number too large for int32 + { + in: "count: 1234567890123", + err: "line 1.7: invalid int32: 1234567890123", + }, + + // Number in hexadecimal + { + in: "count: 0x2beef", + out: &MyMessage{ + Count: Int32(0x2beef), + }, + }, + + // Number in octal + { + in: "count: 024601", + out: &MyMessage{ + Count: Int32(024601), + }, + }, + + // Floating point number with "f" suffix + { + in: "count: 4 others:< weight: 17.0f >", + out: &MyMessage{ + Count: Int32(4), + Others: []*OtherMessage{ + { + Weight: Float32(17), + }, + }, + }, + }, + + // Floating point positive infinity + { + in: "count: 4 bigfloat: inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(1)), + }, + }, + + // Floating point negative infinity + { + in: "count: 4 bigfloat: -inf", + out: &MyMessage{ + Count: Int32(4), + Bigfloat: Float64(math.Inf(-1)), + }, + }, + + // Number too large for float32 + { + in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", + err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", + }, + + // Number posing as a quoted string + { + in: `inner: < host: 12 >` + "\n", + err: `line 1.15: invalid string: 12`, + }, + + // Quoted string posing as int32 + { + in: `count: "12"`, + err: `line 1.7: invalid int32: "12"`, + }, + + // Quoted string posing a float32 + { + in: `others:< weight: "17.4" >`, + err: `line 1.17: invalid float32: "17.4"`, + }, + + // Enum + { + in: `count:42 bikeshed: BLUE`, + out: &MyMessage{ + Count: Int32(42), + Bikeshed: MyMessage_BLUE.Enum(), + }, + }, + + // Repeated field + { + in: `count:42 pet: "horsey" pet:"bunny"`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated field with list notation + { + in: `count:42 pet: ["horsey", "bunny"]`, + out: &MyMessage{ + Count: Int32(42), + Pet: []string{"horsey", "bunny"}, + }, + }, + + // Repeated message with/without colon and <>/{} + { + in: `count:42 others:{} others{} others:<> others:{}`, + out: &MyMessage{ + Count: Int32(42), + Others: []*OtherMessage{ + {}, + {}, + {}, + {}, + }, + }, + }, + + // Missing colon for inner message + { + in: `count:42 inner < host: "cauchy.syd" >`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("cauchy.syd"), + }, + }, + }, + + // Missing colon for string field + { + in: `name "Dave"`, + err: `line 1.5: expected ':', found "\"Dave\""`, + }, + + // Missing colon for int32 field + { + in: `count 42`, + err: `line 1.6: expected ':', found "42"`, + }, + + // Missing required field + { + in: `name: "Pawel"`, + err: `proto: required field "testdata.MyMessage.count" not set`, + out: &MyMessage{ + Name: String("Pawel"), + }, + }, + + // Missing required field in a required submessage + { + in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, + err: `proto: required field "testdata.InnerMessage.host" not set`, + out: &MyMessage{ + Count: Int32(42), + WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: 
&InnerMessage{}}, + }, + }, + + // Repeated non-repeated field + { + in: `name: "Rob" name: "Russ"`, + err: `line 1.12: non-repeated field "name" was repeated`, + }, + + // Group + { + in: `count: 17 SomeGroup { group_field: 12 }`, + out: &MyMessage{ + Count: Int32(17), + Somegroup: &MyMessage_SomeGroup{ + GroupField: Int32(12), + }, + }, + }, + + // Semicolon between fields + { + in: `count:3;name:"Calvin"`, + out: &MyMessage{ + Count: Int32(3), + Name: String("Calvin"), + }, + }, + // Comma between fields + { + in: `count:4,name:"Ezekiel"`, + out: &MyMessage{ + Count: Int32(4), + Name: String("Ezekiel"), + }, + }, + + // Boolean false + { + in: `count:42 inner { host: "example.com" connected: false }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean true + { + in: `count:42 inner { host: "example.com" connected: true }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean 0 + { + in: `count:42 inner { host: "example.com" connected: 0 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean 1 + { + in: `count:42 inner { host: "example.com" connected: 1 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean f + { + in: `count:42 inner { host: "example.com" connected: f }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean t + { + in: `count:42 inner { host: "example.com" connected: t }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean False + { + in: `count:42 inner { host: "example.com" connected: False }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean True + { + in: `count:42 inner { host: "example.com" connected: True }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + + // Extension + buildExtStructTest(`count: 42 [testdata.Ext.more]:`), + buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" 
[testdata.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + + // Big all-in-one + { + in: "count:42 # Meaning\n" + + `name:"Dave" ` + + `quote:"\"I didn't want to go.\"" ` + + `pet:"bunny" ` + + `pet:"kitty" ` + + `pet:"horsey" ` + + `inner:<` + + ` host:"footrest.syd" ` + + ` port:7001 ` + + ` connected:true ` + + `> ` + + `others:<` + + ` key:3735928559 ` + + ` value:"\x01A\a\f" ` + + `> ` + + `others:<` + + " weight:58.9 # Atomic weight of Co\n" + + ` inner:<` + + ` host:"lesha.mtv" ` + + ` port:8002 ` + + ` >` + + `>`, + out: &MyMessage{ + Count: Int32(42), + Name: String("Dave"), + Quote: String(`"I didn't want to go."`), + Pet: []string{"bunny", "kitty", "horsey"}, + Inner: &InnerMessage{ + Host: String("footrest.syd"), + Port: Int32(7001), + Connected: Bool(true), + }, + Others: []*OtherMessage{ + { + Key: Int64(3735928559), + Value: []byte{0x1, 'A', '\a', '\f'}, + }, + { + Weight: Float32(58.9), + Inner: &InnerMessage{ + Host: String("lesha.mtv"), + Port: Int32(8002), + }, + }, + }, + }, + }, +} + +func TestUnmarshalText(t *testing.T) { + for i, test := range unMarshalTextTests { + pb := new(MyMessage) + err := UnmarshalText(test.in, pb) + if test.err == "" { + // We don't expect failure. + if err != nil { + t.Errorf("Test %d: Unexpected error: %v", i, err) + } else if !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } else { + // We do expect failure. + if err == nil { + t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) + } else if err.Error() != test.err { + t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", + i, err.Error(), test.err) + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", + i, pb, test.out) + } + } + } +} + +func TestUnmarshalTextCustomMessage(t *testing.T) { + msg := &textMessage{} + if err := UnmarshalText("custom", msg); err != nil { + t.Errorf("Unexpected error from custom unmarshal: %v", err) + } + if UnmarshalText("not custom", msg) == nil { + t.Errorf("Didn't get expected error from custom unmarshal") + } +} + +// Regression test; this caused a panic. 
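+// (UnmarshalText of "color: RED" into the repeated enum field below is the
+// case that panicked; the test asserts it now parses and appends the value.)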
+func TestRepeatedEnum(t *testing.T) {
+	pb := new(RepeatedEnum)
+	if err := UnmarshalText("color: RED", pb); err != nil {
+		t.Fatal(err)
+	}
+	exp := &RepeatedEnum{
+		Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+	}
+	if !Equal(pb, exp) {
+		t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
+	}
+}
+
+func TestProto3TextParsing(t *testing.T) {
+	m := new(proto3pb.Message)
+	const in = `name: "Wallace" true_scotsman: true`
+	want := &proto3pb.Message{
+		Name:         "Wallace",
+		TrueScotsman: true,
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestMapParsing(t *testing.T) {
+	m := new(MessageWithMap)
+	const in = `name_mapping:<key:1234,value:"Feist"> name_mapping:<key:1,value:"Beatles">` +
+		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
+		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
+		`msg_mapping:<value:<f: 5.0>>` + // omitted key
+		`msg_mapping:<key:1>` + // omitted value
+		`byte_mapping:<key:true value:"so be it">` +
+		`byte_mapping:<>` // omitted key and value
+	want := &MessageWithMap{
+		NameMapping: map[int32]string{
+			1:    "Beatles",
+			1234: "Feist",
+		},
+		MsgMapping: map[int64]*FloatingPoint{
+			-4: {F: Float64(2.0)},
+			-2: {F: Float64(4.0)},
+			0:  {F: Float64(5.0)},
+			1:  nil,
+		},
+		ByteMapping: map[bool][]byte{
+			false: nil,
+			true:  []byte("so be it"),
+		},
+	}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+}
+
+func TestOneofParsing(t *testing.T) {
+	const in = `name:"Shrek"`
+	m := new(Communique)
+	want := &Communique{Union: &Communique_Name{"Shrek"}}
+	if err := UnmarshalText(in, m); err != nil {
+		t.Fatal(err)
+	}
+	if !Equal(m, want) {
+		t.Errorf("\n got %v\nwant %v", m, want)
+	}
+
+	const inOverwrite = `name:"Shrek" number:42`
+	m = new(Communique)
+	testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
+	if err := UnmarshalText(inOverwrite, m); err == nil {
+		t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
+	} else if err.Error() != testErr {
+		t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
+			err.Error(), testErr)
+	}
+}
+
+var benchInput string
+
+func init() {
+	benchInput = "count: 4\n"
+	for i := 0; i < 1000; i++ {
+		benchInput += "pet: \"fido\"\n"
+	}
+
+	// Check it is valid input.
+	pb := new(MyMessage)
+	err := UnmarshalText(benchInput, pb)
+	if err != nil {
+		panic("Bad benchmark input: " + err.Error())
+	}
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+	pb := new(MyMessage)
+	for i := 0; i < b.N; i++ {
+		UnmarshalText(benchInput, pb)
+	}
+	b.SetBytes(int64(len(benchInput)))
+}
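For reference, the round trip these parsing tests rely on can be sketched as a standalone program against the vendored packages above. This is a minimal sketch, not part of the diff; it assumes the testdata types are importable at the paths shown, and the comments give indicative output only.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	// Marshal a message with a map field to text, then parse it back.
	m := &pb.MessageWithMap{NameMapping: map[int32]string{1: "Beatles", 1234: "Feist"}}
	text := proto.MarshalTextString(m) // e.g. name_mapping:<key:1 value:"Beatles" > ...
	parsed := new(pb.MessageWithMap)
	if err := proto.UnmarshalText(text, parsed); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.Equal(m, parsed)) // true
}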
diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 0000000..3eabaca
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,474 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+	"bytes"
+	"errors"
+	"io/ioutil"
+	"math"
+	"strings"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+
+	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+	pb "github.com/golang/protobuf/proto/testdata"
+)
+
+// textMessage implements the methods that allow it to marshal and unmarshal
+// itself as text.
+type textMessage struct {
+}
+
+func (*textMessage) MarshalText() ([]byte, error) {
+	return []byte("custom"), nil
+}
+
+func (*textMessage) UnmarshalText(bytes []byte) error {
+	if string(bytes) != "custom" {
+		return errors.New("expected 'custom'")
+	}
+	return nil
+}
+
+func (*textMessage) Reset()         {}
+func (*textMessage) String() string { return "" }
+func (*textMessage) ProtoMessage()  {}
+
+func newTestMessage() *pb.MyMessage {
+	msg := &pb.MyMessage{
+		Count: proto.Int32(42),
+		Name:  proto.String("Dave"),
+		Quote: proto.String(`"I didn't want to go."`),
+		Pet:   []string{"bunny", "kitty", "horsey"},
+		Inner: &pb.InnerMessage{
+			Host:      proto.String("footrest.syd"),
+			Port:      proto.Int32(7001),
+			Connected: proto.Bool(true),
+		},
+		Others: []*pb.OtherMessage{
+			{
+				Key:   proto.Int64(0xdeadbeef),
+				Value: []byte{1, 65, 7, 12},
+			},
+			{
+				Weight: proto.Float32(6.022),
+				Inner: &pb.InnerMessage{
+					Host: proto.String("lesha.mtv"),
+					Port: proto.Int32(8002),
+				},
+			},
+		},
+		Bikeshed: pb.MyMessage_BLUE.Enum(),
+		Somegroup: &pb.MyMessage_SomeGroup{
+			GroupField: proto.Int32(8),
+		},
+		// One normally wouldn't do this.
+		// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
+		XXX_unrecognized: []byte{13<<3 | 0, 4},
+	}
+	ext := &pb.Ext{
+		Data: proto.String("Big gobs for big rats"),
+	}
+	if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
+		panic(err)
+	}
+	greetings := []string{"adg", "easy", "cow"}
+	if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
+		panic(err)
+	}
+
+	// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
+	b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
+	if err != nil {
+		panic(err)
+	}
+	b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
+	proto.SetRawExtension(msg, 201, b)
+
+	// Extensions can be plain fields, too, so let's test that.
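+	// (The key byte is field_number<<3|wire_type, so 202<<3|proto.WireVarint
+	// declares field 202 as a varint; the trailing 19 is its value.)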
+	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+	proto.SetRawExtension(msg, 202, b)
+
+	return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+  host: "footrest.syd"
+  port: 7001
+  connected: true
+>
+others: <
+  key: 3735928559
+  value: "\001A\007\014"
+>
+others: <
+  weight: 6.022
+  inner: <
+    host: "lesha.mtv"
+    port: 8002
+  >
+>
+bikeshed: BLUE
+SomeGroup {
+  group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+  data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+	buf := new(bytes.Buffer)
+	if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+		t.Fatalf("proto.MarshalText: %v", err)
+	}
+	s := buf.String()
+	if s != text {
+		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+	}
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+	buf := new(bytes.Buffer)
+	if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+		t.Fatalf("proto.MarshalText: %v", err)
+	}
+	s := buf.String()
+	if s != "custom" {
+		t.Errorf("Got %q, expected %q", s, "custom")
+	}
+}
+
+func TestMarshalTextNil(t *testing.T) {
+	want := "<nil>"
+	tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+	for i, test := range tests {
+		buf := new(bytes.Buffer)
+		if err := proto.MarshalText(buf, test); err != nil {
+			t.Fatal(err)
+		}
+		if got := buf.String(); got != want {
+			t.Errorf("%d: got %q want %q", i, got, want)
+		}
+	}
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+	// The Color enum only specifies values 0-2.
+	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+	got := m.String()
+	const want = `bikeshed:3 `
+	if got != want {
+		t.Errorf("\n got %q\nwant %q", got, want)
+	}
+}
+
+func TestTextOneof(t *testing.T) {
+	tests := []struct {
+		m    proto.Message
+		want string
+	}{
+		// zero message
+		{&pb.Communique{}, ``},
+		// scalar field
+		{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+		// message field
+		{&pb.Communique{Union: &pb.Communique_Msg{
+			&pb.Strings{StringField: proto.String("why hello!")},
+		}}, `msg:<string_field:"why hello!" >`},
+		// bad oneof (should not panic)
+		{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
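A quick sketch of the oneof behavior TestTextOneof pins down: only the member currently held by the Union is rendered. This is illustrative only, against the same vendored packages; the output comments show the expected shape modulo trailing whitespace.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := &pb.Communique{Union: &pb.Communique_Number{4}}
	fmt.Println(proto.CompactTextString(m)) // number:4
	m.Union = &pb.Communique_Msg{&pb.Strings{StringField: proto.String("why hello!")}}
	fmt.Println(proto.CompactTextString(m)) // msg:<string_field:"why hello!" >
}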
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+	buf := new(bytes.Buffer)
+	m := newTestMessage()
+	for i := 0; i < b.N; i++ {
+		buf.Reset()
+		proto.MarshalText(buf, m)
+	}
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+	w := ioutil.Discard
+	m := newTestMessage()
+	for i := 0; i < b.N; i++ {
+		proto.MarshalText(w, m)
+	}
+}
+
+func compact(src string) string {
+	// s/[ \n]+/ /g; s/ $//;
+	dst := make([]byte, len(src))
+	space, comment := false, false
+	j := 0
+	for i := 0; i < len(src); i++ {
+		if strings.HasPrefix(src[i:], "/*") {
+			comment = true
+			i++
+			continue
+		}
+		if comment && strings.HasPrefix(src[i:], "*/") {
+			comment = false
+			i++
+			continue
+		}
+		if comment {
+			continue
+		}
+		c := src[i]
+		if c == ' ' || c == '\n' {
+			space = true
+			continue
+		}
+		if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+			space = false
+		}
+		if c == '{' {
+			space = false
+		}
+		if space {
+			dst[j] = ' '
+			j++
+			space = false
+		}
+		dst[j] = c
+		j++
+	}
+	if space {
+		dst[j] = ' '
+		j++
+	}
+	return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+	s := proto.CompactTextString(newTestMessage())
+	if s != compactText {
+		t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+	}
+}
+
+func TestStringEscaping(t *testing.T) {
+	testCases := []struct {
+		in  *pb.Strings
+		out string
+	}{
+		{
+			// Test data from C++ test (TextFormatTest.StringEscape).
+			// Single divergence: we don't escape apostrophes.
+			&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+			"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+		},
+		{
+			// Test data from the same C++ test.
+			&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+			"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+		},
+		{
+			// Some UTF-8.
+			&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+			`string_field: "\000\001\377\201"` + "\n",
+		},
+	}
+
+	for i, tc := range testCases {
+		var buf bytes.Buffer
+		if err := proto.MarshalText(&buf, tc.in); err != nil {
+			t.Errorf("proto.MarshalText: %v", err)
+			continue
+		}
+		s := buf.String()
+		if s != tc.out {
+			t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+			continue
+		}
+
+		// Check round-trip.
+		pb := new(pb.Strings)
+		if err := proto.UnmarshalText(s, pb); err != nil {
+			t.Errorf("#%d: UnmarshalText: %v", i, err)
+			continue
+		}
+		if !proto.Equal(pb, tc.in) {
+			t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+		}
+	}
+}
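The compact() helper above mirrors the relationship between the library's two output forms: proto.MarshalText produces the indented multi-line rendering, while proto.CompactTextString collapses it onto a single line. A minimal sketch, again assuming the same vendored packages, with indicative output in the comments:

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/testdata"
)

func main() {
	m := &pb.InnerMessage{Host: proto.String("footrest.syd"), Port: proto.Int32(7001)}
	proto.MarshalText(os.Stdout, m)
	// host: "footrest.syd"
	// port: 7001
	fmt.Println(proto.CompactTextString(m)) // host:"footrest.syd" port:7001
}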
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+	b     bytes.Buffer
+	limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+	var avail = w.limit - w.b.Len()
+	if avail <= 0 {
+		return 0, outOfSpace
+	}
+	if len(p) <= avail {
+		return w.b.Write(p)
+	}
+	n, _ = w.b.Write(p[:avail])
+	return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+	// Try lots of different sizes to exercise more error code-paths.
+	for lim := 0; lim < len(text); lim++ {
+		buf := new(limitedWriter)
+		buf.limit = lim
+		err := proto.MarshalText(buf, newTestMessage())
+		// We expect a certain error, but also some partial results in the buffer.
+		if err != outOfSpace {
+			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+		}
+		s := buf.b.String()
+		x := text[:buf.limit]
+		if s != x {
+			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+		}
+	}
+}
+
+func TestFloats(t *testing.T) {
+	tests := []struct {
+		f    float64
+		want string
+	}{
+		{0, "0"},
+		{4.7, "4.7"},
+		{math.Inf(1), "inf"},
+		{math.Inf(-1), "-inf"},
+		{math.NaN(), "nan"},
+	}
+	for _, test := range tests {
+		msg := &pb.FloatingPoint{F: &test.f}
+		got := strings.TrimSpace(msg.String())
+		want := `f:` + test.want
+		if got != want {
+			t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+		}
+	}
+}
+
+func TestRepeatedNilText(t *testing.T) {
+	m := &pb.MessageList{
+		Message: []*pb.MessageList_Message{
+			nil,
+			&pb.MessageList_Message{
+				Name: proto.String("Horse"),
+			},
+			nil,
+		},
+	}
+	want := `Message <nil>
+Message {
+  name: "Horse"
+}
+Message <nil>
+`
+	if s := proto.MarshalTextString(m); s != want {
+		t.Errorf(" got: %s\nwant: %s", s, want)
+	}
+}
+
+func TestProto3Text(t *testing.T) {
+	tests := []struct {
+		m    proto.Message
+		want string
+	}{
+		// zero message
+		{&proto3pb.Message{}, ``},
+		// zero message except for an empty byte slice
+		{&proto3pb.Message{Data: []byte{}}, ``},
+		// trivial case
+		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+		// empty map
+		{&pb.MessageWithMap{}, ``},
+		// non-empty map; map format is the same as a repeated struct,
+		// and they are sorted by key (numerically for numeric keys).
+		{
+			&pb.MessageWithMap{NameMapping: map[int32]string{
+				-1:      "Negatory",
+				7:       "Lucky",
+				1234:    "Feist",
+				6345789: "Otis",
+			}},
+			`name_mapping:<key:-1 value:"Negatory" > ` +
+				`name_mapping:<key:7 value:"Lucky" > ` +
+				`name_mapping:<key:1234 value:"Feist" > ` +
+				`name_mapping:<key:6345789 value:"Otis" >`,
+		},
+		// map with nil value; not well-defined, but we shouldn't crash
+		{
+			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+			`msg_mapping:<key:7 >`,
+		},
+	}
+	for _, test := range tests {
+		got := strings.TrimSpace(test.m.String())
+		if got != test.want {
+			t.Errorf("\n got %s\nwant %s", got, test.want)
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
new file mode 100644
index 0000000..a42cc37
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile
@@ -0,0 +1,33 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +test: + cd testdata && make test diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile new file mode 100644 index 0000000..f706871 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . + protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 0000000..c6a91bc --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. 
+ +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. 
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? 
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. 
+type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. 
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language? "Generic" services
+	// are not specific to any particular RPC system. They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system. Therefore,
+	// these default to false. Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; in the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the Objective-C class prefix which is prepended to all Objective-C
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
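+	// Like any file option it is set in a .proto file as, e.g.
+	//     option csharp_namespace = "Google.Protobuf.Reflection";
+	// (the value descriptor.proto itself uses).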
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default Swift generators will take the proto package and CamelCase it,
+	// replacing '.' with underscore, and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset() { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage() {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions)
GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. 
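+	// A .proto author opts in per message, e.g.
+	//     message OldApi { option deprecated = true; }
+	// (message name illustrative).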
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would. See the specific
+	// options below. This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field. The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript
+	// number. Specifying JS_NUMBER for the jstype causes the generated JavaScript
+	// code to use the JavaScript "number" type. The behavior of the default
+	// option JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily? Lazy applies only to message-type
+	// fields. It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form. The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint. Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option. However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same. Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing. An implementation which chooses not to check required fields
+	// must be consistent about it. That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
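+	// In .proto source the recognized options above appear inline on a field,
+	// e.g. (illustrative field, proto2 syntax):
+	//     repeated int32 samples = 4 [packed = true, deprecated = true];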
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
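+	// allow_alias, for instance, is written inside the enum itself:
+	//     enum Status {
+	//       option allow_alias = true;
+	//       STARTED = 1;
+	//       RUNNING = 1; // alias of STARTED
+	//     }
+	// (enum and value names illustrative).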
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage() {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
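+// For example, a not-yet-resolved custom option such as
+//     option (something.custom) = 42;   // extension name illustrative
+// is stored as its name parts plus exactly one of the value fields below.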
+type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct {
+	NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage() {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{18, 0}
+}
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition. This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//     message Foo {
+	//         optional string foo = 1;
+	//     }
+	// Let's look at just the field definition:
+	//     optional string foo = 1;
+	//     ^       ^^     ^^  ^  ^^^
+	//     a       bc     de  f  ghi
+	// We have the following locations:
+	//     span    path               represents
+	//     [a,i)   [ 4, 0, 2, 0 ]     The whole field definition.
+	//     [a,b)   [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//     [c,d)   [ 4, 0, 2, 0, 5 ]  The type (string).
+	//     [e,f)   [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//     [g,h)   [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it). This is used whenever a set of elements are
+	//   logically enclosed in a single code segment. For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path. This happens when a single
+	//   logical declaration is spread out across multiple places. The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span. For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant. For example, a "group" defines
+	//   both a type and a field in a single declaration. Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
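+	//
+	// A minimal sketch of consuming this from Go, assuming a parsed
+	// FileDescriptorProto fd whose source_code_info was retained:
+	//     for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
+	//         _ = loc.GetPath() // e.g. [4, 0, 2, 3]: message 0, field 3
+	//     }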
+	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index. They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.
+	// For example, this path:
+	//     [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//     file.message_type(3)  // 4, 3
+	//         .field(7)         // 2, 7
+	//         .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//     repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//     repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//     optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name. If we removed
+	// the last element:
+	//     [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency. Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out. For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//     optional int32 foo = 1;  // Comment attached to foo.
+	//     // Comment attached to bar.
+	//     optional int32 bar = 2;
+	//
+	//     optional string baz = 3;
+	//     // Comment attached to baz.
+	//     // Another line attached to baz.
+	//
+	//     // Comment attached to qux.
+	//     //
+	//     // Another line attached to qux.
+	//     optional double qux = 4;
+	//
+	//     // Detached comment for corge. This is not leading or trailing comments
+	//     // to qux or corge because there are blank lines separating it from
+	//     // both.
+	//
+	//     // Detached comment for corge paragraph 2.
+	//
+	//     optional string corge = 5;
+	//     /* Block comment attached
+	//      * to corge.
Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
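+	// In Go slice terms the annotated region is generatedSource[begin:end]
+	// (a half-open range; variable name illustrative).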
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, 
FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2519 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, + 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, + 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, + 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, + 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, + 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33, + 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, + 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, + 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, + 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, + 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, + 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, + 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, + 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, + 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, + 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, + 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d, + 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, + 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, + 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, + 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, + 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, + 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, + 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, + 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, + 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, + 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, + 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, + 
0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, + 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, + 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, + 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, + 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, + 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, + 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, + 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, + 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, + 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, + 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, + 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, + 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, + 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, + 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, + 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, + 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, + 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, + 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, + 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, + 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, + 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, + 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, + 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, + 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, + 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, + 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, + 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, + 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, + 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, + 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, + 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, + 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, + 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, + 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, + 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, + 0xd0, 0xe1, 0x09, 0xbb, 
0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, + 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, + 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, + 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, + 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, + 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, + 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, + 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, + 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, + 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, + 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, + 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, + 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, + 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4, + 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, + 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, + 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, + 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, + 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, + 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, + 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, + 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, + 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, + 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, + 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, + 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, + 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, + 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, + 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, + 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, + 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, + 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, + 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, + 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, + 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, + 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, + 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 
0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, + 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, + 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, + 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, + 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, + 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, + 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, + 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, + 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, + 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, + 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, + 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, + 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, + 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24, + 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, + 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, + 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, + 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72, + 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, + 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, + 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, + 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, + 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, + 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, + 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, + 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, + 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, + 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, + 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, + 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, + 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, + 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, + 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, + 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, + 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, + 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 
0xc4, 0xfd, 0xa1, 0x12, + 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, + 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, + 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, + 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, + 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, + 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, + 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, + 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, + 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, + 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, + 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, + 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, + 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, + 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4, + 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, + 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, + 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, + 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, + 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, + 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 0000000..4d4fb37 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,849 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. 
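+  // For example, a message that reserves both tag ranges and names could be
+  // written as follows (an illustrative sketch, not a definition in this file):
+  //   message Foo {
+  //     reserved 2, 9 to 11;
+  //     reserved "bar", "baz";
+  //   }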
+ message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. 
+message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file. Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname. However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language? "Generic" services
+  // are not specific to any particular RPC system. They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system. Therefore,
+  // these default to false. Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 42 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default Swift generators will take the proto package and CamelCase it
+  // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format. You should not use this for any other reason: It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name. This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would. See the specific
+  // options below. This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field. The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+  // is represented as JavaScript string, which avoids loss of precision that
+  // can happen when a large value is converted to a floating point JavaScript
+  // number. Specifying JS_NUMBER for the jstype causes the generated JavaScript
+  // code to use the JavaScript "number" type. The behavior of the default option
+  // JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily? Lazy applies only to message-type
+  // fields. It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form. The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint. Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option. However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same. Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message. That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing. An implementation which chooses not to check required fields
+  // must be consistent about it.
That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. 
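+
+  // As with the other *Options messages, custom method options are declared
+  // by extending this message. A minimal sketch, using the experimental
+  // extension-number range described earlier in this file:
+  //   extend google.protobuf.MethodOptions {
+  //     optional string my_method_note = 50123;
+  //   }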
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? HTTP based RPC implementation may choose GET verb for safe
+  // methods, and PUT verb for idempotent methods instead of the default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option. Each string represents a segment in
+  // a dot-separated name. is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition. This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it). This is used whenever a set of elements are
+  //   logically enclosed in a single code segment. For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path. This happens when a single
+  //   logical declaration is spread out across multiple places. The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span. For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant. For example, a "group" defines
+  //   both a type and a field in a single declaration. Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index. They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.
+    // For example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name. If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency. Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out. For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+ // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go new file mode 100644 index 0000000..0d6055d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go @@ -0,0 +1,51 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + A plugin for the Google protocol buffer compiler to generate Go code. + Run it by building this program and putting it in your path with the name + protoc-gen-go + That word 'go' at the end becomes part of the option string set for the + protocol compiler, so once the protocol compiler (protoc) is installed + you can run + protoc --go_out=output_directory input_directory/file.proto + to generate Go bindings for the protocol defined by file.proto. + With that input, the output will be written to + output_directory/file.pb.go + + The generated code is documented in the package comment for + the library. + + See the README and documentation for protocol buffers to learn more: + https://developers.google.com/protocol-buffers/ + +*/ +package documentation diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile new file mode 100644 index 0000000..b5715c3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile @@ -0,0 +1,40 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(GOROOT)/src/Make.inc + +TARG=github.com/golang/protobuf/compiler/generator +GOFILES=\ + generator.go\ + +DEPS=../descriptor ../plugin ../../proto + +include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go new file mode 100644 index 0000000..60d5246 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -0,0 +1,2866 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 2 + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. 
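+	// (P emits a line of output, while In and Out raise and lower the
+	// indentation level; all three are defined on Generator below.)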
+	Generate(file *FileDescriptor)
+	// GenerateImports produces the import declarations for this file.
+	// It is called after Generate.
+	GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+	plugins = append(plugins, p)
+}
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it. These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name methods are common to messages and enums.
+type common struct {
+	file *descriptor.FileDescriptorProto // File this object comes from.
+}
+
+// PackageName is the name in the package clause in the generated file.
+func (c *common) PackageName() string { return uniquePackageOf(c.file) }
+
+func (c *common) File() *descriptor.FileDescriptorProto { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+	common
+	*descriptor.DescriptorProto
+	parent   *Descriptor            // The containing message, if any.
+	nested   []*Descriptor          // Inner messages, if any.
+	enums    []*EnumDescriptor      // Inner enums, if any.
+	ext      []*ExtensionDescriptor // Extensions, if any.
+	typename []string               // Cached typename vector.
+	index    int                    // The index into the container, whether the file or another message.
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
+	group    bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+	if d.typename != nil {
+		return d.typename
+	}
+	n := 0
+	for parent := d; parent != nil; parent = parent.parent {
+		n++
+	}
+	s := make([]string, n, n)
+	for parent := d; parent != nil; parent = parent.parent {
+		n--
+		s[n] = parent.GetName()
+	}
+	d.typename = s
+	return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+	common
+	*descriptor.EnumDescriptorProto
+	parent   *Descriptor // The containing message, if any.
+	typename []string    // Cached typename vector.
+	index    int         // The index into the container, whether the file or a message.
+	path     string      // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+	if e.typename != nil {
+		return e.typename
+	}
+	name := e.GetName()
+	if e.parent == nil {
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	e.typename = s
+	return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
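+// For example, prefix() for an enum Bar nested in message Foo returns "Foo_",
+// so a value BAZ is generated as Foo_BAZ.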
+func (e *EnumDescriptor) prefix() string {
+	if e.parent == nil {
+		// If the enum is not part of a message, the prefix is just the type name.
+		return CamelCase(*e.Name) + "_"
+	}
+	typeName := e.TypeName()
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+	for _, c := range e.Value {
+		if c.GetName() == name {
+			return fmt.Sprint(c.GetNumber())
+		}
+	}
+	log.Fatal("cannot find value for enum constant")
+	return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+	common
+	*descriptor.FieldDescriptorProto
+	parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+	name := e.GetName()
+	if e.parent == nil {
+		// top-level extension
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+	// The full type name.
+	typeName := e.TypeName()
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+	for i, s := range typeName {
+		typeName[i] = CamelCase(s)
+	}
+	return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+	common
+	o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+	*descriptor.FileDescriptorProto
+	desc []*Descriptor          // All the messages defined in this file.
+	enum []*EnumDescriptor      // All the enums defined in this file.
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
+
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
+	comments map[string]*descriptor.SourceCodeInfo_Location
+
+	// The full list of symbols that are exported,
+	// as a map from the exported object to its symbols.
+	// This is used for supporting public imports.
+	exported map[Object][]symbol
+
+	index int // The index of this file in the list of files to generate code for
+
+	proto3 bool // whether to generate proto3 code for this file
+}
+
+// PackageName is the package name we'll use in the generated code to refer to this file.
+func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) }
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string { return fmt.Sprintf("fileDescriptor%d", d.index) }
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) { + pkg = d.GetOptions().GetGoPackage() + if pkg == "" { + return + } + ok = true + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(pkg, "/") + if slash < 0 { + return + } + impPath, pkg = pkg, pkg[slash+1:] + // A semicolon-delimited suffix overrides the package name. + sc := strings.IndexByte(impPath, ';') + if sc < 0 { + return + } + impPath, pkg = impPath[:sc], impPath[sc+1:] + return +} + +// goPackageName returns the Go package name to use in the +// generated Go file. The result explicit reports whether the name +// came from an option go_package statement. If explicit is false, +// the name was derived from the protocol buffer's package statement +// or the input file name. +func (d *FileDescriptor) goPackageName() (name string, explicit bool) { + // Does the file have a "go_package" option? + if _, pkg, ok := d.goPackageOption(); ok { + return pkg, true + } + + // Does the file have a package clause? + if pkg := d.GetPackage(); pkg != "" { + return pkg, false + } + // Use the file base name. + return baseName(d.GetName()), false +} + +// goFileName returns the output name for the generated Go file. +func (d *FileDescriptor) goFileName() string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(impPath, name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, pkg string) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + hasOneof bool + getters []getterSymbol +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { + remoteSym := pkg + "." + ms.sym + + g.P("type ", ms.sym, " ", remoteSym) + g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }") + g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }") + g.P("func (*", ms.sym, ") ProtoMessage() {}") + if ms.hasExtensions { + g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", + "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") + if ms.isMessageSet { + g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", + "{ return (*", remoteSym, ")(m).Marshal() }") + g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ", + "{ return (*", remoteSym, ")(m).Unmarshal(buf) }") + } + } + if ms.hasOneof { + // Oneofs and public imports do not mix well. 
+ // We can make them work okay for the binary format, + // but they're going to break weirdly for text/JSON. + enc := "_" + ms.sym + "_OneofMarshaler" + dec := "_" + ms.sym + "_OneofUnmarshaler" + size := "_" + ms.sym + "_OneofSizer" + encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" + decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int" + g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", ", size, ", nil") + g.P("}") + + g.P("func ", enc, encSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("enc, _, _, _ := m0.XXX_OneofFuncs()") + g.P("return enc(m0, b)") + g.P("}") + + g.P("func ", dec, decSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("_, dec, _, _ := m0.XXX_OneofFuncs()") + g.P("return dec(m0, tag, wire, b)") + g.P("}") + + g.P("func ", size, sizeSig, " {") + g.P("m := msg.(*", ms.sym, ")") + g.P("m0 := (*", remoteSym, ")(m)") + g.P("_, _, size, _ := m0.XXX_OneofFuncs()") + g.P("return size(m0)") + g.P("}") + } + for _, get := range ms.getters { + + if get.typeName != "" { + g.RecordTypeUse(get.typeName) + } + typ := get.typ + val := "(*" + remoteSym + ")(m)." + get.name + "()" + if get.genType { + // typ will be "*pkg.T" (message/group) or "pkg.T" (enum) + // or "map[t]*pkg.T" (map to message/enum). + // The first two of those might have a "[]" prefix if it is repeated. + // Drop any package qualifier since we have hoisted the type into this package. + rep := strings.HasPrefix(typ, "[]") + if rep { + typ = typ[2:] + } + isMap := strings.HasPrefix(typ, "map[") + star := typ[0] == '*' + if !isMap { // map types handled lower down + typ = typ[strings.Index(typ, ".")+1:] + } + if star { + typ = "*" + typ + } + if rep { + // Go does not permit conversion between slice types where both + // element types are named. That means we need to generate a bit + // of code in this situation. + // typ is the element type. + // val is the expression to get the slice from the imported type. + + ctyp := typ // conversion type expression; "Foo" or "(*Foo)" + if star { + ctyp = "(" + typ + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {") + g.In() + g.P("o := ", val) + g.P("if o == nil {") + g.In() + g.P("return nil") + g.Out() + g.P("}") + g.P("s := make([]", typ, ", len(o))") + g.P("for i, x := range o {") + g.In() + g.P("s[i] = ", ctyp, "(x)") + g.Out() + g.P("}") + g.P("return s") + g.Out() + g.P("}") + continue + } + if isMap { + // Split map[keyTyp]valTyp. + bra, ket := strings.Index(typ, "["), strings.Index(typ, "]") + keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:] + // Drop any package qualifier. + // Only the value type may be foreign. + star := valTyp[0] == '*' + valTyp = valTyp[strings.Index(valTyp, ".")+1:] + if star { + valTyp = "*" + valTyp + } + + typ := "map[" + keyTyp + "]" + valTyp + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {") + g.P("o := ", val) + g.P("if o == nil { return nil }") + g.P("s := make(", typ, ", len(o))") + g.P("for k, v := range o {") + g.P("s[k] = (", valTyp, ")(v)") + g.P("}") + g.P("return s") + g.P("}") + continue + } + // Convert imported type into the forwarding type. 
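+			// For example, an imported enum getter returning pkg.Kind is re-emitted
+			// here as (Kind)((*pkg.Msg)(m).GetKind()) (illustrative names only).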
+ val = "(" + typ + ")(" + val + ")" + } + + g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }") + } + +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, pkg string) { + s := es.name + g.P("type ", s, " ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") + g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }") + if !es.proto3 { + g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }") + g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }") + } +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { + v := pkg + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness. + TypeName() []string + File() *descriptor.FileDescriptorProto +} + +// Each package name we generate must be unique. The package we're generating +// gets its own name but every other package must have a unique name that does +// not conflict in the code we generate. These names are chosen globally (although +// they don't have to be, it simplifies things to do them globally). +func uniquePackageOf(fd *descriptor.FileDescriptorProto) string { + s, ok := uniquePackageName[fd] + if !ok { + log.Fatal("internal error: no package name defined for " + fd.GetName()) + } + return s +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + packageName string // What we're calling ourselves. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + usedPackages map[string]bool // Names of packages used in current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + writeOutput bool +} + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. 
+func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "plugins": + pluginList = v + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + pkg := obj.PackageName() + if pkg == g.packageName { + return "" + } + return pkg + "." +} + +// For each input file, the unique package name to use, underscored. +var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) + +// Package names already registered. Key is the name from the .proto file; +// value is the name that appears in the generated code. +var pkgNamesInUse = make(map[string]bool) + +// Create and remember a guaranteed unique package name for this file descriptor. +// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and +// has no file descriptor. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + // Convert dots to underscores before finding a unique alias. + pkg = strings.Map(badToUnderscore, pkg) + + for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ { + // It's a duplicate; must rename. + pkg = orig + strconv.Itoa(i) + } + // Install it. + pkgNamesInUse[pkg] = true + if f != nil { + uniquePackageName[f.FileDescriptorProto] = pkg + } + return pkg +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. 
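+// For example (hypothetical import path): with PackageImportPath set to
+// "example.com/mod/2fa", the base element is "2fa"; it begins with a
+// digit, so the derived package name is "_2fa".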
+func (g *Generator) defaultGoPackage() string { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + if p == "" { + return "" + } + + p = strings.Map(badToUnderscore, p) + // Identifier must not be keyword: insert _. + if isGoKeyword[p] { + p = "_" + p + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) { + p = "_" + p + } + return p +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. +func (g *Generator) SetPackageNames() { + // Register the name for this package. It will be the first name + // registered so is guaranteed to be unmodified. + pkg, explicit := g.genFiles[0].goPackageName() + + // Check all files for an explicit go_package option. + for _, f := range g.genFiles { + thisPkg, thisExplicit := f.goPackageName() + if thisExplicit { + if !explicit { + // Let this file's go_package option serve for all input files. + pkg, explicit = thisPkg, true + } else if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + // If we don't have an explicit go_package option but we have an + // import path, use that. + if !explicit { + p := g.defaultGoPackage() + if p != "" { + pkg, explicit = p, true + } + } + + // If there was no go_package and no import path to use, + // double-check that all the inputs have the same implicit + // Go package name. + if !explicit { + for _, f := range g.genFiles { + thisPkg, _ := f.goPackageName() + if thisPkg != pkg { + g.Fail("inconsistent package names:", thisPkg, pkg) + } + } + } + + g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0]) + + // Register the support package names. They might collide with the + // name of a package we import. + g.Pkg = map[string]string{ + "fmt": RegisterUniquePackageName("fmt", nil), + "math": RegisterUniquePackageName("math", nil), + "proto": RegisterUniquePackageName("proto", nil), + } + +AllFiles: + for _, f := range g.allFiles { + for _, genf := range g.genFiles { + if f == genf { + // In this package already. + uniquePackageName[f.FileDescriptorProto] = g.packageName + continue AllFiles + } + } + // The file is a dependency, so we want to ignore its go_package option + // because that is only relevant for its specific generated output. + pkg := f.GetPackage() + if pkg == "" { + pkg = baseName(*f.Name) + } + RegisterUniquePackageName(pkg, f) + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. 
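+// A sketch of the call order a plugin driver is expected to follow (the
+// request wiring is illustrative and not defined in this file):
+//	g := New()
+//	// ...unmarshal the CodeGeneratorRequest into g.Request...
+//	g.CommandLineParameters(g.Request.GetParameter())
+//	g.WrapTypes()
+//	g.SetPackageNames()
+//	g.BuildTypeNameMap()
+//	g.GenerateAllFiles()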
+func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + for _, f := range g.Request.ProtoFile { + // We must wrap the descriptors before we wrap the enums + descs := wrapDescriptors(f) + g.buildNestedDescriptors(descs) + enums := wrapEnumDescriptors(f, descs) + g.buildNestedEnums(descs, enums) + exts := wrapExtensions(f) + fd := &FileDescriptor{ + FileDescriptorProto: f, + desc: descs, + enum: enums, + ext: exts, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + extractComments(fd) + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd.FileDescriptorProto, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) + } + fd.index = len(g.genFiles) + g.genFiles = append(g.genFiles, fd) + } +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ exp += strings.Join(parts, ".")
+ for _, field := range parent.Field {
+ if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+ d.group = true
+ break
+ }
+ }
+ }
+
+ for _, field := range desc.Extension {
+ d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+ }
+
+ return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor {
+ sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+ for i, desc := range file.MessageType {
+ sl = wrapThisDescriptor(sl, desc, nil, file, i)
+ }
+ return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor {
+ sl = append(sl, newDescriptor(desc, parent, file, index))
+ me := sl[len(sl)-1]
+ for i, nested := range desc.NestedType {
+ sl = wrapThisDescriptor(sl, nested, me, file, i)
+ }
+ return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor {
+ ed := &EnumDescriptor{
+ common: common{file},
+ EnumDescriptorProto: desc,
+ parent: parent,
+ index: index,
+ }
+ if parent == nil {
+ ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+ } else {
+ ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+ }
+ return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor {
+ sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+ // Top-level enums.
+ for i, enum := range file.EnumType {
+ sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+ }
+ // Enums within messages. Enums within embedded messages appear in the outer-most message.
+ for _, nested := range descs {
+ for i, enum := range nested.EnumType {
+ sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+ }
+ }
+ return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor {
+ var sl []*ExtensionDescriptor
+ for _, field := range file.Extension {
+ sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+ }
+ return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) {
+ for _, index := range file.PublicDependency {
+ df := g.fileByName(file.Dependency[index])
+ for _, d := range df.desc {
+ if d.GetOptions().GetMapEntry() {
+ continue
+ }
+ sl = append(sl, &ImportedDescriptor{common{file}, d})
+ }
+ for _, e := range df.enum {
+ sl = append(sl, &ImportedDescriptor{common{file}, e})
+ }
+ for _, ext := range df.ext {
+ sl = append(sl, &ImportedDescriptor{common{file}, ext})
+ }
+ }
+ return
+}
+
+func extractComments(file *FileDescriptor) {
+ file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+ for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+ if loc.LeadingComments == nil {
+ continue
+ }
+ var p []string
+ for _, n := range loc.Path {
+ p = append(p, strconv.Itoa(int(n)))
+ }
+ file.comments[strings.Join(p, ",")] = loc
+ }
+}
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
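+// For example (illustrative), an enum Color nested in message Foo in
+// package pkg is keyed as ".pkg.Foo.Color".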
+// The key names for the map come from the input data, which puts a period at the beginning. +// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + + // If the file of this object isn't a direct dependency of the current file, + // or in the current file, then this object has been publicly imported into + // a dependency of the current file. + // We should return the ImportedDescriptor object for it instead. + direct := *o.File().Name == *g.file.Name + if !direct { + for _, dep := range g.file.Dependency { + if *g.fileByName(dep).Name == *o.File().Name { + direct = true + break + } + } + } + if !direct { + found := false + Loop: + for _, dep := range g.file.Dependency { + df := g.fileByName(*g.fileByName(dep).Name) + for _, td := range df.imp { + if td.o == o { + // Found it! + o = td + found = true + break Loop + } + } + } + if !found { + log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name) + } + } + + return o +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. +func (g *Generator) P(str ...interface{}) { + if !g.writeOutput { + return + } + g.WriteString(g.indent) + for _, v := range str { + switch s := v.(type) { + case string: + g.WriteString(s) + case *string: + g.WriteString(*s) + case bool: + fmt.Fprintf(g, "%t", s) + case *bool: + fmt.Fprintf(g, "%t", *s) + case int: + fmt.Fprintf(g, "%d", s) + case *int32: + fmt.Fprintf(g, "%d", *s) + case *int64: + fmt.Fprintf(g, "%d", *s) + case float64: + fmt.Fprintf(g, "%g", s) + case *float64: + fmt.Fprintf(g, "%g", *s) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } + } + g.WriteByte('\n') +} + +// addInitf stores the given statement to be printed inside the file's init function. +// The statement is given as a format specifier and arguments. +func (g *Generator) addInitf(stmt string, a ...interface{}) { + g.init = append(g.init, fmt.Sprintf(stmt, a...)) +} + +// In Indents the output one tab stop. +func (g *Generator) In() { g.indent += "\t" } + +// Out unindents the output one tab stop. +func (g *Generator) Out() { + if len(g.indent) > 0 { + g.indent = g.indent[1:] + } +} + +// GenerateAllFiles generates the output for all the files we're outputting. +func (g *Generator) GenerateAllFiles() { + // Initialize the plugins + for _, p := range plugins { + p.Init(g) + } + // Generate the output. 
+ // The generator runs for every file, even the files
+ // that we don't generate output for, so that we can collate the full list
+ // of exported symbols to support public imports.
+ genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+ for _, file := range g.genFiles {
+ genFileMap[file] = true
+ }
+ for _, file := range g.allFiles {
+ g.Reset()
+ g.writeOutput = genFileMap[file]
+ g.generate(file)
+ if !g.writeOutput {
+ continue
+ }
+ g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+ Name: proto.String(file.goFileName()),
+ Content: proto.String(g.String()),
+ })
+ }
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+ for _, p := range plugins {
+ p.Generate(file)
+ }
+}
+
+// FileOf returns the FileDescriptor for this FileDescriptorProto.
+func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor {
+ for _, file := range g.allFiles {
+ if file.FileDescriptorProto == fd {
+ return file
+ }
+ }
+ g.Fail("could not find file in table:", fd.GetName())
+ return nil
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+ g.file = g.FileOf(file.FileDescriptorProto)
+ g.usedPackages = make(map[string]bool)
+
+ if g.file.index == 0 {
+ // For one file in the package, assert version compatibility.
+ g.P("// This is a compile-time assertion to ensure that this generated file")
+ g.P("// is compatible with the proto package it is being compiled against.")
+ g.P("// A compilation error at this line likely means your copy of the")
+ g.P("// proto package needs to be updated.")
+ g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+ g.P()
+ }
+ for _, td := range g.file.imp {
+ g.generateImported(td)
+ }
+ for _, enum := range g.file.enum {
+ g.generateEnum(enum)
+ }
+ for _, desc := range g.file.desc {
+ // Don't generate virtual messages for maps.
+ if desc.GetOptions().GetMapEntry() {
+ continue
+ }
+ g.generateMessage(desc)
+ }
+ for _, ext := range g.file.ext {
+ g.generateExtension(ext)
+ }
+ g.generateInitFunction()
+
+ // Run the plugins before the imports so we know which imports are necessary.
+ g.runPlugins(file)
+
+ g.generateFileDescriptor(file)
+
+ // Generate header and imports last, though they appear first in the output.
+ rem := g.Buffer
+ g.Buffer = new(bytes.Buffer)
+ g.generateHeader()
+ g.generateImports()
+ if !g.writeOutput {
+ return
+ }
+ g.Write(rem.Bytes())
+
+ // Reformat generated code.
+ fset := token.NewFileSet()
+ raw := g.Bytes()
+ ast, err := parser.ParseFile(fset, "", g, parser.ParseComments)
+ if err != nil {
+ // Print out the bad code with line numbers.
+ // This should never happen in practice, but it can while changing generated code,
+ // so consider this a debugging aid.
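+ // The listing below pairs each line of the rejected output with its
+ // line number, e.g. (illustrative):
+ //	    7	func (m *Foo) Reset() { *m = Foo{} }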
+ var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(raw)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) + } + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + g.P("// source: ", g.file.Name) + g.P() + + name := g.file.PackageName() + + if g.file.index == 0 { + // Generate package docs for the first file in the package. + g.P("/*") + g.P("Package ", name, " is a generated protocol buffer package.") + g.P() + if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { + // not using g.PrintComments because this is a /* */ comment block. + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + line = strings.TrimPrefix(line, " ") + // ensure we don't escape from the block comment + line = strings.Replace(line, "*/", "* /", -1) + g.P(line) + } + g.P() + } + var topMsgs []string + g.P("It is generated from these files:") + for _, f := range g.genFiles { + g.P("\t", f.Name) + for _, msg := range f.desc { + if msg.parent != nil { + continue + } + topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName())) + } + } + g.P() + g.P("It has these top-level messages:") + for _, msg := range topMsgs { + g.P("\t", msg) + } + g.P("*/") + } + + g.P("package ", name) + g.P() +} + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// It returns an indication of whether any comments were printed. +// See descriptor.proto for its format. +func (g *Generator) PrintComments(path string) bool { + if !g.writeOutput { + return false + } + if loc, ok := g.file.comments[path]; ok { + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + g.P("// ", strings.TrimPrefix(line, " ")) + } + return true + } + return false +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + return g.allFilesByName[filename] +} + +// weak returns whether the ith import of the current file is a weak import. +func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the fmt and math packages. + g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto")) + g.P("import " + g.Pkg["fmt"] + ` "fmt"`) + g.P("import " + g.Pkg["math"] + ` "math"`) + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + // Do not import our own package. + if fd.PackageName() == g.packageName { + continue + } + filename := fd.goFileName() + // By default, import path is the dirname of the Go filename. + importPath := path.Dir(filename) + if substitution, ok := g.ImportMap[s]; ok { + importPath = substitution + } + importPath = g.ImportPrefix + importPath + // Skip weak imports. 
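+ // (Weak imports correspond to `import weak` declarations in the .proto
+ // file; nothing is emitted for them except the marker comment below.)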
+ if g.weak(int32(i)) { + g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) + continue + } + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + pname := fd.PackageName() + if _, ok := g.usedPackages[pname]; !ok { + pname = "_" + } + g.P("import ", pname, " ", strconv.Quote(importPath)) + } + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + // Don't generate public import symbols for files that we are generating + // code for, since those symbols will already be in this package. + // We can't simply avoid creating the ImportedDescriptor objects, + // because g.genFiles isn't populated at that stage. + tn := id.TypeName() + sn := tn[len(tn)-1] + df := g.FileOf(id.o.File()) + filename := *df.Name + for _, fd := range g.genFiles { + if *fd.Name == filename { + g.P("// Ignoring public import of ", sn, " from ", filename) + g.P() + return + } + } + g.P("// ", sn, " from public import ", filename) + g.usedPackages[df.PackageName()] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, df.PackageName()) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. 
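+ // (e.g. a TypeName of ["Outer", "Color"], an enum declared inside
+ // message Outer, yields "Outer_Color"; illustration only.)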
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + g.PrintComments(enum.path) + g.P("type ", ccTypeName, " int32") + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + g.In() + for i, e := range enum.Value { + g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + + name := ccPrefix + *e.Name + g.P(name, " ", ccTypeName, " = ", e.Number) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.Out() + g.P(")") + g.P("var ", ccTypeName, "_name = map[int32]string{") + g.In() + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.Out() + g.P("}") + g.P("var ", ccTypeName, "_value = map[string]int32{") + g.In() + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.In() + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.Out() + g.P("}") + } + + g.P("func (x ", ccTypeName, ") String() string {") + g.In() + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.Out() + g.P("}") + + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.In() + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.In() + g.P("return err") + g.Out() + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.Out() + g.P("}") + } + + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + indexes = append(indexes, strconv.Itoa(enum.index)) + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + } + + g.P() +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. +func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. 
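+ // (For instance, a bool default of true becomes ",def=1" in the tag,
+ // and an enum default is replaced by its integer value; illustrative.)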
+ switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.PackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && isScalar(field)) { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if json := field.GetJsonName(); json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. + name += ",json=" + json + } + name = ",name=" + name + if message.proto3() { + // We only need the extra tag for []byte fields; + // no need to add noise for the others. + if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES { + name += ",proto3" + } + + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. 
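+// For instance (hypothetical names): a message pkg.Outer.Inner in the file
+// being generated renders as "Outer_Inner", while the same message seen
+// from another package renders as "pkg.Outer_Inner".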
+func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// TypeNameWithPackage is like TypeName, but always includes the package +// name even if the object is in our own package. +func (g *Generator) TypeNameWithPackage(obj Object) string { + return obj.PackageName() + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if field.OneofIndex != nil && message != nil { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) { + if obj, ok := g.typeNameToObject[t]; ok { + // Call ObjectNamed to get the true object to record the use. + obj = g.ObjectNamed(t) + g.usedPackages[obj.PackageName()] = true + } +} + +// Method names that may be generated. Fields with these names get an +// underscore appended. Any change to this set is a potential incompatible +// API change because it changes generated field names. +var methodNames = [...]string{ + "Reset", + "String", + "ProtoMessage", + "Marshal", + "Unmarshal", + "ExtensionRangeArray", + "ExtensionMap", + "Descriptor", +} + +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. 
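+// For example (illustrative), the method emitted for Timestamp is:
+//	func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }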
+var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + +// Generate the type and default constant definitions for this Descriptor. +func (g *Generator) generateMessage(message *Descriptor) { + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + fieldNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldGetterNames := make(map[*descriptor.FieldDescriptorProto]string) + fieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) + + oneofFieldName := make(map[int32]string) // indexed by oneof_index field of FieldDescriptorProto + oneofDisc := make(map[int32]string) // name of discriminator method + oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star + oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer + + g.PrintComments(message.path) + g.P("type ", ccTypeName, " struct {") + g.In() + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. + allocNames := func(ns ...string) []string { + Loop: + for { + for _, n := range ns { + if usedNames[n] { + for i := range ns { + ns[i] += "_" + } + continue Loop + } + } + for _, n := range ns { + usedNames[n] = true + } + return ns + } + } + + for i, field := range message.Field { + // Allocate the getter and the field at the same time so name + // collisions create field/method consistent names. + // TODO: This allocation occurs based on the order of the fields + // in the proto file, meaning that a change in the field + // ordering can change generated Method/Field names. + base := CamelCase(*field.Name) + ns := allocNames(base, "Get"+base) + fieldName, fieldGetterName := ns[0], ns[1] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") + + fieldNames[field] = fieldName + fieldGetterNames[field] = fieldGetterName + + oneof := field.OneofIndex != nil + if oneof && oneofFieldName[*field.OneofIndex] == "" { + odp := message.OneofDecl[int(*field.OneofIndex)] + fname := allocNames(CamelCase(odp.GetName()))[0] + + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) + if com { + g.P("//") + } + g.P("// Types that are valid to be assigned to ", fname, ":") + // Generate the rest of this comment later, + // when we've computed any disambiguation. 
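+ // (That is done by remembering the current buffer offset here and
+ // splicing the final list of oneof wrapper types into the buffer once
+ // all fields have been seen; see the oneofInsertPoints loop below.)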
+ oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len() + + dname := "is" + ccTypeName + "_" + fname + oneofFieldName[*field.OneofIndex] = fname + oneofDisc[*field.OneofIndex] = dname + tag := `protobuf_oneof:"` + odp.GetName() + `"` + g.P(fname, " ", dname, " `", tag, "`") + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + // Figure out the Go types and tags for the key and value types. + keyField, valField := d.Field[0], d.Field[1] + keyType, keyWire := g.GoType(d, keyField) + valType, valWire := g.GoType(d, valField) + keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. + keyType = strings.TrimPrefix(keyType, "*") + switch *valField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(valField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.RecordTypeUse(valField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + typename = fmt.Sprintf("map[%s]%s", keyType, valType) + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) + } + } + + fieldTypes[field] = typename + + if oneof { + tname := ccTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. + for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofTypeName[field] = tname + continue + } + + g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) + g.P(fieldName, "\t", typename, "\t`", tag, "`") + g.RecordTypeUse(field.GetTypeName()) + } + if len(message.ExtensionRange) > 0 { + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") + } + if !message.proto3() { + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + } + g.Out() + g.P("}") + + // Update g.Buffer to list valid oneof types. + // We do this down here, after we've disambiguated the oneof type names. + // We go in reverse order of insertion point to avoid invalidating offsets. + for oi := int32(len(message.OneofDecl)); oi >= 0; oi-- { + ip := oneofInsertPoints[oi] + all := g.Buffer.Bytes() + rem := all[ip:] + g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem + for _, field := range message.Field { + if field.OneofIndex == nil || *field.OneofIndex != oi { + continue + } + g.P("//\t*", oneofTypeName[field]) + } + g.Buffer.Write(rem) + } + + // Reset, String and ProtoMessage methods. + g.P("func (m *", ccTypeName, ") Reset() { *m = ", ccTypeName, "{} }") + g.P("func (m *", ccTypeName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + g.P("func (*", ccTypeName, ") ProtoMessage() {}") + var indexes []string + for m := message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) 
+ } + g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { + g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`) + } + + // Extension support methods + var hasExtensions, isMessageSet bool + if len(message.ExtensionRange) > 0 { + hasExtensions = true + // message_set_wire_format only makes sense when extensions are defined. + if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { + isMessageSet = true + g.P() + g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(&m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") + g.In() + g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("func (m *", ccTypeName, ") UnmarshalJSON(buf []byte) error {") + g.In() + g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") + g.Out() + g.P("}") + g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") + g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)") + g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)") + } + + g.P() + g.P("var extRange_", ccTypeName, " = []", g.Pkg["proto"], ".ExtensionRange{") + g.In() + for _, r := range message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{", r.Start, ", ", end, "},") + } + g.Out() + g.P("}") + g.P("func (*", ccTypeName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.In() + g.P("return extRange_", ccTypeName) + g.Out() + g.P("}") + } + + // Default constants + defNames := make(map[*descriptor.FieldDescriptorProto]string) + for _, field := range message.Field { + def := field.GetDefaultValue() + if def == "" { + continue + } + fieldname := "Default_" + ccTypeName + "_" + CamelCase(*field.Name) + defNames[field] = fieldname + typename, _ := g.GoType(message, field) + if typename[0] == '*' { + typename = typename[1:] + } + kind := "const " + switch { + case typename == "bool": + case typename == "string": + def = strconv.Quote(def) + case typename == "[]byte": + def = "[]byte(" + strconv.Quote(unescape(def)) + ")" + kind = "var " + case def == "inf", def == "-inf", def == "nan": + // These names are known to, and defined by, the protocol language. + switch def { + case "inf": + def = "math.Inf(1)" + case "-inf": + def = "math.Inf(-1)" + case "nan": + def = "math.NaN()" + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_FLOAT { + def = "float32(" + def + ")" + } + kind = "var " + case *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM: + // Must be an enum. Need to construct the prefixed name. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. 
+ enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate constant for %s", fieldname) + continue + } + def = g.DefaultPackageName(obj) + enum.prefix() + def + } + g.P(kind, fieldname, " ", typename, " = ", def) + g.file.addExport(message, constOrVarSymbol{fieldname, kind, ""}) + } + g.P() + + // Oneof per-field types, discriminants and getters. + // + // Generate unexported named types for the discriminant interfaces. + // We shouldn't have to do this, but there was (~19 Aug 2015) a compiler/linker bug + // that was triggered by using anonymous interfaces here. + // TODO: Revisit this and consider reverting back to anonymous interfaces. + for oi := range message.OneofDecl { + dname := oneofDisc[int32(oi)] + g.P("type ", dname, " interface { ", dname, "() }") + } + g.P() + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + _, wiretype := g.GoType(message, field) + tag := "protobuf:" + g.goTag(message, field, wiretype) + g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }") + g.RecordTypeUse(field.GetTypeName()) + } + g.P() + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + g.P("func (*", oneofTypeName[field], ") ", oneofDisc[*field.OneofIndex], "() {}") + } + g.P() + for oi := range message.OneofDecl { + fname := oneofFieldName[int32(oi)] + g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {") + g.P("if m != nil { return m.", fname, " }") + g.P("return nil") + g.P("}") + } + g.P() + + // Field getters + var getters []getterSymbol + for _, field := range message.Field { + oneof := field.OneofIndex != nil + + fname := fieldNames[field] + typename, _ := g.GoType(message, field) + if t, ok := mapFieldTypes[field]; ok { + typename = t + } + mname := fieldGetterNames[field] + star := "" + if needsStar(*field.Type) && typename[0] == '*' { + typename = typename[1:] + star = "*" + } + + // Only export getter symbols for basic types, + // and for messages and enums in the same package. + // Groups are not exported. + // Foreign types can't be hoisted through a public import because + // the importer may not already be importing the defining .proto. + // As an example, imagine we have an import tree like this: + // A.proto -> B.proto -> C.proto + // If A publicly imports B, we need to generate the getters from B in A's output, + // but if one such getter returns something from C then we cannot do that + // because A is not importing C already. + var getter, genType bool + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + getter = false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM: + // Only export getter if its return type is in this package. 
+ getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName() + genType = true + default: + getter = true + } + if getter { + getters = append(getters, getterSymbol{ + name: mname, + typ: typename, + typeName: field.GetTypeName(), + genType: genType, + }) + } + + g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {") + g.In() + def, hasDef := defNames[field] + typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typeDefaultIsNil = !hasDef + case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE: + typeDefaultIsNil = true + } + if isRepeated(field) { + typeDefaultIsNil = true + } + if typeDefaultIsNil && !oneof { + // A bytes field with no explicit default needs less generated code, + // as does a message or group field, or a repeated field. + g.P("if m != nil {") + g.In() + g.P("return m." + fname) + g.Out() + g.P("}") + g.P("return nil") + g.Out() + g.P("}") + g.P() + continue + } + if !oneof { + if message.proto3() { + g.P("if m != nil {") + } else { + g.P("if m != nil && m." + fname + " != nil {") + } + g.In() + g.P("return " + star + "m." + fname) + g.Out() + g.P("}") + } else { + uname := oneofFieldName[*field.OneofIndex] + tname := oneofTypeName[field] + g.P("if x, ok := m.Get", uname, "().(*", tname, "); ok {") + g.P("return x.", fname) + g.P("}") + } + if hasDef { + if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { + g.P("return " + def) + } else { + // The default is a []byte var. + // Make a copy when returning it to be safe. + g.P("return append([]byte(nil), ", def, "...)") + } + } else { + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + g.P("return false") + case descriptor.FieldDescriptorProto_TYPE_STRING: + g.P(`return ""`) + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // This is only possible for oneof fields. + g.P("return nil") + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // The default default for an enum is the first value in the enum, + // not zero. + obj := g.ObjectNamed(field.GetTypeName()) + var enum *EnumDescriptor + if id, ok := obj.(*ImportedDescriptor); ok { + // The enum type has been publicly imported. 
+ enum, _ = id.o.(*EnumDescriptor) + } else { + enum, _ = obj.(*EnumDescriptor) + } + if enum == nil { + log.Printf("don't know how to generate getter for %s", field.GetName()) + continue + } + if len(enum.Value) == 0 { + g.P("return 0 // empty enum") + } else { + first := enum.Value[0].GetName() + g.P("return ", g.DefaultPackageName(obj)+enum.prefix()+first) + } + default: + g.P("return 0") + } + } + g.Out() + g.P("}") + g.P() + } + + if !message.group { + ms := &messageSymbol{ + sym: ccTypeName, + hasExtensions: hasExtensions, + isMessageSet: isMessageSet, + hasOneof: len(message.OneofDecl) > 0, + getters: getters, + } + g.file.addExport(message, ms) + } + + // Oneof functions + if len(message.OneofDecl) > 0 { + fieldWire := make(map[*descriptor.FieldDescriptorProto]string) + + // method + enc := "_" + ccTypeName + "_OneofMarshaler" + dec := "_" + ccTypeName + "_OneofUnmarshaler" + size := "_" + ccTypeName + "_OneofSizer" + encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" + decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" + sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)" + + g.P("// XXX_OneofFuncs is for the internal use of the proto package.") + g.P("func (*", ccTypeName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") + g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{") + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + g.P("(*", oneofTypeName[field], ")(nil),") + } + g.P("}") + g.P("}") + g.P() + + // marshaler + g.P("func ", enc, encSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + for oi, odp := range message.OneofDecl { + g.P("// ", odp.GetName()) + fname := oneofFieldName[int32(oi)] + g.P("switch x := m.", fname, ".(type) {") + for _, field := range message.Field { + if field.OneofIndex == nil || int(*field.OneofIndex) != oi { + continue + } + g.P("case *", oneofTypeName[field], ":") + var wire, pre, post string + val := "x." + fieldNames[field] // overridden for TYPE_BOOL + canFail := false // only TYPE_MESSAGE and TYPE_GROUP can fail + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + wire = "WireFixed64" + pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits(" + post = "))" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + wire = "WireFixed32" + pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits(" + post = ")))" + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64: + wire = "WireVarint" + pre, post = "b.EncodeVarint(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + wire = "WireVarint" + pre, post = "b.EncodeVarint(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + wire = "WireFixed64" + pre, post = "b.EncodeFixed64(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + wire = "WireFixed32" + pre, post = "b.EncodeFixed32(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + // bool needs special handling. 
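+ // (The wire format has no bool type; a bool is written as a varint
+ // that is 0 or 1, hence the temporary t in the emitted code.)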
+ g.P("t := uint64(0)") + g.P("if ", val, " { t = 1 }") + val = "t" + wire = "WireVarint" + pre, post = "b.EncodeVarint(", ")" + case descriptor.FieldDescriptorProto_TYPE_STRING: + wire = "WireBytes" + pre, post = "b.EncodeStringBytes(", ")" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + wire = "WireStartGroup" + pre, post = "b.Marshal(", ")" + canFail = true + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + wire = "WireBytes" + pre, post = "b.EncodeMessage(", ")" + canFail = true + case descriptor.FieldDescriptorProto_TYPE_BYTES: + wire = "WireBytes" + pre, post = "b.EncodeRawBytes(", ")" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + wire = "WireVarint" + pre, post = "b.EncodeZigzag32(uint64(", "))" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + wire = "WireVarint" + pre, post = "b.EncodeZigzag64(uint64(", "))" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + fieldWire[field] = wire + g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + if !canFail { + g.P(pre, val, post) + } else { + g.P("if err := ", pre, val, post, "; err != nil {") + g.P("return err") + g.P("}") + } + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + g.P("b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + } + } + g.P("case nil:") + g.P("default: return ", g.Pkg["fmt"], `.Errorf("`, ccTypeName, ".", fname, ` has unexpected type %T", x)`) + g.P("}") + } + g.P("return nil") + g.P("}") + g.P() + + // unmarshaler + g.P("func ", dec, decSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + g.P("switch tag {") + for _, field := range message.Field { + if field.OneofIndex == nil { + continue + } + odp := message.OneofDecl[int(*field.OneofIndex)] + g.P("case ", field.Number, ": // ", odp.GetName(), ".", *field.Name) + g.P("if wire != ", g.Pkg["proto"], ".", fieldWire[field], " {") + g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType") + g.P("}") + lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP + var dec, cast, cast2 string + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits" + case descriptor.FieldDescriptorProto_TYPE_INT64: + dec, cast = "b.DecodeVarint()", "int64" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + dec = "b.DecodeVarint()" + case descriptor.FieldDescriptorProto_TYPE_INT32: + dec, cast = "b.DecodeVarint()", "int32" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + dec = "b.DecodeFixed64()" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + dec, cast = "b.DecodeFixed32()", "uint32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + dec = "b.DecodeVarint()" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_STRING: + dec = "b.DecodeStringBytes()" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = "err" + dec = "b.DecodeGroup(msg)" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = "err" + dec = "b.DecodeMessage(msg)" + // handled specially below + case descriptor.FieldDescriptorProto_TYPE_BYTES: + dec = "b.DecodeRawBytes(true)" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + dec, cast = "b.DecodeVarint()", "uint32" + case 
descriptor.FieldDescriptorProto_TYPE_ENUM: + dec, cast = "b.DecodeVarint()", fieldTypes[field] + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + dec, cast = "b.DecodeFixed32()", "int32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + dec, cast = "b.DecodeFixed64()", "int64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + dec, cast = "b.DecodeZigzag32()", "int32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + dec, cast = "b.DecodeZigzag64()", "int64" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + g.P(lhs, " := ", dec) + val := "x" + if cast != "" { + val = cast + "(" + val + ")" + } + if cast2 != "" { + val = cast2 + "(" + val + ")" + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + val += " != 0" + case descriptor.FieldDescriptorProto_TYPE_GROUP, + descriptor.FieldDescriptorProto_TYPE_MESSAGE: + val = "msg" + } + g.P("m.", oneofFieldName[*field.OneofIndex], " = &", oneofTypeName[field], "{", val, "}") + g.P("return true, err") + } + g.P("default: return false, nil") + g.P("}") + g.P("}") + g.P() + + // sizer + g.P("func ", size, sizeSig, " {") + g.P("m := msg.(*", ccTypeName, ")") + for oi, odp := range message.OneofDecl { + g.P("// ", odp.GetName()) + fname := oneofFieldName[int32(oi)] + g.P("switch x := m.", fname, ".(type) {") + for _, field := range message.Field { + if field.OneofIndex == nil || int(*field.OneofIndex) != oi { + continue + } + g.P("case *", oneofTypeName[field], ":") + val := "x." + fieldNames[field] + var wire, varint, fixed string + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + wire = "WireFixed64" + fixed = "8" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + wire = "WireFixed32" + fixed = "4" + case descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM: + wire = "WireVarint" + varint = val + case descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_SFIXED64: + wire = "WireFixed64" + fixed = "8" + case descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED32: + wire = "WireFixed32" + fixed = "4" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + wire = "WireVarint" + fixed = "1" + case descriptor.FieldDescriptorProto_TYPE_STRING: + wire = "WireBytes" + fixed = "len(" + val + ")" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_GROUP: + wire = "WireStartGroup" + fixed = g.Pkg["proto"] + ".Size(" + val + ")" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + wire = "WireBytes" + g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") + fixed = "s" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_BYTES: + wire = "WireBytes" + fixed = "len(" + val + ")" + varint = fixed + case descriptor.FieldDescriptorProto_TYPE_SINT32: + wire = "WireVarint" + varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + wire = "WireVarint" + varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" + default: + g.Fail("unhandled oneof field type ", field.Type.String()) + } + g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + if varint != "" { + g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") + } + if fixed != "" { + g.P("n += ", fixed) + } + if *field.Type == 
descriptor.FieldDescriptorProto_TYPE_GROUP { + g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + } + } + g.P("case nil:") + g.P("default:") + g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))") + g.P("}") + } + g.P("return n") + g.P("}") + g.P() + } + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." + fullName + } + + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) +} + +var escapeChars = [256]byte{ + 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', +} + +// unescape reverses the "C" escaping that protoc does for default values of bytes fields. +// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape +// sequences are conveyed, unmodified, into the decoded result. +func unescape(s string) string { + // NB: Sadly, we can't use strconv.Unquote because protoc will escape both + // single and double quotes, but strconv.Unquote only allows one or the + // other (based on actual surrounding quotes of its input argument). + + var out []byte + for len(s) > 0 { + // regular character, or too short to be valid escape + if s[0] != '\\' || len(s) < 2 { + out = append(out, s[0]) + s = s[1:] + } else if c := escapeChars[s[1]]; c != 0 { + // escape sequence + out = append(out, c) + s = s[2:] + } else if s[1] == 'x' || s[1] == 'X' { + // hex escape, e.g. "\x80 + if len(s) < 4 { + // too short to be valid + out = append(out, s[:2]...) + s = s[2:] + continue + } + v, err := strconv.ParseUint(s[2:4], 16, 8) + if err != nil { + out = append(out, s[:4]...) + } else { + out = append(out, byte(v)) + } + s = s[4:] + } else if '0' <= s[1] && s[1] <= '7' { + // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" + // so consume up to 2 more bytes or up to end-of-string + n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(v)) + } + s = s[1+n:] + } else { + // bad escape, just propagate the slash as-is + out = append(out, s[0]) + s = s[1:] + } + } + + return string(out) +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extObj := g.ObjectNamed(*ext.Extendee) + var extDesc *Descriptor + if id, ok := extObj.(*ImportedDescriptor); ok { + // This is extending a publicly imported message. + // We need the underlying type for goTag. + extDesc = id.o.(*Descriptor) + } else { + extDesc = extObj.(*Descriptor) + } + extendedType := "*" + g.TypeName(extObj) // always use the original + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2_bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. 
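// Sketch of this function's output, assuming a hypothetical extension
// `optional int32 priority = 100;` extending message example.Base (the names
// and the exact tag string are illustrative):
//
//	var E_Priority = &proto.ExtensionDesc{
//		ExtendedType:  (*Base)(nil),
//		ExtensionType: (*int32)(nil),
//		Field:         100,
//		Name:          "example.priority",
//		Tag:           "varint,100,opt,name=priority",
//		Filename:      "example.proto",
//	}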
+ mset := false + if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + mset = true + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. + extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.In() + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + g.P(`Filename: "`, g.file.GetName(), `",`) + + g.Out() + g.P("}") + g.P() + + if mset { + // Generate a bit more code to register with message_set.go. + g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName) + } + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + for _, enum := range g.file.enum { + g.generateEnumRegistration(enum) + } + for _, d := range g.file.desc { + for _, ext := range d.ext { + g.generateExtensionRegistration(ext) + } + } + for _, ext := range g.file.ext { + g.generateExtensionRegistration(ext) + } + if len(g.init) == 0 { + return + } + g.P("func init() {") + g.In() + for _, l := range g.init { + g.P(l) + } + g.Out() + g.P("}") + g.init = nil +} + +func (g *Generator) generateFileDescriptor(file *FileDescriptor) { + // Make a copy and trim source_code_info data. + // TODO: Trim this more when we know exactly what we need. + pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) + pb.SourceCodeInfo = nil + + b, err := proto.Marshal(pb) + if err != nil { + g.Fail(err.Error()) + } + + var buf bytes.Buffer + w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) + w.Write(b) + w.Close() + b = buf.Bytes() + + v := file.VarName() + g.P() + g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + g.P("var ", v, " = []byte{") + g.In() + g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.Out() + g.P("}") +} + +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { + // // We always print the full (proto-world) package name here. + pkg := enum.File().GetPackage() + if pkg != "" { + pkg += "." + } + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) +} + +func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) +} + +// And now lots of helper functions. + +// Is c an ASCII lower-case letter? +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. 
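// For example, "one_two" becomes "OneTwo", while "my_Name" becomes "My_Name"
// and "Something_Capped" stays "Something_Capped": an underscore is only
// dropped when the letter after it is lower case (these cases are exercised
// in name_test.go below).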
+// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? +func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. 
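// For example, applied via strings.Map, a dotted or dashed proto package
// name becomes a legal Go identifier:
//
//	pkg := strings.Map(badToUnderscore, "my.proto-pkg") // "my_proto_pkg"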
+func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go new file mode 100644 index 0000000..76808f3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go @@ -0,0 +1,114 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2013 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
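// A worked example of the path constants defined above in generator.go: the
// element reached by the SourceCodeInfo path [4, 0, 2, 1] is the second
// field (messageFieldPath = 2, index 1) of the first message (messagePath =
// 4, index 0), alternating field number and index exactly as described in
// descriptor.proto.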
+ +package generator + +import ( + "testing" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +func TestCamelCase(t *testing.T) { + tests := []struct { + in, want string + }{ + {"one", "One"}, + {"one_two", "OneTwo"}, + {"_my_field_name_2", "XMyFieldName_2"}, + {"Something_Capped", "Something_Capped"}, + {"my_Name", "My_Name"}, + {"OneTwo", "OneTwo"}, + {"_", "X"}, + {"_a_", "XA_"}, + } + for _, tc := range tests { + if got := CamelCase(tc.in); got != tc.want { + t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want) + } + } +} + +func TestGoPackageOption(t *testing.T) { + tests := []struct { + in string + impPath, pkg string + ok bool + }{ + {"", "", "", false}, + {"foo", "", "foo", true}, + {"github.com/golang/bar", "github.com/golang/bar", "bar", true}, + {"github.com/golang/bar;baz", "github.com/golang/bar", "baz", true}, + } + for _, tc := range tests { + d := &FileDescriptor{ + FileDescriptorProto: &descriptor.FileDescriptorProto{ + Options: &descriptor.FileOptions{ + GoPackage: &tc.in, + }, + }, + } + impPath, pkg, ok := d.goPackageOption() + if impPath != tc.impPath || pkg != tc.pkg || ok != tc.ok { + t.Errorf("go_package = %q => (%q, %q, %t), want (%q, %q, %t)", tc.in, + impPath, pkg, ok, tc.impPath, tc.pkg, tc.ok) + } + } +} + +func TestUnescape(t *testing.T) { + tests := []struct { + in string + out string + }{ + // successful cases, including all kinds of escapes + {"", ""}, + {"foo bar baz frob nitz", "foo bar baz frob nitz"}, + {`\000\001\002\003\004\005\006\007`, string([]byte{0, 1, 2, 3, 4, 5, 6, 7})}, + {`\a\b\f\n\r\t\v\\\?\'\"`, string([]byte{'\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\', '?', '\'', '"'})}, + {`\x10\x20\x30\x40\x50\x60\x70\x80`, string([]byte{16, 32, 48, 64, 80, 96, 112, 128})}, + // variable length octal escapes + {`\0\018\222\377\3\04\005\6\07`, string([]byte{0, 1, '8', 0222, 255, 3, 4, 5, 6, 7})}, + // malformed escape sequences left as is + {"foo \\g bar", "foo \\g bar"}, + {"foo \\xg0 bar", "foo \\xg0 bar"}, + {"\\", "\\"}, + {"\\x", "\\x"}, + {"\\xf", "\\xf"}, + {"\\777", "\\777"}, // overflows byte + } + for _, tc := range tests { + s := unescape(tc.in) + if s != tc.out { + t.Errorf("doUnescape(%q) = %q; should have been %q", tc.in, s, tc.out) + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go new file mode 100644 index 0000000..2660e47 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -0,0 +1,463 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package grpc outputs gRPC service descriptions in Go code. +// It runs as a plugin for the Go protocol buffer compiler plugin. +// It is linked in to protoc-gen-go. +package grpc + +import ( + "fmt" + "path" + "strconv" + "strings" + + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// the grpc package is introduced; the generated code references +// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 4 + +// Paths for packages used by code generated in this file, +// relative to the import_prefix of the generator.Generator. +const ( + contextPkgPath = "golang.org/x/net/context" + grpcPkgPath = "google.golang.org/grpc" +) + +func init() { + generator.RegisterPlugin(new(grpc)) +} + +// grpc is an implementation of the Go protocol buffer compiler's +// plugin architecture. It generates bindings for gRPC support. +type grpc struct { + gen *generator.Generator +} + +// Name returns the name of this plugin, "grpc". +func (g *grpc) Name() string { + return "grpc" +} + +// The names for packages imported in the generated code. +// They may vary from the final path component of the import path +// if the name is used by other packages. +var ( + contextPkg string + grpcPkg string +) + +// Init initializes the plugin. +func (g *grpc) Init(gen *generator.Generator) { + g.gen = gen + contextPkg = generator.RegisterUniquePackageName("context", nil) + grpcPkg = generator.RegisterUniquePackageName("grpc", nil) +} + +// Given a type name defined in a .proto, return its object. +// Also record that we're using it, to guarantee the associated import. +func (g *grpc) objectNamed(name string) generator.Object { + g.gen.RecordTypeUse(name) + return g.gen.ObjectNamed(name) +} + +// Given a type name defined in a .proto, return its name as we will print it. +func (g *grpc) typeName(str string) string { + return g.gen.TypeName(g.objectNamed(str)) +} + +// P forwards to g.gen.P. +func (g *grpc) P(args ...interface{}) { g.gen.P(args...) } + +// Generate generates code for the services in the given file. +func (g *grpc) Generate(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPkg, ".Context") + g.P("var _ ", grpcPkg, ".ClientConn") + g.P() + + // Assert version compatibility. 
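// With generatedCodeVersion = 4, the three g.P calls below land in the
// generated .pb.go as:
//
//	// This is a compile-time assertion to ensure that this generated file
//	// is compatible with the grpc package it is being compiled against.
//	const _ = grpc.SupportPackageIsVersion4
//
// so a stale grpc dependency fails at build time rather than at runtime.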
+ g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion) + g.P() + + for i, service := range file.FileDescriptorProto.Service { + g.generateService(file, service, i) + } +} + +// GenerateImports generates the import declaration for this file. +func (g *grpc) GenerateImports(file *generator.FileDescriptor) { + if len(file.FileDescriptorProto.Service) == 0 { + return + } + g.P("import (") + g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) + g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(")") + g.P() +} + +// reservedClientName records whether a client name is reserved on the client side. +var reservedClientName = map[string]bool{ +// TODO: do we need any in gRPC? +} + +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } + +// generateService generates all the code for the named service. +func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { + path := fmt.Sprintf("6,%d", index) // 6 means service. + + origServName := service.GetName() + fullServName := origServName + if pkg := file.GetPackage(); pkg != "" { + fullServName = pkg + "." + fullServName + } + servName := generator.CamelCase(origServName) + + g.P() + g.P("// Client API for ", servName, " service") + g.P() + + // Client interface. + g.P("type ", servName, "Client interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateClientSignature(servName, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(servName), "Client struct {") + g.P("cc *", grpcPkg, ".ClientConn") + g.P("}") + g.P() + + // NewClient factory. + g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") + g.P("return &", unexport(servName), "Client{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + serviceDescVar := "_" + servName + "_serviceDesc" + // Client method implementations. + for _, method := range service.Method { + var descExpr string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + // Unary RPC method + descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex) + streamIndex++ + } + g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) + } + + g.P("// Server API for ", servName, " service") + g.P() + + // Server interface. + serverType := servName + "Server" + g.P("type ", serverType, " interface {") + for i, method := range service.Method { + g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. + g.P(g.generateServerSignature(servName, method)) + } + g.P("}") + g.P() + + // Server registration. + g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Method { + hname := g.generateServerMethod(servName, fullServName, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. 
+ g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc {") + g.P("ServiceName: ", strconv.Quote(fullServName), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPkg, ".MethodDesc{") + for i, method := range service.Method { + if method.GetServerStreaming() || method.GetClientStreaming() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPkg, ".StreamDesc{") + for i, method := range service.Method { + if !method.GetServerStreaming() && !method.GetClientStreaming() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(method.GetName()), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.GetServerStreaming() { + g.P("ServerStreams: true,") + } + if method.GetClientStreaming() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.GetName(), "\",") + g.P("}") + g.P() +} + +// generateClientSignature returns the client-side signature for a method. +func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + reqArg := ", in *" + g.typeName(method.GetInputType()) + if method.GetClientStreaming() { + reqArg = "" + } + respName := "*" + g.typeName(method.GetOutputType()) + if method.GetServerStreaming() || method.GetClientStreaming() { + respName = servName + "_" + generator.CamelCase(origMethName) + "Client" + } + return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName) +} + +func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) { + sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName()) + methName := generator.CamelCase(method.GetName()) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("out := new(", outType, ")") + // TODO: Pass descExpr to Invoke. + g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(servName) + methName + "Client" + g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.GetClientStreaming() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.GetClientStreaming() + genRecv := method.GetServerStreaming() + genCloseAndRecv := !method.GetServerStreaming() + + // Stream auxiliary types and methods. 
+ g.P("type ", servName, "_", methName, "Client interface {") + if genSend { + g.P("Send(*", inType, ") error") + } + if genRecv { + g.P("Recv() (*", outType, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", outType, ", error)") + } + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ClientStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", inType, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", outType, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +// generateServerSignature returns the server-side signature for a method. +func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string { + methName := generator.CamelCase(method.GetName()) + hname := fmt.Sprintf("_%s_%s_Handler", servName, methName) + inType := g.typeName(method.GetInputType()) + outType := g.typeName(method.GetOutputType()) + + if !method.GetServerStreaming() && !method.GetClientStreaming() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {") + g.P("in := new(", inType, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }") + g.P("info := &", grpcPkg, ".UnaryServerInfo{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {") + g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(servName) + methName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {") + if !method.GetClientStreaming() { + g.P("m := new(", inType, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, 
"{stream})") + } else { + g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.GetServerStreaming() + genSendAndClose := !method.GetServerStreaming() + genRecv := method.GetClientStreaming() + + // Stream auxiliary types and methods. + g.P("type ", servName, "_", methName, "Server interface {") + if genSend { + g.P("Send(*", outType, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", outType, ") error") + } + if genRecv { + g.P("Recv() (*", inType, ", error)") + } + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPkg, ".ServerStream") + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {") + g.P("m := new(", inType, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go new file mode 100644 index 0000000..532a550 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go @@ -0,0 +1,34 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package main + +import _ "github.com/golang/protobuf/protoc-gen-go/grpc" diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go new file mode 100644 index 0000000..8e2486d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go @@ -0,0 +1,98 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Run it by building this program and putting it in your path with +// the name +// protoc-gen-go +// That word 'go' at the end becomes part of the option string set for the +// protocol compiler, so once the protocol compiler (protoc) is installed +// you can run +// protoc --go_out=output_directory input_directory/file.proto +// to generate Go bindings for the protocol defined by file.proto. +// With that input, the output will be written to +// output_directory/file.pb.go +// +// The generated code is documented in the package comment for +// the library. +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator" +) + +func main() { + // Begin by allocating a generator. The request and response structures are stored there + // so we can do error handling easily - the response structure contains the field to + // report failure. 
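// Per the package comment above, a bare invocation generates plain message
// bindings; the gRPC plugin linked in by link_grpc.go is enabled through the
// plugins parameter (file names are illustrative):
//
//	protoc --go_out=. example.proto
//	protoc --go_out=plugins=grpc:. example.proto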
+ g := generator.New() + + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + g.Error(err, "reading input") + } + + if err := proto.Unmarshal(data, g.Request); err != nil { + g.Error(err, "parsing input proto") + } + + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + + g.CommandLineParameters(g.Request.GetParameter()) + + // Create a wrapped version of the Descriptors and EnumDescriptors that + // point to the file that defines them. + g.WrapTypes() + + g.SetPackageNames() + g.BuildTypeNameMap() + + g.GenerateAllFiles() + + // Send back the results. + data, err = proto.Marshal(g.Response) + if err != nil { + g.Error(err, "failed to marshal output proto") + } + _, err = os.Stdout.Write(data) + if err != nil { + g.Error(err, "failed to write output proto") + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile new file mode 100644 index 0000000..bc0463d --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile @@ -0,0 +1,45 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/compiler/plugin.proto +# Also we need to fix an import. +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto . + protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:../../../../.. 
\ + -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto + +restore: + cp plugin.pb.golden plugin.pb.go + +preserve: + cp plugin.pb.go plugin.pb.golden diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 0000000..c608a24 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + Version + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. 
+ // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". 
+ // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. 
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, + 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, + 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, + 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, + 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, + 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, + 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, + 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, + 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, + 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, + 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, + 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, + 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, + 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, + 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, + 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, + 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, + 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, + 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, + 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, + 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, + 0xba, 0x1e, 0x49, 0x69, 
0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, + 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, + 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, + 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, + 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden new file mode 100644 index 0000000..8953d0f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference proto and math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +type CodeGeneratorRequest struct { + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (this *CodeGeneratorRequest) GetParameter() string { + if this != nil && this.Parameter != nil { + return *this.Parameter + } + return "" +} + +type CodeGeneratorResponse struct { + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (this *CodeGeneratorResponse) GetError() string { + if this != nil && this.Error != nil { + return *this.Error + } + return "" +} + +type CodeGeneratorResponse_File struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (this *CodeGeneratorResponse_File) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { + if this != nil && this.InsertionPoint != nil { + return *this.InsertionPoint + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetContent() string { + if this != nil 
&& this.Content != nil { + return *this.Content + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto new file mode 100644 index 0000000..5b55745 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. 
The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). 
+ // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile new file mode 100644 index 0000000..a0bf9fe --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile @@ -0,0 +1,73 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
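[Editor's sketch, not part of this diff.] The plugin.proto comments above spell out the whole contract: protoc hands the plugin a serialized CodeGeneratorRequest on stdin and expects a serialized CodeGeneratorResponse on stdout. Built against the vendored packages in this diff, a minimal plugin could look roughly like the sketch below. The output file name "example.txt" is invented for illustration, and note the error-handling split the comments above require: an unparseable request is a protoc-side problem (stderr, non-zero exit), while errors in the .proto inputs belong in the response's Error field with a zero exit status.

package main

import (
	"io/ioutil"
	"log"
	"os"
	"strings"

	"github.com/golang/protobuf/proto"
	plugin_go "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// An unparseable request indicates a problem in protoc itself:
	// report on stderr and exit non-zero, per the contract above.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := new(plugin_go.CodeGeneratorRequest)
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}

	// Emit one generated file. Failures caused by the .proto inputs would
	// instead set resp.Error and still exit with status zero.
	resp := &plugin_go.CodeGeneratorResponse{
		File: []*plugin_go.CodeGeneratorResponse_File{{
			Name:    proto.String("example.txt"), // invented name for this sketch
			Content: proto.String("saw: " + strings.Join(req.FileToGenerate, ", ")),
		}},
	}
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}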
+ +all: + @echo run make test + +include ../../Make.protobuf + +test: golden testbuild + +#test: golden testbuild extension_test +# ./extension_test +# @echo PASS + +my_test/test.pb.go: my_test/test.proto + protoc --go_out=Mmulti/multi1.proto=github.com/golang/protobuf/protoc-gen-go/testdata/multi:. $< + +golden: + make -B my_test/test.pb.go + sed -i -e '/return.*fileDescriptor/d' my_test/test.pb.go + sed -i -e '/^var fileDescriptor/,/^}/d' my_test/test.pb.go + sed -i -e '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go + gofmt -w my_test/test.pb.go + diff -w my_test/test.pb.go my_test/test.pb.go.golden + +nuke: clean + +testbuild: regenerate + go test + +regenerate: + # Invoke protoc once to generate three independent .pb.go files in the same package. + protoc --go_out=. multi/multi1.proto multi/multi2.proto multi/multi3.proto + +#extension_test: extension_test.$O +# $(LD) -L. -o $@ $< + +#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O +# rm -f multi.a +# $(QUOTED_GOBIN)/gopack grc $@ $< + +#test.pb.go: imp.pb.go +#multi1.pb.go: multi2.pb.go multi3.pb.go +#main.$O: imp.pb.$O test.pb.$O multi.a +#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto new file mode 100644 index 0000000..94acfc1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_base; + +message BaseMessage { + optional int32 height = 1; + extensions 4 to 9; + extensions 16 to max; +} + +// Another message that may be extended, using message_set_wire_format. 
+message OldStyleMessage { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto new file mode 100644 index 0000000..fca7f60 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package extension_extra; + +message ExtraMessage { + optional int32 width = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go new file mode 100644 index 0000000..86e9c11 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go @@ -0,0 +1,210 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that we can use protocol buffers that use extensions. + +package testdata + +/* + +import ( + "bytes" + "regexp" + "testing" + + "github.com/golang/protobuf/proto" + base "extension_base.pb" + user "extension_user.pb" +) + +func TestSingleFieldExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(178), + } + + // Use extension within scope of another type. + vol := proto.Uint32(11) + err := proto.SetExtension(bm, user.E_LoudMessage_Volume, vol) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Decoded message didn't contain extension.") + } + vol_out, err := proto.GetExtension(bm_new, user.E_LoudMessage_Volume) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if v := vol_out.(*uint32); *v != *vol { + t.Errorf("vol_out = %v, expected %v", *v, *vol) + } + proto.ClearExtension(bm_new, user.E_LoudMessage_Volume) + if proto.HasExtension(bm_new, user.E_LoudMessage_Volume) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + // Use extension that is itself a message. 
+ um := &user.UserMessage{ + Name: proto.String("Dave"), + Rank: proto.String("Major"), + } + err := proto.SetExtension(bm, user.E_LoginMessage_UserMessage, um) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Decoded message didn't contain extension.") + } + um_out, err := proto.GetExtension(bm_new, user.E_LoginMessage_UserMessage) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if n := um_out.(*user.UserMessage).Name; *n != *um.Name { + t.Errorf("um_out.Name = %q, expected %q", *n, *um.Name) + } + if r := um_out.(*user.UserMessage).Rank; *r != *um.Rank { + t.Errorf("um_out.Rank = %q, expected %q", *r, *um.Rank) + } + proto.ClearExtension(bm_new, user.E_LoginMessage_UserMessage) + if proto.HasExtension(bm_new, user.E_LoginMessage_UserMessage) { + t.Fatal("Failed clearing extension.") + } +} + +func TestTopLevelExtension(t *testing.T) { + bm := &base.BaseMessage{ + Height: proto.Int32(179), + } + + width := proto.Int32(17) + err := proto.SetExtension(bm, user.E_Width, width) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + buf, err := proto.Marshal(bm) + if err != nil { + t.Fatal("Failed encoding message with extension:", err) + } + bm_new := new(base.BaseMessage) + if err := proto.Unmarshal(buf, bm_new); err != nil { + t.Fatal("Failed decoding message with extension:", err) + } + if !proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Decoded message didn't contain extension.") + } + width_out, err := proto.GetExtension(bm_new, user.E_Width) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + if w := width_out.(*int32); *w != *width { + t.Errorf("width_out = %v, expected %v", *w, *width) + } + proto.ClearExtension(bm_new, user.E_Width) + if proto.HasExtension(bm_new, user.E_Width) { + t.Fatal("Failed clearing extension.") + } +} + +func TestMessageSetWireFormat(t *testing.T) { + osm := new(base.OldStyleMessage) + osp := &user.OldStyleParcel{ + Name: proto.String("Dave"), + Height: proto.Int32(178), + } + + err := proto.SetExtension(osm, user.E_OldStyleParcel_MessageSetExtension, osp) + if err != nil { + t.Fatal("Failed setting extension:", err) + } + + buf, err := proto.Marshal(osm) + if err != nil { + t.Fatal("Failed encoding message:", err) + } + + // Data generated from Python implementation. + expected := []byte{ + 11, 16, 209, 15, 26, 9, 10, 4, 68, 97, 118, 101, 16, 178, 1, 12, + } + + if !bytes.Equal(expected, buf) { + t.Errorf("Encoding mismatch.\nwant %+v\n got %+v", expected, buf) + } + + // Check that it is restored correctly. 
+ osm = new(base.OldStyleMessage) + if err := proto.Unmarshal(buf, osm); err != nil { + t.Fatal("Failed decoding message:", err) + } + osp_out, err := proto.GetExtension(osm, user.E_OldStyleParcel_MessageSetExtension) + if err != nil { + t.Fatal("Failed getting extension:", err) + } + osp = osp_out.(*user.OldStyleParcel) + if *osp.Name != "Dave" || *osp.Height != 178 { + t.Errorf("Retrieved extension from decoded message is not correct: %+v", osp) + } +} + +func main() { + // simpler than rigging up gotest + testing.Main(regexp.MatchString, []testing.InternalTest{ + {"TestSingleFieldExtension", TestSingleFieldExtension}, + {"TestMessageExtension", TestMessageExtension}, + {"TestTopLevelExtension", TestTopLevelExtension}, + }, + []testing.InternalBenchmark{}, + []testing.InternalExample{}) +} + +*/ diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto new file mode 100644 index 0000000..ff65873 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
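[Editor's sketch, not part of this diff.] extension_user.proto below declares the extensions that extension_test.go above exercises. For each extend block, protoc-gen-go emits a *proto.ExtensionDesc variable (E_Width, E_LoudMessage_Volume, and so on) that proto.SetExtension and proto.GetExtension accept. The sketch below shows the rough shape of the descriptor for the top-level width extension; it is illustrative only, modeled on the generated descriptors that appear later in this diff, and assumes the generated extension_base package is importable as base.

// Illustrative, not generated output: roughly what protoc-gen-go derives
// from `extend extension_base.BaseMessage { optional int32 width = 6; }`.
var E_Width = &proto.ExtensionDesc{
	ExtendedType:  (*base.BaseMessage)(nil), // the message being extended
	ExtensionType: (*int32)(nil),            // the Go type of the extension value
	Field:         6,                        // the extension's field number
	Name:          "extension_user.width",   // fully qualified proto name
	Tag:           "varint,6,opt,name=width",
}

// Typical round trip, as exercised in extension_test.go above:
//   err := proto.SetExtension(msg, E_Width, proto.Int32(17))
//   v, err := proto.GetExtension(msg, E_Width)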
+ +syntax = "proto2"; + +import "extension_base.proto"; +import "extension_extra.proto"; + +package extension_user; + +message UserMessage { + optional string name = 1; + optional string rank = 2; +} + +// Extend with a message +extend extension_base.BaseMessage { + optional UserMessage user_message = 5; +} + +// Extend with a foreign message +extend extension_base.BaseMessage { + optional extension_extra.ExtraMessage extra_message = 9; +} + +// Extend with some primitive types +extend extension_base.BaseMessage { + optional int32 width = 6; + optional int64 area = 7; +} + +// Extend inside the scope of another type +message LoudMessage { + extend extension_base.BaseMessage { + optional uint32 volume = 8; + } + extensions 100 to max; +} + +// Extend inside the scope of another type, using a message. +message LoginMessage { + extend extension_base.BaseMessage { + optional UserMessage user_message = 16; + } +} + +// Extend with a repeated field +extend extension_base.BaseMessage { + repeated Detail detail = 17; +} + +message Detail { + optional string color = 1; +} + +// An extension of an extension +message Announcement { + optional string words = 1; + extend LoudMessage { + optional Announcement loud_ext = 100; + } +} + +// Something that can be put in a message set. +message OldStyleParcel { + extend extension_base.OldStyleMessage { + optional OldStyleParcel message_set_extension = 2001; + } + + required string name = 1; + optional int32 height = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto new file mode 100644 index 0000000..b8bc41a --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto @@ -0,0 +1,59 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
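[Editor's sketch, not part of this diff.] grpc.proto below pairs each streaming pattern with one RPC. When the gRPC plugin is enabled (for example, protoc --go_out=plugins=grpc:.), protoc-gen-go also emits client and server interfaces for the service; the sketch below shows roughly the client side. The type and method names follow the generator's usual naming scheme but are reproduced from memory, so treat the exact signatures as illustrative.

// Illustrative sketch of the grpc-plugin output for service Test.
// Unary calls return the response directly; streaming calls return
// per-RPC stream handles.
type TestClient interface {
	UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
	// Server-side streaming: the returned stream's Recv() yields StreamMsg values.
	Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error)
	// Client-side streaming: Send() StreamMsg values, then CloseAndRecv().
	Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error)
	// Bidirectional: Send() StreamMsg and Recv() StreamMsg2 concurrently.
	Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error)
}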
+ +syntax = "proto3"; + +package grpc.testing; + +message SimpleRequest { +} + +message SimpleResponse { +} + +message StreamMsg { +} + +message StreamMsg2 { +} + +service Test { + rpc UnaryCall(SimpleRequest) returns (SimpleResponse); + + // This RPC streams from the server only. + rpc Downstream(SimpleRequest) returns (stream StreamMsg); + + // This RPC streams from the client. + rpc Upstream(stream StreamMsg) returns (SimpleResponse); + + // This one streams in both directions. + rpc Bidi(stream StreamMsg) returns (stream StreamMsg2); +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden new file mode 100644 index 0000000..784a4f8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden @@ -0,0 +1,113 @@ +// Code generated by protoc-gen-go. +// source: imp.proto +// DO NOT EDIT! + +package imp + +import proto "github.com/golang/protobuf/proto" +import "math" +import "os" +import imp1 "imp2.pb" + +// Reference proto & math imports to suppress error if they are not otherwise used. +var _ = proto.GetString +var _ = math.Inf + +// Types from public import imp2.proto +type PubliclyImportedMessage imp1.PubliclyImportedMessage + +func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() } +func (this *PubliclyImportedMessage) String() string { + return (*imp1.PubliclyImportedMessage)(this).String() +} + +// PubliclyImportedMessage from public import imp.proto + +type ImportedMessage_Owner int32 + +const ( + ImportedMessage_DAVE ImportedMessage_Owner = 1 + ImportedMessage_MIKE ImportedMessage_Owner = 2 +) + +var ImportedMessage_Owner_name = map[int32]string{ + 1: "DAVE", + 2: "MIKE", +} +var ImportedMessage_Owner_value = map[string]int32{ + "DAVE": 1, + "MIKE": 2, +} + +// NewImportedMessage_Owner is deprecated. Use x.Enum() instead. 
+func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner { + e := ImportedMessage_Owner(x) + return &e +} +func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner { + p := new(ImportedMessage_Owner) + *p = x + return p +} +func (x ImportedMessage_Owner) String() string { + return proto.EnumName(ImportedMessage_Owner_name, int32(x)) +} + +type ImportedMessage struct { + Field *int64 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedMessage) Reset() { *this = ImportedMessage{} } +func (this *ImportedMessage) String() string { return proto.CompactTextString(this) } + +var extRange_ImportedMessage = []proto.ExtensionRange{ + proto.ExtensionRange{90, 100}, +} + +func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedMessage +} +func (this *ImportedMessage) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +type ImportedExtendable struct { + XXX_extensions map[int32][]byte `json:",omitempty"` + XXX_unrecognized []byte `json:",omitempty"` +} + +func (this *ImportedExtendable) Reset() { *this = ImportedExtendable{} } +func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) } + +func (this *ImportedExtendable) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(this.ExtensionMap()) +} +func (this *ImportedExtendable) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, this.ExtensionMap()) +} +// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*ImportedExtendable)(nil) +var _ proto.Unmarshaler = (*ImportedExtendable)(nil) + +var extRange_ImportedExtendable = []proto.ExtensionRange{ + proto.ExtensionRange{100, 536870911}, +} + +func (*ImportedExtendable) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ImportedExtendable +} +func (this *ImportedExtendable) ExtensionMap() map[int32][]byte { + if this.XXX_extensions == nil { + this.XXX_extensions = make(map[int32][]byte) + } + return this.XXX_extensions +} + +func init() { + proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto new file mode 100644 index 0000000..156e078 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto @@ -0,0 +1,70 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +import "imp2.proto"; +import "imp3.proto"; + +message ImportedMessage { + required int64 field = 1; + + // The forwarded getters for these fields are fiddly to get right. + optional ImportedMessage2 local_msg = 2; + optional ForeignImportedMessage foreign_msg = 3; // in imp3.proto + optional Owner enum_field = 4; + oneof union { + int32 state = 9; + } + + repeated string name = 5; + repeated Owner boss = 6; + repeated ImportedMessage2 memo = 7; + + map msg_map = 8; + + enum Owner { + DAVE = 1; + MIKE = 2; + } + + extensions 90 to 100; +} + +message ImportedMessage2 { +} + +message ImportedExtendable { + option message_set_wire_format = true; + extensions 100 to max; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto new file mode 100644 index 0000000..3bb0632 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message PubliclyImportedMessage { + optional int64 field = 1; +} + +enum PubliclyImportedEnum { + GLASSES = 1; + HAIR = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto new file mode 100644 index 0000000..58fc759 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package imp; + +message ForeignImportedMessage { + optional string tuber = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go new file mode 100644 index 0000000..f9b5ccf --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A simple binary to link together the protocol buffers in this test. + +package testdata + +import ( + "testing" + + mytestpb "./my_test" + multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi" +) + +func TestLink(t *testing.T) { + _ = &multipb.Multi1{} + _ = &mytestpb.Request{} +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto new file mode 100644 index 0000000..0da6e0a --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto @@ -0,0 +1,44 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +import "multi/multi2.proto"; +import "multi/multi3.proto"; + +package multitest; + +message Multi1 { + required Multi2 multi2 = 1; + optional Multi2.Color color = 2; + optional Multi3.HatType hat_type = 3; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto new file mode 100644 index 0000000..e6bfc71 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto @@ -0,0 +1,46 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi2 { + required int32 required_value = 1; + + enum Color { + BLUE = 1; + GREEN = 2; + RED = 3; + }; + optional Color color = 2; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto new file mode 100644 index 0000000..146c255 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package multitest; + +message Multi3 { + enum HatType { + FEDORA = 1; + FEZ = 2; + }; + optional HatType hat_type = 1; +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go new file mode 100644 index 0000000..1954e3f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go @@ -0,0 +1,870 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: my_test/test.proto + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply + Communique +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. +type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. 
It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 
`protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", + Filename: "my_test/test.proto", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return 
proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m 
*Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // 
union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 13: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Height: + n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 4 + case *Communique_Today: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 + case *Communique_Delta_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += proto.Size(x.Somegroup) + n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} 
} +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Communique_SomeGroup) ProtoMessage() {} + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", + Filename: "my_test/test.proto", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", + Filename: "my_test/test.proto", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden new file mode 100644 index 0000000..1954e3f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden @@ -0,0 +1,870 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: my_test/test.proto + +/* +Package my_test is a generated protocol buffer package. + +This package holds interesting messages. + +It is generated from these files: + my_test/test.proto + +It has these top-level messages: + Request + Reply + OtherBase + ReplyExtensions + OtherReplyExtensions + OldReply + Communique +*/ +package my_test + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HatType int32 + +const ( + // deliberately skipping 0 + HatType_FEDORA HatType = 1 + HatType_FEZ HatType = 2 +) + +var HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x HatType) Enum() *HatType { + p := new(HatType) + *p = x + return p +} +func (x HatType) String() string { + return proto.EnumName(HatType_name, int32(x)) +} +func (x *HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") + if err != nil { + return err + } + *x = HatType(value) + return nil +} + +// This enum represents days of the week. +type Days int32 + +const ( + Days_MONDAY Days = 1 + Days_TUESDAY Days = 2 + Days_LUNDI Days = 1 +) + +var Days_name = map[int32]string{ + 1: "MONDAY", + 2: "TUESDAY", + // Duplicate value: 1: "LUNDI", +} +var Days_value = map[string]int32{ + "MONDAY": 1, + "TUESDAY": 2, + "LUNDI": 1, +} + +func (x Days) Enum() *Days { + p := new(Days) + *p = x + return p +} +func (x Days) String() string { + return proto.EnumName(Days_name, int32(x)) +} +func (x *Days) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") + if err != nil { + return err + } + *x = Days(value) + return nil +} + +type Request_Color int32 + +const ( + Request_RED Request_Color = 0 + Request_GREEN Request_Color = 1 + Request_BLUE Request_Color = 2 +) + +var Request_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Request_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Request_Color) Enum() *Request_Color { + p := new(Request_Color) + *p = x + return p +} +func (x Request_Color) String() string { + return proto.EnumName(Request_Color_name, int32(x)) +} +func (x *Request_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") + if err != nil { + return err + } + *x = Request_Color(value) + return nil +} + +type Reply_Entry_Game int32 + +const ( + Reply_Entry_FOOTBALL Reply_Entry_Game = 1 + Reply_Entry_TENNIS Reply_Entry_Game = 2 +) + +var Reply_Entry_Game_name = map[int32]string{ + 1: "FOOTBALL", + 2: "TENNIS", +} +var Reply_Entry_Game_value = map[string]int32{ + "FOOTBALL": 1, + "TENNIS": 2, +} + +func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { + p := new(Reply_Entry_Game) + *p = x + return p +} +func (x Reply_Entry_Game) String() string { + return proto.EnumName(Reply_Entry_Game_name, int32(x)) +} +func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") + if err != nil { + return err + } + *x = Reply_Entry_Game(value) + return nil +} + +// This is a message that might be sent somewhere. 
+type Request struct { + Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` + // optional imp.ImportedMessage imported_message = 2; + Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` + Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` + // optional imp.ImportedMessage.Owner owner = 6; + Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` + Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This is a map field. It will generate map[int32]string. + NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This is a map field whose value type is a message. + MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` + // This field should not conflict with any getters. + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} + +const Default_Request_Hat HatType = HatType_FEDORA + +var Default_Request_Deadline float32 = float32(math.Inf(1)) + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetHue() Request_Color { + if m != nil && m.Hue != nil { + return *m.Hue + } + return Request_RED +} + +func (m *Request) GetHat() HatType { + if m != nil && m.Hat != nil { + return *m.Hat + } + return Default_Request_Hat +} + +func (m *Request) GetDeadline() float32 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return Default_Request_Deadline +} + +func (m *Request) GetSomegroup() *Request_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *Request) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *Request) GetMsgMapping() map[int64]*Reply { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *Request) GetReset_() int32 { + if m != nil && m.Reset_ != nil { + return *m.Reset_ + } + return 0 +} + +func (m *Request) GetGetKey_() string { + if m != nil && m.GetKey_ != nil { + return *m.GetKey_ + } + return "" +} + +type Request_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } +func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Request_SomeGroup) ProtoMessage() {} + +func (m *Request_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Reply struct { + Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` + CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply) 
Reset() { *m = Reply{} } +func (m *Reply) String() string { return proto.CompactTextString(m) } +func (*Reply) ProtoMessage() {} + +var extRange_Reply = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Reply +} + +func (m *Reply) GetFound() []*Reply_Entry { + if m != nil { + return m.Found + } + return nil +} + +func (m *Reply) GetCompactKeys() []int32 { + if m != nil { + return m.CompactKeys + } + return nil +} + +type Reply_Entry struct { + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } +func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } +func (*Reply_Entry) ProtoMessage() {} + +const Default_Reply_Entry_Value int64 = 7 + +func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { + if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { + return *m.KeyThatNeeds_1234Camel_CasIng + } + return 0 +} + +func (m *Reply_Entry) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return Default_Reply_Entry_Value +} + +func (m *Reply_Entry) GetXMyFieldName_2() int64 { + if m != nil && m.XMyFieldName_2 != nil { + return *m.XMyFieldName_2 + } + return 0 +} + +type OtherBase struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherBase) Reset() { *m = OtherBase{} } +func (m *OtherBase) String() string { return proto.CompactTextString(m) } +func (*OtherBase) ProtoMessage() {} + +var extRange_OtherBase = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherBase +} + +func (m *OtherBase) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type ReplyExtensions struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } +func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } +func (*ReplyExtensions) ProtoMessage() {} + +var E_ReplyExtensions_Time = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.time", + Tag: "fixed64,101,opt,name=time", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 105, + Name: "my.test.ReplyExtensions.carrot", + Tag: "bytes,105,opt,name=carrot", + Filename: "my_test/test.proto", +} + +var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ + ExtendedType: (*OtherBase)(nil), + ExtensionType: (*ReplyExtensions)(nil), + Field: 101, + Name: "my.test.ReplyExtensions.donut", + Tag: "bytes,101,opt,name=donut", + Filename: "my_test/test.proto", +} + +type OtherReplyExtensions struct { + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } +func (m *OtherReplyExtensions) String() 
string { return proto.CompactTextString(m) } +func (*OtherReplyExtensions) ProtoMessage() {} + +func (m *OtherReplyExtensions) GetKey() int32 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +type OldReply struct { + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OldReply) Reset() { *m = OldReply{} } +func (m *OldReply) String() string { return proto.CompactTextString(m) } +func (*OldReply) ProtoMessage() {} + +func (m *OldReply) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(&m.XXX_InternalExtensions) +} +func (m *OldReply) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +} +func (m *OldReply) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldReply) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*OldReply)(nil) +var _ proto.Unmarshaler = (*OldReply)(nil) + +var extRange_OldReply = []proto.ExtensionRange{ + {100, 2147483646}, +} + +func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldReply +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Height + // *Communique_Today + // *Communique_Maybe + // *Communique_Delta_ + // *Communique_Msg + // *Communique_Somegroup + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Height struct { + Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` +} +type Communique_Today struct { + Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` +} +type Communique_Maybe struct { + Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` +} +type Communique_Delta_ struct { + Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` +} +type Communique_Msg struct { + Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` +} +type Communique_Somegroup struct { + Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Height) isCommunique_Union() {} +func (*Communique_Today) isCommunique_Union() {} +func (*Communique_Maybe) isCommunique_Union() {} +func (*Communique_Delta_) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} +func (*Communique_Somegroup) 
isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetHeight() float32 { + if x, ok := m.GetUnion().(*Communique_Height); ok { + return x.Height + } + return 0 +} + +func (m *Communique) GetToday() Days { + if x, ok := m.GetUnion().(*Communique_Today); ok { + return x.Today + } + return Days_MONDAY +} + +func (m *Communique) GetMaybe() bool { + if x, ok := m.GetUnion().(*Communique_Maybe); ok { + return x.Maybe + } + return false +} + +func (m *Communique) GetDelta() int32 { + if x, ok := m.GetUnion().(*Communique_Delta_); ok { + return x.Delta + } + return 0 +} + +func (m *Communique) GetMsg() *Reply { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + } + return nil +} + +func (m *Communique) GetSomegroup() *Communique_SomeGroup { + if x, ok := m.GetUnion().(*Communique_Somegroup); ok { + return x.Somegroup + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Height)(nil), + (*Communique_Today)(nil), + (*Communique_Maybe)(nil), + (*Communique_Delta_)(nil), + (*Communique_Msg)(nil), + (*Communique_Somegroup)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Height: + b.EncodeVarint(9<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.Height))) + case *Communique_Today: + b.EncodeVarint(10<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Today)) + case *Communique_Maybe: + t := uint64(0) + if x.Maybe { + t = 1 + } + b.EncodeVarint(11<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Communique_Delta_: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.Delta)) + case *Communique_Msg: + b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case *Communique_Somegroup: + b.EncodeVarint(14<<3 | proto.WireStartGroup) + if err := b.Marshal(x.Somegroup); 
err != nil { + return err + } + b.EncodeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.height + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Communique_Height{math.Float32frombits(uint32(x))} + return true, err + case 10: // union.today + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Today{Days(x)} + return true, err + case 11: // union.maybe + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Maybe{x != 0} + return true, err + case 12: // union.delta + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Communique_Delta_{int32(x)} + return true, err + case 13: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Reply) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + case 14: // union.somegroup + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Communique_SomeGroup) + err := b.DecodeGroup(msg) + m.Union = &Communique_Somegroup{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 8 + case *Communique_Height: + n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 4 + case *Communique_Today: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Today)) + case *Communique_Maybe: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 + case *Communique_Delta_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *Communique_Somegroup: + n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += proto.Size(x.Somegroup) + n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Communique_SomeGroup struct { + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } +func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*Communique_SomeGroup) ProtoMessage() {} + +func (m *Communique_SomeGroup) GetMember() string { + if m != nil && m.Member != nil { + return *m.Member + } + return "" +} + +type Communique_Delta struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } +func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } +func (*Communique_Delta) ProtoMessage() {} + +var E_Tag = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*string)(nil), + Field: 103, + Name: "my.test.tag", + Tag: "bytes,103,opt,name=tag", + Filename: "my_test/test.proto", +} + +var E_Donut = &proto.ExtensionDesc{ + ExtendedType: (*Reply)(nil), + ExtensionType: (*OtherReplyExtensions)(nil), + Field: 106, + Name: "my.test.donut", + Tag: "bytes,106,opt,name=donut", + Filename: "my_test/test.proto", +} + +func init() { + proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") + proto.RegisterType((*Reply)(nil), "my.test.Reply") + proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") + proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") + proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") + proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") + proto.RegisterType((*OldReply)(nil), "my.test.OldReply") + proto.RegisterType((*Communique)(nil), "my.test.Communique") + proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") + proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") + proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) + proto.RegisterEnum("my.test.Days", Days_name, Days_value) + proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) + proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) + proto.RegisterExtension(E_ReplyExtensions_Time) + proto.RegisterExtension(E_ReplyExtensions_Carrot) + proto.RegisterExtension(E_ReplyExtensions_Donut) + proto.RegisterExtension(E_Tag) + proto.RegisterExtension(E_Donut) +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto new file mode 100644 index 0000000..8e70946 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto @@ -0,0 +1,156 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+
+// This package holds interesting messages.
+package my.test;  // dotted package name
+
+//import "imp.proto";
+import "multi/multi1.proto";  // unused import
+
+enum HatType {
+  // deliberately skipping 0
+  FEDORA = 1;
+  FEZ = 2;
+}
+
+// This enum represents days of the week.
+enum Days {
+  option allow_alias = true;
+
+  MONDAY = 1;
+  TUESDAY = 2;
+  LUNDI = 1;  // same value as MONDAY
+}
+
+// This is a message that might be sent somewhere.
+message Request {
+  enum Color {
+    RED = 0;
+    GREEN = 1;
+    BLUE = 2;
+  }
+  repeated int64 key = 1;
+//  optional imp.ImportedMessage imported_message = 2;
+  optional Color hue = 3; // no default
+  optional HatType hat = 4 [default=FEDORA];
+//  optional imp.ImportedMessage.Owner owner = 6;
+  optional float deadline = 7 [default=inf];
+  optional group SomeGroup = 8 {
+    optional int32 group_field = 9;
+  }
+
+  // These foreign types are in imp2.proto,
+  // which is publicly imported by imp.proto.
+//  optional imp.PubliclyImportedMessage pub = 10;
+//  optional imp.PubliclyImportedEnum pub_enum = 13 [default=HAIR];
+
+
+  // This is a map field. It will generate map[int32]string.
+  map<int32, string> name_mapping = 14;
+  // This is a map field whose value type is a message.
+  map<int64, Reply> msg_mapping = 15;
+
+  optional int32 reset = 12;
+  // This field should not conflict with any getters.
+ optional string get_key = 16; +} + +message Reply { + message Entry { + required int64 key_that_needs_1234camel_CasIng = 1; + optional int64 value = 2 [default=7]; + optional int64 _my_field_name_2 = 3; + enum Game { + FOOTBALL = 1; + TENNIS = 2; + } + } + repeated Entry found = 1; + repeated int32 compact_keys = 2 [packed=true]; + extensions 100 to max; +} + +message OtherBase { + optional string name = 1; + extensions 100 to max; +} + +message ReplyExtensions { + extend Reply { + optional double time = 101; + optional ReplyExtensions carrot = 105; + } + extend OtherBase { + optional ReplyExtensions donut = 101; + } +} + +message OtherReplyExtensions { + optional int32 key = 1; +} + +// top-level extension +extend Reply { + optional string tag = 103; + optional OtherReplyExtensions donut = 106; +// optional imp.ImportedMessage elephant = 107; // extend with message from another file. +} + +message OldReply { + // Extensions will be encoded in MessageSet wire format. + option message_set_wire_format = true; + extensions 100 to max; +} + +message Communique { + optional bool make_me_cry = 1; + + // This is a oneof, called "union". + oneof union { + int32 number = 5; + string name = 6; + bytes data = 7; + double temp_c = 8; + float height = 9; + Days today = 10; + bool maybe = 11; + sint32 delta = 12; // name will conflict with Delta below + Reply msg = 13; + group SomeGroup = 14 { + optional string member = 15; + } + } + + message Delta {} +} + diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto new file mode 100644 index 0000000..869b9af --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto @@ -0,0 +1,53 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2014 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
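A brief editor's sketch, not part of the vendored files above: it shows how the Go API generated from `my_test/test.proto` is used, since the proto comments describe the mapping (map fields become plain Go maps, `optional` scalars become pointers whose getters fall back to the declared default, and each `oneof` case gets its own wrapper struct). The import alias `pb` and a `main` package are assumptions for illustration; vendored testdata packages are not normally importable from application code.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	// Hypothetical import of the generated testdata package, for illustration only.
	pb "github.com/golang/protobuf/protoc-gen-go/testdata/my_test"
)

func main() {
	req := &pb.Request{
		Key:         []int64{1, 2, 3},
		NameMapping: map[int32]string{7: "seven"}, // map<int32, string> -> map[int32]string
	}
	// Hat was never set, so the getter returns the [default=FEDORA] value.
	fmt.Println(req.GetHat()) // FEDORA

	// The oneof "union" is populated by assigning one of the generated wrapper types.
	c := &pb.Communique{Union: &pb.Communique_Number{Number: 42}}

	buf, err := proto.Marshal(c)
	if err != nil {
		log.Fatal(err)
	}
	out := &pb.Communique{}
	if err := proto.Unmarshal(buf, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetNumber()) // 42; GetName(), GetData(), etc. return zero values
}
```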
+ +syntax = "proto3"; + +package proto3; + +message Request { + enum Flavour { + SWEET = 0; + SOUR = 1; + UMAMI = 2; + GOPHERLICIOUS = 3; + } + string name = 1; + repeated int64 key = 2; + Flavour taste = 3; + Book book = 4; + repeated int64 unpacked = 5 [packed=false]; +} + +message Book { + string title = 1; + bytes raw_data = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 0000000..b2af97f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,139 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. 
+func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 0000000..f346017 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. 
+// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 0000000..c748667 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,149 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." 
is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go new file mode 100644 index 0000000..ed675b4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
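
The "fully qualified type name after the last '/'" rule described above is exactly what AnyMessageName applies to a type URL. A tiny standalone sketch of that rule, with hypothetical URL values:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // messageName mirrors the slash-splitting behaviour described above.
    func messageName(typeURL string) (string, bool) {
    	slash := strings.LastIndex(typeURL, "/")
    	if slash < 0 {
    		return "", false // no "/" at all: AnyMessageName reports an invalid type URL
    	}
    	return typeURL[slash+1:], true
    }

    func main() {
    	fmt.Println(messageName("type.googleapis.com/google.protobuf.Duration")) // google.protobuf.Duration true
    	fmt.Println(messageName("foo.bar.com/x/y.z"))                            // y.z true
    }
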
+ +package ptypes + +import ( + "testing" + + "github.com/golang/protobuf/proto" + pb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/golang/protobuf/ptypes/any" +) + +func TestMarshalUnmarshal(t *testing.T) { + orig := &any.Any{Value: []byte("test")} + + packed, err := MarshalAny(orig) + if err != nil { + t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) + } + + unpacked := &any.Any{} + err = UnmarshalAny(packed, unpacked) + if err != nil || !proto.Equal(unpacked, orig) { + t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) + } +} + +func TestIs(t *testing.T) { + a, err := MarshalAny(&pb.FileDescriptorProto{}) + if err != nil { + t.Fatal(err) + } + if Is(a, &pb.DescriptorProto{}) { + t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") + } + if !Is(a, &pb.FileDescriptorProto{}) { + t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") + } +} + +func TestIsDifferentUrlPrefixes(t *testing.T) { + m := &pb.FileDescriptorProto{} + a := &any.Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} + if !Is(a, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) + } +} + +func TestUnmarshalDynamic(t *testing.T) { + want := &pb.FileDescriptorProto{Name: proto.String("foo")} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + var got DynamicAny + if err := UnmarshalAny(a, &got); err != nil { + t.Fatal(err) + } + if !proto.Equal(got.Message, want) { + t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) + } +} + +func TestEmpty(t *testing.T) { + want := &pb.FileDescriptorProto{} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + got, err := Empty(a) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, want) { + t.Errorf("unequal empty message, got %q, want %q", got, want) + } + + // that's a valid type_url for a message which shouldn't be linked into this + // test binary. We want an error. + a.TypeUrl = "type.googleapis.com/google.protobuf.FieldMask" + if _, err := Empty(a); err == nil { + t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 0000000..c0d595d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 0000000..65cb0f8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 0000000..b2410a0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
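
The conversion pair defined in duration.go above round-trips cleanly for in-range values. A short sketch using only its exported API (illustrative, not part of the vendored files):

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/golang/protobuf/ptypes"
    )

    func main() {
    	// time.Duration -> durpb.Duration: DurationProto splits whole seconds
    	// from the leftover nanoseconds.
    	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
    	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

    	// durpb.Duration -> time.Duration: Duration validates first and fails
    	// for values outside time.Duration's range.
    	d, err := ptypes.Duration(p)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(d) // 1m30.5s
    }
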
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 0000000..975fce4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. 
For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration_test.go b/vendor/github.com/golang/protobuf/ptypes/duration_test.go new file mode 100644 index 0000000..e00491a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration_test.go @@ -0,0 +1,121 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + minGoSeconds = math.MinInt64 / int64(1e9) + maxGoSeconds = math.MaxInt64 / int64(1e9) +) + +var durationTests = []struct { + proto *durpb.Duration + isValid bool + inRange bool + dur time.Duration +}{ + // The zero duration. + {&durpb.Duration{Seconds: 0, Nanos: 0}, true, true, 0}, + // Some ordinary non-zero durations. + {&durpb.Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second}, + {&durpb.Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second}, + {&durpb.Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987}, + {&durpb.Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)}, + // The largest duration representable in Go. + {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, + // The smallest duration representable in Go. 
+ {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, + {nil, false, false, 0}, + {&durpb.Duration{Seconds: -100, Nanos: 987}, false, false, 0}, + {&durpb.Duration{Seconds: 100, Nanos: -987}, false, false, 0}, + {&durpb.Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0}, + {&durpb.Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0}, + // The largest valid duration. + {&durpb.Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0}, + // The smallest valid duration. + {&durpb.Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0}, + // The smallest invalid duration above the valid range. + {&durpb.Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0}, + // The largest invalid duration below the valid range. + {&durpb.Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0}, + // One nanosecond past the largest duration representable in Go. + {&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, + // One nanosecond past the smallest duration representable in Go. + {&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, + // One second past the largest duration representable in Go. + {&durpb.Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, + // One second past the smallest duration representable in Go. + {&durpb.Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, +} + +func TestValidateDuration(t *testing.T) { + for _, test := range durationTests { + err := validateDuration(test.proto) + gotValid := (err == nil) + if gotValid != test.isValid { + t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) + } + } +} + +func TestDuration(t *testing.T) { + for _, test := range durationTests { + got, err := Duration(test.proto) + gotOK := (err == nil) + wantOK := test.isValid && test.inRange + if gotOK != wantOK { + t.Errorf("Duration(%v) ok = %t, want %t", test.proto, gotOK, wantOK) + } + if err == nil && got != test.dur { + t.Errorf("Duration(%v) = %v, want %v", test.proto, got, test.dur) + } + } +} + +func TestDurationProto(t *testing.T) { + for _, test := range durationTests { + if test.isValid && test.inRange { + got := DurationProto(test.dur) + if !proto.Equal(got, test.proto) { + t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 0000000..e877b72 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,66 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +/* +Package empty is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
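
The maxGoSeconds and minGoSeconds boundary rows in the test table above rely on splitting math.MaxInt64 nanoseconds into whole seconds plus a remainder. The same arithmetic spelled out as a runnable sketch (values shown are exact):

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	// math.MaxInt64 nanoseconds is the largest time.Duration (roughly 292 years).
    	secs := int64(math.MaxInt64) / int64(1e9)       // 9223372036, the tests' maxGoSeconds
    	nanos := int64(math.MaxInt64) - secs*int64(1e9) // 854775807
    	fmt.Println(secs, nanos)
    	// A durpb.Duration with Seconds: 9223372036 and Nanos: 854775807 is therefore
    	// the "largest duration representable in Go" row in the table.
    }
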
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto new file mode 100644 index 0000000..03cacd2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
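
The service Foo comment above shows the intended role of Empty. In Go, a method with the matching shape might look like this (hypothetical server type, shown only to mirror the rpc Bar example):

    package server

    import (
    	"context"

    	"github.com/golang/protobuf/ptypes/empty"
    )

    type fooServer struct{}

    // Bar consumes and produces google.protobuf.Empty: no payload in either direction.
    func (s *fooServer) Bar(ctx context.Context, _ *empty.Empty) (*empty.Empty, error) {
    	return &empty.Empty{}, nil
    }
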
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/empty"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh new file mode 100755 index 0000000..b50a941 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -0,0 +1,43 @@ +#!/bin/bash -e +# +# This script fetches and rebuilds the "well-known types" protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. +# You also need Go and Git installed. + +PKG=github.com/golang/protobuf/ptypes +UPSTREAM=https://github.com/google/protobuf +UPSTREAM_SUBDIR=src/google/protobuf +PROTO_FILES=(any duration empty struct timestamp wrappers) + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go git protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd "$base" + +echo 1>&2 "fetching latest protos... " +git clone -q $UPSTREAM $tmpdir + +for file in ${PROTO_FILES[@]}; do + echo 1>&2 "* $file" + protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die + cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file +done + +echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 0000000..4cfe608 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,380 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +/* +Package structpb is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
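
With the oneof wrapper types above, a google.protobuf.Struct can be built directly in Go. A small illustrative sketch, equivalent to the JSON object {"name": "gopher", "admin": true}:

    package main

    import (
    	"fmt"

    	structpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
    	// Each map entry wraps its Go value in the matching oneof wrapper type.
    	s := &structpb.Struct{
    		Fields: map[string]*structpb.Value{
    			"name":  {Kind: &structpb.Value_StringValue{StringValue: "gopher"}},
    			"admin": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
    		},
    	}
    	fmt.Println(s.GetFields()["name"].GetStringValue()) // gopher
    }

On the wire, the oneof marshaler that follows writes whichever wrapper is set, using the usual tag<<3|wiretype key; string_value is field 3 with wire type 2 (bytes), so its key byte is 3<<3|2 = 0x1a.
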
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) 
+ n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 
0x28, 0x4a, 0xfe, 0x55, 0xa4,
+	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+	0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values.
A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 0000000..47f10db --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,134 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). 
+ maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &tspb.Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 0000000..e23e4a2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +/* +Package timestamp is a generated protocol buffer package. 
+ +It is generated from these files: + google/protobuf/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. 
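+//
+// In Go, a minimal sketch using the vendored ptypes helpers defined in
+// ptypes/timestamp.go above:
+//
+//     ts, err := ptypes.TimestampProto(time.Now()) // time.Time -> Timestamp
+//     if err == nil {
+//         fmt.Println(ptypes.TimestampString(ts)) // RFC 3339 string
+//     }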
+// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 0000000..b7cbd17 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,133 @@ +// Protocol Buffers - Google's data 
interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go new file mode 100644 index 0000000..6e3c969 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp_test.go @@ -0,0 +1,153 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +import ( + "math" + "testing" + "time" + + "github.com/golang/protobuf/proto" + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +var tests = []struct { + ts *tspb.Timestamp + valid bool + t time.Time +}{ + // The timestamp representing the Unix epoch date. + {&tspb.Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)}, + // The smallest representable timestamp. + {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false, + time.Unix(math.MinInt64, math.MinInt32).UTC()}, + // The smallest representable timestamp with non-negative nanos. + {&tspb.Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()}, + // The earliest valid timestamp. + {&tspb.Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)}, + //"0001-01-01T00:00:00Z"}, + // The largest representable timestamp. + {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false, + time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, + // The largest representable timestamp with nanos in range. + {&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false, + time.Unix(math.MaxInt64, 1e9-1).UTC()}, + // The largest valid timestamp. + {&tspb.Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true, + time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, + // The smallest invalid timestamp that is larger than the valid range. + {&tspb.Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, + // A date before the epoch. + {&tspb.Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)}, + // A date after the epoch. + {&tspb.Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)}, + // A date after the epoch, in the middle of the day. + {&tspb.Timestamp{Seconds: 1296012345, Nanos: 940483}, true, + time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, +} + +func TestValidateTimestamp(t *testing.T) { + for _, s := range tests { + got := validateTimestamp(s.ts) + if (got == nil) != s.valid { + t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) + } + } +} + +func TestTimestamp(t *testing.T) { + for _, s := range tests { + got, err := Timestamp(s.ts) + if (err == nil) != s.valid { + t.Errorf("Timestamp(%v) error = %v, but valid = %t", s.ts, err, s.valid) + } else if s.valid && got != s.t { + t.Errorf("Timestamp(%v) = %v, want %v", s.ts, got, s.t) + } + } + // Special case: a nil Timestamp is an error, but returns the 0 Unix time. 
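+	// (nil is treated like the zero Timestamp and so maps to the Unix epoch,
+	// but unlike the zero Timestamp it is also reported as an error.)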
+ got, err := Timestamp(nil) + want := time.Unix(0, 0).UTC() + if got != want { + t.Errorf("Timestamp(nil) = %v, want %v", got, want) + } + if err == nil { + t.Errorf("Timestamp(nil) error = nil, expected error") + } +} + +func TestTimestampProto(t *testing.T) { + for _, s := range tests { + got, err := TimestampProto(s.t) + if (err == nil) != s.valid { + t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) + } else if s.valid && !proto.Equal(got, s.ts) { + t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) + } + } + // No corresponding special case here: no time.Time results in a nil Timestamp. +} + +func TestTimestampString(t *testing.T) { + for _, test := range []struct { + ts *tspb.Timestamp + want string + }{ + // Not much testing needed because presumably time.Format is + // well-tested. + {&tspb.Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"}, + {&tspb.Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"}, + } { + got := TimestampString(test.ts) + if got != test.want { + t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) + } + } +} + +func utcDate(year, month, day int) time.Time { + return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) +} + +func TestTimestampNow(t *testing.T) { + // Bracket the expected time. + before := time.Now() + ts := TimestampNow() + after := time.Now() + + tm, err := Timestamp(ts) + if err != nil { + t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err) + } + if tm.Before(before) || tm.After(after) { + t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm) + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 0000000..0ed59bf --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,260 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +/* +Package wrappers is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package wrappers + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
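+	// A nil *DoubleValue marks the field as unset, while &DoubleValue{}
+	// carries an explicit zero; that distinction is what wrappers are for.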
+ Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. 
+ Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 0000000..0194763 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. 
+ string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/gorilla/securecookie/.travis.yml b/vendor/github.com/gorilla/securecookie/.travis.yml new file mode 100644 index 0000000..24882fc --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/.travis.yml @@ -0,0 +1,18 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/securecookie/LICENSE b/vendor/github.com/gorilla/securecookie/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/securecookie/README.md b/vendor/github.com/gorilla/securecookie/README.md new file mode 100644 index 0000000..5ed299e --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/README.md @@ -0,0 +1,76 @@ +securecookie +============ +[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) [![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie) + +securecookie encodes and decodes authenticated and optionally encrypted +cookie values. + +Secure cookies can't be forged, because their values are validated using HMAC. +When encrypted, the content is also inaccessible to malicious eyes. It is still +recommended that sensitive data not be stored in cookies, and that HTTPS be used +to prevent cookie [replay attacks](https://en.wikipedia.org/wiki/Replay_attack). 
+ +## Examples + +To use it, first create a new SecureCookie instance: + +```go +// Hash keys should be at least 32 bytes long +var hashKey = []byte("very-secret") +// Block keys should be 16 bytes (AES-128) or 32 bytes (AES-256) long. +// Shorter keys may weaken the encryption used. +var blockKey = []byte("a-lot-secret") +var s = securecookie.New(hashKey, blockKey) +``` + +The hashKey is required, used to authenticate the cookie value using HMAC. +It is recommended to use a key with 32 or 64 bytes. + +The blockKey is optional, used to encrypt the cookie value -- set it to nil +to not use encryption. If set, the length must correspond to the block size +of the encryption algorithm. For AES, used by default, valid lengths are +16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. + +Strong keys can be created using the convenience function GenerateRandomKey(). + +Once a SecureCookie instance is set, use it to encode a cookie value: + +```go +func SetCookieHandler(w http.ResponseWriter, r *http.Request) { + value := map[string]string{ + "foo": "bar", + } + if encoded, err := s.Encode("cookie-name", value); err == nil { + cookie := &http.Cookie{ + Name: "cookie-name", + Value: encoded, + Path: "/", + } + http.SetCookie(w, cookie) + } +} +``` + +Later, use the same SecureCookie instance to decode and validate a cookie +value: + +```go +func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { + if cookie, err := r.Cookie("cookie-name"); err == nil { + value := make(map[string]string) + if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { + fmt.Fprintf(w, "The value of foo is %q", value["foo"]) + } + } +} +``` + +We stored a map[string]string, but secure cookies can hold any value that +can be encoded using `encoding/gob`. To store custom types, they must be +registered first using gob.Register(). For basic types this is not needed; +it works out of the box. An optional JSON encoder that uses `encoding/json` is +available for types compatible with JSON. + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/securecookie/doc.go b/vendor/github.com/gorilla/securecookie/doc.go new file mode 100644 index 0000000..ae89408 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/doc.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package securecookie encodes and decodes authenticated and optionally +encrypted cookie values. + +Secure cookies can't be forged, because their values are validated using HMAC. +When encrypted, the content is also inaccessible to malicious eyes. + +To use it, first create a new SecureCookie instance: + + var hashKey = []byte("very-secret") + var blockKey = []byte("a-lot-secret") + var s = securecookie.New(hashKey, blockKey) + +The hashKey is required, used to authenticate the cookie value using HMAC. +It is recommended to use a key with 32 or 64 bytes. + +The blockKey is optional, used to encrypt the cookie value -- set it to nil +to not use encryption. If set, the length must correspond to the block size +of the encryption algorithm. For AES, used by default, valid lengths are +16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. + +Strong keys can be created using the convenience function GenerateRandomKey(). 
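+
+A minimal sketch (the lengths follow the recommendations above; persist
+the keys yourself, since freshly generated keys invalidate any cookies
+issued before a restart):
+
+	var (
+		hashKey  = securecookie.GenerateRandomKey(64)
+		blockKey = securecookie.GenerateRandomKey(32)
+		s        = securecookie.New(hashKey, blockKey)
+	)
+
+GenerateRandomKey returns nil if the system's random source fails, so the
+generated keys should be checked before use.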
+ +Once a SecureCookie instance is set, use it to encode a cookie value: + + func SetCookieHandler(w http.ResponseWriter, r *http.Request) { + value := map[string]string{ + "foo": "bar", + } + if encoded, err := s.Encode("cookie-name", value); err == nil { + cookie := &http.Cookie{ + Name: "cookie-name", + Value: encoded, + Path: "/", + } + http.SetCookie(w, cookie) + } + } + +Later, use the same SecureCookie instance to decode and validate a cookie +value: + + func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { + if cookie, err := r.Cookie("cookie-name"); err == nil { + value := make(map[string]string) + if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { + fmt.Fprintf(w, "The value of foo is %q", value["foo"]) + } + } + } + +We stored a map[string]string, but secure cookies can hold any value that +can be encoded using encoding/gob. To store custom types, they must be +registered first using gob.Register(). For basic types this is not needed; +it works out of the box. +*/ +package securecookie diff --git a/vendor/github.com/gorilla/securecookie/fuzz.go b/vendor/github.com/gorilla/securecookie/fuzz.go new file mode 100644 index 0000000..e4d0534 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz.go @@ -0,0 +1,25 @@ +// +build gofuzz + +package securecookie + +var hashKey = []byte("very-secret12345") +var blockKey = []byte("a-lot-secret1234") +var s = New(hashKey, blockKey) + +type Cookie struct { + B bool + I int + S string +} + +func Fuzz(data []byte) int { + datas := string(data) + var c Cookie + if err := s.Decode("fuzz", datas, &c); err != nil { + return 0 + } + if _, err := s.Encode("fuzz", c); err != nil { + panic(err) + } + return 1 +} diff --git a/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go b/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go new file mode 100644 index 0000000..368192b --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go @@ -0,0 +1,47 @@ +package main + +import ( + "fmt" + "io" + "math/rand" + "os" + "reflect" + "testing/quick" + + "github.com/gorilla/securecookie" +) + +var hashKey = []byte("very-secret12345") +var blockKey = []byte("a-lot-secret1234") +var s = securecookie.New(hashKey, blockKey) + +type Cookie struct { + B bool + I int + S string +} + +func main() { + var c Cookie + t := reflect.TypeOf(c) + rnd := rand.New(rand.NewSource(0)) + for i := 0; i < 100; i++ { + v, ok := quick.Value(t, rnd) + if !ok { + panic("couldn't generate value") + } + encoded, err := s.Encode("fuzz", v.Interface()) + if err != nil { + panic(err) + } + f, err := os.Create(fmt.Sprintf("corpus/%d.sc", i)) + if err != nil { + panic(err) + } + _, err = io.WriteString(f, encoded) + if err != nil { + panic(err) + } + f.Close() + } +} diff --git a/vendor/github.com/gorilla/securecookie/securecookie.go b/vendor/github.com/gorilla/securecookie/securecookie.go new file mode 100644 index 0000000..83dd606 --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/securecookie.go @@ -0,0 +1,646 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securecookie + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/gob" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + "strings" + "time" +) + +// Error is the interface of all errors returned by functions in this library. 
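+// Callers can type-assert a returned error to Error to choose a reaction:
+// IsDecode failures usually mean a tampered or expired cookie and can be
+// treated as a bad request, while IsUsage and IsInternal point at the
+// calling code's configuration or at this library itself.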
+type Error interface { + error + + // IsUsage returns true for errors indicating the client code probably + // uses this library incorrectly. For example, the client may have + // failed to provide a valid hash key, or may have failed to configure + // the Serializer adequately for encoding value. + IsUsage() bool + + // IsDecode returns true for errors indicating that a cookie could not + // be decoded and validated. Since cookies are usually untrusted + // user-provided input, errors of this type should be expected. + // Usually, the proper action is simply to reject the request. + IsDecode() bool + + // IsInternal returns true for unexpected errors occurring in the + // securecookie implementation. + IsInternal() bool + + // Cause, if it returns a non-nil value, indicates that this error was + // propagated from some underlying library. If this method returns nil, + // this error was raised directly by this library. + // + // Cause is provided principally for debugging/logging purposes; it is + // rare that application logic should perform meaningfully different + // logic based on Cause. See, for example, the caveats described on + // (MultiError).Cause(). + Cause() error +} + +// errorType is a bitmask giving the error type(s) of an cookieError value. +type errorType int + +const ( + usageError = errorType(1 << iota) + decodeError + internalError +) + +type cookieError struct { + typ errorType + msg string + cause error +} + +func (e cookieError) IsUsage() bool { return (e.typ & usageError) != 0 } +func (e cookieError) IsDecode() bool { return (e.typ & decodeError) != 0 } +func (e cookieError) IsInternal() bool { return (e.typ & internalError) != 0 } + +func (e cookieError) Cause() error { return e.cause } + +func (e cookieError) Error() string { + parts := []string{"securecookie: "} + if e.msg == "" { + parts = append(parts, "error") + } else { + parts = append(parts, e.msg) + } + if c := e.Cause(); c != nil { + parts = append(parts, " - caused by: ", c.Error()) + } + return strings.Join(parts, "") +} + +var ( + errGeneratingIV = cookieError{typ: internalError, msg: "failed to generate random iv"} + + errNoCodecs = cookieError{typ: usageError, msg: "no codecs provided"} + errHashKeyNotSet = cookieError{typ: usageError, msg: "hash key is not set"} + errBlockKeyNotSet = cookieError{typ: usageError, msg: "block key is not set"} + errEncodedValueTooLong = cookieError{typ: usageError, msg: "the value is too long"} + + errValueToDecodeTooLong = cookieError{typ: decodeError, msg: "the value is too long"} + errTimestampInvalid = cookieError{typ: decodeError, msg: "invalid timestamp"} + errTimestampTooNew = cookieError{typ: decodeError, msg: "timestamp is too new"} + errTimestampExpired = cookieError{typ: decodeError, msg: "expired timestamp"} + errDecryptionFailed = cookieError{typ: decodeError, msg: "the value could not be decrypted"} + errValueNotByte = cookieError{typ: decodeError, msg: "value not a []byte."} + + // ErrMacInvalid indicates that cookie decoding failed because the HMAC + // could not be extracted and verified. Direct use of this error + // variable is deprecated; it is public only for legacy compatibility, + // and may be privatized in the future, as it is rarely useful to + // distinguish between this error and other Error implementations. + ErrMacInvalid = cookieError{typ: decodeError, msg: "the value is not valid"} +) + +// Codec defines an interface to encode and decode cookie values. 
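+// Codecs can also be grouped for key rotation: the package-level
+// EncodeMulti and DecodeMulti helpers accept several codecs and try them
+// in order.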
+type Codec interface { + Encode(name string, value interface{}) (string, error) + Decode(name, value string, dst interface{}) error +} + +// New returns a new SecureCookie. +// +// hashKey is required, used to authenticate values using HMAC. Create it using +// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes. +// +// blockKey is optional, used to encrypt values. Create it using +// GenerateRandomKey(). The key length must correspond to the block size +// of the encryption algorithm. For AES, used by default, valid lengths are +// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. +// The default encoder used for cookie serialization is encoding/gob. +// +// Note that keys created using GenerateRandomKey() are not automatically +// persisted. New keys will be created when the application is restarted, and +// previously issued cookies will not be able to be decoded. +func New(hashKey, blockKey []byte) *SecureCookie { + s := &SecureCookie{ + hashKey: hashKey, + blockKey: blockKey, + hashFunc: sha256.New, + maxAge: 86400 * 30, + maxLength: 4096, + sz: GobEncoder{}, + } + if hashKey == nil { + s.err = errHashKeyNotSet + } + if blockKey != nil { + s.BlockFunc(aes.NewCipher) + } + return s +} + +// SecureCookie encodes and decodes authenticated and optionally encrypted +// cookie values. +type SecureCookie struct { + hashKey []byte + hashFunc func() hash.Hash + blockKey []byte + block cipher.Block + maxLength int + maxAge int64 + minAge int64 + err error + sz Serializer + // For testing purposes, the function that returns the current timestamp. + // If not set, it will use time.Now().UTC().Unix(). + timeFunc func() int64 +} + +// Serializer provides an interface for providing custom serializers for cookie +// values. +type Serializer interface { + Serialize(src interface{}) ([]byte, error) + Deserialize(src []byte, dst interface{}) error +} + +// GobEncoder encodes cookie values using encoding/gob. This is the simplest +// encoder and can handle complex types via gob.Register. +type GobEncoder struct{} + +// JSONEncoder encodes cookie values using encoding/json. Users who wish to +// encode complex types need to satisfy the json.Marshaller and +// json.Unmarshaller interfaces. +type JSONEncoder struct{} + +// NopEncoder does not encode cookie values, and instead simply accepts a []byte +// (as an interface{}) and returns a []byte. This is particularly useful when +// you encoding an object upstream and do not wish to re-encode it. +type NopEncoder struct{} + +// MaxLength restricts the maximum length, in bytes, for the cookie value. +// +// Default is 4096, which is the maximum value accepted by Internet Explorer. +func (s *SecureCookie) MaxLength(value int) *SecureCookie { + s.maxLength = value + return s +} + +// MaxAge restricts the maximum age, in seconds, for the cookie value. +// +// Default is 86400 * 30. Set it to 0 for no restriction. +func (s *SecureCookie) MaxAge(value int) *SecureCookie { + s.maxAge = int64(value) + return s +} + +// MinAge restricts the minimum age, in seconds, for the cookie value. +// +// Default is 0 (no restriction). +func (s *SecureCookie) MinAge(value int) *SecureCookie { + s.minAge = int64(value) + return s +} + +// HashFunc sets the hash function used to create HMAC. +// +// Default is crypto/sha256.New. +func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie { + s.hashFunc = f + return s +} + +// BlockFunc sets the encryption function used to create a cipher.Block. +// +// Default is crypto/aes.New. 
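+//
+// For example, s.BlockFunc(aes.NewCipher) restores the default AES cipher;
+// any constructor with the signature func([]byte) (cipher.Block, error),
+// such as des.NewTripleDESCipher, can be substituted.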
+func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie {
+	if s.blockKey == nil {
+		s.err = errBlockKeyNotSet
+	} else if block, err := f(s.blockKey); err == nil {
+		s.block = block
+	} else {
+		s.err = cookieError{cause: err, typ: usageError}
+	}
+	return s
+}
+
+// SetSerializer sets the encoding/serialization method for cookies.
+//
+// Default is encoding/gob. To encode special structures using encoding/gob,
+// they must be registered first using gob.Register().
+func (s *SecureCookie) SetSerializer(sz Serializer) *SecureCookie {
+	s.sz = sz
+
+	return s
+}
+
+// Encode encodes a cookie value.
+//
+// It serializes, optionally encrypts, signs with a message authentication code,
+// and finally encodes the value.
+//
+// The name argument is the cookie name. It is stored with the encoded value.
+// The value argument is the value to be encoded. It can be any value that can
+// be encoded using the currently selected serializer; see SetSerializer().
+//
+// It is the client's responsibility to ensure that value, when encoded using
+// the current serialization/encryption settings on s and then base64-encoded,
+// is shorter than the maximum permissible length.
+func (s *SecureCookie) Encode(name string, value interface{}) (string, error) {
+	if s.err != nil {
+		return "", s.err
+	}
+	if s.hashKey == nil {
+		s.err = errHashKeyNotSet
+		return "", s.err
+	}
+	var err error
+	var b []byte
+	// 1. Serialize.
+	if b, err = s.sz.Serialize(value); err != nil {
+		return "", cookieError{cause: err, typ: usageError}
+	}
+	// 2. Encrypt (optional).
+	if s.block != nil {
+		if b, err = encrypt(s.block, b); err != nil {
+			return "", cookieError{cause: err, typ: usageError}
+		}
+	}
+	b = encode(b)
+	// 3. Create MAC for "name|date|value". Extra pipe to be used later.
+	b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b))
+	mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1])
+	// Append mac, remove name.
+	b = append(b, mac...)[len(name)+1:]
+	// 4. Encode to base64.
+	b = encode(b)
+	// 5. Check length.
+	if s.maxLength != 0 && len(b) > s.maxLength {
+		return "", errEncodedValueTooLong
+	}
+	// Done.
+	return string(b), nil
+}
+
+// Decode decodes a cookie value.
+//
+// It decodes, verifies a message authentication code, optionally decrypts and
+// finally deserializes the value.
+//
+// The name argument is the cookie name. It must be the same name used when
+// it was stored. The value argument is the encoded cookie value. The dst
+// argument is where the cookie will be decoded. It must be a pointer.
+func (s *SecureCookie) Decode(name, value string, dst interface{}) error {
+	if s.err != nil {
+		return s.err
+	}
+	if s.hashKey == nil {
+		s.err = errHashKeyNotSet
+		return s.err
+	}
+	// 1. Check length.
+	if s.maxLength != 0 && len(value) > s.maxLength {
+		return errValueToDecodeTooLong
+	}
+	// 2. Decode from base64.
+	b, err := decode([]byte(value))
+	if err != nil {
+		return err
+	}
+	// 3. Verify MAC. Value is "date|value|mac".
+	parts := bytes.SplitN(b, []byte("|"), 3)
+	if len(parts) != 3 {
+		return ErrMacInvalid
+	}
+	h := hmac.New(s.hashFunc, s.hashKey)
+	b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...)
+	if err = verifyMac(h, b, parts[2]); err != nil {
+		return err
+	}
+	// 4. Verify date ranges.
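+	// parts[0] holds the creation timestamp recorded at encoding time. It
+	// is parsed as t1 below and compared against the current time t2:
+	// values newer than minAge or older than maxAge are rejected.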
+	var t1 int64
+	if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil {
+		return errTimestampInvalid
+	}
+	t2 := s.timestamp()
+	if s.minAge != 0 && t1 > t2-s.minAge {
+		return errTimestampTooNew
+	}
+	if s.maxAge != 0 && t1 < t2-s.maxAge {
+		return errTimestampExpired
+	}
+	// 5. Decrypt (optional).
+	b, err = decode(parts[1])
+	if err != nil {
+		return err
+	}
+	if s.block != nil {
+		if b, err = decrypt(s.block, b); err != nil {
+			return err
+		}
+	}
+	// 6. Deserialize.
+	if err = s.sz.Deserialize(b, dst); err != nil {
+		return cookieError{cause: err, typ: decodeError}
+	}
+	// Done.
+	return nil
+}
+
+// timestamp returns the current timestamp, in seconds.
+//
+// For testing purposes, the function that generates the timestamp can be
+// overridden. If not set, it will return time.Now().UTC().Unix().
+func (s *SecureCookie) timestamp() int64 {
+	if s.timeFunc == nil {
+		return time.Now().UTC().Unix()
+	}
+	return s.timeFunc()
+}
+
+// Authentication -------------------------------------------------------------
+
+// createMac creates a message authentication code (MAC).
+func createMac(h hash.Hash, value []byte) []byte {
+	h.Write(value)
+	return h.Sum(nil)
+}
+
+// verifyMac verifies that a message authentication code (MAC) is valid.
+func verifyMac(h hash.Hash, value []byte, mac []byte) error {
+	mac2 := createMac(h, value)
+	// Check that both MACs are of equal length, as subtle.ConstantTimeCompare
+	// does not do this prior to Go 1.4.
+	if len(mac) == len(mac2) && subtle.ConstantTimeCompare(mac, mac2) == 1 {
+		return nil
+	}
+	return ErrMacInvalid
+}
+
+// Encryption -----------------------------------------------------------------
+
+// encrypt encrypts a value using the given block in counter mode.
+//
+// A random initialization vector (http://goo.gl/zF67k) with the length of the
+// block size is prepended to the resulting ciphertext.
+func encrypt(block cipher.Block, value []byte) ([]byte, error) {
+	iv := GenerateRandomKey(block.BlockSize())
+	if iv == nil {
+		return nil, errGeneratingIV
+	}
+	// Encrypt it.
+	stream := cipher.NewCTR(block, iv)
+	stream.XORKeyStream(value, value)
+	// Return iv + ciphertext.
+	return append(iv, value...), nil
+}
+
+// decrypt decrypts a value using the given block in counter mode.
+//
+// The value to be decrypted must be prepended by an initialization vector
+// (http://goo.gl/zF67k) with the length of the block size.
+func decrypt(block cipher.Block, value []byte) ([]byte, error) {
+	size := block.BlockSize()
+	if len(value) > size {
+		// Extract iv.
+		iv := value[:size]
+		// Extract ciphertext.
+		value = value[size:]
+		// Decrypt it.
+		stream := cipher.NewCTR(block, iv)
+		stream.XORKeyStream(value, value)
+		return value, nil
+	}
+	return nil, errDecryptionFailed
+}
+
+// Serialization --------------------------------------------------------------
+
+// Serialize encodes a value using gob.
+func (e GobEncoder) Serialize(src interface{}) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	enc := gob.NewEncoder(buf)
+	if err := enc.Encode(src); err != nil {
+		return nil, cookieError{cause: err, typ: usageError}
+	}
+	return buf.Bytes(), nil
+}
+
+// Deserialize decodes a value using gob.
+func (e GobEncoder) Deserialize(src []byte, dst interface{}) error {
+	dec := gob.NewDecoder(bytes.NewBuffer(src))
+	if err := dec.Decode(dst); err != nil {
+		return cookieError{cause: err, typ: decodeError}
+	}
+	return nil
+}
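+// Example (an illustrative sketch, not part of the original file): as noted
+// on SetSerializer, special structures stored through interface-typed values
+// must be registered with gob before GobEncoder can encode them. The Session
+// type is a placeholder:
+//
+//	type Session struct{ UserID int }
+//	gob.Register(Session{})
+
+// Serialize encodes a value using encoding/json.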
+func (e JSONEncoder) Serialize(src interface{}) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(src); err != nil {
+		return nil, cookieError{cause: err, typ: usageError}
+	}
+	return buf.Bytes(), nil
+}
+
+// Deserialize decodes a value using encoding/json.
+func (e JSONEncoder) Deserialize(src []byte, dst interface{}) error {
+	dec := json.NewDecoder(bytes.NewReader(src))
+	if err := dec.Decode(dst); err != nil {
+		return cookieError{cause: err, typ: decodeError}
+	}
+	return nil
+}
+
+// Serialize passes a []byte through as-is.
+func (e NopEncoder) Serialize(src interface{}) ([]byte, error) {
+	if b, ok := src.([]byte); ok {
+		return b, nil
+	}
+
+	return nil, errValueNotByte
+}
+
+// Deserialize passes a []byte through as-is.
+func (e NopEncoder) Deserialize(src []byte, dst interface{}) error {
+	// dst must be a *[]byte: assigning to the interface parameter itself
+	// would only rebind the local variable and never reach the caller.
+	if dat, ok := dst.(*[]byte); ok {
+		*dat = src
+		return nil
+	}
+
+	return errValueNotByte
+}
+
+// Encoding -------------------------------------------------------------------
+
+// encode encodes a value using base64.
+func encode(value []byte) []byte {
+	encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value)))
+	base64.URLEncoding.Encode(encoded, value)
+	return encoded
+}
+
+// decode decodes a cookie using base64.
+func decode(value []byte) ([]byte, error) {
+	decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value)))
+	b, err := base64.URLEncoding.Decode(decoded, value)
+	if err != nil {
+		return nil, cookieError{cause: err, typ: decodeError, msg: "base64 decode failed"}
+	}
+	return decoded[:b], nil
+}
+
+// Helpers --------------------------------------------------------------------
+
+// GenerateRandomKey creates a random key with the given length in bytes.
+// On failure, returns nil.
+//
+// Callers should explicitly check for the possibility of a nil return, treat
+// it as a failure of the system random number generator, and not continue.
+func GenerateRandomKey(length int) []byte {
+	k := make([]byte, length)
+	if _, err := io.ReadFull(rand.Reader, k); err != nil {
+		return nil
+	}
+	return k
+}
+
+// CodecsFromPairs returns a slice of SecureCookie instances.
+//
+// It is a convenience function to create a list of codecs for key rotation. Note
+// that the generated Codecs will have the default options applied: callers
+// should iterate over each Codec and type-assert the underlying *SecureCookie to
+// change these.
+//
+// Example:
+//
+//	codecs := securecookie.CodecsFromPairs(
+//		[]byte("new-hash-key"),
+//		[]byte("new-block-key"),
+//		[]byte("old-hash-key"),
+//		[]byte("old-block-key"),
+//	)
+//
+//	// Modify each instance.
+//	for _, s := range codecs {
+//		if cookie, ok := s.(*securecookie.SecureCookie); ok {
+//			cookie.MaxAge(86400 * 7)
+//			cookie.SetSerializer(securecookie.JSONEncoder{})
+//			cookie.HashFunc(sha512.New512_256)
+//		}
+//	}
+func CodecsFromPairs(keyPairs ...[]byte) []Codec {
+	codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2)
+	for i := 0; i < len(keyPairs); i += 2 {
+		var blockKey []byte
+		if i+1 < len(keyPairs) {
+			blockKey = keyPairs[i+1]
+		}
+		codecs[i/2] = New(keyPairs[i], blockKey)
+	}
+	return codecs
+}
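+// Example (an illustrative sketch, not part of the original file): with
+// codecs ordered newest-first, EncodeMulti signs with the newest key while
+// DecodeMulti still accepts cookies issued under the older ones:
+//
+//	encoded, err := securecookie.EncodeMulti("sid", value, codecs...)
+//	...
+//	err = securecookie.DecodeMulti("sid", encoded, &dst, codecs...)
+
+// EncodeMulti encodes a cookie value using a group of codecs.
+//
+// The codecs are tried in order. Multiple codecs are accepted to allow
+// key rotation.
+//
+// On error, may return a MultiError.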
+func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) { + if len(codecs) == 0 { + return "", errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + encoded, err := codec.Encode(name, value) + if err == nil { + return encoded, nil + } + errors = append(errors, err) + } + return "", errors +} + +// DecodeMulti decodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error { + if len(codecs) == 0 { + return errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + err := codec.Decode(name, value, dst) + if err == nil { + return nil + } + errors = append(errors, err) + } + return errors +} + +// MultiError groups multiple errors. +type MultiError []error + +func (m MultiError) IsUsage() bool { return m.any(func(e Error) bool { return e.IsUsage() }) } +func (m MultiError) IsDecode() bool { return m.any(func(e Error) bool { return e.IsDecode() }) } +func (m MultiError) IsInternal() bool { return m.any(func(e Error) bool { return e.IsInternal() }) } + +// Cause returns nil for MultiError; there is no unique underlying cause in the +// general case. +// +// Note: we could conceivably return a non-nil Cause only when there is exactly +// one child error with a Cause. However, it would be brittle for client code +// to rely on the arity of causes inside a MultiError, so we have opted not to +// provide this functionality. Clients which really wish to access the Causes +// of the underlying errors are free to iterate through the errors themselves. +func (m MultiError) Cause() error { return nil } + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} + +// any returns true if any element of m is an Error for which pred returns true. +func (m MultiError) any(pred func(Error) bool) bool { + for _, e := range m { + if ourErr, ok := e.(Error); ok && pred(ourErr) { + return true + } + } + return false +} diff --git a/vendor/github.com/gorilla/securecookie/securecookie_test.go b/vendor/github.com/gorilla/securecookie/securecookie_test.go new file mode 100644 index 0000000..33ce4fc --- /dev/null +++ b/vendor/github.com/gorilla/securecookie/securecookie_test.go @@ -0,0 +1,274 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securecookie + +import ( + "crypto/aes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "reflect" + "strings" + "testing" +) + +// Asserts that cookieError and MultiError are Error implementations. 
+var _ Error = cookieError{} +var _ Error = MultiError{} + +var testCookies = []interface{}{ + map[string]string{"foo": "bar"}, + map[string]string{"baz": "ding"}, +} + +var testStrings = []string{"foo", "bar", "baz"} + +func TestSecureCookie(t *testing.T) { + // TODO test too old / too new timestamps + s1 := New([]byte("12345"), []byte("1234567890123456")) + s2 := New([]byte("54321"), []byte("6543210987654321")) + value := map[string]interface{}{ + "foo": "bar", + "baz": 128, + } + + for i := 0; i < 50; i++ { + // Running this multiple times to check if any special character + // breaks encoding/decoding. + encoded, err1 := s1.Encode("sid", value) + if err1 != nil { + t.Error(err1) + continue + } + dst := make(map[string]interface{}) + err2 := s1.Decode("sid", encoded, &dst) + if err2 != nil { + t.Fatalf("%v: %v", err2, encoded) + } + if !reflect.DeepEqual(dst, value) { + t.Fatalf("Expected %v, got %v.", value, dst) + } + dst2 := make(map[string]interface{}) + err3 := s2.Decode("sid", encoded, &dst2) + if err3 == nil { + t.Fatalf("Expected failure decoding.") + } + err4, ok := err3.(Error) + if !ok { + t.Fatalf("Expected error to implement Error, got: %#v", err3) + } + if !err4.IsDecode() { + t.Fatalf("Expected DecodeError, got: %#v", err4) + } + + // Test other error type flags. + if err4.IsUsage() { + t.Fatalf("Expected IsUsage() == false, got: %#v", err4) + } + if err4.IsInternal() { + t.Fatalf("Expected IsInternal() == false, got: %#v", err4) + } + } +} + +func TestSecureCookieNilKey(t *testing.T) { + s1 := New(nil, nil) + value := map[string]interface{}{ + "foo": "bar", + "baz": 128, + } + _, err := s1.Encode("sid", value) + if err != errHashKeyNotSet { + t.Fatal("Wrong error returned:", err) + } +} + +func TestDecodeInvalid(t *testing.T) { + // List of invalid cookies, which must not be accepted, base64-decoded + // (they will be encoded before passing to Decode). 
+ invalidCookies := []string{ + "", + " ", + "\n", + "||", + "|||", + "cookie", + } + s := New([]byte("12345"), nil) + var dst string + for i, v := range invalidCookies { + for _, enc := range []*base64.Encoding{ + base64.StdEncoding, + base64.URLEncoding, + } { + err := s.Decode("name", enc.EncodeToString([]byte(v)), &dst) + if err == nil { + t.Fatalf("%d: expected failure decoding", i) + } + err2, ok := err.(Error) + if !ok || !err2.IsDecode() { + t.Fatalf("%d: Expected IsDecode(), got: %#v", i, err) + } + } + } +} + +func TestAuthentication(t *testing.T) { + hash := hmac.New(sha256.New, []byte("secret-key")) + for _, value := range testStrings { + hash.Reset() + signed := createMac(hash, []byte(value)) + hash.Reset() + err := verifyMac(hash, []byte(value), signed) + if err != nil { + t.Error(err) + } + } +} + +func TestEncryption(t *testing.T) { + block, err := aes.NewCipher([]byte("1234567890123456")) + if err != nil { + t.Fatalf("Block could not be created") + } + var encrypted, decrypted []byte + for _, value := range testStrings { + if encrypted, err = encrypt(block, []byte(value)); err != nil { + t.Error(err) + } else { + if decrypted, err = decrypt(block, encrypted); err != nil { + t.Error(err) + } + if string(decrypted) != value { + t.Errorf("Expected %v, got %v.", value, string(decrypted)) + } + } + } +} + +func TestGobSerialization(t *testing.T) { + var ( + sz GobEncoder + serialized []byte + deserialized map[string]string + err error + ) + for _, value := range testCookies { + if serialized, err = sz.Serialize(value); err != nil { + t.Error(err) + } else { + deserialized = make(map[string]string) + if err = sz.Deserialize(serialized, &deserialized); err != nil { + t.Error(err) + } + if fmt.Sprintf("%v", deserialized) != fmt.Sprintf("%v", value) { + t.Errorf("Expected %v, got %v.", value, deserialized) + } + } + } +} + +func TestJSONSerialization(t *testing.T) { + var ( + sz JSONEncoder + serialized []byte + deserialized map[string]string + err error + ) + for _, value := range testCookies { + if serialized, err = sz.Serialize(value); err != nil { + t.Error(err) + } else { + deserialized = make(map[string]string) + if err = sz.Deserialize(serialized, &deserialized); err != nil { + t.Error(err) + } + if fmt.Sprintf("%v", deserialized) != fmt.Sprintf("%v", value) { + t.Errorf("Expected %v, got %v.", value, deserialized) + } + } + } +} + +func TestEncoding(t *testing.T) { + for _, value := range testStrings { + encoded := encode([]byte(value)) + decoded, err := decode(encoded) + if err != nil { + t.Error(err) + } else if string(decoded) != value { + t.Errorf("Expected %v, got %s.", value, string(decoded)) + } + } +} + +func TestMultiError(t *testing.T) { + s1, s2 := New(nil, nil), New(nil, nil) + _, err := EncodeMulti("sid", "value", s1, s2) + if len(err.(MultiError)) != 2 { + t.Errorf("Expected 2 errors, got %s.", err) + } else { + if strings.Index(err.Error(), "hash key is not set") == -1 { + t.Errorf("Expected missing hash key error, got %s.", err.Error()) + } + ourErr, ok := err.(Error) + if !ok || !ourErr.IsUsage() { + t.Fatalf("Expected error to be a usage error; got %#v", err) + } + if ourErr.IsDecode() { + t.Errorf("Expected error NOT to be a decode error; got %#v", ourErr) + } + if ourErr.IsInternal() { + t.Errorf("Expected error NOT to be an internal error; got %#v", ourErr) + } + } +} + +func TestMultiNoCodecs(t *testing.T) { + _, err := EncodeMulti("foo", "bar") + if err != errNoCodecs { + t.Errorf("EncodeMulti: bad value for error, got: %v", err) + } + + var dst 
[]byte
+	err = DecodeMulti("foo", "bar", &dst)
+	if err != errNoCodecs {
+		t.Errorf("DecodeMulti: bad value for error, got: %v", err)
+	}
+}
+
+func TestMissingKey(t *testing.T) {
+	s1 := New(nil, nil)
+
+	var dst []byte
+	err := s1.Decode("sid", "value", &dst)
+	if err != errHashKeyNotSet {
+		t.Fatalf("Expected %#v, got %#v", errHashKeyNotSet, err)
+	}
+	if err2, ok := err.(Error); !ok || !err2.IsUsage() {
+		t.Errorf("Expected missing hash key to be IsUsage(); was %#v", err)
+	}
+}
+
+// ----------------------------------------------------------------------------
+
+type FooBar struct {
+	Foo int
+	Bar string
+}
+
+func TestCustomType(t *testing.T) {
+	s1 := New([]byte("12345"), []byte("1234567890123456"))
+	// The type is deliberately not registered with gob; encoding still works
+	// because the concrete struct type is known on both sides.
+	src := &FooBar{42, "bar"}
+	encoded, _ := s1.Encode("sid", src)
+
+	dst := &FooBar{}
+	_ = s1.Decode("sid", encoded, dst)
+	if dst.Foo != 42 || dst.Bar != "bar" {
+		t.Fatalf("Expected %#v, got %#v", src, dst)
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/.codeclimate.yml b/vendor/github.com/jinzhu/gorm/.codeclimate.yml
new file mode 100644
index 0000000..51aba50
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/.codeclimate.yml
@@ -0,0 +1,11 @@
+---
+engines:
+  gofmt:
+    enabled: true
+  govet:
+    enabled: true
+  golint:
+    enabled: true
+ratings:
+  paths:
+  - "**.go"
diff --git a/vendor/github.com/jinzhu/gorm/.gitignore b/vendor/github.com/jinzhu/gorm/.gitignore
new file mode 100644
index 0000000..01dc5ce
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/.gitignore
@@ -0,0 +1,2 @@
+documents
+_book
diff --git a/vendor/github.com/jinzhu/gorm/CONTRIBUTING.md b/vendor/github.com/jinzhu/gorm/CONTRIBUTING.md
new file mode 100644
index 0000000..c54d572
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/CONTRIBUTING.md
@@ -0,0 +1,52 @@
+# How to Contribute
+
+## Bug Report
+
+- Do a search on GitHub under Issues in case it has already been reported
+- Submitting an __executable script__ or a failing-test pull request that demonstrates the issue is a *MUST HAVE*
+
+## Feature Request
+
+- Feature requests that come with a pull request are welcome
+- Otherwise, a request won't be implemented until I (or other developers) find it helpful for my (their) daily work
+
+## Pull Request
+
+- Prefer single-commit pull requests, which keep the git history easier to follow
+- New features need to be covered with tests to make sure your code works as expected and won't be broken by others in the future
+
+## Contributing to Documentation
+
+- You are welcome ;)
+- You can help improve the README by making it more coherent, consistent, or readable, and by adding more godoc documentation to make the project easier to follow
+- Blogs, usage guides, and slide decks are also welcome; please add them to https://github.com/jinzhu/gorm/wiki/Guides
+
+### Executable script template
+
+```go
+package main
+
+import (
+	_ "github.com/mattn/go-sqlite3"
+	_ "github.com/go-sql-driver/mysql"
+	_ "github.com/lib/pq"
+	"github.com/jinzhu/gorm"
+)
+
+var db *gorm.DB
+
+func init() {
+	var err error
+	db, err = gorm.Open("sqlite3", "test.db")
+	// db, err = gorm.Open("postgres", "user=username dbname=dbname sslmode=disable")
+	// db, err = gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True")
+	if err != nil {
+		panic(err)
+	}
+	db.LogMode(true)
+}
+
+func main() {
+	// Your code
+}
+```
diff --git a/vendor/github.com/jinzhu/gorm/License b/vendor/github.com/jinzhu/gorm/License
new file mode 100644
index 0000000..037e165
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/License
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-NOW Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/gorm/README.md b/vendor/github.com/jinzhu/gorm/README.md
new file mode 100644
index 0000000..c3f209c
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/README.md
@@ -0,0 +1,46 @@
+# GORM
+
+The fantastic ORM library for Golang; it aims to be developer friendly.
+
+[![Join the chat at https://gitter.im/jinzhu/gorm](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![wercker status](https://app.wercker.com/status/0cb7bb1039e21b74f8274941428e0921/s/master "wercker status")](https://app.wercker.com/project/bykey/0cb7bb1039e21b74f8274941428e0921)
+[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.svg)](https://godoc.org/github.com/jinzhu/gorm)
+
+## Overview
+
+* Full-Featured ORM (almost)
+* Associations (Has One, Has Many, Belongs To, Many To Many, Polymorphism)
+* Callbacks (Before/After Create/Save/Update/Delete/Find)
+* Preloading (eager loading)
+* Transactions
+* Composite Primary Key
+* SQL Builder
+* Auto Migrations
+* Logger
+* Extendable, write Plugins based on GORM callbacks
+* Every feature comes with tests
+* Developer Friendly
+
+## Getting Started
+
+* GORM Guides: [jinzhu.github.io/gorm](https://jinzhu.github.io/gorm)
+
+## Upgrading To V1.0
+
+* [CHANGELOG](https://jinzhu.github.io/gorm/changelog.html)
+
+## Author
+
+**jinzhu**
+
+## Contributors
+
+https://github.com/jinzhu/gorm/graphs/contributors
+
+## License
+
+Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License).
diff --git a/vendor/github.com/jinzhu/gorm/association.go b/vendor/github.com/jinzhu/gorm/association.go
new file mode 100644
index 0000000..cd8fd91
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/association.go
@@ -0,0 +1,359 @@
+package gorm
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// Association (association mode) contains some helper methods to handle
+// relationships easily.
+type Association struct {
+	Error  error
+	scope  *Scope
+	column string
+	field  *Field
+}
+
+// Find finds all related associations
+func (association *Association) Find(value interface{}) *Association {
+	association.scope.related(value, association.column)
+	return association.setErr(association.scope.db.Error)
+}
+
+// Append appends new associations for many2many and has_many relationships,
+// and replaces the current association for has_one and belongs_to
+func (association *Association) Append(values ...interface{}) *Association {
+	if relationship := association.field.Relationship; relationship.Kind == "has_one" {
+		return association.Replace(values...)
+	}
+	return association.saveAssociations(values...)
+}
+
+// Replace replaces current associations with the new ones
+func (association *Association) Replace(values ...interface{}) *Association {
+	var (
+		relationship = association.field.Relationship
+		scope        = association.scope
+		field        = association.field.Field
+		newDB        = scope.NewDB()
+	)
+
+	// Append new values
+	association.field.Set(reflect.Zero(association.field.Field.Type()))
+	association.saveAssociations(values...)
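+
+	// The new values were saved above; the code below removes the stale
+	// relations according to the relationship kind: belongs_to nulls the
+	// source's foreign key when no new values are given, many_to_many
+	// deletes the old join-table rows, and has_one/has_many null out the
+	// foreign keys of previously associated records.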
+
+	// Belongs To
+	if relationship.Kind == "belongs_to" {
+		// Set the foreign key to NULL when clearing the value (no new values given)
+		if len(values) == 0 {
+			// Set foreign key to be nil
+			var foreignKeyMap = map[string]interface{}{}
+			for _, foreignKey := range relationship.ForeignDBNames {
+				foreignKeyMap[foreignKey] = nil
+			}
+			association.setErr(newDB.Model(scope.Value).UpdateColumn(foreignKeyMap).Error)
+		}
+	} else {
+		// Polymorphic Relations
+		if relationship.PolymorphicDBName != "" {
+			newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), scope.TableName())
+		}
+
+		// Delete relations, except the newly created ones
+		if len(values) > 0 {
+			var associationForeignFieldNames []string
+			if relationship.Kind == "many_to_many" {
+				// for many-to-many relations, get association field names from the association foreign keys
+				associationScope := scope.New(reflect.New(field.Type()).Interface())
+				for _, dbName := range relationship.AssociationForeignFieldNames {
+					if field, ok := associationScope.FieldByName(dbName); ok {
+						associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
+					}
+				}
+			} else {
+				// for other relations, use the primary keys
+				for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
+					associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
+				}
+			}
+
+			newPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, field.Interface())
+
+			if len(newPrimaryKeys) > 0 {
+				sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(newPrimaryKeys))
+				newDB = newDB.Where(sql, toQueryValues(newPrimaryKeys)...)
+			}
+		}
+
+		if relationship.Kind == "many_to_many" {
+			// for many-to-many relations, delete the old relations from the join table
+			var sourceForeignFieldNames []string
+
+			for _, dbName := range relationship.ForeignFieldNames {
+				if field, ok := scope.FieldByName(dbName); ok {
+					sourceForeignFieldNames = append(sourceForeignFieldNames, field.Name)
+				}
+			}
+
+			if sourcePrimaryKeys := scope.getColumnAsArray(sourceForeignFieldNames, scope.Value); len(sourcePrimaryKeys) > 0 {
+				newDB = newDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(sourcePrimaryKeys)), toQueryValues(sourcePrimaryKeys)...)
+
+				association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB, relationship))
+			}
+		} else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
+			// for has_one or has_many relations, set the foreign key to NULL (TODO: or delete them?)
+			var foreignKeyMap = map[string]interface{}{}
+			for idx, foreignKey := range relationship.ForeignDBNames {
+				foreignKeyMap[foreignKey] = nil
+				if field, ok := scope.FieldByName(relationship.AssociationForeignFieldNames[idx]); ok {
+					newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
+				}
+			}
+
+			fieldValue := reflect.New(association.field.Field.Type()).Interface()
+			association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
+		}
+	}
+	return association
+}
+
+// Delete removes the relationship between the source and the passed
+// arguments, but won't delete the arguments themselves
+func (association *Association) Delete(values ...interface{}) *Association {
+	var (
+		relationship = association.field.Relationship
+		scope        = association.scope
+		field        = association.field.Field
+		newDB        = scope.NewDB()
+	)
+
+	if len(values) == 0 {
+		return association
+	}
+
+	var deletingResourcePrimaryFieldNames, deletingResourcePrimaryDBNames []string
+	for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() {
+		deletingResourcePrimaryFieldNames = append(deletingResourcePrimaryFieldNames, field.Name)
+		deletingResourcePrimaryDBNames = append(deletingResourcePrimaryDBNames, field.DBName)
+	}
+
+	deletingPrimaryKeys := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, values...)
+
+	if relationship.Kind == "many_to_many" {
+		// source value's foreign keys
+		for idx, foreignKey := range relationship.ForeignDBNames {
+			if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok {
+				newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface())
+			}
+		}
+
+		// get the association's foreign field names
+		var associationScope = scope.New(reflect.New(field.Type()).Interface())
+		var associationForeignFieldNames []string
+		for _, associationDBName := range relationship.AssociationForeignFieldNames {
+			if field, ok := associationScope.FieldByName(associationDBName); ok {
+				associationForeignFieldNames = append(associationForeignFieldNames, field.Name)
+			}
+		}
+
+		// association value's foreign keys
+		deletingPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, values...)
+		sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(deletingPrimaryKeys))
+		newDB = newDB.Where(sql, toQueryValues(deletingPrimaryKeys)...)
+
+		association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB, relationship))
+	} else {
+		var foreignKeyMap = map[string]interface{}{}
+		for _, foreignKey := range relationship.ForeignDBNames {
+			foreignKeyMap[foreignKey] = nil
+		}
+
+		if relationship.Kind == "belongs_to" {
+			// find sources whose foreign keys match the relations being deleted
+			primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, values...)
+			newDB = newDB.Where(
+				fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
+				toQueryValues(primaryKeys)...,
+			)
+
+			// set the foreign key to NULL if any records were affected
+			modelValue := reflect.New(scope.GetModelStruct().ModelType).Interface()
+			if results := newDB.Model(modelValue).UpdateColumn(foreignKeyMap); results.Error == nil {
+				if results.RowsAffected > 0 {
+					scope.updatedAttrsWithValues(foreignKeyMap)
+				}
+			} else {
+				association.setErr(results.Error)
+			}
+		} else if relationship.Kind == "has_one" || relationship.Kind == "has_many" {
+			// find all relations
+			primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
+			newDB = newDB.Where(
+				fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
+				toQueryValues(primaryKeys)...,
+			)
+
+			// only include the relations being deleted
+			newDB = newDB.Where(
+				fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, deletingResourcePrimaryDBNames), toQueryMarks(deletingPrimaryKeys)),
+				toQueryValues(deletingPrimaryKeys)...,
+			)
+
+			// set the matched relations' foreign keys to NULL
+			fieldValue := reflect.New(association.field.Field.Type()).Interface()
+			association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error)
+		}
+	}
+
+	// Remove the deleted records from the source's field
+	if association.Error == nil {
+		if field.Kind() == reflect.Slice {
+			leftValues := reflect.Zero(field.Type())
+
+			for i := 0; i < field.Len(); i++ {
+				reflectValue := field.Index(i)
+				primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, reflectValue.Interface())[0]
+				var isDeleted = false
+				for _, pk := range deletingPrimaryKeys {
+					if equalAsString(primaryKey, pk) {
+						isDeleted = true
+						break
+					}
+				}
+				if !isDeleted {
+					leftValues = reflect.Append(leftValues, reflectValue)
+				}
+			}
+
+			association.field.Set(leftValues)
+		} else if field.Kind() == reflect.Struct {
+			primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, field.Interface())[0]
+			for _, pk := range deletingPrimaryKeys {
+				if equalAsString(primaryKey, pk) {
+					association.field.Set(reflect.Zero(field.Type()))
+					break
+				}
+			}
+		}
+	}
+
+	return association
+}
+
+// Clear removes the relationship between the source and the current
+// associations; it won't delete those associations
+func (association *Association) Clear() *Association {
+	return association.Replace()
+}
+
+// Count returns the count of current associations
+func (association *Association) Count() int {
+	var (
+		count        = 0
+		relationship = association.field.Relationship
+		scope        = association.scope
+		fieldValue   = association.field.Field.Interface()
+		query        = scope.DB()
+	)
+
+	if relationship.Kind == "many_to_many" {
+		query = relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, query, scope.Value)
+	} else if relationship.Kind == "has_many" || relationship.Kind == "has_one" {
+		primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value)
+		query = query.Where(
+			fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)),
+			toQueryValues(primaryKeys)...,
+		)
+	} else if relationship.Kind == "belongs_to" {
+		primaryKeys := scope.getColumnAsArray(relationship.ForeignFieldNames, scope.Value)
+		query = query.Where(
+			fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)),
+			toQueryValues(primaryKeys)...,
+		)
+	}
+
+	if relationship.PolymorphicType != "" {
+		query = query.Where(
+			fmt.Sprintf("%v.%v = ?", scope.New(fieldValue).QuotedTableName(), scope.Quote(relationship.PolymorphicDBName)),
+			scope.TableName(),
+		)
+	}
+
+	query.Model(fieldValue).Count(&count)
+	return count
+}
+
+// saveAssociations saves the passed values as associations
+func (association *Association) saveAssociations(values ...interface{}) *Association {
+	var (
+		scope        = association.scope
+		field        = association.field
+		relationship = field.Relationship
+	)
+
+	saveAssociation := func(reflectValue reflect.Value) {
+		// value has to be a pointer
+		if reflectValue.Kind() != reflect.Ptr {
+			reflectPtr := reflect.New(reflectValue.Type())
+			reflectPtr.Elem().Set(reflectValue)
+			reflectValue = reflectPtr
+		}
+
+		// value has to be saved before being added to a many2many relation
+		if relationship.Kind == "many_to_many" {
+			if scope.New(reflectValue.Interface()).PrimaryKeyZero() {
+				association.setErr(scope.NewDB().Save(reflectValue.Interface()).Error)
+			}
+		}
+
+		// Assign Fields
+		var fieldType = field.Field.Type()
+		var setFieldBackToValue, setSliceFieldBackToValue bool
+		if reflectValue.Type().AssignableTo(fieldType) {
+			field.Set(reflectValue)
+		} else if reflectValue.Type().Elem().AssignableTo(fieldType) {
+			// if the field's type is a struct, the value must be copied back to the argument after save
+			setFieldBackToValue = true
+			field.Set(reflectValue.Elem())
+		} else if fieldType.Kind() == reflect.Slice {
+			if reflectValue.Type().AssignableTo(fieldType.Elem()) {
+				field.Set(reflect.Append(field.Field, reflectValue))
+			} else if reflectValue.Type().Elem().AssignableTo(fieldType.Elem()) {
+				// if the field's type is a slice of structs, the value must be copied back to the argument after save
+				setSliceFieldBackToValue = true
+				field.Set(reflect.Append(field.Field, reflectValue.Elem()))
+			}
+		}
+
+		if relationship.Kind == "many_to_many" {
+			association.setErr(relationship.JoinTableHandler.Add(relationship.JoinTableHandler, scope.NewDB(), scope.Value, reflectValue.Interface()))
+		} else {
+			association.setErr(scope.NewDB().Select(field.Name).Save(scope.Value).Error)
+
+			if setFieldBackToValue {
+				reflectValue.Elem().Set(field.Field)
+			} else if setSliceFieldBackToValue {
+				reflectValue.Elem().Set(field.Field.Index(field.Field.Len() - 1))
+			}
+		}
+	}
+
+	for _, value := range values {
+		reflectValue := reflect.ValueOf(value)
+		indirectReflectValue := reflect.Indirect(reflectValue)
+		if indirectReflectValue.Kind() == reflect.Struct {
+			saveAssociation(reflectValue)
+		} else if indirectReflectValue.Kind() == reflect.Slice {
+			for i := 0; i < indirectReflectValue.Len(); i++ {
+				saveAssociation(indirectReflectValue.Index(i))
+			}
+		} else {
+			association.setErr(errors.New("invalid value type"))
+		}
+	}
+	return association
+}
+
+func (association *Association) setErr(err error) *Association {
+	if err != nil {
+		association.Error = err
+	}
+	return association
+}
diff --git a/vendor/github.com/jinzhu/gorm/association_test.go b/vendor/github.com/jinzhu/gorm/association_test.go
new file mode 100644
index 0000000..52d2303
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/association_test.go
@@ -0,0 +1,842 @@
+package gorm_test
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"testing"
+
+	"github.com/jinzhu/gorm"
+)
+
+func TestBelongsTo(t *testing.T) {
+	post := Post{
+		Title:        "post belongs to",
+		Body:         "body belongs to",
+		Category:     Category{Name: "Category 1"},
+		MainCategory: Category{Name: "Main Category 1"},
+	}
+
+	if err := DB.Save(&post).Error; err != nil {
+		t.Error("Got errors when saving post", 
err)
+	}
+
+	if post.Category.ID == 0 || post.MainCategory.ID == 0 {
+		t.Errorf("Category's primary key should be updated")
+	}
+
+	if post.CategoryId.Int64 == 0 || post.MainCategoryId == 0 {
+		t.Errorf("post's foreign key should be updated")
+	}
+
+	// Query
+	var category1 Category
+	DB.Model(&post).Association("Category").Find(&category1)
+	if category1.Name != "Category 1" {
+		t.Errorf("Query belongs to relations with Association")
+	}
+
+	var mainCategory1 Category
+	DB.Model(&post).Association("MainCategory").Find(&mainCategory1)
+	if mainCategory1.Name != "Main Category 1" {
+		t.Errorf("Query belongs to relations with Association")
+	}
+
+	var category11 Category
+	DB.Model(&post).Related(&category11)
+	if category11.Name != "Category 1" {
+		t.Errorf("Query belongs to relations with Related")
+	}
+
+	if DB.Model(&post).Association("Category").Count() != 1 {
+		t.Errorf("Post's category count should be 1")
+	}
+
+	if DB.Model(&post).Association("MainCategory").Count() != 1 {
+		t.Errorf("Post's main category count should be 1")
+	}
+
+	// Append
+	var category2 = Category{
+		Name: "Category 2",
+	}
+	DB.Model(&post).Association("Category").Append(&category2)
+
+	if category2.ID == 0 {
+		t.Errorf("Category should have an ID when created with Append")
+	}
+
+	var category21 Category
+	DB.Model(&post).Related(&category21)
+
+	if category21.Name != "Category 2" {
+		t.Errorf("Category should be updated with Append")
+	}
+
+	if DB.Model(&post).Association("Category").Count() != 1 {
+		t.Errorf("Post's category count should be 1")
+	}
+
+	// Replace
+	var category3 = Category{
+		Name: "Category 3",
+	}
+	DB.Model(&post).Association("Category").Replace(&category3)
+
+	if category3.ID == 0 {
+		t.Errorf("Category should have an ID when created with Replace")
+	}
+
+	var category31 Category
+	DB.Model(&post).Related(&category31)
+	if category31.Name != "Category 3" {
+		t.Errorf("Category should be updated with Replace")
+	}
+
+	if DB.Model(&post).Association("Category").Count() != 1 {
+		t.Errorf("Post's category count should be 1")
+	}
+
+	// Delete
+	DB.Model(&post).Association("Category").Delete(&category2)
+	if DB.Model(&post).Related(&Category{}).RecordNotFound() {
+		t.Errorf("Should not delete any category when deleting an unrelated Category")
+	}
+
+	if post.Category.Name == "" {
+		t.Errorf("Post's category should not be reset when deleting an unrelated Category")
+	}
+
+	DB.Model(&post).Association("Category").Delete(&category3)
+
+	if post.Category.Name != "" {
+		t.Errorf("Post's category should be reset after Delete")
+	}
+
+	var category41 Category
+	DB.Model(&post).Related(&category41)
+	if category41.Name != "" {
+		t.Errorf("Category should be deleted with Delete")
+	}
+
+	if count := DB.Model(&post).Association("Category").Count(); count != 0 {
+		t.Errorf("Post's category count should be 0 after Delete, but got %v", count)
+	}
+
+	// Clear
+	DB.Model(&post).Association("Category").Append(&Category{
+		Name: "Category 2",
+	})
+
+	if DB.Model(&post).Related(&Category{}).RecordNotFound() {
+		t.Errorf("Should find category after Append")
+	}
+
+	if post.Category.Name == "" {
+		t.Errorf("Post's category should have a value after Append")
+	}
+
+	DB.Model(&post).Association("Category").Clear()
+
+	if post.Category.Name != "" {
+		t.Errorf("Post's category should be cleared after Clear")
+	}
+
+	if !DB.Model(&post).Related(&Category{}).RecordNotFound() {
+		t.Errorf("Should not find any category after Clear")
+	}
+
+	if count := DB.Model(&post).Association("Category").Count(); count != 0 {
t.Errorf("Post's category count should be 0 after Clear, but got %v", count) + } + + // Check Association mode with soft delete + category6 := Category{ + Name: "Category 6", + } + DB.Model(&post).Association("Category").Append(&category6) + + if count := DB.Model(&post).Association("Category").Count(); count != 1 { + t.Errorf("Post's category count should be 1 after Append, but got %v", count) + } + + DB.Delete(&category6) + + if count := DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after the category has been deleted, but got %v", count) + } + + if err := DB.Model(&post).Association("Category").Find(&Category{}).Error; err == nil { + t.Errorf("Post's category is not findable after Delete") + } + + if count := DB.Unscoped().Model(&post).Association("Category").Count(); count != 1 { + t.Errorf("Post's category count should be 1 when query with Unscoped, but got %v", count) + } + + if err := DB.Unscoped().Model(&post).Association("Category").Find(&Category{}).Error; err != nil { + t.Errorf("Post's category should be findable when query with Unscoped, got %v", err) + } +} + +func TestBelongsToOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:ProfileRefer"` + ProfileRefer int + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "belongs_to" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestBelongsToOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Refer string + Name string + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:ProfileID;AssociationForeignKey:Refer"` + ProfileID int + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "belongs_to" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasOne(t *testing.T) { + user := User{ + Name: "has one", + CreditCard: CreditCard{Number: "411111111111"}, + } + + if err := DB.Save(&user).Error; err != nil { + t.Error("Got errors when save user", err.Error()) + } + + if user.CreditCard.UserId.Int64 == 0 { + t.Errorf("CreditCard's foreign key should be updated") + } + + // Query + var creditCard1 CreditCard + DB.Model(&user).Related(&creditCard1) + + if creditCard1.Number != "411111111111" { + t.Errorf("Query has one relations with Related") + } + + var creditCard11 CreditCard + DB.Model(&user).Association("CreditCard").Find(&creditCard11) + + if creditCard11.Number != "411111111111" { + t.Errorf("Query has one relations with Related") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Append + var creditcard2 = CreditCard{ + Number: "411111111112", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard2) + + if creditcard2.ID == 0 { + t.Errorf("Creditcard should has ID when created with Append") + } + + var creditcard21 CreditCard + DB.Model(&user).Related(&creditcard21) + if creditcard21.Number != 
"411111111112" { + t.Errorf("CreditCard should be updated with Append") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Replace + var creditcard3 = CreditCard{ + Number: "411111111113", + } + DB.Model(&user).Association("CreditCard").Replace(&creditcard3) + + if creditcard3.ID == 0 { + t.Errorf("Creditcard should has ID when created with Replace") + } + + var creditcard31 CreditCard + DB.Model(&user).Related(&creditcard31) + if creditcard31.Number != "411111111113" { + t.Errorf("CreditCard should be updated with Replace") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Delete + DB.Model(&user).Association("CreditCard").Delete(&creditcard2) + var creditcard4 CreditCard + DB.Model(&user).Related(&creditcard4) + if creditcard4.Number != "411111111113" { + t.Errorf("Should not delete credit card when Delete a unrelated CreditCard") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + DB.Model(&user).Association("CreditCard").Delete(&creditcard3) + if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Should delete credit card with Delete") + } + + if DB.Model(&user).Association("CreditCard").Count() != 0 { + t.Errorf("User's credit card count should be 0 after Delete") + } + + // Clear + var creditcard5 = CreditCard{ + Number: "411111111115", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard5) + + if DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Should added credit card with Append") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + DB.Model(&user).Association("CreditCard").Clear() + if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Credit card should be deleted with Clear") + } + + if DB.Model(&user).Association("CreditCard").Count() != 0 { + t.Errorf("User's credit card count should be 0 after Clear") + } + + // Check Association mode with soft delete + var creditcard6 = CreditCard{ + Number: "411111111116", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard6) + + if count := DB.Model(&user).Association("CreditCard").Count(); count != 1 { + t.Errorf("User's credit card count should be 1 after Append, but got %v", count) + } + + DB.Delete(&creditcard6) + + if count := DB.Model(&user).Association("CreditCard").Count(); count != 0 { + t.Errorf("User's credit card count should be 0 after credit card deleted, but got %v", count) + } + + if err := DB.Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err == nil { + t.Errorf("User's creditcard is not findable after Delete") + } + + if count := DB.Unscoped().Model(&user).Association("CreditCard").Count(); count != 1 { + t.Errorf("User's credit card count should be 1 when query with Unscoped, but got %v", count) + } + + if err := DB.Unscoped().Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err != nil { + t.Errorf("User's creditcard should be findable when query with Unscoped, got %v", err) + } +} + +func TestHasOneOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserRefer uint + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:UserRefer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind 
!= "has_one" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasOneOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserID uint + } + + type User struct { + gorm.Model + Refer string + Profile Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_one" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasMany(t *testing.T) { + post := Post{ + Title: "post has many", + Body: "body has many", + Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}}, + } + + if err := DB.Save(&post).Error; err != nil { + t.Error("Got errors when save post", err) + } + + for _, comment := range post.Comments { + if comment.PostId == 0 { + t.Errorf("comment's PostID should be updated") + } + } + + var compareComments = func(comments []Comment, contents []string) bool { + var commentContents []string + for _, comment := range comments { + commentContents = append(commentContents, comment.Content) + } + sort.Strings(commentContents) + sort.Strings(contents) + return reflect.DeepEqual(commentContents, contents) + } + + // Query + if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil { + t.Errorf("Comment 1 should be saved") + } + + var comments1 []Comment + DB.Model(&post).Association("Comments").Find(&comments1) + if !compareComments(comments1, []string{"Comment 1", "Comment 2"}) { + t.Errorf("Query has many relations with Association") + } + + var comments11 []Comment + DB.Model(&post).Related(&comments11) + if !compareComments(comments11, []string{"Comment 1", "Comment 2"}) { + t.Errorf("Query has many relations with Related") + } + + if DB.Model(&post).Association("Comments").Count() != 2 { + t.Errorf("Post's comments count should be 2") + } + + // Append + DB.Model(&post).Association("Comments").Append(&Comment{Content: "Comment 3"}) + + var comments2 []Comment + DB.Model(&post).Related(&comments2) + if !compareComments(comments2, []string{"Comment 1", "Comment 2", "Comment 3"}) { + t.Errorf("Append new record to has many relations") + } + + if DB.Model(&post).Association("Comments").Count() != 3 { + t.Errorf("Post's comments count should be 3 after Append") + } + + // Delete + DB.Model(&post).Association("Comments").Delete(comments11) + + var comments3 []Comment + DB.Model(&post).Related(&comments3) + if !compareComments(comments3, []string{"Comment 3"}) { + t.Errorf("Delete an existing resource for has many relations") + } + + if DB.Model(&post).Association("Comments").Count() != 1 { + t.Errorf("Post's comments count should be 1 after Delete 2") + } + + // Replace + DB.Model(&Post{Id: 999}).Association("Comments").Replace() + + var comments4 []Comment + DB.Model(&post).Related(&comments4) + if len(comments4) == 0 { + t.Errorf("Replace for other resource should not clear all comments") + } + + DB.Model(&post).Association("Comments").Replace(&Comment{Content: "Comment 4"}, &Comment{Content: "Comment 5"}) + + var comments41 []Comment + DB.Model(&post).Related(&comments41) + if !compareComments(comments41, 
[]string{"Comment 4", "Comment 5"}) { + t.Errorf("Replace has many relations") + } + + // Clear + DB.Model(&Post{Id: 999}).Association("Comments").Clear() + + var comments5 []Comment + DB.Model(&post).Related(&comments5) + if len(comments5) == 0 { + t.Errorf("Clear should not clear all comments") + } + + DB.Model(&post).Association("Comments").Clear() + + var comments51 []Comment + DB.Model(&post).Related(&comments51) + if len(comments51) != 0 { + t.Errorf("Clear has many relations") + } + + // Check Association mode with soft delete + var comment6 = Comment{ + Content: "comment 6", + } + DB.Model(&post).Association("Comments").Append(&comment6) + + if count := DB.Model(&post).Association("Comments").Count(); count != 1 { + t.Errorf("post's comments count should be 1 after Append, but got %v", count) + } + + DB.Delete(&comment6) + + if count := DB.Model(&post).Association("Comments").Count(); count != 0 { + t.Errorf("post's comments count should be 0 after comment been deleted, but got %v", count) + } + + var comments6 []Comment + if DB.Model(&post).Association("Comments").Find(&comments6); len(comments6) != 0 { + t.Errorf("post's comments count should be 0 when find with Find, but got %v", len(comments6)) + } + + if count := DB.Unscoped().Model(&post).Association("Comments").Count(); count != 1 { + t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", count) + } + + var comments61 []Comment + if DB.Unscoped().Model(&post).Association("Comments").Find(&comments61); len(comments61) != 1 { + t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", len(comments61)) + } +} + +func TestHasManyOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserRefer uint + } + + type User struct { + gorm.Model + Profile []Profile `gorm:"ForeignKey:UserRefer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_many" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasManyOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserID uint + } + + type User struct { + gorm.Model + Refer string + Profile []Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_many" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestManyToMany(t *testing.T) { + DB.Raw("delete from languages") + var languages = []Language{{Name: "ZH"}, {Name: "EN"}} + user := User{Name: "Many2Many", Languages: languages} + DB.Save(&user) + + // Query + var newLanguages []Language + DB.Model(&user).Related(&newLanguages, "Languages") + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Query many to many relations") + } + + DB.Model(&user).Association("Languages").Find(&newLanguages) + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Should be able to find many to many relations") + } + + if DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) { + t.Errorf("Count should 
return correct result") + } + + // Append + DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"}) + if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() { + t.Errorf("New record should be saved when append") + } + + languageA := Language{Name: "AA"} + DB.Save(&languageA) + DB.Model(&User{Id: user.Id}).Association("Languages").Append(&languageA) + + languageC := Language{Name: "CC"} + DB.Save(&languageC) + DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC}) + + DB.Model(&User{Id: user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}}) + + totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"} + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) { + t.Errorf("All appended languages should be saved") + } + + // Delete + user.Languages = []Language{} + DB.Model(&user).Association("Languages").Find(&user.Languages) + + var language Language + DB.Where("name = ?", "EE").First(&language) + DB.Model(&user).Association("Languages").Delete(language, &language) + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 { + t.Errorf("Relations should be deleted with Delete") + } + if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() { + t.Errorf("Language EE should not be deleted") + } + + DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages) + + user2 := User{Name: "Many2Many_User2", Languages: languages} + DB.Save(&user2) + + DB.Model(&user).Association("Languages").Delete(languages, &languages) + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 { + t.Errorf("Relations should be deleted with Delete") + } + + if DB.Model(&user2).Association("Languages").Count() == 0 { + t.Errorf("Other user's relations should not be deleted") + } + + // Replace + var languageB Language + DB.Where("name = ?", "BB").First(&languageB) + DB.Model(&user).Association("Languages").Replace(languageB) + if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 { + t.Errorf("Relations should be replaced") + } + + DB.Model(&user).Association("Languages").Replace() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be replaced with empty") + } + + DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}}) + if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) { + t.Errorf("Relations should be replaced") + } + + // Clear + DB.Model(&user).Association("Languages").Clear() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be cleared") + } + + // Check Association mode with soft delete + var language6 = Language{ + Name: "language 6", + } + DB.Model(&user).Association("Languages").Append(&language6) + + if count := DB.Model(&user).Association("Languages").Count(); count != 1 { + t.Errorf("user's languages count should be 1 after Append, but got %v", count) + } + + DB.Delete(&language6) + + if count := DB.Model(&user).Association("Languages").Count(); count != 0 { + t.Errorf("user's languages count should be 0 after language been deleted, but got %v", count) + } + + var languages6 []Language + if DB.Model(&user).Association("Languages").Find(&languages6); len(languages6) != 0 { + 
t.Errorf("user's languages count should be 0 when find with Find, but got %v", len(languages6)) + } + + if count := DB.Unscoped().Model(&user).Association("Languages").Count(); count != 1 { + t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", count) + } + + var languages61 []Language + if DB.Unscoped().Model(&user).Association("Languages").Find(&languages61); len(languages61) != 1 { + t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", len(languages61)) + } +} + +func TestRelated(t *testing.T) { + user := User{ + Name: "jinzhu", + BillingAddress: Address{Address1: "Billing Address - Address 1"}, + ShippingAddress: Address{Address1: "Shipping Address - Address 1"}, + Emails: []Email{{Email: "jinzhu@example.com"}, {Email: "jinzhu-2@example@example.com"}}, + CreditCard: CreditCard{Number: "1234567890"}, + Company: Company{Name: "company1"}, + } + + if err := DB.Save(&user).Error; err != nil { + t.Errorf("No error should happen when saving user") + } + + if user.CreditCard.ID == 0 { + t.Errorf("After user save, credit card should have id") + } + + if user.BillingAddress.ID == 0 { + t.Errorf("After user save, billing address should have id") + } + + if user.Emails[0].Id == 0 { + t.Errorf("After user save, billing address should have id") + } + + var emails []Email + DB.Model(&user).Related(&emails) + if len(emails) != 2 { + t.Errorf("Should have two emails") + } + + var emails2 []Email + DB.Model(&user).Where("email = ?", "jinzhu@example.com").Related(&emails2) + if len(emails2) != 1 { + t.Errorf("Should have two emails") + } + + var emails3 []*Email + DB.Model(&user).Related(&emails3) + if len(emails3) != 2 { + t.Errorf("Should have two emails") + } + + var user1 User + DB.Model(&user).Related(&user1.Emails) + if len(user1.Emails) != 2 { + t.Errorf("Should have only one email match related condition") + } + + var address1 Address + DB.Model(&user).Related(&address1, "BillingAddressId") + if address1.Address1 != "Billing Address - Address 1" { + t.Errorf("Should get billing address from user correctly") + } + + user1 = User{} + DB.Model(&address1).Related(&user1, "BillingAddressId") + if DB.NewRecord(user1) { + t.Errorf("Should get user from address correctly") + } + + var user2 User + DB.Model(&emails[0]).Related(&user2) + if user2.Id != user.Id || user2.Name != user.Name { + t.Errorf("Should get user from email correctly") + } + + var creditcard CreditCard + var user3 User + DB.First(&creditcard, "number = ?", "1234567890") + DB.Model(&creditcard).Related(&user3) + if user3.Id != user.Id || user3.Name != user.Name { + t.Errorf("Should get user from credit card correctly") + } + + if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() { + t.Errorf("RecordNotFound for Related") + } + + var company Company + if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" { + t.Errorf("RecordNotFound for Related") + } +} + +func TestForeignKey(t *testing.T) { + for _, structField := range DB.NewScope(&User{}).GetStructFields() { + for _, foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Email{}).GetStructFields() { + for _, foreignKey := range []string{"UserId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign 
key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Post{}).GetStructFields() { + for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Comment{}).GetStructFields() { + for _, foreignKey := range []string{"PostId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback.go b/vendor/github.com/jinzhu/gorm/callback.go new file mode 100644 index 0000000..93198a7 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback.go @@ -0,0 +1,237 @@ +package gorm + +import ( + "fmt" +) + +// DefaultCallback default callbacks defined by gorm +var DefaultCallback = &Callback{} + +// Callback is a struct that contains all CURD callbacks +// Field `creates` contains callbacks will be call when creating object +// Field `updates` contains callbacks will be call when updating object +// Field `deletes` contains callbacks will be call when deleting object +// Field `queries` contains callbacks will be call when querying object with query methods like Find, First, Related, Association... +// Field `rowQueries` contains callbacks will be call when querying object with Row, Rows... +// Field `processors` contains all callback processors, will be used to generate above callbacks in order +type Callback struct { + creates []*func(scope *Scope) + updates []*func(scope *Scope) + deletes []*func(scope *Scope) + queries []*func(scope *Scope) + rowQueries []*func(scope *Scope) + processors []*CallbackProcessor +} + +// CallbackProcessor contains callback informations +type CallbackProcessor struct { + name string // current callback's name + before string // register current callback before a callback + after string // register current callback after a callback + replace bool // replace callbacks with same name + remove bool // delete callbacks with same name + kind string // callback type: create, update, delete, query, row_query + processor *func(scope *Scope) // callback handler + parent *Callback +} + +func (c *Callback) clone() *Callback { + return &Callback{ + creates: c.creates, + updates: c.updates, + deletes: c.deletes, + queries: c.queries, + rowQueries: c.rowQueries, + processors: c.processors, + } +} + +// Create could be used to register callbacks for creating object +// db.Callback().Create().After("gorm:create").Register("plugin:run_after_create", func(*Scope) { +// // business logic +// ... +// +// // set error if some thing wrong happened, will rollback the creating +// scope.Err(errors.New("error")) +// }) +func (c *Callback) Create() *CallbackProcessor { + return &CallbackProcessor{kind: "create", parent: c} +} + +// Update could be used to register callbacks for updating object, refer `Create` for usage +func (c *Callback) Update() *CallbackProcessor { + return &CallbackProcessor{kind: "update", parent: c} +} + +// Delete could be used to register callbacks for deleting object, refer `Create` for usage +func (c *Callback) Delete() *CallbackProcessor { + return &CallbackProcessor{kind: "delete", parent: c} +} + +// Query could be used to register callbacks for querying objects with query methods like `Find`, `First`, `Related`, `Association`... 
+// Refer to `Create` for usage
+func (c *Callback) Query() *CallbackProcessor {
+	return &CallbackProcessor{kind: "query", parent: c}
+}
+
+// RowQuery could be used to register callbacks for querying objects with `Row`, `Rows`; refer to `Create` for usage
+func (c *Callback) RowQuery() *CallbackProcessor {
+	return &CallbackProcessor{kind: "row_query", parent: c}
+}
+
+// After inserts a new callback after the callback named `callbackName`; refer to `Callbacks.Create`
+func (cp *CallbackProcessor) After(callbackName string) *CallbackProcessor {
+	cp.after = callbackName
+	return cp
+}
+
+// Before inserts a new callback before the callback named `callbackName`; refer to `Callbacks.Create`
+func (cp *CallbackProcessor) Before(callbackName string) *CallbackProcessor {
+	cp.before = callbackName
+	return cp
+}
+
+// Register registers a new callback; refer to `Callbacks.Create`
+func (cp *CallbackProcessor) Register(callbackName string, callback func(scope *Scope)) {
+	cp.name = callbackName
+	cp.processor = &callback
+	cp.parent.processors = append(cp.parent.processors, cp)
+	cp.parent.reorder()
+}
+
+// Remove removes a registered callback
+// db.Callback().Create().Remove("gorm:update_time_stamp_when_create")
+func (cp *CallbackProcessor) Remove(callbackName string) {
+	fmt.Printf("[info] removing callback `%v` from %v\n", callbackName, fileWithLineNum())
+	cp.name = callbackName
+	cp.remove = true
+	cp.parent.processors = append(cp.parent.processors, cp)
+	cp.parent.reorder()
+}
+
+// Replace replaces a registered callback with a new callback
+// db.Callback().Create().Replace("gorm:update_time_stamp_when_create", func(*Scope) {
+//   scope.SetColumn("Created", now)
+//   scope.SetColumn("Updated", now)
+// })
+func (cp *CallbackProcessor) Replace(callbackName string, callback func(scope *Scope)) {
+	fmt.Printf("[info] replacing callback `%v` from %v\n", callbackName, fileWithLineNum())
+	cp.name = callbackName
+	cp.processor = &callback
+	cp.replace = true
+	cp.parent.processors = append(cp.parent.processors, cp)
+	cp.parent.reorder()
+}
+
+// Get returns a registered callback by name
+// db.Callback().Create().Get("gorm:create")
+func (cp *CallbackProcessor) Get(callbackName string) (callback func(scope *Scope)) {
+	for _, p := range cp.parent.processors {
+		if p.name == callbackName && p.kind == cp.kind && !cp.remove {
+			return *p.processor
+		}
+	}
+	return nil
+}
+
+// getRIndex returns the rightmost index of str in the string slice strs, or -1 if absent
+func getRIndex(strs []string, str string) int {
+	for i := len(strs) - 1; i >= 0; i-- {
+		if strs[i] == str {
+			return i
+		}
+	}
+	return -1
+}
+
+// sortProcessors sorts callback processors based on their before, after, remove and replace settings
+func sortProcessors(cps []*CallbackProcessor) []*func(scope *Scope) {
+	var (
+		allNames, sortedNames []string
+		sortCallbackProcessor func(c *CallbackProcessor)
+	)
+
+	for _, cp := range cps {
+		// show a warning message if the callback name already exists
+		if index := getRIndex(allNames, cp.name); index > -1 && !cp.replace && !cp.remove {
+			fmt.Printf("[warning] duplicated callback `%v` from %v\n", cp.name, fileWithLineNum())
+		}
+		allNames = append(allNames, cp.name)
+	}
+
+	sortCallbackProcessor = func(c *CallbackProcessor) {
+		if getRIndex(sortedNames, c.name) == -1 { // if not sorted
+			if c.before != "" { // if a before callback is defined
+				if index := getRIndex(sortedNames, c.before); index != -1 {
+					// if the before callback is already sorted, insert the current callback just before it
+					sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...)
+				} else if index := getRIndex(allNames, c.before); index != -1 {
+					// if the before callback exists but hasn't been sorted yet, append the current callback at the end for now, then sort the before callback
+					sortedNames = append(sortedNames, c.name)
+					sortCallbackProcessor(cps[index])
+				}
+			}
+
+			if c.after != "" { // if an after callback is defined
+				if index := getRIndex(sortedNames, c.after); index != -1 {
+					// if the after callback is already sorted, insert the current callback just after it
+					sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...)
+				} else if index := getRIndex(allNames, c.after); index != -1 {
+					// if the after callback exists but hasn't been sorted yet
+					cp := cps[index]
+					// point the after callback's `before` at the current callback
+					if cp.before == "" {
+						cp.before = c.name
+					}
+					sortCallbackProcessor(cp)
+				}
+			}
+
+			// if the current callback still hasn't been sorted, append it at the end
+			if getRIndex(sortedNames, c.name) == -1 {
+				sortedNames = append(sortedNames, c.name)
+			}
+		}
+	}
+
+	for _, cp := range cps {
+		sortCallbackProcessor(cp)
+	}
+
+	var sortedFuncs []*func(scope *Scope)
+	for _, name := range sortedNames {
+		if index := getRIndex(allNames, name); !cps[index].remove {
+			sortedFuncs = append(sortedFuncs, cps[index].processor)
+		}
+	}
+
+	return sortedFuncs
+}
+
+// reorder sorts all registered processors and resets the CRUD callbacks
+func (c *Callback) reorder() {
+	var creates, updates, deletes, queries, rowQueries []*CallbackProcessor
+
+	for _, processor := range c.processors {
+		if processor.name != "" {
+			switch processor.kind {
+			case "create":
+				creates = append(creates, processor)
+			case "update":
+				updates = append(updates, processor)
+			case "delete":
+				deletes = append(deletes, processor)
+			case "query":
+				queries = append(queries, processor)
+			case "row_query":
+				rowQueries = append(rowQueries, processor)
+			}
+		}
+	}
+
+	c.creates = sortProcessors(creates)
+	c.updates = sortProcessors(updates)
+	c.deletes = sortProcessors(deletes)
+	c.queries = sortProcessors(queries)
+	c.rowQueries = sortProcessors(rowQueries)
+}
diff --git a/vendor/github.com/jinzhu/gorm/callback_create.go b/vendor/github.com/jinzhu/gorm/callback_create.go
new file mode 100644
index 0000000..e3cd2f0
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/callback_create.go
@@ -0,0 +1,144 @@
+package gorm
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Define callbacks for creating
+func init() {
+	DefaultCallback.Create().Register("gorm:begin_transaction", beginTransactionCallback)
+	DefaultCallback.Create().Register("gorm:before_create", beforeCreateCallback)
+	DefaultCallback.Create().Register("gorm:save_before_associations", saveBeforeAssociationsCallback)
+	DefaultCallback.Create().Register("gorm:update_time_stamp", updateTimeStampForCreateCallback)
+	DefaultCallback.Create().Register("gorm:create", createCallback)
+	DefaultCallback.Create().Register("gorm:force_reload_after_create", forceReloadAfterCreateCallback)
+	DefaultCallback.Create().Register("gorm:save_after_associations", saveAfterAssociationsCallback)
+	DefaultCallback.Create().Register("gorm:after_create", afterCreateCallback)
+	DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback)
+}
+
+// beforeCreateCallback will invoke the `BeforeSave`, `BeforeCreate` methods before creating
+func beforeCreateCallback(scope *Scope) {
+	if !scope.HasError() {
+		scope.CallMethod("BeforeSave")
+	}
+	if !scope.HasError() {
+		scope.CallMethod("BeforeCreate")
+	}
+}
+
+// updateTimeStampForCreateCallback will set `CreatedAt`, `UpdatedAt` when
creating +func updateTimeStampForCreateCallback(scope *Scope) { + if !scope.HasError() { + now := NowFunc() + scope.SetColumn("CreatedAt", now) + scope.SetColumn("UpdatedAt", now) + } +} + +// createCallback the callback used to insert data into database +func createCallback(scope *Scope) { + if !scope.HasError() { + defer scope.trace(NowFunc()) + + var ( + columns, placeholders []string + blankColumnsWithDefaultValue []string + ) + + for _, field := range scope.Fields() { + if scope.changeableField(field) { + if field.IsNormal { + if !field.IsPrimaryKey || !field.IsBlank { + if field.IsBlank && field.HasDefaultValue { + blankColumnsWithDefaultValue = append(blankColumnsWithDefaultValue, field.DBName) + scope.InstanceSet("gorm:blank_columns_with_default_value", blankColumnsWithDefaultValue) + } else { + columns = append(columns, scope.Quote(field.DBName)) + placeholders = append(placeholders, scope.AddToVars(field.Field.Interface())) + } + } + } else if field.Relationship != nil && field.Relationship.Kind == "belongs_to" { + for _, foreignKey := range field.Relationship.ForeignDBNames { + if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) { + columns = append(columns, scope.Quote(foreignField.DBName)) + placeholders = append(placeholders, scope.AddToVars(foreignField.Field.Interface())) + } + } + } + } + } + + var ( + returningColumn = "*" + quotedTableName = scope.QuotedTableName() + primaryField = scope.PrimaryField() + extraOption string + ) + + if str, ok := scope.Get("gorm:insert_option"); ok { + extraOption = fmt.Sprint(str) + } + + if primaryField != nil { + returningColumn = scope.Quote(primaryField.DBName) + } + + lastInsertIDReturningSuffix := scope.Dialect().LastInsertIDReturningSuffix(quotedTableName, returningColumn) + + if len(columns) == 0 { + scope.Raw(fmt.Sprintf( + "INSERT INTO %v DEFAULT VALUES%v%v", + quotedTableName, + addExtraSpaceIfExist(extraOption), + addExtraSpaceIfExist(lastInsertIDReturningSuffix), + )) + } else { + scope.Raw(fmt.Sprintf( + "INSERT INTO %v (%v) VALUES (%v)%v%v", + scope.QuotedTableName(), + strings.Join(columns, ","), + strings.Join(placeholders, ","), + addExtraSpaceIfExist(extraOption), + addExtraSpaceIfExist(lastInsertIDReturningSuffix), + )) + } + + // execute create sql + if lastInsertIDReturningSuffix == "" || primaryField == nil { + if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { + // set rows affected count + scope.db.RowsAffected, _ = result.RowsAffected() + + // set primary value to primary field + if primaryField != nil && primaryField.IsBlank { + if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil { + scope.Err(primaryField.Set(primaryValue)) + } + } + } + } else { + if err := scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...).Scan(primaryField.Field.Addr().Interface()); scope.Err(err) == nil { + scope.db.RowsAffected = 1 + } + } + } +} + +// forceReloadAfterCreateCallback will reload columns that having default value, and set it back to current object +func forceReloadAfterCreateCallback(scope *Scope) { + if blankColumnsWithDefaultValue, ok := scope.InstanceGet("gorm:blank_columns_with_default_value"); ok { + scope.DB().New().Select(blankColumnsWithDefaultValue.([]string)).First(scope.Value) + } +} + +// afterCreateCallback will invoke `AfterCreate`, `AfterSave` method after creating +func afterCreateCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("AfterCreate") + } + if !scope.HasError() { + 
scope.CallMethod("AfterSave") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_delete.go b/vendor/github.com/jinzhu/gorm/callback_delete.go new file mode 100644 index 0000000..c8ffcc8 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_delete.go @@ -0,0 +1,53 @@ +package gorm + +import "fmt" + +// Define callbacks for deleting +func init() { + DefaultCallback.Delete().Register("gorm:begin_transaction", beginTransactionCallback) + DefaultCallback.Delete().Register("gorm:before_delete", beforeDeleteCallback) + DefaultCallback.Delete().Register("gorm:delete", deleteCallback) + DefaultCallback.Delete().Register("gorm:after_delete", afterDeleteCallback) + DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) +} + +// beforeDeleteCallback will invoke `BeforeDelete` method before deleting +func beforeDeleteCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("BeforeDelete") + } +} + +// deleteCallback used to delete data from database or set deleted_at to current time (when using with soft delete) +func deleteCallback(scope *Scope) { + if !scope.HasError() { + var extraOption string + if str, ok := scope.Get("gorm:delete_option"); ok { + extraOption = fmt.Sprint(str) + } + + if !scope.Search.Unscoped && scope.HasColumn("DeletedAt") { + scope.Raw(fmt.Sprintf( + "UPDATE %v SET deleted_at=%v%v%v", + scope.QuotedTableName(), + scope.AddToVars(NowFunc()), + addExtraSpaceIfExist(scope.CombinedConditionSql()), + addExtraSpaceIfExist(extraOption), + )).Exec() + } else { + scope.Raw(fmt.Sprintf( + "DELETE FROM %v%v%v", + scope.QuotedTableName(), + addExtraSpaceIfExist(scope.CombinedConditionSql()), + addExtraSpaceIfExist(extraOption), + )).Exec() + } + } +} + +// afterDeleteCallback will invoke `AfterDelete` method after deleting +func afterDeleteCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("AfterDelete") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_query.go b/vendor/github.com/jinzhu/gorm/callback_query.go new file mode 100644 index 0000000..93782b1 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_query.go @@ -0,0 +1,93 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" +) + +// Define callbacks for querying +func init() { + DefaultCallback.Query().Register("gorm:query", queryCallback) + DefaultCallback.Query().Register("gorm:preload", preloadCallback) + DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback) +} + +// queryCallback used to query data from database +func queryCallback(scope *Scope) { + defer scope.trace(NowFunc()) + + var ( + isSlice, isPtr bool + resultType reflect.Type + results = scope.IndirectValue() + ) + + if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok { + if primaryField := scope.PrimaryField(); primaryField != nil { + scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy)) + } + } + + if value, ok := scope.Get("gorm:query_destination"); ok { + results = reflect.Indirect(reflect.ValueOf(value)) + } + + if kind := results.Kind(); kind == reflect.Slice { + isSlice = true + resultType = results.Type().Elem() + results.Set(reflect.MakeSlice(results.Type(), 0, 0)) + + if resultType.Kind() == reflect.Ptr { + isPtr = true + resultType = resultType.Elem() + } + } else if kind != reflect.Struct { + scope.Err(errors.New("unsupported destination, should be slice or struct")) + return + } + + scope.prepareQuerySQL() + + if !scope.HasError() { + 
scope.db.RowsAffected = 0 + if str, ok := scope.Get("gorm:query_option"); ok { + scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str)) + } + + if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { + defer rows.Close() + + columns, _ := rows.Columns() + for rows.Next() { + scope.db.RowsAffected++ + + elem := results + if isSlice { + elem = reflect.New(resultType).Elem() + } + + scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields()) + + if isSlice { + if isPtr { + results.Set(reflect.Append(results, elem.Addr())) + } else { + results.Set(reflect.Append(results, elem)) + } + } + } + + if scope.db.RowsAffected == 0 && !isSlice { + scope.Err(ErrRecordNotFound) + } + } + } +} + +// afterQueryCallback will invoke `AfterFind` method after querying +func afterQueryCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("AfterFind") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_query_preload.go b/vendor/github.com/jinzhu/gorm/callback_query_preload.go new file mode 100644 index 0000000..5746f53 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_query_preload.go @@ -0,0 +1,310 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// preloadCallback used to preload associations +func preloadCallback(scope *Scope) { + if scope.Search.preload == nil || scope.HasError() { + return + } + + var ( + preloadedMap = map[string]bool{} + fields = scope.Fields() + ) + + for _, preload := range scope.Search.preload { + var ( + preloadFields = strings.Split(preload.schema, ".") + currentScope = scope + currentFields = fields + ) + + for idx, preloadField := range preloadFields { + var currentPreloadConditions []interface{} + + // if not preloaded + if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] { + + // assign search conditions to last preload + if idx == len(preloadFields)-1 { + currentPreloadConditions = preload.conditions + } + + for _, field := range currentFields { + if field.Name != preloadField || field.Relationship == nil { + continue + } + + switch field.Relationship.Kind { + case "has_one": + currentScope.handleHasOnePreload(field, currentPreloadConditions) + case "has_many": + currentScope.handleHasManyPreload(field, currentPreloadConditions) + case "belongs_to": + currentScope.handleBelongsToPreload(field, currentPreloadConditions) + case "many_to_many": + currentScope.handleManyToManyPreload(field, currentPreloadConditions) + default: + scope.Err(errors.New("unsupported relation")) + } + + preloadedMap[preloadKey] = true + break + } + + if !preloadedMap[preloadKey] { + scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType)) + return + } + } + + // preload next level + if idx < len(preloadFields)-1 { + currentScope = currentScope.getColumnAsScope(preloadField) + currentFields = currentScope.Fields() + } + } + } +} + +func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) { + var ( + preloadDB = scope.NewDB() + preloadConditions []interface{} + ) + + for _, condition := range conditions { + if scopes, ok := condition.(func(*DB) *DB); ok { + preloadDB = scopes(preloadDB) + } else { + preloadConditions = append(preloadConditions, condition) + } + } + + return preloadDB, preloadConditions +} + +// handleHasOnePreload used to preload has one associations +func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) { + relation := field.Relationship 
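+	// The preload helpers below all follow the same pattern: collect the
+	// owners' key values from the current scope, fetch every related row in
+	// one query with an IN (...) condition, then match the fetched rows back
+	// to their owners in memory, so each preloaded association costs a single
+	// extra query rather than one query per parent row.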
+
+	// get relation's primary keys
+	primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
+	if len(primaryKeys) == 0 {
+		return
+	}
+
+	// preload conditions
+	preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+	// find relations
+	results := makeSlice(field.Struct.Type)
+	scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
+
+	// assign find results
+	var (
+		resultsValue       = indirect(reflect.ValueOf(results))
+		indirectScopeValue = scope.IndirectValue()
+	)
+
+	for i := 0; i < resultsValue.Len(); i++ {
+		result := resultsValue.Index(i)
+		if indirectScopeValue.Kind() == reflect.Slice {
+			foreignValues := getValueFromFields(result, relation.ForeignFieldNames)
+			for j := 0; j < indirectScopeValue.Len(); j++ {
+				if indirectValue := indirect(indirectScopeValue.Index(j)); equalAsString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames), foreignValues) {
+					indirectValue.FieldByName(field.Name).Set(result)
+					break
+				}
+			}
+		} else {
+			scope.Err(field.Set(result))
+		}
+	}
+}
+
+// handleHasManyPreload is used to preload has many associations
+func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) {
+	relation := field.Relationship
+
+	// get relation's primary keys
+	primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value)
+	if len(primaryKeys) == 0 {
+		return
+	}
+
+	// preload conditions
+	preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+	// find relations
+	results := makeSlice(field.Struct.Type)
+	scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
+
+	// assign find results
+	var (
+		resultsValue       = indirect(reflect.ValueOf(results))
+		indirectScopeValue = scope.IndirectValue()
+	)
+
+	if indirectScopeValue.Kind() == reflect.Slice {
+		for i := 0; i < resultsValue.Len(); i++ {
+			result := resultsValue.Index(i)
+			foreignValues := getValueFromFields(result, relation.ForeignFieldNames)
+			for j := 0; j < indirectScopeValue.Len(); j++ {
+				object := indirect(indirectScopeValue.Index(j))
+				if equalAsString(getValueFromFields(object, relation.AssociationForeignFieldNames), foreignValues) {
+					objectField := object.FieldByName(field.Name)
+					objectField.Set(reflect.Append(objectField, result))
+					break
+				}
+			}
+		}
+	} else {
+		scope.Err(field.Set(resultsValue))
+	}
+}
+
+// handleBelongsToPreload is used to preload belongs to associations
+func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) {
+	relation := field.Relationship
+
+	// preload conditions
+	preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions)
+
+	// get relation's primary keys
+	primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value)
+	if len(primaryKeys) == 0 {
+		return
+	}
+
+	// find relations
+	results := makeSlice(field.Struct.Type)
+	scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error)
+
+	// assign find results
+	var (
+		resultsValue       = indirect(reflect.ValueOf(results))
+		indirectScopeValue = scope.IndirectValue()
+	)
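+	// assign each fetched association to every owner whose foreign key matches;
+	// unlike the has-one case there is no early break, because several owners
+	// can reference the same belongs-to row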
+ + for i := 0; i < resultsValue.Len(); i++ { + result := resultsValue.Index(i) + if indirectScopeValue.Kind() == reflect.Slice { + value := getValueFromFields(result, relation.AssociationForeignFieldNames) + for j := 0; j < indirectScopeValue.Len(); j++ { + object := indirect(indirectScopeValue.Index(j)) + if equalAsString(getValueFromFields(object, relation.ForeignFieldNames), value) { + object.FieldByName(field.Name).Set(result) + } + } + } else { + scope.Err(field.Set(result)) + } + } +} + +// handleManyToManyPreload used to preload many to many associations +func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) { + var ( + relation = field.Relationship + joinTableHandler = relation.JoinTableHandler + fieldType = field.Struct.Type.Elem() + foreignKeyValue interface{} + foreignKeyType = reflect.ValueOf(&foreignKeyValue).Type() + linkHash = map[string][]reflect.Value{} + isPtr bool + ) + + if fieldType.Kind() == reflect.Ptr { + isPtr = true + fieldType = fieldType.Elem() + } + + var sourceKeys = []string{} + for _, key := range joinTableHandler.SourceForeignKeys() { + sourceKeys = append(sourceKeys, key.DBName) + } + + // preload conditions + preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) + + // generate query with join table + newScope := scope.New(reflect.New(fieldType).Interface()) + preloadDB = preloadDB.Table(newScope.TableName()).Select("*") + preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value) + + // preload inline conditions + if len(preloadConditions) > 0 { + preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...) + } + + rows, err := preloadDB.Rows() + + if scope.Err(err) != nil { + return + } + defer rows.Close() + + columns, _ := rows.Columns() + for rows.Next() { + var ( + elem = reflect.New(fieldType).Elem() + fields = scope.New(elem.Addr().Interface()).Fields() + ) + + // register foreign keys in join tables + var joinTableFields []*Field + for _, sourceKey := range sourceKeys { + joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()}) + } + + scope.scan(rows, columns, append(fields, joinTableFields...)) + + var foreignKeys = make([]interface{}, len(sourceKeys)) + // generate hashed forkey keys in join table + for idx, joinTableField := range joinTableFields { + if !joinTableField.Field.IsNil() { + foreignKeys[idx] = joinTableField.Field.Elem().Interface() + } + } + hashedSourceKeys := toString(foreignKeys) + + if isPtr { + linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr()) + } else { + linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem) + } + } + + // assign find results + var ( + indirectScopeValue = scope.IndirectValue() + fieldsSourceMap = map[string]reflect.Value{} + foreignFieldNames = []string{} + ) + + for _, dbName := range relation.ForeignFieldNames { + if field, ok := scope.FieldByName(dbName); ok { + foreignFieldNames = append(foreignFieldNames, field.Name) + } + } + + if indirectScopeValue.Kind() == reflect.Slice { + for j := 0; j < indirectScopeValue.Len(); j++ { + object := indirect(indirectScopeValue.Index(j)) + fieldsSourceMap[toString(getValueFromFields(object, foreignFieldNames))] = object.FieldByName(field.Name) + } + } else if indirectScopeValue.IsValid() { + fieldsSourceMap[toString(getValueFromFields(indirectScopeValue, foreignFieldNames))] = indirectScopeValue.FieldByName(field.Name) + } + + for 
source, link := range linkHash { + fieldsSourceMap[source].Set(reflect.Append(fieldsSourceMap[source], link...)) + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_save.go b/vendor/github.com/jinzhu/gorm/callback_save.go new file mode 100644 index 0000000..5ffe53b --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_save.go @@ -0,0 +1,92 @@ +package gorm + +import "reflect" + +func beginTransactionCallback(scope *Scope) { + scope.Begin() +} + +func commitOrRollbackTransactionCallback(scope *Scope) { + scope.CommitOrRollback() +} + +func saveBeforeAssociationsCallback(scope *Scope) { + if !scope.shouldSaveAssociations() { + return + } + for _, field := range scope.Fields() { + if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored { + if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { + fieldValue := field.Field.Addr().Interface() + scope.Err(scope.NewDB().Save(fieldValue).Error) + if len(relationship.ForeignFieldNames) != 0 { + // set value's foreign key + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok { + scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface())) + } + } + } + } + } + } +} + +func saveAfterAssociationsCallback(scope *Scope) { + if !scope.shouldSaveAssociations() { + return + } + for _, field := range scope.Fields() { + if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored { + if relationship := field.Relationship; relationship != nil && + (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") { + value := field.Field + + switch value.Kind() { + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + newDB := scope.NewDB() + elem := value.Index(i).Addr().Interface() + newScope := newDB.NewScope(elem) + + if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.FieldByName(associationForeignName); ok { + scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + + if relationship.PolymorphicType != "" { + scope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName())) + } + + scope.Err(newDB.Save(elem).Error) + + if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil { + scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value)) + } + } + default: + elem := value.Addr().Interface() + newScope := scope.New(elem) + if len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.FieldByName(associationForeignName); ok { + scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + + if relationship.PolymorphicType != "" { + scope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName())) + } + scope.Err(scope.NewDB().Save(elem).Error) + } + } + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_system_test.go b/vendor/github.com/jinzhu/gorm/callback_system_test.go new file mode 100644 index 0000000..13ca3f4 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_system_test.go @@ -0,0 +1,112 @@ +package gorm + 
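+// The tests below drive the callback ordering engine directly. As a minimal
+// sketch of the behaviour under test (using the no-op callbacks declared
+// further down):
+//
+//	var cb Callback
+//	cb.Create().Register("create", create)
+//	cb.Create().Before("create").Register("before_create1", beforeCreate1)
+//	// cb.creates now runs beforeCreate1 first, then create.
+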
+import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func equalFuncs(funcs []*func(s *Scope), fnames []string) bool { + var names []string + for _, f := range funcs { + fnames := strings.Split(runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name(), ".") + names = append(names, fnames[len(fnames)-1]) + } + return reflect.DeepEqual(names, fnames) +} + +func create(s *Scope) {} +func beforeCreate1(s *Scope) {} +func beforeCreate2(s *Scope) {} +func afterCreate1(s *Scope) {} +func afterCreate2(s *Scope) {} + +func TestRegisterCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("before_create2", beforeCreate2) + callback.Create().Register("create", create) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Register("after_create2", afterCreate2) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback") + } +} + +func TestRegisterCallbackWithOrder(t *testing.T) { + var callback1 = &Callback{} + callback1.Create().Register("before_create1", beforeCreate1) + callback1.Create().Register("create", create) + callback1.Create().Register("after_create1", afterCreate1) + callback1.Create().Before("after_create1").Register("after_create2", afterCreate2) + if !equalFuncs(callback1.creates, []string{"beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &Callback{} + + callback2.Update().Register("create", create) + callback2.Update().Before("create").Register("before_create1", beforeCreate1) + callback2.Update().After("after_create2").Register("after_create1", afterCreate1) + callback2.Update().Before("before_create1").Register("before_create2", beforeCreate2) + callback2.Update().Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.updates, []string{"beforeCreate2", "beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } +} + +func TestRegisterCallbackWithComplexOrder(t *testing.T) { + var callback1 = &Callback{} + + callback1.Query().Before("after_create1").After("before_create1").Register("create", create) + callback1.Query().Register("before_create1", beforeCreate1) + callback1.Query().Register("after_create1", afterCreate1) + + if !equalFuncs(callback1.queries, []string{"beforeCreate1", "create", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &Callback{} + + callback2.Delete().Before("after_create1").After("before_create1").Register("create", create) + callback2.Delete().Before("create").Register("before_create1", beforeCreate1) + callback2.Delete().After("before_create1").Register("before_create2", beforeCreate2) + callback2.Delete().Register("after_create1", afterCreate1) + callback2.Delete().After("after_create1").Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.deletes, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback with order") + } +} + +func replaceCreate(s *Scope) {} + +func TestReplaceCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Replace("create", 
replaceCreate) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "replaceCreate", "afterCreate1"}) { + t.Errorf("replace callback") + } +} + +func TestRemoveCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Remove("create") + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "afterCreate1"}) { + t.Errorf("remove callback") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_update.go b/vendor/github.com/jinzhu/gorm/callback_update.go new file mode 100644 index 0000000..aa27b5f --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_update.go @@ -0,0 +1,104 @@ +package gorm + +import ( + "fmt" + "strings" +) + +// Define callbacks for updating +func init() { + DefaultCallback.Update().Register("gorm:assign_updating_attributes", assignUpdatingAttributesCallback) + DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback) + DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback) + DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback) + DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback) + DefaultCallback.Update().Register("gorm:update", updateCallback) + DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback) + DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback) + DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) +} + +// assignUpdatingAttributesCallback assign updating attributes to model +func assignUpdatingAttributesCallback(scope *Scope) { + if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok { + if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate { + scope.InstanceSet("gorm:update_attrs", updateMaps) + } else { + scope.SkipLeft() + } + } +} + +// beforeUpdateCallback will invoke `BeforeSave`, `BeforeUpdate` method before updating +func beforeUpdateCallback(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + if !scope.HasError() { + scope.CallMethod("BeforeSave") + } + if !scope.HasError() { + scope.CallMethod("BeforeUpdate") + } + } +} + +// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating +func updateTimeStampForUpdateCallback(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + scope.SetColumn("UpdatedAt", NowFunc()) + } +} + +// updateCallback the callback used to update data to database +func updateCallback(scope *Scope) { + if !scope.HasError() { + var sqls []string + + if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok { + for column, value := range updateAttrs.(map[string]interface{}) { + sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value))) + } + } else { + for _, field := range scope.Fields() { + if scope.changeableField(field) { + if !field.IsPrimaryKey && field.IsNormal { + sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { + for _, foreignKey := range relationship.ForeignDBNames { + if foreignField, ok := scope.FieldByName(foreignKey); ok && 
!scope.changeableField(foreignField) { + sqls = append(sqls, + fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface()))) + } + } + } + } + } + } + + var extraOption string + if str, ok := scope.Get("gorm:update_option"); ok { + extraOption = fmt.Sprint(str) + } + + if len(sqls) > 0 { + scope.Raw(fmt.Sprintf( + "UPDATE %v SET %v%v%v", + scope.QuotedTableName(), + strings.Join(sqls, ", "), + addExtraSpaceIfExist(scope.CombinedConditionSql()), + addExtraSpaceIfExist(extraOption), + )).Exec() + } + } +} + +// afterUpdateCallback will invoke `AfterUpdate`, `AfterSave` method after updating +func afterUpdateCallback(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + if !scope.HasError() { + scope.CallMethod("AfterUpdate") + } + if !scope.HasError() { + scope.CallMethod("AfterSave") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/callbacks_test.go b/vendor/github.com/jinzhu/gorm/callbacks_test.go new file mode 100644 index 0000000..a58913d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callbacks_test.go @@ -0,0 +1,177 @@ +package gorm_test + +import ( + "errors" + + "github.com/jinzhu/gorm" + + "reflect" + "testing" +) + +func (s *Product) BeforeCreate() (err error) { + if s.Code == "Invalid" { + err = errors.New("invalid product") + } + s.BeforeCreateCallTimes = s.BeforeCreateCallTimes + 1 + return +} + +func (s *Product) BeforeUpdate() (err error) { + if s.Code == "dont_update" { + err = errors.New("can't update") + } + s.BeforeUpdateCallTimes = s.BeforeUpdateCallTimes + 1 + return +} + +func (s *Product) BeforeSave() (err error) { + if s.Code == "dont_save" { + err = errors.New("can't save") + } + s.BeforeSaveCallTimes = s.BeforeSaveCallTimes + 1 + return +} + +func (s *Product) AfterFind() { + s.AfterFindCallTimes = s.AfterFindCallTimes + 1 +} + +func (s *Product) AfterCreate(tx *gorm.DB) { + tx.Model(s).UpdateColumn(Product{AfterCreateCallTimes: s.AfterCreateCallTimes + 1}) +} + +func (s *Product) AfterUpdate() { + s.AfterUpdateCallTimes = s.AfterUpdateCallTimes + 1 +} + +func (s *Product) AfterSave() (err error) { + if s.Code == "after_save_error" { + err = errors.New("can't save") + } + s.AfterSaveCallTimes = s.AfterSaveCallTimes + 1 + return +} + +func (s *Product) BeforeDelete() (err error) { + if s.Code == "dont_delete" { + err = errors.New("can't delete") + } + s.BeforeDeleteCallTimes = s.BeforeDeleteCallTimes + 1 + return +} + +func (s *Product) AfterDelete() (err error) { + if s.Code == "after_delete_error" { + err = errors.New("can't delete") + } + s.AfterDeleteCallTimes = s.AfterDeleteCallTimes + 1 + return +} + +func (s *Product) GetCallTimes() []int64 { + return []int64{s.BeforeCreateCallTimes, s.BeforeSaveCallTimes, s.BeforeUpdateCallTimes, s.AfterCreateCallTimes, s.AfterSaveCallTimes, s.AfterUpdateCallTimes, s.BeforeDeleteCallTimes, s.AfterDeleteCallTimes, s.AfterFindCallTimes} +} + +func TestRunCallbacks(t *testing.T) { + p := Product{Code: "unique_code", Price: 100} + DB.Save(&p) + + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) { + t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) { + t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes()) + } + + p.Price = 200 + DB.Save(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) { + t.Errorf("After update callbacks should be 
invoked successfully, %v", p.GetCallTimes()) + } + + var products []Product + DB.Find(&products, "code = ?", "unique_code") + if products[0].AfterFindCallTimes != 2 { + t.Errorf("AfterFind callbacks should work with slice") + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) { + t.Errorf("After update callbacks values are not saved, %v", p.GetCallTimes()) + } + + DB.Delete(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) { + t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + if DB.Where("Code = ?", "unique_code").First(&p).Error == nil { + t.Errorf("Can't find a deleted record") + } +} + +func TestCallbacksWithErrors(t *testing.T) { + p := Product{Code: "Invalid", Price: 100} + if DB.Save(&p).Error == nil { + t.Errorf("An error from before create callbacks happened when create with invalid value") + } + + if DB.Where("code = ?", "Invalid").First(&Product{}).Error == nil { + t.Errorf("Should not save record that have errors") + } + + if DB.Save(&Product{Code: "dont_save", Price: 100}).Error == nil { + t.Errorf("An error from after create callbacks happened when create with invalid value") + } + + p2 := Product{Code: "update_callback", Price: 100} + DB.Save(&p2) + + p2.Code = "dont_update" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before update callbacks happened when update with invalid value") + } + + if DB.Where("code = ?", "update_callback").First(&Product{}).Error != nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + if DB.Where("code = ?", "dont_update").First(&Product{}).Error == nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + p2.Code = "dont_save" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before save callbacks happened when update with invalid value") + } + + p3 := Product{Code: "dont_delete", Price: 100} + DB.Save(&p3) + if DB.Delete(&p3).Error == nil { + t.Errorf("An error from before delete callbacks happened when delete") + } + + if DB.Where("Code = ?", "dont_delete").First(&p3).Error != nil { + t.Errorf("An error from before delete callbacks happened") + } + + p4 := Product{Code: "after_save_error", Price: 100} + DB.Save(&p4) + if err := DB.First(&Product{}, "code = ?", "after_save_error").Error; err == nil { + t.Errorf("Record should be reverted if get an error in after save callback") + } + + p5 := Product{Code: "after_delete_error", Price: 100} + DB.Save(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record should be found") + } + + DB.Delete(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record shouldn't be deleted because of an error happened in after delete callback") + } +} diff --git a/vendor/github.com/jinzhu/gorm/create_test.go b/vendor/github.com/jinzhu/gorm/create_test.go new file mode 100644 index 0000000..dc82de5 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/create_test.go @@ -0,0 +1,164 @@ +package gorm_test + +import ( + "os" + "reflect" + "testing" + "time" +) + +func TestCreate(t *testing.T) { + float := 35.03554004971999 + user := User{Name: "CreateUser", Age: 18, Birthday: time.Now(), UserNum: Num(111), PasswordHash: []byte{'f', 'a', 'k', '4'}, Latitude: float} + + if !DB.NewRecord(user) || !DB.NewRecord(&user) { + t.Error("User should be new 
record before create") + } + + if count := DB.Save(&user).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + if DB.NewRecord(user) || DB.NewRecord(&user) { + t.Error("User should not new record after save") + } + + var newUser User + DB.First(&newUser, user.Id) + + if !reflect.DeepEqual(newUser.PasswordHash, []byte{'f', 'a', 'k', '4'}) { + t.Errorf("User's PasswordHash should be saved ([]byte)") + } + + if newUser.Age != 18 { + t.Errorf("User's Age should be saved (int)") + } + + if newUser.UserNum != Num(111) { + t.Errorf("User's UserNum should be saved (custom type)") + } + + if newUser.Latitude != float { + t.Errorf("Float64 should not be changed after save") + } + + if user.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + if newUser.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + DB.Model(user).Update("name", "create_user_new_name") + DB.First(&user, user.Id) + if user.CreatedAt.Format(time.RFC3339Nano) != newUser.CreatedAt.Format(time.RFC3339Nano) { + t.Errorf("CreatedAt should not be changed after update") + } +} + +func TestCreateWithNoGORMPrimayKey(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" { + t.Skip("Skipping this because MSSQL will return identity only if the table has an Id column") + } + + jt := JoinTable{From: 1, To: 2} + err := DB.Create(&jt).Error + if err != nil { + t.Errorf("No error should happen when create a record without a GORM primary key. But in the database this primary key exists and is the union of 2 or more fields\n But got: %s", err) + } +} + +func TestCreateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + if DB.Save(&animal).Error != nil { + t.Errorf("No error should happen when create a record without std primary key") + } + + if animal.Counter == 0 { + t.Errorf("No std primary key should be filled value after create") + } + + if animal.Name != "Ferdinand" { + t.Errorf("Default value should be overrided") + } + + // Test create with default value not overrided + an := Animal{From: "nerdz"} + + if DB.Save(&an).Error != nil { + t.Errorf("No error should happen when create an record without std primary key") + } + + // We must fetch the value again, to have the default fields updated + // (We can't do this in the update statements, since sql default can be expressions + // And be different from the fields' type (eg. a time.Time fields has a default value of "now()" + DB.Model(Animal{}).Where(&Animal{Counter: an.Counter}).First(&an) + + if an.Name != "galeone" { + t.Errorf("Default value should fill the field. 
But got %v", an.Name) + } +} + +func TestAnonymousScanner(t *testing.T) { + user := User{Name: "anonymous_scanner", Role: Role{Name: "admin"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_scanner") + if user2.Role.Name != "admin" { + t.Errorf("Should be able to get anonymous scanner") + } + + if !user2.IsAdmin() { + t.Errorf("Should be able to get anonymous scanner") + } +} + +func TestAnonymousField(t *testing.T) { + user := User{Name: "anonymous_field", Company: Company{Name: "company"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_field") + DB.Model(&user2).Related(&user2.Company) + if user2.Company.Name != "company" { + t.Errorf("Should be able to get anonymous field") + } +} + +func TestSelectWithCreate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_create") + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name != user.Name || queryuser.Age == user.Age { + t.Errorf("Should only create users with name column") + } + + if queryuser.BillingAddressID.Int64 == 0 || queryuser.ShippingAddressId != 0 || + queryuser.CreditCard.ID == 0 || len(queryuser.Emails) == 0 { + t.Errorf("Should only create selected relationships") + } +} + +func TestOmitWithCreate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_create") + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name == user.Name || queryuser.Age != user.Age { + t.Errorf("Should only create users with age column") + } + + if queryuser.BillingAddressID.Int64 != 0 || queryuser.ShippingAddressId == 0 || + queryuser.CreditCard.ID != 0 || len(queryuser.Emails) != 0 { + t.Errorf("Should not create omited relationships") + } +} diff --git a/vendor/github.com/jinzhu/gorm/customize_column_test.go b/vendor/github.com/jinzhu/gorm/customize_column_test.go new file mode 100644 index 0000000..177b4a5 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/customize_column_test.go @@ -0,0 +1,280 @@ +package gorm_test + +import ( + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type CustomizeColumn struct { + ID int64 `gorm:"column:mapped_id; primary_key:yes"` + Name string `gorm:"column:mapped_name"` + Date time.Time `gorm:"column:mapped_time"` +} + +// Make sure an ignored field does not interfere with another field's custom +// column name that matches the ignored field. 
+type CustomColumnAndIgnoredFieldClash struct { + Body string `sql:"-"` + RawBody string `gorm:"column:body"` +} + +func TestCustomizeColumn(t *testing.T) { + col := "mapped_name" + DB.DropTable(&CustomizeColumn{}) + DB.AutoMigrate(&CustomizeColumn{}) + + scope := DB.NewScope(&CustomizeColumn{}) + if !scope.Dialect().HasColumn(scope.TableName(), col) { + t.Errorf("CustomizeColumn should have column %s", col) + } + + col = "mapped_id" + if scope.PrimaryKey() != col { + t.Errorf("CustomizeColumn should have primary key %s, but got %q", col, scope.PrimaryKey()) + } + + expected := "foo" + cc := CustomizeColumn{ID: 666, Name: expected, Date: time.Now()} + + if count := DB.Create(&cc).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + var cc1 CustomizeColumn + DB.First(&cc1, 666) + + if cc1.Name != expected { + t.Errorf("Failed to query CustomizeColumn") + } + + cc.Name = "bar" + DB.Save(&cc) + + var cc2 CustomizeColumn + DB.First(&cc2, 666) + if cc2.Name != "bar" { + t.Errorf("Failed to query CustomizeColumn") + } +} + +func TestCustomColumnAndIgnoredFieldClash(t *testing.T) { + DB.DropTable(&CustomColumnAndIgnoredFieldClash{}) + if err := DB.AutoMigrate(&CustomColumnAndIgnoredFieldClash{}).Error; err != nil { + t.Errorf("Should not raise error: %s", err) + } +} + +type CustomizePerson struct { + IdPerson string `gorm:"column:idPerson;primary_key:true"` + Accounts []CustomizeAccount `gorm:"many2many:PersonAccount;associationforeignkey:idAccount;foreignkey:idPerson"` +} + +type CustomizeAccount struct { + IdAccount string `gorm:"column:idAccount;primary_key:true"` + Name string +} + +func TestManyToManyWithCustomizedColumn(t *testing.T) { + DB.DropTable(&CustomizePerson{}, &CustomizeAccount{}, "PersonAccount") + DB.AutoMigrate(&CustomizePerson{}, &CustomizeAccount{}) + + account := CustomizeAccount{IdAccount: "account", Name: "id1"} + person := CustomizePerson{ + IdPerson: "person", + Accounts: []CustomizeAccount{account}, + } + + if err := DB.Create(&account).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if err := DB.Create(&person).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + var person1 CustomizePerson + scope := DB.NewScope(nil) + if err := DB.Preload("Accounts").First(&person1, scope.Quote("idPerson")+" = ?", person.IdPerson).Error; err != nil { + t.Errorf("no error should happen when preloading customized column many2many relations, but got %v", err) + } + + if len(person1.Accounts) != 1 || person1.Accounts[0].IdAccount != "account" { + t.Errorf("should preload correct accounts") + } +} + +type CustomizeUser struct { + gorm.Model + Email string `sql:"column:email_address"` +} + +type CustomizeInvitation struct { + gorm.Model + Address string `sql:"column:invitation"` + Person *CustomizeUser `gorm:"foreignkey:Email;associationforeignkey:invitation"` +} + +func TestOneToOneWithCustomizedColumn(t *testing.T) { + DB.DropTable(&CustomizeUser{}, &CustomizeInvitation{}) + DB.AutoMigrate(&CustomizeUser{}, &CustomizeInvitation{}) + + user := CustomizeUser{ + Email: "hello@example.com", + } + invitation := CustomizeInvitation{ + Address: "hello@example.com", + } + + DB.Create(&user) + DB.Create(&invitation) + + var invitation2 CustomizeInvitation + if err := DB.Preload("Person").Find(&invitation2, invitation.ID).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if invitation2.Person.Email != user.Email { + t.Errorf("Should preload one to one 
relation with customize foreign keys") + } +} + +type PromotionDiscount struct { + gorm.Model + Name string + Coupons []*PromotionCoupon `gorm:"ForeignKey:discount_id"` + Rule *PromotionRule `gorm:"ForeignKey:discount_id"` + Benefits []PromotionBenefit `gorm:"ForeignKey:promotion_id"` +} + +type PromotionBenefit struct { + gorm.Model + Name string + PromotionID uint + Discount PromotionDiscount `gorm:"ForeignKey:promotion_id"` +} + +type PromotionCoupon struct { + gorm.Model + Code string + DiscountID uint + Discount PromotionDiscount +} + +type PromotionRule struct { + gorm.Model + Name string + Begin *time.Time + End *time.Time + DiscountID uint + Discount *PromotionDiscount +} + +func TestOneToManyWithCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionCoupon{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionCoupon{}) + + discount := PromotionDiscount{ + Name: "Happy New Year", + Coupons: []*PromotionCoupon{ + {Code: "newyear1"}, + {Code: "newyear2"}, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Coupons").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if len(discount.Coupons) != 2 { + t.Errorf("should find two coupons") + } + + var coupon PromotionCoupon + if err := DB.Preload("Discount").First(&coupon, "code = ?", "newyear1").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if coupon.Discount.Name != "Happy New Year" { + t.Errorf("should preload discount from coupon") + } +} + +func TestHasOneWithPartialCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionRule{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionRule{}) + + var begin = time.Now() + var end = time.Now().Add(24 * time.Hour) + discount := PromotionDiscount{ + Name: "Happy New Year 2", + Rule: &PromotionRule{ + Name: "time_limited", + Begin: &begin, + End: &end, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Rule").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if discount.Rule.Begin.Format(time.RFC3339Nano) != begin.Format(time.RFC3339Nano) { + t.Errorf("Should be able to preload Rule") + } + + var rule PromotionRule + if err := DB.Preload("Discount").First(&rule, "name = ?", "time_limited").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if rule.Discount.Name != "Happy New Year 2" { + t.Errorf("should preload discount from rule") + } +} + +func TestBelongsToWithPartialCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionBenefit{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionBenefit{}) + + discount := PromotionDiscount{ + Name: "Happy New Year 3", + Benefits: []PromotionBenefit{ + {Name: "free cod"}, + {Name: "free shipping"}, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Benefits").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if len(discount.Benefits) != 2 { + t.Errorf("should find two benefits") + } + + var benefit PromotionBenefit + if err := 
DB.Preload("Discount").First(&benefit, "name = ?", "free cod").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if benefit.Discount.Name != "Happy New Year 3" { + t.Errorf("should preload discount from coupon") + } +} diff --git a/vendor/github.com/jinzhu/gorm/delete_test.go b/vendor/github.com/jinzhu/gorm/delete_test.go new file mode 100644 index 0000000..d3de0a6 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/delete_test.go @@ -0,0 +1,68 @@ +package gorm_test + +import ( + "testing" + "time" +) + +func TestDelete(t *testing.T) { + user1, user2 := User{Name: "delete1"}, User{Name: "delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if err := DB.Delete(&user1).Error; err != nil { + t.Errorf("No error should happen when delete a record, err=%s", err) + } + + if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("Other users that not deleted should be found-able") + } +} + +func TestInlineDelete(t *testing.T) { + user1, user2 := User{Name: "inline_delete1"}, User{Name: "inline_delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if DB.Delete(&User{}, user1.Id).Error != nil { + t.Errorf("No error should happen when delete a record") + } else if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if err := DB.Delete(&User{}, "name = ?", user2.Name).Error; err != nil { + t.Errorf("No error should happen when delete a record, err=%s", err) + } else if !DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } +} + +func TestSoftDelete(t *testing.T) { + type User struct { + Id int64 + Name string + DeletedAt *time.Time + } + DB.AutoMigrate(&User{}) + + user := User{Name: "soft_delete"} + DB.Save(&user) + DB.Delete(&user) + + if DB.First(&User{}, "name = ?", user.Name).Error == nil { + t.Errorf("Can't find a soft deleted record") + } + + if err := DB.Unscoped().First(&User{}, "name = ?", user.Name).Error; err != nil { + t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err) + } + + DB.Unscoped().Delete(&user) + if !DB.Unscoped().First(&User{}, "name = ?", user.Name).RecordNotFound() { + t.Errorf("Can't find permanently deleted record") + } +} diff --git a/vendor/github.com/jinzhu/gorm/dialect.go b/vendor/github.com/jinzhu/gorm/dialect.go new file mode 100644 index 0000000..6c9405d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect.go @@ -0,0 +1,100 @@ +package gorm + +import ( + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" +) + +// Dialect interface contains behaviors that differ across SQL database +type Dialect interface { + // GetName get dialect's name + GetName() string + + // SetDB set db for dialect + SetDB(db *sql.DB) + + // BindVar return the placeholder for actual values in SQL statements, in many dbs it is "?", Postgres using $1 + BindVar(i int) string + // Quote quotes field name to avoid SQL parsing exceptions by using a reserved word as a field name + Quote(key string) string + // DataTypeOf return data's sql type + DataTypeOf(field *StructField) string + + // HasIndex check has index or not + HasIndex(tableName string, indexName string) bool + // HasForeignKey check has foreign key or not + HasForeignKey(tableName string, foreignKeyName string) bool + // RemoveIndex remove index + RemoveIndex(tableName string, indexName 
string) error
+	// HasTable check has table or not
+	HasTable(tableName string) bool
+	// HasColumn check has column or not
+	HasColumn(tableName string, columnName string) bool
+
+	// LimitAndOffsetSQL return generated SQL with Limit and Offset, as mssql has a special case
+	LimitAndOffsetSQL(limit, offset int) string
+	// SelectFromDummyTable return select values; for most dbs `SELECT values` just works, mysql needs `SELECT value FROM DUAL`
+	SelectFromDummyTable() string
+	// LastInsertIDReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING`
+	LastInsertIDReturningSuffix(tableName, columnName string) string
+}
+
+var dialectsMap = map[string]Dialect{}
+
+func newDialect(name string, db *sql.DB) Dialect {
+	if value, ok := dialectsMap[name]; ok {
+		dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect)
+		dialect.SetDB(db)
+		return dialect
+	}
+
+	fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name)
+	dialect := &commonDialect{}
+	dialect.SetDB(db)
+	return dialect
+}
+
+// RegisterDialect register new dialect
+func RegisterDialect(name string, dialect Dialect) {
+	dialectsMap[name] = dialect
+}
+
+// ParseFieldStructForDialect parse field struct for dialect
+func ParseFieldStructForDialect(field *StructField) (fieldValue reflect.Value, sqlType string, size int, additionalType string) {
+	// Get redirected field type
+	var reflectType = field.Struct.Type
+	for reflectType.Kind() == reflect.Ptr {
+		reflectType = reflectType.Elem()
+	}
+
+	// Get redirected field value
+	fieldValue = reflect.Indirect(reflect.New(reflectType))
+
+	// Get scanner's real value
+	var getScannerValue func(reflect.Value)
+	getScannerValue = func(value reflect.Value) {
+		fieldValue = value
+		if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct {
+			getScannerValue(fieldValue.Field(0))
+		}
+	}
+	getScannerValue(fieldValue)
+
+	// Default Size
+	if num, ok := field.TagSettings["SIZE"]; ok {
+		size, _ = strconv.Atoi(num)
+	} else {
+		size = 255
+	}
+
+	// Default type from tag setting
+	additionalType = field.TagSettings["NOT NULL"] + " " + field.TagSettings["UNIQUE"]
+	if value, ok := field.TagSettings["DEFAULT"]; ok {
+		additionalType = additionalType + " DEFAULT " + value
+	}
+
+	return fieldValue, field.TagSettings["TYPE"], size, strings.TrimSpace(additionalType)
+}
diff --git a/vendor/github.com/jinzhu/gorm/dialect_common.go b/vendor/github.com/jinzhu/gorm/dialect_common.go
new file mode 100644
index 0000000..f009271
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/dialect_common.go
@@ -0,0 +1,137 @@
+package gorm
+
+import (
+	"database/sql"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+type commonDialect struct {
+	db *sql.DB
+}
+
+func init() {
+	RegisterDialect("common", &commonDialect{})
+}
+
+func (commonDialect) GetName() string {
+	return "common"
+}
+
+func (s *commonDialect) SetDB(db *sql.DB) {
+	s.db = db
+}
+
+func (commonDialect) BindVar(i int) string {
+	return "$$" // ?
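+	// NOTE: "$$" is this fallback dialect's generic placeholder; concrete
+	// dialects override BindVar where needed, e.g. the postgres dialect later
+	// in this change returns numbered placeholders:
+	//
+	//	BindVar(1) // "$1" on postgres, "$$" here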
+} + +func (commonDialect) Quote(key string) string { + return fmt.Sprintf(`"%s"`, key) +} + +func (commonDialect) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "BOOLEAN" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok { + sqlType = "INTEGER AUTO_INCREMENT" + } else { + sqlType = "INTEGER" + } + case reflect.Int64, reflect.Uint64: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok { + sqlType = "BIGINT AUTO_INCREMENT" + } else { + sqlType = "BIGINT" + } + case reflect.Float32, reflect.Float64: + sqlType = "FLOAT" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("VARCHAR(%d)", size) + } else { + sqlType = "VARCHAR(65532)" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "TIMESTAMP" + } + default: + if _, ok := dataValue.Interface().([]byte); ok { + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("BINARY(%d)", size) + } else { + sqlType = "BINARY(65532)" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s commonDialect) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", s.currentDatabase(), tableName, indexName).Scan(&count) + return count > 0 +} + +func (s commonDialect) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName)) + return err +} + +func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool { + return false +} + +func (s commonDialect) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", s.currentDatabase(), tableName).Scan(&count) + return count > 0 +} + +func (s commonDialect) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? 
AND column_name = ?", s.currentDatabase(), tableName, columnName).Scan(&count) + return count > 0 +} + +func (s commonDialect) currentDatabase() (name string) { + s.db.QueryRow("SELECT DATABASE()").Scan(&name) + return +} + +func (commonDialect) LimitAndOffsetSQL(limit, offset int) (sql string) { + if limit > 0 || offset > 0 { + if limit >= 0 { + sql += fmt.Sprintf(" LIMIT %d", limit) + } + if offset >= 0 { + sql += fmt.Sprintf(" OFFSET %d", offset) + } + } + return +} + +func (commonDialect) SelectFromDummyTable() string { + return "" +} + +func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string { + return "" +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_mysql.go b/vendor/github.com/jinzhu/gorm/dialect_mysql.go new file mode 100644 index 0000000..6fade59 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_mysql.go @@ -0,0 +1,113 @@ +package gorm + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type mysql struct { + commonDialect +} + +func init() { + RegisterDialect("mysql", &mysql{}) +} + +func (mysql) GetName() string { + return "mysql" +} + +func (mysql) Quote(key string) string { + return fmt.Sprintf("`%s`", key) +} + +// Get Data Type for MySQL Dialect +func (mysql) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "int AUTO_INCREMENT" + } else { + sqlType = "int" + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "int unsigned AUTO_INCREMENT" + } else { + sqlType = "int unsigned" + } + case reflect.Int64: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "bigint AUTO_INCREMENT" + } else { + sqlType = "bigint" + } + case reflect.Uint64: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "bigint unsigned AUTO_INCREMENT" + } else { + sqlType = "bigint unsigned" + } + case reflect.Float32, reflect.Float64: + sqlType = "double" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "longtext" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + if _, ok := field.TagSettings["NOT NULL"]; ok { + sqlType = "timestamp" + } else { + sqlType = "timestamp NULL" + } + } + default: + if _, ok := dataValue.Interface().([]byte); ok { + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varbinary(%d)", size) + } else { + sqlType = "longblob" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for mysql", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s mysql) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName))) + return err +} + +func (s mysql) HasForeignKey(tableName string, foreignKeyName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? 
AND CONSTRAINT_TYPE='FOREIGN KEY'", s.currentDatabase(), tableName, foreignKeyName).Scan(&count) + return count > 0 +} + +func (s mysql) currentDatabase() (name string) { + s.db.QueryRow("SELECT DATABASE()").Scan(&name) + return +} + +func (mysql) SelectFromDummyTable() string { + return "FROM DUAL" +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_postgres.go b/vendor/github.com/jinzhu/gorm/dialect_postgres.go new file mode 100644 index 0000000..09ac596 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_postgres.go @@ -0,0 +1,132 @@ +package gorm + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type postgres struct { + commonDialect +} + +func init() { + RegisterDialect("postgres", &postgres{}) +} + +func (postgres) GetName() string { + return "postgres" +} + +func (postgres) BindVar(i int) string { + return fmt.Sprintf("$%v", i) +} + +func (postgres) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "serial" + } else { + sqlType = "integer" + } + case reflect.Int64, reflect.Uint64: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "bigserial" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "numeric" + case reflect.String: + if _, ok := field.TagSettings["SIZE"]; !ok { + size = 0 // if SIZE haven't been set, use `text` as the default type, as there are no performance different + } + + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "text" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "timestamp with time zone" + } + case reflect.Map: + if dataValue.Type().Name() == "Hstore" { + sqlType = "hstore" + } + default: + if isByteArrayOrSlice(dataValue) { + sqlType = "bytea" + } else if isUUID(dataValue) { + sqlType = "uuid" + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s postgres) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2", tableName, indexName).Scan(&count) + return count > 0 +} + +func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool { + var count int + s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", s.currentDatabase(), foreignKeyName).Scan(&count) + return count > 0 +} + +func (s postgres) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE'", tableName).Scan(&count) + return count > 0 +} + +func (s postgres) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2", tableName, columnName).Scan(&count) + 
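+	// These information_schema lookups use postgres ordinal placeholders
+	// ($1, $2, ...), consistent with this dialect's BindVar above.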
return count > 0 +} + +func (s postgres) currentDatabase() (name string) { + s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name) + return +} + +func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string { + return fmt.Sprintf("RETURNING %v.%v", tableName, key) +} + +func (postgres) SupportLastInsertID() bool { + return false +} + +func isByteArrayOrSlice(value reflect.Value) bool { + return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0)) +} + +func isUUID(value reflect.Value) bool { + if value.Kind() != reflect.Array || value.Type().Len() != 16 { + return false + } + typename := value.Type().Name() + lower := strings.ToLower(typename) + return "uuid" == lower || "guid" == lower +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go new file mode 100644 index 0000000..5c262aa --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go @@ -0,0 +1,106 @@ +package gorm + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type sqlite3 struct { + commonDialect +} + +func init() { + RegisterDialect("sqlite", &sqlite3{}) + RegisterDialect("sqlite3", &sqlite3{}) +} + +func (sqlite3) GetName() string { + return "sqlite3" +} + +// Get Data Type for Sqlite Dialect +func (sqlite3) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "bool" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if field.IsPrimaryKey { + sqlType = "integer primary key autoincrement" + } else { + sqlType = "integer" + } + case reflect.Int64, reflect.Uint64: + if field.IsPrimaryKey { + sqlType = "integer primary key autoincrement" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "real" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "text" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "datetime" + } + default: + if _, ok := dataValue.Interface().([]byte); ok { + sqlType = "blob" + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite3", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s sqlite3) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count) + return count > 0 +} + +func (s sqlite3) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count) + return count > 0 +} + +func (s sqlite3) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? 
AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count) + return count > 0 +} + +func (s sqlite3) currentDatabase() (name string) { + var ( + ifaces = make([]interface{}, 3) + pointers = make([]*string, 3) + i int + ) + for i = 0; i < 3; i++ { + ifaces[i] = &pointers[i] + } + if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil { + return + } + if pointers[1] != nil { + name = *pointers[1] + } + return +} diff --git a/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go b/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go new file mode 100644 index 0000000..5b994f9 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go @@ -0,0 +1,150 @@ +package mssql + +import ( + "database/sql" + "fmt" + "reflect" + "strings" + "time" + + _ "github.com/denisenkom/go-mssqldb" + "github.com/jinzhu/gorm" +) + +func setIdentityInsert(scope *gorm.Scope) { + if scope.Dialect().GetName() == "mssql" { + scope.NewDB().Exec(fmt.Sprintf("SET IDENTITY_INSERT %v ON", scope.TableName())) + } +} + +func init() { + gorm.DefaultCallback.Create().After("gorm:begin_transaction").Register("mssql:set_identity_insert", setIdentityInsert) + gorm.RegisterDialect("mssql", &mssql{}) +} + +type mssql struct { + db *sql.DB +} + +func (mssql) GetName() string { + return "mssql" +} + +func (s *mssql) SetDB(db *sql.DB) { + s.db = db +} + +func (mssql) BindVar(i int) string { + return "$$" // ? +} + +func (mssql) Quote(key string) string { + return fmt.Sprintf(`"%s"`, key) +} + +func (mssql) DataTypeOf(field *gorm.StructField) string { + var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "bit" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "int IDENTITY(1,1)" + } else { + sqlType = "int" + } + case reflect.Int64, reflect.Uint64: + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok || field.IsPrimaryKey { + sqlType = "bigint IDENTITY(1,1)" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "float" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("nvarchar(%d)", size) + } else { + sqlType = "text" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "datetime2" + } + default: + if _, ok := dataValue.Interface().([]byte); ok { + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "text" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for mssql", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s mssql) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM sys.indexes WHERE name=? 
AND object_id=OBJECT_ID(?)", indexName, tableName).Scan(&count) + return count > 0 +} + +func (s mssql) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName))) + return err +} + +func (s mssql) HasForeignKey(tableName string, foreignKeyName string) bool { + return false +} + +func (s mssql) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = ? AND table_catalog = ?", tableName, s.currentDatabase()).Scan(&count) + return count > 0 +} + +func (s mssql) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM information_schema.columns WHERE table_catalog = ? AND table_name = ? AND column_name = ?", s.currentDatabase(), tableName, columnName).Scan(&count) + return count > 0 +} + +func (s mssql) currentDatabase() (name string) { + s.db.QueryRow("SELECT DB_NAME() AS [Current Database]").Scan(&name) + return +} + +func (mssql) LimitAndOffsetSQL(limit, offset int) (sql string) { + if limit > 0 || offset > 0 { + if offset < 0 { + offset = 0 + } + + sql += fmt.Sprintf(" OFFSET %d ROWS", offset) + + if limit >= 0 { + sql += fmt.Sprintf(" FETCH NEXT %d ROWS ONLY", limit) + } + } + return +} + +func (mssql) SelectFromDummyTable() string { + return "" +} + +func (mssql) LastInsertIDReturningSuffix(tableName, columnName string) string { + return "" +} diff --git a/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go b/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go new file mode 100644 index 0000000..9deba48 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go @@ -0,0 +1,3 @@ +package mysql + +import _ "github.com/go-sql-driver/mysql" diff --git a/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go b/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go new file mode 100644 index 0000000..adeeec7 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go @@ -0,0 +1,54 @@ +package postgres + +import ( + "database/sql" + "database/sql/driver" + + _ "github.com/lib/pq" + "github.com/lib/pq/hstore" +) + +type Hstore map[string]*string + +// Value get value of Hstore +func (h Hstore) Value() (driver.Value, error) { + hstore := hstore.Hstore{Map: map[string]sql.NullString{}} + if len(h) == 0 { + return nil, nil + } + + for key, value := range h { + var s sql.NullString + if value != nil { + s.String = *value + s.Valid = true + } + hstore.Map[key] = s + } + return hstore.Value() +} + +// Scan scan value into Hstore +func (h *Hstore) Scan(value interface{}) error { + hstore := hstore.Hstore{} + + if err := hstore.Scan(value); err != nil { + return err + } + + if len(hstore.Map) == 0 { + return nil + } + + *h = Hstore{} + for k := range hstore.Map { + if hstore.Map[k].Valid { + s := hstore.Map[k].String + (*h)[k] = &s + } else { + (*h)[k] = nil + } + } + + return nil +} diff --git a/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go b/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go new file mode 100644 index 0000000..069ad3a --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go @@ -0,0 +1,3 @@ +package sqlite + +import _ "github.com/mattn/go-sqlite3" diff --git a/vendor/github.com/jinzhu/gorm/embedded_struct_test.go b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go new file mode 100644 index 0000000..7be75d9 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go @@ -0,0 
+1,48 @@ +package gorm_test + +import "testing" + +type BasePost struct { + Id int64 + Title string + URL string +} + +type HNPost struct { + BasePost + Upvotes int32 +} + +type EngadgetPost struct { + BasePost BasePost `gorm:"embedded"` + ImageUrl string +} + +func TestSaveAndQueryEmbeddedStruct(t *testing.T) { + DB.Save(&HNPost{BasePost: BasePost{Title: "news"}}) + DB.Save(&HNPost{BasePost: BasePost{Title: "hn_news"}}) + var news HNPost + if err := DB.First(&news, "title = ?", "hn_news").Error; err != nil { + t.Errorf("no error should happen when query with embedded struct, but got %v", err) + } else if news.Title != "hn_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + DB.Save(&EngadgetPost{BasePost: BasePost{Title: "engadget_news"}}) + var egNews EngadgetPost + if err := DB.First(&egNews, "title = ?", "engadget_news").Error; err != nil { + t.Errorf("no error should happen when query with embedded struct, but got %v", err) + } else if egNews.BasePost.Title != "engadget_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + if DB.NewScope(&HNPost{}).PrimaryField() == nil { + t.Errorf("primary key with embedded struct should works") + } + + for _, field := range DB.NewScope(&HNPost{}).Fields() { + if field.Name == "BasePost" { + t.Errorf("scope Fields should not contain embedded struct") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/errors.go b/vendor/github.com/jinzhu/gorm/errors.go new file mode 100644 index 0000000..ce3a25c --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/errors.go @@ -0,0 +1,58 @@ +package gorm + +import ( + "errors" + "strings" +) + +var ( + // ErrRecordNotFound record not found error, happens when haven't find any matched data when looking up with a struct + ErrRecordNotFound = errors.New("record not found") + // ErrInvalidSQL invalid SQL error, happens when you passed invalid SQL + ErrInvalidSQL = errors.New("invalid SQL") + // ErrInvalidTransaction invalid transaction when you are trying to `Commit` or `Rollback` + ErrInvalidTransaction = errors.New("no valid transaction") + // ErrCantStartTransaction can't start transaction when you are trying to start one with `Begin` + ErrCantStartTransaction = errors.New("can't start transaction") + // ErrUnaddressable unaddressable value + ErrUnaddressable = errors.New("using unaddressable value") +) + +type errorsInterface interface { + GetErrors() []error +} + +// Errors contains all happened errors +type Errors struct { + errors []error +} + +// GetErrors get all happened errors +func (errs Errors) GetErrors() []error { + return errs.errors +} + +// Add add an error +func (errs *Errors) Add(err error) { + if errors, ok := err.(errorsInterface); ok { + for _, err := range errors.GetErrors() { + errs.Add(err) + } + } else { + for _, e := range errs.errors { + if err == e { + return + } + } + errs.errors = append(errs.errors, err) + } +} + +// Error format happened errors +func (errs Errors) Error() string { + var errors = []string{} + for _, e := range errs.errors { + errors = append(errors, e.Error()) + } + return strings.Join(errors, "; ") +} diff --git a/vendor/github.com/jinzhu/gorm/field.go b/vendor/github.com/jinzhu/gorm/field.go new file mode 100644 index 0000000..11c410b --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/field.go @@ -0,0 +1,58 @@ +package gorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" +) + +// Field model field definition +type Field struct { + *StructField + IsBlank bool + Field reflect.Value +} + +// Set set a 
value to the field +func (field *Field) Set(value interface{}) (err error) { + if !field.Field.IsValid() { + return errors.New("field value not valid") + } + + if !field.Field.CanAddr() { + return ErrUnaddressable + } + + reflectValue, ok := value.(reflect.Value) + if !ok { + reflectValue = reflect.ValueOf(value) + } + + fieldValue := field.Field + if reflectValue.IsValid() { + if reflectValue.Type().ConvertibleTo(fieldValue.Type()) { + fieldValue.Set(reflectValue.Convert(fieldValue.Type())) + } else { + if fieldValue.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + fieldValue.Set(reflect.New(field.Struct.Type.Elem())) + } + fieldValue = fieldValue.Elem() + } + + if reflectValue.Type().ConvertibleTo(fieldValue.Type()) { + fieldValue.Set(reflectValue.Convert(fieldValue.Type())) + } else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { + err = scanner.Scan(reflectValue.Interface()) + } else { + err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type()) + } + } + } else { + field.Field.Set(reflect.Zero(field.Field.Type())) + } + + field.IsBlank = isBlank(field.Field) + return err +} diff --git a/vendor/github.com/jinzhu/gorm/field_test.go b/vendor/github.com/jinzhu/gorm/field_test.go new file mode 100644 index 0000000..30e9a77 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/field_test.go @@ -0,0 +1,49 @@ +package gorm_test + +import ( + "testing" + + "github.com/jinzhu/gorm" +) + +type CalculateField struct { + gorm.Model + Name string + Children []CalculateFieldChild + Category CalculateFieldCategory + EmbeddedField +} + +type EmbeddedField struct { + EmbeddedName string `sql:"NOT NULL;DEFAULT:'hello'"` +} + +type CalculateFieldChild struct { + gorm.Model + CalculateFieldID uint + Name string +} + +type CalculateFieldCategory struct { + gorm.Model + CalculateFieldID uint + Name string +} + +func TestCalculateField(t *testing.T) { + var field CalculateField + var scope = DB.NewScope(&field) + if field, ok := scope.FieldByName("Children"); !ok || field.Relationship == nil { + t.Errorf("Should calculate fields correctly for the first time") + } + + if field, ok := scope.FieldByName("Category"); !ok || field.Relationship == nil { + t.Errorf("Should calculate fields correctly for the first time") + } + + if field, ok := scope.FieldByName("embedded_name"); !ok { + t.Errorf("should find embedded field") + } else if _, ok := field.TagSettings["NOT NULL"]; !ok { + t.Errorf("should find embedded field's tag settings") + } +} diff --git a/vendor/github.com/jinzhu/gorm/interface.go b/vendor/github.com/jinzhu/gorm/interface.go new file mode 100644 index 0000000..7b02aa6 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/interface.go @@ -0,0 +1,19 @@ +package gorm + +import "database/sql" + +type sqlCommon interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Prepare(query string) (*sql.Stmt, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +type sqlDb interface { + Begin() (*sql.Tx, error) +} + +type sqlTx interface { + Commit() error + Rollback() error +} diff --git a/vendor/github.com/jinzhu/gorm/join_table_handler.go b/vendor/github.com/jinzhu/gorm/join_table_handler.go new file mode 100644 index 0000000..18c12a8 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/join_table_handler.go @@ -0,0 +1,204 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// JoinTableHandlerInterface is an 
interface for how to handle many2many relations +type JoinTableHandlerInterface interface { + // initialize join table handler + Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) + // Table return join table's table name + Table(db *DB) string + // Add create relationship in join table for source and destination + Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error + // Delete delete relationship in join table for sources + Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error + // JoinWith query with `Join` conditions + JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB + // SourceForeignKeys return source foreign keys + SourceForeignKeys() []JoinTableForeignKey + // DestinationForeignKeys return destination foreign keys + DestinationForeignKeys() []JoinTableForeignKey +} + +// JoinTableForeignKey join table foreign key struct +type JoinTableForeignKey struct { + DBName string + AssociationDBName string +} + +// JoinTableSource is a struct that contains model type and foreign keys +type JoinTableSource struct { + ModelType reflect.Type + ForeignKeys []JoinTableForeignKey +} + +// JoinTableHandler default join table handler +type JoinTableHandler struct { + TableName string `sql:"-"` + Source JoinTableSource `sql:"-"` + Destination JoinTableSource `sql:"-"` +} + +// SourceForeignKeys return source foreign keys +func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey { + return s.Source.ForeignKeys +} + +// DestinationForeignKeys return destination foreign keys +func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey { + return s.Destination.ForeignKeys +} + +// Setup initialize a default join table handler +func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) { + s.TableName = tableName + + s.Source = JoinTableSource{ModelType: source} + for idx, dbName := range relationship.ForeignFieldNames { + s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{ + DBName: relationship.ForeignDBNames[idx], + AssociationDBName: dbName, + }) + } + + s.Destination = JoinTableSource{ModelType: destination} + for idx, dbName := range relationship.AssociationForeignFieldNames { + s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{ + DBName: relationship.AssociationForeignDBNames[idx], + AssociationDBName: dbName, + }) + } +} + +// Table return join table's table name +func (s JoinTableHandler) Table(db *DB) string { + return s.TableName +} + +func (s JoinTableHandler) getSearchMap(db *DB, sources ...interface{}) map[string]interface{} { + values := map[string]interface{}{} + + for _, source := range sources { + scope := db.NewScope(source) + modelType := scope.GetModelStruct().ModelType + + if s.Source.ModelType == modelType { + for _, foreignKey := range s.Source.ForeignKeys { + if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok { + values[foreignKey.DBName] = field.Field.Interface() + } + } + } else if s.Destination.ModelType == modelType { + for _, foreignKey := range s.Destination.ForeignKeys { + if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok { + values[foreignKey.DBName] = field.Field.Interface() + } + } + } + } + return values +} + +// Add create relationship in join table for source and destination +func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, 
source interface{}, destination interface{}) error {
+	scope := db.NewScope("")
+	searchMap := s.getSearchMap(db, source, destination)
+
+	var assignColumns, binVars, conditions []string
+	var values []interface{}
+	for key, value := range searchMap {
+		assignColumns = append(assignColumns, scope.Quote(key))
+		binVars = append(binVars, `?`)
+		conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
+		values = append(values, value)
+	}
+
+	// Each value is needed twice: once for the inserted SELECT list and once
+	// for the WHERE NOT EXISTS condition below. Ranging over the slice while
+	// appending uses the original length, so this duplicates it exactly once.
+	for _, value := range values {
+		values = append(values, value)
+	}
+
+	quotedTable := scope.Quote(handler.Table(db))
+	sql := fmt.Sprintf(
+		"INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)",
+		quotedTable,
+		strings.Join(assignColumns, ","),
+		strings.Join(binVars, ","),
+		scope.Dialect().SelectFromDummyTable(),
+		quotedTable,
+		strings.Join(conditions, " AND "),
+	)
+
+	return db.Exec(sql, values...).Error
+}
+
+// Delete delete relationship in join table for sources
+func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error {
+	var (
+		scope      = db.NewScope(nil)
+		conditions []string
+		values     []interface{}
+	)
+
+	for key, value := range s.getSearchMap(db, sources...) {
+		conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key)))
+		values = append(values, value)
+	}
+
+	return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error
+}
+
+// JoinWith query with `Join` conditions
+func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB {
+	var (
+		scope           = db.NewScope(source)
+		tableName       = handler.Table(db)
+		quotedTableName = scope.Quote(tableName)
+		joinConditions  []string
+		values          []interface{}
+	)
+
+	if s.Source.ModelType == scope.GetModelStruct().ModelType {
+		destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName()
+		for _, foreignKey := range s.Destination.ForeignKeys {
+			joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName)))
+		}
+
+		var foreignDBNames []string
+		var foreignFieldNames []string
+
+		for _, foreignKey := range s.Source.ForeignKeys {
+			foreignDBNames = append(foreignDBNames, foreignKey.DBName)
+			if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok {
+				foreignFieldNames = append(foreignFieldNames, field.Name)
+			}
+		}
+
+		foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value)
+
+		var condString string
+		if len(foreignFieldValues) > 0 {
+			var quotedForeignDBNames []string
+			for _, dbName := range foreignDBNames {
+				quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName)
+			}
+
+			condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues))
+
+			keys := scope.getColumnAsArray(foreignFieldNames, scope.Value)
+			values = append(values, toQueryValues(keys))
+		} else {
+			// No source values to match; force an always-false condition.
+			condString = "1 <> 1"
+		}
+
+		return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))).
+			Where(condString, toQueryValues(foreignFieldValues)...)
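+		// For the Person/Address fixtures in join_table_test.go below, the
+		// generated clause looks roughly like (illustrative):
+		//
+		//	INNER JOIN person_addresses ON person_addresses.address_id = addresses.id
+		//	WHERE person_addresses.person_id IN (...)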
+	}
+
+	db.Error = errors.New("wrong source type for join table handler")
+	return db
+}
diff --git a/vendor/github.com/jinzhu/gorm/join_table_test.go b/vendor/github.com/jinzhu/gorm/join_table_test.go
new file mode 100644
index 0000000..1a83a9c
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/join_table_test.go
@@ -0,0 +1,72 @@
+package gorm_test
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/jinzhu/gorm"
+)
+
+type Person struct {
+	Id        int
+	Name      string
+	Addresses []*Address `gorm:"many2many:person_addresses;"`
+}
+
+type PersonAddress struct {
+	gorm.JoinTableHandler
+	PersonID  int
+	AddressID int
+	DeletedAt *time.Time
+	CreatedAt time.Time
+}
+
+func (*PersonAddress) Add(handler gorm.JoinTableHandlerInterface, db *gorm.DB, foreignValue interface{}, associationValue interface{}) error {
+	return db.Where(map[string]interface{}{
+		"person_id":  db.NewScope(foreignValue).PrimaryKeyValue(),
+		"address_id": db.NewScope(associationValue).PrimaryKeyValue(),
+	}).Assign(map[string]interface{}{
+		"person_id":  foreignValue,
+		"address_id": associationValue,
+		"deleted_at": gorm.Expr("NULL"),
+	}).FirstOrCreate(&PersonAddress{}).Error
+}
+
+func (*PersonAddress) Delete(handler gorm.JoinTableHandlerInterface, db *gorm.DB, sources ...interface{}) error {
+	return db.Delete(&PersonAddress{}).Error
+}
+
+func (pa *PersonAddress) JoinWith(handler gorm.JoinTableHandlerInterface, db *gorm.DB, source interface{}) *gorm.DB {
+	table := pa.Table(db)
+	return db.Joins("INNER JOIN person_addresses ON person_addresses.address_id = addresses.id").Where(fmt.Sprintf("%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02'", table, table))
+}
+
+func TestJoinTable(t *testing.T) {
+	DB.Exec("drop table person_addresses;")
+	DB.AutoMigrate(&Person{})
+	DB.SetJoinTableHandler(&Person{}, "Addresses", &PersonAddress{})
+
+	address1 := &Address{Address1: "address 1"}
+	address2 := &Address{Address1: "address 2"}
+	person := &Person{Name: "person", Addresses: []*Address{address1, address2}}
+	DB.Save(person)
+
+	DB.Model(person).Association("Addresses").Delete(address1)
+
+	if DB.Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 1 {
+		t.Errorf("Should find one address")
+	}
+
+	if DB.Model(person).Association("Addresses").Count() != 1 {
+		t.Errorf("Should find one address")
+	}
+
+	if DB.Unscoped().Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 2 {
+		t.Errorf("Should find two addresses with Unscoped")
+	}
+
+	if DB.Model(person).Association("Addresses").Clear(); DB.Model(person).Association("Addresses").Count() != 0 {
+		t.Errorf("Should delete all addresses")
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/logger.go b/vendor/github.com/jinzhu/gorm/logger.go
new file mode 100644
index 0000000..2c4ccbb
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/logger.go
@@ -0,0 +1,99 @@
+package gorm
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"regexp"
+	"time"
+	"unicode"
+)
+
+var (
+	defaultLogger = Logger{log.New(os.Stdout, "\r\n", 0)}
+	sqlRegexp     = regexp.MustCompile(`(\$\d+)|\?`)
+)
+
+type logger interface {
+	Print(v ...interface{})
+}
+
+// LogWriter log writer interface
+type LogWriter interface {
+	Println(v ...interface{})
+}
+
+// Logger default logger
+type Logger struct {
+	LogWriter
+}
+
+// Print format & print log
+func (logger Logger) Print(values ...interface{}) {
+	if len(values) > 1 {
+		level := values[0]
+		currentTime := "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m"
+		source :=
fmt.Sprintf("\033[35m(%v)\033[0m", values[1]) + messages := []interface{}{source, currentTime} + + if level == "sql" { + // duration + messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0)) + // sql + var sql string + var formattedValues []string + + for _, value := range values[4].([]interface{}) { + indirectValue := reflect.Indirect(reflect.ValueOf(value)) + if indirectValue.IsValid() { + value = indirectValue.Interface() + if t, ok := value.(time.Time); ok { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format(time.RFC3339))) + } else if b, ok := value.([]byte); ok { + if str := string(b); isPrintable(str) { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str)) + } else { + formattedValues = append(formattedValues, "''") + } + } else if r, ok := value.(driver.Valuer); ok { + if value, err := r.Value(); err == nil && value != nil { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) + } else { + formattedValues = append(formattedValues, "NULL") + } + } else { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) + } + } else { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) + } + } + + var formattedValuesLength = len(formattedValues) + for index, value := range sqlRegexp.Split(values[3].(string), -1) { + sql += value + if index < formattedValuesLength { + sql += formattedValues[index] + } + } + + messages = append(messages, sql) + } else { + messages = append(messages, "\033[31;1m") + messages = append(messages, values[2:]...) + messages = append(messages, "\033[0m") + } + logger.Println(messages...) + } +} + +func isPrintable(s string) bool { + for _, r := range s { + if !unicode.IsPrint(r) { + return false + } + } + return true +} diff --git a/vendor/github.com/jinzhu/gorm/main.go b/vendor/github.com/jinzhu/gorm/main.go new file mode 100644 index 0000000..cd44555 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/main.go @@ -0,0 +1,700 @@ +package gorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strings" + "time" +) + +// DB contains information for current db connection +type DB struct { + Value interface{} + Error error + RowsAffected int64 + callbacks *Callback + db sqlCommon + parent *DB + search *search + logMode int + logger logger + dialect Dialect + singularTable bool + source string + values map[string]interface{} + joinTableHandlers map[string]JoinTableHandler +} + +// Open initialize a new db connection, need to import driver first, e.g: +// +// import _ "github.com/go-sql-driver/mysql" +// func main() { +// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local") +// } +// GORM has wrapped some drivers, for easier to remember driver's import path, so you could import the mysql driver with +// import _ "github.com/jinzhu/gorm/dialects/mysql" +// // import _ "github.com/jinzhu/gorm/dialects/postgres" +// // import _ "github.com/jinzhu/gorm/dialects/sqlite" +// // import _ "github.com/jinzhu/gorm/dialects/mssql" +func Open(dialect string, args ...interface{}) (*DB, error) { + var db DB + var err error + + if len(args) == 0 { + err = errors.New("invalid database source") + } else { + var source string + var dbSQL sqlCommon + + switch value := args[0].(type) { + case string: + var driver = dialect + if len(args) == 1 { + source = value + } else if len(args) >= 2 { + driver = value + source = args[1].(string) + } + dbSQL, err = sql.Open(driver, source) + 
case sqlCommon: + source = reflect.Indirect(reflect.ValueOf(value)).FieldByName("dsn").String() + dbSQL = value + } + + db = DB{ + dialect: newDialect(dialect, dbSQL.(*sql.DB)), + logger: defaultLogger, + callbacks: DefaultCallback, + source: source, + values: map[string]interface{}{}, + db: dbSQL, + } + db.parent = &db + + if err == nil { + err = db.DB().Ping() // Send a ping to make sure the database connection is alive. + } + } + + return &db, err +} + +// Close close current db connection +func (s *DB) Close() error { + return s.parent.db.(*sql.DB).Close() +} + +// DB get `*sql.DB` from current connection +func (s *DB) DB() *sql.DB { + return s.db.(*sql.DB) +} + +// New clone a new db connection without search conditions +func (s *DB) New() *DB { + clone := s.clone() + clone.search = nil + clone.Value = nil + return clone +} + +// NewScope create a scope for current operation +func (s *DB) NewScope(value interface{}) *Scope { + dbClone := s.clone() + dbClone.Value = value + return &Scope{db: dbClone, Search: dbClone.search.clone(), Value: value} +} + +// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code. +func (s *DB) CommonDB() sqlCommon { + return s.db +} + +// Callback return `Callbacks` container, you could add/change/delete callbacks with it +// db.Callback().Create().Register("update_created_at", updateCreated) +// Refer https://jinzhu.github.io/gorm/development.html#callbacks +func (s *DB) Callback() *Callback { + s.parent.callbacks = s.parent.callbacks.clone() + return s.parent.callbacks +} + +// SetLogger replace default logger +func (s *DB) SetLogger(log logger) { + s.logger = log +} + +// LogMode set log mode, `true` for detailed logs, `false` for no log, default, will only print error logs +func (s *DB) LogMode(enable bool) *DB { + if enable { + s.logMode = 2 + } else { + s.logMode = 1 + } + return s +} + +// SingularTable use singular table by default +func (s *DB) SingularTable(enable bool) { + modelStructsMap = newModelStructsMap() + s.parent.singularTable = enable +} + +// Where return a new relation, filter records with given conditions, accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/curd.html#query +func (s *DB) Where(query interface{}, args ...interface{}) *DB { + return s.clone().search.Where(query, args...).db +} + +// Or filter records that match before conditions or this one, similar to `Where` +func (s *DB) Or(query interface{}, args ...interface{}) *DB { + return s.clone().search.Or(query, args...).db +} + +// Not filter records that don't match current conditions, similar to `Where` +func (s *DB) Not(query interface{}, args ...interface{}) *DB { + return s.clone().search.Not(query, args...).db +} + +// Limit specify the number of records to be retrieved +func (s *DB) Limit(limit int) *DB { + return s.clone().search.Limit(limit).db +} + +// Offset specify the number of records to skip before starting to return the records +func (s *DB) Offset(offset int) *DB { + return s.clone().search.Offset(offset).db +} + +// Order specify order when retrieve records from database, set reorder to `true` to overwrite defined conditions +func (s *DB) Order(value string, reorder ...bool) *DB { + return s.clone().search.Order(value, reorder...).db +} + +// Select specify fields that you want to retrieve from database when querying, by default, will select all fields; +// When creating/updating, specify fields that you want to save to database +func (s *DB) 
Select(query interface{}, args ...interface{}) *DB { + return s.clone().search.Select(query, args...).db +} + +// Omit specify fields that you want to ignore when saving to database for creating, updating +func (s *DB) Omit(columns ...string) *DB { + return s.clone().search.Omit(columns...).db +} + +// Group specify the group method on the find +func (s *DB) Group(query string) *DB { + return s.clone().search.Group(query).db +} + +// Having specify HAVING conditions for GROUP BY +func (s *DB) Having(query string, values ...interface{}) *DB { + return s.clone().search.Having(query, values...).db +} + +// Joins specify Joins conditions +// db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user) +func (s *DB) Joins(query string, args ...interface{}) *DB { + return s.clone().search.Joins(query, args...).db +} + +// Scopes pass current database connection to arguments `func(*DB) *DB`, which could be used to add conditions dynamically +// func AmountGreaterThan1000(db *gorm.DB) *gorm.DB { +// return db.Where("amount > ?", 1000) +// } +// +// func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB { +// return func (db *gorm.DB) *gorm.DB { +// return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status) +// } +// } +// +// db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders) +// Refer https://jinzhu.github.io/gorm/curd.html#scopes +func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB { + for _, f := range funcs { + s = f(s) + } + return s +} + +// Unscoped return all record including deleted record, refer Soft Delete https://jinzhu.github.io/gorm/curd.html#soft-delete +func (s *DB) Unscoped() *DB { + return s.clone().search.unscoped().db +} + +// Attrs initialize struct with argument if record not found with `FirstOrInit` https://jinzhu.github.io/gorm/curd.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/curd.html#firstorcreate +func (s *DB) Attrs(attrs ...interface{}) *DB { + return s.clone().search.Attrs(attrs...).db +} + +// Assign assign result with argument regardless it is found or not with `FirstOrInit` https://jinzhu.github.io/gorm/curd.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/curd.html#firstorcreate +func (s *DB) Assign(attrs ...interface{}) *DB { + return s.clone().search.Assign(attrs...).db +} + +// First find first record that match given conditions, order by primary key +func (s *DB) First(out interface{}, where ...interface{}) *DB { + newScope := s.clone().NewScope(out) + newScope.Search.Limit(1) + return newScope.Set("gorm:order_by_primary_key", "ASC"). + inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Last find last record that match given conditions, order by primary key +func (s *DB) Last(out interface{}, where ...interface{}) *DB { + newScope := s.clone().NewScope(out) + newScope.Search.Limit(1) + return newScope.Set("gorm:order_by_primary_key", "DESC"). 
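+	// First and Last share the query callbacks; only the primary-key ordering
+	// set here differs ("ASC" in First above, "DESC" in Last).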
+ inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Find find records that match given conditions +func (s *DB) Find(out interface{}, where ...interface{}) *DB { + return s.clone().NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Scan scan value to a struct +func (s *DB) Scan(dest interface{}) *DB { + return s.clone().NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db +} + +// Row return `*sql.Row` with given conditions +func (s *DB) Row() *sql.Row { + return s.NewScope(s.Value).row() +} + +// Rows return `*sql.Rows` with given conditions +func (s *DB) Rows() (*sql.Rows, error) { + return s.NewScope(s.Value).rows() +} + +// ScanRows scan `*sql.Rows` to give struct +func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error { + var ( + clone = s.clone() + scope = clone.NewScope(result) + columns, err = rows.Columns() + ) + + if clone.AddError(err) == nil { + scope.scan(rows, columns, scope.Fields()) + } + + return clone.Error +} + +// Pluck used to query single column from a model as a map +// var ages []int64 +// db.Find(&users).Pluck("age", &ages) +func (s *DB) Pluck(column string, value interface{}) *DB { + return s.NewScope(s.Value).pluck(column, value).db +} + +// Count get how many records for a model +func (s *DB) Count(value interface{}) *DB { + return s.NewScope(s.Value).count(value).db +} + +// Related get related associations +func (s *DB) Related(value interface{}, foreignKeys ...string) *DB { + return s.clone().NewScope(s.Value).related(value, foreignKeys...).db +} + +// FirstOrInit find first matched record or initialize a new one with given conditions (only works with struct, map conditions) +// https://jinzhu.github.io/gorm/curd.html#firstorinit +func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB { + c := s.clone() + if result := c.First(out, where...); result.Error != nil { + if !result.RecordNotFound() { + return result + } + c.NewScope(out).inlineCondition(where...).initialize() + } else { + c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs) + } + return c +} + +// FirstOrCreate find first matched record or create a new one with given conditions (only works with struct, map conditions) +// https://jinzhu.github.io/gorm/curd.html#firstorcreate +func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB { + c := s.clone() + if result := c.First(out, where...); result.Error != nil { + if !result.RecordNotFound() { + return result + } + c.AddError(c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db.Error) + } else if len(c.search.assignAttrs) > 0 { + c.AddError(c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db.Error) + } + return c +} + +// Update update attributes with callbacks, refer: https://jinzhu.github.io/gorm/curd.html#update +func (s *DB) Update(attrs ...interface{}) *DB { + return s.Updates(toSearchableMap(attrs...), true) +} + +// Updates update attributes with callbacks, refer: https://jinzhu.github.io/gorm/curd.html#update +func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB { + return s.clone().NewScope(s.Value). + Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0). + InstanceSet("gorm:update_interface", values). 
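+	// The attributes stored via InstanceSet above are consumed by the update
+	// callbacks invoked next. An illustrative call (model and column names
+	// are assumed for the example):
+	//   db.Model(&user).Updates(map[string]interface{}{"name": "hello", "age": 18})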
+ callCallbacks(s.parent.callbacks.updates).db +} + +// UpdateColumn update attributes without callbacks, refer: https://jinzhu.github.io/gorm/curd.html#update +func (s *DB) UpdateColumn(attrs ...interface{}) *DB { + return s.UpdateColumns(toSearchableMap(attrs...)) +} + +// UpdateColumns update attributes without callbacks, refer: https://jinzhu.github.io/gorm/curd.html#update +func (s *DB) UpdateColumns(values interface{}) *DB { + return s.clone().NewScope(s.Value). + Set("gorm:update_column", true). + Set("gorm:save_associations", false). + InstanceSet("gorm:update_interface", values). + callCallbacks(s.parent.callbacks.updates).db +} + +// Save update value in database, if the value doesn't have primary key, will insert it +func (s *DB) Save(value interface{}) *DB { + scope := s.clone().NewScope(value) + if scope.PrimaryKeyZero() { + return scope.callCallbacks(s.parent.callbacks.creates).db + } + return scope.callCallbacks(s.parent.callbacks.updates).db +} + +// Create insert the value into database +func (s *DB) Create(value interface{}) *DB { + scope := s.clone().NewScope(value) + return scope.callCallbacks(s.parent.callbacks.creates).db +} + +// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition +func (s *DB) Delete(value interface{}, where ...interface{}) *DB { + return s.clone().NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db +} + +// Raw use raw sql as conditions, won't run it unless invoked by other methods +// db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result) +func (s *DB) Raw(sql string, values ...interface{}) *DB { + return s.clone().search.Raw(true).Where(sql, values...).db +} + +// Exec execute raw sql +func (s *DB) Exec(sql string, values ...interface{}) *DB { + scope := s.clone().NewScope(nil) + generatedSQL := scope.buildWhereCondition(map[string]interface{}{"query": sql, "args": values}) + generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")") + scope.Raw(generatedSQL) + return scope.Exec().db +} + +// Model specify the model you would like to run db operations +// // update all users's name to `hello` +// db.Model(&User{}).Update("name", "hello") +// // if user's primary key is non-blank, will use it as condition, then will only update the user's name to `hello` +// db.Model(&user).Update("name", "hello") +func (s *DB) Model(value interface{}) *DB { + c := s.clone() + c.Value = value + return c +} + +// Table specify the table you would like to run db operations +func (s *DB) Table(name string) *DB { + clone := s.clone() + clone.search.Table(name) + clone.Value = nil + return clone +} + +// Debug start debug mode +func (s *DB) Debug() *DB { + return s.clone().LogMode(true) +} + +// Begin begin a transaction +func (s *DB) Begin() *DB { + c := s.clone() + if db, ok := c.db.(sqlDb); ok { + tx, err := db.Begin() + c.db = interface{}(tx).(sqlCommon) + c.AddError(err) + } else { + c.AddError(ErrCantStartTransaction) + } + return c +} + +// Commit commit a transaction +func (s *DB) Commit() *DB { + if db, ok := s.db.(sqlTx); ok { + s.AddError(db.Commit()) + } else { + s.AddError(ErrInvalidTransaction) + } + return s +} + +// Rollback rollback a transaction +func (s *DB) Rollback() *DB { + if db, ok := s.db.(sqlTx); ok { + s.AddError(db.Rollback()) + } else { + s.AddError(ErrInvalidTransaction) + } + return s +} + +// NewRecord check if value's primary key is blank +func (s *DB) NewRecord(value interface{}) bool { + return 
s.clone().NewScope(value).PrimaryKeyZero()
+}
+
+// RecordNotFound check if returning ErrRecordNotFound error
+func (s *DB) RecordNotFound() bool {
+	for _, err := range s.GetErrors() {
+		if err == ErrRecordNotFound {
+			return true
+		}
+	}
+	return false
+}
+
+// CreateTable create table for models
+func (s *DB) CreateTable(models ...interface{}) *DB {
+	db := s.Unscoped()
+	for _, model := range models {
+		db = db.NewScope(model).createTable().db
+	}
+	return db
+}
+
+// DropTable drop table for models
+func (s *DB) DropTable(values ...interface{}) *DB {
+	db := s.clone()
+	for _, value := range values {
+		if tableName, ok := value.(string); ok {
+			db = db.Table(tableName)
+		}
+
+		db = db.NewScope(value).dropTable().db
+	}
+	return db
+}
+
+// DropTableIfExists drops the table if it exists
+func (s *DB) DropTableIfExists(values ...interface{}) *DB {
+	db := s.clone()
+	for _, value := range values {
+		if s.HasTable(value) {
+			db.AddError(s.DropTable(value).Error)
+		}
+	}
+	return db
+}
+
+// HasTable check if a table exists for the given value or table name
+func (s *DB) HasTable(value interface{}) bool {
+	var (
+		scope     = s.clone().NewScope(value)
+		tableName string
+	)
+
+	if name, ok := value.(string); ok {
+		tableName = name
+	} else {
+		tableName = scope.TableName()
+	}
+
+	has := scope.Dialect().HasTable(tableName)
+	s.AddError(scope.db.Error)
+	return has
+}
+
+// AutoMigrate run auto migration for given models, will only add missing fields, won't delete/change current data
+func (s *DB) AutoMigrate(values ...interface{}) *DB {
+	db := s.Unscoped()
+	for _, value := range values {
+		db = db.NewScope(value).autoMigrate().db
+	}
+	return db
+}
+
+// ModifyColumn modify a column to the given type
+func (s *DB) ModifyColumn(column string, typ string) *DB {
+	scope := s.clone().NewScope(s.Value)
+	scope.modifyColumn(column, typ)
+	return scope.db
+}
+
+// DropColumn drop a column
+func (s *DB) DropColumn(column string) *DB {
+	scope := s.clone().NewScope(s.Value)
+	scope.dropColumn(column)
+	return scope.db
+}
+
+// AddIndex add index for columns with given name
+func (s *DB) AddIndex(indexName string, columns ...string) *DB {
+	scope := s.Unscoped().NewScope(s.Value)
+	scope.addIndex(false, indexName, columns...)
+	return scope.db
+}
+
+// AddUniqueIndex add unique index for columns with given name
+func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB {
+	scope := s.Unscoped().NewScope(s.Value)
+	scope.addIndex(true, indexName, columns...)
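+	// An illustrative migration sketch combining the methods above, using the
+	// Email model and index names that appear in this diff's tests:
+	//   db.AutoMigrate(&Email{})
+	//   db.Model(&Email{}).AddIndex("idx_email_email", "email")
+	//   db.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email")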
+	return scope.db
+}
+
+// RemoveIndex remove index with name
+func (s *DB) RemoveIndex(indexName string) *DB {
+	scope := s.clone().NewScope(s.Value)
+	scope.removeIndex(indexName)
+	return scope.db
+}
+
+// AddForeignKey add a foreign key to the given scope, e.g:
+//     db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT")
+func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB {
+	scope := s.clone().NewScope(s.Value)
+	scope.addForeignKey(field, dest, onDelete, onUpdate)
+	return scope.db
+}
+
+// Association start `Association Mode` to handle relations more easily in that mode, refer: https://jinzhu.github.io/gorm/associations.html#association-mode
+func (s *DB) Association(column string) *Association {
+	var err error
+	scope := s.clone().NewScope(s.Value)
+
+	if primaryField := scope.PrimaryField(); primaryField.IsBlank {
+		err = errors.New("primary key can't be nil")
+	} else {
+		if field, ok := scope.FieldByName(column); ok {
+			if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 {
+				err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type())
+			} else {
+				return &Association{scope: scope, column: column, field: field}
+			}
+		} else {
+			err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column)
+		}
+	}
+
+	return &Association{Error: err}
+}
+
+// Preload preload associations with given conditions
+//    db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users)
+func (s *DB) Preload(column string, conditions ...interface{}) *DB {
+	return s.clone().search.Preload(column, conditions...).db
+}
+
+// Set set a setting by name, which could be used in callbacks; it will clone a new db and update the clone's setting
+func (s *DB) Set(name string, value interface{}) *DB {
+	return s.clone().InstantSet(name, value)
+}
+
+// InstantSet instant set setting, will affect current db
+func (s *DB) InstantSet(name string, value interface{}) *DB {
+	s.values[name] = value
+	return s
+}
+
+// Get get setting by name
+func (s *DB) Get(name string) (value interface{}, ok bool) {
+	value, ok = s.values[name]
+	return
+}
+
+// SetJoinTableHandler set a model's join table handler for a relation
+func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) {
+	scope := s.NewScope(source)
+	for _, field := range scope.GetModelStruct().StructFields {
+		if field.Name == column || field.DBName == column {
+			if many2many := field.TagSettings["MANY2MANY"]; many2many != "" {
+				source := (&Scope{Value: source}).GetModelStruct().ModelType
+				destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType
+				handler.Setup(field.Relationship, many2many, source, destination)
+				field.Relationship.JoinTableHandler = handler
+				if table := handler.Table(s); scope.Dialect().HasTable(table) {
+					s.Table(table).AutoMigrate(handler)
+				}
+			}
+		}
+	}
+}
+
+// AddError add error to the db
+func (s *DB) AddError(err error) error {
+	if err != nil {
+		if err != ErrRecordNotFound {
+			if s.logMode == 0 {
+				go s.print(fileWithLineNum(), err)
+			} else {
+				s.log(err)
+			}
+
+			errors := Errors{errors: s.GetErrors()}
+			errors.Add(err)
+			if len(errors.GetErrors()) > 1 {
+				err = errors
+			}
+		}
+
+		s.Error = err
+	}
+	return err
+}
+
+// GetErrors get the errors that have happened on the db
+func (s *DB) GetErrors() (errors []error) {
+	if errs, ok := s.Error.(errorsInterface); ok {
+		return errs.GetErrors()
+	} else if s.Error != nil {
+		return
[]error{s.Error}
+	}
+	return
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Private Methods For *gorm.DB
+////////////////////////////////////////////////////////////////////////////////
+
+func (s *DB) clone() *DB {
+	db := DB{db: s.db, parent: s.parent, logger: s.logger, logMode: s.logMode, values: map[string]interface{}{}, Value: s.Value, Error: s.Error}
+
+	for key, value := range s.values {
+		db.values[key] = value
+	}
+
+	if s.search == nil {
+		db.search = &search{limit: -1, offset: -1}
+	} else {
+		db.search = s.search.clone()
+	}
+
+	db.search.db = &db
+	return &db
+}
+
+func (s *DB) print(v ...interface{}) {
+	s.logger.(logger).Print(v...)
+}
+
+func (s *DB) log(v ...interface{}) {
+	if s != nil && s.logMode == 2 {
+		s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...)
+	}
+}
+
+func (s *DB) slog(sql string, t time.Time, vars ...interface{}) {
+	if s.logMode == 2 {
+		s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars)
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/main_test.go b/vendor/github.com/jinzhu/gorm/main_test.go
new file mode 100644
index 0000000..8ac015c
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/main_test.go
@@ -0,0 +1,774 @@
+package gorm_test
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/erikstmartin/go-testdb"
+	"github.com/jinzhu/gorm"
+	_ "github.com/jinzhu/gorm/dialects/mssql"
+	_ "github.com/jinzhu/gorm/dialects/mysql"
+	"github.com/jinzhu/gorm/dialects/postgres"
+	_ "github.com/jinzhu/gorm/dialects/sqlite"
+	"github.com/jinzhu/now"
+)
+
+var (
+	DB                 *gorm.DB
+	t1, t2, t3, t4, t5 time.Time
+)
+
+func init() {
+	var err error
+
+	if DB, err = OpenTestConnection(); err != nil {
+		panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
+	}
+
+	// DB.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
+	// DB.SetLogger(log.New(os.Stdout, "\r\n", 0))
+	if os.Getenv("DEBUG") == "true" {
+		DB.LogMode(true)
+	}
+
+	DB.DB().SetMaxIdleConns(10)
+
+	runMigration()
+}
+
+func OpenTestConnection() (db *gorm.DB, err error) {
+	switch os.Getenv("GORM_DIALECT") {
+	case "mysql":
+		// CREATE USER 'gorm'@'localhost' IDENTIFIED BY 'gorm';
+		// CREATE DATABASE gorm;
+		// GRANT ALL ON gorm.* TO 'gorm'@'localhost';
+		fmt.Println("testing mysql...")
+		db, err = gorm.Open("mysql", "gorm:gorm@/gorm?charset=utf8&parseTime=True")
+	case "postgres":
+		fmt.Println("testing postgres...")
+		db, err = gorm.Open("postgres", "user=gorm DB.name=gorm sslmode=disable")
+	case "foundation":
+		fmt.Println("testing foundation...")
+		db, err = gorm.Open("foundation", "dbname=gorm port=15432 sslmode=disable")
+	case "mssql":
+		fmt.Println("testing mssql...")
+		db, err = gorm.Open("mssql", "server=SERVER_HERE;database=rogue;user id=USER_HERE;password=PW_HERE;port=1433")
+	default:
+		fmt.Println("testing sqlite3...")
+		db, err = gorm.Open("sqlite3", "/tmp/gorm.db")
+	}
+	return
+}
+
+func TestStringPrimaryKey(t *testing.T) {
+	type UUIDStruct struct {
+		ID   string `gorm:"primary_key"`
+		Name string
+	}
+	DB.AutoMigrate(&UUIDStruct{})
+
+	data := UUIDStruct{ID: "uuid", Name: "hello"}
+	if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" {
+		t.Errorf("string primary key should not be populated")
+	}
+}
+
+func TestExceptionsWithInvalidSql(t *testing.T) {
+	var columns []string
+	if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
+		t.Errorf("Should get an error with invalid SQL")
+	}
+
+	if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
+		t.Errorf("Should get an error with invalid SQL")
+	}
+
+	if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
+		t.Errorf("Should get an error with invalid SQL")
+	}
+
+	var count1, count2 int64
+	DB.Model(&User{}).Count(&count1)
+	if count1 <= 0 {
+		t.Errorf("Should find some users")
+	}
+
+	if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
+		t.Errorf("Should get an error with invalid SQL")
+	}
+
+	DB.Model(&User{}).Count(&count2)
+	if count1 != count2 {
+		t.Errorf("No user should be deleted by invalid SQL")
+	}
+}
+
+func TestSetTable(t *testing.T) {
+	DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
+	DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
+	DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
+
+	if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
+		t.Error("No errors should happen if set table for pluck", err)
+	}
+
+	var users []User
+	if DB.Table("users").Find(&[]User{}).Error != nil {
+		t.Errorf("No errors should happen if set table for find")
+	}
+
+	if DB.Table("invalid_table").Find(&users).Error == nil {
+		t.Errorf("Should get an error when table is set to an invalid table")
+	}
+
+	DB.Exec("drop table deleted_users;")
+	if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
+		t.Errorf("Create table with specified table")
+	}
+
+	DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
+
+	var deletedUsers []User
+	DB.Table("deleted_users").Find(&deletedUsers)
+	if len(deletedUsers) != 1 {
+		t.Errorf("Query from specified table")
+	}
+
+	DB.Save(getPreparedUser("normal_user", "reset_table"))
+	DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
+	var user1, user2, user3 User
+	DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
+	if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
+		t.Errorf("unset specified table with blank string")
+	}
+}
+
+type Order struct {
+}
+
+type Cart struct {
+}
+
+func (c Cart) TableName() string {
+	return "shopping_cart"
+}
+
+func TestHasTable(t *testing.T) {
+	type Foo struct {
+		Id    int
+		Stuff string
+	}
+	DB.DropTable(&Foo{})
+
+	// Table should not exist at this point, so HasTable should return false
+	if ok := DB.HasTable("foos"); ok {
+		t.Errorf("Table should not exist, but does")
+	}
+	if ok := DB.HasTable(&Foo{}); ok {
+		t.Errorf("Table should not exist, but does")
+	}
+
+	// We create the table
+	if err := DB.CreateTable(&Foo{}).Error; err != nil {
+		t.Errorf("Table should be created")
+	}
+
+	// And now it should exist, and HasTable should return true
+	if ok := DB.HasTable("foos"); !ok {
+		t.Errorf("Table should exist, but HasTable informs it does not")
+	}
+	if ok := DB.HasTable(&Foo{}); !ok {
+		t.Errorf("Table should exist, but HasTable informs it does not")
+	}
+}
+
+func TestTableName(t *testing.T) {
+	DB := DB.Model("")
+	if DB.NewScope(Order{}).TableName() != "orders" {
+		t.Errorf("Order's table name should be orders")
+	}
+
+	if DB.NewScope(&Order{}).TableName() != "orders" {
+		t.Errorf("&Order's table name should be orders")
+	}
+
+	if DB.NewScope([]Order{}).TableName() != "orders" {
+		t.Errorf("[]Order's table name should be orders")
+	}
+
+	if DB.NewScope(&[]Order{}).TableName() != "orders" {
+		t.Errorf("&[]Order's table name should be orders")
+	}
+
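+	// SingularTable(true) resets the cached model structs and flips the parent
+	// connection's singularTable flag, so the same models resolve to singular
+	// table names below; the test restores the default at the end.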
DB.SingularTable(true) + if DB.NewScope(Order{}).TableName() != "order" { + t.Errorf("Order's singular table name should be order") + } + + if DB.NewScope(&Order{}).TableName() != "order" { + t.Errorf("&Order's singular table name should be order") + } + + if DB.NewScope([]Order{}).TableName() != "order" { + t.Errorf("[]Order's singular table name should be order") + } + + if DB.NewScope(&[]Order{}).TableName() != "order" { + t.Errorf("&[]Order's singular table name should be order") + } + + if DB.NewScope(&Cart{}).TableName() != "shopping_cart" { + t.Errorf("&Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(Cart{}).TableName() != "shopping_cart" { + t.Errorf("Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" { + t.Errorf("&[]Cart's singular table name should be shopping_cart") + } + + if DB.NewScope([]Cart{}).TableName() != "shopping_cart" { + t.Errorf("[]Cart's singular table name should be shopping_cart") + } + DB.SingularTable(false) +} + +func TestNullValues(t *testing.T) { + DB.DropTable(&NullValue{}) + DB.AutoMigrate(&NullValue{}) + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello", Valid: true}, + Gender: &sql.NullString{String: "M", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: true}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: true}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv NullValue + DB.First(&nv, "name = ?", "hello") + + if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello-2", Valid: true}, + Gender: &sql.NullString{String: "F", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv2 NullValue + DB.First(&nv2, "name = ?", "hello-2") + if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello-3", Valid: false}, + Gender: &sql.NullString{String: "M", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err == nil { + t.Errorf("Can't save because of name can't be null") + } +} + +func TestNullValuesWithFirstOrCreate(t *testing.T) { + var nv1 = NullValue{ + Name: sql.NullString{String: "first_or_create", Valid: true}, + Gender: &sql.NullString{String: "M", Valid: true}, + } + + var nv2 NullValue + if err := DB.Where(nv1).FirstOrCreate(&nv2).Error; err != nil { + t.Errorf("Should not raise any error, but got %v", err) + } + + if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" { + t.Errorf("first or create with nullvalues") + } + + if err := DB.Where(nv1).Assign(NullValue{Age: 
sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil {
+		t.Errorf("Should not raise any error, but got %v", err)
+	}
+
+	if nv2.Age.Int64 != 18 {
+		t.Errorf("should update age to 18")
+	}
+}
+
+func TestTransaction(t *testing.T) {
+	tx := DB.Begin()
+	u := User{Name: "transaction"}
+	if err := tx.Save(&u).Error; err != nil {
+		t.Errorf("No error should be raised")
+	}
+
+	if err := tx.First(&User{}, "name = ?", "transaction").Error; err != nil {
+		t.Errorf("Should find saved record")
+	}
+
+	if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
+		t.Errorf("Should return the underlying sql.Tx")
+	}
+
+	tx.Rollback()
+
+	if err := tx.First(&User{}, "name = ?", "transaction").Error; err == nil {
+		t.Errorf("Should not find record after rollback")
+	}
+
+	tx2 := DB.Begin()
+	u2 := User{Name: "transaction-2"}
+	if err := tx2.Save(&u2).Error; err != nil {
+		t.Errorf("No error should be raised")
+	}
+
+	if err := tx2.First(&User{}, "name = ?", "transaction-2").Error; err != nil {
+		t.Errorf("Should find saved record")
+	}
+
+	tx2.Commit()
+
+	if err := DB.First(&User{}, "name = ?", "transaction-2").Error; err != nil {
+		t.Errorf("Should be able to find committed record")
+	}
+}
+
+func TestRow(t *testing.T) {
+	user1 := User{Name: "RowUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "RowUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "RowUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
+	var age int64
+	row.Scan(&age)
+	if age != 10 {
+		t.Errorf("Scan with Row")
+	}
+}
+
+func TestRows(t *testing.T) {
+	user1 := User{Name: "RowsUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "RowsUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "RowsUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
+	if err != nil {
+		t.Errorf("No error should happen, got %v", err)
+	}
+
+	count := 0
+	for rows.Next() {
+		var name string
+		var age int64
+		rows.Scan(&name, &age)
+		count++
+	}
+
+	if count != 2 {
+		t.Errorf("Should have found two records")
+	}
+}
+
+func TestScanRows(t *testing.T) {
+	user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	rows, err := DB.Table("users").Where("name = ? 
or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
+	if err != nil {
+		t.Errorf("No error should happen, got %v", err)
+	}
+
+	type Result struct {
+		Name string
+		Age  int
+	}
+
+	var results []Result
+	for rows.Next() {
+		var result Result
+		if err := DB.ScanRows(rows, &result); err != nil {
+			t.Errorf("should get no error, but got %v", err)
+		}
+		results = append(results, result)
+	}
+
+	if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) {
+		t.Errorf("Should find expected results")
+	}
+}
+
+func TestScan(t *testing.T) {
+	user1 := User{Name: "ScanUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "ScanUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "ScanUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	type result struct {
+		Name string
+		Age  int
+	}
+
+	var res result
+	DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
+	if res.Name != user3.Name {
+		t.Errorf("Scan into struct should work")
+	}
+
+	var doubleAgeRes result
+	DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes)
+	if doubleAgeRes.Age != res.Age*2 {
+		t.Errorf("Scan double age as age")
+	}
+
+	var ress []result
+	DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
+	if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
+		t.Errorf("Scan into a slice of structs")
+	}
+}
+
+func TestRaw(t *testing.T) {
+	user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	type result struct {
+		Name  string
+		Email string
+	}
+
+	var ress []result
+	DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
+	if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
+		t.Errorf("Raw with scan")
+	}
+
+	rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
+	count := 0
+	for rows.Next() {
+		count++
+	}
+	if count != 1 {
+		t.Errorf("Raw with Rows should find one record with name 3")
+	}
+
+	DB.Exec("update users set name=? 
where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name}) + if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound { + t.Error("Raw sql to update records") + } +} + +func TestGroup(t *testing.T) { + rows, err := DB.Select("name").Table("users").Group("name").Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + rows.Scan(&name) + } + } else { + t.Errorf("Should not raise any error") + } +} + +func TestJoins(t *testing.T) { + var user = User{ + Name: "joins", + CreditCard: CreditCard{Number: "411111111111"}, + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var users1 []User + DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1) + if len(users1) != 2 { + t.Errorf("should find two users using left join") + } + + var users2 []User + DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Where("name = ?", "joins").First(&users2) + if len(users2) != 1 { + t.Errorf("should find one users using left join with conditions") + } + + var users3 []User + DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3) + if len(users3) != 1 { + t.Errorf("should find one users using multiple left join conditions") + } + + var users4 []User + DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4) + if len(users4) != 0 { + t.Errorf("should find no user when searching with unexisting credit card") + } +} + +func TestJoinsWithSelect(t *testing.T) { + type result struct { + Name string + Email string + } + + user := User{ + Name: "joins_with_select", + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var results []result + DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results) + if len(results) != 2 || results[0].Email != "join1@example.com" || results[1].Email != "join2@example.com" { + t.Errorf("Should find all two emails with Join select") + } +} + +func TestHaving(t *testing.T) { + rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + var total int64 + rows.Scan(&name, &total) + + if name == "2" && total != 1 { + t.Errorf("Should have one user having name 2") + } + if name == "3" && total != 2 { + t.Errorf("Should have two users having name 3") + } + } + } else { + t.Errorf("Should not raise any error") + } +} + +func DialectHasTzSupport() bool { + // NB: mssql and FoundationDB do not support time zones. 
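+	// The dialect under test is selected via the GORM_DIALECT environment
+	// variable (see OpenTestConnection above), e.g.: GORM_DIALECT=mysql go test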
+ if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" || dialect == "foundation" { + return false + } + return true +} + +func TestTimeWithZone(t *testing.T) { + var format = "2006-01-02 15:04:05 -0700" + var times []time.Time + GMT8, _ := time.LoadLocation("Asia/Shanghai") + times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8)) + times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC)) + + for index, vtime := range times { + name := "time_with_zone_" + strconv.Itoa(index) + user := User{Name: name, Birthday: vtime} + + if !DialectHasTzSupport() { + // If our driver dialect doesn't support TZ's, just use UTC for everything here. + user.Birthday = vtime.UTC() + } + + DB.Save(&user) + expectedBirthday := "2013-02-18 17:51:49 +0000" + foundBirthday := user.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + var findUser, findUser2, findUser3 User + DB.First(&findUser, "name = ?", name) + foundBirthday = findUser.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() { + t.Errorf("User should be found") + } + + if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() { + t.Errorf("User should not be found") + } + } +} + +func TestHstore(t *testing.T) { + type Details struct { + Id int64 + Bulk postgres.Hstore + } + + if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" { + t.Skip() + } + + if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil { + fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m") + panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err)) + } + + DB.Exec("drop table details") + + if err := DB.CreateTable(&Details{}).Error; err != nil { + panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) + } + + bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait" + bulk := map[string]*string{ + "bankAccountId": &bankAccountId, + "phoneNumber": &phoneNumber, + "opinion": &opinion, + } + d := Details{Bulk: bulk} + DB.Save(&d) + + var d2 Details + if err := DB.First(&d2).Error; err != nil { + t.Errorf("Got error when tried to fetch details: %+v", err) + } + + for k := range bulk { + if r, ok := d2.Bulk[k]; ok { + if res, _ := bulk[k]; *res != *r { + t.Errorf("Details should be equal") + } + } else { + t.Errorf("Details should be existed") + } + } +} + +func TestSetAndGet(t *testing.T) { + if value, ok := DB.Set("hello", "world").Get("hello"); !ok { + t.Errorf("Should be able to get setting after set") + } else { + if value.(string) != "world" { + t.Errorf("Setted value should not be changed") + } + } + + if _, ok := DB.Get("non_existing"); ok { + t.Errorf("Get non existing key should return error") + } +} + +func TestCompatibilityMode(t *testing.T) { + DB, _ := gorm.Open("testdb", "") + testdb.SetQueryFunc(func(query string) (driver.Rows, error) { + columns := []string{"id", "name", "age"} + result := ` + 1,Tim,20 + 2,Joe,25 + 
3,Bob,30 + ` + return testdb.RowsFromCSVString(columns, result), nil + }) + + var users []User + DB.Find(&users) + if (users[0].Name != "Tim") || len(users) != 3 { + t.Errorf("Unexcepted result returned") + } +} + +func TestOpenExistingDB(t *testing.T) { + DB.Save(&User{Name: "jnfeinstein"}) + dialect := os.Getenv("GORM_DIALECT") + + db, err := gorm.Open(dialect, DB.DB()) + if err != nil { + t.Errorf("Should have wrapped the existing DB connection") + } + + var user User + if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound { + t.Errorf("Should have found existing record") + } +} + +func TestDdlErrors(t *testing.T) { + var err error + + if err = DB.Close(); err != nil { + t.Errorf("Closing DDL test db connection err=%s", err) + } + defer func() { + // Reopen DB connection. + if DB, err = OpenTestConnection(); err != nil { + t.Fatalf("Failed re-opening db connection: %s", err) + } + }() + + if err := DB.Find(&User{}).Error; err == nil { + t.Errorf("Expected operation on closed db to produce an error, but err was nil") + } +} + +func BenchmarkGorm(b *testing.B) { + b.N = 2000 + for x := 0; x < b.N; x++ { + e := strconv.Itoa(x) + "benchmark@example.org" + email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()} + // Insert + DB.Save(&email) + // Query + DB.First(&BigEmail{}, "email = ?", e) + // Update + DB.Model(&email).UpdateColumn("email", "new-"+e) + // Delete + DB.Delete(&email) + } +} + +func BenchmarkRawSql(b *testing.B) { + DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable") + DB.SetMaxIdleConns(10) + insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id" + querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1" + updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3" + deleteSql := "DELETE FROM orders WHERE id = $1" + + b.N = 2000 + for x := 0; x < b.N; x++ { + var id int64 + e := strconv.Itoa(x) + "benchmark@example.org" + email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()} + // Insert + DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id) + // Query + rows, _ := DB.Query(querySql, email.Email) + rows.Close() + // Update + DB.Exec(updateSql, "new-"+e, time.Now(), id) + // Delete + DB.Exec(deleteSql, id) + } +} diff --git a/vendor/github.com/jinzhu/gorm/migration_test.go b/vendor/github.com/jinzhu/gorm/migration_test.go new file mode 100644 index 0000000..38e5c1c --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/migration_test.go @@ -0,0 +1,349 @@ +package gorm_test + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type User struct { + Id int64 + Age int64 + UserNum Num + Name string `sql:"size:255"` + Email string + Birthday time.Time // Time + CreatedAt time.Time // CreatedAt: Time of record is created, will be insert automatically + UpdatedAt time.Time // UpdatedAt: Time of record is updated, will be updated automatically + Emails []Email // Embedded structs + BillingAddress Address // Embedded struct + BillingAddressID sql.NullInt64 // Embedded struct's foreign key + ShippingAddress Address // Embedded struct + ShippingAddressId int64 // Embedded struct's foreign key + CreditCard CreditCard + Latitude float64 + Languages []Language `gorm:"many2many:user_languages;"` + CompanyID *int + Company Company + Role + PasswordHash 
[]byte + IgnoreMe int64 `sql:"-"` + IgnoreStringSlice []string `sql:"-"` + Ignored struct{ Name string } `sql:"-"` + IgnoredPointer *User `sql:"-"` +} + +type CreditCard struct { + ID int8 + Number string + UserId sql.NullInt64 + CreatedAt time.Time `sql:"not null"` + UpdatedAt time.Time + DeletedAt *time.Time +} + +type Email struct { + Id int16 + UserId int + Email string `sql:"type:varchar(100);"` + CreatedAt time.Time + UpdatedAt time.Time +} + +type Address struct { + ID int + Address1 string + Address2 string + Post string + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time +} + +type Language struct { + gorm.Model + Name string + Users []User `gorm:"many2many:user_languages;"` +} + +type Product struct { + Id int64 + Code string + Price int64 + CreatedAt time.Time + UpdatedAt time.Time + AfterFindCallTimes int64 + BeforeCreateCallTimes int64 + AfterCreateCallTimes int64 + BeforeUpdateCallTimes int64 + AfterUpdateCallTimes int64 + BeforeSaveCallTimes int64 + AfterSaveCallTimes int64 + BeforeDeleteCallTimes int64 + AfterDeleteCallTimes int64 +} + +type Company struct { + Id int64 + Name string + Owner *User `sql:"-"` +} + +type Role struct { + Name string +} + +func (role *Role) Scan(value interface{}) error { + if b, ok := value.([]uint8); ok { + role.Name = string(b) + } else { + role.Name = value.(string) + } + return nil +} + +func (role Role) Value() (driver.Value, error) { + return role.Name, nil +} + +func (role Role) IsAdmin() bool { + return role.Name == "admin" +} + +type Num int64 + +func (i *Num) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + case int64: + *i = Num(s) + default: + return errors.New("Cannot scan NamedInt from " + reflect.ValueOf(src).String()) + } + return nil +} + +type Animal struct { + Counter uint64 `gorm:"primary_key:yes"` + Name string `sql:"DEFAULT:'galeone'"` + From string //test reserved sql keyword as field name + Age time.Time `sql:"DEFAULT:current_timestamp"` + unexported string // unexported value + CreatedAt time.Time + UpdatedAt time.Time +} + +type JoinTable struct { + From uint64 + To uint64 + Time time.Time `sql:"default: null"` +} + +type Post struct { + Id int64 + CategoryId sql.NullInt64 + MainCategoryId int64 + Title string + Body string + Comments []*Comment + Category Category + MainCategory Category +} + +type Category struct { + gorm.Model + Name string +} + +type Comment struct { + gorm.Model + PostId int64 + Content string + Post Post +} + +// Scanner +type NullValue struct { + Id int64 + Name sql.NullString `sql:"not null"` + Gender *sql.NullString `sql:"not null"` + Age sql.NullInt64 + Male sql.NullBool + Height sql.NullFloat64 + AddedAt NullTime +} + +type NullTime struct { + Time time.Time + Valid bool +} + +func (nt *NullTime) Scan(value interface{}) error { + if value == nil { + nt.Valid = false + return nil + } + nt.Time, nt.Valid = value.(time.Time), true + return nil +} + +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func getPreparedUser(name string, role string) *User { + var company Company + DB.Where(Company{Name: role}).FirstOrCreate(&company) + + return &User{ + Name: name, + Age: 20, + Role: Role{role}, + BillingAddress: Address{Address1: fmt.Sprintf("Billing Address %v", name)}, + ShippingAddress: Address{Address1: fmt.Sprintf("Shipping Address %v", name)}, + CreditCard: CreditCard{Number: fmt.Sprintf("123456%v", name)}, + Emails: []Email{ + {Email: fmt.Sprintf("user_%v@example1.com", name)}, {Email: 
fmt.Sprintf("user_%v@example2.com", name)}, + }, + Company: company, + Languages: []Language{ + {Name: fmt.Sprintf("lang_1_%v", name)}, + {Name: fmt.Sprintf("lang_2_%v", name)}, + }, + } +} + +func runMigration() { + if err := DB.DropTableIfExists(&User{}).Error; err != nil { + fmt.Printf("Got error when try to delete table users, %+v\n", err) + } + + for _, table := range []string{"animals", "user_languages"} { + DB.Exec(fmt.Sprintf("drop table %v;", table)) + } + + values := []interface{}{&Product{}, &Email{}, &Address{}, &CreditCard{}, &Company{}, &Role{}, &Language{}, &HNPost{}, &EngadgetPost{}, &Animal{}, &User{}, &JoinTable{}, &Post{}, &Category{}, &Comment{}, &Cat{}, &Dog{}, &Toy{}} + for _, value := range values { + DB.DropTable(value) + } + + if err := DB.AutoMigrate(values...).Error; err != nil { + panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) + } +} + +func TestIndexes(t *testing.T) { + if err := DB.Model(&Email{}).AddIndex("idx_email_email", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + scope := DB.NewScope(&Email{}) + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") { + t.Errorf("Email should have index idx_email_email") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") { + t.Errorf("Email's index idx_email_email should be deleted") + } + + if err := DB.Model(&Email{}).AddIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email should have index idx_email_email_and_user_id") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email's index idx_email_email_and_user_id should be deleted") + } + + if err := DB.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email should have index idx_email_email_and_user_id") + } + + if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.comiii"}, {Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error == nil { + t.Errorf("Should get to create duplicate record when having unique index") + } + + var user = User{Name: "sample_user"} + DB.Save(&user) + if DB.Model(&user).Association("Emails").Append(Email{Email: "not-1duplicated@gmail.com"}, Email{Email: "not-duplicated2@gmail.com"}).Error != nil { + t.Errorf("Should get no error when append two emails for user") + } + + if DB.Model(&user).Association("Emails").Append(Email{Email: "duplicated@gmail.com"}, Email{Email: "duplicated@gmail.com"}).Error == nil { + t.Errorf("Should get no duplicated email error when insert duplicated emails for a user") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope.TableName(), 
"idx_email_email_and_user_id") { + t.Errorf("Email's index idx_email_email_and_user_id should be deleted") + } + + if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error != nil { + t.Errorf("Should be able to create duplicated emails after remove unique index") + } +} + +type BigEmail struct { + Id int64 + UserId int64 + Email string `sql:"index:idx_email_agent"` + UserAgent string `sql:"index:idx_email_agent"` + RegisteredAt time.Time `sql:"unique_index"` + CreatedAt time.Time + UpdatedAt time.Time +} + +func (b BigEmail) TableName() string { + return "emails" +} + +func TestAutoMigration(t *testing.T) { + DB.AutoMigrate(&Address{}) + if err := DB.Table("emails").AutoMigrate(&BigEmail{}).Error; err != nil { + t.Errorf("Auto Migrate should not raise any error") + } + + DB.Save(&BigEmail{Email: "jinzhu@example.org", UserAgent: "pc", RegisteredAt: time.Now()}) + + scope := DB.NewScope(&BigEmail{}) + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_agent") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope.TableName(), "uix_emails_registered_at") { + t.Errorf("Failed to create index") + } + + var bigemail BigEmail + DB.First(&bigemail, "user_agent = ?", "pc") + if bigemail.Email != "jinzhu@example.org" || bigemail.UserAgent != "pc" || bigemail.RegisteredAt.IsZero() { + t.Error("Big Emails should be saved and fetched correctly") + } +} diff --git a/vendor/github.com/jinzhu/gorm/model.go b/vendor/github.com/jinzhu/gorm/model.go new file mode 100644 index 0000000..f37ff7e --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/model.go @@ -0,0 +1,14 @@ +package gorm + +import "time" + +// Model base model definition, including fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which could be embedded in your models +// type User struct { +// gorm.Model +// } +type Model struct { + ID uint `gorm:"primary_key"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time `sql:"index"` +} diff --git a/vendor/github.com/jinzhu/gorm/model_struct.go b/vendor/github.com/jinzhu/gorm/model_struct.go new file mode 100644 index 0000000..6df615d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/model_struct.go @@ -0,0 +1,542 @@ +package gorm + +import ( + "database/sql" + "errors" + "go/ast" + "reflect" + "strings" + "sync" + "time" + + "github.com/jinzhu/inflection" +) + +// DefaultTableNameHandler default table name handler +var DefaultTableNameHandler = func(db *DB, defaultTableName string) string { + return defaultTableName +} + +type safeModelStructsMap struct { + m map[reflect.Type]*ModelStruct + l *sync.RWMutex +} + +func (s *safeModelStructsMap) Set(key reflect.Type, value *ModelStruct) { + s.l.Lock() + defer s.l.Unlock() + s.m[key] = value +} + +func (s *safeModelStructsMap) Get(key reflect.Type) *ModelStruct { + s.l.RLock() + defer s.l.RUnlock() + return s.m[key] +} + +func newModelStructsMap() *safeModelStructsMap { + return &safeModelStructsMap{l: new(sync.RWMutex), m: make(map[reflect.Type]*ModelStruct)} +} + +var modelStructsMap = newModelStructsMap() + +// ModelStruct model definition +type ModelStruct struct { + PrimaryFields []*StructField + StructFields []*StructField + ModelType reflect.Type + defaultTableName string +} + +// TableName get model's table name +func (s *ModelStruct) TableName(db *DB) string { + return DefaultTableNameHandler(db, s.defaultTableName) +} + +// StructField model field's struct definition +type StructField struct { + DBName string + Name string + Names 
[]string + IsPrimaryKey bool + IsNormal bool + IsIgnored bool + IsScanner bool + HasDefaultValue bool + Tag reflect.StructTag + TagSettings map[string]string + Struct reflect.StructField + IsForeignKey bool + Relationship *Relationship +} + +func (structField *StructField) clone() *StructField { + return &StructField{ + DBName: structField.DBName, + Name: structField.Name, + Names: structField.Names, + IsPrimaryKey: structField.IsPrimaryKey, + IsNormal: structField.IsNormal, + IsIgnored: structField.IsIgnored, + IsScanner: structField.IsScanner, + HasDefaultValue: structField.HasDefaultValue, + Tag: structField.Tag, + TagSettings: structField.TagSettings, + Struct: structField.Struct, + IsForeignKey: structField.IsForeignKey, + Relationship: structField.Relationship, + } +} + +// Relationship described the relationship between models +type Relationship struct { + Kind string + PolymorphicType string + PolymorphicDBName string + ForeignFieldNames []string + ForeignDBNames []string + AssociationForeignFieldNames []string + AssociationForeignDBNames []string + JoinTableHandler JoinTableHandlerInterface +} + +func getForeignField(column string, fields []*StructField) *StructField { + for _, field := range fields { + if field.Name == column || field.DBName == column || field.DBName == ToDBName(column) { + return field + } + } + return nil +} + +// GetModelStruct get value's model struct, relationships based on struct and tag definition +func (scope *Scope) GetModelStruct() *ModelStruct { + var modelStruct ModelStruct + // Scope value can't be nil + if scope.Value == nil { + return &modelStruct + } + + reflectType := reflect.ValueOf(scope.Value).Type() + for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr { + reflectType = reflectType.Elem() + } + + // Scope value need to be a struct + if reflectType.Kind() != reflect.Struct { + return &modelStruct + } + + // Get Cached model struct + if value := modelStructsMap.Get(reflectType); value != nil { + return value + } + + modelStruct.ModelType = reflectType + + // Set default table name + if tabler, ok := reflect.New(reflectType).Interface().(tabler); ok { + modelStruct.defaultTableName = tabler.TableName() + } else { + tableName := ToDBName(reflectType.Name()) + if scope.db == nil || !scope.db.parent.singularTable { + tableName = inflection.Plural(tableName) + } + modelStruct.defaultTableName = tableName + } + + // Get all fields + for i := 0; i < reflectType.NumField(); i++ { + if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) { + field := &StructField{ + Struct: fieldStruct, + Name: fieldStruct.Name, + Names: []string{fieldStruct.Name}, + Tag: fieldStruct.Tag, + TagSettings: parseTagSetting(fieldStruct.Tag), + } + + // is ignored field + if fieldStruct.Tag.Get("sql") == "-" { + field.IsIgnored = true + } else { + if _, ok := field.TagSettings["PRIMARY_KEY"]; ok { + field.IsPrimaryKey = true + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) + } + + if _, ok := field.TagSettings["DEFAULT"]; ok { + field.HasDefaultValue = true + } + + indirectType := fieldStruct.Type + for indirectType.Kind() == reflect.Ptr { + indirectType = indirectType.Elem() + } + + fieldValue := reflect.New(indirectType).Interface() + if _, isScanner := fieldValue.(sql.Scanner); isScanner { + // is scanner + field.IsScanner, field.IsNormal = true, true + } else if _, isTime := fieldValue.(*time.Time); isTime { + // is time + field.IsNormal = true + } else if _, ok := field.TagSettings["EMBEDDED"]; ok || 
fieldStruct.Anonymous { + // is embedded struct + for _, subField := range scope.New(fieldValue).GetStructFields() { + subField = subField.clone() + subField.Names = append([]string{fieldStruct.Name}, subField.Names...) + if subField.IsPrimaryKey { + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField) + } + modelStruct.StructFields = append(modelStruct.StructFields, subField) + } + continue + } else { + // build relationships + switch indirectType.Kind() { + case reflect.Slice: + defer func(field *StructField) { + var ( + relationship = &Relationship{} + toScope = scope.New(reflect.New(field.Struct.Type).Interface()) + foreignKeys []string + associationForeignKeys []string + elemType = field.Struct.Type + ) + + if foreignKey := field.TagSettings["FOREIGNKEY"]; foreignKey != "" { + foreignKeys = strings.Split(field.TagSettings["FOREIGNKEY"], ",") + } + + if foreignKey := field.TagSettings["ASSOCIATIONFOREIGNKEY"]; foreignKey != "" { + associationForeignKeys = strings.Split(field.TagSettings["ASSOCIATIONFOREIGNKEY"], ",") + } + + for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + } + + if elemType.Kind() == reflect.Struct { + if many2many := field.TagSettings["MANY2MANY"]; many2many != "" { + relationship.Kind = "many_to_many" + + // if no foreign keys defined with tag + if len(foreignKeys) == 0 { + for _, field := range modelStruct.PrimaryFields { + foreignKeys = append(foreignKeys, field.DBName) + } + } + + for _, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil { + // source foreign keys (db names) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName) + // join table foreign keys for source + joinTableDBName := ToDBName(reflectType.Name()) + "_" + foreignField.DBName + relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBName) + } + } + + // if no association foreign keys defined with tag + if len(associationForeignKeys) == 0 { + for _, field := range toScope.PrimaryFields() { + associationForeignKeys = append(associationForeignKeys, field.DBName) + } + } + + for _, name := range associationForeignKeys { + if field, ok := toScope.FieldByName(name); ok { + // association foreign keys (db names) + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName) + // join table foreign keys for association + joinTableDBName := ToDBName(elemType.Name()) + "_" + field.DBName + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName) + } + } + + joinTableHandler := JoinTableHandler{} + joinTableHandler.Setup(relationship, many2many, reflectType, elemType) + relationship.JoinTableHandler = &joinTableHandler + field.Relationship = relationship + } else { + // User has many comments, associationType is User, comment use UserID as foreign key + var associationType = reflectType.Name() + var toFields = toScope.GetStructFields() + relationship.Kind = "has_many" + + if polymorphic := field.TagSettings["POLYMORPHIC"]; polymorphic != "" { + // Dog has many toys, tag polymorphic is Owner, then associationType is Owner + // Toy use OwnerID, OwnerType ('dogs') as foreign key + if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil { + associationType = polymorphic + relationship.PolymorphicType = polymorphicType.Name + relationship.PolymorphicDBName = 
polymorphicType.DBName + polymorphicType.IsForeignKey = true + } + } + + // if no foreign keys defined with tag + if len(foreignKeys) == 0 { + // if no association foreign keys defined with tag + if len(associationForeignKeys) == 0 { + for _, field := range modelStruct.PrimaryFields { + foreignKeys = append(foreignKeys, associationType+field.Name) + associationForeignKeys = append(associationForeignKeys, field.Name) + } + } else { + // generate foreign keys from defined association foreign keys + for _, scopeFieldName := range associationForeignKeys { + if foreignField := getForeignField(scopeFieldName, modelStruct.StructFields); foreignField != nil { + foreignKeys = append(foreignKeys, associationType+foreignField.Name) + associationForeignKeys = append(associationForeignKeys, foreignField.Name) + } + } + } + } else { + // generate association foreign keys from foreign keys + if len(associationForeignKeys) == 0 { + for _, foreignKey := range foreignKeys { + if strings.HasPrefix(foreignKey, associationType) { + associationForeignKey := strings.TrimPrefix(foreignKey, associationType) + if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { + associationForeignKeys = append(associationForeignKeys, associationForeignKey) + } + } + } + if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { + associationForeignKeys = []string{scope.PrimaryKey()} + } + } else if len(foreignKeys) != len(associationForeignKeys) { + scope.Err(errors.New("invalid foreign keys, should have same length")) + return + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, toFields); foreignField != nil { + if associationField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); associationField != nil { + // source foreign keys + foreignField.IsForeignKey = true + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName) + + // association foreign keys + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + field.Relationship = relationship + } + } + } else { + field.IsNormal = true + } + }(field) + case reflect.Struct: + defer func(field *StructField) { + var ( + // user has one profile, associationType is User, profile use UserID as foreign key + // user belongs to profile, associationType is Profile, user use ProfileID as foreign key + associationType = reflectType.Name() + relationship = &Relationship{} + toScope = scope.New(reflect.New(field.Struct.Type).Interface()) + toFields = toScope.GetStructFields() + tagForeignKeys []string + tagAssociationForeignKeys []string + ) + + if foreignKey := field.TagSettings["FOREIGNKEY"]; foreignKey != "" { + tagForeignKeys = strings.Split(field.TagSettings["FOREIGNKEY"], ",") + } + + if foreignKey := field.TagSettings["ASSOCIATIONFOREIGNKEY"]; foreignKey != "" { + tagAssociationForeignKeys = strings.Split(field.TagSettings["ASSOCIATIONFOREIGNKEY"], ",") + } + + if polymorphic := field.TagSettings["POLYMORPHIC"]; polymorphic != "" { + // Cat has one toy, tag polymorphic is Owner, then associationType is Owner + // Toy use OwnerID, OwnerType ('cats') as foreign key + if polymorphicType := 
getForeignField(polymorphic+"Type", toFields); polymorphicType != nil { + associationType = polymorphic + relationship.PolymorphicType = polymorphicType.Name + relationship.PolymorphicDBName = polymorphicType.DBName + polymorphicType.IsForeignKey = true + } + } + + // Has One + { + var foreignKeys = tagForeignKeys + var associationForeignKeys = tagAssociationForeignKeys + // if no foreign keys defined with tag + if len(foreignKeys) == 0 { + // if no association foreign keys defined with tag + if len(associationForeignKeys) == 0 { + for _, primaryField := range modelStruct.PrimaryFields { + foreignKeys = append(foreignKeys, associationType+primaryField.Name) + associationForeignKeys = append(associationForeignKeys, primaryField.Name) + } + } else { + // generate foreign keys form association foreign keys + for _, associationForeignKey := range tagAssociationForeignKeys { + if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { + foreignKeys = append(foreignKeys, associationType+foreignField.Name) + associationForeignKeys = append(associationForeignKeys, foreignField.Name) + } + } + } + } else { + // generate association foreign keys from foreign keys + if len(associationForeignKeys) == 0 { + for _, foreignKey := range foreignKeys { + if strings.HasPrefix(foreignKey, associationType) { + associationForeignKey := strings.TrimPrefix(foreignKey, associationType) + if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { + associationForeignKeys = append(associationForeignKeys, associationForeignKey) + } + } + } + if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { + associationForeignKeys = []string{scope.PrimaryKey()} + } + } else if len(foreignKeys) != len(associationForeignKeys) { + scope.Err(errors.New("invalid foreign keys, should have same length")) + return + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, toFields); foreignField != nil { + if scopeField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); scopeField != nil { + foreignField.IsForeignKey = true + // source foreign keys + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName) + + // association foreign keys + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + } + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + relationship.Kind = "has_one" + field.Relationship = relationship + } else { + var foreignKeys = tagForeignKeys + var associationForeignKeys = tagAssociationForeignKeys + + if len(foreignKeys) == 0 { + // generate foreign keys & association foreign keys + if len(associationForeignKeys) == 0 { + for _, primaryField := range toScope.PrimaryFields() { + foreignKeys = append(foreignKeys, field.Name+primaryField.Name) + associationForeignKeys = append(associationForeignKeys, primaryField.Name) + } + } else { + // generate foreign keys with association foreign keys + for _, associationForeignKey := range associationForeignKeys { + if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil { + foreignKeys = append(foreignKeys, field.Name+foreignField.Name) + associationForeignKeys = 
append(associationForeignKeys, foreignField.Name) + } + } + } + } else { + // generate foreign keys & association foreign keys + if len(associationForeignKeys) == 0 { + for _, foreignKey := range foreignKeys { + if strings.HasPrefix(foreignKey, field.Name) { + associationForeignKey := strings.TrimPrefix(foreignKey, field.Name) + if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil { + associationForeignKeys = append(associationForeignKeys, associationForeignKey) + } + } + } + if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { + associationForeignKeys = []string{toScope.PrimaryKey()} + } + } else if len(foreignKeys) != len(associationForeignKeys) { + scope.Err(errors.New("invalid foreign keys, should have same length")) + return + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil { + if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil { + foreignField.IsForeignKey = true + + // association foreign keys + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName) + + // source foreign keys + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + relationship.Kind = "belongs_to" + field.Relationship = relationship + } + } + }(field) + default: + field.IsNormal = true + } + } + } + + // Even it is ignored, also possible to decode db value into the field + if value, ok := field.TagSettings["COLUMN"]; ok { + field.DBName = value + } else { + field.DBName = ToDBName(fieldStruct.Name) + } + + modelStruct.StructFields = append(modelStruct.StructFields, field) + } + } + + if len(modelStruct.PrimaryFields) == 0 { + if field := getForeignField("id", modelStruct.StructFields); field != nil { + field.IsPrimaryKey = true + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) + } + } + + modelStructsMap.Set(reflectType, &modelStruct) + + return &modelStruct +} + +// GetStructFields get model's field structs +func (scope *Scope) GetStructFields() (fields []*StructField) { + return scope.GetModelStruct().StructFields +} + +func parseTagSetting(tags reflect.StructTag) map[string]string { + setting := map[string]string{} + for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} { + tags := strings.Split(str, ";") + for _, value := range tags { + v := strings.Split(value, ":") + k := strings.TrimSpace(strings.ToUpper(v[0])) + if len(v) >= 2 { + setting[k] = strings.Join(v[1:], ":") + } else { + setting[k] = k + } + } + } + return setting +} diff --git a/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go new file mode 100644 index 0000000..8b275d1 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go @@ -0,0 +1,381 @@ +package gorm_test + +import ( + "os" + "reflect" + "sort" + "testing" +) + +type Blog struct { + ID uint `gorm:"primary_key"` + Locale string `gorm:"primary_key"` + Subject string + Body string + Tags []Tag `gorm:"many2many:blog_tags;"` + SharedTags []Tag `gorm:"many2many:shared_blog_tags;ForeignKey:id;AssociationForeignKey:id"` + 
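// In these composite-primary-key models, the many2many tag lists pick which
// source columns feed the join table: `ForeignKey:id` alone keys
// shared_blog_tags by blog ID only (so two Blog rows differing only in
// Locale share join rows), while `ForeignKey:id,locale` on the next field
// keys locale_blog_tags by both columns. Following the join-table naming
// built in model_struct.go above (ToDBName(type) + "_" + column), the
// expected layouts are roughly:
//
//	shared_blog_tags(blog_id, tag_id)
//	locale_blog_tags(blog_id, blog_locale, tag_id)
//
// (Column names inferred from the tag settings; treat this as a sketch.)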
LocaleTags []Tag `gorm:"many2many:locale_blog_tags;ForeignKey:id,locale;AssociationForeignKey:id"` +} + +type Tag struct { + ID uint `gorm:"primary_key"` + Locale string `gorm:"primary_key"` + Value string + Blogs []*Blog `gorm:"many2many:blogs_tags"` +} + +func compareTags(tags []Tag, contents []string) bool { + var tagContents []string + for _, tag := range tags { + tagContents = append(tagContents, tag.Value) + } + sort.Strings(tagContents) + sort.Strings(contents) + return reflect.DeepEqual(tagContents, contents) +} + +func TestManyToManyWithMultiPrimaryKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" { + DB.DropTable(&Blog{}, &Tag{}) + DB.DropTable("blog_tags") + DB.CreateTable(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + Tags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + + DB.Save(&blog) + if !compareTags(blog.Tags, []string{"tag1", "tag2"}) { + t.Errorf("Blog should have two tags") + } + + // Append + var tag3 = &Tag{Locale: "ZH", Value: "tag3"} + DB.Model(&blog).Association("Tags").Append([]*Tag{tag3}) + if !compareTags(blog.Tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Blog should have three tags after Append") + } + + if DB.Model(&blog).Association("Tags").Count() != 3 { + t.Errorf("Blog should have three tags after Append") + } + + var tags []Tag + DB.Model(&blog).Related(&tags, "Tags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + var blog1 Blog + DB.Preload("Tags").Find(&blog1) + if !compareTags(blog1.Tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should preload many2many relations") + } + + // Replace + var tag5 = &Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog).Association("Tags").Replace(tag5, tag6) + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "Tags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + if DB.Model(&blog).Association("Tags").Count() != 2 { + t.Errorf("Blog should have two tags after Replace") + } + + // Delete + DB.Model(&blog).Association("Tags").Delete(tag5) + var tags3 []Tag + DB.Model(&blog).Related(&tags3, "Tags") + if !compareTags(tags3, []string{"tag6"}) { + t.Errorf("Should find 1 tag after Delete") + } + + if DB.Model(&blog).Association("Tags").Count() != 1 { + t.Errorf("Blog should have one tag after Delete") + } + + DB.Model(&blog).Association("Tags").Delete(tag3) + var tags4 []Tag + DB.Model(&blog).Related(&tags4, "Tags") + if !compareTags(tags4, []string{"tag6"}) { + t.Errorf("Tag should not be deleted when Delete is called with an unrelated tag") + } + + // Clear + DB.Model(&blog).Association("Tags").Clear() + if DB.Model(&blog).Association("Tags").Count() != 0 { + t.Errorf("All tags should be cleared") + } + } +} + +func TestManyToManyWithCustomizedForeignKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" { + DB.DropTable(&Blog{}, &Tag{}) + DB.DropTable("shared_blog_tags") + DB.CreateTable(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + SharedTags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + DB.Save(&blog) + + blog2 := Blog{ + ID: blog.ID, + Locale: "EN", + } + DB.Create(&blog2) + + if !compareTags(blog.SharedTags, []string{"tag1", "tag2"}) { + t.Errorf("Blog should have two tags") + } + + // Append + var 
tag3 = &Tag{Locale: "ZH", Value: "tag3"} + DB.Model(&blog).Association("SharedTags").Append([]*Tag{tag3}) + if !compareTags(blog.SharedTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog).Association("SharedTags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog2).Association("SharedTags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + var tags []Tag + DB.Model(&blog).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + DB.Model(&blog2).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + var blog1 Blog + DB.Preload("SharedTags").Find(&blog1) + if !compareTags(blog1.SharedTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Preload many2many relations") + } + + var tag4 = &Tag{Locale: "ZH", Value: "tag4"} + DB.Model(&blog2).Association("SharedTags").Append(tag4) + + DB.Model(&blog).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) { + t.Errorf("Should find 3 tags with Related") + } + + DB.Model(&blog2).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) { + t.Errorf("Should find 3 tags with Related") + } + + // Replace + var tag5 = &Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog2).Association("SharedTags").Replace(tag5, tag6) + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "SharedTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + DB.Model(&blog2).Related(&tags2, "SharedTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + if DB.Model(&blog).Association("SharedTags").Count() != 2 { + t.Errorf("Blog should has three tags after Replace") + } + + // Delete + DB.Model(&blog).Association("SharedTags").Delete(tag5) + var tags3 []Tag + DB.Model(&blog).Related(&tags3, "SharedTags") + if !compareTags(tags3, []string{"tag6"}) { + t.Errorf("Should find 1 tags after Delete") + } + + if DB.Model(&blog).Association("SharedTags").Count() != 1 { + t.Errorf("Blog should has three tags after Delete") + } + + DB.Model(&blog2).Association("SharedTags").Delete(tag3) + var tags4 []Tag + DB.Model(&blog).Related(&tags4, "SharedTags") + if !compareTags(tags4, []string{"tag6"}) { + t.Errorf("Tag should not be deleted when Delete with a unrelated tag") + } + + // Clear + DB.Model(&blog2).Association("SharedTags").Clear() + if DB.Model(&blog).Association("SharedTags").Count() != 0 { + t.Errorf("All tags should be cleared") + } + } +} + +func TestManyToManyWithCustomizedForeignKeys2(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" { + DB.DropTable(&Blog{}, &Tag{}) + DB.DropTable("locale_blog_tags") + DB.CreateTable(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + LocaleTags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + DB.Save(&blog) + + blog2 := Blog{ + ID: blog.ID, + Locale: "EN", + } + DB.Create(&blog2) + + // Append + var tag3 = &Tag{Locale: "ZH", Value: "tag3"} + DB.Model(&blog).Association("LocaleTags").Append([]*Tag{tag3}) + if !compareTags(blog.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + 
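// Unlike shared_blog_tags above, locale_blog_tags also carries blog_locale
// (ForeignKey:id,locale), so tags appended to the ZH blog must not be
// visible through blog2, the EN row with the same ID; the checks below
// assert exactly that separation.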
t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog should has 0 tags after ZH Blog Append") + } + + var tags []Tag + DB.Model(&blog).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + DB.Model(&blog2).Related(&tags, "LocaleTags") + if len(tags) != 0 { + t.Errorf("Should find 0 tags with Related for EN Blog") + } + + var blog1 Blog + DB.Preload("LocaleTags").Find(&blog1, "locale = ? AND id = ?", "ZH", blog.ID) + if !compareTags(blog1.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Preload many2many relations") + } + + var tag4 = &Tag{Locale: "ZH", Value: "tag4"} + DB.Model(&blog2).Association("LocaleTags").Append(tag4) + + DB.Model(&blog).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related for EN Blog") + } + + DB.Model(&blog2).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag4"}) { + t.Errorf("Should find 1 tags with Related for EN Blog") + } + + // Replace + var tag5 = &Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog2).Association("LocaleTags").Replace(tag5, tag6) + + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "LocaleTags") + if !compareTags(tags2, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("CN Blog's tags should not be changed after EN Blog Replace") + } + + var blog11 Blog + DB.Preload("LocaleTags").First(&blog11, "id = ? AND locale = ?", blog.ID, blog.Locale) + if !compareTags(blog11.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("CN Blog's tags should not be changed after EN Blog Replace") + } + + DB.Model(&blog2).Related(&tags2, "LocaleTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + var blog21 Blog + DB.Preload("LocaleTags").First(&blog21, "id = ? 
AND locale = ?", blog2.ID, blog2.Locale) + if !compareTags(blog21.LocaleTags, []string{"tag5", "tag6"}) { + t.Errorf("EN Blog's tags should be changed after Replace") + } + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after Replace") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 2 { + t.Errorf("EN Blog should has two tags after Replace") + } + + // Delete + DB.Model(&blog).Association("LocaleTags").Delete(tag5) + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after Delete with EN's tag") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 2 { + t.Errorf("EN Blog should has two tags after ZH Blog Delete with EN's tag") + } + + DB.Model(&blog2).Association("LocaleTags").Delete(tag5) + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after EN Blog Delete with EN's tag") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 1 { + t.Errorf("EN Blog should has 1 tags after EN Blog Delete with EN's tag") + } + + // Clear + DB.Model(&blog2).Association("LocaleTags").Clear() + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog's tags should not be cleared when clear EN Blog's tags") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog's tags should be cleared when clear EN Blog's tags") + } + + DB.Model(&blog).Association("LocaleTags").Clear() + if DB.Model(&blog).Association("LocaleTags").Count() != 0 { + t.Errorf("ZH Blog's tags should be cleared when clear ZH Blog's tags") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog's tags should be cleared") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/pointer_test.go b/vendor/github.com/jinzhu/gorm/pointer_test.go new file mode 100644 index 0000000..2a68a5a --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/pointer_test.go @@ -0,0 +1,84 @@ +package gorm_test + +import "testing" + +type PointerStruct struct { + ID int64 + Name *string + Num *int +} + +type NormalStruct struct { + ID int64 + Name string + Num int +} + +func TestPointerFields(t *testing.T) { + DB.DropTable(&PointerStruct{}) + DB.AutoMigrate(&PointerStruct{}) + var name = "pointer struct 1" + var num = 100 + pointerStruct := PointerStruct{Name: &name, Num: &num} + if DB.Create(&pointerStruct).Error != nil { + t.Errorf("Failed to save pointer struct") + } + + var pointerStructResult PointerStruct + if err := DB.First(&pointerStructResult, "id = ?", pointerStruct.ID).Error; err != nil || *pointerStructResult.Name != name || *pointerStructResult.Num != num { + t.Errorf("Failed to query saved pointer struct") + } + + var tableName = DB.NewScope(&PointerStruct{}).TableName() + + var normalStruct NormalStruct + DB.Table(tableName).First(&normalStruct) + if normalStruct.Name != name || normalStruct.Num != num { + t.Errorf("Failed to query saved Normal struct") + } + + var nilPointerStruct = PointerStruct{} + if err := DB.Create(&nilPointerStruct).Error; err != nil { + t.Error("Failed to save nil pointer struct", err) + } + + var pointerStruct2 PointerStruct + if err := DB.First(&pointerStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Error("Failed to query saved nil pointer struct", err) + } + + var normalStruct2 NormalStruct + if err := DB.Table(tableName).First(&normalStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Error("Failed to query saved 
nil pointer struct", err) + } + + var partialNilPointerStruct1 = PointerStruct{Num: &num} + if err := DB.Create(&partialNilPointerStruct1).Error; err != nil { + t.Error("Failed to save partial nil pointer struct", err) + } + + var pointerStruct3 PointerStruct + if err := DB.First(&pointerStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || *pointerStruct3.Num != num { + t.Error("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct3 NormalStruct + if err := DB.Table(tableName).First(&normalStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || normalStruct3.Num != num { + t.Error("Failed to query saved partial pointer struct", err) + } + + var partialNilPointerStruct2 = PointerStruct{Name: &name} + if err := DB.Create(&partialNilPointerStruct2).Error; err != nil { + t.Error("Failed to save partial nil pointer struct", err) + } + + var pointerStruct4 PointerStruct + if err := DB.First(&pointerStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || *pointerStruct4.Name != name { + t.Error("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct4 NormalStruct + if err := DB.Table(tableName).First(&normalStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || normalStruct4.Name != name { + t.Error("Failed to query saved partial pointer struct", err) + } +} diff --git a/vendor/github.com/jinzhu/gorm/polymorphic_test.go b/vendor/github.com/jinzhu/gorm/polymorphic_test.go new file mode 100644 index 0000000..df573f9 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/polymorphic_test.go @@ -0,0 +1,219 @@ +package gorm_test + +import ( + "reflect" + "sort" + "testing" +) + +type Cat struct { + Id int + Name string + Toy Toy `gorm:"polymorphic:Owner;"` +} + +type Dog struct { + Id int + Name string + Toys []Toy `gorm:"polymorphic:Owner;"` +} + +type Toy struct { + Id int + Name string + OwnerId int + OwnerType string +} + +var compareToys = func(toys []Toy, contents []string) bool { + var toyContents []string + for _, toy := range toys { + toyContents = append(toyContents, toy.Name) + } + sort.Strings(toyContents) + sort.Strings(contents) + return reflect.DeepEqual(toyContents, contents) +} + +func TestPolymorphic(t *testing.T) { + cat := Cat{Name: "Mr. 
Bigglesworth", Toy: Toy{Name: "cat toy"}} + dog := Dog{Name: "Pluto", Toys: []Toy{{Name: "dog toy 1"}, {Name: "dog toy 2"}}} + DB.Save(&cat).Save(&dog) + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Dog's toys count should be 2") + } + + // Query + var catToys []Toy + if DB.Model(&cat).Related(&catToys, "Toy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(catToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if catToys[0].Name != cat.Toy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + var dogToys []Toy + if DB.Model(&dog).Related(&dogToys, "Toys").RecordNotFound() { + t.Errorf("Did not find any polymorphic has many associations") + } else if len(dogToys) != len(dog.Toys) { + t.Errorf("Should have found all polymorphic has many associations") + } + + var catToy Toy + DB.Model(&cat).Association("Toy").Find(&catToy) + if catToy.Name != cat.Toy.Name { + t.Errorf("Should find has one polymorphic association") + } + + var dogToys1 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys1) + if !compareToys(dogToys1, []string{"dog toy 1", "dog toy 2"}) { + t.Errorf("Should find has many polymorphic association") + } + + // Append + DB.Model(&cat).Association("Toy").Append(&Toy{ + Name: "cat toy 2", + }) + + var catToy2 Toy + DB.Model(&cat).Association("Toy").Find(&catToy2) + if catToy2.Name != "cat toy 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1 after Append") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Should return two polymorphic has many associations") + } + + DB.Model(&dog).Association("Toys").Append(&Toy{ + Name: "dog toy 3", + }) + + var dogToys2 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys2) + if !compareToys(dogToys2, []string{"dog toy 1", "dog toy 2", "dog toy 3"}) { + t.Errorf("Dog's toys should be updated with Append") + } + + if DB.Model(&dog).Association("Toys").Count() != 3 { + t.Errorf("Should return three polymorphic has many associations") + } + + // Replace + DB.Model(&cat).Association("Toy").Replace(&Toy{ + Name: "cat toy 3", + }) + + var catToy3 Toy + DB.Model(&cat).Association("Toy").Find(&catToy3) + if catToy3.Name != "cat toy 3" { + t.Errorf("Should update has one polymorphic association with Replace") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1 after Replace") + } + + if DB.Model(&dog).Association("Toys").Count() != 3 { + t.Errorf("Should return three polymorphic has many associations") + } + + DB.Model(&dog).Association("Toys").Replace(&Toy{ + Name: "dog toy 4", + }, []Toy{ + {Name: "dog toy 5"}, {Name: "dog toy 6"}, {Name: "dog toy 7"}, + }) + + var dogToys3 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys3) + if !compareToys(dogToys3, []string{"dog toy 4", "dog toy 5", "dog toy 6", "dog toy 7"}) { + t.Errorf("Dog's toys should be updated with Replace") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Should return three polymorphic has many associations") + } + + // Delete + DB.Model(&cat).Association("Toy").Delete(&catToy2) + + var catToy4 Toy + DB.Model(&cat).Association("Toy").Find(&catToy4) + if catToy4.Name != "cat toy 3" { + t.Errorf("Should not 
update has one polymorphic association when Delete a unrelated Toy") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should be 4") + } + + DB.Model(&cat).Association("Toy").Delete(&catToy3) + + if !DB.Model(&cat).Related(&Toy{}, "Toy").RecordNotFound() { + t.Errorf("Toy should be deleted with Delete") + } + + if DB.Model(&cat).Association("Toy").Count() != 0 { + t.Errorf("Cat's toys count should be 0 after Delete") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should not be changed when delete cat's toy") + } + + DB.Model(&dog).Association("Toys").Delete(&dogToys2) + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should not be changed when delete unrelated toys") + } + + DB.Model(&dog).Association("Toys").Delete(&dogToys3) + + if DB.Model(&dog).Association("Toys").Count() != 0 { + t.Errorf("Dog's toys count should be deleted with Delete") + } + + // Clear + DB.Model(&cat).Association("Toy").Append(&Toy{ + Name: "cat toy 2", + }) + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys should be added with Append") + } + + DB.Model(&cat).Association("Toy").Clear() + + if DB.Model(&cat).Association("Toy").Count() != 0 { + t.Errorf("Cat's toys should be cleared with Clear") + } + + DB.Model(&dog).Association("Toys").Append(&Toy{ + Name: "dog toy 8", + }) + + if DB.Model(&dog).Association("Toys").Count() != 1 { + t.Errorf("Dog's toys should be added with Append") + } + + DB.Model(&dog).Association("Toys").Clear() + + if DB.Model(&dog).Association("Toys").Count() != 0 { + t.Errorf("Dog's toys should be cleared with Clear") + } +} diff --git a/vendor/github.com/jinzhu/gorm/preload_test.go b/vendor/github.com/jinzhu/gorm/preload_test.go new file mode 100644 index 0000000..5c49ecc --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/preload_test.go @@ -0,0 +1,1327 @@ +package gorm_test + +import ( + "database/sql" + "encoding/json" + "os" + "reflect" + "testing" + + "github.com/jinzhu/gorm" +) + +func getPreloadUser(name string) *User { + return getPreparedUser(name, "Preload") +} + +func checkUserHasPreloadData(user User, t *testing.T) { + u := getPreloadUser(user.Name) + if user.BillingAddress.Address1 != u.BillingAddress.Address1 { + t.Error("Failed to preload user's BillingAddress") + } + + if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 { + t.Error("Failed to preload user's ShippingAddress") + } + + if user.CreditCard.Number != u.CreditCard.Number { + t.Error("Failed to preload user's CreditCard") + } + + if user.Company.Name != u.Company.Name { + t.Error("Failed to preload user's Company") + } + + if len(user.Emails) != len(u.Emails) { + t.Error("Failed to preload user's Emails") + } else { + var found int + for _, e1 := range u.Emails { + for _, e2 := range user.Emails { + if e1.Email == e2.Email { + found++ + break + } + } + } + if found != len(u.Emails) { + t.Error("Failed to preload user's email details") + } + } +} + +func TestPreload(t *testing.T) { + user1 := getPreloadUser("user1") + DB.Save(user1) + + preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company") + var user User + preloadDB.Find(&user) + checkUserHasPreloadData(user, t) + + user2 := getPreloadUser("user2") + DB.Save(user2) + + user3 := getPreloadUser("user3") + DB.Save(user3) + + var users []User + preloadDB.Find(&users) + + for _, user := range users { + checkUserHasPreloadData(user, t) + } + + var users2 []*User + preloadDB.Find(&users2) + + for _, user := range users2 { + checkUserHasPreloadData(*user, t) + } + + var users3 []*User + preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3) + + for _, user := range users3 { + if user.Name == user3.Name { + if len(user.Emails) != 1 { + t.Errorf("should only preload one emails for user3 when with condition") + } + } else if len(user.Emails) != 0 { + t.Errorf("should not preload any emails for other users when with condition") + } + } +} + +func TestNestedPreload1(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []*Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2s: []Level2{ + { + Level1s: []*Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + Level1s: []*Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + Name string + ID uint + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if 
!reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload4(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +// Slice: []Level3 +func TestNestedPreload5(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}} + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload6(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value5"}, + }, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload7(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + 
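// Shared fixture pattern for the TestNestedPreload variants: drop and
// re-create the tables so Find starts from an empty database and the
// reflect.DeepEqual comparison against `want` is exact. A nested path such
// as Preload("Level2s.Level1") is resolved by gorm v1 with a separate query
// per association level rather than a JOIN.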
if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value3"}}, + {Level1: Level1{Value: "value4"}}, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload8(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload9(t *testing.T) { + type ( + Level0 struct { + ID uint + Value string + Level1ID uint + } + Level1 struct { + ID uint + Value string + Level2ID uint + Level2_1ID uint + Level0s []Level0 + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level2_1 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + Level2_1 Level2_1 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level2_1{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level0{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + { + Value: "value1-1", + Level0s: []Level0{{Value: "Level0-1"}}, + }, + { + Value: "value2-2", + Level0s: []Level0{{Value: "Level0-2"}}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + {Value: "value3-3"}, + {Value: "value4-4"}, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", 
toJSONString(got), toJSONString(want)) + } +} + +type LevelA1 struct { + ID uint + Value string +} + +type LevelA2 struct { + ID uint + Value string + LevelA3s []*LevelA3 +} + +type LevelA3 struct { + ID uint + Value string + LevelA1ID sql.NullInt64 + LevelA1 *LevelA1 + LevelA2ID sql.NullInt64 + LevelA2 *LevelA2 +} + +func TestNestedPreload10(t *testing.T) { + DB.DropTableIfExists(&LevelA3{}) + DB.DropTableIfExists(&LevelA2{}) + DB.DropTableIfExists(&LevelA1{}) + + if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil { + t.Error(err) + } + + levelA1 := &LevelA1{Value: "foo"} + if err := DB.Save(levelA1).Error; err != nil { + t.Error(err) + } + + want := []*LevelA2{ + { + Value: "bar", + LevelA3s: []*LevelA3{ + { + Value: "qux", + LevelA1: levelA1, + }, + }, + }, + { + Value: "bar 2", + }, + } + for _, levelA2 := range want { + if err := DB.Save(levelA2).Error; err != nil { + t.Error(err) + } + } + + var got []*LevelA2 + if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelB1 struct { + ID uint + Value string + LevelB3s []*LevelB3 +} + +type LevelB2 struct { + ID uint + Value string +} + +type LevelB3 struct { + ID uint + Value string + LevelB1ID sql.NullInt64 + LevelB1 *LevelB1 + LevelB2s []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"` +} + +func TestNestedPreload11(t *testing.T) { + DB.DropTableIfExists(&LevelB2{}) + DB.DropTableIfExists(&LevelB3{}) + DB.DropTableIfExists(&LevelB1{}) + if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil { + t.Error(err) + } + + levelB1 := &LevelB1{Value: "foo"} + if err := DB.Create(levelB1).Error; err != nil { + t.Error(err) + } + + levelB3 := &LevelB3{ + Value: "bar", + LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)}, + } + if err := DB.Create(levelB3).Error; err != nil { + t.Error(err) + } + levelB1.LevelB3s = []*LevelB3{levelB3} + + want := []*LevelB1{levelB1} + var got []*LevelB1 + if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" { + return + } + + type ( + Level1 struct { + ID uint `gorm:"primary_key;"` + LanguageCode string `gorm:"primary_key"` + Value string + } + Level2 struct { + ID uint `gorm:"primary_key;"` + LanguageCode string `gorm:"primary_key"` + Value string + Level1s []Level1 `gorm:"many2many:levels;"` + } + ) + + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{ + {Value: "ru", LanguageCode: "ru"}, + {Value: "en", LanguageCode: "en"}, + }} + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{ + {Value: "zh", LanguageCode: "zh"}, + {Value: "de", LanguageCode: "de"}, + }} + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level2 + if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got 
%s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level2 + if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level2 + if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2})) + } + + var got4 []Level2 + if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level1s = []Level1{ruLevel1} + got2.Level1s = []Level1{zhLevel1} + if !reflect.DeepEqual(got4, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2})) + } + + if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil { + t.Error(err) + } +} + +func TestManyToManyPreloadForNestedPointer(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:levels;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Bob", + Level2: &Level2{ + Value: "Foo", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, + } + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level3{ + Value: "Tom", + Level2: &Level2{ + Value: "Bar", + Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }, + }, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level3 + if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level3{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2})) + } + + var got4 []Level3 + if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var got5 Level3 + DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus") + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level2.Level1s = []*Level1{&ruLevel1} + got2.Level2.Level1s = []*Level1{&zhLevel1} + if !reflect.DeepEqual(got4, []Level3{got, got2}) { + 
t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2})) + } +} + +func TestNestedManyToManyPreload(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2s []Level2 `gorm:"many2many:level2_level3;"` + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + DB.DropTableIfExists("level2_level3") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Level3", + Level2s: []Level2{ + { + Value: "Bob", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, { + Value: "Tom", + Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }, + }, + }, + } + + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedManyToManyPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Level3", + Level2: &Level2{ + Value: "Bob", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, + } + + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedManyToManyPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + level1Zh := &Level1{Value: "zh"} + level1Ru := &Level1{Value: "ru"} + level1En := &Level1{Value: "en"} + + level21 := &Level2{ + Value: "Level2-1", + Level1s: []*Level1{level1Zh, level1Ru}, + } + + level22 := &Level2{ + Value: "Level2-2", + Level1s: []*Level1{level1Zh, level1En}, + } + + wants := []*Level3{ + { + Value: "Level3-1", + Level2: level21, + }, + { + Value: "Level3-2", + Level2: level22, + }, + { + Value: "Level3-3", + Level2: level21, + 
}, + } + + for _, want := range wants { + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + } + + var gots []*Level3 + if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB { + return db.Order("level1.id ASC") + }).Find(&gots).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(gots, wants) { + t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants)) + } +} + +func TestNestedManyToManyPreload4(t *testing.T) { + type ( + Level4 struct { + ID uint + Value string + Level3ID uint + } + Level3 struct { + ID uint + Value string + Level4s []*Level4 + } + Level2 struct { + ID uint + Value string + Level3s []*Level3 `gorm:"many2many:level2_level3;"` + } + Level1 struct { + ID uint + Value string + Level2s []*Level2 `gorm:"many2many:level1_level2;"` + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level4{}) + DB.DropTableIfExists("level1_level2") + DB.DropTableIfExists("level2_level3") + + dummy := Level1{ + Value: "Level1", + Level2s: []*Level2{{ + Value: "Level2", + Level3s: []*Level3{{ + Value: "Level3", + Level4s: []*Level4{{ + Value: "Level4", + }}, + }}, + }}, + } + + if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + if err := DB.Save(&dummy).Error; err != nil { + t.Error(err) + } + + var level1 Level1 + if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil { + t.Error(err) + } +} + +func TestManyToManyPreloadForPointer(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:levels;"` + } + ) + + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level2{Value: "Bob", Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }} + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level2{Value: "Tom", Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }} + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level2 + if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level2 + if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level2 + if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2})) + } + + var got4 []Level2 + if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var got5 Level2 + DB.Preload("Level1s").First(&got5, "value = ?", "bogus") + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level1s = []*Level1{&ruLevel1} + got2.Level1s = []*Level1{&zhLevel1} + if 
!reflect.DeepEqual(got4, []Level2{got, got2}) {
+		t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2}))
+	}
+}
+
+func TestNilPointerSlice(t *testing.T) {
+	type (
+		Level3 struct {
+			ID    uint
+			Value string
+		}
+		Level2 struct {
+			ID       uint
+			Value    string
+			Level3ID uint
+			Level3   *Level3
+		}
+		Level1 struct {
+			ID       uint
+			Value    string
+			Level2ID uint
+			Level2   *Level2
+		}
+	)
+
+	DB.DropTableIfExists(&Level3{})
+	DB.DropTableIfExists(&Level2{})
+	DB.DropTableIfExists(&Level1{})
+
+	if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil {
+		t.Error(err)
+	}
+
+	want := Level1{Value: "Bob", Level2: &Level2{
+		Value: "en",
+		Level3: &Level3{
+			Value: "native",
+		},
+	}}
+	if err := DB.Save(&want).Error; err != nil {
+		t.Error(err)
+	}
+
+	want2 := Level1{Value: "Tom", Level2: nil}
+	if err := DB.Save(&want2).Error; err != nil {
+		t.Error(err)
+	}
+
+	var got []Level1
+	if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil {
+		t.Error(err)
+	}
+
+	if len(got) != 2 {
+		t.Errorf("got %v items, expected 2", len(got))
+	}
+
+	if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) {
+		t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want))
+	}
+
+	if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) {
+		t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2))
+	}
+}
+
+func toJSONString(v interface{}) []byte {
+	r, _ := json.MarshalIndent(v, "", " ")
+	return r
+}
diff --git a/vendor/github.com/jinzhu/gorm/query_test.go b/vendor/github.com/jinzhu/gorm/query_test.go
new file mode 100644
index 0000000..7dc3d91
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/query_test.go
@@ -0,0 +1,636 @@
+package gorm_test
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/jinzhu/gorm"
+	"github.com/jinzhu/now"
+
+	"testing"
+	"time"
+)
+
+func TestFirstAndLast(t *testing.T) {
+	DB.Save(&User{Name: "user1", Emails: []Email{{Email: "user1@example.com"}}})
+	DB.Save(&User{Name: "user2", Emails: []Email{{Email: "user2@example.com"}}})
+
+	var user1, user2, user3, user4 User
+	DB.First(&user1)
+	DB.Order("id").Limit(1).Find(&user2)
+
+	DB.Last(&user3)
+	DB.Order("id desc").Limit(1).Find(&user4)
+	if user1.Id != user2.Id || user3.Id != user4.Id {
+		t.Errorf("First and Last should be ordered by primary key")
+	}
+
+	var users []User
+	DB.First(&users)
+	if len(users) != 1 {
+		t.Errorf("Should find first record as slice")
+	}
+
+	var user User
+	if DB.Joins("left join emails on emails.user_id = users.id").First(&user).Error != nil {
+		t.Errorf("Should not raise any error when querying with a joined table")
+	}
+
+	if user.Email != "" {
+		t.Errorf("User's Email should be blank as no one set it")
+	}
+}
+
+func TestFirstAndLastWithNoStdPrimaryKey(t *testing.T) {
+	DB.Save(&Animal{Name: "animal1"})
+	DB.Save(&Animal{Name: "animal2"})
+
+	var animal1, animal2, animal3, animal4 Animal
+	DB.First(&animal1)
+	DB.Order("counter").Limit(1).Find(&animal2)
+
+	DB.Last(&animal3)
+	DB.Order("counter desc").Limit(1).Find(&animal4)
+	if animal1.Counter != animal2.Counter || animal3.Counter != animal4.Counter {
+		t.Errorf("First and Last should work correctly")
+	}
+}
+
+func TestUIntPrimaryKey(t *testing.T) {
+	var animal Animal
+	DB.First(&animal, uint64(1))
+	if animal.Counter != 1 {
+		t.Errorf("Fetching a record with a non-int primary key should work, but failed")
+	}
+
+	DB.Model(Animal{}).Where(Animal{Counter: uint64(2)}).Scan(&animal)
+	if animal.Counter != 2 {
+		t.Errorf("Fetching a record with a non-int primary key should work, but failed")
+	}
+}
+
+func TestStringPrimaryKeyForNumericValueStartingWithZero(t *testing.T) {
+	type AddressByZipCode struct {
+		ZipCode string `gorm:"primary_key"`
+		Address string
+	}
+
+	DB.AutoMigrate(&AddressByZipCode{})
+	DB.Create(&AddressByZipCode{ZipCode: "00501", Address: "Holtsville"})
+
+	var address AddressByZipCode
+	DB.First(&address, "00501")
+	if address.ZipCode != "00501" {
+		t.Errorf("Fetching a record with a string primary key holding a numeric value that starts with zero should work, but failed")
+	}
+}
+
+func TestFindAsSliceOfPointers(t *testing.T) {
+	DB.Save(&User{Name: "user"})
+
+	var users []User
+	DB.Find(&users)
+
+	var userPointers []*User
+	DB.Find(&userPointers)
+
+	if len(users) == 0 || len(users) != len(userPointers) {
+		t.Errorf("Should find the same records as a slice of pointers")
+	}
+}
+
+func TestSearchWithPlainSQL(t *testing.T) {
+	user1 := User{Name: "PlainSqlUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "PlainSqlUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "PlainSqlUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+	scopedb := DB.Where("name LIKE ?", "%PlainSqlUser%")
+
+	if DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() {
+		t.Errorf("Search with plain SQL")
+	}
+
+	if DB.Where("name LIKE ?", "%"+user1.Name+"%").First(&User{}).RecordNotFound() {
+		t.Errorf("Search with plain SQL (LIKE)")
+	}
+
+	var users []User
+	DB.Find(&users, "name LIKE ? and age > ?", "%PlainSqlUser%", 1)
+	if len(users) != 2 {
+		t.Errorf("Should find 2 users with age > 1, but got %v", len(users))
+	}
+
+	DB.Where("name LIKE ?", "%PlainSqlUser%").Where("age >= ?", 1).Find(&users)
+	if len(users) != 3 {
+		t.Errorf("Should find 3 users with age >= 1, but got %v", len(users))
+	}
+
+	scopedb.Where("age <> ?", 20).Find(&users)
+	if len(users) != 2 {
+		t.Errorf("Should find 2 users with age != 20, but got %v", len(users))
+	}
+
+	scopedb.Where("birthday > ?", now.MustParse("2000-1-1")).Find(&users)
+	if len(users) != 2 {
+		t.Errorf("Should find 2 users with birthday > 2000-1-1, but got %v", len(users))
+	}
+
+	scopedb.Where("birthday > ?", "2002-10-10").Find(&users)
+	if len(users) != 2 {
+		t.Errorf("Should find 2 users with birthday > 2002-10-10, but got %v", len(users))
+	}
+
+	scopedb.Where("birthday >= ?", "2010-1-1").Where("birthday < ?", "2020-1-1").Find(&users)
+	if len(users) != 1 {
+		t.Errorf("Should find 1 user with birthday >= 2010-1-1 and < 2020-1-1, but got %v", len(users))
+	}
+
+	DB.Where("name in (?)", []string{user1.Name, user2.Name}).Find(&users)
+	if len(users) != 2 {
+		t.Errorf("Should find 2 users, but got %v", len(users))
+	}
+
+	DB.Where("id in (?)", []int64{user1.Id, user2.Id, user3.Id}).Find(&users)
+	if len(users) != 3 {
+		t.Errorf("Should find 3 users, but got %v", len(users))
+	}
+
+	DB.Where("id in (?)", user1.Id).Find(&users)
+	if len(users) != 1 {
+		t.Errorf("Should find 1 user, but got %v", len(users))
+	}
+
+	if err := DB.Where("id IN (?)", []string{}).Find(&users).Error; err != nil {
+		t.Error("no error should happen when querying with an empty slice, but got: ", err)
+	}
+
+	if err := DB.Not("id IN (?)", []string{}).Find(&users).Error; err != nil {
+		t.Error("no error should happen when querying with an empty slice, but got: ", err)
+	}
+
+	if DB.Where("name = ?", "none existing").Find(&[]User{}).RecordNotFound() {
+		t.Errorf("Should not get RecordNotFound error when looking for non-existing records")
+	}
+}
+
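
// For context, a minimal sketch of the plain-SQL condition style exercised
// above. This is an editorial illustration, not part of the vendored upstream
// suite; it assumes the DB handle and User model this test package already
// defines. The ? placeholders are bound as SQL variables rather than
// interpolated into the query, which is what keeps these conditions
// injection-safe, and a slice argument expands into an IN list.
func examplePlainSQLConditions() {
	var users []User
	// Single values and ranges bind through ? placeholders.
	DB.Where("age >= ? AND age < ?", 18, 65).Find(&users)
	// A slice argument expands into a comma-separated IN list.
	DB.Where("name IN (?)", []string{"user1", "user2"}).Find(&users)
}
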
+func TestSearchWithStruct(t *testing.T) {
+	user1 := User{Name: "StructSearchUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "StructSearchUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "StructSearchUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	if DB.Where(user1.Id).First(&User{}).RecordNotFound() {
+		t.Errorf("Search with primary key")
+	}
+
+	if DB.First(&User{}, user1.Id).RecordNotFound() {
+		t.Errorf("Search with primary key as inline condition")
+	}
+
+	if DB.First(&User{}, fmt.Sprintf("%v", user1.Id)).RecordNotFound() {
+		t.Errorf("Search with primary key as inline condition")
+	}
+
+	var users []User
+	DB.Where([]int64{user1.Id, user2.Id, user3.Id}).Find(&users)
+	if len(users) != 3 {
+		t.Errorf("Should find 3 users when searching with primary keys, but got %v", len(users))
+	}
+
+	var user User
+	DB.First(&user, &User{Name: user1.Name})
+	if user.Id == 0 || user.Name != user1.Name {
+		t.Errorf("Search first record with inline pointer of struct")
+	}
+
+	DB.First(&user, User{Name: user1.Name})
+	if user.Id == 0 || user.Name != user1.Name {
+		t.Errorf("Search first record with inline struct")
+	}
+
+	DB.Where(&User{Name: user1.Name}).First(&user)
+	if user.Id == 0 || user.Name != user1.Name {
+		t.Errorf("Search first record with where struct")
+	}
+
+	DB.Find(&users, &User{Name: user2.Name})
+	if len(users) != 1 {
+		t.Errorf("Search all records with inline struct")
+	}
+}
+
+func TestSearchWithMap(t *testing.T) {
+	companyID := 1
+	user1 := User{Name: "MapSearchUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "MapSearchUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "MapSearchUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	user4 := User{Name: "MapSearchUser4", Age: 30, Birthday: now.MustParse("2020-1-1"), CompanyID: &companyID}
+	DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4)
+
+	var user User
+	DB.First(&user, map[string]interface{}{"name": user1.Name})
+	if user.Id == 0 || user.Name != user1.Name {
+		t.Errorf("Search first record with inline map")
+	}
+
+	user = User{}
+	DB.Where(map[string]interface{}{"name": user2.Name}).First(&user)
+	if user.Id == 0 || user.Name != user2.Name {
+		t.Errorf("Search first record with where map")
+	}
+
+	var users []User
+	DB.Where(map[string]interface{}{"name": user3.Name}).Find(&users)
+	if len(users) != 1 {
+		t.Errorf("Search all records with where map")
+	}
+
+	DB.Find(&users, map[string]interface{}{"name": user3.Name})
+	if len(users) != 1 {
+		t.Errorf("Search all records with inline map")
+	}
+
+	DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": nil})
+	if len(users) != 0 {
+		t.Errorf("Should find 0 records when searching with an inline map containing a null value")
+	}
+
+	DB.Find(&users, map[string]interface{}{"name": user1.Name, "company_id": nil})
+	if len(users) != 1 {
+		t.Errorf("Should find 1 record when searching with an inline map containing a null value")
+	}
+
+	DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": companyID})
+	if len(users) != 1 {
+		t.Errorf("Search all records with an inline multiple-value map")
+	}
+}
+
+func TestSearchWithEmptyChain(t *testing.T) {
+	user1 := User{Name: "ChainSearchUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
+	user2 := User{Name: "ChainSearchUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
+	user3 := User{Name: "ChainSearchUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	if DB.Where("").Where("").First(&User{}).Error != nil {
+		t.Errorf("Should not raise any error if searching with empty strings")
+	}
+
+	if DB.Where(&User{}).Where("name = ?", user1.Name).First(&User{}).Error != nil {
+		t.Errorf("Should not raise any error if searching with an empty struct")
+	}
+
+	if DB.Where(map[string]interface{}{}).Where("name = ?", user1.Name).First(&User{}).Error != nil {
+		t.Errorf("Should not raise any error if searching with an empty map")
+	}
+}
+
+func TestSelect(t *testing.T) {
+	user1 := User{Name: "SelectUser1"}
+	DB.Save(&user1)
+
+	var user User
+	DB.Where("name = ?", user1.Name).Select("name").Find(&user)
+	if user.Id != 0 {
+		t.Errorf("Should not have ID because only name was selected, got %+v", user.Id)
+	}
+
+	if user.Name != user1.Name {
+		t.Errorf("Should have user Name when it was selected")
+	}
+}
+
+func TestOrderAndPluck(t *testing.T) {
+	user1 := User{Name: "OrderPluckUser1", Age: 1}
+	user2 := User{Name: "OrderPluckUser2", Age: 10}
+	user3 := User{Name: "OrderPluckUser3", Age: 20}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+	scopedb := DB.Model(&User{}).Where("name like ?", "%OrderPluckUser%")
+
+	var ages []int64
+	scopedb.Order("age desc").Pluck("age", &ages)
+	if ages[0] != 20 {
+		t.Errorf("The first age should be 20 when ordered by age desc")
+	}
+
+	var ages1, ages2 []int64
+	scopedb.Order("age desc").Pluck("age", &ages1).Pluck("age", &ages2)
+	if !reflect.DeepEqual(ages1, ages2) {
+		t.Errorf("The first order should be used as the primary order")
+	}
+
+	var ages3, ages4 []int64
+	scopedb.Model(&User{}).Order("age desc").Pluck("age", &ages3).Order("age", true).Pluck("age", &ages4)
+	if reflect.DeepEqual(ages3, ages4) {
+		t.Errorf("Reorder should work")
+	}
+
+	var names []string
+	var ages5 []int64
+	scopedb.Model(User{}).Order("name").Order("age desc").Pluck("age", &ages5).Pluck("name", &names)
+	if names != nil && ages5 != nil {
+		if !(names[0] == user1.Name && names[1] == user2.Name && names[2] == user3.Name && ages5[2] == 20) {
+			t.Errorf("Order with multiple orders")
+		}
+	} else {
+		t.Errorf("Order with multiple orders")
+	}
+
+	DB.Model(User{}).Select("name, age").Find(&[]User{})
+}
+
+func TestLimit(t *testing.T) {
+	user1 := User{Name: "LimitUser1", Age: 1}
+	user2 := User{Name: "LimitUser2", Age: 10}
+	user3 := User{Name: "LimitUser3", Age: 20}
+	user4 := User{Name: "LimitUser4", Age: 10}
+	user5 := User{Name: "LimitUser5", Age: 20}
+	DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4).Save(&user5)
+
+	var users1, users2, users3 []User
+	DB.Order("age desc").Limit(3).Find(&users1).Limit(5).Find(&users2).Limit(-1).Find(&users3)
+
+	if len(users1) != 3 || len(users2) != 5 || len(users3) <= 5 {
+		t.Errorf("Limit should work")
+	}
+}
+
+func TestOffset(t *testing.T) {
+	for i := 0; i < 20; i++ {
+		DB.Save(&User{Name: fmt.Sprintf("OffsetUser%v", i)})
+	}
+	var users1, users2, users3, users4 []User
+	DB.Limit(100).Order("age desc").Find(&users1).Offset(3).Find(&users2).Offset(5).Find(&users3).Offset(-1).Find(&users4)
+
+	if (len(users1) != len(users4)) || (len(users1)-len(users2) != 3) || (len(users1)-len(users3) != 5) {
+		t.Errorf("Offset should work")
+	}
+}
+
+func TestOr(t *testing.T) {
+	user1 := User{Name: "OrUser1", Age: 1}
+	user2 := User{Name: "OrUser2", Age: 10}
+	user3 := User{Name: "OrUser3", Age: 20}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	var users []User
+	DB.Where("name = ?", user1.Name).Or("name = ?", user2.Name).Find(&users)
+	if len(users) != 2 {
+		t.Errorf("Should find users with Or")
+	}
+}
+
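
// A note on the ordering semantics tested above: Order appends to whatever
// ordering has accumulated so far, while passing true as its second
// (reorder) argument replaces it, and Pluck reads a single column into a
// slice. This is an editorial sketch, not an upstream test; it assumes this
// package's DB handle and User model.
func exampleOrderAndPluck() {
	var ages []int64
	// Appends "age desc" to any previously specified order.
	DB.Model(&User{}).Order("age desc").Pluck("age", &ages)
	// reorder = true discards earlier orders before applying this one.
	DB.Model(&User{}).Order("age", true).Pluck("age", &ages)
}
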
+func TestCount(t *testing.T) {
+	user1 := User{Name: "CountUser1", Age: 1}
+	user2 := User{Name: "CountUser2", Age: 10}
+	user3 := User{Name: "CountUser3", Age: 20}
+
+	DB.Save(&user1).Save(&user2).Save(&user3)
+	var count, count1, count2 int64
+	var users []User
+
+	if err := DB.Where("name = ?", user1.Name).Or("name = ?", user3.Name).Find(&users).Count(&count).Error; err != nil {
+		t.Errorf("Count should work, but got err %v", err)
+	}
+
+	if count != int64(len(users)) {
+		t.Errorf("Count() method should get correct value")
+	}
+
+	DB.Model(&User{}).Where("name = ?", user1.Name).Count(&count1).Or("name in (?)", []string{user2.Name, user3.Name}).Count(&count2)
+	if count1 != 1 || count2 != 3 {
+		t.Errorf("Multiple counts in a chain should work")
+	}
+}
+
+func TestNot(t *testing.T) {
+	DB.Create(getPreparedUser("user1", "not"))
+	DB.Create(getPreparedUser("user2", "not"))
+	DB.Create(getPreparedUser("user3", "not"))
+
+	user4 := getPreparedUser("user4", "not")
+	user4.Company = Company{}
+	DB.Create(user4)
+
+	DB := DB.Where("role = ?", "not")
+
+	var users1, users2, users3, users4, users5, users6, users7, users8, users9 []User
+	if DB.Find(&users1).RowsAffected != 4 {
+		t.Errorf("should find 4 users with role 'not'")
+	}
+	DB.Not(users1[0].Id).Find(&users2)
+
+	if len(users1)-len(users2) != 1 {
+		t.Errorf("Should exclude the first user with Not")
+	}
+
+	DB.Not([]int{}).Find(&users3)
+	if len(users1)-len(users3) != 0 {
+		t.Errorf("Should find all users with a blank condition")
+	}
+
+	var name3Count int64
+	DB.Table("users").Where("name = ?", "user3").Count(&name3Count)
+	DB.Not("name", "user3").Find(&users4)
+	if len(users1)-len(users4) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not user3")
+	}
+
+	DB.Not("name = ?", "user3").Find(&users4)
+	if len(users1)-len(users4) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not user3")
+	}
+
+	DB.Not("name <> ?", "user3").Find(&users4)
+	if len(users4) != int(name3Count) {
+		t.Errorf("Should find only users whose name is user3")
+	}
+
+	DB.Not(User{Name: "user3"}).Find(&users5)
+
+	if len(users1)-len(users5) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not user3")
+	}
+
+	DB.Not(map[string]interface{}{"name": "user3"}).Find(&users6)
+	if len(users1)-len(users6) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not user3")
+	}
+
+	DB.Not(map[string]interface{}{"name": "user3", "company_id": nil}).Find(&users7)
+	if len(users1)-len(users7) != 2 { // not user3 or user4
+		t.Errorf("Should find all users whose name is not user3 and whose company id is not NULL")
+	}
+
+	DB.Not("name", []string{"user3"}).Find(&users8)
+	if len(users1)-len(users8) != int(name3Count) {
+		t.Errorf("Should find all users whose name is not user3")
+	}
+
+	var name2Count int64
+	DB.Table("users").Where("name = ?", "user2").Count(&name2Count)
+	DB.Not("name", []string{"user3", "user2"}).Find(&users9)
+	if len(users1)-len(users9) != (int(name3Count) + int(name2Count)) {
+		t.Errorf("Should find all users whose name is neither user3 nor user2")
+	}
+}
+
+func TestFillSmallerStruct(t *testing.T) {
+	user1 := User{Name: "SmallerUser", Age: 100}
+	DB.Save(&user1)
+	type SimpleUser struct {
+		Name      string
+		Id        int64
+		UpdatedAt time.Time
+		CreatedAt time.Time
+	}
+
+	var simpleUser SimpleUser
+	DB.Table("users").Where("name = ?", user1.Name).First(&simpleUser)
+
+	if simpleUser.Id == 0 || simpleUser.Name == "" {
+		t.Errorf("Should fill data correctly into the smaller struct")
+	}
+}
+
+func TestFindOrInitialize(t *testing.T) {
+	var user1, user2, user3, user4, user5, user6 User
+	DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user1)
+	if user1.Name != "find or init" || user1.Id != 0 || user1.Age != 33 {
+		t.Errorf("user should be initialized with search value")
+	}
+
+	DB.Where(User{Name: "find or init", Age: 33}).FirstOrInit(&user2)
+	if user2.Name != "find or init" || user2.Id != 0 || user2.Age != 33 {
+		t.Errorf("user should be initialized with search value")
+	}
+
+	DB.FirstOrInit(&user3, map[string]interface{}{"name": "find or init 2"})
+	if user3.Name != "find or init 2" || user3.Id != 0 {
+		t.Errorf("user should be initialized with inline search value")
+	}
+
+	DB.Where(&User{Name: "find or init"}).Attrs(User{Age: 44}).FirstOrInit(&user4)
+	if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 {
+		t.Errorf("user should be initialized with search value and attrs")
+	}
+
+	DB.Where(&User{Name: "find or init"}).Assign("age", 44).FirstOrInit(&user4)
+	if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 {
+		t.Errorf("user should be initialized with search value and assigned attrs")
+	}
+
+	DB.Save(&User{Name: "find or init", Age: 33})
+	DB.Where(&User{Name: "find or init"}).Attrs("age", 44).FirstOrInit(&user5)
+	if user5.Name != "find or init" || user5.Id == 0 || user5.Age != 33 {
+		t.Errorf("user should be found and not initialized by Attrs")
+	}
+
+	DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user6)
+	if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 33 {
+		t.Errorf("user should be found with FirstOrInit")
+	}
+
+	DB.Where(&User{Name: "find or init"}).Assign(User{Age: 44}).FirstOrInit(&user6)
+	if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 44 {
+		t.Errorf("user should be found and updated with assigned attrs")
+	}
+}
+
+func TestFindOrCreate(t *testing.T) {
+	var user1, user2, user3, user4, user5, user6, user7, user8 User
+	DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user1)
+	if user1.Name != "find or create" || user1.Id == 0 || user1.Age != 33 {
+		t.Errorf("user should be created with search value")
+	}
+
+	DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user2)
+	if user1.Id != user2.Id || user2.Name != "find or create" || user2.Id == 0 || user2.Age != 33 {
+		t.Errorf("user should be found instead of created with search value")
+	}
+
+	DB.FirstOrCreate(&user3, map[string]interface{}{"name": "find or create 2"})
+	if user3.Name != "find or create 2" || user3.Id == 0 {
+		t.Errorf("user should be created with inline search value")
+	}
+
+	DB.Where(&User{Name: "find or create 3"}).Attrs("age", 44).FirstOrCreate(&user4)
+	if user4.Name != "find or create 3" || user4.Id == 0 || user4.Age != 44 {
+		t.Errorf("user should be created with search value and attrs")
+	}
+
+	updatedAt1 := user4.UpdatedAt
+	DB.Where(&User{Name: "find or create 3"}).Assign("age", 55).FirstOrCreate(&user4)
+	if updatedAt1.Format(time.RFC3339Nano) == user4.UpdatedAt.Format(time.RFC3339Nano) {
+		t.Errorf("UpdatedAt should be changed when updating values with Assign")
+	}
+
+	DB.Where(&User{Name: "find or create 4"}).Assign(User{Age: 44}).FirstOrCreate(&user4)
+	if user4.Name != "find or create 4" || user4.Id == 0 || user4.Age != 44 {
+		t.Errorf("user should be created with search value and assigned attrs")
+	}
+
+	DB.Where(&User{Name: "find or create"}).Attrs("age", 44).FirstOrInit(&user5)
+	if user5.Name != "find or create" || user5.Id == 0 || user5.Age != 33 {
+		t.Errorf("user should be found and not initialized by Attrs")
+	}
+
+	DB.Where(&User{Name: "find or create"}).Assign(User{Age: 44}).FirstOrCreate(&user6)
+	if user6.Name != "find or create" || user6.Id == 0 || user6.Age != 44 {
+		t.Errorf("user should be found and updated with assigned attrs")
+	}
+
+	DB.Where(&User{Name: "find or create"}).Find(&user7)
+	if user7.Name != "find or create" || user7.Id == 0 || user7.Age != 44 {
+		t.Errorf("user should be found and updated with assigned attrs")
+	}
+
+	DB.Where(&User{Name: "find or create embedded struct"}).Assign(User{Age: 44, CreditCard: CreditCard{Number: "1231231231"}, Emails: []Email{{Email: "jinzhu@assign_embedded_struct.com"}, {Email: "jinzhu-2@assign_embedded_struct.com"}}}).FirstOrCreate(&user8)
+	if DB.Where("email = ?", "jinzhu-2@assign_embedded_struct.com").First(&Email{}).RecordNotFound() {
+		t.Errorf("embedded struct email should be saved")
+	}
+
+	if DB.Where("number = ?", "1231231231").First(&CreditCard{}).RecordNotFound() {
+		t.Errorf("embedded struct credit card should be saved")
+	}
+}
+
+func TestSelectWithEscapedFieldName(t *testing.T) {
+	user1 := User{Name: "EscapedFieldNameUser", Age: 1}
+	user2 := User{Name: "EscapedFieldNameUser", Age: 10}
+	user3 := User{Name: "EscapedFieldNameUser", Age: 20}
+	DB.Save(&user1).Save(&user2).Save(&user3)
+
+	var names []string
+	DB.Model(User{}).Where(&User{Name: "EscapedFieldNameUser"}).Pluck("\"name\"", &names)
+
+	if len(names) != 3 {
+		t.Errorf("Expected 3 names, but got: %d", len(names))
+	}
+}
+
+func TestSelectWithVariables(t *testing.T) {
+	DB.Save(&User{Name: "jinzhu"})
+
+	rows, _ := DB.Table("users").Select("? as fake", gorm.Expr("name")).Rows()
+
+	if !rows.Next() {
+		t.Errorf("Should have returned at least one row")
+	} else {
+		columns, _ := rows.Columns()
+		if !reflect.DeepEqual(columns, []string{"fake"}) {
+			t.Errorf("Should only contain one column")
+		}
+	}
+}
+
+func TestSelectWithArrayInput(t *testing.T) {
+	DB.Save(&User{Name: "jinzhu", Age: 42})
+
+	var user User
+	DB.Select([]string{"name", "age"}).Where("age = 42 AND name = 'jinzhu'").First(&user)
+
+	if user.Name != "jinzhu" || user.Age != 42 {
+		t.Errorf("Should have selected both age and name")
+	}
+}
diff --git a/vendor/github.com/jinzhu/gorm/scaner_test.go b/vendor/github.com/jinzhu/gorm/scaner_test.go
new file mode 100644
index 0000000..2141054
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/scaner_test.go
@@ -0,0 +1,70 @@
+package gorm_test
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"testing"
+)
+
+func TestScannableSlices(t *testing.T) {
+	if err := DB.AutoMigrate(&RecordWithSlice{}).Error; err != nil {
+		t.Errorf("Should create table with slice values correctly: %s", err)
+	}
+
+	r1 := RecordWithSlice{
+		Strings: ExampleStringSlice{"a", "b", "c"},
+		Structs: ExampleStructSlice{
+			{"name1", "value1"},
+			{"name2", "value2"},
+		},
+	}
+
+	if err := DB.Save(&r1).Error; err != nil {
+		t.Errorf("Should save record with slice values")
+	}
+
+	var r2 RecordWithSlice
+
+	if err := DB.Find(&r2).Error; err != nil {
+		t.Errorf("Should fetch record with slice values")
+	}
+
+	if len(r2.Strings) != 3 || r2.Strings[0] != "a" || r2.Strings[1] != "b" || r2.Strings[2] != "c" {
+		t.Errorf("Should have serialised and deserialised a string array")
+	}
+
+	if len(r2.Structs) != 2 || r2.Structs[0].Name != "name1" || r2.Structs[0].Value != "value1" || r2.Structs[1].Name != "name2" || r2.Structs[1].Value != "value2" {
+		t.Errorf("Should have serialised and deserialised a struct array")
+	}
+}
+
+type RecordWithSlice struct {
+	ID      uint64
+	Strings ExampleStringSlice `sql:"type:text"`
+	Structs ExampleStructSlice `sql:"type:text"`
+}
+
+type ExampleStringSlice []string
+
+func (l ExampleStringSlice) Value() (driver.Value, error) {
+	return json.Marshal(l)
+}
+
+func (l *ExampleStringSlice) Scan(input interface{}) error {
+	return json.Unmarshal(input.([]byte), l)
+}
+
+type ExampleStruct struct {
+	Name  string
+	Value string
+}
+
+type ExampleStructSlice []ExampleStruct
+
+func (l ExampleStructSlice) Value() (driver.Value, error) {
+	return json.Marshal(l)
+}
+
+func (l *ExampleStructSlice) Scan(input interface{}) error {
+	return json.Unmarshal(input.([]byte), l)
+}
diff --git a/vendor/github.com/jinzhu/gorm/scope.go b/vendor/github.com/jinzhu/gorm/scope.go
new file mode 100644
index 0000000..844df85
--- /dev/null
+++ b/vendor/github.com/jinzhu/gorm/scope.go
@@ -0,0 +1,1246 @@
+package gorm
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"reflect"
+)
+
+// Scope contains the current operation's information when you perform any operation on the database
+type Scope struct {
+	Search          *search
+	Value           interface{}
+	SQL             string
+	SQLVars         []interface{}
+	db              *DB
+	instanceID      string
+	primaryKeyField *Field
+	skipLeft        bool
+	fields          *[]*Field
+	selectAttrs     *[]string
+}
+
+// IndirectValue returns the indirect value of the scope's reflect value
+func (scope *Scope) IndirectValue() reflect.Value {
+	return indirect(reflect.ValueOf(scope.Value))
+}
+
+// New creates a new Scope without search information
+func (scope *Scope) New(value interface{}) *Scope {
+	return &Scope{db: scope.NewDB(), Search: &search{}, Value: value}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Scope DB
+////////////////////////////////////////////////////////////////////////////////
+
+// DB returns the scope's DB connection
+func (scope *Scope) DB() *DB {
+	return scope.db
+}
+
+// NewDB creates a new DB without search information
+func (scope *Scope) NewDB() *DB {
+	if scope.db != nil {
+		db := scope.db.clone()
+		db.search = nil
+		db.Value = nil
+		return db
+	}
+	return nil
+}
+
+// SQLDB returns the scope's raw database connection
+func (scope *Scope) SQLDB() sqlCommon {
+	return scope.db.db
+}
+
+// Dialect returns the scope's dialect
+func (scope *Scope) Dialect() Dialect {
+	return scope.db.parent.dialect
+}
+
+// Quote quotes a string, escaping it for the database
+func (scope *Scope) Quote(str string) string {
+	if strings.Contains(str, ".") {
+		newStrs := []string{}
+		for _, str := range strings.Split(str, ".") {
+			newStrs = append(newStrs, scope.Dialect().Quote(str))
+		}
+		return strings.Join(newStrs, ".")
+	}
+
+	return scope.Dialect().Quote(str)
+}
+
+// Err adds an error to the Scope
+func (scope *Scope) Err(err error) error {
+	if err != nil {
+		scope.db.AddError(err)
+	}
+	return err
+}
+
+// HasError checks if any error has been recorded
+func (scope *Scope) HasError() bool {
+	return scope.db.Error != nil
+}
+
+// Log prints a log message
+func (scope *Scope) Log(v ...interface{}) {
+	scope.db.log(v...)
+}
+
+// SkipLeft skips the remaining callbacks
+func (scope *Scope) SkipLeft() {
+	scope.skipLeft = true
+}
+
+// Fields returns the value's fields
+func (scope *Scope) Fields() []*Field {
+	if scope.fields == nil {
+		var (
+			fields             []*Field
+			indirectScopeValue = scope.IndirectValue()
+			isStruct           = indirectScopeValue.Kind() == reflect.Struct
+		)
+
+		for _, structField := range scope.GetModelStruct().StructFields {
+			if isStruct {
+				fieldValue := indirectScopeValue
+				for _, name := range structField.Names {
+					fieldValue = reflect.Indirect(fieldValue).FieldByName(name)
+				}
+				fields = append(fields, &Field{StructField: structField, Field: fieldValue, IsBlank: isBlank(fieldValue)})
+			} else {
+				fields = append(fields, &Field{StructField: structField, IsBlank: true})
+			}
+		}
+		scope.fields = &fields
+	}
+
+	return *scope.fields
+}
+
+// FieldByName finds the `gorm.Field` with the given field name or db name
+func (scope *Scope) FieldByName(name string) (field *Field, ok bool) {
+	var (
+		dbName           = ToDBName(name)
+		mostMatchedField *Field
+	)
+
+	for _, field := range scope.Fields() {
+		if field.Name == name || field.DBName == name {
+			return field, true
+		}
+		if field.DBName == dbName {
+			mostMatchedField = field
+		}
+	}
+	return mostMatchedField, mostMatchedField != nil
+}
+
+// PrimaryFields returns the scope's primary fields
+func (scope *Scope) PrimaryFields() (fields []*Field) {
+	for _, field := range scope.Fields() {
+		if field.IsPrimaryKey {
+			fields = append(fields, field)
+		}
+	}
+	return fields
+}
+
+// PrimaryField returns the scope's main primary field; if more than one primary field is defined, it returns the one with column name `id`, or else the first one
+func (scope *Scope) PrimaryField() *Field {
+	if primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 {
+		if len(primaryFields) > 1 {
+			if field, ok := scope.FieldByName("id"); ok {
+				return field
+			}
+		}
+		return scope.PrimaryFields()[0]
+	}
+	return nil
+}
+
+// PrimaryKey returns the main primary field's db name
+func (scope *Scope) PrimaryKey() string {
+	if field := scope.PrimaryField(); field != nil {
+		return field.DBName
+	}
+	return ""
+}
+
+// PrimaryKeyZero checks whether the main primary field's value is blank
+func (scope *Scope) PrimaryKeyZero() bool {
+	field := scope.PrimaryField()
+	return field == nil || field.IsBlank
+}
+
+// PrimaryKeyValue returns the primary key's value
+func (scope *Scope) PrimaryKeyValue() interface{} {
+	if field := scope.PrimaryField(); field != nil && field.Field.IsValid() {
+		return field.Field.Interface()
+	}
+	return 0
+}
+
+// HasColumn checks whether the given column exists
+func (scope *Scope) HasColumn(column string) bool {
+	for _, field := range scope.GetStructFields() {
+		if field.IsNormal && (field.Name == column || field.DBName == column) {
+			return true
+		}
+	}
+	return false
+}
+
+// SetColumn sets the column's value; column could be a field or a field's name/dbname
+func (scope *Scope) SetColumn(column interface{}, value interface{}) error {
+	var updateAttrs = map[string]interface{}{}
+	if attrs, ok := scope.InstanceGet("gorm:update_attrs"); ok {
+		updateAttrs = attrs.(map[string]interface{})
+		defer scope.InstanceSet("gorm:update_attrs", updateAttrs)
+	}
+
+	if field, ok := column.(*Field); ok {
+		updateAttrs[field.DBName] = value
+		return field.Set(value)
+	} else if name, ok := column.(string); ok {
+		var (
+			dbName           = ToDBName(name)
+			mostMatchedField *Field
+		)
+		for _, field := range scope.Fields() {
+			if field.DBName == name {
+				updateAttrs[field.DBName] = value
+				return field.Set(value)
+			}
+			if (field.DBName ==
dbName) || (field.Name == name && mostMatchedField == nil) { + mostMatchedField = field + } + } + + if mostMatchedField != nil { + updateAttrs[mostMatchedField.DBName] = value + return mostMatchedField.Set(value) + } + } + return errors.New("could not convert column to field") +} + +// CallMethod call scope value's method, if it is a slice, will call its element's method one by one +func (scope *Scope) CallMethod(methodName string) { + if scope.Value == nil { + return + } + + if indirectScopeValue := scope.IndirectValue(); indirectScopeValue.Kind() == reflect.Slice { + for i := 0; i < indirectScopeValue.Len(); i++ { + scope.callMethod(methodName, indirectScopeValue.Index(i)) + } + } else { + scope.callMethod(methodName, indirectScopeValue) + } +} + +// AddToVars add value as sql's vars, used to prevent SQL injection +func (scope *Scope) AddToVars(value interface{}) string { + if expr, ok := value.(*expr); ok { + exp := expr.expr + for _, arg := range expr.args { + exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1) + } + return exp + } + + scope.SQLVars = append(scope.SQLVars, value) + return scope.Dialect().BindVar(len(scope.SQLVars)) +} + +// SelectAttrs return selected attributes +func (scope *Scope) SelectAttrs() []string { + if scope.selectAttrs == nil { + attrs := []string{} + for _, value := range scope.Search.selects { + if str, ok := value.(string); ok { + attrs = append(attrs, str) + } else if strs, ok := value.([]string); ok { + attrs = append(attrs, strs...) + } else if strs, ok := value.([]interface{}); ok { + for _, str := range strs { + attrs = append(attrs, fmt.Sprintf("%v", str)) + } + } + } + scope.selectAttrs = &attrs + } + return *scope.selectAttrs +} + +// OmitAttrs return omitted attributes +func (scope *Scope) OmitAttrs() []string { + return scope.Search.omits +} + +type tabler interface { + TableName() string +} + +type dbTabler interface { + TableName(*DB) string +} + +// TableName return table name +func (scope *Scope) TableName() string { + if scope.Search != nil && len(scope.Search.tableName) > 0 { + return scope.Search.tableName + } + + if tabler, ok := scope.Value.(tabler); ok { + return tabler.TableName() + } + + if tabler, ok := scope.Value.(dbTabler); ok { + return tabler.TableName(scope.db) + } + + return scope.GetModelStruct().TableName(scope.db.Model(scope.Value)) +} + +// QuotedTableName return quoted table name +func (scope *Scope) QuotedTableName() (name string) { + if scope.Search != nil && len(scope.Search.tableName) > 0 { + if strings.Index(scope.Search.tableName, " ") != -1 { + return scope.Search.tableName + } + return scope.Quote(scope.Search.tableName) + } + + return scope.Quote(scope.TableName()) +} + +// CombinedConditionSql return combined condition sql +func (scope *Scope) CombinedConditionSql() string { + return scope.joinsSQL() + scope.whereSQL() + scope.groupSQL() + + scope.havingSQL() + scope.orderSQL() + scope.limitAndOffsetSQL() +} + +// Raw set raw sql +func (scope *Scope) Raw(sql string) *Scope { + scope.SQL = strings.Replace(sql, "$$", "?", -1) + return scope +} + +// Exec perform generated SQL +func (scope *Scope) Exec() *Scope { + defer scope.trace(NowFunc()) + + if !scope.HasError() { + if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { + if count, err := result.RowsAffected(); scope.Err(err) == nil { + scope.db.RowsAffected = count + } + } + } + return scope +} + +// Set set value by name +func (scope *Scope) Set(name string, value interface{}) *Scope { + scope.db.InstantSet(name, 
value) + return scope +} + +// Get get setting by name +func (scope *Scope) Get(name string) (interface{}, bool) { + return scope.db.Get(name) +} + +// InstanceID get InstanceID for scope +func (scope *Scope) InstanceID() string { + if scope.instanceID == "" { + scope.instanceID = fmt.Sprintf("%v%v", &scope, &scope.db) + } + return scope.instanceID +} + +// InstanceSet set instance setting for current operation, but not for operations in callbacks, like saving associations callback +func (scope *Scope) InstanceSet(name string, value interface{}) *Scope { + return scope.Set(name+scope.InstanceID(), value) +} + +// InstanceGet get instance setting from current operation +func (scope *Scope) InstanceGet(name string) (interface{}, bool) { + return scope.Get(name + scope.InstanceID()) +} + +// Begin start a transaction +func (scope *Scope) Begin() *Scope { + if db, ok := scope.SQLDB().(sqlDb); ok { + if tx, err := db.Begin(); err == nil { + scope.db.db = interface{}(tx).(sqlCommon) + scope.InstanceSet("gorm:started_transaction", true) + } + } + return scope +} + +// CommitOrRollback commit current transaction if no error happened, otherwise will rollback it +func (scope *Scope) CommitOrRollback() *Scope { + if _, ok := scope.InstanceGet("gorm:started_transaction"); ok { + if db, ok := scope.db.db.(sqlTx); ok { + if scope.HasError() { + db.Rollback() + } else { + scope.Err(db.Commit()) + } + scope.db.db = scope.db.parent.db + } + } + return scope +} + +//////////////////////////////////////////////////////////////////////////////// +// Private Methods For *gorm.Scope +//////////////////////////////////////////////////////////////////////////////// + +func (scope *Scope) callMethod(methodName string, reflectValue reflect.Value) { + // Only get address from non-pointer + if reflectValue.CanAddr() && reflectValue.Kind() != reflect.Ptr { + reflectValue = reflectValue.Addr() + } + + if methodValue := reflectValue.MethodByName(methodName); methodValue.IsValid() { + switch method := methodValue.Interface().(type) { + case func(): + method() + case func(*Scope): + method(scope) + case func(*DB): + newDB := scope.NewDB() + method(newDB) + scope.Err(newDB.Error) + case func() error: + scope.Err(method()) + case func(*Scope) error: + scope.Err(method(scope)) + case func(*DB) error: + newDB := scope.NewDB() + scope.Err(method(newDB)) + scope.Err(newDB.Error) + default: + scope.Err(fmt.Errorf("unsupported function %v", methodName)) + } + } +} + +var columnRegexp = regexp.MustCompile("^[a-zA-Z]+(\\.[a-zA-Z]+)*$") // only match string like `name`, `users.name` + +func (scope *Scope) quoteIfPossible(str string) string { + if columnRegexp.MatchString(str) { + return scope.Quote(str) + } + return str +} + +func (scope *Scope) scan(rows *sql.Rows, columns []string, fields []*Field) { + var ( + ignored interface{} + selectFields []*Field + values = make([]interface{}, len(columns)) + selectedColumnsMap = map[string]int{} + resetFields = map[*Field]int{} + ) + + for index, column := range columns { + values[index] = &ignored + + selectFields = fields + if idx, ok := selectedColumnsMap[column]; ok { + selectFields = selectFields[idx+1:] + } + + for fieldIndex, field := range selectFields { + if field.DBName == column { + if field.Field.Kind() == reflect.Ptr { + values[index] = field.Field.Addr().Interface() + } else { + reflectValue := reflect.New(reflect.PtrTo(field.Struct.Type)) + reflectValue.Elem().Set(field.Field.Addr()) + values[index] = reflectValue.Interface() + resetFields[field] = index + } + + 
selectedColumnsMap[column] = fieldIndex + break + } + } + } + + scope.Err(rows.Scan(values...)) + + for field, index := range resetFields { + if v := reflect.ValueOf(values[index]).Elem().Elem(); v.IsValid() { + field.Field.Set(v) + } + } +} + +func (scope *Scope) primaryCondition(value interface{}) string { + return fmt.Sprintf("(%v = %v)", scope.Quote(scope.PrimaryKey()), value) +} + +func (scope *Scope) buildWhereCondition(clause map[string]interface{}) (str string) { + switch value := clause["query"].(type) { + case string: + // if string is number + if regexp.MustCompile("^\\s*\\d+\\s*$").MatchString(value) { + return scope.primaryCondition(scope.AddToVars(value)) + } else if value != "" { + str = fmt.Sprintf("(%v)", value) + } + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, sql.NullInt64: + return scope.primaryCondition(scope.AddToVars(value)) + case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string, []interface{}: + str = fmt.Sprintf("(%v IN (?))", scope.Quote(scope.PrimaryKey())) + clause["args"] = []interface{}{value} + case map[string]interface{}: + var sqls []string + for key, value := range value { + if value != nil { + sqls = append(sqls, fmt.Sprintf("(%v = %v)", scope.Quote(key), scope.AddToVars(value))) + } else { + sqls = append(sqls, fmt.Sprintf("(%v IS NULL)", scope.Quote(key))) + } + } + return strings.Join(sqls, " AND ") + case interface{}: + var sqls []string + for _, field := range scope.New(value).Fields() { + if !field.IsIgnored && !field.IsBlank { + sqls = append(sqls, fmt.Sprintf("(%v = %v)", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } + } + return strings.Join(sqls, " AND ") + } + + args := clause["args"].([]interface{}) + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: // For where("id in (?)", []int64{1,2}) + if bytes, ok := arg.([]byte); ok { + str = strings.Replace(str, "?", scope.AddToVars(bytes), 1) + } else if values := reflect.ValueOf(arg); values.Len() > 0 { + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + str = strings.Replace(str, "?", strings.Join(tempMarks, ","), 1) + } else { + str = strings.Replace(str, "?", scope.AddToVars(Expr("NULL")), 1) + } + default: + if valuer, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = valuer.Value() + } + + str = strings.Replace(str, "?", scope.AddToVars(arg), 1) + } + } + return +} + +func (scope *Scope) buildNotCondition(clause map[string]interface{}) (str string) { + var notEqualSQL string + var primaryKey = scope.PrimaryKey() + + switch value := clause["query"].(type) { + case string: + // is number + if regexp.MustCompile("^\\s*\\d+\\s*$").MatchString(value) { + id, _ := strconv.Atoi(value) + return fmt.Sprintf("(%v <> %v)", scope.Quote(primaryKey), id) + } else if regexp.MustCompile("(?i) (=|<>|>|<|LIKE|IS|IN) ").MatchString(value) { + str = fmt.Sprintf(" NOT (%v) ", value) + notEqualSQL = fmt.Sprintf("NOT (%v)", value) + } else { + str = fmt.Sprintf("(%v NOT IN (?))", scope.Quote(value)) + notEqualSQL = fmt.Sprintf("(%v <> ?)", scope.Quote(value)) + } + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, sql.NullInt64: + return fmt.Sprintf("(%v <> %v)", scope.Quote(primaryKey), value) + case []int, []int8, []int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string: + if reflect.ValueOf(value).Len() > 0 { + str = 
fmt.Sprintf("(%v NOT IN (?))", scope.Quote(primaryKey)) + clause["args"] = []interface{}{value} + } + return "" + case map[string]interface{}: + var sqls []string + for key, value := range value { + if value != nil { + sqls = append(sqls, fmt.Sprintf("(%v <> %v)", scope.Quote(key), scope.AddToVars(value))) + } else { + sqls = append(sqls, fmt.Sprintf("(%v IS NOT NULL)", scope.Quote(key))) + } + } + return strings.Join(sqls, " AND ") + case interface{}: + var sqls []string + for _, field := range scope.New(value).Fields() { + if !field.IsBlank { + sqls = append(sqls, fmt.Sprintf("(%v <> %v)", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } + } + return strings.Join(sqls, " AND ") + } + + args := clause["args"].([]interface{}) + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: // For where("id in (?)", []int64{1,2}) + if bytes, ok := arg.([]byte); ok { + str = strings.Replace(str, "?", scope.AddToVars(bytes), 1) + } else if values := reflect.ValueOf(arg); values.Len() > 0 { + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + str = strings.Replace(str, "?", strings.Join(tempMarks, ","), 1) + } else { + str = strings.Replace(str, "?", scope.AddToVars(Expr("NULL")), 1) + } + default: + if scanner, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = scanner.Value() + } + str = strings.Replace(notEqualSQL, "?", scope.AddToVars(arg), 1) + } + } + return +} + +func (scope *Scope) buildSelectQuery(clause map[string]interface{}) (str string) { + switch value := clause["query"].(type) { + case string: + str = value + case []string: + str = strings.Join(value, ", ") + } + + args := clause["args"].([]interface{}) + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: + values := reflect.ValueOf(arg) + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + str = strings.Replace(str, "?", strings.Join(tempMarks, ","), 1) + default: + if valuer, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = valuer.Value() + } + str = strings.Replace(str, "?", scope.AddToVars(arg), 1) + } + } + return +} + +func (scope *Scope) whereSQL() (sql string) { + var ( + quotedTableName = scope.QuotedTableName() + primaryConditions, andConditions, orConditions []string + ) + + if !scope.Search.Unscoped && scope.HasColumn("deleted_at") { + sql := fmt.Sprintf("%v.deleted_at IS NULL", quotedTableName) + primaryConditions = append(primaryConditions, sql) + } + + if !scope.PrimaryKeyZero() { + for _, field := range scope.PrimaryFields() { + sql := fmt.Sprintf("%v.%v = %v", quotedTableName, scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())) + primaryConditions = append(primaryConditions, sql) + } + } + + for _, clause := range scope.Search.whereConditions { + if sql := scope.buildWhereCondition(clause); sql != "" { + andConditions = append(andConditions, sql) + } + } + + for _, clause := range scope.Search.orConditions { + if sql := scope.buildWhereCondition(clause); sql != "" { + orConditions = append(orConditions, sql) + } + } + + for _, clause := range scope.Search.notConditions { + if sql := scope.buildNotCondition(clause); sql != "" { + andConditions = append(andConditions, sql) + } + } + + orSQL := strings.Join(orConditions, " OR ") + combinedSQL := strings.Join(andConditions, " AND ") + if len(combinedSQL) > 0 { + if 
len(orSQL) > 0 { + combinedSQL = combinedSQL + " OR " + orSQL + } + } else { + combinedSQL = orSQL + } + + if len(primaryConditions) > 0 { + sql = "WHERE " + strings.Join(primaryConditions, " AND ") + if len(combinedSQL) > 0 { + sql = sql + " AND (" + combinedSQL + ")" + } + } else if len(combinedSQL) > 0 { + sql = "WHERE " + combinedSQL + } + return +} + +func (scope *Scope) selectSQL() string { + if len(scope.Search.selects) == 0 { + if len(scope.Search.joinConditions) > 0 { + return fmt.Sprintf("%v.*", scope.QuotedTableName()) + } + return "*" + } + return scope.buildSelectQuery(scope.Search.selects) +} + +func (scope *Scope) orderSQL() string { + if len(scope.Search.orders) == 0 || scope.Search.countingQuery { + return "" + } + + var orders []string + for _, order := range scope.Search.orders { + orders = append(orders, scope.quoteIfPossible(order)) + } + return " ORDER BY " + strings.Join(orders, ",") +} + +func (scope *Scope) limitAndOffsetSQL() string { + return scope.Dialect().LimitAndOffsetSQL(scope.Search.limit, scope.Search.offset) +} + +func (scope *Scope) groupSQL() string { + if len(scope.Search.group) == 0 { + return "" + } + return " GROUP BY " + scope.Search.group +} + +func (scope *Scope) havingSQL() string { + if len(scope.Search.havingConditions) == 0 { + return "" + } + + var andConditions []string + for _, clause := range scope.Search.havingConditions { + if sql := scope.buildWhereCondition(clause); sql != "" { + andConditions = append(andConditions, sql) + } + } + + combinedSQL := strings.Join(andConditions, " AND ") + if len(combinedSQL) == 0 { + return "" + } + + return " HAVING " + combinedSQL +} + +func (scope *Scope) joinsSQL() string { + var joinConditions []string + for _, clause := range scope.Search.joinConditions { + if sql := scope.buildWhereCondition(clause); sql != "" { + joinConditions = append(joinConditions, strings.TrimSuffix(strings.TrimPrefix(sql, "("), ")")) + } + } + + return strings.Join(joinConditions, " ") + " " +} + +func (scope *Scope) prepareQuerySQL() { + if scope.Search.raw { + scope.Raw(strings.TrimSuffix(strings.TrimPrefix(scope.CombinedConditionSql(), " WHERE ("), ")")) + } else { + scope.Raw(fmt.Sprintf("SELECT %v FROM %v %v", scope.selectSQL(), scope.QuotedTableName(), scope.CombinedConditionSql())) + } + return +} + +func (scope *Scope) inlineCondition(values ...interface{}) *Scope { + if len(values) > 0 { + scope.Search.Where(values[0], values[1:]...) 
+ } + return scope +} + +func (scope *Scope) callCallbacks(funcs []*func(s *Scope)) *Scope { + for _, f := range funcs { + (*f)(scope) + if scope.skipLeft { + break + } + } + return scope +} + +func convertInterfaceToMap(values interface{}) map[string]interface{} { + var attrs = map[string]interface{}{} + + switch value := values.(type) { + case map[string]interface{}: + return value + case []interface{}: + for _, v := range value { + for key, value := range convertInterfaceToMap(v) { + attrs[key] = value + } + } + case interface{}: + reflectValue := reflect.ValueOf(values) + + switch reflectValue.Kind() { + case reflect.Map: + for _, key := range reflectValue.MapKeys() { + attrs[ToDBName(key.Interface().(string))] = reflectValue.MapIndex(key).Interface() + } + default: + for _, field := range (&Scope{Value: values}).Fields() { + if !field.IsBlank { + attrs[field.DBName] = field.Field.Interface() + } + } + } + } + return attrs +} + +func (scope *Scope) updatedAttrsWithValues(value interface{}) (results map[string]interface{}, hasUpdate bool) { + if scope.IndirectValue().Kind() != reflect.Struct { + return convertInterfaceToMap(value), true + } + + results = map[string]interface{}{} + + for key, value := range convertInterfaceToMap(value) { + if field, ok := scope.FieldByName(key); ok && scope.changeableField(field) { + if _, ok := value.(*expr); ok { + hasUpdate = true + results[field.DBName] = value + } else { + err := field.Set(value) + if field.IsNormal { + hasUpdate = true + if err == ErrUnaddressable { + fmt.Println(err) + results[field.DBName] = value + } else { + results[field.DBName] = field.Field.Interface() + } + } + } + } + } + return +} + +func (scope *Scope) row() *sql.Row { + defer scope.trace(NowFunc()) + scope.callCallbacks(scope.db.parent.callbacks.rowQueries) + scope.prepareQuerySQL() + return scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...) +} + +func (scope *Scope) rows() (*sql.Rows, error) { + defer scope.trace(NowFunc()) + scope.callCallbacks(scope.db.parent.callbacks.rowQueries) + scope.prepareQuerySQL() + return scope.SQLDB().Query(scope.SQL, scope.SQLVars...) +} + +func (scope *Scope) initialize() *Scope { + for _, clause := range scope.Search.whereConditions { + scope.updatedAttrsWithValues(clause["query"]) + } + scope.updatedAttrsWithValues(scope.Search.initAttrs) + scope.updatedAttrsWithValues(scope.Search.assignAttrs) + return scope +} + +func (scope *Scope) pluck(column string, value interface{}) *Scope { + dest := reflect.Indirect(reflect.ValueOf(value)) + scope.Search.Select(column) + if dest.Kind() != reflect.Slice { + scope.Err(fmt.Errorf("results should be a slice, not %s", dest.Kind())) + return scope + } + + rows, err := scope.rows() + if scope.Err(err) == nil { + defer rows.Close() + for rows.Next() { + elem := reflect.New(dest.Type().Elem()).Interface() + scope.Err(rows.Scan(elem)) + dest.Set(reflect.Append(dest, reflect.ValueOf(elem).Elem())) + } + } + return scope +} + +func (scope *Scope) count(value interface{}) *Scope { + scope.Search.Select("count(*)") + scope.Search.countingQuery = true + scope.Err(scope.row().Scan(value)) + return scope +} + +func (scope *Scope) typeName() string { + typ := scope.IndirectValue().Type() + + for typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + return typ.Name() +} + +// trace print sql log +func (scope *Scope) trace(t time.Time) { + if len(scope.SQL) > 0 { + scope.db.slog(scope.SQL, t, scope.SQLVars...) 
+ } +} + +func (scope *Scope) changeableField(field *Field) bool { + if selectAttrs := scope.SelectAttrs(); len(selectAttrs) > 0 { + for _, attr := range selectAttrs { + if field.Name == attr || field.DBName == attr { + return true + } + } + return false + } + + for _, attr := range scope.OmitAttrs() { + if field.Name == attr || field.DBName == attr { + return false + } + } + + return true +} + +func (scope *Scope) shouldSaveAssociations() bool { + if saveAssociations, ok := scope.Get("gorm:save_associations"); ok && !saveAssociations.(bool) { + return false + } + return true && !scope.HasError() +} + +func (scope *Scope) related(value interface{}, foreignKeys ...string) *Scope { + toScope := scope.db.NewScope(value) + + for _, foreignKey := range append(foreignKeys, toScope.typeName()+"Id", scope.typeName()+"Id") { + fromField, _ := scope.FieldByName(foreignKey) + toField, _ := toScope.FieldByName(foreignKey) + + if fromField != nil { + if relationship := fromField.Relationship; relationship != nil { + if relationship.Kind == "many_to_many" { + joinTableHandler := relationship.JoinTableHandler + scope.Err(joinTableHandler.JoinWith(joinTableHandler, toScope.db, scope.Value).Find(value).Error) + } else if relationship.Kind == "belongs_to" { + query := toScope.db + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(foreignKey); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.AssociationForeignDBNames[idx])), field.Field.Interface()) + } + } + scope.Err(query.Find(value).Error) + } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" { + query := toScope.db + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + if relationship.PolymorphicType != "" { + query = query.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), scope.TableName()) + } + scope.Err(query.Find(value).Error) + } + } else { + sql := fmt.Sprintf("%v = ?", scope.Quote(toScope.PrimaryKey())) + scope.Err(toScope.db.Where(sql, fromField.Field.Interface()).Find(value).Error) + } + return scope + } else if toField != nil { + sql := fmt.Sprintf("%v = ?", scope.Quote(toField.DBName)) + scope.Err(toScope.db.Where(sql, scope.PrimaryKeyValue()).Find(value).Error) + return scope + } + } + + scope.Err(fmt.Errorf("invalid association %v", foreignKeys)) + return scope +} + +// getTableOptions return the table options string or an empty string if the table options does not exist +func (scope *Scope) getTableOptions() string { + tableOptions, ok := scope.Get("gorm:table_options") + if !ok { + return "" + } + return tableOptions.(string) +} + +func (scope *Scope) createJoinTable(field *StructField) { + if relationship := field.Relationship; relationship != nil && relationship.JoinTableHandler != nil { + joinTableHandler := relationship.JoinTableHandler + joinTable := joinTableHandler.Table(scope.db) + if !scope.Dialect().HasTable(joinTable) { + toScope := &Scope{Value: reflect.New(field.Struct.Type).Interface()} + + var sqlTypes, primaryKeys []string + for idx, fieldName := range relationship.ForeignFieldNames { + if field, ok := scope.FieldByName(fieldName); ok { + foreignKeyStruct := field.clone() + foreignKeyStruct.IsPrimaryKey = false + foreignKeyStruct.TagSettings["IS_JOINTABLE_FOREIGNKEY"] = "true" + sqlTypes = append(sqlTypes, 
scope.Quote(relationship.ForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct)) + primaryKeys = append(primaryKeys, scope.Quote(relationship.ForeignDBNames[idx])) + } + } + + for idx, fieldName := range relationship.AssociationForeignFieldNames { + if field, ok := toScope.FieldByName(fieldName); ok { + foreignKeyStruct := field.clone() + foreignKeyStruct.IsPrimaryKey = false + foreignKeyStruct.TagSettings["IS_JOINTABLE_FOREIGNKEY"] = "true" + sqlTypes = append(sqlTypes, scope.Quote(relationship.AssociationForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct)) + primaryKeys = append(primaryKeys, scope.Quote(relationship.AssociationForeignDBNames[idx])) + } + } + + scope.Err(scope.NewDB().Exec(fmt.Sprintf("CREATE TABLE %v (%v, PRIMARY KEY (%v)) %s", scope.Quote(joinTable), strings.Join(sqlTypes, ","), strings.Join(primaryKeys, ","), scope.getTableOptions())).Error) + } + scope.NewDB().Table(joinTable).AutoMigrate(joinTableHandler) + } +} + +func (scope *Scope) createTable() *Scope { + var tags []string + var primaryKeys []string + var primaryKeyInColumnType = false + for _, field := range scope.GetModelStruct().StructFields { + if field.IsNormal { + sqlTag := scope.Dialect().DataTypeOf(field) + + // Check if the primary key constraint was specified as + // part of the column type. If so, we can only support + // one column as the primary key. + if strings.Contains(strings.ToLower(sqlTag), "primary key") { + primaryKeyInColumnType = true + } + + tags = append(tags, scope.Quote(field.DBName)+" "+sqlTag) + } + + if field.IsPrimaryKey { + primaryKeys = append(primaryKeys, scope.Quote(field.DBName)) + } + scope.createJoinTable(field) + } + + var primaryKeyStr string + if len(primaryKeys) > 0 && !primaryKeyInColumnType { + primaryKeyStr = fmt.Sprintf(", PRIMARY KEY (%v)", strings.Join(primaryKeys, ",")) + } + + scope.Raw(fmt.Sprintf("CREATE TABLE %v (%v %v) %s", scope.QuotedTableName(), strings.Join(tags, ","), primaryKeyStr, scope.getTableOptions())).Exec() + + scope.autoIndex() + return scope +} + +func (scope *Scope) dropTable() *Scope { + scope.Raw(fmt.Sprintf("DROP TABLE %v", scope.QuotedTableName())).Exec() + return scope +} + +func (scope *Scope) modifyColumn(column string, typ string) { + scope.Raw(fmt.Sprintf("ALTER TABLE %v MODIFY %v %v", scope.QuotedTableName(), scope.Quote(column), typ)).Exec() +} + +func (scope *Scope) dropColumn(column string) { + scope.Raw(fmt.Sprintf("ALTER TABLE %v DROP COLUMN %v", scope.QuotedTableName(), scope.Quote(column))).Exec() +} + +func (scope *Scope) addIndex(unique bool, indexName string, column ...string) { + if scope.Dialect().HasIndex(scope.TableName(), indexName) { + return + } + + var columns []string + for _, name := range column { + columns = append(columns, scope.quoteIfPossible(name)) + } + + sqlCreate := "CREATE INDEX" + if unique { + sqlCreate = "CREATE UNIQUE INDEX" + } + + scope.Raw(fmt.Sprintf("%s %v ON %v(%v) %v", sqlCreate, indexName, scope.QuotedTableName(), strings.Join(columns, ", "), scope.whereSQL())).Exec() +} + +func (scope *Scope) addForeignKey(field string, dest string, onDelete string, onUpdate string) { + var keyName = fmt.Sprintf("%s_%s_%s_foreign", scope.TableName(), field, dest) + keyName = regexp.MustCompile("(_*[^a-zA-Z]+_*|_+)").ReplaceAllString(keyName, "_") + + if scope.Dialect().HasForeignKey(scope.TableName(), keyName) { + return + } + var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE %s ON UPDATE %s;` + scope.Raw(fmt.Sprintf(query, 
scope.QuotedTableName(), scope.quoteIfPossible(keyName), scope.quoteIfPossible(field), dest, onDelete, onUpdate)).Exec() +} + +func (scope *Scope) removeIndex(indexName string) { + scope.Dialect().RemoveIndex(scope.TableName(), indexName) +} + +func (scope *Scope) autoMigrate() *Scope { + tableName := scope.TableName() + quotedTableName := scope.QuotedTableName() + + if !scope.Dialect().HasTable(tableName) { + scope.createTable() + } else { + for _, field := range scope.GetModelStruct().StructFields { + if !scope.Dialect().HasColumn(tableName, field.DBName) { + if field.IsNormal { + sqlTag := scope.Dialect().DataTypeOf(field) + scope.Raw(fmt.Sprintf("ALTER TABLE %v ADD %v %v;", quotedTableName, scope.Quote(field.DBName), sqlTag)).Exec() + } + } + scope.createJoinTable(field) + } + scope.autoIndex() + } + return scope +} + +func (scope *Scope) autoIndex() *Scope { + var indexes = map[string][]string{} + var uniqueIndexes = map[string][]string{} + + for _, field := range scope.GetStructFields() { + if name, ok := field.TagSettings["INDEX"]; ok { + if name == "INDEX" { + name = fmt.Sprintf("idx_%v_%v", scope.TableName(), field.DBName) + } + indexes[name] = append(indexes[name], field.DBName) + } + + if name, ok := field.TagSettings["UNIQUE_INDEX"]; ok { + if name == "UNIQUE_INDEX" { + name = fmt.Sprintf("uix_%v_%v", scope.TableName(), field.DBName) + } + uniqueIndexes[name] = append(uniqueIndexes[name], field.DBName) + } + } + + for name, columns := range indexes { + scope.NewDB().Model(scope.Value).AddIndex(name, columns...) + } + + for name, columns := range uniqueIndexes { + scope.NewDB().Model(scope.Value).AddUniqueIndex(name, columns...) + } + + return scope +} + +func (scope *Scope) getColumnAsArray(columns []string, values ...interface{}) (results [][]interface{}) { + for _, value := range values { + indirectValue := reflect.ValueOf(value) + for indirectValue.Kind() == reflect.Ptr { + indirectValue = indirectValue.Elem() + } + + switch indirectValue.Kind() { + case reflect.Slice: + for i := 0; i < indirectValue.Len(); i++ { + var result []interface{} + var object = indirect(indirectValue.Index(i)) + for _, column := range columns { + result = append(result, object.FieldByName(column).Interface()) + } + results = append(results, result) + } + case reflect.Struct: + var result []interface{} + for _, column := range columns { + result = append(result, indirectValue.FieldByName(column).Interface()) + } + results = append(results, result) + } + } + return +} + +func (scope *Scope) getColumnAsScope(column string) *Scope { + indirectScopeValue := scope.IndirectValue() + + switch indirectScopeValue.Kind() { + case reflect.Slice: + if fieldStruct, ok := scope.GetModelStruct().ModelType.FieldByName(column); ok { + fieldType := fieldStruct.Type + if fieldType.Kind() == reflect.Slice || fieldType.Kind() == reflect.Ptr { + fieldType = fieldType.Elem() + } + + results := reflect.New(reflect.SliceOf(reflect.PtrTo(fieldType))).Elem() + + for i := 0; i < indirectScopeValue.Len(); i++ { + result := indirect(indirect(indirectScopeValue.Index(i)).FieldByName(column)) + + if result.Kind() == reflect.Slice { + for j := 0; j < result.Len(); j++ { + if elem := result.Index(j); elem.CanAddr() { + results = reflect.Append(results, elem.Addr()) + } + } + } else if result.CanAddr() { + results = reflect.Append(results, result.Addr()) + } + } + return scope.New(results.Interface()) + } + case reflect.Struct: + if field := indirectScopeValue.FieldByName(column); field.CanAddr() { + return 
scope.New(field.Addr().Interface()) + } + } + return nil +} diff --git a/vendor/github.com/jinzhu/gorm/scope_test.go b/vendor/github.com/jinzhu/gorm/scope_test.go new file mode 100644 index 0000000..4245899 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/scope_test.go @@ -0,0 +1,43 @@ +package gorm_test + +import ( + "github.com/jinzhu/gorm" + "testing" +) + +func NameIn1And2(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", []string{"ScopeUser1", "ScopeUser2"}) +} + +func NameIn2And3(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", []string{"ScopeUser2", "ScopeUser3"}) +} + +func NameIn(names []string) func(d *gorm.DB) *gorm.DB { + return func(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", names) + } +} + +func TestScopes(t *testing.T) { + user1 := User{Name: "ScopeUser1", Age: 1} + user2 := User{Name: "ScopeUser2", Age: 1} + user3 := User{Name: "ScopeUser3", Age: 2} + DB.Save(&user1).Save(&user2).Save(&user3) + + var users1, users2, users3 []User + DB.Scopes(NameIn1And2).Find(&users1) + if len(users1) != 2 { + t.Errorf("Should find two users whose names are in (ScopeUser1, ScopeUser2)") + } + + DB.Scopes(NameIn1And2, NameIn2And3).Find(&users2) + if len(users2) != 1 { + t.Errorf("Should find one user whose name is ScopeUser2") + } + + DB.Scopes(NameIn([]string{user1.Name, user3.Name})).Find(&users3) + if len(users3) != 2 { + t.Errorf("Should find two users whose names are in (ScopeUser1, ScopeUser3)") + } +} diff --git a/vendor/github.com/jinzhu/gorm/search.go b/vendor/github.com/jinzhu/gorm/search.go new file mode 100644 index 0000000..078bd42 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/search.go @@ -0,0 +1,149 @@ +package gorm + +import "fmt" + +type search struct { + db *DB + whereConditions []map[string]interface{} + orConditions []map[string]interface{} + notConditions []map[string]interface{} + havingConditions []map[string]interface{} + joinConditions []map[string]interface{} + initAttrs []interface{} + assignAttrs []interface{} + selects map[string]interface{} + omits []string + orders []string + preload []searchPreload + offset int + limit int + group string + tableName string + raw bool + Unscoped bool + countingQuery bool +} + +type searchPreload struct { + schema string + conditions []interface{} +} + +func (s *search) clone() *search { + clone := *s + return &clone +} + +func (s *search) Where(query interface{}, values ...interface{}) *search { + s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Not(query interface{}, values ...interface{}) *search { + s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Or(query interface{}, values ...interface{}) *search { + s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Attrs(attrs ...interface{}) *search { + s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...)) + return s +} + +func (s *search) Assign(attrs ...interface{}) *search { + s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...)) + return s +} + +func (s *search) Order(value string, reorder ...bool) *search { + if len(reorder) > 0 && reorder[0] { + if value != "" { + s.orders = []string{value} + } else { + s.orders = []string{} + } + } else if value != "" { + s.orders = append(s.orders, value) + } + return s +} + +func (s *search) Select(query interface{}, args ...interface{}) *search { + s.selects = map[string]interface{}{"query": query, "args": args} +
return s +} + +func (s *search) Omit(columns ...string) *search { + s.omits = columns + return s +} + +func (s *search) Limit(limit int) *search { + s.limit = limit + return s +} + +func (s *search) Offset(offset int) *search { + s.offset = offset + return s +} + +func (s *search) Group(query string) *search { + s.group = s.getInterfaceAsSQL(query) + return s +} + +func (s *search) Having(query string, values ...interface{}) *search { + s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Joins(query string, values ...interface{}) *search { + s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Preload(schema string, values ...interface{}) *search { + var preloads []searchPreload + for _, preload := range s.preload { + if preload.schema != schema { + preloads = append(preloads, preload) + } + } + preloads = append(preloads, searchPreload{schema, values}) + s.preload = preloads + return s +} + +func (s *search) Raw(b bool) *search { + s.raw = b + return s +} + +func (s *search) unscoped() *search { + s.Unscoped = true + return s +} + +func (s *search) Table(name string) *search { + s.tableName = name + return s +} + +func (s *search) getInterfaceAsSQL(value interface{}) (str string) { + switch value.(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + str = fmt.Sprintf("%v", value) + default: + s.db.AddError(ErrInvalidSQL) + } + + if str == "-1" { + return "" + } + return +} diff --git a/vendor/github.com/jinzhu/gorm/search_test.go b/vendor/github.com/jinzhu/gorm/search_test.go new file mode 100644 index 0000000..4db7ab6 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/search_test.go @@ -0,0 +1,30 @@ +package gorm + +import ( + "reflect" + "testing" +) + +func TestCloneSearch(t *testing.T) { + s := new(search) + s.Where("name = ?", "jinzhu").Order("name").Attrs("name", "jinzhu").Select("name, age") + + s1 := s.clone() + s1.Where("age = ?", 20).Order("age").Attrs("email", "a@e.org").Select("email") + + if reflect.DeepEqual(s.whereConditions, s1.whereConditions) { + t.Errorf("Where should be copied") + } + + if reflect.DeepEqual(s.orders, s1.orders) { + t.Errorf("Order should be copied") + } + + if reflect.DeepEqual(s.initAttrs, s1.initAttrs) { + t.Errorf("InitAttrs should be copied") + } + + if reflect.DeepEqual(s.selects, s1.selects) { + t.Errorf("Select should be copied") + } +} diff --git a/vendor/github.com/jinzhu/gorm/test_all.sh b/vendor/github.com/jinzhu/gorm/test_all.sh new file mode 100755 index 0000000..6c5593b --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/test_all.sh @@ -0,0 +1,5 @@ +dialects=("postgres" "mysql" "sqlite") + +for dialect in "${dialects[@]}" ; do + GORM_DIALECT=${dialect} go test +done diff --git a/vendor/github.com/jinzhu/gorm/update_test.go b/vendor/github.com/jinzhu/gorm/update_test.go new file mode 100644 index 0000000..bdf0109 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/update_test.go @@ -0,0 +1,435 @@ +package gorm_test + +import ( + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +func TestUpdate(t *testing.T) { + product1 := Product{Code: "product1code"} + product2 := Product{Code: "product2code"} + + DB.Save(&product1).Save(&product2).Update("code", "product2newcode") + + if product2.Code != "product2newcode" { + t.Errorf("Record should be updated") + } + + DB.First(&product1, product1.Id) + DB.First(&product2, product2.Id) + updatedAt1
:= product1.UpdatedAt + + if DB.First(&Product{}, "code = ?", product1.Code).RecordNotFound() { + t.Errorf("Product1 should not be updated") + } + + if !DB.First(&Product{}, "code = ?", "product2code").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + DB.Table("products").Where("code in (?)", []string{"product1code"}).Update("code", "product1newcode") + + var product4 Product + DB.First(&product4, product1.Id) + if updatedAt1.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should be updated if something changed") + } + + if !DB.First(&Product{}, "code = 'product1code'").RecordNotFound() { + t.Errorf("Product1's code should be updated") + } + + if DB.First(&Product{}, "code = 'product1newcode'").RecordNotFound() { + t.Errorf("Product1's code should be updated to product1newcode") + } + + if DB.Model(product2).Update("CreatedAt", time.Now().Add(time.Hour)).Error != nil { + t.Error("No error should be raised when updating with CamelCase") + } + + if DB.Model(&product2).UpdateColumn("CreatedAt", time.Now().Add(time.Hour)).Error != nil { + t.Error("No error should be raised when using update_column with CamelCase") + } + + var products []Product + DB.Find(&products) + if count := DB.Model(Product{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(products)) { + t.Error("RowsAffected should be correct when doing a batch update") + } + + DB.First(&product4, product4.Id) + updatedAt4 := product4.UpdatedAt + DB.Model(&product4).Update("price", gorm.Expr("price + ? - ?", 100, 50)) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100-50 { + t.Errorf("Update with expression") + } + if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) { + t.Errorf("Update with expression should update UpdatedAt") + } +} + +func TestUpdateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + DB.Save(&animal) + updatedAt1 := animal.UpdatedAt + + DB.Save(&animal).Update("name", "Francis") + + if updatedAt1.Format(time.RFC3339Nano) == animal.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated if nothing changed") + } + + var animals []Animal + DB.Find(&animals) + if count := DB.Model(Animal{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(animals)) { + t.Error("RowsAffected should be correct when doing a batch update") + } + + animal = Animal{From: "somewhere"} // No name field, should be filled with the default value (galeone) + DB.Save(&animal).Update("From", "a nice place") // The name field should be untouched + DB.First(&animal, animal.Counter) + if animal.Name != "galeone" { + t.Errorf("Name field shouldn't be changed if untouched, but got %v", animal.Name) + } + + // When changing a field with a default value, the change must occur + animal.Name = "amazing horse" + DB.Save(&animal) + DB.First(&animal, animal.Counter) + if animal.Name != "amazing horse" { + t.Errorf("Updating a field with a default value should occur. But got %v\n", animal.Name) + } + + // When changing a field with a default value to a blank value + animal.Name = "" + DB.Save(&animal) + DB.First(&animal, animal.Counter) + if animal.Name != "" { + t.Errorf("Updating a field to blank with a default value should occur. 
But got %v\n", animal.Name) + } +} + +func TestUpdates(t *testing.T) { + product1 := Product{Code: "product1code", Price: 10} + product2 := Product{Code: "product2code", Price: 10} + DB.Save(&product1).Save(&product2) + DB.Model(&product1).Updates(map[string]interface{}{"code": "product1newcode", "price": 100}) + if product1.Code != "product1newcode" || product1.Price != 100 { + t.Errorf("Record should be updated also with map") + } + + DB.First(&product1, product1.Id) + DB.First(&product2, product2.Id) + updatedAt2 := product2.UpdatedAt + + if DB.First(&Product{}, "code = ? and price = ?", product2.Code, product2.Price).RecordNotFound() { + t.Errorf("Product2 should not be updated") + } + + if DB.First(&Product{}, "code = ?", "product1newcode").RecordNotFound() { + t.Errorf("Product1 should be updated") + } + + DB.Table("products").Where("code in (?)", []string{"product2code"}).Updates(Product{Code: "product2newcode"}) + if !DB.First(&Product{}, "code = 'product2code'").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + var product4 Product + DB.First(&product4, product2.Id) + if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should be updated if something changed") + } + + if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() { + t.Errorf("product2's code should be updated") + } + + updatedAt4 := product4.UpdatedAt + DB.Model(&product4).Updates(map[string]interface{}{"price": gorm.Expr("price + ?", 100)}) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100 { + t.Errorf("Updates with expression") + } + // product4's UpdatedAt will be reset when updating + if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) { + t.Errorf("Updates with expression should update UpdatedAt") + } +} + +func TestUpdateColumn(t *testing.T) { + product1 := Product{Code: "product1code", Price: 10} + product2 := Product{Code: "product2code", Price: 20} + DB.Save(&product1).Save(&product2).UpdateColumn(map[string]interface{}{"code": "product2newcode", "price": 100}) + if product2.Code != "product2newcode" || product2.Price != 100 { + t.Errorf("product 2 should be updated with update column") + } + + var product3 Product + DB.First(&product3, product1.Id) + if product3.Code != "product1code" || product3.Price != 10 { + t.Errorf("product 1 should not be updated") + } + + DB.First(&product2, product2.Id) + updatedAt2 := product2.UpdatedAt + DB.Model(product2).UpdateColumn("code", "update_column_new") + var product4 Product + DB.First(&product4, product2.Id) + if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated with update column") + } + + DB.Model(&product4).UpdateColumn("price", gorm.Expr("price + 100 - 50")) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100-50 { + t.Errorf("UpdateColumn with expression") + } + if product5.UpdatedAt.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("UpdateColumn with expression should not update UpdatedAt") + } +} + +func TestSelectWithUpdate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress 
= Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestSelectWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestOmitWithUpdate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships that are not omitted") + } +} + +func TestOmitWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships that are not omitted") + } +} + +func TestSelectWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } +} + +func TestOmitWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should omit name column when updating user") + } +} + +func TestUpdateColumnsSkipsAssociations(t *testing.T) { + user := getPreparedUser("update_columns_user", "special_role") + user.Age = 99 + address1 := "first street" + user.BillingAddress = Address{Address1: address1} + DB.Save(user) + + // Update a single field of the user and verify that the changed address is not stored.
+ newAge := int64(100) + user.BillingAddress.Address1 = "second street" + db := DB.Model(user).UpdateColumns(User{Age: newAge}) + if db.RowsAffected != 1 { + t.Errorf("Expected RowsAffected=1 but instead RowsAffected=%v", db.RowsAffected) + } + + // Verify that Age is now `newAge`. + freshUser := &User{Id: user.Id} + DB.First(freshUser) + if freshUser.Age != newAge { + t.Errorf("Expected freshly queried user to have Age=%v but instead found Age=%v", newAge, freshUser.Age) + } + + // Verify that user's BillingAddress.Address1 is not changed and is still "first street". + DB.First(&freshUser.BillingAddress, freshUser.BillingAddressID) + if freshUser.BillingAddress.Address1 != address1 { + t.Errorf("Expected user's BillingAddress.Address1=%s to remain unchanged after UpdateColumns invocation, but BillingAddress.Address1=%s", address1, freshUser.BillingAddress.Address1) + } +} + +func TestUpdatesWithBlankValues(t *testing.T) { + product := Product{Code: "product1", Price: 10} + DB.Save(&product) + + DB.Model(&Product{Id: product.Id}).Updates(&Product{Price: 100}) + + var product1 Product + DB.First(&product1, product.Id) + + if product1.Code != "product1" || product1.Price != 100 { + t.Errorf("product's code should not be updated") + } +} + +func TestUpdateDecodeVirtualAttributes(t *testing.T) { + var user = User{ + Name: "jinzhu", + IgnoreMe: 88, + } + + DB.Save(&user) + + DB.Model(&user).Updates(User{Name: "jinzhu2", IgnoreMe: 100}) + + if user.IgnoreMe != 100 { + t.Errorf("should decode virtual attributes to struct, so it could be used in callbacks") + } +} diff --git a/vendor/github.com/jinzhu/gorm/utils.go b/vendor/github.com/jinzhu/gorm/utils.go new file mode 100644 index 0000000..dc69e80 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/utils.go @@ -0,0 +1,264 @@ +package gorm + +import ( + "bytes" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" +) + +// NowFunc returns the current time; it is exported in order to give +// developers the flexibility to customize it according to their +// needs, e.g: +// gorm.NowFunc = func() time.Time { +// return time.Now().UTC() +// } +var NowFunc = func() time.Time { + return time.Now() +} + +// Copied from golint +var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UI", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"} +var commonInitialismsReplacer *strings.Replacer + +func init() { + var commonInitialismsForReplacer []string + for _, initialism := range commonInitialisms { + commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism))) + } + commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...)
+} + +type safeMap struct { + m map[string]string + l *sync.RWMutex +} + +func (s *safeMap) Set(key string, value string) { + s.l.Lock() + defer s.l.Unlock() + s.m[key] = value +} + +func (s *safeMap) Get(key string) string { + s.l.RLock() + defer s.l.RUnlock() + return s.m[key] +} + +func newSafeMap() *safeMap { + return &safeMap{l: new(sync.RWMutex), m: make(map[string]string)} +} + +var smap = newSafeMap() + +type strCase bool + +const ( + lower strCase = false + upper strCase = true +) + +// ToDBName convert string to db name +func ToDBName(name string) string { + if v := smap.Get(name); v != "" { + return v + } + + if name == "" { + return "" + } + + var ( + value = commonInitialismsReplacer.Replace(name) + buf = bytes.NewBufferString("") + lastCase, currCase, nextCase strCase + ) + + for i, v := range value[:len(value)-1] { + nextCase = strCase(value[i+1] >= 'A' && value[i+1] <= 'Z') + if i > 0 { + if currCase == upper { + if lastCase == upper && nextCase == upper { + buf.WriteRune(v) + } else { + if value[i-1] != '_' && value[i+1] != '_' { + buf.WriteRune('_') + } + buf.WriteRune(v) + } + } else { + buf.WriteRune(v) + } + } else { + currCase = upper + buf.WriteRune(v) + } + lastCase = currCase + currCase = nextCase + } + + buf.WriteByte(value[len(value)-1]) + + s := strings.ToLower(buf.String()) + smap.Set(name, s) + return s +} + +// SQL expression +type expr struct { + expr string + args []interface{} +} + +// Expr generate raw SQL expression, for example: +// DB.Model(&product).Update("price", gorm.Expr("price * ? + ?", 2, 100)) +func Expr(expression string, args ...interface{}) *expr { + return &expr{expr: expression, args: args} +} + +func indirect(reflectValue reflect.Value) reflect.Value { + for reflectValue.Kind() == reflect.Ptr { + reflectValue = reflectValue.Elem() + } + return reflectValue +} + +func toQueryMarks(primaryValues [][]interface{}) string { + var results []string + + for _, primaryValue := range primaryValues { + var marks []string + for range primaryValue { + marks = append(marks, "?") + } + + if len(marks) > 1 { + results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ","))) + } else { + results = append(results, strings.Join(marks, "")) + } + } + return strings.Join(results, ",") +} + +func toQueryCondition(scope *Scope, columns []string) string { + var newColumns []string + for _, column := range columns { + newColumns = append(newColumns, scope.Quote(column)) + } + + if len(columns) > 1 { + return fmt.Sprintf("(%v)", strings.Join(newColumns, ",")) + } + return strings.Join(newColumns, ",") +} + +func toQueryValues(values [][]interface{}) (results []interface{}) { + for _, value := range values { + for _, v := range value { + results = append(results, v) + } + } + return +} + +func fileWithLineNum() string { + for i := 2; i < 15; i++ { + _, file, line, ok := runtime.Caller(i) + if ok && (!regexp.MustCompile(`jinzhu/gorm/.*.go`).MatchString(file) || regexp.MustCompile(`jinzhu/gorm/.*test.go`).MatchString(file)) { + return fmt.Sprintf("%v:%v", file, line) + } + } + return "" +} + +func isBlank(value reflect.Value) bool { + return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface()) +} + +func toSearchableMap(attrs ...interface{}) (result interface{}) { + if len(attrs) > 1 { + if str, ok := attrs[0].(string); ok { + result = map[string]interface{}{str: attrs[1]} + } + } else if len(attrs) == 1 { + if attr, ok := attrs[0].(map[string]interface{}); ok { + result = attr + } + + if attr, ok := attrs[0].(interface{}); ok { + 
result = attr + } + } + return +} + +func equalAsString(a interface{}, b interface{}) bool { + return toString(a) == toString(b) +} + +func toString(str interface{}) string { + if values, ok := str.([]interface{}); ok { + var results []string + for _, value := range values { + results = append(results, toString(value)) + } + return strings.Join(results, "_") + } else if bytes, ok := str.([]byte); ok { + return string(bytes) + } else if reflectValue := reflect.Indirect(reflect.ValueOf(str)); reflectValue.IsValid() { + return fmt.Sprintf("%v", reflectValue.Interface()) + } + return "" +} + +func makeSlice(elemType reflect.Type) interface{} { + if elemType.Kind() == reflect.Slice { + elemType = elemType.Elem() + } + sliceType := reflect.SliceOf(elemType) + slice := reflect.New(sliceType) + slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0)) + return slice.Interface() +} + +func strInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// getValueFromFields returns the given fields' values +func getValueFromFields(value reflect.Value, fieldNames []string) (results []interface{}) { + // If value is a nil pointer, Indirect returns a zero Value! + // Therefore we need to check for a zero value, + // as FieldByName could panic + if indirectValue := reflect.Indirect(value); indirectValue.IsValid() { + for _, fieldName := range fieldNames { + if fieldValue := indirectValue.FieldByName(fieldName); fieldValue.IsValid() { + result := fieldValue.Interface() + if r, ok := result.(driver.Valuer); ok { + result, _ = r.Value() + } + results = append(results, result) + } + } + } + return +} + +func addExtraSpaceIfExist(str string) string { + if str != "" { + return " " + str + } + return "" +} diff --git a/vendor/github.com/jinzhu/gorm/utils_test.go b/vendor/github.com/jinzhu/gorm/utils_test.go new file mode 100644 index 0000000..07f5b17 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/utils_test.go @@ -0,0 +1,30 @@ +package gorm_test + +import ( + "testing" + + "github.com/jinzhu/gorm" +) + +func TestToDBNameGenerateFriendlyName(t *testing.T) { + var maps = map[string]string{ + "": "", + "ThisIsATest": "this_is_a_test", + "PFAndESI": "pf_and_esi", + "AbcAndJkl": "abc_and_jkl", + "EmployeeID": "employee_id", + "SKU_ID": "sku_id", + "HTTPAndSMTP": "http_and_smtp", + "HTTPServerHandlerForURLID": "http_server_handler_for_url_id", + "UUID": "uuid", + "HTTPURL": "http_url", + "HTTP_URL": "http_url", + "ThisIsActuallyATestSoWeMayBeAbleToUseThisCodeInGormPackageAlsoIdCanBeUsedAtTheEndAsID": "this_is_actually_a_test_so_we_may_be_able_to_use_this_code_in_gorm_package_also_id_can_be_used_at_the_end_as_id", + } + + for key, value := range maps { + if gorm.ToDBName(key) != value { + t.Errorf("%v ToDBName should equal %v, but got %v", key, value, gorm.ToDBName(key)) + } + } +} diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE new file mode 100644 index 0000000..a1ca9a0 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 - Jinzhu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject
to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md new file mode 100644 index 0000000..4dd0f2d --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/README.md @@ -0,0 +1,55 @@ +Inflection +========= + +Inflection pluralizes and singularizes English nouns + +## Basic Usage + +```go +inflection.Plural("person") => "people" +inflection.Plural("Person") => "People" +inflection.Plural("PERSON") => "PEOPLE" +inflection.Plural("bus") => "buses" +inflection.Plural("BUS") => "BUSES" +inflection.Plural("Bus") => "Buses" + +inflection.Singular("people") => "person" +inflection.Singular("People") => "Person" +inflection.Singular("PEOPLE") => "PERSON" +inflection.Singular("buses") => "bus" +inflection.Singular("BUSES") => "BUS" +inflection.Singular("Buses") => "Bus" + +inflection.Plural("FancyPerson") => "FancyPeople" +inflection.Singular("FancyPeople") => "FancyPerson" +``` + +## Register Rules + +Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) + +If you want to register more rules, follow: + +``` +inflection.AddUncountable("fish") +inflection.AddIrregular("person", "people") +inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" +inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" +``` + +## Supporting the project + +[![http://patreon.com/jinzhu](http://patreon_public_assets.s3.amazonaws.com/sized/becomeAPatronBanner.png)](http://patreon.com/jinzhu) + + +## Author + +**jinzhu** + +* +* +* + +## License + +Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go new file mode 100644 index 0000000..606263b --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/inflections.go @@ -0,0 +1,273 @@ +/* +Package inflection pluralizes and singularizes English nouns. 
+ + inflection.Plural("person") => "people" + inflection.Plural("Person") => "People" + inflection.Plural("PERSON") => "PEOPLE" + + inflection.Singular("people") => "person" + inflection.Singular("People") => "Person" + inflection.Singular("PEOPLE") => "PERSON" + + inflection.Plural("FancyPerson") => "FancyPeople" + inflection.Singular("FancyPeople") => "FancyPerson" + +Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) + +If you want to register more rules, follow: + + inflection.AddUncountable("fish") + inflection.AddIrregular("person", "people") + inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" + inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" +*/ +package inflection + +import ( + "regexp" + "strings" +) + +type inflection struct { + regexp *regexp.Regexp + replace string +} + +// Regular is a regexp find replace inflection +type Regular struct { + find string + replace string +} + +// Irregular is a hard replace inflection, +// containing both singular and plural forms +type Irregular struct { + singular string + plural string +} + +// RegularSlice is a slice of Regular inflections +type RegularSlice []Regular + +// IrregularSlice is a slice of Irregular inflections +type IrregularSlice []Irregular + +var pluralInflections = RegularSlice{ + {"([a-z])$", "${1}s"}, + {"s$", "s"}, + {"^(ax|test)is$", "${1}es"}, + {"(octop|vir)us$", "${1}i"}, + {"(octop|vir)i$", "${1}i"}, + {"(alias|status)$", "${1}es"}, + {"(bu)s$", "${1}ses"}, + {"(buffal|tomat)o$", "${1}oes"}, + {"([ti])um$", "${1}a"}, + {"([ti])a$", "${1}a"}, + {"sis$", "ses"}, + {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"}, + {"(hive)$", "${1}s"}, + {"([^aeiouy]|qu)y$", "${1}ies"}, + {"(x|ch|ss|sh)$", "${1}es"}, + {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"}, + {"^(m|l)ouse$", "${1}ice"}, + {"^(m|l)ice$", "${1}ice"}, + {"^(ox)$", "${1}en"}, + {"^(oxen)$", "${1}"}, + {"(quiz)$", "${1}zes"}, +} + +var singularInflections = RegularSlice{ + {"s$", ""}, + {"(ss)$", "${1}"}, + {"(n)ews$", "${1}ews"}, + {"([ti])a$", "${1}um"}, + {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"}, + {"(^analy)(sis|ses)$", "${1}sis"}, + {"([^f])ves$", "${1}fe"}, + {"(hive)s$", "${1}"}, + {"(tive)s$", "${1}"}, + {"([lr])ves$", "${1}f"}, + {"([^aeiouy]|qu)ies$", "${1}y"}, + {"(s)eries$", "${1}eries"}, + {"(m)ovies$", "${1}ovie"}, + {"(c)ookies$", "${1}ookie"}, + {"(x|ch|ss|sh)es$", "${1}"}, + {"^(m|l)ice$", "${1}ouse"}, + {"(bus)(es)?$", "${1}"}, + {"(o)es$", "${1}"}, + {"(shoe)s$", "${1}"}, + {"(cris|test)(is|es)$", "${1}is"}, + {"^(a)x[ie]s$", "${1}xis"}, + {"(octop|vir)(us|i)$", "${1}us"}, + {"(alias|status)(es)?$", "${1}"}, + {"^(ox)en", "${1}"}, + {"(vert|ind)ices$", "${1}ex"}, + {"(matr)ices$", "${1}ix"}, + {"(quiz)zes$", "${1}"}, + {"(database)s$", "${1}"}, +} + +var irregularInflections = IrregularSlice{ + {"person", "people"}, + {"man", "men"}, + {"child", "children"}, + {"sex", "sexes"}, + {"move", "moves"}, + {"zombie", "zombies"}, +} + +var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"} + +var compiledPluralMaps []inflection +var compiledSingularMaps []inflection + +func compile() { + compiledPluralMaps = []inflection{} + compiledSingularMaps = []inflection{} + for _, uncountable := range uncountableInflections { + inf := inflection{ + regexp:
regexp.MustCompile("^(?i)(" + uncountable + ")$"), + replace: "${1}", + } + compiledPluralMaps = append(compiledPluralMaps, inf) + compiledSingularMaps = append(compiledSingularMaps, inf) + } + + for _, value := range irregularInflections { + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)}, + inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)}, + inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural}, + } + compiledPluralMaps = append(compiledPluralMaps, infs...) + } + + for _, value := range irregularInflections { + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)}, + inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)}, + inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular}, + } + compiledSingularMaps = append(compiledSingularMaps, infs...) + } + + for i := len(pluralInflections) - 1; i >= 0; i-- { + value := pluralInflections[i] + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, + inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, + inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, + } + compiledPluralMaps = append(compiledPluralMaps, infs...) + } + + for i := len(singularInflections) - 1; i >= 0; i-- { + value := singularInflections[i] + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, + inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, + inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, + } + compiledSingularMaps = append(compiledSingularMaps, infs...) + } +} + +func init() { + compile() +} + +// AddPlural adds a plural inflection +func AddPlural(find, replace string) { + pluralInflections = append(pluralInflections, Regular{find, replace}) + compile() +} + +// AddSingular adds a singular inflection +func AddSingular(find, replace string) { + singularInflections = append(singularInflections, Regular{find, replace}) + compile() +} + +// AddIrregular adds an irregular inflection +func AddIrregular(singular, plural string) { + irregularInflections = append(irregularInflections, Irregular{singular, plural}) + compile() +} + +// AddUncountable adds an uncountable inflection +func AddUncountable(values ...string) { + uncountableInflections = append(uncountableInflections, values...) 
+ compile() +} + +// GetPlural retrieves the plural inflection values +func GetPlural() RegularSlice { + plurals := make(RegularSlice, len(pluralInflections)) + copy(plurals, pluralInflections) + return plurals +} + +// GetSingular retrieves the singular inflection values +func GetSingular() RegularSlice { + singulars := make(RegularSlice, len(singularInflections)) + copy(singulars, singularInflections) + return singulars +} + +// GetIrregular retrieves the irregular inflection values +func GetIrregular() IrregularSlice { + irregular := make(IrregularSlice, len(irregularInflections)) + copy(irregular, irregularInflections) + return irregular +} + +// GetUncountable retrieves the uncountable inflection values +func GetUncountable() []string { + uncountables := make([]string, len(uncountableInflections)) + copy(uncountables, uncountableInflections) + return uncountables +} + +// SetPlural sets the plural inflections slice +func SetPlural(inflections RegularSlice) { + pluralInflections = inflections + compile() +} + +// SetSingular sets the singular inflections slice +func SetSingular(inflections RegularSlice) { + singularInflections = inflections + compile() +} + +// SetIrregular sets the irregular inflections slice +func SetIrregular(inflections IrregularSlice) { + irregularInflections = inflections + compile() +} + +// SetUncountable sets the uncountable inflections slice +func SetUncountable(inflections []string) { + uncountableInflections = inflections + compile() +} + +// Plural converts a word to its plural form +func Plural(str string) string { + for _, inflection := range compiledPluralMaps { + if inflection.regexp.MatchString(str) { + return inflection.regexp.ReplaceAllString(str, inflection.replace) + } + } + return str +} + +// Singular converts a word to its singular form +func Singular(str string) string { + for _, inflection := range compiledSingularMaps { + if inflection.regexp.MatchString(str) { + return inflection.regexp.ReplaceAllString(str, inflection.replace) + } + } + return str +} diff --git a/vendor/github.com/jinzhu/inflection/inflections_test.go b/vendor/github.com/jinzhu/inflection/inflections_test.go new file mode 100644 index 0000000..689e1df --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/inflections_test.go @@ -0,0 +1,213 @@ +package inflection + +import ( + "strings" + "testing" +) + +var inflections = map[string]string{ + "star": "stars", + "STAR": "STARS", + "Star": "Stars", + "bus": "buses", + "fish": "fish", + "mouse": "mice", + "query": "queries", + "ability": "abilities", + "agency": "agencies", + "movie": "movies", + "archive": "archives", + "index": "indices", + "wife": "wives", + "safe": "saves", + "half": "halves", + "move": "moves", + "salesperson": "salespeople", + "person": "people", + "spokesman": "spokesmen", + "man": "men", + "woman": "women", + "basis": "bases", + "diagnosis": "diagnoses", + "diagnosis_a": "diagnosis_as", + "datum": "data", + "medium": "media", + "stadium": "stadia", + "analysis": "analyses", + "node_child": "node_children", + "child": "children", + "experience": "experiences", + "day": "days", + "comment": "comments", + "foobar": "foobars", + "newsletter": "newsletters", + "old_news": "old_news", + "news": "news", + "series": "series", + "species": "species", + "quiz": "quizzes", + "perspective": "perspectives", + "ox": "oxen", + "photo": "photos", + "buffalo": "buffaloes", + "tomato": "tomatoes", + "dwarf": "dwarves", + "elf": "elves", + "information": "information", + "equipment": "equipment", + "criterion": 
"criteria", +} + +// storage is used to restore the state of the global variables +// on each test execution, to ensure no global state pollution +type storage struct { + singulars RegularSlice + plurals RegularSlice + irregulars IrregularSlice + uncountables []string +} + +var backup = storage{} + +func init() { + AddIrregular("criterion", "criteria") + copy(backup.singulars, singularInflections) + copy(backup.plurals, pluralInflections) + copy(backup.irregulars, irregularInflections) + copy(backup.uncountables, uncountableInflections) +} + +func restore() { + copy(singularInflections, backup.singulars) + copy(pluralInflections, backup.plurals) + copy(irregularInflections, backup.irregulars) + copy(uncountableInflections, backup.uncountables) +} + +func TestPlural(t *testing.T) { + for key, value := range inflections { + if v := Plural(strings.ToUpper(key)); v != strings.ToUpper(value) { + t.Errorf("%v's plural should be %v, but got %v", strings.ToUpper(key), strings.ToUpper(value), v) + } + + if v := Plural(strings.Title(key)); v != strings.Title(value) { + t.Errorf("%v's plural should be %v, but got %v", strings.Title(key), strings.Title(value), v) + } + + if v := Plural(key); v != value { + t.Errorf("%v's plural should be %v, but got %v", key, value, v) + } + } +} + +func TestSingular(t *testing.T) { + for key, value := range inflections { + if v := Singular(strings.ToUpper(value)); v != strings.ToUpper(key) { + t.Errorf("%v's singular should be %v, but got %v", strings.ToUpper(value), strings.ToUpper(key), v) + } + + if v := Singular(strings.Title(value)); v != strings.Title(key) { + t.Errorf("%v's singular should be %v, but got %v", strings.Title(value), strings.Title(key), v) + } + + if v := Singular(value); v != key { + t.Errorf("%v's singular should be %v, but got %v", value, key, v) + } + } +} + +func TestAddPlural(t *testing.T) { + defer restore() + ln := len(pluralInflections) + AddPlural("", "") + if ln+1 != len(pluralInflections) { + t.Errorf("Expected len %d, got %d", ln+1, len(pluralInflections)) + } +} + +func TestAddSingular(t *testing.T) { + defer restore() + ln := len(singularInflections) + AddSingular("", "") + if ln+1 != len(singularInflections) { + t.Errorf("Expected len %d, got %d", ln+1, len(singularInflections)) + } +} + +func TestAddIrregular(t *testing.T) { + defer restore() + ln := len(irregularInflections) + AddIrregular("", "") + if ln+1 != len(irregularInflections) { + t.Errorf("Expected len %d, got %d", ln+1, len(irregularInflections)) + } +} + +func TestAddUncountable(t *testing.T) { + defer restore() + ln := len(uncountableInflections) + AddUncountable("", "") + if ln+2 != len(uncountableInflections) { + t.Errorf("Expected len %d, got %d", ln+2, len(uncountableInflections)) + } +} + +func TestGetPlural(t *testing.T) { + plurals := GetPlural() + if len(plurals) != len(pluralInflections) { + t.Errorf("Expected len %d, got %d", len(plurals), len(pluralInflections)) + } +} + +func TestGetSingular(t *testing.T) { + singular := GetSingular() + if len(singular) != len(singularInflections) { + t.Errorf("Expected len %d, got %d", len(singular), len(singularInflections)) + } +} + +func TestGetIrregular(t *testing.T) { + irregular := GetIrregular() + if len(irregular) != len(irregularInflections) { + t.Errorf("Expected len %d, got %d", len(irregular), len(irregularInflections)) + } +} + +func TestGetUncountable(t *testing.T) { + uncountables := GetUncountable() + if len(uncountables) != len(uncountableInflections) { + t.Errorf("Expected len %d, got %d", 
len(uncountables), len(uncountableInflections)) + } +} + +func TestSetPlural(t *testing.T) { + defer restore() + SetPlural(RegularSlice{{}, {}}) + if len(pluralInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(pluralInflections)) + } +} + +func TestSetSingular(t *testing.T) { + defer restore() + SetSingular(RegularSlice{{}, {}}) + if len(singularInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(singularInflections)) + } +} + +func TestSetIrregular(t *testing.T) { + defer restore() + SetIrregular(IrregularSlice{{}, {}}) + if len(irregularInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(irregularInflections)) + } +} + +func TestSetUncountable(t *testing.T) { + defer restore() + SetUncountable([]string{"", ""}) + if len(uncountableInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(uncountableInflections)) + } +} diff --git a/vendor/github.com/julienschmidt/httprouter/.travis.yml b/vendor/github.com/julienschmidt/httprouter/.travis.yml new file mode 100644 index 0000000..a4d6cc5 --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/.travis.yml @@ -0,0 +1,8 @@ +sudo: false +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - tip diff --git a/vendor/github.com/julienschmidt/httprouter/LICENSE b/vendor/github.com/julienschmidt/httprouter/LICENSE new file mode 100644 index 0000000..b829abc --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2013 Julien Schmidt. All rights reserved. + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * The names of the contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/github.com/julienschmidt/httprouter/README.md b/vendor/github.com/julienschmidt/httprouter/README.md new file mode 100644 index 0000000..9875c70 --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/README.md @@ -0,0 +1,323 @@ +# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.png?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![Coverage](http://gocover.io/_badge/github.com/julienschmidt/httprouter?0)](http://gocover.io/github.com/julienschmidt/httprouter) [![GoDoc](http://godoc.org/github.com/julienschmidt/httprouter?status.png)](http://godoc.org/github.com/julienschmidt/httprouter) + +HttpRouter is a lightweight high performance HTTP request router +(also called *multiplexer* or just *mux* for short) for [Go](http://golang.org/). + +In contrast to the [default mux](http://golang.org/pkg/net/http/#ServeMux) of Go's net/http package, this router supports +variables in the routing pattern and matches against the request method. +It also scales better. + +The router is optimized for high performance and a small memory footprint. +It scales well even with very long paths and a large number of routes. +A compressing dynamic trie (radix tree) structure is used for efficient matching. + +## Features +**Only explicit matches:** With other routers, like [http.ServeMux](http://golang.org/pkg/net/http/#ServeMux), +a requested URL path could match multiple patterns. Therefore they have some +awkward pattern priority rules, like *longest match* or *first registered, +first matched*. By design of this router, a request can only match exactly one +or no route. As a result, there are also no unintended matches, which makes it +great for SEO and improves the user experience. + +**Stop caring about trailing slashes:** Choose the URL style you like, the +router automatically redirects the client if a trailing slash is missing or if +there is one extra. Of course it only does so if the new path has a handler. +If you don't like it, you can [turn off this behavior](http://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash). + +**Path auto-correction:** Besides detecting the missing or additional trailing +slash at no extra cost, the router can also fix wrong cases and remove +superfluous path elements (like `../` or `//`). +Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? +HttpRouter can help him by making a case-insensitive look-up and redirecting him +to the correct URL. + +**Parameters in your routing pattern:** Stop parsing the requested URL path, +just give the path segment a name and the router delivers the dynamic value to +you. Because of the design of the router, path parameters are very cheap. + +**Zero Garbage:** The matching and dispatching process generates zero bytes of +garbage. In fact, the only heap allocations that are made are for building the +slice of the key-value pairs for path parameters. If the request path contains +no parameters, not a single heap allocation is necessary. + +**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). +See below for technical details of the implementation. + +**No more server crashes:** You can set a [Panic handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics +occurring while handling an HTTP request.
The router then recovers and lets the +PanicHandler log what happened and deliver a nice error page. + +Of course you can also set **custom [NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [MethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](http://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles). + +## Usage +This is just a quick introduction; view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details. + +Let's start with a trivial example: +```go +package main + +import ( + "fmt" + "github.com/julienschmidt/httprouter" + "net/http" + "log" +) + +func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + fmt.Fprint(w, "Welcome!\n") +} + +func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) +} + +func main() { + router := httprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + + log.Fatal(http.ListenAndServe(":8080", router)) +} +``` + +### Named parameters +As you can see, `:name` is a *named parameter*. +The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. +You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: +`:name` can be retrieved by `ByName("name")`. + +Named parameters only match a single path segment: +``` +Pattern: /user/:user + + /user/gordon match + /user/you match + /user/gordon/profile no match + /user/ no match +``` + +**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent of each other. + +### Catch-All parameters +The second type are *catch-all* parameters and have the form `*name`. +Like the name suggests, they match everything. +Therefore they must always be at the **end** of the pattern: +``` +Pattern: /src/*filepath + + /src/ match + /src/somefile.go match + /src/subdir/somefile.go match +``` + +## How does it work? +The router relies on a tree structure which makes heavy use of *common prefixes*; +it is basically a *compact* [*prefix tree*](http://en.wikipedia.org/wiki/Trie) +(or just [*Radix tree*](http://en.wikipedia.org/wiki/Radix_tree)). +Nodes with a common prefix also share a common parent. Here is a short example +of what the routing tree for the `GET` request method could look like: + +``` +Priority Path Handle +9 \ *<1> +3 ├s nil +2 |├earch\ *<2> +1 |└upport\ *<3> +2 ├blog\ *<4> +1 | └:post nil +1 | └\ *<5> +2 ├about-us\ *<6> +1 | └team\ *<7> +1 └contact\ *<8> +``` +Every `*` represents the memory address of a handler function (a pointer). +If you follow a path through the tree from the root to the leaf, you get the +complete route path, e.g. `\blog\:post\`, where `:post` is just a placeholder +([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a +tree structure also allows us to use dynamic parts like the `:post` parameter, +since we actually match against the routing patterns instead of just comparing +hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), +this works very well and efficiently.
+ +Since URL paths have a hierarchical structure and make use only of a limited set +of characters (byte values), it is very likely that there are a lot of common +prefixes. This allows us to easily reduce the routing into ever smaller problems. +Moreover the router manages a separate tree for every request method. +For one thing it is more space efficient than holding a method->handle map in +every single node; for another, it also allows us to greatly reduce the +routing problem before even starting the look-up in the prefix-tree. + +For even better scalability, the child nodes on each tree level are ordered by +priority, where the priority is just the number of handles registered in sub +nodes (children, grandchildren, and so on..). +This helps in two ways: + +1. Nodes which are part of the most routing paths are evaluated first. This +helps to make as many routes as possible reachable as fast as possible. +2. It is some sort of cost compensation. The longest reachable path (highest +cost) can always be evaluated first. The following scheme visualizes the tree +structure. Nodes are evaluated from top to bottom and from left to right. + +``` +├------------ +├--------- +├----- +├---- +├-- +├-- +└- +``` + + +## Why doesn't this work with http.Handler? +**It does!** The router itself implements the http.Handler interface. +Moreover the router provides convenient [adapters for http.Handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [http.HandlerFunc](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s +which allow them to be used as a [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route. +The only disadvantage is that no parameter values can be retrieved when an +http.Handler or http.HandlerFunc is used, since there is no efficient way to +pass the values with the existing function parameters. +Therefore [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) has a third function parameter. + +Just try it out for yourself; the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up. + + +## Where can I find Middleware *X*? +This package just provides a very efficient request router with a few extra +features. The router is just a [http.Handler](http://golang.org/pkg/net/http/#Handler), +you can chain any http.Handler compatible middleware before the router, +for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). +Or you could [just write your own](http://justinas.org/writing-http-middleware-in-go/), +it's very easy! + +Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter). + +### Multi-domain / Sub-domains +Here is a quick example: Does your server serve multiple domains / hosts? +You want to use sub-domains? +Define a router per host! +```go +// We need an object that implements the http.Handler interface. +// Therefore we need a type for which we implement the ServeHTTP method. +// We just use a map here, in which we map host names (with port) to http.Handlers +type HostSwitch map[string]http.Handler + +// Implement the ServeHTTP method on our new type +func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Check if an http.Handler is registered for the given host. + // If yes, use it to handle the request.
+	if handler := hs[r.Host]; handler != nil {
+		handler.ServeHTTP(w, r)
+	} else {
+		// Handle host names for which no handler is registered
+		http.Error(w, "Forbidden", http.StatusForbidden) // Or redirect?
+	}
+}
+
+func main() {
+	// Initialize a router as usual
+	router := httprouter.New()
+	router.GET("/", Index)
+	router.GET("/hello/:name", Hello)
+
+	// Make a new HostSwitch and insert the router (our http handler)
+	// for example.com and port 12345
+	hs := make(HostSwitch)
+	hs["example.com:12345"] = router
+
+	// Use the HostSwitch to listen and serve on port 12345
+	log.Fatal(http.ListenAndServe(":12345", hs))
+}
+```
+
+### Basic Authentication
+Another quick example: Basic Authentication (RFC 2617) for handles:
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"github.com/julienschmidt/httprouter"
+	"net/http"
+	"log"
+	"strings"
+)
+
+func BasicAuth(h httprouter.Handle, user, pass []byte) httprouter.Handle {
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		const basicAuthPrefix string = "Basic "
+
+		// Get the Basic Authentication credentials
+		auth := r.Header.Get("Authorization")
+		if strings.HasPrefix(auth, basicAuthPrefix) {
+			// Check credentials
+			payload, err := base64.StdEncoding.DecodeString(auth[len(basicAuthPrefix):])
+			if err == nil {
+				pair := bytes.SplitN(payload, []byte(":"), 2)
+				if len(pair) == 2 &&
+					bytes.Equal(pair[0], user) &&
+					bytes.Equal(pair[1], pass) {
+
+					// Delegate request to the given handle
+					h(w, r, ps)
+					return
+				}
+			}
+		}
+
+		// Request Basic Authentication otherwise
+		w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
+		http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+	}
+}
+
+func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	fmt.Fprint(w, "Not protected!\n")
+}
+
+func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	fmt.Fprint(w, "Protected!\n")
+}
+
+func main() {
+	user := []byte("gordon")
+	pass := []byte("secret!")
+
+	router := httprouter.New()
+	router.GET("/", Index)
+	router.GET("/protected/", BasicAuth(Protected, user, pass))
+
+	log.Fatal(http.ListenAndServe(":8080", router))
+}
+```
+
+## Chaining with the NotFound handler
+
+**NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**
+
+You can use another [http.HandlerFunc](http://golang.org/pkg/net/http/#HandlerFunc), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.
+
+### Static files
+The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets):
+```go
+// Serve static files from the ./public directory
+router.NotFound = http.FileServer(http.Dir("public")).ServeHTTP
+```
+
+But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`.
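+
+For the sub-path approach, the router's own [ServeFiles](http://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles) helper can be used; the registered path must end with `/*filepath`. A minimal sketch (the `./public` directory is just an example location):
+```go
+router := httprouter.New()
+
+// Serve e.g. ./public/css/style.css at /static/css/style.css.
+// Internally the router strips the /static prefix (the *filepath value)
+// and hands it to an http.FileServer rooted at ./public.
+router.ServeFiles("/static/*filepath", http.Dir("public"))
+```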
+ +## Web Frameworks based on HttpRouter +If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package: +* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework +* [api2go](https://github.com/univedo/api2go): A JSON API Implementation for Go +* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance +* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go +* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine +* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow +* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context +* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba +* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang +* [Roxanna](https://github.com/iamthemuffinman/Roxanna): An amalgamation of httprouter, better logging, and hot reload +* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts diff --git a/vendor/github.com/julienschmidt/httprouter/path.go b/vendor/github.com/julienschmidt/httprouter/path.go new file mode 100644 index 0000000..486134d --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/path.go @@ -0,0 +1,123 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +// CleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned +func CleanPath(p string) string { + // Turn empty string into "/" + if p == "" { + return "/" + } + + n := len(p) + var buf []byte + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + buf = make([]byte, n+1) + buf[0] = '/' + } + + trailing := n > 2 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp). So in contrast to the path package this + // loop has no expensive function calls (except 1x make) + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r++ + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. 
element: remove to last / + r += 2 + + if w > 1 { + // can backtrack + w-- + + if buf == nil { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // real path element. + // add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + if buf == nil { + return p[:w] + } + return string(buf[:w]) +} + +// internal helper to lazily create a buffer if necessary +func bufApp(buf *[]byte, s string, w int, c byte) { + if *buf == nil { + if s[w] == c { + return + } + + *buf = make([]byte, len(s)) + copy(*buf, s[:w]) + } + (*buf)[w] = c +} diff --git a/vendor/github.com/julienschmidt/httprouter/path_test.go b/vendor/github.com/julienschmidt/httprouter/path_test.go new file mode 100644 index 0000000..c4ceda5 --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/path_test.go @@ -0,0 +1,92 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +import ( + "runtime" + "testing" +) + +var cleanTests = []struct { + path, result string +}{ + // Already clean + {"/", "/"}, + {"/abc", "/abc"}, + {"/a/b/c", "/a/b/c"}, + {"/abc/", "/abc/"}, + {"/a/b/c/", "/a/b/c/"}, + + // missing root + {"", "/"}, + {"abc", "/abc"}, + {"abc/def", "/abc/def"}, + {"a/b/c", "/a/b/c"}, + + // Remove doubled slash + {"//", "/"}, + {"/abc//", "/abc/"}, + {"/abc/def//", "/abc/def/"}, + {"/a/b/c//", "/a/b/c/"}, + {"/abc//def//ghi", "/abc/def/ghi"}, + {"//abc", "/abc"}, + {"///abc", "/abc"}, + {"//abc//", "/abc/"}, + + // Remove . elements + {".", "/"}, + {"./", "/"}, + {"/abc/./def", "/abc/def"}, + {"/./abc/def", "/abc/def"}, + {"/abc/.", "/abc/"}, + + // Remove .. elements + {"..", "/"}, + {"../", "/"}, + {"../../", "/"}, + {"../..", "/"}, + {"../../abc", "/abc"}, + {"/abc/def/ghi/../jkl", "/abc/def/jkl"}, + {"/abc/def/../ghi/../jkl", "/abc/jkl"}, + {"/abc/def/..", "/abc"}, + {"/abc/def/../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../../ghi/jkl/../../../mno", "/mno"}, + + // Combinations + {"abc/./../def", "/def"}, + {"abc//./../def", "/def"}, + {"abc/../../././../def", "/def"}, +} + +func TestPathClean(t *testing.T) { + for _, test := range cleanTests { + if s := CleanPath(test.path); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.path, s, test.result) + } + if s := CleanPath(test.result); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.result, s, test.result) + } + } +} + +func TestPathCleanMallocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") + return + } + + for _, test := range cleanTests { + allocs := testing.AllocsPerRun(100, func() { CleanPath(test.result) }) + if allocs > 0 { + t.Errorf("CleanPath(%q): %v allocs, want zero", test.result, allocs) + } + } +} diff --git a/vendor/github.com/julienschmidt/httprouter/router.go b/vendor/github.com/julienschmidt/httprouter/router.go new file mode 100644 index 0000000..155b871 --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/router.go @@ -0,0 +1,363 @@ +// Copyright 2013 Julien Schmidt. 
All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package httprouter is a trie based high performance HTTP request router.
+//
+// A trivial example is:
+//
+//  package main
+//
+//  import (
+//      "fmt"
+//      "github.com/julienschmidt/httprouter"
+//      "net/http"
+//      "log"
+//  )
+//
+//  func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+//      fmt.Fprint(w, "Welcome!\n")
+//  }
+//
+//  func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+//      fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
+//  }
+//
+//  func main() {
+//      router := httprouter.New()
+//      router.GET("/", Index)
+//      router.GET("/hello/:name", Hello)
+//
+//      log.Fatal(http.ListenAndServe(":8080", router))
+//  }
+//
+// The router matches incoming requests by the request method and the path.
+// If a handle is registered for this path and method, the router delegates the
+// request to that function.
+// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to
+// register handles; for all other methods router.Handle can be used.
+//
+// The registered path, against which the router matches incoming requests, can
+// contain two types of parameters:
+//  Syntax    Type
+//  :name     named parameter
+//  *name     catch-all parameter
+//
+// Named parameters are dynamic path segments. They match anything until the
+// next '/' or the path end:
+//  Path: /blog/:category/:post
+//
+//  Requests:
+//   /blog/go/request-routers            match: category="go", post="request-routers"
+//   /blog/go/request-routers/           no match, but the router would redirect
+//   /blog/go/                           no match
+//   /blog/go/request-routers/comments   no match
+//
+// Catch-all parameters match anything until the path end, including the
+// directory index (the '/' before the catch-all). Since they match anything
+// until the end, catch-all parameters must always be the final path element.
+//  Path: /files/*filepath
+//
+//  Requests:
+//   /files/                             match: filepath="/"
+//   /files/LICENSE                      match: filepath="/LICENSE"
+//   /files/templates/article.html       match: filepath="/templates/article.html"
+//   /files                              no match, but the router would redirect
+//
+// The value of parameters is saved as a slice of the Param struct, each
+// consisting of a key and a value. The slice is passed to the Handle func as a
+// third parameter.
+// There are two ways to retrieve the value of a parameter:
+//  // by the name of the parameter
+//  user := ps.ByName("user") // defined by :user or *user
+//
+//  // by the index of the parameter. This way you can also get the name (key)
+//  thirdKey   := ps[2].Key   // the name of the 3rd parameter
+//  thirdValue := ps[2].Value // the value of the 3rd parameter
+package httprouter
+
+import (
+	"net/http"
+)
+
+// Handle is a function that can be registered to a route to handle HTTP
+// requests. Like http.HandlerFunc, but has a third parameter for the values of
+// wildcards (variables).
+type Handle func(http.ResponseWriter, *http.Request, Params)
+
+// Param is a single URL parameter, consisting of a key and a value.
+type Param struct {
+	Key   string
+	Value string
+}
+
+// Params is a Param-slice, as returned by the router.
+// The slice is ordered; the first URL parameter is also the first slice value.
+// It is therefore safe to read values by the index.
+type Params []Param
+
+// ByName returns the value of the first Param whose key matches the given name.
+// If no matching Param is found, an empty string is returned.
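+// ByName does a simple linear scan, which is cheap for the typically tiny
+// Params slices the router produces.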
+func (ps Params) ByName(name string) string {
+	for i := range ps {
+		if ps[i].Key == name {
+			return ps[i].Value
+		}
+	}
+	return ""
+}
+
+// Router is an http.Handler which can be used to dispatch requests to
+// different handler functions via configurable routes.
+type Router struct {
+	trees map[string]*node
+
+	// Enables automatic redirection if the current route can't be matched but a
+	// handler for the path with (without) the trailing slash exists.
+	// For example if /foo/ is requested but a route only exists for /foo, the
+	// client is redirected to /foo with http status code 301 for GET requests
+	// and 307 for all other request methods.
+	RedirectTrailingSlash bool
+
+	// If enabled, the router tries to fix the current request path, if no
+	// handle is registered for it.
+	// First superfluous path elements like ../ or // are removed.
+	// Afterwards the router does a case-insensitive lookup of the cleaned path.
+	// If a handle can be found for this route, the router makes a redirection
+	// to the corrected path with status code 301 for GET requests and 307 for
+	// all other request methods.
+	// For example /FOO and /..//Foo could be redirected to /foo.
+	// RedirectTrailingSlash is independent of this option.
+	RedirectFixedPath bool
+
+	// If enabled, the router checks if another method is allowed for the
+	// current route, if the current request cannot be routed.
+	// If this is the case, the request is answered with 'Method Not Allowed'
+	// and HTTP status code 405.
+	// If no other Method is allowed, the request is delegated to the NotFound
+	// handler.
+	HandleMethodNotAllowed bool
+
+	// Configurable http.HandlerFunc which is called when no matching route is
+	// found. If it is not set, http.NotFound is used.
+	NotFound http.HandlerFunc
+
+	// Configurable http.HandlerFunc which is called when a request
+	// cannot be routed and HandleMethodNotAllowed is true.
+	// If it is not set, http.Error with http.StatusMethodNotAllowed is used.
+	MethodNotAllowed http.HandlerFunc
+
+	// Function to handle panics recovered from http handlers.
+	// It should be used to generate an error page and return the http error
+	// code 500 (Internal Server Error).
+	// The handler can be used to keep your server from crashing because of
+	// unrecovered panics.
+	PanicHandler func(http.ResponseWriter, *http.Request, interface{})
+}
+
+// Make sure the Router conforms with the http.Handler interface
+var _ http.Handler = New()
+
+// New returns a new initialized Router.
+// Path auto-correction, including trailing slashes, is enabled by default.
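+// Note that New enables RedirectTrailingSlash, RedirectFixedPath and
+// HandleMethodNotAllowed; a zero-value Router has all three options disabled.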
+func New() *Router { + return &Router{ + RedirectTrailingSlash: true, + RedirectFixedPath: true, + HandleMethodNotAllowed: true, + } +} + +// GET is a shortcut for router.Handle("GET", path, handle) +func (r *Router) GET(path string, handle Handle) { + r.Handle("GET", path, handle) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle) +func (r *Router) HEAD(path string, handle Handle) { + r.Handle("HEAD", path, handle) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) +func (r *Router) OPTIONS(path string, handle Handle) { + r.Handle("OPTIONS", path, handle) +} + +// POST is a shortcut for router.Handle("POST", path, handle) +func (r *Router) POST(path string, handle Handle) { + r.Handle("POST", path, handle) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle) +func (r *Router) PUT(path string, handle Handle) { + r.Handle("PUT", path, handle) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle) +func (r *Router) PATCH(path string, handle Handle) { + r.Handle("PATCH", path, handle) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle) +func (r *Router) DELETE(path string, handle Handle) { + r.Handle("DELETE", path, handle) +} + +// Handle registers a new request handle with the given path and method. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). +func (r *Router) Handle(method, path string, handle Handle) { + if path[0] != '/' { + panic("path must begin with '/' in path '" + path + "'") + } + + if r.trees == nil { + r.trees = make(map[string]*node) + } + + root := r.trees[method] + if root == nil { + root = new(node) + r.trees[method] = root + } + + root.addRoute(path, handle) +} + +// Handler is an adapter which allows the usage of an http.Handler as a +// request handle. +func (r *Router) Handler(method, path string, handler http.Handler) { + r.Handle(method, path, + func(w http.ResponseWriter, req *http.Request, _ Params) { + handler.ServeHTTP(w, req) + }, + ) +} + +// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a +// request handle. +func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { + r.Handler(method, path, handler) +} + +// ServeFiles serves files from the given file system root. +// The path must end with "/*filepath", files are then served from the local +// path /defined/root/dir/*filepath. +// For example if root is "/etc" and *filepath is "passwd", the local file +// "/etc/passwd" would be served. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. 
+// To use the operating system's file system implementation, +// use http.Dir: +// router.ServeFiles("/src/*filepath", http.Dir("/var/www")) +func (r *Router) ServeFiles(path string, root http.FileSystem) { + if len(path) < 10 || path[len(path)-10:] != "/*filepath" { + panic("path must end with /*filepath in path '" + path + "'") + } + + fileServer := http.FileServer(root) + + r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) { + req.URL.Path = ps.ByName("filepath") + fileServer.ServeHTTP(w, req) + }) +} + +func (r *Router) recv(w http.ResponseWriter, req *http.Request) { + if rcv := recover(); rcv != nil { + r.PanicHandler(w, req, rcv) + } +} + +// Lookup allows the manual lookup of a method + path combo. +// This is e.g. useful to build a framework around this router. +// If the path was found, it returns the handle function and the path parameter +// values. Otherwise the third return value indicates whether a redirection to +// the same path with an extra / without the trailing slash should be performed. +func (r *Router) Lookup(method, path string) (Handle, Params, bool) { + if root := r.trees[method]; root != nil { + return root.getValue(path) + } + return nil, nil, false +} + +// ServeHTTP makes the router implement the http.Handler interface. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if r.PanicHandler != nil { + defer r.recv(w, req) + } + + if root := r.trees[req.Method]; root != nil { + path := req.URL.Path + + if handle, ps, tsr := root.getValue(path); handle != nil { + handle(w, req, ps) + return + } else if req.Method != "CONNECT" && path != "/" { + code := 301 // Permanent redirect, request with GET method + if req.Method != "GET" { + // Temporary redirect, request with same method + // As of Go 1.3, Go does not support status code 308. + code = 307 + } + + if tsr && r.RedirectTrailingSlash { + if len(path) > 1 && path[len(path)-1] == '/' { + req.URL.Path = path[:len(path)-1] + } else { + req.URL.Path = path + "/" + } + http.Redirect(w, req, req.URL.String(), code) + return + } + + // Try to fix the request path + if r.RedirectFixedPath { + fixedPath, found := root.findCaseInsensitivePath( + CleanPath(path), + r.RedirectTrailingSlash, + ) + if found { + req.URL.Path = string(fixedPath) + http.Redirect(w, req, req.URL.String(), code) + return + } + } + } + } + + // Handle 405 + if r.HandleMethodNotAllowed { + for method := range r.trees { + // Skip the requested method - we already tried this one + if method == req.Method { + continue + } + + handle, _, _ := r.trees[method].getValue(req.URL.Path) + if handle != nil { + if r.MethodNotAllowed != nil { + r.MethodNotAllowed(w, req) + } else { + http.Error(w, + http.StatusText(http.StatusMethodNotAllowed), + http.StatusMethodNotAllowed, + ) + } + return + } + } + } + + // Handle 404 + if r.NotFound != nil { + r.NotFound(w, req) + } else { + http.NotFound(w, req) + } +} diff --git a/vendor/github.com/julienschmidt/httprouter/router_test.go b/vendor/github.com/julienschmidt/httprouter/router_test.go new file mode 100644 index 0000000..9dc6296 --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/router_test.go @@ -0,0 +1,378 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package httprouter + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" +) + +type mockResponseWriter struct{} + +func (m *mockResponseWriter) Header() (h http.Header) { + return http.Header{} +} + +func (m *mockResponseWriter) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (m *mockResponseWriter) WriteString(s string) (n int, err error) { + return len(s), nil +} + +func (m *mockResponseWriter) WriteHeader(int) {} + +func TestParams(t *testing.T) { + ps := Params{ + Param{"param1", "value1"}, + Param{"param2", "value2"}, + Param{"param3", "value3"}, + } + for i := range ps { + if val := ps.ByName(ps[i].Key); val != ps[i].Value { + t.Errorf("Wrong value for %s: Got %s; Want %s", ps[i].Key, val, ps[i].Value) + } + } + if val := ps.ByName("noKey"); val != "" { + t.Errorf("Expected empty string for not found key; got: %s", val) + } +} + +func TestRouter(t *testing.T) { + router := New() + + routed := false + router.Handle("GET", "/user/:name", func(w http.ResponseWriter, r *http.Request, ps Params) { + routed = true + want := Params{Param{"name", "gopher"}} + if !reflect.DeepEqual(ps, want) { + t.Fatalf("wrong wildcard values: want %v, got %v", want, ps) + } + }) + + w := new(mockResponseWriter) + + req, _ := http.NewRequest("GET", "/user/gopher", nil) + router.ServeHTTP(w, req) + + if !routed { + t.Fatal("routing failed") + } +} + +type handlerStruct struct { + handeled *bool +} + +func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) { + *h.handeled = true +} + +func TestRouterAPI(t *testing.T) { + var get, head, options, post, put, patch, delete, handler, handlerFunc bool + + httpHandler := handlerStruct{&handler} + + router := New() + router.GET("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + get = true + }) + router.HEAD("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + head = true + }) + router.OPTIONS("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + options = true + }) + router.POST("/POST", func(w http.ResponseWriter, r *http.Request, _ Params) { + post = true + }) + router.PUT("/PUT", func(w http.ResponseWriter, r *http.Request, _ Params) { + put = true + }) + router.PATCH("/PATCH", func(w http.ResponseWriter, r *http.Request, _ Params) { + patch = true + }) + router.DELETE("/DELETE", func(w http.ResponseWriter, r *http.Request, _ Params) { + delete = true + }) + router.Handler("GET", "/Handler", httpHandler) + router.HandlerFunc("GET", "/HandlerFunc", func(w http.ResponseWriter, r *http.Request) { + handlerFunc = true + }) + + w := new(mockResponseWriter) + + r, _ := http.NewRequest("GET", "/GET", nil) + router.ServeHTTP(w, r) + if !get { + t.Error("routing GET failed") + } + + r, _ = http.NewRequest("HEAD", "/GET", nil) + router.ServeHTTP(w, r) + if !head { + t.Error("routing HEAD failed") + } + + r, _ = http.NewRequest("OPTIONS", "/GET", nil) + router.ServeHTTP(w, r) + if !options { + t.Error("routing OPTIONS failed") + } + + r, _ = http.NewRequest("POST", "/POST", nil) + router.ServeHTTP(w, r) + if !post { + t.Error("routing POST failed") + } + + r, _ = http.NewRequest("PUT", "/PUT", nil) + router.ServeHTTP(w, r) + if !put { + t.Error("routing PUT failed") + } + + r, _ = http.NewRequest("PATCH", "/PATCH", nil) + router.ServeHTTP(w, r) + if !patch { + t.Error("routing PATCH failed") + } + + r, _ = http.NewRequest("DELETE", "/DELETE", nil) + router.ServeHTTP(w, r) + if !delete { + t.Error("routing DELETE failed") + } + + r, _ = http.NewRequest("GET", 
"/Handler", nil) + router.ServeHTTP(w, r) + if !handler { + t.Error("routing Handler failed") + } + + r, _ = http.NewRequest("GET", "/HandlerFunc", nil) + router.ServeHTTP(w, r) + if !handlerFunc { + t.Error("routing HandlerFunc failed") + } +} + +func TestRouterRoot(t *testing.T) { + router := New() + recv := catchPanic(func() { + router.GET("noSlashRoot", nil) + }) + if recv == nil { + t.Fatal("registering path not beginning with '/' did not panic") + } +} + +func TestRouterNotAllowed(t *testing.T) { + handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} + + router := New() + router.POST("/path", handlerFunc) + + // Test not allowed + r, _ := http.NewRequest("GET", "/path", nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == http.StatusMethodNotAllowed) { + t.Errorf("NotAllowed handling failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + w = httptest.NewRecorder() + responseText := "custom method" + router.MethodNotAllowed = func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(responseText)) + } + router.ServeHTTP(w, r) + if got := w.Body.String(); !(got == responseText) { + t.Errorf("unexpected response got %q want %q", got, responseText) + } + if w.Code != http.StatusTeapot { + t.Errorf("unexpected response code %d want %d", w.Code, http.StatusTeapot) + } +} + +func TestRouterNotFound(t *testing.T) { + handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} + + router := New() + router.GET("/path", handlerFunc) + router.GET("/dir/", handlerFunc) + router.GET("/", handlerFunc) + + testRoutes := []struct { + route string + code int + header string + }{ + {"/path/", 301, "map[Location:[/path]]"}, // TSR -/ + {"/dir", 301, "map[Location:[/dir/]]"}, // TSR +/ + {"", 301, "map[Location:[/]]"}, // TSR +/ + {"/PATH", 301, "map[Location:[/path]]"}, // Fixed Case + {"/DIR/", 301, "map[Location:[/dir/]]"}, // Fixed Case + {"/PATH/", 301, "map[Location:[/path]]"}, // Fixed Case -/ + {"/DIR", 301, "map[Location:[/dir/]]"}, // Fixed Case +/ + {"/../path", 301, "map[Location:[/path]]"}, // CleanPath + {"/nope", 404, ""}, // NotFound + } + for _, tr := range testRoutes { + r, _ := http.NewRequest("GET", tr.route, nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == tr.code && (w.Code == 404 || fmt.Sprint(w.Header()) == tr.header)) { + t.Errorf("NotFound handling route %s failed: Code=%d, Header=%v", tr.route, w.Code, w.Header()) + } + } + + // Test custom not found handler + var notFound bool + router.NotFound = func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(404) + notFound = true + } + r, _ := http.NewRequest("GET", "/nope", nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 404 && notFound == true) { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + // Test other method than GET (want 307 instead of 301) + router.PATCH("/path", handlerFunc) + r, _ = http.NewRequest("PATCH", "/path/", nil) + w = httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 307 && fmt.Sprint(w.Header()) == "map[Location:[/path]]") { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + // Test special case where no node for the prefix "/" exists + router = New() + router.GET("/a", handlerFunc) + r, _ = http.NewRequest("GET", "/", nil) + w = httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 404) { + t.Errorf("NotFound handling route / 
failed: Code=%d", w.Code) + } +} + +func TestRouterPanicHandler(t *testing.T) { + router := New() + panicHandled := false + + router.PanicHandler = func(rw http.ResponseWriter, r *http.Request, p interface{}) { + panicHandled = true + } + + router.Handle("PUT", "/user/:name", func(_ http.ResponseWriter, _ *http.Request, _ Params) { + panic("oops!") + }) + + w := new(mockResponseWriter) + req, _ := http.NewRequest("PUT", "/user/gopher", nil) + + defer func() { + if rcv := recover(); rcv != nil { + t.Fatal("handling panic failed") + } + }() + + router.ServeHTTP(w, req) + + if !panicHandled { + t.Fatal("simulating failed") + } +} + +func TestRouterLookup(t *testing.T) { + routed := false + wantHandle := func(_ http.ResponseWriter, _ *http.Request, _ Params) { + routed = true + } + wantParams := Params{Param{"name", "gopher"}} + + router := New() + + // try empty router first + handle, _, tsr := router.Lookup("GET", "/nope") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } + + // insert route and try again + router.GET("/user/:name", wantHandle) + + handle, params, tsr := router.Lookup("GET", "/user/gopher") + if handle == nil { + t.Fatal("Got no handle!") + } else { + handle(nil, nil, nil) + if !routed { + t.Fatal("Routing failed!") + } + } + + if !reflect.DeepEqual(params, wantParams) { + t.Fatalf("Wrong parameter values: want %v, got %v", wantParams, params) + } + + handle, _, tsr = router.Lookup("GET", "/user/gopher/") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if !tsr { + t.Error("Got no TSR recommendation!") + } + + handle, _, tsr = router.Lookup("GET", "/nope") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } +} + +type mockFileSystem struct { + opened bool +} + +func (mfs *mockFileSystem) Open(name string) (http.File, error) { + mfs.opened = true + return nil, errors.New("this is just a mock") +} + +func TestRouterServeFiles(t *testing.T) { + router := New() + mfs := &mockFileSystem{} + + recv := catchPanic(func() { + router.ServeFiles("/noFilepath", mfs) + }) + if recv == nil { + t.Fatal("registering path not ending with '*filepath' did not panic") + } + + router.ServeFiles("/*filepath", mfs) + w := new(mockResponseWriter) + r, _ := http.NewRequest("GET", "/favicon.ico", nil) + router.ServeHTTP(w, r) + if !mfs.opened { + t.Error("serving file failed") + } +} diff --git a/vendor/github.com/julienschmidt/httprouter/tree.go b/vendor/github.com/julienschmidt/httprouter/tree.go new file mode 100644 index 0000000..a15bc2c --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/tree.go @@ -0,0 +1,555 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package httprouter + +import ( + "strings" + "unicode" +) + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func countParams(path string) uint8 { + var n uint + for i := 0; i < len(path); i++ { + if path[i] != ':' && path[i] != '*' { + continue + } + n++ + } + if n >= 255 { + return 255 + } + return uint8(n) +} + +type nodeType uint8 + +const ( + static nodeType = 0 + param nodeType = 1 + catchAll nodeType = 2 +) + +type node struct { + path string + wildChild bool + nType nodeType + maxParams uint8 + indices string + children []*node + handle Handle + priority uint32 +} + +// increments priority of the given child and reorders if necessary +func (n *node) incrementChildPrio(pos int) int { + n.children[pos].priority++ + prio := n.children[pos].priority + + // adjust position (move to front) + newPos := pos + for newPos > 0 && n.children[newPos-1].priority < prio { + // swap node positions + tmpN := n.children[newPos-1] + n.children[newPos-1] = n.children[newPos] + n.children[newPos] = tmpN + + newPos-- + } + + // build new index char string + if newPos != pos { + n.indices = n.indices[:newPos] + // unchanged prefix, might be empty + n.indices[pos:pos+1] + // the index char we move + n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' + } + + return newPos +} + +// addRoute adds a node with the given handle to the path. +// Not concurrency-safe! +func (n *node) addRoute(path string, handle Handle) { + fullPath := path + n.priority++ + numParams := countParams(path) + + // non-empty tree + if len(n.path) > 0 || len(n.children) > 0 { + walk: + for { + // Update maxParams of the current node + if numParams > n.maxParams { + n.maxParams = numParams + } + + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := 0 + max := min(len(path), len(n.path)) + for i < max && path[i] == n.path[i] { + i++ + } + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handle: n.handle, + priority: n.priority - 1, + } + + // Update maxParams (max of all children) + for i := range child.children { + if child.children[i].maxParams > child.maxParams { + child.maxParams = child.children[i].maxParams + } + } + + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = string([]byte{n.path[i]}) + n.path = path[:i] + n.handle = nil + n.wildChild = false + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + + if n.wildChild { + n = n.children[0] + n.priority++ + + // Update maxParams of the child node + if numParams > n.maxParams { + n.maxParams = numParams + } + numParams-- + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] { + // check for longer wildcard, e.g. 
:name and :names + if len(n.path) >= len(path) || path[len(n.path)] == '/' { + continue walk + } + } + + panic("path segment '" + path + + "' conflicts with existing wildcard '" + n.path + + "' in path '" + fullPath + "'") + } + + c := path[0] + + // slash after param + if n.nType == param && c == '/' && len(n.children) == 1 { + n = n.children[0] + n.priority++ + continue walk + } + + // Check if a child with the next path byte exists + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + i = n.incrementChildPrio(i) + n = n.children[i] + continue walk + } + } + + // Otherwise insert it + if c != ':' && c != '*' { + // []byte for proper unicode char conversion, see #65 + n.indices += string([]byte{c}) + child := &node{ + maxParams: numParams, + } + n.children = append(n.children, child) + n.incrementChildPrio(len(n.indices) - 1) + n = child + } + n.insertChild(numParams, path, fullPath, handle) + return + + } else if i == len(path) { // Make node a (in-path) leaf + if n.handle != nil { + panic("a handle is already registered for path ''" + fullPath + "'") + } + n.handle = handle + } + return + } + } else { // Empty tree + n.insertChild(numParams, path, fullPath, handle) + } +} + +func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) { + var offset int // already handled bytes of the path + + // find prefix until first wildcard (beginning with ':'' or '*'') + for i, max := 0, len(path); numParams > 0; i++ { + c := path[i] + if c != ':' && c != '*' { + continue + } + + // find wildcard end (either '/' or path end) + end := i + 1 + for end < max && path[end] != '/' { + switch path[end] { + // the wildcard name must not contain ':' and '*' + case ':', '*': + panic("only one wildcard per path segment is allowed, has: '" + + path[i:] + "' in path '" + fullPath + "'") + default: + end++ + } + } + + // check if this Node existing children which would be + // unreachable if we insert the wildcard here + if len(n.children) > 0 { + panic("wildcard route '" + path[i:end] + + "' conflicts with existing children in path '" + fullPath + "'") + } + + // check if the wildcard has a name + if end-i < 2 { + panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") + } + + if c == ':' { // param + // split path at the beginning of the wildcard + if i > 0 { + n.path = path[offset:i] + offset = i + } + + child := &node{ + nType: param, + maxParams: numParams, + } + n.children = []*node{child} + n.wildChild = true + n = child + n.priority++ + numParams-- + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if end < max { + n.path = path[offset:end] + offset = end + + child := &node{ + maxParams: numParams, + priority: 1, + } + n.children = []*node{child} + n = child + } + + } else { // catchAll + if end != max || numParams > 1 { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } + + n.path = path[offset:i] + + // first node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + maxParams: 1, + } + n.children = []*node{child} + n.indices = string(path[i]) + n = child + n.priority++ + + // second node: node 
holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + maxParams: 1, + handle: handle, + priority: 1, + } + n.children = []*node{child} + + return + } + } + + // insert remaining path part and handle to the leaf + n.path = path[offset:] + n.handle = handle +} + +// Returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. +func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { +walk: // Outer loop for walking the tree + for { + if len(path) > len(n.path) { + if path[:len(n.path)] == n.path { + path = path[len(n.path):] + // If this node does not have a wildcard (param or catchAll) + // child, we can just look up the next child node and continue + // to walk down the tree + if !n.wildChild { + c := path[0] + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + n = n.children[i] + continue walk + } + } + + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + tsr = (path == "/" && n.handle != nil) + return + + } + + // handle wildcard child + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // save param value + if p == nil { + // lazy allocation + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[1:] + p[i].Value = path[:end] + + // we need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + tsr = (len(path) == end+1) + return + } + + if handle = n.handle; handle != nil { + return + } else if len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + tsr = (n.path == "/" && n.handle != nil) + } + + return + + case catchAll: + // save param value + if p == nil { + // lazy allocation + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[2:] + p[i].Value = path + + handle = n.handle + return + + default: + panic("invalid node type") + } + } + } else if path == n.path { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if handle = n.handle; handle != nil { + return + } + + // No handle found. Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + tsr = (len(n.path) == 1 && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + tsr = (path == "/") || + (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && + path == n.path[:len(n.path)-1] && n.handle != nil) + return + } +} + +// Makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. +// It returns the case-corrected path and a bool indicating whether the lookup +// was successful. 
+func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { + ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory + + // Outer loop for walking the tree + for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { + path = path[len(n.path):] + ciPath = append(ciPath, n.path...) + + if len(path) > 0 { + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + r := unicode.ToLower(rune(path[0])) + for i, index := range n.indices { + // must use recursive approach since both index and + // ToLower(index) could exist. We must check both. + if r == unicode.ToLower(index) { + out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) + if found { + return append(ciPath, out...), true + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + found = (fixTrailingSlash && path == "/" && n.handle != nil) + return + } + + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + k := 0 + for k < len(path) && path[k] != '/' { + k++ + } + + // add param value to case insensitive path + ciPath = append(ciPath, path[:k]...) + + // we need to go deeper! + if k < len(path) { + if len(n.children) > 0 { + path = path[k:] + n = n.children[0] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == k+1 { + return ciPath, true + } + return + } + + if n.handle != nil { + return ciPath, true + } else if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handle != nil { + return append(ciPath, '/'), true + } + } + return + + case catchAll: + return append(ciPath, path...), true + + default: + panic("invalid node type") + } + } else { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handle != nil { + return ciPath, true + } + + // No handle found. + // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + if (len(n.path) == 1 && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) { + return append(ciPath, '/'), true + } + return + } + } + } + return + } + } + + // Nothing found. + // Try to fix the path by adding / removing a trailing slash + if fixTrailingSlash { + if path == "/" { + return ciPath, true + } + if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && + strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && + n.handle != nil { + return append(ciPath, n.path...), true + } + } + return +} diff --git a/vendor/github.com/julienschmidt/httprouter/tree_test.go b/vendor/github.com/julienschmidt/httprouter/tree_test.go new file mode 100644 index 0000000..64f26d1 --- /dev/null +++ b/vendor/github.com/julienschmidt/httprouter/tree_test.go @@ -0,0 +1,611 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package httprouter + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "testing" +) + +func printChildren(n *node, prefix string) { + fmt.Printf(" %02d:%02d %s%s[%d] %v %t %d \r\n", n.priority, n.maxParams, prefix, n.path, len(n.children), n.handle, n.wildChild, n.nType) + for l := len(n.path); l > 0; l-- { + prefix += " " + } + for _, child := range n.children { + printChildren(child, prefix) + } +} + +// Used as a workaround since we can't compare functions or their adresses +var fakeHandlerValue string + +func fakeHandler(val string) Handle { + return func(http.ResponseWriter, *http.Request, Params) { + fakeHandlerValue = val + } +} + +type testRequests []struct { + path string + nilHandler bool + route string + ps Params +} + +func checkRequests(t *testing.T, tree *node, requests testRequests) { + for _, request := range requests { + handler, ps, _ := tree.getValue(request.path) + + if handler == nil { + if !request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path) + } + } else if request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path) + } else { + handler(nil, nil, nil) + if fakeHandlerValue != request.route { + t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route) + } + } + + if !reflect.DeepEqual(ps, request.ps) { + t.Errorf("Params mismatch for route '%s'", request.path) + } + } +} + +func checkPriorities(t *testing.T, n *node) uint32 { + var prio uint32 + for i := range n.children { + prio += checkPriorities(t, n.children[i]) + } + + if n.handle != nil { + prio++ + } + + if n.priority != prio { + t.Errorf( + "priority mismatch for node '%s': is %d, should be %d", + n.path, n.priority, prio, + ) + } + + return prio +} + +func checkMaxParams(t *testing.T, n *node) uint8 { + var maxParams uint8 + for i := range n.children { + params := checkMaxParams(t, n.children[i]) + if params > maxParams { + maxParams = params + } + } + if n.nType != static && !n.wildChild { + maxParams++ + } + + if n.maxParams != maxParams { + t.Errorf( + "maxParams mismatch for node '%s': is %d, should be %d", + n.path, n.maxParams, maxParams, + ) + } + + return maxParams +} + +func TestCountParams(t *testing.T) { + if countParams("/path/:param1/static/*catch-all") != 2 { + t.Fail() + } + if countParams(strings.Repeat("/:param", 256)) != 255 { + t.Fail() + } +} + +func TestTreeAddAndGet(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/contact", + "/co", + "/c", + "/a", + "/ab", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/α", + "/β", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/a", false, "/a", nil}, + {"/", true, "", nil}, + {"/hi", false, "/hi", nil}, + {"/contact", false, "/contact", nil}, + {"/co", false, "/co", nil}, + {"/con", true, "", nil}, // key mismatch + {"/cona", true, "", nil}, // key mismatch + {"/no", true, "", nil}, // no matching child + {"/ab", false, "/ab", nil}, + {"/α", false, "/α", nil}, + {"/β", false, "/β", nil}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func TestTreeWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/cmd/:tool/:sub", + "/cmd/:tool/", + "/src/*filepath", + "/search/", + "/search/:query", + "/user_:name", + "/user_:name/about", + "/files/:dir/*filepath", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + 
"/info/:user/public", + "/info/:user/project/:project", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}}, + {"/cmd/test", true, "", Params{Param{"tool", "test"}}}, + {"/cmd/test/3", false, "/cmd/:tool/:sub", Params{Param{"tool", "test"}, Param{"sub", "3"}}}, + {"/src/", false, "/src/*filepath", Params{Param{"filepath", "/"}}}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/", false, "/search/", nil}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/search/someth!ng+in+ünìcodé/", true, "", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + {"/user_gopher/about", false, "/user_:name/about", Params{Param{"name", "gopher"}}}, + {"/files/js/inc/framework.js", false, "/files/:dir/*filepath", Params{Param{"dir", "js"}, Param{"filepath", "/inc/framework.js"}}}, + {"/info/gordon/public", false, "/info/:user/public", Params{Param{"user", "gordon"}}}, + {"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{"user", "gordon"}, Param{"project", "go"}}}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func catchPanic(testFunc func()) (recv interface{}) { + defer func() { + recv = recover() + }() + + testFunc() + return +} + +type testRoute struct { + path string + conflict bool +} + +func testRoutes(t *testing.T, routes []testRoute) { + tree := &node{} + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route.path, nil) + }) + + if route.conflict { + if recv == nil { + t.Errorf("no panic for conflicting route '%s'", route.path) + } + } else if recv != nil { + t.Errorf("unexpected panic for route '%s': %v", route.path, recv) + } + } + + //printChildren(tree, "") +} + +func TestTreeWildcardConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/:tool/:sub", false}, + {"/cmd/vet", true}, + {"/src/*filepath", false}, + {"/src/*filepathx", true}, + {"/src/", true}, + {"/src1/", false}, + {"/src1/*filepath", true}, + {"/src2*filepath", true}, + {"/search/:query", false}, + {"/search/invalid", true}, + {"/user_:name", false}, + {"/user_x", true}, + {"/user_:name", false}, + {"/id:id", false}, + {"/id/:id", true}, + } + testRoutes(t, routes) +} + +func TestTreeChildConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/vet", false}, + {"/cmd/:tool/:sub", true}, + {"/src/AUTHORS", false}, + {"/src/*filepath", true}, + {"/user_x", false}, + {"/user_:name", true}, + {"/id/:id", false}, + {"/id:id", true}, + {"/:id", true}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDupliatePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/doc/", + "/src/*filepath", + "/search/:query", + "/user_:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + + // Add again + recv = catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting duplicate route '%s", route) + } + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/doc/", false, "/doc/", nil}, + 
{"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + }) +} + +func TestEmptyWildcardName(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/user:", + "/user:/", + "/cmd/:/", + "/src/*", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting route with empty wildcard name '%s", route) + } + } +} + +func TestTreeCatchAllConflict(t *testing.T) { + routes := []testRoute{ + {"/src/*filepath/x", true}, + {"/src2/", false}, + {"/src2/*filepath/x", true}, + } + testRoutes(t, routes) +} + +func TestTreeCatchAllConflictRoot(t *testing.T) { + routes := []testRoute{ + {"/", false}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDoubleWildcard(t *testing.T) { + const panicMsg = "only one wildcard per path segment is allowed" + + routes := [...]string{ + "/:foo:bar", + "/:foo:bar/", + "/:foo*bar", + } + + for _, route := range routes { + tree := &node{} + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + + if rs, ok := recv.(string); !ok || !strings.HasPrefix(rs, panicMsg) { + t.Fatalf(`"Expected panic "%s" for route '%s', got "%v"`, panicMsg, route, recv) + } + } +} + +/*func TestTreeDuplicateWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/:id/:name/:id", + } + for _, route := range routes { + ... + } +}*/ + +func TestTreeTrailingSlashRedirect(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/no/a", + "/no/b", + "/api/hello/:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + //printChildren(tree, "") + + tsrRoutes := [...]string{ + "/hi/", + "/b", + "/search/gopher/", + "/cmd/vet", + "/src", + "/x/", + "/y", + "/0/go/", + "/1/go", + "/a", + "/doc/", + } + for _, route := range tsrRoutes { + handler, _, tsr := tree.getValue(route) + if handler != nil { + t.Fatalf("non-nil handler for TSR route '%s", route) + } else if !tsr { + t.Errorf("expected TSR recommendation for route '%s'", route) + } + } + + noTsrRoutes := [...]string{ + "/", + "/no", + "/no/", + "/_", + "/_/", + "/api/world/abc", + } + for _, route := range noTsrRoutes { + handler, _, tsr := tree.getValue(route) + if handler != nil { + t.Fatalf("non-nil handler for No-TSR route '%s", route) + } else if tsr { + t.Errorf("expected no TSR recommendation for route '%s'", route) + } + } +} + +func TestTreeFindCaseInsensitivePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/ABC/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/doc/go/away", + "/no/a", + "/no/b", + } + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", 
route, recv) + } + } + + // Check out == in for all registered routes + // With fixTrailingSlash = true + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, true) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + // With fixTrailingSlash = false + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, false) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + + tests := []struct { + in string + out string + found bool + slash bool + }{ + {"/HI", "/hi", true, false}, + {"/HI/", "/hi", true, true}, + {"/B", "/b/", true, true}, + {"/B/", "/b/", true, false}, + {"/abc", "/ABC/", true, true}, + {"/abc/", "/ABC/", true, false}, + {"/aBc", "/ABC/", true, true}, + {"/aBc/", "/ABC/", true, false}, + {"/abC", "/ABC/", true, true}, + {"/abC/", "/ABC/", true, false}, + {"/SEARCH/QUERY", "/search/QUERY", true, false}, + {"/SEARCH/QUERY/", "/search/QUERY", true, true}, + {"/CMD/TOOL/", "/cmd/TOOL/", true, false}, + {"/CMD/TOOL", "/cmd/TOOL/", true, true}, + {"/SRC/FILE/PATH", "/src/FILE/PATH", true, false}, + {"/x/Y", "/x/y", true, false}, + {"/x/Y/", "/x/y", true, true}, + {"/X/y", "/x/y", true, false}, + {"/X/y/", "/x/y", true, true}, + {"/X/Y", "/x/y", true, false}, + {"/X/Y/", "/x/y", true, true}, + {"/Y/", "/y/", true, false}, + {"/Y", "/y/", true, true}, + {"/Y/z", "/y/z", true, false}, + {"/Y/z/", "/y/z", true, true}, + {"/Y/Z", "/y/z", true, false}, + {"/Y/Z/", "/y/z", true, true}, + {"/y/Z", "/y/z", true, false}, + {"/y/Z/", "/y/z", true, true}, + {"/Aa", "/aa", true, false}, + {"/Aa/", "/aa", true, true}, + {"/AA", "/aa", true, false}, + {"/AA/", "/aa", true, true}, + {"/aA", "/aa", true, false}, + {"/aA/", "/aa", true, true}, + {"/A/", "/a/", true, false}, + {"/A", "/a/", true, true}, + {"/DOC", "/doc", true, false}, + {"/DOC/", "/doc", true, true}, + {"/NO", "", false, true}, + {"/DOC/GO", "", false, true}, + } + // With fixTrailingSlash = true + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, true) + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + // With fixTrailingSlash = false + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, false) + if test.slash { + if found { // test needs a trailingSlash fix. It must not be found! 
+ t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out)) + } + } else { + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + } +} + +func TestTreeInvalidNodeType(t *testing.T) { + const panicMsg = "invalid node type" + + tree := &node{} + tree.addRoute("/", fakeHandler("/")) + tree.addRoute("/:page", fakeHandler("/:page")) + + // set invalid node type + tree.children[0].nType = 42 + + // normal lookup + recv := catchPanic(func() { + tree.getValue("/test") + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } + + // case-insensitive lookup + recv = catchPanic(func() { + tree.findCaseInsensitivePath("/test", true) + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/.gitignore b/vendor/github.com/mattn/go-sqlite3/.gitignore new file mode 100644 index 0000000..8a0e48d --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/.gitignore @@ -0,0 +1,4 @@ +*.db +*.exe +*.dll +*.o diff --git a/vendor/github.com/mattn/go-sqlite3/.travis.yml b/vendor/github.com/mattn/go-sqlite3/.travis.yml new file mode 100644 index 0000000..46e70cb --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/.travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: required +dist: trusty +env: + - GOTAGS= + - GOTAGS=libsqlite3 + - GOTAGS=trace + - GOTAGS=vtable +go: + - 1.7.x + - 1.8.x + - 1.9.x + - master +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx + - go test -race -v . -tags "$GOTAGS" diff --git a/vendor/github.com/mattn/go-sqlite3/LICENSE b/vendor/github.com/mattn/go-sqlite3/LICENSE new file mode 100644 index 0000000..ca458bb --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
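The tree tests above lean on two helpers declared earlier in tree_test.go, outside this hunk: fakeHandler, which returns a Handle the tests can recognize, and catchPanic, which runs a function and reports whatever it panicked with. A minimal sketch of such a recover-based helper, under the assumption that the real one behaves the same way:

func catchPanic(testFunc func()) (recv interface{}) {
	defer func() {
		// recover returns non-nil only if testFunc panicked.
		recv = recover()
	}()
	testFunc()
	return
}

A nil result then means the route was inserted without panicking, which is exactly what the insertion loops above assert.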
diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md
new file mode 100644
index 0000000..ad00f10
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/README.md
@@ -0,0 +1,97 @@
+go-sqlite3
+==========
+
+[![GoDoc Reference](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
+[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3)
+
+Description
+-----------
+
+A sqlite3 driver that conforms to the built-in database/sql interface.
+
+Installation
+------------
+
+This package can be installed with the go get command:
+
+    go get github.com/mattn/go-sqlite3
+
+_go-sqlite3_ is a *cgo* package, so you need gcc to build an app that uses it.
+However, once _go-sqlite3_ has been installed with `go install github.com/mattn/go-sqlite3`, the compiled package is cached and gcc is no longer needed to build your app.
+
+Documentation
+-------------
+
+API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
+
+Examples can be found under the `./_example` directory.
+
+FAQ
+---
+
+* How do I build go-sqlite3 against the system libsqlite3 on Linux?
+
+    Use `go build --tags "libsqlite3 linux"`
+
+* How do I build go-sqlite3 against the system libsqlite3 on OS X?
+
+    Install sqlite3 from Homebrew: `brew install sqlite3`
+
+    Use `go build --tags "libsqlite3 darwin"`
+
+* How do I build go-sqlite3 with the icu extension?
+
+    Use `go build --tags "icu"`
+
+    Available extension tags: `json1`, `fts5`, `icu`
+
+* I can't build go-sqlite3 on 64-bit Windows.
+
+    > You are probably using go 1.0, which has a problem compiling/linking on 64-bit Windows.
+    > See: [#27](https://github.com/mattn/go-sqlite3/issues/27)
+
+* I get an insert error while a query is open.
+
+    > You can pass options in the connection string, for example as a URI.
+    > See: [#39](https://github.com/mattn/go-sqlite3/issues/39)
+
+* How do I cross-compile, e.g. with mingw on Linux or Mac?
+
+    > See: [#106](https://github.com/mattn/go-sqlite3/issues/106)
+    > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
+
+* How do I get `time.Time` values in the current locale?
+
+    Use `_loc=auto` in the SQLite3 filename scheme, e.g. `file:foo.db?_loc=auto`.
+
+* Can I use this driver from multiple goroutines concurrently?
+
+    Yes for read-only access, but no for writes. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209).
+
+* Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
+
+    Each connection to ":memory:" opens a brand new in-memory SQL database, so if
+    the stdlib's sql engine happens to open another connection and you've only
+    specified ":memory:", that connection will see a brand new database. A
+    workaround is to use "file::memory:?mode=memory&cache=shared". Every
+    connection to this string will point to the same in-memory database. See
+    [#204](https://github.com/mattn/go-sqlite3/issues/204) for more info.
+
+License
+-------
+
+MIT: http://mattn.mit-license.org/2012
+
+sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
+
+The -binding suffix was added to avoid build failures under gccgo.
+
+In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
+
+Author
+------
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go b/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go
new file mode 100644
index 0000000..85657e6
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/custom_func/main.go
@@ -0,0 +1,133 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"math"
+	"math/rand"
+
+	sqlite "github.com/mattn/go-sqlite3"
+)
+
+// pow computes x^y.
+func pow(x, y int64) int64 {
+	return int64(math.Pow(float64(x), float64(y)))
+}
+
+// xor computes the bitwise exclusive-or of all its arguments.
+func xor(xs ...int64) int64 {
+	var ret int64
+	for _, x := range xs {
+		ret ^= x
+	}
+	return ret
+}
+
+// getrand returns a random number. It's actually deterministic here because
+// we don't seed the RNG, but it's an example of a non-pure function from
+// SQLite's POV.
+func getrand() int64 {
+	return rand.Int63()
+}
+
+// stddev computes the standard deviation of a set of values grouped by GROUP BY.
+type stddev struct {
+	xs []int64
+	// Running average calculation
+	sum int64
+	n   int64
+}
+
+func newStddev() *stddev { return &stddev{} }
+
+func (s *stddev) Step(x int64) {
+	s.xs = append(s.xs, x)
+	s.sum += x
+	s.n++
+}
+
+func (s *stddev) Done() float64 {
+	mean := float64(s.sum) / float64(s.n)
+	var sqDiff []float64
+	for _, x := range s.xs {
+		sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2))
+	}
+	var dev float64
+	for _, x := range sqDiff {
+		dev += x
+	}
+	dev /= float64(len(sqDiff))
+	return math.Sqrt(dev)
+}
+
+func main() {
+	sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
+		ConnectHook: func(conn *sqlite.SQLiteConn) error {
+			if err := conn.RegisterFunc("pow", pow, true); err != nil {
+				return err
+			}
+			if err := conn.RegisterFunc("xor", xor, true); err != nil {
+				return err
+			}
+			if err := conn.RegisterFunc("rand", getrand, false); err != nil {
+				return err
+			}
+			if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
+				return err
+			}
+			return nil
+		},
+	})
+
+	db, err := sql.Open("sqlite3_custom", ":memory:")
+	if err != nil {
+		log.Fatal("Failed to open database:", err)
+	}
+	defer db.Close()
+
+	var i int64
+	err = db.QueryRow("SELECT pow(2,3)").Scan(&i)
+	if err != nil {
+		log.Fatal("POW query error:", err)
+	}
+	fmt.Println("pow(2,3) =", i) // 8
+
+	err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i)
+	if err != nil {
+		log.Fatal("XOR query error:", err)
+	}
+	fmt.Println("xor(1,2,3,4,5,6) =", i) // 7
+
+	err = db.QueryRow("SELECT rand()").Scan(&i)
+	if err != nil {
+		log.Fatal("RAND query error:", err)
+	}
+	fmt.Println("rand() =", i) // pseudorandom
+
+	_, err = db.Exec("create table foo (department integer, profits integer)")
+	if err != nil {
+		log.Fatal("Failed to create table:", err)
+	}
+	_, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)")
+	if err != nil {
+		log.Fatal("Failed to insert records:", err)
+	}
+
+	rows, err := db.Query("select department, stddev(profits) from foo group by department")
+	if err != nil {
+		log.Fatal("STDDEV query error:", err)
+	}
+	defer rows.Close()
+	for rows.Next() {
+		var dept int64
+		var dev float64
+		if err := rows.Scan(&dept, &dev); err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("dept=%d stddev=%f\n", dept, dev)
+	}
+	if err := rows.Err(); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git 
a/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go new file mode 100644 index 0000000..6023181 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/hook/hook.go @@ -0,0 +1,78 @@ +package main + +import ( + "database/sql" + "log" + "os" + + "github.com/mattn/go-sqlite3" +) + +func main() { + sqlite3conn := []*sqlite3.SQLiteConn{} + sql.Register("sqlite3_with_hook_example", + &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + sqlite3conn = append(sqlite3conn, conn) + conn.RegisterUpdateHook(func(op int, db string, table string, rowid int64) { + switch op { + case sqlite3.SQLITE_INSERT: + log.Println("Notified of insert on db", db, "table", table, "rowid", rowid) + } + }) + return nil + }, + }) + os.Remove("./foo.db") + os.Remove("./bar.db") + + srcDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db") + if err != nil { + log.Fatal(err) + } + defer srcDb.Close() + srcDb.Ping() + + _, err = srcDb.Exec("create table foo(id int, value text)") + if err != nil { + log.Fatal(err) + } + _, err = srcDb.Exec("insert into foo values(1, 'foo')") + if err != nil { + log.Fatal(err) + } + _, err = srcDb.Exec("insert into foo values(2, 'bar')") + if err != nil { + log.Fatal(err) + } + _, err = srcDb.Query("select * from foo") + if err != nil { + log.Fatal(err) + } + destDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db") + if err != nil { + log.Fatal(err) + } + defer destDb.Close() + destDb.Ping() + + bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main") + if err != nil { + log.Fatal(err) + } + + _, err = bk.Step(-1) + if err != nil { + log.Fatal(err) + } + _, err = destDb.Query("select * from foo") + if err != nil { + log.Fatal(err) + } + _, err = destDb.Exec("insert into foo values(3, 'bar')") + if err != nil { + log.Fatal(err) + } + + bk.Finish() +} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go b/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go new file mode 100644 index 0000000..4e4b897 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/limit/limit.go @@ -0,0 +1,113 @@ +package main + +import ( + "database/sql" + "fmt" + "log" + "os" + "strings" + + "github.com/mattn/go-sqlite3" +) + +func createBulkInsertQuery(n int, start int) (query string, args []interface{}) { + values := make([]string, n) + args = make([]interface{}, n*2) + pos := 0 + for i := 0; i < n; i++ { + values[i] = "(?, ?)" + args[pos] = start + i + args[pos+1] = fmt.Sprintf("こんにちわ世界%03d", i) + pos += 2 + } + query = fmt.Sprintf( + "insert into foo(id, name) values %s", + strings.Join(values, ", "), + ) + return +} + +func bukInsert(db *sql.DB, query string, args []interface{}) (err error) { + stmt, err := db.Prepare(query) + if err != nil { + return + } + + _, err = stmt.Exec(args...) 
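+	// The whole multi-row insert runs as a single statement with n*2 bound
+	// parameters, which is what SQLITE_LIMIT_VARIABLE_NUMBER caps below.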
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+func main() {
+	var sqlite3conn *sqlite3.SQLiteConn
+	sql.Register("sqlite3_with_limit", &sqlite3.SQLiteDriver{
+		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
+			sqlite3conn = conn
+			return nil
+		},
+	})
+
+	os.Remove("./foo.db")
+	db, err := sql.Open("sqlite3_with_limit", "./foo.db")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	sqlStmt := `
+	create table foo (id integer not null primary key, name text);
+	delete from foo;
+	`
+	_, err = db.Exec(sqlStmt)
+	if err != nil {
+		log.Printf("%q: %s\n", err, sqlStmt)
+		return
+	}
+
+	if sqlite3conn == nil {
+		log.Fatal("sqlite3 connection is not set")
+	}
+
+	limitVariableNumber := sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)
+	log.Printf("default SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber)
+
+	num := 400
+	query, args := createBulkInsertQuery(num, 0)
+	err = bukInsert(db, query, args)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	smallLimitVariableNumber := 100
+	sqlite3conn.SetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER, smallLimitVariableNumber)
+
+	limitVariableNumber = sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)
+	log.Printf("updated SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber)
+
+	query, args = createBulkInsertQuery(num, num)
+	err = bukInsert(db, query, args)
+	if err != nil {
+		log.Printf("expected to fail since SQLITE_LIMIT_VARIABLE_NUMBER is too small: %v", err)
+	}
+
+	bigLimitVariableNumber := 999999
+	sqlite3conn.SetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER, bigLimitVariableNumber)
+	limitVariableNumber = sqlite3conn.GetLimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER)
+	log.Printf("set SQLITE_LIMIT_VARIABLE_NUMBER: %d", bigLimitVariableNumber)
+	log.Printf("updated SQLITE_LIMIT_VARIABLE_NUMBER: %d", limitVariableNumber)
+
+	query, args = createBulkInsertQuery(500, num+num)
+	err = bukInsert(db, query, args)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	log.Println("no error if SQLITE_LIMIT_VARIABLE_NUMBER > 999")
+}
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
new file mode 100644
index 0000000..97b1e0f
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
@@ -0,0 +1,22 @@
+ifeq ($(OS),Windows_NT)
+EXE=extension.exe
+EXT=sqlite3_mod_regexp.dll
+RM=cmd /c del
+LDFLAG=
+else
+EXE=extension
+EXT=sqlite3_mod_regexp.so
+RM=rm
+LDFLAG=-fPIC
+endif
+
+all : $(EXE) $(EXT)
+
+$(EXE) : extension.go
+	go build $<
+
+$(EXT) : sqlite3_mod_regexp.c
+	gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre
+
+clean :
+	@-$(RM) $(EXE) $(EXT)
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
new file mode 100644
index 0000000..61ceb55
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
@@ -0,0 +1,43 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"github.com/mattn/go-sqlite3"
+	"log"
+)
+
+func main() {
+	sql.Register("sqlite3_with_extensions",
+		&sqlite3.SQLiteDriver{
+			Extensions: []string{
+				"sqlite3_mod_regexp",
+			},
+		})
+
+	db, err := sql.Open("sqlite3_with_extensions", ":memory:")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// Force db to make a new connection in the pool
+	// by keeping the original one busy in a transaction.
+	tx, err := db.Begin()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer tx.Commit()
+
+	// The new connection works (hopefully!)
+	rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer rows.Close()
+	for rows.Next() {
+		var helloworld string
+		rows.Scan(&helloworld)
+		fmt.Println(helloworld)
+	}
+}
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
new file mode 100644
index 0000000..277764d
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
@@ -0,0 +1,31 @@
+#include <pcre.h>
+#include <string.h>
+#include <stdio.h>
+#include <sqlite3ext.h>
+
+SQLITE_EXTENSION_INIT1
+static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
+  if (argc >= 2) {
+    const char *target = (const char *)sqlite3_value_text(argv[1]);
+    const char *pattern = (const char *)sqlite3_value_text(argv[0]);
+    const char* errstr = NULL;
+    int erroff = 0;
+    int vec[500];
+    int n, rc;
+    pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
+    rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
+    if (rc <= 0) {
+      sqlite3_result_error(context, errstr, 0);
+      return;
+    }
+    sqlite3_result_int(context, 1);
+  }
+}
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
+  SQLITE_EXTENSION_INIT2(api);
+  return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL);
+}
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
new file mode 100644
index 0000000..cdd4853
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
@@ -0,0 +1,24 @@
+ifeq ($(OS),Windows_NT)
+EXE=extension.exe
+EXT=sqlite3_mod_vtable.dll
+RM=cmd /c del
+LIBCURL=-lcurldll
+LDFLAG=
+else
+EXE=extension
+EXT=sqlite3_mod_vtable.so
+RM=rm
+LDFLAG=-fPIC
+LIBCURL=-lcurl
+endif
+
+all : $(EXE) $(EXT)
+
+$(EXE) : extension.go
+	go build $<
+
+$(EXT) : sqlite3_mod_vtable.cc
+	g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL)
+
+clean :
+	@-$(RM) $(EXE) $(EXT)
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
new file mode 100644
index 0000000..f738af6
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	"github.com/mattn/go-sqlite3"
+)
+
+func main() {
+	sql.Register("sqlite3_with_extensions",
+		&sqlite3.SQLiteDriver{
+			Extensions: []string{
+				"sqlite3_mod_vtable",
+			},
+		})
+
+	db, err := sql.Open("sqlite3_with_extensions", ":memory:")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
+
+	rows, err := db.Query("select id, full_name, description, html_url from repo")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer rows.Close()
+	for rows.Next() {
+		var id, fullName, description, htmlURL string
+		rows.Scan(&id, &fullName, &description, &htmlURL)
+		fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL)
+	}
+}
diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
new file mode 100644
index 0000000..2142647
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
@@ -0,0 
+1,1040 @@ +/* + * Copyright 2009-2010 Cybozu Labs, Inc. + * Copyright 2011 Kazuho Oku + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY CYBOZU LABS, INC. ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL CYBOZU LABS, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are + * those of the authors and should not be interpreted as representing official + * policies, either expressed or implied, of Cybozu Labs, Inc. + * + */ +#ifndef picojson_h +#define picojson_h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER + #define SNPRINTF _snprintf_s + #pragma warning(push) + #pragma warning(disable : 4244) // conversion from int to char +#else + #define SNPRINTF snprintf +#endif + +namespace picojson { + + enum { + null_type, + boolean_type, + number_type, + string_type, + array_type, + object_type + }; + + struct null {}; + + class value { + public: + typedef std::vector array; + typedef std::map object; + union _storage { + bool boolean_; + double number_; + std::string* string_; + array* array_; + object* object_; + }; + protected: + int type_; + _storage u_; + public: + value(); + value(int type, bool); + explicit value(bool b); + explicit value(double n); + explicit value(const std::string& s); + explicit value(const array& a); + explicit value(const object& o); + explicit value(const char* s); + value(const char* s, size_t len); + ~value(); + value(const value& x); + value& operator=(const value& x); + void swap(value& x); + template bool is() const; + template const T& get() const; + template T& get(); + bool evaluate_as_boolean() const; + const value& get(size_t idx) const; + const value& get(const std::string& key) const; + bool contains(size_t idx) const; + bool contains(const std::string& key) const; + std::string to_str() const; + template void serialize(Iter os) const; + std::string serialize() const; + private: + template value(const T*); // intentionally defined to block implicit conversion of pointer to bool + }; + + typedef value::array array; + typedef value::object object; + + inline value::value() : type_(null_type) {} + + inline value::value(int type, bool) : type_(type) { + switch (type) { +#define INIT(p, v) case p##type: u_.p = v; break + INIT(boolean_, false); + INIT(number_, 0.0); + INIT(string_, new std::string()); + 
INIT(array_, new array()); + INIT(object_, new object()); +#undef INIT + default: break; + } + } + + inline value::value(bool b) : type_(boolean_type) { + u_.boolean_ = b; + } + + inline value::value(double n) : type_(number_type) { + u_.number_ = n; + } + + inline value::value(const std::string& s) : type_(string_type) { + u_.string_ = new std::string(s); + } + + inline value::value(const array& a) : type_(array_type) { + u_.array_ = new array(a); + } + + inline value::value(const object& o) : type_(object_type) { + u_.object_ = new object(o); + } + + inline value::value(const char* s) : type_(string_type) { + u_.string_ = new std::string(s); + } + + inline value::value(const char* s, size_t len) : type_(string_type) { + u_.string_ = new std::string(s, len); + } + + inline value::~value() { + switch (type_) { +#define DEINIT(p) case p##type: delete u_.p; break + DEINIT(string_); + DEINIT(array_); + DEINIT(object_); +#undef DEINIT + default: break; + } + } + + inline value::value(const value& x) : type_(x.type_) { + switch (type_) { +#define INIT(p, v) case p##type: u_.p = v; break + INIT(string_, new std::string(*x.u_.string_)); + INIT(array_, new array(*x.u_.array_)); + INIT(object_, new object(*x.u_.object_)); +#undef INIT + default: + u_ = x.u_; + break; + } + } + + inline value& value::operator=(const value& x) { + if (this != &x) { + this->~value(); + new (this) value(x); + } + return *this; + } + + inline void value::swap(value& x) { + std::swap(type_, x.type_); + std::swap(u_, x.u_); + } + +#define IS(ctype, jtype) \ + template <> inline bool value::is() const { \ + return type_ == jtype##_type; \ + } + IS(null, null) + IS(bool, boolean) + IS(int, number) + IS(double, number) + IS(std::string, string) + IS(array, array) + IS(object, object) +#undef IS + +#define GET(ctype, var) \ + template <> inline const ctype& value::get() const { \ + assert("type mismatch! call vis() before get()" \ + && is()); \ + return var; \ + } \ + template <> inline ctype& value::get() { \ + assert("type mismatch! call is() before get()" \ + && is()); \ + return var; \ + } + GET(bool, u_.boolean_) + GET(double, u_.number_) + GET(std::string, *u_.string_) + GET(array, *u_.array_) + GET(object, *u_.object_) +#undef GET + + inline bool value::evaluate_as_boolean() const { + switch (type_) { + case null_type: + return false; + case boolean_type: + return u_.boolean_; + case number_type: + return u_.number_ != 0; + case string_type: + return ! u_.string_->empty(); + default: + return true; + } + } + + inline const value& value::get(size_t idx) const { + static value s_null; + assert(is()); + return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null; + } + + inline const value& value::get(const std::string& key) const { + static value s_null; + assert(is()); + object::const_iterator i = u_.object_->find(key); + return i != u_.object_->end() ? i->second : s_null; + } + + inline bool value::contains(size_t idx) const { + assert(is()); + return idx < u_.array_->size(); + } + + inline bool value::contains(const std::string& key) const { + assert(is()); + object::const_iterator i = u_.object_->find(key); + return i != u_.object_->end(); + } + + inline std::string value::to_str() const { + switch (type_) { + case null_type: return "null"; + case boolean_type: return u_.boolean_ ? "true" : "false"; + case number_type: { + char buf[256]; + double tmp; + SNPRINTF(buf, sizeof(buf), fabs(u_.number_) < (1ULL << 53) && modf(u_.number_, &tmp) == 0 ? 
"%.f" : "%.17g", u_.number_); + return buf; + } + case string_type: return *u_.string_; + case array_type: return "array"; + case object_type: return "object"; + default: assert(0); +#ifdef _MSC_VER + __assume(0); +#endif + } + return std::string(); + } + + template void copy(const std::string& s, Iter oi) { + std::copy(s.begin(), s.end(), oi); + } + + template void serialize_str(const std::string& s, Iter oi) { + *oi++ = '"'; + for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) { + switch (*i) { +#define MAP(val, sym) case val: copy(sym, oi); break + MAP('"', "\\\""); + MAP('\\', "\\\\"); + MAP('/', "\\/"); + MAP('\b', "\\b"); + MAP('\f', "\\f"); + MAP('\n', "\\n"); + MAP('\r', "\\r"); + MAP('\t', "\\t"); +#undef MAP + default: + if ((unsigned char)*i < 0x20 || *i == 0x7f) { + char buf[7]; + SNPRINTF(buf, sizeof(buf), "\\u%04x", *i & 0xff); + copy(buf, buf + 6, oi); + } else { + *oi++ = *i; + } + break; + } + } + *oi++ = '"'; + } + + template void value::serialize(Iter oi) const { + switch (type_) { + case string_type: + serialize_str(*u_.string_, oi); + break; + case array_type: { + *oi++ = '['; + for (array::const_iterator i = u_.array_->begin(); + i != u_.array_->end(); + ++i) { + if (i != u_.array_->begin()) { + *oi++ = ','; + } + i->serialize(oi); + } + *oi++ = ']'; + break; + } + case object_type: { + *oi++ = '{'; + for (object::const_iterator i = u_.object_->begin(); + i != u_.object_->end(); + ++i) { + if (i != u_.object_->begin()) { + *oi++ = ','; + } + serialize_str(i->first, oi); + *oi++ = ':'; + i->second.serialize(oi); + } + *oi++ = '}'; + break; + } + default: + copy(to_str(), oi); + break; + } + } + + inline std::string value::serialize() const { + std::string s; + serialize(std::back_inserter(s)); + return s; + } + + template class input { + protected: + Iter cur_, end_; + int last_ch_; + bool ungot_; + int line_; + public: + input(const Iter& first, const Iter& last) : cur_(first), end_(last), last_ch_(-1), ungot_(false), line_(1) {} + int getc() { + if (ungot_) { + ungot_ = false; + return last_ch_; + } + if (cur_ == end_) { + last_ch_ = -1; + return -1; + } + if (last_ch_ == '\n') { + line_++; + } + last_ch_ = *cur_++ & 0xff; + return last_ch_; + } + void ungetc() { + if (last_ch_ != -1) { + assert(! ungot_); + ungot_ = true; + } + } + Iter cur() const { return cur_; } + int line() const { return line_; } + void skip_ws() { + while (1) { + int ch = getc(); + if (! 
(ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) { + ungetc(); + break; + } + } + } + bool expect(int expect) { + skip_ws(); + if (getc() != expect) { + ungetc(); + return false; + } + return true; + } + bool match(const std::string& pattern) { + for (std::string::const_iterator pi(pattern.begin()); + pi != pattern.end(); + ++pi) { + if (getc() != *pi) { + ungetc(); + return false; + } + } + return true; + } + }; + + template inline int _parse_quadhex(input &in) { + int uni_ch = 0, hex; + for (int i = 0; i < 4; i++) { + if ((hex = in.getc()) == -1) { + return -1; + } + if ('0' <= hex && hex <= '9') { + hex -= '0'; + } else if ('A' <= hex && hex <= 'F') { + hex -= 'A' - 0xa; + } else if ('a' <= hex && hex <= 'f') { + hex -= 'a' - 0xa; + } else { + in.ungetc(); + return -1; + } + uni_ch = uni_ch * 16 + hex; + } + return uni_ch; + } + + template inline bool _parse_codepoint(String& out, input& in) { + int uni_ch; + if ((uni_ch = _parse_quadhex(in)) == -1) { + return false; + } + if (0xd800 <= uni_ch && uni_ch <= 0xdfff) { + if (0xdc00 <= uni_ch) { + // a second 16-bit of a surrogate pair appeared + return false; + } + // first 16-bit of surrogate pair, get the next one + if (in.getc() != '\\' || in.getc() != 'u') { + in.ungetc(); + return false; + } + int second = _parse_quadhex(in); + if (! (0xdc00 <= second && second <= 0xdfff)) { + return false; + } + uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff); + uni_ch += 0x10000; + } + if (uni_ch < 0x80) { + out.push_back(uni_ch); + } else { + if (uni_ch < 0x800) { + out.push_back(0xc0 | (uni_ch >> 6)); + } else { + if (uni_ch < 0x10000) { + out.push_back(0xe0 | (uni_ch >> 12)); + } else { + out.push_back(0xf0 | (uni_ch >> 18)); + out.push_back(0x80 | ((uni_ch >> 12) & 0x3f)); + } + out.push_back(0x80 | ((uni_ch >> 6) & 0x3f)); + } + out.push_back(0x80 | (uni_ch & 0x3f)); + } + return true; + } + + template inline bool _parse_string(String& out, input& in) { + while (1) { + int ch = in.getc(); + if (ch < ' ') { + in.ungetc(); + return false; + } else if (ch == '"') { + return true; + } else if (ch == '\\') { + if ((ch = in.getc()) == -1) { + return false; + } + switch (ch) { +#define MAP(sym, val) case sym: out.push_back(val); break + MAP('"', '\"'); + MAP('\\', '\\'); + MAP('/', '/'); + MAP('b', '\b'); + MAP('f', '\f'); + MAP('n', '\n'); + MAP('r', '\r'); + MAP('t', '\t'); +#undef MAP + case 'u': + if (! _parse_codepoint(out, in)) { + return false; + } + break; + default: + return false; + } + } else { + out.push_back(ch); + } + } + return false; + } + + template inline bool _parse_array(Context& ctx, input& in) { + if (! ctx.parse_array_start()) { + return false; + } + size_t idx = 0; + if (in.expect(']')) { + return ctx.parse_array_stop(idx); + } + do { + if (! ctx.parse_array_item(in, idx)) { + return false; + } + idx++; + } while (in.expect(',')); + return in.expect(']') && ctx.parse_array_stop(idx); + } + + template inline bool _parse_object(Context& ctx, input& in) { + if (! ctx.parse_object_start()) { + return false; + } + if (in.expect('}')) { + return true; + } + do { + std::string key; + if (! in.expect('"') + || ! _parse_string(key, in) + || ! in.expect(':')) { + return false; + } + if (! ctx.parse_object_item(in, key)) { + return false; + } + } while (in.expect(',')); + return in.expect('}'); + } + + template inline bool _parse_number(double& out, input& in) { + std::string num_str; + while (1) { + int ch = in.getc(); + if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == '.' 
+ || ch == 'e' || ch == 'E') { + num_str.push_back(ch); + } else { + in.ungetc(); + break; + } + } + char* endp; + out = strtod(num_str.c_str(), &endp); + return endp == num_str.c_str() + num_str.size(); + } + + template inline bool _parse(Context& ctx, input& in) { + in.skip_ws(); + int ch = in.getc(); + switch (ch) { +#define IS(ch, text, op) case ch: \ + if (in.match(text) && op) { \ + return true; \ + } else { \ + return false; \ + } + IS('n', "ull", ctx.set_null()); + IS('f', "alse", ctx.set_bool(false)); + IS('t', "rue", ctx.set_bool(true)); +#undef IS + case '"': + return ctx.parse_string(in); + case '[': + return _parse_array(ctx, in); + case '{': + return _parse_object(ctx, in); + default: + if (('0' <= ch && ch <= '9') || ch == '-') { + in.ungetc(); + double f; + if (_parse_number(f, in)) { + ctx.set_number(f); + return true; + } else { + return false; + } + } + break; + } + in.ungetc(); + return false; + } + + class deny_parse_context { + public: + bool set_null() { return false; } + bool set_bool(bool) { return false; } + bool set_number(double) { return false; } + template bool parse_string(input&) { return false; } + bool parse_array_start() { return false; } + template bool parse_array_item(input&, size_t) { + return false; + } + bool parse_array_stop(size_t) { return false; } + bool parse_object_start() { return false; } + template bool parse_object_item(input&, const std::string&) { + return false; + } + }; + + class default_parse_context { + protected: + value* out_; + public: + default_parse_context(value* out) : out_(out) {} + bool set_null() { + *out_ = value(); + return true; + } + bool set_bool(bool b) { + *out_ = value(b); + return true; + } + bool set_number(double f) { + *out_ = value(f); + return true; + } + template bool parse_string(input& in) { + *out_ = value(string_type, false); + return _parse_string(out_->get(), in); + } + bool parse_array_start() { + *out_ = value(array_type, false); + return true; + } + template bool parse_array_item(input& in, size_t) { + array& a = out_->get(); + a.push_back(value()); + default_parse_context ctx(&a.back()); + return _parse(ctx, in); + } + bool parse_array_stop(size_t) { return true; } + bool parse_object_start() { + *out_ = value(object_type, false); + return true; + } + template bool parse_object_item(input& in, const std::string& key) { + object& o = out_->get(); + default_parse_context ctx(&o[key]); + return _parse(ctx, in); + } + private: + default_parse_context(const default_parse_context&); + default_parse_context& operator=(const default_parse_context&); + }; + + class null_parse_context { + public: + struct dummy_str { + void push_back(int) {} + }; + public: + null_parse_context() {} + bool set_null() { return true; } + bool set_bool(bool) { return true; } + bool set_number(double) { return true; } + template bool parse_string(input& in) { + dummy_str s; + return _parse_string(s, in); + } + bool parse_array_start() { return true; } + template bool parse_array_item(input& in, size_t) { + return _parse(*this, in); + } + bool parse_array_stop(size_t) { return true; } + bool parse_object_start() { return true; } + template bool parse_object_item(input& in, const std::string&) { + return _parse(*this, in); + } + private: + null_parse_context(const null_parse_context&); + null_parse_context& operator=(const null_parse_context&); + }; + + // obsolete, use the version below + template inline std::string parse(value& out, Iter& pos, const Iter& last) { + std::string err; + pos = parse(out, pos, last, &err); + return 
err; + } + + template inline Iter _parse(Context& ctx, const Iter& first, const Iter& last, std::string* err) { + input in(first, last); + if (! _parse(ctx, in) && err != NULL) { + char buf[64]; + SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line()); + *err = buf; + while (1) { + int ch = in.getc(); + if (ch == -1 || ch == '\n') { + break; + } else if (ch >= ' ') { + err->push_back(ch); + } + } + } + return in.cur(); + } + + template inline Iter parse(value& out, const Iter& first, const Iter& last, std::string* err) { + default_parse_context ctx(&out); + return _parse(ctx, first, last, err); + } + + inline std::string parse(value& out, std::istream& is) { + std::string err; + parse(out, std::istreambuf_iterator(is.rdbuf()), + std::istreambuf_iterator(), &err); + return err; + } + + template struct last_error_t { + static std::string s; + }; + template std::string last_error_t::s; + + inline void set_last_error(const std::string& s) { + last_error_t::s = s; + } + + inline const std::string& get_last_error() { + return last_error_t::s; + } + + inline bool operator==(const value& x, const value& y) { + if (x.is()) + return y.is(); +#define PICOJSON_CMP(type) \ + if (x.is()) \ + return y.is() && x.get() == y.get() + PICOJSON_CMP(bool); + PICOJSON_CMP(double); + PICOJSON_CMP(std::string); + PICOJSON_CMP(array); + PICOJSON_CMP(object); +#undef PICOJSON_CMP + assert(0); +#ifdef _MSC_VER + __assume(0); +#endif + return false; + } + + inline bool operator!=(const value& x, const value& y) { + return ! (x == y); + } +} + +namespace std { + template<> inline void swap(picojson::value& x, picojson::value& y) + { + x.swap(y); + } +} + +inline std::istream& operator>>(std::istream& is, picojson::value& x) +{ + picojson::set_last_error(std::string()); + std::string err = picojson::parse(x, is); + if (! err.empty()) { + picojson::set_last_error(err); + is.setstate(std::ios::failbit); + } + return is; +} + +inline std::ostream& operator<<(std::ostream& os, const picojson::value& x) +{ + x.serialize(std::ostream_iterator(os)); + return os; +} +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +#endif +#ifdef TEST_PICOJSON +#ifdef _MSC_VER + #pragma warning(disable : 4127) // conditional expression is constant +#endif + +using namespace std; + +static void plan(int num) +{ + printf("1..%d\n", num); +} + +static bool success = true; + +static void ok(bool b, const char* name = "") +{ + static int n = 1; + if (! b) + success = false; + printf("%s %d - %s\n", b ? 
"ok" : "ng", n++, name); +} + +template void is(const T& x, const T& y, const char* name = "") +{ + if (x == y) { + ok(true, name); + } else { + ok(false, name); + } +} + +#include +#include +#include +#include + +int main(void) +{ + plan(85); + + // constructors +#define TEST(expr, expected) \ + is(picojson::value expr .serialize(), string(expected), "picojson::value" #expr) + + TEST( (true), "true"); + TEST( (false), "false"); + TEST( (42.0), "42"); + TEST( (string("hello")), "\"hello\""); + TEST( ("hello"), "\"hello\""); + TEST( ("hello", 4), "\"hell\""); + + { + double a = 1; + for (int i = 0; i < 1024; i++) { + picojson::value vi(a); + std::stringstream ss; + ss << vi; + picojson::value vo; + ss >> vo; + double b = vo.get(); + if ((i < 53 && a != b) || fabs(a - b) / b > 1e-8) { + printf("ng i=%d a=%.18e b=%.18e\n", i, a, b); + } + a *= 2; + } + } + +#undef TEST + +#define TEST(in, type, cmp, serialize_test) { \ + picojson::value v; \ + const char* s = in; \ + string err = picojson::parse(v, s, s + strlen(s)); \ + ok(err.empty(), in " no error"); \ + ok(v.is(), in " check type"); \ + is(v.get(), cmp, in " correct output"); \ + is(*s, '\0', in " read to eof"); \ + if (serialize_test) { \ + is(v.serialize(), string(in), in " serialize"); \ + } \ + } + TEST("false", bool, false, true); + TEST("true", bool, true, true); + TEST("90.5", double, 90.5, false); + TEST("1.7976931348623157e+308", double, DBL_MAX, false); + TEST("\"hello\"", string, string("hello"), true); + TEST("\"\\\"\\\\\\/\\b\\f\\n\\r\\t\"", string, string("\"\\/\b\f\n\r\t"), + true); + TEST("\"\\u0061\\u30af\\u30ea\\u30b9\"", string, + string("a\xe3\x82\xaf\xe3\x83\xaa\xe3\x82\xb9"), false); + TEST("\"\\ud840\\udc0b\"", string, string("\xf0\xa0\x80\x8b"), false); +#undef TEST + +#define TEST(type, expr) { \ + picojson::value v; \ + const char *s = expr; \ + string err = picojson::parse(v, s, s + strlen(s)); \ + ok(err.empty(), "empty " #type " no error"); \ + ok(v.is(), "empty " #type " check type"); \ + ok(v.get().empty(), "check " #type " array size"); \ + } + TEST(array, "[]"); + TEST(object, "{}"); +#undef TEST + + { + picojson::value v; + const char *s = "[1,true,\"hello\"]"; + string err = picojson::parse(v, s, s + strlen(s)); + ok(err.empty(), "array no error"); + ok(v.is(), "array check type"); + is(v.get().size(), size_t(3), "check array size"); + ok(v.contains(0), "check contains array[0]"); + ok(v.get(0).is(), "check array[0] type"); + is(v.get(0).get(), 1.0, "check array[0] value"); + ok(v.contains(1), "check contains array[1]"); + ok(v.get(1).is(), "check array[1] type"); + ok(v.get(1).get(), "check array[1] value"); + ok(v.contains(2), "check contains array[2]"); + ok(v.get(2).is(), "check array[2] type"); + is(v.get(2).get(), string("hello"), "check array[2] value"); + ok(!v.contains(3), "check not contains array[3]"); + } + + { + picojson::value v; + const char *s = "{ \"a\": true }"; + string err = picojson::parse(v, s, s + strlen(s)); + ok(err.empty(), "object no error"); + ok(v.is(), "object check type"); + is(v.get().size(), size_t(1), "check object size"); + ok(v.contains("a"), "check contains property"); + ok(v.get("a").is(), "check bool property exists"); + is(v.get("a").get(), true, "check bool property value"); + is(v.serialize(), string("{\"a\":true}"), "serialize object"); + ok(!v.contains("z"), "check not contains property"); + } + +#define TEST(json, msg) do { \ + picojson::value v; \ + const char *s = json; \ + string err = picojson::parse(v, s, s + strlen(s)); \ + is(err, string("syntax error 
at line " msg), msg); \ + } while (0) + TEST("falsoa", "1 near: oa"); + TEST("{]", "1 near: ]"); + TEST("\n\bbell", "2 near: bell"); + TEST("\"abc\nd\"", "1 near: "); +#undef TEST + + { + picojson::value v1, v2; + const char *s; + string err; + s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; + err = picojson::parse(v1, s, s + strlen(s)); + s = "{ \"d\": 2.0, \"b\": true, \"a\": [1,2,\"three\"] }"; + err = picojson::parse(v2, s, s + strlen(s)); + ok((v1 == v2), "check == operator in deep comparison"); + } + + { + picojson::value v1, v2; + const char *s; + string err; + s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; + err = picojson::parse(v1, s, s + strlen(s)); + s = "{ \"d\": 2.0, \"a\": [1,\"three\"], \"b\": true }"; + err = picojson::parse(v2, s, s + strlen(s)); + ok((v1 != v2), "check != operator for array in deep comparison"); + } + + { + picojson::value v1, v2; + const char *s; + string err; + s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; + err = picojson::parse(v1, s, s + strlen(s)); + s = "{ \"d\": 2.0, \"a\": [1,2,\"three\"], \"b\": false }"; + err = picojson::parse(v2, s, s + strlen(s)); + ok((v1 != v2), "check != operator for object in deep comparison"); + } + + { + picojson::value v1, v2; + const char *s; + string err; + s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }"; + err = picojson::parse(v1, s, s + strlen(s)); + picojson::object& o = v1.get(); + o.erase("b"); + picojson::array& a = o["a"].get(); + picojson::array::iterator i; + i = std::remove(a.begin(), a.end(), picojson::value(std::string("three"))); + a.erase(i, a.end()); + s = "{ \"a\": [1,2], \"d\": 2 }"; + err = picojson::parse(v2, s, s + strlen(s)); + ok((v1 == v2), "check erase()"); + } + + ok(picojson::value(3.0).serialize() == "3", + "integral number should be serialized as a integer"); + + { + const char* s = "{ \"a\": [1,2], \"d\": 2 }"; + picojson::null_parse_context ctx; + string err; + picojson::_parse(ctx, s, s + strlen(s), &err); + ok(err.empty(), "null_parse_context"); + } + + { + picojson::value v1, v2; + v1 = picojson::value(true); + swap(v1, v2); + ok(v1.is(), "swap (null)"); + ok(v2.get() == true, "swap (bool)"); + + v1 = picojson::value("a"); + v2 = picojson::value(1.0); + swap(v1, v2); + ok(v1.get() == 1.0, "swap (dobule)"); + ok(v2.get() == "a", "swap (string)"); + + v1 = picojson::value(picojson::object()); + v2 = picojson::value(picojson::array()); + swap(v1, v2); + ok(v1.is(), "swap (array)"); + ok(v2.is(), "swap (object)"); + } + + return success ? 
0 : 1; +} + +#endif diff --git a/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc new file mode 100644 index 0000000..5bd4e66 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc @@ -0,0 +1,238 @@ +#include +#include +#include +#include +#include +#include "picojson.h" + +#ifdef _WIN32 +# define EXPORT __declspec(dllexport) +#else +# define EXPORT +#endif + +SQLITE_EXTENSION_INIT1; + +typedef struct { + char* data; // response data from server + size_t size; // response size of data +} MEMFILE; + +MEMFILE* +memfopen() { + MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE)); + if (mf) { + mf->data = NULL; + mf->size = 0; + } + return mf; +} + +void +memfclose(MEMFILE* mf) { + if (mf->data) free(mf->data); + free(mf); +} + +size_t +memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) { + MEMFILE* mf = (MEMFILE*) stream; + int block = size * nmemb; + if (!mf) return block; // through + if (!mf->data) + mf->data = (char*) malloc(block); + else + mf->data = (char*) realloc(mf->data, mf->size + block); + if (mf->data) { + memcpy(mf->data + mf->size, ptr, block); + mf->size += block; + } + return block; +} + +char* +memfstrdup(MEMFILE* mf) { + char* buf; + if (mf->size == 0) return NULL; + buf = (char*) malloc(mf->size + 1); + memcpy(buf, mf->data, mf->size); + buf[mf->size] = 0; + return buf; +} + +static int +my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) { + std::stringstream ss; + ss << "CREATE TABLE " << argv[0] + << "(id int, full_name text, description text, html_url text)"; + int rc = sqlite3_declare_vtab(db, ss.str().c_str()); + *ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab)); + memset(*ppVTab, 0, sizeof(sqlite3_vtab)); + return rc; +} + +static int +my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) { + return my_connect(db, pAux, argc, argv, ppVTab, c); +} + +static int my_disconnect(sqlite3_vtab *pVTab) { + sqlite3_free(pVTab); + return SQLITE_OK; +} + +static int +my_destroy(sqlite3_vtab *pVTab) { + sqlite3_free(pVTab); + return SQLITE_OK; +} + +typedef struct { + sqlite3_vtab_cursor base; + int index; + picojson::value* rows; +} cursor; + +static int +my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) { + MEMFILE* mf; + CURL* curl; + char* json; + CURLcode res = CURLE_OK; + char error[CURL_ERROR_SIZE] = {0}; + char* cert_file = getenv("SSL_CERT_FILE"); + + mf = memfopen(); + curl = curl_easy_init(); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1); + curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2); + curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0"); + curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories"); + if (cert_file) + curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); + curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite); + res = curl_easy_perform(curl); + curl_easy_cleanup(curl); + if (res != CURLE_OK) { + std::cerr << error << std::endl; + return SQLITE_FAIL; + } + + picojson::value* v = new picojson::value; + std::string err; + picojson::parse(*v, mf->data, mf->data + mf->size, &err); + memfclose(mf); + + if (!err.empty()) { + delete v; + std::cerr << err << std::endl; + return SQLITE_FAIL; + 
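+    // (v was already freed above; no cursor is allocated when parsing fails)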
} + + cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor)); + c->rows = v; + c->index = 0; + *ppCursor = &c->base; + return SQLITE_OK; +} + +static int +my_close(cursor *c) { + delete c->rows; + sqlite3_free(c); + return SQLITE_OK; +} + +static int +my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) { + c->index = 0; + return SQLITE_OK; +} + +static int +my_next(cursor *c) { + c->index++; + return SQLITE_OK; +} + +static int +my_eof(cursor *c) { + return c->index >= c->rows->get().size() ? 1 : 0; +} + +static int +my_column(cursor *c, sqlite3_context *ctxt, int i) { + picojson::value v = c->rows->get()[c->index]; + picojson::object row = v.get(); + const char* p = NULL; + switch (i) { + case 0: + p = row["id"].to_str().c_str(); + break; + case 1: + p = row["full_name"].to_str().c_str(); + break; + case 2: + p = row["description"].to_str().c_str(); + break; + case 3: + p = row["html_url"].to_str().c_str(); + break; + } + sqlite3_result_text(ctxt, strdup(p), strlen(p), free); + return SQLITE_OK; +} + +static int +my_rowid(cursor *c, sqlite3_int64 *pRowid) { + *pRowid = c->index; + return SQLITE_OK; +} + +static int +my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) { + return SQLITE_OK; +} + +static const sqlite3_module module = { + 0, + my_create, + my_connect, + my_bestindex, + my_disconnect, + my_destroy, + my_open, + (int (*)(sqlite3_vtab_cursor *)) my_close, + (int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter, + (int (*)(sqlite3_vtab_cursor *)) my_next, + (int (*)(sqlite3_vtab_cursor *)) my_eof, + (int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column, + (int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid, + NULL, // my_update + NULL, // my_begin + NULL, // my_sync + NULL, // my_commit + NULL, // my_rollback + NULL, // my_findfunction + NULL, // my_rename +}; + +static void +destructor(void *arg) { + return; +} + + +extern "C" { + +EXPORT int +sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) { + SQLITE_EXTENSION_INIT2(api); + sqlite3_create_module_v2(db, "github", &module, NULL, destructor); + return 0; +} + +} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go b/vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go new file mode 100644 index 0000000..261ed4d --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/simple/simple.go @@ -0,0 +1,106 @@ +package main + +import ( + "database/sql" + "fmt" + _ "github.com/mattn/go-sqlite3" + "log" + "os" +) + +func main() { + os.Remove("./foo.db") + + db, err := sql.Open("sqlite3", "./foo.db") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + sqlStmt := ` + create table foo (id integer not null primary key, name text); + delete from foo; + ` + _, err = db.Exec(sqlStmt) + if err != nil { + log.Printf("%q: %s\n", err, sqlStmt) + return + } + + tx, err := db.Begin() + if err != nil { + log.Fatal(err) + } + stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)") + if err != nil { + log.Fatal(err) + } + defer stmt.Close() + for i := 0; i < 100; i++ { + _, err = stmt.Exec(i, fmt.Sprintf("こんにちわ世界%03d", i)) + if err != nil { + log.Fatal(err) + } + } + tx.Commit() + + rows, err := db.Query("select id, name from foo") + if err != nil { + log.Fatal(err) + } + defer rows.Close() + for rows.Next() { + var id int + var name string + err = rows.Scan(&id, &name) + if err != nil { + log.Fatal(err) + } + fmt.Println(id, name) + } + err = rows.Err() + if err != nil { + 
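+			// Abort the example on any row-scan error.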
log.Fatal(err) + } + + stmt, err = db.Prepare("select name from foo where id = ?") + if err != nil { + log.Fatal(err) + } + defer stmt.Close() + var name string + err = stmt.QueryRow("3").Scan(&name) + if err != nil { + log.Fatal(err) + } + fmt.Println(name) + + _, err = db.Exec("delete from foo") + if err != nil { + log.Fatal(err) + } + + _, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')") + if err != nil { + log.Fatal(err) + } + + rows, err = db.Query("select id, name from foo") + if err != nil { + log.Fatal(err) + } + defer rows.Close() + for rows.Next() { + var id int + var name string + err = rows.Scan(&id, &name) + if err != nil { + log.Fatal(err) + } + fmt.Println(id, name) + } + err = rows.Err() + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/trace/main.go b/vendor/github.com/mattn/go-sqlite3/_example/trace/main.go new file mode 100644 index 0000000..9f83ee1 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/trace/main.go @@ -0,0 +1,264 @@ +package main + +import ( + "database/sql" + "fmt" + "log" + "os" + + sqlite3 "github.com/mattn/go-sqlite3" +) + +func traceCallback(info sqlite3.TraceInfo) int { + // Not very readable but may be useful; uncomment next line in case of doubt: + //fmt.Printf("Trace: %#v\n", info) + + var dbErrText string + if info.DBError.Code != 0 || info.DBError.ExtendedCode != 0 { + dbErrText = fmt.Sprintf("; DB error: %#v", info.DBError) + } else { + dbErrText = "." + } + + // Show the Statement-or-Trigger text in curly braces ('{', '}') + // since from the *paired* ASCII characters they are + // the least used in SQL syntax, therefore better visual delimiters. + // Maybe show 'ExpandedSQL' the same way as 'StmtOrTrigger'. + // + // A known use of curly braces (outside strings) is + // for ODBC escape sequences. Not likely to appear here. + // + // Template languages, etc. don't matter, we should see their *result* + // at *this* level. + // Strange curly braces in SQL code that reached the database driver + // suggest that there is a bug in the application. + // The braces are likely to be either template syntax or + // a programming language's string interpolation syntax. + + var expandedText string + if info.ExpandedSQL != "" { + if info.ExpandedSQL == info.StmtOrTrigger { + expandedText = " = exp" + } else { + expandedText = fmt.Sprintf(" expanded {%q}", info.ExpandedSQL) + } + } else { + expandedText = "" + } + + // SQLite docs as of September 6, 2016: Tracing and Profiling Functions + // https://www.sqlite.org/c3ref/profile.html + // + // The profile callback time is in units of nanoseconds, however + // the current implementation is only capable of millisecond resolution + // so the six least significant digits in the time are meaningless. + // Future versions of SQLite might provide greater resolution on the profiler callback. 
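+	// Given that, the code below reports whole milliseconds whenever the
+	// nanosecond count is an exact multiple of 1e6 and flags anything
+	// finer-grained as unexpected.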
+ + var runTimeText string + if info.RunTimeNanosec == 0 { + if info.EventCode == sqlite3.TraceProfile { + //runTimeText = "; no time" // seems confusing + runTimeText = "; time 0" // no measurement unit + } else { + //runTimeText = "; no time" // seems useless and confusing + } + } else { + const nanosPerMillisec = 1000000 + if info.RunTimeNanosec%nanosPerMillisec == 0 { + runTimeText = fmt.Sprintf("; time %d ms", info.RunTimeNanosec/nanosPerMillisec) + } else { + // unexpected: better than millisecond resolution + runTimeText = fmt.Sprintf("; time %d ns!!!", info.RunTimeNanosec) + } + } + + var modeText string + if info.AutoCommit { + modeText = "-AC-" + } else { + modeText = "+Tx+" + } + + fmt.Printf("Trace: ev %d %s conn 0x%x, stmt 0x%x {%q}%s%s%s\n", + info.EventCode, modeText, info.ConnHandle, info.StmtHandle, + info.StmtOrTrigger, expandedText, + runTimeText, + dbErrText) + return 0 +} + +func main() { + eventMask := sqlite3.TraceStmt | sqlite3.TraceProfile | sqlite3.TraceRow | sqlite3.TraceClose + + sql.Register("sqlite3_tracing", + &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + err := conn.SetTrace(&sqlite3.TraceConfig{ + Callback: traceCallback, + EventMask: uint(eventMask), + WantExpandedSQL: true, + }) + return err + }, + }) + + os.Exit(dbMain()) +} + +// Harder to do DB work in main(). +// It's better with a separate function because +// 'defer' and 'os.Exit' don't go well together. +// +// DO NOT use 'log.Fatal...' below: remember that it's equivalent to +// Print() followed by a call to os.Exit(1) --- and +// we want to avoid Exit() so 'defer' can do cleanup. +// Use 'log.Panic...' instead. + +func dbMain() int { + db, err := sql.Open("sqlite3_tracing", ":memory:") + if err != nil { + fmt.Printf("Failed to open database: %#+v\n", err) + return 1 + } + defer db.Close() + + err = db.Ping() + if err != nil { + log.Panic(err) + } + + dbSetup(db) + + dbDoInsert(db) + dbDoInsertPrepared(db) + dbDoSelect(db) + dbDoSelectPrepared(db) + + return 0 +} + +// 'DDL' stands for "Data Definition Language": + +// Note: "INTEGER PRIMARY KEY NOT NULL AUTOINCREMENT" causes the error +// 'near "AUTOINCREMENT": syntax error'; without "NOT NULL" it works. +const tableDDL = `CREATE TABLE t1 ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + note VARCHAR NOT NULL +)` + +// 'DML' stands for "Data Manipulation Language": + +const insertDML = "INSERT INTO t1 (note) VALUES (?)" +const selectDML = "SELECT id, note FROM t1 WHERE note LIKE ?" 
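+
+// Note: noteTextPattern ("%Prep%", defined below) matches only the notes
+// written through the prepared-statement paths ("bla-1234567890-DB-Prepare"),
+// not the plain Exec notes, so the SELECTs return just those rows.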
+ +const textPrefix = "bla-1234567890-" +const noteTextPattern = "%Prep%" + +const nGenRows = 4 // Number of Rows to Generate (for *each* approach tested) + +func dbSetup(db *sql.DB) { + var err error + + _, err = db.Exec("DROP TABLE IF EXISTS t1") + if err != nil { + log.Panic(err) + } + _, err = db.Exec(tableDDL) + if err != nil { + log.Panic(err) + } +} + +func dbDoInsert(db *sql.DB) { + const Descr = "DB-Exec" + for i := 0; i < nGenRows; i++ { + result, err := db.Exec(insertDML, textPrefix+Descr) + if err != nil { + log.Panic(err) + } + + resultDoCheck(result, Descr, i) + } +} + +func dbDoInsertPrepared(db *sql.DB) { + const Descr = "DB-Prepare" + + stmt, err := db.Prepare(insertDML) + if err != nil { + log.Panic(err) + } + defer stmt.Close() + + for i := 0; i < nGenRows; i++ { + result, err := stmt.Exec(textPrefix + Descr) + if err != nil { + log.Panic(err) + } + + resultDoCheck(result, Descr, i) + } +} + +func resultDoCheck(result sql.Result, callerDescr string, callIndex int) { + lastID, err := result.LastInsertId() + if err != nil { + log.Panic(err) + } + nAffected, err := result.RowsAffected() + if err != nil { + log.Panic(err) + } + + log.Printf("Exec result for %s (%d): ID = %d, affected = %d\n", callerDescr, callIndex, lastID, nAffected) +} + +func dbDoSelect(db *sql.DB) { + const Descr = "DB-Query" + + rows, err := db.Query(selectDML, noteTextPattern) + if err != nil { + log.Panic(err) + } + defer rows.Close() + + rowsDoFetch(rows, Descr) +} + +func dbDoSelectPrepared(db *sql.DB) { + const Descr = "DB-Prepare" + + stmt, err := db.Prepare(selectDML) + if err != nil { + log.Panic(err) + } + defer stmt.Close() + + rows, err := stmt.Query(noteTextPattern) + if err != nil { + log.Panic(err) + } + defer rows.Close() + + rowsDoFetch(rows, Descr) +} + +func rowsDoFetch(rows *sql.Rows, callerDescr string) { + var nRows int + var id int64 + var note string + + for rows.Next() { + err := rows.Scan(&id, ¬e) + if err != nil { + log.Panic(err) + } + log.Printf("Row for %s (%d): id=%d, note=%q\n", + callerDescr, nRows, id, note) + nRows++ + } + if err := rows.Err(); err != nil { + log.Panic(err) + } + log.Printf("Total %d rows for %s.\n", nRows, callerDescr) +} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go b/vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go new file mode 100644 index 0000000..aad8dda --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/vtable/main.go @@ -0,0 +1,38 @@ +package main + +import ( + "database/sql" + "fmt" + "log" + + "github.com/mattn/go-sqlite3" +) + +func main() { + sql.Register("sqlite3_with_extensions", &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + return conn.CreateModule("github", &githubModule{}) + }, + }) + db, err := sql.Open("sqlite3_with_extensions", ":memory:") + if err != nil { + log.Fatal(err) + } + defer db.Close() + + _, err = db.Exec("create virtual table repo using github(id, full_name, description, html_url)") + if err != nil { + log.Fatal(err) + } + + rows, err := db.Query("select id, full_name, description, html_url from repo") + if err != nil { + log.Fatal(err) + } + defer rows.Close() + for rows.Next() { + var id, fullName, description, htmlURL string + rows.Scan(&id, &fullName, &description, &htmlURL) + fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, fullName, description, htmlURL) + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go b/vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go new file mode 100644 index 0000000..1d6d824 
--- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/_example/vtable/vtable.go @@ -0,0 +1,111 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/mattn/go-sqlite3" +) + +type githubRepo struct { + ID int `json:"id"` + FullName string `json:"full_name"` + Description string `json:"description"` + HTMLURL string `json:"html_url"` +} + +type githubModule struct { +} + +func (m *githubModule) Create(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) { + err := c.DeclareVTab(fmt.Sprintf(` + CREATE TABLE %s ( + id INT, + full_name TEXT, + description TEXT, + html_url TEXT + )`, args[0])) + if err != nil { + return nil, err + } + return &ghRepoTable{}, nil +} + +func (m *githubModule) Connect(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) { + return m.Create(c, args) +} + +func (m *githubModule) DestroyModule() {} + +type ghRepoTable struct { + repos []githubRepo +} + +func (v *ghRepoTable) Open() (sqlite3.VTabCursor, error) { + resp, err := http.Get("https://api.github.com/repositories") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var repos []githubRepo + if err := json.Unmarshal(body, &repos); err != nil { + return nil, err + } + return &ghRepoCursor{0, repos}, nil +} + +func (v *ghRepoTable) BestIndex(cst []sqlite3.InfoConstraint, ob []sqlite3.InfoOrderBy) (*sqlite3.IndexResult, error) { + return &sqlite3.IndexResult{}, nil +} + +func (v *ghRepoTable) Disconnect() error { return nil } +func (v *ghRepoTable) Destroy() error { return nil } + +type ghRepoCursor struct { + index int + repos []githubRepo +} + +func (vc *ghRepoCursor) Column(c *sqlite3.SQLiteContext, col int) error { + switch col { + case 0: + c.ResultInt(vc.repos[vc.index].ID) + case 1: + c.ResultText(vc.repos[vc.index].FullName) + case 2: + c.ResultText(vc.repos[vc.index].Description) + case 3: + c.ResultText(vc.repos[vc.index].HTMLURL) + } + return nil +} + +func (vc *ghRepoCursor) Filter(idxNum int, idxStr string, vals []interface{}) error { + vc.index = 0 + return nil +} + +func (vc *ghRepoCursor) Next() error { + vc.index++ + return nil +} + +func (vc *ghRepoCursor) EOF() bool { + return vc.index >= len(vc.repos) +} + +func (vc *ghRepoCursor) Rowid() (int64, error) { + return int64(vc.index), nil +} + +func (vc *ghRepoCursor) Close() error { + return nil +} diff --git a/vendor/github.com/mattn/go-sqlite3/backup.go b/vendor/github.com/mattn/go-sqlite3/backup.go new file mode 100644 index 0000000..5ab3a54 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/backup.go @@ -0,0 +1,85 @@ +// Copyright (C) 2014 Yasuhiro Matsumoto . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package sqlite3 + +/* +#ifndef USE_LIBSQLITE3 +#include +#else +#include +#endif +#include +*/ +import "C" +import ( + "runtime" + "unsafe" +) + +// SQLiteBackup implement interface of Backup. +type SQLiteBackup struct { + b *C.sqlite3_backup +} + +// Backup make backup from src to dest. 
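// Usage sketch (editor's addition, not part of the vendored file) for the
// Backup method declared just below. The two *SQLiteConn values are assumed
// to be captured by a ConnectHook, as in backup_test.go further down; the
// name backupAllPages is hypothetical.
func backupAllPages(destConn, srcConn *SQLiteConn) error {
	bk, err := destConn.Backup("main", srcConn, "main")
	if err != nil {
		return err
	}
	// Step(-1) asks SQLite to copy all remaining pages in one call;
	// Step(1) in a loop copies page by page (see testBackup below).
	if _, err := bk.Step(-1); err != nil {
		bk.Finish()
		return err
	}
	return bk.Finish()
}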
+func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
+	destptr := C.CString(dest)
+	defer C.free(unsafe.Pointer(destptr))
+	srcptr := C.CString(src)
+	defer C.free(unsafe.Pointer(srcptr))
+
+	if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil {
+		bb := &SQLiteBackup{b: b}
+		runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
+		return bb, nil
+	}
+	return nil, c.lastError()
+}
+
+// Step backs up for one step. It calls the underlying `sqlite3_backup_step`
+// function, and returns a boolean indicating whether the backup is done,
+// plus an error signalling any other error. Done is returned if the
+// underlying C function returns SQLITE_DONE (code 101).
+func (b *SQLiteBackup) Step(p int) (bool, error) {
+	ret := C.sqlite3_backup_step(b.b, C.int(p))
+	if ret == C.SQLITE_DONE {
+		return true, nil
+	} else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY {
+		return false, Error{Code: ErrNo(ret)}
+	}
+	return false, nil
+}
+
+// Remaining returns the number of pages still to be backed up.
+func (b *SQLiteBackup) Remaining() int {
+	return int(C.sqlite3_backup_remaining(b.b))
+}
+
+// PageCount returns the total number of pages in the source database.
+func (b *SQLiteBackup) PageCount() int {
+	return int(C.sqlite3_backup_pagecount(b.b))
+}
+
+// Finish closes the backup.
+func (b *SQLiteBackup) Finish() error {
+	return b.Close()
+}
+
+// Close closes the backup.
+func (b *SQLiteBackup) Close() error {
+	ret := C.sqlite3_backup_finish(b.b)
+
+	// sqlite3_backup_finish() never fails, it just returns the
+	// error code from previous operations, so clean up before
+	// checking and returning an error
+	b.b = nil
+	runtime.SetFinalizer(b, nil)
+
+	if ret != 0 {
+		return Error{Code: ErrNo(ret)}
+	}
+	return nil
+}
diff --git a/vendor/github.com/mattn/go-sqlite3/backup_test.go b/vendor/github.com/mattn/go-sqlite3/backup_test.go
new file mode 100644
index 0000000..73c0a4b
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/backup_test.go
@@ -0,0 +1,290 @@
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package sqlite3
+
+import (
+	"database/sql"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+)
+
+// The number of rows of test data to create in the source database.
+// Can be used to control how many pages are available to be backed up.
+const testRowCount = 100
+
+// The maximum number of seconds after which the page-by-page backup is considered to have taken too long.
+const usePagePerStepsTimeoutSeconds = 30
+
+// Test the backup functionality.
+func testBackup(t *testing.T, testRowCount int, usePerPageSteps bool) {
+	// This function will be called multiple times.
+	// It uses sql.Register(), which requires the name parameter value to be unique.
+	// There does not currently appear to be a way to unregister a registered driver, however.
+	// So generate a database driver name that will likely be unique.
+	var driverName = fmt.Sprintf("sqlite3_testBackup_%v_%v_%v", testRowCount, usePerPageSteps, time.Now().UnixNano())
+
+	// The driver's connection will be needed in order to perform the backup.
+	driverConns := []*SQLiteConn{}
+	sql.Register(driverName, &SQLiteDriver{
+		ConnectHook: func(conn *SQLiteConn) error {
+			driverConns = append(driverConns, conn)
+			return nil
+		},
+	})
+
+	// Connect to the source database.
+ srcTempFilename := TempFilename(t) + defer os.Remove(srcTempFilename) + srcDb, err := sql.Open(driverName, srcTempFilename) + if err != nil { + t.Fatal("Failed to open the source database:", err) + } + defer srcDb.Close() + err = srcDb.Ping() + if err != nil { + t.Fatal("Failed to connect to the source database:", err) + } + + // Connect to the destination database. + destTempFilename := TempFilename(t) + defer os.Remove(destTempFilename) + destDb, err := sql.Open(driverName, destTempFilename) + if err != nil { + t.Fatal("Failed to open the destination database:", err) + } + defer destDb.Close() + err = destDb.Ping() + if err != nil { + t.Fatal("Failed to connect to the destination database:", err) + } + + // Check the driver connections. + if len(driverConns) != 2 { + t.Fatalf("Expected 2 driver connections, but found %v.", len(driverConns)) + } + srcDbDriverConn := driverConns[0] + if srcDbDriverConn == nil { + t.Fatal("The source database driver connection is nil.") + } + destDbDriverConn := driverConns[1] + if destDbDriverConn == nil { + t.Fatal("The destination database driver connection is nil.") + } + + // Generate some test data for the given ID. + var generateTestData = func(id int) string { + return fmt.Sprintf("test-%v", id) + } + + // Populate the source database with a test table containing some test data. + tx, err := srcDb.Begin() + if err != nil { + t.Fatal("Failed to begin a transaction when populating the source database:", err) + } + _, err = srcDb.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)") + if err != nil { + tx.Rollback() + t.Fatal("Failed to create the source database \"test\" table:", err) + } + for id := 0; id < testRowCount; id++ { + _, err = srcDb.Exec("INSERT INTO test (id, value) VALUES (?, ?)", id, generateTestData(id)) + if err != nil { + tx.Rollback() + t.Fatal("Failed to insert a row into the source database \"test\" table:", err) + } + } + err = tx.Commit() + if err != nil { + t.Fatal("Failed to populate the source database:", err) + } + + // Confirm that the destination database is initially empty. + var destTableCount int + err = destDb.QueryRow("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'").Scan(&destTableCount) + if err != nil { + t.Fatal("Failed to check the destination table count:", err) + } + if destTableCount != 0 { + t.Fatalf("The destination database is not empty; %v table(s) found.", destTableCount) + } + + // Prepare to perform the backup. + backup, err := destDbDriverConn.Backup("main", srcDbDriverConn, "main") + if err != nil { + t.Fatal("Failed to initialize the backup:", err) + } + + // Allow the initial page count and remaining values to be retrieved. + // According to , the page count and remaining values are "... only updated by sqlite3_backup_step()." + isDone, err := backup.Step(0) + if err != nil { + t.Fatal("Unable to perform an initial 0-page backup step:", err) + } + if isDone { + t.Fatal("Backup is unexpectedly done.") + } + + // Check that the page count and remaining values are reasonable. + initialPageCount := backup.PageCount() + if initialPageCount <= 0 { + t.Fatalf("Unexpected initial page count value: %v", initialPageCount) + } + initialRemaining := backup.Remaining() + if initialRemaining <= 0 { + t.Fatalf("Unexpected initial remaining value: %v", initialRemaining) + } + if initialRemaining != initialPageCount { + t.Fatalf("Initial remaining value differs from the initial page count value; remaining: %v; page count: %v", initialRemaining, initialPageCount) + } + + // Perform the backup. 
+ if usePerPageSteps { + var startTime = time.Now().Unix() + + // Test backing-up using a page-by-page approach. + var latestRemaining = initialRemaining + for { + // Perform the backup step. + isDone, err = backup.Step(1) + if err != nil { + t.Fatal("Failed to perform a backup step:", err) + } + + // The page count should remain unchanged from its initial value. + currentPageCount := backup.PageCount() + if currentPageCount != initialPageCount { + t.Fatalf("Current page count differs from the initial page count; initial page count: %v; current page count: %v", initialPageCount, currentPageCount) + } + + // There should now be one less page remaining. + currentRemaining := backup.Remaining() + expectedRemaining := latestRemaining - 1 + if currentRemaining != expectedRemaining { + t.Fatalf("Unexpected remaining value; expected remaining value: %v; actual remaining value: %v", expectedRemaining, currentRemaining) + } + latestRemaining = currentRemaining + + if isDone { + break + } + + // Limit the runtime of the backup attempt. + if (time.Now().Unix() - startTime) > usePagePerStepsTimeoutSeconds { + t.Fatal("Backup is taking longer than expected.") + } + } + } else { + // Test the copying of all remaining pages. + isDone, err = backup.Step(-1) + if err != nil { + t.Fatal("Failed to perform a backup step:", err) + } + if !isDone { + t.Fatal("Backup is unexpectedly not done.") + } + } + + // Check that the page count and remaining values are reasonable. + finalPageCount := backup.PageCount() + if finalPageCount != initialPageCount { + t.Fatalf("Final page count differs from the initial page count; initial page count: %v; final page count: %v", initialPageCount, finalPageCount) + } + finalRemaining := backup.Remaining() + if finalRemaining != 0 { + t.Fatalf("Unexpected remaining value: %v", finalRemaining) + } + + // Finish the backup. + err = backup.Finish() + if err != nil { + t.Fatal("Failed to finish backup:", err) + } + + // Confirm that the "test" table now exists in the destination database. + var doesTestTableExist bool + err = destDb.QueryRow("SELECT EXISTS (SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = 'test' LIMIT 1) AS test_table_exists").Scan(&doesTestTableExist) + if err != nil { + t.Fatal("Failed to check if the \"test\" table exists in the destination database:", err) + } + if !doesTestTableExist { + t.Fatal("The \"test\" table could not be found in the destination database.") + } + + // Confirm that the number of rows in the destination database's "test" table matches that of the source table. + var actualTestTableRowCount int + err = destDb.QueryRow("SELECT COUNT(*) FROM test").Scan(&actualTestTableRowCount) + if err != nil { + t.Fatal("Failed to determine the rowcount of the \"test\" table in the destination database:", err) + } + if testRowCount != actualTestTableRowCount { + t.Fatalf("Unexpected destination \"test\" table row count; expected: %v; found: %v", testRowCount, actualTestTableRowCount) + } + + // Check each of the rows in the destination database. 
+ for id := 0; id < testRowCount; id++ { + var checkedValue string + err = destDb.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&checkedValue) + if err != nil { + t.Fatal("Failed to query the \"test\" table in the destination database:", err) + } + + var expectedValue = generateTestData(id) + if checkedValue != expectedValue { + t.Fatalf("Unexpected value in the \"test\" table in the destination database; expected value: %v; actual value: %v", expectedValue, checkedValue) + } + } +} + +func TestBackupStepByStep(t *testing.T) { + testBackup(t, testRowCount, true) +} + +func TestBackupAllRemainingPages(t *testing.T) { + testBackup(t, testRowCount, false) +} + +// Test the error reporting when preparing to perform a backup. +func TestBackupError(t *testing.T) { + const driverName = "sqlite3_TestBackupError" + + // The driver's connection will be needed in order to perform the backup. + var dbDriverConn *SQLiteConn + sql.Register(driverName, &SQLiteDriver{ + ConnectHook: func(conn *SQLiteConn) error { + dbDriverConn = conn + return nil + }, + }) + + // Connect to the database. + dbTempFilename := TempFilename(t) + defer os.Remove(dbTempFilename) + db, err := sql.Open(driverName, dbTempFilename) + if err != nil { + t.Fatal("Failed to open the database:", err) + } + defer db.Close() + db.Ping() + + // Need the driver connection in order to perform the backup. + if dbDriverConn == nil { + t.Fatal("Failed to get the driver connection.") + } + + // Prepare to perform the backup. + // Intentionally using the same connection for both the source and destination databases, to trigger an error result. + backup, err := dbDriverConn.Backup("main", dbDriverConn, "main") + if err == nil { + t.Fatal("Failed to get the expected error result.") + } + const expectedError = "source and destination must be distinct" + if err.Error() != expectedError { + t.Fatalf("Unexpected error message; expected value: \"%v\"; actual value: \"%v\"", expectedError, err.Error()) + } + if backup != nil { + t.Fatal("Failed to get the expected nil backup result.") + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go new file mode 100644 index 0000000..29ece3d --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -0,0 +1,364 @@ +// Copyright (C) 2014 Yasuhiro Matsumoto . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package sqlite3 + +// You can't export a Go function to C and have definitions in the C +// preamble in the same file, so we have to have callbackTrampoline in +// its own file. Because we need a separate file anyway, the support +// code for SQLite custom functions is in here. 
+ +/* +#ifndef USE_LIBSQLITE3 +#include +#else +#include +#endif +#include + +void _sqlite3_result_text(sqlite3_context* ctx, const char* s); +void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l); +*/ +import "C" + +import ( + "errors" + "fmt" + "math" + "reflect" + "sync" + "unsafe" +) + +//export callbackTrampoline +func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) { + args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] + fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo) + fi.Call(ctx, args) +} + +//export stepTrampoline +func stepTrampoline(ctx *C.sqlite3_context, argc C.int, argv **C.sqlite3_value) { + args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:int(argc):int(argc)] + ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo) + ai.Step(ctx, args) +} + +//export doneTrampoline +func doneTrampoline(ctx *C.sqlite3_context) { + handle := uintptr(C.sqlite3_user_data(ctx)) + ai := lookupHandle(handle).(*aggInfo) + ai.Done(ctx) +} + +//export compareTrampoline +func compareTrampoline(handlePtr uintptr, la C.int, a *C.char, lb C.int, b *C.char) C.int { + cmp := lookupHandle(handlePtr).(func(string, string) int) + return C.int(cmp(C.GoStringN(a, la), C.GoStringN(b, lb))) +} + +//export commitHookTrampoline +func commitHookTrampoline(handle uintptr) int { + callback := lookupHandle(handle).(func() int) + return callback() +} + +//export rollbackHookTrampoline +func rollbackHookTrampoline(handle uintptr) { + callback := lookupHandle(handle).(func()) + callback() +} + +//export updateHookTrampoline +func updateHookTrampoline(handle uintptr, op int, db *C.char, table *C.char, rowid int64) { + callback := lookupHandle(handle).(func(int, string, string, int64)) + callback(op, C.GoString(db), C.GoString(table), rowid) +} + +// Use handles to avoid passing Go pointers to C. + +type handleVal struct { + db *SQLiteConn + val interface{} +} + +var handleLock sync.Mutex +var handleVals = make(map[uintptr]handleVal) +var handleIndex uintptr = 100 + +func newHandle(db *SQLiteConn, v interface{}) uintptr { + handleLock.Lock() + defer handleLock.Unlock() + i := handleIndex + handleIndex++ + handleVals[i] = handleVal{db, v} + return i +} + +func lookupHandle(handle uintptr) interface{} { + handleLock.Lock() + defer handleLock.Unlock() + r, ok := handleVals[handle] + if !ok { + if handle >= 100 && handle < handleIndex { + panic("deleted handle") + } else { + panic("invalid handle") + } + } + return r.val +} + +func deleteHandles(db *SQLiteConn) { + handleLock.Lock() + defer handleLock.Unlock() + for handle, val := range handleVals { + if val.db == db { + delete(handleVals, handle) + } + } +} + +// This is only here so that tests can refer to it. 
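// Illustrative sketch (editor's addition, not part of the vendored file):
// cgo forbids storing Go pointers on the C side, so the handle table above
// hands C a plain uintptr key instead and resolves it back inside each
// exported trampoline. The function name is hypothetical.
func handleRoundTripSketch(conn *SQLiteConn) {
	h := newHandle(conn, func() int { return 0 }) // register a Go value, get a key
	commitHook := lookupHandle(h).(func() int)    // the C side passes the key back
	_ = commitHook()
	deleteHandles(conn) // every key for a connection is dropped when it closes
}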
+type callbackArgRaw C.sqlite3_value + +type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error) + +type callbackArgCast struct { + f callbackArgConverter + typ reflect.Type +} + +func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) { + val, err := c.f(v) + if err != nil { + return reflect.Value{}, err + } + if !val.Type().ConvertibleTo(c.typ) { + return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ) + } + return val.Convert(c.typ), nil +} + +func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) { + if C.sqlite3_value_type(v) != C.SQLITE_INTEGER { + return reflect.Value{}, fmt.Errorf("argument must be an INTEGER") + } + return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil +} + +func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) { + if C.sqlite3_value_type(v) != C.SQLITE_INTEGER { + return reflect.Value{}, fmt.Errorf("argument must be an INTEGER") + } + i := int64(C.sqlite3_value_int64(v)) + val := false + if i != 0 { + val = true + } + return reflect.ValueOf(val), nil +} + +func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) { + if C.sqlite3_value_type(v) != C.SQLITE_FLOAT { + return reflect.Value{}, fmt.Errorf("argument must be a FLOAT") + } + return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil +} + +func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) { + switch C.sqlite3_value_type(v) { + case C.SQLITE_BLOB: + l := C.sqlite3_value_bytes(v) + p := C.sqlite3_value_blob(v) + return reflect.ValueOf(C.GoBytes(p, l)), nil + case C.SQLITE_TEXT: + l := C.sqlite3_value_bytes(v) + c := unsafe.Pointer(C.sqlite3_value_text(v)) + return reflect.ValueOf(C.GoBytes(c, l)), nil + default: + return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT") + } +} + +func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) { + switch C.sqlite3_value_type(v) { + case C.SQLITE_BLOB: + l := C.sqlite3_value_bytes(v) + p := (*C.char)(C.sqlite3_value_blob(v)) + return reflect.ValueOf(C.GoStringN(p, l)), nil + case C.SQLITE_TEXT: + c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v))) + return reflect.ValueOf(C.GoString(c)), nil + default: + return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT") + } +} + +func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) { + switch C.sqlite3_value_type(v) { + case C.SQLITE_INTEGER: + return callbackArgInt64(v) + case C.SQLITE_FLOAT: + return callbackArgFloat64(v) + case C.SQLITE_TEXT: + return callbackArgString(v) + case C.SQLITE_BLOB: + return callbackArgBytes(v) + case C.SQLITE_NULL: + // Interpret NULL as a nil byte slice. 
+ var ret []byte + return reflect.ValueOf(ret), nil + default: + panic("unreachable") + } +} + +func callbackArg(typ reflect.Type) (callbackArgConverter, error) { + switch typ.Kind() { + case reflect.Interface: + if typ.NumMethod() != 0 { + return nil, errors.New("the only supported interface type is interface{}") + } + return callbackArgGeneric, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + return nil, errors.New("the only supported slice type is []byte") + } + return callbackArgBytes, nil + case reflect.String: + return callbackArgString, nil + case reflect.Bool: + return callbackArgBool, nil + case reflect.Int64: + return callbackArgInt64, nil + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: + c := callbackArgCast{callbackArgInt64, typ} + return c.Run, nil + case reflect.Float64: + return callbackArgFloat64, nil + case reflect.Float32: + c := callbackArgCast{callbackArgFloat64, typ} + return c.Run, nil + default: + return nil, fmt.Errorf("don't know how to convert to %s", typ) + } +} + +func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) { + var args []reflect.Value + + if len(argv) < len(converters) { + return nil, fmt.Errorf("function requires at least %d arguments", len(converters)) + } + + for i, arg := range argv[:len(converters)] { + v, err := converters[i](arg) + if err != nil { + return nil, err + } + args = append(args, v) + } + + if variadic != nil { + for _, arg := range argv[len(converters):] { + v, err := variadic(arg) + if err != nil { + return nil, err + } + args = append(args, v) + } + } + return args, nil +} + +type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error + +func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error { + switch v.Type().Kind() { + case reflect.Int64: + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: + v = v.Convert(reflect.TypeOf(int64(0))) + case reflect.Bool: + b := v.Interface().(bool) + if b { + v = reflect.ValueOf(int64(1)) + } else { + v = reflect.ValueOf(int64(0)) + } + default: + return fmt.Errorf("cannot convert %s to INTEGER", v.Type()) + } + + C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64))) + return nil +} + +func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error { + switch v.Type().Kind() { + case reflect.Float64: + case reflect.Float32: + v = v.Convert(reflect.TypeOf(float64(0))) + default: + return fmt.Errorf("cannot convert %s to FLOAT", v.Type()) + } + + C.sqlite3_result_double(ctx, C.double(v.Interface().(float64))) + return nil +} + +func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error { + if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 { + return fmt.Errorf("cannot convert %s to BLOB", v.Type()) + } + i := v.Interface() + if i == nil || len(i.([]byte)) == 0 { + C.sqlite3_result_null(ctx) + } else { + bs := i.([]byte) + C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs))) + } + return nil +} + +func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error { + if v.Type().Kind() != reflect.String { + return fmt.Errorf("cannot convert %s to TEXT", v.Type()) + } + C._sqlite3_result_text(ctx, C.CString(v.Interface().(string))) + return nil +} + +func callbackRet(typ reflect.Type) (callbackRetConverter, error) { + 
switch typ.Kind() { + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + return nil, errors.New("the only supported slice type is []byte") + } + return callbackRetBlob, nil + case reflect.String: + return callbackRetText, nil + case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: + return callbackRetInteger, nil + case reflect.Float32, reflect.Float64: + return callbackRetFloat, nil + default: + return nil, fmt.Errorf("don't know how to convert to %s", typ) + } +} + +func callbackError(ctx *C.sqlite3_context, err error) { + cstr := C.CString(err.Error()) + defer C.free(unsafe.Pointer(cstr)) + C.sqlite3_result_error(ctx, cstr, -1) +} + +// Test support code. Tests are not allowed to import "C", so we can't +// declare any functions that use C.sqlite3_value. +func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter { + return func(*C.sqlite3_value) (reflect.Value, error) { + return v, err + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/callback_test.go b/vendor/github.com/mattn/go-sqlite3/callback_test.go new file mode 100644 index 0000000..5c61f44 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/callback_test.go @@ -0,0 +1,97 @@ +package sqlite3 + +import ( + "errors" + "math" + "reflect" + "testing" +) + +func TestCallbackArgCast(t *testing.T) { + intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil) + floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil) + errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test")) + + tests := []struct { + f callbackArgConverter + o reflect.Value + }{ + {intConv, reflect.ValueOf(int8(-1))}, + {intConv, reflect.ValueOf(int16(-1))}, + {intConv, reflect.ValueOf(int32(-1))}, + {intConv, reflect.ValueOf(uint8(math.MaxUint8))}, + {intConv, reflect.ValueOf(uint16(math.MaxUint16))}, + {intConv, reflect.ValueOf(uint32(math.MaxUint32))}, + // Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1 + {intConv, reflect.ValueOf(uint64(math.MaxInt64))}, + {floatConv, reflect.ValueOf(float32(math.Inf(1)))}, + } + + for _, test := range tests { + conv := callbackArgCast{test.f, test.o.Type()} + val, err := conv.Run(nil) + if err != nil { + t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err) + } else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) { + t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface()) + } + } + + conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))} + _, err := conv.Run(nil) + if err == nil { + t.Errorf("Expected error during callbackArgCast, but got none") + } +} + +func TestCallbackConverters(t *testing.T) { + tests := []struct { + v interface{} + err bool + }{ + // Unfortunately, we can't tell which converter was returned, + // but we can at least check which types can be converted. 
+ {[]byte{0}, false}, + {"text", false}, + {true, false}, + {int8(0), false}, + {int16(0), false}, + {int32(0), false}, + {int64(0), false}, + {uint8(0), false}, + {uint16(0), false}, + {uint32(0), false}, + {uint64(0), false}, + {int(0), false}, + {uint(0), false}, + {float64(0), false}, + {float32(0), false}, + + {func() {}, true}, + {complex64(complex(0, 0)), true}, + {complex128(complex(0, 0)), true}, + {struct{}{}, true}, + {map[string]string{}, true}, + {[]string{}, true}, + {(*int8)(nil), true}, + {make(chan int), true}, + } + + for _, test := range tests { + _, err := callbackArg(reflect.TypeOf(test.v)) + if test.err && err == nil { + t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v)) + } else if !test.err && err != nil { + t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err) + } + } + + for _, test := range tests { + _, err := callbackRet(reflect.TypeOf(test.v)) + if test.err && err == nil { + t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v)) + } else if !test.err && err != nil { + t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err) + } + } +} diff --git a/vendor/github.com/mattn/go-sqlite3/doc.go b/vendor/github.com/mattn/go-sqlite3/doc.go new file mode 100644 index 0000000..c721f77 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/doc.go @@ -0,0 +1,112 @@ +/* +Package sqlite3 provides interface to SQLite3 databases. + +This works as a driver for database/sql. + +Installation + + go get github.com/mattn/go-sqlite3 + +Supported Types + +Currently, go-sqlite3 supports the following data types. + + +------------------------------+ + |go | sqlite3 | + |----------|-------------------| + |nil | null | + |int | integer | + |int64 | integer | + |float64 | float | + |bool | integer | + |[]byte | blob | + |string | text | + |time.Time | timestamp/datetime| + +------------------------------+ + +SQLite3 Extension + +You can write your own extension module for sqlite3. For example, below is an +extension for a Regexp matcher operation. + + #include + #include + #include + #include + + SQLITE_EXTENSION_INIT1 + static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { + if (argc >= 2) { + const char *target = (const char *)sqlite3_value_text(argv[1]); + const char *pattern = (const char *)sqlite3_value_text(argv[0]); + const char* errstr = NULL; + int erroff = 0; + int vec[500]; + int n, rc; + pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); + rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); + if (rc <= 0) { + sqlite3_result_error(context, errstr, 0); + return; + } + sqlite3_result_int(context, 1); + } + } + + #ifdef _WIN32 + __declspec(dllexport) + #endif + int sqlite3_extension_init(sqlite3 *db, char **errmsg, + const sqlite3_api_routines *api) { + SQLITE_EXTENSION_INIT2(api); + return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, + (void*)db, regexp_func, NULL, NULL); + } + +It needs to be built as a so/dll shared library. And you need to register +the extension module like below. + + sql.Register("sqlite3_with_extensions", + &sqlite3.SQLiteDriver{ + Extensions: []string{ + "sqlite3_mod_regexp", + }, + }) + +Then, you can use this extension. + + rows, err := db.Query("select text from mytable where name regexp '^golang'") + +Connection Hook + +You can hook and inject your code when the connection is established. 
database/sql +doesn't provide a way to get native go-sqlite3 interfaces. So if you want, +you need to set ConnectHook and get the SQLiteConn. + + sql.Register("sqlite3_with_hook_example", + &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + sqlite3conn = append(sqlite3conn, conn) + return nil + }, + }) + +Go SQlite3 Extensions + +If you want to register Go functions as SQLite extension functions, +call RegisterFunction from ConnectHook. + + regex = func(re, s string) (bool, error) { + return regexp.MatchString(re, s) + } + sql.Register("sqlite3_with_go_func", + &sqlite3.SQLiteDriver{ + ConnectHook: func(conn *sqlite3.SQLiteConn) error { + return conn.RegisterFunc("regexp", regex, true) + }, + }) + +See the documentation of RegisterFunc for more details. + +*/ +package sqlite3 diff --git a/vendor/github.com/mattn/go-sqlite3/error.go b/vendor/github.com/mattn/go-sqlite3/error.go new file mode 100644 index 0000000..49ab890 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/error.go @@ -0,0 +1,135 @@ +// Copyright (C) 2014 Yasuhiro Matsumoto . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +package sqlite3 + +import "C" + +// ErrNo inherit errno. +type ErrNo int + +// ErrNoMask is mask code. +const ErrNoMask C.int = 0xff + +// ErrNoExtended is extended errno. +type ErrNoExtended int + +// Error implement sqlite error code. +type Error struct { + Code ErrNo /* The error code returned by SQLite */ + ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */ + err string /* The error string returned by sqlite3_errmsg(), + this usually contains more specific details. */ +} + +// result codes from http://www.sqlite.org/c3ref/c_abort.html +var ( + ErrError = ErrNo(1) /* SQL error or missing database */ + ErrInternal = ErrNo(2) /* Internal logic error in SQLite */ + ErrPerm = ErrNo(3) /* Access permission denied */ + ErrAbort = ErrNo(4) /* Callback routine requested an abort */ + ErrBusy = ErrNo(5) /* The database file is locked */ + ErrLocked = ErrNo(6) /* A table in the database is locked */ + ErrNomem = ErrNo(7) /* A malloc() failed */ + ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */ + ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */ + ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */ + ErrCorrupt = ErrNo(11) /* The database disk image is malformed */ + ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */ + ErrFull = ErrNo(13) /* Insertion failed because database is full */ + ErrCantOpen = ErrNo(14) /* Unable to open the database file */ + ErrProtocol = ErrNo(15) /* Database lock protocol error */ + ErrEmpty = ErrNo(16) /* Database is empty */ + ErrSchema = ErrNo(17) /* The database schema changed */ + ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */ + ErrConstraint = ErrNo(19) /* Abort due to constraint violation */ + ErrMismatch = ErrNo(20) /* Data type mismatch */ + ErrMisuse = ErrNo(21) /* Library used incorrectly */ + ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */ + ErrAuth = ErrNo(23) /* Authorization denied */ + ErrFormat = ErrNo(24) /* Auxiliary database format error */ + ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */ + ErrNotADB = ErrNo(26) /* File opened that is not a database file */ + ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */ + ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */ +) + +// Error return error message from 
errno.
+func (err ErrNo) Error() string {
+	return Error{Code: err}.Error()
+}
+
+// Extend returns the extended errno.
+func (err ErrNo) Extend(by int) ErrNoExtended {
+	return ErrNoExtended(int(err) | (by << 8))
+}
+
+// Error returns the error message for the extended error code.
+func (err ErrNoExtended) Error() string {
+	return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
+}
+
+func (err Error) Error() string {
+	if err.err != "" {
+		return err.err
+	}
+	return errorString(err)
+}
+
+// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
+var (
+	ErrIoErrRead              = ErrIoErr.Extend(1)
+	ErrIoErrShortRead         = ErrIoErr.Extend(2)
+	ErrIoErrWrite             = ErrIoErr.Extend(3)
+	ErrIoErrFsync             = ErrIoErr.Extend(4)
+	ErrIoErrDirFsync          = ErrIoErr.Extend(5)
+	ErrIoErrTruncate          = ErrIoErr.Extend(6)
+	ErrIoErrFstat             = ErrIoErr.Extend(7)
+	ErrIoErrUnlock            = ErrIoErr.Extend(8)
+	ErrIoErrRDlock            = ErrIoErr.Extend(9)
+	ErrIoErrDelete            = ErrIoErr.Extend(10)
+	ErrIoErrBlocked           = ErrIoErr.Extend(11)
+	ErrIoErrNoMem             = ErrIoErr.Extend(12)
+	ErrIoErrAccess            = ErrIoErr.Extend(13)
+	ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
+	ErrIoErrLock              = ErrIoErr.Extend(15)
+	ErrIoErrClose             = ErrIoErr.Extend(16)
+	ErrIoErrDirClose          = ErrIoErr.Extend(17)
+	ErrIoErrSHMOpen           = ErrIoErr.Extend(18)
+	ErrIoErrSHMSize           = ErrIoErr.Extend(19)
+	ErrIoErrSHMLock           = ErrIoErr.Extend(20)
+	ErrIoErrSHMMap            = ErrIoErr.Extend(21)
+	ErrIoErrSeek              = ErrIoErr.Extend(22)
+	ErrIoErrDeleteNoent       = ErrIoErr.Extend(23)
+	ErrIoErrMMap              = ErrIoErr.Extend(24)
+	ErrIoErrGetTempPath       = ErrIoErr.Extend(25)
+	ErrIoErrConvPath          = ErrIoErr.Extend(26)
+	ErrLockedSharedCache      = ErrLocked.Extend(1)
+	ErrBusyRecovery           = ErrBusy.Extend(1)
+	ErrBusySnapshot           = ErrBusy.Extend(2)
+	ErrCantOpenNoTempDir      = ErrCantOpen.Extend(1)
+	ErrCantOpenIsDir          = ErrCantOpen.Extend(2)
+	ErrCantOpenFullPath       = ErrCantOpen.Extend(3)
+	ErrCantOpenConvPath       = ErrCantOpen.Extend(4)
+	ErrCorruptVTab            = ErrCorrupt.Extend(1)
+	ErrReadonlyRecovery       = ErrReadonly.Extend(1)
+	ErrReadonlyCantLock       = ErrReadonly.Extend(2)
+	ErrReadonlyRollback       = ErrReadonly.Extend(3)
+	ErrReadonlyDbMoved        = ErrReadonly.Extend(4)
+	ErrAbortRollback          = ErrAbort.Extend(2)
+	ErrConstraintCheck        = ErrConstraint.Extend(1)
+	ErrConstraintCommitHook   = ErrConstraint.Extend(2)
+	ErrConstraintForeignKey   = ErrConstraint.Extend(3)
+	ErrConstraintFunction     = ErrConstraint.Extend(4)
+	ErrConstraintNotNull      = ErrConstraint.Extend(5)
+	ErrConstraintPrimaryKey   = ErrConstraint.Extend(6)
+	ErrConstraintTrigger      = ErrConstraint.Extend(7)
+	ErrConstraintUnique       = ErrConstraint.Extend(8)
+	ErrConstraintVTab         = ErrConstraint.Extend(9)
+	ErrConstraintRowID        = ErrConstraint.Extend(10)
+	ErrNoticeRecoverWAL       = ErrNotice.Extend(1)
+	ErrNoticeRecoverRollback  = ErrNotice.Extend(2)
+	ErrWarningAutoIndex       = ErrWarning.Extend(1)
+)
diff --git a/vendor/github.com/mattn/go-sqlite3/error_test.go b/vendor/github.com/mattn/go-sqlite3/error_test.go
new file mode 100644
index 0000000..1ccbe5b
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/error_test.go
@@ -0,0 +1,242 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
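// Hedged sketch (editor's addition, not part of the vendored tests): callers
// can branch on the typed Error defined in error.go, exactly as the tests
// below do. The helper name is hypothetical.
func isUniqueViolationSketch(err error) bool {
	se, ok := err.(Error)
	return ok && se.ExtendedCode == ErrConstraintUnique
}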
+ +package sqlite3 + +import ( + "database/sql" + "io/ioutil" + "os" + "path" + "testing" +) + +func TestSimpleError(t *testing.T) { + e := ErrError.Error() + if e != "SQL logic error or missing database" { + t.Error("wrong error code:" + e) + } +} + +func TestCorruptDbErrors(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + f, err := os.Create(dbFileName) + if err != nil { + t.Error(err) + } + f.Write([]byte{1, 2, 3, 4, 5}) + f.Close() + + db, err := sql.Open("sqlite3", dbFileName) + if err == nil { + _, err = db.Exec("drop table foo") + } + + sqliteErr := err.(Error) + if sqliteErr.Code != ErrNotADB { + t.Error("wrong error code for corrupted DB") + } + if err.Error() == "" { + t.Error("wrong error string for corrupted DB") + } + db.Close() +} + +func TestSqlLogicErrors(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") + if err != nil { + t.Error(err) + } + + const expectedErr = "table Foo already exists" + _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") + if err.Error() != expectedErr { + t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr) + } + +} + +func TestExtendedErrorCodes_ForeignKey(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3-err") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("PRAGMA foreign_keys=ON;") + if err != nil { + t.Errorf("PRAGMA foreign_keys=ON: %v", err) + } + + _, err = db.Exec(`CREATE TABLE Foo ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value INTEGER NOT NULL, + ref INTEGER NULL REFERENCES Foo (id), + UNIQUE(value) + );`) + if err != nil { + t.Error(err) + } + + _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);") + if err == nil { + t.Error("No error!") + } else { + sqliteErr := err.(Error) + if sqliteErr.Code != ErrConstraint { + t.Errorf("Wrong basic error code: %d != %d", + sqliteErr.Code, ErrConstraint) + } + if sqliteErr.ExtendedCode != ErrConstraintForeignKey { + t.Errorf("Wrong extended error code: %d != %d", + sqliteErr.ExtendedCode, ErrConstraintForeignKey) + } + } + +} + +func TestExtendedErrorCodes_NotNull(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3-err") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("PRAGMA foreign_keys=ON;") + if err != nil { + t.Errorf("PRAGMA foreign_keys=ON: %v", err) + } + + _, err = db.Exec(`CREATE TABLE Foo ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value INTEGER NOT NULL, + ref INTEGER NULL REFERENCES Foo (id), + UNIQUE(value) + );`) + if err != nil { + t.Error(err) + } + + res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") + if err != nil { + t.Fatalf("Creating first row: %v", err) + } + + id, err := res.LastInsertId() + if err != nil { + t.Fatalf("Retrieving last insert id: %v", err) + } + + _, err = db.Exec("INSERT INTO Foo (ref) VALUES 
(?);", id) + if err == nil { + t.Error("No error!") + } else { + sqliteErr := err.(Error) + if sqliteErr.Code != ErrConstraint { + t.Errorf("Wrong basic error code: %d != %d", + sqliteErr.Code, ErrConstraint) + } + if sqliteErr.ExtendedCode != ErrConstraintNotNull { + t.Errorf("Wrong extended error code: %d != %d", + sqliteErr.ExtendedCode, ErrConstraintNotNull) + } + } + +} + +func TestExtendedErrorCodes_Unique(t *testing.T) { + dirName, err := ioutil.TempDir("", "sqlite3-err") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dirName) + + dbFileName := path.Join(dirName, "test.db") + db, err := sql.Open("sqlite3", dbFileName) + if err != nil { + t.Error(err) + } + defer db.Close() + + _, err = db.Exec("PRAGMA foreign_keys=ON;") + if err != nil { + t.Errorf("PRAGMA foreign_keys=ON: %v", err) + } + + _, err = db.Exec(`CREATE TABLE Foo ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + value INTEGER NOT NULL, + ref INTEGER NULL REFERENCES Foo (id), + UNIQUE(value) + );`) + if err != nil { + t.Error(err) + } + + res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") + if err != nil { + t.Fatalf("Creating first row: %v", err) + } + + id, err := res.LastInsertId() + if err != nil { + t.Fatalf("Retrieving last insert id: %v", err) + } + + _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id) + if err == nil { + t.Error("No error!") + } else { + sqliteErr := err.(Error) + if sqliteErr.Code != ErrConstraint { + t.Errorf("Wrong basic error code: %d != %d", + sqliteErr.Code, ErrConstraint) + } + if sqliteErr.ExtendedCode != ErrConstraintUnique { + t.Errorf("Wrong extended error code: %d != %d", + sqliteErr.ExtendedCode, ErrConstraintUnique) + } + extended := sqliteErr.Code.Extend(3).Error() + expected := "constraint failed" + if extended != expected { + t.Errorf("Wrong basic error code: %q != %q", + extended, expected) + } + } + +} diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c new file mode 100644 index 0000000..825e7d8 --- /dev/null +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -0,0 +1,201408 @@ +#ifndef USE_LIBSQLITE3 +#define SQLITE_DISABLE_INTRINSIC 1 +/****************************************************************************** +** This file is an amalgamation of many separate C source files from SQLite +** version 3.17.0. By combining all the individual C code files into this +** single large file, the entire code can be compiled as a single translation +** unit. This allows many compilers to do optimizations that would not be +** possible if the files were compiled separately. Performance improvements +** of 5% or more are commonly seen when SQLite is compiled as a single +** translation unit. +** +** This file is all you need to compile SQLite. To use SQLite in other +** programs, you need this file and the "sqlite3.h" header file that defines +** the programming interface to the SQLite library. (If you do not have +** the "sqlite3.h" header file at hand, you will find a copy embedded within +** the text of this file. Search for "Begin file sqlite3.h" to find the start +** of the embedded sqlite3.h header file.) Additional code files may be needed +** if you want a wrapper to interface SQLite with your choice of programming +** language. The code for the "sqlite3" command-line shell is also in a +** separate file. This file contains only code for the core SQLite library. 
+*/ +#define SQLITE_CORE 1 +#define SQLITE_AMALGAMATION 1 +#ifndef SQLITE_PRIVATE +# define SQLITE_PRIVATE static +#endif +/************** Begin file sqliteInt.h ***************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Internal interface definitions for SQLite. +** +*/ +#ifndef SQLITEINT_H +#define SQLITEINT_H + +/* Special Comments: +** +** Some comments have special meaning to the tools that measure test +** coverage: +** +** NO_TEST - The branches on this line are not +** measured by branch coverage. This is +** used on lines of code that actually +** implement parts of coverage testing. +** +** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false +** and the correct answer is still obtained, +** though perhaps more slowly. +** +** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true +** and the correct answer is still obtained, +** though perhaps more slowly. +** +** PREVENTS-HARMLESS-OVERREAD - This branch prevents a buffer overread +** that would be harmless and undetectable +** if it did occur. +** +** In all cases, the special comment must be enclosed in the usual +** slash-asterisk...asterisk-slash comment marks, with no spaces between the +** asterisks and the comment text. +*/ + +/* +** Make sure the Tcl calling convention macro is defined. This macro is +** only used by test code and Tcl integration code. +*/ +#ifndef SQLITE_TCLAPI +# define SQLITE_TCLAPI +#endif + +/* +** Make sure that rand_s() is available on Windows systems with MSVC 2005 +** or higher. +*/ +#if defined(_MSC_VER) && _MSC_VER>=1400 +# define _CRT_RAND_S +#endif + +/* +** Include the header file used to customize the compiler options for MSVC. +** This should be done first so that it can successfully prevent spurious +** compiler warnings due to subsequent content in this file and other files +** that are included by this file. +*/ +/************** Include msvc.h in the middle of sqliteInt.h ******************/ +/************** Begin file msvc.h ********************************************/ +/* +** 2015 January 12 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code that is specific to MSVC. 
+*/ +#ifndef SQLITE_MSVC_H +#define SQLITE_MSVC_H + +#if defined(_MSC_VER) +#pragma warning(disable : 4054) +#pragma warning(disable : 4055) +#pragma warning(disable : 4100) +#pragma warning(disable : 4127) +#pragma warning(disable : 4130) +#pragma warning(disable : 4152) +#pragma warning(disable : 4189) +#pragma warning(disable : 4206) +#pragma warning(disable : 4210) +#pragma warning(disable : 4232) +#pragma warning(disable : 4244) +#pragma warning(disable : 4305) +#pragma warning(disable : 4306) +#pragma warning(disable : 4702) +#pragma warning(disable : 4706) +#endif /* defined(_MSC_VER) */ + +#endif /* SQLITE_MSVC_H */ + +/************** End of msvc.h ************************************************/ +/************** Continuing where we left off in sqliteInt.h ******************/ + +/* +** Special setup for VxWorks +*/ +/************** Include vxworks.h in the middle of sqliteInt.h ***************/ +/************** Begin file vxworks.h *****************************************/ +/* +** 2015-03-02 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code that is specific to Wind River's VxWorks +*/ +#if defined(__RTP__) || defined(_WRS_KERNEL) +/* This is VxWorks. Set up things specially for that OS +*/ +#include +#include /* amalgamator: dontcache */ +#define OS_VXWORKS 1 +#define SQLITE_OS_OTHER 0 +#define SQLITE_HOMEGROWN_RECURSIVE_MUTEX 1 +#define SQLITE_OMIT_LOAD_EXTENSION 1 +#define SQLITE_ENABLE_LOCKING_STYLE 0 +#define HAVE_UTIME 1 +#else +/* This is not VxWorks. */ +#define OS_VXWORKS 0 +#define HAVE_FCHOWN 1 +#define HAVE_READLINK 1 +#define HAVE_LSTAT 1 +#endif /* defined(_WRS_KERNEL) */ + +/************** End of vxworks.h *********************************************/ +/************** Continuing where we left off in sqliteInt.h ******************/ + +/* +** These #defines should enable >2GB file support on POSIX if the +** underlying operating system supports it. If the OS lacks +** large file support, or if the OS is windows, these should be no-ops. +** +** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any +** system #includes. Hence, this block of code must be the very first +** code in all source files. +** +** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch +** on the compiler command line. This is necessary if you are compiling +** on a recent machine (ex: Red Hat 7.2) but you want your code to work +** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2 +** without this option, LFS is enable. But LFS does not exist in the kernel +** in Red Hat 6.0, so the code won't work. Hence, for maximum binary +** portability you should omit LFS. +** +** The previous paragraph was written in 2005. (This paragraph is written +** on 2008-11-28.) These days, all Linux kernels support large files, so +** you should probably leave LFS enabled. But some embedded platforms might +** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful. +** +** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later. 
+*/ +#ifndef SQLITE_DISABLE_LFS +# define _LARGE_FILE 1 +# ifndef _FILE_OFFSET_BITS +# define _FILE_OFFSET_BITS 64 +# endif +# define _LARGEFILE_SOURCE 1 +#endif + +/* The GCC_VERSION, CLANG_VERSION, and MSVC_VERSION macros are used to +** conditionally include optimizations for each of these compilers. A +** value of 0 means that compiler is not being used. The +** SQLITE_DISABLE_INTRINSIC macro means do not use any compiler-specific +** optimizations, and hence set all compiler macros to 0 +*/ +#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC) +# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__) +#else +# define GCC_VERSION 0 +#endif +#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC) +# define CLANG_VERSION \ + (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__) +#else +# define CLANG_VERSION 0 +#endif +#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC) +# define MSVC_VERSION _MSC_VER +#else +# define MSVC_VERSION 0 +#endif + +/* Needed for various definitions... */ +#if defined(__GNUC__) && !defined(_GNU_SOURCE) +# define _GNU_SOURCE +#endif + +#if defined(__OpenBSD__) && !defined(_BSD_SOURCE) +# define _BSD_SOURCE +#endif + +/* +** For MinGW, check to see if we can include the header file containing its +** version information, among other things. Normally, this internal MinGW +** header file would [only] be included automatically by other MinGW header +** files; however, the contained version information is now required by this +** header file to work around binary compatibility issues (see below) and +** this is the only known way to reliably obtain it. This entire #if block +** would be completely unnecessary if there was any other way of detecting +** MinGW via their preprocessor (e.g. if they customized their GCC to define +** some MinGW-specific macros). When compiling for MinGW, either the +** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be +** defined; otherwise, detection of conditions specific to MinGW will be +** disabled. +*/ +#if defined(_HAVE_MINGW_H) +# include "mingw.h" +#elif defined(_HAVE__MINGW_H) +# include "_mingw.h" +#endif + +/* +** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T +** define is required to maintain binary compatibility with the MSVC runtime +** library in use (e.g. for Windows XP). +*/ +#if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \ + defined(_WIN32) && !defined(_WIN64) && \ + defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \ + defined(__MSVCRT__) +# define _USE_32BIT_TIME_T +#endif + +/* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear +** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for +** MinGW. +*/ +/************** Include sqlite3.h in the middle of sqliteInt.h ***************/ +/************** Begin file sqlite3.h *****************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This header file defines the interface that the SQLite library +** presents to client programs. 
If a C-function, structure, datatype,
+** or constant definition does not appear in this file, then it is
+** not a published API of SQLite, is subject to change without
+** notice, and should not be referenced by programs that use SQLite.
+**
+** Some of the definitions that are in this file are marked as
+** "experimental".  Experimental interfaces are normally new
+** features recently added to SQLite.  We do not anticipate changes
+** to experimental interfaces but reserve the right to make minor changes
+** if experience from use "in the wild" suggests such changes are prudent.
+**
+** The official C-language API documentation for SQLite is derived
+** from comments in this file.  This file is the authoritative source
+** on how SQLite interfaces are supposed to operate.
+**
+** The name of this file under configuration management is "sqlite.h.in".
+** The makefile makes some minor changes to this file (such as inserting
+** the version number) and changes its name to "sqlite3.h" as
+** part of the build process.
+*/
+#ifndef SQLITE3_H
+#define SQLITE3_H
+#include <stdarg.h>     /* Needed for the definition of va_list */
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#if 0
+extern "C" {
+#endif
+
+
+/*
+** Provide the ability to override linkage features of the interface.
+*/
+#ifndef SQLITE_EXTERN
+# define SQLITE_EXTERN extern
+#endif
+#ifndef SQLITE_API
+# define SQLITE_API
+#endif
+#ifndef SQLITE_CDECL
+# define SQLITE_CDECL
+#endif
+#ifndef SQLITE_APICALL
+# define SQLITE_APICALL
+#endif
+#ifndef SQLITE_STDCALL
+# define SQLITE_STDCALL SQLITE_APICALL
+#endif
+#ifndef SQLITE_CALLBACK
+# define SQLITE_CALLBACK
+#endif
+#ifndef SQLITE_SYSAPI
+# define SQLITE_SYSAPI
+#endif
+
+/*
+** These no-op macros are used in front of interfaces to mark those
+** interfaces as either deprecated or experimental.  New applications
+** should not use deprecated interfaces - they are supported for backwards
+** compatibility only.  Application writers should be aware that
+** experimental interfaces are subject to change in point releases.
+**
+** These macros used to resolve to various kinds of compiler magic that
+** would generate warning messages when they were used.  But that
+** compiler magic ended up generating such a flurry of bug reports
+** that we have taken it all out and gone back to using simple
+** noop macros.
+*/
+#define SQLITE_DEPRECATED
+#define SQLITE_EXPERIMENTAL
+
+/*
+** Ensure these symbols were not defined by some previous header file.
+*/
+#ifdef SQLITE_VERSION
+# undef SQLITE_VERSION
+#endif
+#ifdef SQLITE_VERSION_NUMBER
+# undef SQLITE_VERSION_NUMBER
+#endif
+
+/*
+** CAPI3REF: Compile-Time Library Version Numbers
+**
+** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header
+** evaluates to a string literal that is the SQLite version in the
+** format "X.Y.Z" where X is the major version number (always 3 for
+** SQLite3) and Y is the minor version number and Z is the release number.)^
+** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer
+** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same
+** numbers used in [SQLITE_VERSION].)^
+** The SQLITE_VERSION_NUMBER for any given release of SQLite will also
+** be larger than the release from which it is derived.  Either Y will
+** be held constant and Z will be incremented or else Y will be incremented
+** and Z will be reset to zero.
+**
+** Since [version 3.6.18] ([dateof:3.6.18]),
+** SQLite source code has been stored in the
+** Fossil configuration management
+** system.
^The SQLITE_SOURCE_ID macro evaluates to +** a string which identifies a particular check-in of SQLite +** within its configuration management system. ^The SQLITE_SOURCE_ID +** string contains the date and time of the check-in (UTC) and an SHA1 +** hash of the entire source tree. +** +** See also: [sqlite3_libversion()], +** [sqlite3_libversion_number()], [sqlite3_sourceid()], +** [sqlite_version()] and [sqlite_source_id()]. +*/ +#define SQLITE_VERSION "3.17.0" +#define SQLITE_VERSION_NUMBER 3017000 +#define SQLITE_SOURCE_ID "2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c" + +/* +** CAPI3REF: Run-Time Library Version Numbers +** KEYWORDS: sqlite3_version sqlite3_sourceid +** +** These interfaces provide the same information as the [SQLITE_VERSION], +** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros +** but are associated with the library instead of the header file. ^(Cautious +** programmers might include assert() statements in their application to +** verify that values returned by these interfaces match the macros in +** the header, and thus ensure that the application is +** compiled with matching library and header files. +** +**
+** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
+** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
+** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
+** 
)^
+**
+** ^The sqlite3_version[] string constant contains the text of the
+** [SQLITE_VERSION] macro.  ^The sqlite3_libversion() function returns a
+** pointer to the sqlite3_version[] string constant.  The sqlite3_libversion()
+** function is provided for use in DLLs since DLL users usually do not have
+** direct access to string constants within the DLL.  ^The
+** sqlite3_libversion_number() function returns an integer equal to
+** [SQLITE_VERSION_NUMBER].  ^The sqlite3_sourceid() function returns
+** a pointer to a string constant whose value is the same as the
+** [SQLITE_SOURCE_ID] C preprocessor macro.
+**
+** See also: [sqlite_version()] and [sqlite_source_id()].
+*/
+SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
+
+/*
+** CAPI3REF: Run-Time Library Compilation Options Diagnostics
+**
+** ^The sqlite3_compileoption_used() function returns 0 or 1
+** indicating whether the specified option was defined at
+** compile time.  ^The SQLITE_ prefix may be omitted from the
+** option name passed to sqlite3_compileoption_used().
+**
+** ^The sqlite3_compileoption_get() function allows iterating
+** over the list of options that were defined at compile time by
+** returning the N-th compile time option string.  ^If N is out of range,
+** sqlite3_compileoption_get() returns a NULL pointer.  ^The SQLITE_
+** prefix is omitted from any strings returned by
+** sqlite3_compileoption_get().
+**
+** ^Support for the diagnostic functions sqlite3_compileoption_used()
+** and sqlite3_compileoption_get() may be omitted by specifying the
+** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time.
+**
+** See also: SQL functions [sqlite_compileoption_used()] and
+** [sqlite_compileoption_get()] and the [compile_options pragma].
+*/
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
+#endif
+
+/*
+** CAPI3REF: Test To See If The Library Is Threadsafe
+**
+** ^The sqlite3_threadsafe() function returns zero if and only if
+** SQLite was compiled with mutexing code omitted due to the
+** [SQLITE_THREADSAFE] compile-time option being set to 0.
+**
+** SQLite can be compiled with or without mutexes.  When
+** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes
+** are enabled and SQLite is threadsafe.  When the
+** [SQLITE_THREADSAFE] macro is 0,
+** the mutexes are omitted.  Without the mutexes, it is not safe
+** to use SQLite concurrently from more than one thread.
+**
+** Enabling mutexes incurs a measurable performance penalty.
+** So if speed is of utmost importance, it makes sense to disable
+** the mutexes.  But for maximum safety, mutexes should be enabled.
+** ^The default behavior is for mutexes to be enabled.
+**
+** This interface can be used by an application to make sure that the
+** version of SQLite that it is linking against was compiled with
+** the desired setting of the [SQLITE_THREADSAFE] macro.
+**
+** This interface only reports on the compile-time mutex setting
+** of the [SQLITE_THREADSAFE] flag.  If SQLite is compiled with
+** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but
+** can be fully or partially disabled using a call to [sqlite3_config()]
+** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD],
+** or [SQLITE_CONFIG_SERIALIZED].
^(The return value of the +** sqlite3_threadsafe() function shows only the compile-time setting of +** thread safety, not any run-time changes to that setting made by +** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() +** is unchanged by calls to sqlite3_config().)^ +** +** See the [threading mode] documentation for additional information. +*/ +SQLITE_API int sqlite3_threadsafe(void); + +/* +** CAPI3REF: Database Connection Handle +** KEYWORDS: {database connection} {database connections} +** +** Each open SQLite database is represented by a pointer to an instance of +** the opaque structure named "sqlite3". It is useful to think of an sqlite3 +** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and +** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] +** and [sqlite3_close_v2()] are its destructors. There are many other +** interfaces (such as +** [sqlite3_prepare_v2()], [sqlite3_create_function()], and +** [sqlite3_busy_timeout()] to name but three) that are methods on an +** sqlite3 object. +*/ +typedef struct sqlite3 sqlite3; + +/* +** CAPI3REF: 64-Bit Integer Types +** KEYWORDS: sqlite_int64 sqlite_uint64 +** +** Because there is no cross-platform way to specify 64-bit integer types +** SQLite includes typedefs for 64-bit signed and unsigned integers. +** +** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. +** The sqlite_int64 and sqlite_uint64 types are supported for backwards +** compatibility only. +** +** ^The sqlite3_int64 and sqlite_int64 types can store integer values +** between -9223372036854775808 and +9223372036854775807 inclusive. ^The +** sqlite3_uint64 and sqlite_uint64 types can store integer values +** between 0 and +18446744073709551615 inclusive. +*/ +#ifdef SQLITE_INT64_TYPE + typedef SQLITE_INT64_TYPE sqlite_int64; +# ifdef SQLITE_UINT64_TYPE + typedef SQLITE_UINT64_TYPE sqlite_uint64; +# else + typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +# endif +#elif defined(_MSC_VER) || defined(__BORLANDC__) + typedef __int64 sqlite_int64; + typedef unsigned __int64 sqlite_uint64; +#else + typedef long long int sqlite_int64; + typedef unsigned long long int sqlite_uint64; +#endif +typedef sqlite_int64 sqlite3_int64; +typedef sqlite_uint64 sqlite3_uint64; + +/* +** If compiling for a processor that lacks floating point support, +** substitute integer for floating-point. +*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# define double sqlite3_int64 +#endif + +/* +** CAPI3REF: Closing A Database Connection +** DESTRUCTOR: sqlite3 +** +** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors +** for the [sqlite3] object. +** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if +** the [sqlite3] object is successfully destroyed and all associated +** resources are deallocated. +** +** ^If the database connection is associated with unfinalized prepared +** statements or unfinished sqlite3_backup objects then sqlite3_close() +** will leave the database connection open and return [SQLITE_BUSY]. +** ^If sqlite3_close_v2() is called with unfinalized prepared statements +** and/or unfinished sqlite3_backups, then the database connection becomes +** an unusable "zombie" which will automatically be deallocated when the +** last prepared statement is finalized or the last sqlite3_backup is +** finished. 
The sqlite3_close_v2() interface is intended for use with
+** host languages that are garbage collected, and where the order in which
+** destructors are called is arbitrary.
+**
+** Applications should [sqlite3_finalize | finalize] all [prepared statements],
+** [sqlite3_blob_close | close] all [BLOB handles], and
+** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated
+** with the [sqlite3] object prior to attempting to close the object.  ^If
+** sqlite3_close_v2() is called on a [database connection] that still has
+** outstanding [prepared statements], [BLOB handles], and/or
+** [sqlite3_backup] objects then it returns [SQLITE_OK] and the deallocation
+** of resources is deferred until all [prepared statements], [BLOB handles],
+** and [sqlite3_backup] objects are also destroyed.
+**
+** ^If an [sqlite3] object is destroyed while a transaction is open,
+** the transaction is automatically rolled back.
+**
+** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)]
+** must be either a NULL
+** pointer or an [sqlite3] object pointer obtained
+** from [sqlite3_open()], [sqlite3_open16()], or
+** [sqlite3_open_v2()], and not previously closed.
+** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
+** argument is a harmless no-op.
+*/
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
+
+/*
+** The type for a callback function.
+** This is legacy and deprecated.  It is included for historical
+** compatibility and is not documented.
+*/
+typedef int (*sqlite3_callback)(void*,int,char**, char**);
+
+/*
+** CAPI3REF: One-Step Query Execution Interface
+** METHOD: sqlite3
+**
+** The sqlite3_exec() interface is a convenience wrapper around
+** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()],
+** that allows an application to run multiple statements of SQL
+** without having to use a lot of C code.
+**
+** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded,
+** semicolon-separated SQL statements passed into its 2nd argument,
+** in the context of the [database connection] passed in as its 1st
+** argument.  ^If the callback function of the 3rd argument to
+** sqlite3_exec() is not NULL, then it is invoked for each result row
+** coming out of the evaluated SQL statements.  ^The 4th argument to
+** sqlite3_exec() is relayed through to the 1st argument of each
+** callback invocation.  ^If the callback pointer to sqlite3_exec()
+** is NULL, then no callback is ever invoked and result rows are
+** ignored.
+**
+** ^If an error occurs while evaluating the SQL statements passed into
+** sqlite3_exec(), then execution of the current statement stops and
+** subsequent statements are skipped.  ^If the 5th parameter to sqlite3_exec()
+** is not NULL then any error message is written into memory obtained
+** from [sqlite3_malloc()] and passed back through the 5th parameter.
+** To avoid memory leaks, the application should invoke [sqlite3_free()]
+** on error message strings returned through the 5th parameter of
+** sqlite3_exec() after the error message string is no longer needed.
+** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors
+** occur, then sqlite3_exec() sets the pointer in its 5th parameter to
+** NULL before returning.
+**
+** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec()
+** routine returns SQLITE_ABORT without invoking the callback again and
+** without running any subsequent SQL statements.
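+**
+** The following is a minimal usage sketch, not part of the upstream
+** documentation; the handle "db" and the query text are assumptions
+** for illustration only:
+**
+**   static int print_row(void *pUnused, int nCol, char **azVal, char **azCol){
+**     int i;
+**     for(i=0; i<nCol; i++){
+**       printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
+**     }
+**     return 0;
+**   }
+**
+**   char *zErrMsg = 0;
+**   int rc = sqlite3_exec(db, "SELECT * FROM tbl;", print_row, 0, &zErrMsg);
+**   if( rc!=SQLITE_OK && zErrMsg!=0 ){
+**     fprintf(stderr, "SQL error: %s\n", zErrMsg);
+**     sqlite3_free(zErrMsg);
+**   }
+**
+** Returning 0 from the callback lets evaluation continue; returning
+** non-zero aborts with SQLITE_ABORT, as described above.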
+** +** ^The 2nd argument to the sqlite3_exec() callback function is the +** number of columns in the result. ^The 3rd argument to the sqlite3_exec() +** callback is an array of pointers to strings obtained as if from +** [sqlite3_column_text()], one for each column. ^If an element of a +** result row is NULL then the corresponding string pointer for the +** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the +** sqlite3_exec() callback is an array of pointers to strings where each +** entry represents the name of corresponding result column as obtained +** from [sqlite3_column_name()]. +** +** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer +** to an empty string, or a pointer that contains only whitespace and/or +** SQL comments, then no SQL statements are evaluated and the database +** is not changed. +** +** Restrictions: +** +**
    +**
  • The application must ensure that the 1st parameter to sqlite3_exec() +** is a valid and open [database connection]. +**
  • The application must not close the [database connection] specified by +** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. +**
  • The application must not modify the SQL statement text passed into +** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. +**
+*/ +SQLITE_API int sqlite3_exec( + sqlite3*, /* An open database */ + const char *sql, /* SQL to be evaluated */ + int (*callback)(void*,int,char**,char**), /* Callback function */ + void *, /* 1st argument to callback */ + char **errmsg /* Error msg written here */ +); + +/* +** CAPI3REF: Result Codes +** KEYWORDS: {result code definitions} +** +** Many SQLite functions return an integer result code from the set shown +** here in order to indicate success or failure. +** +** New error codes may be added in future versions of SQLite. +** +** See also: [extended result code definitions] +*/ +#define SQLITE_OK 0 /* Successful result */ +/* beginning-of-error-codes */ +#define SQLITE_ERROR 1 /* SQL error or missing database */ +#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ +#define SQLITE_PERM 3 /* Access permission denied */ +#define SQLITE_ABORT 4 /* Callback routine requested an abort */ +#define SQLITE_BUSY 5 /* The database file is locked */ +#define SQLITE_LOCKED 6 /* A table in the database is locked */ +#define SQLITE_NOMEM 7 /* A malloc() failed */ +#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ +#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ +#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ +#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ +#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ +#define SQLITE_FULL 13 /* Insertion failed because database is full */ +#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ +#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ +#define SQLITE_EMPTY 16 /* Database is empty */ +#define SQLITE_SCHEMA 17 /* The database schema changed */ +#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ +#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ +#define SQLITE_MISMATCH 20 /* Data type mismatch */ +#define SQLITE_MISUSE 21 /* Library used incorrectly */ +#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ +#define SQLITE_AUTH 23 /* Authorization denied */ +#define SQLITE_FORMAT 24 /* Auxiliary database format error */ +#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ +#define SQLITE_NOTADB 26 /* File opened that is not a database file */ +#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ +#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ +#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ +#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ +/* end-of-error-codes */ + +/* +** CAPI3REF: Extended Result Codes +** KEYWORDS: {extended result code definitions} +** +** In its default configuration, SQLite API routines return one of 30 integer +** [result codes]. However, experience has shown that many of +** these result codes are too coarse-grained. They do not provide as +** much information about problems as programmers might like. In an effort to +** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8] +** and later) include +** support for additional result codes that provide more detailed information +** about errors. These [extended result codes] are enabled or disabled +** on a per database connection basis using the +** [sqlite3_extended_result_codes()] API. Or, the extended code for +** the most recent error can be obtained using +** [sqlite3_extended_errcode()]. 
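+**
+** A brief sketch of opting in, not part of the upstream documentation;
+** the handle "db", the statement text, and handle_fsync_failure() are
+** assumptions for illustration:
+**
+**   sqlite3_extended_result_codes(db, 1);
+**   if( sqlite3_exec(db, "COMMIT;", 0, 0, 0)!=SQLITE_OK ){
+**     int rc = sqlite3_extended_errcode(db);
+**     if( rc==SQLITE_IOERR_FSYNC ) handle_fsync_failure();
+**   }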
+*/ +#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) +#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) +#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) +#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) +#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) +#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) +#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) +#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) +#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) +#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) +#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) +#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) +#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) +#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) +#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) +#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) +#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) +#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) +#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) +#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) +#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) +#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) +#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) +#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) +#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) +#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) +#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8)) +#define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8)) +#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) +#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) +#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) +#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) +#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) +#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) +#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) +#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) +#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) +#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) +#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) +#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) +#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) +#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) +#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) +#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) +#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) +#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) +#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) +#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) +#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) +#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) +#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) +#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) +#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) +#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) +#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) + +/* +** CAPI3REF: Flags For File Open Operations +** +** These bit values are intended for use in the +** 3rd parameter to the [sqlite3_open_v2()] interface and +** in the 4th parameter to the [sqlite3_vfs.xOpen] method. 
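+**
+** For illustration only (not part of the upstream documentation), a
+** typical read-write open with these flags; the filename is an
+** assumption and error handling is abbreviated:
+**
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("data.db", &db,
+**                            SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, 0);
+**   if( rc!=SQLITE_OK ) fprintf(stderr, "%s\n", sqlite3_errmsg(db));
+**   sqlite3_close(db);
+**
+** Note that sqlite3_close() is called even on failure, since a handle
+** is normally still returned in that case.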
+*/ +#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ +#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ +#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ +#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ +#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ +#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ +#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ +#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ +#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ +#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ +#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ + +/* Reserved: 0x00F00000 */ + +/* +** CAPI3REF: Device Characteristics +** +** The xDeviceCharacteristics method of the [sqlite3_io_methods] +** object returns an integer which is a vector of these +** bit values expressing I/O characteristics of the mass storage +** device that holds the file that the [sqlite3_io_methods] +** refers to. +** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that +** after reboot following a crash or power loss, the only bytes in a +** file that were written at the application level might have changed +** and that adjacent bytes, even bytes within the same sector are +** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN +** flag indicates that a file cannot be deleted when open. The +** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on +** read-only media and cannot be changed even by processes with +** elevated privileges. 
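+**
+** As a sketch (not part of the upstream documentation), a hypothetical
+** VFS whose storage performs 512-byte atomic writes and safe appends
+** might implement the method like this:
+**
+**   static int myDeviceCharacteristics(sqlite3_file *pNotUsed){
+**     return SQLITE_IOCAP_ATOMIC512 | SQLITE_IOCAP_SAFE_APPEND;
+**   }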
+*/ +#define SQLITE_IOCAP_ATOMIC 0x00000001 +#define SQLITE_IOCAP_ATOMIC512 0x00000002 +#define SQLITE_IOCAP_ATOMIC1K 0x00000004 +#define SQLITE_IOCAP_ATOMIC2K 0x00000008 +#define SQLITE_IOCAP_ATOMIC4K 0x00000010 +#define SQLITE_IOCAP_ATOMIC8K 0x00000020 +#define SQLITE_IOCAP_ATOMIC16K 0x00000040 +#define SQLITE_IOCAP_ATOMIC32K 0x00000080 +#define SQLITE_IOCAP_ATOMIC64K 0x00000100 +#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 +#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 +#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 +#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 +#define SQLITE_IOCAP_IMMUTABLE 0x00002000 + +/* +** CAPI3REF: File Locking Levels +** +** SQLite uses one of these integer values as the second +** argument to calls it makes to the xLock() and xUnlock() methods +** of an [sqlite3_io_methods] object. +*/ +#define SQLITE_LOCK_NONE 0 +#define SQLITE_LOCK_SHARED 1 +#define SQLITE_LOCK_RESERVED 2 +#define SQLITE_LOCK_PENDING 3 +#define SQLITE_LOCK_EXCLUSIVE 4 + +/* +** CAPI3REF: Synchronization Type Flags +** +** When SQLite invokes the xSync() method of an +** [sqlite3_io_methods] object it uses a combination of +** these integer values as the second argument. +** +** When the SQLITE_SYNC_DATAONLY flag is used, it means that the +** sync operation only needs to flush data to mass storage. Inode +** information need not be flushed. If the lower four bits of the flag +** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. +** If the lower four bits equal SQLITE_SYNC_FULL, that means +** to use Mac OS X style fullsync instead of fsync(). +** +** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags +** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL +** settings. The [synchronous pragma] determines when calls to the +** xSync VFS method occur and applies uniformly across all platforms. +** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how +** energetic or rigorous or forceful the sync operations are and +** only make a difference on Mac OSX for the default SQLite code. +** (Third-party VFS implementations might also make the distinction +** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the +** operating systems natively supported by SQLite, only Mac OSX +** cares about the difference.) +*/ +#define SQLITE_SYNC_NORMAL 0x00002 +#define SQLITE_SYNC_FULL 0x00003 +#define SQLITE_SYNC_DATAONLY 0x00010 + +/* +** CAPI3REF: OS Interface Open File Handle +** +** An [sqlite3_file] object represents an open file in the +** [sqlite3_vfs | OS interface layer]. Individual OS interface +** implementations will +** want to subclass this object by appending additional fields +** for their own use. The pMethods entry is a pointer to an +** [sqlite3_io_methods] object that defines methods for performing +** I/O operations on the open file. +*/ +typedef struct sqlite3_file sqlite3_file; +struct sqlite3_file { + const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ +}; + +/* +** CAPI3REF: OS Interface File Virtual Methods Object +** +** Every file opened by the [sqlite3_vfs.xOpen] method populates an +** [sqlite3_file] object (or, more commonly, a subclass of the +** [sqlite3_file] object) with a pointer to an instance of this object. +** This object defines the methods used to perform various operations +** against the open file represented by the [sqlite3_file] object. 
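+**
+** The customary way to build such a subclass, sketched here with
+** hypothetical names (not part of the upstream documentation), is to
+** embed the sqlite3_file as the first member of a larger structure:
+**
+**   typedef struct MyFile MyFile;
+**   struct MyFile {
+**     sqlite3_file base;
+**     int fd;
+**   };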
+** +** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element +** to a non-NULL pointer, then the sqlite3_io_methods.xClose method +** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The +** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] +** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element +** to NULL. +** +** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or +** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). +** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] +** flag may be ORed in to indicate that only the data of the file +** and not its inode needs to be synced. +** +** The integer values to xLock() and xUnlock() are one of +**
    +**
  • [SQLITE_LOCK_NONE], +**
  • [SQLITE_LOCK_SHARED], +**
  • [SQLITE_LOCK_RESERVED], +**
  • [SQLITE_LOCK_PENDING], or +**
  • [SQLITE_LOCK_EXCLUSIVE]. +**
+** xLock() increases the lock. xUnlock() decreases the lock. +** The xCheckReservedLock() method checks whether any database connection, +** either in this process or in some other process, is holding a RESERVED, +** PENDING, or EXCLUSIVE lock on the file. It returns true +** if such a lock exists and false otherwise. +** +** The xFileControl() method is a generic interface that allows custom +** VFS implementations to directly control an open file using the +** [sqlite3_file_control()] interface. The second "op" argument is an +** integer opcode. The third argument is a generic pointer intended to +** point to a structure that may contain arguments or space in which to +** write return values. Potential uses for xFileControl() might be +** functions to enable blocking locks with timeouts, to change the +** locking strategy (for example to use dot-file locks), to inquire +** about the status of a lock, or to break stale locks. The SQLite +** core reserves all opcodes less than 100 for its own use. +** A [file control opcodes | list of opcodes] less than 100 is available. +** Applications that define a custom xFileControl method should use opcodes +** greater than 100 to avoid conflicts. VFS implementations should +** return [SQLITE_NOTFOUND] for file control opcodes that they do not +** recognize. +** +** The xSectorSize() method returns the sector size of the +** device that underlies the file. The sector size is the +** minimum write that can be performed without disturbing +** other bytes in the file. The xDeviceCharacteristics() +** method returns a bit vector describing behaviors of the +** underlying device: +** +**
    +**
  • [SQLITE_IOCAP_ATOMIC] +**
  • [SQLITE_IOCAP_ATOMIC512] +**
  • [SQLITE_IOCAP_ATOMIC1K] +**
  • [SQLITE_IOCAP_ATOMIC2K] +**
  • [SQLITE_IOCAP_ATOMIC4K] +**
  • [SQLITE_IOCAP_ATOMIC8K] +**
  • [SQLITE_IOCAP_ATOMIC16K] +**
  • [SQLITE_IOCAP_ATOMIC32K] +**
  • [SQLITE_IOCAP_ATOMIC64K] +**
  • [SQLITE_IOCAP_SAFE_APPEND] +**
  • [SQLITE_IOCAP_SEQUENTIAL] +**
  • [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN] +**
  • [SQLITE_IOCAP_POWERSAFE_OVERWRITE] +**
  • [SQLITE_IOCAP_IMMUTABLE] +**
+** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). +** +** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill +** in the unread portions of the buffer with zeros. A VFS that +** fails to zero-fill short reads might seem to work. However, +** failure to zero-fill short reads will eventually lead to +** database corruption. +*/ +typedef struct sqlite3_io_methods sqlite3_io_methods; +struct sqlite3_io_methods { + int iVersion; + int (*xClose)(sqlite3_file*); + int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); + int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); + int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); + int (*xSync)(sqlite3_file*, int flags); + int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); + int (*xLock)(sqlite3_file*, int); + int (*xUnlock)(sqlite3_file*, int); + int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); + int (*xFileControl)(sqlite3_file*, int op, void *pArg); + int (*xSectorSize)(sqlite3_file*); + int (*xDeviceCharacteristics)(sqlite3_file*); + /* Methods above are valid for version 1 */ + int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); + int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); + void (*xShmBarrier)(sqlite3_file*); + int (*xShmUnmap)(sqlite3_file*, int deleteFlag); + /* Methods above are valid for version 2 */ + int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); + int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p); + /* Methods above are valid for version 3 */ + /* Additional methods may be added in future releases */ +}; + +/* +** CAPI3REF: Standard File Control Opcodes +** KEYWORDS: {file control opcodes} {file control opcode} +** +** These integer constants are opcodes for the xFileControl method +** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] +** interface. +** +**
    +**
  • [[SQLITE_FCNTL_LOCKSTATE]] +** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This +** opcode causes the xFileControl method to write the current state of +** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], +** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) +** into an integer that the pArg argument points to. This capability +** is used during testing and is only available when the SQLITE_TEST +** compile-time option is used. +** +**
  • [[SQLITE_FCNTL_SIZE_HINT]] +** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS +** layer a hint of how large the database file will grow to be during the +** current transaction. This hint is not guaranteed to be accurate but it +** is often close. The underlying VFS might choose to preallocate database +** file space based on this hint in order to help writes to the database +** file run faster. +** +**
  • [[SQLITE_FCNTL_CHUNK_SIZE]] +** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS +** extends and truncates the database file in chunks of a size specified +** by the user. The fourth argument to [sqlite3_file_control()] should +** point to an integer (type int) containing the new chunk-size to use +** for the nominated database. Allocating database file space in large +** chunks (say 1MB at a time), may reduce file-system fragmentation and +** improve performance on some systems. +** +**
  • [[SQLITE_FCNTL_FILE_POINTER]] +** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer +** to the [sqlite3_file] object associated with a particular database +** connection. See also [SQLITE_FCNTL_JOURNAL_POINTER]. +** +**
  • [[SQLITE_FCNTL_JOURNAL_POINTER]] +** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer +** to the [sqlite3_file] object associated with the journal file (either +** the [rollback journal] or the [write-ahead log]) for a particular database +** connection. See also [SQLITE_FCNTL_FILE_POINTER]. +** +**
  • [[SQLITE_FCNTL_SYNC_OMITTED]] +** No longer in use. +** +**
• [[SQLITE_FCNTL_SYNC]]
+** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
+** sent to the VFS immediately before the xSync method is invoked on a
+** database file descriptor.  Or, if the xSync method is not invoked
+** because the user has configured SQLite with
+** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place
+** of the xSync method.  In most cases, the pointer argument passed with
+** this file-control is NULL.  However, if the database file is being synced
+** as part of a multi-database commit, the argument points to a nul-terminated
+** string containing the transaction's master-journal file name.  VFSes that
+** do not need this signal should silently ignore this opcode.  Applications
+** should not call [sqlite3_file_control()] with this opcode as doing so may
+** disrupt the operation of the specialized VFSes that do require it.
+**
+**
  • [[SQLITE_FCNTL_COMMIT_PHASETWO]] +** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite +** and sent to the VFS after a transaction has been committed immediately +** but before the database is unlocked. VFSes that do not need this signal +** should silently ignore this opcode. Applications should not call +** [sqlite3_file_control()] with this opcode as doing so may disrupt the +** operation of the specialized VFSes that do require it. +** +**
• [[SQLITE_FCNTL_WIN32_AV_RETRY]]
+** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
+** retry counts and intervals for certain disk I/O operations for the
+** windows [VFS] in order to provide robustness in the presence of
+** anti-virus programs.  By default, the windows VFS will retry file read,
+** file write, and file delete operations up to 10 times, with a delay
+** of 25 milliseconds before the first retry and with the delay increasing
+** by an additional 25 milliseconds with each subsequent retry.  This
+** opcode allows these two values (10 retries and 25 milliseconds of delay)
+** to be adjusted.  The values are changed for all database connections
+** within the same process.  The argument is a pointer to an array of two
+** integers where the first integer is the new retry count and the second
+** integer is the delay.  If either integer is negative, then the setting
+** is not changed but instead the prior value of that setting is written
+** into the array entry, allowing the current retry settings to be
+** interrogated.  The zDbName parameter is ignored.
+**
+**
  • [[SQLITE_FCNTL_PERSIST_WAL]] +** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the +** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary +** write ahead log and shared memory files used for transaction control +** are automatically deleted when the latest connection to the database +** closes. Setting persistent WAL mode causes those files to persist after +** close. Persisting the files is useful when other processes that do not +** have write permission on the directory containing the database file want +** to read the database file, as the WAL and shared memory files must exist +** in order for the database to be readable. The fourth parameter to +** [sqlite3_file_control()] for this opcode should be a pointer to an integer. +** That integer is 0 to disable persistent WAL mode or 1 to enable persistent +** WAL mode. If the integer is -1, then it is overwritten with the current +** WAL persistence setting. +** +**
  • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]] +** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the +** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting +** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the +** xDeviceCharacteristics methods. The fourth parameter to +** [sqlite3_file_control()] for this opcode should be a pointer to an integer. +** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage +** mode. If the integer is -1, then it is overwritten with the current +** zero-damage mode setting. +** +**
  • [[SQLITE_FCNTL_OVERWRITE]] +** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening +** a write transaction to indicate that, unless it is rolled back for some +** reason, the entire database file will be overwritten by the current +** transaction. This is used by VACUUM operations. +** +**
• [[SQLITE_FCNTL_VFSNAME]]
+** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of
+** all [VFSes] in the VFS stack.  The names of all VFS shims and the
+** final bottom-level VFS are written into memory obtained from
+** [sqlite3_malloc()] and the result is stored in the char* variable
+** that the fourth parameter of [sqlite3_file_control()] points to.
+** The caller is responsible for freeing the memory when done.  As with
+** all file-control actions, there is no guarantee that this will actually
+** do anything.  Callers should initialize the char* variable to a NULL
+** pointer in case this file-control is not implemented.  This file-control
+** is intended for diagnostic use only.
+**
+**
• [[SQLITE_FCNTL_VFS_POINTER]]
+** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level
+** [VFSes] currently in use.  ^(The argument X in
+** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be
+** of type "[sqlite3_vfs] **".  This opcode will set *X
+** to a pointer to the top-level VFS.)^
+** ^When there are multiple VFS shims in the stack, this opcode finds the
+** upper-most shim only.
+**
+**
  • [[SQLITE_FCNTL_PRAGMA]] +** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] +** file control is sent to the open [sqlite3_file] object corresponding +** to the database file to which the pragma statement refers. ^The argument +** to the [SQLITE_FCNTL_PRAGMA] file control is an array of +** pointers to strings (char**) in which the second element of the array +** is the name of the pragma and the third element is the argument to the +** pragma or NULL if the pragma has no argument. ^The handler for an +** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element +** of the char** argument point to a string obtained from [sqlite3_mprintf()] +** or the equivalent and that string will become the result of the pragma or +** the error message if the pragma fails. ^If the +** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal +** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA] +** file control returns [SQLITE_OK], then the parser assumes that the +** VFS has handled the PRAGMA itself and the parser generates a no-op +** prepared statement if result string is NULL, or that returns a copy +** of the result string if the string is non-NULL. +** ^If the [SQLITE_FCNTL_PRAGMA] file control returns +** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means +** that the VFS encountered an error while handling the [PRAGMA] and the +** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA] +** file control occurs at the beginning of pragma statement analysis and so +** it is able to override built-in [PRAGMA] statements. +** +**
• [[SQLITE_FCNTL_BUSYHANDLER]]
+** ^The [SQLITE_FCNTL_BUSYHANDLER]
+** file-control may be invoked by SQLite on the database file handle
+** shortly after it is opened in order to provide a custom VFS with access
+** to the connection's busy-handler callback.  The argument is of type (void **)
+** - an array of two (void *) values.  The first (void *) actually points
+** to a function of type (int (*)(void *)).  In order to invoke the connection's
+** busy-handler, this function should be invoked with the second (void *) in
+** the array as the only argument.  If it returns non-zero, then the operation
+** should be retried.  If it returns zero, the custom VFS should abandon the
+** current operation.
+**
+**
• [[SQLITE_FCNTL_TEMPFILENAME]]
+** ^Applications can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control
+** to have SQLite generate a
+** temporary filename using the same algorithm that is followed to generate
+** temporary filenames for TEMP tables and other internal uses.  The
+** argument should be a char** which will be filled with the filename
+** written into memory obtained from [sqlite3_malloc()].  The caller should
+** invoke [sqlite3_free()] on the result to avoid a memory leak.
+**
+**
  • [[SQLITE_FCNTL_MMAP_SIZE]] +** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the +** maximum number of bytes that will be used for memory-mapped I/O. +** The argument is a pointer to a value of type sqlite3_int64 that +** is an advisory maximum number of bytes in the file to memory map. The +** pointer is overwritten with the old value. The limit is not changed if +** the value originally pointed to is negative, and so the current limit +** can be queried by passing in a pointer to a negative number. This +** file-control is used internally to implement [PRAGMA mmap_size]. +** +**
  • [[SQLITE_FCNTL_TRACE]] +** The [SQLITE_FCNTL_TRACE] file control provides advisory information +** to the VFS about what the higher layers of the SQLite stack are doing. +** This file control is used by some VFS activity tracing [shims]. +** The argument is a zero-terminated string. Higher layers in the +** SQLite stack may generate instances of this file control if +** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled. +** +**
  • [[SQLITE_FCNTL_HAS_MOVED]] +** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a +** pointer to an integer and it writes a boolean into that integer depending +** on whether or not the file has been renamed, moved, or deleted since it +** was first opened. +** +**
  • [[SQLITE_FCNTL_WIN32_GET_HANDLE]] +** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the +** underlying native file handle associated with a file handle. This file +** control interprets its argument as a pointer to a native file handle and +** writes the resulting value there. +** +**
  • [[SQLITE_FCNTL_WIN32_SET_HANDLE]] +** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This +** opcode causes the xFileControl method to swap the file handle with the one +** pointed to by the pArg argument. This capability is used during testing +** and only needs to be supported when SQLITE_TEST is defined. +** +**
  • [[SQLITE_FCNTL_WAL_BLOCK]] +** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might +** be advantageous to block on the next WAL lock if the lock is not immediately +** available. The WAL subsystem issues this signal during rare +** circumstances in order to fix a problem with priority inversion. +** Applications should not use this file-control. +** +**
  • [[SQLITE_FCNTL_ZIPVFS]] +** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other +** VFS should return SQLITE_NOTFOUND for this opcode. +** +**
  • [[SQLITE_FCNTL_RBU]] +** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by +** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for +** this opcode. +**
+*/
+#define SQLITE_FCNTL_LOCKSTATE               1
+#define SQLITE_FCNTL_GET_LOCKPROXYFILE       2
+#define SQLITE_FCNTL_SET_LOCKPROXYFILE       3
+#define SQLITE_FCNTL_LAST_ERRNO              4
+#define SQLITE_FCNTL_SIZE_HINT               5
+#define SQLITE_FCNTL_CHUNK_SIZE              6
+#define SQLITE_FCNTL_FILE_POINTER            7
+#define SQLITE_FCNTL_SYNC_OMITTED            8
+#define SQLITE_FCNTL_WIN32_AV_RETRY          9
+#define SQLITE_FCNTL_PERSIST_WAL            10
+#define SQLITE_FCNTL_OVERWRITE              11
+#define SQLITE_FCNTL_VFSNAME                12
+#define SQLITE_FCNTL_POWERSAFE_OVERWRITE    13
+#define SQLITE_FCNTL_PRAGMA                 14
+#define SQLITE_FCNTL_BUSYHANDLER            15
+#define SQLITE_FCNTL_TEMPFILENAME           16
+#define SQLITE_FCNTL_MMAP_SIZE              18
+#define SQLITE_FCNTL_TRACE                  19
+#define SQLITE_FCNTL_HAS_MOVED              20
+#define SQLITE_FCNTL_SYNC                   21
+#define SQLITE_FCNTL_COMMIT_PHASETWO        22
+#define SQLITE_FCNTL_WIN32_SET_HANDLE       23
+#define SQLITE_FCNTL_WAL_BLOCK              24
+#define SQLITE_FCNTL_ZIPVFS                 25
+#define SQLITE_FCNTL_RBU                    26
+#define SQLITE_FCNTL_VFS_POINTER            27
+#define SQLITE_FCNTL_JOURNAL_POINTER        28
+#define SQLITE_FCNTL_WIN32_GET_HANDLE       29
+#define SQLITE_FCNTL_PDB                    30
+
+/* deprecated names */
+#define SQLITE_GET_LOCKPROXYFILE      SQLITE_FCNTL_GET_LOCKPROXYFILE
+#define SQLITE_SET_LOCKPROXYFILE      SQLITE_FCNTL_SET_LOCKPROXYFILE
+#define SQLITE_LAST_ERRNO             SQLITE_FCNTL_LAST_ERRNO
+
+
+/*
+** CAPI3REF: Mutex Handle
+**
+** The mutex module within SQLite defines [sqlite3_mutex] to be an
+** abstract type for a mutex object.  The SQLite core never looks
+** at the internal representation of an [sqlite3_mutex].  It only
+** deals with pointers to the [sqlite3_mutex] object.
+**
+** Mutexes are created using [sqlite3_mutex_alloc()].
+*/
+typedef struct sqlite3_mutex sqlite3_mutex;
+
+/*
+** CAPI3REF: Loadable Extension Thunk
+**
+** A pointer to the opaque sqlite3_api_routines structure is passed as
+** the third parameter to entry points of [loadable extensions].  This
+** structure must be typedefed in order to work around compiler warnings
+** on some platforms.
+*/
+typedef struct sqlite3_api_routines sqlite3_api_routines;
+
+/*
+** CAPI3REF: OS Interface Object
+**
+** An instance of the sqlite3_vfs object defines the interface between
+** the SQLite core and the underlying operating system.  The "vfs"
+** in the name of the object stands for "virtual file system".  See
+** the [VFS | VFS documentation] for further information.
+**
+** The value of the iVersion field is initially 1 but may be larger in
+** future versions of SQLite.  Additional fields may be appended to this
+** object when the iVersion value is increased.  Note that the structure
+** of the sqlite3_vfs object changed in the transition between
+** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not
+** modified.
+**
+** The szOsFile field is the size of the subclassed [sqlite3_file]
+** structure used by this VFS.  mxPathname is the maximum length of
+** a pathname in this VFS.
+**
+** Registered sqlite3_vfs objects are kept on a linked list formed by
+** the pNext pointer.  The [sqlite3_vfs_register()]
+** and [sqlite3_vfs_unregister()] interfaces manage this list
+** in a thread-safe way.  The [sqlite3_vfs_find()] interface
+** searches the list.  Neither the application code nor the VFS
+** implementation should use the pNext pointer.
+**
+** The pNext field is the only field in the sqlite3_vfs
+** structure that SQLite will ever modify.  SQLite will only access
+** or modify this field while holding a particular static mutex.
+** The application should never modify anything within the sqlite3_vfs
+** object once the object has been registered.
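+**
+** As a sketch (not part of the upstream documentation), an application
+** can locate the default VFS and register an alternative without making
+** it the default; "my_vfs" is an assumed, application-defined object:
+**
+**   sqlite3_vfs *pDefault = sqlite3_vfs_find(0);
+**   extern sqlite3_vfs my_vfs;
+**   sqlite3_vfs_register(&my_vfs, 0);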
+**
+** The zName field holds the name of the VFS module.  The name must
+** be unique across all VFS modules.
+**
+** [[sqlite3_vfs.xOpen]]
+** ^SQLite guarantees that the zFilename parameter to xOpen
+** is either a NULL pointer or a string obtained
+** from xFullPathname() with an optional suffix added.
+** ^If a suffix is added to the zFilename parameter, it will
+** consist of a single "-" character followed by no more than
+** 11 alphanumeric and/or "-" characters.
+** ^SQLite further guarantees that
+** the string will be valid and unchanged until xClose() is
+** called. Because of the previous sentence,
+** the [sqlite3_file] can safely store a pointer to the
+** filename if it needs to remember the filename for some reason.
+** If the zFilename parameter to xOpen is a NULL pointer then xOpen
+** must invent its own temporary name for the file.  ^Whenever the
+** zFilename parameter is NULL it will also be the case that the
+** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE].
+**
+** The flags argument to xOpen() includes all bits set in
+** the flags argument to [sqlite3_open_v2()].  Or if [sqlite3_open()]
+** or [sqlite3_open16()] is used, then flags includes at least
+** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE].
+** If xOpen() opens a file read-only then it sets *pOutFlags to
+** include [SQLITE_OPEN_READONLY].  Other bits in *pOutFlags may be set.
+**
+** ^(SQLite will also add one of the following flags to the xOpen()
+** call, depending on the object being opened:
+**
+**
    +**
  • [SQLITE_OPEN_MAIN_DB] +**
  • [SQLITE_OPEN_MAIN_JOURNAL] +**
  • [SQLITE_OPEN_TEMP_DB] +**
  • [SQLITE_OPEN_TEMP_JOURNAL] +**
  • [SQLITE_OPEN_TRANSIENT_DB] +**
  • [SQLITE_OPEN_SUBJOURNAL] +**
  • [SQLITE_OPEN_MASTER_JOURNAL] +**
  • [SQLITE_OPEN_WAL] +**
)^ +** +** The file I/O implementation can use the object type flags to +** change the way it deals with files. For example, an application +** that does not care about crash recovery or rollback might make +** the open of a journal file a no-op. Writes to this journal would +** also be no-ops, and any attempt to read the journal would return +** SQLITE_IOERR. Or the implementation might recognize that a database +** file will be doing page-aligned sector reads and writes in a random +** order and set up its I/O subsystem accordingly. +** +** SQLite might also add one of the following flags to the xOpen method: +** +**
    +**
  • [SQLITE_OPEN_DELETEONCLOSE] +**
  • [SQLITE_OPEN_EXCLUSIVE] +**
+**
+** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be
+** deleted when it is closed.  ^The [SQLITE_OPEN_DELETEONCLOSE]
+** will be set for TEMP databases and their journals, transient
+** databases, and subjournals.
+**
+** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction
+** with the [SQLITE_OPEN_CREATE] flag, which are both directly
+** analogous to the O_EXCL and O_CREAT flags of the POSIX open()
+** API.  The SQLITE_OPEN_EXCLUSIVE flag, when paired with the
+** SQLITE_OPEN_CREATE, is used to indicate that the file should always
+** be created, and that it is an error if it already exists.
+** It is not used to indicate the file should be opened
+** for exclusive access.
+**
+** ^At least szOsFile bytes of memory are allocated by SQLite
+** to hold the [sqlite3_file] structure passed as the third
+** argument to xOpen.  The xOpen method does not have to
+** allocate the structure; it should just fill it in.  Note that
+** the xOpen method must set the sqlite3_file.pMethods to either
+** a valid [sqlite3_io_methods] object or to NULL.  xOpen must do
+** this even if the open fails.  SQLite expects that the sqlite3_file.pMethods
+** element will be valid after xOpen returns regardless of the success
+** or failure of the xOpen call.
+**
+** [[sqlite3_vfs.xAccess]]
+** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
+** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
+** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
+** to test whether a file is at least readable.  The file can be a
+** directory.
+**
+** ^SQLite will always allocate at least mxPathname+1 bytes for the
+** output buffer xFullPathname.  The exact size of the output buffer
+** is also passed as a parameter to both methods.  If the output buffer
+** is not large enough, [SQLITE_CANTOPEN] should be returned.  Since this is
+** handled as a fatal error by SQLite, vfs implementations should endeavor
+** to prevent this by setting mxPathname to a sufficiently large value.
+**
+** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64()
+** interfaces are not strictly a part of the filesystem, but they are
+** included in the VFS structure for completeness.
+** The xRandomness() function attempts to return nBytes bytes
+** of good-quality randomness into zOut.  The return value is
+** the actual number of bytes of randomness obtained.
+** The xSleep() method causes the calling thread to sleep for at
+** least the number of microseconds given.  ^The xCurrentTime()
+** method returns a Julian Day Number for the current date and time as
+** a floating point value.
+** ^The xCurrentTimeInt64() method returns, as an integer, the Julian
+** Day Number multiplied by 86400000 (the number of milliseconds in
+** a 24-hour day).
+** ^SQLite will use the xCurrentTimeInt64() method to get the current
+** date and time if that method is available (if iVersion is 2 or
+** greater and the function pointer is not NULL) and will fall back
+** to xCurrentTime() if xCurrentTimeInt64() is unavailable.
+**
+** ^The xSetSystemCall(), xGetSystemCall(), and xNextSystemCall() interfaces
+** are not used by the SQLite core.  These optional interfaces are provided
+** by some VFSes to facilitate testing of the VFS code.  By overriding
+** system calls with functions under its control, a test program can
+** simulate faults and error conditions that would otherwise be difficult
+** or impossible to induce.
The set of system calls that can be overridden +** varies from one VFS to another, and from one version of the same VFS to the +** next. Applications that use these interfaces must be prepared for any +** or all of these interfaces to be NULL or for their behavior to change +** from one release to the next. Applications must not attempt to access +** any of these methods if the iVersion of the VFS is less than 3. +*/ +typedef struct sqlite3_vfs sqlite3_vfs; +typedef void (*sqlite3_syscall_ptr)(void); +struct sqlite3_vfs { + int iVersion; /* Structure version number (currently 3) */ + int szOsFile; /* Size of subclassed sqlite3_file */ + int mxPathname; /* Maximum file pathname length */ + sqlite3_vfs *pNext; /* Next registered VFS */ + const char *zName; /* Name of this virtual file system */ + void *pAppData; /* Pointer to application-specific data */ + int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int flags, int *pOutFlags); + int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); + int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); + int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); + void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); + void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); + void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); + void (*xDlClose)(sqlite3_vfs*, void*); + int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); + int (*xSleep)(sqlite3_vfs*, int microseconds); + int (*xCurrentTime)(sqlite3_vfs*, double*); + int (*xGetLastError)(sqlite3_vfs*, int, char *); + /* + ** The methods above are in version 1 of the sqlite_vfs object + ** definition. Those that follow are added in version 2 or later + */ + int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); + /* + ** The methods above are in versions 1 and 2 of the sqlite_vfs object. + ** Those below are for version 3 and greater. + */ + int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr); + sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName); + const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName); + /* + ** The methods above are in versions 1 through 3 of the sqlite_vfs object. + ** New fields may be appended in future versions. The iVersion + ** value will increment whenever this happens. + */ +}; + +/* +** CAPI3REF: Flags for the xAccess VFS method +** +** These integer constants can be used as the third parameter to +** the xAccess method of an [sqlite3_vfs] object. They determine +** what kind of permissions the xAccess method is looking for. +** With SQLITE_ACCESS_EXISTS, the xAccess method +** simply checks whether the file exists. +** With SQLITE_ACCESS_READWRITE, the xAccess method +** checks whether the named directory is both readable and writable +** (in other words, if files can be added, removed, and renamed within +** the directory). +** The SQLITE_ACCESS_READWRITE constant is currently used only by the +** [temp_store_directory pragma], though this could change in a future +** release of SQLite. +** With SQLITE_ACCESS_READ, the xAccess method +** checks whether the file is readable. The SQLITE_ACCESS_READ constant is +** currently unused, though it might be used in a future release of +** SQLite. 
+*/ +#define SQLITE_ACCESS_EXISTS 0 +#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ +#define SQLITE_ACCESS_READ 2 /* Unused */ + +/* +** CAPI3REF: Flags for the xShmLock VFS method +** +** These integer constants define the various locking operations +** allowed by the xShmLock method of [sqlite3_io_methods]. The +** following are the only legal combinations of flags to the +** xShmLock method: +** +**
    +**
  • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED +**
  • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE +**
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED +**
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE +**
+** +** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as +** was given on the corresponding lock. +** +** The xShmLock method can transition between unlocked and SHARED or +** between unlocked and EXCLUSIVE. It cannot transition between SHARED +** and EXCLUSIVE. +*/ +#define SQLITE_SHM_UNLOCK       1 +#define SQLITE_SHM_LOCK         2 +#define SQLITE_SHM_SHARED       4 +#define SQLITE_SHM_EXCLUSIVE    8 + +/* +** CAPI3REF: Maximum xShmLock index +** +** The xShmLock method on [sqlite3_io_methods] may use values +** between 0 and this upper bound as its "offset" argument. +** The SQLite core will never attempt to acquire or release a +** lock outside of this range. +*/ +#define SQLITE_SHM_NLOCK        8 + + +/* +** CAPI3REF: Initialize The SQLite Library +** +** ^The sqlite3_initialize() routine initializes the +** SQLite library. ^The sqlite3_shutdown() routine +** deallocates any resources that were allocated by sqlite3_initialize(). +** These routines are designed to aid in process initialization and +** shutdown on embedded systems. Workstation applications using +** SQLite normally do not need to invoke either of these routines. +** +** A call to sqlite3_initialize() is an "effective" call if it is +** the first time sqlite3_initialize() is invoked during the lifetime of +** the process, or if it is the first time sqlite3_initialize() is invoked +** following a call to sqlite3_shutdown(). ^(Only an effective call +** of sqlite3_initialize() does any initialization. All other calls +** are harmless no-ops.)^ +** +** A call to sqlite3_shutdown() is an "effective" call if it is the first +** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only +** an effective call to sqlite3_shutdown() does any deinitialization. +** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^ +** +** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown() +** is not. The sqlite3_shutdown() interface must only be called from a +** single thread. All open [database connections] must be closed and all +** other SQLite resources must be deallocated prior to invoking +** sqlite3_shutdown(). +** +** Among other things, ^sqlite3_initialize() will invoke +** sqlite3_os_init(). Similarly, ^sqlite3_shutdown() +** will invoke sqlite3_os_end(). +** +** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success. +** ^If for some reason, sqlite3_initialize() is unable to initialize +** the library (perhaps it is unable to allocate a needed resource such +** as a mutex) it returns an [error code] other than [SQLITE_OK]. +** +** ^The sqlite3_initialize() routine is called internally by many other +** SQLite interfaces so that an application usually does not need to +** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] +** calls sqlite3_initialize() so the SQLite library will be automatically +** initialized when [sqlite3_open()] is called if it has not been initialized +** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] +** compile-time option, then the automatic calls to sqlite3_initialize() +** are omitted and the application must call sqlite3_initialize() directly +** prior to using any other SQLite interface. For maximum portability, +** it is recommended that applications always invoke sqlite3_initialize() +** directly prior to using any other SQLite interface. Future releases +** of SQLite may require this.
In other words, the behavior exhibited +** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the +** default behavior in some future release of SQLite. +** +** The sqlite3_os_init() routine does operating-system specific +** initialization of the SQLite library. The sqlite3_os_end() +** routine undoes the effect of sqlite3_os_init(). Typical tasks +** performed by these routines include allocation or deallocation +** of static resources, initialization of global variables, +** setting up a default [sqlite3_vfs] module, or setting up +** a default configuration using [sqlite3_config()]. +** +** The application should never invoke either sqlite3_os_init() +** or sqlite3_os_end() directly. The application should only invoke +** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() +** interface is called automatically by sqlite3_initialize() and +** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate +** implementations for sqlite3_os_init() and sqlite3_os_end() +** are built into SQLite when it is compiled for Unix, Windows, or OS/2. +** When [custom builds | built for other platforms] +** (using the [SQLITE_OS_OTHER=1] compile-time +** option) the application must supply a suitable implementation for +** sqlite3_os_init() and sqlite3_os_end(). An application-supplied +** implementation of sqlite3_os_init() or sqlite3_os_end() +** must return [SQLITE_OK] on success and some other [error code] upon +** failure. +*/ +SQLITE_API int sqlite3_initialize(void); +SQLITE_API int sqlite3_shutdown(void); +SQLITE_API int sqlite3_os_init(void); +SQLITE_API int sqlite3_os_end(void); + +/* +** CAPI3REF: Configuring The SQLite Library +** +** The sqlite3_config() interface is used to make global configuration +** changes to SQLite in order to tune SQLite to the specific needs of +** the application. The default configuration is recommended for most +** applications and so this routine is usually not necessary. It is +** provided to support rare applications with unusual needs. +** +** The sqlite3_config() interface is not threadsafe. The application +** must ensure that no other SQLite interfaces are invoked by other +** threads while sqlite3_config() is running. +** +** The sqlite3_config() interface +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** ^If sqlite3_config() is called after [sqlite3_initialize()] and before +** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. +** Note, however, that ^sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** +** The first argument to sqlite3_config() is an integer +** [configuration option] that determines +** what property of SQLite is to be configured. Subsequent arguments +** vary depending on the [configuration option] +** in the first argument. +** +** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. +** ^If the option is unknown or SQLite is unable to set the option +** then this routine returns a non-zero [error code]. +*/ +SQLITE_API int sqlite3_config(int, ...); + +/* +** CAPI3REF: Configure database connections +** METHOD: sqlite3 +** +** The sqlite3_db_config() interface is used to make configuration +** changes to a [database connection]. The interface is similar to +** [sqlite3_config()] except that the changes apply to a single +** [database connection] (specified in the first argument). 
+** +** The second argument to sqlite3_db_config(D,V,...) is the +** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code +** that indicates what aspect of the [database connection] is being configured. +** Subsequent arguments vary depending on the configuration verb. +** +** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if +** the call is considered successful. +*/ +SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...); + +/* +** CAPI3REF: Memory Allocation Routines +** +** An instance of this object defines the interface between SQLite +** and low-level memory allocation routines. +** +** This object is used in only one place in the SQLite interface. +** A pointer to an instance of this object is the argument to +** [sqlite3_config()] when the configuration option is +** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. +** By creating an instance of this object +** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) +** during configuration, an application can specify an alternative +** memory allocation subsystem for SQLite to use for all of its +** dynamic memory needs. +** +** Note that SQLite comes with several [built-in memory allocators] +** that are perfectly adequate for the overwhelming majority of applications +** and that this object is only useful to a tiny minority of applications +** with specialized memory allocation requirements. This object is +** also used during testing of SQLite in order to specify an alternative +** memory allocator that simulates memory out-of-memory conditions in +** order to verify that SQLite recovers gracefully from such +** conditions. +** +** The xMalloc, xRealloc, and xFree methods must work like the +** malloc(), realloc() and free() functions from the standard C library. +** ^SQLite guarantees that the second argument to +** xRealloc is always a value returned by a prior call to xRoundup. +** +** xSize should return the allocated size of a memory allocation +** previously obtained from xMalloc or xRealloc. The allocated size +** is always at least as big as the requested size but may be larger. +** +** The xRoundup method returns what would be the allocated size of +** a memory allocation given a particular requested size. Most memory +** allocators round up memory allocations at least to the next multiple +** of 8. Some allocators round up to a larger multiple or to a power of 2. +** Every memory allocation request coming in through [sqlite3_malloc()] +** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, +** that causes the corresponding memory allocation to fail. +** +** The xInit method initializes the memory allocator. For example, +** it might allocate any required mutexes or initialize internal data +** structures. The xShutdown method is invoked (indirectly) by +** [sqlite3_shutdown()] and should deallocate any resources acquired +** by xInit. The pAppData pointer is used as the only parameter to +** xInit and xShutdown. +** +** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes +** the xInit method, so the xInit method need not be threadsafe. The +** xShutdown method is only called from [sqlite3_shutdown()] so it does +** not need to be threadsafe either. For all other methods, SQLite +** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the +** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which +** it is by default) and so the methods are automatically serialized.
+** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other +** methods must be threadsafe or else make their own arrangements for +** serialization. +** +** SQLite will never invoke xInit() more than once without an intervening +** call to xShutdown(). +*/ +typedef struct sqlite3_mem_methods sqlite3_mem_methods; +struct sqlite3_mem_methods { + void *(*xMalloc)(int); /* Memory allocation function */ + void (*xFree)(void*); /* Free a prior allocation */ + void *(*xRealloc)(void*,int); /* Resize an allocation */ + int (*xSize)(void*); /* Return the size of an allocation */ + int (*xRoundup)(int); /* Round up request size to allocation size */ + int (*xInit)(void*); /* Initialize the memory allocator */ + void (*xShutdown)(void*); /* Deinitialize the memory allocator */ + void *pAppData; /* Argument to xInit() and xShutdown() */ +}; + +/* +** CAPI3REF: Configuration Options +** KEYWORDS: {configuration option} +** +** These constants are the available integer configuration options that +** can be passed as the first argument to the [sqlite3_config()] interface. +** +** New configuration options may be added in future releases of SQLite. +** Existing configuration options might be discontinued. Applications +** should check the return code from [sqlite3_config()] to make sure that +** the call worked. The [sqlite3_config()] interface will return a +** non-zero [error code] if a discontinued or unsupported configuration option +** is invoked. +** +**
+** [[SQLITE_CONFIG_SINGLETHREAD]]
SQLITE_CONFIG_SINGLETHREAD
+**
There are no arguments to this option. ^This option sets the +** [threading mode] to Single-thread. In other words, it disables +** all mutexing and puts SQLite into a mode where it can only be used +** by a single thread. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** it is not possible to change the [threading mode] from its default +** value of Single-thread and so [sqlite3_config()] will return +** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD +** configuration option.
+** +** [[SQLITE_CONFIG_MULTITHREAD]]
SQLITE_CONFIG_MULTITHREAD
+**
There are no arguments to this option. ^This option sets the +** [threading mode] to Multi-thread. In other words, it disables +** mutexing on [database connection] and [prepared statement] objects. +** The application is responsible for serializing access to +** [database connections] and [prepared statements]. But other mutexes +** are enabled so that SQLite will be safe to use in a multi-threaded +** environment as long as no two threads attempt to use the same +** [database connection] at the same time. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** it is not possible to set the Multi-thread [threading mode] and +** [sqlite3_config()] will return [SQLITE_ERROR] if called with the +** SQLITE_CONFIG_MULTITHREAD configuration option.
+** +** [[SQLITE_CONFIG_SERIALIZED]]
SQLITE_CONFIG_SERIALIZED
+**
There are no arguments to this option. ^This option sets the +** [threading mode] to Serialized. In other words, this option enables +** all mutexes including the recursive +** mutexes on [database connection] and [prepared statement] objects. +** In this mode (which is the default when SQLite is compiled with +** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access +** to [database connections] and [prepared statements] so that the +** application is free to use the same [database connection] or the +** same [prepared statement] in different threads at the same time. +** ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** it is not possible to set the Serialized [threading mode] and +** [sqlite3_config()] will return [SQLITE_ERROR] if called with the +** SQLITE_CONFIG_SERIALIZED configuration option.
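As a hedged illustration of selecting among these three modes (an editorial sketch, not part of the header; the error handling is our own), the configuration call must come before the library initializes:

    #include <sqlite3.h>
    #include <stdio.h>

    int main(void){
      /* Must run before sqlite3_initialize(); otherwise SQLITE_MISUSE. */
      int rc = sqlite3_config(SQLITE_CONFIG_SERIALIZED);
      if( rc!=SQLITE_OK ){
        /* e.g. a SQLITE_THREADSAFE=0 build cannot leave Single-thread mode */
        fprintf(stderr, "cannot select Serialized mode: %d\n", rc);
        return 1;
      }
      sqlite3_initialize();
      /* ... the library may now be used freely from multiple threads ... */
      sqlite3_shutdown();
      return 0;
    }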
+** +** [[SQLITE_CONFIG_MALLOC]]
SQLITE_CONFIG_MALLOC
+**
^(The SQLITE_CONFIG_MALLOC option takes a single argument which is +** a pointer to an instance of the [sqlite3_mem_methods] structure. +** The argument specifies +** alternative low-level memory allocation routines to be used in place of +** the memory allocation routines built into SQLite.)^ ^SQLite makes +** its own private copy of the content of the [sqlite3_mem_methods] structure +** before the [sqlite3_config()] call returns.
+** +** [[SQLITE_CONFIG_GETMALLOC]]
SQLITE_CONFIG_GETMALLOC
+**
^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which +** is a pointer to an instance of the [sqlite3_mem_methods] structure. +** The [sqlite3_mem_methods] +** structure is filled with the currently defined memory allocation routines.)^ +** This option can be used to overload the default memory allocation +** routines with a wrapper that simulates memory allocation failure or +** tracks memory usage, for example.
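Taken together, SQLITE_CONFIG_GETMALLOC and SQLITE_CONFIG_MALLOC support exactly this wrapper pattern. A minimal sketch, assuming a single-threaded setup phase; the names countingMalloc and installCountingAllocator are illustrative only:

    #include <sqlite3.h>

    static sqlite3_mem_methods defaultMem;  /* the saved built-in allocator */
    static long nAllocs = 0;                /* crude counter; not thread-safe */

    static void *countingMalloc(int n){
      nAllocs++;
      return defaultMem.xMalloc(n);
    }

    /* Call before sqlite3_initialize(). */
    int installCountingAllocator(void){
      sqlite3_mem_methods m;
      int rc = sqlite3_config(SQLITE_CONFIG_GETMALLOC, &defaultMem);
      if( rc!=SQLITE_OK ) return rc;
      m = defaultMem;              /* keep xFree, xRealloc, xSize, xRoundup */
      m.xMalloc = countingMalloc;
      /* SQLite copies the structure, so a stack-local is fine here. */
      return sqlite3_config(SQLITE_CONFIG_MALLOC, &m);
    }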
+** +** [[SQLITE_CONFIG_MEMSTATUS]]
SQLITE_CONFIG_MEMSTATUS
+**
^The SQLITE_CONFIG_MEMSTATUS option takes a single argument of type int, +** interpreted as a boolean, which enables or disables the collection of +** memory allocation statistics. ^(When memory allocation statistics are +** disabled, the following SQLite interfaces become non-operational: +**
    +**
  • [sqlite3_memory_used()] +**
  • [sqlite3_memory_highwater()] +**
  • [sqlite3_soft_heap_limit64()] +**
  • [sqlite3_status64()] +**
)^ +** ^Memory allocation statistics are enabled by default unless SQLite is +** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory +** allocation statistics are disabled by default. +**
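A deliberately tiny sketch (editorial; disableMemoryStats is our name) that trades the statistics, and the mutex they cost on every allocation, away:

    #include <sqlite3.h>

    /* Call before sqlite3_initialize(); afterwards sqlite3_memory_used()
    ** and the other interfaces listed above stop returning live data. */
    int disableMemoryStats(void){
      return sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0);
    }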
+** +** [[SQLITE_CONFIG_SCRATCH]]
SQLITE_CONFIG_SCRATCH
+**
^The SQLITE_CONFIG_SCRATCH option specifies a static memory buffer +** that SQLite can use for scratch memory. ^(There are three arguments +** to SQLITE_CONFIG_SCRATCH: A pointer to an 8-byte +** aligned memory buffer from which the scratch allocations will be +** drawn, the size of each scratch allocation (sz), +** and the maximum number of scratch allocations (N).)^ +** The first argument must be a pointer to an 8-byte aligned buffer +** of at least sz*N bytes of memory. +** ^SQLite will not use more than one scratch buffer per thread. +** ^SQLite will never request a scratch buffer that is more than 6 +** times the database page size. +** ^If SQLite needs additional +** scratch memory beyond what is provided by this configuration option, then +** [sqlite3_malloc()] will be used to obtain the memory needed.
+** ^When the application provides any amount of scratch memory using +** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large +** [sqlite3_malloc|heap allocations]. +** This can help [Robson proof|prevent memory allocation failures] due to heap +** fragmentation in low-memory embedded systems. +**
+** +** [[SQLITE_CONFIG_PAGECACHE]]
SQLITE_CONFIG_PAGECACHE
+**
^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool +** that SQLite can use for the database page cache with the default page +** cache implementation. +** This configuration option is a no-op if an application-defined page +** cache implementation is loaded using [SQLITE_CONFIG_PCACHE2]. +** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to +** 8-byte aligned memory (pMem), the size of each page cache line (sz), +** and the number of cache lines (N). +** The sz argument should be the size of the largest database page +** (a power of two between 512 and 65536) plus some extra bytes for each +** page header. ^The number of extra bytes needed by the page header +** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ]. +** ^It is harmless, apart from the wasted memory, +** for the sz parameter to be larger than necessary. The pMem +** argument must be either a NULL pointer or a pointer to an 8-byte +** aligned block of memory of at least sz*N bytes, otherwise +** subsequent behavior is undefined. +** ^When pMem is not NULL, SQLite will strive to use the memory provided +** to satisfy page cache needs, falling back to [sqlite3_malloc()] if +** a page cache line is larger than sz bytes or if all of the pMem buffer +** is exhausted. +** ^If pMem is NULL and N is non-zero, then each database connection +** does an initial bulk allocation for page cache memory +** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or +** of -1024*N bytes if N is negative. ^If additional +** page cache memory is needed beyond what is provided by the initial +** allocation, then SQLite goes to [sqlite3_malloc()] separately for each +** additional cache line.
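A sketch of the run-time sizing dance described above (editorial example; configurePageCache and the 4096-byte/256-line sizing are illustrative assumptions, and the header overhead is queried rather than hard-coded):

    #include <sqlite3.h>
    #include <stdlib.h>

    /* Call before sqlite3_initialize(). */
    int configurePageCache(void){
      int szHdr = 0;
      int rc = sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &szHdr);
      if( rc!=SQLITE_OK ) return rc;
      int sz = 4096 + szHdr;                  /* page size plus per-page header */
      int N  = 256;                           /* number of cache lines */
      void *pMem = malloc((size_t)sz * (size_t)N);  /* malloc gives 8-byte alignment */
      if( pMem==0 ) return SQLITE_NOMEM;
      return sqlite3_config(SQLITE_CONFIG_PAGECACHE, pMem, sz, N);
    }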
+** +** [[SQLITE_CONFIG_HEAP]]
SQLITE_CONFIG_HEAP
+**
^The SQLITE_CONFIG_HEAP option specifies a static memory buffer +** that SQLite will use for all of its dynamic memory allocation needs +** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and +** [SQLITE_CONFIG_PAGECACHE]. +** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled +** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns +** [SQLITE_ERROR] if invoked otherwise. +** ^There are three arguments to SQLITE_CONFIG_HEAP: +** An 8-byte aligned pointer to the memory, +** the number of bytes in the memory buffer, and the minimum allocation size. +** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts +** to using its default memory allocator (the system malloc() implementation), +** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the +** memory pointer is not NULL then the alternative memory +** allocator is engaged to handle all of SQLite's memory allocation needs. +** The first pointer (the memory pointer) must be aligned to an 8-byte +** boundary or subsequent behavior of SQLite will be undefined. +** The minimum allocation size is capped at 2**12. Reasonable values +** for the minimum allocation size are 2**5 through 2**8.
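A hedged sketch for builds that do define SQLITE_ENABLE_MEMSYS5 (the 4 MiB arena size and the name configureHeap are our choices, not anything the header prescribes):

    #include <sqlite3.h>

    /* 4 MiB arena, 8-byte aligned by virtue of the element type.  The
    ** minimum allocation size of 64 lies in the suggested 2**5..2**8 band. */
    static sqlite3_int64 heapArena[4*1024*1024/8];

    int configureHeap(void){      /* call before sqlite3_initialize() */
      return sqlite3_config(SQLITE_CONFIG_HEAP, heapArena,
                            (int)sizeof(heapArena), 64);
    }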
+** +** [[SQLITE_CONFIG_MUTEX]]
SQLITE_CONFIG_MUTEX
+**
^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a +** pointer to an instance of the [sqlite3_mutex_methods] structure. +** The argument specifies alternative low-level mutex routines to be used +** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of +** the content of the [sqlite3_mutex_methods] structure before the call to +** [sqlite3_config()] returns. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** the entire mutexing subsystem is omitted from the build and hence calls to +** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will +** return [SQLITE_ERROR].
+** +** [[SQLITE_CONFIG_GETMUTEX]]
SQLITE_CONFIG_GETMUTEX
+**
^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which +** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The +** [sqlite3_mutex_methods] +** structure is filled with the currently defined mutex routines.)^ +** This option can be used to overload the default mutex allocation +** routines with a wrapper used to track mutex usage for performance +** profiling or testing, for example. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** the entire mutexing subsystem is omitted from the build and hence calls to +** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will +** return [SQLITE_ERROR].
+** +** [[SQLITE_CONFIG_LOOKASIDE]]
SQLITE_CONFIG_LOOKASIDE
+**
^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine +** the default size of lookaside memory on each [database connection]. +** The first argument is the +** size of each lookaside buffer slot and the second is the number of +** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE +** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] +** option to [sqlite3_db_config()] can be used to change the lookaside +** configuration on individual connections.)^
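A one-call sketch of setting the process-wide default (editorial; the 512-byte/128-slot sizing is an arbitrary example):

    #include <sqlite3.h>

    /* Process-wide default: 512-byte slots, 128 slots per connection.
    ** Individual connections can still override this later with
    ** SQLITE_DBCONFIG_LOOKASIDE. */
    int setLookasideDefaults(void){   /* call before sqlite3_initialize() */
      return sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 512, 128);
    }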
+** +** [[SQLITE_CONFIG_PCACHE2]]
SQLITE_CONFIG_PCACHE2
+**
^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is +** a pointer to an [sqlite3_pcache_methods2] object. This object specifies +** the interface to a custom page cache implementation.)^ +** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.
+** +** [[SQLITE_CONFIG_GETPCACHE2]]
SQLITE_CONFIG_GETPCACHE2
+**
^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which +** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies +** the current page cache implementation into that object.)^
+** +** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
+**
The SQLITE_CONFIG_LOG option is used to configure the SQLite +** global [error log]. +** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a +** function with a call signature of void(*)(void*,int,const char*), +** and a pointer to void. ^If the function pointer is not NULL, it is +** invoked by [sqlite3_log()] to process each logging event. ^If the +** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. +** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is +** passed through as the first parameter to the application-defined logger +** function whenever that function is invoked. ^The second parameter to +** the logger function is a copy of the first parameter to the corresponding +** [sqlite3_log()] call and is intended to be a [result code] or an +** [extended result code]. ^The third parameter passed to the logger is +** the log message after formatting via [sqlite3_snprintf()]. +** The SQLite logging interface is not reentrant; the logger function +** supplied by the application must not invoke any SQLite interface. +** In a multi-threaded application, the application-defined logger +** function must be threadsafe.
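The usual shape of such a logger, as a hedged sketch (errorLogCallback and the stderr destination are our choices):

    #include <sqlite3.h>
    #include <stdio.h>

    /* Must be threadsafe and must not call back into SQLite. */
    static void errorLogCallback(void *pArg, int iErrCode, const char *zMsg){
      fprintf((FILE*)pArg, "sqlite(%d): %s\n", iErrCode, zMsg);
    }

    int installLogger(void){      /* call before sqlite3_initialize() */
      return sqlite3_config(SQLITE_CONFIG_LOG, errorLogCallback, stderr);
    }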
+** +** [[SQLITE_CONFIG_URI]]
SQLITE_CONFIG_URI +**
^(The SQLITE_CONFIG_URI option takes a single argument of type int. +** If non-zero, then URI handling is globally enabled. If the parameter is zero, +** then URI handling is globally disabled.)^ ^If URI handling is globally +** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()], +** [sqlite3_open16()] or +** specified as part of [ATTACH] commands are interpreted as URIs, regardless +** of whether or not the [SQLITE_OPEN_URI] flag is set when the database +** connection is opened. ^If it is globally disabled, filenames are +** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the +** database connection is opened. ^(By default, URI handling is globally +** disabled. The default value may be changed by compiling with the +** [SQLITE_USE_URI] symbol defined.)^ +** +** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]]
SQLITE_CONFIG_COVERING_INDEX_SCAN +**
^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer +** argument which is interpreted as a boolean in order to enable or disable +** the use of covering indices for full table scans in the query optimizer. +** ^The default setting is determined +** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on" +** if that compile-time option is omitted. +** The ability to disable the use of covering indices for full table scans +** is provided because some incorrectly coded legacy applications might +** malfunction when the optimization is enabled. Providing the ability to +** disable the optimization allows the older, buggy application code to work +** without change even with newer versions of SQLite. +** +** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]] +**
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE +**
These options are obsolete and should not be used by new code. +** They are retained for backwards compatibility but are now no-ops. +**
+** +** [[SQLITE_CONFIG_SQLLOG]] +**
SQLITE_CONFIG_SQLLOG +**
This option is only available if SQLite is compiled with the +** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should +** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int). +** The second should be of type (void*). The callback is invoked by the library +** in three separate circumstances, identified by the value passed as the +** fourth parameter. If the fourth parameter is 0, then the database connection +** passed as the second argument has just been opened. The third argument +** points to a buffer containing the name of the main database file. If the +** fourth parameter is 1, then the SQL statement that the third parameter +** points to has just been executed. Or, if the fourth parameter is 2, then +** the connection being passed as the second parameter is being closed. The +** third parameter is passed NULL in this case. An example of using this +** configuration option can be seen in the "test_sqllog.c" source file in +** the canonical SQLite source tree.
+** +** [[SQLITE_CONFIG_MMAP_SIZE]] +**
SQLITE_CONFIG_MMAP_SIZE +**
^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values +** that are the default mmap size limit (the default setting for +** [PRAGMA mmap_size]) and the maximum allowed mmap size limit. +** ^The default setting can be overridden by each database connection using +** either the [PRAGMA mmap_size] command, or by using the +** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size +** will be silently truncated if necessary so that it does not exceed the +** compile-time maximum mmap size set by the +** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^ +** ^If either argument to this option is negative, then that argument is +** changed to its compile-time default. +** +** [[SQLITE_CONFIG_WIN32_HEAPSIZE]] +**
SQLITE_CONFIG_WIN32_HEAPSIZE +**
^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is +** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro +** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value +** that specifies the maximum size of the created heap. +** +** [[SQLITE_CONFIG_PCACHE_HDRSZ]] +**
SQLITE_CONFIG_PCACHE_HDRSZ +**
^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which +** is a pointer to an integer and writes into that integer the number of extra +** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE]. +** The amount of extra space required can change depending on the compiler, +** target platform, and SQLite version. +** +** [[SQLITE_CONFIG_PMASZ]] +**
SQLITE_CONFIG_PMASZ +**
^The SQLITE_CONFIG_PMASZ option takes a single parameter which +** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded +** sorter to that integer. The default minimum PMA Size is set by the +** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched +** to help with sort operations when multithreaded sorting +** is enabled (using the [PRAGMA threads] command) and the amount of content +** to be sorted exceeds the page size times the minimum of the +** [PRAGMA cache_size] setting and this value. +** +** [[SQLITE_CONFIG_STMTJRNL_SPILL]] +**
SQLITE_CONFIG_STMTJRNL_SPILL +**
^The SQLITE_CONFIG_STMTJRNL_SPILL option takes a single parameter which +** becomes the [statement journal] spill-to-disk threshold. +** [Statement journals] are held in memory until their size (in bytes) +** exceeds this threshold, at which point they are written to disk. +** Or if the threshold is -1, statement journals are always held +** exclusively in memory. +** Since many statement journals never become large, setting the spill +** threshold to a value such as 64KiB can greatly reduce the amount of +** I/O required to support statement rollback. +** The default value for this setting is controlled by the +** [SQLITE_STMTJRNL_SPILL] compile-time option. +**
+*/ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ +#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ +#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ +#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */ + +/* +** CAPI3REF: Database Connection Configuration Options +** +** These constants are the available integer configuration options that +** can be passed as the second argument to the [sqlite3_db_config()] interface. +** +** New configuration options may be added in future releases of SQLite. +** Existing configuration options might be discontinued. Applications +** should check the return code from [sqlite3_db_config()] to make sure that +** the call worked. ^The [sqlite3_db_config()] interface will return a +** non-zero [error code] if a discontinued or unsupported configuration option +** is invoked. +** +**
+**
SQLITE_DBCONFIG_LOOKASIDE
+**
^This option takes three additional arguments that determine the +** [lookaside memory allocator] configuration for the [database connection]. +** ^The first argument (the third parameter to [sqlite3_db_config()]) is a +** pointer to a memory buffer to use for lookaside memory. +** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb +** may be NULL in which case SQLite will allocate the +** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the +** size of each lookaside buffer slot. ^The third argument is the number of +** slots. The size of the buffer in the first argument must be greater than +** or equal to the product of the second and third arguments. The buffer +** must be aligned to an 8-byte boundary. ^If the second argument to +** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally +** rounded down to the next smaller multiple of 8. ^(The lookaside memory +** configuration for a database connection can only be changed when that +** connection is not currently using lookaside memory, or in other words +** when the "current value" returned by +** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. +** Any attempt to change the lookaside memory configuration when lookaside +** memory is in use leaves the configuration unchanged and returns +** [SQLITE_BUSY].)^
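A sketch of a per-connection override under the rules above (editorial; the buffer size and names are illustrative, and a real application would give each connection its own buffer):

    #include <sqlite3.h>

    /* 64 slots of 1024 bytes = 64 KiB; sqlite3_int64 elements keep the
    ** buffer 8-byte aligned.  Returns SQLITE_BUSY if lookaside is in use. */
    static sqlite3_int64 lookasideBuf[64*1024/8];

    int configureConnLookaside(sqlite3 *db){
      return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE,
                               (void*)lookasideBuf, 1024, 64);
    }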
+** +**
SQLITE_DBCONFIG_ENABLE_FKEY
+**
^This option is used to enable or disable the enforcement of +** [foreign key constraints]. There should be two additional arguments. +** The first argument is an integer which is 0 to disable FK enforcement, +** positive to enable FK enforcement or negative to leave FK enforcement +** unchanged. The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether FK enforcement is off or on +** following this call. The second parameter may be a NULL pointer, in +** which case the FK enforcement setting is not reported back.
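A short sketch of the enable-and-verify pattern this describes (editorial; enableForeignKeys is our name):

    #include <sqlite3.h>

    /* Turn enforcement on, then confirm via the out-parameter.  Passing
    ** -1 instead of 1 would only query the current state. */
    int enableForeignKeys(sqlite3 *db){
      int isOn = 0;
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &isOn);
      if( rc!=SQLITE_OK ) return rc;
      return isOn ? SQLITE_OK : SQLITE_ERROR;
    }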
+** +**
SQLITE_DBCONFIG_ENABLE_TRIGGER
+**
^This option is used to enable or disable [CREATE TRIGGER | triggers]. +** There should be two additional arguments. +** The first argument is an integer which is 0 to disable triggers, +** positive to enable triggers or negative to leave the setting unchanged. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether triggers are disabled or enabled +** following this call. The second parameter may be a NULL pointer, in +** which case the trigger setting is not reported back.
+** +**
SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
+**
^This option is used to enable or disable the two-argument +** version of the [fts3_tokenizer()] function which is part of the +** [FTS3] full-text search engine extension. +** There should be two additional arguments. +** The first argument is an integer which is 0 to disable fts3_tokenizer() or +** positive to enable fts3_tokenizer() or negative to leave the setting +** unchanged. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled +** following this call. The second parameter may be a NULL pointer, in +** which case the new setting is not reported back.
+** +**
SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION
+**
^This option is used to enable or disable the [sqlite3_load_extension()] +** interface independently of the [load_extension()] SQL function. +** The [sqlite3_enable_load_extension()] API enables or disables both the +** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. +** There should be two additional arguments. +** When the first argument to this interface is 1, then only the C-API is +** enabled and the SQL function remains disabled. If the first argument to +** this interface is 0, then both the C-API and the SQL function are disabled. +** If the first argument is -1, then no changes are made to the state of either +** the C-API or the SQL function. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether the [sqlite3_load_extension()] interface +** is disabled or enabled following this call. The second parameter may +** be a NULL pointer, in which case the new setting is not reported back. +**
+** +**
SQLITE_DBCONFIG_MAINDBNAME
+**
^This option is used to change the name of the "main" database +** schema. ^The sole argument is a pointer to a constant UTF8 string +** which will become the new schema name in place of "main". ^SQLite +** does not make a copy of the new main schema name string, so the application +** must ensure that the argument passed into this DBCONFIG option is unchanged +** until after the database connection closes. +**
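A sketch illustrating the lifetime requirement (editorial; the name "primary" is arbitrary, and a string literal has static storage duration, which satisfies the no-copy rule):

    #include <sqlite3.h>

    /* The new name is not copied, so a string literal (or another
    ** long-lived buffer) must be used; a stack buffer would dangle. */
    int renameMainSchema(sqlite3 *db){
      return sqlite3_db_config(db, SQLITE_DBCONFIG_MAINDBNAME, "primary");
    }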
+** +**
SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
+**
Usually, when a database in wal mode is closed or detached from a +** database handle, SQLite checks if this will mean that there are now no +** connections at all to the database. If so, it performs a checkpoint +** operation before closing the connection. This option may be used to +** override this behaviour. The first parameter passed to this operation +** is an integer - non-zero to disable checkpoints-on-close, or zero (the +** default) to enable them. The second parameter is a pointer to an integer +** into which is written 0 or 1 to indicate whether checkpoints-on-close +** have been disabled - 0 if they are not disabled, 1 if they are. +**
+** +**
+*/ +#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ +#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ +#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ +#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */ + + +/* +** CAPI3REF: Enable Or Disable Extended Result Codes +** METHOD: sqlite3 +** +** ^The sqlite3_extended_result_codes() routine enables or disables the +** [extended result codes] feature of SQLite. ^The extended result +** codes are disabled by default for historical compatibility. +*/ +SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff); + +/* +** CAPI3REF: Last Insert Rowid +** METHOD: sqlite3 +** +** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables) +** has a unique 64-bit signed +** integer key called the [ROWID | "rowid"]. ^The rowid is always available +** as an undeclared column named ROWID, OID, or _ROWID_ as long as those +** names are not also used by explicitly declared columns. ^If +** the table has a column of type [INTEGER PRIMARY KEY] then that column +** is another alias for the rowid. +** +** ^The sqlite3_last_insert_rowid(D) interface returns the [rowid] of the +** most recent successful [INSERT] into a rowid table or [virtual table] +** on database connection D. +** ^Inserts into [WITHOUT ROWID] tables are not recorded. +** ^If no successful [INSERT]s into rowid tables +** have ever occurred on the database connection D, +** then sqlite3_last_insert_rowid(D) returns zero. +** +** ^(If an [INSERT] occurs within a trigger or within a [virtual table] +** method, then this routine will return the [rowid] of the inserted +** row as long as the trigger or virtual table method is running. +** But once the trigger or virtual table method ends, the value returned +** by this routine reverts to what it was before the trigger or virtual +** table method began.)^ +** +** ^An [INSERT] that fails due to a constraint violation is not a +** successful [INSERT] and does not change the value returned by this +** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, +** and INSERT OR ABORT make no changes to the return value of this +** routine when their insertion fails. ^(When INSERT OR REPLACE +** encounters a constraint violation, it does not fail. The +** INSERT continues to completion after deleting rows that caused +** the constraint problem so INSERT OR REPLACE will always change +** the return value of this interface.)^ +** +** ^For the purposes of this routine, an [INSERT] is considered to +** be successful even if it is subsequently rolled back. +** +** This function is accessible to SQL statements via the +** [last_insert_rowid() SQL function]. +** +** If a separate thread performs a new [INSERT] on the same +** database connection while the [sqlite3_last_insert_rowid()] +** function is running and thus changes the last insert [rowid], +** then the value returned by [sqlite3_last_insert_rowid()] is +** unpredictable and might not equal either the old or the new +** last insert [rowid]. 
+*/ +SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); + +/* +** CAPI3REF: Count The Number Of Rows Modified +** METHOD: sqlite3 +** +** ^This function returns the number of rows modified, inserted or +** deleted by the most recently completed INSERT, UPDATE or DELETE +** statement on the database connection specified by the only parameter. +** ^Executing any other type of SQL statement does not modify the value +** returned by this function. +** +** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are +** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], +** [foreign key actions] or [REPLACE] constraint resolution are not counted. +** +** Changes to a view that are intercepted by +** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value +** returned by sqlite3_changes() immediately after an INSERT, UPDATE or +** DELETE statement run on a view is always zero. Only changes made to real +** tables are counted. +** +** Things are more complicated if the sqlite3_changes() function is +** executed while a trigger program is running. This may happen if the +** program uses the [changes() SQL function], or if some other callback +** function invokes sqlite3_changes() directly. Essentially: +** +**
    +**
  • ^(Before entering a trigger program the value returned by the +** sqlite3_changes() function is saved. After the trigger program +** has finished, the original value is restored.)^ +** +**
  • ^(Within a trigger program each INSERT, UPDATE and DELETE +** statement sets the value returned by sqlite3_changes() +** upon completion as normal. Of course, this value will not include +** any changes performed by sub-triggers, as the sqlite3_changes() +** value will be saved and restored after each sub-trigger has run.)^ +**
+** +** ^This means that if the changes() SQL function (or similar) is used +** by the first INSERT, UPDATE or DELETE statement within a trigger, it +** returns the value as set when the calling statement began executing. +** ^If it is used by the second or subsequent such statement within a trigger +** program, the value returned reflects the number of rows modified by the +** previous INSERT, UPDATE or DELETE statement within the same trigger. +** +** See also the [sqlite3_total_changes()] interface, the +** [count_changes pragma], and the [changes() SQL function]. +** +** If a separate thread makes changes on the same database connection +** while [sqlite3_changes()] is running then the value returned +** is unpredictable and not meaningful. +*/ +SQLITE_API int sqlite3_changes(sqlite3*); + +/* +** CAPI3REF: Total Number Of Rows Modified +** METHOD: sqlite3 +** +** ^This function returns the total number of rows inserted, modified or +** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed +** since the database connection was opened, including those executed as +** part of trigger programs. ^Executing any other type of SQL statement +** does not affect the value returned by sqlite3_total_changes(). +** +** ^Changes made as part of [foreign key actions] are included in the +** count, but those made as part of REPLACE constraint resolution are +** not. ^Changes to a view that are intercepted by INSTEAD OF triggers +** are not counted. +** +** See also the [sqlite3_changes()] interface, the +** [count_changes pragma], and the [total_changes() SQL function]. +** +** If a separate thread makes changes on the same database connection +** while [sqlite3_total_changes()] is running then the value +** returned is unpredictable and not meaningful. +*/ +SQLITE_API int sqlite3_total_changes(sqlite3*); + +/* +** CAPI3REF: Interrupt A Long-Running Query +** METHOD: sqlite3 +** +** ^This function causes any pending database operation to abort and +** return at its earliest opportunity. This routine is typically +** called in response to a user action such as pressing "Cancel" +** or Ctrl-C where the user wants a long query operation to halt +** immediately. +** +** ^It is safe to call this routine from a thread different from the +** thread that is currently running the database operation. But it +** is not safe to call this routine with a [database connection] that +** is closed or might close before sqlite3_interrupt() returns. +** +** ^If an SQL operation is very nearly finished at the time when +** sqlite3_interrupt() is called, then it might not have an opportunity +** to be interrupted and might continue to completion. +** +** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. +** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE +** that is inside an explicit transaction, then the entire transaction +** will be rolled back automatically. +** +** ^The sqlite3_interrupt(D) call is in effect until all currently running +** SQL statements on [database connection] D complete. ^Any new SQL statements +** that are started after the sqlite3_interrupt() call and before the +** running statement count reaches zero are interrupted as if they had been +** running prior to the sqlite3_interrupt() call. ^New SQL statements +** that are started after the running statement count reaches zero are +** not affected by the sqlite3_interrupt().
+** ^A call to sqlite3_interrupt(D) that occurs when there are no running +** SQL statements is a no-op and has no effect on SQL statements +** that are started after the sqlite3_interrupt() call returns. +** +** If the database connection closes while [sqlite3_interrupt()] +** is running then bad things will likely happen. +*/ +SQLITE_API void sqlite3_interrupt(sqlite3*); + +/* +** CAPI3REF: Determine If An SQL Statement Is Complete +** +** These routines are useful during command-line input to determine if the +** currently entered text seems to form a complete SQL statement or +** if additional input is needed before sending the text into +** SQLite for parsing. ^These routines return 1 if the input string +** appears to be a complete SQL statement. ^A statement is judged to be +** complete if it ends with a semicolon token and is not a prefix of a +** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within +** string literals or quoted identifier names or comments are not +** independent tokens (they are part of the token in which they are +** embedded) and thus do not count as a statement terminator. ^Whitespace +** and comments that follow the final semicolon are ignored. +** +** ^These routines return 0 if the statement is incomplete. ^If a +** memory allocation fails, then SQLITE_NOMEM is returned. +** +** ^These routines do not parse the SQL statements thus +** will not detect syntactically incorrect SQL. +** +** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior +** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked +** automatically by sqlite3_complete16(). If that initialization fails, +** then the return value from sqlite3_complete16() will be non-zero +** regardless of whether or not the input SQL is complete.)^ +** +** The input to [sqlite3_complete()] must be a zero-terminated +** UTF-8 string. +** +** The input to [sqlite3_complete16()] must be a zero-terminated +** UTF-16 string in native byte order. +*/ +SQLITE_API int sqlite3_complete(const char *sql); +SQLITE_API int sqlite3_complete16(const void *sql); + +/* +** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors +** KEYWORDS: {busy-handler callback} {busy handler} +** METHOD: sqlite3 +** +** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X +** that might be invoked with argument P whenever +** an attempt is made to access a database table associated with +** [database connection] D when another thread +** or process has the table locked. +** The sqlite3_busy_handler() interface is used to implement +** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout]. +** +** ^If the busy callback is NULL, then [SQLITE_BUSY] +** is returned immediately upon encountering the lock. ^If the busy callback +** is not NULL, then the callback might be invoked with two arguments. +** +** ^The first argument to the busy handler is a copy of the void* pointer which +** is the third argument to sqlite3_busy_handler(). ^The second argument to +** the busy handler callback is the number of times that the busy handler has +** been invoked previously for the same locking event. ^If the +** busy callback returns 0, then no additional attempts are made to +** access the database and [SQLITE_BUSY] is returned +** to the application. +** ^If the callback returns non-zero, then another attempt +** is made to access the database and the cycle repeats. +** +** The presence of a busy handler does not guarantee that it will be invoked +** when there is lock contention. 
^If SQLite determines that invoking the busy +** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] +** to the application instead of invoking the +** busy handler. +** Consider a scenario where one process is holding a read lock that +** it is trying to promote to a reserved lock and +** a second process is holding a reserved lock that it is trying +** to promote to an exclusive lock. The first process cannot proceed +** because it is blocked by the second and the second process cannot +** proceed because it is blocked by the first. If both processes +** invoke the busy handlers, neither will make any progress. Therefore, +** SQLite returns [SQLITE_BUSY] for the first process, hoping that this +** will induce the first process to release its read lock and allow +** the second process to proceed. +** +** ^The default busy callback is NULL. +** +** ^(There can only be a single busy handler defined for each +** [database connection]. Setting a new busy handler clears any +** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] +** or evaluating [PRAGMA busy_timeout=N] will change the +** busy handler and thus clear any previously set busy handler. +** +** The busy callback should not take any actions which modify the +** database connection that invoked the busy handler. In other words, +** the busy handler is not reentrant. Any such actions +** result in undefined behavior. +** +** A busy handler must not close the database connection +** or [prepared statement] that invoked the busy handler. +*/ +SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); + +/* +** CAPI3REF: Set A Busy Timeout +** METHOD: sqlite3 +** +** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps +** for a specified amount of time when a table is locked. ^The handler +** will sleep multiple times until at least "ms" milliseconds of sleeping +** have accumulated. ^After at least "ms" milliseconds of sleeping, +** the handler returns 0 which causes [sqlite3_step()] to return +** [SQLITE_BUSY]. +** +** ^Calling this routine with an argument less than or equal to zero +** turns off all busy handlers. +** +** ^(There can only be a single busy handler for a particular +** [database connection] at any given moment. If another busy handler +** was defined (using [sqlite3_busy_handler()]) prior to calling +** this routine, that other busy handler is cleared.)^ +** +** See also: [PRAGMA busy_timeout] +*/ +SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); + +/* +** CAPI3REF: Convenience Routines For Running Queries +** METHOD: sqlite3 +** +** This is a legacy interface that is preserved for backwards compatibility. +** Use of this interface is not recommended. +** +** Definition: A result table is a memory data structure created by the +** [sqlite3_get_table()] interface. A result table records the +** complete query results from one or more queries. +** +** The table conceptually has a number of rows and columns. But +** these numbers are not part of the result table itself. These +** numbers are obtained separately. Let N be the number of rows +** and M be the number of columns. +** +** A result table is an array of pointers to zero-terminated UTF-8 strings. +** There are (N+1)*M elements in the array. The first M pointers point +** to zero-terminated strings that contain the names of the columns. +** The remaining entries all point to query results. NULL values result +** in NULL pointers.
All other values are in their UTF-8 zero-terminated +** string representation as returned by [sqlite3_column_text()]. +** +** A result table might consist of one or more memory allocations. +** It is not safe to pass a result table directly to [sqlite3_free()]. +** A result table should be deallocated using [sqlite3_free_table()]. +** +** ^(As an example of the result table format, suppose a query result +** is as follows: +** +**
+**        Name        | Age
+**        -----------------------
+**        Alice       | 43
+**        Bob         | 28
+**        Cindy       | 21
+** 
+**
+** There are two columns (M==2) and three rows (N==3). Thus the
+** result table has 8 entries. Suppose the result table is stored
+** in an array named azResult. Then azResult holds this content:
+**
+**        azResult[0] = "Name";
+**        azResult[1] = "Age";
+**        azResult[2] = "Alice";
+**        azResult[3] = "43";
+**        azResult[4] = "Bob";
+**        azResult[5] = "28";
+**        azResult[6] = "Cindy";
+**        azResult[7] = "21";
+** 
)^ +** +** ^The sqlite3_get_table() function evaluates one or more +** semicolon-separated SQL statements in the zero-terminated UTF-8 +** string of its 2nd parameter and returns a result table to the +** pointer given in its 3rd parameter. +** +** After the application has finished with the result from sqlite3_get_table(), +** it must pass the result table pointer to sqlite3_free_table() in order to +** release the memory that was malloced. Because of the way the +** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling +** function must not try to call [sqlite3_free()] directly. Only +** [sqlite3_free_table()] is able to release the memory properly and safely. +** +** The sqlite3_get_table() interface is implemented as a wrapper around +** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access +** to any internal data structures of SQLite. It uses only the public +** interface defined here. As a consequence, errors that occur in the +** wrapper layer outside of the internal [sqlite3_exec()] call are not +** reflected in subsequent calls to [sqlite3_errcode()] or +** [sqlite3_errmsg()]. +*/ +SQLITE_API int sqlite3_get_table( + sqlite3 *db, /* An open database */ + const char *zSql, /* SQL to be evaluated */ + char ***pazResult, /* Results of the query */ + int *pnRow, /* Number of result rows written here */ + int *pnColumn, /* Number of result columns written here */ + char **pzErrmsg /* Error msg written here */ +); +SQLITE_API void sqlite3_free_table(char **result); + +/* +** CAPI3REF: Formatted String Printing Functions +** +** These routines are work-alikes of the "printf()" family of functions +** from the standard C library. +** These routines understand most of the common K&R formatting options, +** plus some additional non-standard formats, detailed below. +** Note that some of the more obscure formatting options from recent +** C-library standards are omitted from this implementation. +** +** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their +** results into memory obtained from [sqlite3_malloc()]. +** The strings returned by these two routines should be +** released by [sqlite3_free()]. ^Both routines return a +** NULL pointer if [sqlite3_malloc()] is unable to allocate enough +** memory to hold the resulting string. +** +** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from +** the standard C library. The result is written into the +** buffer supplied as the second parameter whose size is given by +** the first parameter. Note that the order of the +** first two parameters is reversed from snprintf().)^ This is an +** historical accident that cannot be fixed without breaking +** backwards compatibility. ^(Note also that sqlite3_snprintf() +** returns a pointer to its buffer instead of the number of +** characters actually written into the buffer.)^ We admit that +** the number of characters written would be a more useful return +** value but we cannot change the implementation of sqlite3_snprintf() +** now without breaking compatibility. +** +** ^As long as the buffer size is greater than zero, sqlite3_snprintf() +** guarantees that the buffer is always zero-terminated. ^The first +** parameter "n" is the total size of the buffer, including space for +** the zero terminator. So the longest string that can be completely +** written will be n-1 characters. +** +** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). 
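+**
+** For example, note that the buffer size comes first and the buffer
+** second (the values here are illustrative only):
+**
+**   char zBuf[50];
+**   sqlite3_snprintf(sizeof(zBuf), zBuf, "%d degrees", 451);
+**   /* zBuf now holds "451 degrees" and is always zero-terminated
+**   ** because the buffer size is greater than zero. */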
+**
+** These routines all implement some additional formatting
+** options that are useful for constructing SQL statements.
+** All of the usual printf() formatting options apply. In addition,
+** there are "%q", "%Q", "%w" and "%z" options.
+**
+** ^(The %q option works like %s in that it substitutes a nul-terminated
+** string from the argument list. But %q also doubles every '\'' character.
+** %q is designed for use inside a string literal.)^ By doubling each '\''
+** character it escapes that character and allows it to be inserted into
+** the string.
+**
+** For example, assume the string variable zText contains text as follows:
+**
+**  char *zText = "It's a happy day!";
+** 
+** +** One can use this text in an SQL statement as follows: +** +**
+**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
+**  sqlite3_exec(db, zSQL, 0, 0, 0);
+**  sqlite3_free(zSQL);
+** 
+** +** Because the %q format string is used, the '\'' character in zText +** is escaped and the SQL generated is as follows: +** +**
+**  INSERT INTO table1 VALUES('It''s a happy day!')
+** 
+** +** This is correct. Had we used %s instead of %q, the generated SQL +** would have looked like this: +** +**
+**  INSERT INTO table1 VALUES('It's a happy day!');
+** 
+** +** This second example is an SQL syntax error. As a general rule you should +** always use %q instead of %s when inserting text into a string literal. +** +** ^(The %Q option works like %q except it also adds single quotes around +** the outside of the total string. Additionally, if the parameter in the +** argument list is a NULL pointer, %Q substitutes the text "NULL" (without +** single quotes).)^ So, for example, one could say: +** +**
+**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
+**  sqlite3_exec(db, zSQL, 0, 0, 0);
+**  sqlite3_free(zSQL);
+** 
+**
+** The code above will render a correct SQL statement in the zSQL
+** variable even if the zText variable is a NULL pointer.
+**
+** ^(The "%w" formatting option is like "%q" except that it expects to
+** be contained within double-quotes instead of single quotes, and it
+** escapes the double-quote character instead of the single-quote
+** character.)^ The "%w" formatting option is intended for safely inserting
+** table and column names into a constructed SQL statement.
+**
+** ^(The "%z" formatting option works like "%s" but with the
+** addition that after the string has been read and copied into
+** the result, [sqlite3_free()] is called on the input string.)^
+*/
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
+
+/*
+** CAPI3REF: Memory Allocation Subsystem
+**
+** The SQLite core uses these three routines for all of its own
+** internal memory allocation needs. "Core" in the previous sentence
+** does not include operating-system specific VFS implementations. The
+** Windows VFS uses native malloc() and free() for some operations.
+**
+** ^The sqlite3_malloc() routine returns a pointer to a block
+** of memory at least N bytes in length, where N is the parameter.
+** ^If sqlite3_malloc() is unable to obtain sufficient free
+** memory, it returns a NULL pointer. ^If the parameter N to
+** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
+** a NULL pointer.
+**
+** ^The sqlite3_malloc64(N) routine works just like
+** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead
+** of a signed 32-bit integer.
+**
+** ^Calling sqlite3_free() with a pointer previously returned
+** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
+** that it might be reused. ^The sqlite3_free() routine is
+** a no-op if it is called with a NULL pointer. Passing a NULL pointer
+** to sqlite3_free() is harmless. After being freed, memory
+** should neither be read nor written. Even reading previously freed
+** memory might result in a segmentation fault or other severe error.
+** Memory corruption, a segmentation fault, or other severe error
+** might result if sqlite3_free() is called with a non-NULL pointer that
+** was not obtained from sqlite3_malloc() or sqlite3_realloc().
+**
+** ^The sqlite3_realloc(X,N) interface attempts to resize a
+** prior memory allocation X to be at least N bytes.
+** ^If the X parameter to sqlite3_realloc(X,N)
+** is a NULL pointer then its behavior is identical to calling
+** sqlite3_malloc(N).
+** ^If the N parameter to sqlite3_realloc(X,N) is zero or
+** negative then the behavior is exactly the same as calling
+** sqlite3_free(X).
+** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation
+** of at least N bytes in size or NULL if insufficient memory is available.
+** ^If M is the size of the prior allocation, then min(N,M) bytes
+** of the prior allocation are copied into the beginning of the buffer
+** returned by sqlite3_realloc(X,N) and the prior allocation is freed.
+** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
+** prior allocation is not freed.
+**
+** ^The sqlite3_realloc64(X,N) interface works the same as
+** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
+** of a 32-bit signed integer.
+**
+** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
+** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
+** sqlite3_msize(X) returns the size of that memory allocation in bytes.
+** ^The value returned by sqlite3_msize(X) might be larger than the number
+** of bytes requested when X was allocated. ^If X is a NULL pointer then
+** sqlite3_msize(X) returns zero. If X points to something that is not
+** the beginning of a memory allocation, or if it points to a formerly
+** valid memory allocation that has now been freed, then the behavior
+** of sqlite3_msize(X) is undefined and possibly harmful.
+**
+** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
+** sqlite3_malloc64(), and sqlite3_realloc64()
+** is always aligned to at least an 8 byte boundary, or to a
+** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
+** option is used.
+**
+** In SQLite version 3.5.0 and 3.5.1, it was possible to define
+** the SQLITE_OMIT_MEMORY_ALLOCATION option, which would cause the built-in
+** implementation of these routines to be omitted. That capability
+** is no longer provided. Only built-in memory allocators can be used.
+**
+** Prior to SQLite version 3.7.10, the Windows OS interface layer called
+** the system malloc() and free() directly when converting
+** filenames between the UTF-8 encoding used by SQLite
+** and whatever filename encoding is used by the particular Windows
+** installation. Memory allocation errors were detected, but
+** they were reported back as [SQLITE_CANTOPEN] or
+** [SQLITE_IOERR] rather than [SQLITE_NOMEM].
+**
+** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()]
+** must be either NULL or else pointers obtained from a prior
+** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have
+** not yet been released.
+**
+** The application must not read or write any part of
+** a block of memory after it has been released using
+** [sqlite3_free()] or [sqlite3_realloc()].
+*/
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64);
+SQLITE_API void sqlite3_free(void*);
+SQLITE_API sqlite3_uint64 sqlite3_msize(void*);
+
+/*
+** CAPI3REF: Memory Allocator Statistics
+**
+** SQLite provides these two interfaces for reporting on the status
+** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()]
+** routines, which form the built-in memory allocation subsystem.
+**
+** ^The [sqlite3_memory_used()] routine returns the number of bytes
+** of memory currently outstanding (malloced but not freed).
+** ^The [sqlite3_memory_highwater()] routine returns the maximum
+** value of [sqlite3_memory_used()] since the high-water mark
+** was last reset. ^The values returned by [sqlite3_memory_used()] and
+** [sqlite3_memory_highwater()] include any overhead
+** added by SQLite in its implementation of [sqlite3_malloc()],
+** but not overhead added by any underlying system library
+** routines that [sqlite3_malloc()] may call.
+**
+** ^The memory high-water mark is reset to the current value of
+** [sqlite3_memory_used()] if and only if the parameter to
+** [sqlite3_memory_highwater()] is true. ^The value returned
+** by [sqlite3_memory_highwater(1)] is the high-water mark
+** prior to the reset.
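+**
+** As a short usage sketch, the peak memory consumption of a region of
+** code can be measured like this:
+**
+**   sqlite3_memory_highwater(1);            /* reset the high-water mark */
+**   /* ... perform database work here ... */
+**   sqlite3_int64 nPeak = sqlite3_memory_highwater(0);  /* peak bytes */
+**   sqlite3_int64 nNow = sqlite3_memory_used();   /* outstanding bytes */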
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
+
+/*
+** CAPI3REF: Pseudo-Random Number Generator
+**
+** SQLite contains a high-quality pseudo-random number generator (PRNG) used to
+** select random [ROWID | ROWIDs] when inserting new records into a table that
+** already uses the largest possible [ROWID]. The PRNG is also used for
+** the built-in random() and randomblob() SQL functions. This interface allows
+** applications to access the same PRNG for other purposes.
+**
+** ^A call to this routine stores N bytes of randomness into buffer P.
+** ^The P parameter can be a NULL pointer.
+**
+** ^If this routine has not been previously called or if the previous
+** call had N less than one or a NULL pointer for P, then the PRNG is
+** seeded using randomness obtained from the xRandomness method of
+** the default [sqlite3_vfs] object.
+** ^If the previous call to this routine had an N of 1 or more and a
+** non-NULL P then the pseudo-randomness is generated
+** internally and without recourse to the [sqlite3_vfs] xRandomness
+** method.
+*/
+SQLITE_API void sqlite3_randomness(int N, void *P);
+
+/*
+** CAPI3REF: Compile-Time Authorization Callbacks
+** METHOD: sqlite3
+**
+** ^This routine registers an authorizer callback with a particular
+** [database connection], supplied in the first argument.
+** ^The authorizer callback is invoked as SQL statements are being compiled
+** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()],
+** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. ^At various
+** points during the compilation process, as logic is being created
+** to perform various actions, the authorizer callback is invoked to
+** see if those actions are allowed. ^The authorizer callback should
+** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the
+** specific action but allow the SQL statement to continue to be
+** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be
+** rejected with an error. ^If the authorizer callback returns
+** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY]
+** then the [sqlite3_prepare_v2()] or equivalent call that triggered
+** the authorizer will fail with an error message.
+**
+** When the callback returns [SQLITE_OK], that means the operation
+** requested is ok. ^When the callback returns [SQLITE_DENY], the
+** [sqlite3_prepare_v2()] or equivalent call that triggered the
+** authorizer will fail with an error message explaining that
+** access is denied.
+**
+** ^The first parameter to the authorizer callback is a copy of the third
+** parameter to the sqlite3_set_authorizer() interface. ^The second parameter
+** to the callback is an integer [SQLITE_COPY | action code] that specifies
+** the particular action to be authorized. ^The third through sixth parameters
+** to the callback are zero-terminated strings that contain additional
+** details about the action to be authorized.
+**
+** ^If the action code is [SQLITE_READ]
+** and the callback returns [SQLITE_IGNORE] then the
+** [prepared statement] is constructed to substitute
+** a NULL value in place of the table column that would have
+** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE]
+** return can be used to deny an untrusted user access to individual
+** columns of a table.
+** ^If the action code is [SQLITE_DELETE] and the callback returns +** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the +** [truncate optimization] is disabled and all rows are deleted individually. +** +** An authorizer is used when [sqlite3_prepare | preparing] +** SQL statements from an untrusted source, to ensure that the SQL statements +** do not try to access data they are not allowed to see, or that they do not +** try to execute malicious statements that damage the database. For +** example, an application may allow a user to enter arbitrary +** SQL queries for evaluation by a database. But the application does +** not want the user to be able to make arbitrary changes to the +** database. An authorizer could then be put in place while the +** user-entered SQL is being [sqlite3_prepare | prepared] that +** disallows everything except [SELECT] statements. +** +** Applications that need to process SQL from untrusted sources +** might also consider lowering resource limits using [sqlite3_limit()] +** and limiting database size using the [max_page_count] [PRAGMA] +** in addition to using an authorizer. +** +** ^(Only a single authorizer can be in place on a database connection +** at a time. Each call to sqlite3_set_authorizer overrides the +** previous call.)^ ^Disable the authorizer by installing a NULL callback. +** The authorizer is disabled by default. +** +** The authorizer callback must not do anything that will modify +** the database connection that invoked the authorizer callback. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. +** +** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the +** statement might be re-prepared during [sqlite3_step()] due to a +** schema change. Hence, the application should ensure that the +** correct authorizer callback remains in place during the [sqlite3_step()]. +** +** ^Note that the authorizer callback is invoked only during +** [sqlite3_prepare()] or its variants. Authorization is not +** performed during statement evaluation in [sqlite3_step()], unless +** as stated in the previous paragraph, sqlite3_step() invokes +** sqlite3_prepare_v2() to reprepare a statement after a schema change. +*/ +SQLITE_API int sqlite3_set_authorizer( + sqlite3*, + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), + void *pUserData +); + +/* +** CAPI3REF: Authorizer Return Codes +** +** The [sqlite3_set_authorizer | authorizer callback function] must +** return either [SQLITE_OK] or one of these two constants in order +** to signal SQLite whether or not the action is permitted. See the +** [sqlite3_set_authorizer | authorizer documentation] for additional +** information. +** +** Note that SQLITE_IGNORE is also used as a [conflict resolution mode] +** returned from the [sqlite3_vtab_on_conflict()] interface. +*/ +#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ +#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ + +/* +** CAPI3REF: Authorizer Action Codes +** +** The [sqlite3_set_authorizer()] interface registers a callback function +** that is invoked to authorize certain SQL statement actions. The +** second parameter to the callback is an integer code that specifies +** what action is being authorized. These are the integer action codes that +** the authorizer callback may be passed. 
+** +** These action code values signify what kind of operation is to be +** authorized. The 3rd and 4th parameters to the authorization +** callback function will be parameters or NULL depending on which of these +** codes is used as the second parameter. ^(The 5th parameter to the +** authorizer callback is the name of the database ("main", "temp", +** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback +** is the name of the inner-most trigger or view that is responsible for +** the access attempt or NULL if this access attempt is directly from +** top-level SQL code. +*/ +/******************************************* 3rd ************ 4th ***********/ +#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ +#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ +#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ +#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ +#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ +#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ +#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ +#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ +#define SQLITE_DELETE 9 /* Table Name NULL */ +#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ +#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ +#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ +#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ +#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ +#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ +#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ +#define SQLITE_DROP_VIEW 17 /* View Name NULL */ +#define SQLITE_INSERT 18 /* Table Name NULL */ +#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ +#define SQLITE_READ 20 /* Table Name Column Name */ +#define SQLITE_SELECT 21 /* NULL NULL */ +#define SQLITE_TRANSACTION 22 /* Operation NULL */ +#define SQLITE_UPDATE 23 /* Table Name Column Name */ +#define SQLITE_ATTACH 24 /* Filename NULL */ +#define SQLITE_DETACH 25 /* Database Name NULL */ +#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ +#define SQLITE_REINDEX 27 /* Index Name NULL */ +#define SQLITE_ANALYZE 28 /* Table Name NULL */ +#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ +#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ +#define SQLITE_FUNCTION 31 /* NULL Function Name */ +#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ +#define SQLITE_COPY 0 /* No longer used */ +#define SQLITE_RECURSIVE 33 /* NULL NULL */ + +/* +** CAPI3REF: Tracing And Profiling Functions +** METHOD: sqlite3 +** +** These routines are deprecated. Use the [sqlite3_trace_v2()] interface +** instead of the routines described here. +** +** These routines register callback functions that can be used for +** tracing and profiling the execution of SQL statements. +** +** ^The callback function registered by sqlite3_trace() is invoked at +** various times when an SQL statement is being run by [sqlite3_step()]. +** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the +** SQL statement text as the statement first begins executing. +** ^(Additional sqlite3_trace() callbacks might occur +** as each triggered subprogram is entered. The callbacks for triggers +** contain a UTF-8 SQL comment that identifies the trigger.)^ +** +** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit +** the length of [bound parameter] expansion in the output of sqlite3_trace(). 
+** +** ^The callback function registered by sqlite3_profile() is invoked +** as each SQL statement finishes. ^The profile callback contains +** the original statement text and an estimate of wall-clock time +** of how long that statement took to run. ^The profile callback +** time is in units of nanoseconds, however the current implementation +** is only capable of millisecond resolution so the six least significant +** digits in the time are meaningless. Future versions of SQLite +** might provide greater resolution on the profiler callback. The +** sqlite3_profile() function is considered experimental and is +** subject to change in future versions of SQLite. +*/ +SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*, + void(*xTrace)(void*,const char*), void*); +SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, + void(*xProfile)(void*,const char*,sqlite3_uint64), void*); + +/* +** CAPI3REF: SQL Trace Event Codes +** KEYWORDS: SQLITE_TRACE +** +** These constants identify classes of events that can be monitored +** using the [sqlite3_trace_v2()] tracing logic. The third argument +** to [sqlite3_trace_v2()] is an OR-ed combination of one or more of +** the following constants. ^The first argument to the trace callback +** is one of the following constants. +** +** New tracing constants may be added in future releases. +** +** ^A trace callback has four arguments: xCallback(T,C,P,X). +** ^The T argument is one of the integer type codes above. +** ^The C argument is a copy of the context pointer passed in as the +** fourth argument to [sqlite3_trace_v2()]. +** The P and X arguments are pointers whose meanings depend on T. +** +**
+** [[SQLITE_TRACE_STMT]]
SQLITE_TRACE_STMT
+**
^An SQLITE_TRACE_STMT callback is invoked when a prepared statement +** first begins running and possibly at other times during the +** execution of the prepared statement, such as at the start of each +** trigger subprogram. ^The P argument is a pointer to the +** [prepared statement]. ^The X argument is a pointer to a string which +** is the unexpanded SQL text of the prepared statement or an SQL comment +** that indicates the invocation of a trigger. ^The callback can compute +** the same text that would have been returned by the legacy [sqlite3_trace()] +** interface by using the X argument when X begins with "--" and invoking +** [sqlite3_expanded_sql(P)] otherwise. +** +** [[SQLITE_TRACE_PROFILE]]
SQLITE_TRACE_PROFILE
+**
^An SQLITE_TRACE_PROFILE callback provides approximately the same
+** information as is provided by the [sqlite3_profile()] callback.
+** ^The P argument is a pointer to the [prepared statement] and the
+** X argument points to a 64-bit integer which is an estimate of
+** the number of nanoseconds that the prepared statement took to run.
+** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
+**
+** [[SQLITE_TRACE_ROW]]
SQLITE_TRACE_ROW
+**
^An SQLITE_TRACE_ROW callback is invoked whenever a prepared +** statement generates a single row of result. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument is unused. +** +** [[SQLITE_TRACE_CLOSE]]
SQLITE_TRACE_CLOSE
+**
^An SQLITE_TRACE_CLOSE callback is invoked when a database +** connection closes. +** ^The P argument is a pointer to the [database connection] object +** and the X argument is unused. +**
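+**
+** A minimal callback sketch that distinguishes these event codes (the
+** printf() logging is illustrative only) might look like this:
+**
+**   static int xTrace(unsigned T, void *C, void *P, void *X){
+**     if( T==SQLITE_TRACE_STMT ){
+**       printf("run: %s\n", (const char*)X);  /* unexpanded SQL or comment */
+**     }else if( T==SQLITE_TRACE_PROFILE ){
+**       printf("%lld ns\n", *(sqlite3_int64*)X);  /* estimated run time */
+**     }
+**     return 0;  /* the return value is currently ignored */
+**   }
+**
+** Such a callback would be registered with, for example,
+** sqlite3_trace_v2(db, SQLITE_TRACE_STMT|SQLITE_TRACE_PROFILE, xTrace, 0).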
+*/ +#define SQLITE_TRACE_STMT 0x01 +#define SQLITE_TRACE_PROFILE 0x02 +#define SQLITE_TRACE_ROW 0x04 +#define SQLITE_TRACE_CLOSE 0x08 + +/* +** CAPI3REF: SQL Trace Hook +** METHOD: sqlite3 +** +** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback +** function X against [database connection] D, using property mask M +** and context pointer P. ^If the X callback is +** NULL or if the M mask is zero, then tracing is disabled. The +** M argument should be the bitwise OR-ed combination of +** zero or more [SQLITE_TRACE] constants. +** +** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides +** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** +** ^The X callback is invoked whenever any of the events identified by +** mask M occur. ^The integer return value from the callback is currently +** ignored, though this may change in future releases. Callback +** implementations should return zero to ensure future compatibility. +** +** ^A trace callback is invoked with four arguments: callback(T,C,P,X). +** ^The T argument is one of the [SQLITE_TRACE] +** constants to indicate why the callback was invoked. +** ^The C argument is a copy of the context pointer. +** The P and X arguments are pointers whose meanings depend on T. +** +** The sqlite3_trace_v2() interface is intended to replace the legacy +** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which +** are deprecated. +*/ +SQLITE_API int sqlite3_trace_v2( + sqlite3*, + unsigned uMask, + int(*xCallback)(unsigned,void*,void*,void*), + void *pCtx +); + +/* +** CAPI3REF: Query Progress Callbacks +** METHOD: sqlite3 +** +** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback +** function X to be invoked periodically during long running calls to +** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for +** database connection D. An example use for this +** interface is to keep a GUI updated during a large query. +** +** ^The parameter P is passed through as the only parameter to the +** callback function X. ^The parameter N is the approximate number of +** [virtual machine instructions] that are evaluated between successive +** invocations of the callback X. ^If N is less than one then the progress +** handler is disabled. +** +** ^Only a single progress handler may be defined at one time per +** [database connection]; setting a new progress handler cancels the +** old one. ^Setting parameter X to NULL disables the progress handler. +** ^The progress handler is also disabled by setting N to a value less +** than 1. +** +** ^If the progress callback returns non-zero, the operation is +** interrupted. This feature can be used to implement a +** "Cancel" button on a GUI progress dialog box. +** +** The progress handler callback must not do anything that will modify +** the database connection that invoked the progress handler. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. +** +*/ +SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); + +/* +** CAPI3REF: Opening A New Database Connection +** CONSTRUCTOR: sqlite3 +** +** ^These routines open an SQLite database file as specified by the +** filename argument. ^The filename argument is interpreted as UTF-8 for +** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte +** order for sqlite3_open16(). 
^(A [database connection] handle is usually +** returned in *ppDb, even if an error occurs. The only exception is that +** if SQLite is unable to allocate memory to hold the [sqlite3] object, +** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] +** object.)^ ^(If the database is opened (and/or created) successfully, then +** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The +** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain +** an English language description of the error following a failure of any +** of the sqlite3_open() routines. +** +** ^The default encoding will be UTF-8 for databases created using +** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases +** created using sqlite3_open16() will be UTF-16 in the native byte order. +** +** Whether or not an error occurs when it is opened, resources +** associated with the [database connection] handle should be released by +** passing it to [sqlite3_close()] when it is no longer required. +** +** The sqlite3_open_v2() interface works like sqlite3_open() +** except that it accepts two additional parameters for additional control +** over the new database connection. ^(The flags parameter to +** sqlite3_open_v2() can take one of +** the following three values, optionally combined with the +** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE], +** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^ +** +**
+** ^(
[SQLITE_OPEN_READONLY]
+**
The database is opened in read-only mode. If the database does not +** already exist, an error is returned.
)^ +** +** ^(
[SQLITE_OPEN_READWRITE]
+**
The database is opened for reading and writing if possible, or reading +** only if the file is write protected by the operating system. In either +** case the database must already exist, otherwise an error is returned.
)^ +** +** ^(
[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
+**
The database is opened for reading and writing, and is created if +** it does not already exist. This is the behavior that is always used for +** sqlite3_open() and sqlite3_open16().
)^ +**
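+**
+** For example (the file name is arbitrary), the third combination above
+** is the common open-or-create case:
+**
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("data.db", &db,
+**                SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
+**   if( rc!=SQLITE_OK ){
+**     fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
+**   }
+**   sqlite3_close(db);  /* release resources even after a failed open */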
+** +** If the 3rd parameter to sqlite3_open_v2() is not one of the +** combinations shown above optionally combined with other +** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] +** then the behavior is undefined. +** +** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection +** opens in the multi-thread [threading mode] as long as the single-thread +** mode has not been set at compile-time or start-time. ^If the +** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens +** in the serialized [threading mode] unless single-thread was +** previously selected at compile-time or start-time. +** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be +** eligible to use [shared cache mode], regardless of whether or not shared +** cache is enabled using [sqlite3_enable_shared_cache()]. ^The +** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not +** participate in [shared cache mode] even if it is enabled. +** +** ^The fourth parameter to sqlite3_open_v2() is the name of the +** [sqlite3_vfs] object that defines the operating system interface that +** the new database connection should use. ^If the fourth parameter is +** a NULL pointer then the default [sqlite3_vfs] object is used. +** +** ^If the filename is ":memory:", then a private, temporary in-memory database +** is created for the connection. ^This in-memory database will vanish when +** the database connection is closed. Future versions of SQLite might +** make use of additional special filenames that begin with the ":" character. +** It is recommended that when a database filename actually does begin with +** a ":" character you should prefix the filename with a pathname such as +** "./" to avoid ambiguity. +** +** ^If the filename is an empty string, then a private, temporary +** on-disk database will be created. ^This private database will be +** automatically deleted as soon as the database connection is closed. +** +** [[URI filenames in sqlite3_open()]]

URI Filenames

+** +** ^If [URI filename] interpretation is enabled, and the filename argument +** begins with "file:", then the filename is interpreted as a URI. ^URI +** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is +** set in the fourth argument to sqlite3_open_v2(), or if it has +** been enabled globally using the [SQLITE_CONFIG_URI] option with the +** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. +** As of SQLite version 3.7.7, URI filename interpretation is turned off +** by default, but future releases of SQLite might enable URI filename +** interpretation by default. See "[URI filenames]" for additional +** information. +** +** URI filenames are parsed according to RFC 3986. ^If the URI contains an +** authority, then it must be either an empty string or the string +** "localhost". ^If the authority is not an empty string or "localhost", an +** error is returned to the caller. ^The fragment component of a URI, if +** present, is ignored. +** +** ^SQLite uses the path component of the URI as the name of the disk file +** which contains the database. ^If the path begins with a '/' character, +** then it is interpreted as an absolute path. ^If the path does not begin +** with a '/' (meaning that the authority section is omitted from the URI) +** then the path is interpreted as a relative path. +** ^(On windows, the first component of an absolute path +** is a drive specification (e.g. "C:").)^ +** +** [[core URI query parameters]] +** The query component of a URI may contain parameters that are interpreted +** either by SQLite itself, or by a [VFS | custom VFS implementation]. +** SQLite and its built-in [VFSes] interpret the +** following query parameters: +** +**
    +**
  • vfs: ^The "vfs" parameter may be used to specify the name of +** a VFS object that provides the operating system interface that should +** be used to access the database file on disk. ^If this option is set to +** an empty string the default VFS object is used. ^Specifying an unknown +** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is +** present, then the VFS specified by the option takes precedence over +** the value passed as the fourth parameter to sqlite3_open_v2(). +** +**
  • mode: ^(The mode parameter may be set to either "ro", "rw", +** "rwc", or "memory". Attempting to set it to any other value is +** an error)^. +** ^If "ro" is specified, then the database is opened for read-only +** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the +** third argument to sqlite3_open_v2(). ^If the mode option is set to +** "rw", then the database is opened for read-write (but not create) +** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had +** been set. ^Value "rwc" is equivalent to setting both +** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is +** set to "memory" then a pure [in-memory database] that never reads +** or writes from disk is used. ^It is an error to specify a value for +** the mode parameter that is less restrictive than that specified by +** the flags passed in the third parameter to sqlite3_open_v2(). +** +**
  • cache: ^The cache parameter may be set to either "shared" or +** "private". ^Setting it to "shared" is equivalent to setting the +** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to +** sqlite3_open_v2(). ^Setting the cache parameter to "private" is +** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit. +** ^If sqlite3_open_v2() is used and the "cache" parameter is present in +** a URI filename, its value overrides any behavior requested by setting +** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag. +** +**
• psow: ^The psow parameter indicates whether or not the
+** [powersafe overwrite] property applies to the
+** storage media on which the database file resides.
+**
+**
  • nolock: ^The nolock parameter is a boolean query parameter +** which if set disables file locking in rollback journal modes. This +** is useful for accessing a database on a filesystem that does not +** support locking. Caution: Database corruption might result if two +** or more processes write to the same database and any one of those +** processes uses nolock=1. +** +**
  • immutable: ^The immutable parameter is a boolean query +** parameter that indicates that the database file is stored on +** read-only media. ^When immutable is set, SQLite assumes that the +** database file cannot be changed, even by a process with higher +** privilege, and so the database is opened read-only and all locking +** and change detection is disabled. Caution: Setting the immutable +** property on a database file that does in fact change can result +** in incorrect query results and/or [SQLITE_CORRUPT] errors. +** See also: [SQLITE_IOCAP_IMMUTABLE]. +** +**
+** +** ^Specifying an unknown parameter in the query component of a URI is not an +** error. Future versions of SQLite might understand additional query +** parameters. See "[query parameters with special meaning to SQLite]" for +** additional information. +** +** [[URI filename examples]]

URI filename examples

+** +** +**
+** file:data.db
+**     Open the file "data.db" in the current directory.
+**
+** file:/home/fred/data.db
+** file:///home/fred/data.db
+** file://localhost/home/fred/data.db
+**     Open the database file "/home/fred/data.db".
+**
+** file://darkstar/home/fred/data.db
+**     An error. "darkstar" is not a recognized authority.
+**
+** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
+**     Windows only: Open the file "data.db" on fred's desktop on drive
+**     C:. Note that the %20 escaping in this example is not strictly
+**     necessary - space characters can be used literally in URI filenames.
+**
+** file:data.db?mode=ro&cache=private
+**     Open file "data.db" in the current directory for read-only access.
+**     Regardless of whether or not shared-cache mode is enabled by
+**     default, use a private cache.
+**
+** file:/home/fred/data.db?vfs=unix-dotfile
+**     Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
+**     that uses dot-files in place of posix advisory locking.
+**
+** file:data.db?mode=readonly
+**     An error. "readonly" is not a valid option for the "mode" parameter.
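+**
+** As a brief sketch, opening the read-only example above requires the
+** [SQLITE_OPEN_URI] flag unless URI handling has been enabled globally:
+**
+**   sqlite3 *db = 0;
+**   int rc = sqlite3_open_v2("file:data.db?mode=ro&cache=private", &db,
+**                SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, 0);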
+**
+** ^URI hexadecimal escape sequences (%HH) are supported within the path and
+** query components of a URI. A hexadecimal escape sequence consists of a
+** percent sign - "%" - followed by exactly two hexadecimal digits
+** specifying an octet value. ^Before the path or query components of a
+** URI filename are interpreted, they are encoded using UTF-8 and all
+** hexadecimal escape sequences replaced by a single byte containing the
+** corresponding octet. If this process generates an invalid UTF-8 encoding,
+** the results are undefined.
+**
+** Note to Windows users: The encoding used for the filename argument
+** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever
+** codepage is currently defined. Filenames containing international
+** characters must be converted to UTF-8 prior to passing them into
+** sqlite3_open() or sqlite3_open_v2().
+**
+** Note to Windows Runtime users: The temporary directory must be set
+** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various
+** features that require the use of temporary files may fail.
+**
+** See also: [sqlite3_temp_directory]
+*/
+SQLITE_API int sqlite3_open(
+  const char *filename,   /* Database filename (UTF-8) */
+  sqlite3 **ppDb          /* OUT: SQLite db handle */
+);
+SQLITE_API int sqlite3_open16(
+  const void *filename,   /* Database filename (UTF-16) */
+  sqlite3 **ppDb          /* OUT: SQLite db handle */
+);
+SQLITE_API int sqlite3_open_v2(
+  const char *filename,   /* Database filename (UTF-8) */
+  sqlite3 **ppDb,         /* OUT: SQLite db handle */
+  int flags,              /* Flags */
+  const char *zVfs        /* Name of VFS module to use */
+);
+
+/*
+** CAPI3REF: Obtain Values For URI Parameters
+**
+** These are utility routines, useful to VFS implementations, that check
+** to see if a database file was a URI that contained a specific query
+** parameter, and if so obtain the value of that query parameter.
+**
+** If F is the database filename pointer passed into the xOpen() method of
+** a VFS implementation when the flags parameter to xOpen() has one or
+** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and
+** P is the name of the query parameter, then
+** sqlite3_uri_parameter(F,P) returns the value of the P
+** parameter if it exists or a NULL pointer if P does not appear as a
+** query parameter on F. If P is a query parameter of F that
+** has no explicit value, then sqlite3_uri_parameter(F,P) returns
+** a pointer to an empty string.
+**
+** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean
+** parameter and returns true (1) or false (0) according to the value
+** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the
+** value of query parameter P is one of "yes", "true", or "on" in any
+** case or if the value begins with a non-zero number. The
+** sqlite3_uri_boolean(F,P,B) routine returns false (0) if the value of
+** query parameter P is one of "no", "false", or "off" in any case or
+** if the value begins with a numeric zero. If P is not a query
+** parameter on F or if the value of P does not match any of the
+** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0).
+**
+** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a
+** 64-bit signed integer and returns that integer, or D if P does not
+** exist. If the value of P is something other than an integer, then
+** zero is returned.
+**
+** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and
+** sqlite3_uri_boolean(F,P,B) returns B.
If F is not a NULL pointer and +** is not a database file pathname pointer that SQLite passed into the xOpen +** VFS method, then the behavior of this routine is undefined and probably +** undesirable. +*/ +SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); +SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); +SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); + + +/* +** CAPI3REF: Error Codes And Messages +** METHOD: sqlite3 +** +** ^If the most recent sqlite3_* API call associated with +** [database connection] D failed, then the sqlite3_errcode(D) interface +** returns the numeric [result code] or [extended result code] for that +** API call. +** If the most recent API call was successful, +** then the return value from sqlite3_errcode() is undefined. +** ^The sqlite3_extended_errcode() +** interface is the same except that it always returns the +** [extended result code] even when extended result codes are +** disabled. +** +** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language +** text that describes the error, as either UTF-8 or UTF-16 respectively. +** ^(Memory to hold the error message string is managed internally. +** The application does not need to worry about freeing the result. +** However, the error string might be overwritten or deallocated by +** subsequent calls to other SQLite interface functions.)^ +** +** ^The sqlite3_errstr() interface returns the English-language text +** that describes the [result code], as UTF-8. +** ^(Memory to hold the error message string is managed internally +** and must not be freed by the application)^. +** +** When the serialized [threading mode] is in use, it might be the +** case that a second error occurs on a separate thread in between +** the time of the first error and the call to these interfaces. +** When that happens, the second error will be reported since these +** interfaces always report the most recent result. To avoid +** this, each thread can obtain exclusive use of the [database connection] D +** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning +** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after +** all calls to the interfaces listed here are completed. +** +** If an interface fails with SQLITE_MISUSE, that means the interface +** was invoked incorrectly by the application. In that case, the +** error code and message may or may not be set. +*/ +SQLITE_API int sqlite3_errcode(sqlite3 *db); +SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); +SQLITE_API const char *sqlite3_errmsg(sqlite3*); +SQLITE_API const void *sqlite3_errmsg16(sqlite3*); +SQLITE_API const char *sqlite3_errstr(int); + +/* +** CAPI3REF: Prepared Statement Object +** KEYWORDS: {prepared statement} {prepared statements} +** +** An instance of this object represents a single SQL statement that +** has been compiled into binary form and is ready to be evaluated. +** +** Think of each SQL statement as a separate computer program. The +** original SQL text is source code. A prepared statement object +** is the compiled object code. All SQL must be converted into a +** prepared statement before it can be run. +** +** The life-cycle of a prepared statement object usually goes like this: +** +**
    +**
  1. Create the prepared statement object using [sqlite3_prepare_v2()]. +**
  2. Bind values to [parameters] using the sqlite3_bind_*() +** interfaces. +**
  3. Run the SQL by calling [sqlite3_step()] one or more times. +**
  4. Reset the prepared statement using [sqlite3_reset()] then go back +** to step 2. Do this zero or more times. +**
  5. Destroy the object using [sqlite3_finalize()]. +**
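+**
+** A minimal sketch of this life-cycle, assuming a hypothetical table
+** people(name,age) and omitting error checking:
+**
+**   sqlite3_stmt *pStmt = 0;
+**   sqlite3_prepare_v2(db, "SELECT age FROM people WHERE name=?1",
+**                      -1, &pStmt, 0);                       /* 1. create */
+**   sqlite3_bind_text(pStmt, 1, "Alice", -1, SQLITE_STATIC); /* 2. bind   */
+**   while( sqlite3_step(pStmt)==SQLITE_ROW ){                /* 3. run    */
+**     printf("%d\n", sqlite3_column_int(pStmt, 0));
+**   }
+**   sqlite3_reset(pStmt);                   /* 4. reset for another run  */
+**   sqlite3_finalize(pStmt);                /* 5. destroy                */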
+*/ +typedef struct sqlite3_stmt sqlite3_stmt; + +/* +** CAPI3REF: Run-time Limits +** METHOD: sqlite3 +** +** ^(This interface allows the size of various constructs to be limited +** on a connection by connection basis. The first parameter is the +** [database connection] whose limit is to be set or queried. The +** second parameter is one of the [limit categories] that define a +** class of constructs to be size limited. The third parameter is the +** new limit for that construct.)^ +** +** ^If the new limit is a negative number, the limit is unchanged. +** ^(For each limit category SQLITE_LIMIT_NAME there is a +** [limits | hard upper bound] +** set at compile-time by a C preprocessor macro called +** [limits | SQLITE_MAX_NAME]. +** (The "_LIMIT_" in the name is changed to "_MAX_".))^ +** ^Attempts to increase a limit above its hard upper bound are +** silently truncated to the hard upper bound. +** +** ^Regardless of whether or not the limit was changed, the +** [sqlite3_limit()] interface returns the prior value of the limit. +** ^Hence, to find the current value of a limit without changing it, +** simply invoke this interface with the third parameter set to -1. +** +** Run-time limits are intended for use in applications that manage +** both their own internal database and also databases that are controlled +** by untrusted external sources. An example application might be a +** web browser that has its own databases for storing history and +** separate databases controlled by JavaScript applications downloaded +** off the Internet. The internal databases can be given the +** large, default limits. Databases managed by external sources can +** be given much smaller limits designed to prevent a denial of service +** attack. Developers might also want to use the [sqlite3_set_authorizer()] +** interface to further control untrusted SQL. The size of the database +** created by an untrusted script can be contained using the +** [max_page_count] [PRAGMA]. +** +** New run-time limit categories may be added in future releases. +*/ +SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); + +/* +** CAPI3REF: Run-Time Limit Categories +** KEYWORDS: {limit category} {*limit categories} +** +** These constants define various performance limits +** that can be lowered at run-time using [sqlite3_limit()]. +** The synopsis of the meanings of the various limits is shown below. +** Additional information is available at [limits | Limits in SQLite]. +** +**
+** [[SQLITE_LIMIT_LENGTH]] ^(
SQLITE_LIMIT_LENGTH
+**
The maximum size of any string or BLOB or table row, in bytes.
)^ +** +** [[SQLITE_LIMIT_SQL_LENGTH]] ^(
SQLITE_LIMIT_SQL_LENGTH
+**
The maximum length of an SQL statement, in bytes.
)^ +** +** [[SQLITE_LIMIT_COLUMN]] ^(
SQLITE_LIMIT_COLUMN
+**
The maximum number of columns in a table definition or in the +** result set of a [SELECT] or the maximum number of columns in an index +** or in an ORDER BY or GROUP BY clause.
)^ +** +** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(
SQLITE_LIMIT_EXPR_DEPTH
+**
The maximum depth of the parse tree on any expression.
)^ +** +** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(
SQLITE_LIMIT_COMPOUND_SELECT
+**
The maximum number of terms in a compound SELECT statement.
)^ +** +** [[SQLITE_LIMIT_VDBE_OP]] ^(
SQLITE_LIMIT_VDBE_OP
+**
The maximum number of instructions in a virtual machine program +** used to implement an SQL statement. This limit is not currently +** enforced, though that might be added in some future release of +** SQLite.
)^ +** +** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(
SQLITE_LIMIT_FUNCTION_ARG
+**
The maximum number of arguments on a function.
)^ +** +** [[SQLITE_LIMIT_ATTACHED]] ^(
SQLITE_LIMIT_ATTACHED
+**
The maximum number of [ATTACH | attached databases].)^
+** +** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]] +** ^(
SQLITE_LIMIT_LIKE_PATTERN_LENGTH
+**
The maximum length of the pattern argument to the [LIKE] or +** [GLOB] operators.
)^ +** +** [[SQLITE_LIMIT_VARIABLE_NUMBER]] +** ^(
SQLITE_LIMIT_VARIABLE_NUMBER
+**
The maximum index number of any [parameter] in an SQL statement.)^ +** +** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(
SQLITE_LIMIT_TRIGGER_DEPTH
+**
The maximum depth of recursion for triggers.
)^ +** +** [[SQLITE_LIMIT_WORKER_THREADS]] ^(
SQLITE_LIMIT_WORKER_THREADS
+**
The maximum number of auxiliary worker threads that a single +** [prepared statement] may start.
)^ +**
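+**
+** For example (the values are illustrative only), a connection that will
+** evaluate untrusted SQL might be tightened like this:
+**
+**   sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);  /* 1MB strings/BLOBs */
+**   sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, 0);      /* no ATTACH allowed */
+**   int nCur = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1);  /* query only */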
+*/ +#define SQLITE_LIMIT_LENGTH 0 +#define SQLITE_LIMIT_SQL_LENGTH 1 +#define SQLITE_LIMIT_COLUMN 2 +#define SQLITE_LIMIT_EXPR_DEPTH 3 +#define SQLITE_LIMIT_COMPOUND_SELECT 4 +#define SQLITE_LIMIT_VDBE_OP 5 +#define SQLITE_LIMIT_FUNCTION_ARG 6 +#define SQLITE_LIMIT_ATTACHED 7 +#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 +#define SQLITE_LIMIT_VARIABLE_NUMBER 9 +#define SQLITE_LIMIT_TRIGGER_DEPTH 10 +#define SQLITE_LIMIT_WORKER_THREADS 11 + +/* +** CAPI3REF: Compiling An SQL Statement +** KEYWORDS: {SQL statement compiler} +** METHOD: sqlite3 +** CONSTRUCTOR: sqlite3_stmt +** +** To execute an SQL query, it must first be compiled into a byte-code +** program using one of these routines. +** +** The first argument, "db", is a [database connection] obtained from a +** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or +** [sqlite3_open16()]. The database connection must not have been closed. +** +** The second argument, "zSql", is the statement to be compiled, encoded +** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() +** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() +** use UTF-16. +** +** ^If the nByte argument is negative, then zSql is read up to the +** first zero terminator. ^If nByte is positive, then it is the +** number of bytes read from zSql. ^If nByte is zero, then no prepared +** statement is generated. +** If the caller knows that the supplied string is nul-terminated, then +** there is a small performance advantage to passing an nByte parameter that +** is the number of bytes in the input string including +** the nul-terminator. +** +** ^If pzTail is not NULL then *pzTail is made to point to the first byte +** past the end of the first SQL statement in zSql. These routines only +** compile the first statement in zSql, so *pzTail is left pointing to +** what remains uncompiled. +** +** ^*ppStmt is left pointing to a compiled [prepared statement] that can be +** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set +** to NULL. ^If the input text contains no SQL (if the input is an empty +** string or a comment) then *ppStmt is set to NULL. +** The calling procedure is responsible for deleting the compiled +** SQL statement using [sqlite3_finalize()] after it has finished with it. +** ppStmt may not be NULL. +** +** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; +** otherwise an [error code] is returned. +** +** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are +** recommended for all new programs. The two older interfaces are retained +** for backwards compatibility, but their use is discouraged. +** ^In the "v2" interfaces, the prepared statement +** that is returned (the [sqlite3_stmt] object) contains a copy of the +** original SQL text. This causes the [sqlite3_step()] interface to +** behave differently in three ways: +** +**
    +**
1. ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
+**    always used to do, [sqlite3_step()] will automatically recompile the SQL
+**    statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
+**    retries will occur before sqlite3_step() gives up and returns an error.
+**
+** 2. ^When an error occurs, [sqlite3_step()] will return one of the detailed
+**    [error codes] or [extended error codes]. ^The legacy behavior was that
+**    [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
+**    and the application would have to make a second call to [sqlite3_reset()]
+**    in order to find the underlying cause of the problem. With the "v2" prepare
+**    interfaces, the underlying reason for the error is returned immediately.
+**
+** 3. ^If the specific value bound to [parameter | host parameter] in the
+**    WHERE clause might influence the choice of query plan for a statement,
+**    then the statement will be automatically recompiled, as if there had been
+**    a schema change, on the first [sqlite3_step()] call following any change
+**    to the [sqlite3_bind_text | bindings] of that [parameter].
+**    ^The specific value of WHERE-clause [parameter] might influence the
+**    choice of query plan if the parameter is the left-hand side of a [LIKE]
+**    or [GLOB] operator or if the parameter is compared to an indexed column
+**    and the [SQLITE_ENABLE_STAT3] compile-time option is enabled.
+**
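+** A minimal usage sketch (an illustrative addition, not part of the
+** original documentation), assuming an open [database connection] db and
+** a hypothetical table t1:
+**
+**     sqlite3_stmt *pStmt = 0;
+**     int rc = sqlite3_prepare_v2(db, "SELECT name FROM t1 WHERE id=?1",
+**                                 -1, &pStmt, 0);
+**     if( rc==SQLITE_OK ){
+**       sqlite3_bind_int(pStmt, 1, 42);
+**       while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**         /* read the current row with sqlite3_column_text() and friends */
+**       }
+**     }
+**     sqlite3_finalize(pStmt);  /* always finalize, even on error */
+**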
+*/
+SQLITE_API int sqlite3_prepare(
+  sqlite3 *db,            /* Database handle */
+  const char *zSql,       /* SQL statement, UTF-8 encoded */
+  int nByte,              /* Maximum length of zSql in bytes. */
+  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
+  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare_v2(
+  sqlite3 *db,            /* Database handle */
+  const char *zSql,       /* SQL statement, UTF-8 encoded */
+  int nByte,              /* Maximum length of zSql in bytes. */
+  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
+  const char **pzTail     /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16(
+  sqlite3 *db,            /* Database handle */
+  const void *zSql,       /* SQL statement, UTF-16 encoded */
+  int nByte,              /* Maximum length of zSql in bytes. */
+  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
+  const void **pzTail     /* OUT: Pointer to unused portion of zSql */
+);
+SQLITE_API int sqlite3_prepare16_v2(
+  sqlite3 *db,            /* Database handle */
+  const void *zSql,       /* SQL statement, UTF-16 encoded */
+  int nByte,              /* Maximum length of zSql in bytes. */
+  sqlite3_stmt **ppStmt,  /* OUT: Statement handle */
+  const void **pzTail     /* OUT: Pointer to unused portion of zSql */
+);
+
+/*
+** CAPI3REF: Retrieving Statement SQL
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8
+** SQL text used to create [prepared statement] P if P was
+** created by either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()].
+** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8
+** string containing the SQL text of prepared statement P with
+** [bound parameters] expanded.
+**
+** ^(For example, if a prepared statement is created using the SQL
+** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345
+** and parameter :xyz is unbound, then sqlite3_sql() will return
+** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql()
+** will return "SELECT 2345,NULL".)^
+**
+** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory
+** is available to hold the result, or if the result would exceed the
+** maximum string length determined by the [SQLITE_LIMIT_LENGTH].
+**
+** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of
+** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time
+** option causes sqlite3_expanded_sql() to always return NULL.
+**
+** ^The string returned by sqlite3_sql(P) is managed by SQLite and is
+** automatically freed when the prepared statement is finalized.
+** ^The string returned by sqlite3_expanded_sql(P), on the other hand,
+** is obtained from [sqlite3_malloc()] and must be freed by the application
+** by passing it to [sqlite3_free()].
+*/
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Determine If An SQL Statement Writes The Database
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if
+** and only if the [prepared statement] X makes no direct changes to
+** the content of the database file.
+**
+** Note that [application-defined SQL functions] or
+** [virtual tables] might change the database indirectly as a side effect.
+** ^(For example, if an application defines a function "eval()" that
+** calls [sqlite3_exec()], then the following SQL statement would
+** change the database file through side-effects:
+**
+**    SELECT eval('DELETE FROM t1') FROM t2;
+** 
+** +** But because the [SELECT] statement does not change the database file +** directly, sqlite3_stmt_readonly() would still return true.)^ +** +** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], +** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, +** since the statements themselves do not actually modify the database but +** rather they control the timing of when other statements modify the +** database. ^The [ATTACH] and [DETACH] statements also cause +** sqlite3_stmt_readonly() to return true since, while those statements +** change the configuration of a database connection, they do not make +** changes to the content of the database files on disk. +** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since +** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and +** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so +** sqlite3_stmt_readonly() returns false for those commands. +*/ +SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Determine If A Prepared Statement Has Been Reset +** METHOD: sqlite3_stmt +** +** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the +** [prepared statement] S has been stepped at least once using +** [sqlite3_step(S)] but has neither run to completion (returned +** [SQLITE_DONE] from [sqlite3_step(S)]) nor +** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) +** interface returns false if S is a NULL pointer. If S is not a +** NULL pointer and is not a pointer to a valid [prepared statement] +** object, then the behavior is undefined and probably undesirable. +** +** This interface can be used in combination [sqlite3_next_stmt()] +** to locate all prepared statements associated with a database +** connection that are in need of being reset. This can be used, +** for example, in diagnostic routines to search for prepared +** statements that are holding a transaction open. +*/ +SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); + +/* +** CAPI3REF: Dynamically Typed Value Object +** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} +** +** SQLite uses the sqlite3_value object to represent all values +** that can be stored in a database table. SQLite uses dynamic typing +** for the values it stores. ^Values stored in sqlite3_value objects +** can be integers, floating point values, strings, BLOBs, or NULL. +** +** An sqlite3_value object may be either "protected" or "unprotected". +** Some interfaces require a protected sqlite3_value. Other interfaces +** will accept either a protected or an unprotected sqlite3_value. +** Every interface that accepts sqlite3_value arguments specifies +** whether or not it requires a protected sqlite3_value. The +** [sqlite3_value_dup()] interface can be used to construct a new +** protected sqlite3_value from an unprotected sqlite3_value. +** +** The terms "protected" and "unprotected" refer to whether or not +** a mutex is held. An internal mutex is held for a protected +** sqlite3_value object but no mutex is held for an unprotected +** sqlite3_value object. If SQLite is compiled to be single-threaded +** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) +** or if SQLite is run in one of reduced mutex modes +** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] +** then there is no distinction between protected and unprotected +** sqlite3_value objects and they can be used interchangeably. 
However, +** for maximum code portability it is recommended that applications +** still make the distinction between protected and unprotected +** sqlite3_value objects even when not strictly required. +** +** ^The sqlite3_value objects that are passed as parameters into the +** implementation of [application-defined SQL functions] are protected. +** ^The sqlite3_value object returned by +** [sqlite3_column_value()] is unprotected. +** Unprotected sqlite3_value objects may only be used with +** [sqlite3_result_value()] and [sqlite3_bind_value()]. +** The [sqlite3_value_blob | sqlite3_value_type()] family of +** interfaces require protected sqlite3_value objects. +*/ +typedef struct Mem sqlite3_value; + +/* +** CAPI3REF: SQL Function Context Object +** +** The context in which an SQL function executes is stored in an +** sqlite3_context object. ^A pointer to an sqlite3_context object +** is always first parameter to [application-defined SQL functions]. +** The application-defined SQL function implementation will pass this +** pointer through into calls to [sqlite3_result_int | sqlite3_result()], +** [sqlite3_aggregate_context()], [sqlite3_user_data()], +** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], +** and/or [sqlite3_set_auxdata()]. +*/ +typedef struct sqlite3_context sqlite3_context; + +/* +** CAPI3REF: Binding Values To Prepared Statements +** KEYWORDS: {host parameter} {host parameters} {host parameter name} +** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} +** METHOD: sqlite3_stmt +** +** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, +** literals may be replaced by a [parameter] that matches one of following +** templates: +** +**
    +**
?
+**   ?NNN
+**   :VVV
+**   @VVV
+**   $VVV
+** +** In the templates above, NNN represents an integer literal, +** and VVV represents an alphanumeric identifier.)^ ^The values of these +** parameters (also called "host parameter names" or "SQL parameters") +** can be set using the sqlite3_bind_*() routines defined here. +** +** ^The first argument to the sqlite3_bind_*() routines is always +** a pointer to the [sqlite3_stmt] object returned from +** [sqlite3_prepare_v2()] or its variants. +** +** ^The second argument is the index of the SQL parameter to be set. +** ^The leftmost SQL parameter has an index of 1. ^When the same named +** SQL parameter is used more than once, second and subsequent +** occurrences have the same index as the first occurrence. +** ^The index for named parameters can be looked up using the +** [sqlite3_bind_parameter_index()] API if desired. ^The index +** for "?NNN" parameters is the value of NNN. +** ^The NNN value must be between 1 and the [sqlite3_limit()] +** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). +** +** ^The third argument is the value to bind to the parameter. +** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16() +** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter +** is ignored and the end result is the same as sqlite3_bind_null(). +** +** ^(In those routines that have a fourth argument, its value is the +** number of bytes in the parameter. To be clear: the value is the +** number of bytes in the value, not the number of characters.)^ +** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16() +** is negative, then the length of the string is +** the number of bytes up to the first zero terminator. +** If the fourth parameter to sqlite3_bind_blob() is negative, then +** the behavior is undefined. +** If a non-negative fourth parameter is provided to sqlite3_bind_text() +** or sqlite3_bind_text16() or sqlite3_bind_text64() then +** that parameter must be the byte offset +** where the NUL terminator would occur assuming the string were NUL +** terminated. If any NUL characters occur at byte offsets less than +** the value of the fourth parameter then the resulting string value will +** contain embedded NULs. The result of expressions involving strings +** with embedded NULs is undefined. +** +** ^The fifth argument to the BLOB and string binding interfaces +** is a destructor used to dispose of the BLOB or +** string after SQLite has finished with it. ^The destructor is called +** to dispose of the BLOB or string even if the call to bind API fails. +** ^If the fifth argument is +** the special value [SQLITE_STATIC], then SQLite assumes that the +** information is in static, unmanaged space and does not need to be freed. +** ^If the fifth argument has the value [SQLITE_TRANSIENT], then +** SQLite makes its own private copy of the data immediately, before +** the sqlite3_bind_*() routine returns. +** +** ^The sixth argument to sqlite3_bind_text64() must be one of +** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE] +** to specify the encoding of the text in the third parameter. If +** the sixth argument to sqlite3_bind_text64() is not one of the +** allowed values shown above, or if the text encoding is different +** from the encoding specified by the sixth parameter, then the behavior +** is undefined. +** +** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that +** is filled with zeroes. 
^A zeroblob uses a fixed amount of memory +** (just an integer to hold its size) while it is being processed. +** Zeroblobs are intended to serve as placeholders for BLOBs whose +** content is later written using +** [sqlite3_blob_open | incremental BLOB I/O] routines. +** ^A negative value for the zeroblob results in a zero-length BLOB. +** +** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer +** for the [prepared statement] or with a prepared statement for which +** [sqlite3_step()] has been called more recently than [sqlite3_reset()], +** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() +** routine is passed a [prepared statement] that has been finalized, the +** result is undefined and probably harmful. +** +** ^Bindings are not cleared by the [sqlite3_reset()] routine. +** ^Unbound parameters are interpreted as NULL. +** +** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an +** [error code] if anything goes wrong. +** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB +** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or +** [SQLITE_MAX_LENGTH]. +** ^[SQLITE_RANGE] is returned if the parameter +** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. +** +** See also: [sqlite3_bind_parameter_count()], +** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. +*/ +SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); +SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64, + void(*)(void*)); +SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double); +SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int); +SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); +SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int); +SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*)); +SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); +SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64, + void(*)(void*), unsigned char encoding); +SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); +SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); +SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64); + +/* +** CAPI3REF: Number Of SQL Parameters +** METHOD: sqlite3_stmt +** +** ^This routine can be used to find the number of [SQL parameters] +** in a [prepared statement]. SQL parameters are tokens of the +** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as +** placeholders for values that are [sqlite3_bind_blob | bound] +** to the parameters at a later time. +** +** ^(This routine actually returns the index of the largest (rightmost) +** parameter. For all forms except ?NNN, this will correspond to the +** number of unique parameters. If parameters of the ?NNN form are used, +** there may be gaps in the list.)^ +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_name()], and +** [sqlite3_bind_parameter_index()]. +*/ +SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*); + +/* +** CAPI3REF: Name Of A Host Parameter +** METHOD: sqlite3_stmt +** +** ^The sqlite3_bind_parameter_name(P,N) interface returns +** the name of the N-th [SQL parameter] in the [prepared statement] P. 
+** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" +** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" +** respectively. +** In other words, the initial ":" or "$" or "@" or "?" +** is included as part of the name.)^ +** ^Parameters of the form "?" without a following integer have no name +** and are referred to as "nameless" or "anonymous parameters". +** +** ^The first host parameter has an index of 1, not 0. +** +** ^If the value N is out of range or if the N-th parameter is +** nameless, then NULL is returned. ^The returned string is +** always in UTF-8 encoding even if the named parameter was +** originally specified as UTF-16 in [sqlite3_prepare16()] or +** [sqlite3_prepare16_v2()]. +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_count()], and +** [sqlite3_bind_parameter_index()]. +*/ +SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); + +/* +** CAPI3REF: Index Of A Parameter With A Given Name +** METHOD: sqlite3_stmt +** +** ^Return the index of an SQL parameter given its name. ^The +** index value returned is suitable for use as the second +** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero +** is returned if no matching parameter is found. ^The parameter +** name must be given in UTF-8 even if the original statement +** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_count()], and +** [sqlite3_bind_parameter_name()]. +*/ +SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); + +/* +** CAPI3REF: Reset All Bindings On A Prepared Statement +** METHOD: sqlite3_stmt +** +** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset +** the [sqlite3_bind_blob | bindings] on a [prepared statement]. +** ^Use this routine to reset all host parameters to NULL. +*/ +SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); + +/* +** CAPI3REF: Number Of Columns In A Result Set +** METHOD: sqlite3_stmt +** +** ^Return the number of columns in the result set returned by the +** [prepared statement]. ^If this routine returns 0, that means the +** [prepared statement] returns no data (for example an [UPDATE]). +** ^However, just because this routine returns a positive number does not +** mean that one or more rows of data will be returned. ^A SELECT statement +** will always have a positive sqlite3_column_count() but depending on the +** WHERE clause constraints and the table content, it might return no rows. +** +** See also: [sqlite3_data_count()] +*/ +SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Column Names In A Result Set +** METHOD: sqlite3_stmt +** +** ^These routines return the name assigned to a particular column +** in the result set of a [SELECT] statement. ^The sqlite3_column_name() +** interface returns a pointer to a zero-terminated UTF-8 string +** and sqlite3_column_name16() returns a pointer to a zero-terminated +** UTF-16 string. ^The first parameter is the [prepared statement] +** that implements the [SELECT] statement. ^The second parameter is the +** column number. ^The leftmost column is number 0. 
+** +** ^The returned string pointer is valid until either the [prepared statement] +** is destroyed by [sqlite3_finalize()] or until the statement is automatically +** reprepared by the first call to [sqlite3_step()] for a particular run +** or until the next call to +** sqlite3_column_name() or sqlite3_column_name16() on the same column. +** +** ^If sqlite3_malloc() fails during the processing of either routine +** (for example during a conversion from UTF-8 to UTF-16) then a +** NULL pointer is returned. +** +** ^The name of a result column is the value of the "AS" clause for +** that column, if there is an AS clause. If there is no AS clause +** then the name of the column is unspecified and may change from +** one release of SQLite to the next. +*/ +SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N); +SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); + +/* +** CAPI3REF: Source Of Data In A Query Result +** METHOD: sqlite3_stmt +** +** ^These routines provide a means to determine the database, table, and +** table column that is the origin of a particular result column in +** [SELECT] statement. +** ^The name of the database or table or column can be returned as +** either a UTF-8 or UTF-16 string. ^The _database_ routines return +** the database name, the _table_ routines return the table name, and +** the origin_ routines return the column name. +** ^The returned string is valid until the [prepared statement] is destroyed +** using [sqlite3_finalize()] or until the statement is automatically +** reprepared by the first call to [sqlite3_step()] for a particular run +** or until the same information is requested +** again in a different encoding. +** +** ^The names returned are the original un-aliased names of the +** database, table, and column. +** +** ^The first argument to these interfaces is a [prepared statement]. +** ^These functions return information about the Nth result column returned by +** the statement, where N is the second function argument. +** ^The left-most column is column 0 for these routines. +** +** ^If the Nth column returned by the statement is an expression or +** subquery and is not a column value, then all of these functions return +** NULL. ^These routine might also return NULL if a memory allocation error +** occurs. ^Otherwise, they return the name of the attached database, table, +** or column that query result column was extracted from. +** +** ^As with all other SQLite APIs, those whose names end with "16" return +** UTF-16 encoded strings and the other functions return UTF-8. +** +** ^These APIs are only available if the library was compiled with the +** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol. +** +** If two or more threads call one or more of these routines against the same +** prepared statement and column at the same time then the results are +** undefined. +** +** If two or more threads call one or more +** [sqlite3_column_database_name | column metadata interfaces] +** for the same [prepared statement] and result column +** at the same time then the results are undefined. 
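+**
+** For example (an illustrative sketch, assuming the library was built with
+** [SQLITE_ENABLE_COLUMN_METADATA] and pStmt is a prepared SELECT statement):
+**
+**     const char *zTab = sqlite3_column_table_name(pStmt, 0);
+**     const char *zCol = sqlite3_column_origin_name(pStmt, 0);
+**     /* zTab and zCol are NULL if column 0 is an expression or subquery */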
+*/ +SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int); +SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int); +SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); + +/* +** CAPI3REF: Declared Datatype Of A Query Result +** METHOD: sqlite3_stmt +** +** ^(The first parameter is a [prepared statement]. +** If this statement is a [SELECT] statement and the Nth column of the +** returned result set of that [SELECT] is a table column (not an +** expression or subquery) then the declared type of the table +** column is returned.)^ ^If the Nth column of the result set is an +** expression or subquery, then a NULL pointer is returned. +** ^The returned string is always UTF-8 encoded. +** +** ^(For example, given the database schema: +** +** CREATE TABLE t1(c1 VARIANT); +** +** and the following statement to be compiled: +** +** SELECT c1 + 1, c1 FROM t1; +** +** this routine would return the string "VARIANT" for the second result +** column (i==1), and a NULL pointer for the first result column (i==0).)^ +** +** ^SQLite uses dynamic run-time typing. ^So just because a column +** is declared to contain a particular type does not mean that the +** data stored in that column is of the declared type. SQLite is +** strongly typed, but the typing is dynamic not static. ^Type +** is associated with individual values, not with the containers +** used to hold those values. +*/ +SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); + +/* +** CAPI3REF: Evaluate An SQL Statement +** METHOD: sqlite3_stmt +** +** After a [prepared statement] has been prepared using either +** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy +** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function +** must be called one or more times to evaluate the statement. +** +** The details of the behavior of the sqlite3_step() interface depend +** on whether the statement was prepared using the newer "v2" interface +** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy +** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the +** new "v2" interface is recommended for new applications but the legacy +** interface will continue to be supported. +** +** ^In the legacy interface, the return value will be either [SQLITE_BUSY], +** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. +** ^With the "v2" interface, any of the other [result codes] or +** [extended result codes] might be returned as well. +** +** ^[SQLITE_BUSY] means that the database engine was unable to acquire the +** database locks it needs to do its job. ^If the statement is a [COMMIT] +** or occurs outside of an explicit transaction, then you can retry the +** statement. If the statement is not a [COMMIT] and occurs within an +** explicit transaction then you should rollback the transaction before +** continuing. +** +** ^[SQLITE_DONE] means that the statement has finished executing +** successfully. sqlite3_step() should not be called again on this virtual +** machine without first calling [sqlite3_reset()] to reset the virtual +** machine back to its initial state. 
+**
+** ^If the SQL statement being executed returns any data, then [SQLITE_ROW]
+** is returned each time a new row of data is ready for processing by the
+** caller. The values may be accessed using the [column access functions].
+** sqlite3_step() is called again to retrieve the next row of data.
+**
+** ^[SQLITE_ERROR] means that a run-time error (such as a constraint
+** violation) has occurred. sqlite3_step() should not be called again on
+** the VM. More information may be found by calling [sqlite3_errmsg()].
+** ^With the legacy interface, a more specific error code (for example,
+** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth)
+** can be obtained by calling [sqlite3_reset()] on the
+** [prepared statement]. ^In the "v2" interface,
+** the more specific error code is returned directly by sqlite3_step().
+**
+** [SQLITE_MISUSE] means that this routine was called inappropriately.
+** Perhaps it was called on a [prepared statement] that has
+** already been [sqlite3_finalize | finalized] or on one that had
+** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could
+** be the case that the same database connection is being used by two or
+** more threads at the same moment in time.
+**
+** For all versions of SQLite up to and including 3.6.23.1, a call to
+** [sqlite3_reset()] was required after sqlite3_step() returned anything
+** other than [SQLITE_ROW] before any subsequent invocation of
+** sqlite3_step(). Failure to reset the prepared statement using
+** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
+** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]),
+** sqlite3_step() began
+** calling [sqlite3_reset()] automatically in this circumstance rather
+** than returning [SQLITE_MISUSE]. This is not considered a compatibility
+** break because any application that ever receives an SQLITE_MISUSE error
+** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option
+** can be used to restore the legacy behavior.
+**
+** Goofy Interface Alert: In the legacy interface, the sqlite3_step()
+** API always returns a generic error code, [SQLITE_ERROR], following any
+** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call
+** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the
+** specific [error codes] that better describes the error.
+** We admit that this is a goofy design. The problem has been fixed
+** with the "v2" interface. If you prepare all of your SQL statements
+** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead
+** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces,
+** then the more specific [error codes] are returned directly
+** by sqlite3_step(). The use of the "v2" interface is recommended.
+*/
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Number of columns in a result set
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_data_count(P) interface returns the number of columns in the
+** current row of the result set of [prepared statement] P.
+** ^If prepared statement P does not have results ready to return
+** (via calls to the [sqlite3_column_int | sqlite3_column_*()] family of
+** interfaces) then sqlite3_data_count(P) returns 0.
+** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer.
+** ^The sqlite3_data_count(P) routine returns 0 if the previous call to
+** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P)
+** will return non-zero if the previous call to [sqlite3_step](P) returned
+** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum]
+** where it always returns zero since each step of that multi-step
+** pragma returns 0 columns of data.
+**
+** See also: [sqlite3_column_count()]
+*/
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Fundamental Datatypes
+** KEYWORDS: SQLITE_TEXT
+**
+** ^(Every value in SQLite has one of five fundamental datatypes:
+**
    +**
- 64-bit signed integer
+** - 64-bit IEEE floating point number
+** - string
+** - BLOB
+** - NULL)^
+**
+** These constants are codes for each of those types.
+**
+** Note that the SQLITE_TEXT constant was also used in SQLite version 2
+** for a completely different meaning. Software that links against both
+** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not
+** SQLITE_TEXT.
+*/
+#define SQLITE_INTEGER  1
+#define SQLITE_FLOAT    2
+#define SQLITE_BLOB     4
+#define SQLITE_NULL     5
+#ifdef SQLITE_TEXT
+# undef SQLITE_TEXT
+#else
+# define SQLITE_TEXT     3
+#endif
+#define SQLITE3_TEXT     3
+
+/*
+** CAPI3REF: Result Values From A Query
+** KEYWORDS: {column access functions}
+** METHOD: sqlite3_stmt
+**
+** ^These routines return information about a single column of the current
+** result row of a query. ^In every case the first argument is a pointer
+** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*]
+** that was returned from [sqlite3_prepare_v2()] or one of its variants)
+** and the second argument is the index of the column for which information
+** should be returned. ^The leftmost column of the result set has the index 0.
+** ^The number of columns in the result can be determined using
+** [sqlite3_column_count()].
+**
+** If the SQL statement does not currently point to a valid row, or if the
+** column index is out of range, the result is undefined.
+** These routines may only be called when the most recent call to
+** [sqlite3_step()] has returned [SQLITE_ROW] and neither
+** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently.
+** If any of these routines are called after [sqlite3_reset()] or
+** [sqlite3_finalize()] or after [sqlite3_step()] has returned
+** something other than [SQLITE_ROW], the results are undefined.
+** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()]
+** are called from a different thread while any of these routines
+** are pending, then the results are undefined.
+**
+** ^The sqlite3_column_type() routine returns the
+** [SQLITE_INTEGER | datatype code] for the initial data type
+** of the result column. ^The returned value is one of [SQLITE_INTEGER],
+** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value
+** returned by sqlite3_column_type() is only meaningful if no type
+** conversions have occurred as described below. After a type conversion,
+** the value returned by sqlite3_column_type() is undefined. Future
+** versions of SQLite may change the behavior of sqlite3_column_type()
+** following a type conversion.
+**
+** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes()
+** routine returns the number of bytes in that BLOB or string.
+** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts
+** the string to UTF-8 and then returns the number of bytes.
+** ^If the result is a numeric value then sqlite3_column_bytes() uses
+** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns
+** the number of bytes in that string.
+** ^If the result is NULL, then sqlite3_column_bytes() returns zero.
+**
+** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16()
+** routine returns the number of bytes in that BLOB or string.
+** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts
+** the string to UTF-16 and then returns the number of bytes.
+** ^If the result is a numeric value then sqlite3_column_bytes16() uses
+** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns
+** the number of bytes in that string.
+** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. +** +** ^The values returned by [sqlite3_column_bytes()] and +** [sqlite3_column_bytes16()] do not include the zero terminators at the end +** of the string. ^For clarity: the values returned by +** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of +** bytes in the string, not the number of characters. +** +** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), +** even empty strings, are always zero-terminated. ^The return +** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. +** +** Warning: ^The object returned by [sqlite3_column_value()] is an +** [unprotected sqlite3_value] object. In a multithreaded environment, +** an unprotected sqlite3_value object may only be used safely with +** [sqlite3_bind_value()] and [sqlite3_result_value()]. +** If the [unprotected sqlite3_value] object returned by +** [sqlite3_column_value()] is used in any other way, including calls +** to routines like [sqlite3_value_int()], [sqlite3_value_text()], +** or [sqlite3_value_bytes()], the behavior is not threadsafe. +** +** These routines attempt to convert the value where appropriate. ^For +** example, if the internal representation is FLOAT and a text result +** is requested, [sqlite3_snprintf()] is used internally to perform the +** conversion automatically. ^(The following table details the conversions +** that are applied: +** +**
+** +**
Internal Type     Requested Type     Conversion
+**    -------------     --------------     ----------------------------------
+**    NULL              INTEGER            Result is 0
+**    NULL              FLOAT              Result is 0.0
+**    NULL              TEXT               Result is a NULL pointer
+**    NULL              BLOB               Result is a NULL pointer
+**    INTEGER           FLOAT              Convert from integer to float
+**    INTEGER           TEXT               ASCII rendering of the integer
+**    INTEGER           BLOB               Same as INTEGER->TEXT
+**    FLOAT             INTEGER            [CAST] to INTEGER
+**    FLOAT             TEXT               ASCII rendering of the float
+**    FLOAT             BLOB               [CAST] to BLOB
+**    TEXT              INTEGER            [CAST] to INTEGER
+**    TEXT              FLOAT              [CAST] to REAL
+**    TEXT              BLOB               No change
+**    BLOB              INTEGER            [CAST] to INTEGER
+**    BLOB              FLOAT              [CAST] to REAL
+**    BLOB              TEXT               Add a zero terminator if needed
+** )^
+**
+** Note that when type conversions occur, pointers returned by prior
+** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or
+** sqlite3_column_text16() may be invalidated.
+** Type conversions and pointer invalidations might occur
+** in the following cases:
+**
+** - The initial content is a BLOB and sqlite3_column_text() or
+**   sqlite3_column_text16() is called. A zero-terminator might
+**   need to be added to the string.
+** - The initial content is UTF-8 text and sqlite3_column_bytes16() or
+**   sqlite3_column_text16() is called. The content must be converted
+**   to UTF-16.
+** - The initial content is UTF-16 text and sqlite3_column_bytes() or
+**   sqlite3_column_text() is called. The content must be converted
+**   to UTF-8.
+**
+** ^Conversions between UTF-16be and UTF-16le are always done in place and do
+** not invalidate a prior pointer, though of course the content of the buffer
+** that the prior pointer references will have been modified. Other kinds
+** of conversion are done in place when it is possible, but sometimes they
+** are not possible and in those cases prior pointers are invalidated.
+**
+** The safest policy is to invoke these routines
+** in one of the following ways:
+**
+** - sqlite3_column_text() followed by sqlite3_column_bytes()
+** - sqlite3_column_blob() followed by sqlite3_column_bytes()
+** - sqlite3_column_text16() followed by sqlite3_column_bytes16()
+** +** In other words, you should call sqlite3_column_text(), +** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result +** into the desired format, then invoke sqlite3_column_bytes() or +** sqlite3_column_bytes16() to find the size of the result. Do not mix calls +** to sqlite3_column_text() or sqlite3_column_blob() with calls to +** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() +** with calls to sqlite3_column_bytes(). +** +** ^The pointers returned are valid until a type conversion occurs as +** described above, or until [sqlite3_step()] or [sqlite3_reset()] or +** [sqlite3_finalize()] is called. ^The memory space used to hold strings +** and BLOBs is freed automatically. Do not pass the pointers returned +** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into +** [sqlite3_free()]. +** +** ^(If a memory allocation error occurs during the evaluation of any +** of these routines, a default value is returned. The default value +** is either the integer 0, the floating point number 0.0, or a NULL +** pointer. Subsequent calls to [sqlite3_errcode()] will return +** [SQLITE_NOMEM].)^ +*/ +SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); +SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol); +SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); +SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); +SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); +SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); + +/* +** CAPI3REF: Destroy A Prepared Statement Object +** DESTRUCTOR: sqlite3_stmt +** +** ^The sqlite3_finalize() function is called to delete a [prepared statement]. +** ^If the most recent evaluation of the statement encountered no errors +** or if the statement is never been evaluated, then sqlite3_finalize() returns +** SQLITE_OK. ^If the most recent evaluation of statement S failed, then +** sqlite3_finalize(S) returns the appropriate [error code] or +** [extended error code]. +** +** ^The sqlite3_finalize(S) routine can be called at any point during +** the life cycle of [prepared statement] S: +** before statement S is ever evaluated, after +** one or more calls to [sqlite3_reset()], or after any call +** to [sqlite3_step()] regardless of whether or not the statement has +** completed execution. +** +** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. +** +** The application must finalize every [prepared statement] in order to avoid +** resource leaks. It is a grievous error for the application to try to use +** a prepared statement after it has been finalized. Any use of a prepared +** statement after it has been finalized can result in undefined and +** undesirable behavior such as segfaults and heap corruption. +*/ +SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Reset A Prepared Statement Object +** METHOD: sqlite3_stmt +** +** The sqlite3_reset() function is called to reset a [prepared statement] +** object back to its initial state, ready to be re-executed. +** ^Any SQL statement variables that had values bound to them using +** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. 
+** Use [sqlite3_clear_bindings()] to reset the bindings. +** +** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S +** back to the beginning of its program. +** +** ^If the most recent call to [sqlite3_step(S)] for the +** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], +** or if [sqlite3_step(S)] has never before been called on S, +** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** +** ^If the most recent call to [sqlite3_step(S)] for the +** [prepared statement] S indicated an error, then +** [sqlite3_reset(S)] returns an appropriate [error code]. +** +** ^The [sqlite3_reset(S)] interface does not change the values +** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. +*/ +SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Create Or Redefine SQL Functions +** KEYWORDS: {function creation routines} +** KEYWORDS: {application-defined SQL function} +** KEYWORDS: {application-defined SQL functions} +** METHOD: sqlite3 +** +** ^These functions (collectively known as "function creation routines") +** are used to add SQL functions or aggregates or to redefine the behavior +** of existing SQL functions or aggregates. The only differences between +** these routines are the text encoding expected for +** the second parameter (the name of the function being created) +** and the presence or absence of a destructor callback for +** the application data pointer. +** +** ^The first parameter is the [database connection] to which the SQL +** function is to be added. ^If an application uses more than one database +** connection then application-defined SQL functions must be added +** to each database connection separately. +** +** ^The second parameter is the name of the SQL function to be created or +** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 +** representation, exclusive of the zero-terminator. ^Note that the name +** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. +** ^Any attempt to create a function with a longer name +** will result in [SQLITE_MISUSE] being returned. +** +** ^The third parameter (nArg) +** is the number of arguments that the SQL function or +** aggregate takes. ^If this parameter is -1, then the SQL function or +** aggregate may take any number of arguments between 0 and the limit +** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third +** parameter is less than -1 or greater than 127 then the behavior is +** undefined. +** +** ^The fourth parameter, eTextRep, specifies what +** [SQLITE_UTF8 | text encoding] this SQL function prefers for +** its parameters. The application should set this parameter to +** [SQLITE_UTF16LE] if the function implementation invokes +** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the +** implementation invokes [sqlite3_value_text16be()] on an input, or +** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8] +** otherwise. ^The same SQL function may be registered multiple times using +** different preferred text encodings, with different implementations for +** each encoding. +** ^When multiple implementations of the same function are available, SQLite +** will pick the one that involves the least amount of data conversion. +** +** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC] +** to signal that the function will always return the same result given +** the same inputs within a single SQL statement. Most SQL functions are +** deterministic. 
The built-in [random()] SQL function is an example of a
+** function that is not deterministic. The SQLite query planner is able to
+** perform additional optimizations on deterministic functions, so use
+** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
+**
+** ^(The fifth parameter is an arbitrary pointer. The implementation of the
+** function can gain access to this pointer using [sqlite3_user_data()].)^
+**
+** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are
+** pointers to C-language functions that implement the SQL function or
+** aggregate. ^A scalar SQL function requires an implementation of the xFunc
+** callback only; NULL pointers must be passed as the xStep and xFinal
+** parameters. ^An aggregate SQL function requires an implementation of xStep
+** and xFinal and a NULL pointer must be passed for xFunc. ^To delete an
+** existing SQL function or aggregate, pass NULL pointers for all three
+** function callbacks.
+**
+** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL,
+** then it is the destructor for the application data pointer.
+** The destructor is invoked when the function is deleted, either by being
+** overloaded or when the database connection closes.)^
+** ^The destructor is also invoked if the call to
+** sqlite3_create_function_v2() fails.
+** ^When the destructor callback is invoked, it
+** is passed a single argument which is a copy of the application data
+** pointer which was the fifth parameter to sqlite3_create_function_v2().
+**
+** ^It is permitted to register multiple implementations of the same
+** function with the same name but with either differing numbers of
+** arguments or differing preferred text encodings. ^SQLite will use
+** the implementation that most closely matches the way in which the
+** SQL function is used. ^A function implementation with a non-negative
+** nArg parameter is a better match than a function implementation with
+** a negative nArg. ^A function where the preferred text encoding
+** matches the database encoding is a better
+** match than a function where the encoding is different.
+** ^A function where the encoding difference is between UTF16le and UTF16be
+** is a closer match than a function where the encoding difference is
+** between UTF8 and UTF16.
+**
+** ^Built-in functions may be overloaded by new application-defined functions.
+**
+** ^An application-defined function is permitted to call other
+** SQLite interfaces. However, such calls must not
+** close the database connection nor finalize or reset the prepared
+** statement in which the function is running.
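+**
+** For example (an illustrative sketch, not part of the original
+** documentation): registering a deterministic scalar function half(X)
+** on an open [database connection] db:
+**
+**     static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
+**       sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
+**     }
+**
+**     int rc = sqlite3_create_function_v2(db, "half", 1,
+**                  SQLITE_UTF8|SQLITE_DETERMINISTIC, 0,
+**                  halfFunc, 0, 0, 0);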
+*/ +SQLITE_API int sqlite3_create_function( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +); +SQLITE_API int sqlite3_create_function16( + sqlite3 *db, + const void *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +); +SQLITE_API int sqlite3_create_function_v2( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*), + void(*xDestroy)(void*) +); + +/* +** CAPI3REF: Text Encodings +** +** These constant define integer codes that represent the various +** text encodings supported by SQLite. +*/ +#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */ +#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */ +#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */ +#define SQLITE_UTF16 4 /* Use native byte order */ +#define SQLITE_ANY 5 /* Deprecated */ +#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ + +/* +** CAPI3REF: Function Flags +** +** These constants may be ORed together with the +** [SQLITE_UTF8 | preferred text encoding] as the fourth argument +** to [sqlite3_create_function()], [sqlite3_create_function16()], or +** [sqlite3_create_function_v2()]. +*/ +#define SQLITE_DETERMINISTIC 0x800 + +/* +** CAPI3REF: Deprecated Functions +** DEPRECATED +** +** These functions are [deprecated]. In order to maintain +** backwards compatibility with older code, these functions continue +** to be supported. However, new applications should avoid +** the use of these functions. To encourage programmers to avoid +** these functions, we will not explain what they do. +*/ +#ifndef SQLITE_OMIT_DEPRECATED +SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); +SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); +SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); +SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void); +SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); +SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int), + void*,sqlite3_int64); +#endif + +/* +** CAPI3REF: Obtaining SQL Values +** METHOD: sqlite3_value +** +** The C-language implementation of SQL functions and aggregates uses +** this set of interface routines to access the parameter values on +** the function or aggregate. +** +** The xFunc (for scalar functions) or xStep (for aggregates) parameters +** to [sqlite3_create_function()] and [sqlite3_create_function16()] +** define callbacks that implement the SQL functions and aggregates. +** The 3rd parameter to these callbacks is an array of pointers to +** [protected sqlite3_value] objects. There is one [sqlite3_value] object for +** each parameter to the SQL function. These routines are used to +** extract values from the [sqlite3_value] objects. +** +** These routines work only with [protected sqlite3_value] objects. +** Any attempt to use these routines on an [unprotected sqlite3_value] +** object results in undefined behavior. 
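+**
+** For example (an illustrative sketch): inside an xFunc callback, the
+** first argument might be checked and extracted like so:
+**
+**     if( sqlite3_value_type(argv[0])==SQLITE_INTEGER ){
+**       sqlite3_int64 v = sqlite3_value_int64(argv[0]);
+**       /* ... use v ... */
+**     }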
+** +** ^These routines work just like the corresponding [column access functions] +** except that these routines take a single [protected sqlite3_value] object +** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. +** +** ^The sqlite3_value_text16() interface extracts a UTF-16 string +** in the native byte-order of the host machine. ^The +** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces +** extract UTF-16 strings as big-endian and little-endian respectively. +** +** ^(The sqlite3_value_numeric_type() interface attempts to apply +** numeric affinity to the value. This means that an attempt is +** made to convert the value to an integer or floating point. If +** such a conversion is possible without loss of information (in other +** words, if the value is a string that looks like a number) +** then the conversion is performed. Otherwise no conversion occurs. +** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ +** +** Please pay particular attention to the fact that the pointer returned +** from [sqlite3_value_blob()], [sqlite3_value_text()], or +** [sqlite3_value_text16()] can be invalidated by a subsequent call to +** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], +** or [sqlite3_value_text16()]. +** +** These routines must be called from the same thread as +** the SQL function that supplied the [sqlite3_value*] parameters. +*/ +SQLITE_API const void *sqlite3_value_blob(sqlite3_value*); +SQLITE_API int sqlite3_value_bytes(sqlite3_value*); +SQLITE_API int sqlite3_value_bytes16(sqlite3_value*); +SQLITE_API double sqlite3_value_double(sqlite3_value*); +SQLITE_API int sqlite3_value_int(sqlite3_value*); +SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*); +SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*); +SQLITE_API const void *sqlite3_value_text16(sqlite3_value*); +SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*); +SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*); +SQLITE_API int sqlite3_value_type(sqlite3_value*); +SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); + +/* +** CAPI3REF: Finding The Subtype Of SQL Values +** METHOD: sqlite3_value +** +** The sqlite3_value_subtype(V) function returns the subtype for +** an [application-defined SQL function] argument V. The subtype +** information can be used to pass a limited amount of context from +** one SQL function to another. Use the [sqlite3_result_subtype()] +** routine to set the subtype for the return value of an SQL function. +** +** SQLite makes no use of subtype itself. It merely passes the subtype +** from the result of one [application-defined SQL function] into the +** input of another. +*/ +SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); + +/* +** CAPI3REF: Copy And Free SQL Values +** METHOD: sqlite3_value +** +** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] +** object D and returns a pointer to that copy. ^The [sqlite3_value] returned +** is a [protected sqlite3_value] object even if the input is not. +** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a +** memory allocation fails. +** +** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object +** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer +** then sqlite3_value_free(V) is a harmless no-op. 
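+**
+** For example (an illustrative sketch): to retain a function argument
+** beyond the end of the callback, make a protected copy and release it
+** when done:
+**
+**     sqlite3_value *pCopy = sqlite3_value_dup(argv[0]);
+**     if( pCopy ){
+**       /* ... use pCopy, possibly after the callback has returned ... */
+**       sqlite3_value_free(pCopy);
+**     }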
+*/ +SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*); +SQLITE_API void sqlite3_value_free(sqlite3_value*); + +/* +** CAPI3REF: Obtain Aggregate Function Context +** METHOD: sqlite3_context +** +** Implementations of aggregate SQL functions use this +** routine to allocate memory for storing their state. +** +** ^The first time the sqlite3_aggregate_context(C,N) routine is called +** for a particular aggregate function, SQLite +** allocates N of memory, zeroes out that memory, and returns a pointer +** to the new memory. ^On second and subsequent calls to +** sqlite3_aggregate_context() for the same aggregate function instance, +** the same buffer is returned. Sqlite3_aggregate_context() is normally +** called once for each invocation of the xStep callback and then one +** last time when the xFinal callback is invoked. ^(When no rows match +** an aggregate query, the xStep() callback of the aggregate function +** implementation is never called and xFinal() is called exactly once. +** In those cases, sqlite3_aggregate_context() might be called for the +** first time from within xFinal().)^ +** +** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer +** when first called if N is less than or equal to zero or if a memory +** allocate error occurs. +** +** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is +** determined by the N parameter on first successful call. Changing the +** value of N in subsequent call to sqlite3_aggregate_context() within +** the same aggregate function instance will not resize the memory +** allocation.)^ Within the xFinal callback, it is customary to set +** N=0 in calls to sqlite3_aggregate_context(C,N) so that no +** pointless memory allocations occur. +** +** ^SQLite automatically frees the memory allocated by +** sqlite3_aggregate_context() when the aggregate query concludes. +** +** The first parameter must be a copy of the +** [sqlite3_context | SQL function context] that is the first parameter +** to the xStep or xFinal callback routine that implements the aggregate +** function. +** +** This routine must be called from the same thread in which +** the aggregate SQL function is running. +*/ +SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); + +/* +** CAPI3REF: User Data For Functions +** METHOD: sqlite3_context +** +** ^The sqlite3_user_data() interface returns a copy of +** the pointer that was the pUserData parameter (the 5th parameter) +** of the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines that originally +** registered the application defined function. +** +** This routine must be called from the same thread in which +** the application-defined function is running. +*/ +SQLITE_API void *sqlite3_user_data(sqlite3_context*); + +/* +** CAPI3REF: Database Connection For Functions +** METHOD: sqlite3_context +** +** ^The sqlite3_context_db_handle() interface returns a copy of +** the pointer to the [database connection] (the 1st parameter) +** of the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines that originally +** registered the application defined function. +*/ +SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); + +/* +** CAPI3REF: Function Auxiliary Data +** METHOD: sqlite3_context +** +** These functions may be used by (non-aggregate) SQL functions to +** associate metadata with argument values. 
If the same value is passed to +** multiple invocations of the same SQL function during query execution, under +** some circumstances the associated metadata may be preserved. An example +** of where this might be useful is in a regular-expression matching +** function. The compiled version of the regular expression can be stored as +** metadata associated with the pattern string. +** Then as long as the pattern string remains the same, +** the compiled regular expression can be reused on multiple +** invocations of the same function. +** +** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata +** associated by the sqlite3_set_auxdata() function with the Nth argument +** value to the application-defined function. ^If there is no metadata +** associated with the function argument, this sqlite3_get_auxdata() interface +** returns a NULL pointer. +** +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th +** argument of the application-defined function. ^Subsequent +** calls to sqlite3_get_auxdata(C,N) return P from the most recent +** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or +** NULL if the metadata has been discarded. +** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, +** SQLite will invoke the destructor function X with parameter P exactly +** once, when the metadata is discarded. +** SQLite is free to discard the metadata at any time, including:
    +**
  • ^(when the corresponding function parameter changes)^, or +**
  • ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the +** SQL statement)^, or +**
  • ^(when sqlite3_set_auxdata() is invoked again on the same +** parameter)^, or +**
  • ^(during the original sqlite3_set_auxdata() call when a memory +** allocation error occurs.)^
+** +** Note the last bullet in particular. The destructor X in +** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the +** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() +** should be called near the end of the function implementation and the +** function implementation should not make any use of P after +** sqlite3_set_auxdata() has been called. +** +** ^(In practice, metadata is preserved between function calls for +** function parameters that are compile-time constants, including literal +** values and [parameters] and expressions composed from the same.)^ +** +** These routines must be called from the same thread in which +** the SQL function is running. +*/ +SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); +SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); + + +/* +** CAPI3REF: Constants Defining Special Destructor Behavior +** +** These are special values for the destructor that is passed in as the +** final argument to routines like [sqlite3_result_blob()]. ^If the destructor +** argument is SQLITE_STATIC, it means that the content pointer is constant +** and will never change. It does not need to be destroyed. ^The +** SQLITE_TRANSIENT value means that the content will likely change in +** the near future and that SQLite should make its own private copy of +** the content before returning. +** +** The typedef is necessary to work around problems in certain +** C++ compilers. +*/ +typedef void (*sqlite3_destructor_type)(void*); +#define SQLITE_STATIC ((sqlite3_destructor_type)0) +#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) + +/* +** CAPI3REF: Setting The Result Of An SQL Function +** METHOD: sqlite3_context +** +** These routines are used by the xFunc or xFinal callbacks that +** implement SQL functions and aggregates. See +** [sqlite3_create_function()] and [sqlite3_create_function16()] +** for additional information. +** +** These functions work very much like the [parameter binding] family of +** functions used to bind values to host parameters in prepared statements. +** Refer to the [SQL parameter] documentation for additional information. +** +** ^The sqlite3_result_blob() interface sets the result from +** an application-defined function to be the BLOB whose content is pointed +** to by the second parameter and which is N bytes long where N is the +** third parameter. +** +** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N) +** interfaces set the result of the application-defined function to be +** a BLOB containing all zero bytes and N bytes in size. +** +** ^The sqlite3_result_double() interface sets the result from +** an application-defined function to be a floating point value specified +** by its 2nd argument. +** +** ^The sqlite3_result_error() and sqlite3_result_error16() functions +** cause the implemented SQL function to throw an exception. +** ^SQLite uses the string pointed to by the +** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() +** as the text of an error message. ^SQLite interprets the error +** message string from sqlite3_result_error() as UTF-8. ^SQLite +** interprets the string from sqlite3_result_error16() as UTF-16 in native +** byte order. ^If the third parameter to sqlite3_result_error() +** or sqlite3_result_error16() is negative then SQLite takes as the error +** message all text up through the first zero character. 
+** ^If the third parameter to sqlite3_result_error() or
+** sqlite3_result_error16() is non-negative then SQLite takes that many
+** bytes (not characters) from the 2nd parameter as the error message.
+** ^The sqlite3_result_error() and sqlite3_result_error16()
+** routines make a private copy of the error message text before
+** they return. Hence, the calling function can deallocate or
+** modify the text after they return without harm.
+** ^The sqlite3_result_error_code() function changes the error code
+** returned by SQLite as a result of an error in a function. ^By default,
+** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error()
+** or sqlite3_result_error16() resets the error code to SQLITE_ERROR.
+**
+** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an
+** error indicating that a string or BLOB is too long to represent.
+**
+** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an
+** error indicating that a memory allocation failed.
+**
+** ^The sqlite3_result_int() interface sets the return value
+** of the application-defined function to be the 32-bit signed integer
+** value given in the 2nd argument.
+** ^The sqlite3_result_int64() interface sets the return value
+** of the application-defined function to be the 64-bit signed integer
+** value given in the 2nd argument.
+**
+** ^The sqlite3_result_null() interface sets the return value
+** of the application-defined function to be NULL.
+**
+** ^The sqlite3_result_text(), sqlite3_result_text16(),
+** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces
+** set the return value of the application-defined function to be
+** a text string which is represented as UTF-8, UTF-16 native byte order,
+** UTF-16 little endian, or UTF-16 big endian, respectively.
+** ^The sqlite3_result_text64() interface sets the return value of an
+** application-defined function to be a text string in an encoding
+** specified by the fifth (and last) parameter, which must be one
+** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
+** ^SQLite takes the text result from the application from
+** the 2nd parameter of the sqlite3_result_text* interfaces.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is negative, then SQLite takes result text from the 2nd parameter
+** through the first zero character.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is non-negative, then as many bytes (not characters) of the text
+** pointed to by the 2nd parameter are taken as the application-defined
+** function result. If the 3rd parameter is non-negative, then it
+** must be the byte offset into the string where the NUL terminator would
+** appear if the string were NUL terminated. If any NUL characters occur
+** in the string at a byte offset that is less than the value of the 3rd
+** parameter, then the resulting string will contain embedded NULs and the
+** result of expressions operating on strings with embedded NULs is undefined.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that
+** function as the destructor on the text or BLOB result when it has
+** finished using that result.
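+**
+** For example, an application-defined function that builds its result
+** with [sqlite3_mprintf()] can hand ownership of the string to SQLite by
+** passing [sqlite3_free] as the destructor (a minimal sketch; "ctx" stands
+** for the [sqlite3_context] pointer passed to the function):
+**
+**    char *z = sqlite3_mprintf("result #%d", 42);
+**    if( z==0 ){
+**      sqlite3_result_error_nomem(ctx);
+**    }else{
+**      sqlite3_result_text(ctx, z, -1, sqlite3_free);
+**    }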
+** ^If the 4th parameter to the sqlite3_result_text* interfaces or to
+** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite
+** assumes that the text or BLOB result is in constant space and does not
+** copy the content of the parameter nor call a destructor on the content
+** when it has finished using that result.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT
+** then SQLite makes a copy of the result into space obtained
+** from [sqlite3_malloc()] before it returns.
+**
+** ^The sqlite3_result_value() interface sets the result of
+** the application-defined function to be a copy of the
+** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The
+** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
+** so that the [sqlite3_value] specified in the parameter may change or
+** be deallocated after sqlite3_result_value() returns without harm.
+** ^A [protected sqlite3_value] object may always be used where an
+** [unprotected sqlite3_value] object is required, so either
+** kind of [sqlite3_value] object can be used with this interface.
+**
+** If these routines are called from within a different thread
+** than the one containing the application-defined function that received
+** the [sqlite3_context] pointer, the results are undefined.
+*/
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*,
+                           sqlite3_uint64,void(*)(void*));
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
+                           void(*)(void*), unsigned char encoding);
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
+
+
+/*
+** CAPI3REF: Setting The Subtype Of An SQL Function
+** METHOD: sqlite3_context
+**
+** The sqlite3_result_subtype(C,T) function causes the subtype of
+** the result from the [application-defined SQL function] with
+** [sqlite3_context] C to be the value T. Only the lower 8 bits
+** of the subtype T are preserved in current versions of SQLite;
+** higher order bits are discarded.
+** The number of subtype bits preserved by SQLite might increase
+** in future releases of SQLite.
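+**
+** For example, a function might tag its result with an application-chosen
+** subtype (an illustrative sketch; the value 74 and the zJson string are
+** assumptions of this example and mean nothing to SQLite itself):
+**
+**    sqlite3_result_text(ctx, zJson, -1, SQLITE_TRANSIENT);
+**    sqlite3_result_subtype(ctx, 74);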
+*/ +SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); + +/* +** CAPI3REF: Define New Collating Sequences +** METHOD: sqlite3 +** +** ^These functions add, remove, or modify a [collation] associated +** with the [database connection] specified as the first argument. +** +** ^The name of the collation is a UTF-8 string +** for sqlite3_create_collation() and sqlite3_create_collation_v2() +** and a UTF-16 string in native byte order for sqlite3_create_collation16(). +** ^Collation names that compare equal according to [sqlite3_strnicmp()] are +** considered to be the same name. +** +** ^(The third argument (eTextRep) must be one of the constants: +**
    +**
  • [SQLITE_UTF8], +**
  • [SQLITE_UTF16LE], +**
  • [SQLITE_UTF16BE], +**
  • [SQLITE_UTF16], or +**
  • [SQLITE_UTF16_ALIGNED]. +**
)^ +** ^The eTextRep argument determines the encoding of strings passed +** to the collating function callback, xCallback. +** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep +** force strings to be UTF16 with native byte order. +** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin +** on an even byte address. +** +** ^The fourth argument, pArg, is an application data pointer that is passed +** through as the first argument to the collating function callback. +** +** ^The fifth argument, xCallback, is a pointer to the collating function. +** ^Multiple collating functions can be registered using the same name but +** with different eTextRep parameters and SQLite will use whichever +** function requires the least amount of data transformation. +** ^If the xCallback argument is NULL then the collating function is +** deleted. ^When all collating functions having the same name are deleted, +** that collation is no longer usable. +** +** ^The collating function callback is invoked with a copy of the pArg +** application data pointer and with two strings in the encoding specified +** by the eTextRep argument. The collating function must return an +** integer that is negative, zero, or positive +** if the first string is less than, equal to, or greater than the second, +** respectively. A collating function must always return the same answer +** given the same inputs. If two or more collating functions are registered +** to the same collation name (using different eTextRep values) then all +** must give an equivalent answer when invoked with equivalent strings. +** The collating function must obey the following properties for all +** strings A, B, and C: +** +**
    +**
  1. If A==B then B==A. +**
  2. If A==B and B==C then A==C. +**
  3. If A<B then B>A.
+**
  4. If A<B and B<C then A<C. +**
+** +** If a collating function fails any of the above constraints and that +** collating function is registered and used, then the behavior of SQLite +** is undefined. +** +** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation() +** with the addition that the xDestroy callback is invoked on pArg when +** the collating function is deleted. +** ^Collating functions are deleted when they are overridden by later +** calls to the collation creation functions or when the +** [database connection] is closed using [sqlite3_close()]. +** +** ^The xDestroy callback is not called if the +** sqlite3_create_collation_v2() function fails. Applications that invoke +** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should +** check the return code and dispose of the application data pointer +** themselves rather than expecting SQLite to deal with it for them. +** This is different from every other SQLite interface. The inconsistency +** is unfortunate but cannot be changed without breaking backwards +** compatibility. +** +** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. +*/ +SQLITE_API int sqlite3_create_collation( + sqlite3*, + const char *zName, + int eTextRep, + void *pArg, + int(*xCompare)(void*,int,const void*,int,const void*) +); +SQLITE_API int sqlite3_create_collation_v2( + sqlite3*, + const char *zName, + int eTextRep, + void *pArg, + int(*xCompare)(void*,int,const void*,int,const void*), + void(*xDestroy)(void*) +); +SQLITE_API int sqlite3_create_collation16( + sqlite3*, + const void *zName, + int eTextRep, + void *pArg, + int(*xCompare)(void*,int,const void*,int,const void*) +); + +/* +** CAPI3REF: Collation Needed Callbacks +** METHOD: sqlite3 +** +** ^To avoid having to register all collation sequences before a database +** can be used, a single callback function may be registered with the +** [database connection] to be invoked whenever an undefined collation +** sequence is required. +** +** ^If the function is registered using the sqlite3_collation_needed() API, +** then it is passed the names of undefined collation sequences as strings +** encoded in UTF-8. ^If sqlite3_collation_needed16() is used, +** the names are passed as UTF-16 in machine native byte order. +** ^A call to either function replaces the existing collation-needed callback. +** +** ^(When the callback is invoked, the first argument passed is a copy +** of the second argument to sqlite3_collation_needed() or +** sqlite3_collation_needed16(). The second argument is the database +** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], +** or [SQLITE_UTF16LE], indicating the most desirable form of the collation +** sequence function required. The fourth parameter is the name of the +** required collation sequence.)^ +** +** The callback function should register the desired collation using +** [sqlite3_create_collation()], [sqlite3_create_collation16()], or +** [sqlite3_create_collation_v2()]. +*/ +SQLITE_API int sqlite3_collation_needed( + sqlite3*, + void*, + void(*)(void*,sqlite3*,int eTextRep,const char*) +); +SQLITE_API int sqlite3_collation_needed16( + sqlite3*, + void*, + void(*)(void*,sqlite3*,int eTextRep,const void*) +); + +#ifdef SQLITE_HAS_CODEC +/* +** Specify the key for an encrypted database. This routine should be +** called right after sqlite3_open(). +** +** The code to implement this API is not available in the public release +** of SQLite. 
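+**
+** A minimal usage sketch (assuming a SEE-enabled build; the filename and
+** passphrase below are illustrative):
+**
+**    sqlite3 *db;
+**    if( sqlite3_open("secret.db", &db)==SQLITE_OK ){
+**      sqlite3_key(db, "my passphrase", 13);   /* key length in bytes */
+**    }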
+*/ +SQLITE_API int sqlite3_key( + sqlite3 *db, /* Database to be rekeyed */ + const void *pKey, int nKey /* The key */ +); +SQLITE_API int sqlite3_key_v2( + sqlite3 *db, /* Database to be rekeyed */ + const char *zDbName, /* Name of the database */ + const void *pKey, int nKey /* The key */ +); + +/* +** Change the key on an open database. If the current database is not +** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the +** database is decrypted. +** +** The code to implement this API is not available in the public release +** of SQLite. +*/ +SQLITE_API int sqlite3_rekey( + sqlite3 *db, /* Database to be rekeyed */ + const void *pKey, int nKey /* The new key */ +); +SQLITE_API int sqlite3_rekey_v2( + sqlite3 *db, /* Database to be rekeyed */ + const char *zDbName, /* Name of the database */ + const void *pKey, int nKey /* The new key */ +); + +/* +** Specify the activation key for a SEE database. Unless +** activated, none of the SEE routines will work. +*/ +SQLITE_API void sqlite3_activate_see( + const char *zPassPhrase /* Activation phrase */ +); +#endif + +#ifdef SQLITE_ENABLE_CEROD +/* +** Specify the activation key for a CEROD database. Unless +** activated, none of the CEROD routines will work. +*/ +SQLITE_API void sqlite3_activate_cerod( + const char *zPassPhrase /* Activation phrase */ +); +#endif + +/* +** CAPI3REF: Suspend Execution For A Short Time +** +** The sqlite3_sleep() function causes the current thread to suspend execution +** for at least a number of milliseconds specified in its parameter. +** +** If the operating system does not support sleep requests with +** millisecond time resolution, then the time will be rounded up to +** the nearest second. The number of milliseconds of sleep actually +** requested from the operating system is returned. +** +** ^SQLite implements this interface by calling the xSleep() +** method of the default [sqlite3_vfs] object. If the xSleep() method +** of the default VFS is not implemented correctly, or not implemented at +** all, then the behavior of sqlite3_sleep() may deviate from the description +** in the previous paragraphs. +*/ +SQLITE_API int sqlite3_sleep(int); + +/* +** CAPI3REF: Name Of The Folder Holding Temporary Files +** +** ^(If this global variable is made to point to a string which is +** the name of a folder (a.k.a. directory), then all temporary files +** created by SQLite when using a built-in [sqlite3_vfs | VFS] +** will be placed in that directory.)^ ^If this variable +** is a NULL pointer, then SQLite performs a search for an appropriate +** temporary file directory. +** +** Applications are strongly discouraged from using this global variable. +** It is required to set a temporary folder on Windows Runtime (WinRT). +** But for all other platforms, it is highly recommended that applications +** neither read nor write this variable. This global variable is a relic +** that exists for backwards compatibility of legacy applications and should +** be avoided in new projects. +** +** It is not safe to read or modify this variable in more than one +** thread at a time. It is not safe to read or modify this variable +** if a [database connection] is being used at the same time in a separate +** thread. +** It is intended that this variable be set once +** as part of process initialization and before any SQLite interface +** routines have been called and that this variable remain unchanged +** thereafter. 
+** +** ^The [temp_store_directory pragma] may modify this variable and cause +** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, +** the [temp_store_directory pragma] always assumes that any string +** that this variable points to is held in memory obtained from +** [sqlite3_malloc] and the pragma may attempt to free that memory +** using [sqlite3_free]. +** Hence, if this variable is modified directly, either it should be +** made NULL or made to point to memory obtained from [sqlite3_malloc] +** or else the use of the [temp_store_directory pragma] should be avoided. +** Except when requested by the [temp_store_directory pragma], SQLite +** does not free the memory that sqlite3_temp_directory points to. If +** the application wants that memory to be freed, it must do +** so itself, taking care to only do so after all [database connection] +** objects have been destroyed. +** +** Note to Windows Runtime users: The temporary directory must be set +** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various +** features that require the use of temporary files may fail. Here is an +** example of how to do this using C++ with the Windows Runtime: +** +**
+** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
+**       TemporaryFolder->Path->Data();
+** char zPathBuf[MAX_PATH + 1];
+** memset(zPathBuf, 0, sizeof(zPathBuf));
+** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
+**       NULL, NULL);
+** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
+** 
+*/ +SQLITE_API char *sqlite3_temp_directory; + +/* +** CAPI3REF: Name Of The Folder Holding Database Files +** +** ^(If this global variable is made to point to a string which is +** the name of a folder (a.k.a. directory), then all database files +** specified with a relative pathname and created or accessed by +** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed +** to be relative to that directory.)^ ^If this variable is a NULL +** pointer, then SQLite assumes that all database files specified +** with a relative pathname are relative to the current directory +** for the process. Only the windows VFS makes use of this global +** variable; it is ignored by the unix VFS. +** +** Changing the value of this variable while a database connection is +** open can result in a corrupt database. +** +** It is not safe to read or modify this variable in more than one +** thread at a time. It is not safe to read or modify this variable +** if a [database connection] is being used at the same time in a separate +** thread. +** It is intended that this variable be set once +** as part of process initialization and before any SQLite interface +** routines have been called and that this variable remain unchanged +** thereafter. +** +** ^The [data_store_directory pragma] may modify this variable and cause +** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, +** the [data_store_directory pragma] always assumes that any string +** that this variable points to is held in memory obtained from +** [sqlite3_malloc] and the pragma may attempt to free that memory +** using [sqlite3_free]. +** Hence, if this variable is modified directly, either it should be +** made NULL or made to point to memory obtained from [sqlite3_malloc] +** or else the use of the [data_store_directory pragma] should be avoided. +*/ +SQLITE_API char *sqlite3_data_directory; + +/* +** CAPI3REF: Test For Auto-Commit Mode +** KEYWORDS: {autocommit mode} +** METHOD: sqlite3 +** +** ^The sqlite3_get_autocommit() interface returns non-zero or +** zero if the given database connection is or is not in autocommit mode, +** respectively. ^Autocommit mode is on by default. +** ^Autocommit mode is disabled by a [BEGIN] statement. +** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. +** +** If certain kinds of errors occur on a statement within a multi-statement +** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], +** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the +** transaction might be rolled back automatically. The only way to +** find out whether SQLite automatically rolled back the transaction after +** an error is to use this function. +** +** If another thread changes the autocommit status of the database +** connection while this routine is running, then the return value +** is undefined. +*/ +SQLITE_API int sqlite3_get_autocommit(sqlite3*); + +/* +** CAPI3REF: Find The Database Handle Of A Prepared Statement +** METHOD: sqlite3_stmt +** +** ^The sqlite3_db_handle interface returns the [database connection] handle +** to which a [prepared statement] belongs. ^The [database connection] +** returned by sqlite3_db_handle is the same [database connection] +** that was the first argument +** to the [sqlite3_prepare_v2()] call (or its variants) that was used to +** create the statement in the first place. 
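+**
+** For example, a small helper can report the error message associated
+** with a failing statement by recovering the connection from the
+** statement itself (a sketch; stmtError is an illustrative name):
+**
+**    static void stmtError(sqlite3_stmt *pStmt){
+**      sqlite3 *db = sqlite3_db_handle(pStmt);
+**      fprintf(stderr, "error: %s\n", sqlite3_errmsg(db));
+**    }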
+*/
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Return The Filename For A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename
+** associated with database N of connection D. ^The main database file
+** has the name "main". If there is no attached database N on the database
+** connection D, or if database N is a temporary or in-memory database, then
+** a NULL pointer is returned.
+**
+** ^The filename returned by this function is the output of the
+** xFullPathname method of the [VFS]. ^In other words, the filename
+** will be an absolute pathname, even if the filename used
+** to open the database originally was a URI or relative pathname.
+*/
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+
+/*
+** CAPI3REF: Determine if a database is read-only
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N
+** of connection D is read-only, 0 if it is read/write, or -1 if N is not
+** the name of a database on connection D.
+*/
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+
+/*
+** CAPI3REF: Find the next prepared statement
+** METHOD: sqlite3
+**
+** ^This interface returns a pointer to the next [prepared statement] after
+** pStmt associated with the [database connection] pDb. ^If pStmt is NULL
+** then this interface returns a pointer to the first prepared statement
+** associated with the database connection pDb. ^If no prepared statement
+** satisfies the conditions of this routine, it returns NULL.
+**
+** The [database connection] pointer D in a call to
+** [sqlite3_next_stmt(D,S)] must refer to an open database
+** connection and in particular must not be a NULL pointer.
+*/
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Commit And Rollback Notification Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_commit_hook() interface registers a callback
+** function to be invoked whenever a transaction is [COMMIT | committed].
+** ^Any callback set by a previous call to sqlite3_commit_hook()
+** for the same database connection is overridden.
+** ^The sqlite3_rollback_hook() interface registers a callback
+** function to be invoked whenever a transaction is [ROLLBACK | rolled back].
+** ^Any callback set by a previous call to sqlite3_rollback_hook()
+** for the same database connection is overridden.
+** ^The pArg argument is passed through to the callback.
+** ^If the callback on a commit hook function returns non-zero,
+** then the commit is converted into a rollback.
+**
+** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions
+** return the P argument from the previous call of the same function
+** on the same [database connection] D, or NULL for
+** the first call for each function on D.
+**
+** The commit and rollback hook callbacks are not reentrant.
+** The callback implementation must not do anything that will modify
+** the database connection that invoked the callback. Any actions
+** to modify the database connection must be deferred until after the
+** completion of the [sqlite3_step()] call that triggered the commit
+** or rollback hook in the first place.
+** Note that running any other SQL statements, including SELECT statements,
+** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()], counts as
+** "modifying" the database connection in the sense used in this paragraph.
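+**
+** For example, a commit hook that vetoes commits while an application
+** flag is set might look like this (a sketch; the names are illustrative):
+**
+**    static int gVeto = 0;
+**    static int vetoCommit(void *pArg){
+**      return *(int*)pArg;   /* non-zero converts the COMMIT to a ROLLBACK */
+**    }
+**    ...
+**    sqlite3_commit_hook(db, vetoCommit, &gVeto);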
+**
+** ^Registering a NULL function disables the callback.
+**
+** ^When the commit hook callback routine returns zero, the [COMMIT]
+** operation is allowed to continue normally. ^If the commit hook
+** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK].
+** ^The rollback hook is invoked on a rollback that results from a commit
+** hook returning non-zero, just as it would be with any other rollback.
+**
+** ^For the purposes of this API, a transaction is said to have been
+** rolled back if an explicit "ROLLBACK" statement is executed, or
+** an error or constraint causes an implicit rollback to occur.
+** ^The rollback callback is not invoked if a transaction is
+** automatically rolled back because the database connection is closed.
+**
+** See also the [sqlite3_update_hook()] interface.
+*/
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+
+/*
+** CAPI3REF: Data Change Notification Callbacks
+** METHOD: sqlite3
+**
+** ^The sqlite3_update_hook() interface registers a callback function
+** with the [database connection] identified by the first argument
+** to be invoked whenever a row is updated, inserted or deleted in
+** a [rowid table].
+** ^Any callback set by a previous call to this function
+** for the same database connection is overridden.
+**
+** ^The second argument is a pointer to the function to invoke when a
+** row is updated, inserted or deleted in a rowid table.
+** ^The first argument to the callback is a copy of the third argument
+** to sqlite3_update_hook().
+** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE],
+** or [SQLITE_UPDATE], depending on the operation that caused the callback
+** to be invoked.
+** ^The third and fourth arguments to the callback contain pointers to the
+** database and table name containing the affected row.
+** ^The final callback parameter is the [rowid] of the row.
+** ^In the case of an update, this is the [rowid] after the update takes place.
+**
+** ^(The update hook is not invoked when internal system tables are
+** modified (i.e. sqlite_master and sqlite_sequence).)^
+** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified.
+**
+** ^In the current implementation, the update hook
+** is not invoked when conflicting rows are deleted because of an
+** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook
+** invoked when rows are deleted using the [truncate optimization].
+** The exceptions defined in this paragraph might change in a future
+** release of SQLite.
+**
+** The update hook implementation must not do anything that will modify
+** the database connection that invoked the update hook. Any actions
+** to modify the database connection must be deferred until after the
+** completion of the [sqlite3_step()] call that triggered the update hook.
+** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both count as
+** "modifying" the database connection in the sense used in this paragraph.
+**
+** ^The sqlite3_update_hook(D,C,P) function
+** returns the P argument from the previous call
+** on the same [database connection] D, or NULL for
+** the first call on D.
+**
+** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()],
+** and [sqlite3_preupdate_hook()] interfaces.
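+**
+** A minimal change-logging callback might look like this (a sketch;
+** logChange is an illustrative name):
+**
+**    static void logChange(void *pArg, int op, const char *zDb,
+**                          const char *zTbl, sqlite3_int64 rowid){
+**      printf("%s on %s.%s rowid=%lld\n",
+**             op==SQLITE_INSERT ? "INSERT" :
+**             op==SQLITE_DELETE ? "DELETE" : "UPDATE",
+**             zDb, zTbl, rowid);
+**    }
+**    ...
+**    sqlite3_update_hook(db, logChange, 0);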
+*/
+SQLITE_API void *sqlite3_update_hook(
+  sqlite3*,
+  void(*)(void *,int ,char const *,char const *,sqlite3_int64),
+  void*
+);
+
+/*
+** CAPI3REF: Enable Or Disable Shared Pager Cache
+**
+** ^(This routine enables or disables the sharing of the database cache
+** and schema data structures between [database connection | connections]
+** to the same database. Sharing is enabled if the argument is true
+** and disabled if the argument is false.)^
+**
+** ^Cache sharing is enabled and disabled for an entire process.
+** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]).
+** In prior versions of SQLite,
+** sharing was enabled or disabled for each thread separately.
+**
+** ^(The cache sharing mode set by this interface affects all subsequent
+** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()].
+** Existing database connections continue to use the sharing mode
+** that was in effect at the time they were opened.)^
+**
+** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled
+** successfully. An [error code] is returned otherwise.)^
+**
+** ^Shared cache is disabled by default. But this might change in
+** future releases of SQLite. Applications that care about the shared
+** cache setting should set it explicitly.
+**
+** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0
+** and will always return SQLITE_MISUSE. On those systems,
+** shared cache mode should be enabled per-database connection via
+** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE].
+**
+** This interface is threadsafe on processors where writing a
+** 32-bit integer is atomic.
+**
+** See Also: [SQLite Shared-Cache Mode]
+*/
+SQLITE_API int sqlite3_enable_shared_cache(int);
+
+/*
+** CAPI3REF: Attempt To Free Heap Memory
+**
+** ^The sqlite3_release_memory() interface attempts to free N bytes
+** of heap memory by deallocating non-essential memory allocations
+** held by the database library. Memory used to cache database
+** pages to improve performance is an example of non-essential memory.
+** ^sqlite3_release_memory() returns the number of bytes actually freed,
+** which might be more or less than the amount requested.
+** ^The sqlite3_release_memory() routine is a no-op returning zero
+** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT].
+**
+** See also: [sqlite3_db_release_memory()]
+*/
+SQLITE_API int sqlite3_release_memory(int);
+
+/*
+** CAPI3REF: Free Memory Used By A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
+** memory as possible from database connection D. Unlike the
+** [sqlite3_release_memory()] interface, this interface is in effect even
+** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is
+** omitted.
+**
+** See also: [sqlite3_release_memory()]
+*/
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
+
+/*
+** CAPI3REF: Impose A Limit On Heap Size
+**
+** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the
+** soft limit on the amount of heap memory that may be allocated by SQLite.
+** ^SQLite strives to keep heap memory utilization below the soft heap
+** limit by reducing the number of pages held in the page cache
+** as heap memory usage approaches the limit.
+** ^The soft heap limit is "soft" because even though SQLite strives to stay
+** below the limit, it will exceed the limit rather than generate
+** an [SQLITE_NOMEM] error. In other words, the soft heap limit
+** is advisory only.
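+**
+** For example, an application might cap cache-related heap usage at
+** roughly 64 MiB, or query the current limit without changing it
+** (a sketch):
+**
+**    sqlite3_soft_heap_limit64( 64*1024*1024 );           /* set limit */
+**    sqlite3_int64 cur = sqlite3_soft_heap_limit64(-1);   /* query only */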
+**
+** ^The return value from sqlite3_soft_heap_limit64() is the size of
+** the soft heap limit prior to the call, or negative in the case of an
+** error. ^If the argument N is negative
+** then no change is made to the soft heap limit. Hence, the current
+** size of the soft heap limit can be determined by invoking
+** sqlite3_soft_heap_limit64() with a negative argument.
+**
+** ^If the argument N is zero then the soft heap limit is disabled.
+**
+** ^(The soft heap limit is not enforced in the current implementation
+** if one or more of the following conditions are true:
+**
    +**
  • The soft heap limit is set to zero. +**
  • Memory accounting is disabled using a combination of the +** [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and +** the [SQLITE_DEFAULT_MEMSTATUS] compile-time option. +**
  • An alternative page cache implementation is specified using +** [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...). +**
  • The page cache allocates from its own memory pool supplied +** by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than +** from the heap. +**
)^
+**
+** Beginning with SQLite [version 3.7.3] ([dateof:3.7.3]),
+** the soft heap limit is enforced
+** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT]
+** compile-time option is used. With [SQLITE_ENABLE_MEMORY_MANAGEMENT],
+** the soft heap limit is enforced on every memory allocation. Without
+** [SQLITE_ENABLE_MEMORY_MANAGEMENT], the soft heap limit is only enforced
+** when memory is allocated by the page cache. Testing suggests that because
+** the page cache is the predominant memory user in SQLite, most
+** applications will achieve adequate soft heap limit enforcement without
+** the use of [SQLITE_ENABLE_MEMORY_MANAGEMENT].
+**
+** The circumstances under which SQLite will enforce the soft heap limit may
+** change in future releases of SQLite.
+*/
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
+
+/*
+** CAPI3REF: Deprecated Soft Heap Limit Interface
+** DEPRECATED
+**
+** This is a deprecated version of the [sqlite3_soft_heap_limit64()]
+** interface. This routine is provided for historical compatibility
+** only. All new applications should use the
+** [sqlite3_soft_heap_limit64()] interface rather than this one.
+*/
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
+
+
+/*
+** CAPI3REF: Extract Metadata About A Column Of A Table
+** METHOD: sqlite3
+**
+** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns
+** information about column C of table T in database D
+** on [database connection] X.)^ ^The sqlite3_table_column_metadata()
+** interface returns SQLITE_OK and fills in the non-NULL pointers in
+** the final five arguments with appropriate values if the specified
+** column exists. ^The sqlite3_table_column_metadata() interface returns
+** SQLITE_ERROR if the specified column does not exist.
+** ^If the column-name parameter to sqlite3_table_column_metadata() is a
+** NULL pointer, then this routine simply checks for the existence of the
+** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it
+** does not.
+**
+** ^The column is identified by the second, third and fourth parameters to
+** this function. ^(The second parameter is either the name of the database
+** (i.e. "main", "temp", or an attached database) containing the specified
+** table or NULL.)^ ^If it is NULL, then all attached databases are searched
+** for the table using the same algorithm used by the database engine to
+** resolve unqualified table references.
+**
+** ^The third and fourth parameters to this function are the table and column
+** name of the desired column, respectively.
+**
+** ^Metadata is returned by writing to the memory locations passed as the 5th
+** and subsequent parameters to this function. ^Any of these arguments may be
+** NULL, in which case the corresponding element of metadata is omitted.
+**
+** ^(
+**
+**    Parameter     Output Type    Description
+**    -----------------------------------------------------------------
+**    5th           const char*    Data type
+**    6th           const char*    Name of default collation sequence
+**    7th           int            True if column has a NOT NULL constraint
+**    8th           int            True if column is part of the PRIMARY KEY
+**    9th           int            True if column is [AUTOINCREMENT]
+**
)^ +** +** ^The memory pointed to by the character pointers returned for the +** declaration type and collation sequence is valid until the next +** call to any SQLite API function. +** +** ^If the specified table is actually a view, an [error code] is returned. +** +** ^If the specified column is "rowid", "oid" or "_rowid_" and the table +** is not a [WITHOUT ROWID] table and an +** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output +** parameters are set for the explicitly declared column. ^(If there is no +** [INTEGER PRIMARY KEY] column, then the outputs +** for the [rowid] are set as follows: +** +**
+**     data type: "INTEGER"
+**     collation sequence: "BINARY"
+**     not null: 0
+**     primary key: 1
+**     auto increment: 0
+** 
)^
+**
+** ^This function causes all database schemas to be read from disk and
+** parsed, if that has not already been done, and returns an error if
+** any errors are encountered while loading the schema.
+*/
+SQLITE_API int sqlite3_table_column_metadata(
+  sqlite3 *db,                /* Connection handle */
+  const char *zDbName,        /* Database name or NULL */
+  const char *zTableName,     /* Table name */
+  const char *zColumnName,    /* Column name */
+  char const **pzDataType,    /* OUTPUT: Declared data type */
+  char const **pzCollSeq,     /* OUTPUT: Collation sequence name */
+  int *pNotNull,              /* OUTPUT: True if NOT NULL constraint exists */
+  int *pPrimaryKey,           /* OUTPUT: True if column part of PK */
+  int *pAutoinc               /* OUTPUT: True if column is auto-increment */
+);
+
+/*
+** CAPI3REF: Load An Extension
+** METHOD: sqlite3
+**
+** ^This interface loads an SQLite extension library from the named file.
+**
+** ^The sqlite3_load_extension() interface attempts to load an
+** [SQLite extension] library contained in the file zFile. If
+** the file cannot be loaded directly, attempts are made to load
+** with various operating-system specific extensions added.
+** So for example, if "samplelib" cannot be loaded, then names like
+** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might
+** be tried also.
+**
+** ^The entry point is zProc.
+** ^(zProc may be 0, in which case SQLite will try to come up with an
+** entry point name on its own. It first tries "sqlite3_extension_init".
+** If that does not work, it constructs a name "sqlite3_X_init" where the
+** X consists of the lower-case equivalent of all ASCII alphabetic
+** characters in the filename from the last "/" to the first following
+** "." and omitting any initial "lib".)^
+** ^The sqlite3_load_extension() interface returns
+** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
+** ^If an error occurs and pzErrMsg is not 0, then the
+** [sqlite3_load_extension()] interface shall attempt to
+** fill *pzErrMsg with error message text stored in memory
+** obtained from [sqlite3_malloc()]. The calling function
+** should free this memory by calling [sqlite3_free()].
+**
+** ^Extension loading must be enabled using
+** [sqlite3_enable_load_extension()] or
+** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL)
+** prior to calling this API,
+** otherwise an error will be returned.
+**
+** Security warning: It is recommended that the
+** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this
+** interface. The use of the [sqlite3_enable_load_extension()] interface
+** should be avoided. This will keep the SQL function [load_extension()]
+** disabled and prevent SQL injections from giving attackers
+** access to extension loading capabilities.
+**
+** See also the [load_extension() SQL function].
+*/
+SQLITE_API int sqlite3_load_extension(
+  sqlite3 *db,          /* Load the extension into this database connection */
+  const char *zFile,    /* Name of the shared library containing extension */
+  const char *zProc,    /* Entry point. Derived from zFile if 0 */
+  char **pzErrMsg       /* Put error message here if not 0 */
+);
+
+/*
+** CAPI3REF: Enable Or Disable Extension Loading
+** METHOD: sqlite3
+**
+** ^So as not to open security holes in older applications that are
+** unprepared to deal with [extension loading], and as a means of disabling
+** [extension loading] while evaluating user-entered SQL, the following API
+** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
+**
+** ^Extension loading is off by default.
+** ^Call the sqlite3_enable_load_extension() routine with onoff==1
+** to turn extension loading on and call it with onoff==0 to turn
+** it back off again.
+**
+** ^This interface enables or disables both the C-API
+** [sqlite3_load_extension()] and the SQL function [load_extension()].
+** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..)
+** to enable or disable only the C-API.)^
+**
+** Security warning: It is recommended that extension loading
+** be disabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method
+** rather than this interface, so the [load_extension()] SQL function
+** remains disabled. This will prevent SQL injections from giving attackers
+** access to extension loading capabilities.
+*/
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+
+/*
+** CAPI3REF: Automatically Load Statically Linked Extensions
+**
+** ^This interface causes the xEntryPoint() function to be invoked for
+** each new [database connection] that is created. The idea here is that
+** xEntryPoint() is the entry point for a statically linked [SQLite extension]
+** that is to be automatically loaded into all new database connections.
+**
+** ^(Even though the function prototype shows that xEntryPoint() takes
+** no arguments and returns void, SQLite invokes xEntryPoint() with three
+** arguments and expects an integer result as if the signature of the
+** entry point were as follows:
+**
+**    int xEntryPoint(
+**      sqlite3 *db,
+**      const char **pzErrMsg,
+**      const struct sqlite3_api_routines *pThunk
+**    );
+** 
)^
+**
+** If the xEntryPoint routine encounters an error, it should make *pzErrMsg
+** point to an appropriate error message (obtained from [sqlite3_mprintf()])
+** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg
+** is NULL before calling the xEntryPoint(). ^SQLite will invoke
+** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any
+** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()],
+** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail.
+**
+** ^Calling sqlite3_auto_extension(X) with an entry point X that is already
+** on the list of automatic extensions is a harmless no-op. ^No entry point
+** will be called more than once for each database connection that is opened.
+**
+** See also: [sqlite3_reset_auto_extension()]
+** and [sqlite3_cancel_auto_extension()]
+*/
+SQLITE_API int sqlite3_auto_extension(void(*xEntryPoint)(void));
+
+/*
+** CAPI3REF: Cancel Automatic Extension Loading
+**
+** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the
+** initialization routine X that was registered using a prior call to
+** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)]
+** routine returns 1 if initialization routine X was successfully
+** unregistered and it returns 0 if X was not on the list of initialization
+** routines.
+*/
+SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
+
+/*
+** CAPI3REF: Reset Automatic Extension Loading
+**
+** ^This interface disables all automatic extensions previously
+** registered using [sqlite3_auto_extension()].
+*/
+SQLITE_API void sqlite3_reset_auto_extension(void);
+
+/*
+** The interface to the virtual-table mechanism is currently considered
+** to be experimental. The interface might change in incompatible ways.
+** If this is a problem for you, do not use the interface at this time.
+**
+** When the virtual-table mechanism stabilizes, we will declare the
+** interface fixed, support it indefinitely, and remove this comment.
+*/
+
+/*
+** Structures used by the virtual table interface
+*/
+typedef struct sqlite3_vtab sqlite3_vtab;
+typedef struct sqlite3_index_info sqlite3_index_info;
+typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor;
+typedef struct sqlite3_module sqlite3_module;
+
+/*
+** CAPI3REF: Virtual Table Object
+** KEYWORDS: sqlite3_module {virtual table module}
+**
+** This structure, sometimes called a "virtual table module",
+** defines the implementation of a [virtual table].
+** This structure consists mostly of methods for the module.
+**
+** ^A virtual table module is created by filling in a persistent
+** instance of this structure and passing a pointer to that instance
+** to [sqlite3_create_module()] or [sqlite3_create_module_v2()].
+** ^The registration remains valid until it is replaced by a different
+** module or until the [database connection] closes. The content
+** of this structure must not change while it is registered with
+** any database connection.
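+**
+** Registration typically looks like this (a sketch; myModule must be a
+** fully populated, statically allocated sqlite3_module and "csvtab" is
+** an illustrative module name):
+**
+**    static sqlite3_module myModule = { /* ... methods ... */ };
+**    sqlite3_create_module(db, "csvtab", &myModule, 0);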
+*/
+struct sqlite3_module {
+  int iVersion;
+  int (*xCreate)(sqlite3*, void *pAux,
+               int argc, const char *const*argv,
+               sqlite3_vtab **ppVTab, char**);
+  int (*xConnect)(sqlite3*, void *pAux,
+               int argc, const char *const*argv,
+               sqlite3_vtab **ppVTab, char**);
+  int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
+  int (*xDisconnect)(sqlite3_vtab *pVTab);
+  int (*xDestroy)(sqlite3_vtab *pVTab);
+  int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
+  int (*xClose)(sqlite3_vtab_cursor*);
+  int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
+                int argc, sqlite3_value **argv);
+  int (*xNext)(sqlite3_vtab_cursor*);
+  int (*xEof)(sqlite3_vtab_cursor*);
+  int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
+  int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
+  int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
+  int (*xBegin)(sqlite3_vtab *pVTab);
+  int (*xSync)(sqlite3_vtab *pVTab);
+  int (*xCommit)(sqlite3_vtab *pVTab);
+  int (*xRollback)(sqlite3_vtab *pVTab);
+  int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
+                       void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
+                       void **ppArg);
+  int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
+  /* The methods above are in version 1 of the sqlite_module object. Those
+  ** below are for version 2 and greater. */
+  int (*xSavepoint)(sqlite3_vtab *pVTab, int);
+  int (*xRelease)(sqlite3_vtab *pVTab, int);
+  int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
+};
+
+/*
+** CAPI3REF: Virtual Table Indexing Information
+** KEYWORDS: sqlite3_index_info
+**
+** The sqlite3_index_info structure and its substructures are used as part
+** of the [virtual table] interface to
+** pass information into and receive the reply from the [xBestIndex]
+** method of a [virtual table module]. The fields under **Inputs** are the
+** inputs to xBestIndex and are read-only. xBestIndex inserts its
+** results into the **Outputs** fields.
+**
+** ^(The aConstraint[] array records WHERE clause constraints of the form:
+**
+**         column OP expr
+**
+** where OP is =, <, <=, >, or >=.)^  ^(The particular operator is
+** stored in aConstraint[].op using one of the
+** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^
+** ^(The index of the column is stored in
+** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the
+** expr on the right-hand side can be evaluated (and thus the constraint
+** is usable) and false if it cannot.)^
+**
+** ^The optimizer automatically inverts terms of the form "expr OP column"
+** and makes other simplifications to the WHERE clause in an attempt to
+** get as many WHERE clause terms into the form shown above as possible.
+** ^The aConstraint[] array only reports WHERE clause terms that are
+** relevant to the particular virtual table being queried.
+**
+** ^Information about the ORDER BY clause is stored in aOrderBy[].
+** ^Each term of aOrderBy records a column of the ORDER BY clause.
+**
+** The colUsed field indicates which columns of the virtual table may be
+** required by the current scan. Virtual table columns are numbered from
+** zero in the order in which they appear within the CREATE TABLE statement
+** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62),
+** the corresponding bit is set within the colUsed mask if the column may be
+** required by SQLite. If the table has at least 64 columns and any column
+** to the right of the first 63 is required, then bit 63 of colUsed is also
+** set. In other words, column iCol may be required if the expression
+** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to
+** non-zero.
+**
+** The [xBestIndex] method must fill aConstraintUsage[] with information
+** about what parameters to pass to xFilter. ^If argvIndex>0 then
+** the right-hand side of the corresponding aConstraint[] is evaluated
+** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit
+** is true, then the constraint is assumed to be fully handled by the
+** virtual table and is not checked again by SQLite.)^
+**
+** ^The idxNum and idxStr values are recorded and passed into the
+** [xFilter] method.
+** ^[sqlite3_free()] is used to free idxStr if and only if
+** needToFreeIdxStr is true.
+**
+** ^The orderByConsumed flag means that output from [xFilter]/[xNext] will
+** occur in the correct order to satisfy the ORDER BY clause so that no
+** separate sorting step is required.
+**
+** ^The estimatedCost value is an estimate of the cost of a particular
+** strategy. A cost of N indicates that the cost of the strategy is similar
+** to a linear scan of an SQLite table with N rows. A cost of log(N)
+** indicates that the expense of the operation is similar to that of a
+** binary search on a unique indexed field of an SQLite table with N rows.
+**
+** ^The estimatedRows value is an estimate of the number of rows that
+** will be returned by the strategy.
+**
+** The xBestIndex method may optionally populate the idxFlags field with a
+** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
+** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
+** assumes that the strategy may visit at most one row.
+**
+** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
+** SQLite also assumes that if a call to the xUpdate() method is made as
+** part of the same statement to delete or update a virtual table row and the
+** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
+** any database changes.
+** In other words, if the xUpdate() returns
+** SQLITE_CONSTRAINT, the database contents must be exactly as they were
+** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
+** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
+** the xUpdate method are automatically rolled back by SQLite.
+**
+** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
+** structure for SQLite [version 3.8.2] ([dateof:3.8.2]).
+** If a virtual table extension is
+** used with an SQLite version earlier than 3.8.2, the results of attempting
+** to read or write the estimatedRows field are undefined (but are likely
+** to include crashing the application). The estimatedRows field should
+** therefore only be used if [sqlite3_libversion_number()] returns a
+** value greater than or equal to 3008002. Similarly, the idxFlags field
+** was added for [version 3.9.0] ([dateof:3.9.0]).
+** It may therefore only be used if
+** sqlite3_libversion_number() returns a value greater than or equal to
+** 3009000.
+*/
+struct sqlite3_index_info {
+  /* Inputs */
+  int nConstraint;           /* Number of entries in aConstraint */
+  struct sqlite3_index_constraint {
+     int iColumn;              /* Column constrained. -1 for ROWID */
+     unsigned char op;         /* Constraint operator */
+     unsigned char usable;     /* True if this constraint is usable */
+     int iTermOffset;          /* Used internally - xBestIndex should ignore */
+  } *aConstraint;            /* Table of WHERE clause constraints */
+  int nOrderBy;              /* Number of terms in the ORDER BY clause */
+  struct sqlite3_index_orderby {
+     int iColumn;              /* Column number */
+     unsigned char desc;       /* True for DESC. False for ASC. */
+  } *aOrderBy;               /* The ORDER BY clause */
+  /* Outputs */
+  struct sqlite3_index_constraint_usage {
+    int argvIndex;           /* if >0, constraint is part of argv to xFilter */
+    unsigned char omit;      /* Do not code a test for this constraint */
+  } *aConstraintUsage;
+  int idxNum;                /* Number used to identify the index */
+  char *idxStr;              /* String, possibly obtained from sqlite3_malloc */
+  int needToFreeIdxStr;      /* Free idxStr using sqlite3_free() if true */
+  int orderByConsumed;       /* True if output is already ordered */
+  double estimatedCost;      /* Estimated cost of using this index */
+  /* Fields below are only available in SQLite 3.8.2 and later */
+  sqlite3_int64 estimatedRows;    /* Estimated number of rows returned */
+  /* Fields below are only available in SQLite 3.9.0 and later */
+  int idxFlags;              /* Mask of SQLITE_INDEX_SCAN_* flags */
+  /* Fields below are only available in SQLite 3.10.0 and later */
+  sqlite3_uint64 colUsed;    /* Input: Mask of columns used by statement */
+};
+
+/*
+** CAPI3REF: Virtual Table Scan Flags
+*/
+#define SQLITE_INDEX_SCAN_UNIQUE      1     /* Scan visits at most 1 row */
+
+/*
+** CAPI3REF: Virtual Table Constraint Operator Codes
+**
+** These macros define the allowed values for the
+** [sqlite3_index_info].aConstraint[].op field. Each value represents
+** an operator that is part of a constraint term in the WHERE clause of
+** a query that uses a [virtual table].
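+**
+** For example, an xBestIndex implementation might recognize the first
+** usable equality constraint like this (a sketch; pInfo is the
+** sqlite3_index_info argument):
+**
+**    int i;
+**    for(i=0; i<pInfo->nConstraint; i++){
+**      if( pInfo->aConstraint[i].usable
+**       && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ ){
+**        pInfo->aConstraintUsage[i].argvIndex = 1;
+**        break;
+**      }
+**    }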
+*/
+#define SQLITE_INDEX_CONSTRAINT_EQ      2
+#define SQLITE_INDEX_CONSTRAINT_GT      4
+#define SQLITE_INDEX_CONSTRAINT_LE      8
+#define SQLITE_INDEX_CONSTRAINT_LT     16
+#define SQLITE_INDEX_CONSTRAINT_GE     32
+#define SQLITE_INDEX_CONSTRAINT_MATCH  64
+#define SQLITE_INDEX_CONSTRAINT_LIKE   65
+#define SQLITE_INDEX_CONSTRAINT_GLOB   66
+#define SQLITE_INDEX_CONSTRAINT_REGEXP 67
+
+/*
+** CAPI3REF: Register A Virtual Table Implementation
+** METHOD: sqlite3
+**
+** ^These routines are used to register a new [virtual table module] name.
+** ^Module names must be registered before
+** creating a new [virtual table] using the module and before using a
+** preexisting [virtual table] for the module.
+**
+** ^The module name is registered on the [database connection] specified
+** by the first parameter.  ^The name of the module is given by the
+** second parameter.  ^The third parameter is a pointer to
+** the implementation of the [virtual table module].  ^The fourth
+** parameter is an arbitrary client data pointer that is passed through
+** into the [xCreate] and [xConnect] methods of the virtual table module
+** when a new virtual table is being created or reinitialized.
+**
+** ^The sqlite3_create_module_v2() interface has a fifth parameter which
+** is a pointer to a destructor for the pClientData.  ^SQLite will
+** invoke the destructor function (if it is not NULL) when SQLite
+** no longer needs the pClientData pointer.  ^The destructor will also
+** be invoked if the call to sqlite3_create_module_v2() fails.
+** ^The sqlite3_create_module()
+** interface is equivalent to sqlite3_create_module_v2() with a NULL
+** destructor.
+*/
+SQLITE_API int sqlite3_create_module(
+  sqlite3 *db,               /* SQLite connection to register module with */
+  const char *zName,         /* Name of the module */
+  const sqlite3_module *p,   /* Methods for the module */
+  void *pClientData          /* Client data for xCreate/xConnect */
+);
+SQLITE_API int sqlite3_create_module_v2(
+  sqlite3 *db,               /* SQLite connection to register module with */
+  const char *zName,         /* Name of the module */
+  const sqlite3_module *p,   /* Methods for the module */
+  void *pClientData,         /* Client data for xCreate/xConnect */
+  void(*xDestroy)(void*)     /* Module destructor function */
+);
+
+/*
+** CAPI3REF: Virtual Table Instance Object
+** KEYWORDS: sqlite3_vtab
+**
+** Every [virtual table module] implementation uses a subclass
+** of this object to describe a particular instance
+** of the [virtual table].  Each subclass will
+** be tailored to the specific needs of the module implementation.
+** The purpose of this superclass is to define certain fields that are
+** common to all module implementations.
+**
+** ^Virtual table methods can set an error message by assigning a
+** string obtained from [sqlite3_mprintf()] to zErrMsg.  The method should
+** take care that any prior string is freed by a call to [sqlite3_free()]
+** prior to assigning a new string to zErrMsg.  ^After the error message
+** is delivered up to the client application, the string will be automatically
+** freed by sqlite3_free() and the zErrMsg field will be zeroed.
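+**
+** For example (an illustrative sketch only), a method reporting a failure
+** might do:
+**
+**     sqlite3_free(pVTab->zErrMsg);
+**     pVTab->zErrMsg = sqlite3_mprintf("unknown option: \"%s\"", zOption);
+**     return SQLITE_ERROR;
+**
+** Here pVTab and zOption are hypothetical names for the virtual table
+** instance and the argument being rejected.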
+*/ +struct sqlite3_vtab { + const sqlite3_module *pModule; /* The module for this virtual table */ + int nRef; /* Number of open cursors */ + char *zErrMsg; /* Error message from sqlite3_mprintf() */ + /* Virtual table implementations will typically add additional fields */ +}; + +/* +** CAPI3REF: Virtual Table Cursor Object +** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} +** +** Every [virtual table module] implementation uses a subclass of the +** following structure to describe cursors that point into the +** [virtual table] and are used +** to loop through the virtual table. Cursors are created using the +** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed +** by the [sqlite3_module.xClose | xClose] method. Cursors are used +** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods +** of the module. Each module implementation will define +** the content of a cursor structure to suit its own needs. +** +** This superclass exists in order to define fields of the cursor that +** are common to all implementations. +*/ +struct sqlite3_vtab_cursor { + sqlite3_vtab *pVtab; /* Virtual table of this cursor */ + /* Virtual table implementations will typically add additional fields */ +}; + +/* +** CAPI3REF: Declare The Schema Of A Virtual Table +** +** ^The [xCreate] and [xConnect] methods of a +** [virtual table module] call this interface +** to declare the format (the names and datatypes of the columns) of +** the virtual tables they implement. +*/ +SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); + +/* +** CAPI3REF: Overload A Function For A Virtual Table +** METHOD: sqlite3 +** +** ^(Virtual tables can provide alternative implementations of functions +** using the [xFindFunction] method of the [virtual table module]. +** But global versions of those functions +** must exist in order to be overloaded.)^ +** +** ^(This API makes sure a global version of a function with a particular +** name and number of parameters exists. If no such function exists +** before this API is called, a new function is created.)^ ^The implementation +** of the new function always causes an exception to be thrown. So +** the new function is not good for anything by itself. Its only +** purpose is to be a placeholder function that can be overloaded +** by a [virtual table]. +*/ +SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); + +/* +** The interface to the virtual-table mechanism defined above (back up +** to a comment remarkably similar to this one) is currently considered +** to be experimental. The interface might change in incompatible ways. +** If this is a problem for you, do not use the interface at this time. +** +** When the virtual-table mechanism stabilizes, we will declare the +** interface fixed, support it indefinitely, and remove this comment. +*/ + +/* +** CAPI3REF: A Handle To An Open BLOB +** KEYWORDS: {BLOB handle} {BLOB handles} +** +** An instance of this object represents an open BLOB on which +** [sqlite3_blob_open | incremental BLOB I/O] can be performed. +** ^Objects of this type are created by [sqlite3_blob_open()] +** and destroyed by [sqlite3_blob_close()]. +** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces +** can be used to read or write small subsections of the BLOB. +** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. 
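+**
+** A minimal read of an entire blob, sketched for illustration only
+** (error handling omitted; db, table t1, column "content", and iRow are
+** assumed to exist in the application):
+**
+**     sqlite3_blob *pBlob = 0;
+**     if( sqlite3_blob_open(db, "main", "t1", "content", iRow, 0, &pBlob)
+**             ==SQLITE_OK ){
+**       int n = sqlite3_blob_bytes(pBlob);
+**       char *z = sqlite3_malloc(n);
+**       if( z ) sqlite3_blob_read(pBlob, z, n, 0);
+**       /* ... use the blob content in z ... */
+**       sqlite3_free(z);
+**     }
+**     sqlite3_blob_close(pBlob);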
+*/
+typedef struct sqlite3_blob sqlite3_blob;
+
+/*
+** CAPI3REF: Open A BLOB For Incremental I/O
+** METHOD: sqlite3
+** CONSTRUCTOR: sqlite3_blob
+**
+** ^(This interface opens a [BLOB handle | handle] to the BLOB located
+** in row iRow, column zColumn, table zTable in database zDb;
+** in other words, the same BLOB that would be selected by:
+**
+**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
+**
+** )^
+**
+** ^(Parameter zDb is not the filename that contains the database, but
+** rather the symbolic name of the database. For attached databases, this is
+** the name that appears after the AS keyword in the [ATTACH] statement.
+** For the main database file, the database name is "main". For TEMP
+** tables, the database name is "temp".)^
+**
+** ^If the flags parameter is non-zero, then the BLOB is opened for read
+** and write access. ^If the flags parameter is zero, the BLOB is opened for
+** read-only access.
+**
+** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored
+** in *ppBlob. Otherwise an [error code] is returned and, unless the error
+** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
+** the API is not misused, it is always safe to call [sqlite3_blob_close()]
+** on *ppBlob after this function returns.
+**
+** This function fails with SQLITE_ERROR if any of the following are true:
+**
+**   • ^(Database zDb does not exist)^,
+**   • ^(Table zTable does not exist within database zDb)^,
+**   • ^(Table zTable is a WITHOUT ROWID table)^,
+**   • ^(Column zColumn does not exist)^,
+**   • ^(Row iRow is not present in the table)^,
+**   • ^(The specified column of row iRow contains a value that is not
+**     a TEXT or BLOB value)^,
+**   • ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
+**     constraint and the blob is being opened for read/write access)^,
+**   • ^([foreign key constraints | Foreign key constraints] are enabled,
+**     column zColumn is part of a [child key] definition and the blob is
+**     being opened for read/write access)^.
+** +** ^Unless it returns SQLITE_MISUSE, this function sets the +** [database connection] error code and message accessible via +** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. +** +** A BLOB referenced by sqlite3_blob_open() may be read using the +** [sqlite3_blob_read()] interface and modified by using +** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a +** different row of the same table using the [sqlite3_blob_reopen()] +** interface. However, the column, table, or database of a [BLOB handle] +** cannot be changed after the [BLOB handle] is opened. +** +** ^(If the row that a BLOB handle points to is modified by an +** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects +** then the BLOB handle is marked as "expired". +** This is true if any column of the row is changed, even a column +** other than the one the BLOB handle is open on.)^ +** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for +** an expired BLOB handle fail with a return code of [SQLITE_ABORT]. +** ^(Changes written into a BLOB prior to the BLOB expiring are not +** rolled back by the expiration of the BLOB. Such changes will eventually +** commit if the transaction continues to completion.)^ +** +** ^Use the [sqlite3_blob_bytes()] interface to determine the size of +** the opened blob. ^The size of a blob may not be changed by this +** interface. Use the [UPDATE] SQL command to change the size of a +** blob. +** +** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces +** and the built-in [zeroblob] SQL function may be used to create a +** zero-filled blob to read or write using the incremental-blob interface. +** +** To avoid a resource leak, every open [BLOB handle] should eventually +** be released by a call to [sqlite3_blob_close()]. +** +** See also: [sqlite3_blob_close()], +** [sqlite3_blob_reopen()], [sqlite3_blob_read()], +** [sqlite3_blob_bytes()], [sqlite3_blob_write()]. +*/ +SQLITE_API int sqlite3_blob_open( + sqlite3*, + const char *zDb, + const char *zTable, + const char *zColumn, + sqlite3_int64 iRow, + int flags, + sqlite3_blob **ppBlob +); + +/* +** CAPI3REF: Move a BLOB Handle to a New Row +** METHOD: sqlite3_blob +** +** ^This function is used to move an existing [BLOB handle] so that it points +** to a different row of the same database table. ^The new row is identified +** by the rowid value passed as the second argument. Only the row can be +** changed. ^The database, table and column on which the blob handle is open +** remain the same. Moving an existing [BLOB handle] to a new row is +** faster than closing the existing handle and opening a new one. +** +** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - +** it must exist and there must be either a blob or text value stored in +** the nominated column.)^ ^If the new row is not present in the table, or if +** it does not contain a blob or text value, or if another error occurs, an +** SQLite error code is returned and the blob handle is considered aborted. +** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or +** [sqlite3_blob_reopen()] on an aborted blob handle immediately return +** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle +** always returns zero. +** +** ^This function sets the database handle error code and message. 
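+**
+** For illustration only, visiting the same column of several rows through
+** one handle might look like this (error handling omitted; db, table t1,
+** column "content", aRowid[], nRow, and zBuf are assumed to exist in the
+** application):
+**
+**     sqlite3_blob *pBlob = 0;
+**     int i, n;
+**     sqlite3_blob_open(db, "main", "t1", "content", aRowid[0], 0, &pBlob);
+**     for(i=0; i<nRow; i++){
+**       if( i>0 ) sqlite3_blob_reopen(pBlob, aRowid[i]);
+**       n = sqlite3_blob_bytes(pBlob);
+**       if( n>(int)sizeof(zBuf) ) n = (int)sizeof(zBuf);
+**       sqlite3_blob_read(pBlob, zBuf, n, 0);
+**       /* ... use the first n bytes of zBuf ... */
+**     }
+**     sqlite3_blob_close(pBlob);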
+*/
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
+
+/*
+** CAPI3REF: Close A BLOB Handle
+** DESTRUCTOR: sqlite3_blob
+**
+** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed
+** unconditionally.  Even if this routine returns an error code, the
+** handle is still closed.)^
+**
+** ^If the blob handle being closed was opened for read-write access, and if
+** the database is in auto-commit mode and there are no other open read-write
+** blob handles or active write statements, the current transaction is
+** committed. ^If an error occurs while committing the transaction, an error
+** code is returned and the transaction is rolled back.
+**
+** Calling this function with an argument that is not a NULL pointer or an
+** open blob handle results in undefined behaviour. ^Calling this routine
+** with a null pointer (such as would be returned by a failed call to
+** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
+** is passed a valid open blob handle, the values returned by the
+** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
+*/
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
+
+/*
+** CAPI3REF: Return The Size Of An Open BLOB
+** METHOD: sqlite3_blob
+**
+** ^Returns the size in bytes of the BLOB accessible via the
+** successfully opened [BLOB handle] in its only argument.  ^The
+** incremental blob I/O routines can only read or overwrite existing
+** blob content; they cannot change the size of a blob.
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()].  Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+*/
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
+
+/*
+** CAPI3REF: Read Data From A BLOB Incrementally
+** METHOD: sqlite3_blob
+**
+** ^(This function is used to read data from an open [BLOB handle] into a
+** caller-supplied buffer. N bytes of data are copied into buffer Z
+** from the open BLOB, starting at offset iOffset.)^
+**
+** ^If offset iOffset is less than N bytes from the end of the BLOB,
+** [SQLITE_ERROR] is returned and no data is read.  ^If N or iOffset is
+** less than zero, [SQLITE_ERROR] is returned and no data is read.
+** ^The size of the blob (and hence the maximum value of N+iOffset)
+** can be determined using the [sqlite3_blob_bytes()] interface.
+**
+** ^An attempt to read from an expired [BLOB handle] fails with an
+** error code of [SQLITE_ABORT].
+**
+** ^(On success, sqlite3_blob_read() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^
+**
+** This routine only works on a [BLOB handle] which has been created
+** by a prior successful call to [sqlite3_blob_open()] and which has not
+** been closed by [sqlite3_blob_close()].  Passing any other pointer in
+** to this routine results in undefined and probably undesirable behavior.
+**
+** See also: [sqlite3_blob_write()].
+*/
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+
+/*
+** CAPI3REF: Write Data Into A BLOB Incrementally
+** METHOD: sqlite3_blob
+**
+** ^(This function is used to write data into an open [BLOB handle] from a
+** caller-supplied buffer. N bytes of data are copied from the buffer Z
+** into the open BLOB, starting at offset iOffset.)^
+**
+** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
+** Otherwise, an [error code] or an [extended error code] is returned.)^ +** ^Unless SQLITE_MISUSE is returned, this function sets the +** [database connection] error code and message accessible via +** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. +** +** ^If the [BLOB handle] passed as the first argument was not opened for +** writing (the flags parameter to [sqlite3_blob_open()] was zero), +** this function returns [SQLITE_READONLY]. +** +** This function may only modify the contents of the BLOB; it is +** not possible to increase the size of a BLOB using this API. +** ^If offset iOffset is less than N bytes from the end of the BLOB, +** [SQLITE_ERROR] is returned and no data is written. The size of the +** BLOB (and hence the maximum value of N+iOffset) can be determined +** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less +** than zero [SQLITE_ERROR] is returned and no data is written. +** +** ^An attempt to write to an expired [BLOB handle] fails with an +** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred +** before the [BLOB handle] expired are not rolled back by the +** expiration of the handle, though of course those changes might +** have been overwritten by the statement that expired the BLOB handle +** or by other independent statements. +** +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +** +** See also: [sqlite3_blob_read()]. +*/ +SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); + +/* +** CAPI3REF: Virtual File System Objects +** +** A virtual filesystem (VFS) is an [sqlite3_vfs] object +** that SQLite uses to interact +** with the underlying operating system. Most SQLite builds come with a +** single default VFS that is appropriate for the host computer. +** New VFSes can be registered and existing VFSes can be unregistered. +** The following interfaces are provided. +** +** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. +** ^Names are case sensitive. +** ^Names are zero-terminated UTF-8 strings. +** ^If there is no match, a NULL pointer is returned. +** ^If zVfsName is NULL then the default VFS is returned. +** +** ^New VFSes are registered with sqlite3_vfs_register(). +** ^Each new VFS becomes the default VFS if the makeDflt flag is set. +** ^The same VFS can be registered multiple times without injury. +** ^To make an existing VFS into the default VFS, register it again +** with the makeDflt flag set. If two different VFSes with the +** same name are registered, the behavior is undefined. If a +** VFS is registered with a name that is NULL or an empty string, +** then the behavior is undefined. +** +** ^Unregister a VFS with the sqlite3_vfs_unregister() interface. +** ^(If the default VFS is unregistered, another VFS is chosen as +** the default. The choice for the new VFS is arbitrary.)^ +*/ +SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); +SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); +SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); + +/* +** CAPI3REF: Mutexes +** +** The SQLite core uses these routines for thread +** synchronization. Though they are intended for internal +** use by SQLite, code that links against SQLite is +** permitted to use any of these routines. 
+** +** The SQLite source code contains multiple implementations +** of these mutex routines. An appropriate implementation +** is selected automatically at compile-time. The following +** implementations are available in the SQLite core: +** +**
+**
+**   • SQLITE_MUTEX_PTHREADS
+**   • SQLITE_MUTEX_W32
+**   • SQLITE_MUTEX_NOOP
+**
+** The SQLITE_MUTEX_NOOP implementation is a set of routines
+** that does no real locking and is appropriate for use in
+** a single-threaded application.  The SQLITE_MUTEX_PTHREADS and
+** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix
+** and Windows.
+**
+** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
+** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex
+** implementation is included with the library. In this case the
+** application must supply a custom mutex implementation using the
+** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function
+** before calling sqlite3_initialize() or any other public sqlite3_
+** function that calls sqlite3_initialize().
+**
+** ^The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it.  ^The sqlite3_mutex_alloc()
+** routine returns NULL if it is unable to allocate the requested
+** mutex.  The argument to sqlite3_mutex_alloc() must be one of these
+** integer constants:
+**
+**
+**   • SQLITE_MUTEX_FAST
+**   • SQLITE_MUTEX_RECURSIVE
+**   • SQLITE_MUTEX_STATIC_MASTER
+**   • SQLITE_MUTEX_STATIC_MEM
+**   • SQLITE_MUTEX_STATIC_OPEN
+**   • SQLITE_MUTEX_STATIC_PRNG
+**   • SQLITE_MUTEX_STATIC_LRU
+**   • SQLITE_MUTEX_STATIC_PMEM
+**   • SQLITE_MUTEX_STATIC_APP1
+**   • SQLITE_MUTEX_STATIC_APP2
+**   • SQLITE_MUTEX_STATIC_APP3
+**   • SQLITE_MUTEX_STATIC_VFS1
+**   • SQLITE_MUTEX_STATIC_VFS2
+**   • SQLITE_MUTEX_STATIC_VFS3
+** +** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) +** cause sqlite3_mutex_alloc() to create +** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other +** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return +** a pointer to a static preexisting mutex. ^Nine static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. ^For the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +** +** ^The sqlite3_mutex_free() routine deallocates a previously +** allocated dynamic mutex. Attempting to deallocate a static +** mutex results in undefined behavior. +** +** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. ^If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK] +** upon successful entry. ^(Mutexes created using +** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. +** In such cases, the +** mutex must be exited an equal number of times before another thread +** can enter.)^ If the same thread tries to enter any mutex other +** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined. +** +** ^(Some systems (for example, Windows 95) do not support the operation +** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() +** will always return SQLITE_BUSY. The SQLite core only ever uses +** sqlite3_mutex_try() as an optimization so this is acceptable +** behavior.)^ +** +** ^The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered by the +** calling thread or is not currently allocated. +** +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or +** sqlite3_mutex_leave() is a NULL pointer, then all three routines +** behave as no-ops. +** +** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. +*/ +SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int); +SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*); +SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*); +SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*); +SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*); + +/* +** CAPI3REF: Mutex Methods Object +** +** An instance of this structure defines the low-level routines +** used to allocate and use mutexes. 
+**
+** Usually, the default mutex implementations provided by SQLite are
+** sufficient; however, the application has the option of substituting a
+** custom implementation for specialized deployments or systems for which
+** SQLite does not provide a suitable implementation. In this case, the
+** application creates and populates an instance of this structure to pass
+** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
+** Additionally, an instance of this structure can be used as an
+** output variable when querying the system for the current mutex
+** implementation, using the [SQLITE_CONFIG_GETMUTEX] option.
+**
+** ^The xMutexInit method defined by this structure is invoked as
+** part of system initialization by the sqlite3_initialize() function.
+** ^The xMutexInit routine is called by SQLite exactly once for each
+** effective call to [sqlite3_initialize()].
+**
+** ^The xMutexEnd method defined by this structure is invoked as
+** part of system shutdown by the sqlite3_shutdown() function. The
+** implementation of this method is expected to release all outstanding
+** resources obtained by the mutex methods implementation, especially
+** those obtained by the xMutexInit method.  ^The xMutexEnd()
+** interface is invoked exactly once for each call to [sqlite3_shutdown()].
+**
+** ^(The remaining seven methods defined by this structure (xMutexAlloc,
+** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and
+** xMutexNotheld) implement the following interfaces (respectively):
+**
+**
+**   • [sqlite3_mutex_alloc()]
+**   • [sqlite3_mutex_free()]
+**   • [sqlite3_mutex_enter()]
+**   • [sqlite3_mutex_try()]
+**   • [sqlite3_mutex_leave()]
+**   • [sqlite3_mutex_held()]
+**   • [sqlite3_mutex_notheld()]
+**
)^ +** +** The only difference is that the public sqlite3_XXX functions enumerated +** above silently ignore any invocations that pass a NULL pointer instead +** of a valid mutex handle. The implementations of the methods defined +** by this structure are not required to handle this case, the results +** of passing a NULL pointer instead of a valid mutex handle are undefined +** (i.e. it is acceptable to provide an implementation that segfaults if +** it is passed a NULL pointer). +** +** The xMutexInit() method must be threadsafe. It must be harmless to +** invoke xMutexInit() multiple times within the same process and without +** intervening calls to xMutexEnd(). Second and subsequent calls to +** xMutexInit() must be no-ops. +** +** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()] +** and its associates). Similarly, xMutexAlloc() must not use SQLite memory +** allocation for a static mutex. ^However xMutexAlloc() may use SQLite +** memory allocation for a fast or recursive mutex. +** +** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is +** called, but only if the prior call to xMutexInit returned SQLITE_OK. +** If xMutexInit fails in any way, it is expected to clean up after itself +** prior to returning. +*/ +typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; +struct sqlite3_mutex_methods { + int (*xMutexInit)(void); + int (*xMutexEnd)(void); + sqlite3_mutex *(*xMutexAlloc)(int); + void (*xMutexFree)(sqlite3_mutex *); + void (*xMutexEnter)(sqlite3_mutex *); + int (*xMutexTry)(sqlite3_mutex *); + void (*xMutexLeave)(sqlite3_mutex *); + int (*xMutexHeld)(sqlite3_mutex *); + int (*xMutexNotheld)(sqlite3_mutex *); +}; + +/* +** CAPI3REF: Mutex Verification Routines +** +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines +** are intended for use inside assert() statements. The SQLite core +** never uses these routines except inside an assert() and applications +** are advised to follow the lead of the core. The SQLite core only +** provides implementations for these routines when it is compiled +** with the SQLITE_DEBUG flag. External mutex implementations +** are only required to provide these routines if SQLITE_DEBUG is +** defined and if NDEBUG is not defined. +** +** These routines should return true if the mutex in their argument +** is held or not held, respectively, by the calling thread. +** +** The implementation is not required to provide versions of these +** routines that actually work. If the implementation does not provide working +** versions of these routines, it should at least provide stubs that always +** return true so that one does not get spurious assertion failures. +** +** If the argument to sqlite3_mutex_held() is a NULL pointer then +** the routine should return 1. This seems counter-intuitive since +** clearly the mutex cannot be held if it does not exist. But +** the reason the mutex does not exist is because the build is not +** using mutexes. And we do not want the assert() containing the +** call to sqlite3_mutex_held() to fail, so a non-zero return is +** the appropriate thing to do. The sqlite3_mutex_notheld() +** interface should also return 1 when given a NULL pointer. +*/ +#ifndef NDEBUG +SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*); +SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); +#endif + +/* +** CAPI3REF: Mutex Types +** +** The [sqlite3_mutex_alloc()] interface takes a single argument +** which is one of these integer constants. 
+**
+** The set of static mutexes may change from one SQLite release to the
+** next.  Applications that override the built-in mutex logic must be
+** prepared to accommodate additional static mutexes.
+*/
+#define SQLITE_MUTEX_FAST             0
+#define SQLITE_MUTEX_RECURSIVE        1
+#define SQLITE_MUTEX_STATIC_MASTER    2
+#define SQLITE_MUTEX_STATIC_MEM       3  /* sqlite3_malloc() */
+#define SQLITE_MUTEX_STATIC_MEM2      4  /* NOT USED */
+#define SQLITE_MUTEX_STATIC_OPEN      4  /* sqlite3BtreeOpen() */
+#define SQLITE_MUTEX_STATIC_PRNG      5  /* sqlite3_randomness() */
+#define SQLITE_MUTEX_STATIC_LRU       6  /* lru page list */
+#define SQLITE_MUTEX_STATIC_LRU2      7  /* NOT USED */
+#define SQLITE_MUTEX_STATIC_PMEM      7  /* sqlite3PageMalloc() */
+#define SQLITE_MUTEX_STATIC_APP1      8  /* For use by application */
+#define SQLITE_MUTEX_STATIC_APP2      9  /* For use by application */
+#define SQLITE_MUTEX_STATIC_APP3     10  /* For use by application */
+#define SQLITE_MUTEX_STATIC_VFS1     11  /* For use by built-in VFS */
+#define SQLITE_MUTEX_STATIC_VFS2     12  /* For use by extension VFS */
+#define SQLITE_MUTEX_STATIC_VFS3     13  /* For use by application VFS */
+
+/*
+** CAPI3REF: Retrieve the mutex for a database connection
+** METHOD: sqlite3
+**
+** ^This interface returns a pointer to the [sqlite3_mutex] object that
+** serializes access to the [database connection] given in the argument
+** when the [threading mode] is Serialized.
+** ^If the [threading mode] is Single-thread or Multi-thread then this
+** routine returns a NULL pointer.
+*/
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
+
+/*
+** CAPI3REF: Low-Level Control Of Database Files
+** METHOD: sqlite3
+**
+** ^The [sqlite3_file_control()] interface makes a direct call to the
+** xFileControl method for the [sqlite3_io_methods] object associated
+** with a particular database identified by the second argument. ^The
+** name of the database is "main" for the main database or "temp" for the
+** TEMP database, or the name that appears after the AS keyword for
+** databases that are added using the [ATTACH] SQL command.
+** ^A NULL pointer can be used in place of "main" to refer to the
+** main database file.
+** ^The third and fourth parameters to this routine
+** are passed directly through to the second and third parameters of
+** the xFileControl method.  ^The return value of the xFileControl
+** method becomes the return value of this routine.
+**
+** ^The SQLITE_FCNTL_FILE_POINTER value for the op parameter causes
+** a pointer to the underlying [sqlite3_file] object to be written into
+** the space pointed to by the 4th parameter.  ^The SQLITE_FCNTL_FILE_POINTER
+** case is a short-circuit path which does not actually invoke the
+** underlying sqlite3_io_methods.xFileControl method.
+**
+** ^If the second parameter (zDbName) does not match the name of any
+** open database file, then SQLITE_ERROR is returned.  ^This error
+** code is not remembered and will not be recalled by [sqlite3_errcode()]
+** or [sqlite3_errmsg()].  The underlying xFileControl method might
+** also return SQLITE_ERROR.  There is no way to distinguish between
+** an incorrect zDbName and an SQLITE_ERROR return from the underlying
+** xFileControl method.
+**
+** See also: [SQLITE_FCNTL_LOCKSTATE]
+*/
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
+
+/*
+** CAPI3REF: Testing Interface
+**
+** ^The sqlite3_test_control() interface is used to read out internal
+** state of SQLite and to inject faults into SQLite for testing
+** purposes.
+** ^The first parameter is an operation code that determines
+** the number, meaning, and operation of all subsequent parameters.
+**
+** This interface is not for use by applications.  It exists solely
+** for verifying the correct operation of the SQLite library.  Depending
+** on how the SQLite library is compiled, this interface might not exist.
+**
+** The details of the operation codes, their meanings, the parameters
+** they take, and what they do are all subject to change without notice.
+** Unlike most of the SQLite API, this function is not guaranteed to
+** operate consistently from one release to the next.
+*/
+SQLITE_API int sqlite3_test_control(int op, ...);
+
+/*
+** CAPI3REF: Testing Interface Operation Codes
+**
+** These constants are the valid operation code parameters used
+** as the first argument to [sqlite3_test_control()].
+**
+** These parameters and their meanings are subject to change
+** without notice.  These values are for testing purposes only.
+** Applications should not use any of these parameters or the
+** [sqlite3_test_control()] interface.
+*/
+#define SQLITE_TESTCTRL_FIRST                    5
+#define SQLITE_TESTCTRL_PRNG_SAVE                5
+#define SQLITE_TESTCTRL_PRNG_RESTORE             6
+#define SQLITE_TESTCTRL_PRNG_RESET               7
+#define SQLITE_TESTCTRL_BITVEC_TEST              8
+#define SQLITE_TESTCTRL_FAULT_INSTALL            9
+#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS     10
+#define SQLITE_TESTCTRL_PENDING_BYTE            11
+#define SQLITE_TESTCTRL_ASSERT                  12
+#define SQLITE_TESTCTRL_ALWAYS                  13
+#define SQLITE_TESTCTRL_RESERVE                 14
+#define SQLITE_TESTCTRL_OPTIMIZATIONS           15
+#define SQLITE_TESTCTRL_ISKEYWORD               16
+#define SQLITE_TESTCTRL_SCRATCHMALLOC           17
+#define SQLITE_TESTCTRL_LOCALTIME_FAULT         18
+#define SQLITE_TESTCTRL_EXPLAIN_STMT            19  /* NOT USED */
+#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD    19
+#define SQLITE_TESTCTRL_NEVER_CORRUPT           20
+#define SQLITE_TESTCTRL_VDBE_COVERAGE           21
+#define SQLITE_TESTCTRL_BYTEORDER               22
+#define SQLITE_TESTCTRL_ISINIT                  23
+#define SQLITE_TESTCTRL_SORTER_MMAP             24
+#define SQLITE_TESTCTRL_IMPOSTER                25
+#define SQLITE_TESTCTRL_LAST                    25
+
+/*
+** CAPI3REF: SQLite Runtime Status
+**
+** ^These interfaces are used to retrieve runtime status information
+** about the performance of SQLite, and optionally to reset various
+** highwater marks.  ^The first argument is an integer code for
+** the specific parameter to measure.  ^(Recognized integer codes
+** are of the form [status parameters | SQLITE_STATUS_...].)^
+** ^The current value of the parameter is returned into *pCurrent.
+** ^The highest recorded value is returned in *pHighwater.  ^If the
+** resetFlag is true, then the highest recorded value is reset after
+** *pHighwater is written.  ^(Some parameters do not record the highest
+** value.  For those parameters
+** nothing is written into *pHighwater and the resetFlag is ignored.)^
+** ^(Other parameters record only the highwater mark and not the current
+** value.  For these latter parameters nothing is written into *pCurrent.)^
+**
+** ^The sqlite3_status() and sqlite3_status64() routines return
+** SQLITE_OK on success and a non-zero [error code] on failure.
+**
+** If either the current value or the highwater mark is too large to
+** be represented by a 32-bit integer, then the values returned by
+** sqlite3_status() are undefined.
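+**
+** For illustration only, reading the peak memory usage and resetting the
+** highwater mark might look like:
+**
+**     int iCur = 0, iHiwtr = 0;
+**     sqlite3_status(SQLITE_STATUS_MEMORY_USED, &iCur, &iHiwtr, 1);
+**     printf("memory in use: %d bytes (peak %d)\n", iCur, iHiwtr);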
+** +** See also: [sqlite3_db_status()] +*/ +SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); +SQLITE_API int sqlite3_status64( + int op, + sqlite3_int64 *pCurrent, + sqlite3_int64 *pHighwater, + int resetFlag +); + + +/* +** CAPI3REF: Status Parameters +** KEYWORDS: {status parameters} +** +** These integer constants designate various run-time status parameters +** that can be returned by [sqlite3_status()]. +** +**
+** [[SQLITE_STATUS_MEMORY_USED]] ^(SQLITE_STATUS_MEMORY_USED:
+** This parameter is the current amount of memory checked out
+** using [sqlite3_malloc()], either directly or indirectly.  The
+** figure includes calls made to [sqlite3_malloc()] by the application
+** and internal memory usage by the SQLite library.  Scratch memory
+** controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache
+** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in
+** this parameter.  The amount returned is the sum of the allocation
+** sizes as reported by the xSize method in [sqlite3_mem_methods].)^
+**
+** [[SQLITE_STATUS_MALLOC_SIZE]] ^(SQLITE_STATUS_MALLOC_SIZE:
+** This parameter records the largest memory allocation request
+** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their
+** internal equivalents).  Only the value returned in the
+** *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.)^
+**
+** [[SQLITE_STATUS_MALLOC_COUNT]] ^(SQLITE_STATUS_MALLOC_COUNT:
+** This parameter records the number of separate memory allocations
+** currently checked out.)^
+**
+** [[SQLITE_STATUS_PAGECACHE_USED]] ^(SQLITE_STATUS_PAGECACHE_USED:
+** This parameter returns the number of pages used out of the
+** [pagecache memory allocator] that was configured using
+** [SQLITE_CONFIG_PAGECACHE].  The
+** value returned is in pages, not in bytes.)^
+**
+** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]]
+** ^(SQLITE_STATUS_PAGECACHE_OVERFLOW:
+** This parameter returns the number of bytes of page cache
+** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
+** buffer and were forced to overflow to [sqlite3_malloc()].  The
+** returned value includes allocations that overflowed because they
+** were too large (they were larger than the "sz" parameter to
+** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
+** no space was left in the page cache.)^
+**
+** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(SQLITE_STATUS_PAGECACHE_SIZE:
+** This parameter records the largest memory allocation request
+** handed to the [pagecache memory allocator].  Only the value returned
+** in the *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.)^
+**
+** [[SQLITE_STATUS_SCRATCH_USED]] ^(SQLITE_STATUS_SCRATCH_USED:
+** This parameter returns the number of allocations used out of the
+** [scratch memory allocator] configured using
+** [SQLITE_CONFIG_SCRATCH].  The value returned is in allocations, not
+** in bytes.  Since a single thread may only have one scratch allocation
+** outstanding at a time, this parameter also reports the number of threads
+** using scratch memory at the same time.)^
+**
+** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(SQLITE_STATUS_SCRATCH_OVERFLOW:
+** This parameter returns the number of bytes of scratch memory
+** allocation which could not be satisfied by the [SQLITE_CONFIG_SCRATCH]
+** buffer and were forced to overflow to [sqlite3_malloc()].  The values
+** returned include overflows because the requested allocation was too
+** large (that is, because the requested allocation was larger than the
+** "sz" parameter to [SQLITE_CONFIG_SCRATCH]) and because no scratch buffer
+** slots were available.)^
+**
+** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(SQLITE_STATUS_SCRATCH_SIZE:
+** This parameter records the largest memory allocation request
+** handed to the [scratch memory allocator].  Only the value returned
+** in the *pHighwater parameter to [sqlite3_status()] is of interest.
+** The value written into the *pCurrent parameter is undefined.)^
+**
+** [[SQLITE_STATUS_PARSER_STACK]] ^(SQLITE_STATUS_PARSER_STACK:
+** The *pHighwater parameter records the deepest parser stack.
+** The *pCurrent value is undefined.  The *pHighwater value is only
+** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].)^
+** +** New status parameters may be added from time to time. +*/ +#define SQLITE_STATUS_MEMORY_USED 0 +#define SQLITE_STATUS_PAGECACHE_USED 1 +#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 +#define SQLITE_STATUS_SCRATCH_USED 3 +#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 +#define SQLITE_STATUS_MALLOC_SIZE 5 +#define SQLITE_STATUS_PARSER_STACK 6 +#define SQLITE_STATUS_PAGECACHE_SIZE 7 +#define SQLITE_STATUS_SCRATCH_SIZE 8 +#define SQLITE_STATUS_MALLOC_COUNT 9 + +/* +** CAPI3REF: Database Connection Status +** METHOD: sqlite3 +** +** ^This interface is used to retrieve runtime status information +** about a single [database connection]. ^The first argument is the +** database connection object to be interrogated. ^The second argument +** is an integer constant, taken from the set of +** [SQLITE_DBSTATUS options], that +** determines the parameter to interrogate. The set of +** [SQLITE_DBSTATUS options] is likely +** to grow in future releases of SQLite. +** +** ^The current value of the requested parameter is written into *pCur +** and the highest instantaneous value is written into *pHiwtr. ^If +** the resetFlg is true, then the highest instantaneous value is +** reset back down to the current value. +** +** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a +** non-zero [error code] on failure. +** +** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. +*/ +SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); + +/* +** CAPI3REF: Status Parameters for database connections +** KEYWORDS: {SQLITE_DBSTATUS options} +** +** These constants are the available integer "verbs" that can be passed as +** the second argument to the [sqlite3_db_status()] interface. +** +** New verbs may be added in future releases of SQLite. Existing verbs +** might be discontinued. Applications should check the return code from +** [sqlite3_db_status()] to make sure that the call worked. +** The [sqlite3_db_status()] interface will return a non-zero error code +** if a discontinued or unsupported verb is invoked. +** +**
+** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(SQLITE_DBSTATUS_LOOKASIDE_USED:
+** This parameter returns the number of lookaside memory slots currently
+** checked out.)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(SQLITE_DBSTATUS_LOOKASIDE_HIT:
+** This parameter returns the number of malloc attempts that were
+** satisfied using lookaside memory. Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]]
+** ^(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE:
+** This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to the amount of
+** memory requested being larger than the lookaside slot size.
+** Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]]
+** ^(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL:
+** This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to all lookaside
+** memory already being in use.
+** Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_CACHE_USED]] ^(SQLITE_DBSTATUS_CACHE_USED:
+** This parameter returns the approximate number of bytes of heap
+** memory used by all pager caches associated with the database connection.)^
+** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]]
+** ^(SQLITE_DBSTATUS_CACHE_USED_SHARED:
+** This parameter is similar to DBSTATUS_CACHE_USED, except that if a
+** pager cache is shared between two or more connections the bytes of heap
+** memory used by that pager cache is divided evenly between the attached
+** connections.)^  In other words, if none of the pager caches associated
+** with the database connection are shared, this request returns the same
+** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are
+** shared, the value returned by this call will be smaller than that returned
+** by DBSTATUS_CACHE_USED. ^The highwater mark associated with
+** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0.
+**
+** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(SQLITE_DBSTATUS_SCHEMA_USED:
+** This parameter returns the approximate number of bytes of heap
+** memory used to store the schema for all databases associated
+** with the connection - main, temp, and any [ATTACH]-ed databases.)^
+** ^The full amount of memory used by the schemas is reported, even if the
+** schema memory is shared with other database connections due to
+** [shared cache mode] being enabled.
+** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_STMT_USED]] ^(SQLITE_DBSTATUS_STMT_USED:
+** This parameter returns the approximate number of bytes of heap
+** and lookaside memory used by all prepared statements associated with
+** the database connection.)^
+** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
+**
+** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(SQLITE_DBSTATUS_CACHE_HIT:
+** This parameter returns the number of pager cache hits that have
+** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT
+** is always 0.
+**
+** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(SQLITE_DBSTATUS_CACHE_MISS:
+** This parameter returns the number of pager cache misses that have
+** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS
+** is always 0.
+**
+** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(SQLITE_DBSTATUS_CACHE_WRITE:
+** This parameter returns the number of dirty cache entries that have
+** been written to disk. Specifically, the number of pages written to the
+** wal file in wal mode databases, or the number of pages written to the
+** database file in rollback mode databases. Any pages written as part of
+** transaction rollback or database recovery operations are not included.
+** If an IO or other error occurs while writing a page to disk, the effect
+** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The
+** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0.
+**
+** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(SQLITE_DBSTATUS_DEFERRED_FKS:
+** This parameter returns zero for the current value if and only if
+** all foreign key constraints (deferred or immediate) have been
+** resolved.)^  ^The highwater mark is always 0.
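+**
+** For illustration only, sampling the pager-cache heap usage of an open
+** connection db might look like:
+**
+**     int iCur = 0, iHiwtr = 0;
+**     sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &iCur, &iHiwtr, 0);
+**     printf("pager cache is using %d bytes\n", iCur);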
+*/ +#define SQLITE_DBSTATUS_LOOKASIDE_USED 0 +#define SQLITE_DBSTATUS_CACHE_USED 1 +#define SQLITE_DBSTATUS_SCHEMA_USED 2 +#define SQLITE_DBSTATUS_STMT_USED 3 +#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 +#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 +#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 +#define SQLITE_DBSTATUS_CACHE_HIT 7 +#define SQLITE_DBSTATUS_CACHE_MISS 8 +#define SQLITE_DBSTATUS_CACHE_WRITE 9 +#define SQLITE_DBSTATUS_DEFERRED_FKS 10 +#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 +#define SQLITE_DBSTATUS_MAX 11 /* Largest defined DBSTATUS */ + + +/* +** CAPI3REF: Prepared Statement Status +** METHOD: sqlite3_stmt +** +** ^(Each prepared statement maintains various +** [SQLITE_STMTSTATUS counters] that measure the number +** of times it has performed specific operations.)^ These counters can +** be used to monitor the performance characteristics of the prepared +** statements. For example, if the number of table steps greatly exceeds +** the number of table searches or result rows, that would tend to indicate +** that the prepared statement is using a full table scan rather than +** an index. +** +** ^(This interface is used to retrieve and reset counter values from +** a [prepared statement]. The first argument is the prepared statement +** object to be interrogated. The second argument +** is an integer code for a specific [SQLITE_STMTSTATUS counter] +** to be interrogated.)^ +** ^The current value of the requested counter is returned. +** ^If the resetFlg is true, then the counter is reset to zero after this +** interface call returns. +** +** See also: [sqlite3_status()] and [sqlite3_db_status()]. +*/ +SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); + +/* +** CAPI3REF: Status Parameters for prepared statements +** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters} +** +** These preprocessor macros define integer codes that name counter +** values associated with the [sqlite3_stmt_status()] interface. +** The meanings of the various counters are as follows: +** +**
+** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] SQLITE_STMTSTATUS_FULLSCAN_STEP:
+** ^This is the number of times that SQLite has stepped forward in
+** a table as part of a full table scan.  Large numbers for this counter
+** may indicate opportunities for performance improvement through
+** careful use of indices.
+**
+** [[SQLITE_STMTSTATUS_SORT]] SQLITE_STMTSTATUS_SORT:
+** ^This is the number of sort operations that have occurred.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance through careful use of indices.
+**
+** [[SQLITE_STMTSTATUS_AUTOINDEX]] SQLITE_STMTSTATUS_AUTOINDEX:
+** ^This is the number of rows inserted into transient indices that
+** were created automatically in order to help joins run faster.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance by adding permanent indices that do not
+** need to be reinitialized each time the statement is run.
+**
+** [[SQLITE_STMTSTATUS_VM_STEP]] SQLITE_STMTSTATUS_VM_STEP:
+** ^This is the number of virtual machine operations executed
+** by the prepared statement if that number is less than or equal
+** to 2147483647.  The number of virtual machine operations can be
+** used as a proxy for the total work done by the prepared statement.
+** If the number of virtual machine operations exceeds 2147483647
+** then the value returned by this statement status code is undefined.
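+**
+** For illustration only, checking a statement for full-scan behavior
+** (pStmt is assumed to be a valid [prepared statement]):
+**
+**     int nScan = sqlite3_stmt_status(pStmt,
+**                        SQLITE_STMTSTATUS_FULLSCAN_STEP, 0);
+**     if( nScan>0 ){
+**       /* the statement stepped through a full table scan;
+**       ** consider adding an index */
+**     }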
+*/ +#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 +#define SQLITE_STMTSTATUS_SORT 2 +#define SQLITE_STMTSTATUS_AUTOINDEX 3 +#define SQLITE_STMTSTATUS_VM_STEP 4 + +/* +** CAPI3REF: Custom Page Cache Object +** +** The sqlite3_pcache type is opaque. It is implemented by +** the pluggable module. The SQLite core has no knowledge of +** its size or internal structure and never deals with the +** sqlite3_pcache object except by holding and passing pointers +** to the object. +** +** See [sqlite3_pcache_methods2] for additional information. +*/ +typedef struct sqlite3_pcache sqlite3_pcache; + +/* +** CAPI3REF: Custom Page Cache Object +** +** The sqlite3_pcache_page object represents a single page in the +** page cache. The page cache will allocate instances of this +** object. Various methods of the page cache use pointers to instances +** of this object as parameters or as their return value. +** +** See [sqlite3_pcache_methods2] for additional information. +*/ +typedef struct sqlite3_pcache_page sqlite3_pcache_page; +struct sqlite3_pcache_page { + void *pBuf; /* The content of the page */ + void *pExtra; /* Extra information associated with the page */ +}; + +/* +** CAPI3REF: Application Defined Page Cache. +** KEYWORDS: {page cache} +** +** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can +** register an alternative page cache implementation by passing in an +** instance of the sqlite3_pcache_methods2 structure.)^ +** In many applications, most of the heap memory allocated by +** SQLite is used for the page cache. +** By implementing a +** custom page cache using this API, an application can better control +** the amount of memory consumed by SQLite, the way in which +** that memory is allocated and released, and the policies used to +** determine exactly which parts of a database file are cached and for +** how long. +** +** The alternative page cache mechanism is an +** extreme measure that is only needed by the most demanding applications. +** The built-in page cache is recommended for most uses. +** +** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an +** internal buffer by SQLite within the call to [sqlite3_config]. Hence +** the application may discard the parameter after the call to +** [sqlite3_config()] returns.)^ +** +** [[the xInit() page cache method]] +** ^(The xInit() method is called once for each effective +** call to [sqlite3_initialize()])^ +** (usually only once during the lifetime of the process). ^(The xInit() +** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^ +** The intent of the xInit() method is to set up global data structures +** required by the custom page cache implementation. +** ^(If the xInit() method is NULL, then the +** built-in default page cache is used instead of the application defined +** page cache.)^ +** +** [[the xShutdown() page cache method]] +** ^The xShutdown() method is called by [sqlite3_shutdown()]. +** It can be used to clean up +** any outstanding resources before process shutdown, if required. +** ^The xShutdown() method may be NULL. +** +** ^SQLite automatically serializes calls to the xInit method, +** so the xInit method need not be threadsafe. ^The +** xShutdown method is only called from [sqlite3_shutdown()] so it does +** not need to be threadsafe either. All other methods must be threadsafe +** in multithreaded applications. +** +** ^SQLite will never invoke xInit() more than once without an intervening +** call to xShutdown(). 
+**
+** [[the xCreate() page cache methods]]
+** ^SQLite invokes the xCreate() method to construct a new cache instance.
+** SQLite will typically create one cache instance for each open database file,
+** though this is not guaranteed. ^The
+** first parameter, szPage, is the size in bytes of the pages that must
+** be allocated by the cache.  ^szPage will always be a power of two.  ^The
+** second parameter szExtra is a number of bytes of extra storage
+** associated with each page cache entry.  ^The szExtra parameter will be
+** a number less than 250.  SQLite will use the
+** extra szExtra bytes on each page to store metadata about the underlying
+** database page on disk.  The value passed into szExtra depends
+** on the SQLite version, the target platform, and how SQLite was compiled.
+** ^The third argument to xCreate(), bPurgeable, is true if the cache being
+** created will be used to cache database pages of a file stored on disk, or
+** false if it is used for an in-memory database.  The cache implementation
+** does not have to do anything special based on the value of bPurgeable;
+** it is purely advisory.  ^On a cache where bPurgeable is false, SQLite will
+** never invoke xUnpin() except to deliberately delete a page.
+** ^In other words, calls to xUnpin() on a cache with bPurgeable set to
+** false will always have the "discard" flag set to true.
+** ^Hence, a cache created with bPurgeable false will
+** never contain any unpinned pages.
+**
+** [[the xCachesize() page cache method]]
+** ^(The xCachesize() method may be called at any time by SQLite to set the
+** suggested maximum cache-size (number of pages stored by) the cache
+** instance passed as the first argument.  This is the value configured using
+** the SQLite "[PRAGMA cache_size]" command.)^  As with the bPurgeable
+** parameter, the implementation is not required to do anything with this
+** value; it is advisory only.
+**
+** [[the xPagecount() page cache methods]]
+** The xPagecount() method must return the number of pages currently
+** stored in the cache, both pinned and unpinned.
+**
+** [[the xFetch() page cache methods]]
+** The xFetch() method locates a page in the cache and returns a pointer to
+** an sqlite3_pcache_page object associated with that page, or a NULL pointer.
+** The pBuf element of the returned sqlite3_pcache_page object will be a
+** pointer to a buffer of szPage bytes used to store the content of a
+** single database page.  The pExtra element of sqlite3_pcache_page will be
+** a pointer to the szExtra bytes of extra storage that SQLite has requested
+** for each entry in the page cache.
+**
+** The page to be fetched is determined by the key. ^The minimum key value
+** is 1.  After it has been retrieved using xFetch, the page is considered
+** to be "pinned".
+**
+** If the requested page is already in the page cache, then the page cache
+** implementation must return a pointer to the page buffer with its content
+** intact.  If the requested page is not already in the cache, then the
+** cache implementation should use the value of the createFlag
+** parameter to help it determine what action to take:
+**
+**    createFlag   Behavior when page is not already in cache
+**    ----------   -------------------------------------------------------
+**        0        Do not allocate a new page.  Return NULL.
+**        1        Allocate a new page if it is easy and convenient to do
+**                 so.  Otherwise return NULL.
+**        2        Make every effort to allocate a new page.  Only return
+**                 NULL if allocating a new page is effectively impossible.
+**
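+** As an informal illustration (not part of this interface's specification;
+** cacheLookup(), cacheIsNearlyFull() and cacheAlloc() are hypothetical
+** helpers), an xFetch() implementation might act on createFlag like so:
+**
+**    static sqlite3_pcache_page *exFetch(sqlite3_pcache *p,
+**                                        unsigned key, int createFlag){
+**      sqlite3_pcache_page *pPg = cacheLookup(p, key); /* already cached? */
+**      if( pPg || createFlag==0 ) return pPg;      /* 0: never allocate */
+**      if( createFlag==1 && cacheIsNearlyFull(p) ){
+**        return 0;                                 /* 1: only if easy */
+**      }
+**      return cacheAlloc(p, key);                  /* 2: try very hard */
+**    }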
+**
+** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite
+** will only use a createFlag of 2 after a prior call with a createFlag of 1
+** failed.)^ In between the two xFetch() calls, SQLite may
+** attempt to unpin one or more cache pages by spilling the content of
+** pinned pages to disk and synching the operating system disk cache.
+**
+** [[the xUnpin() page cache method]]
+** ^xUnpin() is called by SQLite with a pointer to a currently pinned page
+** as its second argument. If the third parameter, discard, is non-zero,
+** then the page must be evicted from the cache.
+** ^If the discard parameter is
+** zero, then the page may be discarded or retained at the discretion of
+** the page cache implementation. ^The page cache implementation
+** may choose to evict unpinned pages at any time.
+**
+** The cache must not perform any reference counting. A single
+** call to xUnpin() unpins the page regardless of the number of prior calls
+** to xFetch().
+**
+** [[the xRekey() page cache methods]]
+** The xRekey() method is used to change the key value associated with the
+** page passed as the second argument. If the cache
+** already contains an entry associated with newKey, it must be
+** discarded. ^Any prior cache entry associated with newKey is guaranteed not
+** to be pinned.
+**
+** [[the xTruncate() page cache method]]
+** When SQLite calls the xTruncate() method, the cache must discard all
+** existing cache entries with page numbers (keys) greater than or equal
+** to the value of the iLimit parameter passed to xTruncate(). If any
+** of these pages are pinned, they are implicitly unpinned, meaning that
+** they can be safely discarded.
+**
+** [[the xDestroy() page cache method]]
+** ^The xDestroy() method is used to delete a cache allocated by xCreate().
+** All resources associated with the specified cache should be freed. ^After
+** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*]
+** handle invalid, and will not use it with any other sqlite3_pcache_methods2
+** functions.
+**
+** [[the xShrink() page cache method]]
+** ^SQLite invokes the xShrink() method when it wants the page cache to
+** free up as much heap memory as possible. The page cache implementation
+** is not obligated to free any memory, but well-behaved implementations should
+** do their best.
+*/
+typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2;
+struct sqlite3_pcache_methods2 {
+  int iVersion;
+  void *pArg;
+  int (*xInit)(void*);
+  void (*xShutdown)(void*);
+  sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable);
+  void (*xCachesize)(sqlite3_pcache*, int nCachesize);
+  int (*xPagecount)(sqlite3_pcache*);
+  sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
+  void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard);
+  void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*,
+      unsigned oldKey, unsigned newKey);
+  void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
+  void (*xDestroy)(sqlite3_pcache*);
+  void (*xShrink)(sqlite3_pcache*);
+};
+
+/*
+** This is the obsolete pcache_methods object that has now been replaced
+** by sqlite3_pcache_methods2. This object is not used by SQLite. It is
+** retained in the header file for backwards compatibility only.
+*/ +typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; +struct sqlite3_pcache_methods { + void *pArg; + int (*xInit)(void*); + void (*xShutdown)(void*); + sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); + void (*xCachesize)(sqlite3_pcache*, int nCachesize); + int (*xPagecount)(sqlite3_pcache*); + void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); + void (*xUnpin)(sqlite3_pcache*, void*, int discard); + void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); + void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); + void (*xDestroy)(sqlite3_pcache*); +}; + + +/* +** CAPI3REF: Online Backup Object +** +** The sqlite3_backup object records state information about an ongoing +** online backup operation. ^The sqlite3_backup object is created by +** a call to [sqlite3_backup_init()] and is destroyed by a call to +** [sqlite3_backup_finish()]. +** +** See Also: [Using the SQLite Online Backup API] +*/ +typedef struct sqlite3_backup sqlite3_backup; + +/* +** CAPI3REF: Online Backup API. +** +** The backup API copies the content of one database into another. +** It is useful either for creating backups of databases or +** for copying in-memory databases to or from persistent files. +** +** See Also: [Using the SQLite Online Backup API] +** +** ^SQLite holds a write transaction open on the destination database file +** for the duration of the backup operation. +** ^The source database is read-locked only while it is being read; +** it is not locked continuously for the entire backup operation. +** ^Thus, the backup may be performed on a live source database without +** preventing other database connections from +** reading or writing to the source database while the backup is underway. +** +** ^(To perform a backup operation: +**
    +**
  1. sqlite3_backup_init() is called once to initialize the +** backup, +**
  2. sqlite3_backup_step() is called one or more times to transfer +** the data between the two databases, and finally +**
  3. sqlite3_backup_finish() is called to release all resources +** associated with the backup operation. +**
)^ +** There should be exactly one call to sqlite3_backup_finish() for each +** successful call to sqlite3_backup_init(). +** +** [[sqlite3_backup_init()]] sqlite3_backup_init() +** +** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the +** [database connection] associated with the destination database +** and the database name, respectively. +** ^The database name is "main" for the main database, "temp" for the +** temporary database, or the name specified after the AS keyword in +** an [ATTACH] statement for an attached database. +** ^The S and M arguments passed to +** sqlite3_backup_init(D,N,S,M) identify the [database connection] +** and database name of the source database, respectively. +** ^The source and destination [database connections] (parameters S and D) +** must be different or else sqlite3_backup_init(D,N,S,M) will fail with +** an error. +** +** ^A call to sqlite3_backup_init() will fail, returning NULL, if +** there is already a read or read-write transaction open on the +** destination database. +** +** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is +** returned and an error code and error message are stored in the +** destination [database connection] D. +** ^The error code and message for the failed call to sqlite3_backup_init() +** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or +** [sqlite3_errmsg16()] functions. +** ^A successful call to sqlite3_backup_init() returns a pointer to an +** [sqlite3_backup] object. +** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and +** sqlite3_backup_finish() functions to perform the specified backup +** operation. +** +** [[sqlite3_backup_step()]] sqlite3_backup_step() +** +** ^Function sqlite3_backup_step(B,N) will copy up to N pages between +** the source and destination databases specified by [sqlite3_backup] object B. +** ^If N is negative, all remaining source pages are copied. +** ^If sqlite3_backup_step(B,N) successfully copies N pages and there +** are still more pages to be copied, then the function returns [SQLITE_OK]. +** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages +** from source to destination, then it returns [SQLITE_DONE]. +** ^If an error occurs while running sqlite3_backup_step(B,N), +** then an [error code] is returned. ^As well as [SQLITE_OK] and +** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], +** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an +** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. +** +** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if +**
    +**
  1. the destination database was opened read-only, or +**
  2. the destination database is using write-ahead-log journaling +** and the destination and source page sizes differ, or +**
  3. the destination database is an in-memory database and the +** destination and source page sizes differ. +**
+** )^
+**
+** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then
+** the [sqlite3_busy_handler | busy-handler function]
+** is invoked (if one is specified). ^If the
+** busy-handler returns non-zero before the lock is available, then
+** [SQLITE_BUSY] is returned to the caller. ^In this case the call to
+** sqlite3_backup_step() can be retried later. ^If the source
+** [database connection]
+** is being used to write to the source database when sqlite3_backup_step()
+** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this
+** case the call to sqlite3_backup_step() can be retried later on. ^(If
+** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or
+** [SQLITE_READONLY] is returned, then
+** there is no point in retrying the call to sqlite3_backup_step(). These
+** errors are considered fatal.)^ The application must accept
+** that the backup operation has failed and pass the backup operation handle
+** to sqlite3_backup_finish() to release associated resources.
+**
+** ^The first call to sqlite3_backup_step() obtains an exclusive lock
+** on the destination file. ^The exclusive lock is not released until either
+** sqlite3_backup_finish() is called or the backup operation is complete
+** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to
+** sqlite3_backup_step() obtains a [shared lock] on the source database that
+** lasts for the duration of the sqlite3_backup_step() call.
+** ^Because the source database is not locked between calls to
+** sqlite3_backup_step(), the source database may be modified mid-way
+** through the backup process. ^If the source database is modified by an
+** external process or via a database connection other than the one being
+** used by the backup operation, then the backup will be automatically
+** restarted by the next call to sqlite3_backup_step(). ^If the source
+** database is modified using the same database connection as is used
+** by the backup operation, then the backup database is automatically
+** updated at the same time.
+**
+** [[sqlite3_backup_finish()]] sqlite3_backup_finish()
+**
+** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the
+** application wishes to abandon the backup operation, the application
+** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish().
+** ^The sqlite3_backup_finish() interface releases all
+** resources associated with the [sqlite3_backup] object.
+** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any
+** active write-transaction on the destination database is rolled back.
+** The [sqlite3_backup] object is invalid
+** and may not be used following a call to sqlite3_backup_finish().
+**
+** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no
+** sqlite3_backup_step() errors occurred, regardless of whether or not
+** sqlite3_backup_step() completed.
+** ^If an out-of-memory condition or IO error occurred during any prior
+** sqlite3_backup_step() call on the same [sqlite3_backup] object, then
+** sqlite3_backup_finish() returns the corresponding [error code].
+**
+** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step()
+** is not a permanent error and does not affect the return value of
+** sqlite3_backup_finish().
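+**
+** As an informal usage sketch (adapted from the pattern described above;
+** not normative), a complete backup driven in 100-page batches might look
+** like this:
+**
+**    int backupDb(sqlite3 *pSrc, const char *zDest){
+**      sqlite3 *pDest;
+**      sqlite3_backup *p;
+**      int rc = sqlite3_open(zDest, &pDest);
+**      if( rc==SQLITE_OK ){
+**        p = sqlite3_backup_init(pDest, "main", pSrc, "main");
+**        if( p ){
+**          do{
+**            rc = sqlite3_backup_step(p, 100);     /* copy up to 100 pages */
+**            if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+**              sqlite3_sleep(250);                 /* back off, then retry */
+**            }
+**          }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
+**          rc = sqlite3_backup_finish(p);          /* SQLITE_OK on success */
+**        }else{
+**          rc = sqlite3_errcode(pDest);
+**        }
+**      }
+**      sqlite3_close(pDest);
+**      return rc;
+**    }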
+**
+** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
+** sqlite3_backup_remaining() and sqlite3_backup_pagecount()
+**
+** ^The sqlite3_backup_remaining() routine returns the number of pages still
+** to be backed up at the conclusion of the most recent sqlite3_backup_step().
+** ^The sqlite3_backup_pagecount() routine returns the total number of pages
+** in the source database at the conclusion of the most recent
+** sqlite3_backup_step().
+** ^(The values returned by these functions are only updated by
+** sqlite3_backup_step(). If the source database is modified in a way that
+** changes the size of the source database or the number of pages remaining,
+** those changes are not reflected in the output of sqlite3_backup_pagecount()
+** and sqlite3_backup_remaining() until after the next
+** sqlite3_backup_step().)^
+**
+** Concurrent Usage of Database Handles
+**
+** ^The source [database connection] may be used by the application for other
+** purposes while a backup operation is underway or being initialized.
+** ^If SQLite is compiled and configured to support threadsafe database
+** connections, then the source database connection may be used concurrently
+** from within other threads.
+**
+** However, the application must guarantee that the destination
+** [database connection] is not passed to any other API (by any thread) after
+** sqlite3_backup_init() is called and before the corresponding call to
+** sqlite3_backup_finish(). SQLite does not currently check to see
+** if the application incorrectly accesses the destination [database connection]
+** and so no error code is reported, but the operations may malfunction
+** nevertheless. Use of the destination database connection while a
+** backup is in progress might also cause a mutex deadlock.
+**
+** If running in [shared cache mode], the application must
+** guarantee that the shared cache used by the destination database
+** is not accessed while the backup is running. In practice this means
+** that the application must guarantee that the disk file being
+** backed up to is not accessed by any connection within the process,
+** not just the specific connection that was passed to sqlite3_backup_init().
+**
+** The [sqlite3_backup] object itself is partially threadsafe. Multiple
+** threads may safely make multiple concurrent calls to sqlite3_backup_step().
+** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount()
+** APIs are not, strictly speaking, threadsafe. If they are invoked at the
+** same time as another thread is invoking sqlite3_backup_step() it is
+** possible that they return invalid values.
+*/
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
+  sqlite3 *pDest,                        /* Destination database handle */
+  const char *zDestName,                 /* Destination database name */
+  sqlite3 *pSource,                      /* Source database handle */
+  const char *zSourceName                /* Source database name */
+);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
+
+/*
+** CAPI3REF: Unlock Notification
+** METHOD: sqlite3
+**
+** ^When running in shared-cache mode, a database operation may fail with
+** an [SQLITE_LOCKED] error if the required locks on the shared-cache or
+** individual tables within the shared-cache cannot be obtained. See
+** [SQLite Shared-Cache Mode] for a description of shared-cache locking.
+** ^This API may be used to register a callback that SQLite will invoke
+** when the connection currently holding the required lock relinquishes it.
+** ^This API is only available if the library was compiled with the
+** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined.
+**
+** See Also: [Using the SQLite Unlock Notification Feature].
+**
+** ^Shared-cache locks are released when a database connection concludes
+** its current transaction, either by committing it or rolling it back.
+**
+** ^When a connection (known as the blocked connection) fails to obtain a
+** shared-cache lock and SQLITE_LOCKED is returned to the caller, the
+** identity of the database connection (the blocking connection) that
+** has locked the required resource is stored internally. ^After an
+** application receives an SQLITE_LOCKED error, it may call the
+** sqlite3_unlock_notify() method with the blocked connection handle as
+** the first argument to register for a callback that will be invoked
+** when the blocking connection's current transaction is concluded. ^The
+** callback is invoked from within the [sqlite3_step] or [sqlite3_close]
+** call that concludes the blocking connection's transaction.
+**
+** ^(If sqlite3_unlock_notify() is called in a multi-threaded application,
+** there is a chance that the blocking connection will have already
+** concluded its transaction by the time sqlite3_unlock_notify() is invoked.
+** If this happens, then the specified callback is invoked immediately,
+** from within the call to sqlite3_unlock_notify().)^
+**
+** ^If the blocked connection is attempting to obtain a write-lock on a
+** shared-cache table, and more than one other connection currently holds
+** a read-lock on the same table, then SQLite arbitrarily selects one of
+** the other connections to use as the blocking connection.
+**
+** ^(There may be at most one unlock-notify callback registered by a
+** blocked connection. If sqlite3_unlock_notify() is called when the
+** blocked connection already has a registered unlock-notify callback,
+** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is
+** called with a NULL pointer as its second argument, then any existing
+** unlock-notify callback is canceled. ^The blocked connection's
+** unlock-notify callback may also be canceled by closing the blocked
+** connection using [sqlite3_close()].
+**
+** The unlock-notify callback is not reentrant. If an application invokes
+** any sqlite3_xxx API functions from within an unlock-notify callback, a
+** crash or deadlock may be the result.
+**
+** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always
+** returns SQLITE_OK.
+**
+** Callback Invocation Details
+**
+** When an unlock-notify callback is registered, the application provides a
+** single void* pointer that is passed to the callback when it is invoked.
+** However, the signature of the callback function allows SQLite to pass
+** it an array of void* context pointers. The first argument passed to
+** an unlock-notify callback is a pointer to an array of void* pointers,
+** and the second is the number of entries in the array.
+**
+** When a blocking connection's transaction is concluded, there may be
+** more than one blocked connection that has registered for an unlock-notify
+** callback.
+** ^If two or more such blocked connections have specified the
+** same callback function, then instead of invoking the callback function
+** multiple times, it is invoked once with the set of void* context pointers
+** specified by the blocked connections, bundled together into an array.
+** This gives the application an opportunity to prioritize any actions
+** related to the set of unblocked database connections.
+**
+** Deadlock Detection
+**
+** Assuming that after registering for an unlock-notify callback a
+** database waits for the callback to be issued before taking any further
+** action (a reasonable assumption), then using this API may cause the
+** application to deadlock. For example, if connection X is waiting for
+** connection Y's transaction to be concluded, and similarly connection
+** Y is waiting on connection X's transaction, then neither connection
+** will proceed and the system may remain deadlocked indefinitely.
+**
+** To avoid this scenario, the sqlite3_unlock_notify() interface performs
+** deadlock detection. ^If a given call to sqlite3_unlock_notify() would put
+** the system in a deadlocked state, then SQLITE_LOCKED is returned and no
+** unlock-notify callback is registered. The system is said to be in
+** a deadlocked state if connection A has registered for an unlock-notify
+** callback on the conclusion of connection B's transaction, and connection
+** B has itself registered for an unlock-notify callback when connection
+** A's transaction is concluded. ^Indirect deadlock is also detected, so
+** the system is also considered to be deadlocked if connection B has
+** registered for an unlock-notify callback on the conclusion of connection
+** C's transaction, where connection C is waiting on connection A. ^Any
+** number of levels of indirection are allowed.
+**
+** The "DROP TABLE" Exception
+**
+** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost
+** always appropriate to call sqlite3_unlock_notify(). There is, however,
+** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement,
+** SQLite checks if there are any currently executing SELECT statements
+** that belong to the same connection. If there are, SQLITE_LOCKED is
+** returned. In this case there is no "blocking connection", so invoking
+** sqlite3_unlock_notify() results in the unlock-notify callback being
+** invoked immediately. If the application then re-attempts the "DROP TABLE"
+** or "DROP INDEX" query, an infinite loop might be the result.
+**
+** One way around this problem is to check the extended error code returned
+** by an sqlite3_step() call. ^(If there is a blocking connection, then the
+** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in
+** the special "DROP TABLE/INDEX" case, the extended error code is just
+** SQLITE_LOCKED.)^
+*/
+SQLITE_API int sqlite3_unlock_notify(
+  sqlite3 *pBlocked,                          /* Waiting connection */
+  void (*xNotify)(void **apArg, int nArg),    /* Callback function to invoke */
+  void *pNotifyArg                            /* Argument to pass to xNotify */
+);
+
+
+/*
+** CAPI3REF: String Comparison
+**
+** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications
+** and extensions to compare the contents of two buffers containing UTF-8
+** strings in a case-independent fashion, using the same definition of "case
+** independence" that SQLite uses internally when comparing identifiers.
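+**
+** For example (an informal sketch; zName and zSql stand for any
+** NUL-terminated UTF-8 strings supplied by the application):
+**
+**    int isMain   = sqlite3_stricmp(zName, "MAIN")==0;      /* "main" matches too */
+**    int isSelect = sqlite3_strnicmp(zSql, "SELECT", 6)==0; /* case-blind prefix  */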
+*/ +SQLITE_API int sqlite3_stricmp(const char *, const char *); +SQLITE_API int sqlite3_strnicmp(const char *, const char *, int); + +/* +** CAPI3REF: String Globbing +* +** ^The [sqlite3_strglob(P,X)] interface returns zero if and only if +** string X matches the [GLOB] pattern P. +** ^The definition of [GLOB] pattern matching used in +** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the +** SQL dialect understood by SQLite. ^The [sqlite3_strglob(P,X)] function +** is case sensitive. +** +** Note that this routine returns zero on a match and non-zero if the strings +** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. +** +** See also: [sqlite3_strlike()]. +*/ +SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr); + +/* +** CAPI3REF: String LIKE Matching +* +** ^The [sqlite3_strlike(P,X,E)] interface returns zero if and only if +** string X matches the [LIKE] pattern P with escape character E. +** ^The definition of [LIKE] pattern matching used in +** [sqlite3_strlike(P,X,E)] is the same as for the "X LIKE P ESCAPE E" +** operator in the SQL dialect understood by SQLite. ^For "X LIKE P" without +** the ESCAPE clause, set the E parameter of [sqlite3_strlike(P,X,E)] to 0. +** ^As with the LIKE operator, the [sqlite3_strlike(P,X,E)] function is case +** insensitive - equivalent upper and lower case ASCII characters match +** one another. +** +** ^The [sqlite3_strlike(P,X,E)] function matches Unicode characters, though +** only ASCII characters are case folded. +** +** Note that this routine returns zero on a match and non-zero if the strings +** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. +** +** See also: [sqlite3_strglob()]. +*/ +SQLITE_API int sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc); + +/* +** CAPI3REF: Error Logging Interface +** +** ^The [sqlite3_log()] interface writes a message into the [error log] +** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. +** ^If logging is enabled, the zFormat string and subsequent arguments are +** used with [sqlite3_snprintf()] to generate the final output string. +** +** The sqlite3_log() interface is intended for use by extensions such as +** virtual tables, collating functions, and SQL functions. While there is +** nothing to prevent an application from calling sqlite3_log(), doing so +** is considered bad form. +** +** The zFormat string must not be NULL. +** +** To avoid deadlocks and other threading problems, the sqlite3_log() routine +** will not use dynamically allocated memory. The log message is stored in +** a fixed-length buffer on the stack. If the log message is longer than +** a few hundred characters, it will be truncated to the length of the +** buffer. +*/ +SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); + +/* +** CAPI3REF: Write-Ahead Log Commit Hook +** METHOD: sqlite3 +** +** ^The [sqlite3_wal_hook()] function is used to register a callback that +** is invoked each time data is committed to a database in wal mode. +** +** ^(The callback is invoked by SQLite after the commit has taken place and +** the associated write-lock on the database released)^, so the implementation +** may read, write or [checkpoint] the database as required. +** +** ^The first parameter passed to the callback function when it is invoked +** is a copy of the third parameter passed to sqlite3_wal_hook() when +** registering the callback. ^The second is a copy of the database handle. 
+** ^The third parameter is the name of the database that was written to -
+** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter
+** is the number of pages currently in the write-ahead log file,
+** including those that were just committed.
+**
+** The callback function should normally return [SQLITE_OK]. ^If an error
+** code is returned, that error will propagate back up through the
+** SQLite code base to cause the statement that provoked the callback
+** to report an error, though the commit will have still occurred. If the
+** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value
+** that does not correspond to any valid SQLite error code, the results
+** are undefined.
+**
+** A single database handle may have at most a single write-ahead log callback
+** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any
+** previously registered write-ahead log callback. ^Note that the
+** [sqlite3_wal_autocheckpoint()] interface and the
+** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
+** overwrite any prior [sqlite3_wal_hook()] settings.
+*/
+SQLITE_API void *sqlite3_wal_hook(
+  sqlite3*,
+  int(*)(void *,sqlite3*,const char*,int),
+  void*
+);
+
+/*
+** CAPI3REF: Configure an auto-checkpoint
+** METHOD: sqlite3
+**
+** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around
+** [sqlite3_wal_hook()] that causes any database on [database connection] D
+** to automatically [checkpoint]
+** after committing a transaction if there are N or
+** more frames in the [write-ahead log] file. ^Passing zero or
+** a negative value as the nFrame parameter disables automatic
+** checkpoints entirely.
+**
+** ^The callback registered by this function replaces any existing callback
+** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback
+** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism
+** configured by this function.
+**
+** ^The [wal_autocheckpoint pragma] can be used to invoke this interface
+** from SQL.
+**
+** ^Checkpoints initiated by this mechanism are
+** [sqlite3_wal_checkpoint_v2|PASSIVE].
+**
+** ^Every new [database connection] defaults to having the auto-checkpoint
+** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT]
+** pages. The use of this interface
+** is only necessary if the default setting is found to be suboptimal
+** for a particular application.
+*/
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
+
+/*
+** CAPI3REF: Checkpoint a database
+** METHOD: sqlite3
+**
+** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to
+** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^
+**
+** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the
+** [write-ahead log] for database X on [database connection] D to be
+** transferred into the database file and for the write-ahead log to
+** be reset. See the [checkpointing] documentation for additional
+** information.
+**
+** This interface used to be the only way to cause a checkpoint to
+** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()]
+** interface was added. This interface is retained for backwards
+** compatibility and as a convenience for applications that need to manually
+** start a checkpoint but which do not need the full power (and corresponding
+** complication) of [sqlite3_wal_checkpoint_v2()].
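+**
+** For example (an informal sketch; db is assumed to be an open handle on
+** a [WAL mode] database), an application might force a passive checkpoint
+** of the "main" database after a large batch of writes:
+**
+**    int rc = sqlite3_wal_checkpoint(db, "main");
+**    if( rc!=SQLITE_OK ){
+**      sqlite3_log(rc, "manual checkpoint failed: %s", sqlite3_errmsg(db));
+**    }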
+*/ +SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); + +/* +** CAPI3REF: Checkpoint a database +** METHOD: sqlite3 +** +** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint +** operation on database X of [database connection] D in mode M. Status +** information is written back into integers pointed to by L and C.)^ +** ^(The M parameter must be a valid [checkpoint mode]:)^ +** +**
+** SQLITE_CHECKPOINT_PASSIVE:
+**   ^Checkpoint as many frames as possible without waiting for any database
+**   readers or writers to finish, then sync the database file if all frames
+**   in the log were checkpointed. ^The [busy-handler callback]
+**   is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode.
+**   ^On the other hand, passive mode might leave the checkpoint unfinished
+**   if there are concurrent readers or writers.
+**
+** SQLITE_CHECKPOINT_FULL:
+**   ^This mode blocks (it invokes the
+**   [sqlite3_busy_handler|busy-handler callback]) until there is no
+**   database writer and all readers are reading from the most recent database
+**   snapshot. ^It then checkpoints all frames in the log file and syncs the
+**   database file. ^This mode blocks new database writers while it is pending,
+**   but new database readers are allowed to continue unimpeded.
+**
+** SQLITE_CHECKPOINT_RESTART:
+**   ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition
+**   that after checkpointing the log file it blocks (calls the
+**   [busy-handler callback])
+**   until all readers are reading from the database file only. ^This ensures
+**   that the next writer will restart the log file from the beginning.
+**   ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new
+**   database writer attempts while it is pending, but does not impede readers.
+**
+** SQLITE_CHECKPOINT_TRUNCATE:
+**   ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the
+**   addition that it also truncates the log file to zero bytes just prior
+**   to a successful return.
+**
+** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in
+** the log file or to -1 if the checkpoint could not run because
+** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not
+** NULL, then *pnCkpt is set to the total number of checkpointed frames in the
+** log file (including any that were already checkpointed before the function
+** was called) or to -1 if the checkpoint could not run due to an error or
+** because the database is not in WAL mode. ^Note that upon successful
+** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been
+** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero.
+**
+** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If
+** any other process is running a checkpoint operation at the same time, the
+** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a
+** busy-handler configured, it will not be invoked in this case.
+**
+** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the
+** exclusive "writer" lock on the database file. ^If the writer lock cannot be
+** obtained immediately, and a busy-handler is configured, it is invoked and
+** the writer lock retried until either the busy-handler returns 0 or the lock
+** is successfully obtained. ^The busy-handler is also invoked while waiting for
+** database readers as described above. ^If the busy-handler returns 0 before
+** the writer lock is obtained or while waiting for database readers, the
+** checkpoint operation proceeds from that point in the same way as
+** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible
+** without blocking any further. ^SQLITE_BUSY is returned in this case.
+**
+** ^If parameter zDb is NULL or points to a zero length string, then the
+** specified operation is attempted on all WAL databases [attached] to
+** [database connection] db. In this case the
+** values written to output parameters *pnLog and *pnCkpt are undefined. ^If
+** an SQLITE_BUSY error is encountered when processing one or more of the
+** attached WAL databases, the operation is still attempted on any remaining
+** attached databases and SQLITE_BUSY is returned at the end. ^If any other
+** error occurs while processing an attached database, processing is abandoned
+** and the error code is returned to the caller immediately. ^If no error
+** (SQLITE_BUSY or otherwise) is encountered while processing the attached
+** databases, SQLITE_OK is returned.
+**
+** ^If database zDb is the name of an attached database that is not in WAL
+** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. ^If
+** zDb is not NULL (or a zero length string) and is not the name of any
+** attached database, SQLITE_ERROR is returned to the caller.
+**
+** ^Unless it returns SQLITE_MISUSE,
+** the sqlite3_wal_checkpoint_v2() interface
+** sets the error information that is queried by
+** [sqlite3_errcode()] and [sqlite3_errmsg()].
+**
+** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
+** from SQL.
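+**
+** For example (an informal sketch; db is an open connection on a WAL-mode
+** database), a truncate-style checkpoint of the "main" schema:
+**
+**    int nLog = 0, nCkpt = 0;
+**    int rc = sqlite3_wal_checkpoint_v2(db, "main",
+**                 SQLITE_CHECKPOINT_TRUNCATE, &nLog, &nCkpt);
+**    /* After a successful TRUNCATE both nLog and nCkpt are zero. */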
+*/
+SQLITE_API int sqlite3_wal_checkpoint_v2(
+  sqlite3 *db,                    /* Database handle */
+  const char *zDb,                /* Name of attached database (or NULL) */
+  int eMode,                      /* SQLITE_CHECKPOINT_* value */
+  int *pnLog,                     /* OUT: Size of WAL log in frames */
+  int *pnCkpt                     /* OUT: Total number of frames checkpointed */
+);
+
+/*
+** CAPI3REF: Checkpoint Mode Values
+** KEYWORDS: {checkpoint mode}
+**
+** These constants define all valid values for the "checkpoint mode" passed
+** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface.
+** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the
+** meaning of each of these checkpoint modes.
+*/
+#define SQLITE_CHECKPOINT_PASSIVE  0  /* Do as much as possible w/o blocking */
+#define SQLITE_CHECKPOINT_FULL     1  /* Wait for writers, then checkpoint */
+#define SQLITE_CHECKPOINT_RESTART  2  /* Like FULL but wait for readers */
+#define SQLITE_CHECKPOINT_TRUNCATE 3  /* Like RESTART but also truncate WAL */
+
+/*
+** CAPI3REF: Virtual Table Interface Configuration
+**
+** This function may be called by either the [xConnect] or [xCreate] method
+** of a [virtual table] implementation to configure
+** various facets of the virtual table interface.
+**
+** If this interface is invoked outside the context of an xConnect or
+** xCreate virtual table method then the behavior is undefined.
+**
+** At present, there is only one option that may be configured using
+** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options
+** may be added in the future.
+*/
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
+
+/*
+** CAPI3REF: Virtual Table Configuration Options
+**
+** These macros define the various options to the
+** [sqlite3_vtab_config()] interface that [virtual table] implementations
+** can use to customize and optimize their behavior.
+**
+** SQLITE_VTAB_CONSTRAINT_SUPPORT:
+**   Calls of the form
+**   [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
+**   where X is an integer. If X is zero, then the [virtual table] whose
+**   [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not
+**   support constraints. In this configuration (which is the default) if
+**   a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
+**   statement is rolled back as if [ON CONFLICT | OR ABORT] had been
+**   specified as part of the user's SQL statement, regardless of the actual
+**   ON CONFLICT mode specified.
+**
+**   If X is non-zero, then the virtual table implementation guarantees
+**   that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before
+**   any modifications to internal or persistent data structures have been made.
+**   If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite
+**   is able to roll back a statement or database transaction, and abandon
+**   or continue processing the current SQL statement as appropriate.
+**   If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns
+**   [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode
+**   had been ABORT.
+**
+**   Virtual table implementations that are required to handle OR REPLACE
+**   must do so within the [xUpdate] method. If a call to the
+**   [sqlite3_vtab_on_conflict()] function indicates that the current ON
+**   CONFLICT policy is REPLACE, the virtual table implementation should
+**   silently replace the appropriate rows within the xUpdate callback and
+**   return SQLITE_OK. Or, if this is not possible, it may return
+**   SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT
+**   constraint handling.
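+**
+** As an informal sketch (not normative; exConnect is a hypothetical
+** [xConnect] implementation), a virtual table whose xUpdate() fails cleanly
+** on constraint violations would declare that fact like so:
+**
+**    static int exConnect(sqlite3 *db, void *pAux, int argc,
+**                         const char *const *argv,
+**                         sqlite3_vtab **ppVtab, char **pzErr){
+**      int rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+**      /* ... declare the schema and allocate *ppVtab as usual ... */
+**      return rc;
+**    }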
+*/ +#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 + +/* +** CAPI3REF: Determine The Virtual Table Conflict Policy +** +** This function may only be called from within a call to the [xUpdate] method +** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The +** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL], +** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode +** of the SQL statement that triggered the call to the [xUpdate] method of the +** [virtual table]. +*/ +SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); + +/* +** CAPI3REF: Conflict resolution modes +** KEYWORDS: {conflict resolution mode} +** +** These constants are returned by [sqlite3_vtab_on_conflict()] to +** inform a [virtual table] implementation what the [ON CONFLICT] mode +** is for the SQL statement being evaluated. +** +** Note that the [SQLITE_IGNORE] constant is also used as a potential +** return value from the [sqlite3_set_authorizer()] callback and that +** [SQLITE_ABORT] is also a [result code]. +*/ +#define SQLITE_ROLLBACK 1 +/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */ +#define SQLITE_FAIL 3 +/* #define SQLITE_ABORT 4 // Also an error code */ +#define SQLITE_REPLACE 5 + +/* +** CAPI3REF: Prepared Statement Scan Status Opcodes +** KEYWORDS: {scanstatus options} +** +** The following constants can be used for the T parameter to the +** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a +** different metric for sqlite3_stmt_scanstatus() to return. +** +** When the value returned to V is a string, space to hold that string is +** managed by the prepared statement S and will be automatically freed when +** S is finalized. +** +**
+** [[SQLITE_SCANSTAT_NLOOP]] SQLITE_SCANSTAT_NLOOP:
+**   ^The [sqlite3_int64] variable pointed to by the V parameter will be
+**   set to the total number of times that the X-th loop has run.
+**
+** [[SQLITE_SCANSTAT_NVISIT]] SQLITE_SCANSTAT_NVISIT:
+**   ^The [sqlite3_int64] variable pointed to by the V parameter will be set
+**   to the total number of rows examined by all iterations of the X-th loop.
+**
+** [[SQLITE_SCANSTAT_EST]] SQLITE_SCANSTAT_EST:
+**   ^The "double" variable pointed to by the V parameter will be set to the
+**   query planner's estimate for the average number of rows output from each
+**   iteration of the X-th loop. If the query planner's estimate was accurate,
+**   then this value will approximate the quotient NVISIT/NLOOP and the
+**   product of this value for all prior loops with the same SELECTID will
+**   be the NLOOP value for the current loop.
+**
+** [[SQLITE_SCANSTAT_NAME]] SQLITE_SCANSTAT_NAME:
+**   ^The "const char *" variable pointed to by the V parameter will be set
+**   to a zero-terminated UTF-8 string containing the name of the index or
+**   table used for the X-th loop.
+**
+** [[SQLITE_SCANSTAT_EXPLAIN]] SQLITE_SCANSTAT_EXPLAIN:
+**   ^The "const char *" variable pointed to by the V parameter will be set
+**   to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
+**   description for the X-th loop.
+**
+** [[SQLITE_SCANSTAT_SELECTID]] SQLITE_SCANSTAT_SELECTID:
+**   ^The "int" variable pointed to by the V parameter will be set to the
+**   "select-id" for the X-th loop. The select-id identifies which query or
+**   subquery the loop is part of. The main query has a select-id of zero.
+**   The select-id is the same value as is output in the first column
+**   of an [EXPLAIN QUERY PLAN] query.
+**
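+** As an informal sketch (requires [SQLITE_ENABLE_STMT_SCANSTATUS] and
+** <stdio.h>; pStmt is an assumed prepared statement), per-loop statistics
+** can be walked like so:
+**
+**    int idx;
+**    for(idx=0; ; idx++){
+**      sqlite3_int64 nLoop = 0, nVisit = 0;
+**      const char *zExplain = 0;
+**      if( sqlite3_stmt_scanstatus(pStmt, idx,
+**              SQLITE_SCANSTAT_NLOOP, (void*)&nLoop) ) break;
+**      sqlite3_stmt_scanstatus(pStmt, idx,
+**              SQLITE_SCANSTAT_NVISIT, (void*)&nVisit);
+**      sqlite3_stmt_scanstatus(pStmt, idx,
+**              SQLITE_SCANSTAT_EXPLAIN, (void*)&zExplain);
+**      printf("loop %d: %s (nLoop=%lld, nVisit=%lld)\n", idx,
+**             zExplain ? zExplain : "?",
+**             (long long)nLoop, (long long)nVisit);
+**    }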
+*/
+#define SQLITE_SCANSTAT_NLOOP    0
+#define SQLITE_SCANSTAT_NVISIT   1
+#define SQLITE_SCANSTAT_EST      2
+#define SQLITE_SCANSTAT_NAME     3
+#define SQLITE_SCANSTAT_EXPLAIN  4
+#define SQLITE_SCANSTAT_SELECTID 5
+
+/*
+** CAPI3REF: Prepared Statement Scan Status
+** METHOD: sqlite3_stmt
+**
+** This interface returns information about the predicted and measured
+** performance for pStmt. Advanced applications can use this
+** interface to compare the predicted and the measured performance and
+** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
+**
+** Since this interface is expected to be rarely used, it is only
+** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS]
+** compile-time option.
+**
+** The "iScanStatusOp" parameter determines which status information to return.
+** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
+** of this interface is undefined.
+** ^The requested measurement is written into a variable pointed to by
+** the "pOut" parameter.
+** Parameter "idx" identifies the specific loop to retrieve statistics for.
+** Loops are numbered starting from zero. ^If idx is out of range - less than
+** zero or greater than or equal to the total number of loops used to implement
+** the statement - a non-zero value is returned and the variable that pOut
+** points to is unchanged.
+**
+** ^Statistics might not be available for all loops in all statements. ^In cases
+** where there exist loops with no available statistics, this function behaves
+** as if the loop did not exist - it returns non-zero and leaves the variable
+** that pOut points to unchanged.
+**
+** See also: [sqlite3_stmt_scanstatus_reset()]
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+  sqlite3_stmt *pStmt,      /* Prepared statement for which info desired */
+  int idx,                  /* Index of loop to report on */
+  int iScanStatusOp,        /* Information desired.  SQLITE_SCANSTAT_* */
+  void *pOut                /* Result written here */
+);
+
+/*
+** CAPI3REF: Zero Scan-Status Counters
+** METHOD: sqlite3_stmt
+**
+** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
+**
+** This API is only available if the library is built with pre-processor
+** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
+*/
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Flush caches to disk mid-transaction
+**
+** ^If a write-transaction is open on [database connection] D when the
+** [sqlite3_db_cacheflush(D)] interface is invoked, any dirty
+** pages in the pager-cache that are not currently in use are written out
+** to disk. A dirty page may be in use if a database cursor created by an
+** active SQL statement is reading from it, or if it is page 1 of a database
+** file (page 1 is always "in use"). ^The [sqlite3_db_cacheflush(D)]
+** interface flushes caches for all schemas - "main", "temp", and
+** any [attached] databases.
+**
+** ^If this function needs to obtain extra database locks before dirty pages
+** can be flushed to disk, it does so. ^If those locks cannot be obtained
+** immediately and there is a busy-handler callback configured, it is invoked
+** in the usual manner. ^If the required lock still cannot be obtained, then
+** the database is skipped and an attempt made to flush any dirty pages
+** belonging to the next (if any) database. ^If any databases are skipped
+** because locks cannot be obtained, but no other error occurs, this
+** function returns SQLITE_BUSY.
+** +** ^If any other error occurs while flushing dirty pages to disk (for +** example an IO error or out-of-memory condition), then processing is +** abandoned and an SQLite [error code] is returned to the caller immediately. +** +** ^Otherwise, if no error occurs, [sqlite3_db_cacheflush()] returns SQLITE_OK. +** +** ^This function does not set the database handle error code or message +** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions. +*/ +SQLITE_API int sqlite3_db_cacheflush(sqlite3*); + +/* +** CAPI3REF: The pre-update hook. +** +** ^These interfaces are only available if SQLite is compiled using the +** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option. +** +** ^The [sqlite3_preupdate_hook()] interface registers a callback function +** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation +** on a database table. +** ^At most one preupdate hook may be registered at a time on a single +** [database connection]; each call to [sqlite3_preupdate_hook()] overrides +** the previous setting. +** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()] +** with a NULL pointer as the second parameter. +** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as +** the first parameter to callbacks. +** +** ^The preupdate hook only fires for changes to real database tables; the +** preupdate hook is not invoked for changes to [virtual tables] or to +** system tables like sqlite_master or sqlite_stat1. +** +** ^The second parameter to the preupdate callback is a pointer to +** the [database connection] that registered the preupdate hook. +** ^The third parameter to the preupdate callback is one of the constants +** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the +** kind of update operation that is about to occur. +** ^(The fourth parameter to the preupdate callback is the name of the +** database within the database connection that is being modified. This +** will be "main" for the main database or "temp" for TEMP tables or +** the name given after the AS keyword in the [ATTACH] statement for attached +** databases.)^ +** ^The fifth parameter to the preupdate callback is the name of the +** table that is being modified. +** +** For an UPDATE or DELETE operation on a [rowid table], the sixth +** parameter passed to the preupdate callback is the initial [rowid] of the +** row being modified or deleted. For an INSERT operation on a rowid table, +** or any operation on a WITHOUT ROWID table, the value of the sixth +** parameter is undefined. For an INSERT or UPDATE on a rowid table the +** seventh parameter is the final rowid value of the row being inserted +** or updated. The value of the seventh parameter passed to the callback +** function is not defined for operations on WITHOUT ROWID tables, or for +** INSERT operations on rowid tables. +** +** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], +** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces +** provide additional information about a preupdate event. These routines +** may only be called from within a preupdate callback. Invoking any of +** these routines from outside of a preupdate callback or with a +** [database connection] pointer that is different from the one supplied +** to the preupdate callback results in undefined and probably undesirable +** behavior. +** +** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns +** in the row that is being inserted, updated, or deleted. 
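+**
+** As an informal sketch (assuming a build with [SQLITE_ENABLE_PREUPDATE_HOOK];
+** xLogChange is a hypothetical application callback), a hook that logs each
+** impending change and its column count might look like:
+**
+**    static void xLogChange(void *pCtx, sqlite3 *db, int op,
+**                           const char *zDb, const char *zTbl,
+**                           sqlite3_int64 iKey1, sqlite3_int64 iKey2){
+**      sqlite3_log(SQLITE_NOTICE, "%s on %s.%s (%d columns)",
+**                  op==SQLITE_INSERT ? "INSERT" :
+**                  op==SQLITE_DELETE ? "DELETE" : "UPDATE",
+**                  zDb, zTbl, sqlite3_preupdate_count(db));
+**    }
+**
+**    sqlite3_preupdate_hook(db, xLogChange, 0);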
+** +** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row before it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE +** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row after it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE +** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate +** callback was invoked as a result of a direct insert, update, or delete +** operation; or 1 for inserts, updates, or deletes invoked by top-level +** triggers; or 2 for changes resulting from triggers called by top-level +** triggers; and so forth. +** +** See also: [sqlite3_update_hook()] +*/ +#if defined(SQLITE_ENABLE_PREUPDATE_HOOK) +SQLITE_API void *sqlite3_preupdate_hook( + sqlite3 *db, + void(*xPreUpdate)( + void *pCtx, /* Copy of third arg to preupdate_hook() */ + sqlite3 *db, /* Database handle */ + int op, /* SQLITE_UPDATE, DELETE or INSERT */ + char const *zDb, /* Database name */ + char const *zName, /* Table name */ + sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */ + sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */ + ), + void* +); +SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_count(sqlite3 *); +SQLITE_API int sqlite3_preupdate_depth(sqlite3 *); +SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +#endif + +/* +** CAPI3REF: Low-level system error code +** +** ^Attempt to return the underlying operating system error code or error +** number that caused the most recent I/O error or failure to open a file. +** The return value is OS-dependent. For example, on unix systems, after +** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be +** called to get back the underlying "errno" that caused the problem, such +** as ENOSPC, EAUTH, EISDIR, and so forth. +*/ +SQLITE_API int sqlite3_system_errno(sqlite3*); + +/* +** CAPI3REF: Database Snapshot +** KEYWORDS: {snapshot} {sqlite3_snapshot} +** EXPERIMENTAL +** +** An instance of the snapshot object records the state of a [WAL mode] +** database for some specific point in history. +** +** In [WAL mode], multiple [database connections] that are open on the +** same database file can each be reading a different historical version +** of the database file. When a [database connection] begins a read +** transaction, that connection sees an unchanging copy of the database +** as it existed for the point in time when the transaction first started. +** Subsequent changes to the database from other connections are not seen +** by the reader until a new read transaction is started. 
+** +** The sqlite3_snapshot object records state information about an historical +** version of the database file so that it is possible to later open a new read +** transaction that sees that historical version of the database rather than +** the most recent version. +** +** The constructor for this object is [sqlite3_snapshot_get()]. The +** [sqlite3_snapshot_open()] method causes a fresh read transaction to refer +** to an historical snapshot (if possible). The destructor for +** sqlite3_snapshot objects is [sqlite3_snapshot_free()]. +*/ +typedef struct sqlite3_snapshot { + unsigned char hidden[48]; +} sqlite3_snapshot; + +/* +** CAPI3REF: Record A Database Snapshot +** EXPERIMENTAL +** +** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a +** new [sqlite3_snapshot] object that records the current state of +** schema S in database connection D. ^On success, the +** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly +** created [sqlite3_snapshot] object into *P and returns SQLITE_OK. +** If there is not already a read-transaction open on schema S when +** this function is called, one is opened automatically. +** +** The following must be true for this function to succeed. If any of +** the following statements are false when sqlite3_snapshot_get() is +** called, SQLITE_ERROR is returned. The final value of *P is undefined +** in this case. +** +**
    +**
  • The database handle must be in [autocommit mode]. +** +**
  • Schema S of [database connection] D must be a [WAL mode] database. +** +**
  • There must not be a write transaction open on schema S of database +** connection D. +** +**
  • One or more transactions must have been written to the current wal +** file since it was created on disk (by any connection). This means +** that a snapshot cannot be taken on a wal mode database with no wal +** file immediately after it is first opened. At least one transaction +** must be written to it first. +**
+** +** This function may also return SQLITE_NOMEM. If it is called with the +** database handle in autocommit mode but fails for some other reason, +** whether or not a read transaction is opened on schema S is undefined. +** +** The [sqlite3_snapshot] object returned from a successful call to +** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()] +** to avoid a memory leak. +** +** The [sqlite3_snapshot_get()] interface is only available when the +** SQLITE_ENABLE_SNAPSHOT compile-time option is used. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( + sqlite3 *db, + const char *zSchema, + sqlite3_snapshot **ppSnapshot +); + +/* +** CAPI3REF: Start a read transaction on an historical snapshot +** EXPERIMENTAL +** +** ^The [sqlite3_snapshot_open(D,S,P)] interface starts a +** read transaction for schema S of +** [database connection] D such that the read transaction +** refers to historical [snapshot] P, rather than the most +** recent change to the database. +** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success +** or an appropriate [error code] if it fails. +** +** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be +** the first operation following the [BEGIN] that takes the schema S +** out of [autocommit mode]. +** ^In other words, schema S must not currently be in +** a transaction for [sqlite3_snapshot_open(D,S,P)] to work, but the +** database connection D must be out of [autocommit mode]. +** ^A [snapshot] will fail to open if it has been overwritten by a +** [checkpoint]. +** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the +** database connection D does not know that the database file for +** schema S is in [WAL mode]. A database connection might not know +** that the database file is in [WAL mode] if there has been no prior +** I/O on that database connection, or if the database entered [WAL mode] +** after the most recent I/O on the database connection.)^ +** (Hint: Run "[PRAGMA application_id]" against a newly opened +** database connection in order to make it ready to use snapshots.) +** +** The [sqlite3_snapshot_open()] interface is only available when the +** SQLITE_ENABLE_SNAPSHOT compile-time option is used. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( + sqlite3 *db, + const char *zSchema, + sqlite3_snapshot *pSnapshot +); + +/* +** CAPI3REF: Destroy a snapshot +** EXPERIMENTAL +** +** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P. +** The application must eventually free every [sqlite3_snapshot] object +** using this routine to avoid a memory leak. +** +** The [sqlite3_snapshot_free()] interface is only available when the +** SQLITE_ENABLE_SNAPSHOT compile-time option is used. +*/ +SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); + +/* +** CAPI3REF: Compare the ages of two snapshot handles. +** EXPERIMENTAL +** +** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages +** of two valid snapshot handles. +** +** If the two snapshot handles are not associated with the same database +** file, the result of the comparison is undefined. +** +** Additionally, the result of the comparison is only valid if both of the +** snapshot handles were obtained by calling sqlite3_snapshot_get() since the +** last time the wal file was deleted. The wal file is deleted when the +** database is changed back to rollback mode or when the number of database +** clients drops to zero. 
If either snapshot handle was obtained before the +** wal file was last deleted, the value returned by this function +** is undefined. +** +** Otherwise, this API returns a negative value if P1 refers to an older +** snapshot than P2, zero if the two handles refer to the same database +** snapshot, and a positive value if P1 is a newer snapshot than P2. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( + sqlite3_snapshot *p1, + sqlite3_snapshot *p2 +); + +/* +** CAPI3REF: Recover snapshots from a wal file +** EXPERIMENTAL +** +** If all connections disconnect from a database file but do not perform +** a checkpoint, the existing wal file is opened along with the database +** file the next time the database is opened. At this point it is only +** possible to successfully call sqlite3_snapshot_open() to open the most +** recent snapshot of the database (the one at the head of the wal file), +** even though the wal file may contain other valid snapshots for which +** clients have sqlite3_snapshot handles. +** +** This function attempts to scan the wal file associated with database zDb +** of database handle db and make all valid snapshots available to +** sqlite3_snapshot_open(). It is an error if there is already a read +** transaction open on the database, or if the database is not a wal mode +** database. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); + +/* +** Undo the hack that converts floating point types to integer for +** builds on processors without floating point support. +*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# undef double +#endif + +#if 0 +} /* End of the 'extern "C"' block */ +#endif +#endif /* SQLITE3_H */ + +/******** Begin file sqlite3rtree.h *********/ +/* +** 2010 August 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +*/ + +#ifndef _SQLITE3RTREE_H_ +#define _SQLITE3RTREE_H_ + + +#if 0 +extern "C" { +#endif + +typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry; +typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info; + +/* The double-precision datatype used by RTree depends on the +** SQLITE_RTREE_INT_ONLY compile-time option. +*/ +#ifdef SQLITE_RTREE_INT_ONLY + typedef sqlite3_int64 sqlite3_rtree_dbl; +#else + typedef double sqlite3_rtree_dbl; +#endif + +/* +** Register a geometry callback named zGeom that can be used as part of an +** R-Tree geometry query as follows: +** +** SELECT ... FROM WHERE MATCH $zGeom(... params ...) +*/ +SQLITE_API int sqlite3_rtree_geometry_callback( + sqlite3 *db, + const char *zGeom, + int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*), + void *pContext +); + + +/* +** A pointer to a structure of the following type is passed as the first +** argument to callbacks registered using rtree_geometry_callback(). 
+*/ +struct sqlite3_rtree_geometry { + void *pContext; /* Copy of pContext passed to s_r_g_c() */ + int nParam; /* Size of array aParam[] */ + sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */ + void *pUser; /* Callback implementation user data */ + void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */ +}; + +/* +** Register a 2nd-generation geometry callback named zScore that can be +** used as part of an R-Tree geometry query as follows: +** +** SELECT ... FROM WHERE MATCH $zQueryFunc(... params ...) +*/ +SQLITE_API int sqlite3_rtree_query_callback( + sqlite3 *db, + const char *zQueryFunc, + int (*xQueryFunc)(sqlite3_rtree_query_info*), + void *pContext, + void (*xDestructor)(void*) +); + + +/* +** A pointer to a structure of the following type is passed as the +** argument to scored geometry callback registered using +** sqlite3_rtree_query_callback(). +** +** Note that the first 5 fields of this structure are identical to +** sqlite3_rtree_geometry. This structure is a subclass of +** sqlite3_rtree_geometry. +*/ +struct sqlite3_rtree_query_info { + void *pContext; /* pContext from when function registered */ + int nParam; /* Number of function parameters */ + sqlite3_rtree_dbl *aParam; /* value of function parameters */ + void *pUser; /* callback can use this, if desired */ + void (*xDelUser)(void*); /* function to free pUser */ + sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */ + unsigned int *anQueue; /* Number of pending entries in the queue */ + int nCoord; /* Number of coordinates */ + int iLevel; /* Level of current node or entry */ + int mxLevel; /* The largest iLevel value in the tree */ + sqlite3_int64 iRowid; /* Rowid for current entry */ + sqlite3_rtree_dbl rParentScore; /* Score of parent node */ + int eParentWithin; /* Visibility of parent node */ + int eWithin; /* OUT: Visiblity */ + sqlite3_rtree_dbl rScore; /* OUT: Write the score here */ + /* The following fields are only available in 3.8.11 and later */ + sqlite3_value **apSqlParam; /* Original SQL values of parameters */ +}; + +/* +** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin. +*/ +#define NOT_WITHIN 0 /* Object completely outside of query region */ +#define PARTLY_WITHIN 1 /* Object partially overlaps query region */ +#define FULLY_WITHIN 2 /* Object fully contained within query region */ + + +#if 0 +} /* end of the 'extern "C"' block */ +#endif + +#endif /* ifndef _SQLITE3RTREE_H_ */ + +/******** End of sqlite3rtree.h *********/ +/******** Begin file sqlite3session.h *********/ + +#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) +#define __SQLITESESSION_H_ 1 + +/* +** Make sure we can call this stuff from C++. +*/ +#if 0 +extern "C" { +#endif + + +/* +** CAPI3REF: Session Object Handle +*/ +typedef struct sqlite3_session sqlite3_session; + +/* +** CAPI3REF: Changeset Iterator Handle +*/ +typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; + +/* +** CAPI3REF: Create A New Session Object +** +** Create a new session object attached to database handle db. If successful, +** a pointer to the new object is written to *ppSession and SQLITE_OK is +** returned. If an error occurs, *ppSession is set to NULL and an SQLite +** error code (e.g. SQLITE_NOMEM) is returned. +** +** It is possible to create multiple session objects attached to a single +** database handle. 
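+**
+** As a hedged sketch of the overall lifecycle (error handling omitted;
+** the table name "t1" is illustrative only):
+**
+**     sqlite3_session *pSession = 0;
+**     int nChangeset = 0; void *pChangeset = 0;
+**     rc = sqlite3session_create(db, "main", &pSession);
+**     rc = sqlite3session_attach(pSession, "t1");
+**     /* ... modify t1 using ordinary SQL statements ... */
+**     rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**     /* ... use the changeset ... */
+**     sqlite3_free(pChangeset);
+**     sqlite3session_delete(pSession);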
+** +** Session objects created using this function should be deleted using the +** [sqlite3session_delete()] function before the database handle that they +** are attached to is itself closed. If the database handle is closed before +** the session object is deleted, then the results of calling any session +** module function, including [sqlite3session_delete()] on the session object +** are undefined. +** +** Because the session module uses the [sqlite3_preupdate_hook()] API, it +** is not possible for an application to register a pre-update hook on a +** database handle that has one or more session objects attached. Nor is +** it possible to create a session object attached to a database handle for +** which a pre-update hook is already defined. The results of attempting +** either of these things are undefined. +** +** The session object will be used to create changesets for tables in +** database zDb, where zDb is either "main", or "temp", or the name of an +** attached database. It is not an error if database zDb is not attached +** to the database when the session object is created. +*/ +SQLITE_API int sqlite3session_create( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Name of db (e.g. "main") */ + sqlite3_session **ppSession /* OUT: New session object */ +); + +/* +** CAPI3REF: Delete A Session Object +** +** Delete a session object previously allocated using +** [sqlite3session_create()]. Once a session object has been deleted, the +** results of attempting to use pSession with any other session module +** function are undefined. +** +** Session objects must be deleted before the database handle to which they +** are attached is closed. Refer to the documentation for +** [sqlite3session_create()] for details. +*/ +SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); + + +/* +** CAPI3REF: Enable Or Disable A Session Object +** +** Enable or disable the recording of changes by a session object. When +** enabled, a session object records changes made to the database. When +** disabled - it does not. A newly created session object is enabled. +** Refer to the documentation for [sqlite3session_changeset()] for further +** details regarding how enabling and disabling a session object affects +** the eventual changesets. +** +** Passing zero to this function disables the session. Passing a value +** greater than zero enables it. Passing a value less than zero is a +** no-op, and may be used to query the current state of the session. +** +** The return value indicates the final state of the session object: 0 if +** the session is disabled, or 1 if it is enabled. +*/ +SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable); + +/* +** CAPI3REF: Set Or Clear the Indirect Change Flag +** +** Each change recorded by a session object is marked as either direct or +** indirect. A change is marked as indirect if either: +** +**
+**
+**   • The session object "indirect" flag is set when the change is
+**     made, or
+**   • The change is made by an SQL trigger or foreign key action
+**     instead of directly as a result of a user's SQL statement.
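+**
+** For example (a sketch, not normative usage): an application might mark
+** a batch of housekeeping writes as indirect, then restore and query the
+** flag afterwards:
+**
+**     sqlite3session_indirect(pSession, 1);   /* record changes as indirect */
+**     /* ... housekeeping writes ... */
+**     sqlite3session_indirect(pSession, 0);   /* back to direct */
+**     int bSet = sqlite3session_indirect(pSession, -1);   /* query only */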
+** +** If a single row is affected by more than one operation within a session, +** then the change is considered indirect if all operations meet the criteria +** for an indirect change above, or direct otherwise. +** +** This function is used to set, clear or query the session object indirect +** flag. If the second argument passed to this function is zero, then the +** indirect flag is cleared. If it is greater than zero, the indirect flag +** is set. Passing a value less than zero does not modify the current value +** of the indirect flag, and may be used to query the current state of the +** indirect flag for the specified session object. +** +** The return value indicates the final state of the indirect flag: 0 if +** it is clear, or 1 if it is set. +*/ +SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); + +/* +** CAPI3REF: Attach A Table To A Session Object +** +** If argument zTab is not NULL, then it is the name of a table to attach +** to the session object passed as the first argument. All subsequent changes +** made to the table while the session object is enabled will be recorded. See +** documentation for [sqlite3session_changeset()] for further details. +** +** Or, if argument zTab is NULL, then changes are recorded for all tables +** in the database. If additional tables are added to the database (by +** executing "CREATE TABLE" statements) after this call is made, changes for +** the new tables are also recorded. +** +** Changes can only be recorded for tables that have a PRIMARY KEY explicitly +** defined as part of their CREATE TABLE statement. It does not matter if the +** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY +** KEY may consist of a single column, or may be a composite key. +** +** It is not an error if the named table does not exist in the database. Nor +** is it an error if the named table does not have a PRIMARY KEY. However, +** no changes will be recorded in either of these scenarios. +** +** Changes are not recorded for individual rows that have NULL values stored +** in one or more of their PRIMARY KEY columns. +** +** SQLITE_OK is returned if the call completes without error. Or, if an error +** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned. +*/ +SQLITE_API int sqlite3session_attach( + sqlite3_session *pSession, /* Session object */ + const char *zTab /* Table name */ +); + +/* +** CAPI3REF: Set a table filter on a Session Object. +** +** The second argument (xFilter) is the "filter callback". For changes to rows +** in tables that are not attached to the Session object, the filter is called +** to determine whether changes to the table's rows should be tracked or not. +** If xFilter returns 0, changes is not tracked. Note that once a table is +** attached, xFilter will not be called again. +*/ +SQLITE_API void sqlite3session_table_filter( + sqlite3_session *pSession, /* Session object */ + int(*xFilter)( + void *pCtx, /* Copy of third arg to _filter_table() */ + const char *zTab /* Table name */ + ), + void *pCtx /* First argument passed to xFilter */ +); + +/* +** CAPI3REF: Generate A Changeset From A Session Object +** +** Obtain a changeset containing changes to the tables attached to the +** session object passed as the first argument. If successful, +** set *ppChangeset to point to a buffer containing the changeset +** and *pnChangeset to the size of the changeset in bytes before returning +** SQLITE_OK. 
If an error occurs, set both *ppChangeset and *pnChangeset to
+** zero and return an SQLite error code.
+**
+** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes,
+** each representing a change to a single row of an attached table. An INSERT
+** change contains the values of each field of a new database row. A DELETE
+** contains the original values of each field of a deleted database row. An
+** UPDATE change contains the original values of each field of an updated
+** database row along with the updated values for each updated non-primary-key
+** column. It is not possible for an UPDATE change to represent a change that
+** modifies the values of primary key columns. If such a change is made, it
+** is represented in a changeset as a DELETE followed by an INSERT.
+**
+** Changes are not recorded for rows that have NULL values stored in one or
+** more of their PRIMARY KEY columns. If such a row is inserted or deleted,
+** no corresponding change is present in the changesets returned by this
+** function. If an existing row with one or more NULL values stored in
+** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL,
+** only an INSERT change appears in the changeset. Similarly, if an existing
+** row with non-NULL PRIMARY KEY values is updated so that one or more of its
+** PRIMARY KEY columns are set to NULL, the resulting changeset contains a
+** DELETE change only.
+**
+** The contents of a changeset may be traversed using an iterator created
+** using the [sqlite3changeset_start()] API. A changeset may be applied to
+** a database with a compatible schema using the [sqlite3changeset_apply()]
+** API.
+**
+** Within a changeset generated by this function, all changes related to a
+** single table are grouped together. In other words, when iterating through
+** a changeset or when applying a changeset to a database, all changes related
+** to a single table are processed before moving on to the next table. Tables
+** are sorted in the same order in which they were attached (or auto-attached)
+** to the sqlite3_session object. The order in which the changes related to
+** a single table are stored is undefined.
+**
+** Following a successful call to this function, it is the responsibility of
+** the caller to eventually free the buffer that *ppChangeset points to using
+** [sqlite3_free()].
+**
+** Changeset Generation
+**
+** Once a table has been attached to a session object, the session object
+** records the primary key values of all new rows inserted into the table.
+** It also records the original primary key and other column values of any
+** deleted or updated rows. For each unique primary key value, data is only
+** recorded once - the first time a row with said primary key is inserted,
+** updated or deleted in the lifetime of the session.
+**
+** There is one exception to the previous paragraph: when a row is inserted,
+** updated or deleted, if one or more of its primary key columns contain a
+** NULL value, no record of the change is made.
+**
+** The session object therefore accumulates two types of records - those
+** that consist of primary key values only (created when the user inserts
+** a new record) and those that consist of the primary key values and the
+** original values of other table columns (created when the user deletes
+** or updates a record).
+**
+** When this function is called, the requested changeset is created using
+** both the accumulated records and the current contents of the database
+** file. Specifically:
+**
+**   • For each record generated by an insert, the database is queried
+**     for a row with a matching primary key. If one is found, an INSERT
+**     change is added to the changeset. If no such row is found, no change
+**     is added to the changeset.
+**
+**   • For each record generated by an update or delete, the database is
+**     queried for a row with a matching primary key. If such a row is
+**     found and one or more of the non-primary key fields have been
+**     modified from their original values, an UPDATE change is added to
+**     the changeset. Or, if no such row is found in the table, a DELETE
+**     change is added to the changeset. If there is a row with a matching
+**     primary key in the database, but all fields contain their original
+**     values, no change is added to the changeset.
+** +** This means, amongst other things, that if a row is inserted and then later +** deleted while a session object is active, neither the insert nor the delete +** will be present in the changeset. Or if a row is deleted and then later a +** row with the same primary key values inserted while a session object is +** active, the resulting changeset will contain an UPDATE change instead of +** a DELETE and an INSERT. +** +** When a session object is disabled (see the [sqlite3session_enable()] API), +** it does not accumulate records when rows are inserted, updated or deleted. +** This may appear to have some counter-intuitive effects if a single row +** is written to more than once during a session. For example, if a row +** is inserted while a session object is enabled, then later deleted while +** the same session object is disabled, no INSERT record will appear in the +** changeset, even though the delete took place while the session was disabled. +** Or, if one field of a row is updated while a session is disabled, and +** another field of the same row is updated while the session is enabled, the +** resulting changeset will contain an UPDATE change that updates both fields. +*/ +SQLITE_API int sqlite3session_changeset( + sqlite3_session *pSession, /* Session object */ + int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ + void **ppChangeset /* OUT: Buffer containing changeset */ +); + +/* +** CAPI3REF: Load The Difference Between Tables Into A Session +** +** If it is not already attached to the session object passed as the first +** argument, this function attaches table zTbl in the same manner as the +** [sqlite3session_attach()] function. If zTbl does not exist, or if it +** does not have a primary key, this function is a no-op (but does not return +** an error). +** +** Argument zFromDb must be the name of a database ("main", "temp" etc.) +** attached to the same database handle as the session object that contains +** a table compatible with the table attached to the session by this function. +** A table is considered compatible if it: +** +**
+**
+**   • Has the same name,
+**   • Has the same set of columns declared in the same order, and
+**   • Has the same PRIMARY KEY definition.
+** +** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables +** are compatible but do not have any PRIMARY KEY columns, it is not an error +** but no changes are added to the session object. As with other session +** APIs, tables without PRIMARY KEYs are simply ignored. +** +** This function adds a set of changes to the session object that could be +** used to update the table in database zFrom (call this the "from-table") +** so that its content is the same as the table attached to the session +** object (call this the "to-table"). Specifically: +** +**
+**
+**   • For each row (primary key) that exists in the to-table but not in
+**     the from-table, an INSERT record is added to the session object.
+**
+**   • For each row (primary key) that exists in the from-table but not in
+**     the to-table, a DELETE record is added to the session object.
+**
+**   • For each row (primary key) that exists in both tables, but features
+**     different non-PK values in each, an UPDATE record is added to the
+**     session.
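+**
+** A hedged sketch (the database name "aux" and table name "t1" are
+** illustrative only): after this call, a changeset generated from the
+** session would describe how to update aux.t1 to match the session
+** database's t1:
+**
+**     char *zErrMsg = 0;
+**     rc = sqlite3session_diff(pSession, "aux", "t1", &zErrMsg);
+**     if( rc!=SQLITE_OK ){
+**       /* zErrMsg may point to an English language error message */
+**       sqlite3_free(zErrMsg);
+**     }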
+**
+** To clarify, if this function is called and then a changeset constructed
+** using [sqlite3session_changeset()], then after applying that changeset to
+** database zFrom the contents of the two compatible tables would be
+** identical.
+**
+** It is an error if database zFrom does not exist or does not contain the
+** required compatible table.
+**
+** If the operation is successful, SQLITE_OK is returned. Otherwise, an
+** SQLite error code is returned. In this case, if argument pzErrMsg is not
+** NULL, *pzErrMsg may be set to point to a buffer containing an English
+** language error message. It is the responsibility of the caller to free
+** this buffer using sqlite3_free().
+*/
+SQLITE_API int sqlite3session_diff(
+  sqlite3_session *pSession,
+  const char *zFromDb,
+  const char *zTbl,
+  char **pzErrMsg
+);
+
+
+/*
+** CAPI3REF: Generate A Patchset From A Session Object
+**
+** The differences between a patchset and a changeset are that:
+**
+**   • DELETE records consist of the primary key fields only. The
+**     original values of other fields are omitted.
+**   • The original values of any modified fields are omitted from
+**     UPDATE records.
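+**
+** Obtaining a patchset mirrors [sqlite3session_changeset()]; a sketch
+** (error handling omitted):
+**
+**     int nPatchset = 0; void *pPatchset = 0;
+**     rc = sqlite3session_patchset(pSession, &nPatchset, &pPatchset);
+**     /* ... store or transmit the patchset ... */
+**     sqlite3_free(pPatchset);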
+** +** A patchset blob may be used with up to date versions of all +** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(), +** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly, +** attempting to use a patchset blob with old versions of the +** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error. +** +** Because the non-primary key "old.*" fields are omitted, no +** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset +** is passed to the sqlite3changeset_apply() API. Other conflict types work +** in the same way as for changesets. +** +** Changes within a patchset are ordered in the same way as for changesets +** generated by the sqlite3session_changeset() function (i.e. all changes for +** a single table are grouped together, tables appear in the order in which +** they were attached to the session object). +*/ +SQLITE_API int sqlite3session_patchset( + sqlite3_session *pSession, /* Session object */ + int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */ + void **ppPatchset /* OUT: Buffer containing changeset */ +); + +/* +** CAPI3REF: Test if a changeset has recorded any changes. +** +** Return non-zero if no changes to attached tables have been recorded by +** the session object passed as the first argument. Otherwise, if one or +** more changes have been recorded, return zero. +** +** Even if this function returns zero, it is possible that calling +** [sqlite3session_changeset()] on the session handle may still return a +** changeset that contains no changes. This can happen when a row in +** an attached table is modified and then later on the original values +** are restored. However, if this function returns non-zero, then it is +** guaranteed that a call to sqlite3session_changeset() will return a +** changeset containing zero changes. +*/ +SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); + +/* +** CAPI3REF: Create An Iterator To Traverse A Changeset +** +** Create an iterator used to iterate through the contents of a changeset. +** If successful, *pp is set to point to the iterator handle and SQLITE_OK +** is returned. Otherwise, if an error occurs, *pp is set to zero and an +** SQLite error code is returned. +** +** The following functions can be used to advance and query a changeset +** iterator created by this function: +** +**
+**
+**   • [sqlite3changeset_next()]
+**   • [sqlite3changeset_op()]
+**   • [sqlite3changeset_new()]
+**   • [sqlite3changeset_old()]
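+**
+** A typical traversal, sketched in full (see also the pattern shown with
+** [sqlite3changeset_finalize()] below):
+**
+**     sqlite3_changeset_iter *pIter = 0;
+**     rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
+**     while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
+**       const char *zTab; int nCol; int op; int bIndirect;
+**       sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
+**       /* inspect values using sqlite3changeset_old()/_new() */
+**     }
+**     rc = sqlite3changeset_finalize(pIter);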
+** +** It is the responsibility of the caller to eventually destroy the iterator +** by passing it to [sqlite3changeset_finalize()]. The buffer containing the +** changeset (pChangeset) must remain valid until after the iterator is +** destroyed. +** +** Assuming the changeset blob was created by one of the +** [sqlite3session_changeset()], [sqlite3changeset_concat()] or +** [sqlite3changeset_invert()] functions, all changes within the changeset +** that apply to a single table are grouped together. This means that when +** an application iterates through a changeset using an iterator created by +** this function, all changes that relate to a single table are visited +** consecutively. There is no chance that the iterator will visit a change +** the applies to table X, then one for table Y, and then later on visit +** another change for table X. +*/ +SQLITE_API int sqlite3changeset_start( + sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */ + int nChangeset, /* Size of changeset blob in bytes */ + void *pChangeset /* Pointer to blob containing changeset */ +); + + +/* +** CAPI3REF: Advance A Changeset Iterator +** +** This function may only be used with iterators created by function +** [sqlite3changeset_start()]. If it is called on an iterator passed to +** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE +** is returned and the call has no effect. +** +** Immediately after an iterator is created by sqlite3changeset_start(), it +** does not point to any change in the changeset. Assuming the changeset +** is not empty, the first call to this function advances the iterator to +** point to the first change in the changeset. Each subsequent call advances +** the iterator to point to the next change in the changeset (if any). If +** no error occurs and the iterator points to a valid change after a call +** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned. +** Otherwise, if all changes in the changeset have already been visited, +** SQLITE_DONE is returned. +** +** If an error occurs, an SQLite error code is returned. Possible error +** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or +** SQLITE_NOMEM. +*/ +SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); + +/* +** CAPI3REF: Obtain The Current Operation From A Changeset Iterator +** +** The pIter argument passed to this function may either be an iterator +** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator +** created by [sqlite3changeset_start()]. In the latter case, the most recent +** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this +** is not the case, this function returns [SQLITE_MISUSE]. +** +** If argument pzTab is not NULL, then *pzTab is set to point to a +** nul-terminated utf-8 encoded string containing the name of the table +** affected by the current change. The buffer remains valid until either +** sqlite3changeset_next() is called on the iterator or until the +** conflict-handler function returns. If pnCol is not NULL, then *pnCol is +** set to the number of columns in the table affected by the change. If +** pbIncorrect is not NULL, then *pbIndirect is set to true (1) if the change +** is an indirect change, or false (0) otherwise. See the documentation for +** [sqlite3session_indirect()] for a description of direct and indirect +** changes. 
Finally, if pOp is not NULL, then *pOp is set to one of +** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the +** type of change that the iterator currently points to. +** +** If no error occurs, SQLITE_OK is returned. If an error does occur, an +** SQLite error code is returned. The values of the output variables may not +** be trusted in this case. +*/ +SQLITE_API int sqlite3changeset_op( + sqlite3_changeset_iter *pIter, /* Iterator object */ + const char **pzTab, /* OUT: Pointer to table name */ + int *pnCol, /* OUT: Number of columns in table */ + int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */ + int *pbIndirect /* OUT: True for an 'indirect' change */ +); + +/* +** CAPI3REF: Obtain The Primary Key Definition Of A Table +** +** For each modified table, a changeset includes the following: +** +**
+**
+**   • The number of columns in the table, and
+**   • Which of those columns make up the table's PRIMARY KEY.
+** +** This function is used to find which columns comprise the PRIMARY KEY of +** the table modified by the change that iterator pIter currently points to. +** If successful, *pabPK is set to point to an array of nCol entries, where +** nCol is the number of columns in the table. Elements of *pabPK are set to +** 0x01 if the corresponding column is part of the tables primary key, or +** 0x00 if it is not. +** +** If argument pnCol is not NULL, then *pnCol is set to the number of columns +** in the table. +** +** If this function is called when the iterator does not point to a valid +** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise, +** SQLITE_OK is returned and the output variables populated as described +** above. +*/ +SQLITE_API int sqlite3changeset_pk( + sqlite3_changeset_iter *pIter, /* Iterator object */ + unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */ + int *pnCol /* OUT: Number of entries in output array */ +); + +/* +** CAPI3REF: Obtain old.* Values From A Changeset Iterator +** +** The pIter argument passed to this function may either be an iterator +** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator +** created by [sqlite3changeset_start()]. In the latter case, the most recent +** call to [sqlite3changeset_next()] must have returned SQLITE_ROW. +** Furthermore, it may only be called if the type of change that the iterator +** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise, +** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. +** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the vector of +** original row values stored as part of the UPDATE or DELETE change and +** returns SQLITE_OK. The name of the function comes from the fact that this +** is similar to the "old.*" columns available to update or delete triggers. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +SQLITE_API int sqlite3changeset_old( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */ +); + +/* +** CAPI3REF: Obtain new.* Values From A Changeset Iterator +** +** The pIter argument passed to this function may either be an iterator +** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator +** created by [sqlite3changeset_start()]. In the latter case, the most recent +** call to [sqlite3changeset_next()] must have returned SQLITE_ROW. +** Furthermore, it may only be called if the type of change that the iterator +** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise, +** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. 
+** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the vector of +** new row values stored as part of the UPDATE or INSERT change and +** returns SQLITE_OK. If the change is an UPDATE and does not include +** a new value for the requested column, *ppValue is set to NULL and +** SQLITE_OK returned. The name of the function comes from the fact that +** this is similar to the "new.*" columns available to update or delete +** triggers. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +SQLITE_API int sqlite3changeset_new( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */ +); + +/* +** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator +** +** This function should only be used with iterator objects passed to a +** conflict-handler callback by [sqlite3changeset_apply()] with either +** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function +** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue +** is set to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. +** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the +** "conflicting row" associated with the current conflict-handler callback +** and returns SQLITE_OK. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +SQLITE_API int sqlite3changeset_conflict( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: Value from conflicting row */ +); + +/* +** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations +** +** This function may only be called with an iterator passed to an +** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case +** it sets the output variable to the total number of known foreign key +** violations in the destination database and returns SQLITE_OK. +** +** In all other cases this function returns SQLITE_MISUSE. +*/ +SQLITE_API int sqlite3changeset_fk_conflicts( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int *pnOut /* OUT: Number of FK violations */ +); + + +/* +** CAPI3REF: Finalize A Changeset Iterator +** +** This function is used to finalize an iterator allocated with +** [sqlite3changeset_start()]. +** +** This function should only be called on iterators created using the +** [sqlite3changeset_start()] function. If an application calls this +** function with an iterator passed to a conflict-handler by +** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the +** call has no effect. +** +** If an error was encountered within a call to an sqlite3changeset_xxx() +** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an +** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding +** to that error is returned by this function. Otherwise, SQLITE_OK is +** returned. 
This is to allow the following pattern (pseudo-code): +** +** sqlite3changeset_start(); +** while( SQLITE_ROW==sqlite3changeset_next() ){ +** // Do something with change. +** } +** rc = sqlite3changeset_finalize(); +** if( rc!=SQLITE_OK ){ +** // An error has occurred +** } +*/ +SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); + +/* +** CAPI3REF: Invert A Changeset +** +** This function is used to "invert" a changeset object. Applying an inverted +** changeset to a database reverses the effects of applying the uninverted +** changeset. Specifically: +** +**
+**
+**   • Each DELETE change is changed to an INSERT, and
+**   • Each INSERT change is changed to a DELETE, and
+**   • For each UPDATE change, the old.* and new.* values are exchanged.
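+**
+** For example (sketch): an application implementing undo might invert a
+** recorded changeset and later apply the inverse:
+**
+**     int nInv = 0; void *pInv = 0;
+**     rc = sqlite3changeset_invert(nChangeset, pChangeset, &nInv, &pInv);
+**     /* ... later, pass (nInv, pInv) to sqlite3changeset_apply(), then
+**     ** release the buffer with sqlite3_free(pInv) ... */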
+** +** This function does not change the order in which changes appear within +** the changeset. It merely reverses the sense of each individual change. +** +** If successful, a pointer to a buffer containing the inverted changeset +** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and +** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are +** zeroed and an SQLite error code returned. +** +** It is the responsibility of the caller to eventually call sqlite3_free() +** on the *ppOut pointer to free the buffer allocation following a successful +** call to this function. +** +** WARNING/TODO: This function currently assumes that the input is a valid +** changeset. If it is not, the results are undefined. +*/ +SQLITE_API int sqlite3changeset_invert( + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + +/* +** CAPI3REF: Concatenate Two Changeset Objects +** +** This function is used to concatenate two changesets, A and B, into a +** single changeset. The result is a changeset equivalent to applying +** changeset A followed by changeset B. +** +** This function combines the two input changesets using an +** sqlite3_changegroup object. Calling it produces similar results as the +** following code fragment: +** +** sqlite3_changegroup *pGrp; +** rc = sqlite3_changegroup_new(&pGrp); +** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA); +** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB); +** if( rc==SQLITE_OK ){ +** rc = sqlite3changegroup_output(pGrp, pnOut, ppOut); +** }else{ +** *ppOut = 0; +** *pnOut = 0; +** } +** +** Refer to the sqlite3_changegroup documentation below for details. +*/ +SQLITE_API int sqlite3changeset_concat( + int nA, /* Number of bytes in buffer pA */ + void *pA, /* Pointer to buffer containing changeset A */ + int nB, /* Number of bytes in buffer pB */ + void *pB, /* Pointer to buffer containing changeset B */ + int *pnOut, /* OUT: Number of bytes in output changeset */ + void **ppOut /* OUT: Buffer containing output changeset */ +); + + +/* +** CAPI3REF: Changegroup Handle +*/ +typedef struct sqlite3_changegroup sqlite3_changegroup; + +/* +** CAPI3REF: Create A New Changegroup Object +** +** An sqlite3_changegroup object is used to combine two or more changesets +** (or patchsets) into a single changeset (or patchset). A single changegroup +** object may combine changesets or patchsets, but not both. The output is +** always in the same format as the input. +** +** If successful, this function returns SQLITE_OK and populates (*pp) with +** a pointer to a new sqlite3_changegroup object before returning. The caller +** should eventually free the returned object using a call to +** sqlite3changegroup_delete(). If an error occurs, an SQLite error code +** (i.e. SQLITE_NOMEM) is returned and *pp is set to NULL. +** +** The usual usage pattern for an sqlite3_changegroup object is as follows: +** +**
+**
+**   • It is created using a call to sqlite3changegroup_new().
+**
+**   • Zero or more changesets (or patchsets) are added to the object
+**     by calling sqlite3changegroup_add().
+**
+**   • The result of combining all input changesets together is obtained
+**     by the application via a call to sqlite3changegroup_output().
+**
+**   • The object is deleted using a call to sqlite3changegroup_delete().
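+**
+** The pattern above, sketched in code (error handling elided; nA/pA and
+** nB/pB are assumed to hold two previously generated changesets):
+**
+**     sqlite3_changegroup *pGrp = 0;
+**     int nOut = 0; void *pOut = 0;
+**     rc = sqlite3changegroup_new(&pGrp);
+**     if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
+**     if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
+**     if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, &nOut, &pOut);
+**     sqlite3changegroup_delete(pGrp);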
+** +** Any number of calls to add() and output() may be made between the calls to +** new() and delete(), and in any order. +** +** As well as the regular sqlite3changegroup_add() and +** sqlite3changegroup_output() functions, also available are the streaming +** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm(). +*/ +int sqlite3changegroup_new(sqlite3_changegroup **pp); + +/* +** CAPI3REF: Add A Changeset To A Changegroup +** +** Add all changes within the changeset (or patchset) in buffer pData (size +** nData bytes) to the changegroup. +** +** If the buffer contains a patchset, then all prior calls to this function +** on the same changegroup object must also have specified patchsets. Or, if +** the buffer contains a changeset, so must have the earlier calls to this +** function. Otherwise, SQLITE_ERROR is returned and no changes are added +** to the changegroup. +** +** Rows within the changeset and changegroup are identified by the values in +** their PRIMARY KEY columns. A change in the changeset is considered to +** apply to the same row as a change already present in the changegroup if +** the two rows have the same primary key. +** +** Changes to rows that do not already appear in the changegroup are +** simply copied into it. Or, if both the new changeset and the changegroup +** contain changes that apply to a single row, the final contents of the +** changegroup depends on the type of each change, as follows: +** +** +** +** +**
+**   Existing change + New change => Output change:
+**
+**   INSERT + INSERT:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+**
+**   INSERT + UPDATE:
+**     The INSERT change remains in the changegroup. The values in the
+**     INSERT change are modified as if the row was inserted by the
+**     existing change and then updated according to the new change.
+**
+**   INSERT + DELETE:
+**     The existing INSERT is removed from the changegroup. The DELETE is
+**     not added.
+**
+**   UPDATE + INSERT:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+**
+**   UPDATE + UPDATE:
+**     The existing UPDATE remains within the changegroup. It is amended
+**     so that the accompanying values are as if the row was updated once
+**     by the existing change and then again by the new change.
+**
+**   UPDATE + DELETE:
+**     The existing UPDATE is replaced by the new DELETE within the
+**     changegroup.
+**
+**   DELETE + INSERT:
+**     If one or more of the column values in the row inserted by the
+**     new change differ from those in the row deleted by the existing
+**     change, the existing DELETE is replaced by an UPDATE within the
+**     changegroup. Otherwise, if the inserted row is exactly the same
+**     as the deleted row, the existing DELETE is simply discarded.
+**
+**   DELETE + UPDATE:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+**
+**   DELETE + DELETE:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+** +** If the new changeset contains changes to a table that is already present +** in the changegroup, then the number of columns and the position of the +** primary key columns for the table must be consistent. If this is not the +** case, this function fails with SQLITE_SCHEMA. If the input changeset +** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is +** returned. Or, if an out-of-memory condition occurs during processing, this +** function returns SQLITE_NOMEM. In all cases, if an error occurs the +** final contents of the changegroup is undefined. +** +** If no error occurs, SQLITE_OK is returned. +*/ +int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); + +/* +** CAPI3REF: Obtain A Composite Changeset From A Changegroup +** +** Obtain a buffer containing a changeset (or patchset) representing the +** current contents of the changegroup. If the inputs to the changegroup +** were themselves changesets, the output is a changeset. Or, if the +** inputs were patchsets, the output is also a patchset. +** +** As with the output of the sqlite3session_changeset() and +** sqlite3session_patchset() functions, all changes related to a single +** table are grouped together in the output of this function. Tables appear +** in the same order as for the very first changeset added to the changegroup. +** If the second or subsequent changesets added to the changegroup contain +** changes for tables that do not appear in the first changeset, they are +** appended onto the end of the output changeset, again in the order in +** which they are first encountered. +** +** If an error occurs, an SQLite error code is returned and the output +** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK +** is returned and the output variables are set to the size of and a +** pointer to the output buffer, respectively. In this case it is the +** responsibility of the caller to eventually free the buffer using a +** call to sqlite3_free(). +*/ +int sqlite3changegroup_output( + sqlite3_changegroup*, + int *pnData, /* OUT: Size of output buffer in bytes */ + void **ppData /* OUT: Pointer to output buffer */ +); + +/* +** CAPI3REF: Delete A Changegroup Object +*/ +void sqlite3changegroup_delete(sqlite3_changegroup*); + +/* +** CAPI3REF: Apply A Changeset To A Database +** +** Apply a changeset to a database. This function attempts to update the +** "main" database attached to handle db with the changes found in the +** changeset passed via the second and third arguments. +** +** The fourth argument (xFilter) passed to this function is the "filter +** callback". If it is not NULL, then for each table affected by at least one +** change in the changeset, the filter callback is invoked with +** the table name as the second argument, and a copy of the context pointer +** passed as the sixth argument to this function as the first. If the "filter +** callback" returns zero, then no attempt is made to apply any changes to +** the table. Otherwise, if the return value is non-zero or the xFilter +** argument to this function is NULL, all changes related to the table are +** attempted. +** +** For each table that is not excluded by the filter callback, this function +** tests that the target database contains a compatible table. A table is +** considered compatible if all of the following are true: +** +**
+**
+**   • The table has the same name as the name recorded in the
+**     changeset, and
+**   • The table has at least as many columns as recorded in the
+**     changeset, and
+**   • The table has primary key columns in the same position as
+**     recorded in the changeset.
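+**
+** A hedged sketch of a complete call (both callbacks are illustrative
+** placeholders, not part of this API):
+**
+**     static int xFilter(void *pCtx, const char *zTab){
+**       return 1;                       /* attempt changes for every table */
+**     }
+**     static int xConflict(void *pCtx, int eConflict,
+**                          sqlite3_changeset_iter *p){
+**       return SQLITE_CHANGESET_OMIT;   /* skip any conflicting change */
+**     }
+**     ...
+**     rc = sqlite3changeset_apply(db, nChangeset, pChangeset,
+**                                 xFilter, xConflict, 0);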
+** +** If there is no compatible table, it is not an error, but none of the +** changes associated with the table are applied. A warning message is issued +** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most +** one such warning is issued for each table in the changeset. +** +** For each change for which there is a compatible table, an attempt is made +** to modify the table contents according to the UPDATE, INSERT or DELETE +** change. If a change cannot be applied cleanly, the conflict handler +** function passed as the fifth argument to sqlite3changeset_apply() may be +** invoked. A description of exactly when the conflict handler is invoked for +** each type of change is below. +** +** Unlike the xFilter argument, xConflict may not be passed NULL. The results +** of passing anything other than a valid function pointer as the xConflict +** argument are undefined. +** +** Each time the conflict handler function is invoked, it must return one +** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or +** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned +** if the second argument passed to the conflict handler is either +** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler +** returns an illegal value, any changes already made are rolled back and +** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different +** actions are taken by sqlite3changeset_apply() depending on the value +** returned by each invocation of the conflict-handler function. Refer to +** the documentation for the three +** [SQLITE_CHANGESET_OMIT|available return values] for details. +** +**
+**
+** DELETE Changes
+** For each DELETE change, this function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all non-primary key columns also match the values stored in +** the changeset the row is deleted from the target database. +** +** If a row with matching primary key values is found, but one or more of +** the non-primary key fields contains a value different from the original +** row value stored in the changeset, the conflict-handler function is +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the +** database table has more columns than are recorded in the changeset, +** only the values of those non-primary key fields are compared against +** the current database contents - any trailing database table columns +** are ignored. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT +** (which can only happen if a foreign key constraint is violated), the +** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT] +** passed as the second argument. This includes the case where the DELETE +** operation is attempted because an earlier call to the conflict handler +** function returned [SQLITE_CHANGESET_REPLACE]. +** +**
+** INSERT Changes
+** For each INSERT change, an attempt is made to insert the new row into +** the database. If the changeset row contains fewer fields than the +** database table, the trailing fields are populated with their default +** values. +** +** If the attempt to insert the row fails because the database already +** contains a row with the same primary key values, the conflict handler +** function is invoked with the second argument set to +** [SQLITE_CHANGESET_CONFLICT]. +** +** If the attempt to insert the row fails because of some other constraint +** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is +** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT]. +** This includes the case where the INSERT operation is re-attempted because +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +** +**
+** UPDATE Changes
+** For each UPDATE change, this function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all modified non-primary key columns also match the values +** stored in the changeset the row is updated within the target database. +** +** If a row with matching primary key values is found, but one or more of +** the modified non-primary key fields contains a value different from an +** original row value stored in the changeset, the conflict-handler function +** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since +** UPDATE changes only contain values for non-primary key fields that are +** to be modified, only those fields need to match the original values to +** avoid the SQLITE_CHANGESET_DATA conflict-handler callback. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the UPDATE operation is attempted, but SQLite returns +** SQLITE_CONSTRAINT, the conflict-handler function is invoked with +** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument. +** This includes the case where the UPDATE operation is attempted after +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +**
+** +** It is safe to execute SQL statements, including those that write to the +** table that the callback related to, from within the xConflict callback. +** This can be used to further customize the applications conflict +** resolution strategy. +** +** All changes made by this function are enclosed in a savepoint transaction. +** If any other error (aside from a constraint failure when attempting to +** write to the target database) occurs, then the savepoint transaction is +** rolled back, restoring the target database to its original state, and an +** SQLite error code returned. +*/ +SQLITE_API int sqlite3changeset_apply( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); + +/* +** CAPI3REF: Constants Passed To The Conflict Handler +** +** Values that may be passed as the second argument to a conflict-handler. +** +**
+**
+** SQLITE_CHANGESET_DATA
+** The conflict handler is invoked with CHANGESET_DATA as the second argument +** when processing a DELETE or UPDATE change if a row with the required +** PRIMARY KEY fields is present in the database, but one or more other +** (non primary-key) fields modified by the update do not contain the +** expected "before" values. +** +** The conflicting row, in this case, is the database row with the matching +** primary key. +** +**
+** SQLITE_CHANGESET_NOTFOUND
+** The conflict handler is invoked with CHANGESET_NOTFOUND as the second +** argument when processing a DELETE or UPDATE change if a row with the +** required PRIMARY KEY fields is not present in the database. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
+** SQLITE_CHANGESET_CONFLICT
+** CHANGESET_CONFLICT is passed as the second argument to the conflict +** handler while processing an INSERT change if the operation would result +** in duplicate primary key values. +** +** The conflicting row in this case is the database row with the matching +** primary key. +** +**
+** SQLITE_CHANGESET_FOREIGN_KEY
+** If foreign key handling is enabled, and applying a changeset leaves the +** database in a state containing foreign key violations, the conflict +** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument +** exactly once before the changeset is committed. If the conflict handler +** returns CHANGESET_OMIT, the changes, including those that caused the +** foreign key constraint violation, are committed. Or, if it returns +** CHANGESET_ABORT, the changeset is rolled back. +** +** No current or conflicting row information is provided. The only function +** it is possible to call on the supplied sqlite3_changeset_iter handle +** is sqlite3changeset_fk_conflicts(). +** +**
+** SQLITE_CHANGESET_CONSTRAINT
+** If any other constraint violation occurs while applying a change (i.e. +** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is +** invoked with CHANGESET_CONSTRAINT as the second argument. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
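+**
+** For illustration, a conflict handler might dispatch on this argument;
+** a sketch (the return values chosen here are application policy, not a
+** requirement of the API):
+**
+**     static int xConflict(void *pCtx, int eConflict,
+**                          sqlite3_changeset_iter *p){
+**       switch( eConflict ){
+**         case SQLITE_CHANGESET_DATA:     return SQLITE_CHANGESET_REPLACE;
+**         case SQLITE_CHANGESET_CONFLICT: return SQLITE_CHANGESET_REPLACE;
+**         default:                        return SQLITE_CHANGESET_OMIT;
+**       }
+**     }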
+*/ +#define SQLITE_CHANGESET_DATA 1 +#define SQLITE_CHANGESET_NOTFOUND 2 +#define SQLITE_CHANGESET_CONFLICT 3 +#define SQLITE_CHANGESET_CONSTRAINT 4 +#define SQLITE_CHANGESET_FOREIGN_KEY 5 + +/* +** CAPI3REF: Constants Returned By The Conflict Handler +** +** A conflict handler callback must return one of the following three values. +** +**
+**
+** SQLITE_CHANGESET_OMIT
+** If a conflict handler returns this value no special action is taken. The +** change that caused the conflict is not applied. The session module +** continues to the next change in the changeset. +** +**
+** SQLITE_CHANGESET_REPLACE
+** This value may only be returned if the second argument to the conflict +** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this +** is not the case, any changes applied so far are rolled back and the +** call to sqlite3changeset_apply() returns SQLITE_MISUSE. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict +** handler, then the conflicting row is either updated or deleted, depending +** on the type of change. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict +** handler, then the conflicting row is removed from the database and a +** second attempt to apply the change is made. If this second attempt fails, +** the original row is restored to the database before continuing. +** +**
+** SQLITE_CHANGESET_ABORT:
+** If this value is returned, any changes applied so far are rolled back +** and the call to sqlite3changeset_apply() returns SQLITE_ABORT. +**
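+**
+** As an illustration only (a hypothetical handler, not part of this
+** interface), a conflict handler implementing a simple "changeset wins"
+** policy might look like this:
+**
+**   static int xConflict(void *pCtx, int eConflict,
+**                        sqlite3_changeset_iter *pIter){
+**     switch( eConflict ){
+**       case SQLITE_CHANGESET_DATA:        /* Overwrite the modified row */
+**       case SQLITE_CHANGESET_CONFLICT:    /* Replace the duplicate row  */
+**         return SQLITE_CHANGESET_REPLACE;
+**       default:                           /* Skip the change otherwise  */
+**         return SQLITE_CHANGESET_OMIT;
+**     }
+**   }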
+*/ +#define SQLITE_CHANGESET_OMIT 0 +#define SQLITE_CHANGESET_REPLACE 1 +#define SQLITE_CHANGESET_ABORT 2 + +/* +** CAPI3REF: Streaming Versions of API functions. +** +** The six streaming API xxx_strm() functions serve similar purposes to the +** corresponding non-streaming API functions: +** +** +** +**
+**   Streaming function               Non-streaming equivalent
+**   -----------------------------    ------------------------------
+**   sqlite3changeset_apply_strm      [sqlite3changeset_apply]
+**   sqlite3changeset_concat_strm     [sqlite3changeset_concat]
+**   sqlite3changeset_invert_strm     [sqlite3changeset_invert]
+**   sqlite3changeset_start_strm      [sqlite3changeset_start]
+**   sqlite3session_changeset_strm    [sqlite3session_changeset]
+**   sqlite3session_patchset_strm     [sqlite3session_patchset]
+**
+** Non-streaming functions that accept changesets (or patchsets) as input
+** require that the entire changeset be stored in a single buffer in memory.
+** Similarly, those that return a changeset or patchset do so by returning
+** a pointer to a single large buffer allocated using sqlite3_malloc().
+** Normally this is convenient. However, if an application running in a
+** low-memory environment is required to handle very large changesets, the
+** large contiguous memory allocations required can become onerous.
+**
+** In order to avoid this problem, instead of a single large buffer, input
+** is passed to the streaming API functions by way of a callback function
+** that the sessions module invokes to incrementally request input data as
+** it is required. In all cases, a pair of API function parameters such as
+**
+**        int nChangeset,
+**        void *pChangeset,
+**  
+**
+**
+** is replaced by:
+**
+**        int (*xInput)(void *pIn, void *pData, int *pnData),
+**        void *pIn,
+**  
+** +** Each time the xInput callback is invoked by the sessions module, the first +** argument passed is a copy of the supplied pIn context pointer. The second +** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no +** error occurs the xInput method should copy up to (*pnData) bytes of data +** into the buffer and set (*pnData) to the actual number of bytes copied +** before returning SQLITE_OK. If the input is completely exhausted, (*pnData) +** should be set to zero to indicate this. Or, if an error occurs, an SQLite +** error code should be returned. In all cases, if an xInput callback returns +** an error, all processing is abandoned and the streaming API function +** returns a copy of the error code to the caller. +** +** In the case of sqlite3changeset_start_strm(), the xInput callback may be +** invoked by the sessions module at any point during the lifetime of the +** iterator. If such an xInput callback returns an error, the iterator enters +** an error state, whereby all subsequent calls to iterator functions +** immediately fail with the same error code as returned by xInput. +** +** Similarly, streaming API functions that return changesets (or patchsets) +** return them in chunks by way of a callback function instead of via a +** pointer to a single large buffer. In this case, a pair of parameters such +** as: +** +**
+**        int *pnChangeset,
+**        void **ppChangeset,
+**  
+**
+**
+** is replaced by:
+**
+**        int (*xOutput)(void *pOut, const void *pData, int nData),
+**        void *pOut
+**  
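+**
+** As a minimal sketch (assuming stdio FILE* streams opened by the
+** application; the names fileInput and fileOutput are hypothetical, not
+** part of this interface), the two callback types might be implemented
+** as follows:
+**
+**   static int fileInput(void *pIn, void *pData, int *pnData){
+**     size_t n = fread(pData, 1, (size_t)(*pnData), (FILE*)pIn);
+**     if( ferror((FILE*)pIn) ) return SQLITE_IOERR_READ;
+**     *pnData = (int)n;            /* Becomes 0 once input is exhausted */
+**     return SQLITE_OK;
+**   }
+**   static int fileOutput(void *pOut, const void *pData, int nData){
+**     size_t n = fwrite(pData, 1, (size_t)nData, (FILE*)pOut);
+**     return (n==(size_t)nData) ? SQLITE_OK : SQLITE_IOERR_WRITE;
+**   }
+**
+** A changeset stored in one file could then be inverted into a second
+** file with a call like sqlite3changeset_invert_strm(fileInput, pInFile,
+** fileOutput, pOutFile).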
+** +** The xOutput callback is invoked zero or more times to return data to +** the application. The first parameter passed to each call is a copy of the +** pOut pointer supplied by the application. The second parameter, pData, +** points to a buffer nData bytes in size containing the chunk of output +** data being returned. If the xOutput callback successfully processes the +** supplied data, it should return SQLITE_OK to indicate success. Otherwise, +** it should return some other SQLite error code. In this case processing +** is immediately abandoned and the streaming API function returns a copy +** of the xOutput error code to the application. +** +** The sessions module never invokes an xOutput callback with the third +** parameter set to a value less than or equal to zero. Other than this, +** no guarantees are made as to the size of the chunks of data returned. +*/ +SQLITE_API int sqlite3changeset_apply_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); +SQLITE_API int sqlite3changeset_concat_strm( + int (*xInputA)(void *pIn, void *pData, int *pnData), + void *pInA, + int (*xInputB)(void *pIn, void *pData, int *pnData), + void *pInB, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3changeset_invert_strm( + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3changeset_start_strm( + sqlite3_changeset_iter **pp, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +SQLITE_API int sqlite3session_changeset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3session_patchset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changegroup_add_strm(sqlite3_changegroup*, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +int sqlite3changegroup_output_strm(sqlite3_changegroup*, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); + + +/* +** Make sure we can call this stuff from C++. +*/ +#if 0 +} +#endif + +#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */ + +/******** End of sqlite3session.h *********/ +/******** Begin file fts5.h *********/ +/* +** 2014 May 31 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Interfaces to extend FTS5. Using the interfaces defined in this file, +** FTS5 may be extended with: +** +** * custom tokenizers, and +** * custom auxiliary functions. 
+*/ + + +#ifndef _FTS5_H +#define _FTS5_H + + +#if 0 +extern "C" { +#endif + +/************************************************************************* +** CUSTOM AUXILIARY FUNCTIONS +** +** Virtual table implementations may overload SQL functions by implementing +** the sqlite3_module.xFindFunction() method. +*/ + +typedef struct Fts5ExtensionApi Fts5ExtensionApi; +typedef struct Fts5Context Fts5Context; +typedef struct Fts5PhraseIter Fts5PhraseIter; + +typedef void (*fts5_extension_function)( + const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + Fts5Context *pFts, /* First arg to pass to pApi functions */ + sqlite3_context *pCtx, /* Context for returning result/error */ + int nVal, /* Number of values in apVal[] array */ + sqlite3_value **apVal /* Array of trailing arguments */ +); + +struct Fts5PhraseIter { + const unsigned char *a; + const unsigned char *b; +}; + +/* +** EXTENSION API FUNCTIONS +** +** xUserData(pFts): +** Return a copy of the context pointer the extension function was +** registered with. +** +** xColumnTotalSize(pFts, iCol, pnToken): +** If parameter iCol is less than zero, set output variable *pnToken +** to the total number of tokens in the FTS5 table. Or, if iCol is +** non-negative but less than the number of columns in the table, return +** the total number of tokens in column iCol, considering all rows in +** the FTS5 table. +** +** If parameter iCol is greater than or equal to the number of columns +** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g. +** an OOM condition or IO error), an appropriate SQLite error code is +** returned. +** +** xColumnCount(pFts): +** Return the number of columns in the table. +** +** xColumnSize(pFts, iCol, pnToken): +** If parameter iCol is less than zero, set output variable *pnToken +** to the total number of tokens in the current row. Or, if iCol is +** non-negative but less than the number of columns in the table, set +** *pnToken to the number of tokens in column iCol of the current row. +** +** If parameter iCol is greater than or equal to the number of columns +** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g. +** an OOM condition or IO error), an appropriate SQLite error code is +** returned. +** +** This function may be quite inefficient if used with an FTS5 table +** created with the "columnsize=0" option. +** +** xColumnText: +** This function attempts to retrieve the text of column iCol of the +** current document. If successful, (*pz) is set to point to a buffer +** containing the text in utf-8 encoding, (*pn) is set to the size in bytes +** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, +** if an error occurs, an SQLite error code is returned and the final values +** of (*pz) and (*pn) are undefined. +** +** xPhraseCount: +** Returns the number of phrases in the current query expression. +** +** xPhraseSize: +** Returns the number of tokens in phrase iPhrase of the query. Phrases +** are numbered starting from zero. +** +** xInstCount: +** Set *pnInst to the total number of occurrences of all phrases within +** the query within the current row. Return SQLITE_OK if successful, or +** an error code (i.e. SQLITE_NOMEM) if an error occurs. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. If the FTS5 table is created +** with either "detail=none" or "detail=column" and "content=" option +** (i.e. if it is a contentless table), then this API always returns 0. 
+**
+** xInst:
+**   Query for the details of phrase match iIdx within the current row.
+**   Phrase matches are numbered starting from zero, so the iIdx argument
+**   should be greater than or equal to zero and smaller than the value
+**   output by xInstCount().
+**
+**   Usually, output parameter *piPhrase is set to the phrase number, *piCol
+**   to the column in which it occurs and *piOff the token offset of the
+**   first token of the phrase. The exception is if the table was created
+**   with the offsets=0 option specified. In this case *piOff is always
+**   set to -1.
+**
+**   Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
+**   if an error occurs.
+**
+**   This API can be quite slow if used with an FTS5 table created with the
+**   "detail=none" or "detail=column" option.
+**
+** xRowid:
+**   Returns the rowid of the current row.
+**
+** xTokenize:
+**   Tokenize text using the tokenizer belonging to the FTS5 table.
+**
+** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
+**   This API function is used to query the FTS table for phrase iPhrase
+**   of the current query. Specifically, a query equivalent to:
+**
+**       ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
+**
+**   with $p set to a phrase equivalent to the phrase iPhrase of the
+**   current query is executed. Any column filter that applies to
+**   phrase iPhrase of the current query is included in $p. For each
+**   row visited, the callback function passed as the fourth argument
+**   is invoked. The context and API objects passed to the callback
+**   function may be used to access the properties of each matched row.
+**   Invoking Api.xUserData() returns a copy of the pointer passed as
+**   the third argument to pUserData.
+**
+**   If the callback function returns any value other than SQLITE_OK, the
+**   query is abandoned and the xQueryPhrase function returns immediately.
+**   If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
+**   Otherwise, the error code is propagated upwards.
+**
+**   If the query runs to completion without incident, SQLITE_OK is
+**   returned. Or, if some error occurs before the query completes or is
+**   aborted by the callback, an SQLite error code is returned.
+**
+** xSetAuxdata(pFts5, pAux, xDelete)
+**
+**   Save the pointer passed as the second argument as the extension
+**   function's "auxiliary data". The pointer may then be retrieved by the
+**   current or any future invocation of the same fts5 extension function
+**   made as part of the same MATCH query using the xGetAuxdata() API.
+**
+**   Each extension function is allocated a single auxiliary data slot for
+**   each FTS query (MATCH expression). If the extension function is invoked
+**   more than once for a single FTS query, then all invocations share a
+**   single auxiliary data context.
+**
+**   If there is already an auxiliary data pointer when this function is
+**   invoked, then it is replaced by the new pointer. If an xDelete callback
+**   was specified along with the original pointer, it is invoked at this
+**   point.
+**
+**   The xDelete callback, if one is specified, is also invoked on the
+**   auxiliary data pointer after the FTS5 query has finished.
+**
+**   If an error (e.g. an OOM condition) occurs within this function, the
+**   auxiliary data is set to NULL and an error code returned. If the
+**   xDelete parameter was not NULL, it is invoked on the auxiliary data
+**   pointer before returning.
+**
+** xGetAuxdata(pFts5, bClear)
+**
+**   Returns the current auxiliary data pointer for the fts5 extension
+**   function.
+**   See the xSetAuxdata() method for details.
+**
+**   If the bClear argument is non-zero, then the auxiliary data is cleared
+**   (set to NULL) before this function returns. In this case the xDelete
+**   callback, if any, is not invoked.
+**
+** xRowCount(pFts5, pnRow)
+**
+**   This function is used to retrieve the total number of rows in the
+**   table. In other words, the same value that would be returned by:
+**
+**       SELECT count(*) FROM ftstable;
+**
+** xPhraseFirst()
+**   This function is used, along with type Fts5PhraseIter and the
+**   xPhraseNext method, to iterate through all instances of a single query
+**   phrase within the current row. This is the same information as is
+**   accessible via the xInstCount/xInst APIs. While the xInstCount/xInst
+**   APIs are more convenient to use, this API may be faster under some
+**   circumstances. To iterate through instances of phrase iPhrase, use the
+**   following code:
+**
+**       Fts5PhraseIter iter;
+**       int iCol, iOff;
+**       for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
+**           iCol>=0;
+**           pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
+**       ){
+**         // An instance of phrase iPhrase at offset iOff of column iCol
+**       }
+**
+**   The Fts5PhraseIter structure is defined above. Applications should not
+**   modify this structure directly - it should only be used as shown above
+**   with the xPhraseFirst() and xPhraseNext() API methods (and by
+**   xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
+**
+**   This API can be quite slow if used with an FTS5 table created with the
+**   "detail=none" or "detail=column" option. If the FTS5 table is created
+**   with either the "detail=none" or "detail=column" option and the
+**   "content=" option (i.e. if it is a contentless table), then this API
+**   always iterates through an empty set (all calls to xPhraseFirst() set
+**   iCol to -1).
+**
+** xPhraseNext()
+**   See xPhraseFirst above.
+**
+** xPhraseFirstColumn()
+**   This function and xPhraseNextColumn() are similar to the xPhraseFirst()
+**   and xPhraseNext() APIs described above. The difference is that instead
+**   of iterating through all instances of a phrase in the current row,
+**   these APIs are used to iterate through the set of columns in the
+**   current row that contain one or more instances of a specified phrase.
+**   For example:
+**
+**       Fts5PhraseIter iter;
+**       int iCol;
+**       for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
+**           iCol>=0;
+**           pApi->xPhraseNextColumn(pFts, &iter, &iCol)
+**       ){
+**         // Column iCol contains at least one instance of phrase iPhrase
+**       }
+**
+**   This API can be quite slow if used with an FTS5 table created with the
+**   "detail=none" option. If the FTS5 table is created with both the
+**   "detail=none" and "content=" options (i.e. if it is a contentless
+**   table), then this API always iterates through an empty set (all calls
+**   to xPhraseFirstColumn() set iCol to -1).
+**
+**   The information accessed using this API and its companion
+**   xPhraseNextColumn() may also be obtained using xPhraseFirst/xPhraseNext
+**   (or xInst/xInstCount). The chief advantage of this API is that it is
+**   significantly more efficient than those alternatives when used with
+**   "detail=column" tables.
+**
+** xPhraseNextColumn()
+**   See xPhraseFirstColumn above.
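+**
+** As an illustration (hypothetical code, not part of this interface), a
+** minimal auxiliary function that returns the total number of phrase
+** matches in the current row might be written as follows:
+**
+**   static void matchCount(
+**     const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+**     Fts5Context *pFts,            /* First arg to pass to pApi functions */
+**     sqlite3_context *pCtx,        /* Context for returning result/error */
+**     int nVal,                     /* Number of values in apVal[] array */
+**     sqlite3_value **apVal         /* Array of trailing arguments */
+**   ){
+**     int nInst = 0;
+**     int rc = pApi->xInstCount(pFts, &nInst);
+**     if( rc==SQLITE_OK ){
+**       sqlite3_result_int(pCtx, nInst);
+**     }else{
+**       sqlite3_result_error_code(pCtx, rc);
+**     }
+**   }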
+*/ +struct Fts5ExtensionApi { + int iVersion; /* Currently always set to 3 */ + + void *(*xUserData)(Fts5Context*); + + int (*xColumnCount)(Fts5Context*); + int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow); + int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken); + + int (*xTokenize)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); + + int (*xPhraseCount)(Fts5Context*); + int (*xPhraseSize)(Fts5Context*, int iPhrase); + + int (*xInstCount)(Fts5Context*, int *pnInst); + int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff); + + sqlite3_int64 (*xRowid)(Fts5Context*); + int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken); + + int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData, + int(*)(const Fts5ExtensionApi*,Fts5Context*,void*) + ); + int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*)); + void *(*xGetAuxdata)(Fts5Context*, int bClear); + + int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*); + void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff); + + int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); + void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); +}; + +/* +** CUSTOM AUXILIARY FUNCTIONS +*************************************************************************/ + +/************************************************************************* +** CUSTOM TOKENIZERS +** +** Applications may also register custom tokenizer types. A tokenizer +** is registered by providing fts5 with a populated instance of the +** following structure. All structure methods must be defined, setting +** any member of the fts5_tokenizer struct to NULL leads to undefined +** behaviour. The structure methods are expected to function as follows: +** +** xCreate: +** This function is used to allocate and initialize a tokenizer instance. +** A tokenizer instance is required to actually tokenize text. +** +** The first argument passed to this function is a copy of the (void*) +** pointer provided by the application when the fts5_tokenizer object +** was registered with FTS5 (the third argument to xCreateTokenizer()). +** The second and third arguments are an array of nul-terminated strings +** containing the tokenizer arguments, if any, specified following the +** tokenizer name as part of the CREATE VIRTUAL TABLE statement used +** to create the FTS5 table. +** +** The final argument is an output variable. If successful, (*ppOut) +** should be set to point to the new tokenizer handle and SQLITE_OK +** returned. If an error occurs, some value other than SQLITE_OK should +** be returned. In this case, fts5 assumes that the final value of *ppOut +** is undefined. +** +** xDelete: +** This function is invoked to delete a tokenizer handle previously +** allocated using xCreate(). Fts5 guarantees that this function will +** be invoked exactly once for each successful call to xCreate(). +** +** xTokenize: +** This function is expected to tokenize the nText byte string indicated +** by argument pText. pText may or may not be nul-terminated. The first +** argument passed to this function is a pointer to an Fts5Tokenizer object +** returned by an earlier call to xCreate(). 
+** +** The second argument indicates the reason that FTS5 is requesting +** tokenization of the supplied text. This is always one of the following +** four values: +** +**
  • FTS5_TOKENIZE_DOCUMENT - A document is being inserted into +** or removed from the FTS table. The tokenizer is being invoked to +** determine the set of tokens to add to (or delete from) the +** FTS index. +** +**
  • FTS5_TOKENIZE_QUERY - A MATCH query is being executed +** against the FTS index. The tokenizer is being called to tokenize +** a bareword or quoted string specified as part of the query. +** +**
  • (FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX) - Same as +** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is +** followed by a "*" character, indicating that the last token +** returned by the tokenizer will be treated as a token prefix. +** +**
  • FTS5_TOKENIZE_AUX - The tokenizer is being invoked to +** satisfy an fts5_api.xTokenize() request made by an auxiliary +** function. Or an fts5_api.xColumnSize() request made by the same +** on a columnsize=0 database. +**
+**
+** For each token in the input string, the supplied callback xToken() must
+** be invoked. The first argument to it should be a copy of the pointer
+** passed as the second argument to xTokenize(). The third and fourth
+** arguments are a pointer to a buffer containing the token text, and the
+** size of the token in bytes. The fifth and sixth arguments are the byte
+** offsets of the first byte of, and the first byte immediately following,
+** the text from which the token is derived within the input.
+**
+** The second argument passed to the xToken() callback ("tflags") should
+** normally be set to 0. The exception is if the tokenizer supports
+** synonyms. In this case see the discussion below for details.
+**
+** FTS5 assumes the xToken() callback is invoked for each token in the
+** order that they occur within the input text.
+**
+** If an xToken() callback returns any value other than SQLITE_OK, then
+** the tokenization should be abandoned and the xTokenize() method should
+** immediately return a copy of the xToken() return value. Or, if the
+** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
+** if an error occurs with the xTokenize() implementation itself, it
+** may abandon the tokenization and return any error code other than
+** SQLITE_OK or SQLITE_DONE.
+**
+** SYNONYM SUPPORT
+**
+** Custom tokenizers may also support synonyms. Consider a case in which a
+** user wishes to query for a phrase such as "first place". Using the
+** built-in tokenizers, the FTS5 query 'first + place' will match instances
+** of "first place" within the document set, but not alternative forms
+** such as "1st place". In some applications, it would be better to match
+** all instances of "first place" or "1st place" regardless of which form
+** the user specified in the MATCH query text.
+**
+** There are several ways to approach this in FTS5:
+**
  1. By mapping all synonyms to a single token. In this case, the +** In the above example, this means that the tokenizer returns the +** same token for inputs "first" and "1st". Say that token is in +** fact "first", so that when the user inserts the document "I won +** 1st place" entries are added to the index for tokens "i", "won", +** "first" and "place". If the user then queries for '1st + place', +** the tokenizer substitutes "first" for "1st" and the query works +** as expected. +** +**
  2. By adding multiple synonyms for a single term to the FTS index. +** In this case, when tokenizing query text, the tokenizer may +** provide multiple synonyms for a single term within the document. +** FTS5 then queries the index for each synonym individually. For +** example, faced with the query: +** +** +** ... MATCH 'first place' +** +** the tokenizer offers both "1st" and "first" as synonyms for the +** first token in the MATCH query and FTS5 effectively runs a query +** similar to: +** +** +** ... MATCH '(first OR 1st) place' +** +** except that, for the purposes of auxiliary functions, the query +** still appears to contain just two phrases - "(first OR 1st)" +** being treated as a single phrase. +** +**
  3. By adding multiple synonyms for a single term to the FTS index. +** Using this method, when tokenizing document text, the tokenizer +** provides multiple synonyms for each token. So that when a +** document such as "I won first place" is tokenized, entries are +** added to the FTS index for "i", "won", "first", "1st" and +** "place". +** +** This way, even if the tokenizer does not provide synonyms +** when tokenizing query text (it should not - to do would be +** inefficient), it doesn't matter if the user queries for +** 'first + place' or '1st + place', as there are entires in the +** FTS index corresponding to both forms of the first token. +**
+**
+** Whether it is parsing document or query text, any call to xToken that
+** specifies a tflags argument with the FTS5_TOKEN_COLOCATED bit set
+** is considered to supply a synonym for the previous token. For example,
+** when parsing the document "I won first place", a tokenizer that supports
+** synonyms would call xToken() 5 times, as follows:
+**
+**       xToken(pCtx, 0, "i",     1, 0,  1);
+**       xToken(pCtx, 0, "won",   3, 2,  5);
+**       xToken(pCtx, 0, "first", 5, 6, 11);
+**       xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
+**       xToken(pCtx, 0, "place", 5, 12, 17);
+**
+** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
+** xToken() is called. Multiple synonyms may be specified for a single token
+** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
+** There is no limit to the number of synonyms that may be provided for a
+** single token.
+**
+** In many cases, method (1) above is the best approach. It does not add
+** extra data to the FTS index or require FTS5 to query for multiple terms,
+** so it is efficient in terms of disk space and query speed. However, it
+** does not support prefix queries very well. If, as suggested above, the
+** token "first" is substituted for "1st" by the tokenizer, then the query:
+**
+**       ... MATCH '1s*'
+**
+** will not match documents that contain the token "1st" (as the tokenizer
+** will probably not map "1s" to any prefix of "first").
+**
+** For full prefix support, method (3) may be preferred. In this case,
+** because the index contains entries for both "first" and "1st", prefix
+** queries such as 'fi*' or '1s*' will match correctly. However, because
+** extra entries are added to the FTS index, this method uses more space
+** within the database.
+**
+** Method (2) offers a midpoint between (1) and (3). Using this method,
+** a query such as '1s*' will match documents that contain the literal
+** token "1st", but not "first" (assuming the tokenizer is not able to
+** provide synonyms for prefixes). However, a non-prefix query like '1st'
+** will match against "1st" and "first". This method does not require
+** extra disk space, as no extra entries are added to the FTS index.
+** On the other hand, it may require more CPU cycles to run MATCH queries,
+** as separate queries of the FTS index are required for each synonym.
+**
+** When using methods (2) or (3), it is important that the tokenizer only
+** provide synonyms when tokenizing query text (method (2)) or document
+** text (method (3)), not both. Doing both will not cause any errors, but
+** is inefficient.
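+**
+** As a hypothetical sketch (the names myCreate, myDelete, myTokenize,
+** pTokCtx and pApi are placeholders, not part of this interface), a custom
+** tokenizer is registered by populating an fts5_tokenizer struct (defined
+** below) and passing it to the fts5_api.xCreateTokenizer() method:
+**
+**   fts5_tokenizer tok = { myCreate, myDelete, myTokenize };
+**   int rc = pApi->xCreateTokenizer(pApi, "mytokenizer", pTokCtx, &tok, 0);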
+*/ +typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer fts5_tokenizer; +struct fts5_tokenizer { + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* Flags that may be passed as the third argument to xTokenize() */ +#define FTS5_TOKENIZE_QUERY 0x0001 +#define FTS5_TOKENIZE_PREFIX 0x0002 +#define FTS5_TOKENIZE_DOCUMENT 0x0004 +#define FTS5_TOKENIZE_AUX 0x0008 + +/* Flags that may be passed by the tokenizer implementation back to FTS5 +** as the third argument to the supplied xToken callback. */ +#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */ + +/* +** END OF CUSTOM TOKENIZERS +*************************************************************************/ + +/************************************************************************* +** FTS5 EXTENSION REGISTRATION API +*/ +typedef struct fts5_api fts5_api; +struct fts5_api { + int iVersion; /* Currently always set to 2 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer)( + fts5_api *pApi, + const char *zName, + void *pContext, + fts5_tokenizer *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer)( + fts5_api *pApi, + const char *zName, + void **ppContext, + fts5_tokenizer *pTokenizer + ); + + /* Create a new auxiliary function */ + int (*xCreateFunction)( + fts5_api *pApi, + const char *zName, + void *pContext, + fts5_extension_function xFunction, + void (*xDestroy)(void*) + ); +}; + +/* +** END OF REGISTRATION API +*************************************************************************/ + +#if 0 +} /* end of the 'extern "C"' block */ +#endif + +#endif /* _FTS5_H */ + +/******** End of fts5.h *********/ + +/************** End of sqlite3.h *********************************************/ +/************** Continuing where we left off in sqliteInt.h ******************/ + +/* +** Include the configuration header output by 'configure' if we're using the +** autoconf-based build +*/ +#ifdef _HAVE_SQLITE_CONFIG_H +#include "config.h" +#endif + +/************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/ +/************** Begin file sqliteLimit.h *************************************/ +/* +** 2007 May 7 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file defines various limits of what SQLite can process. +*/ + +/* +** The maximum length of a TEXT or BLOB in bytes. This also +** limits the size of a row in a table or index. +** +** The hard limit is the ability of a 32-bit signed integer +** to count the size: 2^31-1 or 2147483647. 
+*/
+#ifndef SQLITE_MAX_LENGTH
+# define SQLITE_MAX_LENGTH 1000000000
+#endif
+
+/*
+** This is the maximum number of
+**
+**    * Columns in a table
+**    * Columns in an index
+**    * Columns in a view
+**    * Terms in the SET clause of an UPDATE statement
+**    * Terms in the result set of a SELECT statement
+**    * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement.
+**    * Terms in the VALUES clause of an INSERT statement
+**
+** The hard upper limit here is 32767. Most database people will
+** tell you that in a well-normalized database, you usually should
+** not have more than a dozen or so columns in any table. And if
+** that is the case, there is no point in having more than a few
+** dozen values in any of the other situations described above.
+*/
+#ifndef SQLITE_MAX_COLUMN
+# define SQLITE_MAX_COLUMN 2000
+#endif
+
+/*
+** The maximum length of a single SQL statement in bytes.
+**
+** It used to be the case that setting this value to zero would
+** turn the limit off. That is no longer true. It is not possible
+** to turn this limit off.
+*/
+#ifndef SQLITE_MAX_SQL_LENGTH
+# define SQLITE_MAX_SQL_LENGTH 1000000000
+#endif
+
+/*
+** The maximum depth of an expression tree. This is limited to
+** some extent by SQLITE_MAX_SQL_LENGTH. But sometimes you might
+** want to place more severe limits on the complexity of an
+** expression.
+**
+** A value of 0 used to mean that the limit was not enforced.
+** But that is no longer true. The limit is now strictly enforced
+** at all times.
+*/
+#ifndef SQLITE_MAX_EXPR_DEPTH
+# define SQLITE_MAX_EXPR_DEPTH 1000
+#endif
+
+/*
+** The maximum number of terms in a compound SELECT statement.
+** The code generator for compound SELECT statements does one
+** level of recursion for each term. A stack overflow can result
+** if the number of terms is too large. In practice, most SQL
+** never has more than 3 or 4 terms. Use a value of 0 to disable
+** any limit on the number of terms in a compound SELECT.
+*/
+#ifndef SQLITE_MAX_COMPOUND_SELECT
+# define SQLITE_MAX_COMPOUND_SELECT 500
+#endif
+
+/*
+** The maximum number of opcodes in a VDBE program.
+** Not currently enforced.
+*/
+#ifndef SQLITE_MAX_VDBE_OP
+# define SQLITE_MAX_VDBE_OP 25000
+#endif
+
+/*
+** The maximum number of arguments to an SQL function.
+*/
+#ifndef SQLITE_MAX_FUNCTION_ARG
+# define SQLITE_MAX_FUNCTION_ARG 127
+#endif
+
+/*
+** The suggested maximum number of in-memory pages to use for
+** the main database table and for temporary tables.
+**
+** IMPLEMENTATION-OF: R-30185-15359 The default suggested cache size
+** is -2000, which means the cache size is limited to 2048000 bytes
+** of memory.
+** IMPLEMENTATION-OF: R-48205-43578 The default suggested cache size can be
+** altered using the SQLITE_DEFAULT_CACHE_SIZE compile-time options.
+*/
+#ifndef SQLITE_DEFAULT_CACHE_SIZE
+# define SQLITE_DEFAULT_CACHE_SIZE -2000
+#endif
+
+/*
+** The default number of frames to accumulate in the log file before
+** checkpointing the database in WAL mode.
+*/
+#ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT
+# define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000
+#endif
+
+/*
+** The maximum number of attached databases. This must be between 0
+** and 125. The upper bound of 125 is because the attached databases are
+** counted using a signed 8-bit integer which has a maximum value of 127
+** and we have to allow 2 extra counts for the "main" and "temp" databases.
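+**
+** Like the other limits in this file, the default (though not the hard
+** upper bound of 125) may be overridden at compile-time, for example:
+**
+**   cc -DSQLITE_MAX_ATTACHED=25 ...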
+*/
+#ifndef SQLITE_MAX_ATTACHED
+# define SQLITE_MAX_ATTACHED 10
+#endif
+
+
+/*
+** The maximum value of a ?nnn wildcard that the parser will accept.
+*/
+#ifndef SQLITE_MAX_VARIABLE_NUMBER
+# define SQLITE_MAX_VARIABLE_NUMBER 999
+#endif
+
+/* Maximum page size. The upper bound on this value is 65536. This is a
+** limit imposed by the use of 16-bit offsets within each page.
+**
+** Earlier versions of SQLite allowed the user to change this value at
+** compile time. This is no longer permitted, on the grounds that it creates
+** a library that is technically incompatible with an SQLite library
+** compiled with a different limit. If a process operating on a database
+** with a page-size of 65536 bytes crashes, then an instance of SQLite
+** compiled with the default page-size limit will not be able to rollback
+** the aborted transaction. This could lead to database corruption.
+*/
+#ifdef SQLITE_MAX_PAGE_SIZE
+# undef SQLITE_MAX_PAGE_SIZE
+#endif
+#define SQLITE_MAX_PAGE_SIZE 65536
+
+
+/*
+** The default size of a database page.
+*/
+#ifndef SQLITE_DEFAULT_PAGE_SIZE
+# define SQLITE_DEFAULT_PAGE_SIZE 4096
+#endif
+#if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE
+# undef SQLITE_DEFAULT_PAGE_SIZE
+# define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE
+#endif
+
+/*
+** Ordinarily, if no value is explicitly provided, SQLite creates databases
+** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain
+** device characteristics (sector-size and atomic write() support),
+** SQLite may choose a larger value. This constant is the maximum value
+** SQLite will choose on its own.
+*/
+#ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE
+# define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192
+#endif
+#if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE
+# undef SQLITE_MAX_DEFAULT_PAGE_SIZE
+# define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE
+#endif
+
+
+/*
+** Maximum number of pages in one database file.
+**
+** This is really just the default value for the max_page_count pragma.
+** This value can be lowered (or raised) at run-time using the
+** max_page_count pragma.
+*/
+#ifndef SQLITE_MAX_PAGE_COUNT
+# define SQLITE_MAX_PAGE_COUNT 1073741823
+#endif
+
+/*
+** Maximum length (in bytes) of the pattern in a LIKE or GLOB
+** operator.
+*/
+#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH
+# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000
+#endif
+
+/*
+** Maximum depth of recursion for triggers.
+**
+** A value of 1 means that a trigger program will not be able to itself
+** fire any triggers. A value of 0 means that no trigger programs at all
+** may be executed.
+*/
+#ifndef SQLITE_MAX_TRIGGER_DEPTH
+# define SQLITE_MAX_TRIGGER_DEPTH 1000
+#endif
+
+/************** End of sqliteLimit.h *****************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/* Disable nuisance warnings on Borland compilers */
+#if defined(__BORLANDC__)
+#pragma warn -rch /* unreachable code */
+#pragma warn -ccc /* Condition is always true or false */
+#pragma warn -aus /* Assigned value is never used */
+#pragma warn -csu /* Comparing signed and unsigned */
+#pragma warn -spa /* Suspicious pointer arithmetic */
+#endif
+
+/*
+** Include standard header files as necessary
+*/
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+/*
+** The following macros are used to cast pointers to integers and
+** integers to pointers.
+** The way you do this varies from one compiler
+** to the next, so we have developed the following set of #if statements
+** to generate appropriate macros for a wide range of compilers.
+**
+** The correct "ANSI" way to do this is to use the intptr_t type.
+** Unfortunately, that typedef is not available on all compilers, or
+** if it is available, it requires an #include of specific headers
+** that vary from one machine to the next.
+**
+** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on
+** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)).
+** So we have to define the macros in different ways depending on the
+** compiler.
+*/
+#if defined(__PTRDIFF_TYPE__)  /* This case should work for GCC */
+# define SQLITE_INT_TO_PTR(X)  ((void*)(__PTRDIFF_TYPE__)(X))
+# define SQLITE_PTR_TO_INT(X)  ((int)(__PTRDIFF_TYPE__)(X))
+#elif !defined(__GNUC__)       /* Works for compilers other than LLVM */
+# define SQLITE_INT_TO_PTR(X)  ((void*)&((char*)0)[X])
+# define SQLITE_PTR_TO_INT(X)  ((int)(((char*)X)-(char*)0))
+#elif defined(HAVE_STDINT_H)   /* Use this case if we have ANSI headers */
+# define SQLITE_INT_TO_PTR(X)  ((void*)(intptr_t)(X))
+# define SQLITE_PTR_TO_INT(X)  ((int)(intptr_t)(X))
+#else                          /* Generates a warning - but it always works */
+# define SQLITE_INT_TO_PTR(X)  ((void*)(X))
+# define SQLITE_PTR_TO_INT(X)  ((int)(X))
+#endif
+
+/*
+** A macro to hint to the compiler that a function should not be
+** inlined.
+*/
+#if defined(__GNUC__)
+# define SQLITE_NOINLINE  __attribute__((noinline))
+#elif defined(_MSC_VER) && _MSC_VER>=1310
+# define SQLITE_NOINLINE  __declspec(noinline)
+#else
+# define SQLITE_NOINLINE
+#endif
+
+/*
+** Make sure that the compiler intrinsics we desire are enabled when
+** compiling with an appropriate version of MSVC unless prevented by
+** the SQLITE_DISABLE_INTRINSIC define.
+*/
+#if !defined(SQLITE_DISABLE_INTRINSIC)
+# if defined(_MSC_VER) && _MSC_VER>=1400
+#  if !defined(_WIN32_WCE)
+#   include <intrin.h>
+#   pragma intrinsic(_byteswap_ushort)
+#   pragma intrinsic(_byteswap_ulong)
+#   pragma intrinsic(_byteswap_uint64)
+#   pragma intrinsic(_ReadWriteBarrier)
+#  else
+#   include <cmnintrin.h>
+#  endif
+# endif
+#endif
+
+/*
+** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
+** 0 means mutexes are permanently disabled and the library is never
+** threadsafe. 1 means the library is serialized which is the highest
+** level of threadsafety. 2 means the library is multithreaded - multiple
+** threads can use SQLite as long as no two threads try to use the same
+** database connection at the same time.
+**
+** Older versions of SQLite used an optional THREADSAFE macro.
+** We support that for legacy.
+*/
+#if !defined(SQLITE_THREADSAFE)
+# if defined(THREADSAFE)
+#  define SQLITE_THREADSAFE THREADSAFE
+# else
+#  define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */
+# endif
+#endif
+
+/*
+** Powersafe overwrite is on by default. But can be turned off using
+** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option.
+*/
+#ifndef SQLITE_POWERSAFE_OVERWRITE
+# define SQLITE_POWERSAFE_OVERWRITE 1
+#endif
+
+/*
+** EVIDENCE-OF: R-25715-37072 Memory allocation statistics are enabled by
+** default unless SQLite is compiled with SQLITE_DEFAULT_MEMSTATUS=0 in
+** which case memory allocation statistics are disabled by default.
+*/
+#if !defined(SQLITE_DEFAULT_MEMSTATUS)
+# define SQLITE_DEFAULT_MEMSTATUS 1
+#endif
+
+/*
+** Exactly one of the following macros must be defined in order to
+** specify which memory allocation subsystem to use.
+** +** SQLITE_SYSTEM_MALLOC // Use normal system malloc() +** SQLITE_WIN32_MALLOC // Use Win32 native heap API +** SQLITE_ZERO_MALLOC // Use a stub allocator that always fails +** SQLITE_MEMDEBUG // Debugging version of system malloc() +** +** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the +** assert() macro is enabled, each call into the Win32 native heap subsystem +** will cause HeapValidate to be called. If heap validation should fail, an +** assertion will be triggered. +** +** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as +** the default. +*/ +#if defined(SQLITE_SYSTEM_MALLOC) \ + + defined(SQLITE_WIN32_MALLOC) \ + + defined(SQLITE_ZERO_MALLOC) \ + + defined(SQLITE_MEMDEBUG)>1 +# error "Two or more of the following compile-time configuration options\ + are defined but at most one is allowed:\ + SQLITE_SYSTEM_MALLOC, SQLITE_WIN32_MALLOC, SQLITE_MEMDEBUG,\ + SQLITE_ZERO_MALLOC" +#endif +#if defined(SQLITE_SYSTEM_MALLOC) \ + + defined(SQLITE_WIN32_MALLOC) \ + + defined(SQLITE_ZERO_MALLOC) \ + + defined(SQLITE_MEMDEBUG)==0 +# define SQLITE_SYSTEM_MALLOC 1 +#endif + +/* +** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the +** sizes of memory allocations below this value where possible. +*/ +#if !defined(SQLITE_MALLOC_SOFT_LIMIT) +# define SQLITE_MALLOC_SOFT_LIMIT 1024 +#endif + +/* +** We need to define _XOPEN_SOURCE as follows in order to enable +** recursive mutexes on most Unix systems and fchmod() on OpenBSD. +** But _XOPEN_SOURCE define causes problems for Mac OS X, so omit +** it. +*/ +#if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__) +# define _XOPEN_SOURCE 600 +#endif + +/* +** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that +** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true, +** make it true by defining or undefining NDEBUG. +** +** Setting NDEBUG makes the code smaller and faster by disabling the +** assert() statements in the code. So we want the default action +** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG +** is set. Thus NDEBUG becomes an opt-in rather than an opt-out +** feature. +*/ +#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) +# define NDEBUG 1 +#endif +#if defined(NDEBUG) && defined(SQLITE_DEBUG) +# undef NDEBUG +#endif + +/* +** Enable SQLITE_ENABLE_EXPLAIN_COMMENTS if SQLITE_DEBUG is turned on. +*/ +#if !defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) && defined(SQLITE_DEBUG) +# define SQLITE_ENABLE_EXPLAIN_COMMENTS 1 +#endif + +/* +** The testcase() macro is used to aid in coverage testing. When +** doing coverage testing, the condition inside the argument to +** testcase() must be evaluated both true and false in order to +** get full branch coverage. The testcase() macro is inserted +** to help ensure adequate test coverage in places where simple +** condition/decision coverage is inadequate. For example, testcase() +** can be used to make sure boundary values are tested. For +** bitmask tests, testcase() can be used to make sure each bit +** is significant and used at least once. On switch statements +** where multiple cases go to the same block of code, testcase() +** can insure that all cases are evaluated. 
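+**
+** For example (illustrative uses only):
+**
+**   testcase( n==SQLITE_MAX_LENGTH );  /* Boundary value is reached */
+**   testcase( flags & 0x20 );          /* The 0x20 bit is sometimes set */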
+** +*/ +#ifdef SQLITE_COVERAGE_TEST +SQLITE_PRIVATE void sqlite3Coverage(int); +# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); } +#else +# define testcase(X) +#endif + +/* +** The TESTONLY macro is used to enclose variable declarations or +** other bits of code that are needed to support the arguments +** within testcase() and assert() macros. +*/ +#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) +# define TESTONLY(X) X +#else +# define TESTONLY(X) +#endif + +/* +** Sometimes we need a small amount of code such as a variable initialization +** to setup for a later assert() statement. We do not want this code to +** appear when assert() is disabled. The following macro is therefore +** used to contain that setup code. The "VVA" acronym stands for +** "Verification, Validation, and Accreditation". In other words, the +** code within VVA_ONLY() will only run during verification processes. +*/ +#ifndef NDEBUG +# define VVA_ONLY(X) X +#else +# define VVA_ONLY(X) +#endif + +/* +** The ALWAYS and NEVER macros surround boolean expressions which +** are intended to always be true or false, respectively. Such +** expressions could be omitted from the code completely. But they +** are included in a few cases in order to enhance the resilience +** of SQLite to unexpected behavior - to make the code "self-healing" +** or "ductile" rather than being "brittle" and crashing at the first +** hint of unplanned behavior. +** +** In other words, ALWAYS and NEVER are added for defensive code. +** +** When doing coverage testing ALWAYS and NEVER are hard-coded to +** be true and false so that the unreachable code they specify will +** not be counted as untested code. +*/ +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) +# define ALWAYS(X) (1) +# define NEVER(X) (0) +#elif !defined(NDEBUG) +# define ALWAYS(X) ((X)?1:(assert(0),0)) +# define NEVER(X) ((X)?(assert(0),1):0) +#else +# define ALWAYS(X) (X) +# define NEVER(X) (X) +#endif + +/* +** Some malloc failures are only possible if SQLITE_TEST_REALLOC_STRESS is +** defined. We need to defend against those failures when testing with +** SQLITE_TEST_REALLOC_STRESS, but we don't want the unreachable branches +** during a normal build. The following macro can be used to disable tests +** that are always false except when SQLITE_TEST_REALLOC_STRESS is set. +*/ +#if defined(SQLITE_TEST_REALLOC_STRESS) +# define ONLY_IF_REALLOC_STRESS(X) (X) +#elif !defined(NDEBUG) +# define ONLY_IF_REALLOC_STRESS(X) ((X)?(assert(0),1):0) +#else +# define ONLY_IF_REALLOC_STRESS(X) (0) +#endif + +/* +** Declarations used for tracing the operating system interfaces. +*/ +#if defined(SQLITE_FORCE_OS_TRACE) || defined(SQLITE_TEST) || \ + (defined(SQLITE_DEBUG) && SQLITE_OS_WIN) + extern int sqlite3OSTrace; +# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X +# define SQLITE_HAVE_OS_TRACE +#else +# define OSTRACE(X) +# undef SQLITE_HAVE_OS_TRACE +#endif + +/* +** Is the sqlite3ErrName() function needed in the build? Currently, +** it is needed by "mutex_w32.c" (when debugging), "os_win.c" (when +** OSTRACE is enabled), and by several "test*.c" files (which are +** compiled using SQLITE_TEST). 
+*/ +#if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \ + (defined(SQLITE_DEBUG) && SQLITE_OS_WIN) +# define SQLITE_NEED_ERR_NAME +#else +# undef SQLITE_NEED_ERR_NAME +#endif + +/* +** SQLITE_ENABLE_EXPLAIN_COMMENTS is incompatible with SQLITE_OMIT_EXPLAIN +*/ +#ifdef SQLITE_OMIT_EXPLAIN +# undef SQLITE_ENABLE_EXPLAIN_COMMENTS +#endif + +/* +** Return true (non-zero) if the input is an integer that is too large +** to fit in 32-bits. This macro is used inside of various testcase() +** macros to verify that we have tested SQLite for large-file support. +*/ +#define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) + +/* +** The macro unlikely() is a hint that surrounds a boolean +** expression that is usually false. Macro likely() surrounds +** a boolean expression that is usually true. These hints could, +** in theory, be used by the compiler to generate better code, but +** currently they are just comments for human readers. +*/ +#define likely(X) (X) +#define unlikely(X) (X) + +/************** Include hash.h in the middle of sqliteInt.h ******************/ +/************** Begin file hash.h ********************************************/ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implementation +** used in SQLite. +*/ +#ifndef SQLITE_HASH_H +#define SQLITE_HASH_H + +/* Forward declarations of structures. */ +typedef struct Hash Hash; +typedef struct HashElem HashElem; + +/* A complete hash table is an instance of the following structure. +** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, some of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. +** +** All elements of the hash table are on a single doubly-linked list. +** Hash.first points to the head of this list. +** +** There are Hash.htsize buckets. Each bucket points to a spot in +** the global doubly-linked list. The contents of the bucket are the +** element pointed to plus the next _ht.count-1 elements in the list. +** +** Hash.htsize and Hash.ht may be zero. In that case lookup is done +** by a linear search of the global list. For small tables, the +** Hash.ht table is never allocated because if there are few elements +** in the table, it is faster to do a linear search than to manage +** the hash table. +*/ +struct Hash { + unsigned int htsize; /* Number of buckets in the hash table */ + unsigned int count; /* Number of entries in this table */ + HashElem *first; /* The first element of the array */ + struct _ht { /* the hash table */ + int count; /* Number of entries with this hash */ + HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. 
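+**
+** For example (illustrative only; pNew stands in for an application
+** pointer), the access routines declared below might be used like this:
+**
+**   Hash h;
+**   sqlite3HashInit(&h);
+**   sqlite3HashInsert(&h, "key", pNew);     /* Insert pNew under "key"   */
+**   void *p = sqlite3HashFind(&h, "key");   /* Returns pNew              */
+**   sqlite3HashInsert(&h, "key", 0);        /* Inserting NULL deletes it */
+**   sqlite3HashClear(&h);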
+*/ +struct HashElem { + HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + const char *pKey; /* Key associated with this element */ +}; + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +SQLITE_PRIVATE void sqlite3HashInit(Hash*); +SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, void *pData); +SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey); +SQLITE_PRIVATE void sqlite3HashClear(Hash*); + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** Hash h; +** HashElem *p; +** ... +** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){ +** SomeStructure *pData = sqliteHashData(p); +** // do something with pData +** } +*/ +#define sqliteHashFirst(H) ((H)->first) +#define sqliteHashNext(E) ((E)->next) +#define sqliteHashData(E) ((E)->data) +/* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */ +/* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */ + +/* +** Number of entries in a hash table +*/ +/* #define sqliteHashCount(H) ((H)->count) // NOT USED */ + +#endif /* SQLITE_HASH_H */ + +/************** End of hash.h ************************************************/ +/************** Continuing where we left off in sqliteInt.h ******************/ +/************** Include parse.h in the middle of sqliteInt.h *****************/ +/************** Begin file parse.h *******************************************/ +#define TK_SEMI 1 +#define TK_EXPLAIN 2 +#define TK_QUERY 3 +#define TK_PLAN 4 +#define TK_BEGIN 5 +#define TK_TRANSACTION 6 +#define TK_DEFERRED 7 +#define TK_IMMEDIATE 8 +#define TK_EXCLUSIVE 9 +#define TK_COMMIT 10 +#define TK_END 11 +#define TK_ROLLBACK 12 +#define TK_SAVEPOINT 13 +#define TK_RELEASE 14 +#define TK_TO 15 +#define TK_TABLE 16 +#define TK_CREATE 17 +#define TK_IF 18 +#define TK_NOT 19 +#define TK_EXISTS 20 +#define TK_TEMP 21 +#define TK_LP 22 +#define TK_RP 23 +#define TK_AS 24 +#define TK_WITHOUT 25 +#define TK_COMMA 26 +#define TK_OR 27 +#define TK_AND 28 +#define TK_IS 29 +#define TK_MATCH 30 +#define TK_LIKE_KW 31 +#define TK_BETWEEN 32 +#define TK_IN 33 +#define TK_ISNULL 34 +#define TK_NOTNULL 35 +#define TK_NE 36 +#define TK_EQ 37 +#define TK_GT 38 +#define TK_LE 39 +#define TK_LT 40 +#define TK_GE 41 +#define TK_ESCAPE 42 +#define TK_BITAND 43 +#define TK_BITOR 44 +#define TK_LSHIFT 45 +#define TK_RSHIFT 46 +#define TK_PLUS 47 +#define TK_MINUS 48 +#define TK_STAR 49 +#define TK_SLASH 50 +#define TK_REM 51 +#define TK_CONCAT 52 +#define TK_COLLATE 53 +#define TK_BITNOT 54 +#define TK_ID 55 +#define TK_INDEXED 56 +#define TK_ABORT 57 +#define TK_ACTION 58 +#define TK_AFTER 59 +#define TK_ANALYZE 60 +#define TK_ASC 61 +#define TK_ATTACH 62 +#define TK_BEFORE 63 +#define TK_BY 64 +#define TK_CASCADE 65 +#define TK_CAST 66 +#define TK_COLUMNKW 67 +#define TK_CONFLICT 68 +#define TK_DATABASE 69 +#define TK_DESC 70 +#define TK_DETACH 71 +#define TK_EACH 72 +#define TK_FAIL 73 +#define TK_FOR 74 +#define TK_IGNORE 75 +#define TK_INITIALLY 76 +#define TK_INSTEAD 77 +#define TK_NO 78 +#define TK_KEY 79 +#define TK_OF 80 +#define TK_OFFSET 81 +#define TK_PRAGMA 82 +#define TK_RAISE 83 +#define TK_RECURSIVE 84 +#define TK_REPLACE 85 +#define TK_RESTRICT 86 +#define TK_ROW 87 +#define TK_TRIGGER 88 +#define TK_VACUUM 89 +#define TK_VIEW 90 +#define TK_VIRTUAL 91 +#define TK_WITH 92 +#define TK_REINDEX 93 +#define TK_RENAME 94 +#define TK_CTIME_KW 95 +#define TK_ANY 96 +#define TK_STRING 97 +#define TK_JOIN_KW 98 
+#define TK_CONSTRAINT 99
+#define TK_DEFAULT 100
+#define TK_NULL 101
+#define TK_PRIMARY 102
+#define TK_UNIQUE 103
+#define TK_CHECK 104
+#define TK_REFERENCES 105
+#define TK_AUTOINCR 106
+#define TK_ON 107
+#define TK_INSERT 108
+#define TK_DELETE 109
+#define TK_UPDATE 110
+#define TK_SET 111
+#define TK_DEFERRABLE 112
+#define TK_FOREIGN 113
+#define TK_DROP 114
+#define TK_UNION 115
+#define TK_ALL 116
+#define TK_EXCEPT 117
+#define TK_INTERSECT 118
+#define TK_SELECT 119
+#define TK_VALUES 120
+#define TK_DISTINCT 121
+#define TK_DOT 122
+#define TK_FROM 123
+#define TK_JOIN 124
+#define TK_USING 125
+#define TK_ORDER 126
+#define TK_GROUP 127
+#define TK_HAVING 128
+#define TK_LIMIT 129
+#define TK_WHERE 130
+#define TK_INTO 131
+#define TK_FLOAT 132
+#define TK_BLOB 133
+#define TK_INTEGER 134
+#define TK_VARIABLE 135
+#define TK_CASE 136
+#define TK_WHEN 137
+#define TK_THEN 138
+#define TK_ELSE 139
+#define TK_INDEX 140
+#define TK_ALTER 141
+#define TK_ADD 142
+#define TK_TO_TEXT 143
+#define TK_TO_BLOB 144
+#define TK_TO_NUMERIC 145
+#define TK_TO_INT 146
+#define TK_TO_REAL 147
+#define TK_ISNOT 148
+#define TK_END_OF_FILE 149
+#define TK_UNCLOSED_STRING 150
+#define TK_FUNCTION 151
+#define TK_COLUMN 152
+#define TK_AGG_FUNCTION 153
+#define TK_AGG_COLUMN 154
+#define TK_UMINUS 155
+#define TK_UPLUS 156
+#define TK_REGISTER 157
+#define TK_VECTOR 158
+#define TK_SELECT_COLUMN 159
+#define TK_ASTERISK 160
+#define TK_SPAN 161
+#define TK_SPACE 162
+#define TK_ILLEGAL 163
+
+/* The token codes above must all fit in 8 bits */
+#define TKFLG_MASK 0xff
+
+/* Flags that can be added to a token code when it is not
+** being stored in a u8: */
+#define TKFLG_DONTFOLD 0x100 /* Omit constant folding optimizations */
+
+/************** End of parse.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <stddef.h>
+
+/*
+** Use a macro to replace memcpy() if compiled with SQLITE_INLINE_MEMCPY.
+** This allows better measurements of where memcpy() is used when running
+** cachegrind. But this macro version of memcpy() is very slow so it
+** should not be used in production. This is a performance measurement
+** hack only.
+*/
+#ifdef SQLITE_INLINE_MEMCPY
+# define memcpy(D,S,N) {char*xxd=(char*)(D);const char*xxs=(const char*)(S);\
+                        int xxn=(N);while(xxn-->0)*(xxd++)=*(xxs++);}
+#endif
+
+/*
+** If compiling for a processor that lacks floating point support,
+** substitute integer for floating-point
+*/
+#ifdef SQLITE_OMIT_FLOATING_POINT
+# define double sqlite_int64
+# define float sqlite_int64
+# define LONGDOUBLE_TYPE sqlite_int64
+# ifndef SQLITE_BIG_DBL
+#  define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50)
+# endif
+# define SQLITE_OMIT_DATETIME_FUNCS 1
+# define SQLITE_OMIT_TRACE 1
+# undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+# undef SQLITE_HAVE_ISNAN
+#endif
+#ifndef SQLITE_BIG_DBL
+# define SQLITE_BIG_DBL (1e99)
+#endif
+
+/*
+** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0
+** otherwise. Having this macro allows us to cause the C compiler
+** to omit code used by TEMP tables without messy #ifndef statements.
+*/
+#ifdef SQLITE_OMIT_TEMPDB
+#define OMIT_TEMPDB 1
+#else
+#define OMIT_TEMPDB 0
+#endif
+
+/*
+** The "file format" number is an integer that is incremented whenever
+** the VDBE-level file format changes. The following macros define the
+** default file format for new databases and the maximum file format
+** that the library can read.
+*/
+#define SQLITE_MAX_FILE_FORMAT 4
+#ifndef SQLITE_DEFAULT_FILE_FORMAT
+# define SQLITE_DEFAULT_FILE_FORMAT 4
+#endif
+
+/*
+** Determine whether triggers are recursive by default. This can be
+** changed at run-time using a pragma.
+*/
+#ifndef SQLITE_DEFAULT_RECURSIVE_TRIGGERS
+# define SQLITE_DEFAULT_RECURSIVE_TRIGGERS 0
+#endif
+
+/*
+** Provide a default value for SQLITE_TEMP_STORE in case it is not specified
+** on the command-line
+*/
+#ifndef SQLITE_TEMP_STORE
+# define SQLITE_TEMP_STORE 1
+# define SQLITE_TEMP_STORE_xc 1 /* Exclude from ctime.c */
+#endif
+
+/*
+** If no value has been provided for SQLITE_MAX_WORKER_THREADS, or if
+** SQLITE_TEMP_STORE is set to 3 (never use temporary files), set it
+** to zero.
+*/
+#if SQLITE_TEMP_STORE==3 || SQLITE_THREADSAFE==0
+# undef SQLITE_MAX_WORKER_THREADS
+# define SQLITE_MAX_WORKER_THREADS 0
+#endif
+#ifndef SQLITE_MAX_WORKER_THREADS
+# define SQLITE_MAX_WORKER_THREADS 8
+#endif
+#ifndef SQLITE_DEFAULT_WORKER_THREADS
+# define SQLITE_DEFAULT_WORKER_THREADS 0
+#endif
+#if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS
+# undef SQLITE_MAX_WORKER_THREADS
+# define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS
+#endif
+
+/*
+** The default initial allocation for the pagecache when using separate
+** pagecaches for each database connection. A positive number is the
+** number of pages. A negative number N means that a buffer
+** of -1024*N bytes is allocated and used for as many pages as it will hold.
+**
+** The default value of "20" was chosen to minimize the run-time of the
+** speedtest1 test program with options: --shrink-memory --reprepare
+*/
+#ifndef SQLITE_DEFAULT_PCACHE_INITSZ
+# define SQLITE_DEFAULT_PCACHE_INITSZ 20
+#endif
+
+/*
+** GCC does not define the offsetof() macro so we'll have to do it
+** ourselves.
+*/
+#ifndef offsetof
+#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD))
+#endif
+
+/*
+** Macros to compute minimum and maximum of two numbers.
+*/
+#ifndef MIN
+# define MIN(A,B) ((A)<(B)?(A):(B))
+#endif
+#ifndef MAX
+# define MAX(A,B) ((A)>(B)?(A):(B))
+#endif
+
+/*
+** Swap two objects of type TYPE.
+*/
+#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
+
+/*
+** Check to see if this machine uses EBCDIC. (Yes, believe it or
+** not, there are still machines out there that use EBCDIC.)
+*/
+#if 'A' == '\301'
+# define SQLITE_EBCDIC 1
+#else
+# define SQLITE_ASCII 1
+#endif
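+
+/* A small caution, added for illustration and not part of the upstream
+** header: MIN, MAX and SWAP are plain textual macros, so an argument may
+** be evaluated more than once and arguments with side effects misbehave:
+**
+**   int i = 0;
+**   int m = MIN(i++, 10);   // i++ expands twice; i may end up as 2
+**
+** Only simple expressions should be passed to these macros.
+*/
+
+/*
+** Integers of known sizes. These typedefs might change for architectures
+** where the sizes vary. Preprocessor macros are available so that the
+** types can be conveniently redefined at compile-time. Like this:
+**
+** cc '-DUINTPTR_TYPE=long long int' ... 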
+*/
+#ifndef UINT32_TYPE
+# ifdef HAVE_UINT32_T
+#  define UINT32_TYPE uint32_t
+# else
+#  define UINT32_TYPE unsigned int
+# endif
+#endif
+#ifndef UINT16_TYPE
+# ifdef HAVE_UINT16_T
+#  define UINT16_TYPE uint16_t
+# else
+#  define UINT16_TYPE unsigned short int
+# endif
+#endif
+#ifndef INT16_TYPE
+# ifdef HAVE_INT16_T
+#  define INT16_TYPE int16_t
+# else
+#  define INT16_TYPE short int
+# endif
+#endif
+#ifndef UINT8_TYPE
+# ifdef HAVE_UINT8_T
+#  define UINT8_TYPE uint8_t
+# else
+#  define UINT8_TYPE unsigned char
+# endif
+#endif
+#ifndef INT8_TYPE
+# ifdef HAVE_INT8_T
+#  define INT8_TYPE int8_t
+# else
+#  define INT8_TYPE signed char
+# endif
+#endif
+#ifndef LONGDOUBLE_TYPE
+# define LONGDOUBLE_TYPE long double
+#endif
+typedef sqlite_int64 i64;   /* 8-byte signed integer */
+typedef sqlite_uint64 u64;  /* 8-byte unsigned integer */
+typedef UINT32_TYPE u32;    /* 4-byte unsigned integer */
+typedef UINT16_TYPE u16;    /* 2-byte unsigned integer */
+typedef INT16_TYPE i16;     /* 2-byte signed integer */
+typedef UINT8_TYPE u8;      /* 1-byte unsigned integer */
+typedef INT8_TYPE i8;       /* 1-byte signed integer */
+
+/*
+** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value
+** that can be stored in a u32 without loss of data. The value
+** is 0x00000000ffffffff. But because of quirks of some compilers, we
+** have to specify the value in the less intuitive manner shown:
+*/
+#define SQLITE_MAX_U32 ((((u64)1)<<32)-1)
+
+/*
+** The datatype used to store estimates of the number of rows in a
+** table or index. This is an unsigned integer type. For 99.9% of
+** the world, a 32-bit integer is sufficient. But a 64-bit integer
+** can be used at compile-time if desired.
+*/
+#ifdef SQLITE_64BIT_STATS
+ typedef u64 tRowcnt;    /* 64-bit only if requested at compile-time */
+#else
+ typedef u32 tRowcnt;    /* 32-bit is the default */
+#endif
+
+/*
+** Estimated quantities used for query planning are stored as 16-bit
+** logarithms. For quantity X, the value stored is 10*log2(X). This
+** gives a possible range of values of approximately 1.0e986 to 1e-986.
+** But the allowed values are "grainy". Not every value is representable.
+** For example, quantities 16 and 17 are both represented by a LogEst
+** of 40. However, since LogEst quantities are supposed to be estimates,
+** not exact values, this imprecision is not a problem.
+**
+** "LogEst" is short for "Logarithmic Estimate".
+**
+** Examples:
+**      1 -> 0              20 -> 43          10000 -> 132
+**      2 -> 10             25 -> 46          25000 -> 146
+**      3 -> 16            100 -> 66        1000000 -> 199
+**      4 -> 20           1000 -> 99        1048576 -> 200
+**     10 -> 33           1024 -> 100    4294967296 -> 320
+**
+** The LogEst can be negative to indicate fractional values.
+** Examples:
+**
+**    0.5 -> -10           0.1 -> -33        0.0625 -> -40
+*/
+typedef INT16_TYPE LogEst;
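+
+/* A rough sketch, added for illustration (the real sqlite3LogEst() routine
+** also applies a small correction table for the fractional part of the
+** logarithm, which this version simply truncates):
+**
+**   static LogEst coarseLogEst(u64 x){
+**     LogEst e = 0;
+**     while( x>=2 ){ x >>= 1; e += 10; }   // 10 units per power of two
+**     return e;
+**   }
+**
+** coarseLogEst(1)==0 and coarseLogEst(1024)==100, and both 16 and 17 map
+** to 40, matching the table above.
+*/
+
+/*
+** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer
+*/
+#ifndef SQLITE_PTRSIZE
+# if defined(__SIZEOF_POINTER__)
+#   define SQLITE_PTRSIZE __SIZEOF_POINTER__
+# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+       defined(_M_ARM) || defined(__arm__) || defined(__x86)
+#   define SQLITE_PTRSIZE 4
+# else
+#   define SQLITE_PTRSIZE 8
+# endif
+#endif
+
+/* The uptr type is an unsigned integer large enough to hold a pointer
+*/
+#if defined(HAVE_STDINT_H)
+ typedef uintptr_t uptr;
+#elif SQLITE_PTRSIZE==4
+ typedef u32 uptr;
+#else
+ typedef u64 uptr;
+#endif
+
+/*
+** The SQLITE_WITHIN(P,S,E) macro checks to see if pointer P points to
+** something between S (inclusive) and E (exclusive). 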
+**
+** In other words, S is a buffer and E is a pointer to the first byte after
+** the end of buffer S. This macro returns true if P points to something
+** contained within the buffer S.
+*/
+#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
+
+
+/*
+** Macros to determine whether the machine is big or little endian,
+** and whether or not that determination is run-time or compile-time.
+**
+** For best performance, an attempt is made to guess at the byte-order
+** using C-preprocessor macros. If that is unsuccessful, or if
+** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined
+** at run-time.
+*/
+#ifndef SQLITE_BYTEORDER
+# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+     defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+     defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
+     defined(__arm__)
+#   define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ppc__)
+#   define SQLITE_BYTEORDER 4321
+# else
+#   define SQLITE_BYTEORDER 0
+# endif
+#endif
+#if SQLITE_BYTEORDER==4321
+# define SQLITE_BIGENDIAN 1
+# define SQLITE_LITTLEENDIAN 0
+# define SQLITE_UTF16NATIVE SQLITE_UTF16BE
+#elif SQLITE_BYTEORDER==1234
+# define SQLITE_BIGENDIAN 0
+# define SQLITE_LITTLEENDIAN 1
+# define SQLITE_UTF16NATIVE SQLITE_UTF16LE
+#else
+# ifdef SQLITE_AMALGAMATION
+  const int sqlite3one = 1;
+# else
+  extern const int sqlite3one;
+# endif
+# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0)
+# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1)
+# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE)
+#endif
+
+/*
+** Constants for the largest and smallest possible 64-bit signed integers.
+** These macros are designed to work correctly on both 32-bit and 64-bit
+** compilers.
+*/
+#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
+#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
+
+/*
+** Round up a number to the next larger multiple of 8. This is used
+** to force 8-byte alignment on 64-bit architectures.
+*/
+#define ROUND8(x) (((x)+7)&~7)
+
+/*
+** Round down to the nearest multiple of 8
+*/
+#define ROUNDDOWN8(x) ((x)&~7)
+
+/*
+** Assert that the pointer X is aligned to an 8-byte boundary. This
+** macro is used only within assert() to verify that the code gets
+** all alignment restrictions correct.
+**
+** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the
+** underlying malloc() implementation might return us 4-byte aligned
+** pointers. In that case, only verify 4-byte alignment.
+*/
+#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
+# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0)
+#else
+# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0)
+#endif
+
+/*
+** Disable MMAP on platforms where it is known to not work
+*/
+#if defined(__OpenBSD__) || defined(__QNXNTO__)
+# undef SQLITE_MAX_MMAP_SIZE
+# define SQLITE_MAX_MMAP_SIZE 0
+#endif
+
+/*
+** Default maximum size of memory used by memory-mapped I/O in the VFS
+*/
+#ifdef __APPLE__
+# include <TargetConditionals.h>
+#endif
+#ifndef SQLITE_MAX_MMAP_SIZE
+# if defined(__linux__) \
+  || defined(_WIN32) \
+  || (defined(__APPLE__) && defined(__MACH__)) \
+  || defined(__sun) \
+  || defined(__FreeBSD__) \
+  || defined(__DragonFly__)
+#   define SQLITE_MAX_MMAP_SIZE 0x7fff0000  /* 2147418112 */
+# else
+#   define SQLITE_MAX_MMAP_SIZE 0
+# endif
+# define SQLITE_MAX_MMAP_SIZE_xc 1 /* exclude from ctime.c */
+#endif
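+
+/* An illustration, added here: memory-mapped I/O can be capped or disabled
+** entirely at compile time, e.g.
+**
+**   cc -DSQLITE_MAX_MMAP_SIZE=0 ...      // build with no mmap support
+**
+** At run-time the active limit is adjusted with "PRAGMA mmap_size=N",
+** which can never exceed SQLITE_MAX_MMAP_SIZE.
+*/
+
+/*
+** The default MMAP_SIZE is zero on all platforms. 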
Or, even if a larger
+** default MMAP_SIZE is specified at compile-time, make sure that it does
+** not exceed the maximum mmap size.
+*/
+#ifndef SQLITE_DEFAULT_MMAP_SIZE
+# define SQLITE_DEFAULT_MMAP_SIZE 0
+# define SQLITE_DEFAULT_MMAP_SIZE_xc 1  /* Exclude from ctime.c */
+#endif
+#if SQLITE_DEFAULT_MMAP_SIZE>SQLITE_MAX_MMAP_SIZE
+# undef SQLITE_DEFAULT_MMAP_SIZE
+# define SQLITE_DEFAULT_MMAP_SIZE SQLITE_MAX_MMAP_SIZE
+#endif
+
+/*
+** Only one of SQLITE_ENABLE_STAT3 or SQLITE_ENABLE_STAT4 can be defined.
+** Priority is given to SQLITE_ENABLE_STAT4. If either is defined, also
+** define SQLITE_ENABLE_STAT3_OR_STAT4
+*/
+#ifdef SQLITE_ENABLE_STAT4
+# undef SQLITE_ENABLE_STAT3
+# define SQLITE_ENABLE_STAT3_OR_STAT4 1
+#elif SQLITE_ENABLE_STAT3
+# define SQLITE_ENABLE_STAT3_OR_STAT4 1
+#elif SQLITE_ENABLE_STAT3_OR_STAT4
+# undef SQLITE_ENABLE_STAT3_OR_STAT4
+#endif
+
+/*
+** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not
+** the Select query generator tracing logic is turned on.
+*/
+#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_SELECTTRACE)
+# define SELECTTRACE_ENABLED 1
+#else
+# define SELECTTRACE_ENABLED 0
+#endif
+
+/*
+** An instance of the following structure is used to store the busy-handler
+** callback for a given sqlite handle.
+**
+** The sqlite.busyHandler member of the sqlite struct contains the busy
+** callback for the database handle. Each pager opened via the sqlite
+** handle is passed a pointer to sqlite.busyHandler. The busy-handler
+** callback is currently invoked only from within pager.c.
+*/
+typedef struct BusyHandler BusyHandler;
+struct BusyHandler {
+  int (*xFunc)(void *,int);  /* The busy callback */
+  void *pArg;                /* First arg to busy callback */
+  int nBusy;                 /* Incremented with each busy call */
+};
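+
+/* An illustrative sketch, not from the original source: a busy callback
+** returns non-zero to ask the pager to retry the lock and zero to give
+** up, so a handler that retries ten times might look like
+**
+**   static int xBusy(void *pArg, int nBusy){
+**     return nBusy<10;   // keep retrying while under 10 attempts
+**   }
+*/
+
+/*
+** Name of the master database table. The master database table
+** is a special table that holds the names and attributes of all
+** user tables and indices.
+*/
+#define MASTER_NAME "sqlite_master"
+#define TEMP_MASTER_NAME "sqlite_temp_master"
+
+/*
+** The root-page of the master database table.
+*/
+#define MASTER_ROOT 1
+
+/*
+** The name of the schema table.
+*/
+#define SCHEMA_TABLE(x) ((!OMIT_TEMPDB)&&(x==1)?TEMP_MASTER_NAME:MASTER_NAME)
+
+/*
+** A convenience macro that returns the number of elements in
+** an array.
+*/
+#define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0])))
+
+/*
+** Determine if the argument is a power of two
+*/
+#define IsPowerOfTwo(X) (((X)&((X)-1))==0)
+
+/*
+** The following value as a destructor means to use sqlite3DbFree().
+** The sqlite3DbFree() routine requires two parameters instead of the
+** one parameter that destructors normally want. So we have to introduce
+** this magic value that the code knows to handle differently. Any
+** pointer will work here as long as it is distinct from SQLITE_STATIC
+** and SQLITE_TRANSIENT.
+*/
+#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3MallocSize)
+
+/*
+** When SQLITE_OMIT_WSD is defined, it means that the target platform does
+** not support Writable Static Data (WSD) such as global and static variables.
+** All variables must either be on the stack or dynamically allocated from
+** the heap. When WSD is unsupported, the variable declarations scattered
+** throughout the SQLite code must become constants instead. The SQLITE_WSD
+** macro is used for this purpose. And instead of referencing the variable
+** directly, we use its constant as a key to look up the run-time allocated
+** buffer that holds the real variable. 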
The constant is also the initializer +** for the run-time allocated buffer. +** +** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL +** macros become no-ops and have zero performance impact. +*/ +#ifdef SQLITE_OMIT_WSD + #define SQLITE_WSD const + #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v))) + #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config) +SQLITE_API int sqlite3_wsd_init(int N, int J); +SQLITE_API void *sqlite3_wsd_find(void *K, int L); +#else + #define SQLITE_WSD + #define GLOBAL(t,v) v + #define sqlite3GlobalConfig sqlite3Config +#endif + +/* +** The following macros are used to suppress compiler warnings and to +** make it clear to human readers when a function parameter is deliberately +** left unused within the body of a function. This usually happens when +** a function is called via a function pointer. For example the +** implementation of an SQL aggregate step callback may not use the +** parameter indicating the number of arguments passed to the aggregate, +** if it knows that this is enforced elsewhere. +** +** When a function parameter is not used at all within the body of a function, +** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. +** However, these macros may also be used to suppress warnings related to +** parameters that may or may not be used depending on compilation options. +** For example those parameters only used in assert() statements. In these +** cases the parameters are named as per the usual conventions. +*/ +#define UNUSED_PARAMETER(x) (void)(x) +#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) + +/* +** Forward references to structures +*/ +typedef struct AggInfo AggInfo; +typedef struct AuthContext AuthContext; +typedef struct AutoincInfo AutoincInfo; +typedef struct Bitvec Bitvec; +typedef struct CollSeq CollSeq; +typedef struct Column Column; +typedef struct Db Db; +typedef struct Schema Schema; +typedef struct Expr Expr; +typedef struct ExprList ExprList; +typedef struct ExprSpan ExprSpan; +typedef struct FKey FKey; +typedef struct FuncDestructor FuncDestructor; +typedef struct FuncDef FuncDef; +typedef struct FuncDefHash FuncDefHash; +typedef struct IdList IdList; +typedef struct Index Index; +typedef struct IndexSample IndexSample; +typedef struct KeyClass KeyClass; +typedef struct KeyInfo KeyInfo; +typedef struct Lookaside Lookaside; +typedef struct LookasideSlot LookasideSlot; +typedef struct Module Module; +typedef struct NameContext NameContext; +typedef struct Parse Parse; +typedef struct PreUpdate PreUpdate; +typedef struct PrintfArguments PrintfArguments; +typedef struct RowSet RowSet; +typedef struct Savepoint Savepoint; +typedef struct Select Select; +typedef struct SQLiteThread SQLiteThread; +typedef struct SelectDest SelectDest; +typedef struct SrcList SrcList; +typedef struct StrAccum StrAccum; +typedef struct Table Table; +typedef struct TableLock TableLock; +typedef struct Token Token; +typedef struct TreeView TreeView; +typedef struct Trigger Trigger; +typedef struct TriggerPrg TriggerPrg; +typedef struct TriggerStep TriggerStep; +typedef struct UnpackedRecord UnpackedRecord; +typedef struct VTable VTable; +typedef struct VtabCtx VtabCtx; +typedef struct Walker Walker; +typedef struct WhereInfo WhereInfo; +typedef struct With With; + +/* A VList object records a mapping between parameters/variables/wildcards +** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer +** variable number associated with that 
parameter. See the format description
+** on the sqlite3VListAdd() routine for more information. A VList is really
+** just an array of integers.
+*/
+typedef int VList;
+
+/*
+** Defer sourcing vdbe.h and btree.h until after the "u8" and
+** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
+** pointer types (i.e. FuncDef) defined above.
+*/
+/************** Include btree.h in the middle of sqliteInt.h *****************/
+/************** Begin file btree.h *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the SQLite B-Tree file
+** subsystem. See comments in the source code for a detailed description
+** of what each interface routine does.
+*/
+#ifndef SQLITE_BTREE_H
+#define SQLITE_BTREE_H
+
+/* TODO: This definition is just included so other modules compile. It
+** needs to be revisited.
+*/
+#define SQLITE_N_BTREE_META 16
+
+/*
+** If defined as non-zero, auto-vacuum is enabled by default. Otherwise
+** it must be turned on for each database using "PRAGMA auto_vacuum = 1".
+*/
+#ifndef SQLITE_DEFAULT_AUTOVACUUM
+ #define SQLITE_DEFAULT_AUTOVACUUM 0
+#endif
+
+#define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */
+#define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */
+#define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */
+
+/*
+** Forward declarations of structures
+*/
+typedef struct Btree Btree;
+typedef struct BtCursor BtCursor;
+typedef struct BtShared BtShared;
+typedef struct BtreePayload BtreePayload;
+
+
+SQLITE_PRIVATE int sqlite3BtreeOpen(
+  sqlite3_vfs *pVfs,       /* VFS to use with this b-tree */
+  const char *zFilename,   /* Name of database file to open */
+  sqlite3 *db,             /* Associated database connection */
+  Btree **ppBtree,         /* Return open Btree* here */
+  int flags,               /* Flags */
+  int vfsFlags             /* Flags passed through to VFS open */
+);
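+
+/* An illustrative sketch, not from the original source, with the vfsFlags
+** values chosen only for the example: opening the main database b-tree
+** might look like
+**
+**   Btree *pBt = 0;
+**   int rc = sqlite3BtreeOpen(db->pVfs, "test.db", db, &pBt, 0,
+**                SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|
+**                SQLITE_OPEN_MAIN_DB);
+*/
+
+/* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the
+** following values.
+**
+** NOTE: These values must match the corresponding PAGER_ values in
+** pager.h. 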
+*/ +#define BTREE_OMIT_JOURNAL 1 /* Do not create or use a rollback journal */ +#define BTREE_MEMORY 2 /* This is an in-memory DB */ +#define BTREE_SINGLE 4 /* The file contains at most 1 b-tree */ +#define BTREE_UNORDERED 8 /* Use of a hash implementation is OK */ + +SQLITE_PRIVATE int sqlite3BtreeClose(Btree*); +SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int); +SQLITE_PRIVATE int sqlite3BtreeSetSpillSize(Btree*,int); +#if SQLITE_MAX_MMAP_SIZE>0 +SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64); +#endif +SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(Btree*,unsigned); +SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix); +SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*); +SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree*,int); +SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree*); +SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int); +SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree*); +SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p); +SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int); +SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *); +SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int); +SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster); +SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int); +SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*); +SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int,int); +SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int); +SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, int*, int flags); +SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*); +SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*); +SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*); +SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *)); +SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree); +#ifndef SQLITE_OMIT_SHARED_CACHE +SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock); +#endif +SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int); + +SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *); +SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *); +SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *); + +SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *); + +/* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR +** of the flags shown below. +** +** Every SQLite table must have either BTREE_INTKEY or BTREE_BLOBKEY set. +** With BTREE_INTKEY, the table key is a 64-bit integer and arbitrary data +** is stored in the leaves. (BTREE_INTKEY is used for SQL tables.) With +** BTREE_BLOBKEY, the key is an arbitrary BLOB and no content is stored +** anywhere - the key is the content. (BTREE_BLOBKEY is used for SQL +** indices.) +*/ +#define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */ +#define BTREE_BLOBKEY 2 /* Table has keys only - no data */ + +SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*); +SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*); +SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*); +SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree*, int, int); + +SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue); +SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value); + +SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p); + +/* +** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta +** should be one of the following values. 
The integer values are assigned
+** to constants so that the offset of the corresponding field in an
+** SQLite database header may be found using the following formula:
+**
+**   offset = 36 + (idx * 4)
+**
+** For example, the free-page-count field is located at byte offset 36 of
+** the database file header. The incr-vacuum-flag field is located at
+** byte offset 64 (== 36+4*7).
+**
+** The BTREE_DATA_VERSION value is not really a value stored in the header.
+** It is a read-only number computed by the pager. But we merge it with
+** the header value access routines since its access pattern is the same.
+** Call it a "virtual meta value".
+*/
+#define BTREE_FREE_PAGE_COUNT     0
+#define BTREE_SCHEMA_VERSION      1
+#define BTREE_FILE_FORMAT         2
+#define BTREE_DEFAULT_CACHE_SIZE  3
+#define BTREE_LARGEST_ROOT_PAGE   4
+#define BTREE_TEXT_ENCODING       5
+#define BTREE_USER_VERSION        6
+#define BTREE_INCR_VACUUM         7
+#define BTREE_APPLICATION_ID      8
+#define BTREE_DATA_VERSION        15  /* A virtual meta-value */
+
+/*
+** Kinds of hints that can be passed into the sqlite3BtreeCursorHint()
+** interface.
+**
+** BTREE_HINT_RANGE (arguments: Expr*, Mem*)
+**
+** The first argument is an Expr* (which is guaranteed to be constant for
+** the lifetime of the cursor) that defines constraints on which rows
+** might be fetched with this cursor. The Expr* tree may contain
+** TK_REGISTER nodes that refer to values stored in the array of registers
+** passed as the second parameter. In other words, if Expr.op==TK_REGISTER
+** then the value of the node is the value in Mem[pExpr.iTable]. Any
+** TK_COLUMN node in the expression tree refers to the Expr.iColumn-th
+** column of the b-tree of the cursor. The Expr tree will not contain
+** any function calls nor subqueries nor references to b-trees other than
+** the cursor being hinted.
+**
+** The design of the _RANGE hint is to aid b-tree implementations that try
+** to prefetch content from remote machines - to provide those
+** implementations with limits on what needs to be prefetched and thereby
+** reduce network bandwidth.
+**
+** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by
+** standard SQLite. The other hints are provided for extensions that use
+** the SQLite parser and code generator but substitute their own storage
+** engine.
+*/
+#define BTREE_HINT_RANGE 0       /* Range constraints on queries */
+
+/*
+** Values that may be OR'd together to form the argument to the
+** BTREE_HINT_FLAGS hint for sqlite3BtreeCursorHint():
+**
+** The BTREE_BULKLOAD flag is set on index cursors when the index is going
+** to be filled with content that is already in sorted order.
+**
+** The BTREE_SEEK_EQ flag is set on cursors that will get OP_SeekGE or
+** OP_SeekLE opcodes for a range search, but where the range of entries
+** selected will all have the same key. In other words, the cursor will
+** be used only for equality key searches.
+**
+*/
+#define BTREE_BULKLOAD 0x00000001  /* Used to fill an index in sorted order */
+#define BTREE_SEEK_EQ  0x00000002  /* EQ seeks only - no range seeks */
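+
+/* A usage sketch, added for illustration: when the code generator knows an
+** index will be populated in sorted order it might tag the cursor like so:
+**
+**   sqlite3BtreeCursorHintFlags(pCur, BTREE_BULKLOAD);
+*/
+
+/*
+** Flags passed as the third argument to sqlite3BtreeCursor().
+**
+** For read-only cursors the wrFlag argument is always zero. For read-write
+** cursors it may be set to either (BTREE_WRCSR|BTREE_FORDELETE) or just
+** (BTREE_WRCSR). 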
If the BTREE_FORDELETE bit is set, then the cursor will
+** only be used by SQLite for the following:
+**
+**   * to seek to and then delete specific entries, and/or
+**
+**   * to read values that will be used to create keys that other
+**     BTREE_FORDELETE cursors will seek to and delete.
+**
+** The BTREE_FORDELETE flag is an optimization hint. It is not used
+** by this, the native b-tree engine of SQLite, but it is available to
+** alternative storage engines that might be substituted in place of this
+** b-tree system. For alternative storage engines in which a delete of
+** the main table row automatically deletes corresponding index rows,
+** the FORDELETE flag hint allows those alternative storage engines to
+** skip a lot of work. Namely: FORDELETE cursors may treat all SEEK
+** and DELETE operations as no-ops, and any READ operation against a
+** FORDELETE cursor may return a null row: 0x01 0x00.
+*/
+#define BTREE_WRCSR     0x00000004     /* read-write cursor */
+#define BTREE_FORDELETE 0x00000008     /* Cursor is for seek/delete only */
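+
+/* An illustrative sketch, not from the original source: a cursor that will
+** only locate and delete index entries can pass both flags, letting an
+** alternative storage engine skip unnecessary work:
+**
+**   rc = sqlite3BtreeCursor(p, iIdxRoot, BTREE_WRCSR|BTREE_FORDELETE,
+**                           pKeyInfo, pCur);
+*/
+
+SQLITE_PRIVATE int sqlite3BtreeCursor(
+  Btree*,                              /* BTree containing table to open */
+  int iTable,                          /* Index of root page */
+  int wrFlag,                          /* 1 for writing. 0 for read-only */
+  struct KeyInfo*,                     /* First argument to compare function */
+  BtCursor *pCursor                    /* Space to write cursor structure */
+);
+SQLITE_PRIVATE int sqlite3BtreeCursorSize(void);
+SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*);
+SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor*, unsigned);
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor*, int, ...);
+#endif
+
+SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
+  BtCursor*,
+  UnpackedRecord *pUnKey,
+  i64 intKey,
+  int bias,
+  int *pRes
+);
+SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*);
+SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags);
+
+/* Allowed flags for sqlite3BtreeDelete() and sqlite3BtreeInsert() */
+#define BTREE_SAVEPOSITION 0x02  /* Leave cursor pointing at NEXT or PREV */
+#define BTREE_AUXDELETE    0x04  /* not the primary delete operation */
+#define BTREE_APPEND       0x08  /* Insert is likely an append */
+
+/* An instance of the BtreePayload object describes the content of a single
+** entry in either an index or table btree.
+**
+** Index btrees (used for indexes and also WITHOUT ROWID tables) contain
+** an arbitrary key and no data. These btrees have pKey,nKey set to their
+** key and pData,nData,nZero set to zero.
+**
+** Table btrees (used for rowid tables) contain an integer rowid used as
+** the key and passed in the nKey field. The pKey field is zero.
+** pData,nData hold the content of the new entry. nZero extra zero bytes
+** are appended to the end of the content when constructing the entry.
+**
+** This object is used to pass information into sqlite3BtreeInsert(). The
+** same information used to be passed as five separate parameters. But placing
+** the information into this object helps to keep the interface more
+** organized and understandable, and it also helps the resulting code to
+** run a little faster by using fewer registers for parameter passing.
+*/
+struct BtreePayload {
+  const void *pKey;        /* Key content for indexes. NULL for tables */
+  sqlite3_int64 nKey;      /* Size of pKey for indexes. PRIMARY KEY for tabs */
+  const void *pData;       /* Data for tables. 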
NULL for indexes */
+  struct Mem *aMem;        /* First of nMem values in the unpacked pKey */
+  u16 nMem;                /* Number of aMem[] values. Might be zero */
+  int nData;               /* Size of pData. 0 if none. */
+  int nZero;               /* Extra zero data appended after pData,nData */
+};
+
+SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload,
+                       int flags, int seekResult);
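+
+/* An illustrative sketch, not part of the upstream header: an insert into
+** a rowid table fills in only the integer key and the record image:
+**
+**   BtreePayload x;
+**   memset(&x, 0, sizeof(x));
+**   x.nKey = iRowid;     // integer key for a table btree
+**   x.pData = pRec;      // serialized record content
+**   x.nData = nRec;
+**   rc = sqlite3BtreeInsert(pCur, &x, 0, 0);
+*/
+SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes);
+SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes);
+SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes);
+SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes);
+SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
+SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
+
+SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
+SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
+
+#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *);
+#endif
+SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *);
+SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion);
+SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask);
+SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt);
+SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void);
+
+#ifndef NDEBUG
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*);
+#endif
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor*);
+
+#ifndef SQLITE_OMIT_BTREECOUNT
+SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *, i64 *);
+#endif
+
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int sqlite3BtreeCursorInfo(BtCursor*, int*, int);
+SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*);
+#endif
+
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
+#endif
+
+/*
+** If we are not using shared cache, then there is no need to
+** use mutexes to access the BtShared structures. So make the
+** Enter and Leave procedures no-ops.
+*/
+#ifndef SQLITE_OMIT_SHARED_CACHE
+SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*);
+SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*);
+SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*);
+SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*);
+SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree*);
+#else
+# define sqlite3BtreeEnter(X)
+# define sqlite3BtreeEnterAll(X)
+# define sqlite3BtreeSharable(X) 0
+# define sqlite3BtreeEnterCursor(X)
+# define sqlite3BtreeConnectionCount(X) 1
+#endif
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE
+SQLITE_PRIVATE void sqlite3BtreeLeave(Btree*);
+SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor*);
+SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3*);
+#ifndef NDEBUG
+  /* These routines are used inside assert() statements only. 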
*/
+SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree*);
+SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3*);
+SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*);
+#endif
+#else
+
+# define sqlite3BtreeLeave(X)
+# define sqlite3BtreeLeaveCursor(X)
+# define sqlite3BtreeLeaveAll(X)
+
+# define sqlite3BtreeHoldsMutex(X) 1
+# define sqlite3BtreeHoldsAllMutexes(X) 1
+# define sqlite3SchemaMutexHeld(X,Y,Z) 1
+#endif
+
+
+#endif /* SQLITE_BTREE_H */
+
+/************** End of btree.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include vdbe.h in the middle of sqliteInt.h ******************/
+/************** Begin file vdbe.h ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Header file for the Virtual DataBase Engine (VDBE)
+**
+** This header defines the interface to the virtual database engine
+** or VDBE. The VDBE implements an abstract machine that runs a
+** simple program to access and modify the underlying database.
+*/
+#ifndef SQLITE_VDBE_H
+#define SQLITE_VDBE_H
+/* #include <stdio.h> */
+
+/*
+** A single VDBE is an opaque structure named "Vdbe". Only routines
+** in the source file sqliteVdbe.c are allowed to see the insides
+** of this structure.
+*/
+typedef struct Vdbe Vdbe;
+
+/*
+** The names of the following types declared in vdbeInt.h are required
+** for the VdbeOp definition.
+*/
+typedef struct Mem Mem;
+typedef struct SubProgram SubProgram;
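+
+/* A rough sketch of the lifecycle, added for illustration (see the
+** prototypes later in this header):
+**
+**   Vdbe *v = sqlite3VdbeCreate(pParse);       // allocate a program
+**   sqlite3VdbeAddOp2(v, OP_Integer, 1, 2);    // r[2] = 1
+**   sqlite3VdbeAddOp2(v, OP_ResultRow, 2, 1);  // emit r[2] as a row
+**   ...
+**   sqlite3VdbeMakeReady(v, pParse);           // prepare for execution
+*/
+
+/*
+** A single instruction of the virtual machine has an opcode
+** and as many as three operands. 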
The instruction is recorded
+** as an instance of the following structure:
+*/
+struct VdbeOp {
+  u8 opcode;          /* What operation to perform */
+  signed char p4type; /* One of the P4_xxx constants for p4 */
+  u16 p5;             /* Fifth parameter is an unsigned 16-bit integer */
+  int p1;             /* First operand */
+  int p2;             /* Second parameter (often the jump destination) */
+  int p3;             /* The third parameter */
+  union p4union {     /* fourth parameter */
+    int i;                 /* Integer value if p4type==P4_INT32 */
+    void *p;               /* Generic pointer */
+    char *z;               /* Pointer to data for string (char array) types */
+    i64 *pI64;             /* Used when p4type is P4_INT64 */
+    double *pReal;         /* Used when p4type is P4_REAL */
+    FuncDef *pFunc;        /* Used when p4type is P4_FUNCDEF */
+    sqlite3_context *pCtx; /* Used when p4type is P4_FUNCCTX */
+    CollSeq *pColl;        /* Used when p4type is P4_COLLSEQ */
+    Mem *pMem;             /* Used when p4type is P4_MEM */
+    VTable *pVtab;         /* Used when p4type is P4_VTAB */
+    KeyInfo *pKeyInfo;     /* Used when p4type is P4_KEYINFO */
+    int *ai;               /* Used when p4type is P4_INTARRAY */
+    SubProgram *pProgram;  /* Used when p4type is P4_SUBPROGRAM */
+    Table *pTab;           /* Used when p4type is P4_TABLE */
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+    Expr *pExpr;           /* Used when p4type is P4_EXPR */
+#endif
+    int (*xAdvance)(BtCursor *, int *);
+  } p4;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+  char *zComment;          /* Comment to improve readability */
+#endif
+#ifdef VDBE_PROFILE
+  u32 cnt;                 /* Number of times this instruction was executed */
+  u64 cycles;              /* Total time spent executing this instruction */
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+  int iSrcLine;            /* Source-code line that generated this opcode */
+#endif
+};
+typedef struct VdbeOp VdbeOp;
+
+
+/*
+** A sub-routine used to implement a trigger program.
+*/
+struct SubProgram {
+  VdbeOp *aOp;                  /* Array of opcodes for sub-program */
+  int nOp;                      /* Elements in aOp[] */
+  int nMem;                     /* Number of memory cells required */
+  int nCsr;                     /* Number of cursors required */
+  void *token;                  /* id that may be used to detect recursive triggers */
+  SubProgram *pNext;            /* Next sub-program already visited */
+};
+
+/*
+** A smaller version of VdbeOp used for the VdbeAddOpList() function because
+** it takes up less space. 
+*/ +struct VdbeOpList { + u8 opcode; /* What operation to perform */ + signed char p1; /* First operand */ + signed char p2; /* Second parameter (often the jump destination) */ + signed char p3; /* Third parameter */ +}; +typedef struct VdbeOpList VdbeOpList; + +/* +** Allowed values of VdbeOp.p4type +*/ +#define P4_NOTUSED 0 /* The P4 parameter is not used */ +#define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ +#define P4_STATIC (-2) /* Pointer to a static string */ +#define P4_COLLSEQ (-3) /* P4 is a pointer to a CollSeq structure */ +#define P4_FUNCDEF (-4) /* P4 is a pointer to a FuncDef structure */ +#define P4_KEYINFO (-5) /* P4 is a pointer to a KeyInfo structure */ +#define P4_EXPR (-6) /* P4 is a pointer to an Expr tree */ +#define P4_MEM (-7) /* P4 is a pointer to a Mem* structure */ +#define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */ +#define P4_VTAB (-8) /* P4 is a pointer to an sqlite3_vtab structure */ +#define P4_REAL (-9) /* P4 is a 64-bit floating point value */ +#define P4_INT64 (-10) /* P4 is a 64-bit signed integer */ +#define P4_INT32 (-11) /* P4 is a 32-bit signed integer */ +#define P4_INTARRAY (-12) /* P4 is a vector of 32-bit integers */ +#define P4_SUBPROGRAM (-13) /* P4 is a pointer to a SubProgram structure */ +#define P4_ADVANCE (-14) /* P4 is a pointer to BtreeNext() or BtreePrev() */ +#define P4_TABLE (-15) /* P4 is a pointer to a Table structure */ +#define P4_FUNCCTX (-16) /* P4 is a pointer to an sqlite3_context object */ + +/* Error message codes for OP_Halt */ +#define P5_ConstraintNotNull 1 +#define P5_ConstraintUnique 2 +#define P5_ConstraintCheck 3 +#define P5_ConstraintFK 4 + +/* +** The Vdbe.aColName array contains 5n Mem structures, where n is the +** number of columns of data returned by the statement. +*/ +#define COLNAME_NAME 0 +#define COLNAME_DECLTYPE 1 +#define COLNAME_DATABASE 2 +#define COLNAME_TABLE 3 +#define COLNAME_COLUMN 4 +#ifdef SQLITE_ENABLE_COLUMN_METADATA +# define COLNAME_N 5 /* Number of COLNAME_xxx symbols */ +#else +# ifdef SQLITE_OMIT_DECLTYPE +# define COLNAME_N 1 /* Store only the name */ +# else +# define COLNAME_N 2 /* Store the name and decltype */ +# endif +#endif + +/* +** The following macro converts a relative address in the p2 field +** of a VdbeOp structure into a negative number so that +** sqlite3VdbeAddOpList() knows that the address is relative. Calling +** the macro again restores the address. +*/ +#define ADDR(X) (-1-(X)) + +/* +** The makefile scans the vdbe.c source file and creates the "opcodes.h" +** header file that defines a number for each opcode used by the VDBE. +*/ +/************** Include opcodes.h in the middle of vdbe.h ********************/ +/************** Begin file opcodes.h *****************************************/ +/* Automatically generated. 
Do not edit */
+/* See the tool/mkopcodeh.tcl script for details */
+#define OP_Savepoint      0
+#define OP_AutoCommit     1
+#define OP_Transaction    2
+#define OP_SorterNext     3
+#define OP_PrevIfOpen     4
+#define OP_NextIfOpen     5
+#define OP_Prev           6
+#define OP_Next           7
+#define OP_Checkpoint     8
+#define OP_JournalMode    9
+#define OP_Vacuum        10
+#define OP_VFilter       11 /* synopsis: iplan=r[P3] zplan='P4' */
+#define OP_VUpdate       12 /* synopsis: data=r[P3@P2] */
+#define OP_Goto          13
+#define OP_Gosub         14
+#define OP_InitCoroutine 15
+#define OP_Yield         16
+#define OP_MustBeInt     17
+#define OP_Jump          18
+#define OP_Not           19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */
+#define OP_Once          20
+#define OP_If            21
+#define OP_IfNot         22
+#define OP_SeekLT        23 /* synopsis: key=r[P3@P4] */
+#define OP_SeekLE        24 /* synopsis: key=r[P3@P4] */
+#define OP_SeekGE        25 /* synopsis: key=r[P3@P4] */
+#define OP_SeekGT        26 /* synopsis: key=r[P3@P4] */
+#define OP_Or            27 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
+#define OP_And           28 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
+#define OP_NoConflict    29 /* synopsis: key=r[P3@P4] */
+#define OP_NotFound      30 /* synopsis: key=r[P3@P4] */
+#define OP_Found         31 /* synopsis: key=r[P3@P4] */
+#define OP_SeekRowid     32 /* synopsis: intkey=r[P3] */
+#define OP_NotExists     33 /* synopsis: intkey=r[P3] */
+#define OP_IsNull        34 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
+#define OP_NotNull       35 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
+#define OP_Ne            36 /* same as TK_NE, synopsis: IF r[P3]!=r[P1] */
+#define OP_Eq            37 /* same as TK_EQ, synopsis: IF r[P3]==r[P1] */
+#define OP_Gt            38 /* same as TK_GT, synopsis: IF r[P3]>r[P1] */
+#define OP_Le            39 /* same as TK_LE, synopsis: IF r[P3]<=r[P1] */
+#define OP_Lt            40 /* same as TK_LT, synopsis: IF r[P3]<r[P1] */
+#define OP_Ge            41 /* same as TK_GE, synopsis: IF r[P3]>=r[P1] */
+#define OP_ElseNotEq     42 /* same as TK_ESCAPE */
+#define OP_BitAnd        43 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
+#define OP_BitOr         44 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
+#define OP_ShiftLeft     45 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
+#define OP_ShiftRight    46 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */
+#define OP_Add           47 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
+#define OP_Subtract      48 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
+#define OP_Multiply      49 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
+#define OP_Divide        50 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
+#define OP_Remainder     51 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
+#define OP_Concat        52 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
+#define OP_Last          53
+#define OP_BitNot        54 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */
+#define OP_SorterSort    55
+#define OP_Sort          56
+#define OP_Rewind        57
+#define OP_IdxLE         58 /* synopsis: key=r[P3@P4] */
+#define OP_IdxGT         59 /* synopsis: key=r[P3@P4] */
+#define OP_IdxLT         60 /* synopsis: key=r[P3@P4] */
+#define OP_IdxGE         61 /* synopsis: key=r[P3@P4] */
+#define OP_RowSetRead    62 /* synopsis: r[P3]=rowset(P1) */
+#define OP_RowSetTest    63 /* synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Program       64
+#define OP_FkIfZero      65 /* synopsis: if fkctr[P1]==0 goto P2 */
+#define OP_IfPos         66 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero     67 /* synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
+#define OP_DecrJumpZero  68 /* synopsis: if (--r[P1])==0 goto P2 */
+#define OP_IncrVacuum    69
+#define OP_VNext         70
+#define OP_Init          71 /* synopsis: Start at P2 */
+#define OP_Return        72
+#define OP_EndCoroutine  73
+#define OP_HaltIfNull    74 /* synopsis: if r[P3]=null halt */
+#define OP_Halt          75
+#define 
OP_Integer 76 /* synopsis: r[P2]=P1 */ +#define OP_Int64 77 /* synopsis: r[P2]=P4 */ +#define OP_String 78 /* synopsis: r[P2]='P4' (len=P1) */ +#define OP_Null 79 /* synopsis: r[P2..P3]=NULL */ +#define OP_SoftNull 80 /* synopsis: r[P1]=NULL */ +#define OP_Blob 81 /* synopsis: r[P2]=P4 (len=P1) */ +#define OP_Variable 82 /* synopsis: r[P2]=parameter(P1,P4) */ +#define OP_Move 83 /* synopsis: r[P2@P3]=r[P1@P3] */ +#define OP_Copy 84 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ +#define OP_SCopy 85 /* synopsis: r[P2]=r[P1] */ +#define OP_IntCopy 86 /* synopsis: r[P2]=r[P1] */ +#define OP_ResultRow 87 /* synopsis: output=r[P1@P2] */ +#define OP_CollSeq 88 +#define OP_Function0 89 /* synopsis: r[P3]=func(r[P2@P5]) */ +#define OP_Function 90 /* synopsis: r[P3]=func(r[P2@P5]) */ +#define OP_AddImm 91 /* synopsis: r[P1]=r[P1]+P2 */ +#define OP_RealAffinity 92 +#define OP_Cast 93 /* synopsis: affinity(r[P1]) */ +#define OP_Permutation 94 +#define OP_Compare 95 /* synopsis: r[P1@P3] <-> r[P2@P3] */ +#define OP_Column 96 /* synopsis: r[P3]=PX */ +#define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */ +#define OP_Affinity 98 /* synopsis: affinity(r[P1@P2]) */ +#define OP_MakeRecord 99 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ +#define OP_Count 100 /* synopsis: r[P2]=count() */ +#define OP_ReadCookie 101 +#define OP_SetCookie 102 +#define OP_ReopenIdx 103 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenRead 104 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenWrite 105 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenAutoindex 106 /* synopsis: nColumn=P2 */ +#define OP_OpenEphemeral 107 /* synopsis: nColumn=P2 */ +#define OP_SorterOpen 108 +#define OP_SequenceTest 109 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ +#define OP_OpenPseudo 110 /* synopsis: P3 columns in r[P2] */ +#define OP_Close 111 +#define OP_ColumnsUsed 112 +#define OP_Sequence 113 /* synopsis: r[P2]=cursor[P1].ctr++ */ +#define OP_NewRowid 114 /* synopsis: r[P2]=rowid */ +#define OP_Insert 115 /* synopsis: intkey=r[P3] data=r[P2] */ +#define OP_InsertInt 116 /* synopsis: intkey=P3 data=r[P2] */ +#define OP_Delete 117 +#define OP_ResetCount 118 +#define OP_SorterCompare 119 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ +#define OP_SorterData 120 /* synopsis: r[P2]=data */ +#define OP_RowData 121 /* synopsis: r[P2]=data */ +#define OP_Rowid 122 /* synopsis: r[P2]=rowid */ +#define OP_NullRow 123 +#define OP_SorterInsert 124 /* synopsis: key=r[P2] */ +#define OP_IdxInsert 125 /* synopsis: key=r[P2] */ +#define OP_IdxDelete 126 /* synopsis: key=r[P2@P3] */ +#define OP_Seek 127 /* synopsis: Move P3 to P1.rowid */ +#define OP_IdxRowid 128 /* synopsis: r[P2]=rowid */ +#define OP_Destroy 129 +#define OP_Clear 130 +#define OP_ResetSorter 131 +#define OP_Real 132 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ +#define OP_CreateIndex 133 /* synopsis: r[P2]=root iDb=P1 */ +#define OP_CreateTable 134 /* synopsis: r[P2]=root iDb=P1 */ +#define OP_ParseSchema 135 +#define OP_LoadAnalysis 136 +#define OP_DropTable 137 +#define OP_DropIndex 138 +#define OP_DropTrigger 139 +#define OP_IntegrityCk 140 +#define OP_RowSetAdd 141 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 142 +#define OP_FkCounter 143 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 144 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 145 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggStep0 146 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggFinal 148 
/* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 149 +#define OP_TableLock 150 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 151 +#define OP_VCreate 152 +#define OP_VDestroy 153 +#define OP_VOpen 154 +#define OP_VColumn 155 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 156 +#define OP_Pagecount 157 +#define OP_MaxPgcnt 158 +#define OP_CursorHint 159 +#define OP_Noop 160 +#define OP_Explain 161 + +/* Properties such as "out2" or "jump" that are specified in +** comments following the "case" for each opcode in the vdbe.c +** are encoded into bitvectors as follows: +*/ +#define OPFLG_JUMP 0x01 /* jump: P2 holds jmp target */ +#define OPFLG_IN1 0x02 /* in1: P1 is an input */ +#define OPFLG_IN2 0x04 /* in2: P2 is an input */ +#define OPFLG_IN3 0x08 /* in3: P3 is an input */ +#define OPFLG_OUT2 0x10 /* out2: P2 is an output */ +#define OPFLG_OUT3 0x20 /* out3: P3 is an output */ +#define OPFLG_INITIALIZER {\ +/* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01,\ +/* 8 */ 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x03, 0x03, 0x09,\ +/* 24 */ 0x09, 0x09, 0x09, 0x26, 0x26, 0x09, 0x09, 0x09,\ +/* 32 */ 0x09, 0x09, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 40 */ 0x0b, 0x0b, 0x01, 0x26, 0x26, 0x26, 0x26, 0x26,\ +/* 48 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x01, 0x12, 0x01,\ +/* 56 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x23, 0x0b,\ +/* 64 */ 0x01, 0x01, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01,\ +/* 72 */ 0x02, 0x02, 0x08, 0x00, 0x10, 0x10, 0x10, 0x10,\ +/* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00,\ +/* 88 */ 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00,\ +/* 96 */ 0x00, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ +/* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 112 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 120 */ 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00, 0x00,\ +/* 128 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\ +/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\ +/* 144 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\ +/* 160 */ 0x00, 0x00,} + +/* The sqlite3P2Values() routine is able to run faster if it knows +** the value of the largest JUMP opcode. The smaller the maximum +** JUMP opcode the better, so the mkopcodeh.tcl script that +** generated this include file strives to group all JUMP opcodes +** together near the beginning of the list. +*/ +#define SQLITE_MX_JUMP_OPCODE 71 /* Maximum JUMP opcode */ + +/************** End of opcodes.h *********************************************/ +/************** Continuing where we left off in vdbe.h ***********************/ + +/* +** Prototypes for the VDBE interface. See comments on the implementation +** for a description of what each of these routines does. 
+*/ +SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse*); +SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int); +SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int); +SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int); +SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe*,int); +SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe*,int,const char*); +SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe*,int,const char*,...); +SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int); +SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int); +SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(Vdbe*,int,int,int,int,const u8*,int); +SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int); +SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe*,int); +#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS) +SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N); +SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p); +#else +# define sqlite3VdbeVerifyNoMallocRequired(A,B) +# define sqlite3VdbeVerifyNoResultRow(A) +#endif +SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno); +SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*); +SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, u32 addr, u8); +SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1); +SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2); +SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3); +SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u16 P5); +SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr); +SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr); +SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op); +SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N); +SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe*, void *pP4, int p4type); +SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*); +SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int); +SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int); +SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*); +SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int); +SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*); +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int); +#endif +SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe*); +SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe*,int); +SQLITE_PRIVATE int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*)); +SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe*); +SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, int); +SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe*,Vdbe*); +SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe*, int*, int*); +SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe*, int, u8); +SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int); +#ifndef SQLITE_OMIT_TRACE +SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*); +#endif +SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*); + +SQLITE_PRIVATE void 
sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
+SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
+SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int);
+SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo*);
+
+typedef int (*RecordCompare)(int,const void*,UnpackedRecord*);
+SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*);
+
+#ifndef SQLITE_OMIT_TRIGGER
+SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *);
+#endif
+
+/* Use SQLITE_ENABLE_EXPLAIN_COMMENTS to enable generation of extra comments
+** on each VDBE opcode.
+**
+** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op
+** comments in VDBE programs that show key decision points in the code
+** generator.
+*/
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe*, const char*, ...);
+# define VdbeComment(X) sqlite3VdbeComment X
+SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
+# define VdbeNoopComment(X) sqlite3VdbeNoopComment X
+# ifdef SQLITE_ENABLE_MODULE_COMMENTS
+#   define VdbeModuleComment(X) sqlite3VdbeNoopComment X
+# else
+#   define VdbeModuleComment(X)
+# endif
+#else
+# define VdbeComment(X)
+# define VdbeNoopComment(X)
+# define VdbeModuleComment(X)
+#endif
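+
+/* A usage illustration, added here: the doubled parentheses let the macro
+** forward a printf-style argument list, and the whole call compiles away
+** when SQLITE_ENABLE_EXPLAIN_COMMENTS is not defined:
+**
+**   VdbeComment((v, "correlated subquery %d", iSub));
+*/
+
+/*
+** The VdbeCoverage macros are used to set a coverage testing point
+** for VDBE branch instructions. The coverage testing points are line
+** numbers in the sqlite3.c source file. VDBE branch coverage testing
+** only works with an amalgamation build. That's ok since a VDBE branch
+** coverage build is designed for testing the test suite only. No application
+** should ever ship with VDBE branch coverage measuring turned on.
+**
+**    VdbeCoverage(v)                  // Mark the previously coded instruction
+**                                     // as a branch
+**
+**    VdbeCoverageIf(v, conditional)   // Mark previous if conditional true
+**
+**    VdbeCoverageAlwaysTaken(v)       // Previous branch is always taken
+**
+**    VdbeCoverageNeverTaken(v)        // Previous branch is never taken
+**
+** Every VDBE branch operation must be tagged with one of the macros above.
+** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
+** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch()
+** routine in vdbe.c, alerting the developer to the missed tag. 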
+*/
+#ifdef SQLITE_VDBE_COVERAGE
+SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int);
+# define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__)
+# define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__)
+# define VdbeCoverageAlwaysTaken(v) sqlite3VdbeSetLineNumber(v,2);
+# define VdbeCoverageNeverTaken(v) sqlite3VdbeSetLineNumber(v,1);
+# define VDBE_OFFSET_LINENO(x) (__LINE__+x)
+#else
+# define VdbeCoverage(v)
+# define VdbeCoverageIf(v,x)
+# define VdbeCoverageAlwaysTaken(v)
+# define VdbeCoverageNeverTaken(v)
+# define VDBE_OFFSET_LINENO(x) 0
+#endif
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*);
+#else
+# define sqlite3VdbeScanStatus(a,b,c,d,e)
+#endif
+
+#endif /* SQLITE_VDBE_H */
+
+/************** End of vdbe.h ************************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include pager.h in the middle of sqliteInt.h *****************/
+/************** Begin file pager.h *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the sqlite page cache
+** subsystem. The page cache subsystem reads and writes a file a page
+** at a time and provides a journal for rollback.
+*/
+
+#ifndef SQLITE_PAGER_H
+#define SQLITE_PAGER_H
+
+/*
+** Default maximum size for persistent journal files. A negative
+** value means no limit. This value may be overridden using the
+** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit".
+*/
+#ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT
+  #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1
+#endif
+
+/*
+** The type used to represent a page number. The first page in a file
+** is called page 1. 0 is used to represent "not a page".
+*/
+typedef u32 Pgno;
+
+/*
+** Each open file is managed by a separate instance of the "Pager" structure.
+*/
+typedef struct Pager Pager;
+
+/*
+** Handle type for pages.
+*/
+typedef struct PgHdr DbPage;
+
+/*
+** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is
+** reserved for working around a windows/posix incompatibility). It is
+** used in the journal to signify that the remainder of the journal file
+** is devoted to storing a master journal name - there are no more pages to
+** roll back. See comments for function writeMasterJournal() in pager.c
+** for details.
+*/
+#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1))
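+
+/* A worked example, added for illustration: with the usual PENDING_BYTE of
+** 0x40000000 (the byte at the 1GiB boundary) and a 1024-byte page size,
+**
+**   PAGER_MJ_PGNO(pPager) == 0x40000000/1024 + 1 == 1048577
+**
+** so the page overlapping the locking-byte region is never used for data.
+*/
+
+/*
+** Allowed values for the flags parameter to sqlite3PagerOpen().
+**
+** NOTE: These values must match the corresponding BTREE_ values in btree.h.
+*/
+#define PAGER_OMIT_JOURNAL  0x0001    /* Do not use a rollback journal */
+#define PAGER_MEMORY        0x0002    /* In-memory database */
+
+/*
+** Valid values for the second argument to sqlite3PagerLockingMode().
+*/
+#define PAGER_LOCKINGMODE_QUERY      -1
+#define PAGER_LOCKINGMODE_NORMAL      0
+#define PAGER_LOCKINGMODE_EXCLUSIVE   1
+
+/*
+** Numeric constants that encode the journalmode. 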
+
+/*
+** Numeric constants that encode the journalmode.
+**
+** The numeric values encoded here (other than PAGER_JOURNALMODE_QUERY)
+** are exposed in the API via the "PRAGMA journal_mode" command and
+** therefore cannot be changed without a compatibility break.
+*/
+#define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */
+#define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */
+#define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */
+#define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */
+#define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */
+#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */
+#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */
+
+/*
+** Flags that make up the mask passed to sqlite3PagerGet().
+*/
+#define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */
+#define PAGER_GET_READONLY 0x02 /* Read-only page is acceptable */
+
+/*
+** Flags for sqlite3PagerSetFlags()
+**
+** Value constraints (enforced via assert()):
+** PAGER_FULLFSYNC == SQLITE_FullFSync
+** PAGER_CKPT_FULLFSYNC == SQLITE_CkptFullFSync
+** PAGER_CACHESPILL == SQLITE_CacheSpill
+*/
+#define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */
+#define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */
+#define PAGER_SYNCHRONOUS_FULL 0x03 /* PRAGMA synchronous=FULL */
+#define PAGER_SYNCHRONOUS_EXTRA 0x04 /* PRAGMA synchronous=EXTRA */
+#define PAGER_SYNCHRONOUS_MASK 0x07 /* Mask for four values above */
+#define PAGER_FULLFSYNC 0x08 /* PRAGMA fullfsync=ON */
+#define PAGER_CKPT_FULLFSYNC 0x10 /* PRAGMA checkpoint_fullfsync=ON */
+#define PAGER_CACHESPILL 0x20 /* PRAGMA cache_spill=ON */
+#define PAGER_FLAGS_MASK 0x38 /* All above except SYNCHRONOUS */
+
+/*
+** The remainder of this file contains the declarations of the functions
+** that make up the Pager sub-system API. See source code comments for
+** a detailed description of each routine.
+*/
+
+/* Open and close a Pager connection. */
+SQLITE_PRIVATE int sqlite3PagerOpen(
+ sqlite3_vfs*,
+ Pager **ppPager,
+ const char*,
+ int,
+ int,
+ int,
+ void(*)(DbPage*)
+);
+SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3*);
+SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*);
+
+/* Functions used to configure a Pager object. */
+SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *);
+SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int);
+#ifdef SQLITE_HAS_CODEC
+SQLITE_PRIVATE void sqlite3PagerAlignReserve(Pager*,Pager*);
+#endif
+SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int);
+SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int);
+SQLITE_PRIVATE int sqlite3PagerSetSpillsize(Pager*, int);
+SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64);
+SQLITE_PRIVATE void sqlite3PagerShrink(Pager*);
+SQLITE_PRIVATE void sqlite3PagerSetFlags(Pager*,unsigned);
+SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int);
+SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *, int);
+SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager*);
+SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager*);
+SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *, i64);
+SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager*);
+SQLITE_PRIVATE int sqlite3PagerFlush(Pager*);
+
+/* Functions used to obtain and release page references. 
*/ +SQLITE_PRIVATE int sqlite3PagerGet(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag); +SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno); +SQLITE_PRIVATE void sqlite3PagerRef(DbPage*); +SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*); +SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage*); + +/* Operations on page references. */ +SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*); +SQLITE_PRIVATE void sqlite3PagerDontWrite(DbPage*); +SQLITE_PRIVATE int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int); +SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage*); +SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *); +SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *); + +/* Functions used to manage pager transactions and savepoints. */ +SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*); +SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int); +SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int); +SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*); +SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster); +SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*); +SQLITE_PRIVATE int sqlite3PagerRollback(Pager*); +SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n); +SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); +SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager); + +#ifndef SQLITE_OMIT_WAL +SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, sqlite3*, int, int*, int*); +SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager); +SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager); +SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen); +SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3*); +# ifdef SQLITE_DIRECT_OVERFLOW_READ +SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno); +# endif +# ifdef SQLITE_ENABLE_SNAPSHOT +SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot); +SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot); +SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager); +# endif +#else +# define sqlite3PagerUseWal(x,y) 0 +#endif + +#ifdef SQLITE_ENABLE_ZIPVFS +SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager); +#endif + +/* Functions used to query pager state and configuration. */ +SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*); +SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager*); +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*); +#endif +SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*); +SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager*, int); +SQLITE_PRIVATE sqlite3_vfs *sqlite3PagerVfs(Pager*); +SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager*); +SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*); +SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*); +SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*); +SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); +SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); +SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*); +SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); + +/* Functions used to truncate the database file. */ +SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno); + +SQLITE_PRIVATE void sqlite3PagerRekey(DbPage*, Pgno, u16); + +#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL) +SQLITE_PRIVATE void *sqlite3PagerCodec(DbPage *); +#endif + +/* Functions to support testing and debugging. 
*/
+#if !defined(NDEBUG) || defined(SQLITE_TEST)
+SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage*);
+SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage*);
+#endif
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int *sqlite3PagerStats(Pager*);
+SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*);
+ void disable_simulated_io_errors(void);
+ void enable_simulated_io_errors(void);
+#else
+# define disable_simulated_io_errors()
+# define enable_simulated_io_errors()
+#endif
+
+#endif /* SQLITE_PAGER_H */
+
+/************** End of pager.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include pcache.h in the middle of sqliteInt.h ****************/
+/************** Begin file pcache.h ******************************************/
+/*
+** 2008 August 05
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the sqlite page cache
+** subsystem.
+*/
+
+#ifndef _PCACHE_H_
+#define _PCACHE_H_
+
+typedef struct PgHdr PgHdr;
+typedef struct PCache PCache;
+
+/*
+** Every page in the cache is controlled by an instance of the following
+** structure.
+*/
+struct PgHdr {
+ sqlite3_pcache_page *pPage; /* Pcache object page handle */
+ void *pData; /* Page data */
+ void *pExtra; /* Extra content */
+ PgHdr *pDirty; /* Transient list of dirty pages sorted by pgno */
+ Pager *pPager; /* The pager this page is part of */
+ Pgno pgno; /* Page number for this page */
+#ifdef SQLITE_CHECK_PAGES
+ u32 pageHash; /* Hash of page content */
+#endif
+ u16 flags; /* PGHDR flags defined below */
+
+ /**********************************************************************
+ ** Elements above are public. All that follows is private to pcache.c
+ ** and should not be accessed by other modules.
+ */
+ i16 nRef; /* Number of users of this page */
+ PCache *pCache; /* Cache that owns this page */
+
+ PgHdr *pDirtyNext; /* Next element in list of dirty pages */
+ PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */
+};
+
+/* Bit values for PgHdr.flags */
+#define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */
+#define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */
+#define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */
+#define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before
+ ** writing this page to the database */
+#define PGHDR_DONT_WRITE 0x010 /* Do not write content to disk */
+#define PGHDR_MMAP 0x020 /* This is an mmap page object */
+
+#define PGHDR_WAL_APPEND 0x040 /* Appended to wal file */
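+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): reading PgHdr.flags with the PGHDR_* bits above. A page is
+** either clean or dirty, never both; the remaining bits qualify a dirty
+** page. The helper below is hypothetical.
+*/
+#if 0
+static int examplePageNeedsJournalSync(const PgHdr *pPg){
+  /* Dirty pages that still require a journal fsync before being written
+  ** back carry both PGHDR_DIRTY and PGHDR_NEED_SYNC. */
+  return (pPg->flags & (PGHDR_DIRTY|PGHDR_NEED_SYNC))
+            == (PGHDR_DIRTY|PGHDR_NEED_SYNC);
+}
+#endif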
+
+/* Initialize and shutdown the page cache subsystem */
+SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
+SQLITE_PRIVATE void sqlite3PcacheShutdown(void);
+
+/* Page cache buffer management:
+** These routines implement SQLITE_CONFIG_PAGECACHE.
+*/
+SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n);
+
+/* Create a new pager cache.
+** Under memory stress, invoke xStress to try to make pages clean.
+** Only clean and unpinned pages can be reclaimed.
+*/
+SQLITE_PRIVATE int sqlite3PcacheOpen(
+ int szPage, /* Size of every page */
+ int szExtra, /* Extra space associated with each page */
+ int bPurgeable, /* True if pages are on backing store */
+ int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */
+ void *pStress, /* Argument to xStress */
+ PCache *pToInit /* Preallocated space for the PCache */
+);
+
+/* Modify the page-size after the cache has been created. */
+SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *, int);
+
+/* Return the size in bytes of a PCache object. Used to preallocate
+** storage space.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSize(void);
+
+/* One release per successful fetch. Page is pinned until released.
+** Reference counted.
+*/
+SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(PCache*, Pgno, int createFlag);
+SQLITE_PRIVATE int sqlite3PcacheFetchStress(PCache*, Pgno, sqlite3_pcache_page**);
+SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(PCache*, Pgno, sqlite3_pcache_page *pPage);
+SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*);
+
+SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */
+SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */
+SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */
+SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */
+SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache*);
+
+/* Change a page number. Used by incr-vacuum. */
+SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno);
+
+/* Remove all pages with pgno>x. Reset the cache if x==0 */
+SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache*, Pgno x);
+
+/* Get a list of all dirty pages in the cache, sorted by page number */
+SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache*);
+
+/* Reset and close the cache object */
+SQLITE_PRIVATE void sqlite3PcacheClose(PCache*);
+
+/* Clear flags from pages of the page cache */
+SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *);
+
+/* Discard the contents of the cache */
+SQLITE_PRIVATE void sqlite3PcacheClear(PCache*);
+
+/* Return the total number of outstanding page references */
+SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*);
+
+/* Increment the reference count of an existing page */
+SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*);
+
+SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*);
+
+/* Return the total number of pages stored in the cache */
+SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);
+
+#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
+/* Iterate through all dirty pages currently stored in the cache. This
+** interface is only available if SQLITE_CHECK_PAGES or SQLITE_DEBUG is
+** defined when the library is built.
+*/
+SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *));
+#endif
+
+#if defined(SQLITE_DEBUG)
+/* Check invariants on a PgHdr object */
+SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*);
+#endif
+
+/* Set and get the suggested cache-size for the specified pager-cache.
+**
+** If no global maximum is configured, then the system attempts to limit
+** the total number of pages cached by purgeable pager-caches to the sum
+** of the suggested cache-sizes.
+*/
+SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *, int);
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *);
+#endif
+
+/* Set or get the suggested spill-size for the specified pager-cache. 
+**
+** The spill-size is the minimum number of pages in cache before the cache
+** will attempt to spill dirty pages by calling xStress.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSetSpillsize(PCache *, int);
+
+/* Free up as much memory as possible from the page cache */
+SQLITE_PRIVATE void sqlite3PcacheShrink(PCache*);
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+/* Try to return memory used by the pcache module to the main memory heap */
+SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int);
+#endif
+
+#ifdef SQLITE_TEST
+SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*);
+#endif
+
+SQLITE_PRIVATE void sqlite3PCacheSetDefault(void);
+
+/* Return the header size */
+SQLITE_PRIVATE int sqlite3HeaderSizePcache(void);
+SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void);
+
+/* Number of dirty pages as a percentage of the configured cache size */
+SQLITE_PRIVATE int sqlite3PCachePercentDirty(PCache*);
+
+#endif /* _PCACHE_H_ */
+
+/************** End of pcache.h **********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+/************** Include os.h in the middle of sqliteInt.h ********************/
+/************** Begin file os.h **********************************************/
+/*
+** 2001 September 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file (together with its companion C source-code file
+** "os.c") attempts to abstract the underlying operating system so that
+** the SQLite library will work on both POSIX and windows systems.
+**
+** This header file is #include-ed by sqliteInt.h and thus ends up
+** being included by every source file.
+*/
+#ifndef _SQLITE_OS_H_
+#define _SQLITE_OS_H_
+
+/*
+** Attempt to automatically detect the operating system and setup the
+** necessary pre-processor macros for it.
+*/
+/************** Include os_setup.h in the middle of os.h *********************/
+/************** Begin file os_setup.h ****************************************/
+/*
+** 2013 November 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains pre-processor directives related to operating system
+** detection and/or setup.
+*/
+#ifndef SQLITE_OS_SETUP_H
+#define SQLITE_OS_SETUP_H
+
+/*
+** Figure out if we are dealing with Unix, Windows, or some other operating
+** system.
+**
+** After the following block of preprocessor macros, all of SQLITE_OS_UNIX,
+** SQLITE_OS_WIN, and SQLITE_OS_OTHER will be defined to either 1 or 0. One of
+** the three will be 1. The other two will be 0. 
+*/
+#if defined(SQLITE_OS_OTHER)
+# if SQLITE_OS_OTHER==1
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# else
+# undef SQLITE_OS_OTHER
+# endif
+#endif
+#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
+# define SQLITE_OS_OTHER 0
+# ifndef SQLITE_OS_WIN
+# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
+ defined(__MINGW32__) || defined(__BORLANDC__)
+# define SQLITE_OS_WIN 1
+# define SQLITE_OS_UNIX 0
+# else
+# define SQLITE_OS_WIN 0
+# define SQLITE_OS_UNIX 1
+# endif
+# else
+# define SQLITE_OS_UNIX 0
+# endif
+#else
+# ifndef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# endif
+#endif
+
+#endif /* SQLITE_OS_SETUP_H */
+
+/************** End of os_setup.h ********************************************/
+/************** Continuing where we left off in os.h *************************/
+
+/* If the SET_FULLSYNC macro is not defined above, then make it
+** a no-op
+*/
+#ifndef SET_FULLSYNC
+# define SET_FULLSYNC(x,y)
+#endif
+
+/*
+** The default size of a disk sector
+*/
+#ifndef SQLITE_DEFAULT_SECTOR_SIZE
+# define SQLITE_DEFAULT_SECTOR_SIZE 4096
+#endif
+
+/*
+** Temporary files are named starting with this prefix followed by 16 random
+** alphanumeric characters, and no file extension. They are stored in the
+** OS's standard temporary file directory, and are deleted prior to exit.
+** If sqlite is being embedded in another program, you may wish to change the
+** prefix to reflect your program's name, so that if your program exits
+** prematurely, old temporary files can be easily identified. This can be done
+** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
+**
+** 2006-10-31: The default prefix used to be "sqlite_". But then
+** McAfee started using SQLite in their anti-virus product and it
+** started putting files with the "sqlite" name in the c:/temp folder.
+** This annoyed many windows users. Those users would then do a
+** Google search for "sqlite", find the telephone numbers of the
+** developers and call to wake them up at night and complain.
+** For this reason, the default name prefix is changed to be "sqlite"
+** spelled backwards. So the temp files are still identified, but
+** anybody smart enough to figure out the code is also likely smart
+** enough to know that calling the developer will not help get rid
+** of the file.
+*/
+#ifndef SQLITE_TEMP_FILE_PREFIX
+# define SQLITE_TEMP_FILE_PREFIX "etilqs_"
+#endif
+
+/*
+** The following values may be passed as the second argument to
+** sqlite3OsLock(). The various locks exhibit the following semantics:
+**
+** SHARED: Any number of processes may hold a SHARED lock simultaneously.
+** RESERVED: A single process may hold a RESERVED lock on a file at
+** any time. Other processes may hold and obtain new SHARED locks.
+** PENDING: A single process may hold a PENDING lock on a file at
+** any one time. Existing SHARED locks may persist, but no new
+** SHARED locks may be obtained by other processes.
+** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks.
+**
+** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a
+** process that requests an EXCLUSIVE lock may actually obtain a PENDING
+** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to
+** sqlite3OsLock().
+*/
+#define NO_LOCK 0
+#define SHARED_LOCK 1
+#define RESERVED_LOCK 2
+#define PENDING_LOCK 3
+#define EXCLUSIVE_LOCK 4
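+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): the lock values above form a strict escalation ladder. A writer
+** typically climbs SHARED -> RESERVED -> (PENDING) -> EXCLUSIVE, with
+** PENDING obtained implicitly on the way to EXCLUSIVE. The predicate below
+** is a hypothetical simplification that ignores lock releases.
+*/
+#if 0
+static int exampleIsValidUpgrade(int eFrom, int eTo){
+  /* Locks only escalate to strictly higher states within the range. */
+  return eFrom>=NO_LOCK && eTo<=EXCLUSIVE_LOCK && eTo>eFrom;
+}
+#endif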
+
+/*
+** File Locking Notes: (Mostly about windows but also some info for Unix)
+**
+** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
+** those functions are not available. So we use only LockFile() and
+** UnlockFile().
+**
+** LockFile() prevents not just writing but also reading by other processes.
+** A SHARED_LOCK is obtained by locking a single randomly-chosen
+** byte out of a specific range of bytes. The lock byte is obtained at
+** random so two separate readers can probably access the file at the
+** same time, unless they are unlucky and choose the same lock byte.
+** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range.
+** There can only be one writer. A RESERVED_LOCK is obtained by locking
+** a single byte of the file that is designated as the reserved lock byte.
+** A PENDING_LOCK is obtained by locking a designated byte different from
+** the RESERVED_LOCK byte.
+**
+** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
+** which means we can use reader/writer locks. When reader/writer locks
+** are used, the lock is placed on the same range of bytes that is used
+** for probabilistic locking in Win95/98/ME. Hence, the locking scheme
+** will support two or more Win95 readers or two or more WinNT readers.
+** But a single Win95 reader will lock out all WinNT readers and a single
+** WinNT reader will lock out all other Win95 readers.
+**
+** The following #defines specify the range of bytes used for locking.
+** SHARED_SIZE is the number of bytes available in the pool from which
+** a random byte is selected for a shared lock. The pool of bytes for
+** shared locks begins at SHARED_FIRST.
+**
+** The same locking strategy and
+** byte ranges are used for Unix. This leaves open the possibility of having
+** clients on win95, winNT, and unix all talking to the same shared file
+** and all locking correctly. To do so would require that samba (or whatever
+** tool is being used for file sharing) implements locks correctly between
+** windows and unix. I'm guessing that isn't likely to happen, but by
+** using the same locking range we are at least open to the possibility.
+**
+** Locking in windows is mandatory. For this reason, we cannot store
+** actual data in the bytes used for locking. The pager therefore never
+** allocates the pages involved in locking. SHARED_SIZE is selected so
+** that all locks will fit on a single page even at the minimum page size.
+** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE
+** is set high so that we don't have to allocate an unused page except
+** for very large databases. But one should test the page skipping logic
+** by setting PENDING_BYTE low and running the entire regression suite.
+**
+** Changing the value of PENDING_BYTE results in a subtly incompatible
+** file format. Depending on how it is changed, you might not notice
+** the incompatibility right away, even running a full regression test.
+** The default location of PENDING_BYTE is the first byte past the
+** 1GB boundary.
+**
+*/
+#ifdef SQLITE_OMIT_WSD
+# define PENDING_BYTE (0x40000000)
+#else
+# define PENDING_BYTE sqlite3PendingByte
+#endif
+#define RESERVED_BYTE (PENDING_BYTE+1)
+#define SHARED_FIRST (PENDING_BYTE+2)
+#define SHARED_SIZE 510
+
+/*
+** Wrapper around OS specific sqlite3_os_init() function. 
+*/ +SQLITE_PRIVATE int sqlite3OsInit(void); + +/* +** Functions for accessing sqlite3_file methods +*/ +SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*); +SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); +SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); +SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size); +SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int); +SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); +SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int); +SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int); +SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut); +SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*); +SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*); +#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 +SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id); +SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id); +SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **); +SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int); +SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id); +SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int); +SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **); +SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *); + + +/* +** Functions for accessing sqlite3_vfs methods +*/ +SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); +SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int); +SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut); +SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); +#ifndef SQLITE_OMIT_LOAD_EXTENSION +SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); +SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *); +SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void); +SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *); +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *); +SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int); +SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*); +SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); + +/* +** Convenience functions for opening and closing files using +** sqlite3_malloc() to obtain space for the file-handle structure. +*/ +SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); +SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *); + +#endif /* _SQLITE_OS_H_ */ + +/************** End of os.h **************************************************/ +/************** Continuing where we left off in sqliteInt.h ******************/ +/************** Include mutex.h in the middle of sqliteInt.h *****************/ +/************** Begin file mutex.h *******************************************/ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the common header for all mutex implementations. 
+** The sqliteInt.h header #includes this file so that it is available
+** to all source files. We break it out in an effort to keep the code
+** better organized.
+**
+** NOTE: source files should *not* #include this header file directly.
+** Source files should #include the sqliteInt.h file and let that file
+** include this one indirectly.
+*/
+
+
+/*
+** Figure out what version of the code to use. The choices are
+**
+** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The
+** mutexes implementation cannot be overridden
+** at start-time.
+**
+** SQLITE_MUTEX_NOOP For single-threaded applications. No
+** mutual exclusion is provided. But this
+** implementation can be overridden at
+** start-time.
+**
+** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix.
+**
+** SQLITE_MUTEX_W32 For multi-threaded applications on Win32.
+*/
+#if !SQLITE_THREADSAFE
+# define SQLITE_MUTEX_OMIT
+#endif
+#if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP)
+# if SQLITE_OS_UNIX
+# define SQLITE_MUTEX_PTHREADS
+# elif SQLITE_OS_WIN
+# define SQLITE_MUTEX_W32
+# else
+# define SQLITE_MUTEX_NOOP
+# endif
+#endif
+
+#ifdef SQLITE_MUTEX_OMIT
+/*
+** If this is a no-op implementation, implement everything as macros.
+*/
+#define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8)
+#define sqlite3_mutex_free(X)
+#define sqlite3_mutex_enter(X)
+#define sqlite3_mutex_try(X) SQLITE_OK
+#define sqlite3_mutex_leave(X)
+#define sqlite3_mutex_held(X) ((void)(X),1)
+#define sqlite3_mutex_notheld(X) ((void)(X),1)
+#define sqlite3MutexAlloc(X) ((sqlite3_mutex*)8)
+#define sqlite3MutexInit() SQLITE_OK
+#define sqlite3MutexEnd()
+#define MUTEX_LOGIC(X)
+#else
+#define MUTEX_LOGIC(X) X
+#endif /* defined(SQLITE_MUTEX_OMIT) */
+
+/************** End of mutex.h ***********************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
+
+/* The SQLITE_EXTRA_DURABLE compile-time option used to set the default
+** synchronous setting to EXTRA. It is no longer supported.
+*/
+#ifdef SQLITE_EXTRA_DURABLE
+# warning Use SQLITE_DEFAULT_SYNCHRONOUS=3 instead of SQLITE_EXTRA_DURABLE
+# define SQLITE_DEFAULT_SYNCHRONOUS 3
+#endif
+
+/*
+** Default synchronous levels.
+**
+** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ
+** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
+**
+** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS
+** OFF 1 0
+** NORMAL 2 1
+** FULL 3 2
+** EXTRA 4 3
+**
+** The "PRAGMA synchronous" statement also uses the zero-based numbers.
+** In other words, the zero-based numbers are used for all external interfaces
+** and the one-based values are used internally.
+*/
+#ifndef SQLITE_DEFAULT_SYNCHRONOUS
+# define SQLITE_DEFAULT_SYNCHRONOUS (PAGER_SYNCHRONOUS_FULL-1)
+#endif
+#ifndef SQLITE_DEFAULT_WAL_SYNCHRONOUS
+# define SQLITE_DEFAULT_WAL_SYNCHRONOUS SQLITE_DEFAULT_SYNCHRONOUS
+#endif
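+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): the off-by-one relationship in the table above, written out as
+** runtime checks. These hold assuming SQLITE_DEFAULT_SYNCHRONOUS was not
+** overridden at compile time.
+*/
+#if 0
+static void exampleSyncLevelMapping(void){
+  assert( SQLITE_DEFAULT_SYNCHRONOUS == PAGER_SYNCHRONOUS_FULL-1 );
+  assert( PAGER_SYNCHRONOUS_OFF-1 == 0 );   /* PRAGMA synchronous=OFF */
+  assert( PAGER_SYNCHRONOUS_EXTRA-1 == 3 ); /* PRAGMA synchronous=EXTRA */
+}
+#endif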
+
+/*
+** Each database file to be accessed by the system is an instance
+** of the following structure. There are normally two of these structures
+** in the sqlite3.aDb[] array. aDb[0] is the main database file and
+** aDb[1] is the database file used to hold temporary tables. Additional
+** databases may be attached.
+*/
+struct Db {
+ char *zDbSName; /* Name of this database. (schema name, not filename) */
+ Btree *pBt; /* The B*Tree structure for this database file */
+ u8 safety_level; /* How aggressive at syncing data to disk */
+ u8 bSyncSet; /* True if "PRAGMA synchronous=N" has been run */
+ Schema *pSchema; /* Pointer to database schema (possibly shared) */
+};
+
+/*
+** An instance of the following structure stores a database schema.
+**
+** Most Schema objects are associated with a Btree. The exception is
+** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
+** In shared cache mode, a single Schema object can be shared by multiple
+** Btrees that refer to the same underlying BtShared object.
+**
+** Schema objects are automatically deallocated when the last Btree that
+** references them is destroyed. The TEMP Schema is manually freed by
+** sqlite3_close().
+**
+** A thread must be holding a mutex on the corresponding Btree in order
+** to access Schema content. This implies that the thread must also be
+** holding a mutex on the sqlite3 connection pointer that owns the Btree.
+** For a TEMP Schema, only the connection mutex is required.
+*/
+struct Schema {
+ int schema_cookie; /* Database schema version number for this file */
+ int iGeneration; /* Generation counter. Incremented with each change */
+ Hash tblHash; /* All tables indexed by name */
+ Hash idxHash; /* All (named) indices indexed by name */
+ Hash trigHash; /* All triggers indexed by name */
+ Hash fkeyHash; /* All foreign keys by referenced table name */
+ Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */
+ u8 file_format; /* Schema format version for this file */
+ u8 enc; /* Text encoding used by this database */
+ u16 schemaFlags; /* Flags associated with this schema */
+ int cache_size; /* Number of pages to use in the cache */
+};
+
+/*
+** These macros can be used to test, set, or clear bits in the
+** Db.pSchema->schemaFlags field.
+*/
+#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))==(P))
+#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))!=0)
+#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags|=(P)
+#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags&=~(P)
+
+/*
+** Allowed values for the DB.pSchema->schemaFlags field.
+**
+** The DB_SchemaLoaded flag is set after the database schema has been
+** read into internal hash tables.
+**
+** DB_UnresetViews means that one or more views have column names that
+** have been filled out. If the schema changes, these column names might
+** change and so the view will need to be reset.
+*/
+#define DB_SchemaLoaded 0x0001 /* The schema has been loaded */
+#define DB_UnresetViews 0x0002 /* Some views have defined column names */
+#define DB_Empty 0x0004 /* The file is empty (length 0 bytes) */
+
+/*
+** The number of different kinds of things that can be limited
+** using the sqlite3_limit() interface.
+*/
+#define SQLITE_N_LIMIT (SQLITE_LIMIT_WORKER_THREADS+1)
+
+/*
+** Lookaside malloc is a set of fixed-size buffers that can be used
+** to satisfy small transient memory allocation requests for objects
+** associated with a particular database connection. The use of
+** lookaside malloc provides a significant performance enhancement
+** (approx 10%) by avoiding numerous malloc/free requests while parsing
+** SQL statements.
+**
+** The Lookaside structure holds configuration information about the
+** lookaside malloc subsystem. Each available memory allocation in
+** the lookaside subsystem is stored on a linked list of LookasideSlot
+** objects.
+**
+** Lookaside allocations are only allowed for objects that are associated
+** with a particular database connection. Hence, schema information cannot
+** be stored in lookaside because in shared cache mode the schema information
+** is shared by multiple database connections. Therefore, while parsing
+** schema information, the Lookaside.bDisable counter is made nonzero so that
+** lookaside allocations are not used to construct the schema objects.
+*/
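+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): the fast path of a lookaside-style allocator. Small requests
+** pop a fixed-size buffer off the free list; anything else falls back to
+** the general allocator. This hypothetical stand-in ignores the locking
+** and statistics that the real allocator in malloc.c performs.
+*/
+#if 0
+static void *exampleLookasideAlloc(Lookaside *p, int nByte){
+  LookasideSlot *pSlot;
+  if( p->bDisable==0 && nByte<=p->sz && (pSlot = p->pFree)!=0 ){
+    p->pFree = pSlot->pNext;      /* Unlink the first free slot */
+    p->nOut++;                    /* One more buffer checked out */
+    return (void*)pSlot;
+  }
+  return sqlite3_malloc(nByte);   /* Fall back to the general allocator */
+}
+#endif
+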
+struct Lookaside {
+ u32 bDisable; /* Only operate the lookaside when zero */
+ u16 sz; /* Size of each buffer in bytes */
+ u8 bMalloced; /* True if pStart obtained from sqlite3_malloc() */
+ int nOut; /* Number of buffers currently checked out */
+ int mxOut; /* Highwater mark for nOut */
+ int anStat[3]; /* 0: hits. 1: size misses. 2: full misses */
+ LookasideSlot *pFree; /* List of available buffers */
+ void *pStart; /* First byte of available memory space */
+ void *pEnd; /* First byte past end of available space */
+};
+struct LookasideSlot {
+ LookasideSlot *pNext; /* Next buffer in the list of free buffers */
+};
+
+/*
+** A hash table for built-in function definitions. (Application-defined
+** functions use a regular hash table from hash.h.)
+**
+** Hash each FuncDef structure into one of the FuncDefHash.a[] slots.
+** Collisions are on the FuncDef.u.pHash chain.
+*/
+#define SQLITE_FUNC_HASH_SZ 23
+struct FuncDefHash {
+ FuncDef *a[SQLITE_FUNC_HASH_SZ]; /* Hash table for functions */
+};
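+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): walking one FuncDefHash bucket. The bucket index computation
+** below is a hypothetical stand-in for the real hash. Within a bucket,
+** differently named entries that collide are chained through
+** FuncDef.u.pHash, as described above.
+*/
+#if 0
+static FuncDef *exampleFindFunc(FuncDefHash *pHash, const char *zName, int n){
+  int h = (zName[0] + n) % SQLITE_FUNC_HASH_SZ;  /* Hypothetical bucket */
+  FuncDef *p;
+  for(p=pHash->a[h]; p; p=p->u.pHash){
+    if( sqlite3StrNICmp(p->zName, zName, n)==0 && p->zName[n]==0 ) return p;
+  }
+  return 0;
+}
+#endif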
+
+#ifdef SQLITE_USER_AUTHENTICATION
+/*
+** Information held in the "sqlite3" database connection object and used
+** to manage user authentication.
+*/
+typedef struct sqlite3_userauth sqlite3_userauth;
+struct sqlite3_userauth {
+ u8 authLevel; /* Current authentication level */
+ int nAuthPW; /* Size of the zAuthPW in bytes */
+ char *zAuthPW; /* Password used to authenticate */
+ char *zAuthUser; /* User name used to authenticate */
+};
+
+/* Allowed values for sqlite3_userauth.authLevel */
+#define UAUTH_Unknown 0 /* Authentication not yet checked */
+#define UAUTH_Fail 1 /* User authentication failed */
+#define UAUTH_User 2 /* Authenticated as a normal user */
+#define UAUTH_Admin 3 /* Authenticated as an administrator */
+
+/* Functions used only by user authorization logic */
+SQLITE_PRIVATE int sqlite3UserAuthTable(const char*);
+SQLITE_PRIVATE int sqlite3UserAuthCheckLogin(sqlite3*,const char*,u8*);
+SQLITE_PRIVATE void sqlite3UserAuthInit(sqlite3*);
+SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**);
+
+#endif /* SQLITE_USER_AUTHENTICATION */
+
+/*
+** typedef for the authorization callback function.
+*/
+#ifdef SQLITE_USER_AUTHENTICATION
+ typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
+ const char*, const char*);
+#else
+ typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
+ const char*);
+#endif
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/* This is an extra SQLITE_TRACE macro that indicates "legacy" tracing
+** in the style of sqlite3_trace()
+*/
+#define SQLITE_TRACE_LEGACY 0x80
+#else
+#define SQLITE_TRACE_LEGACY 0
+#endif /* SQLITE_OMIT_DEPRECATED */
+
+
+/*
+** Each database connection is an instance of the following structure.
+*/
+struct sqlite3 {
+ sqlite3_vfs *pVfs; /* OS Interface */
+ struct Vdbe *pVdbe; /* List of active virtual machines */
+ CollSeq *pDfltColl; /* The default collating sequence (BINARY) */
+ sqlite3_mutex *mutex; /* Connection mutex */
+ Db *aDb; /* All backends */
+ int nDb; /* Number of backends currently in use */
+ int flags; /* Miscellaneous flags. See below */
+ i64 lastRowid; /* ROWID of most recent insert (see above) */
+ i64 szMmap; /* Default mmap_size setting */
+ unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */
+ int errCode; /* Most recent error code (SQLITE_*) */
+ int errMask; /* & result codes with this before returning */
+ int iSysErrno; /* Errno value from last system error */
+ u16 dbOptFlags; /* Flags to enable/disable optimizations */
+ u8 enc; /* Text encoding */
+ u8 autoCommit; /* The auto-commit flag. */
+ u8 temp_store; /* 1: file 2: memory 0: default */
+ u8 mallocFailed; /* True if we have seen a malloc failure */
+ u8 bBenignMalloc; /* Do not require OOMs if true */
+ u8 dfltLockMode; /* Default locking-mode for attached dbs */
+ signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */
+ u8 suppressErr; /* Do not issue error messages if true */
+ u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */
+ u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */
+ u8 mTrace; /* zero or more SQLITE_TRACE flags */
+ u8 skipBtreeMutex; /* True if no shared-cache backends */
+ int nextPagesize; /* Pagesize after VACUUM if >0 */
+ u32 magic; /* Magic number to detect library misuse */
+ int nChange; /* Value returned by sqlite3_changes() */
+ int nTotalChange; /* Value returned by sqlite3_total_changes() */
+ int aLimit[SQLITE_N_LIMIT]; /* Limits */
+ int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */
+ struct sqlite3InitInfo { /* Information used during initialization */
+ int newTnum; /* Rootpage of table being initialized */
+ u8 iDb; /* Which db file is being initialized */
+ u8 busy; /* TRUE if currently initializing */
+ u8 orphanTrigger; /* Last statement is orphaned TEMP trigger */
+ u8 imposterTable; /* Building an imposter table */
+ } init;
+ int nVdbeActive; /* Number of VDBEs currently running */
+ int nVdbeRead; /* Number of active VDBEs that read or write */
+ int nVdbeWrite; /* Number of active VDBEs that read and write */
+ int nVdbeExec; /* Number of nested calls to VdbeExec() */
+ int nVDestroy; /* Number of active OP_VDestroy operations */
+ int nExtension; /* Number of loaded extensions */
+ void **aExtension; /* Array of shared library handles */
+ int (*xTrace)(u32,void*,void*,void*); /* Trace function */
+ void *pTraceArg; /* Argument to the trace function */
+ void (*xProfile)(void*,const char*,u64); /* Profiling function */
+ void *pProfileArg; /* Argument to profile function */
+ void *pCommitArg; /* Argument to xCommitCallback() */
+ int (*xCommitCallback)(void*); /* Invoked at every commit. */
+ void *pRollbackArg; /* Argument to xRollbackCallback() */
+ void (*xRollbackCallback)(void*); /* Invoked at every rollback. 
*/
+ void *pUpdateArg;
+ void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64);
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ void *pPreUpdateArg; /* First argument to xPreUpdateCallback */
+ void (*xPreUpdateCallback)( /* Registered using sqlite3_preupdate_hook() */
+ void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64
+ );
+ PreUpdate *pPreUpdate; /* Context for active pre-update callback */
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+#ifndef SQLITE_OMIT_WAL
+ int (*xWalCallback)(void *, sqlite3 *, const char *, int);
+ void *pWalArg;
+#endif
+ void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*);
+ void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*);
+ void *pCollNeededArg;
+ sqlite3_value *pErr; /* Most recent error message */
+ union {
+ volatile int isInterrupted; /* True if sqlite3_interrupt has been called */
+ double notUsed1; /* Spacer */
+ } u1;
+ Lookaside lookaside; /* Lookaside malloc configuration */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+ sqlite3_xauth xAuth; /* Access authorization function */
+ void *pAuthArg; /* 1st argument to the access auth function */
+#endif
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ int (*xProgress)(void *); /* The progress callback */
+ void *pProgressArg; /* Argument to the progress callback */
+ unsigned nProgressOps; /* Number of opcodes for progress callback */
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ int nVTrans; /* Allocated size of aVTrans */
+ Hash aModule; /* populated by sqlite3_create_module() */
+ VtabCtx *pVtabCtx; /* Context for active vtab connect/create */
+ VTable **aVTrans; /* Virtual tables with open transactions */
+ VTable *pDisconnect; /* Disconnect these in next sqlite3_prepare() */
+#endif
+ Hash aFunc; /* Hash table of connection functions */
+ Hash aCollSeq; /* All collating sequences */
+ BusyHandler busyHandler; /* Busy callback */
+ Db aDbStatic[2]; /* Static space for the 2 default backends */
+ Savepoint *pSavepoint; /* List of active savepoints */
+ int busyTimeout; /* Busy handler timeout, in msec */
+ int nSavepoint; /* Number of non-transaction savepoints */
+ int nStatement; /* Number of nested statement-transactions */
+ i64 nDeferredCons; /* Net deferred constraints this transaction. */
+ i64 nDeferredImmCons; /* Net deferred immediate constraints */
+ int *pnBytesFreed; /* If not NULL, increment this in DbFree() */
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
+ /* The following variables are all protected by the STATIC_MASTER
+ ** mutex, not by sqlite3.mutex. They are used by code in notify.c.
+ **
+ ** When X.pUnlockConnection==Y, that means that X is waiting for Y to
+ ** unlock so that it can proceed.
+ **
+ ** When X.pBlockingConnection==Y, that means that something that X tried
+ ** to do recently failed with an SQLITE_LOCKED error due to locks
+ ** held by Y.
+ */
+ sqlite3 *pBlockingConnection; /* Connection that caused SQLITE_LOCKED */
+ sqlite3 *pUnlockConnection; /* Connection to watch for unlock */
+ void *pUnlockArg; /* Argument to xUnlockNotify */
+ void (*xUnlockNotify)(void **, int); /* Unlock notify callback */
+ sqlite3 *pNextBlocked; /* Next in list of all blocked connections */
+#endif
+#ifdef SQLITE_USER_AUTHENTICATION
+ sqlite3_userauth auth; /* User authentication information */
+#endif
+};
+
+/*
+** A macro to discover the encoding of a database.
+*/
+#define SCHEMA_ENC(db) ((db)->aDb[0].pSchema->enc)
+#define ENC(db) ((db)->enc)
+
+/*
+** Possible values for the sqlite3.flags. 
+**
+** Value constraints (enforced via assert()):
+** SQLITE_FullFSync == PAGER_FULLFSYNC
+** SQLITE_CkptFullFSync == PAGER_CKPT_FULLFSYNC
+** SQLITE_CacheSpill == PAGER_CACHESPILL
+*/
+#define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */
+#define SQLITE_InternChanges 0x00000002 /* Uncommitted Hash table changes */
+#define SQLITE_FullColNames 0x00000004 /* Show full column names on SELECT */
+#define SQLITE_FullFSync 0x00000008 /* Use full fsync on the backend */
+#define SQLITE_CkptFullFSync 0x00000010 /* Use full fsync for checkpoint */
+#define SQLITE_CacheSpill 0x00000020 /* OK to spill pager cache */
+#define SQLITE_ShortColNames 0x00000040 /* Show short column names */
+#define SQLITE_CountRows 0x00000080 /* Count rows changed by INSERT, */
+ /* DELETE, or UPDATE and return */
+ /* the count using a callback. */
+#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */
+ /* result set is empty */
+#define SQLITE_SqlTrace 0x00000200 /* Debug print SQL as it executes */
+#define SQLITE_VdbeListing 0x00000400 /* Debug listings of VDBE programs */
+#define SQLITE_WriteSchema 0x00000800 /* OK to update SQLITE_MASTER */
+#define SQLITE_VdbeAddopTrace 0x00001000 /* Trace sqlite3VdbeAddOp() calls */
+#define SQLITE_IgnoreChecks 0x00002000 /* Do not enforce check constraints */
+#define SQLITE_ReadUncommitted 0x00004000 /* For shared-cache mode */
+#define SQLITE_LegacyFileFmt 0x00008000 /* Create new databases in format 1 */
+#define SQLITE_RecoveryMode 0x00010000 /* Ignore schema errors */
+#define SQLITE_ReverseOrder 0x00020000 /* Reverse unordered SELECTs */
+#define SQLITE_RecTriggers 0x00040000 /* Enable recursive triggers */
+#define SQLITE_ForeignKeys 0x00080000 /* Enforce foreign key constraints */
+#define SQLITE_AutoIndex 0x00100000 /* Enable automatic indexes */
+#define SQLITE_PreferBuiltin 0x00200000 /* Preference to built-in funcs */
+#define SQLITE_LoadExtension 0x00400000 /* Enable load_extension */
+#define SQLITE_LoadExtFunc 0x00800000 /* Enable load_extension() SQL func */
+#define SQLITE_EnableTrigger 0x01000000 /* True to enable triggers */
+#define SQLITE_DeferFKs 0x02000000 /* Defer all FK constraints */
+#define SQLITE_QueryOnly 0x04000000 /* Disable database changes */
+#define SQLITE_VdbeEQP 0x08000000 /* Debug EXPLAIN QUERY PLAN */
+#define SQLITE_Vacuum 0x10000000 /* Currently in a VACUUM */
+#define SQLITE_CellSizeCk 0x20000000 /* Check btree cell sizes on load */
+#define SQLITE_Fts3Tokenizer 0x40000000 /* Enable fts3_tokenizer(2) */
+#define SQLITE_NoCkptOnClose 0x80000000 /* No checkpoint on close()/DETACH */
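+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): sqlite3.flags is an ordinary bitmask, so features toggle with
+** the usual OR / AND-NOT idiom. The helper below is hypothetical; in the
+** real code the PRAGMA handling in pragma.c performs these updates.
+*/
+#if 0
+static void exampleSetReverseOrder(sqlite3 *db, int onoff){
+  if( onoff ){
+    db->flags |= SQLITE_ReverseOrder;   /* Reverse unordered SELECTs */
+  }else{
+    db->flags &= ~SQLITE_ReverseOrder;
+  }
+}
+#endif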
+
+
+/*
+** Bits of the sqlite3.dbOptFlags field that are used by the
+** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to
+** selectively disable various optimizations.
+*/
+#define SQLITE_QueryFlattener 0x0001 /* Query flattening */
+#define SQLITE_ColumnCache 0x0002 /* Column cache */
+#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */
+#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */
+/* not used 0x0010 // Was: SQLITE_IdxRealAsInt */
+#define SQLITE_DistinctOpt 0x0020 /* DISTINCT using indexes */
+#define SQLITE_CoverIdxScan 0x0040 /* Covering index scans */
+#define SQLITE_OrderByIdxJoin 0x0080 /* ORDER BY of joins via index */
+#define SQLITE_SubqCoroutine 0x0100 /* Evaluate subqueries as coroutines */
+#define SQLITE_Transitive 0x0200 /* Transitive constraints */
+#define SQLITE_OmitNoopJoin 0x0400 /* Omit unused tables in joins */
+#define SQLITE_Stat34 0x0800 /* Use STAT3 or STAT4 data */
+#define SQLITE_CursorHints 0x2000 /* Add OP_CursorHint opcodes */
+#define SQLITE_AllOpts 0xffff /* All optimizations */
+
+/*
+** Macros for testing whether or not optimizations are enabled or disabled.
+*/
+#define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0)
+#define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0)
+
+/*
+** Return true if it is OK to factor constant expressions into the
+** initialization code. The argument is a Parse object for the code generator.
+*/
+#define ConstFactorOk(P) ((P)->okConstFactor)
+
+/*
+** Possible values for the sqlite3.magic field.
+** The numbers are obtained at random and have no special meaning, other
+** than being distinct from one another.
+*/
+#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */
+#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */
+#define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */
+#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */
+#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */
+#define SQLITE_MAGIC_ZOMBIE 0x64cffc7f /* Close with last statement close */
+
+/*
+** Each SQL function is defined by an instance of the following
+** structure. For global built-in functions (ex: substr(), max(), count())
+** a pointer to this structure is held in the sqlite3BuiltinFunctions object.
+** For per-connection application-defined functions, a pointer to this
+** structure is held in the db->aFunc hash table.
+**
+** The u.pHash field is used by the global built-ins. The u.pDestructor
+** field is used by per-connection app-def functions.
+*/
+struct FuncDef {
+ i8 nArg; /* Number of arguments. -1 means unlimited */
+ u16 funcFlags; /* Some combination of SQLITE_FUNC_* */
+ void *pUserData; /* User data parameter */
+ FuncDef *pNext; /* Next function with same name */
+ void (*xSFunc)(sqlite3_context*,int,sqlite3_value**); /* func or agg-step */
+ void (*xFinalize)(sqlite3_context*); /* Agg finalizer */
+ const char *zName; /* SQL name of the function. */
+ union {
+ FuncDef *pHash; /* Next with a different name but the same hash */
+ FuncDestructor *pDestructor; /* Reference counted destructor function */
+ } u;
+};
+
+/*
+** This structure encapsulates a user-function destructor callback (as
+** configured using create_function_v2()) and a reference counter. When
+** create_function_v2() is called to create a function with a destructor,
+** a single object of this type is allocated. FuncDestructor.nRef is set to
+** the number of FuncDef objects created (either 1 or 3, depending on whether
+** or not the specified encoding is SQLITE_ANY). The FuncDef.u.pDestructor
+** member of each of the new FuncDef objects is set to point to the allocated
+** FuncDestructor.
+**
+** Thereafter, when one of the FuncDef objects is deleted, the reference
+** count on this object is decremented. When it reaches 0, the destructor
+** is invoked and the FuncDestructor structure freed.
+*/
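+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): the release half of the reference-counting scheme described
+** above. The hypothetical helper below shows only the counting idea, not
+** the real teardown logic.
+*/
+#if 0
+static void exampleDestructorUnref(sqlite3 *db, FuncDestructor *pD){
+  if( pD && (--pD->nRef)==0 ){
+    pD->xDestroy(pD->pUserData);  /* Last FuncDef gone: run the destructor */
+    sqlite3DbFree(db, pD);
+  }
+}
+#endif
+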
+struct FuncDestructor {
+ int nRef;
+ void (*xDestroy)(void *);
+ void *pUserData;
+};
+
+/*
+** Possible values for FuncDef.funcFlags. Note that the _LENGTH and _TYPEOF
+** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. And
+** SQLITE_FUNC_CONSTANT must be the same as SQLITE_DETERMINISTIC. There
+** are assert() statements in the code to verify this.
+**
+** Value constraints (enforced via assert()):
+** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg
+** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG
+** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
+** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
+** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API
+*/
+#define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */
+#define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */
+#define SQLITE_FUNC_CASE 0x0008 /* Case-sensitive LIKE-type function */
+#define SQLITE_FUNC_EPHEM 0x0010 /* Ephemeral. Delete with VDBE */
+#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/
+#define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */
+#define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */
+#define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */
+#define SQLITE_FUNC_COALESCE 0x0200 /* Built-in coalesce() or ifnull() */
+#define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */
+#define SQLITE_FUNC_CONSTANT 0x0800 /* Constant inputs give a constant output */
+#define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */
+#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
+ ** single query - might change over time */
+#define SQLITE_FUNC_AFFINITY 0x4000 /* Built-in affinity() function */
+
+/*
+** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
+** used to create the initializers for the FuncDef structures.
+**
+** FUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Used to create a scalar function definition of a function zName
+** implemented by C function xFunc that accepts nArg arguments. The
+** value passed as iArg is cast to a (void*) and made available
+** as the user-data (sqlite3_user_data()) for the function. If
+** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set.
+**
+** VFUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag.
+**
+** DFUNCTION(zName, nArg, iArg, bNC, xFunc)
+** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
+** adds the SQLITE_FUNC_SLOCHNG flag. Used for date & time functions
+** and functions like sqlite_version() that can change, but not during
+** a single query.
+**
+** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal)
+** Used to create an aggregate function definition implemented by
+** the C functions xStep and xFinal. The first four parameters
+** are interpreted in the same way as the first 4 parameters to
+** FUNCTION().
+**
+** LIKEFUNC(zName, nArg, pArg, flags)
+** Used to create a scalar function definition of a function zName
+** that accepts nArg arguments and is implemented by a call to C
+** function likeFunc. Argument pArg is cast to a (void *) and made
+** available as the function user-data (sqlite3_user_data()). 
The
+** FuncDef.funcFlags variable is set to the value passed as the flags
+** parameter.
+*/
+#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
+#define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
+#define DFUNCTION(zName, nArg, iArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
+#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \
+ {nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\
+ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
+#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \
+ {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
+ pArg, 0, xFunc, 0, #zName, }
+#define LIKEFUNC(zName, nArg, arg, flags) \
+ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \
+ (void *)arg, 0, likeFunc, 0, #zName, {0} }
+#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \
+ {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL), \
+ SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,#zName, {0}}
+#define AGGREGATE2(zName, nArg, arg, nc, xStep, xFinal, extraFlags) \
+ {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|extraFlags, \
+ SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,#zName, {0}}
+
+/*
+** All current savepoints are stored in a linked list starting at
+** sqlite3.pSavepoint. The first element in the list is the most recently
+** opened savepoint. Savepoints are added to the list by the vdbe
+** OP_Savepoint instruction.
+*/
+struct Savepoint {
+ char *zName; /* Savepoint name (nul-terminated) */
+ i64 nDeferredCons; /* Number of deferred fk violations */
+ i64 nDeferredImmCons; /* Number of deferred immediate fk violations */
+ Savepoint *pNext; /* Parent savepoint (if any) */
+};
+
+/*
+** The following are used as the second parameter to sqlite3Savepoint(),
+** and as the P1 argument to the OP_Savepoint instruction.
+*/
+#define SAVEPOINT_BEGIN 0
+#define SAVEPOINT_RELEASE 1
+#define SAVEPOINT_ROLLBACK 2
+
+
+/*
+** Each SQLite module (virtual table definition) is defined by an
+** instance of the following structure, stored in the sqlite3.aModule
+** hash table.
+*/
+struct Module {
+ const sqlite3_module *pModule; /* Callback pointers */
+ const char *zName; /* Name passed to create_module() */
+ void *pAux; /* pAux passed to create_module() */
+ void (*xDestroy)(void *); /* Module destructor function */
+ Table *pEpoTab; /* Eponymous table for this module */
+};
+
+/*
+** Information about each column of an SQL table is held in an instance
+** of this structure.
+*/
+struct Column {
+ char *zName; /* Name of this column, \000, then the type */
+ Expr *pDflt; /* Default value of this column */
+ char *zColl; /* Collating sequence. If NULL, use the default */
+ u8 notNull; /* An OE_ code for handling a NOT NULL constraint */
+ char affinity; /* One of the SQLITE_AFF_... values */
+ u8 szEst; /* Estimated size of value in this column. sizeof(INT)==1 */
+ u8 colFlags; /* Boolean properties. 
See COLFLAG_ defines below */
+};
+
+/* Allowed values for Column.colFlags:
+*/
+#define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */
+#define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */
+#define COLFLAG_HASTYPE 0x0004 /* Type name follows column name */
+
+/*
+** A "Collating Sequence" is defined by an instance of the following
+** structure. Conceptually, a collating sequence consists of a name and
+** a comparison routine that defines the order of that sequence.
+**
+** If CollSeq.xCmp is NULL, it means that the
+** collating sequence is undefined. Indices built on an undefined
+** collating sequence may not be read or written.
+*/
+struct CollSeq {
+ char *zName; /* Name of the collating sequence, UTF-8 encoded */
+ u8 enc; /* Text encoding handled by xCmp() */
+ void *pUser; /* First argument to xCmp() */
+ int (*xCmp)(void*,int, const void*, int, const void*);
+ void (*xDel)(void*); /* Destructor for pUser */
+};
+
+/*
+** A sort order can be either ASC or DESC.
+*/
+#define SQLITE_SO_ASC 0 /* Sort in ascending order */
+#define SQLITE_SO_DESC 1 /* Sort in descending order */
+#define SQLITE_SO_UNDEFINED -1 /* No sort order specified */
+
+/*
+** Column affinity types.
+**
+** These used to have mnemonic names like 'i' for SQLITE_AFF_INTEGER and
+** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve
+** the speed a little by numbering the values consecutively.
+**
+** But rather than start with 0 or 1, we begin with 'A'. That way,
+** when multiple affinity types are concatenated into a string and
+** used as the P4 operand, they will be more readable.
+**
+** Note also that the numeric types are grouped together so that testing
+** for a numeric type is a single comparison. And the BLOB type is first.
+*/
+#define SQLITE_AFF_BLOB 'A'
+#define SQLITE_AFF_TEXT 'B'
+#define SQLITE_AFF_NUMERIC 'C'
+#define SQLITE_AFF_INTEGER 'D'
+#define SQLITE_AFF_REAL 'E'
+
+#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)
+
+/*
+** The SQLITE_AFF_MASK value masks off the significant bits of an
+** affinity value.
+*/
+#define SQLITE_AFF_MASK 0x47
+
+/*
+** Additional bit values that can be ORed with an affinity without
+** changing the affinity.
+**
+** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL.
+** It causes an assert() to fire if either operand to a comparison
+** operator is NULL. It is added to certain comparison operators to
+** prove that the operands are always NOT NULL.
+*/
+#define SQLITE_KEEPNULL 0x08 /* Used by vector == or <> */
+#define SQLITE_JUMPIFNULL 0x10 /* jumps if either operand is NULL */
+#define SQLITE_STOREP2 0x20 /* Store result in reg[P2] rather than jump */
+#define SQLITE_NULLEQ 0x80 /* NULL=NULL */
+#define SQLITE_NOTNULL 0x90 /* Assert that operands are never NULL */
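+
+/* Illustrative sketch (an editorial addition, not part of the original
+** source): why the affinity codes are consecutive letters. The numeric
+** affinities sort last ('C'..'E'), so one comparison classifies a column,
+** and masking with SQLITE_AFF_MASK strips the OR-ed in comparison bits.
+** The helper below is hypothetical.
+*/
+#if 0
+static int exampleColumnIsNumeric(const Column *pCol){
+  char aff = pCol->affinity & SQLITE_AFF_MASK;  /* Drop JUMPIFNULL etc. */
+  return sqlite3IsNumericAffinity(aff);         /* True for 'C','D','E' */
+}
+#endif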
+
+/*
+** An object of this type is created for each virtual table present in
+** the database schema.
+**
+** If the database schema is shared, then there is one instance of this
+** structure for each database connection (sqlite3*) that uses the shared
+** schema. This is because each database connection requires its own unique
+** instance of the sqlite3_vtab* handle used to access the virtual table
+** implementation. sqlite3_vtab* handles cannot be shared between
+** database connections, even when the rest of the in-memory database
+** schema is shared, as the implementation often stores the database
+** connection handle passed to it via the xConnect() or xCreate() method
+** during initialization internally. This database connection handle may
+** then be used by the virtual table implementation to access real tables
+** within the database. So that they appear as part of the caller's
+** transaction, these accesses need to be made via the same database
+** connection as that used to execute SQL operations on the virtual table.
+**
+** All VTable objects that correspond to a single table in a shared
+** database schema are initially stored in a linked-list pointed to by
+** the Table.pVTable member variable of the corresponding Table object.
+** When an sqlite3_prepare() operation is required to access the virtual
+** table, it searches the list for the VTable that corresponds to the
+** database connection doing the preparing so as to use the correct
+** sqlite3_vtab* handle in the compiled query.
+**
+** When an in-memory Table object is deleted (for example when the
+** schema is being reloaded for some reason), the VTable objects are not
+** deleted and the sqlite3_vtab* handles are not xDisconnect()ed
+** immediately. Instead, they are moved from the Table.pVTable list to
+** another linked list headed by the sqlite3.pDisconnect member of the
+** corresponding sqlite3 structure. They are then deleted/xDisconnected
+** next time a statement is prepared using said sqlite3*. This is done
+** to avoid deadlock issues involving multiple sqlite3.mutex mutexes.
+** Refer to comments above function sqlite3VtabUnlockList() for an
+** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect
+** list without holding the corresponding sqlite3.mutex mutex.
+**
+** The memory for objects of this type is always allocated by
+** sqlite3DbMalloc(), using the connection handle stored in VTable.db as
+** the first argument.
+*/
+struct VTable {
+  sqlite3 *db;         /* Database connection associated with this table */
+  Module *pMod;        /* Pointer to module implementation */
+  sqlite3_vtab *pVtab; /* Pointer to vtab instance */
+  int nRef;            /* Number of pointers to this structure */
+  u8 bConstraint;      /* True if constraints are supported */
+  int iSavepoint;      /* Depth of the SAVEPOINT stack */
+  VTable *pNext;       /* Next in linked list (see above) */
+};
+
+/*
+** The schema for each SQL table and view is represented in memory
+** by an instance of the following structure.
+*/
+struct Table {
+  char *zName;      /* Name of the table or view */
+  Column *aCol;     /* Information about each column */
+  Index *pIndex;    /* List of SQL indexes on this table. */
+  Select *pSelect;  /* NULL for tables. Points to definition if a view. */
+  FKey *pFKey;      /* Linked list of all foreign keys in this table */
+  char *zColAff;    /* String defining the affinity of each column */
+  ExprList *pCheck; /* All CHECK constraints */
+                    /* ... also used as column name list in a VIEW */
+  int tnum;          /* Root BTree page for this table */
+  u32 nTabRef;       /* Number of pointers to this Table */
+  i16 iPKey;         /* If not negative, use aCol[iPKey] as the rowid */
+  i16 nCol;          /* Number of columns in this table */
+  LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */
+  LogEst szTabRow;   /* Estimated size of each table row in bytes */
+#ifdef SQLITE_ENABLE_COSTMULT
+  LogEst costMult;   /* Cost multiplier for using this table */
+#endif
+  u8 tabFlags;       /* Mask of TF_* values */
+  u8 keyConf;        /* What to do in case of uniqueness conflict on iPKey */
+#ifndef SQLITE_OMIT_ALTERTABLE
+  int addColOffset;  /* Offset in CREATE TABLE stmt to add a new column */
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  int nModuleArg;     /* Number of arguments to the module */
+  char **azModuleArg; /* 0: module 1: schema 2: vtab name 3...: args */
+  VTable *pVTable;    /* List of VTable objects. */
+#endif
+  Trigger *pTrigger;  /* List of triggers stored in pSchema */
+  Schema *pSchema;    /* Schema that contains this table */
+  Table *pNextZombie; /* Next on the Parse.pZombieTab list */
+};
+
+/*
+** Allowed values for Table.tabFlags.
+**
+** TF_OOOHidden applies to tables or views that have hidden columns that are
+** followed by non-hidden columns. Example: "CREATE VIRTUAL TABLE x USING
+** vtab1(a HIDDEN, b);". Since "b" is a non-hidden column but "a" is hidden,
+** the TF_OOOHidden attribute would apply in this case. Such tables require
+** special handling during INSERT processing.
+*/
+#define TF_Readonly       0x01 /* Read-only system table */
+#define TF_Ephemeral      0x02 /* An ephemeral table */
+#define TF_HasPrimaryKey  0x04 /* Table has a primary key */
+#define TF_Autoincrement  0x08 /* Integer primary key is autoincrement */
+#define TF_Virtual        0x10 /* Is a virtual table */
+#define TF_WithoutRowid   0x20 /* No rowid. PRIMARY KEY is the key */
+#define TF_NoVisibleRowid 0x40 /* No user-visible "rowid" column */
+#define TF_OOOHidden      0x80 /* Out-of-Order hidden columns */
+
+
+/*
+** Test to see whether or not a table is a virtual table. This is
+** done as a macro so that it will be optimized out when virtual
+** table support is omitted from the build.
+*/
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+# define IsVirtual(X) (((X)->tabFlags & TF_Virtual)!=0)
+#else
+# define IsVirtual(X) 0
+#endif
+
+/*
+** Macros to determine if a column is hidden. IsOrdinaryHiddenColumn()
+** only works for non-virtual tables (ordinary tables and views) and is
+** always false unless SQLITE_ENABLE_HIDDEN_COLUMNS is defined. The
+** IsHiddenColumn() macro is general purpose.
+*/
+#if defined(SQLITE_ENABLE_HIDDEN_COLUMNS)
+# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
+# define IsOrdinaryHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
+#elif !defined(SQLITE_OMIT_VIRTUALTABLE)
+# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
+# define IsOrdinaryHiddenColumn(X) 0
+#else
+# define IsHiddenColumn(X) 0
+# define IsOrdinaryHiddenColumn(X) 0
+#endif
+
+
+/* Does the table have a rowid */
+#define HasRowid(X)     (((X)->tabFlags & TF_WithoutRowid)==0)
+#define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0)
+
+/*
+** Each foreign key constraint is an instance of the following structure.
+**
+** A foreign key is associated with two tables. The "from" table is
+** the table that contains the REFERENCES clause that creates the foreign
+** key. The "to" table is the table that is named in the REFERENCES clause.
+** Consider this example:
+**
+**     CREATE TABLE ex1(
+**       a INTEGER PRIMARY KEY,
+**       b INTEGER CONSTRAINT fk1 REFERENCES ex2(x)
+**     );
+**
+** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2".
+** Equivalent names:
+**
+**     from-table == child-table
+**       to-table == parent-table
+**
+** Each REFERENCES clause generates an instance of the following structure
+** which is attached to the from-table. The to-table need not exist when
+** the from-table is created. The existence of the to-table is not checked.
+**
+** The list of all parents for child Table X is held at X.pFKey.
+**
+** A list of all children for a table named Z (which might not even exist)
+** is held in Schema.fkeyHash with a hash key of Z.
+*/
+struct FKey {
+  Table *pFrom;    /* Table containing the REFERENCES clause (aka: Child) */
+  FKey *pNextFrom; /* Next FKey with the same pFrom. Next parent of pFrom */
+  char *zTo;       /* Name of table that the key points to (aka: Parent) */
+  FKey *pNextTo;   /* Next with the same zTo. Next child of zTo. */
+  FKey *pPrevTo;   /* Previous with the same zTo */
+  int nCol;        /* Number of columns in this key */
+  /* EV: R-30323-21917 */
+  u8 isDeferred;   /* True if constraint checking is deferred till COMMIT */
+  u8 aAction[2];   /* ON DELETE and ON UPDATE actions, respectively */
+  Trigger *apTrigger[2];/* Triggers for aAction[] actions */
+  struct sColMap { /* Mapping of columns in pFrom to columns in zTo */
+    int iFrom;     /* Index of column in pFrom */
+    char *zCol;    /* Name of column in zTo. If NULL use PRIMARY KEY */
+  } aCol[1];       /* One entry for each of nCol columns */
+};
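+
+/* Illustrative sketch (not part of the build): the ex1/ex2 schema from the
+** comment above, exercised through the public API.  Enforcement is
+** per-connection and off by default; the helper name is hypothetical.
+*/
+#if 0
+static int fkeyDemo(sqlite3 *db){
+  sqlite3_exec(db, "PRAGMA foreign_keys=ON;", 0, 0, 0);
+  sqlite3_exec(db,
+      "CREATE TABLE ex2(x INTEGER PRIMARY KEY);"
+      "CREATE TABLE ex1(a INTEGER PRIMARY KEY,"
+      "                 b INTEGER CONSTRAINT fk1 REFERENCES ex2(x));",
+      0, 0, 0);
+  /* Parent table ex2 has no row with x=99, so this INSERT into the
+  ** child table fails with SQLITE_CONSTRAINT. */
+  return sqlite3_exec(db, "INSERT INTO ex1 VALUES(1, 99);", 0, 0, 0);
+}
+#endif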
+
+/*
+** SQLite supports many different ways to resolve a constraint
+** error. ROLLBACK processing means that a constraint violation
+** causes the operation in process to fail and for the current transaction
+** to be rolled back. ABORT processing means the operation in process
+** fails and any prior changes from that one operation are backed out,
+** but the transaction is not rolled back. FAIL processing means that
+** the operation in progress stops and returns an error code. But prior
+** changes due to the same operation are not backed out and no rollback
+** occurs. IGNORE means that the particular row that caused the constraint
+** error is not inserted or updated. Processing continues and no error
+** is returned. REPLACE means that preexisting database rows that caused
+** a UNIQUE constraint violation are removed so that the new insert or
+** update can proceed. Processing continues and no error is reported.
+**
+** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys.
+** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
+** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign
+** key is set to NULL. CASCADE means that a DELETE or UPDATE of the
+** referenced table row is propagated into the row that holds the
+** foreign key.
+**
+** The following symbolic values are used to record which type
+** of action to take.
+*/
+#define OE_None     0 /* There is no constraint to check */
+#define OE_Rollback 1 /* Fail the operation and rollback the transaction */
+#define OE_Abort    2 /* Back out changes but do not rollback transaction */
+#define OE_Fail     3 /* Stop the operation but leave all prior changes */
+#define OE_Ignore   4 /* Ignore the error. Do not do the INSERT or UPDATE */
+#define OE_Replace  5 /* Delete existing record, then do INSERT or UPDATE */
+
+#define OE_Restrict 6 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */
+#define OE_SetNull  7 /* Set the foreign key value to NULL */
+#define OE_SetDflt  8 /* Set the foreign key value to its default */
+#define OE_Cascade  9 /* Cascade the changes */
+
+#define OE_Default 10 /* Do whatever the default action is */
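+
+/* Illustrative sketch (not part of the build): the OE_* codes above are the
+** in-memory form of the SQL-level conflict clauses.  The two statements
+** below are recorded as OE_Ignore and OE_Replace respectively.  The helper
+** name is hypothetical.
+*/
+#if 0
+static void onConflictDemo(sqlite3 *db){
+  sqlite3_exec(db, "CREATE TABLE t2(k INTEGER PRIMARY KEY, v);"
+                   "INSERT INTO t2 VALUES(1,'old');", 0, 0, 0);
+  /* OE_Ignore: the duplicate key is skipped and t2.v stays 'old'. */
+  sqlite3_exec(db, "INSERT OR IGNORE INTO t2 VALUES(1,'new');", 0, 0, 0);
+  /* OE_Replace: the existing row is deleted first; t2.v becomes 'new'. */
+  sqlite3_exec(db, "INSERT OR REPLACE INTO t2 VALUES(1,'new');", 0, 0, 0);
+}
+#endif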
+
+
+/*
+** An instance of the following structure is passed as the first
+** argument to sqlite3VdbeKeyCompare and is used to control the
+** comparison of the two index keys.
+**
+** Note that aSortOrder[] and aColl[] have nField+1 slots. There
+** are nField slots for the columns of an index then one extra slot
+** for the rowid at the end.
+*/
+struct KeyInfo {
+  u32 nRef;          /* Number of references to this KeyInfo object */
+  u8 enc;            /* Text encoding - one of the SQLITE_UTF* values */
+  u16 nField;        /* Number of key columns in the index */
+  u16 nXField;       /* Number of columns beyond the key columns */
+  sqlite3 *db;       /* The database connection */
+  u8 *aSortOrder;    /* Sort order for each column. */
+  CollSeq *aColl[1]; /* Collating sequence for each term of the key */
+};
+
+/*
+** This object holds a record which has been parsed out into individual
+** fields, for the purposes of doing a comparison.
+**
+** A record is an object that contains one or more fields of data.
+** Records are used to store the content of a table row and to store
+** the key of an index. A blob encoding of a record is created by
+** the OP_MakeRecord opcode of the VDBE and is disassembled by the
+** OP_Column opcode.
+**
+** An instance of this object serves as a "key" for doing a search on
+** an index b+tree. The goal of the search is to find the entry that
+** is closest to the key described by this object. This object might hold
+** just a prefix of the key. The number of fields is given by
+** pKeyInfo->nField.
+**
+** The r1 and r2 fields are the values to return if this key is less than
+** or greater than a key in the btree, respectively. These are normally
+** -1 and +1 respectively, but might be inverted to +1 and -1 if the b-tree
+** is in DESC order.
+**
+** The key comparison functions actually return default_rc when they find
+** an equals comparison. default_rc can be -1, 0, or +1. If there are
+** multiple entries in the b-tree with the same key (when only looking
+** at the first pKeyInfo->nField fields) then default_rc can be set to -1 to
+** cause the search to find the last match, or +1 to cause the search to
+** find the first match.
+**
+** The key comparison functions will set eqSeen to true if they ever
+** get an equal result when comparing this structure to a b-tree record.
+** When default_rc!=0, the search might end up on the record immediately
+** before the first match or immediately after the last match. The
+** eqSeen field will indicate whether or not an exact match exists in the
+** b-tree.
+*/
+struct UnpackedRecord {
+  KeyInfo *pKeyInfo; /* Collation and sort-order information */
+  Mem *aMem;         /* Values */
+  u16 nField;        /* Number of entries in aMem[] */
+  i8 default_rc;     /* Comparison result if keys are equal */
+  u8 errCode;        /* Error detected by xRecordCompare (CORRUPT or NOMEM) */
+  i8 r1;             /* Value to return if (lhs > rhs) */
+  i8 r2;             /* Value to return if (rhs < lhs) */
+  u8 eqSeen;         /* True if an equality comparison has been seen */
+};
+
+
+/*
+** Each SQL index is represented in memory by an
+** instance of the following structure.
+**
+** The columns of the table that are to be indexed are described
+** by the aiColumn[] field of this structure. For example, suppose
+** we have the following table and index:
+**
+**     CREATE TABLE Ex1(c1 int, c2 int, c3 text);
+**     CREATE INDEX Ex2 ON Ex1(c3,c1);
+**
+** In the Table structure describing Ex1, nCol==3 because there are
+** three columns in the table. In the Index structure describing
+** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed.
+** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the
+** first column to be indexed (c3) has an index of 2 in Ex1.aCol[].
+** The second column to be indexed (c1) has an index of 0 in
+** Ex1.aCol[], hence Ex2.aiColumn[1]==0.
+**
+** The Index.onError field determines whether or not the indexed columns
+** must be unique and what to do if they are not. When Index.onError=OE_None,
+** it means this is not a unique index. Otherwise it is a unique index
+** and the value of Index.onError indicates which conflict resolution
+** algorithm to employ whenever an attempt is made to insert a non-unique
+** element.
+**
+** While parsing a CREATE TABLE or CREATE INDEX statement in order to
+** generate VDBE code (as opposed to parsing one read from an sqlite_master
+** table as part of parsing an existing database schema), transient instances
+** of this structure may be created. In this case the Index.tnum variable is
+** used to store the address of a VDBE instruction, not a database page
+** number (it cannot - the database page is not allocated until the VDBE
+** program is executed). See convertToWithoutRowidTable() for details.
+*/
+struct Index {
+  char *zName;   /* Name of this index */
+  i16 *aiColumn; /* Which columns are used by this index. 1st is 0 */
+  LogEst *aiRowLogEst; /* From ANALYZE: Est. rows selected by each column */
+  Table *pTable;       /* The SQL table being indexed */
+  char *zColAff;       /* String defining the affinity of each column */
+  Index *pNext;        /* The next index associated with the same table */
+  Schema *pSchema;     /* Schema containing this index */
+  u8 *aSortOrder;      /* for each column: True==DESC, False==ASC */
+  const char **azColl; /* Array of collation sequence names for index */
+  Expr *pPartIdxWhere; /* WHERE clause for partial indices */
+  ExprList *aColExpr;  /* Column expressions */
+  int tnum;            /* DB Page containing root of this index */
+  LogEst szIdxRow;     /* Estimated average row size in bytes */
+  u16 nKeyCol;         /* Number of columns forming the key */
+  u16 nColumn;         /* Number of columns stored in the index */
+  u8 onError;          /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
+  unsigned idxType:2;     /* 1==UNIQUE, 2==PRIMARY KEY, 0==CREATE INDEX */
+  unsigned bUnordered:1;  /* Use this index for == or IN queries only */
+  unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */
+  unsigned isResized:1;   /* True if resizeIndexObject() has been called */
+  unsigned isCovering:1;  /* True if this is a covering index */
+  unsigned noSkipScan:1;  /* Do not try to use skip-scan if true */
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  int nSample;          /* Number of elements in aSample[] */
+  int nSampleCol;       /* Size of IndexSample.anEq[] and so on */
+  tRowcnt *aAvgEq;      /* Average nEq values for keys not in aSample */
+  IndexSample *aSample; /* Samples of the left-most key */
+  tRowcnt *aiRowEst;    /* Non-logarithmic stat1 data for this index */
+  tRowcnt nRowEst0;     /* Non-logarithmic number of rows in the index */
+#endif
+};
+
+/*
+** Allowed values for Index.idxType
+*/
+#define SQLITE_IDXTYPE_APPDEF     0 /* Created using CREATE INDEX */
+#define SQLITE_IDXTYPE_UNIQUE     1 /* Implements a UNIQUE constraint */
+#define SQLITE_IDXTYPE_PRIMARYKEY 2 /* Is the PRIMARY KEY for the table */
+
+/* Return true if index X is a PRIMARY KEY index */
+#define IsPrimaryKeyIndex(X) ((X)->idxType==SQLITE_IDXTYPE_PRIMARYKEY)
+
+/* Return true if index X is a UNIQUE index */
+#define IsUniqueIndex(X) ((X)->onError!=OE_None)
+
+/* The Index.aiColumn[] values are normally positive integers. But
+** there are some negative values that have special meaning:
+*/
+#define XN_ROWID (-1) /* Indexed column is the rowid */
+#define XN_EXPR  (-2) /* Indexed column is an expression */
+
+/*
+** Each sample stored in the sqlite_stat3 table is represented in memory
+** using a structure of this type. See documentation at the top of the
+** analyze.c source file for additional information.
+*/
+struct IndexSample {
+  void *p;        /* Pointer to sampled record */
+  int n;          /* Size of record in bytes */
+  tRowcnt *anEq;  /* Est. number of rows where the key equals this sample */
+  tRowcnt *anLt;  /* Est. number of rows where key is less than this sample */
+  tRowcnt *anDLt; /* Est. number of distinct keys less than this sample */
+};
+
+/*
+** Each token coming out of the lexer is an instance of
+** this structure. Tokens are also used as part of an expression.
+**
+** Note if Token.z==0 then Token.dyn and Token.n are undefined and
+** may contain random values. Do not make any assumptions about Token.dyn
+** and Token.n when Token.z==0.
+*/
+struct Token {
+  const char *z;  /* Text of the token. Not NULL-terminated! */
+  unsigned int n; /* Number of characters in this token */
+};
+
+/*
+** An instance of this structure contains information needed to generate
+** code for a SELECT that contains aggregate functions.
+** +** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a +** pointer to this structure. The Expr.iColumn field is the index in +** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate +** code for that node. +** +** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the +** original Select structure that describes the SELECT statement. These +** fields do not need to be freed when deallocating the AggInfo structure. +*/ +struct AggInfo { + u8 directMode; /* Direct rendering mode means take data directly + ** from source tables rather than from accumulators */ + u8 useSortingIdx; /* In direct mode, reference the sorting index rather + ** than the source table */ + int sortingIdx; /* Cursor number of the sorting index */ + int sortingIdxPTab; /* Cursor number of pseudo-table */ + int nSortingColumn; /* Number of columns in the sorting index */ + int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */ + ExprList *pGroupBy; /* The group by clause */ + struct AggInfo_col { /* For each column used in source tables */ + Table *pTab; /* Source table */ + int iTable; /* Cursor number of the source table */ + int iColumn; /* Column number within the source table */ + int iSorterColumn; /* Column number in the sorting index */ + int iMem; /* Memory location that acts as accumulator */ + Expr *pExpr; /* The original expression */ + } *aCol; + int nColumn; /* Number of used entries in aCol[] */ + int nAccumulator; /* Number of columns that show through to the output. + ** Additional columns are used only as parameters to + ** aggregate functions */ + struct AggInfo_func { /* For each aggregate function */ + Expr *pExpr; /* Expression encoding the function */ + FuncDef *pFunc; /* The aggregate function implementation */ + int iMem; /* Memory location that acts as accumulator */ + int iDistinct; /* Ephemeral table used to enforce DISTINCT */ + } *aFunc; + int nFunc; /* Number of entries in aFunc[] */ +}; + +/* +** The datatype ynVar is a signed integer, either 16-bit or 32-bit. +** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater +** than 32767 we have to make it 32-bit. 16-bit is preferred because +** it uses less memory in the Expr object, which is a big memory user +** in systems with lots of prepared statements. And few applications +** need more than about 10 or 20 variables. But some extreme users want +** to have prepared statements with over 32767 variables, and for them +** the option is available (at compile-time). +*/ +#if SQLITE_MAX_VARIABLE_NUMBER<=32767 +typedef i16 ynVar; +#else +typedef int ynVar; +#endif + +/* +** Each node of an expression in the parse tree is an instance +** of this structure. +** +** Expr.op is the opcode. The integer parser token codes are reused +** as opcodes here. For example, the parser defines TK_GE to be an integer +** code representing the ">=" operator. This same integer code is reused +** to represent the greater-than-or-equal-to operator in the expression +** tree. +** +** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB, +** or TK_STRING), then Expr.token contains the text of the SQL literal. If +** the expression is a variable (TK_VARIABLE), then Expr.token contains the +** variable name. Finally, if the expression is an SQL function (TK_FUNCTION), +** then Expr.token contains the name of the function. +** +** Expr.pRight and Expr.pLeft are the left and right subexpressions of a +** binary operator. Either or both may be NULL. 
+**
+** Expr.x.pList is a list of arguments if the expression is an SQL function,
+** a CASE expression or an IN expression of the form "<lhs> IN (<y>, <z>...)".
+** Expr.x.pSelect is used if the expression is a sub-select or an expression of
+** the form "<lhs> IN (SELECT ...)". If the EP_xIsSelect bit is set in the
+** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is
+** valid.
+**
+** An expression of the form ID or ID.ID refers to a column in a table.
+** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is
+** the integer cursor number of a VDBE cursor pointing to that table and
+** Expr.iColumn is the column number for the specific column. If the
+** expression is used as a result in an aggregate SELECT, then the
+** value is also stored in the Expr.iAgg column in the aggregate so that
+** it can be accessed after all aggregates are computed.
+**
+** If the expression is an unbound variable marker (a question mark
+** character '?' in the original SQL) then the Expr.iColumn field holds the
+** index number for that variable.
+**
+** If the expression is a subquery then Expr.iColumn holds an integer
+** register number containing the result of the subquery. If the
+** subquery gives a constant result, then iTable is -1. If the subquery
+** gives a different answer at different times during statement processing
+** then iTable is the address of a subroutine that computes the subquery.
+**
+** If the Expr is of type TK_COLUMN, and the table it is selecting from
+** is a disk table or the "old.*" pseudo-table, then pTab points to the
+** corresponding table definition.
+**
+** ALLOCATION NOTES:
+**
+** Expr objects can use a lot of memory space in database schema. To
+** help reduce memory requirements, sometimes an Expr object will be
+** truncated. And to reduce the number of memory allocations, sometimes
+** two or more Expr objects will be stored in a single memory allocation,
+** together with Expr.zToken strings.
+**
+** The EP_Reduced and EP_TokenOnly flags are set when
+** an Expr object is truncated. When EP_Reduced is set, then all
+** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees
+** are contained within the same memory allocation. Note, however, that
+** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately
+** allocated, regardless of whether or not EP_Reduced is set.
+*/
+struct Expr {
+  u8 op;         /* Operation performed by this node */
+  char affinity; /* The affinity of the column or 0 if not a column */
+  u32 flags;     /* Various flags. EP_* See below */
+  union {
+    char *zToken; /* Token value. Zero terminated and dequoted */
+    int iValue;   /* Non-negative integer value if EP_IntValue */
+  } u;
+
+  /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no
+  ** space is allocated for the fields below this point. An attempt to
+  ** access them will result in a segfault or malfunction.
+  *********************************************************************/
+
+  Expr *pLeft;  /* Left subnode */
+  Expr *pRight; /* Right subnode */
+  union {
+    ExprList *pList; /* op = IN, EXISTS, SELECT, CASE, FUNCTION, BETWEEN */
+    Select *pSelect; /* EP_xIsSelect and op = IN, EXISTS, SELECT */
+  } x;
+
+  /* If the EP_Reduced flag is set in the Expr.flags mask, then no
+  ** space is allocated for the fields below this point. An attempt to
+  ** access them will result in a segfault or malfunction.
+ *********************************************************************/ + +#if SQLITE_MAX_EXPR_DEPTH>0 + int nHeight; /* Height of the tree headed by this node */ +#endif + int iTable; /* TK_COLUMN: cursor number of table holding column + ** TK_REGISTER: register number + ** TK_TRIGGER: 1 -> new, 0 -> old + ** EP_Unlikely: 134217728 times likelihood + ** TK_SELECT: 1st register of result vector */ + ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid. + ** TK_VARIABLE: variable number (always >= 1). + ** TK_SELECT_COLUMN: column of the result vector */ + i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ + i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ + u8 op2; /* TK_REGISTER: original value of Expr.op + ** TK_COLUMN: the value of p5 for OP_Column + ** TK_AGG_FUNCTION: nesting depth */ + AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ + Table *pTab; /* Table for TK_COLUMN expressions. */ +}; + +/* +** The following are the meanings of bits in the Expr.flags field. +*/ +#define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */ +#define EP_Agg 0x000002 /* Contains one or more aggregate functions */ +#define EP_Resolved 0x000004 /* IDs have been resolved to COLUMNs */ +#define EP_Error 0x000008 /* Expression contains one or more errors */ +#define EP_Distinct 0x000010 /* Aggregate function with DISTINCT keyword */ +#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */ +#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */ +#define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */ +#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */ +#define EP_Generic 0x000200 /* Ignore COLLATE or affinity on this tree */ +#define EP_IntValue 0x000400 /* Integer value contained in u.iValue */ +#define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */ +#define EP_Skip 0x001000 /* COLLATE, AS, or UNLIKELY */ +#define EP_Reduced 0x002000 /* Expr struct EXPR_REDUCEDSIZE bytes only */ +#define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ +#define EP_Static 0x008000 /* Held in memory not obtained from malloc() */ +#define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */ +#define EP_NoReduce 0x020000 /* Cannot EXPRDUP_REDUCE this Expr */ +#define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */ +#define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */ +#define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */ +#define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */ +#define EP_Alias 0x400000 /* Is an alias for a result set column */ +#define EP_Leaf 0x800000 /* Expr.pLeft, .pRight, .u.pSelect all NULL */ + +/* +** Combinations of two or more EP_* flags +*/ +#define EP_Propagate (EP_Collate|EP_Subquery) /* Propagate these bits up tree */ + +/* +** These macros can be used to test, set, or clear bits in the +** Expr.flags field. +*/ +#define ExprHasProperty(E,P) (((E)->flags&(P))!=0) +#define ExprHasAllProperty(E,P) (((E)->flags&(P))==(P)) +#define ExprSetProperty(E,P) (E)->flags|=(P) +#define ExprClearProperty(E,P) (E)->flags&=~(P) + +/* The ExprSetVVAProperty() macro is used for Verification, Validation, +** and Accreditation only. It works like ExprSetProperty() during VVA +** processes but is a no-op for delivery. 
+*/ +#ifdef SQLITE_DEBUG +# define ExprSetVVAProperty(E,P) (E)->flags|=(P) +#else +# define ExprSetVVAProperty(E,P) +#endif + +/* +** Macros to determine the number of bytes required by a normal Expr +** struct, an Expr struct with the EP_Reduced flag set in Expr.flags +** and an Expr struct with the EP_TokenOnly flag set. +*/ +#define EXPR_FULLSIZE sizeof(Expr) /* Full size */ +#define EXPR_REDUCEDSIZE offsetof(Expr,iTable) /* Common features */ +#define EXPR_TOKENONLYSIZE offsetof(Expr,pLeft) /* Fewer features */ + +/* +** Flags passed to the sqlite3ExprDup() function. See the header comment +** above sqlite3ExprDup() for details. +*/ +#define EXPRDUP_REDUCE 0x0001 /* Used reduced-size Expr nodes */ + +/* +** A list of expressions. Each expression may optionally have a +** name. An expr/name combination can be used in several ways, such +** as the list of "expr AS ID" fields following a "SELECT" or in the +** list of "ID = expr" items in an UPDATE. A list of expressions can +** also be used as the argument to a function, in which case the a.zName +** field is not used. +** +** By default the Expr.zSpan field holds a human-readable description of +** the expression that is used in the generation of error messages and +** column labels. In this case, Expr.zSpan is typically the text of a +** column expression as it exists in a SELECT statement. However, if +** the bSpanIsTab flag is set, then zSpan is overloaded to mean the name +** of the result column in the form: DATABASE.TABLE.COLUMN. This later +** form is used for name resolution with nested FROM clauses. +*/ +struct ExprList { + int nExpr; /* Number of expressions on the list */ + struct ExprList_item { /* For each expression in the list */ + Expr *pExpr; /* The list of expressions */ + char *zName; /* Token associated with this expression */ + char *zSpan; /* Original text of the expression */ + u8 sortOrder; /* 1 for DESC or 0 for ASC */ + unsigned done :1; /* A flag to indicate when processing is finished */ + unsigned bSpanIsTab :1; /* zSpan holds DB.TABLE.COLUMN */ + unsigned reusable :1; /* Constant expression is reusable */ + union { + struct { + u16 iOrderByCol; /* For ORDER BY, column number in result set */ + u16 iAlias; /* Index into Parse.aAlias[] for zName */ + } x; + int iConstExprReg; /* Register in which Expr value is cached */ + } u; + } *a; /* Alloc a power of two greater or equal to nExpr */ +}; + +/* +** An instance of this structure is used by the parser to record both +** the parse tree for an expression and the span of input text for an +** expression. +*/ +struct ExprSpan { + Expr *pExpr; /* The expression parse tree */ + const char *zStart; /* First character of input text */ + const char *zEnd; /* One character past the end of input text */ +}; + +/* +** An instance of this structure can hold a simple list of identifiers, +** such as the list "a,b,c" in the following statements: +** +** INSERT INTO t(a,b,c) VALUES ...; +** CREATE INDEX idx ON t(a,b,c); +** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...; +** +** The IdList.a.idx field is used when the IdList represents the list of +** column names after a table name in an INSERT statement. In the statement +** +** INSERT INTO t(a,b,c) ... +** +** If "a" is the k-th column of table "t", then IdList.a[0].idx==k. 
+*/ +struct IdList { + struct IdList_item { + char *zName; /* Name of the identifier */ + int idx; /* Index in some Table.aCol[] of a column named zName */ + } *a; + int nId; /* Number of identifiers on the list */ +}; + +/* +** The bitmask datatype defined below is used for various optimizations. +** +** Changing this from a 64-bit to a 32-bit type limits the number of +** tables in a join to 32 instead of 64. But it also reduces the size +** of the library by 738 bytes on ix86. +*/ +#ifdef SQLITE_BITMASK_TYPE + typedef SQLITE_BITMASK_TYPE Bitmask; +#else + typedef u64 Bitmask; +#endif + +/* +** The number of bits in a Bitmask. "BMS" means "BitMask Size". +*/ +#define BMS ((int)(sizeof(Bitmask)*8)) + +/* +** A bit in a Bitmask +*/ +#define MASKBIT(n) (((Bitmask)1)<<(n)) +#define MASKBIT32(n) (((unsigned int)1)<<(n)) +#define ALLBITS ((Bitmask)-1) + +/* +** The following structure describes the FROM clause of a SELECT statement. +** Each table or subquery in the FROM clause is a separate element of +** the SrcList.a[] array. +** +** With the addition of multiple database support, the following structure +** can also be used to describe a particular table such as the table that +** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL, +** such a table must be a simple name: ID. But in SQLite, the table can +** now be identified by a database name, a dot, then the table name: ID.ID. +** +** The jointype starts out showing the join type between the current table +** and the next table on the list. The parser builds the list this way. +** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each +** jointype expresses the join between the table and the previous table. +** +** In the colUsed field, the high-order bit (bit 63) is set if the table +** contains more than 63 columns and the 64-th or later column is used. +*/ +struct SrcList { + int nSrc; /* Number of tables or subqueries in the FROM clause */ + u32 nAlloc; /* Number of entries allocated in a[] below */ + struct SrcList_item { + Schema *pSchema; /* Schema to which this item is fixed */ + char *zDatabase; /* Name of database holding this table */ + char *zName; /* Name of the table */ + char *zAlias; /* The "B" part of a "A AS B" phrase. 
+
+/*
+** The following structure describes the FROM clause of a SELECT statement.
+** Each table or subquery in the FROM clause is a separate element of
+** the SrcList.a[] array.
+**
+** With the addition of multiple database support, the following structure
+** can also be used to describe a particular table such as the table that
+** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL,
+** such a table must be a simple name: ID. But in SQLite, the table can
+** now be identified by a database name, a dot, then the table name: ID.ID.
+**
+** The jointype starts out showing the join type between the current table
+** and the next table on the list. The parser builds the list this way.
+** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
+** jointype expresses the join between the table and the previous table.
+**
+** In the colUsed field, the high-order bit (bit 63) is set if the table
+** contains more than 63 columns and the 64-th or later column is used.
+*/
+struct SrcList {
+  int nSrc;   /* Number of tables or subqueries in the FROM clause */
+  u32 nAlloc; /* Number of entries allocated in a[] below */
+  struct SrcList_item {
+    Schema *pSchema; /* Schema to which this item is fixed */
+    char *zDatabase; /* Name of database holding this table */
+    char *zName;     /* Name of the table */
+    char *zAlias;    /* The "B" part of a "A AS B" phrase. zName is the "A" */
+    Table *pTab;     /* An SQL table corresponding to zName */
+    Select *pSelect; /* A SELECT statement used in place of a table name */
+    int addrFillSub; /* Address of subroutine to manifest a subquery */
+    int regReturn;   /* Register holding return address of addrFillSub */
+    int regResult;   /* Registers holding results of a co-routine */
+    struct {
+      u8 jointype;              /* Type of join between this table and the previous */
+      unsigned notIndexed :1;   /* True if there is a NOT INDEXED clause */
+      unsigned isIndexedBy :1;  /* True if there is an INDEXED BY clause */
+      unsigned isTabFunc :1;    /* True if table-valued-function syntax */
+      unsigned isCorrelated :1; /* True if sub-query is correlated */
+      unsigned viaCoroutine :1; /* Implemented as a co-routine */
+      unsigned isRecursive :1;  /* True for recursive reference in WITH */
+    } fg;
+#ifndef SQLITE_OMIT_EXPLAIN
+    u8 iSelectId;    /* If pSelect!=0, the id of the sub-select in EQP */
+#endif
+    int iCursor;     /* The VDBE cursor number used to access this table */
+    Expr *pOn;       /* The ON clause of a join */
+    IdList *pUsing;  /* The USING clause of a join */
+    Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */
+    union {
+      char *zIndexedBy;   /* Identifier from "INDEXED BY <zIndex>" clause */
+      ExprList *pFuncArg; /* Arguments to table-valued-function */
+    } u1;
+    Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */
+  } a[1];            /* One entry for each identifier on the list */
+};
+
+/*
+** Permitted values of the SrcList.a.jointype field
+*/
+#define JT_INNER   0x0001 /* Any kind of inner or cross join */
+#define JT_CROSS   0x0002 /* Explicit use of the CROSS keyword */
+#define JT_NATURAL 0x0004 /* True for a "natural" join */
+#define JT_LEFT    0x0008 /* Left outer join */
+#define JT_RIGHT   0x0010 /* Right outer join */
+#define JT_OUTER   0x0020 /* The "OUTER" keyword is present */
+#define JT_ERROR   0x0040 /* unknown or unsupported join type */
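+
+/* Illustrative sketch (not part of the build): the JT_* bits combine to
+** describe one join operator; for example "NATURAL LEFT OUTER JOIN" is
+** recorded as JT_NATURAL|JT_LEFT|JT_OUTER.  The helper name is
+** hypothetical.
+*/
+#if 0
+static int isOuterJoin(u8 jointype){
+  /* A join is an outer join if either directional bit is present. */
+  return (jointype & (JT_LEFT|JT_RIGHT))!=0;
+}
+#endif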
+
+
+/*
+** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin()
+** and the WhereInfo.wctrlFlags member.
+**
+** Value constraints (enforced via assert()):
+**     WHERE_USE_LIMIT == SF_FixedLimit
+*/
+#define WHERE_ORDERBY_NORMAL   0x0000 /* No-op */
+#define WHERE_ORDERBY_MIN      0x0001 /* ORDER BY processing for min() func */
+#define WHERE_ORDERBY_MAX      0x0002 /* ORDER BY processing for max() func */
+#define WHERE_ONEPASS_DESIRED  0x0004 /* Want to do one-pass UPDATE/DELETE */
+#define WHERE_ONEPASS_MULTIROW 0x0008 /* ONEPASS is ok with multiple rows */
+#define WHERE_DUPLICATES_OK    0x0010 /* Ok to return a row more than once */
+#define WHERE_OR_SUBCLAUSE     0x0020 /* Processing a sub-WHERE as part of
+                                      ** the OR optimization */
+#define WHERE_GROUPBY          0x0040 /* pOrderBy is really a GROUP BY */
+#define WHERE_DISTINCTBY       0x0080 /* pOrderby is really a DISTINCT clause */
+#define WHERE_WANT_DISTINCT    0x0100 /* All output needs to be distinct */
+#define WHERE_SORTBYGROUP      0x0200 /* Support sqlite3WhereIsSorted() */
+#define WHERE_SEEK_TABLE       0x0400 /* Do not defer seeks on main table */
+#define WHERE_ORDERBY_LIMIT    0x0800 /* ORDERBY+LIMIT on the inner loop */
+#define WHERE_SEEK_UNIQ_TABLE  0x1000 /* Do not defer seeks if unique */
+                        /* 0x2000 not currently used */
+#define WHERE_USE_LIMIT        0x4000 /* Use the LIMIT in cost estimates */
+                        /* 0x8000 not currently used */
+
+/* Allowed return values from sqlite3WhereIsDistinct()
+*/
+#define WHERE_DISTINCT_NOOP      0 /* DISTINCT keyword not used */
+#define WHERE_DISTINCT_UNIQUE    1 /* No duplicates */
+#define WHERE_DISTINCT_ORDERED   2 /* All duplicates are adjacent */
+#define WHERE_DISTINCT_UNORDERED 3 /* Duplicates are scattered */
+
+/*
+** A NameContext defines a context in which to resolve table and column
+** names. The context consists of a list of tables (the pSrcList field) and
+** a list of named expressions (pEList). The named expression list may
+** be NULL. The pSrcList corresponds to the FROM clause of a SELECT or
+** to the table being operated on by INSERT, UPDATE, or DELETE. The
+** pEList corresponds to the result set of a SELECT and is NULL for
+** other statements.
+**
+** NameContexts can be nested. When resolving names, the inner-most
+** context is searched first. If no match is found, the next outer
+** context is checked. If there is still no match, the next context
+** is checked. This process continues until either a match is found
+** or all contexts are checked. When a match is found, the nRef member of
+** the context containing the match is incremented.
+**
+** Each subquery gets a new NameContext. The pNext field points to the
+** NameContext in the parent query. Thus the process of scanning the
+** NameContext list corresponds to searching through successively outer
+** subqueries looking for a match.
+*/
+struct NameContext {
+  Parse *pParse;      /* The parser */
+  SrcList *pSrcList;  /* One or more tables used to resolve names */
+  ExprList *pEList;   /* Optional list of result-set columns */
+  AggInfo *pAggInfo;  /* Information about aggregates at this level */
+  NameContext *pNext; /* Next outer name context. NULL for outermost */
+  int nRef;           /* Number of names resolved by this context */
+  int nErr;           /* Number of errors encountered while resolving names */
+  u16 ncFlags;        /* Zero or more NC_* flags defined below */
+};
+
+/*
+** Allowed values for the NameContext.ncFlags field.
+**
+** Value constraints (all checked via assert()):
+**    NC_HasAgg == SF_HasAgg
+**    NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX
+**
+*/
+#define NC_AllowAgg  0x0001 /* Aggregate functions are allowed here */
+#define NC_PartIdx   0x0002 /* True if resolving a partial index WHERE */
+#define NC_IsCheck   0x0004 /* True if resolving names in a CHECK constraint */
+#define NC_InAggFunc 0x0008 /* True if analyzing arguments to an agg func */
+#define NC_HasAgg    0x0010 /* One or more aggregate functions seen */
+#define NC_IdxExpr   0x0020 /* True if resolving columns of CREATE INDEX */
+#define NC_VarSelect 0x0040 /* A correlated subquery has been seen */
+#define NC_MinMaxAgg 0x1000 /* min/max aggregates seen. See note above */
+
+/*
+** An instance of the following structure contains all information
+** needed to generate code for a single SELECT statement.
+**
+** nLimit is set to -1 if there is no LIMIT clause. nOffset is set to 0.
+** If there is a LIMIT clause, the parser sets nLimit to the value of the
+** limit and nOffset to the value of the offset (or 0 if there is no
+** offset). But later on, nLimit and nOffset become the memory locations
+** in the VDBE that record the limit and offset counters.
+**
+** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes.
+** These addresses must be stored so that we can go back and fill in
+** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor
+** the number of columns in P2 can be computed at the same time
+** as the OP_OpenEphm instruction is coded because not
+** enough information about the compound query is known at that point.
+** The KeyInfo for addrOpenEphm[0] and [1] contains collating sequences
+** for the result set. The KeyInfo for addrOpenEphm[2] contains collating
+** sequences for the ORDER BY clause.
+*/
+struct Select {
+  ExprList *pEList;    /* The fields of the result */
+  u8 op;               /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */
+  LogEst nSelectRow;   /* Estimated number of result rows */
+  u32 selFlags;        /* Various SF_* values */
+  int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */
+#if SELECTTRACE_ENABLED
+  char zSelName[12];   /* Symbolic name of this SELECT use for debugging */
+#endif
+  int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */
+  SrcList *pSrc;       /* The FROM clause */
+  Expr *pWhere;        /* The WHERE clause */
+  ExprList *pGroupBy;  /* The GROUP BY clause */
+  Expr *pHaving;       /* The HAVING clause */
+  ExprList *pOrderBy;  /* The ORDER BY clause */
+  Select *pPrior;      /* Prior select in a compound select statement */
+  Select *pNext;       /* Next select to the left in a compound */
+  Expr *pLimit;        /* LIMIT expression. NULL means not used. */
+  Expr *pOffset;       /* OFFSET expression. NULL means not used. */
+  With *pWith;         /* WITH clause attached to this select. Or NULL. */
+};
+
+/*
+** Allowed values for Select.selFlags. The "SF" prefix stands for
+** "Select Flag".
+**
+** Value constraints (all checked via assert())
+**     SF_HasAgg == NC_HasAgg
+**     SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX
+**     SF_FixedLimit == WHERE_USE_LIMIT
+*/
+#define SF_Distinct      0x00001 /* Output should be DISTINCT */
+#define SF_All           0x00002 /* Includes the ALL keyword */
+#define SF_Resolved      0x00004 /* Identifiers have been resolved */
+#define SF_Aggregate     0x00008 /* Contains agg functions or a GROUP BY */
+#define SF_HasAgg        0x00010 /* Contains aggregate functions */
+#define SF_UsesEphemeral 0x00020 /* Uses the OpenEphemeral opcode */
+#define SF_Expanded      0x00040 /* sqlite3SelectExpand() called on this */
+#define SF_HasTypeInfo   0x00080 /* FROM subqueries have Table metadata */
+#define SF_Compound      0x00100 /* Part of a compound query */
+#define SF_Values        0x00200 /* Synthesized from VALUES clause */
+#define SF_MultiValue    0x00400 /* Single VALUES term with multiple rows */
+#define SF_NestedFrom    0x00800 /* Part of a parenthesized FROM clause */
+#define SF_MinMaxAgg     0x01000 /* Aggregate containing min() or max() */
+#define SF_Recursive     0x02000 /* The recursive part of a recursive CTE */
+#define SF_FixedLimit    0x04000 /* nSelectRow set by a constant LIMIT */
+#define SF_MaybeConvert  0x08000 /* Need convertCompoundSelectToSubquery() */
+#define SF_Converted     0x10000 /* By convertCompoundSelectToSubquery() */
+#define SF_IncludeHidden 0x20000 /* Include hidden columns in output */
+
+
+/*
+** The results of a SELECT can be distributed in several ways, as defined
+** by one of the following macros. The "SRT" prefix means "SELECT Result
+** Type".
+**
+**     SRT_Union       Store results as a key in a temporary index
+**                     identified by pDest->iSDParm.
+**
+**     SRT_Except      Remove results from the temporary index pDest->iSDParm.
+**
+**     SRT_Exists      Store a 1 in memory cell pDest->iSDParm if the result
+**                     set is not empty.
+**
+**     SRT_Discard     Throw the results away. This is used by SELECT
+**                     statements within triggers whose only purpose is
+**                     the side-effects of functions.
+**
+** All of the above are free to ignore their ORDER BY clause. Those that
+** follow must honor the ORDER BY clause.
+**
+**     SRT_Output      Generate a row of output (using the OP_ResultRow
+**                     opcode) for each row in the result set.
+**
+**     SRT_Mem         Only valid if the result is a single column.
+**                     Store the first column of the first result row
+**                     in register pDest->iSDParm then abandon the rest
+**                     of the query. This destination implies "LIMIT 1".
+**
+**     SRT_Set         The result must be a single column. Store each
+**                     row of result as the key in table pDest->iSDParm.
+**                     Apply the affinity pDest->affSdst before storing
+**                     results. Used to implement "IN (SELECT ...)".
+**
+**     SRT_EphemTab    Create a temporary table pDest->iSDParm and store
+**                     the result there. The cursor is left open after
+**                     returning. This is like SRT_Table except that
+**                     this destination uses OP_OpenEphemeral to create
+**                     the table first.
+**
+**     SRT_Coroutine   Generate a co-routine that returns a new row of
+**                     results each time it is invoked. The entry point
+**                     of the co-routine is stored in register pDest->iSDParm
+**                     and the result row is stored in pDest->nDest registers
+**                     starting with pDest->iSdst.
+**
+**     SRT_Table       Store results in temporary table pDest->iSDParm.
+**     SRT_Fifo        This is like SRT_EphemTab except that the table
+**                     is assumed to already be open. SRT_Fifo has
+**                     the additional property of being able to ignore
+**                     the ORDER BY clause.
+**
+**     SRT_DistFifo    Store results in a temporary table pDest->iSDParm.
+**                     But also use temporary table pDest->iSDParm+1 as
+**                     a record of all prior results and ignore any duplicate
+**                     rows. Name means: "Distinct Fifo".
+**
+**     SRT_Queue       Store results in priority queue pDest->iSDParm (really
+**                     an index). Append a sequence number so that all entries
+**                     are distinct.
+**
+**     SRT_DistQueue   Store results in priority queue pDest->iSDParm only if
+**                     the same record has never been stored before. The
+**                     index at pDest->iSDParm+1 holds all prior stores.
+*/
+#define SRT_Union     1 /* Store result as keys in an index */
+#define SRT_Except    2 /* Remove result from a UNION index */
+#define SRT_Exists    3 /* Store 1 if the result is not empty */
+#define SRT_Discard   4 /* Do not save the results anywhere */
+#define SRT_Fifo      5 /* Store result as data with an automatic rowid */
+#define SRT_DistFifo  6 /* Like SRT_Fifo, but unique results only */
+#define SRT_Queue     7 /* Store result in a queue */
+#define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */
+
+/* The ORDER BY clause is ignored for all of the above */
+#define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue)
+
+#define SRT_Output     9 /* Output each row of result */
+#define SRT_Mem       10 /* Store result in a memory cell */
+#define SRT_Set       11 /* Store results as keys in an index */
+#define SRT_EphemTab  12 /* Create transient tab and store like SRT_Table */
+#define SRT_Coroutine 13 /* Generate a single row of result */
+#define SRT_Table     14 /* Store result as data with an automatic rowid */
+
+/*
+** An instance of this object describes where to put the results of
+** a SELECT statement.
+*/
+struct SelectDest {
+  u8 eDest;           /* How to dispose of the results. One of SRT_* above. */
+  char *zAffSdst;     /* Affinity used when eDest==SRT_Set */
+  int iSDParm;        /* A parameter used by the eDest disposal method */
+  int iSdst;          /* Base register where results are written */
+  int nSdst;          /* Number of registers allocated */
+  ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */
+};
+
+/*
+** During code generation of statements that do inserts into AUTOINCREMENT
+** tables, the following information is attached to the Table.u.autoInc.p
+** pointer of each autoincrement table to record some side information that
+** the code generator needs. We have to keep per-table autoincrement
+** information in case inserts are done within triggers. Triggers do not
+** normally coordinate their activities, but we do need to coordinate the
+** loading and saving of autoincrement information.
+*/
+struct AutoincInfo {
+  AutoincInfo *pNext; /* Next info block in a list of them all */
+  Table *pTab;        /* Table this info block refers to */
+  int iDb;            /* Index in sqlite3.aDb[] of database holding pTab */
+  int regCtr;         /* Memory register holding the rowid counter */
+};
+
+/*
+** Size of the column cache
+*/
+#ifndef SQLITE_N_COLCACHE
+# define SQLITE_N_COLCACHE 10
+#endif
+
+/*
+** At least one instance of the following structure is created for each
+** trigger that may be fired while parsing an INSERT, UPDATE or DELETE
+** statement. All such objects are stored in the linked list headed at
+** Parse.pTriggerPrg and deleted once statement compilation has been
+** completed.
+**
+** A Vdbe sub-program that implements the body and WHEN clause of trigger
+** TriggerPrg.pTrigger, assuming a default ON CONFLICT clause of
+** TriggerPrg.orconf, is stored in the TriggerPrg.pProgram variable.
+** The Parse.pTriggerPrg list never contains two entries with the same
+** values for both pTrigger and orconf.
+**
+** The TriggerPrg.aColmask[0] variable is set to a mask of old.* columns
+** accessed (or set to 0 for triggers fired as a result of INSERT
+** statements). Similarly, the TriggerPrg.aColmask[1] variable is set to
+** a mask of new.* columns used by the program.
+*/
+struct TriggerPrg {
+  Trigger *pTrigger;    /* Trigger this program was coded from */
+  TriggerPrg *pNext;    /* Next entry in Parse.pTriggerPrg list */
+  SubProgram *pProgram; /* Program implementing pTrigger/orconf */
+  int orconf;           /* Default ON CONFLICT policy */
+  u32 aColmask[2];      /* Masks of old.*, new.* columns accessed */
+};
+
+/*
+** The yDbMask datatype for the bitmask of all attached databases.
+*/
+#if SQLITE_MAX_ATTACHED>30
+  typedef unsigned char yDbMask[(SQLITE_MAX_ATTACHED+9)/8];
+# define DbMaskTest(M,I)  (((M)[(I)/8]&(1<<((I)&7)))!=0)
+# define DbMaskZero(M)    memset((M),0,sizeof(M))
+# define DbMaskSet(M,I)   (M)[(I)/8]|=(1<<((I)&7))
+# define DbMaskAllZero(M) sqlite3DbMaskAllZero(M)
+# define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0)
+#else
+  typedef unsigned int yDbMask;
+# define DbMaskTest(M,I)  (((M)&(((yDbMask)1)<<(I)))!=0)
+# define DbMaskZero(M)    (M)=0
+# define DbMaskSet(M,I)   (M)|=(((yDbMask)1)<<(I))
+# define DbMaskAllZero(M) (M)==0
+# define DbMaskNonZero(M) (M)!=0
+#endif
+
+/*
+** An SQL parser context. A copy of this structure is passed through
+** the parser and down into all the parser action routines in order to
+** carry around information that is global to the entire parse.
+**
+** The structure is divided into two parts. When the parser and code
+** generator call themselves recursively, the first part of the structure
+** is constant but the second part is reset at the beginning and end of
+** each recursion.
+**
+** The nTableLock and aTableLock variables are only used if the shared-cache
+** feature is enabled (if sqlite3Tsd()->useSharedData is true). They are
+** used to store the set of table-locks required by the statement being
+** compiled. Function sqlite3TableLock() is used to add entries to the
+** list.
+*/ +struct Parse { + sqlite3 *db; /* The main database structure */ + char *zErrMsg; /* An error message */ + Vdbe *pVdbe; /* An engine for executing database bytecode */ + int rc; /* Return code from execution */ + u8 colNamesSet; /* TRUE after OP_ColumnName has been issued to pVdbe */ + u8 checkSchema; /* Causes schema cookie check after an error */ + u8 nested; /* Number of nested calls to the parser/code generator */ + u8 nTempReg; /* Number of temporary registers in aTempReg[] */ + u8 isMultiWrite; /* True if statement may modify/insert multiple rows */ + u8 mayAbort; /* True if statement may throw an ABORT exception */ + u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */ + u8 okConstFactor; /* OK to factor out constants */ + u8 disableLookaside; /* Number of times lookaside has been disabled */ + u8 nColCache; /* Number of entries in aColCache[] */ + int nRangeReg; /* Size of the temporary register block */ + int iRangeReg; /* First register in temporary register block */ + int nErr; /* Number of errors seen */ + int nTab; /* Number of previously allocated VDBE cursors */ + int nMem; /* Number of memory cells used so far */ + int nOpAlloc; /* Number of slots allocated for Vdbe.aOp[] */ + int szOpAlloc; /* Bytes of memory space allocated for Vdbe.aOp[] */ + int ckBase; /* Base register of data during check constraints */ + int iSelfTab; /* Table of an index whose exprs are being coded */ + int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */ + int iCacheCnt; /* Counter used to generate aColCache[].lru values */ + int nLabel; /* Number of labels used */ + int *aLabel; /* Space to hold the labels */ + ExprList *pConstExpr;/* Constant expressions */ + Token constraintName;/* Name of the constraint currently being parsed */ + yDbMask writeMask; /* Start a write transaction on these databases */ + yDbMask cookieMask; /* Bitmask of schema verified databases */ + int regRowid; /* Register holding rowid of CREATE TABLE entry */ + int regRoot; /* Register holding root page number for new objects */ + int nMaxArg; /* Max args passed to user function by sub-program */ +#if SELECTTRACE_ENABLED + int nSelect; /* Number of SELECT statements seen */ + int nSelectIndent; /* How far to indent SELECTTRACE() output */ +#endif +#ifndef SQLITE_OMIT_SHARED_CACHE + int nTableLock; /* Number of locks in aTableLock */ + TableLock *aTableLock; /* Required table locks for shared-cache mode */ +#endif + AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ + Parse *pToplevel; /* Parse structure for main program (or NULL) */ + Table *pTriggerTab; /* Table triggers are being coded for */ + int addrCrTab; /* Address of OP_CreateTable opcode on CREATE TABLE */ + u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ + u32 oldmask; /* Mask of old.* columns referenced */ + u32 newmask; /* Mask of new.* columns referenced */ + u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ + u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ + u8 disableTriggers; /* True to disable triggers */ + + /************************************************************************** + ** Fields above must be initialized to zero. The fields that follow, + ** down to the beginning of the recursive section, do not need to be + ** initialized as they will be set before being used. The boundary is + ** determined by offsetof(Parse,aColCache). 
+ **************************************************************************/ + + struct yColCache { + int iTable; /* Table cursor number */ + i16 iColumn; /* Table column number */ + u8 tempReg; /* iReg is a temp register that needs to be freed */ + int iLevel; /* Nesting level */ + int iReg; /* Reg with value of this column. 0 means none. */ + int lru; /* Least recently used entry has the smallest value */ + } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */ + int aTempReg[8]; /* Holding area for temporary registers */ + Token sNameToken; /* Token with unqualified schema object name */ + + /************************************************************************ + ** Above is constant between recursions. Below is reset before and after + ** each recursion. The boundary between these two regions is determined + ** using offsetof(Parse,sLastToken) so the sLastToken field must be the + ** first field in the recursive region. + ************************************************************************/ + + Token sLastToken; /* The last token parsed */ + ynVar nVar; /* Number of '?' variables seen in the SQL so far */ + u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */ + u8 explain; /* True if the EXPLAIN flag is found on the query */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + u8 declareVtab; /* True if inside sqlite3_declare_vtab() */ + int nVtabLock; /* Number of virtual tables to lock */ +#endif + int nHeight; /* Expression tree height of current sub-select */ +#ifndef SQLITE_OMIT_EXPLAIN + int iSelectId; /* ID of current select for EXPLAIN output */ + int iNextSelectId; /* Next available select ID for EXPLAIN output */ +#endif + VList *pVList; /* Mapping between variable names and numbers */ + Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */ + const char *zTail; /* All SQL text past the last semicolon parsed */ + Table *pNewTable; /* A table being constructed by CREATE TABLE */ + Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */ + const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + Token sArg; /* Complete text of a module argument */ + Table **apVtabLock; /* Pointer to virtual tables needing locking */ +#endif + Table *pZombieTab; /* List of Table objects to delete after code gen */ + TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ + With *pWith; /* Current WITH clause, or NULL */ + With *pWithToFree; /* Free this WITH object at the end of the parse */ +}; + +/* +** Sizes and pointers of various parts of the Parse object. +*/ +#define PARSE_HDR_SZ offsetof(Parse,aColCache) /* Recursive part w/o aColCache*/ +#define PARSE_RECURSE_SZ offsetof(Parse,sLastToken) /* Recursive part */ +#define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Non-recursive part */ +#define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to tail */ + +/* +** Return true if currently inside an sqlite3_declare_vtab() call. +*/ +#ifdef SQLITE_OMIT_VIRTUALTABLE + #define IN_DECLARE_VTAB 0 +#else + #define IN_DECLARE_VTAB (pParse->declareVtab) +#endif + +/* +** An instance of the following structure can be declared on a stack and used +** to save the Parse.zAuthContext value so that it can be restored later. +*/ +struct AuthContext { + const char *zAuthContext; /* Put saved Parse.zAuthContext here */ + Parse *pParse; /* The Parse structure */ +}; + +/* +** Bitfield flags for P5 value in various opcodes. 
+**
+** Value constraints (enforced via assert()):
+** OPFLAG_LENGTHARG == SQLITE_FUNC_LENGTH
+** OPFLAG_TYPEOFARG == SQLITE_FUNC_TYPEOF
+** OPFLAG_BULKCSR == BTREE_BULKLOAD
+** OPFLAG_SEEKEQ == BTREE_SEEK_EQ
+** OPFLAG_FORDELETE == BTREE_FORDELETE
+** OPFLAG_SAVEPOSITION == BTREE_SAVEPOSITION
+** OPFLAG_AUXDELETE == BTREE_AUXDELETE
+*/
+#define OPFLAG_NCHANGE 0x01 /* OP_Insert: Set to update db->nChange */
+ /* Also used in P2 (not P5) of OP_Delete */
+#define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */
+#define OPFLAG_LASTROWID 0x20 /* Set to update db->lastRowid */
+#define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */
+#define OPFLAG_APPEND 0x08 /* This is likely to be an append */
+#define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */
+#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */
+#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
+#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
+#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
+#define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */
+#define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */
+#define OPFLAG_P2ISREG 0x10 /* P2 to OP_Open** is a register number */
+#define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */
+#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete/Insert: save cursor pos */
+#define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */
+
+/*
+ * Each trigger present in the database schema is stored as an instance of
+ * struct Trigger.
+ *
+ * Pointers to instances of struct Trigger are stored in two ways.
+ * 1. In the "trigHash" hash table (part of the sqlite3* that represents the
+ * database). This allows Trigger structures to be retrieved by name.
+ * 2. All triggers associated with a single table form a linked list, using the
+ * pNext member of struct Trigger. A pointer to the first element of the
+ * linked list is stored as the "pTrigger" member of the associated
+ * struct Table.
+ *
+ * The "step_list" member points to the first element of a linked list
+ * containing the SQL statements specified as the trigger program.
+ */
+struct Trigger {
+ char *zName; /* The name of the trigger */
+ char *table; /* The table or view to which the trigger applies */
+ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */
+ u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */
+ Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */
+ IdList *pColumns; /* If this is an UPDATE OF <column-list> trigger,
+ the <column-list> is stored here */
+ Schema *pSchema; /* Schema containing the trigger */
+ Schema *pTabSchema; /* Schema containing the table */
+ TriggerStep *step_list; /* Link list of trigger program steps */
+ Trigger *pNext; /* Next trigger associated with the table */
+};
+
+/*
+** A trigger is either a BEFORE or an AFTER trigger. The following constants
+** determine which.
+**
+** If there are multiple triggers, you might have some BEFORE and some AFTER.
+** In that case, the constants below can be ORed together.
+*/
+#define TRIGGER_BEFORE 1
+#define TRIGGER_AFTER 2
+
+/*
+ * An instance of struct TriggerStep is used to store a single SQL statement
+ * that is a part of a trigger-program.
+ *
+ * Instances of struct TriggerStep are stored in a singly linked list (linked
+ * using the "pNext" member) referenced by the "step_list" member of the
+ * associated struct Trigger instance. The first element of the linked list is
+ * the first step of the trigger-program.
+ *
+ * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
+ * "SELECT" statement. The meanings of the other members are determined by the
+ * value of "op" as follows:
+ *
+ * (op == TK_INSERT)
+ * orconf -> stores the ON CONFLICT algorithm
+ * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then
+ * this stores a pointer to the SELECT statement. Otherwise NULL.
+ * zTarget -> Dequoted name of the table to insert into.
+ * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then
+ * this stores values to be inserted. Otherwise NULL.
+ * pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ...
+ * statement, then this stores the column-names to be
+ * inserted into.
+ *
+ * (op == TK_DELETE)
+ * zTarget -> Dequoted name of the table to delete from.
+ * pWhere -> The WHERE clause of the DELETE statement if one is specified.
+ * Otherwise NULL.
+ *
+ * (op == TK_UPDATE)
+ * zTarget -> Dequoted name of the table to update.
+ * pWhere -> The WHERE clause of the UPDATE statement if one is specified.
+ * Otherwise NULL.
+ * pExprList -> A list of the columns to update and the expressions to update
+ * them to. See sqlite3Update() documentation of "pChanges"
+ * argument.
+ *
+ */
+struct TriggerStep {
+ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */
+ u8 orconf; /* OE_Rollback etc. */
+ Trigger *pTrig; /* The trigger that this step is a part of */
+ Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ... */
+ char *zTarget; /* Target table for DELETE, UPDATE, INSERT */
+ Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */
+ ExprList *pExprList; /* SET clause for UPDATE. */
+ IdList *pIdList; /* Column names for INSERT */
+ TriggerStep *pNext; /* Next in the link-list */
+ TriggerStep *pLast; /* Last element in link-list. Valid for 1st elem only */
+};
+
+/*
+** The following structure contains information used by the sqliteFix...
+** routines as they walk the parse tree to make database references
+** explicit.
+*/
+typedef struct DbFixer DbFixer;
+struct DbFixer {
+ Parse *pParse; /* The parsing context. Error messages written here */
+ Schema *pSchema; /* Fix items to this schema */
+ int bVarOnly; /* Check for variable references only */
+ const char *zDb; /* Make sure all objects are contained in this database */
+ const char *zType; /* Type of the container - used for error messages */
+ const Token *pName; /* Name of the container - used for error messages */
+};
+
+/*
+** An object used to accumulate the text of a string where we
+** do not necessarily know how big the string will be in the end.
+*/
+struct StrAccum {
+ sqlite3 *db; /* Optional database for lookaside. Can be NULL */
+ char *zBase; /* A base allocation. Not from malloc. */
+ char *zText; /* The string collected so far */
+ u32 nChar; /* Length of the string so far */
+ u32 nAlloc; /* Amount of space allocated in zText */
+ u32 mxAlloc; /* Maximum allowed allocation. 0 for no malloc usage */
+ u8 accError; /* STRACCUM_NOMEM or STRACCUM_TOOBIG */
+ u8 printfFlags; /* SQLITE_PRINTF flags below */
+};
+#define STRACCUM_NOMEM 1
+#define STRACCUM_TOOBIG 2
+#define SQLITE_PRINTF_INTERNAL 0x01 /* Internal-use-only converters allowed */
+#define SQLITE_PRINTF_SQLFUNC 0x02 /* SQL function arguments to VXPrintf */
+#define SQLITE_PRINTF_MALLOCED 0x04 /* True if xText is allocated space */
+
+#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0)
+
+
+/*
+** A pointer to this structure is used to communicate information
+** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback.
+*/
+typedef struct {
+ sqlite3 *db; /* The database being initialized */
+ char **pzErrMsg; /* Error message stored here */
+ int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */
+ int rc; /* Result code stored here */
+} InitData;
+
+/*
+** Structure containing global configuration data for the SQLite library.
+**
+** This structure also contains some state information.
+*/
+struct Sqlite3Config {
+ int bMemstat; /* True to enable memory status */
+ int bCoreMutex; /* True to enable core mutexing */
+ int bFullMutex; /* True to enable full mutexing */
+ int bOpenUri; /* True to interpret filenames as URIs */
+ int bUseCis; /* Use covering indices for full-scans */
+ int mxStrlen; /* Maximum string length */
+ int neverCorrupt; /* Database is always well-formed */
+ int szLookaside; /* Default lookaside buffer size */
+ int nLookaside; /* Default lookaside buffer count */
+ int nStmtSpill; /* Stmt-journal spill-to-disk threshold */
+ sqlite3_mem_methods m; /* Low-level memory allocation interface */
+ sqlite3_mutex_methods mutex; /* Low-level mutex interface */
+ sqlite3_pcache_methods2 pcache2; /* Low-level page-cache interface */
+ void *pHeap; /* Heap storage space */
+ int nHeap; /* Size of pHeap[] */
+ int mnReq, mxReq; /* Min and max heap request sizes */
+ sqlite3_int64 szMmap; /* mmap() space per open file */
+ sqlite3_int64 mxMmap; /* Maximum value for szMmap */
+ void *pScratch; /* Scratch memory */
+ int szScratch; /* Size of each scratch buffer */
+ int nScratch; /* Number of scratch buffers */
+ void *pPage; /* Page cache memory */
+ int szPage; /* Size of each page in pPage[] */
+ int nPage; /* Number of pages in pPage[] */
+ int mxParserStack; /* maximum depth of the parser stack */
+ int sharedCacheEnabled; /* true if shared-cache mode enabled */
+ u32 szPma; /* Maximum Sorter PMA size */
+ /* The above might be initialized to non-zero. The following need to always
+ ** initially be zero, however. */
+ int isInit; /* True after initialization has finished */
+ int inProgress; /* True while initialization in progress */
+ int isMutexInit; /* True after mutexes are initialized */
+ int isMallocInit; /* True after malloc is initialized */
+ int isPCacheInit; /* True after the page cache is initialized */
+ int nRefInitMutex; /* Number of users of pInitMutex */
+ sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */
+ void (*xLog)(void*,int,const char*); /* Function for logging */
+ void *pLogArg; /* First argument to xLog() */
+#ifdef SQLITE_ENABLE_SQLLOG
+ void(*xSqllog)(void*,sqlite3*,const char*, int);
+ void *pSqllogArg;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+ /* The following callback (if not NULL) is invoked on every VDBE branch
+ ** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE.
+ */ + void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */ + void *pVdbeBranchArg; /* 1st argument */ +#endif +#ifndef SQLITE_UNTESTABLE + int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ +#endif + int bLocaltimeFault; /* True to fail localtime() calls */ + int iOnceResetThreshold; /* When to reset OP_Once counters */ +}; + +/* +** This macro is used inside of assert() statements to indicate that +** the assert is only valid on a well-formed database. Instead of: +** +** assert( X ); +** +** One writes: +** +** assert( X || CORRUPT_DB ); +** +** CORRUPT_DB is true during normal operation. CORRUPT_DB does not indicate +** that the database is definitely corrupt, only that it might be corrupt. +** For most test cases, CORRUPT_DB is set to false using a special +** sqlite3_test_control(). This enables assert() statements to prove +** things that are always true for well-formed databases. +*/ +#define CORRUPT_DB (sqlite3Config.neverCorrupt==0) + +/* +** Context pointer passed down through the tree-walk. +*/ +struct Walker { + Parse *pParse; /* Parser context. */ + int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */ + int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */ + void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */ + int walkerDepth; /* Number of subqueries */ + u8 eCode; /* A small processing code */ + union { /* Extra data for callback */ + NameContext *pNC; /* Naming context */ + int n; /* A counter */ + int iCur; /* A cursor number */ + SrcList *pSrcList; /* FROM clause */ + struct SrcCount *pSrcCount; /* Counting column references */ + struct CCurHint *pCCurHint; /* Used by codeCursorHint() */ + int *aiCol; /* array of column indexes */ + struct IdxCover *pIdxCover; /* Check for index coverage */ + } u; +}; + +/* Forward declarations */ +SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); +SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); +SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*); +SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*); +SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker*, Select*); +SQLITE_PRIVATE int sqlite3ExprWalkNoop(Walker*, Expr*); + +/* +** Return code from the parse-tree walking primitives and their +** callbacks. +*/ +#define WRC_Continue 0 /* Continue down into children */ +#define WRC_Prune 1 /* Omit children but continue walking siblings */ +#define WRC_Abort 2 /* Abandon the tree walk */ + +/* +** An instance of this structure represents a set of one or more CTEs +** (common table expressions) created by a single WITH clause. +*/ +struct With { + int nCte; /* Number of CTEs in the WITH clause */ + With *pOuter; /* Containing WITH clause, or NULL */ + struct Cte { /* For each CTE in the WITH clause.... */ + char *zName; /* Name of this CTE */ + ExprList *pCols; /* List of explicit column names, or NULL */ + Select *pSelect; /* The definition of this CTE */ + const char *zCteErr; /* Error message for circular references */ + } a[1]; +}; + +#ifdef SQLITE_DEBUG +/* +** An instance of the TreeView object is used for printing the content of +** data structures on sqlite3DebugPrintf() using a tree-like view. +*/ +struct TreeView { + int iLevel; /* Which level of the tree we are on */ + u8 bLine[100]; /* Draw vertical in column i if bLine[i] is true */ +}; +#endif /* SQLITE_DEBUG */ + +/* +** Assuming zIn points to the first byte of a UTF-8 character, +** advance zIn to point to the first byte of the next UTF-8 character. 
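+**
+** For example, the characters in a NUL-terminated UTF-8 string can be
+** counted like this (an illustrative sketch; z is assumed to be an
+** unsigned char pointer to the start of the string):
+**
+**     int nChar = 0;
+**     while( *z ){ SQLITE_SKIP_UTF8(z); nChar++; }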
+*/
+#define SQLITE_SKIP_UTF8(zIn) { \
+ if( (*(zIn++))>=0xc0 ){ \
+ while( (*zIn & 0xc0)==0x80 ){ zIn++; } \
+ } \
+}
+
+/*
+** The SQLITE_*_BKPT macros are substitutes for the error codes with
+** the same name but without the _BKPT suffix. These macros invoke
+** routines that report the line-number on which the error originated
+** using sqlite3_log(). The routines also provide a convenient place
+** to set a debugger breakpoint.
+*/
+SQLITE_PRIVATE int sqlite3CorruptError(int);
+SQLITE_PRIVATE int sqlite3MisuseError(int);
+SQLITE_PRIVATE int sqlite3CantopenError(int);
+#define SQLITE_CORRUPT_BKPT sqlite3CorruptError(__LINE__)
+#define SQLITE_MISUSE_BKPT sqlite3MisuseError(__LINE__)
+#define SQLITE_CANTOPEN_BKPT sqlite3CantopenError(__LINE__)
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NomemError(int);
+SQLITE_PRIVATE int sqlite3IoerrnomemError(int);
+# define SQLITE_NOMEM_BKPT sqlite3NomemError(__LINE__)
+# define SQLITE_IOERR_NOMEM_BKPT sqlite3IoerrnomemError(__LINE__)
+#else
+# define SQLITE_NOMEM_BKPT SQLITE_NOMEM
+# define SQLITE_IOERR_NOMEM_BKPT SQLITE_IOERR_NOMEM
+#endif
+
+/*
+** FTS3 and FTS4 both require virtual table support
+*/
+#if defined(SQLITE_OMIT_VIRTUALTABLE)
+# undef SQLITE_ENABLE_FTS3
+# undef SQLITE_ENABLE_FTS4
+#endif
+
+/*
+** FTS4 is really an extension for FTS3. It is enabled using the
+** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also call
+** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
+*/
+#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
+# define SQLITE_ENABLE_FTS3 1
+#endif
+
+/*
+** The ctype.h header is needed for non-ASCII systems. It is also
+** needed by FTS3 when FTS3 is included in the amalgamation.
+*/
+#if !defined(SQLITE_ASCII) || \
+ (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION))
+# include <ctype.h>
+#endif
+
+/*
+** The following macros mimic the standard library functions toupper(),
+** isspace(), isalnum(), isalpha(), isdigit(), isxdigit(), and tolower().
+** The sqlite versions only work for ASCII characters, regardless of locale.
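+**
+** For example (an illustrative sketch of the expected behavior):
+**
+**     assert( sqlite3Isdigit('7') );
+**     assert( sqlite3Toupper('q')=='Q' );
+**     assert( sqlite3Tolower('Q')=='q' );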
+*/ +#ifdef SQLITE_ASCII +# define sqlite3Toupper(x) ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20)) +# define sqlite3Isspace(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x01) +# define sqlite3Isalnum(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x06) +# define sqlite3Isalpha(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x02) +# define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04) +# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) +# define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) +# define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80) +#else +# define sqlite3Toupper(x) toupper((unsigned char)(x)) +# define sqlite3Isspace(x) isspace((unsigned char)(x)) +# define sqlite3Isalnum(x) isalnum((unsigned char)(x)) +# define sqlite3Isalpha(x) isalpha((unsigned char)(x)) +# define sqlite3Isdigit(x) isdigit((unsigned char)(x)) +# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) +# define sqlite3Tolower(x) tolower((unsigned char)(x)) +# define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`') +#endif +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS +SQLITE_PRIVATE int sqlite3IsIdChar(u8); +#endif + +/* +** Internal function prototypes +*/ +SQLITE_PRIVATE int sqlite3StrICmp(const char*,const char*); +SQLITE_PRIVATE int sqlite3Strlen30(const char*); +SQLITE_PRIVATE char *sqlite3ColumnType(Column*,char*); +#define sqlite3StrNICmp sqlite3_strnicmp + +SQLITE_PRIVATE int sqlite3MallocInit(void); +SQLITE_PRIVATE void sqlite3MallocEnd(void); +SQLITE_PRIVATE void *sqlite3Malloc(u64); +SQLITE_PRIVATE void *sqlite3MallocZero(u64); +SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, u64); +SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, u64); +SQLITE_PRIVATE void *sqlite3DbMallocRawNN(sqlite3*, u64); +SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3*,const char*); +SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, u64); +SQLITE_PRIVATE void *sqlite3Realloc(void*, u64); +SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64); +SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64); +SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*); +SQLITE_PRIVATE int sqlite3MallocSize(void*); +SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*); +SQLITE_PRIVATE void *sqlite3ScratchMalloc(int); +SQLITE_PRIVATE void sqlite3ScratchFree(void*); +SQLITE_PRIVATE void *sqlite3PageMalloc(int); +SQLITE_PRIVATE void sqlite3PageFree(void*); +SQLITE_PRIVATE void sqlite3MemSetDefault(void); +#ifndef SQLITE_UNTESTABLE +SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void)); +#endif +SQLITE_PRIVATE int sqlite3HeapNearlyFull(void); + +/* +** On systems with ample stack space and that support alloca(), make +** use of alloca() to obtain space for large automatic objects. By default, +** obtain space from malloc(). +** +** The alloca() routine never returns NULL. This will cause code paths +** that deal with sqlite3StackAlloc() failures to be unreachable. +*/ +#ifdef SQLITE_USE_ALLOCA +# define sqlite3StackAllocRaw(D,N) alloca(N) +# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) +# define sqlite3StackFree(D,P) +#else +# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) +# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) +# define sqlite3StackFree(D,P) sqlite3DbFree(D,P) +#endif + +/* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. 
If they +** are, disable MEMSYS3 +*/ +#ifdef SQLITE_ENABLE_MEMSYS5 +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void); +#undef SQLITE_ENABLE_MEMSYS3 +#endif +#ifdef SQLITE_ENABLE_MEMSYS3 +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void); +#endif + + +#ifndef SQLITE_MUTEX_OMIT +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void); +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void); +SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int); +SQLITE_PRIVATE int sqlite3MutexInit(void); +SQLITE_PRIVATE int sqlite3MutexEnd(void); +#endif +#if !defined(SQLITE_MUTEX_OMIT) && !defined(SQLITE_MUTEX_NOOP) +SQLITE_PRIVATE void sqlite3MemoryBarrier(void); +#else +# define sqlite3MemoryBarrier() +#endif + +SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int); +SQLITE_PRIVATE void sqlite3StatusUp(int, int); +SQLITE_PRIVATE void sqlite3StatusDown(int, int); +SQLITE_PRIVATE void sqlite3StatusHighwater(int, int); + +/* Access to mutexes used by sqlite3_status() */ +SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void); +SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void); + +#ifndef SQLITE_OMIT_FLOATING_POINT +SQLITE_PRIVATE int sqlite3IsNaN(double); +#else +# define sqlite3IsNaN(X) 0 +#endif + +/* +** An instance of the following structure holds information about SQL +** functions arguments that are the parameters to the printf() function. +*/ +struct PrintfArguments { + int nArg; /* Total number of arguments */ + int nUsed; /* Number of arguments used so far */ + sqlite3_value **apArg; /* The argument values */ +}; + +SQLITE_PRIVATE void sqlite3VXPrintf(StrAccum*, const char*, va_list); +SQLITE_PRIVATE void sqlite3XPrintf(StrAccum*, const char*, ...); +SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...); +SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list); +#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) +SQLITE_PRIVATE void sqlite3DebugPrintf(const char*, ...); +#endif +#if defined(SQLITE_TEST) +SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*); +#endif + +#if defined(SQLITE_DEBUG) +SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8); +SQLITE_PRIVATE void sqlite3TreeViewBareExprList(TreeView*, const ExprList*, const char*); +SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*); +SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8); +SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8); +#endif + + +SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); +SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); +SQLITE_PRIVATE void sqlite3Dequote(char*); +SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*); +SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); +SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **); +SQLITE_PRIVATE void sqlite3FinishCoding(Parse*); +SQLITE_PRIVATE int sqlite3GetTempReg(Parse*); +SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int); +SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); +SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int); +SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int); +#endif +SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int); +SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*); +SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*); +SQLITE_PRIVATE Expr 
*sqlite3PExpr(Parse*, int, Expr*, Expr*); +SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*); +SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*); +SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*); +SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); +SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); +SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); +SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*); +SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int); +SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int); +SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*); +SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); +SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); +SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); +SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**); +SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int); +#ifndef SQLITE_OMIT_VIRTUALTABLE +SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3*,const char *zName); +#endif +SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*); +SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int); +SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*); +SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*); +SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*); +SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**); +SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*); +SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*); +SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *, int); +SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*); +SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index*, i16); +SQLITE_PRIVATE void sqlite3StartTable(Parse*,Token*,Token*,int,int,int,int); +#if SQLITE_ENABLE_HIDDEN_COLUMNS +SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table*, Column*); +#else +# define sqlite3ColumnPropertiesFromName(T,C) /* no-op */ +#endif +SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*,Token*); +SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int); +SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int); +SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*); +SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,ExprSpan*); +SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*); +SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*); +SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, + sqlite3_vfs**,char**,char **); +SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*); + +#ifdef SQLITE_UNTESTABLE +# define sqlite3FaultSim(X) SQLITE_OK +#else +SQLITE_PRIVATE int sqlite3FaultSim(int); +#endif + +SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32); +SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec*, u32); +SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec*, u32); +SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32); +SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*); +SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*); +SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*); +#ifndef SQLITE_UNTESTABLE +SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*); +#endif + +SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int); +SQLITE_PRIVATE void sqlite3RowSetClear(RowSet*); +SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64); +SQLITE_PRIVATE 
int sqlite3RowSetTest(RowSet*, int iBatch, i64); +SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*); + +SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,ExprList*,Select*,int,int); + +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) +SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*); +#else +# define sqlite3ViewGetColumnNames(A,B) 0 +#endif + +#if SQLITE_MAX_ATTACHED>30 +SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask); +#endif +SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int); +SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int); +SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*); +#ifndef SQLITE_OMIT_AUTOINCREMENT +SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse); +SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse); +#else +# define sqlite3AutoincrementBegin(X) +# define sqlite3AutoincrementEnd(X) +#endif +SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int); +SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*); +SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*); +SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*); +SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(sqlite3*, SrcList*, int, int); +SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*); +SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*, + Token*, Select*, Expr*, IdList*); +SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); +SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*); +SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *); +SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*); +SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*); +SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*); +SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*); +SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**); +SQLITE_PRIVATE void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*, + Expr*, int, int, u8); +SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int); +SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); +SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, + Expr*,ExprList*,u32,Expr*,Expr*); +SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); +SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); +SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) +SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,Expr*,char*); +#endif +SQLITE_PRIVATE void sqlite3DeleteFrom(Parse*, SrcList*, Expr*); +SQLITE_PRIVATE void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); +SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*,ExprList*,u16,int); +SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*); +SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*); 
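+
+/*
+** For illustration (a sketch only; variable names are chosen for
+** exposition): sqlite3WhereBegin() and sqlite3WhereEnd() bracket the
+** VDBE code generated for a loop over a FROM clause, so a typical
+** caller looks like:
+**
+**     pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, 0, 0);
+**     if( pWInfo ){
+**       ...code the loop body for one row here...
+**       sqlite3WhereEnd(pWInfo);
+**     }
+*/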
+SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*); +#define ONEPASS_OFF 0 /* Use of ONEPASS not allowed */ +#define ONEPASS_SINGLE 1 /* ONEPASS valid for a single row update */ +#define ONEPASS_MULTI 2 /* ONEPASS is valid for multiple rows */ +SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(Parse*, Index*, int, int, int); +SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8); +SQLITE_PRIVATE void sqlite3ExprCodeGetColumnToReg(Parse*, Table*, int, int, int); +SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int); +SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int); +SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse*, int, int, int); +SQLITE_PRIVATE void sqlite3ExprCachePush(Parse*); +SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*); +SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse*, int, int); +SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse*); +SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int); +SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int); +SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int); +SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int); +SQLITE_PRIVATE int sqlite3ExprCodeAtInit(Parse*, Expr*, int); +SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*); +SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int); +SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int); +SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8); +#define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */ +#define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */ +#define SQLITE_ECEL_REF 0x04 /* Use ExprList.u.x.iOrderByCol */ +#define SQLITE_ECEL_OMITREF 0x08 /* Omit if ExprList.u.x.iOrderByCol */ +SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int); +SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int); +SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int); +SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); +#define LOCATE_VIEW 0x01 +#define LOCATE_NOERR 0x02 +SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*); +SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,struct SrcList_item *); +SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); +SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); +SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); +SQLITE_PRIVATE void sqlite3Vacuum(Parse*,Token*); +SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*, int); +SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*); +SQLITE_PRIVATE int sqlite3ExprCompare(Expr*, Expr*, int); +SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int); +SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr*, Expr*, int); +SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); +SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); +SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx); +SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*); +SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*); +#ifndef SQLITE_UNTESTABLE +SQLITE_PRIVATE void sqlite3PrngSaveState(void); +SQLITE_PRIVATE void sqlite3PrngRestoreState(void); +#endif +SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3*,int); +SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse*, int); +SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb); 
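+
+/*
+** For illustration (a sketch only; names are chosen for exposition):
+** sqlite3ExprCode() evaluates an expression into a caller-chosen
+** register, while sqlite3ExprCodeTemp() may allocate a temporary
+** register that the caller releases afterwards:
+**
+**     sqlite3ExprCode(pParse, pExpr, iTarget);
+**     r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+**     sqlite3ReleaseTempReg(pParse, regFree1);
+*/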
+SQLITE_PRIVATE void sqlite3BeginTransaction(Parse*, int); +SQLITE_PRIVATE void sqlite3CommitTransaction(Parse*); +SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse*); +SQLITE_PRIVATE void sqlite3Savepoint(Parse*, int, Token*); +SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *); +SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); +SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); +SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); +SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); +SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); +#ifdef SQLITE_ENABLE_CURSOR_HINTS +SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); +#endif +SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*); +SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); +SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); +SQLITE_PRIVATE int sqlite3IsRowid(const char*); +SQLITE_PRIVATE void sqlite3GenerateRowDelete( + Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int); +SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int); +SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,Index*,int); +SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int); +SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int, + u8,u8,int,int*,int*); +#ifdef SQLITE_ENABLE_NULL_TRIM +SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe*,Table*); +#else +# define sqlite3SetMakeRecordP5(A,B) +#endif +SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int); +SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, u8, int, u8*, int*, int*); +SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int); +SQLITE_PRIVATE void sqlite3MultiWrite(Parse*); +SQLITE_PRIVATE void sqlite3MayAbort(Parse*); +SQLITE_PRIVATE void sqlite3HaltConstraint(Parse*, int, int, char*, i8, u8); +SQLITE_PRIVATE void sqlite3UniqueConstraint(Parse*, int, Index*); +SQLITE_PRIVATE void sqlite3RowidConstraint(Parse*, int, Table*); +SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,Expr*,int); +SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int); +SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int); +SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*); +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int); +#if SELECTTRACE_ENABLED +SQLITE_PRIVATE void sqlite3SelectSetName(Select*,const char*); +#else +# define sqlite3SelectSetName(A,B) +#endif +SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(FuncDef*,int); +SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,u8,u8); +SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void); +SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void); +SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3*); +SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*); +SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*); +SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int); + +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) +SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, int); +#endif + +#ifndef SQLITE_OMIT_TRIGGER +SQLITE_PRIVATE void sqlite3BeginTrigger(Parse*, Token*,Token*,int,int,IdList*,SrcList*, + Expr*,int, int); +SQLITE_PRIVATE void sqlite3FinishTrigger(Parse*, TriggerStep*, Token*); +SQLITE_PRIVATE void sqlite3DropTrigger(Parse*, SrcList*, int); +SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse*, Trigger*); +SQLITE_PRIVATE Trigger 
*sqlite3TriggersExist(Parse *, Table*, int, ExprList*, int *pMask); +SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *, Table *); +SQLITE_PRIVATE void sqlite3CodeRowTrigger(Parse*, Trigger *, int, ExprList*, int, Table *, + int, int, int); +SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, int, int, int); + void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*); +SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*); +SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*); +SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*, + Select*,u8); +SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, u8); +SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*); +SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*); +SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*); +SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Table*,int); +# define sqlite3ParseToplevel(p) ((p)->pToplevel ? (p)->pToplevel : (p)) +# define sqlite3IsToplevel(p) ((p)->pToplevel==0) +#else +# define sqlite3TriggersExist(B,C,D,E,F) 0 +# define sqlite3DeleteTrigger(A,B) +# define sqlite3DropTriggerPtr(A,B) +# define sqlite3UnlinkAndDeleteTrigger(A,B,C) +# define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I) +# define sqlite3CodeRowTriggerDirect(A,B,C,D,E,F) +# define sqlite3TriggerList(X, Y) 0 +# define sqlite3ParseToplevel(p) p +# define sqlite3IsToplevel(p) 1 +# define sqlite3TriggerColmask(A,B,C,D,E,F,G) 0 +#endif + +SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*); +SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int); +SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int); +#ifndef SQLITE_OMIT_AUTHORIZATION +SQLITE_PRIVATE void sqlite3AuthRead(Parse*,Expr*,Schema*,SrcList*); +SQLITE_PRIVATE int sqlite3AuthCheck(Parse*,int, const char*, const char*, const char*); +SQLITE_PRIVATE void sqlite3AuthContextPush(Parse*, AuthContext*, const char*); +SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext*); +SQLITE_PRIVATE int sqlite3AuthReadCol(Parse*, const char *, const char *, int); +#else +# define sqlite3AuthRead(a,b,c,d) +# define sqlite3AuthCheck(a,b,c,d,e) SQLITE_OK +# define sqlite3AuthContextPush(a,b,c) +# define sqlite3AuthContextPop(a) ((void)(a)) +#endif +SQLITE_PRIVATE void sqlite3Attach(Parse*, Expr*, Expr*, Expr*); +SQLITE_PRIVATE void sqlite3Detach(Parse*, Expr*); +SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Token*); +SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*); +SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); +SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); +SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*); +SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); +SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8); +SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); +SQLITE_PRIVATE int sqlite3Atoi(const char*); +SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); +SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); +SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); +SQLITE_PRIVATE LogEst sqlite3LogEst(u64); +SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); +#ifndef SQLITE_OMIT_VIRTUALTABLE +SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ + 
defined(SQLITE_ENABLE_STAT3_OR_STAT4) || \
+ defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
+SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst);
+#endif
+SQLITE_PRIVATE VList *sqlite3VListAdd(sqlite3*,VList*,const char*,int,int);
+SQLITE_PRIVATE const char *sqlite3VListNumToName(VList*,int);
+SQLITE_PRIVATE int sqlite3VListNameToNum(VList*,const char*,int);
+
+/*
+** Routines to read and write variable-length integers. These used to
+** be defined locally, but now we use the varint routines in the util.c
+** file.
+*/
+SQLITE_PRIVATE int sqlite3PutVarint(unsigned char*, u64);
+SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *, u64 *);
+SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *, u32 *);
+SQLITE_PRIVATE int sqlite3VarintLen(u64 v);
+
+/*
+** The common case is for a varint to be a single byte. The following
+** macros handle the common case without a procedure call, but then call
+** the procedure for larger varints.
+*/
+#define getVarint32(A,B) \
+ (u8)((*(A)<(u8)0x80)?((B)=(u32)*(A)),1:sqlite3GetVarint32((A),(u32 *)&(B)))
+#define putVarint32(A,B) \
+ (u8)(((u32)(B)<(u32)0x80)?(*(A)=(unsigned char)(B)),1:\
+ sqlite3PutVarint((A),(B)))
+#define getVarint sqlite3GetVarint
+#define putVarint sqlite3PutVarint
+
+
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*);
+SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int);
+SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2);
+SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity);
+SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table*,int);
+SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr);
+SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8);
+SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*);
+SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);
+SQLITE_PRIVATE void sqlite3Error(sqlite3*,int);
+SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int);
+SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n);
+SQLITE_PRIVATE u8 sqlite3HexToInt(int h);
+SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **);
+
+#if defined(SQLITE_NEED_ERR_NAME)
+SQLITE_PRIVATE const char *sqlite3ErrName(int);
+#endif
+
+SQLITE_PRIVATE const char *sqlite3ErrStr(int);
+SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse);
+SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int);
+SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName);
+SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr);
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*, int);
+SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*);
+SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*);
+SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *);
+SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *, const char *);
+SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, int);
+SQLITE_PRIVATE int sqlite3AddInt64(i64*,i64);
+SQLITE_PRIVATE int sqlite3SubInt64(i64*,i64);
+SQLITE_PRIVATE int sqlite3MulInt64(i64*,i64);
+SQLITE_PRIVATE int sqlite3AbsInt32(int);
+#ifdef SQLITE_ENABLE_8_3_NAMES
+SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*);
+#else
+# define sqlite3FileSuffix3(X,Y)
+#endif
+SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8);
+
+SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8);
+SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8);
+SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void
*,u8, + void(*)(void*)); +SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value*); +SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value*); +SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *); +SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8); +SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **); +SQLITE_PRIVATE void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); +#ifndef SQLITE_AMALGAMATION +SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[]; +SQLITE_PRIVATE const char sqlite3StrBINARY[]; +SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; +SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[]; +SQLITE_PRIVATE const Token sqlite3IntTokens[]; +SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config; +SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions; +#ifndef SQLITE_OMIT_WSD +SQLITE_PRIVATE int sqlite3PendingByte; +#endif +#endif +SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, int, int); +SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*); +SQLITE_PRIVATE void sqlite3AlterFunctions(void); +SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*); +SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *); +SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...); +SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*); +SQLITE_PRIVATE int sqlite3CodeSubselect(Parse*, Expr *, int, int); +SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*); +SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p); +SQLITE_PRIVATE int sqlite3MatchSpanName(const char*, const char*, const char*, const char*); +SQLITE_PRIVATE int sqlite3ResolveExprNames(NameContext*, Expr*); +SQLITE_PRIVATE int sqlite3ResolveExprListNames(NameContext*, ExprList*); +SQLITE_PRIVATE void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*); +SQLITE_PRIVATE void sqlite3ResolveSelfReference(Parse*,Table*,int,Expr*,ExprList*); +SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*); +SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int); +SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *); +SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); +SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*); +SQLITE_PRIVATE char sqlite3AffinityType(const char*, u8*); +SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*); +SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*); +SQLITE_PRIVATE int sqlite3FindDb(sqlite3*, Token*); +SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *, const char *); +SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3*,int iDB); +SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3*,Index*); +SQLITE_PRIVATE void sqlite3DefaultRowEst(Index*); +SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3*, int); +SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*); +SQLITE_PRIVATE void sqlite3SchemaClear(void *); +SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *, Btree *); +SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *); +SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int); +SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*); +SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*); +SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*); +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo*); +#endif +SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *, + 
void (*)(sqlite3_context*,int,sqlite3_value **), + void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*), + FuncDestructor *pDestructor +); +SQLITE_PRIVATE void sqlite3OomFault(sqlite3*); +SQLITE_PRIVATE void sqlite3OomClear(sqlite3*); +SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); +SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); + +SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int); +SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum*,const char*,int); +SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum*,const char*); +SQLITE_PRIVATE void sqlite3AppendChar(StrAccum*,int,char); +SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); +SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum*); +SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int); +SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int); + +SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *); +SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *); + +#ifndef SQLITE_OMIT_SUBQUERY +SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse*, Expr*); +#else +# define sqlite3ExprCheckIN(x,y) SQLITE_OK +#endif + +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 +SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void); +SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue( + Parse*,Index*,UnpackedRecord**,Expr*,int,int,int*); +SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(Parse*, Expr*, u8, sqlite3_value**); +SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord*); +SQLITE_PRIVATE int sqlite3Stat4Column(sqlite3*, const void*, int, int, sqlite3_value**); +SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3*, Index*, int); +#endif + +/* +** The interface to the LEMON-generated parser +*/ +#ifndef SQLITE_AMALGAMATION +SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64)); +SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*)); +#endif +SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*); +#ifdef YYTRACKMAXSTACKDEPTH +SQLITE_PRIVATE int sqlite3ParserStackPeak(void*); +#endif + +SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3*); +#ifndef SQLITE_OMIT_LOAD_EXTENSION +SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3*); +#else +# define sqlite3CloseExtensions(X) +#endif + +#ifndef SQLITE_OMIT_SHARED_CACHE +SQLITE_PRIVATE void sqlite3TableLock(Parse *, int, int, u8, const char *); +#else + #define sqlite3TableLock(v,w,x,y,z) +#endif + +#ifdef SQLITE_TEST +SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char*); +#endif + +#ifdef SQLITE_OMIT_VIRTUALTABLE +# define sqlite3VtabClear(Y) +# define sqlite3VtabSync(X,Y) SQLITE_OK +# define sqlite3VtabRollback(X) +# define sqlite3VtabCommit(X) +# define sqlite3VtabInSync(db) 0 +# define sqlite3VtabLock(X) +# define sqlite3VtabUnlock(X) +# define sqlite3VtabUnlockList(X) +# define sqlite3VtabSavepoint(X, Y, Z) SQLITE_OK +# define sqlite3GetVTable(X,Y) ((VTable*)0) +#else +SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table*); +SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p); +SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe*); +SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db); +SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db); +SQLITE_PRIVATE void sqlite3VtabLock(VTable *); +SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *); +SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*); +SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int); +SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*); +SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*); 
+SQLITE_PRIVATE Module *sqlite3VtabCreateModule( + sqlite3*, + const char*, + const sqlite3_module*, + void*, + void(*)(void*) + ); +# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0) +#endif +SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*); +SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3*,Module*); +SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse*,Table*); +SQLITE_PRIVATE void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*, int); +SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse*, Token*); +SQLITE_PRIVATE void sqlite3VtabArgInit(Parse*); +SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse*, Token*); +SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3*, int, const char *, char **); +SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse*, Table*); +SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *); +SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); +SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); +SQLITE_PRIVATE void sqlite3InvalidFunction(sqlite3_context*,int,sqlite3_value**); +SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); +SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); +SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); +SQLITE_PRIVATE void sqlite3ParserReset(Parse*); +SQLITE_PRIVATE int sqlite3Reprepare(Vdbe*); +SQLITE_PRIVATE void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*); +SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(Parse *, Expr *, Expr *); +SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3*); +SQLITE_PRIVATE const char *sqlite3JournalModename(int); +#ifndef SQLITE_OMIT_WAL +SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*); +SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int); +#endif +#ifndef SQLITE_OMIT_CTE +SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*); +SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); +SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8); +#else +#define sqlite3WithPush(x,y,z) +#define sqlite3WithDelete(x,y) +#endif + +/* Declarations for functions in fkey.c. All of these are replaced by +** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign +** key functionality is available. If OMIT_TRIGGER is defined but +** OMIT_FOREIGN_KEY is not, only some of the functions are no-oped. In +** this case foreign keys are parsed, but no other functionality is +** provided (enforcement of FK constraints requires the triggers sub-system). 
+*/ +#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) +SQLITE_PRIVATE void sqlite3FkCheck(Parse*, Table*, int, int, int*, int); +SQLITE_PRIVATE void sqlite3FkDropTable(Parse*, SrcList *, Table*); +SQLITE_PRIVATE void sqlite3FkActions(Parse*, Table*, ExprList*, int, int*, int); +SQLITE_PRIVATE int sqlite3FkRequired(Parse*, Table*, int*, int); +SQLITE_PRIVATE u32 sqlite3FkOldmask(Parse*, Table*); +SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *); +#else + #define sqlite3FkActions(a,b,c,d,e,f) + #define sqlite3FkCheck(a,b,c,d,e,f) + #define sqlite3FkDropTable(a,b,c) + #define sqlite3FkOldmask(a,b) 0 + #define sqlite3FkRequired(a,b,c,d) 0 + #define sqlite3FkReferences(a) 0 +#endif +#ifndef SQLITE_OMIT_FOREIGN_KEY +SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*); +SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**); +#else + #define sqlite3FkDelete(a,b) + #define sqlite3FkLocateIndex(a,b,c,d,e) +#endif + + +/* +** Available fault injectors. Should be numbered beginning with 0. +*/ +#define SQLITE_FAULTINJECTOR_MALLOC 0 +#define SQLITE_FAULTINJECTOR_COUNT 1 + +/* +** The interface to the code in fault.c used for identifying "benign" +** malloc failures. This is only present if SQLITE_UNTESTABLE +** is not defined. +*/ +#ifndef SQLITE_UNTESTABLE +SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void); +SQLITE_PRIVATE void sqlite3EndBenignMalloc(void); +#else + #define sqlite3BeginBenignMalloc() + #define sqlite3EndBenignMalloc() +#endif + +/* +** Allowed return values from sqlite3FindInIndex() +*/ +#define IN_INDEX_ROWID 1 /* Search the rowid of the table */ +#define IN_INDEX_EPH 2 /* Search an ephemeral b-tree */ +#define IN_INDEX_INDEX_ASC 3 /* Existing index ASCENDING */ +#define IN_INDEX_INDEX_DESC 4 /* Existing index DESCENDING */ +#define IN_INDEX_NOOP 5 /* No table available. Use comparisons */ +/* +** Allowed flags for the 3rd parameter to sqlite3FindInIndex(). 
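+**
+** For example, the IN-operator code generator might probe for a usable
+** index like this (a sketch only; variable names are for exposition):
+**
+**     eType = sqlite3FindInIndex(pParse, pX,
+**                 IN_INDEX_MEMBERSHIP|IN_INDEX_NOOP_OK, prRhsHasNull, 0);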
+*/
+#define IN_INDEX_NOOP_OK 0x0001 /* OK to return IN_INDEX_NOOP */
+#define IN_INDEX_MEMBERSHIP 0x0002 /* IN operator used for membership test */
+#define IN_INDEX_LOOP 0x0004 /* IN operator used as a loop */
+SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*, int*);
+
+SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int);
+SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *);
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *);
+#endif
+
+SQLITE_PRIVATE int sqlite3JournalIsInMemory(sqlite3_file *p);
+SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *);
+
+SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p);
+#if SQLITE_MAX_EXPR_DEPTH>0
+SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *);
+SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int);
+#else
+ #define sqlite3SelectExprHeight(x) 0
+ #define sqlite3ExprCheckHeight(x,y)
+#endif
+
+SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*);
+SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);
+
+#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
+SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *, sqlite3 *);
+SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db);
+SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db);
+#else
+ #define sqlite3ConnectionBlocked(x,y)
+ #define sqlite3ConnectionUnlocked(x)
+ #define sqlite3ConnectionClosed(x)
+#endif
+
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE void sqlite3ParserTrace(FILE*, char *);
+#endif
+
+/*
+** If the SQLITE_ENABLE_IOTRACE macro is defined, then the global variable
+** sqlite3IoTrace is a pointer to a printf-like routine used to
+** print I/O tracing messages.
+*/
+#ifdef SQLITE_ENABLE_IOTRACE
+# define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; }
+SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe*);
+SQLITE_API SQLITE_EXTERN void (SQLITE_CDECL *sqlite3IoTrace)(const char*,...);
+#else
+# define IOTRACE(A)
+# define sqlite3VdbeIOTraceSql(X)
+#endif
+
+/*
+** These routines are available for the mem2.c debugging memory allocator
+** only. They are used to verify that different "types" of memory
+** allocations are properly tracked by the system.
+**
+** sqlite3MemdebugSetType() sets the "type" of an allocation to one of
+** the MEMTYPE_* macros defined below. The type must be a bitmask with
+** a single bit set.
+**
+** sqlite3MemdebugHasType() returns true if any of the bits in its second
+** argument match the type set by the previous sqlite3MemdebugSetType().
+** sqlite3MemdebugHasType() is intended for use inside assert() statements.
+** For example:
+**
+**     assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+**
+** sqlite3MemdebugNoType() returns true if none of the bits in its second
+** argument match the type set by the previous sqlite3MemdebugSetType().
+**
+** Perhaps the most important point is the difference between MEMTYPE_HEAP
+** and MEMTYPE_LOOKASIDE. If an allocation is MEMTYPE_LOOKASIDE, that means
+** it might have been allocated by lookaside, except the allocation was
+** too large or lookaside was already full. It is important to verify
+** that allocations that might have been satisfied by lookaside are not
+** passed back to non-lookaside free() routines. Asserts such as the
+** example above are placed on the non-lookaside free() routines to verify
+** this constraint.
+**
+** All of this is a no-op for a production build. It only comes into
+** play when the SQLITE_MEMDEBUG compile-time option is used.
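+**
+** For example, a deallocator that must never see lookaside memory can
+** verify its argument like this (an illustrative sketch):
+**
+**     assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );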
+*/ +#ifdef SQLITE_MEMDEBUG +SQLITE_PRIVATE void sqlite3MemdebugSetType(void*,u8); +SQLITE_PRIVATE int sqlite3MemdebugHasType(void*,u8); +SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8); +#else +# define sqlite3MemdebugSetType(X,Y) /* no-op */ +# define sqlite3MemdebugHasType(X,Y) 1 +# define sqlite3MemdebugNoType(X,Y) 1 +#endif +#define MEMTYPE_HEAP 0x01 /* General heap allocations */ +#define MEMTYPE_LOOKASIDE 0x02 /* Heap that might have been lookaside */ +#define MEMTYPE_SCRATCH 0x04 /* Scratch allocations */ +#define MEMTYPE_PCACHE 0x08 /* Page cache allocations */ + +/* +** Threading interface +*/ +#if SQLITE_MAX_WORKER_THREADS>0 +SQLITE_PRIVATE int sqlite3ThreadCreate(SQLiteThread**,void*(*)(void*),void*); +SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**); +#endif + +#if defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST) +SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*); +#endif + +SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr); +SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr); +SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr*, int); +SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int); +SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*); + +#endif /* SQLITEINT_H */ + +/************** End of sqliteInt.h *******************************************/ +/************** Begin file global.c ******************************************/ +/* +** 2008 June 13 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains definitions of global variables and constants. +*/ +/* #include "sqliteInt.h" */ + +/* An array to map all upper-case characters into their corresponding +** lower-case character. +** +** SQLite only considers US-ASCII (or EBCDIC) characters. We do not +** handle case conversions for the UTF character set since the tables +** involved are nearly as big or bigger than SQLite itself. 
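+**
+** For example (an illustrative note, not part of the original comment),
+** a case-insensitive comparison of two ASCII bytes x and y can be written
+**
+**     sqlite3UpperToLower[(u8)x]==sqlite3UpperToLower[(u8)y]
+**
+** which is the pattern used by routines such as sqlite3StrICmp().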
+*/ +SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = { +#ifdef SQLITE_ASCII + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103, + 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, + 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107, + 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, + 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, + 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, + 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, + 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, + 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, + 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, + 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, + 252,253,254,255 +#endif +#ifdef SQLITE_EBCDIC + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */ + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */ + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */ + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */ + 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, /* 6x */ + 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, /* 7x */ + 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */ + 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, /* 9x */ + 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */ + 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */ + 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */ + 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */ + 224,225,162,163,164,165,166,167,168,169,234,235,236,237,238,239, /* Ex */ + 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, /* Fx */ +#endif +}; + +/* +** The following 256 byte lookup table is used to support SQLites built-in +** equivalents to the following standard library functions: +** +** isspace() 0x01 +** isalpha() 0x02 +** isdigit() 0x04 +** isalnum() 0x06 +** isxdigit() 0x08 +** toupper() 0x20 +** SQLite identifier character 0x40 +** Quote character 0x80 +** +** Bit 0x20 is set if the mapped character requires translation to upper +** case. i.e. if the character is a lower-case ASCII character. +** If x is a lower-case ASCII character, then its upper-case equivalent +** is (x - 0x20). Therefore toupper() can be implemented as: +** +** (x & ~(map[x]&0x20)) +** +** The equivalent of tolower() is implemented using the sqlite3UpperToLower[] +** array. tolower() is used more often than toupper() by SQLite. +** +** Bit 0x40 is set if the character is non-alphanumeric and can be used in an +** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any +** non-ASCII UTF character. Hence the test for whether or not a character is +** part of an identifier is 0x46. 
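+**
+** For illustration, the character-classification macros defined earlier in
+** this file expand to lookups in this table of the following form (shown
+** here for reference only):
+**
+**     #define sqlite3Isdigit(x)   (sqlite3CtypeMap[(unsigned char)(x)]&0x04)
+**     #define sqlite3Toupper(x)   ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20))
+**     #define sqlite3IsIdChar(x)  ((sqlite3CtypeMap[(unsigned char)(x)]&0x46)!=0)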
+*/ +#ifdef SQLITE_ASCII +SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00..07 ........ */ + 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */ + 0x01, 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x80, /* 20..27 !"#$%&' */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */ + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */ + 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */ + + 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */ + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */ + 0x02, 0x02, 0x02, 0x80, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */ + 0x80, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */ + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */ + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */ + 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */ + + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 80..87 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 88..8f ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 90..97 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 98..9f ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a0..a7 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a8..af ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b0..b7 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b8..bf ........ */ + + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c0..c7 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c8..cf ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d0..d7 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d8..df ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e0..e7 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e8..ef ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* f0..f7 ........ */ + 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 /* f8..ff ........ */ +}; +#endif + +/* EVIDENCE-OF: R-02982-34736 In order to maintain full backwards +** compatibility for legacy applications, the URI filename capability is +** disabled by default. +** +** EVIDENCE-OF: R-38799-08373 URI filenames can be enabled or disabled +** using the SQLITE_USE_URI=1 or SQLITE_USE_URI=0 compile-time options. +** +** EVIDENCE-OF: R-43642-56306 By default, URI handling is globally +** disabled. The default value may be changed by compiling with the +** SQLITE_USE_URI symbol defined. +*/ +#ifndef SQLITE_USE_URI +# define SQLITE_USE_URI 0 +#endif + +/* EVIDENCE-OF: R-38720-18127 The default setting is determined by the +** SQLITE_ALLOW_COVERING_INDEX_SCAN compile-time option, or is "on" if +** that compile-time option is omitted. +*/ +#ifndef SQLITE_ALLOW_COVERING_INDEX_SCAN +# define SQLITE_ALLOW_COVERING_INDEX_SCAN 1 +#endif + +/* The minimum PMA size is set to this value multiplied by the database +** page size in bytes. +*/ +#ifndef SQLITE_SORTER_PMASZ +# define SQLITE_SORTER_PMASZ 250 +#endif + +/* Statement journals spill to disk when their size exceeds the following +** threshold (in bytes). 
0 means that statement journals are created and
+** written to disk immediately (the default behavior for SQLite versions
+** before 3.12.0).  -1 means always keep the entire statement journal in
+** memory.  (The statement journal is also always held entirely in memory
+** if journal_mode=MEMORY or if temp_store=MEMORY, regardless of this
+** setting.)
+*/
+#ifndef SQLITE_STMTJRNL_SPILL
+# define SQLITE_STMTJRNL_SPILL (64*1024)
+#endif
+
+/*
+** The default lookaside configuration, in the format "SZ,N".  SZ is the
+** number of bytes in each lookaside slot (should be a multiple of 8)
+** and N is the number of slots.  The lookaside configuration can be
+** changed at start-time using sqlite3_config(SQLITE_CONFIG_LOOKASIDE)
+** or at run-time for an individual database connection using
+** sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE).
+*/
+#ifndef SQLITE_DEFAULT_LOOKASIDE
+# define SQLITE_DEFAULT_LOOKASIDE 1200,100
+#endif
+
+
+/*
+** The following singleton contains the global configuration for
+** the SQLite library.
+*/
+SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
+   SQLITE_DEFAULT_MEMSTATUS, /* bMemstat */
+   1,                        /* bCoreMutex */
+   SQLITE_THREADSAFE==1,     /* bFullMutex */
+   SQLITE_USE_URI,           /* bOpenUri */
+   SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
+   0x7ffffffe,               /* mxStrlen */
+   0,                        /* neverCorrupt */
+   SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */
+   SQLITE_STMTJRNL_SPILL,    /* nStmtSpill */
+   {0,0,0,0,0,0,0,0},        /* m */
+   {0,0,0,0,0,0,0,0,0},      /* mutex */
+   {0,0,0,0,0,0,0,0,0,0,0,0,0},/* pcache2 */
+   (void*)0,                 /* pHeap */
+   0,                        /* nHeap */
+   0, 0,                     /* mnHeap, mxHeap */
+   SQLITE_DEFAULT_MMAP_SIZE, /* szMmap */
+   SQLITE_MAX_MMAP_SIZE,     /* mxMmap */
+   (void*)0,                 /* pScratch */
+   0,                        /* szScratch */
+   0,                        /* nScratch */
+   (void*)0,                 /* pPage */
+   0,                        /* szPage */
+   SQLITE_DEFAULT_PCACHE_INITSZ, /* nPage */
+   0,                        /* mxParserStack */
+   0,                        /* sharedCacheEnabled */
+   SQLITE_SORTER_PMASZ,      /* szPma */
+   /* All the rest should always be initialized to zero */
+   0,                        /* isInit */
+   0,                        /* inProgress */
+   0,                        /* isMutexInit */
+   0,                        /* isMallocInit */
+   0,                        /* isPCacheInit */
+   0,                        /* nRefInitMutex */
+   0,                        /* pInitMutex */
+   0,                        /* xLog */
+   0,                        /* pLogArg */
+#ifdef SQLITE_ENABLE_SQLLOG
+   0,                        /* xSqllog */
+   0,                        /* pSqllogArg */
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+   0,                        /* xVdbeBranch */
+   0,                        /* pVbeBranchArg */
+#endif
+#ifndef SQLITE_UNTESTABLE
+   0,                        /* xTestCallback */
+#endif
+   0,                        /* bLocaltimeFault */
+   0x7ffffffe                /* iOnceResetThreshold */
+};
+
+/*
+** Hash table for global functions - functions common to all
+** database connections.  After initialization, this table is
+** read-only.
+*/
+SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions;
+
+/*
+** Constant tokens for values 0 and 1.
+*/
+SQLITE_PRIVATE const Token sqlite3IntTokens[] = {
+   { "0", 1 },
+   { "1", 1 }
+};
+
+
+/*
+** The value of the "pending" byte must be 0x40000000 (1 byte past the
+** 1-gigabyte boundary) in a compatible database.  SQLite never uses
+** the database page that contains the pending byte.  It never attempts
+** to read or write that page.  The pending byte page is set aside
+** for use by the VFS layers as space for managing file locks.
+**
+** During testing, it is often desirable to move the pending byte to
+** a different position in the file.  This allows code that has to
+** deal with the pending byte to run on files that are much smaller
+** than 1 GiB.  The sqlite3_test_control() interface can be used to
+** move the pending byte.
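+**
+** For example (editor's note): with the default pending byte of 0x40000000
+** and a 4096-byte page size, the locking page is (0x40000000/4096)+1 =
+** page 262145, which SQLite will never read or write.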
+** +** IMPORTANT: Changing the pending byte to any value other than +** 0x40000000 results in an incompatible database file format! +** Changing the pending byte during operation will result in undefined +** and incorrect behavior. +*/ +#ifndef SQLITE_OMIT_WSD +SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000; +#endif + +/* #include "opcodes.h" */ +/* +** Properties of opcodes. The OPFLG_INITIALIZER macro is +** created by mkopcodeh.awk during compilation. Data is obtained +** from the comments following the "case OP_xxxx:" statements in +** the vdbe.c file. +*/ +SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER; + +/* +** Name of the default collating sequence +*/ +SQLITE_PRIVATE const char sqlite3StrBINARY[] = "BINARY"; + +/************** End of global.c **********************************************/ +/************** Begin file ctime.c *******************************************/ +/* +** 2010 February 23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file implements routines used to report what compile-time options +** SQLite was built with. +*/ + +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS + +/* #include "sqliteInt.h" */ + +/* +** An array of names of all compile-time options. This array should +** be sorted A-Z. +** +** This array looks large, but in a typical installation actually uses +** only a handful of compile-time options, so most times this array is usually +** rather short and uses little memory space. +*/ +static const char * const azCompileOpt[] = { + +/* These macros are provided to "stringify" the value of the define +** for those options in which the value is meaningful. */ +#define CTIMEOPT_VAL_(opt) #opt +#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt) + +#if SQLITE_32BIT_ROWID + "32BIT_ROWID", +#endif +#if SQLITE_4_BYTE_ALIGNED_MALLOC + "4_BYTE_ALIGNED_MALLOC", +#endif +#if SQLITE_CASE_SENSITIVE_LIKE + "CASE_SENSITIVE_LIKE", +#endif +#if SQLITE_CHECK_PAGES + "CHECK_PAGES", +#endif +#if defined(__clang__) && defined(__clang_major__) + "COMPILER=clang-" CTIMEOPT_VAL(__clang_major__) "." + CTIMEOPT_VAL(__clang_minor__) "." 
+ CTIMEOPT_VAL(__clang_patchlevel__), +#elif defined(_MSC_VER) + "COMPILER=msvc-" CTIMEOPT_VAL(_MSC_VER), +#elif defined(__GNUC__) && defined(__VERSION__) + "COMPILER=gcc-" __VERSION__, +#endif +#if SQLITE_COVERAGE_TEST + "COVERAGE_TEST", +#endif +#if SQLITE_DEBUG + "DEBUG", +#endif +#if SQLITE_DEFAULT_LOCKING_MODE + "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE), +#endif +#if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc) + "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), +#endif +#if SQLITE_DIRECT_OVERFLOW_READ + "DIRECT_OVERFLOW_READ", +#endif +#if SQLITE_DISABLE_DIRSYNC + "DISABLE_DIRSYNC", +#endif +#if SQLITE_DISABLE_LFS + "DISABLE_LFS", +#endif +#if SQLITE_ENABLE_8_3_NAMES + "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES), +#endif +#if SQLITE_ENABLE_API_ARMOR + "ENABLE_API_ARMOR", +#endif +#if SQLITE_ENABLE_ATOMIC_WRITE + "ENABLE_ATOMIC_WRITE", +#endif +#if SQLITE_ENABLE_CEROD + "ENABLE_CEROD", +#endif +#if SQLITE_ENABLE_COLUMN_METADATA + "ENABLE_COLUMN_METADATA", +#endif +#if SQLITE_ENABLE_DBSTAT_VTAB + "ENABLE_DBSTAT_VTAB", +#endif +#if SQLITE_ENABLE_EXPENSIVE_ASSERT + "ENABLE_EXPENSIVE_ASSERT", +#endif +#if SQLITE_ENABLE_FTS1 + "ENABLE_FTS1", +#endif +#if SQLITE_ENABLE_FTS2 + "ENABLE_FTS2", +#endif +#if SQLITE_ENABLE_FTS3 + "ENABLE_FTS3", +#endif +#if SQLITE_ENABLE_FTS3_PARENTHESIS + "ENABLE_FTS3_PARENTHESIS", +#endif +#if SQLITE_ENABLE_FTS4 + "ENABLE_FTS4", +#endif +#if SQLITE_ENABLE_FTS5 + "ENABLE_FTS5", +#endif +#if SQLITE_ENABLE_ICU + "ENABLE_ICU", +#endif +#if SQLITE_ENABLE_IOTRACE + "ENABLE_IOTRACE", +#endif +#if SQLITE_ENABLE_JSON1 + "ENABLE_JSON1", +#endif +#if SQLITE_ENABLE_LOAD_EXTENSION + "ENABLE_LOAD_EXTENSION", +#endif +#if SQLITE_ENABLE_LOCKING_STYLE + "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), +#endif +#if SQLITE_ENABLE_MEMORY_MANAGEMENT + "ENABLE_MEMORY_MANAGEMENT", +#endif +#if SQLITE_ENABLE_MEMSYS3 + "ENABLE_MEMSYS3", +#endif +#if SQLITE_ENABLE_MEMSYS5 + "ENABLE_MEMSYS5", +#endif +#if SQLITE_ENABLE_OVERSIZE_CELL_CHECK + "ENABLE_OVERSIZE_CELL_CHECK", +#endif +#if SQLITE_ENABLE_RTREE + "ENABLE_RTREE", +#endif +#if defined(SQLITE_ENABLE_STAT4) + "ENABLE_STAT4", +#elif defined(SQLITE_ENABLE_STAT3) + "ENABLE_STAT3", +#endif +#if SQLITE_ENABLE_UNLOCK_NOTIFY + "ENABLE_UNLOCK_NOTIFY", +#endif +#if SQLITE_ENABLE_UPDATE_DELETE_LIMIT + "ENABLE_UPDATE_DELETE_LIMIT", +#endif +#if defined(SQLITE_ENABLE_URI_00_ERROR) + "ENABLE_URI_00_ERROR", +#endif +#if SQLITE_HAS_CODEC + "HAS_CODEC", +#endif +#if HAVE_ISNAN || SQLITE_HAVE_ISNAN + "HAVE_ISNAN", +#endif +#if SQLITE_HOMEGROWN_RECURSIVE_MUTEX + "HOMEGROWN_RECURSIVE_MUTEX", +#endif +#if SQLITE_IGNORE_AFP_LOCK_ERRORS + "IGNORE_AFP_LOCK_ERRORS", +#endif +#if SQLITE_IGNORE_FLOCK_LOCK_ERRORS + "IGNORE_FLOCK_LOCK_ERRORS", +#endif +#ifdef SQLITE_INT64_TYPE + "INT64_TYPE", +#endif +#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS + "LIKE_DOESNT_MATCH_BLOBS", +#endif +#if SQLITE_LOCK_TRACE + "LOCK_TRACE", +#endif +#if defined(SQLITE_MAX_MMAP_SIZE) && !defined(SQLITE_MAX_MMAP_SIZE_xc) + "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE), +#endif +#ifdef SQLITE_MAX_SCHEMA_RETRY + "MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY), +#endif +#if SQLITE_MEMDEBUG + "MEMDEBUG", +#endif +#if SQLITE_MIXED_ENDIAN_64BIT_FLOAT + "MIXED_ENDIAN_64BIT_FLOAT", +#endif +#if SQLITE_NO_SYNC + "NO_SYNC", +#endif +#if SQLITE_OMIT_ALTERTABLE + "OMIT_ALTERTABLE", +#endif +#if SQLITE_OMIT_ANALYZE + "OMIT_ANALYZE", +#endif +#if SQLITE_OMIT_ATTACH + "OMIT_ATTACH", +#endif 
+#if SQLITE_OMIT_AUTHORIZATION + "OMIT_AUTHORIZATION", +#endif +#if SQLITE_OMIT_AUTOINCREMENT + "OMIT_AUTOINCREMENT", +#endif +#if SQLITE_OMIT_AUTOINIT + "OMIT_AUTOINIT", +#endif +#if SQLITE_OMIT_AUTOMATIC_INDEX + "OMIT_AUTOMATIC_INDEX", +#endif +#if SQLITE_OMIT_AUTORESET + "OMIT_AUTORESET", +#endif +#if SQLITE_OMIT_AUTOVACUUM + "OMIT_AUTOVACUUM", +#endif +#if SQLITE_OMIT_BETWEEN_OPTIMIZATION + "OMIT_BETWEEN_OPTIMIZATION", +#endif +#if SQLITE_OMIT_BLOB_LITERAL + "OMIT_BLOB_LITERAL", +#endif +#if SQLITE_OMIT_BTREECOUNT + "OMIT_BTREECOUNT", +#endif +#if SQLITE_OMIT_CAST + "OMIT_CAST", +#endif +#if SQLITE_OMIT_CHECK + "OMIT_CHECK", +#endif +#if SQLITE_OMIT_COMPLETE + "OMIT_COMPLETE", +#endif +#if SQLITE_OMIT_COMPOUND_SELECT + "OMIT_COMPOUND_SELECT", +#endif +#if SQLITE_OMIT_CTE + "OMIT_CTE", +#endif +#if SQLITE_OMIT_DATETIME_FUNCS + "OMIT_DATETIME_FUNCS", +#endif +#if SQLITE_OMIT_DECLTYPE + "OMIT_DECLTYPE", +#endif +#if SQLITE_OMIT_DEPRECATED + "OMIT_DEPRECATED", +#endif +#if SQLITE_OMIT_DISKIO + "OMIT_DISKIO", +#endif +#if SQLITE_OMIT_EXPLAIN + "OMIT_EXPLAIN", +#endif +#if SQLITE_OMIT_FLAG_PRAGMAS + "OMIT_FLAG_PRAGMAS", +#endif +#if SQLITE_OMIT_FLOATING_POINT + "OMIT_FLOATING_POINT", +#endif +#if SQLITE_OMIT_FOREIGN_KEY + "OMIT_FOREIGN_KEY", +#endif +#if SQLITE_OMIT_GET_TABLE + "OMIT_GET_TABLE", +#endif +#if SQLITE_OMIT_INCRBLOB + "OMIT_INCRBLOB", +#endif +#if SQLITE_OMIT_INTEGRITY_CHECK + "OMIT_INTEGRITY_CHECK", +#endif +#if SQLITE_OMIT_LIKE_OPTIMIZATION + "OMIT_LIKE_OPTIMIZATION", +#endif +#if SQLITE_OMIT_LOAD_EXTENSION + "OMIT_LOAD_EXTENSION", +#endif +#if SQLITE_OMIT_LOCALTIME + "OMIT_LOCALTIME", +#endif +#if SQLITE_OMIT_LOOKASIDE + "OMIT_LOOKASIDE", +#endif +#if SQLITE_OMIT_MEMORYDB + "OMIT_MEMORYDB", +#endif +#if SQLITE_OMIT_OR_OPTIMIZATION + "OMIT_OR_OPTIMIZATION", +#endif +#if SQLITE_OMIT_PAGER_PRAGMAS + "OMIT_PAGER_PRAGMAS", +#endif +#if SQLITE_OMIT_PRAGMA + "OMIT_PRAGMA", +#endif +#if SQLITE_OMIT_PROGRESS_CALLBACK + "OMIT_PROGRESS_CALLBACK", +#endif +#if SQLITE_OMIT_QUICKBALANCE + "OMIT_QUICKBALANCE", +#endif +#if SQLITE_OMIT_REINDEX + "OMIT_REINDEX", +#endif +#if SQLITE_OMIT_SCHEMA_PRAGMAS + "OMIT_SCHEMA_PRAGMAS", +#endif +#if SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS + "OMIT_SCHEMA_VERSION_PRAGMAS", +#endif +#if SQLITE_OMIT_SHARED_CACHE + "OMIT_SHARED_CACHE", +#endif +#if SQLITE_OMIT_SUBQUERY + "OMIT_SUBQUERY", +#endif +#if SQLITE_OMIT_TCL_VARIABLE + "OMIT_TCL_VARIABLE", +#endif +#if SQLITE_OMIT_TEMPDB + "OMIT_TEMPDB", +#endif +#if SQLITE_OMIT_TRACE + "OMIT_TRACE", +#endif +#if SQLITE_OMIT_TRIGGER + "OMIT_TRIGGER", +#endif +#if SQLITE_OMIT_TRUNCATE_OPTIMIZATION + "OMIT_TRUNCATE_OPTIMIZATION", +#endif +#if SQLITE_OMIT_UTF16 + "OMIT_UTF16", +#endif +#if SQLITE_OMIT_VACUUM + "OMIT_VACUUM", +#endif +#if SQLITE_OMIT_VIEW + "OMIT_VIEW", +#endif +#if SQLITE_OMIT_VIRTUALTABLE + "OMIT_VIRTUALTABLE", +#endif +#if SQLITE_OMIT_WAL + "OMIT_WAL", +#endif +#if SQLITE_OMIT_WSD + "OMIT_WSD", +#endif +#if SQLITE_OMIT_XFER_OPT + "OMIT_XFER_OPT", +#endif +#if SQLITE_PERFORMANCE_TRACE + "PERFORMANCE_TRACE", +#endif +#if SQLITE_PROXY_DEBUG + "PROXY_DEBUG", +#endif +#if SQLITE_RTREE_INT_ONLY + "RTREE_INT_ONLY", +#endif +#if SQLITE_SECURE_DELETE + "SECURE_DELETE", +#endif +#if SQLITE_SMALL_STACK + "SMALL_STACK", +#endif +#if SQLITE_SOUNDEX + "SOUNDEX", +#endif +#if SQLITE_SYSTEM_MALLOC + "SYSTEM_MALLOC", +#endif +#if SQLITE_TCL + "TCL", +#endif +#if defined(SQLITE_TEMP_STORE) && !defined(SQLITE_TEMP_STORE_xc) + "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE), +#endif +#if SQLITE_TEST + "TEST", +#endif +#if 
defined(SQLITE_THREADSAFE)
+  "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE),
+#endif
+#if SQLITE_UNTESTABLE
+  "UNTESTABLE",
+#endif
+#if SQLITE_USE_ALLOCA
+  "USE_ALLOCA",
+#endif
+#if SQLITE_USER_AUTHENTICATION
+  "USER_AUTHENTICATION",
+#endif
+#if SQLITE_WIN32_MALLOC
+  "WIN32_MALLOC",
+#endif
+#if SQLITE_ZERO_MALLOC
+  "ZERO_MALLOC"
+#endif
+};
+
+/*
+** Given the name of a compile-time option, return true if that option
+** was used and false if not.
+**
+** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix
+** is not required for a match.
+*/
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
+  int i, n;
+
+#if SQLITE_ENABLE_API_ARMOR
+  if( zOptName==0 ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  if( sqlite3StrNICmp(zOptName, "SQLITE_", 7)==0 ) zOptName += 7;
+  n = sqlite3Strlen30(zOptName);
+
+  /* Since ArraySize(azCompileOpt) is normally in single digits, a
+  ** linear search is adequate.  No need for a binary search. */
+  for(i=0; i<ArraySize(azCompileOpt); i++){
+    if( sqlite3StrNICmp(zOptName, azCompileOpt[i], n)==0
+     && sqlite3IsIdChar((unsigned char)azCompileOpt[i][n])==0
+    ){
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/*
+** Return the N-th compile-time option string.  If N is out of range,
+** return a NULL pointer.
+*/
+SQLITE_API const char *sqlite3_compileoption_get(int N){
+  if( N>=0 && N<ArraySize(azCompileOpt) ){
+    return azCompileOpt[N];
+  }
+  return 0;
+}
+
+#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */
+
+/************** End of ctime.c ***********************************************/
+/************** Begin file vdbeInt.h *****************************************/
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the header file for information that is private to the
+** VDBE.  This information used to all be at the top of the single
+** vdbe.c source file, but that file became too big so the subroutines
+** were split out.
+*/
+#ifndef SQLITE_VDBEINT_H
+#define SQLITE_VDBEINT_H
+
+/*
+** The maximum number of times that a statement will try to reparse
+** itself before giving up and returning SQLITE_SCHEMA.
+*/
+#ifndef SQLITE_MAX_SCHEMA_RETRY
+# define SQLITE_MAX_SCHEMA_RETRY 50
+#endif
+
+/*
+** SQL is translated into a sequence of instructions to be
+** executed by a virtual machine.  Each instruction is an instance
+** of the following structure.
+*/
+typedef struct VdbeOp Op;
+
+/*
+** Boolean values
+*/
+typedef unsigned Bool;
+
+/* Opaque type used by code in vdbesort.c */
+typedef struct VdbeSorter VdbeSorter;
+
+/* Elements of the linked list at Vdbe.pAuxData */
+typedef struct AuxData AuxData;
+
+/* Types of VDBE cursors */
+#define CURTYPE_BTREE       0
+#define CURTYPE_SORTER      1
+#define CURTYPE_VTAB        2
+#define CURTYPE_PSEUDO      3
+
+/*
+** A VdbeCursor is a superclass (a wrapper) for various cursor objects:
+**
+**      * A b-tree cursor
+**          -  In the main database or in an ephemeral database
+**          -  On either an index or a table
+**      * A sorter
+**      * A virtual table
+**      * A one-row "pseudotable" stored in a single register
+*/
+typedef struct VdbeCursor VdbeCursor;
+struct VdbeCursor {
+  u8 eCurType;            /* One of the CURTYPE_* values above */
+  i8 iDb;                 /* Index of cursor database in db->aDb[] (or -1) */
+  u8 nullRow;             /* True if pointing to a row with no data */
+  u8 deferredMoveto;      /* A call to sqlite3BtreeMoveto() is needed */
+  u8 isTable;             /* True for rowid tables.  False for indexes */
+#ifdef SQLITE_DEBUG
+  u8 seekOp;              /* Most recent seek operation on this cursor */
+  u8 wrFlag;              /* The wrFlag argument to sqlite3BtreeCursor() */
+#endif
+  Bool isEphemeral:1;     /* True for an ephemeral table */
+  Bool useRandomRowid:1;  /* Generate new record numbers semi-randomly */
+  Bool isOrdered:1;       /* True if the table is not BTREE_UNORDERED */
+  Btree *pBtx;            /* Separate file holding temporary table */
+  i64 seqCount;           /* Sequence counter */
+  int *aAltMap;           /* Mapping from table to index column numbers */
+
+  /* Cached OP_Column parse information is only valid if cacheStatus matches
+  ** Vdbe.cacheCtr.  Vdbe.cacheCtr will never take on the value of
+  ** CACHE_STALE (0) and so setting cacheStatus=CACHE_STALE guarantees that
+  ** the cache is out of date. */
+  u32 cacheStatus;        /* Cache is valid if this matches Vdbe.cacheCtr */
+  int seekResult;         /* Result of previous sqlite3BtreeMoveto() or 0
+                          ** if there have been no prior seeks on the cursor. */
+  /* NB: seekResult does not distinguish between "no seeks have ever occurred
+  ** on this cursor" and "the most recent seek was an exact match". */
+
+  /* When a new VdbeCursor is allocated, only the fields above are zeroed.
+  ** The fields that follow are uninitialized, and must be individually
+  ** initialized prior to first use. */
+  VdbeCursor *pAltCursor; /* Associated index cursor from which to read */
+  union {
+    BtCursor *pCursor;          /* CURTYPE_BTREE.  Btree cursor */
+    sqlite3_vtab_cursor *pVCur; /* CURTYPE_VTAB.   Vtab cursor */
+    int pseudoTableReg;         /* CURTYPE_PSEUDO. Reg holding content. */
+    VdbeSorter *pSorter;        /* CURTYPE_SORTER. 
Sorter object */ + } uc; + KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */ + u32 iHdrOffset; /* Offset to next unparsed byte of the header */ + Pgno pgnoRoot; /* Root page of the open btree cursor */ + i16 nField; /* Number of fields in the header */ + u16 nHdrParsed; /* Number of header fields parsed so far */ + i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ + u32 *aOffset; /* Pointer to aType[nField] */ + const u8 *aRow; /* Data for the current row, if all on one page */ + u32 payloadSize; /* Total number of bytes in the record */ + u32 szRow; /* Byte available in aRow */ +#ifdef SQLITE_ENABLE_COLUMN_USED_MASK + u64 maskUsed; /* Mask of columns used by this cursor */ +#endif + + /* 2*nField extra array elements allocated for aType[], beyond the one + ** static element declared in the structure. nField total array slots for + ** aType[] and nField+1 array slots for aOffset[] */ + u32 aType[1]; /* Type values record decode. MUST BE LAST */ +}; + + +/* +** A value for VdbeCursor.cacheStatus that means the cache is always invalid. +*/ +#define CACHE_STALE 0 + +/* +** When a sub-program is executed (OP_Program), a structure of this type +** is allocated to store the current value of the program counter, as +** well as the current memory cell array and various other frame specific +** values stored in the Vdbe struct. When the sub-program is finished, +** these values are copied back to the Vdbe from the VdbeFrame structure, +** restoring the state of the VM to as it was before the sub-program +** began executing. +** +** The memory for a VdbeFrame object is allocated and managed by a memory +** cell in the parent (calling) frame. When the memory cell is deleted or +** overwritten, the VdbeFrame object is not freed immediately. Instead, it +** is linked into the Vdbe.pDelFrame list. The contents of the Vdbe.pDelFrame +** list is deleted when the VM is reset in VdbeHalt(). The reason for doing +** this instead of deleting the VdbeFrame immediately is to avoid recursive +** calls to sqlite3VdbeMemRelease() when the memory cells belonging to the +** child frame are released. +** +** The currently executing frame is stored in Vdbe.pFrame. Vdbe.pFrame is +** set to NULL if the currently executing frame is the main program. +*/ +typedef struct VdbeFrame VdbeFrame; +struct VdbeFrame { + Vdbe *v; /* VM this frame belongs to */ + VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */ + Op *aOp; /* Program instructions for parent frame */ + i64 *anExec; /* Event counters from parent frame */ + Mem *aMem; /* Array of memory cells for parent frame */ + VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */ + void *token; /* Copy of SubProgram.token */ + i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */ + AuxData *pAuxData; /* Linked list of auxdata allocations */ + int nCursor; /* Number of entries in apCsr */ + int pc; /* Program Counter in parent (calling) frame */ + int nOp; /* Size of aOp array */ + int nMem; /* Number of entries in aMem */ + int nChildMem; /* Number of memory cells for child frame */ + int nChildCsr; /* Number of cursors for child frame */ + int nChange; /* Statement changes (Vdbe.nChange) */ + int nDbChange; /* Value of db->nChange */ +}; + +#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))]) + +/* +** Internally, the vdbe manipulates nearly all SQL values as Mem +** structures. Each Mem struct may cache multiple representations (string, +** integer etc.) of the same value. 
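+**
+** For example (an illustrative note): stringifying an integer Mem leaves
+** u.i intact while z and n receive the decimal text, and flags carries
+** MEM_Int|MEM_Str afterwards, so either representation can be read later
+** without re-conversion.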
+*/ +struct Mem { + union MemValue { + double r; /* Real value used when MEM_Real is set in flags */ + i64 i; /* Integer value used when MEM_Int is set in flags */ + int nZero; /* Used when bit MEM_Zero is set in flags */ + FuncDef *pDef; /* Used only when flags==MEM_Agg */ + RowSet *pRowSet; /* Used only when flags==MEM_RowSet */ + VdbeFrame *pFrame; /* Used when flags==MEM_Frame */ + } u; + u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */ + u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */ + u8 eSubtype; /* Subtype for this value */ + int n; /* Number of characters in string value, excluding '\0' */ + char *z; /* String or BLOB value */ + /* ShallowCopy only needs to copy the information above */ + char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */ + int szMalloc; /* Size of the zMalloc allocation */ + u32 uTemp; /* Transient storage for serial_type in OP_MakeRecord */ + sqlite3 *db; /* The associated database connection */ + void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */ +#ifdef SQLITE_DEBUG + Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */ + void *pFiller; /* So that sizeof(Mem) is a multiple of 8 */ +#endif +}; + +/* +** Size of struct Mem not including the Mem.zMalloc member or anything that +** follows. +*/ +#define MEMCELLSIZE offsetof(Mem,zMalloc) + +/* One or more of the following flags are set to indicate the validOK +** representations of the value stored in the Mem struct. +** +** If the MEM_Null flag is set, then the value is an SQL NULL value. +** No other flags may be set in this case. +** +** If the MEM_Str flag is set then Mem.z points at a string representation. +** Usually this is encoded in the same unicode encoding as the main +** database (see below for exceptions). If the MEM_Term flag is also +** set, then the string is nul terminated. The MEM_Int and MEM_Real +** flags may coexist with the MEM_Str flag. +*/ +#define MEM_Null 0x0001 /* Value is NULL */ +#define MEM_Str 0x0002 /* Value is a string */ +#define MEM_Int 0x0004 /* Value is an integer */ +#define MEM_Real 0x0008 /* Value is a real number */ +#define MEM_Blob 0x0010 /* Value is a BLOB */ +#define MEM_AffMask 0x001f /* Mask of affinity bits */ +#define MEM_RowSet 0x0020 /* Value is a RowSet object */ +#define MEM_Frame 0x0040 /* Value is a VdbeFrame object */ +#define MEM_Undefined 0x0080 /* Value is undefined */ +#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */ +#define MEM_TypeMask 0x81ff /* Mask of type bits */ + + +/* Whenever Mem contains a valid string or blob representation, one of +** the following flags must be set to determine the memory management +** policy for Mem.z. The MEM_Term flag tells us whether or not the +** string is \000 or \u0000 terminated +*/ +#define MEM_Term 0x0200 /* String rep is nul terminated */ +#define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */ +#define MEM_Static 0x0800 /* Mem.z points to a static string */ +#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */ +#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */ +#define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */ +#define MEM_Subtype 0x8000 /* Mem.eSubtype is valid */ +#ifdef SQLITE_OMIT_INCRBLOB + #undef MEM_Zero + #define MEM_Zero 0x0000 +#endif + +/* Return TRUE if Mem X contains dynamically allocated content - anything +** that needs to be deallocated to avoid a leak. 
+*/ +#define VdbeMemDynamic(X) \ + (((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame))!=0) + +/* +** Clear any existing type flags from a Mem and replace them with f +*/ +#define MemSetTypeFlag(p, f) \ + ((p)->flags = ((p)->flags&~(MEM_TypeMask|MEM_Zero))|f) + +/* +** Return true if a memory cell is not marked as invalid. This macro +** is for use inside assert() statements only. +*/ +#ifdef SQLITE_DEBUG +#define memIsValid(M) ((M)->flags & MEM_Undefined)==0 +#endif + +/* +** Each auxiliary data pointer stored by a user defined function +** implementation calling sqlite3_set_auxdata() is stored in an instance +** of this structure. All such structures associated with a single VM +** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed +** when the VM is halted (if not before). +*/ +struct AuxData { + int iOp; /* Instruction number of OP_Function opcode */ + int iArg; /* Index of function argument. */ + void *pAux; /* Aux data pointer */ + void (*xDelete)(void *); /* Destructor for the aux data */ + AuxData *pNext; /* Next element in list */ +}; + +/* +** The "context" argument for an installable function. A pointer to an +** instance of this structure is the first argument to the routines used +** implement the SQL functions. +** +** There is a typedef for this structure in sqlite.h. So all routines, +** even the public interface to SQLite, can use a pointer to this structure. +** But this file is the only place where the internal details of this +** structure are known. +** +** This structure is defined inside of vdbeInt.h because it uses substructures +** (Mem) which are only defined there. +*/ +struct sqlite3_context { + Mem *pOut; /* The return value is stored here */ + FuncDef *pFunc; /* Pointer to function information */ + Mem *pMem; /* Memory cell used to store aggregate context */ + Vdbe *pVdbe; /* The VM that owns this context */ + int iOp; /* Instruction number of OP_Function */ + int isError; /* Error code returned by the function. */ + u8 skipFlag; /* Skip accumulator loading if true */ + u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */ + u8 argc; /* Number of arguments */ + sqlite3_value *argv[1]; /* Argument set */ +}; + +/* A bitfield type for use inside of structures. Always follow with :N where +** N is the number of bits. +*/ +typedef unsigned bft; /* Bit Field Type */ + +typedef struct ScanStatus ScanStatus; +struct ScanStatus { + int addrExplain; /* OP_Explain for loop */ + int addrLoop; /* Address of "loops" counter */ + int addrVisit; /* Address of "rows visited" counter */ + int iSelectID; /* The "Select-ID" for this loop */ + LogEst nEst; /* Estimated output rows per loop */ + char *zName; /* Name of table or index */ +}; + +/* +** An instance of the virtual machine. This structure contains the complete +** state of the virtual machine. +** +** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare() +** is really a pointer to an instance of this structure. 
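+**
+** For example (illustration only), the public entry points simply cast
+** the opaque statement handle back to the internal type:
+**
+**     Vdbe *v = (Vdbe*)pStmt;    /* inside sqlite3_step() and friends */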
+*/ +struct Vdbe { + sqlite3 *db; /* The database connection that owns this statement */ + Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ + Parse *pParse; /* Parsing context used to create this Vdbe */ + ynVar nVar; /* Number of entries in aVar[] */ + u32 magic; /* Magic number for sanity checking */ + int nMem; /* Number of memory locations currently allocated */ + int nCursor; /* Number of slots in apCsr[] */ + u32 cacheCtr; /* VdbeCursor row cache generation counter */ + int pc; /* The program counter */ + int rc; /* Value to return */ + int nChange; /* Number of db changes made since last reset */ + int iStatement; /* Statement number (or 0 if has not opened stmt) */ + i64 iCurrentTime; /* Value of julianday('now') for this statement */ + i64 nFkConstraint; /* Number of imm. FK constraints this VM */ + i64 nStmtDefCons; /* Number of def. constraints when stmt started */ + i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */ + + /* When allocating a new Vdbe object, all of the fields below should be + ** initialized to zero or NULL */ + + Op *aOp; /* Space to hold the virtual machine's program */ + Mem *aMem; /* The memory locations */ + Mem **apArg; /* Arguments to currently executing user function */ + Mem *aColName; /* Column names to return */ + Mem *pResultSet; /* Pointer to an array of results */ + char *zErrMsg; /* Error message written here */ + VdbeCursor **apCsr; /* One element of this array for each open cursor */ + Mem *aVar; /* Values for the OP_Variable opcode. */ + VList *pVList; /* Name of variables */ +#ifndef SQLITE_OMIT_TRACE + i64 startTime; /* Time when query started - used for profiling */ +#endif + int nOp; /* Number of instructions in the program */ +#ifdef SQLITE_DEBUG + int rcApp; /* errcode set by sqlite3_result_error_code() */ +#endif + u16 nResColumn; /* Number of columns in one row of the result set */ + u8 errorAction; /* Recovery action to do in case of an error */ + u8 minWriteFileFormat; /* Minimum file format for writable database files */ + bft expired:1; /* True if the VM needs to be recompiled */ + bft doingRerun:1; /* True if rerunning after an auto-reprepare */ + bft explain:2; /* True if EXPLAIN present on SQL command */ + bft changeCntOn:1; /* True to update the change-counter */ + bft runOnlyOnce:1; /* Automatically expire on reset */ + bft usesStmtJournal:1; /* True if uses a statement journal */ + bft readOnly:1; /* True for statements that do not write */ + bft bIsReader:1; /* True for statements that read */ + bft isPrepareV2:1; /* True if prepared with prepare_v2() */ + yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */ + yDbMask lockMask; /* Subset of btreeMask that requires a lock */ + u32 aCounter[5]; /* Counters used by sqlite3_stmt_status() */ + char *zSql; /* Text of the SQL statement that generated this */ + void *pFree; /* Free this when deleting the vdbe */ + VdbeFrame *pFrame; /* Parent frame */ + VdbeFrame *pDelFrame; /* List of frame objects to free on VM reset */ + int nFrame; /* Number of frames in pFrame list */ + u32 expmask; /* Binding to these vars invalidates VM */ + SubProgram *pProgram; /* Linked list of all sub-programs used by VM */ + AuxData *pAuxData; /* Linked list of auxdata allocations */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + i64 *anExec; /* Number of times each op has been executed */ + int nScan; /* Entries in aScan[] */ + ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */ +#endif +}; + +/* +** The following are allowed values 
for Vdbe.magic +*/ +#define VDBE_MAGIC_INIT 0x16bceaa5 /* Building a VDBE program */ +#define VDBE_MAGIC_RUN 0x2df20da3 /* VDBE is ready to execute */ +#define VDBE_MAGIC_HALT 0x319c2973 /* VDBE has completed execution */ +#define VDBE_MAGIC_RESET 0x48fa9f76 /* Reset and ready to run again */ +#define VDBE_MAGIC_DEAD 0x5606c3c8 /* The VDBE has been deallocated */ + +/* +** Structure used to store the context required by the +** sqlite3_preupdate_*() API functions. +*/ +struct PreUpdate { + Vdbe *v; + VdbeCursor *pCsr; /* Cursor to read old values from */ + int op; /* One of SQLITE_INSERT, UPDATE, DELETE */ + u8 *aRecord; /* old.* database record */ + KeyInfo keyinfo; + UnpackedRecord *pUnpacked; /* Unpacked version of aRecord[] */ + UnpackedRecord *pNewUnpacked; /* Unpacked version of new.* record */ + int iNewReg; /* Register for new.* values */ + i64 iKey1; /* First key value passed to hook */ + i64 iKey2; /* Second key value passed to hook */ + Mem *aNew; /* Array of new.* values */ + Table *pTab; /* Schema object being upated */ + Index *pPk; /* PK index if pTab is WITHOUT ROWID */ +}; + +/* +** Function prototypes +*/ +SQLITE_PRIVATE void sqlite3VdbeError(Vdbe*, const char *, ...); +SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*); +void sqliteVdbePopStack(Vdbe*,int); +SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor**, int*); +SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*); +#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) +SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, Op*); +#endif +SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32); +SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8); +SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem*, int, u32*); +SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32); +SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); +SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3*, AuxData**, int, int); + +int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *); +SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(sqlite3*,VdbeCursor*,UnpackedRecord*,int*); +SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor*, i64*); +SQLITE_PRIVATE int sqlite3VdbeExec(Vdbe*); +SQLITE_PRIVATE int sqlite3VdbeList(Vdbe*); +SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe*); +SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *, int); +SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem*, const Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int); +SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem*, Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*)); +SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64); +#ifdef SQLITE_OMIT_FLOATING_POINT +# define sqlite3VdbeMemSetDouble sqlite3VdbeMemSetInt64 +#else +SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double); +#endif +SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16); +SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int); +SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); +SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*); +SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*); +SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*); +SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*); 
+SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8); +SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p); +SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*); +SQLITE_PRIVATE const char *sqlite3OpcodeName(int); +SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve); +SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n); +SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int); +SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); +SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int); +#endif +SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p); + +SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *); +SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *); +SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *); +SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *); +SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *, int *); +SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *); +SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *); +SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); + +#if !defined(SQLITE_OMIT_SHARED_CACHE) +SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe*); +#else +# define sqlite3VdbeEnter(X) +#endif + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 +SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe*); +#else +# define sqlite3VdbeLeave(X) +#endif + +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe*,Mem*); +SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*); +#endif + +#ifndef SQLITE_OMIT_FOREIGN_KEY +SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *, int); +#else +# define sqlite3VdbeCheckFk(p,i) 0 +#endif + +SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem*, u8); +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe*); +SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf); +#endif +SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem); + +#ifndef SQLITE_OMIT_INCRBLOB +SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *); + #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0) +#else + #define sqlite3VdbeMemExpandBlob(x) SQLITE_OK + #define ExpandBlob(P) SQLITE_OK +#endif + +#endif /* !defined(SQLITE_VDBEINT_H) */ + +/************** End of vdbeInt.h *********************************************/ +/************** Continuing where we left off in status.c *********************/ + +/* +** Variables in which to record status information. +*/ +#if SQLITE_PTRSIZE>4 +typedef sqlite3_int64 sqlite3StatValueType; +#else +typedef u32 sqlite3StatValueType; +#endif +typedef struct sqlite3StatType sqlite3StatType; +static SQLITE_WSD struct sqlite3StatType { + sqlite3StatValueType nowValue[10]; /* Current value */ + sqlite3StatValueType mxValue[10]; /* Maximum value */ +} sqlite3Stat = { {0,}, {0,} }; + +/* +** Elements of sqlite3Stat[] are protected by either the memory allocator +** mutex, or by the pcache1 mutex. The following array determines which. 
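+
+/* For example (mirroring the logic in sqlite3_status64() below), the proper
+** mutex for a given status counter is obtained with:
+**
+**     pMutex = statMutex[op] ? sqlite3Pcache1Mutex() : sqlite3MallocMutex();
+*/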
+*/
+static const char statMutex[] = {
+  0,  /* SQLITE_STATUS_MEMORY_USED */
+  1,  /* SQLITE_STATUS_PAGECACHE_USED */
+  1,  /* SQLITE_STATUS_PAGECACHE_OVERFLOW */
+  0,  /* SQLITE_STATUS_SCRATCH_USED */
+  0,  /* SQLITE_STATUS_SCRATCH_OVERFLOW */
+  0,  /* SQLITE_STATUS_MALLOC_SIZE */
+  0,  /* SQLITE_STATUS_PARSER_STACK */
+  1,  /* SQLITE_STATUS_PAGECACHE_SIZE */
+  0,  /* SQLITE_STATUS_SCRATCH_SIZE */
+  0,  /* SQLITE_STATUS_MALLOC_COUNT */
+};
+
+
+/* The "wsdStat" macro will resolve to the status information
+** state vector.  If writable static data is unsupported on the target,
+** we have to locate the state vector at run-time.  In the more common
+** case where writable static data is supported, wsdStat can refer directly
+** to the "sqlite3Stat" state vector declared above.
+*/
+#ifdef SQLITE_OMIT_WSD
+# define wsdStatInit  sqlite3StatType *x = &GLOBAL(sqlite3StatType,sqlite3Stat)
+# define wsdStat x[0]
+#else
+# define wsdStatInit
+# define wsdStat sqlite3Stat
+#endif
+
+/*
+** Return the current value of a status parameter.  The caller must
+** be holding the appropriate mutex.
+*/
+SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int op){
+  wsdStatInit;
+  assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+  assert( op>=0 && op<ArraySize(statMutex) );
+  assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+                                           : sqlite3MallocMutex()) );
+  return wsdStat.nowValue[op];
+}
+
+/*
+** Add N to the value of a status record.  The caller must hold the
+** appropriate mutex.  (Locking is checked by assert()).
+**
+** The StatusUp() routine can accept positive or negative values for N.
+** The value of N is added to the current status value and the high-water
+** mark is adjusted if necessary.
+**
+** The StatusDown() routine lowers the current value by N.  The highwater
+** mark is unchanged.  N must be non-negative for StatusDown().
+*/
+SQLITE_PRIVATE void sqlite3StatusUp(int op, int N){
+  wsdStatInit;
+  assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+  assert( op>=0 && op<ArraySize(statMutex) );
+  assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+                                           : sqlite3MallocMutex()) );
+  wsdStat.nowValue[op] += N;
+  if( wsdStat.nowValue[op]>wsdStat.mxValue[op] ){
+    wsdStat.mxValue[op] = wsdStat.nowValue[op];
+  }
+}
+SQLITE_PRIVATE void sqlite3StatusDown(int op, int N){
+  wsdStatInit;
+  assert( N>=0 );
+  assert( op>=0 && op<ArraySize(statMutex) );
+  assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+  assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+                                           : sqlite3MallocMutex()) );
+  wsdStat.nowValue[op] -= N;
+}
+
+/*
+** Adjust the highwater mark if necessary.
+** The caller must hold the appropriate mutex.
+*/
+SQLITE_PRIVATE void sqlite3StatusHighwater(int op, int X){
+  sqlite3StatValueType newValue;
+  wsdStatInit;
+  assert( X>=0 );
+  newValue = (sqlite3StatValueType)X;
+  assert( op>=0 && op<ArraySize(wsdStat.nowValue) );
+  assert( op>=0 && op<ArraySize(statMutex) );
+  assert( sqlite3_mutex_held(statMutex[op] ? sqlite3Pcache1Mutex()
+                                           : sqlite3MallocMutex()) );
+  assert( op==SQLITE_STATUS_MALLOC_SIZE
+       || op==SQLITE_STATUS_PAGECACHE_SIZE
+       || op==SQLITE_STATUS_SCRATCH_SIZE
+       || op==SQLITE_STATUS_PARSER_STACK );
+  if( newValue>wsdStat.mxValue[op] ){
+    wsdStat.mxValue[op] = newValue;
+  }
+}
+
+/*
+** Query status information.
+*/
+SQLITE_API int sqlite3_status64(
+  int op,
+  sqlite3_int64 *pCurrent,
+  sqlite3_int64 *pHighwater,
+  int resetFlag
+){
+  sqlite3_mutex *pMutex;
+  wsdStatInit;
+  if( op<0 || op>=ArraySize(wsdStat.nowValue) ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  pMutex = statMutex[op] ? 
sqlite3Pcache1Mutex() : sqlite3MallocMutex();
+  sqlite3_mutex_enter(pMutex);
+  *pCurrent = wsdStat.nowValue[op];
+  *pHighwater = wsdStat.mxValue[op];
+  if( resetFlag ){
+    wsdStat.mxValue[op] = wsdStat.nowValue[op];
+  }
+  sqlite3_mutex_leave(pMutex);
+  (void)pMutex;  /* Prevent warning when SQLITE_THREADSAFE=0 */
+  return SQLITE_OK;
+}
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
+  sqlite3_int64 iCur = 0, iHwtr = 0;
+  int rc;
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  rc = sqlite3_status64(op, &iCur, &iHwtr, resetFlag);
+  if( rc==0 ){
+    *pCurrent = (int)iCur;
+    *pHighwater = (int)iHwtr;
+  }
+  return rc;
+}
+
+/*
+** Query status information for a single database connection
+*/
+SQLITE_API int sqlite3_db_status(
+  sqlite3 *db,          /* The database connection whose status is desired */
+  int op,               /* Status verb */
+  int *pCurrent,        /* Write current value here */
+  int *pHighwater,      /* Write high-water mark here */
+  int resetFlag         /* Reset high-water mark if true */
+){
+  int rc = SQLITE_OK;   /* Return code */
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwater==0 ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  switch( op ){
+    case SQLITE_DBSTATUS_LOOKASIDE_USED: {
+      *pCurrent = db->lookaside.nOut;
+      *pHighwater = db->lookaside.mxOut;
+      if( resetFlag ){
+        db->lookaside.mxOut = db->lookaside.nOut;
+      }
+      break;
+    }
+
+    case SQLITE_DBSTATUS_LOOKASIDE_HIT:
+    case SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE:
+    case SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: {
+      testcase( op==SQLITE_DBSTATUS_LOOKASIDE_HIT );
+      testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE );
+      testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL );
+      assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 );
+      assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 );
+      *pCurrent = 0;
+      *pHighwater = db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT];
+      if( resetFlag ){
+        db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0;
+      }
+      break;
+    }
+
+    /*
+    ** Return an approximation for the amount of memory currently used
+    ** by all pagers associated with the given database connection.  The
+    ** highwater mark is meaningless and is returned as zero.
+    */
+    case SQLITE_DBSTATUS_CACHE_USED_SHARED:
+    case SQLITE_DBSTATUS_CACHE_USED: {
+      int totalUsed = 0;
+      int i;
+      sqlite3BtreeEnterAll(db);
+      for(i=0; i<db->nDb; i++){
+        Btree *pBt = db->aDb[i].pBt;
+        if( pBt ){
+          Pager *pPager = sqlite3BtreePager(pBt);
+          int nByte = sqlite3PagerMemUsed(pPager);
+          if( op==SQLITE_DBSTATUS_CACHE_USED_SHARED ){
+            nByte = nByte / sqlite3BtreeConnectionCount(pBt);
+          }
+          totalUsed += nByte;
+        }
+      }
+      sqlite3BtreeLeaveAll(db);
+      *pCurrent = totalUsed;
+      *pHighwater = 0;
+      break;
+    }
+
+    /*
+    ** *pCurrent gets an accurate estimate of the amount of memory used
+    ** to store the schema for all databases (main, temp, and any ATTACHed
+    ** databases).  *pHighwater is set to zero. 
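+    **
+    ** (Editor's note on the technique below: setting db->pnBytesFreed
+    ** causes sqlite3DbFree() to accumulate allocation sizes into nByte
+    ** instead of actually freeing, so the sqlite3DeleteTrigger() and
+    ** sqlite3DeleteTable() calls that follow only measure memory.)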
+    */
+    case SQLITE_DBSTATUS_SCHEMA_USED: {
+      int i;                      /* Used to iterate through schemas */
+      int nByte = 0;              /* Used to accumulate return value */
+
+      sqlite3BtreeEnterAll(db);
+      db->pnBytesFreed = &nByte;
+      for(i=0; i<db->nDb; i++){
+        Schema *pSchema = db->aDb[i].pSchema;
+        if( ALWAYS(pSchema!=0) ){
+          HashElem *p;
+
+          nByte += sqlite3GlobalConfig.m.xRoundup(sizeof(HashElem)) * (
+              pSchema->tblHash.count
+            + pSchema->trigHash.count
+            + pSchema->idxHash.count
+            + pSchema->fkeyHash.count
+          );
+          nByte += sqlite3_msize(pSchema->tblHash.ht);
+          nByte += sqlite3_msize(pSchema->trigHash.ht);
+          nByte += sqlite3_msize(pSchema->idxHash.ht);
+          nByte += sqlite3_msize(pSchema->fkeyHash.ht);
+
+          for(p=sqliteHashFirst(&pSchema->trigHash); p; p=sqliteHashNext(p)){
+            sqlite3DeleteTrigger(db, (Trigger*)sqliteHashData(p));
+          }
+          for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
+            sqlite3DeleteTable(db, (Table *)sqliteHashData(p));
+          }
+        }
+      }
+      db->pnBytesFreed = 0;
+      sqlite3BtreeLeaveAll(db);
+
+      *pHighwater = 0;
+      *pCurrent = nByte;
+      break;
+    }
+
+    /*
+    ** *pCurrent gets an accurate estimate of the amount of memory used
+    ** to store all prepared statements.
+    ** *pHighwater is set to zero.
+    */
+    case SQLITE_DBSTATUS_STMT_USED: {
+      struct Vdbe *pVdbe;         /* Used to iterate through VMs */
+      int nByte = 0;              /* Used to accumulate return value */
+
+      db->pnBytesFreed = &nByte;
+      for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){
+        sqlite3VdbeClearObject(db, pVdbe);
+        sqlite3DbFree(db, pVdbe);
+      }
+      db->pnBytesFreed = 0;
+
+      *pHighwater = 0;  /* IMP: R-64479-57858 */
+      *pCurrent = nByte;
+
+      break;
+    }
+
+    /*
+    ** Set *pCurrent to the total cache hits or misses encountered by all
+    ** pagers the database handle is connected to.  *pHighwater is always set
+    ** to zero.
+    */
+    case SQLITE_DBSTATUS_CACHE_HIT:
+    case SQLITE_DBSTATUS_CACHE_MISS:
+    case SQLITE_DBSTATUS_CACHE_WRITE:{
+      int i;
+      int nRet = 0;
+      assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
+      assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
+
+      for(i=0; i<db->nDb; i++){
+        if( db->aDb[i].pBt ){
+          Pager *pPager = sqlite3BtreePager(db->aDb[i].pBt);
+          sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet);
+        }
+      }
+      *pHighwater = 0;  /* IMP: R-42420-56072 */
+                        /* IMP: R-54100-20147 */
+                        /* IMP: R-29431-39229 */
+      *pCurrent = nRet;
+      break;
+    }
+
+    /* Set *pCurrent to non-zero if there are unresolved deferred foreign
+    ** key constraints.  Set *pCurrent to zero if all foreign key constraints
+    ** have been satisfied.  The *pHighwater is always set to zero.
+    */
+    case SQLITE_DBSTATUS_DEFERRED_FKS: {
+      *pHighwater = 0;  /* IMP: R-11967-56545 */
+      *pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0;
+      break;
+    }
+
+    default: {
+      rc = SQLITE_ERROR;
+    }
+  }
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+/************** End of status.c **********************************************/
+/************** Begin file date.c ********************************************/
+/*
+** 2003 October 31
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C functions that implement date and time
+** functions for SQLite. 
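+**
+** For example (an editor's check): the unix epoch 1970-01-01 00:00:00 UTC
+** is JD 2440587.5, so in the millisecond form used internally (see
+** DateTime.iJD below) it is 2440587.5*86400000 = 210866760000000.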
+**
+** There is only one exported symbol in this file - the function
+** sqlite3RegisterDateTimeFunctions() found at the bottom of the file.
+** All other code has file scope.
+**
+** SQLite processes all times and dates as julian day numbers.  The
+** dates and times are stored as the number of days since noon
+** in Greenwich on November 24, 4714 B.C. according to the Gregorian
+** calendar system.
+**
+** 1970-01-01 00:00:00 is JD 2440587.5
+** 2000-01-01 00:00:00 is JD 2451544.5
+**
+** This implementation requires years to be expressed as a 4-digit number
+** which means that only dates between 0000-01-01 and 9999-12-31 can
+** be represented, even though julian day numbers allow a much wider
+** range of dates.
+**
+** The Gregorian calendar system is used for all dates and times,
+** even those that predate the Gregorian calendar.  Historians usually
+** use the julian calendar for dates prior to 1582-10-15 and for some
+** dates afterwards, depending on locale.  Beware of this difference.
+**
+** The conversion algorithms are implemented based on descriptions
+** in the following text:
+**
+**      Jean Meeus
+**      Astronomical Algorithms, 2nd Edition, 1998
+**      ISBN 0-943396-61-1
+**      Willmann-Bell, Inc
+**      Richmond, Virginia (USA)
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+/* #include <assert.h> */
+#include <time.h>
+
+#ifndef SQLITE_OMIT_DATETIME_FUNCS
+
+/*
+** The MSVC CRT on Windows CE may not have a localtime() function.
+** So declare a substitute.  The substitute function itself is
+** defined in "os_win.c".
+*/
+#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \
+    (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API)
+struct tm *__cdecl localtime(const time_t *);
+#endif
+
+/*
+** A structure for holding a single date and time.
+*/
+typedef struct DateTime DateTime;
+struct DateTime {
+  sqlite3_int64 iJD;  /* The julian day number times 86400000 */
+  int Y, M, D;        /* Year, month, and day */
+  int h, m;           /* Hour and minutes */
+  int tz;             /* Timezone offset in minutes */
+  double s;           /* Seconds */
+  char validJD;       /* True (1) if iJD is valid */
+  char rawS;          /* Raw numeric value stored in s */
+  char validYMD;      /* True (1) if Y,M,D are valid */
+  char validHMS;      /* True (1) if h,m,s are valid */
+  char validTZ;       /* True (1) if tz is valid */
+  char tzSet;         /* Timezone was set explicitly */
+  char isError;       /* An overflow has occurred */
+};
+
+
+/*
+** Convert zDate into one or more integers according to the conversion
+** specifier zFormat.
+**
+** zFormat[] contains 4 characters for each integer converted, except for
+** the last integer which is specified by three characters.  The meaning
+** of a four-character format specifier ABCD is:
+**
+**    A: number of digits to convert.  Always "2" or "4".
+**    B: minimum value.  Always "0" or "1".
+**    C: maximum value, decoded as:
+**       a:  12
+**       b:  14
+**       c:  24
+**       d:  31
+**       e:  59
+**       f:  9999
+**    D: the separator character, or \000 to indicate this is the
+**       last number to convert.
+**
+** Example:  To translate an ISO-8601 date YYYY-MM-DD, the format would
+** be "40f-21a-21d".  The "40f-" indicates the 4-digit year followed by "-".
+** The "21a-" indicates the 2-digit month followed by "-".  The "21d"
+** indicates the 2-digit day, which is the last integer in the set.
+**
+** The function returns the number of successful conversions. 
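+**
+** For example (illustrative): getDigits("2017-08-01", "40f-21a-21d",
+** &Y, &M, &D) performs three conversions and returns 3: Y=2017 (4 digits,
+** 0..9999, '-' separator), M=8 (2 digits, 1..12, '-' separator), and
+** D=1 (2 digits, 1..31, final).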
+*/ +static int getDigits(const char *zDate, const char *zFormat, ...){ + /* The aMx[] array translates the 3rd character of each format + ** spec into a max size: a b c d e f */ + static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 }; + va_list ap; + int cnt = 0; + char nextC; + va_start(ap, zFormat); + do{ + char N = zFormat[0] - '0'; + char min = zFormat[1] - '0'; + int val = 0; + u16 max; + + assert( zFormat[2]>='a' && zFormat[2]<='f' ); + max = aMx[zFormat[2] - 'a']; + nextC = zFormat[3]; + val = 0; + while( N-- ){ + if( !sqlite3Isdigit(*zDate) ){ + goto end_getDigits; + } + val = val*10 + *zDate - '0'; + zDate++; + } + if( val<(int)min || val>(int)max || (nextC!=0 && nextC!=*zDate) ){ + goto end_getDigits; + } + *va_arg(ap,int*) = val; + zDate++; + cnt++; + zFormat += 4; + }while( nextC ); +end_getDigits: + va_end(ap); + return cnt; +} + +/* +** Parse a timezone extension on the end of a date-time. +** The extension is of the form: +** +** (+/-)HH:MM +** +** Or the "zulu" notation: +** +** Z +** +** If the parse is successful, write the number of minutes +** of change in p->tz and return 0. If a parser error occurs, +** return non-zero. +** +** A missing specifier is not considered an error. +*/ +static int parseTimezone(const char *zDate, DateTime *p){ + int sgn = 0; + int nHr, nMn; + int c; + while( sqlite3Isspace(*zDate) ){ zDate++; } + p->tz = 0; + c = *zDate; + if( c=='-' ){ + sgn = -1; + }else if( c=='+' ){ + sgn = +1; + }else if( c=='Z' || c=='z' ){ + zDate++; + goto zulu_time; + }else{ + return c!=0; + } + zDate++; + if( getDigits(zDate, "20b:20e", &nHr, &nMn)!=2 ){ + return 1; + } + zDate += 5; + p->tz = sgn*(nMn + nHr*60); +zulu_time: + while( sqlite3Isspace(*zDate) ){ zDate++; } + p->tzSet = 1; + return *zDate!=0; +} + +/* +** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF. +** The HH, MM, and SS must each be exactly 2 digits. The +** fractional seconds FFFF can be one or more digits. +** +** Return 1 if there is a parsing error and 0 on success. +*/ +static int parseHhMmSs(const char *zDate, DateTime *p){ + int h, m, s; + double ms = 0.0; + if( getDigits(zDate, "20c:20e", &h, &m)!=2 ){ + return 1; + } + zDate += 5; + if( *zDate==':' ){ + zDate++; + if( getDigits(zDate, "20e", &s)!=1 ){ + return 1; + } + zDate += 2; + if( *zDate=='.' && sqlite3Isdigit(zDate[1]) ){ + double rScale = 1.0; + zDate++; + while( sqlite3Isdigit(*zDate) ){ + ms = ms*10.0 + *zDate - '0'; + rScale *= 10.0; + zDate++; + } + ms /= rScale; + } + }else{ + s = 0; + } + p->validJD = 0; + p->rawS = 0; + p->validHMS = 1; + p->h = h; + p->m = m; + p->s = s + ms; + if( parseTimezone(zDate, p) ) return 1; + p->validTZ = (p->tz!=0)?1:0; + return 0; +} + +/* +** Put the DateTime object into its error state. +*/ +static void datetimeError(DateTime *p){ + memset(p, 0, sizeof(*p)); + p->isError = 1; +} + +/* +** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume +** that the YYYY-MM-DD is according to the Gregorian calendar. 
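+**
+** Worked example for 2000-01-01 (M<=2 shifts the input to Y=1999, M=13):
+**
+**     A  = 1999/100 = 19,          B = 2 - 19 + 19/4 = -13
+**     X1 = 36525*(1999+4716)/100  = 2452653
+**     X2 = 306001*(13+1)/10000    = 428
+**     JD = 2452653 + 428 + 1 - 13 - 1524.5 = 2451544.5
+**
+** in agreement with the table at the top of this file.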
+** +** Reference: Meeus page 61 +*/ +static void computeJD(DateTime *p){ + int Y, M, D, A, B, X1, X2; + + if( p->validJD ) return; + if( p->validYMD ){ + Y = p->Y; + M = p->M; + D = p->D; + }else{ + Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */ + M = 1; + D = 1; + } + if( Y<-4713 || Y>9999 || p->rawS ){ + datetimeError(p); + return; + } + if( M<=2 ){ + Y--; + M += 12; + } + A = Y/100; + B = 2 - A + (A/4); + X1 = 36525*(Y+4716)/100; + X2 = 306001*(M+1)/10000; + p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000); + p->validJD = 1; + if( p->validHMS ){ + p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000); + if( p->validTZ ){ + p->iJD -= p->tz*60000; + p->validYMD = 0; + p->validHMS = 0; + p->validTZ = 0; + } + } +} + +/* +** Parse dates of the form +** +** YYYY-MM-DD HH:MM:SS.FFF +** YYYY-MM-DD HH:MM:SS +** YYYY-MM-DD HH:MM +** YYYY-MM-DD +** +** Write the result into the DateTime structure and return 0 +** on success and 1 if the input string is not a well-formed +** date. +*/ +static int parseYyyyMmDd(const char *zDate, DateTime *p){ + int Y, M, D, neg; + + if( zDate[0]=='-' ){ + zDate++; + neg = 1; + }else{ + neg = 0; + } + if( getDigits(zDate, "40f-21a-21d", &Y, &M, &D)!=3 ){ + return 1; + } + zDate += 10; + while( sqlite3Isspace(*zDate) || 'T'==*(u8*)zDate ){ zDate++; } + if( parseHhMmSs(zDate, p)==0 ){ + /* We got the time */ + }else if( *zDate==0 ){ + p->validHMS = 0; + }else{ + return 1; + } + p->validJD = 0; + p->validYMD = 1; + p->Y = neg ? -Y : Y; + p->M = M; + p->D = D; + if( p->validTZ ){ + computeJD(p); + } + return 0; +} + +/* +** Set the time to the current time reported by the VFS. +** +** Return the number of errors. +*/ +static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ + p->iJD = sqlite3StmtCurrentTime(context); + if( p->iJD>0 ){ + p->validJD = 1; + return 0; + }else{ + return 1; + } +} + +/* +** Input "r" is a numeric quantity which might be a julian day number, +** or the number of seconds since 1970. If the value if r is within +** range of a julian day number, install it as such and set validJD. +** If the value is a valid unix timestamp, put it in p->s and set p->rawS. +*/ +static void setRawDateNumber(DateTime *p, double r){ + p->s = r; + p->rawS = 1; + if( r>=0.0 && r<5373484.5 ){ + p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); + p->validJD = 1; + } +} + +/* +** Attempt to parse the given string into a julian day number. Return +** the number of errors. +** +** The following are acceptable forms for the input string: +** +** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM +** DDDD.DD +** now +** +** In the first form, the +/-HH:MM is always optional. The fractional +** seconds extension (the ".FFF") is optional. The seconds portion +** (":SS.FFF") is option. The year and date can be omitted as long +** as there is a time string. The time string can be omitted as long +** as there is a year and date. +*/ +static int parseDateOrTime( + sqlite3_context *context, + const char *zDate, + DateTime *p +){ + double r; + if( parseYyyyMmDd(zDate,p)==0 ){ + return 0; + }else if( parseHhMmSs(zDate, p)==0 ){ + return 0; + }else if( sqlite3StrICmp(zDate,"now")==0){ + return setDateTimeToCurrent(context, p); + }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8) ){ + setRawDateNumber(p, r); + return 0; + } + return 1; +} + +/* The julian day number for 9999-12-31 23:59:59.999 is 5373484.4999999. +** Multiplying this by 86400000 gives 464269060799999 as the maximum value +** for DateTime.iJD. 
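+** (Check: 5373484.5*86400000 = 464269060800000, so the largest valid
+** iJD is one millisecond less, 464269060799999.)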
+** +** But some older compilers (ex: gcc 4.2.1 on older Macs) cannot deal with +** such a large integer literal, so we have to encode it. +*/ +#define INT_464269060799999 ((((i64)0x1a640)<<32)|0x1072fdff) + +/* +** Return TRUE if the given julian day number is within range. +** +** The input is the JulianDay times 86400000. +*/ +static int validJulianDay(sqlite3_int64 iJD){ + return iJD>=0 && iJD<=INT_464269060799999; +} + +/* +** Compute the Year, Month, and Day from the julian day number. +*/ +static void computeYMD(DateTime *p){ + int Z, A, B, C, D, E, X1; + if( p->validYMD ) return; + if( !p->validJD ){ + p->Y = 2000; + p->M = 1; + p->D = 1; + }else{ + assert( validJulianDay(p->iJD) ); + Z = (int)((p->iJD + 43200000)/86400000); + A = (int)((Z - 1867216.25)/36524.25); + A = Z + 1 + A - (A/4); + B = A + 1524; + C = (int)((B - 122.1)/365.25); + D = (36525*(C&32767))/100; + E = (int)((B-D)/30.6001); + X1 = (int)(30.6001*E); + p->D = B - D - X1; + p->M = E<14 ? E-1 : E-13; + p->Y = p->M>2 ? C - 4716 : C - 4715; + } + p->validYMD = 1; +} + +/* +** Compute the Hour, Minute, and Seconds from the julian day number. +*/ +static void computeHMS(DateTime *p){ + int s; + if( p->validHMS ) return; + computeJD(p); + s = (int)((p->iJD + 43200000) % 86400000); + p->s = s/1000.0; + s = (int)p->s; + p->s -= s; + p->h = s/3600; + s -= p->h*3600; + p->m = s/60; + p->s += s - p->m*60; + p->rawS = 0; + p->validHMS = 1; +} + +/* +** Compute both YMD and HMS +*/ +static void computeYMD_HMS(DateTime *p){ + computeYMD(p); + computeHMS(p); +} + +/* +** Clear the YMD and HMS and the TZ +*/ +static void clearYMD_HMS_TZ(DateTime *p){ + p->validYMD = 0; + p->validHMS = 0; + p->validTZ = 0; +} + +#ifndef SQLITE_OMIT_LOCALTIME +/* +** On recent Windows platforms, the localtime_s() function is available +** as part of the "Secure CRT". It is essentially equivalent to +** localtime_r() available under most POSIX platforms, except that the +** order of the parameters is reversed. +** +** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx. +** +** If the user has not indicated to use localtime_r() or localtime_s() +** already, check for an MSVC build environment that provides +** localtime_s(). +*/ +#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S \ + && defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE) +#undef HAVE_LOCALTIME_S +#define HAVE_LOCALTIME_S 1 +#endif + +/* +** The following routine implements the rough equivalent of localtime_r() +** using whatever operating-system specific localtime facility that +** is available. This routine returns 0 on success and +** non-zero on any kind of error. +** +** If the sqlite3GlobalConfig.bLocaltimeFault variable is true then this +** routine will always fail. +** +** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C +** library function localtime_r() is used to assist in the calculation of +** local time. 
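+**
+** For reference, the POSIX interface wrapped here has the shape
+** (a sketch, not part of this file):
+**
+**     struct tm sLocal;
+**     time_t t = ...;
+**     if( localtime_r(&t, &sLocal)==0 ){ /* NULL return means failure */ }
+**
+** while the Windows "Secure CRT" equivalent reverses the arguments and
+** returns an error number:  localtime_s(&sLocal, &t).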
+*/ +static int osLocaltime(time_t *t, struct tm *pTm){ + int rc; +#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S + struct tm *pX; +#if SQLITE_THREADSAFE>0 + sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); +#endif + sqlite3_mutex_enter(mutex); + pX = localtime(t); +#ifndef SQLITE_UNTESTABLE + if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0; +#endif + if( pX ) *pTm = *pX; + sqlite3_mutex_leave(mutex); + rc = pX==0; +#else +#ifndef SQLITE_UNTESTABLE + if( sqlite3GlobalConfig.bLocaltimeFault ) return 1; +#endif +#if HAVE_LOCALTIME_R + rc = localtime_r(t, pTm)==0; +#else + rc = localtime_s(pTm, t); +#endif /* HAVE_LOCALTIME_R */ +#endif /* HAVE_LOCALTIME_R || HAVE_LOCALTIME_S */ + return rc; +} +#endif /* SQLITE_OMIT_LOCALTIME */ + + +#ifndef SQLITE_OMIT_LOCALTIME +/* +** Compute the difference (in milliseconds) between localtime and UTC +** (a.k.a. GMT) for the time value p where p is in UTC. If no error occurs, +** return this value and set *pRc to SQLITE_OK. +** +** Or, if an error does occur, set *pRc to SQLITE_ERROR. The returned value +** is undefined in this case. +*/ +static sqlite3_int64 localtimeOffset( + DateTime *p, /* Date at which to calculate offset */ + sqlite3_context *pCtx, /* Write error here if one occurs */ + int *pRc /* OUT: Error code. SQLITE_OK or ERROR */ +){ + DateTime x, y; + time_t t; + struct tm sLocal; + + /* Initialize the contents of sLocal to avoid a compiler warning. */ + memset(&sLocal, 0, sizeof(sLocal)); + + x = *p; + computeYMD_HMS(&x); + if( x.Y<1971 || x.Y>=2038 ){ + /* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only + ** works for years between 1970 and 2037. For dates outside this range, + ** SQLite attempts to map the year into an equivalent year within this + ** range, do the calculation, then map the year back. + */ + x.Y = 2000; + x.M = 1; + x.D = 1; + x.h = 0; + x.m = 0; + x.s = 0.0; + } else { + int s = (int)(x.s + 0.5); + x.s = s; + } + x.tz = 0; + x.validJD = 0; + computeJD(&x); + t = (time_t)(x.iJD/1000 - 21086676*(i64)10000); + if( osLocaltime(&t, &sLocal) ){ + sqlite3_result_error(pCtx, "local time unavailable", -1); + *pRc = SQLITE_ERROR; + return 0; + } + y.Y = sLocal.tm_year + 1900; + y.M = sLocal.tm_mon + 1; + y.D = sLocal.tm_mday; + y.h = sLocal.tm_hour; + y.m = sLocal.tm_min; + y.s = sLocal.tm_sec; + y.validYMD = 1; + y.validHMS = 1; + y.validJD = 0; + y.rawS = 0; + y.validTZ = 0; + y.isError = 0; + computeJD(&y); + *pRc = SQLITE_OK; + return y.iJD - x.iJD; +} +#endif /* SQLITE_OMIT_LOCALTIME */ + +/* +** The following table defines various date transformations of the form +** +** 'NNN days' +** +** Where NNN is an arbitrary floating-point number and "days" can be one +** of several units of time. +*/ +static const struct { + u8 eType; /* Transformation type code */ + u8 nName; /* Length of th name */ + char *zName; /* Name of the transformation */ + double rLimit; /* Maximum NNN value for this transform */ + double rXform; /* Constant used for this transform */ +} aXformType[] = { + { 0, 6, "second", 464269060800.0, 86400000.0/(24.0*60.0*60.0) }, + { 0, 6, "minute", 7737817680.0, 86400000.0/(24.0*60.0) }, + { 0, 4, "hour", 128963628.0, 86400000.0/24.0 }, + { 0, 3, "day", 5373485.0, 86400000.0 }, + { 1, 5, "month", 176546.0, 30.0*86400000.0 }, + { 2, 4, "year", 14713.0, 365.0*86400000.0 }, +}; + +/* +** Process a modifier to a date-time stamp. 
The modifiers are +** as follows: +** +** NNN days +** NNN hours +** NNN minutes +** NNN.NNNN seconds +** NNN months +** NNN years +** start of month +** start of year +** start of week +** start of day +** weekday N +** unixepoch +** localtime +** utc +** +** Return 0 on success and 1 if there is any kind of error. If the error +** is in a system call (i.e. localtime()), then an error message is written +** to context pCtx. If the error is an unrecognized modifier, no error is +** written to pCtx. +*/ +static int parseModifier( + sqlite3_context *pCtx, /* Function context */ + const char *z, /* The text of the modifier */ + int n, /* Length of zMod in bytes */ + DateTime *p /* The date/time value to be modified */ +){ + int rc = 1; + double r; + switch(sqlite3UpperToLower[(u8)z[0]] ){ +#ifndef SQLITE_OMIT_LOCALTIME + case 'l': { + /* localtime + ** + ** Assuming the current time value is UTC (a.k.a. GMT), shift it to + ** show local time. + */ + if( sqlite3_stricmp(z, "localtime")==0 ){ + computeJD(p); + p->iJD += localtimeOffset(p, pCtx, &rc); + clearYMD_HMS_TZ(p); + } + break; + } +#endif + case 'u': { + /* + ** unixepoch + ** + ** Treat the current value of p->s as the number of + ** seconds since 1970. Convert to a real julian day number. + */ + if( sqlite3_stricmp(z, "unixepoch")==0 && p->rawS ){ + r = p->s*1000.0 + 210866760000000.0; + if( r>=0.0 && r<464269060800000.0 ){ + clearYMD_HMS_TZ(p); + p->iJD = (sqlite3_int64)r; + p->validJD = 1; + p->rawS = 0; + rc = 0; + } + } +#ifndef SQLITE_OMIT_LOCALTIME + else if( sqlite3_stricmp(z, "utc")==0 ){ + if( p->tzSet==0 ){ + sqlite3_int64 c1; + computeJD(p); + c1 = localtimeOffset(p, pCtx, &rc); + if( rc==SQLITE_OK ){ + p->iJD -= c1; + clearYMD_HMS_TZ(p); + p->iJD += c1 - localtimeOffset(p, pCtx, &rc); + } + p->tzSet = 1; + }else{ + rc = SQLITE_OK; + } + } +#endif + break; + } + case 'w': { + /* + ** weekday N + ** + ** Move the date to the same time on the next occurrence of + ** weekday N where 0==Sunday, 1==Monday, and so forth. If the + ** date is already on the appropriate weekday, this is a no-op. + */ + if( sqlite3_strnicmp(z, "weekday ", 8)==0 + && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8) + && (n=(int)r)==r && n>=0 && r<7 ){ + sqlite3_int64 Z; + computeYMD_HMS(p); + p->validTZ = 0; + p->validJD = 0; + computeJD(p); + Z = ((p->iJD + 129600000)/86400000) % 7; + if( Z>n ) Z -= 7; + p->iJD += (n - Z)*86400000; + clearYMD_HMS_TZ(p); + rc = 0; + } + break; + } + case 's': { + /* + ** start of TTTTT + ** + ** Move the date backwards to the beginning of the current day, + ** or month or year. + */ + if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break; + z += 9; + computeYMD(p); + p->validHMS = 1; + p->h = p->m = 0; + p->s = 0.0; + p->validTZ = 0; + p->validJD = 0; + if( sqlite3_stricmp(z,"month")==0 ){ + p->D = 1; + rc = 0; + }else if( sqlite3_stricmp(z,"year")==0 ){ + computeYMD(p); + p->M = 1; + p->D = 1; + rc = 0; + }else if( sqlite3_stricmp(z,"day")==0 ){ + rc = 0; + } + break; + } + case '+': + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { + double rRounder; + int i; + for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} + if( !sqlite3AtoF(z, &r, n, SQLITE_UTF8) ){ + rc = 1; + break; + } + if( z[n]==':' ){ + /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the + ** specified number of hours, minutes, seconds, and fractional seconds + ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be + ** omitted. 
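+        **
+        ** For example (illustrative SQL, assuming the standard date
+        ** functions are registered):
+        **
+        **     SELECT datetime('2000-01-01 12:00:00', '+01:30');
+        **     -- 2000-01-01 13:30:00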
+        */
+        const char *z2 = z;
+        DateTime tx;
+        sqlite3_int64 day;
+        if( !sqlite3Isdigit(*z2) ) z2++;
+        memset(&tx, 0, sizeof(tx));
+        if( parseHhMmSs(z2, &tx) ) break;
+        computeJD(&tx);
+        tx.iJD -= 43200000;
+        day = tx.iJD/86400000;
+        tx.iJD -= day*86400000;
+        if( z[0]=='-' ) tx.iJD = -tx.iJD;
+        computeJD(p);
+        clearYMD_HMS_TZ(p);
+        p->iJD += tx.iJD;
+        rc = 0;
+        break;
+      }
+
+      /* If control reaches this point, it means the transformation is
+      ** one of the forms like "+NNN days".  */
+      z += n;
+      while( sqlite3Isspace(*z) ) z++;
+      n = sqlite3Strlen30(z);
+      if( n>10 || n<3 ) break;
+      if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--;
+      computeJD(p);
+      rc = 1;
+      rRounder = r<0 ? -0.5 : +0.5;
+      for(i=0; i<ArraySize(aXformType); i++){
+        if( aXformType[i].nName==n
+         && sqlite3_strnicmp(aXformType[i].zName, z, n)==0
+         && r>-aXformType[i].rLimit && r<aXformType[i].rLimit
+        ){
+          switch( aXformType[i].eType ){
+            case 1: { /* Special processing to add months */
+              int x;
+              computeYMD_HMS(p);
+              p->M += (int)r;
+              x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
+              p->Y += x;
+              p->M -= x*12;
+              p->validJD = 0;
+              r -= (int)r;
+              break;
+            }
+            case 2: { /* Special processing to add years */
+              int y = (int)r;
+              computeYMD_HMS(p);
+              p->Y += y;
+              p->validJD = 0;
+              r -= (int)r;
+              break;
+            }
+          }
+          computeJD(p);
+          p->iJD += (sqlite3_int64)(r*aXformType[i].rXform + rRounder);
+          rc = 0;
+          break;
+        }
+      }
+      clearYMD_HMS_TZ(p);
+      break;
+    }
+    default: {
+      break;
+    }
+  }
+  return rc;
+}
+
+/*
+** Process time function arguments.  argv[0] is a date-time stamp.
+** argv[1] and following are modifiers.  Parse them all and write
+** the resulting time into the DateTime structure p.  Return 0
+** on success and 1 if there are any errors.
+**
+** If there are zero parameters (if even argv[0] is undefined)
+** then assume a default value of "now" for argv[0].
+*/
+static int isDate(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv,
+  DateTime *p
+){
+  int i, n;
+  const unsigned char *z;
+  int eType;
+  memset(p, 0, sizeof(*p));
+  if( argc==0 ){
+    return setDateTimeToCurrent(context, p);
+  }
+  if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT
+   || eType==SQLITE_INTEGER ){
+    setRawDateNumber(p, sqlite3_value_double(argv[0]));
+  }else{
+    z = sqlite3_value_text(argv[0]);
+    if( !z || parseDateOrTime(context, (char*)z, p) ){
+      return 1;
+    }
+  }
+  for(i=1; i<argc; i++){
+    z = sqlite3_value_text(argv[i]);
+    n = sqlite3_value_bytes(argv[i]);
+    if( z==0 || parseModifier(context, (char*)z, n, p) ) return 1;
+  }
+  computeJD(p);
+  if( p->isError || !validJulianDay(p->iJD) ) return 1;
+  return 0;
+}
+
+
+/*
+** The following routines implement the various date and time functions
+** of SQLite.
+*/
+
+/*
+** julianday( TIMESTRING, MOD, MOD, ...)
+**
+** Return the julian day number of the date specified in the arguments
+*/
+static void juliandayFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  DateTime x;
+  if( isDate(context, argc, argv, &x)==0 ){
+    computeJD(&x);
+    sqlite3_result_double(context, x.iJD/86400000.0);
+  }
+}
+
+/*
+** datetime( TIMESTRING, MOD, MOD, ...)
+**
+** Return YYYY-MM-DD HH:MM:SS
+*/
+static void datetimeFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  DateTime x;
+  if( isDate(context, argc, argv, &x)==0 ){
+    char zBuf[100];
+    computeYMD_HMS(&x);
+    sqlite3_snprintf(sizeof(zBuf), zBuf, "%04d-%02d-%02d %02d:%02d:%02d",
+                     x.Y, x.M, x.D, x.h, x.m, (int)(x.s));
+    sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+  }
+}
+
+/*
+** time( TIMESTRING, MOD, MOD, ...)
+**
+** Return HH:MM:SS
+*/
+static void timeFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  DateTime x;
+  if( isDate(context, argc, argv, &x)==0 ){
+    char zBuf[100];
+    computeHMS(&x);
+    sqlite3_snprintf(sizeof(zBuf), zBuf, "%02d:%02d:%02d", x.h, x.m, (int)x.s);
+    sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+  }
+}
+
+/*
+** date( TIMESTRING, MOD, MOD, ...)
+**
+** Return YYYY-MM-DD
+*/
+static void dateFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  DateTime x;
+  if( isDate(context, argc, argv, &x)==0 ){
+    char zBuf[100];
+    computeYMD(&x);
+    sqlite3_snprintf(sizeof(zBuf), zBuf, "%04d-%02d-%02d", x.Y, x.M, x.D);
+    sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+  }
+}
+
+/*
+** strftime( FORMAT, TIMESTRING, MOD, MOD, ...)
+**
+** Return a string described by FORMAT.  Conversions as follows:
+**
+**   %d  day of month
+**   %f  ** fractional seconds  SS.SSS
+**   %H  hour 00-24
+**   %j  day of year 000-366
+**   %J  ** julian day number
+**   %m  month 01-12
+**   %M  minute 00-59
+**   %s  seconds since 1970-01-01
+**   %S  seconds 00-59
+**   %w  day of week 0-6  sunday==0
+**   %W  week of year 00-53
+**   %Y  year 0000-9999
+**   %%  %
+*/
+static void strftimeFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  DateTime x;
+  u64 n;
+  size_t i,j;
+  char *z;
+  sqlite3 *db;
+  const char *zFmt;
+  char zBuf[100];
+  if( argc==0 ) return;
+  zFmt = (const char*)sqlite3_value_text(argv[0]);
+  if( zFmt==0 || isDate(context, argc-1, argv+1, &x) ) return;
+  db = sqlite3_context_db_handle(context);
+  for(i=0, n=1; zFmt[i]; i++, n++){
+    if( zFmt[i]=='%' ){
+      switch( zFmt[i+1] ){
+        case 'd':
+        case 'H':
+        case 'm':
+        case 'M':
+        case 'S':
+        case 'W':
+          n++;
+          /* fall thru */
+        case 'w':
+        case '%':
+          break;
+        case 'f':
+          n += 8;
+          break;
+        case 'j':
+          n += 3;
+          break;
+        case 'Y':
+          n += 8;
+          break;
+        case 's':
+        case 'J':
+          n += 50;
+          break;
+        default:
+          return;  /* ERROR.  return a NULL */
+      }
+      i++;
+    }
+  }
+  testcase( n==sizeof(zBuf)-1 );
+  testcase( n==sizeof(zBuf) );
+  testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH]+1 );
+  testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] );
+  if( n<sizeof(zBuf) ){
+    z = zBuf;
+  }else if( n>(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+    sqlite3_result_error_toobig(context);
+    return;
+  }else{
+    z = sqlite3DbMallocRawNN(db, (int)n);
+    if( z==0 ){
+      sqlite3_result_error_nomem(context);
+      return;
+    }
+  }
+  computeJD(&x);
+  computeYMD_HMS(&x);
+  for(i=j=0; zFmt[i]; i++){
+    if( zFmt[i]!='%' ){
+      z[j++] = zFmt[i];
+    }else{
+      i++;
+      switch( zFmt[i] ){
+        case 'd':  sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break;
+        case 'f': {
+          double s = x.s;
+          if( s>59.999 ) s = 59.999;
+          sqlite3_snprintf(7, &z[j],"%06.3f", s);
+          j += sqlite3Strlen30(&z[j]);
+          break;
+        }
+        case 'H':  sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break;
+        case 'W': /* Fall thru */
+        case 'j': {
+          int nDay;             /* Number of days since 1st day of year */
+          DateTime y = x;
+          y.validJD = 0;
+          y.M = 1;
+          y.D = 1;
+          computeJD(&y);
+          nDay = (int)((x.iJD-y.iJD+43200000)/86400000);
+          if( zFmt[i]=='W' ){
+            int wd;   /* 0=Monday, 1=Tuesday, ...
6=Sunday */ + wd = (int)(((x.iJD+43200000)/86400000)%7); + sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7); + j += 2; + }else{ + sqlite3_snprintf(4, &z[j],"%03d",nDay+1); + j += 3; + } + break; + } + case 'J': { + sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0); + j+=sqlite3Strlen30(&z[j]); + break; + } + case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break; + case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break; + case 's': { + sqlite3_snprintf(30,&z[j],"%lld", + (i64)(x.iJD/1000 - 21086676*(i64)10000)); + j += sqlite3Strlen30(&z[j]); + break; + } + case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break; + case 'w': { + z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + break; + } + case 'Y': { + sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]); + break; + } + default: z[j++] = '%'; break; + } + } + } + z[j] = 0; + sqlite3_result_text(context, z, -1, + z==zBuf ? SQLITE_TRANSIENT : SQLITE_DYNAMIC); +} + +/* +** current_time() +** +** This function returns the same value as time('now'). +*/ +static void ctimeFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **NotUsed2 +){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + timeFunc(context, 0, 0); +} + +/* +** current_date() +** +** This function returns the same value as date('now'). +*/ +static void cdateFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **NotUsed2 +){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + dateFunc(context, 0, 0); +} + +/* +** current_timestamp() +** +** This function returns the same value as datetime('now'). +*/ +static void ctimestampFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **NotUsed2 +){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + datetimeFunc(context, 0, 0); +} +#endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */ + +#ifdef SQLITE_OMIT_DATETIME_FUNCS +/* +** If the library is compiled to omit the full-scale date and time +** handling (to get a smaller binary), the following minimal version +** of the functions current_time(), current_date() and current_timestamp() +** are included instead. This is to support column declarations that +** include "DEFAULT CURRENT_TIME" etc. +** +** This function uses the C-library functions time(), gmtime() +** and strftime(). The format string to pass to strftime() is supplied +** as the user-data for the function. +*/ +static void currentTimeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + time_t t; + char *zFormat = (char *)sqlite3_user_data(context); + sqlite3_int64 iT; + struct tm *pTm; + struct tm sNow; + char zBuf[20]; + + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); + + iT = sqlite3StmtCurrentTime(context); + if( iT<=0 ) return; + t = iT/1000 - 10000*(sqlite3_int64)21086676; +#if HAVE_GMTIME_R + pTm = gmtime_r(&t, &sNow); +#else + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); + pTm = gmtime(&t); + if( pTm ) memcpy(&sNow, pTm, sizeof(sNow)); + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +#endif + if( pTm ){ + strftime(zBuf, 20, zFormat, &sNow); + sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); + } +} +#endif + +/* +** This function registered all of the above C functions as SQL +** functions. This should be the only routine in this file with +** external linkage. 
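+**
+** Once registered, the functions are reachable through ordinary SQL,
+** e.g. (illustrative only):
+**
+**     SELECT strftime('%s', 'now');     -- seconds since 1970-01-01
+**     SELECT julianday('2000-01-01');   -- 2451544.5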
+*/ +SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ + static FuncDef aDateTimeFuncs[] = { +#ifndef SQLITE_OMIT_DATETIME_FUNCS + DFUNCTION(julianday, -1, 0, 0, juliandayFunc ), + DFUNCTION(date, -1, 0, 0, dateFunc ), + DFUNCTION(time, -1, 0, 0, timeFunc ), + DFUNCTION(datetime, -1, 0, 0, datetimeFunc ), + DFUNCTION(strftime, -1, 0, 0, strftimeFunc ), + DFUNCTION(current_time, 0, 0, 0, ctimeFunc ), + DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), + DFUNCTION(current_date, 0, 0, 0, cdateFunc ), +#else + STR_FUNCTION(current_time, 0, "%H:%M:%S", 0, currentTimeFunc), + STR_FUNCTION(current_date, 0, "%Y-%m-%d", 0, currentTimeFunc), + STR_FUNCTION(current_timestamp, 0, "%Y-%m-%d %H:%M:%S", 0, currentTimeFunc), +#endif + }; + sqlite3InsertBuiltinFuncs(aDateTimeFuncs, ArraySize(aDateTimeFuncs)); +} + +/************** End of date.c ************************************************/ +/************** Begin file os.c **********************************************/ +/* +** 2005 November 29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains OS interface code that is common to all +** architectures. +*/ +/* #include "sqliteInt.h" */ + +/* +** If we compile with the SQLITE_TEST macro set, then the following block +** of code will give us the ability to simulate a disk I/O error. This +** is used for testing the I/O recovery logic. +*/ +#if defined(SQLITE_TEST) +SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ +SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ +SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ +SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ +SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */ +SQLITE_API int sqlite3_diskfull_pending = 0; +SQLITE_API int sqlite3_diskfull = 0; +#endif /* defined(SQLITE_TEST) */ + +/* +** When testing, also keep a count of the number of open files. +*/ +#if defined(SQLITE_TEST) +SQLITE_API int sqlite3_open_file_count = 0; +#endif /* defined(SQLITE_TEST) */ + +/* +** The default SQLite sqlite3_vfs implementations do not allocate +** memory (actually, os_unix.c allocates a small amount of memory +** from within OsOpen()), but some third-party implementations may. +** So we test the effects of a malloc() failing and the sqlite3OsXXX() +** function returning SQLITE_IOERR_NOMEM using the DO_OS_MALLOC_TEST macro. 
+** +** The following functions are instrumented for malloc() failure +** testing: +** +** sqlite3OsRead() +** sqlite3OsWrite() +** sqlite3OsSync() +** sqlite3OsFileSize() +** sqlite3OsLock() +** sqlite3OsCheckReservedLock() +** sqlite3OsFileControl() +** sqlite3OsShmMap() +** sqlite3OsOpen() +** sqlite3OsDelete() +** sqlite3OsAccess() +** sqlite3OsFullPathname() +** +*/ +#if defined(SQLITE_TEST) +SQLITE_API int sqlite3_memdebug_vfs_oom_test = 1; + #define DO_OS_MALLOC_TEST(x) \ + if (sqlite3_memdebug_vfs_oom_test && (!x || !sqlite3JournalIsInMemory(x))) { \ + void *pTstAlloc = sqlite3Malloc(10); \ + if (!pTstAlloc) return SQLITE_IOERR_NOMEM_BKPT; \ + sqlite3_free(pTstAlloc); \ + } +#else + #define DO_OS_MALLOC_TEST(x) +#endif + +/* +** The following routines are convenience wrappers around methods +** of the sqlite3_file object. This is mostly just syntactic sugar. All +** of this would be completely automatic if SQLite were coded using +** C++ instead of plain old C. +*/ +SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file *pId){ + if( pId->pMethods ){ + pId->pMethods->xClose(pId); + pId->pMethods = 0; + } +} +SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xRead(id, pBuf, amt, offset); +} +SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xWrite(id, pBuf, amt, offset); +} +SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file *id, i64 size){ + return id->pMethods->xTruncate(id, size); +} +SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file *id, int flags){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xSync(id, flags); +} +SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xFileSize(id, pSize); +} +SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xLock(id, lockType); +} +SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){ + return id->pMethods->xUnlock(id, lockType); +} +SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xCheckReservedLock(id, pResOut); +} + +/* +** Use sqlite3OsFileControl() when we are doing something that might fail +** and we need to know about the failures. Use sqlite3OsFileControlHint() +** when simply tossing information over the wall to the VFS and we do not +** really care if the VFS receives and understands the information since it +** is only a hint and can be safely ignored. The sqlite3OsFileControlHint() +** routine has no return value since the return value would be meaningless. +*/ +SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ +#ifdef SQLITE_TEST + if( op!=SQLITE_FCNTL_COMMIT_PHASETWO ){ + /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite + ** is using a regular VFS, it is called after the corresponding + ** transaction has been committed. Injecting a fault at this point + ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM + ** but the transaction is committed anyway. + ** + ** The core must call OsFileControl() though, not OsFileControlHint(), + ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably + ** means the commit really has failed and an error should be returned + ** to the user. 
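+    **
+    ** (From the application's side this path is reached via the public
+    ** wrapper, e.g.:  sqlite3_file_control(db, "main", op, pArg).)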
*/ + DO_OS_MALLOC_TEST(id); + } +#endif + return id->pMethods->xFileControl(id, op, pArg); +} +SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){ + (void)id->pMethods->xFileControl(id, op, pArg); +} + +SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id){ + int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize; + return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE); +} +SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ + return id->pMethods->xDeviceCharacteristics(id); +} +SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int offset, int n, int flags){ + return id->pMethods->xShmLock(id, offset, n, flags); +} +SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id){ + id->pMethods->xShmBarrier(id); +} +SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int deleteFlag){ + return id->pMethods->xShmUnmap(id, deleteFlag); +} +SQLITE_PRIVATE int sqlite3OsShmMap( + sqlite3_file *id, /* Database file handle */ + int iPage, + int pgsz, + int bExtend, /* True to extend file if necessary */ + void volatile **pp /* OUT: Pointer to mapping */ +){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xShmMap(id, iPage, pgsz, bExtend, pp); +} + +#if SQLITE_MAX_MMAP_SIZE>0 +/* The real implementation of xFetch and xUnfetch */ +SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){ + DO_OS_MALLOC_TEST(id); + return id->pMethods->xFetch(id, iOff, iAmt, pp); +} +SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){ + return id->pMethods->xUnfetch(id, iOff, p); +} +#else +/* No-op stubs to use when memory-mapped I/O is disabled */ +SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){ + *pp = 0; + return SQLITE_OK; +} +SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){ + return SQLITE_OK; +} +#endif + +/* +** The next group of routines are convenience wrappers around the +** VFS methods. +*/ +SQLITE_PRIVATE int sqlite3OsOpen( + sqlite3_vfs *pVfs, + const char *zPath, + sqlite3_file *pFile, + int flags, + int *pFlagsOut +){ + int rc; + DO_OS_MALLOC_TEST(0); + /* 0x87f7f is a mask of SQLITE_OPEN_ flags that are valid to be passed + ** down into the VFS layer. Some SQLITE_OPEN_ flags (for example, + ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before + ** reaching the VFS. 
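+  **
+  ** (For example, SQLITE_OPEN_READWRITE (0x00000002) survives the mask,
+  ** while SQLITE_OPEN_FULLMUTEX (0x00010000) and SQLITE_OPEN_SHAREDCACHE
+  ** (0x00020000) do not.)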
*/ + rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x87f7f, pFlagsOut); + assert( rc==SQLITE_OK || pFile->pMethods==0 ); + return rc; +} +SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + DO_OS_MALLOC_TEST(0); + assert( dirSync==0 || dirSync==1 ); + return pVfs->xDelete(pVfs, zPath, dirSync); +} +SQLITE_PRIVATE int sqlite3OsAccess( + sqlite3_vfs *pVfs, + const char *zPath, + int flags, + int *pResOut +){ + DO_OS_MALLOC_TEST(0); + return pVfs->xAccess(pVfs, zPath, flags, pResOut); +} +SQLITE_PRIVATE int sqlite3OsFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nPathOut, + char *zPathOut +){ + DO_OS_MALLOC_TEST(0); + zPathOut[0] = 0; + return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); +} +#ifndef SQLITE_OMIT_LOAD_EXTENSION +SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ + return pVfs->xDlOpen(pVfs, zPath); +} +SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + pVfs->xDlError(pVfs, nByte, zBufOut); +} +SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHdle, const char *zSym))(void){ + return pVfs->xDlSym(pVfs, pHdle, zSym); +} +SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){ + pVfs->xDlClose(pVfs, pHandle); +} +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ + return pVfs->xRandomness(pVfs, nByte, zBufOut); +} +SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){ + return pVfs->xSleep(pVfs, nMicro); +} +SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs *pVfs){ + return pVfs->xGetLastError ? pVfs->xGetLastError(pVfs, 0, 0) : 0; +} +SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){ + int rc; + /* IMPLEMENTATION-OF: R-49045-42493 SQLite will use the xCurrentTimeInt64() + ** method to get the current date and time if that method is available + ** (if iVersion is 2 or greater and the function pointer is not NULL) and + ** will fall back to xCurrentTime() if xCurrentTimeInt64() is + ** unavailable. + */ + if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){ + rc = pVfs->xCurrentTimeInt64(pVfs, pTimeOut); + }else{ + double r; + rc = pVfs->xCurrentTime(pVfs, &r); + *pTimeOut = (sqlite3_int64)(r*86400000.0); + } + return rc; +} + +SQLITE_PRIVATE int sqlite3OsOpenMalloc( + sqlite3_vfs *pVfs, + const char *zFile, + sqlite3_file **ppFile, + int flags, + int *pOutFlags +){ + int rc; + sqlite3_file *pFile; + pFile = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile); + if( pFile ){ + rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags); + if( rc!=SQLITE_OK ){ + sqlite3_free(pFile); + }else{ + *ppFile = pFile; + } + }else{ + rc = SQLITE_NOMEM_BKPT; + } + return rc; +} +SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *pFile){ + assert( pFile ); + sqlite3OsClose(pFile); + sqlite3_free(pFile); +} + +/* +** This function is a wrapper around the OS specific implementation of +** sqlite3_os_init(). The purpose of the wrapper is to provide the +** ability to simulate a malloc failure, so that the handling of an +** error in sqlite3_os_init() by the upper layers can be tested. +*/ +SQLITE_PRIVATE int sqlite3OsInit(void){ + void *p = sqlite3_malloc(10); + if( p==0 ) return SQLITE_NOMEM_BKPT; + sqlite3_free(p); + return sqlite3_os_init(); +} + +/* +** The list of all registered VFS implementations. 
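+**
+** Applications manipulate this list only through the public API, e.g.
+** (illustrative only):
+**
+**     sqlite3_vfs *pDflt = sqlite3_vfs_find(0);      /* default VFS */
+**     sqlite3_vfs *pUnix = sqlite3_vfs_find("unix"); /* by name, or 0 */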
+*/ +static sqlite3_vfs * SQLITE_WSD vfsList = 0; +#define vfsList GLOBAL(sqlite3_vfs *, vfsList) + +/* +** Locate a VFS by name. If no name is given, simply return the +** first VFS on the list. +*/ +SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){ + sqlite3_vfs *pVfs = 0; +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex; +#endif +#ifndef SQLITE_OMIT_AUTOINIT + int rc = sqlite3_initialize(); + if( rc ) return 0; +#endif +#if SQLITE_THREADSAFE + mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); +#endif + sqlite3_mutex_enter(mutex); + for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){ + if( zVfs==0 ) break; + if( strcmp(zVfs, pVfs->zName)==0 ) break; + } + sqlite3_mutex_leave(mutex); + return pVfs; +} + +/* +** Unlink a VFS from the linked list +*/ +static void vfsUnlink(sqlite3_vfs *pVfs){ + assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ); + if( pVfs==0 ){ + /* No-op */ + }else if( vfsList==pVfs ){ + vfsList = pVfs->pNext; + }else if( vfsList ){ + sqlite3_vfs *p = vfsList; + while( p->pNext && p->pNext!=pVfs ){ + p = p->pNext; + } + if( p->pNext==pVfs ){ + p->pNext = pVfs->pNext; + } + } +} + +/* +** Register a VFS with the system. It is harmless to register the same +** VFS multiple times. The new VFS becomes the default if makeDflt is +** true. +*/ +SQLITE_API int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){ + MUTEX_LOGIC(sqlite3_mutex *mutex;) +#ifndef SQLITE_OMIT_AUTOINIT + int rc = sqlite3_initialize(); + if( rc ) return rc; +#endif +#ifdef SQLITE_ENABLE_API_ARMOR + if( pVfs==0 ) return SQLITE_MISUSE_BKPT; +#endif + + MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) + sqlite3_mutex_enter(mutex); + vfsUnlink(pVfs); + if( makeDflt || vfsList==0 ){ + pVfs->pNext = vfsList; + vfsList = pVfs; + }else{ + pVfs->pNext = vfsList->pNext; + vfsList->pNext = pVfs; + } + assert(vfsList); + sqlite3_mutex_leave(mutex); + return SQLITE_OK; +} + +/* +** Unregister a VFS so that it is no longer accessible. +*/ +SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){ +#if SQLITE_THREADSAFE + sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); +#endif + sqlite3_mutex_enter(mutex); + vfsUnlink(pVfs); + sqlite3_mutex_leave(mutex); + return SQLITE_OK; +} + +/************** End of os.c **************************************************/ +/************** Begin file fault.c *******************************************/ +/* +** 2008 Jan 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains code to support the concept of "benign" +** malloc failures (when the xMalloc() or xRealloc() method of the +** sqlite3_mem_methods structure fails to allocate a block of memory +** and returns 0). +** +** Most malloc failures are non-benign. After they occur, SQLite +** abandons the current operation and returns an error code (usually +** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily +** fatal. For example, if a malloc fails while resizing a hash table, this +** is completely recoverable simply by not carrying out the resize. The +** hash table will continue to function normally. So a malloc failure +** during a hash table resize is a benign fault. 
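+**
+** The usage pattern inside SQLite is, in sketch form:
+**
+**     sqlite3BeginBenignMalloc();
+**     p = allocateOptionalCache();   /* hypothetical helper */
+**     sqlite3EndBenignMalloc();
+**     if( p==0 ){ /* carry on without the cache */ }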
+*/ + +/* #include "sqliteInt.h" */ + +#ifndef SQLITE_UNTESTABLE + +/* +** Global variables. +*/ +typedef struct BenignMallocHooks BenignMallocHooks; +static SQLITE_WSD struct BenignMallocHooks { + void (*xBenignBegin)(void); + void (*xBenignEnd)(void); +} sqlite3Hooks = { 0, 0 }; + +/* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks +** structure. If writable static data is unsupported on the target, +** we have to locate the state vector at run-time. In the more common +** case where writable static data is supported, wsdHooks can refer directly +** to the "sqlite3Hooks" state vector declared above. +*/ +#ifdef SQLITE_OMIT_WSD +# define wsdHooksInit \ + BenignMallocHooks *x = &GLOBAL(BenignMallocHooks,sqlite3Hooks) +# define wsdHooks x[0] +#else +# define wsdHooksInit +# define wsdHooks sqlite3Hooks +#endif + + +/* +** Register hooks to call when sqlite3BeginBenignMalloc() and +** sqlite3EndBenignMalloc() are called, respectively. +*/ +SQLITE_PRIVATE void sqlite3BenignMallocHooks( + void (*xBenignBegin)(void), + void (*xBenignEnd)(void) +){ + wsdHooksInit; + wsdHooks.xBenignBegin = xBenignBegin; + wsdHooks.xBenignEnd = xBenignEnd; +} + +/* +** This (sqlite3EndBenignMalloc()) is called by SQLite code to indicate that +** subsequent malloc failures are benign. A call to sqlite3EndBenignMalloc() +** indicates that subsequent malloc failures are non-benign. +*/ +SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void){ + wsdHooksInit; + if( wsdHooks.xBenignBegin ){ + wsdHooks.xBenignBegin(); + } +} +SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){ + wsdHooksInit; + if( wsdHooks.xBenignEnd ){ + wsdHooks.xBenignEnd(); + } +} + +#endif /* #ifndef SQLITE_UNTESTABLE */ + +/************** End of fault.c ***********************************************/ +/************** Begin file mem0.c ********************************************/ +/* +** 2008 October 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains a no-op memory allocation drivers for use when +** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented +** here always fail. SQLite will not operate with these drivers. These +** are merely placeholders. Real drivers must be substituted using +** sqlite3_config() before SQLite will operate. +*/ +/* #include "sqliteInt.h" */ + +/* +** This version of the memory allocator is the default. It is +** used when no other memory allocator is specified using compile-time +** macros. +*/ +#ifdef SQLITE_ZERO_MALLOC + +/* +** No-op versions of all memory allocation routines +*/ +static void *sqlite3MemMalloc(int nByte){ return 0; } +static void sqlite3MemFree(void *pPrior){ return; } +static void *sqlite3MemRealloc(void *pPrior, int nByte){ return 0; } +static int sqlite3MemSize(void *pPrior){ return 0; } +static int sqlite3MemRoundup(int n){ return n; } +static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; } +static void sqlite3MemShutdown(void *NotUsed){ return; } + +/* +** This routine is the only routine in this file with external linkage. +** +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. 
+*/ +SQLITE_PRIVATE void sqlite3MemSetDefault(void){ + static const sqlite3_mem_methods defaultMethods = { + sqlite3MemMalloc, + sqlite3MemFree, + sqlite3MemRealloc, + sqlite3MemSize, + sqlite3MemRoundup, + sqlite3MemInit, + sqlite3MemShutdown, + 0 + }; + sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); +} + +#endif /* SQLITE_ZERO_MALLOC */ + +/************** End of mem0.c ************************************************/ +/************** Begin file mem1.c ********************************************/ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains low-level memory allocation drivers for when +** SQLite will use the standard C-library malloc/realloc/free interface +** to obtain the memory it needs. +** +** This file contains implementations of the low-level memory allocation +** routines specified in the sqlite3_mem_methods object. The content of +** this file is only used if SQLITE_SYSTEM_MALLOC is defined. The +** SQLITE_SYSTEM_MALLOC macro is defined automatically if neither the +** SQLITE_MEMDEBUG nor the SQLITE_WIN32_MALLOC macros are defined. The +** default configuration is to use memory allocation routines in this +** file. +** +** C-preprocessor macro summary: +** +** HAVE_MALLOC_USABLE_SIZE The configure script sets this symbol if +** the malloc_usable_size() interface exists +** on the target platform. Or, this symbol +** can be set manually, if desired. +** If an equivalent interface exists by +** a different name, using a separate -D +** option to rename it. +** +** SQLITE_WITHOUT_ZONEMALLOC Some older macs lack support for the zone +** memory allocator. Set this symbol to enable +** building on older macs. +** +** SQLITE_WITHOUT_MSIZE Set this symbol to disable the use of +** _msize() on windows systems. This might +** be necessary when compiling for Delphi, +** for example. +*/ +/* #include "sqliteInt.h" */ + +/* +** This version of the memory allocator is the default. It is +** used when no other memory allocator is specified using compile-time +** macros. +*/ +#ifdef SQLITE_SYSTEM_MALLOC +#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) + +/* +** Use the zone allocator available on apple products unless the +** SQLITE_WITHOUT_ZONEMALLOC symbol is defined. +*/ +#include +#include +#include +static malloc_zone_t* _sqliteZone_; +#define SQLITE_MALLOC(x) malloc_zone_malloc(_sqliteZone_, (x)) +#define SQLITE_FREE(x) malloc_zone_free(_sqliteZone_, (x)); +#define SQLITE_REALLOC(x,y) malloc_zone_realloc(_sqliteZone_, (x), (y)) +#define SQLITE_MALLOCSIZE(x) \ + (_sqliteZone_ ? _sqliteZone_->size(_sqliteZone_,x) : malloc_size(x)) + +#else /* if not __APPLE__ */ + +/* +** Use standard C library malloc and free on non-Apple systems. +** Also used by Apple systems if SQLITE_WITHOUT_ZONEMALLOC is defined. +*/ +#define SQLITE_MALLOC(x) malloc(x) +#define SQLITE_FREE(x) free(x) +#define SQLITE_REALLOC(x,y) realloc((x),(y)) + +/* +** The malloc.h header file is needed for malloc_usable_size() function +** on some systems (e.g. Linux). 
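+**
+** malloc_usable_size() reports the usable size of the block actually
+** handed out, which may exceed the size requested, e.g. (illustrative):
+**
+**     void *p = malloc(100);
+**     size_t n = malloc_usable_size(p);   /* n >= 100 */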
+*/ +#if HAVE_MALLOC_H && HAVE_MALLOC_USABLE_SIZE +# define SQLITE_USE_MALLOC_H 1 +# define SQLITE_USE_MALLOC_USABLE_SIZE 1 +/* +** The MSVCRT has malloc_usable_size(), but it is called _msize(). The +** use of _msize() is automatic, but can be disabled by compiling with +** -DSQLITE_WITHOUT_MSIZE. Using the _msize() function also requires +** the malloc.h header file. +*/ +#elif defined(_MSC_VER) && !defined(SQLITE_WITHOUT_MSIZE) +# define SQLITE_USE_MALLOC_H +# define SQLITE_USE_MSIZE +#endif + +/* +** Include the malloc.h header file, if necessary. Also set define macro +** SQLITE_MALLOCSIZE to the appropriate function name, which is _msize() +** for MSVC and malloc_usable_size() for most other systems (e.g. Linux). +** The memory size function can always be overridden manually by defining +** the macro SQLITE_MALLOCSIZE to the desired function name. +*/ +#if defined(SQLITE_USE_MALLOC_H) +# include +# if defined(SQLITE_USE_MALLOC_USABLE_SIZE) +# if !defined(SQLITE_MALLOCSIZE) +# define SQLITE_MALLOCSIZE(x) malloc_usable_size(x) +# endif +# elif defined(SQLITE_USE_MSIZE) +# if !defined(SQLITE_MALLOCSIZE) +# define SQLITE_MALLOCSIZE _msize +# endif +# endif +#endif /* defined(SQLITE_USE_MALLOC_H) */ + +#endif /* __APPLE__ or not __APPLE__ */ + +/* +** Like malloc(), but remember the size of the allocation +** so that we can find it later using sqlite3MemSize(). +** +** For this low-level routine, we are guaranteed that nByte>0 because +** cases of nByte<=0 will be intercepted and dealt with by higher level +** routines. +*/ +static void *sqlite3MemMalloc(int nByte){ +#ifdef SQLITE_MALLOCSIZE + void *p; + testcase( ROUND8(nByte)==nByte ); + p = SQLITE_MALLOC( nByte ); + if( p==0 ){ + testcase( sqlite3GlobalConfig.xLog!=0 ); + sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); + } + return p; +#else + sqlite3_int64 *p; + assert( nByte>0 ); + testcase( ROUND8(nByte)!=nByte ); + p = SQLITE_MALLOC( nByte+8 ); + if( p ){ + p[0] = nByte; + p++; + }else{ + testcase( sqlite3GlobalConfig.xLog!=0 ); + sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); + } + return (void *)p; +#endif +} + +/* +** Like free() but works for allocations obtained from sqlite3MemMalloc() +** or sqlite3MemRealloc(). +** +** For this low-level routine, we already know that pPrior!=0 since +** cases where pPrior==0 will have been intecepted and dealt with +** by higher-level routines. +*/ +static void sqlite3MemFree(void *pPrior){ +#ifdef SQLITE_MALLOCSIZE + SQLITE_FREE(pPrior); +#else + sqlite3_int64 *p = (sqlite3_int64*)pPrior; + assert( pPrior!=0 ); + p--; + SQLITE_FREE(p); +#endif +} + +/* +** Report the allocated size of a prior return from xMalloc() +** or xRealloc(). +*/ +static int sqlite3MemSize(void *pPrior){ +#ifdef SQLITE_MALLOCSIZE + assert( pPrior!=0 ); + return (int)SQLITE_MALLOCSIZE(pPrior); +#else + sqlite3_int64 *p; + assert( pPrior!=0 ); + p = (sqlite3_int64*)pPrior; + p--; + return (int)p[0]; +#endif +} + +/* +** Like realloc(). Resize an allocation previously obtained from +** sqlite3MemMalloc(). +** +** For this low-level interface, we know that pPrior!=0. Cases where +** pPrior==0 while have been intercepted by higher-level routine and +** redirected to xMalloc. Similarly, we know that nByte>0 because +** cases where nByte<=0 will have been intercepted by higher-level +** routines and redirected to xFree. 
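+**
+** When SQLITE_MALLOCSIZE is not available, the allocator above prepends
+** an 8-byte size header, so the memory layout is:
+**
+**     [ sqlite3_int64 size ][ nByte bytes returned to the caller ]
+**
+** and sqlite3MemSize() simply reads p[-1] from the returned pointer.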
+*/ +static void *sqlite3MemRealloc(void *pPrior, int nByte){ +#ifdef SQLITE_MALLOCSIZE + void *p = SQLITE_REALLOC(pPrior, nByte); + if( p==0 ){ + testcase( sqlite3GlobalConfig.xLog!=0 ); + sqlite3_log(SQLITE_NOMEM, + "failed memory resize %u to %u bytes", + SQLITE_MALLOCSIZE(pPrior), nByte); + } + return p; +#else + sqlite3_int64 *p = (sqlite3_int64*)pPrior; + assert( pPrior!=0 && nByte>0 ); + assert( nByte==ROUND8(nByte) ); /* EV: R-46199-30249 */ + p--; + p = SQLITE_REALLOC(p, nByte+8 ); + if( p ){ + p[0] = nByte; + p++; + }else{ + testcase( sqlite3GlobalConfig.xLog!=0 ); + sqlite3_log(SQLITE_NOMEM, + "failed memory resize %u to %u bytes", + sqlite3MemSize(pPrior), nByte); + } + return (void*)p; +#endif +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int sqlite3MemRoundup(int n){ + return ROUND8(n); +} + +/* +** Initialize this module. +*/ +static int sqlite3MemInit(void *NotUsed){ +#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) + int cpuCount; + size_t len; + if( _sqliteZone_ ){ + return SQLITE_OK; + } + len = sizeof(cpuCount); + /* One usually wants to use hw.acctivecpu for MT decisions, but not here */ + sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0); + if( cpuCount>1 ){ + /* defer MT decisions to system malloc */ + _sqliteZone_ = malloc_default_zone(); + }else{ + /* only 1 core, use our own zone to contention over global locks, + ** e.g. we have our own dedicated locks */ + bool success; + malloc_zone_t* newzone = malloc_create_zone(4096, 0); + malloc_set_zone_name(newzone, "Sqlite_Heap"); + do{ + success = OSAtomicCompareAndSwapPtrBarrier(NULL, newzone, + (void * volatile *)&_sqliteZone_); + }while(!_sqliteZone_); + if( !success ){ + /* somebody registered a zone first */ + malloc_destroy_zone(newzone); + } + } +#endif + UNUSED_PARAMETER(NotUsed); + return SQLITE_OK; +} + +/* +** Deinitialize this module. +*/ +static void sqlite3MemShutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + return; +} + +/* +** This routine is the only routine in this file with external linkage. +** +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. +*/ +SQLITE_PRIVATE void sqlite3MemSetDefault(void){ + static const sqlite3_mem_methods defaultMethods = { + sqlite3MemMalloc, + sqlite3MemFree, + sqlite3MemRealloc, + sqlite3MemSize, + sqlite3MemRoundup, + sqlite3MemInit, + sqlite3MemShutdown, + 0 + }; + sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); +} + +#endif /* SQLITE_SYSTEM_MALLOC */ + +/************** End of mem1.c ************************************************/ +/************** Begin file mem2.c ********************************************/ +/* +** 2007 August 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains low-level memory allocation drivers for when +** SQLite will use the standard C-library malloc/realloc/free interface +** to obtain the memory it needs while adding lots of additional debugging +** information to each allocation in order to help detect and fix memory +** leaks and memory usage errors. 
+** +** This file contains implementations of the low-level memory allocation +** routines specified in the sqlite3_mem_methods object. +*/ +/* #include "sqliteInt.h" */ + +/* +** This version of the memory allocator is used only if the +** SQLITE_MEMDEBUG macro is defined +*/ +#ifdef SQLITE_MEMDEBUG + +/* +** The backtrace functionality is only available with GLIBC +*/ +#ifdef __GLIBC__ + extern int backtrace(void**,int); + extern void backtrace_symbols_fd(void*const*,int,int); +#else +# define backtrace(A,B) 1 +# define backtrace_symbols_fd(A,B,C) +#endif +/* #include */ + +/* +** Each memory allocation looks like this: +** +** ------------------------------------------------------------------------ +** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard | +** ------------------------------------------------------------------------ +** +** The application code sees only a pointer to the allocation. We have +** to back up from the allocation pointer to find the MemBlockHdr. The +** MemBlockHdr tells us the size of the allocation and the number of +** backtrace pointers. There is also a guard word at the end of the +** MemBlockHdr. +*/ +struct MemBlockHdr { + i64 iSize; /* Size of this allocation */ + struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */ + char nBacktrace; /* Number of backtraces on this alloc */ + char nBacktraceSlots; /* Available backtrace slots */ + u8 nTitle; /* Bytes of title; includes '\0' */ + u8 eType; /* Allocation type code */ + int iForeGuard; /* Guard word for sanity */ +}; + +/* +** Guard words +*/ +#define FOREGUARD 0x80F5E153 +#define REARGUARD 0xE4676B53 + +/* +** Number of malloc size increments to track. +*/ +#define NCSIZE 1000 + +/* +** All of the static variables used by this module are collected +** into a single structure named "mem". This is to keep the +** static variables organized and to reduce namespace pollution +** when this module is combined with other in the amalgamation. +*/ +static struct { + + /* + ** Mutex to control access to the memory allocation subsystem. + */ + sqlite3_mutex *mutex; + + /* + ** Head and tail of a linked list of all outstanding allocations + */ + struct MemBlockHdr *pFirst; + struct MemBlockHdr *pLast; + + /* + ** The number of levels of backtrace to save in new allocations. + */ + int nBacktrace; + void (*xBacktrace)(int, int, void **); + + /* + ** Title text to insert in front of each block + */ + int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */ + char zTitle[100]; /* The title text */ + + /* + ** sqlite3MallocDisallow() increments the following counter. + ** sqlite3MallocAllow() decrements it. + */ + int disallow; /* Do not allow memory allocation */ + + /* + ** Gather statistics on the sizes of memory allocations. + ** nAlloc[i] is the number of allocation attempts of i*8 + ** bytes. i==NCSIZE is the number of allocation attempts for + ** sizes more than NCSIZE*8 bytes. 
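+  **
+  ** For example, a 100-byte request rounds up to 104 bytes and is
+  ** counted in slot 104/8 == 13 by adjustStats() below.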
+ */ + int nAlloc[NCSIZE]; /* Total number of allocations */ + int nCurrent[NCSIZE]; /* Current number of allocations */ + int mxCurrent[NCSIZE]; /* Highwater mark for nCurrent */ + +} mem; + + +/* +** Adjust memory usage statistics +*/ +static void adjustStats(int iSize, int increment){ + int i = ROUND8(iSize)/8; + if( i>NCSIZE-1 ){ + i = NCSIZE - 1; + } + if( increment>0 ){ + mem.nAlloc[i]++; + mem.nCurrent[i]++; + if( mem.nCurrent[i]>mem.mxCurrent[i] ){ + mem.mxCurrent[i] = mem.nCurrent[i]; + } + }else{ + mem.nCurrent[i]--; + assert( mem.nCurrent[i]>=0 ); + } +} + +/* +** Given an allocation, find the MemBlockHdr for that allocation. +** +** This routine checks the guards at either end of the allocation and +** if they are incorrect it asserts. +*/ +static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){ + struct MemBlockHdr *p; + int *pInt; + u8 *pU8; + int nReserve; + + p = (struct MemBlockHdr*)pAllocation; + p--; + assert( p->iForeGuard==(int)FOREGUARD ); + nReserve = ROUND8(p->iSize); + pInt = (int*)pAllocation; + pU8 = (u8*)pAllocation; + assert( pInt[nReserve/sizeof(int)]==(int)REARGUARD ); + /* This checks any of the "extra" bytes allocated due + ** to rounding up to an 8 byte boundary to ensure + ** they haven't been overwritten. + */ + while( nReserve-- > p->iSize ) assert( pU8[nReserve]==0x65 ); + return p; +} + +/* +** Return the number of bytes currently allocated at address p. +*/ +static int sqlite3MemSize(void *p){ + struct MemBlockHdr *pHdr; + if( !p ){ + return 0; + } + pHdr = sqlite3MemsysGetHeader(p); + return (int)pHdr->iSize; +} + +/* +** Initialize the memory allocation subsystem. +*/ +static int sqlite3MemInit(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + assert( (sizeof(struct MemBlockHdr)&7) == 0 ); + if( !sqlite3GlobalConfig.bMemstat ){ + /* If memory status is enabled, then the malloc.c wrapper will already + ** hold the STATIC_MEM mutex when the routines here are invoked. */ + mem.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); + } + return SQLITE_OK; +} + +/* +** Deinitialize the memory allocation subsystem. +*/ +static void sqlite3MemShutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + mem.mutex = 0; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int sqlite3MemRoundup(int n){ + return ROUND8(n); +} + +/* +** Fill a buffer with pseudo-random bytes. This is used to preset +** the content of a new memory allocation to unpredictable values and +** to clear the content of a freed allocation to unpredictable values. +*/ +static void randomFill(char *pBuf, int nByte){ + unsigned int x, y, r; + x = SQLITE_PTR_TO_INT(pBuf); + y = nByte | 1; + while( nByte >= 4 ){ + x = (x>>1) ^ (-(int)(x&1) & 0xd0000001); + y = y*1103515245 + 12345; + r = x ^ y; + *(int*)pBuf = r; + pBuf += 4; + nByte -= 4; + } + while( nByte-- > 0 ){ + x = (x>>1) ^ (-(int)(x&1) & 0xd0000001); + y = y*1103515245 + 12345; + r = x ^ y; + *(pBuf++) = r & 0xff; + } +} + +/* +** Allocate nByte bytes of memory. 
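+**
+** Editorial note -- an illustration, not from the original sources: a
+** one-byte heap overrun such as
+**
+**     char *z = sqlite3Malloc(10);
+**     z[10] = 0;
+**
+** lands in the 0x65 padding written below, and is caught by the padding
+** and REARGUARD checks in sqlite3MemsysGetHeader() the next time the
+** block is freed, resized, or measured.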
+*/ +static void *sqlite3MemMalloc(int nByte){ + struct MemBlockHdr *pHdr; + void **pBt; + char *z; + int *pInt; + void *p = 0; + int totalSize; + int nReserve; + sqlite3_mutex_enter(mem.mutex); + assert( mem.disallow==0 ); + nReserve = ROUND8(nByte); + totalSize = nReserve + sizeof(*pHdr) + sizeof(int) + + mem.nBacktrace*sizeof(void*) + mem.nTitle; + p = malloc(totalSize); + if( p ){ + z = p; + pBt = (void**)&z[mem.nTitle]; + pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace]; + pHdr->pNext = 0; + pHdr->pPrev = mem.pLast; + if( mem.pLast ){ + mem.pLast->pNext = pHdr; + }else{ + mem.pFirst = pHdr; + } + mem.pLast = pHdr; + pHdr->iForeGuard = FOREGUARD; + pHdr->eType = MEMTYPE_HEAP; + pHdr->nBacktraceSlots = mem.nBacktrace; + pHdr->nTitle = mem.nTitle; + if( mem.nBacktrace ){ + void *aAddr[40]; + pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1; + memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*)); + assert(pBt[0]); + if( mem.xBacktrace ){ + mem.xBacktrace(nByte, pHdr->nBacktrace-1, &aAddr[1]); + } + }else{ + pHdr->nBacktrace = 0; + } + if( mem.nTitle ){ + memcpy(z, mem.zTitle, mem.nTitle); + } + pHdr->iSize = nByte; + adjustStats(nByte, +1); + pInt = (int*)&pHdr[1]; + pInt[nReserve/sizeof(int)] = REARGUARD; + randomFill((char*)pInt, nByte); + memset(((char*)pInt)+nByte, 0x65, nReserve-nByte); + p = (void*)pInt; + } + sqlite3_mutex_leave(mem.mutex); + return p; +} + +/* +** Free memory. +*/ +static void sqlite3MemFree(void *pPrior){ + struct MemBlockHdr *pHdr; + void **pBt; + char *z; + assert( sqlite3GlobalConfig.bMemstat || sqlite3GlobalConfig.bCoreMutex==0 + || mem.mutex!=0 ); + pHdr = sqlite3MemsysGetHeader(pPrior); + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + sqlite3_mutex_enter(mem.mutex); + if( pHdr->pPrev ){ + assert( pHdr->pPrev->pNext==pHdr ); + pHdr->pPrev->pNext = pHdr->pNext; + }else{ + assert( mem.pFirst==pHdr ); + mem.pFirst = pHdr->pNext; + } + if( pHdr->pNext ){ + assert( pHdr->pNext->pPrev==pHdr ); + pHdr->pNext->pPrev = pHdr->pPrev; + }else{ + assert( mem.pLast==pHdr ); + mem.pLast = pHdr->pPrev; + } + z = (char*)pBt; + z -= pHdr->nTitle; + adjustStats((int)pHdr->iSize, -1); + randomFill(z, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) + + (int)pHdr->iSize + sizeof(int) + pHdr->nTitle); + free(z); + sqlite3_mutex_leave(mem.mutex); +} + +/* +** Change the size of an existing memory allocation. +** +** For this debugging implementation, we *always* make a copy of the +** allocation into a new place in memory. In this way, if the +** higher level code is using pointer to the old allocation, it is +** much more likely to break and we are much more liking to find +** the error. +*/ +static void *sqlite3MemRealloc(void *pPrior, int nByte){ + struct MemBlockHdr *pOldHdr; + void *pNew; + assert( mem.disallow==0 ); + assert( (nByte & 7)==0 ); /* EV: R-46199-30249 */ + pOldHdr = sqlite3MemsysGetHeader(pPrior); + pNew = sqlite3MemMalloc(nByte); + if( pNew ){ + memcpy(pNew, pPrior, (int)(nByteiSize ? nByte : pOldHdr->iSize)); + if( nByte>pOldHdr->iSize ){ + randomFill(&((char*)pNew)[pOldHdr->iSize], nByte - (int)pOldHdr->iSize); + } + sqlite3MemFree(pPrior); + } + return pNew; +} + +/* +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. 
+*/ +SQLITE_PRIVATE void sqlite3MemSetDefault(void){ + static const sqlite3_mem_methods defaultMethods = { + sqlite3MemMalloc, + sqlite3MemFree, + sqlite3MemRealloc, + sqlite3MemSize, + sqlite3MemRoundup, + sqlite3MemInit, + sqlite3MemShutdown, + 0 + }; + sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); +} + +/* +** Set the "type" of an allocation. +*/ +SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){ + if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ + struct MemBlockHdr *pHdr; + pHdr = sqlite3MemsysGetHeader(p); + assert( pHdr->iForeGuard==FOREGUARD ); + pHdr->eType = eType; + } +} + +/* +** Return TRUE if the mask of type in eType matches the type of the +** allocation p. Also return true if p==NULL. +** +** This routine is designed for use within an assert() statement, to +** verify the type of an allocation. For example: +** +** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); +*/ +SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){ + int rc = 1; + if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ + struct MemBlockHdr *pHdr; + pHdr = sqlite3MemsysGetHeader(p); + assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ + if( (pHdr->eType&eType)==0 ){ + rc = 0; + } + } + return rc; +} + +/* +** Return TRUE if the mask of type in eType matches no bits of the type of the +** allocation p. Also return true if p==NULL. +** +** This routine is designed for use within an assert() statement, to +** verify the type of an allocation. For example: +** +** assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); +*/ +SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){ + int rc = 1; + if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ + struct MemBlockHdr *pHdr; + pHdr = sqlite3MemsysGetHeader(p); + assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ + if( (pHdr->eType&eType)!=0 ){ + rc = 0; + } + } + return rc; +} + +/* +** Set the number of backtrace levels kept for each allocation. +** A value of zero turns off backtracing. The number is always rounded +** up to a multiple of 2. +*/ +SQLITE_PRIVATE void sqlite3MemdebugBacktrace(int depth){ + if( depth<0 ){ depth = 0; } + if( depth>20 ){ depth = 20; } + depth = (depth+1)&0xfe; + mem.nBacktrace = depth; +} + +SQLITE_PRIVATE void sqlite3MemdebugBacktraceCallback(void (*xBacktrace)(int, int, void **)){ + mem.xBacktrace = xBacktrace; +} + +/* +** Set the title string for subsequent allocations. +*/ +SQLITE_PRIVATE void sqlite3MemdebugSettitle(const char *zTitle){ + unsigned int n = sqlite3Strlen30(zTitle) + 1; + sqlite3_mutex_enter(mem.mutex); + if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1; + memcpy(mem.zTitle, zTitle, n); + mem.zTitle[n] = 0; + mem.nTitle = ROUND8(n); + sqlite3_mutex_leave(mem.mutex); +} + +SQLITE_PRIVATE void sqlite3MemdebugSync(){ + struct MemBlockHdr *pHdr; + for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ + void **pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + mem.xBacktrace((int)pHdr->iSize, pHdr->nBacktrace-1, &pBt[1]); + } +} + +/* +** Open the file indicated and write a log of all unfreed memory +** allocations into that log. 
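+**
+** For example (editorial sketch; these are internal SQLITE_MEMDEBUG-only
+** hooks, and run_some_test() is a hypothetical routine):
+**
+**     sqlite3MemdebugSettitle("my-test");
+**     run_some_test();
+**     sqlite3MemdebugDump("leaks.txt");
+**
+** Any block still outstanding afterwards is listed in leaks.txt tagged
+** with the title "my-test".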
+*/ +SQLITE_PRIVATE void sqlite3MemdebugDump(const char *zFilename){ + FILE *out; + struct MemBlockHdr *pHdr; + void **pBt; + int i; + out = fopen(zFilename, "w"); + if( out==0 ){ + fprintf(stderr, "** Unable to output memory debug output log: %s **\n", + zFilename); + return; + } + for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ + char *z = (char*)pHdr; + z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle; + fprintf(out, "**** %lld bytes at %p from %s ****\n", + pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???"); + if( pHdr->nBacktrace ){ + fflush(out); + pBt = (void**)pHdr; + pBt -= pHdr->nBacktraceSlots; + backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out)); + fprintf(out, "\n"); + } + } + fprintf(out, "COUNTS:\n"); + for(i=0; i=1 ); + size = mem3.aPool[i-1].u.hdr.size4x/4; + assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); + assert( size>=2 ); + if( size <= MX_SMALL ){ + memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]); + }else{ + hash = size % N_HASH; + memsys3UnlinkFromList(i, &mem3.aiHash[hash]); + } +} + +/* +** Link the chunk at mem3.aPool[i] so that is on the list rooted +** at *pRoot. +*/ +static void memsys3LinkIntoList(u32 i, u32 *pRoot){ + assert( sqlite3_mutex_held(mem3.mutex) ); + mem3.aPool[i].u.list.next = *pRoot; + mem3.aPool[i].u.list.prev = 0; + if( *pRoot ){ + mem3.aPool[*pRoot].u.list.prev = i; + } + *pRoot = i; +} + +/* +** Link the chunk at index i into either the appropriate +** small chunk list, or into the large chunk hash table. +*/ +static void memsys3Link(u32 i){ + u32 size, hash; + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( i>=1 ); + assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 ); + size = mem3.aPool[i-1].u.hdr.size4x/4; + assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); + assert( size>=2 ); + if( size <= MX_SMALL ){ + memsys3LinkIntoList(i, &mem3.aiSmall[size-2]); + }else{ + hash = size % N_HASH; + memsys3LinkIntoList(i, &mem3.aiHash[hash]); + } +} + +/* +** If the STATIC_MEM mutex is not already held, obtain it now. The mutex +** will already be held (obtained by code in malloc.c) if +** sqlite3GlobalConfig.bMemStat is true. +*/ +static void memsys3Enter(void){ + if( sqlite3GlobalConfig.bMemstat==0 && mem3.mutex==0 ){ + mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); + } + sqlite3_mutex_enter(mem3.mutex); +} +static void memsys3Leave(void){ + sqlite3_mutex_leave(mem3.mutex); +} + +/* +** Called when we are unable to satisfy an allocation of nBytes. +*/ +static void memsys3OutOfMemory(int nByte){ + if( !mem3.alarmBusy ){ + mem3.alarmBusy = 1; + assert( sqlite3_mutex_held(mem3.mutex) ); + sqlite3_mutex_leave(mem3.mutex); + sqlite3_release_memory(nByte); + sqlite3_mutex_enter(mem3.mutex); + mem3.alarmBusy = 0; + } +} + + +/* +** Chunk i is a free chunk that has been unlinked. Adjust its +** size parameters for check-out and return a pointer to the +** user portion of the chunk. +*/ +static void *memsys3Checkout(u32 i, u32 nBlock){ + u32 x; + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( i>=1 ); + assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ); + assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock ); + x = mem3.aPool[i-1].u.hdr.size4x; + mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2); + mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock; + mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2; + return &mem3.aPool[i]; +} + +/* +** Carve a piece off of the end of the mem3.iMaster free chunk. +** Return a pointer to the new allocation. Or, if the master chunk +** is not large enough, return 0. 
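+**
+** (Editorial recap of the encoding used throughout this allocator: for
+** the chunk that begins at mem3.aPool[i],
+**
+**     size4x/4   is the chunk size in 8-byte Mem3Block units,
+**     size4x&1   is set when this chunk is checked out, and
+**     size4x&2   is set when the previous chunk is checked out,
+**
+** which is why the code below multiplies block counts by 4 and ORs in
+** the low two bits.)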
+*/ +static void *memsys3FromMaster(u32 nBlock){ + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( mem3.szMaster>=nBlock ); + if( nBlock>=mem3.szMaster-1 ){ + /* Use the entire master */ + void *p = memsys3Checkout(mem3.iMaster, mem3.szMaster); + mem3.iMaster = 0; + mem3.szMaster = 0; + mem3.mnMaster = 0; + return p; + }else{ + /* Split the master block. Return the tail. */ + u32 newi, x; + newi = mem3.iMaster + mem3.szMaster - nBlock; + assert( newi > mem3.iMaster+1 ); + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = nBlock; + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x |= 2; + mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1; + mem3.szMaster -= nBlock; + mem3.aPool[newi-1].u.hdr.prevSize = mem3.szMaster; + x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; + mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; + if( mem3.szMaster < mem3.mnMaster ){ + mem3.mnMaster = mem3.szMaster; + } + return (void*)&mem3.aPool[newi]; + } +} + +/* +** *pRoot is the head of a list of free chunks of the same size +** or same size hash. In other words, *pRoot is an entry in either +** mem3.aiSmall[] or mem3.aiHash[]. +** +** This routine examines all entries on the given list and tries +** to coalesce each entries with adjacent free chunks. +** +** If it sees a chunk that is larger than mem3.iMaster, it replaces +** the current mem3.iMaster with the new larger chunk. In order for +** this mem3.iMaster replacement to work, the master chunk must be +** linked into the hash tables. That is not the normal state of +** affairs, of course. The calling routine must link the master +** chunk before invoking this routine, then must unlink the (possibly +** changed) master chunk once this routine has finished. +*/ +static void memsys3Merge(u32 *pRoot){ + u32 iNext, prev, size, i, x; + + assert( sqlite3_mutex_held(mem3.mutex) ); + for(i=*pRoot; i>0; i=iNext){ + iNext = mem3.aPool[i].u.list.next; + size = mem3.aPool[i-1].u.hdr.size4x; + assert( (size&1)==0 ); + if( (size&2)==0 ){ + memsys3UnlinkFromList(i, pRoot); + assert( i > mem3.aPool[i-1].u.hdr.prevSize ); + prev = i - mem3.aPool[i-1].u.hdr.prevSize; + if( prev==iNext ){ + iNext = mem3.aPool[prev].u.list.next; + } + memsys3Unlink(prev); + size = i + size/4 - prev; + x = mem3.aPool[prev-1].u.hdr.size4x & 2; + mem3.aPool[prev-1].u.hdr.size4x = size*4 | x; + mem3.aPool[prev+size-1].u.hdr.prevSize = size; + memsys3Link(prev); + i = prev; + }else{ + size /= 4; + } + if( size>mem3.szMaster ){ + mem3.iMaster = i; + mem3.szMaster = size; + } + } +} + +/* +** Return a block of memory of at least nBytes in size. +** Return NULL if unable. +** +** This function assumes that the necessary mutexes, if any, are +** already held by the caller. Hence "Unsafe". +*/ +static void *memsys3MallocUnsafe(int nByte){ + u32 i; + u32 nBlock; + u32 toFree; + + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( sizeof(Mem3Block)==8 ); + if( nByte<=12 ){ + nBlock = 2; + }else{ + nBlock = (nByte + 11)/8; + } + assert( nBlock>=2 ); + + /* STEP 1: + ** Look for an entry of the correct size in either the small + ** chunk table or in the large chunk hash table. This is + ** successful most of the time (about 9 times out of 10). 
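+  **
+  ** (Editorial example: a 50-byte request gives nBlock = (50+11)/8 = 7
+  ** blocks, i.e. 56 bytes including the 4-byte header, so the search
+  ** starts at the aiSmall[7-2] free list.)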
+ */ + if( nBlock <= MX_SMALL ){ + i = mem3.aiSmall[nBlock-2]; + if( i>0 ){ + memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]); + return memsys3Checkout(i, nBlock); + } + }else{ + int hash = nBlock % N_HASH; + for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){ + if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){ + memsys3UnlinkFromList(i, &mem3.aiHash[hash]); + return memsys3Checkout(i, nBlock); + } + } + } + + /* STEP 2: + ** Try to satisfy the allocation by carving a piece off of the end + ** of the master chunk. This step usually works if step 1 fails. + */ + if( mem3.szMaster>=nBlock ){ + return memsys3FromMaster(nBlock); + } + + + /* STEP 3: + ** Loop through the entire memory pool. Coalesce adjacent free + ** chunks. Recompute the master chunk as the largest free chunk. + ** Then try again to satisfy the allocation by carving a piece off + ** of the end of the master chunk. This step happens very + ** rarely (we hope!) + */ + for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){ + memsys3OutOfMemory(toFree); + if( mem3.iMaster ){ + memsys3Link(mem3.iMaster); + mem3.iMaster = 0; + mem3.szMaster = 0; + } + for(i=0; i=nBlock ){ + return memsys3FromMaster(nBlock); + } + } + } + + /* If none of the above worked, then we fail. */ + return 0; +} + +/* +** Free an outstanding memory allocation. +** +** This function assumes that the necessary mutexes, if any, are +** already held by the caller. Hence "Unsafe". +*/ +static void memsys3FreeUnsafe(void *pOld){ + Mem3Block *p = (Mem3Block*)pOld; + int i; + u32 size, x; + assert( sqlite3_mutex_held(mem3.mutex) ); + assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] ); + i = p - mem3.aPool; + assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 ); + size = mem3.aPool[i-1].u.hdr.size4x/4; + assert( i+size<=mem3.nPool+1 ); + mem3.aPool[i-1].u.hdr.size4x &= ~1; + mem3.aPool[i+size-1].u.hdr.prevSize = size; + mem3.aPool[i+size-1].u.hdr.size4x &= ~2; + memsys3Link(i); + + /* Try to expand the master using the newly freed chunk */ + if( mem3.iMaster ){ + while( (mem3.aPool[mem3.iMaster-1].u.hdr.size4x&2)==0 ){ + size = mem3.aPool[mem3.iMaster-1].u.hdr.prevSize; + mem3.iMaster -= size; + mem3.szMaster += size; + memsys3Unlink(mem3.iMaster); + x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; + mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; + } + x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; + while( (mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x&1)==0 ){ + memsys3Unlink(mem3.iMaster+mem3.szMaster); + mem3.szMaster += mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x/4; + mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; + mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; + } + } +} + +/* +** Return the size of an outstanding allocation, in bytes. The +** size returned omits the 8-byte header overhead. This only +** works for chunks that are currently checked out. +*/ +static int memsys3Size(void *p){ + Mem3Block *pBlock; + assert( p!=0 ); + pBlock = (Mem3Block*)p; + assert( (pBlock[-1].u.hdr.size4x&1)!=0 ); + return (pBlock[-1].u.hdr.size4x&~3)*2 - 4; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int memsys3Roundup(int n){ + if( n<=12 ){ + return 12; + }else{ + return ((n+11)&~7) - 4; + } +} + +/* +** Allocate nBytes of memory. 
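+**
+** (Editorial note: because of the 4-byte header and 8-byte blocks, the
+** sizes this allocator actually hands out step as 12, 20, 28, ... bytes;
+** memsys3Roundup(12)==12, memsys3Roundup(13)==20, memsys3Roundup(21)==28.)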
+*/ +static void *memsys3Malloc(int nBytes){ + sqlite3_int64 *p; + assert( nBytes>0 ); /* malloc.c filters out 0 byte requests */ + memsys3Enter(); + p = memsys3MallocUnsafe(nBytes); + memsys3Leave(); + return (void*)p; +} + +/* +** Free memory. +*/ +static void memsys3Free(void *pPrior){ + assert( pPrior ); + memsys3Enter(); + memsys3FreeUnsafe(pPrior); + memsys3Leave(); +} + +/* +** Change the size of an existing memory allocation +*/ +static void *memsys3Realloc(void *pPrior, int nBytes){ + int nOld; + void *p; + if( pPrior==0 ){ + return sqlite3_malloc(nBytes); + } + if( nBytes<=0 ){ + sqlite3_free(pPrior); + return 0; + } + nOld = memsys3Size(pPrior); + if( nBytes<=nOld && nBytes>=nOld-128 ){ + return pPrior; + } + memsys3Enter(); + p = memsys3MallocUnsafe(nBytes); + if( p ){ + if( nOld>1)!=(size&1) ){ + fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]); + assert( 0 ); + break; + } + if( size&1 ){ + fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8); + }else{ + fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8, + i==mem3.iMaster ? " **master**" : ""); + } + } + for(i=0; i0; j=mem3.aPool[j].u.list.next){ + fprintf(out, " %p(%d)", &mem3.aPool[j], + (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); + } + fprintf(out, "\n"); + } + for(i=0; i0; j=mem3.aPool[j].u.list.next){ + fprintf(out, " %p(%d)", &mem3.aPool[j], + (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); + } + fprintf(out, "\n"); + } + fprintf(out, "master=%d\n", mem3.iMaster); + fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szMaster*8); + fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnMaster*8); + sqlite3_mutex_leave(mem3.mutex); + if( out==stdout ){ + fflush(stdout); + }else{ + fclose(out); + } +#else + UNUSED_PARAMETER(zFilename); +#endif +} + +/* +** This routine is the only routine in this file with external +** linkage. +** +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. The +** arguments specify the block of memory to manage. +** +** This routine is only called by sqlite3_config(), and therefore +** is not required to be threadsafe (it is not). +*/ +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){ + static const sqlite3_mem_methods mempoolMethods = { + memsys3Malloc, + memsys3Free, + memsys3Realloc, + memsys3Size, + memsys3Roundup, + memsys3Init, + memsys3Shutdown, + 0 + }; + return &mempoolMethods; +} + +#endif /* SQLITE_ENABLE_MEMSYS3 */ + +/************** End of mem3.c ************************************************/ +/************** Begin file mem5.c ********************************************/ +/* +** 2007 October 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement a memory +** allocation subsystem for use by SQLite. +** +** This version of the memory allocation subsystem omits all +** use of malloc(). The application gives SQLite a block of memory +** before calling sqlite3_initialize() from which allocations +** are made and returned by the xMalloc() and xRealloc() +** implementations. Once sqlite3_initialize() has been called, +** the amount of memory available to SQLite is fixed and cannot +** be changed. 
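+**
+** For illustration (an editorial sketch, with arbitrary example values):
+** the application typically supplies that block through SQLITE_CONFIG_HEAP
+** before initialization,
+**
+**     static char aHeap[8*1024*1024];
+**     sqlite3_config(SQLITE_CONFIG_HEAP, aHeap, sizeof(aHeap), 64);
+**     sqlite3_initialize();
+**
+** where the final argument is the minimum allocation size.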
+** +** This version of the memory allocation subsystem is included +** in the build only if SQLITE_ENABLE_MEMSYS5 is defined. +** +** This memory allocator uses the following algorithm: +** +** 1. All memory allocation sizes are rounded up to a power of 2. +** +** 2. If two adjacent free blocks are the halves of a larger block, +** then the two blocks are coalesced into the single larger block. +** +** 3. New memory is allocated from the first available free block. +** +** This algorithm is described in: J. M. Robson. "Bounds for Some Functions +** Concerning Dynamic Storage Allocation". Journal of the Association for +** Computing Machinery, Volume 21, Number 8, July 1974, pages 491-499. +** +** Let n be the size of the largest allocation divided by the minimum +** allocation size (after rounding all sizes up to a power of 2.) Let M +** be the maximum amount of memory ever outstanding at one time. Let +** N be the total amount of memory available for allocation. Robson +** proved that this memory allocator will never breakdown due to +** fragmentation as long as the following constraint holds: +** +** N >= M*(1 + log2(n)/2) - n + 1 +** +** The sqlite3_status() logic tracks the maximum values of n and M so +** that an application can, at any time, verify this constraint. +*/ +/* #include "sqliteInt.h" */ + +/* +** This version of the memory allocator is used only when +** SQLITE_ENABLE_MEMSYS5 is defined. +*/ +#ifdef SQLITE_ENABLE_MEMSYS5 + +/* +** A minimum allocation is an instance of the following structure. +** Larger allocations are an array of these structures where the +** size of the array is a power of 2. +** +** The size of this object must be a power of two. That fact is +** verified in memsys5Init(). +*/ +typedef struct Mem5Link Mem5Link; +struct Mem5Link { + int next; /* Index of next free chunk */ + int prev; /* Index of previous free chunk */ +}; + +/* +** Maximum size of any allocation is ((1<=0 && i=0 && iLogsize<=LOGMAX ); + assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); + + next = MEM5LINK(i)->next; + prev = MEM5LINK(i)->prev; + if( prev<0 ){ + mem5.aiFreelist[iLogsize] = next; + }else{ + MEM5LINK(prev)->next = next; + } + if( next>=0 ){ + MEM5LINK(next)->prev = prev; + } +} + +/* +** Link the chunk at mem5.aPool[i] so that is on the iLogsize +** free list. +*/ +static void memsys5Link(int i, int iLogsize){ + int x; + assert( sqlite3_mutex_held(mem5.mutex) ); + assert( i>=0 && i=0 && iLogsize<=LOGMAX ); + assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); + + x = MEM5LINK(i)->next = mem5.aiFreelist[iLogsize]; + MEM5LINK(i)->prev = -1; + if( x>=0 ){ + assert( xprev = i; + } + mem5.aiFreelist[iLogsize] = i; +} + +/* +** Obtain or release the mutex needed to access global data structures. +*/ +static void memsys5Enter(void){ + sqlite3_mutex_enter(mem5.mutex); +} +static void memsys5Leave(void){ + sqlite3_mutex_leave(mem5.mutex); +} + +/* +** Return the size of an outstanding allocation, in bytes. +** This only works for chunks that are currently checked out. +*/ +static int memsys5Size(void *p){ + int iSize, i; + assert( p!=0 ); + i = (int)(((u8 *)p-mem5.zPool)/mem5.szAtom); + assert( i>=0 && i0 ); + + /* No more than 1GiB per allocation */ + if( nByte > 0x40000000 ) return 0; + +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + /* Keep track of the maximum allocation request. 
Even unfulfilled + ** requests are counted */ + if( (u32)nByte>mem5.maxRequest ){ + mem5.maxRequest = nByte; + } +#endif + + + /* Round nByte up to the next valid power of two */ + for(iFullSz=mem5.szAtom,iLogsize=0; iFullSzLOGMAX ){ + testcase( sqlite3GlobalConfig.xLog!=0 ); + sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes", nByte); + return 0; + } + i = mem5.aiFreelist[iBin]; + memsys5Unlink(i, iBin); + while( iBin>iLogsize ){ + int newSize; + + iBin--; + newSize = 1 << iBin; + mem5.aCtrl[i+newSize] = CTRL_FREE | iBin; + memsys5Link(i+newSize, iBin); + } + mem5.aCtrl[i] = iLogsize; + +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + /* Update allocator performance statistics. */ + mem5.nAlloc++; + mem5.totalAlloc += iFullSz; + mem5.totalExcess += iFullSz - nByte; + mem5.currentCount++; + mem5.currentOut += iFullSz; + if( mem5.maxCount=0 && iBlock0 ); + assert( mem5.currentOut>=(size*mem5.szAtom) ); + mem5.currentCount--; + mem5.currentOut -= size*mem5.szAtom; + assert( mem5.currentOut>0 || mem5.currentCount==0 ); + assert( mem5.currentCount>0 || mem5.currentOut==0 ); +#endif + + mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize; + while( ALWAYS(iLogsize>iLogsize) & 1 ){ + iBuddy = iBlock - size; + assert( iBuddy>=0 ); + }else{ + iBuddy = iBlock + size; + if( iBuddy>=mem5.nBlock ) break; + } + if( mem5.aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break; + memsys5Unlink(iBuddy, iLogsize); + iLogsize++; + if( iBuddy0 ){ + memsys5Enter(); + p = memsys5MallocUnsafe(nBytes); + memsys5Leave(); + } + return (void*)p; +} + +/* +** Free memory. +** +** The outer layer memory allocator prevents this routine from +** being called with pPrior==0. +*/ +static void memsys5Free(void *pPrior){ + assert( pPrior!=0 ); + memsys5Enter(); + memsys5FreeUnsafe(pPrior); + memsys5Leave(); +} + +/* +** Change the size of an existing memory allocation. +** +** The outer layer memory allocator prevents this routine from +** being called with pPrior==0. +** +** nBytes is always a value obtained from a prior call to +** memsys5Round(). Hence nBytes is always a non-negative power +** of two. If nBytes==0 that means that an oversize allocation +** (an allocation larger than 0x40000000) was requested and this +** routine should return 0 without freeing pPrior. +*/ +static void *memsys5Realloc(void *pPrior, int nBytes){ + int nOld; + void *p; + assert( pPrior!=0 ); + assert( (nBytes&(nBytes-1))==0 ); /* EV: R-46199-30249 */ + assert( nBytes>=0 ); + if( nBytes==0 ){ + return 0; + } + nOld = memsys5Size(pPrior); + if( nBytes<=nOld ){ + return pPrior; + } + p = memsys5Malloc(nBytes); + if( p ){ + memcpy(p, pPrior, nOld); + memsys5Free(pPrior); + } + return p; +} + +/* +** Round up a request size to the next valid allocation size. If +** the allocation is too large to be handled by this allocation system, +** return 0. +** +** All allocations must be a power of two and must be expressed by a +** 32-bit signed integer. Hence the largest allocation is 0x40000000 +** or 1073741824 bytes. 
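+**
+** (Editorial example, assuming an 8-byte szAtom: memsys5Roundup(24)
+** returns 32 and memsys5Roundup(1000) returns 1024, since every request
+** is promoted to the next power of two at or above szAtom.)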
+*/ +static int memsys5Roundup(int n){ + int iFullSz; + if( n > 0x40000000 ) return 0; + for(iFullSz=mem5.szAtom; iFullSz 0 +** memsys5Log(2) -> 1 +** memsys5Log(4) -> 2 +** memsys5Log(5) -> 3 +** memsys5Log(8) -> 3 +** memsys5Log(9) -> 4 +*/ +static int memsys5Log(int iValue){ + int iLog; + for(iLog=0; (iLog<(int)((sizeof(int)*8)-1)) && (1<mem5.szAtom ){ + mem5.szAtom = mem5.szAtom << 1; + } + + mem5.nBlock = (nByte / (mem5.szAtom+sizeof(u8))); + mem5.zPool = zByte; + mem5.aCtrl = (u8 *)&mem5.zPool[mem5.nBlock*mem5.szAtom]; + + for(ii=0; ii<=LOGMAX; ii++){ + mem5.aiFreelist[ii] = -1; + } + + iOffset = 0; + for(ii=LOGMAX; ii>=0; ii--){ + int nAlloc = (1<mem5.nBlock); + } + + /* If a mutex is required for normal operation, allocate one */ + if( sqlite3GlobalConfig.bMemstat==0 ){ + mem5.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); + } + + return SQLITE_OK; +} + +/* +** Deinitialize this module. +*/ +static void memsys5Shutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + mem5.mutex = 0; + return; +} + +#ifdef SQLITE_TEST +/* +** Open the file indicated and write a log of all unfreed memory +** allocations into that log. +*/ +SQLITE_PRIVATE void sqlite3Memsys5Dump(const char *zFilename){ + FILE *out; + int i, j, n; + int nMinLog; + + if( zFilename==0 || zFilename[0]==0 ){ + out = stdout; + }else{ + out = fopen(zFilename, "w"); + if( out==0 ){ + fprintf(stderr, "** Unable to output memory debug output log: %s **\n", + zFilename); + return; + } + } + memsys5Enter(); + nMinLog = memsys5Log(mem5.szAtom); + for(i=0; i<=LOGMAX && i+nMinLog<32; i++){ + for(n=0, j=mem5.aiFreelist[i]; j>=0; j = MEM5LINK(j)->next, n++){} + fprintf(out, "freelist items of size %d: %d\n", mem5.szAtom << i, n); + } + fprintf(out, "mem5.nAlloc = %llu\n", mem5.nAlloc); + fprintf(out, "mem5.totalAlloc = %llu\n", mem5.totalAlloc); + fprintf(out, "mem5.totalExcess = %llu\n", mem5.totalExcess); + fprintf(out, "mem5.currentOut = %u\n", mem5.currentOut); + fprintf(out, "mem5.currentCount = %u\n", mem5.currentCount); + fprintf(out, "mem5.maxOut = %u\n", mem5.maxOut); + fprintf(out, "mem5.maxCount = %u\n", mem5.maxCount); + fprintf(out, "mem5.maxRequest = %u\n", mem5.maxRequest); + memsys5Leave(); + if( out==stdout ){ + fflush(stdout); + }else{ + fclose(out); + } +} +#endif + +/* +** This routine is the only routine in this file with external +** linkage. It returns a pointer to a static sqlite3_mem_methods +** struct populated with the memsys5 methods. +*/ +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){ + static const sqlite3_mem_methods memsys5Methods = { + memsys5Malloc, + memsys5Free, + memsys5Realloc, + memsys5Size, + memsys5Roundup, + memsys5Init, + memsys5Shutdown, + 0 + }; + return &memsys5Methods; +} + +#endif /* SQLITE_ENABLE_MEMSYS5 */ + +/************** End of mem5.c ************************************************/ +/************** Begin file mutex.c *******************************************/ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes. +** +** This file contains code that is common across all mutex implementations. 
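+**
+** For illustration (editorial sketch; exMutexMethods is hypothetical):
+** an application substitutes its own implementation before
+** sqlite3_initialize() with
+**
+**     static sqlite3_mutex_methods exMutexMethods = { ... };
+**     sqlite3_config(SQLITE_CONFIG_MUTEX, &exMutexMethods);
+**
+** and can read the implementation currently in use back out with
+** sqlite3_config(SQLITE_CONFIG_GETMUTEX, &exMutexMethods).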
+*/ +/* #include "sqliteInt.h" */ + +#if defined(SQLITE_DEBUG) && !defined(SQLITE_MUTEX_OMIT) +/* +** For debugging purposes, record when the mutex subsystem is initialized +** and uninitialized so that we can assert() if there is an attempt to +** allocate a mutex while the system is uninitialized. +*/ +static SQLITE_WSD int mutexIsInit = 0; +#endif /* SQLITE_DEBUG && !defined(SQLITE_MUTEX_OMIT) */ + + +#ifndef SQLITE_MUTEX_OMIT +/* +** Initialize the mutex system. +*/ +SQLITE_PRIVATE int sqlite3MutexInit(void){ + int rc = SQLITE_OK; + if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ + /* If the xMutexAlloc method has not been set, then the user did not + ** install a mutex implementation via sqlite3_config() prior to + ** sqlite3_initialize() being called. This block copies pointers to + ** the default implementation into the sqlite3GlobalConfig structure. + */ + sqlite3_mutex_methods const *pFrom; + sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex; + + if( sqlite3GlobalConfig.bCoreMutex ){ + pFrom = sqlite3DefaultMutex(); + }else{ + pFrom = sqlite3NoopMutex(); + } + pTo->xMutexInit = pFrom->xMutexInit; + pTo->xMutexEnd = pFrom->xMutexEnd; + pTo->xMutexFree = pFrom->xMutexFree; + pTo->xMutexEnter = pFrom->xMutexEnter; + pTo->xMutexTry = pFrom->xMutexTry; + pTo->xMutexLeave = pFrom->xMutexLeave; + pTo->xMutexHeld = pFrom->xMutexHeld; + pTo->xMutexNotheld = pFrom->xMutexNotheld; + sqlite3MemoryBarrier(); + pTo->xMutexAlloc = pFrom->xMutexAlloc; + } + assert( sqlite3GlobalConfig.mutex.xMutexInit ); + rc = sqlite3GlobalConfig.mutex.xMutexInit(); + +#ifdef SQLITE_DEBUG + GLOBAL(int, mutexIsInit) = 1; +#endif + + return rc; +} + +/* +** Shutdown the mutex system. This call frees resources allocated by +** sqlite3MutexInit(). +*/ +SQLITE_PRIVATE int sqlite3MutexEnd(void){ + int rc = SQLITE_OK; + if( sqlite3GlobalConfig.mutex.xMutexEnd ){ + rc = sqlite3GlobalConfig.mutex.xMutexEnd(); + } + +#ifdef SQLITE_DEBUG + GLOBAL(int, mutexIsInit) = 0; +#endif + + return rc; +} + +/* +** Retrieve a pointer to a static mutex or allocate a new dynamic one. +*/ +SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int id){ +#ifndef SQLITE_OMIT_AUTOINIT + if( id<=SQLITE_MUTEX_RECURSIVE && sqlite3_initialize() ) return 0; + if( id>SQLITE_MUTEX_RECURSIVE && sqlite3MutexInit() ) return 0; +#endif + assert( sqlite3GlobalConfig.mutex.xMutexAlloc ); + return sqlite3GlobalConfig.mutex.xMutexAlloc(id); +} + +SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){ + if( !sqlite3GlobalConfig.bCoreMutex ){ + return 0; + } + assert( GLOBAL(int, mutexIsInit) ); + assert( sqlite3GlobalConfig.mutex.xMutexAlloc ); + return sqlite3GlobalConfig.mutex.xMutexAlloc(id); +} + +/* +** Free a dynamic mutex. +*/ +SQLITE_API void sqlite3_mutex_free(sqlite3_mutex *p){ + if( p ){ + assert( sqlite3GlobalConfig.mutex.xMutexFree ); + sqlite3GlobalConfig.mutex.xMutexFree(p); + } +} + +/* +** Obtain the mutex p. If some other thread already has the mutex, block +** until it can be obtained. +*/ +SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex *p){ + if( p ){ + assert( sqlite3GlobalConfig.mutex.xMutexEnter ); + sqlite3GlobalConfig.mutex.xMutexEnter(p); + } +} + +/* +** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another +** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY. 
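+**
+** For illustration (editorial sketch), the usual calling pattern around
+** these wrappers is:
+**
+**     sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
+**     sqlite3_mutex_enter(p);        ... or sqlite3_mutex_try(p) ...
+**     ... the protected work ...
+**     sqlite3_mutex_leave(p);
+**     sqlite3_mutex_free(p);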
+*/ +SQLITE_API int sqlite3_mutex_try(sqlite3_mutex *p){ + int rc = SQLITE_OK; + if( p ){ + assert( sqlite3GlobalConfig.mutex.xMutexTry ); + return sqlite3GlobalConfig.mutex.xMutexTry(p); + } + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was previously +** entered by the same thread. The behavior is undefined if the mutex +** is not currently entered. If a NULL pointer is passed as an argument +** this function is a no-op. +*/ +SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex *p){ + if( p ){ + assert( sqlite3GlobalConfig.mutex.xMutexLeave ); + sqlite3GlobalConfig.mutex.xMutexLeave(p); + } +} + +#ifndef NDEBUG +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +SQLITE_API int sqlite3_mutex_held(sqlite3_mutex *p){ + assert( p==0 || sqlite3GlobalConfig.mutex.xMutexHeld ); + return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p); +} +SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex *p){ + assert( p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld ); + return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p); +} +#endif + +#endif /* !defined(SQLITE_MUTEX_OMIT) */ + +/************** End of mutex.c ***********************************************/ +/************** Begin file mutex_noop.c **************************************/ +/* +** 2008 October 07 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes. +** +** This implementation in this file does not provide any mutual +** exclusion and is thus suitable for use only in applications +** that use SQLite in a single thread. The routines defined +** here are place-holders. Applications can substitute working +** mutex routines at start-time using the +** +** sqlite3_config(SQLITE_CONFIG_MUTEX,...) +** +** interface. +** +** If compiled with SQLITE_DEBUG, then additional logic is inserted +** that does error checking on mutexes to make sure they are being +** called correctly. +*/ +/* #include "sqliteInt.h" */ + +#ifndef SQLITE_MUTEX_OMIT + +#ifndef SQLITE_DEBUG +/* +** Stub routines for all mutex methods. +** +** This routines provide no mutual exclusion or error checking. +*/ +static int noopMutexInit(void){ return SQLITE_OK; } +static int noopMutexEnd(void){ return SQLITE_OK; } +static sqlite3_mutex *noopMutexAlloc(int id){ + UNUSED_PARAMETER(id); + return (sqlite3_mutex*)8; +} +static void noopMutexFree(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } +static void noopMutexEnter(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } +static int noopMutexTry(sqlite3_mutex *p){ + UNUSED_PARAMETER(p); + return SQLITE_OK; +} +static void noopMutexLeave(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } + +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){ + static const sqlite3_mutex_methods sMutex = { + noopMutexInit, + noopMutexEnd, + noopMutexAlloc, + noopMutexFree, + noopMutexEnter, + noopMutexTry, + noopMutexLeave, + + 0, + 0, + }; + + return &sMutex; +} +#endif /* !SQLITE_DEBUG */ + +#ifdef SQLITE_DEBUG +/* +** In this implementation, error checking is provided for testing +** and debugging purposes. The mutexes still do not provide any +** mutual exclusion. 
+*/ + +/* +** The mutex object +*/ +typedef struct sqlite3_debug_mutex { + int id; /* The mutex type */ + int cnt; /* Number of entries without a matching leave */ +} sqlite3_debug_mutex; + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use inside assert() statements. +*/ +static int debugMutexHeld(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + return p==0 || p->cnt>0; +} +static int debugMutexNotheld(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + return p==0 || p->cnt==0; +} + +/* +** Initialize and deinitialize the mutex subsystem. +*/ +static int debugMutexInit(void){ return SQLITE_OK; } +static int debugMutexEnd(void){ return SQLITE_OK; } + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. +*/ +static sqlite3_mutex *debugMutexAlloc(int id){ + static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1]; + sqlite3_debug_mutex *pNew = 0; + switch( id ){ + case SQLITE_MUTEX_FAST: + case SQLITE_MUTEX_RECURSIVE: { + pNew = sqlite3Malloc(sizeof(*pNew)); + if( pNew ){ + pNew->id = id; + pNew->cnt = 0; + } + break; + } + default: { +#ifdef SQLITE_ENABLE_API_ARMOR + if( id-2<0 || id-2>=ArraySize(aStatic) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + pNew = &aStatic[id-2]; + pNew->id = id; + break; + } + } + return (sqlite3_mutex*)pNew; +} + +/* +** This routine deallocates a previously allocated mutex. +*/ +static void debugMutexFree(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( p->cnt==0 ); + if( p->id==SQLITE_MUTEX_RECURSIVE || p->id==SQLITE_MUTEX_FAST ){ + sqlite3_free(p); + }else{ +#ifdef SQLITE_ENABLE_API_ARMOR + (void)SQLITE_MISUSE_BKPT; +#endif + } +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +static void debugMutexEnter(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); + p->cnt++; +} +static int debugMutexTry(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); + p->cnt++; + return SQLITE_OK; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. 
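+**
+** (Editorial note: "held" here simply means cnt>0, so entering a
+** SQLITE_MUTEX_FAST mutex twice from the same thread, or leaving a
+** mutex whose cnt is already zero, fails one of these asserts instead
+** of deadlocking.)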
+*/ +static void debugMutexLeave(sqlite3_mutex *pX){ + sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; + assert( debugMutexHeld(pX) ); + p->cnt--; + assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); +} + +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){ + static const sqlite3_mutex_methods sMutex = { + debugMutexInit, + debugMutexEnd, + debugMutexAlloc, + debugMutexFree, + debugMutexEnter, + debugMutexTry, + debugMutexLeave, + + debugMutexHeld, + debugMutexNotheld + }; + + return &sMutex; +} +#endif /* SQLITE_DEBUG */ + +/* +** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation +** is used regardless of the run-time threadsafety setting. +*/ +#ifdef SQLITE_MUTEX_NOOP +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + return sqlite3NoopMutex(); +} +#endif /* defined(SQLITE_MUTEX_NOOP) */ +#endif /* !defined(SQLITE_MUTEX_OMIT) */ + +/************** End of mutex_noop.c ******************************************/ +/************** Begin file mutex_unix.c **************************************/ +/* +** 2007 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for pthreads +*/ +/* #include "sqliteInt.h" */ + +/* +** The code in this file is only used if we are compiling threadsafe +** under unix with pthreads. +** +** Note that this implementation requires a version of pthreads that +** supports recursive mutexes. +*/ +#ifdef SQLITE_MUTEX_PTHREADS + +#include + +/* +** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields +** are necessary under two condidtions: (1) Debug builds and (2) using +** home-grown mutexes. Encapsulate these conditions into a single #define. +*/ +#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX) +# define SQLITE_MUTEX_NREF 1 +#else +# define SQLITE_MUTEX_NREF 0 +#endif + +/* +** Each recursive mutex is an instance of the following structure. +*/ +struct sqlite3_mutex { + pthread_mutex_t mutex; /* Mutex controlling the lock */ +#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) + int id; /* Mutex type */ +#endif +#if SQLITE_MUTEX_NREF + volatile int nRef; /* Number of entrances */ + volatile pthread_t owner; /* Thread that is within this mutex */ + int trace; /* True to trace changes */ +#endif +}; +#if SQLITE_MUTEX_NREF +#define SQLITE3_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER,0,0,(pthread_t)0,0} +#elif defined(SQLITE_ENABLE_API_ARMOR) +#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0 } +#else +#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER } +#endif + +/* +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are +** intended for use only inside assert() statements. On some platforms, +** there might be race conditions that can cause these routines to +** deliver incorrect results. In particular, if pthread_equal() is +** not an atomic operation, then these routines might delivery +** incorrect results. On most platforms, pthread_equal() is a +** comparison of two integers and is therefore atomic. But we are +** told that HPUX is not such a platform. If so, then these routines +** will not always work correctly on HPUX. 
+** +** On those platforms where pthread_equal() is not atomic, SQLite +** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to +** make sure no assert() statements are evaluated and hence these +** routines are never called. +*/ +#if !defined(NDEBUG) || defined(SQLITE_DEBUG) +static int pthreadMutexHeld(sqlite3_mutex *p){ + return (p->nRef!=0 && pthread_equal(p->owner, pthread_self())); +} +static int pthreadMutexNotheld(sqlite3_mutex *p){ + return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0; +} +#endif + +/* +** Try to provide a memory barrier operation, needed for initialization +** and also for the implementation of xShmBarrier in the VFS in cases +** where SQLite is compiled without mutexes. +*/ +SQLITE_PRIVATE void sqlite3MemoryBarrier(void){ +#if defined(SQLITE_MEMORY_BARRIER) + SQLITE_MEMORY_BARRIER; +#elif defined(__GNUC__) && GCC_VERSION>=4001000 + __sync_synchronize(); +#endif +} + +/* +** Initialize and deinitialize the mutex subsystem. +*/ +static int pthreadMutexInit(void){ return SQLITE_OK; } +static int pthreadMutexEnd(void){ return SQLITE_OK; } + +/* +** The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. If it returns NULL +** that means that a mutex could not be allocated. SQLite +** will unwind its stack and return an error. The argument +** to sqlite3_mutex_alloc() is one of these integer constants: +** +**
+** <ul>
+** <li>  SQLITE_MUTEX_FAST
+** <li>  SQLITE_MUTEX_RECURSIVE
+** <li>  SQLITE_MUTEX_STATIC_MASTER
+** <li>  SQLITE_MUTEX_STATIC_MEM
+** <li>  SQLITE_MUTEX_STATIC_OPEN
+** <li>  SQLITE_MUTEX_STATIC_PRNG
+** <li>  SQLITE_MUTEX_STATIC_LRU
+** <li>  SQLITE_MUTEX_STATIC_PMEM
+** <li>  SQLITE_MUTEX_STATIC_APP1
+** <li>  SQLITE_MUTEX_STATIC_APP2
+** <li>  SQLITE_MUTEX_STATIC_APP3
+** <li>  SQLITE_MUTEX_STATIC_VFS1
+** <li>  SQLITE_MUTEX_STATIC_VFS2
+** <li>  SQLITE_MUTEX_STATIC_VFS3
+** </ul>
+** +** The first two constants cause sqlite3_mutex_alloc() to create +** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. But SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** The other allowed parameters to sqlite3_mutex_alloc() each return +** a pointer to a static preexisting mutex. Six static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. But for the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +*/ +static sqlite3_mutex *pthreadMutexAlloc(int iType){ + static sqlite3_mutex staticMutexes[] = { + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER, + SQLITE3_MUTEX_INITIALIZER + }; + sqlite3_mutex *p; + switch( iType ){ + case SQLITE_MUTEX_RECURSIVE: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + /* If recursive mutexes are not available, we will have to + ** build our own. See below. */ + pthread_mutex_init(&p->mutex, 0); +#else + /* Use a recursive mutex if it is available */ + pthread_mutexattr_t recursiveAttr; + pthread_mutexattr_init(&recursiveAttr); + pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&p->mutex, &recursiveAttr); + pthread_mutexattr_destroy(&recursiveAttr); +#endif + } + break; + } + case SQLITE_MUTEX_FAST: { + p = sqlite3MallocZero( sizeof(*p) ); + if( p ){ + pthread_mutex_init(&p->mutex, 0); + } + break; + } + default: { +#ifdef SQLITE_ENABLE_API_ARMOR + if( iType-2<0 || iType-2>=ArraySize(staticMutexes) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + p = &staticMutexes[iType-2]; + break; + } + } +#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) + if( p ) p->id = iType; +#endif + return p; +} + + +/* +** This routine deallocates a previously +** allocated mutex. SQLite is careful to deallocate every +** mutex that it allocates. +*/ +static void pthreadMutexFree(sqlite3_mutex *p){ + assert( p->nRef==0 ); +#if SQLITE_ENABLE_API_ARMOR + if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ) +#endif + { + pthread_mutex_destroy(&p->mutex); + sqlite3_free(p); + } +#ifdef SQLITE_ENABLE_API_ARMOR + else{ + (void)SQLITE_MISUSE_BKPT; + } +#endif +} + +/* +** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. 
If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK +** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can +** be entered multiple times by the same thread. In such cases the, +** mutex must be exited an equal number of times before another thread +** can enter. If the same thread tries to enter any other kind of mutex +** more than once, the behavior is undefined. +*/ +static void pthreadMutexEnter(sqlite3_mutex *p){ + assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); + +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + /* If recursive mutexes are not available, then we have to grow + ** our own. This implementation assumes that pthread_equal() + ** is atomic - that it cannot be deceived into thinking self + ** and p->owner are equal if p->owner changes between two values + ** that are not equal to self while the comparison is taking place. + ** This implementation also assumes a coherent cache - that + ** separate processes cannot read different values from the same + ** address at the same time. If either of these two conditions + ** are not met, then the mutexes will fail and problems will result. + */ + { + pthread_t self = pthread_self(); + if( p->nRef>0 && pthread_equal(p->owner, self) ){ + p->nRef++; + }else{ + pthread_mutex_lock(&p->mutex); + assert( p->nRef==0 ); + p->owner = self; + p->nRef = 1; + } + } +#else + /* Use the built-in recursive mutexes if they are available. + */ + pthread_mutex_lock(&p->mutex); +#if SQLITE_MUTEX_NREF + assert( p->nRef>0 || p->owner==0 ); + p->owner = pthread_self(); + p->nRef++; +#endif +#endif + +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif +} +static int pthreadMutexTry(sqlite3_mutex *p){ + int rc; + assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); + +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + /* If recursive mutexes are not available, then we have to grow + ** our own. This implementation assumes that pthread_equal() + ** is atomic - that it cannot be deceived into thinking self + ** and p->owner are equal if p->owner changes between two values + ** that are not equal to self while the comparison is taking place. + ** This implementation also assumes a coherent cache - that + ** separate processes cannot read different values from the same + ** address at the same time. If either of these two conditions + ** are not met, then the mutexes will fail and problems will result. + */ + { + pthread_t self = pthread_self(); + if( p->nRef>0 && pthread_equal(p->owner, self) ){ + p->nRef++; + rc = SQLITE_OK; + }else if( pthread_mutex_trylock(&p->mutex)==0 ){ + assert( p->nRef==0 ); + p->owner = self; + p->nRef = 1; + rc = SQLITE_OK; + }else{ + rc = SQLITE_BUSY; + } + } +#else + /* Use the built-in recursive mutexes if they are available. + */ + if( pthread_mutex_trylock(&p->mutex)==0 ){ +#if SQLITE_MUTEX_NREF + p->owner = pthread_self(); + p->nRef++; +#endif + rc = SQLITE_OK; + }else{ + rc = SQLITE_BUSY; + } +#endif + +#ifdef SQLITE_DEBUG + if( rc==SQLITE_OK && p->trace ){ + printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. 
SQLite will never do either. +*/ +static void pthreadMutexLeave(sqlite3_mutex *p){ + assert( pthreadMutexHeld(p) ); +#if SQLITE_MUTEX_NREF + p->nRef--; + if( p->nRef==0 ) p->owner = 0; +#endif + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); + +#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX + if( p->nRef==0 ){ + pthread_mutex_unlock(&p->mutex); + } +#else + pthread_mutex_unlock(&p->mutex); +#endif + +#ifdef SQLITE_DEBUG + if( p->trace ){ + printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); + } +#endif +} + +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + static const sqlite3_mutex_methods sMutex = { + pthreadMutexInit, + pthreadMutexEnd, + pthreadMutexAlloc, + pthreadMutexFree, + pthreadMutexEnter, + pthreadMutexTry, + pthreadMutexLeave, +#ifdef SQLITE_DEBUG + pthreadMutexHeld, + pthreadMutexNotheld +#else + 0, + 0 +#endif + }; + + return &sMutex; +} + +#endif /* SQLITE_MUTEX_PTHREADS */ + +/************** End of mutex_unix.c ******************************************/ +/************** Begin file mutex_w32.c ***************************************/ +/* +** 2007 August 14 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the C functions that implement mutexes for Win32. +*/ +/* #include "sqliteInt.h" */ + +#if SQLITE_OS_WIN +/* +** Include code that is common to all os_*.c files +*/ +/************** Include os_common.h in the middle of mutex_w32.c *************/ +/************** Begin file os_common.h ***************************************/ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains macros and a little bit of code that is common to +** all of the platform-specific files (os_*.c) and is #included into those +** files. +** +** This file should be #included by the os_*.c files only. It is not a +** general purpose header file. +*/ +#ifndef _OS_COMMON_H_ +#define _OS_COMMON_H_ + +/* +** At least two bugs have slipped in because we changed the MEMORY_DEBUG +** macro to SQLITE_DEBUG and some older makefiles have not yet made the +** switch. The following code should catch this problem at compile-time. +*/ +#ifdef MEMORY_DEBUG +# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." +#endif + +/* +** Macros for performance tracing. Normally turned off. Only works +** on i486 hardware. +*/ +#ifdef SQLITE_PERFORMANCE_TRACE + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +/************** Include hwtime.h in the middle of os_common.h ****************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. 
+** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long val; + __asm__ __volatile__ ("rdtsc" : "=A" (val)); + return val; + } + +#elif (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + #error Need implementation of sqlite3Hwtime() for your platform. + + /* + ** To compile without implementing sqlite3Hwtime() for your platform, + ** you can remove the above #error and use the following + ** stub function. You will lose timing support for many + ** of the debugging and testing utilities, but it should at + ** least compile and run. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in os_common.h ******************/ + +static sqlite_uint64 g_start; +static sqlite_uint64 g_elapsed; +#define TIMER_START g_start=sqlite3Hwtime() +#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start +#define TIMER_ELAPSED g_elapsed +#else +#define TIMER_START +#define TIMER_END +#define TIMER_ELAPSED ((sqlite_uint64)0) +#endif + +/* +** If we compile with the SQLITE_TEST macro set, then the following block +** of code will give us the ability to simulate a disk I/O error. This +** is used for testing the I/O recovery logic. 
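+**
+** A minimal sketch of how these hooks are typically used inside a VFS
+** I/O primitive (the unixRead() frame shown is illustrative, not a
+** definition from this file):
+**
+**    static int unixRead(sqlite3_file *id, void *pBuf, int amt, i64 ofst){
+**      ...
+**      SimulateIOError( return SQLITE_IOERR_READ );  /* test hook */
+**      ...
+**    }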
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_io_error_hit;
+SQLITE_API extern int sqlite3_io_error_hardhit;
+SQLITE_API extern int sqlite3_io_error_pending;
+SQLITE_API extern int sqlite3_io_error_persist;
+SQLITE_API extern int sqlite3_io_error_benign;
+SQLITE_API extern int sqlite3_diskfull_pending;
+SQLITE_API extern int sqlite3_diskfull;
+#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X)
+#define SimulateIOError(CODE)  \
+  if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \
+       || sqlite3_io_error_pending-- == 1 )  \
+              { local_ioerr(); CODE; }
+static void local_ioerr(){
+  IOTRACE(("IOERR\n"));
+  sqlite3_io_error_hit++;
+  if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++;
+}
+#define SimulateDiskfullError(CODE) \
+   if( sqlite3_diskfull_pending ){ \
+     if( sqlite3_diskfull_pending == 1 ){ \
+       local_ioerr(); \
+       sqlite3_diskfull = 1; \
+       sqlite3_io_error_hit = 1; \
+       CODE; \
+     }else{ \
+       sqlite3_diskfull_pending--; \
+     } \
+   }
+#else
+#define SimulateIOErrorBenign(X)
+#define SimulateIOError(A)
+#define SimulateDiskfullError(A)
+#endif /* defined(SQLITE_TEST) */
+
+/*
+** When testing, keep a count of the number of open files.
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_open_file_count;
+#define OpenCounter(X)  sqlite3_open_file_count+=(X)
+#else
+#define OpenCounter(X)
+#endif /* defined(SQLITE_TEST) */
+
+#endif /* !defined(_OS_COMMON_H_) */
+
+/************** End of os_common.h *******************************************/
+/************** Continuing where we left off in mutex_w32.c ******************/
+
+/*
+** Include the header file for the Windows VFS.
+*/
+/************** Include os_win.h in the middle of mutex_w32.c ****************/
+/************** Begin file os_win.h ******************************************/
+/*
+** 2013 November 25
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code that is specific to Windows.
+*/
+#ifndef SQLITE_OS_WIN_H
+#define SQLITE_OS_WIN_H
+
+/*
+** Include the primary Windows SDK header file.
+*/
+#include "windows.h"
+
+#ifdef __CYGWIN__
+# include <sys/cygwin.h>
+# include <errno.h> /* amalgamator: dontcache */
+#endif
+
+/*
+** Determine if we are dealing with Windows NT.
+**
+** We ought to be able to determine if we are compiling for Windows 9x or
+** Windows NT using the _WIN32_WINNT macro as follows:
+**
+** #if defined(_WIN32_WINNT)
+** # define SQLITE_OS_WINNT 1
+** #else
+** # define SQLITE_OS_WINNT 0
+** #endif
+**
+** However, Visual Studio 2005 does not set _WIN32_WINNT by default, as
+** it ought to, so the above test does not work.  We'll just assume that
+** everything is Windows NT unless the programmer explicitly says otherwise
+** by setting SQLITE_OS_WINNT to 0.
+*/
+#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT)
+# define SQLITE_OS_WINNT 1
+#endif
+
+/*
+** Determine if we are dealing with Windows CE - which has a much reduced
+** API.
+*/
+#if defined(_WIN32_WCE)
+# define SQLITE_OS_WINCE 1
+#else
+# define SQLITE_OS_WINCE 0
+#endif
+
+/*
+** Determine if we are dealing with WinRT, which provides only a subset of
+** the full Win32 API.
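+**
+** A build targeting WinRT is expected to define the symbol itself; a
+** hypothetical MSVC invocation might look like:
+**
+**    cl /DSQLITE_OS_WINRT=1 /DSQLITE_OS_WIN=1 /c sqlite3.c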
+*/
+#if !defined(SQLITE_OS_WINRT)
+# define SQLITE_OS_WINRT 0
+#endif
+
+/*
+** For WinCE, some API function parameters do not appear to be declared as
+** volatile.
+*/
+#if SQLITE_OS_WINCE
+# define SQLITE_WIN32_VOLATILE
+#else
+# define SQLITE_WIN32_VOLATILE volatile
+#endif
+
+/*
+** For some Windows sub-platforms, the _beginthreadex() / _endthreadex()
+** functions are not available (e.g. those not using MSVC, Cygwin, etc).
+*/
+#if SQLITE_OS_WIN && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
+    SQLITE_THREADSAFE>0 && !defined(__CYGWIN__)
+# define SQLITE_OS_WIN_THREADS 1
+#else
+# define SQLITE_OS_WIN_THREADS 0
+#endif
+
+#endif /* SQLITE_OS_WIN_H */
+
+/************** End of os_win.h **********************************************/
+/************** Continuing where we left off in mutex_w32.c ******************/
+#endif
+
+/*
+** The code in this file is only used if we are compiling multithreaded
+** on a Win32 system.
+*/
+#ifdef SQLITE_MUTEX_W32
+
+/*
+** Each recursive mutex is an instance of the following structure.
+*/
+struct sqlite3_mutex {
+  CRITICAL_SECTION mutex;    /* Mutex controlling the lock */
+  int id;                    /* Mutex type */
+#ifdef SQLITE_DEBUG
+  volatile int nRef;         /* Number of entrances */
+  volatile DWORD owner;      /* Thread holding this mutex */
+  volatile int trace;        /* True to trace changes */
+#endif
+};
+
+/*
+** These are the initializer values used when declaring a "static" mutex
+** on Win32.  It should be noted that all mutexes require initialization
+** on the Win32 platform.
+*/
+#define SQLITE_W32_MUTEX_INITIALIZER { 0 }
+
+#ifdef SQLITE_DEBUG
+#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, \
+                                    0L, (DWORD)0, 0 }
+#else
+#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 }
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
+** intended for use only inside assert() statements.
+*/
+static int winMutexHeld(sqlite3_mutex *p){
+  return p->nRef!=0 && p->owner==GetCurrentThreadId();
+}
+
+static int winMutexNotheld2(sqlite3_mutex *p, DWORD tid){
+  return p->nRef==0 || p->owner!=tid;
+}
+
+static int winMutexNotheld(sqlite3_mutex *p){
+  DWORD tid = GetCurrentThreadId();
+  return winMutexNotheld2(p, tid);
+}
+#endif
+
+/*
+** Try to provide a memory barrier operation, needed for initialization
+** and also for the xShmBarrier method of the VFS in cases when SQLite is
+** compiled without mutexes (SQLITE_THREADSAFE=0).
+*/
+SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
+#if defined(SQLITE_MEMORY_BARRIER)
+  SQLITE_MEMORY_BARRIER;
+#elif defined(__GNUC__)
+  __sync_synchronize();
+#elif MSVC_VERSION>=1300
+  _ReadWriteBarrier();
+#elif defined(MemoryBarrier)
+  MemoryBarrier();
+#endif
+}
+
+/*
+** Initialize and deinitialize the mutex subsystem.
+*/
+static sqlite3_mutex winMutex_staticMutexes[] = {
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER,
+  SQLITE3_MUTEX_INITIALIZER
+};
+
+static int winMutex_isInit = 0;
+static int winMutex_isNt = -1; /* <0 means "need to query" */
+
+/* As the winMutexInit() and winMutexEnd() functions are called as part
+** of the sqlite3_initialize() and sqlite3_shutdown() processing, the
+** "interlocked" magic used here is probably not strictly necessary.
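+**
+** The gate below follows the usual compare-and-swap "run once" shape
+** (sketch with illustrative names; only the first caller runs the
+** one-time setup, everyone else waits for it to become visible):
+**
+**    if( InterlockedCompareExchange(&gate, 1, 0)==0 ){
+**      ...one-time setup...
+**      setupDone = 1;
+**    }else{
+**      while( !setupDone ) sqlite3_win32_sleep(1);
+**    }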
+*/
+static LONG SQLITE_WIN32_VOLATILE winMutex_lock = 0;
+
+SQLITE_API int sqlite3_win32_is_nt(void); /* os_win.c */
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
+
+static int winMutexInit(void){
+  /* The first to increment to 1 does actual initialization */
+  if( InterlockedCompareExchange(&winMutex_lock, 1, 0)==0 ){
+    int i;
+    for(i=0; i<ArraySize(winMutex_staticMutexes); i++){
+#if SQLITE_OS_WINRT
+      InitializeCriticalSectionEx(&winMutex_staticMutexes[i].mutex, 0, 0);
+#else
+      InitializeCriticalSection(&winMutex_staticMutexes[i].mutex);
+#endif
+    }
+    winMutex_isInit = 1;
+  }else{
+    /* Another thread is (in the process of) initializing the static
+    ** mutexes */
+    while( !winMutex_isInit ){
+      sqlite3_win32_sleep(1);
+    }
+  }
+  return SQLITE_OK;
+}
+
+static int winMutexEnd(void){
+  /* The first to decrement to 0 does actual shutdown
+  ** (which should be the last to shutdown.) */
+  if( InterlockedCompareExchange(&winMutex_lock, 0, 1)==1 ){
+    if( winMutex_isInit==1 ){
+      int i;
+      for(i=0; i<ArraySize(winMutex_staticMutexes); i++){
+        DeleteCriticalSection(&winMutex_staticMutexes[i].mutex);
+      }
+      winMutex_isInit = 0;
+    }
+  }
+  return SQLITE_OK;
+}
+
+/*
+** The sqlite3_mutex_alloc() routine allocates a new
+** mutex and returns a pointer to it.  If it returns NULL
+** that means that a mutex could not be allocated.  SQLite
+** will unwind its stack and return an error.  The argument
+** to sqlite3_mutex_alloc() is one of these integer constants:
+**
  • SQLITE_MUTEX_FAST +**
  • SQLITE_MUTEX_RECURSIVE +**
  • SQLITE_MUTEX_STATIC_MASTER +**
  • SQLITE_MUTEX_STATIC_MEM +**
  • SQLITE_MUTEX_STATIC_OPEN +**
  • SQLITE_MUTEX_STATIC_PRNG +**
  • SQLITE_MUTEX_STATIC_LRU +**
  • SQLITE_MUTEX_STATIC_PMEM +**
  • SQLITE_MUTEX_STATIC_APP1 +**
  • SQLITE_MUTEX_STATIC_APP2 +**
  • SQLITE_MUTEX_STATIC_APP3 +**
  • SQLITE_MUTEX_STATIC_VFS1 +**
  • SQLITE_MUTEX_STATIC_VFS2 +**
  • SQLITE_MUTEX_STATIC_VFS3
+**
+**
+** The first two constants cause sqlite3_mutex_alloc() to create
+** a new mutex.  The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
+** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
+** The mutex implementation does not need to make a distinction
+** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
+** not want to.  But SQLite will only request a recursive mutex in
+** cases where it really needs one.  If a faster non-recursive mutex
+** implementation is available on the host platform, the mutex subsystem
+** might return such a mutex in response to SQLITE_MUTEX_FAST.
+**
+** The other allowed parameters to sqlite3_mutex_alloc() each return
+** a pointer to a static preexisting mutex.  Six static mutexes are
+** used by the current version of SQLite.  Future versions of SQLite
+** may add additional static mutexes.  Static mutexes are for internal
+** use by SQLite only.  Applications that use SQLite mutexes should
+** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
+** SQLITE_MUTEX_RECURSIVE.
+**
+** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
+** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
+** returns a different mutex on every call.  But for the static
+** mutex types, the same mutex is returned on every call that has
+** the same type number.
+*/
+static sqlite3_mutex *winMutexAlloc(int iType){
+  sqlite3_mutex *p;
+
+  switch( iType ){
+    case SQLITE_MUTEX_FAST:
+    case SQLITE_MUTEX_RECURSIVE: {
+      p = sqlite3MallocZero( sizeof(*p) );
+      if( p ){
+        p->id = iType;
+#ifdef SQLITE_DEBUG
+#ifdef SQLITE_WIN32_MUTEX_TRACE_DYNAMIC
+        p->trace = 1;
+#endif
+#endif
+#if SQLITE_OS_WINRT
+        InitializeCriticalSectionEx(&p->mutex, 0, 0);
+#else
+        InitializeCriticalSection(&p->mutex);
+#endif
+      }
+      break;
+    }
+    default: {
+#ifdef SQLITE_ENABLE_API_ARMOR
+      if( iType-2<0 || iType-2>=ArraySize(winMutex_staticMutexes) ){
+        (void)SQLITE_MISUSE_BKPT;
+        return 0;
+      }
+#endif
+      p = &winMutex_staticMutexes[iType-2];
+      p->id = iType;
+#ifdef SQLITE_DEBUG
+#ifdef SQLITE_WIN32_MUTEX_TRACE_STATIC
+      p->trace = 1;
+#endif
+#endif
+      break;
+    }
+  }
+  return p;
+}
+
+
+/*
+** This routine deallocates a previously
+** allocated mutex.  SQLite is careful to deallocate every
+** mutex that it allocates.
+*/
+static void winMutexFree(sqlite3_mutex *p){
+  assert( p );
+  assert( p->nRef==0 && p->owner==0 );
+  if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ){
+    DeleteCriticalSection(&p->mutex);
+    sqlite3_free(p);
+  }else{
+#ifdef SQLITE_ENABLE_API_ARMOR
+    (void)SQLITE_MISUSE_BKPT;
+#endif
+  }
+}
+
+/*
+** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
+** to enter a mutex.  If another thread is already within the mutex,
+** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
+** SQLITE_BUSY.  The sqlite3_mutex_try() interface returns SQLITE_OK
+** upon successful entry.  Mutexes created using SQLITE_MUTEX_RECURSIVE can
+** be entered multiple times by the same thread.  In such cases the
+** mutex must be exited an equal number of times before another thread
+** can enter.  If the same thread tries to enter any other kind of mutex
+** more than once, the behavior is undefined.
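+**
+** A balanced sequence on a recursive mutex, using the public API
+** (sketch; illustrative only):
+**
+**    sqlite3_mutex *m = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
+**    sqlite3_mutex_enter(m);
+**    sqlite3_mutex_enter(m);    /* same thread may re-enter           */
+**    sqlite3_mutex_leave(m);    /* one leave per enter is required... */
+**    sqlite3_mutex_leave(m);    /* ...before another thread can enter */
+**    sqlite3_mutex_free(m);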
+*/ +static void winMutexEnter(sqlite3_mutex *p){ +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + DWORD tid = GetCurrentThreadId(); +#endif +#ifdef SQLITE_DEBUG + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); +#else + assert( p ); +#endif + assert( winMutex_isInit==1 ); + EnterCriticalSection(&p->mutex); +#ifdef SQLITE_DEBUG + assert( p->nRef>0 || p->owner==0 ); + p->owner = tid; + p->nRef++; + if( p->trace ){ + OSTRACE(("ENTER-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n", + tid, p, p->trace, p->nRef)); + } +#endif +} + +static int winMutexTry(sqlite3_mutex *p){ +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + DWORD tid = GetCurrentThreadId(); +#endif + int rc = SQLITE_BUSY; + assert( p ); + assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); + /* + ** The sqlite3_mutex_try() routine is very rarely used, and when it + ** is used it is merely an optimization. So it is OK for it to always + ** fail. + ** + ** The TryEnterCriticalSection() interface is only available on WinNT. + ** And some windows compilers complain if you try to use it without + ** first doing some #defines that prevent SQLite from building on Win98. + ** For that reason, we will omit this optimization for now. See + ** ticket #2685. + */ +#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0400 + assert( winMutex_isInit==1 ); + assert( winMutex_isNt>=-1 && winMutex_isNt<=1 ); + if( winMutex_isNt<0 ){ + winMutex_isNt = sqlite3_win32_is_nt(); + } + assert( winMutex_isNt==0 || winMutex_isNt==1 ); + if( winMutex_isNt && TryEnterCriticalSection(&p->mutex) ){ +#ifdef SQLITE_DEBUG + p->owner = tid; + p->nRef++; +#endif + rc = SQLITE_OK; + } +#else + UNUSED_PARAMETER(p); +#endif +#ifdef SQLITE_DEBUG + if( p->trace ){ + OSTRACE(("TRY-MUTEX tid=%lu, mutex=%p (%d), owner=%lu, nRef=%d, rc=%s\n", + tid, p, p->trace, p->owner, p->nRef, sqlite3ErrName(rc))); + } +#endif + return rc; +} + +/* +** The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered or +** is not currently allocated. SQLite will never do either. +*/ +static void winMutexLeave(sqlite3_mutex *p){ +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + DWORD tid = GetCurrentThreadId(); +#endif + assert( p ); +#ifdef SQLITE_DEBUG + assert( p->nRef>0 ); + assert( p->owner==tid ); + p->nRef--; + if( p->nRef==0 ) p->owner = 0; + assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); +#endif + assert( winMutex_isInit==1 ); + LeaveCriticalSection(&p->mutex); +#ifdef SQLITE_DEBUG + if( p->trace ){ + OSTRACE(("LEAVE-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n", + tid, p, p->trace, p->nRef)); + } +#endif +} + +SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ + static const sqlite3_mutex_methods sMutex = { + winMutexInit, + winMutexEnd, + winMutexAlloc, + winMutexFree, + winMutexEnter, + winMutexTry, + winMutexLeave, +#ifdef SQLITE_DEBUG + winMutexHeld, + winMutexNotheld +#else + 0, + 0 +#endif + }; + return &sMutex; +} + +#endif /* SQLITE_MUTEX_W32 */ + +/************** End of mutex_w32.c *******************************************/ +/************** Begin file malloc.c ******************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** +** Memory allocation functions used throughout sqlite. +*/ +/* #include "sqliteInt.h" */ +/* #include */ + +/* +** Attempt to release up to n bytes of non-essential memory currently +** held by SQLite. An example of non-essential memory is memory used to +** cache database pages that are not currently in use. +*/ +SQLITE_API int sqlite3_release_memory(int n){ +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + return sqlite3PcacheReleaseMemory(n); +#else + /* IMPLEMENTATION-OF: R-34391-24921 The sqlite3_release_memory() routine + ** is a no-op returning zero if SQLite is not compiled with + ** SQLITE_ENABLE_MEMORY_MANAGEMENT. */ + UNUSED_PARAMETER(n); + return 0; +#endif +} + +/* +** An instance of the following object records the location of +** each unused scratch buffer. +*/ +typedef struct ScratchFreeslot { + struct ScratchFreeslot *pNext; /* Next unused scratch buffer */ +} ScratchFreeslot; + +/* +** State information local to the memory allocation subsystem. +*/ +static SQLITE_WSD struct Mem0Global { + sqlite3_mutex *mutex; /* Mutex to serialize access */ + sqlite3_int64 alarmThreshold; /* The soft heap limit */ + + /* + ** Pointers to the end of sqlite3GlobalConfig.pScratch memory + ** (so that a range test can be used to determine if an allocation + ** being freed came from pScratch) and a pointer to the list of + ** unused scratch allocations. + */ + void *pScratchEnd; + ScratchFreeslot *pScratchFree; + u32 nScratchFree; + + /* + ** True if heap is nearly "full" where "full" is defined by the + ** sqlite3_soft_heap_limit() setting. + */ + int nearlyFull; +} mem0 = { 0, 0, 0, 0, 0, 0 }; + +#define mem0 GLOBAL(struct Mem0Global, mem0) + +/* +** Return the memory allocator mutex. sqlite3_status() needs it. +*/ +SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void){ + return mem0.mutex; +} + +#ifndef SQLITE_OMIT_DEPRECATED +/* +** Deprecated external interface. It used to set an alarm callback +** that was invoked when memory usage grew too large. Now it is a +** no-op. +*/ +SQLITE_API int sqlite3_memory_alarm( + void(*xCallback)(void *pArg, sqlite3_int64 used,int N), + void *pArg, + sqlite3_int64 iThreshold +){ + (void)xCallback; + (void)pArg; + (void)iThreshold; + return SQLITE_OK; +} +#endif + +/* +** Set the soft heap-size limit for the library. Passing a zero or +** negative value indicates no limit. +*/ +SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 n){ + sqlite3_int64 priorLimit; + sqlite3_int64 excess; + sqlite3_int64 nUsed; +#ifndef SQLITE_OMIT_AUTOINIT + int rc = sqlite3_initialize(); + if( rc ) return -1; +#endif + sqlite3_mutex_enter(mem0.mutex); + priorLimit = mem0.alarmThreshold; + if( n<0 ){ + sqlite3_mutex_leave(mem0.mutex); + return priorLimit; + } + mem0.alarmThreshold = n; + nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); + mem0.nearlyFull = (n>0 && n<=nUsed); + sqlite3_mutex_leave(mem0.mutex); + excess = sqlite3_memory_used() - n; + if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff)); + return priorLimit; +} +SQLITE_API void sqlite3_soft_heap_limit(int n){ + if( n<0 ) n = 0; + sqlite3_soft_heap_limit64(n); +} + +/* +** Initialize the memory allocation subsystem. 
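+**
+** The scratch region consumed below is supplied by the application
+** before sqlite3_initialize(), along the lines of (sketch, with
+** illustrative sizing):
+**
+**    static char aScratch[4*6000];
+**    sqlite3_config(SQLITE_CONFIG_SCRATCH, aScratch, 6000, 4);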
+*/
+SQLITE_PRIVATE int sqlite3MallocInit(void){
+  int rc;
+  if( sqlite3GlobalConfig.m.xMalloc==0 ){
+    sqlite3MemSetDefault();
+  }
+  memset(&mem0, 0, sizeof(mem0));
+  mem0.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
+  if( sqlite3GlobalConfig.pScratch && sqlite3GlobalConfig.szScratch>=100
+      && sqlite3GlobalConfig.nScratch>0 ){
+    int i, n, sz;
+    ScratchFreeslot *pSlot;
+    sz = ROUNDDOWN8(sqlite3GlobalConfig.szScratch);
+    sqlite3GlobalConfig.szScratch = sz;
+    pSlot = (ScratchFreeslot*)sqlite3GlobalConfig.pScratch;
+    n = sqlite3GlobalConfig.nScratch;
+    mem0.pScratchFree = pSlot;
+    mem0.nScratchFree = n;
+    for(i=0; i<n-1; i++){
+      pSlot->pNext = (ScratchFreeslot*)(sz+(char*)pSlot);
+      pSlot = pSlot->pNext;
+    }
+    pSlot->pNext = 0;
+    mem0.pScratchEnd = (void*)&pSlot[1];
+  }else{
+    mem0.pScratchEnd = 0;
+    sqlite3GlobalConfig.pScratch = 0;
+    sqlite3GlobalConfig.szScratch = 0;
+    sqlite3GlobalConfig.nScratch = 0;
+  }
+  if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512
+      || sqlite3GlobalConfig.nPage<=0 ){
+    sqlite3GlobalConfig.pPage = 0;
+    sqlite3GlobalConfig.szPage = 0;
+  }
+  rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData);
+  if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0));
+  return rc;
+}
+
+/*
+** Return true if the heap is currently under memory pressure - in other
+** words if the amount of heap used is close to the limit set by
+** sqlite3_soft_heap_limit().
+*/
+SQLITE_PRIVATE int sqlite3HeapNearlyFull(void){
+  return mem0.nearlyFull;
+}
+
+/*
+** Deinitialize the memory allocation subsystem.
+*/
+SQLITE_PRIVATE void sqlite3MallocEnd(void){
+  if( sqlite3GlobalConfig.m.xShutdown ){
+    sqlite3GlobalConfig.m.xShutdown(sqlite3GlobalConfig.m.pAppData);
+  }
+  memset(&mem0, 0, sizeof(mem0));
+}
+
+/*
+** Return the amount of memory currently checked out.
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void){
+  sqlite3_int64 res, mx;
+  sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, 0);
+  return res;
+}
+
+/*
+** Return the maximum amount of memory that has ever been
+** checked out since either the beginning of this process
+** or since the most recent reset.
+*/
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag){
+  sqlite3_int64 res, mx;
+  sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, resetFlag);
+  return mx;
+}
+
+/*
+** Trigger the alarm
+*/
+static void sqlite3MallocAlarm(int nByte){
+  if( mem0.alarmThreshold<=0 ) return;
+  sqlite3_mutex_leave(mem0.mutex);
+  sqlite3_release_memory(nByte);
+  sqlite3_mutex_enter(mem0.mutex);
+}
+
+/*
+** Do a memory allocation with statistics and alarms.  Assume the
+** lock is already held.
+*/
+static void mallocWithAlarm(int n, void **pp){
+  void *p;
+  int nFull;
+  assert( sqlite3_mutex_held(mem0.mutex) );
+  assert( n>0 );
+
+  /* In Firefox (circa 2017-02-08), xRoundup() is remapped to an internal
+  ** implementation of malloc_good_size(), which must be called in debug
+  ** mode and specifically when the DMD "Dark Matter Detector" is enabled
+  ** or else a crash results.  Hence, do not attempt to optimize out the
+  ** following xRoundup() call.
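+  **
+  ** (The contract assumed of xRoundup() here is that xRoundup(n)>=n for
+  ** all n>0, so nFull never under-sizes the request; the default
+  ** allocator, for instance, rounds up to an 8-byte boundary, giving
+  ** xRoundup(100)==104.)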
*/ + nFull = sqlite3GlobalConfig.m.xRoundup(n); + + sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, n); + if( mem0.alarmThreshold>0 ){ + sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); + if( nUsed >= mem0.alarmThreshold - nFull ){ + mem0.nearlyFull = 1; + sqlite3MallocAlarm(nFull); + }else{ + mem0.nearlyFull = 0; + } + } + p = sqlite3GlobalConfig.m.xMalloc(nFull); +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT + if( p==0 && mem0.alarmThreshold>0 ){ + sqlite3MallocAlarm(nFull); + p = sqlite3GlobalConfig.m.xMalloc(nFull); + } +#endif + if( p ){ + nFull = sqlite3MallocSize(p); + sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nFull); + sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1); + } + *pp = p; +} + +/* +** Allocate memory. This routine is like sqlite3_malloc() except that it +** assumes the memory subsystem has already been initialized. +*/ +SQLITE_PRIVATE void *sqlite3Malloc(u64 n){ + void *p; + if( n==0 || n>=0x7fffff00 ){ + /* A memory allocation of a number of bytes which is near the maximum + ** signed integer value might cause an integer overflow inside of the + ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving + ** 255 bytes of overhead. SQLite itself will never use anything near + ** this amount. The only way to reach the limit is with sqlite3_malloc() */ + p = 0; + }else if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + mallocWithAlarm((int)n, &p); + sqlite3_mutex_leave(mem0.mutex); + }else{ + p = sqlite3GlobalConfig.m.xMalloc((int)n); + } + assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-11148-40995 */ + return p; +} + +/* +** This version of the memory allocation is for use by the application. +** First make sure the memory subsystem is initialized, then do the +** allocation. +*/ +SQLITE_API void *sqlite3_malloc(int n){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return n<=0 ? 0 : sqlite3Malloc(n); +} +SQLITE_API void *sqlite3_malloc64(sqlite3_uint64 n){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return sqlite3Malloc(n); +} + +/* +** Each thread may only have a single outstanding allocation from +** xScratchMalloc(). We verify this constraint in the single-threaded +** case by setting scratchAllocOut to 1 when an allocation +** is outstanding clearing it when the allocation is freed. +*/ +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) +static int scratchAllocOut = 0; +#endif + + +/* +** Allocate memory that is to be used and released right away. +** This routine is similar to alloca() in that it is not intended +** for situations where the memory might be held long-term. This +** routine is intended to get memory to old large transient data +** structures that would not normally fit on the stack of an +** embedded processor. 
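+**
+** The intended call pattern is strictly bracketed (sketch; illustrative
+** only - scratch memory is handed back before the caller returns):
+**
+**    void *p = sqlite3ScratchMalloc(nByte);
+**    if( p ){
+**      ...transient use of p...
+**      sqlite3ScratchFree(p);
+**    }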
+*/ +SQLITE_PRIVATE void *sqlite3ScratchMalloc(int n){ + void *p; + assert( n>0 ); + + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusHighwater(SQLITE_STATUS_SCRATCH_SIZE, n); + if( mem0.nScratchFree && sqlite3GlobalConfig.szScratch>=n ){ + p = mem0.pScratchFree; + mem0.pScratchFree = mem0.pScratchFree->pNext; + mem0.nScratchFree--; + sqlite3StatusUp(SQLITE_STATUS_SCRATCH_USED, 1); + sqlite3_mutex_leave(mem0.mutex); + }else{ + sqlite3_mutex_leave(mem0.mutex); + p = sqlite3Malloc(n); + if( sqlite3GlobalConfig.bMemstat && p ){ + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusUp(SQLITE_STATUS_SCRATCH_OVERFLOW, sqlite3MallocSize(p)); + sqlite3_mutex_leave(mem0.mutex); + } + sqlite3MemdebugSetType(p, MEMTYPE_SCRATCH); + } + assert( sqlite3_mutex_notheld(mem0.mutex) ); + + +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) + /* EVIDENCE-OF: R-12970-05880 SQLite will not use more than one scratch + ** buffers per thread. + ** + ** This can only be checked in single-threaded mode. + */ + assert( scratchAllocOut==0 ); + if( p ) scratchAllocOut++; +#endif + + return p; +} +SQLITE_PRIVATE void sqlite3ScratchFree(void *p){ + if( p ){ + +#if SQLITE_THREADSAFE==0 && !defined(NDEBUG) + /* Verify that no more than two scratch allocation per thread + ** is outstanding at one time. (This is only checked in the + ** single-threaded case since checking in the multi-threaded case + ** would be much more complicated.) */ + assert( scratchAllocOut>=1 && scratchAllocOut<=2 ); + scratchAllocOut--; +#endif + + if( SQLITE_WITHIN(p, sqlite3GlobalConfig.pScratch, mem0.pScratchEnd) ){ + /* Release memory from the SQLITE_CONFIG_SCRATCH allocation */ + ScratchFreeslot *pSlot; + pSlot = (ScratchFreeslot*)p; + sqlite3_mutex_enter(mem0.mutex); + pSlot->pNext = mem0.pScratchFree; + mem0.pScratchFree = pSlot; + mem0.nScratchFree++; + assert( mem0.nScratchFree <= (u32)sqlite3GlobalConfig.nScratch ); + sqlite3StatusDown(SQLITE_STATUS_SCRATCH_USED, 1); + sqlite3_mutex_leave(mem0.mutex); + }else{ + /* Release memory back to the heap */ + assert( sqlite3MemdebugHasType(p, MEMTYPE_SCRATCH) ); + assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_SCRATCH) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); + if( sqlite3GlobalConfig.bMemstat ){ + int iSize = sqlite3MallocSize(p); + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusDown(SQLITE_STATUS_SCRATCH_OVERFLOW, iSize); + sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, iSize); + sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1); + sqlite3GlobalConfig.m.xFree(p); + sqlite3_mutex_leave(mem0.mutex); + }else{ + sqlite3GlobalConfig.m.xFree(p); + } + } + } +} + +/* +** TRUE if p is a lookaside memory allocation from db +*/ +#ifndef SQLITE_OMIT_LOOKASIDE +static int isLookaside(sqlite3 *db, void *p){ + return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pEnd); +} +#else +#define isLookaside(A,B) 0 +#endif + +/* +** Return the size of a memory allocation previously obtained from +** sqlite3Malloc() or sqlite3_malloc(). 
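+**
+** The public analogue for application allocations is sqlite3_msize()
+** (sketch; the exact value reflects the allocator's rounding):
+**
+**    char *z = sqlite3_malloc64(100);
+**    sqlite3_uint64 n = sqlite3_msize(z);   /* n>=100 */
+**    sqlite3_free(z);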
+*/ +SQLITE_PRIVATE int sqlite3MallocSize(void *p){ + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); + return sqlite3GlobalConfig.m.xSize(p); +} +SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){ + assert( p!=0 ); + if( db==0 || !isLookaside(db,p) ){ +#if SQLITE_DEBUG + if( db==0 ){ + assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); + }else{ + assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + } +#endif + return sqlite3GlobalConfig.m.xSize(p); + }else{ + assert( sqlite3_mutex_held(db->mutex) ); + return db->lookaside.sz; + } +} +SQLITE_API sqlite3_uint64 sqlite3_msize(void *p){ + assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); + return p ? sqlite3GlobalConfig.m.xSize(p) : 0; +} + +/* +** Free memory previously obtained from sqlite3Malloc(). +*/ +SQLITE_API void sqlite3_free(void *p){ + if( p==0 ) return; /* IMP: R-49053-54554 */ + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); + assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); + if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, sqlite3MallocSize(p)); + sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1); + sqlite3GlobalConfig.m.xFree(p); + sqlite3_mutex_leave(mem0.mutex); + }else{ + sqlite3GlobalConfig.m.xFree(p); + } +} + +/* +** Add the size of memory allocation "p" to the count in +** *db->pnBytesFreed. +*/ +static SQLITE_NOINLINE void measureAllocationSize(sqlite3 *db, void *p){ + *db->pnBytesFreed += sqlite3DbMallocSize(db,p); +} + +/* +** Free memory that might be associated with a particular database +** connection. +*/ +SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){ + assert( db==0 || sqlite3_mutex_held(db->mutex) ); + if( p==0 ) return; + if( db ){ + if( db->pnBytesFreed ){ + measureAllocationSize(db, p); + return; + } + if( isLookaside(db, p) ){ + LookasideSlot *pBuf = (LookasideSlot*)p; +#if SQLITE_DEBUG + /* Trash all content in the buffer being freed */ + memset(p, 0xaa, db->lookaside.sz); +#endif + pBuf->pNext = db->lookaside.pFree; + db->lookaside.pFree = pBuf; + db->lookaside.nOut--; + return; + } + } + assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); + sqlite3_free(p); +} + +/* +** Change the size of an existing memory allocation +*/ +SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ + int nOld, nNew, nDiff; + void *pNew; + assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) ); + assert( sqlite3MemdebugNoType(pOld, (u8)~MEMTYPE_HEAP) ); + if( pOld==0 ){ + return sqlite3Malloc(nBytes); /* IMP: R-04300-56712 */ + } + if( nBytes==0 ){ + sqlite3_free(pOld); /* IMP: R-26507-47431 */ + return 0; + } + if( nBytes>=0x7fffff00 ){ + /* The 0x7ffff00 limit term is explained in comments on sqlite3Malloc() */ + return 0; + } + nOld = sqlite3MallocSize(pOld); + /* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second + ** argument to xRealloc is always a value returned by a prior call to + ** xRoundup. 
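+  ** (Restating that guarantee with illustrative numbers: under the
+  ** default 8-byte rounding, sqlite3_realloc64(p, 100) reaches xRealloc
+  ** as xRealloc(p, 104).)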
*/ + nNew = sqlite3GlobalConfig.m.xRoundup((int)nBytes); + if( nOld==nNew ){ + pNew = pOld; + }else if( sqlite3GlobalConfig.bMemstat ){ + sqlite3_mutex_enter(mem0.mutex); + sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes); + nDiff = nNew - nOld; + if( nDiff>0 && sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >= + mem0.alarmThreshold-nDiff ){ + sqlite3MallocAlarm(nDiff); + } + pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); + if( pNew==0 && mem0.alarmThreshold>0 ){ + sqlite3MallocAlarm((int)nBytes); + pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); + } + if( pNew ){ + nNew = sqlite3MallocSize(pNew); + sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nNew-nOld); + } + sqlite3_mutex_leave(mem0.mutex); + }else{ + pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); + } + assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-11148-40995 */ + return pNew; +} + +/* +** The public interface to sqlite3Realloc. Make sure that the memory +** subsystem is initialized prior to invoking sqliteRealloc. +*/ +SQLITE_API void *sqlite3_realloc(void *pOld, int n){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + if( n<0 ) n = 0; /* IMP: R-26507-47431 */ + return sqlite3Realloc(pOld, n); +} +SQLITE_API void *sqlite3_realloc64(void *pOld, sqlite3_uint64 n){ +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return sqlite3Realloc(pOld, n); +} + + +/* +** Allocate and zero memory. +*/ +SQLITE_PRIVATE void *sqlite3MallocZero(u64 n){ + void *p = sqlite3Malloc(n); + if( p ){ + memset(p, 0, (size_t)n); + } + return p; +} + +/* +** Allocate and zero memory. If the allocation fails, make +** the mallocFailed flag in the connection pointer. +*/ +SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, u64 n){ + void *p; + testcase( db==0 ); + p = sqlite3DbMallocRaw(db, n); + if( p ) memset(p, 0, (size_t)n); + return p; +} + + +/* Finish the work of sqlite3DbMallocRawNN for the unusual and +** slower case when the allocation cannot be fulfilled using lookaside. +*/ +static SQLITE_NOINLINE void *dbMallocRawFinish(sqlite3 *db, u64 n){ + void *p; + assert( db!=0 ); + p = sqlite3Malloc(n); + if( !p ) sqlite3OomFault(db); + sqlite3MemdebugSetType(p, + (db->lookaside.bDisable==0) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP); + return p; +} + +/* +** Allocate memory, either lookaside (if possible) or heap. +** If the allocation fails, set the mallocFailed flag in +** the connection pointer. +** +** If db!=0 and db->mallocFailed is true (indicating a prior malloc +** failure on the same database connection) then always return 0. +** Hence for a particular database connection, once malloc starts +** failing, it fails consistently until mallocFailed is reset. +** This is an important assumption. There are many places in the +** code that do things like this: +** +** int *a = (int*)sqlite3DbMallocRaw(db, 100); +** int *b = (int*)sqlite3DbMallocRaw(db, 200); +** if( b ) a[10] = 9; +** +** In other words, if a subsequent malloc (ex: "b") worked, it is assumed +** that all prior mallocs (ex: "a") worked too. +** +** The sqlite3MallocRawNN() variant guarantees that the "db" parameter is +** not a NULL pointer. 
+*/ +SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){ + void *p; + if( db ) return sqlite3DbMallocRawNN(db, n); + p = sqlite3Malloc(n); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); + return p; +} +SQLITE_PRIVATE void *sqlite3DbMallocRawNN(sqlite3 *db, u64 n){ +#ifndef SQLITE_OMIT_LOOKASIDE + LookasideSlot *pBuf; + assert( db!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + assert( db->pnBytesFreed==0 ); + if( db->lookaside.bDisable==0 ){ + assert( db->mallocFailed==0 ); + if( n>db->lookaside.sz ){ + db->lookaside.anStat[1]++; + }else if( (pBuf = db->lookaside.pFree)==0 ){ + db->lookaside.anStat[2]++; + }else{ + db->lookaside.pFree = pBuf->pNext; + db->lookaside.nOut++; + db->lookaside.anStat[0]++; + if( db->lookaside.nOut>db->lookaside.mxOut ){ + db->lookaside.mxOut = db->lookaside.nOut; + } + return (void*)pBuf; + } + }else if( db->mallocFailed ){ + return 0; + } +#else + assert( db!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + assert( db->pnBytesFreed==0 ); + if( db->mallocFailed ){ + return 0; + } +#endif + return dbMallocRawFinish(db, n); +} + +/* Forward declaration */ +static SQLITE_NOINLINE void *dbReallocFinish(sqlite3 *db, void *p, u64 n); + +/* +** Resize the block of memory pointed to by p to n bytes. If the +** resize fails, set the mallocFailed flag in the connection object. +*/ +SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){ + assert( db!=0 ); + if( p==0 ) return sqlite3DbMallocRawNN(db, n); + assert( sqlite3_mutex_held(db->mutex) ); + if( isLookaside(db,p) && n<=db->lookaside.sz ) return p; + return dbReallocFinish(db, p, n); +} +static SQLITE_NOINLINE void *dbReallocFinish(sqlite3 *db, void *p, u64 n){ + void *pNew = 0; + assert( db!=0 ); + assert( p!=0 ); + if( db->mallocFailed==0 ){ + if( isLookaside(db, p) ){ + pNew = sqlite3DbMallocRawNN(db, n); + if( pNew ){ + memcpy(pNew, p, db->lookaside.sz); + sqlite3DbFree(db, p); + } + }else{ + assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); + pNew = sqlite3_realloc64(p, n); + if( !pNew ){ + sqlite3OomFault(db); + } + sqlite3MemdebugSetType(pNew, + (db->lookaside.bDisable==0 ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP)); + } + } + return pNew; +} + +/* +** Attempt to reallocate p. If the reallocation fails, then free p +** and set the mallocFailed flag in the database connection. +*/ +SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, u64 n){ + void *pNew; + pNew = sqlite3DbRealloc(db, p, n); + if( !pNew ){ + sqlite3DbFree(db, p); + } + return pNew; +} + +/* +** Make a copy of a string in memory obtained from sqliteMalloc(). These +** functions call sqlite3MallocRaw() directly instead of sqliteMalloc(). This +** is because when memory debugging is turned on, these two functions are +** called via macros that record the current file and line number in the +** ThreadData structure. 
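+**
+** Typical usage (sketch; the copy is released against the same
+** connection with sqlite3DbFree()):
+**
+**    char *zCopy = sqlite3DbStrDup(db, zName);
+**    if( zCopy ){
+**      ...use zCopy...
+**      sqlite3DbFree(db, zCopy);
+**    }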
+*/ +SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){ + char *zNew; + size_t n; + if( z==0 ){ + return 0; + } + n = strlen(z) + 1; + zNew = sqlite3DbMallocRaw(db, n); + if( zNew ){ + memcpy(zNew, z, n); + } + return zNew; +} +SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){ + char *zNew; + assert( db!=0 ); + if( z==0 ){ + return 0; + } + assert( (n&0x7fffffff)==n ); + zNew = sqlite3DbMallocRawNN(db, n+1); + if( zNew ){ + memcpy(zNew, z, (size_t)n); + zNew[n] = 0; + } + return zNew; +} + +/* +** Free any prior content in *pz and replace it with a copy of zNew. +*/ +SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){ + sqlite3DbFree(db, *pz); + *pz = sqlite3DbStrDup(db, zNew); +} + +/* +** Call this routine to record the fact that an OOM (out-of-memory) error +** has happened. This routine will set db->mallocFailed, and also +** temporarily disable the lookaside memory allocator and interrupt +** any running VDBEs. +*/ +SQLITE_PRIVATE void sqlite3OomFault(sqlite3 *db){ + if( db->mallocFailed==0 && db->bBenignMalloc==0 ){ + db->mallocFailed = 1; + if( db->nVdbeExec>0 ){ + db->u1.isInterrupted = 1; + } + db->lookaside.bDisable++; + } +} + +/* +** This routine reactivates the memory allocator and clears the +** db->mallocFailed flag as necessary. +** +** The memory allocator is not restarted if there are running +** VDBEs. +*/ +SQLITE_PRIVATE void sqlite3OomClear(sqlite3 *db){ + if( db->mallocFailed && db->nVdbeExec==0 ){ + db->mallocFailed = 0; + db->u1.isInterrupted = 0; + assert( db->lookaside.bDisable>0 ); + db->lookaside.bDisable--; + } +} + +/* +** Take actions at the end of an API call to indicate an OOM error +*/ +static SQLITE_NOINLINE int apiOomError(sqlite3 *db){ + sqlite3OomClear(db); + sqlite3Error(db, SQLITE_NOMEM); + return SQLITE_NOMEM_BKPT; +} + +/* +** This function must be called before exiting any API function (i.e. +** returning control to the user) that has called sqlite3_malloc or +** sqlite3_realloc. +** +** The returned value is normally a copy of the second argument to this +** function. However, if a malloc() failure has occurred since the previous +** invocation SQLITE_NOMEM is returned instead. +** +** If an OOM as occurred, then the connection error-code (the value +** returned by sqlite3_errcode()) is set to SQLITE_NOMEM. +*/ +SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ + /* If the db handle must hold the connection handle mutex here. + ** Otherwise the read (and possible write) of db->mallocFailed + ** is unsafe, as is the call to sqlite3Error(). + */ + assert( db!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){ + return apiOomError(db); + } + return rc & db->errMask; +} + +/************** End of malloc.c **********************************************/ +/************** Begin file printf.c ******************************************/ +/* +** The "printf" code that follows dates from the 1980's. It is in +** the public domain. +** +************************************************************************** +** +** This file contains code for a set of "printf"-like routines. These +** routines format strings much like the printf() from the standard C +** library, though the implementation here has enhancements to support +** SQLite. +*/ +/* #include "sqliteInt.h" */ + +/* +** Conversion types fall into various categories as defined by the +** following enumeration. +*/ +#define etRADIX 0 /* Integer types. 
%d, %x, %o, and so forth */ +#define etFLOAT 1 /* Floating point. %f */ +#define etEXP 2 /* Exponentional notation. %e and %E */ +#define etGENERIC 3 /* Floating or exponential, depending on exponent. %g */ +#define etSIZE 4 /* Return number of characters processed so far. %n */ +#define etSTRING 5 /* Strings. %s */ +#define etDYNSTRING 6 /* Dynamically allocated strings. %z */ +#define etPERCENT 7 /* Percent symbol. %% */ +#define etCHARX 8 /* Characters. %c */ +/* The rest are extensions, not normally found in printf() */ +#define etSQLESCAPE 9 /* Strings with '\'' doubled. %q */ +#define etSQLESCAPE2 10 /* Strings with '\'' doubled and enclosed in '', + NULL pointers replaced by SQL NULL. %Q */ +#define etTOKEN 11 /* a pointer to a Token structure */ +#define etSRCLIST 12 /* a pointer to a SrcList */ +#define etPOINTER 13 /* The %p conversion */ +#define etSQLESCAPE3 14 /* %w -> Strings with '\"' doubled */ +#define etORDINAL 15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ + +#define etINVALID 16 /* Any unrecognized conversion type */ + + +/* +** An "etByte" is an 8-bit unsigned value. +*/ +typedef unsigned char etByte; + +/* +** Each builtin conversion character (ex: the 'd' in "%d") is described +** by an instance of the following structure +*/ +typedef struct et_info { /* Information about each format field */ + char fmttype; /* The format field code letter */ + etByte base; /* The base for radix conversion */ + etByte flags; /* One or more of FLAG_ constants below */ + etByte type; /* Conversion paradigm */ + etByte charset; /* Offset into aDigits[] of the digits string */ + etByte prefix; /* Offset into aPrefix[] of the prefix string */ +} et_info; + +/* +** Allowed values for et_info.flags +*/ +#define FLAG_SIGNED 1 /* True if the value to convert is signed */ +#define FLAG_STRING 4 /* Allow infinity precision */ + + +/* +** The following table is searched linearly, so it is good to put the +** most frequently used conversion types first. +*/ +static const char aDigits[] = "0123456789ABCDEF0123456789abcdef"; +static const char aPrefix[] = "-x0\000X0"; +static const et_info fmtinfo[] = { + { 'd', 10, 1, etRADIX, 0, 0 }, + { 's', 0, 4, etSTRING, 0, 0 }, + { 'g', 0, 1, etGENERIC, 30, 0 }, + { 'z', 0, 4, etDYNSTRING, 0, 0 }, + { 'q', 0, 4, etSQLESCAPE, 0, 0 }, + { 'Q', 0, 4, etSQLESCAPE2, 0, 0 }, + { 'w', 0, 4, etSQLESCAPE3, 0, 0 }, + { 'c', 0, 0, etCHARX, 0, 0 }, + { 'o', 8, 0, etRADIX, 0, 2 }, + { 'u', 10, 0, etRADIX, 0, 0 }, + { 'x', 16, 0, etRADIX, 16, 1 }, + { 'X', 16, 0, etRADIX, 0, 4 }, +#ifndef SQLITE_OMIT_FLOATING_POINT + { 'f', 0, 1, etFLOAT, 0, 0 }, + { 'e', 0, 1, etEXP, 30, 0 }, + { 'E', 0, 1, etEXP, 14, 0 }, + { 'G', 0, 1, etGENERIC, 14, 0 }, +#endif + { 'i', 10, 1, etRADIX, 0, 0 }, + { 'n', 0, 0, etSIZE, 0, 0 }, + { '%', 0, 0, etPERCENT, 0, 0 }, + { 'p', 16, 0, etPOINTER, 0, 1 }, + + /* All the rest are undocumented and are for internal use only */ + { 'T', 0, 0, etTOKEN, 0, 0 }, + { 'S', 0, 0, etSRCLIST, 0, 0 }, + { 'r', 10, 1, etORDINAL, 0, 0 }, +}; + +/* +** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point +** conversions will work. +*/ +#ifndef SQLITE_OMIT_FLOATING_POINT +/* +** "*val" is a double such that 0.1 <= *val < 10.0 +** Return the ascii code for the leading digit of *val, then +** multiply "*val" by 10.0 to renormalize. +** +** Example: +** input: *val = 3.14159 +** output: *val = 1.4159 function return = '3' +** +** The counter *cnt is incremented each time. 
After counter exceeds +** 16 (the number of significant digits in a 64-bit float) '0' is +** always returned. +*/ +static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ + int digit; + LONGDOUBLE_TYPE d; + if( (*cnt)<=0 ) return '0'; + (*cnt)--; + digit = (int)*val; + d = digit; + digit += '0'; + *val = (*val - d)*10.0; + return (char)digit; +} +#endif /* SQLITE_OMIT_FLOATING_POINT */ + +/* +** Set the StrAccum object to an error mode. +*/ +static void setStrAccumError(StrAccum *p, u8 eError){ + assert( eError==STRACCUM_NOMEM || eError==STRACCUM_TOOBIG ); + p->accError = eError; + p->nAlloc = 0; +} + +/* +** Extra argument values from a PrintfArguments object +*/ +static sqlite3_int64 getIntArg(PrintfArguments *p){ + if( p->nArg<=p->nUsed ) return 0; + return sqlite3_value_int64(p->apArg[p->nUsed++]); +} +static double getDoubleArg(PrintfArguments *p){ + if( p->nArg<=p->nUsed ) return 0.0; + return sqlite3_value_double(p->apArg[p->nUsed++]); +} +static char *getTextArg(PrintfArguments *p){ + if( p->nArg<=p->nUsed ) return 0; + return (char*)sqlite3_value_text(p->apArg[p->nUsed++]); +} + + +/* +** On machines with a small stack size, you can redefine the +** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired. +*/ +#ifndef SQLITE_PRINT_BUF_SIZE +# define SQLITE_PRINT_BUF_SIZE 70 +#endif +#define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */ + +/* +** Render a string given by "fmt" into the StrAccum object. +*/ +SQLITE_PRIVATE void sqlite3VXPrintf( + StrAccum *pAccum, /* Accumulate results here */ + const char *fmt, /* Format string */ + va_list ap /* arguments */ +){ + int c; /* Next character in the format string */ + char *bufpt; /* Pointer to the conversion buffer */ + int precision; /* Precision of the current field */ + int length; /* Length of the field */ + int idx; /* A general purpose loop counter */ + int width; /* Width of the current field */ + etByte flag_leftjustify; /* True if "-" flag is present */ + etByte flag_plussign; /* True if "+" flag is present */ + etByte flag_blanksign; /* True if " " flag is present */ + etByte flag_alternateform; /* True if "#" flag is present */ + etByte flag_altform2; /* True if "!" flag is present */ + etByte flag_zeropad; /* True if field width constant starts with zero */ + etByte flag_long; /* True if "l" flag is present */ + etByte flag_longlong; /* True if the "ll" flag is present */ + etByte done; /* Loop termination flag */ + etByte xtype = etINVALID; /* Conversion paradigm */ + u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ + char prefix; /* Prefix character. "+" or "-" or " " or '\0'. 
*/
+  sqlite_uint64 longvalue;   /* Value for integer types */
+  LONGDOUBLE_TYPE realvalue; /* Value for real types */
+  const et_info *infop;      /* Pointer to the appropriate info structure */
+  char *zOut;                /* Rendering buffer */
+  int nOut;                  /* Size of the rendering buffer */
+  char *zExtra = 0;          /* Malloced memory used by some conversion */
+#ifndef SQLITE_OMIT_FLOATING_POINT
+  int  exp, e2;              /* exponent of real numbers */
+  int nsd;                   /* Number of significant digits returned */
+  double rounder;            /* Used for rounding floating point values */
+  etByte flag_dp;            /* True if decimal point should be shown */
+  etByte flag_rtz;           /* True if trailing zeros should be removed */
+#endif
+  PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */
+  char buf[etBUFSIZE];       /* Conversion buffer */
+
+  bufpt = 0;
+  if( (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC)!=0 ){
+    pArgList = va_arg(ap, PrintfArguments*);
+    bArgList = 1;
+  }else{
+    bArgList = 0;
+  }
+  for(; (c=(*fmt))!=0; ++fmt){
+    if( c!='%' ){
+      bufpt = (char *)fmt;
+#if HAVE_STRCHRNUL
+      fmt = strchrnul(fmt, '%');
+#else
+      do{ fmt++; }while( *fmt && *fmt != '%' );
+#endif
+      sqlite3StrAccumAppend(pAccum, bufpt, (int)(fmt - bufpt));
+      if( *fmt==0 ) break;
+    }
+    if( (c=(*++fmt))==0 ){
+      sqlite3StrAccumAppend(pAccum, "%", 1);
+      break;
+    }
+    /* Find out what flags are present */
+    flag_leftjustify = flag_plussign = flag_blanksign =
+     flag_alternateform = flag_altform2 = flag_zeropad = 0;
+    done = 0;
+    do{
+      switch( c ){
+        case '-':   flag_leftjustify = 1;     break;
+        case '+':   flag_plussign = 1;        break;
+        case ' ':   flag_blanksign = 1;       break;
+        case '#':   flag_alternateform = 1;   break;
+        case '!':   flag_altform2 = 1;        break;
+        case '0':   flag_zeropad = 1;         break;
+        default:    done = 1;                 break;
+      }
+    }while( !done && (c=(*++fmt))!=0 );
+    /* Get the field width */
+    if( c=='*' ){
+      if( bArgList ){
+        width = (int)getIntArg(pArgList);
+      }else{
+        width = va_arg(ap,int);
+      }
+      if( width<0 ){
+        flag_leftjustify = 1;
+        width = width >= -2147483647 ? -width : 0;
+      }
+      c = *++fmt;
+    }else{
+      unsigned wx = 0;
+      while( c>='0' && c<='9' ){
+        wx = wx*10 + c - '0';
+        c = *++fmt;
+      }
+      testcase( wx>0x7fffffff );
+      width = wx & 0x7fffffff;
+    }
+    assert( width>=0 );
+#ifdef SQLITE_PRINTF_PRECISION_LIMIT
+    if( width>SQLITE_PRINTF_PRECISION_LIMIT ){
+      width = SQLITE_PRINTF_PRECISION_LIMIT;
+    }
+#endif
+
+    /* Get the precision */
+    if( c=='.' ){
+      c = *++fmt;
+      if( c=='*' ){
+        if( bArgList ){
+          precision = (int)getIntArg(pArgList);
+        }else{
+          precision = va_arg(ap,int);
+        }
+        c = *++fmt;
+        if( precision<0 ){
+          precision = precision >= -2147483647 ? -precision : -1;
+        }
+      }else{
+        unsigned px = 0;
+        while( c>='0' && c<='9' ){
+          px = px*10 + c - '0';
+          c = *++fmt;
+        }
+        testcase( px>0x7fffffff );
+        precision = px & 0x7fffffff;
+      }
+    }else{
+      precision = -1;
+    }
+    assert( precision>=(-1) );
+#ifdef SQLITE_PRINTF_PRECISION_LIMIT
+    if( precision>SQLITE_PRINTF_PRECISION_LIMIT ){
+      precision = SQLITE_PRINTF_PRECISION_LIMIT;
+    }
+#endif
+
+
+    /* Get the conversion type modifier */
+    if( c=='l' ){
+      flag_long = 1;
+      c = *++fmt;
+      if( c=='l' ){
+        flag_longlong = 1;
+        c = *++fmt;
+      }else{
+        flag_longlong = 0;
+      }
+    }else{
+      flag_long = flag_longlong = 0;
+    }
+    /* Fetch the info entry for the field */
+    infop = &fmtinfo[0];
+    xtype = etINVALID;
+    for(idx=0; idx<ArraySize(fmtinfo); idx++){
+      if( c==fmtinfo[idx].fmttype ){
+        infop = &fmtinfo[idx];
+        xtype = infop->type;
+        break;
+      }
+    }
+
+    /*
+    ** At this point, variables are initialized as follows:
+    **
+    **   flag_alternateform          TRUE if a '#' is present.
+    **   flag_altform2               TRUE if a '!' is present.
+    **   flag_plussign               TRUE if a '+' is present.
+    **   flag_leftjustify            TRUE if a '-' is present or if the
+    **                               field width was negative.
+    **   flag_zeropad                TRUE if the width began with 0.
+    **   flag_long                   TRUE if the letter 'l' (ell) prefixed
+    **                               the conversion character.
+    **   flag_longlong               TRUE if the letter 'll' (ell ell) prefixed
+    **                               the conversion character.
+    **   flag_blanksign              TRUE if a ' ' is present.
+    **   width                       The specified field width.  This is
+    **                               always non-negative.  Zero is the default.
+    **   precision                   The specified precision.  The default
+    **                               is -1.
+    **   xtype                       The class of the conversion.
+    **   infop                       Pointer to the appropriate info struct.
+    */
+    switch( xtype ){
+      case etPOINTER:
+        flag_longlong = sizeof(char*)==sizeof(i64);
+        flag_long = sizeof(char*)==sizeof(long int);
+        /* Fall through into the next case */
+      case etORDINAL:
+      case etRADIX:
+        if( infop->flags & FLAG_SIGNED ){
+          i64 v;
+          if( bArgList ){
+            v = getIntArg(pArgList);
+          }else if( flag_longlong ){
+            v = va_arg(ap,i64);
+          }else if( flag_long ){
+            v = va_arg(ap,long int);
+          }else{
+            v = va_arg(ap,int);
+          }
+          if( v<0 ){
+            if( v==SMALLEST_INT64 ){
+              longvalue = ((u64)1)<<63;
+            }else{
+              longvalue = -v;
+            }
+            prefix = '-';
+          }else{
+            longvalue = v;
+            if( flag_plussign ) prefix = '+';
+            else if( flag_blanksign ) prefix = ' ';
+            else prefix = 0;
+          }
+        }else{
+          if( bArgList ){
+            longvalue = (u64)getIntArg(pArgList);
+          }else if( flag_longlong ){
+            longvalue = va_arg(ap,u64);
+          }else if( flag_long ){
+            longvalue = va_arg(ap,unsigned long int);
+          }else{
+            longvalue = va_arg(ap,unsigned int);
+          }
+          prefix = 0;
+        }
+        if( longvalue==0 ) flag_alternateform = 0;
+        if( flag_zeropad && precision<width-(prefix!=0) ){
+          precision = width-(prefix!=0);
+        }
+        if( precision<etBUFSIZE-10 ){
+          nOut = etBUFSIZE;
+          zOut = buf;
+        }else{
+          nOut = precision + 10;
+          zOut = zExtra = sqlite3Malloc( nOut );
+          if( zOut==0 ){
+            setStrAccumError(pAccum, STRACCUM_NOMEM);
+            return;
+          }
+        }
+        bufpt = &zOut[nOut-1];
+        if( xtype==etORDINAL ){
+          static const char zOrd[] = "thstndrd";
+          int x = (int)(longvalue % 10);
+          if( x>=4 || (longvalue/10)%10==1 ){
+            x = 0;
+          }
+          *(--bufpt) = zOrd[x*2+1];
+          *(--bufpt) = zOrd[x*2];
+        }
+        {
+          const char *cset = &aDigits[infop->charset];
+          u8 base = infop->base;
+          do{                                           /* Convert to ascii */
+            *(--bufpt) = cset[longvalue%base];
+            longvalue = longvalue/base;
+          }while( longvalue>0 );
+        }
+        length = (int)(&zOut[nOut-1]-bufpt);
+        for(idx=precision-length; idx>0; idx--){
+          *(--bufpt) = '0';                             /* Zero pad */
+        }
+        if( prefix ) *(--bufpt) = prefix;               /* Add sign */
+        if( flag_alternateform && infop->prefix ){      /* Add "0" or "0x" */
+          const char *pre;
+          char x;
+          pre = &aPrefix[infop->prefix];
+          for(; (x=(*pre))!=0; pre++) *(--bufpt) = x;
+        }
+        length = (int)(&zOut[nOut-1]-bufpt);
+        break;
+      case etFLOAT:
+      case etEXP:
+      case etGENERIC:
+        if( bArgList ){
+          realvalue = getDoubleArg(pArgList);
+        }else{
+          realvalue = va_arg(ap,double);
+        }
+#ifdef SQLITE_OMIT_FLOATING_POINT
+        length = 0;
+#else
+        if( precision<0 ) precision = 6;         /* Set default precision */
+        if( realvalue<0.0 ){
+          realvalue = -realvalue;
+          prefix = '-';
+        }else{
+          if( flag_plussign ) prefix = '+';
+          else if( flag_blanksign ) prefix = ' ';
+          else prefix = 0;
+        }
+        if( xtype==etGENERIC && precision>0 ) precision--;
+        testcase( precision>0xfff );
+        for(idx=precision&0xfff, rounder=0.5; idx>0; idx--, rounder*=0.1){}
+        if( xtype==etFLOAT ) realvalue += rounder;
+        /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */
+        exp = 0;
+        if( sqlite3IsNaN((double)realvalue) ){
+          bufpt = "NaN";
+          length = 3;
+          break;
+        }
+        if( realvalue>0.0 ){
+          LONGDOUBLE_TYPE scale = 1.0;
+          while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;}
+          while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; }
+          while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; }
+          realvalue /= scale;
+          while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; }
+          while( realvalue<1.0 ){ realvalue *= 10.0; exp--; }
+          if( exp>350 ){
bufpt = buf; + buf[0] = prefix; + memcpy(buf+(prefix!=0),"Inf",4); + length = 3+(prefix!=0); + break; + } + } + bufpt = buf; + /* + ** If the field type is etGENERIC, then convert to either etEXP + ** or etFLOAT, as appropriate. + */ + if( xtype!=etFLOAT ){ + realvalue += rounder; + if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } + } + if( xtype==etGENERIC ){ + flag_rtz = !flag_alternateform; + if( exp<-4 || exp>precision ){ + xtype = etEXP; + }else{ + precision = precision - exp; + xtype = etFLOAT; + } + }else{ + flag_rtz = flag_altform2; + } + if( xtype==etEXP ){ + e2 = 0; + }else{ + e2 = exp; + } + if( MAX(e2,0)+(i64)precision+(i64)width > etBUFSIZE - 15 ){ + bufpt = zExtra + = sqlite3Malloc( MAX(e2,0)+(i64)precision+(i64)width+15 ); + if( bufpt==0 ){ + setStrAccumError(pAccum, STRACCUM_NOMEM); + return; + } + } + zOut = bufpt; + nsd = 16 + flag_altform2*10; + flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; + /* The sign in front of the number */ + if( prefix ){ + *(bufpt++) = prefix; + } + /* Digits prior to the decimal point */ + if( e2<0 ){ + *(bufpt++) = '0'; + }else{ + for(; e2>=0; e2--){ + *(bufpt++) = et_getdigit(&realvalue,&nsd); + } + } + /* The decimal point */ + if( flag_dp ){ + *(bufpt++) = '.'; + } + /* "0" digits after the decimal point but before the first + ** significant digit of the number */ + for(e2++; e2<0; precision--, e2++){ + assert( precision>0 ); + *(bufpt++) = '0'; + } + /* Significant digits after the decimal point */ + while( (precision--)>0 ){ + *(bufpt++) = et_getdigit(&realvalue,&nsd); + } + /* Remove trailing zeros and the "." if no digits follow the "." */ + if( flag_rtz && flag_dp ){ + while( bufpt[-1]=='0' ) *(--bufpt) = 0; + assert( bufpt>zOut ); + if( bufpt[-1]=='.' ){ + if( flag_altform2 ){ + *(bufpt++) = '0'; + }else{ + *(--bufpt) = 0; + } + } + } + /* Add the "eNNN" suffix */ + if( xtype==etEXP ){ + *(bufpt++) = aDigits[infop->charset]; + if( exp<0 ){ + *(bufpt++) = '-'; exp = -exp; + }else{ + *(bufpt++) = '+'; + } + if( exp>=100 ){ + *(bufpt++) = (char)((exp/100)+'0'); /* 100's digit */ + exp %= 100; + } + *(bufpt++) = (char)(exp/10+'0'); /* 10's digit */ + *(bufpt++) = (char)(exp%10+'0'); /* 1's digit */ + } + *bufpt = 0; + + /* The converted number is in buf[] and zero terminated. Output it. + ** Note that the number is in the usual order, not reversed as with + ** integer conversions. */ + length = (int)(bufpt-zOut); + bufpt = zOut; + + /* Special case: Add leading zeros if the flag_zeropad flag is + ** set and we are not left justified */ + if( flag_zeropad && !flag_leftjustify && length < width){ + int i; + int nPad = width - length; + for(i=width; i>=nPad; i--){ + bufpt[i] = bufpt[i-nPad]; + } + i = prefix!=0; + while( nPad-- ) bufpt[i++] = '0'; + length = width; + } +#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */ + break; + case etSIZE: + if( !bArgList ){ + *(va_arg(ap,int*)) = pAccum->nChar; + } + length = width = 0; + break; + case etPERCENT: + buf[0] = '%'; + bufpt = buf; + length = 1; + break; + case etCHARX: + if( bArgList ){ + bufpt = getTextArg(pArgList); + c = bufpt ? 
bufpt[0] : 0;
+        }else{
+          c = va_arg(ap,int);
+        }
+        if( precision>1 ){
+          width -= precision-1;
+          if( width>1 && !flag_leftjustify ){
+            sqlite3AppendChar(pAccum, width-1, ' ');
+            width = 0;
+          }
+          sqlite3AppendChar(pAccum, precision-1, c);
+        }
+        length = 1;
+        buf[0] = c;
+        bufpt = buf;
+        break;
+      case etSTRING:
+      case etDYNSTRING:
+        if( bArgList ){
+          bufpt = getTextArg(pArgList);
+          xtype = etSTRING;
+        }else{
+          bufpt = va_arg(ap,char*);
+        }
+        if( bufpt==0 ){
+          bufpt = "";
+        }else if( xtype==etDYNSTRING ){
+          zExtra = bufpt;
+        }
+        if( precision>=0 ){
+          for(length=0; length<precision && bufpt[length]; length++){}
+        }else{
+          length = sqlite3Strlen30(bufpt);
+        }
+        break;
+      case etSQLESCAPE:           /* Escape ' characters */
+      case etSQLESCAPE2:          /* Escape ' and enclose in '...' */
+      case etSQLESCAPE3: {        /* Escape " characters */
+        int i, j, k, n, isnull;
+        int needQuote;
+        char ch;
+        char q = ((xtype==etSQLESCAPE3)?'"':'\'');   /* Quote character */
+        char *escarg;
+
+        if( bArgList ){
+          escarg = getTextArg(pArgList);
+        }else{
+          escarg = va_arg(ap,char*);
+        }
+        isnull = escarg==0;
+        if( isnull ) escarg = (xtype==etSQLESCAPE2 ? "NULL" : "(NULL)");
+        k = precision;
+        for(i=n=0; k!=0 && (ch=escarg[i])!=0; i++, k--){
+          if( ch==q )  n++;
+        }
+        needQuote = !isnull && xtype==etSQLESCAPE2;
+        n += i + 3;
+        if( n>etBUFSIZE ){
+          bufpt = zExtra = sqlite3Malloc( n );
+          if( bufpt==0 ){
+            setStrAccumError(pAccum, STRACCUM_NOMEM);
+            return;
+          }
+        }else{
+          bufpt = buf;
+        }
+        j = 0;
+        if( needQuote ) bufpt[j++] = q;
+        k = i;
+        for(i=0; i<k; i++){
+          bufpt[j++] = ch = escarg[i];
+          if( ch==q ) bufpt[j++] = ch;
+        }
+        if( needQuote ) bufpt[j++] = q;
+        bufpt[j] = 0;
+        length = j;
+        /* The precision in %q and %Q means how many input characters to
+        ** consume, not the length of the output...
+        ** if( precision>=0 && precision<length ) length = precision; */
+        break;
+      }
+      case etTOKEN: {
+        Token *pToken;
+        if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return;
+        pToken = va_arg(ap, Token*);
+        assert( bArgList==0 );
+        if( pToken && pToken->n ){
+          sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n);
+        }
+        length = width = 0;
+        break;
+      }
+      case etSRCLIST: {
+        SrcList *pSrc;
+        int k;
+        struct SrcList_item *pItem;
+        if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return;
+        pSrc = va_arg(ap, SrcList*);
+        k = va_arg(ap, int);
+        pItem = &pSrc->a[k];
+        assert( bArgList==0 );
+        assert( k>=0 && k<pSrc->nSrc );
+        if( pItem->zDatabase ){
+          sqlite3StrAccumAppendAll(pAccum, pItem->zDatabase);
+          sqlite3StrAccumAppend(pAccum, ".", 1);
+        }
+        sqlite3StrAccumAppendAll(pAccum, pItem->zName);
+        length = width = 0;
+        break;
+      }
+      default: {
+        assert( xtype==etINVALID );
+        return;
+      }
+    }/* End switch over the format type */
+    /*
+    ** The text of the conversion is pointed to by "bufpt" and is
+    ** "length" characters long.  The field width is "width".  Do
+    ** the output.
+    */
+    width -= length;
+    if( width>0 ){
+      if( !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
+      sqlite3StrAccumAppend(pAccum, bufpt, length);
+      if( flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
+    }else{
+      sqlite3StrAccumAppend(pAccum, bufpt, length);
+    }
+
+    if( zExtra ){
+      sqlite3DbFree(pAccum->db, zExtra);
+      zExtra = 0;
+    }
+  }/* End for loop over the format string */
+} /* End of function */
+
+/*
+** Enlarge the memory allocation on a StrAccum object so that it is
+** able to accept at least N more bytes of text.
+**
+** Return the number of bytes of text that StrAccum is able to accept
+** after the attempted enlargement.  The value returned might be zero.
+*/
+static int sqlite3StrAccumEnlarge(StrAccum *p, int N){
+  char *zNew;
+  assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */
+  if( p->accError ){
+    testcase(p->accError==STRACCUM_TOOBIG);
+    testcase(p->accError==STRACCUM_NOMEM);
+    return 0;
+  }
+  if( p->mxAlloc==0 ){
+    N = p->nAlloc - p->nChar - 1;
+    setStrAccumError(p, STRACCUM_TOOBIG);
+    return N;
+  }else{
+    char *zOld = isMalloced(p) ?
p->zText : 0; + i64 szNew = p->nChar; + assert( (p->zText==0 || p->zText==p->zBase)==!isMalloced(p) ); + szNew += N + 1; + if( szNew+p->nChar<=p->mxAlloc ){ + /* Force exponential buffer size growth as long as it does not overflow, + ** to avoid having to call this routine too often */ + szNew += p->nChar; + } + if( szNew > p->mxAlloc ){ + sqlite3StrAccumReset(p); + setStrAccumError(p, STRACCUM_TOOBIG); + return 0; + }else{ + p->nAlloc = (int)szNew; + } + if( p->db ){ + zNew = sqlite3DbRealloc(p->db, zOld, p->nAlloc); + }else{ + zNew = sqlite3_realloc64(zOld, p->nAlloc); + } + if( zNew ){ + assert( p->zText!=0 || p->nChar==0 ); + if( !isMalloced(p) && p->nChar>0 ) memcpy(zNew, p->zText, p->nChar); + p->zText = zNew; + p->nAlloc = sqlite3DbMallocSize(p->db, zNew); + p->printfFlags |= SQLITE_PRINTF_MALLOCED; + }else{ + sqlite3StrAccumReset(p); + setStrAccumError(p, STRACCUM_NOMEM); + return 0; + } + } + return N; +} + +/* +** Append N copies of character c to the given string buffer. +*/ +SQLITE_PRIVATE void sqlite3AppendChar(StrAccum *p, int N, char c){ + testcase( p->nChar + (i64)N > 0x7fffffff ); + if( p->nChar+(i64)N >= p->nAlloc && (N = sqlite3StrAccumEnlarge(p, N))<=0 ){ + return; + } + assert( (p->zText==p->zBase)==!isMalloced(p) ); + while( (N--)>0 ) p->zText[p->nChar++] = c; +} + +/* +** The StrAccum "p" is not large enough to accept N new bytes of z[]. +** So enlarge if first, then do the append. +** +** This is a helper routine to sqlite3StrAccumAppend() that does special-case +** work (enlarging the buffer) using tail recursion, so that the +** sqlite3StrAccumAppend() routine can use fast calling semantics. +*/ +static void SQLITE_NOINLINE enlargeAndAppend(StrAccum *p, const char *z, int N){ + N = sqlite3StrAccumEnlarge(p, N); + if( N>0 ){ + memcpy(&p->zText[p->nChar], z, N); + p->nChar += N; + } + assert( (p->zText==0 || p->zText==p->zBase)==!isMalloced(p) ); +} + +/* +** Append N bytes of text from z to the StrAccum object. Increase the +** size of the memory allocation for StrAccum if necessary. +*/ +SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){ + assert( z!=0 || N==0 ); + assert( p->zText!=0 || p->nChar==0 || p->accError ); + assert( N>=0 ); + assert( p->accError==0 || p->nAlloc==0 ); + if( p->nChar+N >= p->nAlloc ){ + enlargeAndAppend(p,z,N); + }else if( N ){ + assert( p->zText ); + p->nChar += N; + memcpy(&p->zText[p->nChar-N], z, N); + } +} + +/* +** Append the complete text of zero-terminated string z[] to the p string. +*/ +SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){ + sqlite3StrAccumAppend(p, z, sqlite3Strlen30(z)); +} + + +/* +** Finish off a string by making sure it is zero-terminated. +** Return a pointer to the resulting string. Return a NULL +** pointer if any kind of error was encountered. +*/ +static SQLITE_NOINLINE char *strAccumFinishRealloc(StrAccum *p){ + assert( p->mxAlloc>0 && !isMalloced(p) ); + p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); + if( p->zText ){ + memcpy(p->zText, p->zBase, p->nChar+1); + p->printfFlags |= SQLITE_PRINTF_MALLOCED; + }else{ + setStrAccumError(p, STRACCUM_NOMEM); + } + return p->zText; +} +SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){ + if( p->zText ){ + assert( (p->zText==p->zBase)==!isMalloced(p) ); + p->zText[p->nChar] = 0; + if( p->mxAlloc>0 && !isMalloced(p) ){ + return strAccumFinishRealloc(p); + } + } + return p->zText; +} + +/* +** Reset an StrAccum string. Reclaim all malloced memory. 
+*/ +SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum *p){ + assert( (p->zText==0 || p->zText==p->zBase)==!isMalloced(p) ); + if( isMalloced(p) ){ + sqlite3DbFree(p->db, p->zText); + p->printfFlags &= ~SQLITE_PRINTF_MALLOCED; + } + p->zText = 0; +} + +/* +** Initialize a string accumulator. +** +** p: The accumulator to be initialized. +** db: Pointer to a database connection. May be NULL. Lookaside +** memory is used if not NULL. db->mallocFailed is set appropriately +** when not NULL. +** zBase: An initial buffer. May be NULL in which case the initial buffer +** is malloced. +** n: Size of zBase in bytes. If total space requirements never exceed +** n then no memory allocations ever occur. +** mx: Maximum number of bytes to accumulate. If mx==0 then no memory +** allocations will ever occur. +*/ +SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum *p, sqlite3 *db, char *zBase, int n, int mx){ + p->zText = p->zBase = zBase; + p->db = db; + p->nChar = 0; + p->nAlloc = n; + p->mxAlloc = mx; + p->accError = 0; + p->printfFlags = 0; +} + +/* +** Print into memory obtained from sqliteMalloc(). Use the internal +** %-conversion extensions. +*/ +SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list ap){ + char *z; + char zBase[SQLITE_PRINT_BUF_SIZE]; + StrAccum acc; + assert( db!=0 ); + sqlite3StrAccumInit(&acc, db, zBase, sizeof(zBase), + db->aLimit[SQLITE_LIMIT_LENGTH]); + acc.printfFlags = SQLITE_PRINTF_INTERNAL; + sqlite3VXPrintf(&acc, zFormat, ap); + z = sqlite3StrAccumFinish(&acc); + if( acc.accError==STRACCUM_NOMEM ){ + sqlite3OomFault(db); + } + return z; +} + +/* +** Print into memory obtained from sqliteMalloc(). Use the internal +** %-conversion extensions. +*/ +SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){ + va_list ap; + char *z; + va_start(ap, zFormat); + z = sqlite3VMPrintf(db, zFormat, ap); + va_end(ap); + return z; +} + +/* +** Print into memory obtained from sqlite3_malloc(). Omit the internal +** %-conversion extensions. +*/ +SQLITE_API char *sqlite3_vmprintf(const char *zFormat, va_list ap){ + char *z; + char zBase[SQLITE_PRINT_BUF_SIZE]; + StrAccum acc; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( zFormat==0 ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + sqlite3StrAccumInit(&acc, 0, zBase, sizeof(zBase), SQLITE_MAX_LENGTH); + sqlite3VXPrintf(&acc, zFormat, ap); + z = sqlite3StrAccumFinish(&acc); + return z; +} + +/* +** Print into memory obtained from sqlite3_malloc()(). Omit the internal +** %-conversion extensions. +*/ +SQLITE_API char *sqlite3_mprintf(const char *zFormat, ...){ + va_list ap; + char *z; +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + va_start(ap, zFormat); + z = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + return z; +} + +/* +** sqlite3_snprintf() works like snprintf() except that it ignores the +** current locale settings. This is important for SQLite because we +** are not able to use a "," as the decimal point in place of "." as +** specified by some locales. +** +** Oops: The first two arguments of sqlite3_snprintf() are backwards +** from the snprintf() standard. Unfortunately, it is too late to change +** this without breaking compatibility, so we just have to live with the +** mistake. +** +** sqlite3_vsnprintf() is the varargs version. 
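+**
+** As a reminder of that reversed argument order, a typical call might
+** look like this (an illustrative sketch, not code from this file):
+**
+**     char zBuf[50];
+**     sqlite3_snprintf(sizeof(zBuf), zBuf, "rc=%d", rc);
+**
+** Note that the buffer size comes first and the buffer second, the
+** reverse of standard snprintf().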
+*/ +SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){ + StrAccum acc; + if( n<=0 ) return zBuf; +#ifdef SQLITE_ENABLE_API_ARMOR + if( zBuf==0 || zFormat==0 ) { + (void)SQLITE_MISUSE_BKPT; + if( zBuf ) zBuf[0] = 0; + return zBuf; + } +#endif + sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); + sqlite3VXPrintf(&acc, zFormat, ap); + zBuf[acc.nChar] = 0; + return zBuf; +} +SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ + char *z; + va_list ap; + va_start(ap,zFormat); + z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); + va_end(ap); + return z; +} + +/* +** This is the routine that actually formats the sqlite3_log() message. +** We house it in a separate routine from sqlite3_log() to avoid using +** stack space on small-stack systems when logging is disabled. +** +** sqlite3_log() must render into a static buffer. It cannot dynamically +** allocate memory because it might be called while the memory allocator +** mutex is held. +** +** sqlite3VXPrintf() might ask for *temporary* memory allocations for +** certain format characters (%q) or for very large precisions or widths. +** Care must be taken that any sqlite3_log() calls that occur while the +** memory mutex is held do not use these mechanisms. +*/ +static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){ + StrAccum acc; /* String accumulator */ + char zMsg[SQLITE_PRINT_BUF_SIZE*3]; /* Complete log message */ + + sqlite3StrAccumInit(&acc, 0, zMsg, sizeof(zMsg), 0); + sqlite3VXPrintf(&acc, zFormat, ap); + sqlite3GlobalConfig.xLog(sqlite3GlobalConfig.pLogArg, iErrCode, + sqlite3StrAccumFinish(&acc)); +} + +/* +** Format and write a message to the log if logging is enabled. +*/ +SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...){ + va_list ap; /* Vararg list */ + if( sqlite3GlobalConfig.xLog ){ + va_start(ap, zFormat); + renderLogMsg(iErrCode, zFormat, ap); + va_end(ap); + } +} + +#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) +/* +** A version of printf() that understands %lld. Used for debugging. +** The printf() built into some versions of windows does not understand %lld +** and segfaults if you give it a long long int. +*/ +SQLITE_PRIVATE void sqlite3DebugPrintf(const char *zFormat, ...){ + va_list ap; + StrAccum acc; + char zBuf[500]; + sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0); + va_start(ap,zFormat); + sqlite3VXPrintf(&acc, zFormat, ap); + va_end(ap); + sqlite3StrAccumFinish(&acc); + fprintf(stdout,"%s", zBuf); + fflush(stdout); +} +#endif + + +/* +** variable-argument wrapper around sqlite3VXPrintf(). The bFlags argument +** can contain the bit SQLITE_PRINTF_INTERNAL enable internal formats. +*/ +SQLITE_PRIVATE void sqlite3XPrintf(StrAccum *p, const char *zFormat, ...){ + va_list ap; + va_start(ap,zFormat); + sqlite3VXPrintf(p, zFormat, ap); + va_end(ap); +} + +/************** End of printf.c **********************************************/ +/************** Begin file treeview.c ****************************************/ +/* +** 2015-06-08 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains C code to implement the TreeView debugging routines. 
+** These routines print a parse tree to standard output for debugging and
+** analysis.
+**
+** The interfaces in this file are only available when compiling
+** with SQLITE_DEBUG.
+*/
+/* #include "sqliteInt.h" */
+#ifdef SQLITE_DEBUG
+
+/*
+** Add a new subitem to the tree.  The moreToFollow flag indicates that this
+** is not the last item in the tree.
+*/
+static TreeView *sqlite3TreeViewPush(TreeView *p, u8 moreToFollow){
+  if( p==0 ){
+    p = sqlite3_malloc64( sizeof(*p) );
+    if( p==0 ) return 0;
+    memset(p, 0, sizeof(*p));
+  }else{
+    p->iLevel++;
+  }
+  assert( moreToFollow==0 || moreToFollow==1 );
+  if( p->iLevel<(int)sizeof(p->bLine) ) p->bLine[p->iLevel] = moreToFollow;
+  return p;
+}
+
+/*
+** Finished with one layer of the tree
+*/
+static void sqlite3TreeViewPop(TreeView *p){
+  if( p==0 ) return;
+  p->iLevel--;
+  if( p->iLevel<0 ) sqlite3_free(p);
+}
+
+/*
+** Generate a single line of output for the tree, with a prefix that contains
+** all the appropriate tree lines
+*/
+static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
+  va_list ap;
+  int i;
+  StrAccum acc;
+  char zBuf[500];
+  sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+  if( p ){
+    for(i=0; i<p->iLevel && i<(int)sizeof(p->bLine)-1; i++){
+      sqlite3StrAccumAppend(&acc, p->bLine[i] ? "|   " : "    ", 4);
+    }
+    sqlite3StrAccumAppend(&acc, p->bLine[i] ? "|-- " : "'-- ", 4);
+  }
+  va_start(ap, zFormat);
+  sqlite3VXPrintf(&acc, zFormat, ap);
+  va_end(ap);
+  assert( acc.nChar>0 );
+  if( zBuf[acc.nChar-1]!='\n' ) sqlite3StrAccumAppend(&acc, "\n", 1);
+  sqlite3StrAccumFinish(&acc);
+  fprintf(stdout,"%s", zBuf);
+  fflush(stdout);
+}
+
+/*
+** Shorthand for starting a new tree item that consists of a single label
+*/
+static void sqlite3TreeViewItem(TreeView *p, const char *zLabel,u8 moreFollows){
+  p = sqlite3TreeViewPush(p, moreFollows);
+  sqlite3TreeViewLine(p, "%s", zLabel);
+}
+
+/*
+** Generate a human-readable description of a WITH clause.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 moreToFollow){
+  int i;
+  if( pWith==0 ) return;
+  if( pWith->nCte==0 ) return;
+  if( pWith->pOuter ){
+    sqlite3TreeViewLine(pView, "WITH (0x%p, pOuter=0x%p)",pWith,pWith->pOuter);
+  }else{
+    sqlite3TreeViewLine(pView, "WITH (0x%p)", pWith);
+  }
+  if( pWith->nCte>0 ){
+    pView = sqlite3TreeViewPush(pView, 1);
+    for(i=0; i<pWith->nCte; i++){
+      StrAccum x;
+      char zLine[1000];
+      const struct Cte *pCte = &pWith->a[i];
+      sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
+      sqlite3XPrintf(&x, "%s", pCte->zName);
+      if( pCte->pCols && pCte->pCols->nExpr>0 ){
+        char cSep = '(';
+        int j;
+        for(j=0; j<pCte->pCols->nExpr; j++){
+          sqlite3XPrintf(&x, "%c%s", cSep, pCte->pCols->a[j].zName);
+          cSep = ',';
+        }
+        sqlite3XPrintf(&x, ")");
+      }
+      sqlite3XPrintf(&x, " AS");
+      sqlite3StrAccumFinish(&x);
+      sqlite3TreeViewItem(pView, zLine, i<pWith->nCte-1);
+      sqlite3TreeViewSelect(pView, pCte->pSelect, 0);
+      sqlite3TreeViewPop(pView);
+    }
+    sqlite3TreeViewPop(pView);
+  }
+}
+
+
+/*
+** Generate a human-readable description of a Select object.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 moreToFollow){
+  int n = 0;
+  int cnt = 0;
+  pView = sqlite3TreeViewPush(pView, moreToFollow);
+  if( p->pWith ){
+    sqlite3TreeViewWith(pView, p->pWith, 1);
+    cnt = 1;
+    sqlite3TreeViewPush(pView, 1);
+  }
+  do{
+    sqlite3TreeViewLine(pView, "SELECT%s%s (0x%p) selFlags=0x%x nSelectRow=%d",
+      ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""),
+      ((p->selFlags & SF_Aggregate) ? " agg_flag" : ""), p, p->selFlags,
+      (int)p->nSelectRow
+    );
+    if( cnt++ ) sqlite3TreeViewPop(pView);
+    if( p->pPrior ){
+      n = 1000;
+    }else{
+      n = 0;
+      if( p->pSrc && p->pSrc->nSrc ) n++;
+      if( p->pWhere ) n++;
+      if( p->pGroupBy ) n++;
+      if( p->pHaving ) n++;
+      if( p->pOrderBy ) n++;
+      if( p->pLimit ) n++;
+      if( p->pOffset ) n++;
+    }
+    sqlite3TreeViewExprList(pView, p->pEList, (n--)>0, "result-set");
+    if( p->pSrc && p->pSrc->nSrc ){
+      int i;
+      pView = sqlite3TreeViewPush(pView, (n--)>0);
+      sqlite3TreeViewLine(pView, "FROM");
+      for(i=0; i<p->pSrc->nSrc; i++){
+        struct SrcList_item *pItem = &p->pSrc->a[i];
+        StrAccum x;
+        char zLine[100];
+        sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
+        sqlite3XPrintf(&x, "{%d,*}", pItem->iCursor);
+        if( pItem->zDatabase ){
+          sqlite3XPrintf(&x, " %s.%s", pItem->zDatabase, pItem->zName);
+        }else if( pItem->zName ){
+          sqlite3XPrintf(&x, " %s", pItem->zName);
+        }
+        if( pItem->pTab ){
+          sqlite3XPrintf(&x, " tabname=%Q", pItem->pTab->zName);
+        }
+        if( pItem->zAlias ){
+          sqlite3XPrintf(&x, " (AS %s)", pItem->zAlias);
+        }
+        if( pItem->fg.jointype & JT_LEFT ){
+          sqlite3XPrintf(&x, " LEFT-JOIN");
+        }
+        sqlite3StrAccumFinish(&x);
+        sqlite3TreeViewItem(pView, zLine, i<p->pSrc->nSrc-1);
+        if( pItem->pSelect ){
+          sqlite3TreeViewSelect(pView, pItem->pSelect, 0);
+        }
+        if( pItem->fg.isTabFunc ){
+          sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:");
+        }
+        sqlite3TreeViewPop(pView);
+      }
+      sqlite3TreeViewPop(pView);
+    }
+    if( p->pWhere ){
+      sqlite3TreeViewItem(pView, "WHERE", (n--)>0);
+      sqlite3TreeViewExpr(pView, p->pWhere, 0);
+      sqlite3TreeViewPop(pView);
+    }
+    if( p->pGroupBy ){
+      sqlite3TreeViewExprList(pView, p->pGroupBy, (n--)>0, "GROUPBY");
+    }
+    if( p->pHaving ){
+      sqlite3TreeViewItem(pView, "HAVING", (n--)>0);
+      sqlite3TreeViewExpr(pView, p->pHaving, 0);
+      sqlite3TreeViewPop(pView);
+    }
+    if( p->pOrderBy ){
+      sqlite3TreeViewExprList(pView, p->pOrderBy, (n--)>0, "ORDERBY");
+    }
+    if( p->pLimit ){
+      sqlite3TreeViewItem(pView, "LIMIT", (n--)>0);
+      sqlite3TreeViewExpr(pView, p->pLimit, 0);
+      sqlite3TreeViewPop(pView);
+    }
+    if( p->pOffset ){
+      sqlite3TreeViewItem(pView, "OFFSET", (n--)>0);
+      sqlite3TreeViewExpr(pView, p->pOffset, 0);
+      sqlite3TreeViewPop(pView);
+    }
+    if( p->pPrior ){
+      const char *zOp = "UNION";
+      switch( p->op ){
+        case TK_ALL:       zOp = "UNION ALL";  break;
+        case TK_INTERSECT: zOp = "INTERSECT";  break;
+        case TK_EXCEPT:    zOp = "EXCEPT";     break;
+      }
+      sqlite3TreeViewItem(pView, zOp, 1);
+    }
+    p = p->pPrior;
+  }while( p!=0 );
+  sqlite3TreeViewPop(pView);
+}
+
+/*
+** Generate a human-readable explanation of an expression tree.
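+**
+** As an illustrative sketch only (the exact labels come from the switch
+** statement below), an expression such as "x+y" might print as:
+**
+**     ADD
+**     |-- {0:0}
+**     '-- {0:1}
+**
+** where column references are rendered as {iTable:iColumn}.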
+*/ +SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 moreToFollow){ + const char *zBinOp = 0; /* Binary operator */ + const char *zUniOp = 0; /* Unary operator */ + char zFlgs[30]; + pView = sqlite3TreeViewPush(pView, moreToFollow); + if( pExpr==0 ){ + sqlite3TreeViewLine(pView, "nil"); + sqlite3TreeViewPop(pView); + return; + } + if( pExpr->flags ){ + sqlite3_snprintf(sizeof(zFlgs),zFlgs," flags=0x%x",pExpr->flags); + }else{ + zFlgs[0] = 0; + } + switch( pExpr->op ){ + case TK_AGG_COLUMN: { + sqlite3TreeViewLine(pView, "AGG{%d:%d}%s", + pExpr->iTable, pExpr->iColumn, zFlgs); + break; + } + case TK_COLUMN: { + if( pExpr->iTable<0 ){ + /* This only happens when coding check constraints */ + sqlite3TreeViewLine(pView, "COLUMN(%d)%s", pExpr->iColumn, zFlgs); + }else{ + sqlite3TreeViewLine(pView, "{%d:%d}%s", + pExpr->iTable, pExpr->iColumn, zFlgs); + } + break; + } + case TK_INTEGER: { + if( pExpr->flags & EP_IntValue ){ + sqlite3TreeViewLine(pView, "%d", pExpr->u.iValue); + }else{ + sqlite3TreeViewLine(pView, "%s", pExpr->u.zToken); + } + break; + } +#ifndef SQLITE_OMIT_FLOATING_POINT + case TK_FLOAT: { + sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); + break; + } +#endif + case TK_STRING: { + sqlite3TreeViewLine(pView,"%Q", pExpr->u.zToken); + break; + } + case TK_NULL: { + sqlite3TreeViewLine(pView,"NULL"); + break; + } +#ifndef SQLITE_OMIT_BLOB_LITERAL + case TK_BLOB: { + sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); + break; + } +#endif + case TK_VARIABLE: { + sqlite3TreeViewLine(pView,"VARIABLE(%s,%d)", + pExpr->u.zToken, pExpr->iColumn); + break; + } + case TK_REGISTER: { + sqlite3TreeViewLine(pView,"REGISTER(%d)", pExpr->iTable); + break; + } + case TK_ID: { + sqlite3TreeViewLine(pView,"ID \"%w\"", pExpr->u.zToken); + break; + } +#ifndef SQLITE_OMIT_CAST + case TK_CAST: { + /* Expressions of the form: CAST(pLeft AS token) */ + sqlite3TreeViewLine(pView,"CAST %Q", pExpr->u.zToken); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); + break; + } +#endif /* SQLITE_OMIT_CAST */ + case TK_LT: zBinOp = "LT"; break; + case TK_LE: zBinOp = "LE"; break; + case TK_GT: zBinOp = "GT"; break; + case TK_GE: zBinOp = "GE"; break; + case TK_NE: zBinOp = "NE"; break; + case TK_EQ: zBinOp = "EQ"; break; + case TK_IS: zBinOp = "IS"; break; + case TK_ISNOT: zBinOp = "ISNOT"; break; + case TK_AND: zBinOp = "AND"; break; + case TK_OR: zBinOp = "OR"; break; + case TK_PLUS: zBinOp = "ADD"; break; + case TK_STAR: zBinOp = "MUL"; break; + case TK_MINUS: zBinOp = "SUB"; break; + case TK_REM: zBinOp = "REM"; break; + case TK_BITAND: zBinOp = "BITAND"; break; + case TK_BITOR: zBinOp = "BITOR"; break; + case TK_SLASH: zBinOp = "DIV"; break; + case TK_LSHIFT: zBinOp = "LSHIFT"; break; + case TK_RSHIFT: zBinOp = "RSHIFT"; break; + case TK_CONCAT: zBinOp = "CONCAT"; break; + case TK_DOT: zBinOp = "DOT"; break; + + case TK_UMINUS: zUniOp = "UMINUS"; break; + case TK_UPLUS: zUniOp = "UPLUS"; break; + case TK_BITNOT: zUniOp = "BITNOT"; break; + case TK_NOT: zUniOp = "NOT"; break; + case TK_ISNULL: zUniOp = "ISNULL"; break; + case TK_NOTNULL: zUniOp = "NOTNULL"; break; + + case TK_SPAN: { + sqlite3TreeViewLine(pView, "SPAN %Q", pExpr->u.zToken); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); + break; + } + + case TK_COLLATE: { + sqlite3TreeViewLine(pView, "COLLATE %Q", pExpr->u.zToken); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); + break; + } + + case TK_AGG_FUNCTION: + case TK_FUNCTION: { + ExprList *pFarg; /* List of function arguments */ + if( ExprHasProperty(pExpr, EP_TokenOnly) 
){ + pFarg = 0; + }else{ + pFarg = pExpr->x.pList; + } + if( pExpr->op==TK_AGG_FUNCTION ){ + sqlite3TreeViewLine(pView, "AGG_FUNCTION%d %Q", + pExpr->op2, pExpr->u.zToken); + }else{ + sqlite3TreeViewLine(pView, "FUNCTION %Q", pExpr->u.zToken); + } + if( pFarg ){ + sqlite3TreeViewExprList(pView, pFarg, 0, 0); + } + break; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_EXISTS: { + sqlite3TreeViewLine(pView, "EXISTS-expr"); + sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); + break; + } + case TK_SELECT: { + sqlite3TreeViewLine(pView, "SELECT-expr"); + sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); + break; + } + case TK_IN: { + sqlite3TreeViewLine(pView, "IN"); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); + }else{ + sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0); + } + break; + } +#endif /* SQLITE_OMIT_SUBQUERY */ + + /* + ** x BETWEEN y AND z + ** + ** This is equivalent to + ** + ** x>=y AND x<=z + ** + ** X is stored in pExpr->pLeft. + ** Y is stored in pExpr->pList->a[0].pExpr. + ** Z is stored in pExpr->pList->a[1].pExpr. + */ + case TK_BETWEEN: { + Expr *pX = pExpr->pLeft; + Expr *pY = pExpr->x.pList->a[0].pExpr; + Expr *pZ = pExpr->x.pList->a[1].pExpr; + sqlite3TreeViewLine(pView, "BETWEEN"); + sqlite3TreeViewExpr(pView, pX, 1); + sqlite3TreeViewExpr(pView, pY, 1); + sqlite3TreeViewExpr(pView, pZ, 0); + break; + } + case TK_TRIGGER: { + /* If the opcode is TK_TRIGGER, then the expression is a reference + ** to a column in the new.* or old.* pseudo-tables available to + ** trigger programs. In this case Expr.iTable is set to 1 for the + ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn + ** is set to the column of the pseudo-table to read, or to -1 to + ** read the rowid field. + */ + sqlite3TreeViewLine(pView, "%s(%d)", + pExpr->iTable ? "NEW" : "OLD", pExpr->iColumn); + break; + } + case TK_CASE: { + sqlite3TreeViewLine(pView, "CASE"); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); + sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0); + break; + } +#ifndef SQLITE_OMIT_TRIGGER + case TK_RAISE: { + const char *zType = "unk"; + switch( pExpr->affinity ){ + case OE_Rollback: zType = "rollback"; break; + case OE_Abort: zType = "abort"; break; + case OE_Fail: zType = "fail"; break; + case OE_Ignore: zType = "ignore"; break; + } + sqlite3TreeViewLine(pView, "RAISE %s(%Q)", zType, pExpr->u.zToken); + break; + } +#endif + case TK_MATCH: { + sqlite3TreeViewLine(pView, "MATCH {%d:%d}%s", + pExpr->iTable, pExpr->iColumn, zFlgs); + sqlite3TreeViewExpr(pView, pExpr->pRight, 0); + break; + } + case TK_VECTOR: { + sqlite3TreeViewBareExprList(pView, pExpr->x.pList, "VECTOR"); + break; + } + case TK_SELECT_COLUMN: { + sqlite3TreeViewLine(pView, "SELECT-COLUMN %d", pExpr->iColumn); + sqlite3TreeViewSelect(pView, pExpr->pLeft->x.pSelect, 0); + break; + } + default: { + sqlite3TreeViewLine(pView, "op=%d", pExpr->op); + break; + } + } + if( zBinOp ){ + sqlite3TreeViewLine(pView, "%s%s", zBinOp, zFlgs); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); + sqlite3TreeViewExpr(pView, pExpr->pRight, 0); + }else if( zUniOp ){ + sqlite3TreeViewLine(pView, "%s%s", zUniOp, zFlgs); + sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); + } + sqlite3TreeViewPop(pView); +} + + +/* +** Generate a human-readable explanation of an expression list. 
+*/
+SQLITE_PRIVATE void sqlite3TreeViewBareExprList(
+  TreeView *pView,
+  const ExprList *pList,
+  const char *zLabel
+){
+  if( zLabel==0 || zLabel[0]==0 ) zLabel = "LIST";
+  if( pList==0 ){
+    sqlite3TreeViewLine(pView, "%s (empty)", zLabel);
+  }else{
+    int i;
+    sqlite3TreeViewLine(pView, "%s", zLabel);
+    for(i=0; i<pList->nExpr; i++){
+      int j = pList->a[i].u.x.iOrderByCol;
+      if( j ){
+        sqlite3TreeViewPush(pView, 0);
+        sqlite3TreeViewLine(pView, "iOrderByCol=%d", j);
+      }
+      sqlite3TreeViewExpr(pView, pList->a[i].pExpr, i<pList->nExpr-1);
+      if( j ) sqlite3TreeViewPop(pView);
+    }
+  }
+}
+SQLITE_PRIVATE void sqlite3TreeViewExprList(
+  TreeView *pView,
+  const ExprList *pList,
+  u8 moreToFollow,
+  const char *zLabel
+){
+  pView = sqlite3TreeViewPush(pView, moreToFollow);
+  sqlite3TreeViewBareExprList(pView, pList, zLabel);
+  sqlite3TreeViewPop(pView);
+}
+
+#endif /* SQLITE_DEBUG */
+
+/************** End of treeview.c ********************************************/
+/************** Begin file random.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code to implement a pseudo-random number
+** generator (PRNG) for SQLite.
+**
+** Random numbers are used by some of the database backends in order
+** to generate random integer keys for tables or random filenames.
+*/
+/* #include "sqliteInt.h" */
+
+
+/* All threads share a single random number generator.
+** This structure is the current state of the generator.
+*/
+static SQLITE_WSD struct sqlite3PrngType {
+  unsigned char isInit;          /* True if initialized */
+  unsigned char i, j;            /* State variables */
+  unsigned char s[256];          /* State variables */
+} sqlite3Prng;
+
+/*
+** Return N random bytes.
+*/
+SQLITE_API void sqlite3_randomness(int N, void *pBuf){
+  unsigned char t;
+  unsigned char *zBuf = pBuf;
+
+  /* The "wsdPrng" macro will resolve to the pseudo-random number generator
+  ** state vector.  If writable static data is unsupported on the target,
+  ** we have to locate the state vector at run-time.  In the more common
+  ** case where writable static data is supported, wsdPrng can refer directly
+  ** to the "sqlite3Prng" state vector declared above.
+  */
+#ifdef SQLITE_OMIT_WSD
+  struct sqlite3PrngType *p = &GLOBAL(struct sqlite3PrngType, sqlite3Prng);
+# define wsdPrng p[0]
+#else
+# define wsdPrng sqlite3Prng
+#endif
+
+#if SQLITE_THREADSAFE
+  sqlite3_mutex *mutex;
+#endif
+
+#ifndef SQLITE_OMIT_AUTOINIT
+  if( sqlite3_initialize() ) return;
+#endif
+
+#if SQLITE_THREADSAFE
+  mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG);
+#endif
+
+  sqlite3_mutex_enter(mutex);
+  if( N<=0 || pBuf==0 ){
+    wsdPrng.isInit = 0;
+    sqlite3_mutex_leave(mutex);
+    return;
+  }
+
+  /* Initialize the state of the random number generator once,
+  ** the first time this routine is called.  The seed value does
+  ** not need to contain a lot of randomness since we are not
+  ** trying to do secure encryption or anything like that...
+  **
+  ** Nothing in this file or anywhere else in SQLite does any kind of
+  ** encryption.  The RC4 algorithm is being used as a PRNG (pseudo-random
+  ** number generator) not as an encryption device.
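+  **
+  ** For example, a caller needing a 16-byte nonce might do:
+  **
+  **     unsigned char aNonce[16];
+  **     sqlite3_randomness(sizeof(aNonce), aNonce);
+  **
+  ** And, per the check above, calling sqlite3_randomness(0,0) simply
+  ** marks the generator uninitialized so it is re-seeded on next use.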
+ */ + if( !wsdPrng.isInit ){ + int i; + char k[256]; + wsdPrng.j = 0; + wsdPrng.i = 0; + sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k); + for(i=0; i<256; i++){ + wsdPrng.s[i] = (u8)i; + } + for(i=0; i<256; i++){ + wsdPrng.j += wsdPrng.s[i] + k[i]; + t = wsdPrng.s[wsdPrng.j]; + wsdPrng.s[wsdPrng.j] = wsdPrng.s[i]; + wsdPrng.s[i] = t; + } + wsdPrng.isInit = 1; + } + + assert( N>0 ); + do{ + wsdPrng.i++; + t = wsdPrng.s[wsdPrng.i]; + wsdPrng.j += t; + wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j]; + wsdPrng.s[wsdPrng.j] = t; + t += wsdPrng.s[wsdPrng.i]; + *(zBuf++) = wsdPrng.s[t]; + }while( --N ); + sqlite3_mutex_leave(mutex); +} + +#ifndef SQLITE_UNTESTABLE +/* +** For testing purposes, we sometimes want to preserve the state of +** PRNG and restore the PRNG to its saved state at a later time, or +** to reset the PRNG to its initial state. These routines accomplish +** those tasks. +** +** The sqlite3_test_control() interface calls these routines to +** control the PRNG. +*/ +static SQLITE_WSD struct sqlite3PrngType sqlite3SavedPrng; +SQLITE_PRIVATE void sqlite3PrngSaveState(void){ + memcpy( + &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), + &GLOBAL(struct sqlite3PrngType, sqlite3Prng), + sizeof(sqlite3Prng) + ); +} +SQLITE_PRIVATE void sqlite3PrngRestoreState(void){ + memcpy( + &GLOBAL(struct sqlite3PrngType, sqlite3Prng), + &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), + sizeof(sqlite3Prng) + ); +} +#endif /* SQLITE_UNTESTABLE */ + +/************** End of random.c **********************************************/ +/************** Begin file threads.c *****************************************/ +/* +** 2012 July 21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file presents a simple cross-platform threading interface for +** use internally by SQLite. +** +** A "thread" can be created using sqlite3ThreadCreate(). This thread +** runs independently of its creator until it is joined using +** sqlite3ThreadJoin(), at which point it terminates. +** +** Threads do not have to be real. It could be that the work of the +** "thread" is done by the main thread at either the sqlite3ThreadCreate() +** or sqlite3ThreadJoin() call. This is, in fact, what happens in +** single threaded systems. Nothing in SQLite requires multiple threads. +** This interface exists so that applications that want to take advantage +** of multiple cores can do so, while also allowing applications to stay +** single-threaded if desired. 
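+**
+** A typical use might look like the following sketch, where xMyTask and
+** pJob are placeholder names for the caller's routine and argument:
+**
+**     SQLiteThread *pThread;
+**     void *pRet;
+**     int rc = sqlite3ThreadCreate(&pThread, xMyTask, pJob);
+**     if( rc==SQLITE_OK ) rc = sqlite3ThreadJoin(pThread, &pRet);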
+*/
+/* #include "sqliteInt.h" */
+#if SQLITE_OS_WIN
+/* # include "os_win.h" */
+#endif
+
+#if SQLITE_MAX_WORKER_THREADS>0
+
+/********************************* Unix Pthreads ****************************/
+#if SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) && SQLITE_THREADSAFE>0
+
+#define SQLITE_THREADS_IMPLEMENTED 1  /* Prevent the single-thread code below */
+/* #include <pthread.h> */
+
+/* A running thread */
+struct SQLiteThread {
+  pthread_t tid;                 /* Thread ID */
+  int done;                      /* Set to true when thread finishes */
+  void *pOut;                    /* Result returned by the thread */
+  void *(*xTask)(void*);         /* The thread routine */
+  void *pIn;                     /* Argument to the thread */
+};
+
+/* Create a new thread */
+SQLITE_PRIVATE int sqlite3ThreadCreate(
+  SQLiteThread **ppThread,  /* OUT: Write the thread object here */
+  void *(*xTask)(void*),    /* Routine to run in a separate thread */
+  void *pIn                 /* Argument passed into xTask() */
+){
+  SQLiteThread *p;
+  int rc;
+
+  assert( ppThread!=0 );
+  assert( xTask!=0 );
+  /* This routine is never used in single-threaded mode */
+  assert( sqlite3GlobalConfig.bCoreMutex!=0 );
+
+  *ppThread = 0;
+  p = sqlite3Malloc(sizeof(*p));
+  if( p==0 ) return SQLITE_NOMEM_BKPT;
+  memset(p, 0, sizeof(*p));
+  p->xTask = xTask;
+  p->pIn = pIn;
+  /* If the SQLITE_TESTCTRL_FAULT_INSTALL callback is registered to a
+  ** function that returns SQLITE_ERROR when passed the argument 200, that
+  ** forces worker threads to run sequentially and deterministically
+  ** for testing purposes. */
+  if( sqlite3FaultSim(200) ){
+    rc = 1;
+  }else{
+    rc = pthread_create(&p->tid, 0, xTask, pIn);
+  }
+  if( rc ){
+    p->done = 1;
+    p->pOut = xTask(pIn);
+  }
+  *ppThread = p;
+  return SQLITE_OK;
+}
+
+/* Get the results of the thread */
+SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
+  int rc;
+
+  assert( ppOut!=0 );
+  if( NEVER(p==0) ) return SQLITE_NOMEM_BKPT;
+  if( p->done ){
+    *ppOut = p->pOut;
+    rc = SQLITE_OK;
+  }else{
+    rc = pthread_join(p->tid, ppOut) ? SQLITE_ERROR : SQLITE_OK;
+  }
+  sqlite3_free(p);
+  return rc;
+}
+
+#endif /* SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) */
+/******************************** End Unix Pthreads *************************/
+
+
+/********************************* Win32 Threads ****************************/
+#if SQLITE_OS_WIN_THREADS
+
+#define SQLITE_THREADS_IMPLEMENTED 1  /* Prevent the single-thread code below */
+#include <process.h>
+
+/* A running thread */
+struct SQLiteThread {
+  void *tid;               /* The thread handle */
+  unsigned id;             /* The thread identifier */
+  void *(*xTask)(void*);   /* The routine to run as a thread */
+  void *pIn;               /* Argument to xTask */
+  void *pResult;           /* Result of xTask */
+};
+
+/* Thread procedure Win32 compatibility shim */
+static unsigned __stdcall sqlite3ThreadProc(
+  void *pArg  /* IN: Pointer to the SQLiteThread structure */
+){
+  SQLiteThread *p = (SQLiteThread *)pArg;
+
+  assert( p!=0 );
+#if 0
+  /*
+  ** This assert appears to trigger spuriously on certain
+  ** versions of Windows, possibly due to _beginthreadex()
+  ** and/or CreateThread() not fully setting their thread
+  ** ID parameter before starting the thread.
+ */ + assert( p->id==GetCurrentThreadId() ); +#endif + assert( p->xTask!=0 ); + p->pResult = p->xTask(p->pIn); + + _endthreadex(0); + return 0; /* NOT REACHED */ +} + +/* Create a new thread */ +SQLITE_PRIVATE int sqlite3ThreadCreate( + SQLiteThread **ppThread, /* OUT: Write the thread object here */ + void *(*xTask)(void*), /* Routine to run in a separate thread */ + void *pIn /* Argument passed into xTask() */ +){ + SQLiteThread *p; + + assert( ppThread!=0 ); + assert( xTask!=0 ); + *ppThread = 0; + p = sqlite3Malloc(sizeof(*p)); + if( p==0 ) return SQLITE_NOMEM_BKPT; + /* If the SQLITE_TESTCTRL_FAULT_INSTALL callback is registered to a + ** function that returns SQLITE_ERROR when passed the argument 200, that + ** forces worker threads to run sequentially and deterministically + ** (via the sqlite3FaultSim() term of the conditional) for testing + ** purposes. */ + if( sqlite3GlobalConfig.bCoreMutex==0 || sqlite3FaultSim(200) ){ + memset(p, 0, sizeof(*p)); + }else{ + p->xTask = xTask; + p->pIn = pIn; + p->tid = (void*)_beginthreadex(0, 0, sqlite3ThreadProc, p, 0, &p->id); + if( p->tid==0 ){ + memset(p, 0, sizeof(*p)); + } + } + if( p->xTask==0 ){ + p->id = GetCurrentThreadId(); + p->pResult = xTask(pIn); + } + *ppThread = p; + return SQLITE_OK; +} + +SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject); /* os_win.c */ + +/* Get the results of the thread */ +SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ + DWORD rc; + BOOL bRc; + + assert( ppOut!=0 ); + if( NEVER(p==0) ) return SQLITE_NOMEM_BKPT; + if( p->xTask==0 ){ + /* assert( p->id==GetCurrentThreadId() ); */ + rc = WAIT_OBJECT_0; + assert( p->tid==0 ); + }else{ + assert( p->id!=0 && p->id!=GetCurrentThreadId() ); + rc = sqlite3Win32Wait((HANDLE)p->tid); + assert( rc!=WAIT_IO_COMPLETION ); + bRc = CloseHandle((HANDLE)p->tid); + assert( bRc ); + } + if( rc==WAIT_OBJECT_0 ) *ppOut = p->pResult; + sqlite3_free(p); + return (rc==WAIT_OBJECT_0) ? SQLITE_OK : SQLITE_ERROR; +} + +#endif /* SQLITE_OS_WIN_THREADS */ +/******************************** End Win32 Threads *************************/ + + +/********************************* Single-Threaded **************************/ +#ifndef SQLITE_THREADS_IMPLEMENTED +/* +** This implementation does not actually create a new thread. 
It does the
+** work of the thread in the main thread, when either the thread is created
+** or when it is joined
+*/
+
+/* A running thread */
+struct SQLiteThread {
+  void *(*xTask)(void*);   /* The routine to run as a thread */
+  void *pIn;               /* Argument to xTask */
+  void *pResult;           /* Result of xTask */
+};
+
+/* Create a new thread */
+SQLITE_PRIVATE int sqlite3ThreadCreate(
+  SQLiteThread **ppThread,  /* OUT: Write the thread object here */
+  void *(*xTask)(void*),    /* Routine to run in a separate thread */
+  void *pIn                 /* Argument passed into xTask() */
+){
+  SQLiteThread *p;
+
+  assert( ppThread!=0 );
+  assert( xTask!=0 );
+  *ppThread = 0;
+  p = sqlite3Malloc(sizeof(*p));
+  if( p==0 ) return SQLITE_NOMEM_BKPT;
+  if( (SQLITE_PTR_TO_INT(p)/17)&1 ){
+    p->xTask = xTask;
+    p->pIn = pIn;
+  }else{
+    p->xTask = 0;
+    p->pResult = xTask(pIn);
+  }
+  *ppThread = p;
+  return SQLITE_OK;
+}
+
+/* Get the results of the thread */
+SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){
+
+  assert( ppOut!=0 );
+  if( NEVER(p==0) ) return SQLITE_NOMEM_BKPT;
+  if( p->xTask ){
+    *ppOut = p->xTask(p->pIn);
+  }else{
+    *ppOut = p->pResult;
+  }
+  sqlite3_free(p);
+
+#if defined(SQLITE_TEST)
+  {
+    void *pTstAlloc = sqlite3Malloc(10);
+    if (!pTstAlloc) return SQLITE_NOMEM_BKPT;
+    sqlite3_free(pTstAlloc);
+  }
+#endif
+
+  return SQLITE_OK;
+}
+
+#endif /* !defined(SQLITE_THREADS_IMPLEMENTED) */
+/****************************** End Single-Threaded *************************/
+#endif /* SQLITE_MAX_WORKER_THREADS>0 */
+
+/************** End of threads.c *********************************************/
+/************** Begin file utf.c *********************************************/
+/*
+** 2004 April 13
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains routines used to translate between UTF-8,
+** UTF-16, UTF-16BE, and UTF-16LE.
+**
+** Notes on UTF-8:
+**
+**   Byte-0    Byte-1    Byte-2    Byte-3    Value
+**  0xxxxxxx                                 00000000 00000000 0xxxxxxx
+**  110yyyyy  10xxxxxx                       00000000 00000yyy yyxxxxxx
+**  1110zzzz  10yyyyyy  10xxxxxx             00000000 zzzzyyyy yyxxxxxx
+**  11110uuu  10uuzzzz  10yyyyyy  10xxxxxx   000uuuuu zzzzyyyy yyxxxxxx
+**
+**
+** Notes on UTF-16:  (with wwww+1==uuuuu)
+**
+**      Word-0               Word-1          Value
+**  110110ww wwzzzzyy   110111yy yyxxxxxx    000uuuuu zzzzyyyy yyxxxxxx
+**  zzzzyyyy yyxxxxxx                        00000000 zzzzyyyy yyxxxxxx
+**
+**
+** BOM or Byte Order Mark:
+**     0xff 0xfe   little-endian utf-16 follows
+**     0xfe 0xff   big-endian utf-16 follows
+**
+*/
+/* #include "sqliteInt.h" */
+/* #include <assert.h> */
+/* #include "vdbeInt.h" */
+
+#if !defined(SQLITE_AMALGAMATION) && SQLITE_BYTEORDER==0
+/*
+** The following constant value is used by the SQLITE_BIGENDIAN and
+** SQLITE_LITTLEENDIAN macros.
+*/
+SQLITE_PRIVATE const int sqlite3one = 1;
+#endif /* SQLITE_AMALGAMATION && SQLITE_BYTEORDER==0 */
+
+/*
+** This lookup table is used to help decode the first byte of
+** a multi-byte UTF8 character.
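+** The table is indexed by (lead byte - 0xc0) and yields the initial
+** payload bits for the character.  For example, the lead byte 0xE2
+** (binary 1110 0010) selects entry 0x22, which holds 0x02: the "zzzz"
+** bits of a three-byte sequence in the table above.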
+*/ +static const unsigned char sqlite3Utf8Trans1[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, +}; + + +#define WRITE_UTF8(zOut, c) { \ + if( c<0x00080 ){ \ + *zOut++ = (u8)(c&0xFF); \ + } \ + else if( c<0x00800 ){ \ + *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ + } \ + else if( c<0x10000 ){ \ + *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ + *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ + }else{ \ + *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ + *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ + *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ + } \ +} + +#define WRITE_UTF16LE(zOut, c) { \ + if( c<=0xFFFF ){ \ + *zOut++ = (u8)(c&0x00FF); \ + *zOut++ = (u8)((c>>8)&0x00FF); \ + }else{ \ + *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ + *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ + *zOut++ = (u8)(c&0x00FF); \ + *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ + } \ +} + +#define WRITE_UTF16BE(zOut, c) { \ + if( c<=0xFFFF ){ \ + *zOut++ = (u8)((c>>8)&0x00FF); \ + *zOut++ = (u8)(c&0x00FF); \ + }else{ \ + *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ + *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ + *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ + *zOut++ = (u8)(c&0x00FF); \ + } \ +} + +#define READ_UTF16LE(zIn, TERM, c){ \ + c = (*zIn++); \ + c += ((*zIn++)<<8); \ + if( c>=0xD800 && c<0xE000 && TERM ){ \ + int c2 = (*zIn++); \ + c2 += ((*zIn++)<<8); \ + c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ + } \ +} + +#define READ_UTF16BE(zIn, TERM, c){ \ + c = ((*zIn++)<<8); \ + c += (*zIn++); \ + if( c>=0xD800 && c<0xE000 && TERM ){ \ + int c2 = ((*zIn++)<<8); \ + c2 += (*zIn++); \ + c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ + } \ +} + +/* +** Translate a single UTF-8 character. Return the unicode value. +** +** During translation, assume that the byte that zTerm points +** is a 0x00. +** +** Write a pointer to the next unread byte back into *pzNext. +** +** Notes On Invalid UTF-8: +** +** * This routine never allows a 7-bit character (0x00 through 0x7f) to +** be encoded as a multi-byte character. Any multi-byte character that +** attempts to encode a value between 0x00 and 0x7f is rendered as 0xfffd. +** +** * This routine never allows a UTF16 surrogate value to be encoded. +** If a multi-byte character attempts to encode a value between +** 0xd800 and 0xe000 then it is rendered as 0xfffd. +** +** * Bytes in the range of 0x80 through 0xbf which occur as the first +** byte of a character are interpreted as single-byte characters +** and rendered as themselves even though they are technically +** invalid characters. +** +** * This routine accepts over-length UTF8 encodings +** for unicode values 0x80 and greater. It does not change over-length +** encodings to 0xfffd as some systems recommend. 
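+**
+**  *  As an illustration of the first rule above, the over-long two-byte
+**     sequence 0xC1 0xBF decodes to 0x7F, which is below 0x80, so it is
+**     rendered as 0xfffd.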
+*/
+#define READ_UTF8(zIn, zTerm, c)                           \
+  c = *(zIn++);                                            \
+  if( c>=0xc0 ){                                           \
+    c = sqlite3Utf8Trans1[c-0xc0];                         \
+    while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){            \
+      c = (c<<6) + (0x3f & *(zIn++));                      \
+    }                                                      \
+    if( c<0x80                                             \
+        || (c&0xFFFFF800)==0xD800                          \
+        || (c&0xFFFFFFFE)==0xFFFE ){  c = 0xFFFD; }        \
+  }
+SQLITE_PRIVATE u32 sqlite3Utf8Read(
+  const unsigned char **pz    /* Pointer to string from which to read char */
+){
+  unsigned int c;
+
+  /* Same as READ_UTF8() above but without the zTerm parameter.
+  ** For this routine, we assume the UTF8 string is always zero-terminated.
+  */
+  c = *((*pz)++);
+  if( c>=0xc0 ){
+    c = sqlite3Utf8Trans1[c-0xc0];
+    while( (*(*pz) & 0xc0)==0x80 ){
+      c = (c<<6) + (0x3f & *((*pz)++));
+    }
+    if( c<0x80
+        || (c&0xFFFFF800)==0xD800
+        || (c&0xFFFFFFFE)==0xFFFE ){  c = 0xFFFD; }
+  }
+  return c;
+}
+
+
+
+
+/*
+** If the TRANSLATE_TRACE macro is defined, the value of each Mem is
+** printed on stderr on the way into and out of sqlite3VdbeMemTranslate().
+*/
+/* #define TRANSLATE_TRACE 1 */
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** This routine transforms the internal text encoding used by pMem to
+** desiredEnc. It is an error if the string is already of the desired
+** encoding, or if *pMem does not contain a string value.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){
+  int len;                    /* Maximum length of output string in bytes */
+  unsigned char *zOut;        /* Output buffer */
+  unsigned char *zIn;         /* Input iterator */
+  unsigned char *zTerm;       /* End of input */
+  unsigned char *z;           /* Output iterator */
+  unsigned int c;
+
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( pMem->flags&MEM_Str );
+  assert( pMem->enc!=desiredEnc );
+  assert( pMem->enc!=0 );
+  assert( pMem->n>=0 );
+
+#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG)
+  {
+    char zBuf[100];
+    sqlite3VdbeMemPrettyPrint(pMem, zBuf);
+    fprintf(stderr, "INPUT:  %s\n", zBuf);
+  }
+#endif
+
+  /* If the translation is between UTF-16 little and big endian, then
+  ** all that is required is to swap the byte order. This case is handled
+  ** differently from the others.
+  */
+  if( pMem->enc!=SQLITE_UTF8 && desiredEnc!=SQLITE_UTF8 ){
+    u8 temp;
+    int rc;
+    rc = sqlite3VdbeMemMakeWriteable(pMem);
+    if( rc!=SQLITE_OK ){
+      assert( rc==SQLITE_NOMEM );
+      return SQLITE_NOMEM_BKPT;
+    }
+    zIn = (u8*)pMem->z;
+    zTerm = &zIn[pMem->n&~1];
+    while( zIn<zTerm ){
+      temp = *zIn;
+      *zIn = *(zIn+1);
+      zIn++;
+      *zIn++ = temp;
+    }
+    pMem->enc = desiredEnc;
+    goto translate_out;
+  }
+
+  /* Set len to the maximum number of bytes required in the output buffer. */
+  if( desiredEnc==SQLITE_UTF8 ){
+    /* When converting from UTF-16, the maximum growth results from
+    ** translating a 2-byte character to a 4-byte UTF-8 character.
+    ** A single byte is required for the output string
+    ** nul-terminator.
+    */
+    pMem->n &= ~1;
+    len = pMem->n * 2 + 1;
+  }else{
+    /* When converting from UTF-8 to UTF-16 the maximum growth is caused
+    ** when a 1-byte UTF-8 character is translated into a 2-byte UTF-16
+    ** character. Two bytes are required in the output buffer for the
+    ** nul-terminator.
+    */
+    len = pMem->n * 2 + 2;
+  }
+
+  /* Set zIn to point at the start of the input buffer and zTerm to point 1
+  ** byte past the end.
+  **
+  ** Variable zOut is set to point at the output buffer, space obtained
+  ** from sqlite3_malloc().
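+  ** (More precisely, the call below obtains the space from
+  ** sqlite3DbMallocRaw() using pMem->db, so the buffer is tracked by
+  ** that connection when one is attached.)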
+  */
+  zIn = (u8*)pMem->z;
+  zTerm = &zIn[pMem->n];
+  zOut = sqlite3DbMallocRaw(pMem->db, len);
+  if( !zOut ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  z = zOut;
+
+  if( pMem->enc==SQLITE_UTF8 ){
+    if( desiredEnc==SQLITE_UTF16LE ){
+      /* UTF-8 -> UTF-16 Little-endian */
+      while( zIn<zTerm ){
+        c = sqlite3Utf8Read((const u8**)&zIn);
+        WRITE_UTF16LE(z, c);
+      }
+    }else{
+      assert( desiredEnc==SQLITE_UTF16BE );
+      /* UTF-8 -> UTF-16 Big-endian */
+      while( zIn<zTerm ){
+        c = sqlite3Utf8Read((const u8**)&zIn);
+        WRITE_UTF16BE(z, c);
+      }
+    }
+    pMem->n = (int)(z - zOut);
+    *z++ = 0;
+  }else{
+    assert( desiredEnc==SQLITE_UTF8 );
+    if( pMem->enc==SQLITE_UTF16LE ){
+      /* UTF-16 Little-endian -> UTF-8 */
+      while( zIn<zTerm ){
+        READ_UTF16LE(zIn, zIn<zTerm, c);
+        WRITE_UTF8(z, c);
+      }
+    }else{
+      /* UTF-16 Big-endian -> UTF-8 */
+      while( zIn<zTerm ){
+        READ_UTF16BE(zIn, zIn<zTerm, c);
+        WRITE_UTF8(z, c);
+      }
+    }
+    pMem->n = (int)(z - zOut);
+  }
+  *z = 0;
+  assert( (pMem->n+(desiredEnc==SQLITE_UTF8?1:2))<=len );
+
+  c = pMem->flags;
+  sqlite3VdbeMemRelease(pMem);
+  pMem->flags = MEM_Str|MEM_Term|(c&(MEM_AffMask|MEM_Subtype));
+  pMem->enc = desiredEnc;
+  pMem->z = (char*)zOut;
+  pMem->zMalloc = pMem->z;
+  pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->z);
+
+translate_out:
+#if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG)
+  {
+    char zBuf[100];
+    sqlite3VdbeMemPrettyPrint(pMem, zBuf);
+    fprintf(stderr, "OUTPUT: %s\n", zBuf);
+  }
+#endif
+  return SQLITE_OK;
+}
+
+/*
+** This routine checks for a byte-order mark at the beginning of the
+** UTF-16 string stored in *pMem. If one is present, it is removed and
+** the encoding of the Mem adjusted. This routine does not do any
+** byte-swapping, it just sets Mem.enc appropriately.
+**
+** The allocation (static, dynamic etc.) and encoding of the Mem may be
+** changed by this function.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem){
+  int rc = SQLITE_OK;
+  u8 bom = 0;
+
+  assert( pMem->n>=0 );
+  if( pMem->n>1 ){
+    u8 b1 = *(u8 *)pMem->z;
+    u8 b2 = *(((u8 *)pMem->z) + 1);
+    if( b1==0xFE && b2==0xFF ){
+      bom = SQLITE_UTF16BE;
+    }
+    if( b1==0xFF && b2==0xFE ){
+      bom = SQLITE_UTF16LE;
+    }
+  }
+
+  if( bom ){
+    rc = sqlite3VdbeMemMakeWriteable(pMem);
+    if( rc==SQLITE_OK ){
+      pMem->n -= 2;
+      memmove(pMem->z, &pMem->z[2], pMem->n);
+      pMem->z[pMem->n] = '\0';
+      pMem->z[pMem->n+1] = '\0';
+      pMem->flags |= MEM_Term;
+      pMem->enc = bom;
+    }
+  }
+  return rc;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** pZ is a UTF-8 encoded unicode string. If nByte is less than zero,
+** return the number of unicode characters in pZ up to (but not including)
+** the first 0x00 byte. If nByte is not less than zero, return the
+** number of unicode characters in the first nByte of pZ (or up to
+** the first 0x00, whichever comes first).
+*/
+SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *zIn, int nByte){
+  int r = 0;
+  const u8 *z = (const u8*)zIn;
+  const u8 *zTerm;
+  if( nByte>=0 ){
+    zTerm = &z[nByte];
+  }else{
+    zTerm = (const u8*)(-1);
+  }
+  assert( z<=zTerm );
+  while( *z!=0 && z<zTerm ){
+    SQLITE_SKIP_UTF8(z);
+    r++;
+  }
+  return r;
+}
+
+/* This test function is not currently used by the automated test-suite.
+** Hence it is only available in debug builds.
+*/
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+/*
+** Translate UTF-8 to UTF-8.
+**
+** This has the effect of making sure that the string is well-formed
+** UTF-8.  Miscoded characters are removed.
+**
+** The translation is done in-place and aborted if the output
+** overruns the input.
+*/
+SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char *zIn){
+  unsigned char *zOut = zIn;
+  unsigned char *zStart = zIn;
+  u32 c;
+
+  while( zIn[0] && zOut<=zIn ){
+    c = sqlite3Utf8Read((const u8**)&zIn);
+    if( c!=0xfffd ){
+      WRITE_UTF8(zOut, c);
+    }
+  }
+  *zOut = 0;
+  return (int)(zOut - zStart);
+}
+#endif
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Convert a UTF-16 string in the native encoding into a UTF-8 string.
+** Memory to hold the UTF-8 string is obtained from sqlite3_malloc and must
+** be freed by the calling function.
+**
+** NULL is returned if there is an allocation error.
+*/
+SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *db, const void *z, int nByte, u8 enc){
+  Mem m;
+  memset(&m, 0, sizeof(m));
+  m.db = db;
+  sqlite3VdbeMemSetStr(&m, z, nByte, enc, SQLITE_STATIC);
+  sqlite3VdbeChangeEncoding(&m, SQLITE_UTF8);
+  if( db->mallocFailed ){
+    sqlite3VdbeMemRelease(&m);
+    m.z = 0;
+  }
+  assert( (m.flags & MEM_Term)!=0 || db->mallocFailed );
+  assert( (m.flags & MEM_Str)!=0 || db->mallocFailed );
+  assert( m.z || db->mallocFailed );
+  return m.z;
+}
+
+/*
+** zIn is a UTF-16 encoded unicode string at least nChar characters long.
+** Return the number of bytes in the first nChar unicode characters
+** in pZ.  nChar must be non-negative.
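+** For example, the two-character string "ab" occupies 4 bytes of UTF-16,
+** so sqlite3Utf16ByteLen(z, 2) returns 4; a supplementary character
+** stored as a surrogate pair counts as one character but four bytes.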
+*/
+SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *zIn, int nChar){
+  int c;
+  unsigned char const *z = zIn;
+  int n = 0;
+
+  if( SQLITE_UTF16NATIVE==SQLITE_UTF16BE ){
+    while( n<nChar ){
+      READ_UTF16BE(z, 1, c);
+      n++;
+    }
+  }else{
+    while( n<nChar ){
+      READ_UTF16LE(z, 1, c);
+      n++;
+    }
+  }
+  return (int)(z-(unsigned char const *)zIn);
+}
+
+#if defined(SQLITE_TEST)
+/*
+** This routine is called from the TCL test function "translate_selftest".
+** It checks that the primitives for serializing and deserializing
+** characters in each encoding are inverses of each other.
+*/
+SQLITE_PRIVATE void sqlite3UtfSelfTest(void){
+  unsigned int i, t;
+  unsigned char zBuf[20];
+  unsigned char *z;
+  int n;
+  unsigned int c;
+
+  for(i=0; i<0x00110000; i++){
+    z = zBuf;
+    WRITE_UTF8(z, i);
+    n = (int)(z-zBuf);
+    assert( n>0 && n<=4 );
+    z[0] = 0;
+    z = zBuf;
+    c = sqlite3Utf8Read((const u8**)&z);
+    t = i;
+    if( i>=0xD800 && i<=0xDFFF ) t = 0xFFFD;
+    if( (i&0xFFFFFFFE)==0xFFFE ) t = 0xFFFD;
+    assert( c==t );
+    assert( (z-zBuf)==n );
+  }
+  for(i=0; i<0x00110000; i++){
+    if( i>=0xD800 && i<0xE000 ) continue;
+    z = zBuf;
+    WRITE_UTF16LE(z, i);
+    n = (int)(z-zBuf);
+    assert( n>0 && n<=4 );
+    z[0] = 0;
+    z = zBuf;
+    READ_UTF16LE(z, 1, c);
+    assert( c==i );
+    assert( (z-zBuf)==n );
+  }
+  for(i=0; i<0x00110000; i++){
+    if( i>=0xD800 && i<0xE000 ) continue;
+    z = zBuf;
+    WRITE_UTF16BE(z, i);
+    n = (int)(z-zBuf);
+    assert( n>0 && n<=4 );
+    z[0] = 0;
+    z = zBuf;
+    READ_UTF16BE(z, 1, c);
+    assert( c==i );
+    assert( (z-zBuf)==n );
+  }
+}
+#endif /* SQLITE_TEST */
+#endif /* SQLITE_OMIT_UTF16 */
+
+/************** End of utf.c *************************************************/
+/************** Begin file util.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Utility functions used throughout sqlite.
+**
+** This file contains functions for allocating memory, comparing
+** strings, and stuff like that.
+**
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdarg.h> */
+#if HAVE_ISNAN || SQLITE_HAVE_ISNAN
+# include <math.h>
+#endif
+
+/*
+** Routine needed to support the testcase() macro.
+*/
+#ifdef SQLITE_COVERAGE_TEST
+SQLITE_PRIVATE void sqlite3Coverage(int x){
+  static unsigned dummy = 0;
+  dummy += (unsigned)x;
+}
+#endif
+
+/*
+** Give a callback to the test harness that can be used to simulate faults
+** in places where it is difficult or expensive to do so purely by means
+** of inputs.
+**
+** The intent of the integer argument is to let the fault simulator know
+** which of multiple sqlite3FaultSim() calls has been hit.
+**
+** Return whatever integer value the test callback returns, or return
+** SQLITE_OK if no test callback is installed.
+*/
+#ifndef SQLITE_UNTESTABLE
+SQLITE_PRIVATE int sqlite3FaultSim(int iTest){
+  int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback;
+  return xCallback ? xCallback(iTest) : SQLITE_OK;
+}
+#endif
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** Return true if the floating point value is Not a Number (NaN).
+**
+** Use the math library isnan() function if compiled with SQLITE_HAVE_ISNAN.
+** Otherwise, we have our own implementation that works on most systems.
+*/
+SQLITE_PRIVATE int sqlite3IsNaN(double x){
+  int rc;   /* The value to return */
+#if !SQLITE_HAVE_ISNAN && !HAVE_ISNAN
+  /*
+  ** Systems that support the isnan() library function should probably
+  ** make use of it by compiling with -DSQLITE_HAVE_ISNAN.  But we have
+  ** found that many systems do not have a working isnan() function so
+  ** this implementation is provided as an alternative.
+  **
+  ** This NaN test sometimes fails if compiled on GCC with -ffast-math.
+ ** On the other hand, the use of -ffast-math comes with the following + ** warning: + ** + ** This option [-ffast-math] should never be turned on by any + ** -O option since it can result in incorrect output for programs + ** which depend on an exact implementation of IEEE or ISO + ** rules/specifications for math functions. + ** + ** Under MSVC, this NaN test may fail if compiled with a floating- + ** point precision mode other than /fp:precise. From the MSDN + ** documentation: + ** + ** The compiler [with /fp:precise] will properly handle comparisons + ** involving NaN. For example, x != x evaluates to true if x is NaN + ** ... + */ +#ifdef __FAST_MATH__ +# error SQLite will not work correctly with the -ffast-math option of GCC. +#endif + volatile double y = x; + volatile double z = y; + rc = (y!=z); +#else /* if HAVE_ISNAN */ + rc = isnan(x); +#endif /* HAVE_ISNAN */ + testcase( rc ); + return rc; +} +#endif /* SQLITE_OMIT_FLOATING_POINT */ + +/* +** Compute a string length that is limited to what can be stored in +** lower 30 bits of a 32-bit signed integer. +** +** The value returned will never be negative. Nor will it ever be greater +** than the actual length of the string. For very long strings (greater +** than 1GiB) the value returned might be less than the true string length. +*/ +SQLITE_PRIVATE int sqlite3Strlen30(const char *z){ + if( z==0 ) return 0; + return 0x3fffffff & (int)strlen(z); +} + +/* +** Return the declared type of a column. Or return zDflt if the column +** has no declared type. +** +** The column type is an extra string stored after the zero-terminator on +** the column name if and only if the COLFLAG_HASTYPE flag is set. +*/ +SQLITE_PRIVATE char *sqlite3ColumnType(Column *pCol, char *zDflt){ + if( (pCol->colFlags & COLFLAG_HASTYPE)==0 ) return zDflt; + return pCol->zName + strlen(pCol->zName) + 1; +} + +/* +** Helper function for sqlite3Error() - called rarely. Broken out into +** a separate routine to avoid unnecessary register saves on entry to +** sqlite3Error(). +*/ +static SQLITE_NOINLINE void sqlite3ErrorFinish(sqlite3 *db, int err_code){ + if( db->pErr ) sqlite3ValueSetNull(db->pErr); + sqlite3SystemError(db, err_code); +} + +/* +** Set the current error code to err_code and clear any prior error message. +** Also set iSysErrno (by calling sqlite3System) if the err_code indicates +** that would be appropriate. +*/ +SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){ + assert( db!=0 ); + db->errCode = err_code; + if( err_code || db->pErr ) sqlite3ErrorFinish(db, err_code); +} + +/* +** Load the sqlite3.iSysErrno field if that is an appropriate thing +** to do based on the SQLite error code in rc. +*/ +SQLITE_PRIVATE void sqlite3SystemError(sqlite3 *db, int rc){ + if( rc==SQLITE_IOERR_NOMEM ) return; + rc &= 0xff; + if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){ + db->iSysErrno = sqlite3OsGetLastError(db->pVfs); + } +} + +/* +** Set the most recent error code and error string for the sqlite +** handle "db". The error code is set to "err_code". +** +** If it is not NULL, string zFormat specifies the format of the +** error string in the style of the printf functions: The following +** format characters are allowed: +** +** %s Insert a string +** %z A string that should be freed after use +** %d Insert an integer +** %T Insert a token +** %S Insert the first element of a SrcList +** +** zFormat and any string tokens that follow it are assumed to be +** encoded in UTF-8. 
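+**
+** For example, a compile-time error might be reported with a call
+** such as the following (an illustrative sketch):
+**
+**    sqlite3ErrorWithMsg(db, SQLITE_ERROR, "no such table: %s", zName);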
+** +** To clear the most recent error for sqlite handle "db", sqlite3Error +** should be called with err_code set to SQLITE_OK and zFormat set +** to NULL. +*/ +SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *zFormat, ...){ + assert( db!=0 ); + db->errCode = err_code; + sqlite3SystemError(db, err_code); + if( zFormat==0 ){ + sqlite3Error(db, err_code); + }else if( db->pErr || (db->pErr = sqlite3ValueNew(db))!=0 ){ + char *z; + va_list ap; + va_start(ap, zFormat); + z = sqlite3VMPrintf(db, zFormat, ap); + va_end(ap); + sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC); + } +} + +/* +** Add an error message to pParse->zErrMsg and increment pParse->nErr. +** The following formatting characters are allowed: +** +** %s Insert a string +** %z A string that should be freed after use +** %d Insert an integer +** %T Insert a token +** %S Insert the first element of a SrcList +** +** This function should be used to report any error that occurs while +** compiling an SQL statement (i.e. within sqlite3_prepare()). The +** last thing the sqlite3_prepare() function does is copy the error +** stored by this function into the database handle using sqlite3Error(). +** Functions sqlite3Error() or sqlite3ErrorWithMsg() should be used +** during statement execution (sqlite3_step() etc.). +*/ +SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){ + char *zMsg; + va_list ap; + sqlite3 *db = pParse->db; + va_start(ap, zFormat); + zMsg = sqlite3VMPrintf(db, zFormat, ap); + va_end(ap); + if( db->suppressErr ){ + sqlite3DbFree(db, zMsg); + }else{ + pParse->nErr++; + sqlite3DbFree(db, pParse->zErrMsg); + pParse->zErrMsg = zMsg; + pParse->rc = SQLITE_ERROR; + } +} + +/* +** Convert an SQL-style quoted string into a normal string by removing +** the quote characters. The conversion is done in-place. If the +** input does not begin with a quote character, then this routine +** is a no-op. +** +** The input string must be zero-terminated. A new zero-terminator +** is added to the dequoted string. +** +** The return value is -1 if no dequoting occurs or the length of the +** dequoted string, exclusive of the zero terminator, if dequoting does +** occur. +** +** 2002-Feb-14: This routine is extended to remove MS-Access style +** brackets from around identifiers. For example: "[a-b-c]" becomes +** "a-b-c". +*/ +SQLITE_PRIVATE void sqlite3Dequote(char *z){ + char quote; + int i, j; + if( z==0 ) return; + quote = z[0]; + if( !sqlite3Isquote(quote) ) return; + if( quote=='[' ) quote = ']'; + for(i=1, j=0;; i++){ + assert( z[i] ); + if( z[i]==quote ){ + if( z[i+1]==quote ){ + z[j++] = quote; + i++; + }else{ + break; + } + }else{ + z[j++] = z[i]; + } + } + z[j] = 0; +} + +/* +** Generate a Token object from a string +*/ +SQLITE_PRIVATE void sqlite3TokenInit(Token *p, char *z){ + p->z = z; + p->n = sqlite3Strlen30(z); +} + +/* Convenient short-hand */ +#define UpperToLower sqlite3UpperToLower + +/* +** Some systems have stricmp(). Others have strcasecmp(). Because +** there is no consistency, we will define our own. +** +** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and +** sqlite3_strnicmp() APIs allow applications and extensions to compare +** the contents of two buffers containing UTF-8 strings in a +** case-independent fashion, using the same definition of "case +** independence" that SQLite uses internally when comparing identifiers. 
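+**
+** For example (an illustrative note, not part of the original text),
+** both of the following calls evaluate to 0 because each byte is
+** folded through the same sqlite3UpperToLower[] mapping:
+**
+**     sqlite3_stricmp("SELECT", "select");       /* 0: strings equal */
+**     sqlite3_strnicmp("ROWID1", "rowid2", 5);   /* 0: first 5 bytes equal */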
+*/
+SQLITE_API int sqlite3_stricmp(const char *zLeft, const char *zRight){
+  if( zLeft==0 ){
+    return zRight ? -1 : 0;
+  }else if( zRight==0 ){
+    return 1;
+  }
+  return sqlite3StrICmp(zLeft, zRight);
+}
+SQLITE_PRIVATE int sqlite3StrICmp(const char *zLeft, const char *zRight){
+  unsigned char *a, *b;
+  int c;
+  a = (unsigned char *)zLeft;
+  b = (unsigned char *)zRight;
+  for(;;){
+    c = (int)UpperToLower[*a] - (int)UpperToLower[*b];
+    if( c || *a==0 ) break;
+    a++;
+    b++;
+  }
+  return c;
+}
+SQLITE_API int sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
+  register unsigned char *a, *b;
+  if( zLeft==0 ){
+    return zRight ? -1 : 0;
+  }else if( zRight==0 ){
+    return 1;
+  }
+  a = (unsigned char *)zLeft;
+  b = (unsigned char *)zRight;
+  while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; }
+  return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b];
+}
+
+/*
+** The string z[] is a text representation of a real number.
+** Convert this string to a double and write it into *pResult.
+**
+** The string z[] is length bytes in length (bytes, not characters) and
+** uses the encoding enc.  The string is not necessarily zero-terminated.
+**
+** Return TRUE if the result is a valid real number (or integer) and FALSE
+** if the string is empty or contains extraneous text.  Valid numbers
+** are in one of these formats:
+**
+**    [+-]digits[E[+-]digits]
+**    [+-]digits.[digits][E[+-]digits]
+**    [+-].digits[E[+-]digits]
+**
+** Leading and trailing whitespace is ignored for the purpose of determining
+** validity.
+**
+** If some prefix of the input string is a valid number, this routine
+** returns FALSE but it still converts the prefix and writes the result
+** into *pResult.
+*/
+SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 enc){
+#ifndef SQLITE_OMIT_FLOATING_POINT
+  int incr;
+  const char *zEnd = z + length;
+  /* sign * significand * (10 ^ (esign * exponent)) */
+  int sign = 1;    /* sign of significand */
+  i64 s = 0;       /* significand */
+  int d = 0;       /* adjust exponent for shifting decimal point */
+  int esign = 1;   /* sign of exponent */
+  int e = 0;       /* exponent */
+  int eValid = 1;  /* True if exponent is either not used or is well-formed */
+  double result;
+  int nDigits = 0;
+  int nonNum = 0;  /* True if input contains UTF16 with high byte non-zero */
+
+  assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE );
+  *pResult = 0.0;   /* Default return value, in case of an error */
+
+  if( enc==SQLITE_UTF8 ){
+    incr = 1;
+  }else{
+    int i;
+    incr = 2;
+    assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 );
+    for(i=3-enc; i<length && z[i]==0; i+=2){}
+    nonNum = i<length;
+    zEnd = &z[i^1];
+    z += (enc&1);
+  }
+
+  /* skip leading spaces */
+  while( z<zEnd && sqlite3Isspace(*z) ) z+=incr;
+  if( z>=zEnd ) return 0;
+
+  /* get sign of significand */
+  if( *z=='-' ){
+    sign = -1;
+    z+=incr;
+  }else if( *z=='+' ){
+    z+=incr;
+  }
+
+  /* copy max significant digits to significand */
+  while( z<zEnd && sqlite3Isdigit(*z) && s<((LARGEST_INT64-9)/10) ){
+    s = s*10 + (*z - '0');
+    z+=incr; nDigits++;
+  }
+
+  /* skip non-significant significand digits
+  ** (increase exponent by d to shift decimal left) */
+  while( z<zEnd && sqlite3Isdigit(*z) ){ z+=incr; nDigits++; d++; }
+  if( z>=zEnd ) goto do_atof_calc;
+
+  /* if decimal point is present */
+  if( *z=='.' ){
+    z+=incr;
+    /* copy digits from after decimal to significand
+    ** (decrease exponent by d to shift decimal right) */
+    while( z<zEnd && sqlite3Isdigit(*z) ){
+      if( s<((LARGEST_INT64-9)/10) ){
+        s = s*10 + (*z - '0');
+        d--;
+      }
+      z+=incr; nDigits++;
+    }
+  }
+  if( z>=zEnd ) goto do_atof_calc;
+
+  /* if exponent is present */
+  if( *z=='e' || *z=='E' ){
+    z+=incr;
+    eValid = 0;
+
+    /* This branch is needed to avoid a (harmless) buffer overread.
The
+    ** special comment alerts the mutation tester that the correct answer
+    ** is obtained even if the branch is omitted */
+    if( z>=zEnd ) goto do_atof_calc;           /*PREVENTS-HARMLESS-OVERREAD*/
+
+    /* get sign of exponent */
+    if( *z=='-' ){
+      esign = -1;
+      z+=incr;
+    }else if( *z=='+' ){
+      z+=incr;
+    }
+    /* copy digits to exponent */
+    while( z<zEnd && sqlite3Isdigit(*z) ){
+      e = e<10000 ? (e*10 + (*z - '0')) : 10000;
+      z+=incr;
+      eValid = 1;
+    }
+  }
+
+  /* skip trailing spaces */
+  while( z<zEnd && sqlite3Isspace(*z) ) z+=incr;
+
+do_atof_calc:
+  /* adjust exponent by d, and update sign */
+  e = (e*esign) + d;
+  if( e<0 ) {
+    esign = -1;
+    e *= -1;
+  } else {
+    esign = 1;
+  }
+
+  if( s==0 ) {
+    /* In the IEEE 754 standard, zero is signed. */
+    result = sign<0 ? -(double)0 : (double)0;
+  } else {
+    /* Attempt to reduce exponent.
+    **
+    ** Branches that are not required for the correct answer but which only
+    ** help to obtain the correct answer faster are marked with special
+    ** comments, as a hint to the mutation tester.
+    */
+    while( e>0 ){                                       /*OPTIMIZATION-IF-TRUE*/
+      if( esign>0 ){
+        if( s>=(LARGEST_INT64/10) ) break;             /*OPTIMIZATION-IF-FALSE*/
+        s *= 10;
+      }else{
+        if( s%10!=0 ) break;                           /*OPTIMIZATION-IF-FALSE*/
+        s /= 10;
+      }
+      e--;
+    }
+
+    /* adjust the sign of significand */
+    s = sign<0 ? -s : s;
+
+    if( e==0 ){                                         /*OPTIMIZATION-IF-TRUE*/
+      result = (double)s;
+    }else{
+      LONGDOUBLE_TYPE scale = 1.0;
+      /* attempt to handle extremely small/large numbers better */
+      if( e>307 ){                                      /*OPTIMIZATION-IF-TRUE*/
+        if( e<342 ){                                    /*OPTIMIZATION-IF-TRUE*/
+          while( e%308 ) { scale *= 1.0e+1; e -= 1; }
+          if( esign<0 ){
+            result = s / scale;
+            result /= 1.0e+308;
+          }else{
+            result = s * scale;
+            result *= 1.0e+308;
+          }
+        }else{ assert( e>=342 );
+          if( esign<0 ){
+            result = 0.0*s;
+          }else{
+            result = 1e308*1e308*s;  /* Infinity */
+          }
+        }
+      }else{
+        /* 1.0e+22 is the largest power of 10 that can be
+        ** represented exactly. */
+        while( e%22 ) { scale *= 1.0e+1; e -= 1; }
+        while( e>0 ) { scale *= 1.0e+22; e -= 22; }
+        if( esign<0 ){
+          result = s / scale;
+        }else{
+          result = s * scale;
+        }
+      }
+    }
+  }
+
+  /* store the result */
+  *pResult = result;
+
+  /* return true if number and no extra non-whitespace characters after */
+  return z==zEnd && nDigits>0 && eValid && nonNum==0;
+#else
+  return !sqlite3Atoi64(z, pResult, length, enc);
+#endif /* SQLITE_OMIT_FLOATING_POINT */
+}
+
+/*
+** Compare the 19-character string zNum against the text representation
+** value 2^63:  9223372036854775808.  Return negative, zero, or positive
+** if zNum is less than, equal to, or greater than the string.
+** Note that zNum must contain exactly 19 characters.
+**
+** Unlike memcmp() this routine is guaranteed to return the difference
+** in the values of the last digit if the only difference is in the
+** last digit.  So, for example,
+**
+**      compare2pow63("9223372036854775800", 1)
+**
+** will return -8.
+*/
+static int compare2pow63(const char *zNum, int incr){
+  int c = 0;
+  int i;
+                    /* 012345678901234567 */
+  const char *pow63 = "922337203685477580";
+  for(i=0; c==0 && i<18; i++){
+    c = (zNum[i*incr]-pow63[i])*10;
+  }
+  if( c==0 ){
+    c = zNum[18*incr] - '8';
+    testcase( c==(-1) );
+    testcase( c==0 );
+    testcase( c==(+1) );
+  }
+  return c;
+}
+
+/*
+** Convert zNum to a 64-bit signed integer.  zNum must be decimal. This
+** routine does *not* accept hexadecimal notation.
+**
+** If the zNum value is representable as a 64-bit twos-complement
+** integer, then write that value into *pNum and return 0.
+**
+** If zNum is exactly 9223372036854775808, return 2.  This special
+** case is broken out because while 9223372036854775808 cannot be a
+** signed 64-bit integer, its negative -9223372036854775808 can be.
+**
+** If zNum is too big for a 64-bit integer and is not
+** 9223372036854775808  or if zNum contains any non-numeric text,
+** then return 1.
+**
+** length is the number of bytes in the string (bytes, not characters).
+** The string is not necessarily zero-terminated.  The encoding is
+** given by enc.
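+**
+** As an illustration of the contract above (not in the original text):
+**
+**     "9223372036854775807"   ->  returns 0  (largest 64-bit signed int)
+**     "9223372036854775808"   ->  returns 2  (fits only when negated)
+**     "9223372036854775809"   ->  returns 1  (overflows either way)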
+*/
+SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc){
+  int incr;
+  u64 u = 0;
+  int neg = 0; /* assume positive */
+  int i;
+  int c = 0;
+  int nonNum = 0;  /* True if input contains UTF16 with high byte non-zero */
+  const char *zStart;
+  const char *zEnd = zNum + length;
+  assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE );
+  if( enc==SQLITE_UTF8 ){
+    incr = 1;
+  }else{
+    incr = 2;
+    assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 );
+    for(i=3-enc; i<length && zNum[i]==0; i+=2){}
+    nonNum = i<length;
+    zEnd = &zNum[i^1];
+    zNum += (enc&1);
+  }
+  while( zNum<zEnd && sqlite3Isspace(*zNum) ) zNum+=incr;
+  if( zNum<zEnd ){
+    if( *zNum=='-' ){
+      neg = 1;
+      zNum+=incr;
+    }else if( *zNum=='+' ){
+      zNum+=incr;
+    }
+  }
+  zStart = zNum;
+  while( zNum<zEnd && zNum[0]=='0' ){ zNum+=incr; } /* Skip leading zeros. */
+  for(i=0; &zNum[i]<zEnd && (c=zNum[i])>='0' && c<='9'; i+=incr){
+    u = u*10 + c - '0';
+  }
+  if( u>LARGEST_INT64 ){
+    *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64;
+  }else if( neg ){
+    *pNum = -(i64)u;
+  }else{
+    *pNum = (i64)u;
+  }
+  testcase( i==18 );
+  testcase( i==19 );
+  testcase( i==20 );
+  if( &zNum[i]<zEnd              /* Extra bytes at the end */
+   || (i==0 && zStart==zNum)     /* No digits */
+   || i>19*incr                  /* Too many digits */
+   || nonNum                     /* UTF16 with high-order bytes non-zero */
+  ){
+    /* zNum is empty or contains non-numeric text or is longer
+    ** than 19 digits (thus guaranteeing that it is too large) */
+    return 1;
+  }else if( i<19*incr ){
+    /* Less than 19 digits, so we know that it fits in 64 bits */
+    assert( u<=LARGEST_INT64 );
+    return 0;
+  }else{
+    /* zNum is a 19-digit number.  Compare it against 9223372036854775808. */
+    c = compare2pow63(zNum, incr);
+    if( c<0 ){
+      /* zNum is less than 9223372036854775808 so it fits */
+      assert( u<=LARGEST_INT64 );
+      return 0;
+    }else if( c>0 ){
+      /* zNum is greater than 9223372036854775808 so it overflows */
+      return 1;
+    }else{
+      /* zNum is exactly 9223372036854775808.  Fits if negative.  The
+      ** special case 2 (overflow) applies if positive */
+      assert( u-1==LARGEST_INT64 );
+      return neg ? 0 : 2;
+    }
+  }
+}
+
+/*
+** Transform a UTF-8 integer literal, in either decimal or hexadecimal,
+** into a 64-bit signed integer.  This routine accepts hexadecimal literals,
+** whereas sqlite3Atoi64() does not.
+**
+** Returns:
+**
+**     0    Successful transformation.  Fits in a 64-bit signed integer.
+**     1    Integer too large for a 64-bit signed integer or is malformed
+**     2    Special case of 9223372036854775808
+*/
+SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){
+#ifndef SQLITE_OMIT_HEX_INTEGER
+  if( z[0]=='0'
+   && (z[1]=='x' || z[1]=='X')
+  ){
+    u64 u = 0;
+    int i, k;
+    for(i=2; z[i]=='0'; i++){}
+    for(k=i; sqlite3Isxdigit(z[k]); k++){
+      u = u*16 + sqlite3HexToInt(z[k]);
+    }
+    memcpy(pOut, &u, 8);
+    return (z[k]==0 && k-i<=16) ? 0 : 1;
+  }else
+#endif /* SQLITE_OMIT_HEX_INTEGER */
+  {
+    return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8);
+  }
+}
+
+/*
+** If zNum represents an integer that will fit in 32-bits, then set
+** *pValue to that integer and return true.  Otherwise return false.
+**
+** This routine accepts both decimal and hexadecimal notation for integers.
+**
+** Any non-numeric characters that follow zNum are ignored.
+** This is different from sqlite3Atoi64() which requires the
+** input number to be zero-terminated.
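+**
+** Illustrative examples (not part of the original text): "123xyz"
+** yields 123 since trailing non-numeric text is ignored, "0x7FFFFFFF"
+** is accepted as hexadecimal, but "0x80000000" is rejected because it
+** does not fit in a signed 32-bit integer.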
+*/ +SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){ + sqlite_int64 v = 0; + int i, c; + int neg = 0; + if( zNum[0]=='-' ){ + neg = 1; + zNum++; + }else if( zNum[0]=='+' ){ + zNum++; + } +#ifndef SQLITE_OMIT_HEX_INTEGER + else if( zNum[0]=='0' + && (zNum[1]=='x' || zNum[1]=='X') + && sqlite3Isxdigit(zNum[2]) + ){ + u32 u = 0; + zNum += 2; + while( zNum[0]=='0' ) zNum++; + for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){ + u = u*16 + sqlite3HexToInt(zNum[i]); + } + if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){ + memcpy(pValue, &u, 4); + return 1; + }else{ + return 0; + } + } +#endif + while( zNum[0]=='0' ) zNum++; + for(i=0; i<11 && (c = zNum[i] - '0')>=0 && c<=9; i++){ + v = v*10 + c; + } + + /* The longest decimal representation of a 32 bit integer is 10 digits: + ** + ** 1234567890 + ** 2^31 -> 2147483648 + */ + testcase( i==10 ); + if( i>10 ){ + return 0; + } + testcase( v-neg==2147483647 ); + if( v-neg>2147483647 ){ + return 0; + } + if( neg ){ + v = -v; + } + *pValue = (int)v; + return 1; +} + +/* +** Return a 32-bit integer value extracted from a string. If the +** string is not an integer, just return 0. +*/ +SQLITE_PRIVATE int sqlite3Atoi(const char *z){ + int x = 0; + if( z ) sqlite3GetInt32(z, &x); + return x; +} + +/* +** The variable-length integer encoding is as follows: +** +** KEY: +** A = 0xxxxxxx 7 bits of data and one flag bit +** B = 1xxxxxxx 7 bits of data and one flag bit +** C = xxxxxxxx 8 bits of data +** +** 7 bits - A +** 14 bits - BA +** 21 bits - BBA +** 28 bits - BBBA +** 35 bits - BBBBA +** 42 bits - BBBBBA +** 49 bits - BBBBBBA +** 56 bits - BBBBBBBA +** 64 bits - BBBBBBBBC +*/ + +/* +** Write a 64-bit variable-length integer to memory starting at p[0]. +** The length of data write will be between 1 and 9 bytes. The number +** of bytes written is returned. +** +** A variable-length integer consists of the lower 7 bits of each byte +** for all bytes that have the 8th bit set and one byte with the 8th +** bit clear. Except, if we get to the 9th byte, it stores the full +** 8 bits and is the last byte. +*/ +static int SQLITE_NOINLINE putVarint64(unsigned char *p, u64 v){ + int i, j, n; + u8 buf[10]; + if( v & (((u64)0xff000000)<<32) ){ + p[8] = (u8)v; + v >>= 8; + for(i=7; i>=0; i--){ + p[i] = (u8)((v & 0x7f) | 0x80); + v >>= 7; + } + return 9; + } + n = 0; + do{ + buf[n++] = (u8)((v & 0x7f) | 0x80); + v >>= 7; + }while( v!=0 ); + buf[0] &= 0x7f; + assert( n<=9 ); + for(i=0, j=n-1; j>=0; j--, i++){ + p[i] = buf[j]; + } + return n; +} +SQLITE_PRIVATE int sqlite3PutVarint(unsigned char *p, u64 v){ + if( v<=0x7f ){ + p[0] = v&0x7f; + return 1; + } + if( v<=0x3fff ){ + p[0] = ((v>>7)&0x7f)|0x80; + p[1] = v&0x7f; + return 2; + } + return putVarint64(p,v); +} + +/* +** Bitmasks used by sqlite3GetVarint(). These precomputed constants +** are defined here rather than simply putting the constant expressions +** inline in order to work around bugs in the RVT compiler. +** +** SLOT_2_0 A mask for (0x7f<<14) | 0x7f +** +** SLOT_4_2_0 A mask for (0x7f<<28) | SLOT_2_0 +*/ +#define SLOT_2_0 0x001fc07f +#define SLOT_4_2_0 0xf01fc07f + + +/* +** Read a 64-bit variable-length integer from memory starting at p[0]. +** Return the number of bytes read. The value is stored in *v. 
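+**
+** A worked example (not part of the original text): 300 decimal is
+** 100101100 in binary, which splits into the 7-bit groups 0000010
+** and 0101100.  The groups are stored high-order first, with the 8th
+** bit set on every byte except the last:
+**
+**     300  ->  0x82 0x2C
+**
+** so sqlite3PutVarint() emits two bytes, and this routine reads them
+** back into *v and returns 2.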
+*/ +SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){ + u32 a,b,s; + + a = *p; + /* a: p0 (unmasked) */ + if (!(a&0x80)) + { + *v = a; + return 1; + } + + p++; + b = *p; + /* b: p1 (unmasked) */ + if (!(b&0x80)) + { + a &= 0x7f; + a = a<<7; + a |= b; + *v = a; + return 2; + } + + /* Verify that constants are precomputed correctly */ + assert( SLOT_2_0 == ((0x7f<<14) | (0x7f)) ); + assert( SLOT_4_2_0 == ((0xfU<<28) | (0x7f<<14) | (0x7f)) ); + + p++; + a = a<<14; + a |= *p; + /* a: p0<<14 | p2 (unmasked) */ + if (!(a&0x80)) + { + a &= SLOT_2_0; + b &= 0x7f; + b = b<<7; + a |= b; + *v = a; + return 3; + } + + /* CSE1 from below */ + a &= SLOT_2_0; + p++; + b = b<<14; + b |= *p; + /* b: p1<<14 | p3 (unmasked) */ + if (!(b&0x80)) + { + b &= SLOT_2_0; + /* moved CSE1 up */ + /* a &= (0x7f<<14)|(0x7f); */ + a = a<<7; + a |= b; + *v = a; + return 4; + } + + /* a: p0<<14 | p2 (masked) */ + /* b: p1<<14 | p3 (unmasked) */ + /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ + /* moved CSE1 up */ + /* a &= (0x7f<<14)|(0x7f); */ + b &= SLOT_2_0; + s = a; + /* s: p0<<14 | p2 (masked) */ + + p++; + a = a<<14; + a |= *p; + /* a: p0<<28 | p2<<14 | p4 (unmasked) */ + if (!(a&0x80)) + { + /* we can skip these cause they were (effectively) done above + ** while calculating s */ + /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ + /* b &= (0x7f<<14)|(0x7f); */ + b = b<<7; + a |= b; + s = s>>18; + *v = ((u64)s)<<32 | a; + return 5; + } + + /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ + s = s<<7; + s |= b; + /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ + + p++; + b = b<<14; + b |= *p; + /* b: p1<<28 | p3<<14 | p5 (unmasked) */ + if (!(b&0x80)) + { + /* we can skip this cause it was (effectively) done above in calc'ing s */ + /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ + a &= SLOT_2_0; + a = a<<7; + a |= b; + s = s>>18; + *v = ((u64)s)<<32 | a; + return 6; + } + + p++; + a = a<<14; + a |= *p; + /* a: p2<<28 | p4<<14 | p6 (unmasked) */ + if (!(a&0x80)) + { + a &= SLOT_4_2_0; + b &= SLOT_2_0; + b = b<<7; + a |= b; + s = s>>11; + *v = ((u64)s)<<32 | a; + return 7; + } + + /* CSE2 from below */ + a &= SLOT_2_0; + p++; + b = b<<14; + b |= *p; + /* b: p3<<28 | p5<<14 | p7 (unmasked) */ + if (!(b&0x80)) + { + b &= SLOT_4_2_0; + /* moved CSE2 up */ + /* a &= (0x7f<<14)|(0x7f); */ + a = a<<7; + a |= b; + s = s>>4; + *v = ((u64)s)<<32 | a; + return 8; + } + + p++; + a = a<<15; + a |= *p; + /* a: p4<<29 | p6<<15 | p8 (unmasked) */ + + /* moved CSE2 up */ + /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */ + b &= SLOT_2_0; + b = b<<8; + a |= b; + + s = s<<4; + b = p[-4]; + b &= 0x7f; + b = b>>3; + s |= b; + + *v = ((u64)s)<<32 | a; + + return 9; +} + +/* +** Read a 32-bit variable-length integer from memory starting at p[0]. +** Return the number of bytes read. The value is stored in *v. +** +** If the varint stored in p[0] is larger than can fit in a 32-bit unsigned +** integer, then set *v to 0xffffffff. +** +** A MACRO version, getVarint32, is provided which inlines the +** single-byte case. All code should use the MACRO version as +** this function assumes the single-byte case has already been handled. +*/ +SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ + u32 a,b; + + /* The 1-byte case. Overwhelmingly the most common. 
Handled inline + ** by the getVarin32() macro */ + a = *p; + /* a: p0 (unmasked) */ +#ifndef getVarint32 + if (!(a&0x80)) + { + /* Values between 0 and 127 */ + *v = a; + return 1; + } +#endif + + /* The 2-byte case */ + p++; + b = *p; + /* b: p1 (unmasked) */ + if (!(b&0x80)) + { + /* Values between 128 and 16383 */ + a &= 0x7f; + a = a<<7; + *v = a | b; + return 2; + } + + /* The 3-byte case */ + p++; + a = a<<14; + a |= *p; + /* a: p0<<14 | p2 (unmasked) */ + if (!(a&0x80)) + { + /* Values between 16384 and 2097151 */ + a &= (0x7f<<14)|(0x7f); + b &= 0x7f; + b = b<<7; + *v = a | b; + return 3; + } + + /* A 32-bit varint is used to store size information in btrees. + ** Objects are rarely larger than 2MiB limit of a 3-byte varint. + ** A 3-byte varint is sufficient, for example, to record the size + ** of a 1048569-byte BLOB or string. + ** + ** We only unroll the first 1-, 2-, and 3- byte cases. The very + ** rare larger cases can be handled by the slower 64-bit varint + ** routine. + */ +#if 1 + { + u64 v64; + u8 n; + + p -= 2; + n = sqlite3GetVarint(p, &v64); + assert( n>3 && n<=9 ); + if( (v64 & SQLITE_MAX_U32)!=v64 ){ + *v = 0xffffffff; + }else{ + *v = (u32)v64; + } + return n; + } + +#else + /* For following code (kept for historical record only) shows an + ** unrolling for the 3- and 4-byte varint cases. This code is + ** slightly faster, but it is also larger and much harder to test. + */ + p++; + b = b<<14; + b |= *p; + /* b: p1<<14 | p3 (unmasked) */ + if (!(b&0x80)) + { + /* Values between 2097152 and 268435455 */ + b &= (0x7f<<14)|(0x7f); + a &= (0x7f<<14)|(0x7f); + a = a<<7; + *v = a | b; + return 4; + } + + p++; + a = a<<14; + a |= *p; + /* a: p0<<28 | p2<<14 | p4 (unmasked) */ + if (!(a&0x80)) + { + /* Values between 268435456 and 34359738367 */ + a &= SLOT_4_2_0; + b &= SLOT_4_2_0; + b = b<<7; + *v = a | b; + return 5; + } + + /* We can only reach this point when reading a corrupt database + ** file. In that case we are not in any hurry. Use the (relatively + ** slow) general-purpose sqlite3GetVarint() routine to extract the + ** value. */ + { + u64 v64; + u8 n; + + p -= 4; + n = sqlite3GetVarint(p, &v64); + assert( n>5 && n<=9 ); + *v = (u32)v64; + return n; + } +#endif +} + +/* +** Return the number of bytes that will be needed to store the given +** 64-bit integer. +*/ +SQLITE_PRIVATE int sqlite3VarintLen(u64 v){ + int i; + for(i=1; (v >>= 7)!=0; i++){ assert( i<10 ); } + return i; +} + + +/* +** Read or write a four-byte big-endian integer value. +*/ +SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){ +#if SQLITE_BYTEORDER==4321 + u32 x; + memcpy(&x,p,4); + return x; +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + u32 x; + memcpy(&x,p,4); + return __builtin_bswap32(x); +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + u32 x; + memcpy(&x,p,4); + return _byteswap_ulong(x); +#else + testcase( p[0]&0x80 ); + return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]; +#endif +} +SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){ +#if SQLITE_BYTEORDER==4321 + memcpy(p,&v,4); +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + u32 x = __builtin_bswap32(v); + memcpy(p,&x,4); +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + u32 x = _byteswap_ulong(v); + memcpy(p,&x,4); +#else + p[0] = (u8)(v>>24); + p[1] = (u8)(v>>16); + p[2] = (u8)(v>>8); + p[3] = (u8)v; +#endif +} + + + +/* +** Translate a single byte of Hex into an integer. 
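+** As a worked example (not in the original text), on an ASCII system
+** 'a' is 0x61: (h>>6)&1 is 1 for letters, so 9 is added to give 0x6A,
+** and the final mask with 0xf yields 10.  For the digit '7' (0x37)
+** the adjustment is skipped and the mask alone yields 7.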
+** This routine only works if h really is a valid hexadecimal
+** character:  0..9a..fA..F
+*/
+SQLITE_PRIVATE u8 sqlite3HexToInt(int h){
+  assert( (h>='0' && h<='9') ||  (h>='a' && h<='f') ||  (h>='A' && h<='F') );
+#ifdef SQLITE_ASCII
+  h += 9*(1&(h>>6));
+#endif
+#ifdef SQLITE_EBCDIC
+  h += 9*(1&~(h>>4));
+#endif
+  return (u8)(h & 0xf);
+}
+
+#if !defined(SQLITE_OMIT_BLOB_LITERAL) || defined(SQLITE_HAS_CODEC)
+/*
+** Convert a BLOB literal of the form "x'hhhhhh'" into its binary
+** value.  Return a pointer to its binary value.  Space to hold the
+** binary value has been obtained from malloc and must be freed by
+** the calling routine.
+*/
+SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3 *db, const char *z, int n){
+  char *zBlob;
+  int i;
+
+  zBlob = (char *)sqlite3DbMallocRawNN(db, n/2 + 1);
+  n--;
+  if( zBlob ){
+    for(i=0; i<n; i+=2){
+      zBlob[i/2] = (sqlite3HexToInt(z[i])<<4) | sqlite3HexToInt(z[i+1]);
+    }
+    zBlob[i/2] = 0;
+  }
+  return zBlob;
+}
+#endif /* !SQLITE_OMIT_BLOB_LITERAL || SQLITE_HAS_CODEC */
+
+/*
+** Log an error that is an API call on a connection pointer that should
+** not have been used.  The "type" of connection pointer is given as the
+** argument.  The zType is a word like "NULL" or "closed" or "invalid".
+*/
+static void logBadConnection(const char *zType){
+  sqlite3_log(SQLITE_MISUSE,
+     "API call with %s database connection pointer",
+     zType
+  );
+}
+
+/*
+** Check to make sure we have a valid db pointer.  This test is not
+** foolproof but it does provide some measure of protection against
+** misuse of the interface such as passing in db pointers that are
+** NULL or which have been previously closed.  If this routine returns
+** 1 it means that the db pointer is valid and 0 if it should not be
+** dereferenced for any reason.  The calling function should invoke
+** SQLITE_MISUSE immediately.
+**
+** sqlite3SafetyCheckOk() requires that the db pointer be valid for
+** use.  sqlite3SafetyCheckSickOrOk() allows use of db for error
+** reporting while attempting to recover from a panic or deadlock.
+*/
+SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3 *db){
+  u32 magic;
+  if( db==0 ){
+    logBadConnection("NULL");
+    return 0;
+  }
+  magic = db->magic;
+  if( magic!=SQLITE_MAGIC_OPEN ){
+    if( sqlite3SafetyCheckSickOrOk(db) ){
+      testcase( sqlite3GlobalConfig.xLog!=0 );
+      logBadConnection("unopened");
+    }
+    return 0;
+  }else{
+    return 1;
+  }
+}
+SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){
+  u32 magic;
+  magic = db->magic;
+  if( magic!=SQLITE_MAGIC_SICK &&
+      magic!=SQLITE_MAGIC_OPEN &&
+      magic!=SQLITE_MAGIC_BUSY ){
+    testcase( sqlite3GlobalConfig.xLog!=0 );
+    logBadConnection("invalid");
+    return 0;
+  }else{
+    return 1;
+  }
+}
+
+/*
+** Attempt to add, subtract, or multiply the 64-bit signed value iB against
+** the other 64-bit signed integer at *pA and store the result in *pA.
+** Return 0 on success.  Or if the operation would have resulted in an
+** overflow, leave *pA unchanged and return 1.
+*/
+SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000
+  return __builtin_add_overflow(*pA, iB, pA);
+#else
+  i64 iA = *pA;
+  testcase( iA==0 ); testcase( iA==1 );
+  testcase( iB==-1 ); testcase( iB==0 );
+  if( iB>=0 ){
+    testcase( iA>0 && LARGEST_INT64 - iA == iB );
+    testcase( iA>0 && LARGEST_INT64 - iA == iB - 1 );
+    if( iA>0 && LARGEST_INT64 - iA < iB ) return 1;
+  }else{
+    testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 1 );
+    testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 2 );
+    if( iA<0 && -(iA + LARGEST_INT64) > iB + 1 ) return 1;
+  }
+  *pA += iB;
+  return 0;
+#endif
+}
+SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000
+  return __builtin_sub_overflow(*pA, iB, pA);
+#else
+  testcase( iB==SMALLEST_INT64+1 );
+  if( iB==SMALLEST_INT64 ){
+    testcase( (*pA)==(-1) ); testcase( (*pA)==0 );
+    if( (*pA)>=0 ) return 1;
+    *pA -= iB;
+    return 0;
+  }else{
+    return sqlite3AddInt64(pA, -iB);
+  }
+#endif
+}
+SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000
+  return __builtin_mul_overflow(*pA, iB, pA);
+#else
+  i64 iA = *pA;
+  if( iB>0 ){
+    if( iA>LARGEST_INT64/iB ) return 1;
+    if( iA<SMALLEST_INT64/iB ) return 1;
+  }else if( iB<0 ){
+    if( iA>0 ){
+      if( iB<SMALLEST_INT64/iA ) return 1;
+    }else if( iA<0 ){
+      if( iB==SMALLEST_INT64 ) return 1;
+      if( iA==SMALLEST_INT64 ) return 1;
+      if( -iA>LARGEST_INT64/-iB ) return 1;
+    }
+  }
+  *pA = iA*iB;
+  return 0;
+#endif
+}
+
+/*
+** Compute the absolute value of a 32-bit signed integer, if possible.  Or
+** if the integer has a value of -2147483648, return +2147483647
+*/
+SQLITE_PRIVATE int sqlite3AbsInt32(int x){
+  if( x>=0 ) return x;
+  if( x==(int)0x80000000 ) return 0x7fffffff;
+  return -x;
+}
+
+#ifdef SQLITE_ENABLE_8_3_NAMES
+/*
+** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database
+** filename in zBaseFilename is a URI with the "8_3_names=1" parameter and
+** if filename in z[] has a suffix (a.k.a.
"extension") that is longer than +** three characters, then shorten the suffix on z[] to be the last three +** characters of the original suffix. +** +** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always +** do the suffix shortening regardless of URI parameter. +** +** Examples: +** +** test.db-journal => test.nal +** test.db-wal => test.wal +** test.db-shm => test.shm +** test.db-mj7f3319fa => test.9fa +*/ +SQLITE_PRIVATE void sqlite3FileSuffix3(const char *zBaseFilename, char *z){ +#if SQLITE_ENABLE_8_3_NAMES<2 + if( sqlite3_uri_boolean(zBaseFilename, "8_3_names", 0) ) +#endif + { + int i, sz; + sz = sqlite3Strlen30(z); + for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} + if( z[i]=='.' && ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4); + } +} +#endif + +/* +** Find (an approximate) sum of two LogEst values. This computation is +** not a simple "+" operator because LogEst is stored as a logarithmic +** value. +** +*/ +SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst a, LogEst b){ + static const unsigned char x[] = { + 10, 10, /* 0,1 */ + 9, 9, /* 2,3 */ + 8, 8, /* 4,5 */ + 7, 7, 7, /* 6,7,8 */ + 6, 6, 6, /* 9,10,11 */ + 5, 5, 5, /* 12-14 */ + 4, 4, 4, 4, /* 15-18 */ + 3, 3, 3, 3, 3, 3, /* 19-24 */ + 2, 2, 2, 2, 2, 2, 2, /* 25-31 */ + }; + if( a>=b ){ + if( a>b+49 ) return a; + if( a>b+31 ) return a+1; + return a+x[a-b]; + }else{ + if( b>a+49 ) return b; + if( b>a+31 ) return b+1; + return b+x[b-a]; + } +} + +/* +** Convert an integer into a LogEst. In other words, compute an +** approximation for 10*log2(x). +*/ +SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){ + static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 }; + LogEst y = 40; + if( x<8 ){ + if( x<2 ) return 0; + while( x<8 ){ y -= 10; x <<= 1; } + }else{ + while( x>255 ){ y += 40; x >>= 4; } /*OPTIMIZATION-IF-TRUE*/ + while( x>15 ){ y += 10; x >>= 1; } + } + return a[x&7] + y - 10; +} + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* +** Convert a double into a LogEst +** In other words, compute an approximation for 10*log2(x). +*/ +SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double x){ + u64 a; + LogEst e; + assert( sizeof(x)==8 && sizeof(a)==8 ); + if( x<=1 ) return 0; + if( x<=2000000000 ) return sqlite3LogEst((u64)x); + memcpy(&a, &x, 8); + e = (a>>52) - 1022; + return e*10; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ + defined(SQLITE_ENABLE_STAT3_OR_STAT4) || \ + defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) +/* +** Convert a LogEst into an integer. +** +** Note that this routine is only used when one or more of various +** non-standard compile-time options is enabled. +*/ +SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ + u64 n; + n = x%10; + x /= 10; + if( n>=5 ) n -= 2; + else if( n>=1 ) n -= 1; +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \ + defined(SQLITE_EXPLAIN_ESTIMATED_ROWS) + if( x>60 ) return (u64)LARGEST_INT64; +#else + /* If only SQLITE_ENABLE_STAT3_OR_STAT4 is on, then the largest input + ** possible to this routine is 310, resulting in a maximum x of 31 */ + assert( x<=60 ); +#endif + return x>=3 ? (n+8)<<(x-3) : (n+8)>>(3-x); +} +#endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */ + +/* +** Add a new name/number pair to a VList. This might require that the +** VList object be reallocated, so return the new VList. If an OOM +** error occurs, the original VList returned and the +** db->mallocFailed flag is set. +** +** A VList is really just an array of integers. To destroy a VList, +** simply pass it to sqlite3DbFree(). 
+**
+** The first integer is the number of integers allocated for the whole
+** VList.  The second integer is the number of integers actually used.
+** Each name/number pair is encoded by subsequent groups of 3 or more
+** integers.
+**
+** Each name/number pair starts with two integers which are the numeric
+** value for the pair and the size of the name/number pair, respectively.
+** The text name overlays one or more following integers.  The text name
+** is always zero-terminated.
+**
+** Conceptually:
+**
+**    struct VList {
+**      int nAlloc;   // Number of allocated slots
+**      int nUsed;    // Number of used slots
+**      struct VListEntry {
+**        int iValue;    // Value for this entry
+**        int nSlot;     // Slots used by this entry
+**        // ... variable name goes here
+**      } a[0];
+**    }
+**
+** During code generation, pointers to the variable names within the
+** VList are taken.  When that happens, nAlloc is set to zero as an
+** indication that the VList may never again be enlarged, since the
+** accompanying realloc() would invalidate the pointers.
+*/
+SQLITE_PRIVATE VList *sqlite3VListAdd(
+  sqlite3 *db,           /* The database connection used for malloc() */
+  VList *pIn,            /* The input VList.  Might be NULL */
+  const char *zName,     /* Name of symbol to add */
+  int nName,             /* Bytes of text in zName */
+  int iVal               /* Value to associate with zName */
+){
+  int nInt;              /* number of sizeof(int) objects needed for zName */
+  char *z;               /* Pointer to where zName will be stored */
+  int i;                 /* Index in pIn[] where zName is stored */
+
+  nInt = nName/4 + 3;
+  assert( pIn==0 || pIn[0]>=3 );  /* Verify ok to add new elements */
+  if( pIn==0 || pIn[1]+nInt > pIn[0] ){
+    /* Enlarge the allocation */
+    int nAlloc = (pIn ? pIn[0]*2 : 10) + nInt;
+    VList *pOut = sqlite3DbRealloc(db, pIn, nAlloc*sizeof(int));
+    if( pOut==0 ) return pIn;
+    if( pIn==0 ) pOut[1] = 2;
+    pIn = pOut;
+    pIn[0] = nAlloc;
+  }
+  i = pIn[1];
+  pIn[i] = iVal;
+  pIn[i+1] = nInt;
+  z = (char*)&pIn[i+2];
+  pIn[1] = i+nInt;
+  assert( pIn[1]<=pIn[0] );
+  memcpy(z, zName, nName);
+  z[nName] = 0;
+  return pIn;
+}
+
+/*
+** Return a pointer to the name of a variable in the given VList that
+** has the value iVal.  Or return a NULL if there is no such variable in
+** the list
+*/
+SQLITE_PRIVATE const char *sqlite3VListNumToName(VList *pIn, int iVal){
+  int i, mx;
+  if( pIn==0 ) return 0;
+  mx = pIn[1];
+  i = 2;
+  do{
+    if( pIn[i]==iVal ) return (char*)&pIn[i+2];
+    i += pIn[i+1];
+  }while( i<mx );
+  return 0;
+}
+
+/*
+** Return the number of the variable named zName, if it is in VList,
+** or return 0 if there is no such variable.
+*/
+SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nName){
+  int i, mx;
+  if( pIn==0 ) return 0;
+  mx = pIn[1];
+  i = 2;
+  do{
+    const char *z = (const char*)&pIn[i+2];
+    if( strncmp(z,zName,nName)==0 && z[nName]==0 ) return pIn[i];
+    i += pIn[i+1];
+  }while( i<mx );
+  return 0;
+}
+
+/************** End of util.c ************************************************/
+/************** Begin file hash.c ********************************************/
+/*
+** 2001 September 22
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of generic hash-tables
+** used in SQLite.
+*/
+/* #include "sqliteInt.h" */
+/* #include <assert.h> */
+
+/* Turn bulk memory into a hash table object by initializing the
+** fields of the Hash structure.
+**
+** "pNew" is a pointer to the hash table that is to be initialized.
+*/
+SQLITE_PRIVATE void sqlite3HashInit(Hash *pNew){
+  assert( pNew!=0 );
+  pNew->first = 0;
+  pNew->count = 0;
+  pNew->htsize = 0;
+  pNew->ht = 0;
+}
+
+/* Remove all entries from a hash table.  Reclaim all memory.
+** Call this routine to delete a hash table or to reset a hash table
+** to the empty state.
+*/
+SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){
+  HashElem *elem;         /* For looping over all elements of the table */
+
+  assert( pH!=0 );
+  elem = pH->first;
+  pH->first = 0;
+  sqlite3_free(pH->ht);
+  pH->ht = 0;
+  pH->htsize = 0;
+  while( elem ){
+    HashElem *next_elem = elem->next;
+    sqlite3_free(elem);
+    elem = next_elem;
+  }
+  pH->count = 0;
+}
+
+/*
+** The hashing function.
+*/
+static unsigned int strHash(const char *z){
+  unsigned int h = 0;
+  unsigned char c;
+  while( (c = (unsigned char)*z++)!=0 ){     /*OPTIMIZATION-IF-TRUE*/
+    /* Knuth multiplicative hashing.  (Sorting & Searching, p. 510).
+ ** 0x9e3779b1 is 2654435761 which is the closest prime number to + ** (2**32)*golden_ratio, where golden_ratio = (sqrt(5) - 1)/2. */ + h += sqlite3UpperToLower[c]; + h *= 0x9e3779b1; + } + return h; +} + + +/* Link pNew element into the hash table pH. If pEntry!=0 then also +** insert pNew into the pEntry hash bucket. +*/ +static void insertElement( + Hash *pH, /* The complete hash table */ + struct _ht *pEntry, /* The entry into which pNew is inserted */ + HashElem *pNew /* The element to be inserted */ +){ + HashElem *pHead; /* First element already in pEntry */ + if( pEntry ){ + pHead = pEntry->count ? pEntry->chain : 0; + pEntry->count++; + pEntry->chain = pNew; + }else{ + pHead = 0; + } + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** +** The hash table might fail to resize if sqlite3_malloc() fails or +** if the new size is the same as the prior size. +** Return TRUE if the resize occurs and false if not. +*/ +static int rehash(Hash *pH, unsigned int new_size){ + struct _ht *new_ht; /* The new hash table */ + HashElem *elem, *next_elem; /* For looping over existing elements */ + +#if SQLITE_MALLOC_SOFT_LIMIT>0 + if( new_size*sizeof(struct _ht)>SQLITE_MALLOC_SOFT_LIMIT ){ + new_size = SQLITE_MALLOC_SOFT_LIMIT/sizeof(struct _ht); + } + if( new_size==pH->htsize ) return 0; +#endif + + /* The inability to allocates space for a larger hash table is + ** a performance hit but it is not a fatal error. So mark the + ** allocation as a benign. Use sqlite3Malloc()/memset(0) instead of + ** sqlite3MallocZero() to make the allocation, as sqlite3MallocZero() + ** only zeroes the requested number of bytes whereas this module will + ** use the actual amount of space allocated for the hash table (which + ** may be larger than the requested amount). + */ + sqlite3BeginBenignMalloc(); + new_ht = (struct _ht *)sqlite3Malloc( new_size*sizeof(struct _ht) ); + sqlite3EndBenignMalloc(); + + if( new_ht==0 ) return 0; + sqlite3_free(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht); + memset(new_ht, 0, new_size*sizeof(struct _ht)); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + unsigned int h = strHash(elem->pKey) % new_size; + next_elem = elem->next; + insertElement(pH, &new_ht[h], elem); + } + return 1; +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key is +** also computed and returned in the *pH parameter. 
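+**
+** As a sketch of how callers use it (illustrative, not original text):
+** sqlite3HashFind() simply discards the computed hash, while
+** sqlite3HashInsert() keeps it so a matching element can be removed
+** without rehashing:
+**
+**     elem = findElementWithHash(pH, pKey, &h);
+**     if( elem && data==0 ) removeElementGivenHash(pH, elem, h);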
+*/ +static HashElem *findElementWithHash( + const Hash *pH, /* The pH to be searched */ + const char *pKey, /* The key we are searching for */ + unsigned int *pHash /* Write the hash value here */ +){ + HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + unsigned int h; /* The computed hash */ + + if( pH->ht ){ /*OPTIMIZATION-IF-TRUE*/ + struct _ht *pEntry; + h = strHash(pKey) % pH->htsize; + pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + }else{ + h = 0; + elem = pH->first; + count = pH->count; + } + *pHash = h; + while( count-- ){ + assert( elem!=0 ); + if( sqlite3StrICmp(elem->pKey,pKey)==0 ){ + return elem; + } + elem = elem->next; + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. +*/ +static void removeElementGivenHash( + Hash *pH, /* The pH containing "elem" */ + HashElem* elem, /* The element to be removed from the pH */ + unsigned int h /* Hash value for the element */ +){ + struct _ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + if( pH->ht ){ + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + assert( pEntry->count>=0 ); + } + sqlite3_free( elem ); + pH->count--; + if( pH->count==0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + sqlite3HashClear(pH); + } +} + +/* Attempt to locate an element of the hash table pH with a key +** that matches pKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey){ + HashElem *elem; /* The element that matches key */ + unsigned int h; /* A hash on key */ + + assert( pH!=0 ); + assert( pKey!=0 ); + elem = findElementWithHash(pH, pKey, &h); + return elem ? elem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created and NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. +*/ +SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){ + unsigned int h; /* the hash of the key modulo hash table size */ + HashElem *elem; /* Used to loop thru the element list */ + HashElem *new_elem; /* New element added to the pH */ + + assert( pH!=0 ); + assert( pKey!=0 ); + elem = findElementWithHash(pH,pKey,&h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + removeElementGivenHash(pH,elem,h); + }else{ + elem->data = data; + elem->pKey = pKey; + } + return old_data; + } + if( data==0 ) return 0; + new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) ); + if( new_elem==0 ) return data; + new_elem->pKey = pKey; + new_elem->data = data; + pH->count++; + if( pH->count>=10 && pH->count > 2*pH->htsize ){ + if( rehash(pH, pH->count*2) ){ + assert( pH->htsize>0 ); + h = strHash(pKey) % pH->htsize; + } + } + insertElement(pH, pH->ht ? 
&pH->ht[h] : 0, new_elem);
+  return 0;
+}
+
+/************** End of hash.c ************************************************/
+/************** Begin file opcodes.c *****************************************/
+/* Automatically generated.  Do not edit */
+/* See the tool/mkopcodec.tcl script for details. */
+#if !defined(SQLITE_OMIT_EXPLAIN) \
+ || defined(VDBE_PROFILE) \
+ || defined(SQLITE_DEBUG)
+#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) || defined(SQLITE_DEBUG)
+# define OpHelp(X) "\0" X
+#else
+# define OpHelp(X)
+#endif
+SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
+  static const char *const azName[] = {
+    /*   0 */ "Savepoint"        OpHelp(""),
+    /*   1 */ "AutoCommit"       OpHelp(""),
+    /*   2 */ "Transaction"      OpHelp(""),
+    /*   3 */ "SorterNext"       OpHelp(""),
+    /*   4 */ "PrevIfOpen"       OpHelp(""),
+    /*   5 */ "NextIfOpen"       OpHelp(""),
+    /*   6 */ "Prev"             OpHelp(""),
+    /*   7 */ "Next"             OpHelp(""),
+    /*   8 */ "Checkpoint"       OpHelp(""),
+    /*   9 */ "JournalMode"      OpHelp(""),
+    /*  10 */ "Vacuum"           OpHelp(""),
+    /*  11 */ "VFilter"          OpHelp("iplan=r[P3] zplan='P4'"),
+    /*  12 */ "VUpdate"          OpHelp("data=r[P3@P2]"),
+    /*  13 */ "Goto"             OpHelp(""),
+    /*  14 */ "Gosub"            OpHelp(""),
+    /*  15 */ "InitCoroutine"    OpHelp(""),
+    /*  16 */ "Yield"            OpHelp(""),
+    /*  17 */ "MustBeInt"        OpHelp(""),
+    /*  18 */ "Jump"             OpHelp(""),
+    /*  19 */ "Not"              OpHelp("r[P2]= !r[P1]"),
+    /*  20 */ "Once"             OpHelp(""),
+    /*  21 */ "If"               OpHelp(""),
+    /*  22 */ "IfNot"            OpHelp(""),
+    /*  23 */ "SeekLT"           OpHelp("key=r[P3@P4]"),
+    /*  24 */ "SeekLE"           OpHelp("key=r[P3@P4]"),
+    /*  25 */ "SeekGE"           OpHelp("key=r[P3@P4]"),
+    /*  26 */ "SeekGT"           OpHelp("key=r[P3@P4]"),
+    /*  27 */ "Or"               OpHelp("r[P3]=(r[P1] || r[P2])"),
+    /*  28 */ "And"              OpHelp("r[P3]=(r[P1] && r[P2])"),
+    /*  29 */ "NoConflict"       OpHelp("key=r[P3@P4]"),
+    /*  30 */ "NotFound"         OpHelp("key=r[P3@P4]"),
+    /*  31 */ "Found"            OpHelp("key=r[P3@P4]"),
+    /*  32 */ "SeekRowid"        OpHelp("intkey=r[P3]"),
+    /*  33 */ "NotExists"        OpHelp("intkey=r[P3]"),
+    /*  34 */ "IsNull"           OpHelp("if r[P1]==NULL goto P2"),
+    /*  35 */ "NotNull"          OpHelp("if r[P1]!=NULL goto P2"),
+    /*  36 */ "Ne"               OpHelp("IF r[P3]!=r[P1]"),
+    /*  37 */ "Eq"               OpHelp("IF r[P3]==r[P1]"),
+    /*  38 */ "Gt"               OpHelp("IF r[P3]>r[P1]"),
+    /*  39 */ "Le"               OpHelp("IF r[P3]<=r[P1]"),
+    /*  40 */ "Lt"               OpHelp("IF r[P3]<r[P1]"),
+    /*  41 */ "Ge"               OpHelp("IF r[P3]>=r[P1]"),
+    /*  42 */ "ElseNotEq"        OpHelp(""),
+    /*  43 */ "BitAnd"           OpHelp("r[P3]=r[P1]&r[P2]"),
+    /*  44 */ "BitOr"            OpHelp("r[P3]=r[P1]|r[P2]"),
+    /*  45 */ "ShiftLeft"        OpHelp("r[P3]=r[P2]<<r[P1]"),
+    /*  46 */ "ShiftRight"       OpHelp("r[P3]=r[P2]>>r[P1]"),
+    /*  47 */ "Add"              OpHelp("r[P3]=r[P1]+r[P2]"),
+    /*  48 */ "Subtract"         OpHelp("r[P3]=r[P2]-r[P1]"),
+    /*  49 */ "Multiply"         OpHelp("r[P3]=r[P1]*r[P2]"),
+    /*  50 */ "Divide"           OpHelp("r[P3]=r[P2]/r[P1]"),
+    /*  51 */ "Remainder"        OpHelp("r[P3]=r[P2]%r[P1]"),
+    /*  52 */ "Concat"           OpHelp("r[P3]=r[P2]+r[P1]"),
+    /*  53 */ "Last"             OpHelp(""),
+    /*  54 */ "BitNot"           OpHelp("r[P1]= ~r[P1]"),
+    /*  55 */ "SorterSort"       OpHelp(""),
+    /*  56 */ "Sort"             OpHelp(""),
+    /*  57 */ "Rewind"           OpHelp(""),
+    /*  58 */ "IdxLE"            OpHelp("key=r[P3@P4]"),
+    /*  59 */ "IdxGT"            OpHelp("key=r[P3@P4]"),
+    /*  60 */ "IdxLT"            OpHelp("key=r[P3@P4]"),
+    /*  61 */ "IdxGE"            OpHelp("key=r[P3@P4]"),
+    /*  62 */ "RowSetRead"       OpHelp("r[P3]=rowset(P1)"),
+    /*  63 */ "RowSetTest"       OpHelp("if r[P3] in rowset(P1) goto P2"),
+    /*  64 */ "Program"          OpHelp(""),
+    /*  65 */ "FkIfZero"         OpHelp("if fkctr[P1]==0 goto P2"),
+    /*  66 */ "IfPos"            OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
+    /*  67 */ "IfNotZero"        OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
+    /*  68 */ "DecrJumpZero"     OpHelp("if (--r[P1])==0 goto P2"),
+    /*  69 */ "IncrVacuum"       OpHelp(""),
+    /*  70 */ "VNext"            OpHelp(""),
+    /* 
71 */ "Init" OpHelp("Start at P2"), + /* 72 */ "Return" OpHelp(""), + /* 73 */ "EndCoroutine" OpHelp(""), + /* 74 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), + /* 75 */ "Halt" OpHelp(""), + /* 76 */ "Integer" OpHelp("r[P2]=P1"), + /* 77 */ "Int64" OpHelp("r[P2]=P4"), + /* 78 */ "String" OpHelp("r[P2]='P4' (len=P1)"), + /* 79 */ "Null" OpHelp("r[P2..P3]=NULL"), + /* 80 */ "SoftNull" OpHelp("r[P1]=NULL"), + /* 81 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), + /* 82 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), + /* 83 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), + /* 84 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), + /* 85 */ "SCopy" OpHelp("r[P2]=r[P1]"), + /* 86 */ "IntCopy" OpHelp("r[P2]=r[P1]"), + /* 87 */ "ResultRow" OpHelp("output=r[P1@P2]"), + /* 88 */ "CollSeq" OpHelp(""), + /* 89 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"), + /* 90 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), + /* 91 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), + /* 92 */ "RealAffinity" OpHelp(""), + /* 93 */ "Cast" OpHelp("affinity(r[P1])"), + /* 94 */ "Permutation" OpHelp(""), + /* 95 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), + /* 96 */ "Column" OpHelp("r[P3]=PX"), + /* 97 */ "String8" OpHelp("r[P2]='P4'"), + /* 98 */ "Affinity" OpHelp("affinity(r[P1@P2])"), + /* 99 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 100 */ "Count" OpHelp("r[P2]=count()"), + /* 101 */ "ReadCookie" OpHelp(""), + /* 102 */ "SetCookie" OpHelp(""), + /* 103 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), + /* 104 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 105 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), + /* 106 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 107 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 108 */ "SorterOpen" OpHelp(""), + /* 109 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), + /* 110 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), + /* 111 */ "Close" OpHelp(""), + /* 112 */ "ColumnsUsed" OpHelp(""), + /* 113 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), + /* 114 */ "NewRowid" OpHelp("r[P2]=rowid"), + /* 115 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), + /* 116 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), + /* 117 */ "Delete" OpHelp(""), + /* 118 */ "ResetCount" OpHelp(""), + /* 119 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), + /* 120 */ "SorterData" OpHelp("r[P2]=data"), + /* 121 */ "RowData" OpHelp("r[P2]=data"), + /* 122 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 123 */ "NullRow" OpHelp(""), + /* 124 */ "SorterInsert" OpHelp("key=r[P2]"), + /* 125 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 126 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 127 */ "Seek" OpHelp("Move P3 to P1.rowid"), + /* 128 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 129 */ "Destroy" OpHelp(""), + /* 130 */ "Clear" OpHelp(""), + /* 131 */ "ResetSorter" OpHelp(""), + /* 132 */ "Real" OpHelp("r[P2]=P4"), + /* 133 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), + /* 134 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"), + /* 135 */ "ParseSchema" OpHelp(""), + /* 136 */ "LoadAnalysis" OpHelp(""), + /* 137 */ "DropTable" OpHelp(""), + /* 138 */ "DropIndex" OpHelp(""), + /* 139 */ "DropTrigger" OpHelp(""), + /* 140 */ "IntegrityCk" OpHelp(""), + /* 141 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 142 */ "Param" OpHelp(""), + /* 143 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 144 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 145 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 146 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 147 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 148 
*/ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 149 */ "Expire" OpHelp(""), + /* 150 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 151 */ "VBegin" OpHelp(""), + /* 152 */ "VCreate" OpHelp(""), + /* 153 */ "VDestroy" OpHelp(""), + /* 154 */ "VOpen" OpHelp(""), + /* 155 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 156 */ "VRename" OpHelp(""), + /* 157 */ "Pagecount" OpHelp(""), + /* 158 */ "MaxPgcnt" OpHelp(""), + /* 159 */ "CursorHint" OpHelp(""), + /* 160 */ "Noop" OpHelp(""), + /* 161 */ "Explain" OpHelp(""), + }; + return azName[i]; +} +#endif + +/************** End of opcodes.c *********************************************/ +/************** Begin file os_unix.c *****************************************/ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains the VFS implementation for unix-like operating systems +** include Linux, MacOSX, *BSD, QNX, VxWorks, AIX, HPUX, and others. +** +** There are actually several different VFS implementations in this file. +** The differences are in the way that file locking is done. The default +** implementation uses Posix Advisory Locks. Alternative implementations +** use flock(), dot-files, various proprietary locking schemas, or simply +** skip locking all together. +** +** This source file is organized into divisions where the logic for various +** subfunctions is contained within the appropriate division. PLEASE +** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed +** in the correct division and should be clearly labeled. +** +** The layout of divisions is as follows: +** +** * General-purpose declarations and utility functions. +** * Unique file ID logic used by VxWorks. +** * Various locking primitive implementations (all except proxy locking): +** + for Posix Advisory Locks +** + for no-op locks +** + for dot-file locks +** + for flock() locking +** + for named semaphore locks (VxWorks only) +** + for AFP filesystem locks (MacOSX only) +** * sqlite3_file methods not associated with locking. +** * Definitions of sqlite3_io_methods objects for all locking +** methods plus "finder" functions for each locking method. +** * sqlite3_vfs method implementations. +** * Locking primitives for the proxy uber-locking-method. (MacOSX only) +** * Definitions of sqlite3_vfs objects for all locking methods +** plus implementations of sqlite3_os_init() and sqlite3_os_end(). +*/ +/* #include "sqliteInt.h" */ +#if SQLITE_OS_UNIX /* This file is used on unix only */ + +/* +** There are various methods for file locking used for concurrency +** control: +** +** 1. POSIX locking (the default), +** 2. No locking, +** 3. Dot-file locking, +** 4. flock() locking, +** 5. AFP locking (OSX only), +** 6. Named POSIX semaphores (VXWorks only), +** 7. proxy locking. (OSX only) +** +** Styles 4, 5, and 7 are only available of SQLITE_ENABLE_LOCKING_STYLE +** is defined to 1. The SQLITE_ENABLE_LOCKING_STYLE also enables automatic +** selection of the appropriate locking style based on the filesystem +** where the database is located. 
+*/
+#if !defined(SQLITE_ENABLE_LOCKING_STYLE)
+#  if defined(__APPLE__)
+#    define SQLITE_ENABLE_LOCKING_STYLE 1
+#  else
+#    define SQLITE_ENABLE_LOCKING_STYLE 0
+#  endif
+#endif
+
+/* Use pread() and pwrite() if they are available */
+#if defined(__APPLE__)
+# define HAVE_PREAD 1
+# define HAVE_PWRITE 1
+#endif
+#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64)
+# undef USE_PREAD
+# define USE_PREAD64 1
+#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE)
+# undef USE_PREAD64
+# define USE_PREAD 1
+#endif
+
+/*
+** standard include files.
+*/
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+/* #include <time.h> */
+#include <sys/time.h>
+#include <errno.h>
+#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+# include <sys/mman.h>
+#endif
+
+#if SQLITE_ENABLE_LOCKING_STYLE
+# include <sys/ioctl.h>
+# include <sys/file.h>
+# include <sys/param.h>
+#endif /* SQLITE_ENABLE_LOCKING_STYLE */
+
+#if defined(__APPLE__) && ((__MAC_OS_X_VERSION_MIN_REQUIRED > 1050) || \
+                           (__IPHONE_OS_VERSION_MIN_REQUIRED > 2000))
+#  if (!defined(TARGET_OS_EMBEDDED) || (TARGET_OS_EMBEDDED==0)) \
+       && (!defined(TARGET_IPHONE_SIMULATOR) || (TARGET_IPHONE_SIMULATOR==0))
+#    define HAVE_GETHOSTUUID 1
+#  else
+#    warning "gethostuuid() is disabled."
+#  endif
+#endif
+
+
+#if OS_VXWORKS
+/* # include <sys/ioctl.h> */
+# include <semaphore.h>
+# include <limits.h>
+#endif /* OS_VXWORKS */
+
+#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE
+# include <sys/mount.h>
+#endif
+
+#ifdef HAVE_UTIME
+# include <utime.h>
+#endif
+
+/*
+** Allowed values of unixFile.fsFlags
+*/
+#define SQLITE_FSFLAGS_IS_MSDOS     0x1
+
+/*
+** If we are to be thread-safe, include the pthreads header and define
+** the SQLITE_UNIX_THREADS macro.
+*/
+#if SQLITE_THREADSAFE
+/* # include <pthread.h> */
+# define SQLITE_UNIX_THREADS 1
+#endif
+
+/*
+** Default permissions when creating a new file
+*/
+#ifndef SQLITE_DEFAULT_FILE_PERMISSIONS
+# define SQLITE_DEFAULT_FILE_PERMISSIONS 0644
+#endif
+
+/*
+** Default permissions when creating auto proxy dir
+*/
+#ifndef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS
+# define SQLITE_DEFAULT_PROXYDIR_PERMISSIONS 0755
+#endif
+
+/*
+** Maximum supported path-length.
+*/
+#define MAX_PATHNAME 512
+
+/*
+** Maximum supported symbolic links
+*/
+#define SQLITE_MAX_SYMLINKS 100
+
+/* Always cast the getpid() return type for compatibility with
+** kernel modules in VxWorks. */
+#define osGetpid(X) (pid_t)getpid()
+
+/*
+** Only set the lastErrno if the error code is a real error and not
+** a normal expected return code of SQLITE_BUSY or SQLITE_OK
+*/
+#define IS_LOCK_ERROR(x)  ((x != SQLITE_OK) && (x != SQLITE_BUSY))
+
+/* Forward references */
+typedef struct unixShm unixShm;               /* Connection shared memory */
+typedef struct unixShmNode unixShmNode;       /* Shared memory instance */
+typedef struct unixInodeInfo unixInodeInfo;   /* An i-node */
+typedef struct UnixUnusedFd UnixUnusedFd;     /* An unused file descriptor */
+
+/*
+** Sometimes, after a file handle is closed by SQLite, the file descriptor
+** cannot be closed immediately. In these cases, instances of the following
+** structure are used to store the file descriptor while waiting for an
+** opportunity to either close or reuse it.
+*/
+struct UnixUnusedFd {
+  int fd;                   /* File descriptor to close */
+  int flags;                /* Flags this file descriptor was opened with */
+  UnixUnusedFd *pNext;      /* Next unused file descriptor on same file */
+};
+
+/*
+** The unixFile structure is a subclass of sqlite3_file specific to the unix
+** VFS implementations.
+*/ +typedef struct unixFile unixFile; +struct unixFile { + sqlite3_io_methods const *pMethod; /* Always the first entry */ + sqlite3_vfs *pVfs; /* The VFS that created this unixFile */ + unixInodeInfo *pInode; /* Info about locks on this inode */ + int h; /* The file descriptor */ + unsigned char eFileLock; /* The type of lock held on this fd */ + unsigned short int ctrlFlags; /* Behavioral bits. UNIXFILE_* flags */ + int lastErrno; /* The unix errno from last I/O error */ + void *lockingContext; /* Locking style specific state */ + UnixUnusedFd *pUnused; /* Pre-allocated UnixUnusedFd */ + const char *zPath; /* Name of the file */ + unixShm *pShm; /* Shared memory segment information */ + int szChunk; /* Configured by FCNTL_CHUNK_SIZE */ +#if SQLITE_MAX_MMAP_SIZE>0 + int nFetchOut; /* Number of outstanding xFetch refs */ + sqlite3_int64 mmapSize; /* Usable size of mapping at pMapRegion */ + sqlite3_int64 mmapSizeActual; /* Actual size of mapping at pMapRegion */ + sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */ + void *pMapRegion; /* Memory mapped region */ +#endif +#ifdef __QNXNTO__ + int sectorSize; /* Device sector size */ + int deviceCharacteristics; /* Precomputed device characteristics */ +#endif +#if SQLITE_ENABLE_LOCKING_STYLE + int openFlags; /* The flags specified at open() */ +#endif +#if SQLITE_ENABLE_LOCKING_STYLE || defined(__APPLE__) + unsigned fsFlags; /* cached details from statfs() */ +#endif +#if OS_VXWORKS + struct vxworksFileId *pId; /* Unique file ID */ +#endif +#ifdef SQLITE_DEBUG + /* The next group of variables are used to track whether or not the + ** transaction counter in bytes 24-27 of database files are updated + ** whenever any part of the database changes. An assertion fault will + ** occur if a file is updated without also updating the transaction + ** counter. This test is made to avoid new problems similar to the + ** one described by ticket #3584. + */ + unsigned char transCntrChng; /* True if the transaction counter changed */ + unsigned char dbUpdate; /* True if any part of database file changed */ + unsigned char inNormalWrite; /* True if in a normal write operation */ + +#endif + +#ifdef SQLITE_TEST + /* In test mode, increase the size of this structure a bit so that + ** it is larger than the struct CrashFile defined in test6.c. + */ + char aPadding[32]; +#endif +}; + +/* This variable holds the process id (pid) from when the xRandomness() +** method was called. If xOpen() is called from a different process id, +** indicating that a fork() has occurred, the PRNG will be reset. 
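+**
+** A sketch of that check (illustrative; the actual test appears later
+** in this file):
+**
+**     if( randomnessPid!=osGetpid(0) ){
+**       randomnessPid = osGetpid(0);
+**       sqlite3_randomness(0,0);      /* reset the PRNG in the child */
+**     }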
+*/ +static pid_t randomnessPid = 0; + +/* +** Allowed values for the unixFile.ctrlFlags bitmask: +*/ +#define UNIXFILE_EXCL 0x01 /* Connections from one process only */ +#define UNIXFILE_RDONLY 0x02 /* Connection is read only */ +#define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ +#ifndef SQLITE_DISABLE_DIRSYNC +# define UNIXFILE_DIRSYNC 0x08 /* Directory sync needed */ +#else +# define UNIXFILE_DIRSYNC 0x00 +#endif +#define UNIXFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */ +#define UNIXFILE_DELETE 0x20 /* Delete on close */ +#define UNIXFILE_URI 0x40 /* Filename might have query parameters */ +#define UNIXFILE_NOLOCK 0x80 /* Do no file locking */ + +/* +** Include code that is common to all os_*.c files +*/ +/************** Include os_common.h in the middle of os_unix.c ***************/ +/************** Begin file os_common.h ***************************************/ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains macros and a little bit of code that is common to +** all of the platform-specific files (os_*.c) and is #included into those +** files. +** +** This file should be #included by the os_*.c files only. It is not a +** general purpose header file. +*/ +#ifndef _OS_COMMON_H_ +#define _OS_COMMON_H_ + +/* +** At least two bugs have slipped in because we changed the MEMORY_DEBUG +** macro to SQLITE_DEBUG and some older makefiles have not yet made the +** switch. The following code should catch this problem at compile-time. +*/ +#ifdef MEMORY_DEBUG +# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." +#endif + +/* +** Macros for performance tracing. Normally turned off. Only works +** on i486 hardware. +*/ +#ifdef SQLITE_PERFORMANCE_TRACE + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +/************** Include hwtime.h in the middle of os_common.h ****************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. 
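+**
+** A usage sketch (illustrative, not part of the original text):
+**
+**     sqlite_uint64 t0 = sqlite3Hwtime();
+**     /* ... code being profiled ... */
+**     printf("%llu cycles\n", (unsigned long long)(sqlite3Hwtime()-t0));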
+*/ +#if (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long val; + __asm__ __volatile__ ("rdtsc" : "=A" (val)); + return val; + } + +#elif (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + #error Need implementation of sqlite3Hwtime() for your platform. + + /* + ** To compile without implementing sqlite3Hwtime() for your platform, + ** you can remove the above #error and use the following + ** stub function. You will lose timing support for many + ** of the debugging and testing utilities, but it should at + ** least compile and run. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in os_common.h ******************/ + +static sqlite_uint64 g_start; +static sqlite_uint64 g_elapsed; +#define TIMER_START g_start=sqlite3Hwtime() +#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start +#define TIMER_ELAPSED g_elapsed +#else +#define TIMER_START +#define TIMER_END +#define TIMER_ELAPSED ((sqlite_uint64)0) +#endif + +/* +** If we compile with the SQLITE_TEST macro set, then the following block +** of code will give us the ability to simulate a disk I/O error. This +** is used for testing the I/O recovery logic. +*/ +#if defined(SQLITE_TEST) +SQLITE_API extern int sqlite3_io_error_hit; +SQLITE_API extern int sqlite3_io_error_hardhit; +SQLITE_API extern int sqlite3_io_error_pending; +SQLITE_API extern int sqlite3_io_error_persist; +SQLITE_API extern int sqlite3_io_error_benign; +SQLITE_API extern int sqlite3_diskfull_pending; +SQLITE_API extern int sqlite3_diskfull; +#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) +#define SimulateIOError(CODE) \ + if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ + || sqlite3_io_error_pending-- == 1 ) \ + { local_ioerr(); CODE; } +static void local_ioerr(){ + IOTRACE(("IOERR\n")); + sqlite3_io_error_hit++; + if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; +} +#define SimulateDiskfullError(CODE) \ + if( sqlite3_diskfull_pending ){ \ + if( sqlite3_diskfull_pending == 1 ){ \ + local_ioerr(); \ + sqlite3_diskfull = 1; \ + sqlite3_io_error_hit = 1; \ + CODE; \ + }else{ \ + sqlite3_diskfull_pending--; \ + } \ + } +#else +#define SimulateIOErrorBenign(X) +#define SimulateIOError(A) +#define SimulateDiskfullError(A) +#endif /* defined(SQLITE_TEST) */ + +/* +** When testing, keep a count of the number of open files. 
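+**
+** The count is maintained via the OpenCounter() macro defined below;
+** for example, closeUnixFile() later in this file does OpenCounter(-1)
+** when it releases a descriptor, and the corresponding open paths do
+** OpenCounter(+1), so sqlite3_open_file_count reflects the number of
+** descriptors this module currently holds.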
+*/
+#if defined(SQLITE_TEST)
+SQLITE_API extern int sqlite3_open_file_count;
+#define OpenCounter(X)  sqlite3_open_file_count+=(X)
+#else
+#define OpenCounter(X)
+#endif /* defined(SQLITE_TEST) */
+
+#endif /* !defined(_OS_COMMON_H_) */
+
+/************** End of os_common.h *******************************************/
+/************** Continuing where we left off in os_unix.c ********************/
+
+/*
+** Define various macros that are missing from some systems.
+*/
+#ifndef O_LARGEFILE
+# define O_LARGEFILE 0
+#endif
+#ifdef SQLITE_DISABLE_LFS
+# undef O_LARGEFILE
+# define O_LARGEFILE 0
+#endif
+#ifndef O_NOFOLLOW
+# define O_NOFOLLOW 0
+#endif
+#ifndef O_BINARY
+# define O_BINARY 0
+#endif
+
+/*
+** The threadid macro resolves to the thread-id or to 0.  Used for
+** testing and debugging only.
+*/
+#if SQLITE_THREADSAFE
+#define threadid pthread_self()
+#else
+#define threadid 0
+#endif
+
+/*
+** HAVE_MREMAP defaults to true on Linux and false everywhere else.
+*/
+#if !defined(HAVE_MREMAP)
+# if defined(__linux__) && defined(_GNU_SOURCE)
+#  define HAVE_MREMAP 1
+# else
+#  define HAVE_MREMAP 0
+# endif
+#endif
+
+/*
+** Explicitly call the 64-bit version of lseek() on Android. Otherwise, lseek()
+** is the 32-bit version, even if _FILE_OFFSET_BITS=64 is defined.
+*/
+#ifdef __ANDROID__
+# define lseek lseek64
+#endif
+
+/*
+** Different Unix systems declare open() in different ways.  Some use
+** open(const char*,int,mode_t).  Others use open(const char*,int,...).
+** The difference is important when using a pointer to the function.
+**
+** The safest way to deal with the problem is to always use this wrapper
+** which always has the same well-defined interface.
+*/
+static int posixOpen(const char *zFile, int flags, int mode){
+  return open(zFile, flags, mode);
+}
+
+/* Forward reference */
+static int openDirectory(const char*, int*);
+static int unixGetpagesize(void);
+
+/*
+** Many system calls are accessed through pointer-to-functions so that
+** they may be overridden at runtime to facilitate fault injection during
+** testing and sandboxing.  The following array holds the names and pointers
+** to all overrideable system calls.
+*/
+static struct unix_syscall {
+  const char *zName;            /* Name of the system call */
+  sqlite3_syscall_ptr pCurrent; /* Current value of the system call */
+  sqlite3_syscall_ptr pDefault; /* Default value */
+} aSyscall[] = {
+  { "open",         (sqlite3_syscall_ptr)posixOpen,  0  },
+#define osOpen      ((int(*)(const char*,int,int))aSyscall[0].pCurrent)
+
+  { "close",        (sqlite3_syscall_ptr)close,      0  },
+#define osClose     ((int(*)(int))aSyscall[1].pCurrent)
+
+  { "access",       (sqlite3_syscall_ptr)access,     0  },
+#define osAccess    ((int(*)(const char*,int))aSyscall[2].pCurrent)
+
+  { "getcwd",       (sqlite3_syscall_ptr)getcwd,     0  },
+#define osGetcwd    ((char*(*)(char*,size_t))aSyscall[3].pCurrent)
+
+  { "stat",         (sqlite3_syscall_ptr)stat,       0  },
+#define osStat      ((int(*)(const char*,struct stat*))aSyscall[4].pCurrent)
+
+/*
+** The DJGPP compiler environment looks mostly like Unix, but it
+** lacks the fcntl() system call.  So redefine fcntl() to be something
+** that always succeeds.  This means that locking does not occur under
+** DJGPP.  But it is DOS - what did you expect?
+*/ +#ifdef __DJGPP__ + { "fstat", 0, 0 }, +#define osFstat(a,b,c) 0 +#else + { "fstat", (sqlite3_syscall_ptr)fstat, 0 }, +#define osFstat ((int(*)(int,struct stat*))aSyscall[5].pCurrent) +#endif + + { "ftruncate", (sqlite3_syscall_ptr)ftruncate, 0 }, +#define osFtruncate ((int(*)(int,off_t))aSyscall[6].pCurrent) + + { "fcntl", (sqlite3_syscall_ptr)fcntl, 0 }, +#define osFcntl ((int(*)(int,int,...))aSyscall[7].pCurrent) + + { "read", (sqlite3_syscall_ptr)read, 0 }, +#define osRead ((ssize_t(*)(int,void*,size_t))aSyscall[8].pCurrent) + +#if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE + { "pread", (sqlite3_syscall_ptr)pread, 0 }, +#else + { "pread", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osPread ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[9].pCurrent) + +#if defined(USE_PREAD64) + { "pread64", (sqlite3_syscall_ptr)pread64, 0 }, +#else + { "pread64", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osPread64 ((ssize_t(*)(int,void*,size_t,off64_t))aSyscall[10].pCurrent) + + { "write", (sqlite3_syscall_ptr)write, 0 }, +#define osWrite ((ssize_t(*)(int,const void*,size_t))aSyscall[11].pCurrent) + +#if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE + { "pwrite", (sqlite3_syscall_ptr)pwrite, 0 }, +#else + { "pwrite", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osPwrite ((ssize_t(*)(int,const void*,size_t,off_t))\ + aSyscall[12].pCurrent) + +#if defined(USE_PREAD64) + { "pwrite64", (sqlite3_syscall_ptr)pwrite64, 0 }, +#else + { "pwrite64", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off64_t))\ + aSyscall[13].pCurrent) + + { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, +#define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) + +#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE + { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 }, +#else + { "fallocate", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osFallocate ((int(*)(int,off_t,off_t))aSyscall[15].pCurrent) + + { "unlink", (sqlite3_syscall_ptr)unlink, 0 }, +#define osUnlink ((int(*)(const char*))aSyscall[16].pCurrent) + + { "openDirectory", (sqlite3_syscall_ptr)openDirectory, 0 }, +#define osOpenDirectory ((int(*)(const char*,int*))aSyscall[17].pCurrent) + + { "mkdir", (sqlite3_syscall_ptr)mkdir, 0 }, +#define osMkdir ((int(*)(const char*,mode_t))aSyscall[18].pCurrent) + + { "rmdir", (sqlite3_syscall_ptr)rmdir, 0 }, +#define osRmdir ((int(*)(const char*))aSyscall[19].pCurrent) + +#if defined(HAVE_FCHOWN) + { "fchown", (sqlite3_syscall_ptr)fchown, 0 }, +#else + { "fchown", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osFchown ((int(*)(int,uid_t,gid_t))aSyscall[20].pCurrent) + + { "geteuid", (sqlite3_syscall_ptr)geteuid, 0 }, +#define osGeteuid ((uid_t(*)(void))aSyscall[21].pCurrent) + +#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 + { "mmap", (sqlite3_syscall_ptr)mmap, 0 }, +#else + { "mmap", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[22].pCurrent) + +#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 + { "munmap", (sqlite3_syscall_ptr)munmap, 0 }, +#else + { "munmap", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osMunmap ((void*(*)(void*,size_t))aSyscall[23].pCurrent) + +#if HAVE_MREMAP && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) + { "mremap", (sqlite3_syscall_ptr)mremap, 0 }, +#else + { "mremap", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osMremap ((void*(*)(void*,size_t,size_t,int,...))aSyscall[24].pCurrent) + +#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 + { 
"getpagesize", (sqlite3_syscall_ptr)unixGetpagesize, 0 }, +#else + { "getpagesize", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osGetpagesize ((int(*)(void))aSyscall[25].pCurrent) + +#if defined(HAVE_READLINK) + { "readlink", (sqlite3_syscall_ptr)readlink, 0 }, +#else + { "readlink", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osReadlink ((ssize_t(*)(const char*,char*,size_t))aSyscall[26].pCurrent) + +#if defined(HAVE_LSTAT) + { "lstat", (sqlite3_syscall_ptr)lstat, 0 }, +#else + { "lstat", (sqlite3_syscall_ptr)0, 0 }, +#endif +#define osLstat ((int(*)(const char*,struct stat*))aSyscall[27].pCurrent) + +}; /* End of the overrideable system calls */ + + +/* +** On some systems, calls to fchown() will trigger a message in a security +** log if they come from non-root processes. So avoid calling fchown() if +** we are not running as root. +*/ +static int robustFchown(int fd, uid_t uid, gid_t gid){ +#if defined(HAVE_FCHOWN) + return osGeteuid() ? 0 : osFchown(fd,uid,gid); +#else + return 0; +#endif +} + +/* +** This is the xSetSystemCall() method of sqlite3_vfs for all of the +** "unix" VFSes. Return SQLITE_OK opon successfully updating the +** system call pointer, or SQLITE_NOTFOUND if there is no configurable +** system call named zName. +*/ +static int unixSetSystemCall( + sqlite3_vfs *pNotUsed, /* The VFS pointer. Not used */ + const char *zName, /* Name of system call to override */ + sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */ +){ + unsigned int i; + int rc = SQLITE_NOTFOUND; + + UNUSED_PARAMETER(pNotUsed); + if( zName==0 ){ + /* If no zName is given, restore all system calls to their default + ** settings and return NULL + */ + rc = SQLITE_OK; + for(i=0; i=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break; + osClose(fd); + sqlite3_log(SQLITE_WARNING, + "attempt to open \"%s\" as file descriptor %d", z, fd); + fd = -1; + if( osOpen("/dev/null", f, m)<0 ) break; + } + if( fd>=0 ){ + if( m!=0 ){ + struct stat statbuf; + if( osFstat(fd, &statbuf)==0 + && statbuf.st_size==0 + && (statbuf.st_mode&0777)!=m + ){ + osFchmod(fd, m); + } + } +#if defined(FD_CLOEXEC) && (!defined(O_CLOEXEC) || O_CLOEXEC==0) + osFcntl(fd, F_SETFD, osFcntl(fd, F_GETFD, 0) | FD_CLOEXEC); +#endif + } + return fd; +} + +/* +** Helper functions to obtain and relinquish the global mutex. The +** global mutex is used to protect the unixInodeInfo and +** vxworksFileId objects used by this file, all of which may be +** shared by multiple threads. +** +** Function unixMutexHeld() is used to assert() that the global mutex +** is held when required. This function is only used as part of assert() +** statements. e.g. +** +** unixEnterMutex() +** assert( unixMutexHeld() ); +** unixEnterLeave() +*/ +static void unixEnterMutex(void){ + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); +} +static void unixLeaveMutex(void){ + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); +} +#ifdef SQLITE_DEBUG +static int unixMutexHeld(void) { + return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); +} +#endif + + +#ifdef SQLITE_HAVE_OS_TRACE +/* +** Helper function for printing out trace information from debugging +** binaries. This returns the string representation of the supplied +** integer lock-type. 
+*/ +static const char *azFileLock(int eFileLock){ + switch( eFileLock ){ + case NO_LOCK: return "NONE"; + case SHARED_LOCK: return "SHARED"; + case RESERVED_LOCK: return "RESERVED"; + case PENDING_LOCK: return "PENDING"; + case EXCLUSIVE_LOCK: return "EXCLUSIVE"; + } + return "ERROR"; +} +#endif + +#ifdef SQLITE_LOCK_TRACE +/* +** Print out information about all locking operations. +** +** This routine is used for troubleshooting locks on multithreaded +** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE +** command-line option on the compiler. This code is normally +** turned off. +*/ +static int lockTrace(int fd, int op, struct flock *p){ + char *zOpName, *zType; + int s; + int savedErrno; + if( op==F_GETLK ){ + zOpName = "GETLK"; + }else if( op==F_SETLK ){ + zOpName = "SETLK"; + }else{ + s = osFcntl(fd, op, p); + sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s); + return s; + } + if( p->l_type==F_RDLCK ){ + zType = "RDLCK"; + }else if( p->l_type==F_WRLCK ){ + zType = "WRLCK"; + }else if( p->l_type==F_UNLCK ){ + zType = "UNLCK"; + }else{ + assert( 0 ); + } + assert( p->l_whence==SEEK_SET ); + s = osFcntl(fd, op, p); + savedErrno = errno; + sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n", + threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len, + (int)p->l_pid, s); + if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){ + struct flock l2; + l2 = *p; + osFcntl(fd, F_GETLK, &l2); + if( l2.l_type==F_RDLCK ){ + zType = "RDLCK"; + }else if( l2.l_type==F_WRLCK ){ + zType = "WRLCK"; + }else if( l2.l_type==F_UNLCK ){ + zType = "UNLCK"; + }else{ + assert( 0 ); + } + sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n", + zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid); + } + errno = savedErrno; + return s; +} +#undef osFcntl +#define osFcntl lockTrace +#endif /* SQLITE_LOCK_TRACE */ + +/* +** Retry ftruncate() calls that fail due to EINTR +** +** All calls to ftruncate() within this file should be made through +** this wrapper. On the Android platform, bypassing the logic below +** could lead to a corrupt database. +*/ +static int robust_ftruncate(int h, sqlite3_int64 sz){ + int rc; +#ifdef __ANDROID__ + /* On Android, ftruncate() always uses 32-bit offsets, even if + ** _FILE_OFFSET_BITS=64 is defined. This means it is unsafe to attempt to + ** truncate a file to any size larger than 2GiB. Silently ignore any + ** such attempts. */ + if( sz>(sqlite3_int64)0x7FFFFFFF ){ + rc = SQLITE_OK; + }else +#endif + do{ rc = osFtruncate(h,sz); }while( rc<0 && errno==EINTR ); + return rc; +} + +/* +** This routine translates a standard POSIX errno code into something +** useful to the clients of the sqlite3 functions. Specifically, it is +** intended to translate a variety of "try again" errors into SQLITE_BUSY +** and a variety of "please close the file descriptor NOW" errors into +** SQLITE_IOERR +** +** Errors during initialization of locks, or file system support for locks, +** should handle ENOLCK, ENOTSUP, EOPNOTSUPP separately. 
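+**
+** For example, per the switch statement below:
+**
+**     sqliteErrorFromPosixError(EAGAIN, SQLITE_IOERR_LOCK)   -> SQLITE_BUSY
+**     sqliteErrorFromPosixError(EPERM,  SQLITE_IOERR_LOCK)   -> SQLITE_PERM
+**     sqliteErrorFromPosixError(EIO,    SQLITE_IOERR_UNLOCK) -> SQLITE_IOERR_UNLOCK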
+*/
+static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) {
+  assert( (sqliteIOErr == SQLITE_IOERR_LOCK) ||
+          (sqliteIOErr == SQLITE_IOERR_UNLOCK) ||
+          (sqliteIOErr == SQLITE_IOERR_RDLOCK) ||
+          (sqliteIOErr == SQLITE_IOERR_CHECKRESERVEDLOCK) );
+  switch (posixError) {
+    case EACCES:
+    case EAGAIN:
+    case ETIMEDOUT:
+    case EBUSY:
+    case EINTR:
+    case ENOLCK:
+      /* random NFS retry error, unless during file system support
+       * introspection, in which case it actually means what it says */
+      return SQLITE_BUSY;
+
+    case EPERM:
+      return SQLITE_PERM;
+
+    default:
+      return sqliteIOErr;
+  }
+}
+
+
+/******************************************************************************
+****************** Begin Unique File ID Utility Used By VxWorks ***************
+**
+** On most versions of unix, we can get a unique ID for a file by concatenating
+** the device number and the inode number.  But this does not work on VxWorks.
+** On VxWorks, a unique file id must be based on the canonical filename.
+**
+** A pointer to an instance of the following structure can be used as a
+** unique file ID in VxWorks.  Each instance of this structure contains
+** a copy of the canonical filename.  There is also a reference count.
+** The structure is reclaimed when the number of pointers to it drops to
+** zero.
+**
+** There are never very many files open at one time and lookups are not
+** a performance-critical path, so it is sufficient to put these
+** structures on a linked list.
+*/
+struct vxworksFileId {
+  struct vxworksFileId *pNext;  /* Next in a list of them all */
+  int nRef;                     /* Number of references to this one */
+  int nName;                    /* Length of the zCanonicalName[] string */
+  char *zCanonicalName;         /* Canonical filename */
+};
+
+#if OS_VXWORKS
+/*
+** All unique filenames are held on a linked list headed by this
+** variable:
+*/
+static struct vxworksFileId *vxworksFileList = 0;
+
+/*
+** Simplify a filename into its canonical form
+** by making the following changes:
+**
+**  * removing any trailing and duplicate /
+**  * convert /./ into just /
+**  * convert /A/../ where A is any simple name into just /
+**
+** Changes are made in-place.  Return the new name length.
+**
+** The original filename is in z[0..n-1].  Return the number of
+** characters in the simplified name.
+*/
+static int vxworksSimplifyName(char *z, int n){
+  int i, j;
+  while( n>1 && z[n-1]=='/' ){ n--; }
+  for(i=j=0; i<n; i++){
+    if( z[i]=='/' ){
+      if( z[i+1]=='/' ) continue;
+      if( z[i+1]=='.' && i+2<n && z[i+2]=='/' ){
+        i += 1;
+        continue;
+      }
+      if( z[i+1]=='.' && i+3<n && z[i+2]=='.' && z[i+3]=='/' ){
+        while( j>0 && z[j-1]!='/' ){ j--; }
+        if( j>0 ){ j--; }
+        i += 2;
+        continue;
+      }
+    }
+    z[j++] = z[i];
+  }
+  z[j] = 0;
+  return j;
+}
+
+/*
+** Find a unique file ID for the given absolute pathname.  Return
+** a pointer to the vxworksFileId object.  This pointer is the unique
+** file ID.
+**
+** The nRef field of the vxworksFileId object is incremented before
+** the object is returned.  A new vxworksFileId object is created
+** and added to the global list if necessary.
+**
+** If a memory allocation error occurs, return NULL.
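+**
+** Usage sketch (illustrative only; the path is hypothetical):
+**
+**     struct vxworksFileId *pId = vxworksFindFileId("/tmp/a/../test.db");
+**     /* pId->zCanonicalName is now "/tmp/test.db".  Release with: */
+**     if( pId ) vxworksReleaseFileId(pId);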
+*/
+static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){
+  struct vxworksFileId *pNew;         /* search key and new file ID */
+  struct vxworksFileId *pCandidate;   /* For looping over existing file IDs */
+  int n;                              /* Length of zAbsoluteName string */
+
+  assert( zAbsoluteName[0]=='/' );
+  n = (int)strlen(zAbsoluteName);
+  pNew = sqlite3_malloc64( sizeof(*pNew) + (n+1) );
+  if( pNew==0 ) return 0;
+  pNew->zCanonicalName = (char*)&pNew[1];
+  memcpy(pNew->zCanonicalName, zAbsoluteName, n+1);
+  n = vxworksSimplifyName(pNew->zCanonicalName, n);
+
+  /* Search for an existing entry that matches the canonical name.
+  ** If found, increment the reference count and return a pointer to
+  ** the existing file ID.
+  */
+  unixEnterMutex();
+  for(pCandidate=vxworksFileList; pCandidate; pCandidate=pCandidate->pNext){
+    if( pCandidate->nName==n
+     && memcmp(pCandidate->zCanonicalName, pNew->zCanonicalName, n)==0
+    ){
+      sqlite3_free(pNew);
+      pCandidate->nRef++;
+      unixLeaveMutex();
+      return pCandidate;
+    }
+  }
+
+  /* No match was found.  We will make a new file ID */
+  pNew->nRef = 1;
+  pNew->nName = n;
+  pNew->pNext = vxworksFileList;
+  vxworksFileList = pNew;
+  unixLeaveMutex();
+  return pNew;
+}
+
+/*
+** Decrement the reference count on a vxworksFileId object.  Free
+** the object when the reference count reaches zero.
+*/
+static void vxworksReleaseFileId(struct vxworksFileId *pId){
+  unixEnterMutex();
+  assert( pId->nRef>0 );
+  pId->nRef--;
+  if( pId->nRef==0 ){
+    struct vxworksFileId **pp;
+    for(pp=&vxworksFileList; *pp && *pp!=pId; pp = &((*pp)->pNext)){}
+    assert( *pp==pId );
+    *pp = pId->pNext;
+    sqlite3_free(pId);
+  }
+  unixLeaveMutex();
+}
+#endif /* OS_VXWORKS */
+/*************** End of Unique File ID Utility Used By VxWorks ****************
+******************************************************************************/
+
+
+/******************************************************************************
+*************************** Posix Advisory Locking ****************************
+**
+** POSIX advisory locks are broken by design.  ANSI STD 1003.1 (1996)
+** section 6.5.2.2 lines 483 through 490 specify that when a process
+** sets or clears a lock, that operation overrides any prior locks set
+** by the same process.  It does not explicitly say so, but this implies
+** that it overrides locks set by the same process using a different
+** file descriptor.  Consider this test case:
+**
+**       int fd1 = open("./file1", O_RDWR|O_CREAT, 0644);
+**       int fd2 = open("./file2", O_RDWR|O_CREAT, 0644);
+**
+** Suppose ./file1 and ./file2 are really the same file (because
+** one is a hard or symbolic link to the other) then if you set
+** an exclusive lock on fd1, then try to get an exclusive lock
+** on fd2, it works.  I would have expected the second lock to
+** fail since there was already a lock on the file due to fd1.
+** But not so.  Since both locks came from the same process, the
+** second overrides the first, even though they were on different
+** file descriptors opened on different file names.
+**
+** This means that we cannot use POSIX locks to synchronize file access
+** among competing threads of the same process.  POSIX locks will work fine
+** to synchronize access for threads in separate processes, but not
+** threads within the same process.
+**
+** To work around the problem, SQLite has to manage file locks internally
+** on its own.
+** Whenever a new database is opened, we have to find the
+** specific inode of the database file (the inode is determined by the
+** st_dev and st_ino fields of the stat structure that fstat() fills in)
+** and check for locks already existing on that inode.  When locks are
+** created or removed, we have to look at our own internal record of the
+** locks to see if another thread has previously set a lock on that same
+** inode.
+**
+** (Aside: The use of inode numbers as unique IDs does not work on VxWorks.
+** For VxWorks, we have to use the alternative unique ID system based on
+** canonical filename and implemented in the previous division.)
+**
+** The sqlite3_file structure for POSIX is no longer just an integer file
+** descriptor.  It is now a structure that holds the integer file
+** descriptor and a pointer to a structure that describes the internal
+** locks on the corresponding inode.  There is one locking structure
+** per inode, so if the same inode is opened twice, both unixFile structures
+** point to the same locking structure.  The locking structure keeps
+** a reference count (so we will know when to delete it) and a "cnt"
+** field that tells us its internal lock status.  cnt==0 means the
+** file is unlocked.  cnt==-1 means the file has an exclusive lock.
+** cnt>0 means there are cnt shared locks on the file.
+**
+** Any attempt to lock or unlock a file first checks the locking
+** structure.  The fcntl() system call is only invoked to set a
+** POSIX lock if the internal lock structure transitions between
+** a locked and an unlocked state.
+**
+** But wait:  there are yet more problems with POSIX advisory locks.
+**
+** If you close a file descriptor that points to a file that has locks,
+** all locks on that file that are owned by the current process are
+** released.  To work around this problem, each unixInodeInfo object
+** maintains a count of the number of pending locks on that inode.
+** When an attempt is made to close a unixFile, if there are
+** other unixFile objects open on the same inode that are holding locks,
+** the call to close() the file descriptor is deferred until all of the
+** locks clear.  The unixInodeInfo structure keeps a list of file
+** descriptors that need to be closed and that list is walked (and
+** cleared) when the last lock clears.
+**
+** Yet another problem:  LinuxThreads do not play well with posix locks.
+**
+** Many older versions of linux use the LinuxThreads library which is
+** not posix compliant.  Under LinuxThreads, a lock created by thread
+** A cannot be modified or overridden by a different thread B.
+** Only thread A can modify the lock.  Locking behavior is correct
+** if the application uses the newer Native Posix Thread Library (NPTL)
+** on linux - with NPTL a lock created by thread A can override locks
+** in thread B.  But there is no way to know at compile-time which
+** threading library is being used.  So there is no way to know at
+** compile-time whether or not thread A can override locks on thread B.
+** One has to do a run-time check to discover the behavior of the
+** current process.
+**
+** SQLite used to support LinuxThreads.  But support for LinuxThreads
+** was dropped beginning with version 3.7.0.  SQLite will still work with
+** LinuxThreads provided that (1) there is no more than one connection
+** per database file in the same process and (2) database connections
+** do not move across threads.
+*/
+
+/*
+** An instance of the following structure serves as the key used
+** to locate a particular unixInodeInfo object.
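+**
+** In other words (a sketch; see findInodeInfo() below for the real code),
+** the key for a file descriptor fd is built roughly like this:
+**
+**     struct stat s;
+**     fstat(fd, &s);
+**     key.dev = s.st_dev;        /* device number */
+**     key.ino = (u64)s.st_ino;   /* inode number, on non-VxWorks builds */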
+*/
+struct unixFileId {
+  dev_t dev;                  /* Device number */
+#if OS_VXWORKS
+  struct vxworksFileId *pId;  /* Unique file ID for vxworks. */
+#else
+  /* We are told that some versions of Android contain a bug that
+  ** sizes ino_t at only 32-bits instead of 64-bits. (See
+  ** https://android-review.googlesource.com/#/c/115351/3/dist/sqlite3.c)
+  ** To work around this, always allocate 64-bits for the inode number.
+  ** On small machines that only have 32-bit inodes, this wastes 4 bytes,
+  ** but that should not be a big deal. */
+  /* WAS:  ino_t ino;   */
+  u64 ino;                    /* Inode number */
+#endif
+};
+
+/*
+** An instance of the following structure is allocated for each open
+** inode.  Or, on LinuxThreads, there is one of these structures for
+** each inode opened by each thread.
+**
+** A single inode can have multiple file descriptors, so each unixFile
+** structure contains a pointer to an instance of this object and this
+** object keeps a count of the number of unixFile pointing to it.
+*/
+struct unixInodeInfo {
+  struct unixFileId fileId;       /* The lookup key */
+  int nShared;                    /* Number of SHARED locks held */
+  unsigned char eFileLock;        /* One of SHARED_LOCK, RESERVED_LOCK etc. */
+  unsigned char bProcessLock;     /* An exclusive process lock is held */
+  int nRef;                       /* Number of pointers to this structure */
+  unixShmNode *pShmNode;          /* Shared memory associated with this inode */
+  int nLock;                      /* Number of outstanding file locks */
+  UnixUnusedFd *pUnused;          /* Unused file descriptors to close */
+  unixInodeInfo *pNext;           /* List of all unixInodeInfo objects */
+  unixInodeInfo *pPrev;           /*    .... doubly linked */
+#if SQLITE_ENABLE_LOCKING_STYLE
+  unsigned long long sharedByte;  /* for AFP simulated shared lock */
+#endif
+#if OS_VXWORKS
+  sem_t *pSem;                    /* Named POSIX semaphore */
+  char aSemName[MAX_PATHNAME+2];  /* Name of that semaphore */
+#endif
+};
+
+/*
+** A list of all unixInodeInfo objects.
+*/
+static unixInodeInfo *inodeList = 0;
+
+/*
+** This function, unixLogErrorAtLine(), is only ever called via the macro
+** unixLogError().
+**
+** It is invoked after an error occurs in an OS function and errno has been
+** set. It logs a message using sqlite3_log() containing the current value of
+** errno and, if possible, the human-readable equivalent from strerror() or
+** strerror_r().
+**
+** The first argument passed to the macro should be the error code that
+** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN).
+** The two subsequent arguments should be the name of the OS function that
+** failed (e.g. "unlink", "open") and the associated file-system path,
+** if any.
+*/
+#define unixLogError(a,b,c)     unixLogErrorAtLine(a,b,c,__LINE__)
+static int unixLogErrorAtLine(
+  int errcode,                    /* SQLite error code */
+  const char *zFunc,              /* Name of OS function that failed */
+  const char *zPath,              /* File path associated with error */
+  int iLine                       /* Source line number where error occurred */
+){
+  char *zErr;                     /* Message from strerror() or equivalent */
+  int iErrno = errno;             /* Saved syscall error number */
+
+  /* If this is not a threadsafe build (SQLITE_THREADSAFE==0), then use
+  ** the strerror() function to obtain the human-readable error message
+  ** equivalent to errno.  Otherwise, use strerror_r().
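+  **
+  ** For reference, the two incompatible strerror_r() prototypes that the
+  ** #if below has to distinguish between are:
+  **
+  **     char *strerror_r(int errnum, char *buf, size_t buflen);  /* GNU */
+  **     int   strerror_r(int errnum, char *buf, size_t buflen);  /* POSIX */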
+  */
+#if SQLITE_THREADSAFE && defined(HAVE_STRERROR_R)
+  char aErr[80];
+  memset(aErr, 0, sizeof(aErr));
+  zErr = aErr;
+
+  /* If STRERROR_R_CHAR_P (set by autoconf scripts) or __USE_GNU is defined,
+  ** assume that the system provides the GNU version of strerror_r() that
+  ** returns a pointer to a buffer containing the error message. That pointer
+  ** may point to aErr[], or it may point to some static storage somewhere.
+  ** Otherwise, assume that the system provides the POSIX version of
+  ** strerror_r(), which always writes an error message into aErr[].
+  **
+  ** If the code incorrectly assumes that it is the POSIX version that is
+  ** available, the error message will often be an empty string. Not a
+  ** huge problem. Incorrectly concluding that the GNU version is available
+  ** could lead to a segfault though.
+  */
+#if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU)
+  zErr =
+# endif
+  strerror_r(iErrno, aErr, sizeof(aErr)-1);
+
+#elif SQLITE_THREADSAFE
+  /* This is a threadsafe build, but strerror_r() is not available. */
+  zErr = "";
+#else
+  /* Non-threadsafe build, use strerror(). */
+  zErr = strerror(iErrno);
+#endif
+
+  if( zPath==0 ) zPath = "";
+  sqlite3_log(errcode,
+      "os_unix.c:%d: (%d) %s(%s) - %s",
+      iLine, iErrno, zFunc, zPath, zErr
+  );
+
+  return errcode;
+}
+
+/*
+** Close a file descriptor.
+**
+** We assume that close() almost always works, since it is only in a
+** very sick application or on a very sick platform that it might fail.
+** If it does fail, simply leak the file descriptor, but do log the
+** error.
+**
+** Note that it is not safe to retry close() after EINTR since the
+** file descriptor might have already been reused by another thread.
+** So we don't even try to recover from an EINTR.  Just log the error
+** and move on.
+*/
+static void robust_close(unixFile *pFile, int h, int lineno){
+  if( osClose(h) ){
+    unixLogErrorAtLine(SQLITE_IOERR_CLOSE, "close",
+                       pFile ? pFile->zPath : 0, lineno);
+  }
+}
+
+/*
+** Set the pFile->lastErrno.  Do this in a subroutine as that provides
+** a convenient place to set a breakpoint.
+*/
+static void storeLastErrno(unixFile *pFile, int error){
+  pFile->lastErrno = error;
+}
+
+/*
+** Close all file descriptors accumulated in the unixInodeInfo->pUnused list.
+*/
+static void closePendingFds(unixFile *pFile){
+  unixInodeInfo *pInode = pFile->pInode;
+  UnixUnusedFd *p;
+  UnixUnusedFd *pNext;
+  for(p=pInode->pUnused; p; p=pNext){
+    pNext = p->pNext;
+    robust_close(pFile, p->fd, __LINE__);
+    sqlite3_free(p);
+  }
+  pInode->pUnused = 0;
+}
+
+/*
+** Release a unixInodeInfo structure previously allocated by findInodeInfo().
+**
+** The mutex entered using the unixEnterMutex() function must be held
+** when this function is called.
+*/
+static void releaseInodeInfo(unixFile *pFile){
+  unixInodeInfo *pInode = pFile->pInode;
+  assert( unixMutexHeld() );
+  if( ALWAYS(pInode) ){
+    pInode->nRef--;
+    if( pInode->nRef==0 ){
+      assert( pInode->pShmNode==0 );
+      closePendingFds(pFile);
+      if( pInode->pPrev ){
+        assert( pInode->pPrev->pNext==pInode );
+        pInode->pPrev->pNext = pInode->pNext;
+      }else{
+        assert( inodeList==pInode );
+        inodeList = pInode->pNext;
+      }
+      if( pInode->pNext ){
+        assert( pInode->pNext->pPrev==pInode );
+        pInode->pNext->pPrev = pInode->pPrev;
+      }
+      sqlite3_free(pInode);
+    }
+  }
+}
+
+/*
+** Given a file descriptor, locate the unixInodeInfo object that
+** describes that file descriptor.  Create a new one if necessary.  The
+** return value might be uninitialized if an error occurs.
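+**
+** A typical call (a sketch; this mirrors how the open-time setup code
+** elsewhere in this file uses it):
+**
+**     unixEnterMutex();
+**     rc = findInodeInfo(pFile, &pFile->pInode);
+**     unixLeaveMutex();
+**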
+** The mutex entered using the unixEnterMutex() function must be held
+** when this function is called.
+**
+** Return an appropriate error code.
+*/
+static int findInodeInfo(
+  unixFile *pFile,               /* Unix file with file desc used in the key */
+  unixInodeInfo **ppInode        /* Return the unixInodeInfo object here */
+){
+  int rc;                        /* System call return code */
+  int fd;                        /* The file descriptor for pFile */
+  struct unixFileId fileId;      /* Lookup key for the unixInodeInfo */
+  struct stat statbuf;           /* Low-level file information */
+  unixInodeInfo *pInode = 0;     /* Candidate unixInodeInfo object */
+
+  assert( unixMutexHeld() );
+
+  /* Get low-level information about the file that we can use to
+  ** create a unique name for the file.
+  */
+  fd = pFile->h;
+  rc = osFstat(fd, &statbuf);
+  if( rc!=0 ){
+    storeLastErrno(pFile, errno);
+#if defined(EOVERFLOW) && defined(SQLITE_DISABLE_LFS)
+    if( pFile->lastErrno==EOVERFLOW ) return SQLITE_NOLFS;
+#endif
+    return SQLITE_IOERR;
+  }
+
+#ifdef __APPLE__
+  /* On OS X on an msdos filesystem, the inode number is reported
+  ** incorrectly for zero-size files.  See ticket #3260.  To work
+  ** around this problem (we consider it a bug in OS X, not SQLite)
+  ** we always increase the file size to 1 by writing a single byte
+  ** prior to accessing the inode number.  The one byte written is
+  ** an ASCII 'S' character which also happens to be the first byte
+  ** in the header of every SQLite database.  In this way, if there
+  ** is a race condition such that another thread has already populated
+  ** the first page of the database, no damage is done.
+  */
+  if( statbuf.st_size==0 && (pFile->fsFlags & SQLITE_FSFLAGS_IS_MSDOS)!=0 ){
+    do{ rc = osWrite(fd, "S", 1); }while( rc<0 && errno==EINTR );
+    if( rc!=1 ){
+      storeLastErrno(pFile, errno);
+      return SQLITE_IOERR;
+    }
+    rc = osFstat(fd, &statbuf);
+    if( rc!=0 ){
+      storeLastErrno(pFile, errno);
+      return SQLITE_IOERR;
+    }
+  }
+#endif
+
+  memset(&fileId, 0, sizeof(fileId));
+  fileId.dev = statbuf.st_dev;
+#if OS_VXWORKS
+  fileId.pId = pFile->pId;
+#else
+  fileId.ino = (u64)statbuf.st_ino;
+#endif
+  pInode = inodeList;
+  while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){
+    pInode = pInode->pNext;
+  }
+  if( pInode==0 ){
+    pInode = sqlite3_malloc64( sizeof(*pInode) );
+    if( pInode==0 ){
+      return SQLITE_NOMEM_BKPT;
+    }
+    memset(pInode, 0, sizeof(*pInode));
+    memcpy(&pInode->fileId, &fileId, sizeof(fileId));
+    pInode->nRef = 1;
+    pInode->pNext = inodeList;
+    pInode->pPrev = 0;
+    if( inodeList ) inodeList->pPrev = pInode;
+    inodeList = pInode;
+  }else{
+    pInode->nRef++;
+  }
+  *ppInode = pInode;
+  return SQLITE_OK;
+}
+
+/*
+** Return TRUE if pFile has been renamed or unlinked since it was first opened.
+*/
+static int fileHasMoved(unixFile *pFile){
+#if OS_VXWORKS
+  return pFile->pInode!=0 && pFile->pId!=pFile->pInode->fileId.pId;
+#else
+  struct stat buf;
+  return pFile->pInode!=0 &&
+      (osStat(pFile->zPath, &buf)!=0
+         || (u64)buf.st_ino!=pFile->pInode->fileId.ino);
+#endif
+}
+
+
+/*
+** Check a unixFile that is a database.  Verify the following:
+**
+** (1) There is exactly one hard link on the file
+** (2) The file is not a symbolic link
+** (3) The file has not been renamed or unlinked
+**
+** Issue sqlite3_log(SQLITE_WARNING,...) messages if anything is not right.
+*/
+static void verifyDbFile(unixFile *pFile){
+  struct stat buf;
+  int rc;
+
+  /* These verifications occur for the main database only */
+  if( pFile->ctrlFlags & UNIXFILE_NOLOCK ) return;
+
+  rc = osFstat(pFile->h, &buf);
+  if( rc!=0 ){
+    sqlite3_log(SQLITE_WARNING, "cannot fstat db file %s", pFile->zPath);
+    return;
+  }
+  if( buf.st_nlink==0 ){
+    sqlite3_log(SQLITE_WARNING, "file unlinked while open: %s", pFile->zPath);
+    return;
+  }
+  if( buf.st_nlink>1 ){
+    sqlite3_log(SQLITE_WARNING, "multiple links to file: %s", pFile->zPath);
+    return;
+  }
+  if( fileHasMoved(pFile) ){
+    sqlite3_log(SQLITE_WARNING, "file renamed while open: %s", pFile->zPath);
+    return;
+  }
+}
+
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero.  The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+*/
+static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){
+  int rc = SQLITE_OK;
+  int reserved = 0;
+  unixFile *pFile = (unixFile*)id;
+
+  SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+  assert( pFile );
+  assert( pFile->eFileLock<=SHARED_LOCK );
+  unixEnterMutex(); /* Because pFile->pInode is shared across threads */
+
+  /* Check if a thread in this process holds such a lock */
+  if( pFile->pInode->eFileLock>SHARED_LOCK ){
+    reserved = 1;
+  }
+
+  /* Otherwise see if some other process holds it.
+  */
+#ifndef __DJGPP__
+  if( !reserved && !pFile->pInode->bProcessLock ){
+    struct flock lock;
+    lock.l_whence = SEEK_SET;
+    lock.l_start = RESERVED_BYTE;
+    lock.l_len = 1;
+    lock.l_type = F_WRLCK;
+    if( osFcntl(pFile->h, F_GETLK, &lock) ){
+      rc = SQLITE_IOERR_CHECKRESERVEDLOCK;
+      storeLastErrno(pFile, errno);
+    } else if( lock.l_type!=F_UNLCK ){
+      reserved = 1;
+    }
+  }
+#endif
+
+  unixLeaveMutex();
+  OSTRACE(("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved));
+
+  *pResOut = reserved;
+  return rc;
+}
+
+/*
+** Attempt to set a system-lock on the file pFile.  The lock is
+** described by pLock.
+**
+** If the pFile was opened read/write from unix-excl, then the only lock
+** ever obtained is an exclusive lock, and it is obtained exactly once
+** the first time any lock is attempted.  All subsequent system locking
+** operations become no-ops.  Locking operations still happen internally,
+** in order to coordinate access between separate database connections
+** within this process, but all of that is handled in memory and the
+** operating system does not participate.
+**
+** This function is a pass-through to fcntl(F_SETLK) if pFile is using
+** any VFS other than "unix-excl" or if pFile is opened on "unix-excl"
+** and is read-only.
+**
+** Zero is returned if the call completes successfully, or -1 if a call
+** to fcntl() fails. In this case, errno is set appropriately (by fcntl()).
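+**
+** For example, taking the RESERVED lock ultimately comes down to a call
+** shaped like this (a sketch; see unixLock() below for the real sequence):
+**
+**     struct flock lock;
+**     lock.l_type = F_WRLCK;         /* write-lock ... */
+**     lock.l_whence = SEEK_SET;
+**     lock.l_start = RESERVED_BYTE;  /* ... the single 'reserved byte' */
+**     lock.l_len = 1L;
+**     rc = unixFileLock(pFile, &lock);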
+*/
+static int unixFileLock(unixFile *pFile, struct flock *pLock){
+  int rc;
+  unixInodeInfo *pInode = pFile->pInode;
+  assert( unixMutexHeld() );
+  assert( pInode!=0 );
+  if( (pFile->ctrlFlags & (UNIXFILE_EXCL|UNIXFILE_RDONLY))==UNIXFILE_EXCL ){
+    if( pInode->bProcessLock==0 ){
+      struct flock lock;
+      assert( pInode->nLock==0 );
+      lock.l_whence = SEEK_SET;
+      lock.l_start = SHARED_FIRST;
+      lock.l_len = SHARED_SIZE;
+      lock.l_type = F_WRLCK;
+      rc = osFcntl(pFile->h, F_SETLK, &lock);
+      if( rc<0 ) return rc;
+      pInode->bProcessLock = 1;
+      pInode->nLock++;
+    }else{
+      rc = 0;
+    }
+  }else{
+    rc = osFcntl(pFile->h, F_SETLK, pLock);
+  }
+  return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+**     (1) SHARED_LOCK
+**     (2) RESERVED_LOCK
+**     (3) PENDING_LOCK
+**     (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between.  The locking might fail on one of the later
+** transitions leaving the lock state different from what it started but
+** still short of its goal.  The following chart shows the allowed
+** transitions and the inserted intermediate states:
+**
+**    UNLOCKED -> SHARED
+**    SHARED -> RESERVED
+**    SHARED -> (PENDING) -> EXCLUSIVE
+**    RESERVED -> (PENDING) -> EXCLUSIVE
+**    PENDING -> EXCLUSIVE
+**
+** This routine will only increase a lock.  Use the sqlite3OsUnlock()
+** routine to lower a locking level.
+*/
+static int unixLock(sqlite3_file *id, int eFileLock){
+  /* The following describes the implementation of the various locks and
+  ** lock transitions in terms of the POSIX advisory shared and exclusive
+  ** lock primitives (called read-locks and write-locks below, to avoid
+  ** confusion with SQLite lock names). The algorithms are complicated
+  ** slightly in order to be compatible with Windows95 systems simultaneously
+  ** accessing the same database file, in case that is ever required.
+  **
+  ** Symbols defined in os.h identify the 'pending byte' and the 'reserved
+  ** byte', each single bytes at well known offsets, and the 'shared byte
+  ** range', a range of 510 bytes at a well known offset.
+  **
+  ** To obtain a SHARED lock, a read-lock is obtained on the 'pending
+  ** byte'.  If this is successful, the 'shared byte range' is read-locked
+  ** and the lock on the 'pending byte' released.  (Legacy note:  When
+  ** SQLite was first developed, Windows95 systems were still very common,
+  ** and Windows95 lacks a shared-lock capability.  So on Windows95, a
+  ** single randomly selected byte from the 'shared byte range' is locked.
+  ** Windows95 is now pretty much extinct, but this work-around for the
+  ** lack of shared-locks on Windows95 lives on, for backwards
+  ** compatibility.)
+  **
+  ** A process may only obtain a RESERVED lock after it has a SHARED lock.
+  ** A RESERVED lock is implemented by grabbing a write-lock on the
+  ** 'reserved byte'.
+  **
+  ** A process may only obtain a PENDING lock after it has obtained a
+  ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock
+  ** on the 'pending byte'. This ensures that no new SHARED locks can be
+  ** obtained, but existing SHARED locks are allowed to persist. A process
+  ** does not have to obtain a RESERVED lock on the way to a PENDING lock.
+  ** This property is used by the algorithm for rolling back a journal file
+  ** after a crash.
+  **
+  ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is
+  ** implemented by obtaining a write-lock on the entire 'shared byte
+  ** range'.
+  ** Since all other locks require a read-lock on one of the bytes
+  ** within this range, this ensures that no other locks are held on the
+  ** database.
+  */
+  int rc = SQLITE_OK;
+  unixFile *pFile = (unixFile*)id;
+  unixInodeInfo *pInode;
+  struct flock lock;
+  int tErrno = 0;
+
+  assert( pFile );
+  OSTRACE(("LOCK    %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h,
+      azFileLock(eFileLock), azFileLock(pFile->eFileLock),
+      azFileLock(pFile->pInode->eFileLock), pFile->pInode->nShared,
+      osGetpid(0)));
+
+  /* If there is already a lock of this type or more restrictive on the
+  ** unixFile, do nothing. Don't use the end_lock: exit path, as
+  ** unixEnterMutex() hasn't been called yet.
+  */
+  if( pFile->eFileLock>=eFileLock ){
+    OSTRACE(("LOCK    %d %s ok (already held) (unix)\n", pFile->h,
+            azFileLock(eFileLock)));
+    return SQLITE_OK;
+  }
+
+  /* Make sure the locking sequence is correct.
+  **  (1) We never move from unlocked to anything higher than shared lock.
+  **  (2) SQLite never explicitly requests a pending lock.
+  **  (3) A shared lock is always held when a reserved lock is requested.
+  */
+  assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
+  assert( eFileLock!=PENDING_LOCK );
+  assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK );
+
+  /* This mutex is needed because pFile->pInode is shared across threads
+  */
+  unixEnterMutex();
+  pInode = pFile->pInode;
+
+  /* If some thread using this PID has a lock via a different unixFile*
+  ** handle that precludes the requested lock, return BUSY.
+  */
+  if( (pFile->eFileLock!=pInode->eFileLock &&
+          (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK))
+  ){
+    rc = SQLITE_BUSY;
+    goto end_lock;
+  }
+
+  /* If a SHARED lock is requested, and some thread using this PID already
+  ** has a SHARED or RESERVED lock, then increment reference counts and
+  ** return SQLITE_OK.
+  */
+  if( eFileLock==SHARED_LOCK &&
+      (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){
+    assert( eFileLock==SHARED_LOCK );
+    assert( pFile->eFileLock==0 );
+    assert( pInode->nShared>0 );
+    pFile->eFileLock = SHARED_LOCK;
+    pInode->nShared++;
+    pInode->nLock++;
+    goto end_lock;
+  }
+
+
+  /* A PENDING lock is needed before acquiring a SHARED lock and before
+  ** acquiring an EXCLUSIVE lock.  For the SHARED lock, the PENDING will
+  ** be released.
+  */
+  lock.l_len = 1L;
+  lock.l_whence = SEEK_SET;
+  if( eFileLock==SHARED_LOCK
+      || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK)
+  ){
+    lock.l_type = (eFileLock==SHARED_LOCK?F_RDLCK:F_WRLCK);
+    lock.l_start = PENDING_BYTE;
+    if( unixFileLock(pFile, &lock) ){
+      tErrno = errno;
+      rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+      if( rc!=SQLITE_BUSY ){
+        storeLastErrno(pFile, tErrno);
+      }
+      goto end_lock;
+    }
+  }
+
+
+  /* If control gets to this point, then actually go ahead and make
+  ** operating system calls for the specified lock.
+  */
+  if( eFileLock==SHARED_LOCK ){
+    assert( pInode->nShared==0 );
+    assert( pInode->eFileLock==0 );
+    assert( rc==SQLITE_OK );
+
+    /* Now get the read-lock */
+    lock.l_start = SHARED_FIRST;
+    lock.l_len = SHARED_SIZE;
+    if( unixFileLock(pFile, &lock) ){
+      tErrno = errno;
+      rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK);
+    }
+
+    /* Drop the temporary PENDING lock */
+    lock.l_start = PENDING_BYTE;
+    lock.l_len = 1L;
+    lock.l_type = F_UNLCK;
+    if( unixFileLock(pFile, &lock) && rc==SQLITE_OK ){
+      /* This could happen with a network mount */
+      tErrno = errno;
+      rc = SQLITE_IOERR_UNLOCK;
+    }
+
+    if( rc ){
+      if( rc!=SQLITE_BUSY ){
+        storeLastErrno(pFile, tErrno);
+      }
+      goto end_lock;
+    }else{
+      pFile->eFileLock = SHARED_LOCK;
+      pInode->nLock++;
+      pInode->nShared = 1;
+    }
+  }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){
+    /* We are trying for an exclusive lock but another thread in this
+    ** same process is still holding a shared lock. */
+    rc = SQLITE_BUSY;
+  }else{
+    /* The request was for a RESERVED or EXCLUSIVE lock.  It is
+    ** assumed that there is a SHARED or greater lock on the file
+    ** already.
+ */ + assert( 0!=pFile->eFileLock ); + lock.l_type = F_WRLCK; + + assert( eFileLock==RESERVED_LOCK || eFileLock==EXCLUSIVE_LOCK ); + if( eFileLock==RESERVED_LOCK ){ + lock.l_start = RESERVED_BYTE; + lock.l_len = 1L; + }else{ + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + } + + if( unixFileLock(pFile, &lock) ){ + tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( rc!=SQLITE_BUSY ){ + storeLastErrno(pFile, tErrno); + } + } + } + + +#ifdef SQLITE_DEBUG + /* Set up the transaction-counter change checking flags when + ** transitioning from a SHARED to a RESERVED lock. The change + ** from SHARED to RESERVED marks the beginning of a normal + ** write operation (not a hot journal rollback). + */ + if( rc==SQLITE_OK + && pFile->eFileLock<=SHARED_LOCK + && eFileLock==RESERVED_LOCK + ){ + pFile->transCntrChng = 0; + pFile->dbUpdate = 0; + pFile->inNormalWrite = 1; + } +#endif + + + if( rc==SQLITE_OK ){ + pFile->eFileLock = eFileLock; + pInode->eFileLock = eFileLock; + }else if( eFileLock==EXCLUSIVE_LOCK ){ + pFile->eFileLock = PENDING_LOCK; + pInode->eFileLock = PENDING_LOCK; + } + +end_lock: + unixLeaveMutex(); + OSTRACE(("LOCK %d %s %s (unix)\n", pFile->h, azFileLock(eFileLock), + rc==SQLITE_OK ? "ok" : "failed")); + return rc; +} + +/* +** Add the file descriptor used by file handle pFile to the corresponding +** pUnused list. +*/ +static void setPendingFd(unixFile *pFile){ + unixInodeInfo *pInode = pFile->pInode; + UnixUnusedFd *p = pFile->pUnused; + p->pNext = pInode->pUnused; + pInode->pUnused = p; + pFile->h = -1; + pFile->pUnused = 0; +} + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +** +** If handleNFSUnlock is true, then on downgrading an EXCLUSIVE_LOCK to SHARED +** the byte range is divided into 2 parts and the first part is unlocked then +** set to a read lock, then the other part is simply unlocked. This works +** around a bug in BSD NFS lockd (also seen on MacOSX 10.3+) that fails to +** remove the write lock on a region when a read lock is set. +*/ +static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){ + unixFile *pFile = (unixFile*)id; + unixInodeInfo *pInode; + struct flock lock; + int rc = SQLITE_OK; + + assert( pFile ); + OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, eFileLock, + pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, + osGetpid(0))); + + assert( eFileLock<=SHARED_LOCK ); + if( pFile->eFileLock<=eFileLock ){ + return SQLITE_OK; + } + unixEnterMutex(); + pInode = pFile->pInode; + assert( pInode->nShared!=0 ); + if( pFile->eFileLock>SHARED_LOCK ){ + assert( pInode->eFileLock==pFile->eFileLock ); + +#ifdef SQLITE_DEBUG + /* When reducing a lock such that other processes can start + ** reading the database file again, make sure that the + ** transaction counter was updated if any part of the database + ** file changed. If the transaction counter is not updated, + ** other connections to the same file might not realize that + ** the file has changed and hence might not know to flush their + ** cache. The use of a stale cache can lead to database corruption. 
+ */ + pFile->inNormalWrite = 0; +#endif + + /* downgrading to a shared lock on NFS involves clearing the write lock + ** before establishing the readlock - to avoid a race condition we downgrade + ** the lock in 2 blocks, so that part of the range will be covered by a + ** write lock until the rest is covered by a read lock: + ** 1: [WWWWW] + ** 2: [....W] + ** 3: [RRRRW] + ** 4: [RRRR.] + */ + if( eFileLock==SHARED_LOCK ){ +#if !defined(__APPLE__) || !SQLITE_ENABLE_LOCKING_STYLE + (void)handleNFSUnlock; + assert( handleNFSUnlock==0 ); +#endif +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE + if( handleNFSUnlock ){ + int tErrno; /* Error code from system call errors */ + off_t divSize = SHARED_SIZE - 1; + + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = SHARED_FIRST; + lock.l_len = divSize; + if( unixFileLock(pFile, &lock)==(-1) ){ + tErrno = errno; + rc = SQLITE_IOERR_UNLOCK; + storeLastErrno(pFile, tErrno); + goto end_unlock; + } + lock.l_type = F_RDLCK; + lock.l_whence = SEEK_SET; + lock.l_start = SHARED_FIRST; + lock.l_len = divSize; + if( unixFileLock(pFile, &lock)==(-1) ){ + tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_RDLOCK); + if( IS_LOCK_ERROR(rc) ){ + storeLastErrno(pFile, tErrno); + } + goto end_unlock; + } + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = SHARED_FIRST+divSize; + lock.l_len = SHARED_SIZE-divSize; + if( unixFileLock(pFile, &lock)==(-1) ){ + tErrno = errno; + rc = SQLITE_IOERR_UNLOCK; + storeLastErrno(pFile, tErrno); + goto end_unlock; + } + }else +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ + { + lock.l_type = F_RDLCK; + lock.l_whence = SEEK_SET; + lock.l_start = SHARED_FIRST; + lock.l_len = SHARED_SIZE; + if( unixFileLock(pFile, &lock) ){ + /* In theory, the call to unixFileLock() cannot fail because another + ** process is holding an incompatible lock. If it does, this + ** indicates that the other process is not following the locking + ** protocol. If this happens, return SQLITE_IOERR_RDLOCK. Returning + ** SQLITE_BUSY would confuse the upper layer (in practice it causes + ** an assert to fail). */ + rc = SQLITE_IOERR_RDLOCK; + storeLastErrno(pFile, errno); + goto end_unlock; + } + } + } + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = PENDING_BYTE; + lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); + if( unixFileLock(pFile, &lock)==0 ){ + pInode->eFileLock = SHARED_LOCK; + }else{ + rc = SQLITE_IOERR_UNLOCK; + storeLastErrno(pFile, errno); + goto end_unlock; + } + } + if( eFileLock==NO_LOCK ){ + /* Decrement the shared lock counter. Release the lock using an + ** OS call only when all threads in this same process have released + ** the lock. + */ + pInode->nShared--; + if( pInode->nShared==0 ){ + lock.l_type = F_UNLCK; + lock.l_whence = SEEK_SET; + lock.l_start = lock.l_len = 0L; + if( unixFileLock(pFile, &lock)==0 ){ + pInode->eFileLock = NO_LOCK; + }else{ + rc = SQLITE_IOERR_UNLOCK; + storeLastErrno(pFile, errno); + pInode->eFileLock = NO_LOCK; + pFile->eFileLock = NO_LOCK; + } + } + + /* Decrement the count of locks against this same file. When the + ** count reaches zero, close any other file descriptors whose close + ** was deferred because of outstanding locks. 
+ */ + pInode->nLock--; + assert( pInode->nLock>=0 ); + if( pInode->nLock==0 ){ + closePendingFds(pFile); + } + } + +end_unlock: + unixLeaveMutex(); + if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +*/ +static int unixUnlock(sqlite3_file *id, int eFileLock){ +#if SQLITE_MAX_MMAP_SIZE>0 + assert( eFileLock==SHARED_LOCK || ((unixFile *)id)->nFetchOut==0 ); +#endif + return posixUnlock(id, eFileLock, 0); +} + +#if SQLITE_MAX_MMAP_SIZE>0 +static int unixMapfile(unixFile *pFd, i64 nByte); +static void unixUnmapfile(unixFile *pFd); +#endif + +/* +** This function performs the parts of the "close file" operation +** common to all locking schemes. It closes the directory and file +** handles, if they are valid, and sets all fields of the unixFile +** structure to 0. +** +** It is *not* necessary to hold the mutex when this routine is called, +** even on VxWorks. A mutex will be acquired on VxWorks by the +** vxworksReleaseFileId() routine. +*/ +static int closeUnixFile(sqlite3_file *id){ + unixFile *pFile = (unixFile*)id; +#if SQLITE_MAX_MMAP_SIZE>0 + unixUnmapfile(pFile); +#endif + if( pFile->h>=0 ){ + robust_close(pFile, pFile->h, __LINE__); + pFile->h = -1; + } +#if OS_VXWORKS + if( pFile->pId ){ + if( pFile->ctrlFlags & UNIXFILE_DELETE ){ + osUnlink(pFile->pId->zCanonicalName); + } + vxworksReleaseFileId(pFile->pId); + pFile->pId = 0; + } +#endif +#ifdef SQLITE_UNLINK_AFTER_CLOSE + if( pFile->ctrlFlags & UNIXFILE_DELETE ){ + osUnlink(pFile->zPath); + sqlite3_free(*(char**)&pFile->zPath); + pFile->zPath = 0; + } +#endif + OSTRACE(("CLOSE %-3d\n", pFile->h)); + OpenCounter(-1); + sqlite3_free(pFile->pUnused); + memset(pFile, 0, sizeof(unixFile)); + return SQLITE_OK; +} + +/* +** Close a file. +*/ +static int unixClose(sqlite3_file *id){ + int rc = SQLITE_OK; + unixFile *pFile = (unixFile *)id; + verifyDbFile(pFile); + unixUnlock(id, NO_LOCK); + unixEnterMutex(); + + /* unixFile.pInode is always valid here. Otherwise, a different close + ** routine (e.g. nolockClose()) would be called instead. + */ + assert( pFile->pInode->nLock>0 || pFile->pInode->bProcessLock==0 ); + if( ALWAYS(pFile->pInode) && pFile->pInode->nLock ){ + /* If there are outstanding locks, do not actually close the file just + ** yet because that would clear those locks. Instead, add the file + ** descriptor to pInode->pUnused list. It will be automatically closed + ** when the last lock is cleared. + */ + setPendingFd(pFile); + } + releaseInodeInfo(pFile); + rc = closeUnixFile(id); + unixLeaveMutex(); + return rc; +} + +/************** End of the posix advisory lock implementation ***************** +******************************************************************************/ + +/****************************************************************************** +****************************** No-op Locking ********************************** +** +** Of the various locking implementations available, this is by far the +** simplest: locking is ignored. No attempt is made to lock the database +** file for reading or writing. +** +** This locking mode is appropriate for use on read-only databases +** (ex: databases that are burned into CD-ROM, for example.) 
It can
+** also be used if the application employs some external mechanism to
+** prevent simultaneous access of the same database by two or more
+** database connections.  But there is a serious risk of database
+** corruption if this locking mode is used in situations where multiple
+** database connections are accessing the same database file at the same
+** time and one or more of those connections are writing.
+*/
+
+static int nolockCheckReservedLock(sqlite3_file *NotUsed, int *pResOut){
+  UNUSED_PARAMETER(NotUsed);
+  *pResOut = 0;
+  return SQLITE_OK;
+}
+static int nolockLock(sqlite3_file *NotUsed, int NotUsed2){
+  UNUSED_PARAMETER2(NotUsed, NotUsed2);
+  return SQLITE_OK;
+}
+static int nolockUnlock(sqlite3_file *NotUsed, int NotUsed2){
+  UNUSED_PARAMETER2(NotUsed, NotUsed2);
+  return SQLITE_OK;
+}
+
+/*
+** Close the file.
+*/
+static int nolockClose(sqlite3_file *id) {
+  return closeUnixFile(id);
+}
+
+/******************* End of the no-op lock implementation *********************
+******************************************************************************/
+
+/******************************************************************************
+************************* Begin dot-file Locking ******************************
+**
+** The dotfile locking implementation uses the existence of separate lock
+** files (really a directory) to control access to the database.  This works
+** on just about every filesystem imaginable.  But there are serious downsides:
+**
+**    (1)  There is zero concurrency.  A single reader blocks all other
+**         connections from reading or writing the database.
+**
+**    (2)  An application crash or power loss can leave stale lock files
+**         sitting around that need to be cleared manually.
+**
+** Nevertheless, a dotlock is an appropriate locking mode for use if no
+** other locking strategy is available.
+**
+** Dotfile locking works by creating a subdirectory in the same directory as
+** the database and with the same name but with a ".lock" extension added.
+** The existence of a lock directory implies an EXCLUSIVE lock.  All other
+** lock types (SHARED, RESERVED, PENDING) are mapped into EXCLUSIVE.
+*/
+
+/*
+** The file suffix added to the database filename in order to create the
+** lock directory.
+*/
+#define DOTLOCK_SUFFIX ".lock"
+
+/*
+** This routine checks if there is a RESERVED lock held on the specified
+** file by this or any other process. If such a lock is held, set *pResOut
+** to a non-zero value otherwise *pResOut is set to zero.  The return value
+** is set to SQLITE_OK unless an I/O error occurs during lock checking.
+**
+** In dotfile locking, either a lock exists or it does not.  So in this
+** variation of CheckReservedLock(), *pResOut is set to true if any lock
+** is held on the file and false if the file is unlocked.
+*/
+static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) {
+  int rc = SQLITE_OK;
+  int reserved = 0;
+  unixFile *pFile = (unixFile*)id;
+
+  SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; );
+
+  assert( pFile );
+  reserved = osAccess((const char*)pFile->lockingContext, 0)==0;
+  OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved));
+  *pResOut = reserved;
+  return rc;
+}
+
+/*
+** Lock the file with the lock specified by parameter eFileLock - one
+** of the following:
+**
+**     (1) SHARED_LOCK
+**     (2) RESERVED_LOCK
+**     (3) PENDING_LOCK
+**     (4) EXCLUSIVE_LOCK
+**
+** Sometimes when requesting one lock state, additional lock states
+** are inserted in between.
The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +** +** With dotfile locking, we really only support state (4): EXCLUSIVE. +** But we track the other locking levels internally. +*/ +static int dotlockLock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + char *zLockFile = (char *)pFile->lockingContext; + int rc = SQLITE_OK; + + + /* If we have any lock, then the lock file already exists. All we have + ** to do is adjust our internal record of the lock level. + */ + if( pFile->eFileLock > NO_LOCK ){ + pFile->eFileLock = eFileLock; + /* Always update the timestamp on the old file */ +#ifdef HAVE_UTIME + utime(zLockFile, NULL); +#else + utimes(zLockFile, NULL); +#endif + return SQLITE_OK; + } + + /* grab an exclusive lock */ + rc = osMkdir(zLockFile, 0777); + if( rc<0 ){ + /* failed to open/create the lock directory */ + int tErrno = errno; + if( EEXIST == tErrno ){ + rc = SQLITE_BUSY; + } else { + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( rc!=SQLITE_BUSY ){ + storeLastErrno(pFile, tErrno); + } + } + return rc; + } + + /* got it, set the type and return ok */ + pFile->eFileLock = eFileLock; + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +** +** When the locking level reaches NO_LOCK, delete the lock file. +*/ +static int dotlockUnlock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + char *zLockFile = (char *)pFile->lockingContext; + int rc; + + assert( pFile ); + OSTRACE(("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, eFileLock, + pFile->eFileLock, osGetpid(0))); + assert( eFileLock<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->eFileLock==eFileLock ){ + return SQLITE_OK; + } + + /* To downgrade to shared, simply update our internal notion of the + ** lock state. No need to mess with the file on disk. + */ + if( eFileLock==SHARED_LOCK ){ + pFile->eFileLock = SHARED_LOCK; + return SQLITE_OK; + } + + /* To fully unlock the database, delete the lock file */ + assert( eFileLock==NO_LOCK ); + rc = osRmdir(zLockFile); + if( rc<0 ){ + int tErrno = errno; + if( tErrno==ENOENT ){ + rc = SQLITE_OK; + }else{ + rc = SQLITE_IOERR_UNLOCK; + storeLastErrno(pFile, tErrno); + } + return rc; + } + pFile->eFileLock = NO_LOCK; + return SQLITE_OK; +} + +/* +** Close a file. Make sure the lock has been released before closing. 
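+**
+** (Illustration only, not part of the original sources; the name
+** "test.db.lock" is hypothetical. The whole protocol this division
+** implements can be exercised with nothing but mkdir()/rmdir(), since
+** mkdir() either atomically creates the directory or fails with EEXIST:
+**
+**     if( mkdir("test.db.lock", 0777)==0 ){
+**       /* EXCLUSIVE lock acquired - safe to access the database */
+**       rmdir("test.db.lock");          /* release the lock */
+**     }else if( errno==EEXIST ){
+**       /* another process holds the lock - report SQLITE_BUSY */
+**     }
+** )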
+*/ +static int dotlockClose(sqlite3_file *id) { + unixFile *pFile = (unixFile*)id; + assert( id!=0 ); + dotlockUnlock(id, NO_LOCK); + sqlite3_free(pFile->lockingContext); + return closeUnixFile(id); +} +/****************** End of the dot-file lock implementation ******************* +******************************************************************************/ + +/****************************************************************************** +************************** Begin flock Locking ******************************** +** +** Use the flock() system call to do file locking. +** +** flock() locking is like dot-file locking in that the various +** fine-grain locking levels supported by SQLite are collapsed into +** a single exclusive lock. In other words, SHARED, RESERVED, and +** PENDING locks are the same thing as an EXCLUSIVE lock. SQLite +** still works when you do this, but concurrency is reduced since +** only a single process can be reading the database at a time. +** +** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off +*/ +#if SQLITE_ENABLE_LOCKING_STYLE + +/* +** Retry flock() calls that fail with EINTR +*/ +#ifdef EINTR +static int robust_flock(int fd, int op){ + int rc; + do{ rc = flock(fd,op); }while( rc<0 && errno==EINTR ); + return rc; +} +#else +# define robust_flock(a,b) flock(a,b) +#endif + + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +*/ +static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + + assert( pFile ); + + /* Check if a thread in this process holds such a lock */ + if( pFile->eFileLock>SHARED_LOCK ){ + reserved = 1; + } + + /* Otherwise see if some other process holds it. */ + if( !reserved ){ + /* attempt to get the lock */ + int lrc = robust_flock(pFile->h, LOCK_EX | LOCK_NB); + if( !lrc ){ + /* got the lock, unlock it */ + lrc = robust_flock(pFile->h, LOCK_UN); + if ( lrc ) { + int tErrno = errno; + /* unlock failed with an error */ + lrc = SQLITE_IOERR_UNLOCK; + storeLastErrno(pFile, tErrno); + rc = lrc; + } + } else { + int tErrno = errno; + reserved = 1; + /* someone else might have it reserved */ + lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(lrc) ){ + storeLastErrno(pFile, tErrno); + rc = lrc; + } + } + } + OSTRACE(("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved)); + +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ + rc = SQLITE_OK; + reserved=1; + } +#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ + *pResOut = reserved; + return rc; +} + +/* +** Lock the file with the lock specified by parameter eFileLock - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. 
The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** flock() only really support EXCLUSIVE locks. We track intermediate +** lock states in the sqlite3_file structure, but all locks SHARED or +** above are really EXCLUSIVE locks and exclude all other processes from +** access the file. +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +*/ +static int flockLock(sqlite3_file *id, int eFileLock) { + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + + /* if we already have a lock, it is exclusive. + ** Just adjust level and punt on outta here. */ + if (pFile->eFileLock > NO_LOCK) { + pFile->eFileLock = eFileLock; + return SQLITE_OK; + } + + /* grab an exclusive lock */ + + if (robust_flock(pFile->h, LOCK_EX | LOCK_NB)) { + int tErrno = errno; + /* didn't get, must be busy */ + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); + if( IS_LOCK_ERROR(rc) ){ + storeLastErrno(pFile, tErrno); + } + } else { + /* got it, set the type and return ok */ + pFile->eFileLock = eFileLock; + } + OSTRACE(("LOCK %d %s %s (flock)\n", pFile->h, azFileLock(eFileLock), + rc==SQLITE_OK ? "ok" : "failed")); +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ + rc = SQLITE_BUSY; + } +#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ + return rc; +} + + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +*/ +static int flockUnlock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + + assert( pFile ); + OSTRACE(("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, eFileLock, + pFile->eFileLock, osGetpid(0))); + assert( eFileLock<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->eFileLock==eFileLock ){ + return SQLITE_OK; + } + + /* shared can just be set because we always have an exclusive */ + if (eFileLock==SHARED_LOCK) { + pFile->eFileLock = eFileLock; + return SQLITE_OK; + } + + /* no, really, unlock. */ + if( robust_flock(pFile->h, LOCK_UN) ){ +#ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS + return SQLITE_OK; +#endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ + return SQLITE_IOERR_UNLOCK; + }else{ + pFile->eFileLock = NO_LOCK; + return SQLITE_OK; + } +} + +/* +** Close a file. +*/ +static int flockClose(sqlite3_file *id) { + assert( id!=0 ); + flockUnlock(id, NO_LOCK); + return closeUnixFile(id); +} + +#endif /* SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORK */ + +/******************* End of the flock lock implementation ********************* +******************************************************************************/ + +/****************************************************************************** +************************ Begin Named Semaphore Locking ************************ +** +** Named semaphore locking is only supported on VxWorks. +** +** Semaphore locking is like dot-lock and flock in that it really only +** supports EXCLUSIVE locking. Only a single process can read or write +** the database file at a time. This reduces potential concurrency, but +** makes the lock implementation much easier. 
+*/ +#if OS_VXWORKS + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +*/ +static int semXCheckReservedLock(sqlite3_file *id, int *pResOut) { + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + + assert( pFile ); + + /* Check if a thread in this process holds such a lock */ + if( pFile->eFileLock>SHARED_LOCK ){ + reserved = 1; + } + + /* Otherwise see if some other process holds it. */ + if( !reserved ){ + sem_t *pSem = pFile->pInode->pSem; + + if( sem_trywait(pSem)==-1 ){ + int tErrno = errno; + if( EAGAIN != tErrno ){ + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK); + storeLastErrno(pFile, tErrno); + } else { + /* someone else has the lock when we are in NO_LOCK */ + reserved = (pFile->eFileLock < SHARED_LOCK); + } + }else{ + /* we could have it if we want it */ + sem_post(pSem); + } + } + OSTRACE(("TEST WR-LOCK %d %d %d (sem)\n", pFile->h, rc, reserved)); + + *pResOut = reserved; + return rc; +} + +/* +** Lock the file with the lock specified by parameter eFileLock - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** Semaphore locks only really support EXCLUSIVE locks. We track intermediate +** lock states in the sqlite3_file structure, but all locks SHARED or +** above are really EXCLUSIVE locks and exclude all other processes from +** access the file. +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +*/ +static int semXLock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + sem_t *pSem = pFile->pInode->pSem; + int rc = SQLITE_OK; + + /* if we already have a lock, it is exclusive. + ** Just adjust level and punt on outta here. */ + if (pFile->eFileLock > NO_LOCK) { + pFile->eFileLock = eFileLock; + rc = SQLITE_OK; + goto sem_end_lock; + } + + /* lock semaphore now but bail out when already locked. */ + if( sem_trywait(pSem)==-1 ){ + rc = SQLITE_BUSY; + goto sem_end_lock; + } + + /* got it, set the type and return ok */ + pFile->eFileLock = eFileLock; + + sem_end_lock: + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. 
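+**
+** (Illustration only, not from the original sources: once the lock state
+** drops all the way to NO_LOCK, the scheme below reduces to the classic
+** non-blocking binary-semaphore pattern:
+**
+**     if( sem_trywait(pSem)==0 ){
+**       /* ... exclusive access to the database ... */
+**       sem_post(pSem);            /* what semXUnlock() does below */
+**     }else if( errno==EAGAIN ){
+**       /* semaphore held elsewhere - report busy */
+**     }
+** )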
+*/ +static int semXUnlock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + sem_t *pSem = pFile->pInode->pSem; + + assert( pFile ); + assert( pSem ); + OSTRACE(("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, eFileLock, + pFile->eFileLock, osGetpid(0))); + assert( eFileLock<=SHARED_LOCK ); + + /* no-op if possible */ + if( pFile->eFileLock==eFileLock ){ + return SQLITE_OK; + } + + /* shared can just be set because we always have an exclusive */ + if (eFileLock==SHARED_LOCK) { + pFile->eFileLock = eFileLock; + return SQLITE_OK; + } + + /* no, really unlock. */ + if ( sem_post(pSem)==-1 ) { + int rc, tErrno = errno; + rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); + if( IS_LOCK_ERROR(rc) ){ + storeLastErrno(pFile, tErrno); + } + return rc; + } + pFile->eFileLock = NO_LOCK; + return SQLITE_OK; +} + +/* + ** Close a file. + */ +static int semXClose(sqlite3_file *id) { + if( id ){ + unixFile *pFile = (unixFile*)id; + semXUnlock(id, NO_LOCK); + assert( pFile ); + unixEnterMutex(); + releaseInodeInfo(pFile); + unixLeaveMutex(); + closeUnixFile(id); + } + return SQLITE_OK; +} + +#endif /* OS_VXWORKS */ +/* +** Named semaphore locking is only available on VxWorks. +** +*************** End of the named semaphore lock implementation **************** +******************************************************************************/ + + +/****************************************************************************** +*************************** Begin AFP Locking ********************************* +** +** AFP is the Apple Filing Protocol. AFP is a network filesystem found +** on Apple Macintosh computers - both OS9 and OSX. +** +** Third-party implementations of AFP are available. But this code here +** only works on OSX. +*/ + +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +/* +** The afpLockingContext structure contains all afp lock specific state +*/ +typedef struct afpLockingContext afpLockingContext; +struct afpLockingContext { + int reserved; + const char *dbPath; /* Name of the open file */ +}; + +struct ByteRangeLockPB2 +{ + unsigned long long offset; /* offset to first byte to lock */ + unsigned long long length; /* nbr of bytes to lock */ + unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */ + unsigned char unLockFlag; /* 1 = unlock, 0 = lock */ + unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */ + int fd; /* file desc to assoc this lock with */ +}; + +#define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2) + +/* +** This is a utility for setting or clearing a bit-range lock on an +** AFP filesystem. +** +** Return SQLITE_OK on success, SQLITE_BUSY on failure. +*/ +static int afpSetLock( + const char *path, /* Name of the file to be locked or unlocked */ + unixFile *pFile, /* Open file descriptor on path */ + unsigned long long offset, /* First byte to be locked */ + unsigned long long length, /* Number of bytes to lock */ + int setLockFlag /* True to set lock. False to clear lock */ +){ + struct ByteRangeLockPB2 pb; + int err; + + pb.unLockFlag = setLockFlag ? 
0 : 1; + pb.startEndFlag = 0; + pb.offset = offset; + pb.length = length; + pb.fd = pFile->h; + + OSTRACE(("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n", + (setLockFlag?"ON":"OFF"), pFile->h, (pb.fd==-1?"[testval-1]":""), + offset, length)); + err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); + if ( err==-1 ) { + int rc; + int tErrno = errno; + OSTRACE(("AFPSETLOCK failed to fsctl() '%s' %d %s\n", + path, tErrno, strerror(tErrno))); +#ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS + rc = SQLITE_BUSY; +#else + rc = sqliteErrorFromPosixError(tErrno, + setLockFlag ? SQLITE_IOERR_LOCK : SQLITE_IOERR_UNLOCK); +#endif /* SQLITE_IGNORE_AFP_LOCK_ERRORS */ + if( IS_LOCK_ERROR(rc) ){ + storeLastErrno(pFile, tErrno); + } + return rc; + } else { + return SQLITE_OK; + } +} + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +*/ +static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){ + int rc = SQLITE_OK; + int reserved = 0; + unixFile *pFile = (unixFile*)id; + afpLockingContext *context; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + + assert( pFile ); + context = (afpLockingContext *) pFile->lockingContext; + if( context->reserved ){ + *pResOut = 1; + return SQLITE_OK; + } + unixEnterMutex(); /* Because pFile->pInode is shared across threads */ + + /* Check if a thread in this process holds such a lock */ + if( pFile->pInode->eFileLock>SHARED_LOCK ){ + reserved = 1; + } + + /* Otherwise see if some other process holds it. + */ + if( !reserved ){ + /* lock the RESERVED byte */ + int lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); + if( SQLITE_OK==lrc ){ + /* if we succeeded in taking the reserved lock, unlock it to restore + ** the original state */ + lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); + } else { + /* if we failed to get the lock then someone else must have it */ + reserved = 1; + } + if( IS_LOCK_ERROR(lrc) ){ + rc=lrc; + } + } + + unixLeaveMutex(); + OSTRACE(("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved)); + + *pResOut = reserved; + return rc; +} + +/* +** Lock the file with the lock specified by parameter eFileLock - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. 
+*/
+static int afpLock(sqlite3_file *id, int eFileLock){
+  int rc = SQLITE_OK;
+  unixFile *pFile = (unixFile*)id;
+  unixInodeInfo *pInode = pFile->pInode;
+  afpLockingContext *context = (afpLockingContext *) pFile->lockingContext;
+
+  assert( pFile );
+  OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h,
+           azFileLock(eFileLock), azFileLock(pFile->eFileLock),
+           azFileLock(pInode->eFileLock), pInode->nShared , osGetpid(0)));
+
+  /* If there is already a lock of this type or more restrictive on the
+  ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as
+  ** unixEnterMutex() hasn't been called yet.
+  */
+  if( pFile->eFileLock>=eFileLock ){
+    OSTRACE(("LOCK %d %s ok (already held) (afp)\n", pFile->h,
+           azFileLock(eFileLock)));
+    return SQLITE_OK;
+  }
+
+  /* Make sure the locking sequence is correct
+  ** (1) We never move from unlocked to anything higher than shared lock.
+  ** (2) SQLite never explicitly requests a PENDING lock.
+  ** (3) A shared lock is always held when a RESERVED lock is requested.
+  */
+  assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK );
+  assert( eFileLock!=PENDING_LOCK );
+  assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK );
+
+  /* This mutex is needed because pFile->pInode is shared across threads
+  */
+  unixEnterMutex();
+  pInode = pFile->pInode;
+
+  /* If some thread using this PID has a lock via a different unixFile*
+  ** handle that precludes the requested lock, return BUSY.
+  */
+  if( (pFile->eFileLock!=pInode->eFileLock &&
+       (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK))
+     ){
+    rc = SQLITE_BUSY;
+    goto afp_end_lock;
+  }
+
+  /* If a SHARED lock is requested, and some thread using this PID already
+  ** has a SHARED or RESERVED lock, then increment reference counts and
+  ** return SQLITE_OK.
+  */
+  if( eFileLock==SHARED_LOCK &&
+     (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){
+    assert( eFileLock==SHARED_LOCK );
+    assert( pFile->eFileLock==0 );
+    assert( pInode->nShared>0 );
+    pFile->eFileLock = SHARED_LOCK;
+    pInode->nShared++;
+    pInode->nLock++;
+    goto afp_end_lock;
+  }
+
+  /* A PENDING lock is needed before acquiring a SHARED lock and before
+  ** acquiring an EXCLUSIVE lock.  For the SHARED lock, the PENDING will
+  ** be released.
+  */
+  if( eFileLock==SHARED_LOCK
+      || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK)
+  ){
+    int failed;
+    failed = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 1);
+    if (failed) {
+      rc = failed;
+      goto afp_end_lock;
+    }
+  }
+
+  /* If control gets to this point, then actually go ahead and make
+  ** operating system calls for the specified lock.
+  */
+  if( eFileLock==SHARED_LOCK ){
+    int lrc1, lrc2, lrc1Errno = 0;
+    long lk, mask;
+
+    assert( pInode->nShared==0 );
+    assert( pInode->eFileLock==0 );
+
+    mask = (sizeof(long)==8) ?
LARGEST_INT64 : 0x7fffffff; + /* Now get the read-lock SHARED_LOCK */ + /* note that the quality of the randomness doesn't matter that much */ + lk = random(); + pInode->sharedByte = (lk & mask)%(SHARED_SIZE - 1); + lrc1 = afpSetLock(context->dbPath, pFile, + SHARED_FIRST+pInode->sharedByte, 1, 1); + if( IS_LOCK_ERROR(lrc1) ){ + lrc1Errno = pFile->lastErrno; + } + /* Drop the temporary PENDING lock */ + lrc2 = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); + + if( IS_LOCK_ERROR(lrc1) ) { + storeLastErrno(pFile, lrc1Errno); + rc = lrc1; + goto afp_end_lock; + } else if( IS_LOCK_ERROR(lrc2) ){ + rc = lrc2; + goto afp_end_lock; + } else if( lrc1 != SQLITE_OK ) { + rc = lrc1; + } else { + pFile->eFileLock = SHARED_LOCK; + pInode->nLock++; + pInode->nShared = 1; + } + }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ + /* We are trying for an exclusive lock but another thread in this + ** same process is still holding a shared lock. */ + rc = SQLITE_BUSY; + }else{ + /* The request was for a RESERVED or EXCLUSIVE lock. It is + ** assumed that there is a SHARED or greater lock on the file + ** already. + */ + int failed = 0; + assert( 0!=pFile->eFileLock ); + if (eFileLock >= RESERVED_LOCK && pFile->eFileLock < RESERVED_LOCK) { + /* Acquire a RESERVED lock */ + failed = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); + if( !failed ){ + context->reserved = 1; + } + } + if (!failed && eFileLock == EXCLUSIVE_LOCK) { + /* Acquire an EXCLUSIVE lock */ + + /* Remove the shared lock before trying the range. we'll need to + ** reestablish the shared lock if we can't get the afpUnlock + */ + if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + + pInode->sharedByte, 1, 0)) ){ + int failed2 = SQLITE_OK; + /* now attemmpt to get the exclusive lock range */ + failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, + SHARED_SIZE, 1); + if( failed && (failed2 = afpSetLock(context->dbPath, pFile, + SHARED_FIRST + pInode->sharedByte, 1, 1)) ){ + /* Can't reestablish the shared lock. Sqlite can't deal, this is + ** a critical I/O error + */ + rc = ((failed & SQLITE_IOERR) == SQLITE_IOERR) ? failed2 : + SQLITE_IOERR_LOCK; + goto afp_end_lock; + } + }else{ + rc = failed; + } + } + if( failed ){ + rc = failed; + } + } + + if( rc==SQLITE_OK ){ + pFile->eFileLock = eFileLock; + pInode->eFileLock = eFileLock; + }else if( eFileLock==EXCLUSIVE_LOCK ){ + pFile->eFileLock = PENDING_LOCK; + pInode->eFileLock = PENDING_LOCK; + } + +afp_end_lock: + unixLeaveMutex(); + OSTRACE(("LOCK %d %s %s (afp)\n", pFile->h, azFileLock(eFileLock), + rc==SQLITE_OK ? "ok" : "failed")); + return rc; +} + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. 
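+**
+** (Reminder, not restated elsewhere in this division: the byte ranges the
+** afpSetLock() calls below clear are the standard SQLite lock geometry
+** defined earlier in the amalgamation, essentially
+**
+**     #define RESERVED_BYTE  (PENDING_BYTE+1)
+**     #define SHARED_FIRST   (PENDING_BYTE+2)
+**     #define SHARED_SIZE    510
+**
+** so dropping a lock amounts to unlocking the right bytes in that range.)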
+*/ +static int afpUnlock(sqlite3_file *id, int eFileLock) { + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + unixInodeInfo *pInode; + afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; + int skipShared = 0; +#ifdef SQLITE_TEST + int h = pFile->h; +#endif + + assert( pFile ); + OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock, + pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, + osGetpid(0))); + + assert( eFileLock<=SHARED_LOCK ); + if( pFile->eFileLock<=eFileLock ){ + return SQLITE_OK; + } + unixEnterMutex(); + pInode = pFile->pInode; + assert( pInode->nShared!=0 ); + if( pFile->eFileLock>SHARED_LOCK ){ + assert( pInode->eFileLock==pFile->eFileLock ); + SimulateIOErrorBenign(1); + SimulateIOError( h=(-1) ) + SimulateIOErrorBenign(0); + +#ifdef SQLITE_DEBUG + /* When reducing a lock such that other processes can start + ** reading the database file again, make sure that the + ** transaction counter was updated if any part of the database + ** file changed. If the transaction counter is not updated, + ** other connections to the same file might not realize that + ** the file has changed and hence might not know to flush their + ** cache. The use of a stale cache can lead to database corruption. + */ + assert( pFile->inNormalWrite==0 + || pFile->dbUpdate==0 + || pFile->transCntrChng==1 ); + pFile->inNormalWrite = 0; +#endif + + if( pFile->eFileLock==EXCLUSIVE_LOCK ){ + rc = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 0); + if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1) ){ + /* only re-establish the shared lock if necessary */ + int sharedLockByte = SHARED_FIRST+pInode->sharedByte; + rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 1); + } else { + skipShared = 1; + } + } + if( rc==SQLITE_OK && pFile->eFileLock>=PENDING_LOCK ){ + rc = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); + } + if( rc==SQLITE_OK && pFile->eFileLock>=RESERVED_LOCK && context->reserved ){ + rc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); + if( !rc ){ + context->reserved = 0; + } + } + if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1)){ + pInode->eFileLock = SHARED_LOCK; + } + } + if( rc==SQLITE_OK && eFileLock==NO_LOCK ){ + + /* Decrement the shared lock counter. Release the lock using an + ** OS call only when all threads in this same process have released + ** the lock. + */ + unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte; + pInode->nShared--; + if( pInode->nShared==0 ){ + SimulateIOErrorBenign(1); + SimulateIOError( h=(-1) ) + SimulateIOErrorBenign(0); + if( !skipShared ){ + rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0); + } + if( !rc ){ + pInode->eFileLock = NO_LOCK; + pFile->eFileLock = NO_LOCK; + } + } + if( rc==SQLITE_OK ){ + pInode->nLock--; + assert( pInode->nLock>=0 ); + if( pInode->nLock==0 ){ + closePendingFds(pFile); + } + } + } + + unixLeaveMutex(); + if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; + return rc; +} + +/* +** Close a file & cleanup AFP specific locking context +*/ +static int afpClose(sqlite3_file *id) { + int rc = SQLITE_OK; + unixFile *pFile = (unixFile*)id; + assert( id!=0 ); + afpUnlock(id, NO_LOCK); + unixEnterMutex(); + if( pFile->pInode && pFile->pInode->nLock ){ + /* If there are outstanding locks, do not actually close the file just + ** yet because that would clear those locks. Instead, add the file + ** descriptor to pInode->aPending. 
It will be automatically closed when + ** the last lock is cleared. + */ + setPendingFd(pFile); + } + releaseInodeInfo(pFile); + sqlite3_free(pFile->lockingContext); + rc = closeUnixFile(id); + unixLeaveMutex(); + return rc; +} + +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ +/* +** The code above is the AFP lock implementation. The code is specific +** to MacOSX and does not work on other unix platforms. No alternative +** is available. If you don't compile for a mac, then the "unix-afp" +** VFS is not available. +** +********************* End of the AFP lock implementation ********************** +******************************************************************************/ + +/****************************************************************************** +*************************** Begin NFS Locking ********************************/ + +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +/* + ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock + ** must be either NO_LOCK or SHARED_LOCK. + ** + ** If the locking level of the file descriptor is already at or below + ** the requested locking level, this routine is a no-op. + */ +static int nfsUnlock(sqlite3_file *id, int eFileLock){ + return posixUnlock(id, eFileLock, 1); +} + +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ +/* +** The code above is the NFS lock implementation. The code is specific +** to MacOSX and does not work on other unix platforms. No alternative +** is available. +** +********************* End of the NFS lock implementation ********************** +******************************************************************************/ + +/****************************************************************************** +**************** Non-locking sqlite3_file methods ***************************** +** +** The next division contains implementations for all methods of the +** sqlite3_file object other than the locking methods. The locking +** methods were defined in divisions above (one locking method per +** division). Those methods that are common to all locking modes +** are gather together into this division. +*/ + +/* +** Seek to the offset passed as the second argument, then read cnt +** bytes into pBuf. Return the number of bytes actually read. +** +** NB: If you define USE_PREAD or USE_PREAD64, then it might also +** be necessary to define _XOPEN_SOURCE to be 500. This varies from +** one system to another. Since SQLite does not define USE_PREAD +** in any form by default, we will not attempt to define _XOPEN_SOURCE. +** See tickets #2741 and #2681. +** +** To avoid stomping the errno value on a failed read the lastErrno value +** is set before returning. 
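+**
+** (Aside, not from the original sources: with USE_PREAD defined the
+** seek+read pair below collapses into the single call
+**
+**     got = pread(fd, pBuf, cnt, offset);
+**
+** which reads from an absolute offset without moving the descriptor's
+** file position - the reason a single descriptor can safely be shared
+** between threads in that configuration.)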
+*/
+static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){
+  int got;
+  int prior = 0;
+#if (!defined(USE_PREAD) && !defined(USE_PREAD64))
+  i64 newOffset;
+#endif
+  TIMER_START;
+  assert( cnt==(cnt&0x1ffff) );
+  assert( id->h>2 );
+  do{
+#if defined(USE_PREAD)
+    got = osPread(id->h, pBuf, cnt, offset);
+    SimulateIOError( got = -1 );
+#elif defined(USE_PREAD64)
+    got = osPread64(id->h, pBuf, cnt, offset);
+    SimulateIOError( got = -1 );
+#else
+    newOffset = lseek(id->h, offset, SEEK_SET);
+    SimulateIOError( newOffset = -1 );
+    if( newOffset<0 ){
+      storeLastErrno((unixFile*)id, errno);
+      return -1;
+    }
+    got = osRead(id->h, pBuf, cnt);
+#endif
+    if( got==cnt ) break;
+    if( got<0 ){
+      if( errno==EINTR ){ got = 1; continue; }
+      prior = 0;
+      storeLastErrno((unixFile*)id, errno);
+      break;
+    }else if( got>0 ){
+      cnt -= got;
+      offset += got;
+      prior += got;
+      pBuf = (void*)(got + (char*)pBuf);
+    }
+  }while( got>0 );
+  TIMER_END;
+  OSTRACE(("READ %-3d %5d %7lld %llu\n",
+            id->h, got+prior, offset-prior, TIMER_ELAPSED));
+  return got+prior;
+}
+
+/*
+** Read data from a file into a buffer.  Return SQLITE_OK if all
+** bytes were read successfully and SQLITE_IOERR if anything goes
+** wrong.
+*/
+static int unixRead(
+  sqlite3_file *id,
+  void *pBuf,
+  int amt,
+  sqlite3_int64 offset
+){
+  unixFile *pFile = (unixFile *)id;
+  int got;
+  assert( id );
+  assert( offset>=0 );
+  assert( amt>0 );
+
+  /* If this is a database file (not a journal, master-journal or temp
+  ** file), the bytes in the locking range should never be read or written. */
+#if 0
+  assert( pFile->pUnused==0
+       || offset>=PENDING_BYTE+512
+       || offset+amt<=PENDING_BYTE
+  );
+#endif
+
+#if SQLITE_MAX_MMAP_SIZE>0
+  /* Deal with as much of this read request as possible by transferring
+  ** data from the memory mapping using memcpy().  */
+  if( offset<pFile->mmapSize ){
+    if( offset+amt <= pFile->mmapSize ){
+      memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
+      return SQLITE_OK;
+    }else{
+      int nCopy = pFile->mmapSize - offset;
+      memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy);
+      pBuf = &((u8 *)pBuf)[nCopy];
+      amt -= nCopy;
+      offset += nCopy;
+    }
+  }
+#endif
+
+  got = seekAndRead(pFile, offset, pBuf, amt);
+  if( got==amt ){
+    return SQLITE_OK;
+  }else if( got<0 ){
+    /* lastErrno set by seekAndRead */
+    return SQLITE_IOERR_READ;
+  }else{
+    storeLastErrno(pFile, 0);   /* not a system error */
+    /* Unread parts of the buffer must be zero-filled */
+    memset(&((char*)pBuf)[got], 0, amt-got);
+    return SQLITE_IOERR_SHORT_READ;
+  }
+}
+
+/*
+** Attempt to seek the file-descriptor passed as the first argument to
+** absolute offset iOff, then attempt to write nBuf bytes of data from
+** pBuf to it. If an error occurs, return -1 and set *piErrno. Otherwise,
+** return the actual number of bytes written (which may be less than
+** nBuf).
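+**
+** (Illustration, not from the original sources: a caller that must land
+** all nBuf bytes is expected to loop on short writes, much as unixWrite()
+** below does:
+**
+**     int err = 0;
+**     while( nBuf>0 ){
+**       int n = seekAndWriteFd(fd, iOff, pBuf, nBuf, &err);
+**       if( n<=0 ) break;                 /* error or full disk */
+**       iOff += n;
+**       nBuf -= n;
+**       pBuf = (const char*)pBuf + n;
+**     }
+** )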
+*/
+static int seekAndWriteFd(
+  int fd,                         /* File descriptor to write to */
+  i64 iOff,                       /* File offset to begin writing at */
+  const void *pBuf,               /* Copy data from this buffer to the file */
+  int nBuf,                       /* Size of buffer pBuf in bytes */
+  int *piErrno                    /* OUT: Error number if error occurs */
+){
+  int rc = 0;                     /* Value returned by system call */
+
+  assert( nBuf==(nBuf&0x1ffff) );
+  assert( fd>2 );
+  assert( piErrno!=0 );
+  nBuf &= 0x1ffff;
+  TIMER_START;
+
+#if defined(USE_PREAD)
+  do{ rc = (int)osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR );
+#elif defined(USE_PREAD64)
+  do{ rc = (int)osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR);
+#else
+  do{
+    i64 iSeek = lseek(fd, iOff, SEEK_SET);
+    SimulateIOError( iSeek = -1 );
+    if( iSeek<0 ){
+      rc = -1;
+      break;
+    }
+    rc = osWrite(fd, pBuf, nBuf);
+  }while( rc<0 && errno==EINTR );
+#endif
+
+  TIMER_END;
+  OSTRACE(("WRITE %-3d %5d %7lld %llu\n", fd, rc, iOff, TIMER_ELAPSED));
+
+  if( rc<0 ) *piErrno = errno;
+  return rc;
+}
+
+
+/*
+** Seek to absolute offset "offset" in the file described by id, then
+** write cnt bytes from pBuf to it.  Return the number of bytes actually
+** written (which may be less than cnt on error or a full disk).
+**
+** To avoid stomping the errno value on a failed write the lastErrno value
+** is set before returning.
+*/
+static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){
+  return seekAndWriteFd(id->h, offset, pBuf, cnt, &id->lastErrno);
+}
+
+
+/*
+** Write data from a buffer into a file.  Return SQLITE_OK on success
+** or some other error code on failure.
+*/
+static int unixWrite(
+  sqlite3_file *id,
+  const void *pBuf,
+  int amt,
+  sqlite3_int64 offset
+){
+  unixFile *pFile = (unixFile*)id;
+  int wrote = 0;
+  assert( id );
+  assert( amt>0 );
+
+  /* If this is a database file (not a journal, master-journal or temp
+  ** file), the bytes in the locking range should never be read or written. */
+#if 0
+  assert( pFile->pUnused==0
+       || offset>=PENDING_BYTE+512
+       || offset+amt<=PENDING_BYTE
+  );
+#endif
+
+#ifdef SQLITE_DEBUG
+  /* If we are doing a normal write to a database file (as opposed to
+  ** doing a hot-journal rollback or a write to some file other than a
+  ** normal database file) then record the fact that the database
+  ** has changed.  If the transaction counter is modified, record that
+  ** fact too.
+  */
+  if( pFile->inNormalWrite ){
+    pFile->dbUpdate = 1;  /* The database has been modified */
+    if( offset<=24 && offset+amt>=27 ){
+      int rc;
+      char oldCntr[4];
+      SimulateIOErrorBenign(1);
+      rc = seekAndRead(pFile, 24, oldCntr, 4);
+      SimulateIOErrorBenign(0);
+      if( rc!=4 || memcmp(oldCntr, &((char*)pBuf)[24-offset], 4)!=0 ){
+        pFile->transCntrChng = 1;  /* The transaction counter has changed */
+      }
+    }
+  }
+#endif
+
+#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
+  /* Deal with as much of this write request as possible by transferring
+  ** data from the memory mapping using memcpy().
+  */
+  if( offset<pFile->mmapSize ){
+    if( offset+amt <= pFile->mmapSize ){
+      memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
+      return SQLITE_OK;
+    }else{
+      int nCopy = pFile->mmapSize - offset;
+      memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy);
+      pBuf = &((u8 *)pBuf)[nCopy];
+      amt -= nCopy;
+      offset += nCopy;
+    }
+  }
+#endif
+
+  while( (wrote = seekAndWrite(pFile, offset, pBuf, amt))<amt && wrote>0 ){
+    amt -= wrote;
+    offset += wrote;
+    pBuf = &((char*)pBuf)[wrote];
+  }
+  SimulateIOError(( wrote=(-1), amt=1 ));
+  SimulateDiskfullError(( wrote=0, amt=1 ));
+
+  if( amt>wrote ){
+    if( wrote<0 && pFile->lastErrno!=ENOSPC ){
+      /* lastErrno set by seekAndWrite */
+      return SQLITE_IOERR_WRITE;
+    }else{
+      storeLastErrno(pFile, 0); /* not a system error */
+      return SQLITE_FULL;
+    }
+  }
+
+  return SQLITE_OK;
+}
+
+#ifdef SQLITE_TEST
+/*
+** Count the number of fullsyncs and normal syncs.  This is used to test
+** that syncs and fullsyncs are occurring at the right times.
+*/
+SQLITE_API int sqlite3_sync_count = 0;
+SQLITE_API int sqlite3_fullsync_count = 0;
+#endif
+
+/*
+** We do not trust systems to provide a working fdatasync().  Some do.
+** Others do not.  To be safe, we will stick with the (slightly slower)
+** fsync(). If you know that your system does support fdatasync() correctly,
+** then simply compile with -Dfdatasync=fdatasync or -DHAVE_FDATASYNC
+*/
+#if !defined(fdatasync) && !HAVE_FDATASYNC
+# define fdatasync fsync
+#endif
+
+/*
+** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not
+** the F_FULLFSYNC macro is defined.  F_FULLFSYNC is currently
+** only available on Mac OS X.  But that could change.
+*/
+#ifdef F_FULLFSYNC
+# define HAVE_FULLFSYNC 1
+#else
+# define HAVE_FULLFSYNC 0
+#endif
+
+
+/*
+** The fsync() system call does not work as advertised on many
+** unix systems.  The following procedure is an attempt to make
+** it work better.
+**
+** The SQLITE_NO_SYNC macro disables all fsync()s.  This is useful
+** for testing when we want to run through the test suite quickly.
+** You are strongly advised *not* to deploy with SQLITE_NO_SYNC
+** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash
+** or power failure will likely corrupt the database file.
+**
+** SQLite sets the dataOnly flag if the size of the file is unchanged.
+** The idea behind dataOnly is that it should only write the file content
+** to disk, not the inode.  We only set dataOnly if the file size is
+** unchanged since the file size is part of the inode.  However,
+** Ted Ts'o tells us that fdatasync() will also write the inode if the
+** file size has changed.  The only real difference between fdatasync()
+** and fsync(), Ted tells us, is that fdatasync() will not flush the
+** inode if the mtime or owner or other inode attributes have changed.
+** We only care about the file size, not the other file attributes, so
+** as far as SQLite is concerned, an fdatasync() is always adequate.
+** So, we always use fdatasync() if it is available, regardless of
+** the value of the dataOnly flag.
+*/
+static int full_fsync(int fd, int fullSync, int dataOnly){
+  int rc;
+
+  /* The following "ifdef/elif/else/" block has the same structure as
+  ** the one below.  It is replicated here solely to avoid cluttering
+  ** up the real code with the UNUSED_PARAMETER() macros.
+  */
+#ifdef SQLITE_NO_SYNC
+  UNUSED_PARAMETER(fd);
+  UNUSED_PARAMETER(fullSync);
+  UNUSED_PARAMETER(dataOnly);
+#elif HAVE_FULLFSYNC
+  UNUSED_PARAMETER(dataOnly);
+#else
+  UNUSED_PARAMETER(fullSync);
+  UNUSED_PARAMETER(dataOnly);
+#endif
+
+  /* Record the number of times that we do a normal fsync() and
+  ** FULLSYNC.  This is used during testing to verify that this procedure
+  ** gets called with the correct arguments.
+  */
+#ifdef SQLITE_TEST
+  if( fullSync ) sqlite3_fullsync_count++;
+  sqlite3_sync_count++;
+#endif
+
+  /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a
+  ** no-op.  But go ahead and call fstat() to validate the file
+  ** descriptor as we need a method to provoke a failure during
+  ** coverage testing.
+  */
+#ifdef SQLITE_NO_SYNC
+  {
+    struct stat buf;
+    rc = osFstat(fd, &buf);
+  }
+#elif HAVE_FULLFSYNC
+  if( fullSync ){
+    rc = osFcntl(fd, F_FULLFSYNC, 0);
+  }else{
+    rc = 1;
+  }
+  /* If the FULLFSYNC failed, fall back to attempting an fsync().
+  ** It shouldn't be possible for fullfsync to fail on the local
+  ** file system (on OSX), so failure indicates that FULLFSYNC
+  ** isn't supported for this file system. So, attempt an fsync
+  ** and (for now) ignore the overhead of a superfluous fcntl call.
+  ** It'd be better to detect fullfsync support once and avoid
+  ** the fcntl call every time sync is called.
+  */
+  if( rc ) rc = fsync(fd);
+
+#elif defined(__APPLE__)
+  /* fdatasync() on HFS+ doesn't yet correctly flush the file size if it
+  ** changed, so currently we default to the macro that redefines
+  ** fdatasync to fsync
+  */
+  rc = fsync(fd);
+#else
+  rc = fdatasync(fd);
+#if OS_VXWORKS
+  if( rc==-1 && errno==ENOTSUP ){
+    rc = fsync(fd);
+  }
+#endif /* OS_VXWORKS */
+#endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */
+
+  if( OS_VXWORKS && rc!= -1 ){
+    rc = 0;
+  }
+  return rc;
+}
+
+/*
+** Open a file descriptor to the directory containing file zFilename.
+** If successful, *pFd is set to the opened file descriptor and
+** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM
+** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined
+** value.
+**
+** The directory file descriptor is used for only one thing - to
+** fsync() a directory to make sure file creation and deletion events
+** are flushed to disk.  Such fsyncs are not needed on newer
+** journaling filesystems, but are required on older filesystems.
+**
+** This routine can be overridden using the xSetSysCall interface.
+** The ability to override this routine was added in support of the
+** chromium sandbox.  Opening a directory is a security risk (we are
+** told) so making it overrideable allows the chromium sandbox to
+** replace this routine with a harmless no-op.  To make this routine
+** a no-op, replace it with a stub that returns SQLITE_OK but leaves
+** *pFd set to a negative number.
+**
+** If SQLITE_OK is returned, the caller is responsible for closing
+** the file descriptor *pFd using close().
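+**
+** (Illustration, not from the original sources - the path is made up:
+** the durable-creation pattern this routine enables, and which unixSync()
+** applies below to journal files, is
+**
+**     int dirfd;
+**     if( openDirectory("/home/user1/db-journal", &dirfd)==SQLITE_OK
+**      && dirfd>=0 ){
+**       fsync(dirfd);      /* flush the directory entry itself to disk */
+**       close(dirfd);
+**     }
+** )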
+*/ +static int openDirectory(const char *zFilename, int *pFd){ + int ii; + int fd = -1; + char zDirname[MAX_PATHNAME+1]; + + sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename); + for(ii=(int)strlen(zDirname); ii>0 && zDirname[ii]!='/'; ii--); + if( ii>0 ){ + zDirname[ii] = '\0'; + }else{ + if( zDirname[0]!='/' ) zDirname[0] = '.'; + zDirname[1] = 0; + } + fd = robust_open(zDirname, O_RDONLY|O_BINARY, 0); + if( fd>=0 ){ + OSTRACE(("OPENDIR %-3d %s\n", fd, zDirname)); + } + *pFd = fd; + if( fd>=0 ) return SQLITE_OK; + return unixLogError(SQLITE_CANTOPEN_BKPT, "openDirectory", zDirname); +} + +/* +** Make sure all writes to a particular file are committed to disk. +** +** If dataOnly==0 then both the file itself and its metadata (file +** size, access time, etc) are synced. If dataOnly!=0 then only the +** file data is synced. +** +** Under Unix, also make sure that the directory entry for the file +** has been created by fsync-ing the directory that contains the file. +** If we do not do this and we encounter a power failure, the directory +** entry for the journal might not exist after we reboot. The next +** SQLite to access the file will not know that the journal exists (because +** the directory entry for the journal was never created) and the transaction +** will not roll back - possibly leading to database corruption. +*/ +static int unixSync(sqlite3_file *id, int flags){ + int rc; + unixFile *pFile = (unixFile*)id; + + int isDataOnly = (flags&SQLITE_SYNC_DATAONLY); + int isFullsync = (flags&0x0F)==SQLITE_SYNC_FULL; + + /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ + assert((flags&0x0F)==SQLITE_SYNC_NORMAL + || (flags&0x0F)==SQLITE_SYNC_FULL + ); + + /* Unix cannot, but some systems may return SQLITE_FULL from here. This + ** line is to test that doing so does not cause any problems. + */ + SimulateDiskfullError( return SQLITE_FULL ); + + assert( pFile ); + OSTRACE(("SYNC %-3d\n", pFile->h)); + rc = full_fsync(pFile->h, isFullsync, isDataOnly); + SimulateIOError( rc=1 ); + if( rc ){ + storeLastErrno(pFile, errno); + return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath); + } + + /* Also fsync the directory containing the file if the DIRSYNC flag + ** is set. This is a one-time occurrence. Many systems (examples: AIX) + ** are unable to fsync a directory, so ignore errors on the fsync. + */ + if( pFile->ctrlFlags & UNIXFILE_DIRSYNC ){ + int dirfd; + OSTRACE(("DIRSYNC %s (have_fullfsync=%d fullsync=%d)\n", pFile->zPath, + HAVE_FULLFSYNC, isFullsync)); + rc = osOpenDirectory(pFile->zPath, &dirfd); + if( rc==SQLITE_OK ){ + full_fsync(dirfd, 0, 0); + robust_close(pFile, dirfd, __LINE__); + }else{ + assert( rc==SQLITE_CANTOPEN ); + rc = SQLITE_OK; + } + pFile->ctrlFlags &= ~UNIXFILE_DIRSYNC; + } + return rc; +} + +/* +** Truncate an open file to a specified size +*/ +static int unixTruncate(sqlite3_file *id, i64 nByte){ + unixFile *pFile = (unixFile *)id; + int rc; + assert( pFile ); + SimulateIOError( return SQLITE_IOERR_TRUNCATE ); + + /* If the user has configured a chunk-size for this file, truncate the + ** file so that it consists of an integer number of chunks (i.e. the + ** actual file size after the operation may be larger than the requested + ** size). 
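+  **
+  ** (Worked example, not from the original sources: with szChunk==8192,
+  ** a request of nByte==10000 rounds up to ((10000+8192-1)/8192)*8192
+  ** == 2*8192 == 16384, so the file always spans a whole number of
+  ** chunks.)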
+  */
+  if( pFile->szChunk>0 ){
+    nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk;
+  }
+
+  rc = robust_ftruncate(pFile->h, nByte);
+  if( rc ){
+    storeLastErrno(pFile, errno);
+    return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
+  }else{
+#ifdef SQLITE_DEBUG
+    /* If we are doing a normal write to a database file (as opposed to
+    ** doing a hot-journal rollback or a write to some file other than a
+    ** normal database file) and we truncate the file to zero length,
+    ** that effectively updates the change counter.  This might happen
+    ** when restoring a database using the backup API from a zero-length
+    ** source.
+    */
+    if( pFile->inNormalWrite && nByte==0 ){
+      pFile->transCntrChng = 1;
+    }
+#endif
+
+#if SQLITE_MAX_MMAP_SIZE>0
+    /* If the file was just truncated to a size smaller than the currently
+    ** mapped region, reduce the effective mapping size as well. SQLite will
+    ** use read() and write() to access data beyond this point from now on.
+    */
+    if( nByte<pFile->mmapSize ){
+      pFile->mmapSize = nByte;
+    }
+#endif
+
+    return SQLITE_OK;
+  }
+}
+
+/*
+** Determine the current size of a file in bytes
+*/
+static int unixFileSize(sqlite3_file *id, i64 *pSize){
+  int rc;
+  struct stat buf;
+  assert( id );
+  rc = osFstat(((unixFile*)id)->h, &buf);
+  SimulateIOError( rc=1 );
+  if( rc!=0 ){
+    storeLastErrno((unixFile*)id, errno);
+    return SQLITE_IOERR_FSTAT;
+  }
+  *pSize = buf.st_size;
+
+  /* When opening a zero-size database, the findInodeInfo() procedure
+  ** writes a single byte into that file in order to work around a bug
+  ** in the OS-X msdos filesystem.  In order to avoid problems with upper
+  ** layers, we need to report this file size as zero even though it is
+  ** really 1.   Ticket #3260.
+  */
+  if( *pSize==1 ) *pSize = 0;
+
+
+  return SQLITE_OK;
+}
+
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+/*
+** Handler for proxy-locking file-control verbs.  Defined below in the
+** proxying locking division.
+*/
+static int proxyFileControl(sqlite3_file*,int,void*);
+#endif
+
+/*
+** This function is called to handle the SQLITE_FCNTL_SIZE_HINT
+** file-control operation.  Enlarge the database to nBytes in size
+** (rounded up to the next chunk-size).  If the database is already
+** nBytes or larger, this routine is a no-op.
+*/
+static int fcntlSizeHint(unixFile *pFile, i64 nByte){
+  if( pFile->szChunk>0 ){
+    i64 nSize;                    /* Required file size */
+    struct stat buf;              /* Used to hold return values of fstat() */
+
+    if( osFstat(pFile->h, &buf) ){
+      return SQLITE_IOERR_FSTAT;
+    }
+
+    nSize = ((nByte+pFile->szChunk-1) / pFile->szChunk) * pFile->szChunk;
+    if( nSize>(i64)buf.st_size ){
+
+#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE
+      /* The code below is handling the return value of osFallocate()
+      ** correctly. posix_fallocate() is defined to "returns zero on success,
+      ** or an error number on failure". See the manpage for details. */
+      int err;
+      do{
+        err = osFallocate(pFile->h, buf.st_size, nSize-buf.st_size);
+      }while( err==EINTR );
+      if( err ) return SQLITE_IOERR_WRITE;
+#else
+      /* If the OS does not have posix_fallocate(), fake it. Write a
+      ** single byte to the last byte in each block that falls entirely
+      ** within the extended region. Then, if required, a single byte
+      ** at offset (nSize-1), to set the size of the file correctly.
+      ** This is a similar technique to that used by glibc on systems
+      ** that do not have a real fallocate() call.
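+      **
+      ** (Worked example, not from the original sources: with
+      ** buf.st_size==1000, nBlk==4096 and nSize==10000, the loop below
+      ** writes one byte at offsets 4095 and 8191, then clamps the final
+      ** probe to nSize-1 and writes one byte at offset 9999, leaving the
+      ** file exactly 10000 bytes long.)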
+      */
+      int nBlk = buf.st_blksize;  /* File-system block size */
+      int nWrite = 0;             /* Number of bytes written by seekAndWrite */
+      i64 iWrite;                 /* Next offset to write to */
+
+      iWrite = (buf.st_size/nBlk)*nBlk + nBlk - 1;
+      assert( iWrite>=buf.st_size );
+      assert( ((iWrite+1)%nBlk)==0 );
+      for(/*no-op*/; iWrite<nSize+nBlk-1; iWrite+=nBlk){
+        if( iWrite>=nSize ) iWrite = nSize - 1;
+        nWrite = seekAndWrite(pFile, iWrite, "", 1);
+        if( nWrite!=1 ) return SQLITE_IOERR_WRITE;
+      }
+#endif
+    }
+  }
+
+#if SQLITE_MAX_MMAP_SIZE>0
+  if( pFile->mmapSizeMax>0 && nByte>pFile->mmapSize ){
+    int rc;
+    if( pFile->szChunk<=0 ){
+      if( robust_ftruncate(pFile->h, nByte) ){
+        storeLastErrno(pFile, errno);
+        return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath);
+      }
+    }
+
+    rc = unixMapfile(pFile, nByte);
+    return rc;
+  }
+#endif
+
+  return SQLITE_OK;
+}
+
+/*
+** If *pArg is initially negative then this is a query.  Set *pArg to
+** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set.
+**
+** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
+*/
+static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){
+  if( *pArg<0 ){
+    *pArg = (pFile->ctrlFlags & mask)!=0;
+  }else if( (*pArg)==0 ){
+    pFile->ctrlFlags &= ~mask;
+  }else{
+    pFile->ctrlFlags |= mask;
+  }
+}
+
+/* Forward declaration */
+static int unixGetTempname(int nBuf, char *zBuf);
+
+/*
+** Information and control of an open file handle.
+*/
+static int unixFileControl(sqlite3_file *id, int op, void *pArg){
+  unixFile *pFile = (unixFile*)id;
+  switch( op ){
+    case SQLITE_FCNTL_LOCKSTATE: {
+      *(int*)pArg = pFile->eFileLock;
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_LAST_ERRNO: {
+      *(int*)pArg = pFile->lastErrno;
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_CHUNK_SIZE: {
+      pFile->szChunk = *(int *)pArg;
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_SIZE_HINT: {
+      int rc;
+      SimulateIOErrorBenign(1);
+      rc = fcntlSizeHint(pFile, *(i64 *)pArg);
+      SimulateIOErrorBenign(0);
+      return rc;
+    }
+    case SQLITE_FCNTL_PERSIST_WAL: {
+      unixModeBit(pFile, UNIXFILE_PERSIST_WAL, (int*)pArg);
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_POWERSAFE_OVERWRITE: {
+      unixModeBit(pFile, UNIXFILE_PSOW, (int*)pArg);
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_VFSNAME: {
+      *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName);
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_TEMPFILENAME: {
+      char *zTFile = sqlite3_malloc64( pFile->pVfs->mxPathname );
+      if( zTFile ){
+        unixGetTempname(pFile->pVfs->mxPathname, zTFile);
+        *(char**)pArg = zTFile;
+      }
+      return SQLITE_OK;
+    }
+    case SQLITE_FCNTL_HAS_MOVED: {
+      *(int*)pArg = fileHasMoved(pFile);
+      return SQLITE_OK;
+    }
+#if SQLITE_MAX_MMAP_SIZE>0
+    case SQLITE_FCNTL_MMAP_SIZE: {
+      i64 newLimit = *(i64*)pArg;
+      int rc = SQLITE_OK;
+      if( newLimit>sqlite3GlobalConfig.mxMmap ){
+        newLimit = sqlite3GlobalConfig.mxMmap;
+      }
+      *(i64*)pArg = pFile->mmapSizeMax;
+      if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){
+        pFile->mmapSizeMax = newLimit;
+        if( pFile->mmapSize>0 ){
+          unixUnmapfile(pFile);
+          rc = unixMapfile(pFile, -1);
+        }
+      }
+      return rc;
+    }
+#endif
+#ifdef SQLITE_DEBUG
+    /* The pager calls this method to signal that it has done
+    ** a rollback and that the database is therefore unchanged and
+    ** hence it is OK for the transaction change counter to be
+    ** unchanged.
+    */
+    case SQLITE_FCNTL_DB_UNCHANGED: {
+      ((unixFile*)id)->dbUpdate = 0;
+      return SQLITE_OK;
+    }
+#endif
+#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
+    case SQLITE_FCNTL_SET_LOCKPROXYFILE:
+    case SQLITE_FCNTL_GET_LOCKPROXYFILE: {
+      return proxyFileControl(id,op,pArg);
+    }
+#endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */
+  }
+  return SQLITE_NOTFOUND;
+}
+
+/*
+** Return the sector size in bytes of the underlying block device for
+** the specified file. This is almost always 512 bytes, but may be
+** larger for some devices.
+**
+** SQLite code assumes this function cannot fail. It also assumes that
+** if two files are created in the same file-system directory (i.e.
+** a database and its journal file) that the sector size will be the
+** same for both.
+*/
+#ifndef __QNXNTO__
+static int unixSectorSize(sqlite3_file *NotUsed){
+  UNUSED_PARAMETER(NotUsed);
+  return SQLITE_DEFAULT_SECTOR_SIZE;
+}
+#endif
+
+/*
+** The following version of unixSectorSize() is optimized for QNX.
+*/
+#ifdef __QNXNTO__
+#include <sys/dcmd_blk.h>
+#include <sys/statvfs.h>
+static int unixSectorSize(sqlite3_file *id){
+  unixFile *pFile = (unixFile*)id;
+  if( pFile->sectorSize == 0 ){
+    struct statvfs fsInfo;
+
+    /* Set defaults for non-supported filesystems */
+    pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE;
+    pFile->deviceCharacteristics = 0;
+    if( fstatvfs(pFile->h, &fsInfo) == -1 ) {
+      return pFile->sectorSize;
+    }
+
+    if( !strcmp(fsInfo.f_basetype, "tmp") ) {
+      pFile->sectorSize = fsInfo.f_bsize;
+      pFile->deviceCharacteristics =
+        SQLITE_IOCAP_ATOMIC4K |      /* All ram filesystem writes are atomic */
+        SQLITE_IOCAP_SAFE_APPEND |   /* growing the file does not occur until
+                                     ** the write succeeds */
+        SQLITE_IOCAP_SEQUENTIAL |    /* The ram filesystem has no write behind
+                                     ** so it is ordered */
+        0;
+    }else if( strstr(fsInfo.f_basetype, "etfs") ){
+      pFile->sectorSize = fsInfo.f_bsize;
+      pFile->deviceCharacteristics =
+        /* etfs cluster size writes are atomic */
+        (pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) |
+        SQLITE_IOCAP_SAFE_APPEND |   /* growing the file does not occur until
+                                     ** the write succeeds */
+        SQLITE_IOCAP_SEQUENTIAL |    /* The ram filesystem has no write behind
+                                     ** so it is ordered */
+        0;
+    }else if( !strcmp(fsInfo.f_basetype, "qnx6") ){
+      pFile->sectorSize = fsInfo.f_bsize;
+      pFile->deviceCharacteristics =
+        SQLITE_IOCAP_ATOMIC |        /* All filesystem writes are atomic */
+        SQLITE_IOCAP_SAFE_APPEND |   /* growing the file does not occur until
+                                     ** the write succeeds */
+        SQLITE_IOCAP_SEQUENTIAL |    /* The ram filesystem has no write behind
+                                     ** so it is ordered */
+        0;
+    }else if( !strcmp(fsInfo.f_basetype, "qnx4") ){
+      pFile->sectorSize = fsInfo.f_bsize;
+      pFile->deviceCharacteristics =
+        /* full bitset of atomics from max sector size and smaller */
+        ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 |
+        SQLITE_IOCAP_SEQUENTIAL |    /* The ram filesystem has no write behind
+                                     ** so it is ordered */
+        0;
+    }else if( strstr(fsInfo.f_basetype, "dos") ){
+      pFile->sectorSize = fsInfo.f_bsize;
+      pFile->deviceCharacteristics =
+        /* full bitset of atomics from max sector size and smaller */
+        ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 |
+        SQLITE_IOCAP_SEQUENTIAL |    /* The ram filesystem has no write behind
+                                     ** so it is ordered */
+        0;
+    }else{
+      pFile->deviceCharacteristics =
+        SQLITE_IOCAP_ATOMIC512 |     /* blocks are atomic */
+        SQLITE_IOCAP_SAFE_APPEND |   /* growing the file does not occur until
+                                     ** the write succeeds */
+        0;
+    }
+  }
+  /* Last chance verification.
If the sector size isn't a multiple of 512 + ** then it isn't valid.*/ + if( pFile->sectorSize % 512 != 0 ){ + pFile->deviceCharacteristics = 0; + pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; + } + return pFile->sectorSize; +} +#endif /* __QNXNTO__ */ + +/* +** Return the device characteristics for the file. +** +** This VFS is set up to return SQLITE_IOCAP_POWERSAFE_OVERWRITE by default. +** However, that choice is controversial since technically the underlying +** file system does not always provide powersafe overwrites. (In other +** words, after a power-loss event, parts of the file that were never +** written might end up being altered.) However, non-PSOW behavior is very, +** very rare. And asserting PSOW makes a large reduction in the amount +** of required I/O for journaling, since a lot of padding is eliminated. +** Hence, while POWERSAFE_OVERWRITE is on by default, there is a file-control +** available to turn it off and URI query parameter available to turn it off. +*/ +static int unixDeviceCharacteristics(sqlite3_file *id){ + unixFile *p = (unixFile*)id; + int rc = 0; +#ifdef __QNXNTO__ + if( p->sectorSize==0 ) unixSectorSize(id); + rc = p->deviceCharacteristics; +#endif + if( p->ctrlFlags & UNIXFILE_PSOW ){ + rc |= SQLITE_IOCAP_POWERSAFE_OVERWRITE; + } + return rc; +} + +#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 + +/* +** Return the system page size. +** +** This function should not be called directly by other code in this file. +** Instead, it should be called via macro osGetpagesize(). +*/ +static int unixGetpagesize(void){ +#if OS_VXWORKS + return 1024; +#elif defined(_BSD_SOURCE) + return getpagesize(); +#else + return (int)sysconf(_SC_PAGESIZE); +#endif +} + +#endif /* !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 */ + +#ifndef SQLITE_OMIT_WAL + +/* +** Object used to represent an shared memory buffer. +** +** When multiple threads all reference the same wal-index, each thread +** has its own unixShm object, but they all point to a single instance +** of this unixShmNode object. In other words, each wal-index is opened +** only once per process. +** +** Each unixShmNode object is connected to a single unixInodeInfo object. +** We could coalesce this object into unixInodeInfo, but that would mean +** every open file that does not use shared memory (in other words, most +** open files) would have to carry around this extra information. So +** the unixInodeInfo object contains a pointer to this unixShmNode object +** and the unixShmNode object is created only when needed. +** +** unixMutexHeld() must be true when creating or destroying +** this object or while reading or writing the following fields: +** +** nRef +** +** The following fields are read-only after the object is created: +** +** fid +** zFilename +** +** Either unixShmNode.mutex must be held or unixShmNode.nRef==0 and +** unixMutexHeld() is true when reading or writing any other field +** in this structure. 
+*/
+struct unixShmNode {
+  unixInodeInfo *pInode;     /* unixInodeInfo that owns this SHM node */
+  sqlite3_mutex *mutex;      /* Mutex to access this object */
+  char *zFilename;           /* Name of the mmapped file */
+  int h;                     /* Open file descriptor */
+  int szRegion;              /* Size of shared-memory regions */
+  u16 nRegion;               /* Size of array apRegion */
+  u8 isReadonly;             /* True if read-only */
+  char **apRegion;           /* Array of mapped shared-memory regions */
+  int nRef;                  /* Number of unixShm objects pointing to this */
+  unixShm *pFirst;           /* All unixShm objects pointing to this */
+#ifdef SQLITE_DEBUG
+  u8 exclMask;               /* Mask of exclusive locks held */
+  u8 sharedMask;             /* Mask of shared locks held */
+  u8 nextShmId;              /* Next available unixShm.id value */
+#endif
+};
+
+/*
+** Structure used internally by this VFS to record the state of an
+** open shared memory connection.
+**
+** The following fields are initialized when this object is created and
+** are read-only thereafter:
+**
+**    unixShm.pFile
+**    unixShm.id
+**
+** All other fields are read/write.  The unixShm.pFile->mutex must be held
+** while accessing any read/write fields.
+*/
+struct unixShm {
+  unixShmNode *pShmNode;     /* The underlying unixShmNode object */
+  unixShm *pNext;            /* Next unixShm with the same unixShmNode */
+  u8 hasMutex;               /* True if holding the unixShmNode mutex */
+  u8 id;                     /* Id of this connection within its unixShmNode */
+  u16 sharedMask;            /* Mask of shared locks held */
+  u16 exclMask;              /* Mask of exclusive locks held */
+};
+
+/*
+** Constants used for locking
+*/
+#define UNIX_SHM_BASE   ((22+SQLITE_SHM_NLOCK)*4)         /* first lock byte */
+#define UNIX_SHM_DMS    (UNIX_SHM_BASE+SQLITE_SHM_NLOCK)  /* deadman switch */
+
+/*
+** Apply posix advisory locks for all bytes from ofst through ofst+n-1.
+**
+** All locks are non-blocking: the fcntl() call below always uses F_SETLK,
+** never F_SETLKW.
+*/
+static int unixShmSystemLock(
+  unixFile *pFile,       /* Open connection to the WAL file */
+  int lockType,          /* F_UNLCK, F_RDLCK, or F_WRLCK */
+  int ofst,              /* First byte of the locking range */
+  int n                  /* Number of bytes to lock */
+){
+  unixShmNode *pShmNode; /* Apply locks to this open shared-memory segment */
+  struct flock f;        /* The posix advisory locking structure */
+  int rc = SQLITE_OK;    /* Result code from fcntl() */
+
+  /* Access to the unixShmNode object is serialized by the caller */
+  pShmNode = pFile->pInode->pShmNode;
+  assert( sqlite3_mutex_held(pShmNode->mutex) || pShmNode->nRef==0 );
+
+  /* Shared locks never span more than one byte */
+  assert( n==1 || lockType!=F_RDLCK );
+
+  /* Locks are within range */
+  assert( n>=1 && n<=SQLITE_SHM_NLOCK );
+
+  if( pShmNode->h>=0 ){
+    /* Initialize the locking parameters */
+    memset(&f, 0, sizeof(f));
+    f.l_type = lockType;
+    f.l_whence = SEEK_SET;
+    f.l_start = ofst;
+    f.l_len = n;
+
+    rc = osFcntl(pShmNode->h, F_SETLK, &f);
+    rc = (rc!=(-1)) ? SQLITE_OK : SQLITE_BUSY;
+  }
+
+  /* Update the global lock state and do debug tracing */
+#ifdef SQLITE_DEBUG
+  { u16 mask;
+  OSTRACE(("SHM-LOCK "));
+  mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<<ofst);
+  if( rc==SQLITE_OK ){
+    if( lockType==F_UNLCK ){
+      OSTRACE(("unlock %d ok", ofst));
+      pShmNode->exclMask &= ~mask;
+      pShmNode->sharedMask &= ~mask;
+    }else if( lockType==F_RDLCK ){
+      OSTRACE(("read-lock %d ok", ofst));
+      pShmNode->exclMask &= ~mask;
+      pShmNode->sharedMask |= mask;
+    }else{
+      assert( lockType==F_WRLCK );
+      OSTRACE(("write-lock %d ok", ofst));
+      pShmNode->exclMask |= mask;
+      pShmNode->sharedMask &= ~mask;
+    }
+  }else{
+    if( lockType==F_UNLCK ){
+      OSTRACE(("unlock %d failed", ofst));
+    }else if( lockType==F_RDLCK ){
+      OSTRACE(("read-lock failed"));
+    }else{
+      assert( lockType==F_WRLCK );
+      OSTRACE(("write-lock %d failed", ofst));
+    }
+  }
+  OSTRACE((" - afterwards %03x,%03x\n",
+           pShmNode->sharedMask, pShmNode->exclMask));
+  }
+#endif
+
+  return rc;
+}
+
+/*
+** Return the minimum number of 32KB shm regions that should be mapped at
+** a time, assuming that each mapping must be an integer multiple of the
+** current system page-size.
+**
+** Usually, this is 1. The exception seems to be systems that are configured
+** to use 64KB pages - in this case each mapping must cover at least two
+** shm regions.
+*/
+static int unixShmRegionPerMap(void){
+  int shmsz = 32*1024;            /* SHM region size */
+  int pgsz = osGetpagesize();     /* System page size */
+  assert( ((pgsz-1)&pgsz)==0 );   /* Page size must be a power of 2 */
+  if( pgsz<shmsz ) return 1;
+  return pgsz/shmsz;
+}
+
+/*
+** Purge the unixShmNodeList list of all entries with unixShmNode.nRef==0.
+**
+** This is not a VFS shared-memory method; it is a utility function called
+** by VFS shared-memory methods.
+*/
+static void unixShmPurge(unixFile *pFd){
+  unixShmNode *p = pFd->pInode->pShmNode;
+  assert( unixMutexHeld() );
+  if( p && ALWAYS(p->nRef==0) ){
+    int nShmPerMap = unixShmRegionPerMap();
+    int i;
+    assert( p->pInode==pFd->pInode );
+    sqlite3_mutex_free(p->mutex);
+    for(i=0; i<p->nRegion; i+=nShmPerMap){
+      if( p->h>=0 ){
+        osMunmap(p->apRegion[i], p->szRegion);
+      }else{
+        sqlite3_free(p->apRegion[i]);
+      }
+    }
+    sqlite3_free(p->apRegion);
+    if( p->h>=0 ){
+      robust_close(pFd, p->h, __LINE__);
+      p->h = -1;
+    }
+    p->pInode->pShmNode = 0;
+    sqlite3_free(p);
+  }
+}
+
+/*
+** Open a shared-memory area associated with open database file pDbFd.
+** This particular implementation uses mmapped files.
+**
+** The file used to implement shared-memory is in the same directory
+** as the open database file and has the same name as the open database
+** file with the "-shm" suffix added.  For example, if the database file
+** is "/home/user1/config.db" then the file that is created and mmapped
+** for shared memory will be called "/home/user1/config.db-shm".
+**
+** Another approach is to use files in /dev/shm or /dev/tmp or some
+** other tmpfs mount.  But if a file in a different directory
+** from the database file is used, then differing access permissions
+** or a chroot() might cause two different processes on the same
+** database to end up using different files for shared memory -
+** meaning that their memory would not really be shared - resulting
+** in database corruption.  Nevertheless, this tmpfs file usage
+** can be enabled at compile-time using -DSQLITE_SHM_DIRECTORY="/dev/shm"
+** or the equivalent.  The use of the SQLITE_SHM_DIRECTORY compile-time
+** option results in an incompatible build of SQLite; if builds of SQLite
+** with differing SQLITE_SHM_DIRECTORY settings attempt to use the
+** same database file at the same time, database corruption will likely
+** result.  The SQLITE_SHM_DIRECTORY compile-time option is considered
+** "unsupported" and may go away in a future SQLite release.
+**
+** When opening a new shared-memory file, if no other instances of that
+** file are currently open, in this process or in other processes, then
+** the file must be truncated to zero length or have its header cleared.
+** +** If the original database file (pDbFd) is using the "unix-excl" VFS +** that means that an exclusive lock is held on the database file and +** that no other processes are able to read or write the database. In +** that case, we do not really need shared memory. No shared memory +** file is created. The shared memory will be simulated with heap memory. +*/ +static int unixOpenSharedMemory(unixFile *pDbFd){ + struct unixShm *p = 0; /* The connection to be opened */ + struct unixShmNode *pShmNode; /* The underlying mmapped file */ + int rc; /* Result code */ + unixInodeInfo *pInode; /* The inode of fd */ + char *zShmFilename; /* Name of the file used for SHM */ + int nShmFilename; /* Size of the SHM filename in bytes */ + + /* Allocate space for the new unixShm object. */ + p = sqlite3_malloc64( sizeof(*p) ); + if( p==0 ) return SQLITE_NOMEM_BKPT; + memset(p, 0, sizeof(*p)); + assert( pDbFd->pShm==0 ); + + /* Check to see if a unixShmNode object already exists. Reuse an existing + ** one if present. Create a new one if necessary. + */ + unixEnterMutex(); + pInode = pDbFd->pInode; + pShmNode = pInode->pShmNode; + if( pShmNode==0 ){ + struct stat sStat; /* fstat() info for database file */ +#ifndef SQLITE_SHM_DIRECTORY + const char *zBasePath = pDbFd->zPath; +#endif + + /* Call fstat() to figure out the permissions on the database file. If + ** a new *-shm file is created, an attempt will be made to create it + ** with the same permissions. + */ + if( osFstat(pDbFd->h, &sStat) ){ + rc = SQLITE_IOERR_FSTAT; + goto shm_open_err; + } + +#ifdef SQLITE_SHM_DIRECTORY + nShmFilename = sizeof(SQLITE_SHM_DIRECTORY) + 31; +#else + nShmFilename = 6 + (int)strlen(zBasePath); +#endif + pShmNode = sqlite3_malloc64( sizeof(*pShmNode) + nShmFilename ); + if( pShmNode==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto shm_open_err; + } + memset(pShmNode, 0, sizeof(*pShmNode)+nShmFilename); + zShmFilename = pShmNode->zFilename = (char*)&pShmNode[1]; +#ifdef SQLITE_SHM_DIRECTORY + sqlite3_snprintf(nShmFilename, zShmFilename, + SQLITE_SHM_DIRECTORY "/sqlite-shm-%x-%x", + (u32)sStat.st_ino, (u32)sStat.st_dev); +#else + sqlite3_snprintf(nShmFilename, zShmFilename, "%s-shm", zBasePath); + sqlite3FileSuffix3(pDbFd->zPath, zShmFilename); +#endif + pShmNode->h = -1; + pDbFd->pInode->pShmNode = pShmNode; + pShmNode->pInode = pDbFd->pInode; + if( sqlite3GlobalConfig.bCoreMutex ){ + pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->mutex==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto shm_open_err; + } + } + + if( pInode->bProcessLock==0 ){ + int openFlags = O_RDWR | O_CREAT; + if( sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){ + openFlags = O_RDONLY; + pShmNode->isReadonly = 1; + } + pShmNode->h = robust_open(zShmFilename, openFlags, (sStat.st_mode&0777)); + if( pShmNode->h<0 ){ + rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zShmFilename); + goto shm_open_err; + } + + /* If this process is running as root, make sure that the SHM file + ** is owned by the same user that owns the original database. Otherwise, + ** the original owner will not be able to connect. + */ + robustFchown(pShmNode->h, sStat.st_uid, sStat.st_gid); + + /* Check to see if another process is holding the dead-man switch. + ** If not, truncate the file to zero length. 
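+      **
+      ** (Sketch of the protocol, for clarity: the first opener tries to
+      ** take F_WRLCK on the single UNIX_SHM_DMS byte; success proves that
+      ** no other live connection holds F_RDLCK there, so the stale file
+      ** can safely be truncated.  Every opener then downgrades to F_RDLCK
+      ** on that byte and holds it for as long as the file stays open.)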
+      */
+      rc = SQLITE_OK;
+      if( unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1)==SQLITE_OK ){
+        if( robust_ftruncate(pShmNode->h, 0) ){
+          rc = unixLogError(SQLITE_IOERR_SHMOPEN, "ftruncate", zShmFilename);
+        }
+      }
+      if( rc==SQLITE_OK ){
+        rc = unixShmSystemLock(pDbFd, F_RDLCK, UNIX_SHM_DMS, 1);
+      }
+      if( rc ) goto shm_open_err;
+    }
+  }
+
+  /* Make the new connection a child of the unixShmNode */
+  p->pShmNode = pShmNode;
+#ifdef SQLITE_DEBUG
+  p->id = pShmNode->nextShmId++;
+#endif
+  pShmNode->nRef++;
+  pDbFd->pShm = p;
+  unixLeaveMutex();
+
+  /* The reference count on pShmNode has already been incremented under
+  ** the cover of the unixEnterMutex() mutex and the pointer from the
+  ** new (struct unixShm) object to the pShmNode has been set. All that is
+  ** left to do is to link the new object into the linked list starting
+  ** at pShmNode->pFirst. This must be done while holding the
+  ** pShmNode->mutex mutex.
+  */
+  sqlite3_mutex_enter(pShmNode->mutex);
+  p->pNext = pShmNode->pFirst;
+  pShmNode->pFirst = p;
+  sqlite3_mutex_leave(pShmNode->mutex);
+  return SQLITE_OK;
+
+  /* Jump here on any error */
+shm_open_err:
+  unixShmPurge(pDbFd);       /* This call frees pShmNode if required */
+  sqlite3_free(p);
+  unixLeaveMutex();
+  return rc;
+}
+
+/*
+** This function is called to obtain a pointer to region iRegion of the
+** shared-memory associated with the database file fd. Shared-memory regions
+** are numbered starting from zero. Each shared-memory region is szRegion
+** bytes in size.
+**
+** If an error occurs, an error code is returned and *pp is set to NULL.
+**
+** Otherwise, if the bExtend parameter is 0 and the requested shared-memory
+** region has not been allocated (by any client, including one running in a
+** separate process), then *pp is set to NULL and SQLITE_OK returned. If
+** bExtend is non-zero and the requested shared-memory region has not yet
+** been allocated, it is allocated by this function.
+**
+** If the shared-memory region has already been allocated or is allocated by
+** this call as described above, then it is mapped into this process's
+** address space (if it is not already), *pp is set to point to the mapped
+** memory and SQLITE_OK returned.
+*/
+static int unixShmMap(
+  sqlite3_file *fd,               /* Handle open on database file */
+  int iRegion,                    /* Region to retrieve */
+  int szRegion,                   /* Size of regions */
+  int bExtend,                    /* True to extend file if necessary */
+  void volatile **pp              /* OUT: Mapped memory */
+){
+  unixFile *pDbFd = (unixFile*)fd;
+  unixShm *p;
+  unixShmNode *pShmNode;
+  int rc = SQLITE_OK;
+  int nShmPerMap = unixShmRegionPerMap();
+  int nReqRegion;
+
+  /* If the shared-memory file has not yet been opened, open it now. */
+  if( pDbFd->pShm==0 ){
+    rc = unixOpenSharedMemory(pDbFd);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+
+  p = pDbFd->pShm;
+  pShmNode = p->pShmNode;
+  sqlite3_mutex_enter(pShmNode->mutex);
+  assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 );
+  assert( pShmNode->pInode==pDbFd->pInode );
+  assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 );
+  assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 );
+
+  /* Minimum number of regions required to be mapped. */
+  nReqRegion = ((iRegion+nShmPerMap) / nShmPerMap) * nShmPerMap;
+
+  if( pShmNode->nRegion<nReqRegion ){
+    char **apNew;                      /* New apRegion[] array */
+    int nByte = nReqRegion*szRegion;   /* Minimum required file size */
+    struct stat sStat;                 /* Used by fstat() */
+
+    pShmNode->szRegion = szRegion;
+
+    if( pShmNode->h>=0 ){
+      /* The requested region is not mapped into this process's address
+      ** space. Check to see if it has been allocated (i.e. if the
+      ** wal-index file is large enough to contain the requested region).
+      */
+      if( osFstat(pShmNode->h, &sStat) ){
+        rc = SQLITE_IOERR_SHMSIZE;
+        goto shmpage_out;
+      }
+
+      if( sStat.st_size<nByte ){
+        /* The requested memory region does not exist. If bExtend is set to
+        ** false, exit early. *pp will be set to NULL and SQLITE_OK
+        ** returned.
+        */
+        if( !bExtend ){
+          goto shmpage_out;
+        }
+
+        /* Alternatively, if bExtend is true, extend the file. Do this by
+        ** writing a single byte to the end of each (OS) page being
+        ** allocated or extended. Technically, we need only write to the
+        ** last page in order to extend the file. But writing to all new
+        ** pages forces the OS to allocate them immediately, which reduces
+        ** the chances of SIGBUS while accessing the mapped region later on.
+        */
+        else{
+          static const int pgsz = 4096;
+          int iPg;
+
+          /* Write to the last byte of each newly allocated or extended page */
+          assert( (nByte % pgsz)==0 );
+          for(iPg=(sStat.st_size/pgsz); iPg<(nByte/pgsz); iPg++){
+            int x = 0;
+            if( seekAndWriteFd(pShmNode->h, iPg*pgsz + pgsz-1, "", 1, &x)!=1 ){
+              const char *zFile = pShmNode->zFilename;
+              rc = unixLogError(SQLITE_IOERR_SHMSIZE, "write", zFile);
+              goto shmpage_out;
+            }
+          }
+        }
+      }
+    }
+
+    /* Map the requested memory region into this process's address space. */
+    apNew = (char **)sqlite3_realloc(
+        pShmNode->apRegion, nReqRegion*sizeof(char *)
+    );
+    if( !apNew ){
+      rc = SQLITE_IOERR_NOMEM_BKPT;
+      goto shmpage_out;
+    }
+    pShmNode->apRegion = apNew;
+    while( pShmNode->nRegion<nReqRegion ){
+      int nMap = szRegion*nShmPerMap;
+      int i;
+      void *pMem;
+      if( pShmNode->h>=0 ){
+        pMem = osMmap(0, nMap,
+            pShmNode->isReadonly ? PROT_READ : PROT_READ|PROT_WRITE,
+            MAP_SHARED, pShmNode->h, szRegion*(i64)pShmNode->nRegion
+        );
+        if( pMem==MAP_FAILED ){
+          rc = unixLogError(SQLITE_IOERR_SHMMAP, "mmap", pShmNode->zFilename);
+          goto shmpage_out;
+        }
+      }else{
+        pMem = sqlite3_malloc64(szRegion);
+        if( pMem==0 ){
+          rc = SQLITE_NOMEM_BKPT;
+          goto shmpage_out;
+        }
+        memset(pMem, 0, szRegion);
+      }
+
+      for(i=0; i<nShmPerMap; i++){
+        pShmNode->apRegion[pShmNode->nRegion+i] = &((char*)pMem)[szRegion*i];
+      }
+      pShmNode->nRegion += nShmPerMap;
+    }
+  }
+
+shmpage_out:
+  if( pShmNode->nRegion>iRegion ){
+    *pp = pShmNode->apRegion[iRegion];
+  }else{
+    *pp = 0;
+  }
+  if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY;
+  sqlite3_mutex_leave(pShmNode->mutex);
+  return rc;
+}
+
+/*
+** Change the lock state for a shared-memory segment.
+**
+** Note that the relationship between SHARED and EXCLUSIVE locks is a little
+** different here than in posix.  In xShmLock(), one can go from unlocked
+** to shared and back or from unlocked to exclusive and back.  But one may
+** not go from shared to exclusive or from exclusive to shared.
+*/
+static int unixShmLock(
+  sqlite3_file *fd,          /* Database file holding the shared memory */
+  int ofst,                  /* First lock to acquire or release */
+  int n,                     /* Number of locks to acquire or release */
+  int flags                  /* What to do with the lock */
+){
+  unixFile *pDbFd = (unixFile*)fd;      /* Connection holding shared memory */
+  unixShm *p = pDbFd->pShm;             /* The shared memory being locked */
+  unixShm *pX;                          /* For looping over all siblings */
+  unixShmNode *pShmNode = p->pShmNode;  /* The underlying file iNode */
+  int rc = SQLITE_OK;                   /* Result code */
+  u16 mask;                             /* Mask of locks to take or release */
+
+  assert( pShmNode==pDbFd->pInode->pShmNode );
+  assert( pShmNode->pInode==pDbFd->pInode );
+  assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK );
+  assert( n>=1 );
+  assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED)
+       || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE)
+       || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED)
+       || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) );
+  assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 );
+  assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 );
+  assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 );
+
+  mask = (1<<(ofst+n)) - (1<<ofst);
+  assert( n>1 || mask==(1<<ofst) );
+  sqlite3_mutex_enter(pShmNode->mutex);
+  if( flags & SQLITE_SHM_UNLOCK ){
+    u16 allMask = 0; /* Mask of locks held by siblings */
+
+    /* See if any siblings hold this same lock */
+    for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+      if( pX==p ) continue;
+      assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 );
+      allMask |= pX->sharedMask;
+    }
+
+    /* Unlock the system-level locks */
+    if( (mask & allMask)==0 ){
+      rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n);
+    }else{
+      rc = SQLITE_OK;
+    }
+
+    /* Undo the local locks */
+    if( rc==SQLITE_OK ){
+      p->exclMask &= ~mask;
+      p->sharedMask &= ~mask;
+    }
+  }else if( flags & SQLITE_SHM_SHARED ){
+ u16 allShared = 0; /* Union of locks held by connections other than "p" */ + + /* Find out which shared locks are already held by sibling connections. + ** If any sibling already holds an exclusive lock, go ahead and return + ** SQLITE_BUSY. + */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( (pX->exclMask & mask)!=0 ){ + rc = SQLITE_BUSY; + break; + } + allShared |= pX->sharedMask; + } + + /* Get shared locks at the system level, if necessary */ + if( rc==SQLITE_OK ){ + if( (allShared & mask)==0 ){ + rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n); + }else{ + rc = SQLITE_OK; + } + } + + /* Get the local shared locks */ + if( rc==SQLITE_OK ){ + p->sharedMask |= mask; + } + }else{ + /* Make sure no sibling connections hold locks that will block this + ** lock. If any do, return SQLITE_BUSY right away. + */ + for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ + if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ + rc = SQLITE_BUSY; + break; + } + } + + /* Get the exclusive locks at the system level. Then if successful + ** also mark the local connection as being locked. + */ + if( rc==SQLITE_OK ){ + rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n); + if( rc==SQLITE_OK ){ + assert( (p->sharedMask & mask)==0 ); + p->exclMask |= mask; + } + } + } + sqlite3_mutex_leave(pShmNode->mutex); + OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", + p->id, osGetpid(0), p->sharedMask, p->exclMask)); + return rc; +} + +/* +** Implement a memory barrier or memory fence on shared memory. +** +** All loads and stores begun before the barrier must complete before +** any load or store begun after the barrier. +*/ +static void unixShmBarrier( + sqlite3_file *fd /* Database file holding the shared memory */ +){ + UNUSED_PARAMETER(fd); + sqlite3MemoryBarrier(); /* compiler-defined memory barrier */ + unixEnterMutex(); /* Also mutex, for redundancy */ + unixLeaveMutex(); +} + +/* +** Close a connection to shared-memory. Delete the underlying +** storage if deleteFlag is true. +** +** If there is no shared memory associated with the connection then this +** routine is a harmless no-op. 
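+**
+** (Illustrative note: the last connection to unmap also drops
+** unixShmNode.nRef to zero, which is the condition under which
+** unixShmPurge() may release the mappings and close the *-shm file
+** descriptor, as the code below does.)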
+*/ +static int unixShmUnmap( + sqlite3_file *fd, /* The underlying database file */ + int deleteFlag /* Delete shared-memory if true */ +){ + unixShm *p; /* The connection to be closed */ + unixShmNode *pShmNode; /* The underlying shared-memory file */ + unixShm **pp; /* For looping over sibling connections */ + unixFile *pDbFd; /* The underlying database file */ + + pDbFd = (unixFile*)fd; + p = pDbFd->pShm; + if( p==0 ) return SQLITE_OK; + pShmNode = p->pShmNode; + + assert( pShmNode==pDbFd->pInode->pShmNode ); + assert( pShmNode->pInode==pDbFd->pInode ); + + /* Remove connection p from the set of connections associated + ** with pShmNode */ + sqlite3_mutex_enter(pShmNode->mutex); + for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} + *pp = p->pNext; + + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; + sqlite3_mutex_leave(pShmNode->mutex); + + /* If pShmNode->nRef has reached 0, then close the underlying + ** shared-memory file, too */ + unixEnterMutex(); + assert( pShmNode->nRef>0 ); + pShmNode->nRef--; + if( pShmNode->nRef==0 ){ + if( deleteFlag && pShmNode->h>=0 ){ + osUnlink(pShmNode->zFilename); + } + unixShmPurge(pDbFd); + } + unixLeaveMutex(); + + return SQLITE_OK; +} + + +#else +# define unixShmMap 0 +# define unixShmLock 0 +# define unixShmBarrier 0 +# define unixShmUnmap 0 +#endif /* #ifndef SQLITE_OMIT_WAL */ + +#if SQLITE_MAX_MMAP_SIZE>0 +/* +** If it is currently memory mapped, unmap file pFd. +*/ +static void unixUnmapfile(unixFile *pFd){ + assert( pFd->nFetchOut==0 ); + if( pFd->pMapRegion ){ + osMunmap(pFd->pMapRegion, pFd->mmapSizeActual); + pFd->pMapRegion = 0; + pFd->mmapSize = 0; + pFd->mmapSizeActual = 0; + } +} + +/* +** Attempt to set the size of the memory mapping maintained by file +** descriptor pFd to nNew bytes. Any existing mapping is discarded. +** +** If successful, this function sets the following variables: +** +** unixFile.pMapRegion +** unixFile.mmapSize +** unixFile.mmapSizeActual +** +** If unsuccessful, an error message is logged via sqlite3_log() and +** the three variables above are zeroed. In this case SQLite should +** continue accessing the database using the xRead() and xWrite() +** methods. +*/ +static void unixRemapfile( + unixFile *pFd, /* File descriptor object */ + i64 nNew /* Required mapping size */ +){ + const char *zErr = "mmap"; + int h = pFd->h; /* File descriptor open on db file */ + u8 *pOrig = (u8 *)pFd->pMapRegion; /* Pointer to current file mapping */ + i64 nOrig = pFd->mmapSizeActual; /* Size of pOrig region in bytes */ + u8 *pNew = 0; /* Location of new mapping */ + int flags = PROT_READ; /* Flags to pass to mmap() */ + + assert( pFd->nFetchOut==0 ); + assert( nNew>pFd->mmapSize ); + assert( nNew<=pFd->mmapSizeMax ); + assert( nNew>0 ); + assert( pFd->mmapSizeActual>=pFd->mmapSize ); + assert( MAP_FAILED!=0 ); + +#ifdef SQLITE_MMAP_READWRITE + if( (pFd->ctrlFlags & UNIXFILE_RDONLY)==0 ) flags |= PROT_WRITE; +#endif + + if( pOrig ){ +#if HAVE_MREMAP + i64 nReuse = pFd->mmapSize; +#else + const int szSyspage = osGetpagesize(); + i64 nReuse = (pFd->mmapSize & ~(szSyspage-1)); +#endif + u8 *pReq = &pOrig[nReuse]; + + /* Unmap any pages of the existing mapping that cannot be reused. 
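+    ** (On systems with HAVE_MREMAP, nReuse is simply the old mapping size
+    ** and osMremap() lets the kernel move the mapping; elsewhere the code
+    ** keeps only whole pages of the old mapping, asks osMmap() to place
+    ** the remainder at the address it expects, and falls back to a single
+    ** fresh full-size mapping if the kernel returns any other address.)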
*/ + if( nReuse!=nOrig ){ + osMunmap(pReq, nOrig-nReuse); + } + +#if HAVE_MREMAP + pNew = osMremap(pOrig, nReuse, nNew, MREMAP_MAYMOVE); + zErr = "mremap"; +#else + pNew = osMmap(pReq, nNew-nReuse, flags, MAP_SHARED, h, nReuse); + if( pNew!=MAP_FAILED ){ + if( pNew!=pReq ){ + osMunmap(pNew, nNew - nReuse); + pNew = 0; + }else{ + pNew = pOrig; + } + } +#endif + + /* The attempt to extend the existing mapping failed. Free it. */ + if( pNew==MAP_FAILED || pNew==0 ){ + osMunmap(pOrig, nReuse); + } + } + + /* If pNew is still NULL, try to create an entirely new mapping. */ + if( pNew==0 ){ + pNew = osMmap(0, nNew, flags, MAP_SHARED, h, 0); + } + + if( pNew==MAP_FAILED ){ + pNew = 0; + nNew = 0; + unixLogError(SQLITE_OK, zErr, pFd->zPath); + + /* If the mmap() above failed, assume that all subsequent mmap() calls + ** will probably fail too. Fall back to using xRead/xWrite exclusively + ** in this case. */ + pFd->mmapSizeMax = 0; + } + pFd->pMapRegion = (void *)pNew; + pFd->mmapSize = pFd->mmapSizeActual = nNew; +} + +/* +** Memory map or remap the file opened by file-descriptor pFd (if the file +** is already mapped, the existing mapping is replaced by the new). Or, if +** there already exists a mapping for this file, and there are still +** outstanding xFetch() references to it, this function is a no-op. +** +** If parameter nByte is non-negative, then it is the requested size of +** the mapping to create. Otherwise, if nByte is less than zero, then the +** requested size is the size of the file on disk. The actual size of the +** created mapping is either the requested size or the value configured +** using SQLITE_FCNTL_MMAP_LIMIT, whichever is smaller. +** +** SQLITE_OK is returned if no error occurs (even if the mapping is not +** recreated as a result of outstanding references) or an SQLite error +** code otherwise. +*/ +static int unixMapfile(unixFile *pFd, i64 nMap){ + assert( nMap>=0 || pFd->nFetchOut==0 ); + assert( nMap>0 || (pFd->mmapSize==0 && pFd->pMapRegion==0) ); + if( pFd->nFetchOut>0 ) return SQLITE_OK; + + if( nMap<0 ){ + struct stat statbuf; /* Low-level file information */ + if( osFstat(pFd->h, &statbuf) ){ + return SQLITE_IOERR_FSTAT; + } + nMap = statbuf.st_size; + } + if( nMap>pFd->mmapSizeMax ){ + nMap = pFd->mmapSizeMax; + } + + assert( nMap>0 || (pFd->mmapSize==0 && pFd->pMapRegion==0) ); + if( nMap!=pFd->mmapSize ){ + unixRemapfile(pFd, nMap); + } + + return SQLITE_OK; +} +#endif /* SQLITE_MAX_MMAP_SIZE>0 */ + +/* +** If possible, return a pointer to a mapping of file fd starting at offset +** iOff. The mapping must be valid for at least nAmt bytes. +** +** If such a pointer can be obtained, store it in *pp and return SQLITE_OK. +** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK. +** Finally, if an error does occur, return an SQLite error code. The final +** value of *pp is undefined in this case. +** +** If this function does return a pointer, the caller must eventually +** release the reference by calling unixUnfetch(). 
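+**
+** (A hedged usage sketch from the caller's side, not a quote of any
+** actual caller in this file:
+**
+**      void *pMap = 0;
+**      rc = pFd->pMethods->xFetch(pFd, iOff, nAmt, &pMap);
+**      if( rc==SQLITE_OK && pMap ){
+**        memcpy(zBuf, pMap, nAmt);
+**        rc = pFd->pMethods->xUnfetch(pFd, iOff, pMap);
+**      }
+**
+** and if pMap comes back NULL the caller is expected to fall back to
+** plain xRead().)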
+*/ +static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ +#if SQLITE_MAX_MMAP_SIZE>0 + unixFile *pFd = (unixFile *)fd; /* The underlying database file */ +#endif + *pp = 0; + +#if SQLITE_MAX_MMAP_SIZE>0 + if( pFd->mmapSizeMax>0 ){ + if( pFd->pMapRegion==0 ){ + int rc = unixMapfile(pFd, -1); + if( rc!=SQLITE_OK ) return rc; + } + if( pFd->mmapSize >= iOff+nAmt ){ + *pp = &((u8 *)pFd->pMapRegion)[iOff]; + pFd->nFetchOut++; + } + } +#endif + return SQLITE_OK; +} + +/* +** If the third argument is non-NULL, then this function releases a +** reference obtained by an earlier call to unixFetch(). The second +** argument passed to this function must be the same as the corresponding +** argument that was passed to the unixFetch() invocation. +** +** Or, if the third argument is NULL, then this function is being called +** to inform the VFS layer that, according to POSIX, any existing mapping +** may now be invalid and should be unmapped. +*/ +static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){ +#if SQLITE_MAX_MMAP_SIZE>0 + unixFile *pFd = (unixFile *)fd; /* The underlying database file */ + UNUSED_PARAMETER(iOff); + + /* If p==0 (unmap the entire file) then there must be no outstanding + ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference), + ** then there must be at least one outstanding. */ + assert( (p==0)==(pFd->nFetchOut==0) ); + + /* If p!=0, it must match the iOff value. */ + assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] ); + + if( p ){ + pFd->nFetchOut--; + }else{ + unixUnmapfile(pFd); + } + + assert( pFd->nFetchOut>=0 ); +#else + UNUSED_PARAMETER(fd); + UNUSED_PARAMETER(p); + UNUSED_PARAMETER(iOff); +#endif + return SQLITE_OK; +} + +/* +** Here ends the implementation of all sqlite3_file methods. +** +********************** End sqlite3_file Methods ******************************* +******************************************************************************/ + +/* +** This division contains definitions of sqlite3_io_methods objects that +** implement various file locking strategies. It also contains definitions +** of "finder" functions. A finder-function is used to locate the appropriate +** sqlite3_io_methods object for a particular database file. The pAppData +** field of the sqlite3_vfs VFS objects are initialized to be pointers to +** the correct finder-function for that VFS. +** +** Most finder functions return a pointer to a fixed sqlite3_io_methods +** object. The only interesting finder-function is autolockIoFinder, which +** looks at the filesystem type and tries to guess the best locking +** strategy from that. +** +** For finder-function F, two objects are created: +** +** (1) The real finder-function named "FImpt()". +** +** (2) A constant pointer to this function named just "F". +** +** +** A pointer to the F pointer is used as the pAppData value for VFS +** objects. We have to do this instead of letting pAppData point +** directly at the finder-function since C90 rules prevent a void* +** from be cast into a function pointer. +** +** +** Each instance of this macro generates two objects: +** +** * A constant sqlite3_io_methods object call METHOD that has locking +** methods CLOSE, LOCK, UNLOCK, CKRESLOCK. +** +** * An I/O method finder function called FINDER that returns a pointer +** to the METHOD object in the previous bullet. 
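+**
+** (For instance, the posixIoFinder/posixIoMethods pair generated below is
+** consumed - as a sketch, using the finder_type typedef defined later in
+** this division - roughly like so:
+**
+**      pVfs->pAppData = (void*)&posixIoFinder;
+**      ...
+**      pMethods = (**(finder_type*)pVfs->pAppData)(zFilename, pFile);  )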
+*/ +#define IOMETHODS(FINDER,METHOD,VERSION,CLOSE,LOCK,UNLOCK,CKLOCK,SHMMAP) \ +static const sqlite3_io_methods METHOD = { \ + VERSION, /* iVersion */ \ + CLOSE, /* xClose */ \ + unixRead, /* xRead */ \ + unixWrite, /* xWrite */ \ + unixTruncate, /* xTruncate */ \ + unixSync, /* xSync */ \ + unixFileSize, /* xFileSize */ \ + LOCK, /* xLock */ \ + UNLOCK, /* xUnlock */ \ + CKLOCK, /* xCheckReservedLock */ \ + unixFileControl, /* xFileControl */ \ + unixSectorSize, /* xSectorSize */ \ + unixDeviceCharacteristics, /* xDeviceCapabilities */ \ + SHMMAP, /* xShmMap */ \ + unixShmLock, /* xShmLock */ \ + unixShmBarrier, /* xShmBarrier */ \ + unixShmUnmap, /* xShmUnmap */ \ + unixFetch, /* xFetch */ \ + unixUnfetch, /* xUnfetch */ \ +}; \ +static const sqlite3_io_methods *FINDER##Impl(const char *z, unixFile *p){ \ + UNUSED_PARAMETER(z); UNUSED_PARAMETER(p); \ + return &METHOD; \ +} \ +static const sqlite3_io_methods *(*const FINDER)(const char*,unixFile *p) \ + = FINDER##Impl; + +/* +** Here are all of the sqlite3_io_methods objects for each of the +** locking strategies. Functions that return pointers to these methods +** are also created. +*/ +IOMETHODS( + posixIoFinder, /* Finder function name */ + posixIoMethods, /* sqlite3_io_methods object name */ + 3, /* shared memory and mmap are enabled */ + unixClose, /* xClose method */ + unixLock, /* xLock method */ + unixUnlock, /* xUnlock method */ + unixCheckReservedLock, /* xCheckReservedLock method */ + unixShmMap /* xShmMap method */ +) +IOMETHODS( + nolockIoFinder, /* Finder function name */ + nolockIoMethods, /* sqlite3_io_methods object name */ + 3, /* shared memory is disabled */ + nolockClose, /* xClose method */ + nolockLock, /* xLock method */ + nolockUnlock, /* xUnlock method */ + nolockCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) +IOMETHODS( + dotlockIoFinder, /* Finder function name */ + dotlockIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ + dotlockClose, /* xClose method */ + dotlockLock, /* xLock method */ + dotlockUnlock, /* xUnlock method */ + dotlockCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) + +#if SQLITE_ENABLE_LOCKING_STYLE +IOMETHODS( + flockIoFinder, /* Finder function name */ + flockIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ + flockClose, /* xClose method */ + flockLock, /* xLock method */ + flockUnlock, /* xUnlock method */ + flockCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) +#endif + +#if OS_VXWORKS +IOMETHODS( + semIoFinder, /* Finder function name */ + semIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ + semXClose, /* xClose method */ + semXLock, /* xLock method */ + semXUnlock, /* xUnlock method */ + semXCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) +#endif + +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +IOMETHODS( + afpIoFinder, /* Finder function name */ + afpIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ + afpClose, /* xClose method */ + afpLock, /* xLock method */ + afpUnlock, /* xUnlock method */ + afpCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) +#endif + +/* +** The proxy locking method is a "super-method" in the sense that it +** opens secondary file descriptors for the conch and lock files and +** it uses proxy, dot-file, AFP, and flock() locking methods on those +** secondary 
files. For this reason, the division that implements +** proxy locking is located much further down in the file. But we need +** to go ahead and define the sqlite3_io_methods and finder function +** for proxy locking here. So we forward declare the I/O methods. +*/ +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +static int proxyClose(sqlite3_file*); +static int proxyLock(sqlite3_file*, int); +static int proxyUnlock(sqlite3_file*, int); +static int proxyCheckReservedLock(sqlite3_file*, int*); +IOMETHODS( + proxyIoFinder, /* Finder function name */ + proxyIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ + proxyClose, /* xClose method */ + proxyLock, /* xLock method */ + proxyUnlock, /* xUnlock method */ + proxyCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) +#endif + +/* nfs lockd on OSX 10.3+ doesn't clear write locks when a read lock is set */ +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +IOMETHODS( + nfsIoFinder, /* Finder function name */ + nfsIoMethods, /* sqlite3_io_methods object name */ + 1, /* shared memory is disabled */ + unixClose, /* xClose method */ + unixLock, /* xLock method */ + nfsUnlock, /* xUnlock method */ + unixCheckReservedLock, /* xCheckReservedLock method */ + 0 /* xShmMap method */ +) +#endif + +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE +/* +** This "finder" function attempts to determine the best locking strategy +** for the database file "filePath". It then returns the sqlite3_io_methods +** object that implements that strategy. +** +** This is for MacOSX only. +*/ +static const sqlite3_io_methods *autolockIoFinderImpl( + const char *filePath, /* name of the database file */ + unixFile *pNew /* open file object for the database file */ +){ + static const struct Mapping { + const char *zFilesystem; /* Filesystem type name */ + const sqlite3_io_methods *pMethods; /* Appropriate locking method */ + } aMap[] = { + { "hfs", &posixIoMethods }, + { "ufs", &posixIoMethods }, + { "afpfs", &afpIoMethods }, + { "smbfs", &afpIoMethods }, + { "webdav", &nolockIoMethods }, + { 0, 0 } + }; + int i; + struct statfs fsInfo; + struct flock lockInfo; + + if( !filePath ){ + /* If filePath==NULL that means we are dealing with a transient file + ** that does not need to be locked. */ + return &nolockIoMethods; + } + if( statfs(filePath, &fsInfo) != -1 ){ + if( fsInfo.f_flags & MNT_RDONLY ){ + return &nolockIoMethods; + } + for(i=0; aMap[i].zFilesystem; i++){ + if( strcmp(fsInfo.f_fstypename, aMap[i].zFilesystem)==0 ){ + return aMap[i].pMethods; + } + } + } + + /* Default case. Handles, amongst others, "nfs". + ** Test byte-range lock using fcntl(). If the call succeeds, + ** assume that the file-system supports POSIX style locks. + */ + lockInfo.l_len = 1; + lockInfo.l_start = 0; + lockInfo.l_whence = SEEK_SET; + lockInfo.l_type = F_RDLCK; + if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) { + if( strcmp(fsInfo.f_fstypename, "nfs")==0 ){ + return &nfsIoMethods; + } else { + return &posixIoMethods; + } + }else{ + return &dotlockIoMethods; + } +} +static const sqlite3_io_methods + *(*const autolockIoFinder)(const char*,unixFile*) = autolockIoFinderImpl; + +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ + +#if OS_VXWORKS +/* +** This "finder" function for VxWorks checks to see if posix advisory +** locking works. If it does, then that is what is used. If it does not +** work, then fallback to named semaphore locking. 
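+**
+** (Both this finder and autolockIoFinderImpl above use the same probe:
+** fill in a struct flock describing an F_RDLCK over byte 0 and call
+** fcntl(h, F_GETLK, &lockInfo); if the call itself fails, POSIX advisory
+** locking is assumed to be unusable on that file.)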
+*/ +static const sqlite3_io_methods *vxworksIoFinderImpl( + const char *filePath, /* name of the database file */ + unixFile *pNew /* the open file object */ +){ + struct flock lockInfo; + + if( !filePath ){ + /* If filePath==NULL that means we are dealing with a transient file + ** that does not need to be locked. */ + return &nolockIoMethods; + } + + /* Test if fcntl() is supported and use POSIX style locks. + ** Otherwise fall back to the named semaphore method. + */ + lockInfo.l_len = 1; + lockInfo.l_start = 0; + lockInfo.l_whence = SEEK_SET; + lockInfo.l_type = F_RDLCK; + if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) { + return &posixIoMethods; + }else{ + return &semIoMethods; + } +} +static const sqlite3_io_methods + *(*const vxworksIoFinder)(const char*,unixFile*) = vxworksIoFinderImpl; + +#endif /* OS_VXWORKS */ + +/* +** An abstract type for a pointer to an IO method finder function: +*/ +typedef const sqlite3_io_methods *(*finder_type)(const char*,unixFile*); + + +/**************************************************************************** +**************************** sqlite3_vfs methods **************************** +** +** This division contains the implementation of methods on the +** sqlite3_vfs object. +*/ + +/* +** Initialize the contents of the unixFile structure pointed to by pId. +*/ +static int fillInUnixFile( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + int h, /* Open file descriptor of file being opened */ + sqlite3_file *pId, /* Write to the unixFile structure here */ + const char *zFilename, /* Name of the file being opened */ + int ctrlFlags /* Zero or more UNIXFILE_* values */ +){ + const sqlite3_io_methods *pLockingStyle; + unixFile *pNew = (unixFile *)pId; + int rc = SQLITE_OK; + + assert( pNew->pInode==NULL ); + + /* Usually the path zFilename should not be a relative pathname. The + ** exception is when opening the proxy "conch" file in builds that + ** include the special Apple locking styles. + */ +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE + assert( zFilename==0 || zFilename[0]=='/' + || pVfs->pAppData==(void*)&autolockIoFinder ); +#else + assert( zFilename==0 || zFilename[0]=='/' ); +#endif + + /* No locking occurs in temporary files */ + assert( zFilename!=0 || (ctrlFlags & UNIXFILE_NOLOCK)!=0 ); + + OSTRACE(("OPEN %-3d %s\n", h, zFilename)); + pNew->h = h; + pNew->pVfs = pVfs; + pNew->zPath = zFilename; + pNew->ctrlFlags = (u8)ctrlFlags; +#if SQLITE_MAX_MMAP_SIZE>0 + pNew->mmapSizeMax = sqlite3GlobalConfig.szMmap; +#endif + if( sqlite3_uri_boolean(((ctrlFlags & UNIXFILE_URI) ? 
zFilename : 0), + "psow", SQLITE_POWERSAFE_OVERWRITE) ){ + pNew->ctrlFlags |= UNIXFILE_PSOW; + } + if( strcmp(pVfs->zName,"unix-excl")==0 ){ + pNew->ctrlFlags |= UNIXFILE_EXCL; + } + +#if OS_VXWORKS + pNew->pId = vxworksFindFileId(zFilename); + if( pNew->pId==0 ){ + ctrlFlags |= UNIXFILE_NOLOCK; + rc = SQLITE_NOMEM_BKPT; + } +#endif + + if( ctrlFlags & UNIXFILE_NOLOCK ){ + pLockingStyle = &nolockIoMethods; + }else{ + pLockingStyle = (**(finder_type*)pVfs->pAppData)(zFilename, pNew); +#if SQLITE_ENABLE_LOCKING_STYLE + /* Cache zFilename in the locking context (AFP and dotlock override) for + ** proxyLock activation is possible (remote proxy is based on db name) + ** zFilename remains valid until file is closed, to support */ + pNew->lockingContext = (void*)zFilename; +#endif + } + + if( pLockingStyle == &posixIoMethods +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE + || pLockingStyle == &nfsIoMethods +#endif + ){ + unixEnterMutex(); + rc = findInodeInfo(pNew, &pNew->pInode); + if( rc!=SQLITE_OK ){ + /* If an error occurred in findInodeInfo(), close the file descriptor + ** immediately, before releasing the mutex. findInodeInfo() may fail + ** in two scenarios: + ** + ** (a) A call to fstat() failed. + ** (b) A malloc failed. + ** + ** Scenario (b) may only occur if the process is holding no other + ** file descriptors open on the same file. If there were other file + ** descriptors on this file, then no malloc would be required by + ** findInodeInfo(). If this is the case, it is quite safe to close + ** handle h - as it is guaranteed that no posix locks will be released + ** by doing so. + ** + ** If scenario (a) caused the error then things are not so safe. The + ** implicit assumption here is that if fstat() fails, things are in + ** such bad shape that dropping a lock or two doesn't matter much. + */ + robust_close(pNew, h, __LINE__); + h = -1; + } + unixLeaveMutex(); + } + +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + else if( pLockingStyle == &afpIoMethods ){ + /* AFP locking uses the file path so it needs to be included in + ** the afpLockingContext. + */ + afpLockingContext *pCtx; + pNew->lockingContext = pCtx = sqlite3_malloc64( sizeof(*pCtx) ); + if( pCtx==0 ){ + rc = SQLITE_NOMEM_BKPT; + }else{ + /* NB: zFilename exists and remains valid until the file is closed + ** according to requirement F11141. So we do not need to make a + ** copy of the filename. 
*/ + pCtx->dbPath = zFilename; + pCtx->reserved = 0; + srandomdev(); + unixEnterMutex(); + rc = findInodeInfo(pNew, &pNew->pInode); + if( rc!=SQLITE_OK ){ + sqlite3_free(pNew->lockingContext); + robust_close(pNew, h, __LINE__); + h = -1; + } + unixLeaveMutex(); + } + } +#endif + + else if( pLockingStyle == &dotlockIoMethods ){ + /* Dotfile locking uses the file path so it needs to be included in + ** the dotlockLockingContext + */ + char *zLockFile; + int nFilename; + assert( zFilename!=0 ); + nFilename = (int)strlen(zFilename) + 6; + zLockFile = (char *)sqlite3_malloc64(nFilename); + if( zLockFile==0 ){ + rc = SQLITE_NOMEM_BKPT; + }else{ + sqlite3_snprintf(nFilename, zLockFile, "%s" DOTLOCK_SUFFIX, zFilename); + } + pNew->lockingContext = zLockFile; + } + +#if OS_VXWORKS + else if( pLockingStyle == &semIoMethods ){ + /* Named semaphore locking uses the file path so it needs to be + ** included in the semLockingContext + */ + unixEnterMutex(); + rc = findInodeInfo(pNew, &pNew->pInode); + if( (rc==SQLITE_OK) && (pNew->pInode->pSem==NULL) ){ + char *zSemName = pNew->pInode->aSemName; + int n; + sqlite3_snprintf(MAX_PATHNAME, zSemName, "/%s.sem", + pNew->pId->zCanonicalName); + for( n=1; zSemName[n]; n++ ) + if( zSemName[n]=='/' ) zSemName[n] = '_'; + pNew->pInode->pSem = sem_open(zSemName, O_CREAT, 0666, 1); + if( pNew->pInode->pSem == SEM_FAILED ){ + rc = SQLITE_NOMEM_BKPT; + pNew->pInode->aSemName[0] = '\0'; + } + } + unixLeaveMutex(); + } +#endif + + storeLastErrno(pNew, 0); +#if OS_VXWORKS + if( rc!=SQLITE_OK ){ + if( h>=0 ) robust_close(pNew, h, __LINE__); + h = -1; + osUnlink(zFilename); + pNew->ctrlFlags |= UNIXFILE_DELETE; + } +#endif + if( rc!=SQLITE_OK ){ + if( h>=0 ) robust_close(pNew, h, __LINE__); + }else{ + pNew->pMethod = pLockingStyle; + OpenCounter(+1); + verifyDbFile(pNew); + } + return rc; +} + +/* +** Return the name of a directory in which to put temporary files. +** If no suitable temporary file directory can be found, return NULL. +*/ +static const char *unixTempFileDir(void){ + static const char *azDirs[] = { + 0, + 0, + "/var/tmp", + "/usr/tmp", + "/tmp", + "." + }; + unsigned int i = 0; + struct stat buf; + const char *zDir = sqlite3_temp_directory; + + if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); + if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); + while(1){ + if( zDir!=0 + && osStat(zDir, &buf)==0 + && S_ISDIR(buf.st_mode) + && osAccess(zDir, 03)==0 + ){ + return zDir; + } + if( i>=sizeof(azDirs)/sizeof(azDirs[0]) ) break; + zDir = azDirs[i++]; + } + return 0; +} + +/* +** Create a temporary file name in zBuf. zBuf must be allocated +** by the calling process and must be big enough to hold at least +** pVfs->mxPathname bytes. +*/ +static int unixGetTempname(int nBuf, char *zBuf){ + const char *zDir; + int iLimit = 0; + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. + */ + zBuf[0] = 0; + SimulateIOError( return SQLITE_IOERR ); + + zDir = unixTempFileDir(); + if( zDir==0 ) return SQLITE_IOERR_GETTEMPPATH; + do{ + u64 r; + sqlite3_randomness(sizeof(r), &r); + assert( nBuf>2 ); + zBuf[nBuf-2] = 0; + sqlite3_snprintf(nBuf, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX"%llx%c", + zDir, r, 0); + if( zBuf[nBuf-2]!=0 || (iLimit++)>10 ) return SQLITE_ERROR; + }while( osAccess(zBuf,0)==0 ); + return SQLITE_OK; +} + +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) +/* +** Routine to transform a unixFile into a proxy-locking unixFile. 
+** Implementation in the proxy-lock division, but used by unixOpen() +** if SQLITE_PREFER_PROXY_LOCKING is defined. +*/ +static int proxyTransformUnixFile(unixFile*, const char*); +#endif + +/* +** Search for an unused file descriptor that was opened on the database +** file (not a journal or master-journal file) identified by pathname +** zPath with SQLITE_OPEN_XXX flags matching those passed as the second +** argument to this function. +** +** Such a file descriptor may exist if a database connection was closed +** but the associated file descriptor could not be closed because some +** other file descriptor open on the same file is holding a file-lock. +** Refer to comments in the unixClose() function and the lengthy comment +** describing "Posix Advisory Locking" at the start of this file for +** further details. Also, ticket #4018. +** +** If a suitable file descriptor is found, then it is returned. If no +** such file descriptor is located, -1 is returned. +*/ +static UnixUnusedFd *findReusableFd(const char *zPath, int flags){ + UnixUnusedFd *pUnused = 0; + + /* Do not search for an unused file descriptor on vxworks. Not because + ** vxworks would not benefit from the change (it might, we're not sure), + ** but because no way to test it is currently available. It is better + ** not to risk breaking vxworks support for the sake of such an obscure + ** feature. */ +#if !OS_VXWORKS + struct stat sStat; /* Results of stat() call */ + + /* A stat() call may fail for various reasons. If this happens, it is + ** almost certain that an open() call on the same path will also fail. + ** For this reason, if an error occurs in the stat() call here, it is + ** ignored and -1 is returned. The caller will try to open a new file + ** descriptor on the same path, fail, and return an error to SQLite. + ** + ** Even if a subsequent open() call does succeed, the consequences of + ** not searching for a reusable file descriptor are not dire. */ + if( 0==osStat(zPath, &sStat) ){ + unixInodeInfo *pInode; + + unixEnterMutex(); + pInode = inodeList; + while( pInode && (pInode->fileId.dev!=sStat.st_dev + || pInode->fileId.ino!=(u64)sStat.st_ino) ){ + pInode = pInode->pNext; + } + if( pInode ){ + UnixUnusedFd **pp; + for(pp=&pInode->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext)); + pUnused = *pp; + if( pUnused ){ + *pp = pUnused->pNext; + } + } + unixLeaveMutex(); + } +#endif /* if !OS_VXWORKS */ + return pUnused; +} + +/* +** Find the mode, uid and gid of file zFile. +*/ +static int getFileMode( + const char *zFile, /* File name */ + mode_t *pMode, /* OUT: Permissions of zFile */ + uid_t *pUid, /* OUT: uid of zFile. */ + gid_t *pGid /* OUT: gid of zFile. */ +){ + struct stat sStat; /* Output of stat() on database file */ + int rc = SQLITE_OK; + if( 0==osStat(zFile, &sStat) ){ + *pMode = sStat.st_mode & 0777; + *pUid = sStat.st_uid; + *pGid = sStat.st_gid; + }else{ + rc = SQLITE_IOERR_FSTAT; + } + return rc; +} + +/* +** This function is called by unixOpen() to determine the unix permissions +** to create new files with. If no error occurs, then SQLITE_OK is returned +** and a value suitable for passing as the third argument to open(2) is +** written to *pMode. If an IO error occurs, an SQLite error code is +** returned and the value of *pMode is not modified. +** +** In most cases, this routine sets *pMode to 0, which will become +** an indication to robust_open() to create the file using +** SQLITE_DEFAULT_FILE_PERMISSIONS adjusted by the umask. 
+** But if the file being opened is a WAL or regular journal file, then +** this function queries the file-system for the permissions on the +** corresponding database file and sets *pMode to this value. Whenever +** possible, WAL and journal files are created using the same permissions +** as the associated database file. +** +** If the SQLITE_ENABLE_8_3_NAMES option is enabled, then the +** original filename is unavailable. But 8_3_NAMES is only used for +** FAT filesystems and permissions do not matter there, so just use +** the default permissions. +*/ +static int findCreateFileMode( + const char *zPath, /* Path of file (possibly) being created */ + int flags, /* Flags passed as 4th argument to xOpen() */ + mode_t *pMode, /* OUT: Permissions to open file with */ + uid_t *pUid, /* OUT: uid to set on the file */ + gid_t *pGid /* OUT: gid to set on the file */ +){ + int rc = SQLITE_OK; /* Return Code */ + *pMode = 0; + *pUid = 0; + *pGid = 0; + if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ + char zDb[MAX_PATHNAME+1]; /* Database file path */ + int nDb; /* Number of valid bytes in zDb */ + + /* zPath is a path to a WAL or journal file. The following block derives + ** the path to the associated database file from zPath. This block handles + ** the following naming conventions: + ** + ** "-journal" + ** "-wal" + ** "-journalNN" + ** "-walNN" + ** + ** where NN is a decimal number. The NN naming schemes are + ** used by the test_multiplex.c module. + */ + nDb = sqlite3Strlen30(zPath) - 1; + while( zPath[nDb]!='-' ){ +#ifndef SQLITE_ENABLE_8_3_NAMES + /* In the normal case (8+3 filenames disabled) the journal filename + ** is guaranteed to contain a '-' character. */ + assert( nDb>0 ); + assert( sqlite3Isalnum(zPath[nDb]) ); +#else + /* If 8+3 names are possible, then the journal file might not contain + ** a '-' character. So check for that case and return early. */ + if( nDb==0 || zPath[nDb]=='.' ) return SQLITE_OK; +#endif + nDb--; + } + memcpy(zDb, zPath, nDb); + zDb[nDb] = '\0'; + + rc = getFileMode(zDb, pMode, pUid, pGid); + }else if( flags & SQLITE_OPEN_DELETEONCLOSE ){ + *pMode = 0600; + }else if( flags & SQLITE_OPEN_URI ){ + /* If this is a main database file and the file was opened using a URI + ** filename, check for the "modeof" parameter. If present, interpret + ** its value as a filename and try to copy the mode, uid and gid from + ** that file. */ + const char *z = sqlite3_uri_parameter(zPath, "modeof"); + if( z ){ + rc = getFileMode(z, pMode, pUid, pGid); + } + } + return rc; +} + +/* +** Open the file zPath. +** +** Previously, the SQLite OS layer used three functions in place of this +** one: +** +** sqlite3OsOpenReadWrite(); +** sqlite3OsOpenReadOnly(); +** sqlite3OsOpenExclusive(); +** +** These calls correspond to the following combinations of flags: +** +** ReadWrite() -> (READWRITE | CREATE) +** ReadOnly() -> (READONLY) +** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE) +** +** The old OpenExclusive() accepted a boolean argument - "delFlag". If +** true, the file was configured to be automatically deleted when the +** file handle closed. To achieve the same effect using this new +** interface, add the DELETEONCLOSE flag to those specified above for +** OpenExclusive(). 
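+**
+** (So, for example, the old OpenExclusive(z, &fd, 1) call corresponds
+** roughly to the following - a sketch, not a literal caller:
+**
+**      unixOpen(pVfs, z, pFile,
+**               SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE |
+**               SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE |
+**               SQLITE_OPEN_DELETEONCLOSE, &outFlags);  )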
+*/ +static int unixOpen( + sqlite3_vfs *pVfs, /* The VFS for which this is the xOpen method */ + const char *zPath, /* Pathname of file to be opened */ + sqlite3_file *pFile, /* The file descriptor to be filled in */ + int flags, /* Input flags to control the opening */ + int *pOutFlags /* Output flags returned to SQLite core */ +){ + unixFile *p = (unixFile *)pFile; + int fd = -1; /* File descriptor returned by open() */ + int openFlags = 0; /* Flags to pass to open() */ + int eType = flags&0xFFFFFF00; /* Type of file to open */ + int noLock; /* True to omit locking primitives */ + int rc = SQLITE_OK; /* Function Return Code */ + int ctrlFlags = 0; /* UNIXFILE_* flags */ + + int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); + int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); + int isCreate = (flags & SQLITE_OPEN_CREATE); + int isReadonly = (flags & SQLITE_OPEN_READONLY); + int isReadWrite = (flags & SQLITE_OPEN_READWRITE); +#if SQLITE_ENABLE_LOCKING_STYLE + int isAutoProxy = (flags & SQLITE_OPEN_AUTOPROXY); +#endif +#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE + struct statfs fsInfo; +#endif + + /* If creating a master or main-file journal, this function will open + ** a file-descriptor on the directory too. The first time unixSync() + ** is called the directory file descriptor will be fsync()ed and close()d. + */ + int syncDir = (isCreate && ( + eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_MAIN_JOURNAL + || eType==SQLITE_OPEN_WAL + )); + + /* If argument zPath is a NULL pointer, this function is required to open + ** a temporary file. Use this buffer to store the file name in. + */ + char zTmpname[MAX_PATHNAME+2]; + const char *zName = zPath; + + /* Check the following statements are true: + ** + ** (a) Exactly one of the READWRITE and READONLY flags must be set, and + ** (b) if CREATE is set, then READWRITE must also be set, and + ** (c) if EXCLUSIVE is set, then CREATE must also be set. + ** (d) if DELETEONCLOSE is set, then CREATE must also be set. + */ + assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); + assert(isCreate==0 || isReadWrite); + assert(isExclusive==0 || isCreate); + assert(isDelete==0 || isCreate); + + /* The main DB, main journal, WAL file and master journal are never + ** automatically deleted. Nor are they ever temporary files. */ + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); + + /* Assert that the upper layer has set one of the "file-type" flags. */ + assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB + || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL + || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL + ); + + /* Detect a pid change and reset the PRNG. There is a race condition + ** here such that two or more threads all trying to open databases at + ** the same instant might all reset the PRNG. But multiple resets + ** are harmless. 
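+  **
+  ** (The pid check is also what makes temp-name generation fork-safe:
+  ** after a fork(), the child observes a different osGetpid() value and
+  ** reseeds, so parent and child do not draw the same "random" names.)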
+ */ + if( randomnessPid!=osGetpid(0) ){ + randomnessPid = osGetpid(0); + sqlite3_randomness(0,0); + } + + memset(p, 0, sizeof(unixFile)); + + if( eType==SQLITE_OPEN_MAIN_DB ){ + UnixUnusedFd *pUnused; + pUnused = findReusableFd(zName, flags); + if( pUnused ){ + fd = pUnused->fd; + }else{ + pUnused = sqlite3_malloc64(sizeof(*pUnused)); + if( !pUnused ){ + return SQLITE_NOMEM_BKPT; + } + } + p->pUnused = pUnused; + + /* Database filenames are double-zero terminated if they are not + ** URIs with parameters. Hence, they can always be passed into + ** sqlite3_uri_parameter(). */ + assert( (flags & SQLITE_OPEN_URI) || zName[strlen(zName)+1]==0 ); + + }else if( !zName ){ + /* If zName is NULL, the upper layer is requesting a temp file. */ + assert(isDelete && !syncDir); + rc = unixGetTempname(pVfs->mxPathname, zTmpname); + if( rc!=SQLITE_OK ){ + return rc; + } + zName = zTmpname; + + /* Generated temporary filenames are always double-zero terminated + ** for use by sqlite3_uri_parameter(). */ + assert( zName[strlen(zName)+1]==0 ); + } + + /* Determine the value of the flags parameter passed to POSIX function + ** open(). These must be calculated even if open() is not called, as + ** they may be stored as part of the file handle and used by the + ** 'conch file' locking functions later on. */ + if( isReadonly ) openFlags |= O_RDONLY; + if( isReadWrite ) openFlags |= O_RDWR; + if( isCreate ) openFlags |= O_CREAT; + if( isExclusive ) openFlags |= (O_EXCL|O_NOFOLLOW); + openFlags |= (O_LARGEFILE|O_BINARY); + + if( fd<0 ){ + mode_t openMode; /* Permissions to create file with */ + uid_t uid; /* Userid for the file */ + gid_t gid; /* Groupid for the file */ + rc = findCreateFileMode(zName, flags, &openMode, &uid, &gid); + if( rc!=SQLITE_OK ){ + assert( !p->pUnused ); + assert( eType==SQLITE_OPEN_WAL || eType==SQLITE_OPEN_MAIN_JOURNAL ); + return rc; + } + fd = robust_open(zName, openFlags, openMode); + OSTRACE(("OPENX %-3d %s 0%o\n", fd, zName, openFlags)); + assert( !isExclusive || (openFlags & O_CREAT)!=0 ); + if( fd<0 && errno!=EISDIR && isReadWrite ){ + /* Failed to open the file for read/write access. Try read-only. */ + flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); + openFlags &= ~(O_RDWR|O_CREAT); + flags |= SQLITE_OPEN_READONLY; + openFlags |= O_RDONLY; + isReadonly = 1; + fd = robust_open(zName, openFlags, openMode); + } + if( fd<0 ){ + rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zName); + goto open_finished; + } + + /* If this process is running as root and if creating a new rollback + ** journal or WAL file, set the ownership of the journal or WAL to be + ** the same as the original database. 
+ */ + if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ + robustFchown(fd, uid, gid); + } + } + assert( fd>=0 ); + if( pOutFlags ){ + *pOutFlags = flags; + } + + if( p->pUnused ){ + p->pUnused->fd = fd; + p->pUnused->flags = flags; + } + + if( isDelete ){ +#if OS_VXWORKS + zPath = zName; +#elif defined(SQLITE_UNLINK_AFTER_CLOSE) + zPath = sqlite3_mprintf("%s", zName); + if( zPath==0 ){ + robust_close(p, fd, __LINE__); + return SQLITE_NOMEM_BKPT; + } +#else + osUnlink(zName); +#endif + } +#if SQLITE_ENABLE_LOCKING_STYLE + else{ + p->openFlags = openFlags; + } +#endif + +#if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE + if( fstatfs(fd, &fsInfo) == -1 ){ + storeLastErrno(p, errno); + robust_close(p, fd, __LINE__); + return SQLITE_IOERR_ACCESS; + } + if (0 == strncmp("msdos", fsInfo.f_fstypename, 5)) { + ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS; + } + if (0 == strncmp("exfat", fsInfo.f_fstypename, 5)) { + ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS; + } +#endif + + /* Set up appropriate ctrlFlags */ + if( isDelete ) ctrlFlags |= UNIXFILE_DELETE; + if( isReadonly ) ctrlFlags |= UNIXFILE_RDONLY; + noLock = eType!=SQLITE_OPEN_MAIN_DB; + if( noLock ) ctrlFlags |= UNIXFILE_NOLOCK; + if( syncDir ) ctrlFlags |= UNIXFILE_DIRSYNC; + if( flags & SQLITE_OPEN_URI ) ctrlFlags |= UNIXFILE_URI; + +#if SQLITE_ENABLE_LOCKING_STYLE +#if SQLITE_PREFER_PROXY_LOCKING + isAutoProxy = 1; +#endif + if( isAutoProxy && (zPath!=NULL) && (!noLock) && pVfs->xOpen ){ + char *envforce = getenv("SQLITE_FORCE_PROXY_LOCKING"); + int useProxy = 0; + + /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, 0 means + ** never use proxy, NULL means use proxy for non-local files only. */ + if( envforce!=NULL ){ + useProxy = atoi(envforce)>0; + }else{ + useProxy = !(fsInfo.f_flags&MNT_LOCAL); + } + if( useProxy ){ + rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); + if( rc==SQLITE_OK ){ + rc = proxyTransformUnixFile((unixFile*)pFile, ":auto:"); + if( rc!=SQLITE_OK ){ + /* Use unixClose to clean up the resources added in fillInUnixFile + ** and clear all the structure's references. Specifically, + ** pFile->pMethods will be NULL so sqlite3OsClose will be a no-op + */ + unixClose(pFile); + return rc; + } + } + goto open_finished; + } + } +#endif + + rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); + +open_finished: + if( rc!=SQLITE_OK ){ + sqlite3_free(p->pUnused); + } + return rc; +} + + +/* +** Delete the file at zPath. If the dirSync argument is true, fsync() +** the directory after deleting the file. +*/ +static int unixDelete( + sqlite3_vfs *NotUsed, /* VFS containing this as the xDelete method */ + const char *zPath, /* Name of file to be deleted */ + int dirSync /* If true, fsync() directory after deleting file */ +){ + int rc = SQLITE_OK; + UNUSED_PARAMETER(NotUsed); + SimulateIOError(return SQLITE_IOERR_DELETE); + if( osUnlink(zPath)==(-1) ){ + if( errno==ENOENT +#if OS_VXWORKS + || osAccess(zPath,0)!=0 +#endif + ){ + rc = SQLITE_IOERR_DELETE_NOENT; + }else{ + rc = unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath); + } + return rc; + } +#ifndef SQLITE_DISABLE_DIRSYNC + if( (dirSync & 1)!=0 ){ + int fd; + rc = osOpenDirectory(zPath, &fd); + if( rc==SQLITE_OK ){ + if( full_fsync(fd,0,0) ){ + rc = unixLogError(SQLITE_IOERR_DIR_FSYNC, "fsync", zPath); + } + robust_close(0, fd, __LINE__); + }else{ + assert( rc==SQLITE_CANTOPEN ); + rc = SQLITE_OK; + } + } +#endif + return rc; +} + +/* +** Test the existence of or access permissions of file zPath. 
The +** test performed depends on the value of flags: +** +** SQLITE_ACCESS_EXISTS: Return 1 if the file exists +** SQLITE_ACCESS_READWRITE: Return 1 if the file is read and writable. +** SQLITE_ACCESS_READONLY: Return 1 if the file is readable. +** +** Otherwise return 0. +*/ +static int unixAccess( + sqlite3_vfs *NotUsed, /* The VFS containing this xAccess method */ + const char *zPath, /* Path of the file to examine */ + int flags, /* What do we want to learn about the zPath file? */ + int *pResOut /* Write result boolean here */ +){ + UNUSED_PARAMETER(NotUsed); + SimulateIOError( return SQLITE_IOERR_ACCESS; ); + assert( pResOut!=0 ); + + /* The spec says there are three possible values for flags. But only + ** two of them are actually used */ + assert( flags==SQLITE_ACCESS_EXISTS || flags==SQLITE_ACCESS_READWRITE ); + + if( flags==SQLITE_ACCESS_EXISTS ){ + struct stat buf; + *pResOut = (0==osStat(zPath, &buf) && buf.st_size>0); + }else{ + *pResOut = osAccess(zPath, W_OK|R_OK)==0; + } + return SQLITE_OK; +} + +/* +** +*/ +static int mkFullPathname( + const char *zPath, /* Input path */ + char *zOut, /* Output buffer */ + int nOut /* Allocated size of buffer zOut */ +){ + int nPath = sqlite3Strlen30(zPath); + int iOff = 0; + if( zPath[0]!='/' ){ + if( osGetcwd(zOut, nOut-2)==0 ){ + return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath); + } + iOff = sqlite3Strlen30(zOut); + zOut[iOff++] = '/'; + } + if( (iOff+nPath+1)>nOut ){ + /* SQLite assumes that xFullPathname() nul-terminates the output buffer + ** even if it returns an error. */ + zOut[iOff] = '\0'; + return SQLITE_CANTOPEN_BKPT; + } + sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath); + return SQLITE_OK; +} + +/* +** Turn a relative pathname into a full pathname. The relative path +** is stored as a nul-terminated string in the buffer pointed to by +** zPath. +** +** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes +** (in this case, MAX_PATHNAME bytes). The full-path is written to +** this buffer before returning. +*/ +static int unixFullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zPath, /* Possibly relative input path */ + int nOut, /* Size of output buffer in bytes */ + char *zOut /* Output buffer */ +){ +#if !defined(HAVE_READLINK) || !defined(HAVE_LSTAT) + return mkFullPathname(zPath, zOut, nOut); +#else + int rc = SQLITE_OK; + int nByte; + int nLink = 1; /* Number of symbolic links followed so far */ + const char *zIn = zPath; /* Input path for each iteration of loop */ + char *zDel = 0; + + assert( pVfs->mxPathname==MAX_PATHNAME ); + UNUSED_PARAMETER(pVfs); + + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. This function could fail if, for example, the + ** current working directory has been unlinked. + */ + SimulateIOError( return SQLITE_ERROR ); + + do { + + /* Call stat() on path zIn. Set bLink to true if the path is a symbolic + ** link, or false otherwise. 
+    */
+    int bLink = 0;
+    struct stat buf;
+    if( osLstat(zIn, &buf)!=0 ){
+      if( errno!=ENOENT ){
+        rc = unixLogError(SQLITE_CANTOPEN_BKPT, "lstat", zIn);
+      }
+    }else{
+      bLink = S_ISLNK(buf.st_mode);
+    }
+
+    if( bLink ){
+      if( zDel==0 ){
+        zDel = sqlite3_malloc(nOut);
+        if( zDel==0 ) rc = SQLITE_NOMEM_BKPT;
+      }else if( ++nLink>SQLITE_MAX_SYMLINKS ){
+        rc = SQLITE_CANTOPEN_BKPT;
+      }
+
+      if( rc==SQLITE_OK ){
+        nByte = osReadlink(zIn, zDel, nOut-1);
+        if( nByte<0 ){
+          rc = unixLogError(SQLITE_CANTOPEN_BKPT, "readlink", zIn);
+        }else{
+          if( zDel[0]!='/' ){
+            int n;
+            for(n = sqlite3Strlen30(zIn); n>0 && zIn[n-1]!='/'; n--);
+            if( nByte+n+1>nOut ){
+              rc = SQLITE_CANTOPEN_BKPT;
+            }else{
+              memmove(&zDel[n], zDel, nByte+1);
+              memcpy(zDel, zIn, n);
+              nByte += n;
+            }
+          }
+          zDel[nByte] = '\0';
+        }
+      }
+
+      zIn = zDel;
+    }
+
+    assert( rc!=SQLITE_OK || zIn!=zOut || zIn[0]=='/' );
+    if( rc==SQLITE_OK && zIn!=zOut ){
+      rc = mkFullPathname(zIn, zOut, nOut);
+    }
+    if( bLink==0 ) break;
+    zIn = zOut;
+  }while( rc==SQLITE_OK );
+
+  sqlite3_free(zDel);
+  return rc;
+#endif   /* HAVE_READLINK && HAVE_LSTAT */
+}
+
+
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+/*
+** Interfaces for opening a shared library, finding entry points
+** within the shared library, and closing the shared library.
+*/
+#include <dlfcn.h>
+static void *unixDlOpen(sqlite3_vfs *NotUsed, const char *zFilename){
+  UNUSED_PARAMETER(NotUsed);
+  return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL);
+}
+
+/*
+** SQLite calls this function immediately after a call to unixDlSym() or
+** unixDlOpen() fails (returns a null pointer). If a more detailed error
+** message is available, it is written to zBufOut. If no error message
+** is available, zBufOut is left unmodified and SQLite uses a default
+** error message.
+*/
+static void unixDlError(sqlite3_vfs *NotUsed, int nBuf, char *zBufOut){
+  const char *zErr;
+  UNUSED_PARAMETER(NotUsed);
+  unixEnterMutex();
+  zErr = dlerror();
+  if( zErr ){
+    sqlite3_snprintf(nBuf, zBufOut, "%s", zErr);
+  }
+  unixLeaveMutex();
+}
+static void (*unixDlSym(sqlite3_vfs *NotUsed, void *p, const char*zSym))(void){
+  /*
+  ** GCC with -pedantic-errors says that C90 does not allow a void* to be
+  ** cast into a pointer to a function.  And yet the library dlsym() routine
+  ** returns a void* which is really a pointer to a function.  So how do we
+  ** use dlsym() with -pedantic-errors?
+  **
+  ** Variable x below is defined to be a pointer to a function taking
+  ** parameters void* and const char* and returning a pointer to a function.
+  ** We initialize x by assigning it a pointer to the dlsym() function.
+  ** (That assignment requires a cast.)  Then we call the function that
+  ** x points to.
+  **
+  ** This work-around is unlikely to work correctly on any system where
+  ** you really cannot cast a function pointer into void*.  But then, on the
+  ** other hand, dlsym() will not work on such a system either, so we have
+  ** not really lost anything.
+  */
+  void (*(*x)(void*,const char*))(void);
+  UNUSED_PARAMETER(NotUsed);
+  x = (void(*(*)(void*,const char*))(void))dlsym;
+  return (*x)(p, zSym);
+}
+static void unixDlClose(sqlite3_vfs *NotUsed, void *pHandle){
+  UNUSED_PARAMETER(NotUsed);
+  dlclose(pHandle);
+}
+#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */
+  #define unixDlOpen  0
+  #define unixDlError 0
+  #define unixDlSym   0
+  #define unixDlClose 0
+#endif
+
+/*
+** Write nBuf bytes of random data to the supplied buffer zBuf.
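+**
+** A standalone illustration of the unixDlSym() work-around above: convert
+** the void* returned by dlsym() back to a real function pointer without a
+** direct cast.  Hypothetical sketch; the "libm.so.6" soname is
+** platform-specific (Linux), and the two-step pointer copy is the common
+** POSIX idiom rather than SQLite's exact construction.
+*/
+#if 0
+#include <dlfcn.h>
+#include <stdio.h>
+/* Sketch: load the math library and call cos() through dlsym(). */
+int main(void){
+  void *h = dlopen("libm.so.6", RTLD_NOW);
+  double (*pCos)(double);
+  if( h==0 ) return 1;
+  *(void**)&pCos = dlsym(h, "cos");  /* copy the pointer, do not cast it */
+  if( pCos ) printf("cos(0)=%f\n", pCos(0.0));
+  return dlclose(h);
+}
+#endif
+/*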
+*/ +static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){ + UNUSED_PARAMETER(NotUsed); + assert((size_t)nBuf>=(sizeof(time_t)+sizeof(int))); + + /* We have to initialize zBuf to prevent valgrind from reporting + ** errors. The reports issued by valgrind are incorrect - we would + ** prefer that the randomness be increased by making use of the + ** uninitialized space in zBuf - but valgrind errors tend to worry + ** some users. Rather than argue, it seems easier just to initialize + ** the whole array and silence valgrind, even if that means less randomness + ** in the random seed. + ** + ** When testing, initializing zBuf[] to zero is all we do. That means + ** that we always use the same random number sequence. This makes the + ** tests repeatable. + */ + memset(zBuf, 0, nBuf); + randomnessPid = osGetpid(0); +#if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS) + { + int fd, got; + fd = robust_open("/dev/urandom", O_RDONLY, 0); + if( fd<0 ){ + time_t t; + time(&t); + memcpy(zBuf, &t, sizeof(t)); + memcpy(&zBuf[sizeof(t)], &randomnessPid, sizeof(randomnessPid)); + assert( sizeof(t)+sizeof(randomnessPid)<=(size_t)nBuf ); + nBuf = sizeof(t) + sizeof(randomnessPid); + }else{ + do{ got = osRead(fd, zBuf, nBuf); }while( got<0 && errno==EINTR ); + robust_close(0, fd, __LINE__); + } + } +#endif + return nBuf; +} + + +/* +** Sleep for a little while. Return the amount of time slept. +** The argument is the number of microseconds we want to sleep. +** The return value is the number of microseconds of sleep actually +** requested from the underlying operating system, a number which +** might be greater than or equal to the argument, but not less +** than the argument. +*/ +static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ +#if OS_VXWORKS + struct timespec sp; + + sp.tv_sec = microseconds / 1000000; + sp.tv_nsec = (microseconds % 1000000) * 1000; + nanosleep(&sp, NULL); + UNUSED_PARAMETER(NotUsed); + return microseconds; +#elif defined(HAVE_USLEEP) && HAVE_USLEEP + usleep(microseconds); + UNUSED_PARAMETER(NotUsed); + return microseconds; +#else + int seconds = (microseconds+999999)/1000000; + sleep(seconds); + UNUSED_PARAMETER(NotUsed); + return seconds*1000000; +#endif +} + +/* +** The following variable, if set to a non-zero value, is interpreted as +** the number of seconds since 1970 and is used to set the result of +** sqlite3OsCurrentTime() during testing. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ +#endif + +/* +** Find the current time (in Universal Coordinated Time). Write into *piNow +** the current time and date as a Julian Day number times 86_400_000. In +** other words, write into *piNow the number of milliseconds since the Julian +** epoch of noon in Greenwich on November 24, 4714 B.C according to the +** proleptic Gregorian calendar. +** +** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date +** cannot be found. 
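+**
+** For reference, the unixEpoch constant below encodes the Unix epoch
+** (1970-01-01 00:00:00 UTC, Julian Day 2440587.5) in milliseconds:
+** 2440587.5 days * 86400000 ms/day = 210866760000000 ms, which the code
+** factors as 24405875*8640000 to stay in integer arithmetic.  A tiny
+** sketch (hypothetical name) checking the arithmetic and applying it:
+*/
+#if 0
+#include <assert.h>
+/* Sketch: convert a Unix timestamp in seconds to Julian-day milliseconds. */
+static long long demoJulianMs(long long unixSeconds){
+  static const long long unixEpochMs = 24405875LL*8640000LL;
+  assert( unixEpochMs==210866760000000LL );
+  return unixEpochMs + unixSeconds*1000LL;
+}
+#endif
+/*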
+*/ +static int unixCurrentTimeInt64(sqlite3_vfs *NotUsed, sqlite3_int64 *piNow){ + static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; + int rc = SQLITE_OK; +#if defined(NO_GETTOD) + time_t t; + time(&t); + *piNow = ((sqlite3_int64)t)*1000 + unixEpoch; +#elif OS_VXWORKS + struct timespec sNow; + clock_gettime(CLOCK_REALTIME, &sNow); + *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_nsec/1000000; +#else + struct timeval sNow; + (void)gettimeofday(&sNow, 0); /* Cannot fail given valid arguments */ + *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000; +#endif + +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; + } +#endif + UNUSED_PARAMETER(NotUsed); + return rc; +} + +#ifndef SQLITE_OMIT_DEPRECATED +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. +*/ +static int unixCurrentTime(sqlite3_vfs *NotUsed, double *prNow){ + sqlite3_int64 i = 0; + int rc; + UNUSED_PARAMETER(NotUsed); + rc = unixCurrentTimeInt64(0, &i); + *prNow = i/86400000.0; + return rc; +} +#else +# define unixCurrentTime 0 +#endif + +/* +** The xGetLastError() method is designed to return a better +** low-level error message when operating-system problems come up +** during SQLite operation. Only the integer return code is currently +** used. +*/ +static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){ + UNUSED_PARAMETER(NotUsed); + UNUSED_PARAMETER(NotUsed2); + UNUSED_PARAMETER(NotUsed3); + return errno; +} + + +/* +************************ End of sqlite3_vfs methods *************************** +******************************************************************************/ + +/****************************************************************************** +************************** Begin Proxy Locking ******************************** +** +** Proxy locking is a "uber-locking-method" in this sense: It uses the +** other locking methods on secondary lock files. Proxy locking is a +** meta-layer over top of the primitive locking implemented above. For +** this reason, the division that implements of proxy locking is deferred +** until late in the file (here) after all of the other I/O methods have +** been defined - so that the primitive locking methods are available +** as services to help with the implementation of proxy locking. +** +**** +** +** The default locking schemes in SQLite use byte-range locks on the +** database file to coordinate safe, concurrent access by multiple readers +** and writers [http://sqlite.org/lockingv3.html]. The five file locking +** states (UNLOCKED, PENDING, SHARED, RESERVED, EXCLUSIVE) are implemented +** as POSIX read & write locks over fixed set of locations (via fsctl), +** on AFP and SMB only exclusive byte-range locks are available via fsctl +** with _IOWR('z', 23, struct ByteRangeLockPB2) to track the same 5 states. 
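+**
+** (A minimal standalone sketch of the POSIX primitive this scheme is built
+** on: taking and dropping an advisory read-lock on a single byte with
+** fcntl(2).  The helper name is hypothetical; 0x40000000 mirrors the
+** PENDING_BYTE offset listed just below.)
+*/
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+/* Sketch: lock, then unlock, one byte of the file open on fd. */
+static int demoByteLock(int fd){
+  struct flock lk;
+  lk.l_type = F_RDLCK;
+  lk.l_whence = SEEK_SET;
+  lk.l_start = 0x40000000;
+  lk.l_len = 1;
+  if( fcntl(fd, F_SETLK, &lk) ) return -1;
+  lk.l_type = F_UNLCK;
+  return fcntl(fd, F_SETLK, &lk);
+}
+#endif
+/*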
+** To simulate a F_RDLCK on the shared range, on AFP a randomly selected +** address in the shared range is taken for a SHARED lock, the entire +** shared range is taken for an EXCLUSIVE lock): +** +** PENDING_BYTE 0x40000000 +** RESERVED_BYTE 0x40000001 +** SHARED_RANGE 0x40000002 -> 0x40000200 +** +** This works well on the local file system, but shows a nearly 100x +** slowdown in read performance on AFP because the AFP client disables +** the read cache when byte-range locks are present. Enabling the read +** cache exposes a cache coherency problem that is present on all OS X +** supported network file systems. NFS and AFP both observe the +** close-to-open semantics for ensuring cache coherency +** [http://nfs.sourceforge.net/#faq_a8], which does not effectively +** address the requirements for concurrent database access by multiple +** readers and writers +** [http://www.nabble.com/SQLite-on-NFS-cache-coherency-td15655701.html]. +** +** To address the performance and cache coherency issues, proxy file locking +** changes the way database access is controlled by limiting access to a +** single host at a time and moving file locks off of the database file +** and onto a proxy file on the local file system. +** +** +** Using proxy locks +** ----------------- +** +** C APIs +** +** sqlite3_file_control(db, dbname, SQLITE_FCNTL_SET_LOCKPROXYFILE, +** | ":auto:"); +** sqlite3_file_control(db, dbname, SQLITE_FCNTL_GET_LOCKPROXYFILE, +** &); +** +** +** SQL pragmas +** +** PRAGMA [database.]lock_proxy_file= | :auto: +** PRAGMA [database.]lock_proxy_file +** +** Specifying ":auto:" means that if there is a conch file with a matching +** host ID in it, the proxy path in the conch file will be used, otherwise +** a proxy path based on the user's temp dir +** (via confstr(_CS_DARWIN_USER_TEMP_DIR,...)) will be used and the +** actual proxy file name is generated from the name and path of the +** database file. For example: +** +** For database path "/Users/me/foo.db" +** The lock path will be "/sqliteplocks/_Users_me_foo.db:auto:") +** +** Once a lock proxy is configured for a database connection, it can not +** be removed, however it may be switched to a different proxy path via +** the above APIs (assuming the conch file is not being held by another +** connection or process). +** +** +** How proxy locking works +** ----------------------- +** +** Proxy file locking relies primarily on two new supporting files: +** +** * conch file to limit access to the database file to a single host +** at a time +** +** * proxy file to act as a proxy for the advisory locks normally +** taken on the database +** +** The conch file - to use a proxy file, sqlite must first "hold the conch" +** by taking an sqlite-style shared lock on the conch file, reading the +** contents and comparing the host's unique host ID (see below) and lock +** proxy path against the values stored in the conch. The conch file is +** stored in the same directory as the database file and the file name +** is patterned after the database file name as ".-conch". +** If the conch file does not exist, or its contents do not match the +** host ID and/or proxy path, then the lock is escalated to an exclusive +** lock and the conch file contents is updated with the host ID and proxy +** path and the lock is downgraded to a shared lock again. If the conch +** is held by another process (with a shared lock), the exclusive lock +** will fail and SQLITE_BUSY is returned. 
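+**
+** A minimal sketch of driving the C API quoted above from application
+** code (assumes an open sqlite3* handle and a build with proxy locking
+** compiled in; error handling elided; helper name hypothetical):
+*/
+#if 0
+#include <sqlite3.h>
+/* Sketch: request an auto-named proxy file, then read the path back. */
+static int demoSetProxy(sqlite3 *db){
+  const char *zProxyPath = 0;
+  int rc = sqlite3_file_control(db, "main",
+                                SQLITE_FCNTL_SET_LOCKPROXYFILE, ":auto:");
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_file_control(db, "main",
+                              SQLITE_FCNTL_GET_LOCKPROXYFILE, &zProxyPath);
+  }
+  return rc;
+}
+#endif
+/*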
+** +** The proxy file - a single-byte file used for all advisory file locks +** normally taken on the database file. This allows for safe sharing +** of the database file for multiple readers and writers on the same +** host (the conch ensures that they all use the same local lock file). +** +** Requesting the lock proxy does not immediately take the conch, it is +** only taken when the first request to lock database file is made. +** This matches the semantics of the traditional locking behavior, where +** opening a connection to a database file does not take a lock on it. +** The shared lock and an open file descriptor are maintained until +** the connection to the database is closed. +** +** The proxy file and the lock file are never deleted so they only need +** to be created the first time they are used. +** +** Configuration options +** --------------------- +** +** SQLITE_PREFER_PROXY_LOCKING +** +** Database files accessed on non-local file systems are +** automatically configured for proxy locking, lock files are +** named automatically using the same logic as +** PRAGMA lock_proxy_file=":auto:" +** +** SQLITE_PROXY_DEBUG +** +** Enables the logging of error messages during host id file +** retrieval and creation +** +** LOCKPROXYDIR +** +** Overrides the default directory used for lock proxy files that +** are named automatically via the ":auto:" setting +** +** SQLITE_DEFAULT_PROXYDIR_PERMISSIONS +** +** Permissions to use when creating a directory for storing the +** lock proxy files, only used when LOCKPROXYDIR is not set. +** +** +** As mentioned above, when compiled with SQLITE_PREFER_PROXY_LOCKING, +** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will +** force proxy locking to be used for every database file opened, and 0 +** will force automatic proxy locking to be disabled for all database +** files (explicitly calling the SQLITE_FCNTL_SET_LOCKPROXYFILE pragma or +** sqlite_file_control API is not affected by SQLITE_FORCE_PROXY_LOCKING). +*/ + +/* +** Proxy locking is only available on MacOSX +*/ +#if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE + +/* +** The proxyLockingContext has the path and file structures for the remote +** and local proxy files in it +*/ +typedef struct proxyLockingContext proxyLockingContext; +struct proxyLockingContext { + unixFile *conchFile; /* Open conch file */ + char *conchFilePath; /* Name of the conch file */ + unixFile *lockProxy; /* Open proxy lock file */ + char *lockProxyPath; /* Name of the proxy lock file */ + char *dbPath; /* Name of the open file */ + int conchHeld; /* 1 if the conch is held, -1 if lockless */ + int nFails; /* Number of conch taking failures */ + void *oldLockingContext; /* Original lockingcontext to restore on close */ + sqlite3_io_methods const *pOldMethod; /* Original I/O methods for close */ +}; + +/* +** The proxy lock file path for the database at dbPath is written into lPath, +** which must point to valid, writable memory large enough for a maxLen length +** file path. 
+*/
+static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){
+  int len;
+  int dbLen;
+  int i;
+
+#ifdef LOCKPROXYDIR
+  len = strlcpy(lPath, LOCKPROXYDIR, maxLen);
+#else
+# ifdef _CS_DARWIN_USER_TEMP_DIR
+  {
+    if( !confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen) ){
+      OSTRACE(("GETLOCKPATH  failed %s errno=%d pid=%d\n",
+               lPath, errno, osGetpid(0)));
+      return SQLITE_IOERR_LOCK;
+    }
+    len = strlcat(lPath, "sqliteplocks", maxLen);
+  }
+# else
+  len = strlcpy(lPath, "/tmp/", maxLen);
+# endif
+#endif
+
+  if( lPath[len-1]!='/' ){
+    len = strlcat(lPath, "/", maxLen);
+  }
+
+  /* transform the db path to a unique cache name */
+  dbLen = (int)strlen(dbPath);
+  for( i=0; i<dbLen && (i+len+7)<(int)maxLen; i++){
+    char c = dbPath[i];
+    lPath[i+len] = (c=='/')?'_':c;
+  }
+  lPath[i+len]='\0';
+  strlcat(lPath, ":auto:", maxLen);
+  OSTRACE(("GETLOCKPATH  proxy lock path=%s pid=%d\n", lPath, osGetpid(0)));
+  return SQLITE_OK;
+}
+
+/*
+ ** Creates the lock file and any missing directories in lockPath
+ */
+static int proxyCreateLockPath(const char *lockPath){
+  int i, len;
+  char buf[MAXPATHLEN];
+  int start = 0;
+
+  assert(lockPath!=NULL);
+  /* try to create all the intermediate directories */
+  len = (int)strlen(lockPath);
+  buf[0] = lockPath[0];
+  for( i=1; i<len; i++ ){
+    if( lockPath[i] == '/' && (i - start > 0) ){
+      /* only mkdir if leaf dir != "." or "/" or ".." */
+      if( i-start>2 || (i-start==1 && buf[start] != '.' && buf[start] != '/')
+         || (i-start==2 && buf[start] != '.' && buf[start+1] != '.') ){
+        buf[i]='\0';
+        if( osMkdir(buf, SQLITE_DEFAULT_PROXYDIR_PERMISSIONS) ){
+          int err=errno;
+          if( err!=EEXIST ) {
+            OSTRACE(("CREATELOCKPATH  FAILED creating %s, "
+                     "'%s' proxy lock path=%s pid=%d\n",
+                     buf, strerror(err), lockPath, osGetpid(0)));
+            return err;
+          }
+        }
+      }
+      start=i+1;
+    }
+    buf[i] = lockPath[i];
+  }
+  OSTRACE(("CREATELOCKPATH  proxy lock path=%s pid=%d\n",lockPath,osGetpid(0)));
+  return 0;
+}
+
+/*
+** Create a new VFS file descriptor (stored in memory obtained from
+** sqlite3_malloc) and open the file named "path" in the file descriptor.
+**
+** The caller is responsible not only for closing the file descriptor
+** but also for freeing the memory associated with the file descriptor.
+*/
+static int proxyCreateUnixFile(
+    const char *path,        /* path for the new unixFile */
+    unixFile **ppFile,       /* unixFile created and returned by ref */
+    int islockfile           /* if non zero missing dirs will be created */
+) {
+  int fd = -1;
+  unixFile *pNew;
+  int rc = SQLITE_OK;
+  int openFlags = O_RDWR | O_CREAT;
+  sqlite3_vfs dummyVfs;
+  int terrno = 0;
+  UnixUnusedFd *pUnused = NULL;
+
+  /* 1. first try to open/create the file
+  ** 2. if that fails, and this is a lock file (not-conch), try creating
+  ** the parent directories and then try again.
+  ** 3.
if that fails, try to open the file read-only + ** otherwise return BUSY (if lock file) or CANTOPEN for the conch file + */ + pUnused = findReusableFd(path, openFlags); + if( pUnused ){ + fd = pUnused->fd; + }else{ + pUnused = sqlite3_malloc64(sizeof(*pUnused)); + if( !pUnused ){ + return SQLITE_NOMEM_BKPT; + } + } + if( fd<0 ){ + fd = robust_open(path, openFlags, 0); + terrno = errno; + if( fd<0 && errno==ENOENT && islockfile ){ + if( proxyCreateLockPath(path) == SQLITE_OK ){ + fd = robust_open(path, openFlags, 0); + } + } + } + if( fd<0 ){ + openFlags = O_RDONLY; + fd = robust_open(path, openFlags, 0); + terrno = errno; + } + if( fd<0 ){ + if( islockfile ){ + return SQLITE_BUSY; + } + switch (terrno) { + case EACCES: + return SQLITE_PERM; + case EIO: + return SQLITE_IOERR_LOCK; /* even though it is the conch */ + default: + return SQLITE_CANTOPEN_BKPT; + } + } + + pNew = (unixFile *)sqlite3_malloc64(sizeof(*pNew)); + if( pNew==NULL ){ + rc = SQLITE_NOMEM_BKPT; + goto end_create_proxy; + } + memset(pNew, 0, sizeof(unixFile)); + pNew->openFlags = openFlags; + memset(&dummyVfs, 0, sizeof(dummyVfs)); + dummyVfs.pAppData = (void*)&autolockIoFinder; + dummyVfs.zName = "dummy"; + pUnused->fd = fd; + pUnused->flags = openFlags; + pNew->pUnused = pUnused; + + rc = fillInUnixFile(&dummyVfs, fd, (sqlite3_file*)pNew, path, 0); + if( rc==SQLITE_OK ){ + *ppFile = pNew; + return SQLITE_OK; + } +end_create_proxy: + robust_close(pNew, fd, __LINE__); + sqlite3_free(pNew); + sqlite3_free(pUnused); + return rc; +} + +#ifdef SQLITE_TEST +/* simulate multiple hosts by creating unique hostid file paths */ +SQLITE_API int sqlite3_hostid_num = 0; +#endif + +#define PROXY_HOSTIDLEN 16 /* conch file host id length */ + +#ifdef HAVE_GETHOSTUUID +/* Not always defined in the headers as it ought to be */ +extern int gethostuuid(uuid_t id, const struct timespec *wait); +#endif + +/* get the host ID via gethostuuid(), pHostID must point to PROXY_HOSTIDLEN +** bytes of writable memory. +*/ +static int proxyGetHostID(unsigned char *pHostID, int *pError){ + assert(PROXY_HOSTIDLEN == sizeof(uuid_t)); + memset(pHostID, 0, PROXY_HOSTIDLEN); +#ifdef HAVE_GETHOSTUUID + { + struct timespec timeout = {1, 0}; /* 1 sec timeout */ + if( gethostuuid(pHostID, &timeout) ){ + int err = errno; + if( pError ){ + *pError = err; + } + return SQLITE_IOERR; + } + } +#else + UNUSED_PARAMETER(pError); +#endif +#ifdef SQLITE_TEST + /* simulate multiple hosts by creating unique hostid file paths */ + if( sqlite3_hostid_num != 0){ + pHostID[0] = (char)(pHostID[0] + (char)(sqlite3_hostid_num & 0xFF)); + } +#endif + + return SQLITE_OK; +} + +/* The conch file contains the header, host id and lock file path + */ +#define PROXY_CONCHVERSION 2 /* 1-byte header, 16-byte host id, path */ +#define PROXY_HEADERLEN 1 /* conch file header length */ +#define PROXY_PATHINDEX (PROXY_HEADERLEN+PROXY_HOSTIDLEN) +#define PROXY_MAXCONCHLEN (PROXY_HEADERLEN+PROXY_HOSTIDLEN+MAXPATHLEN) + +/* +** Takes an open conch file, copies the contents to a new path and then moves +** it back. The newly created file's file descriptor is assigned to the +** conch file structure and finally the original conch file descriptor is +** closed. Returns zero if successful. 
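+**
+** For orientation, the conch file read here has the layout fixed by the
+** defines above: byte 0 is PROXY_CONCHVERSION, the next PROXY_HOSTIDLEN
+** bytes are the host UUID, and the lock-proxy path begins at
+** PROXY_PATHINDEX.  A standalone decoding sketch (hypothetical name,
+** constants written out literally):
+*/
+#if 0
+#include <string.h>
+/* Sketch: split a raw conch buffer of nBuf bytes into its three fields;
+** returns 0 if the buffer is well formed. */
+static int demoParseConch(const unsigned char *aBuf, int nBuf,
+                          unsigned char aHostId[16], char zPath[512]){
+  int nPath;
+  if( nBuf<17 || aBuf[0]!=2 ) return -1;  /* version byte, PROXY_HEADERLEN */
+  memcpy(aHostId, &aBuf[1], 16);          /* PROXY_HOSTIDLEN bytes */
+  nPath = nBuf-17;                        /* remainder is the path */
+  if( nPath>511 ) nPath = 511;
+  memcpy(zPath, &aBuf[17], nPath);
+  zPath[nPath] = '\0';
+  return 0;
+}
+#endif
+/*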
+*/
+static int proxyBreakConchLock(unixFile *pFile, uuid_t myHostID){
+  proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+  unixFile *conchFile = pCtx->conchFile;
+  char tPath[MAXPATHLEN];
+  char buf[PROXY_MAXCONCHLEN];
+  char *cPath = pCtx->conchFilePath;
+  size_t readLen = 0;
+  size_t pathLen = 0;
+  char errmsg[64] = "";
+  int fd = -1;
+  int rc = -1;
+  UNUSED_PARAMETER(myHostID);
+
+  /* create a new path by replacing the trailing '-conch' with '-break' */
+  pathLen = strlcpy(tPath, cPath, MAXPATHLEN);
+  if( pathLen>MAXPATHLEN || pathLen<6 ||
+     (strlcpy(&tPath[pathLen-5], "break", 6) != 5) ){
+    sqlite3_snprintf(sizeof(errmsg),errmsg,"path error (len %d)",(int)pathLen);
+    goto end_breaklock;
+  }
+  /* read the conch content */
+  readLen = osPread(conchFile->h, buf, PROXY_MAXCONCHLEN, 0);
+  if( readLen<PROXY_PATHINDEX ){
+    sqlite3_snprintf(sizeof(errmsg),errmsg,"read error (len %d)",(int)readLen);
+    goto end_breaklock;
+  }
+  /* write it out to the temporary break file */
+  fd = robust_open(tPath, (O_RDWR|O_CREAT|O_EXCL), 0);
+  if( fd<0 ){
+    sqlite3_snprintf(sizeof(errmsg), errmsg, "create failed (%d)", errno);
+    goto end_breaklock;
+  }
+  if( osPwrite(fd, buf, readLen, 0) != (ssize_t)readLen ){
+    sqlite3_snprintf(sizeof(errmsg), errmsg, "write failed (%d)", errno);
+    goto end_breaklock;
+  }
+  if( rename(tPath, cPath) ){
+    sqlite3_snprintf(sizeof(errmsg), errmsg, "rename failed (%d)", errno);
+    goto end_breaklock;
+  }
+  rc = 0;
+  fprintf(stderr, "broke stale lock on %s\n", cPath);
+  robust_close(pFile, conchFile->h, __LINE__);
+  conchFile->h = fd;
+  conchFile->openFlags = O_RDWR | O_CREAT;
+
+end_breaklock:
+  if( rc ){
+    if( fd>=0 ){
+      osUnlink(tPath);
+      robust_close(pFile, fd, __LINE__);
+    }
+    fprintf(stderr, "failed to break stale lock on %s, %s\n", cPath, errmsg);
+  }
+  return rc;
+}
+
+/* Take the requested lock on the conch file and break a stale lock if the
+** host id matches.
+*/
+static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){
+  proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext;
+  unixFile *conchFile = pCtx->conchFile;
+  int rc = SQLITE_OK;
+  int nTries = 0;
+  struct timespec conchModTime;
+
+  memset(&conchModTime, 0, sizeof(conchModTime));
+  do {
+    rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType);
+    nTries ++;
+    if( rc==SQLITE_BUSY ){
+      /* If the lock failed (busy):
+       * 1st try: get the mod time of the conch, wait 0.5s and try again.
+       * 2nd try: fail if the mod time changed or host id is different, wait
+       *           10 sec and try again
+       * 3rd try: break the lock unless the mod time has changed.
+       */
+      struct stat buf;
+      if( osFstat(conchFile->h, &buf) ){
+        storeLastErrno(pFile, errno);
+        return SQLITE_IOERR_LOCK;
+      }
+
+      if( nTries==1 ){
+        conchModTime = buf.st_mtimespec;
+        usleep(500000); /* wait 0.5 sec and try the lock again*/
+        continue;
+      }
+
+      assert( nTries>1 );
+      if( conchModTime.tv_sec != buf.st_mtimespec.tv_sec ||
+         conchModTime.tv_nsec != buf.st_mtimespec.tv_nsec ){
+        return SQLITE_BUSY;
+      }
+
+      if( nTries==2 ){
+        char tBuf[PROXY_MAXCONCHLEN];
+        int len = osPread(conchFile->h, tBuf, PROXY_MAXCONCHLEN, 0);
+        if( len<0 ){
+          storeLastErrno(pFile, errno);
+          return SQLITE_IOERR_LOCK;
+        }
+        if( len>PROXY_PATHINDEX && tBuf[0]==(char)PROXY_CONCHVERSION){
+          /* don't break the lock if the host id doesn't match */
+          if( 0!=memcmp(&tBuf[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN) ){
+            return SQLITE_BUSY;
+          }
+        }else{
+          /* don't break the lock on short read or a version mismatch */
+          return SQLITE_BUSY;
+        }
+        usleep(10000000); /* wait 10 sec and try the lock again */
+        continue;
+      }
+
+      assert( nTries==3 );
+      if( 0==proxyBreakConchLock(pFile, myHostID) ){
+        rc = SQLITE_OK;
+        if( lockType==EXCLUSIVE_LOCK ){
+          rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK);
+        }
+        if( !rc ){
+          rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType);
+        }
+      }
+    }
+  } while( rc==SQLITE_BUSY && nTries<3 );
+
+  return rc;
+}
+
+/* Takes the conch by taking a shared lock and reading the contents of the
+** conch.  If lockPath is non-NULL, the host ID and lock file path must match.
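+**
+** (The proxyConchLock() schedule above is a fixed three-step backoff:
+** retry after 0.5s, then after 10s, then break the lock.  A generic
+** standalone sketch of that shape, with hypothetical names; note that
+** usleep() with arguments of a second or more is not portable everywhere.)
+*/
+#if 0
+#include <unistd.h>
+/* Sketch: run a lock attempt up to three times with the waits used above;
+** xTry returns 0 on success and nonzero on "busy". */
+static int demoThreeTries(int (*xTry)(void*), void *pArg){
+  static const unsigned aWaitUs[] = { 500000u, 10000000u };
+  int i;
+  for(i=0; i<3; i++){
+    if( xTry(pArg)==0 ) return 0;
+    if( i<2 ) usleep(aWaitUs[i]);
+  }
+  return 1;
+}
+#endif
+/*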
A NULL +** lockPath means that the lockPath in the conch file will be used if the +** host IDs match, or a new lock path will be generated automatically +** and written to the conch file. +*/ +static int proxyTakeConch(unixFile *pFile){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + + if( pCtx->conchHeld!=0 ){ + return SQLITE_OK; + }else{ + unixFile *conchFile = pCtx->conchFile; + uuid_t myHostID; + int pError = 0; + char readBuf[PROXY_MAXCONCHLEN]; + char lockPath[MAXPATHLEN]; + char *tempLockPath = NULL; + int rc = SQLITE_OK; + int createConch = 0; + int hostIdMatch = 0; + int readLen = 0; + int tryOldLockPath = 0; + int forceNewLockPath = 0; + + OSTRACE(("TAKECONCH %d for %s pid=%d\n", conchFile->h, + (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), + osGetpid(0))); + + rc = proxyGetHostID(myHostID, &pError); + if( (rc&0xff)==SQLITE_IOERR ){ + storeLastErrno(pFile, pError); + goto end_takeconch; + } + rc = proxyConchLock(pFile, myHostID, SHARED_LOCK); + if( rc!=SQLITE_OK ){ + goto end_takeconch; + } + /* read the existing conch file */ + readLen = seekAndRead((unixFile*)conchFile, 0, readBuf, PROXY_MAXCONCHLEN); + if( readLen<0 ){ + /* I/O error: lastErrno set by seekAndRead */ + storeLastErrno(pFile, conchFile->lastErrno); + rc = SQLITE_IOERR_READ; + goto end_takeconch; + }else if( readLen<=(PROXY_HEADERLEN+PROXY_HOSTIDLEN) || + readBuf[0]!=(char)PROXY_CONCHVERSION ){ + /* a short read or version format mismatch means we need to create a new + ** conch file. + */ + createConch = 1; + } + /* if the host id matches and the lock path already exists in the conch + ** we'll try to use the path there, if we can't open that path, we'll + ** retry with a new auto-generated path + */ + do { /* in case we need to try again for an :auto: named lock file */ + + if( !createConch && !forceNewLockPath ){ + hostIdMatch = !memcmp(&readBuf[PROXY_HEADERLEN], myHostID, + PROXY_HOSTIDLEN); + /* if the conch has data compare the contents */ + if( !pCtx->lockProxyPath ){ + /* for auto-named local lock file, just check the host ID and we'll + ** use the local lock file path that's already in there + */ + if( hostIdMatch ){ + size_t pathLen = (readLen - PROXY_PATHINDEX); + + if( pathLen>=MAXPATHLEN ){ + pathLen=MAXPATHLEN-1; + } + memcpy(lockPath, &readBuf[PROXY_PATHINDEX], pathLen); + lockPath[pathLen] = 0; + tempLockPath = lockPath; + tryOldLockPath = 1; + /* create a copy of the lock path if the conch is taken */ + goto end_takeconch; + } + }else if( hostIdMatch + && !strncmp(pCtx->lockProxyPath, &readBuf[PROXY_PATHINDEX], + readLen-PROXY_PATHINDEX) + ){ + /* conch host and lock path match */ + goto end_takeconch; + } + } + + /* if the conch isn't writable and doesn't match, we can't take it */ + if( (conchFile->openFlags&O_RDWR) == 0 ){ + rc = SQLITE_BUSY; + goto end_takeconch; + } + + /* either the conch didn't match or we need to create a new one */ + if( !pCtx->lockProxyPath ){ + proxyGetLockPath(pCtx->dbPath, lockPath, MAXPATHLEN); + tempLockPath = lockPath; + /* create a copy of the lock path _only_ if the conch is taken */ + } + + /* update conch with host and path (this will fail if other process + ** has a shared lock already), if the host id matches, use the big + ** stick. + */ + futimes(conchFile->h, NULL); + if( hostIdMatch && !createConch ){ + if( conchFile->pInode && conchFile->pInode->nShared>1 ){ + /* We are trying for an exclusive lock but another thread in this + ** same process is still holding a shared lock. 
*/ + rc = SQLITE_BUSY; + } else { + rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK); + } + }else{ + rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK); + } + if( rc==SQLITE_OK ){ + char writeBuffer[PROXY_MAXCONCHLEN]; + int writeSize = 0; + + writeBuffer[0] = (char)PROXY_CONCHVERSION; + memcpy(&writeBuffer[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN); + if( pCtx->lockProxyPath!=NULL ){ + strlcpy(&writeBuffer[PROXY_PATHINDEX], pCtx->lockProxyPath, + MAXPATHLEN); + }else{ + strlcpy(&writeBuffer[PROXY_PATHINDEX], tempLockPath, MAXPATHLEN); + } + writeSize = PROXY_PATHINDEX + strlen(&writeBuffer[PROXY_PATHINDEX]); + robust_ftruncate(conchFile->h, writeSize); + rc = unixWrite((sqlite3_file *)conchFile, writeBuffer, writeSize, 0); + full_fsync(conchFile->h,0,0); + /* If we created a new conch file (not just updated the contents of a + ** valid conch file), try to match the permissions of the database + */ + if( rc==SQLITE_OK && createConch ){ + struct stat buf; + int err = osFstat(pFile->h, &buf); + if( err==0 ){ + mode_t cmode = buf.st_mode&(S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | + S_IROTH|S_IWOTH); + /* try to match the database file R/W permissions, ignore failure */ +#ifndef SQLITE_PROXY_DEBUG + osFchmod(conchFile->h, cmode); +#else + do{ + rc = osFchmod(conchFile->h, cmode); + }while( rc==(-1) && errno==EINTR ); + if( rc!=0 ){ + int code = errno; + fprintf(stderr, "fchmod %o FAILED with %d %s\n", + cmode, code, strerror(code)); + } else { + fprintf(stderr, "fchmod %o SUCCEDED\n",cmode); + } + }else{ + int code = errno; + fprintf(stderr, "STAT FAILED[%d] with %d %s\n", + err, code, strerror(code)); +#endif + } + } + } + conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, SHARED_LOCK); + + end_takeconch: + OSTRACE(("TRANSPROXY: CLOSE %d\n", pFile->h)); + if( rc==SQLITE_OK && pFile->openFlags ){ + int fd; + if( pFile->h>=0 ){ + robust_close(pFile, pFile->h, __LINE__); + } + pFile->h = -1; + fd = robust_open(pCtx->dbPath, pFile->openFlags, 0); + OSTRACE(("TRANSPROXY: OPEN %d\n", fd)); + if( fd>=0 ){ + pFile->h = fd; + }else{ + rc=SQLITE_CANTOPEN_BKPT; /* SQLITE_BUSY? proxyTakeConch called + during locking */ + } + } + if( rc==SQLITE_OK && !pCtx->lockProxy ){ + char *path = tempLockPath ? tempLockPath : pCtx->lockProxyPath; + rc = proxyCreateUnixFile(path, &pCtx->lockProxy, 1); + if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM && tryOldLockPath ){ + /* we couldn't create the proxy lock file with the old lock file path + ** so try again via auto-naming + */ + forceNewLockPath = 1; + tryOldLockPath = 0; + continue; /* go back to the do {} while start point, try again */ + } + } + if( rc==SQLITE_OK ){ + /* Need to make a copy of path if we extracted the value + ** from the conch file or the path was allocated on the stack + */ + if( tempLockPath ){ + pCtx->lockProxyPath = sqlite3DbStrDup(0, tempLockPath); + if( !pCtx->lockProxyPath ){ + rc = SQLITE_NOMEM_BKPT; + } + } + } + if( rc==SQLITE_OK ){ + pCtx->conchHeld = 1; + + if( pCtx->lockProxy->pMethod == &afpIoMethods ){ + afpLockingContext *afpCtx; + afpCtx = (afpLockingContext *)pCtx->lockProxy->lockingContext; + afpCtx->dbPath = pCtx->lockProxyPath; + } + } else { + conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); + } + OSTRACE(("TAKECONCH %d %s\n", conchFile->h, + rc==SQLITE_OK?"ok":"failed")); + return rc; + } while (1); /* in case we need to retry the :auto: lock file - + ** we should never get here except via the 'continue' call. */ + } +} + +/* +** If pFile holds a lock on a conch file, then release that lock. 
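+**
+** A small standalone sketch of the permission-matching step performed when
+** a brand new conch file is created above: copy the read/write bits of one
+** file onto another (hypothetical name; ignores ownership, and chmod may
+** fail harmlessly on read-only media):
+*/
+#if 0
+#include <sys/stat.h>
+/* Sketch: give zTarget the same user/group/other rw bits as zSource. */
+static int demoMatchPerms(const char *zSource, const char *zTarget){
+  struct stat buf;
+  mode_t m;
+  if( stat(zSource, &buf) ) return -1;
+  m = buf.st_mode & (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH);
+  return chmod(zTarget, m);
+}
+#endif
+/*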
+*/
+static int proxyReleaseConch(unixFile *pFile){
+  int rc = SQLITE_OK;         /* Subroutine return code */
+  proxyLockingContext *pCtx;  /* The locking context for the proxy lock */
+  unixFile *conchFile;        /* Name of the conch file */
+
+  pCtx = (proxyLockingContext *)pFile->lockingContext;
+  conchFile = pCtx->conchFile;
+  OSTRACE(("RELEASECONCH  %d for %s pid=%d\n", conchFile->h,
+           (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"),
+           osGetpid(0)));
+  if( pCtx->conchHeld>0 ){
+    rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK);
+  }
+  pCtx->conchHeld = 0;
+  OSTRACE(("RELEASECONCH  %d %s\n", conchFile->h,
+           (rc==SQLITE_OK ? "ok" : "failed")));
+  return rc;
+}
+
+/*
+** Given the name of a database file, compute the name of its conch file.
+** Store the conch filename in memory obtained from sqlite3_malloc64().
+** Make *pConchPath point to the new name.  Return SQLITE_OK on success
+** or SQLITE_NOMEM if unable to obtain memory.
+**
+** The caller is responsible for ensuring that the allocated memory
+** space is eventually freed.
+**
+** *pConchPath is set to NULL if a memory allocation error occurs.
+*/
+static int proxyCreateConchPathname(char *dbPath, char **pConchPath){
+  int i;                        /* Loop counter */
+  int len = (int)strlen(dbPath); /* Length of database filename - dbPath */
+  char *conchPath;              /* buffer in which to construct conch name */
+
+  /* Allocate space for the conch filename and initialize the name to
+  ** the name of the original database file. */
+  *pConchPath = conchPath = (char *)sqlite3_malloc64(len + 8);
+  if( conchPath==0 ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  memcpy(conchPath, dbPath, len+1);
+
+  /* now insert a "." before the last / character */
+  for( i=(len-1); i>=0; i-- ){
+    if( conchPath[i]=='/' ){
+      i++;
+      break;
+    }
+  }
+  conchPath[i]='.';
+  while ( i<len ){
+    conchPath[i+1]=dbPath[i];
+    i++;
+  }
+
+  /* append the "-conch" suffix to the file */
+  memcpy(&conchPath[i+1], "-conch", 7);
+  assert( (int)strlen(conchPath) == len+7 );
+
+  return SQLITE_OK;
+}
+
+/* Takes a fully configured proxy locking-style unix file and switches
+** the local lock file path
+*/
+static int switchLockProxyPath(unixFile *pFile, const char *path) {
+  proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext;
+  char *oldPath = pCtx->lockProxyPath;
+  int rc = SQLITE_OK;
+
+  if( pFile->eFileLock!=NO_LOCK ){
+    return SQLITE_BUSY;
+  }
+
+  /* nothing to do if the path is NULL, :auto: or matches the existing path */
+  if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ||
+    (oldPath && !strncmp(oldPath, path, MAXPATHLEN)) ){
+    return SQLITE_OK;
+  }else{
+    unixFile *lockProxy = pCtx->lockProxy;
+    pCtx->lockProxy=NULL;
+    pCtx->conchHeld = 0;
+    if( lockProxy!=NULL ){
+      rc=lockProxy->pMethod->xClose((sqlite3_file *)lockProxy);
+      if( rc ) return rc;
+      sqlite3_free(lockProxy);
+    }
+    sqlite3_free(oldPath);
+    pCtx->lockProxyPath = sqlite3DbStrDup(0, path);
+  }
+
+  return rc;
+}
+
+/*
+** pFile is a file that has been opened by a prior xOpen call.  dbPath
+** is a string buffer at least MAXPATHLEN+1 characters in size.
+**
+** This routine finds the filename associated with pFile and writes it
+** into dbPath.
+*/ +static int proxyGetDbPathForUnixFile(unixFile *pFile, char *dbPath){ +#if defined(__APPLE__) + if( pFile->pMethod == &afpIoMethods ){ + /* afp style keeps a reference to the db path in the filePath field + ** of the struct */ + assert( (int)strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); + strlcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath, + MAXPATHLEN); + } else +#endif + if( pFile->pMethod == &dotlockIoMethods ){ + /* dot lock style uses the locking context to store the dot lock + ** file path */ + int len = strlen((char *)pFile->lockingContext) - strlen(DOTLOCK_SUFFIX); + memcpy(dbPath, (char *)pFile->lockingContext, len + 1); + }else{ + /* all other styles use the locking context to store the db file path */ + assert( strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); + strlcpy(dbPath, (char *)pFile->lockingContext, MAXPATHLEN); + } + return SQLITE_OK; +} + +/* +** Takes an already filled in unix file and alters it so all file locking +** will be performed on the local proxy lock file. The following fields +** are preserved in the locking context so that they can be restored and +** the unix structure properly cleaned up at close time: +** ->lockingContext +** ->pMethod +*/ +static int proxyTransformUnixFile(unixFile *pFile, const char *path) { + proxyLockingContext *pCtx; + char dbPath[MAXPATHLEN+1]; /* Name of the database file */ + char *lockPath=NULL; + int rc = SQLITE_OK; + + if( pFile->eFileLock!=NO_LOCK ){ + return SQLITE_BUSY; + } + proxyGetDbPathForUnixFile(pFile, dbPath); + if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ){ + lockPath=NULL; + }else{ + lockPath=(char *)path; + } + + OSTRACE(("TRANSPROXY %d for %s pid=%d\n", pFile->h, + (lockPath ? lockPath : ":auto:"), osGetpid(0))); + + pCtx = sqlite3_malloc64( sizeof(*pCtx) ); + if( pCtx==0 ){ + return SQLITE_NOMEM_BKPT; + } + memset(pCtx, 0, sizeof(*pCtx)); + + rc = proxyCreateConchPathname(dbPath, &pCtx->conchFilePath); + if( rc==SQLITE_OK ){ + rc = proxyCreateUnixFile(pCtx->conchFilePath, &pCtx->conchFile, 0); + if( rc==SQLITE_CANTOPEN && ((pFile->openFlags&O_RDWR) == 0) ){ + /* if (a) the open flags are not O_RDWR, (b) the conch isn't there, and + ** (c) the file system is read-only, then enable no-locking access. + ** Ugh, since O_RDONLY==0x0000 we test for !O_RDWR since unixOpen asserts + ** that openFlags will have only one of O_RDONLY or O_RDWR. + */ + struct statfs fsInfo; + struct stat conchInfo; + int goLockless = 0; + + if( osStat(pCtx->conchFilePath, &conchInfo) == -1 ) { + int err = errno; + if( (err==ENOENT) && (statfs(dbPath, &fsInfo) != -1) ){ + goLockless = (fsInfo.f_flags&MNT_RDONLY) == MNT_RDONLY; + } + } + if( goLockless ){ + pCtx->conchHeld = -1; /* read only FS/ lockless */ + rc = SQLITE_OK; + } + } + } + if( rc==SQLITE_OK && lockPath ){ + pCtx->lockProxyPath = sqlite3DbStrDup(0, lockPath); + } + + if( rc==SQLITE_OK ){ + pCtx->dbPath = sqlite3DbStrDup(0, dbPath); + if( pCtx->dbPath==NULL ){ + rc = SQLITE_NOMEM_BKPT; + } + } + if( rc==SQLITE_OK ){ + /* all memory is allocated, proxys are created and assigned, + ** switch the locking context and pMethod then return. 
+ */ + pCtx->oldLockingContext = pFile->lockingContext; + pFile->lockingContext = pCtx; + pCtx->pOldMethod = pFile->pMethod; + pFile->pMethod = &proxyIoMethods; + }else{ + if( pCtx->conchFile ){ + pCtx->conchFile->pMethod->xClose((sqlite3_file *)pCtx->conchFile); + sqlite3_free(pCtx->conchFile); + } + sqlite3DbFree(0, pCtx->lockProxyPath); + sqlite3_free(pCtx->conchFilePath); + sqlite3_free(pCtx); + } + OSTRACE(("TRANSPROXY %d %s\n", pFile->h, + (rc==SQLITE_OK ? "ok" : "failed"))); + return rc; +} + + +/* +** This routine handles sqlite3_file_control() calls that are specific +** to proxy locking. +*/ +static int proxyFileControl(sqlite3_file *id, int op, void *pArg){ + switch( op ){ + case SQLITE_FCNTL_GET_LOCKPROXYFILE: { + unixFile *pFile = (unixFile*)id; + if( pFile->pMethod == &proxyIoMethods ){ + proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext; + proxyTakeConch(pFile); + if( pCtx->lockProxyPath ){ + *(const char **)pArg = pCtx->lockProxyPath; + }else{ + *(const char **)pArg = ":auto: (not held)"; + } + } else { + *(const char **)pArg = NULL; + } + return SQLITE_OK; + } + case SQLITE_FCNTL_SET_LOCKPROXYFILE: { + unixFile *pFile = (unixFile*)id; + int rc = SQLITE_OK; + int isProxyStyle = (pFile->pMethod == &proxyIoMethods); + if( pArg==NULL || (const char *)pArg==0 ){ + if( isProxyStyle ){ + /* turn off proxy locking - not supported. If support is added for + ** switching proxy locking mode off then it will need to fail if + ** the journal mode is WAL mode. + */ + rc = SQLITE_ERROR /*SQLITE_PROTOCOL? SQLITE_MISUSE?*/; + }else{ + /* turn off proxy locking - already off - NOOP */ + rc = SQLITE_OK; + } + }else{ + const char *proxyPath = (const char *)pArg; + if( isProxyStyle ){ + proxyLockingContext *pCtx = + (proxyLockingContext*)pFile->lockingContext; + if( !strcmp(pArg, ":auto:") + || (pCtx->lockProxyPath && + !strncmp(pCtx->lockProxyPath, proxyPath, MAXPATHLEN)) + ){ + rc = SQLITE_OK; + }else{ + rc = switchLockProxyPath(pFile, proxyPath); + } + }else{ + /* turn on proxy file locking */ + rc = proxyTransformUnixFile(pFile, proxyPath); + } + } + return rc; + } + default: { + assert( 0 ); /* The call assures that only valid opcodes are sent */ + } + } + /*NOTREACHED*/ + return SQLITE_ERROR; +} + +/* +** Within this division (the proxying locking implementation) the procedures +** above this point are all utilities. The lock-related methods of the +** proxy-locking sqlite3_io_method object follow. +*/ + + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, set *pResOut +** to a non-zero value otherwise *pResOut is set to zero. The return value +** is set to SQLITE_OK unless an I/O error occurs during lock checking. +*/ +static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) { + unixFile *pFile = (unixFile*)id; + int rc = proxyTakeConch(pFile); + if( rc==SQLITE_OK ){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + if( pCtx->conchHeld>0 ){ + unixFile *proxy = pCtx->lockProxy; + return proxy->pMethod->xCheckReservedLock((sqlite3_file*)proxy, pResOut); + }else{ /* conchHeld < 0 is lockless */ + pResOut=0; + } + } + return rc; +} + +/* +** Lock the file with the lock specified by parameter eFileLock - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. 
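+**
+** (A compact standalone encoding of the transition chart below, using the
+** same five states; the helper name is hypothetical and PENDING is shown
+** as an explicit source state even though it is normally inserted
+** implicitly on the way up:)
+*/
+#if 0
+/* Sketch: 0=UNLOCKED 1=SHARED 2=RESERVED 3=PENDING 4=EXCLUSIVE.  Returns
+** nonzero if moving from eFrom to eTo matches the allowed transitions. */
+static int demoLockStepOk(int eFrom, int eTo){
+  switch( eFrom ){
+    case 0:  return eTo==1;            /* UNLOCKED -> SHARED              */
+    case 1:  return eTo==2 || eTo==4;  /* SHARED -> RESERVED or EXCLUSIVE */
+    case 2:  return eTo==4;            /* RESERVED -> (PENDING) -> EXCL   */
+    case 3:  return eTo==4;            /* PENDING -> EXCLUSIVE            */
+    default: return 0;
+  }
+}
+#endif
+/*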
The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. Use the sqlite3OsUnlock() +** routine to lower a locking level. +*/ +static int proxyLock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + int rc = proxyTakeConch(pFile); + if( rc==SQLITE_OK ){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + if( pCtx->conchHeld>0 ){ + unixFile *proxy = pCtx->lockProxy; + rc = proxy->pMethod->xLock((sqlite3_file*)proxy, eFileLock); + pFile->eFileLock = proxy->eFileLock; + }else{ + /* conchHeld < 0 is lockless */ + } + } + return rc; +} + + +/* +** Lower the locking level on file descriptor pFile to eFileLock. eFileLock +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +*/ +static int proxyUnlock(sqlite3_file *id, int eFileLock) { + unixFile *pFile = (unixFile*)id; + int rc = proxyTakeConch(pFile); + if( rc==SQLITE_OK ){ + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + if( pCtx->conchHeld>0 ){ + unixFile *proxy = pCtx->lockProxy; + rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, eFileLock); + pFile->eFileLock = proxy->eFileLock; + }else{ + /* conchHeld < 0 is lockless */ + } + } + return rc; +} + +/* +** Close a file that uses proxy locks. +*/ +static int proxyClose(sqlite3_file *id) { + if( ALWAYS(id) ){ + unixFile *pFile = (unixFile*)id; + proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; + unixFile *lockProxy = pCtx->lockProxy; + unixFile *conchFile = pCtx->conchFile; + int rc = SQLITE_OK; + + if( lockProxy ){ + rc = lockProxy->pMethod->xUnlock((sqlite3_file*)lockProxy, NO_LOCK); + if( rc ) return rc; + rc = lockProxy->pMethod->xClose((sqlite3_file*)lockProxy); + if( rc ) return rc; + sqlite3_free(lockProxy); + pCtx->lockProxy = 0; + } + if( conchFile ){ + if( pCtx->conchHeld ){ + rc = proxyReleaseConch(pFile); + if( rc ) return rc; + } + rc = conchFile->pMethod->xClose((sqlite3_file*)conchFile); + if( rc ) return rc; + sqlite3_free(conchFile); + } + sqlite3DbFree(0, pCtx->lockProxyPath); + sqlite3_free(pCtx->conchFilePath); + sqlite3DbFree(0, pCtx->dbPath); + /* restore the original locking context and pMethod then close it */ + pFile->lockingContext = pCtx->oldLockingContext; + pFile->pMethod = pCtx->pOldMethod; + sqlite3_free(pCtx); + return pFile->pMethod->xClose(id); + } + return SQLITE_OK; +} + + + +#endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ +/* +** The proxy locking style is intended for use with AFP filesystems. +** And since AFP is only supported on MacOSX, the proxy locking is also +** restricted to MacOSX. +** +** +******************* End of the proxy lock implementation ********************** +******************************************************************************/ + +/* +** Initialize the operating system interface. +** +** This routine registers all VFS implementations for unix-like operating +** systems. This routine, and the sqlite3_os_end() routine that follows, +** should be the only routines in this file that are visible from other +** files. 
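+**
+** Once registered, any of these VFSes can be selected by name when a
+** database is opened.  A minimal usage sketch (hypothetical helper name;
+** assumes the public sqlite3.h API):
+*/
+#if 0
+#include <sqlite3.h>
+/* Sketch: open a database through the dotfile-locking VFS registered by
+** this routine instead of the default "unix" VFS. */
+static int demoOpenWithVfs(const char *zDb, sqlite3 **pDb){
+  return sqlite3_open_v2(zDb, pDb,
+                         SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
+                         "unix-dotfile");
+}
+#endif
+/*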
+** +** This routine is called once during SQLite initialization and by a +** single thread. The memory allocation and mutex subsystems have not +** necessarily been initialized when this routine is called, and so they +** should not be used. +*/ +SQLITE_API int sqlite3_os_init(void){ + /* + ** The following macro defines an initializer for an sqlite3_vfs object. + ** The name of the VFS is NAME. The pAppData is a pointer to a pointer + ** to the "finder" function. (pAppData is a pointer to a pointer because + ** silly C90 rules prohibit a void* from being cast to a function pointer + ** and so we have to go through the intermediate pointer to avoid problems + ** when compiling with -pedantic-errors on GCC.) + ** + ** The FINDER parameter to this macro is the name of the pointer to the + ** finder-function. The finder-function returns a pointer to the + ** sqlite_io_methods object that implements the desired locking + ** behaviors. See the division above that contains the IOMETHODS + ** macro for addition information on finder-functions. + ** + ** Most finders simply return a pointer to a fixed sqlite3_io_methods + ** object. But the "autolockIoFinder" available on MacOSX does a little + ** more than that; it looks at the filesystem type that hosts the + ** database file and tries to choose an locking method appropriate for + ** that filesystem time. + */ + #define UNIXVFS(VFSNAME, FINDER) { \ + 3, /* iVersion */ \ + sizeof(unixFile), /* szOsFile */ \ + MAX_PATHNAME, /* mxPathname */ \ + 0, /* pNext */ \ + VFSNAME, /* zName */ \ + (void*)&FINDER, /* pAppData */ \ + unixOpen, /* xOpen */ \ + unixDelete, /* xDelete */ \ + unixAccess, /* xAccess */ \ + unixFullPathname, /* xFullPathname */ \ + unixDlOpen, /* xDlOpen */ \ + unixDlError, /* xDlError */ \ + unixDlSym, /* xDlSym */ \ + unixDlClose, /* xDlClose */ \ + unixRandomness, /* xRandomness */ \ + unixSleep, /* xSleep */ \ + unixCurrentTime, /* xCurrentTime */ \ + unixGetLastError, /* xGetLastError */ \ + unixCurrentTimeInt64, /* xCurrentTimeInt64 */ \ + unixSetSystemCall, /* xSetSystemCall */ \ + unixGetSystemCall, /* xGetSystemCall */ \ + unixNextSystemCall, /* xNextSystemCall */ \ + } + + /* + ** All default VFSes for unix are contained in the following array. + ** + ** Note that the sqlite3_vfs.pNext field of the VFS object is modified + ** by the SQLite core when the VFS is registered. So the following + ** array cannot be const. + */ + static sqlite3_vfs aVfs[] = { +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + UNIXVFS("unix", autolockIoFinder ), +#elif OS_VXWORKS + UNIXVFS("unix", vxworksIoFinder ), +#else + UNIXVFS("unix", posixIoFinder ), +#endif + UNIXVFS("unix-none", nolockIoFinder ), + UNIXVFS("unix-dotfile", dotlockIoFinder ), + UNIXVFS("unix-excl", posixIoFinder ), +#if OS_VXWORKS + UNIXVFS("unix-namedsem", semIoFinder ), +#endif +#if SQLITE_ENABLE_LOCKING_STYLE || OS_VXWORKS + UNIXVFS("unix-posix", posixIoFinder ), +#endif +#if SQLITE_ENABLE_LOCKING_STYLE + UNIXVFS("unix-flock", flockIoFinder ), +#endif +#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) + UNIXVFS("unix-afp", afpIoFinder ), + UNIXVFS("unix-nfs", nfsIoFinder ), + UNIXVFS("unix-proxy", proxyIoFinder ), +#endif + }; + unsigned int i; /* Loop counter */ + + /* Double-check that the aSyscall[] array has been constructed + ** correctly. 
See ticket [bb3a86e890c8e96ab] */ + assert( ArraySize(aSyscall)==28 ); + + /* Register all VFSes defined in the aVfs[] array */ + for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ + sqlite3_vfs_register(&aVfs[i], i==0); + } + return SQLITE_OK; +} + +/* +** Shutdown the operating system interface. +** +** Some operating systems might need to do some cleanup in this routine, +** to release dynamically allocated objects. But not on unix. +** This routine is a no-op for unix. +*/ +SQLITE_API int sqlite3_os_end(void){ + return SQLITE_OK; +} + +#endif /* SQLITE_OS_UNIX */ + +/************** End of os_unix.c *********************************************/ +/************** Begin file os_win.c ******************************************/ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains code that is specific to Windows. +*/ +/* #include "sqliteInt.h" */ +#if SQLITE_OS_WIN /* This file is used for Windows only */ + +/* +** Include code that is common to all os_*.c files +*/ +/************** Include os_common.h in the middle of os_win.c ****************/ +/************** Begin file os_common.h ***************************************/ +/* +** 2004 May 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains macros and a little bit of code that is common to +** all of the platform-specific files (os_*.c) and is #included into those +** files. +** +** This file should be #included by the os_*.c files only. It is not a +** general purpose header file. +*/ +#ifndef _OS_COMMON_H_ +#define _OS_COMMON_H_ + +/* +** At least two bugs have slipped in because we changed the MEMORY_DEBUG +** macro to SQLITE_DEBUG and some older makefiles have not yet made the +** switch. The following code should catch this problem at compile-time. +*/ +#ifdef MEMORY_DEBUG +# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." +#endif + +/* +** Macros for performance tracing. Normally turned off. Only works +** on i486 hardware. +*/ +#ifdef SQLITE_PERFORMANCE_TRACE + +/* +** hwtime.h contains inline assembler code for implementing +** high-performance timing routines. +*/ +/************** Include hwtime.h in the middle of os_common.h ****************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on pentium-class (or newer) processors. 
+** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long val; + __asm__ __volatile__ ("rdtsc" : "=A" (val)); + return val; + } + +#elif (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + #error Need implementation of sqlite3Hwtime() for your platform. + + /* + ** To compile without implementing sqlite3Hwtime() for your platform, + ** you can remove the above #error and use the following + ** stub function. You will lose timing support for many + ** of the debugging and testing utilities, but it should at + ** least compile and run. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in os_common.h ******************/ + +static sqlite_uint64 g_start; +static sqlite_uint64 g_elapsed; +#define TIMER_START g_start=sqlite3Hwtime() +#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start +#define TIMER_ELAPSED g_elapsed +#else +#define TIMER_START +#define TIMER_END +#define TIMER_ELAPSED ((sqlite_uint64)0) +#endif + +/* +** If we compile with the SQLITE_TEST macro set, then the following block +** of code will give us the ability to simulate a disk I/O error. This +** is used for testing the I/O recovery logic. +*/ +#if defined(SQLITE_TEST) +SQLITE_API extern int sqlite3_io_error_hit; +SQLITE_API extern int sqlite3_io_error_hardhit; +SQLITE_API extern int sqlite3_io_error_pending; +SQLITE_API extern int sqlite3_io_error_persist; +SQLITE_API extern int sqlite3_io_error_benign; +SQLITE_API extern int sqlite3_diskfull_pending; +SQLITE_API extern int sqlite3_diskfull; +#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) +#define SimulateIOError(CODE) \ + if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ + || sqlite3_io_error_pending-- == 1 ) \ + { local_ioerr(); CODE; } +static void local_ioerr(){ + IOTRACE(("IOERR\n")); + sqlite3_io_error_hit++; + if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; +} +#define SimulateDiskfullError(CODE) \ + if( sqlite3_diskfull_pending ){ \ + if( sqlite3_diskfull_pending == 1 ){ \ + local_ioerr(); \ + sqlite3_diskfull = 1; \ + sqlite3_io_error_hit = 1; \ + CODE; \ + }else{ \ + sqlite3_diskfull_pending--; \ + } \ + } +#else +#define SimulateIOErrorBenign(X) +#define SimulateIOError(A) +#define SimulateDiskfullError(A) +#endif /* defined(SQLITE_TEST) */ + +/* +** When testing, keep a count of the number of open files. 
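+**
+** (The TIMER_* macros above are used by bracketing the operation of
+** interest; a minimal sketch with a hypothetical name, only meaningful
+** when compiled with SQLITE_PERFORMANCE_TRACE on a supported CPU:)
+*/
+#if 0
+/* Sketch: measure one operation in raw hardware-counter ticks. */
+static sqlite_uint64 demoTimeOperation(void (*xOp)(void)){
+  TIMER_START;
+  xOp();
+  TIMER_END;
+  return TIMER_ELAPSED;
+}
+#endif
+/*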
+*/ +#if defined(SQLITE_TEST) +SQLITE_API extern int sqlite3_open_file_count; +#define OpenCounter(X) sqlite3_open_file_count+=(X) +#else +#define OpenCounter(X) +#endif /* defined(SQLITE_TEST) */ + +#endif /* !defined(_OS_COMMON_H_) */ + +/************** End of os_common.h *******************************************/ +/************** Continuing where we left off in os_win.c *********************/ + +/* +** Include the header file for the Windows VFS. +*/ +/* #include "os_win.h" */ + +/* +** Compiling and using WAL mode requires several APIs that are only +** available in Windows platforms based on the NT kernel. +*/ +#if !SQLITE_OS_WINNT && !defined(SQLITE_OMIT_WAL) +# error "WAL mode requires support from the Windows NT kernel, compile\ + with SQLITE_OMIT_WAL." +#endif + +#if !SQLITE_OS_WINNT && SQLITE_MAX_MMAP_SIZE>0 +# error "Memory mapped files require support from the Windows NT kernel,\ + compile with SQLITE_MAX_MMAP_SIZE=0." +#endif + +/* +** Are most of the Win32 ANSI APIs available (i.e. with certain exceptions +** based on the sub-platform)? +*/ +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(SQLITE_WIN32_NO_ANSI) +# define SQLITE_WIN32_HAS_ANSI +#endif + +/* +** Are most of the Win32 Unicode APIs available (i.e. with certain exceptions +** based on the sub-platform)? +*/ +#if (SQLITE_OS_WINCE || SQLITE_OS_WINNT || SQLITE_OS_WINRT) && \ + !defined(SQLITE_WIN32_NO_WIDE) +# define SQLITE_WIN32_HAS_WIDE +#endif + +/* +** Make sure at least one set of Win32 APIs is available. +*/ +#if !defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_WIN32_HAS_WIDE) +# error "At least one of SQLITE_WIN32_HAS_ANSI and SQLITE_WIN32_HAS_WIDE\ + must be defined." +#endif + +/* +** Define the required Windows SDK version constants if they are not +** already available. +*/ +#ifndef NTDDI_WIN8 +# define NTDDI_WIN8 0x06020000 +#endif + +#ifndef NTDDI_WINBLUE +# define NTDDI_WINBLUE 0x06030000 +#endif + +#ifndef NTDDI_WINTHRESHOLD +# define NTDDI_WINTHRESHOLD 0x06040000 +#endif + +/* +** Check to see if the GetVersionEx[AW] functions are deprecated on the +** target system. GetVersionEx was first deprecated in Win8.1. +*/ +#ifndef SQLITE_WIN32_GETVERSIONEX +# if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINBLUE +# define SQLITE_WIN32_GETVERSIONEX 0 /* GetVersionEx() is deprecated */ +# else +# define SQLITE_WIN32_GETVERSIONEX 1 /* GetVersionEx() is current */ +# endif +#endif + +/* +** Check to see if the CreateFileMappingA function is supported on the +** target system. It is unavailable when using "mincore.lib" on Win10. +** When compiling for Windows 10, always assume "mincore.lib" is in use. +*/ +#ifndef SQLITE_WIN32_CREATEFILEMAPPINGA +# if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINTHRESHOLD +# define SQLITE_WIN32_CREATEFILEMAPPINGA 0 +# else +# define SQLITE_WIN32_CREATEFILEMAPPINGA 1 +# endif +#endif + +/* +** This constant should already be defined (in the "WinDef.h" SDK file). +*/ +#ifndef MAX_PATH +# define MAX_PATH (260) +#endif + +/* +** Maximum pathname length (in chars) for Win32. This should normally be +** MAX_PATH. +*/ +#ifndef SQLITE_WIN32_MAX_PATH_CHARS +# define SQLITE_WIN32_MAX_PATH_CHARS (MAX_PATH) +#endif + +/* +** This constant should already be defined (in the "WinNT.h" SDK file). +*/ +#ifndef UNICODE_STRING_MAX_CHARS +# define UNICODE_STRING_MAX_CHARS (32767) +#endif + +/* +** Maximum pathname length (in chars) for WinNT. This should normally be +** UNICODE_STRING_MAX_CHARS. 
+*/ +#ifndef SQLITE_WINNT_MAX_PATH_CHARS +# define SQLITE_WINNT_MAX_PATH_CHARS (UNICODE_STRING_MAX_CHARS) +#endif + +/* +** Maximum pathname length (in bytes) for Win32. The MAX_PATH macro is in +** characters, so we allocate 4 bytes per character assuming worst-case of +** 4-bytes-per-character for UTF8. +*/ +#ifndef SQLITE_WIN32_MAX_PATH_BYTES +# define SQLITE_WIN32_MAX_PATH_BYTES (SQLITE_WIN32_MAX_PATH_CHARS*4) +#endif + +/* +** Maximum pathname length (in bytes) for WinNT. This should normally be +** UNICODE_STRING_MAX_CHARS * sizeof(WCHAR). +*/ +#ifndef SQLITE_WINNT_MAX_PATH_BYTES +# define SQLITE_WINNT_MAX_PATH_BYTES \ + (sizeof(WCHAR) * SQLITE_WINNT_MAX_PATH_CHARS) +#endif + +/* +** Maximum error message length (in chars) for WinRT. +*/ +#ifndef SQLITE_WIN32_MAX_ERRMSG_CHARS +# define SQLITE_WIN32_MAX_ERRMSG_CHARS (1024) +#endif + +/* +** Returns non-zero if the character should be treated as a directory +** separator. +*/ +#ifndef winIsDirSep +# define winIsDirSep(a) (((a) == '/') || ((a) == '\\')) +#endif + +/* +** This macro is used when a local variable is set to a value that is +** [sometimes] not used by the code (e.g. via conditional compilation). +*/ +#ifndef UNUSED_VARIABLE_VALUE +# define UNUSED_VARIABLE_VALUE(x) (void)(x) +#endif + +/* +** Returns the character that should be used as the directory separator. +*/ +#ifndef winGetDirSep +# define winGetDirSep() '\\' +#endif + +/* +** Do we need to manually define the Win32 file mapping APIs for use with WAL +** mode or memory mapped files (e.g. these APIs are available in the Windows +** CE SDK; however, they are not present in the header file)? +*/ +#if SQLITE_WIN32_FILEMAPPING_API && \ + (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) +/* +** Two of the file mapping APIs are different under WinRT. Figure out which +** set we need. +*/ +#if SQLITE_OS_WINRT +WINBASEAPI HANDLE WINAPI CreateFileMappingFromApp(HANDLE, \ + LPSECURITY_ATTRIBUTES, ULONG, ULONG64, LPCWSTR); + +WINBASEAPI LPVOID WINAPI MapViewOfFileFromApp(HANDLE, ULONG, ULONG64, SIZE_T); +#else +#if defined(SQLITE_WIN32_HAS_ANSI) +WINBASEAPI HANDLE WINAPI CreateFileMappingA(HANDLE, LPSECURITY_ATTRIBUTES, \ + DWORD, DWORD, DWORD, LPCSTR); +#endif /* defined(SQLITE_WIN32_HAS_ANSI) */ + +#if defined(SQLITE_WIN32_HAS_WIDE) +WINBASEAPI HANDLE WINAPI CreateFileMappingW(HANDLE, LPSECURITY_ATTRIBUTES, \ + DWORD, DWORD, DWORD, LPCWSTR); +#endif /* defined(SQLITE_WIN32_HAS_WIDE) */ + +WINBASEAPI LPVOID WINAPI MapViewOfFile(HANDLE, DWORD, DWORD, DWORD, SIZE_T); +#endif /* SQLITE_OS_WINRT */ + +/* +** These file mapping APIs are common to both Win32 and WinRT. +*/ + +WINBASEAPI BOOL WINAPI FlushViewOfFile(LPCVOID, SIZE_T); +WINBASEAPI BOOL WINAPI UnmapViewOfFile(LPCVOID); +#endif /* SQLITE_WIN32_FILEMAPPING_API */ + +/* +** Some Microsoft compilers lack this definition. +*/ +#ifndef INVALID_FILE_ATTRIBUTES +# define INVALID_FILE_ATTRIBUTES ((DWORD)-1) +#endif + +#ifndef FILE_FLAG_MASK +# define FILE_FLAG_MASK (0xFF3C0000) +#endif + +#ifndef FILE_ATTRIBUTE_MASK +# define FILE_ATTRIBUTE_MASK (0x0003FFF7) +#endif + +#ifndef SQLITE_OMIT_WAL +/* Forward references to structures used for WAL */ +typedef struct winShm winShm; /* A connection to shared-memory */ +typedef struct winShmNode winShmNode; /* A region of shared-memory */ +#endif + +/* +** WinCE lacks native support for file locking so we have to fake it +** with some code of our own. 
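+** Each lock level (reader, pending, reserved, exclusive) is tracked in a
+** winceLock structure that is kept in named shared memory and protected
+** by a named mutex, so that all connections to the same file observe a
+** single, consistent lock state.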
+*/ +#if SQLITE_OS_WINCE +typedef struct winceLock { + int nReaders; /* Number of reader locks obtained */ + BOOL bPending; /* Indicates a pending lock has been obtained */ + BOOL bReserved; /* Indicates a reserved lock has been obtained */ + BOOL bExclusive; /* Indicates an exclusive lock has been obtained */ +} winceLock; +#endif + +/* +** The winFile structure is a subclass of sqlite3_file* specific to the win32 +** portability layer. +*/ +typedef struct winFile winFile; +struct winFile { + const sqlite3_io_methods *pMethod; /*** Must be first ***/ + sqlite3_vfs *pVfs; /* The VFS used to open this file */ + HANDLE h; /* Handle for accessing the file */ + u8 locktype; /* Type of lock currently held on this file */ + short sharedLockByte; /* Randomly chosen byte used as a shared lock */ + u8 ctrlFlags; /* Flags. See WINFILE_* below */ + DWORD lastErrno; /* The Windows errno from the last I/O error */ +#ifndef SQLITE_OMIT_WAL + winShm *pShm; /* Instance of shared memory on this file */ +#endif + const char *zPath; /* Full pathname of this file */ + int szChunk; /* Chunk size configured by FCNTL_CHUNK_SIZE */ +#if SQLITE_OS_WINCE + LPWSTR zDeleteOnClose; /* Name of file to delete when closing */ + HANDLE hMutex; /* Mutex used to control access to shared lock */ + HANDLE hShared; /* Shared memory segment used for locking */ + winceLock local; /* Locks obtained by this instance of winFile */ + winceLock *shared; /* Global shared lock memory for the file */ +#endif +#if SQLITE_MAX_MMAP_SIZE>0 + int nFetchOut; /* Number of outstanding xFetch references */ + HANDLE hMap; /* Handle for accessing memory mapping */ + void *pMapRegion; /* Area memory mapped */ + sqlite3_int64 mmapSize; /* Usable size of mapped region */ + sqlite3_int64 mmapSizeActual; /* Actual size of mapped region */ + sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */ +#endif +}; + +/* +** The winVfsAppData structure is used for the pAppData member for all of the +** Win32 VFS variants. +*/ +typedef struct winVfsAppData winVfsAppData; +struct winVfsAppData { + const sqlite3_io_methods *pMethod; /* The file I/O methods to use. */ + void *pAppData; /* The extra pAppData, if any. */ + BOOL bNoLock; /* Non-zero if locking is disabled. */ +}; + +/* +** Allowed values for winFile.ctrlFlags +*/ +#define WINFILE_RDONLY 0x02 /* Connection is read only */ +#define WINFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ +#define WINFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */ + +/* + * The size of the buffer used by sqlite3_win32_write_debug(). + */ +#ifndef SQLITE_WIN32_DBG_BUF_SIZE +# define SQLITE_WIN32_DBG_BUF_SIZE ((int)(4096-sizeof(DWORD))) +#endif + +/* + * The value used with sqlite3_win32_set_directory() to specify that + * the data directory should be changed. + */ +#ifndef SQLITE_WIN32_DATA_DIRECTORY_TYPE +# define SQLITE_WIN32_DATA_DIRECTORY_TYPE (1) +#endif + +/* + * The value used with sqlite3_win32_set_directory() to specify that + * the temporary directory should be changed. + */ +#ifndef SQLITE_WIN32_TEMP_DIRECTORY_TYPE +# define SQLITE_WIN32_TEMP_DIRECTORY_TYPE (2) +#endif + +/* + * If compiled with SQLITE_WIN32_MALLOC on Windows, we will use the + * various Win32 API heap functions instead of our own. + */ +#ifdef SQLITE_WIN32_MALLOC + +/* + * If this is non-zero, an isolated heap will be created by the native Win32 + * allocator subsystem; otherwise, the default process heap will be used. This + * setting has no effect when compiling for WinRT. 
By default, this is enabled + * and an isolated heap will be created to store all allocated data. + * + ****************************************************************************** + * WARNING: It is important to note that when this setting is non-zero and the + * winMemShutdown function is called (e.g. by the sqlite3_shutdown + * function), all data that was allocated using the isolated heap will + * be freed immediately and any attempt to access any of that freed + * data will almost certainly result in an immediate access violation. + ****************************************************************************** + */ +#ifndef SQLITE_WIN32_HEAP_CREATE +# define SQLITE_WIN32_HEAP_CREATE (TRUE) +#endif + +/* + * This is cache size used in the calculation of the initial size of the + * Win32-specific heap. It cannot be negative. + */ +#ifndef SQLITE_WIN32_CACHE_SIZE +# if SQLITE_DEFAULT_CACHE_SIZE>=0 +# define SQLITE_WIN32_CACHE_SIZE (SQLITE_DEFAULT_CACHE_SIZE) +# else +# define SQLITE_WIN32_CACHE_SIZE (-(SQLITE_DEFAULT_CACHE_SIZE)) +# endif +#endif + +/* + * The initial size of the Win32-specific heap. This value may be zero. + */ +#ifndef SQLITE_WIN32_HEAP_INIT_SIZE +# define SQLITE_WIN32_HEAP_INIT_SIZE ((SQLITE_WIN32_CACHE_SIZE) * \ + (SQLITE_DEFAULT_PAGE_SIZE) + 4194304) +#endif + +/* + * The maximum size of the Win32-specific heap. This value may be zero. + */ +#ifndef SQLITE_WIN32_HEAP_MAX_SIZE +# define SQLITE_WIN32_HEAP_MAX_SIZE (0) +#endif + +/* + * The extra flags to use in calls to the Win32 heap APIs. This value may be + * zero for the default behavior. + */ +#ifndef SQLITE_WIN32_HEAP_FLAGS +# define SQLITE_WIN32_HEAP_FLAGS (0) +#endif + + +/* +** The winMemData structure stores information required by the Win32-specific +** sqlite3_mem_methods implementation. +*/ +typedef struct winMemData winMemData; +struct winMemData { +#ifndef NDEBUG + u32 magic1; /* Magic number to detect structure corruption. */ +#endif + HANDLE hHeap; /* The handle to our heap. */ + BOOL bOwned; /* Do we own the heap (i.e. destroy it on shutdown)? */ +#ifndef NDEBUG + u32 magic2; /* Magic number to detect structure corruption. */ +#endif +}; + +#ifndef NDEBUG +#define WINMEM_MAGIC1 0x42b2830b +#define WINMEM_MAGIC2 0xbd4d7cf4 +#endif + +static struct winMemData win_mem_data = { +#ifndef NDEBUG + WINMEM_MAGIC1, +#endif + NULL, FALSE +#ifndef NDEBUG + ,WINMEM_MAGIC2 +#endif +}; + +#ifndef NDEBUG +#define winMemAssertMagic1() assert( win_mem_data.magic1==WINMEM_MAGIC1 ) +#define winMemAssertMagic2() assert( win_mem_data.magic2==WINMEM_MAGIC2 ) +#define winMemAssertMagic() winMemAssertMagic1(); winMemAssertMagic2(); +#else +#define winMemAssertMagic() +#endif + +#define winMemGetDataPtr() &win_mem_data +#define winMemGetHeap() win_mem_data.hHeap +#define winMemGetOwned() win_mem_data.bOwned + +static void *winMemMalloc(int nBytes); +static void winMemFree(void *pPrior); +static void *winMemRealloc(void *pPrior, int nBytes); +static int winMemSize(void *p); +static int winMemRoundup(int n); +static int winMemInit(void *pAppData); +static void winMemShutdown(void *pAppData); + +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void); +#endif /* SQLITE_WIN32_MALLOC */ + +/* +** The following variable is (normally) set once and never changes +** thereafter. It records whether the operating system is Win9x +** or WinNT. +** +** 0: Operating system unknown. +** 1: Operating system is Win9x. +** 2: Operating system is WinNT. 
+** +** In order to facilitate testing on a WinNT system, the test fixture +** can manually set this value to 1 to emulate Win98 behavior. +*/ +#ifdef SQLITE_TEST +SQLITE_API LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0; +#else +static LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0; +#endif + +#ifndef SYSCALL +# define SYSCALL sqlite3_syscall_ptr +#endif + +/* +** This function is not available on Windows CE or WinRT. + */ + +#if SQLITE_OS_WINCE || SQLITE_OS_WINRT +# define osAreFileApisANSI() 1 +#endif + +/* +** Many system calls are accessed through pointer-to-functions so that +** they may be overridden at runtime to facilitate fault injection during +** testing and sandboxing. The following array holds the names and pointers +** to all overrideable system calls. +*/ +static struct win_syscall { + const char *zName; /* Name of the system call */ + sqlite3_syscall_ptr pCurrent; /* Current value of the system call */ + sqlite3_syscall_ptr pDefault; /* Default value */ +} aSyscall[] = { +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + { "AreFileApisANSI", (SYSCALL)AreFileApisANSI, 0 }, +#else + { "AreFileApisANSI", (SYSCALL)0, 0 }, +#endif + +#ifndef osAreFileApisANSI +#define osAreFileApisANSI ((BOOL(WINAPI*)(VOID))aSyscall[0].pCurrent) +#endif + +#if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE) + { "CharLowerW", (SYSCALL)CharLowerW, 0 }, +#else + { "CharLowerW", (SYSCALL)0, 0 }, +#endif + +#define osCharLowerW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[1].pCurrent) + +#if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE) + { "CharUpperW", (SYSCALL)CharUpperW, 0 }, +#else + { "CharUpperW", (SYSCALL)0, 0 }, +#endif + +#define osCharUpperW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[2].pCurrent) + + { "CloseHandle", (SYSCALL)CloseHandle, 0 }, + +#define osCloseHandle ((BOOL(WINAPI*)(HANDLE))aSyscall[3].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) + { "CreateFileA", (SYSCALL)CreateFileA, 0 }, +#else + { "CreateFileA", (SYSCALL)0, 0 }, +#endif + +#define osCreateFileA ((HANDLE(WINAPI*)(LPCSTR,DWORD,DWORD, \ + LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[4].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) + { "CreateFileW", (SYSCALL)CreateFileW, 0 }, +#else + { "CreateFileW", (SYSCALL)0, 0 }, +#endif + +#define osCreateFileW ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD, \ + LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[5].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_ANSI) && \ + (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) && \ + SQLITE_WIN32_CREATEFILEMAPPINGA + { "CreateFileMappingA", (SYSCALL)CreateFileMappingA, 0 }, +#else + { "CreateFileMappingA", (SYSCALL)0, 0 }, +#endif + +#define osCreateFileMappingA ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \ + DWORD,DWORD,DWORD,LPCSTR))aSyscall[6].pCurrent) + +#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ + (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)) + { "CreateFileMappingW", (SYSCALL)CreateFileMappingW, 0 }, +#else + { "CreateFileMappingW", (SYSCALL)0, 0 }, +#endif + +#define osCreateFileMappingW ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \ + DWORD,DWORD,DWORD,LPCWSTR))aSyscall[7].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) + { "CreateMutexW", (SYSCALL)CreateMutexW, 0 }, +#else + { "CreateMutexW", (SYSCALL)0, 0 }, +#endif + +#define osCreateMutexW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,BOOL, \ + LPCWSTR))aSyscall[8].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) + { "DeleteFileA", (SYSCALL)DeleteFileA, 0 }, +#else + { "DeleteFileA", 
(SYSCALL)0, 0 }, +#endif + +#define osDeleteFileA ((BOOL(WINAPI*)(LPCSTR))aSyscall[9].pCurrent) + +#if defined(SQLITE_WIN32_HAS_WIDE) + { "DeleteFileW", (SYSCALL)DeleteFileW, 0 }, +#else + { "DeleteFileW", (SYSCALL)0, 0 }, +#endif + +#define osDeleteFileW ((BOOL(WINAPI*)(LPCWSTR))aSyscall[10].pCurrent) + +#if SQLITE_OS_WINCE + { "FileTimeToLocalFileTime", (SYSCALL)FileTimeToLocalFileTime, 0 }, +#else + { "FileTimeToLocalFileTime", (SYSCALL)0, 0 }, +#endif + +#define osFileTimeToLocalFileTime ((BOOL(WINAPI*)(CONST FILETIME*, \ + LPFILETIME))aSyscall[11].pCurrent) + +#if SQLITE_OS_WINCE + { "FileTimeToSystemTime", (SYSCALL)FileTimeToSystemTime, 0 }, +#else + { "FileTimeToSystemTime", (SYSCALL)0, 0 }, +#endif + +#define osFileTimeToSystemTime ((BOOL(WINAPI*)(CONST FILETIME*, \ + LPSYSTEMTIME))aSyscall[12].pCurrent) + + { "FlushFileBuffers", (SYSCALL)FlushFileBuffers, 0 }, + +#define osFlushFileBuffers ((BOOL(WINAPI*)(HANDLE))aSyscall[13].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) + { "FormatMessageA", (SYSCALL)FormatMessageA, 0 }, +#else + { "FormatMessageA", (SYSCALL)0, 0 }, +#endif + +#define osFormatMessageA ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPSTR, \ + DWORD,va_list*))aSyscall[14].pCurrent) + +#if defined(SQLITE_WIN32_HAS_WIDE) + { "FormatMessageW", (SYSCALL)FormatMessageW, 0 }, +#else + { "FormatMessageW", (SYSCALL)0, 0 }, +#endif + +#define osFormatMessageW ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPWSTR, \ + DWORD,va_list*))aSyscall[15].pCurrent) + +#if !defined(SQLITE_OMIT_LOAD_EXTENSION) + { "FreeLibrary", (SYSCALL)FreeLibrary, 0 }, +#else + { "FreeLibrary", (SYSCALL)0, 0 }, +#endif + +#define osFreeLibrary ((BOOL(WINAPI*)(HMODULE))aSyscall[16].pCurrent) + + { "GetCurrentProcessId", (SYSCALL)GetCurrentProcessId, 0 }, + +#define osGetCurrentProcessId ((DWORD(WINAPI*)(VOID))aSyscall[17].pCurrent) + +#if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI) + { "GetDiskFreeSpaceA", (SYSCALL)GetDiskFreeSpaceA, 0 }, +#else + { "GetDiskFreeSpaceA", (SYSCALL)0, 0 }, +#endif + +#define osGetDiskFreeSpaceA ((BOOL(WINAPI*)(LPCSTR,LPDWORD,LPDWORD,LPDWORD, \ + LPDWORD))aSyscall[18].pCurrent) + +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) + { "GetDiskFreeSpaceW", (SYSCALL)GetDiskFreeSpaceW, 0 }, +#else + { "GetDiskFreeSpaceW", (SYSCALL)0, 0 }, +#endif + +#define osGetDiskFreeSpaceW ((BOOL(WINAPI*)(LPCWSTR,LPDWORD,LPDWORD,LPDWORD, \ + LPDWORD))aSyscall[19].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) + { "GetFileAttributesA", (SYSCALL)GetFileAttributesA, 0 }, +#else + { "GetFileAttributesA", (SYSCALL)0, 0 }, +#endif + +#define osGetFileAttributesA ((DWORD(WINAPI*)(LPCSTR))aSyscall[20].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) + { "GetFileAttributesW", (SYSCALL)GetFileAttributesW, 0 }, +#else + { "GetFileAttributesW", (SYSCALL)0, 0 }, +#endif + +#define osGetFileAttributesW ((DWORD(WINAPI*)(LPCWSTR))aSyscall[21].pCurrent) + +#if defined(SQLITE_WIN32_HAS_WIDE) + { "GetFileAttributesExW", (SYSCALL)GetFileAttributesExW, 0 }, +#else + { "GetFileAttributesExW", (SYSCALL)0, 0 }, +#endif + +#define osGetFileAttributesExW ((BOOL(WINAPI*)(LPCWSTR,GET_FILEEX_INFO_LEVELS, \ + LPVOID))aSyscall[22].pCurrent) + +#if !SQLITE_OS_WINRT + { "GetFileSize", (SYSCALL)GetFileSize, 0 }, +#else + { "GetFileSize", (SYSCALL)0, 0 }, +#endif + +#define osGetFileSize ((DWORD(WINAPI*)(HANDLE,LPDWORD))aSyscall[23].pCurrent) + +#if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI) + { "GetFullPathNameA", (SYSCALL)GetFullPathNameA, 0 }, +#else + { 
"GetFullPathNameA", (SYSCALL)0, 0 }, +#endif + +#define osGetFullPathNameA ((DWORD(WINAPI*)(LPCSTR,DWORD,LPSTR, \ + LPSTR*))aSyscall[24].pCurrent) + +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) + { "GetFullPathNameW", (SYSCALL)GetFullPathNameW, 0 }, +#else + { "GetFullPathNameW", (SYSCALL)0, 0 }, +#endif + +#define osGetFullPathNameW ((DWORD(WINAPI*)(LPCWSTR,DWORD,LPWSTR, \ + LPWSTR*))aSyscall[25].pCurrent) + + { "GetLastError", (SYSCALL)GetLastError, 0 }, + +#define osGetLastError ((DWORD(WINAPI*)(VOID))aSyscall[26].pCurrent) + +#if !defined(SQLITE_OMIT_LOAD_EXTENSION) +#if SQLITE_OS_WINCE + /* The GetProcAddressA() routine is only available on Windows CE. */ + { "GetProcAddressA", (SYSCALL)GetProcAddressA, 0 }, +#else + /* All other Windows platforms expect GetProcAddress() to take + ** an ANSI string regardless of the _UNICODE setting */ + { "GetProcAddressA", (SYSCALL)GetProcAddress, 0 }, +#endif +#else + { "GetProcAddressA", (SYSCALL)0, 0 }, +#endif + +#define osGetProcAddressA ((FARPROC(WINAPI*)(HMODULE, \ + LPCSTR))aSyscall[27].pCurrent) + +#if !SQLITE_OS_WINRT + { "GetSystemInfo", (SYSCALL)GetSystemInfo, 0 }, +#else + { "GetSystemInfo", (SYSCALL)0, 0 }, +#endif + +#define osGetSystemInfo ((VOID(WINAPI*)(LPSYSTEM_INFO))aSyscall[28].pCurrent) + + { "GetSystemTime", (SYSCALL)GetSystemTime, 0 }, + +#define osGetSystemTime ((VOID(WINAPI*)(LPSYSTEMTIME))aSyscall[29].pCurrent) + +#if !SQLITE_OS_WINCE + { "GetSystemTimeAsFileTime", (SYSCALL)GetSystemTimeAsFileTime, 0 }, +#else + { "GetSystemTimeAsFileTime", (SYSCALL)0, 0 }, +#endif + +#define osGetSystemTimeAsFileTime ((VOID(WINAPI*)( \ + LPFILETIME))aSyscall[30].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) + { "GetTempPathA", (SYSCALL)GetTempPathA, 0 }, +#else + { "GetTempPathA", (SYSCALL)0, 0 }, +#endif + +#define osGetTempPathA ((DWORD(WINAPI*)(DWORD,LPSTR))aSyscall[31].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) + { "GetTempPathW", (SYSCALL)GetTempPathW, 0 }, +#else + { "GetTempPathW", (SYSCALL)0, 0 }, +#endif + +#define osGetTempPathW ((DWORD(WINAPI*)(DWORD,LPWSTR))aSyscall[32].pCurrent) + +#if !SQLITE_OS_WINRT + { "GetTickCount", (SYSCALL)GetTickCount, 0 }, +#else + { "GetTickCount", (SYSCALL)0, 0 }, +#endif + +#define osGetTickCount ((DWORD(WINAPI*)(VOID))aSyscall[33].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_GETVERSIONEX + { "GetVersionExA", (SYSCALL)GetVersionExA, 0 }, +#else + { "GetVersionExA", (SYSCALL)0, 0 }, +#endif + +#define osGetVersionExA ((BOOL(WINAPI*)( \ + LPOSVERSIONINFOA))aSyscall[34].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ + SQLITE_WIN32_GETVERSIONEX + { "GetVersionExW", (SYSCALL)GetVersionExW, 0 }, +#else + { "GetVersionExW", (SYSCALL)0, 0 }, +#endif + +#define osGetVersionExW ((BOOL(WINAPI*)( \ + LPOSVERSIONINFOW))aSyscall[35].pCurrent) + + { "HeapAlloc", (SYSCALL)HeapAlloc, 0 }, + +#define osHeapAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD, \ + SIZE_T))aSyscall[36].pCurrent) + +#if !SQLITE_OS_WINRT + { "HeapCreate", (SYSCALL)HeapCreate, 0 }, +#else + { "HeapCreate", (SYSCALL)0, 0 }, +#endif + +#define osHeapCreate ((HANDLE(WINAPI*)(DWORD,SIZE_T, \ + SIZE_T))aSyscall[37].pCurrent) + +#if !SQLITE_OS_WINRT + { "HeapDestroy", (SYSCALL)HeapDestroy, 0 }, +#else + { "HeapDestroy", (SYSCALL)0, 0 }, +#endif + +#define osHeapDestroy ((BOOL(WINAPI*)(HANDLE))aSyscall[38].pCurrent) + + { "HeapFree", (SYSCALL)HeapFree, 0 }, + +#define osHeapFree ((BOOL(WINAPI*)(HANDLE,DWORD,LPVOID))aSyscall[39].pCurrent) + + { 
"HeapReAlloc", (SYSCALL)HeapReAlloc, 0 }, + +#define osHeapReAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD,LPVOID, \ + SIZE_T))aSyscall[40].pCurrent) + + { "HeapSize", (SYSCALL)HeapSize, 0 }, + +#define osHeapSize ((SIZE_T(WINAPI*)(HANDLE,DWORD, \ + LPCVOID))aSyscall[41].pCurrent) + +#if !SQLITE_OS_WINRT + { "HeapValidate", (SYSCALL)HeapValidate, 0 }, +#else + { "HeapValidate", (SYSCALL)0, 0 }, +#endif + +#define osHeapValidate ((BOOL(WINAPI*)(HANDLE,DWORD, \ + LPCVOID))aSyscall[42].pCurrent) + +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + { "HeapCompact", (SYSCALL)HeapCompact, 0 }, +#else + { "HeapCompact", (SYSCALL)0, 0 }, +#endif + +#define osHeapCompact ((UINT(WINAPI*)(HANDLE,DWORD))aSyscall[43].pCurrent) + +#if defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_OMIT_LOAD_EXTENSION) + { "LoadLibraryA", (SYSCALL)LoadLibraryA, 0 }, +#else + { "LoadLibraryA", (SYSCALL)0, 0 }, +#endif + +#define osLoadLibraryA ((HMODULE(WINAPI*)(LPCSTR))aSyscall[44].pCurrent) + +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ + !defined(SQLITE_OMIT_LOAD_EXTENSION) + { "LoadLibraryW", (SYSCALL)LoadLibraryW, 0 }, +#else + { "LoadLibraryW", (SYSCALL)0, 0 }, +#endif + +#define osLoadLibraryW ((HMODULE(WINAPI*)(LPCWSTR))aSyscall[45].pCurrent) + +#if !SQLITE_OS_WINRT + { "LocalFree", (SYSCALL)LocalFree, 0 }, +#else + { "LocalFree", (SYSCALL)0, 0 }, +#endif + +#define osLocalFree ((HLOCAL(WINAPI*)(HLOCAL))aSyscall[46].pCurrent) + +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + { "LockFile", (SYSCALL)LockFile, 0 }, +#else + { "LockFile", (SYSCALL)0, 0 }, +#endif + +#ifndef osLockFile +#define osLockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ + DWORD))aSyscall[47].pCurrent) +#endif + +#if !SQLITE_OS_WINCE + { "LockFileEx", (SYSCALL)LockFileEx, 0 }, +#else + { "LockFileEx", (SYSCALL)0, 0 }, +#endif + +#ifndef osLockFileEx +#define osLockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD,DWORD, \ + LPOVERLAPPED))aSyscall[48].pCurrent) +#endif + +#if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && \ + (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)) + { "MapViewOfFile", (SYSCALL)MapViewOfFile, 0 }, +#else + { "MapViewOfFile", (SYSCALL)0, 0 }, +#endif + +#define osMapViewOfFile ((LPVOID(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ + SIZE_T))aSyscall[49].pCurrent) + + { "MultiByteToWideChar", (SYSCALL)MultiByteToWideChar, 0 }, + +#define osMultiByteToWideChar ((int(WINAPI*)(UINT,DWORD,LPCSTR,int,LPWSTR, \ + int))aSyscall[50].pCurrent) + + { "QueryPerformanceCounter", (SYSCALL)QueryPerformanceCounter, 0 }, + +#define osQueryPerformanceCounter ((BOOL(WINAPI*)( \ + LARGE_INTEGER*))aSyscall[51].pCurrent) + + { "ReadFile", (SYSCALL)ReadFile, 0 }, + +#define osReadFile ((BOOL(WINAPI*)(HANDLE,LPVOID,DWORD,LPDWORD, \ + LPOVERLAPPED))aSyscall[52].pCurrent) + + { "SetEndOfFile", (SYSCALL)SetEndOfFile, 0 }, + +#define osSetEndOfFile ((BOOL(WINAPI*)(HANDLE))aSyscall[53].pCurrent) + +#if !SQLITE_OS_WINRT + { "SetFilePointer", (SYSCALL)SetFilePointer, 0 }, +#else + { "SetFilePointer", (SYSCALL)0, 0 }, +#endif + +#define osSetFilePointer ((DWORD(WINAPI*)(HANDLE,LONG,PLONG, \ + DWORD))aSyscall[54].pCurrent) + +#if !SQLITE_OS_WINRT + { "Sleep", (SYSCALL)Sleep, 0 }, +#else + { "Sleep", (SYSCALL)0, 0 }, +#endif + +#define osSleep ((VOID(WINAPI*)(DWORD))aSyscall[55].pCurrent) + + { "SystemTimeToFileTime", (SYSCALL)SystemTimeToFileTime, 0 }, + +#define osSystemTimeToFileTime ((BOOL(WINAPI*)(CONST SYSTEMTIME*, \ + LPFILETIME))aSyscall[56].pCurrent) + +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT + { "UnlockFile", (SYSCALL)UnlockFile, 0 }, +#else + { 
"UnlockFile", (SYSCALL)0, 0 }, +#endif + +#ifndef osUnlockFile +#define osUnlockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ + DWORD))aSyscall[57].pCurrent) +#endif + +#if !SQLITE_OS_WINCE + { "UnlockFileEx", (SYSCALL)UnlockFileEx, 0 }, +#else + { "UnlockFileEx", (SYSCALL)0, 0 }, +#endif + +#define osUnlockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ + LPOVERLAPPED))aSyscall[58].pCurrent) + +#if SQLITE_OS_WINCE || !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 + { "UnmapViewOfFile", (SYSCALL)UnmapViewOfFile, 0 }, +#else + { "UnmapViewOfFile", (SYSCALL)0, 0 }, +#endif + +#define osUnmapViewOfFile ((BOOL(WINAPI*)(LPCVOID))aSyscall[59].pCurrent) + + { "WideCharToMultiByte", (SYSCALL)WideCharToMultiByte, 0 }, + +#define osWideCharToMultiByte ((int(WINAPI*)(UINT,DWORD,LPCWSTR,int,LPSTR,int, \ + LPCSTR,LPBOOL))aSyscall[60].pCurrent) + + { "WriteFile", (SYSCALL)WriteFile, 0 }, + +#define osWriteFile ((BOOL(WINAPI*)(HANDLE,LPCVOID,DWORD,LPDWORD, \ + LPOVERLAPPED))aSyscall[61].pCurrent) + +#if SQLITE_OS_WINRT + { "CreateEventExW", (SYSCALL)CreateEventExW, 0 }, +#else + { "CreateEventExW", (SYSCALL)0, 0 }, +#endif + +#define osCreateEventExW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,LPCWSTR, \ + DWORD,DWORD))aSyscall[62].pCurrent) + +#if !SQLITE_OS_WINRT + { "WaitForSingleObject", (SYSCALL)WaitForSingleObject, 0 }, +#else + { "WaitForSingleObject", (SYSCALL)0, 0 }, +#endif + +#define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \ + DWORD))aSyscall[63].pCurrent) + +#if !SQLITE_OS_WINCE + { "WaitForSingleObjectEx", (SYSCALL)WaitForSingleObjectEx, 0 }, +#else + { "WaitForSingleObjectEx", (SYSCALL)0, 0 }, +#endif + +#define osWaitForSingleObjectEx ((DWORD(WINAPI*)(HANDLE,DWORD, \ + BOOL))aSyscall[64].pCurrent) + +#if SQLITE_OS_WINRT + { "SetFilePointerEx", (SYSCALL)SetFilePointerEx, 0 }, +#else + { "SetFilePointerEx", (SYSCALL)0, 0 }, +#endif + +#define osSetFilePointerEx ((BOOL(WINAPI*)(HANDLE,LARGE_INTEGER, \ + PLARGE_INTEGER,DWORD))aSyscall[65].pCurrent) + +#if SQLITE_OS_WINRT + { "GetFileInformationByHandleEx", (SYSCALL)GetFileInformationByHandleEx, 0 }, +#else + { "GetFileInformationByHandleEx", (SYSCALL)0, 0 }, +#endif + +#define osGetFileInformationByHandleEx ((BOOL(WINAPI*)(HANDLE, \ + FILE_INFO_BY_HANDLE_CLASS,LPVOID,DWORD))aSyscall[66].pCurrent) + +#if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) + { "MapViewOfFileFromApp", (SYSCALL)MapViewOfFileFromApp, 0 }, +#else + { "MapViewOfFileFromApp", (SYSCALL)0, 0 }, +#endif + +#define osMapViewOfFileFromApp ((LPVOID(WINAPI*)(HANDLE,ULONG,ULONG64, \ + SIZE_T))aSyscall[67].pCurrent) + +#if SQLITE_OS_WINRT + { "CreateFile2", (SYSCALL)CreateFile2, 0 }, +#else + { "CreateFile2", (SYSCALL)0, 0 }, +#endif + +#define osCreateFile2 ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD,DWORD, \ + LPCREATEFILE2_EXTENDED_PARAMETERS))aSyscall[68].pCurrent) + +#if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_LOAD_EXTENSION) + { "LoadPackagedLibrary", (SYSCALL)LoadPackagedLibrary, 0 }, +#else + { "LoadPackagedLibrary", (SYSCALL)0, 0 }, +#endif + +#define osLoadPackagedLibrary ((HMODULE(WINAPI*)(LPCWSTR, \ + DWORD))aSyscall[69].pCurrent) + +#if SQLITE_OS_WINRT + { "GetTickCount64", (SYSCALL)GetTickCount64, 0 }, +#else + { "GetTickCount64", (SYSCALL)0, 0 }, +#endif + +#define osGetTickCount64 ((ULONGLONG(WINAPI*)(VOID))aSyscall[70].pCurrent) + +#if SQLITE_OS_WINRT + { "GetNativeSystemInfo", (SYSCALL)GetNativeSystemInfo, 0 }, +#else + { "GetNativeSystemInfo", (SYSCALL)0, 0 }, +#endif + +#define osGetNativeSystemInfo ((VOID(WINAPI*)( \ + 
LPSYSTEM_INFO))aSyscall[71].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_ANSI)
+  { "OutputDebugStringA",      (SYSCALL)OutputDebugStringA,      0 },
+#else
+  { "OutputDebugStringA",      (SYSCALL)0,                       0 },
+#endif
+
+#define osOutputDebugStringA ((VOID(WINAPI*)(LPCSTR))aSyscall[72].pCurrent)
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+  { "OutputDebugStringW",      (SYSCALL)OutputDebugStringW,      0 },
+#else
+  { "OutputDebugStringW",      (SYSCALL)0,                       0 },
+#endif
+
+#define osOutputDebugStringW ((VOID(WINAPI*)(LPCWSTR))aSyscall[73].pCurrent)
+
+  { "GetProcessHeap",          (SYSCALL)GetProcessHeap,          0 },
+
+#define osGetProcessHeap ((HANDLE(WINAPI*)(VOID))aSyscall[74].pCurrent)
+
+#if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)
+  { "CreateFileMappingFromApp", (SYSCALL)CreateFileMappingFromApp, 0 },
+#else
+  { "CreateFileMappingFromApp", (SYSCALL)0,                      0 },
+#endif
+
+#define osCreateFileMappingFromApp ((HANDLE(WINAPI*)(HANDLE, \
+        LPSECURITY_ATTRIBUTES,ULONG,ULONG64,LPCWSTR))aSyscall[75].pCurrent)
+
+/*
+** NOTE: On some sub-platforms, the InterlockedCompareExchange "function"
+**       is really just a macro that uses a compiler intrinsic (e.g. x64).
+**       So do not try to make this into a redefinable interface.
+*/
+#if defined(InterlockedCompareExchange)
+  { "InterlockedCompareExchange", (SYSCALL)0,                    0 },
+
+#define osInterlockedCompareExchange InterlockedCompareExchange
+#else
+  { "InterlockedCompareExchange", (SYSCALL)InterlockedCompareExchange, 0 },
+
+#define osInterlockedCompareExchange ((LONG(WINAPI*)(LONG \
+        SQLITE_WIN32_VOLATILE*, LONG,LONG))aSyscall[76].pCurrent)
+#endif /* defined(InterlockedCompareExchange) */
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
+  { "UuidCreate",              (SYSCALL)UuidCreate,              0 },
+#else
+  { "UuidCreate",              (SYSCALL)0,                       0 },
+#endif
+
+#define osUuidCreate ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[77].pCurrent)
+
+#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID
+  { "UuidCreateSequential",    (SYSCALL)UuidCreateSequential,    0 },
+#else
+  { "UuidCreateSequential",    (SYSCALL)0,                       0 },
+#endif
+
+#define osUuidCreateSequential \
+        ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[78].pCurrent)
+
+#if !defined(SQLITE_NO_SYNC) && SQLITE_MAX_MMAP_SIZE>0
+  { "FlushViewOfFile",         (SYSCALL)FlushViewOfFile,         0 },
+#else
+  { "FlushViewOfFile",         (SYSCALL)0,                       0 },
+#endif
+
+#define osFlushViewOfFile \
+        ((BOOL(WINAPI*)(LPCVOID,SIZE_T))aSyscall[79].pCurrent)
+
+}; /* End of the overrideable system calls */
+
+/*
+** This is the xSetSystemCall() method of sqlite3_vfs for all of the
+** "win32" VFSes.  Return SQLITE_OK upon successfully updating the
+** system call pointer, or SQLITE_NOTFOUND if there is no configurable
+** system call named zName.
+*/
+static int winSetSystemCall(
+  sqlite3_vfs *pNotUsed,        /* The VFS pointer.  Not used */
+  const char *zName,            /* Name of system call to override */
+  sqlite3_syscall_ptr pNewFunc  /* Pointer to new system call value */
+){
+  unsigned int i;
+  int rc = SQLITE_NOTFOUND;
+
+  UNUSED_PARAMETER(pNotUsed);
+  if( zName==0 ){
+    /* If no zName is given, restore all system calls to their default
+    ** settings and return NULL
+    */
+    rc = SQLITE_OK;
+    for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+      if( aSyscall[i].pDefault ){
+        aSyscall[i].pCurrent = aSyscall[i].pDefault;
+      }
+    }
+  }else{
+    /* If zName is specified, operate on only the one system call
+    ** specified.
+    */
+    for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+      if( strcmp(zName, aSyscall[i].zName)==0 ){
+        if( aSyscall[i].pDefault==0 ){
+          aSyscall[i].pDefault = aSyscall[i].pCurrent;
+        }
+        rc = SQLITE_OK;
+        if( pNewFunc==0 ) pNewFunc = aSyscall[i].pDefault;
+        aSyscall[i].pCurrent = pNewFunc;
+        break;
+      }
+    }
+  }
+  return rc;
+}
+
+/*
+** Return the value of a system call.  Return NULL if zName is not a
+** recognized system call name.  NULL is also returned if the system call
+** is currently undefined.
+*/
+static sqlite3_syscall_ptr winGetSystemCall(
+  sqlite3_vfs *pNotUsed,
+  const char *zName
+){
+  unsigned int i;
+
+  UNUSED_PARAMETER(pNotUsed);
+  for(i=0; i<sizeof(aSyscall)/sizeof(aSyscall[0]); i++){
+    if( strcmp(zName, aSyscall[i].zName)==0 ) return aSyscall[i].pCurrent;
+  }
+  return 0;
+}
+
+/*
+** Return the name of the first system call after zName.  If zName==NULL
+** then return the name of the first system call.  Return NULL if zName
+** is the last system call or if zName is not the name of a valid
+** system call.
+*/
+static const char *winNextSystemCall(sqlite3_vfs *p, const char *zName){
+  int i = -1;
+
+  UNUSED_PARAMETER(p);
+  if( zName ){
+    for(i=0; i<ArraySize(aSyscall)-1; i++){
+      if( strcmp(zName, aSyscall[i].zName)==0 ) break;
+    }
+  }
+  for(i++; i<ArraySize(aSyscall); i++){
+    if( aSyscall[i].pCurrent!=0 ) return aSyscall[i].zName;
+  }
+  return 0;
+}
+
+/*
+** This function outputs the specified (ANSI) string to the Win32 debugger
+** (if available).
+*/
+SQLITE_API void sqlite3_win32_write_debug(const char *zBuf, int nBuf){
+  char zDbgBuf[SQLITE_WIN32_DBG_BUF_SIZE];
+  int nMin = MIN(nBuf, (SQLITE_WIN32_DBG_BUF_SIZE - 1)); /* may be negative. */
+  if( nMin<-1 ) nMin = -1; /* all negative values become -1. */
+  assert( nMin==-1 || nMin==0 || nMin<SQLITE_WIN32_DBG_BUF_SIZE );
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !zBuf ){
+    (void)SQLITE_MISUSE_BKPT;
+    return;
+  }
+#endif
+#if defined(SQLITE_WIN32_HAS_ANSI)
+  if( nMin>0 ){
+    memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE);
+    memcpy(zDbgBuf, zBuf, nMin);
+    osOutputDebugStringA(zDbgBuf);
+  }else{
+    osOutputDebugStringA(zBuf);
+  }
+#elif defined(SQLITE_WIN32_HAS_WIDE)
+  memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE);
+  if ( osMultiByteToWideChar(
+          osAreFileApisANSI() ?
CP_ACP : CP_OEMCP, 0, zBuf, + nMin, (LPWSTR)zDbgBuf, SQLITE_WIN32_DBG_BUF_SIZE/sizeof(WCHAR))<=0 ){ + return; + } + osOutputDebugStringW((LPCWSTR)zDbgBuf); +#else + if( nMin>0 ){ + memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); + memcpy(zDbgBuf, zBuf, nMin); + fprintf(stderr, "%s", zDbgBuf); + }else{ + fprintf(stderr, "%s", zBuf); + } +#endif +} + +/* +** The following routine suspends the current thread for at least ms +** milliseconds. This is equivalent to the Win32 Sleep() interface. +*/ +#if SQLITE_OS_WINRT +static HANDLE sleepObj = NULL; +#endif + +SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds){ +#if SQLITE_OS_WINRT + if ( sleepObj==NULL ){ + sleepObj = osCreateEventExW(NULL, NULL, CREATE_EVENT_MANUAL_RESET, + SYNCHRONIZE); + } + assert( sleepObj!=NULL ); + osWaitForSingleObjectEx(sleepObj, milliseconds, FALSE); +#else + osSleep(milliseconds); +#endif +} + +#if SQLITE_MAX_WORKER_THREADS>0 && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \ + SQLITE_THREADSAFE>0 +SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject){ + DWORD rc; + while( (rc = osWaitForSingleObjectEx(hObject, INFINITE, + TRUE))==WAIT_IO_COMPLETION ){} + return rc; +} +#endif + +/* +** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, +** or WinCE. Return false (zero) for Win95, Win98, or WinME. +** +** Here is an interesting observation: Win95, Win98, and WinME lack +** the LockFileEx() API. But we can still statically link against that +** API as long as we don't call it when running Win95/98/ME. A call to +** this routine is used to determine if the host is Win95/98/ME or +** WinNT/2K/XP so that we will know whether or not we can safely call +** the LockFileEx() API. +*/ + +#if !SQLITE_WIN32_GETVERSIONEX +# define osIsNT() (1) +#elif SQLITE_OS_WINCE || SQLITE_OS_WINRT || !defined(SQLITE_WIN32_HAS_ANSI) +# define osIsNT() (1) +#elif !defined(SQLITE_WIN32_HAS_WIDE) +# define osIsNT() (0) +#else +# define osIsNT() ((sqlite3_os_type==2) || sqlite3_win32_is_nt()) +#endif + +/* +** This function determines if the machine is running a version of Windows +** based on the NT kernel. +*/ +SQLITE_API int sqlite3_win32_is_nt(void){ +#if SQLITE_OS_WINRT + /* + ** NOTE: The WinRT sub-platform is always assumed to be based on the NT + ** kernel. + */ + return 1; +#elif SQLITE_WIN32_GETVERSIONEX + if( osInterlockedCompareExchange(&sqlite3_os_type, 0, 0)==0 ){ +#if defined(SQLITE_WIN32_HAS_ANSI) + OSVERSIONINFOA sInfo; + sInfo.dwOSVersionInfoSize = sizeof(sInfo); + osGetVersionExA(&sInfo); + osInterlockedCompareExchange(&sqlite3_os_type, + (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0); +#elif defined(SQLITE_WIN32_HAS_WIDE) + OSVERSIONINFOW sInfo; + sInfo.dwOSVersionInfoSize = sizeof(sInfo); + osGetVersionExW(&sInfo); + osInterlockedCompareExchange(&sqlite3_os_type, + (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0); +#endif + } + return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; +#elif SQLITE_TEST + return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; +#else + /* + ** NOTE: All sub-platforms where the GetVersionEx[AW] functions are + ** deprecated are always assumed to be based on the NT kernel. + */ + return 1; +#endif +} + +#ifdef SQLITE_WIN32_MALLOC +/* +** Allocate nBytes of memory. 
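+** This is the xMalloc member of the Win32 sqlite3_mem_methods object; it
+** forwards to HeapAlloc() on the heap selected by winMemInit() and logs
+** SQLITE_NOMEM if the allocation fails.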
+*/ +static void *winMemMalloc(int nBytes){ + HANDLE hHeap; + void *p; + + winMemAssertMagic(); + hHeap = winMemGetHeap(); + assert( hHeap!=0 ); + assert( hHeap!=INVALID_HANDLE_VALUE ); +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) + assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); +#endif + assert( nBytes>=0 ); + p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes); + if( !p ){ + sqlite3_log(SQLITE_NOMEM, "failed to HeapAlloc %u bytes (%lu), heap=%p", + nBytes, osGetLastError(), (void*)hHeap); + } + return p; +} + +/* +** Free memory. +*/ +static void winMemFree(void *pPrior){ + HANDLE hHeap; + + winMemAssertMagic(); + hHeap = winMemGetHeap(); + assert( hHeap!=0 ); + assert( hHeap!=INVALID_HANDLE_VALUE ); +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) + assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ); +#endif + if( !pPrior ) return; /* Passing NULL to HeapFree is undefined. */ + if( !osHeapFree(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ){ + sqlite3_log(SQLITE_NOMEM, "failed to HeapFree block %p (%lu), heap=%p", + pPrior, osGetLastError(), (void*)hHeap); + } +} + +/* +** Change the size of an existing memory allocation +*/ +static void *winMemRealloc(void *pPrior, int nBytes){ + HANDLE hHeap; + void *p; + + winMemAssertMagic(); + hHeap = winMemGetHeap(); + assert( hHeap!=0 ); + assert( hHeap!=INVALID_HANDLE_VALUE ); +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) + assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ); +#endif + assert( nBytes>=0 ); + if( !pPrior ){ + p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes); + }else{ + p = osHeapReAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior, (SIZE_T)nBytes); + } + if( !p ){ + sqlite3_log(SQLITE_NOMEM, "failed to %s %u bytes (%lu), heap=%p", + pPrior ? "HeapReAlloc" : "HeapAlloc", nBytes, osGetLastError(), + (void*)hHeap); + } + return p; +} + +/* +** Return the size of an outstanding allocation, in bytes. +*/ +static int winMemSize(void *p){ + HANDLE hHeap; + SIZE_T n; + + winMemAssertMagic(); + hHeap = winMemGetHeap(); + assert( hHeap!=0 ); + assert( hHeap!=INVALID_HANDLE_VALUE ); +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) + assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, p) ); +#endif + if( !p ) return 0; + n = osHeapSize(hHeap, SQLITE_WIN32_HEAP_FLAGS, p); + if( n==(SIZE_T)-1 ){ + sqlite3_log(SQLITE_NOMEM, "failed to HeapSize block %p (%lu), heap=%p", + p, osGetLastError(), (void*)hHeap); + return 0; + } + return (int)n; +} + +/* +** Round up a request size to the next valid allocation size. +*/ +static int winMemRoundup(int n){ + return n; +} + +/* +** Initialize this module. 
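+** If SQLITE_WIN32_HEAP_CREATE is enabled (and not building for WinRT) an
+** isolated heap is created with HeapCreate(); otherwise the default
+** process heap from GetProcessHeap() is used and is never destroyed by
+** winMemShutdown().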
+*/ +static int winMemInit(void *pAppData){ + winMemData *pWinMemData = (winMemData *)pAppData; + + if( !pWinMemData ) return SQLITE_ERROR; + assert( pWinMemData->magic1==WINMEM_MAGIC1 ); + assert( pWinMemData->magic2==WINMEM_MAGIC2 ); + +#if !SQLITE_OS_WINRT && SQLITE_WIN32_HEAP_CREATE + if( !pWinMemData->hHeap ){ + DWORD dwInitialSize = SQLITE_WIN32_HEAP_INIT_SIZE; + DWORD dwMaximumSize = (DWORD)sqlite3GlobalConfig.nHeap; + if( dwMaximumSize==0 ){ + dwMaximumSize = SQLITE_WIN32_HEAP_MAX_SIZE; + }else if( dwInitialSize>dwMaximumSize ){ + dwInitialSize = dwMaximumSize; + } + pWinMemData->hHeap = osHeapCreate(SQLITE_WIN32_HEAP_FLAGS, + dwInitialSize, dwMaximumSize); + if( !pWinMemData->hHeap ){ + sqlite3_log(SQLITE_NOMEM, + "failed to HeapCreate (%lu), flags=%u, initSize=%lu, maxSize=%lu", + osGetLastError(), SQLITE_WIN32_HEAP_FLAGS, dwInitialSize, + dwMaximumSize); + return SQLITE_NOMEM_BKPT; + } + pWinMemData->bOwned = TRUE; + assert( pWinMemData->bOwned ); + } +#else + pWinMemData->hHeap = osGetProcessHeap(); + if( !pWinMemData->hHeap ){ + sqlite3_log(SQLITE_NOMEM, + "failed to GetProcessHeap (%lu)", osGetLastError()); + return SQLITE_NOMEM_BKPT; + } + pWinMemData->bOwned = FALSE; + assert( !pWinMemData->bOwned ); +#endif + assert( pWinMemData->hHeap!=0 ); + assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE ); +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) + assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); +#endif + return SQLITE_OK; +} + +/* +** Deinitialize this module. +*/ +static void winMemShutdown(void *pAppData){ + winMemData *pWinMemData = (winMemData *)pAppData; + + if( !pWinMemData ) return; + assert( pWinMemData->magic1==WINMEM_MAGIC1 ); + assert( pWinMemData->magic2==WINMEM_MAGIC2 ); + + if( pWinMemData->hHeap ){ + assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE ); +#if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) + assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); +#endif + if( pWinMemData->bOwned ){ + if( !osHeapDestroy(pWinMemData->hHeap) ){ + sqlite3_log(SQLITE_NOMEM, "failed to HeapDestroy (%lu), heap=%p", + osGetLastError(), (void*)pWinMemData->hHeap); + } + pWinMemData->bOwned = FALSE; + } + pWinMemData->hHeap = NULL; + } +} + +/* +** Populate the low-level memory allocation function pointers in +** sqlite3GlobalConfig.m with pointers to the routines in this file. The +** arguments specify the block of memory to manage. +** +** This routine is only called by sqlite3_config(), and therefore +** is not required to be threadsafe (it is not). +*/ +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void){ + static const sqlite3_mem_methods winMemMethods = { + winMemMalloc, + winMemFree, + winMemRealloc, + winMemSize, + winMemRoundup, + winMemInit, + winMemShutdown, + &win_mem_data + }; + return &winMemMethods; +} + +SQLITE_PRIVATE void sqlite3MemSetDefault(void){ + sqlite3_config(SQLITE_CONFIG_MALLOC, sqlite3MemGetWin32()); +} +#endif /* SQLITE_WIN32_MALLOC */ + +/* +** Convert a UTF-8 string to Microsoft Unicode. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). 
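+** The caller is responsible for releasing the returned buffer with
+** sqlite3_free().  A NULL return indicates either a conversion failure
+** or an out-of-memory condition.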
+*/ +static LPWSTR winUtf8ToUnicode(const char *zText){ + int nChar; + LPWSTR zWideText; + + nChar = osMultiByteToWideChar(CP_UTF8, 0, zText, -1, NULL, 0); + if( nChar==0 ){ + return 0; + } + zWideText = sqlite3MallocZero( nChar*sizeof(WCHAR) ); + if( zWideText==0 ){ + return 0; + } + nChar = osMultiByteToWideChar(CP_UTF8, 0, zText, -1, zWideText, + nChar); + if( nChar==0 ){ + sqlite3_free(zWideText); + zWideText = 0; + } + return zWideText; +} + +/* +** Convert a Microsoft Unicode string to UTF-8. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). +*/ +static char *winUnicodeToUtf8(LPCWSTR zWideText){ + int nByte; + char *zText; + + nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideText, -1, 0, 0, 0, 0); + if( nByte == 0 ){ + return 0; + } + zText = sqlite3MallocZero( nByte ); + if( zText==0 ){ + return 0; + } + nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideText, -1, zText, nByte, + 0, 0); + if( nByte == 0 ){ + sqlite3_free(zText); + zText = 0; + } + return zText; +} + +/* +** Convert an ANSI string to Microsoft Unicode, using the ANSI or OEM +** code page. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). +*/ +static LPWSTR winMbcsToUnicode(const char *zText, int useAnsi){ + int nByte; + LPWSTR zMbcsText; + int codepage = useAnsi ? CP_ACP : CP_OEMCP; + + nByte = osMultiByteToWideChar(codepage, 0, zText, -1, NULL, + 0)*sizeof(WCHAR); + if( nByte==0 ){ + return 0; + } + zMbcsText = sqlite3MallocZero( nByte*sizeof(WCHAR) ); + if( zMbcsText==0 ){ + return 0; + } + nByte = osMultiByteToWideChar(codepage, 0, zText, -1, zMbcsText, + nByte); + if( nByte==0 ){ + sqlite3_free(zMbcsText); + zMbcsText = 0; + } + return zMbcsText; +} + +/* +** Convert a Microsoft Unicode string to a multi-byte character string, +** using the ANSI or OEM code page. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). +*/ +static char *winUnicodeToMbcs(LPCWSTR zWideText, int useAnsi){ + int nByte; + char *zText; + int codepage = useAnsi ? CP_ACP : CP_OEMCP; + + nByte = osWideCharToMultiByte(codepage, 0, zWideText, -1, 0, 0, 0, 0); + if( nByte == 0 ){ + return 0; + } + zText = sqlite3MallocZero( nByte ); + if( zText==0 ){ + return 0; + } + nByte = osWideCharToMultiByte(codepage, 0, zWideText, -1, zText, + nByte, 0, 0); + if( nByte == 0 ){ + sqlite3_free(zText); + zText = 0; + } + return zText; +} + +/* +** Convert a multi-byte character string to UTF-8. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). +*/ +static char *winMbcsToUtf8(const char *zText, int useAnsi){ + char *zTextUtf8; + LPWSTR zTmpWide; + + zTmpWide = winMbcsToUnicode(zText, useAnsi); + if( zTmpWide==0 ){ + return 0; + } + zTextUtf8 = winUnicodeToUtf8(zTmpWide); + sqlite3_free(zTmpWide); + return zTextUtf8; +} + +/* +** Convert a UTF-8 string to a multi-byte character string. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). +*/ +static char *winUtf8ToMbcs(const char *zText, int useAnsi){ + char *zTextMbcs; + LPWSTR zTmpWide; + + zTmpWide = winUtf8ToUnicode(zText); + if( zTmpWide==0 ){ + return 0; + } + zTextMbcs = winUnicodeToMbcs(zTmpWide, useAnsi); + sqlite3_free(zTmpWide); + return zTextMbcs; +} + +/* +** This is a public wrapper for the winUtf8ToUnicode() function. 
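+** A minimal usage sketch (zPathUtf8 is a hypothetical UTF-8 string; the
+** result must be released with sqlite3_free() when no longer needed):
+**
+**     LPWSTR zWide = sqlite3_win32_utf8_to_unicode(zPathUtf8);
+**     if( zWide==0 ) return SQLITE_NOMEM;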
+*/ +SQLITE_API LPWSTR sqlite3_win32_utf8_to_unicode(const char *zText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUtf8ToUnicode(zText); +} + +/* +** This is a public wrapper for the winUnicodeToUtf8() function. +*/ +SQLITE_API char *sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zWideText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUnicodeToUtf8(zWideText); +} + +/* +** This is a public wrapper for the winMbcsToUtf8() function. +*/ +SQLITE_API char *sqlite3_win32_mbcs_to_utf8(const char *zText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winMbcsToUtf8(zText, osAreFileApisANSI()); +} + +/* +** This is a public wrapper for the winMbcsToUtf8() function. +*/ +SQLITE_API char *sqlite3_win32_mbcs_to_utf8_v2(const char *zText, int useAnsi){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winMbcsToUtf8(zText, useAnsi); +} + +/* +** This is a public wrapper for the winUtf8ToMbcs() function. +*/ +SQLITE_API char *sqlite3_win32_utf8_to_mbcs(const char *zText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUtf8ToMbcs(zText, osAreFileApisANSI()); +} + +/* +** This is a public wrapper for the winUtf8ToMbcs() function. +*/ +SQLITE_API char *sqlite3_win32_utf8_to_mbcs_v2(const char *zText, int useAnsi){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUtf8ToMbcs(zText, useAnsi); +} + +/* +** This function sets the data directory or the temporary directory based on +** the provided arguments. The type argument must be 1 in order to set the +** data directory or 2 in order to set the temporary directory. The zValue +** argument is the name of the directory to use. The return value will be +** SQLITE_OK if successful. +*/ +SQLITE_API int sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){ + char **ppDirectory = 0; +#ifndef SQLITE_OMIT_AUTOINIT + int rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){ + ppDirectory = &sqlite3_data_directory; + }else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){ + ppDirectory = &sqlite3_temp_directory; + } + assert( !ppDirectory || type==SQLITE_WIN32_DATA_DIRECTORY_TYPE + || type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE + ); + assert( !ppDirectory || sqlite3MemdebugHasType(*ppDirectory, MEMTYPE_HEAP) ); + if( ppDirectory ){ + char *zValueUtf8 = 0; + if( zValue && zValue[0] ){ + zValueUtf8 = winUnicodeToUtf8(zValue); + if ( zValueUtf8==0 ){ + return SQLITE_NOMEM_BKPT; + } + } + sqlite3_free(*ppDirectory); + *ppDirectory = zValueUtf8; + return SQLITE_OK; + } + return SQLITE_ERROR; +} + +/* +** The return value of winGetLastErrorMsg +** is zero if the error message fits in the buffer, or non-zero +** otherwise (if the message was truncated). 
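+** The message text is obtained from FormatMessage, converted to UTF-8 if
+** necessary, and copied into zBuf (at most nBuf bytes, zero-terminated).
+** If no message text is available, the raw error number is formatted
+** into zBuf instead.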
+*/ +static int winGetLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){ + /* FormatMessage returns 0 on failure. Otherwise it + ** returns the number of TCHARs written to the output + ** buffer, excluding the terminating null char. + */ + DWORD dwLen = 0; + char *zOut = 0; + + if( osIsNT() ){ +#if SQLITE_OS_WINRT + WCHAR zTempWide[SQLITE_WIN32_MAX_ERRMSG_CHARS+1]; + dwLen = osFormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + lastErrno, + 0, + zTempWide, + SQLITE_WIN32_MAX_ERRMSG_CHARS, + 0); +#else + LPWSTR zTempWide = NULL; + dwLen = osFormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + lastErrno, + 0, + (LPWSTR) &zTempWide, + 0, + 0); +#endif + if( dwLen > 0 ){ + /* allocate a buffer and convert to UTF8 */ + sqlite3BeginBenignMalloc(); + zOut = winUnicodeToUtf8(zTempWide); + sqlite3EndBenignMalloc(); +#if !SQLITE_OS_WINRT + /* free the system buffer allocated by FormatMessage */ + osLocalFree(zTempWide); +#endif + } + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + char *zTemp = NULL; + dwLen = osFormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + lastErrno, + 0, + (LPSTR) &zTemp, + 0, + 0); + if( dwLen > 0 ){ + /* allocate a buffer and convert to UTF8 */ + sqlite3BeginBenignMalloc(); + zOut = winMbcsToUtf8(zTemp, osAreFileApisANSI()); + sqlite3EndBenignMalloc(); + /* free the system buffer allocated by FormatMessage */ + osLocalFree(zTemp); + } + } +#endif + if( 0 == dwLen ){ + sqlite3_snprintf(nBuf, zBuf, "OsError 0x%lx (%lu)", lastErrno, lastErrno); + }else{ + /* copy a maximum of nBuf chars to output buffer */ + sqlite3_snprintf(nBuf, zBuf, "%s", zOut); + /* free the UTF8 buffer */ + sqlite3_free(zOut); + } + return 0; +} + +/* +** +** This function - winLogErrorAtLine() - is only ever called via the macro +** winLogError(). +** +** This routine is invoked after an error occurs in an OS function. +** It logs a message using sqlite3_log() containing the current value of +** error code and, if possible, the human-readable equivalent from +** FormatMessage. +** +** The first argument passed to the macro should be the error code that +** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN). +** The two subsequent arguments should be the name of the OS function that +** failed and the associated file-system path, if any. +*/ +#define winLogError(a,b,c,d) winLogErrorAtLine(a,b,c,d,__LINE__) +static int winLogErrorAtLine( + int errcode, /* SQLite error code */ + DWORD lastErrno, /* Win32 last error */ + const char *zFunc, /* Name of OS function that failed */ + const char *zPath, /* File path associated with error */ + int iLine /* Source line number where error occurred */ +){ + char zMsg[500]; /* Human readable error text */ + int i; /* Loop counter */ + + zMsg[0] = 0; + winGetLastErrorMsg(lastErrno, sizeof(zMsg), zMsg); + assert( errcode!=SQLITE_OK ); + if( zPath==0 ) zPath = ""; + for(i=0; zMsg[i] && zMsg[i]!='\r' && zMsg[i]!='\n'; i++){} + zMsg[i] = 0; + sqlite3_log(errcode, + "os_win.c:%d: (%lu) %s(%s) - %s", + iLine, lastErrno, zFunc, zPath, zMsg + ); + + return errcode; +} + +/* +** The number of times that a ReadFile(), WriteFile(), and DeleteFile() +** will be retried following a locking error - probably caused by +** antivirus software. Also the initial delay before the first retry. +** The delay increases linearly with each retry. 
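+** For example, with the defaults below (10 retries, 25 ms base delay) the
+** k-th retry sleeps k*25 ms, so a fully exhausted retry loop waits
+** 25+50+...+250 = 1375 ms in total before the operation finally fails.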
+*/ +#ifndef SQLITE_WIN32_IOERR_RETRY +# define SQLITE_WIN32_IOERR_RETRY 10 +#endif +#ifndef SQLITE_WIN32_IOERR_RETRY_DELAY +# define SQLITE_WIN32_IOERR_RETRY_DELAY 25 +#endif +static int winIoerrRetry = SQLITE_WIN32_IOERR_RETRY; +static int winIoerrRetryDelay = SQLITE_WIN32_IOERR_RETRY_DELAY; + +/* +** The "winIoerrCanRetry1" macro is used to determine if a particular I/O +** error code obtained via GetLastError() is eligible to be retried. It +** must accept the error code DWORD as its only argument and should return +** non-zero if the error code is transient in nature and the operation +** responsible for generating the original error might succeed upon being +** retried. The argument to this macro should be a variable. +** +** Additionally, a macro named "winIoerrCanRetry2" may be defined. If it +** is defined, it will be consulted only when the macro "winIoerrCanRetry1" +** returns zero. The "winIoerrCanRetry2" macro is completely optional and +** may be used to include additional error codes in the set that should +** result in the failing I/O operation being retried by the caller. If +** defined, the "winIoerrCanRetry2" macro must exhibit external semantics +** identical to those of the "winIoerrCanRetry1" macro. +*/ +#if !defined(winIoerrCanRetry1) +#define winIoerrCanRetry1(a) (((a)==ERROR_ACCESS_DENIED) || \ + ((a)==ERROR_SHARING_VIOLATION) || \ + ((a)==ERROR_LOCK_VIOLATION) || \ + ((a)==ERROR_DEV_NOT_EXIST) || \ + ((a)==ERROR_NETNAME_DELETED) || \ + ((a)==ERROR_SEM_TIMEOUT) || \ + ((a)==ERROR_NETWORK_UNREACHABLE)) +#endif + +/* +** If a ReadFile() or WriteFile() error occurs, invoke this routine +** to see if it should be retried. Return TRUE to retry. Return FALSE +** to give up with an error. +*/ +static int winRetryIoerr(int *pnRetry, DWORD *pError){ + DWORD e = osGetLastError(); + if( *pnRetry>=winIoerrRetry ){ + if( pError ){ + *pError = e; + } + return 0; + } + if( winIoerrCanRetry1(e) ){ + sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry)); + ++*pnRetry; + return 1; + } +#if defined(winIoerrCanRetry2) + else if( winIoerrCanRetry2(e) ){ + sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry)); + ++*pnRetry; + return 1; + } +#endif + if( pError ){ + *pError = e; + } + return 0; +} + +/* +** Log a I/O error retry episode. +*/ +static void winLogIoerr(int nRetry, int lineno){ + if( nRetry ){ + sqlite3_log(SQLITE_NOTICE, + "delayed %dms for lock/sharing conflict at line %d", + winIoerrRetryDelay*nRetry*(nRetry+1)/2, lineno + ); + } +} + +/* +** This #if does not rely on the SQLITE_OS_WINCE define because the +** corresponding section in "date.c" cannot use it. +*/ +#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \ + (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API) +/* +** The MSVC CRT on Windows CE may not have a localtime() function. +** So define a substitute. 
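+** The substitute converts the time_t to a Win32 FILETIME by adding the
+** 11644473600-second offset between the FILETIME epoch (1601-01-01) and
+** the Unix epoch (1970-01-01) and scaling to 100-nanosecond ticks, then
+** converts that to a local SYSTEMTIME to populate the struct tm fields.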
+*/
+/* #include <time.h> */
+struct tm *__cdecl localtime(const time_t *t)
+{
+  static struct tm y;
+  FILETIME uTm, lTm;
+  SYSTEMTIME pTm;
+  sqlite3_int64 t64;
+  t64 = *t;
+  t64 = (t64 + 11644473600)*10000000;
+  uTm.dwLowDateTime = (DWORD)(t64 & 0xFFFFFFFF);
+  uTm.dwHighDateTime= (DWORD)(t64 >> 32);
+  osFileTimeToLocalFileTime(&uTm,&lTm);
+  osFileTimeToSystemTime(&lTm,&pTm);
+  y.tm_year = pTm.wYear - 1900;
+  y.tm_mon = pTm.wMonth - 1;
+  y.tm_wday = pTm.wDayOfWeek;
+  y.tm_mday = pTm.wDay;
+  y.tm_hour = pTm.wHour;
+  y.tm_min = pTm.wMinute;
+  y.tm_sec = pTm.wSecond;
+  return &y;
+}
+#endif
+
+#if SQLITE_OS_WINCE
+/*************************************************************************
+** This section contains code for WinCE only.
+*/
+#define HANDLE_TO_WINFILE(a) (winFile*)&((char*)a)[-(int)offsetof(winFile,h)]
+
+/*
+** Acquire a lock on the handle h
+*/
+static void winceMutexAcquire(HANDLE h){
+   DWORD dwErr;
+   do {
+     dwErr = osWaitForSingleObject(h, INFINITE);
+   } while (dwErr != WAIT_OBJECT_0 && dwErr != WAIT_ABANDONED);
+}
+/*
+** Release a lock acquired by winceMutexAcquire()
+*/
+#define winceMutexRelease(h) ReleaseMutex(h)
+
+/*
+** Create the mutex and shared memory used for locking in the file
+** descriptor pFile
+*/
+static int winceCreateLock(const char *zFilename, winFile *pFile){
+  LPWSTR zTok;
+  LPWSTR zName;
+  DWORD lastErrno;
+  BOOL bLogged = FALSE;
+  BOOL bInit = TRUE;
+
+  zName = winUtf8ToUnicode(zFilename);
+  if( zName==0 ){
+    /* out of memory */
+    return SQLITE_IOERR_NOMEM_BKPT;
+  }
+
+  /* Initialize the local lockdata */
+  memset(&pFile->local, 0, sizeof(pFile->local));
+
+  /* Replace the backslashes in the filename and lowercase it
+  ** to derive a mutex name. */
+  zTok = osCharLowerW(zName);
+  for (;*zTok;zTok++){
+    if (*zTok == '\\') *zTok = '_';
+  }
+
+  /* Create/open the named mutex */
+  pFile->hMutex = osCreateMutexW(NULL, FALSE, zName);
+  if (!pFile->hMutex){
+    pFile->lastErrno = osGetLastError();
+    sqlite3_free(zName);
+    return winLogError(SQLITE_IOERR, pFile->lastErrno,
+                       "winceCreateLock1", zFilename);
+  }
+
+  /* Acquire the mutex before continuing */
+  winceMutexAcquire(pFile->hMutex);
+
+  /* Since the names of named mutexes, semaphores, file mappings etc are
+  ** case-sensitive, take advantage of that by uppercasing the mutex name
+  ** and using that as the shared filemapping name.
+  */
+  osCharUpperW(zName);
+  pFile->hShared = osCreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
+                                        PAGE_READWRITE, 0, sizeof(winceLock),
+                                        zName);
+
+  /* Set a flag that indicates we're the first to create the memory so it
+  ** must be zero-initialized */
+  lastErrno = osGetLastError();
+  if (lastErrno == ERROR_ALREADY_EXISTS){
+    bInit = FALSE;
+  }
+
+  sqlite3_free(zName);
+
+  /* If we succeeded in making the shared memory handle, map it.
*/ + if( pFile->hShared ){ + pFile->shared = (winceLock*)osMapViewOfFile(pFile->hShared, + FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock)); + /* If mapping failed, close the shared memory handle and erase it */ + if( !pFile->shared ){ + pFile->lastErrno = osGetLastError(); + winLogError(SQLITE_IOERR, pFile->lastErrno, + "winceCreateLock2", zFilename); + bLogged = TRUE; + osCloseHandle(pFile->hShared); + pFile->hShared = NULL; + } + } + + /* If shared memory could not be created, then close the mutex and fail */ + if( pFile->hShared==NULL ){ + if( !bLogged ){ + pFile->lastErrno = lastErrno; + winLogError(SQLITE_IOERR, pFile->lastErrno, + "winceCreateLock3", zFilename); + bLogged = TRUE; + } + winceMutexRelease(pFile->hMutex); + osCloseHandle(pFile->hMutex); + pFile->hMutex = NULL; + return SQLITE_IOERR; + } + + /* Initialize the shared memory if we're supposed to */ + if( bInit ){ + memset(pFile->shared, 0, sizeof(winceLock)); + } + + winceMutexRelease(pFile->hMutex); + return SQLITE_OK; +} + +/* +** Destroy the part of winFile that deals with wince locks +*/ +static void winceDestroyLock(winFile *pFile){ + if (pFile->hMutex){ + /* Acquire the mutex */ + winceMutexAcquire(pFile->hMutex); + + /* The following blocks should probably assert in debug mode, but they + are to cleanup in case any locks remained open */ + if (pFile->local.nReaders){ + pFile->shared->nReaders --; + } + if (pFile->local.bReserved){ + pFile->shared->bReserved = FALSE; + } + if (pFile->local.bPending){ + pFile->shared->bPending = FALSE; + } + if (pFile->local.bExclusive){ + pFile->shared->bExclusive = FALSE; + } + + /* De-reference and close our copy of the shared memory handle */ + osUnmapViewOfFile(pFile->shared); + osCloseHandle(pFile->hShared); + + /* Done with the mutex */ + winceMutexRelease(pFile->hMutex); + osCloseHandle(pFile->hMutex); + pFile->hMutex = NULL; + } +} + +/* +** An implementation of the LockFile() API of Windows for CE +*/ +static BOOL winceLockFile( + LPHANDLE phFile, + DWORD dwFileOffsetLow, + DWORD dwFileOffsetHigh, + DWORD nNumberOfBytesToLockLow, + DWORD nNumberOfBytesToLockHigh +){ + winFile *pFile = HANDLE_TO_WINFILE(phFile); + BOOL bReturn = FALSE; + + UNUSED_PARAMETER(dwFileOffsetHigh); + UNUSED_PARAMETER(nNumberOfBytesToLockHigh); + + if (!pFile->hMutex) return TRUE; + winceMutexAcquire(pFile->hMutex); + + /* Wanting an exclusive lock? */ + if (dwFileOffsetLow == (DWORD)SHARED_FIRST + && nNumberOfBytesToLockLow == (DWORD)SHARED_SIZE){ + if (pFile->shared->nReaders == 0 && pFile->shared->bExclusive == 0){ + pFile->shared->bExclusive = TRUE; + pFile->local.bExclusive = TRUE; + bReturn = TRUE; + } + } + + /* Want a read-only lock? */ + else if (dwFileOffsetLow == (DWORD)SHARED_FIRST && + nNumberOfBytesToLockLow == 1){ + if (pFile->shared->bExclusive == 0){ + pFile->local.nReaders ++; + if (pFile->local.nReaders == 1){ + pFile->shared->nReaders ++; + } + bReturn = TRUE; + } + } + + /* Want a pending lock? */ + else if (dwFileOffsetLow == (DWORD)PENDING_BYTE + && nNumberOfBytesToLockLow == 1){ + /* If no pending lock has been acquired, then acquire it */ + if (pFile->shared->bPending == 0) { + pFile->shared->bPending = TRUE; + pFile->local.bPending = TRUE; + bReturn = TRUE; + } + } + + /* Want a reserved lock? 
*/ + else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE + && nNumberOfBytesToLockLow == 1){ + if (pFile->shared->bReserved == 0) { + pFile->shared->bReserved = TRUE; + pFile->local.bReserved = TRUE; + bReturn = TRUE; + } + } + + winceMutexRelease(pFile->hMutex); + return bReturn; +} + +/* +** An implementation of the UnlockFile API of Windows for CE +*/ +static BOOL winceUnlockFile( + LPHANDLE phFile, + DWORD dwFileOffsetLow, + DWORD dwFileOffsetHigh, + DWORD nNumberOfBytesToUnlockLow, + DWORD nNumberOfBytesToUnlockHigh +){ + winFile *pFile = HANDLE_TO_WINFILE(phFile); + BOOL bReturn = FALSE; + + UNUSED_PARAMETER(dwFileOffsetHigh); + UNUSED_PARAMETER(nNumberOfBytesToUnlockHigh); + + if (!pFile->hMutex) return TRUE; + winceMutexAcquire(pFile->hMutex); + + /* Releasing a reader lock or an exclusive lock */ + if (dwFileOffsetLow == (DWORD)SHARED_FIRST){ + /* Did we have an exclusive lock? */ + if (pFile->local.bExclusive){ + assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE); + pFile->local.bExclusive = FALSE; + pFile->shared->bExclusive = FALSE; + bReturn = TRUE; + } + + /* Did we just have a reader lock? */ + else if (pFile->local.nReaders){ + assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE + || nNumberOfBytesToUnlockLow == 1); + pFile->local.nReaders --; + if (pFile->local.nReaders == 0) + { + pFile->shared->nReaders --; + } + bReturn = TRUE; + } + } + + /* Releasing a pending lock */ + else if (dwFileOffsetLow == (DWORD)PENDING_BYTE + && nNumberOfBytesToUnlockLow == 1){ + if (pFile->local.bPending){ + pFile->local.bPending = FALSE; + pFile->shared->bPending = FALSE; + bReturn = TRUE; + } + } + /* Releasing a reserved lock */ + else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE + && nNumberOfBytesToUnlockLow == 1){ + if (pFile->local.bReserved) { + pFile->local.bReserved = FALSE; + pFile->shared->bReserved = FALSE; + bReturn = TRUE; + } + } + + winceMutexRelease(pFile->hMutex); + return bReturn; +} +/* +** End of the special code for wince +*****************************************************************************/ +#endif /* SQLITE_OS_WINCE */ + +/* +** Lock a file region. +*/ +static BOOL winLockFile( + LPHANDLE phFile, + DWORD flags, + DWORD offsetLow, + DWORD offsetHigh, + DWORD numBytesLow, + DWORD numBytesHigh +){ +#if SQLITE_OS_WINCE + /* + ** NOTE: Windows CE is handled differently here due its lack of the Win32 + ** API LockFile. + */ + return winceLockFile(phFile, offsetLow, offsetHigh, + numBytesLow, numBytesHigh); +#else + if( osIsNT() ){ + OVERLAPPED ovlp; + memset(&ovlp, 0, sizeof(OVERLAPPED)); + ovlp.Offset = offsetLow; + ovlp.OffsetHigh = offsetHigh; + return osLockFileEx(*phFile, flags, 0, numBytesLow, numBytesHigh, &ovlp); + }else{ + return osLockFile(*phFile, offsetLow, offsetHigh, numBytesLow, + numBytesHigh); + } +#endif +} + +/* +** Unlock a file region. + */ +static BOOL winUnlockFile( + LPHANDLE phFile, + DWORD offsetLow, + DWORD offsetHigh, + DWORD numBytesLow, + DWORD numBytesHigh +){ +#if SQLITE_OS_WINCE + /* + ** NOTE: Windows CE is handled differently here due its lack of the Win32 + ** API UnlockFile. 
+ */ + return winceUnlockFile(phFile, offsetLow, offsetHigh, + numBytesLow, numBytesHigh); +#else + if( osIsNT() ){ + OVERLAPPED ovlp; + memset(&ovlp, 0, sizeof(OVERLAPPED)); + ovlp.Offset = offsetLow; + ovlp.OffsetHigh = offsetHigh; + return osUnlockFileEx(*phFile, 0, numBytesLow, numBytesHigh, &ovlp); + }else{ + return osUnlockFile(*phFile, offsetLow, offsetHigh, numBytesLow, + numBytesHigh); + } +#endif +} + +/***************************************************************************** +** The next group of routines implement the I/O methods specified +** by the sqlite3_io_methods object. +******************************************************************************/ + +/* +** Some Microsoft compilers lack this definition. +*/ +#ifndef INVALID_SET_FILE_POINTER +# define INVALID_SET_FILE_POINTER ((DWORD)-1) +#endif + +/* +** Move the current position of the file handle passed as the first +** argument to offset iOffset within the file. If successful, return 0. +** Otherwise, set pFile->lastErrno and return non-zero. +*/ +static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){ +#if !SQLITE_OS_WINRT + LONG upperBits; /* Most sig. 32 bits of new offset */ + LONG lowerBits; /* Least sig. 32 bits of new offset */ + DWORD dwRet; /* Value returned by SetFilePointer() */ + DWORD lastErrno; /* Value returned by GetLastError() */ + + OSTRACE(("SEEK file=%p, offset=%lld\n", pFile->h, iOffset)); + + upperBits = (LONG)((iOffset>>32) & 0x7fffffff); + lowerBits = (LONG)(iOffset & 0xffffffff); + + /* API oddity: If successful, SetFilePointer() returns a dword + ** containing the lower 32-bits of the new file-offset. Or, if it fails, + ** it returns INVALID_SET_FILE_POINTER. However according to MSDN, + ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine + ** whether an error has actually occurred, it is also necessary to call + ** GetLastError(). + */ + dwRet = osSetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); + + if( (dwRet==INVALID_SET_FILE_POINTER + && ((lastErrno = osGetLastError())!=NO_ERROR)) ){ + pFile->lastErrno = lastErrno; + winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, + "winSeekFile", pFile->zPath); + OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); + return 1; + } + + OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); + return 0; +#else + /* + ** Same as above, except that this implementation works for WinRT. + */ + + LARGE_INTEGER x; /* The new offset */ + BOOL bRet; /* Value returned by SetFilePointerEx() */ + + x.QuadPart = iOffset; + bRet = osSetFilePointerEx(pFile->h, x, 0, FILE_BEGIN); + + if(!bRet){ + pFile->lastErrno = osGetLastError(); + winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, + "winSeekFile", pFile->zPath); + OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); + return 1; + } + + OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); + return 0; +#endif +} + +#if SQLITE_MAX_MMAP_SIZE>0 +/* Forward references to VFS helper methods used for memory mapped files */ +static int winMapfile(winFile*, sqlite3_int64); +static int winUnmapfile(winFile*); +#endif + +/* +** Close a file. +** +** It is reported that an attempt to close a handle might sometimes +** fail. This is a very unreasonable result, but Windows is notorious +** for being unreasonable so I do not doubt that it might happen. If +** the close fails, we pause for 100 milliseconds and try again. As +** many as MX_CLOSE_ATTEMPT attempts to close the handle are made before +** giving up and returning an error. 
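+**
+** As a sketch of the worst case using the constants below: winClose()
+** makes up to MX_CLOSE_ATTEMPT==3 calls to osCloseHandle() with a 100
+** millisecond pause after each failure, so a persistently failing close
+** adds roughly 200 milliseconds of delay before SQLITE_IOERR_CLOSE is
+** reported.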
+*/
+#define MX_CLOSE_ATTEMPT 3
+static int winClose(sqlite3_file *id){
+  int rc, cnt = 0;
+  winFile *pFile = (winFile*)id;
+
+  assert( id!=0 );
+#ifndef SQLITE_OMIT_WAL
+  assert( pFile->pShm==0 );
+#endif
+  assert( pFile->h!=NULL && pFile->h!=INVALID_HANDLE_VALUE );
+  OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p\n",
+           osGetCurrentProcessId(), pFile, pFile->h));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+  winUnmapfile(pFile);
+#endif
+
+  do{
+    rc = osCloseHandle(pFile->h);
+    /* SimulateIOError( rc=0; cnt=MX_CLOSE_ATTEMPT; ); */
+  }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (sqlite3_win32_sleep(100), 1) );
+#if SQLITE_OS_WINCE
+#define WINCE_DELETION_ATTEMPTS 3
+  {
+    winVfsAppData *pAppData = (winVfsAppData*)pFile->pVfs->pAppData;
+    if( pAppData==NULL || !pAppData->bNoLock ){
+      winceDestroyLock(pFile);
+    }
+  }
+  if( pFile->zDeleteOnClose ){
+    int cnt = 0;
+    while(
+           osDeleteFileW(pFile->zDeleteOnClose)==0
+        && osGetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff
+        && cnt++ < WINCE_DELETION_ATTEMPTS
+    ){
+       sqlite3_win32_sleep(100);  /* Wait a little before trying again */
+    }
+    sqlite3_free(pFile->zDeleteOnClose);
+  }
+#endif
+  if( rc ){
+    pFile->h = NULL;
+  }
+  OpenCounter(-1);
+  OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p, rc=%s\n",
+           osGetCurrentProcessId(), pFile, pFile->h, rc ? "ok" : "failed"));
+  return rc ? SQLITE_OK
+            : winLogError(SQLITE_IOERR_CLOSE, osGetLastError(),
+                          "winClose", pFile->zPath);
+}
+
+/*
+** Read data from a file into a buffer.  Return SQLITE_OK if all
+** bytes were read successfully and SQLITE_IOERR if anything goes
+** wrong.
+*/
+static int winRead(
+  sqlite3_file *id,          /* File to read from */
+  void *pBuf,                /* Write content into this buffer */
+  int amt,                   /* Number of bytes to read */
+  sqlite3_int64 offset       /* Begin reading at this offset */
+){
+#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+  OVERLAPPED overlapped;          /* The offset for ReadFile. */
+#endif
+  winFile *pFile = (winFile*)id;  /* file handle */
+  DWORD nRead;                    /* Number of bytes actually read from file */
+  int nRetry = 0;                 /* Number of retries */
+
+  assert( id!=0 );
+  assert( amt>0 );
+  assert( offset>=0 );
+  SimulateIOError(return SQLITE_IOERR_READ);
+  OSTRACE(("READ pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, "
+           "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,
+           pFile->h, pBuf, amt, offset, pFile->locktype));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+  /* Deal with as much of this read request as possible by transferring
+  ** data from the memory mapping using memcpy().
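+  ** For example (a sketch): with mmapSize==8192, a request for amt==512
+  ** bytes at offset==8000 copies the first 192 bytes out of the mapping,
+  ** then falls through to the ReadFile() path below for the remaining
+  ** 320 bytes starting at offset 8192.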
 */
+  if( offset<pFile->mmapSize ){
+    if( offset+amt <= pFile->mmapSize ){
+      memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt);
+      OSTRACE(("READ-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+               osGetCurrentProcessId(), pFile, pFile->h));
+      return SQLITE_OK;
+    }else{
+      int nCopy = (int)(pFile->mmapSize - offset);
+      memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy);
+      pBuf = &((u8 *)pBuf)[nCopy];
+      amt -= nCopy;
+      offset += nCopy;
+    }
+  }
+#endif
+
+#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+  if( winSeekFile(pFile, offset) ){
+    OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n",
+             osGetCurrentProcessId(), pFile, pFile->h));
+    return SQLITE_FULL;
+  }
+  while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){
+#else
+  memset(&overlapped, 0, sizeof(OVERLAPPED));
+  overlapped.Offset = (LONG)(offset & 0xffffffff);
+  overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff);
+  while( !osReadFile(pFile->h, pBuf, amt, &nRead, &overlapped) &&
+         osGetLastError()!=ERROR_HANDLE_EOF ){
+#endif
+    DWORD lastErrno;
+    if( winRetryIoerr(&nRetry, &lastErrno) ) continue;
+    pFile->lastErrno = lastErrno;
+    OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_READ\n",
+             osGetCurrentProcessId(), pFile, pFile->h));
+    return winLogError(SQLITE_IOERR_READ, pFile->lastErrno,
+                       "winRead", pFile->zPath);
+  }
+  winLogIoerr(nRetry, __LINE__);
+  if( nRead<(DWORD)amt ){
+    /* Unread parts of the buffer must be zero-filled */
+    memset(&((char*)pBuf)[nRead], 0, amt-nRead);
+    OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_SHORT_READ\n",
+             osGetCurrentProcessId(), pFile, pFile->h));
+    return SQLITE_IOERR_SHORT_READ;
+  }
+
+  OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+           osGetCurrentProcessId(), pFile, pFile->h));
+  return SQLITE_OK;
+}
+
+/*
+** Write data from a buffer into a file.  Return SQLITE_OK on success
+** or some other error code on failure.
+*/
+static int winWrite(
+  sqlite3_file *id,               /* File to write into */
+  const void *pBuf,               /* The bytes to be written */
+  int amt,                        /* Number of bytes to write */
+  sqlite3_int64 offset            /* Offset into the file to begin writing at */
+){
+  int rc = 0;                     /* True if error has occurred, else false */
+  winFile *pFile = (winFile*)id;  /* File handle */
+  int nRetry = 0;                 /* Number of retries */
+
+  assert( amt>0 );
+  assert( pFile );
+  SimulateIOError(return SQLITE_IOERR_WRITE);
+  SimulateDiskfullError(return SQLITE_FULL);
+
+  OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, "
+           "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile,
+           pFile->h, pBuf, amt, offset, pFile->locktype));
+
+#if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0
+  /* Deal with as much of this write request as possible by transferring
+  ** data from the memory mapping using memcpy(). */
+  if( offset<pFile->mmapSize ){
+    if( offset+amt <= pFile->mmapSize ){
+      memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt);
+      OSTRACE(("WRITE-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+               osGetCurrentProcessId(), pFile, pFile->h));
+      return SQLITE_OK;
+    }else{
+      int nCopy = (int)(pFile->mmapSize - offset);
+      memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy);
+      pBuf = &((u8 *)pBuf)[nCopy];
+      amt -= nCopy;
+      offset += nCopy;
+    }
+  }
+#endif
+
+#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED)
+  rc = winSeekFile(pFile, offset);
+  if( rc==0 ){
+#else
+  {
+#endif
+#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED)
+    OVERLAPPED overlapped;        /* The offset for WriteFile.
*/ +#endif + u8 *aRem = (u8 *)pBuf; /* Data yet to be written */ + int nRem = amt; /* Number of bytes yet to be written */ + DWORD nWrite; /* Bytes written by each WriteFile() call */ + DWORD lastErrno = NO_ERROR; /* Value returned by GetLastError() */ + +#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED) + memset(&overlapped, 0, sizeof(OVERLAPPED)); + overlapped.Offset = (LONG)(offset & 0xffffffff); + overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); +#endif + + while( nRem>0 ){ +#if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED) + if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, 0) ){ +#else + if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, &overlapped) ){ +#endif + if( winRetryIoerr(&nRetry, &lastErrno) ) continue; + break; + } + assert( nWrite==0 || nWrite<=(DWORD)nRem ); + if( nWrite==0 || nWrite>(DWORD)nRem ){ + lastErrno = osGetLastError(); + break; + } +#if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED) + offset += nWrite; + overlapped.Offset = (LONG)(offset & 0xffffffff); + overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); +#endif + aRem += nWrite; + nRem -= nWrite; + } + if( nRem>0 ){ + pFile->lastErrno = lastErrno; + rc = 1; + } + } + + if( rc ){ + if( ( pFile->lastErrno==ERROR_HANDLE_DISK_FULL ) + || ( pFile->lastErrno==ERROR_DISK_FULL )){ + OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n", + osGetCurrentProcessId(), pFile, pFile->h)); + return winLogError(SQLITE_FULL, pFile->lastErrno, + "winWrite1", pFile->zPath); + } + OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_WRITE\n", + osGetCurrentProcessId(), pFile, pFile->h)); + return winLogError(SQLITE_IOERR_WRITE, pFile->lastErrno, + "winWrite2", pFile->zPath); + }else{ + winLogIoerr(nRetry, __LINE__); + } + OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", + osGetCurrentProcessId(), pFile, pFile->h)); + return SQLITE_OK; +} + +/* +** Truncate an open file to a specified size +*/ +static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ + winFile *pFile = (winFile*)id; /* File handle object */ + int rc = SQLITE_OK; /* Return code for this function */ + DWORD lastErrno; + + assert( pFile ); + SimulateIOError(return SQLITE_IOERR_TRUNCATE); + OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, size=%lld, lock=%d\n", + osGetCurrentProcessId(), pFile, pFile->h, nByte, pFile->locktype)); + + /* If the user has configured a chunk-size for this file, truncate the + ** file so that it consists of an integer number of chunks (i.e. the + ** actual file size after the operation may be larger than the requested + ** size). + */ + if( pFile->szChunk>0 ){ + nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk; + } + + /* SetEndOfFile() returns non-zero when successful, or zero when it fails. */ + if( winSeekFile(pFile, nByte) ){ + rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno, + "winTruncate1", pFile->zPath); + }else if( 0==osSetEndOfFile(pFile->h) && + ((lastErrno = osGetLastError())!=ERROR_USER_MAPPED_FILE) ){ + pFile->lastErrno = lastErrno; + rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno, + "winTruncate2", pFile->zPath); + } + +#if SQLITE_MAX_MMAP_SIZE>0 + /* If the file was truncated to a size smaller than the currently + ** mapped region, reduce the effective mapping size as well. SQLite will + ** use read() and write() to access data beyond this point from now on. 
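+  ** For example (a sketch): if 64KiB of the file is currently mapped and
+  ** the file is truncated to 40KiB, mmapSize is reduced to 40KiB here and
+  ** any later access to the truncated tail goes through winRead() and
+  ** winWrite() rather than the mapping.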
+  */
+  if( pFile->pMapRegion && nByte<pFile->mmapSize ){
+    pFile->mmapSize = nByte;
+  }
+#endif
+
+  OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, rc=%s\n",
+           osGetCurrentProcessId(), pFile, pFile->h, sqlite3ErrName(rc)));
+  return rc;
+}
+
+#ifdef SQLITE_TEST
+/*
+** Count the number of fullsyncs and normal syncs.  This is used to test
+** that syncs and fullsyncs are occurring at the right times.
+*/
+SQLITE_API int sqlite3_sync_count = 0;
+SQLITE_API int sqlite3_fullsync_count = 0;
+#endif
+
+/*
+** Make sure all writes to a particular file are committed to disk.
+*/
+static int winSync(sqlite3_file *id, int flags){
+#ifndef SQLITE_NO_SYNC
+  /*
+  ** Used only when SQLITE_NO_SYNC is not defined.
+  */
+  BOOL rc;
+#endif
+#if !defined(NDEBUG) || !defined(SQLITE_NO_SYNC) || \
+    defined(SQLITE_HAVE_OS_TRACE)
+  /*
+  ** Used when SQLITE_NO_SYNC is not defined and by the assert() and/or
+  ** OSTRACE() macros.
+  */
+  winFile *pFile = (winFile*)id;
+#else
+  UNUSED_PARAMETER(id);
+#endif
+
+  assert( pFile );
+  /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */
+  assert((flags&0x0F)==SQLITE_SYNC_NORMAL
+      || (flags&0x0F)==SQLITE_SYNC_FULL
+  );
+
+  /* Unix cannot, but some systems may return SQLITE_FULL from here. This
+  ** line is to test that doing so does not cause any problems.
+  */
+  SimulateDiskfullError( return SQLITE_FULL );
+
+  OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, flags=%x, lock=%d\n",
+           osGetCurrentProcessId(), pFile, pFile->h, flags,
+           pFile->locktype));
+
+#ifndef SQLITE_TEST
+  UNUSED_PARAMETER(flags);
+#else
+  if( (flags&0x0F)==SQLITE_SYNC_FULL ){
+    sqlite3_fullsync_count++;
+  }
+  sqlite3_sync_count++;
+#endif
+
+  /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a
+  ** no-op
+  */
+#ifdef SQLITE_NO_SYNC
+  OSTRACE(("SYNC-NOP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+           osGetCurrentProcessId(), pFile, pFile->h));
+  return SQLITE_OK;
+#else
+#if SQLITE_MAX_MMAP_SIZE>0
+  if( pFile->pMapRegion ){
+    if( osFlushViewOfFile(pFile->pMapRegion, 0) ){
+      OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, "
+               "rc=SQLITE_OK\n", osGetCurrentProcessId(),
+               pFile, pFile->pMapRegion));
+    }else{
+      pFile->lastErrno = osGetLastError();
+      OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, "
+               "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(),
+               pFile, pFile->pMapRegion));
+      return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno,
+                         "winSync1", pFile->zPath);
+    }
+  }
+#endif
+  rc = osFlushFileBuffers(pFile->h);
+  SimulateIOError( rc=FALSE );
+  if( rc ){
+    OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n",
+             osGetCurrentProcessId(), pFile, pFile->h));
+    return SQLITE_OK;
+  }else{
+    pFile->lastErrno = osGetLastError();
+    OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_FSYNC\n",
+             osGetCurrentProcessId(), pFile, pFile->h));
+    return winLogError(SQLITE_IOERR_FSYNC, pFile->lastErrno,
+                       "winSync2", pFile->zPath);
+  }
+#endif
+}
+
+/*
+** Determine the current size of a file in bytes
+*/
+static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){
+  winFile *pFile = (winFile*)id;
+  int rc = SQLITE_OK;
+
+  assert( id!=0 );
+  assert( pSize!=0 );
+  SimulateIOError(return SQLITE_IOERR_FSTAT);
+  OSTRACE(("SIZE file=%p, pSize=%p\n", pFile->h, pSize));
+
+#if SQLITE_OS_WINRT
+  {
+    FILE_STANDARD_INFO info;
+    if( osGetFileInformationByHandleEx(pFile->h, FileStandardInfo,
+                                       &info, sizeof(info)) ){
+      *pSize = info.EndOfFile.QuadPart;
+    }else{
+      pFile->lastErrno = osGetLastError();
+      rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno,
+                       "winFileSize", pFile->zPath);
+    }
+  }
+#else + { + DWORD upperBits; + DWORD lowerBits; + DWORD lastErrno; + + lowerBits = osGetFileSize(pFile->h, &upperBits); + *pSize = (((sqlite3_int64)upperBits)<<32) + lowerBits; + if( (lowerBits == INVALID_FILE_SIZE) + && ((lastErrno = osGetLastError())!=NO_ERROR) ){ + pFile->lastErrno = lastErrno; + rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno, + "winFileSize", pFile->zPath); + } + } +#endif + OSTRACE(("SIZE file=%p, pSize=%p, *pSize=%lld, rc=%s\n", + pFile->h, pSize, *pSize, sqlite3ErrName(rc))); + return rc; +} + +/* +** LOCKFILE_FAIL_IMMEDIATELY is undefined on some Windows systems. +*/ +#ifndef LOCKFILE_FAIL_IMMEDIATELY +# define LOCKFILE_FAIL_IMMEDIATELY 1 +#endif + +#ifndef LOCKFILE_EXCLUSIVE_LOCK +# define LOCKFILE_EXCLUSIVE_LOCK 2 +#endif + +/* +** Historically, SQLite has used both the LockFile and LockFileEx functions. +** When the LockFile function was used, it was always expected to fail +** immediately if the lock could not be obtained. Also, it always expected to +** obtain an exclusive lock. These flags are used with the LockFileEx function +** and reflect those expectations; therefore, they should not be changed. +*/ +#ifndef SQLITE_LOCKFILE_FLAGS +# define SQLITE_LOCKFILE_FLAGS (LOCKFILE_FAIL_IMMEDIATELY | \ + LOCKFILE_EXCLUSIVE_LOCK) +#endif + +/* +** Currently, SQLite never calls the LockFileEx function without wanting the +** call to fail immediately if the lock cannot be obtained. +*/ +#ifndef SQLITE_LOCKFILEEX_FLAGS +# define SQLITE_LOCKFILEEX_FLAGS (LOCKFILE_FAIL_IMMEDIATELY) +#endif + +/* +** Acquire a reader lock. +** Different API routines are called depending on whether or not this +** is Win9x or WinNT. +*/ +static int winGetReadLock(winFile *pFile){ + int res; + OSTRACE(("READ-LOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); + if( osIsNT() ){ +#if SQLITE_OS_WINCE + /* + ** NOTE: Windows CE is handled differently here due its lack of the Win32 + ** API LockFileEx. + */ + res = winceLockFile(&pFile->h, SHARED_FIRST, 0, 1, 0); +#else + res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS, SHARED_FIRST, 0, + SHARED_SIZE, 0); +#endif + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + int lk; + sqlite3_randomness(sizeof(lk), &lk); + pFile->sharedLockByte = (short)((lk & 0x7fffffff)%(SHARED_SIZE - 1)); + res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, + SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); + } +#endif + if( res == 0 ){ + pFile->lastErrno = osGetLastError(); + /* No need to log a failure to lock */ + } + OSTRACE(("READ-LOCK file=%p, result=%d\n", pFile->h, res)); + return res; +} + +/* +** Undo a readlock +*/ +static int winUnlockReadLock(winFile *pFile){ + int res; + DWORD lastErrno; + OSTRACE(("READ-UNLOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); + if( osIsNT() ){ + res = winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + res = winUnlockFile(&pFile->h, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); + } +#endif + if( res==0 && ((lastErrno = osGetLastError())!=ERROR_NOT_LOCKED) ){ + pFile->lastErrno = lastErrno; + winLogError(SQLITE_IOERR_UNLOCK, pFile->lastErrno, + "winUnlockReadLock", pFile->zPath); + } + OSTRACE(("READ-UNLOCK file=%p, result=%d\n", pFile->h, res)); + return res; +} + +/* +** Lock the file with the lock specified by parameter locktype - one +** of the following: +** +** (1) SHARED_LOCK +** (2) RESERVED_LOCK +** (3) PENDING_LOCK +** (4) EXCLUSIVE_LOCK +** +** Sometimes when requesting one lock state, additional lock states +** are inserted in between. 
The locking might fail on one of the later +** transitions leaving the lock state different from what it started but +** still short of its goal. The following chart shows the allowed +** transitions and the inserted intermediate states: +** +** UNLOCKED -> SHARED +** SHARED -> RESERVED +** SHARED -> (PENDING) -> EXCLUSIVE +** RESERVED -> (PENDING) -> EXCLUSIVE +** PENDING -> EXCLUSIVE +** +** This routine will only increase a lock. The winUnlock() routine +** erases all locks at once and returns us immediately to locking level 0. +** It is not possible to lower the locking level one step at a time. You +** must go straight to locking level 0. +*/ +static int winLock(sqlite3_file *id, int locktype){ + int rc = SQLITE_OK; /* Return code from subroutines */ + int res = 1; /* Result of a Windows lock call */ + int newLocktype; /* Set pFile->locktype to this value before exiting */ + int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ + winFile *pFile = (winFile*)id; + DWORD lastErrno = NO_ERROR; + + assert( id!=0 ); + OSTRACE(("LOCK file=%p, oldLock=%d(%d), newLock=%d\n", + pFile->h, pFile->locktype, pFile->sharedLockByte, locktype)); + + /* If there is already a lock of this type or more restrictive on the + ** OsFile, do nothing. Don't use the end_lock: exit path, as + ** sqlite3OsEnterMutex() hasn't been called yet. + */ + if( pFile->locktype>=locktype ){ + OSTRACE(("LOCK-HELD file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + + /* Do not allow any kind of write-lock on a read-only database + */ + if( (pFile->ctrlFlags & WINFILE_RDONLY)!=0 && locktype>=RESERVED_LOCK ){ + return SQLITE_IOERR_LOCK; + } + + /* Make sure the locking sequence is correct + */ + assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); + assert( locktype!=PENDING_LOCK ); + assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); + + /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or + ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of + ** the PENDING_LOCK byte is temporary. + */ + newLocktype = pFile->locktype; + if( pFile->locktype==NO_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktype<=RESERVED_LOCK) + ){ + int cnt = 3; + while( cnt-->0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, + PENDING_BYTE, 0, 1, 0))==0 ){ + /* Try 3 times to get the pending lock. This is needed to work + ** around problems caused by indexing and/or anti-virus software on + ** Windows systems. + ** If you are using this code as a model for alternative VFSes, do not + ** copy this retry logic. It is a hack intended for Windows only. 
+ */ + lastErrno = osGetLastError(); + OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n", + pFile->h, cnt, res)); + if( lastErrno==ERROR_INVALID_HANDLE ){ + pFile->lastErrno = lastErrno; + rc = SQLITE_IOERR_LOCK; + OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n", + pFile->h, cnt, sqlite3ErrName(rc))); + return rc; + } + if( cnt ) sqlite3_win32_sleep(1); + } + gotPendingLock = res; + if( !res ){ + lastErrno = osGetLastError(); + } + } + + /* Acquire a shared lock + */ + if( locktype==SHARED_LOCK && res ){ + assert( pFile->locktype==NO_LOCK ); + res = winGetReadLock(pFile); + if( res ){ + newLocktype = SHARED_LOCK; + }else{ + lastErrno = osGetLastError(); + } + } + + /* Acquire a RESERVED lock + */ + if( locktype==RESERVED_LOCK && res ){ + assert( pFile->locktype==SHARED_LOCK ); + res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, RESERVED_BYTE, 0, 1, 0); + if( res ){ + newLocktype = RESERVED_LOCK; + }else{ + lastErrno = osGetLastError(); + } + } + + /* Acquire a PENDING lock + */ + if( locktype==EXCLUSIVE_LOCK && res ){ + newLocktype = PENDING_LOCK; + gotPendingLock = 0; + } + + /* Acquire an EXCLUSIVE lock + */ + if( locktype==EXCLUSIVE_LOCK && res ){ + assert( pFile->locktype>=SHARED_LOCK ); + res = winUnlockReadLock(pFile); + res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0, + SHARED_SIZE, 0); + if( res ){ + newLocktype = EXCLUSIVE_LOCK; + }else{ + lastErrno = osGetLastError(); + winGetReadLock(pFile); + } + } + + /* If we are holding a PENDING lock that ought to be released, then + ** release it now. + */ + if( gotPendingLock && locktype==SHARED_LOCK ){ + winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0); + } + + /* Update the state of the lock has held in the file descriptor then + ** return the appropriate result code. + */ + if( res ){ + rc = SQLITE_OK; + }else{ + pFile->lastErrno = lastErrno; + rc = SQLITE_BUSY; + OSTRACE(("LOCK-FAIL file=%p, wanted=%d, got=%d\n", + pFile->h, locktype, newLocktype)); + } + pFile->locktype = (u8)newLocktype; + OSTRACE(("LOCK file=%p, lock=%d, rc=%s\n", + pFile->h, pFile->locktype, sqlite3ErrName(rc))); + return rc; +} + +/* +** This routine checks if there is a RESERVED lock held on the specified +** file by this or any other process. If such a lock is held, return +** non-zero, otherwise zero. +*/ +static int winCheckReservedLock(sqlite3_file *id, int *pResOut){ + int res; + winFile *pFile = (winFile*)id; + + SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); + OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p\n", pFile->h, pResOut)); + + assert( id!=0 ); + if( pFile->locktype>=RESERVED_LOCK ){ + res = 1; + OSTRACE(("TEST-WR-LOCK file=%p, result=%d (local)\n", pFile->h, res)); + }else{ + res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE,0,1,0); + if( res ){ + winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0); + } + res = !res; + OSTRACE(("TEST-WR-LOCK file=%p, result=%d (remote)\n", pFile->h, res)); + } + *pResOut = res; + OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", + pFile->h, pResOut, *pResOut)); + return SQLITE_OK; +} + +/* +** Lower the locking level on file descriptor id to locktype. locktype +** must be either NO_LOCK or SHARED_LOCK. +** +** If the locking level of the file descriptor is already at or below +** the requested locking level, this routine is a no-op. +** +** It is not possible for this routine to fail if the second argument +** is NO_LOCK. 
If the second argument is SHARED_LOCK then this routine
+** might return SQLITE_IOERR.
+*/
+static int winUnlock(sqlite3_file *id, int locktype){
+  int type;
+  winFile *pFile = (winFile*)id;
+  int rc = SQLITE_OK;
+  assert( pFile!=0 );
+  assert( locktype<=SHARED_LOCK );
+  OSTRACE(("UNLOCK file=%p, oldLock=%d(%d), newLock=%d\n",
+           pFile->h, pFile->locktype, pFile->sharedLockByte, locktype));
+  type = pFile->locktype;
+  if( type>=EXCLUSIVE_LOCK ){
+    winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0);
+    if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){
+      /* This should never happen.  We should always be able to
+      ** reacquire the read lock */
+      rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(),
+                       "winUnlock", pFile->zPath);
+    }
+  }
+  if( type>=RESERVED_LOCK ){
+    winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0);
+  }
+  if( locktype==NO_LOCK && type>=SHARED_LOCK ){
+    winUnlockReadLock(pFile);
+  }
+  if( type>=PENDING_LOCK ){
+    winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0);
+  }
+  pFile->locktype = (u8)locktype;
+  OSTRACE(("UNLOCK file=%p, lock=%d, rc=%s\n",
+           pFile->h, pFile->locktype, sqlite3ErrName(rc)));
+  return rc;
+}
+
+/******************************************************************************
+****************************** No-op Locking **********************************
+**
+** Of the various locking implementations available, this is by far the
+** simplest:  locking is ignored.  No attempt is made to lock the database
+** file for reading or writing.
+**
+** This locking mode is appropriate for use on read-only databases
+** (e.g. databases that are burned onto CD-ROM).  It can
+** also be used if the application employs some external mechanism to
+** prevent simultaneous access of the same database by two or more
+** database connections.  But there is a serious risk of database
+** corruption if this locking mode is used in situations where multiple
+** database connections are accessing the same database file at the same
+** time and one or more of those connections are writing.
+*/
+
+static int winNolockLock(sqlite3_file *id, int locktype){
+  UNUSED_PARAMETER(id);
+  UNUSED_PARAMETER(locktype);
+  return SQLITE_OK;
+}
+
+static int winNolockCheckReservedLock(sqlite3_file *id, int *pResOut){
+  UNUSED_PARAMETER(id);
+  UNUSED_PARAMETER(pResOut);
+  return SQLITE_OK;
+}
+
+static int winNolockUnlock(sqlite3_file *id, int locktype){
+  UNUSED_PARAMETER(id);
+  UNUSED_PARAMETER(locktype);
+  return SQLITE_OK;
+}
+
+/******************* End of the no-op lock implementation *********************
+******************************************************************************/
+
+/*
+** If *pArg is initially negative then this is a query.  Set *pArg to
+** 1 or 0 depending on whether or not the mask bit of pFile->ctrlFlags
+** is set.
+**
+** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags.
+*/
+static void winModeBit(winFile *pFile, unsigned char mask, int *pArg){
+  if( *pArg<0 ){
+    *pArg = (pFile->ctrlFlags & mask)!=0;
+  }else if( (*pArg)==0 ){
+    pFile->ctrlFlags &= ~mask;
+  }else{
+    pFile->ctrlFlags |= mask;
+  }
+}
+
+/* Forward references to VFS helper methods used for temporary files */
+static int winGetTempname(sqlite3_vfs *, char **);
+static int winIsDir(const void *);
+static BOOL winIsDriveLetterAndColon(const char *);
+
+/*
+** Control and query of the open file handle.
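+**
+** Applications reach this method via sqlite3_file_control().  For example
+** (a sketch; "db" is an open connection and error checking is omitted),
+** the I/O retry parameters defined above can be queried by passing
+** non-positive values and set by passing positive ones:
+**
+**   int aRetry[2] = { -1, -1 };
+**   sqlite3_file_control(db, "main", SQLITE_FCNTL_WIN32_AV_RETRY, aRetry);
+**   aRetry[0] = 20;
+**   aRetry[1] = 50;
+**   sqlite3_file_control(db, "main", SQLITE_FCNTL_WIN32_AV_RETRY, aRetry);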
+*/ +static int winFileControl(sqlite3_file *id, int op, void *pArg){ + winFile *pFile = (winFile*)id; + OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg)); + switch( op ){ + case SQLITE_FCNTL_LOCKSTATE: { + *(int*)pArg = pFile->locktype; + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_LAST_ERRNO: { + *(int*)pArg = (int)pFile->lastErrno; + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_CHUNK_SIZE: { + pFile->szChunk = *(int *)pArg; + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_SIZE_HINT: { + if( pFile->szChunk>0 ){ + sqlite3_int64 oldSz; + int rc = winFileSize(id, &oldSz); + if( rc==SQLITE_OK ){ + sqlite3_int64 newSz = *(sqlite3_int64*)pArg; + if( newSz>oldSz ){ + SimulateIOErrorBenign(1); + rc = winTruncate(id, newSz); + SimulateIOErrorBenign(0); + } + } + OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); + return rc; + } + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_PERSIST_WAL: { + winModeBit(pFile, WINFILE_PERSIST_WAL, (int*)pArg); + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_POWERSAFE_OVERWRITE: { + winModeBit(pFile, WINFILE_PSOW, (int*)pArg); + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_VFSNAME: { + *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName); + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_WIN32_AV_RETRY: { + int *a = (int*)pArg; + if( a[0]>0 ){ + winIoerrRetry = a[0]; + }else{ + a[0] = winIoerrRetry; + } + if( a[1]>0 ){ + winIoerrRetryDelay = a[1]; + }else{ + a[1] = winIoerrRetryDelay; + } + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } + case SQLITE_FCNTL_WIN32_GET_HANDLE: { + LPHANDLE phFile = (LPHANDLE)pArg; + *phFile = pFile->h; + OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); + return SQLITE_OK; + } +#ifdef SQLITE_TEST + case SQLITE_FCNTL_WIN32_SET_HANDLE: { + LPHANDLE phFile = (LPHANDLE)pArg; + HANDLE hOldFile = pFile->h; + pFile->h = *phFile; + *phFile = hOldFile; + OSTRACE(("FCNTL oldFile=%p, newFile=%p, rc=SQLITE_OK\n", + hOldFile, pFile->h)); + return SQLITE_OK; + } +#endif + case SQLITE_FCNTL_TEMPFILENAME: { + char *zTFile = 0; + int rc = winGetTempname(pFile->pVfs, &zTFile); + if( rc==SQLITE_OK ){ + *(char**)pArg = zTFile; + } + OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); + return rc; + } +#if SQLITE_MAX_MMAP_SIZE>0 + case SQLITE_FCNTL_MMAP_SIZE: { + i64 newLimit = *(i64*)pArg; + int rc = SQLITE_OK; + if( newLimit>sqlite3GlobalConfig.mxMmap ){ + newLimit = sqlite3GlobalConfig.mxMmap; + } + *(i64*)pArg = pFile->mmapSizeMax; + if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){ + pFile->mmapSizeMax = newLimit; + if( pFile->mmapSize>0 ){ + winUnmapfile(pFile); + rc = winMapfile(pFile, -1); + } + } + OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); + return rc; + } +#endif + } + OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); + return SQLITE_NOTFOUND; +} + +/* +** Return the sector size in bytes of the underlying block device for +** the specified file. This is almost always 512 bytes, but may be +** larger for some devices. +** +** SQLite code assumes this function cannot fail. 
It also assumes that +** if two files are created in the same file-system directory (i.e. +** a database and its journal file) that the sector size will be the +** same for both. +*/ +static int winSectorSize(sqlite3_file *id){ + (void)id; + return SQLITE_DEFAULT_SECTOR_SIZE; +} + +/* +** Return a vector of device characteristics. +*/ +static int winDeviceCharacteristics(sqlite3_file *id){ + winFile *p = (winFile*)id; + return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | + ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0); +} + +/* +** Windows will only let you create file view mappings +** on allocation size granularity boundaries. +** During sqlite3_os_init() we do a GetSystemInfo() +** to get the granularity size. +*/ +static SYSTEM_INFO winSysInfo; + +#ifndef SQLITE_OMIT_WAL + +/* +** Helper functions to obtain and relinquish the global mutex. The +** global mutex is used to protect the winLockInfo objects used by +** this file, all of which may be shared by multiple threads. +** +** Function winShmMutexHeld() is used to assert() that the global mutex +** is held when required. This function is only used as part of assert() +** statements. e.g. +** +** winShmEnterMutex() +** assert( winShmMutexHeld() ); +** winShmLeaveMutex() +*/ +static void winShmEnterMutex(void){ + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); +} +static void winShmLeaveMutex(void){ + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); +} +#ifndef NDEBUG +static int winShmMutexHeld(void) { + return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); +} +#endif + +/* +** Object used to represent a single file opened and mmapped to provide +** shared memory. When multiple threads all reference the same +** log-summary, each thread has its own winFile object, but they all +** point to a single instance of this object. In other words, each +** log-summary is opened only once per process. +** +** winShmMutexHeld() must be true when creating or destroying +** this object or while reading or writing the following fields: +** +** nRef +** pNext +** +** The following fields are read-only after the object is created: +** +** fid +** zFilename +** +** Either winShmNode.mutex must be held or winShmNode.nRef==0 and +** winShmMutexHeld() is true when reading or writing any other field +** in this structure. +** +*/ +struct winShmNode { + sqlite3_mutex *mutex; /* Mutex to access this object */ + char *zFilename; /* Name of the file */ + winFile hFile; /* File handle from winOpen */ + + int szRegion; /* Size of shared-memory regions */ + int nRegion; /* Size of array apRegion */ + struct ShmRegion { + HANDLE hMap; /* File handle from CreateFileMapping */ + void *pMap; + } *aRegion; + DWORD lastErrno; /* The Windows errno from the last I/O error */ + + int nRef; /* Number of winShm objects pointing to this */ + winShm *pFirst; /* All winShm objects pointing to this */ + winShmNode *pNext; /* Next in list of all winShmNode objects */ +#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) + u8 nextShmId; /* Next available winShm.id value */ +#endif +}; + +/* +** A global array of all winShmNode objects. +** +** The winShmMutexHeld() must be true while reading or writing this list. +*/ +static winShmNode *winShmNodeList = 0; + +/* +** Structure used internally by this VFS to record the state of an +** open shared memory connection. 
+**
+** The following fields are initialized when this object is created and
+** are read-only thereafter:
+**
+**    winShm.pShmNode
+**    winShm.id
+**
+** All other fields are read/write.  The winShm.pShmNode->mutex must be held
+** while accessing any read/write fields.
+*/
+struct winShm {
+  winShmNode *pShmNode;      /* The underlying winShmNode object */
+  winShm *pNext;             /* Next winShm with the same winShmNode */
+  u8 hasMutex;               /* True if holding the winShmNode mutex */
+  u16 sharedMask;            /* Mask of shared locks held */
+  u16 exclMask;              /* Mask of exclusive locks held */
+#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
+  u8 id;                     /* Id of this connection with its winShmNode */
+#endif
+};
+
+/*
+** Constants used for locking
+*/
+#define WIN_SHM_BASE   ((22+SQLITE_SHM_NLOCK)*4)        /* first lock byte */
+#define WIN_SHM_DMS    (WIN_SHM_BASE+SQLITE_SHM_NLOCK)  /* deadman switch */
+
+/*
+** Apply advisory locks for all n bytes beginning at ofst.
+*/
+#define WINSHM_UNLCK  1
+#define WINSHM_RDLCK  2
+#define WINSHM_WRLCK  3
+static int winShmSystemLock(
+  winShmNode *pFile,    /* Apply locks to this open shared-memory segment */
+  int lockType,         /* WINSHM_UNLCK, WINSHM_RDLCK, or WINSHM_WRLCK */
+  int ofst,             /* Offset to first byte to be locked/unlocked */
+  int nByte             /* Number of bytes to lock or unlock */
+){
+  int rc = 0;           /* Result code from Lock/UnlockFileEx() */
+
+  /* Access to the winShmNode object is serialized by the caller */
+  assert( sqlite3_mutex_held(pFile->mutex) || pFile->nRef==0 );
+
+  OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n",
+           pFile->hFile.h, lockType, ofst, nByte));
+
+  /* Release/Acquire the system-level lock */
+  if( lockType==WINSHM_UNLCK ){
+    rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0);
+  }else{
+    /* Initialize the locking parameters */
+    DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY;
+    if( lockType == WINSHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK;
+    rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0);
+  }
+
+  if( rc!= 0 ){
+    rc = SQLITE_OK;
+  }else{
+    pFile->lastErrno = osGetLastError();
+    rc = SQLITE_BUSY;
+  }
+
+  OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n",
+           pFile->hFile.h, (lockType == WINSHM_UNLCK) ? "winUnlockFile" :
+           "winLockFile", pFile->lastErrno, sqlite3ErrName(rc)));
+
+  return rc;
+}
+
+/* Forward references to VFS methods */
+static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*);
+static int winDelete(sqlite3_vfs *,const char*,int);
+
+/*
+** Purge the winShmNodeList list of all entries with winShmNode.nRef==0.
+**
+** This is not a VFS shared-memory method; it is a utility function called
+** by VFS shared-memory methods.
+*/
+static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){
+  winShmNode **pp;
+  winShmNode *p;
+  assert( winShmMutexHeld() );
+  OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n",
+           osGetCurrentProcessId(), deleteFlag));
+  pp = &winShmNodeList;
+  while( (p = *pp)!=0 ){
+    if( p->nRef==0 ){
+      int i;
+      if( p->mutex ){ sqlite3_mutex_free(p->mutex); }
+      for(i=0; i<p->nRegion; i++){
+        BOOL bRc = osUnmapViewOfFile(p->aRegion[i].pMap);
+        OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n",
+                 osGetCurrentProcessId(), i, bRc ? "ok" : "failed"));
+        UNUSED_VARIABLE_VALUE(bRc);
+        bRc = osCloseHandle(p->aRegion[i].hMap);
+        OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n",
+                 osGetCurrentProcessId(), i, bRc ?
"ok" : "failed")); + UNUSED_VARIABLE_VALUE(bRc); + } + if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){ + SimulateIOErrorBenign(1); + winClose((sqlite3_file *)&p->hFile); + SimulateIOErrorBenign(0); + } + if( deleteFlag ){ + SimulateIOErrorBenign(1); + sqlite3BeginBenignMalloc(); + winDelete(pVfs, p->zFilename, 0); + sqlite3EndBenignMalloc(); + SimulateIOErrorBenign(0); + } + *pp = p->pNext; + sqlite3_free(p->aRegion); + sqlite3_free(p); + }else{ + pp = &p->pNext; + } + } +} + +/* +** Open the shared-memory area associated with database file pDbFd. +** +** When opening a new shared-memory file, if no other instances of that +** file are currently open, in this process or in other processes, then +** the file must be truncated to zero length or have its header cleared. +*/ +static int winOpenSharedMemory(winFile *pDbFd){ + struct winShm *p; /* The connection to be opened */ + struct winShmNode *pShmNode = 0; /* The underlying mmapped file */ + int rc; /* Result code */ + struct winShmNode *pNew; /* Newly allocated winShmNode */ + int nName; /* Size of zName in bytes */ + + assert( pDbFd->pShm==0 ); /* Not previously opened */ + + /* Allocate space for the new sqlite3_shm object. Also speculatively + ** allocate space for a new winShmNode and filename. + */ + p = sqlite3MallocZero( sizeof(*p) ); + if( p==0 ) return SQLITE_IOERR_NOMEM_BKPT; + nName = sqlite3Strlen30(pDbFd->zPath); + pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 ); + if( pNew==0 ){ + sqlite3_free(p); + return SQLITE_IOERR_NOMEM_BKPT; + } + pNew->zFilename = (char*)&pNew[1]; + sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); + sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); + + /* Look to see if there is an existing winShmNode that can be used. + ** If no matching winShmNode currently exists, create a new one. + */ + winShmEnterMutex(); + for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ + /* TBD need to come up with better match here. Perhaps + ** use FILE_ID_BOTH_DIR_INFO Structure. + */ + if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; + } + if( pShmNode ){ + sqlite3_free(pNew); + }else{ + pShmNode = pNew; + pNew = 0; + ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; + pShmNode->pNext = winShmNodeList; + winShmNodeList = pShmNode; + + if( sqlite3GlobalConfig.bCoreMutex ){ + pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->mutex==0 ){ + rc = SQLITE_IOERR_NOMEM_BKPT; + goto shm_open_err; + } + } + + rc = winOpen(pDbFd->pVfs, + pShmNode->zFilename, /* Name of the file (UTF-8) */ + (sqlite3_file*)&pShmNode->hFile, /* File handle here */ + SQLITE_OPEN_WAL | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, + 0); + if( SQLITE_OK!=rc ){ + goto shm_open_err; + } + + /* Check to see if another process is holding the dead-man switch. + ** If not, truncate the file to zero length. 
+ */ + if( winShmSystemLock(pShmNode, WINSHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){ + rc = winTruncate((sqlite3_file *)&pShmNode->hFile, 0); + if( rc!=SQLITE_OK ){ + rc = winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), + "winOpenShm", pDbFd->zPath); + } + } + if( rc==SQLITE_OK ){ + winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); + rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, WIN_SHM_DMS, 1); + } + if( rc ) goto shm_open_err; + } + + /* Make the new connection a child of the winShmNode */ + p->pShmNode = pShmNode; +#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) + p->id = pShmNode->nextShmId++; +#endif + pShmNode->nRef++; + pDbFd->pShm = p; + winShmLeaveMutex(); + + /* The reference count on pShmNode has already been incremented under + ** the cover of the winShmEnterMutex() mutex and the pointer from the + ** new (struct winShm) object to the pShmNode has been set. All that is + ** left to do is to link the new object into the linked list starting + ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex + ** mutex. + */ + sqlite3_mutex_enter(pShmNode->mutex); + p->pNext = pShmNode->pFirst; + pShmNode->pFirst = p; + sqlite3_mutex_leave(pShmNode->mutex); + return SQLITE_OK; + + /* Jump here on any error */ +shm_open_err: + winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); + winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ + sqlite3_free(p); + sqlite3_free(pNew); + winShmLeaveMutex(); + return rc; +} + +/* +** Close a connection to shared-memory. Delete the underlying +** storage if deleteFlag is true. +*/ +static int winShmUnmap( + sqlite3_file *fd, /* Database holding shared memory */ + int deleteFlag /* Delete after closing if true */ +){ + winFile *pDbFd; /* Database holding shared-memory */ + winShm *p; /* The connection to be closed */ + winShmNode *pShmNode; /* The underlying shared-memory file */ + winShm **pp; /* For looping over sibling connections */ + + pDbFd = (winFile*)fd; + p = pDbFd->pShm; + if( p==0 ) return SQLITE_OK; + pShmNode = p->pShmNode; + + /* Remove connection p from the set of connections associated + ** with pShmNode */ + sqlite3_mutex_enter(pShmNode->mutex); + for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} + *pp = p->pNext; + + /* Free the connection p */ + sqlite3_free(p); + pDbFd->pShm = 0; + sqlite3_mutex_leave(pShmNode->mutex); + + /* If pShmNode->nRef has reached 0, then close the underlying + ** shared-memory file, too */ + winShmEnterMutex(); + assert( pShmNode->nRef>0 ); + pShmNode->nRef--; + if( pShmNode->nRef==0 ){ + winShmPurge(pDbFd->pVfs, deleteFlag); + } + winShmLeaveMutex(); + + return SQLITE_OK; +} + +/* +** Change the lock state for a shared-memory segment. 
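+**
+** Locks are represented as bits in a 16-bit mask.  As a worked example of
+** the computation performed below, a request with ofst==2 and n==3 yields
+**
+**   mask = (1<<(2+3)) - (1<<2) = 32 - 4 = 28 = 0x1c
+**
+** i.e. bits 2, 3 and 4 set, one bit for each of the three lock slots.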
+*/
+static int winShmLock(
+  sqlite3_file *fd,          /* Database file holding the shared memory */
+  int ofst,                  /* First lock to acquire or release */
+  int n,                     /* Number of locks to acquire or release */
+  int flags                  /* What to do with the lock */
+){
+  winFile *pDbFd = (winFile*)fd;        /* Connection holding shared memory */
+  winShm *p = pDbFd->pShm;              /* The shared memory being locked */
+  winShm *pX;                           /* For looping over all siblings */
+  winShmNode *pShmNode = p->pShmNode;
+  int rc = SQLITE_OK;                   /* Result code */
+  u16 mask;                             /* Mask of locks to take or release */
+
+  assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK );
+  assert( n>=1 );
+  assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED)
+       || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE)
+       || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED)
+       || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) );
+  assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 );
+
+  mask = (u16)((1U<<(ofst+n)) - (1U<<ofst));
+  assert( n>1 || mask==(1<<ofst) );
+  sqlite3_mutex_enter(pShmNode->mutex);
+  if( flags & SQLITE_SHM_UNLOCK ){
+    u16 allMask = 0; /* Mask of locks held by siblings */
+
+    /* See if any siblings hold this same lock */
+    for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+      if( pX==p ) continue;
+      assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 );
+      allMask |= pX->sharedMask;
+    }
+
+    /* Unlock the system-level locks */
+    if( (mask & allMask)==0 ){
+      rc = winShmSystemLock(pShmNode, WINSHM_UNLCK, ofst+WIN_SHM_BASE, n);
+    }else{
+      rc = SQLITE_OK;
+    }
+
+    /* Undo the local locks */
+    if( rc==SQLITE_OK ){
+      p->exclMask &= ~mask;
+      p->sharedMask &= ~mask;
+    }
+  }else if( flags & SQLITE_SHM_SHARED ){
+    u16 allShared = 0;  /* Union of locks held by connections other than "p" */
+
+    /* Find out which shared locks are already held by sibling connections.
+    ** If any sibling already holds an exclusive lock, go ahead and return
+    ** SQLITE_BUSY.
+    */
+    for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+      if( (pX->exclMask & mask)!=0 ){
+        rc = SQLITE_BUSY;
+        break;
+      }
+      allShared |= pX->sharedMask;
+    }
+
+    /* Get shared locks at the system level, if necessary */
+    if( rc==SQLITE_OK ){
+      if( (allShared & mask)==0 ){
+        rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, ofst+WIN_SHM_BASE, n);
+      }else{
+        rc = SQLITE_OK;
+      }
+    }
+
+    /* Get the local shared locks */
+    if( rc==SQLITE_OK ){
+      p->sharedMask |= mask;
+    }
+  }else{
+    /* Make sure no sibling connections hold locks that will block this
+    ** lock.  If any do, return SQLITE_BUSY right away.
+    */
+    for(pX=pShmNode->pFirst; pX; pX=pX->pNext){
+      if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){
+        rc = SQLITE_BUSY;
+        break;
+      }
+    }
+
+    /* Get the exclusive locks at the system level.  Then if successful
+    ** also mark the local connection as being locked.
+    */
+    if( rc==SQLITE_OK ){
+      rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, ofst+WIN_SHM_BASE, n);
+      if( rc==SQLITE_OK ){
+        assert( (p->sharedMask & mask)==0 );
+        p->exclMask |= mask;
+      }
+    }
+  }
+  sqlite3_mutex_leave(pShmNode->mutex);
+  OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n",
+           osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask,
+           sqlite3ErrName(rc)));
+  return rc;
+}
+
+/*
+** Implement a memory barrier or memory fence on shared memory.
+**
+** All loads and stores begun before the barrier must complete before
+** any load or store begun after the barrier.
+*/
+static void winShmBarrier(
+  sqlite3_file *fd          /* Database holding the shared memory */
+){
+  UNUSED_PARAMETER(fd);
+  sqlite3MemoryBarrier();   /* compiler-defined memory barrier */
+  winShmEnterMutex();       /* Also mutex, for redundancy */
+  winShmLeaveMutex();
+}
+
+/*
+** This function is called to obtain a pointer to region iRegion of the
+** shared-memory associated with the database file fd. Shared-memory regions
+** are numbered starting from zero. Each shared-memory region is szRegion
+** bytes in size.
+**
+** If an error occurs, an error code is returned and *pp is set to NULL.
+**
+** Otherwise, if the isWrite parameter is 0 and the requested shared-memory
+** region has not been allocated (by any client, including one running in a
+** separate process), then *pp is set to NULL and SQLITE_OK returned. If
+** isWrite is non-zero and the requested shared-memory region has not yet
+** been allocated, it is allocated by this function.
+**
+** If the shared-memory region has already been allocated or is allocated by
+** this call as described above, then it is mapped into this process's
+** address space (if it is not already), *pp is set to point to the mapped
+** memory and SQLITE_OK returned.
+*/
+static int winShmMap(
+  sqlite3_file *fd,               /* Handle open on database file */
+  int iRegion,                    /* Region to retrieve */
+  int szRegion,                   /* Size of regions */
+  int isWrite,                    /* True to extend file if necessary */
+  void volatile **pp              /* OUT: Mapped memory */
+){
+  winFile *pDbFd = (winFile*)fd;
+  winShm *pShm = pDbFd->pShm;
+  winShmNode *pShmNode;
+  int rc = SQLITE_OK;
+
+  if( !pShm ){
+    rc = winOpenSharedMemory(pDbFd);
+    if( rc!=SQLITE_OK ) return rc;
+    pShm = pDbFd->pShm;
+  }
+  pShmNode = pShm->pShmNode;
+
+  sqlite3_mutex_enter(pShmNode->mutex);
+  assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 );
+
+  if( pShmNode->nRegion<=iRegion ){
+    struct ShmRegion *apNew;           /* New aRegion[] array */
+    int nByte = (iRegion+1)*szRegion;  /* Minimum required file size */
+    sqlite3_int64 sz;                  /* Current size of wal-index file */
+
+    pShmNode->szRegion = szRegion;
+
+    /* The requested region is not mapped into this process's address space.
+    ** Check to see if it has been allocated (i.e. if the wal-index file is
+    ** large enough to contain the requested region).
+    */
+    rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz);
+    if( rc!=SQLITE_OK ){
+      rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(),
+                       "winShmMap1", pDbFd->zPath);
+      goto shmpage_out;
+    }
+
+    if( sz<nByte ){
+      /* The requested memory region does not exist. If isWrite is set to
+      ** zero, exit early. *pp will be set to NULL and SQLITE_OK returned.
+      **
+      ** Alternatively, if isWrite is non-zero, use ftruncate() to allocate
+      ** the requested memory region.
+      */
+      if( !isWrite ) goto shmpage_out;
+      rc = winTruncate((sqlite3_file *)&pShmNode->hFile, nByte);
+      if( rc!=SQLITE_OK ){
+        rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(),
+                         "winShmMap2", pDbFd->zPath);
+        goto shmpage_out;
+      }
+    }
+
+    /* Map the requested memory region into this process's address space.
*/ + apNew = (struct ShmRegion *)sqlite3_realloc64( + pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) + ); + if( !apNew ){ + rc = SQLITE_IOERR_NOMEM_BKPT; + goto shmpage_out; + } + pShmNode->aRegion = apNew; + + while( pShmNode->nRegion<=iRegion ){ + HANDLE hMap = NULL; /* file-mapping handle */ + void *pMap = 0; /* Mapped memory region */ + +#if SQLITE_OS_WINRT + hMap = osCreateFileMappingFromApp(pShmNode->hFile.h, + NULL, PAGE_READWRITE, nByte, NULL + ); +#elif defined(SQLITE_WIN32_HAS_WIDE) + hMap = osCreateFileMappingW(pShmNode->hFile.h, + NULL, PAGE_READWRITE, 0, nByte, NULL + ); +#elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA + hMap = osCreateFileMappingA(pShmNode->hFile.h, + NULL, PAGE_READWRITE, 0, nByte, NULL + ); +#endif + OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n", + osGetCurrentProcessId(), pShmNode->nRegion, nByte, + hMap ? "ok" : "failed")); + if( hMap ){ + int iOffset = pShmNode->nRegion*szRegion; + int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity; +#if SQLITE_OS_WINRT + pMap = osMapViewOfFileFromApp(hMap, FILE_MAP_WRITE | FILE_MAP_READ, + iOffset - iOffsetShift, szRegion + iOffsetShift + ); +#else + pMap = osMapViewOfFile(hMap, FILE_MAP_WRITE | FILE_MAP_READ, + 0, iOffset - iOffsetShift, szRegion + iOffsetShift + ); +#endif + OSTRACE(("SHM-MAP-MAP pid=%lu, region=%d, offset=%d, size=%d, rc=%s\n", + osGetCurrentProcessId(), pShmNode->nRegion, iOffset, + szRegion, pMap ? "ok" : "failed")); + } + if( !pMap ){ + pShmNode->lastErrno = osGetLastError(); + rc = winLogError(SQLITE_IOERR_SHMMAP, pShmNode->lastErrno, + "winShmMap3", pDbFd->zPath); + if( hMap ) osCloseHandle(hMap); + goto shmpage_out; + } + + pShmNode->aRegion[pShmNode->nRegion].pMap = pMap; + pShmNode->aRegion[pShmNode->nRegion].hMap = hMap; + pShmNode->nRegion++; + } + } + +shmpage_out: + if( pShmNode->nRegion>iRegion ){ + int iOffset = iRegion*szRegion; + int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity; + char *p = (char *)pShmNode->aRegion[iRegion].pMap; + *pp = (void *)&p[iOffsetShift]; + }else{ + *pp = 0; + } + sqlite3_mutex_leave(pShmNode->mutex); + return rc; +} + +#else +# define winShmMap 0 +# define winShmLock 0 +# define winShmBarrier 0 +# define winShmUnmap 0 +#endif /* #ifndef SQLITE_OMIT_WAL */ + +/* +** Cleans up the mapped region of the specified file, if any. 
+*/ +#if SQLITE_MAX_MMAP_SIZE>0 +static int winUnmapfile(winFile *pFile){ + assert( pFile!=0 ); + OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, pMapRegion=%p, " + "mmapSize=%lld, mmapSizeActual=%lld, mmapSizeMax=%lld\n", + osGetCurrentProcessId(), pFile, pFile->hMap, pFile->pMapRegion, + pFile->mmapSize, pFile->mmapSizeActual, pFile->mmapSizeMax)); + if( pFile->pMapRegion ){ + if( !osUnmapViewOfFile(pFile->pMapRegion) ){ + pFile->lastErrno = osGetLastError(); + OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, pMapRegion=%p, " + "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile, + pFile->pMapRegion)); + return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, + "winUnmapfile1", pFile->zPath); + } + pFile->pMapRegion = 0; + pFile->mmapSize = 0; + pFile->mmapSizeActual = 0; + } + if( pFile->hMap!=NULL ){ + if( !osCloseHandle(pFile->hMap) ){ + pFile->lastErrno = osGetLastError(); + OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, rc=SQLITE_IOERR_MMAP\n", + osGetCurrentProcessId(), pFile, pFile->hMap)); + return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, + "winUnmapfile2", pFile->zPath); + } + pFile->hMap = NULL; + } + OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n", + osGetCurrentProcessId(), pFile)); + return SQLITE_OK; +} + +/* +** Memory map or remap the file opened by file-descriptor pFd (if the file +** is already mapped, the existing mapping is replaced by the new). Or, if +** there already exists a mapping for this file, and there are still +** outstanding xFetch() references to it, this function is a no-op. +** +** If parameter nByte is non-negative, then it is the requested size of +** the mapping to create. Otherwise, if nByte is less than zero, then the +** requested size is the size of the file on disk. The actual size of the +** created mapping is either the requested size or the value configured +** using SQLITE_FCNTL_MMAP_SIZE, whichever is smaller. +** +** SQLITE_OK is returned if no error occurs (even if the mapping is not +** recreated as a result of outstanding references) or an SQLite error +** code otherwise. 
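+**
+** For reference, the configured upper bound mentioned above is the one an
+** application can adjust per database connection; a minimal sketch (the
+** db handle and the 256MB figure are illustrative only):
+**
+**   sqlite3_int64 szMmap = 268435456;
+**   sqlite3_file_control(db, "main", SQLITE_FCNTL_MMAP_SIZE, &szMmap);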
+*/ +static int winMapfile(winFile *pFd, sqlite3_int64 nByte){ + sqlite3_int64 nMap = nByte; + int rc; + + assert( nMap>=0 || pFd->nFetchOut==0 ); + OSTRACE(("MAP-FILE pid=%lu, pFile=%p, size=%lld\n", + osGetCurrentProcessId(), pFd, nByte)); + + if( pFd->nFetchOut>0 ) return SQLITE_OK; + + if( nMap<0 ){ + rc = winFileSize((sqlite3_file*)pFd, &nMap); + if( rc ){ + OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_IOERR_FSTAT\n", + osGetCurrentProcessId(), pFd)); + return SQLITE_IOERR_FSTAT; + } + } + if( nMap>pFd->mmapSizeMax ){ + nMap = pFd->mmapSizeMax; + } + nMap &= ~(sqlite3_int64)(winSysInfo.dwPageSize - 1); + + if( nMap==0 && pFd->mmapSize>0 ){ + winUnmapfile(pFd); + } + if( nMap!=pFd->mmapSize ){ + void *pNew = 0; + DWORD protect = PAGE_READONLY; + DWORD flags = FILE_MAP_READ; + + winUnmapfile(pFd); +#ifdef SQLITE_MMAP_READWRITE + if( (pFd->ctrlFlags & WINFILE_RDONLY)==0 ){ + protect = PAGE_READWRITE; + flags |= FILE_MAP_WRITE; + } +#endif +#if SQLITE_OS_WINRT + pFd->hMap = osCreateFileMappingFromApp(pFd->h, NULL, protect, nMap, NULL); +#elif defined(SQLITE_WIN32_HAS_WIDE) + pFd->hMap = osCreateFileMappingW(pFd->h, NULL, protect, + (DWORD)((nMap>>32) & 0xffffffff), + (DWORD)(nMap & 0xffffffff), NULL); +#elif defined(SQLITE_WIN32_HAS_ANSI) && SQLITE_WIN32_CREATEFILEMAPPINGA + pFd->hMap = osCreateFileMappingA(pFd->h, NULL, protect, + (DWORD)((nMap>>32) & 0xffffffff), + (DWORD)(nMap & 0xffffffff), NULL); +#endif + if( pFd->hMap==NULL ){ + pFd->lastErrno = osGetLastError(); + rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno, + "winMapfile1", pFd->zPath); + /* Log the error, but continue normal operation using xRead/xWrite */ + OSTRACE(("MAP-FILE-CREATE pid=%lu, pFile=%p, rc=%s\n", + osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); + return SQLITE_OK; + } + assert( (nMap % winSysInfo.dwPageSize)==0 ); + assert( sizeof(SIZE_T)==sizeof(sqlite3_int64) || nMap<=0xffffffff ); +#if SQLITE_OS_WINRT + pNew = osMapViewOfFileFromApp(pFd->hMap, flags, 0, (SIZE_T)nMap); +#else + pNew = osMapViewOfFile(pFd->hMap, flags, 0, 0, (SIZE_T)nMap); +#endif + if( pNew==NULL ){ + osCloseHandle(pFd->hMap); + pFd->hMap = NULL; + pFd->lastErrno = osGetLastError(); + rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno, + "winMapfile2", pFd->zPath); + /* Log the error, but continue normal operation using xRead/xWrite */ + OSTRACE(("MAP-FILE-MAP pid=%lu, pFile=%p, rc=%s\n", + osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); + return SQLITE_OK; + } + pFd->pMapRegion = pNew; + pFd->mmapSize = nMap; + pFd->mmapSizeActual = nMap; + } + + OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n", + osGetCurrentProcessId(), pFd)); + return SQLITE_OK; +} +#endif /* SQLITE_MAX_MMAP_SIZE>0 */ + +/* +** If possible, return a pointer to a mapping of file fd starting at offset +** iOff. The mapping must be valid for at least nAmt bytes. +** +** If such a pointer can be obtained, store it in *pp and return SQLITE_OK. +** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK. +** Finally, if an error does occur, return an SQLite error code. The final +** value of *pp is undefined in this case. +** +** If this function does return a pointer, the caller must eventually +** release the reference by calling winUnfetch(). 
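+**
+** A minimal sketch of the expected caller pattern (the surrounding
+** variables are illustrative only, not taken from this file):
+**
+**   void *pMap = 0;
+**   if( winFetch(fd, iOff, nAmt, &pMap)==SQLITE_OK && pMap ){
+**     memcpy(zDest, pMap, nAmt);     -- read through the mapping
+**     winUnfetch(fd, iOff, pMap);    -- release the reference
+**   }else{
+**     -- fall back to the xRead() method
+**   }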
+*/
+static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){
+#if SQLITE_MAX_MMAP_SIZE>0
+  winFile *pFd = (winFile*)fd;   /* The underlying database file */
+#endif
+  *pp = 0;
+
+  OSTRACE(("FETCH pid=%lu, pFile=%p, offset=%lld, amount=%d, pp=%p\n",
+           osGetCurrentProcessId(), fd, iOff, nAmt, pp));
+
+#if SQLITE_MAX_MMAP_SIZE>0
+  if( pFd->mmapSizeMax>0 ){
+    if( pFd->pMapRegion==0 ){
+      int rc = winMapfile(pFd, -1);
+      if( rc!=SQLITE_OK ){
+        OSTRACE(("FETCH pid=%lu, pFile=%p, rc=%s\n",
+                 osGetCurrentProcessId(), pFd, sqlite3ErrName(rc)));
+        return rc;
+      }
+    }
+    if( pFd->mmapSize >= iOff+nAmt ){
+      *pp = &((u8 *)pFd->pMapRegion)[iOff];
+      pFd->nFetchOut++;
+    }
+  }
+#endif
+
+  OSTRACE(("FETCH pid=%lu, pFile=%p, pp=%p, *pp=%p, rc=SQLITE_OK\n",
+           osGetCurrentProcessId(), fd, pp, *pp));
+  return SQLITE_OK;
+}
+
+/*
+** If the third argument is non-NULL, then this function releases a
+** reference obtained by an earlier call to winFetch(). The second
+** argument passed to this function must be the same as the corresponding
+** argument that was passed to the winFetch() invocation.
+**
+** Or, if the third argument is NULL, then this function is being called
+** to inform the VFS layer that, according to POSIX, any existing mapping
+** may now be invalid and should be unmapped.
+*/
+static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){
+#if SQLITE_MAX_MMAP_SIZE>0
+  winFile *pFd = (winFile*)fd;   /* The underlying database file */
+
+  /* If p==0 (unmap the entire file) then there must be no outstanding
+  ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference),
+  ** then there must be at least one outstanding. */
+  assert( (p==0)==(pFd->nFetchOut==0) );
+
+  /* If p!=0, it must match the iOff value. */
+  assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] );
+
+  OSTRACE(("UNFETCH pid=%lu, pFile=%p, offset=%lld, p=%p\n",
+           osGetCurrentProcessId(), pFd, iOff, p));
+
+  if( p ){
+    pFd->nFetchOut--;
+  }else{
+    /* FIXME: If Windows truly always prevents truncating or deleting a
+    ** file while a mapping is held, then the following winUnmapfile() call
+    ** is unnecessary and can be omitted - potentially improving
+    ** performance. */
+    winUnmapfile(pFd);
+  }
+
+  assert( pFd->nFetchOut>=0 );
+#endif
+
+  OSTRACE(("UNFETCH pid=%lu, pFile=%p, rc=SQLITE_OK\n",
+           osGetCurrentProcessId(), fd));
+  return SQLITE_OK;
+}
+
+/*
+** Here ends the implementation of all sqlite3_file methods.
+**
+********************** End sqlite3_file Methods *******************************
+******************************************************************************/
+
+/*
+** This vector defines all the methods that can operate on an
+** sqlite3_file for win32.
+*/
+static const sqlite3_io_methods winIoMethod = {
+  3,                              /* iVersion */
+  winClose,                       /* xClose */
+  winRead,                        /* xRead */
+  winWrite,                       /* xWrite */
+  winTruncate,                    /* xTruncate */
+  winSync,                        /* xSync */
+  winFileSize,                    /* xFileSize */
+  winLock,                        /* xLock */
+  winUnlock,                      /* xUnlock */
+  winCheckReservedLock,           /* xCheckReservedLock */
+  winFileControl,                 /* xFileControl */
+  winSectorSize,                  /* xSectorSize */
+  winDeviceCharacteristics,       /* xDeviceCharacteristics */
+  winShmMap,                      /* xShmMap */
+  winShmLock,                     /* xShmLock */
+  winShmBarrier,                  /* xShmBarrier */
+  winShmUnmap,                    /* xShmUnmap */
+  winFetch,                       /* xFetch */
+  winUnfetch                      /* xUnfetch */
+};
+
+/*
+** This vector defines all the methods that can operate on an
+** sqlite3_file for win32 without performing any locking.
+*/
+static const sqlite3_io_methods winIoNolockMethod = {
+  3,                              /* iVersion */
+  winClose,                       /* xClose */
+  winRead,                        /* xRead */
+  winWrite,                       /* xWrite */
+  winTruncate,                    /* xTruncate */
+  winSync,                        /* xSync */
+  winFileSize,                    /* xFileSize */
+  winNolockLock,                  /* xLock */
+  winNolockUnlock,                /* xUnlock */
+  winNolockCheckReservedLock,     /* xCheckReservedLock */
+  winFileControl,                 /* xFileControl */
+  winSectorSize,                  /* xSectorSize */
+  winDeviceCharacteristics,       /* xDeviceCharacteristics */
+  winShmMap,                      /* xShmMap */
+  winShmLock,                     /* xShmLock */
+  winShmBarrier,                  /* xShmBarrier */
+  winShmUnmap,                    /* xShmUnmap */
+  winFetch,                       /* xFetch */
+  winUnfetch                      /* xUnfetch */
+};
+
+static winVfsAppData winAppData = {
+  &winIoMethod,       /* pMethod */
+  0,                  /* pAppData */
+  0                   /* bNoLock */
+};
+
+static winVfsAppData winNolockAppData = {
+  &winIoNolockMethod, /* pMethod */
+  0,                  /* pAppData */
+  1                   /* bNoLock */
+};
+
+/****************************************************************************
+**************************** sqlite3_vfs methods ****************************
+**
+** This division contains the implementation of methods on the
+** sqlite3_vfs object.
+*/
+
+#if defined(__CYGWIN__)
+/*
+** Convert a filename from whatever the underlying operating system
+** supports for filenames into UTF-8. Space to hold the result is
+** obtained from malloc and must be freed by the calling function.
+*/
+static char *winConvertToUtf8Filename(const void *zFilename){
+  char *zConverted = 0;
+  if( osIsNT() ){
+    zConverted = winUnicodeToUtf8(zFilename);
+  }
+#ifdef SQLITE_WIN32_HAS_ANSI
+  else{
+    zConverted = winMbcsToUtf8(zFilename, osAreFileApisANSI());
+  }
+#endif
+  /* caller will handle out of memory */
+  return zConverted;
+}
+#endif
+
+/*
+** Convert a UTF-8 filename into whatever form the underlying
+** operating system wants filenames in. Space to hold the result
+** is obtained from malloc and must be freed by the calling
+** function.
+*/
+static void *winConvertFromUtf8Filename(const char *zFilename){
+  void *zConverted = 0;
+  if( osIsNT() ){
+    zConverted = winUtf8ToUnicode(zFilename);
+  }
+#ifdef SQLITE_WIN32_HAS_ANSI
+  else{
+    zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI());
+  }
+#endif
+  /* caller will handle out of memory */
+  return zConverted;
+}
+
+/*
+** This function returns non-zero if the specified UTF-8 string buffer
+** ends with a directory separator character or one was successfully
+** added to it.
+*/
+static int winMakeEndInDirSep(int nBuf, char *zBuf){
+  if( zBuf ){
+    int nLen = sqlite3Strlen30(zBuf);
+    if( nLen>0 ){
+      if( winIsDirSep(zBuf[nLen-1]) ){
+        return 1;
+      }else if( nLen+1<nBuf ){
+        zBuf[nLen] = winGetDirSep();
+        zBuf[nLen+1] = '\0';
+        return 1;
+      }
+    }
+  }
+  return 0;
+}
+
+/*
+** Create a temporary file name and store the resulting pointer into pzBuf.
+** The pointer returned in pzBuf must be freed via sqlite3_free().
+*/
+static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
+  static char zChars[] =
+    "abcdefghijklmnopqrstuvwxyz"
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "0123456789";
+  size_t i, j;
+  int nPre = sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX);
+  int nMax, nBuf, nDir, nLen;
+  char *zBuf;
+
+  /* It's odd to simulate an io-error here, but really this is just
+  ** using the io-error infrastructure to test that SQLite handles this
+  ** function failing.
+  */
+  SimulateIOError( return SQLITE_IOERR );
+
+  /* Allocate a temporary buffer to store the fully qualified file
+  ** name for the temporary file.  If this fails, we cannot continue.
+  */
+  nMax = pVfs->mxPathname; nBuf = nMax + 2;
+  zBuf = sqlite3MallocZero( nBuf );
+  if( !zBuf ){
+    OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+    return SQLITE_IOERR_NOMEM_BKPT;
+  }
+
+  /* Figure out the effective temporary directory. First, check if one
+  ** has been explicitly set by the application; otherwise, use the one
+  ** configured by the operating system.
+  */
+  nDir = nMax - (nPre + 15);
+  assert( nDir>0 );
+  if( sqlite3_temp_directory ){
+    int nDirLen = sqlite3Strlen30(sqlite3_temp_directory);
+    if( nDirLen>0 ){
+      if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){
+        nDirLen++;
+      }
+      if( nDirLen>nDir ){
+        sqlite3_free(zBuf);
+        OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
+        return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0);
+      }
+      sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory);
+    }
+  }
+#if defined(__CYGWIN__)
+  else{
+    static const char *azDirs[] = {
+       0, /* getenv("SQLITE_TMPDIR") */
+       0, /* getenv("TMPDIR") */
+       0, /* getenv("TMP") */
+       0, /* getenv("TEMP") */
+       0, /* getenv("USERPROFILE") */
+       "/var/tmp",
+       "/usr/tmp",
+       "/tmp",
+       ".",
+       0        /* List terminator */
+    };
+    unsigned int i;
+    const char *zDir = 0;
+
+    if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR");
+    if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR");
+    if( !azDirs[2] ) azDirs[2] = getenv("TMP");
+    if( !azDirs[3] ) azDirs[3] = getenv("TEMP");
+    if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE");
+    for(i=0; i<sizeof(azDirs)/sizeof(azDirs[0]); zDir=azDirs[i++]){
+      void *zConverted;
+      if( zDir==0 ) continue;
+      /* If the path starts with a drive letter followed by the colon
+      ** character, assume it is already a native Win32 path; otherwise,
+      ** it must be converted to a native Win32 path via the Cygwin API
+      ** prior to using it.
+      */
+      if( winIsDriveLetterAndColon(zDir) ){
+        zConverted = winConvertFromUtf8Filename(zDir);
+        if( !zConverted ){
+          sqlite3_free(zBuf);
+          OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+          return SQLITE_IOERR_NOMEM_BKPT;
+        }
+        if( winIsDir(zConverted) ){
+          sqlite3_snprintf(nMax, zBuf, "%s", zDir);
+          sqlite3_free(zConverted);
+          break;
+        }
+        sqlite3_free(zConverted);
+      }else{
+        zConverted = sqlite3MallocZero( nMax+1 );
+        if( !zConverted ){
+          sqlite3_free(zBuf);
+          OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+          return SQLITE_IOERR_NOMEM_BKPT;
+        }
+        if( cygwin_conv_path(
+                osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A, zDir,
+                zConverted, nMax+1)<0 ){
+          sqlite3_free(zConverted);
+          sqlite3_free(zBuf);
+          OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_CONVPATH\n"));
+          return winLogError(SQLITE_IOERR_CONVPATH, (DWORD)errno,
+                             "winGetTempname2", zDir);
+        }
+        if( winIsDir(zConverted) ){
+          /* At this point, we know the candidate directory exists and should
+          ** be used.  However, we may need to convert the string containing
+          ** its name into UTF-8 (i.e. if it is UTF-16 right now).
+          */
+          char *zUtf8 = winConvertToUtf8Filename(zConverted);
+          if( !zUtf8 ){
+            sqlite3_free(zConverted);
+            sqlite3_free(zBuf);
+            OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+            return SQLITE_IOERR_NOMEM_BKPT;
+          }
+          sqlite3_snprintf(nMax, zBuf, "%s", zUtf8);
+          sqlite3_free(zUtf8);
+          sqlite3_free(zConverted);
+          break;
+        }
+        sqlite3_free(zConverted);
+      }
+    }
+  }
+#elif !SQLITE_OS_WINRT && !defined(__CYGWIN__)
+  else if( osIsNT() ){
+    char *zMulti;
+    LPWSTR zWidePath = sqlite3MallocZero( nMax*sizeof(WCHAR) );
+    if( !zWidePath ){
+      sqlite3_free(zBuf);
+      OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+      return SQLITE_IOERR_NOMEM_BKPT;
+    }
+    if( osGetTempPathW(nMax, zWidePath)==0 ){
+      sqlite3_free(zWidePath);
+      sqlite3_free(zBuf);
+      OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_GETTEMPPATH\n"));
+      return winLogError(SQLITE_IOERR_GETTEMPPATH, osGetLastError(),
+                         "winGetTempname2", 0);
+    }
+    zMulti = winUnicodeToUtf8(zWidePath);
+    if( zMulti ){
+      sqlite3_snprintf(nMax, zBuf, "%s", zMulti);
+      sqlite3_free(zMulti);
+      sqlite3_free(zWidePath);
+    }else{
+      sqlite3_free(zWidePath);
+      sqlite3_free(zBuf);
+      OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+      return SQLITE_IOERR_NOMEM_BKPT;
+    }
+  }
+#ifdef SQLITE_WIN32_HAS_ANSI
+  else{
+    char *zUtf8;
+    char *zMbcsPath = sqlite3MallocZero( nMax );
+    if( !zMbcsPath ){
+      sqlite3_free(zBuf);
+      OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+      return SQLITE_IOERR_NOMEM_BKPT;
+    }
+    if( osGetTempPathA(nMax, zMbcsPath)==0 ){
+      sqlite3_free(zBuf);
+      OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_GETTEMPPATH\n"));
+      return winLogError(SQLITE_IOERR_GETTEMPPATH, osGetLastError(),
+                         "winGetTempname3", 0);
+    }
+    zUtf8 = winMbcsToUtf8(zMbcsPath, osAreFileApisANSI());
+    if( zUtf8 ){
+      sqlite3_snprintf(nMax, zBuf, "%s", zUtf8);
+      sqlite3_free(zUtf8);
+    }else{
+      sqlite3_free(zBuf);
+      OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n"));
+      return SQLITE_IOERR_NOMEM_BKPT;
+    }
+  }
+#endif /* SQLITE_WIN32_HAS_ANSI */
+#endif /* !SQLITE_OS_WINRT */
+
+  /*
+  ** Check to make sure the temporary directory ends with an appropriate
+  ** separator.  If it does not and there is not enough space left to add
+  ** one, fail.
+  */
+  if( !winMakeEndInDirSep(nDir+1, zBuf) ){
+    sqlite3_free(zBuf);
+    OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
+    return winLogError(SQLITE_ERROR, 0, "winGetTempname4", 0);
+  }
+
+  /*
+  ** Check that the output buffer is large enough for the temporary file
+  ** name in the following format:
+  **
+  **   "<temporary_directory>/etilqs_XXXXXXXXXXXXXXX\0\0"
+  **
+  ** If not, return SQLITE_ERROR.  The number 17 is used here in order to
+  ** account for the space used by the 15 character random suffix and the
+  ** two trailing NUL characters.  The final directory separator character
+  ** has already been added if it was not already present.
+  */
+  nLen = sqlite3Strlen30(zBuf);
+  if( (nLen + nPre + 17) > nBuf ){
+    sqlite3_free(zBuf);
+    OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
+    return winLogError(SQLITE_ERROR, 0, "winGetTempname5", 0);
+  }
+
+  sqlite3_snprintf(nBuf-16-nLen, zBuf+nLen, SQLITE_TEMP_FILE_PREFIX);
+
+  j = sqlite3Strlen30(zBuf);
+  sqlite3_randomness(15, &zBuf[j]);
+  for(i=0; i<15; i++, j++){
+    zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ];
+  }
+  zBuf[j] = 0;
+  zBuf[j+1] = 0;
+  *pzBuf = zBuf;
+
+  OSTRACE(("TEMP-FILENAME name=%s, rc=SQLITE_OK\n", zBuf));
+  return SQLITE_OK;
+}
+
+/*
+** Return TRUE if the named file is really a directory. Return false if
+** it is something other than a directory, or if there is any kind of memory
+** allocation failure.
+*/
+static int winIsDir(const void *zConverted){
+  DWORD attr;
+  int rc = 0;
+  DWORD lastErrno;
+
+  if( osIsNT() ){
+    int cnt = 0;
+    WIN32_FILE_ATTRIBUTE_DATA sAttrData;
+    memset(&sAttrData, 0, sizeof(sAttrData));
+    while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted,
+                             GetFileExInfoStandard,
+                             &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){}
+    if( !rc ){
+      return 0; /* Invalid name? */
+    }
+    attr = sAttrData.dwFileAttributes;
+#if SQLITE_OS_WINCE==0
+  }else{
+    attr = osGetFileAttributesA((char*)zConverted);
+#endif
+  }
+  return (attr!=INVALID_FILE_ATTRIBUTES) && (attr&FILE_ATTRIBUTE_DIRECTORY);
+}
+
+/*
+** Open a file.
+*/
+static int winOpen(
+  sqlite3_vfs *pVfs,        /* Used to get maximum path length and AppData */
+  const char *zName,        /* Name of the file (UTF-8) */
+  sqlite3_file *id,         /* Write the SQLite file handle here */
+  int flags,                /* Open mode flags */
+  int *pOutFlags            /* Status return flags */
+){
+  HANDLE h;
+  DWORD lastErrno = 0;
+  DWORD dwDesiredAccess;
+  DWORD dwShareMode;
+  DWORD dwCreationDisposition;
+  DWORD dwFlagsAndAttributes = 0;
+#if SQLITE_OS_WINCE
+  int isTemp = 0;
+#endif
+  winVfsAppData *pAppData;
+  winFile *pFile = (winFile*)id;
+  void *zConverted;              /* Filename in OS encoding */
+  const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */
+  int cnt = 0;
+
+  /* If argument zName is a NULL pointer, this function is required to open
+  ** a temporary file.
Use this buffer to store the file name in. + */ + char *zTmpname = 0; /* For temporary filename, if necessary. */ + + int rc = SQLITE_OK; /* Function Return Code */ +#if !defined(NDEBUG) || SQLITE_OS_WINCE + int eType = flags&0xFFFFFF00; /* Type of file to open */ +#endif + + int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); + int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); + int isCreate = (flags & SQLITE_OPEN_CREATE); + int isReadonly = (flags & SQLITE_OPEN_READONLY); + int isReadWrite = (flags & SQLITE_OPEN_READWRITE); + +#ifndef NDEBUG + int isOpenJournal = (isCreate && ( + eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_MAIN_JOURNAL + || eType==SQLITE_OPEN_WAL + )); +#endif + + OSTRACE(("OPEN name=%s, pFile=%p, flags=%x, pOutFlags=%p\n", + zUtf8Name, id, flags, pOutFlags)); + + /* Check the following statements are true: + ** + ** (a) Exactly one of the READWRITE and READONLY flags must be set, and + ** (b) if CREATE is set, then READWRITE must also be set, and + ** (c) if EXCLUSIVE is set, then CREATE must also be set. + ** (d) if DELETEONCLOSE is set, then CREATE must also be set. + */ + assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); + assert(isCreate==0 || isReadWrite); + assert(isExclusive==0 || isCreate); + assert(isDelete==0 || isCreate); + + /* The main DB, main journal, WAL file and master journal are never + ** automatically deleted. Nor are they ever temporary files. */ + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); + assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); + + /* Assert that the upper layer has set one of the "file-type" flags. */ + assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB + || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL + || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL + || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL + ); + + assert( pFile!=0 ); + memset(pFile, 0, sizeof(winFile)); + pFile->h = INVALID_HANDLE_VALUE; + +#if SQLITE_OS_WINRT + if( !zUtf8Name && !sqlite3_temp_directory ){ + sqlite3_log(SQLITE_ERROR, + "sqlite3_temp_directory variable should be set for WinRT"); + } +#endif + + /* If the second argument to this function is NULL, generate a + ** temporary file name to use + */ + if( !zUtf8Name ){ + assert( isDelete && !isOpenJournal ); + rc = winGetTempname(pVfs, &zTmpname); + if( rc!=SQLITE_OK ){ + OSTRACE(("OPEN name=%s, rc=%s", zUtf8Name, sqlite3ErrName(rc))); + return rc; + } + zUtf8Name = zTmpname; + } + + /* Database filenames are double-zero terminated if they are not + ** URIs with parameters. Hence, they can always be passed into + ** sqlite3_uri_parameter(). + */ + assert( (eType!=SQLITE_OPEN_MAIN_DB) || (flags & SQLITE_OPEN_URI) || + zUtf8Name[sqlite3Strlen30(zUtf8Name)+1]==0 ); + + /* Convert the filename to the system encoding. 
*/ + zConverted = winConvertFromUtf8Filename(zUtf8Name); + if( zConverted==0 ){ + sqlite3_free(zTmpname); + OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8Name)); + return SQLITE_IOERR_NOMEM_BKPT; + } + + if( winIsDir(zConverted) ){ + sqlite3_free(zConverted); + sqlite3_free(zTmpname); + OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8Name)); + return SQLITE_CANTOPEN_ISDIR; + } + + if( isReadWrite ){ + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + }else{ + dwDesiredAccess = GENERIC_READ; + } + + /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is + ** created. SQLite doesn't use it to indicate "exclusive access" + ** as it is usually understood. + */ + if( isExclusive ){ + /* Creates a new file, only if it does not already exist. */ + /* If the file exists, it fails. */ + dwCreationDisposition = CREATE_NEW; + }else if( isCreate ){ + /* Open existing file, or create if it doesn't exist */ + dwCreationDisposition = OPEN_ALWAYS; + }else{ + /* Opens a file, only if it exists. */ + dwCreationDisposition = OPEN_EXISTING; + } + + dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; + + if( isDelete ){ +#if SQLITE_OS_WINCE + dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN; + isTemp = 1; +#else + dwFlagsAndAttributes = FILE_ATTRIBUTE_TEMPORARY + | FILE_ATTRIBUTE_HIDDEN + | FILE_FLAG_DELETE_ON_CLOSE; +#endif + }else{ + dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; + } + /* Reports from the internet are that performance is always + ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. */ +#if SQLITE_OS_WINCE + dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; +#endif + + if( osIsNT() ){ +#if SQLITE_OS_WINRT + CREATEFILE2_EXTENDED_PARAMETERS extendedParameters; + extendedParameters.dwSize = sizeof(CREATEFILE2_EXTENDED_PARAMETERS); + extendedParameters.dwFileAttributes = + dwFlagsAndAttributes & FILE_ATTRIBUTE_MASK; + extendedParameters.dwFileFlags = dwFlagsAndAttributes & FILE_FLAG_MASK; + extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS; + extendedParameters.lpSecurityAttributes = NULL; + extendedParameters.hTemplateFile = NULL; + while( (h = osCreateFile2((LPCWSTR)zConverted, + dwDesiredAccess, + dwShareMode, + dwCreationDisposition, + &extendedParameters))==INVALID_HANDLE_VALUE && + winRetryIoerr(&cnt, &lastErrno) ){ + /* Noop */ + } +#else + while( (h = osCreateFileW((LPCWSTR)zConverted, + dwDesiredAccess, + dwShareMode, NULL, + dwCreationDisposition, + dwFlagsAndAttributes, + NULL))==INVALID_HANDLE_VALUE && + winRetryIoerr(&cnt, &lastErrno) ){ + /* Noop */ + } +#endif + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + while( (h = osCreateFileA((LPCSTR)zConverted, + dwDesiredAccess, + dwShareMode, NULL, + dwCreationDisposition, + dwFlagsAndAttributes, + NULL))==INVALID_HANDLE_VALUE && + winRetryIoerr(&cnt, &lastErrno) ){ + /* Noop */ + } + } +#endif + winLogIoerr(cnt, __LINE__); + + OSTRACE(("OPEN file=%p, name=%s, access=%lx, rc=%s\n", h, zUtf8Name, + dwDesiredAccess, (h==INVALID_HANDLE_VALUE) ? 
"failed" : "ok")); + + if( h==INVALID_HANDLE_VALUE ){ + pFile->lastErrno = lastErrno; + winLogError(SQLITE_CANTOPEN, pFile->lastErrno, "winOpen", zUtf8Name); + sqlite3_free(zConverted); + sqlite3_free(zTmpname); + if( isReadWrite && !isExclusive ){ + return winOpen(pVfs, zName, id, + ((flags|SQLITE_OPEN_READONLY) & + ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE)), + pOutFlags); + }else{ + return SQLITE_CANTOPEN_BKPT; + } + } + + if( pOutFlags ){ + if( isReadWrite ){ + *pOutFlags = SQLITE_OPEN_READWRITE; + }else{ + *pOutFlags = SQLITE_OPEN_READONLY; + } + } + + OSTRACE(("OPEN file=%p, name=%s, access=%lx, pOutFlags=%p, *pOutFlags=%d, " + "rc=%s\n", h, zUtf8Name, dwDesiredAccess, pOutFlags, pOutFlags ? + *pOutFlags : 0, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok")); + + pAppData = (winVfsAppData*)pVfs->pAppData; + +#if SQLITE_OS_WINCE + { + if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB + && ((pAppData==NULL) || !pAppData->bNoLock) + && (rc = winceCreateLock(zName, pFile))!=SQLITE_OK + ){ + osCloseHandle(h); + sqlite3_free(zConverted); + sqlite3_free(zTmpname); + OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc))); + return rc; + } + } + if( isTemp ){ + pFile->zDeleteOnClose = zConverted; + }else +#endif + { + sqlite3_free(zConverted); + } + + sqlite3_free(zTmpname); + pFile->pMethod = pAppData ? pAppData->pMethod : &winIoMethod; + pFile->pVfs = pVfs; + pFile->h = h; + if( isReadonly ){ + pFile->ctrlFlags |= WINFILE_RDONLY; + } + if( sqlite3_uri_boolean(zName, "psow", SQLITE_POWERSAFE_OVERWRITE) ){ + pFile->ctrlFlags |= WINFILE_PSOW; + } + pFile->lastErrno = NO_ERROR; + pFile->zPath = zName; +#if SQLITE_MAX_MMAP_SIZE>0 + pFile->hMap = NULL; + pFile->pMapRegion = 0; + pFile->mmapSize = 0; + pFile->mmapSizeActual = 0; + pFile->mmapSizeMax = sqlite3GlobalConfig.szMmap; +#endif + + OpenCounter(+1); + return rc; +} + +/* +** Delete the named file. +** +** Note that Windows does not allow a file to be deleted if some other +** process has it open. Sometimes a virus scanner or indexing program +** will open a journal file shortly after it is created in order to do +** whatever it does. While this other process is holding the +** file open, we will be unable to delete it. To work around this +** problem, we delay 100 milliseconds and try to delete again. Up +** to MX_DELETION_ATTEMPTs deletion attempts are run before giving +** up and returning an error. +*/ +static int winDelete( + sqlite3_vfs *pVfs, /* Not used on win32 */ + const char *zFilename, /* Name of file to delete */ + int syncDir /* Not used on win32 */ +){ + int cnt = 0; + int rc; + DWORD attr; + DWORD lastErrno = 0; + void *zConverted; + UNUSED_PARAMETER(pVfs); + UNUSED_PARAMETER(syncDir); + + SimulateIOError(return SQLITE_IOERR_DELETE); + OSTRACE(("DELETE name=%s, syncDir=%d\n", zFilename, syncDir)); + + zConverted = winConvertFromUtf8Filename(zFilename); + if( zConverted==0 ){ + OSTRACE(("DELETE name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); + return SQLITE_IOERR_NOMEM_BKPT; + } + if( osIsNT() ){ + do { +#if SQLITE_OS_WINRT + WIN32_FILE_ATTRIBUTE_DATA sAttrData; + memset(&sAttrData, 0, sizeof(sAttrData)); + if ( osGetFileAttributesExW(zConverted, GetFileExInfoStandard, + &sAttrData) ){ + attr = sAttrData.dwFileAttributes; + }else{ + lastErrno = osGetLastError(); + if( lastErrno==ERROR_FILE_NOT_FOUND + || lastErrno==ERROR_PATH_NOT_FOUND ){ + rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? 
*/ + }else{ + rc = SQLITE_ERROR; + } + break; + } +#else + attr = osGetFileAttributesW(zConverted); +#endif + if ( attr==INVALID_FILE_ATTRIBUTES ){ + lastErrno = osGetLastError(); + if( lastErrno==ERROR_FILE_NOT_FOUND + || lastErrno==ERROR_PATH_NOT_FOUND ){ + rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ + }else{ + rc = SQLITE_ERROR; + } + break; + } + if ( attr&FILE_ATTRIBUTE_DIRECTORY ){ + rc = SQLITE_ERROR; /* Files only. */ + break; + } + if ( osDeleteFileW(zConverted) ){ + rc = SQLITE_OK; /* Deleted OK. */ + break; + } + if ( !winRetryIoerr(&cnt, &lastErrno) ){ + rc = SQLITE_ERROR; /* No more retries. */ + break; + } + } while(1); + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + do { + attr = osGetFileAttributesA(zConverted); + if ( attr==INVALID_FILE_ATTRIBUTES ){ + lastErrno = osGetLastError(); + if( lastErrno==ERROR_FILE_NOT_FOUND + || lastErrno==ERROR_PATH_NOT_FOUND ){ + rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ + }else{ + rc = SQLITE_ERROR; + } + break; + } + if ( attr&FILE_ATTRIBUTE_DIRECTORY ){ + rc = SQLITE_ERROR; /* Files only. */ + break; + } + if ( osDeleteFileA(zConverted) ){ + rc = SQLITE_OK; /* Deleted OK. */ + break; + } + if ( !winRetryIoerr(&cnt, &lastErrno) ){ + rc = SQLITE_ERROR; /* No more retries. */ + break; + } + } while(1); + } +#endif + if( rc && rc!=SQLITE_IOERR_DELETE_NOENT ){ + rc = winLogError(SQLITE_IOERR_DELETE, lastErrno, "winDelete", zFilename); + }else{ + winLogIoerr(cnt, __LINE__); + } + sqlite3_free(zConverted); + OSTRACE(("DELETE name=%s, rc=%s\n", zFilename, sqlite3ErrName(rc))); + return rc; +} + +/* +** Check the existence and status of a file. +*/ +static int winAccess( + sqlite3_vfs *pVfs, /* Not used on win32 */ + const char *zFilename, /* Name of file to check */ + int flags, /* Type of test to make on this file */ + int *pResOut /* OUT: Result */ +){ + DWORD attr; + int rc = 0; + DWORD lastErrno = 0; + void *zConverted; + UNUSED_PARAMETER(pVfs); + + SimulateIOError( return SQLITE_IOERR_ACCESS; ); + OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n", + zFilename, flags, pResOut)); + + zConverted = winConvertFromUtf8Filename(zFilename); + if( zConverted==0 ){ + OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); + return SQLITE_IOERR_NOMEM_BKPT; + } + if( osIsNT() ){ + int cnt = 0; + WIN32_FILE_ATTRIBUTE_DATA sAttrData; + memset(&sAttrData, 0, sizeof(sAttrData)); + while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted, + GetFileExInfoStandard, + &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){} + if( rc ){ + /* For an SQLITE_ACCESS_EXISTS query, treat a zero-length file + ** as if it does not exist. 
+ */ + if( flags==SQLITE_ACCESS_EXISTS + && sAttrData.nFileSizeHigh==0 + && sAttrData.nFileSizeLow==0 ){ + attr = INVALID_FILE_ATTRIBUTES; + }else{ + attr = sAttrData.dwFileAttributes; + } + }else{ + winLogIoerr(cnt, __LINE__); + if( lastErrno!=ERROR_FILE_NOT_FOUND && lastErrno!=ERROR_PATH_NOT_FOUND ){ + sqlite3_free(zConverted); + return winLogError(SQLITE_IOERR_ACCESS, lastErrno, "winAccess", + zFilename); + }else{ + attr = INVALID_FILE_ATTRIBUTES; + } + } + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + attr = osGetFileAttributesA((char*)zConverted); + } +#endif + sqlite3_free(zConverted); + switch( flags ){ + case SQLITE_ACCESS_READ: + case SQLITE_ACCESS_EXISTS: + rc = attr!=INVALID_FILE_ATTRIBUTES; + break; + case SQLITE_ACCESS_READWRITE: + rc = attr!=INVALID_FILE_ATTRIBUTES && + (attr & FILE_ATTRIBUTE_READONLY)==0; + break; + default: + assert(!"Invalid flags argument"); + } + *pResOut = rc; + OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", + zFilename, pResOut, *pResOut)); + return SQLITE_OK; +} + +/* +** Returns non-zero if the specified path name starts with a drive letter +** followed by a colon character. +*/ +static BOOL winIsDriveLetterAndColon( + const char *zPathname +){ + return ( sqlite3Isalpha(zPathname[0]) && zPathname[1]==':' ); +} + +/* +** Returns non-zero if the specified path name should be used verbatim. If +** non-zero is returned from this function, the calling function must simply +** use the provided path name verbatim -OR- resolve it into a full path name +** using the GetFullPathName Win32 API function (if available). +*/ +static BOOL winIsVerbatimPathname( + const char *zPathname +){ + /* + ** If the path name starts with a forward slash or a backslash, it is either + ** a legal UNC name, a volume relative path, or an absolute path name in the + ** "Unix" format on Windows. There is no easy way to differentiate between + ** the final two cases; therefore, we return the safer return value of TRUE + ** so that callers of this function will simply use it verbatim. + */ + if ( winIsDirSep(zPathname[0]) ){ + return TRUE; + } + + /* + ** If the path name starts with a letter and a colon it is either a volume + ** relative path or an absolute path. Callers of this function must not + ** attempt to treat it as a relative path name (i.e. they should simply use + ** it verbatim). + */ + if ( winIsDriveLetterAndColon(zPathname) ){ + return TRUE; + } + + /* + ** If we get to this point, the path name should almost certainly be a purely + ** relative one (i.e. not a UNC name, not absolute, and not volume relative). + */ + return FALSE; +} + +/* +** Turn a relative pathname into a full pathname. Write the full +** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname +** bytes in size. +*/ +static int winFullPathname( + sqlite3_vfs *pVfs, /* Pointer to vfs object */ + const char *zRelative, /* Possibly relative input path */ + int nFull, /* Size of output buffer in bytes */ + char *zFull /* Output buffer */ +){ +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) + DWORD nByte; + void *zConverted; + char *zOut; +#endif + + /* If this path name begins with "/X:", where "X" is any alphabetic + ** character, discard the initial "/" from the pathname. 
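+  ** (For example, a hypothetical input of "/C:/data/test.db" would be
+  ** treated as "C:/data/test.db".)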
+ */ + if( zRelative[0]=='/' && winIsDriveLetterAndColon(zRelative+1) ){ + zRelative++; + } + +#if defined(__CYGWIN__) + SimulateIOError( return SQLITE_ERROR ); + UNUSED_PARAMETER(nFull); + assert( nFull>=pVfs->mxPathname ); + if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ + /* + ** NOTE: We are dealing with a relative path name and the data + ** directory has been set. Therefore, use it as the basis + ** for converting the relative path name to an absolute + ** one by prepending the data directory and a slash. + */ + char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); + if( !zOut ){ + return SQLITE_IOERR_NOMEM_BKPT; + } + if( cygwin_conv_path( + (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) | + CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){ + sqlite3_free(zOut); + return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, + "winFullPathname1", zRelative); + }else{ + char *zUtf8 = winConvertToUtf8Filename(zOut); + if( !zUtf8 ){ + sqlite3_free(zOut); + return SQLITE_IOERR_NOMEM_BKPT; + } + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", + sqlite3_data_directory, winGetDirSep(), zUtf8); + sqlite3_free(zUtf8); + sqlite3_free(zOut); + } + }else{ + char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); + if( !zOut ){ + return SQLITE_IOERR_NOMEM_BKPT; + } + if( cygwin_conv_path( + (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A), + zRelative, zOut, pVfs->mxPathname+1)<0 ){ + sqlite3_free(zOut); + return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, + "winFullPathname2", zRelative); + }else{ + char *zUtf8 = winConvertToUtf8Filename(zOut); + if( !zUtf8 ){ + sqlite3_free(zOut); + return SQLITE_IOERR_NOMEM_BKPT; + } + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8); + sqlite3_free(zUtf8); + sqlite3_free(zOut); + } + } + return SQLITE_OK; +#endif + +#if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__) + SimulateIOError( return SQLITE_ERROR ); + /* WinCE has no concept of a relative pathname, or so I am told. */ + /* WinRT has no way to convert a relative path to an absolute one. */ + if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ + /* + ** NOTE: We are dealing with a relative path name and the data + ** directory has been set. Therefore, use it as the basis + ** for converting the relative path name to an absolute + ** one by prepending the data directory and a backslash. + */ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", + sqlite3_data_directory, winGetDirSep(), zRelative); + }else{ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zRelative); + } + return SQLITE_OK; +#endif + +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) + /* It's odd to simulate an io-error here, but really this is just + ** using the io-error infrastructure to test that SQLite handles this + ** function failing. This function could fail if, for example, the + ** current working directory has been unlinked. + */ + SimulateIOError( return SQLITE_ERROR ); + if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ + /* + ** NOTE: We are dealing with a relative path name and the data + ** directory has been set. Therefore, use it as the basis + ** for converting the relative path name to an absolute + ** one by prepending the data directory and a backslash. 
+ */ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", + sqlite3_data_directory, winGetDirSep(), zRelative); + return SQLITE_OK; + } + zConverted = winConvertFromUtf8Filename(zRelative); + if( zConverted==0 ){ + return SQLITE_IOERR_NOMEM_BKPT; + } + if( osIsNT() ){ + LPWSTR zTemp; + nByte = osGetFullPathNameW((LPCWSTR)zConverted, 0, 0, 0); + if( nByte==0 ){ + sqlite3_free(zConverted); + return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), + "winFullPathname1", zRelative); + } + nByte += 3; + zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); + if( zTemp==0 ){ + sqlite3_free(zConverted); + return SQLITE_IOERR_NOMEM_BKPT; + } + nByte = osGetFullPathNameW((LPCWSTR)zConverted, nByte, zTemp, 0); + if( nByte==0 ){ + sqlite3_free(zConverted); + sqlite3_free(zTemp); + return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), + "winFullPathname2", zRelative); + } + sqlite3_free(zConverted); + zOut = winUnicodeToUtf8(zTemp); + sqlite3_free(zTemp); + } +#ifdef SQLITE_WIN32_HAS_ANSI + else{ + char *zTemp; + nByte = osGetFullPathNameA((char*)zConverted, 0, 0, 0); + if( nByte==0 ){ + sqlite3_free(zConverted); + return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), + "winFullPathname3", zRelative); + } + nByte += 3; + zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); + if( zTemp==0 ){ + sqlite3_free(zConverted); + return SQLITE_IOERR_NOMEM_BKPT; + } + nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0); + if( nByte==0 ){ + sqlite3_free(zConverted); + sqlite3_free(zTemp); + return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), + "winFullPathname4", zRelative); + } + sqlite3_free(zConverted); + zOut = winMbcsToUtf8(zTemp, osAreFileApisANSI()); + sqlite3_free(zTemp); + } +#endif + if( zOut ){ + sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); + sqlite3_free(zOut); + return SQLITE_OK; + }else{ + return SQLITE_IOERR_NOMEM_BKPT; + } +#endif +} + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Interfaces for opening a shared library, finding entry points +** within the shared library, and closing the shared library. 
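+**
+** A minimal sketch of how the four methods below fit together (the library
+** and symbol names are hypothetical):
+**
+**   void *pLib = pVfs->xDlOpen(pVfs, "extension.dll");
+**   if( pLib ){
+**     void (*pFunc)(void) = pVfs->xDlSym(pVfs, pLib, "entry_point");
+**     if( pFunc ) pFunc();
+**     pVfs->xDlClose(pVfs, pLib);
+**   }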
+*/
+static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){
+  HANDLE h;
+#if defined(__CYGWIN__)
+  int nFull = pVfs->mxPathname+1;
+  char *zFull = sqlite3MallocZero( nFull );
+  void *zConverted = 0;
+  if( zFull==0 ){
+    OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
+    return 0;
+  }
+  if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){
+    sqlite3_free(zFull);
+    OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
+    return 0;
+  }
+  zConverted = winConvertFromUtf8Filename(zFull);
+  sqlite3_free(zFull);
+#else
+  void *zConverted = winConvertFromUtf8Filename(zFilename);
+  UNUSED_PARAMETER(pVfs);
+#endif
+  if( zConverted==0 ){
+    OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0));
+    return 0;
+  }
+  if( osIsNT() ){
+#if SQLITE_OS_WINRT
+    h = osLoadPackagedLibrary((LPCWSTR)zConverted, 0);
+#else
+    h = osLoadLibraryW((LPCWSTR)zConverted);
+#endif
+  }
+#ifdef SQLITE_WIN32_HAS_ANSI
+  else{
+    h = osLoadLibraryA((char*)zConverted);
+  }
+#endif
+  OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)h));
+  sqlite3_free(zConverted);
+  return (void*)h;
+}
+static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){
+  UNUSED_PARAMETER(pVfs);
+  winGetLastErrorMsg(osGetLastError(), nBuf, zBufOut);
+}
+static void (*winDlSym(sqlite3_vfs *pVfs,void *pH,const char *zSym))(void){
+  FARPROC proc;
+  UNUSED_PARAMETER(pVfs);
+  proc = osGetProcAddressA((HANDLE)pH, zSym);
+  OSTRACE(("DLSYM handle=%p, symbol=%s, address=%p\n",
+           (void*)pH, zSym, (void*)proc));
+  return (void(*)(void))proc;
+}
+static void winDlClose(sqlite3_vfs *pVfs, void *pHandle){
+  UNUSED_PARAMETER(pVfs);
+  osFreeLibrary((HANDLE)pHandle);
+  OSTRACE(("DLCLOSE handle=%p\n", (void*)pHandle));
+}
+#else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */
+  #define winDlOpen  0
+  #define winDlError 0
+  #define winDlSym   0
+  #define winDlClose 0
+#endif
+
+/* State information for the randomness gatherer. */
+typedef struct EntropyGatherer EntropyGatherer;
+struct EntropyGatherer {
+  unsigned char *a;   /* Gather entropy into this buffer */
+  int na;             /* Size of a[] in bytes */
+  int i;              /* XOR next input into a[i] */
+  int nXor;           /* Number of XOR operations done */
+};
+
+#if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS)
+/* Mix sz bytes of entropy into p. */
+static void xorMemory(EntropyGatherer *p, unsigned char *x, int sz){
+  int j, k;
+  for(j=0, k=p->i; j<sz; j++){
+    p->a[k++] ^= x[j];
+    if( k>=p->na ) k = 0;
+  }
+  p->i = k;
+  p->nXor += sz;
+}
+#endif /* !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS) */
+
+/*
+** Write up to nBuf bytes of randomness into zBuf.
+*/ +static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ +#if defined(SQLITE_TEST) || defined(SQLITE_OMIT_RANDOMNESS) + UNUSED_PARAMETER(pVfs); + memset(zBuf, 0, nBuf); + return nBuf; +#else + EntropyGatherer e; + UNUSED_PARAMETER(pVfs); + memset(zBuf, 0, nBuf); +#if defined(_MSC_VER) && _MSC_VER>=1400 && !SQLITE_OS_WINCE + rand_s((unsigned int*)zBuf); /* rand_s() is not available with MinGW */ +#endif /* defined(_MSC_VER) && _MSC_VER>=1400 */ + e.a = (unsigned char*)zBuf; + e.na = nBuf; + e.nXor = 0; + e.i = 0; + { + SYSTEMTIME x; + osGetSystemTime(&x); + xorMemory(&e, (unsigned char*)&x, sizeof(SYSTEMTIME)); + } + { + DWORD pid = osGetCurrentProcessId(); + xorMemory(&e, (unsigned char*)&pid, sizeof(DWORD)); + } +#if SQLITE_OS_WINRT + { + ULONGLONG cnt = osGetTickCount64(); + xorMemory(&e, (unsigned char*)&cnt, sizeof(ULONGLONG)); + } +#else + { + DWORD cnt = osGetTickCount(); + xorMemory(&e, (unsigned char*)&cnt, sizeof(DWORD)); + } +#endif /* SQLITE_OS_WINRT */ + { + LARGE_INTEGER i; + osQueryPerformanceCounter(&i); + xorMemory(&e, (unsigned char*)&i, sizeof(LARGE_INTEGER)); + } +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID + { + UUID id; + memset(&id, 0, sizeof(UUID)); + osUuidCreate(&id); + xorMemory(&e, (unsigned char*)&id, sizeof(UUID)); + memset(&id, 0, sizeof(UUID)); + osUuidCreateSequential(&id); + xorMemory(&e, (unsigned char*)&id, sizeof(UUID)); + } +#endif /* !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID */ + return e.nXor>nBuf ? nBuf : e.nXor; +#endif /* defined(SQLITE_TEST) || defined(SQLITE_OMIT_RANDOMNESS) */ +} + + +/* +** Sleep for a little while. Return the amount of time slept. +*/ +static int winSleep(sqlite3_vfs *pVfs, int microsec){ + sqlite3_win32_sleep((microsec+999)/1000); + UNUSED_PARAMETER(pVfs); + return ((microsec+999)/1000)*1000; +} + +/* +** The following variable, if set to a non-zero value, is interpreted as +** the number of seconds since 1970 and is used to set the result of +** sqlite3OsCurrentTime() during testing. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ +#endif + +/* +** Find the current time (in Universal Coordinated Time). Write into *piNow +** the current time and date as a Julian Day number times 86_400_000. In +** other words, write into *piNow the number of milliseconds since the Julian +** epoch of noon in Greenwich on November 24, 4714 B.C according to the +** proleptic Gregorian calendar. +** +** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date +** cannot be found. +*/ +static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){ + /* FILETIME structure is a 64-bit value representing the number of + 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5). + */ + FILETIME ft; + static const sqlite3_int64 winFiletimeEpoch = 23058135*(sqlite3_int64)8640000; +#ifdef SQLITE_TEST + static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; +#endif + /* 2^32 - to avoid use of LL and warnings in gcc */ + static const sqlite3_int64 max32BitValue = + (sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 + + (sqlite3_int64)294967296; + +#if SQLITE_OS_WINCE + SYSTEMTIME time; + osGetSystemTime(&time); + /* if SystemTimeToFileTime() fails, it returns zero. 
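+     (As a sanity check on the winFiletimeEpoch constant above: JD 2305813.5
+     days times 86,400,000 ms/day equals 23058135 times 8,640,000 ms, which
+     is exactly the factored form used in its definition.)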
*/ + if (!osSystemTimeToFileTime(&time,&ft)){ + return SQLITE_ERROR; + } +#else + osGetSystemTimeAsFileTime( &ft ); +#endif + + *piNow = winFiletimeEpoch + + ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) + + (sqlite3_int64)ft.dwLowDateTime)/(sqlite3_int64)10000; + +#ifdef SQLITE_TEST + if( sqlite3_current_time ){ + *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; + } +#endif + UNUSED_PARAMETER(pVfs); + return SQLITE_OK; +} + +/* +** Find the current time (in Universal Coordinated Time). Write the +** current time and date as a Julian Day number into *prNow and +** return 0. Return 1 if the time and date cannot be found. +*/ +static int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ + int rc; + sqlite3_int64 i; + rc = winCurrentTimeInt64(pVfs, &i); + if( !rc ){ + *prNow = i/86400000.0; + } + return rc; +} + +/* +** The idea is that this function works like a combination of +** GetLastError() and FormatMessage() on Windows (or errno and +** strerror_r() on Unix). After an error is returned by an OS +** function, SQLite calls this function with zBuf pointing to +** a buffer of nBuf bytes. The OS layer should populate the +** buffer with a nul-terminated UTF-8 encoded error message +** describing the last IO error to have occurred within the calling +** thread. +** +** If the error message is too large for the supplied buffer, +** it should be truncated. The return value of xGetLastError +** is zero if the error message fits in the buffer, or non-zero +** otherwise (if the message was truncated). If non-zero is returned, +** then it is not necessary to include the nul-terminator character +** in the output buffer. +** +** Not supplying an error message will have no adverse effect +** on SQLite. It is fine to have an implementation that never +** returns an error message: +** +** int xGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ +** assert(zBuf[0]=='\0'); +** return 0; +** } +** +** However if an error message is supplied, it will be incorporated +** by sqlite into the error message available to the user using +** sqlite3_errmsg(), possibly making IO errors easier to debug. +*/ +static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ + DWORD e = osGetLastError(); + UNUSED_PARAMETER(pVfs); + if( nBuf>0 ) winGetLastErrorMsg(e, nBuf, zBuf); + return e; +} + +/* +** Initialize and deinitialize the operating system interface. 
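+**
+** Once initialization completes, an application can pick one of the VFS
+** objects registered below by name; a minimal sketch (the filename is
+** illustrative only):
+**
+**   sqlite3 *db;
+**   int rc = sqlite3_open_v2("test.db", &db,
+**                            SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
+**                            "win32-longpath");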
+*/ +SQLITE_API int sqlite3_os_init(void){ + static sqlite3_vfs winVfs = { + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ + SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */ + 0, /* pNext */ + "win32", /* zName */ + &winAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ + }; +#if defined(SQLITE_WIN32_HAS_WIDE) + static sqlite3_vfs winLongPathVfs = { + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ + SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */ + 0, /* pNext */ + "win32-longpath", /* zName */ + &winAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ + }; +#endif + static sqlite3_vfs winNolockVfs = { + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ + SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */ + 0, /* pNext */ + "win32-none", /* zName */ + &winNolockAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ + }; +#if defined(SQLITE_WIN32_HAS_WIDE) + static sqlite3_vfs winLongPathNolockVfs = { + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ + SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */ + 0, /* pNext */ + "win32-longpath-none", /* zName */ + &winNolockAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ + }; +#endif + + /* Double-check that the aSyscall[] array has been constructed + ** correctly. 
See ticket [bb3a86e890c8e96ab] */
+  assert( ArraySize(aSyscall)==80 );
+
+  /* get memory map allocation granularity */
+  memset(&winSysInfo, 0, sizeof(SYSTEM_INFO));
+#if SQLITE_OS_WINRT
+  osGetNativeSystemInfo(&winSysInfo);
+#else
+  osGetSystemInfo(&winSysInfo);
+#endif
+  assert( winSysInfo.dwAllocationGranularity>0 );
+  assert( winSysInfo.dwPageSize>0 );
+
+  sqlite3_vfs_register(&winVfs, 1);
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+  sqlite3_vfs_register(&winLongPathVfs, 0);
+#endif
+
+  sqlite3_vfs_register(&winNolockVfs, 0);
+
+#if defined(SQLITE_WIN32_HAS_WIDE)
+  sqlite3_vfs_register(&winLongPathNolockVfs, 0);
+#endif
+
+  return SQLITE_OK;
+}
+
+SQLITE_API int sqlite3_os_end(void){
+#if SQLITE_OS_WINRT
+  if( sleepObj!=NULL ){
+    osCloseHandle(sleepObj);
+    sleepObj = NULL;
+  }
+#endif
+  return SQLITE_OK;
+}
+
+#endif /* SQLITE_OS_WIN */
+
+/************** End of os_win.c **********************************************/
+/************** Begin file bitvec.c ******************************************/
+/*
+** 2008 February 16
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements an object that represents a fixed-length
+** bitmap.  Bits are numbered starting with 1.
+**
+** A bitmap is used to record which pages of a database file have been
+** journalled during a transaction, or which pages have the "dont-write"
+** property.  Usually only a few pages meet either condition.
+** So the bitmap is usually sparse and has low cardinality.
+** But sometimes (for example, during a DROP of a large table) most
+** or all of the pages in a database can get journalled.  In those cases,
+** the bitmap becomes dense with high cardinality.  The algorithm needs
+** to handle both cases well.
+**
+** The size of the bitmap is fixed when the object is created.
+**
+** All bits are clear when the bitmap is created.  Individual bits
+** may be set or cleared one at a time.
+**
+** Test operations are about 100 times more common than set operations.
+** Clear operations are exceedingly rare.  There are usually between
+** 5 and 500 set operations per Bitvec object, though the number of sets can
+** sometimes grow into tens of thousands or larger.  The size of the
+** Bitvec object is the number of pages in the database file at the
+** start of a transaction, and is thus usually less than a few thousand,
+** but can be as large as 2 billion for a really big database.
+*/
+/* #include "sqliteInt.h" */
+
+/* Size of the Bitvec structure in bytes. */
+#define BITVEC_SZ        512
+
+/* Round the union size down to the nearest pointer boundary, since that's how
+** it will be aligned within the Bitvec struct. */
+#define BITVEC_USIZE \
+    (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*))
+
+/* Type of the array "element" for the bitmap representation.
+** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE.
+** Setting this to the "natural word" size of your CPU may improve
+** performance. */
+#define BITVEC_TELEM     u8
+/* Size, in bits, of the bitmap element. */
+#define BITVEC_SZELEM    8
+/* Number of elements in a bitmap array. */
+#define BITVEC_NELEM     (BITVEC_USIZE/sizeof(BITVEC_TELEM))
+/* Number of bits in the bitmap array.
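+** (With the default BITVEC_SZ of 512, the arithmetic above works out as
+** follows, assuming 8-byte pointers: BITVEC_USIZE is 496, so BITVEC_NELEM
+** is 496 and BITVEC_NBIT is 3968; with 4-byte pointers the corresponding
+** figures are 500 and 4000.)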
+*/
+#define BITVEC_NBIT      (BITVEC_NELEM*BITVEC_SZELEM)
+
+/* Number of u32 values in hash table. */
+#define BITVEC_NINT      (BITVEC_USIZE/sizeof(u32))
+/* Maximum number of entries in hash table before
+** sub-dividing and re-hashing. */
+#define BITVEC_MXHASH    (BITVEC_NINT/2)
+/* Hashing function for the aHash representation.
+** Empirical testing showed that the *37 multiplier
+** (an arbitrary prime) in the hash function provided
+** no fewer collisions than the no-op *1. */
+#define BITVEC_HASH(X)   (((X)*1)%BITVEC_NINT)
+
+#define BITVEC_NPTR      (BITVEC_USIZE/sizeof(Bitvec *))
+
+
+/*
+** A bitmap is an instance of the following structure.
+**
+** This bitmap records the existence of zero or more bits
+** with values between 1 and iSize, inclusive.
+**
+** There are three possible representations of the bitmap.
+** If iSize<=BITVEC_NBIT, then Bitvec.u.aBitmap[] is a straight
+** bitmap.  The least significant bit is bit 1.
+**
+** If iSize>BITVEC_NBIT and iDivisor==0 then Bitvec.u.aHash[] is
+** a hash table that will hold up to BITVEC_MXHASH distinct values.
+**
+** Otherwise, the value i is redirected into one of BITVEC_NPTR
+** sub-bitmaps pointed to by Bitvec.u.apSub[].  Each subbitmap
+** handles up to iDivisor separate values of i.  apSub[0] holds
+** values between 1 and iDivisor.  apSub[1] holds values between
+** iDivisor+1 and 2*iDivisor.  apSub[N] holds values between
+** N*iDivisor+1 and (N+1)*iDivisor.  Each subbitmap is normalized
+** to deal with values between 1 and iDivisor.
+*/
+struct Bitvec {
+  u32 iSize;      /* Maximum bit index.  Max iSize is 4,294,967,296. */
+  u32 nSet;       /* Number of bits that are set - only valid for aHash
+                  ** element.  Max is BITVEC_NINT.  For BITVEC_SZ of 512,
+                  ** this would be 125. */
+  u32 iDivisor;   /* Number of bits handled by each apSub[] entry. */
+                  /* Should be >=0 for the apSub element. */
+                  /* Max iDivisor is max(u32) / BITVEC_NPTR + 1. */
+                  /* For a BITVEC_SZ of 512, this would be 34,359,739. */
+  union {
+    BITVEC_TELEM aBitmap[BITVEC_NELEM];    /* Bitmap representation */
+    u32 aHash[BITVEC_NINT];      /* Hash table representation */
+    Bitvec *apSub[BITVEC_NPTR];  /* Recursive representation */
+  } u;
+};
+
+/*
+** Create a new bitmap object able to handle bits between 0 and iSize,
+** inclusive.  Return a pointer to the new object.  Return NULL if
+** malloc fails.
+*/
+SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32 iSize){
+  Bitvec *p;
+  assert( sizeof(*p)==BITVEC_SZ );
+  p = sqlite3MallocZero( sizeof(*p) );
+  if( p ){
+    p->iSize = iSize;
+  }
+  return p;
+}
+
+/*
+** Check to see if the i-th bit is set.  Return true or false.
+** If p is NULL (if the bitmap has not been created) or if
+** i is out of range, then return false.
+*/
+SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec *p, u32 i){
+  assert( p!=0 );
+  i--;
+  if( i>=p->iSize ) return 0;
+  while( p->iDivisor ){
+    u32 bin = i/p->iDivisor;
+    i = i%p->iDivisor;
+    p = p->u.apSub[bin];
+    if (!p) {
+      return 0;
+    }
+  }
+  if( p->iSize<=BITVEC_NBIT ){
+    return (p->u.aBitmap[i/BITVEC_SZELEM] & (1<<(i&(BITVEC_SZELEM-1))))!=0;
+  } else{
+    u32 h = BITVEC_HASH(i++);
+    while( p->u.aHash[h] ){
+      if( p->u.aHash[h]==i ) return 1;
+      h = (h+1) % BITVEC_NINT;
+    }
+    return 0;
+  }
+}
+SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec *p, u32 i){
+  return p!=0 && sqlite3BitvecTestNotNull(p,i);
+}
+
+/*
+** Set the i-th bit.  Return 0 on success and an error code if
+** anything goes wrong.
+**
+** This routine might cause sub-bitmaps to be allocated.
+
+/*
+** Set the i-th bit.  Return 0 on success and an error code if
+** anything goes wrong.
+**
+** This routine might cause sub-bitmaps to be allocated.  Failing
+** to get the memory needed to hold the sub-bitmap is the only thing
+** that can go wrong with an insert, assuming p and i are valid.
+**
+** The calling function must ensure that p is a valid Bitvec object
+** and that the value for "i" is within range of the Bitvec object.
+** Otherwise the behavior is undefined.
+*/
+SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){
+  u32 h;
+  if( p==0 ) return SQLITE_OK;
+  assert( i>0 );
+  assert( i<=p->iSize );
+  i--;
+  while((p->iSize > BITVEC_NBIT) && p->iDivisor) {
+    u32 bin = i/p->iDivisor;
+    i = i%p->iDivisor;
+    if( p->u.apSub[bin]==0 ){
+      p->u.apSub[bin] = sqlite3BitvecCreate( p->iDivisor );
+      if( p->u.apSub[bin]==0 ) return SQLITE_NOMEM_BKPT;
+    }
+    p = p->u.apSub[bin];
+  }
+  if( p->iSize<=BITVEC_NBIT ){
+    p->u.aBitmap[i/BITVEC_SZELEM] |= 1 << (i&(BITVEC_SZELEM-1));
+    return SQLITE_OK;
+  }
+  h = BITVEC_HASH(i++);
+  /* if there wasn't a hash collision, and this doesn't */
+  /* completely fill the hash, then just add it without */
+  /* worrying about sub-dividing and re-hashing. */
+  if( !p->u.aHash[h] ){
+    if (p->nSet<(BITVEC_NINT-1)) {
+      goto bitvec_set_end;
+    } else {
+      goto bitvec_set_rehash;
+    }
+  }
+  /* there was a collision, check to see if it's already */
+  /* in hash, if not, try to find a spot for it */
+  do {
+    if( p->u.aHash[h]==i ) return SQLITE_OK;
+    h++;
+    if( h>=BITVEC_NINT ) h = 0;
+  } while( p->u.aHash[h] );
+  /* we didn't find it in the hash.  h points to the first */
+  /* available free spot.  check to see if this is going to */
+  /* make our hash too "full".  */
+bitvec_set_rehash:
+  if( p->nSet>=BITVEC_MXHASH ){
+    unsigned int j;
+    int rc;
+    u32 *aiValues = sqlite3StackAllocRaw(0, sizeof(p->u.aHash));
+    if( aiValues==0 ){
+      return SQLITE_NOMEM_BKPT;
+    }else{
+      memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash));
+      memset(p->u.apSub, 0, sizeof(p->u.apSub));
+      p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR;
+      rc = sqlite3BitvecSet(p, i);
+      for(j=0; j<BITVEC_NINT; j++){
+        if( aiValues[j] ) rc |= sqlite3BitvecSet(p, aiValues[j]);
+      }
+      sqlite3StackFree(0, aiValues);
+      return rc;
+    }
+  }
+bitvec_set_end:
+  p->nSet++;
+  p->u.aHash[h] = i;
+  return SQLITE_OK;
+}
+
+/*
+** Clear the i-th bit.
+**
+** pBuf must be a pointer to at least BITVEC_SZ bytes of temporary storage
+** that BitvecClear can use to rebuild its hash table.
+*/
+SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec *p, u32 i, void *pBuf){
+  if( p==0 ) return;
+  assert( i>0 );
+  i--;
+  while( p->iDivisor ){
+    u32 bin = i/p->iDivisor;
+    i = i%p->iDivisor;
+    p = p->u.apSub[bin];
+    if (!p) {
+      return;
+    }
+  }
+  if( p->iSize<=BITVEC_NBIT ){
+    p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1)));
+  }else{
+    unsigned int j;
+    u32 *aiValues = pBuf;
+    memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash));
+    memset(p->u.aHash, 0, sizeof(p->u.aHash));
+    p->nSet = 0;
+    for(j=0; j<BITVEC_NINT; j++){
+      if( aiValues[j] ){
+        u32 h = BITVEC_HASH(aiValues[j]-1);
+        p->nSet++;
+        while( p->u.aHash[h] ){
+          h++;
+          if( h>=BITVEC_NINT ) h = 0;
+        }
+        p->u.aHash[h] = aiValues[j];
+      }
+    }
+  }
+}
+
+/*
+** Destroy a bitmap object.  Reclaim all memory used.
+*/
+SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec *p){
+  if( p==0 ) return;
+  if( p->iDivisor ){
+    unsigned int i;
+    for(i=0; i<BITVEC_NPTR; i++){
+      sqlite3BitvecDestroy(p->u.apSub[i]);
+    }
+  }
+  sqlite3_free(p);
+}
+
+/*
+** Return the value of the iSize parameter specified when Bitvec *p
+** was created.
+*/
+SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){
+  return p->iSize;
+}
+
+#ifndef SQLITE_UNTESTABLE
+/*
+** Let V[] be an array of unsigned characters sufficient to hold
+** up to N bits.  Let I be an integer between 0 and N.  0<=I<N.
+** Then the following macros can be used to set, clear, or test
+** individual bits within V.
+*/
+#define SETBIT(V,I)      V[I>>3] |= (1<<(I&7))
+#define CLEARBIT(V,I)    V[I>>3] &= ~(1<<(I&7))
+#define TESTBIT(V,I)     (V[I>>3]&(1<<(I&7)))!=0
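+
+/* Worked example (illustrative, not from the original source): with I==13,
+** SETBIT(V,13) touches byte V[13>>3]==V[1] and bit position 13&7==5, so it
+** ORs in (1<<5)==0x20; TESTBIT(V,13) reads back the same byte and mask.
+** Note that sqlite3BitvecBuiltinTest() below applies these macros with
+** 1-based indices (SETBIT(pV, i+1)), matching Bitvec numbering. */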
+** +** The input is an array of integers that acts as a program +** to test the Bitvec. The integers are opcodes followed +** by 0, 1, or 3 operands, depending on the opcode. Another +** opcode follows immediately after the last operand. +** +** There are 6 opcodes numbered from 0 through 5. 0 is the +** "halt" opcode and causes the test to end. +** +** 0 Halt and return the number of errors +** 1 N S X Set N bits beginning with S and incrementing by X +** 2 N S X Clear N bits beginning with S and incrementing by X +** 3 N Set N randomly chosen bits +** 4 N Clear N randomly chosen bits +** 5 N S X Set N bits from S increment X in array only, not in bitvec +** +** The opcodes 1 through 4 perform set and clear operations are performed +** on both a Bitvec object and on a linear array of bits obtained from malloc. +** Opcode 5 works on the linear array only, not on the Bitvec. +** Opcode 5 is used to deliberately induce a fault in order to +** confirm that error detection works. +** +** At the conclusion of the test the linear array is compared +** against the Bitvec object. If there are any differences, +** an error is returned. If they are the same, zero is returned. +** +** If a memory allocation error occurs, return -1. +*/ +SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ + Bitvec *pBitvec = 0; + unsigned char *pV = 0; + int rc = -1; + int i, nx, pc, op; + void *pTmpSpace; + + /* Allocate the Bitvec to be tested and a linear array of + ** bits to act as the reference */ + pBitvec = sqlite3BitvecCreate( sz ); + pV = sqlite3MallocZero( (sz+7)/8 + 1 ); + pTmpSpace = sqlite3_malloc64(BITVEC_SZ); + if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end; + + /* NULL pBitvec tests */ + sqlite3BitvecSet(0, 1); + sqlite3BitvecClear(0, 1, pTmpSpace); + + /* Run the program */ + pc = 0; + while( (op = aOp[pc])!=0 ){ + switch( op ){ + case 1: + case 2: + case 5: { + nx = 4; + i = aOp[pc+2] - 1; + aOp[pc+2] += aOp[pc+3]; + break; + } + case 3: + case 4: + default: { + nx = 2; + sqlite3_randomness(sizeof(i), &i); + break; + } + } + if( (--aOp[pc+1]) > 0 ) nx = 0; + pc += nx; + i = (i & 0x7fffffff)%sz; + if( (op & 1)!=0 ){ + SETBIT(pV, (i+1)); + if( op!=5 ){ + if( sqlite3BitvecSet(pBitvec, i+1) ) goto bitvec_end; + } + }else{ + CLEARBIT(pV, (i+1)); + sqlite3BitvecClear(pBitvec, i+1, pTmpSpace); + } + } + + /* Test to make sure the linear array exactly matches the + ** Bitvec object. Start with the assumption that they do + ** match (rc==0). Change rc to non-zero if a discrepancy + ** is found. + */ + rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1) + + sqlite3BitvecTest(pBitvec, 0) + + (sqlite3BitvecSize(pBitvec) - sz); + for(i=1; i<=sz; i++){ + if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){ + rc = i; + break; + } + } + + /* Free allocated structure */ +bitvec_end: + sqlite3_free(pTmpSpace); + sqlite3_free(pV); + sqlite3BitvecDestroy(pBitvec); + return rc; +} +#endif /* SQLITE_UNTESTABLE */ + +/************** End of bitvec.c **********************************************/ +/************** Begin file pcache.c ******************************************/ +/* +** 2008 August 05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements that page cache. 
+*/ +/* #include "sqliteInt.h" */ + +/* +** A complete page cache is an instance of this structure. Every +** entry in the cache holds a single page of the database file. The +** btree layer only operates on the cached copy of the database pages. +** +** A page cache entry is "clean" if it exactly matches what is currently +** on disk. A page is "dirty" if it has been modified and needs to be +** persisted to disk. +** +** pDirty, pDirtyTail, pSynced: +** All dirty pages are linked into the doubly linked list using +** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order +** such that p was added to the list more recently than p->pDirtyNext. +** PCache.pDirty points to the first (newest) element in the list and +** pDirtyTail to the last (oldest). +** +** The PCache.pSynced variable is used to optimize searching for a dirty +** page to eject from the cache mid-transaction. It is better to eject +** a page that does not require a journal sync than one that does. +** Therefore, pSynced is maintained to that it *almost* always points +** to either the oldest page in the pDirty/pDirtyTail list that has a +** clear PGHDR_NEED_SYNC flag or to a page that is older than this one +** (so that the right page to eject can be found by following pDirtyPrev +** pointers). +*/ +struct PCache { + PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ + PgHdr *pSynced; /* Last synced page in dirty page list */ + int nRefSum; /* Sum of ref counts over all pages */ + int szCache; /* Configured cache size */ + int szSpill; /* Size before spilling occurs */ + int szPage; /* Size of every page in this cache */ + int szExtra; /* Size of extra space for each page */ + u8 bPurgeable; /* True if pages are on backing store */ + u8 eCreate; /* eCreate value for for xFetch() */ + int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */ + void *pStress; /* Argument to xStress */ + sqlite3_pcache *pCache; /* Pluggable cache module */ +}; + +/********************************** Test and Debug Logic **********************/ +/* +** Debug tracing macros. Enable by by changing the "0" to "1" and +** recompiling. +** +** When sqlite3PcacheTrace is 1, single line trace messages are issued. +** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries +** is displayed for many operations, resulting in a lot of output. +*/ +#if defined(SQLITE_DEBUG) && 0 + int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */ + int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */ +# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;} + void pcacheDump(PCache *pCache){ + int N; + int i, j; + sqlite3_pcache_page *pLower; + PgHdr *pPg; + unsigned char *a; + + if( sqlite3PcacheTrace<2 ) return; + if( pCache->pCache==0 ) return; + N = sqlite3PcachePagecount(pCache); + if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; + for(i=1; i<=N; i++){ + pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); + if( pLower==0 ) continue; + pPg = (PgHdr*)pLower->pExtra; + printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); + a = (unsigned char *)pLower->pBuf; + for(j=0; j<12; j++) printf("%02x", a[j]); + printf("\n"); + if( pPg->pPage==0 ){ + sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); + } + } + } + #else +# define pcacheTrace(X) +# define pcacheDump(X) +#endif + +/* +** Check invariants on a PgHdr entry. Return true if everything is OK. +** Return false if any invariant is violated. 
+** +** This routine is for use inside of assert() statements only. For +** example: +** +** assert( sqlite3PcachePageSanity(pPg) ); +*/ +#if SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){ + PCache *pCache; + assert( pPg!=0 ); + assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */ + pCache = pPg->pCache; + assert( pCache!=0 ); /* Every page has an associated PCache */ + if( pPg->flags & PGHDR_CLEAN ){ + assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ + assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */ + assert( pCache->pDirtyTail!=pPg ); + } + /* WRITEABLE pages must also be DIRTY */ + if( pPg->flags & PGHDR_WRITEABLE ){ + assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */ + } + /* NEED_SYNC can be set independently of WRITEABLE. This can happen, + ** for example, when using the sqlite3PagerDontWrite() optimization: + ** (1) Page X is journalled, and gets WRITEABLE and NEED_SEEK. + ** (2) Page X moved to freelist, WRITEABLE is cleared + ** (3) Page X reused, WRITEABLE is set again + ** If NEED_SYNC had been cleared in step 2, then it would not be reset + ** in step 3, and page might be written into the database without first + ** syncing the rollback journal, which might cause corruption on a power + ** loss. + ** + ** Another example is when the database page size is smaller than the + ** disk sector size. When any page of a sector is journalled, all pages + ** in that sector are marked NEED_SYNC even if they are still CLEAN, just + ** in case they are later modified, since all pages in the same sector + ** must be journalled and synced before any of those pages can be safely + ** written. + */ + return 1; +} +#endif /* SQLITE_DEBUG */ + + +/********************************** Linked List Management ********************/ + +/* Allowed values for second argument to pcacheManageDirtyList() */ +#define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ +#define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */ +#define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */ + +/* +** Manage pPage's participation on the dirty list. Bits of the addRemove +** argument determines what operation to do. The 0x01 bit means first +** remove pPage from the dirty list. The 0x02 means add pPage back to +** the dirty list. Doing both moves pPage to the front of the dirty list. +*/ +static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ + PCache *p = pPage->pCache; + + pcacheTrace(("%p.DIRTYLIST.%s %d\n", p, + addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT", + pPage->pgno)); + if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ + assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); + assert( pPage->pDirtyPrev || pPage==p->pDirty ); + + /* Update the PCache1.pSynced variable if necessary. */ + if( p->pSynced==pPage ){ + p->pSynced = pPage->pDirtyPrev; + } + + if( pPage->pDirtyNext ){ + pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; + }else{ + assert( pPage==p->pDirtyTail ); + p->pDirtyTail = pPage->pDirtyPrev; + } + if( pPage->pDirtyPrev ){ + pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; + }else{ + /* If there are now no dirty pages in the cache, set eCreate to 2. + ** This is an optimization that allows sqlite3PcacheFetch() to skip + ** searching for a dirty page to eject from the cache when it might + ** otherwise have to. 
+      assert( pPage==p->pDirty );
+      p->pDirty = pPage->pDirtyNext;
+      assert( p->bPurgeable || p->eCreate==2 );
+      if( p->pDirty==0 ){         /*OPTIMIZATION-IF-TRUE*/
+        assert( p->bPurgeable==0 || p->eCreate==1 );
+        p->eCreate = 2;
+      }
+    }
+    pPage->pDirtyNext = 0;
+    pPage->pDirtyPrev = 0;
+  }
+  if( addRemove & PCACHE_DIRTYLIST_ADD ){
+    assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage );
+
+    pPage->pDirtyNext = p->pDirty;
+    if( pPage->pDirtyNext ){
+      assert( pPage->pDirtyNext->pDirtyPrev==0 );
+      pPage->pDirtyNext->pDirtyPrev = pPage;
+    }else{
+      p->pDirtyTail = pPage;
+      if( p->bPurgeable ){
+        assert( p->eCreate==2 );
+        p->eCreate = 1;
+      }
+    }
+    p->pDirty = pPage;
+
+    /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set
+    ** pSynced to point to it.  Checking the NEED_SYNC flag is an
+    ** optimization, as if pSynced points to a page with the NEED_SYNC
+    ** flag set sqlite3PcacheFetchStress() searches through all newer
+    ** entries of the dirty-list for a page with NEED_SYNC clear anyway.  */
+    if( !p->pSynced
+     && 0==(pPage->flags&PGHDR_NEED_SYNC)   /*OPTIMIZATION-IF-FALSE*/
+    ){
+      p->pSynced = pPage;
+    }
+  }
+  pcacheDump(p);
+}
+
+/*
+** Wrapper around the pluggable cache's xUnpin method.  If the cache is
+** being used for an in-memory database, this function is a no-op.
+*/
+static void pcacheUnpin(PgHdr *p){
+  if( p->pCache->bPurgeable ){
+    pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
+    sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
+    pcacheDump(p->pCache);
+  }
+}
+
+/*
+** Compute the number of pages of cache requested.   p->szCache is the
+** cache size requested by the "PRAGMA cache_size" statement.
+*/
+static int numberOfCachePages(PCache *p){
+  if( p->szCache>=0 ){
+    /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
+    ** suggested cache size is set to N. */
+    return p->szCache;
+  }else{
+    /* IMPLEMENTATION-OF: R-61436-13639 If the argument N is negative, then
+    ** the number of cache pages is adjusted to use approximately abs(N*1024)
+    ** bytes of memory. */
+    return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
+  }
+}
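+
+/* Worked example (illustrative, not from the original source): after
+** "PRAGMA cache_size=-2000", szCache==-2000.  With szPage==4096 and, say,
+** szExtra==136, the formula above yields
+**
+**     (-1024 * -2000) / (4096+136)  =  2048000/4232  =  483 pages,
+**
+** i.e. roughly 2MB of cache memory regardless of the per-page overhead. */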
+
+/*************************************************** General Interfaces ******
+**
+** Initialize and shutdown the page cache subsystem.  Neither of these
+** functions is threadsafe.
+*/
+SQLITE_PRIVATE int sqlite3PcacheInitialize(void){
+  if( sqlite3GlobalConfig.pcache2.xInit==0 ){
+    /* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the
+    ** built-in default page cache is used instead of the application defined
+    ** page cache. */
+    sqlite3PCacheSetDefault();
+  }
+  return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg);
+}
+SQLITE_PRIVATE void sqlite3PcacheShutdown(void){
+  if( sqlite3GlobalConfig.pcache2.xShutdown ){
+    /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
+    sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg);
+  }
+}
+
+/*
+** Return the size in bytes of a PCache object.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); }
+
+/*
+** Create a new PCache object.  Storage space to hold the object
+** has already been allocated and is passed in as the p pointer.
+** The caller discovers how much space needs to be allocated by
+** calling sqlite3PcacheSize().
+**
+** szExtra is some extra space allocated for each page.  The first
+** 8 bytes of the extra space will be zeroed as the page is allocated,
+** but remaining content will be uninitialized.  Though it is opaque
+** to this module, the extra space really ends up being the MemPage
+** structure in the pager.
+*/
+SQLITE_PRIVATE int sqlite3PcacheOpen(
+  int szPage,                  /* Size of every page */
+  int szExtra,                 /* Extra space associated with each page */
+  int bPurgeable,              /* True if pages are on backing store */
+  int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */
+  void *pStress,               /* Argument to xStress */
+  PCache *p                    /* Preallocated space for the PCache */
+){
+  memset(p, 0, sizeof(PCache));
+  p->szPage = 1;
+  p->szExtra = szExtra;
+  assert( szExtra>=8 );  /* First 8 bytes will be zeroed */
+  p->bPurgeable = bPurgeable;
+  p->eCreate = 2;
+  p->xStress = xStress;
+  p->pStress = pStress;
+  p->szCache = 100;
+  p->szSpill = 1;
+  pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable));
+  return sqlite3PcacheSetPageSize(p, szPage);
+}
+
+/*
+** Change the page size for the PCache object.  The caller must ensure
+** that there are no outstanding page references when this function is
+** called.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
+  assert( pCache->nRefSum==0 && pCache->pDirty==0 );
+  if( pCache->szPage ){
+    sqlite3_pcache *pNew;
+    pNew = sqlite3GlobalConfig.pcache2.xCreate(
+                szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
+                pCache->bPurgeable
+    );
+    if( pNew==0 ) return SQLITE_NOMEM_BKPT;
+    sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache));
+    if( pCache->pCache ){
+      sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+    }
+    pCache->pCache = pNew;
+    pCache->szPage = szPage;
+    pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage));
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Try to obtain a page from the cache.
+**
+** This routine returns a pointer to an sqlite3_pcache_page object if
+** such an object is already in cache, or if a new one is created.
+** This routine returns a NULL pointer if the object was not in cache
+** and could not be created.
+**
+** The createFlag should be 0 to check for existing pages and should
+** be 3 (not 1, but 3) to try to create a new page.
+**
+** If the createFlag is 0, then NULL is always returned if the page
+** is not already in the cache.  If createFlag is 1, then a new page
+** is created only if that can be done without spilling dirty pages
+** and without exceeding the cache size limit.
+**
+** The caller needs to invoke sqlite3PcacheFetchFinish() to properly
+** initialize the sqlite3_pcache_page object and convert it into a
+** PgHdr object.  The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish()
+** routines are split this way for performance reasons.  When separated
+** they can both (usually) operate without having to push values to
+** the stack on entry and pop them back off on exit, which saves a
+** lot of pushing and popping.
+*/
+SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
+  PCache *pCache,       /* Obtain the page from this cache */
+  Pgno pgno,            /* Page number to obtain */
+  int createFlag        /* If true, create page if it does not exist already */
+){
+  int eCreate;
+  sqlite3_pcache_page *pRes;
+
+  assert( pCache!=0 );
+  assert( pCache->pCache!=0 );
+  assert( createFlag==3 || createFlag==0 );
+  assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );
+
+  /* eCreate defines what to do if the page does not exist.
+  **    0     Do not allocate a new page.  (createFlag==0)
+  **    1     Allocate a new page if doing so is inexpensive.
+  **          (createFlag==1 AND bPurgeable AND pDirty)
+  **    2     Allocate a new page even if doing so is difficult.
+  **          (createFlag==1 AND !(bPurgeable AND pDirty))
+  */
+  eCreate = createFlag & pCache->eCreate;
+  assert( eCreate==0 || eCreate==1 || eCreate==2 );
+  assert( createFlag==0 || pCache->eCreate==eCreate );
+  assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
+  pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
+  pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno,
+               createFlag?" create":"",pRes));
+  return pRes;
+}
+
+/*
+** If the sqlite3PcacheFetch() routine is unable to allocate a new
+** page because no clean pages are available for reuse and the cache
+** size limit has been reached, then this routine can be invoked to
+** try harder to allocate a page.  This routine might invoke the stress
+** callback to spill dirty pages to the journal.  It will then try to
+** allocate the new page and will only fail to allocate a new page on
+** an OOM error.
+**
+** This routine should be invoked only after sqlite3PcacheFetch() fails.
+*/
+SQLITE_PRIVATE int sqlite3PcacheFetchStress(
+  PCache *pCache,                 /* Obtain the page from this cache */
+  Pgno pgno,                      /* Page number to obtain */
+  sqlite3_pcache_page **ppPage    /* Write result here */
+){
+  PgHdr *pPg;
+  if( pCache->eCreate==2 ) return 0;
+
+  if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
+    /* Find a dirty page to write-out and recycle.  First try to find a
+    ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
+    ** cleared), but if that is not possible settle for any other
+    ** unreferenced dirty page.
+    **
+    ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC
+    ** flag is currently referenced, then the following may leave pSynced
+    ** set incorrectly (pointing to other than the LRU page with NEED_SYNC
+    ** cleared). This is Ok, as pSynced is just an optimization.  */
+    for(pPg=pCache->pSynced;
+        pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
+        pPg=pPg->pDirtyPrev
+    );
+    pCache->pSynced = pPg;
+    if( !pPg ){
+      for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
+    }
+    if( pPg ){
+      int rc;
+#ifdef SQLITE_LOG_CACHE_SPILL
+      sqlite3_log(SQLITE_FULL,
+                  "spill page %d making room for %d - cache used: %d/%d",
+                  pPg->pgno, pgno,
+                  sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache),
+                  numberOfCachePages(pCache));
+#endif
+      pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno));
+      rc = pCache->xStress(pCache->pStress, pPg);
+      pcacheDump(pCache);
+      if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+        return rc;
+      }
+    }
+  }
+  *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2);
+  return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
+}
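+
+/* Usage sketch (illustrative, not from the original source): a caller along
+** the lines of the pager's page-acquisition path combines the two-step
+** fetch with the stress fallback roughly as follows (error handling and
+** unrelated bookkeeping elided):
+**
+**    sqlite3_pcache_page *pBase;
+**    PgHdr *pPg = 0;
+**    pBase = sqlite3PcacheFetch(pCache, pgno, 3);
+**    if( pBase==0 ){
+**      rc = sqlite3PcacheFetchStress(pCache, pgno, &pBase);
+**      if( rc!=SQLITE_OK ) return rc;
+**    }
+**    if( pBase ) pPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);
+**
+** The split keeps the common already-cached case on a fast path while the
+** spill logic lives in the rarely taken stress routine. */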
+
+/*
+** This is a helper routine for sqlite3PcacheFetchFinish().
+**
+** In the uncommon case where the page being fetched has not been
+** initialized, this routine is invoked to do the initialization.
+** This routine is broken out into a separate function since it
+** requires extra stack manipulation that can be avoided in the common
+** case.
+*/
+static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
+  PCache *pCache,             /* Obtain the page from this cache */
+  Pgno pgno,                  /* Page number obtained */
+  sqlite3_pcache_page *pPage  /* Page obtained by prior PcacheFetch() call */
+){
+  PgHdr *pPgHdr;
+  assert( pPage!=0 );
+  pPgHdr = (PgHdr*)pPage->pExtra;
+  assert( pPgHdr->pPage==0 );
+  memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty));
+  pPgHdr->pPage = pPage;
+  pPgHdr->pData = pPage->pBuf;
+  pPgHdr->pExtra = (void *)&pPgHdr[1];
+  memset(pPgHdr->pExtra, 0, 8);
+  pPgHdr->pCache = pCache;
+  pPgHdr->pgno = pgno;
+  pPgHdr->flags = PGHDR_CLEAN;
+  return sqlite3PcacheFetchFinish(pCache,pgno,pPage);
+}
+
+/*
+** This routine converts the sqlite3_pcache_page object returned by
+** sqlite3PcacheFetch() into an initialized PgHdr object.  This routine
+** must be called after sqlite3PcacheFetch() in order to get a usable
+** result.
+*/
+SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(
+  PCache *pCache,             /* Obtain the page from this cache */
+  Pgno pgno,                  /* Page number obtained */
+  sqlite3_pcache_page *pPage  /* Page obtained by prior PcacheFetch() call */
+){
+  PgHdr *pPgHdr;
+
+  assert( pPage!=0 );
+  pPgHdr = (PgHdr *)pPage->pExtra;
+
+  if( !pPgHdr->pPage ){
+    return pcacheFetchFinishWithInit(pCache, pgno, pPage);
+  }
+  pCache->nRefSum++;
+  pPgHdr->nRef++;
+  assert( sqlite3PcachePageSanity(pPgHdr) );
+  return pPgHdr;
+}
+
+/*
+** Decrement the reference count on a page.  If the page is clean and the
+** reference count drops to 0, then it is made eligible for recycling.
+*/
+SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
+  assert( p->nRef>0 );
+  p->pCache->nRefSum--;
+  if( (--p->nRef)==0 ){
+    if( p->flags&PGHDR_CLEAN ){
+      pcacheUnpin(p);
+    }else if( p->pDirtyPrev!=0 ){   /*OPTIMIZATION-IF-FALSE*/
+      /* Move the page to the head of the dirty list.  If p->pDirtyPrev==0,
+      ** then page p is already at the head of the dirty list and the
+      ** following call would be a no-op.  Hence the OPTIMIZATION-IF-FALSE
+      ** tag above. */
+      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+    }
+  }
+}
+
+/*
+** Increase the reference count of a supplied page by 1.
+*/
+SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){
+  assert(p->nRef>0);
+  assert( sqlite3PcachePageSanity(p) );
+  p->nRef++;
+  p->pCache->nRefSum++;
+}
+
+/*
+** Drop a page from the cache.  There must be exactly one reference to the
+** page.  This function deletes that reference, so after it returns the
+** page pointed to by p is invalid.
+*/
+SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
+  assert( p->nRef==1 );
+  assert( sqlite3PcachePageSanity(p) );
+  if( p->flags&PGHDR_DIRTY ){
+    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
+  }
+  p->pCache->nRefSum--;
+  sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1);
+}
+
+/*
+** Make sure the page is marked as dirty.  If it isn't dirty already,
+** make it so.
+*/
+SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
+  assert( p->nRef>0 );
+  assert( sqlite3PcachePageSanity(p) );
+  if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){    /*OPTIMIZATION-IF-FALSE*/
+    p->flags &= ~PGHDR_DONT_WRITE;
+    if( p->flags & PGHDR_CLEAN ){
+      p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
+      pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
+      assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
+      pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
+    }
+    assert( sqlite3PcachePageSanity(p) );
+  }
+}
+
+/*
+** Make sure the page is marked as clean.  If it isn't clean already,
+** make it so.
+*/
+SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){
+  assert( sqlite3PcachePageSanity(p) );
+  if( ALWAYS((p->flags & PGHDR_DIRTY)!=0) ){
+    assert( (p->flags & PGHDR_CLEAN)==0 );
+    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
+    p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
+    p->flags |= PGHDR_CLEAN;
+    pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno));
+    assert( sqlite3PcachePageSanity(p) );
+    if( p->nRef==0 ){
+      pcacheUnpin(p);
+    }
+  }
+}
+
+/*
+** Make every page in the cache clean.
+*/
+SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){
+  PgHdr *p;
+  pcacheTrace(("%p.CLEAN-ALL\n",pCache));
+  while( (p = pCache->pDirty)!=0 ){
+    sqlite3PcacheMakeClean(p);
+  }
+}
+
+/*
+** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flags from all dirty pages.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){
+  PgHdr *p;
+  pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache));
+  for(p=pCache->pDirty; p; p=p->pDirtyNext){
+    p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
+  }
+  pCache->pSynced = pCache->pDirtyTail;
+}
+
+/*
+** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *pCache){
+  PgHdr *p;
+  for(p=pCache->pDirty; p; p=p->pDirtyNext){
+    p->flags &= ~PGHDR_NEED_SYNC;
+  }
+  pCache->pSynced = pCache->pDirtyTail;
+}
+
+/*
+** Change the page number of page p to newPgno.
+*/
+SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
+  PCache *pCache = p->pCache;
+  assert( p->nRef>0 );
+  assert( newPgno>0 );
+  assert( sqlite3PcachePageSanity(p) );
+  pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
+  sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
+  p->pgno = newPgno;
+  if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
+    pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+  }
+}
+
+/*
+** Drop every cache entry whose page number is greater than "pgno".  The
+** caller must ensure that there are no outstanding references to any pages
+** other than page 1 with a page number greater than pgno.
+**
+** If there is a reference to page 1 and the pgno parameter passed to this
+** function is 0, then the data area associated with page 1 is zeroed, but
+** the page object is not dropped.
+*/
+SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
+  if( pCache->pCache ){
+    PgHdr *p;
+    PgHdr *pNext;
+    pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno));
+    for(p=pCache->pDirty; p; p=pNext){
+      pNext = p->pDirtyNext;
+      /* This routine never gets called with a positive pgno except right
+      ** after sqlite3PcacheCleanAll().  So if there are dirty pages,
+      ** it must be that pgno==0.
+      */
+      assert( p->pgno>0 );
+      if( p->pgno>pgno ){
+        assert( p->flags&PGHDR_DIRTY );
+        sqlite3PcacheMakeClean(p);
+      }
+    }
+    if( pgno==0 && pCache->nRefSum ){
+      sqlite3_pcache_page *pPage1;
+      pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0);
+      if( ALWAYS(pPage1) ){  /* Page 1 is always available in cache, because
+                             ** pCache->nRefSum>0 */
+        memset(pPage1->pBuf, 0, pCache->szPage);
+        pgno = 1;
+      }
+    }
+    sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1);
+  }
+}
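+
+/* Illustration (not from the original source): truncating a 100-page cache
+** with sqlite3PcacheTruncate(pCache, 50) keeps entries 1..50 and discards
+** 51..100: the pluggable layer receives xTruncate(pCache, 51), which drops
+** all keys >= 51.  The pgno==0 special case covers rolling back the whole
+** file while a statement still holds page 1: the page-1 buffer is zeroed
+** in place rather than dropped, so the outstanding reference stays valid. */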
+
+/*
+** Close a cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){
+  assert( pCache->pCache!=0 );
+  pcacheTrace(("%p.CLOSE\n",pCache));
+  sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache);
+}
+
+/*
+** Discard the contents of the cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){
+  sqlite3PcacheTruncate(pCache, 0);
+}
+
+/*
+** Merge two lists of pages connected by pDirty and in pgno order.
+** Do not bother fixing the pDirtyPrev pointers.
+*/
+static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
+  PgHdr result, *pTail;
+  pTail = &result;
+  assert( pA!=0 && pB!=0 );
+  for(;;){
+    if( pA->pgno<pB->pgno ){
+      pTail->pDirty = pA;
+      pTail = pA;
+      pA = pA->pDirty;
+      if( pA==0 ){
+        pTail->pDirty = pB;
+        break;
+      }
+    }else{
+      pTail->pDirty = pB;
+      pTail = pB;
+      pB = pB->pDirty;
+      if( pB==0 ){
+        pTail->pDirty = pA;
+        break;
+      }
+    }
+  }
+  return result.pDirty;
+}
+
+/*
+** Sort the list of pages in ascending order by pgno.  Pages are
+** connected by pDirty pointers.  The pDirtyPrev pointers are
+** corrupted by this sort.
+**
+** Since there cannot be more than 2^31 distinct pages in a database,
+** there cannot be more than 31 buckets required by the merge sorter.
+** One extra bucket is added to catch overflow in case something
+** ever changes to make the previous sentence incorrect.
+*/
+#define N_SORT_BUCKET  32
+static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
+  PgHdr *a[N_SORT_BUCKET], *p;
+  int i;
+  memset(a, 0, sizeof(a));
+  while( pIn ){
+    p = pIn;
+    pIn = p->pDirty;
+    p->pDirty = 0;
+    for(i=0; ALWAYS(i<N_SORT_BUCKET-1); i++){
+      if( a[i]==0 ){
+        a[i] = p;
+        break;
+      }else{
+        p = pcacheMergeDirtyList(a[i], p);
+        a[i] = 0;
+      }
+    }
+    if( NEVER(i==N_SORT_BUCKET-1) ){
+      /* To get here, there need to be 2^(N_SORT_BUCKET) elements in
+      ** the input list.  But that is impossible.
+      */
+      a[i] = pcacheMergeDirtyList(a[i], p);
+    }
+  }
+  p = a[0];
+  for(i=1; i<N_SORT_BUCKET; i++){
+    if( a[i]==0 ) continue;
+    p = p ? pcacheMergeDirtyList(p, a[i]) : a[i];
+  }
+  return p;
+}
+
+/*
+** Return a list of all dirty pages in the cache, sorted by pgno.
+*/
+SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
+  PgHdr *p;
+  for(p=pCache->pDirty; p; p=p->pDirtyNext){
+    p->pDirty = p->pDirtyNext;
+  }
+  return pcacheSortDirtyList(pCache->pDirty);
+}
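+
+/* Illustration (not from the original source): the sorter above is a
+** bottom-up mergesort.  Bucket a[i], when occupied, holds a sorted run of
+** exactly 2^i pages, so feeding pages in one at a time behaves like binary
+** addition: inserting the 4th page merges a run of 1 out of a[0] with the
+** run of 2 in a[1] and deposits a run of 4 in a[2], leaving a[0] and a[1]
+** empty.  2^31 pages therefore need only 31 buckets, plus the overflow
+** bucket noted above. */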
+
+/*
+** Return the total number of references to all pages held by the cache.
+**
+** This is not the total number of pages referenced, but the sum of the
+** reference count for all pages.
+*/
+SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){
+  return pCache->nRefSum;
+}
+
+/*
+** Return the number of references to the page supplied as an argument.
+*/
+SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){
+  return p->nRef;
+}
+
+/*
+** Return the total number of pages in the cache.
+*/
+SQLITE_PRIVATE int sqlite3PcachePagecount(PCache *pCache){
+  assert( pCache->pCache!=0 );
+  return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache);
+}
+
+#ifdef SQLITE_TEST
+/*
+** Get the suggested cache-size value.
+*/
+SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *pCache){
+  return numberOfCachePages(pCache);
+}
+#endif
+
+/*
+** Set the suggested cache-size value.
+*/
+SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
+  assert( pCache->pCache!=0 );
+  pCache->szCache = mxPage;
+  sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache,
+                                         numberOfCachePages(pCache));
+}
+
+/*
+** Set the suggested cache-spill value.  Make no changes if the
+** argument is zero.  Return the effective cache-spill size, which will
+** be the larger of the szSpill and szCache values.
+*/
+SQLITE_PRIVATE int sqlite3PcacheSetSpillsize(PCache *p, int mxPage){
+  int res;
+  assert( p->pCache!=0 );
+  if( mxPage ){
+    if( mxPage<0 ){
+      mxPage = (int)((-1024*(i64)mxPage)/(p->szPage+p->szExtra));
+    }
+    p->szSpill = mxPage;
+  }
+  res = numberOfCachePages(p);
+  if( res<p->szSpill ) res = p->szSpill;
+  return res;
+}
+
+/*
+** Free up as much memory as possible from the page cache.
+*/
+SQLITE_PRIVATE void sqlite3PcacheShrink(PCache *pCache){
+  assert( pCache->pCache!=0 );
+  sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache);
+}
+
+/*
+** Return the size of the header added by this middleware layer
+** in the page-cache hierarchy.
+*/
+SQLITE_PRIVATE int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }
+
+/*
+** Return the number of dirty pages currently in the cache, as a percentage
+** of the configured cache size.
+*/
+SQLITE_PRIVATE int sqlite3PCachePercentDirty(PCache *pCache){
+  PgHdr *pDirty;
+  int nDirty = 0;
+  int nCache = numberOfCachePages(pCache);
+  for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++;
+  return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0;
+}
+
+#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
+/*
+** For all dirty pages currently in the cache, invoke the specified
+** callback.  This is only used if the SQLITE_CHECK_PAGES macro is
+** defined.
+*/
+SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){
+  PgHdr *pDirty;
+  for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){
+    xIter(pDirty);
+  }
+}
+#endif
+
+/************** End of pcache.c **********************************************/
+** +** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates +** with this module. Information is passed back and forth as PgHdr1 pointers. +** +** The pcache.c and pager.c modules deal pointers to PgHdr objects. +** The btree.c module deals with pointers to MemPage objects. +** +** SOURCE OF PAGE CACHE MEMORY: +** +** Memory for a page might come from any of three sources: +** +** (1) The general-purpose memory allocator - sqlite3Malloc() +** (2) Global page-cache memory provided using sqlite3_config() with +** SQLITE_CONFIG_PAGECACHE. +** (3) PCache-local bulk allocation. +** +** The third case is a chunk of heap memory (defaulting to 100 pages worth) +** that is allocated when the page cache is created. The size of the local +** bulk allocation can be adjusted using +** +** sqlite3_config(SQLITE_CONFIG_PAGECACHE, (void*)0, 0, N). +** +** If N is positive, then N pages worth of memory are allocated using a single +** sqlite3Malloc() call and that memory is used for the first N pages allocated. +** Or if N is negative, then -1024*N bytes of memory are allocated and used +** for as many pages as can be accomodated. +** +** Only one of (2) or (3) can be used. Once the memory available to (2) or +** (3) is exhausted, subsequent allocations fail over to the general-purpose +** memory allocator (1). +** +** Earlier versions of SQLite used only methods (1) and (2). But experiments +** show that method (3) with N==100 provides about a 5% performance boost for +** common workloads. +*/ +/* #include "sqliteInt.h" */ + +typedef struct PCache1 PCache1; +typedef struct PgHdr1 PgHdr1; +typedef struct PgFreeslot PgFreeslot; +typedef struct PGroup PGroup; + +/* +** Each cache entry is represented by an instance of the following +** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of +** PgHdr1.pCache->szPage bytes is allocated directly before this structure +** in memory. +*/ +struct PgHdr1 { + sqlite3_pcache_page page; /* Base class. Must be first. pBuf & pExtra */ + unsigned int iKey; /* Key value (page number) */ + u8 isPinned; /* Page in use, not on the LRU list */ + u8 isBulkLocal; /* This page from bulk local storage */ + u8 isAnchor; /* This is the PGroup.lru element */ + PgHdr1 *pNext; /* Next in hash table chain */ + PCache1 *pCache; /* Cache that currently owns this page */ + PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */ + PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ +}; + +/* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set +** of one or more PCaches that are able to recycle each other's unpinned +** pages when they are under memory pressure. A PGroup is an instance of +** the following object. +** +** This page cache implementation works in one of two modes: +** +** (1) Every PCache is the sole member of its own PGroup. There is +** one PGroup per PCache. +** +** (2) There is a single global PGroup that all PCaches are a member +** of. +** +** Mode 1 uses more memory (since PCache instances are not able to rob +** unused pages from other PCaches) but it also operates without a mutex, +** and is therefore often faster. Mode 2 requires a mutex in order to be +** threadsafe, but recycles pages more efficiently. +** +** For mode (1), PGroup.mutex is NULL. For mode (2) there is only a single +** PGroup which is the pcache1.grp global variable and its mutex is +** SQLITE_MUTEX_STATIC_LRU. 
+struct PGroup {
+  sqlite3_mutex *mutex;          /* MUTEX_STATIC_LRU or NULL */
+  unsigned int nMaxPage;         /* Sum of nMax for purgeable caches */
+  unsigned int nMinPage;         /* Sum of nMin for purgeable caches */
+  unsigned int mxPinned;         /* nMaxPage + 10 - nMinPage */
+  unsigned int nCurrentPage;     /* Number of purgeable pages allocated */
+  PgHdr1 lru;                    /* The beginning and end of the LRU list */
+};
+
+/* Each page cache is an instance of the following object.  Every
+** open database file (including each in-memory database and each
+** temporary or transient database) has a single page cache which
+** is an instance of this object.
+**
+** Pointers to structures of this type are cast and returned as
+** opaque sqlite3_pcache* handles.
+*/
+struct PCache1 {
+  /* Cache configuration parameters. Page size (szPage) and the purgeable
+  ** flag (bPurgeable) are set when the cache is created. nMax may be
+  ** modified at any time by a call to the pcache1Cachesize() method.
+  ** The PGroup mutex must be held when accessing nMax.
+  */
+  PGroup *pGroup;                     /* PGroup this cache belongs to */
+  int szPage;                         /* Size of database content section */
+  int szExtra;                        /* sizeof(MemPage)+sizeof(PgHdr) */
+  int szAlloc;                        /* Total size of one pcache line */
+  int bPurgeable;                     /* True if cache is purgeable */
+  unsigned int nMin;                  /* Minimum number of pages reserved */
+  unsigned int nMax;                  /* Configured "cache_size" value */
+  unsigned int n90pct;                /* nMax*9/10 */
+  unsigned int iMaxKey;               /* Largest key seen since xTruncate() */
+
+  /* Hash table of all pages. The following variables may only be accessed
+  ** when the accessor is holding the PGroup mutex.
+  */
+  unsigned int nRecyclable;           /* Number of pages in the LRU list */
+  unsigned int nPage;                 /* Total number of pages in apHash */
+  unsigned int nHash;                 /* Number of slots in apHash[] */
+  PgHdr1 **apHash;                    /* Hash table for fast lookup by key */
+  PgHdr1 *pFree;                      /* List of unused pcache-local pages */
+  void *pBulk;                        /* Bulk memory used by pcache-local */
+};
+
+/*
+** Free slots in the allocator used to divide up the global page cache
+** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism.
+*/
+struct PgFreeslot {
+  PgFreeslot *pNext;  /* Next free slot */
+};
+
+/*
+** Global data used by this cache.
+*/
+static SQLITE_WSD struct PCacheGlobal {
+  PGroup grp;                    /* The global PGroup for mode (2) */
+
+  /* Variables related to SQLITE_CONFIG_PAGECACHE settings.  The
+  ** szSlot, nSlot, pStart, pEnd, nReserve, and isInit values are all
+  ** fixed at sqlite3_initialize() time and do not require mutex protection.
+  ** The nFreeSlot and pFree values do require mutex protection.
+  */
+  int isInit;                    /* True if initialized */
+  int separateCache;             /* Use a new PGroup for each PCache */
+  int nInitPage;                 /* Initial bulk allocation size */
+  int szSlot;                    /* Size of each free slot */
+  int nSlot;                     /* The number of pcache slots */
+  int nReserve;                  /* Try to keep nFreeSlot above this */
+  void *pStart, *pEnd;           /* Bounds of global page cache memory */
+  /* Above requires no mutex.  Use the mutex below for the variables that
+  ** follow. */
+  sqlite3_mutex *mutex;          /* Mutex for accessing the following: */
+  PgFreeslot *pFree;             /* Free page blocks */
+  int nFreeSlot;                 /* Number of unused pcache slots */
+  /* The following value requires a mutex to change.  We skip the mutex on
+  ** reading because (1) most platforms read a 32-bit integer atomically and
+  ** (2) even if an incorrect value is read, no great harm is done since this
+  ** is really just an optimization.  */
+  int bUnderPressure;            /* True if low on PAGECACHE memory */
+} pcache1_g;
+
+/*
+** All code in this file should access the global structure above via the
+** alias "pcache1". This ensures that the WSD emulation is used when
+** compiling for systems that do not support real WSD.
+*/
+#define pcache1 (GLOBAL(struct PCacheGlobal, pcache1_g))
+
+/*
+** Macros to enter and leave the PCache LRU mutex.
+*/
+#if !defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0
+# define pcache1EnterMutex(X)  assert((X)->mutex==0)
+# define pcache1LeaveMutex(X)  assert((X)->mutex==0)
+# define PCACHE1_MIGHT_USE_GROUP_MUTEX 0
+#else
+# define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex)
+# define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex)
+# define PCACHE1_MIGHT_USE_GROUP_MUTEX 1
+#endif
+
+/******************************************************************************/
+/******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/
+
+
+/*
+** This function is called during initialization if a static buffer is
+** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE
+** verb to sqlite3_config().  Parameter pBuf points to an allocation large
+** enough to contain 'n' buffers of 'sz' bytes each.
+**
+** This routine is called from sqlite3_initialize() and so it is guaranteed
+** to be serialized already.  There is no need for further mutexing.
+*/
+SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
+  if( pcache1.isInit ){
+    PgFreeslot *p;
+    if( pBuf==0 ) sz = n = 0;
+    sz = ROUNDDOWN8(sz);
+    pcache1.szSlot = sz;
+    pcache1.nSlot = pcache1.nFreeSlot = n;
+    pcache1.nReserve = n>90 ? 10 : (n/10 + 1);
+    pcache1.pStart = pBuf;
+    pcache1.pFree = 0;
+    pcache1.bUnderPressure = 0;
+    while( n-- ){
+      p = (PgFreeslot*)pBuf;
+      p->pNext = pcache1.pFree;
+      pcache1.pFree = p;
+      pBuf = (void*)&((char*)pBuf)[sz];
+    }
+    pcache1.pEnd = pBuf;
+  }
+}
+
+/*
+** Try to initialize the pCache->pFree and pCache->pBulk fields.  Return
+** true if pCache->pFree ends up containing one or more free pages.
+*/
+static int pcache1InitBulk(PCache1 *pCache){
+  i64 szBulk;
+  char *zBulk;
+  if( pcache1.nInitPage==0 ) return 0;
+  /* Do not bother with a bulk allocation if the cache size is very small */
+  if( pCache->nMax<3 ) return 0;
+  sqlite3BeginBenignMalloc();
+  if( pcache1.nInitPage>0 ){
+    szBulk = pCache->szAlloc * (i64)pcache1.nInitPage;
+  }else{
+    szBulk = -1024 * (i64)pcache1.nInitPage;
+  }
+  if( szBulk > pCache->szAlloc*(i64)pCache->nMax ){
+    szBulk = pCache->szAlloc*(i64)pCache->nMax;
+  }
+  zBulk = pCache->pBulk = sqlite3Malloc( szBulk );
+  sqlite3EndBenignMalloc();
+  if( zBulk ){
+    int nBulk = sqlite3MallocSize(zBulk)/pCache->szAlloc;
+    int i;
+    for(i=0; i<nBulk; i++){
+      PgHdr1 *pX = (PgHdr1*)&zBulk[pCache->szPage];
+      pX->page.pBuf = zBulk;
+      pX->page.pExtra = &pX[1];
+      pX->isBulkLocal = 1;
+      pX->isAnchor = 0;
+      pX->pNext = pCache->pFree;
+      pCache->pFree = pX;
+      zBulk += pCache->szAlloc;
+    }
+  }
+  return pCache->pFree!=0;
+}
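+
+/* Configuration sketch (illustrative, not from the original source): an
+** application can route page allocations through the static buffer that
+** sqlite3PCacheBufferSetup() carves into slots by calling, before
+** sqlite3_initialize():
+**
+**    static char aPageBuf[4400*512];   -- 512 slots of 4400 bytes each
+**    sqlite3_config(SQLITE_CONFIG_PAGECACHE, aPageBuf, 4400, 512);
+**
+** The slot size (4400 here is an assumed value) should cover the database
+** page size plus the header overhead reported by
+** sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &sz); requests larger than a
+** slot simply fall through to sqlite3Malloc(). */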
+
+/*
+** Malloc function used within this file to allocate space from the buffer
+** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no
+** such buffer exists or there is no space left in it, this function falls
+** back to sqlite3Malloc().
+**
+** Multiple threads can run this routine at the same time.  Global variables
+** in pcache1 need to be protected via mutex.
+*/
+static void *pcache1Alloc(int nByte){
+  void *p = 0;
+  assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
+  if( nByte<=pcache1.szSlot ){
+    sqlite3_mutex_enter(pcache1.mutex);
+    p = (PgHdr1 *)pcache1.pFree;
+    if( p ){
+      pcache1.pFree = pcache1.pFree->pNext;
+      pcache1.nFreeSlot--;
+      pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
+      assert( pcache1.nFreeSlot>=0 );
+      sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
+      sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1);
+    }
+    sqlite3_mutex_leave(pcache1.mutex);
+  }
+  if( p==0 ){
+    /* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool.  Get
+    ** it from sqlite3Malloc instead.
+    */
+    p = sqlite3Malloc(nByte);
+#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
+    if( p ){
+      int sz = sqlite3MallocSize(p);
+      sqlite3_mutex_enter(pcache1.mutex);
+      sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
+      sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz);
+      sqlite3_mutex_leave(pcache1.mutex);
+    }
+#endif
+    sqlite3MemdebugSetType(p, MEMTYPE_PCACHE);
+  }
+  return p;
+}
+
+/*
+** Free an allocated buffer obtained from pcache1Alloc().
+*/
+static void pcache1Free(void *p){
+  if( p==0 ) return;
+  if( SQLITE_WITHIN(p, pcache1.pStart, pcache1.pEnd) ){
+    PgFreeslot *pSlot;
+    sqlite3_mutex_enter(pcache1.mutex);
+    sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_USED, 1);
+    pSlot = (PgFreeslot*)p;
+    pSlot->pNext = pcache1.pFree;
+    pcache1.pFree = pSlot;
+    pcache1.nFreeSlot++;
+    pcache1.bUnderPressure = pcache1.nFreeSlot<pcache1.nReserve;
+    assert( pcache1.nFreeSlot<=pcache1.nSlot );
+    sqlite3_mutex_leave(pcache1.mutex);
+  }else{
+    assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) );
+    sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+#ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
+    {
+      int nFreed = 0;
+      nFreed = sqlite3MallocSize(p);
+      sqlite3_mutex_enter(pcache1.mutex);
+      sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_OVERFLOW, nFreed);
+      sqlite3_mutex_leave(pcache1.mutex);
+    }
+#endif
+    sqlite3_free(p);
+  }
+}
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+/*
+** Return the size of a pcache allocation
+*/
+static int pcache1MemSize(void *p){
+  if( p>=pcache1.pStart && p<pcache1.pEnd ){
+    return pcache1.szSlot;
+  }else{
+    int iSize;
+    assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) );
+    sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+    iSize = sqlite3MallocSize(p);
+    sqlite3MemdebugSetType(p, MEMTYPE_PCACHE);
+    return iSize;
+  }
+}
+#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */
+
+/*
+** Allocate a new page object initially associated with cache pCache.
+*/
+static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){
+  PgHdr1 *p = 0;
+  void *pPg;
+
+  assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+  if( pCache->pFree || (pCache->nPage==0 && pcache1InitBulk(pCache)) ){
+    p = pCache->pFree;
+    pCache->pFree = p->pNext;
+    p->pNext = 0;
+  }else{
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+    /* The group mutex must be released before pcache1Alloc() is called. This
+    ** is because it might call sqlite3_release_memory(), which assumes that
+    ** this mutex is not held. */
+    assert( pcache1.separateCache==0 );
+    assert( pCache->pGroup==&pcache1.grp );
+    pcache1LeaveMutex(pCache->pGroup);
+#endif
+    if( benignMalloc ){ sqlite3BeginBenignMalloc(); }
+#ifdef SQLITE_PCACHE_SEPARATE_HEADER
+    pPg = pcache1Alloc(pCache->szPage);
+    p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
+    if( !pPg || !p ){
+      pcache1Free(pPg);
+      sqlite3_free(p);
+      pPg = 0;
+    }
+#else
+    pPg = pcache1Alloc(pCache->szAlloc);
+    p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
+#endif
+    if( benignMalloc ){ sqlite3EndBenignMalloc(); }
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+    pcache1EnterMutex(pCache->pGroup);
+#endif
+    if( pPg==0 ) return 0;
+    p->page.pBuf = pPg;
+    p->page.pExtra = &p[1];
+    p->isBulkLocal = 0;
+    p->isAnchor = 0;
+  }
+  if( pCache->bPurgeable ){
+    pCache->pGroup->nCurrentPage++;
+  }
+  return p;
+}
+
+/*
+** Free a page object allocated by pcache1AllocPage().
+*/
+static void pcache1FreePage(PgHdr1 *p){
+  PCache1 *pCache;
+  assert( p!=0 );
+  pCache = p->pCache;
+  assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) );
+  if( p->isBulkLocal ){
+    p->pNext = pCache->pFree;
+    pCache->pFree = p;
+  }else{
+    pcache1Free(p->page.pBuf);
+#ifdef SQLITE_PCACHE_SEPARATE_HEADER
+    sqlite3_free(p);
+#endif
+  }
+  if( pCache->bPurgeable ){
+    pCache->pGroup->nCurrentPage--;
+  }
+}
+
+/*
+** Malloc function used by SQLite to obtain space from the buffer configured
+** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer
+** exists, this function falls back to sqlite3Malloc().
+*/
+SQLITE_PRIVATE void *sqlite3PageMalloc(int sz){
+  return pcache1Alloc(sz);
+}
+
+/*
+** Free an allocated buffer obtained from sqlite3PageMalloc().
+*/
+SQLITE_PRIVATE void sqlite3PageFree(void *p){
+  pcache1Free(p);
+}
+
+
+/*
+** Return true if it is desirable to avoid allocating a new page cache
+** entry.
+**
+** If memory was allocated specifically to the page cache using
+** SQLITE_CONFIG_PAGECACHE but that memory has all been used, then
+** it is desirable to avoid allocating a new page cache entry because
+** presumably SQLITE_CONFIG_PAGECACHE was supposed to be sufficient
+** for all page cache needs and we should not need to spill the
+** allocation onto the heap.
+**
+** Or, if the heap is used for all page cache memory and the heap is
+** under memory pressure, then again it is desirable to avoid
+** allocating a new page cache entry in order to avoid stressing
+** the heap even further.
+*/
+static int pcache1UnderMemoryPressure(PCache1 *pCache){
+  if( pcache1.nSlot && (pCache->szPage+pCache->szExtra)<=pcache1.szSlot ){
+    return pcache1.bUnderPressure;
+  }else{
+    return sqlite3HeapNearlyFull();
+  }
+}
+
+/******************************************************************************/
+/******** General Implementation Functions ************************************/
+
+/*
+** This function is used to resize the hash table used by the cache passed
+** as the first argument.
+**
+** The PCache mutex must be held when this function is called.
+*/
+static void pcache1ResizeHash(PCache1 *p){
+  PgHdr1 **apNew;
+  unsigned int nNew;
+  unsigned int i;
+
+  assert( sqlite3_mutex_held(p->pGroup->mutex) );
+
+  nNew = p->nHash*2;
+  if( nNew<256 ){
+    nNew = 256;
+  }
+
+  pcache1LeaveMutex(p->pGroup);
+  if( p->nHash ){ sqlite3BeginBenignMalloc(); }
+  apNew = (PgHdr1 **)sqlite3MallocZero(sizeof(PgHdr1 *)*nNew);
+  if( p->nHash ){ sqlite3EndBenignMalloc(); }
+  pcache1EnterMutex(p->pGroup);
+  if( apNew ){
+    for(i=0; i<p->nHash; i++){
+      PgHdr1 *pPage;
+      PgHdr1 *pNext = p->apHash[i];
+      while( (pPage = pNext)!=0 ){
+        unsigned int h = pPage->iKey % nNew;
+        pNext = pPage->pNext;
+        pPage->pNext = apNew[h];
+        apNew[h] = pPage;
+      }
+    }
+    sqlite3_free(p->apHash);
+    p->apHash = apNew;
+    p->nHash = nNew;
+  }
+}
+
+/*
+** This function is used internally to remove the page pPage from the
+** PGroup LRU list, if it is part of it.  If pPage is not part of the PGroup
+** LRU list, then this function is a no-op.
+**
+** The PGroup mutex must be held when this function is called.
+*/
+static PgHdr1 *pcache1PinPage(PgHdr1 *pPage){
+  PCache1 *pCache;
+
+  assert( pPage!=0 );
+  assert( pPage->isPinned==0 );
+  pCache = pPage->pCache;
+  assert( pPage->pLruNext );
+  assert( pPage->pLruPrev );
+  assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+  pPage->pLruPrev->pLruNext = pPage->pLruNext;
+  pPage->pLruNext->pLruPrev = pPage->pLruPrev;
+  pPage->pLruNext = 0;
+  pPage->pLruPrev = 0;
+  pPage->isPinned = 1;
+  assert( pPage->isAnchor==0 );
+  assert( pCache->pGroup->lru.isAnchor==1 );
+  pCache->nRecyclable--;
+  return pPage;
+}
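+
+/* Illustration (not from the original source): the hash table doubles from
+** a floor of 256 slots, and every entry is re-chained by recomputing
+** iKey % nNew.  Growing from 256 to 512 slots, a page with iKey==700 stays
+** in bucket 700%256 == 700%512 == 188, while iKey==300 moves from bucket
+** 300%256==44 to bucket 300%512==300.  Chains stay consistent because each
+** page is pushed onto the head of its new bucket. */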
+
+/*
+** Remove the page supplied as an argument from the hash table
+** (PCache1.apHash structure) that it is currently stored in.
+** Also free the page if freeFlag is true.
+**
+** The PGroup mutex must be held when this function is called.
+*/
+static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag){
+  unsigned int h;
+  PCache1 *pCache = pPage->pCache;
+  PgHdr1 **pp;
+
+  assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+  h = pPage->iKey % pCache->nHash;
+  for(pp=&pCache->apHash[h]; (*pp)!=pPage; pp=&(*pp)->pNext);
+  *pp = (*pp)->pNext;
+
+  pCache->nPage--;
+  if( freeFlag ) pcache1FreePage(pPage);
+}
+
+/*
+** If there are currently more than nMaxPage pages allocated, try
+** to recycle pages to reduce the number allocated to nMaxPage.
+*/
+static void pcache1EnforceMaxPage(PCache1 *pCache){
+  PGroup *pGroup = pCache->pGroup;
+  PgHdr1 *p;
+  assert( sqlite3_mutex_held(pGroup->mutex) );
+  while( pGroup->nCurrentPage>pGroup->nMaxPage
+      && (p=pGroup->lru.pLruPrev)->isAnchor==0
+  ){
+    assert( p->pCache->pGroup==pGroup );
+    assert( p->isPinned==0 );
+    pcache1PinPage(p);
+    pcache1RemoveFromHash(p, 1);
+  }
+  if( pCache->nPage==0 && pCache->pBulk ){
+    sqlite3_free(pCache->pBulk);
+    pCache->pBulk = pCache->pFree = 0;
+  }
+}
+
+/*
+** Discard all pages from cache pCache with a page number (key value)
+** greater than or equal to iLimit.  Any pinned pages that meet this
+** criterion are unpinned before they are discarded.
+**
+** The PCache mutex must be held when this function is called.
+*/
+static void pcache1TruncateUnsafe(
+  PCache1 *pCache,             /* The cache to truncate */
+  unsigned int iLimit          /* Drop pages with this pgno or larger */
+){
+  TESTONLY( int nPage = 0; )  /* To assert pCache->nPage is correct */
+  unsigned int h, iStop;
+  assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
+  assert( pCache->iMaxKey >= iLimit );
+  assert( pCache->nHash > 0 );
+  if( pCache->iMaxKey - iLimit < pCache->nHash ){
+    /* If we are just shaving the last few pages off the end of the
+    ** cache, then there is no point in scanning the entire hash table.
+    ** Only scan those hash slots that might contain pages that need to
+    ** be removed. */
+    h = iLimit % pCache->nHash;
+    iStop = pCache->iMaxKey % pCache->nHash;
+    TESTONLY( nPage = -10; )  /* Disable the pCache->nPage validity check */
+  }else{
+    /* This is the general case where many pages are being removed.
+    ** It is necessary to scan the entire hash table */
+    h = pCache->nHash/2;
+    iStop = h - 1;
+  }
+  for(;;){
+    PgHdr1 **pp;
+    PgHdr1 *pPage;
+    assert( h<pCache->nHash );
+    pp = &pCache->apHash[h];
+    while( (pPage = *pp)!=0 ){
+      if( pPage->iKey>=iLimit ){
+        pCache->nPage--;
+        *pp = pPage->pNext;
+        if( !pPage->isPinned ) pcache1PinPage(pPage);
+        pcache1FreePage(pPage);
+      }else{
+        pp = &pPage->pNext;
+        TESTONLY( if( nPage>=0 ) nPage++; )
+      }
+    }
+    if( h==iStop ) break;
+    h = (h+1) % pCache->nHash;
+  }
+  assert( nPage<0 || pCache->nPage==(unsigned)nPage );
+}
+
+/******************************************************************************/
+/******** sqlite3_pcache Methods **********************************************/
+
+/*
+** Implementation of the sqlite3_pcache.xInit method.
+*/
+static int pcache1Init(void *NotUsed){
+  UNUSED_PARAMETER(NotUsed);
+  assert( pcache1.isInit==0 );
+  memset(&pcache1, 0, sizeof(pcache1));
+
+
+  /*
+  ** The pcache1.separateCache variable is true if each PCache has its own
+  ** private PGroup (mode-1).  pcache1.separateCache is false if the single
+  ** PGroup in pcache1.grp is used for all page caches (mode-2).
+ ** + ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT + ** + ** * Use a unified cache in single-threaded applications that have + ** configured a start-time buffer for use as page-cache memory using + ** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL + ** pBuf argument. + ** + ** * Otherwise use separate caches (mode-1) + */ +#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) + pcache1.separateCache = 0; +#elif SQLITE_THREADSAFE + pcache1.separateCache = sqlite3GlobalConfig.pPage==0 + || sqlite3GlobalConfig.bCoreMutex>0; +#else + pcache1.separateCache = sqlite3GlobalConfig.pPage==0; +#endif + +#if SQLITE_THREADSAFE + if( sqlite3GlobalConfig.bCoreMutex ){ + pcache1.grp.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_LRU); + pcache1.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PMEM); + } +#endif + if( pcache1.separateCache + && sqlite3GlobalConfig.nPage!=0 + && sqlite3GlobalConfig.pPage==0 + ){ + pcache1.nInitPage = sqlite3GlobalConfig.nPage; + }else{ + pcache1.nInitPage = 0; + } + pcache1.grp.mxPinned = 10; + pcache1.isInit = 1; + return SQLITE_OK; +} + +/* +** Implementation of the sqlite3_pcache.xShutdown method. +** Note that the static mutex allocated in xInit does +** not need to be freed. +*/ +static void pcache1Shutdown(void *NotUsed){ + UNUSED_PARAMETER(NotUsed); + assert( pcache1.isInit!=0 ); + memset(&pcache1, 0, sizeof(pcache1)); +} + +/* forward declaration */ +static void pcache1Destroy(sqlite3_pcache *p); + +/* +** Implementation of the sqlite3_pcache.xCreate method. +** +** Allocate a new cache. +*/ +static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ + PCache1 *pCache; /* The newly created page cache */ + PGroup *pGroup; /* The group the new page cache will belong to */ + int sz; /* Bytes of memory required to allocate the new cache */ + + assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 ); + assert( szExtra < 300 ); + + sz = sizeof(PCache1) + sizeof(PGroup)*pcache1.separateCache; + pCache = (PCache1 *)sqlite3MallocZero(sz); + if( pCache ){ + if( pcache1.separateCache ){ + pGroup = (PGroup*)&pCache[1]; + pGroup->mxPinned = 10; + }else{ + pGroup = &pcache1.grp; + } + if( pGroup->lru.isAnchor==0 ){ + pGroup->lru.isAnchor = 1; + pGroup->lru.pLruPrev = pGroup->lru.pLruNext = &pGroup->lru; + } + pCache->pGroup = pGroup; + pCache->szPage = szPage; + pCache->szExtra = szExtra; + pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1)); + pCache->bPurgeable = (bPurgeable ? 1 : 0); + pcache1EnterMutex(pGroup); + pcache1ResizeHash(pCache); + if( bPurgeable ){ + pCache->nMin = 10; + pGroup->nMinPage += pCache->nMin; + pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; + } + pcache1LeaveMutex(pGroup); + if( pCache->nHash==0 ){ + pcache1Destroy((sqlite3_pcache*)pCache); + pCache = 0; + } + } + return (sqlite3_pcache *)pCache; +} + +/* +** Implementation of the sqlite3_pcache.xCachesize method. +** +** Configure the cache_size limit for a cache. +*/ +static void pcache1Cachesize(sqlite3_pcache *p, int nMax){ + PCache1 *pCache = (PCache1 *)p; + if( pCache->bPurgeable ){ + PGroup *pGroup = pCache->pGroup; + pcache1EnterMutex(pGroup); + pGroup->nMaxPage += (nMax - pCache->nMax); + pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; + pCache->nMax = nMax; + pCache->n90pct = pCache->nMax*9/10; + pcache1EnforceMaxPage(pCache); + pcache1LeaveMutex(pGroup); + } +} + +/* +** Implementation of the sqlite3_pcache.xShrink method. +** +** Free up as much memory as possible. 
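+**
+** In this implementation that is done by temporarily setting the group's
+** nMaxPage limit to zero and calling pcache1EnforceMaxPage(), which then
+** evicts every unpinned page in the group; the saved limit is restored
+** afterwards.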
+*/
+static void pcache1Shrink(sqlite3_pcache *p){
+  PCache1 *pCache = (PCache1*)p;
+  if( pCache->bPurgeable ){
+    PGroup *pGroup = pCache->pGroup;
+    int savedMaxPage;
+    pcache1EnterMutex(pGroup);
+    savedMaxPage = pGroup->nMaxPage;
+    pGroup->nMaxPage = 0;
+    pcache1EnforceMaxPage(pCache);
+    pGroup->nMaxPage = savedMaxPage;
+    pcache1LeaveMutex(pGroup);
+  }
+}
+
+/*
+** Implementation of the sqlite3_pcache.xPagecount method.
+*/
+static int pcache1Pagecount(sqlite3_pcache *p){
+  int n;
+  PCache1 *pCache = (PCache1*)p;
+  pcache1EnterMutex(pCache->pGroup);
+  n = pCache->nPage;
+  pcache1LeaveMutex(pCache->pGroup);
+  return n;
+}
+
+
+/*
+** Implement steps 3, 4, and 5 of the pcache1Fetch() algorithm described
+** in the header of the pcache1Fetch() procedure.
+**
+** These steps are broken out into a separate procedure because they are
+** usually not needed, and by avoiding the stack initialization required
+** for these steps, the main pcache1Fetch() procedure can run faster.
+*/
+static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
+  PCache1 *pCache,
+  unsigned int iKey,
+  int createFlag
+){
+  unsigned int nPinned;
+  PGroup *pGroup = pCache->pGroup;
+  PgHdr1 *pPage = 0;
+
+  /* Step 3: Abort if createFlag is 1 but the cache is nearly full */
+  assert( pCache->nPage >= pCache->nRecyclable );
+  nPinned = pCache->nPage - pCache->nRecyclable;
+  assert( pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage );
+  assert( pCache->n90pct == pCache->nMax*9/10 );
+  if( createFlag==1 && (
+        nPinned>=pGroup->mxPinned
+     || nPinned>=pCache->n90pct
+     || (pcache1UnderMemoryPressure(pCache) && pCache->nRecyclable<nPinned)
+  )){
+    return 0;
+  }
+
+  if( pCache->nPage>=pCache->nHash ) pcache1ResizeHash(pCache);
+  assert( pCache->nHash>0 && pCache->apHash );
+
+  /* Step 4. Try to recycle a page. */
+  if( pCache->bPurgeable
+   && !pGroup->lru.pLruPrev->isAnchor
+   && ((pCache->nPage+1>=pCache->nMax) || pcache1UnderMemoryPressure(pCache))
+  ){
+    PCache1 *pOther;
+    pPage = pGroup->lru.pLruPrev;
+    assert( pPage->isPinned==0 );
+    pcache1RemoveFromHash(pPage, 0);
+    pcache1PinPage(pPage);
+    pOther = pPage->pCache;
+    if( pOther->szAlloc != pCache->szAlloc ){
+      pcache1FreePage(pPage);
+      pPage = 0;
+    }else{
+      pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable);
+    }
+  }
+
+  /* Step 5. If a usable page buffer has still not been found,
+  ** attempt to allocate a new one.
+  */
+  if( !pPage ){
+    pPage = pcache1AllocPage(pCache, createFlag==1);
+  }
+
+  if( pPage ){
+    unsigned int h = iKey % pCache->nHash;
+    pCache->nPage++;
+    pPage->iKey = iKey;
+    pPage->pNext = pCache->apHash[h];
+    pPage->pCache = pCache;
+    pPage->pLruPrev = 0;
+    pPage->pLruNext = 0;
+    pPage->isPinned = 1;
+    *(void **)pPage->page.pExtra = 0;
+    pCache->apHash[h] = pPage;
+    if( iKey>pCache->iMaxKey ){
+      pCache->iMaxKey = iKey;
+    }
+  }
+  return pPage;
+}
+
+/*
+** Implementation of the sqlite3_pcache.xFetch method.
+**
+** Fetch a page by key value.
+**
+** Whether or not a new page may be allocated by this function depends on
+** the value of the createFlag argument.  0 means do not allocate a new
+** page.  1 means allocate a new page if space is easily available.  2
+** means to try really hard to allocate a new page.
+**
+** For a non-purgeable cache (a cache used as the storage for an in-memory
+** database) there is really no difference between createFlag 1 and 2.  So
+** the calling function (pcache.c) will never have a createFlag of 1 on
+** a non-purgeable cache.
+**
+** There are three different approaches to obtaining space for a page,
+** depending on the value of parameter createFlag (which may be 0, 1 or 2).
+**
+**   1. Regardless of the value of createFlag, the cache is searched for a
+**      copy of the requested page. If one is found, it is returned.
+**
+**   2. If createFlag==0 and the page is not already in the cache, NULL is
+**      returned.
+**
+**   3. If createFlag is 1, and the page is not already in the cache, then
+**      return NULL (do not allocate a new page) if any of the following
+**      conditions are true:
+**
+**       (a) the number of pages pinned by the cache is greater than
+**           PCache1.nMax, or
+**
+**       (b) the number of pages pinned by the cache is greater than
+**           the sum of nMax for all purgeable caches, less the sum of
+**           nMin for all other purgeable caches, or
+**
+**   4. If none of the first three conditions apply and the cache is marked
+**      as purgeable, and if one of the following is true:
+**
+**       (a) The number of pages allocated for the cache is already
+**           PCache1.nMax, or
+**
+**       (b) The number of pages allocated for all purgeable caches is
+**           already equal to or greater than the sum of nMax for all
+**           purgeable caches,
+**
+**       (c) The system is under memory pressure and wants to avoid
+**           unnecessary page cache entry allocations
+**
+**      then attempt to recycle a page from the LRU list. If it is the right
+**      size, return the recycled buffer. Otherwise, free the buffer and
+**      proceed to step 5.
+**
+**   5. Otherwise, allocate and return a new page buffer.
+**
+** There are two versions of this routine.  pcache1FetchWithMutex() is
+** the general case.  pcache1FetchNoMutex() is a faster implementation for
+** the common case where pGroup->mutex is NULL.  The pcache1Fetch() wrapper
+** invokes the appropriate routine.
+*/
+static PgHdr1 *pcache1FetchNoMutex(
+  sqlite3_pcache *p,
+  unsigned int iKey,
+  int createFlag
+){
+  PCache1 *pCache = (PCache1 *)p;
+  PgHdr1 *pPage = 0;
+
+  /* Step 1: Search the hash table for an existing entry. */
+  pPage = pCache->apHash[iKey % pCache->nHash];
+  while( pPage && pPage->iKey!=iKey ){ pPage = pPage->pNext; }
+
+  /* Step 2: If the page was found in the hash table, then return it.
+  ** If the page was not in the hash table and createFlag is 0, abort.
+  ** Otherwise (page not in hash and createFlag!=0) continue with
+  ** subsequent steps to try to create the page.
*/ + if( pPage ){ + if( !pPage->isPinned ){ + return pcache1PinPage(pPage); + }else{ + return pPage; + } + }else if( createFlag ){ + /* Steps 3, 4, and 5 implemented by this subroutine */ + return pcache1FetchStage2(pCache, iKey, createFlag); + }else{ + return 0; + } +} +#if PCACHE1_MIGHT_USE_GROUP_MUTEX +static PgHdr1 *pcache1FetchWithMutex( + sqlite3_pcache *p, + unsigned int iKey, + int createFlag +){ + PCache1 *pCache = (PCache1 *)p; + PgHdr1 *pPage; + + pcache1EnterMutex(pCache->pGroup); + pPage = pcache1FetchNoMutex(p, iKey, createFlag); + assert( pPage==0 || pCache->iMaxKey>=iKey ); + pcache1LeaveMutex(pCache->pGroup); + return pPage; +} +#endif +static sqlite3_pcache_page *pcache1Fetch( + sqlite3_pcache *p, + unsigned int iKey, + int createFlag +){ +#if PCACHE1_MIGHT_USE_GROUP_MUTEX || defined(SQLITE_DEBUG) + PCache1 *pCache = (PCache1 *)p; +#endif + + assert( offsetof(PgHdr1,page)==0 ); + assert( pCache->bPurgeable || createFlag!=1 ); + assert( pCache->bPurgeable || pCache->nMin==0 ); + assert( pCache->bPurgeable==0 || pCache->nMin==10 ); + assert( pCache->nMin==0 || pCache->bPurgeable ); + assert( pCache->nHash>0 ); +#if PCACHE1_MIGHT_USE_GROUP_MUTEX + if( pCache->pGroup->mutex ){ + return (sqlite3_pcache_page*)pcache1FetchWithMutex(p, iKey, createFlag); + }else +#endif + { + return (sqlite3_pcache_page*)pcache1FetchNoMutex(p, iKey, createFlag); + } +} + + +/* +** Implementation of the sqlite3_pcache.xUnpin method. +** +** Mark a page as unpinned (eligible for asynchronous recycling). +*/ +static void pcache1Unpin( + sqlite3_pcache *p, + sqlite3_pcache_page *pPg, + int reuseUnlikely +){ + PCache1 *pCache = (PCache1 *)p; + PgHdr1 *pPage = (PgHdr1 *)pPg; + PGroup *pGroup = pCache->pGroup; + + assert( pPage->pCache==pCache ); + pcache1EnterMutex(pGroup); + + /* It is an error to call this function if the page is already + ** part of the PGroup LRU list. + */ + assert( pPage->pLruPrev==0 && pPage->pLruNext==0 ); + assert( pPage->isPinned==1 ); + + if( reuseUnlikely || pGroup->nCurrentPage>pGroup->nMaxPage ){ + pcache1RemoveFromHash(pPage, 1); + }else{ + /* Add the page to the PGroup LRU list. */ + PgHdr1 **ppFirst = &pGroup->lru.pLruNext; + pPage->pLruPrev = &pGroup->lru; + (pPage->pLruNext = *ppFirst)->pLruPrev = pPage; + *ppFirst = pPage; + pCache->nRecyclable++; + pPage->isPinned = 0; + } + + pcache1LeaveMutex(pCache->pGroup); +} + +/* +** Implementation of the sqlite3_pcache.xRekey method. +*/ +static void pcache1Rekey( + sqlite3_pcache *p, + sqlite3_pcache_page *pPg, + unsigned int iOld, + unsigned int iNew +){ + PCache1 *pCache = (PCache1 *)p; + PgHdr1 *pPage = (PgHdr1 *)pPg; + PgHdr1 **pp; + unsigned int h; + assert( pPage->iKey==iOld ); + assert( pPage->pCache==pCache ); + + pcache1EnterMutex(pCache->pGroup); + + h = iOld%pCache->nHash; + pp = &pCache->apHash[h]; + while( (*pp)!=pPage ){ + pp = &(*pp)->pNext; + } + *pp = pPage->pNext; + + h = iNew%pCache->nHash; + pPage->iKey = iNew; + pPage->pNext = pCache->apHash[h]; + pCache->apHash[h] = pPage; + if( iNew>pCache->iMaxKey ){ + pCache->iMaxKey = iNew; + } + + pcache1LeaveMutex(pCache->pGroup); +} + +/* +** Implementation of the sqlite3_pcache.xTruncate method. +** +** Discard all unpinned pages in the cache with a page number equal to +** or greater than parameter iLimit. Any pinned pages with a page number +** equal to or greater than iLimit are implicitly unpinned. 
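+**
+** For example, truncating with iLimit==10 discards pages 10, 11, 12, ...
+** and leaves pages 1 through 9 in the cache; pcache1Destroy() below uses
+** an iLimit of 0 to empty the cache entirely.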
+*/
+static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit){
+  PCache1 *pCache = (PCache1 *)p;
+  pcache1EnterMutex(pCache->pGroup);
+  if( iLimit<=pCache->iMaxKey ){
+    pcache1TruncateUnsafe(pCache, iLimit);
+    pCache->iMaxKey = iLimit-1;
+  }
+  pcache1LeaveMutex(pCache->pGroup);
+}
+
+/*
+** Implementation of the sqlite3_pcache.xDestroy method.
+**
+** Destroy a cache allocated using pcache1Create().
+*/
+static void pcache1Destroy(sqlite3_pcache *p){
+  PCache1 *pCache = (PCache1 *)p;
+  PGroup *pGroup = pCache->pGroup;
+  assert( pCache->bPurgeable || (pCache->nMax==0 && pCache->nMin==0) );
+  pcache1EnterMutex(pGroup);
+  if( pCache->nPage ) pcache1TruncateUnsafe(pCache, 0);
+  assert( pGroup->nMaxPage >= pCache->nMax );
+  pGroup->nMaxPage -= pCache->nMax;
+  assert( pGroup->nMinPage >= pCache->nMin );
+  pGroup->nMinPage -= pCache->nMin;
+  pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
+  pcache1EnforceMaxPage(pCache);
+  pcache1LeaveMutex(pGroup);
+  sqlite3_free(pCache->pBulk);
+  sqlite3_free(pCache->apHash);
+  sqlite3_free(pCache);
+}
+
+/*
+** This function is called during initialization (sqlite3_initialize()) to
+** install the default pluggable cache module, assuming the user has not
+** already provided an alternative.
+*/
+SQLITE_PRIVATE void sqlite3PCacheSetDefault(void){
+  static const sqlite3_pcache_methods2 defaultMethods = {
+    1,                       /* iVersion */
+    0,                       /* pArg */
+    pcache1Init,             /* xInit */
+    pcache1Shutdown,         /* xShutdown */
+    pcache1Create,           /* xCreate */
+    pcache1Cachesize,        /* xCachesize */
+    pcache1Pagecount,        /* xPagecount */
+    pcache1Fetch,            /* xFetch */
+    pcache1Unpin,            /* xUnpin */
+    pcache1Rekey,            /* xRekey */
+    pcache1Truncate,         /* xTruncate */
+    pcache1Destroy,          /* xDestroy */
+    pcache1Shrink            /* xShrink */
+  };
+  sqlite3_config(SQLITE_CONFIG_PCACHE2, &defaultMethods);
+}
+
+/*
+** Return the size of the header on each page of this PCACHE implementation.
+*/
+SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void){ return ROUND8(sizeof(PgHdr1)); }
+
+/*
+** Return the global mutex used by this PCACHE implementation.  The
+** sqlite3_status() routine needs access to this mutex.
+*/
+SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void){
+  return pcache1.mutex;
+}
+
+#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
+/*
+** This function is called to free superfluous dynamically allocated memory
+** held by the pager system. Memory in use by any SQLite pager allocated
+** by the current thread may be sqlite3_free()ed.
+**
+** nReq is the number of bytes of memory required. Once this much has
+** been released, the function returns. The return value is the total number
+** of bytes of memory released.
+*/
+SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){
+  int nFree = 0;
+  assert( sqlite3_mutex_notheld(pcache1.grp.mutex) );
+  assert( sqlite3_mutex_notheld(pcache1.mutex) );
+  if( sqlite3GlobalConfig.nPage==0 ){
+    PgHdr1 *p;
+    pcache1EnterMutex(&pcache1.grp);
+    while( (nReq<0 || nFree<nReq)
+       &&  (p=pcache1.grp.lru.pLruPrev)!=0
+       &&   p->isAnchor==0
+    ){
+      nFree += pcache1MemSize(p->page.pBuf);
+#ifdef SQLITE_PCACHE_SEPARATE_HEADER
+      nFree += sqlite3MemSize(p);
+#endif
+      assert( p->isPinned==0 );
+      pcache1PinPage(p);
+      pcache1RemoveFromHash(p, 1);
+    }
+    pcache1LeaveMutex(&pcache1.grp);
+  }
+  return nFree;
+}
+#endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */
+
+#ifdef SQLITE_TEST
+/*
+** This function is used by test procedures to inspect the internal state
+** of the global cache.
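+**
+** A minimal illustrative call (test code only):
+**
+**     int nCur, nMax, nMin, nRec;
+**     sqlite3PcacheStats(&nCur, &nMax, &nMin, &nRec);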
+*/
+SQLITE_PRIVATE void sqlite3PcacheStats(
+  int *pnCurrent,      /* OUT: Total number of pages cached */
+  int *pnMax,          /* OUT: Global maximum cache size */
+  int *pnMin,          /* OUT: Sum of PCache1.nMin for purgeable caches */
+  int *pnRecyclable    /* OUT: Total number of pages available for recycling */
+){
+  PgHdr1 *p;
+  int nRecyclable = 0;
+  for(p=pcache1.grp.lru.pLruNext; p && !p->isAnchor; p=p->pLruNext){
+    assert( p->isPinned==0 );
+    nRecyclable++;
+  }
+  *pnCurrent = pcache1.grp.nCurrentPage;
+  *pnMax = (int)pcache1.grp.nMaxPage;
+  *pnMin = (int)pcache1.grp.nMinPage;
+  *pnRecyclable = nRecyclable;
+}
+#endif
+
+/************** End of pcache1.c *********************************************/
+/************** Begin file rowset.c ******************************************/
+/*
+** 2008 December 3
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This module implements an object we call a "RowSet".
+**
+** The RowSet object is a collection of rowids.  Rowids
+** are inserted into the RowSet in an arbitrary order.  Inserts
+** can be intermixed with tests to see if a given rowid has been
+** previously inserted into the RowSet.
+**
+** After all inserts are finished, it is possible to extract the
+** elements of the RowSet in sorted order.  Once this extraction
+** process has started, no new elements may be inserted.
+**
+** Hence, the primitive operations for a RowSet are:
+**
+**    CREATE
+**    INSERT
+**    TEST
+**    SMALLEST
+**    DESTROY
+**
+** The CREATE and DESTROY primitives are the constructor and destructor,
+** obviously.  The INSERT primitive adds a new element to the RowSet.
+** TEST checks to see if an element is already in the RowSet.  SMALLEST
+** extracts the least value from the RowSet.
+**
+** The INSERT primitive might allocate additional memory.  Memory is
+** allocated in chunks so most INSERTs do no allocation.  There is an
+** upper bound on the size of allocated memory.  No memory is freed
+** until DESTROY.
+**
+** The TEST primitive includes a "batch" number.  The TEST primitive
+** will only see elements that were inserted before the last change
+** in the batch number.  In other words, if an INSERT occurs between
+** two TESTs where the TESTs have the same batch number, then the
+** value added by the INSERT will not be visible to the second TEST.
+** The initial batch number is zero, so if the very first TEST contains
+** a non-zero batch number, it will see all prior INSERTs.
+**
+** No INSERTs may occur after a SMALLEST.  An assertion will fail if
+** that is attempted.
+**
+** The cost of an INSERT is roughly constant.  (Sometimes new memory
+** has to be allocated on an INSERT.)  The cost of a TEST with a new
+** batch number is O(NlogN) where N is the number of elements in the RowSet.
+** The cost of a TEST using the same batch number is O(logN).  The cost
+** of the first SMALLEST is O(NlogN).  Second and subsequent SMALLEST
+** primitives are constant time.  The cost of DESTROY is O(N).
+**
+** TEST and SMALLEST may not be used by the same RowSet.  This used to
+** be possible, but the feature was not used, so it was removed in order
+** to simplify the code.
+*/
+/* #include "sqliteInt.h" */
+
+
+/*
+** Target size for allocation chunks.
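+**
+** With the 1024-byte target below, and assuming a typical 64-bit build
+** where sizeof(struct RowSetEntry)==24 (one 64-bit rowid plus two
+** pointers), each chunk holds (1024-8)/24 == 42 entries.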
+*/
+#define ROWSET_ALLOCATION_SIZE 1024
+
+/*
+** The number of rowset entries per allocation chunk.
+*/
+#define ROWSET_ENTRY_PER_CHUNK  \
+                       ((ROWSET_ALLOCATION_SIZE-8)/sizeof(struct RowSetEntry))
+
+/*
+** Each entry in a RowSet is an instance of the following object.
+**
+** This same object is reused to store a linked list of trees of RowSetEntry
+** objects.  In that alternative use, pRight points to the next entry
+** in the list, pLeft points to the tree, and v is unused.  The
+** RowSet.pForest value points to the head of this forest list.
+*/
+struct RowSetEntry {
+  i64 v;                        /* ROWID value for this entry */
+  struct RowSetEntry *pRight;   /* Right subtree (larger entries) or list */
+  struct RowSetEntry *pLeft;    /* Left subtree (smaller entries) */
+};
+
+/*
+** RowSetEntry objects are allocated in large chunks (instances of the
+** following structure) to reduce memory allocation overhead.  The
+** chunks are kept on a linked list so that they can be deallocated
+** when the RowSet is destroyed.
+*/
+struct RowSetChunk {
+  struct RowSetChunk *pNextChunk;        /* Next chunk on list of them all */
+  struct RowSetEntry aEntry[ROWSET_ENTRY_PER_CHUNK]; /* Allocated entries */
+};
+
+/*
+** A RowSet is an instance of the following structure.
+**
+** A typedef of this structure is found in sqliteInt.h.
+*/
+struct RowSet {
+  struct RowSetChunk *pChunk;    /* List of all chunk allocations */
+  sqlite3 *db;                   /* The database connection */
+  struct RowSetEntry *pEntry;    /* List of entries using pRight */
+  struct RowSetEntry *pLast;     /* Last entry on the pEntry list */
+  struct RowSetEntry *pFresh;    /* Source of new entry objects */
+  struct RowSetEntry *pForest;   /* List of binary trees of entries */
+  u16 nFresh;                    /* Number of objects on pFresh */
+  u16 rsFlags;                   /* Various flags */
+  int iBatch;                    /* Current insert batch */
+};
+
+/*
+** Allowed values for RowSet.rsFlags
+*/
+#define ROWSET_SORTED  0x01   /* True if RowSet.pEntry is sorted */
+#define ROWSET_NEXT    0x02   /* True if sqlite3RowSetNext() has been called */
+
+/*
+** Turn bulk memory into a RowSet object.  N bytes of memory
+** are available at pSpace.  The db pointer is used as a memory context
+** for any subsequent allocations that need to occur.
+** Return a pointer to the new RowSet object.
+**
+** It must be the case that N is sufficient to make a RowSet.  If not,
+** an assertion fault occurs.
+**
+** If N is larger than the minimum, use the surplus as an initial
+** allocation of entries available to be filled.
+*/
+SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db, void *pSpace, unsigned int N){
+  RowSet *p;
+  assert( N >= ROUND8(sizeof(*p)) );
+  p = pSpace;
+  p->pChunk = 0;
+  p->db = db;
+  p->pEntry = 0;
+  p->pLast = 0;
+  p->pForest = 0;
+  p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p);
+  p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry));
+  p->rsFlags = ROWSET_SORTED;
+  p->iBatch = 0;
+  return p;
+}
+
+/*
+** Deallocate all chunks from a RowSet.  This frees all memory that
+** the RowSet has allocated over its lifetime.  This routine is
+** the destructor for the RowSet.
+*/
+SQLITE_PRIVATE void sqlite3RowSetClear(RowSet *p){
+  struct RowSetChunk *pChunk, *pNextChunk;
+  for(pChunk=p->pChunk; pChunk; pChunk = pNextChunk){
+    pNextChunk = pChunk->pNextChunk;
+    sqlite3DbFree(p->db, pChunk);
+  }
+  p->pChunk = 0;
+  p->nFresh = 0;
+  p->pEntry = 0;
+  p->pLast = 0;
+  p->pForest = 0;
+  p->rsFlags = ROWSET_SORTED;
+}
+
+/*
+** Allocate a new RowSetEntry object that is associated with the
+** given RowSet.  Return a pointer to the new and completely uninitialized
+** object.
+**
+** In an OOM situation, the RowSet.db->mallocFailed flag is set and this
+** routine returns NULL.
+*/
+static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){
+  assert( p!=0 );
+  if( p->nFresh==0 ){  /*OPTIMIZATION-IF-FALSE*/
+    /* We could allocate a fresh RowSetEntry each time one is needed, but it
+    ** is more efficient to pull a preallocated entry from the pool */
+    struct RowSetChunk *pNew;
+    pNew = sqlite3DbMallocRawNN(p->db, sizeof(*pNew));
+    if( pNew==0 ){
+      return 0;
+    }
+    pNew->pNextChunk = p->pChunk;
+    p->pChunk = pNew;
+    p->pFresh = pNew->aEntry;
+    p->nFresh = ROWSET_ENTRY_PER_CHUNK;
+  }
+  p->nFresh--;
+  return p->pFresh++;
+}
+
+/*
+** Insert a new value into a RowSet.
+**
+** The mallocFailed flag of the database connection is set if a
+** memory allocation fails.
+*/
+SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet *p, i64 rowid){
+  struct RowSetEntry *pEntry;  /* The new entry */
+  struct RowSetEntry *pLast;   /* The last prior entry */
+
+  /* This routine is never called after sqlite3RowSetNext() */
+  assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 );
+
+  pEntry = rowSetEntryAlloc(p);
+  if( pEntry==0 ) return;
+  pEntry->v = rowid;
+  pEntry->pRight = 0;
+  pLast = p->pLast;
+  if( pLast ){
+    if( rowid<=pLast->v ){  /*OPTIMIZATION-IF-FALSE*/
+      /* Avoid unnecessary sorts by preserving the ROWSET_SORTED flags
+      ** where possible */
+      p->rsFlags &= ~ROWSET_SORTED;
+    }
+    pLast->pRight = pEntry;
+  }else{
+    p->pEntry = pEntry;
+  }
+  p->pLast = pEntry;
+}
+
+/*
+** Merge two lists of RowSetEntry objects.  Remove duplicates.
+**
+** The input lists are connected via pRight pointers and are
+** assumed to each already be in sorted order.
+*/
+static struct RowSetEntry *rowSetEntryMerge(
+  struct RowSetEntry *pA,    /* First sorted list to be merged */
+  struct RowSetEntry *pB     /* Second sorted list to be merged */
+){
+  struct RowSetEntry head;
+  struct RowSetEntry *pTail;
+
+  pTail = &head;
+  assert( pA!=0 && pB!=0 );
+  for(;;){
+    assert( pA->pRight==0 || pA->v<=pA->pRight->v );
+    assert( pB->pRight==0 || pB->v<=pB->pRight->v );
+    if( pA->v<=pB->v ){
+      if( pA->v<pB->v ) pTail = pTail->pRight = pA;
+      pA = pA->pRight;
+      if( pA==0 ){
+        pTail->pRight = pB;
+        break;
+      }
+    }else{
+      pTail = pTail->pRight = pB;
+      pB = pB->pRight;
+      if( pB==0 ){
+        pTail->pRight = pA;
+        break;
+      }
+    }
+  }
+  return head.pRight;
+}
+
+/*
+** Sort all elements on the list of RowSetEntry objects into order of
+** increasing v.
+*/
+static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){
+  unsigned int i;
+  struct RowSetEntry *pNext, *aBucket[40];
+
+  memset(aBucket, 0, sizeof(aBucket));
+  while( pIn ){
+    pNext = pIn->pRight;
+    pIn->pRight = 0;
+    for(i=0; aBucket[i]; i++){
+      pIn = rowSetEntryMerge(aBucket[i], pIn);
+      aBucket[i] = 0;
+    }
+    aBucket[i] = pIn;
+    pIn = pNext;
+  }
+  pIn = aBucket[0];
+  for(i=1; i<sizeof(aBucket)/sizeof(aBucket[0]); i++){
+    if( aBucket[i]==0 ) continue;
+    pIn = pIn ? rowSetEntryMerge(pIn, aBucket[i]) : aBucket[i];
+  }
+  return pIn;
+}
+
+
+/*
+** The input, pIn, is a binary tree (or subtree) of RowSetEntry objects.
+** Convert this tree into a linked list connected by the pRight pointers
+** and return pointers to the first and last elements of the new list.
+*/
+static void rowSetTreeToList(
+  struct RowSetEntry *pIn,         /* Input tree */
+  struct RowSetEntry **ppFirst,    /* Write head of the output list here */
+  struct RowSetEntry **ppLast      /* Write tail of the output list here */
+){
+  assert( pIn!=0 );
+  if( pIn->pLeft ){
+    struct RowSetEntry *p;
+    rowSetTreeToList(pIn->pLeft, ppFirst, &p);
+    p->pRight = pIn;
+  }else{
+    *ppFirst = pIn;
+  }
+  if( pIn->pRight ){
+    rowSetTreeToList(pIn->pRight, &pIn->pRight, ppLast);
+  }else{
+    *ppLast = pIn;
+  }
+  assert( (*ppLast)->pRight==0 );
+}
+
+
+/*
+** Convert a sorted list of elements (connected by pRight) into a binary
+** tree with depth of iDepth.  A depth of 1 means the tree contains a single
+** node taken from the head of *ppList.  A depth of 2 means a tree with
+** three nodes.  And so forth.
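+** (In general a complete tree of depth d holds 2^d - 1 nodes; a depth
+** of 3 therefore means a tree with seven nodes.)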
+**
+** Use as many entries from the input list as required and update the
+** *ppList to point to the unused elements of the list.  If the input
+** list contains too few elements, then construct an incomplete tree
+** and leave *ppList set to NULL.
+**
+** Return a pointer to the root of the constructed binary tree.
+*/
+static struct RowSetEntry *rowSetNDeepTree(
+  struct RowSetEntry **ppList,
+  int iDepth
+){
+  struct RowSetEntry *p;         /* Root of the new tree */
+  struct RowSetEntry *pLeft;     /* Left subtree */
+  if( *ppList==0 ){ /*OPTIMIZATION-IF-TRUE*/
+    /* Prevent unnecessary deep recursion when we run out of entries */
+    return 0;
+  }
+  if( iDepth>1 ){   /*OPTIMIZATION-IF-TRUE*/
+    /* This branch causes a *balanced* tree to be generated.  A valid tree
+    ** is still generated without this branch, but the tree is wildly
+    ** unbalanced and inefficient. */
+    pLeft = rowSetNDeepTree(ppList, iDepth-1);
+    p = *ppList;
+    if( p==0 ){     /*OPTIMIZATION-IF-FALSE*/
+      /* It is safe to always return here, but the resulting tree
+      ** would be unbalanced */
+      return pLeft;
+    }
+    p->pLeft = pLeft;
+    *ppList = p->pRight;
+    p->pRight = rowSetNDeepTree(ppList, iDepth-1);
+  }else{
+    p = *ppList;
+    *ppList = p->pRight;
+    p->pLeft = p->pRight = 0;
+  }
+  return p;
+}
+
+/*
+** Convert a sorted list of elements into a binary tree.  Make the tree
+** as deep as it needs to be in order to contain the entire list.
+*/
+static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){
+  int iDepth;                     /* Depth of the tree so far */
+  struct RowSetEntry *p;          /* Current tree root */
+  struct RowSetEntry *pLeft;      /* Left subtree */
+
+  assert( pList!=0 );
+  p = pList;
+  pList = p->pRight;
+  p->pLeft = p->pRight = 0;
+  for(iDepth=1; pList; iDepth++){
+    pLeft = p;
+    p = pList;
+    pList = p->pRight;
+    p->pLeft = pLeft;
+    p->pRight = rowSetNDeepTree(&pList, iDepth);
+  }
+  return p;
+}
+
+/*
+** Extract the smallest element from the RowSet.
+** Write the element into *pRowid.  Return 1 on success.  Return
+** 0 if the RowSet is already empty.
+**
+** After this routine has been called, the sqlite3RowSetInsert()
+** routine may not be called again.
+**
+** This routine may not be called after sqlite3RowSetTest() has
+** been used.  Older versions of RowSet allowed that, but as the
+** capability was not used by the code generator, it was removed
+** for code economy.
+*/
+SQLITE_PRIVATE int sqlite3RowSetNext(RowSet *p, i64 *pRowid){
+  assert( p!=0 );
+  assert( p->pForest==0 );  /* Cannot be used with sqlite3RowSetTest() */
+
+  /* Merge the forest into a single sorted list on first call */
+  if( (p->rsFlags & ROWSET_NEXT)==0 ){  /*OPTIMIZATION-IF-FALSE*/
+    if( (p->rsFlags & ROWSET_SORTED)==0 ){  /*OPTIMIZATION-IF-FALSE*/
+      p->pEntry = rowSetEntrySort(p->pEntry);
+    }
+    p->rsFlags |= ROWSET_SORTED|ROWSET_NEXT;
+  }
+
+  /* Return the next entry on the list */
+  if( p->pEntry ){
+    *pRowid = p->pEntry->v;
+    p->pEntry = p->pEntry->pRight;
+    if( p->pEntry==0 ){ /*OPTIMIZATION-IF-TRUE*/
+      /* Free memory immediately, rather than waiting on sqlite3_finalize() */
+      sqlite3RowSetClear(p);
+    }
+    return 1;
+  }else{
+    return 0;
+  }
+}
+
+/*
+** Check to see if element iRowid was inserted into the rowset as
+** part of any insert batch prior to iBatch.  Return 1 or 0.
+**
+** If this is the first test of a new batch and if there exist entries
+** on pRowSet->pEntry, then sort those entries into the forest at
+** pRowSet->pForest so that they can be tested.
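+**
+** An illustrative sequence (pRS is a hypothetical RowSet handle; batch
+** numbers are supplied by the caller):
+**
+**     sqlite3RowSetInsert(pRS, 7);
+**     sqlite3RowSetTest(pRS, 1, 7);    ... returns 1 (new batch; 7 visible)
+**     sqlite3RowSetInsert(pRS, 9);
+**     sqlite3RowSetTest(pRS, 1, 9);    ... returns 0 (same batch)
+**     sqlite3RowSetTest(pRS, 2, 9);    ... returns 1 (new batch; 9 visible)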
+*/
+SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){
+  struct RowSetEntry *p, *pTree;
+
+  /* This routine is never called after sqlite3RowSetNext() */
+  assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 );
+
+  /* Sort entries into the forest on the first test of a new batch.
+  ** To save unnecessary work, only do this when the batch number changes.
+  */
+  if( iBatch!=pRowSet->iBatch ){  /*OPTIMIZATION-IF-FALSE*/
+    p = pRowSet->pEntry;
+    if( p ){
+      struct RowSetEntry **ppPrevTree = &pRowSet->pForest;
+      if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/
+        /* Only sort the current set of entries if they need it */
+        p = rowSetEntrySort(p);
+      }
+      for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){
+        ppPrevTree = &pTree->pRight;
+        if( pTree->pLeft==0 ){
+          pTree->pLeft = rowSetListToTree(p);
+          break;
+        }else{
+          struct RowSetEntry *pAux, *pTail;
+          rowSetTreeToList(pTree->pLeft, &pAux, &pTail);
+          pTree->pLeft = 0;
+          p = rowSetEntryMerge(pAux, p);
+        }
+      }
+      if( pTree==0 ){
+        *ppPrevTree = pTree = rowSetEntryAlloc(pRowSet);
+        if( pTree ){
+          pTree->v = 0;
+          pTree->pRight = 0;
+          pTree->pLeft = rowSetListToTree(p);
+        }
+      }
+      pRowSet->pEntry = 0;
+      pRowSet->pLast = 0;
+      pRowSet->rsFlags |= ROWSET_SORTED;
+    }
+    pRowSet->iBatch = iBatch;
+  }
+
+  /* Test to see if the iRowid value appears anywhere in the forest.
+  ** Return 1 if it does and 0 if not.
+  */
+  for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){
+    p = pTree->pLeft;
+    while( p ){
+      if( p->v<iRowid ){
+        p = p->pRight;
+      }else if( p->v>iRowid ){
+        p = p->pLeft;
+      }else{
+        return 1;
+      }
+    }
+  }
+  return 0;
+}
+
+/************** End of rowset.c **********************************************/
+/************** Begin file pager.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This is the implementation of the page cache subsystem or "pager".
+**
+** The pager is used to access a database disk file.  It implements
+** atomic commit and rollback through the use of a journal file that
+** is separate from the database file.  The pager also implements file
+** locking to prevent two processes from writing the same database
+** file simultaneously, or one process from reading the database while
+** another is writing.
+*/
+#ifndef SQLITE_OMIT_DISKIO
+/* #include "sqliteInt.h" */
+/************** Include wal.h in the middle of pager.c ***********************/
+/************** Begin file wal.h *********************************************/
+/*
+** 2010 February 1
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface to the write-ahead logging
+** system. Refer to the comments below and the header comment attached to
+** the implementation of each function in wal.c for further details.
+*/ + +#ifndef SQLITE_WAL_H +#define SQLITE_WAL_H + +/* #include "sqliteInt.h" */ + +/* Additional values that can be added to the sync_flags argument of +** sqlite3WalFrames(): +*/ +#define WAL_SYNC_TRANSACTIONS 0x20 /* Sync at the end of each transaction */ +#define SQLITE_SYNC_MASK 0x13 /* Mask off the SQLITE_SYNC_* values */ + +#ifdef SQLITE_OMIT_WAL +# define sqlite3WalOpen(x,y,z) 0 +# define sqlite3WalLimit(x,y) +# define sqlite3WalClose(v,w,x,y,z) 0 +# define sqlite3WalBeginReadTransaction(y,z) 0 +# define sqlite3WalEndReadTransaction(z) +# define sqlite3WalDbsize(y) 0 +# define sqlite3WalBeginWriteTransaction(y) 0 +# define sqlite3WalEndWriteTransaction(x) 0 +# define sqlite3WalUndo(x,y,z) 0 +# define sqlite3WalSavepoint(y,z) +# define sqlite3WalSavepointUndo(y,z) 0 +# define sqlite3WalFrames(u,v,w,x,y,z) 0 +# define sqlite3WalCheckpoint(q,r,s,t,u,v,w,x,y,z) 0 +# define sqlite3WalCallback(z) 0 +# define sqlite3WalExclusiveMode(y,z) 0 +# define sqlite3WalHeapMemory(z) 0 +# define sqlite3WalFramesize(z) 0 +# define sqlite3WalFindFrame(x,y,z) 0 +# define sqlite3WalFile(x) 0 +#else + +#define WAL_SAVEPOINT_NDATA 4 + +/* Connection to a write-ahead log (WAL) file. +** There is one object of this type for each pager. +*/ +typedef struct Wal Wal; + +/* Open and close a connection to a write-ahead log. */ +SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**); +SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, sqlite3*, int sync_flags, int, u8 *); + +/* Set the limiting size of a WAL file. */ +SQLITE_PRIVATE void sqlite3WalLimit(Wal*, i64); + +/* Used by readers to open (lock) and close (unlock) a snapshot. A +** snapshot is like a read-transaction. It is the state of the database +** at an instant in time. sqlite3WalOpenSnapshot gets a read lock and +** preserves the current state even if the other threads or processes +** write to or checkpoint the WAL. sqlite3WalCloseSnapshot() closes the +** transaction and releases the lock. +*/ +SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *); +SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal); + +/* Read a page from the write-ahead log, if it is present. */ +SQLITE_PRIVATE int sqlite3WalFindFrame(Wal *, Pgno, u32 *); +SQLITE_PRIVATE int sqlite3WalReadFrame(Wal *, u32, int, u8 *); + +/* If the WAL is not empty, return the size of the database. */ +SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal); + +/* Obtain or release the WRITER lock. */ +SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal); +SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal); + +/* Undo any frames written (but not committed) to the log */ +SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx); + +/* Return an integer that records the current (uncommitted) write +** position in the WAL */ +SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData); + +/* Move the write position of the WAL back to iFrame. Called in +** response to a ROLLBACK TO command. */ +SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData); + +/* Write a frame or frames to the log. 
*/ +SQLITE_PRIVATE int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int); + +/* Copy pages from the log to the database file */ +SQLITE_PRIVATE int sqlite3WalCheckpoint( + Wal *pWal, /* Write-ahead log connection */ + sqlite3 *db, /* Check this handle's interrupt flag */ + int eMode, /* One of PASSIVE, FULL and RESTART */ + int (*xBusy)(void*), /* Function to call when busy */ + void *pBusyArg, /* Context argument for xBusyHandler */ + int sync_flags, /* Flags to sync db file with (or 0) */ + int nBuf, /* Size of buffer nBuf */ + u8 *zBuf, /* Temporary buffer to use */ + int *pnLog, /* OUT: Number of frames in WAL */ + int *pnCkpt /* OUT: Number of backfilled frames in WAL */ +); + +/* Return the value to pass to a sqlite3_wal_hook callback, the +** number of frames in the WAL at the point of the last commit since +** sqlite3WalCallback() was called. If no commits have occurred since +** the last call, then return 0. +*/ +SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal); + +/* Tell the wal layer that an EXCLUSIVE lock has been obtained (or released) +** by the pager layer on the database file. +*/ +SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op); + +/* Return true if the argument is non-NULL and the WAL module is using +** heap-memory for the wal-index. Otherwise, if the argument is NULL or the +** WAL module is using shared-memory, return false. +*/ +SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal); + +#ifdef SQLITE_ENABLE_SNAPSHOT +SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot); +SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapshot); +SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal); +#endif + +#ifdef SQLITE_ENABLE_ZIPVFS +/* If the WAL file is not empty, return the number of bytes of content +** stored in each frame (i.e. the db page-size when the WAL was created). +*/ +SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal); +#endif + +/* Return the sqlite3_file object for the WAL file */ +SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal); + +#endif /* ifndef SQLITE_OMIT_WAL */ +#endif /* SQLITE_WAL_H */ + +/************** End of wal.h *************************************************/ +/************** Continuing where we left off in pager.c **********************/ + + +/******************* NOTES ON THE DESIGN OF THE PAGER ************************ +** +** This comment block describes invariants that hold when using a rollback +** journal. These invariants do not apply for journal_mode=WAL, +** journal_mode=MEMORY, or journal_mode=OFF. +** +** Within this comment block, a page is deemed to have been synced +** automatically as soon as it is written when PRAGMA synchronous=OFF. +** Otherwise, the page is not synced until the xSync method of the VFS +** is called successfully on the file containing the page. +** +** Definition: A page of the database file is said to be "overwriteable" if +** one or more of the following are true about the page: +** +** (a) The original content of the page as it was at the beginning of +** the transaction has been written into the rollback journal and +** synced. +** +** (b) The page was a freelist leaf page at the start of the transaction. +** +** (c) The page number is greater than the largest page that existed in +** the database file at the start of the transaction. +** +** (1) A page of the database file is never overwritten unless one of the +** following are true: +** +** (a) The page and all other pages on the same sector are overwriteable. 
+** +** (b) The atomic page write optimization is enabled, and the entire +** transaction other than the update of the transaction sequence +** number consists of a single page change. +** +** (2) The content of a page written into the rollback journal exactly matches +** both the content in the database when the rollback journal was written +** and the content in the database at the beginning of the current +** transaction. +** +** (3) Writes to the database file are an integer multiple of the page size +** in length and are aligned on a page boundary. +** +** (4) Reads from the database file are either aligned on a page boundary and +** an integer multiple of the page size in length or are taken from the +** first 100 bytes of the database file. +** +** (5) All writes to the database file are synced prior to the rollback journal +** being deleted, truncated, or zeroed. +** +** (6) If a master journal file is used, then all writes to the database file +** are synced prior to the master journal being deleted. +** +** Definition: Two databases (or the same database at two points it time) +** are said to be "logically equivalent" if they give the same answer to +** all queries. Note in particular the content of freelist leaf +** pages can be changed arbitrarily without affecting the logical equivalence +** of the database. +** +** (7) At any time, if any subset, including the empty set and the total set, +** of the unsynced changes to a rollback journal are removed and the +** journal is rolled back, the resulting database file will be logically +** equivalent to the database file at the beginning of the transaction. +** +** (8) When a transaction is rolled back, the xTruncate method of the VFS +** is called to restore the database file to the same size it was at +** the beginning of the transaction. (In some VFSes, the xTruncate +** method is a no-op, but that does not change the fact the SQLite will +** invoke it.) +** +** (9) Whenever the database file is modified, at least one bit in the range +** of bytes from 24 through 39 inclusive will be changed prior to releasing +** the EXCLUSIVE lock, thus signaling other connections on the same +** database to flush their caches. +** +** (10) The pattern of bits in bytes 24 through 39 shall not repeat in less +** than one billion transactions. +** +** (11) A database file is well-formed at the beginning and at the conclusion +** of every transaction. +** +** (12) An EXCLUSIVE lock is held on the database file when writing to +** the database file. +** +** (13) A SHARED lock is held on the database file while reading any +** content out of the database file. +** +******************************************************************************/ + +/* +** Macros for troubleshooting. Normally turned off +*/ +#if 0 +int sqlite3PagerTrace=1; /* True to enable tracing */ +#define sqlite3DebugPrintf printf +#define PAGERTRACE(X) if( sqlite3PagerTrace ){ sqlite3DebugPrintf X; } +#else +#define PAGERTRACE(X) +#endif + +/* +** The following two macros are used within the PAGERTRACE() macros above +** to print out file-descriptors. +** +** PAGERID() takes a pointer to a Pager struct as its argument. The +** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file +** struct as its argument. +*/ +#define PAGERID(p) ((int)(p->fd)) +#define FILEHANDLEID(fd) ((int)fd) + +/* +** The Pager.eState variable stores the current 'state' of a pager. A +** pager may be in any one of the seven states shown in the following +** state diagram. 
+** +** OPEN <------+------+ +** | | | +** V | | +** +---------> READER-------+ | +** | | | +** | V | +** |<-------WRITER_LOCKED------> ERROR +** | | ^ +** | V | +** |<------WRITER_CACHEMOD-------->| +** | | | +** | V | +** |<-------WRITER_DBMOD---------->| +** | | | +** | V | +** +<------WRITER_FINISHED-------->+ +** +** +** List of state transitions and the C [function] that performs each: +** +** OPEN -> READER [sqlite3PagerSharedLock] +** READER -> OPEN [pager_unlock] +** +** READER -> WRITER_LOCKED [sqlite3PagerBegin] +** WRITER_LOCKED -> WRITER_CACHEMOD [pager_open_journal] +** WRITER_CACHEMOD -> WRITER_DBMOD [syncJournal] +** WRITER_DBMOD -> WRITER_FINISHED [sqlite3PagerCommitPhaseOne] +** WRITER_*** -> READER [pager_end_transaction] +** +** WRITER_*** -> ERROR [pager_error] +** ERROR -> OPEN [pager_unlock] +** +** +** OPEN: +** +** The pager starts up in this state. Nothing is guaranteed in this +** state - the file may or may not be locked and the database size is +** unknown. The database may not be read or written. +** +** * No read or write transaction is active. +** * Any lock, or no lock at all, may be held on the database file. +** * The dbSize, dbOrigSize and dbFileSize variables may not be trusted. +** +** READER: +** +** In this state all the requirements for reading the database in +** rollback (non-WAL) mode are met. Unless the pager is (or recently +** was) in exclusive-locking mode, a user-level read transaction is +** open. The database size is known in this state. +** +** A connection running with locking_mode=normal enters this state when +** it opens a read-transaction on the database and returns to state +** OPEN after the read-transaction is completed. However a connection +** running in locking_mode=exclusive (including temp databases) remains in +** this state even after the read-transaction is closed. The only way +** a locking_mode=exclusive connection can transition from READER to OPEN +** is via the ERROR state (see below). +** +** * A read transaction may be active (but a write-transaction cannot). +** * A SHARED or greater lock is held on the database file. +** * The dbSize variable may be trusted (even if a user-level read +** transaction is not active). The dbOrigSize and dbFileSize variables +** may not be trusted at this point. +** * If the database is a WAL database, then the WAL connection is open. +** * Even if a read-transaction is not open, it is guaranteed that +** there is no hot-journal in the file-system. +** +** WRITER_LOCKED: +** +** The pager moves to this state from READER when a write-transaction +** is first opened on the database. In WRITER_LOCKED state, all locks +** required to start a write-transaction are held, but no actual +** modifications to the cache or database have taken place. +** +** In rollback mode, a RESERVED or (if the transaction was opened with +** BEGIN EXCLUSIVE) EXCLUSIVE lock is obtained on the database file when +** moving to this state, but the journal file is not written to or opened +** to in this state. If the transaction is committed or rolled back while +** in WRITER_LOCKED state, all that is required is to unlock the database +** file. +** +** IN WAL mode, WalBeginWriteTransaction() is called to lock the log file. +** If the connection is running with locking_mode=exclusive, an attempt +** is made to obtain an EXCLUSIVE lock on the database file. +** +** * A write transaction is active. +** * If the connection is open in rollback-mode, a RESERVED or greater +** lock is held on the database file. 
+** * If the connection is open in WAL-mode, a WAL write transaction +** is open (i.e. sqlite3WalBeginWriteTransaction() has been successfully +** called). +** * The dbSize, dbOrigSize and dbFileSize variables are all valid. +** * The contents of the pager cache have not been modified. +** * The journal file may or may not be open. +** * Nothing (not even the first header) has been written to the journal. +** +** WRITER_CACHEMOD: +** +** A pager moves from WRITER_LOCKED state to this state when a page is +** first modified by the upper layer. In rollback mode the journal file +** is opened (if it is not already open) and a header written to the +** start of it. The database file on disk has not been modified. +** +** * A write transaction is active. +** * A RESERVED or greater lock is held on the database file. +** * The journal file is open and the first header has been written +** to it, but the header has not been synced to disk. +** * The contents of the page cache have been modified. +** +** WRITER_DBMOD: +** +** The pager transitions from WRITER_CACHEMOD into WRITER_DBMOD state +** when it modifies the contents of the database file. WAL connections +** never enter this state (since they do not modify the database file, +** just the log file). +** +** * A write transaction is active. +** * An EXCLUSIVE or greater lock is held on the database file. +** * The journal file is open and the first header has been written +** and synced to disk. +** * The contents of the page cache have been modified (and possibly +** written to disk). +** +** WRITER_FINISHED: +** +** It is not possible for a WAL connection to enter this state. +** +** A rollback-mode pager changes to WRITER_FINISHED state from WRITER_DBMOD +** state after the entire transaction has been successfully written into the +** database file. In this state the transaction may be committed simply +** by finalizing the journal file. Once in WRITER_FINISHED state, it is +** not possible to modify the database further. At this point, the upper +** layer must either commit or rollback the transaction. +** +** * A write transaction is active. +** * An EXCLUSIVE or greater lock is held on the database file. +** * All writing and syncing of journal and database data has finished. +** If no error occurred, all that remains is to finalize the journal to +** commit the transaction. If an error did occur, the caller will need +** to rollback the transaction. +** +** ERROR: +** +** The ERROR state is entered when an IO or disk-full error (including +** SQLITE_IOERR_NOMEM) occurs at a point in the code that makes it +** difficult to be sure that the in-memory pager state (cache contents, +** db size etc.) are consistent with the contents of the file-system. +** +** Temporary pager files may enter the ERROR state, but in-memory pagers +** cannot. +** +** For example, if an IO error occurs while performing a rollback, +** the contents of the page-cache may be left in an inconsistent state. +** At this point it would be dangerous to change back to READER state +** (as usually happens after a rollback). Any subsequent readers might +** report database corruption (due to the inconsistent cache), and if +** they upgrade to writers, they may inadvertently corrupt the database +** file. To avoid this hazard, the pager switches into the ERROR state +** instead of READER following such an error. +** +** Once it has entered the ERROR state, any attempt to use the pager +** to read or write data returns an error. 
Eventually, once all +** outstanding transactions have been abandoned, the pager is able to +** transition back to OPEN state, discarding the contents of the +** page-cache and any other in-memory state at the same time. Everything +** is reloaded from disk (and, if necessary, hot-journal rollback peformed) +** when a read-transaction is next opened on the pager (transitioning +** the pager into READER state). At that point the system has recovered +** from the error. +** +** Specifically, the pager jumps into the ERROR state if: +** +** 1. An error occurs while attempting a rollback. This happens in +** function sqlite3PagerRollback(). +** +** 2. An error occurs while attempting to finalize a journal file +** following a commit in function sqlite3PagerCommitPhaseTwo(). +** +** 3. An error occurs while attempting to write to the journal or +** database file in function pagerStress() in order to free up +** memory. +** +** In other cases, the error is returned to the b-tree layer. The b-tree +** layer then attempts a rollback operation. If the error condition +** persists, the pager enters the ERROR state via condition (1) above. +** +** Condition (3) is necessary because it can be triggered by a read-only +** statement executed within a transaction. In this case, if the error +** code were simply returned to the user, the b-tree layer would not +** automatically attempt a rollback, as it assumes that an error in a +** read-only statement cannot leave the pager in an internally inconsistent +** state. +** +** * The Pager.errCode variable is set to something other than SQLITE_OK. +** * There are one or more outstanding references to pages (after the +** last reference is dropped the pager should move back to OPEN state). +** * The pager is not an in-memory pager. +** +** +** Notes: +** +** * A pager is never in WRITER_DBMOD or WRITER_FINISHED state if the +** connection is open in WAL mode. A WAL connection is always in one +** of the first four states. +** +** * Normally, a connection open in exclusive mode is never in PAGER_OPEN +** state. There are two exceptions: immediately after exclusive-mode has +** been turned on (and before any read or write transactions are +** executed), and when the pager is leaving the "error state". +** +** * See also: assert_pager_state(). +*/ +#define PAGER_OPEN 0 +#define PAGER_READER 1 +#define PAGER_WRITER_LOCKED 2 +#define PAGER_WRITER_CACHEMOD 3 +#define PAGER_WRITER_DBMOD 4 +#define PAGER_WRITER_FINISHED 5 +#define PAGER_ERROR 6 + +/* +** The Pager.eLock variable is almost always set to one of the +** following locking-states, according to the lock currently held on +** the database file: NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK. +** This variable is kept up to date as locks are taken and released by +** the pagerLockDb() and pagerUnlockDb() wrappers. +** +** If the VFS xLock() or xUnlock() returns an error other than SQLITE_BUSY +** (i.e. one of the SQLITE_IOERR subtypes), it is not clear whether or not +** the operation was successful. In these circumstances pagerLockDb() and +** pagerUnlockDb() take a conservative approach - eLock is always updated +** when unlocking the file, and only updated when locking the file if the +** VFS call is successful. This way, the Pager.eLock variable may be set +** to a less exclusive (lower) value than the lock that is actually held +** at the system level, but it is never set to a more exclusive value. +** +** This is usually safe. 
If an xUnlock fails or appears to fail, there may +** be a few redundant xLock() calls or a lock may be held for longer than +** required, but nothing really goes wrong. +** +** The exception is when the database file is unlocked as the pager moves +** from ERROR to OPEN state. At this point there may be a hot-journal file +** in the file-system that needs to be rolled back (as part of an OPEN->SHARED +** transition, by the same pager or any other). If the call to xUnlock() +** fails at this point and the pager is left holding an EXCLUSIVE lock, this +** can confuse the call to xCheckReservedLock() call made later as part +** of hot-journal detection. +** +** xCheckReservedLock() is defined as returning true "if there is a RESERVED +** lock held by this process or any others". So xCheckReservedLock may +** return true because the caller itself is holding an EXCLUSIVE lock (but +** doesn't know it because of a previous error in xUnlock). If this happens +** a hot-journal may be mistaken for a journal being created by an active +** transaction in another process, causing SQLite to read from the database +** without rolling it back. +** +** To work around this, if a call to xUnlock() fails when unlocking the +** database in the ERROR state, Pager.eLock is set to UNKNOWN_LOCK. It +** is only changed back to a real locking state after a successful call +** to xLock(EXCLUSIVE). Also, the code to do the OPEN->SHARED state transition +** omits the check for a hot-journal if Pager.eLock is set to UNKNOWN_LOCK +** lock. Instead, it assumes a hot-journal exists and obtains an EXCLUSIVE +** lock on the database file before attempting to roll it back. See function +** PagerSharedLock() for more detail. +** +** Pager.eLock may only be set to UNKNOWN_LOCK when the pager is in +** PAGER_OPEN state. +*/ +#define UNKNOWN_LOCK (EXCLUSIVE_LOCK+1) + +/* +** A macro used for invoking the codec if there is one +*/ +#ifdef SQLITE_HAS_CODEC +# define CODEC1(P,D,N,X,E) \ + if( P->xCodec && P->xCodec(P->pCodec,D,N,X)==0 ){ E; } +# define CODEC2(P,D,N,X,E,O) \ + if( P->xCodec==0 ){ O=(char*)D; }else \ + if( (O=(char*)(P->xCodec(P->pCodec,D,N,X)))==0 ){ E; } +#else +# define CODEC1(P,D,N,X,E) /* NO-OP */ +# define CODEC2(P,D,N,X,E,O) O=(char*)D +#endif + +/* +** The maximum allowed sector size. 64KiB. If the xSectorsize() method +** returns a value larger than this, then MAX_SECTOR_SIZE is used instead. +** This could conceivably cause corruption following a power failure on +** such a system. This is currently an undocumented limit. +*/ +#define MAX_SECTOR_SIZE 0x10000 + + +/* +** An instance of the following structure is allocated for each active +** savepoint and statement transaction in the system. All such structures +** are stored in the Pager.aSavepoint[] array, which is allocated and +** resized using sqlite3Realloc(). +** +** When a savepoint is created, the PagerSavepoint.iHdrOffset field is +** set to 0. If a journal-header is written into the main journal while +** the savepoint is active, then iHdrOffset is set to the byte offset +** immediately following the last journal record written into the main +** journal before the journal-header. This is required during savepoint +** rollback (see pagerPlaybackSavepoint()). 
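+**
+** For example (hypothetical offsets): if a savepoint is opened after
+** 1200 bytes of journal have been written, iOffset==1200.  If the last
+** journal record written before the next journal-header ends at byte
+** 3488, iHdrOffset is set to 3488; savepoint rollback then plays back
+** records from offset 1200 up to 3488 before skipping ahead past the
+** journal-header.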
+/*
+** An instance of the following structure is allocated for each active
+** savepoint and statement transaction in the system. All such structures
+** are stored in the Pager.aSavepoint[] array, which is allocated and
+** resized using sqlite3Realloc().
+**
+** When a savepoint is created, the PagerSavepoint.iHdrOffset field is
+** set to 0. If a journal-header is written into the main journal while
+** the savepoint is active, then iHdrOffset is set to the byte offset
+** immediately following the last journal record written into the main
+** journal before the journal-header. This is required during savepoint
+** rollback (see pagerPlaybackSavepoint()).
+*/
+typedef struct PagerSavepoint PagerSavepoint;
+struct PagerSavepoint {
+ i64 iOffset; /* Starting offset in main journal */
+ i64 iHdrOffset; /* See above */
+ Bitvec *pInSavepoint; /* Set of pages in this savepoint */
+ Pgno nOrig; /* Original number of pages in file */
+ Pgno iSubRec; /* Index of first record in sub-journal */
+#ifndef SQLITE_OMIT_WAL
+ u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */
+#endif
+};
+
+/*
+** Bits of the Pager.doNotSpill flag. See further description below.
+*/
+#define SPILLFLAG_OFF 0x01 /* Never spill cache. Set via pragma */
+#define SPILLFLAG_ROLLBACK 0x02 /* Currently rolling back, so do not spill */
+#define SPILLFLAG_NOSYNC 0x04 /* Spill is ok, but do not sync */
+
+/*
+** An open page cache is an instance of struct Pager. A description of
+** some of the more important member variables follows:
+**
+** eState
+**
+** The current 'state' of the pager object. See the comment and state
+** diagram above for a description of the pager state.
+**
+** eLock
+**
+** For a real on-disk database, the current lock held on the database file -
+** NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK.
+**
+** For a temporary or in-memory database (neither of which require any
+** locks), this variable is always set to EXCLUSIVE_LOCK. Since such
+** databases always have Pager.exclusiveMode==1, this tricks the pager
+** logic into thinking that it already has all the locks it will ever
+** need (and no reason to release them).
+**
+** In some (obscure) circumstances, this variable may also be set to
+** UNKNOWN_LOCK. See the comment above the #define of UNKNOWN_LOCK for
+** details.
+**
+** changeCountDone
+**
+** This boolean variable is used to make sure that the change-counter
+** (the 4-byte header field at byte offset 24 of the database file) is
+** not updated more often than necessary.
+**
+** It is set to true when the change-counter field is updated, which
+** can only happen if an exclusive lock is held on the database file.
+** It is cleared (set to false) whenever an exclusive lock is
+** relinquished on the database file. Each time a transaction is committed,
+** the changeCountDone flag is inspected. If it is true, the work of
+** updating the change-counter is omitted for the current transaction.
+**
+** This mechanism means that when running in exclusive mode, a connection
+** need only update the change-counter once, for the first transaction
+** committed.
+**
+** setMaster
+**
+** When PagerCommitPhaseOne() is called to commit a transaction, it may
+** (or may not) specify a master-journal name to be written into the
+** journal file before it is synced to disk.
+**
+** Whether or not a journal file contains a master-journal pointer affects
+** the way in which the journal file is finalized after the transaction is
+** committed or rolled back when running in "journal_mode=PERSIST" mode.
+** If a journal file does not contain a master-journal pointer, it is
+** finalized by overwriting the first journal header with zeroes. If
+** it does contain a master-journal pointer the journal file is finalized
+** by truncating it to zero bytes, just as if the connection were
+** running in "journal_mode=truncate" mode.
+**
+** Journal files that contain master journal pointers cannot be finalized
+** simply by overwriting the first journal-header with zeroes, as the
+** master journal pointer could interfere with hot-journal rollback of any
+** subsequently interrupted transaction that reuses the journal file.
+**
+** The flag is cleared as soon as the journal file is finalized (either
+** by PagerCommitPhaseTwo or PagerRollback). If an IO error prevents the
+** journal file from being successfully finalized, the setMaster flag
+** is cleared anyway (and the pager will move to ERROR state).
+**
+** doNotSpill
+**
+** This variable controls the behavior of cache-spills (calls made by
+** the pcache module to the pagerStress() routine to write cached data
+** to the file-system in order to free up memory).
+**
+** When bits SPILLFLAG_OFF or SPILLFLAG_ROLLBACK of doNotSpill are set,
+** writing to the database from pagerStress() is disabled altogether.
+** The SPILLFLAG_ROLLBACK bit handles a very obscure situation that
+** comes up during savepoint rollback: the pcache module may need to
+** allocate a new page, and the flag prevents the journal file from being
+** written to while it is being traversed by code in pager_playback().
+** The SPILLFLAG_OFF case is a user preference.
+**
+** If the SPILLFLAG_NOSYNC bit is set, writing to the database from
+** pagerStress() is permitted, but syncing the journal file is not.
+** This flag is set by sqlite3PagerWrite() when the file-system sector-size
+** is larger than the database page-size in order to prevent a journal sync
+** from happening in between the journalling of two pages on the same sector.
+**
+** subjInMemory
+**
+** This is a boolean variable. If true, then any required sub-journal
+** is opened as an in-memory journal file. If false, then in-memory
+** sub-journals are only used for in-memory pager files.
+**
+** This variable is updated by the upper layer each time a new
+** write-transaction is opened.
+**
+** dbSize, dbOrigSize, dbFileSize
+**
+** Variable dbSize is set to the number of pages in the database file.
+** It is valid in PAGER_READER and higher states (all states except for
+** OPEN and ERROR).
+**
+** dbSize is set based on the size of the database file, which may be
+** larger than the size of the database (the value stored at offset
+** 28 of the database header by the btree). If the size of the file
+** is not an integer multiple of the page-size, the value stored in
+** dbSize is rounded down (i.e. a 5KB file with 2K page-size has dbSize==2).
+** Except, any file that is greater than 0 bytes in size is considered
+** to have at least one page. (i.e. a 1KB file with 2K page-size leads
+** to dbSize==1).
+**
+** During a write-transaction, if pages with page-numbers greater than
+** dbSize are modified in the cache, dbSize is updated accordingly.
+** Similarly, if the database is truncated using PagerTruncateImage(),
+** dbSize is updated.
+**
+** Variables dbOrigSize and dbFileSize are valid in states
+** PAGER_WRITER_LOCKED and higher. dbOrigSize is a copy of the dbSize
+** variable at the start of the transaction. It is used during rollback,
+** and to determine whether or not pages need to be journalled before
+** being modified.
+**
+** Throughout a write-transaction, dbFileSize contains the size of
+** the file on disk in pages. It is set to a copy of dbSize when the
+** write-transaction is first opened, and updated when VFS calls are made
+** to write or truncate the database file on disk.
+**
+** The only reason the dbFileSize variable is required is to suppress
+** unnecessary calls to xTruncate() after committing a transaction. If,
+** when a transaction is committed, the dbFileSize variable indicates
+** that the database file is larger than the database image (Pager.dbSize),
+** pager_truncate() is called.
The pager_truncate() call uses xFilesize() +** to measure the database file on disk, and then truncates it if required. +** dbFileSize is not used when rolling back a transaction. In this case +** pager_truncate() is called unconditionally (which means there may be +** a call to xFilesize() that is not strictly required). In either case, +** pager_truncate() may cause the file to become smaller or larger. +** +** dbHintSize +** +** The dbHintSize variable is used to limit the number of calls made to +** the VFS xFileControl(FCNTL_SIZE_HINT) method. +** +** dbHintSize is set to a copy of the dbSize variable when a +** write-transaction is opened (at the same time as dbFileSize and +** dbOrigSize). If the xFileControl(FCNTL_SIZE_HINT) method is called, +** dbHintSize is increased to the number of pages that correspond to the +** size-hint passed to the method call. See pager_write_pagelist() for +** details. +** +** errCode +** +** The Pager.errCode variable is only ever used in PAGER_ERROR state. It +** is set to zero in all other states. In PAGER_ERROR state, Pager.errCode +** is always set to SQLITE_FULL, SQLITE_IOERR or one of the SQLITE_IOERR_XXX +** sub-codes. +*/ +struct Pager { + sqlite3_vfs *pVfs; /* OS functions to use for IO */ + u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ + u8 journalMode; /* One of the PAGER_JOURNALMODE_* values */ + u8 useJournal; /* Use a rollback journal on this file */ + u8 noSync; /* Do not sync the journal if true */ + u8 fullSync; /* Do extra syncs of the journal for robustness */ + u8 extraSync; /* sync directory after journal delete */ + u8 ckptSyncFlags; /* SYNC_NORMAL or SYNC_FULL for checkpoint */ + u8 walSyncFlags; /* SYNC_NORMAL or SYNC_FULL for wal writes */ + u8 syncFlags; /* SYNC_NORMAL or SYNC_FULL otherwise */ + u8 tempFile; /* zFilename is a temporary or immutable file */ + u8 noLock; /* Do not lock (except in WAL mode) */ + u8 readOnly; /* True for a read-only database */ + u8 memDb; /* True to inhibit all file I/O */ + + /************************************************************************** + ** The following block contains those class members that change during + ** routine operation. Class members not in this block are either fixed + ** when the pager is first created or else only change when there is a + ** significant mode change (such as changing the page_size, locking_mode, + ** or the journal_mode). From another view, these class members describe + ** the "state" of the pager, while other class members describe the + ** "configuration" of the pager. + */ + u8 eState; /* Pager state (OPEN, READER, WRITER_LOCKED..) 
*/
+ u8 eLock; /* Current lock held on database file */
+ u8 changeCountDone; /* Set after incrementing the change-counter */
+ u8 setMaster; /* True if a m-j name has been written to jrnl */
+ u8 doNotSpill; /* Do not spill the cache when non-zero */
+ u8 subjInMemory; /* True to use in-memory sub-journals */
+ u8 bUseFetch; /* True to use xFetch() */
+ u8 hasHeldSharedLock; /* True if a shared lock has ever been held */
+ Pgno dbSize; /* Number of pages in the database */
+ Pgno dbOrigSize; /* dbSize before the current transaction */
+ Pgno dbFileSize; /* Number of pages in the database file */
+ Pgno dbHintSize; /* Value passed to FCNTL_SIZE_HINT call */
+ int errCode; /* One of several kinds of errors */
+ int nRec; /* Pages journalled since last j-header written */
+ u32 cksumInit; /* Quasi-random value added to every checksum */
+ u32 nSubRec; /* Number of records written to sub-journal */
+ Bitvec *pInJournal; /* One bit for each page in the database file */
+ sqlite3_file *fd; /* File descriptor for database */
+ sqlite3_file *jfd; /* File descriptor for main journal */
+ sqlite3_file *sjfd; /* File descriptor for sub-journal */
+ i64 journalOff; /* Current write offset in the journal file */
+ i64 journalHdr; /* Byte offset to previous journal header */
+ sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */
+ PagerSavepoint *aSavepoint; /* Array of active savepoints */
+ int nSavepoint; /* Number of elements in aSavepoint[] */
+ u32 iDataVersion; /* Changes whenever database content changes */
+ char dbFileVers[16]; /* Changes whenever database file changes */
+
+ int nMmapOut; /* Number of mmap pages currently outstanding */
+ sqlite3_int64 szMmap; /* Desired maximum mmap size */
+ PgHdr *pMmapFreelist; /* List of free mmap page headers (pDirty) */
+ /*
+ ** End of the routinely-changing class members
+ ***************************************************************************/
+
+ u16 nExtra; /* Add this many bytes to each in-memory page */
+ i16 nReserve; /* Number of unused bytes at end of each page */
+ u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */
+ u32 sectorSize; /* Assumed sector size during rollback */
+ int pageSize; /* Number of bytes in a page */
+ Pgno mxPgno; /* Maximum allowed size of the database */
+ i64 journalSizeLimit; /* Size limit for persistent journal files */
+ char *zFilename; /* Name of the database file */
+ char *zJournal; /* Name of the journal file */
+ int (*xBusyHandler)(void*); /* Function to call when busy */
+ void *pBusyHandlerArg; /* Context argument for xBusyHandler */
+ int aStat[3]; /* Total cache hits, misses and writes */
+#ifdef SQLITE_TEST
+ int nRead; /* Database pages read */
+#endif
+ void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */
+ int (*xGet)(Pager*,Pgno,DbPage**,int); /* Routine to fetch a page */
+#ifdef SQLITE_HAS_CODEC
+ void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */
+ void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */
+ void (*xCodecFree)(void*); /* Destructor for the codec */
+ void *pCodec; /* First argument to xCodec... methods */
+#endif
+ char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */
+ PCache *pPCache; /* Pointer to page cache object */
+#ifndef SQLITE_OMIT_WAL
+ Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */
+ char *zWal; /* File name for write-ahead log */
+#endif
+};
+
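+/*
+** For illustration only: the aStat[] counters in the structure above are
+** exposed through the public sqlite3_db_status() interface. A minimal,
+** self-contained sketch of reading them from application code follows
+** (the showCacheStats() helper is hypothetical):
+*/
+#if 0
+#include <stdio.h>
+#include "sqlite3.h"
+static void showCacheStats(sqlite3 *db){
+ int cur = 0, hiwtr = 0;
+ sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_HIT, &cur, &hiwtr, 0);
+ printf("page-cache hits:   %d\n", cur);
+ sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_MISS, &cur, &hiwtr, 0);
+ printf("page-cache misses: %d\n", cur);
+ sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_WRITE, &cur, &hiwtr, 0);
+ printf("page-cache writes: %d\n", cur);
+}
+#endif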
+/*
+** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains
+** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS
+** or CACHE_WRITE to sqlite3_db_status().
+*/
+#define PAGER_STAT_HIT 0
+#define PAGER_STAT_MISS 1
+#define PAGER_STAT_WRITE 2
+
+/*
+** The following global variables hold counters used for
+** testing purposes only. These variables do not exist in
+** a non-testing build. These variables are not thread-safe.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */
+SQLITE_API int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */
+SQLITE_API int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */
+# define PAGER_INCR(v) v++
+#else
+# define PAGER_INCR(v)
+#endif
+
+
+
+/*
+** Journal files begin with the following magic string. The data
+** was obtained from /dev/random. It is used only as a sanity check.
+**
+** Since version 2.8.0, the journal format contains additional sanity
+** checking information. If the power fails while the journal is being
+** written, semi-random garbage data might appear in the journal
+** file after power is restored. If an attempt is then made
+** to roll the journal back, the database could be corrupted. The additional
+** sanity checking data is an attempt to discover the garbage in the
+** journal and ignore it.
+**
+** The sanity checking information for the new journal format consists
+** of a 32-bit checksum on each page of data. The checksum covers both
+** the page number and the pPager->pageSize bytes of data for the page.
+** This cksum is initialized to a 32-bit random value that appears in the
+** journal file right after the header. The random initializer is important,
+** because garbage data that appears at the end of a journal is likely
+** data that was once in other files that have now been deleted. If the
+** garbage data came from an obsolete journal file, the checksums might
+** be correct. But by initializing the checksum to a random value which
+** is different for every journal, we minimize that risk.
+*/
+static const unsigned char aJournalMagic[] = {
+ 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7,
+};
+
+/*
+** The size of each page record in the journal is given by
+** the following macro.
+*/
+#define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8)
+
+/*
+** The journal header size for this pager. This is usually the same
+** size as a single disk sector. See also setSectorSize().
+*/
+#define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize)
+
+/*
+** The macro MEMDB is true if we are dealing with an in-memory database.
+** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set,
+** the value of MEMDB will be a constant and the compiler will optimize
+** out code that would never execute.
+*/
+#ifdef SQLITE_OMIT_MEMORYDB
+# define MEMDB 0
+#else
+# define MEMDB pPager->memDb
+#endif
+
+/*
+** The macro USEFETCH is true if we are allowed to use the xFetch and xUnfetch
+** interfaces to access the database using memory-mapped I/O.
+*/
+#if SQLITE_MAX_MMAP_SIZE>0
+# define USEFETCH(x) ((x)->bUseFetch)
+#else
+# define USEFETCH(x) 0
+#endif
+
+/*
+** The maximum legal page number is (2^31 - 1).
+*/
+#define PAGER_MAX_PGNO 2147483647
+
+/*
+** The argument to this macro is a file descriptor (type sqlite3_file*).
+** Return 0 if it is not open, or non-zero (but not 1) if it is.
+**
+** This is so that expressions can be written as:
+**
+** if( isOpen(pPager->jfd) ){ ...
+** +** instead of +** +** if( pPager->jfd->pMethods ){ ... +*/ +#define isOpen(pFd) ((pFd)->pMethods!=0) + +/* +** Return true if this pager uses a write-ahead log to read page pgno. +** Return false if the pager reads pgno directly from the database. +*/ +#if !defined(SQLITE_OMIT_WAL) && defined(SQLITE_DIRECT_OVERFLOW_READ) +SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno pgno){ + u32 iRead = 0; + int rc; + if( pPager->pWal==0 ) return 0; + rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); + return rc || iRead; +} +#endif +#ifndef SQLITE_OMIT_WAL +# define pagerUseWal(x) ((x)->pWal!=0) +#else +# define pagerUseWal(x) 0 +# define pagerRollbackWal(x) 0 +# define pagerWalFrames(v,w,x,y) 0 +# define pagerOpenWalIfPresent(z) SQLITE_OK +# define pagerBeginReadTransaction(z) SQLITE_OK +#endif + +#ifndef NDEBUG +/* +** Usage: +** +** assert( assert_pager_state(pPager) ); +** +** This function runs many asserts to try to find inconsistencies in +** the internal state of the Pager object. +*/ +static int assert_pager_state(Pager *p){ + Pager *pPager = p; + + /* State must be valid. */ + assert( p->eState==PAGER_OPEN + || p->eState==PAGER_READER + || p->eState==PAGER_WRITER_LOCKED + || p->eState==PAGER_WRITER_CACHEMOD + || p->eState==PAGER_WRITER_DBMOD + || p->eState==PAGER_WRITER_FINISHED + || p->eState==PAGER_ERROR + ); + + /* Regardless of the current state, a temp-file connection always behaves + ** as if it has an exclusive lock on the database file. It never updates + ** the change-counter field, so the changeCountDone flag is always set. + */ + assert( p->tempFile==0 || p->eLock==EXCLUSIVE_LOCK ); + assert( p->tempFile==0 || pPager->changeCountDone ); + + /* If the useJournal flag is clear, the journal-mode must be "OFF". + ** And if the journal-mode is "OFF", the journal file must not be open. + */ + assert( p->journalMode==PAGER_JOURNALMODE_OFF || p->useJournal ); + assert( p->journalMode!=PAGER_JOURNALMODE_OFF || !isOpen(p->jfd) ); + + /* Check that MEMDB implies noSync. And an in-memory journal. Since + ** this means an in-memory pager performs no IO at all, it cannot encounter + ** either SQLITE_IOERR or SQLITE_FULL during rollback or while finalizing + ** a journal file. (although the in-memory journal implementation may + ** return SQLITE_IOERR_NOMEM while the journal file is being written). It + ** is therefore not possible for an in-memory pager to enter the ERROR + ** state. + */ + if( MEMDB ){ + assert( !isOpen(p->fd) ); + assert( p->noSync ); + assert( p->journalMode==PAGER_JOURNALMODE_OFF + || p->journalMode==PAGER_JOURNALMODE_MEMORY + ); + assert( p->eState!=PAGER_ERROR && p->eState!=PAGER_OPEN ); + assert( pagerUseWal(p)==0 ); + } + + /* If changeCountDone is set, a RESERVED lock or greater must be held + ** on the file. 
+ */
+ assert( pPager->changeCountDone==0 || pPager->eLock>=RESERVED_LOCK );
+ assert( p->eLock!=PENDING_LOCK );
+
+ switch( p->eState ){
+ case PAGER_OPEN:
+ assert( !MEMDB );
+ assert( pPager->errCode==SQLITE_OK );
+ assert( sqlite3PcacheRefCount(pPager->pPCache)==0 || pPager->tempFile );
+ break;
+
+ case PAGER_READER:
+ assert( pPager->errCode==SQLITE_OK );
+ assert( p->eLock!=UNKNOWN_LOCK );
+ assert( p->eLock>=SHARED_LOCK );
+ break;
+
+ case PAGER_WRITER_LOCKED:
+ assert( p->eLock!=UNKNOWN_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ if( !pagerUseWal(pPager) ){
+ assert( p->eLock>=RESERVED_LOCK );
+ }
+ assert( pPager->dbSize==pPager->dbOrigSize );
+ assert( pPager->dbOrigSize==pPager->dbFileSize );
+ assert( pPager->dbOrigSize==pPager->dbHintSize );
+ assert( pPager->setMaster==0 );
+ break;
+
+ case PAGER_WRITER_CACHEMOD:
+ assert( p->eLock!=UNKNOWN_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ if( !pagerUseWal(pPager) ){
+ /* If journal_mode=wal here, it is possible that neither the
+ ** journal file nor the WAL file is open. This happens during
+ ** a rollback transaction that switches from journal_mode=off
+ ** to journal_mode=wal.
+ */
+ assert( p->eLock>=RESERVED_LOCK );
+ assert( isOpen(p->jfd)
+ || p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_WAL
+ );
+ }
+ assert( pPager->dbOrigSize==pPager->dbFileSize );
+ assert( pPager->dbOrigSize==pPager->dbHintSize );
+ break;
+
+ case PAGER_WRITER_DBMOD:
+ assert( p->eLock==EXCLUSIVE_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ assert( !pagerUseWal(pPager) );
+ assert( p->eLock>=EXCLUSIVE_LOCK );
+ assert( isOpen(p->jfd)
+ || p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_WAL
+ );
+ assert( pPager->dbOrigSize<=pPager->dbHintSize );
+ break;
+
+ case PAGER_WRITER_FINISHED:
+ assert( p->eLock==EXCLUSIVE_LOCK );
+ assert( pPager->errCode==SQLITE_OK );
+ assert( !pagerUseWal(pPager) );
+ assert( isOpen(p->jfd)
+ || p->journalMode==PAGER_JOURNALMODE_OFF
+ || p->journalMode==PAGER_JOURNALMODE_WAL
+ );
+ break;
+
+ case PAGER_ERROR:
+ /* There must be at least one outstanding reference to the pager if
+ ** in ERROR state. Otherwise the pager should have already dropped
+ ** back to OPEN state.
+ */
+ assert( pPager->errCode!=SQLITE_OK );
+ assert( sqlite3PcacheRefCount(pPager->pPCache)>0 || pPager->tempFile );
+ break;
+ }
+
+ return 1;
+}
+#endif /* ifndef NDEBUG */
+
+#ifdef SQLITE_DEBUG
+/*
+** Return a pointer to a human readable string in a static buffer
+** containing the state of the Pager object passed as an argument. This
+** is intended to be used within debuggers. For example, as an alternative
+** to "print *pPager" in gdb:
+**
+** (gdb) printf "%s", print_pager_state(pPager)
+*/
+static char *print_pager_state(Pager *p){
+ static char zRet[1024];
+
+ sqlite3_snprintf(1024, zRet,
+ "Filename: %s\n"
+ "State: %s errCode=%d\n"
+ "Lock: %s\n"
+ "Locking mode: locking_mode=%s\n"
+ "Journal mode: journal_mode=%s\n"
+ "Backing store: tempFile=%d memDb=%d useJournal=%d\n"
+ "Journal: journalOff=%lld journalHdr=%lld\n"
+ "Size: dbsize=%d dbOrigSize=%d dbFileSize=%d\n"
+ , p->zFilename
+ , p->eState==PAGER_OPEN ? "OPEN" :
+ p->eState==PAGER_READER ? "READER" :
+ p->eState==PAGER_WRITER_LOCKED ? "WRITER_LOCKED" :
+ p->eState==PAGER_WRITER_CACHEMOD ? "WRITER_CACHEMOD" :
+ p->eState==PAGER_WRITER_DBMOD ? "WRITER_DBMOD" :
+ p->eState==PAGER_WRITER_FINISHED ? "WRITER_FINISHED" :
+ p->eState==PAGER_ERROR ? "ERROR" : "?error?"
, (int)p->errCode
+ , p->eLock==NO_LOCK ? "NO_LOCK" :
+ p->eLock==RESERVED_LOCK ? "RESERVED" :
+ p->eLock==EXCLUSIVE_LOCK ? "EXCLUSIVE" :
+ p->eLock==SHARED_LOCK ? "SHARED" :
+ p->eLock==UNKNOWN_LOCK ? "UNKNOWN" : "?error?"
+ , p->exclusiveMode ? "exclusive" : "normal"
+ , p->journalMode==PAGER_JOURNALMODE_MEMORY ? "memory" :
+ p->journalMode==PAGER_JOURNALMODE_OFF ? "off" :
+ p->journalMode==PAGER_JOURNALMODE_DELETE ? "delete" :
+ p->journalMode==PAGER_JOURNALMODE_PERSIST ? "persist" :
+ p->journalMode==PAGER_JOURNALMODE_TRUNCATE ? "truncate" :
+ p->journalMode==PAGER_JOURNALMODE_WAL ? "wal" : "?error?"
+ , (int)p->tempFile, (int)p->memDb, (int)p->useJournal
+ , p->journalOff, p->journalHdr
+ , (int)p->dbSize, (int)p->dbOrigSize, (int)p->dbFileSize
+ );
+
+ return zRet;
+}
+#endif
+
+/* Forward references to the various page getters */
+static int getPageNormal(Pager*,Pgno,DbPage**,int);
+static int getPageError(Pager*,Pgno,DbPage**,int);
+#if SQLITE_MAX_MMAP_SIZE>0
+static int getPageMMap(Pager*,Pgno,DbPage**,int);
+#endif
+
+/*
+** Set the Pager.xGet method for the appropriate routine used to fetch
+** content from the pager.
+*/
+static void setGetterMethod(Pager *pPager){
+ if( pPager->errCode ){
+ pPager->xGet = getPageError;
+#if SQLITE_MAX_MMAP_SIZE>0
+ }else if( USEFETCH(pPager)
+#ifdef SQLITE_HAS_CODEC
+ && pPager->xCodec==0
+#endif
+ ){
+ pPager->xGet = getPageMMap;
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+ }else{
+ pPager->xGet = getPageNormal;
+ }
+}
+
+/*
+** Return true if it is necessary to write page *pPg into the sub-journal.
+** A page needs to be written into the sub-journal if there exists one
+** or more open savepoints for which:
+**
+** * The page-number is less than or equal to PagerSavepoint.nOrig, and
+** * The bit corresponding to the page-number is not set in
+** PagerSavepoint.pInSavepoint.
+*/
+static int subjRequiresPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ PagerSavepoint *p;
+ Pgno pgno = pPg->pgno;
+ int i;
+ for(i=0; i<pPager->nSavepoint; i++){
+ p = &pPager->aSavepoint[i];
+ if( p->nOrig>=pgno && 0==sqlite3BitvecTestNotNull(p->pInSavepoint, pgno) ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** Return true if the page is already in the journal file.
+*/
+static int pageInJournal(Pager *pPager, PgHdr *pPg){
+ return sqlite3BitvecTest(pPager->pInJournal, pPg->pgno);
+}
+#endif
+
+/*
+** Read a 32-bit integer from the given file descriptor. Store the integer
+** that is read in *pRes. Return SQLITE_OK if everything worked, or an
+** error code if something goes wrong.
+**
+** All values are stored on disk as big-endian.
+*/
+static int read32bits(sqlite3_file *fd, i64 offset, u32 *pRes){
+ unsigned char ac[4];
+ int rc = sqlite3OsRead(fd, ac, sizeof(ac), offset);
+ if( rc==SQLITE_OK ){
+ *pRes = sqlite3Get4byte(ac);
+ }
+ return rc;
+}
+
+/*
+** Write a 32-bit integer into a string buffer in big-endian byte order.
+*/
+#define put32bits(A,B) sqlite3Put4byte((u8*)A,B)
+
+
+/*
+** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK
+** on success or an error code if something goes wrong.
+*/
+static int write32bits(sqlite3_file *fd, i64 offset, u32 val){
+ char ac[4];
+ put32bits(ac, val);
+ return sqlite3OsWrite(fd, ac, 4, offset);
+}
+
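+/*
+** For illustration only: a self-contained sketch of the big-endian
+** encoding that read32bits() and write32bits() above delegate to
+** sqlite3Get4byte()/sqlite3Put4byte(). The demo helpers are hypothetical,
+** not SQLite's own.
+*/
+#if 0
+static unsigned int demoGet4byte(const unsigned char *p){
+ return ((unsigned int)p[0]<<24) | ((unsigned int)p[1]<<16)
+      | ((unsigned int)p[2]<<8)  |  (unsigned int)p[3];
+}
+static void demoPut4byte(unsigned char *p, unsigned int v){
+ p[0] = (unsigned char)(v>>24);
+ p[1] = (unsigned char)(v>>16);
+ p[2] = (unsigned char)(v>>8);
+ p[3] = (unsigned char)v;
+}
+#endif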
+/*
+** Unlock the database file to level eLock, which must be either NO_LOCK
+** or SHARED_LOCK. Regardless of whether or not the call to xUnlock()
+** succeeds, set the Pager.eLock variable to match the (attempted) new lock.
+**
+** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
+** called, do not modify it. See the comment above the #define of
+** UNKNOWN_LOCK for an explanation of this.
+*/
+static int pagerUnlockDb(Pager *pPager, int eLock){
+ int rc = SQLITE_OK;
+
+ assert( !pPager->exclusiveMode || pPager->eLock==eLock );
+ assert( eLock==NO_LOCK || eLock==SHARED_LOCK );
+ assert( eLock!=NO_LOCK || pagerUseWal(pPager)==0 );
+ if( isOpen(pPager->fd) ){
+ assert( pPager->eLock>=eLock );
+ rc = pPager->noLock ? SQLITE_OK : sqlite3OsUnlock(pPager->fd, eLock);
+ if( pPager->eLock!=UNKNOWN_LOCK ){
+ pPager->eLock = (u8)eLock;
+ }
+ IOTRACE(("UNLOCK %p %d\n", pPager, eLock))
+ }
+ return rc;
+}
+
+/*
+** Lock the database file to level eLock, which must be either SHARED_LOCK,
+** RESERVED_LOCK or EXCLUSIVE_LOCK. If the caller is successful, set the
+** Pager.eLock variable to the new locking state.
+**
+** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
+** called, do not modify it unless the new locking state is EXCLUSIVE_LOCK.
+** See the comment above the #define of UNKNOWN_LOCK for an explanation
+** of this.
+*/
+static int pagerLockDb(Pager *pPager, int eLock){
+ int rc = SQLITE_OK;
+
+ assert( eLock==SHARED_LOCK || eLock==RESERVED_LOCK || eLock==EXCLUSIVE_LOCK );
+ if( pPager->eLock<eLock || pPager->eLock==UNKNOWN_LOCK ){
+ rc = pPager->noLock ? SQLITE_OK : sqlite3OsLock(pPager->fd, eLock);
+ if( rc==SQLITE_OK && (pPager->eLock!=UNKNOWN_LOCK||eLock==EXCLUSIVE_LOCK) ){
+ pPager->eLock = (u8)eLock;
+ IOTRACE(("LOCK %p %d\n", pPager, eLock))
+ }
+ }
+ return rc;
+}
+
+/*
+** This function determines whether or not the atomic-write optimization
+** can be used with this pager. The optimization can be used if:
+**
+** (a) the value returned by OsDeviceCharacteristics() indicates that
+** a database page may be written atomically, and
+** (b) the value returned by OsSectorSize() is less than or equal
+** to the page size.
+**
+** The optimization is also always enabled for temporary files. It is
+** an error to call this function if pPager is opened on an in-memory
+** database.
+**
+** If the optimization cannot be used, 0 is returned. If it can be used,
+** then the value returned is the size of the journal file when it
+** contains rollback data for exactly one page.
+*/
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+static int jrnlBufferSize(Pager *pPager){
+ assert( !MEMDB );
+ if( !pPager->tempFile ){
+ int dc; /* Device characteristics */
+ int nSector; /* Sector size */
+ int szPage; /* Page size */
+
+ assert( isOpen(pPager->fd) );
+ dc = sqlite3OsDeviceCharacteristics(pPager->fd);
+ nSector = pPager->sectorSize;
+ szPage = pPager->pageSize;
+
+ assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
+ assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
+ if( 0==(dc&(SQLITE_IOCAP_ATOMIC|(szPage>>8))) || nSector>szPage ){
+ return 0;
+ }
+ }
+
+ return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager);
+}
+#else
+# define jrnlBufferSize(x) 0
+#endif
+
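+/*
+** For illustration only: jrnlBufferSize() above relies on the fact that
+** the SQLITE_IOCAP_ATOMICnnn bits are defined as (nnn>>8) -- note the
+** asserts on ATOMIC512 and ATOMIC64K -- so the capability bit matching a
+** given page size is simply (szPage>>8). A sketch of just that page-size
+** specific test (demoHasAtomicPageWrite() is hypothetical, and the real
+** check also accepts the general SQLITE_IOCAP_ATOMIC bit):
+*/
+#if 0
+static int demoHasAtomicPageWrite(int dc, int szPage){
+ /* dc is the SQLITE_IOCAP_* bitmask from xDeviceCharacteristics().
+ ** For szPage==512 this tests bit (512>>8)==SQLITE_IOCAP_ATOMIC512. */
+ return (dc & (szPage>>8))!=0;
+}
+#endif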
+/*
+** If SQLITE_CHECK_PAGES is defined then we do some sanity checking
+** on the cache using a hash function. This is used for testing
+** and debugging only.
+*/
+#ifdef SQLITE_CHECK_PAGES
+/*
+** Return a 32-bit hash of the page data for pPage.
+*/
+static u32 pager_datahash(int nByte, unsigned char *pData){
+ u32 hash = 0;
+ int i;
+ for(i=0; i<nByte; i++){
+ hash = (hash*1039) + pData[i];
+ }
+ return hash;
+}
+static u32 pager_pagehash(PgHdr *pPage){
+ return pager_datahash(pPage->pPager->pageSize, (unsigned char *)pPage->pData);
+}
+static void pager_set_pagehash(PgHdr *pPage){
+ pPage->pageHash = pager_pagehash(pPage);
+}
+
+/*
+** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES
+** is defined, and NDEBUG is not defined, an assert() statement checks
+** that the page is either dirty or still matches the calculated page-hash.
+*/
+#define CHECK_PAGE(x) checkPage(x)
+static void checkPage(PgHdr *pPg){
+ Pager *pPager = pPg->pPager;
+ assert( pPager->eState!=PAGER_ERROR );
+ assert( (pPg->flags&PGHDR_DIRTY) || pPg->pageHash==pager_pagehash(pPg) );
+}
+
+#else
+#define pager_datahash(X,Y) 0
+#define pager_pagehash(X) 0
+#define pager_set_pagehash(X)
+#define CHECK_PAGE(x)
+#endif /* SQLITE_CHECK_PAGES */
+
+/*
+** When this is called the journal file for pager pPager must be open.
+** This function attempts to read a master journal file name from the
+** end of the file and, if successful, copies it into memory supplied
+** by the caller. See comments above writeMasterJournal() for the format
+** used to store a master journal file name at the end of a journal file.
+**
+** zMaster must point to a buffer of at least nMaster bytes allocated by
+** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is
+** enough space to write the master journal name). If the master journal
+** name in the journal is longer than nMaster bytes (including a
+** nul-terminator), then this is handled as if no master journal name
+** were present in the journal.
+**
+** If a master journal file name is present at the end of the journal
+** file, then it is copied into the buffer pointed to by zMaster. A
+** nul-terminator byte is appended to the buffer following the master
+** journal file name.
+**
+** If it is determined that no master journal file name is present
+** zMaster[0] is set to 0 and SQLITE_OK returned.
+**
+** If an error occurs while reading from the journal file, an SQLite
+** error code is returned.
+*/
+static int readMasterJournal(sqlite3_file *pJrnl, char *zMaster, u32 nMaster){
+ int rc; /* Return code */
+ u32 len; /* Length in bytes of master journal name */
+ i64 szJ; /* Total size in bytes of journal file pJrnl */
+ u32 cksum; /* MJ checksum value read from journal */
+ u32 u; /* Unsigned loop counter */
+ unsigned char aMagic[8]; /* A buffer to hold the magic header */
+ zMaster[0] = '\0';
+
+ if( SQLITE_OK!=(rc = sqlite3OsFileSize(pJrnl, &szJ))
+ || szJ<16
+ || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-16, &len))
+ || len>=nMaster
+ || len==0
+ || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-12, &cksum))
+ || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8))
+ || memcmp(aMagic, aJournalMagic, 8)
+ || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, zMaster, len, szJ-16-len))
+ ){
+ return rc;
+ }
+
+ /* See if the checksum matches the master journal name */
+ for(u=0; u<len; u++){
+ cksum -= zMaster[u];
+ }
+ if( cksum ){
+ /* If the checksum doesn't add up, then one or more of the disk sectors
+ ** containing the master journal filename is corrupted. This means
+ ** definitely roll back, so just return SQLITE_OK and report a blank
+ ** master-journal filename.
+ */
+ len = 0;
+ }
+ zMaster[len] = '\0';
+
+ return SQLITE_OK;
+}
+
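+/*
+** For illustration only: the tail layout that readMasterJournal() above
+** parses from the end of a journal file of total size szJ bytes (written
+** by writeMasterJournal(), later in this file):
+**
+**   szJ-20-len ... szJ-17-len   PAGER_MJ_PGNO record header (4 bytes)
+**   szJ-16-len ... szJ-17      master journal name (len bytes, utf-8)
+**   szJ-16     ... szJ-13      len, big-endian 32 bits
+**   szJ-12     ... szJ-9       checksum, big-endian 32 bits
+**   szJ-8      ... szJ-1       aJournalMagic[] (8 bytes)
+**
+** The checksum is the plain byte-sum of the name, as in this sketch
+** (demoMasterJournalCksum() is hypothetical):
+*/
+#if 0
+static unsigned int demoMasterJournalCksum(const char *zName, int len){
+ unsigned int cksum = 0;
+ int i;
+ for(i=0; i<len; i++) cksum += zName[i]; /* Bytes added as (signed) chars */
+ return cksum;
+}
+#endif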
+/*
+** Return the offset of the sector boundary at or immediately
+** following the value in pPager->journalOff, assuming a sector
+** size of pPager->sectorSize bytes.
+**
+** i.e. for a sector size of 512:
+**
+** Pager.journalOff Return value
+** ---------------------------------------
+** 0 0
+** 512 512
+** 100 512
+** 2000 2048
+**
+*/
+static i64 journalHdrOffset(Pager *pPager){
+ i64 offset = 0;
+ i64 c = pPager->journalOff;
+ if( c ){
+ offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager);
+ }
+ assert( offset%JOURNAL_HDR_SZ(pPager)==0 );
+ assert( offset>=c );
+ assert( (offset-c)<JOURNAL_HDR_SZ(pPager) );
+ return offset;
+}
+
+/*
+** The journal file must be open when this function is called.
+**
+** This function is a no-op if the journal file has not been written to
+** within the current transaction (i.e. if Pager.journalOff==0).
+**
+** If doTruncate is non-zero or the Pager.journalSizeLimit variable is
+** set to 0, then truncate the journal file to zero bytes in size. Otherwise,
+** zero the 28-byte header at the start of the journal file. In either case,
+** if the pager is not in no-sync mode, sync the journal file immediately
+** after writing or truncating it.
+**
+** If Pager.journalSizeLimit is set to a positive, non-zero value, and
+** following the truncation or zeroing described above the size of the
+** journal file in bytes is larger than this value, then truncate the
+** journal file to Pager.journalSizeLimit bytes. The journal file does
+** not need to be synced following this operation.
+**
+** If an IO error occurs, abandon processing and return the IO error code.
+** Otherwise, return SQLITE_OK.
+*/
+static int zeroJournalHdr(Pager *pPager, int doTruncate){
+ int rc = SQLITE_OK; /* Return code */
+ assert( isOpen(pPager->jfd) );
+ assert( !sqlite3JournalIsInMemory(pPager->jfd) );
+ if( pPager->journalOff ){
+ const i64 iLimit = pPager->journalSizeLimit; /* Local cache of jsl */
+
+ IOTRACE(("JZEROHDR %p\n", pPager))
+ if( doTruncate || iLimit==0 ){
+ rc = sqlite3OsTruncate(pPager->jfd, 0);
+ }else{
+ static const char zeroHdr[28] = {0};
+ rc = sqlite3OsWrite(pPager->jfd, zeroHdr, sizeof(zeroHdr), 0);
+ }
+ if( rc==SQLITE_OK && !pPager->noSync ){
+ rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_DATAONLY|pPager->syncFlags);
+ }
+
+ /* At this point the transaction is committed but the write lock
+ ** is still held on the file. If there is a size limit configured for
+ ** the persistent journal and the journal file currently consumes more
+ ** space than that limit allows for, truncate it now. There is no need
+ ** to sync the file following this operation.
+ */
+ if( rc==SQLITE_OK && iLimit>0 ){
+ i64 sz;
+ rc = sqlite3OsFileSize(pPager->jfd, &sz);
+ if( rc==SQLITE_OK && sz>iLimit ){
+ rc = sqlite3OsTruncate(pPager->jfd, iLimit);
+ }
+ }
+ }
+ return rc;
+}
+
+/*
+** The journal file must be open when this routine is called. A journal
+** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the
+** current location.
+**
+** The format for the journal header is as follows:
+** - 8 bytes: Magic identifying journal format.
+** - 4 bytes: Number of records in journal, or -1 if no-sync mode is on.
+** - 4 bytes: Random number used for page hash.
+** - 4 bytes: Initial database page count.
+** - 4 bytes: Sector size used by the process that wrote this journal.
+** - 4 bytes: Database page size.
+**
+** Followed by (JOURNAL_HDR_SZ - 28) bytes of unused space.
+*/
+static int writeJournalHdr(Pager *pPager){
+ int rc = SQLITE_OK; /* Return code */
+ char *zHeader = pPager->pTmpSpace; /* Temporary space used to build header */
+ u32 nHeader = (u32)pPager->pageSize;/* Size of buffer pointed to by zHeader */
+ u32 nWrite; /* Bytes of header sector written */
+ int ii; /* Loop counter */
+
+ assert( isOpen(pPager->jfd) ); /* Journal file must be open. */
+
+ if( nHeader>JOURNAL_HDR_SZ(pPager) ){
+ nHeader = JOURNAL_HDR_SZ(pPager);
+ }
+
+ /* If there are active savepoints and any of them were created
+ ** since the most recent journal header was written, update the
+ ** PagerSavepoint.iHdrOffset fields now.
+ */
+ for(ii=0; ii<pPager->nSavepoint; ii++){
+ if( pPager->aSavepoint[ii].iHdrOffset==0 ){
+ pPager->aSavepoint[ii].iHdrOffset = pPager->journalOff;
+ }
+ }
+
+ pPager->journalHdr = pPager->journalOff = journalHdrOffset(pPager);
+
+ /*
+ ** Write the nRec Field - the number of page records that follow this
+ ** journal header. Normally, zero is written to this value at this time.
+ ** After the records are added to the journal (and the journal synced,
+ ** if in full-sync mode), the zero is overwritten with the true number
+ ** of records (see syncJournal()).
+ **
+ ** A faster alternative is to write 0xFFFFFFFF to the nRec field. When
+ ** reading the journal this value tells SQLite to assume that the
+ ** rest of the journal file contains valid page records.
This assumption
+ ** is dangerous, as if a failure occurred whilst writing to the journal
+ ** file it may contain some garbage data. There are two scenarios
+ ** where this risk can be ignored:
+ **
+ ** * When the pager is in no-sync mode. Corruption can follow a
+ ** power failure in this case anyway.
+ **
+ ** * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees
+ ** that garbage data is never appended to the journal file.
+ */
+ assert( isOpen(pPager->fd) || pPager->noSync );
+ if( pPager->noSync || (pPager->journalMode==PAGER_JOURNALMODE_MEMORY)
+ || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND)
+ ){
+ memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
+ put32bits(&zHeader[sizeof(aJournalMagic)], 0xffffffff);
+ }else{
+ memset(zHeader, 0, sizeof(aJournalMagic)+4);
+ }
+
+ /* The random check-hash initializer */
+ sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
+ put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit);
+ /* The initial database size */
+ put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize);
+ /* The assumed sector size for this process */
+ put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize);
+
+ /* The page size */
+ put32bits(&zHeader[sizeof(aJournalMagic)+16], pPager->pageSize);
+
+ /* Initializing the tail of the buffer is not necessary. Everything
+ ** works fine if the following memset() is omitted. But initializing
+ ** the memory prevents valgrind from complaining, so we are willing to
+ ** take the performance hit.
+ */
+ memset(&zHeader[sizeof(aJournalMagic)+20], 0,
+ nHeader-(sizeof(aJournalMagic)+20));
+
+ /* In theory, it is only necessary to write the 28 bytes that the
+ ** journal header consumes to the journal file here. Then increment the
+ ** Pager.journalOff variable by JOURNAL_HDR_SZ so that the next
+ ** record is written to the following sector (leaving a gap in the file
+ ** that will be implicitly filled in by the OS).
+ **
+ ** However it has been discovered that on some systems this pattern can
+ ** be significantly slower than contiguously writing data to the file,
+ ** even if that means explicitly writing data to the block of
+ ** (JOURNAL_HDR_SZ - 28) bytes that will not be used. So that is what
+ ** is done.
+ **
+ ** The loop is required here in case the sector-size is larger than the
+ ** database page size. Since the zHeader buffer is only Pager.pageSize
+ ** bytes in size, more than one call to sqlite3OsWrite() may be required
+ ** to populate the entire journal header sector.
+ */
+ for(nWrite=0; rc==SQLITE_OK&&nWrite<JOURNAL_HDR_SZ(pPager); nWrite+=nHeader){
+ IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, nHeader))
+ rc = sqlite3OsWrite(pPager->jfd, zHeader, nHeader, pPager->journalOff);
+ assert( pPager->journalHdr <= pPager->journalOff );
+ pPager->journalOff += nHeader;
+ }
+
+ return rc;
+}
+
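+/*
+** For illustration only: the 28 meaningful bytes that writeJournalHdr()
+** above serializes at the start of each journal header (offsets relative
+** to the header; the rest of the sector is zero padding). DemoJournalHdr
+** is a hypothetical struct, not one used by SQLite -- the real header is
+** written field-by-field in big-endian order:
+*/
+#if 0
+struct DemoJournalHdr {
+ unsigned char aMagic[8]; /* offset  0: aJournalMagic[]                 */
+ unsigned int nRec;       /* offset  8: record count, or 0xffffffff     */
+ unsigned int cksumInit;  /* offset 12: random checksum initializer     */
+ unsigned int dbOrigSize; /* offset 16: db size in pages before the txn */
+ unsigned int sectorSize; /* offset 20: sector size of writing process  */
+ unsigned int pageSize;   /* offset 24: database page size              */
+};
+#endif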
+/*
+** The journal file must be open when this is called. A journal header
+** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal
+** file. The current location in the journal file is given by
+** pPager->journalOff. See comments above function writeJournalHdr() for
+** a description of the journal header format.
+**
+** If the header is read successfully, *pNRec is set to the number of
+** page records following this header and *pDbSize is set to the size of the
+** database before the transaction began, in pages. Also, pPager->cksumInit
+** is set to the value read from the journal header. SQLITE_OK is returned
+** in this case.
+**
+** If the journal header appears to be corrupted, SQLITE_DONE is
+** returned and *pNRec and *pDbSize are undefined. If JOURNAL_HDR_SZ bytes
+** cannot be read from the journal file an error code is returned.
+*/
+static int readJournalHdr(
+ Pager *pPager, /* Pager object */
+ int isHot,
+ i64 journalSize, /* Size of the open journal file in bytes */
+ u32 *pNRec, /* OUT: Value read from the nRec field */
+ u32 *pDbSize /* OUT: Value of original database size field */
+){
+ int rc; /* Return code */
+ unsigned char aMagic[8]; /* A buffer to hold the magic header */
+ i64 iHdrOff; /* Offset of journal header being read */
+
+ assert( isOpen(pPager->jfd) ); /* Journal file must be open. */
+
+ /* Advance Pager.journalOff to the start of the next sector. If the
+ ** journal file is too small for there to be a header stored at this
+ ** point, return SQLITE_DONE.
+ */
+ pPager->journalOff = journalHdrOffset(pPager);
+ if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){
+ return SQLITE_DONE;
+ }
+ iHdrOff = pPager->journalOff;
+
+ /* Read in the first 8 bytes of the journal header. If they do not match
+ ** the magic string found at the start of each journal header, return
+ ** SQLITE_DONE. If an IO error occurs, return an error code. Otherwise,
+ ** proceed.
+ */
+ if( isHot || iHdrOff!=pPager->journalHdr ){
+ rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic), iHdrOff);
+ if( rc ){
+ return rc;
+ }
+ if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){
+ return SQLITE_DONE;
+ }
+ }
+
+ /* Read the first three 32-bit fields of the journal header: The nRec
+ ** field, the checksum-initializer and the database size at the start
+ ** of the transaction. Return an error code if anything goes wrong.
+ */
+ if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+8, pNRec))
+ || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+12, &pPager->cksumInit))
+ || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+16, pDbSize))
+ ){
+ return rc;
+ }
+
+ if( pPager->journalOff==0 ){
+ u32 iPageSize; /* Page-size field of journal header */
+ u32 iSectorSize; /* Sector-size field of journal header */
+
+ /* Read the page-size and sector-size journal header fields. */
+ if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+20, &iSectorSize))
+ || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+24, &iPageSize))
+ ){
+ return rc;
+ }
+
+ /* Versions of SQLite prior to 3.5.8 set the page-size field of the
+ ** journal header to zero. In this case, assume that the Pager.pageSize
+ ** variable is already set to the correct page size.
+ */
+ if( iPageSize==0 ){
+ iPageSize = pPager->pageSize;
+ }
+
+ /* Check that the values read from the page-size and sector-size fields
+ ** are within range. To be 'in range', both values need to be a power
+ ** of two greater than or equal to 512 (for the page-size) or 32 (for
+ ** the sector-size), and not greater than their respective compile time
+ ** maximum limits.
+ */
+ if( iPageSize<512 || iSectorSize<32
+ || iPageSize>SQLITE_MAX_PAGE_SIZE || iSectorSize>MAX_SECTOR_SIZE
+ || ((iPageSize-1)&iPageSize)!=0 || ((iSectorSize-1)&iSectorSize)!=0
+ ){
+ /* If either the page-size or sector-size in the journal-header is
+ ** invalid, then the process that wrote the journal-header must have
+ ** crashed before the header was synced. In this case stop reading
+ ** the journal file here.
+ */
+ return SQLITE_DONE;
+ }
+
+ /* Update the page-size to match the value read from the journal.
+ ** Use a testcase() macro to make sure that malloc failure within
+ ** PagerSetPagesize() is tested.
+ */
+ rc = sqlite3PagerSetPagesize(pPager, &iPageSize, -1);
+ testcase( rc!=SQLITE_OK );
+
+ /* Update the assumed sector-size to match the value used by
+ ** the process that created this journal. If this journal was
+ ** created by a process other than this one, then this routine
+ ** is being called from within pager_playback(). The local value
+ ** of Pager.sectorSize is restored at the end of that routine.
+ */
+ pPager->sectorSize = iSectorSize;
+ }
+
+ pPager->journalOff += JOURNAL_HDR_SZ(pPager);
+ return rc;
+}
+
+
+/*
+** Write the supplied master journal name into the journal file for pager
+** pPager at the current location. The master journal name must be the last
+** thing written to a journal file. If the pager is in full-sync mode, the
+** journal file descriptor is advanced to the next sector boundary before
+** anything is written. The format is:
+**
+** + 4 bytes: PAGER_MJ_PGNO.
+** + N bytes: Master journal filename in utf-8.
+** + 4 bytes: N (length of master journal name in bytes, no nul-terminator).
+** + 4 bytes: Master journal name checksum.
+** + 8 bytes: aJournalMagic[].
+**
+** The master journal page checksum is the sum of the bytes in the master
+** journal name, where each byte is interpreted as a signed 8-bit integer.
+**
+** If zMaster is a NULL pointer (occurs for a single database transaction),
+** this call is a no-op.
+*/
+static int writeMasterJournal(Pager *pPager, const char *zMaster){
+ int rc; /* Return code */
+ int nMaster; /* Length of string zMaster */
+ i64 iHdrOff; /* Offset of header in journal file */
+ i64 jrnlSize; /* Size of journal file on disk */
+ u32 cksum = 0; /* Checksum of string zMaster */
+
+ assert( pPager->setMaster==0 );
+ assert( !pagerUseWal(pPager) );
+
+ if( !zMaster
+ || pPager->journalMode==PAGER_JOURNALMODE_MEMORY
+ || !isOpen(pPager->jfd)
+ ){
+ return SQLITE_OK;
+ }
+ pPager->setMaster = 1;
+ assert( pPager->journalHdr <= pPager->journalOff );
+
+ /* Calculate the length in bytes and the checksum of zMaster */
+ for(nMaster=0; zMaster[nMaster]; nMaster++){
+ cksum += zMaster[nMaster];
+ }
+
+ /* If in full-sync mode, advance to the next disk sector before writing
+ ** the master journal name. This is in case the previous page written to
+ ** the journal has already been synced.
+ */
+ if( pPager->fullSync ){
+ pPager->journalOff = journalHdrOffset(pPager);
+ }
+ iHdrOff = pPager->journalOff;
+
+ /* Write the master journal data to the end of the journal file. If
+ ** an error occurs, return the error code to the caller.
+ */
+ if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_MJ_PGNO(pPager))))
+ || (0 != (rc = sqlite3OsWrite(pPager->jfd, zMaster, nMaster, iHdrOff+4)))
+ || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster, nMaster)))
+ || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster+4, cksum)))
+ || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8,
+ iHdrOff+4+nMaster+8)))
+ ){
+ return rc;
+ }
+ pPager->journalOff += (nMaster+20);
+
+ /* If the pager is in persistent-journal mode, then the physical
+ ** journal-file may extend past the end of the master-journal name
+ ** and 8 bytes of magic data just written to the file. This is
+ ** dangerous because the code to roll back a hot-journal file
+ ** will not be able to find the master-journal name to determine
+ ** whether or not the journal is hot.
+ **
+ ** The easiest thing to do in this scenario is to truncate the journal
+ ** file to the required size.
+ */
+ if( SQLITE_OK==(rc = sqlite3OsFileSize(pPager->jfd, &jrnlSize))
+ && jrnlSize>pPager->journalOff
+ ){
+ rc = sqlite3OsTruncate(pPager->jfd, pPager->journalOff);
+ }
+ return rc;
+}
+
+/*
+** Discard the entire contents of the in-memory page-cache.
+*/
+static void pager_reset(Pager *pPager){
+ pPager->iDataVersion++;
+ sqlite3BackupRestart(pPager->pBackup);
+ sqlite3PcacheClear(pPager->pPCache);
+}
+
+/*
+** Return the pPager->iDataVersion value
+*/
+SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager *pPager){
+ assert( pPager->eState>PAGER_OPEN );
+ return pPager->iDataVersion;
+}
+
+/*
+** Free all structures in the Pager.aSavepoint[] array and set both
+** Pager.aSavepoint and Pager.nSavepoint to zero. Close the sub-journal
+** if it is open and the pager is not in exclusive mode.
+*/
+static void releaseAllSavepoints(Pager *pPager){
+ int ii; /* Iterator for looping through Pager.aSavepoint */
+ for(ii=0; ii<pPager->nSavepoint; ii++){
+ sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint);
+ }
+ if( !pPager->exclusiveMode || sqlite3JournalIsInMemory(pPager->sjfd) ){
+ sqlite3OsClose(pPager->sjfd);
+ }
+ sqlite3_free(pPager->aSavepoint);
+ pPager->aSavepoint = 0;
+ pPager->nSavepoint = 0;
+ pPager->nSubRec = 0;
+}
+
+/*
+** Set the bit number pgno in the PagerSavepoint.pInSavepoint
+** bitvecs of all open savepoints. Return SQLITE_OK if successful
+** or SQLITE_NOMEM if a malloc failure occurs.
+*/
+static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){
+ int ii; /* Loop counter */
+ int rc = SQLITE_OK; /* Result code */
+
+ for(ii=0; ii<pPager->nSavepoint; ii++){
+ PagerSavepoint *p = &pPager->aSavepoint[ii];
+ if( pgno<=p->nOrig ){
+ rc |= sqlite3BitvecSet(p->pInSavepoint, pgno);
+ testcase( rc==SQLITE_NOMEM );
+ assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+ }
+ }
+ return rc;
+}
+
+/*
+** This function is a no-op if the pager is in exclusive mode and not
+** in the ERROR state. Otherwise, it switches the pager to PAGER_OPEN
+** state.
+**
+** If the pager is not in exclusive-access mode, the database file is
+** completely unlocked. If the file is unlocked and the file-system does
+** not exhibit the UNDELETABLE_WHEN_OPEN property, the journal file is
+** closed (if it is open).
+**
+** If the pager is in ERROR state when this function is called, the
+** contents of the pager cache are discarded before switching back to
+** the OPEN state. Regardless of whether the pager is in exclusive-mode
+** or not, any journal file left in the file-system will be treated
+** as a hot-journal and rolled back the next time a read-transaction
+** is opened (by this or by any other connection).
+*/
+static void pager_unlock(Pager *pPager){
+
+ assert( pPager->eState==PAGER_READER
+ || pPager->eState==PAGER_OPEN
+ || pPager->eState==PAGER_ERROR
+ );
+
+ sqlite3BitvecDestroy(pPager->pInJournal);
+ pPager->pInJournal = 0;
+ releaseAllSavepoints(pPager);
+
+ if( pagerUseWal(pPager) ){
+ assert( !isOpen(pPager->jfd) );
+ sqlite3WalEndReadTransaction(pPager->pWal);
+ pPager->eState = PAGER_OPEN;
+ }else if( !pPager->exclusiveMode ){
+ int rc; /* Error code returned by pagerUnlockDb() */
+ int iDc = isOpen(pPager->fd)?sqlite3OsDeviceCharacteristics(pPager->fd):0;
+
+ /* If the operating system supports deletion of open files, then
+ ** close the journal file when dropping the database lock. Otherwise
+ ** another connection with journal_mode=delete might delete the file
+ ** out from under us.
+ */
+ assert( (PAGER_JOURNALMODE_MEMORY & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_OFF & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_WAL & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_DELETE & 5)!=1 );
+ assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 );
+ assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 );
+ if( 0==(iDc & SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN)
+ || 1!=(pPager->journalMode & 5)
+ ){
+ sqlite3OsClose(pPager->jfd);
+ }
+
+ /* If the pager is in the ERROR state and the call to unlock the database
+ ** file fails, set the current lock to UNKNOWN_LOCK. See the comment
+ ** above the #define for UNKNOWN_LOCK for an explanation of why this
+ ** is necessary.
+ */
+ rc = pagerUnlockDb(pPager, NO_LOCK);
+ if( rc!=SQLITE_OK && pPager->eState==PAGER_ERROR ){
+ pPager->eLock = UNKNOWN_LOCK;
+ }
+
+ /* The pager state may be changed from PAGER_ERROR to PAGER_OPEN here
+ ** without clearing the error code. This is intentional - the error
+ ** code is cleared and the cache reset in the block below.
+ */
+ assert( pPager->errCode || pPager->eState!=PAGER_ERROR );
+ pPager->changeCountDone = 0;
+ pPager->eState = PAGER_OPEN;
+ }
+
+ /* If Pager.errCode is set, the contents of the pager cache cannot be
+ ** trusted. Now that there are no outstanding references to the pager,
+ ** it can safely move back to PAGER_OPEN state. This happens in both
+ ** normal and exclusive-locking mode.
+ */
+ assert( pPager->errCode==SQLITE_OK || !MEMDB );
+ if( pPager->errCode ){
+ if( pPager->tempFile==0 ){
+ pager_reset(pPager);
+ pPager->changeCountDone = 0;
+ pPager->eState = PAGER_OPEN;
+ }else{
+ pPager->eState = (isOpen(pPager->jfd) ? PAGER_OPEN : PAGER_READER);
+ }
+ if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0);
+ pPager->errCode = SQLITE_OK;
+ setGetterMethod(pPager);
+ }
+
+ pPager->journalOff = 0;
+ pPager->journalHdr = 0;
+ pPager->setMaster = 0;
+}
+
+/*
+** This function is called whenever an IOERR or FULL error that requires
+** the pager to transition into the ERROR state may have occurred.
+** The first argument is a pointer to the pager structure, the second
+** the error-code about to be returned by a pager API function. The
+** value returned is a copy of the second argument to this function.
+**
+** If the second argument is SQLITE_FULL, SQLITE_IOERR or one of the
+** IOERR sub-codes, the pager enters the ERROR state and the error code
+** is stored in Pager.errCode. While the pager remains in the ERROR state,
+** all major API calls on the Pager will immediately return Pager.errCode.
+**
+** The ERROR state indicates that the contents of the pager-cache
+** cannot be trusted. This state can be cleared by completely discarding
+** the contents of the pager-cache. If a transaction was active when
+** the persistent error occurred, then the rollback journal may need
+** to be replayed to restore the contents of the database file (as if
+** it were a hot-journal).
+*/
+static int pager_error(Pager *pPager, int rc){
+ int rc2 = rc & 0xff;
+ assert( rc==SQLITE_OK || !MEMDB );
+ assert(
+ pPager->errCode==SQLITE_FULL ||
+ pPager->errCode==SQLITE_OK ||
+ (pPager->errCode & 0xff)==SQLITE_IOERR
+ );
+ if( rc2==SQLITE_FULL || rc2==SQLITE_IOERR ){
+ pPager->errCode = rc;
+ pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
+ }
+ return rc;
+}
+
+static int pager_truncate(Pager *pPager, Pgno nPage);
+
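+/*
+** For illustration only: pager_error() above classifies errors by their
+** primary result code, obtained by masking off the extended-code bits in
+** the upper bytes. A self-contained sketch of the idiom (the DEMO_*
+** constants mirror the values of SQLITE_IOERR and SQLITE_IOERR_FSYNC):
+*/
+#if 0
+#define DEMO_IOERR       10             /* Primary code (== SQLITE_IOERR) */
+#define DEMO_IOERR_FSYNC (10 | (4<<8))  /* Extended code, primary in low byte */
+static int demoPrimaryCode(int rc){
+ return rc & 0xff;
+}
+/* demoPrimaryCode(DEMO_IOERR_FSYNC)==DEMO_IOERR */
+#endif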
+/*
+** The write transaction open on pPager is being committed (bCommit==1)
+** or rolled back (bCommit==0).
+**
+** Return TRUE if and only if all dirty pages should be flushed to disk.
+**
+** Rules:
+**
+** * For non-TEMP databases, always sync to disk. This is necessary
+** for transactions to be durable.
+**
+** * Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing
+** file has been created already (via a spill on pagerStress()) and
+** when the number of dirty pages in memory exceeds 25% of the total
+** cache size.
+*/
+static int pagerFlushOnCommit(Pager *pPager, int bCommit){
+ if( pPager->tempFile==0 ) return 1;
+ if( !bCommit ) return 0;
+ if( !isOpen(pPager->fd) ) return 0;
+ return (sqlite3PCachePercentDirty(pPager->pPCache)>=25);
+}
+
+/*
+** This routine ends a transaction. A transaction is usually ended by
+** either a COMMIT or a ROLLBACK operation. This routine may be called
+** after rollback of a hot-journal, or if an error occurs while opening
+** the journal file or writing the very first journal-header of a
+** database transaction.
+**
+** This routine is never called in PAGER_ERROR state. If it is called
+** in PAGER_OPEN or PAGER_READER state and the lock held is less
+** exclusive than a RESERVED lock, it is a no-op.
+**
+** Otherwise, any active savepoints are released.
+**
+** If the journal file is open, then it is "finalized". Once a journal
+** file has been finalized it is not possible to use it to roll back a
+** transaction. Nor will it be considered to be a hot-journal by this
+** or any other database connection. Exactly how a journal is finalized
+** depends on whether or not the pager is running in exclusive mode and
+** the current journal-mode (Pager.journalMode value), as follows:
+**
+** journalMode==MEMORY
+** Journal file descriptor is simply closed. This destroys an
+** in-memory journal.
+**
+** journalMode==TRUNCATE
+** Journal file is truncated to zero bytes in size.
+**
+** journalMode==PERSIST
+** The first 28 bytes of the journal file are zeroed. This invalidates
+** the first journal header in the file, and hence the entire journal
+** file. An invalid journal file cannot be rolled back.
+**
+** journalMode==DELETE
+** The journal file is closed and deleted using sqlite3OsDelete().
+**
+** If the pager is running in exclusive mode, this method of finalizing
+** the journal file is never used. Instead, if the journalMode is
+** DELETE and the pager is in exclusive mode, the method described under
+** journalMode==PERSIST is used instead.
+**
+** After the journal is finalized, the pager moves to PAGER_READER state.
+** If running in non-exclusive rollback mode, the lock on the file is
+** downgraded to a SHARED_LOCK.
+**
+** SQLITE_OK is returned if no error occurs. If an error occurs during
+** any of the IO operations to finalize the journal file or unlock the
+** database then the IO error code is returned to the user. If the
+** operation to finalize the journal file fails, then the code still
+** tries to unlock the database file if not in exclusive mode. If the
+** unlock operation fails as well, then the error code related
+** to the first error encountered (the journal finalization one) is
+** returned.
+*/
+static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){
+ int rc = SQLITE_OK; /* Error code from journal finalization operation */
+ int rc2 = SQLITE_OK; /* Error code from db file unlock operation */
+
+ /* Do nothing if the pager does not have an open write transaction
+ ** or at least a RESERVED lock. This function may be called when there
+ ** is no write-transaction active but a RESERVED or greater lock is
+ ** held under two circumstances:
+ **
+ ** 1.
After a successful hot-journal rollback, it is called with
+  **      eState==PAGER_NONE and eLock==EXCLUSIVE_LOCK.
+  **
+  **   2. If a connection with locking_mode=exclusive holding an EXCLUSIVE
+  **      lock switches back to locking_mode=normal and then executes a
+  **      read-transaction, this function is called with eState==PAGER_READER
+  **      and eLock==EXCLUSIVE_LOCK when the read-transaction is closed.
+  */
+  assert( assert_pager_state(pPager) );
+  assert( pPager->eState!=PAGER_ERROR );
+  if( pPager->eState<PAGER_WRITER_LOCKED && pPager->eLock<RESERVED_LOCK ){
+    return SQLITE_OK;
+  }
+
+  releaseAllSavepoints(pPager);
+  assert( isOpen(pPager->jfd) || pPager->pInJournal==0 );
+  if( isOpen(pPager->jfd) ){
+    assert( !pagerUseWal(pPager) );
+
+    /* Finalize the journal file. */
+    if( sqlite3JournalIsInMemory(pPager->jfd) ){
+      /* assert( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ); */
+      sqlite3OsClose(pPager->jfd);
+    }else if( pPager->journalMode==PAGER_JOURNALMODE_TRUNCATE ){
+      if( pPager->journalOff==0 ){
+        rc = SQLITE_OK;
+      }else{
+        rc = sqlite3OsTruncate(pPager->jfd, 0);
+        if( rc==SQLITE_OK && pPager->fullSync ){
+          /* Make sure the new file size is written into the inode right away.
+          ** Otherwise the journal might resurrect following a power loss and
+          ** cause the last transaction to roll back. See
+          ** https://bugzilla.mozilla.org/show_bug.cgi?id=1072773
+          */
+          rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
+        }
+      }
+      pPager->journalOff = 0;
+    }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST
+      || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL)
+    ){
+      rc = zeroJournalHdr(pPager, hasMaster||pPager->tempFile);
+      pPager->journalOff = 0;
+    }else{
+      /* This branch may be executed with Pager.journalMode==MEMORY if
+      ** a hot-journal was just rolled back. In this case the journal
+      ** file should be closed and deleted. If this connection writes to
+      ** the database file, it will do so using an in-memory journal.
+      */
+      int bDelete = !pPager->tempFile;
+      assert( sqlite3JournalIsInMemory(pPager->jfd)==0 );
+      assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE
+           || pPager->journalMode==PAGER_JOURNALMODE_MEMORY
+           || pPager->journalMode==PAGER_JOURNALMODE_WAL
+      );
+      sqlite3OsClose(pPager->jfd);
+      if( bDelete ){
+        rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, pPager->extraSync);
+      }
+    }
+  }
+
+#ifdef SQLITE_CHECK_PAGES
+  sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash);
+  if( pPager->dbSize==0 && sqlite3PcacheRefCount(pPager->pPCache)>0 ){
+    PgHdr *p = sqlite3PagerLookup(pPager, 1);
+    if( p ){
+      p->pageHash = 0;
+      sqlite3PagerUnrefNotNull(p);
+    }
+  }
+#endif
+
+  sqlite3BitvecDestroy(pPager->pInJournal);
+  pPager->pInJournal = 0;
+  pPager->nRec = 0;
+  if( rc==SQLITE_OK ){
+    if( MEMDB || pagerFlushOnCommit(pPager, bCommit) ){
+      sqlite3PcacheCleanAll(pPager->pPCache);
+    }else{
+      sqlite3PcacheClearWritable(pPager->pPCache);
+    }
+    sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize);
+  }
+
+  if( pagerUseWal(pPager) ){
+    /* Drop the WAL write-lock, if any. Also, if the connection was in
+    ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE
+    ** lock held on the database file.
+    */
+    rc2 = sqlite3WalEndWriteTransaction(pPager->pWal);
+    assert( rc2==SQLITE_OK );
+  }else if( rc==SQLITE_OK && bCommit && pPager->dbFileSize>pPager->dbSize ){
+    /* This branch is taken when committing a transaction in rollback-journal
+    ** mode if the database file on disk is larger than the database image.
+    ** At this point the journal has been finalized and the transaction
+    ** successfully committed, but the EXCLUSIVE lock is still held on the
+    ** file.
So it is safe to truncate the database file to its minimum
+    ** required size. */
+    assert( pPager->eLock==EXCLUSIVE_LOCK );
+    rc = pager_truncate(pPager, pPager->dbSize);
+  }
+
+  if( rc==SQLITE_OK && bCommit && isOpen(pPager->fd) ){
+    rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_COMMIT_PHASETWO, 0);
+    if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+  }
+
+  if( !pPager->exclusiveMode
+   && (!pagerUseWal(pPager) || sqlite3WalExclusiveMode(pPager->pWal, 0))
+  ){
+    rc2 = pagerUnlockDb(pPager, SHARED_LOCK);
+    pPager->changeCountDone = 0;
+  }
+  pPager->eState = PAGER_READER;
+  pPager->setMaster = 0;
+
+  return (rc==SQLITE_OK?rc2:rc);
+}
+
+/*
+** Execute a rollback if a transaction is active and unlock the
+** database file.
+**
+** If the pager has already entered the ERROR state, do not attempt
+** the rollback at this time. Instead, pager_unlock() is called. The
+** call to pager_unlock() will discard all in-memory pages, unlock
+** the database file and move the pager back to OPEN state. If this
+** means that there is a hot-journal left in the file-system, the next
+** connection to obtain a shared lock on the pager (which may be this one)
+** will roll it back.
+**
+** If the pager has not already entered the ERROR state, but an IO or
+** malloc error occurs during a rollback, then this will itself cause
+** the pager to enter the ERROR state. Which will be cleared by the
+** call to pager_unlock(), as described above.
+*/
+static void pagerUnlockAndRollback(Pager *pPager){
+  if( pPager->eState!=PAGER_ERROR && pPager->eState!=PAGER_OPEN ){
+    assert( assert_pager_state(pPager) );
+    if( pPager->eState>=PAGER_WRITER_LOCKED ){
+      sqlite3BeginBenignMalloc();
+      sqlite3PagerRollback(pPager);
+      sqlite3EndBenignMalloc();
+    }else if( !pPager->exclusiveMode ){
+      assert( pPager->eState==PAGER_READER );
+      pager_end_transaction(pPager, 0, 0);
+    }
+  }
+  pager_unlock(pPager);
+}
+
+/*
+** Parameter aData must point to a buffer of pPager->pageSize bytes
+** of data. Compute and return a checksum based on the contents of the
+** page of data and the current value of pPager->cksumInit.
+**
+** This is not a real checksum. It is really just the sum of the
+** random initial value (pPager->cksumInit) and every 200th byte
+** of the page data, starting with byte offset (pPager->pageSize%200).
+** Each byte is interpreted as an 8-bit unsigned integer.
+**
+** Changing the formula used to compute this checksum results in an
+** incompatible journal file format.
+**
+** If journal corruption occurs due to a power failure, the most likely
+** scenario is that one end or the other of the record will be changed.
+** It is much less likely that the two ends of the journal record will be
+** correct and the middle be corrupt. Thus, this "checksum" scheme,
+** though fast and simple, catches the most likely kind of corruption.
+*/
+static u32 pager_cksum(Pager *pPager, const u8 *aData){
+  u32 cksum = pPager->cksumInit;         /* Checksum value to return */
+  int i = pPager->pageSize-200;          /* Loop counter */
+  while( i>0 ){
+    cksum += aData[i];
+    i -= 200;
+  }
+  return cksum;
+}
+
+/*
+** Report the current page size and number of reserved bytes back
+** to the codec.
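+**
+** (An aside on pager_cksum() above: with a hypothetical 1024-byte page
+** and cksumInit==0, the "checksum" is simply the sum of the bytes at
+** offsets 824, 624, 424, 224 and 24 of the page image, since the loop
+** starts at pageSize-200 and steps down by 200.)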
+*/ +#ifdef SQLITE_HAS_CODEC +static void pagerReportSize(Pager *pPager){ + if( pPager->xCodecSizeChng ){ + pPager->xCodecSizeChng(pPager->pCodec, pPager->pageSize, + (int)pPager->nReserve); + } +} +#else +# define pagerReportSize(X) /* No-op if we do not support a codec */ +#endif + +#ifdef SQLITE_HAS_CODEC +/* +** Make sure the number of reserved bits is the same in the destination +** pager as it is in the source. This comes up when a VACUUM changes the +** number of reserved bits to the "optimal" amount. +*/ +SQLITE_PRIVATE void sqlite3PagerAlignReserve(Pager *pDest, Pager *pSrc){ + if( pDest->nReserve!=pSrc->nReserve ){ + pDest->nReserve = pSrc->nReserve; + pagerReportSize(pDest); + } +} +#endif + +/* +** Read a single page from either the journal file (if isMainJrnl==1) or +** from the sub-journal (if isMainJrnl==0) and playback that page. +** The page begins at offset *pOffset into the file. The *pOffset +** value is increased to the start of the next page in the journal. +** +** The main rollback journal uses checksums - the statement journal does +** not. +** +** If the page number of the page record read from the (sub-)journal file +** is greater than the current value of Pager.dbSize, then playback is +** skipped and SQLITE_OK is returned. +** +** If pDone is not NULL, then it is a record of pages that have already +** been played back. If the page at *pOffset has already been played back +** (if the corresponding pDone bit is set) then skip the playback. +** Make sure the pDone bit corresponding to the *pOffset page is set +** prior to returning. +** +** If the page record is successfully read from the (sub-)journal file +** and played back, then SQLITE_OK is returned. If an IO error occurs +** while reading the record from the (sub-)journal file or while writing +** to the database file, then the IO error code is returned. If data +** is successfully read from the (sub-)journal file but appears to be +** corrupted, SQLITE_DONE is returned. Data is considered corrupted in +** two circumstances: +** +** * If the record page-number is illegal (0 or PAGER_MJ_PGNO), or +** * If the record is being rolled back from the main journal file +** and the checksum field does not match the record content. +** +** Neither of these two scenarios are possible during a savepoint rollback. +** +** If this is a savepoint rollback, then memory may have to be dynamically +** allocated by this function. If this is the case and an allocation fails, +** SQLITE_NOMEM is returned. +*/ +static int pager_playback_one_page( + Pager *pPager, /* The pager being played back */ + i64 *pOffset, /* Offset of record to playback */ + Bitvec *pDone, /* Bitvec of pages already played back */ + int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. 
*/ + int isSavepnt /* True for a savepoint rollback */ +){ + int rc; + PgHdr *pPg; /* An existing page in the cache */ + Pgno pgno; /* The page number of a page in journal */ + u32 cksum; /* Checksum used for sanity checking */ + char *aData; /* Temporary storage for the page */ + sqlite3_file *jfd; /* The file descriptor for the journal file */ + int isSynced; /* True if journal page is synced */ + + assert( (isMainJrnl&~1)==0 ); /* isMainJrnl is 0 or 1 */ + assert( (isSavepnt&~1)==0 ); /* isSavepnt is 0 or 1 */ + assert( isMainJrnl || pDone ); /* pDone always used on sub-journals */ + assert( isSavepnt || pDone==0 ); /* pDone never used on non-savepoint */ + + aData = pPager->pTmpSpace; + assert( aData ); /* Temp storage must have already been allocated */ + assert( pagerUseWal(pPager)==0 || (!isMainJrnl && isSavepnt) ); + + /* Either the state is greater than PAGER_WRITER_CACHEMOD (a transaction + ** or savepoint rollback done at the request of the caller) or this is + ** a hot-journal rollback. If it is a hot-journal rollback, the pager + ** is in state OPEN and holds an EXCLUSIVE lock. Hot-journal rollback + ** only reads from the main journal, not the sub-journal. + */ + assert( pPager->eState>=PAGER_WRITER_CACHEMOD + || (pPager->eState==PAGER_OPEN && pPager->eLock==EXCLUSIVE_LOCK) + ); + assert( pPager->eState>=PAGER_WRITER_CACHEMOD || isMainJrnl ); + + /* Read the page number and page data from the journal or sub-journal + ** file. Return an error code to the caller if an IO error occurs. + */ + jfd = isMainJrnl ? pPager->jfd : pPager->sjfd; + rc = read32bits(jfd, *pOffset, &pgno); + if( rc!=SQLITE_OK ) return rc; + rc = sqlite3OsRead(jfd, (u8*)aData, pPager->pageSize, (*pOffset)+4); + if( rc!=SQLITE_OK ) return rc; + *pOffset += pPager->pageSize + 4 + isMainJrnl*4; + + /* Sanity checking on the page. This is more important that I originally + ** thought. If a power failure occurs while the journal is being written, + ** it could cause invalid data to be written into the journal. We need to + ** detect this invalid data (with high probability) and ignore it. + */ + if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ + assert( !isSavepnt ); + return SQLITE_DONE; + } + if( pgno>(Pgno)pPager->dbSize || sqlite3BitvecTest(pDone, pgno) ){ + return SQLITE_OK; + } + if( isMainJrnl ){ + rc = read32bits(jfd, (*pOffset)-4, &cksum); + if( rc ) return rc; + if( !isSavepnt && pager_cksum(pPager, (u8*)aData)!=cksum ){ + return SQLITE_DONE; + } + } + + /* If this page has already been played back before during the current + ** rollback, then don't bother to play it back again. + */ + if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){ + return rc; + } + + /* When playing back page 1, restore the nReserve setting + */ + if( pgno==1 && pPager->nReserve!=((u8*)aData)[20] ){ + pPager->nReserve = ((u8*)aData)[20]; + pagerReportSize(pPager); + } + + /* If the pager is in CACHEMOD state, then there must be a copy of this + ** page in the pager cache. In this case just update the pager cache, + ** not the database file. The page is left marked dirty in this case. + ** + ** An exception to the above rule: If the database is in no-sync mode + ** and a page is moved during an incremental vacuum then the page may + ** not be in the pager cache. Later: if a malloc() or IO error occurs + ** during a Movepage() call, then the page may not be in the cache + ** either. So the condition described in the above paragraph is not + ** assert()able. 
+ ** + ** If in WRITER_DBMOD, WRITER_FINISHED or OPEN state, then we update the + ** pager cache if it exists and the main file. The page is then marked + ** not dirty. Since this code is only executed in PAGER_OPEN state for + ** a hot-journal rollback, it is guaranteed that the page-cache is empty + ** if the pager is in OPEN state. + ** + ** Ticket #1171: The statement journal might contain page content that is + ** different from the page content at the start of the transaction. + ** This occurs when a page is changed prior to the start of a statement + ** then changed again within the statement. When rolling back such a + ** statement we must not write to the original database unless we know + ** for certain that original page contents are synced into the main rollback + ** journal. Otherwise, a power loss might leave modified data in the + ** database file without an entry in the rollback journal that can + ** restore the database to its original form. Two conditions must be + ** met before writing to the database files. (1) the database must be + ** locked. (2) we know that the original page content is fully synced + ** in the main journal either because the page is not in cache or else + ** the page is marked as needSync==0. + ** + ** 2008-04-14: When attempting to vacuum a corrupt database file, it + ** is possible to fail a statement on a database that does not yet exist. + ** Do not attempt to write if database file has never been opened. + */ + if( pagerUseWal(pPager) ){ + pPg = 0; + }else{ + pPg = sqlite3PagerLookup(pPager, pgno); + } + assert( pPg || !MEMDB ); + assert( pPager->eState!=PAGER_OPEN || pPg==0 || pPager->tempFile ); + PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n", + PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, (u8*)aData), + (isMainJrnl?"main-journal":"sub-journal") + )); + if( isMainJrnl ){ + isSynced = pPager->noSync || (*pOffset <= pPager->journalHdr); + }else{ + isSynced = (pPg==0 || 0==(pPg->flags & PGHDR_NEED_SYNC)); + } + if( isOpen(pPager->fd) + && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) + && isSynced + ){ + i64 ofst = (pgno-1)*(i64)pPager->pageSize; + testcase( !isSavepnt && pPg!=0 && (pPg->flags&PGHDR_NEED_SYNC)!=0 ); + assert( !pagerUseWal(pPager) ); + rc = sqlite3OsWrite(pPager->fd, (u8 *)aData, pPager->pageSize, ofst); + if( pgno>pPager->dbFileSize ){ + pPager->dbFileSize = pgno; + } + if( pPager->pBackup ){ + CODEC1(pPager, aData, pgno, 3, rc=SQLITE_NOMEM_BKPT); + sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)aData); + CODEC2(pPager, aData, pgno, 7, rc=SQLITE_NOMEM_BKPT, aData); + } + }else if( !isMainJrnl && pPg==0 ){ + /* If this is a rollback of a savepoint and data was not written to + ** the database and the page is not in-memory, there is a potential + ** problem. When the page is next fetched by the b-tree layer, it + ** will be read from the database file, which may or may not be + ** current. + ** + ** There are a couple of different ways this can happen. All are quite + ** obscure. When running in synchronous mode, this can only happen + ** if the page is on the free-list at the start of the transaction, then + ** populated, then moved using sqlite3PagerMovepage(). + ** + ** The solution is to add an in-memory page to the cache containing + ** the data just read from the sub-journal. Mark the page as dirty + ** and if the pager requires a journal-sync, then mark the page as + ** requiring a journal-sync before it is written. 
+ */ + assert( isSavepnt ); + assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)==0 ); + pPager->doNotSpill |= SPILLFLAG_ROLLBACK; + rc = sqlite3PagerGet(pPager, pgno, &pPg, 1); + assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); + pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; + if( rc!=SQLITE_OK ) return rc; + sqlite3PcacheMakeDirty(pPg); + } + if( pPg ){ + /* No page should ever be explicitly rolled back that is in use, except + ** for page 1 which is held in use in order to keep the lock on the + ** database active. However such a page may be rolled back as a result + ** of an internal error resulting in an automatic call to + ** sqlite3PagerRollback(). + */ + void *pData; + pData = pPg->pData; + memcpy(pData, (u8*)aData, pPager->pageSize); + pPager->xReiniter(pPg); + /* It used to be that sqlite3PcacheMakeClean(pPg) was called here. But + ** that call was dangerous and had no detectable benefit since the cache + ** is normally cleaned by sqlite3PcacheCleanAll() after rollback and so + ** has been removed. */ + pager_set_pagehash(pPg); + + /* If this was page 1, then restore the value of Pager.dbFileVers. + ** Do this before any decoding. */ + if( pgno==1 ){ + memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers)); + } + + /* Decode the page just read from disk */ + CODEC1(pPager, pData, pPg->pgno, 3, rc=SQLITE_NOMEM_BKPT); + sqlite3PcacheRelease(pPg); + } + return rc; +} + +/* +** Parameter zMaster is the name of a master journal file. A single journal +** file that referred to the master journal file has just been rolled back. +** This routine checks if it is possible to delete the master journal file, +** and does so if it is. +** +** Argument zMaster may point to Pager.pTmpSpace. So that buffer is not +** available for use within this function. +** +** When a master journal file is created, it is populated with the names +** of all of its child journals, one after another, formatted as utf-8 +** encoded text. The end of each child journal file is marked with a +** nul-terminator byte (0x00). i.e. the entire contents of a master journal +** file for a transaction involving two databases might be: +** +** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00" +** +** A master journal file may only be deleted once all of its child +** journals have been rolled back. +** +** This function reads the contents of the master-journal file into +** memory and loops through each of the child journal names. For +** each child journal, it checks if: +** +** * if the child journal exists, and if so +** * if the child journal contains a reference to master journal +** file zMaster +** +** If a child journal can be found that matches both of the criteria +** above, this function returns without doing anything. Otherwise, if +** no such child journal can be found, file zMaster is deleted from +** the file-system using sqlite3OsDelete(). +** +** If an IO error within this function, an error code is returned. This +** function allocates memory by calling sqlite3Malloc(). If an allocation +** fails, SQLITE_NOMEM is returned. Otherwise, if no IO or malloc errors +** occur, SQLITE_OK is returned. +** +** TODO: This function allocates a single block of memory to load +** the entire contents of the master journal file. This could be +** a couple of kilobytes or so - potentially larger than the page +** size. 
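+**
+** As an informal sketch (using the variable names from the code below),
+** the nul-separated child names are walked by advancing past each
+** terminator:
+**
+**     zJournal = zMasterJournal;
+**     while( (zJournal-zMasterJournal)<nMasterJournal ){
+**       /* zJournal points at one child journal name */
+**       zJournal += sqlite3Strlen30(zJournal)+1;
+**     }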
+*/
+static int pager_delmaster(Pager *pPager, const char *zMaster){
+  sqlite3_vfs *pVfs = pPager->pVfs;
+  int rc;                   /* Return code */
+  sqlite3_file *pMaster;    /* Malloc'd master-journal file descriptor */
+  sqlite3_file *pJournal;   /* Malloc'd child-journal file descriptor */
+  char *zMasterJournal = 0; /* Contents of master journal file */
+  i64 nMasterJournal;       /* Size of master journal file */
+  char *zJournal;           /* Pointer to one journal within MJ file */
+  char *zMasterPtr;         /* Space to hold MJ filename from a journal file */
+  int nMasterPtr;           /* Amount of space allocated to zMasterPtr[] */
+
+  /* Allocate space for both the pJournal and pMaster file descriptors.
+  ** If successful, open the master journal file for reading.
+  */
+  pMaster = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2);
+  pJournal = (sqlite3_file *)(((u8 *)pMaster) + pVfs->szOsFile);
+  if( !pMaster ){
+    rc = SQLITE_NOMEM_BKPT;
+  }else{
+    const int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MASTER_JOURNAL);
+    rc = sqlite3OsOpen(pVfs, zMaster, pMaster, flags, 0);
+  }
+  if( rc!=SQLITE_OK ) goto delmaster_out;
+
+  /* Load the entire master journal file into space obtained from
+  ** sqlite3_malloc() and pointed to by zMasterJournal. Also obtain
+  ** sufficient space (in zMasterPtr) to hold the names of master
+  ** journal files extracted from regular rollback-journals.
+  */
+  rc = sqlite3OsFileSize(pMaster, &nMasterJournal);
+  if( rc!=SQLITE_OK ) goto delmaster_out;
+  nMasterPtr = pVfs->mxPathname+1;
+  zMasterJournal = sqlite3Malloc(nMasterJournal + nMasterPtr + 1);
+  if( !zMasterJournal ){
+    rc = SQLITE_NOMEM_BKPT;
+    goto delmaster_out;
+  }
+  zMasterPtr = &zMasterJournal[nMasterJournal+1];
+  rc = sqlite3OsRead(pMaster, zMasterJournal, (int)nMasterJournal, 0);
+  if( rc!=SQLITE_OK ) goto delmaster_out;
+  zMasterJournal[nMasterJournal] = 0;
+
+  zJournal = zMasterJournal;
+  while( (zJournal-zMasterJournal)<nMasterJournal ){
+    int exists;
+    rc = sqlite3OsAccess(pVfs, zJournal, SQLITE_ACCESS_EXISTS, &exists);
+    if( rc!=SQLITE_OK ){
+      goto delmaster_out;
+    }
+    if( exists ){
+      /* One of the journals pointed to by the master journal exists.
+      ** Open it and check if it points at the master journal. If
+      ** so, return without deleting the master journal file.
+      */
+      int c;
+      int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL);
+      rc = sqlite3OsOpen(pVfs, zJournal, pJournal, flags, 0);
+      if( rc!=SQLITE_OK ){
+        goto delmaster_out;
+      }
+
+      rc = readMasterJournal(pJournal, zMasterPtr, nMasterPtr);
+      sqlite3OsClose(pJournal);
+      if( rc!=SQLITE_OK ){
+        goto delmaster_out;
+      }
+
+      c = zMasterPtr[0]!=0 && strcmp(zMasterPtr, zMaster)==0;
+      if( c ){
+        /* We have a match. Do not delete the master journal file. */
+        goto delmaster_out;
+      }
+    }
+    zJournal += (sqlite3Strlen30(zJournal)+1);
+  }
+
+  sqlite3OsClose(pMaster);
+  rc = sqlite3OsDelete(pVfs, zMaster, 0);
+
+delmaster_out:
+  sqlite3_free(zMasterJournal);
+  if( pMaster ){
+    sqlite3OsClose(pMaster);
+    sqlite3_free(pMaster);
+  }
+  return rc;
+}
+
+/*
+** This function is used to change the actual size of the database
+** file in the file-system. This only happens when committing a
+** transaction, or rolling back a transaction (including rolling back
+** a hot-journal).
+**
+** If the main database file is not open, or the pager is not in either
+** DBMOD or OPEN state, this function is a no-op. Otherwise, the size
+** of the file is changed to nPage pages (nPage*pPager->pageSize bytes).
+** If the file on disk is currently larger than nPage pages, then use the VFS
+** xTruncate() method to truncate it.
+**
+** Or, it might be the case that the file on disk is smaller than
+** nPage pages. Some operating system implementations can get confused if
+** you try to truncate a file to some size that is larger than it
+** currently is, so detect this case and write a single zero byte to
+** the end of the new file instead.
+**
+** If successful, return SQLITE_OK. If an IO error occurs while modifying
+** the database file, return the error code to the caller.
+*/
+static int pager_truncate(Pager *pPager, Pgno nPage){
+  int rc = SQLITE_OK;
+  assert( pPager->eState!=PAGER_ERROR );
+  assert( pPager->eState!=PAGER_READER );
+
+  if( isOpen(pPager->fd)
+   && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)
+  ){
+    i64 currentSize, newSize;
+    int szPage = pPager->pageSize;
+    assert( pPager->eLock==EXCLUSIVE_LOCK );
+    /* TODO: Is it safe to use Pager.dbFileSize here? */
+    rc = sqlite3OsFileSize(pPager->fd, &currentSize);
+    newSize = szPage*(i64)nPage;
+    if( rc==SQLITE_OK && currentSize!=newSize ){
+      if( currentSize>newSize ){
+        rc = sqlite3OsTruncate(pPager->fd, newSize);
+      }else if( (currentSize+szPage)<=newSize ){
+        char *pTmp = pPager->pTmpSpace;
+        memset(pTmp, 0, szPage);
+        testcase( (newSize-szPage) == currentSize );
+        testcase( (newSize-szPage) >  currentSize );
+        rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, newSize-szPage);
+      }
+      if( rc==SQLITE_OK ){
+        pPager->dbFileSize = nPage;
+      }
+    }
+  }
+  return rc;
+}
+
+/*
+** Return a sanitized version of the sector-size of OS file pFile.
The +** return value is guaranteed to lie between 32 and MAX_SECTOR_SIZE. +*/ +SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *pFile){ + int iRet = sqlite3OsSectorSize(pFile); + if( iRet<32 ){ + iRet = 512; + }else if( iRet>MAX_SECTOR_SIZE ){ + assert( MAX_SECTOR_SIZE>=512 ); + iRet = MAX_SECTOR_SIZE; + } + return iRet; +} + +/* +** Set the value of the Pager.sectorSize variable for the given +** pager based on the value returned by the xSectorSize method +** of the open database file. The sector size will be used +** to determine the size and alignment of journal header and +** master journal pointers within created journal files. +** +** For temporary files the effective sector size is always 512 bytes. +** +** Otherwise, for non-temporary files, the effective sector size is +** the value returned by the xSectorSize() method rounded up to 32 if +** it is less than 32, or rounded down to MAX_SECTOR_SIZE if it +** is greater than MAX_SECTOR_SIZE. +** +** If the file has the SQLITE_IOCAP_POWERSAFE_OVERWRITE property, then set +** the effective sector size to its minimum value (512). The purpose of +** pPager->sectorSize is to define the "blast radius" of bytes that +** might change if a crash occurs while writing to a single byte in +** that range. But with POWERSAFE_OVERWRITE, the blast radius is zero +** (that is what POWERSAFE_OVERWRITE means), so we minimize the sector +** size. For backwards compatibility of the rollback journal file format, +** we cannot reduce the effective sector size below 512. +*/ +static void setSectorSize(Pager *pPager){ + assert( isOpen(pPager->fd) || pPager->tempFile ); + + if( pPager->tempFile + || (sqlite3OsDeviceCharacteristics(pPager->fd) & + SQLITE_IOCAP_POWERSAFE_OVERWRITE)!=0 + ){ + /* Sector size doesn't matter for temporary files. Also, the file + ** may not have been opened yet, in which case the OsSectorSize() + ** call will segfault. */ + pPager->sectorSize = 512; + }else{ + pPager->sectorSize = sqlite3SectorSize(pPager->fd); + } +} + +/* +** Playback the journal and thus restore the database file to +** the state it was in before we started making changes. +** +** The journal file format is as follows: +** +** (1) 8 byte prefix. A copy of aJournalMagic[]. +** (2) 4 byte big-endian integer which is the number of valid page records +** in the journal. If this value is 0xffffffff, then compute the +** number of page records from the journal size. +** (3) 4 byte big-endian integer which is the initial value for the +** sanity checksum. +** (4) 4 byte integer which is the number of pages to truncate the +** database to during a rollback. +** (5) 4 byte big-endian integer which is the sector size. The header +** is this many bytes in size. +** (6) 4 byte big-endian integer which is the page size. +** (7) zero padding out to the next sector size. +** (8) Zero or more pages instances, each as follows: +** + 4 byte page number. +** + pPager->pageSize bytes of data. +** + 4 byte checksum +** +** When we speak of the journal header, we mean the first 7 items above. +** Each entry in the journal is an instance of the 8th item. +** +** Call the value from the second bullet "nRec". nRec is the number of +** valid page entries in the journal. In most cases, you can compute the +** value of nRec from the size of the journal file. But if a power +** failure occurred while the journal was being written, it could be the +** case that the size of the journal file had already been increased but +** the extra entries had not yet made it safely to disk. 
In such a case, +** the value of nRec computed from the file size would be too large. For +** that reason, we always use the nRec value in the header. +** +** If the nRec value is 0xffffffff it means that nRec should be computed +** from the file size. This value is used when the user selects the +** no-sync option for the journal. A power failure could lead to corruption +** in this case. But for things like temporary table (which will be +** deleted when the power is restored) we don't care. +** +** If the file opened as the journal file is not a well-formed +** journal file then all pages up to the first corrupted page are rolled +** back (or no pages if the journal header is corrupted). The journal file +** is then deleted and SQLITE_OK returned, just as if no corruption had +** been encountered. +** +** If an I/O or malloc() error occurs, the journal-file is not deleted +** and an error code is returned. +** +** The isHot parameter indicates that we are trying to rollback a journal +** that might be a hot journal. Or, it could be that the journal is +** preserved because of JOURNALMODE_PERSIST or JOURNALMODE_TRUNCATE. +** If the journal really is hot, reset the pager cache prior rolling +** back any content. If the journal is merely persistent, no reset is +** needed. +*/ +static int pager_playback(Pager *pPager, int isHot){ + sqlite3_vfs *pVfs = pPager->pVfs; + i64 szJ; /* Size of the journal file in bytes */ + u32 nRec; /* Number of Records in the journal */ + u32 u; /* Unsigned loop counter */ + Pgno mxPg = 0; /* Size of the original file in pages */ + int rc; /* Result code of a subroutine */ + int res = 1; /* Value returned by sqlite3OsAccess() */ + char *zMaster = 0; /* Name of master journal file if any */ + int needPagerReset; /* True to reset page prior to first page rollback */ + int nPlayback = 0; /* Total number of pages restored from journal */ + + /* Figure out how many records are in the journal. Abort early if + ** the journal is empty. + */ + assert( isOpen(pPager->jfd) ); + rc = sqlite3OsFileSize(pPager->jfd, &szJ); + if( rc!=SQLITE_OK ){ + goto end_playback; + } + + /* Read the master journal name from the journal, if it is present. + ** If a master journal file name is specified, but the file is not + ** present on disk, then the journal is not hot and does not need to be + ** played back. + ** + ** TODO: Technically the following is an error because it assumes that + ** buffer Pager.pTmpSpace is (mxPathname+1) bytes or larger. i.e. that + ** (pPager->pageSize >= pPager->pVfs->mxPathname+1). Using os_unix.c, + ** mxPathname is 512, which is the same as the minimum allowable value + ** for pageSize. + */ + zMaster = pPager->pTmpSpace; + rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); + if( rc==SQLITE_OK && zMaster[0] ){ + rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); + } + zMaster = 0; + if( rc!=SQLITE_OK || !res ){ + goto end_playback; + } + pPager->journalOff = 0; + needPagerReset = isHot; + + /* This loop terminates either when a readJournalHdr() or + ** pager_playback_one_page() call returns SQLITE_DONE or an IO error + ** occurs. + */ + while( 1 ){ + /* Read the next journal header from the journal file. If there are + ** not enough bytes left in the journal file for a complete header, or + ** it is corrupted, then a process must have failed while writing it. + ** This indicates nothing more needs to be rolled back. 
+    */
+    rc = readJournalHdr(pPager, isHot, szJ, &nRec, &mxPg);
+    if( rc!=SQLITE_OK ){
+      if( rc==SQLITE_DONE ){
+        rc = SQLITE_OK;
+      }
+      goto end_playback;
+    }
+
+    /* If nRec is 0xffffffff, then this journal was created by a process
+    ** working in no-sync mode. This means that the rest of the journal
+    ** file consists of pages, there are no more journal headers. Compute
+    ** the value of nRec based on this assumption.
+    */
+    if( nRec==0xffffffff ){
+      assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) );
+      nRec = (int)((szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager));
+    }
+
+    /* If nRec is 0 and this rollback is of a transaction created by this
+    ** process and if this is the final header in the journal, then it means
+    ** that this part of the journal was being filled but has not yet been
+    ** synced to disk. Compute the number of pages based on the remaining
+    ** size of the file.
+    **
+    ** The third term of the test was added to fix ticket #2565.
+    ** When rolling back a hot journal, nRec==0 always means that the next
+    ** chunk of the journal contains zero pages to be rolled back. But
+    ** when doing a ROLLBACK and the nRec==0 chunk is the last chunk in
+    ** the journal, it means that the journal might contain additional
+    ** pages that need to be rolled back and that the number of pages
+    ** should be computed based on the journal file size.
+    */
+    if( nRec==0 && !isHot &&
+        pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){
+      nRec = (int)((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager));
+    }
+
+    /* If this is the first header read from the journal, truncate the
+    ** database file back to its original size.
+    */
+    if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){
+      rc = pager_truncate(pPager, mxPg);
+      if( rc!=SQLITE_OK ){
+        goto end_playback;
+      }
+      pPager->dbSize = mxPg;
+    }
+
+    /* Copy original pages out of the journal and back into the
+    ** database file and/or page cache.
+    */
+    for(u=0; u<nRec; u++){
+      if( needPagerReset ){
+        pager_reset(pPager);
+        needPagerReset = 0;
+      }
+      rc = pager_playback_one_page(pPager,&pPager->journalOff,0,1,0);
+      if( rc==SQLITE_OK ){
+        nPlayback++;
+      }else{
+        if( rc==SQLITE_DONE ){
+          pPager->journalOff = szJ;
+          break;
+        }else if( rc==SQLITE_IOERR_SHORT_READ ){
+          /* If the journal has been truncated, simply stop reading and
+          ** processing the journal. This might happen if the journal was
+          ** not completely written and synced prior to a crash. In that
+          ** case, the database should have never been written in the
+          ** first place so it is OK to simply abandon the rollback. */
+          rc = SQLITE_OK;
+          goto end_playback;
+        }else{
+          /* If we are unable to rollback, quit and return the error
+          ** code. This will cause the pager to enter the error state
+          ** so that no further harm will be done. Perhaps the next
+          ** process to come along will be able to rollback the database.
+          */
+          goto end_playback;
+        }
+      }
+    }
+  }
+  /*NOTREACHED*/
+  assert( 0 );
+
+end_playback:
+  /* Following a rollback, the database file should be back in its original
+  ** state prior to the start of the transaction, so invoke the
+  ** SQLITE_FCNTL_DB_UNCHANGED file-control method to disable the
+  ** assertion that the transaction counter was modified.
+  */
+#ifdef SQLITE_DEBUG
+  if( pPager->fd->pMethods ){
+    sqlite3OsFileControlHint(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0);
+  }
+#endif
+
+  /* If this playback is happening automatically as a result of an IO or
+  ** malloc error that occurred after the change-counter was updated but
+  ** before the transaction was committed, then the change-counter
+  ** modification may just have been reverted.
If this happens in exclusive + ** mode, then subsequent transactions performed by the connection will not + ** update the change-counter at all. This may lead to cache inconsistency + ** problems for other processes at some point in the future. So, just + ** in case this has happened, clear the changeCountDone flag now. + */ + pPager->changeCountDone = pPager->tempFile; + + if( rc==SQLITE_OK ){ + zMaster = pPager->pTmpSpace; + rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); + testcase( rc!=SQLITE_OK ); + } + if( rc==SQLITE_OK + && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) + ){ + rc = sqlite3PagerSync(pPager, 0); + } + if( rc==SQLITE_OK ){ + rc = pager_end_transaction(pPager, zMaster[0]!='\0', 0); + testcase( rc!=SQLITE_OK ); + } + if( rc==SQLITE_OK && zMaster[0] && res ){ + /* If there was a master journal and this routine will return success, + ** see if it is possible to delete the master journal. + */ + rc = pager_delmaster(pPager, zMaster); + testcase( rc!=SQLITE_OK ); + } + if( isHot && nPlayback ){ + sqlite3_log(SQLITE_NOTICE_RECOVER_ROLLBACK, "recovered %d pages from %s", + nPlayback, pPager->zJournal); + } + + /* The Pager.sectorSize variable may have been updated while rolling + ** back a journal created by a process with a different sector size + ** value. Reset it to the correct value for this process. + */ + setSectorSize(pPager); + return rc; +} + + +/* +** Read the content for page pPg out of the database file and into +** pPg->pData. A shared lock or greater must be held on the database +** file before this function is called. +** +** If page 1 is read, then the value of Pager.dbFileVers[] is set to +** the value read from the database file. +** +** If an IO error occurs, then the IO error is returned to the caller. +** Otherwise, SQLITE_OK is returned. +*/ +static int readDbPage(PgHdr *pPg, u32 iFrame){ + Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */ + Pgno pgno = pPg->pgno; /* Page number to read */ + int rc = SQLITE_OK; /* Return code */ + int pgsz = pPager->pageSize; /* Number of bytes to read */ + + assert( pPager->eState>=PAGER_READER && !MEMDB ); + assert( isOpen(pPager->fd) ); + +#ifndef SQLITE_OMIT_WAL + if( iFrame ){ + /* Try to pull the page from the write-ahead log. */ + rc = sqlite3WalReadFrame(pPager->pWal, iFrame, pgsz, pPg->pData); + }else +#endif + { + i64 iOffset = (pgno-1)*(i64)pPager->pageSize; + rc = sqlite3OsRead(pPager->fd, pPg->pData, pgsz, iOffset); + if( rc==SQLITE_IOERR_SHORT_READ ){ + rc = SQLITE_OK; + } + } + + if( pgno==1 ){ + if( rc ){ + /* If the read is unsuccessful, set the dbFileVers[] to something + ** that will never be a valid file version. dbFileVers[] is a copy + ** of bytes 24..39 of the database. Bytes 28..31 should always be + ** zero or the size of the database in page. Bytes 32..35 and 35..39 + ** should be page numbers which are never 0xffffffff. So filling + ** pPager->dbFileVers[] with all 0xff bytes should suffice. + ** + ** For an encrypted database, the situation is more complex: bytes + ** 24..39 of the database are white noise. But the probability of + ** white noise equaling 16 bytes of 0xff is vanishingly small so + ** we should still be ok. 
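+      ** (For reference, the layout of those header bytes: 24..27 hold the
+      ** file change counter, 28..31 the in-header database size in pages,
+      ** 32..35 the first freelist trunk page, and 36..39 the number of
+      ** freelist pages.)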
+ */ + memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers)); + }else{ + u8 *dbFileVers = &((u8*)pPg->pData)[24]; + memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers)); + } + } + CODEC1(pPager, pPg->pData, pgno, 3, rc = SQLITE_NOMEM_BKPT); + + PAGER_INCR(sqlite3_pager_readdb_count); + PAGER_INCR(pPager->nRead); + IOTRACE(("PGIN %p %d\n", pPager, pgno)); + PAGERTRACE(("FETCH %d page %d hash(%08x)\n", + PAGERID(pPager), pgno, pager_pagehash(pPg))); + + return rc; +} + +/* +** Update the value of the change-counter at offsets 24 and 92 in +** the header and the sqlite version number at offset 96. +** +** This is an unconditional update. See also the pager_incr_changecounter() +** routine which only updates the change-counter if the update is actually +** needed, as determined by the pPager->changeCountDone state variable. +*/ +static void pager_write_changecounter(PgHdr *pPg){ + u32 change_counter; + + /* Increment the value just read and write it back to byte 24. */ + change_counter = sqlite3Get4byte((u8*)pPg->pPager->dbFileVers)+1; + put32bits(((char*)pPg->pData)+24, change_counter); + + /* Also store the SQLite version number in bytes 96..99 and in + ** bytes 92..95 store the change counter for which the version number + ** is valid. */ + put32bits(((char*)pPg->pData)+92, change_counter); + put32bits(((char*)pPg->pData)+96, SQLITE_VERSION_NUMBER); +} + +#ifndef SQLITE_OMIT_WAL +/* +** This function is invoked once for each page that has already been +** written into the log file when a WAL transaction is rolled back. +** Parameter iPg is the page number of said page. The pCtx argument +** is actually a pointer to the Pager structure. +** +** If page iPg is present in the cache, and has no outstanding references, +** it is discarded. Otherwise, if there are one or more outstanding +** references, the page content is reloaded from the database. If the +** attempt to reload content from the database is required and fails, +** return an SQLite error code. Otherwise, SQLITE_OK. +*/ +static int pagerUndoCallback(void *pCtx, Pgno iPg){ + int rc = SQLITE_OK; + Pager *pPager = (Pager *)pCtx; + PgHdr *pPg; + + assert( pagerUseWal(pPager) ); + pPg = sqlite3PagerLookup(pPager, iPg); + if( pPg ){ + if( sqlite3PcachePageRefcount(pPg)==1 ){ + sqlite3PcacheDrop(pPg); + }else{ + u32 iFrame = 0; + rc = sqlite3WalFindFrame(pPager->pWal, pPg->pgno, &iFrame); + if( rc==SQLITE_OK ){ + rc = readDbPage(pPg, iFrame); + } + if( rc==SQLITE_OK ){ + pPager->xReiniter(pPg); + } + sqlite3PagerUnrefNotNull(pPg); + } + } + + /* Normally, if a transaction is rolled back, any backup processes are + ** updated as data is copied out of the rollback journal and into the + ** database. This is not generally possible with a WAL database, as + ** rollback involves simply truncating the log file. Therefore, if one + ** or more frames have already been written to the log (and therefore + ** also copied into the backup databases) as part of this transaction, + ** the backups must be restarted. + */ + sqlite3BackupRestart(pPager->pBackup); + + return rc; +} + +/* +** This function is called to rollback a transaction on a WAL database. 
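+**
+** In outline (a sketch of the code below, not additional behavior):
+** sqlite3WalUndo() is asked to undo the frames written by the current
+** transaction, with pagerUndoCallback() discarding or reloading each
+** affected cache page, and the same callback is then run over any
+** remaining dirty pages.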
+*/ +static int pagerRollbackWal(Pager *pPager){ + int rc; /* Return Code */ + PgHdr *pList; /* List of dirty pages to revert */ + + /* For all pages in the cache that are currently dirty or have already + ** been written (but not committed) to the log file, do one of the + ** following: + ** + ** + Discard the cached page (if refcount==0), or + ** + Reload page content from the database (if refcount>0). + */ + pPager->dbSize = pPager->dbOrigSize; + rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager); + pList = sqlite3PcacheDirtyList(pPager->pPCache); + while( pList && rc==SQLITE_OK ){ + PgHdr *pNext = pList->pDirty; + rc = pagerUndoCallback((void *)pPager, pList->pgno); + pList = pNext; + } + + return rc; +} + +/* +** This function is a wrapper around sqlite3WalFrames(). As well as logging +** the contents of the list of pages headed by pList (connected by pDirty), +** this function notifies any active backup processes that the pages have +** changed. +** +** The list of pages passed into this routine is always sorted by page number. +** Hence, if page 1 appears anywhere on the list, it will be the first page. +*/ +static int pagerWalFrames( + Pager *pPager, /* Pager object */ + PgHdr *pList, /* List of frames to log */ + Pgno nTruncate, /* Database size after this commit */ + int isCommit /* True if this is a commit */ +){ + int rc; /* Return code */ + int nList; /* Number of pages in pList */ + PgHdr *p; /* For looping over pages */ + + assert( pPager->pWal ); + assert( pList ); +#ifdef SQLITE_DEBUG + /* Verify that the page list is in accending order */ + for(p=pList; p && p->pDirty; p=p->pDirty){ + assert( p->pgno < p->pDirty->pgno ); + } +#endif + + assert( pList->pDirty==0 || isCommit ); + if( isCommit ){ + /* If a WAL transaction is being committed, there is no point in writing + ** any pages with page numbers greater than nTruncate into the WAL file. + ** They will never be read by any client. So remove them from the pDirty + ** list here. */ + PgHdr **ppNext = &pList; + nList = 0; + for(p=pList; (*ppNext = p)!=0; p=p->pDirty){ + if( p->pgno<=nTruncate ){ + ppNext = &p->pDirty; + nList++; + } + } + assert( pList ); + }else{ + nList = 1; + } + pPager->aStat[PAGER_STAT_WRITE] += nList; + + if( pList->pgno==1 ) pager_write_changecounter(pList); + rc = sqlite3WalFrames(pPager->pWal, + pPager->pageSize, pList, nTruncate, isCommit, pPager->walSyncFlags + ); + if( rc==SQLITE_OK && pPager->pBackup ){ + for(p=pList; p; p=p->pDirty){ + sqlite3BackupUpdate(pPager->pBackup, p->pgno, (u8 *)p->pData); + } + } + +#ifdef SQLITE_CHECK_PAGES + pList = sqlite3PcacheDirtyList(pPager->pPCache); + for(p=pList; p; p=p->pDirty){ + pager_set_pagehash(p); + } +#endif + + return rc; +} + +/* +** Begin a read transaction on the WAL. +** +** This routine used to be called "pagerOpenSnapshot()" because it essentially +** makes a snapshot of the database at the current point in time and preserves +** that snapshot for use by the reader in spite of concurrently changes by +** other writers or checkpointers. +*/ +static int pagerBeginReadTransaction(Pager *pPager){ + int rc; /* Return code */ + int changed = 0; /* True if cache must be reset */ + + assert( pagerUseWal(pPager) ); + assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); + + /* sqlite3WalEndReadTransaction() was not called for the previous + ** transaction in locking_mode=EXCLUSIVE. So call it now. If we + ** are in locking_mode=NORMAL and EndRead() was previously called, + ** the duplicate call is harmless. 
+  */
+  sqlite3WalEndReadTransaction(pPager->pWal);
+
+  rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed);
+  if( rc!=SQLITE_OK || changed ){
+    pager_reset(pPager);
+    if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0);
+  }
+
+  return rc;
+}
+#endif
+
+/*
+** This function is called as part of the transition from PAGER_OPEN
+** to PAGER_READER state to determine the size of the database file
+** in pages (assuming the page size currently stored in Pager.pageSize).
+**
+** If no error occurs, SQLITE_OK is returned and the size of the database
+** in pages is stored in *pnPage. Otherwise, an error code (perhaps
+** SQLITE_IOERR_FSTAT) is returned and *pnPage is left unmodified.
+*/
+static int pagerPagecount(Pager *pPager, Pgno *pnPage){
+  Pgno nPage;               /* Value to return via *pnPage */
+
+  /* Query the WAL sub-system for the database size. The WalDbsize()
+  ** function returns zero if the WAL is not open (i.e. Pager.pWal==0), or
+  ** if the database size is not available. The database size is not
+  ** available from the WAL sub-system if the log file is empty or
+  ** contains no valid committed transactions.
+  */
+  assert( pPager->eState==PAGER_OPEN );
+  assert( pPager->eLock>=SHARED_LOCK );
+  assert( isOpen(pPager->fd) );
+  assert( pPager->tempFile==0 );
+  nPage = sqlite3WalDbsize(pPager->pWal);
+
+  /* If the number of pages in the database is not available from the
+  ** WAL sub-system, determine the page count based on the size of
+  ** the database file. If the size of the database file is not an
+  ** integer multiple of the page-size, round up the result.
+  */
+  if( nPage==0 && ALWAYS(isOpen(pPager->fd)) ){
+    i64 n = 0;                    /* Size of db file in bytes */
+    int rc = sqlite3OsFileSize(pPager->fd, &n);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    nPage = (Pgno)((n+pPager->pageSize-1) / pPager->pageSize);
+  }
+
+  /* If the current number of pages in the file is greater than the
+  ** configured maximum pager number, increase the allowed limit so
+  ** that the file can be read.
+  */
+  if( nPage>pPager->mxPgno ){
+    pPager->mxPgno = (Pgno)nPage;
+  }
+
+  *pnPage = nPage;
+  return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** Check if the *-wal file that corresponds to the database opened by pPager
+** exists if the database is not empty, or verify that the *-wal file does
+** not exist (by deleting it) if the database file is empty.
+**
+** If the database is not empty and the *-wal file exists, open the pager
+** in WAL mode. If the database is empty or if no *-wal file exists and
+** if no error occurs, make sure Pager.journalMode is not set to
+** PAGER_JOURNALMODE_WAL.
+**
+** Return SQLITE_OK or an error code.
+**
+** The caller must hold a SHARED lock on the database file to call this
+** function. Because an EXCLUSIVE lock on the db file is required to delete
+** a WAL on a non-empty database, this ensures there is no race condition
+** between the xAccess() below and an xDelete() being executed by some
+** other connection.
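+**
+** Informally, the decision implemented below reduces to this sketch:
+**
+**     if( nPage==0 )               delete any leftover *-wal file;
+**     else if( *-wal file exists ) open the pager in WAL mode;
+**     else if( journalMode==WAL )  journalMode = DELETE;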
+*/ +static int pagerOpenWalIfPresent(Pager *pPager){ + int rc = SQLITE_OK; + assert( pPager->eState==PAGER_OPEN ); + assert( pPager->eLock>=SHARED_LOCK ); + + if( !pPager->tempFile ){ + int isWal; /* True if WAL file exists */ + Pgno nPage; /* Size of the database file */ + + rc = pagerPagecount(pPager, &nPage); + if( rc ) return rc; + if( nPage==0 ){ + rc = sqlite3OsDelete(pPager->pVfs, pPager->zWal, 0); + if( rc==SQLITE_IOERR_DELETE_NOENT ) rc = SQLITE_OK; + isWal = 0; + }else{ + rc = sqlite3OsAccess( + pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &isWal + ); + } + if( rc==SQLITE_OK ){ + if( isWal ){ + testcase( sqlite3PcachePagecount(pPager->pPCache)==0 ); + rc = sqlite3PagerOpenWal(pPager, 0); + }else if( pPager->journalMode==PAGER_JOURNALMODE_WAL ){ + pPager->journalMode = PAGER_JOURNALMODE_DELETE; + } + } + } + return rc; +} +#endif + +/* +** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback +** the entire master journal file. The case pSavepoint==NULL occurs when +** a ROLLBACK TO command is invoked on a SAVEPOINT that is a transaction +** savepoint. +** +** When pSavepoint is not NULL (meaning a non-transaction savepoint is +** being rolled back), then the rollback consists of up to three stages, +** performed in the order specified: +** +** * Pages are played back from the main journal starting at byte +** offset PagerSavepoint.iOffset and continuing to +** PagerSavepoint.iHdrOffset, or to the end of the main journal +** file if PagerSavepoint.iHdrOffset is zero. +** +** * If PagerSavepoint.iHdrOffset is not zero, then pages are played +** back starting from the journal header immediately following +** PagerSavepoint.iHdrOffset to the end of the main journal file. +** +** * Pages are then played back from the sub-journal file, starting +** with the PagerSavepoint.iSubRec and continuing to the end of +** the journal file. +** +** Throughout the rollback process, each time a page is rolled back, the +** corresponding bit is set in a bitvec structure (variable pDone in the +** implementation below). This is used to ensure that a page is only +** rolled back the first time it is encountered in either journal. +** +** If pSavepoint is NULL, then pages are only played back from the main +** journal file. There is no need for a bitvec in this case. +** +** In either case, before playback commences the Pager.dbSize variable +** is reset to the value that it held at the start of the savepoint +** (or transaction). No page with a page-number greater than this value +** is played back. If one is encountered it is simply skipped. +*/ +static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ + i64 szJ; /* Effective size of the main journal */ + i64 iHdrOff; /* End of first segment of main-journal records */ + int rc = SQLITE_OK; /* Return code */ + Bitvec *pDone = 0; /* Bitvec to ensure pages played back only once */ + + assert( pPager->eState!=PAGER_ERROR ); + assert( pPager->eState>=PAGER_WRITER_LOCKED ); + + /* Allocate a bitvec to use to store the set of pages rolled back */ + if( pSavepoint ){ + pDone = sqlite3BitvecCreate(pSavepoint->nOrig); + if( !pDone ){ + return SQLITE_NOMEM_BKPT; + } + } + + /* Set the database size back to the value it was before the savepoint + ** being reverted was opened. + */ + pPager->dbSize = pSavepoint ? 
pSavepoint->nOrig : pPager->dbOrigSize;
+  pPager->changeCountDone = pPager->tempFile;
+
+  if( !pSavepoint && pagerUseWal(pPager) ){
+    return pagerRollbackWal(pPager);
+  }
+
+  /* Use pPager->journalOff as the effective size of the main rollback
+  ** journal. The actual file might be larger than this in
+  ** PAGER_JOURNALMODE_TRUNCATE or PAGER_JOURNALMODE_PERSIST. But anything
+  ** past pPager->journalOff is off-limits to us.
+  */
+  szJ = pPager->journalOff;
+  assert( pagerUseWal(pPager)==0 || szJ==0 );
+
+  /* Begin by rolling back records from the main journal starting at
+  ** PagerSavepoint.iOffset and continuing to the next journal header.
+  ** There might be records in the main journal that have a page number
+  ** greater than the current database size (pPager->dbSize) but those
+  ** will be skipped automatically. Pages are added to pDone as they
+  ** are played back.
+  */
+  if( pSavepoint && !pagerUseWal(pPager) ){
+    iHdrOff = pSavepoint->iHdrOffset ? pSavepoint->iHdrOffset : szJ;
+    pPager->journalOff = pSavepoint->iOffset;
+    while( rc==SQLITE_OK && pPager->journalOff<iHdrOff ){
+      rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1);
+    }
+    assert( rc!=SQLITE_DONE );
+  }else{
+    pPager->journalOff = 0;
+  }
+
+  /* Continue rolling back records out of the main journal starting at
+  ** the first journal header seen and continuing until the effective end
+  ** of the main journal file. Continue to skip out-of-range pages and
+  ** continue adding pages rolled back to pDone.
+  */
+  while( rc==SQLITE_OK && pPager->journalOff<szJ ){
+    u32 ii;            /* Loop counter */
+    u32 nJRec = 0;     /* Number of Journal Records */
+    u32 dummy;
+    rc = readJournalHdr(pPager, 0, szJ, &nJRec, &dummy);
+    assert( rc!=SQLITE_DONE );
+
+    /*
+    ** The "pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff"
+    ** test is related to ticket #2565. See the discussion in the
+    ** pager_playback() function for additional information.
+    */
+    if( nJRec==0
+     && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff
+    ){
+      nJRec = (u32)((szJ - pPager->journalOff)/JOURNAL_PG_SZ(pPager));
+    }
+    for(ii=0; rc==SQLITE_OK && ii<nJRec && pPager->journalOff<szJ; ii++){
+      rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1);
+    }
+    assert( rc!=SQLITE_DONE );
+  }
+  assert( rc!=SQLITE_OK || pPager->journalOff>=szJ );
+
+  /* Finally, rollback pages from the sub-journal. Pages that were
+  ** previously rolled back out of the main journal (and are hence in pDone)
+  ** will be skipped. Out-of-range pages are also skipped.
+  */
+  if( pSavepoint ){
+    u32 ii;            /* Loop counter */
+    i64 offset = (i64)pSavepoint->iSubRec*(4+pPager->pageSize);
+
+    if( pagerUseWal(pPager) ){
+      rc = sqlite3WalSavepointUndo(pPager->pWal, pSavepoint->aWalData);
+    }
+    for(ii=pSavepoint->iSubRec; rc==SQLITE_OK && ii<pSavepoint->nSubRec; ii++){
+      assert( offset==(i64)ii*(4+pPager->pageSize) );
+      rc = pager_playback_one_page(pPager, &offset, pDone, 0, 1);
+    }
+    assert( rc!=SQLITE_DONE );
+  }
+
+  sqlite3BitvecDestroy(pDone);
+  if( rc==SQLITE_OK ){
+    pPager->journalOff = szJ;
+  }
+
+  return rc;
+}
+
+/*
+** Change the maximum number of in-memory pages that are allowed
+** before attempting to recycle clean and unused pages.
+*/
+SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){
+  sqlite3PcacheSetCachesize(pPager->pPCache, mxPage);
+}
+
+/*
+** Change the maximum number of in-memory pages that are allowed
+** before attempting to spill pages to journal.
+*/
+SQLITE_PRIVATE int sqlite3PagerSetSpillsize(Pager *pPager, int mxPage){
+  return sqlite3PcacheSetSpillsize(pPager->pPCache, mxPage);
+}
+
+/*
+** Invoke SQLITE_FCNTL_MMAP_SIZE based on the current value of szMmap.
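+**
+** (At the SQL level this limit is usually set with, for example,
+** "PRAGMA mmap_size=268435456;" to allow up to 256MB of memory-mapped
+** I/O, or "PRAGMA mmap_size=0;" to disable it.)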
+*/ +static void pagerFixMaplimit(Pager *pPager){ +#if SQLITE_MAX_MMAP_SIZE>0 + sqlite3_file *fd = pPager->fd; + if( isOpen(fd) && fd->pMethods->iVersion>=3 ){ + sqlite3_int64 sz; + sz = pPager->szMmap; + pPager->bUseFetch = (sz>0); + setGetterMethod(pPager); + sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz); + } +#endif +} + +/* +** Change the maximum size of any memory mapping made of the database file. +*/ +SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *pPager, sqlite3_int64 szMmap){ + pPager->szMmap = szMmap; + pagerFixMaplimit(pPager); +} + +/* +** Free as much memory as possible from the pager. +*/ +SQLITE_PRIVATE void sqlite3PagerShrink(Pager *pPager){ + sqlite3PcacheShrink(pPager->pPCache); +} + +/* +** Adjust settings of the pager to those specified in the pgFlags parameter. +** +** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness +** of the database to damage due to OS crashes or power failures by +** changing the number of syncs()s when writing the journals. +** There are four levels: +** +** OFF sqlite3OsSync() is never called. This is the default +** for temporary and transient files. +** +** NORMAL The journal is synced once before writes begin on the +** database. This is normally adequate protection, but +** it is theoretically possible, though very unlikely, +** that an inopertune power failure could leave the journal +** in a state which would cause damage to the database +** when it is rolled back. +** +** FULL The journal is synced twice before writes begin on the +** database (with some additional information - the nRec field +** of the journal header - being written in between the two +** syncs). If we assume that writing a +** single disk sector is atomic, then this mode provides +** assurance that the journal will not be corrupted to the +** point of causing damage to the database during rollback. +** +** EXTRA This is like FULL except that is also syncs the directory +** that contains the rollback journal after the rollback +** journal is unlinked. +** +** The above is for a rollback-journal mode. For WAL mode, OFF continues +** to mean that no syncs ever occur. NORMAL means that the WAL is synced +** prior to the start of checkpoint and that the database file is synced +** at the conclusion of the checkpoint if the entire content of the WAL +** was written back into the database. But no sync operations occur for +** an ordinary commit in NORMAL mode with WAL. FULL means that the WAL +** file is synced following each commit operation, in addition to the +** syncs associated with NORMAL. There is no difference between FULL +** and EXTRA for WAL mode. +** +** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL. The +** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync +** using fcntl(F_FULLFSYNC). SQLITE_SYNC_NORMAL means to do an +** ordinary fsync() call. There is no difference between SQLITE_SYNC_FULL +** and SQLITE_SYNC_NORMAL on platforms other than MacOSX. But the +** synchronous=FULL versus synchronous=NORMAL setting determines when +** the xSync primitive is called and is relevant to all platforms. +** +** Numeric values associated with these states are OFF==1, NORMAL=2, +** and FULL=3. 
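+**
+** (These levels correspond to the SQL-level setting, e.g.
+** "PRAGMA synchronous=NORMAL;" or "PRAGMA synchronous=FULL;", which
+** eventually funnels down to this routine.)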
+*/ +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +SQLITE_PRIVATE void sqlite3PagerSetFlags( + Pager *pPager, /* The pager to set safety level for */ + unsigned pgFlags /* Various flags */ +){ + unsigned level = pgFlags & PAGER_SYNCHRONOUS_MASK; + if( pPager->tempFile ){ + pPager->noSync = 1; + pPager->fullSync = 0; + pPager->extraSync = 0; + }else{ + pPager->noSync = level==PAGER_SYNCHRONOUS_OFF ?1:0; + pPager->fullSync = level>=PAGER_SYNCHRONOUS_FULL ?1:0; + pPager->extraSync = level==PAGER_SYNCHRONOUS_EXTRA ?1:0; + } + if( pPager->noSync ){ + pPager->syncFlags = 0; + pPager->ckptSyncFlags = 0; + }else if( pgFlags & PAGER_FULLFSYNC ){ + pPager->syncFlags = SQLITE_SYNC_FULL; + pPager->ckptSyncFlags = SQLITE_SYNC_FULL; + }else if( pgFlags & PAGER_CKPT_FULLFSYNC ){ + pPager->syncFlags = SQLITE_SYNC_NORMAL; + pPager->ckptSyncFlags = SQLITE_SYNC_FULL; + }else{ + pPager->syncFlags = SQLITE_SYNC_NORMAL; + pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL; + } + pPager->walSyncFlags = pPager->syncFlags; + if( pPager->fullSync ){ + pPager->walSyncFlags |= WAL_SYNC_TRANSACTIONS; + } + if( pgFlags & PAGER_CACHESPILL ){ + pPager->doNotSpill &= ~SPILLFLAG_OFF; + }else{ + pPager->doNotSpill |= SPILLFLAG_OFF; + } +} +#endif + +/* +** The following global variable is incremented whenever the library +** attempts to open a temporary file. This information is used for +** testing and analysis only. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_opentemp_count = 0; +#endif + +/* +** Open a temporary file. +** +** Write the file descriptor into *pFile. Return SQLITE_OK on success +** or some other error code if we fail. The OS will automatically +** delete the temporary file when it is closed. +** +** The flags passed to the VFS layer xOpen() call are those specified +** by parameter vfsFlags ORed with the following: +** +** SQLITE_OPEN_READWRITE +** SQLITE_OPEN_CREATE +** SQLITE_OPEN_EXCLUSIVE +** SQLITE_OPEN_DELETEONCLOSE +*/ +static int pagerOpentemp( + Pager *pPager, /* The pager object */ + sqlite3_file *pFile, /* Write the file descriptor here */ + int vfsFlags /* Flags passed through to the VFS */ +){ + int rc; /* Return code */ + +#ifdef SQLITE_TEST + sqlite3_opentemp_count++; /* Used for testing and analysis only */ +#endif + + vfsFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE; + rc = sqlite3OsOpen(pPager->pVfs, 0, pFile, vfsFlags, 0); + assert( rc!=SQLITE_OK || isOpen(pFile) ); + return rc; +} + +/* +** Set the busy handler function. +** +** The pager invokes the busy-handler if sqlite3OsLock() returns +** SQLITE_BUSY when trying to upgrade from no-lock to a SHARED lock, +** or when trying to upgrade from a RESERVED lock to an EXCLUSIVE +** lock. It does *not* invoke the busy handler when upgrading from +** SHARED to RESERVED, or when upgrading from SHARED to EXCLUSIVE +** (which occurs during hot-journal rollback). Summary: +** +** Transition | Invokes xBusyHandler +** -------------------------------------------------------- +** NO_LOCK -> SHARED_LOCK | Yes +** SHARED_LOCK -> RESERVED_LOCK | No +** SHARED_LOCK -> EXCLUSIVE_LOCK | No +** RESERVED_LOCK -> EXCLUSIVE_LOCK | Yes +** +** If the busy-handler callback returns non-zero, the lock is +** retried. If it returns zero, then the SQLITE_BUSY error is +** returned to the caller of the pager API function. 
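+**
+** A minimal handler with the expected signature (illustrative only):
+**
+**     static int exampleBusyHandler(void *pArg){
+**       /* Return non-zero to ask the pager to retry the lock; return
+**       ** zero to let SQLITE_BUSY propagate back to the caller. */
+**       return 0;
+**     }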
+*/ +SQLITE_PRIVATE void sqlite3PagerSetBusyhandler( + Pager *pPager, /* Pager object */ + int (*xBusyHandler)(void *), /* Pointer to busy-handler function */ + void *pBusyHandlerArg /* Argument to pass to xBusyHandler */ +){ + pPager->xBusyHandler = xBusyHandler; + pPager->pBusyHandlerArg = pBusyHandlerArg; + + if( isOpen(pPager->fd) ){ + void **ap = (void **)&pPager->xBusyHandler; + assert( ((int(*)(void *))(ap[0]))==xBusyHandler ); + assert( ap[1]==pBusyHandlerArg ); + sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_BUSYHANDLER, (void *)ap); + } +} + +/* +** Change the page size used by the Pager object. The new page size +** is passed in *pPageSize. +** +** If the pager is in the error state when this function is called, it +** is a no-op. The value returned is the error state error code (i.e. +** one of SQLITE_IOERR, an SQLITE_IOERR_xxx sub-code or SQLITE_FULL). +** +** Otherwise, if all of the following are true: +** +** * the new page size (value of *pPageSize) is valid (a power +** of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive), and +** +** * there are no outstanding page references, and +** +** * the database is either not an in-memory database or it is +** an in-memory database that currently consists of zero pages. +** +** then the pager object page size is set to *pPageSize. +** +** If the page size is changed, then this function uses sqlite3PagerMalloc() +** to obtain a new Pager.pTmpSpace buffer. If this allocation attempt +** fails, SQLITE_NOMEM is returned and the page size remains unchanged. +** In all other cases, SQLITE_OK is returned. +** +** If the page size is not changed, either because one of the enumerated +** conditions above is not true, the pager was in error state when this +** function was called, or because the memory allocation attempt failed, +** then *pPageSize is set to the old, retained page size before returning. +*/ +SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nReserve){ + int rc = SQLITE_OK; + + /* It is not possible to do a full assert_pager_state() here, as this + ** function may be called from within PagerOpen(), before the state + ** of the Pager object is internally consistent. + ** + ** At one point this function returned an error if the pager was in + ** PAGER_ERROR state. But since PAGER_ERROR state guarantees that + ** there is at least one outstanding page reference, this function + ** is a no-op for that case anyhow. 
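+  **
+  ** (Illustrative aside, assuming the usual SQL-level entry points: a
+  ** page-size change is normally requested with "PRAGMA page_size=8192;"
+  ** before the database file is created, or followed by "VACUUM;" to
+  ** rebuild an existing file; both paths re-enter this function through
+  ** the b-tree layer.)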
+  */
+
+  u32 pageSize = *pPageSize;
+  assert( pageSize==0 || (pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE) );
+  if( (pPager->memDb==0 || pPager->dbSize==0)
+   && sqlite3PcacheRefCount(pPager->pPCache)==0
+   && pageSize && pageSize!=(u32)pPager->pageSize
+  ){
+    char *pNew = NULL;             /* New temp space */
+    i64 nByte = 0;
+
+    if( pPager->eState>PAGER_OPEN && isOpen(pPager->fd) ){
+      rc = sqlite3OsFileSize(pPager->fd, &nByte);
+    }
+    if( rc==SQLITE_OK ){
+      pNew = (char *)sqlite3PageMalloc(pageSize);
+      if( !pNew ) rc = SQLITE_NOMEM_BKPT;
+    }
+
+    if( rc==SQLITE_OK ){
+      pager_reset(pPager);
+      rc = sqlite3PcacheSetPageSize(pPager->pPCache, pageSize);
+    }
+    if( rc==SQLITE_OK ){
+      sqlite3PageFree(pPager->pTmpSpace);
+      pPager->pTmpSpace = pNew;
+      pPager->dbSize = (Pgno)((nByte+pageSize-1)/pageSize);
+      pPager->pageSize = pageSize;
+    }else{
+      sqlite3PageFree(pNew);
+    }
+  }
+
+  *pPageSize = pPager->pageSize;
+  if( rc==SQLITE_OK ){
+    if( nReserve<0 ) nReserve = pPager->nReserve;
+    assert( nReserve>=0 && nReserve<1000 );
+    pPager->nReserve = (i16)nReserve;
+    pagerReportSize(pPager);
+    pagerFixMaplimit(pPager);
+  }
+  return rc;
+}
+
+/*
+** Return a pointer to the "temporary page" buffer held internally
+** by the pager.  This is a buffer that is big enough to hold the
+** entire content of a database page.  This buffer is used internally
+** during rollback and will be overwritten whenever a rollback
+** occurs.  But other modules are free to use it too, as long as
+** no rollbacks are happening.
+*/
+SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager *pPager){
+  return pPager->pTmpSpace;
+}
+
+/*
+** Attempt to set the maximum database page count if mxPage is positive.
+** Make no changes if mxPage is zero or negative.  And never reduce the
+** maximum page count below the current size of the database.
+**
+** Regardless of mxPage, return the current maximum page count.
+*/
+SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){
+  if( mxPage>0 ){
+    pPager->mxPgno = mxPage;
+  }
+  assert( pPager->eState!=PAGER_OPEN );      /* Called only by OP_MaxPgcnt */
+  assert( pPager->mxPgno>=pPager->dbSize );  /* OP_MaxPgcnt enforces this */
+  return pPager->mxPgno;
+}
+
+/*
+** The following set of routines are used to disable the simulated
+** I/O error mechanism.  These routines are used to avoid simulated
+** errors in places where we do not care about errors.
+**
+** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops
+** and generate no code.
+*/
+#ifdef SQLITE_TEST
+SQLITE_API extern int sqlite3_io_error_pending;
+SQLITE_API extern int sqlite3_io_error_hit;
+static int saved_cnt;
+void disable_simulated_io_errors(void){
+  saved_cnt = sqlite3_io_error_pending;
+  sqlite3_io_error_pending = -1;
+}
+void enable_simulated_io_errors(void){
+  sqlite3_io_error_pending = saved_cnt;
+}
+#else
+# define disable_simulated_io_errors()
+# define enable_simulated_io_errors()
+#endif
+
+/*
+** Read the first N bytes from the beginning of the file into memory
+** that pDest points to.
+**
+** If the pager was opened on a transient file (zFilename==""), or
+** opened on a file less than N bytes in size, the output buffer is
+** zeroed and SQLITE_OK returned.  The rationale for this is that this
+** function is used to read database headers, and a new transient or
+** zero sized database has a header that consists entirely of zeroes.
+**
+** If any IO error apart from SQLITE_IOERR_SHORT_READ is encountered,
+** the error code is returned to the caller and the contents of the
+** output buffer undefined.
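+**
+** (Illustrative aside: the b-tree layer invokes this routine with N==100
+** to read the database header; in a well-formed database the first 16 of
+** those bytes hold the magic string "SQLite format 3\000".)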
+*/
+SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){
+  int rc = SQLITE_OK;
+  memset(pDest, 0, N);
+  assert( isOpen(pPager->fd) || pPager->tempFile );
+
+  /* This routine is only called by btree immediately after creating
+  ** the Pager object.  There has not been an opportunity to transition
+  ** to WAL mode yet.
+  */
+  assert( !pagerUseWal(pPager) );
+
+  if( isOpen(pPager->fd) ){
+    IOTRACE(("DBHDR %p 0 %d\n", pPager, N))
+    rc = sqlite3OsRead(pPager->fd, pDest, N, 0);
+    if( rc==SQLITE_IOERR_SHORT_READ ){
+      rc = SQLITE_OK;
+    }
+  }
+  return rc;
+}
+
+/*
+** This function may only be called when a read-transaction is open on
+** the pager.  It returns the total number of pages in the database.
+**
+** However, if the file is between 1 and <page-size> bytes in size, then
+** this is considered a 1 page file.
+*/
+SQLITE_PRIVATE void sqlite3PagerPagecount(Pager *pPager, int *pnPage){
+  assert( pPager->eState>=PAGER_READER );
+  assert( pPager->eState!=PAGER_WRITER_FINISHED );
+  *pnPage = (int)pPager->dbSize;
+}
+
+
+/*
+** Try to obtain a lock of type locktype on the database file.  If
+** a similar or greater lock is already held, this function is a no-op
+** (returning SQLITE_OK immediately).
+**
+** Otherwise, attempt to obtain the lock using sqlite3OsLock().  Invoke
+** the busy callback if the lock is currently not available.  Repeat
+** until the busy callback returns false or until the attempt to
+** obtain the lock succeeds.
+**
+** Return SQLITE_OK on success and an error code if we cannot obtain
+** the lock.  If the lock is obtained successfully, set the Pager.state
+** variable to locktype before returning.
+*/
+static int pager_wait_on_lock(Pager *pPager, int locktype){
+  int rc;                              /* Return code */
+
+  /* Check that this is either a no-op (because the requested lock is
+  ** already held), or one of the transitions that the busy-handler
+  ** may be invoked during, according to the comment above
+  ** sqlite3PagerSetBusyhandler().
+  */
+  assert( (pPager->eLock>=locktype)
+       || (pPager->eLock==NO_LOCK && locktype==SHARED_LOCK)
+       || (pPager->eLock==RESERVED_LOCK && locktype==EXCLUSIVE_LOCK)
+  );
+
+  do {
+    rc = pagerLockDb(pPager, locktype);
+  }while( rc==SQLITE_BUSY && pPager->xBusyHandler(pPager->pBusyHandlerArg) );
+  return rc;
+}
+
+/*
+** Function assertTruncateConstraint(pPager) checks that one of the
+** following is true for all dirty pages currently in the page-cache:
+**
+**   a) The page number is less than or equal to the size of the
+**      current database image, in pages, OR
+**
+**   b) if the page content were written at this time, it would not
+**      be necessary to write the current content out to the sub-journal
+**      (as determined by function subjRequiresPage()).
+**
+** If the condition asserted by this function were not true, and the
+** dirty page were to be discarded from the cache via the pagerStress()
+** routine, pagerStress() would not write the current page content to
+** the database file.  If a savepoint transaction were rolled back after
+** this happened, the correct behavior would be to restore the current
+** content of the page.  However, since this content is not present in either
+** the database file or the portion of the rollback journal and
+** sub-journal rolled back, the content could not be restored and the
+** database image would become corrupt.  It is therefore fortunate that
+** this circumstance cannot arise.
+*/ +#if defined(SQLITE_DEBUG) +static void assertTruncateConstraintCb(PgHdr *pPg){ + assert( pPg->flags&PGHDR_DIRTY ); + assert( !subjRequiresPage(pPg) || pPg->pgno<=pPg->pPager->dbSize ); +} +static void assertTruncateConstraint(Pager *pPager){ + sqlite3PcacheIterateDirty(pPager->pPCache, assertTruncateConstraintCb); +} +#else +# define assertTruncateConstraint(pPager) +#endif + +/* +** Truncate the in-memory database file image to nPage pages. This +** function does not actually modify the database file on disk. It +** just sets the internal state of the pager object so that the +** truncation will be done when the current transaction is committed. +** +** This function is only called right before committing a transaction. +** Once this function has been called, the transaction must either be +** rolled back or committed. It is not safe to call this function and +** then continue writing to the database. +*/ +SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ + assert( pPager->dbSize>=nPage ); + assert( pPager->eState>=PAGER_WRITER_CACHEMOD ); + pPager->dbSize = nPage; + + /* At one point the code here called assertTruncateConstraint() to + ** ensure that all pages being truncated away by this operation are, + ** if one or more savepoints are open, present in the savepoint + ** journal so that they can be restored if the savepoint is rolled + ** back. This is no longer necessary as this function is now only + ** called right before committing a transaction. So although the + ** Pager object may still have open savepoints (Pager.nSavepoint!=0), + ** they cannot be rolled back. So the assertTruncateConstraint() call + ** is no longer correct. */ +} + + +/* +** This function is called before attempting a hot-journal rollback. It +** syncs the journal file to disk, then sets pPager->journalHdr to the +** size of the journal file so that the pager_playback() routine knows +** that the entire journal file has been synced. +** +** Syncing a hot-journal to disk before attempting to roll it back ensures +** that if a power-failure occurs during the rollback, the process that +** attempts rollback following system recovery sees the same journal +** content as this process. +** +** If everything goes as planned, SQLITE_OK is returned. Otherwise, +** an SQLite error code. +*/ +static int pagerSyncHotJournal(Pager *pPager){ + int rc = SQLITE_OK; + if( !pPager->noSync ){ + rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_NORMAL); + } + if( rc==SQLITE_OK ){ + rc = sqlite3OsFileSize(pPager->jfd, &pPager->journalHdr); + } + return rc; +} + +#if SQLITE_MAX_MMAP_SIZE>0 +/* +** Obtain a reference to a memory mapped page object for page number pgno. +** The new object will use the pointer pData, obtained from xFetch(). +** If successful, set *ppPage to point to the new page reference +** and return SQLITE_OK. Otherwise, return an SQLite error code and set +** *ppPage to zero. +** +** Page references obtained by calling this function should be released +** by calling pagerReleaseMapPage(). 
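+**
+** (Illustrative aside: this path is only reachable once memory-mapped I/O
+** is enabled, e.g. after the application has run something like
+** "PRAGMA mmap_size=268435456;", which reaches sqlite3PagerSetMmapLimit()
+** above and causes Pager.bUseFetch to be set.)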
+*/ +static int pagerAcquireMapPage( + Pager *pPager, /* Pager object */ + Pgno pgno, /* Page number */ + void *pData, /* xFetch()'d data for this page */ + PgHdr **ppPage /* OUT: Acquired page object */ +){ + PgHdr *p; /* Memory mapped page to return */ + + if( pPager->pMmapFreelist ){ + *ppPage = p = pPager->pMmapFreelist; + pPager->pMmapFreelist = p->pDirty; + p->pDirty = 0; + assert( pPager->nExtra>=8 ); + memset(p->pExtra, 0, 8); + }else{ + *ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra); + if( p==0 ){ + sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1) * pPager->pageSize, pData); + return SQLITE_NOMEM_BKPT; + } + p->pExtra = (void *)&p[1]; + p->flags = PGHDR_MMAP; + p->nRef = 1; + p->pPager = pPager; + } + + assert( p->pExtra==(void *)&p[1] ); + assert( p->pPage==0 ); + assert( p->flags==PGHDR_MMAP ); + assert( p->pPager==pPager ); + assert( p->nRef==1 ); + + p->pgno = pgno; + p->pData = pData; + pPager->nMmapOut++; + + return SQLITE_OK; +} +#endif + +/* +** Release a reference to page pPg. pPg must have been returned by an +** earlier call to pagerAcquireMapPage(). +*/ +static void pagerReleaseMapPage(PgHdr *pPg){ + Pager *pPager = pPg->pPager; + pPager->nMmapOut--; + pPg->pDirty = pPager->pMmapFreelist; + pPager->pMmapFreelist = pPg; + + assert( pPager->fd->pMethods->iVersion>=3 ); + sqlite3OsUnfetch(pPager->fd, (i64)(pPg->pgno-1)*pPager->pageSize, pPg->pData); +} + +/* +** Free all PgHdr objects stored in the Pager.pMmapFreelist list. +*/ +static void pagerFreeMapHdrs(Pager *pPager){ + PgHdr *p; + PgHdr *pNext; + for(p=pPager->pMmapFreelist; p; p=pNext){ + pNext = p->pDirty; + sqlite3_free(p); + } +} + + +/* +** Shutdown the page cache. Free all memory and close all files. +** +** If a transaction was in progress when this routine is called, that +** transaction is rolled back. All outstanding pages are invalidated +** and their memory is freed. Any attempt to use a page associated +** with this page cache after this function returns will likely +** result in a coredump. +** +** This function always succeeds. If a transaction is active an attempt +** is made to roll it back. If an error occurs during the rollback +** a hot journal may be left in the filesystem but no error is returned +** to the caller. +*/ +SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3 *db){ + u8 *pTmp = (u8 *)pPager->pTmpSpace; + + assert( db || pagerUseWal(pPager)==0 ); + assert( assert_pager_state(pPager) ); + disable_simulated_io_errors(); + sqlite3BeginBenignMalloc(); + pagerFreeMapHdrs(pPager); + /* pPager->errCode = 0; */ + pPager->exclusiveMode = 0; +#ifndef SQLITE_OMIT_WAL + assert( db || pPager->pWal==0 ); + sqlite3WalClose(pPager->pWal, db, pPager->ckptSyncFlags, pPager->pageSize, + (db && (db->flags & SQLITE_NoCkptOnClose) ? 0 : pTmp) + ); + pPager->pWal = 0; +#endif + pager_reset(pPager); + if( MEMDB ){ + pager_unlock(pPager); + }else{ + /* If it is open, sync the journal file before calling UnlockAndRollback. + ** If this is not done, then an unsynced portion of the open journal + ** file may be played back into the database. If a power failure occurs + ** while this is happening, the database could become corrupt. + ** + ** If an error occurs while trying to sync the journal, shift the pager + ** into the ERROR state. This causes UnlockAndRollback to unlock the + ** database and close the journal file without attempting to roll it + ** back or finalize it. The next database user will have to do hot-journal + ** rollback before accessing the database file. 
+    */
+    if( isOpen(pPager->jfd) ){
+      pager_error(pPager, pagerSyncHotJournal(pPager));
+    }
+    pagerUnlockAndRollback(pPager);
+  }
+  sqlite3EndBenignMalloc();
+  enable_simulated_io_errors();
+  PAGERTRACE(("CLOSE %d\n", PAGERID(pPager)));
+  IOTRACE(("CLOSE %p\n", pPager))
+  sqlite3OsClose(pPager->jfd);
+  sqlite3OsClose(pPager->fd);
+  sqlite3PageFree(pTmp);
+  sqlite3PcacheClose(pPager->pPCache);
+
+#ifdef SQLITE_HAS_CODEC
+  if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec);
+#endif
+
+  assert( !pPager->aSavepoint && !pPager->pInJournal );
+  assert( !isOpen(pPager->jfd) && !isOpen(pPager->sjfd) );
+
+  sqlite3_free(pPager);
+  return SQLITE_OK;
+}
+
+#if !defined(NDEBUG) || defined(SQLITE_TEST)
+/*
+** Return the page number for page pPg.
+*/
+SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage *pPg){
+  return pPg->pgno;
+}
+#endif
+
+/*
+** Increment the reference count for page pPg.
+*/
+SQLITE_PRIVATE void sqlite3PagerRef(DbPage *pPg){
+  sqlite3PcacheRef(pPg);
+}
+
+/*
+** Sync the journal.  In other words, make sure all the pages that have
+** been written to the journal have actually reached the surface of the
+** disk and can be restored in the event of a hot-journal rollback.
+**
+** If the Pager.noSync flag is set, then this function is a no-op.
+** Otherwise, the actions required depend on the journal-mode and the
+** device characteristics of the file-system, as follows:
+**
+**   * If the journal file is an in-memory journal file, no action need
+**     be taken.
+**
+**   * Otherwise, if the device does not support the SAFE_APPEND property,
+**     then the nRec field of the most recently written journal header
+**     is updated to contain the number of journal records that have
+**     been written following it.  If the pager is operating in full-sync
+**     mode, then the journal file is synced before this field is updated.
+**
+**   * If the device does not support the SEQUENTIAL property, then
+**     the journal file is synced.
+**
+** Or, in pseudo-code:
+**
+**   if( NOT <in-memory journal> ){
+**     if( NOT SAFE_APPEND ){
+**       if( <full-sync mode> ) xSync(<journal file>);
+**       <update nRec field>
+**     }
+**     if( NOT SEQUENTIAL ) xSync(<journal file>);
+**   }
+**
+** If successful, this routine clears the PGHDR_NEED_SYNC flag of every
+** page currently held in memory before returning SQLITE_OK.  If an IO
+** error is encountered, then the IO error code is returned to the caller.
+*/
+static int syncJournal(Pager *pPager, int newHdr){
+  int rc;                         /* Return code */
+
+  assert( pPager->eState==PAGER_WRITER_CACHEMOD
+       || pPager->eState==PAGER_WRITER_DBMOD
+  );
+  assert( assert_pager_state(pPager) );
+  assert( !pagerUseWal(pPager) );
+
+  rc = sqlite3PagerExclusiveLock(pPager);
+  if( rc!=SQLITE_OK ) return rc;
+
+  if( !pPager->noSync ){
+    assert( !pPager->tempFile );
+    if( isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){
+      const int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
+      assert( isOpen(pPager->jfd) );
+
+      if( 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){
+        /* This block deals with an obscure problem.  If the last connection
+        ** that wrote to this database was operating in persistent-journal
+        ** mode, then the journal file may at this point actually be larger
+        ** than Pager.journalOff bytes.  If the next thing in the journal
+        ** file happens to be a journal-header (written as part of the
+        ** previous connection's transaction), and a crash or power-failure
+        ** occurs after nRec is updated but before this connection writes
+        ** anything else to the journal file (or commits/rolls back its
+        ** transaction), then SQLite may become confused when doing the
+        ** hot-journal rollback following recovery.  It may roll back all
+        ** of this connection's data, then proceed to rolling back the old,
+        ** out-of-date data that follows it.  Database corruption.
+        **
+        ** To work around this, if the journal file does appear to contain
+        ** a valid header following Pager.journalOff, then write a 0x00
+        ** byte to the start of it to prevent it from being recognized.
+        **
+        ** Variable iNextHdrOffset is set to the offset at which this
+        ** problematic header will occur, if it exists.  aMagic is used
+        ** as a temporary buffer to inspect the first couple of bytes of
+        ** the potential journal header.
+        */
+        i64 iNextHdrOffset;
+        u8 aMagic[8];
+        u8 zHeader[sizeof(aJournalMagic)+4];
+
+        memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
+        put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec);
+
+        iNextHdrOffset = journalHdrOffset(pPager);
+        rc = sqlite3OsRead(pPager->jfd, aMagic, 8, iNextHdrOffset);
+        if( rc==SQLITE_OK && 0==memcmp(aMagic, aJournalMagic, 8) ){
+          static const u8 zerobyte = 0;
+          rc = sqlite3OsWrite(pPager->jfd, &zerobyte, 1, iNextHdrOffset);
+        }
+        if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){
+          return rc;
+        }
+
+        /* Write the nRec value into the journal file header.  If in
+        ** full-synchronous mode, sync the journal first.  This ensures that
+        ** all data has really hit the disk before nRec is updated to mark
+        ** it as a candidate for rollback.
+        **
+        ** This is not required if the persistent media supports the
+        ** SAFE_APPEND property.  Because in this case it is not possible
+        ** for garbage data to be appended to the file, the nRec field
+        ** is populated with 0xFFFFFFFF when the journal header is written
+        ** and never needs to be updated.
+        */
+        if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){
+          PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager)));
+          IOTRACE(("JSYNC %p\n", pPager))
+          rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags);
+          if( rc!=SQLITE_OK ) return rc;
+        }
+        IOTRACE(("JHDR %p %lld\n", pPager, pPager->journalHdr));
+        rc = sqlite3OsWrite(
+            pPager->jfd, zHeader, sizeof(zHeader), pPager->journalHdr
+        );
+        if( rc!=SQLITE_OK ) return rc;
+      }
+      if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){
+        PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager)));
+        IOTRACE(("JSYNC %p\n", pPager))
+        rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags|
+          (pPager->syncFlags==SQLITE_SYNC_FULL?SQLITE_SYNC_DATAONLY:0)
+        );
+        if( rc!=SQLITE_OK ) return rc;
+      }
+
+      pPager->journalHdr = pPager->journalOff;
+      if( newHdr && 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){
+        pPager->nRec = 0;
+        rc = writeJournalHdr(pPager);
+        if( rc!=SQLITE_OK ) return rc;
+      }
+    }else{
+      pPager->journalHdr = pPager->journalOff;
+    }
+  }
+
+  /* Unless the pager is in noSync mode, the journal file was just
+  ** successfully synced.  Either way, clear the PGHDR_NEED_SYNC flag on
+  ** all pages.
+  */
+  sqlite3PcacheClearSyncFlags(pPager->pPCache);
+  pPager->eState = PAGER_WRITER_DBMOD;
+  assert( assert_pager_state(pPager) );
+  return SQLITE_OK;
+}
+
+/*
+** The argument is the first in a linked list of dirty pages connected
+** by the PgHdr.pDirty pointer.  This function writes each one of the
+** in-memory pages in the list to the database file.  The argument may
+** be NULL, representing an empty list.  In this case this function is
+** a no-op.
+**
+** The pager must hold at least a RESERVED lock when this function
+** is called.  Before writing anything to the database file, this lock
+** is upgraded to an EXCLUSIVE lock.  If the lock cannot be obtained,
+** SQLITE_BUSY is returned and no data is written to the database file.
+**
+** If the pager is a temp-file pager and the actual file-system file
+** is not yet open, it is created and opened before any data is
+** written out.
+**
+** Once the lock has been upgraded and, if necessary, the file opened,
+** the pages are written out to the database file in list order.  Writing
+** a page is skipped if it meets either of the following criteria:
+**
+**   * The page number is greater than Pager.dbSize, or
+**   * The PGHDR_DONT_WRITE flag is set on the page.
+**
+** If writing out a page causes the database file to grow, Pager.dbFileSize
+** is updated accordingly.  If page 1 is written out, then the value cached
+** in Pager.dbFileVers[] is updated to match the new value stored in
+** the database file.
+**
+** If everything is successful, SQLITE_OK is returned.  If an IO error
+** occurs, an IO error code is returned.  Or, if the EXCLUSIVE lock cannot
+** be obtained, SQLITE_BUSY is returned.
+*/
+static int pager_write_pagelist(Pager *pPager, PgHdr *pList){
+  int rc = SQLITE_OK;                  /* Return code */
+
+  /* This function is only called for rollback pagers in WRITER_DBMOD state. */
+  assert( !pagerUseWal(pPager) );
+  assert( pPager->tempFile || pPager->eState==PAGER_WRITER_DBMOD );
+  assert( pPager->eLock==EXCLUSIVE_LOCK );
+  assert( isOpen(pPager->fd) || pList->pDirty==0 );
+
+  /* If the file is a temp-file and has not yet been opened, open it now.  It
+  ** is not possible for rc to be other than SQLITE_OK if this branch
+  ** is taken, as pager_wait_on_lock() is a no-op for temp-files.
+  */
+  if( !isOpen(pPager->fd) ){
+    assert( pPager->tempFile && rc==SQLITE_OK );
+    rc = pagerOpentemp(pPager, pPager->fd, pPager->vfsFlags);
+  }
+
+  /* Before the first write, give the VFS a hint of what the final
+  ** file size will be.
+  */
+  assert( rc!=SQLITE_OK || isOpen(pPager->fd) );
+  if( rc==SQLITE_OK
+   && pPager->dbHintSize<pPager->dbSize
+   && (pList->pDirty || pList->pgno>pPager->dbHintSize)
+  ){
+    sqlite3_int64 szFile = pPager->pageSize * (sqlite3_int64)pPager->dbSize;
+    sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &szFile);
+    pPager->dbHintSize = pPager->dbSize;
+  }
+
+  while( rc==SQLITE_OK && pList ){
+    Pgno pgno = pList->pgno;
+
+    /* If there are dirty pages in the page cache with page numbers greater
+    ** than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to
+    ** make the file smaller (presumably by auto-vacuum code).  Do not write
+    ** any such pages to the file.
+    **
+    ** Also, do not write out any page that has the PGHDR_DONT_WRITE flag
+    ** set (set by sqlite3PagerDontWrite()).
+    */
+    if( pgno<=pPager->dbSize && 0==(pList->flags&PGHDR_DONT_WRITE) ){
+      i64 offset = (pgno-1)*(i64)pPager->pageSize;   /* Offset to write */
+      char *pData;                                   /* Data to write */
+
+      assert( (pList->flags&PGHDR_NEED_SYNC)==0 );
+      if( pList->pgno==1 ) pager_write_changecounter(pList);
+
+      /* Encode the database */
+      CODEC2(pPager, pList->pData, pgno, 6, return SQLITE_NOMEM_BKPT, pData);
+
+      /* Write out the page data.
*/ + rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize, offset); + + /* If page 1 was just written, update Pager.dbFileVers to match + ** the value now stored in the database file. If writing this + ** page caused the database file to grow, update dbFileSize. + */ + if( pgno==1 ){ + memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); + } + if( pgno>pPager->dbFileSize ){ + pPager->dbFileSize = pgno; + } + pPager->aStat[PAGER_STAT_WRITE]++; + + /* Update any backup objects copying the contents of this pager. */ + sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData); + + PAGERTRACE(("STORE %d page %d hash(%08x)\n", + PAGERID(pPager), pgno, pager_pagehash(pList))); + IOTRACE(("PGOUT %p %d\n", pPager, pgno)); + PAGER_INCR(sqlite3_pager_writedb_count); + }else{ + PAGERTRACE(("NOSTORE %d page %d\n", PAGERID(pPager), pgno)); + } + pager_set_pagehash(pList); + pList = pList->pDirty; + } + + return rc; +} + +/* +** Ensure that the sub-journal file is open. If it is already open, this +** function is a no-op. +** +** SQLITE_OK is returned if everything goes according to plan. An +** SQLITE_IOERR_XXX error code is returned if a call to sqlite3OsOpen() +** fails. +*/ +static int openSubJournal(Pager *pPager){ + int rc = SQLITE_OK; + if( !isOpen(pPager->sjfd) ){ + const int flags = SQLITE_OPEN_SUBJOURNAL | SQLITE_OPEN_READWRITE + | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE + | SQLITE_OPEN_DELETEONCLOSE; + int nStmtSpill = sqlite3Config.nStmtSpill; + if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){ + nStmtSpill = -1; + } + rc = sqlite3JournalOpen(pPager->pVfs, 0, pPager->sjfd, flags, nStmtSpill); + } + return rc; +} + +/* +** Append a record of the current state of page pPg to the sub-journal. +** +** If successful, set the bit corresponding to pPg->pgno in the bitvecs +** for all open savepoints before returning. +** +** This function returns SQLITE_OK if everything is successful, an IO +** error code if the attempt to write to the sub-journal fails, or +** SQLITE_NOMEM if a malloc fails while setting a bit in a savepoint +** bitvec. +*/ +static int subjournalPage(PgHdr *pPg){ + int rc = SQLITE_OK; + Pager *pPager = pPg->pPager; + if( pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ + + /* Open the sub-journal, if it has not already been opened */ + assert( pPager->useJournal ); + assert( isOpen(pPager->jfd) || pagerUseWal(pPager) ); + assert( isOpen(pPager->sjfd) || pPager->nSubRec==0 ); + assert( pagerUseWal(pPager) + || pageInJournal(pPager, pPg) + || pPg->pgno>pPager->dbOrigSize + ); + rc = openSubJournal(pPager); + + /* If the sub-journal was opened successfully (or was already open), + ** write the journal record into the file. 
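+    ** (Note, derived from the code below: each sub-journal record is a
+    ** 4-byte big-endian page number followed by pageSize bytes of page
+    ** data, so record i begins at byte offset i*(4+pageSize).)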
+    */
+    if( rc==SQLITE_OK ){
+      void *pData = pPg->pData;
+      i64 offset = (i64)pPager->nSubRec*(4+pPager->pageSize);
+      char *pData2;
+
+      CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM_BKPT, pData2);
+      PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno));
+      rc = write32bits(pPager->sjfd, offset, pPg->pgno);
+      if( rc==SQLITE_OK ){
+        rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4);
+      }
+    }
+  }
+  if( rc==SQLITE_OK ){
+    pPager->nSubRec++;
+    assert( pPager->nSavepoint>0 );
+    rc = addToSavepointBitvecs(pPager, pPg->pgno);
+  }
+  return rc;
+}
+static int subjournalPageIfRequired(PgHdr *pPg){
+  if( subjRequiresPage(pPg) ){
+    return subjournalPage(pPg);
+  }else{
+    return SQLITE_OK;
+  }
+}
+
+/*
+** This function is called by the pcache layer when it has reached some
+** soft memory limit.  The first argument is a pointer to a Pager object
+** (cast as a void*).  The pager is always 'purgeable' (not an in-memory
+** database).  The second argument is a reference to a page that is
+** currently dirty but has no outstanding references.  The page
+** is always associated with the Pager object passed as the first
+** argument.
+**
+** The job of this function is to make pPg clean by writing its contents
+** out to the database file, if possible.  This may involve syncing the
+** journal file.
+**
+** If successful, sqlite3PcacheMakeClean() is called on the page and
+** SQLITE_OK returned.  If an IO error occurs while trying to make the
+** page clean, the IO error code is returned.  If the page cannot be
+** made clean for some other reason, but no error occurs, then SQLITE_OK
+** is returned but sqlite3PcacheMakeClean() is not called.
+*/
+static int pagerStress(void *p, PgHdr *pPg){
+  Pager *pPager = (Pager *)p;
+  int rc = SQLITE_OK;
+
+  assert( pPg->pPager==pPager );
+  assert( pPg->flags&PGHDR_DIRTY );
+
+  /* The doNotSpill NOSYNC bit is set during times when doing a sync of
+  ** journal (and adding a new header) is not allowed.  This occurs
+  ** during calls to sqlite3PagerWrite() while trying to journal multiple
+  ** pages belonging to the same sector.
+  **
+  ** The doNotSpill ROLLBACK and OFF bits inhibit all cache spilling
+  ** regardless of whether or not a sync is required.  This is set during
+  ** a rollback or by user request, respectively.
+  **
+  ** Spilling is also prohibited when in an error state since that could
+  ** lead to database corruption.  In the current implementation it
+  ** is impossible for sqlite3PcacheFetch() to be called with createFlag==3
+  ** while in the error state, hence it is impossible for this routine to
+  ** be called in the error state.  Nevertheless, we include a NEVER()
+  ** test for the error state as a safeguard against future changes.
+  */
+  if( NEVER(pPager->errCode) ) return SQLITE_OK;
+  testcase( pPager->doNotSpill & SPILLFLAG_ROLLBACK );
+  testcase( pPager->doNotSpill & SPILLFLAG_OFF );
+  testcase( pPager->doNotSpill & SPILLFLAG_NOSYNC );
+  if( pPager->doNotSpill
+   && ((pPager->doNotSpill & (SPILLFLAG_ROLLBACK|SPILLFLAG_OFF))!=0
+      || (pPg->flags & PGHDR_NEED_SYNC)!=0)
+  ){
+    return SQLITE_OK;
+  }
+
+  pPg->pDirty = 0;
+  if( pagerUseWal(pPager) ){
+    /* Write a single frame for this page to the log. */
+    rc = subjournalPageIfRequired(pPg);
+    if( rc==SQLITE_OK ){
+      rc = pagerWalFrames(pPager, pPg, 0, 0);
+    }
+  }else{
+
+    /* Sync the journal file if required.
+    */
+    if( pPg->flags&PGHDR_NEED_SYNC
+     || pPager->eState==PAGER_WRITER_CACHEMOD
+    ){
+      rc = syncJournal(pPager, 1);
+    }
+
+    /* Write the contents of the page out to the database file. */
+    if( rc==SQLITE_OK ){
+      assert( (pPg->flags&PGHDR_NEED_SYNC)==0 );
+      rc = pager_write_pagelist(pPager, pPg);
+    }
+  }
+
+  /* Mark the page as clean. */
+  if( rc==SQLITE_OK ){
+    PAGERTRACE(("STRESS %d page %d\n", PAGERID(pPager), pPg->pgno));
+    sqlite3PcacheMakeClean(pPg);
+  }
+
+  return pager_error(pPager, rc);
+}
+
+/*
+** Flush all unreferenced dirty pages to disk.
+*/
+SQLITE_PRIVATE int sqlite3PagerFlush(Pager *pPager){
+  int rc = pPager->errCode;
+  if( !MEMDB ){
+    PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache);
+    assert( assert_pager_state(pPager) );
+    while( rc==SQLITE_OK && pList ){
+      PgHdr *pNext = pList->pDirty;
+      if( pList->nRef==0 ){
+        rc = pagerStress((void*)pPager, pList);
+      }
+      pList = pNext;
+    }
+  }
+
+  return rc;
+}
+
+/*
+** Allocate and initialize a new Pager object and put a pointer to it
+** in *ppPager.  The pager should eventually be freed by passing it
+** to sqlite3PagerClose().
+**
+** The zFilename argument is the path to the database file to open.
+** If zFilename is NULL then a randomly-named temporary file is created
+** and used as the file to be cached.  Temporary files are deleted
+** automatically when they are closed.  If zFilename is ":memory:" then
+** all information is held in cache.  It is never written to disk.
+** This can be used to implement an in-memory database.
+**
+** The nExtra parameter specifies the number of bytes of space allocated
+** along with each page reference.  This space is available to the user
+** via the sqlite3PagerGetExtra() API.  When a new page is allocated, the
+** first 8 bytes of this space are zeroed but the remainder is uninitialized.
+** (The extra space is used by btree as the MemPage object.)
+**
+** The flags argument is used to specify properties that affect the
+** operation of the pager.  It should be passed some bitwise combination
+** of the PAGER_* flags.
+**
+** The vfsFlags parameter is a bitmask to pass to the flags parameter
+** of the xOpen() method of the supplied VFS when opening files.
+**
+** If the pager object is allocated and the specified file opened
+** successfully, SQLITE_OK is returned and *ppPager set to point to
+** the new pager object.  If an error occurs, *ppPager is set to NULL
+** and an error code returned.  This function may return SQLITE_NOMEM
+** (sqlite3Malloc() is used to allocate memory), SQLITE_CANTOPEN or
+** various SQLITE_IO_XXX errors.
+*/
+SQLITE_PRIVATE int sqlite3PagerOpen(
+  sqlite3_vfs *pVfs,       /* The virtual file system to use */
+  Pager **ppPager,         /* OUT: Return the Pager structure here */
+  const char *zFilename,   /* Name of the database file to open */
+  int nExtra,              /* Extra bytes appended to each in-memory page */
+  int flags,               /* flags controlling this file */
+  int vfsFlags,            /* flags passed through to sqlite3_vfs.xOpen() */
+  void (*xReinit)(DbPage*) /* Function to reinitialize pages */
+){
+  u8 *pPtr;
+  Pager *pPager = 0;       /* Pager object to allocate and return */
+  int rc = SQLITE_OK;      /* Return code */
+  int tempFile = 0;        /* True for temp files (incl.
in-memory files) */ + int memDb = 0; /* True if this is an in-memory file */ + int readOnly = 0; /* True if this is a read-only file */ + int journalFileSize; /* Bytes to allocate for each journal fd */ + char *zPathname = 0; /* Full path to database file */ + int nPathname = 0; /* Number of bytes in zPathname */ + int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; /* False to omit journal */ + int pcacheSize = sqlite3PcacheSize(); /* Bytes to allocate for PCache */ + u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */ + const char *zUri = 0; /* URI args to copy */ + int nUri = 0; /* Number of bytes of URI args at *zUri */ + + /* Figure out how much space is required for each journal file-handle + ** (there are two of them, the main journal and the sub-journal). */ + journalFileSize = ROUND8(sqlite3JournalSize(pVfs)); + + /* Set the output variable to NULL in case an error occurs. */ + *ppPager = 0; + +#ifndef SQLITE_OMIT_MEMORYDB + if( flags & PAGER_MEMORY ){ + memDb = 1; + if( zFilename && zFilename[0] ){ + zPathname = sqlite3DbStrDup(0, zFilename); + if( zPathname==0 ) return SQLITE_NOMEM_BKPT; + nPathname = sqlite3Strlen30(zPathname); + zFilename = 0; + } + } +#endif + + /* Compute and store the full pathname in an allocated buffer pointed + ** to by zPathname, length nPathname. Or, if this is a temporary file, + ** leave both nPathname and zPathname set to 0. + */ + if( zFilename && zFilename[0] ){ + const char *z; + nPathname = pVfs->mxPathname+1; + zPathname = sqlite3DbMallocRaw(0, nPathname*2); + if( zPathname==0 ){ + return SQLITE_NOMEM_BKPT; + } + zPathname[0] = 0; /* Make sure initialized even if FullPathname() fails */ + rc = sqlite3OsFullPathname(pVfs, zFilename, nPathname, zPathname); + nPathname = sqlite3Strlen30(zPathname); + z = zUri = &zFilename[sqlite3Strlen30(zFilename)+1]; + while( *z ){ + z += sqlite3Strlen30(z)+1; + z += sqlite3Strlen30(z)+1; + } + nUri = (int)(&z[1] - zUri); + assert( nUri>=0 ); + if( rc==SQLITE_OK && nPathname+8>pVfs->mxPathname ){ + /* This branch is taken when the journal path required by + ** the database being opened will be more than pVfs->mxPathname + ** bytes in length. This means the database cannot be opened, + ** as it will not be possible to open the journal file or even + ** check for a hot-journal before reading. + */ + rc = SQLITE_CANTOPEN_BKPT; + } + if( rc!=SQLITE_OK ){ + sqlite3DbFree(0, zPathname); + return rc; + } + } + + /* Allocate memory for the Pager structure, PCache object, the + ** three file descriptors, the database file name and the journal + ** file name. 
The layout in memory is as follows: + ** + ** Pager object (sizeof(Pager) bytes) + ** PCache object (sqlite3PcacheSize() bytes) + ** Database file handle (pVfs->szOsFile bytes) + ** Sub-journal file handle (journalFileSize bytes) + ** Main journal file handle (journalFileSize bytes) + ** Database file name (nPathname+1 bytes) + ** Journal file name (nPathname+8+1 bytes) + */ + pPtr = (u8 *)sqlite3MallocZero( + ROUND8(sizeof(*pPager)) + /* Pager structure */ + ROUND8(pcacheSize) + /* PCache object */ + ROUND8(pVfs->szOsFile) + /* The main db file */ + journalFileSize * 2 + /* The two journal files */ + nPathname + 1 + nUri + /* zFilename */ + nPathname + 8 + 2 /* zJournal */ +#ifndef SQLITE_OMIT_WAL + + nPathname + 4 + 2 /* zWal */ +#endif + ); + assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) ); + if( !pPtr ){ + sqlite3DbFree(0, zPathname); + return SQLITE_NOMEM_BKPT; + } + pPager = (Pager*)(pPtr); + pPager->pPCache = (PCache*)(pPtr += ROUND8(sizeof(*pPager))); + pPager->fd = (sqlite3_file*)(pPtr += ROUND8(pcacheSize)); + pPager->sjfd = (sqlite3_file*)(pPtr += ROUND8(pVfs->szOsFile)); + pPager->jfd = (sqlite3_file*)(pPtr += journalFileSize); + pPager->zFilename = (char*)(pPtr += journalFileSize); + assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) ); + + /* Fill in the Pager.zFilename and Pager.zJournal buffers, if required. */ + if( zPathname ){ + assert( nPathname>0 ); + pPager->zJournal = (char*)(pPtr += nPathname + 1 + nUri); + memcpy(pPager->zFilename, zPathname, nPathname); + if( nUri ) memcpy(&pPager->zFilename[nPathname+1], zUri, nUri); + memcpy(pPager->zJournal, zPathname, nPathname); + memcpy(&pPager->zJournal[nPathname], "-journal\000", 8+2); + sqlite3FileSuffix3(pPager->zFilename, pPager->zJournal); +#ifndef SQLITE_OMIT_WAL + pPager->zWal = &pPager->zJournal[nPathname+8+1]; + memcpy(pPager->zWal, zPathname, nPathname); + memcpy(&pPager->zWal[nPathname], "-wal\000", 4+1); + sqlite3FileSuffix3(pPager->zFilename, pPager->zWal); +#endif + sqlite3DbFree(0, zPathname); + } + pPager->pVfs = pVfs; + pPager->vfsFlags = vfsFlags; + + /* Open the pager file. + */ + if( zFilename && zFilename[0] ){ + int fout = 0; /* VFS flags returned by xOpen() */ + rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); + assert( !memDb ); + readOnly = (fout&SQLITE_OPEN_READONLY); + + /* If the file was successfully opened for read/write access, + ** choose a default page size in case we have to create the + ** database file. The default page size is the maximum of: + ** + ** + SQLITE_DEFAULT_PAGE_SIZE, + ** + The value returned by sqlite3OsSectorSize() + ** + The largest page size that can be written atomically. 
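+    **
+    ** (Worked example, assuming a build with SQLITE_DEFAULT_PAGE_SIZE of
+    ** 1024: on media reporting a 4096-byte sector size, szPageDflt is
+    ** raised to 4096 below; had the reported sector size exceeded
+    ** SQLITE_MAX_DEFAULT_PAGE_SIZE, it would instead be capped at that
+    ** limit.)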
+    */
+    if( rc==SQLITE_OK ){
+      int iDc = sqlite3OsDeviceCharacteristics(pPager->fd);
+      if( !readOnly ){
+        setSectorSize(pPager);
+        assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE);
+        if( szPageDflt<pPager->sectorSize ){
+          if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){
+            szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE;
+          }else{
+            szPageDflt = (u32)pPager->sectorSize;
+          }
+        }
+#ifdef SQLITE_ENABLE_ATOMIC_WRITE
+        {
+          int ii;
+          assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
+          assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
+          assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536);
+          for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){
+            if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){
+              szPageDflt = ii;
+            }
+          }
+        }
+#endif
+      }
+      pPager->noLock = sqlite3_uri_boolean(zFilename, "nolock", 0);
+      if( (iDc & SQLITE_IOCAP_IMMUTABLE)!=0
+       || sqlite3_uri_boolean(zFilename, "immutable", 0) ){
+          vfsFlags |= SQLITE_OPEN_READONLY;
+          goto act_like_temp_file;
+      }
+    }
+  }else{
+    /* If a temporary file is requested, it is not opened immediately.
+    ** In this case we accept the default page size and delay actually
+    ** opening the file until the first call to OsWrite().
+    **
+    ** This branch is also run for an in-memory database.  An in-memory
+    ** database is the same as a temp-file that is never written out to
+    ** disk and uses an in-memory rollback journal.
+    **
+    ** This branch also runs for files marked as immutable.
+    */
+act_like_temp_file:
+    tempFile = 1;
+    pPager->eState = PAGER_READER;     /* Pretend we already have a lock */
+    pPager->eLock = EXCLUSIVE_LOCK;    /* Pretend we are in EXCLUSIVE mode */
+    pPager->noLock = 1;                /* Do no locking */
+    readOnly = (vfsFlags&SQLITE_OPEN_READONLY);
+  }
+
+  /* The following call to PagerSetPagesize() serves to set the value of
+  ** Pager.pageSize and to allocate the Pager.pTmpSpace buffer.
+  */
+  if( rc==SQLITE_OK ){
+    assert( pPager->memDb==0 );
+    rc = sqlite3PagerSetPagesize(pPager, &szPageDflt, -1);
+    testcase( rc!=SQLITE_OK );
+  }
+
+  /* Initialize the PCache object. */
+  if( rc==SQLITE_OK ){
+    nExtra = ROUND8(nExtra);
+    assert( nExtra>=8 && nExtra<1000 );
+    rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb,
+        !memDb?pagerStress:0, (void *)pPager, pPager->pPCache);
+  }
+
+  /* If an error occurred above, free the Pager structure and close the file.
+  */
+  if( rc!=SQLITE_OK ){
+    sqlite3OsClose(pPager->fd);
+    sqlite3PageFree(pPager->pTmpSpace);
+    sqlite3_free(pPager);
+    return rc;
+  }
+
+  PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename));
+  IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename))
+
+  pPager->useJournal = (u8)useJournal;
+  /* pPager->stmtOpen = 0; */
+  /* pPager->stmtInUse = 0; */
+  /* pPager->nRef = 0; */
+  /* pPager->stmtSize = 0; */
+  /* pPager->stmtJSize = 0; */
+  /* pPager->nPage = 0; */
+  pPager->mxPgno = SQLITE_MAX_PAGE_COUNT;
+  /* pPager->state = PAGER_UNLOCK; */
+  /* pPager->errMask = 0; */
+  pPager->tempFile = (u8)tempFile;
+  assert( tempFile==PAGER_LOCKINGMODE_NORMAL
+          || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE );
+  assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 );
+  pPager->exclusiveMode = (u8)tempFile;
+  pPager->changeCountDone = pPager->tempFile;
+  pPager->memDb = (u8)memDb;
+  pPager->readOnly = (u8)readOnly;
+  assert( useJournal || pPager->tempFile );
+  pPager->noSync = pPager->tempFile;
+  if( pPager->noSync ){
+    assert( pPager->fullSync==0 );
+    assert( pPager->extraSync==0 );
+    assert( pPager->syncFlags==0 );
+    assert( pPager->walSyncFlags==0 );
+    assert( pPager->ckptSyncFlags==0 );
+  }else{
+    pPager->fullSync = 1;
+    pPager->extraSync = 0;
+    pPager->syncFlags = SQLITE_SYNC_NORMAL;
+    pPager->walSyncFlags = SQLITE_SYNC_NORMAL | WAL_SYNC_TRANSACTIONS;
+    pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL;
+  }
+  /* pPager->pFirst = 0; */
+  /* pPager->pFirstSynced = 0; */
+  /* pPager->pLast = 0; */
+  pPager->nExtra = (u16)nExtra;
+  pPager->journalSizeLimit = SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT;
+  assert( isOpen(pPager->fd) || tempFile );
+  setSectorSize(pPager);
+  if( !useJournal ){
+    pPager->journalMode = PAGER_JOURNALMODE_OFF;
+  }else if( memDb ){
+    pPager->journalMode = PAGER_JOURNALMODE_MEMORY;
+  }
+  /* pPager->xBusyHandler = 0; */
+  /* pPager->pBusyHandlerArg = 0; */
+  pPager->xReiniter = xReinit;
+  setGetterMethod(pPager);
+  /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */
+  /* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */
+
+  *ppPager = pPager;
+  return SQLITE_OK;
+}
+
+
+/* Verify that the database file has not been deleted or renamed out from
+** under the pager.  Return SQLITE_OK if the database is still where it ought
+** to be on disk.  Return non-zero (SQLITE_READONLY_DBMOVED or some other error
+** code from sqlite3OsAccess()) if the database has gone missing.
+*/
+static int databaseIsUnmoved(Pager *pPager){
+  int bHasMoved = 0;
+  int rc;
+
+  if( pPager->tempFile ) return SQLITE_OK;
+  if( pPager->dbSize==0 ) return SQLITE_OK;
+  assert( pPager->zFilename && pPager->zFilename[0] );
+  rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved);
+  if( rc==SQLITE_NOTFOUND ){
+    /* If the HAS_MOVED file-control is unimplemented, assume that the file
+    ** has not been moved.  That is the historical behavior of SQLite: prior to
+    ** version 3.8.3, it never checked */
+    rc = SQLITE_OK;
+  }else if( rc==SQLITE_OK && bHasMoved ){
+    rc = SQLITE_READONLY_DBMOVED;
+  }
+  return rc;
+}
+
+
+/*
+** This function is called after transitioning from PAGER_UNLOCK to
+** PAGER_SHARED state.  It tests if there is a hot journal present in
+** the file-system for the given pager.  A hot journal is one that
+** needs to be played back.  According to this function, a hot-journal
+** file exists if the following criteria are met:
+**
+**   * The journal file exists in the file system, and
+**   * No process holds a RESERVED or greater lock on the database file, and
+**   * The database file itself is greater than 0 bytes in size, and
+**   * The first byte of the journal file exists and is not 0x00.
+**
+** If the current size of the database file is 0 but a journal file
+** exists, that is probably an old journal left over from a prior
+** database with the same name.  In this case the journal file is
+** just deleted using OsDelete, *pExists is set to 0 and SQLITE_OK
+** is returned.
+**
+** This routine does not check if there is a master journal filename
+** at the end of the file.  If there is, and that master journal file
+** does not exist, then the journal file is not really hot.  In this
+** case this routine will return a false-positive.  The pager_playback()
+** routine will discover that the journal file is not really hot and
+** will not roll it back.
+**
+** If a hot-journal file is found to exist, *pExists is set to 1 and
+** SQLITE_OK returned.  If no hot-journal file is present, *pExists is
+** set to 0 and SQLITE_OK returned.  If an IO error occurs while trying
+** to determine whether or not a hot-journal file exists, the IO error
+** code is returned and the value of *pExists is undefined.
+*/
+static int hasHotJournal(Pager *pPager, int *pExists){
+  sqlite3_vfs * const pVfs = pPager->pVfs;
+  int rc = SQLITE_OK;           /* Return code */
+  int exists = 1;               /* True if a journal file is present */
+  int jrnlOpen = !!isOpen(pPager->jfd);
+
+  assert( pPager->useJournal );
+  assert( isOpen(pPager->fd) );
+  assert( pPager->eState==PAGER_OPEN );
+
+  assert( jrnlOpen==0 || ( sqlite3OsDeviceCharacteristics(pPager->jfd) &
+    SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
+  ));
+
+  *pExists = 0;
+  if( !jrnlOpen ){
+    rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists);
+  }
+  if( rc==SQLITE_OK && exists ){
+    int locked = 0;             /* True if some process holds a RESERVED lock */
+
+    /* Race condition here:  Another process might have been holding the
+    ** RESERVED lock and have a journal open at the sqlite3OsAccess()
+    ** call above, but then delete the journal and drop the lock before
+    ** we get to the following sqlite3OsCheckReservedLock() call.  If that
+    ** is the case, this routine might think there is a hot journal when
+    ** in fact there is none.  This results in a false-positive which will
+    ** be dealt with by the playback routine.  Ticket #3883.
+    */
+    rc = sqlite3OsCheckReservedLock(pPager->fd, &locked);
+    if( rc==SQLITE_OK && !locked ){
+      Pgno nPage;               /* Number of pages in database file */
+
+      assert( pPager->tempFile==0 );
+      rc = pagerPagecount(pPager, &nPage);
+      if( rc==SQLITE_OK ){
+        /* If the database is zero pages in size, that means that either (1) the
+        ** journal is a remnant from a prior database with the same name where
+        ** the database file but not the journal was deleted, or (2) the initial
+        ** transaction that populates a new database is being rolled back.
+        ** In either case, the journal file can be deleted.  However, take care
+        ** not to delete the journal file if it is already open due to
+        ** journal_mode=PERSIST.
+        */
+        if( nPage==0 && !jrnlOpen ){
+          sqlite3BeginBenignMalloc();
+          if( pagerLockDb(pPager, RESERVED_LOCK)==SQLITE_OK ){
+            sqlite3OsDelete(pVfs, pPager->zJournal, 0);
+            if( !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK);
+          }
+          sqlite3EndBenignMalloc();
+        }else{
+          /* The journal file exists and no other connection has a reserved
+          ** or greater lock on the database file.  Now check that there is
+          ** at least one non-zero byte at the start of the journal file.
+          ** If there is, then we consider this journal to be hot.  If not,
+          ** it can be ignored.
+          */
+          if( !jrnlOpen ){
+            int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL;
+            rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f);
+          }
+          if( rc==SQLITE_OK ){
+            u8 first = 0;
+            rc = sqlite3OsRead(pPager->jfd, (void *)&first, 1, 0);
+            if( rc==SQLITE_IOERR_SHORT_READ ){
+              rc = SQLITE_OK;
+            }
+            if( !jrnlOpen ){
+              sqlite3OsClose(pPager->jfd);
+            }
+            *pExists = (first!=0);
+          }else if( rc==SQLITE_CANTOPEN ){
+            /* If we cannot open the rollback journal file in order to see if
+            ** it has a zero header, that might be due to an I/O error, or
+            ** it might be due to the race condition described above and in
+            ** ticket #3883.  Either way, assume that the journal is hot.
+            ** This might be a false positive.  But if it is, then the
+            ** automatic journal playback and recovery mechanism will deal
+            ** with it under an EXCLUSIVE lock where we do not need to
+            ** worry so much with race conditions.
+            */
+            *pExists = 1;
+            rc = SQLITE_OK;
+          }
+        }
+      }
+    }
+  }
+
+  return rc;
+}
+
+/*
+** This function is called to obtain a shared lock on the database file.
+** It is illegal to call sqlite3PagerGet() until after this function
+** has been successfully called.  If a shared-lock is already held when
+** this function is called, it is a no-op.
+**
+** The following operations are also performed by this function.
+**
+**   1) If the pager is currently in PAGER_OPEN state (no lock held
+**      on the database file), then an attempt is made to obtain a
+**      SHARED lock on the database file.  Immediately after obtaining
+**      the SHARED lock, the file-system is checked for a hot-journal,
+**      which is played back if present.  Following any hot-journal
+**      rollback, the contents of the cache are validated by checking
+**      the 'change-counter' field of the database file header and
+**      discarded if they are found to be invalid.
+**
+**   2) If the pager is running in exclusive-mode, and there are currently
+**      no outstanding references to any pages, and is in the error state,
+**      then an attempt is made to clear the error state by discarding
+**      the contents of the page cache and rolling back any open journal
+**      file.
+**
+** If everything is successful, SQLITE_OK is returned.  If an IO error
+** occurs while locking the database, checking for a hot-journal file or
+** rolling back a journal file, the IO error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){
+  int rc = SQLITE_OK;                /* Return code */
+
+  /* This routine is only called from b-tree and only when there are no
+  ** outstanding pages.  This implies that the pager state should either
+  ** be OPEN or READER.  READER is only possible if the pager is or was in
+  ** exclusive access mode.
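+  **
+  ** (Illustrative summary of the locking sequence performed below:
+  ** NO_LOCK -> SHARED; if a hot journal is detected, the lock is escalated
+  ** directly to EXCLUSIVE for playback, then dropped back to SHARED unless
+  ** locking_mode=exclusive is in effect.)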
*/ + assert( sqlite3PcacheRefCount(pPager->pPCache)==0 ); + assert( assert_pager_state(pPager) ); + assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); + assert( pPager->errCode==SQLITE_OK ); + + if( !pagerUseWal(pPager) && pPager->eState==PAGER_OPEN ){ + int bHotJournal = 1; /* True if there exists a hot journal-file */ + + assert( !MEMDB ); + assert( pPager->tempFile==0 || pPager->eLock==EXCLUSIVE_LOCK ); + + rc = pager_wait_on_lock(pPager, SHARED_LOCK); + if( rc!=SQLITE_OK ){ + assert( pPager->eLock==NO_LOCK || pPager->eLock==UNKNOWN_LOCK ); + goto failed; + } + + /* If a journal file exists, and there is no RESERVED lock on the + ** database file, then it either needs to be played back or deleted. + */ + if( pPager->eLock<=SHARED_LOCK ){ + rc = hasHotJournal(pPager, &bHotJournal); + } + if( rc!=SQLITE_OK ){ + goto failed; + } + if( bHotJournal ){ + if( pPager->readOnly ){ + rc = SQLITE_READONLY_ROLLBACK; + goto failed; + } + + /* Get an EXCLUSIVE lock on the database file. At this point it is + ** important that a RESERVED lock is not obtained on the way to the + ** EXCLUSIVE lock. If it were, another process might open the + ** database file, detect the RESERVED lock, and conclude that the + ** database is safe to read while this process is still rolling the + ** hot-journal back. + ** + ** Because the intermediate RESERVED lock is not requested, any + ** other process attempting to access the database file will get to + ** this point in the code and fail to obtain its own EXCLUSIVE lock + ** on the database file. + ** + ** Unless the pager is in locking_mode=exclusive mode, the lock is + ** downgraded to SHARED_LOCK before this function returns. + */ + rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); + if( rc!=SQLITE_OK ){ + goto failed; + } + + /* If it is not already open and the file exists on disk, open the + ** journal for read/write access. Write access is required because + ** in exclusive-access mode the file descriptor will be kept open + ** and possibly used for a transaction later on. Also, write-access + ** is usually required to finalize the journal in journal_mode=persist + ** mode (and also for journal_mode=truncate on some systems). + ** + ** If the journal does not exist, it usually means that some + ** other connection managed to get in and roll it back before + ** this connection obtained the exclusive lock above. Or, it + ** may mean that the pager was in the error-state when this + ** function was called and the journal file does not exist. + */ + if( !isOpen(pPager->jfd) ){ + sqlite3_vfs * const pVfs = pPager->pVfs; + int bExists; /* True if journal file exists */ + rc = sqlite3OsAccess( + pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &bExists); + if( rc==SQLITE_OK && bExists ){ + int fout = 0; + int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL; + assert( !pPager->tempFile ); + rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &fout); + assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); + if( rc==SQLITE_OK && fout&SQLITE_OPEN_READONLY ){ + rc = SQLITE_CANTOPEN_BKPT; + sqlite3OsClose(pPager->jfd); + } + } + } + + /* Playback and delete the journal. Drop the database write + ** lock and reacquire the read lock. Purge the cache before + ** playing back the hot-journal so that we don't end up with + ** an inconsistent cache. Sync the hot journal before playing + ** it back since the process that crashed and left the hot journal + ** probably did not sync it and we are required to always sync + ** the journal before playing it back. 
+ */ + if( isOpen(pPager->jfd) ){ + assert( rc==SQLITE_OK ); + rc = pagerSyncHotJournal(pPager); + if( rc==SQLITE_OK ){ + rc = pager_playback(pPager, !pPager->tempFile); + pPager->eState = PAGER_OPEN; + } + }else if( !pPager->exclusiveMode ){ + pagerUnlockDb(pPager, SHARED_LOCK); + } + + if( rc!=SQLITE_OK ){ + /* This branch is taken if an error occurs while trying to open + ** or roll back a hot-journal while holding an EXCLUSIVE lock. The + ** pager_unlock() routine will be called before returning to unlock + ** the file. If the unlock attempt fails, then Pager.eLock must be + ** set to UNKNOWN_LOCK (see the comment above the #define for + ** UNKNOWN_LOCK above for an explanation). + ** + ** In order to get pager_unlock() to do this, set Pager.eState to + ** PAGER_ERROR now. This is not actually counted as a transition + ** to ERROR state in the state diagram at the top of this file, + ** since we know that the same call to pager_unlock() will very + ** shortly transition the pager object to the OPEN state. Calling + ** assert_pager_state() would fail now, as it should not be possible + ** to be in ERROR state when there are zero outstanding page + ** references. + */ + pager_error(pPager, rc); + goto failed; + } + + assert( pPager->eState==PAGER_OPEN ); + assert( (pPager->eLock==SHARED_LOCK) + || (pPager->exclusiveMode && pPager->eLock>SHARED_LOCK) + ); + } + + if( !pPager->tempFile && pPager->hasHeldSharedLock ){ + /* The shared-lock has just been acquired then check to + ** see if the database has been modified. If the database has changed, + ** flush the cache. The hasHeldSharedLock flag prevents this from + ** occurring on the very first access to a file, in order to save a + ** single unnecessary sqlite3OsRead() call at the start-up. + ** + ** Database changes are detected by looking at 15 bytes beginning + ** at offset 24 into the file. The first 4 of these 16 bytes are + ** a 32-bit counter that is incremented with each change. The + ** other bytes change randomly with each file change when + ** a codec is in use. + ** + ** There is a vanishingly small chance that a change will not be + ** detected. The chance of an undetected change is so small that + ** it can be neglected. + */ + Pgno nPage = 0; + char dbFileVers[sizeof(pPager->dbFileVers)]; + + rc = pagerPagecount(pPager, &nPage); + if( rc ) goto failed; + + if( nPage>0 ){ + IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); + rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24); + if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ + goto failed; + } + }else{ + memset(dbFileVers, 0, sizeof(dbFileVers)); + } + + if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ + pager_reset(pPager); + + /* Unmap the database file. It is possible that external processes + ** may have truncated the database file and then extended it back + ** to its original size while this process was not holding a lock. + ** In this case there may exist a Pager.pMap mapping that appears + ** to be the right size but is not actually valid. Avoid this + ** possibility by unmapping the db here. */ + if( USEFETCH(pPager) ){ + sqlite3OsUnfetch(pPager->fd, 0, 0); + } + } + } + + /* If there is a WAL file in the file-system, open this database in WAL + ** mode. Otherwise, the following function call is a no-op. 
+    */
+    rc = pagerOpenWalIfPresent(pPager);
+#ifndef SQLITE_OMIT_WAL
+    assert( pPager->pWal==0 || rc==SQLITE_OK );
+#endif
+  }
+
+  if( pagerUseWal(pPager) ){
+    assert( rc==SQLITE_OK );
+    rc = pagerBeginReadTransaction(pPager);
+  }
+
+  if( pPager->tempFile==0 && pPager->eState==PAGER_OPEN && rc==SQLITE_OK ){
+    rc = pagerPagecount(pPager, &pPager->dbSize);
+  }
+
+ failed:
+  if( rc!=SQLITE_OK ){
+    assert( !MEMDB );
+    pager_unlock(pPager);
+    assert( pPager->eState==PAGER_OPEN );
+  }else{
+    pPager->eState = PAGER_READER;
+    pPager->hasHeldSharedLock = 1;
+  }
+  return rc;
+}
+
+/*
+** If the reference count has reached zero, rollback any active
+** transaction and unlock the pager.
+**
+** Except, in locking_mode=EXCLUSIVE, when there is nothing in
+** the rollback journal, the unlock is not performed and there is
+** nothing to rollback, so this routine is a no-op.
+*/
+static void pagerUnlockIfUnused(Pager *pPager){
+  if( pPager->nMmapOut==0 && (sqlite3PcacheRefCount(pPager->pPCache)==0) ){
+    pagerUnlockAndRollback(pPager);
+  }
+}
+
+/*
+** The page getter methods each try to acquire a reference to a
+** page with page number pgno. If the requested reference is
+** successfully obtained, it is copied to *ppPage and SQLITE_OK returned.
+**
+** There are different implementations of the getter method depending
+** on the current state of the pager.
+**
+**     getPageNormal()         --  The normal getter
+**     getPageError()          --  Used if the pager is in an error state
+**     getPageMmap()           --  Used if memory-mapped I/O is enabled
+**
+** If the requested page is already in the cache, it is returned.
+** Otherwise, a new page object is allocated and populated with data
+** read from the database file. In some cases, the pcache module may
+** choose not to allocate a new page object and may reuse an existing
+** object with no outstanding references.
+**
+** The extra data appended to a page is always initialized to zeros the
+** first time a page is loaded into memory. If the page requested is
+** already in the cache when this function is called, then the extra
+** data is left as it was when the page object was last used.
+**
+** If the database image is smaller than the requested page or if
+** the flags parameter contains the PAGER_GET_NOCONTENT bit and the
+** requested page is not already stored in the cache, then no
+** actual disk read occurs. In this case the memory image of the
+** page is initialized to all zeros.
+**
+** If PAGER_GET_NOCONTENT is true, it means that we do not care about
+** the contents of the page. This occurs in two scenarios:
+**
+**   a) When reading a free-list leaf page from the database, and
+**
+**   b) When a savepoint is being rolled back and we need to load
+**      a new page into the cache to be filled with the data read
+**      from the savepoint journal.
+**
+** If PAGER_GET_NOCONTENT is true, then the data returned is zeroed instead
+** of being read from the database. Additionally, the bits corresponding
+** to pgno in Pager.pInJournal (bitvec of pages already written to the
+** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open
+** savepoints are set. This means if the page is made writable at any
+** point in the future, using a call to sqlite3PagerWrite(), its contents
+** will not be journaled. This saves IO.
+**
+** The acquisition might fail for several reasons. In all cases,
+** an appropriate error code is returned and *ppPage is set to NULL.
+**
+** See also sqlite3PagerLookup().
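[Editor's note: the three getter implementations listed above are selected by storing a function pointer (Pager.xGet, re-pointed by setGetterMethod() elsewhere in this file) rather than by branching on every fetch. A stripped-down standalone analogue of that dispatch pattern, with invented MiniPager names:]

typedef struct MiniPager MiniPager;
struct MiniPager {
  int errCode;                                     /* nonzero => error state */
  int (*xGet)(MiniPager*, unsigned pgno, void**);  /* active getter */
};

static int getNormal(MiniPager *p, unsigned pgno, void **pp){
  (void)p; (void)pgno;
  *pp = (void*)"page";              /* stand-in for a real cache/disk read */
  return 0;
}
static int getError(MiniPager *p, unsigned pgno, void **pp){
  (void)pgno;
  *pp = 0;
  return p->errCode;                /* always report the sticky error */
}
/* Re-point xGet whenever the error state changes, as setGetterMethod()
** does for the real pager; callers then just invoke p->xGet(...). */
static void setGetter(MiniPager *p){
  p->xGet = p->errCode ? getError : getNormal;
}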
+** Both this routine and Lookup() attempt
+** to find a page in the in-memory cache first. If the page is not already
+** in memory, this routine goes to disk to read it in whereas Lookup()
+** just returns 0. This routine acquires a read-lock the first time it
+** has to go to disk, and could also playback an old journal if necessary.
+** Since Lookup() never goes to disk, it never has to deal with locks
+** or journal files.
+*/
+static int getPageNormal(
+  Pager *pPager,      /* The pager open on the database file */
+  Pgno pgno,          /* Page number to fetch */
+  DbPage **ppPage,    /* Write a pointer to the page here */
+  int flags           /* PAGER_GET_XXX flags */
+){
+  int rc = SQLITE_OK;
+  PgHdr *pPg;
+  u8 noContent;                   /* True if PAGER_GET_NOCONTENT is set */
+  sqlite3_pcache_page *pBase;
+
+  assert( pPager->errCode==SQLITE_OK );
+  assert( pPager->eState>=PAGER_READER );
+  assert( assert_pager_state(pPager) );
+  assert( pPager->hasHeldSharedLock==1 );
+
+  if( pgno==0 ) return SQLITE_CORRUPT_BKPT;
+  pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3);
+  if( pBase==0 ){
+    pPg = 0;
+    rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase);
+    if( rc!=SQLITE_OK ) goto pager_acquire_err;
+    if( pBase==0 ){
+      rc = SQLITE_NOMEM_BKPT;
+      goto pager_acquire_err;
+    }
+  }
+  pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase);
+  assert( pPg==(*ppPage) );
+  assert( pPg->pgno==pgno );
+  assert( pPg->pPager==pPager || pPg->pPager==0 );
+
+  noContent = (flags & PAGER_GET_NOCONTENT)!=0;
+  if( pPg->pPager && !noContent ){
+    /* In this case the pcache already contains an initialized copy of
+    ** the page. Return without further ado. */
+    assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) );
+    pPager->aStat[PAGER_STAT_HIT]++;
+    return SQLITE_OK;
+
+  }else{
+    /* The pager cache has created a new page. Its content needs to
+    ** be initialized. But first some error checks:
+    **
+    ** (1) The maximum page number is 2^31
+    ** (2) Never try to fetch the locking page
+    */
+    if( pgno>PAGER_MAX_PGNO || pgno==PAGER_MJ_PGNO(pPager) ){
+      rc = SQLITE_CORRUPT_BKPT;
+      goto pager_acquire_err;
+    }
+
+    pPg->pPager = pPager;
+
+    assert( !isOpen(pPager->fd) || !MEMDB );
+    if( !isOpen(pPager->fd) || pPager->dbSize<pgno || noContent ){
+      if( pgno>pPager->mxPgno ){
+        rc = SQLITE_FULL;
+        goto pager_acquire_err;
+      }
+      if( noContent ){
+        /* Failure to set the bits in the InJournal bit-vectors is benign.
+        ** It merely means that we might do some extra work to journal a
+        ** page that does not need to be journaled. Nevertheless, be sure
+        ** to test the case where a malloc error occurs while trying to set
+        ** a bit in a bit vector.
+ */ + sqlite3BeginBenignMalloc(); + if( pgno<=pPager->dbOrigSize ){ + TESTONLY( rc = ) sqlite3BitvecSet(pPager->pInJournal, pgno); + testcase( rc==SQLITE_NOMEM ); + } + TESTONLY( rc = ) addToSavepointBitvecs(pPager, pgno); + testcase( rc==SQLITE_NOMEM ); + sqlite3EndBenignMalloc(); + } + memset(pPg->pData, 0, pPager->pageSize); + IOTRACE(("ZERO %p %d\n", pPager, pgno)); + }else{ + u32 iFrame = 0; /* Frame to read from WAL file */ + if( pagerUseWal(pPager) ){ + rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); + if( rc!=SQLITE_OK ) goto pager_acquire_err; + } + assert( pPg->pPager==pPager ); + pPager->aStat[PAGER_STAT_MISS]++; + rc = readDbPage(pPg, iFrame); + if( rc!=SQLITE_OK ){ + goto pager_acquire_err; + } + } + pager_set_pagehash(pPg); + } + return SQLITE_OK; + +pager_acquire_err: + assert( rc!=SQLITE_OK ); + if( pPg ){ + sqlite3PcacheDrop(pPg); + } + pagerUnlockIfUnused(pPager); + *ppPage = 0; + return rc; +} + +#if SQLITE_MAX_MMAP_SIZE>0 +/* The page getter for when memory-mapped I/O is enabled */ +static int getPageMMap( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int flags /* PAGER_GET_XXX flags */ +){ + int rc = SQLITE_OK; + PgHdr *pPg = 0; + u32 iFrame = 0; /* Frame to read from WAL file */ + + /* It is acceptable to use a read-only (mmap) page for any page except + ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY + ** flag was specified by the caller. And so long as the db is not a + ** temporary or in-memory database. */ + const int bMmapOk = (pgno>1 + && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY)) + ); + + assert( USEFETCH(pPager) ); +#ifdef SQLITE_HAS_CODEC + assert( pPager->xCodec==0 ); +#endif + + /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here + ** allows the compiler optimizer to reuse the results of the "pgno>1" + ** test in the previous statement, and avoid testing pgno==0 in the + ** common case where pgno is large. 
*/ + if( pgno<=1 && pgno==0 ){ + return SQLITE_CORRUPT_BKPT; + } + assert( pPager->eState>=PAGER_READER ); + assert( assert_pager_state(pPager) ); + assert( pPager->hasHeldSharedLock==1 ); + assert( pPager->errCode==SQLITE_OK ); + + if( bMmapOk && pagerUseWal(pPager) ){ + rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); + if( rc!=SQLITE_OK ){ + *ppPage = 0; + return rc; + } + } + if( bMmapOk && iFrame==0 ){ + void *pData = 0; + rc = sqlite3OsFetch(pPager->fd, + (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData + ); + if( rc==SQLITE_OK && pData ){ + if( pPager->eState>PAGER_READER || pPager->tempFile ){ + pPg = sqlite3PagerLookup(pPager, pgno); + } + if( pPg==0 ){ + rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg); + }else{ + sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData); + } + if( pPg ){ + assert( rc==SQLITE_OK ); + *ppPage = pPg; + return SQLITE_OK; + } + } + if( rc!=SQLITE_OK ){ + *ppPage = 0; + return rc; + } + } + return getPageNormal(pPager, pgno, ppPage, flags); +} +#endif /* SQLITE_MAX_MMAP_SIZE>0 */ + +/* The page getter method for when the pager is an error state */ +static int getPageError( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int flags /* PAGER_GET_XXX flags */ +){ + UNUSED_PARAMETER(pgno); + UNUSED_PARAMETER(flags); + assert( pPager->errCode!=SQLITE_OK ); + *ppPage = 0; + return pPager->errCode; +} + + +/* Dispatch all page fetch requests to the appropriate getter method. +*/ +SQLITE_PRIVATE int sqlite3PagerGet( + Pager *pPager, /* The pager open on the database file */ + Pgno pgno, /* Page number to fetch */ + DbPage **ppPage, /* Write a pointer to the page here */ + int flags /* PAGER_GET_XXX flags */ +){ + return pPager->xGet(pPager, pgno, ppPage, flags); +} + +/* +** Acquire a page if it is already in the in-memory cache. Do +** not read the page from disk. Return a pointer to the page, +** or 0 if the page is not in cache. +** +** See also sqlite3PagerGet(). The difference between this routine +** and sqlite3PagerGet() is that _get() will go to the disk and read +** in the page if the page is not already in cache. This routine +** returns NULL if the page is not in cache or if a disk I/O error +** has ever happened. +*/ +SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ + sqlite3_pcache_page *pPage; + assert( pPager!=0 ); + assert( pgno!=0 ); + assert( pPager->pPCache!=0 ); + pPage = sqlite3PcacheFetch(pPager->pPCache, pgno, 0); + assert( pPage==0 || pPager->hasHeldSharedLock ); + if( pPage==0 ) return 0; + return sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pPage); +} + +/* +** Release a page reference. +** +** If the number of references to the page drop to zero, then the +** page is added to the LRU list. When all references to all pages +** are released, a rollback occurs and the lock on the database is +** removed. +*/ +SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){ + Pager *pPager; + assert( pPg!=0 ); + pPager = pPg->pPager; + if( pPg->flags & PGHDR_MMAP ){ + pagerReleaseMapPage(pPg); + }else{ + sqlite3PcacheRelease(pPg); + } + pagerUnlockIfUnused(pPager); +} +SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){ + if( pPg ) sqlite3PagerUnrefNotNull(pPg); +} + +/* +** This function is called at the start of every write transaction. +** There must already be a RESERVED or EXCLUSIVE lock on the database +** file when this routine is called. 
+** +** Open the journal file for pager pPager and write a journal header +** to the start of it. If there are active savepoints, open the sub-journal +** as well. This function is only used when the journal file is being +** opened to write a rollback log for a transaction. It is not used +** when opening a hot journal file to roll it back. +** +** If the journal file is already open (as it may be in exclusive mode), +** then this function just writes a journal header to the start of the +** already open file. +** +** Whether or not the journal file is opened by this function, the +** Pager.pInJournal bitvec structure is allocated. +** +** Return SQLITE_OK if everything is successful. Otherwise, return +** SQLITE_NOMEM if the attempt to allocate Pager.pInJournal fails, or +** an IO error code if opening or writing the journal file fails. +*/ +static int pager_open_journal(Pager *pPager){ + int rc = SQLITE_OK; /* Return code */ + sqlite3_vfs * const pVfs = pPager->pVfs; /* Local cache of vfs pointer */ + + assert( pPager->eState==PAGER_WRITER_LOCKED ); + assert( assert_pager_state(pPager) ); + assert( pPager->pInJournal==0 ); + + /* If already in the error state, this function is a no-op. But on + ** the other hand, this routine is never called if we are already in + ** an error state. */ + if( NEVER(pPager->errCode) ) return pPager->errCode; + + if( !pagerUseWal(pPager) && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ + pPager->pInJournal = sqlite3BitvecCreate(pPager->dbSize); + if( pPager->pInJournal==0 ){ + return SQLITE_NOMEM_BKPT; + } + + /* Open the journal file if it is not already open. */ + if( !isOpen(pPager->jfd) ){ + if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){ + sqlite3MemJournalOpen(pPager->jfd); + }else{ + int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE; + int nSpill; + + if( pPager->tempFile ){ + flags |= (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL); + nSpill = sqlite3Config.nStmtSpill; + }else{ + flags |= SQLITE_OPEN_MAIN_JOURNAL; + nSpill = jrnlBufferSize(pPager); + } + + /* Verify that the database still has the same name as it did when + ** it was originally opened. */ + rc = databaseIsUnmoved(pPager); + if( rc==SQLITE_OK ){ + rc = sqlite3JournalOpen ( + pVfs, pPager->zJournal, pPager->jfd, flags, nSpill + ); + } + } + assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); + } + + + /* Write the first journal header to the journal file and open + ** the sub-journal if necessary. + */ + if( rc==SQLITE_OK ){ + /* TODO: Check if all of these are really required. */ + pPager->nRec = 0; + pPager->journalOff = 0; + pPager->setMaster = 0; + pPager->journalHdr = 0; + rc = writeJournalHdr(pPager); + } + } + + if( rc!=SQLITE_OK ){ + sqlite3BitvecDestroy(pPager->pInJournal); + pPager->pInJournal = 0; + }else{ + assert( pPager->eState==PAGER_WRITER_LOCKED ); + pPager->eState = PAGER_WRITER_CACHEMOD; + } + + return rc; +} + +/* +** Begin a write-transaction on the specified pager object. If a +** write-transaction has already been opened, this function is a no-op. +** +** If the exFlag argument is false, then acquire at least a RESERVED +** lock on the database file. If exFlag is true, then acquire at least +** an EXCLUSIVE lock. If such a lock is already held, no locking +** functions need be called. +** +** If the subjInMemory argument is non-zero, then any sub-journal opened +** within this transaction will be opened as an in-memory file. 
+** This has no effect if the sub-journal is already opened (as it may be when
+** running in exclusive mode) or if the transaction does not require a
+** sub-journal. If the subjInMemory argument is zero, then any required
+** sub-journal is implemented in-memory if pPager is an in-memory database,
+** or using a temporary file otherwise.
+*/
+SQLITE_PRIVATE int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory){
+  int rc = SQLITE_OK;
+
+  if( pPager->errCode ) return pPager->errCode;
+  assert( pPager->eState>=PAGER_READER && pPager->eState<PAGER_ERROR );
+  pPager->subjInMemory = (u8)subjInMemory;
+
+  if( ALWAYS(pPager->eState==PAGER_READER) ){
+    assert( pPager->pInJournal==0 );
+
+    if( pagerUseWal(pPager) ){
+      /* If the pager is configured to use locking_mode=exclusive, and an
+      ** exclusive lock on the database is not already held, obtain it now.
+      */
+      if( pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, -1) ){
+        rc = pagerLockDb(pPager, EXCLUSIVE_LOCK);
+        if( rc!=SQLITE_OK ){
+          return rc;
+        }
+        (void)sqlite3WalExclusiveMode(pPager->pWal, 1);
+      }
+
+      /* Grab the write lock on the log file. If successful, upgrade to
+      ** PAGER_RESERVED state. Otherwise, return an error code to the caller.
+      ** The busy-handler is not invoked if another connection already
+      ** holds the write-lock. If possible, the upper layer will call it.
+      */
+      rc = sqlite3WalBeginWriteTransaction(pPager->pWal);
+    }else{
+      /* Obtain a RESERVED lock on the database file. If the exFlag parameter
+      ** is true, then immediately upgrade this to an EXCLUSIVE lock. The
+      ** busy-handler callback can be used when upgrading to the EXCLUSIVE
+      ** lock, but not when obtaining the RESERVED lock.
+      */
+      rc = pagerLockDb(pPager, RESERVED_LOCK);
+      if( rc==SQLITE_OK && exFlag ){
+        rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK);
+      }
+    }
+
+    if( rc==SQLITE_OK ){
+      /* Change to WRITER_LOCKED state.
+      **
+      ** WAL mode sets Pager.eState to PAGER_WRITER_LOCKED or CACHEMOD
+      ** when it has an open transaction, but never to DBMOD or FINISHED.
+      ** This is because in those states the code to roll back savepoint
+      ** transactions may copy data from the sub-journal into the database
+      ** file as well as into the page cache. Which would be incorrect in
+      ** WAL mode.
+      */
+      pPager->eState = PAGER_WRITER_LOCKED;
+      pPager->dbHintSize = pPager->dbSize;
+      pPager->dbFileSize = pPager->dbSize;
+      pPager->dbOrigSize = pPager->dbSize;
+      pPager->journalOff = 0;
+    }
+
+    assert( rc==SQLITE_OK || pPager->eState==PAGER_READER );
+    assert( rc!=SQLITE_OK || pPager->eState==PAGER_WRITER_LOCKED );
+    assert( assert_pager_state(pPager) );
+  }
+
+  PAGERTRACE(("TRANSACTION %d\n", PAGERID(pPager)));
+  return rc;
+}
+
+/*
+** Write page pPg onto the end of the rollback journal.
+*/
+static SQLITE_NOINLINE int pagerAddPageToRollbackJournal(PgHdr *pPg){
+  Pager *pPager = pPg->pPager;
+  int rc;
+  u32 cksum;
+  char *pData2;
+  i64 iOff = pPager->journalOff;
+
+  /* We should never write to the journal file the page that
+  ** contains the database locks. The following assert verifies
+  ** that we do not. */
+  assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) );
+
+  assert( pPager->journalHdr<=pPager->journalOff );
+  CODEC2(pPager, pPg->pData, pPg->pgno, 7, return SQLITE_NOMEM_BKPT, pData2);
+  cksum = pager_cksum(pPager, (u8*)pData2);
+
+  /* Even if an IO or diskfull error occurs while journalling the
+  ** page in the block above, set the need-sync flag for the page.
+  ** Otherwise, when the transaction is rolled back, the logic in
+  ** playback_one_page() will think that the page needs to be restored
+  ** in the database file. And if an IO error occurs while doing so,
+  ** then corruption may follow.
+  */
+  pPg->flags |= PGHDR_NEED_SYNC;
+
+  rc = write32bits(pPager->jfd, iOff, pPg->pgno);
+  if( rc!=SQLITE_OK ) return rc;
+  rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, iOff+4);
+  if( rc!=SQLITE_OK ) return rc;
+  rc = write32bits(pPager->jfd, iOff+pPager->pageSize+4, cksum);
+  if( rc!=SQLITE_OK ) return rc;
+
+  IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno,
+           pPager->journalOff, pPager->pageSize));
+  PAGER_INCR(sqlite3_pager_writej_count);
+  PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n",
+       PAGERID(pPager), pPg->pgno,
+       ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg)));
+
+  pPager->journalOff += 8 + pPager->pageSize;
+  pPager->nRec++;
+  assert( pPager->pInJournal!=0 );
+  rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno);
+  testcase( rc==SQLITE_NOMEM );
+  assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+  rc |= addToSavepointBitvecs(pPager, pPg->pgno);
+  assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
+  return rc;
+}
+
+/*
+** Mark a single data page as writeable. The page is written into the
+** main journal or sub-journal as required. If the page is written into
+** one of the journals, the corresponding bit is set in the
+** Pager.pInJournal bitvec and the PagerSavepoint.pInSavepoint bitvecs
+** of any open savepoints as appropriate.
+*/
+static int pager_write(PgHdr *pPg){
+  Pager *pPager = pPg->pPager;
+  int rc = SQLITE_OK;
+
+  /* This routine is not called unless a write-transaction has already
+  ** been started. The journal file may or may not be open at this point.
+  ** It is never called in the ERROR state.
+  */
+  assert( pPager->eState==PAGER_WRITER_LOCKED
+       || pPager->eState==PAGER_WRITER_CACHEMOD
+       || pPager->eState==PAGER_WRITER_DBMOD
+  );
+  assert( assert_pager_state(pPager) );
+  assert( pPager->errCode==0 );
+  assert( pPager->readOnly==0 );
+  CHECK_PAGE(pPg);
+
+  /* The journal file needs to be opened. Higher level routines have already
+  ** obtained the necessary locks to begin the write-transaction, but the
+  ** rollback journal might not yet be open. Open it now if this is the case.
+  **
+  ** This is done before calling sqlite3PcacheMakeDirty() on the page.
+  ** Otherwise, if it were done after calling sqlite3PcacheMakeDirty(), then
+  ** an error might occur and the pager would end up in WRITER_LOCKED state
+  ** with pages marked as dirty in the cache.
+  */
+  if( pPager->eState==PAGER_WRITER_LOCKED ){
+    rc = pager_open_journal(pPager);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+  assert( pPager->eState>=PAGER_WRITER_CACHEMOD );
+  assert( assert_pager_state(pPager) );
+
+  /* Mark the page that is about to be modified as dirty. */
+  sqlite3PcacheMakeDirty(pPg);
+
+  /* If a rollback journal is in use, then make sure the page that is about
+  ** to change is in the rollback journal, or if the page is a new page off
+  ** the end of the file, make sure it is marked as PGHDR_NEED_SYNC.
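[Editor's note: pagerAddPageToRollbackJournal() above frames each journal record as a 4-byte big-endian page number, the page image, then a 4-byte checksum, which is why journalOff advances by pageSize+8 per record. A standalone sketch of that framing; journal_append_record and write_be32 are invented helpers, and the checksum is taken as a parameter here rather than computed the way pager_cksum() does:]

#include <stdint.h>
#include <stdio.h>

static int write_be32(FILE *f, uint32_t v){
  unsigned char a[4] = { (unsigned char)(v>>24), (unsigned char)(v>>16),
                         (unsigned char)(v>>8),  (unsigned char)v };
  return fwrite(a, 1, 4, f)==4 ? 0 : 1;
}

/* Append one (pgno, data, cksum) record to an open journal stream. */
int journal_append_record(FILE *f, uint32_t pgno,
                          const unsigned char *aData, size_t pageSize,
                          uint32_t cksum){
  if( write_be32(f, pgno) ) return 1;               /* 4-byte page number */
  if( fwrite(aData, 1, pageSize, f)!=pageSize ) return 1;  /* page image */
  return write_be32(f, cksum);                      /* 4-byte checksum   */
}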
+  */
+  assert( (pPager->pInJournal!=0) == isOpen(pPager->jfd) );
+  if( pPager->pInJournal!=0
+   && sqlite3BitvecTestNotNull(pPager->pInJournal, pPg->pgno)==0
+  ){
+    assert( pagerUseWal(pPager)==0 );
+    if( pPg->pgno<=pPager->dbOrigSize ){
+      rc = pagerAddPageToRollbackJournal(pPg);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+    }else{
+      if( pPager->eState!=PAGER_WRITER_DBMOD ){
+        pPg->flags |= PGHDR_NEED_SYNC;
+      }
+      PAGERTRACE(("APPEND %d page %d needSync=%d\n",
+              PAGERID(pPager), pPg->pgno,
+             ((pPg->flags&PGHDR_NEED_SYNC)?1:0)));
+    }
+  }
+
+  /* The PGHDR_DIRTY bit is set above when the page was added to the dirty-list
+  ** and before writing the page into the rollback journal. Wait until now,
+  ** after the page has been successfully journalled, before setting the
+  ** PGHDR_WRITEABLE bit that indicates that the page can be safely modified.
+  */
+  pPg->flags |= PGHDR_WRITEABLE;
+
+  /* If the statement journal is open and the page is not in it,
+  ** then write the page into the statement journal.
+  */
+  if( pPager->nSavepoint>0 ){
+    rc = subjournalPageIfRequired(pPg);
+  }
+
+  /* Update the database size and return. */
+  if( pPager->dbSize<pPg->pgno ){
+    pPager->dbSize = pPg->pgno;
+  }
+  return rc;
+}
+
+/*
+** This is a variant of sqlite3PagerWrite() that runs when the sector size
+** is larger than the page size. SQLite makes the (reasonable) assumption that
+** all bytes of a sector are written together by hardware. Hence, all bytes of
+** a sector need to be journalled in case of a power loss in the middle of
+** a write.
+**
+** Usually, the sector size is less than or equal to the page size, in which
+** case pages can be individually written. This routine only runs in the
+** exceptional case where the page size is smaller than the sector size.
+*/
+static SQLITE_NOINLINE int pagerWriteLargeSector(PgHdr *pPg){
+  int rc = SQLITE_OK;          /* Return code */
+  Pgno nPageCount;             /* Total number of pages in database file */
+  Pgno pg1;                    /* First page of the sector pPg is located on. */
+  int nPage = 0;               /* Number of pages starting at pg1 to journal */
+  int ii;                      /* Loop counter */
+  int needSync = 0;            /* True if any page has PGHDR_NEED_SYNC */
+  Pager *pPager = pPg->pPager; /* The pager that owns pPg */
+  Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
+
+  /* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow
+  ** a journal header to be written between the pages journaled by
+  ** this function.
+  */
+  assert( !MEMDB );
+  assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)==0 );
+  pPager->doNotSpill |= SPILLFLAG_NOSYNC;
+
+  /* This trick assumes that both the page-size and sector-size are
+  ** an integer power of 2. It sets variable pg1 to the identifier
+  ** of the first page of the sector pPg is located on.
+  */
+  pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
+
+  nPageCount = pPager->dbSize;
+  if( pPg->pgno>nPageCount ){
+    nPage = (pPg->pgno - pg1)+1;
+  }else if( (pg1+nPagePerSector-1)>nPageCount ){
+    nPage = nPageCount+1-pg1;
+  }else{
+    nPage = nPagePerSector;
+  }
+  assert(nPage>0);
+  assert(pg1<=pPg->pgno);
+  assert((pg1+nPage)>pPg->pgno);
+
+  for(ii=0; ii<nPage && rc==SQLITE_OK; ii++){
+    Pgno pg = pg1+ii;
+    PgHdr *pPage;
+    if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){
+      if( pg!=PAGER_MJ_PGNO(pPager) ){
+        rc = sqlite3PagerGet(pPager, pg, &pPage, 0);
+        if( rc==SQLITE_OK ){
+          rc = pager_write(pPage);
+          if( pPage->flags&PGHDR_NEED_SYNC ){
+            needSync = 1;
+          }
+          sqlite3PagerUnrefNotNull(pPage);
+        }
+      }
+    }else if( (pPage = sqlite3PagerLookup(pPager, pg))!=0 ){
+      if( pPage->flags&PGHDR_NEED_SYNC ){
+        needSync = 1;
+      }
+      sqlite3PagerUnrefNotNull(pPage);
+    }
+  }
+
+  /* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages
+  ** starting at pg1, then it needs to be set for all of them. Because
+  ** writing to any of these nPage pages may damage the others, the
+  ** journal file must contain sync()ed copies of all of them
+  ** before any of them can be written out to the database file.
+  */
+  if( rc==SQLITE_OK && needSync ){
+    assert( !MEMDB );
+    for(ii=0; ii<nPage; ii++){
+      PgHdr *pPage = sqlite3PagerLookup(pPager, pg1+ii);
+      if( pPage ){
+        pPage->flags |= PGHDR_NEED_SYNC;
+        sqlite3PagerUnrefNotNull(pPage);
+      }
+    }
+  }
+
+  assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)!=0 );
+  pPager->doNotSpill &= ~SPILLFLAG_NOSYNC;
+  return rc;
+}
+
+/*
+** Mark a data page as writeable. This routine must be called before
+** making changes to a page. The caller must check the return value
+** of this function and be careful not to change any page data unless
+** this routine returns SQLITE_OK.
+**
+** The difference between this function and pager_write() is that this
+** function also deals with the special case where 2 or more pages
+** fit on a single disk sector. In this case all co-resident pages
+** must have been written to the journal file before returning.
+**
+** If an error occurs, SQLITE_NOMEM or an IO error code is returned
+** as appropriate. Otherwise, SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){
+  Pager *pPager = pPg->pPager;
+  assert( (pPg->flags & PGHDR_MMAP)==0 );
+  assert( pPager->eState>=PAGER_WRITER_LOCKED );
+  assert( assert_pager_state(pPager) );
+  if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){
+    if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg);
+    return SQLITE_OK;
+  }else if( pPager->errCode ){
+    return pPager->errCode;
+  }else if( pPager->sectorSize > (u32)pPager->pageSize ){
+    assert( pPager->tempFile==0 );
+    return pagerWriteLargeSector(pPg);
+  }else{
+    return pager_write(pPg);
+  }
+}
+
+/*
+** Return TRUE if the page given in the argument was previously passed
+** to sqlite3PagerWrite(). In other words, return TRUE if it is ok
+** to change the content of the page.
+*/
+#ifndef NDEBUG
+SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage *pPg){
+  return pPg->flags & PGHDR_WRITEABLE;
+}
+#endif
+
+/*
+** A call to this routine tells the pager that it is not necessary to
+** write the information on page pPg back to the disk, even though
+** that page might be marked as dirty. This happens, for example, when
+** the page has been added as a leaf of the freelist and so its
+** content no longer matters.
+**
+** The overlying software layer calls this routine when all of the data
+** on the given page is unused. The pager marks the page as clean so
+** that it does not get written to disk.
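[Editor's note: the pg1 computation above relies on the page size and sector size both being powers of two, so the low bits of (pgno-1) give the page's position within its sector. A small standalone restatement of that arithmetic:]

#include <assert.h>

/* First page of the sector containing pgno, mirroring how
** pagerWriteLargeSector() computes pg1. nPagePerSector must be a
** power of two. */
unsigned first_page_of_sector(unsigned pgno, unsigned nPagePerSector){
  assert( nPagePerSector && (nPagePerSector & (nPagePerSector-1))==0 );
  return ((pgno-1) & ~(nPagePerSector-1)) + 1;
}
/* e.g. with 8 pages per sector: pages 1..8 map to 1, pages 9..16 to 9. */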
+** +** Tests show that this optimization can quadruple the speed of large +** DELETE operations. +** +** This optimization cannot be used with a temp-file, as the page may +** have been dirty at the start of the transaction. In that case, if +** memory pressure forces page pPg out of the cache, the data does need +** to be written out to disk so that it may be read back in if the +** current transaction is rolled back. +*/ +SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){ + Pager *pPager = pPg->pPager; + if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ + PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); + IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) + pPg->flags |= PGHDR_DONT_WRITE; + pPg->flags &= ~PGHDR_WRITEABLE; + testcase( pPg->flags & PGHDR_NEED_SYNC ); + pager_set_pagehash(pPg); + } +} + +/* +** This routine is called to increment the value of the database file +** change-counter, stored as a 4-byte big-endian integer starting at +** byte offset 24 of the pager file. The secondary change counter at +** 92 is also updated, as is the SQLite version number at offset 96. +** +** But this only happens if the pPager->changeCountDone flag is false. +** To avoid excess churning of page 1, the update only happens once. +** See also the pager_write_changecounter() routine that does an +** unconditional update of the change counters. +** +** If the isDirectMode flag is zero, then this is done by calling +** sqlite3PagerWrite() on page 1, then modifying the contents of the +** page data. In this case the file will be updated when the current +** transaction is committed. +** +** The isDirectMode flag may only be non-zero if the library was compiled +** with the SQLITE_ENABLE_ATOMIC_WRITE macro defined. In this case, +** if isDirect is non-zero, then the database file is updated directly +** by writing an updated version of page 1 using a call to the +** sqlite3OsWrite() function. +*/ +static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ + int rc = SQLITE_OK; + + assert( pPager->eState==PAGER_WRITER_CACHEMOD + || pPager->eState==PAGER_WRITER_DBMOD + ); + assert( assert_pager_state(pPager) ); + + /* Declare and initialize constant integer 'isDirect'. If the + ** atomic-write optimization is enabled in this build, then isDirect + ** is initialized to the value passed as the isDirectMode parameter + ** to this function. Otherwise, it is always set to zero. + ** + ** The idea is that if the atomic-write optimization is not + ** enabled at compile time, the compiler can omit the tests of + ** 'isDirect' below, as well as the block enclosed in the + ** "if( isDirect )" condition. + */ +#ifndef SQLITE_ENABLE_ATOMIC_WRITE +# define DIRECT_MODE 0 + assert( isDirectMode==0 ); + UNUSED_PARAMETER(isDirectMode); +#else +# define DIRECT_MODE isDirectMode +#endif + + if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){ + PgHdr *pPgHdr; /* Reference to page 1 */ + + assert( !pPager->tempFile && isOpen(pPager->fd) ); + + /* Open page 1 of the file for writing. */ + rc = sqlite3PagerGet(pPager, 1, &pPgHdr, 0); + assert( pPgHdr==0 || rc==SQLITE_OK ); + + /* If page one was fetched successfully, and this function is not + ** operating in direct-mode, make page 1 writable. When not in + ** direct mode, page 1 is always held in cache and hence the PagerGet() + ** above is always successful - hence the ALWAYS on rc==SQLITE_OK. 
+ */ + if( !DIRECT_MODE && ALWAYS(rc==SQLITE_OK) ){ + rc = sqlite3PagerWrite(pPgHdr); + } + + if( rc==SQLITE_OK ){ + /* Actually do the update of the change counter */ + pager_write_changecounter(pPgHdr); + + /* If running in direct mode, write the contents of page 1 to the file. */ + if( DIRECT_MODE ){ + const void *zBuf; + assert( pPager->dbFileSize>0 ); + CODEC2(pPager, pPgHdr->pData, 1, 6, rc=SQLITE_NOMEM_BKPT, zBuf); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); + pPager->aStat[PAGER_STAT_WRITE]++; + } + if( rc==SQLITE_OK ){ + /* Update the pager's copy of the change-counter. Otherwise, the + ** next time a read transaction is opened the cache will be + ** flushed (as the change-counter values will not match). */ + const void *pCopy = (const void *)&((const char *)zBuf)[24]; + memcpy(&pPager->dbFileVers, pCopy, sizeof(pPager->dbFileVers)); + pPager->changeCountDone = 1; + } + }else{ + pPager->changeCountDone = 1; + } + } + + /* Release the page reference. */ + sqlite3PagerUnref(pPgHdr); + } + return rc; +} + +/* +** Sync the database file to disk. This is a no-op for in-memory databases +** or pages with the Pager.noSync flag set. +** +** If successful, or if called on a pager for which it is a no-op, this +** function returns SQLITE_OK. Otherwise, an IO error code is returned. +*/ +SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster){ + int rc = SQLITE_OK; + + if( isOpen(pPager->fd) ){ + void *pArg = (void*)zMaster; + rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg); + if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; + } + if( rc==SQLITE_OK && !pPager->noSync ){ + assert( !MEMDB ); + rc = sqlite3OsSync(pPager->fd, pPager->syncFlags); + } + return rc; +} + +/* +** This function may only be called while a write-transaction is active in +** rollback. If the connection is in WAL mode, this call is a no-op. +** Otherwise, if the connection does not already have an EXCLUSIVE lock on +** the database file, an attempt is made to obtain one. +** +** If the EXCLUSIVE lock is already held or the attempt to obtain it is +** successful, or the connection is in WAL mode, SQLITE_OK is returned. +** Otherwise, either SQLITE_BUSY or an SQLITE_IOERR_XXX error code is +** returned. +*/ +SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager *pPager){ + int rc = pPager->errCode; + assert( assert_pager_state(pPager) ); + if( rc==SQLITE_OK ){ + assert( pPager->eState==PAGER_WRITER_CACHEMOD + || pPager->eState==PAGER_WRITER_DBMOD + || pPager->eState==PAGER_WRITER_LOCKED + ); + assert( assert_pager_state(pPager) ); + if( 0==pagerUseWal(pPager) ){ + rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); + } + } + return rc; +} + +/* +** Sync the database file for the pager pPager. zMaster points to the name +** of a master journal file that should be written into the individual +** journal file. zMaster may be NULL, which is interpreted as no master +** journal (a single database transaction). +** +** This routine ensures that: +** +** * The database file change-counter is updated, +** * the journal is synced (unless the atomic-write optimization is used), +** * all dirty pages are written to the database file, +** * the database file is truncated (if required), and +** * the database file synced. +** +** The only thing that remains to commit the transaction is to finalize +** (delete, truncate or zero the first part of) the journal file (or +** delete the master journal file if specified). 
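[Editor's note: per the comments around pager_incr_changecounter() above, page 1 carries a big-endian change counter at byte offset 24 and a secondary counter at offset 92 (the version number at offset 96 is left alone in this sketch). A standalone sketch of the in-buffer update; bump_change_counters, get_be32, and put_be32 are invented names:]

#include <stdint.h>

static uint32_t get_be32(const unsigned char *a){
  return ((uint32_t)a[0]<<24)|((uint32_t)a[1]<<16)|((uint32_t)a[2]<<8)|a[3];
}
static void put_be32(unsigned char *a, uint32_t v){
  a[0]=(unsigned char)(v>>24); a[1]=(unsigned char)(v>>16);
  a[2]=(unsigned char)(v>>8);  a[3]=(unsigned char)v;
}

/* Increment both counters in an in-memory image of page 1. */
void bump_change_counters(unsigned char *aPage1){
  uint32_t v = get_be32(&aPage1[24]) + 1;   /* main change counter      */
  put_be32(&aPage1[24], v);
  put_be32(&aPage1[92], v);                 /* secondary change counter */
}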
+**
+** Note that if zMaster==NULL, this does not overwrite a previous value
+** passed to an sqlite3PagerCommitPhaseOne() call.
+**
+** If the final parameter - noSync - is true, then the database file itself
+** is not synced. The caller must call sqlite3PagerSync() directly to
+** sync the database file before calling CommitPhaseTwo() to delete the
+** journal file in this case.
+*/
+SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
+  Pager *pPager,                  /* Pager object */
+  const char *zMaster,            /* If not NULL, the master journal name */
+  int noSync                      /* True to omit the xSync on the db file */
+){
+  int rc = SQLITE_OK;             /* Return code */
+
+  assert( pPager->eState==PAGER_WRITER_LOCKED
+       || pPager->eState==PAGER_WRITER_CACHEMOD
+       || pPager->eState==PAGER_WRITER_DBMOD
+       || pPager->eState==PAGER_ERROR
+  );
+  assert( assert_pager_state(pPager) );
+
+  /* If a prior error occurred, report that error again. */
+  if( NEVER(pPager->errCode) ) return pPager->errCode;
+
+  /* Provide the ability to easily simulate an I/O error during testing */
+  if( sqlite3FaultSim(400) ) return SQLITE_IOERR;
+
+  PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n",
+      pPager->zFilename, zMaster, pPager->dbSize));
+
+  /* If no database changes have been made, return early. */
+  if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK;
+
+  assert( MEMDB==0 || pPager->tempFile );
+  assert( isOpen(pPager->fd) || pPager->tempFile );
+  if( 0==pagerFlushOnCommit(pPager, 1) ){
+    /* If this is an in-memory db, or no pages have been written to, or this
+    ** function has already been called, it is mostly a no-op. However, any
+    ** backup in progress needs to be restarted. */
+    sqlite3BackupRestart(pPager->pBackup);
+  }else{
+    if( pagerUseWal(pPager) ){
+      PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache);
+      PgHdr *pPageOne = 0;
+      if( pList==0 ){
+        /* Must have at least one page for the WAL commit flag.
+        ** Ticket [2d1a5c67dfc2363e44f29d9bbd57f] 2011-05-18 */
+        rc = sqlite3PagerGet(pPager, 1, &pPageOne, 0);
+        pList = pPageOne;
+        pList->pDirty = 0;
+      }
+      assert( rc==SQLITE_OK );
+      if( ALWAYS(pList) ){
+        rc = pagerWalFrames(pPager, pList, pPager->dbSize, 1);
+      }
+      sqlite3PagerUnref(pPageOne);
+      if( rc==SQLITE_OK ){
+        sqlite3PcacheCleanAll(pPager->pPCache);
+      }
+    }else{
+      /* The following block updates the change-counter. Exactly how it
+      ** does this depends on whether or not the atomic-update optimization
+      ** was enabled at compile time, and if this transaction meets the
+      ** runtime criteria to use the operation:
+      **
+      **    * The file-system supports the atomic-write property for
+      **      blocks of size page-size, and
+      **    * This commit is not part of a multi-file transaction, and
+      **    * Exactly one page has been modified and stored in the journal file.
+      **
+      ** If the optimization was not enabled at compile time, then the
+      ** pager_incr_changecounter() function is called to update the change
+      ** counter in 'indirect-mode'. If the optimization is compiled in but
+      ** is not applicable to this transaction, call sqlite3JournalCreate()
+      ** to make sure the journal file has actually been created, then call
+      ** pager_incr_changecounter() to update the change-counter in indirect
+      ** mode.
+      **
+      ** Otherwise, if the optimization is both enabled and applicable,
+      ** then call pager_incr_changecounter() to update the change-counter
+      ** in 'direct' mode. In this case the journal file will never be
+      ** created for this transaction.
+ */ + #ifdef SQLITE_ENABLE_ATOMIC_WRITE + PgHdr *pPg; + assert( isOpen(pPager->jfd) + || pPager->journalMode==PAGER_JOURNALMODE_OFF + || pPager->journalMode==PAGER_JOURNALMODE_WAL + ); + if( !zMaster && isOpen(pPager->jfd) + && pPager->journalOff==jrnlBufferSize(pPager) + && pPager->dbSize>=pPager->dbOrigSize + && (0==(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty) + ){ + /* Update the db file change counter via the direct-write method. The + ** following call will modify the in-memory representation of page 1 + ** to include the updated change counter and then write page 1 + ** directly to the database file. Because of the atomic-write + ** property of the host file-system, this is safe. + */ + rc = pager_incr_changecounter(pPager, 1); + }else{ + rc = sqlite3JournalCreate(pPager->jfd); + if( rc==SQLITE_OK ){ + rc = pager_incr_changecounter(pPager, 0); + } + } + #else + rc = pager_incr_changecounter(pPager, 0); + #endif + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + /* Write the master journal name into the journal file. If a master + ** journal file name has already been written to the journal file, + ** or if zMaster is NULL (no master journal), then this call is a no-op. + */ + rc = writeMasterJournal(pPager, zMaster); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + /* Sync the journal file and write all dirty pages to the database. + ** If the atomic-update optimization is being used, this sync will not + ** create the journal file or perform any real IO. + ** + ** Because the change-counter page was just modified, unless the + ** atomic-update optimization is used it is almost certain that the + ** journal requires a sync here. However, in locking_mode=exclusive + ** on a system under memory pressure it is just possible that this is + ** not the case. In this case it is likely enough that the redundant + ** xSync() call will be changed to a no-op by the OS anyhow. + */ + rc = syncJournal(pPager, 0); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + + rc = pager_write_pagelist(pPager,sqlite3PcacheDirtyList(pPager->pPCache)); + if( rc!=SQLITE_OK ){ + assert( rc!=SQLITE_IOERR_BLOCKED ); + goto commit_phase_one_exit; + } + sqlite3PcacheCleanAll(pPager->pPCache); + + /* If the file on disk is smaller than the database image, use + ** pager_truncate to grow the file here. This can happen if the database + ** image was extended as part of the current transaction and then the + ** last page in the db image moved to the free-list. In this case the + ** last page is never written out to disk, leaving the database file + ** undersized. Fix this now if it is the case. */ + if( pPager->dbSize>pPager->dbFileSize ){ + Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager)); + assert( pPager->eState==PAGER_WRITER_DBMOD ); + rc = pager_truncate(pPager, nNew); + if( rc!=SQLITE_OK ) goto commit_phase_one_exit; + } + + /* Finally, sync the database file. */ + if( !noSync ){ + rc = sqlite3PagerSync(pPager, zMaster); + } + IOTRACE(("DBSYNC %p\n", pPager)) + } + } + +commit_phase_one_exit: + if( rc==SQLITE_OK && !pagerUseWal(pPager) ){ + pPager->eState = PAGER_WRITER_FINISHED; + } + return rc; +} + + +/* +** When this function is called, the database file has been completely +** updated to reflect the changes made by the current transaction and +** synced to disk. The journal file still exists in the file-system +** though, and if a failure occurs at this point it will eventually +** be used as a hot-journal and the current transaction rolled back. 
+** +** This function finalizes the journal file, either by deleting, +** truncating or partially zeroing it, so that it cannot be used +** for hot-journal rollback. Once this is done the transaction is +** irrevocably committed. +** +** If an error occurs, an IO error code is returned and the pager +** moves into the error state. Otherwise, SQLITE_OK is returned. +*/ +SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager *pPager){ + int rc = SQLITE_OK; /* Return code */ + + /* This routine should not be called if a prior error has occurred. + ** But if (due to a coding error elsewhere in the system) it does get + ** called, just return the same error code without doing anything. */ + if( NEVER(pPager->errCode) ) return pPager->errCode; + + assert( pPager->eState==PAGER_WRITER_LOCKED + || pPager->eState==PAGER_WRITER_FINISHED + || (pagerUseWal(pPager) && pPager->eState==PAGER_WRITER_CACHEMOD) + ); + assert( assert_pager_state(pPager) ); + + /* An optimization. If the database was not actually modified during + ** this transaction, the pager is running in exclusive-mode and is + ** using persistent journals, then this function is a no-op. + ** + ** The start of the journal file currently contains a single journal + ** header with the nRec field set to 0. If such a journal is used as + ** a hot-journal during hot-journal rollback, 0 changes will be made + ** to the database file. So there is no need to zero the journal + ** header. Since the pager is in exclusive mode, there is no need + ** to drop any locks either. + */ + if( pPager->eState==PAGER_WRITER_LOCKED + && pPager->exclusiveMode + && pPager->journalMode==PAGER_JOURNALMODE_PERSIST + ){ + assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) || !pPager->journalOff ); + pPager->eState = PAGER_READER; + return SQLITE_OK; + } + + PAGERTRACE(("COMMIT %d\n", PAGERID(pPager))); + pPager->iDataVersion++; + rc = pager_end_transaction(pPager, pPager->setMaster, 1); + return pager_error(pPager, rc); +} + +/* +** If a write transaction is open, then all changes made within the +** transaction are reverted and the current write-transaction is closed. +** The pager falls back to PAGER_READER state if successful, or PAGER_ERROR +** state if an error occurs. +** +** If the pager is already in PAGER_ERROR state when this function is called, +** it returns Pager.errCode immediately. No work is performed in this case. +** +** Otherwise, in rollback mode, this function performs two functions: +** +** 1) It rolls back the journal file, restoring all database file and +** in-memory cache pages to the state they were in when the transaction +** was opened, and +** +** 2) It finalizes the journal file, so that it is not used for hot +** rollback at any point in the future. +** +** Finalization of the journal file (task 2) is only performed if the +** rollback is successful. +** +** In WAL mode, all cache-entries containing data modified within the +** current transaction are either expelled from the cache or reverted to +** their pre-transaction state by re-reading data from the database or +** WAL files. The WAL transaction is then closed. +*/ +SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){ + int rc = SQLITE_OK; /* Return code */ + PAGERTRACE(("ROLLBACK %d\n", PAGERID(pPager))); + + /* PagerRollback() is a no-op if called in READER or OPEN state. If + ** the pager is already in the ERROR state, the rollback is not + ** attempted here. Instead, the error code is returned to the caller. 
+ */ + assert( assert_pager_state(pPager) ); + if( pPager->eState==PAGER_ERROR ) return pPager->errCode; + if( pPager->eState<=PAGER_READER ) return SQLITE_OK; + + if( pagerUseWal(pPager) ){ + int rc2; + rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, -1); + rc2 = pager_end_transaction(pPager, pPager->setMaster, 0); + if( rc==SQLITE_OK ) rc = rc2; + }else if( !isOpen(pPager->jfd) || pPager->eState==PAGER_WRITER_LOCKED ){ + int eState = pPager->eState; + rc = pager_end_transaction(pPager, 0, 0); + if( !MEMDB && eState>PAGER_WRITER_LOCKED ){ + /* This can happen using journal_mode=off. Move the pager to the error + ** state to indicate that the contents of the cache may not be trusted. + ** Any active readers will get SQLITE_ABORT. + */ + pPager->errCode = SQLITE_ABORT; + pPager->eState = PAGER_ERROR; + setGetterMethod(pPager); + return rc; + } + }else{ + rc = pager_playback(pPager, 0); + } + + assert( pPager->eState==PAGER_READER || rc!=SQLITE_OK ); + assert( rc==SQLITE_OK || rc==SQLITE_FULL || rc==SQLITE_CORRUPT + || rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR + || rc==SQLITE_CANTOPEN + ); + + /* If an error occurs during a ROLLBACK, we can no longer trust the pager + ** cache. So call pager_error() on the way out to make any error persistent. + */ + return pager_error(pPager, rc); +} + +/* +** Return TRUE if the database file is opened read-only. Return FALSE +** if the database is (in theory) writable. +*/ +SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager *pPager){ + return pPager->readOnly; +} + +#ifdef SQLITE_DEBUG +/* +** Return the sum of the reference counts for all pages held by pPager. +*/ +SQLITE_PRIVATE int sqlite3PagerRefcount(Pager *pPager){ + return sqlite3PcacheRefCount(pPager->pPCache); +} +#endif + +/* +** Return the approximate number of bytes of memory currently +** used by the pager and its associated cache. +*/ +SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager *pPager){ + int perPageSize = pPager->pageSize + pPager->nExtra + sizeof(PgHdr) + + 5*sizeof(void*); + return perPageSize*sqlite3PcachePagecount(pPager->pPCache) + + sqlite3MallocSize(pPager) + + pPager->pageSize; +} + +/* +** Return the number of references to the specified page. +*/ +SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage *pPage){ + return sqlite3PcachePageRefcount(pPage); +} + +#ifdef SQLITE_TEST +/* +** This routine is used for testing and analysis only. +*/ +SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ + static int a[11]; + a[0] = sqlite3PcacheRefCount(pPager->pPCache); + a[1] = sqlite3PcachePagecount(pPager->pPCache); + a[2] = sqlite3PcacheGetCachesize(pPager->pPCache); + a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; + a[4] = pPager->eState; + a[5] = pPager->errCode; + a[6] = pPager->aStat[PAGER_STAT_HIT]; + a[7] = pPager->aStat[PAGER_STAT_MISS]; + a[8] = 0; /* Used to be pPager->nOvfl */ + a[9] = pPager->nRead; + a[10] = pPager->aStat[PAGER_STAT_WRITE]; + return a; +} +#endif + +/* +** Parameter eStat must be either SQLITE_DBSTATUS_CACHE_HIT or +** SQLITE_DBSTATUS_CACHE_MISS. Before returning, *pnVal is incremented by the +** current cache hit or miss count, according to the value of eStat. If the +** reset parameter is non-zero, the cache hit or miss count is zeroed before +** returning. 
+*/
+SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){
+
+  assert( eStat==SQLITE_DBSTATUS_CACHE_HIT
+       || eStat==SQLITE_DBSTATUS_CACHE_MISS
+       || eStat==SQLITE_DBSTATUS_CACHE_WRITE
+  );
+
+  assert( SQLITE_DBSTATUS_CACHE_HIT+1==SQLITE_DBSTATUS_CACHE_MISS );
+  assert( SQLITE_DBSTATUS_CACHE_HIT+2==SQLITE_DBSTATUS_CACHE_WRITE );
+  assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1 && PAGER_STAT_WRITE==2 );
+
+  *pnVal += pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT];
+  if( reset ){
+    pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT] = 0;
+  }
+}
+
+/*
+** Return true if this is an in-memory or temp-file backed pager.
+*/
+SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){
+  return pPager->tempFile;
+}
+
+/*
+** Check that there are at least nSavepoint savepoints open. If there are
+** currently less than nSavepoints open, then open one or more savepoints
+** to make up the difference. If the number of savepoints is already
+** equal to nSavepoint, then this function is a no-op.
+**
+** If a memory allocation fails, SQLITE_NOMEM is returned. If an error
+** occurs while opening the sub-journal file, then an IO error code is
+** returned. Otherwise, SQLITE_OK.
+*/
+static SQLITE_NOINLINE int pagerOpenSavepoint(Pager *pPager, int nSavepoint){
+  int rc = SQLITE_OK;                       /* Return code */
+  int nCurrent = pPager->nSavepoint;        /* Current number of savepoints */
+  int ii;                                   /* Iterator variable */
+  PagerSavepoint *aNew;                     /* New Pager.aSavepoint array */
+
+  assert( pPager->eState>=PAGER_WRITER_LOCKED );
+  assert( assert_pager_state(pPager) );
+  assert( nSavepoint>nCurrent && pPager->useJournal );
+
+  /* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM
+  ** if the allocation fails. Otherwise, zero the new portion in case a
+  ** malloc failure occurs while populating it in the for(...) loop below.
+  */
+  aNew = (PagerSavepoint *)sqlite3Realloc(
+      pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint
+  );
+  if( !aNew ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint));
+  pPager->aSavepoint = aNew;
+
+  /* Populate the PagerSavepoint structures just allocated. */
+  for(ii=nCurrent; ii<nSavepoint; ii++){
+    aNew[ii].nOrig = pPager->dbSize;
+    if( isOpen(pPager->jfd) && pPager->journalOff>0 ){
+      aNew[ii].iOffset = pPager->journalOff;
+    }else{
+      aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager);
+    }
+    aNew[ii].iSubRec = pPager->nSubRec;
+    aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize);
+    if( !aNew[ii].pInSavepoint ){
+      return SQLITE_NOMEM_BKPT;
+    }
+    if( pagerUseWal(pPager) ){
+      sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData);
+    }
+    pPager->nSavepoint = ii+1;
+  }
+  assert( pPager->nSavepoint==nSavepoint );
+  assertTruncateConstraint(pPager);
+  return rc;
+}
+SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){
+  assert( pPager->eState>=PAGER_WRITER_LOCKED );
+  assert( assert_pager_state(pPager) );
+
+  if( nSavepoint>pPager->nSavepoint && pPager->useJournal ){
+    return pagerOpenSavepoint(pPager, nSavepoint);
+  }else{
+    return SQLITE_OK;
+  }
+}
+
+
+/*
+** This function is called to rollback or release (commit) a savepoint.
+** The savepoint to release or rollback need not be the most recently
+** created savepoint.
+**
+** Parameter op is always either SAVEPOINT_ROLLBACK or SAVEPOINT_RELEASE.
+** If it is SAVEPOINT_RELEASE, then release and destroy the savepoint with
+** index iSavepoint. If it is SAVEPOINT_ROLLBACK, then rollback all changes
+** that have occurred since the specified savepoint was created.
+**
+** The savepoint to rollback or release is identified by parameter
+** iSavepoint. A value of 0 means to operate on the outermost savepoint
+** (the first created). A value of (Pager.nSavepoint-1) means operate
+** on the most recently created savepoint. If iSavepoint is greater than
+** (Pager.nSavepoint-1), then this function is a no-op.
+**
+** If a negative value is passed to this function, then the current
+** transaction is rolled back. This is different to calling
+** sqlite3PagerRollback() because this function does not terminate
+** the transaction or unlock the database, it just restores the
+** contents of the database to its original state.
+**
+** In any case, all savepoints with an index greater than iSavepoint
+** are destroyed. If this is a release operation (op==SAVEPOINT_RELEASE),
+** then savepoint iSavepoint is also destroyed.
+**
+** This function may return SQLITE_NOMEM if a memory allocation fails,
+** or an IO error code if an IO error occurs while rolling back a
+** savepoint. If no errors occur, SQLITE_OK is returned.
+*/
+SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
+  int rc = pPager->errCode;
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+  if( op==SAVEPOINT_RELEASE ) rc = SQLITE_OK;
+#endif
+
+  assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
+  assert( iSavepoint>=0 || op==SAVEPOINT_ROLLBACK );
+
+  if( rc==SQLITE_OK && iSavepoint<pPager->nSavepoint ){
+    int ii;            /* Iterator variable */
+    int nNew;          /* Number of remaining savepoints after this op. */
+
+    /* Figure out how many savepoints will still be active after this
+    ** operation. Store this value in nNew. Then free resources associated
+    ** with any savepoints that are destroyed by this operation.
+    */
+    nNew = iSavepoint + (( op==SAVEPOINT_RELEASE ) ? 0 : 1);
+    for(ii=nNew; ii<pPager->nSavepoint; ii++){
+      sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint);
+    }
+    pPager->nSavepoint = nNew;
+
+    /* If this is a release of the outermost savepoint, truncate
+    ** the sub-journal to zero bytes in size. */
+    if( op==SAVEPOINT_RELEASE ){
+      if( nNew==0 && isOpen(pPager->sjfd) ){
+        /* Only truncate if it is an in-memory sub-journal. */
+        if( sqlite3JournalIsInMemory(pPager->sjfd) ){
+          rc = sqlite3OsTruncate(pPager->sjfd, 0);
+          assert( rc==SQLITE_OK );
+        }
+        pPager->nSubRec = 0;
+      }
+    }
+    /* Else this is a rollback operation, playback the specified savepoint.
+    ** If this is a temp-file, it is possible that the journal file has
+    ** not yet been opened. In this case there have been no changes to
+    ** the database file, so the playback operation can be skipped.
+    */
+    else if( pagerUseWal(pPager) || isOpen(pPager->jfd) ){
+      PagerSavepoint *pSavepoint = (nNew==0)?0:&pPager->aSavepoint[nNew-1];
+      rc = pagerPlaybackSavepoint(pPager, pSavepoint);
+      assert(rc!=SQLITE_DONE);
+    }
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+    /* If the cache has been modified but the savepoint cannot be rolled
+    ** back because journal_mode=off, put the pager in the error state. This
+    ** way, if the VFS used by this pager includes ZipVFS, the entire
+    ** transaction can be rolled back at the ZipVFS level. */
+    else if(
+        pPager->journalMode==PAGER_JOURNALMODE_OFF
+     && pPager->eState>=PAGER_WRITER_CACHEMOD
+    ){
+      pPager->errCode = SQLITE_ABORT;
+      pPager->eState = PAGER_ERROR;
+      setGetterMethod(pPager);
+    }
+#endif
+  }
+
+  return rc;
+}
+
+/*
+** Return the full pathname of the database file.
+**
+** Except, if the pager is in-memory only, then return an empty string if
+** nullIfMemDb is true.
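[Editor's note: the nNew arithmetic above is the whole difference between RELEASE and ROLLBACK TO as far as savepoint lifetime goes: a release destroys savepoint iSavepoint itself, a rollback keeps it open. A standalone restatement; savepoints_remaining is an invented name:]

#include <assert.h>

int savepoints_remaining(int nSavepoint, int iSavepoint, int isRelease){
  int nNew = iSavepoint + (isRelease ? 0 : 1);
  assert( nNew<=nSavepoint );
  return nNew;
}
/* e.g. with 3 open savepoints: RELEASE of index 1 leaves 1 open,
** while ROLLBACK TO index 1 leaves 2 open. */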
+** This routine is called with nullIfMemDb==1 when
+** used to report the filename to the user, for compatibility with legacy
+** behavior. But when the Btree needs to know the filename for matching to
+** shared cache, it uses nullIfMemDb==0 so that in-memory databases can
+** participate in shared-cache.
+*/
+SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager *pPager, int nullIfMemDb){
+  return (nullIfMemDb && pPager->memDb) ? "" : pPager->zFilename;
+}
+
+/*
+** Return the VFS structure for the pager.
+*/
+SQLITE_PRIVATE sqlite3_vfs *sqlite3PagerVfs(Pager *pPager){
+  return pPager->pVfs;
+}
+
+/*
+** Return the file handle for the database file associated
+** with the pager. This might return NULL if the file has
+** not yet been opened.
+*/
+SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){
+  return pPager->fd;
+}
+
+/*
+** Return the file handle for the journal file (if it exists).
+** This will be either the rollback journal or the WAL file.
+*/
+SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager *pPager){
+#if SQLITE_OMIT_WAL
+  return pPager->jfd;
+#else
+  return pPager->pWal ? sqlite3WalFile(pPager->pWal) : pPager->jfd;
+#endif
+}
+
+/*
+** Return the full pathname of the journal file.
+*/
+SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager *pPager){
+  return pPager->zJournal;
+}
+
+#ifdef SQLITE_HAS_CODEC
+/*
+** Set or retrieve the codec for this pager
+*/
+SQLITE_PRIVATE void sqlite3PagerSetCodec(
+  Pager *pPager,
+  void *(*xCodec)(void*,void*,Pgno,int),
+  void (*xCodecSizeChng)(void*,int,int),
+  void (*xCodecFree)(void*),
+  void *pCodec
+){
+  if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec);
+  pPager->xCodec = pPager->memDb ? 0 : xCodec;
+  pPager->xCodecSizeChng = xCodecSizeChng;
+  pPager->xCodecFree = xCodecFree;
+  pPager->pCodec = pCodec;
+  setGetterMethod(pPager);
+  pagerReportSize(pPager);
+}
+SQLITE_PRIVATE void *sqlite3PagerGetCodec(Pager *pPager){
+  return pPager->pCodec;
+}
+
+/*
+** This function is called by the wal module when writing page content
+** into the log file.
+**
+** This function returns a pointer to a buffer containing the encrypted
+** page content. If a malloc fails, this function may return NULL.
+*/
+SQLITE_PRIVATE void *sqlite3PagerCodec(PgHdr *pPg){
+  void *aData = 0;
+  CODEC2(pPg->pPager, pPg->pData, pPg->pgno, 6, return 0, aData);
+  return aData;
+}
+
+/*
+** Return the current pager state
+*/
+SQLITE_PRIVATE int sqlite3PagerState(Pager *pPager){
+  return pPager->eState;
+}
+#endif /* SQLITE_HAS_CODEC */
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Move the page pPg to location pgno in the file.
+**
+** There must be no references to the page previously located at
+** pgno (which we call pPgOld) though that page is allowed to be
+** in cache. If the page previously located at pgno is not already
+** in the rollback journal, it is not put there by this routine.
+**
+** References to the page pPg remain valid. Updating any
+** meta-data associated with pPg (i.e. data stored in the nExtra bytes
+** allocated along with the page) is the responsibility of the caller.
+**
+** A transaction must be active when this routine is called. It used to be
+** required that a statement transaction was not active, but this restriction
+** has been removed (CREATE INDEX needs to move a page when a statement
+** transaction is active).
+**
+** If the fourth argument, isCommit, is non-zero, then this page is being
+** moved as part of a database reorganization just before the transaction
+** is being committed.
+** In this case, it is guaranteed that the database page
+** pPg refers to will not be written to again within this transaction.
+**
+** This function may return SQLITE_NOMEM or an IO error code if an error
+** occurs. Otherwise, it returns SQLITE_OK.
+*/
+SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, int isCommit){
+  PgHdr *pPgOld;               /* The page being overwritten. */
+  Pgno needSyncPgno = 0;       /* Old value of pPg->pgno, if sync is required */
+  int rc;                      /* Return code */
+  Pgno origPgno;               /* The original page number */
+
+  assert( pPg->nRef>0 );
+  assert( pPager->eState==PAGER_WRITER_CACHEMOD
+       || pPager->eState==PAGER_WRITER_DBMOD
+  );
+  assert( assert_pager_state(pPager) );
+
+  /* In order to be able to rollback, an in-memory database must journal
+  ** the page we are moving from.
+  */
+  assert( pPager->tempFile || !MEMDB );
+  if( pPager->tempFile ){
+    rc = sqlite3PagerWrite(pPg);
+    if( rc ) return rc;
+  }
+
+  /* If the page being moved is dirty and has not been saved by the latest
+  ** savepoint, then save the current contents of the page into the
+  ** sub-journal now. This is required to handle the following scenario:
+  **
+  **   BEGIN;
+  **     <journal page X>
+  **     SAVEPOINT one;
+  **       <Move page X to location Y>
+  **     ROLLBACK TO one;
+  **
+  ** If page X were not written to the sub-journal here, it would not
+  ** be possible to restore its contents when the "ROLLBACK TO one"
+  ** statement is processed.
+  **
+  ** subjournalPage() may need to allocate space to store pPg->pgno into
+  ** one or more savepoint bitvecs. This is the reason this function
+  ** may return SQLITE_NOMEM.
+  */
+  if( (pPg->flags & PGHDR_DIRTY)!=0
+   && SQLITE_OK!=(rc = subjournalPageIfRequired(pPg))
+  ){
+    return rc;
+  }
+
+  PAGERTRACE(("MOVE %d page %d (needSync=%d) moves to %d\n",
+      PAGERID(pPager), pPg->pgno, (pPg->flags&PGHDR_NEED_SYNC)?1:0, pgno));
+  IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno))
+
+  /* If the journal needs to be sync()ed before page pPg->pgno can
+  ** be written to, store pPg->pgno in local variable needSyncPgno.
+  **
+  ** If the isCommit flag is set, there is no need to remember that
+  ** the journal needs to be sync()ed before database page pPg->pgno
+  ** can be written to. The caller has already promised not to write to it.
+  */
+  if( (pPg->flags&PGHDR_NEED_SYNC) && !isCommit ){
+    needSyncPgno = pPg->pgno;
+    assert( pPager->journalMode==PAGER_JOURNALMODE_OFF ||
+            pageInJournal(pPager, pPg) || pPg->pgno>pPager->dbOrigSize );
+    assert( pPg->flags&PGHDR_DIRTY );
+  }
+
+  /* If the cache contains a page with page-number pgno, remove it
+  ** from its hash chain. Also, if the PGHDR_NEED_SYNC flag was set for
+  ** page pgno before the 'move' operation, it needs to be retained
+  ** for the page moved there.
+  */
+  pPg->flags &= ~PGHDR_NEED_SYNC;
+  pPgOld = sqlite3PagerLookup(pPager, pgno);
+  assert( !pPgOld || pPgOld->nRef==1 );
+  if( pPgOld ){
+    pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC);
+    if( pPager->tempFile ){
+      /* Do not discard pages from an in-memory database since we might
+      ** need to rollback later. Just move the page out of the way. */
+      sqlite3PcacheMove(pPgOld, pPager->dbSize+1);
+    }else{
+      sqlite3PcacheDrop(pPgOld);
+    }
+  }
+
+  origPgno = pPg->pgno;
+  sqlite3PcacheMove(pPg, pgno);
+  sqlite3PcacheMakeDirty(pPg);
+
+  /* For an in-memory database, make sure the original page continues
+  ** to exist, in case the transaction needs to roll back. Use pPgOld
+  ** as the original page since it has already been allocated.
+ */ + if( pPager->tempFile && pPgOld ){ + sqlite3PcacheMove(pPgOld, origPgno); + sqlite3PagerUnrefNotNull(pPgOld); + } + + if( needSyncPgno ){ + /* If needSyncPgno is non-zero, then the journal file needs to be + ** sync()ed before any data is written to database file page needSyncPgno. + ** Currently, no such page exists in the page-cache and the + ** "is journaled" bitvec flag has been set. This needs to be remedied by + ** loading the page into the pager-cache and setting the PGHDR_NEED_SYNC + ** flag. + ** + ** If the attempt to load the page into the page-cache fails, (due + ** to a malloc() or IO failure), clear the bit in the pInJournal[] + ** array. Otherwise, if the page is loaded and written again in + ** this transaction, it may be written to the database file before + ** it is synced into the journal file. This way, it may end up in + ** the journal file twice, but that is not a problem. + */ + PgHdr *pPgHdr; + rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr, 0); + if( rc!=SQLITE_OK ){ + if( needSyncPgno<=pPager->dbOrigSize ){ + assert( pPager->pTmpSpace!=0 ); + sqlite3BitvecClear(pPager->pInJournal, needSyncPgno, pPager->pTmpSpace); + } + return rc; + } + pPgHdr->flags |= PGHDR_NEED_SYNC; + sqlite3PcacheMakeDirty(pPgHdr); + sqlite3PagerUnrefNotNull(pPgHdr); + } + + return SQLITE_OK; +} +#endif + +/* +** The page handle passed as the first argument refers to a dirty page +** with a page number other than iNew. This function changes the page's +** page number to iNew and sets the value of the PgHdr.flags field to +** the value passed as the third parameter. +*/ +SQLITE_PRIVATE void sqlite3PagerRekey(DbPage *pPg, Pgno iNew, u16 flags){ + assert( pPg->pgno!=iNew ); + pPg->flags = flags; + sqlite3PcacheMove(pPg, iNew); +} + +/* +** Return a pointer to the data for the specified page. +*/ +SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *pPg){ + assert( pPg->nRef>0 || pPg->pPager->memDb ); + return pPg->pData; +} + +/* +** Return a pointer to the Pager.nExtra bytes of "extra" space +** allocated along with the specified page. +*/ +SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *pPg){ + return pPg->pExtra; +} + +/* +** Get/set the locking-mode for this pager. Parameter eMode must be one +** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or +** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then +** the locking-mode is set to the value specified. +** +** The returned value is either PAGER_LOCKINGMODE_NORMAL or +** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated) +** locking-mode. +*/ +SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *pPager, int eMode){ + assert( eMode==PAGER_LOCKINGMODE_QUERY + || eMode==PAGER_LOCKINGMODE_NORMAL + || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); + assert( PAGER_LOCKINGMODE_QUERY<0 ); + assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); + assert( pPager->exclusiveMode || 0==sqlite3WalHeapMemory(pPager->pWal) ); + if( eMode>=0 && !pPager->tempFile && !sqlite3WalHeapMemory(pPager->pWal) ){ + pPager->exclusiveMode = (u8)eMode; + } + return (int)pPager->exclusiveMode; +} + +/* +** Set the journal-mode for this pager. Parameter eMode must be one of: +** +** PAGER_JOURNALMODE_DELETE +** PAGER_JOURNALMODE_TRUNCATE +** PAGER_JOURNALMODE_PERSIST +** PAGER_JOURNALMODE_OFF +** PAGER_JOURNALMODE_MEMORY +** PAGER_JOURNALMODE_WAL +** +** The journalmode is set to the value specified if the change is allowed. 
+** The change may be disallowed for the following reasons:
+**
+**   *  An in-memory database can only have its journal_mode set to _OFF
+**      or _MEMORY.
+**
+**   *  Temporary databases cannot have _WAL journalmode.
+**
+** The returned value indicates the current (possibly updated) journal-mode.
+*/
+SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){
+  u8 eOld = pPager->journalMode;    /* Prior journalmode */
+
+#ifdef SQLITE_DEBUG
+  /* The print_pager_state() routine is intended to be used by the debugger
+  ** only. We invoke it once here to suppress a compiler warning. */
+  print_pager_state(pPager);
+#endif
+
+
+  /* The eMode parameter is always valid */
+  assert(      eMode==PAGER_JOURNALMODE_DELETE
+            || eMode==PAGER_JOURNALMODE_TRUNCATE
+            || eMode==PAGER_JOURNALMODE_PERSIST
+            || eMode==PAGER_JOURNALMODE_OFF
+            || eMode==PAGER_JOURNALMODE_WAL
+            || eMode==PAGER_JOURNALMODE_MEMORY );
+
+  /* This routine is only called from the OP_JournalMode opcode, and
+  ** the logic there will never allow a temporary file to be changed
+  ** to WAL mode.
+  */
+  assert( pPager->tempFile==0 || eMode!=PAGER_JOURNALMODE_WAL );
+
+  /* Do not allow the journalmode of an in-memory database to be set to
+  ** anything other than MEMORY or OFF
+  */
+  if( MEMDB ){
+    assert( eOld==PAGER_JOURNALMODE_MEMORY || eOld==PAGER_JOURNALMODE_OFF );
+    if( eMode!=PAGER_JOURNALMODE_MEMORY && eMode!=PAGER_JOURNALMODE_OFF ){
+      eMode = eOld;
+    }
+  }
+
+  if( eMode!=eOld ){
+
+    /* Change the journal mode. */
+    assert( pPager->eState!=PAGER_ERROR );
+    pPager->journalMode = (u8)eMode;
+
+    /* When transitioning from TRUNCATE or PERSIST to any other journal
+    ** mode except WAL, unless the pager is in locking_mode=exclusive mode,
+    ** delete the journal file.
+    */
+    assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 );
+    assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 );
+    assert( (PAGER_JOURNALMODE_DELETE & 5)==0 );
+    assert( (PAGER_JOURNALMODE_MEMORY & 5)==4 );
+    assert( (PAGER_JOURNALMODE_OFF & 5)==0 );
+    assert( (PAGER_JOURNALMODE_WAL & 5)==5 );
+
+    assert( isOpen(pPager->fd) || pPager->exclusiveMode );
+    if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){
+
+      /* In this case we would like to delete the journal file. If it is
+      ** not possible, then that is not a problem. Deleting the journal file
+      ** here is an optimization only.
+      **
+      ** Before deleting the journal file, obtain a RESERVED lock on the
+      ** database file. This ensures that the journal file is not deleted
+      ** while it is in use by some other client.
+      */
+      sqlite3OsClose(pPager->jfd);
+      if( pPager->eLock>=RESERVED_LOCK ){
+        sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0);
+      }else{
+        int rc = SQLITE_OK;
+        int state = pPager->eState;
+        assert( state==PAGER_OPEN || state==PAGER_READER );
+        if( state==PAGER_OPEN ){
+          rc = sqlite3PagerSharedLock(pPager);
+        }
+        if( pPager->eState==PAGER_READER ){
+          assert( rc==SQLITE_OK );
+          rc = pagerLockDb(pPager, RESERVED_LOCK);
+        }
+        if( rc==SQLITE_OK ){
+          sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0);
+        }
+        if( rc==SQLITE_OK && state==PAGER_READER ){
+          pagerUnlockDb(pPager, SHARED_LOCK);
+        }else if( state==PAGER_OPEN ){
+          pager_unlock(pPager);
+        }
+        assert( state==pPager->eState );
+      }
+    }else if( eMode==PAGER_JOURNALMODE_OFF ){
+      sqlite3OsClose(pPager->jfd);
+    }
+  }
+
+  /* Return the new journal mode */
+  return (int)pPager->journalMode;
+}
+
+/*
+** Return the current journal mode.
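+** (Aside, added for clarity: this is the value ultimately reported back
+** by a bare "PRAGMA journal_mode" query.)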
+*/ +SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager *pPager){ + return (int)pPager->journalMode; +} + +/* +** Return TRUE if the pager is in a state where it is OK to change the +** journalmode. Journalmode changes can only happen when the database +** is unmodified. +*/ +SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){ + assert( assert_pager_state(pPager) ); + if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0; + if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0; + return 1; +} + +/* +** Get/set the size-limit used for persistent journal files. +** +** Setting the size limit to -1 means no limit is enforced. +** An attempt to set a limit smaller than -1 is a no-op. +*/ +SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *pPager, i64 iLimit){ + if( iLimit>=-1 ){ + pPager->journalSizeLimit = iLimit; + sqlite3WalLimit(pPager->pWal, iLimit); + } + return pPager->journalSizeLimit; +} + +/* +** Return a pointer to the pPager->pBackup variable. The backup module +** in backup.c maintains the content of this variable. This module +** uses it opaquely as an argument to sqlite3BackupRestart() and +** sqlite3BackupUpdate() only. +*/ +SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){ + return &pPager->pBackup; +} + +#ifndef SQLITE_OMIT_VACUUM +/* +** Unless this is an in-memory or temporary database, clear the pager cache. +*/ +SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){ + assert( MEMDB==0 || pPager->tempFile ); + if( pPager->tempFile==0 ) pager_reset(pPager); +} +#endif + + +#ifndef SQLITE_OMIT_WAL +/* +** This function is called when the user invokes "PRAGMA wal_checkpoint", +** "PRAGMA wal_blocking_checkpoint" or calls the sqlite3_wal_checkpoint() +** or wal_blocking_checkpoint() API functions. +** +** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. +*/ +SQLITE_PRIVATE int sqlite3PagerCheckpoint( + Pager *pPager, /* Checkpoint on this pager */ + sqlite3 *db, /* Db handle used to check for interrupts */ + int eMode, /* Type of checkpoint */ + int *pnLog, /* OUT: Final number of frames in log */ + int *pnCkpt /* OUT: Final number of checkpointed frames */ +){ + int rc = SQLITE_OK; + if( pPager->pWal ){ + rc = sqlite3WalCheckpoint(pPager->pWal, db, eMode, + (eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler), + pPager->pBusyHandlerArg, + pPager->ckptSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace, + pnLog, pnCkpt + ); + } + return rc; +} + +SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager){ + return sqlite3WalCallback(pPager->pWal); +} + +/* +** Return true if the underlying VFS for the given pager supports the +** primitives necessary for write-ahead logging. +*/ +SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){ + const sqlite3_io_methods *pMethods = pPager->fd->pMethods; + if( pPager->noLock ) return 0; + return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap); +} + +/* +** Attempt to take an exclusive lock on the database file. If a PENDING lock +** is obtained instead, immediately release it. +*/ +static int pagerExclusiveLock(Pager *pPager){ + int rc; /* Return code */ + + assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); + rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); + if( rc!=SQLITE_OK ){ + /* If the attempt to grab the exclusive lock failed, release the + ** pending lock that may have been obtained instead. 
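+    ** (Added note, relying on SQLite's documented lock ladder
+    ** SHARED -> RESERVED -> PENDING -> EXCLUSIVE: dropping back to
+    ** SHARED_LOCK here is what releases that intermediate PENDING lock.)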
*/ + pagerUnlockDb(pPager, SHARED_LOCK); + } + + return rc; +} + +/* +** Call sqlite3WalOpen() to open the WAL handle. If the pager is in +** exclusive-locking mode when this function is called, take an EXCLUSIVE +** lock on the database file and use heap-memory to store the wal-index +** in. Otherwise, use the normal shared-memory. +*/ +static int pagerOpenWal(Pager *pPager){ + int rc = SQLITE_OK; + + assert( pPager->pWal==0 && pPager->tempFile==0 ); + assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); + + /* If the pager is already in exclusive-mode, the WAL module will use + ** heap-memory for the wal-index instead of the VFS shared-memory + ** implementation. Take the exclusive lock now, before opening the WAL + ** file, to make sure this is safe. + */ + if( pPager->exclusiveMode ){ + rc = pagerExclusiveLock(pPager); + } + + /* Open the connection to the log file. If this operation fails, + ** (e.g. due to malloc() failure), return an error code. + */ + if( rc==SQLITE_OK ){ + rc = sqlite3WalOpen(pPager->pVfs, + pPager->fd, pPager->zWal, pPager->exclusiveMode, + pPager->journalSizeLimit, &pPager->pWal + ); + } + pagerFixMaplimit(pPager); + + return rc; +} + + +/* +** The caller must be holding a SHARED lock on the database file to call +** this function. +** +** If the pager passed as the first argument is open on a real database +** file (not a temp file or an in-memory database), and the WAL file +** is not already open, make an attempt to open it now. If successful, +** return SQLITE_OK. If an error occurs or the VFS used by the pager does +** not support the xShmXXX() methods, return an error code. *pbOpen is +** not modified in either case. +** +** If the pager is open on a temp-file (or in-memory database), or if +** the WAL file is already open, set *pbOpen to 1 and return SQLITE_OK +** without doing anything. +*/ +SQLITE_PRIVATE int sqlite3PagerOpenWal( + Pager *pPager, /* Pager object */ + int *pbOpen /* OUT: Set to true if call is a no-op */ +){ + int rc = SQLITE_OK; /* Return code */ + + assert( assert_pager_state(pPager) ); + assert( pPager->eState==PAGER_OPEN || pbOpen ); + assert( pPager->eState==PAGER_READER || !pbOpen ); + assert( pbOpen==0 || *pbOpen==0 ); + assert( pbOpen!=0 || (!pPager->tempFile && !pPager->pWal) ); + + if( !pPager->tempFile && !pPager->pWal ){ + if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN; + + /* Close any rollback journal previously open */ + sqlite3OsClose(pPager->jfd); + + rc = pagerOpenWal(pPager); + if( rc==SQLITE_OK ){ + pPager->journalMode = PAGER_JOURNALMODE_WAL; + pPager->eState = PAGER_OPEN; + } + }else{ + *pbOpen = 1; + } + + return rc; +} + +/* +** This function is called to close the connection to the log file prior +** to switching from WAL to rollback mode. +** +** Before closing the log file, this function attempts to take an +** EXCLUSIVE lock on the database file. If this cannot be obtained, an +** error (SQLITE_BUSY) is returned and the log connection is not closed. +** If successful, the EXCLUSIVE lock is not released before returning. +*/ +SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3 *db){ + int rc = SQLITE_OK; + + assert( pPager->journalMode==PAGER_JOURNALMODE_WAL ); + + /* If the log file is not already open, but does exist in the file-system, + ** it may need to be checkpointed before the connection can switch to + ** rollback mode. Open it now so this can happen. 
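+  ** (Added note: opening the WAL here is what lets the sqlite3WalClose()
+  ** call further down run the final checkpoint and delete the log file,
+  ** per the comment that follows.)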
+ */ + if( !pPager->pWal ){ + int logexists = 0; + rc = pagerLockDb(pPager, SHARED_LOCK); + if( rc==SQLITE_OK ){ + rc = sqlite3OsAccess( + pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists + ); + } + if( rc==SQLITE_OK && logexists ){ + rc = pagerOpenWal(pPager); + } + } + + /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on + ** the database file, the log and log-summary files will be deleted. + */ + if( rc==SQLITE_OK && pPager->pWal ){ + rc = pagerExclusiveLock(pPager); + if( rc==SQLITE_OK ){ + rc = sqlite3WalClose(pPager->pWal, db, pPager->ckptSyncFlags, + pPager->pageSize, (u8*)pPager->pTmpSpace); + pPager->pWal = 0; + pagerFixMaplimit(pPager); + if( rc && !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK); + } + } + return rc; +} + +#ifdef SQLITE_ENABLE_SNAPSHOT +/* +** If this is a WAL database, obtain a snapshot handle for the snapshot +** currently open. Otherwise, return an error. +*/ +SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot){ + int rc = SQLITE_ERROR; + if( pPager->pWal ){ + rc = sqlite3WalSnapshotGet(pPager->pWal, ppSnapshot); + } + return rc; +} + +/* +** If this is a WAL database, store a pointer to pSnapshot. Next time a +** read transaction is opened, attempt to read from the snapshot it +** identifies. If this is not a WAL database, return an error. +*/ +SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot){ + int rc = SQLITE_OK; + if( pPager->pWal ){ + sqlite3WalSnapshotOpen(pPager->pWal, pSnapshot); + }else{ + rc = SQLITE_ERROR; + } + return rc; +} + +/* +** If this is a WAL database, call sqlite3WalSnapshotRecover(). If this +** is not a WAL database, return an error. +*/ +SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager){ + int rc; + if( pPager->pWal ){ + rc = sqlite3WalSnapshotRecover(pPager->pWal); + }else{ + rc = SQLITE_ERROR; + } + return rc; +} +#endif /* SQLITE_ENABLE_SNAPSHOT */ +#endif /* !SQLITE_OMIT_WAL */ + +#ifdef SQLITE_ENABLE_ZIPVFS +/* +** A read-lock must be held on the pager when this function is called. If +** the pager is in WAL mode and the WAL file currently contains one or more +** frames, return the size in bytes of the page images stored within the +** WAL frames. Otherwise, if this is not a WAL database or the WAL file +** is empty, return 0. +*/ +SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ + assert( pPager->eState>=PAGER_READER ); + return sqlite3WalFramesize(pPager->pWal); +} +#endif + +#endif /* SQLITE_OMIT_DISKIO */ + +/************** End of pager.c ***********************************************/ +/************** Begin file wal.c *********************************************/ +/* +** 2010 February 1 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the implementation of a write-ahead log (WAL) used in +** "journal_mode=WAL" mode. +** +** WRITE-AHEAD LOG (WAL) FILE FORMAT +** +** A WAL file consists of a header followed by zero or more "frames". +** Each frame records the revised content of a single page from the +** database file. All changes to the database are recorded by writing +** frames into the WAL. 
+** Transactions commit when a frame is written that
+** contains a commit marker. A single WAL can and usually does record
+** multiple transactions. Periodically, the content of the WAL is
+** transferred back into the database file in an operation called a
+** "checkpoint".
+**
+** A single WAL file can be used multiple times. In other words, the
+** WAL can fill up with frames and then be checkpointed and then new
+** frames can overwrite the old ones. A WAL always grows from beginning
+** toward the end. Checksums and counters attached to each frame are
+** used to determine which frames within the WAL are valid and which
+** are leftovers from prior checkpoints.
+**
+** The WAL header is 32 bytes in size and consists of the following eight
+** big-endian 32-bit unsigned integer values:
+**
+**     0: Magic number. 0x377f0682 or 0x377f0683
+**     4: File format version. Currently 3007000
+**     8: Database page size. Example: 1024
+**    12: Checkpoint sequence number
+**    16: Salt-1, random integer incremented with each checkpoint
+**    20: Salt-2, a different random integer changing with each ckpt
+**    24: Checksum-1 (first part of checksum for first 24 bytes of header).
+**    28: Checksum-2 (second part of checksum for first 24 bytes of header).
+**
+** Immediately following the wal-header are zero or more frames. Each
+** frame consists of a 24-byte frame-header followed by <page-size> bytes
+** of page data. The frame-header is six big-endian 32-bit unsigned
+** integer values, as follows:
+**
+**     0: Page number.
+**     4: For commit records, the size of the database image in pages
+**        after the commit. For all other records, zero.
+**     8: Salt-1 (copied from the header)
+**    12: Salt-2 (copied from the header)
+**    16: Checksum-1.
+**    20: Checksum-2.
+**
+** A frame is considered valid if and only if the following conditions are
+** true:
+**
+**    (1) The salt-1 and salt-2 values in the frame-header match
+**        salt values in the wal-header
+**
+**    (2) The checksum values in the final 8 bytes of the frame-header
+**        exactly match the checksum computed consecutively on the
+**        WAL header and the first 8 bytes and the content of all frames
+**        up to and including the current frame.
+**
+** The checksum is computed using 32-bit big-endian integers if the
+** magic number in the first 4 bytes of the WAL is 0x377f0683 and it
+** is computed using little-endian if the magic number is 0x377f0682.
+** The checksum values are always stored in the frame header in a
+** big-endian format regardless of which byte order is used to compute
+** the checksum. The checksum is computed by interpreting the input as
+** an even number of unsigned 32-bit integers: x[0] through x[N]. The
+** algorithm used for the checksum is as follows:
+**
+**   for i from 0 to n-1 step 2:
+**     s0 += x[i] + s1;
+**     s1 += x[i+1] + s0;
+**   endfor
+**
+** Note that s0 and s1 are both weighted checksums using fibonacci weights
+** in reverse order (the largest fibonacci weight occurs on the first element
+** of the sequence being summed.) The s1 value spans all 32-bit
+** terms of the sequence whereas s0 omits the final term.
+**
+** On a checkpoint, the WAL is first VFS.xSync-ed, then valid content of the
+** WAL is transferred into the database, then the database is VFS.xSync-ed.
+** The VFS.xSync operations serve as write barriers - all writes launched
+** before the xSync must complete before any write that launches after the
+** xSync begins.
+**
+** After each checkpoint, the salt-1 value is incremented and the salt-2
+** value is randomized.
+** This prevents old and new frames in the WAL from
+** being considered valid at the same time and being checkpointed together
+** following a crash.
+**
+** READER ALGORITHM
+**
+** To read a page from the database (call it page number P), a reader
+** first checks the WAL to see if it contains page P. If so, then the
+** last valid instance of page P that is followed by a commit frame
+** or is a commit frame itself becomes the value read. If the WAL
+** contains no copies of page P that are valid and which are a commit
+** frame or are followed by a commit frame, then page P is read from
+** the database file.
+**
+** To start a read transaction, the reader records the index of the last
+** valid frame in the WAL. The reader uses this recorded "mxFrame" value
+** for all subsequent read operations. New transactions can be appended
+** to the WAL, but as long as the reader uses its original mxFrame value
+** and ignores the newly appended content, it will see a consistent snapshot
+** of the database from a single point in time. This technique allows
+** multiple concurrent readers to view different versions of the database
+** content simultaneously.
+**
+** The reader algorithm in the previous paragraphs works correctly, but
+** because frames for page P can appear anywhere within the WAL, the
+** reader has to scan the entire WAL looking for page P frames. If the
+** WAL is large (multiple megabytes is typical) that scan can be slow,
+** and read performance suffers. To overcome this problem, a separate
+** data structure called the wal-index is maintained to expedite the
+** search for frames of a particular page.
+**
+** WAL-INDEX FORMAT
+**
+** Conceptually, the wal-index is shared memory, though VFS implementations
+** might choose to implement the wal-index using a mmapped file. Because
+** the wal-index is shared memory, SQLite does not support journal_mode=WAL
+** on a network filesystem. All users of the database must be able to
+** share memory.
+**
+** The wal-index is transient. After a crash, the wal-index can (and should
+** be) reconstructed from the original WAL file. In fact, the VFS is required
+** to either truncate or zero the header of the wal-index when the last
+** connection to it closes. Because the wal-index is transient, it can
+** use an architecture-specific format; it does not have to be cross-platform.
+** Hence, unlike the database and WAL file formats which store all values
+** as big endian, the wal-index can store multi-byte values in the native
+** byte order of the host computer.
+**
+** The purpose of the wal-index is to answer this question quickly: Given
+** a page number P and a maximum frame index M, return the index of the
+** last frame in the wal before frame M for page P in the WAL, or return
+** NULL if there are no frames for page P in the WAL prior to M.
+**
+** The wal-index consists of a header region, followed by one or
+** more index blocks.
+**
+** The wal-index header contains the total number of frames within the WAL
+** in the mxFrame field.
+**
+** Each index block except for the first contains information on
+** HASHTABLE_NPAGE frames. The first index block contains information on
+** HASHTABLE_NPAGE_ONE frames. The values of HASHTABLE_NPAGE_ONE and
+** HASHTABLE_NPAGE are selected so that together the wal-index header and
+** first index block are the same size as all other index blocks in the
+** wal-index.
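+** (Worked arithmetic, added for illustration: the wal-index header is 136
+** bytes, i.e. 34 32-bit words, so HASHTABLE_NPAGE_ONE works out to
+** 4096 - 136/sizeof(u32) = 4062 and the header plus first block fill the
+** same 32KB as each later block.)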
+**
+** Each index block contains two sections, a page-mapping that contains the
+** database page number associated with each wal frame, and a hash-table
+** that allows readers to query an index block for a specific page number.
+** The page-mapping is an array of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE
+** for the first index block) 32-bit page numbers. The first entry in the
+** first index-block contains the database page number corresponding to the
+** first frame in the WAL file. The first entry in the second index block
+** in the WAL file corresponds to the (HASHTABLE_NPAGE_ONE+1)th frame in
+** the log, and so on.
+**
+** The last index block in a wal-index usually contains less than the full
+** complement of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE) page-numbers,
+** depending on the contents of the WAL file. This does not change the
+** allocated size of the page-mapping array - the page-mapping array merely
+** contains unused entries.
+**
+** Even without using the hash table, the last frame for page P
+** can be found by scanning the page-mapping sections of each index block
+** starting with the last index block and moving toward the first, and
+** within each index block, starting at the end and moving toward the
+** beginning. The first entry that equals P corresponds to the frame
+** holding the content for that page.
+**
+** The hash table consists of HASHTABLE_NSLOT 16-bit unsigned integers.
+** HASHTABLE_NSLOT = 2*HASHTABLE_NPAGE, and there is one entry in the
+** hash table for each page number in the mapping section, so the hash
+** table is never more than half full. The expected number of collisions
+** prior to finding a match is 1. Each entry of the hash table is a
+** 1-based index of an entry in the mapping section of the same
+** index block. Let K be the 1-based index of the largest entry in
+** the mapping section. (For index blocks other than the last, K will
+** always be exactly HASHTABLE_NPAGE (4096) and for the last index block
+** K will be (mxFrame%HASHTABLE_NPAGE).) Unused slots of the hash table
+** contain a value of 0.
+**
+** To look for page P in the hash table, first compute a hash iKey on
+** P as follows:
+**
+**      iKey = (P * 383) % HASHTABLE_NSLOT
+**
+** Then start scanning entries of the hash table, starting with iKey
+** (wrapping around to the beginning when the end of the hash table is
+** reached) until an unused hash slot is found. Let the first unused slot
+** be at index iUnused. (iUnused might be less than iKey if there was
+** wrap-around.) Because the hash table is never more than half full,
+** the search is guaranteed to eventually hit an unused entry. Let
+** iMax be the value between iKey and iUnused, closest to iUnused,
+** where aHash[iMax]==P. If there is no iMax entry (if there exists
+** no hash slot such that aHash[i]==p) then page P is not in the
+** current index block. Otherwise the iMax-th mapping entry of the
+** current index block corresponds to the last entry that references
+** page P.
+**
+** A hash search begins with the last index block and moves toward the
+** first index block, looking for entries corresponding to page P. On
+** average, only two or three slots in each index block need to be
+** examined in order to either find the last entry for page P, or to
+** establish that no such entry exists in the block. Each index block
+** holds over 4000 entries. So two or three index blocks are sufficient
+** to cover a typical 10 megabyte WAL file, assuming 1K pages.
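+** (Worked arithmetic, added for illustration: with 1K pages each frame
+** occupies 1024+24 bytes, so a 10MB WAL holds roughly 10*2^20/1048, i.e.
+** about 10,000 frames, which at over 4000 entries per block is indeed
+** two or three index blocks.)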
8 or 10 +** comparisons (on average) suffice to either locate a frame in the +** WAL or to establish that the frame does not exist in the WAL. This +** is much faster than scanning the entire 10MB WAL. +** +** Note that entries are added in order of increasing K. Hence, one +** reader might be using some value K0 and a second reader that started +** at a later time (after additional transactions were added to the WAL +** and to the wal-index) might be using a different value K1, where K1>K0. +** Both readers can use the same hash table and mapping section to get +** the correct result. There may be entries in the hash table with +** K>K0 but to the first reader, those entries will appear to be unused +** slots in the hash table and so the first reader will get an answer as +** if no values greater than K0 had ever been inserted into the hash table +** in the first place - which is what reader one wants. Meanwhile, the +** second reader using K1 will see additional values that were inserted +** later, which is exactly what reader two wants. +** +** When a rollback occurs, the value of K is decreased. Hash table entries +** that correspond to frames greater than the new K value are removed +** from the hash table at this point. +*/ +#ifndef SQLITE_OMIT_WAL + +/* #include "wal.h" */ + +/* +** Trace output macros +*/ +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3WalTrace = 0; +# define WALTRACE(X) if(sqlite3WalTrace) sqlite3DebugPrintf X +#else +# define WALTRACE(X) +#endif + +/* +** The maximum (and only) versions of the wal and wal-index formats +** that may be interpreted by this version of SQLite. +** +** If a client begins recovering a WAL file and finds that (a) the checksum +** values in the wal-header are correct and (b) the version field is not +** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN. +** +** Similarly, if a client successfully reads a wal-index header (i.e. the +** checksum test is successful) and finds that the version field is not +** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite +** returns SQLITE_CANTOPEN. +*/ +#define WAL_MAX_VERSION 3007000 +#define WALINDEX_MAX_VERSION 3007000 + +/* +** Indices of various locking bytes. WAL_NREADER is the number +** of available reader locks and should be at least 3. The default +** is SQLITE_SHM_NLOCK==8 and WAL_NREADER==5. +*/ +#define WAL_WRITE_LOCK 0 +#define WAL_ALL_BUT_WRITE 1 +#define WAL_CKPT_LOCK 1 +#define WAL_RECOVER_LOCK 2 +#define WAL_READ_LOCK(I) (3+(I)) +#define WAL_NREADER (SQLITE_SHM_NLOCK-3) + + +/* Object declarations */ +typedef struct WalIndexHdr WalIndexHdr; +typedef struct WalIterator WalIterator; +typedef struct WalCkptInfo WalCkptInfo; + + +/* +** The following object holds a copy of the wal-index header content. +** +** The actual header in the wal-index consists of two copies of this +** object followed by one instance of the WalCkptInfo object. +** For all versions of SQLite through 3.10.0 and probably beyond, +** the locking bytes (WalCkptInfo.aLock) start at offset 120 and +** the total header size is 136 bytes. +** +** The szPage value can be any power of 2 between 512 and 32768, inclusive. +** Or it can be 1 to represent a 65536-byte page. The latter case was +** added in 3.7.1 when support for 64K pages was added. 
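+** (Illustration, added: page sizes 512..32768 fit into the u16 szPage
+** field directly, but 65536 does not, hence the special encoding
+** szPage==1 for 64K pages.)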
+*/
+struct WalIndexHdr {
+  u32 iVersion;                   /* Wal-index version */
+  u32 unused;                     /* Unused (padding) field */
+  u32 iChange;                    /* Counter incremented each transaction */
+  u8 isInit;                      /* 1 when initialized */
+  u8 bigEndCksum;                 /* True if checksums in WAL are big-endian */
+  u16 szPage;                     /* Database page size in bytes. 1==64K */
+  u32 mxFrame;                    /* Index of last valid frame in the WAL */
+  u32 nPage;                      /* Size of database in pages */
+  u32 aFrameCksum[2];             /* Checksum of last frame in log */
+  u32 aSalt[2];                   /* Two salt values copied from WAL header */
+  u32 aCksum[2];                  /* Checksum over all prior fields */
+};
+
+/*
+** A copy of the following object occurs in the wal-index immediately
+** following the second copy of the WalIndexHdr. This object stores
+** information used by checkpoint.
+**
+** nBackfill is the number of frames in the WAL that have been written
+** back into the database. (We call the act of moving content from WAL to
+** database "backfilling".) The nBackfill number is never greater than
+** WalIndexHdr.mxFrame. nBackfill can only be increased by threads
+** holding the WAL_CKPT_LOCK lock (which includes a recovery thread).
+** However, a WAL_WRITE_LOCK thread can move the value of nBackfill from
+** mxFrame back to zero when the WAL is reset.
+**
+** nBackfillAttempted is the largest value of nBackfill that a checkpoint
+** has attempted to achieve. Normally nBackfill==nBackfillAttempted, however
+** the nBackfillAttempted is set before any backfilling is done and the
+** nBackfill is only set after all backfilling completes. So if a checkpoint
+** crashes, nBackfillAttempted might be larger than nBackfill. The
+** WalIndexHdr.mxFrame must never be less than nBackfillAttempted.
+**
+** The aLock[] field is a set of bytes used for locking. These bytes should
+** never be read or written.
+**
+** There is one entry in aReadMark[] for each reader lock. If a reader
+** holds read-lock K, then the value in aReadMark[K] is no greater than
+** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff)
+** for any aReadMark[] means that entry is unused. aReadMark[0] is
+** a special case; its value is never used and it exists as a place-holder
+** to avoid having to offset aReadMark[] indexes by one. Readers holding
+** WAL_READ_LOCK(0) always ignore the entire WAL and read all content
+** directly from the database.
+**
+** The value of aReadMark[K] may only be changed by a thread that
+** is holding an exclusive lock on WAL_READ_LOCK(K). Thus, the value of
+** aReadMark[K] cannot be changed while a reader is using that mark,
+** since the reader will be holding a shared lock on WAL_READ_LOCK(K).
+**
+** The checkpointer may only transfer frames from WAL to database where
+** the frame numbers are less than or equal to every aReadMark[] that is
+** in use (that is, every aReadMark[j] for which there is a corresponding
+** WAL_READ_LOCK(j)). New readers (usually) pick the aReadMark[] with the
+** largest value and will increase an unused aReadMark[] to mxFrame if there
+** is not already an aReadMark[] equal to mxFrame. The exception to the
+** previous sentence is when nBackfill equals mxFrame (meaning that everything
+** in the WAL has been backfilled into the database) then new readers
+** will choose aReadMark[0] which has value 0 and hence such readers will
+** get all their content directly from the database file and ignore
+** the WAL.
+**
+** Writers normally append new frames to the end of the WAL.
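+** (Illustration, added: "append" here means that frame mxFrame+1 is
+** written at byte offset walFrameOffset(mxFrame+1, szPage), defined
+** below, after which mxFrame is incremented.)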
However, +** if nBackfill equals mxFrame (meaning that all WAL content has been +** written back into the database) and if no readers are using the WAL +** (in other words, if there are no WAL_READ_LOCK(i) where i>0) then +** the writer will first "reset" the WAL back to the beginning and start +** writing new content beginning at frame 1. +** +** We assume that 32-bit loads are atomic and so no locks are needed in +** order to read from any aReadMark[] entries. +*/ +struct WalCkptInfo { + u32 nBackfill; /* Number of WAL frames backfilled into DB */ + u32 aReadMark[WAL_NREADER]; /* Reader marks */ + u8 aLock[SQLITE_SHM_NLOCK]; /* Reserved space for locks */ + u32 nBackfillAttempted; /* WAL frames perhaps written, or maybe not */ + u32 notUsed0; /* Available for future enhancements */ +}; +#define READMARK_NOT_USED 0xffffffff + + +/* A block of WALINDEX_LOCK_RESERVED bytes beginning at +** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems +** only support mandatory file-locks, we do not read or write data +** from the region of the file on which locks are applied. +*/ +#define WALINDEX_LOCK_OFFSET (sizeof(WalIndexHdr)*2+offsetof(WalCkptInfo,aLock)) +#define WALINDEX_HDR_SIZE (sizeof(WalIndexHdr)*2+sizeof(WalCkptInfo)) + +/* Size of header before each frame in wal */ +#define WAL_FRAME_HDRSIZE 24 + +/* Size of write ahead log header, including checksum. */ +/* #define WAL_HDRSIZE 24 */ +#define WAL_HDRSIZE 32 + +/* WAL magic value. Either this value, or the same value with the least +** significant bit also set (WAL_MAGIC | 0x00000001) is stored in 32-bit +** big-endian format in the first 4 bytes of a WAL file. +** +** If the LSB is set, then the checksums for each frame within the WAL +** file are calculated by treating all data as an array of 32-bit +** big-endian words. Otherwise, they are calculated by interpreting +** all data as 32-bit little-endian words. +*/ +#define WAL_MAGIC 0x377f0682 + +/* +** Return the offset of frame iFrame in the write-ahead log file, +** assuming a database page size of szPage bytes. The offset returned +** is to the start of the write-ahead log frame-header. +*/ +#define walFrameOffset(iFrame, szPage) ( \ + WAL_HDRSIZE + ((iFrame)-1)*(i64)((szPage)+WAL_FRAME_HDRSIZE) \ +) + +/* +** An open write-ahead log file is represented by an instance of the +** following object. +*/ +struct Wal { + sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */ + sqlite3_file *pDbFd; /* File handle for the database file */ + sqlite3_file *pWalFd; /* File handle for WAL file */ + u32 iCallback; /* Value to pass to log callback (or 0) */ + i64 mxWalSize; /* Truncate WAL to this size upon reset */ + int nWiData; /* Size of array apWiData */ + int szFirstBlock; /* Size of first block written to WAL file */ + volatile u32 **apWiData; /* Pointer to wal-index content in memory */ + u32 szPage; /* Database page size */ + i16 readLock; /* Which read lock is being held. 
-1 for none */ + u8 syncFlags; /* Flags to use to sync header writes */ + u8 exclusiveMode; /* Non-zero if connection is in exclusive mode */ + u8 writeLock; /* True if in a write transaction */ + u8 ckptLock; /* True if holding a checkpoint lock */ + u8 readOnly; /* WAL_RDWR, WAL_RDONLY, or WAL_SHM_RDONLY */ + u8 truncateOnCommit; /* True to truncate WAL file on commit */ + u8 syncHeader; /* Fsync the WAL header if true */ + u8 padToSectorBoundary; /* Pad transactions out to the next sector */ + WalIndexHdr hdr; /* Wal-index header for current transaction */ + u32 minFrame; /* Ignore wal frames before this one */ + u32 iReCksum; /* On commit, recalculate checksums from here */ + const char *zWalName; /* Name of WAL file */ + u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ +#ifdef SQLITE_DEBUG + u8 lockError; /* True if a locking error has occurred */ +#endif +#ifdef SQLITE_ENABLE_SNAPSHOT + WalIndexHdr *pSnapshot; /* Start transaction here if not NULL */ +#endif +}; + +/* +** Candidate values for Wal.exclusiveMode. +*/ +#define WAL_NORMAL_MODE 0 +#define WAL_EXCLUSIVE_MODE 1 +#define WAL_HEAPMEMORY_MODE 2 + +/* +** Possible values for WAL.readOnly +*/ +#define WAL_RDWR 0 /* Normal read/write connection */ +#define WAL_RDONLY 1 /* The WAL file is readonly */ +#define WAL_SHM_RDONLY 2 /* The SHM file is readonly */ + +/* +** Each page of the wal-index mapping contains a hash-table made up of +** an array of HASHTABLE_NSLOT elements of the following type. +*/ +typedef u16 ht_slot; + +/* +** This structure is used to implement an iterator that loops through +** all frames in the WAL in database page order. Where two or more frames +** correspond to the same database page, the iterator visits only the +** frame most recently written to the WAL (in other words, the frame with +** the largest index). +** +** The internals of this structure are only accessed by: +** +** walIteratorInit() - Create a new iterator, +** walIteratorNext() - Step an iterator, +** walIteratorFree() - Free an iterator. +** +** This functionality is used by the checkpoint code (see walCheckpoint()). +*/ +struct WalIterator { + int iPrior; /* Last result returned from the iterator */ + int nSegment; /* Number of entries in aSegment[] */ + struct WalSegment { + int iNext; /* Next slot in aIndex[] not yet returned */ + ht_slot *aIndex; /* i0, i1, i2... such that aPgno[iN] ascend */ + u32 *aPgno; /* Array of page numbers. */ + int nEntry; /* Nr. of entries in aPgno[] and aIndex[] */ + int iZero; /* Frame number associated with aPgno[0] */ + } aSegment[1]; /* One for every 32KB page in the wal-index */ +}; + +/* +** Define the parameters of the hash tables in the wal-index file. There +** is a hash-table following every HASHTABLE_NPAGE page numbers in the +** wal-index. +** +** Changing any of these constants will alter the wal-index format and +** create incompatibilities. +*/ +#define HASHTABLE_NPAGE 4096 /* Must be power of 2 */ +#define HASHTABLE_HASH_1 383 /* Should be prime */ +#define HASHTABLE_NSLOT (HASHTABLE_NPAGE*2) /* Must be a power of 2 */ + +/* +** The block of page numbers associated with the first hash-table in a +** wal-index is smaller than usual. This is so that there is a complete +** hash-table on each aligned 32KB page of the wal-index. +*/ +#define HASHTABLE_NPAGE_ONE (HASHTABLE_NPAGE - (WALINDEX_HDR_SIZE/sizeof(u32))) + +/* The wal-index is divided into pages of WALINDEX_PGSZ bytes each. 
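+** (Worked arithmetic, added: with HASHTABLE_NSLOT==8192 two-byte slots and
+** HASHTABLE_NPAGE==4096 four-byte page numbers, WALINDEX_PGSZ comes to
+** 16384 + 16384 = 32768 bytes - the 32KB page size cited above.)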
*/ +#define WALINDEX_PGSZ ( \ + sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \ +) + +/* +** Obtain a pointer to the iPage'th page of the wal-index. The wal-index +** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are +** numbered from zero. +** +** If this call is successful, *ppPage is set to point to the wal-index +** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs, +** then an SQLite error code is returned and *ppPage is set to 0. +*/ +static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ + int rc = SQLITE_OK; + + /* Enlarge the pWal->apWiData[] array if required */ + if( pWal->nWiData<=iPage ){ + int nByte = sizeof(u32*)*(iPage+1); + volatile u32 **apNew; + apNew = (volatile u32 **)sqlite3_realloc64((void *)pWal->apWiData, nByte); + if( !apNew ){ + *ppPage = 0; + return SQLITE_NOMEM_BKPT; + } + memset((void*)&apNew[pWal->nWiData], 0, + sizeof(u32*)*(iPage+1-pWal->nWiData)); + pWal->apWiData = apNew; + pWal->nWiData = iPage+1; + } + + /* Request a pointer to the required page from the VFS */ + if( pWal->apWiData[iPage]==0 ){ + if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ + pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ); + if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM_BKPT; + }else{ + rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, + pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] + ); + if( rc==SQLITE_READONLY ){ + pWal->readOnly |= WAL_SHM_RDONLY; + rc = SQLITE_OK; + } + } + } + + *ppPage = pWal->apWiData[iPage]; + assert( iPage==0 || *ppPage || rc!=SQLITE_OK ); + return rc; +} + +/* +** Return a pointer to the WalCkptInfo structure in the wal-index. +*/ +static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]); +} + +/* +** Return a pointer to the WalIndexHdr structure in the wal-index. +*/ +static volatile WalIndexHdr *walIndexHdr(Wal *pWal){ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + return (volatile WalIndexHdr*)pWal->apWiData[0]; +} + +/* +** The argument to this macro must be of type u32. On a little-endian +** architecture, it returns the u32 value that results from interpreting +** the 4 bytes as a big-endian value. On a big-endian architecture, it +** returns the value that would be produced by interpreting the 4 bytes +** of the input value as a little-endian integer. +*/ +#define BYTESWAP32(x) ( \ + (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8) \ + + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24) \ +) + +/* +** Generate or extend an 8 byte checksum based on the data in +** array aByte[] and the initial values of aIn[0] and aIn[1] (or +** initial values of 0 and 0 if aIn==NULL). +** +** The checksum is written back into aOut[] before returning. +** +** nByte must be a positive multiple of 8. +*/ +static void walChecksumBytes( + int nativeCksum, /* True for native byte-order, false for non-native */ + u8 *a, /* Content to be checksummed */ + int nByte, /* Bytes of content in a[]. Must be a multiple of 8. 
*/
+  const u32 *aIn,           /* Initial checksum value input */
+  u32 *aOut                 /* OUT: Final checksum value output */
+){
+  u32 s1, s2;
+  u32 *aData = (u32 *)a;
+  u32 *aEnd = (u32 *)&a[nByte];
+
+  if( aIn ){
+    s1 = aIn[0];
+    s2 = aIn[1];
+  }else{
+    s1 = s2 = 0;
+  }
+
+  assert( nByte>=8 );
+  assert( (nByte&0x00000007)==0 );
+
+  if( nativeCksum ){
+    do {
+      s1 += *aData++ + s2;
+      s2 += *aData++ + s1;
+    }while( aData<aEnd );
+  }else{
+    do {
+      s1 += BYTESWAP32(aData[0]) + s2;
+      s2 += BYTESWAP32(aData[1]) + s1;
+      aData += 2;
+    }while( aData<aEnd );
+  }
+
+  aOut[0] = s1;
+  aOut[1] = s2;
+}
+
+static void walShmBarrier(Wal *pWal){
+  if( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE ){
+    sqlite3OsShmBarrier(pWal->pDbFd);
+  }
+}
+
+/*
+** Write the header information in pWal->hdr into the wal-index.
+**
+** The checksum on pWal->hdr is updated before it is written.
+*/
+static void walIndexWriteHdr(Wal *pWal){
+  volatile WalIndexHdr *aHdr = walIndexHdr(pWal);
+  const int nCksum = offsetof(WalIndexHdr, aCksum);
+
+  assert( pWal->writeLock );
+  pWal->hdr.isInit = 1;
+  pWal->hdr.iVersion = WALINDEX_MAX_VERSION;
+  walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum);
+  memcpy((void*)&aHdr[1], (const void*)&pWal->hdr, sizeof(WalIndexHdr));
+  walShmBarrier(pWal);
+  memcpy((void*)&aHdr[0], (const void*)&pWal->hdr, sizeof(WalIndexHdr));
+}
+
+/*
+** This function encodes a single frame header and writes it to a buffer
+** supplied by the caller. A frame-header is made up of a series of
+** 4-byte big-endian integers, as follows:
+**
+**     0: Page number.
+**     4: For commit records, the size of the database image in pages
+**        after the commit. For all other records, zero.
+**     8: Salt-1 (copied from the wal-header)
+**    12: Salt-2 (copied from the wal-header)
+**    16: Checksum-1.
+**    20: Checksum-2.
+*/
+static void walEncodeFrame(
+  Wal *pWal,                      /* The write-ahead log */
+  u32 iPage,                      /* Database page number for frame */
+  u32 nTruncate,                  /* New db size (or 0 for non-commit frames) */
+  u8 *aData,                      /* Pointer to page data */
+  u8 *aFrame                      /* OUT: Write encoded frame here */
+){
+  int nativeCksum;                /* True for native byte-order checksums */
+  u32 *aCksum = pWal->hdr.aFrameCksum;
+  assert( WAL_FRAME_HDRSIZE==24 );
+  sqlite3Put4byte(&aFrame[0], iPage);
+  sqlite3Put4byte(&aFrame[4], nTruncate);
+  if( pWal->iReCksum==0 ){
+    memcpy(&aFrame[8], pWal->hdr.aSalt, 8);
+
+    nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN);
+    walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum);
+    walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum);
+
+    sqlite3Put4byte(&aFrame[16], aCksum[0]);
+    sqlite3Put4byte(&aFrame[20], aCksum[1]);
+  }else{
+    memset(&aFrame[8], 0, 16);
+  }
+}
+
+/*
+** Check to see if the frame with header in aFrame[] and content
+** in aData[] is valid. If it is a valid frame, fill *piPage and
+** *pnTruncate and return true. Return false if the frame is not valid.
+*/
+static int walDecodeFrame(
+  Wal *pWal,                      /* The write-ahead log */
+  u32 *piPage,                    /* OUT: Database page number for frame */
+  u32 *pnTruncate,                /* OUT: New db size (or 0 if not commit) */
+  u8 *aData,                      /* Pointer to page data (for checksum) */
+  u8 *aFrame                      /* Frame data */
+){
+  int nativeCksum;                /* True for native byte-order checksums */
+  u32 *aCksum = pWal->hdr.aFrameCksum;
+  u32 pgno;                       /* Page number of the frame */
+  assert( WAL_FRAME_HDRSIZE==24 );
+
+  /* A frame is only valid if the salt values in the frame-header
+  ** match the salt values in the wal-header.
+  */
+  if( memcmp(&pWal->hdr.aSalt, &aFrame[8], 8)!=0 ){
+    return 0;
+  }
+
+  /* A frame is only valid if the page number is greater than zero.
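+  ** (Added note: SQLite page numbers are 1-based, so a pgno of zero can
+  ** only come from a zero-filled or torn frame.)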
+  */
+  pgno = sqlite3Get4byte(&aFrame[0]);
+  if( pgno==0 ){
+    return 0;
+  }
+
+  /* A frame is only valid if a checksum of the WAL header,
+  ** all prior frames, the first 16 bytes of this frame-header,
+  ** and the frame-data matches the checksum in the last 8
+  ** bytes of this frame-header.
+  */
+  nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN);
+  walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum);
+  walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum);
+  if( aCksum[0]!=sqlite3Get4byte(&aFrame[16])
+   || aCksum[1]!=sqlite3Get4byte(&aFrame[20])
+  ){
+    /* Checksum failed. */
+    return 0;
+  }
+
+  /* If we reach this point, the frame is valid. Return the page number
+  ** and the new database size.
+  */
+  *piPage = pgno;
+  *pnTruncate = sqlite3Get4byte(&aFrame[4]);
+  return 1;
+}
+
+
+#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG)
+/*
+** Names of locks. This routine is used to provide debugging output and is not
+** a part of an ordinary build.
+*/
+static const char *walLockName(int lockIdx){
+  if( lockIdx==WAL_WRITE_LOCK ){
+    return "WRITE-LOCK";
+  }else if( lockIdx==WAL_CKPT_LOCK ){
+    return "CKPT-LOCK";
+  }else if( lockIdx==WAL_RECOVER_LOCK ){
+    return "RECOVER-LOCK";
+  }else{
+    static char zName[15];
+    sqlite3_snprintf(sizeof(zName), zName, "READ-LOCK[%d]",
+                     lockIdx-WAL_READ_LOCK(0));
+    return zName;
+  }
+}
+#endif /*defined(SQLITE_TEST) || defined(SQLITE_DEBUG) */
+
+
+/*
+** Set or release locks on the WAL. Locks are either shared or exclusive.
+** A lock cannot be moved directly between shared and exclusive - it must go
+** through the unlocked state first.
+**
+** In locking_mode=EXCLUSIVE, all of these routines become no-ops.
+*/
+static int walLockShared(Wal *pWal, int lockIdx){
+  int rc;
+  if( pWal->exclusiveMode ) return SQLITE_OK;
+  rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1,
+                        SQLITE_SHM_LOCK | SQLITE_SHM_SHARED);
+  WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal,
+            walLockName(lockIdx), rc ? "failed" : "ok"));
+  VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); )
+  return rc;
+}
+static void walUnlockShared(Wal *pWal, int lockIdx){
+  if( pWal->exclusiveMode ) return;
+  (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1,
+                         SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED);
+  WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx)));
+}
+static int walLockExclusive(Wal *pWal, int lockIdx, int n){
+  int rc;
+  if( pWal->exclusiveMode ) return SQLITE_OK;
+  rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
+                        SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE);
+  WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal,
+            walLockName(lockIdx), n, rc ? "failed" : "ok"));
+  VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); )
+  return rc;
+}
+static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){
+  if( pWal->exclusiveMode ) return;
+  (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n,
+                         SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE);
+  WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal,
+            walLockName(lockIdx), n));
+}
+
+/*
+** Compute a hash on a page number. The resulting hash value must land
+** between 0 and (HASHTABLE_NSLOT-1). The walNextHash() function advances
+** the hash to the next value in the event of a collision.
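+** (Worked example, added: with HASHTABLE_NSLOT==8192, page number 1 hashes
+** to (1*383)&8191 == 383; on a collision the probe visits 384, 385, ...,
+** wrapping around at slot 8191.)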
+*/ +static int walHash(u32 iPage){ + assert( iPage>0 ); + assert( (HASHTABLE_NSLOT & (HASHTABLE_NSLOT-1))==0 ); + return (iPage*HASHTABLE_HASH_1) & (HASHTABLE_NSLOT-1); +} +static int walNextHash(int iPriorHash){ + return (iPriorHash+1)&(HASHTABLE_NSLOT-1); +} + +/* +** Return pointers to the hash table and page number array stored on +** page iHash of the wal-index. The wal-index is broken into 32KB pages +** numbered starting from 0. +** +** Set output variable *paHash to point to the start of the hash table +** in the wal-index file. Set *piZero to one less than the frame +** number of the first frame indexed by this hash table. If a +** slot in the hash table is set to N, it refers to frame number +** (*piZero+N) in the log. +** +** Finally, set *paPgno so that *paPgno[1] is the page number of the +** first frame indexed by the hash table, frame (*piZero+1). +*/ +static int walHashGet( + Wal *pWal, /* WAL handle */ + int iHash, /* Find the iHash'th table */ + volatile ht_slot **paHash, /* OUT: Pointer to hash index */ + volatile u32 **paPgno, /* OUT: Pointer to page number array */ + u32 *piZero /* OUT: Frame associated with *paPgno[0] */ +){ + int rc; /* Return code */ + volatile u32 *aPgno; + + rc = walIndexPage(pWal, iHash, &aPgno); + assert( rc==SQLITE_OK || iHash>0 ); + + if( rc==SQLITE_OK ){ + u32 iZero; + volatile ht_slot *aHash; + + aHash = (volatile ht_slot *)&aPgno[HASHTABLE_NPAGE]; + if( iHash==0 ){ + aPgno = &aPgno[WALINDEX_HDR_SIZE/sizeof(u32)]; + iZero = 0; + }else{ + iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE; + } + + *paPgno = &aPgno[-1]; + *paHash = aHash; + *piZero = iZero; + } + return rc; +} + +/* +** Return the number of the wal-index page that contains the hash-table +** and page-number array that contain entries corresponding to WAL frame +** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages +** are numbered starting from 0. +*/ +static int walFramePage(u32 iFrame){ + int iHash = (iFrame+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1) / HASHTABLE_NPAGE; + assert( (iHash==0 || iFrame>HASHTABLE_NPAGE_ONE) + && (iHash>=1 || iFrame<=HASHTABLE_NPAGE_ONE) + && (iHash<=1 || iFrame>(HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE)) + && (iHash>=2 || iFrame<=HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE) + && (iHash<=2 || iFrame>(HASHTABLE_NPAGE_ONE+2*HASHTABLE_NPAGE)) + ); + return iHash; +} + +/* +** Return the page number associated with frame iFrame in this WAL. +*/ +static u32 walFramePgno(Wal *pWal, u32 iFrame){ + int iHash = walFramePage(iFrame); + if( iHash==0 ){ + return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; + } + return pWal->apWiData[iHash][(iFrame-1-HASHTABLE_NPAGE_ONE)%HASHTABLE_NPAGE]; +} + +/* +** Remove entries from the hash table that point to WAL slots greater +** than pWal->hdr.mxFrame. +** +** This function is called whenever pWal->hdr.mxFrame is decreased due +** to a rollback or savepoint. +** +** At most only the hash table containing pWal->hdr.mxFrame needs to be +** updated. Any later hash tables will be automatically cleared when +** pWal->hdr.mxFrame advances to the point where those hash tables are +** actually needed. 
+*/
+static void walCleanupHash(Wal *pWal){
+  volatile ht_slot *aHash = 0;    /* Pointer to hash table to clear */
+  volatile u32 *aPgno = 0;        /* Page number array for hash table */
+  u32 iZero = 0;                  /* frame == (aHash[x]+iZero) */
+  int iLimit = 0;                 /* Zero values greater than this */
+  int nByte;                      /* Number of bytes to zero in aPgno[] */
+  int i;                          /* Used to iterate through aHash[] */
+
+  assert( pWal->writeLock );
+  testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 );
+  testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE );
+  testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE+1 );
+
+  if( pWal->hdr.mxFrame==0 ) return;
+
+  /* Obtain pointers to the hash-table and page-number array containing
+  ** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed
+  ** that the page said hash-table and array reside on is already mapped.
+  */
+  assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) );
+  assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] );
+  walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &aHash, &aPgno, &iZero);
+
+  /* Zero all hash-table entries that correspond to frame numbers greater
+  ** than pWal->hdr.mxFrame.
+  */
+  iLimit = pWal->hdr.mxFrame - iZero;
+  assert( iLimit>0 );
+  for(i=0; i<HASHTABLE_NSLOT; i++){
+    if( aHash[i]>iLimit ){
+      aHash[i] = 0;
+    }
+  }
+
+  /* Zero the entries in the aPgno array that correspond to frames with
+  ** frame numbers greater than pWal->hdr.mxFrame.
+  */
+  nByte = (int)((char *)aHash - (char *)&aPgno[iLimit+1]);
+  memset((void *)&aPgno[iLimit+1], 0, nByte);
+
+#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT
+  /* Verify that every entry in the mapping region is still reachable
+  ** via the hash table even after the cleanup.
+  */
+  if( iLimit ){
+    int j;           /* Loop counter */
+    int iKey;        /* Hash key */
+    for(j=1; j<=iLimit; j++){
+      for(iKey=walHash(aPgno[j]); aHash[iKey]; iKey=walNextHash(iKey)){
+        if( aHash[iKey]==j ) break;
+      }
+      assert( aHash[iKey]==j );
+    }
+  }
+#endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */
+}
+
+
+/*
+** Set an entry in the wal-index that will map database page number
+** pPage into WAL frame iFrame.
+*/
+static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){
+  int rc;                         /* Return code */
+  u32 iZero = 0;                  /* One less than frame number of aPgno[1] */
+  volatile u32 *aPgno = 0;        /* Page number array */
+  volatile ht_slot *aHash = 0;    /* Hash table */
+
+  rc = walHashGet(pWal, walFramePage(iFrame), &aHash, &aPgno, &iZero);
+
+  /* Assuming the wal-index file was successfully mapped, populate the
+  ** page number array and hash table entry.
+  */
+  if( rc==SQLITE_OK ){
+    int iKey;                     /* Hash table key */
+    int idx;                      /* Value to write to hash-table slot */
+    int nCollide;                 /* Number of hash collisions */
+
+    idx = iFrame - iZero;
+    assert( idx <= HASHTABLE_NSLOT/2 + 1 );
+
+    /* If this is the first entry to be added to this hash-table, zero the
+    ** entire hash table and aPgno[] array before proceeding.
+    */
+    if( idx==1 ){
+      int nByte = (int)((u8 *)&aHash[HASHTABLE_NSLOT] - (u8 *)&aPgno[1]);
+      memset((void*)&aPgno[1], 0, nByte);
+    }
+
+    /* If the entry in aPgno[] is already set, then the previous writer
+    ** must have exited unexpectedly in the middle of a transaction (after
+    ** writing one or more dirty pages to the WAL to free up memory).
+    ** Remove the remnants of that writer's uncommitted transaction from
+    ** the hash-table before writing any new entries.
+    */
+    if( aPgno[idx] ){
+      walCleanupHash(pWal);
+      assert( !aPgno[idx] );
+    }
+
+    /* Write the aPgno[] array entry and the hash-table slot.
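+    ** Because at most idx-1 earlier entries exist in this table, a valid
+    ** probe sequence can cross at most idx-1 occupied slots; the nCollide
+    ** counter below therefore treats any longer run of collisions as
+    ** corruption and returns SQLITE_CORRUPT_BKPT instead of looping
+    ** forever.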
*/ + nCollide = idx; + for(iKey=walHash(iPage); aHash[iKey]; iKey=walNextHash(iKey)){ + if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT; + } + aPgno[idx] = iPage; + aHash[iKey] = (ht_slot)idx; + +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT + /* Verify that the number of entries in the hash table exactly equals + ** the number of entries in the mapping region. + */ + { + int i; /* Loop counter */ + int nEntry = 0; /* Number of entries in the hash table */ + for(i=0; ickptLock==1 || pWal->ckptLock==0 ); + assert( WAL_ALL_BUT_WRITE==WAL_WRITE_LOCK+1 ); + assert( WAL_CKPT_LOCK==WAL_ALL_BUT_WRITE ); + assert( pWal->writeLock ); + iLock = WAL_ALL_BUT_WRITE + pWal->ckptLock; + nLock = SQLITE_SHM_NLOCK - iLock; + rc = walLockExclusive(pWal, iLock, nLock); + if( rc ){ + return rc; + } + WALTRACE(("WAL%p: recovery begin...\n", pWal)); + + memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); + + rc = sqlite3OsFileSize(pWal->pWalFd, &nSize); + if( rc!=SQLITE_OK ){ + goto recovery_error; + } + + if( nSize>WAL_HDRSIZE ){ + u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */ + u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */ + int szFrame; /* Number of bytes in buffer aFrame[] */ + u8 *aData; /* Pointer to data part of aFrame buffer */ + int iFrame; /* Index of last frame read */ + i64 iOffset; /* Next offset to read from log file */ + int szPage; /* Page size according to the log */ + u32 magic; /* Magic value read from WAL header */ + u32 version; /* Magic value read from WAL header */ + int isValid; /* True if this frame is valid */ + + /* Read in the WAL header. */ + rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0); + if( rc!=SQLITE_OK ){ + goto recovery_error; + } + + /* If the database page size is not a power of two, or is greater than + ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid + ** data. Similarly, if the 'magic' value is invalid, ignore the whole + ** WAL file. + */ + magic = sqlite3Get4byte(&aBuf[0]); + szPage = sqlite3Get4byte(&aBuf[8]); + if( (magic&0xFFFFFFFE)!=WAL_MAGIC + || szPage&(szPage-1) + || szPage>SQLITE_MAX_PAGE_SIZE + || szPage<512 + ){ + goto finished; + } + pWal->hdr.bigEndCksum = (u8)(magic&0x00000001); + pWal->szPage = szPage; + pWal->nCkpt = sqlite3Get4byte(&aBuf[12]); + memcpy(&pWal->hdr.aSalt, &aBuf[16], 8); + + /* Verify that the WAL header checksum is correct */ + walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN, + aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum + ); + if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24]) + || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28]) + ){ + goto finished; + } + + /* Verify that the version number on the WAL format is one that + ** are able to understand */ + version = sqlite3Get4byte(&aBuf[4]); + if( version!=WAL_MAX_VERSION ){ + rc = SQLITE_CANTOPEN_BKPT; + goto finished; + } + + /* Malloc a buffer to read frames into. */ + szFrame = szPage + WAL_FRAME_HDRSIZE; + aFrame = (u8 *)sqlite3_malloc64(szFrame); + if( !aFrame ){ + rc = SQLITE_NOMEM_BKPT; + goto recovery_error; + } + aData = &aFrame[WAL_FRAME_HDRSIZE]; + + /* Read all frames from the log file. */ + iFrame = 0; + for(iOffset=WAL_HDRSIZE; (iOffset+szFrame)<=nSize; iOffset+=szFrame){ + u32 pgno; /* Database page number for frame */ + u32 nTruncate; /* dbsize field from frame header */ + + /* Read and decode the next log frame. 
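+      ** Frame iFrame begins at byte offset
+      ** WAL_HDRSIZE + (iFrame-1)*(WAL_FRAME_HDRSIZE+szPage). With the
+      ** standard 32-byte WAL header, 24-byte frame headers and 4096-byte
+      ** pages, frame 1 starts at offset 32 and frame 2 at offset 4152.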
*/ + iFrame++; + rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset); + if( rc!=SQLITE_OK ) break; + isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame); + if( !isValid ) break; + rc = walIndexAppend(pWal, iFrame, pgno); + if( rc!=SQLITE_OK ) break; + + /* If nTruncate is non-zero, this is a commit record. */ + if( nTruncate ){ + pWal->hdr.mxFrame = iFrame; + pWal->hdr.nPage = nTruncate; + pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); + testcase( szPage<=32768 ); + testcase( szPage>=65536 ); + aFrameCksum[0] = pWal->hdr.aFrameCksum[0]; + aFrameCksum[1] = pWal->hdr.aFrameCksum[1]; + } + } + + sqlite3_free(aFrame); + } + +finished: + if( rc==SQLITE_OK ){ + volatile WalCkptInfo *pInfo; + int i; + pWal->hdr.aFrameCksum[0] = aFrameCksum[0]; + pWal->hdr.aFrameCksum[1] = aFrameCksum[1]; + walIndexWriteHdr(pWal); + + /* Reset the checkpoint-header. This is safe because this thread is + ** currently holding locks that exclude all other readers, writers and + ** checkpointers. + */ + pInfo = walCkptInfo(pWal); + pInfo->nBackfill = 0; + pInfo->nBackfillAttempted = pWal->hdr.mxFrame; + pInfo->aReadMark[0] = 0; + for(i=1; iaReadMark[i] = READMARK_NOT_USED; + if( pWal->hdr.mxFrame ) pInfo->aReadMark[1] = pWal->hdr.mxFrame; + + /* If more than one frame was recovered from the log file, report an + ** event via sqlite3_log(). This is to help with identifying performance + ** problems caused by applications routinely shutting down without + ** checkpointing the log file. + */ + if( pWal->hdr.nPage ){ + sqlite3_log(SQLITE_NOTICE_RECOVER_WAL, + "recovered %d frames from WAL file %s", + pWal->hdr.mxFrame, pWal->zWalName + ); + } + } + +recovery_error: + WALTRACE(("WAL%p: recovery %s\n", pWal, rc ? "failed" : "ok")); + walUnlockExclusive(pWal, iLock, nLock); + return rc; +} + +/* +** Close an open wal-index. +*/ +static void walIndexClose(Wal *pWal, int isDelete){ + if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ + int i; + for(i=0; inWiData; i++){ + sqlite3_free((void *)pWal->apWiData[i]); + pWal->apWiData[i] = 0; + } + }else{ + sqlite3OsShmUnmap(pWal->pDbFd, isDelete); + } +} + +/* +** Open a connection to the WAL file zWalName. The database file must +** already be opened on connection pDbFd. The buffer that zWalName points +** to must remain valid for the lifetime of the returned Wal* handle. +** +** A SHARED lock should be held on the database file when this function +** is called. The purpose of this SHARED lock is to prevent any other +** client from unlinking the WAL or wal-index file. If another process +** were to do this just after this client opened one of these files, the +** system would be badly broken. +** +** If the log file is successfully opened, SQLITE_OK is returned and +** *ppWal is set to point to a new WAL handle. If an error occurs, +** an SQLite error code is returned and *ppWal is left unmodified. +*/ +SQLITE_PRIVATE int sqlite3WalOpen( + sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */ + sqlite3_file *pDbFd, /* The open database file */ + const char *zWalName, /* Name of the WAL file */ + int bNoShm, /* True to run in heap-memory mode */ + i64 mxWalSize, /* Truncate WAL to this size on reset */ + Wal **ppWal /* OUT: Allocated Wal handle */ +){ + int rc; /* Return Code */ + Wal *pRet; /* Object to allocate and return */ + int flags; /* Flags passed to OsOpen() */ + + assert( zWalName && zWalName[0] ); + assert( pDbFd ); + + /* In the amalgamation, the os_unix.c and os_win.c source files come before + ** this source file. 
Verify that the #defines of the locking byte offsets + ** in os_unix.c and os_win.c agree with the WALINDEX_LOCK_OFFSET value. + ** For that matter, if the lock offset ever changes from its initial design + ** value of 120, we need to know that so there is an assert() to check it. + */ + assert( 120==WALINDEX_LOCK_OFFSET ); + assert( 136==WALINDEX_HDR_SIZE ); +#ifdef WIN_SHM_BASE + assert( WIN_SHM_BASE==WALINDEX_LOCK_OFFSET ); +#endif +#ifdef UNIX_SHM_BASE + assert( UNIX_SHM_BASE==WALINDEX_LOCK_OFFSET ); +#endif + + + /* Allocate an instance of struct Wal to return. */ + *ppWal = 0; + pRet = (Wal*)sqlite3MallocZero(sizeof(Wal) + pVfs->szOsFile); + if( !pRet ){ + return SQLITE_NOMEM_BKPT; + } + + pRet->pVfs = pVfs; + pRet->pWalFd = (sqlite3_file *)&pRet[1]; + pRet->pDbFd = pDbFd; + pRet->readLock = -1; + pRet->mxWalSize = mxWalSize; + pRet->zWalName = zWalName; + pRet->syncHeader = 1; + pRet->padToSectorBoundary = 1; + pRet->exclusiveMode = (bNoShm ? WAL_HEAPMEMORY_MODE: WAL_NORMAL_MODE); + + /* Open file handle on the write-ahead log file. */ + flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); + rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags); + if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){ + pRet->readOnly = WAL_RDONLY; + } + + if( rc!=SQLITE_OK ){ + walIndexClose(pRet, 0); + sqlite3OsClose(pRet->pWalFd); + sqlite3_free(pRet); + }else{ + int iDC = sqlite3OsDeviceCharacteristics(pDbFd); + if( iDC & SQLITE_IOCAP_SEQUENTIAL ){ pRet->syncHeader = 0; } + if( iDC & SQLITE_IOCAP_POWERSAFE_OVERWRITE ){ + pRet->padToSectorBoundary = 0; + } + *ppWal = pRet; + WALTRACE(("WAL%d: opened\n", pRet)); + } + return rc; +} + +/* +** Change the size to which the WAL file is trucated on each reset. +*/ +SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){ + if( pWal ) pWal->mxWalSize = iLimit; +} + +/* +** Find the smallest page number out of all pages held in the WAL that +** has not been returned by any prior invocation of this method on the +** same WalIterator object. Write into *piFrame the frame index where +** that page was last written into the WAL. Write into *piPage the page +** number. +** +** Return 0 on success. If there are no pages in the WAL with a page +** number larger than *piPage, then return 1. +*/ +static int walIteratorNext( + WalIterator *p, /* Iterator */ + u32 *piPage, /* OUT: The page number of the next page */ + u32 *piFrame /* OUT: Wal frame index of next page */ +){ + u32 iMin; /* Result pgno must be greater than iMin */ + u32 iRet = 0xFFFFFFFF; /* 0xffffffff is never a valid page number */ + int i; /* For looping through segments */ + + iMin = p->iPrior; + assert( iMin<0xffffffff ); + for(i=p->nSegment-1; i>=0; i--){ + struct WalSegment *pSegment = &p->aSegment[i]; + while( pSegment->iNextnEntry ){ + u32 iPg = pSegment->aPgno[pSegment->aIndex[pSegment->iNext]]; + if( iPg>iMin ){ + if( iPgiZero + pSegment->aIndex[pSegment->iNext]; + } + break; + } + pSegment->iNext++; + } + } + + *piPage = p->iPrior = iRet; + return (iRet==0xFFFFFFFF); +} + +/* +** This function merges two sorted lists into a single sorted list. +** +** aLeft[] and aRight[] are arrays of indices. The sort key is +** aContent[aLeft[]] and aContent[aRight[]]. 
Upon entry, the following +** is guaranteed for all J0 && nRight>0 ); + while( iRight=nRight || aContent[aLeft[iLeft]]=nLeft || aContent[aLeft[iLeft]]>dbpage ); + assert( iRight>=nRight || aContent[aRight[iRight]]>dbpage ); + } + + *paRight = aLeft; + *pnRight = iOut; + memcpy(aLeft, aTmp, sizeof(aTmp[0])*iOut); +} + +/* +** Sort the elements in list aList using aContent[] as the sort key. +** Remove elements with duplicate keys, preferring to keep the +** larger aList[] values. +** +** The aList[] entries are indices into aContent[]. The values in +** aList[] are to be sorted so that for all J0 ); + assert( HASHTABLE_NPAGE==(1<<(ArraySize(aSub)-1)) ); + + for(iList=0; iListaList && p->nList<=(1<aList==&aList[iList&~((2<aList, p->nList, &aMerge, &nMerge, aBuffer); + } + aSub[iSub].aList = aMerge; + aSub[iSub].nList = nMerge; + } + + for(iSub++; iSubnList<=(1<aList==&aList[nList&~((2<aList, p->nList, &aMerge, &nMerge, aBuffer); + } + } + assert( aMerge==aList ); + *pnList = nMerge; + +#ifdef SQLITE_DEBUG + { + int i; + for(i=1; i<*pnList; i++){ + assert( aContent[aList[i]] > aContent[aList[i-1]] ); + } + } +#endif +} + +/* +** Free an iterator allocated by walIteratorInit(). +*/ +static void walIteratorFree(WalIterator *p){ + sqlite3_free(p); +} + +/* +** Construct a WalInterator object that can be used to loop over all +** pages in the WAL in ascending order. The caller must hold the checkpoint +** lock. +** +** On success, make *pp point to the newly allocated WalInterator object +** return SQLITE_OK. Otherwise, return an error code. If this routine +** returns an error, the value of *pp is undefined. +** +** The calling routine should invoke walIteratorFree() to destroy the +** WalIterator object when it has finished with it. +*/ +static int walIteratorInit(Wal *pWal, WalIterator **pp){ + WalIterator *p; /* Return value */ + int nSegment; /* Number of segments to merge */ + u32 iLast; /* Last frame in log */ + int nByte; /* Number of bytes to allocate */ + int i; /* Iterator variable */ + ht_slot *aTmp; /* Temp space used by merge-sort */ + int rc = SQLITE_OK; /* Return Code */ + + /* This routine only runs while holding the checkpoint lock. And + ** it only runs if there is actually content in the log (mxFrame>0). + */ + assert( pWal->ckptLock && pWal->hdr.mxFrame>0 ); + iLast = pWal->hdr.mxFrame; + + /* Allocate space for the WalIterator object. */ + nSegment = walFramePage(iLast) + 1; + nByte = sizeof(WalIterator) + + (nSegment-1)*sizeof(struct WalSegment) + + iLast*sizeof(ht_slot); + p = (WalIterator *)sqlite3_malloc64(nByte); + if( !p ){ + return SQLITE_NOMEM_BKPT; + } + memset(p, 0, nByte); + p->nSegment = nSegment; + + /* Allocate temporary space used by the merge-sort routine. This block + ** of memory will be freed before this function returns. + */ + aTmp = (ht_slot *)sqlite3_malloc64( + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) + ); + if( !aTmp ){ + rc = SQLITE_NOMEM_BKPT; + } + + for(i=0; rc==SQLITE_OK && iaSegment[p->nSegment])[iZero]; + iZero++; + + for(j=0; jaSegment[i].iZero = iZero; + p->aSegment[i].nEntry = nEntry; + p->aSegment[i].aIndex = aIndex; + p->aSegment[i].aPgno = (u32 *)aPgno; + } + } + sqlite3_free(aTmp); + + if( rc!=SQLITE_OK ){ + walIteratorFree(p); + } + *pp = p; + return rc; +} + +/* +** Attempt to obtain the exclusive WAL lock defined by parameters lockIdx and +** n. If the attempt fails and parameter xBusy is not NULL, then it is a +** busy-handler function. 
Invoke it and retry the lock until either the +** lock is successfully obtained or the busy-handler returns 0. +*/ +static int walBusyLock( + Wal *pWal, /* WAL connection */ + int (*xBusy)(void*), /* Function to call when busy */ + void *pBusyArg, /* Context argument for xBusyHandler */ + int lockIdx, /* Offset of first byte to lock */ + int n /* Number of bytes to lock */ +){ + int rc; + do { + rc = walLockExclusive(pWal, lockIdx, n); + }while( xBusy && rc==SQLITE_BUSY && xBusy(pBusyArg) ); + return rc; +} + +/* +** The cache of the wal-index header must be valid to call this function. +** Return the page-size in bytes used by the database. +*/ +static int walPagesize(Wal *pWal){ + return (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16); +} + +/* +** The following is guaranteed when this function is called: +** +** a) the WRITER lock is held, +** b) the entire log file has been checkpointed, and +** c) any existing readers are reading exclusively from the database +** file - there are no readers that may attempt to read a frame from +** the log file. +** +** This function updates the shared-memory structures so that the next +** client to write to the database (which may be this one) does so by +** writing frames into the start of the log file. +** +** The value of parameter salt1 is used as the aSalt[1] value in the +** new wal-index header. It should be passed a pseudo-random value (i.e. +** one obtained from sqlite3_randomness()). +*/ +static void walRestartHdr(Wal *pWal, u32 salt1){ + volatile WalCkptInfo *pInfo = walCkptInfo(pWal); + int i; /* Loop counter */ + u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */ + pWal->nCkpt++; + pWal->hdr.mxFrame = 0; + sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0])); + memcpy(&pWal->hdr.aSalt[1], &salt1, 4); + walIndexWriteHdr(pWal); + pInfo->nBackfill = 0; + pInfo->nBackfillAttempted = 0; + pInfo->aReadMark[1] = 0; + for(i=2; iaReadMark[i] = READMARK_NOT_USED; + assert( pInfo->aReadMark[0]==0 ); +} + +/* +** Copy as much content as we can from the WAL back into the database file +** in response to an sqlite3_wal_checkpoint() request or the equivalent. +** +** The amount of information copies from WAL to database might be limited +** by active readers. This routine will never overwrite a database page +** that a concurrent reader might be using. +** +** All I/O barrier operations (a.k.a fsyncs) occur in this routine when +** SQLite is in WAL-mode in synchronous=NORMAL. That means that if +** checkpoints are always run by a background thread or background +** process, foreground threads will never block on a lengthy fsync call. +** +** Fsync is called on the WAL before writing content out of the WAL and +** into the database. This ensures that if the new content is persistent +** in the WAL and can be recovered following a power-loss or hard reset. +** +** Fsync is also called on the database file if (and only if) the entire +** WAL content is copied into the database file. This second fsync makes +** it safe to delete the WAL since the new content will persist in the +** database file. +** +** This routine uses and updates the nBackfill field of the wal-index header. +** This is the only routine that will increase the value of nBackfill. +** (A WAL reset or recovery will revert nBackfill to zero, but not increase +** its value.) +** +** The caller must be holding sufficient locks to ensure that no other +** checkpoint is running (in any other thread or process) at the same +** time. 
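+** In outline, the code below computes mxSafeFrame from the aReadMark[]
+** slots, copies frames nBackfill+1 through mxSafeFrame from the WAL into
+** the database file, and then, for RESTART and TRUNCATE modes only,
+** blocks until all readers are finished with the old WAL so that the
+** next writer may rewind it.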
+*/ +static int walCheckpoint( + Wal *pWal, /* Wal connection */ + sqlite3 *db, /* Check for interrupts on this handle */ + int eMode, /* One of PASSIVE, FULL or RESTART */ + int (*xBusy)(void*), /* Function to call when busy */ + void *pBusyArg, /* Context argument for xBusyHandler */ + int sync_flags, /* Flags for OsSync() (or 0) */ + u8 *zBuf /* Temporary buffer to use */ +){ + int rc = SQLITE_OK; /* Return code */ + int szPage; /* Database page-size */ + WalIterator *pIter = 0; /* Wal iterator context */ + u32 iDbpage = 0; /* Next database page to write */ + u32 iFrame = 0; /* Wal frame containing data for iDbpage */ + u32 mxSafeFrame; /* Max frame that can be backfilled */ + u32 mxPage; /* Max database page to write */ + int i; /* Loop counter */ + volatile WalCkptInfo *pInfo; /* The checkpoint status information */ + + szPage = walPagesize(pWal); + testcase( szPage<=32768 ); + testcase( szPage>=65536 ); + pInfo = walCkptInfo(pWal); + if( pInfo->nBackfillhdr.mxFrame ){ + + /* Allocate the iterator */ + rc = walIteratorInit(pWal, &pIter); + if( rc!=SQLITE_OK ){ + return rc; + } + assert( pIter ); + + /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked + ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ + assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); + + /* Compute in mxSafeFrame the index of the last frame of the WAL that is + ** safe to write into the database. Frames beyond mxSafeFrame might + ** overwrite database pages that are in use by active readers and thus + ** cannot be backfilled from the WAL. + */ + mxSafeFrame = pWal->hdr.mxFrame; + mxPage = pWal->hdr.nPage; + for(i=1; iaReadMark[i]; + if( mxSafeFrame>y ){ + assert( y<=pWal->hdr.mxFrame ); + rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); + if( rc==SQLITE_OK ){ + pInfo->aReadMark[i] = (i==1 ? mxSafeFrame : READMARK_NOT_USED); + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + }else if( rc==SQLITE_BUSY ){ + mxSafeFrame = y; + xBusy = 0; + }else{ + goto walcheckpoint_out; + } + } + } + + if( pInfo->nBackfillnBackfill; + + pInfo->nBackfillAttempted = mxSafeFrame; + + /* Sync the WAL to disk */ + if( sync_flags ){ + rc = sqlite3OsSync(pWal->pWalFd, sync_flags); + } + + /* If the database may grow as a result of this checkpoint, hint + ** about the eventual size of the db file to the VFS layer. + */ + if( rc==SQLITE_OK ){ + i64 nReq = ((i64)mxPage * szPage); + rc = sqlite3OsFileSize(pWal->pDbFd, &nSize); + if( rc==SQLITE_OK && nSizepDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq); + } + } + + + /* Iterate through the contents of the WAL, copying data to the db file */ + while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ + i64 iOffset; + assert( walFramePgno(pWal, iFrame)==iDbpage ); + if( db->u1.isInterrupted ){ + rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; + break; + } + if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){ + continue; + } + iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE; + /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */ + rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset); + if( rc!=SQLITE_OK ) break; + iOffset = (iDbpage-1)*(i64)szPage; + testcase( IS_BIG_INT(iOffset) ); + rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset); + if( rc!=SQLITE_OK ) break; + } + + /* If work was actually accomplished... 
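+    ** (i.e. the copy loop above ran to completion), and if every frame in
+    ** the wal-index snapshot has now been backfilled, the database file
+    ** can be truncated to pWal->hdr.nPage pages and synced before
+    ** nBackfill is advanced. For example, with 4096-byte pages and
+    ** nPage==100 the file is truncated to exactly 409600 bytes.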
*/ + if( rc==SQLITE_OK ){ + if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){ + i64 szDb = pWal->hdr.nPage*(i64)szPage; + testcase( IS_BIG_INT(szDb) ); + rc = sqlite3OsTruncate(pWal->pDbFd, szDb); + if( rc==SQLITE_OK && sync_flags ){ + rc = sqlite3OsSync(pWal->pDbFd, sync_flags); + } + } + if( rc==SQLITE_OK ){ + pInfo->nBackfill = mxSafeFrame; + } + } + + /* Release the reader lock held while backfilling */ + walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1); + } + + if( rc==SQLITE_BUSY ){ + /* Reset the return code so as not to report a checkpoint failure + ** just because there are active readers. */ + rc = SQLITE_OK; + } + } + + /* If this is an SQLITE_CHECKPOINT_RESTART or TRUNCATE operation, and the + ** entire wal file has been copied into the database file, then block + ** until all readers have finished using the wal file. This ensures that + ** the next process to write to the database restarts the wal file. + */ + if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){ + assert( pWal->writeLock ); + if( pInfo->nBackfillhdr.mxFrame ){ + rc = SQLITE_BUSY; + }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){ + u32 salt1; + sqlite3_randomness(4, &salt1); + assert( pInfo->nBackfill==pWal->hdr.mxFrame ); + rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(1), WAL_NREADER-1); + if( rc==SQLITE_OK ){ + if( eMode==SQLITE_CHECKPOINT_TRUNCATE ){ + /* IMPLEMENTATION-OF: R-44699-57140 This mode works the same way as + ** SQLITE_CHECKPOINT_RESTART with the addition that it also + ** truncates the log file to zero bytes just prior to a + ** successful return. + ** + ** In theory, it might be safe to do this without updating the + ** wal-index header in shared memory, as all subsequent reader or + ** writer clients should see that the entire log file has been + ** checkpointed and behave accordingly. This seems unsafe though, + ** as it would leave the system in a state where the contents of + ** the wal-index header do not match the contents of the + ** file-system. To avoid this, update the wal-index header to + ** indicate that the log file contains zero valid frames. */ + walRestartHdr(pWal, salt1); + rc = sqlite3OsTruncate(pWal->pWalFd, 0); + } + walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); + } + } + } + + walcheckpoint_out: + walIteratorFree(pIter); + return rc; +} + +/* +** If the WAL file is currently larger than nMax bytes in size, truncate +** it to exactly nMax bytes. If an error occurs while doing so, ignore it. +*/ +static void walLimitSize(Wal *pWal, i64 nMax){ + i64 sz; + int rx; + sqlite3BeginBenignMalloc(); + rx = sqlite3OsFileSize(pWal->pWalFd, &sz); + if( rx==SQLITE_OK && (sz > nMax ) ){ + rx = sqlite3OsTruncate(pWal->pWalFd, nMax); + } + sqlite3EndBenignMalloc(); + if( rx ){ + sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName); + } +} + +/* +** Close a connection to a log file. +*/ +SQLITE_PRIVATE int sqlite3WalClose( + Wal *pWal, /* Wal to close */ + sqlite3 *db, /* For interrupt flag */ + int sync_flags, /* Flags to pass to OsSync() (or 0) */ + int nBuf, + u8 *zBuf /* Buffer of at least nBuf bytes */ +){ + int rc = SQLITE_OK; + if( pWal ){ + int isDelete = 0; /* True to unlink wal and wal-index files */ + + /* If an EXCLUSIVE lock can be obtained on the database file (using the + ** ordinary, rollback-mode locking methods, this guarantees that the + ** connection associated with this log file is the only connection to + ** the database. In this case checkpoint the database and unlink both + ** the wal and wal-index files. 
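+    ** (For example, if the passive checkpoint below succeeds and the
+    ** SQLITE_FCNTL_PERSIST_WAL hint reports persistent-WAL mode off, the
+    ** WAL file is unlinked; with persistent-WAL on and a non-negative
+    ** journal_size_limit it is truncated to zero bytes instead.)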
+ ** + ** The EXCLUSIVE lock is not released before returning. + */ + if( zBuf!=0 + && SQLITE_OK==(rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE)) + ){ + if( pWal->exclusiveMode==WAL_NORMAL_MODE ){ + pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; + } + rc = sqlite3WalCheckpoint(pWal, db, + SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 + ); + if( rc==SQLITE_OK ){ + int bPersist = -1; + sqlite3OsFileControlHint( + pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist + ); + if( bPersist!=1 ){ + /* Try to delete the WAL file if the checkpoint completed and + ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal + ** mode (!bPersist) */ + isDelete = 1; + }else if( pWal->mxWalSize>=0 ){ + /* Try to truncate the WAL file to zero bytes if the checkpoint + ** completed and fsynced (rc==SQLITE_OK) and we are in persistent + ** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a + ** non-negative value (pWal->mxWalSize>=0). Note that we truncate + ** to zero bytes as truncating to the journal_size_limit might + ** leave a corrupt WAL file on disk. */ + walLimitSize(pWal, 0); + } + } + } + + walIndexClose(pWal, isDelete); + sqlite3OsClose(pWal->pWalFd); + if( isDelete ){ + sqlite3BeginBenignMalloc(); + sqlite3OsDelete(pWal->pVfs, pWal->zWalName, 0); + sqlite3EndBenignMalloc(); + } + WALTRACE(("WAL%p: closed\n", pWal)); + sqlite3_free((void *)pWal->apWiData); + sqlite3_free(pWal); + } + return rc; +} + +/* +** Try to read the wal-index header. Return 0 on success and 1 if +** there is a problem. +** +** The wal-index is in shared memory. Another thread or process might +** be writing the header at the same time this procedure is trying to +** read it, which might result in inconsistency. A dirty read is detected +** by verifying that both copies of the header are the same and also by +** a checksum on the header. +** +** If and only if the read is consistent and the header is different from +** pWal->hdr, then pWal->hdr is updated to the content of the new header +** and *pChanged is set to 1. +** +** If the checksum cannot be verified return non-zero. If the header +** is read successfully and the checksum verified, return zero. +*/ +static int walIndexTryHdr(Wal *pWal, int *pChanged){ + u32 aCksum[2]; /* Checksum on the header content */ + WalIndexHdr h1, h2; /* Two copies of the header content */ + WalIndexHdr volatile *aHdr; /* Header in shared memory */ + + /* The first page of the wal-index must be mapped at this point. */ + assert( pWal->nWiData>0 && pWal->apWiData[0] ); + + /* Read the header. This might happen concurrently with a write to the + ** same area of shared memory on a different CPU in a SMP, + ** meaning it is possible that an inconsistent snapshot is read + ** from the file. If this happens, return non-zero. + ** + ** There are two copies of the header at the beginning of the wal-index. + ** When reading, read [0] first then [1]. Writes are in the reverse order. + ** Memory barriers are used to prevent the compiler or the hardware from + ** reordering the reads and writes. 
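+  **
+  ** In effect this is a sequence-lock style read: the reader copies
+  ** aHdr[0], issues a barrier, then copies aHdr[1], while the writer
+  ** updates the two copies in the opposite order. If h1 and h2 compare
+  ** equal below, no write can have straddled the two reads, so the
+  ** snapshot is internally consistent; a torn read shows up as a
+  ** mismatch.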
+ */ + aHdr = walIndexHdr(pWal); + memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); + walShmBarrier(pWal); + memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); + + if( memcmp(&h1, &h2, sizeof(h1))!=0 ){ + return 1; /* Dirty read */ + } + if( h1.isInit==0 ){ + return 1; /* Malformed header - probably all zeros */ + } + walChecksumBytes(1, (u8*)&h1, sizeof(h1)-sizeof(h1.aCksum), 0, aCksum); + if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ){ + return 1; /* Checksum does not match */ + } + + if( memcmp(&pWal->hdr, &h1, sizeof(WalIndexHdr)) ){ + *pChanged = 1; + memcpy(&pWal->hdr, &h1, sizeof(WalIndexHdr)); + pWal->szPage = (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16); + testcase( pWal->szPage<=32768 ); + testcase( pWal->szPage>=65536 ); + } + + /* The header was successfully read. Return zero. */ + return 0; +} + +/* +** Read the wal-index header from the wal-index and into pWal->hdr. +** If the wal-header appears to be corrupt, try to reconstruct the +** wal-index from the WAL before returning. +** +** Set *pChanged to 1 if the wal-index header value in pWal->hdr is +** changed by this operation. If pWal->hdr is unchanged, set *pChanged +** to 0. +** +** If the wal-index header is successfully read, return SQLITE_OK. +** Otherwise an SQLite error code. +*/ +static int walIndexReadHdr(Wal *pWal, int *pChanged){ + int rc; /* Return code */ + int badHdr; /* True if a header read failed */ + volatile u32 *page0; /* Chunk of wal-index containing header */ + + /* Ensure that page 0 of the wal-index (the page that contains the + ** wal-index header) is mapped. Return early if an error occurs here. + */ + assert( pChanged ); + rc = walIndexPage(pWal, 0, &page0); + if( rc!=SQLITE_OK ){ + return rc; + }; + assert( page0 || pWal->writeLock==0 ); + + /* If the first page of the wal-index has been mapped, try to read the + ** wal-index header immediately, without holding any lock. This usually + ** works, but may fail if the wal-index header is corrupt or currently + ** being modified by another thread or process. + */ + badHdr = (page0 ? walIndexTryHdr(pWal, pChanged) : 1); + + /* If the first attempt failed, it might have been due to a race + ** with a writer. So get a WRITE lock and try again. + */ + assert( badHdr==0 || pWal->writeLock==0 ); + if( badHdr ){ + if( pWal->readOnly & WAL_SHM_RDONLY ){ + if( SQLITE_OK==(rc = walLockShared(pWal, WAL_WRITE_LOCK)) ){ + walUnlockShared(pWal, WAL_WRITE_LOCK); + rc = SQLITE_READONLY_RECOVERY; + } + }else if( SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) ){ + pWal->writeLock = 1; + if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ + badHdr = walIndexTryHdr(pWal, pChanged); + if( badHdr ){ + /* If the wal-index header is still malformed even while holding + ** a WRITE lock, it can only mean that the header is corrupted and + ** needs to be reconstructed. So run recovery to do exactly that. + */ + rc = walIndexRecover(pWal); + *pChanged = 1; + } + } + pWal->writeLock = 0; + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + } + } + + /* If the header is read successfully, check the version number to make + ** sure the wal-index was not constructed with some future format that + ** this version of SQLite cannot understand. + */ + if( badHdr==0 && pWal->hdr.iVersion!=WALINDEX_MAX_VERSION ){ + rc = SQLITE_CANTOPEN_BKPT; + } + + return rc; +} + +/* +** This is the value that walTryBeginRead returns when it needs to +** be retried. +*/ +#define WAL_RETRY (-1) + +/* +** Attempt to start a read transaction. 
This might fail due to a race or +** other transient condition. When that happens, it returns WAL_RETRY to +** indicate to the caller that it is safe to retry immediately. +** +** On success return SQLITE_OK. On a permanent failure (such an +** I/O error or an SQLITE_BUSY because another process is running +** recovery) return a positive error code. +** +** The useWal parameter is true to force the use of the WAL and disable +** the case where the WAL is bypassed because it has been completely +** checkpointed. If useWal==0 then this routine calls walIndexReadHdr() +** to make a copy of the wal-index header into pWal->hdr. If the +** wal-index header has changed, *pChanged is set to 1 (as an indication +** to the caller that the local paget cache is obsolete and needs to be +** flushed.) When useWal==1, the wal-index header is assumed to already +** be loaded and the pChanged parameter is unused. +** +** The caller must set the cnt parameter to the number of prior calls to +** this routine during the current read attempt that returned WAL_RETRY. +** This routine will start taking more aggressive measures to clear the +** race conditions after multiple WAL_RETRY returns, and after an excessive +** number of errors will ultimately return SQLITE_PROTOCOL. The +** SQLITE_PROTOCOL return indicates that some other process has gone rogue +** and is not honoring the locking protocol. There is a vanishingly small +** chance that SQLITE_PROTOCOL could be returned because of a run of really +** bad luck when there is lots of contention for the wal-index, but that +** possibility is so small that it can be safely neglected, we believe. +** +** On success, this routine obtains a read lock on +** WAL_READ_LOCK(pWal->readLock). The pWal->readLock integer is +** in the range 0 <= pWal->readLock < WAL_NREADER. If pWal->readLock==(-1) +** that means the Wal does not hold any read lock. The reader must not +** access any database page that is modified by a WAL frame up to and +** including frame number aReadMark[pWal->readLock]. The reader will +** use WAL frames up to and including pWal->hdr.mxFrame if pWal->readLock>0 +** Or if pWal->readLock==0, then the reader will ignore the WAL +** completely and get all content directly from the database file. +** If the useWal parameter is 1 then the WAL will never be ignored and +** this routine will always set pWal->readLock>0 on success. +** When the read transaction is completed, the caller must release the +** lock on WAL_READ_LOCK(pWal->readLock) and set pWal->readLock to -1. +** +** This routine uses the nBackfill and aReadMark[] fields of the header +** to select a particular WAL_READ_LOCK() that strives to let the +** checkpoint process do as much work as possible. This routine might +** update values of the aReadMark[] array in the header, but if it does +** so it takes care to hold an exclusive lock on the corresponding +** WAL_READ_LOCK() while changing values. +*/ +static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ + volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ + u32 mxReadMark; /* Largest aReadMark[] value */ + int mxI; /* Index of largest aReadMark[] value */ + int i; /* Loop counter */ + int rc = SQLITE_OK; /* Return code */ + u32 mxFrame; /* Wal frame to lock to */ + + assert( pWal->readLock<0 ); /* Not currently locked */ + + /* Take steps to avoid spinning forever if there is a protocol error. + ** + ** Circumstances that cause a RETRY should only last for the briefest + ** instances of time. 
No I/O or other system calls are done while the + ** locks are held, so the locks should not be held for very long. But + ** if we are unlucky, another process that is holding a lock might get + ** paged out or take a page-fault that is time-consuming to resolve, + ** during the few nanoseconds that it is holding the lock. In that case, + ** it might take longer than normal for the lock to free. + ** + ** After 5 RETRYs, we begin calling sqlite3OsSleep(). The first few + ** calls to sqlite3OsSleep() have a delay of 1 microsecond. Really this + ** is more of a scheduler yield than an actual delay. But on the 10th + ** an subsequent retries, the delays start becoming longer and longer, + ** so that on the 100th (and last) RETRY we delay for 323 milliseconds. + ** The total delay time before giving up is less than 10 seconds. + */ + if( cnt>5 ){ + int nDelay = 1; /* Pause time in microseconds */ + if( cnt>100 ){ + VVA_ONLY( pWal->lockError = 1; ) + return SQLITE_PROTOCOL; + } + if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; + sqlite3OsSleep(pWal->pVfs, nDelay); + } + + if( !useWal ){ + rc = walIndexReadHdr(pWal, pChanged); + if( rc==SQLITE_BUSY ){ + /* If there is not a recovery running in another thread or process + ** then convert BUSY errors to WAL_RETRY. If recovery is known to + ** be running, convert BUSY to BUSY_RECOVERY. There is a race here + ** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY + ** would be technically correct. But the race is benign since with + ** WAL_RETRY this routine will be called again and will probably be + ** right on the second iteration. + */ + if( pWal->apWiData[0]==0 ){ + /* This branch is taken when the xShmMap() method returns SQLITE_BUSY. + ** We assume this is a transient condition, so return WAL_RETRY. The + ** xShmMap() implementation used by the default unix and win32 VFS + ** modules may return SQLITE_BUSY due to a race condition in the + ** code that determines whether or not the shared-memory region + ** must be zeroed before the requested page is returned. + */ + rc = WAL_RETRY; + }else if( SQLITE_OK==(rc = walLockShared(pWal, WAL_RECOVER_LOCK)) ){ + walUnlockShared(pWal, WAL_RECOVER_LOCK); + rc = WAL_RETRY; + }else if( rc==SQLITE_BUSY ){ + rc = SQLITE_BUSY_RECOVERY; + } + } + if( rc!=SQLITE_OK ){ + return rc; + } + } + + pInfo = walCkptInfo(pWal); + if( !useWal && pInfo->nBackfill==pWal->hdr.mxFrame +#ifdef SQLITE_ENABLE_SNAPSHOT + && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0 + || 0==memcmp(&pWal->hdr, pWal->pSnapshot, sizeof(WalIndexHdr))) +#endif + ){ + /* The WAL has been completely backfilled (or it is empty). + ** and can be safely ignored. + */ + rc = walLockShared(pWal, WAL_READ_LOCK(0)); + walShmBarrier(pWal); + if( rc==SQLITE_OK ){ + if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ + /* It is not safe to allow the reader to continue here if frames + ** may have been appended to the log before READ_LOCK(0) was obtained. + ** When holding READ_LOCK(0), the reader ignores the entire log file, + ** which implies that the database file contains a trustworthy + ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from + ** happening, this is usually correct. + ** + ** However, if frames have been appended to the log (or if the log + ** is wrapped and written for that matter) before the READ_LOCK(0) + ** is obtained, that is not necessarily true. A checkpointer may + ** have started to backfill the appended frames but crashed before + ** it finished. 
Leaving a corrupt image in the database file. + */ + walUnlockShared(pWal, WAL_READ_LOCK(0)); + return WAL_RETRY; + } + pWal->readLock = 0; + return SQLITE_OK; + }else if( rc!=SQLITE_BUSY ){ + return rc; + } + } + + /* If we get this far, it means that the reader will want to use + ** the WAL to get at content from recent commits. The job now is + ** to select one of the aReadMark[] entries that is closest to + ** but not exceeding pWal->hdr.mxFrame and lock that entry. + */ + mxReadMark = 0; + mxI = 0; + mxFrame = pWal->hdr.mxFrame; +#ifdef SQLITE_ENABLE_SNAPSHOT + if( pWal->pSnapshot && pWal->pSnapshot->mxFramepSnapshot->mxFrame; + } +#endif + for(i=1; iaReadMark[i]; + if( mxReadMark<=thisMark && thisMark<=mxFrame ){ + assert( thisMark!=READMARK_NOT_USED ); + mxReadMark = thisMark; + mxI = i; + } + } + if( (pWal->readOnly & WAL_SHM_RDONLY)==0 + && (mxReadMarkaReadMark[i] = mxFrame; + mxI = i; + walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); + break; + }else if( rc!=SQLITE_BUSY ){ + return rc; + } + } + } + if( mxI==0 ){ + assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); + return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTLOCK; + } + + rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + if( rc ){ + return rc==SQLITE_BUSY ? WAL_RETRY : rc; + } + /* Now that the read-lock has been obtained, check that neither the + ** value in the aReadMark[] array or the contents of the wal-index + ** header have changed. + ** + ** It is necessary to check that the wal-index header did not change + ** between the time it was read and when the shared-lock was obtained + ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility + ** that the log file may have been wrapped by a writer, or that frames + ** that occur later in the log than pWal->hdr.mxFrame may have been + ** copied into the database by a checkpointer. If either of these things + ** happened, then reading the database with the current value of + ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry + ** instead. + ** + ** Before checking that the live wal-index header has not changed + ** since it was read, set Wal.minFrame to the first frame in the wal + ** file that has not yet been checkpointed. This client will not need + ** to read any frames earlier than minFrame from the wal file - they + ** can be safely read directly from the database file. + ** + ** Because a ShmBarrier() call is made between taking the copy of + ** nBackfill and checking that the wal-header in shared-memory still + ** matches the one cached in pWal->hdr, it is guaranteed that the + ** checkpointer that set nBackfill was not working with a wal-index + ** header newer than that cached in pWal->hdr. If it were, that could + ** cause a problem. The checkpointer could omit to checkpoint + ** a version of page X that lies before pWal->minFrame (call that version + ** A) on the basis that there is a newer version (version B) of the same + ** page later in the wal file. But if version B happens to like past + ** frame pWal->hdr.mxFrame - then the client would incorrectly assume + ** that it can read version A from the database file. However, since + ** we can guarantee that the checkpointer that set nBackfill could not + ** see any pages past pWal->hdr.mxFrame, this problem does not come up. 
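+  **
+  ** As a concrete sketch: with nBackfill==10 the reader sets minFrame to
+  ** 11, so frames 1 through 10 are read from the database file. If, in
+  ** the meantime, a writer wrapped the log or a checkpointer moved
+  ** aReadMark[mxI], the comparisons below notice it, the lock is
+  ** dropped, and WAL_RETRY is returned.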
+ */ + pWal->minFrame = pInfo->nBackfill+1; + walShmBarrier(pWal); + if( pInfo->aReadMark[mxI]!=mxReadMark + || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) + ){ + walUnlockShared(pWal, WAL_READ_LOCK(mxI)); + return WAL_RETRY; + }else{ + assert( mxReadMark<=pWal->hdr.mxFrame ); + pWal->readLock = (i16)mxI; + } + return rc; +} + +#ifdef SQLITE_ENABLE_SNAPSHOT +/* +** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted +** variable so that older snapshots can be accessed. To do this, loop +** through all wal frames from nBackfillAttempted to (nBackfill+1), +** comparing their content to the corresponding page with the database +** file, if any. Set nBackfillAttempted to the frame number of the +** first frame for which the wal file content matches the db file. +** +** This is only really safe if the file-system is such that any page +** writes made by earlier checkpointers were atomic operations, which +** is not always true. It is also possible that nBackfillAttempted +** may be left set to a value larger than expected, if a wal frame +** contains content that duplicate of an earlier version of the same +** page. +** +** SQLITE_OK is returned if successful, or an SQLite error code if an +** error occurs. It is not an error if nBackfillAttempted cannot be +** decreased at all. +*/ +SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){ + int rc; + + assert( pWal->readLock>=0 ); + rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); + if( rc==SQLITE_OK ){ + volatile WalCkptInfo *pInfo = walCkptInfo(pWal); + int szPage = (int)pWal->szPage; + i64 szDb; /* Size of db file in bytes */ + + rc = sqlite3OsFileSize(pWal->pDbFd, &szDb); + if( rc==SQLITE_OK ){ + void *pBuf1 = sqlite3_malloc(szPage); + void *pBuf2 = sqlite3_malloc(szPage); + if( pBuf1==0 || pBuf2==0 ){ + rc = SQLITE_NOMEM; + }else{ + u32 i = pInfo->nBackfillAttempted; + for(i=pInfo->nBackfillAttempted; i>pInfo->nBackfill; i--){ + volatile ht_slot *dummy; + volatile u32 *aPgno; /* Array of page numbers */ + u32 iZero; /* Frame corresponding to aPgno[0] */ + u32 pgno; /* Page number in db file */ + i64 iDbOff; /* Offset of db file entry */ + i64 iWalOff; /* Offset of wal file entry */ + + rc = walHashGet(pWal, walFramePage(i), &dummy, &aPgno, &iZero); + if( rc!=SQLITE_OK ) break; + pgno = aPgno[i-iZero]; + iDbOff = (i64)(pgno-1) * szPage; + + if( iDbOff+szPage<=szDb ){ + iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; + rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); + + if( rc==SQLITE_OK ){ + rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); + } + + if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){ + break; + } + } + + pInfo->nBackfillAttempted = i-1; + } + } + + sqlite3_free(pBuf1); + sqlite3_free(pBuf2); + } + walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); + } + + return rc; +} +#endif /* SQLITE_ENABLE_SNAPSHOT */ + +/* +** Begin a read transaction on the database. +** +** This routine used to be called sqlite3OpenSnapshot() and with good reason: +** it takes a snapshot of the state of the WAL and wal-index for the current +** instant in time. The current thread will continue to use this snapshot. +** Other threads might append new content to the WAL and wal-index but +** that extra content is ignored by the current thread. +** +** If the database contents have changes since the previous read +** transaction, then *pChanged is set to 1 before returning. The +** Pager layer will use this to know that is cache is stale and +** needs to be flushed. 
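+** (Under SQLITE_ENABLE_SNAPSHOT the code below additionally verifies that
+** a snapshot supplied via pWal->pSnapshot is still reachable: the salt
+** values must match and no checkpointer may have backfilled past
+** pSnapshot->mxFrame, otherwise SQLITE_BUSY_SNAPSHOT is returned.)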
+*/ +SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ + int rc; /* Return code */ + int cnt = 0; /* Number of TryBeginRead attempts */ + +#ifdef SQLITE_ENABLE_SNAPSHOT + int bChanged = 0; + WalIndexHdr *pSnapshot = pWal->pSnapshot; + if( pSnapshot && memcmp(pSnapshot, &pWal->hdr, sizeof(WalIndexHdr))!=0 ){ + bChanged = 1; + } +#endif + + do{ + rc = walTryBeginRead(pWal, pChanged, 0, ++cnt); + }while( rc==WAL_RETRY ); + testcase( (rc&0xff)==SQLITE_BUSY ); + testcase( (rc&0xff)==SQLITE_IOERR ); + testcase( rc==SQLITE_PROTOCOL ); + testcase( rc==SQLITE_OK ); + +#ifdef SQLITE_ENABLE_SNAPSHOT + if( rc==SQLITE_OK ){ + if( pSnapshot && memcmp(pSnapshot, &pWal->hdr, sizeof(WalIndexHdr))!=0 ){ + /* At this point the client has a lock on an aReadMark[] slot holding + ** a value equal to or smaller than pSnapshot->mxFrame, but pWal->hdr + ** is populated with the wal-index header corresponding to the head + ** of the wal file. Verify that pSnapshot is still valid before + ** continuing. Reasons why pSnapshot might no longer be valid: + ** + ** (1) The WAL file has been reset since the snapshot was taken. + ** In this case, the salt will have changed. + ** + ** (2) A checkpoint as been attempted that wrote frames past + ** pSnapshot->mxFrame into the database file. Note that the + ** checkpoint need not have completed for this to cause problems. + */ + volatile WalCkptInfo *pInfo = walCkptInfo(pWal); + + assert( pWal->readLock>0 || pWal->hdr.mxFrame==0 ); + assert( pInfo->aReadMark[pWal->readLock]<=pSnapshot->mxFrame ); + + /* It is possible that there is a checkpointer thread running + ** concurrent with this code. If this is the case, it may be that the + ** checkpointer has already determined that it will checkpoint + ** snapshot X, where X is later in the wal file than pSnapshot, but + ** has not yet set the pInfo->nBackfillAttempted variable to indicate + ** its intent. To avoid the race condition this leads to, ensure that + ** there is no checkpointer process by taking a shared CKPT lock + ** before checking pInfo->nBackfillAttempted. + ** + ** TODO: Does the aReadMark[] lock prevent a checkpointer from doing + ** this already? + */ + rc = walLockShared(pWal, WAL_CKPT_LOCK); + + if( rc==SQLITE_OK ){ + /* Check that the wal file has not been wrapped. Assuming that it has + ** not, also check that no checkpointer has attempted to checkpoint any + ** frames beyond pSnapshot->mxFrame. If either of these conditions are + ** true, return SQLITE_BUSY_SNAPSHOT. Otherwise, overwrite pWal->hdr + ** with *pSnapshot and set *pChanged as appropriate for opening the + ** snapshot. */ + if( !memcmp(pSnapshot->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)) + && pSnapshot->mxFrame>=pInfo->nBackfillAttempted + ){ + assert( pWal->readLock>0 ); + memcpy(&pWal->hdr, pSnapshot, sizeof(WalIndexHdr)); + *pChanged = bChanged; + }else{ + rc = SQLITE_BUSY_SNAPSHOT; + } + + /* Release the shared CKPT lock obtained above. */ + walUnlockShared(pWal, WAL_CKPT_LOCK); + } + + + if( rc!=SQLITE_OK ){ + sqlite3WalEndReadTransaction(pWal); + } + } + } +#endif + return rc; +} + +/* +** Finish with a read transaction. All this does is release the +** read-lock. +*/ +SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){ + sqlite3WalEndWriteTransaction(pWal); + if( pWal->readLock>=0 ){ + walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); + pWal->readLock = -1; + } +} + +/* +** Search the wal file for page pgno. If found, set *piRead to the frame that +** contains the page. 
Otherwise, if pgno is not in the wal file, set *piRead +** to zero. +** +** Return SQLITE_OK if successful, or an error code if an error occurs. If an +** error does occur, the final value of *piRead is undefined. +*/ +SQLITE_PRIVATE int sqlite3WalFindFrame( + Wal *pWal, /* WAL handle */ + Pgno pgno, /* Database page number to read data for */ + u32 *piRead /* OUT: Frame number (or zero) */ +){ + u32 iRead = 0; /* If !=0, WAL frame to return data from */ + u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */ + int iHash; /* Used to loop through N hash tables */ + int iMinHash; + + /* This routine is only be called from within a read transaction. */ + assert( pWal->readLock>=0 || pWal->lockError ); + + /* If the "last page" field of the wal-index header snapshot is 0, then + ** no data will be read from the wal under any circumstances. Return early + ** in this case as an optimization. Likewise, if pWal->readLock==0, + ** then the WAL is ignored by the reader so return early, as if the + ** WAL were empty. + */ + if( iLast==0 || pWal->readLock==0 ){ + *piRead = 0; + return SQLITE_OK; + } + + /* Search the hash table or tables for an entry matching page number + ** pgno. Each iteration of the following for() loop searches one + ** hash table (each hash table indexes up to HASHTABLE_NPAGE frames). + ** + ** This code might run concurrently to the code in walIndexAppend() + ** that adds entries to the wal-index (and possibly to this hash + ** table). This means the value just read from the hash + ** slot (aHash[iKey]) may have been added before or after the + ** current read transaction was opened. Values added after the + ** read transaction was opened may have been written incorrectly - + ** i.e. these slots may contain garbage data. However, we assume + ** that any slots written before the current read transaction was + ** opened remain unmodified. + ** + ** For the reasons above, the if(...) condition featured in the inner + ** loop of the following block is more stringent that would be required + ** if we had exclusive access to the hash-table: + ** + ** (aPgno[iFrame]==pgno): + ** This condition filters out normal hash-table collisions. + ** + ** (iFrame<=iLast): + ** This condition filters out entries that were added to the hash + ** table after the current read-transaction had started. + */ + iMinHash = walFramePage(pWal->minFrame); + for(iHash=walFramePage(iLast); iHash>=iMinHash && iRead==0; iHash--){ + volatile ht_slot *aHash; /* Pointer to hash table */ + volatile u32 *aPgno; /* Pointer to array of page numbers */ + u32 iZero; /* Frame number corresponding to aPgno[0] */ + int iKey; /* Hash slot index */ + int nCollide; /* Number of hash collisions remaining */ + int rc; /* Error code */ + + rc = walHashGet(pWal, iHash, &aHash, &aPgno, &iZero); + if( rc!=SQLITE_OK ){ + return rc; + } + nCollide = HASHTABLE_NSLOT; + for(iKey=walHash(pgno); aHash[iKey]; iKey=walNextHash(iKey)){ + u32 iFrame = aHash[iKey] + iZero; + if( iFrame<=iLast && iFrame>=pWal->minFrame && aPgno[aHash[iKey]]==pgno ){ + assert( iFrame>iRead || CORRUPT_DB ); + iRead = iFrame; + } + if( (nCollide--)==0 ){ + return SQLITE_CORRUPT_BKPT; + } + } + } + +#ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT + /* If expensive assert() statements are available, do a linear search + ** of the wal-index file content. Make sure the results agree with the + ** result obtained using the hash indexes above. 
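+  ** (The linear search scans frames iLast down to minFrame and stops at
+  ** the first frame whose page number matches pgno, i.e. the latest
+  ** version visible to this reader, which is exactly what the hash
+  ** lookup is expected to produce.)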
*/ + { + u32 iRead2 = 0; + u32 iTest; + assert( pWal->minFrame>0 ); + for(iTest=iLast; iTest>=pWal->minFrame; iTest--){ + if( walFramePgno(pWal, iTest)==pgno ){ + iRead2 = iTest; + break; + } + } + assert( iRead==iRead2 ); + } +#endif + + *piRead = iRead; + return SQLITE_OK; +} + +/* +** Read the contents of frame iRead from the wal file into buffer pOut +** (which is nOut bytes in size). Return SQLITE_OK if successful, or an +** error code otherwise. +*/ +SQLITE_PRIVATE int sqlite3WalReadFrame( + Wal *pWal, /* WAL handle */ + u32 iRead, /* Frame to read */ + int nOut, /* Size of buffer pOut in bytes */ + u8 *pOut /* Buffer to write page data to */ +){ + int sz; + i64 iOffset; + sz = pWal->hdr.szPage; + sz = (sz&0xfe00) + ((sz&0x0001)<<16); + testcase( sz<=32768 ); + testcase( sz>=65536 ); + iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE; + /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */ + return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset); +} + +/* +** Return the size of the database in pages (or zero, if unknown). +*/ +SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal){ + if( pWal && ALWAYS(pWal->readLock>=0) ){ + return pWal->hdr.nPage; + } + return 0; +} + + +/* +** This function starts a write transaction on the WAL. +** +** A read transaction must have already been started by a prior call +** to sqlite3WalBeginReadTransaction(). +** +** If another thread or process has written into the database since +** the read transaction was started, then it is not possible for this +** thread to write as doing so would cause a fork. So this routine +** returns SQLITE_BUSY in that case and no write transaction is started. +** +** There can only be a single writer active at a time. +*/ +SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){ + int rc; + + /* Cannot start a write transaction without first holding a read + ** transaction. */ + assert( pWal->readLock>=0 ); + assert( pWal->writeLock==0 && pWal->iReCksum==0 ); + + if( pWal->readOnly ){ + return SQLITE_READONLY; + } + + /* Only one writer allowed at a time. Get the write lock. Return + ** SQLITE_BUSY if unable. + */ + rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); + if( rc ){ + return rc; + } + pWal->writeLock = 1; + + /* If another connection has written to the database file since the + ** time the read transaction on this connection was started, then + ** the write is disallowed. + */ + if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + pWal->writeLock = 0; + rc = SQLITE_BUSY_SNAPSHOT; + } + + return rc; +} + +/* +** End a write transaction. The commit has already been done. This +** routine merely releases the lock. +*/ +SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal){ + if( pWal->writeLock ){ + walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); + pWal->writeLock = 0; + pWal->iReCksum = 0; + pWal->truncateOnCommit = 0; + } + return SQLITE_OK; +} + +/* +** If any data has been written (but not committed) to the log file, this +** function moves the write-pointer back to the start of the transaction. +** +** Additionally, the callback function is invoked for each frame written +** to the WAL since the start of the transaction. If the callback returns +** other than SQLITE_OK, it is not invoked again and the error code is +** returned to the caller. +** +** Otherwise, if the callback function does not return an error, this +** function returns SQLITE_OK. 
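+** For example, if a transaction appended frames 11 through 15 and is then
+** rolled back, the loop below restores mxFrame to 10 from the shared
+** header, invokes xUndo once for the page in each of frames 11 through
+** 15, and finally trims the hash tables via walCleanupHash().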
+*/
+SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx){
+  int rc = SQLITE_OK;
+  if( ALWAYS(pWal->writeLock) ){
+    Pgno iMax = pWal->hdr.mxFrame;
+    Pgno iFrame;
+
+    /* Restore the client's cache of the wal-index header to the state it
+    ** was in before the client began writing to the database.
+    */
+    memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr));
+
+    for(iFrame=pWal->hdr.mxFrame+1;
+        ALWAYS(rc==SQLITE_OK) && iFrame<=iMax;
+        iFrame++
+    ){
+      /* This call cannot fail. Unless the page for which the page number
+      ** is passed as the second argument is (a) in the cache and
+      ** (b) has an outstanding reference, xUndo is either a no-op
+      ** (if (a) is false) or simply expels the page from the cache (if (b)
+      ** is false).
+      **
+      ** If the upper layer is doing a rollback, it is guaranteed that there
+      ** are no outstanding references to any page other than page 1. And
+      ** page 1 is never written to the log until the transaction is
+      ** committed. As a result, the call to xUndo may not fail.
+      */
+      assert( walFramePgno(pWal, iFrame)!=1 );
+      rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame));
+    }
+    if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal);
+  }
+  return rc;
+}
+
+/*
+** Argument aWalData must point to an array of WAL_SAVEPOINT_NDATA u32
+** values. This function populates the array with values required to
+** "rollback" the write position of the WAL handle back to the current
+** point in the event of a savepoint rollback (via WalSavepointUndo()).
+*/
+SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData){
+  assert( pWal->writeLock );
+  aWalData[0] = pWal->hdr.mxFrame;
+  aWalData[1] = pWal->hdr.aFrameCksum[0];
+  aWalData[2] = pWal->hdr.aFrameCksum[1];
+  aWalData[3] = pWal->nCkpt;
+}
+
+/*
+** Move the write position of the WAL back to the point identified by
+** the values in the aWalData[] array. aWalData must point to an array
+** of WAL_SAVEPOINT_NDATA u32 values that has been previously populated
+** by a call to WalSavepoint().
+*/
+SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){
+  int rc = SQLITE_OK;
+
+  assert( pWal->writeLock );
+  assert( aWalData[3]!=pWal->nCkpt || aWalData[0]<=pWal->hdr.mxFrame );
+
+  if( aWalData[3]!=pWal->nCkpt ){
+    /* This savepoint was opened immediately after the write-transaction
+    ** was started. Right after that, the writer decided to wrap around
+    ** to the start of the log. Update the savepoint values to match.
+    */
+    aWalData[0] = 0;
+    aWalData[3] = pWal->nCkpt;
+  }
+
+  if( aWalData[0]<pWal->hdr.mxFrame ){
+    pWal->hdr.mxFrame = aWalData[0];
+    pWal->hdr.aFrameCksum[0] = aWalData[1];
+    pWal->hdr.aFrameCksum[1] = aWalData[2];
+    walCleanupHash(pWal);
+  }
+
+  return rc;
+}
+
+/*
+** This function is called just before writing a set of frames to the log
+** file (see sqlite3WalFrames()). It checks to see if, instead of appending
+** to the current log file, it is possible to overwrite the start of the
+** existing log file with the new frames (i.e. "reset" the log). If so,
+** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left
+** unchanged.
+**
+** SQLITE_OK is returned if no error is encountered (regardless of whether
+** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned
+** if an error occurs.
+*/
+static int walRestartLog(Wal *pWal){
+  int rc = SQLITE_OK;
+  int cnt;
+
+  if( pWal->readLock==0 ){
+    volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+    assert( pInfo->nBackfill==pWal->hdr.mxFrame );
+    if( pInfo->nBackfill>0 ){
+      u32 salt1;
+      sqlite3_randomness(4, &salt1);
+      rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+      if( rc==SQLITE_OK ){
+        /* If all readers are using WAL_READ_LOCK(0) (in other words if no
+        ** readers are currently using the WAL), then the transaction's
+        ** frames will overwrite the start of the existing log. Update the
+        ** wal-index header to reflect this.
+        **
+        ** In theory it would be OK to update the cache of the header only
+        ** at this point. But updating the actual wal-index header is also
+        ** safe and means there is no special case for sqlite3WalUndo()
+        ** to handle if this transaction is rolled back. */
+        walRestartHdr(pWal, salt1);
+        walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
+      }else if( rc!=SQLITE_BUSY ){
+        return rc;
+      }
+    }
+    walUnlockShared(pWal, WAL_READ_LOCK(0));
+    pWal->readLock = -1;
+    cnt = 0;
+    do{
+      int notUsed;
+      rc = walTryBeginRead(pWal, &notUsed, 1, ++cnt);
+    }while( rc==WAL_RETRY );
+    assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */
+    testcase( (rc&0xff)==SQLITE_IOERR );
+    testcase( rc==SQLITE_PROTOCOL );
+    testcase( rc==SQLITE_OK );
+  }
+  return rc;
+}
+
+/*
+** Information about the current state of the WAL file and where
+** the next fsync should occur - passed from sqlite3WalFrames() into
+** walWriteToLog().
+*/
+typedef struct WalWriter {
+  Wal *pWal;                 /* The complete WAL information */
+  sqlite3_file *pFd;         /* The WAL file to which we write */
+  sqlite3_int64 iSyncPoint;  /* Fsync at this offset */
+  int syncFlags;             /* Flags for the fsync */
+  int szPage;                /* Size of one page */
+} WalWriter;
+
+/*
+** Write iAmt bytes of content into the WAL file beginning at iOffset.
+** Do a sync when crossing the p->iSyncPoint boundary.
+**
+** In other words, if iSyncPoint is in between iOffset and iOffset+iAmt,
+** first write the part before iSyncPoint, then sync, then write the
+** rest.
+*/
+static int walWriteToLog(
+  WalWriter *p,              /* WAL to write to */
+  void *pContent,            /* Content to be written */
+  int iAmt,                  /* Number of bytes to write */
+  sqlite3_int64 iOffset      /* Start writing at this offset */
+){
+  int rc;
+  if( iOffset<p->iSyncPoint && iOffset+iAmt>=p->iSyncPoint ){
+    int iFirstAmt = (int)(p->iSyncPoint - iOffset);
+    rc = sqlite3OsWrite(p->pFd, pContent, iFirstAmt, iOffset);
+    if( rc ) return rc;
+    iOffset += iFirstAmt;
+    iAmt -= iFirstAmt;
+    pContent = (void*)(iFirstAmt + (char*)pContent);
+    assert( p->syncFlags & (SQLITE_SYNC_NORMAL|SQLITE_SYNC_FULL) );
+    rc = sqlite3OsSync(p->pFd, p->syncFlags & SQLITE_SYNC_MASK);
+    if( iAmt==0 || rc ) return rc;
+  }
+  rc = sqlite3OsWrite(p->pFd, pContent, iAmt, iOffset);
+  return rc;
+}
+
+/*
+** Write out a single frame of the WAL
+*/
+static int walWriteOneFrame(
+  WalWriter *p,              /* Where to write the frame */
+  PgHdr *pPage,              /* The page of the frame to be written */
+  int nTruncate,             /* The commit flag. Usually 0. >0 for commit */
+  sqlite3_int64 iOffset      /* Byte offset at which to write */
+){
+  int rc;                         /* Result code from subfunctions */
+  void *pData;                    /* Data actually written */
+  u8 aFrame[WAL_FRAME_HDRSIZE];   /* Buffer to assemble frame-header in */
+#if defined(SQLITE_HAS_CODEC)
+  if( (pData = sqlite3PagerCodec(pPage))==0 ) return SQLITE_NOMEM_BKPT;
+#else
+  pData = pPage->pData;
+#endif
+  walEncodeFrame(p->pWal, pPage->pgno, nTruncate, pData, aFrame);
+  rc = walWriteToLog(p, aFrame, sizeof(aFrame), iOffset);
+  if( rc ) return rc;
+  /* Write the page data */
+  rc = walWriteToLog(p, pData, p->szPage, iOffset+sizeof(aFrame));
+  return rc;
+}
+
+/*
+** This function is called as part of committing a transaction within which
+** one or more frames have been overwritten. It updates the checksums for
+** all frames written to the wal file by the current transaction starting
+** with the earliest to have been overwritten.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int walRewriteChecksums(Wal *pWal, u32 iLast){
+  const int szPage = pWal->szPage;  /* Database page size */
+  int rc = SQLITE_OK;               /* Return code */
+  u8 *aBuf;                         /* Buffer to load data from wal file into */
+  u8 aFrame[WAL_FRAME_HDRSIZE];     /* Buffer to assemble frame-headers in */
+  u32 iRead;                        /* Next frame to read from wal file */
+  i64 iCksumOff;
+
+  aBuf = sqlite3_malloc(szPage + WAL_FRAME_HDRSIZE);
+  if( aBuf==0 ) return SQLITE_NOMEM_BKPT;
+
+  /* Find the checksum values to use as input for recalculating the
+  ** first checksum. If the first frame is frame 1 (implying that the current
+  ** transaction restarted the wal file), these values must be read from the
+  ** wal-file header. Otherwise, read them from the frame header of the
+  ** previous frame. */
+  assert( pWal->iReCksum>0 );
+  if( pWal->iReCksum==1 ){
+    iCksumOff = 24;
+  }else{
+    iCksumOff = walFrameOffset(pWal->iReCksum-1, szPage) + 16;
+  }
+  rc = sqlite3OsRead(pWal->pWalFd, aBuf, sizeof(u32)*2, iCksumOff);
+  pWal->hdr.aFrameCksum[0] = sqlite3Get4byte(aBuf);
+  pWal->hdr.aFrameCksum[1] = sqlite3Get4byte(&aBuf[sizeof(u32)]);
+
+  iRead = pWal->iReCksum;
+  pWal->iReCksum = 0;
+  for(; rc==SQLITE_OK && iRead<=iLast; iRead++){
+    i64 iOff = walFrameOffset(iRead, szPage);
+    rc = sqlite3OsRead(pWal->pWalFd, aBuf, szPage+WAL_FRAME_HDRSIZE, iOff);
+    if( rc==SQLITE_OK ){
+      u32 iPgno, nDbSize;
+      iPgno = sqlite3Get4byte(aBuf);
+      nDbSize = sqlite3Get4byte(&aBuf[4]);
+
+      walEncodeFrame(pWal, iPgno, nDbSize, &aBuf[WAL_FRAME_HDRSIZE], aFrame);
+      rc = sqlite3OsWrite(pWal->pWalFd, aFrame, sizeof(aFrame), iOff);
+    }
+  }
+
+  sqlite3_free(aBuf);
+  return rc;
+}
+
+/*
+** Write a set of frames to the log. The caller must hold the write-lock
+** on the log file (obtained using sqlite3WalBeginWriteTransaction()).
+*/
+SQLITE_PRIVATE int sqlite3WalFrames(
+  Wal *pWal,                      /* Wal handle to write to */
+  int szPage,                     /* Database page-size in bytes */
+  PgHdr *pList,                   /* List of dirty pages to write */
+  Pgno nTruncate,                 /* Database size after this commit */
+  int isCommit,                   /* True if this is a commit */
+  int sync_flags                  /* Flags to pass to OsSync() (or 0) */
+){
+  int rc;                         /* Used to catch return codes */
+  u32 iFrame;                     /* Next frame address */
+  PgHdr *p;                       /* Iterator to run through pList with.
*/ + PgHdr *pLast = 0; /* Last frame in list */ + int nExtra = 0; /* Number of extra copies of last page */ + int szFrame; /* The size of a single frame */ + i64 iOffset; /* Next byte to write in WAL file */ + WalWriter w; /* The writer */ + u32 iFirst = 0; /* First frame that may be overwritten */ + WalIndexHdr *pLive; /* Pointer to shared header */ + + assert( pList ); + assert( pWal->writeLock ); + + /* If this frame set completes a transaction, then nTruncate>0. If + ** nTruncate==0 then this frame set does not complete the transaction. */ + assert( (isCommit!=0)==(nTruncate!=0) ); + +#if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) + { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){} + WALTRACE(("WAL%p: frame write begin. %d frames. mxFrame=%d. %s\n", + pWal, cnt, pWal->hdr.mxFrame, isCommit ? "Commit" : "Spill")); + } +#endif + + pLive = (WalIndexHdr*)walIndexHdr(pWal); + if( memcmp(&pWal->hdr, (void *)pLive, sizeof(WalIndexHdr))!=0 ){ + iFirst = pLive->mxFrame+1; + } + + /* See if it is possible to write these frames into the start of the + ** log file, instead of appending to it at pWal->hdr.mxFrame. + */ + if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){ + return rc; + } + + /* If this is the first frame written into the log, write the WAL + ** header to the start of the WAL file. See comments at the top of + ** this source file for a description of the WAL header format. + */ + iFrame = pWal->hdr.mxFrame; + if( iFrame==0 ){ + u8 aWalHdr[WAL_HDRSIZE]; /* Buffer to assemble wal-header in */ + u32 aCksum[2]; /* Checksum for wal-header */ + + sqlite3Put4byte(&aWalHdr[0], (WAL_MAGIC | SQLITE_BIGENDIAN)); + sqlite3Put4byte(&aWalHdr[4], WAL_MAX_VERSION); + sqlite3Put4byte(&aWalHdr[8], szPage); + sqlite3Put4byte(&aWalHdr[12], pWal->nCkpt); + if( pWal->nCkpt==0 ) sqlite3_randomness(8, pWal->hdr.aSalt); + memcpy(&aWalHdr[16], pWal->hdr.aSalt, 8); + walChecksumBytes(1, aWalHdr, WAL_HDRSIZE-2*4, 0, aCksum); + sqlite3Put4byte(&aWalHdr[24], aCksum[0]); + sqlite3Put4byte(&aWalHdr[28], aCksum[1]); + + pWal->szPage = szPage; + pWal->hdr.bigEndCksum = SQLITE_BIGENDIAN; + pWal->hdr.aFrameCksum[0] = aCksum[0]; + pWal->hdr.aFrameCksum[1] = aCksum[1]; + pWal->truncateOnCommit = 1; + + rc = sqlite3OsWrite(pWal->pWalFd, aWalHdr, sizeof(aWalHdr), 0); + WALTRACE(("WAL%p: wal-header write %s\n", pWal, rc ? "failed" : "ok")); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Sync the header (unless SQLITE_IOCAP_SEQUENTIAL is true or unless + ** all syncing is turned off by PRAGMA synchronous=OFF). Otherwise + ** an out-of-order write following a WAL restart could result in + ** database corruption. See the ticket: + ** + ** http://localhost:591/sqlite/info/ff5be73dee + */ + if( pWal->syncHeader && sync_flags ){ + rc = sqlite3OsSync(pWal->pWalFd, sync_flags & SQLITE_SYNC_MASK); + if( rc ) return rc; + } + } + assert( (int)pWal->szPage==szPage ); + + /* Setup information needed to write frames into the WAL */ + w.pWal = pWal; + w.pFd = pWal->pWalFd; + w.iSyncPoint = 0; + w.syncFlags = sync_flags; + w.szPage = szPage; + iOffset = walFrameOffset(iFrame+1, szPage); + szFrame = szPage + WAL_FRAME_HDRSIZE; + + /* Write all frames into the log file exactly once */ + for(p=pList; p; p=p->pDirty){ + int nDbSize; /* 0 normally. Positive == commit flag */ + + /* Check if this page has already been written into the wal file by + ** the current transaction. 
If so, overwrite the existing frame and
+    ** set Wal.writeLock to WAL_WRITELOCK_RECKSUM - indicating that
+    ** checksums must be recomputed when the transaction is committed. */
+    if( iFirst && (p->pDirty || isCommit==0) ){
+      u32 iWrite = 0;
+      VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite);
+      assert( rc==SQLITE_OK || iWrite==0 );
+      if( iWrite>=iFirst ){
+        i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE;
+        void *pData;
+        if( pWal->iReCksum==0 || iWrite<pWal->iReCksum ){
+          pWal->iReCksum = iWrite;
+        }
+#if defined(SQLITE_HAS_CODEC)
+        if( (pData = sqlite3PagerCodec(p))==0 ) return SQLITE_NOMEM;
+#else
+        pData = p->pData;
+#endif
+        rc = sqlite3OsWrite(pWal->pWalFd, pData, szPage, iOff);
+        if( rc ) return rc;
+        p->flags &= ~PGHDR_WAL_APPEND;
+        continue;
+      }
+    }
+
+    iFrame++;
+    assert( iOffset==walFrameOffset(iFrame, szPage) );
+    nDbSize = (isCommit && p->pDirty==0) ? nTruncate : 0;
+    rc = walWriteOneFrame(&w, p, nDbSize, iOffset);
+    if( rc ) return rc;
+    pLast = p;
+    iOffset += szFrame;
+    p->flags |= PGHDR_WAL_APPEND;
+  }
+
+  /* Recalculate checksums within the wal file if required. */
+  if( isCommit && pWal->iReCksum ){
+    rc = walRewriteChecksums(pWal, iFrame);
+    if( rc ) return rc;
+  }
+
+  /* If this is the end of a transaction, then we might need to pad
+  ** the transaction and/or sync the WAL file.
+  **
+  ** Padding and syncing only occur if this set of frames completes a
+  ** transaction and if PRAGMA synchronous=FULL. If synchronous==NORMAL
+  ** or synchronous==OFF, then no padding or syncing are needed.
+  **
+  ** If SQLITE_IOCAP_POWERSAFE_OVERWRITE is defined, then padding is not
+  ** needed and only the sync is done. If padding is needed, then the
+  ** final frame is repeated (with its commit mark) until the next sector
+  ** boundary is crossed. Only the part of the WAL prior to the last
+  ** sector boundary is synced; the part of the last frame that extends
+  ** past the sector boundary is written after the sync.
+  */
+  if( isCommit && (sync_flags & WAL_SYNC_TRANSACTIONS)!=0 ){
+    int bSync = 1;
+    if( pWal->padToSectorBoundary ){
+      int sectorSize = sqlite3SectorSize(pWal->pWalFd);
+      w.iSyncPoint = ((iOffset+sectorSize-1)/sectorSize)*sectorSize;
+      bSync = (w.iSyncPoint==iOffset);
+      testcase( bSync );
+      while( iOffset<w.iSyncPoint ){
+        rc = walWriteOneFrame(&w, pLast, nTruncate, iOffset);
+        if( rc ) return rc;
+        iOffset += szFrame;
+        nExtra++;
+      }
+    }
+    if( bSync ){
+      assert( rc==SQLITE_OK );
+      rc = sqlite3OsSync(w.pFd, sync_flags & SQLITE_SYNC_MASK);
+    }
+  }
+
+  /* If this frame set completes the first transaction in the WAL and
+  ** if PRAGMA journal_size_limit is set, then truncate the WAL to the
+  ** journal size limit, if possible.
+  */
+  if( isCommit && pWal->truncateOnCommit && pWal->mxWalSize>=0 ){
+    i64 sz = pWal->mxWalSize;
+    if( walFrameOffset(iFrame+nExtra+1, szPage)>pWal->mxWalSize ){
+      sz = walFrameOffset(iFrame+nExtra+1, szPage);
+    }
+    walLimitSize(pWal, sz);
+    pWal->truncateOnCommit = 0;
+  }
+
+  /* Append data to the wal-index. It is not necessary to lock the
+  ** wal-index to do this as the SQLITE_SHM_WRITE lock held on the wal-index
+  ** guarantees that there are no other writers, and no data that may
+  ** be in use by existing readers is being overwritten.
+  */
+  iFrame = pWal->hdr.mxFrame;
+  for(p=pList; p && rc==SQLITE_OK; p=p->pDirty){
+    if( (p->flags & PGHDR_WAL_APPEND)==0 ) continue;
+    iFrame++;
+    rc = walIndexAppend(pWal, iFrame, p->pgno);
+  }
+  while( rc==SQLITE_OK && nExtra>0 ){
+    iFrame++;
+    nExtra--;
+    rc = walIndexAppend(pWal, iFrame, pLast->pgno);
+  }
+
+  if( rc==SQLITE_OK ){
+    /* Update the private copy of the header. */
+    pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16));
+    testcase( szPage<=32768 );
+    testcase( szPage>=65536 );
+    pWal->hdr.mxFrame = iFrame;
+    if( isCommit ){
+      pWal->hdr.iChange++;
+      pWal->hdr.nPage = nTruncate;
+    }
+    /* If this is a commit, update the wal-index header too.
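+    **
+    ** Publishing the updated header, in particular the new mxFrame value,
+    ** is what makes this transaction visible to read transactions that
+    ** start after this point.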
*/ + if( isCommit ){ + walIndexWriteHdr(pWal); + pWal->iCallback = iFrame; + } + } + + WALTRACE(("WAL%p: frame write %s\n", pWal, rc ? "failed" : "ok")); + return rc; +} + +/* +** This routine is called to implement sqlite3_wal_checkpoint() and +** related interfaces. +** +** Obtain a CHECKPOINT lock and then backfill as much information as +** we can from WAL into the database. +** +** If parameter xBusy is not NULL, it is a pointer to a busy-handler +** callback. In this case this function runs a blocking checkpoint. +*/ +SQLITE_PRIVATE int sqlite3WalCheckpoint( + Wal *pWal, /* Wal connection */ + sqlite3 *db, /* Check this handle's interrupt flag */ + int eMode, /* PASSIVE, FULL, RESTART, or TRUNCATE */ + int (*xBusy)(void*), /* Function to call when busy */ + void *pBusyArg, /* Context argument for xBusyHandler */ + int sync_flags, /* Flags to sync db file with (or 0) */ + int nBuf, /* Size of temporary buffer */ + u8 *zBuf, /* Temporary buffer to use */ + int *pnLog, /* OUT: Number of frames in WAL */ + int *pnCkpt /* OUT: Number of backfilled frames in WAL */ +){ + int rc; /* Return code */ + int isChanged = 0; /* True if a new wal-index header is loaded */ + int eMode2 = eMode; /* Mode to pass to walCheckpoint() */ + int (*xBusy2)(void*) = xBusy; /* Busy handler for eMode2 */ + + assert( pWal->ckptLock==0 ); + assert( pWal->writeLock==0 ); + + /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked + ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ + assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); + + if( pWal->readOnly ) return SQLITE_READONLY; + WALTRACE(("WAL%p: checkpoint begins\n", pWal)); + + /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive + ** "checkpoint" lock on the database file. */ + rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); + if( rc ){ + /* EVIDENCE-OF: R-10421-19736 If any other process is running a + ** checkpoint operation at the same time, the lock cannot be obtained and + ** SQLITE_BUSY is returned. + ** EVIDENCE-OF: R-53820-33897 Even if there is a busy-handler configured, + ** it will not be invoked in this case. + */ + testcase( rc==SQLITE_BUSY ); + testcase( xBusy!=0 ); + return rc; + } + pWal->ckptLock = 1; + + /* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART and + ** TRUNCATE modes also obtain the exclusive "writer" lock on the database + ** file. + ** + ** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained + ** immediately, and a busy-handler is configured, it is invoked and the + ** writer lock retried until either the busy-handler returns 0 or the + ** lock is successfully obtained. + */ + if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){ + rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_WRITE_LOCK, 1); + if( rc==SQLITE_OK ){ + pWal->writeLock = 1; + }else if( rc==SQLITE_BUSY ){ + eMode2 = SQLITE_CHECKPOINT_PASSIVE; + xBusy2 = 0; + rc = SQLITE_OK; + } + } + + /* Read the wal-index header. */ + if( rc==SQLITE_OK ){ + rc = walIndexReadHdr(pWal, &isChanged); + if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ + sqlite3OsUnfetch(pWal->pDbFd, 0, 0); + } + } + + /* Copy data from the log to the database file. */ + if( rc==SQLITE_OK ){ + + if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); + } + + /* If no error occurred, set the output variables. 
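+    ** These are the values ultimately reported back through the pnLog and
+    ** pnCkpt output arguments of the sqlite3_wal_checkpoint_v2() API.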
*/
+    if( rc==SQLITE_OK || rc==SQLITE_BUSY ){
+      if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame;
+      if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill);
+    }
+  }
+
+  if( isChanged ){
+    /* If a new wal-index header was loaded before the checkpoint was
+    ** performed, then the pager-cache associated with pWal is now
+    ** out of date. So zero the cached wal-index header to ensure that
+    ** next time the pager opens a snapshot on this database it knows that
+    ** the cache needs to be reset.
+    */
+    memset(&pWal->hdr, 0, sizeof(WalIndexHdr));
+  }
+
+  /* Release the locks. */
+  sqlite3WalEndWriteTransaction(pWal);
+  walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
+  pWal->ckptLock = 0;
+  WALTRACE(("WAL%p: checkpoint %s\n", pWal, rc ? "failed" : "ok"));
+  return (rc==SQLITE_OK && eMode!=eMode2 ? SQLITE_BUSY : rc);
+}
+
+/* Return the value to pass to a sqlite3_wal_hook callback, the
+** number of frames in the WAL at the point of the last commit since
+** sqlite3WalCallback() was called. If no commits have occurred since
+** the last call, then return 0.
+*/
+SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal){
+  u32 ret = 0;
+  if( pWal ){
+    ret = pWal->iCallback;
+    pWal->iCallback = 0;
+  }
+  return (int)ret;
+}
+
+/*
+** This function is called to change the WAL subsystem into or out
+** of locking_mode=EXCLUSIVE.
+**
+** If op is zero, then attempt to change from locking_mode=EXCLUSIVE
+** into locking_mode=NORMAL. This means that we must acquire a lock
+** on the pWal->readLock byte. If the WAL is already in locking_mode=NORMAL
+** or if the acquisition of the lock fails, then return 0. If the
+** transition out of exclusive-mode is successful, return 1. This
+** operation must occur while the pager is still holding the exclusive
+** lock on the main database file.
+**
+** If op is one, then change from locking_mode=NORMAL into
+** locking_mode=EXCLUSIVE. This means that the pWal->readLock must
+** be released. Return 1 if the transition is made and 0 if the
+** WAL is already in exclusive-locking mode - meaning that this
+** routine is a no-op. The pager must already hold the exclusive lock
+** on the main database file before invoking this operation.
+**
+** If op is negative, then do a dry-run of the op==1 case but do
+** not actually change anything. The pager uses this to see if it
+** should acquire the database exclusive lock prior to invoking
+** the op==1 case.
+*/
+SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){
+  int rc;
+  assert( pWal->writeLock==0 );
+  assert( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE || op==-1 );
+
+  /* pWal->readLock is usually set, but might be -1 if there was a
+  ** prior error while attempting to acquire a read-lock. This cannot
+  ** happen if the connection is actually in exclusive mode (as no xShmLock
+  ** locks are taken in this case). Nor should the pager attempt to
+  ** upgrade to exclusive-mode following such an error.
+  */
+  assert( pWal->readLock>=0 || pWal->lockError );
+  assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) );
+
+  if( op==0 ){
+    if( pWal->exclusiveMode ){
+      pWal->exclusiveMode = 0;
+      if( walLockShared(pWal, WAL_READ_LOCK(pWal->readLock))!=SQLITE_OK ){
+        pWal->exclusiveMode = 1;
+      }
+      rc = pWal->exclusiveMode==0;
+    }else{
+      /* Already in locking_mode=NORMAL */
+      rc = 0;
+    }
+  }else if( op>0 ){
+    assert( pWal->exclusiveMode==0 );
+    assert( pWal->readLock>=0 );
+    walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock));
+    pWal->exclusiveMode = 1;
+    rc = 1;
+  }else{
+    rc = pWal->exclusiveMode==0;
+  }
+  return rc;
+}
+
+/*
+** Return true if the argument is non-NULL and the WAL module is using
+** heap-memory for the wal-index. Otherwise, if the argument is NULL or the
+** WAL module is using shared-memory, return false.
+*/
+SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){
+  return (pWal && pWal->exclusiveMode==WAL_HEAPMEMORY_MODE );
+}
+
+#ifdef SQLITE_ENABLE_SNAPSHOT
+/* Create a snapshot object. The content of a snapshot is opaque to
+** every other subsystem, so the WAL module can put whatever it needs
+** in the object.
+*/
+SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot){
+  int rc = SQLITE_OK;
+  WalIndexHdr *pRet;
+  static const u32 aZero[4] = { 0, 0, 0, 0 };
+
+  assert( pWal->readLock>=0 && pWal->writeLock==0 );
+
+  if( memcmp(&pWal->hdr.aFrameCksum[0],aZero,16)==0 ){
+    *ppSnapshot = 0;
+    return SQLITE_ERROR;
+  }
+  pRet = (WalIndexHdr*)sqlite3_malloc(sizeof(WalIndexHdr));
+  if( pRet==0 ){
+    rc = SQLITE_NOMEM_BKPT;
+  }else{
+    memcpy(pRet, &pWal->hdr, sizeof(WalIndexHdr));
+    *ppSnapshot = (sqlite3_snapshot*)pRet;
+  }
+
+  return rc;
+}
+
+/* Try to open pSnapshot when the next read-transaction starts
+*/
+SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapshot){
+  pWal->pSnapshot = (WalIndexHdr*)pSnapshot;
+}
+
+/*
+** Return a +ve value if snapshot p1 is newer than p2. A -ve value if
+** p1 is older than p2 and zero if p1 and p2 are the same snapshot.
+*/
+SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
+  WalIndexHdr *pHdr1 = (WalIndexHdr*)p1;
+  WalIndexHdr *pHdr2 = (WalIndexHdr*)p2;
+
+  /* aSalt[0] is a copy of the value stored in the wal file header. It
+  ** is incremented each time the wal file is restarted. */
+  if( pHdr1->aSalt[0]<pHdr2->aSalt[0] ) return -1;
+  if( pHdr1->aSalt[0]>pHdr2->aSalt[0] ) return +1;
+  if( pHdr1->mxFrame<pHdr2->mxFrame ) return -1;
+  if( pHdr1->mxFrame>pHdr2->mxFrame ) return +1;
+  return 0;
+}
+#endif /* SQLITE_ENABLE_SNAPSHOT */
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+/*
+** If the argument is not NULL, it points to a Wal object that holds a
+** read-lock. This function returns the database page-size if it is known,
+** or zero if it is not (or if pWal is NULL).
+*/
+SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){
+  assert( pWal==0 || pWal->readLock>=0 );
+  return (pWal ? pWal->szPage : 0);
+}
+#endif
+
+/* Return the sqlite3_file object for the WAL file
+*/
+SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){
+  return pWal->pWalFd;
+}
+
+#endif /* #ifndef SQLITE_OMIT_WAL */
+
+/************** End of wal.c *************************************************/
+/************** Begin file btmutex.c *****************************************/
+/*
+** 2007 August 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement mutexes on Btree objects.
+** This code really belongs in btree.c. But btree.c is getting too
+** big and we want to break it down some. This package seemed like
+** a good breakout.
+*/
+/************** Include btreeInt.h in the middle of btmutex.c ****************/
+/************** Begin file btreeInt.h ****************************************/
+/*
+** 2004 April 6
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file implements an external (disk-based) database using BTrees.
+** For a detailed discussion of BTrees, refer to
+**
+**     Donald E. Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3:
+**     "Sorting And Searching", pages 473-480. Addison-Wesley
+**     Publishing Company, Reading, Massachusetts.
+**
+** The basic idea is that each page of the file contains N database
+** entries and N+1 pointers to subpages.
+**
+**   ----------------------------------------------------------------
+**   |  Ptr(0) | Key(0) | Ptr(1) | Key(1) | ... | Key(N-1) | Ptr(N) |
+**   ----------------------------------------------------------------
+**
+** All of the keys on the page that Ptr(0) points to have values less
+** than Key(0). All of the keys on page Ptr(1) and its subpages have
+** values greater than Key(0) and less than Key(1). All of the keys
+** on Ptr(N) and its subpages have values greater than Key(N-1). And
+** so forth.
+**
+** Finding a particular key requires reading O(log(M)) pages from the
+** disk where M is the number of entries in the tree.
+**
+** In this implementation, a single file can hold one or more separate
+** BTrees. Each BTree is identified by the index of its root page. The
+** key and data for any entry are combined to form the "payload". A
+** fixed amount of payload can be carried directly on the database
+** page. If the payload is larger than the preset amount then surplus
+** bytes are stored on overflow pages. The payload for an entry
+** and the preceding pointer are combined to form a "Cell". Each
+** page has a small header which contains the Ptr(N) pointer and other
+** information such as the size of key and data.
+**
+** FORMAT DETAILS
+**
+** The file is divided into pages. The first page is called page 1,
+** the second is page 2, and so forth. A page number of zero indicates
+** "no such page". The page size can be any power of 2 between 512 and 65536.
+** Each page can be either a btree page, a freelist page, an overflow
+** page, or a pointer-map page.
+**
+** The first page is always a btree page. The first 100 bytes of the first
+** page contain a special header (the "file header") that describes the file.
+** The format of the file header is as follows:
+**
+**   OFFSET   SIZE    DESCRIPTION
+**      0      16     Header string: "SQLite format 3\000"
+**     16       2     Page size in bytes.  (1 means 65536)
+**     18       1     File format write version
+**     19       1     File format read version
+**     20       1     Bytes of unused space at the end of each page
+**     21       1     Max embedded payload fraction (must be 64)
+**     22       1     Min embedded payload fraction (must be 32)
+**     23       1     Min leaf payload fraction (must be 32)
+**     24       4     File change counter
+**     28       4     Reserved for future use
+**     32       4     First freelist page
+**     36       4     Number of freelist pages in the file
+**     40      60     15 4-byte meta values passed to higher layers
+**
+**     40       4     Schema cookie
+**     44       4     File format of schema layer
+**     48       4     Size of page cache
+**     52       4     Largest root-page (auto/incr_vacuum)
+**     56       4     1=UTF-8 2=UTF16le 3=UTF16be
+**     60       4     User version
+**     64       4     Incremental vacuum mode
+**     68       4     Application-ID
+**     72      20     unused
+**     92       4     The version-valid-for number
+**     96       4     SQLITE_VERSION_NUMBER
+**
+** All of the integer values are big-endian (most significant byte first).
+**
+** The file change counter is incremented when the database is changed.
+** This counter allows other processes to know when the file has changed
+** and thus when they need to flush their cache.
+**
+** The max embedded payload fraction is the amount of the total usable
+** space in a page that can be consumed by a single cell for standard
+** B-tree (non-LEAFDATA) tables. A value of 255 means 100%. The default
+** is to limit the maximum cell size so that at least 4 cells will fit
+** on one page. Thus the default max embedded payload fraction is 64.
+**
+** If the payload for a cell is larger than the max payload, then extra
+** payload is spilled to overflow pages. Once an overflow page is allocated,
+** as many bytes as possible are moved into the overflow pages without letting
+** the cell size drop below the min embedded payload fraction.
+**
+** The min leaf payload fraction is like the min embedded payload fraction
+** except that it applies to leaf nodes in a LEAFDATA tree. The maximum
+** payload fraction for a LEAFDATA tree is always 100% (or 255) and is
+** not specified in the header.
+**
+** Each btree page is divided into three sections: The header, the
+** cell pointer array, and the cell content area. Page 1 also has a 100-byte
+** file header that occurs before the page header.
+**
+**      |----------------|
+**      | file header    |   100 bytes.  Page 1 only.
+**      |----------------|
+**      | page header    |   8 bytes for leaves.  12 bytes for interior nodes
+**      |----------------|
+**      | cell pointer   |   |  2 bytes per cell.  Sorted order.
+**      | array          |   |  Grows downward
+**      |                |   v
+**      |----------------|
+**      | unallocated    |
+**      | space          |
+**      |----------------|   ^  Grows upwards
+**      | cell content   |   |  Arbitrary order interspersed with freeblocks.
+**      | area           |   |  and free space fragments.
+**      |----------------|
+**
+** The page header looks like this:
+**
+**   OFFSET   SIZE     DESCRIPTION
+**      0       1      Flags. 1: intkey, 2: zerodata, 4: leafdata, 8: leaf
+**      1       2      byte offset to the first freeblock
+**      3       2      number of cells on this page
+**      5       2      first byte of the cell content area
+**      7       1      number of fragmented free bytes
+**      8       4      Right child (the Ptr(N) value). Omitted on leaves.
+**
+** The flags define the format of this btree page. The leaf flag means that
+** this page has no children. The zerodata flag means that this page carries
+** only keys and no data. The intkey flag means that the key is an integer
+** which is stored in the key size entry of the cell header rather than in
+** the payload area.
+**
+** The cell pointer array begins on the first byte after the page header.
+** The cell pointer array contains zero or more 2-byte numbers which are
+** offsets from the beginning of the page to the cell content in the cell
+** content area. The cell pointers occur in sorted order. The system strives
+** to keep free space after the last cell pointer so that new cells can
+** be easily added without having to defragment the page.
+**
+** Cell content is stored at the very end of the page and grows toward the
+** beginning of the page.
+**
+** Unused space within the cell content area is collected into a linked list of
+** freeblocks. Each freeblock is at least 4 bytes in size. The byte offset
+** to the first freeblock is given in the header. Freeblocks occur in
+** increasing order. Because a freeblock must be at least 4 bytes in size,
+** any group of 3 or fewer unused bytes in the cell content area cannot
+** exist on the freeblock chain. A group of 3 or fewer free bytes is called
+** a fragment. The total number of bytes in all fragments is recorded
+** in the page header at offset 7.
+**
+**    SIZE    DESCRIPTION
+**      2     Byte offset of the next freeblock
+**      2     Bytes in this freeblock
+**
+** Cells are of variable length. Cells are stored in the cell content area at
+** the end of the page. Pointers to the cells are in the cell pointer array
+** that immediately follows the page header. Cells are not necessarily
+** contiguous or in order, but cell pointers are contiguous and in order.
+**
+** Cell content makes use of variable length integers. A variable
+** length integer is 1 to 9 bytes where the lower 7 bits of each
+** byte are used. The integer consists of all bytes that have bit 8 set and
+** the first byte with bit 8 clear. The most significant byte of the integer
+** appears first. A variable-length integer may not be more than 9 bytes long.
+** As a special case, all 8 bits of the 9th byte are used as data. This
+** allows a 64-bit integer to be encoded in 9 bytes.
+**
+**    0x00                      becomes  0x00000000
+**    0x7f                      becomes  0x0000007f
+**    0x81 0x00                 becomes  0x00000080
+**    0x82 0x00                 becomes  0x00000100
+**    0x80 0x7f                 becomes  0x0000007f
+**    0x8a 0x91 0xd1 0xac 0x78  becomes  0x12345678
+**    0x81 0x81 0x81 0x81 0x01  becomes  0x10204081
+**
+** Variable length integers are used for rowids and to hold the number of
+** bytes of key and data in a btree cell.
+**
+** The content of a cell looks like this:
+**
+**    SIZE    DESCRIPTION
+**      4     Page number of the left child. Omitted if leaf flag is set.
+**     var    Number of bytes of data. Omitted if the zerodata flag is set.
+**     var    Number of bytes of key. Or the key itself if intkey flag is set.
+**      *     Payload
+**      4     First page of the overflow chain. Omitted if no overflow
+**
+** Overflow pages form a linked list. Each page except the last is completely
+** filled with data (pagesize - 4 bytes). The last page can have as little
+** as 1 byte of data.
+**
+**    SIZE    DESCRIPTION
+**      4     Page number of next overflow page
+**      *     Data
+**
+** Freelist pages come in two subtypes: trunk pages and leaf pages. The
+** file header points to the first in a linked list of trunk pages. Each trunk
+** page points to multiple leaf pages. The content of a leaf page is
+** unspecified. A trunk page looks like this:
+**
+**    SIZE    DESCRIPTION
+**      4     Page number of next trunk page
+**      4     Number of leaf pointers on this page
+**      *     zero or more page numbers of leaves
+*/
+/* #include "sqliteInt.h" */
+
+
+/* The following value is the maximum cell size assuming a maximum page
+** size given above.
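+** For example, MX_CELL_SIZE evaluates to 4088 for a 4096-byte page and
+** to 65528 for the maximum 65536-byte page.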
+*/
+#define MX_CELL_SIZE(pBt)  ((int)(pBt->pageSize-8))
+
+/* The maximum number of cells on a single page of the database. This
+** assumes a minimum cell size of 6 bytes (4 bytes for the cell itself
+** plus 2 bytes for the index to the cell in the page header). Such
+** small cells will be rare, but they are possible.
+*/
+#define MX_CELL(pBt) ((pBt->pageSize-8)/6)
+
+/* Forward declarations */
+typedef struct MemPage MemPage;
+typedef struct BtLock BtLock;
+typedef struct CellInfo CellInfo;
+
+/*
+** This is a magic string that appears at the beginning of every
+** SQLite database in order to identify the file as a real database.
+**
+** You can change this value at compile-time by specifying a
+** -DSQLITE_FILE_HEADER="..." on the compiler command-line. The
+** header must be exactly 16 bytes including the zero-terminator so
+** the string itself should be 15 characters long. If you change
+** the header, then your custom library will not be able to read
+** databases generated by the standard tools and the standard tools
+** will not be able to read databases created by your custom library.
+*/
+#ifndef SQLITE_FILE_HEADER /* 123456789 123456 */
+#  define SQLITE_FILE_HEADER "SQLite format 3"
+#endif
+
+/*
+** Page type flags. An ORed combination of these flags appears as the
+** first byte of the on-disk image of every BTree page.
+*/
+#define PTF_INTKEY    0x01
+#define PTF_ZERODATA  0x02
+#define PTF_LEAFDATA  0x04
+#define PTF_LEAF      0x08
+
+/*
+** An instance of this object stores information about a single database
+** page that has been loaded into memory. The information in this object
+** is derived from the raw on-disk page content.
+**
+** As each database page is loaded into memory, the pager allocates an
+** instance of this object and zeros the first 8 bytes. (This is the
+** "extra" information associated with each page of the pager.)
+**
+** Access to all fields of this structure is controlled by the mutex
+** stored in MemPage.pBt->mutex.
+*/
+struct MemPage {
+  u8 isInit;           /* True if previously initialized. MUST BE FIRST! */
+  u8 bBusy;            /* Prevent endless loops on corrupt database files */
+  u8 intKey;           /* True if table b-trees. False for index b-trees */
+  u8 intKeyLeaf;       /* True if the leaf of an intKey table */
+  Pgno pgno;           /* Page number for this page */
+  /* Only the first 8 bytes (above) are zeroed by pager.c when a new page
+  ** is allocated. All fields that follow must be initialized before use */
+  u8 leaf;             /* True if a leaf page */
+  u8 hdrOffset;        /* 100 for page 1. 0 otherwise */
+  u8 childPtrSize;     /* 0 if leaf==1. 4 if leaf==0 */
+  u8 max1bytePayload;  /* min(maxLocal,127) */
+  u8 nOverflow;        /* Number of overflow cell bodies in aCell[] */
+  u16 maxLocal;        /* Copy of BtShared.maxLocal or BtShared.maxLeaf */
+  u16 minLocal;        /* Copy of BtShared.minLocal or BtShared.minLeaf */
+  u16 cellOffset;      /* Index in aData of first cell pointer */
+  u16 nFree;           /* Number of free bytes on the page */
+  u16 nCell;           /* Number of cells on this page, local and ovfl */
+  u16 maskPage;        /* Mask for page offset */
+  u16 aiOvfl[4];       /* Insert the i-th overflow cell before the aiOvfl-th
+                       ** non-overflow cell */
+  u8 *apOvfl[4];       /* Pointers to the body of overflow cells */
+  BtShared *pBt;       /* Pointer to BtShared that this page is part of */
+  u8 *aData;           /* Pointer to disk image of the page data */
+  u8 *aDataEnd;        /* One byte past the end of usable data */
+  u8 *aCellIdx;        /* The cell index area */
+  u8 *aDataOfst;       /* Same as aData for leaves. aData+4 for interior */
+  DbPage *pDbPage;     /* Pager page handle */
+  u16 (*xCellSize)(MemPage*,u8*);             /* cellSizePtr method */
+  void (*xParseCell)(MemPage*,u8*,CellInfo*); /* btreeParseCell method */
+};
+
+/*
+** A linked list of the following structures is stored at BtShared.pLock.
+** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor
+** is opened on the table with root page BtShared.iTable. Locks are removed
+** from this list when a transaction is committed or rolled back, or when
+** a btree handle is closed.
+*/
+struct BtLock {
+  Btree *pBtree;        /* Btree handle holding this lock */
+  Pgno iTable;          /* Root page of table */
+  u8 eLock;             /* READ_LOCK or WRITE_LOCK */
+  BtLock *pNext;        /* Next in BtShared.pLock list */
+};
+
+/* Candidate values for BtLock.eLock */
+#define READ_LOCK     1
+#define WRITE_LOCK    2
+
+/* A Btree handle
+**
+** A database connection contains a pointer to an instance of
+** this object for every database file that it has open. This structure
+** is opaque to the database connection. The database connection cannot
+** see the internals of this structure and only deals with pointers to
+** this structure.
+**
+** For some database files, the same underlying database cache might be
+** shared between multiple connections. In that case, each connection
+** has its own instance of this object. But each instance of this object
+** points to the same BtShared object. The database cache and the
+** schema associated with the database file are all contained within
+** the BtShared object.
+**
+** All fields in this structure are accessed under sqlite3.mutex.
+** The pBt pointer itself may not be changed while there exist cursors
+** in the referenced BtShared that point back to this Btree since those
+** cursors have to go through this Btree to find their BtShared and
+** they often do so without holding sqlite3.mutex.
+*/
+struct Btree {
+  sqlite3 *db;       /* The database connection holding this btree */
+  BtShared *pBt;     /* Sharable content of this btree */
+  u8 inTrans;        /* TRANS_NONE, TRANS_READ or TRANS_WRITE */
+  u8 sharable;       /* True if we can share pBt with another db */
+  u8 locked;         /* True if db currently has pBt locked */
+  u8 hasIncrblobCur; /* True if there are one or more Incrblob cursors */
+  int wantToLock;    /* Number of nested calls to sqlite3BtreeEnter() */
+  int nBackup;       /* Number of backup operations reading this btree */
+  u32 iDataVersion;  /* Combines with pBt->pPager->iDataVersion */
+  Btree *pNext;      /* List of other sharable Btrees from the same db */
+  Btree *pPrev;      /* Back pointer of the same list */
+#ifndef SQLITE_OMIT_SHARED_CACHE
+  BtLock lock;       /* Object used to lock page 1 */
+#endif
+};
+
+/*
+** Btree.inTrans may take one of the following values.
+**
+** If the shared-data extension is enabled, there may be multiple users
+** of the Btree structure. At most one of these may open a write transaction,
+** but any number may have active read transactions.
+*/
+#define TRANS_NONE  0
+#define TRANS_READ  1
+#define TRANS_WRITE 2
+
+/*
+** An instance of this object represents a single database file.
+**
+** A single database file can be in use at the same time by two
+** or more database connections. When two or more connections are
+** sharing the same database file, each connection has its own
+** private Btree object for the file and each of those Btrees points
+** to this one BtShared object. BtShared.nRef is the number of
+** connections currently sharing this database file.
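+**
+** For example, with shared-cache mode enabled, two connections to the
+** same database file each get a private Btree object, while both
+** Btree.pBt pointers reference a single BtShared whose nRef is then 2.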
+**
+** Fields in this structure are accessed under the BtShared.mutex
+** mutex, except for nRef and pNext which are accessed under the
+** global SQLITE_MUTEX_STATIC_MASTER mutex. The pPager field
+** may not be modified once it is initially set as long as nRef>0.
+** The pSchema field may be set once under BtShared.mutex and
+** thereafter is unchanged as long as nRef>0.
+**
+** isPending:
+**
+**   If a BtShared client fails to obtain a write-lock on a database
+**   table (because there exist one or more read-locks on the table),
+**   the shared-cache enters 'pending-lock' state and isPending is
+**   set to true.
+**
+**   The shared-cache leaves the 'pending lock' state when either of
+**   the following occurs:
+**
+**   1) The current writer (BtShared.pWriter) concludes its transaction, OR
+**   2) The number of locks held by other connections drops to zero.
+**
+**   While in the 'pending-lock' state, no connection may start a new
+**   transaction.
+**
+**   This feature is included to help prevent writer-starvation.
+*/
+struct BtShared {
+  Pager *pPager;        /* The page cache */
+  sqlite3 *db;          /* Database connection currently using this Btree */
+  BtCursor *pCursor;    /* A list of all open cursors */
+  MemPage *pPage1;      /* First page of the database */
+  u8 openFlags;         /* Flags to sqlite3BtreeOpen() */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  u8 autoVacuum;        /* True if auto-vacuum is enabled */
+  u8 incrVacuum;        /* True if incr-vacuum is enabled */
+  u8 bDoTruncate;       /* True to truncate db on commit */
+#endif
+  u8 inTransaction;     /* Transaction state */
+  u8 max1bytePayload;   /* Maximum first byte of cell for a 1-byte payload */
+#ifdef SQLITE_HAS_CODEC
+  u8 optimalReserve;    /* Desired amount of reserved space per page */
+#endif
+  u16 btsFlags;         /* Boolean parameters. See BTS_* macros below */
+  u16 maxLocal;         /* Maximum local payload in non-LEAFDATA tables */
+  u16 minLocal;         /* Minimum local payload in non-LEAFDATA tables */
+  u16 maxLeaf;          /* Maximum local payload in a LEAFDATA table */
+  u16 minLeaf;          /* Minimum local payload in a LEAFDATA table */
+  u32 pageSize;         /* Total number of bytes on a page */
+  u32 usableSize;       /* Number of usable bytes on each page */
+  int nTransaction;     /* Number of open transactions (read + write) */
+  u32 nPage;            /* Number of pages in the database */
+  void *pSchema;        /* Pointer to space allocated by sqlite3BtreeSchema() */
+  void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */
+  sqlite3_mutex *mutex; /* Non-recursive mutex required to access this object */
+  Bitvec *pHasContent;  /* Set of pages moved to free-list this transaction */
+#ifndef SQLITE_OMIT_SHARED_CACHE
+  int nRef;             /* Number of references to this structure */
+  BtShared *pNext;      /* Next on a list of sharable BtShared structs */
+  BtLock *pLock;        /* List of locks held on this shared-btree struct */
+  Btree *pWriter;       /* Btree with currently open write transaction */
+#endif
+  u8 *pTmpSpace;        /* Temp space sufficient to hold a single cell */
+};
+
+/*
+** Allowed values for BtShared.btsFlags
+*/
+#define BTS_READ_ONLY        0x0001  /* Underlying file is readonly */
+#define BTS_PAGESIZE_FIXED   0x0002  /* Page size can no longer be changed */
+#define BTS_SECURE_DELETE    0x0004  /* PRAGMA secure_delete is enabled */
+#define BTS_INITIALLY_EMPTY  0x0008  /* Database was empty at trans start */
+#define BTS_NO_WAL           0x0010  /* Do not open write-ahead-log files */
+#define BTS_EXCLUSIVE        0x0020  /* pWriter has an exclusive lock */
+#define BTS_PENDING          0x0040  /* Waiting for read-locks to clear */
+
+/*
+** An instance of the following structure is used to hold information
+** about a cell. The parseCellPtr() function fills in this structure
+** based on information extracted from the raw disk page.
+*/
+struct CellInfo {
+  i64 nKey;      /* The key for INTKEY tables, or nPayload otherwise */
+  u8 *pPayload;  /* Pointer to the start of payload */
+  u32 nPayload;  /* Bytes of payload */
+  u16 nLocal;    /* Amount of payload held locally, not on overflow */
+  u16 nSize;     /* Size of the cell content on the main b-tree page */
+};
+
+/*
+** Maximum depth of an SQLite B-Tree structure. Any B-Tree deeper than
+** this will be declared corrupt. This value is calculated based on a
+** maximum database size of 2^31 pages, a minimum fanout of 2 for a
+** root-node, and 3 for all other internal nodes.
+**
+** If a tree that appears to be taller than this is encountered, it is
+** assumed that the database is corrupt.
+*/
+#define BTCURSOR_MAX_DEPTH 20
+
+/*
+** A cursor is a pointer to a particular entry within a particular
+** b-tree within a database file.
+**
+** The entry is identified by its MemPage and the index in
+** MemPage.aCell[] of the entry.
+**
+** A single database file can be shared by two or more database connections,
+** but cursors cannot be shared. Each cursor is associated with a
+** particular database connection identified by BtCursor.pBtree.db.
+**
+** Fields in this structure are accessed under the BtShared.mutex
+** found at self->pBt->mutex.
+**
+** skipNext meaning:
+**    eState==SKIPNEXT && skipNext>0:  Next sqlite3BtreeNext() is no-op.
+**    eState==SKIPNEXT && skipNext<0:  Next sqlite3BtreePrevious() is no-op.
+**    eState==FAULT:                   Cursor fault with skipNext as error code.
+*/
+struct BtCursor {
+  Btree *pBtree;            /* The Btree to which this cursor belongs */
+  BtShared *pBt;            /* The BtShared this cursor points to */
+  BtCursor *pNext;          /* Forms a linked list of all cursors */
+  Pgno *aOverflow;          /* Cache of overflow page locations */
+  CellInfo info;            /* A parse of the cell we are pointing at */
+  i64 nKey;                 /* Size of pKey, or last integer key */
+  void *pKey;               /* Saved key that was the cursor's last known position */
+  Pgno pgnoRoot;            /* The root page of this tree */
+  int nOvflAlloc;           /* Allocated size of aOverflow[] array */
+  int skipNext;             /* Prev() is noop if negative. Next() is noop if positive.
+                            ** Error code if eState==CURSOR_FAULT */
+  u8 curFlags;              /* zero or more BTCF_* flags defined below */
+  u8 curPagerFlags;         /* Flags to send to sqlite3PagerGet() */
+  u8 eState;                /* One of the CURSOR_XXX constants (see below) */
+  u8 hints;                 /* As configured by CursorSetHints() */
+  /* All fields above are zeroed when the cursor is allocated. See
+  ** sqlite3BtreeCursorZero(). Fields that follow must be manually
+  ** initialized.
+  */
+  i8 iPage;                 /* Index of current page in apPage */
+  u8 curIntKey;             /* Value of apPage[0]->intKey */
+  struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */
+  void *padding1;           /* Make object size a multiple of 16 */
+  u16 aiIdx[BTCURSOR_MAX_DEPTH];        /* Current index in apPage[i] */
+  MemPage *apPage[BTCURSOR_MAX_DEPTH];  /* Pages from root to current page */
+};
+
+/*
+** Legal values for BtCursor.curFlags
+*/
+#define BTCF_WriteFlag    0x01   /* True if a write cursor */
+#define BTCF_ValidNKey    0x02   /* True if info.nKey is valid */
+#define BTCF_ValidOvfl    0x04   /* True if aOverflow is valid */
+#define BTCF_AtLast       0x08   /* Cursor is pointing at the last entry */
+#define BTCF_Incrblob     0x10   /* True if an incremental I/O handle */
+#define BTCF_Multiple     0x20   /* Maybe another cursor on the same btree */
+
+/*
+** Potential values for BtCursor.eState.
+**
+** CURSOR_INVALID:
+**   Cursor does not point to a valid entry. This can happen (for example)
+**   because the table is empty or because BtreeCursorFirst() has not been
+**   called.
+**
+** CURSOR_VALID:
+**   Cursor points to a valid entry. getPayload() etc. may be called.
+**
+** CURSOR_SKIPNEXT:
+**   Cursor is valid except that the Cursor.skipNext field is non-zero
+**   indicating that the next sqlite3BtreeNext() or sqlite3BtreePrevious()
+**   operation should be a no-op.
+**
+** CURSOR_REQUIRESEEK:
+**   The table that this cursor was opened on still exists, but has been
+**   modified since the cursor was last used. The cursor position is saved
+**   in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in
+**   this state, restoreCursorPosition() can be called to attempt to
+**   seek the cursor to the saved position.
+**
+** CURSOR_FAULT:
+**   An unrecoverable error (an I/O error or a malloc failure) has occurred
+**   on a different connection that shares the BtShared cache with this
+**   cursor. The error has left the cache in an inconsistent state.
+**   Do nothing else with this cursor. Any attempt to use the cursor
+**   should return the error code stored in BtCursor.skipNext
+*/
+#define CURSOR_INVALID           0
+#define CURSOR_VALID             1
+#define CURSOR_SKIPNEXT          2
+#define CURSOR_REQUIRESEEK       3
+#define CURSOR_FAULT             4
+
+/*
+** The database page the PENDING_BYTE occupies. This page is never used.
+*/
+# define PENDING_BYTE_PAGE(pBt) PAGER_MJ_PGNO(pBt)
+
+/*
+** These macros define the location of the pointer-map entry for a
+** database page. The first argument to each is the number of usable
+** bytes on each page of the database (often 1024). The second is the
+** page number to look up in the pointer map.
+**
+** PTRMAP_PAGENO returns the database page number of the pointer-map
+** page that stores the required pointer. PTRMAP_PTROFFSET returns
+** the offset of the requested map entry.
+**
+** If the pgno argument passed to PTRMAP_PAGENO is a pointer-map page,
+** then pgno is returned. So (pgno==PTRMAP_PAGENO(pgsz, pgno)) can be
+** used to test if pgno is a pointer-map page. PTRMAP_ISPAGE implements
+** this test.
+*/
+#define PTRMAP_PAGENO(pBt, pgno) ptrmapPageno(pBt, pgno)
+#define PTRMAP_PTROFFSET(pgptrmap, pgno) (5*(pgno-pgptrmap-1))
+#define PTRMAP_ISPAGE(pBt, pgno) (PTRMAP_PAGENO((pBt),(pgno))==(pgno))
+
+/*
+** The pointer map is a lookup table that identifies the parent page for
+** each child page in the database file. The parent page is the page that
+** contains a pointer to the child. Every page in the database contains
+** 0 or 1 parent pages.
+** (In this context 'database page' refers to any page that is not part
+** of the pointer map itself.) Each pointer map entry consists of a
+** single byte 'type' and a 4 byte parent page number.
+** The PTRMAP_XXX identifiers below are the valid types.
+**
+** The purpose of the pointer map is to facilitate moving pages from one
+** position in the file to another as part of autovacuum. When a page
+** is moved, the pointer in its parent must be updated to point to the
+** new location. The pointer map is used to locate the parent page quickly.
+**
+** PTRMAP_ROOTPAGE: The database page is a root-page. The page-number is not
+**                  used in this case.
+**
+** PTRMAP_FREEPAGE: The database page is an unused (free) page. The page-number
+**                  is not used in this case.
+**
+** PTRMAP_OVERFLOW1: The database page is the first page in a list of
+**                   overflow pages. The page number identifies the page that
+**                   contains the cell with a pointer to this overflow page.
+**
+** PTRMAP_OVERFLOW2: The database page is the second or later page in a list of
+**                   overflow pages. The page-number identifies the previous
+**                   page in the overflow page list.
+**
+** PTRMAP_BTREE: The database page is a non-root btree page. The page number
+**               identifies the parent page in the btree.
+*/
+#define PTRMAP_ROOTPAGE 1
+#define PTRMAP_FREEPAGE 2
+#define PTRMAP_OVERFLOW1 3
+#define PTRMAP_OVERFLOW2 4
+#define PTRMAP_BTREE 5
+
+/* A bunch of assert() statements to check the transaction state variables
+** of handle p (type Btree*) are internally consistent.
+*/
+#define btreeIntegrity(p) \
+  assert( p->pBt->inTransaction!=TRANS_NONE || p->pBt->nTransaction==0 ); \
+  assert( p->pBt->inTransaction>=p->inTrans );
+
+
+/*
+** The ISAUTOVACUUM macro is used within balance_nonroot() to determine
+** if the database supports auto-vacuum or not. Because it is used
+** within an expression that is an argument to another macro
+** (sqliteMallocRaw), it is not possible to use conditional compilation.
+** So, this macro is defined instead.
+*/
+#ifndef SQLITE_OMIT_AUTOVACUUM
+#define ISAUTOVACUUM (pBt->autoVacuum)
+#else
+#define ISAUTOVACUUM 0
+#endif
+
+
+/*
+** This structure is passed around through all the sanity checking routines
+** in order to keep track of some global state information.
+**
+** The aRef[] array is allocated so that there is 1 bit for each page in
+** the database. As the integrity-check proceeds, for each page used in
+** the database the corresponding bit is set. This allows integrity-check to
+** detect pages that are used twice and orphaned pages (both of which
+** indicate corruption).
+*/
+typedef struct IntegrityCk IntegrityCk;
+struct IntegrityCk {
+  BtShared *pBt;      /* The tree being checked out */
+  Pager *pPager;      /* The associated pager. Also accessible by pBt->pPager */
+  u8 *aPgRef;         /* 1 bit per page in the db (see above) */
+  Pgno nPage;         /* Number of pages in the database */
+  int mxErr;          /* Stop accumulating errors when this reaches zero */
+  int nErr;           /* Number of messages written to zErrMsg so far */
+  int mallocFailed;   /* A memory allocation error has occurred */
+  const char *zPfx;   /* Error message prefix */
+  int v1, v2;         /* Values for up to two %d fields in zPfx */
+  StrAccum errMsg;    /* Accumulate the error message text here */
+  u32 *heap;          /* Min-heap used for analyzing cell coverage */
+};
+
+/*
+** Routines to read and write two- and four-byte big-endian integer values.
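+**
+** For example, get2byte() applied to the bytes {0x12, 0x34} yields
+** 0x1234 (4660); put2byte() performs the inverse store.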
+*/
+#define get2byte(x)   ((x)[0]<<8 | (x)[1])
+#define put2byte(p,v) ((p)[0] = (u8)((v)>>8), (p)[1] = (u8)(v))
+#define get4byte sqlite3Get4byte
+#define put4byte sqlite3Put4byte
+
+/*
+** get2byteAligned(), unlike get2byte(), requires that its argument point to a
+** two-byte aligned address. get2byteAligned() is only used for accessing the
+** cell addresses in a btree header.
+*/
+#if SQLITE_BYTEORDER==4321
+# define get2byteAligned(x)  (*(u16*)(x))
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4008000
+# define get2byteAligned(x)  __builtin_bswap16(*(u16*)(x))
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+# define get2byteAligned(x)  _byteswap_ushort(*(u16*)(x))
+#else
+# define get2byteAligned(x)  ((x)[0]<<8 | (x)[1])
+#endif
+
+/************** End of btreeInt.h ********************************************/
+/************** Continuing where we left off in btmutex.c ********************/
+#ifndef SQLITE_OMIT_SHARED_CACHE
+#if SQLITE_THREADSAFE
+
+/*
+** Obtain the BtShared mutex associated with B-Tree handle p. Also,
+** set BtShared.db to the database handle associated with p and the
+** p->locked boolean to true.
+*/
+static void lockBtreeMutex(Btree *p){
+  assert( p->locked==0 );
+  assert( sqlite3_mutex_notheld(p->pBt->mutex) );
+  assert( sqlite3_mutex_held(p->db->mutex) );
+
+  sqlite3_mutex_enter(p->pBt->mutex);
+  p->pBt->db = p->db;
+  p->locked = 1;
+}
+
+/*
+** Release the BtShared mutex associated with B-Tree handle p and
+** clear the p->locked boolean.
+*/
+static void SQLITE_NOINLINE unlockBtreeMutex(Btree *p){
+  BtShared *pBt = p->pBt;
+  assert( p->locked==1 );
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  assert( sqlite3_mutex_held(p->db->mutex) );
+  assert( p->db==pBt->db );
+
+  sqlite3_mutex_leave(pBt->mutex);
+  p->locked = 0;
+}
+
+/* Forward reference */
+static void SQLITE_NOINLINE btreeLockCarefully(Btree *p);
+
+/*
+** Enter a mutex on the given BTree object.
+**
+** If the object is not sharable, then no mutex is ever required
+** and this routine is a no-op. The underlying mutex is non-recursive.
+** But we keep a reference count in Btree.wantToLock so the behavior
+** of this interface is recursive.
+**
+** To avoid deadlocks, multiple Btrees are locked in the same order
+** by all database connections. The p->pNext is a list of other
+** Btrees belonging to the same database connection as the p Btree
+** which need to be locked after p. If we cannot get a lock on
+** p, then first unlock all of the others on p->pNext, then wait
+** for the lock to become available on p, then relock all of the
+** subsequent Btrees that desire a lock.
+*/
+SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
+  /* Some basic sanity checking on the Btree. The list of Btrees
+  ** connected by pNext and pPrev should be in sorted order by
+  ** Btree.pBt value. All elements of the list should belong to
+  ** the same connection. Only shared Btrees are on the list. */
+  assert( p->pNext==0 || p->pNext->pBt>p->pBt );
+  assert( p->pPrev==0 || p->pPrev->pBt<p->pBt );
+  assert( p->pNext==0 || p->pNext->db==p->db );
+  assert( p->pPrev==0 || p->pPrev->db==p->db );
+  assert( p->sharable || (p->pNext==0 && p->pPrev==0) );
+
+  /* Check for locking consistency */
+  assert( !p->locked || p->wantToLock>0 );
+  assert( p->sharable || p->wantToLock==0 );
+
+  /* We should already hold a lock on the database connection */
+  assert( sqlite3_mutex_held(p->db->mutex) );
+
+  /* Unless the database is sharable and unlocked, then BtShared.db
+  ** should already be set correctly.
/* This is a helper function for sqlite3BtreeEnter().  By moving
** complex, but seldom used logic, out of sqlite3BtreeEnter() and
** into this routine, we avoid unnecessary stack pointer changes
** and thus help the sqlite3BtreeEnter() routine to run much faster
** in the common case.
*/
static void SQLITE_NOINLINE btreeLockCarefully(Btree *p){
  Btree *pLater;

  /* In most cases, we should be able to acquire the lock we
  ** want without having to go through the ascending lock
  ** procedure that follows.  Just be sure not to block.
  */
  if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){
    p->pBt->db = p->db;
    p->locked = 1;
    return;
  }

  /* To avoid deadlock, first release all locks with a larger
  ** BtShared address.  Then acquire our lock.  Then reacquire
  ** the other BtShared locks that we used to hold in ascending
  ** order.
  */
  for(pLater=p->pNext; pLater; pLater=pLater->pNext){
    assert( pLater->sharable );
    assert( pLater->pNext==0 || pLater->pNext->pBt>pLater->pBt );
    assert( !pLater->locked || pLater->wantToLock>0 );
    if( pLater->locked ){
      unlockBtreeMutex(pLater);
    }
  }
  lockBtreeMutex(p);
  for(pLater=p->pNext; pLater; pLater=pLater->pNext){
    if( pLater->wantToLock ){
      lockBtreeMutex(pLater);
    }
  }
}


/*
** Exit the recursive mutex on a Btree.
*/
SQLITE_PRIVATE void sqlite3BtreeLeave(Btree *p){
  assert( sqlite3_mutex_held(p->db->mutex) );
  if( p->sharable ){
    assert( p->wantToLock>0 );
    p->wantToLock--;
    if( p->wantToLock==0 ){
      unlockBtreeMutex(p);
    }
  }
}

#ifndef NDEBUG
/*
** Return true if the BtShared mutex is held on the btree, or if the
** B-Tree is not marked as sharable.
**
** This routine is used only from within assert() statements.
*/
SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
  assert( p->sharable==0 || p->locked==0 || p->wantToLock>0 );
  assert( p->sharable==0 || p->locked==0 || p->db==p->pBt->db );
  assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->pBt->mutex) );
  assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->db->mutex) );

  return (p->sharable==0 || p->locked);
}
#endif
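/* Sketch (hypothetical, never compiled): bracket work that touches the
** schemas of every attached database with the enter-all/leave-all pair
** defined below.
*/
#if 0
static void exampleEnterAll(sqlite3 *db){
  sqlite3BtreeEnterAll(db);   /* locks every sharable attached Btree */
  /* ... safe to read db->aDb[i].pSchema here ... */
  sqlite3BtreeLeaveAll(db);
}
#endif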
/*
** Enter the mutex on every Btree associated with a database
** connection.  This is needed (for example) prior to parsing
** a statement since we will be comparing table and column names
** against all schemas and we do not want those schemas being
** reset out from under us.
**
** There is a corresponding leave-all procedure.
**
** Enter the mutexes in ascending order by BtShared pointer address
** to avoid the possibility of deadlock when two threads with
** two or more btrees in common both try to lock all their btrees
** at the same instant.
*/
static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){
  int i;
  int skipOk = 1;
  Btree *p;
  assert( sqlite3_mutex_held(db->mutex) );
  for(i=0; i<db->nDb; i++){
    p = db->aDb[i].pBt;
    if( p && p->sharable ){
      sqlite3BtreeEnter(p);
      skipOk = 0;
    }
  }
  db->skipBtreeMutex = skipOk;
}
SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
  if( db->skipBtreeMutex==0 ) btreeEnterAll(db);
}
static void SQLITE_NOINLINE btreeLeaveAll(sqlite3 *db){
  int i;
  Btree *p;
  assert( sqlite3_mutex_held(db->mutex) );
  for(i=0; i<db->nDb; i++){
    p = db->aDb[i].pBt;
    if( p ) sqlite3BtreeLeave(p);
  }
}
SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){
  if( db->skipBtreeMutex==0 ) btreeLeaveAll(db);
}

#ifndef NDEBUG
/*
** Return true if the current thread holds the database connection
** mutex and all required BtShared mutexes.
**
** This routine is used inside assert() statements only.
*/
SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){
  int i;
  if( !sqlite3_mutex_held(db->mutex) ){
    return 0;
  }
  for(i=0; i<db->nDb; i++){
    Btree *p;
    p = db->aDb[i].pBt;
    if( p && p->sharable &&
         (p->wantToLock==0 || !sqlite3_mutex_held(p->pBt->mutex)) ){
      return 0;
    }
  }
  return 1;
}
#endif /* NDEBUG */

#ifndef NDEBUG
/*
** Return true if the correct mutexes are held for accessing the
** db->aDb[iDb].pSchema structure.  The mutexes required for schema
** access are:
**
**   (1) The mutex on db
**   (2) if iDb!=1, then the mutex on db->aDb[iDb].pBt.
**
** If pSchema is not NULL, then iDb is computed from pSchema and
** db using sqlite3SchemaToIndex().
*/
SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3 *db, int iDb, Schema *pSchema){
  Btree *p;
  assert( db!=0 );
  if( pSchema ) iDb = sqlite3SchemaToIndex(db, pSchema);
  assert( iDb>=0 && iDb<db->nDb );
  if( !sqlite3_mutex_held(db->mutex) ) return 0;
  if( iDb==1 ) return 1;
  p = db->aDb[iDb].pBt;
  assert( p!=0 );
  return p->sharable==0 || p->locked==1;
}
#endif /* NDEBUG */

#else /* SQLITE_THREADSAFE>0 above.  SQLITE_THREADSAFE==0 below */
/*
** The following are special cases for mutex enter routines for use
** in single threaded applications that use shared cache.  Except for
** these two routines, all mutex operations are no-ops in that case and
** are null #defines in btree.h.
**
** If shared cache is disabled, then all btree mutex routines, including
** the ones below, are no-ops and are null #defines in btree.h.
*/

SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
  p->pBt->db = p->db;
}
SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
  int i;
  for(i=0; i<db->nDb; i++){
    Btree *p = db->aDb[i].pBt;
    if( p ){
      p->pBt->db = p->db;
    }
  }
}
#endif /* if SQLITE_THREADSAFE */

#ifndef SQLITE_OMIT_INCRBLOB
/*
** Enter a mutex on a Btree given a cursor owned by that Btree.
**
** These entry points are used by incremental I/O only. Enter() is required
** any time OMIT_SHARED_CACHE is not defined, regardless of whether or not
** the build is threadsafe. Leave() is only required by threadsafe builds.
*/
SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor *pCur){
  sqlite3BtreeEnter(pCur->pBtree);
}
# if SQLITE_THREADSAFE
SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor *pCur){
  sqlite3BtreeLeave(pCur->pBtree);
}
# endif
#endif /* ifndef SQLITE_OMIT_INCRBLOB */

#endif /* ifndef SQLITE_OMIT_SHARED_CACHE */

/************** End of btmutex.c *********************************************/
/************** Begin file btree.c *******************************************/
/*
** 2004 April 6
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements an external (disk-based) database using BTrees.
** See the header comment on "btreeInt.h" for additional information,
** including a description of the file format and an overview of operation.
*/
/* #include "btreeInt.h" */

/*
** The header string that appears at the beginning of every
** SQLite database.
*/
static const char zMagicHeader[] = SQLITE_FILE_HEADER;

/*
** Set this global variable to 1 to enable tracing using the TRACE
** macro.
*/
#if 0
int sqlite3BtreeTrace=1;  /* True to enable tracing */
# define TRACE(X)  if(sqlite3BtreeTrace){printf X;fflush(stdout);}
#else
# define TRACE(X)
#endif

/*
** Extract a 2-byte big-endian integer from an array of unsigned bytes.
** But if the value is zero, make it 65536.
**
** This routine is used to extract the "offset to cell content area" value
** from the header of a btree page.  If the page size is 65536 and the page
** is empty, the offset should be 65536, but the 2-byte value stores zero.
** This routine makes the necessary adjustment to 65536.
*/
#define get2byteNotZero(X)  (((((int)get2byte(X))-1)&0xffff)+1)

/*
** Values passed as the 5th argument to allocateBtreePage()
*/
#define BTALLOC_ANY   0           /* Allocate any page */
#define BTALLOC_EXACT 1           /* Allocate exact page if possible */
#define BTALLOC_LE    2           /* Allocate any page <= the parameter */

/*
** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not
** defined, or 0 if it is.  For example:
**
**   bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum);
*/
#ifndef SQLITE_OMIT_AUTOVACUUM
#define IfNotOmitAV(expr) (expr)
#else
#define IfNotOmitAV(expr) 0
#endif

#ifndef SQLITE_OMIT_SHARED_CACHE
/*
** A list of BtShared objects that are eligible for participation
** in shared cache.  This variable has file scope during normal builds,
** but the test harness needs to access it so we make it global for
** test builds.
**
** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER.
*/
#ifdef SQLITE_TEST
SQLITE_PRIVATE BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
#else
static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
#endif
#endif /* SQLITE_OMIT_SHARED_CACHE */

#ifndef SQLITE_OMIT_SHARED_CACHE
/*
** Enable or disable the shared pager and schema features.
**
** This routine has no effect on existing database connections.
** The shared cache setting affects only future calls to
** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
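**
** Usage sketch (hypothetical application code, names are illustrative):
**
**     sqlite3 *db1, *db2;
**     sqlite3_enable_shared_cache(1);
**     sqlite3_open("file.db", &db1);
**     sqlite3_open("file.db", &db2);
**
** Both new connections then share a single BtShared cache.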
+*/ +SQLITE_API int sqlite3_enable_shared_cache(int enable){ + sqlite3GlobalConfig.sharedCacheEnabled = enable; + return SQLITE_OK; +} +#endif + + + +#ifdef SQLITE_OMIT_SHARED_CACHE + /* + ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(), + ** and clearAllSharedCacheTableLocks() + ** manipulate entries in the BtShared.pLock linked list used to store + ** shared-cache table level locks. If the library is compiled with the + ** shared-cache feature disabled, then there is only ever one user + ** of each BtShared structure and so this locking is not necessary. + ** So define the lock related functions as no-ops. + */ + #define querySharedCacheTableLock(a,b,c) SQLITE_OK + #define setSharedCacheTableLock(a,b,c) SQLITE_OK + #define clearAllSharedCacheTableLocks(a) + #define downgradeAllSharedCacheTableLocks(a) + #define hasSharedCacheTableLock(a,b,c,d) 1 + #define hasReadConflicts(a, b) 0 +#endif + +#ifndef SQLITE_OMIT_SHARED_CACHE + +#ifdef SQLITE_DEBUG +/* +**** This function is only used as part of an assert() statement. *** +** +** Check to see if pBtree holds the required locks to read or write to the +** table with root page iRoot. Return 1 if it does and 0 if not. +** +** For example, when writing to a table with root-page iRoot via +** Btree connection pBtree: +** +** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) ); +** +** When writing to an index that resides in a sharable database, the +** caller should have first obtained a lock specifying the root page of +** the corresponding table. This makes things a bit more complicated, +** as this module treats each table as a separate structure. To determine +** the table corresponding to the index being written, this +** function has to search through the database schema. +** +** Instead of a lock on the table/index rooted at page iRoot, the caller may +** hold a write-lock on the schema table (root page 1). This is also +** acceptable. +*/ +static int hasSharedCacheTableLock( + Btree *pBtree, /* Handle that must hold lock */ + Pgno iRoot, /* Root page of b-tree */ + int isIndex, /* True if iRoot is the root of an index b-tree */ + int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */ +){ + Schema *pSchema = (Schema *)pBtree->pBt->pSchema; + Pgno iTab = 0; + BtLock *pLock; + + /* If this database is not shareable, or if the client is reading + ** and has the read-uncommitted flag set, then no lock is required. + ** Return true immediately. + */ + if( (pBtree->sharable==0) + || (eLockType==READ_LOCK && (pBtree->db->flags & SQLITE_ReadUncommitted)) + ){ + return 1; + } + + /* If the client is reading or writing an index and the schema is + ** not loaded, then it is too difficult to actually check to see if + ** the correct locks are held. So do not bother - just return true. + ** This case does not come up very often anyhow. + */ + if( isIndex && (!pSchema || (pSchema->schemaFlags&DB_SchemaLoaded)==0) ){ + return 1; + } + + /* Figure out the root-page that the lock should be held on. For table + ** b-trees, this is just the root page of the b-tree being read or + ** written. For index b-trees, it is the root page of the associated + ** table. */ + if( isIndex ){ + HashElem *p; + for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){ + Index *pIdx = (Index *)sqliteHashData(p); + if( pIdx->tnum==(int)iRoot ){ + if( iTab ){ + /* Two or more indexes share the same root page. There must + ** be imposter tables. So just return true. The assert is not + ** useful in that case. 
*/ + return 1; + } + iTab = pIdx->pTable->tnum; + } + } + }else{ + iTab = iRoot; + } + + /* Search for the required lock. Either a write-lock on root-page iTab, a + ** write-lock on the schema table, or (if the client is reading) a + ** read-lock on iTab will suffice. Return 1 if any of these are found. */ + for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){ + if( pLock->pBtree==pBtree + && (pLock->iTable==iTab || (pLock->eLock==WRITE_LOCK && pLock->iTable==1)) + && pLock->eLock>=eLockType + ){ + return 1; + } + } + + /* Failed to find the required lock. */ + return 0; +} +#endif /* SQLITE_DEBUG */ + +#ifdef SQLITE_DEBUG +/* +**** This function may be used as part of assert() statements only. **** +** +** Return true if it would be illegal for pBtree to write into the +** table or index rooted at iRoot because other shared connections are +** simultaneously reading that same table or index. +** +** It is illegal for pBtree to write if some other Btree object that +** shares the same BtShared object is currently reading or writing +** the iRoot table. Except, if the other Btree object has the +** read-uncommitted flag set, then it is OK for the other object to +** have a read cursor. +** +** For example, before writing to any part of the table or index +** rooted at page iRoot, one should call: +** +** assert( !hasReadConflicts(pBtree, iRoot) ); +*/ +static int hasReadConflicts(Btree *pBtree, Pgno iRoot){ + BtCursor *p; + for(p=pBtree->pBt->pCursor; p; p=p->pNext){ + if( p->pgnoRoot==iRoot + && p->pBtree!=pBtree + && 0==(p->pBtree->db->flags & SQLITE_ReadUncommitted) + ){ + return 1; + } + } + return 0; +} +#endif /* #ifdef SQLITE_DEBUG */ + +/* +** Query to see if Btree handle p may obtain a lock of type eLock +** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return +** SQLITE_OK if the lock may be obtained (by calling +** setSharedCacheTableLock()), or SQLITE_LOCKED if not. +*/ +static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){ + BtShared *pBt = p->pBt; + BtLock *pIter; + + assert( sqlite3BtreeHoldsMutex(p) ); + assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); + assert( p->db!=0 ); + assert( !(p->db->flags&SQLITE_ReadUncommitted)||eLock==WRITE_LOCK||iTab==1 ); + + /* If requesting a write-lock, then the Btree must have an open write + ** transaction on this file. And, obviously, for this to be so there + ** must be an open write transaction on the file itself. + */ + assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) ); + assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE ); + + /* This routine is a no-op if the shared-cache is not enabled */ + if( !p->sharable ){ + return SQLITE_OK; + } + + /* If some other connection is holding an exclusive lock, the + ** requested lock may not be obtained. + */ + if( pBt->pWriter!=p && (pBt->btsFlags & BTS_EXCLUSIVE)!=0 ){ + sqlite3ConnectionBlocked(p->db, pBt->pWriter->db); + return SQLITE_LOCKED_SHAREDCACHE; + } + + for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ + /* The condition (pIter->eLock!=eLock) in the following if(...) + ** statement is a simplification of: + ** + ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK) + ** + ** since we know that if eLock==WRITE_LOCK, then no other connection + ** may hold a WRITE_LOCK on any table in this file (since there can + ** only be a single writer). 
+ */ + assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK ); + assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK); + if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){ + sqlite3ConnectionBlocked(p->db, pIter->pBtree->db); + if( eLock==WRITE_LOCK ){ + assert( p==pBt->pWriter ); + pBt->btsFlags |= BTS_PENDING; + } + return SQLITE_LOCKED_SHAREDCACHE; + } + } + return SQLITE_OK; +} +#endif /* !SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Add a lock on the table with root-page iTable to the shared-btree used +** by Btree handle p. Parameter eLock must be either READ_LOCK or +** WRITE_LOCK. +** +** This function assumes the following: +** +** (a) The specified Btree object p is connected to a sharable +** database (one with the BtShared.sharable flag set), and +** +** (b) No other Btree objects hold a lock that conflicts +** with the requested lock (i.e. querySharedCacheTableLock() has +** already been called and returned SQLITE_OK). +** +** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM +** is returned if a malloc attempt fails. +*/ +static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ + BtShared *pBt = p->pBt; + BtLock *pLock = 0; + BtLock *pIter; + + assert( sqlite3BtreeHoldsMutex(p) ); + assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); + assert( p->db!=0 ); + + /* A connection with the read-uncommitted flag set will never try to + ** obtain a read-lock using this function. The only read-lock obtained + ** by a connection in read-uncommitted mode is on the sqlite_master + ** table, and that lock is obtained in BtreeBeginTrans(). */ + assert( 0==(p->db->flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK ); + + /* This function should only be called on a sharable b-tree after it + ** has been determined that no other b-tree holds a conflicting lock. */ + assert( p->sharable ); + assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) ); + + /* First search the list for an existing lock on this table. */ + for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ + if( pIter->iTable==iTable && pIter->pBtree==p ){ + pLock = pIter; + break; + } + } + + /* If the above search did not find a BtLock struct associating Btree p + ** with table iTable, allocate one and link it into the list. + */ + if( !pLock ){ + pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock)); + if( !pLock ){ + return SQLITE_NOMEM_BKPT; + } + pLock->iTable = iTable; + pLock->pBtree = p; + pLock->pNext = pBt->pLock; + pBt->pLock = pLock; + } + + /* Set the BtLock.eLock variable to the maximum of the current lock + ** and the requested lock. This means if a write-lock was already held + ** and a read-lock requested, we don't incorrectly downgrade the lock. + */ + assert( WRITE_LOCK>READ_LOCK ); + if( eLock>pLock->eLock ){ + pLock->eLock = eLock; + } + + return SQLITE_OK; +} +#endif /* !SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** Release all the table locks (locks obtained via calls to +** the setSharedCacheTableLock() procedure) held by Btree object p. +** +** This function assumes that Btree p has an open read or write +** transaction. If it does not, then the BTS_PENDING flag +** may be incorrectly cleared. 
*/
static void clearAllSharedCacheTableLocks(Btree *p){
  BtShared *pBt = p->pBt;
  BtLock **ppIter = &pBt->pLock;

  assert( sqlite3BtreeHoldsMutex(p) );
  assert( p->sharable || 0==*ppIter );
  assert( p->inTrans>0 );

  while( *ppIter ){
    BtLock *pLock = *ppIter;
    assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree );
    assert( pLock->pBtree->inTrans>=pLock->eLock );
    if( pLock->pBtree==p ){
      *ppIter = pLock->pNext;
      assert( pLock->iTable!=1 || pLock==&p->lock );
      if( pLock->iTable!=1 ){
        sqlite3_free(pLock);
      }
    }else{
      ppIter = &pLock->pNext;
    }
  }

  assert( (pBt->btsFlags & BTS_PENDING)==0 || pBt->pWriter );
  if( pBt->pWriter==p ){
    pBt->pWriter = 0;
    pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
  }else if( pBt->nTransaction==2 ){
    /* This function is called when Btree p is concluding its
    ** transaction. If there currently exists a writer, and p is not
    ** that writer, then the number of locks held by connections other
    ** than the writer must be about to drop to zero. In this case
    ** set the BTS_PENDING flag to 0.
    **
    ** If there is not currently a writer, then BTS_PENDING must
    ** be zero already. So this next line is harmless in that case.
    */
    pBt->btsFlags &= ~BTS_PENDING;
  }
}

/*
** This function changes all write-locks held by Btree p into read-locks.
*/
static void downgradeAllSharedCacheTableLocks(Btree *p){
  BtShared *pBt = p->pBt;
  if( pBt->pWriter==p ){
    BtLock *pLock;
    pBt->pWriter = 0;
    pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING);
    for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){
      assert( pLock->eLock==READ_LOCK || pLock->pBtree==p );
      pLock->eLock = READ_LOCK;
    }
  }
}

#endif /* SQLITE_OMIT_SHARED_CACHE */

static void releasePage(MemPage *pPage);  /* Forward reference */

/*
***** This routine is used inside of assert() only ****
**
** Verify that the cursor holds the mutex on its BtShared
*/
#ifdef SQLITE_DEBUG
static int cursorHoldsMutex(BtCursor *p){
  return sqlite3_mutex_held(p->pBt->mutex);
}

/* Verify that the cursor and the BtShared agree about what is the current
** database connection. This is important in shared-cache mode. If the
** database connection pointers get out-of-sync, it is possible for routines
** like btreeInitPage() to reference a stale connection pointer that
** references a connection that has already closed.  This routine is used
** inside assert() statements only and for the purpose of double-checking
** that the btree code does keep the database connection pointers
** up-to-date.
*/
static int cursorOwnsBtShared(BtCursor *p){
  assert( cursorHoldsMutex(p) );
  return (p->pBtree->db==p->pBt->db);
}
#endif

/*
** Invalidate the overflow cache of the cursor passed as the first argument.
*/
#define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl)

/*
** Invalidate the overflow page-list cache for all cursors opened
** on the shared btree structure pBt.
*/
static void invalidateAllOverflowCache(BtShared *pBt){
  BtCursor *p;
  assert( sqlite3_mutex_held(pBt->mutex) );
  for(p=pBt->pCursor; p; p=p->pNext){
    invalidateOverflowCache(p);
  }
}

#ifndef SQLITE_OMIT_INCRBLOB
/*
** This function is called before modifying the contents of a table
** to invalidate any incrblob cursors that are open on the
** row or one of the rows being modified.
**
** If argument isClearTable is true, then the entire contents of the
** table is about to be deleted.
In this case invalidate all incrblob +** cursors open on any row within the table with root-page pgnoRoot. +** +** Otherwise, if argument isClearTable is false, then the row with +** rowid iRow is being replaced or deleted. In this case invalidate +** only those incrblob cursors open on that specific row. +*/ +static void invalidateIncrblobCursors( + Btree *pBtree, /* The database file to check */ + i64 iRow, /* The rowid that might be changing */ + int isClearTable /* True if all rows are being deleted */ +){ + BtCursor *p; + if( pBtree->hasIncrblobCur==0 ) return; + assert( sqlite3BtreeHoldsMutex(pBtree) ); + pBtree->hasIncrblobCur = 0; + for(p=pBtree->pBt->pCursor; p; p=p->pNext){ + if( (p->curFlags & BTCF_Incrblob)!=0 ){ + pBtree->hasIncrblobCur = 1; + if( isClearTable || p->info.nKey==iRow ){ + p->eState = CURSOR_INVALID; + } + } + } +} + +#else + /* Stub function when INCRBLOB is omitted */ + #define invalidateIncrblobCursors(x,y,z) +#endif /* SQLITE_OMIT_INCRBLOB */ + +/* +** Set bit pgno of the BtShared.pHasContent bitvec. This is called +** when a page that previously contained data becomes a free-list leaf +** page. +** +** The BtShared.pHasContent bitvec exists to work around an obscure +** bug caused by the interaction of two useful IO optimizations surrounding +** free-list leaf pages: +** +** 1) When all data is deleted from a page and the page becomes +** a free-list leaf page, the page is not written to the database +** (as free-list leaf pages contain no meaningful data). Sometimes +** such a page is not even journalled (as it will not be modified, +** why bother journalling it?). +** +** 2) When a free-list leaf page is reused, its content is not read +** from the database or written to the journal file (why should it +** be, if it is not at all meaningful?). +** +** By themselves, these optimizations work fine and provide a handy +** performance boost to bulk delete or insert operations. However, if +** a page is moved to the free-list and then reused within the same +** transaction, a problem comes up. If the page is not journalled when +** it is moved to the free-list and it is also not journalled when it +** is extracted from the free-list and reused, then the original data +** may be lost. In the event of a rollback, it may not be possible +** to restore the database to its original configuration. +** +** The solution is the BtShared.pHasContent bitvec. Whenever a page is +** moved to become a free-list leaf page, the corresponding bit is +** set in the bitvec. Whenever a leaf page is extracted from the free-list, +** optimization 2 above is omitted if the corresponding bit is already +** set in BtShared.pHasContent. The contents of the bitvec are cleared +** at the end of every transaction. +*/ +static int btreeSetHasContent(BtShared *pBt, Pgno pgno){ + int rc = SQLITE_OK; + if( !pBt->pHasContent ){ + assert( pgno<=pBt->nPage ); + pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage); + if( !pBt->pHasContent ){ + rc = SQLITE_NOMEM_BKPT; + } + } + if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){ + rc = sqlite3BitvecSet(pBt->pHasContent, pgno); + } + return rc; +} + +/* +** Query the BtShared.pHasContent vector. +** +** This function is called when a free-list leaf page is removed from the +** free-list for reuse. It returns false if it is safe to retrieve the +** page from the pager layer with the 'no-content' flag set. True otherwise. 
+*/ +static int btreeGetHasContent(BtShared *pBt, Pgno pgno){ + Bitvec *p = pBt->pHasContent; + return (p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTest(p, pgno))); +} + +/* +** Clear (destroy) the BtShared.pHasContent bitvec. This should be +** invoked at the conclusion of each write-transaction. +*/ +static void btreeClearHasContent(BtShared *pBt){ + sqlite3BitvecDestroy(pBt->pHasContent); + pBt->pHasContent = 0; +} + +/* +** Release all of the apPage[] pages for a cursor. +*/ +static void btreeReleaseAllCursorPages(BtCursor *pCur){ + int i; + for(i=0; i<=pCur->iPage; i++){ + releasePage(pCur->apPage[i]); + pCur->apPage[i] = 0; + } + pCur->iPage = -1; +} + +/* +** The cursor passed as the only argument must point to a valid entry +** when this function is called (i.e. have eState==CURSOR_VALID). This +** function saves the current cursor key in variables pCur->nKey and +** pCur->pKey. SQLITE_OK is returned if successful or an SQLite error +** code otherwise. +** +** If the cursor is open on an intkey table, then the integer key +** (the rowid) is stored in pCur->nKey and pCur->pKey is left set to +** NULL. If the cursor is open on a non-intkey table, then pCur->pKey is +** set to point to a malloced buffer pCur->nKey bytes in size containing +** the key. +*/ +static int saveCursorKey(BtCursor *pCur){ + int rc = SQLITE_OK; + assert( CURSOR_VALID==pCur->eState ); + assert( 0==pCur->pKey ); + assert( cursorHoldsMutex(pCur) ); + + if( pCur->curIntKey ){ + /* Only the rowid is required for a table btree */ + pCur->nKey = sqlite3BtreeIntegerKey(pCur); + }else{ + /* For an index btree, save the complete key content */ + void *pKey; + pCur->nKey = sqlite3BtreePayloadSize(pCur); + pKey = sqlite3Malloc( pCur->nKey ); + if( pKey ){ + rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey); + if( rc==SQLITE_OK ){ + pCur->pKey = pKey; + }else{ + sqlite3_free(pKey); + } + }else{ + rc = SQLITE_NOMEM_BKPT; + } + } + assert( !pCur->curIntKey || !pCur->pKey ); + return rc; +} + +/* +** Save the current cursor position in the variables BtCursor.nKey +** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK. +** +** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID) +** prior to calling this routine. +*/ +static int saveCursorPosition(BtCursor *pCur){ + int rc; + + assert( CURSOR_VALID==pCur->eState || CURSOR_SKIPNEXT==pCur->eState ); + assert( 0==pCur->pKey ); + assert( cursorHoldsMutex(pCur) ); + + if( pCur->eState==CURSOR_SKIPNEXT ){ + pCur->eState = CURSOR_VALID; + }else{ + pCur->skipNext = 0; + } + + rc = saveCursorKey(pCur); + if( rc==SQLITE_OK ){ + btreeReleaseAllCursorPages(pCur); + pCur->eState = CURSOR_REQUIRESEEK; + } + + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl|BTCF_AtLast); + return rc; +} + +/* Forward reference */ +static int SQLITE_NOINLINE saveCursorsOnList(BtCursor*,Pgno,BtCursor*); + +/* +** Save the positions of all cursors (except pExcept) that are open on +** the table with root-page iRoot. "Saving the cursor position" means that +** the location in the btree is remembered in such a way that it can be +** moved back to the same spot after the btree has been modified. This +** routine is called just before cursor pExcept is used to modify the +** table, for example in BtreeDelete() or BtreeInsert(). +** +** If there are two or more cursors on the same btree, then all such +** cursors should have their BTCF_Multiple flag set. The btreeCursor() +** routine enforces that rule. 
** This routine only needs to be called in
** the uncommon case when pExcept has the BTCF_Multiple flag set.
**
** If pExcept!=NULL and if no other cursors are found on the same root-page,
** then the BTCF_Multiple flag on pExcept is cleared, to avoid another
** pointless call to this routine.
**
** Implementation note:  This routine merely checks to see if any cursors
** need to be saved.  It calls out to saveCursorsOnList() in the (unusual)
** event that cursors are in need of being saved.
*/
static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){
  BtCursor *p;
  assert( sqlite3_mutex_held(pBt->mutex) );
  assert( pExcept==0 || pExcept->pBt==pBt );
  for(p=pBt->pCursor; p; p=p->pNext){
    if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ) break;
  }
  if( p ) return saveCursorsOnList(p, iRoot, pExcept);
  if( pExcept ) pExcept->curFlags &= ~BTCF_Multiple;
  return SQLITE_OK;
}

/* This helper routine to saveAllCursors does the actual work of saving
** the cursors if and when a cursor is found that actually requires saving.
** The common case is that no cursors need to be saved, so this routine is
** broken out from its caller to avoid unnecessary stack pointer movement.
*/
static int SQLITE_NOINLINE saveCursorsOnList(
  BtCursor *p,           /* The first cursor that needs saving */
  Pgno iRoot,            /* Only save cursor with this iRoot. Save all if zero */
  BtCursor *pExcept      /* Do not save this cursor */
){
  do{
    if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){
      if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){
        int rc = saveCursorPosition(p);
        if( SQLITE_OK!=rc ){
          return rc;
        }
      }else{
        testcase( p->iPage>0 );
        btreeReleaseAllCursorPages(p);
      }
    }
    p = p->pNext;
  }while( p );
  return SQLITE_OK;
}

/*
** Clear the current cursor position.
*/
SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *pCur){
  assert( cursorHoldsMutex(pCur) );
  sqlite3_free(pCur->pKey);
  pCur->pKey = 0;
  pCur->eState = CURSOR_INVALID;
}

/*
** In this version of BtreeMoveto, pKey is a packed index record
** such as is generated by the OP_MakeRecord opcode.  Unpack the
** record and then call BtreeMovetoUnpacked() to do the work.
*/
static int btreeMoveto(
  BtCursor *pCur,     /* Cursor open on the btree to be searched */
  const void *pKey,   /* Packed key if the btree is an index */
  i64 nKey,           /* Integer key for tables.  Size of pKey for indices */
  int bias,           /* Bias search to the high end */
  int *pRes           /* Write search results here */
){
  int rc;                    /* Status code */
  UnpackedRecord *pIdxKey;   /* Unpacked index key */

  if( pKey ){
    assert( nKey==(i64)(int)nKey );
    pIdxKey = sqlite3VdbeAllocUnpackedRecord(pCur->pKeyInfo);
    if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT;
    sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, pIdxKey);
    if( pIdxKey->nField==0 ){
      rc = SQLITE_CORRUPT_BKPT;
      goto moveto_done;
    }
  }else{
    pIdxKey = 0;
  }
  rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes);
moveto_done:
  if( pIdxKey ){
    sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey);
  }
  return rc;
}

/*
** Restore the cursor to the position it was in (or as close to as possible)
** when saveCursorPosition() was called. Note that this call deletes the
** saved position info stored by saveCursorPosition(), so there can be
** at most one effective restoreCursorPosition() call after each
** saveCursorPosition().
*/
static int btreeRestoreCursorPosition(BtCursor *pCur){
  int rc;
  int skipNext;
  assert( cursorOwnsBtShared(pCur) );
  assert( pCur->eState>=CURSOR_REQUIRESEEK );
  if( pCur->eState==CURSOR_FAULT ){
    return pCur->skipNext;
  }
  pCur->eState = CURSOR_INVALID;
  rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &skipNext);
  if( rc==SQLITE_OK ){
    sqlite3_free(pCur->pKey);
    pCur->pKey = 0;
    assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID );
    pCur->skipNext |= skipNext;
    if( pCur->skipNext && pCur->eState==CURSOR_VALID ){
      pCur->eState = CURSOR_SKIPNEXT;
    }
  }
  return rc;
}

#define restoreCursorPosition(p) \
  (p->eState>=CURSOR_REQUIRESEEK ? \
         btreeRestoreCursorPosition(p) : \
         SQLITE_OK)

/*
** Determine whether or not a cursor has moved from the position where
** it was last placed, or has been invalidated for any other reason.
** Cursors can move when the row they are pointing at is deleted out
** from under them, for example.  Cursor might also move if a btree
** is rebalanced.
**
** Calling this routine with a NULL cursor pointer returns false.
**
** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor
** back to where it ought to be if this routine returns true.
*/
SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur){
  return pCur->eState!=CURSOR_VALID;
}

/*
** This routine restores a cursor back to its original position after it
** has been moved by some outside activity (such as a btree rebalance or
** a row having been deleted out from under the cursor).
**
** On success, the *pDifferentRow parameter is false if the cursor is left
** pointing at exactly the same row.  *pDifferentRow is true if the row
** the cursor was pointing to has been deleted, forcing the cursor to
** point to some nearby row.
**
** This routine should only be called for a cursor that just returned
** TRUE from sqlite3BtreeCursorHasMoved().
*/
SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow){
  int rc;

  assert( pCur!=0 );
  assert( pCur->eState!=CURSOR_VALID );
  rc = restoreCursorPosition(pCur);
  if( rc ){
    *pDifferentRow = 1;
    return rc;
  }
  if( pCur->eState!=CURSOR_VALID ){
    *pDifferentRow = 1;
  }else{
    assert( pCur->skipNext==0 );
    *pDifferentRow = 0;
  }
  return SQLITE_OK;
}

#ifdef SQLITE_ENABLE_CURSOR_HINTS
/*
** Provide hints to the cursor.  The particular hint given (and the type
** and number of the varargs parameters) is determined by the eHintType
** parameter.  See the definitions of the BTREE_HINT_* macros for details.
*/
SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){
  /* Used only by systems that substitute their own storage engine */
}
#endif

/*
** Provide flag hints to the cursor.
*/
SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){
  assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 );
  pCur->hints = x;
}


#ifndef SQLITE_OMIT_AUTOVACUUM
/*
** Given a page number of a regular database page, return the page
** number for the pointer-map page that contains the entry for the
** input page number.
**
** Return 0 (not a valid page) for pgno==1 since there is
** no pointer map associated with page 1.  The integrity_check logic
** requires that ptrmapPageno(*,1)!=1.
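**
** Worked example (sketch): with a 4096-byte usable page size, each
** group spans usableSize/5 + 1 = 820 pages.  Page 2 is the first
** pointer-map page and holds entries for pages 3 through 821; the next
** pointer-map page is 822, and so on (skipping ahead by one when the
** computed page collides with PENDING_BYTE_PAGE(pBt)).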
+*/ +static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){ + int nPagesPerMapPage; + Pgno iPtrMap, ret; + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pgno<2 ) return 0; + nPagesPerMapPage = (pBt->usableSize/5)+1; + iPtrMap = (pgno-2)/nPagesPerMapPage; + ret = (iPtrMap*nPagesPerMapPage) + 2; + if( ret==PENDING_BYTE_PAGE(pBt) ){ + ret++; + } + return ret; +} + +/* +** Write an entry into the pointer map. +** +** This routine updates the pointer map entry for page number 'key' +** so that it maps to type 'eType' and parent page number 'pgno'. +** +** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is +** a no-op. If an error occurs, the appropriate error code is written +** into *pRC. +*/ +static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){ + DbPage *pDbPage; /* The pointer map page */ + u8 *pPtrmap; /* The pointer map data */ + Pgno iPtrmap; /* The pointer map page number */ + int offset; /* Offset in pointer map page */ + int rc; /* Return code from subfunctions */ + + if( *pRC ) return; + + assert( sqlite3_mutex_held(pBt->mutex) ); + /* The master-journal page number must never be used as a pointer map page */ + assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); + + assert( pBt->autoVacuum ); + if( key==0 ){ + *pRC = SQLITE_CORRUPT_BKPT; + return; + } + iPtrmap = PTRMAP_PAGENO(pBt, key); + rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0); + if( rc!=SQLITE_OK ){ + *pRC = rc; + return; + } + offset = PTRMAP_PTROFFSET(iPtrmap, key); + if( offset<0 ){ + *pRC = SQLITE_CORRUPT_BKPT; + goto ptrmap_exit; + } + assert( offset <= (int)pBt->usableSize-5 ); + pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); + + if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ + TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); + *pRC= rc = sqlite3PagerWrite(pDbPage); + if( rc==SQLITE_OK ){ + pPtrmap[offset] = eType; + put4byte(&pPtrmap[offset+1], parent); + } + } + +ptrmap_exit: + sqlite3PagerUnref(pDbPage); +} + +/* +** Read an entry from the pointer map. +** +** This routine retrieves the pointer map entry for page 'key', writing +** the type and parent page number to *pEType and *pPgno respectively. +** An error code is returned if something goes wrong, otherwise SQLITE_OK. +*/ +static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){ + DbPage *pDbPage; /* The pointer map page */ + int iPtrmap; /* Pointer map page index */ + u8 *pPtrmap; /* Pointer map page data */ + int offset; /* Offset of entry in pointer map */ + int rc; + + assert( sqlite3_mutex_held(pBt->mutex) ); + + iPtrmap = PTRMAP_PAGENO(pBt, key); + rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0); + if( rc!=0 ){ + return rc; + } + pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); + + offset = PTRMAP_PTROFFSET(iPtrmap, key); + if( offset<0 ){ + sqlite3PagerUnref(pDbPage); + return SQLITE_CORRUPT_BKPT; + } + assert( offset <= (int)pBt->usableSize-5 ); + assert( pEType!=0 ); + *pEType = pPtrmap[offset]; + if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]); + + sqlite3PagerUnref(pDbPage); + if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_BKPT; + return SQLITE_OK; +} + +#else /* if defined SQLITE_OMIT_AUTOVACUUM */ + #define ptrmapPut(w,x,y,z,rc) + #define ptrmapGet(w,x,y,z) SQLITE_OK + #define ptrmapPutOvflPtr(x, y, rc) +#endif + +/* +** Given a btree page and a cell index (0 means the first cell on +** the page, 1 means the second cell, and so forth) return a pointer +** to the cell content. 
**
** findCellPastPtr() does the same except it skips past the initial
** 4-byte child pointer found on interior pages, if there is one.
**
** This routine works only for pages that do not contain overflow cells.
*/
#define findCell(P,I) \
  ((P)->aData + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))
#define findCellPastPtr(P,I) \
  ((P)->aDataOfst + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)])))


/*
** This is common tail processing for btreeParseCellPtr() and
** btreeParseCellPtrIndex() for the case when the cell does not fit entirely
** on a single B-tree page.  Make necessary adjustments to the CellInfo
** structure.
*/
static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow(
  MemPage *pPage,         /* Page containing the cell */
  u8 *pCell,              /* Pointer to the cell text. */
  CellInfo *pInfo         /* Fill in this structure */
){
  /* If the payload will not fit completely on the local page, we have
  ** to decide how much to store locally and how much to spill onto
  ** overflow pages.  The strategy is to minimize the amount of unused
  ** space on overflow pages while keeping the amount of local storage
  ** in between minLocal and maxLocal.
  **
  ** Warning:  changing the way overflow payload is distributed in any
  ** way will result in an incompatible file format.
  */
  int minLocal;  /* Minimum amount of payload held locally */
  int maxLocal;  /* Maximum amount of payload held locally */
  int surplus;   /* Overflow payload available for local storage */

  minLocal = pPage->minLocal;
  maxLocal = pPage->maxLocal;
  surplus = minLocal + (pInfo->nPayload - minLocal)%(pPage->pBt->usableSize-4);
  testcase( surplus==maxLocal );
  testcase( surplus==maxLocal+1 );
  if( surplus <= maxLocal ){
    pInfo->nLocal = (u16)surplus;
  }else{
    pInfo->nLocal = (u16)minLocal;
  }
  pInfo->nSize = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell) + 4;
}

/*
** The following routines are implementations of the MemPage.xParseCell()
** method.
**
** Parse a cell content block and fill in the CellInfo structure.
**
**  btreeParseCellPtr()          =>   table btree leaf nodes
**  btreeParseCellPtrNoPayload() =>   table btree internal nodes
**  btreeParseCellPtrIndex()     =>   index btree nodes
**
** There is also a wrapper function btreeParseCell() that works for
** all MemPage types and that references the cell by index rather than
** by pointer.
*/
static void btreeParseCellPtrNoPayload(
  MemPage *pPage,         /* Page containing the cell */
  u8 *pCell,              /* Pointer to the cell text. */
  CellInfo *pInfo         /* Fill in this structure */
){
  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  assert( pPage->leaf==0 );
  assert( pPage->childPtrSize==4 );
#ifndef SQLITE_DEBUG
  UNUSED_PARAMETER(pPage);
#endif
  pInfo->nSize = 4 + getVarint(&pCell[4], (u64*)&pInfo->nKey);
  pInfo->nPayload = 0;
  pInfo->nLocal = 0;
  pInfo->pPayload = 0;
  return;
}
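/* Sketch of the varint format decoded by the parsers below (never
** compiled): the high bit of each byte is a continuation flag and each
** byte contributes seven payload bits, most-significant first.
*/
#if 0
static void exampleVarint(void){
  u8 a[2] = { 0x81, 0x23 };      /* a two-byte varint */
  u32 n = a[0] & 0x7f;           /* n == 0x01 */
  n = (n<<7) | (a[1] & 0x7f);    /* n == 0xa3 == 163 */
  assert( n==163 );
}
#endif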
static void btreeParseCellPtr(
  MemPage *pPage,         /* Page containing the cell */
  u8 *pCell,              /* Pointer to the cell text. */
  CellInfo *pInfo         /* Fill in this structure */
){
  u8 *pIter;              /* For scanning through pCell */
  u32 nPayload;           /* Number of bytes of cell payload */
  u64 iKey;               /* Extracted Key value */

  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  assert( pPage->leaf==0 || pPage->leaf==1 );
  assert( pPage->intKeyLeaf );
  assert( pPage->childPtrSize==0 );
  pIter = pCell;

  /* The next block of code is equivalent to:
  **
  **     pIter += getVarint32(pIter, nPayload);
  **
  ** The code is inlined to avoid a function call.
  */
  nPayload = *pIter;
  if( nPayload>=0x80 ){
    u8 *pEnd = &pIter[8];
    nPayload &= 0x7f;
    do{
      nPayload = (nPayload<<7) | (*++pIter & 0x7f);
    }while( (*pIter)>=0x80 && pIter<pEnd );
  }
  pIter++;

  /* The next block of code is equivalent to:
  **
  **     pIter += getVarint(pIter, (u64*)&pInfo->nKey);
  **
  ** The code is inlined to avoid a function call.
  */
  iKey = *pIter;
  if( iKey>=0x80 ){
    u8 *pEnd = &pIter[7];
    iKey &= 0x7f;
    while(1){
      iKey = (iKey<<7) | (*++pIter & 0x7f);
      if( (*pIter)<0x80 ) break;
      if( pIter>=pEnd ){
        iKey = (iKey<<8) | *++pIter;
        break;
      }
    }
  }
  pIter++;

  pInfo->nKey = *(i64*)&iKey;
  pInfo->nPayload = nPayload;
  pInfo->pPayload = pIter;
  testcase( nPayload==pPage->maxLocal );
  testcase( nPayload==pPage->maxLocal+1 );
  if( nPayload<=pPage->maxLocal ){
    /* This is the (easy) common case where the entire payload fits
    ** on the local page.  No overflow is required.
    */
    pInfo->nSize = nPayload + (u16)(pIter - pCell);
    if( pInfo->nSize<4 ) pInfo->nSize = 4;
    pInfo->nLocal = (u16)nPayload;
  }else{
    btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
  }
}
static void btreeParseCellPtrIndex(
  MemPage *pPage,         /* Page containing the cell */
  u8 *pCell,              /* Pointer to the cell text. */
  CellInfo *pInfo         /* Fill in this structure */
){
  u8 *pIter;              /* For scanning through pCell */
  u32 nPayload;           /* Number of bytes of cell payload */

  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  assert( pPage->leaf==0 || pPage->leaf==1 );
  assert( pPage->intKeyLeaf==0 );
  pIter = pCell + pPage->childPtrSize;
  nPayload = *pIter;
  if( nPayload>=0x80 ){
    u8 *pEnd = &pIter[8];
    nPayload &= 0x7f;
    do{
      nPayload = (nPayload<<7) | (*++pIter & 0x7f);
    }while( *(pIter)>=0x80 && pIter<pEnd );
  }
  pIter++;
  pInfo->nKey = nPayload;
  pInfo->nPayload = nPayload;
  pInfo->pPayload = pIter;
  testcase( nPayload==pPage->maxLocal );
  testcase( nPayload==pPage->maxLocal+1 );
  if( nPayload<=pPage->maxLocal ){
    /* This is the (easy) common case where the entire payload fits
    ** on the local page.  No overflow is required.
    */
    pInfo->nSize = nPayload + (u16)(pIter - pCell);
    if( pInfo->nSize<4 ) pInfo->nSize = 4;
    pInfo->nLocal = (u16)nPayload;
  }else{
    btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo);
  }
}
static void btreeParseCell(
  MemPage *pPage,         /* Page containing the cell */
  int iCell,              /* The cell index.  First cell is 0 */
  CellInfo *pInfo         /* Fill in this structure */
){
  pPage->xParseCell(pPage, findCell(pPage, iCell), pInfo);
}
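/* Usage sketch (never compiled): parse the first cell on a page through
** the method pointer that decodeFlags() installs for the page type.
*/
#if 0
static void exampleParseCell(MemPage *pPage){
  CellInfo info;
  pPage->xParseCell(pPage, findCell(pPage, 0), &info);
  /* info.nPayload is the total payload size; info.nLocal is the number
  ** of payload bytes stored on this page.  Any remainder lives on a
  ** chain of overflow pages. */
}
#endif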
/*
** The following routines are implementations of the MemPage.xCellSize
** method.
**
** Compute the total number of bytes that a Cell needs in the cell
** data area of the btree-page.  The return number includes the cell
** data header and the local payload, but not any overflow page or
** the space used by the cell pointer.
**
** cellSizePtrNoPayload()    =>   table internal nodes
** cellSizePtr()             =>   all index nodes & table leaf nodes
*/
static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
  u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */
  u8 *pEnd;                                /* End mark for a varint */
  u32 nSize;                               /* Size value to return */

#ifdef SQLITE_DEBUG
  /* The value returned by this function should always be the same as
  ** the (CellInfo.nSize) value found by doing a full parse of the
  ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
  ** this function verifies that this invariant is not violated. */
  CellInfo debuginfo;
  pPage->xParseCell(pPage, pCell, &debuginfo);
#endif

  nSize = *pIter;
  if( nSize>=0x80 ){
    pEnd = &pIter[8];
    nSize &= 0x7f;
    do{
      nSize = (nSize<<7) | (*++pIter & 0x7f);
    }while( *(pIter)>=0x80 && pIter<pEnd );
  }
  pIter++;
  if( pPage->intKey ){
    /* pIter now points at the 64-bit integer key value, a variable length
    ** integer. The following block moves pIter to point at the first byte
    ** past the end of the key value. */
    pEnd = &pIter[9];
    while( (*pIter++)&0x80 && pIter<pEnd );
  }
  testcase( nSize==pPage->maxLocal );
  testcase( nSize==pPage->maxLocal+1 );
  if( nSize<=pPage->maxLocal ){
    nSize += (u32)(pIter - pCell);
    if( nSize<4 ) nSize = 4;
  }else{
    int minLocal = pPage->minLocal;
    nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4);
    testcase( nSize==pPage->maxLocal );
    testcase( nSize==pPage->maxLocal+1 );
    if( nSize>pPage->maxLocal ){
      nSize = minLocal;
    }
    nSize += 4 + (u16)(pIter - pCell);
  }
  assert( nSize==debuginfo.nSize || CORRUPT_DB );
  return (u16)nSize;
}
static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){
  u8 *pIter = pCell + 4; /* For looping over bytes of pCell */
  u8 *pEnd;              /* End mark for a varint */

#ifdef SQLITE_DEBUG
  /* The value returned by this function should always be the same as
  ** the (CellInfo.nSize) value found by doing a full parse of the
  ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
  ** this function verifies that this invariant is not violated. */
  CellInfo debuginfo;
  pPage->xParseCell(pPage, pCell, &debuginfo);
#else
  UNUSED_PARAMETER(pPage);
#endif

  assert( pPage->childPtrSize==4 );
  pEnd = pIter + 9;
  while( (*pIter++)&0x80 && pIter<pEnd );
  assert( debuginfo.nSize==(u16)(pIter - pCell) || CORRUPT_DB );
  return (u16)(pIter - pCell);
}

#ifdef SQLITE_DEBUG
/* This variation on cellSizePtr() is used inside of assert() statements
** only. */
static u16 cellSize(MemPage *pPage, int iCell){
  return pPage->xCellSize(pPage, findCell(pPage, iCell));
}
#endif

#ifndef SQLITE_OMIT_AUTOVACUUM
/*
** If the cell pCell, part of page pPage contains a pointer
** to an overflow page, insert an entry into the pointer-map
** for the overflow page.
*/
static void ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell, int *pRC){
  CellInfo info;
  if( *pRC ) return;
  assert( pCell!=0 );
  pPage->xParseCell(pPage, pCell, &info);
  if( info.nLocal<info.nPayload ){
    Pgno ovfl = get4byte(&pCell[info.nSize-4]);
    ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC);
  }
}
#endif
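/* Illustration (sketch) of what defragmentPage() below does to a page
** image:
**
**   before:  [hdr][cell ptrs][gap][cell A][freeblock][frag][cell B]
**   after:   [hdr][cell ptrs][gap                   ][cell A][cell B]
**
** All cells are slid to the end of the page, the freeblock list and
** the fragment count are zeroed, and the two-byte integer at hdr+5 is
** set to the new start of the cell content area.
*/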
/*
** Defragment the page given.  All Cells are moved to the
** end of the page and all free space is collected into one
** big FreeBlk that occurs in between the header and cell
** pointer array and the cell content area.
**
** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a
** b-tree page so that there are no freeblocks or fragment bytes, all
** unused bytes are contained in the unallocated space region, and all
** cells are packed tightly at the end of the page.
*/
static int defragmentPage(MemPage *pPage){
  int i;                     /* Loop counter */
  int pc;                    /* Address of the i-th cell */
  int hdr;                   /* Offset to the page header */
  int size;                  /* Size of a cell */
  int usableSize;            /* Number of usable bytes on a page */
  int cellOffset;            /* Offset to the cell pointer array */
  int cbrk;                  /* Offset to the cell content area */
  int nCell;                 /* Number of cells on the page */
  unsigned char *data;       /* The page data */
  unsigned char *temp;       /* Temp area for cell content */
  unsigned char *src;        /* Source of content */
  int iCellFirst;            /* First allowable cell index */
  int iCellLast;             /* Last possible cell index */


  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
  assert( pPage->pBt!=0 );
  assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE );
  assert( pPage->nOverflow==0 );
  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  temp = 0;
  src = data = pPage->aData;
  hdr = pPage->hdrOffset;
  cellOffset = pPage->cellOffset;
  nCell = pPage->nCell;
  assert( nCell==get2byte(&data[hdr+3]) );
  usableSize = pPage->pBt->usableSize;
  cbrk = usableSize;
  iCellFirst = cellOffset + 2*nCell;
  iCellLast = usableSize - 4;
  for(i=0; i<nCell; i++){
    u8 *pAddr;     /* The i-th cell pointer */
    pAddr = &data[cellOffset + i*2];
    pc = get2byte(pAddr);
    testcase( pc==iCellFirst );
    testcase( pc==iCellLast );
    if( pc<iCellFirst || pc>iCellLast ){
      return SQLITE_CORRUPT_BKPT;
    }
    assert( pc>=iCellFirst && pc<=iCellLast );
    size = pPage->xCellSize(pPage, &src[pc]);
    cbrk -= size;
    if( cbrk<iCellFirst || pc+size>usableSize ){
      return SQLITE_CORRUPT_BKPT;
    }
    assert( cbrk+size<=usableSize && cbrk>=iCellFirst );
    testcase( cbrk+size==usableSize );
    testcase( pc+size==usableSize );
    put2byte(pAddr, cbrk);
    if( temp==0 ){
      int x;
      if( cbrk==pc ) continue;
      temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
      x = get2byte(&data[hdr+5]);
      memcpy(&temp[x], &data[x], (cbrk+size) - x);
      src = temp;
    }
    memcpy(&data[cbrk], &src[pc], size);
  }
  assert( cbrk>=iCellFirst );
  put2byte(&data[hdr+5], cbrk);
  data[hdr+1] = 0;
  data[hdr+2] = 0;
  data[hdr+7] = 0;
  memset(&data[iCellFirst], 0, cbrk-iCellFirst);
  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
  if( cbrk-iCellFirst!=pPage->nFree ){
    return SQLITE_CORRUPT_BKPT;
  }
  return SQLITE_OK;
}

/*
** Search the free-list on page pPg for space to store a cell nByte bytes in
** size. If one can be found, return a pointer to the space and remove it
** from the free-list.
**
** If no suitable space can be found on the free-list, return NULL.
**
** This function may detect corruption within pPg.  If corruption is
** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned.
**
** Slots on the free list that are between 1 and 3 bytes larger than nByte
** will be ignored if adding the extra space to the fragmentation count
** causes the fragmentation count to exceed 60.
*/
static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
  const int hdr = pPg->hdrOffset;
  u8 * const aData = pPg->aData;
  int iAddr = hdr + 1;
  int pc = get2byte(&aData[iAddr]);
  int x;
  int usableSize = pPg->pBt->usableSize;

  assert( pc>0 );
  do{
    int size;            /* Size of the free slot */
    /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
    ** increasing offset. */
    if( pc>usableSize-4 || pc<iAddr+4 ){
      *pRc = SQLITE_CORRUPT_BKPT;
      return 0;
    }
    size = get2byte(&aData[pc+2]);
    if( (x = size - nByte)>=0 ){
      testcase( x==4 );
      testcase( x==3 );
      if( pc < pPg->cellOffset+2*pPg->nCell || size+pc > usableSize ){
        *pRc = SQLITE_CORRUPT_BKPT;
        return 0;
      }else if( x<4 ){
        /* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total
        ** number of bytes in fragments may not exceed 60. */
        if( aData[hdr+7]>57 ) return 0;

        /* Remove the slot from the free-list.
Update the number of + ** fragmented bytes within the page. */ + memcpy(&aData[iAddr], &aData[pc], 2); + aData[hdr+7] += (u8)x; + }else{ + /* The slot remains on the free-list. Reduce its size to account + ** for the portion used by the new allocation. */ + put2byte(&aData[pc+2], x); + } + return &aData[pc + x]; + } + iAddr = pc; + pc = get2byte(&aData[pc]); + }while( pc ); + + return 0; +} + +/* +** Allocate nByte bytes of space from within the B-Tree page passed +** as the first argument. Write into *pIdx the index into pPage->aData[] +** of the first byte of allocated space. Return either SQLITE_OK or +** an error code (usually SQLITE_CORRUPT). +** +** The caller guarantees that there is sufficient space to make the +** allocation. This routine might need to defragment in order to bring +** all the space together, however. This routine will avoid using +** the first two bytes past the cell pointer area since presumably this +** allocation is being made in order to insert a new cell, so we will +** also end up needing a new cell pointer. +*/ +static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ + const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ + u8 * const data = pPage->aData; /* Local cache of pPage->aData */ + int top; /* First byte of cell content area */ + int rc = SQLITE_OK; /* Integer return code */ + int gap; /* First byte of gap between cell pointers and cell content */ + + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + assert( pPage->pBt ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( nByte>=0 ); /* Minimum cell size is 4 */ + assert( pPage->nFree>=nByte ); + assert( pPage->nOverflow==0 ); + assert( nByte < (int)(pPage->pBt->usableSize-8) ); + + assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf ); + gap = pPage->cellOffset + 2*pPage->nCell; + assert( gap<=65536 ); + /* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size + ** and the reserved space is zero (the usual value for reserved space) + ** then the cell content offset of an empty page wants to be 65536. + ** However, that integer is too large to be stored in a 2-byte unsigned + ** integer, so a value of 0 is used in its place. */ + top = get2byte(&data[hdr+5]); + assert( top<=(int)pPage->pBt->usableSize ); /* Prevent by getAndInitPage() */ + if( gap>top ){ + if( top==0 && pPage->pBt->usableSize==65536 ){ + top = 65536; + }else{ + return SQLITE_CORRUPT_BKPT; + } + } + + /* If there is enough space between gap and top for one more cell pointer + ** array entry offset, and if the freelist is not empty, then search the + ** freelist looking for a free slot big enough to satisfy the request. + */ + testcase( gap+2==top ); + testcase( gap+1==top ); + testcase( gap==top ); + if( (data[hdr+2] || data[hdr+1]) && gap+2<=top ){ + u8 *pSpace = pageFindSlot(pPage, nByte, &rc); + if( pSpace ){ + assert( pSpace>=data && (pSpace - data)<65536 ); + *pIdx = (int)(pSpace - data); + return SQLITE_OK; + }else if( rc ){ + return rc; + } + } + + /* The request could not be fulfilled using a freelist slot. Check + ** to see if defragmentation is necessary. + */ + testcase( gap+2+nByte==top ); + if( gap+2+nByte>top ){ + assert( pPage->nCell>0 || CORRUPT_DB ); + rc = defragmentPage(pPage); + if( rc ) return rc; + top = get2byteNotZero(&data[hdr+5]); + assert( gap+nByte<=top ); + } + + + /* Allocate memory from the gap in between the cell pointer array + ** and the cell content area. The btreeInitPage() call has already + ** validated the freelist. 
  ** Given that the freelist is valid, there is no way that the allocation
  ** can extend off the end of the page.  The assert() below verifies the
  ** previous sentence.
  */
  top -= nByte;
  put2byte(&data[hdr+5], top);
  assert( top+nByte <= (int)pPage->pBt->usableSize );
  *pIdx = top;
  return SQLITE_OK;
}

/*
** Return a section of the pPage->aData to the freelist.
** The first byte of the new free block is pPage->aData[iStart]
** and the size of the block is iSize bytes.
**
** Adjacent freeblocks are coalesced.
**
** Note that even though the freeblock list was checked by btreeInitPage(),
** that routine will not detect overlap between cells or freeblocks.  Nor
** does it detect cells or freeblocks that encroach into the reserved bytes
** at the end of the page.  So do additional corruption checks inside this
** routine and return SQLITE_CORRUPT if any problems are found.
*/
static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
  u16 iPtr;                             /* Address of ptr to next freeblock */
  u16 iFreeBlk;                         /* Address of the next freeblock */
  u8 hdr;                               /* Page header size.  0 or 100 */
  u8 nFrag = 0;                         /* Reduction in fragmentation */
  u16 iOrigSize = iSize;                /* Original value of iSize */
  u32 iLast = pPage->pBt->usableSize-4; /* Largest possible freeblock offset */
  u32 iEnd = iStart + iSize;            /* First byte past the iStart buffer */
  unsigned char *data = pPage->aData;   /* Page content */

  assert( pPage->pBt!=0 );
  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
  assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize );
  assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize );
  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
  assert( iSize>=4 );   /* Minimum cell size is 4 */
  assert( iStart<=iLast );

  /* Overwrite deleted information with zeros when the secure_delete
  ** option is enabled */
  if( pPage->pBt->btsFlags & BTS_SECURE_DELETE ){
    memset(&data[iStart], 0, iSize);
  }

  /* The list of freeblocks must be in ascending order.  Find the
  ** spot on the list where iStart should be inserted.
  */
  hdr = pPage->hdrOffset;
  iPtr = hdr + 1;
  if( data[iPtr+1]==0 && data[iPtr]==0 ){
    iFreeBlk = 0;  /* Shortcut for the case when the freelist is empty */
  }else{
    while( (iFreeBlk = get2byte(&data[iPtr]))<iStart ){
      if( iFreeBlk<iPtr+4 ) return SQLITE_CORRUPT_BKPT;
      iPtr = iFreeBlk;
    }
    if( iFreeBlk>iLast ) return SQLITE_CORRUPT_BKPT;
    assert( iFreeBlk>iPtr || iFreeBlk==0 );

    /* At this point:
    **    iFreeBlk:   First freeblock after iStart, or zero if none
    **    iPtr:       The address of a pointer to iFreeBlk
    **
    ** Check to see if iFreeBlk should be coalesced onto the end of iStart.
    */
    if( iFreeBlk && iEnd+3>=iFreeBlk ){
      nFrag = iFreeBlk - iEnd;
      if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_BKPT;
      iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]);
      if( iEnd > pPage->pBt->usableSize ) return SQLITE_CORRUPT_BKPT;
      iSize = iEnd - iStart;
      iFreeBlk = get2byte(&data[iFreeBlk]);
    }

    /* If iPtr is another freeblock (that is, if iPtr is not the freelist
    ** pointer in the page header) then check to see if iStart should be
    ** coalesced onto the end of iPtr.
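    **
    ** Worked example (sketch): if the freeblock at iPtr spans bytes
    ** [60,100) and iStart==102 with iEnd==110, the two bytes at offsets
    ** 100 and 101 become fragment bytes (nFrag += 2) and the merged
    ** freeblock spans [60,110).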
+
+/*
+** Decode the flags byte (the first byte of the header) for a page
+** and initialize fields of the MemPage structure accordingly.
+**
+** Only the following combinations are supported.  Anything different
+** indicates a corrupt database file:
+**
+**         PTF_ZERODATA
+**         PTF_ZERODATA | PTF_LEAF
+**         PTF_LEAFDATA | PTF_INTKEY
+**         PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF
+*/
+static int decodeFlags(MemPage *pPage, int flagByte){
+  BtShared *pBt;     /* A copy of pPage->pBt */
+
+  assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  pPage->leaf = (u8)(flagByte>>3);  assert( PTF_LEAF == 1<<3 );
+  flagByte &= ~PTF_LEAF;
+  pPage->childPtrSize = 4-4*pPage->leaf;
+  pPage->xCellSize = cellSizePtr;
+  pBt = pPage->pBt;
+  if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
+    /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an
+    ** interior table b-tree page. */
+    assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
+    /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a
+    ** leaf table b-tree page. */
+    assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
+    pPage->intKey = 1;
+    if( pPage->leaf ){
+      pPage->intKeyLeaf = 1;
+      pPage->xParseCell = btreeParseCellPtr;
+    }else{
+      pPage->intKeyLeaf = 0;
+      pPage->xCellSize = cellSizePtrNoPayload;
+      pPage->xParseCell = btreeParseCellPtrNoPayload;
+    }
+    pPage->maxLocal = pBt->maxLeaf;
+    pPage->minLocal = pBt->minLeaf;
+  }else if( flagByte==PTF_ZERODATA ){
+    /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an
+    ** interior index b-tree page. */
+    assert( (PTF_ZERODATA)==2 );
+    /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a
+    ** leaf index b-tree page. */
+    assert( (PTF_ZERODATA|PTF_LEAF)==10 );
+    pPage->intKey = 0;
+    pPage->intKeyLeaf = 0;
+    pPage->xParseCell = btreeParseCellPtrIndex;
+    pPage->maxLocal = pBt->maxLocal;
+    pPage->minLocal = pBt->minLocal;
+  }else{
+    /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is
+    ** an error. */
+    return SQLITE_CORRUPT_BKPT;
+  }
+  pPage->max1bytePayload = pBt->max1bytePayload;
+  return SQLITE_OK;
+}
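+
+/* For quick reference (a restatement of the asserts above, not new logic),
+** the four legal page-type bytes accepted by decodeFlags() are:
+**
+**     0x02  PTF_ZERODATA                       interior index b-tree page
+**     0x05  PTF_LEAFDATA|PTF_INTKEY            interior table b-tree page
+**     0x0a  PTF_ZERODATA|PTF_LEAF              leaf index b-tree page
+**     0x0d  PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF   leaf table b-tree page
+*/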
+
+/*
+** Initialize the auxiliary information for a disk block.
+**
+** Return SQLITE_OK on success.  If we see that the page does
+** not contain a well-formed database page, then return
+** SQLITE_CORRUPT.  Note that a return of SQLITE_OK does not
+** guarantee that the page is well-formed.  It only shows that
+** we failed to detect any corruption.
+*/
+static int btreeInitPage(MemPage *pPage){
+
+  assert( pPage->pBt!=0 );
+  assert( pPage->pBt->db!=0 );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) );
+  assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) );
+  assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) );
+
+  if( !pPage->isInit ){
+    int pc;            /* Address of a freeblock within pPage->aData[] */
+    u8 hdr;            /* Offset to beginning of page header */
+    u8 *data;          /* Equal to pPage->aData */
+    BtShared *pBt;     /* The main btree structure */
+    int usableSize;    /* Amount of usable space on each page */
+    u16 cellOffset;    /* Offset from start of page to first cell pointer */
+    int nFree;         /* Number of unused bytes on the page */
+    int top;           /* First byte of the cell content area */
+    int iCellFirst;    /* First allowable cell or freeblock offset */
+    int iCellLast;     /* Last possible cell or freeblock offset */
+
+    pBt = pPage->pBt;
+
+    hdr = pPage->hdrOffset;
+    data = pPage->aData;
+    /* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating
+    ** the b-tree page type. */
+    if( decodeFlags(pPage, data[hdr]) ) return SQLITE_CORRUPT_BKPT;
+    assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
+    pPage->maskPage = (u16)(pBt->pageSize - 1);
+    pPage->nOverflow = 0;
+    usableSize = pBt->usableSize;
+    pPage->cellOffset = cellOffset = hdr + 8 + pPage->childPtrSize;
+    pPage->aDataEnd = &data[usableSize];
+    pPage->aCellIdx = &data[cellOffset];
+    pPage->aDataOfst = &data[pPage->childPtrSize];
+    /* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates
+    ** the start of the cell content area. A zero value for this integer is
+    ** interpreted as 65536. */
+    top = get2byteNotZero(&data[hdr+5]);
+    /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
+    ** number of cells on the page. */
+    pPage->nCell = get2byte(&data[hdr+3]);
+    if( pPage->nCell>MX_CELL(pBt) ){
+      /* Too many cells for a single page.  The page must be corrupt */
+      return SQLITE_CORRUPT_BKPT;
+    }
+    testcase( pPage->nCell==MX_CELL(pBt) );
+    /* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only
+    ** possible for a root page of a table that contains no rows) then the
+    ** offset to the cell content area will equal the page size minus the
+    ** bytes of reserved space. */
+    assert( pPage->nCell>0 || top==usableSize || CORRUPT_DB );
+
+    /* A malformed database page might cause us to read past the end
+    ** of page when parsing a cell.
+    **
+    ** The following block of code checks early to see if a cell extends
+    ** past the end of a page boundary and causes SQLITE_CORRUPT to be
+    ** returned if it does.
+    */
+    iCellFirst = cellOffset + 2*pPage->nCell;
+    iCellLast = usableSize - 4;
+    if( pBt->db->flags & SQLITE_CellSizeCk ){
+      int i;            /* Index into the cell pointer array */
+      int sz;           /* Size of a cell */
+
+      if( !pPage->leaf ) iCellLast--;
+      for(i=0; i<pPage->nCell; i++){
+        pc = get2byteAligned(&data[cellOffset+i*2]);
+        testcase( pc==iCellFirst );
+        testcase( pc==iCellLast );
+        if( pc<iCellFirst || pc>iCellLast ){
+          return SQLITE_CORRUPT_BKPT;
+        }
+        sz = pPage->xCellSize(pPage, &data[pc]);
+        testcase( pc+sz==usableSize );
+        if( pc+sz>usableSize ){
+          return SQLITE_CORRUPT_BKPT;
+        }
+      }
+      if( !pPage->leaf ) iCellLast++;
+    }
+
+    /* Compute the total free space on the page
+    ** EVIDENCE-OF: R-23588-34450 The two-byte integer at offset 1 gives the
+    ** start of the first freeblock on the page, or is zero if there are no
+    ** freeblocks. */
+    pc = get2byte(&data[hdr+1]);
+    nFree = data[hdr+7] + top;  /* Init nFree to non-freeblock free space */
+    if( pc>0 ){
+      u32 next, size;
+      if( pc<iCellFirst ){
+        /* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will
+        ** always be at least one cell before the first freeblock.
+        */
+        return SQLITE_CORRUPT_BKPT;
+      }
+      while( 1 ){
+        if( pc>iCellLast ){
+          return SQLITE_CORRUPT_BKPT; /* Freeblock off the end of the page */
+        }
+        next = get2byte(&data[pc]);
+        size = get2byte(&data[pc+2]);
+        nFree = nFree + size;
+        if( next<=pc+size+3 ) break;
+        pc = next;
+      }
+      if( next>0 ){
+        return SQLITE_CORRUPT_BKPT;  /* Freeblock not in ascending order */
+      }
+      if( pc+size>(unsigned int)usableSize ){
+        return SQLITE_CORRUPT_BKPT;  /* Last freeblock extends past page end */
+      }
+    }
+
+    /* At this point, nFree contains the sum of the offset to the start
+    ** of the cell-content area plus the number of free bytes within
+    ** the cell-content area. If this is greater than the usable-size
+    ** of the page, then the page must be corrupted. This check also
+    ** serves to verify that the offset to the start of the cell-content
+    ** area, according to the page header, lies within the page.
+    */
+    if( nFree>usableSize ){
+      return SQLITE_CORRUPT_BKPT;
+    }
+    pPage->nFree = (u16)(nFree - iCellFirst);
+    pPage->isInit = 1;
+  }
+  return SQLITE_OK;
+}
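+
+/* For quick reference (a summary of the fields read above, derived from
+** the surrounding code rather than adding anything new), the b-tree page
+** header that btreeInitPage() decodes looks like this:
+**
+**     hdr+0      1 byte    page type flags (see decodeFlags())
+**     hdr+1/2    2 bytes   offset of the first freeblock, or 0 if none
+**     hdr+3/4    2 bytes   number of cells on the page
+**     hdr+5/6    2 bytes   start of the cell content area (0 means 65536)
+**     hdr+7      1 byte    number of fragmented free bytes
+**     hdr+8..11  4 bytes   right-child page number (interior pages only)
+**
+** where hdr is 100 on page 1 and 0 on all other pages.
+*/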
+
+/*
+** Set up a raw page so that it looks like a database page holding
+** no entries.
+*/
+static void zeroPage(MemPage *pPage, int flags){
+  unsigned char *data = pPage->aData;
+  BtShared *pBt = pPage->pBt;
+  u8 hdr = pPage->hdrOffset;
+  u16 first;
+
+  assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno );
+  assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+  assert( sqlite3PagerGetData(pPage->pDbPage) == data );
+  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  if( pBt->btsFlags & BTS_SECURE_DELETE ){
+    memset(&data[hdr], 0, pBt->usableSize - hdr);
+  }
+  data[hdr] = (char)flags;
+  first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8);
+  memset(&data[hdr+1], 0, 4);
+  data[hdr+7] = 0;
+  put2byte(&data[hdr+5], pBt->usableSize);
+  pPage->nFree = (u16)(pBt->usableSize - first);
+  decodeFlags(pPage, flags);
+  pPage->cellOffset = first;
+  pPage->aDataEnd = &data[pBt->usableSize];
+  pPage->aCellIdx = &data[first];
+  pPage->aDataOfst = &data[pPage->childPtrSize];
+  pPage->nOverflow = 0;
+  assert( pBt->pageSize>=512 && pBt->pageSize<=65536 );
+  pPage->maskPage = (u16)(pBt->pageSize - 1);
+  pPage->nCell = 0;
+  pPage->isInit = 1;
+}
+
+
+/*
+** Convert a DbPage obtained from the pager into a MemPage used by
+** the btree layer.
+*/
+static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){
+  MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage);
+  if( pgno!=pPage->pgno ){
+    pPage->aData = sqlite3PagerGetData(pDbPage);
+    pPage->pDbPage = pDbPage;
+    pPage->pBt = pBt;
+    pPage->pgno = pgno;
+    pPage->hdrOffset = pgno==1 ? 100 : 0;
+  }
+  assert( pPage->aData==sqlite3PagerGetData(pDbPage) );
+  return pPage;
+}
+
+/*
+** Get a page from the pager.  Initialize the MemPage.pBt and
+** MemPage.aData elements if needed.  See also: btreeGetUnusedPage().
+**
+** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care
+** about the content of the page at this time.  So do not go to the disk
+** to fetch the content.  Just fill in the content with zeros for now.
+** If in the future we call sqlite3PagerWrite() on this page, that
+** means we have started to be concerned about content and the disk
+** read should occur at that point.
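+**
+** The PAGER_GET_NOCONTENT case arises, for example, when a page is about
+** to be overwritten in its entirety, so reading its stale content from
+** disk would be wasted work.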
+*/ +static int btreeGetPage( + BtShared *pBt, /* The btree */ + Pgno pgno, /* Number of the page to fetch */ + MemPage **ppPage, /* Return the page in this parameter */ + int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */ +){ + int rc; + DbPage *pDbPage; + + assert( flags==0 || flags==PAGER_GET_NOCONTENT || flags==PAGER_GET_READONLY ); + assert( sqlite3_mutex_held(pBt->mutex) ); + rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, flags); + if( rc ) return rc; + *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt); + return SQLITE_OK; +} + +/* +** Retrieve a page from the pager cache. If the requested page is not +** already in the pager cache return NULL. Initialize the MemPage.pBt and +** MemPage.aData elements if needed. +*/ +static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){ + DbPage *pDbPage; + assert( sqlite3_mutex_held(pBt->mutex) ); + pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); + if( pDbPage ){ + return btreePageFromDbPage(pDbPage, pgno, pBt); + } + return 0; +} + +/* +** Return the size of the database file in pages. If there is any kind of +** error, return ((unsigned int)-1). +*/ +static Pgno btreePagecount(BtShared *pBt){ + return pBt->nPage; +} +SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree *p){ + assert( sqlite3BtreeHoldsMutex(p) ); + assert( ((p->pBt->nPage)&0x8000000)==0 ); + return btreePagecount(p->pBt); +} + +/* +** Get a page from the pager and initialize it. +** +** If pCur!=0 then the page is being fetched as part of a moveToChild() +** call. Do additional sanity checking on the page in this case. +** And if the fetch fails, this routine must decrement pCur->iPage. +** +** The page is fetched as read-write unless pCur is not NULL and is +** a read-only cursor. +** +** If an error occurs, then *ppPage is undefined. It +** may remain unchanged, or it may be set to an invalid value. +*/ +static int getAndInitPage( + BtShared *pBt, /* The database file */ + Pgno pgno, /* Number of the page to get */ + MemPage **ppPage, /* Write the page pointer here */ + BtCursor *pCur, /* Cursor to receive the page, or NULL */ + int bReadOnly /* True for a read-only page */ +){ + int rc; + DbPage *pDbPage; + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pCur==0 || ppPage==&pCur->apPage[pCur->iPage] ); + assert( pCur==0 || bReadOnly==pCur->curPagerFlags ); + assert( pCur==0 || pCur->iPage>0 ); + + if( pgno>btreePagecount(pBt) ){ + rc = SQLITE_CORRUPT_BKPT; + goto getAndInitPage_error; + } + rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly); + if( rc ){ + goto getAndInitPage_error; + } + *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); + if( (*ppPage)->isInit==0 ){ + btreePageFromDbPage(pDbPage, pgno, pBt); + rc = btreeInitPage(*ppPage); + if( rc!=SQLITE_OK ){ + releasePage(*ppPage); + goto getAndInitPage_error; + } + } + assert( (*ppPage)->pgno==pgno ); + assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) ); + + /* If obtaining a child page for a cursor, we must verify that the page is + ** compatible with the root page. */ + if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){ + rc = SQLITE_CORRUPT_BKPT; + releasePage(*ppPage); + goto getAndInitPage_error; + } + return SQLITE_OK; + +getAndInitPage_error: + if( pCur ) pCur->iPage--; + testcase( pgno==0 ); + assert( pgno!=0 || rc==SQLITE_CORRUPT ); + return rc; +} + +/* +** Release a MemPage. This should be called once for each prior +** call to btreeGetPage. 
+*/
+static void releasePageNotNull(MemPage *pPage){
+  assert( pPage->aData );
+  assert( pPage->pBt );
+  assert( pPage->pDbPage!=0 );
+  assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage );
+  assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  sqlite3PagerUnrefNotNull(pPage->pDbPage);
+}
+static void releasePage(MemPage *pPage){
+  if( pPage ) releasePageNotNull(pPage);
+}
+
+/*
+** Get an unused page.
+**
+** This works just like btreeGetPage() with the addition:
+**
+**  *  If the page is already in use for some other purpose, immediately
+**     release it and return an SQLITE_CORRUPT error.
+**  *  Make sure the isInit flag is clear
+*/
+static int btreeGetUnusedPage(
+  BtShared *pBt,       /* The btree */
+  Pgno pgno,           /* Number of the page to fetch */
+  MemPage **ppPage,    /* Return the page in this parameter */
+  int flags            /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */
+){
+  int rc = btreeGetPage(pBt, pgno, ppPage, flags);
+  if( rc==SQLITE_OK ){
+    if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){
+      releasePage(*ppPage);
+      *ppPage = 0;
+      return SQLITE_CORRUPT_BKPT;
+    }
+    (*ppPage)->isInit = 0;
+  }else{
+    *ppPage = 0;
+  }
+  return rc;
+}
+
+
+/*
+** During a rollback, when the pager reloads information into the cache
+** so that the cache is restored to its original state at the start of
+** the transaction, for each page restored this routine is called.
+**
+** This routine needs to reset the extra data section at the end of the
+** page to agree with the restored data.
+*/
+static void pageReinit(DbPage *pData){
+  MemPage *pPage;
+  pPage = (MemPage *)sqlite3PagerGetExtra(pData);
+  assert( sqlite3PagerPageRefcount(pData)>0 );
+  if( pPage->isInit ){
+    assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+    pPage->isInit = 0;
+    if( sqlite3PagerPageRefcount(pData)>1 ){
+      /* pPage might not be a btree page;  it might be an overflow page
+      ** or ptrmap page or a free page.  In those cases, the following
+      ** call to btreeInitPage() will likely return SQLITE_CORRUPT.
+      ** But no harm is done by this.  And it is very important that
+      ** btreeInitPage() be called on every btree page so we make
+      ** the call for every page that comes in for re-initing. */
+      btreeInitPage(pPage);
+    }
+  }
+}
+
+/*
+** Invoke the busy handler for a btree.
+*/
+static int btreeInvokeBusyHandler(void *pArg){
+  BtShared *pBt = (BtShared*)pArg;
+  assert( pBt->db );
+  assert( sqlite3_mutex_held(pBt->db->mutex) );
+  return sqlite3InvokeBusyHandler(&pBt->db->busyHandler);
+}
+
+/*
+** Open a database file.
+**
+** zFilename is the name of the database file.  If zFilename is NULL
+** then an ephemeral database is created.  The ephemeral database might
+** be exclusively in memory, or it might use a disk-based memory cache.
+** Either way, the ephemeral database will be automatically deleted
+** when sqlite3BtreeClose() is called.
+**
+** If zFilename is ":memory:" then an in-memory database is created
+** that is automatically destroyed when it is closed.
+**
+** The "flags" parameter is a bitmask that might contain bits like
+** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY.
+**
+** If the database is already opened in the same database connection
+** and we are in shared cache mode, then the open will fail with an
+** SQLITE_CONSTRAINT error.  We cannot allow two or more BtShared
+** objects in the same database connection since doing so will lead
+** to problems with locking.
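+**
+** (Shared-cache mode itself is requested per connection by setting the
+** SQLITE_OPEN_SHAREDCACHE bit in vfsFlags, which is tested below.)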
+*/ +SQLITE_PRIVATE int sqlite3BtreeOpen( + sqlite3_vfs *pVfs, /* VFS to use for this b-tree */ + const char *zFilename, /* Name of the file containing the BTree database */ + sqlite3 *db, /* Associated database handle */ + Btree **ppBtree, /* Pointer to new Btree object written here */ + int flags, /* Options */ + int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */ +){ + BtShared *pBt = 0; /* Shared part of btree structure */ + Btree *p; /* Handle to return */ + sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. Ticket #3537 */ + int rc = SQLITE_OK; /* Result code from this function */ + u8 nReserve; /* Byte of unused space on each page */ + unsigned char zDbHeader[100]; /* Database header content */ + + /* True if opening an ephemeral, temporary database */ + const int isTempDb = zFilename==0 || zFilename[0]==0; + + /* Set the variable isMemdb to true for an in-memory database, or + ** false for a file-based database. + */ +#ifdef SQLITE_OMIT_MEMORYDB + const int isMemdb = 0; +#else + const int isMemdb = (zFilename && strcmp(zFilename, ":memory:")==0) + || (isTempDb && sqlite3TempInMemory(db)) + || (vfsFlags & SQLITE_OPEN_MEMORY)!=0; +#endif + + assert( db!=0 ); + assert( pVfs!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + assert( (flags&0xff)==flags ); /* flags fit in 8 bits */ + + /* Only a BTREE_SINGLE database can be BTREE_UNORDERED */ + assert( (flags & BTREE_UNORDERED)==0 || (flags & BTREE_SINGLE)!=0 ); + + /* A BTREE_SINGLE database is always a temporary and/or ephemeral */ + assert( (flags & BTREE_SINGLE)==0 || isTempDb ); + + if( isMemdb ){ + flags |= BTREE_MEMORY; + } + if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (isMemdb || isTempDb) ){ + vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB; + } + p = sqlite3MallocZero(sizeof(Btree)); + if( !p ){ + return SQLITE_NOMEM_BKPT; + } + p->inTrans = TRANS_NONE; + p->db = db; +#ifndef SQLITE_OMIT_SHARED_CACHE + p->lock.pBtree = p; + p->lock.iTable = 1; +#endif + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) + /* + ** If this Btree is a candidate for shared cache, try to find an + ** existing BtShared object that we can share with + */ + if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){ + if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){ + int nFilename = sqlite3Strlen30(zFilename)+1; + int nFullPathname = pVfs->mxPathname+1; + char *zFullPathname = sqlite3Malloc(MAX(nFullPathname,nFilename)); + MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) + + p->sharable = 1; + if( !zFullPathname ){ + sqlite3_free(p); + return SQLITE_NOMEM_BKPT; + } + if( isMemdb ){ + memcpy(zFullPathname, zFilename, nFilename); + }else{ + rc = sqlite3OsFullPathname(pVfs, zFilename, + nFullPathname, zFullPathname); + if( rc ){ + sqlite3_free(zFullPathname); + sqlite3_free(p); + return rc; + } + } +#if SQLITE_THREADSAFE + mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN); + sqlite3_mutex_enter(mutexOpen); + mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); + sqlite3_mutex_enter(mutexShared); +#endif + for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){ + assert( pBt->nRef>0 ); + if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager, 0)) + && sqlite3PagerVfs(pBt->pPager)==pVfs ){ + int iDb; + for(iDb=db->nDb-1; iDb>=0; iDb--){ + Btree *pExisting = db->aDb[iDb].pBt; + if( pExisting && pExisting->pBt==pBt ){ + sqlite3_mutex_leave(mutexShared); + sqlite3_mutex_leave(mutexOpen); + sqlite3_free(zFullPathname); + sqlite3_free(p); + return SQLITE_CONSTRAINT; + } 
+ } + p->pBt = pBt; + pBt->nRef++; + break; + } + } + sqlite3_mutex_leave(mutexShared); + sqlite3_free(zFullPathname); + } +#ifdef SQLITE_DEBUG + else{ + /* In debug mode, we mark all persistent databases as sharable + ** even when they are not. This exercises the locking code and + ** gives more opportunity for asserts(sqlite3_mutex_held()) + ** statements to find locking problems. + */ + p->sharable = 1; + } +#endif + } +#endif + if( pBt==0 ){ + /* + ** The following asserts make sure that structures used by the btree are + ** the right size. This is to guard against size changes that result + ** when compiling on a different architecture. + */ + assert( sizeof(i64)==8 ); + assert( sizeof(u64)==8 ); + assert( sizeof(u32)==4 ); + assert( sizeof(u16)==2 ); + assert( sizeof(Pgno)==4 ); + + pBt = sqlite3MallocZero( sizeof(*pBt) ); + if( pBt==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto btree_open_out; + } + rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, + sizeof(MemPage), flags, vfsFlags, pageReinit); + if( rc==SQLITE_OK ){ + sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap); + rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); + } + if( rc!=SQLITE_OK ){ + goto btree_open_out; + } + pBt->openFlags = (u8)flags; + pBt->db = db; + sqlite3PagerSetBusyhandler(pBt->pPager, btreeInvokeBusyHandler, pBt); + p->pBt = pBt; + + pBt->pCursor = 0; + pBt->pPage1 = 0; + if( sqlite3PagerIsreadonly(pBt->pPager) ) pBt->btsFlags |= BTS_READ_ONLY; +#ifdef SQLITE_SECURE_DELETE + pBt->btsFlags |= BTS_SECURE_DELETE; +#endif + /* EVIDENCE-OF: R-51873-39618 The page size for a database file is + ** determined by the 2-byte integer located at an offset of 16 bytes from + ** the beginning of the database file. */ + pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16); + if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE + || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ + pBt->pageSize = 0; +#ifndef SQLITE_OMIT_AUTOVACUUM + /* If the magic name ":memory:" will create an in-memory database, then + ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if + ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if + ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a + ** regular file-name. In this case the auto-vacuum applies as per normal. + */ + if( zFilename && !isMemdb ){ + pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); + pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0); + } +#endif + nReserve = 0; + }else{ + /* EVIDENCE-OF: R-37497-42412 The size of the reserved region is + ** determined by the one-byte unsigned integer found at an offset of 20 + ** into the database file header. */ + nReserve = zDbHeader[20]; + pBt->btsFlags |= BTS_PAGESIZE_FIXED; +#ifndef SQLITE_OMIT_AUTOVACUUM + pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); + pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); +#endif + } + rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); + if( rc ) goto btree_open_out; + pBt->usableSize = pBt->pageSize - nReserve; + assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) + /* Add the new BtShared object to the linked list sharable BtShareds. 
+  */
+  pBt->nRef = 1;
+  if( p->sharable ){
+    MUTEX_LOGIC( sqlite3_mutex *mutexShared; )
+    MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);)
+    if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){
+      pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST);
+      if( pBt->mutex==0 ){
+        rc = SQLITE_NOMEM_BKPT;
+        goto btree_open_out;
+      }
+    }
+    sqlite3_mutex_enter(mutexShared);
+    pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList);
+    GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt;
+    sqlite3_mutex_leave(mutexShared);
+  }
+#endif
+  }
+
+#if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO)
+  /* If the new Btree uses a sharable pBtShared, then link the new
+  ** Btree into the list of all sharable Btrees for the same connection.
+  ** The list is kept in ascending order by pBt address.
+  */
+  if( p->sharable ){
+    int i;
+    Btree *pSib;
+    for(i=0; i<db->nDb; i++){
+      if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){
+        while( pSib->pPrev ){ pSib = pSib->pPrev; }
+        if( (uptr)p->pBt<(uptr)pSib->pBt ){
+          p->pNext = pSib;
+          p->pPrev = 0;
+          pSib->pPrev = p;
+        }else{
+          while( pSib->pNext && (uptr)pSib->pNext->pBt<(uptr)p->pBt ){
+            pSib = pSib->pNext;
+          }
+          p->pNext = pSib->pNext;
+          p->pPrev = pSib;
+          if( p->pNext ){
+            p->pNext->pPrev = p;
+          }
+          pSib->pNext = p;
+        }
+        break;
+      }
+    }
+  }
+#endif
+  *ppBtree = p;
+
+btree_open_out:
+  if( rc!=SQLITE_OK ){
+    if( pBt && pBt->pPager ){
+      sqlite3PagerClose(pBt->pPager, 0);
+    }
+    sqlite3_free(pBt);
+    sqlite3_free(p);
+    *ppBtree = 0;
+  }else{
+    sqlite3_file *pFile;
+
+    /* If the B-Tree was successfully opened, set the pager-cache size to the
+    ** default value. Except, when opening on an existing shared pager-cache,
+    ** do not change the pager-cache size.
+    */
+    if( sqlite3BtreeSchema(p, 0, 0)==0 ){
+      sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE);
+    }
+
+    pFile = sqlite3PagerFile(pBt->pPager);
+    if( pFile->pMethods ){
+      sqlite3OsFileControlHint(pFile, SQLITE_FCNTL_PDB, (void*)&pBt->db);
+    }
+  }
+  if( mutexOpen ){
+    assert( sqlite3_mutex_held(mutexOpen) );
+    sqlite3_mutex_leave(mutexOpen);
+  }
+  assert( rc!=SQLITE_OK || sqlite3BtreeConnectionCount(*ppBtree)>0 );
+  return rc;
+}
+
+/*
+** Decrement the BtShared.nRef counter.  When it reaches zero,
+** remove the BtShared structure from the sharing list.  Return
+** true if the BtShared.nRef counter reaches zero and return
+** false if it is still positive.
+*/
+static int removeFromSharingList(BtShared *pBt){
+#ifndef SQLITE_OMIT_SHARED_CACHE
+  MUTEX_LOGIC( sqlite3_mutex *pMaster; )
+  BtShared *pList;
+  int removed = 0;
+
+  assert( sqlite3_mutex_notheld(pBt->mutex) );
+  MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
+  sqlite3_mutex_enter(pMaster);
+  pBt->nRef--;
+  if( pBt->nRef<=0 ){
+    if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){
+      GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext;
+    }else{
+      pList = GLOBAL(BtShared*,sqlite3SharedCacheList);
+      while( ALWAYS(pList) && pList->pNext!=pBt ){
+        pList=pList->pNext;
+      }
+      if( ALWAYS(pList) ){
+        pList->pNext = pBt->pNext;
+      }
+    }
+    if( SQLITE_THREADSAFE ){
+      sqlite3_mutex_free(pBt->mutex);
+    }
+    removed = 1;
+  }
+  sqlite3_mutex_leave(pMaster);
+  return removed;
+#else
+  return 1;
+#endif
+}
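+
+/* Illustrative diagram (editor's addition, derived from allocateTempSpace()
+** and freeTempSpace() below): the temp-space allocation is handed out with
+** a 4-byte offset so that a left-child pointer can later be written
+** immediately in front of a formatted cell without moving the cell image:
+**
+**     sqlite3PageMalloc() result
+**     |
+**     v
+**     [ 4-byte prefix ][ formatted cell image ... ]
+**                      ^
+**                      |
+**                      pBt->pTmpSpace
+*/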
+/*
+** Make sure pBt->pTmpSpace points to an allocation of
+** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child
+** pointer.
+*/
+static void allocateTempSpace(BtShared *pBt){
+  if( !pBt->pTmpSpace ){
+    pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize );
+
+    /* One of the uses of pBt->pTmpSpace is to format cells before
+    ** inserting them into a leaf page (function fillInCell()). If
+    ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes
+    ** by the various routines that manipulate binary cells. Which
+    ** can mean that fillInCell() only initializes the first 2 or 3
+    ** bytes of pTmpSpace, but that the first 4 bytes are copied from
+    ** it into a database page. This is not actually a problem, but it
+    ** does cause a valgrind error when the 1 or 2 bytes of uninitialized
+    ** data is passed to system call write(). So to avoid this error,
+    ** zero the first 4 bytes of temp space here.
+    **
+    ** Also:  Provide four bytes of initialized space before the
+    ** beginning of pTmpSpace as an area available to prepend the
+    ** left-child pointer to the beginning of a cell.
+    */
+    if( pBt->pTmpSpace ){
+      memset(pBt->pTmpSpace, 0, 8);
+      pBt->pTmpSpace += 4;
+    }
+  }
+}
+
+/*
+** Free the pBt->pTmpSpace allocation
+*/
+static void freeTempSpace(BtShared *pBt){
+  if( pBt->pTmpSpace ){
+    pBt->pTmpSpace -= 4;
+    sqlite3PageFree(pBt->pTmpSpace);
+    pBt->pTmpSpace = 0;
+  }
+}
+
+/*
+** Close an open database and invalidate all cursors.
+*/
+SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){
+  BtShared *pBt = p->pBt;
+  BtCursor *pCur;
+
+  /* Close all cursors opened via this handle.  */
+  assert( sqlite3_mutex_held(p->db->mutex) );
+  sqlite3BtreeEnter(p);
+  pCur = pBt->pCursor;
+  while( pCur ){
+    BtCursor *pTmp = pCur;
+    pCur = pCur->pNext;
+    if( pTmp->pBtree==p ){
+      sqlite3BtreeCloseCursor(pTmp);
+    }
+  }
+
+  /* Rollback any active transaction and free the handle structure.
+  ** The call to sqlite3BtreeRollback() drops any table-locks held by
+  ** this handle.
+  */
+  sqlite3BtreeRollback(p, SQLITE_OK, 0);
+  sqlite3BtreeLeave(p);
+
+  /* If there are still other outstanding references to the shared-btree
+  ** structure, return now. The remainder of this procedure cleans
+  ** up the shared-btree.
+  */
+  assert( p->wantToLock==0 && p->locked==0 );
+  if( !p->sharable || removeFromSharingList(pBt) ){
+    /* The pBt is no longer on the sharing list, so we can access
+    ** it without having to hold the mutex.
+    **
+    ** Clean out and delete the BtShared object.
+    */
+    assert( !pBt->pCursor );
+    sqlite3PagerClose(pBt->pPager, p->db);
+    if( pBt->xFreeSchema && pBt->pSchema ){
+      pBt->xFreeSchema(pBt->pSchema);
+    }
+    sqlite3DbFree(0, pBt->pSchema);
+    freeTempSpace(pBt);
+    sqlite3_free(pBt);
+  }
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+  assert( p->wantToLock==0 );
+  assert( p->locked==0 );
+  if( p->pPrev ) p->pPrev->pNext = p->pNext;
+  if( p->pNext ) p->pNext->pPrev = p->pPrev;
+#endif
+
+  sqlite3_free(p);
+  return SQLITE_OK;
+}
+
+/*
+** Change the "soft" limit on the number of pages in the cache.
+** Unused and unmodified pages will be recycled when the number of
+** pages in the cache exceeds this soft limit.  But the size of the
+** cache is allowed to grow larger than this limit if it contains
+** dirty pages or pages still in active use.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){
+  BtShared *pBt = p->pBt;
+  assert( sqlite3_mutex_held(p->db->mutex) );
+  sqlite3BtreeEnter(p);
+  sqlite3PagerSetCachesize(pBt->pPager, mxPage);
+  sqlite3BtreeLeave(p);
+  return SQLITE_OK;
+}
+
+/*
+** Change the "spill" limit on the number of pages in the cache.
+** If the number of pages exceeds this limit during a write transaction, +** the pager might attempt to "spill" pages to the journal early in +** order to free up memory. +** +** The value returned is the current spill size. If zero is passed +** as an argument, no changes are made to the spill size setting, so +** using mxPage of 0 is a way to query the current spill size. +*/ +SQLITE_PRIVATE int sqlite3BtreeSetSpillSize(Btree *p, int mxPage){ + BtShared *pBt = p->pBt; + int res; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); + res = sqlite3PagerSetSpillsize(pBt->pPager, mxPage); + sqlite3BtreeLeave(p); + return res; +} + +#if SQLITE_MAX_MMAP_SIZE>0 +/* +** Change the limit on the amount of the database file that may be +** memory mapped. +*/ +SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){ + BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); + sqlite3PagerSetMmapLimit(pBt->pPager, szMmap); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} +#endif /* SQLITE_MAX_MMAP_SIZE>0 */ + +/* +** Change the way data is synced to disk in order to increase or decrease +** how well the database resists damage due to OS crashes and power +** failures. Level 1 is the same as asynchronous (no syncs() occur and +** there is a high probability of damage) Level 2 is the default. There +** is a very low but non-zero probability of damage. Level 3 reduces the +** probability of damage to near zero but with a write performance reduction. +*/ +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags( + Btree *p, /* The btree to set the safety level on */ + unsigned pgFlags /* Various PAGER_* flags */ +){ + BtShared *pBt = p->pBt; + assert( sqlite3_mutex_held(p->db->mutex) ); + sqlite3BtreeEnter(p); + sqlite3PagerSetFlags(pBt->pPager, pgFlags); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} +#endif + +/* +** Change the default pages size and the number of reserved bytes per page. +** Or, if the page size has already been fixed, return SQLITE_READONLY +** without changing anything. +** +** The page size must be a power of 2 between 512 and 65536. If the page +** size supplied does not meet this constraint then the page size is not +** changed. +** +** Page sizes are constrained to be a power of two so that the region +** of the database file used for locking (beginning at PENDING_BYTE, +** the first byte past the 1GB boundary, 0x40000000) needs to occur +** at the beginning of a page. +** +** If parameter nReserve is less than zero, then the number of reserved +** bytes per page is left unchanged. +** +** If the iFix!=0 then the BTS_PAGESIZE_FIXED flag is set so that the page size +** and autovacuum mode can no longer be changed. 
+*/
+SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){
+  int rc = SQLITE_OK;
+  BtShared *pBt = p->pBt;
+  assert( nReserve>=-1 && nReserve<=255 );
+  sqlite3BtreeEnter(p);
+#if SQLITE_HAS_CODEC
+  if( nReserve>pBt->optimalReserve ) pBt->optimalReserve = (u8)nReserve;
+#endif
+  if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
+    sqlite3BtreeLeave(p);
+    return SQLITE_READONLY;
+  }
+  if( nReserve<0 ){
+    nReserve = pBt->pageSize - pBt->usableSize;
+  }
+  assert( nReserve>=0 && nReserve<=255 );
+  if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE &&
+        ((pageSize-1)&pageSize)==0 ){
+    assert( (pageSize & 7)==0 );
+    assert( !pBt->pCursor );
+    pBt->pageSize = (u32)pageSize;
+    freeTempSpace(pBt);
+  }
+  rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve);
+  pBt->usableSize = pBt->pageSize - (u16)nReserve;
+  if( iFix ) pBt->btsFlags |= BTS_PAGESIZE_FIXED;
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+/*
+** Return the currently defined page size
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){
+  return p->pBt->pageSize;
+}
+
+/*
+** This function is similar to sqlite3BtreeGetReserve(), except that it
+** may only be called if it is guaranteed that the b-tree mutex is already
+** held.
+**
+** This is useful in one special case in the backup API code where it is
+** known that the shared b-tree mutex is held, but the mutex on the
+** database handle that owns *p is not. In this case if sqlite3BtreeEnter()
+** were to be called, it might collide with some other operation on the
+** database handle that owns *p, causing undefined behavior.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){
+  int n;
+  assert( sqlite3_mutex_held(p->pBt->mutex) );
+  n = p->pBt->pageSize - p->pBt->usableSize;
+  return n;
+}
+
+/*
+** Return the number of bytes of space at the end of every page that
+** are intentionally left unused.  This is the "reserved" space that is
+** sometimes used by extensions.
+**
+** If SQLITE_HAS_CODEC is defined then the number returned is the
+** greater of the current reserved space and the maximum requested
+** reserve space.
+*/
+SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree *p){
+  int n;
+  sqlite3BtreeEnter(p);
+  n = sqlite3BtreeGetReserveNoMutex(p);
+#ifdef SQLITE_HAS_CODEC
+  if( n<p->pBt->optimalReserve ) n = p->pBt->optimalReserve;
+#endif
+  sqlite3BtreeLeave(p);
+  return n;
+}
+
+
+/*
+** Set the maximum page count for a database if mxPage is positive.
+** No changes are made if mxPage is 0 or negative.
+** Regardless of the value of mxPage, return the maximum page count.
+*/
+SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){
+  int n;
+  sqlite3BtreeEnter(p);
+  n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage);
+  sqlite3BtreeLeave(p);
+  return n;
+}
+
+/*
+** Set the BTS_SECURE_DELETE flag if newFlag is 0 or 1.  If newFlag is -1,
+** then make no changes.  Always return the value of the BTS_SECURE_DELETE
+** setting after the change.
+*/
+SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){
+  int b;
+  if( p==0 ) return 0;
+  sqlite3BtreeEnter(p);
+  if( newFlag>=0 ){
+    p->pBt->btsFlags &= ~BTS_SECURE_DELETE;
+    if( newFlag ) p->pBt->btsFlags |= BTS_SECURE_DELETE;
+  }
+  b = (p->pBt->btsFlags & BTS_SECURE_DELETE)!=0;
+  sqlite3BtreeLeave(p);
+  return b;
+}
+
+/*
+** Change the 'auto-vacuum' property of the database. If the 'autoVacuum'
+** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it
+** is disabled.
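+** Passing 2 enables incremental-vacuum mode, in which free pages are
+** reclaimed only by explicit calls to sqlite3BtreeIncrVacuum().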
The default value for the auto-vacuum property is +** determined by the SQLITE_DEFAULT_AUTOVACUUM macro. +*/ +SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){ +#ifdef SQLITE_OMIT_AUTOVACUUM + return SQLITE_READONLY; +#else + BtShared *pBt = p->pBt; + int rc = SQLITE_OK; + u8 av = (u8)autoVacuum; + + sqlite3BtreeEnter(p); + if( (pBt->btsFlags & BTS_PAGESIZE_FIXED)!=0 && (av ?1:0)!=pBt->autoVacuum ){ + rc = SQLITE_READONLY; + }else{ + pBt->autoVacuum = av ?1:0; + pBt->incrVacuum = av==2 ?1:0; + } + sqlite3BtreeLeave(p); + return rc; +#endif +} + +/* +** Return the value of the 'auto-vacuum' property. If auto-vacuum is +** enabled 1 is returned. Otherwise 0. +*/ +SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *p){ +#ifdef SQLITE_OMIT_AUTOVACUUM + return BTREE_AUTOVACUUM_NONE; +#else + int rc; + sqlite3BtreeEnter(p); + rc = ( + (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE: + (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL: + BTREE_AUTOVACUUM_INCR + ); + sqlite3BtreeLeave(p); + return rc; +#endif +} + + +/* +** Get a reference to pPage1 of the database file. This will +** also acquire a readlock on that file. +** +** SQLITE_OK is returned on success. If the file is not a +** well-formed database file, then SQLITE_CORRUPT is returned. +** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM +** is returned if we run out of memory. +*/ +static int lockBtree(BtShared *pBt){ + int rc; /* Result code from subfunctions */ + MemPage *pPage1; /* Page 1 of the database file */ + int nPage; /* Number of pages in the database */ + int nPageFile = 0; /* Number of pages in the database file */ + int nPageHeader; /* Number of pages in the database according to hdr */ + + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pBt->pPage1==0 ); + rc = sqlite3PagerSharedLock(pBt->pPager); + if( rc!=SQLITE_OK ) return rc; + rc = btreeGetPage(pBt, 1, &pPage1, 0); + if( rc!=SQLITE_OK ) return rc; + + /* Do some checking to help insure the file we opened really is + ** a valid database file. + */ + nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData); + sqlite3PagerPagecount(pBt->pPager, &nPageFile); + if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){ + nPage = nPageFile; + } + if( nPage>0 ){ + u32 pageSize; + u32 usableSize; + u8 *page1 = pPage1->aData; + rc = SQLITE_NOTADB; + /* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins + ** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d + ** 61 74 20 33 00. */ + if( memcmp(page1, zMagicHeader, 16)!=0 ){ + goto page1_init_failed; + } + +#ifdef SQLITE_OMIT_WAL + if( page1[18]>1 ){ + pBt->btsFlags |= BTS_READ_ONLY; + } + if( page1[19]>1 ){ + goto page1_init_failed; + } +#else + if( page1[18]>2 ){ + pBt->btsFlags |= BTS_READ_ONLY; + } + if( page1[19]>2 ){ + goto page1_init_failed; + } + + /* If the write version is set to 2, this database should be accessed + ** in WAL mode. If the log is not already open, open it now. Then + ** return SQLITE_OK and return without populating BtShared.pPage1. + ** The caller detects this and calls this function again. This is + ** required as the version of page 1 currently in the page1 buffer + ** may not be the latest version - there may be a newer one in the log + ** file. 
+ */ + if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){ + int isOpen = 0; + rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen); + if( rc!=SQLITE_OK ){ + goto page1_init_failed; + }else{ +#if SQLITE_DEFAULT_SYNCHRONOUS!=SQLITE_DEFAULT_WAL_SYNCHRONOUS + sqlite3 *db; + Db *pDb; + if( (db=pBt->db)!=0 && (pDb=db->aDb)!=0 ){ + while( pDb->pBt==0 || pDb->pBt->pBt!=pBt ){ pDb++; } + if( pDb->bSyncSet==0 + && pDb->safety_level==SQLITE_DEFAULT_SYNCHRONOUS+1 + ){ + pDb->safety_level = SQLITE_DEFAULT_WAL_SYNCHRONOUS+1; + sqlite3PagerSetFlags(pBt->pPager, + pDb->safety_level | (db->flags & PAGER_FLAGS_MASK)); + } + } +#endif + if( isOpen==0 ){ + releasePage(pPage1); + return SQLITE_OK; + } + } + rc = SQLITE_NOTADB; + } +#endif + + /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload + ** fractions and the leaf payload fraction values must be 64, 32, and 32. + ** + ** The original design allowed these amounts to vary, but as of + ** version 3.6.0, we require them to be fixed. + */ + if( memcmp(&page1[21], "\100\040\040",3)!=0 ){ + goto page1_init_failed; + } + /* EVIDENCE-OF: R-51873-39618 The page size for a database file is + ** determined by the 2-byte integer located at an offset of 16 bytes from + ** the beginning of the database file. */ + pageSize = (page1[16]<<8) | (page1[17]<<16); + /* EVIDENCE-OF: R-25008-21688 The size of a page is a power of two + ** between 512 and 65536 inclusive. */ + if( ((pageSize-1)&pageSize)!=0 + || pageSize>SQLITE_MAX_PAGE_SIZE + || pageSize<=256 + ){ + goto page1_init_failed; + } + assert( (pageSize & 7)==0 ); + /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte + ** integer at offset 20 is the number of bytes of space at the end of + ** each page to reserve for extensions. + ** + ** EVIDENCE-OF: R-37497-42412 The size of the reserved region is + ** determined by the one-byte unsigned integer found at an offset of 20 + ** into the database file header. */ + usableSize = pageSize - page1[20]; + if( (u32)pageSize!=pBt->pageSize ){ + /* After reading the first page of the database assuming a page size + ** of BtShared.pageSize, we have discovered that the page-size is + ** actually pageSize. Unlock the database, leave pBt->pPage1 at + ** zero and return SQLITE_OK. The caller will call this function + ** again with the correct page-size. + */ + releasePage(pPage1); + pBt->usableSize = usableSize; + pBt->pageSize = pageSize; + freeTempSpace(pBt); + rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, + pageSize-usableSize); + return rc; + } + if( (pBt->db->flags & SQLITE_RecoveryMode)==0 && nPage>nPageFile ){ + rc = SQLITE_CORRUPT_BKPT; + goto page1_init_failed; + } + /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to + ** be less than 480. In other words, if the page size is 512, then the + ** reserved space size cannot exceed 32. */ + if( usableSize<480 ){ + goto page1_init_failed; + } + pBt->pageSize = pageSize; + pBt->usableSize = usableSize; +#ifndef SQLITE_OMIT_AUTOVACUUM + pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0); + pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0); +#endif + } + + /* maxLocal is the maximum amount of payload to store locally for + ** a cell. Make sure it is small enough so that at least minFanout + ** cells can will fit on one page. We assume a 10-byte page header. 
+ ** Besides the payload, the cell must store: + ** 2-byte pointer to the cell + ** 4-byte child pointer + ** 9-byte nKey value + ** 4-byte nData value + ** 4-byte overflow page pointer + ** So a cell consists of a 2-byte pointer, a header which is as much as + ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow + ** page pointer. + */ + pBt->maxLocal = (u16)((pBt->usableSize-12)*64/255 - 23); + pBt->minLocal = (u16)((pBt->usableSize-12)*32/255 - 23); + pBt->maxLeaf = (u16)(pBt->usableSize - 35); + pBt->minLeaf = (u16)((pBt->usableSize-12)*32/255 - 23); + if( pBt->maxLocal>127 ){ + pBt->max1bytePayload = 127; + }else{ + pBt->max1bytePayload = (u8)pBt->maxLocal; + } + assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); + pBt->pPage1 = pPage1; + pBt->nPage = nPage; + return SQLITE_OK; + +page1_init_failed: + releasePage(pPage1); + pBt->pPage1 = 0; + return rc; +} + +#ifndef NDEBUG +/* +** Return the number of cursors open on pBt. This is for use +** in assert() expressions, so it is only compiled if NDEBUG is not +** defined. +** +** Only write cursors are counted if wrOnly is true. If wrOnly is +** false then all cursors are counted. +** +** For the purposes of this routine, a cursor is any cursor that +** is capable of reading or writing to the database. Cursors that +** have been tripped into the CURSOR_FAULT state are not counted. +*/ +static int countValidCursors(BtShared *pBt, int wrOnly){ + BtCursor *pCur; + int r = 0; + for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ + if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0) + && pCur->eState!=CURSOR_FAULT ) r++; + } + return r; +} +#endif + +/* +** If there are no outstanding cursors and we are not in the middle +** of a transaction but there is a read lock on the database, then +** this routine unrefs the first page of the database file which +** has the effect of releasing the read lock. +** +** If there is a transaction in progress, this routine is a no-op. +*/ +static void unlockBtreeIfUnused(BtShared *pBt){ + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE ); + if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){ + MemPage *pPage1 = pBt->pPage1; + assert( pPage1->aData ); + assert( sqlite3PagerRefcount(pBt->pPager)==1 ); + pBt->pPage1 = 0; + releasePageNotNull(pPage1); + } +} + +/* +** If pBt points to an empty file then convert that empty file +** into a new empty database by initializing the first page of +** the database. 
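+** The code below writes the 100-byte database header: the magic string at
+** offset 0, the page-size encoding at offsets 16 and 17, the read/write
+** format versions at 18 and 19, the reserved-bytes count at offset 20, and
+** the fixed payload fractions 64/32/32 at offsets 21 through 23.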
+*/ +static int newDatabase(BtShared *pBt){ + MemPage *pP1; + unsigned char *data; + int rc; + + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pBt->nPage>0 ){ + return SQLITE_OK; + } + pP1 = pBt->pPage1; + assert( pP1!=0 ); + data = pP1->aData; + rc = sqlite3PagerWrite(pP1->pDbPage); + if( rc ) return rc; + memcpy(data, zMagicHeader, sizeof(zMagicHeader)); + assert( sizeof(zMagicHeader)==16 ); + data[16] = (u8)((pBt->pageSize>>8)&0xff); + data[17] = (u8)((pBt->pageSize>>16)&0xff); + data[18] = 1; + data[19] = 1; + assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize); + data[20] = (u8)(pBt->pageSize - pBt->usableSize); + data[21] = 64; + data[22] = 32; + data[23] = 32; + memset(&data[24], 0, 100-24); + zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA ); + pBt->btsFlags |= BTS_PAGESIZE_FIXED; +#ifndef SQLITE_OMIT_AUTOVACUUM + assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 ); + assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 ); + put4byte(&data[36 + 4*4], pBt->autoVacuum); + put4byte(&data[36 + 7*4], pBt->incrVacuum); +#endif + pBt->nPage = 1; + data[31] = 1; + return SQLITE_OK; +} + +/* +** Initialize the first page of the database file (creating a database +** consisting of a single page and no schema objects). Return SQLITE_OK +** if successful, or an SQLite error code otherwise. +*/ +SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){ + int rc; + sqlite3BtreeEnter(p); + p->pBt->nPage = 0; + rc = newDatabase(p->pBt); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Attempt to start a new transaction. A write-transaction +** is started if the second argument is nonzero, otherwise a read- +** transaction. If the second argument is 2 or more and exclusive +** transaction is started, meaning that no other process is allowed +** to access the database. A preexisting transaction may not be +** upgraded to exclusive by calling this routine a second time - the +** exclusivity flag only works for a new transaction. +** +** A write-transaction must be started before attempting any +** changes to the database. None of the following routines +** will work unless a transaction is started first: +** +** sqlite3BtreeCreateTable() +** sqlite3BtreeCreateIndex() +** sqlite3BtreeClearTable() +** sqlite3BtreeDropTable() +** sqlite3BtreeInsert() +** sqlite3BtreeDelete() +** sqlite3BtreeUpdateMeta() +** +** If an initial attempt to acquire the lock fails because of lock contention +** and the database was previously unlocked, then invoke the busy handler +** if there is one. But if there was previously a read-lock, do not +** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is +** returned when there is already a read-lock in order to avoid a deadlock. +** +** Suppose there are two processes A and B. A has a read lock and B has +** a reserved lock. B tries to promote to exclusive but is blocked because +** of A's read lock. A tries to promote to reserved but is blocked by B. +** One or the other of the two processes must give way or there can be +** no progress. By returning SQLITE_BUSY and not invoking the busy callback +** when A already has a read lock, we encourage A to give up and let B +** proceed. +*/ +SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ + BtShared *pBt = p->pBt; + int rc = SQLITE_OK; + + sqlite3BtreeEnter(p); + btreeIntegrity(p); + + /* If the btree is already in a write-transaction, or it + ** is already in a read-transaction and a read-transaction + ** is requested, this is a no-op. 
+ */ + if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){ + goto trans_begun; + } + assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 ); + + /* Write transactions are not possible on a read-only database */ + if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){ + rc = SQLITE_READONLY; + goto trans_begun; + } + +#ifndef SQLITE_OMIT_SHARED_CACHE + { + sqlite3 *pBlock = 0; + /* If another database handle has already opened a write transaction + ** on this shared-btree structure and a second write transaction is + ** requested, return SQLITE_LOCKED. + */ + if( (wrflag && pBt->inTransaction==TRANS_WRITE) + || (pBt->btsFlags & BTS_PENDING)!=0 + ){ + pBlock = pBt->pWriter->db; + }else if( wrflag>1 ){ + BtLock *pIter; + for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ + if( pIter->pBtree!=p ){ + pBlock = pIter->pBtree->db; + break; + } + } + } + if( pBlock ){ + sqlite3ConnectionBlocked(p->db, pBlock); + rc = SQLITE_LOCKED_SHAREDCACHE; + goto trans_begun; + } + } +#endif + + /* Any read-only or read-write transaction implies a read-lock on + ** page 1. So if some other shared-cache client already has a write-lock + ** on page 1, the transaction cannot be opened. */ + rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); + if( SQLITE_OK!=rc ) goto trans_begun; + + pBt->btsFlags &= ~BTS_INITIALLY_EMPTY; + if( pBt->nPage==0 ) pBt->btsFlags |= BTS_INITIALLY_EMPTY; + do { + /* Call lockBtree() until either pBt->pPage1 is populated or + ** lockBtree() returns something other than SQLITE_OK. lockBtree() + ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after + ** reading page 1 it discovers that the page-size of the database + ** file is not pBt->pageSize. In this case lockBtree() will update + ** pBt->pageSize to the page-size of the file on disk. + */ + while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) ); + + if( rc==SQLITE_OK && wrflag ){ + if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){ + rc = SQLITE_READONLY; + }else{ + rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db)); + if( rc==SQLITE_OK ){ + rc = newDatabase(pBt); + } + } + } + + if( rc!=SQLITE_OK ){ + unlockBtreeIfUnused(pBt); + } + }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && + btreeInvokeBusyHandler(pBt) ); + + if( rc==SQLITE_OK ){ + if( p->inTrans==TRANS_NONE ){ + pBt->nTransaction++; +#ifndef SQLITE_OMIT_SHARED_CACHE + if( p->sharable ){ + assert( p->lock.pBtree==p && p->lock.iTable==1 ); + p->lock.eLock = READ_LOCK; + p->lock.pNext = pBt->pLock; + pBt->pLock = &p->lock; + } +#endif + } + p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ); + if( p->inTrans>pBt->inTransaction ){ + pBt->inTransaction = p->inTrans; + } + if( wrflag ){ + MemPage *pPage1 = pBt->pPage1; +#ifndef SQLITE_OMIT_SHARED_CACHE + assert( !pBt->pWriter ); + pBt->pWriter = p; + pBt->btsFlags &= ~BTS_EXCLUSIVE; + if( wrflag>1 ) pBt->btsFlags |= BTS_EXCLUSIVE; +#endif + + /* If the db-size header field is incorrect (as it may be if an old + ** client has been writing the database file), update it now. Doing + ** this sooner rather than later means the database size can safely + ** re-read the database size from page 1 if a savepoint or transaction + ** rollback occurs within the transaction. 
+      */
+      if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){
+        rc = sqlite3PagerWrite(pPage1->pDbPage);
+        if( rc==SQLITE_OK ){
+          put4byte(&pPage1->aData[28], pBt->nPage);
+        }
+      }
+    }
+  }
+
+
+trans_begun:
+  if( rc==SQLITE_OK && wrflag ){
+    /* This call makes sure that the pager has the correct number of
+    ** open savepoints. If the second parameter is greater than 0 and
+    ** the sub-journal is not already open, then it will be opened here.
+    */
+    rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint);
+  }
+
+  btreeIntegrity(p);
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+
+/*
+** Set the pointer-map entries for all children of page pPage. Also, if
+** pPage contains cells that point to overflow pages, set the pointer
+** map entries for the overflow pages as well.
+*/
+static int setChildPtrmaps(MemPage *pPage){
+  int i;                             /* Counter variable */
+  int nCell;                         /* Number of cells in page pPage */
+  int rc;                            /* Return code */
+  BtShared *pBt = pPage->pBt;
+  Pgno pgno = pPage->pgno;
+
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  rc = btreeInitPage(pPage);
+  if( rc!=SQLITE_OK ) return rc;
+  nCell = pPage->nCell;
+
+  for(i=0; i<nCell; i++){
+    u8 *pCell = findCell(pPage, i);
+
+    ptrmapPutOvflPtr(pPage, pCell, &rc);
+
+    if( !pPage->leaf ){
+      Pgno childPgno = get4byte(pCell);
+      ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
+    }
+  }
+
+  if( !pPage->leaf ){
+    Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+    ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
+  }
+
+  return rc;
+}
+
+/*
+** Somewhere on pPage is a pointer to page iFrom.  Modify this pointer so
+** that it points to iTo. Parameter eType describes the type of pointer to
+** be modified, as follows:
+**
+** PTRMAP_BTREE:     pPage is a btree-page. The pointer points at a child
+**                   page of pPage.
+**
+** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow
+**                   page pointed to by one of the cells on pPage.
+**
+** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next
+**                   overflow page in the list.
+*/
+static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+  if( eType==PTRMAP_OVERFLOW2 ){
+    /* The pointer is always the first 4 bytes of the page in this case.  */
+    if( get4byte(pPage->aData)!=iFrom ){
+      return SQLITE_CORRUPT_BKPT;
+    }
+    put4byte(pPage->aData, iTo);
+  }else{
+    int i;
+    int nCell;
+    int rc;
+
+    rc = btreeInitPage(pPage);
+    if( rc ) return rc;
+    nCell = pPage->nCell;
+
+    for(i=0; i<nCell; i++){
+      u8 *pCell = findCell(pPage, i);
+      if( eType==PTRMAP_OVERFLOW1 ){
+        CellInfo info;
+        pPage->xParseCell(pPage, pCell, &info);
+        if( info.nLocal<info.nPayload ){
+          if( pCell+info.nSize > pPage->aData+pPage->pBt->usableSize ){
+            return SQLITE_CORRUPT_BKPT;
+          }
+          if( iFrom==get4byte(pCell+info.nSize-4) ){
+            put4byte(pCell+info.nSize-4, iTo);
+            break;
+          }
+        }
+      }else{
+        if( get4byte(pCell)==iFrom ){
+          put4byte(pCell, iTo);
+          break;
+        }
+      }
+    }
+
+    if( i==nCell ){
+      if( eType!=PTRMAP_BTREE ||
+          get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){
+        return SQLITE_CORRUPT_BKPT;
+      }
+      put4byte(&pPage->aData[pPage->hdrOffset+8], iTo);
+    }
+  }
+  return SQLITE_OK;
+}
+
+
+/*
+** Move the open database page pDbPage to location iFreePage in the
+** database. The pDbPage reference remains valid.
+**
+** The isCommit flag indicates that there is no need to remember that
+** the journal needs to be sync()ed before database page pDbPage->pgno
+** can be written to. The caller has already promised not to write to that
+** page.
+*/ +static int relocatePage( + BtShared *pBt, /* Btree */ + MemPage *pDbPage, /* Open page to move */ + u8 eType, /* Pointer map 'type' entry for pDbPage */ + Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */ + Pgno iFreePage, /* The location to move pDbPage to */ + int isCommit /* isCommit flag passed to sqlite3PagerMovepage */ +){ + MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */ + Pgno iDbPage = pDbPage->pgno; + Pager *pPager = pBt->pPager; + int rc; + + assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 || + eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ); + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( pDbPage->pBt==pBt ); + + /* Move page iDbPage from its current location to page number iFreePage */ + TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", + iDbPage, iFreePage, iPtrPage, eType)); + rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); + if( rc!=SQLITE_OK ){ + return rc; + } + pDbPage->pgno = iFreePage; + + /* If pDbPage was a btree-page, then it may have child pages and/or cells + ** that point to overflow pages. The pointer map entries for all these + ** pages need to be changed. + ** + ** If pDbPage is an overflow page, then the first 4 bytes may store a + ** pointer to a subsequent overflow page. If this is the case, then + ** the pointer map needs to be updated for the subsequent overflow page. + */ + if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){ + rc = setChildPtrmaps(pDbPage); + if( rc!=SQLITE_OK ){ + return rc; + } + }else{ + Pgno nextOvfl = get4byte(pDbPage->aData); + if( nextOvfl!=0 ){ + ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage, &rc); + if( rc!=SQLITE_OK ){ + return rc; + } + } + } + + /* Fix the database pointer on page iPtrPage that pointed at iDbPage so + ** that it points at iFreePage. Also fix the pointer map entry for + ** iPtrPage. + */ + if( eType!=PTRMAP_ROOTPAGE ){ + rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3PagerWrite(pPtrPage->pDbPage); + if( rc!=SQLITE_OK ){ + releasePage(pPtrPage); + return rc; + } + rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType); + releasePage(pPtrPage); + if( rc==SQLITE_OK ){ + ptrmapPut(pBt, iFreePage, eType, iPtrPage, &rc); + } + } + return rc; +} + +/* Forward declaration required by incrVacuumStep(). */ +static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8); + +/* +** Perform a single step of an incremental-vacuum. If successful, return +** SQLITE_OK. If there is no work to do (and therefore no point in +** calling this function again), return SQLITE_DONE. Or, if an error +** occurs, return some other error code. +** +** More specifically, this function attempts to re-organize the database so +** that the last page of the file currently in use is no longer in use. +** +** Parameter nFin is the number of pages that this database would contain +** were this function called until it returns SQLITE_DONE. +** +** If the bCommit parameter is non-zero, this function assumes that the +** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE +** or an error. bCommit is passed true for an auto-vacuum-on-commit +** operation, or false for an incremental vacuum. 
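+**
+** In the bCommit case the caller truncates the free-list when all steps
+** are done, which is why pages of type PTRMAP_FREEPAGE need no free-list
+** bookkeeping on that path.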
+*/
+static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){
+  Pgno nFreeList;           /* Number of pages still on the free-list */
+  int rc;
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  assert( iLastPg>nFin );
+
+  if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){
+    u8 eType;
+    Pgno iPtrPage;
+
+    nFreeList = get4byte(&pBt->pPage1->aData[36]);
+    if( nFreeList==0 ){
+      return SQLITE_DONE;
+    }
+
+    rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    if( eType==PTRMAP_ROOTPAGE ){
+      return SQLITE_CORRUPT_BKPT;
+    }
+
+    if( eType==PTRMAP_FREEPAGE ){
+      if( bCommit==0 ){
+        /* Remove the page from the files free-list. This is not required
+        ** if bCommit is non-zero. In that case, the free-list will be
+        ** truncated to zero after this function returns, so it doesn't
+        ** matter if it still contains some garbage entries.
+        */
+        Pgno iFreePg;
+        MemPage *pFreePg;
+        rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, BTALLOC_EXACT);
+        if( rc!=SQLITE_OK ){
+          return rc;
+        }
+        assert( iFreePg==iLastPg );
+        releasePage(pFreePg);
+      }
+    } else {
+      Pgno iFreePg;             /* Index of free page to move pLastPg to */
+      MemPage *pLastPg;
+      u8 eMode = BTALLOC_ANY;   /* Mode parameter for allocateBtreePage() */
+      Pgno iNear = 0;           /* nearby parameter for allocateBtreePage() */
+
+      rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+
+      /* If bCommit is zero, this loop runs exactly once and page pLastPg
+      ** is swapped with the first free page pulled off the free list.
+      **
+      ** On the other hand, if bCommit is greater than zero, then keep
+      ** looping until a free-page located within the first nFin pages
+      ** of the file is found.
+      */
+      if( bCommit==0 ){
+        eMode = BTALLOC_LE;
+        iNear = nFin;
+      }
+      do {
+        MemPage *pFreePg;
+        rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode);
+        if( rc!=SQLITE_OK ){
+          releasePage(pLastPg);
+          return rc;
+        }
+        releasePage(pFreePg);
+      }while( bCommit && iFreePg>nFin );
+      assert( iFreePg<iLastPg );
+
+      rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, bCommit);
+      releasePage(pLastPg);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+    }
+  }
+
+  if( bCommit==0 ){
+    do {
+      iLastPg--;
+    }while( iLastPg==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg) );
+    pBt->bDoTruncate = 1;
+    pBt->nPage = iLastPg;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** The database opened by the first argument is an auto-vacuum database
+** nOrig pages in size containing nFree free pages. Return the expected
+** size of the database in pages following an auto-vacuum operation.
+*/
+static Pgno finalDbSize(BtShared *pBt, Pgno nOrig, Pgno nFree){
+  int nEntry;                     /* Number of entries on one ptrmap page */
+  Pgno nPtrmap;                   /* Number of PtrMap pages to be freed */
+  Pgno nFin;                      /* Return value */
+
+  nEntry = pBt->usableSize/5;
+  nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+nEntry)/nEntry;
+  nFin = nOrig - nFree - nPtrmap;
+  if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<PENDING_BYTE_PAGE(pBt) ){
+    nFin--;
+  }
+  while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){
+    nFin--;
+  }
+
+  return nFin;
+}
+
+/*
+** A write-transaction must be opened before calling this function.
+** It performs a single unit of work towards an incremental vacuum.
+**
+** If the incremental vacuum is finished after this function has run,
+** SQLITE_DONE is returned. If it is not finished, but no error occurred,
+** SQLITE_OK is returned. Otherwise an SQLite error code.
+*/
+SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){
+  int rc;
+  BtShared *pBt = p->pBt;
+
+  sqlite3BtreeEnter(p);
+  assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE );
+  if( !pBt->autoVacuum ){
+    rc = SQLITE_DONE;
+  }else{
+    Pgno nOrig = btreePagecount(pBt);
+    Pgno nFree = get4byte(&pBt->pPage1->aData[36]);
+    Pgno nFin = finalDbSize(pBt, nOrig, nFree);
+
+    if( nOrig<nFin ){
+      rc = SQLITE_CORRUPT_BKPT;
+    }else if( nFree>0 ){
+      rc = saveAllCursors(pBt, 0, 0);
+      if( rc==SQLITE_OK ){
+        invalidateAllOverflowCache(pBt);
+        rc = incrVacuumStep(pBt, nFin, nOrig, 0);
+      }
+      if( rc==SQLITE_OK ){
+        rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+        put4byte(&pBt->pPage1->aData[28], pBt->nPage);
+      }
+    }else{
+      rc = SQLITE_DONE;
+    }
+  }
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+/*
+** This routine is called prior to sqlite3PagerCommit when a transaction
+** is committed for an auto-vacuum database.
+**
+** If SQLITE_OK is returned, then *pnTrunc is set to the number of pages
+** the database file should be truncated to during the commit process.
+** i.e. the database has been reorganized so that only the first *pnTrunc
+** pages are in use.
+*/
+static int autoVacuumCommit(BtShared *pBt){
+  int rc = SQLITE_OK;
+  Pager *pPager = pBt->pPager;
+  VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); )
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  invalidateAllOverflowCache(pBt);
+  assert(pBt->autoVacuum);
+  if( !pBt->incrVacuum ){
+    Pgno nFin;         /* Number of pages in database after autovacuuming */
+    Pgno nFree;        /* Number of pages on the freelist initially */
+    Pgno iFree;        /* The next page to be freed */
+    Pgno nOrig;        /* Database size before freeing */
+
+    nOrig = btreePagecount(pBt);
+    if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){
+      /* It is not possible to create a database for which the final page
+      ** is either a pointer-map page or the pending-byte page. If one
+      ** is encountered, this indicates corruption.
+      */
+      return SQLITE_CORRUPT_BKPT;
+    }
+
+    nFree = get4byte(&pBt->pPage1->aData[36]);
+    nFin = finalDbSize(pBt, nOrig, nFree);
+    if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT;
+    if( nFin<nOrig ){
+      rc = saveAllCursors(pBt, 0, 0);
+    }
+    for(iFree=nOrig; iFree>nFin && rc==SQLITE_OK; iFree--){
+      rc = incrVacuumStep(pBt, nFin, iFree, 1);
+    }
+    if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){
+      rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+      put4byte(&pBt->pPage1->aData[32], 0);
+      put4byte(&pBt->pPage1->aData[36], 0);
+      put4byte(&pBt->pPage1->aData[28], nFin);
+      pBt->bDoTruncate = 1;
+      pBt->nPage = nFin;
+    }
+    if( rc!=SQLITE_OK ){
+      sqlite3PagerRollback(pPager);
+    }
+  }
+
+  assert( nRef>=sqlite3PagerRefcount(pPager) );
+  return rc;
+}
+
+#else /* ifndef SQLITE_OMIT_AUTOVACUUM */
+# define setChildPtrmaps(x) SQLITE_OK
+#endif
+
+/*
+** This routine does the first phase of a two-phase commit. This routine
+** causes a rollback journal to be created (if it does not already exist)
+** and populated with enough information so that if a power loss occurs
+** the database can be restored to its original state by playing back
+** the journal. Then the contents of the journal are flushed out to
+** the disk. After the journal is safely on oxide, the changes to the
+** database are written into the database file and flushed to oxide.
+** At the end of this call, the rollback journal still exists on the
+** disk and we are still holding all locks, so the transaction has not
+** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the
+** commit process.
+**
+** This call is a no-op if no write-transaction is currently active on pBt.
+**
+** Otherwise, sync the database file for the btree pBt. zMaster points to
+** the name of a master journal file that should be written into the
+** individual journal file, or is NULL, indicating no master journal file
+** (single database transaction).
+**
+** When this is called, the master journal should already have been
+** created, populated with this journal pointer and synced to disk.
+**
+** Once this routine has returned, the only thing required to commit
+** the write-transaction for this database file is to delete the journal.
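+**
+** For illustration only: the two phases are normally driven back to back,
+** exactly as sqlite3BtreeCommit() below does:
+**
+**     rc = sqlite3BtreeCommitPhaseOne(p, 0);
+**     if( rc==SQLITE_OK ) rc = sqlite3BtreeCommitPhaseTwo(p, 0);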
+*/
+SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster){
+  int rc = SQLITE_OK;
+  if( p->inTrans==TRANS_WRITE ){
+    BtShared *pBt = p->pBt;
+    sqlite3BtreeEnter(p);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( pBt->autoVacuum ){
+      rc = autoVacuumCommit(pBt);
+      if( rc!=SQLITE_OK ){
+        sqlite3BtreeLeave(p);
+        return rc;
+      }
+    }
+    if( pBt->bDoTruncate ){
+      sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage);
+    }
+#endif
+    rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, 0);
+    sqlite3BtreeLeave(p);
+  }
+  return rc;
+}
+
+/*
+** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback()
+** at the conclusion of a transaction.
+*/
+static void btreeEndTransaction(Btree *p){
+  BtShared *pBt = p->pBt;
+  sqlite3 *db = p->db;
+  assert( sqlite3BtreeHoldsMutex(p) );
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  pBt->bDoTruncate = 0;
+#endif
+  if( p->inTrans>TRANS_NONE && db->nVdbeRead>1 ){
+    /* If there are other active statements that belong to this database
+    ** handle, downgrade to a read-only transaction. The other statements
+    ** may still be reading from the database. */
+    downgradeAllSharedCacheTableLocks(p);
+    p->inTrans = TRANS_READ;
+  }else{
+    /* If the handle had any kind of transaction open, decrement the
+    ** transaction count of the shared btree. If the transaction count
+    ** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused()
+    ** call below will unlock the pager. */
+    if( p->inTrans!=TRANS_NONE ){
+      clearAllSharedCacheTableLocks(p);
+      pBt->nTransaction--;
+      if( 0==pBt->nTransaction ){
+        pBt->inTransaction = TRANS_NONE;
+      }
+    }
+
+    /* Set the current transaction state to TRANS_NONE and unlock the
+    ** pager if this call closed the only read or write transaction. */
+    p->inTrans = TRANS_NONE;
+    unlockBtreeIfUnused(pBt);
+  }
+
+  btreeIntegrity(p);
+}
+
+/*
+** Commit the transaction currently in progress.
+**
+** This routine implements the second phase of a 2-phase commit. The
+** sqlite3BtreeCommitPhaseOne() routine does the first phase and should
+** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne()
+** routine did all the work of writing information out to disk and flushing the
+** contents so that they are written onto the disk platter. All this
+** routine has to do is delete or truncate or zero the header in the
+** rollback journal (which causes the transaction to commit) and
+** drop locks.
+**
+** Normally, if an error occurs while the pager layer is attempting to
+** finalize the underlying journal file, this function returns an error and
+** the upper layer will attempt a rollback. However, if the second argument
+** is non-zero then this b-tree transaction is part of a multi-file
+** transaction. In this case, the transaction has already been committed
+** (by deleting a master journal file) and the caller will ignore this
+** function's return code. So, even if an error occurs in the pager layer,
+** reset the b-tree object's internal state to indicate that the write
+** transaction has been closed. This is quite safe, as the pager will have
+** transitioned to the error state.
+**
+** This will release the write lock on the database file. If there
+** are no active cursors, it also releases the read lock.
+*/ +SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){ + + if( p->inTrans==TRANS_NONE ) return SQLITE_OK; + sqlite3BtreeEnter(p); + btreeIntegrity(p); + + /* If the handle has a write-transaction open, commit the shared-btrees + ** transaction and set the shared state to TRANS_READ. + */ + if( p->inTrans==TRANS_WRITE ){ + int rc; + BtShared *pBt = p->pBt; + assert( pBt->inTransaction==TRANS_WRITE ); + assert( pBt->nTransaction>0 ); + rc = sqlite3PagerCommitPhaseTwo(pBt->pPager); + if( rc!=SQLITE_OK && bCleanup==0 ){ + sqlite3BtreeLeave(p); + return rc; + } + p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */ + pBt->inTransaction = TRANS_READ; + btreeClearHasContent(pBt); + } + + btreeEndTransaction(p); + sqlite3BtreeLeave(p); + return SQLITE_OK; +} + +/* +** Do both phases of a commit. +*/ +SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){ + int rc; + sqlite3BtreeEnter(p); + rc = sqlite3BtreeCommitPhaseOne(p, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeCommitPhaseTwo(p, 0); + } + sqlite3BtreeLeave(p); + return rc; +} + +/* +** This routine sets the state to CURSOR_FAULT and the error +** code to errCode for every cursor on any BtShared that pBtree +** references. Or if the writeOnly flag is set to 1, then only +** trip write cursors and leave read cursors unchanged. +** +** Every cursor is a candidate to be tripped, including cursors +** that belong to other database connections that happen to be +** sharing the cache with pBtree. +** +** This routine gets called when a rollback occurs. If the writeOnly +** flag is true, then only write-cursors need be tripped - read-only +** cursors save their current positions so that they may continue +** following the rollback. Or, if writeOnly is false, all cursors are +** tripped. In general, writeOnly is false if the transaction being +** rolled back modified the database schema. In this case b-tree root +** pages may be moved or deleted from the database altogether, making +** it unsafe for read cursors to continue. +** +** If the writeOnly flag is true and an error is encountered while +** saving the current position of a read-only cursor, all cursors, +** including all read-cursors are tripped. +** +** SQLITE_OK is returned if successful, or if an error occurs while +** saving a cursor position, an SQLite error code. +*/ +SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode, int writeOnly){ + BtCursor *p; + int rc = SQLITE_OK; + + assert( (writeOnly==0 || writeOnly==1) && BTCF_WriteFlag==1 ); + if( pBtree ){ + sqlite3BtreeEnter(pBtree); + for(p=pBtree->pBt->pCursor; p; p=p->pNext){ + int i; + if( writeOnly && (p->curFlags & BTCF_WriteFlag)==0 ){ + if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){ + rc = saveCursorPosition(p); + if( rc!=SQLITE_OK ){ + (void)sqlite3BtreeTripAllCursors(pBtree, rc, 0); + break; + } + } + }else{ + sqlite3BtreeClearCursor(p); + p->eState = CURSOR_FAULT; + p->skipNext = errCode; + } + for(i=0; i<=p->iPage; i++){ + releasePage(p->apPage[i]); + p->apPage[i] = 0; + } + } + sqlite3BtreeLeave(pBtree); + } + return rc; +} + +/* +** Rollback the transaction in progress. +** +** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped). +** Only write cursors are tripped if writeOnly is true but all cursors are +** tripped if writeOnly is false. Any attempt to use +** a tripped cursor will result in an error. +** +** This will release the write lock on the database file. If there +** are no active cursors, it also releases the read lock. 
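+**
+** A hedged usage sketch (the tripCode value here is one of the two values
+** the assert() in the implementation permits):
+**
+**     rc = sqlite3BtreeRollback(p, SQLITE_ABORT_ROLLBACK, 0);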
+*/ +SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){ + int rc; + BtShared *pBt = p->pBt; + MemPage *pPage1; + + assert( writeOnly==1 || writeOnly==0 ); + assert( tripCode==SQLITE_ABORT_ROLLBACK || tripCode==SQLITE_OK ); + sqlite3BtreeEnter(p); + if( tripCode==SQLITE_OK ){ + rc = tripCode = saveAllCursors(pBt, 0, 0); + if( rc ) writeOnly = 0; + }else{ + rc = SQLITE_OK; + } + if( tripCode ){ + int rc2 = sqlite3BtreeTripAllCursors(p, tripCode, writeOnly); + assert( rc==SQLITE_OK || (writeOnly==0 && rc2==SQLITE_OK) ); + if( rc2!=SQLITE_OK ) rc = rc2; + } + btreeIntegrity(p); + + if( p->inTrans==TRANS_WRITE ){ + int rc2; + + assert( TRANS_WRITE==pBt->inTransaction ); + rc2 = sqlite3PagerRollback(pBt->pPager); + if( rc2!=SQLITE_OK ){ + rc = rc2; + } + + /* The rollback may have destroyed the pPage1->aData value. So + ** call btreeGetPage() on page 1 again to make + ** sure pPage1->aData is set correctly. */ + if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){ + int nPage = get4byte(28+(u8*)pPage1->aData); + testcase( nPage==0 ); + if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage); + testcase( pBt->nPage!=nPage ); + pBt->nPage = nPage; + releasePage(pPage1); + } + assert( countValidCursors(pBt, 1)==0 ); + pBt->inTransaction = TRANS_READ; + btreeClearHasContent(pBt); + } + + btreeEndTransaction(p); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Start a statement subtransaction. The subtransaction can be rolled +** back independently of the main transaction. You must start a transaction +** before starting a subtransaction. The subtransaction is ended automatically +** if the main transaction commits or rolls back. +** +** Statement subtransactions are used around individual SQL statements +** that are contained within a BEGIN...COMMIT block. If a constraint +** error occurs within the statement, the effect of that one statement +** can be rolled back without having to rollback the entire transaction. +** +** A statement sub-transaction is implemented as an anonymous savepoint. The +** value passed as the second parameter is the total number of savepoints, +** including the new anonymous savepoint, open on the B-Tree. i.e. if there +** are no active savepoints and no other statement-transactions open, +** iStatement is 1. This anonymous savepoint can be released or rolled back +** using the sqlite3BtreeSavepoint() function. +*/ +SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree *p, int iStatement){ + int rc; + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + assert( p->inTrans==TRANS_WRITE ); + assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); + assert( iStatement>0 ); + assert( iStatement>p->db->nSavepoint ); + assert( pBt->inTransaction==TRANS_WRITE ); + /* At the pager level, a statement transaction is a savepoint with + ** an index greater than all savepoints created explicitly using + ** SQL statements. It is illegal to open, release or rollback any + ** such savepoints while the statement transaction savepoint is active. + */ + rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** The second argument to this function, op, is always SAVEPOINT_ROLLBACK +** or SAVEPOINT_RELEASE. This function either releases or rolls back the +** savepoint identified by parameter iSavepoint, depending on the value +** of op. +** +** Normally, iSavepoint is greater than or equal to zero. However, if op is +** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. 
In this case the +** contents of the entire transaction are rolled back. This is different +** from a normal transaction rollback, as no locks are released and the +** transaction remains open. +*/ +SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ + int rc = SQLITE_OK; + if( p && p->inTrans==TRANS_WRITE ){ + BtShared *pBt = p->pBt; + assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); + assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); + sqlite3BtreeEnter(p); + if( op==SAVEPOINT_ROLLBACK ){ + rc = saveAllCursors(pBt, 0, 0); + } + if( rc==SQLITE_OK ){ + rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); + } + if( rc==SQLITE_OK ){ + if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){ + pBt->nPage = 0; + } + rc = newDatabase(pBt); + pBt->nPage = get4byte(28 + pBt->pPage1->aData); + + /* The database size was written into the offset 28 of the header + ** when the transaction started, so we know that the value at offset + ** 28 is nonzero. */ + assert( pBt->nPage>0 ); + } + sqlite3BtreeLeave(p); + } + return rc; +} + +/* +** Create a new cursor for the BTree whose root is on the page +** iTable. If a read-only cursor is requested, it is assumed that +** the caller already has at least a read-only transaction open +** on the database already. If a write-cursor is requested, then +** the caller is assumed to have an open write transaction. +** +** If the BTREE_WRCSR bit of wrFlag is clear, then the cursor can only +** be used for reading. If the BTREE_WRCSR bit is set, then the cursor +** can be used for reading or for writing if other conditions for writing +** are also met. These are the conditions that must be met in order +** for writing to be allowed: +** +** 1: The cursor must have been opened with wrFlag containing BTREE_WRCSR +** +** 2: Other database connections that share the same pager cache +** but which are not in the READ_UNCOMMITTED state may not have +** cursors open with wrFlag==0 on the same table. Otherwise +** the changes made by this write cursor would be visible to +** the read cursors in the other database connection. +** +** 3: The database must be writable (not on read-only media) +** +** 4: There must be an active transaction. +** +** The BTREE_FORDELETE bit of wrFlag may optionally be set if BTREE_WRCSR +** is set. If FORDELETE is set, that is a hint to the implementation that +** this cursor will only be used to seek to and delete entries of an index +** as part of a larger DELETE statement. The FORDELETE hint is not used by +** this implementation. But in a hypothetical alternative storage engine +** in which index entries are automatically deleted when corresponding table +** rows are deleted, the FORDELETE flag is a hint that all SEEK and DELETE +** operations on this cursor can be no-ops and all READ operations can +** return a null row (2-bytes: 0x01 0x00). +** +** No checking is done to make sure that page iTable really is the +** root page of a b-tree. If it is not, then the cursor acquired +** will not work correctly. +** +** It is assumed that the sqlite3BtreeCursorZero() has been called +** on pCur to initialize the memory space prior to invoking this routine. +*/ +static int btreeCursor( + Btree *p, /* The btree */ + int iTable, /* Root page of table to open */ + int wrFlag, /* 1 to write. 
0 read-only */
+  struct KeyInfo *pKeyInfo,               /* First arg to comparison function */
+  BtCursor *pCur                          /* Space for new cursor */
+){
+  BtShared *pBt = p->pBt;                 /* Shared b-tree handle */
+  BtCursor *pX;                           /* Looping over all other cursors */
+
+  assert( sqlite3BtreeHoldsMutex(p) );
+  assert( wrFlag==0
+       || wrFlag==BTREE_WRCSR
+       || wrFlag==(BTREE_WRCSR|BTREE_FORDELETE)
+  );
+
+  /* The following assert statements verify that if this is a sharable
+  ** b-tree database, the connection is holding the required table locks,
+  ** and that no other connection has any open cursor that conflicts with
+  ** this lock. */
+  assert( hasSharedCacheTableLock(p, iTable, pKeyInfo!=0, (wrFlag?2:1)) );
+  assert( wrFlag==0 || !hasReadConflicts(p, iTable) );
+
+  /* Assert that the caller has opened the required transaction. */
+  assert( p->inTrans>TRANS_NONE );
+  assert( wrFlag==0 || p->inTrans==TRANS_WRITE );
+  assert( pBt->pPage1 && pBt->pPage1->aData );
+  assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 );
+
+  if( wrFlag ){
+    allocateTempSpace(pBt);
+    if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM_BKPT;
+  }
+  if( iTable==1 && btreePagecount(pBt)==0 ){
+    assert( wrFlag==0 );
+    iTable = 0;
+  }
+
+  /* Now that no other errors can occur, finish filling in the BtCursor
+  ** variables and link the cursor into the BtShared list. */
+  pCur->pgnoRoot = (Pgno)iTable;
+  pCur->iPage = -1;
+  pCur->pKeyInfo = pKeyInfo;
+  pCur->pBtree = p;
+  pCur->pBt = pBt;
+  pCur->curFlags = wrFlag ? BTCF_WriteFlag : 0;
+  pCur->curPagerFlags = wrFlag ? 0 : PAGER_GET_READONLY;
+  /* If there are two or more cursors on the same btree, then all such
+  ** cursors *must* have the BTCF_Multiple flag set. */
+  for(pX=pBt->pCursor; pX; pX=pX->pNext){
+    if( pX->pgnoRoot==(Pgno)iTable ){
+      pX->curFlags |= BTCF_Multiple;
+      pCur->curFlags |= BTCF_Multiple;
+    }
+  }
+  pCur->pNext = pBt->pCursor;
+  pBt->pCursor = pCur;
+  pCur->eState = CURSOR_INVALID;
+  return SQLITE_OK;
+}
+SQLITE_PRIVATE int sqlite3BtreeCursor(
+  Btree *p,                                   /* The btree */
+  int iTable,                                 /* Root page of table to open */
+  int wrFlag,                                 /* 1 to write. 0 read-only */
+  struct KeyInfo *pKeyInfo,                   /* First arg to xCompare() */
+  BtCursor *pCur                              /* Write new cursor here */
+){
+  int rc;
+  if( iTable<1 ){
+    rc = SQLITE_CORRUPT_BKPT;
+  }else{
+    sqlite3BtreeEnter(p);
+    rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur);
+    sqlite3BtreeLeave(p);
+  }
+  return rc;
+}
+
+/*
+** Return the size of a BtCursor object in bytes.
+**
+** This interface is needed so that users of cursors can preallocate
+** sufficient storage to hold a cursor. The BtCursor object is opaque
+** to users so they cannot do the sizeof() themselves - they must call
+** this routine.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){
+  return ROUND8(sizeof(BtCursor));
+}
+
+/*
+** Initialize memory that will be converted into a BtCursor object.
+**
+** The simple approach here would be to memset() the entire object
+** to zero. But it turns out that the apPage[] and aiIdx[] arrays
+** do not need to be zeroed and they are large, so we can save a lot
+** of run-time by skipping the initialization of those elements.
+*/
+SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){
+  memset(p, 0, offsetof(BtCursor, iPage));
+}
+
+/*
+** Close a cursor. The read lock on the database file is released
+** when the last cursor is closed.
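+**
+** Sketch of the expected allocate/open/close pairing (illustration only;
+** the iTable and pKeyInfo values are assumptions of the sketch):
+**
+**     BtCursor *pCur = sqlite3_malloc(sqlite3BtreeCursorSize());
+**     sqlite3BtreeCursorZero(pCur);
+**     rc = sqlite3BtreeCursor(p, iTable, 0, pKeyInfo, pCur);
+**     ...                                      (use the cursor)
+**     sqlite3BtreeCloseCursor(pCur);
+**     sqlite3_free(pCur);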
+*/
+SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){
+  Btree *pBtree = pCur->pBtree;
+  if( pBtree ){
+    int i;
+    BtShared *pBt = pCur->pBt;
+    sqlite3BtreeEnter(pBtree);
+    sqlite3BtreeClearCursor(pCur);
+    assert( pBt->pCursor!=0 );
+    if( pBt->pCursor==pCur ){
+      pBt->pCursor = pCur->pNext;
+    }else{
+      BtCursor *pPrev = pBt->pCursor;
+      do{
+        if( pPrev->pNext==pCur ){
+          pPrev->pNext = pCur->pNext;
+          break;
+        }
+        pPrev = pPrev->pNext;
+      }while( ALWAYS(pPrev) );
+    }
+    for(i=0; i<=pCur->iPage; i++){
+      releasePage(pCur->apPage[i]);
+    }
+    unlockBtreeIfUnused(pBt);
+    sqlite3_free(pCur->aOverflow);
+    /* sqlite3_free(pCur); */
+    sqlite3BtreeLeave(pBtree);
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Make sure the BtCursor* given in the argument has a valid
+** BtCursor.info structure. If it is not already valid, call
+** btreeParseCell() to fill it in.
+**
+** BtCursor.info is a cache of the information in the current cell.
+** Using this cache reduces the number of calls to btreeParseCell().
+*/
+#ifndef NDEBUG
+  static void assertCellInfo(BtCursor *pCur){
+    CellInfo info;
+    int iPage = pCur->iPage;
+    memset(&info, 0, sizeof(info));
+    btreeParseCell(pCur->apPage[iPage], pCur->aiIdx[iPage], &info);
+    assert( CORRUPT_DB || memcmp(&info, &pCur->info, sizeof(info))==0 );
+  }
+#else
+  #define assertCellInfo(x)
+#endif
+static SQLITE_NOINLINE void getCellInfo(BtCursor *pCur){
+  if( pCur->info.nSize==0 ){
+    int iPage = pCur->iPage;
+    pCur->curFlags |= BTCF_ValidNKey;
+    btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info);
+  }else{
+    assertCellInfo(pCur);
+  }
+}
+
+#ifndef NDEBUG  /* The next routine used only within assert() statements */
+/*
+** Return true if the given BtCursor is valid. A valid cursor is one
+** that is currently pointing to a row in a (non-empty) table.
+** This is a verification routine used only within assert() statements.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){
+  return pCur && pCur->eState==CURSOR_VALID;
+}
+#endif /* NDEBUG */
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor *pCur){
+  assert( pCur!=0 );
+  return pCur->eState==CURSOR_VALID;
+}
+
+/*
+** Return the value of the integer key or "rowid" for a table btree.
+** This routine is only valid for a cursor that is pointing into an
+** ordinary table btree. If the cursor points to an index btree or
+** is invalid, the result of this routine is undefined.
+*/
+SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor *pCur){
+  assert( cursorHoldsMutex(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  assert( pCur->curIntKey );
+  getCellInfo(pCur);
+  return pCur->info.nKey;
+}
+
+/*
+** Return the number of bytes of payload for the entry that pCur is
+** currently pointing to. For table btrees, this will be the amount
+** of data. For index btrees, this will be the size of the key.
+**
+** The caller must guarantee that the cursor is pointing to a non-NULL
+** valid entry. In other words, the calling procedure must guarantee
+** that the cursor has Cursor.eState==CURSOR_VALID.
+*/
+SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){
+  assert( cursorHoldsMutex(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  getCellInfo(pCur);
+  return pCur->info.nPayload;
+}
+
+/*
+** Given the page number of an overflow page in the database (parameter
+** ovfl), this function finds the page number of the next page in the
+** linked list of overflow pages. If possible, it uses the auto-vacuum
+** pointer-map data instead of reading the content of page ovfl to do so.
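+**
+** For example (sketch only, where ovflFirst is an assumed first page of a
+** chain), calling this routine repeatedly walks the whole list; *pPgnoNext
+** is left at zero after the final page:
+**
+**     Pgno next = ovflFirst;
+**     while( next && rc==SQLITE_OK ){
+**       rc = getOverflowPage(pBt, next, 0, &next);
+**     }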
+**
+** If an error occurs an SQLite error code is returned. Otherwise:
+**
+** The page number of the next overflow page in the linked list is
+** written to *pPgnoNext. If page ovfl is the last page in its linked
+** list, *pPgnoNext is set to zero.
+**
+** If ppPage is not NULL, and a reference to the MemPage object corresponding
+** to page number pOvfl was obtained, then *ppPage is set to point to that
+** reference. It is the responsibility of the caller to call releasePage()
+** on *ppPage to free the reference. If no reference was obtained (because
+** the pointer-map was used to obtain the value for *pPgnoNext), then
+** *ppPage is set to zero.
+*/
+static int getOverflowPage(
+  BtShared *pBt,               /* The database file */
+  Pgno ovfl,                   /* Current overflow page number */
+  MemPage **ppPage,            /* OUT: MemPage handle (may be NULL) */
+  Pgno *pPgnoNext              /* OUT: Next overflow page number */
+){
+  Pgno next = 0;
+  MemPage *pPage = 0;
+  int rc = SQLITE_OK;
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  assert(pPgnoNext);
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  /* Try to find the next page in the overflow list using the
+  ** autovacuum pointer-map pages. Guess that the next page in
+  ** the overflow list is page number (ovfl+1). If that guess turns
+  ** out to be wrong, fall back to loading the data of page
+  ** number ovfl to determine the next page number.
+  */
+  if( pBt->autoVacuum ){
+    Pgno pgno;
+    Pgno iGuess = ovfl+1;
+    u8 eType;
+
+    while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){
+      iGuess++;
+    }
+
+    if( iGuess<=btreePagecount(pBt) ){
+      rc = ptrmapGet(pBt, iGuess, &eType, &pgno);
+      if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){
+        next = iGuess;
+        rc = SQLITE_DONE;
+      }
+    }
+  }
+#endif
+
+  assert( next==0 || rc==SQLITE_DONE );
+  if( rc==SQLITE_OK ){
+    rc = btreeGetPage(pBt, ovfl, &pPage, (ppPage==0) ? PAGER_GET_READONLY : 0);
+    assert( rc==SQLITE_OK || pPage==0 );
+    if( rc==SQLITE_OK ){
+      next = get4byte(pPage->aData);
+    }
+  }
+
+  *pPgnoNext = next;
+  if( ppPage ){
+    *ppPage = pPage;
+  }else{
+    releasePage(pPage);
+  }
+  return (rc==SQLITE_DONE ? SQLITE_OK : rc);
+}
+
+/*
+** Copy data from a buffer to a page, or from a page to a buffer.
+**
+** pPayload is a pointer to data stored on database page pDbPage.
+** If argument eOp is false, then nByte bytes of data are copied
+** from pPayload to the buffer pointed at by pBuf. If eOp is true,
+** then sqlite3PagerWrite() is called on pDbPage and nByte bytes
+** of data are copied from the buffer pBuf to pPayload.
+**
+** SQLITE_OK is returned on success, otherwise an error code.
+*/
+static int copyPayload(
+  void *pPayload,           /* Pointer to page data */
+  void *pBuf,               /* Pointer to buffer */
+  int nByte,                /* Number of bytes to copy */
+  int eOp,                  /* 0 -> copy from page, 1 -> copy to page */
+  DbPage *pDbPage           /* Page containing pPayload */
+){
+  if( eOp ){
+    /* Copy data from buffer to page (a write operation) */
+    int rc = sqlite3PagerWrite(pDbPage);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    memcpy(pPayload, pBuf, nByte);
+  }else{
+    /* Copy data from page to buffer (a read operation) */
+    memcpy(pBuf, pPayload, nByte);
+  }
+  return SQLITE_OK;
+}
+
+/*
+** This function is used to read or overwrite payload information
+** for the entry that the pCur cursor is pointing to. The eOp
+** argument is interpreted as follows:
+**
+**   0: The operation is a read. Populate the overflow cache.
+**   1: The operation is a write. Populate the overflow cache.
+**
+** A total of "amt" bytes are read or written beginning at "offset".
+** Data is read to or from the buffer pBuf.
+**
+** The content being read or written might appear on the main page
+** or be scattered out on multiple overflow pages.
+**
+** If the current cursor entry uses one or more overflow pages
+** this function may allocate space for and lazily populate
+** the overflow page-list cache array (BtCursor.aOverflow).
+** Subsequent calls use this cache to make seeking to the supplied offset
+** more efficient.
+**
+** Once an overflow page-list cache has been allocated, it must be
+** invalidated if some other cursor writes to the same table, or if
+** the cursor is moved to a different row. Additionally, in auto-vacuum
+** mode, the following events may invalidate an overflow page-list cache.
+**
+**   * An incremental vacuum,
+**   * A commit in auto_vacuum="full" mode,
+**   * Creating a table (may require moving an overflow page).
+*/
+static int accessPayload(
+  BtCursor *pCur,      /* Cursor pointing to entry to read from */
+  u32 offset,          /* Begin reading this far into payload */
+  u32 amt,             /* Read this many bytes */
+  unsigned char *pBuf, /* Write the bytes into this buffer */
+  int eOp              /* zero to read. non-zero to write. */
+){
+  unsigned char *aPayload;
+  int rc = SQLITE_OK;
+  int iIdx = 0;
+  MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */
+  BtShared *pBt = pCur->pBt;                  /* Btree this cursor belongs to */
+#ifdef SQLITE_DIRECT_OVERFLOW_READ
+  unsigned char * const pBufStart = pBuf;     /* Start of original out buffer */
+#endif
+
+  assert( pPage );
+  assert( eOp==0 || eOp==1 );
+  assert( pCur->eState==CURSOR_VALID );
+  assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
+  assert( cursorHoldsMutex(pCur) );
+
+  getCellInfo(pCur);
+  aPayload = pCur->info.pPayload;
+  assert( offset+amt <= pCur->info.nPayload );
+
+  assert( aPayload > pPage->aData );
+  if( (uptr)(aPayload - pPage->aData) > (pBt->usableSize - pCur->info.nLocal) ){
+    /* Trying to read or write past the end of the data is an error. The
+    ** conditional above is really:
+    **    &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize]
+    ** but is recast into its current form to avoid integer overflow problems
+    */
+    return SQLITE_CORRUPT_BKPT;
+  }
+
+  /* Check if data must be read/written to/from the btree page itself. */
+  if( offset<pCur->info.nLocal ){
+    int a = amt;
+    if( a+offset>pCur->info.nLocal ){
+      a = pCur->info.nLocal - offset;
+    }
+    rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage);
+    offset = 0;
+    pBuf += a;
+    amt -= a;
+  }else{
+    offset -= pCur->info.nLocal;
+  }
+
+
+  if( rc==SQLITE_OK && amt>0 ){
+    const u32 ovflSize = pBt->usableSize - 4;  /* Bytes content per ovfl page */
+    Pgno nextPage;
+
+    nextPage = get4byte(&aPayload[pCur->info.nLocal]);
+
+    /* If the BtCursor.aOverflow[] has not been allocated, allocate it now.
+    **
+    ** The aOverflow[] array is sized at one entry for each overflow page
+    ** in the overflow chain. The page number of the first overflow page is
+    ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array
+    ** means "not yet known" (the cache is lazily populated).
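+    **
+    ** Worked example (illustration only): with usableSize==1024 each
+    ** overflow page carries ovflSize==1020 bytes of content, so an entry
+    ** with nPayload-nLocal==2500 overflow bytes needs
+    ** (2500+1020-1)/1020 == 3 entries, e.g. aOverflow[] = {7, 12, 9}
+    ** once fully populated.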
+ */ + if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){ + int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; + if( nOvfl>pCur->nOvflAlloc ){ + Pgno *aNew = (Pgno*)sqlite3Realloc( + pCur->aOverflow, nOvfl*2*sizeof(Pgno) + ); + if( aNew==0 ){ + return SQLITE_NOMEM_BKPT; + }else{ + pCur->nOvflAlloc = nOvfl*2; + pCur->aOverflow = aNew; + } + } + memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); + pCur->curFlags |= BTCF_ValidOvfl; + }else{ + /* If the overflow page-list cache has been allocated and the + ** entry for the first required overflow page is valid, skip + ** directly to it. + */ + if( pCur->aOverflow[offset/ovflSize] ){ + iIdx = (offset/ovflSize); + nextPage = pCur->aOverflow[iIdx]; + offset = (offset%ovflSize); + } + } + + assert( rc==SQLITE_OK && amt>0 ); + while( nextPage ){ + /* If required, populate the overflow page-list cache. */ + assert( pCur->aOverflow[iIdx]==0 + || pCur->aOverflow[iIdx]==nextPage + || CORRUPT_DB ); + pCur->aOverflow[iIdx] = nextPage; + + if( offset>=ovflSize ){ + /* The only reason to read this page is to obtain the page + ** number for the next page in the overflow chain. The page + ** data is not required. So first try to lookup the overflow + ** page-list cache, if any, then fall back to the getOverflowPage() + ** function. + */ + assert( pCur->curFlags & BTCF_ValidOvfl ); + assert( pCur->pBtree->db==pBt->db ); + if( pCur->aOverflow[iIdx+1] ){ + nextPage = pCur->aOverflow[iIdx+1]; + }else{ + rc = getOverflowPage(pBt, nextPage, 0, &nextPage); + } + offset -= ovflSize; + }else{ + /* Need to read this page properly. It contains some of the + ** range of data that is being read (eOp==0) or written (eOp!=0). + */ +#ifdef SQLITE_DIRECT_OVERFLOW_READ + sqlite3_file *fd; /* File from which to do direct overflow read */ +#endif + int a = amt; + if( a + offset > ovflSize ){ + a = ovflSize - offset; + } + +#ifdef SQLITE_DIRECT_OVERFLOW_READ + /* If all the following are true: + ** + ** 1) this is a read operation, and + ** 2) data is required from the start of this overflow page, and + ** 3) there is no open write-transaction, and + ** 4) the database is file-backed, and + ** 5) the page is not in the WAL file + ** 6) at least 4 bytes have already been read into the output buffer + ** + ** then data can be read directly from the database file into the + ** output buffer, bypassing the page-cache altogether. This speeds + ** up loading large records that span many overflow pages. + */ + if( eOp==0 /* (1) */ + && offset==0 /* (2) */ + && pBt->inTransaction==TRANS_READ /* (3) */ + && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (4) */ + && 0==sqlite3PagerUseWal(pBt->pPager, nextPage) /* (5) */ + && &pBuf[-4]>=pBufStart /* (6) */ + ){ + u8 aSave[4]; + u8 *aWrite = &pBuf[-4]; + assert( aWrite>=pBufStart ); /* due to (6) */ + memcpy(aSave, aWrite, 4); + rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); + nextPage = get4byte(aWrite); + memcpy(aWrite, aSave, 4); + }else +#endif + + { + DbPage *pDbPage; + rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage, + (eOp==0 ? 
PAGER_GET_READONLY : 0)
+        );
+        if( rc==SQLITE_OK ){
+          aPayload = sqlite3PagerGetData(pDbPage);
+          nextPage = get4byte(aPayload);
+          rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage);
+          sqlite3PagerUnref(pDbPage);
+          offset = 0;
+        }
+      }
+      amt -= a;
+      if( amt==0 ) return rc;
+      pBuf += a;
+    }
+    if( rc ) break;
+    iIdx++;
+  }
+  }
+
+  if( rc==SQLITE_OK && amt>0 ){
+    return SQLITE_CORRUPT_BKPT;  /* Overflow chain ends prematurely */
+  }
+  return rc;
+}
+
+/*
+** Read part of the payload for the row at which that cursor pCur is currently
+** pointing. "amt" bytes will be transferred into pBuf[]. The transfer
+** begins at "offset".
+**
+** pCur can be pointing to either a table or an index b-tree.
+** If pointing to a table btree, then the content section is read. If
+** pCur is pointing to an index b-tree then the key section is read.
+**
+** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing
+** to a valid row in the table. For sqlite3BtreePayloadChecked(), the
+** cursor might be invalid or might need to be restored before being read.
+**
+** Return SQLITE_OK on success or an error code if anything goes
+** wrong. An error is returned if "offset+amt" is larger than
+** the available payload.
+*/
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+  assert( cursorHoldsMutex(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] );
+  assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+  return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0);
+}
+
+/*
+** This variant of sqlite3BtreePayload() works even if the cursor is not
+** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read()
+** interface.
+*/
+#ifndef SQLITE_OMIT_INCRBLOB
+static SQLITE_NOINLINE int accessPayloadChecked(
+  BtCursor *pCur,
+  u32 offset,
+  u32 amt,
+  void *pBuf
+){
+  int rc;
+  if ( pCur->eState==CURSOR_INVALID ){
+    return SQLITE_ABORT;
+  }
+  assert( cursorOwnsBtShared(pCur) );
+  rc = btreeRestoreCursorPosition(pCur);
+  return rc ? rc : accessPayload(pCur, offset, amt, pBuf, 0);
+}
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+  if( pCur->eState==CURSOR_VALID ){
+    assert( cursorOwnsBtShared(pCur) );
+    return accessPayload(pCur, offset, amt, pBuf, 0);
+  }else{
+    return accessPayloadChecked(pCur, offset, amt, pBuf);
+  }
+}
+#endif /* SQLITE_OMIT_INCRBLOB */
+
+/*
+** Return a pointer to payload information from the entry that the
+** pCur cursor is pointing to. The pointer is to the beginning of
+** the key for index btrees (pPage->intKey==0) and is the data for
+** table btrees (pPage->intKey==1). The number of bytes of available
+** key/data is written into *pAmt. If *pAmt==0, then the value
+** returned will not be a valid pointer.
+**
+** This routine is an optimization. It is common for the entire key
+** and data to fit on the local page and for there to be no overflow
+** pages. When that is so, this routine can be used to access the
+** key and data without making a copy. If the key and/or data spills
+** onto overflow pages, then accessPayload() must be used to reassemble
+** the key/data and copy it into a preallocated buffer.
+**
+** The pointer returned by this routine looks directly into the cached
+** page of the database. The data might change or move the next time
+** any btree routine is called.
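+**
+** Sketch of the intended usage (the amt and zBuf names are assumptions of
+** this illustration): try the no-copy path first, falling back to
+** sqlite3BtreePayload() when the entry spills onto overflow pages:
+**
+**     u32 avail;
+**     const void *pData = sqlite3BtreePayloadFetch(pCur, &avail);
+**     if( avail<amt ) rc = sqlite3BtreePayload(pCur, 0, amt, zBuf);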
+*/
+static const void *fetchPayload(
+  BtCursor *pCur,      /* Cursor pointing to entry to read from */
+  u32 *pAmt            /* Write the number of available bytes here */
+){
+  u32 amt;
+  assert( pCur!=0 && pCur->iPage>=0 && pCur->apPage[pCur->iPage]);
+  assert( pCur->eState==CURSOR_VALID );
+  assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+  assert( pCur->info.nSize>0 );
+  assert( pCur->info.pPayload>pCur->apPage[pCur->iPage]->aData || CORRUPT_DB );
+  assert( pCur->info.pPayload<pCur->apPage[pCur->iPage]->aDataEnd ||CORRUPT_DB);
+  amt = (int)(pCur->apPage[pCur->iPage]->aDataEnd - pCur->info.pPayload);
+  if( pCur->info.nLocal<amt ) amt = pCur->info.nLocal;
+  *pAmt = amt;
+  return (void*)pCur->info.pPayload;
+}
+
+
+/*
+** For the entry that cursor pCur is pointing to, return as
+** many bytes of the key or data as are available on the local
+** b-tree page. Write the number of available bytes into *pAmt.
+**
+** The pointer returned is ephemeral. The key/data may move
+** or be destroyed on the next call to any Btree routine,
+** including calls from other threads against the same cache.
+** Hence, a mutex on the BtShared should be held prior to calling
+** this routine.
+**
+** This routine is used to get quick access to key and data
+** in the common case where no overflow pages are used.
+*/
+SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){
+  return fetchPayload(pCur, pAmt);
+}
+
+
+/*
+** Move the cursor down to a new child page. The newPgno argument is the
+** page number of the child page to move to.
+**
+** This function returns SQLITE_CORRUPT if the page-header flags field of
+** the new child page does not match the flags field of the parent (i.e.
+** if an intkey page appears to be the parent of a non-intkey page, or
+** vice-versa).
+*/
+static int moveToChild(BtCursor *pCur, u32 newPgno){
+  BtShared *pBt = pCur->pBt;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
+  assert( pCur->iPage>=0 );
+  if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
+    return SQLITE_CORRUPT_BKPT;
+  }
+  pCur->info.nSize = 0;
+  pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+  pCur->iPage++;
+  pCur->aiIdx[pCur->iPage] = 0;
+  return getAndInitPage(pBt, newPgno, &pCur->apPage[pCur->iPage],
+                        pCur, pCur->curPagerFlags);
+}
+
+#if SQLITE_DEBUG
+/*
+** Page pParent is an internal (non-leaf) tree page. This function
+** asserts that page number iChild is the left-child of the iIdx'th
+** cell in page pParent. Or, if iIdx is equal to the total number of
+** cells in pParent, that page number iChild is the right-child of
+** the page.
+*/
+static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){
+  if( CORRUPT_DB ) return;  /* The conditions tested below might not be true
+                            ** in a corrupt database */
+  assert( iIdx<=pParent->nCell );
+  if( iIdx==pParent->nCell ){
+    assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild );
+  }else{
+    assert( get4byte(findCell(pParent, iIdx))==iChild );
+  }
+}
+#else
+# define assertParentIndex(x,y,z)
+#endif
+
+/*
+** Move the cursor up to the parent page.
+**
+** pCur->idx is set to the cell index that contains the pointer
+** to the page we are coming from. If we are coming from the
+** right-most child page then pCur->idx is set to one more than
+** the largest cell index.
+*/ +static void moveToParent(BtCursor *pCur){ + assert( cursorOwnsBtShared(pCur) ); + assert( pCur->eState==CURSOR_VALID ); + assert( pCur->iPage>0 ); + assert( pCur->apPage[pCur->iPage] ); + assertParentIndex( + pCur->apPage[pCur->iPage-1], + pCur->aiIdx[pCur->iPage-1], + pCur->apPage[pCur->iPage]->pgno + ); + testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell ); + pCur->info.nSize = 0; + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); + releasePageNotNull(pCur->apPage[pCur->iPage--]); +} + +/* +** Move the cursor to point to the root page of its b-tree structure. +** +** If the table has a virtual root page, then the cursor is moved to point +** to the virtual root page instead of the actual root page. A table has a +** virtual root page when the actual root page contains no cells and a +** single child page. This can only happen with the table rooted at page 1. +** +** If the b-tree structure is empty, the cursor state is set to +** CURSOR_INVALID. Otherwise, the cursor is set to point to the first +** cell located on the root (or virtual root) page and the cursor state +** is set to CURSOR_VALID. +** +** If this function returns successfully, it may be assumed that the +** page-header flags indicate that the [virtual] root-page is the expected +** kind of b-tree page (i.e. if when opening the cursor the caller did not +** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D, +** indicating a table b-tree, or if the caller did specify a KeyInfo +** structure the flags byte is set to 0x02 or 0x0A, indicating an index +** b-tree). +*/ +static int moveToRoot(BtCursor *pCur){ + MemPage *pRoot; + int rc = SQLITE_OK; + + assert( cursorOwnsBtShared(pCur) ); + assert( CURSOR_INVALID < CURSOR_REQUIRESEEK ); + assert( CURSOR_VALID < CURSOR_REQUIRESEEK ); + assert( CURSOR_FAULT > CURSOR_REQUIRESEEK ); + if( pCur->eState>=CURSOR_REQUIRESEEK ){ + if( pCur->eState==CURSOR_FAULT ){ + assert( pCur->skipNext!=SQLITE_OK ); + return pCur->skipNext; + } + sqlite3BtreeClearCursor(pCur); + } + + if( pCur->iPage>=0 ){ + if( pCur->iPage ){ + do{ + assert( pCur->apPage[pCur->iPage]!=0 ); + releasePageNotNull(pCur->apPage[pCur->iPage--]); + }while( pCur->iPage); + goto skip_init; + } + }else if( pCur->pgnoRoot==0 ){ + pCur->eState = CURSOR_INVALID; + return SQLITE_OK; + }else{ + assert( pCur->iPage==(-1) ); + rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->apPage[0], + 0, pCur->curPagerFlags); + if( rc!=SQLITE_OK ){ + pCur->eState = CURSOR_INVALID; + return rc; + } + pCur->iPage = 0; + pCur->curIntKey = pCur->apPage[0]->intKey; + } + pRoot = pCur->apPage[0]; + assert( pRoot->pgno==pCur->pgnoRoot ); + + /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor + ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is + ** NULL, the caller expects a table b-tree. If this is not the case, + ** return an SQLITE_CORRUPT error. + ** + ** Earlier versions of SQLite assumed that this test could not fail + ** if the root page was already loaded when this function was called (i.e. + ** if pCur->iPage>=0). But this is not so if the database is corrupted + ** in such a way that page pRoot is linked into a second b-tree table + ** (or the freelist). 
*/
+  assert( pRoot->intKey==1 || pRoot->intKey==0 );
+  if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){
+    return SQLITE_CORRUPT_BKPT;
+  }
+
+skip_init:
+  pCur->aiIdx[0] = 0;
+  pCur->info.nSize = 0;
+  pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl);
+
+  pRoot = pCur->apPage[0];
+  if( pRoot->nCell>0 ){
+    pCur->eState = CURSOR_VALID;
+  }else if( !pRoot->leaf ){
+    Pgno subpage;
+    if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT;
+    subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]);
+    pCur->eState = CURSOR_VALID;
+    rc = moveToChild(pCur, subpage);
+  }else{
+    pCur->eState = CURSOR_INVALID;
+  }
+  return rc;
+}
+
+/*
+** Move the cursor down to the left-most leaf entry beneath the
+** entry to which it is currently pointing.
+**
+** The left-most leaf is the one with the smallest key - the first
+** in ascending order.
+*/
+static int moveToLeftmost(BtCursor *pCur){
+  Pgno pgno;
+  int rc = SQLITE_OK;
+  MemPage *pPage;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){
+    assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
+    pgno = get4byte(findCell(pPage, pCur->aiIdx[pCur->iPage]));
+    rc = moveToChild(pCur, pgno);
+  }
+  return rc;
+}
+
+/*
+** Move the cursor down to the right-most leaf entry beneath the
+** page to which it is currently pointing. Notice the difference
+** between moveToLeftmost() and moveToRightmost(). moveToLeftmost()
+** finds the left-most entry beneath the *entry* whereas moveToRightmost()
+** finds the right-most entry beneath the *page*.
+**
+** The right-most entry is the one with the largest key - the last
+** key in ascending order.
+*/
+static int moveToRightmost(BtCursor *pCur){
+  Pgno pgno;
+  int rc = SQLITE_OK;
+  MemPage *pPage = 0;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pCur->eState==CURSOR_VALID );
+  while( !(pPage = pCur->apPage[pCur->iPage])->leaf ){
+    pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+    pCur->aiIdx[pCur->iPage] = pPage->nCell;
+    rc = moveToChild(pCur, pgno);
+    if( rc ) return rc;
+  }
+  pCur->aiIdx[pCur->iPage] = pPage->nCell-1;
+  assert( pCur->info.nSize==0 );
+  assert( (pCur->curFlags & BTCF_ValidNKey)==0 );
+  return SQLITE_OK;
+}
+
+/* Move the cursor to the first entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
+  int rc;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+  rc = moveToRoot(pCur);
+  if( rc==SQLITE_OK ){
+    if( pCur->eState==CURSOR_INVALID ){
+      assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
+      *pRes = 1;
+    }else{
+      assert( pCur->apPage[pCur->iPage]->nCell>0 );
+      *pRes = 0;
+      rc = moveToLeftmost(pCur);
+    }
+  }
+  return rc;
+}
+
+/* Move the cursor to the last entry in the table. Return SQLITE_OK
+** on success. Set *pRes to 0 if the cursor actually points to something
+** or set *pRes to 1 if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
+  int rc;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+
+  /* If the cursor already points to the last entry, this is a no-op. */
+  if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){
+#ifdef SQLITE_DEBUG
+    /* This block serves to assert() that the cursor really does point
+    ** to the last entry in the b-tree.
*/
+    int ii;
+    for(ii=0; ii<pCur->iPage; ii++){
+      assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell );
+    }
+    assert( pCur->aiIdx[pCur->iPage]==pCur->apPage[pCur->iPage]->nCell-1 );
+    assert( pCur->apPage[pCur->iPage]->leaf );
+#endif
+    return SQLITE_OK;
+  }
+
+  rc = moveToRoot(pCur);
+  if( rc==SQLITE_OK ){
+    if( CURSOR_INVALID==pCur->eState ){
+      assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
+      *pRes = 1;
+    }else{
+      assert( pCur->eState==CURSOR_VALID );
+      *pRes = 0;
+      rc = moveToRightmost(pCur);
+      if( rc==SQLITE_OK ){
+        pCur->curFlags |= BTCF_AtLast;
+      }else{
+        pCur->curFlags &= ~BTCF_AtLast;
+      }
+
+    }
+  }
+  return rc;
+}
+
+/* Move the cursor so that it points to an entry near the key
+** specified by pIdxKey or intKey. Return a success code.
+**
+** For INTKEY tables, the intKey parameter is used. pIdxKey
+** must be NULL. For index tables, pIdxKey is used and intKey
+** is ignored.
+**
+** If an exact match is not found, then the cursor is always
+** left pointing at a leaf page which would hold the entry if it
+** were present. The cursor might point to an entry that comes
+** before or after the key.
+**
+** An integer is written into *pRes which is the result of
+** comparing the key with the entry to which the cursor is
+** pointing. The meaning of the integer written into
+** *pRes is as follows:
+**
+**     *pRes<0      The cursor is left pointing at an entry that
+**                  is smaller than intKey/pIdxKey or if the table is empty
+**                  and the cursor is therefore left pointing to nothing.
+**
+**     *pRes==0     The cursor is left pointing at an entry that
+**                  exactly matches intKey/pIdxKey.
+**
+**     *pRes>0      The cursor is left pointing at an entry that
+**                  is larger than intKey/pIdxKey.
+**
+** For index tables, the pIdxKey->eqSeen field is set to 1 if there
+** exists an entry in the table that exactly matches pIdxKey.
+*/
+SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
+  BtCursor *pCur,          /* The cursor to be moved */
+  UnpackedRecord *pIdxKey, /* Unpacked index key */
+  i64 intKey,              /* The table key */
+  int biasRight,           /* If true, bias the search to the high end */
+  int *pRes                /* Write search results here */
+){
+  int rc;
+  RecordCompare xRecordCompare;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
+  assert( pRes );
+  assert( (pIdxKey==0)==(pCur->pKeyInfo==0) );
+  assert( pCur->eState!=CURSOR_VALID || (pIdxKey==0)==(pCur->curIntKey!=0) );
+
+  /* If the cursor is already positioned at the point we are trying
+  ** to move to, then just return without doing any work */
+  if( pIdxKey==0
+   && pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0
+  ){
+    if( pCur->info.nKey==intKey ){
+      *pRes = 0;
+      return SQLITE_OK;
+    }
+    if( pCur->info.nKey<intKey ){
+      if( (pCur->curFlags & BTCF_AtLast)!=0 ){
+        *pRes = -1;
+        return SQLITE_OK;
+      }
+      /* If the requested key is one more than the previous key, then
+      ** try to get there using sqlite3BtreeNext() rather than a full
+      ** binary search. This is an optimization only.
The correct answer
+      ** is still obtained without this case, only a little more slowly */
+      if( pCur->info.nKey+1==intKey && !pCur->skipNext ){
+        *pRes = 0;
+        rc = sqlite3BtreeNext(pCur, pRes);
+        if( rc ) return rc;
+        if( *pRes==0 ){
+          getCellInfo(pCur);
+          if( pCur->info.nKey==intKey ){
+            return SQLITE_OK;
+          }
+        }
+      }
+    }
+  }
+
+  if( pIdxKey ){
+    xRecordCompare = sqlite3VdbeFindCompare(pIdxKey);
+    pIdxKey->errCode = 0;
+    assert( pIdxKey->default_rc==1
+         || pIdxKey->default_rc==0
+         || pIdxKey->default_rc==-1
+    );
+  }else{
+    xRecordCompare = 0; /* All keys are integers */
+  }
+
+  rc = moveToRoot(pCur);
+  if( rc ){
+    return rc;
+  }
+  assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage] );
+  assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->isInit );
+  assert( pCur->eState==CURSOR_INVALID || pCur->apPage[pCur->iPage]->nCell>0 );
+  if( pCur->eState==CURSOR_INVALID ){
+    *pRes = -1;
+    assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 );
+    return SQLITE_OK;
+  }
+  assert( pCur->apPage[0]->intKey==pCur->curIntKey );
+  assert( pCur->curIntKey || pIdxKey );
+  for(;;){
+    int lwr, upr, idx, c;
+    Pgno chldPg;
+    MemPage *pPage = pCur->apPage[pCur->iPage];
+    u8 *pCell;                          /* Pointer to current cell in pPage */
+
+    /* pPage->nCell must be greater than zero. If this is the root-page
+    ** the cursor would have been INVALID above and this for(;;) loop
+    ** not run. If this is not the root-page, then the moveToChild() routine
+    ** would have already detected db corruption. Similarly, pPage must
+    ** be the right kind (index or table) of b-tree page. Otherwise
+    ** a moveToChild() or moveToRoot() call would have detected corruption. */
+    assert( pPage->nCell>0 );
+    assert( pPage->intKey==(pIdxKey==0) );
+    lwr = 0;
+    upr = pPage->nCell-1;
+    assert( biasRight==0 || biasRight==1 );
+    idx = upr>>(1-biasRight);  /* idx = biasRight ? upr : (lwr+upr)/2; */
+    pCur->aiIdx[pCur->iPage] = (u16)idx;
+    if( xRecordCompare==0 ){
+      for(;;){
+        i64 nCellKey;
+        pCell = findCellPastPtr(pPage, idx);
+        if( pPage->intKeyLeaf ){
+          while( 0x80 <= *(pCell++) ){
+            if( pCell>=pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT;
+          }
+        }
+        getVarint(pCell, (u64*)&nCellKey);
+        if( nCellKey<intKey ){
+          lwr = idx+1;
+          if( lwr>upr ){ c = -1; break; }
+        }else if( nCellKey>intKey ){
+          upr = idx-1;
+          if( lwr>upr ){ c = +1; break; }
+        }else{
+          assert( nCellKey==intKey );
+          pCur->aiIdx[pCur->iPage] = (u16)idx;
+          if( !pPage->leaf ){
+            lwr = idx;
+            goto moveto_next_layer;
+          }else{
+            pCur->curFlags |= BTCF_ValidNKey;
+            pCur->info.nKey = nCellKey;
+            pCur->info.nSize = 0;
+            *pRes = 0;
+            return SQLITE_OK;
+          }
+        }
+        assert( lwr+upr>=0 );
+        idx = (lwr+upr)>>1;  /* idx = (lwr+upr)/2; */
+      }
+    }else{
+      for(;;){
+        int nCell;  /* Size of the pCell cell in bytes */
+        pCell = findCellPastPtr(pPage, idx);
+
+        /* The maximum supported page-size is 65536 bytes. This means that
+        ** the maximum number of record bytes stored on an index B-Tree
+        ** page is less than 16384 bytes and may be stored as a 2-byte
+        ** varint. This information is used to attempt to avoid parsing
+        ** the entire cell by checking for the cases where the record is
+        ** stored entirely within the b-tree page by inspecting the first
+        ** 2 bytes of the cell.
+        */
+        nCell = pCell[0];
+        if( nCell<=pPage->max1bytePayload ){
+          /* This branch runs if the record-size field of the cell is a
+          ** single byte varint and the record fits entirely on the main
+          ** b-tree page.
+          testcase( pCell+nCell+1==pPage->aDataEnd );
+          c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey);
+        }else if( !(pCell[1] & 0x80)
+          && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal
+        ){
+          /* The record-size field is a 2 byte varint and the record
+          ** fits entirely on the main b-tree page.  */
+          testcase( pCell+nCell+2==pPage->aDataEnd );
+          c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey);
+        }else{
+          /* The record flows over onto one or more overflow pages. In
+          ** this case the whole cell needs to be parsed, a buffer allocated
+          ** and accessPayload() used to retrieve the record into the
+          ** buffer before VdbeRecordCompare() can be called.
+          **
+          ** If the record is corrupt, the xRecordCompare routine may read
+          ** up to two varints past the end of the buffer. An extra 18
+          ** bytes of padding is allocated at the end of the buffer in
+          ** case this happens.  */
+          void *pCellKey;
+          u8 * const pCellBody = pCell - pPage->childPtrSize;
+          pPage->xParseCell(pPage, pCellBody, &pCur->info);
+          nCell = (int)pCur->info.nKey;
+          testcase( nCell<0 );   /* True if key size is 2^32 or more */
+          testcase( nCell==0 );  /* Invalid key size:  0x80 0x80 0x00 */
+          testcase( nCell==1 );  /* Invalid key size:  0x80 0x80 0x01 */
+          testcase( nCell==2 );  /* Minimum legal index key size */
+          if( nCell<2 ){
+            rc = SQLITE_CORRUPT_BKPT;
+            goto moveto_finish;
+          }
+          pCellKey = sqlite3Malloc( nCell+18 );
+          if( pCellKey==0 ){
+            rc = SQLITE_NOMEM_BKPT;
+            goto moveto_finish;
+          }
+          pCur->aiIdx[pCur->iPage] = (u16)idx;
+          rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0);
+          pCur->curFlags &= ~BTCF_ValidOvfl;
+          if( rc ){
+            sqlite3_free(pCellKey);
+            goto moveto_finish;
+          }
+          c = xRecordCompare(nCell, pCellKey, pIdxKey);
+          sqlite3_free(pCellKey);
+        }
+        assert(
+            (pIdxKey->errCode!=SQLITE_CORRUPT || c==0)
+         && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed)
+        );
+        if( c<0 ){
+          lwr = idx+1;
+        }else if( c>0 ){
+          upr = idx-1;
+        }else{
+          assert( c==0 );
+          *pRes = 0;
+          rc = SQLITE_OK;
+          pCur->aiIdx[pCur->iPage] = (u16)idx;
+          if( pIdxKey->errCode ) rc = SQLITE_CORRUPT;
+          goto moveto_finish;
+        }
+        if( lwr>upr ) break;
+        assert( lwr+upr>=0 );
+        idx = (lwr+upr)>>1;  /* idx = (lwr+upr)/2 */
+      }
+    }
+    assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) );
+    assert( pPage->isInit );
+    if( pPage->leaf ){
+      assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
+      pCur->aiIdx[pCur->iPage] = (u16)idx;
+      *pRes = c;
+      rc = SQLITE_OK;
+      goto moveto_finish;
+    }
+moveto_next_layer:
+    if( lwr>=pPage->nCell ){
+      chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+    }else{
+      chldPg = get4byte(findCell(pPage, lwr));
+    }
+    pCur->aiIdx[pCur->iPage] = (u16)lwr;
+    rc = moveToChild(pCur, chldPg);
+    if( rc ) break;
+  }
+moveto_finish:
+  pCur->info.nSize = 0;
+  assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
+  return rc;
+}
+
+
+/*
+** Return TRUE if the cursor is not pointing at an entry of the table.
+**
+** TRUE will be returned after a call to sqlite3BtreeNext() moves
+** past the last entry in the table or sqlite3BtreePrev() moves past
+** the first entry.  TRUE is also returned if the table is empty.
+*/
+SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor *pCur){
+  /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries
+  ** have been deleted? This API will need to change to return an error code
+  ** as well as the boolean result value.
+  */
+  return (CURSOR_VALID!=pCur->eState);
+}
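/* [Editor's note -- illustrative sketch, not part of the patch above.
** The integer-key branch of sqlite3BtreeMovetoUnpacked() is a binary
** search whose first probe is biased toward the high end of the page
** when biasRight!=0 (idx = upr>>(1-biasRight)), because appends to the
** end of a table are the common case.  The standalone function below
** reproduces just that search shape over a plain array; all names are
** hypothetical. */
#include <stdint.h>

/* Search aKey[0..n-1] (sorted ascending) for key.  On an exact match,
** sets *pRes=0; otherwise sets *pRes<0 or *pRes>0 relative to the last
** probed entry, mirroring the cursor contract documented above. */
static int toyBiasedSearch(const int64_t *aKey, int n, int64_t key,
                           int biasRight, int *pRes){
  int lwr = 0, upr = n-1, idx;
  if( n<=0 ){ *pRes = -1; return 0; }      /* empty page */
  idx = upr>>(1-biasRight);                /* biasRight ? upr : upr/2 */
  for(;;){
    if( aKey[idx]<key ){
      lwr = idx+1;
      if( lwr>upr ){ *pRes = -1; return idx; }
    }else if( aKey[idx]>key ){
      upr = idx-1;
      if( lwr>upr ){ *pRes = +1; return idx; }
    }else{
      *pRes = 0;
      return idx;
    }
    idx = (lwr+upr)>>1;
  }
}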
+/*
+** Advance the cursor to the next entry in the database.  If
+** successful then set *pRes=0.  If the cursor
+** was already pointing to the last entry in the database before
+** this routine was called, then set *pRes=1.
+**
+** The main entry point is sqlite3BtreeNext().  That routine is optimized
+** for the common case of merely incrementing the cell counter BtCursor.aiIdx
+** to the next cell on the current page.  The (slower) btreeNext() helper
+** routine is called when it is necessary to move to a different page or
+** to restore the cursor.
+**
+** The calling function will set *pRes to 0 or 1.  The initial *pRes value
+** will be 1 if the cursor being stepped corresponds to an SQL index and
+** if this routine could have been skipped if that SQL index had been
+** a unique index.  Otherwise the caller will have set *pRes to zero.
+** Zero is the common case. The btree implementation is free to use the
+** initial *pRes value as a hint to improve performance, but the current
+** SQLite btree implementation does not. (Note that the comdb2 btree
+** implementation does use this hint, however.)
+*/
+static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){
+  int rc;
+  int idx;
+  MemPage *pPage;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+  assert( *pRes==0 );
+  if( pCur->eState!=CURSOR_VALID ){
+    assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
+    rc = restoreCursorPosition(pCur);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    if( CURSOR_INVALID==pCur->eState ){
+      *pRes = 1;
+      return SQLITE_OK;
+    }
+    if( pCur->skipNext ){
+      assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT );
+      pCur->eState = CURSOR_VALID;
+      if( pCur->skipNext>0 ){
+        pCur->skipNext = 0;
+        return SQLITE_OK;
+      }
+      pCur->skipNext = 0;
+    }
+  }
+
+  pPage = pCur->apPage[pCur->iPage];
+  idx = ++pCur->aiIdx[pCur->iPage];
+  assert( pPage->isInit );
+
+  /* If the database file is corrupt, it is possible for the value of idx
+  ** to be invalid here. This can only occur if a second cursor modifies
+  ** the page while cursor pCur is holding a reference to it. Which can
+  ** only happen if the database is corrupt in such a way as to link the
+  ** page into more than one b-tree structure. */
+  testcase( idx>pPage->nCell );
+
+  if( idx>=pPage->nCell ){
+    if( !pPage->leaf ){
+      rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
+      if( rc ) return rc;
+      return moveToLeftmost(pCur);
+    }
+    do{
+      if( pCur->iPage==0 ){
+        *pRes = 1;
+        pCur->eState = CURSOR_INVALID;
+        return SQLITE_OK;
+      }
+      moveToParent(pCur);
+      pPage = pCur->apPage[pCur->iPage];
+    }while( pCur->aiIdx[pCur->iPage]>=pPage->nCell );
+    if( pPage->intKey ){
+      return sqlite3BtreeNext(pCur, pRes);
+    }else{
+      return SQLITE_OK;
+    }
+  }
+  if( pPage->leaf ){
+    return SQLITE_OK;
+  }else{
+    return moveToLeftmost(pCur);
+  }
+}
+SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int *pRes){
+  MemPage *pPage;
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pRes!=0 );
+  assert( *pRes==0 || *pRes==1 );
+  assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+  pCur->info.nSize = 0;
+  pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+  *pRes = 0;
+  if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur, pRes);
+  pPage = pCur->apPage[pCur->iPage];
+  if( (++pCur->aiIdx[pCur->iPage])>=pPage->nCell ){
+    pCur->aiIdx[pCur->iPage]--;
+    return btreeNext(pCur, pRes);
+  }
+  if( pPage->leaf ){
+    return SQLITE_OK;
+  }else{
+    return moveToLeftmost(pCur);
+  }
+}
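/* [Editor's note -- illustrative sketch, not part of the patch above.
** sqlite3BtreeNext() keeps the hot path (bump the cell index on the
** current page) inline and calls the SQLITE_NOINLINE helper only when a
** page boundary or a saved-position restore is involved.  The
** hypothetical pair below shows the same structuring idiom. */
typedef struct ToyCursor { int iCell; int nCell; } ToyCursor;

/* Rare path: deliberately kept out-of-line so that the fast path stays
** small enough to inline into its callers. */
static int toyNextSlow(ToyCursor *p){
  /* ...move to the next page, restore a saved position, etc... */
  (void)p;
  return 1;   /* say: no more entries */
}

static int toyNext(ToyCursor *p){
  if( ++p->iCell < p->nCell ) return 0;   /* common case: same page */
  p->iCell--;
  return toyNextSlow(p);                  /* uncommon case */
}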
+/*
+** Step the cursor back to the previous entry in the database.  If
+** successful then set *pRes=0.  If the cursor
+** was already pointing to the first entry in the database before
+** this routine was called, then set *pRes=1.
+**
+** The main entry point is sqlite3BtreePrevious().  That routine is optimized
+** for the common case of merely decrementing the cell counter BtCursor.aiIdx
+** to the previous cell on the current page.  The (slower) btreePrevious()
+** helper routine is called when it is necessary to move to a different page
+** or to restore the cursor.
+**
+** The calling function will set *pRes to 0 or 1.  The initial *pRes value
+** will be 1 if the cursor being stepped corresponds to an SQL index and
+** if this routine could have been skipped if that SQL index had been
+** a unique index.  Otherwise the caller will have set *pRes to zero.
+** Zero is the common case. The btree implementation is free to use the
+** initial *pRes value as a hint to improve performance, but the current
+** SQLite btree implementation does not. (Note that the comdb2 btree
+** implementation does use this hint, however.)
+*/
+static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
+  int rc;
+  MemPage *pPage;
+
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pRes!=0 );
+  assert( *pRes==0 );
+  assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+  assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 );
+  assert( pCur->info.nSize==0 );
+  if( pCur->eState!=CURSOR_VALID ){
+    rc = restoreCursorPosition(pCur);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+    if( CURSOR_INVALID==pCur->eState ){
+      *pRes = 1;
+      return SQLITE_OK;
+    }
+    if( pCur->skipNext ){
+      assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT );
+      pCur->eState = CURSOR_VALID;
+      if( pCur->skipNext<0 ){
+        pCur->skipNext = 0;
+        return SQLITE_OK;
+      }
+      pCur->skipNext = 0;
+    }
+  }
+
+  pPage = pCur->apPage[pCur->iPage];
+  assert( pPage->isInit );
+  if( !pPage->leaf ){
+    int idx = pCur->aiIdx[pCur->iPage];
+    rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
+    if( rc ) return rc;
+    rc = moveToRightmost(pCur);
+  }else{
+    while( pCur->aiIdx[pCur->iPage]==0 ){
+      if( pCur->iPage==0 ){
+        pCur->eState = CURSOR_INVALID;
+        *pRes = 1;
+        return SQLITE_OK;
+      }
+      moveToParent(pCur);
+    }
+    assert( pCur->info.nSize==0 );
+    assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 );
+
+    pCur->aiIdx[pCur->iPage]--;
+    pPage = pCur->apPage[pCur->iPage];
+    if( pPage->intKey && !pPage->leaf ){
+      rc = sqlite3BtreePrevious(pCur, pRes);
+    }else{
+      rc = SQLITE_OK;
+    }
+  }
+  return rc;
+}
+SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){
+  assert( cursorOwnsBtShared(pCur) );
+  assert( pRes!=0 );
+  assert( *pRes==0 || *pRes==1 );
+  assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID );
+  *pRes = 0;
+  pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey);
+  pCur->info.nSize = 0;
+  if( pCur->eState!=CURSOR_VALID
+   || pCur->aiIdx[pCur->iPage]==0
+   || pCur->apPage[pCur->iPage]->leaf==0
+  ){
+    return btreePrevious(pCur, pRes);
+  }
+  pCur->aiIdx[pCur->iPage]--;
+  return SQLITE_OK;
+}
+
+/*
+** Allocate a new page from the database file.
+**
+** The new page is marked as dirty.  (In other words, sqlite3PagerWrite()
+** has already been called on the new page.)  The new page has also
+** been referenced and the calling routine is responsible for calling
+** sqlite3PagerUnref() on the new page when it is done.
+**
+** SQLITE_OK is returned on success.  Any other return value indicates
+** an error.  *ppPage is set to NULL in the event of an error.
+**
+** If the "nearby" parameter is not 0, then an effort is made to
+** locate a page close to the page number "nearby".  This can be used in an
+** attempt to keep related pages close to each other in the database file,
+** which in turn can make database access faster.
+**
+** If the eMode parameter is BTALLOC_EXACT and the nearby page exists
+** anywhere on the free-list, then it is guaranteed to be returned.  If
+** eMode is BTALLOC_LE then the page returned will be less than or equal
+** to nearby if any such page exists.  If eMode is BTALLOC_ANY then there
+** are no restrictions on which page is returned.
+*/
+static int allocateBtreePage(
+  BtShared *pBt,         /* The btree */
+  MemPage **ppPage,      /* Store pointer to the allocated page here */
+  Pgno *pPgno,           /* Store the page number here */
+  Pgno nearby,           /* Search for a page near this one */
+  u8 eMode               /* BTALLOC_EXACT, BTALLOC_LE, or BTALLOC_ANY */
+){
+  MemPage *pPage1;
+  int rc;
+  u32 n;     /* Number of pages on the freelist */
+  u32 k;     /* Number of leaves on the trunk of the freelist */
+  MemPage *pTrunk = 0;
+  MemPage *pPrevTrunk = 0;
+  Pgno mxPage;     /* Total size of the database file */
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) );
+  pPage1 = pBt->pPage1;
+  mxPage = btreePagecount(pBt);
+  /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36
+  ** stores the total number of pages on the freelist. */
+  n = get4byte(&pPage1->aData[36]);
+  testcase( n==mxPage-1 );
+  if( n>=mxPage ){
+    return SQLITE_CORRUPT_BKPT;
+  }
+  if( n>0 ){
+    /* There are pages on the freelist.  Reuse one of those pages. */
+    Pgno iTrunk;
+    u8 searchList = 0; /* If the free-list must be searched for 'nearby' */
+    u32 nSearch = 0;   /* Count of the number of search attempts */
+
+    /* If eMode==BTALLOC_EXACT and a query of the pointer-map
+    ** shows that the page 'nearby' is somewhere on the free-list, then
+    ** the entire list will be searched for that page.
+    */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( eMode==BTALLOC_EXACT ){
+      if( nearby<=mxPage ){
+        u8 eType;
+        assert( nearby>0 );
+        assert( pBt->autoVacuum );
+        rc = ptrmapGet(pBt, nearby, &eType, 0);
+        if( rc ) return rc;
+        if( eType==PTRMAP_FREEPAGE ){
+          searchList = 1;
+        }
+      }
+    }else if( eMode==BTALLOC_LE ){
+      searchList = 1;
+    }
+#endif
+
+    /* Decrement the free-list count by 1. Set iTrunk to the index of the
+    ** first free-list trunk page. iPrevTrunk is initially 1.
+    */
+    rc = sqlite3PagerWrite(pPage1->pDbPage);
+    if( rc ) return rc;
+    put4byte(&pPage1->aData[36], n-1);
+
+    /* The code within this loop is run only once if the 'searchList' variable
+    ** is not true. Otherwise, it runs once for each trunk-page on the
+    ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT)
+    ** or until a page less than 'nearby' is located (eMode==BTALLOC_LE)
+    */
+    do {
+      pPrevTrunk = pTrunk;
+      if( pPrevTrunk ){
+        /* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page
+        ** is the page number of the next freelist trunk page in the list or
+        ** zero if this is the last freelist trunk page. */
+        iTrunk = get4byte(&pPrevTrunk->aData[0]);
+      }else{
+        /* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32
+        ** stores the page number of the first page of the freelist, or zero if
+        ** the freelist is empty. */
+        iTrunk = get4byte(&pPage1->aData[32]);
+      }
+      testcase( iTrunk==mxPage );
+      if( iTrunk>mxPage || nSearch++ > n ){
+        rc = SQLITE_CORRUPT_BKPT;
+      }else{
+        rc = btreeGetUnusedPage(pBt, iTrunk, &pTrunk, 0);
+      }
+      if( rc ){
+        pTrunk = 0;
+        goto end_allocate_page;
+      }
+      assert( pTrunk!=0 );
+      assert( pTrunk->aData!=0 );
+      /* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page
+      ** is the number of leaf page pointers to follow. */
+      k = get4byte(&pTrunk->aData[4]);
+      if( k==0 && !searchList ){
+        /* The trunk has no leaves and the list is not being searched.
+        ** So extract the trunk page itself and use it as the newly
+        ** allocated page */
+        assert( pPrevTrunk==0 );
+        rc = sqlite3PagerWrite(pTrunk->pDbPage);
+        if( rc ){
+          goto end_allocate_page;
+        }
+        *pPgno = iTrunk;
+        memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4);
+        *ppPage = pTrunk;
+        pTrunk = 0;
+        TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+      }else if( k>(u32)(pBt->usableSize/4 - 2) ){
+        /* Value of k is out of range.  Database corruption */
+        rc = SQLITE_CORRUPT_BKPT;
+        goto end_allocate_page;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      }else if( searchList
+            && (nearby==iTrunk || (iTrunk<nearby && eMode==BTALLOC_LE))
+      ){
+        /* The list is being searched and this trunk page is the page
+        ** to allocate, regardless of whether it has leaves.
+        */
+        *pPgno = iTrunk;
+        *ppPage = pTrunk;
+        searchList = 0;
+        rc = sqlite3PagerWrite(pTrunk->pDbPage);
+        if( rc ){
+          goto end_allocate_page;
+        }
+        if( k==0 ){
+          if( !pPrevTrunk ){
+            memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4);
+          }else{
+            rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+            if( rc!=SQLITE_OK ){
+              goto end_allocate_page;
+            }
+            memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4);
+          }
+        }else{
+          /* The trunk page is required by the caller but it contains
+          ** pointers to free-list leaves. The first leaf becomes a trunk
+          ** page in this case.
+          */
+          MemPage *pNewTrunk;
+          Pgno iNewTrunk = get4byte(&pTrunk->aData[8]);
+          if( iNewTrunk>mxPage ){
+            rc = SQLITE_CORRUPT_BKPT;
+            goto end_allocate_page;
+          }
+          testcase( iNewTrunk==mxPage );
+          rc = btreeGetUnusedPage(pBt, iNewTrunk, &pNewTrunk, 0);
+          if( rc!=SQLITE_OK ){
+            goto end_allocate_page;
+          }
+          rc = sqlite3PagerWrite(pNewTrunk->pDbPage);
+          if( rc!=SQLITE_OK ){
+            releasePage(pNewTrunk);
+            goto end_allocate_page;
+          }
+          memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4);
+          put4byte(&pNewTrunk->aData[4], k-1);
+          memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4);
+          releasePage(pNewTrunk);
+          if( !pPrevTrunk ){
+            assert( sqlite3PagerIswriteable(pPage1->pDbPage) );
+            put4byte(&pPage1->aData[32], iNewTrunk);
+          }else{
+            rc = sqlite3PagerWrite(pPrevTrunk->pDbPage);
+            if( rc ){
+              goto end_allocate_page;
+            }
+            put4byte(&pPrevTrunk->aData[0], iNewTrunk);
+          }
+        }
+        pTrunk = 0;
+        TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1));
+#endif
+      }else if( k>0 ){
+        /* Extract a leaf from the trunk */
+        u32 closest;
+        Pgno iPage;
+        unsigned char *aData = pTrunk->aData;
+        if( nearby>0 ){
+          u32 i;
+          closest = 0;
+          if( eMode==BTALLOC_LE ){
+            for(i=0; i<k; i++){
+              iPage = get4byte(&aData[8+i*4]);
+              if( iPage<=nearby ){
+                closest = i;
+                break;
+              }
+            }
+          }else{
+            int dist;
+            dist = sqlite3AbsInt32(get4byte(&aData[8]) - nearby);
+            for(i=1; i<k; i++){
+              int d2 = sqlite3AbsInt32(get4byte(&aData[8+i*4]) - nearby);
+              if( d2<dist ){
+                closest = i;
+                dist = d2;
+              }
+            }
+          }
+        }else{
+          closest = 0;
+        }
+
+        iPage = get4byte(&aData[8+closest*4]);
+        testcase( iPage==mxPage );
+        if( iPage>mxPage ){
+          rc = SQLITE_CORRUPT_BKPT;
+          goto end_allocate_page;
+        }
+        testcase( iPage==mxPage );
+        if( !searchList
+         || (iPage==nearby || (iPage<nearby && eMode==BTALLOC_LE))
+        ){
+          int noContent;
+          *pPgno = iPage;
+          TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d"
+                 ": %d more free pages\n",
+                 *pPgno, closest+1, k, pTrunk->pgno, n-1));
+          rc = sqlite3PagerWrite(pTrunk->pDbPage);
+          if( rc ) goto end_allocate_page;
+          if( closest<k-1 ){
+            memcpy(&aData[8+closest*4], &aData[4+k*4], 4);
+          }
+          put4byte(&aData[4], k-1);
+          noContent = !btreeGetHasContent(pBt, *pPgno)? PAGER_GET_NOCONTENT : 0;
+          rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, noContent);
+          if( rc==SQLITE_OK ){
+            rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+            if( rc!=SQLITE_OK ){
+              releasePage(*ppPage);
+              *ppPage = 0;
+            }
+          }
+          searchList = 0;
+        }
+      }
+      releasePage(pPrevTrunk);
+      pPrevTrunk = 0;
+    }while( searchList );
+  }else{
+    /* There are no pages on the freelist, so append a new page to the
+    ** database image.
+    **
+    ** Normally, new pages allocated by this block can be requested from the
+    ** pager layer with the 'no-content' flag set.
+    ** This prevents the pager from trying to read the page's content from
+    ** disk. However, if the current transaction has already run one or more
+    ** incremental-vacuum steps, then the page we are about to allocate may
+    ** contain content that is required in the event of a rollback. In this
+    ** case, do not set the no-content flag. This causes the pager to load
+    ** and journal the current page content before overwriting it.
+    **
+    ** Note that the pager will not actually attempt to load or journal
+    ** content for any page that really does lie past the end of the database
+    ** file on disk. So the effects of disabling the no-content optimization
+    ** here are confined to those pages that lie between the end of the
+    ** database image and the end of the database file.
+    */
+    int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate))? PAGER_GET_NOCONTENT:0;
+
+    rc = sqlite3PagerWrite(pBt->pPage1->pDbPage);
+    if( rc ) return rc;
+    pBt->nPage++;
+    if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++;
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){
+      /* If *pPgno refers to a pointer-map page, allocate two new pages
+      ** at the end of the file instead of one. The first allocated page
+      ** becomes a new pointer-map page, the second is used by the caller.
+      */
+      MemPage *pPg = 0;
+      TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage));
+      assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) );
+      rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent);
+      if( rc==SQLITE_OK ){
+        rc = sqlite3PagerWrite(pPg->pDbPage);
+        releasePage(pPg);
+      }
+      if( rc ) return rc;
+      pBt->nPage++;
+      if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; }
+    }
+#endif
+    put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage);
+    *pPgno = pBt->nPage;
+
+    assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+    rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, bNoContent);
+    if( rc ) return rc;
+    rc = sqlite3PagerWrite((*ppPage)->pDbPage);
+    if( rc!=SQLITE_OK ){
+      releasePage(*ppPage);
+      *ppPage = 0;
+    }
+    TRACE(("ALLOCATE: %d from end of file\n", *pPgno));
+  }
+
+  assert( *pPgno!=PENDING_BYTE_PAGE(pBt) );
+
+end_allocate_page:
+  releasePage(pTrunk);
+  releasePage(pPrevTrunk);
+  assert( rc!=SQLITE_OK || sqlite3PagerPageRefcount((*ppPage)->pDbPage)<=1 );
+  assert( rc!=SQLITE_OK || (*ppPage)->isInit==0 );
+  return rc;
+}
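/* [Editor's note -- illustrative sketch, not part of the patch above.
** The freelist bookkeeping that allocateBtreePage() relies on lives at
** fixed offsets: page 1 holds the first trunk page number at byte offset
** 32 and the total freelist page count at offset 36; each trunk page
** holds the next trunk number at offset 0, its leaf count at offset 4,
** and leaf page numbers from offset 8 onward.  The hypothetical helpers
** below just decode those big-endian fields from raw page buffers. */
#include <stdint.h>

static uint32_t toyGet4(const unsigned char *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
}

typedef struct ToyFreelistInfo {
  uint32_t iFirstTrunk;  /* Page-1 offset 32: first trunk page, 0 if none */
  uint32_t nFreePages;   /* Page-1 offset 36: pages on the freelist */
} ToyFreelistInfo;

static ToyFreelistInfo toyReadFreelistHeader(const unsigned char *aPage1){
  ToyFreelistInfo info;
  info.iFirstTrunk = toyGet4(&aPage1[32]);
  info.nFreePages  = toyGet4(&aPage1[36]);
  return info;
}

/* Given a trunk page image, report its next-trunk pointer and i-th leaf. */
static uint32_t toyTrunkNext(const unsigned char *aTrunk){
  return toyGet4(&aTrunk[0]);
}
static uint32_t toyTrunkLeaf(const unsigned char *aTrunk, uint32_t i){
  /* Caller must first check i < toyGet4(&aTrunk[4]). */
  return toyGet4(&aTrunk[8 + 4*i]);
}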
+/*
+** This function is used to add page iPage to the database file free-list.
+** It is assumed that the page is not already a part of the free-list.
+**
+** The value passed as the second argument to this function is optional.
+** If the caller happens to have a pointer to the MemPage object
+** corresponding to page iPage handy, it may pass it as the second value.
+** Otherwise, it may pass NULL.
+**
+** If a pointer to a MemPage object is passed as the second argument,
+** its reference count is not altered by this function.
+*/
+static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
+  MemPage *pTrunk = 0;                /* Free-list trunk page */
+  Pgno iTrunk = 0;                    /* Page number of free-list trunk page */
+  MemPage *pPage1 = pBt->pPage1;      /* Local reference to page 1 */
+  MemPage *pPage;                     /* Page being freed. May be NULL. */
+  int rc;                             /* Return Code */
+  int nFree;                          /* Initial number of pages on free-list */
+
+  assert( sqlite3_mutex_held(pBt->mutex) );
+  assert( CORRUPT_DB || iPage>1 );
+  assert( !pMemPage || pMemPage->pgno==iPage );
+
+  if( iPage<2 ) return SQLITE_CORRUPT_BKPT;
+  if( pMemPage ){
+    pPage = pMemPage;
+    sqlite3PagerRef(pPage->pDbPage);
+  }else{
+    pPage = btreePageLookup(pBt, iPage);
+  }
+
+  /* Increment the free page count on pPage1 */
+  rc = sqlite3PagerWrite(pPage1->pDbPage);
+  if( rc ) goto freepage_out;
+  nFree = get4byte(&pPage1->aData[36]);
+  put4byte(&pPage1->aData[36], nFree+1);
+
+  if( pBt->btsFlags & BTS_SECURE_DELETE ){
+    /* If the secure_delete option is enabled, then
+    ** always fully overwrite deleted information with zeros.
+    */
+    if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) )
+     || ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0)
+    ){
+      goto freepage_out;
+    }
+    memset(pPage->aData, 0, pPage->pBt->pageSize);
+  }
+
+  /* If the database supports auto-vacuum, write an entry in the pointer-map
+  ** to indicate that the page is free.
+  */
+  if( ISAUTOVACUUM ){
+    ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc);
+    if( rc ) goto freepage_out;
+  }
+
+  /* Now manipulate the actual database free-list structure. There are two
+  ** possibilities. If the free-list is currently empty, or if the first
+  ** trunk page in the free-list is full, then this page will become a
+  ** new free-list trunk page. Otherwise, it will become a leaf of the
+  ** first trunk page in the current free-list. This block tests if it
+  ** is possible to add the page as a new free-list leaf.
+  */
+  if( nFree!=0 ){
+    u32 nLeaf;                /* Initial number of leaf cells on trunk page */
+
+    iTrunk = get4byte(&pPage1->aData[32]);
+    rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0);
+    if( rc!=SQLITE_OK ){
+      goto freepage_out;
+    }
+
+    nLeaf = get4byte(&pTrunk->aData[4]);
+    assert( pBt->usableSize>32 );
+    if( nLeaf > (u32)pBt->usableSize/4 - 2 ){
+      rc = SQLITE_CORRUPT_BKPT;
+      goto freepage_out;
+    }
+    if( nLeaf < (u32)pBt->usableSize/4 - 8 ){
+      /* In this case there is room on the trunk page to insert the page
+      ** being freed as a new leaf.
+      **
+      ** Note that the trunk page is not really full until it contains
+      ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have
+      ** coded.  But due to a coding error in versions of SQLite prior to
+      ** 3.6.0, databases with freelist trunk pages holding more than
+      ** usableSize/4 - 8 entries will be reported as corrupt.  In order
+      ** to maintain backwards compatibility with older versions of SQLite,
+      ** we will continue to restrict the number of entries to usableSize/4 - 8
+      ** for now.  At some point in the future (once everyone has upgraded
+      ** to 3.6.0 or later) we should consider fixing the conditional above
+      ** to read "usableSize/4-2" instead of "usableSize/4-8".
+      **
+      ** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still
+      ** avoid using the last six entries in the freelist trunk page array in
+      ** order that database files created by newer versions of SQLite can be
+      ** read by older versions of SQLite.
+      */
+      rc = sqlite3PagerWrite(pTrunk->pDbPage);
+      if( rc==SQLITE_OK ){
+        put4byte(&pTrunk->aData[4], nLeaf+1);
+        put4byte(&pTrunk->aData[8+nLeaf*4], iPage);
+        if( pPage && (pBt->btsFlags & BTS_SECURE_DELETE)==0 ){
+          sqlite3PagerDontWrite(pPage->pDbPage);
+        }
+        rc = btreeSetHasContent(pBt, iPage);
+      }
+      TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno));
+      goto freepage_out;
+    }
+  }
+
+  /* If control flows to this point, then it was not possible to add the
+  ** page being freed as a leaf page of the first trunk in the free-list.
+  ** Possibly because the free-list is empty, or possibly because the
+  ** first trunk in the free-list is full.  Either way, the page being freed
+  ** will become the new first trunk page in the free-list.
+  */
+  if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){
+    goto freepage_out;
+  }
+  rc = sqlite3PagerWrite(pPage->pDbPage);
+  if( rc!=SQLITE_OK ){
+    goto freepage_out;
+  }
+  put4byte(pPage->aData, iTrunk);
+  put4byte(&pPage->aData[4], 0);
+  put4byte(&pPage1->aData[32], iPage);
+  TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk));
+
+freepage_out:
+  if( pPage ){
+    pPage->isInit = 0;
+  }
+  releasePage(pPage);
+  releasePage(pTrunk);
+  return rc;
+}
+static void freePage(MemPage *pPage, int *pRC){
+  if( (*pRC)==SQLITE_OK ){
+    *pRC = freePage2(pPage->pBt, pPage, pPage->pgno);
+  }
+}
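/* [Editor's note -- illustrative sketch, not part of the patch above.
** freePage2() appends the freed page as a leaf of the first trunk when
** the trunk has room (fewer than usableSize/4 - 8 leaves, the historical
** limit explained in the comment above); otherwise the freed page itself
** becomes the new first trunk.  The toy predicate below captures only
** that decision; it assumes a sane usableSize (>= 480 in SQLite). */
#include <stdint.h>

/* Returns 1 if a freed page can be added as a leaf of the current first
** trunk page, or 0 if it must become a new trunk page instead. */
static int toyFitsOnTrunk(uint32_t nFreeListPages,  /* page-1 offset 36 */
                          uint32_t nLeafOnTrunk,    /* trunk offset 4 */
                          uint32_t usableSize){
  if( nFreeListPages==0 ) return 0;          /* no trunk exists yet */
  return nLeafOnTrunk < usableSize/4 - 8;    /* conservative pre-3.6.0 cap */
}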
+/*
+** Free any overflow pages associated with the given Cell.  Write the
+** local Cell size (the number of bytes on the original page, omitting
+** overflow) into pInfo->nLocal.
+*/
+static int clearCell(
+  MemPage *pPage,          /* The page that contains the Cell */
+  unsigned char *pCell,    /* First byte of the Cell */
+  CellInfo *pInfo          /* Size information about the cell */
+){
+  BtShared *pBt = pPage->pBt;
+  Pgno ovflPgno;
+  int rc;
+  int nOvfl;
+  u32 ovflPageSize;
+
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  pPage->xParseCell(pPage, pCell, pInfo);
+  if( pInfo->nLocal==pInfo->nPayload ){
+    return SQLITE_OK;  /* No overflow pages. Return without doing anything */
+  }
+  if( pCell+pInfo->nSize-1 > pPage->aData+pPage->maskPage ){
+    return SQLITE_CORRUPT_BKPT;  /* Cell extends past end of page */
+  }
+  ovflPgno = get4byte(pCell + pInfo->nSize - 4);
+  assert( pBt->usableSize > 4 );
+  ovflPageSize = pBt->usableSize - 4;
+  nOvfl = (pInfo->nPayload - pInfo->nLocal + ovflPageSize - 1)/ovflPageSize;
+  assert( nOvfl>0 ||
+    (CORRUPT_DB && (pInfo->nPayload + ovflPageSize)<ovflPageSize)
+  );
+  while( nOvfl-- ){
+    Pgno iNext = 0;
+    MemPage *pOvfl = 0;
+    if( ovflPgno<2 || ovflPgno>btreePagecount(pBt) ){
+      /* 0 is not a legal page number and page 1 cannot be an
+      ** overflow page. Therefore if ovflPgno<2 or past the end of the
+      ** file the database must be corrupt. */
+      return SQLITE_CORRUPT_BKPT;
+    }
+    if( nOvfl ){
+      rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext);
+      if( rc ) return rc;
+    }
+
+    if( ( pOvfl || ((pOvfl = btreePageLookup(pBt, ovflPgno))!=0) )
+     && sqlite3PagerPageRefcount(pOvfl->pDbPage)!=1
+    ){
+      /* There is no reason any cursor should have an outstanding reference
+      ** to an overflow page belonging to a cell that is being deleted/updated.
+      ** So if there exists more than one reference to this page, then it
+      ** must not really be an overflow page and the database must be corrupt.
+      ** It is helpful to detect this before calling freePage2(), as
+      ** freePage2() may zero the page contents if secure-delete mode is
+      ** enabled. If this 'overflow' page happens to be a page that the
+      ** caller is iterating through or using in some other way, this
+      ** can be problematic.
+      */
+      rc = SQLITE_CORRUPT_BKPT;
+    }else{
+      rc = freePage2(pBt, pOvfl, ovflPgno);
+    }
+
+    if( pOvfl ){
+      sqlite3PagerUnref(pOvfl->pDbPage);
+    }
+    if( rc ) return rc;
+    ovflPgno = iNext;
+  }
+  return SQLITE_OK;
+}
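/* [Editor's note -- illustrative sketch, not part of the patch above.
** An overflow chain is a singly linked list: the first 4 bytes of every
** overflow page hold the big-endian page number of the next overflow
** page (0 terminates the chain), and clearCell() frees exactly
** nOvfl = ceil((nPayload - nLocal) / (usableSize - 4)) links.  The toy
** walker below shows the traversal over an in-memory array of pages;
** all names are hypothetical. */
#include <stdint.h>

static uint32_t toyGet4ovfl(const unsigned char *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16)
       | ((uint32_t)p[2]<<8)  |  (uint32_t)p[3];
}

/* apPage[i] is the image of page number i (index 0 unused).  Returns the
** number of overflow pages visited starting from pgno, or -1 if a link
** points outside [2..nPage], which would indicate corruption. */
static int toyWalkOverflowChain(unsigned char **apPage, uint32_t nPage,
                                uint32_t pgno){
  int n = 0;
  while( pgno!=0 ){
    if( pgno<2 || pgno>nPage ) return -1;   /* corrupt link */
    pgno = toyGet4ovfl(apPage[pgno]);       /* next page in the chain */
    n++;
  }
  return n;
}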
+/*
+** Create the byte sequence used to represent a cell on page pPage
+** and write that byte sequence into pCell[].  Overflow pages are
+** allocated and filled in as necessary.  The calling procedure
+** is responsible for making sure sufficient space has been allocated
+** for pCell[].
+**
+** Note that pCell does not necessarily need to point to the pPage->aData
+** area.  pCell might point to some temporary storage.  The cell will
+** be constructed in this temporary area then copied into pPage->aData
+** later.
+*/
+static int fillInCell(
+  MemPage *pPage,                /* The page that contains the cell */
+  unsigned char *pCell,          /* Complete text of the cell */
+  const BtreePayload *pX,        /* Payload with which to construct the cell */
+  int *pnSize                    /* Write cell size here */
+){
+  int nPayload;
+  const u8 *pSrc;
+  int nSrc, n, rc;
+  int spaceLeft;
+  MemPage *pOvfl = 0;
+  MemPage *pToRelease = 0;
+  unsigned char *pPrior;
+  unsigned char *pPayload;
+  BtShared *pBt = pPage->pBt;
+  Pgno pgnoOvfl = 0;
+  int nHeader;
+
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+
+  /* pPage is not necessarily writeable since pCell might be auxiliary
+  ** buffer space that is separate from the pPage buffer area */
+  assert( pCell<pPage->aData || pCell>=&pPage->aData[pBt->pageSize]
+            || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+  /* Fill in the header. */
+  nHeader = pPage->childPtrSize;
+  if( pPage->intKey ){
+    nPayload = pX->nData + pX->nZero;
+    pSrc = pX->pData;
+    nSrc = pX->nData;
+    assert( pPage->intKeyLeaf ); /* fillInCell() only called for leaves */
+    nHeader += putVarint32(&pCell[nHeader], nPayload);
+    nHeader += putVarint(&pCell[nHeader], *(u64*)&pX->nKey);
+  }else{
+    assert( pX->nKey<=0x7fffffff && pX->pKey!=0 );
+    nSrc = nPayload = (int)pX->nKey;
+    pSrc = pX->pKey;
+    nHeader += putVarint32(&pCell[nHeader], nPayload);
+  }
+
+  /* Fill in the payload */
+  if( nPayload<=pPage->maxLocal ){
+    n = nHeader + nPayload;
+    testcase( n==3 );
+    testcase( n==4 );
+    if( n<4 ) n = 4;
+    *pnSize = n;
+    spaceLeft = nPayload;
+    pPrior = pCell;
+  }else{
+    int mn = pPage->minLocal;
+    n = mn + (nPayload - mn) % (pPage->pBt->usableSize - 4);
+    testcase( n==pPage->maxLocal );
+    testcase( n==pPage->maxLocal+1 );
+    if( n > pPage->maxLocal ) n = mn;
+    spaceLeft = n;
+    *pnSize = n + nHeader + 4;
+    pPrior = &pCell[nHeader+n];
+  }
+  pPayload = &pCell[nHeader];
+
+  /* At this point variables should be set as follows:
+  **
+  **   nPayload           Total payload size in bytes
+  **   pPayload           Begin writing payload here
+  **   spaceLeft          Space available at pPayload.  If nPayload>spaceLeft,
+  **                      that means content must spill into overflow pages.
+  **   *pnSize            Size of the local cell (not counting overflow pages)
+  **   pPrior             Where to write the pgno of the first overflow page
+  **
+  ** Use a call to btreeParseCellPtr() to verify that the values above
+  ** were computed correctly.
+  */
+#if SQLITE_DEBUG
+  {
+    CellInfo info;
+    pPage->xParseCell(pPage, pCell, &info);
+    assert( nHeader==(int)(info.pPayload - pCell) );
+    assert( info.nKey==pX->nKey );
+    assert( *pnSize == info.nSize );
+    assert( spaceLeft == info.nLocal );
+  }
+#endif
+
+  /* Write the payload into the local Cell and any extra into overflow pages */
+  while( nPayload>0 ){
+    if( spaceLeft==0 ){
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */
+      if( pBt->autoVacuum ){
+        do{
+          pgnoOvfl++;
+        } while(
+          PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt)
+        );
+      }
+#endif
+      rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      /* If the database supports auto-vacuum, and the second or subsequent
+      ** overflow page is being allocated, add an entry to the pointer-map
+      ** for that page now.
+      **
+      ** If this is the first overflow page, then write a partial entry
+      ** to the pointer-map. If we write nothing to this pointer-map slot,
+      ** then the optimistic overflow chain processing in clearCell()
+      ** may misinterpret the uninitialized values and delete the
+      ** wrong pages from the database.
+      */
+      if( pBt->autoVacuum && rc==SQLITE_OK ){
+        u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1);
+        ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc);
+        if( rc ){
+          releasePage(pOvfl);
+        }
+      }
+#endif
+      if( rc ){
+        releasePage(pToRelease);
+        return rc;
+      }
+
+      /* If pToRelease is not zero then pPrior points into the data area
+      ** of pToRelease.  Make sure pToRelease is still writeable. */
+      assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) );
+
+      /* If pPrior is part of the data area of pPage, then make sure pPage
+      ** is still writeable */
+      assert( pPrior<pPage->aData || pPrior>=&pPage->aData[pBt->pageSize]
+            || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+      put4byte(pPrior, pgnoOvfl);
+      releasePage(pToRelease);
+      pToRelease = pOvfl;
+      pPrior = pOvfl->aData;
+      put4byte(pPrior, 0);
+      pPayload = &pOvfl->aData[4];
+      spaceLeft = pBt->usableSize - 4;
+    }
+    n = nPayload;
+    if( n>spaceLeft ) n = spaceLeft;
+
+    /* If pToRelease is not zero then pPayload points into the data area
+    ** of pToRelease.  Make sure pToRelease is still writeable. */
+    assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) );
+
+    /* If pPayload is part of the data area of pPage, then make sure pPage
+    ** is still writeable */
+    assert( pPayload<pPage->aData || pPayload>=&pPage->aData[pBt->pageSize]
+            || sqlite3PagerIswriteable(pPage->pDbPage) );
+
+    if( nSrc>0 ){
+      if( n>nSrc ) n = nSrc;
+      assert( pSrc );
+      memcpy(pPayload, pSrc, n);
+    }else{
+      memset(pPayload, 0, n);
+    }
+    nPayload -= n;
+    pPayload += n;
+    pSrc += n;
+    nSrc -= n;
+    spaceLeft -= n;
+  }
+  releasePage(pToRelease);
+  return SQLITE_OK;
+}
+
+/*
+** Remove the i-th cell from pPage.  This routine affects pPage only.
+** The cell content is not freed or deallocated.  It is assumed that
+** the cell content has been copied someplace else.  This routine just
+** removes the reference to the cell from pPage.
+**
+** "sz" must be the number of bytes in the cell.
+*/
+static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
+  u32 pc;         /* Offset to cell content of cell being deleted */
+  u8 *data;       /* pPage->aData */
+  u8 *ptr;        /* Used to move bytes around within data[] */
+  int rc;         /* The return code */
+  int hdr;        /* Beginning of the header.  0 most pages.  100 page 1 */
+
+  if( *pRC ) return;
+  assert( idx>=0 && idx<pPage->nCell );
+  assert( CORRUPT_DB || sz==cellSize(pPage, idx) );
+  assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  data = pPage->aData;
+  ptr = &pPage->aCellIdx[2*idx];
+  pc = get2byte(ptr);
+  hdr = pPage->hdrOffset;
+  testcase( pc==get2byte(&data[hdr+5]) );
+  testcase( pc+sz==pPage->pBt->usableSize );
+  if( pc < (u32)get2byte(&data[hdr+5]) || pc+sz > pPage->pBt->usableSize ){
+    *pRC = SQLITE_CORRUPT_BKPT;
+    return;
+  }
+  rc = freeSpace(pPage, pc, sz);
+  if( rc ){
+    *pRC = rc;
+    return;
+  }
+  pPage->nCell--;
+  if( pPage->nCell==0 ){
+    memset(&data[hdr+1], 0, 4);
+    data[hdr+7] = 0;
+    put2byte(&data[hdr+5], pPage->pBt->usableSize);
+    pPage->nFree = pPage->pBt->usableSize - pPage->hdrOffset
+                       - pPage->childPtrSize - 8;
+  }else{
+    memmove(ptr, ptr+2, 2*(pPage->nCell - idx));
+    put2byte(&data[hdr+3], pPage->nCell);
+    pPage->nFree += 2;
+  }
+}
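/* [Editor's note -- illustrative sketch, not part of the patch above.
** dropCell()/insertCell() maintain the cell-pointer array: 2-byte
** big-endian offsets, one per cell, sitting just after the page header.
** Removing or inserting the i-th pointer is a memmove() over the tail of
** that array plus a count update, as sketched here with hypothetical
** helpers. */
#include <string.h>

static void toyPut2(unsigned char *p, unsigned v){
  p[0] = (unsigned char)(v>>8);
  p[1] = (unsigned char)v;
}

/* Remove the idx-th entry from a cell-pointer array holding nCell
** entries.  Returns the new cell count. */
static int toyDropCellPtr(unsigned char *aCellIdx, int nCell, int idx){
  unsigned char *ptr = &aCellIdx[2*idx];
  memmove(ptr, ptr+2, 2*(nCell - idx - 1));
  return nCell - 1;
}

/* Insert a pointer to content offset "pc" as the new i-th entry. */
static int toyInsertCellPtr(unsigned char *aCellIdx, int nCell, int i,
                            unsigned pc){
  unsigned char *pIns = &aCellIdx[2*i];
  memmove(pIns+2, pIns, 2*(nCell - i));
  toyPut2(pIns, pc);
  return nCell + 1;
}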
+/*
+** Insert a new cell on pPage at cell index "i".  pCell points to the
+** content of the cell.
+**
+** If the cell content will fit on the page, then put it there.  If it
+** will not fit, then make a copy of the cell content into pTemp if
+** pTemp is not null.  Regardless of pTemp, allocate a new entry
+** in pPage->apOvfl[] and make it point to the cell content (either
+** in pTemp or the original pCell) and also record its index.
+** Allocating a new entry in pPage->aCell[] implies that
+** pPage->nOverflow is incremented.
+**
+** *pRC must be SQLITE_OK when this routine is called.
+*/
+static void insertCell(
+  MemPage *pPage,   /* Page into which we are copying */
+  int i,            /* New cell becomes the i-th cell of the page */
+  u8 *pCell,        /* Content of the new cell */
+  int sz,           /* Bytes of content in pCell */
+  u8 *pTemp,        /* Temp storage space for pCell, if needed */
+  Pgno iChild,      /* If non-zero, replace first 4 bytes with this value */
+  int *pRC          /* Read and write return code from here */
+){
+  int idx = 0;      /* Where to write new cell content in data[] */
+  int j;            /* Loop counter */
+  u8 *data;         /* The content of the whole page */
+  u8 *pIns;         /* The point in pPage->aCellIdx[] where the new cell
+                    ** pointer is inserted */
+
+  assert( *pRC==SQLITE_OK );
+  assert( i>=0 && i<=pPage->nCell+pPage->nOverflow );
+  assert( MX_CELL(pPage->pBt)<=10921 );
+  assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB );
+  assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) );
+  assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) );
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  /* The cell should normally be sized correctly.  However, when moving a
+  ** malformed cell from a leaf page to an interior page, if the cell size
+  ** wanted to be less than 4 but got rounded up to 4 on the leaf, then size
+  ** might be less than 8 (leaf-size + pointer) on the interior node.  Hence
+  ** the term after the || in the following assert(). */
+  assert( sz==pPage->xCellSize(pPage, pCell) || (sz==8 && iChild>0) );
+  if( pPage->nOverflow || sz+2>pPage->nFree ){
+    if( pTemp ){
+      memcpy(pTemp, pCell, sz);
+      pCell = pTemp;
+    }
+    if( iChild ){
+      put4byte(pCell, iChild);
+    }
+    j = pPage->nOverflow++;
+    /* Comparison against ArraySize-1 since we hold back one extra slot
+    ** as a contingency.  In other words, never need more than 3 overflow
+    ** slots but 4 are allocated, just to be safe. */
+    assert( j < ArraySize(pPage->apOvfl)-1 );
+    pPage->apOvfl[j] = pCell;
+    pPage->aiOvfl[j] = (u16)i;
+
+    /* When multiple overflows occur, they are always sequential and in
+    ** sorted order.  These invariants arise because multiple overflows can
+    ** only occur when inserting divider cells into the parent page during
+    ** balancing, and the dividers are adjacent and sorted.
+    */
+    assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */
+    assert( j==0 || i==pPage->aiOvfl[j-1]+1 );   /* Overflows are sequential */
+  }else{
+    int rc = sqlite3PagerWrite(pPage->pDbPage);
+    if( rc!=SQLITE_OK ){
+      *pRC = rc;
+      return;
+    }
+    assert( sqlite3PagerIswriteable(pPage->pDbPage) );
+    data = pPage->aData;
+    assert( &data[pPage->cellOffset]==pPage->aCellIdx );
+    rc = allocateSpace(pPage, sz, &idx);
+    if( rc ){ *pRC = rc; return; }
+    /* The allocateSpace() routine guarantees the following properties
+    ** if it returns successfully */
+    assert( idx >= 0 );
+    assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB );
+    assert( idx+sz <= (int)pPage->pBt->usableSize );
+    pPage->nFree -= (u16)(2 + sz);
+    memcpy(&data[idx], pCell, sz);
+    if( iChild ){
+      put4byte(&data[idx], iChild);
+    }
+    pIns = pPage->aCellIdx + i*2;
+    memmove(pIns+2, pIns, 2*(pPage->nCell - i));
+    put2byte(pIns, idx);
+    pPage->nCell++;
+    /* increment the cell count */
+    if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++;
+    assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell );
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( pPage->pBt->autoVacuum ){
+      /* The cell may contain a pointer to an overflow page.  If so, write
+      ** the entry for the overflow page into the pointer map.
+      */
+      ptrmapPutOvflPtr(pPage, pCell, pRC);
+    }
+#endif
+  }
+}
+
+/*
+** A CellArray object contains a cache of pointers and sizes for a
+** consecutive sequence of cells that might be held on multiple pages.
+*/
+typedef struct CellArray CellArray;
+struct CellArray {
+  int nCell;              /* Number of cells in apCell[] */
+  MemPage *pRef;          /* Reference page */
+  u8 **apCell;            /* All cells being balanced */
+  u16 *szCell;            /* Local size of all cells in apCell[] */
+};
+
+/*
+** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been
+** computed.
+*/
+static void populateCellCache(CellArray *p, int idx, int N){
+  assert( idx>=0 && idx+N<=p->nCell );
+  while( N>0 ){
+    assert( p->apCell[idx]!=0 );
+    if( p->szCell[idx]==0 ){
+      p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]);
+    }else{
+      assert( CORRUPT_DB ||
+          p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) );
+    }
+    idx++;
+    N--;
+  }
+}
+
+/*
+** Return the size of the Nth element of the cell array
+*/
+static SQLITE_NOINLINE u16 computeCellSize(CellArray *p, int N){
+  assert( N>=0 && N<p->nCell );
+  assert( p->szCell[N]==0 );
+  p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]);
+  return p->szCell[N];
+}
+static u16 cachedCellSize(CellArray *p, int N){
+  assert( N>=0 && N<p->nCell );
+  if( p->szCell[N] ) return p->szCell[N];
+  return computeCellSize(p, N);
+}
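/* [Editor's note -- illustrative sketch, not part of the patch above.
** cachedCellSize() treats 0 as "not yet computed" and fills the szCell[]
** slot on first use, so each cell is measured at most once during a
** balance.  The same memoization shape, detached from MemPage and using
** hypothetical names: */
typedef struct ToyCellCache {
  int nCell;
  const unsigned char **apCell;           /* cell images */
  unsigned short *szCell;                 /* 0 means "unknown" */
  unsigned short (*xMeasure)(const unsigned char*);  /* size function */
} ToyCellCache;

static unsigned short toyCachedCellSize(ToyCellCache *p, int N){
  if( p->szCell[N]==0 ){
    p->szCell[N] = p->xMeasure(p->apCell[N]);  /* compute once, remember */
  }
  return p->szCell[N];
}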
+/*
+** Array apCell[] contains pointers to nCell b-tree page cells. The
+** szCell[] array contains the size in bytes of each cell. This function
+** replaces the current contents of page pPg with the contents of the cell
+** array.
+**
+** Some of the cells in apCell[] may currently be stored in pPg. This
+** function works around problems caused by this by making a copy of any
+** such cells before overwriting the page data.
+**
+** The MemPage.nFree field is invalidated by this function. It is the
+** responsibility of the caller to set it correctly.
+*/
+static int rebuildPage(
+  MemPage *pPg,                   /* Edit this page */
+  int nCell,                      /* Final number of cells on page */
+  u8 **apCell,                    /* Array of cells */
+  u16 *szCell                     /* Array of cell sizes */
+){
+  const int hdr = pPg->hdrOffset;          /* Offset of header on pPg */
+  u8 * const aData = pPg->aData;           /* Pointer to data for pPg */
+  const int usableSize = pPg->pBt->usableSize;
+  u8 * const pEnd = &aData[usableSize];
+  int i;
+  u8 *pCellptr = pPg->aCellIdx;
+  u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
+  u8 *pData;
+
+  i = get2byte(&aData[hdr+5]);
+  memcpy(&pTmp[i], &aData[i], usableSize - i);
+
+  pData = pEnd;
+  for(i=0; i<nCell; i++){
+    u8 *pCell = apCell[i];
+    if( SQLITE_WITHIN(pCell,aData,pEnd) ){
+      pCell = &pTmp[pCell - aData];
+    }
+    pData -= szCell[i];
+    put2byte(pCellptr, (pData - aData));
+    pCellptr += 2;
+    if( pData < pCellptr ) return SQLITE_CORRUPT_BKPT;
+    memcpy(pData, pCell, szCell[i]);
+    assert( szCell[i]==pPg->xCellSize(pPg, pCell) || CORRUPT_DB );
+    testcase( szCell[i]!=pPg->xCellSize(pPg,pCell) );
+  }
+
+  /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
+  pPg->nCell = nCell;
+  pPg->nOverflow = 0;
+
+  put2byte(&aData[hdr+1], 0);
+  put2byte(&aData[hdr+3], pPg->nCell);
+  put2byte(&aData[hdr+5], pData - aData);
+  aData[hdr+7] = 0x00;
+  return SQLITE_OK;
+}
+
+/*
+** Array apCell[] contains nCell pointers to b-tree cells. Array szCell
+** contains the size in bytes of each such cell. This function attempts to
+** add the cells stored in the array to page pPg. If it cannot (because
+** the page needs to be defragmented before the cells will fit), non-zero
+** is returned. Otherwise, if the cells are added successfully, zero is
+** returned.
+**
+** Argument pCellptr points to the first entry in the cell-pointer array
+** (part of page pPg) to populate. After cell apCell[0] is written to the
+** page body, a 16-bit offset is written to pCellptr. And so on, for each
+** cell in the array. It is the responsibility of the caller to ensure
+** that it is safe to overwrite this part of the cell-pointer array.
+**
+** When this function is called, *ppData points to the start of the
+** content area on page pPg. If the size of the content area is extended,
+** *ppData is updated to point to the new start of the content area
+** before returning.
+**
+** Finally, argument pBegin points to the byte immediately following the
+** end of the space required by this page for the cell-pointer area (for
+** all cells - not just those inserted by the current call). If the content
+** area must be extended to before this point in order to accommodate all
+** cells in apCell[], then the cells do not fit and non-zero is returned.
+*/
+static int pageInsertArray(
+  MemPage *pPg,                   /* Page to add cells to */
+  u8 *pBegin,                     /* End of cell-pointer array */
+  u8 **ppData,                    /* IN/OUT: Page content-area pointer */
+  u8 *pCellptr,                   /* Pointer to cell-pointer area */
+  int iFirst,                     /* Index of first cell to add */
+  int nCell,                      /* Number of cells to add to pPg */
+  CellArray *pCArray              /* Array of cells */
+){
+  int i;
+  u8 *aData = pPg->aData;
+  u8 *pData = *ppData;
+  int iEnd = iFirst + nCell;
+  assert( CORRUPT_DB || pPg->hdrOffset==0 );    /* Never called on page 1 */
+  for(i=iFirst; i<iEnd; i++){
+    int sz, rc;
+    u8 *pSlot;
+    sz = cachedCellSize(pCArray, i);
+    if( (aData[1]==0 && aData[2]==0) || (pSlot = pageFindSlot(pPg,sz,&rc))==0 ){
+      if( (pData - pBegin)<sz ) return 1;
+      pData -= sz;
+      pSlot = pData;
+    }
+    /* pSlot and pCArray->apCell[i] will never overlap on a well-formed
+    ** database.  But they might for a corrupt database.  Hence use memmove()
+    ** since memcpy() sends SIGABORT with overlapping buffers on OpenBSD */
+    assert( (pSlot+sz)<=pCArray->apCell[i]
+         || pSlot>=(pCArray->apCell[i]+sz)
+         || CORRUPT_DB );
+    memmove(pSlot, pCArray->apCell[i], sz);
+    put2byte(pCellptr, (pSlot - aData));
+    pCellptr += 2;
+  }
+  *ppData = pData;
+  return 0;
+}
+
+/*
+** Array apCell[] contains nCell pointers to b-tree cells. Array szCell
+** contains the size in bytes of each such cell.
+** This function adds the space associated with each cell in the array
+** that is currently stored within the body of pPg to the pPg free-list.
+** The cell-pointers and other fields of the page are not updated.
+**
+** This function returns the total number of cells added to the free-list.
+*/
+static int pageFreeArray(
+  MemPage *pPg,                   /* Page to edit */
+  int iFirst,                     /* First cell to delete */
+  int nCell,                      /* Cells to delete */
+  CellArray *pCArray              /* Array of cells */
+){
+  u8 * const aData = pPg->aData;
+  u8 * const pEnd = &aData[pPg->pBt->usableSize];
+  u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize];
+  int nRet = 0;
+  int i;
+  int iEnd = iFirst + nCell;
+  u8 *pFree = 0;
+  int szFree = 0;
+
+  for(i=iFirst; i<iEnd; i++){
+    u8 *pCell = pCArray->apCell[i];
+    if( SQLITE_WITHIN(pCell, pStart, pEnd) ){
+      int sz;
+      /* No need to use cachedCellSize() here.  The sizes of all cells that
+      ** are to be freed have already been computed while deciding which
+      ** cells need freeing */
+      sz = pCArray->szCell[i];  assert( sz>0 );
+      if( pFree!=(pCell + sz) ){
+        if( pFree ){
+          assert( pFree>aData && (pFree - aData)<65536 );
+          freeSpace(pPg, (u16)(pFree - aData), szFree);
+        }
+        pFree = pCell;
+        szFree = sz;
+        if( pFree+sz>pEnd ) return 0;
+      }else{
+        pFree = pCell;
+        szFree += sz;
+      }
+      nRet++;
+    }
+  }
+  if( pFree ){
+    assert( pFree>aData && (pFree - aData)<65536 );
+    freeSpace(pPg, (u16)(pFree - aData), szFree);
+  }
+  return nRet;
+}
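/* [Editor's note -- illustrative sketch, not part of the patch above.
** pageFreeArray() defers freeSpace() calls by growing a pending run
** [pFree, pFree+szFree) while consecutive cells happen to be adjacent in
** the page body, flushing the run whenever adjacency breaks.  The toy
** below applies the same run-coalescing idea to plain offsets; all names
** are hypothetical. */
#include <stddef.h>

typedef void (*ToyFreeFn)(size_t ofst, size_t len);

/* aOfst[i]/aLen[i] describe n regions in some buffer, in the order they
** are visited.  Adjacent regions are merged before xFree is invoked. */
static void toyCoalesceFree(const size_t *aOfst, const size_t *aLen, int n,
                            ToyFreeFn xFree){
  size_t runOfst = 0, runLen = 0;
  int i;
  for(i=0; i<n; i++){
    if( runLen && aOfst[i]==runOfst+runLen ){
      runLen += aLen[i];              /* extends the current run */
    }else{
      if( runLen ) xFree(runOfst, runLen);
      runOfst = aOfst[i];
      runLen = aLen[i];
    }
  }
  if( runLen ) xFree(runOfst, runLen);
}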
+/*
+** apCell[] and szCell[] contains pointers to and sizes of all cells in the
+** pages being balanced.  The current page, pPg, has pPg->nCell cells starting
+** with apCell[iOld].  After balancing, this page should hold nNew cells
+** starting at apCell[iNew].
+**
+** This routine makes the necessary adjustments to pPg so that it contains
+** the correct cells after being balanced.
+**
+** The pPg->nFree field is invalid when this function returns.  It is the
+** responsibility of the caller to set it correctly.
+*/
+static int editPage(
+  MemPage *pPg,                   /* Edit this page */
+  int iOld,                       /* Index of first cell currently on page */
+  int iNew,                       /* Index of new first cell on page */
+  int nNew,                       /* Final number of cells on page */
+  CellArray *pCArray              /* Array of cells and sizes */
+){
+  u8 * const aData = pPg->aData;
+  const int hdr = pPg->hdrOffset;
+  u8 *pBegin = &pPg->aCellIdx[nNew * 2];
+  int nCell = pPg->nCell;       /* Cells stored on pPg */
+  u8 *pData;
+  u8 *pCellptr;
+  int i;
+  int iOldEnd = iOld + pPg->nCell + pPg->nOverflow;
+  int iNewEnd = iNew + nNew;
+
+#ifdef SQLITE_DEBUG
+  u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
+  memcpy(pTmp, aData, pPg->pBt->usableSize);
+#endif
+
+  /* Remove cells from the start and end of the page */
+  if( iOld<iNew ){
+    int nShift = pageFreeArray(pPg, iOld, iNew-iOld, pCArray);
+    memmove(pPg->aCellIdx, &pPg->aCellIdx[nShift*2], nCell*2);
+    nCell -= nShift;
+  }
+  if( iNewEnd < iOldEnd ){
+    nCell -= pageFreeArray(pPg, iNewEnd, iOldEnd - iNewEnd, pCArray);
+  }
+
+  pData = &aData[get2byteNotZero(&aData[hdr+5])];
+  if( pData<pBegin ) goto editpage_fail;
+
+  /* Add cells to the start of the page */
+  if( iNew<iOld ){
+    int nAdd = MIN(nNew,iOld-iNew);
+    assert( (iOld-iNew)<nNew || nCell==0 || CORRUPT_DB );
+    pCellptr = pPg->aCellIdx;
+    memmove(&pCellptr[nAdd*2], pCellptr, nCell*2);
+    if( pageInsertArray(
+          pPg, pBegin, &pData, pCellptr,
+          iNew, nAdd, pCArray
+    ) ) goto editpage_fail;
+    nCell += nAdd;
+  }
+
+  /* Add any overflow cells */
+  for(i=0; i<pPg->nOverflow; i++){
+    int iCell = (iOld + pPg->aiOvfl[i]) - iNew;
+    if( iCell>=0 && iCell<nNew ){
+      pCellptr = &pPg->aCellIdx[iCell * 2];
+      memmove(&pCellptr[2], pCellptr, (nCell - iCell) * 2);
+      nCell++;
+      if( pageInsertArray(
+            pPg, pBegin, &pData, pCellptr,
+            iCell+iNew, 1, pCArray
+      ) ) goto editpage_fail;
+    }
+  }
+
+  /* Append cells to the end of the page */
+  pCellptr = &pPg->aCellIdx[nCell*2];
+  if( pageInsertArray(
+        pPg, pBegin, &pData, pCellptr,
+        iNew+nCell, nNew-nCell, pCArray
+  ) ) goto editpage_fail;
+
+  pPg->nCell = nNew;
+  pPg->nOverflow = 0;
+
+  put2byte(&aData[hdr+3], pPg->nCell);
+  put2byte(&aData[hdr+5], pData - aData);
+
+#ifdef SQLITE_DEBUG
+  for(i=0; i<nNew && !CORRUPT_DB; i++){
+    u8 *pCell = pCArray->apCell[i+iNew];
+    int iOff = get2byteAligned(&pPg->aCellIdx[i*2]);
+    if( SQLITE_WITHIN(pCell, aData, &aData[pPg->pBt->usableSize]) ){
+      pCell = &pTmp[pCell - aData];
+    }
+    assert( 0==memcmp(pCell, &aData[iOff],
+            pCArray->pRef->xCellSize(pCArray->pRef, pCArray->apCell[i+iNew])) );
+  }
+#endif
+
+  return SQLITE_OK;
+ editpage_fail:
+  /* Unable to edit this page. Rebuild it from scratch instead. */
+  populateCellCache(pCArray, iNew, nNew);
+  return rebuildPage(pPg, nNew, &pCArray->apCell[iNew], &pCArray->szCell[iNew]);
+}
+
+/*
+** The following parameters determine how many adjacent pages get involved
+** in a balancing operation.  NN is the number of neighbors on either side
+** of the page that participate in the balancing operation.  NB is the
+** total number of pages that participate, including the target page and
+** NN neighbors on either side.
+**
+** The minimum value of NN is 1 (of course).  Increasing NN above 1
+** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance
+** in exchange for a larger degradation in INSERT and UPDATE performance.
+** The value of NN appears to give the best results overall.
+*/
+#define NN 1             /* Number of neighbors on either side of pPage */
+#define NB (NN*2+1)      /* Total pages involved in the balance */
+
+
+#ifndef SQLITE_OMIT_QUICKBALANCE
+/*
+** This version of balance() handles the common special case where
+** a new entry is being inserted on the extreme right-end of the
+** tree, in other words, when the new entry will become the largest
+** entry in the tree.
+**
+** Instead of trying to balance the 3 right-most leaf pages, just add
+** a new page to the right-hand side and put the one new entry in
+** that page.  This leaves the right side of the tree somewhat
+** unbalanced.  But odds are that we will be inserting new entries
+** at the end soon afterwards so the nearly empty page will quickly
+** fill up.  On average.
+**
+** pPage is the leaf page which is the right-most page in the tree.
+** pParent is its parent.  pPage must have a single overflow entry
+** which is also the right-most entry on the page.
+**
+** The pSpace buffer is used to store a temporary copy of the divider
+** cell that will be inserted into pParent. Such a cell consists of a 4
+** byte page number followed by a variable length integer. In other
+** words, at most 13 bytes. Hence the pSpace buffer must be at
+** least 13 bytes in size.
+*/
+static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
+  BtShared *const pBt = pPage->pBt;    /* B-Tree Database */
+  MemPage *pNew;                       /* Newly allocated page */
+  int rc;                              /* Return Code */
+  Pgno pgnoNew;                        /* Page number of pNew */
+
+  assert( sqlite3_mutex_held(pPage->pBt->mutex) );
+  assert( sqlite3PagerIswriteable(pParent->pDbPage) );
+  assert( pPage->nOverflow==1 );
+
+  /* This error condition is now caught prior to reaching this function */
+  if( NEVER(pPage->nCell==0) ) return SQLITE_CORRUPT_BKPT;
+
+  /* Allocate a new page. This page will become the right-sibling of
+  ** pPage. Make the parent page writable, so that the new divider cell
+  ** may be inserted. If both these operations are successful, proceed.
+  */
+  rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0);
+
+  if( rc==SQLITE_OK ){
+
+    u8 *pOut = &pSpace[4];
+    u8 *pCell = pPage->apOvfl[0];
+    u16 szCell = pPage->xCellSize(pPage, pCell);
+    u8 *pStop;
+
+    assert( sqlite3PagerIswriteable(pNew->pDbPage) );
+    assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) );
+    zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF);
+    rc = rebuildPage(pNew, 1, &pCell, &szCell);
+    if( NEVER(rc) ) return rc;
+    pNew->nFree = pBt->usableSize - pNew->cellOffset - 2 - szCell;
+
+    /* If this is an auto-vacuum database, update the pointer map
+    ** with entries for the new page, and any pointer from the
+    ** cell on the page to an overflow page. If either of these
+    ** operations fails, the return code is set, but the contents
+    ** of the parent page are still manipulated by the code below.
+    ** That is Ok, at this point the parent page is guaranteed to
+    ** be marked as dirty. Returning an error code will cause a
+    ** rollback, undoing any changes made to the parent page.
+    */
+    if( ISAUTOVACUUM ){
+      ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc);
+      if( szCell>pNew->minLocal ){
+        ptrmapPutOvflPtr(pNew, pCell, &rc);
+      }
+    }
+
+    /* Create a divider cell to insert into pParent. The divider cell
+    ** consists of a 4-byte page number (the page number of pPage) and
+    ** a variable length key value (which must be the same value as the
+    ** largest key on pPage).
+    **
+    ** To find the largest key value on pPage, first find the right-most
+    ** cell on pPage. The first two fields of this cell are the
+    ** record-length (a variable length integer at most 32-bits in size)
+    ** and the key value (a variable length integer, may have any value).
+    ** The first of the while(...) loops below skips over the record-length
+    ** field. The second while(...) loop copies the key value from the
+    ** cell on pPage into the pSpace buffer.
+    */
+    pCell = findCell(pPage, pPage->nCell-1);
+    pStop = &pCell[9];
+    while( (*(pCell++)&0x80) && pCell<pStop );
+    pStop = &pCell[9];
+    while( ((*(pOut++) = *(pCell++))&0x80) && pCell<pStop );
+
+    /* Insert the new divider cell into pParent. */
+    if( rc==SQLITE_OK ){
+      insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace),
+                   0, pPage->pgno, &rc);
+    }
+
+    /* Set the right-child pointer of pParent to point to the new page. */
+    put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew);
+
+    /* Release the reference to the new page. */
+    releasePage(pNew);
+  }
+
+  return rc;
+}
+#endif /* SQLITE_OMIT_QUICKBALANCE */
+
+#if 0
+/*
+** This function does not contribute anything to the operation of SQLite.
+** It is sometimes activated temporarily while debugging code responsible
+** for setting pointer-map entries.
+*/
+static int ptrmapCheckPages(MemPage **apPage, int nPage){
+  int i, j;
+  for(i=0; i<nPage; i++){
+    BtShared *pBt;
+    MemPage *pPage = apPage[i];
+    Pgno n;
+    u8 e;
+    pBt = pPage->pBt;
+    assert( pPage->isInit );
+
+    for(j=0; j<pPage->nCell; j++){
+      CellInfo info;
+      u8 *z;
+
+      z = findCell(pPage, j);
+      pPage->xParseCell(pPage, z, &info);
+      if( info.nLocal<info.nPayload ){
+        Pgno ovfl = get4byte(&z[info.nSize-4]);
+        ptrmapGet(pBt, ovfl, &e, &n);
+        assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 );
+      }
+      if( !pPage->leaf ){
+        Pgno child = get4byte(z);
+        ptrmapGet(pBt, child, &e, &n);
+        assert( n==pPage->pgno && e==PTRMAP_BTREE );
+      }
+    }
+    if( !pPage->leaf ){
+      Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]);
+      ptrmapGet(pBt, child, &e, &n);
+      assert( n==pPage->pgno && e==PTRMAP_BTREE );
+    }
+  }
+  return 1;
+}
+#endif
+
+/*
+** This function is used to copy the contents of the b-tree node stored
+** on page pFrom to page pTo. If page pFrom was not a leaf page, then
+** the pointer-map entries for each child page are updated so that the
+** parent page stored in the pointer map is page pTo. If pFrom contained
+** any cells with overflow page pointers, then the corresponding pointer
+** map entries are also updated so that the parent page is page pTo.
+**
+** If pFrom is currently carrying any overflow cells (entries in the
+** MemPage.apOvfl[] array), they are not copied to pTo.
+**
+** Before returning, page pTo is reinitialized using btreeInitPage().
+**
+** The performance of this function is not critical. It is only used by
+** the balance_shallower() and balance_deeper() procedures, neither of
+** which are called often under normal circumstances.
+*/
+static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){
+  if( (*pRC)==SQLITE_OK ){
+    BtShared * const pBt = pFrom->pBt;
+    u8 * const aFrom = pFrom->aData;
+    u8 * const aTo = pTo->aData;
+    int const iFromHdr = pFrom->hdrOffset;
+    int const iToHdr = ((pTo->pgno==1) ? 100 : 0);
+    int rc;
+    int iData;
+
+    assert( pFrom->isInit );
+    assert( pFrom->nFree>=iToHdr );
+    assert( get2byte(&aFrom[iFromHdr+5]) <= (int)pBt->usableSize );
+
+    /* Copy the b-tree node content from page pFrom to page pTo. */
+    iData = get2byte(&aFrom[iFromHdr+5]);
+    memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData);
+    memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell);
+
+    /* Reinitialize page pTo so that the contents of the MemPage structure
+    ** match the new data. The initialization of pTo can actually fail under
+    ** fairly obscure circumstances, even though it is a copy of initialized
+    ** page pFrom.
+    */
+    pTo->isInit = 0;
+    rc = btreeInitPage(pTo);
+    if( rc!=SQLITE_OK ){
+      *pRC = rc;
+      return;
+    }
+
+    /* If this is an auto-vacuum database, update the pointer-map entries
+    ** for any b-tree or overflow pages that pTo now contains the pointers to.
+    */
+    if( ISAUTOVACUUM ){
+      *pRC = setChildPtrmaps(pTo);
+    }
+  }
+}
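/* [Editor's note -- illustrative sketch, not part of the patch above.
** The two while-loops near the top of this hunk skip one varint (the
** record length) and then copy one varint (the rowid): in SQLite's
** varint coding a set 0x80 bit means "another byte follows", and a
** varint occupies at most 9 bytes.  Standalone equivalents, with
** hypothetical names: */

/* Return a pointer to the byte just past the varint starting at p. */
static const unsigned char *toySkipVarint(const unsigned char *p){
  const unsigned char *pStop = &p[9];
  while( (*(p++)&0x80) && p<pStop );
  return p;
}

/* Copy the varint at pIn to pOut; returns the bytes copied (1..9). */
static int toyCopyVarint(const unsigned char *pIn, unsigned char *pOut){
  const unsigned char *pStop = &pIn[9];
  const unsigned char *pStart = pIn;
  while( ((*(pOut++) = *(pIn++))&0x80) && pIn<pStop );
  return (int)(pIn - pStart);
}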
+/*
+** This routine redistributes cells on the iParentIdx'th child of pParent
+** (hereafter "the page") and up to 2 siblings so that all pages have about the
+** same amount of free space. Usually a single sibling on either side of the
+** page is used in the balancing, though both siblings might come from one
+** side if the page is the first or last child of its parent. If the page
+** has fewer than 2 siblings (something which can only happen if the page
+** is a root page or a child of a root page) then all available siblings
+** participate in the balancing.
+**
+** The number of siblings of the page might be increased or decreased by
+** one or two in an effort to keep pages nearly full but not over full.
+**
+** Note that when this routine is called, some of the cells on the page
+** might not actually be stored in MemPage.aData[]. This can happen
+** if the page is overfull. This routine ensures that all cells allocated
+** to the page and its siblings fit into MemPage.aData[] before returning.
+**
+** In the course of balancing the page and its siblings, cells may be
+** inserted into or removed from the parent page (pParent). Doing so
+** may cause the parent page to become overfull or underfull. If this
+** happens, it is the responsibility of the caller to invoke the correct
+** balancing routine to fix this problem (see the balance() routine).
+**
+** If this routine fails for any reason, it might leave the database
+** in a corrupted state. So if this routine fails, the database should
+** be rolled back.
+**
+** The third argument to this function, aOvflSpace, is a pointer to a
+** buffer big enough to hold one page. If while inserting cells into the parent
+** page (pParent) the parent page becomes overfull, this buffer is
+** used to store the parent's overflow cells. Because this function inserts
+** a maximum of four divider cells into the parent page, and the maximum
+** size of a cell stored within an internal node is always less than 1/4
+** of the page-size, the aOvflSpace[] buffer is guaranteed to be large
+** enough for all overflow cells.
+**
+** If aOvflSpace is set to a null pointer, this function returns
+** SQLITE_NOMEM.
+*/
+static int balance_nonroot(
+  MemPage *pParent,               /* Parent page of siblings being balanced */
+  int iParentIdx,                 /* Index of "the page" in pParent */
+  u8 *aOvflSpace,                 /* page-size bytes of space for parent ovfl */
+  int isRoot,                     /* True if pParent is a root-page */
+  int bBulk                       /* True if this call is part of a bulk load */
+){
+  BtShared *pBt;               /* The whole database */
+  int nMaxCells = 0;           /* Allocated size of apCell, szCell, aFrom. */
+  int nNew = 0;                /* Number of pages in apNew[] */
+  int nOld;                    /* Number of pages in apOld[] */
+  int i, j, k;                 /* Loop counters */
+  int nxDiv;                   /* Next divider slot in pParent->aCell[] */
+  int rc = SQLITE_OK;          /* The return code */
+  u16 leafCorrection;          /* 4 if pPage is a leaf.  0 if not */
0 if not */ + int leafData; /* True if pPage is a leaf of a LEAFDATA tree */ + int usableSpace; /* Bytes in pPage beyond the header */ + int pageFlags; /* Value of pPage->aData[0] */ + int iSpace1 = 0; /* First unused byte of aSpace1[] */ + int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */ + int szScratch; /* Size of scratch memory requested */ + MemPage *apOld[NB]; /* pPage and up to two siblings */ + MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ + u8 *pRight; /* Location in parent of right-sibling pointer */ + u8 *apDiv[NB-1]; /* Divider cells in pParent */ + int cntNew[NB+2]; /* Index in b.paCell[] of cell after i-th page */ + int cntOld[NB+2]; /* Old index in b.apCell[] */ + int szNew[NB+2]; /* Combined size of cells placed on i-th page */ + u8 *aSpace1; /* Space for copies of dividers cells */ + Pgno pgno; /* Temp var to store a page number in */ + u8 abDone[NB+2]; /* True after i'th new page is populated */ + Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */ + Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */ + u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */ + CellArray b; /* Parsed information on cells being balanced */ + + memset(abDone, 0, sizeof(abDone)); + b.nCell = 0; + b.apCell = 0; + pBt = pParent->pBt; + assert( sqlite3_mutex_held(pBt->mutex) ); + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); + +#if 0 + TRACE(("BALANCE: begin page %d child of %d\n", pPage->pgno, pParent->pgno)); +#endif + + /* At this point pParent may have at most one overflow cell. And if + ** this overflow cell is present, it must be the cell with + ** index iParentIdx. This scenario comes about when this function + ** is called (indirectly) from sqlite3BtreeDelete(). + */ + assert( pParent->nOverflow==0 || pParent->nOverflow==1 ); + assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx ); + + if( !aOvflSpace ){ + return SQLITE_NOMEM_BKPT; + } + + /* Find the sibling pages to balance. Also locate the cells in pParent + ** that divide the siblings. An attempt is made to find NN siblings on + ** either side of pPage. More siblings are taken from one side, however, + ** if there are fewer than NN siblings on the other side. If pParent + ** has NB or fewer children then all children of pParent are taken. + ** + ** This loop also drops the divider cells from the parent page. This + ** way, the remainder of the function does not have to deal with any + ** overflow cells in the parent page, since if any existed they will + ** have already been removed. 
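+  ** For example (hypothetical numbers): if pParent holds 10 cells and
+  ** iParentIdx==4, then nxDiv is set to 3, the three old sibling pages
+  ** are the children referenced by cells 3, 4 and 5 of pParent, and
+  ** divider cells 3 and 4 are dropped from the parent, to be
+  ** re-inserted after the redistribution below.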
+ */ + i = pParent->nOverflow + pParent->nCell; + if( i<2 ){ + nxDiv = 0; + }else{ + assert( bBulk==0 || bBulk==1 ); + if( iParentIdx==0 ){ + nxDiv = 0; + }else if( iParentIdx==i ){ + nxDiv = i-2+bBulk; + }else{ + nxDiv = iParentIdx-1; + } + i = 2-bBulk; + } + nOld = i+1; + if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){ + pRight = &pParent->aData[pParent->hdrOffset+8]; + }else{ + pRight = findCell(pParent, i+nxDiv-pParent->nOverflow); + } + pgno = get4byte(pRight); + while( 1 ){ + rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + if( rc ){ + memset(apOld, 0, (i+1)*sizeof(MemPage*)); + goto balance_cleanup; + } + nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; + if( (i--)==0 ) break; + + if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){ + apDiv[i] = pParent->apOvfl[0]; + pgno = get4byte(apDiv[i]); + szNew[i] = pParent->xCellSize(pParent, apDiv[i]); + pParent->nOverflow = 0; + }else{ + apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow); + pgno = get4byte(apDiv[i]); + szNew[i] = pParent->xCellSize(pParent, apDiv[i]); + + /* Drop the cell from the parent page. apDiv[i] still points to + ** the cell within the parent, even though it has been dropped. + ** This is safe because dropping a cell only overwrites the first + ** four bytes of it, and this function does not need the first + ** four bytes of the divider cell. So the pointer is safe to use + ** later on. + ** + ** But not if we are in secure-delete mode. In secure-delete mode, + ** the dropCell() routine will overwrite the entire cell with zeroes. + ** In this case, temporarily copy the cell into the aOvflSpace[] + ** buffer. It will be copied out again as soon as the aSpace[] buffer + ** is allocated. */ + if( pBt->btsFlags & BTS_SECURE_DELETE ){ + int iOff; + + iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData); + if( (iOff+szNew[i])>(int)pBt->usableSize ){ + rc = SQLITE_CORRUPT_BKPT; + memset(apOld, 0, (i+1)*sizeof(MemPage*)); + goto balance_cleanup; + }else{ + memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]); + apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; + } + } + dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i], &rc); + } + } + + /* Make nMaxCells a multiple of 4 in order to preserve 8-byte + ** alignment */ + nMaxCells = (nMaxCells + 3)&~3; + + /* + ** Allocate space for memory structures + */ + szScratch = + nMaxCells*sizeof(u8*) /* b.apCell */ + + nMaxCells*sizeof(u16) /* b.szCell */ + + pBt->pageSize; /* aSpace1 */ + + /* EVIDENCE-OF: R-28375-38319 SQLite will never request a scratch buffer + ** that is more than 6 times the database page size. */ + assert( szScratch<=6*(int)pBt->pageSize ); + b.apCell = sqlite3ScratchMalloc( szScratch ); + if( b.apCell==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto balance_cleanup; + } + b.szCell = (u16*)&b.apCell[nMaxCells]; + aSpace1 = (u8*)&b.szCell[nMaxCells]; + assert( EIGHT_BYTE_ALIGNMENT(aSpace1) ); + + /* + ** Load pointers to all cells on sibling pages and the divider cells + ** into the local b.apCell[] array. Make copies of the divider cells + ** into space obtained from aSpace1[]. The divider cells have already + ** been removed from pParent. + ** + ** If the siblings are on leaf pages, then the child pointers of the + ** divider cells are stripped from the cells before they are copied + ** into aSpace1[]. In this way, all cells in b.apCell[] are without + ** child pointers. If siblings are not leaves, then all cell in + ** b.apCell[] include child pointers. Either way, all cells in b.apCell[] + ** are alike. 
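+  ** As a sketch (field layout only): a divider cell taken from pParent
+  ** looks like
+  **
+  **     [ 4-byte child ptr ][ payload ... ]
+  **
+  ** and when the siblings are leaves it is stored in aSpace1[] without
+  ** its first four bytes; leafCorrection (see below) records the 4
+  ** stripped bytes.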
+ ** + ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf. + ** leafData: 1 if pPage holds key+data and pParent holds only keys. + */ + b.pRef = apOld[0]; + leafCorrection = b.pRef->leaf*4; + leafData = b.pRef->intKeyLeaf; + for(i=0; inCell; + u8 *aData = pOld->aData; + u16 maskPage = pOld->maskPage; + u8 *piCell = aData + pOld->cellOffset; + u8 *piEnd; + + /* Verify that all sibling pages are of the same "type" (table-leaf, + ** table-interior, index-leaf, or index-interior). + */ + if( pOld->aData[0]!=apOld[0]->aData[0] ){ + rc = SQLITE_CORRUPT_BKPT; + goto balance_cleanup; + } + + /* Load b.apCell[] with pointers to all cells in pOld. If pOld + ** constains overflow cells, include them in the b.apCell[] array + ** in the correct spot. + ** + ** Note that when there are multiple overflow cells, it is always the + ** case that they are sequential and adjacent. This invariant arises + ** because multiple overflows can only occurs when inserting divider + ** cells into a parent on a prior balance, and divider cells are always + ** adjacent and are inserted in order. There is an assert() tagged + ** with "NOTE 1" in the overflow cell insertion loop to prove this + ** invariant. + ** + ** This must be done in advance. Once the balance starts, the cell + ** offset section of the btree page will be overwritten and we will no + ** long be able to find the cells if a pointer to each cell is not saved + ** first. + */ + memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow)); + if( pOld->nOverflow>0 ){ + limit = pOld->aiOvfl[0]; + for(j=0; jnOverflow; k++){ + assert( k==0 || pOld->aiOvfl[k-1]+1==pOld->aiOvfl[k] );/* NOTE 1 */ + b.apCell[b.nCell] = pOld->apOvfl[k]; + b.nCell++; + } + } + piEnd = aData + pOld->cellOffset + 2*pOld->nCell; + while( piCellmaxLocal+23 ); + assert( iSpace1 <= (int)pBt->pageSize ); + memcpy(pTemp, apDiv[i], sz); + b.apCell[b.nCell] = pTemp+leafCorrection; + assert( leafCorrection==0 || leafCorrection==4 ); + b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection; + if( !pOld->leaf ){ + assert( leafCorrection==0 ); + assert( pOld->hdrOffset==0 ); + /* The right pointer of the child page pOld becomes the left + ** pointer of the divider cell */ + memcpy(b.apCell[b.nCell], &pOld->aData[8], 4); + }else{ + assert( leafCorrection==4 ); + while( b.szCell[b.nCell]<4 ){ + /* Do not allow any cells smaller than 4 bytes. If a smaller cell + ** does exist, pad it with 0x00 bytes. */ + assert( b.szCell[b.nCell]==3 || CORRUPT_DB ); + assert( b.apCell[b.nCell]==&aSpace1[iSpace1-3] || CORRUPT_DB ); + aSpace1[iSpace1++] = 0x00; + b.szCell[b.nCell]++; + } + } + b.nCell++; + } + } + + /* + ** Figure out the number of pages needed to hold all b.nCell cells. + ** Store this number in "k". Also compute szNew[] which is the total + ** size of all cells on the i-th page and cntNew[] which is the index + ** in b.apCell[] of the cell that divides page i from page i+1. + ** cntNew[k] should equal b.nCell. + ** + ** Values computed by this block: + ** + ** k: The total number of sibling pages + ** szNew[i]: Spaced used on the i-th sibling page. + ** cntNew[i]: Index in b.apCell[] and b.szCell[] for the first cell to + ** the right of the i-th sibling page. + ** usableSpace: Number of bytes of space available on each sibling. 
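+  ** Illustrative arithmetic (hypothetical page size): with 1024 usable
+  ** bytes per page, usableSpace is 1024 - 12 + leafCorrection, i.e.
+  ** 1016 bytes when the siblings are leaves (leafCorrection==4) and
+  ** 1012 when they are interior pages.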
+ ** + */ + usableSpace = pBt->usableSize - 12 + leafCorrection; + for(i=0; inFree; + for(j=0; jnOverflow; j++){ + szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]); + } + cntNew[i] = cntOld[i]; + } + k = nOld; + for(i=0; iusableSpace ){ + if( i+1>=k ){ + k = i+2; + if( k>NB+2 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } + szNew[k-1] = 0; + cntNew[k-1] = b.nCell; + } + sz = 2 + cachedCellSize(&b, cntNew[i]-1); + szNew[i] -= sz; + if( !leafData ){ + if( cntNew[i]usableSpace ) break; + szNew[i] += sz; + cntNew[i]++; + if( !leafData ){ + if( cntNew[i]=b.nCell ){ + k = i+1; + }else if( cntNew[i] <= (i>0 ? cntNew[i-1] : 0) ){ + rc = SQLITE_CORRUPT_BKPT; + goto balance_cleanup; + } + } + + /* + ** The packing computed by the previous block is biased toward the siblings + ** on the left side (siblings with smaller keys). The left siblings are + ** always nearly full, while the right-most sibling might be nearly empty. + ** The next block of code attempts to adjust the packing of siblings to + ** get a better balance. + ** + ** This adjustment is more than an optimization. The packing above might + ** be so out of balance as to be illegal. For example, the right-most + ** sibling might be completely empty. This adjustment is not optional. + */ + for(i=k-1; i>0; i--){ + int szRight = szNew[i]; /* Size of sibling on the right */ + int szLeft = szNew[i-1]; /* Size of sibling on the left */ + int r; /* Index of right-most cell in left sibling */ + int d; /* Index of first cell to the left of right sibling */ + + r = cntNew[i-1] - 1; + d = r + 1 - leafData; + (void)cachedCellSize(&b, d); + do{ + assert( d szLeft-(b.szCell[r]+(i==k-1?0:2)))){ + break; + } + szRight += b.szCell[d] + 2; + szLeft -= b.szCell[r] + 2; + cntNew[i-1] = r; + r--; + d--; + }while( r>=0 ); + szNew[i] = szRight; + szNew[i-1] = szLeft; + if( cntNew[i-1] <= (i>1 ? cntNew[i-2] : 0) ){ + rc = SQLITE_CORRUPT_BKPT; + goto balance_cleanup; + } + } + + /* Sanity check: For a non-corrupt database file one of the follwing + ** must be true: + ** (1) We found one or more cells (cntNew[0])>0), or + ** (2) pPage is a virtual root page. A virtual root page is when + ** the real root page is page 1 and we are the only child of + ** that page. + */ + assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB); + TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n", + apOld[0]->pgno, apOld[0]->nCell, + nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0, + nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0 + )); + + /* + ** Allocate k new pages. Reuse old pages where possible. + */ + pageFlags = apOld[0]->aData[0]; + for(i=0; ipDbPage); + nNew++; + if( rc ) goto balance_cleanup; + }else{ + assert( i>0 ); + rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 1 : pgno), 0); + if( rc ) goto balance_cleanup; + zeroPage(pNew, pageFlags); + apNew[i] = pNew; + nNew++; + cntOld[i] = b.nCell; + + /* Set the pointer-map entry for the new sibling page. */ + if( ISAUTOVACUUM ){ + ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); + if( rc!=SQLITE_OK ){ + goto balance_cleanup; + } + } + } + } + + /* + ** Reassign page numbers so that the new pages are in ascending order. + ** This helps to keep entries in the disk file in order so that a scan + ** of the table is closer to a linear scan through the file. That in turn + ** helps the operating system to deliver pages from the disk more rapidly. 
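+  ** For instance (hypothetical page numbers): if the new siblings were
+  ** allocated pages 7, 3 and 9, they are rekeyed so that the leftmost
+  ** sibling becomes page 3, the middle sibling page 7, and the
+  ** rightmost page 9.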
+ ** + ** An O(n^2) insertion sort algorithm is used, but since n is never more + ** than (NB+2) (a small constant), that should not be a problem. + ** + ** When NB==3, this one optimization makes the database about 25% faster + ** for large insertions and deletions. + */ + for(i=0; ipgno; + aPgFlags[i] = apNew[i]->pDbPage->flags; + for(j=0; ji ){ + sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0); + } + sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]); + apNew[i]->pgno = pgno; + } + } + + TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) " + "%d(%d nc=%d) %d(%d nc=%d)\n", + apNew[0]->pgno, szNew[0], cntNew[0], + nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, + nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0, + nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0, + nNew>=3 ? cntNew[2] - cntNew[1] - !leafData : 0, + nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0, + nNew>=4 ? cntNew[3] - cntNew[2] - !leafData : 0, + nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0, + nNew>=5 ? cntNew[4] - cntNew[3] - !leafData : 0 + )); + + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); + put4byte(pRight, apNew[nNew-1]->pgno); + + /* If the sibling pages are not leaves, ensure that the right-child pointer + ** of the right-most new sibling page is set to the value that was + ** originally in the same field of the right-most old sibling page. */ + if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){ + MemPage *pOld = (nNew>nOld ? apNew : apOld)[nOld-1]; + memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4); + } + + /* Make any required updates to pointer map entries associated with + ** cells stored on sibling pages following the balance operation. Pointer + ** map entries associated with divider cells are set by the insertCell() + ** routine. The associated pointer map entries are: + ** + ** a) if the cell contains a reference to an overflow chain, the + ** entry associated with the first page in the overflow chain, and + ** + ** b) if the sibling pages are not leaves, the child page associated + ** with the cell. + ** + ** If the sibling pages are not leaves, then the pointer map entry + ** associated with the right-child of each sibling may also need to be + ** updated. This happens below, after the sibling pages have been + ** populated, not here. + */ + if( ISAUTOVACUUM ){ + MemPage *pNew = apNew[0]; + u8 *aOld = pNew->aData; + int cntOldNext = pNew->nCell + pNew->nOverflow; + int usableSize = pBt->usableSize; + int iNew = 0; + int iOld = 0; + + for(i=0; inCell + pOld->nOverflow + !leafData; + aOld = pOld->aData; + } + if( i==cntNew[iNew] ){ + pNew = apNew[++iNew]; + if( !leafData ) continue; + } + + /* Cell pCell is destined for new sibling page pNew. Originally, it + ** was either part of sibling page iOld (possibly an overflow cell), + ** or else the divider cell to the left of sibling page iOld. So, + ** if sibling page iOld had the same page number as pNew, and if + ** pCell really was a part of sibling page iOld (not a divider or + ** overflow cell), we can skip updating the pointer map entries. */ + if( iOld>=nNew + || pNew->pgno!=aPgno[iOld] + || !SQLITE_WITHIN(pCell,aOld,&aOld[usableSize]) + ){ + if( !leafCorrection ){ + ptrmapPut(pBt, get4byte(pCell), PTRMAP_BTREE, pNew->pgno, &rc); + } + if( cachedCellSize(&b,i)>pNew->minLocal ){ + ptrmapPutOvflPtr(pNew, pCell, &rc); + } + if( rc ) goto balance_cleanup; + } + } + } + + /* Insert new divider cells into pParent. 
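+  ** For a leaf-data (intkey) tree, the divider inserted here is built
+  ** fresh: a 4-byte child pointer followed by the varint key of the
+  ** right-most cell of the left-hand sibling (the leafData branch
+  ** below). For other tree types an existing cell from b.apCell[] is
+  ** reused as the divider.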
*/ + for(i=0; ileaf ){ + memcpy(&pNew->aData[8], pCell, 4); + }else if( leafData ){ + /* If the tree is a leaf-data tree, and the siblings are leaves, + ** then there is no divider cell in b.apCell[]. Instead, the divider + ** cell consists of the integer key for the right-most cell of + ** the sibling-page assembled above only. + */ + CellInfo info; + j--; + pNew->xParseCell(pNew, b.apCell[j], &info); + pCell = pTemp; + sz = 4 + putVarint(&pCell[4], info.nKey); + pTemp = 0; + }else{ + pCell -= 4; + /* Obscure case for non-leaf-data trees: If the cell at pCell was + ** previously stored on a leaf node, and its reported size was 4 + ** bytes, then it may actually be smaller than this + ** (see btreeParseCellPtr(), 4 bytes is the minimum size of + ** any cell). But it is important to pass the correct size to + ** insertCell(), so reparse the cell now. + ** + ** This can only happen for b-trees used to evaluate "IN (SELECT ...)" + ** and WITHOUT ROWID tables with exactly one column which is the + ** primary key. + */ + if( b.szCell[j]==4 ){ + assert(leafCorrection==4); + sz = pParent->xCellSize(pParent, pCell); + } + } + iOvflSpace += sz; + assert( sz<=pBt->maxLocal+23 ); + assert( iOvflSpace <= (int)pBt->pageSize ); + insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc); + if( rc!=SQLITE_OK ) goto balance_cleanup; + assert( sqlite3PagerIswriteable(pParent->pDbPage) ); + } + + /* Now update the actual sibling pages. The order in which they are updated + ** is important, as this code needs to avoid disrupting any page from which + ** cells may still to be read. In practice, this means: + ** + ** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1]) + ** then it is not safe to update page apNew[iPg] until after + ** the left-hand sibling apNew[iPg-1] has been updated. + ** + ** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1]) + ** then it is not safe to update page apNew[iPg] until after + ** the right-hand sibling apNew[iPg+1] has been updated. + ** + ** If neither of the above apply, the page is safe to update. + ** + ** The iPg value in the following loop starts at nNew-1 goes down + ** to 0, then back up to nNew-1 again, thus making two passes over + ** the pages. On the initial downward pass, only condition (1) above + ** needs to be tested because (2) will always be true from the previous + ** step. On the upward pass, both conditions are always true, so the + ** upwards pass simply processes pages that were missed on the downward + ** pass. + */ + for(i=1-nNew; i=0 && iPg=0 /* On the upwards pass, or... */ + || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */ + ){ + int iNew; + int iOld; + int nNewCell; + + /* Verify condition (1): If cells are moving left, update iPg + ** only after iPg-1 has already been updated. */ + assert( iPg==0 || cntOld[iPg-1]>=cntNew[iPg-1] || abDone[iPg-1] ); + + /* Verify condition (2): If cells are moving right, update iPg + ** only after iPg+1 has already been updated. */ + assert( cntNew[iPg]>=cntOld[iPg] || abDone[iPg+1] ); + + if( iPg==0 ){ + iNew = iOld = 0; + nNewCell = cntNew[0]; + }else{ + iOld = iPgnFree = usableSpace-szNew[iPg]; + assert( apNew[iPg]->nOverflow==0 ); + assert( apNew[iPg]->nCell==nNewCell ); + } + } + + /* All pages have been processed exactly once */ + assert( memcmp(abDone, "\01\01\01\01\01", nNew)==0 ); + + assert( nOld>0 ); + assert( nNew>0 ); + + if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){ + /* The root page of the b-tree now contains no cells. 
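+    ** (Concretely: if a height-2 tree ends up with an empty root whose
+    ** only child is this single sibling, copying the child into the
+    ** root leaves an equivalent height-1 tree.)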
The only sibling + ** page is the right-child of the parent. Copy the contents of the + ** child page into the parent, decreasing the overall height of the + ** b-tree structure by one. This is described as the "balance-shallower" + ** sub-algorithm in some documentation. + ** + ** If this is an auto-vacuum database, the call to copyNodeContent() + ** sets all pointer-map entries corresponding to database image pages + ** for which the pointer is stored within the content being copied. + ** + ** It is critical that the child page be defragmented before being + ** copied into the parent, because if the parent is page 1 then it will + ** by smaller than the child due to the database header, and so all the + ** free space needs to be up front. + */ + assert( nNew==1 || CORRUPT_DB ); + rc = defragmentPage(apNew[0]); + testcase( rc!=SQLITE_OK ); + assert( apNew[0]->nFree == + (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2) + || rc!=SQLITE_OK + ); + copyNodeContent(apNew[0], pParent, &rc); + freePage(apNew[0], &rc); + }else if( ISAUTOVACUUM && !leafCorrection ){ + /* Fix the pointer map entries associated with the right-child of each + ** sibling page. All other pointer map entries have already been taken + ** care of. */ + for(i=0; iaData[8]); + ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc); + } + } + + assert( pParent->isInit ); + TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", + nOld, nNew, b.nCell)); + + /* Free any old pages that were not reused as new pages. + */ + for(i=nNew; iisInit ){ + /* The ptrmapCheckPages() contains assert() statements that verify that + ** all pointer map pages are set correctly. This is helpful while + ** debugging. This is usually disabled because a corrupt database may + ** cause an assert() statement to fail. */ + ptrmapCheckPages(apNew, nNew); + ptrmapCheckPages(&pParent, 1); + } +#endif + + /* + ** Cleanup before returning. + */ +balance_cleanup: + sqlite3ScratchFree(b.apCell); + for(i=0; ipBt; /* The BTree */ + + assert( pRoot->nOverflow>0 ); + assert( sqlite3_mutex_held(pBt->mutex) ); + + /* Make pRoot, the root page of the b-tree, writable. Allocate a new + ** page that will become the new right-child of pPage. Copy the contents + ** of the node stored on pRoot into the new child page. + */ + rc = sqlite3PagerWrite(pRoot->pDbPage); + if( rc==SQLITE_OK ){ + rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); + copyNodeContent(pRoot, pChild, &rc); + if( ISAUTOVACUUM ){ + ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); + } + } + if( rc ){ + *ppChild = 0; + releasePage(pChild); + return rc; + } + assert( sqlite3PagerIswriteable(pChild->pDbPage) ); + assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); + assert( pChild->nCell==pRoot->nCell ); + + TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); + + /* Copy the overflow cells from pRoot to pChild */ + memcpy(pChild->aiOvfl, pRoot->aiOvfl, + pRoot->nOverflow*sizeof(pRoot->aiOvfl[0])); + memcpy(pChild->apOvfl, pRoot->apOvfl, + pRoot->nOverflow*sizeof(pRoot->apOvfl[0])); + pChild->nOverflow = pRoot->nOverflow; + + /* Zero the contents of pRoot. Then install pChild as the right-child. */ + zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF); + put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild); + + *ppChild = pChild; + return SQLITE_OK; +} + +/* +** The page that pCur currently points to has just been modified in +** some way. 
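+** (Typically the modification is an insert that left overflow cells on
+** the page, or a delete that left more than two thirds of the page
+** free. As a rough map, not normative: an overfull root goes to
+** balance_deeper(); an overflow just past the last cell of an intkey
+** leaf whose parent has room goes to balance_quick(); everything else
+** goes to balance_nonroot().)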
This function figures out if this modification means the +** tree needs to be balanced, and if so calls the appropriate balancing +** routine. Balancing routines are: +** +** balance_quick() +** balance_deeper() +** balance_nonroot() +*/ +static int balance(BtCursor *pCur){ + int rc = SQLITE_OK; + const int nMin = pCur->pBt->usableSize * 2 / 3; + u8 aBalanceQuickSpace[13]; + u8 *pFree = 0; + + VVA_ONLY( int balance_quick_called = 0 ); + VVA_ONLY( int balance_deeper_called = 0 ); + + do { + int iPage = pCur->iPage; + MemPage *pPage = pCur->apPage[iPage]; + + if( iPage==0 ){ + if( pPage->nOverflow ){ + /* The root page of the b-tree is overfull. In this case call the + ** balance_deeper() function to create a new child for the root-page + ** and copy the current contents of the root-page to it. The + ** next iteration of the do-loop will balance the child page. + */ + assert( balance_deeper_called==0 ); + VVA_ONLY( balance_deeper_called++ ); + rc = balance_deeper(pPage, &pCur->apPage[1]); + if( rc==SQLITE_OK ){ + pCur->iPage = 1; + pCur->aiIdx[0] = 0; + pCur->aiIdx[1] = 0; + assert( pCur->apPage[1]->nOverflow ); + } + }else{ + break; + } + }else if( pPage->nOverflow==0 && pPage->nFree<=nMin ){ + break; + }else{ + MemPage * const pParent = pCur->apPage[iPage-1]; + int const iIdx = pCur->aiIdx[iPage-1]; + + rc = sqlite3PagerWrite(pParent->pDbPage); + if( rc==SQLITE_OK ){ +#ifndef SQLITE_OMIT_QUICKBALANCE + if( pPage->intKeyLeaf + && pPage->nOverflow==1 + && pPage->aiOvfl[0]==pPage->nCell + && pParent->pgno!=1 + && pParent->nCell==iIdx + ){ + /* Call balance_quick() to create a new sibling of pPage on which + ** to store the overflow cell. balance_quick() inserts a new cell + ** into pParent, which may cause pParent overflow. If this + ** happens, the next iteration of the do-loop will balance pParent + ** use either balance_nonroot() or balance_deeper(). Until this + ** happens, the overflow cell is stored in the aBalanceQuickSpace[] + ** buffer. + ** + ** The purpose of the following assert() is to check that only a + ** single call to balance_quick() is made for each call to this + ** function. If this were not verified, a subtle bug involving reuse + ** of the aBalanceQuickSpace[] might sneak in. + */ + assert( balance_quick_called==0 ); + VVA_ONLY( balance_quick_called++ ); + rc = balance_quick(pParent, pPage, aBalanceQuickSpace); + }else +#endif + { + /* In this case, call balance_nonroot() to redistribute cells + ** between pPage and up to 2 of its sibling pages. This involves + ** modifying the contents of pParent, which may cause pParent to + ** become overfull or underfull. The next iteration of the do-loop + ** will balance the parent page to correct this. + ** + ** If the parent page becomes overfull, the overflow cell or cells + ** are stored in the pSpace buffer allocated immediately below. + ** A subsequent iteration of the do-loop will deal with this by + ** calling balance_nonroot() (balance_deeper() may be called first, + ** but it doesn't deal with overflow cells - just moves them to a + ** different page). Once this subsequent call to balance_nonroot() + ** has completed, it is safe to release the pSpace buffer used by + ** the previous call, as the overflow cell data will have been + ** copied either into the body of a database page or into the new + ** pSpace buffer passed to the latter call to balance_nonroot(). 
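+          ** Lifetime sketch: allocate pSpace; call balance_nonroot();
+          ** carry pSpace forward as pFree across the next loop
+          ** iteration; free it right after the following call to
+          ** balance_nonroot(), or on function exit, whichever comes
+          ** first.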
+ */ + u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize); + rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1, + pCur->hints&BTREE_BULKLOAD); + if( pFree ){ + /* If pFree is not NULL, it points to the pSpace buffer used + ** by a previous call to balance_nonroot(). Its contents are + ** now stored either on real database pages or within the + ** new pSpace buffer, so it may be safely freed here. */ + sqlite3PageFree(pFree); + } + + /* The pSpace buffer will be freed after the next call to + ** balance_nonroot(), or just before this function returns, whichever + ** comes first. */ + pFree = pSpace; + } + } + + pPage->nOverflow = 0; + + /* The next iteration of the do-loop balances the parent page. */ + releasePage(pPage); + pCur->iPage--; + assert( pCur->iPage>=0 ); + } + }while( rc==SQLITE_OK ); + + if( pFree ){ + sqlite3PageFree(pFree); + } + return rc; +} + + +/* +** Insert a new record into the BTree. The content of the new record +** is described by the pX object. The pCur cursor is used only to +** define what table the record should be inserted into, and is left +** pointing at a random location. +** +** For a table btree (used for rowid tables), only the pX.nKey value of +** the key is used. The pX.pKey value must be NULL. The pX.nKey is the +** rowid or INTEGER PRIMARY KEY of the row. The pX.nData,pData,nZero fields +** hold the content of the row. +** +** For an index btree (used for indexes and WITHOUT ROWID tables), the +** key is an arbitrary byte sequence stored in pX.pKey,nKey. The +** pX.pData,nData,nZero fields must be zero. +** +** If the seekResult parameter is non-zero, then a successful call to +** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already +** been performed. In other words, if seekResult!=0 then the cursor +** is currently pointing to a cell that will be adjacent to the cell +** to be inserted. If seekResult<0 then pCur points to a cell that is +** smaller then (pKey,nKey). If seekResult>0 then pCur points to a cell +** that is larger than (pKey,nKey). +** +** If seekResult==0, that means pCur is pointing at some unknown location. +** In that case, this routine must seek the cursor to the correct insertion +** point for (pKey,nKey) before doing the insertion. For index btrees, +** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked +** key values and pX->aMem can be used instead of pX->pKey to avoid having +** to decode the key. +*/ +SQLITE_PRIVATE int sqlite3BtreeInsert( + BtCursor *pCur, /* Insert data into the table of this cursor */ + const BtreePayload *pX, /* Content of the row to be inserted */ + int flags, /* True if this is likely an append */ + int seekResult /* Result of prior MovetoUnpacked() call */ +){ + int rc; + int loc = seekResult; /* -1: before desired location +1: after */ + int szNew = 0; + int idx; + MemPage *pPage; + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; + unsigned char *oldCell; + unsigned char *newCell = 0; + + assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags ); + + if( pCur->eState==CURSOR_FAULT ){ + assert( pCur->skipNext!=SQLITE_OK ); + return pCur->skipNext; + } + + assert( cursorOwnsBtShared(pCur) ); + assert( (pCur->curFlags & BTCF_WriteFlag)!=0 + && pBt->inTransaction==TRANS_WRITE + && (pBt->btsFlags & BTS_READ_ONLY)==0 ); + assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); + + /* Assert that the caller has been consistent. 
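+  ** (Mechanically, the assert below requires pX->pKey and
+  ** pCur->pKeyInfo to be NULL or non-NULL together.)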
If this cursor was opened + ** expecting an index b-tree, then the caller should be inserting blob + ** keys with no associated data. If the cursor was opened expecting an + ** intkey table, the caller should be inserting integer keys with a + ** blob of associated data. */ + assert( (pX->pKey==0)==(pCur->pKeyInfo==0) ); + + /* Save the positions of any other cursors open on this table. + ** + ** In some cases, the call to btreeMoveto() below is a no-op. For + ** example, when inserting data into a table with auto-generated integer + ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the + ** integer key to use. It then calls this function to actually insert the + ** data into the intkey B-Tree. In this case btreeMoveto() recognizes + ** that the cursor is already where it needs to be and returns without + ** doing any work. To avoid thwarting these optimizations, it is important + ** not to clear the cursor here. + */ + if( pCur->curFlags & BTCF_Multiple ){ + rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); + if( rc ) return rc; + } + + if( pCur->pKeyInfo==0 ){ + assert( pX->pKey==0 ); + /* If this is an insert into a table b-tree, invalidate any incrblob + ** cursors open on the row being replaced */ + invalidateIncrblobCursors(p, pX->nKey, 0); + + /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing + ** to a row with the same key as the new entry being inserted. */ + assert( (flags & BTREE_SAVEPOSITION)==0 || + ((pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey) ); + + /* If the cursor is currently on the last row and we are appending a + ** new row onto the end, set the "loc" to avoid an unnecessary + ** btreeMoveto() call */ + if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey ){ + loc = 0; + }else if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0 + && pCur->info.nKey==pX->nKey-1 ){ + loc = -1; + }else if( loc==0 ){ + rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc); + if( rc ) return rc; + } + }else if( loc==0 && (flags & BTREE_SAVEPOSITION)==0 ){ + if( pX->nMem ){ + UnpackedRecord r; + r.pKeyInfo = pCur->pKeyInfo; + r.aMem = pX->aMem; + r.nField = pX->nMem; + r.default_rc = 0; + r.errCode = 0; + r.r1 = 0; + r.r2 = 0; + r.eqSeen = 0; + rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc); + }else{ + rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc); + } + if( rc ) return rc; + } + assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); + + pPage = pCur->apPage[pCur->iPage]; + assert( pPage->intKey || pX->nKey>=0 ); + assert( pPage->leaf || !pPage->intKey ); + + TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", + pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno, + loc==0 ? "overwrite" : "new entry")); + assert( pPage->isInit ); + newCell = pBt->pTmpSpace; + assert( newCell!=0 ); + rc = fillInCell(pPage, newCell, pX, &szNew); + if( rc ) goto end_insert; + assert( szNew==pPage->xCellSize(pPage, newCell) ); + assert( szNew <= MX_CELL_SIZE(pBt) ); + idx = pCur->aiIdx[pCur->iPage]; + if( loc==0 ){ + CellInfo info; + assert( idxnCell ); + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc ){ + goto end_insert; + } + oldCell = findCell(pPage, idx); + if( !pPage->leaf ){ + memcpy(newCell, oldCell, 4); + } + rc = clearCell(pPage, oldCell, &info); + if( info.nSize==szNew && info.nLocal==info.nPayload ){ + /* Overwrite the old cell with the new if they are the same size. 
+ ** We could also try to do this if the old cell is smaller, then add + ** the leftover space to the free list. But experiments show that + ** doing that is no faster then skipping this optimization and just + ** calling dropCell() and insertCell(). */ + assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */ + if( oldCell+szNew > pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT; + memcpy(oldCell, newCell, szNew); + return SQLITE_OK; + } + dropCell(pPage, idx, info.nSize, &rc); + if( rc ) goto end_insert; + }else if( loc<0 && pPage->nCell>0 ){ + assert( pPage->leaf ); + idx = ++pCur->aiIdx[pCur->iPage]; + }else{ + assert( pPage->leaf ); + } + insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); + assert( pPage->nOverflow==0 || rc==SQLITE_OK ); + assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); + + /* If no error has occurred and pPage has an overflow cell, call balance() + ** to redistribute the cells within the tree. Since balance() may move + ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey + ** variables. + ** + ** Previous versions of SQLite called moveToRoot() to move the cursor + ** back to the root page as balance() used to invalidate the contents + ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that, + ** set the cursor state to "invalid". This makes common insert operations + ** slightly faster. + ** + ** There is a subtle but important optimization here too. When inserting + ** multiple records into an intkey b-tree using a single cursor (as can + ** happen while processing an "INSERT INTO ... SELECT" statement), it + ** is advantageous to leave the cursor pointing to the last entry in + ** the b-tree if possible. If the cursor is left pointing to the last + ** entry in the table, and the next row inserted has an integer key + ** larger than the largest existing key, it is possible to insert the + ** row without seeking the cursor. This can be a big performance boost. + */ + pCur->info.nSize = 0; + if( pPage->nOverflow ){ + assert( rc==SQLITE_OK ); + pCur->curFlags &= ~(BTCF_ValidNKey); + rc = balance(pCur); + + /* Must make sure nOverflow is reset to zero even if the balance() + ** fails. Internal data structure corruption will result otherwise. + ** Also, set the cursor state to invalid. This stops saveCursorPosition() + ** from trying to save the current position of the cursor. */ + pCur->apPage[pCur->iPage]->nOverflow = 0; + pCur->eState = CURSOR_INVALID; + if( (flags & BTREE_SAVEPOSITION) && rc==SQLITE_OK ){ + rc = moveToRoot(pCur); + if( pCur->pKeyInfo ){ + assert( pCur->pKey==0 ); + pCur->pKey = sqlite3Malloc( pX->nKey ); + if( pCur->pKey==0 ){ + rc = SQLITE_NOMEM; + }else{ + memcpy(pCur->pKey, pX->pKey, pX->nKey); + } + } + pCur->eState = CURSOR_REQUIRESEEK; + pCur->nKey = pX->nKey; + } + } + assert( pCur->apPage[pCur->iPage]->nOverflow==0 ); + +end_insert: + return rc; +} + +/* +** Delete the entry that the cursor is pointing to. +** +** If the BTREE_SAVEPOSITION bit of the flags parameter is zero, then +** the cursor is left pointing at an arbitrary location after the delete. +** But if that bit is set, then the cursor is left in a state such that +** the next call to BtreeNext() or BtreePrev() moves it to the same row +** as it would have been on if the call to BtreeDelete() had been omitted. +** +** The BTREE_AUXDELETE bit of flags indicates that is one of several deletes +** associated with a single table entry and its indexes. Only one of those +** deletes is considered the "primary" delete. 
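+** (For example: deleting one row from a table that carries two indexes
+** involves three BtreeDelete() calls, of which only the table-cursor
+** delete would be the primary one.)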
The primary delete occurs +** on a cursor that is not a BTREE_FORDELETE cursor. All but one delete +** operation on non-FORDELETE cursors is tagged with the AUXDELETE flag. +** The BTREE_AUXDELETE bit is a hint that is not used by this implementation, +** but which might be used by alternative storage engines. +*/ +SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ + Btree *p = pCur->pBtree; + BtShared *pBt = p->pBt; + int rc; /* Return code */ + MemPage *pPage; /* Page to delete cell from */ + unsigned char *pCell; /* Pointer to cell to delete */ + int iCellIdx; /* Index of cell to delete */ + int iCellDepth; /* Depth of node containing pCell */ + CellInfo info; /* Size of the cell being deleted */ + int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */ + u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */ + + assert( cursorOwnsBtShared(pCur) ); + assert( pBt->inTransaction==TRANS_WRITE ); + assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); + assert( pCur->curFlags & BTCF_WriteFlag ); + assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); + assert( !hasReadConflicts(p, pCur->pgnoRoot) ); + assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); + assert( pCur->eState==CURSOR_VALID ); + assert( (flags & ~(BTREE_SAVEPOSITION | BTREE_AUXDELETE))==0 ); + + iCellDepth = pCur->iPage; + iCellIdx = pCur->aiIdx[iCellDepth]; + pPage = pCur->apPage[iCellDepth]; + pCell = findCell(pPage, iCellIdx); + + /* If the bPreserve flag is set to true, then the cursor position must + ** be preserved following this delete operation. If the current delete + ** will cause a b-tree rebalance, then this is done by saving the cursor + ** key and leaving the cursor in CURSOR_REQUIRESEEK state before + ** returning. + ** + ** Or, if the current delete will not cause a rebalance, then the cursor + ** will be left in CURSOR_SKIPNEXT state pointing to the entry immediately + ** before or after the deleted entry. In this case set bSkipnext to true. */ + if( bPreserve ){ + if( !pPage->leaf + || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3) + ){ + /* A b-tree rebalance will be required after deleting this entry. + ** Save the cursor key. */ + rc = saveCursorKey(pCur); + if( rc ) return rc; + }else{ + bSkipnext = 1; + } + } + + /* If the page containing the entry to delete is not a leaf page, move + ** the cursor to the largest entry in the tree that is smaller than + ** the entry being deleted. This cell will replace the cell being deleted + ** from the internal node. The 'previous' entry is used for this instead + ** of the 'next' entry, as the previous entry is always a part of the + ** sub-tree headed by the child page of the cell being deleted. This makes + ** balancing the tree following the delete operation easier. */ + if( !pPage->leaf ){ + int notUsed = 0; + rc = sqlite3BtreePrevious(pCur, ¬Used); + if( rc ) return rc; + } + + /* Save the positions of any other cursors open on this table before + ** making any modifications. */ + if( pCur->curFlags & BTCF_Multiple ){ + rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); + if( rc ) return rc; + } + + /* If this is a delete operation to remove a row from a table b-tree, + ** invalidate any incrblob cursors open on the row being deleted. */ + if( pCur->pKeyInfo==0 ){ + invalidateIncrblobCursors(p, pCur->info.nKey, 0); + } + + /* Make the page containing the entry to be deleted writable. 
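+  ** (sqlite3PagerWrite() journals the original page image first, so
+  ** the clearCell() and dropCell() modifications that follow can be
+  ** rolled back if the transaction aborts.)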
Then free any + ** overflow pages associated with the entry and finally remove the cell + ** itself from within the page. */ + rc = sqlite3PagerWrite(pPage->pDbPage); + if( rc ) return rc; + rc = clearCell(pPage, pCell, &info); + dropCell(pPage, iCellIdx, info.nSize, &rc); + if( rc ) return rc; + + /* If the cell deleted was not located on a leaf page, then the cursor + ** is currently pointing to the largest entry in the sub-tree headed + ** by the child-page of the cell that was just deleted from an internal + ** node. The cell from the leaf node needs to be moved to the internal + ** node to replace the deleted cell. */ + if( !pPage->leaf ){ + MemPage *pLeaf = pCur->apPage[pCur->iPage]; + int nCell; + Pgno n = pCur->apPage[iCellDepth+1]->pgno; + unsigned char *pTmp; + + pCell = findCell(pLeaf, pLeaf->nCell-1); + if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT; + nCell = pLeaf->xCellSize(pLeaf, pCell); + assert( MX_CELL_SIZE(pBt) >= nCell ); + pTmp = pBt->pTmpSpace; + assert( pTmp!=0 ); + rc = sqlite3PagerWrite(pLeaf->pDbPage); + if( rc==SQLITE_OK ){ + insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); + } + dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); + if( rc ) return rc; + } + + /* Balance the tree. If the entry deleted was located on a leaf page, + ** then the cursor still points to that page. In this case the first + ** call to balance() repairs the tree, and the if(...) condition is + ** never true. + ** + ** Otherwise, if the entry deleted was on an internal node page, then + ** pCur is pointing to the leaf page from which a cell was removed to + ** replace the cell deleted from the internal node. This is slightly + ** tricky as the leaf node may be underfull, and the internal node may + ** be either under or overfull. In this case run the balancing algorithm + ** on the leaf node first. If the balance proceeds far enough up the + ** tree that we can be sure that any problem in the internal node has + ** been corrected, so be it. Otherwise, after balancing the leaf node, + ** walk the cursor up the tree to the internal node and balance it as + ** well. */ + rc = balance(pCur); + if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){ + while( pCur->iPage>iCellDepth ){ + releasePage(pCur->apPage[pCur->iPage--]); + } + rc = balance(pCur); + } + + if( rc==SQLITE_OK ){ + if( bSkipnext ){ + assert( bPreserve && (pCur->iPage==iCellDepth || CORRUPT_DB) ); + assert( pPage==pCur->apPage[pCur->iPage] || CORRUPT_DB ); + assert( (pPage->nCell>0 || CORRUPT_DB) && iCellIdx<=pPage->nCell ); + pCur->eState = CURSOR_SKIPNEXT; + if( iCellIdx>=pPage->nCell ){ + pCur->skipNext = -1; + pCur->aiIdx[iCellDepth] = pPage->nCell-1; + }else{ + pCur->skipNext = 1; + } + }else{ + rc = moveToRoot(pCur); + if( bPreserve ){ + pCur->eState = CURSOR_REQUIRESEEK; + } + } + } + return rc; +} + +/* +** Create a new BTree table. Write into *piTable the page +** number for the root page of the new table. +** +** The type of type is determined by the flags parameter. Only the +** following values of flags are currently in use. 
Other values for +** flags might not work: +** +** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys +** BTREE_ZERODATA Used for SQL indices +*/ +static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){ + BtShared *pBt = p->pBt; + MemPage *pRoot; + Pgno pgnoRoot; + int rc; + int ptfFlags; /* Page-type flage for the root page of new table */ + + assert( sqlite3BtreeHoldsMutex(p) ); + assert( pBt->inTransaction==TRANS_WRITE ); + assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); + +#ifdef SQLITE_OMIT_AUTOVACUUM + rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); + if( rc ){ + return rc; + } +#else + if( pBt->autoVacuum ){ + Pgno pgnoMove; /* Move a page here to make room for the root-page */ + MemPage *pPageMove; /* The page to move to. */ + + /* Creating a new table may probably require moving an existing database + ** to make room for the new tables root page. In case this page turns + ** out to be an overflow page, delete all overflow page-map caches + ** held by open cursors. + */ + invalidateAllOverflowCache(pBt); + + /* Read the value of meta[3] from the database to determine where the + ** root page of the new table should go. meta[3] is the largest root-page + ** created so far, so the new root-page is (meta[3]+1). + */ + sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot); + pgnoRoot++; + + /* The new root-page may not be allocated on a pointer-map page, or the + ** PENDING_BYTE page. + */ + while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) || + pgnoRoot==PENDING_BYTE_PAGE(pBt) ){ + pgnoRoot++; + } + assert( pgnoRoot>=3 || CORRUPT_DB ); + testcase( pgnoRoot<3 ); + + /* Allocate a page. The page that currently resides at pgnoRoot will + ** be moved to the allocated page (unless the allocated page happens + ** to reside at pgnoRoot). + */ + rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, BTALLOC_EXACT); + if( rc!=SQLITE_OK ){ + return rc; + } + + if( pgnoMove!=pgnoRoot ){ + /* pgnoRoot is the page that will be used for the root-page of + ** the new table (assuming an error did not occur). But we were + ** allocated pgnoMove. If required (i.e. if it was not allocated + ** by extending the file), the current page at position pgnoMove + ** is already journaled. + */ + u8 eType = 0; + Pgno iPtrPage = 0; + + /* Save the positions of any open cursors. This is required in + ** case they are holding a reference to an xFetch reference + ** corresponding to page pgnoRoot. */ + rc = saveAllCursors(pBt, 0, 0); + releasePage(pPageMove); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Move the page currently at pgnoRoot to pgnoMove. */ + rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); + if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ + rc = SQLITE_CORRUPT_BKPT; + } + if( rc!=SQLITE_OK ){ + releasePage(pRoot); + return rc; + } + assert( eType!=PTRMAP_ROOTPAGE ); + assert( eType!=PTRMAP_FREEPAGE ); + rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0); + releasePage(pRoot); + + /* Obtain the page at pgnoRoot */ + if( rc!=SQLITE_OK ){ + return rc; + } + rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = sqlite3PagerWrite(pRoot->pDbPage); + if( rc!=SQLITE_OK ){ + releasePage(pRoot); + return rc; + } + }else{ + pRoot = pPageMove; + } + + /* Update the pointer-map and meta-data with the new root-page number. 
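+    ** (A root page has no parent page, which is why the ptrmapPut()
+    ** call below passes 0 as the parent-page argument.)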
*/ + ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0, &rc); + if( rc ){ + releasePage(pRoot); + return rc; + } + + /* When the new root page was allocated, page 1 was made writable in + ** order either to increase the database filesize, or to decrement the + ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail. + */ + assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) ); + rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot); + if( NEVER(rc) ){ + releasePage(pRoot); + return rc; + } + + }else{ + rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); + if( rc ) return rc; + } +#endif + assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); + if( createTabFlags & BTREE_INTKEY ){ + ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF; + }else{ + ptfFlags = PTF_ZERODATA | PTF_LEAF; + } + zeroPage(pRoot, ptfFlags); + sqlite3PagerUnref(pRoot->pDbPage); + assert( (pBt->openFlags & BTREE_SINGLE)==0 || pgnoRoot==2 ); + *piTable = (int)pgnoRoot; + return SQLITE_OK; +} +SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeCreateTable(p, piTable, flags); + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Erase the given database page and all its children. Return +** the page to the freelist. +*/ +static int clearDatabasePage( + BtShared *pBt, /* The BTree that contains the table */ + Pgno pgno, /* Page number to clear */ + int freePageFlag, /* Deallocate page if true */ + int *pnChange /* Add number of Cells freed to this counter */ +){ + MemPage *pPage; + int rc; + unsigned char *pCell; + int i; + int hdr; + CellInfo info; + + assert( sqlite3_mutex_held(pBt->mutex) ); + if( pgno>btreePagecount(pBt) ){ + return SQLITE_CORRUPT_BKPT; + } + rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); + if( rc ) return rc; + if( pPage->bBusy ){ + rc = SQLITE_CORRUPT_BKPT; + goto cleardatabasepage_out; + } + pPage->bBusy = 1; + hdr = pPage->hdrOffset; + for(i=0; inCell; i++){ + pCell = findCell(pPage, i); + if( !pPage->leaf ){ + rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); + if( rc ) goto cleardatabasepage_out; + } + rc = clearCell(pPage, pCell, &info); + if( rc ) goto cleardatabasepage_out; + } + if( !pPage->leaf ){ + rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); + if( rc ) goto cleardatabasepage_out; + }else if( pnChange ){ + assert( pPage->intKey || CORRUPT_DB ); + testcase( !pPage->intKey ); + *pnChange += pPage->nCell; + } + if( freePageFlag ){ + freePage(pPage, &rc); + }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){ + zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF); + } + +cleardatabasepage_out: + pPage->bBusy = 0; + releasePage(pPage); + return rc; +} + +/* +** Delete all information from a single table in the database. iTable is +** the page number of the root of the table. After this routine returns, +** the root page is empty, but still exists. +** +** This routine will fail with SQLITE_LOCKED if there are any open +** read cursors on the table. Open write cursors are moved to the +** root of the table. +** +** If pnChange is not NULL, then table iTable must be an intkey table. The +** integer value pointed to by pnChange is incremented by the number of +** entries in the table. 
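+** (This is, for instance, the path exercised by a "DELETE FROM t" with
+** no WHERE clause, where the value accumulated in pnChange supplies
+** the reported row count.)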
+*/ +SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){ + int rc; + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + assert( p->inTrans==TRANS_WRITE ); + + rc = saveAllCursors(pBt, (Pgno)iTable, 0); + + if( SQLITE_OK==rc ){ + /* Invalidate all incrblob cursors open on table iTable (assuming iTable + ** is the root of a table b-tree - if it is not, the following call is + ** a no-op). */ + invalidateIncrblobCursors(p, 0, 1); + rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange); + } + sqlite3BtreeLeave(p); + return rc; +} + +/* +** Delete all information from the single table that pCur is open on. +** +** This routine only work for pCur on an ephemeral table. +*/ +SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){ + return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0); +} + +/* +** Erase all information in a table and add the root of the table to +** the freelist. Except, the root of the principle table (the one on +** page 1) is never added to the freelist. +** +** This routine will fail with SQLITE_LOCKED if there are any open +** cursors on the table. +** +** If AUTOVACUUM is enabled and the page at iTable is not the last +** root page in the database file, then the last root page +** in the database file is moved into the slot formerly occupied by +** iTable and that last slot formerly occupied by the last root page +** is added to the freelist instead of iTable. In this say, all +** root pages are kept at the beginning of the database file, which +** is necessary for AUTOVACUUM to work right. *piMoved is set to the +** page number that used to be the last root page in the file before +** the move. If no page gets moved, *piMoved is set to 0. +** The last root page is recorded in meta[3] and the value of +** meta[3] is updated by this procedure. +*/ +static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){ + int rc; + MemPage *pPage = 0; + BtShared *pBt = p->pBt; + + assert( sqlite3BtreeHoldsMutex(p) ); + assert( p->inTrans==TRANS_WRITE ); + assert( iTable>=2 ); + + rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0); + if( rc ) return rc; + rc = sqlite3BtreeClearTable(p, iTable, 0); + if( rc ){ + releasePage(pPage); + return rc; + } + + *piMoved = 0; + +#ifdef SQLITE_OMIT_AUTOVACUUM + freePage(pPage, &rc); + releasePage(pPage); +#else + if( pBt->autoVacuum ){ + Pgno maxRootPgno; + sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno); + + if( iTable==maxRootPgno ){ + /* If the table being dropped is the table with the largest root-page + ** number in the database, put the root page on the free list. + */ + freePage(pPage, &rc); + releasePage(pPage); + if( rc!=SQLITE_OK ){ + return rc; + } + }else{ + /* The table being dropped does not have the largest root-page + ** number in the database. So move the page that does into the + ** gap left by the deleted root-page. + */ + MemPage *pMove; + releasePage(pPage); + rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0); + releasePage(pMove); + if( rc!=SQLITE_OK ){ + return rc; + } + pMove = 0; + rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0); + freePage(pMove, &rc); + releasePage(pMove); + if( rc!=SQLITE_OK ){ + return rc; + } + *piMoved = maxRootPgno; + } + + /* Set the new 'max-root-page' value in the database header. This + ** is the old value less one, less one more if that happens to + ** be a root-page number, less one again if that is the + ** PENDING_BYTE_PAGE. 
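+    ** For example (hypothetical layout): if maxRootPgno was 12 and
+    ** page 11 happens to be a pointer-map page, the value written back
+    ** to meta[3] is 10.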
+ */ + maxRootPgno--; + while( maxRootPgno==PENDING_BYTE_PAGE(pBt) + || PTRMAP_ISPAGE(pBt, maxRootPgno) ){ + maxRootPgno--; + } + assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) ); + + rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno); + }else{ + freePage(pPage, &rc); + releasePage(pPage); + } +#endif + return rc; +} +SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ + int rc; + sqlite3BtreeEnter(p); + rc = btreeDropTable(p, iTable, piMoved); + sqlite3BtreeLeave(p); + return rc; +} + + +/* +** This function may only be called if the b-tree connection already +** has a read or write transaction open on the database. +** +** Read the meta-information out of a database file. Meta[0] +** is the number of free pages currently in the database. Meta[1] +** through meta[15] are available for use by higher layers. Meta[0] +** is read-only, the others are read/write. +** +** The schema layer numbers meta values differently. At the schema +** layer (and the SetCookie and ReadCookie opcodes) the number of +** free pages is not visible. So Cookie[0] is the same as Meta[1]. +** +** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead +** of reading the value out of the header, it instead loads the "DataVersion" +** from the pager. The BTREE_DATA_VERSION value is not actually stored in the +** database file. It is a number computed by the pager. But its access +** pattern is the same as header meta values, and so it is convenient to +** read it from this routine. +*/ +SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ + BtShared *pBt = p->pBt; + + sqlite3BtreeEnter(p); + assert( p->inTrans>TRANS_NONE ); + assert( SQLITE_OK==querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK) ); + assert( pBt->pPage1 ); + assert( idx>=0 && idx<=15 ); + + if( idx==BTREE_DATA_VERSION ){ + *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion; + }else{ + *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]); + } + + /* If auto-vacuum is disabled in this build and this is an auto-vacuum + ** database, mark the database as read-only. */ +#ifdef SQLITE_OMIT_AUTOVACUUM + if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ){ + pBt->btsFlags |= BTS_READ_ONLY; + } +#endif + + sqlite3BtreeLeave(p); +} + +/* +** Write meta-information back into the database. Meta[0] is +** read-only and may not be written. +*/ +SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){ + BtShared *pBt = p->pBt; + unsigned char *pP1; + int rc; + assert( idx>=1 && idx<=15 ); + sqlite3BtreeEnter(p); + assert( p->inTrans==TRANS_WRITE ); + assert( pBt->pPage1!=0 ); + pP1 = pBt->pPage1->aData; + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + if( rc==SQLITE_OK ){ + put4byte(&pP1[36 + idx*4], iMeta); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( idx==BTREE_INCR_VACUUM ){ + assert( pBt->autoVacuum || iMeta==0 ); + assert( iMeta==0 || iMeta==1 ); + pBt->incrVacuum = (u8)iMeta; + } +#endif + } + sqlite3BtreeLeave(p); + return rc; +} + +#ifndef SQLITE_OMIT_BTREECOUNT +/* +** The first argument, pCur, is a cursor opened on some b-tree. Count the +** number of entries in the b-tree and write the result to *pnEntry. +** +** SQLITE_OK is returned if the operation is successfully executed. +** Otherwise, if an error is encountered (i.e. an IO error or database +** corruption) an SQLite error code is returned. 
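+** (The traversal below visits each page of the b-tree exactly once,
+** overflow pages excluded, adding pPage->nCell for every leaf, and for
+** interior pages as well when the tree is an index b-tree, since
+** interior cells of an index are real entries.)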
+*/
+SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){
+  i64 nEntry = 0;               /* Value to return in *pnEntry */
+  int rc;                       /* Return code */
+
+  if( pCur->pgnoRoot==0 ){
+    *pnEntry = 0;
+    return SQLITE_OK;
+  }
+  rc = moveToRoot(pCur);
+
+  /* Unless an error occurs, the following loop runs one iteration for each
+  ** page in the B-Tree structure (not including overflow pages).
+  */
+  while( rc==SQLITE_OK ){
+    int iIdx;                          /* Index of child node in parent */
+    MemPage *pPage;                    /* Current page of the b-tree */
+
+    /* If this is a leaf page or the tree is not an int-key tree, then
+    ** this page contains countable entries. Increment the entry counter
+    ** accordingly.
+    */
+    pPage = pCur->apPage[pCur->iPage];
+    if( pPage->leaf || !pPage->intKey ){
+      nEntry += pPage->nCell;
+    }
+
+    /* pPage is a leaf node. This loop navigates the cursor up the tree
+    ** until it points at the interior cell that is the parent of the next
+    ** page in the tree that has not yet been visited. The
+    ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell
+    ** of the page, or to the number of cells in the page if the next page
+    ** to visit is the right-child of its parent.
+    **
+    ** If all pages in the tree have been visited, return SQLITE_OK to the
+    ** caller.
+    */
+    if( pPage->leaf ){
+      do {
+        if( pCur->iPage==0 ){
+          /* All pages of the b-tree have been visited. Return successfully. */
+          *pnEntry = nEntry;
+          return moveToRoot(pCur);
+        }
+        moveToParent(pCur);
+      }while ( pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell );
+
+      pCur->aiIdx[pCur->iPage]++;
+      pPage = pCur->apPage[pCur->iPage];
+    }
+
+    /* Descend to the child node of the cell that the cursor currently
+    ** points at. This is the right-child if (iIdx==pPage->nCell).
+    */
+    iIdx = pCur->aiIdx[pCur->iPage];
+    if( iIdx==pPage->nCell ){
+      rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8]));
+    }else{
+      rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx)));
+    }
+  }
+
+  /* An error has occurred. Return an error code. */
+  return rc;
+}
+#endif
+
+/*
+** Return the pager associated with a BTree.  This routine is used for
+** testing and debugging only.
+*/
+SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){
+  return p->pBt->pPager;
+}
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** Append a message to the error message string.
+*/
+static void checkAppendMsg(
+  IntegrityCk *pCheck,
+  const char *zFormat,
+  ...
+){
+  va_list ap;
+  if( !pCheck->mxErr ) return;
+  pCheck->mxErr--;
+  pCheck->nErr++;
+  va_start(ap, zFormat);
+  if( pCheck->errMsg.nChar ){
+    sqlite3StrAccumAppend(&pCheck->errMsg, "\n", 1);
+  }
+  if( pCheck->zPfx ){
+    sqlite3XPrintf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2);
+  }
+  sqlite3VXPrintf(&pCheck->errMsg, zFormat, ap);
+  va_end(ap);
+  if( pCheck->errMsg.accError==STRACCUM_NOMEM ){
+    pCheck->mallocFailed = 1;
+  }
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+
+/*
+** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that
+** corresponds to page iPg is already set.
+*/
+static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){
+  assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+  return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07)));
+}
+
+/*
+** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg.
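+**
+** Each page is tracked by a single bit: page iPg lives in byte iPg/8 of
+** aPgRef[], at bit position iPg&0x07. Page 11, for example, maps to
+** byte 1, mask 1<<3 == 0x08.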
+*/
+static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){
+  assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 );
+  pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07));
+}
+
+
+/*
+** Add 1 to the reference count for page iPage. If this is the second
+** reference to the page, add an error message to pCheck->zErrMsg.
+** Return 1 if there are 2 or more references to the page and 0 if
+** this is the first reference to the page.
+**
+** Also check that the page number is in bounds.
+*/
+static int checkRef(IntegrityCk *pCheck, Pgno iPage){
+  if( iPage==0 ) return 1;
+  if( iPage>pCheck->nPage ){
+    checkAppendMsg(pCheck, "invalid page number %d", iPage);
+    return 1;
+  }
+  if( getPageReferenced(pCheck, iPage) ){
+    checkAppendMsg(pCheck, "2nd reference to page %d", iPage);
+    return 1;
+  }
+  setPageReferenced(pCheck, iPage);
+  return 0;
+}
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Check that the entry in the pointer-map for page iChild maps to
+** page iParent, pointer type ptrType. If not, append an error message
+** to pCheck.
+*/
+static void checkPtrmap(
+  IntegrityCk *pCheck,   /* Integrity check context */
+  Pgno iChild,           /* Child page number */
+  u8 eType,              /* Expected pointer map type */
+  Pgno iParent           /* Expected pointer map parent page number */
+){
+  int rc;
+  u8 ePtrmapType;
+  Pgno iPtrmapParent;
+
+  rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent);
+  if( rc!=SQLITE_OK ){
+    if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1;
+    checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild);
+    return;
+  }
+
+  if( ePtrmapType!=eType || iPtrmapParent!=iParent ){
+    checkAppendMsg(pCheck,
+      "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)",
+      iChild, eType, iParent, ePtrmapType, iPtrmapParent);
+  }
+}
+#endif
+
+/*
+** Check the integrity of the freelist or of an overflow page list.
+** Verify that the number of pages on the list is N.
+*/
+static void checkList(
+  IntegrityCk *pCheck,  /* Integrity checking context */
+  int isFreeList,       /* True for a freelist. False for overflow page list */
+  int iPage,            /* Page number for first page in the list */
+  int N                 /* Expected number of pages in the list */
+){
+  int i;
+  int expected = N;
+  int iFirst = iPage;
+  while( N-- > 0 && pCheck->mxErr ){
+    DbPage *pOvflPage;
+    unsigned char *pOvflData;
+    if( iPage<1 ){
+      checkAppendMsg(pCheck,
+        "%d of %d pages missing from overflow list starting at %d",
+        N+1, expected, iFirst);
+      break;
+    }
+    if( checkRef(pCheck, iPage) ) break;
+    if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){
+      checkAppendMsg(pCheck, "failed to get page %d", iPage);
+      break;
+    }
+    pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage);
+    if( isFreeList ){
+      int n = get4byte(&pOvflData[4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      if( pCheck->pBt->autoVacuum ){
+        checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0);
+      }
+#endif
+      if( n>(int)pCheck->pBt->usableSize/4-2 ){
+        checkAppendMsg(pCheck,
+          "freelist leaf count too big on page %d", iPage);
+        N--;
+      }else{
+        for(i=0; i<n; i++){
+          Pgno iFreePage = get4byte(&pOvflData[8+i*4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+          if( pCheck->pBt->autoVacuum ){
+            checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0);
+          }
+#endif
+          checkRef(pCheck, iFreePage);
+        }
+        N -= n;
+      }
+    }
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    else{
+      /* If this database supports auto-vacuum and iPage is not the last
+      ** page in this overflow list, check that the pointer-map entry for
+      ** the following page matches iPage.
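+      **
+      ** For example, if overflow page 30 is followed by page 31 in the
+      ** chain, the pointer-map entry for page 31 must read
+      ** (PTRMAP_OVERFLOW2, 30): "page 31 is an overflow page whose
+      ** predecessor is page 30".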
+      */
+      if( pCheck->pBt->autoVacuum && N>0 ){
+        i = get4byte(pOvflData);
+        checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage);
+      }
+    }
+#endif
+    iPage = get4byte(pOvflData);
+    sqlite3PagerUnref(pOvflPage);
+
+    if( isFreeList && N<(iPage!=0) ){
+      checkAppendMsg(pCheck, "free-page count in header is too small");
+    }
+  }
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+/*
+** An implementation of a min-heap.
+**
+** aHeap[0] is the number of elements on the heap.  aHeap[1] is the
+** root element.  The daughter nodes of aHeap[N] are aHeap[N*2]
+** and aHeap[N*2+1].
+**
+** The heap property is this:  Every node is less than or equal to both
+** of its daughter nodes.  A consequence of the heap property is that the
+** root node aHeap[1] is always the minimum value currently in the heap.
+**
+** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto
+** the heap, preserving the heap property.  The btreeHeapPull() routine
+** removes the root element from the heap (the minimum value in the heap)
+** and then moves other nodes around as necessary to preserve the heap
+** property.
+**
+** This heap is used for cell overlap and coverage testing.  Each u32
+** entry represents the span of a cell or freeblock on a btree page.
+** The upper 16 bits are the index of the first byte of a range and the
+** lower 16 bits are the index of the last byte of that range.
+*/
+static void btreeHeapInsert(u32 *aHeap, u32 x){
+  u32 j, i = ++aHeap[0];
+  aHeap[i] = x;
+  while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){
+    x = aHeap[j];
+    aHeap[j] = aHeap[i];
+    aHeap[i] = x;
+    i = j;
+  }
+}
+static int btreeHeapPull(u32 *aHeap, u32 *pOut){
+  u32 j, i, x;
+  if( (x = aHeap[0])==0 ) return 0;
+  *pOut = aHeap[1];
+  aHeap[1] = aHeap[x];
+  aHeap[x] = 0xffffffff;
+  aHeap[0]--;
+  i = 1;
+  while( (j = i*2)<=aHeap[0] ){
+    if( aHeap[j]>aHeap[j+1] ) j++;
+    if( aHeap[i]<aHeap[j] ) break;
+    x = aHeap[i];
+    aHeap[i] = aHeap[j];
+    aHeap[j] = x;
+    i = j;
+  }
+  return 1;
+}
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** Do various sanity checks on a single page of a tree.  Return
+** the tree depth.  Root pages return 0.  Parents of root pages
+** return 1, and so forth.
+**
+** These checks are done:
+**
+**      1.  Make sure that cells and freeblocks do not overlap
+**          but combine to completely cover the page.
+**      2.  Make sure integer cell keys are in order.
+**      3.  Check the integrity of overflow pages.
+**      4.  Recursively call checkTreePage on all children.
+**      5.  Verify that the depth of all children is the same.
+*/
+static int checkTreePage(
+  IntegrityCk *pCheck,  /* Context for the sanity check */
+  int iPage,            /* Page number of the page to check */
+  i64 *piMinKey,        /* Write minimum integer primary key here */
+  i64 maxKey            /* Error if integer primary key greater than this */
+){
+  MemPage *pPage = 0;      /* The page being analyzed */
+  int hdr;                 /* Offset to the page header */
+  int cellStart;           /* Offset to the start of the cell pointer array */
+  int nCell;               /* Number of cells */
+  int doCoverageCheck = 1; /* True if cell coverage checking should be done */
+  int keyCanBeEqual = 1;   /* True if IPK can be equal to maxKey
+                           ** False if IPK must be strictly less than maxKey */
+  int depth = -1, d2;      /* Depth of a subtree */
+  int pgno;                /* Page number */
+  int nFrag;               /* Number of fragmented bytes on the page */
+  int i;                   /* Loop counter */
+  int rc;                  /* Result code from subroutine call */
+  u32 usableSize;          /* Usable size of the page */
+  u32 contentOffset;       /* Offset to the start of the cell content area */
+  u32 *heap = 0;           /* Min-heap used for checking cell coverage */
+  u32 x, prev = 0;         /* Next and previous entry on the min-heap */
+  u32 pc;                  /* Address of a cell */
+  u8 *data;                /* Page content */
+  u8 *pCell;               /* Cell content */
+  u8 *pCellIdx;            /* Next element of the cell pointer array */
+  BtShared *pBt;           /* The BtShared object that owns pPage */
+  const char *saved_zPfx = pCheck->zPfx;
+  int saved_v1 = pCheck->v1;
+  int saved_v2 = pCheck->v2;
+  u8 savedIsInit = 0;
+
+  /* Check that the page exists
+  */
+  pBt = pCheck->pBt;
+  usableSize = pBt->usableSize;
+  if( iPage==0 ) return 0;
+  if( checkRef(pCheck, iPage) ) return 0;
+  pCheck->zPfx = "Page %d: ";
+  pCheck->v1 = iPage;
+  if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){
+    checkAppendMsg(pCheck,
+       "unable to get the page. error code=%d", rc);
+    goto end_of_check;
+  }
+
+  /* Clear MemPage.isInit to make sure the corruption detection code in
+  ** btreeInitPage() is executed. */
+  savedIsInit = pPage->isInit;
+  pPage->isInit = 0;
+  if( (rc = btreeInitPage(pPage))!=0 ){
+    assert( rc==SQLITE_CORRUPT );  /* The only possible error from InitPage */
+    checkAppendMsg(pCheck,
+       "btreeInitPage() returns error code %d", rc);
+    goto end_of_check;
+  }
+  data = pPage->aData;
+  hdr = pPage->hdrOffset;
+
+  /* Set up for cell analysis */
+  pCheck->zPfx = "On tree page %d cell %d: ";
+  contentOffset = get2byteNotZero(&data[hdr+5]);
+  assert( contentOffset<=usableSize );  /* Enforced by btreeInitPage() */
+
+  /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the
+  ** number of cells on the page. */
+  nCell = get2byte(&data[hdr+3]);
+  assert( pPage->nCell==nCell );
+
+  /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page
+  ** immediately follows the b-tree page header.
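+  ** The header is 12 bytes on interior pages and 8 bytes on leaf pages,
+  ** the difference being the 4-byte right-child pointer that only interior
+  ** pages carry; hence the "hdr + 12 - 4*pPage->leaf" computation in the
+  ** next statement.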
+  */
+  cellStart = hdr + 12 - 4*pPage->leaf;
+  assert( pPage->aCellIdx==&data[cellStart] );
+  pCellIdx = &data[cellStart + 2*(nCell-1)];
+
+  if( !pPage->leaf ){
+    /* Analyze the right-child page of internal pages */
+    pgno = get4byte(&data[hdr+8]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( pBt->autoVacuum ){
+      pCheck->zPfx = "On page %d at right child: ";
+      checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+    }
+#endif
+    depth = checkTreePage(pCheck, pgno, &maxKey, maxKey);
+    keyCanBeEqual = 0;
+  }else{
+    /* For leaf pages, the coverage check will occur in the same loop
+    ** as the other cell checks, so initialize the heap.  */
+    heap = pCheck->heap;
+    heap[0] = 0;
+  }
+
+  /* EVIDENCE-OF: R-02776-14802 The cell pointer array consists of K 2-byte
+  ** integer offsets to the cell contents. */
+  for(i=nCell-1; i>=0 && pCheck->mxErr; i--){
+    CellInfo info;
+
+    /* Check cell size */
+    pCheck->v2 = i;
+    assert( pCellIdx==&data[cellStart + i*2] );
+    pc = get2byteAligned(pCellIdx);
+    pCellIdx -= 2;
+    if( pc<contentOffset || pc>usableSize-4 ){
+      checkAppendMsg(pCheck, "Offset %d out of range %d..%d",
+                             pc, contentOffset, usableSize-4);
+      doCoverageCheck = 0;
+      continue;
+    }
+    pCell = &data[pc];
+    pPage->xParseCell(pPage, pCell, &info);
+    if( pc+info.nSize>usableSize ){
+      checkAppendMsg(pCheck, "Extends off end of page");
+      doCoverageCheck = 0;
+      continue;
+    }
+
+    /* Check for integer primary key out of range */
+    if( pPage->intKey ){
+      if( keyCanBeEqual ? (info.nKey > maxKey) : (info.nKey >= maxKey) ){
+        checkAppendMsg(pCheck, "Rowid %lld out of order", info.nKey);
+      }
+      maxKey = info.nKey;
+    }
+
+    /* Check the content overflow list */
+    if( info.nPayload>info.nLocal ){
+      int nPage;       /* Number of pages on the overflow chain */
+      Pgno pgnoOvfl;   /* First page of the overflow chain */
+      assert( pc + info.nSize - 4 <= usableSize );
+      nPage = (info.nPayload - info.nLocal + usableSize - 5)/(usableSize - 4);
+      pgnoOvfl = get4byte(&pCell[info.nSize - 4]);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      if( pBt->autoVacuum ){
+        checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage);
+      }
+#endif
+      checkList(pCheck, 0, pgnoOvfl, nPage);
+    }
+
+    if( !pPage->leaf ){
+      /* Check sanity of left child page for internal pages */
+      pgno = get4byte(pCell);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+      if( pBt->autoVacuum ){
+        checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage);
+      }
+#endif
+      d2 = checkTreePage(pCheck, pgno, &maxKey, maxKey);
+      keyCanBeEqual = 0;
+      if( d2!=depth ){
+        checkAppendMsg(pCheck, "Child page depth differs");
+        depth = d2;
+      }
+    }else{
+      /* Populate the coverage-checking heap for leaf pages */
+      btreeHeapInsert(heap, (pc<<16)|(pc+info.nSize-1));
+    }
+  }
+  *piMinKey = maxKey;
+
+  /* Check for complete coverage of the page
+  */
+  pCheck->zPfx = 0;
+  if( doCoverageCheck && pCheck->mxErr>0 ){
+    /* For leaf pages, the min-heap has already been initialized and the
+    ** cells have already been inserted.  But for internal pages, that has
+    ** not yet been done, so do it now */
+    if( !pPage->leaf ){
+      heap = pCheck->heap;
+      heap[0] = 0;
+      for(i=nCell-1; i>=0; i--){
+        u32 size;
+        pc = get2byteAligned(&data[cellStart+i*2]);
+        size = pPage->xCellSize(pPage, &data[pc]);
+        btreeHeapInsert(heap, (pc<<16)|(pc+size-1));
+      }
+    }
+    /* Add the freeblocks to the min-heap
+    **
+    ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header
+    ** is the offset of the first freeblock, or zero if there are no
+    ** freeblocks on the page.
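+    **
+    ** Like the cells above, each freeblock is added to the heap encoded as
+    ** (start<<16)|end; a 10-byte freeblock at offset 500, for example,
+    ** enters the heap as (500<<16)|509.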
+    */
+    i = get2byte(&data[hdr+1]);
+    while( i>0 ){
+      int size, j;
+      assert( (u32)i<=usableSize-4 );      /* Enforced by btreeInitPage() */
+      size = get2byte(&data[i+2]);
+      assert( (u32)(i+size)<=usableSize );  /* Enforced by btreeInitPage() */
+      btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1));
+      /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a
+      ** big-endian integer which is the offset in the b-tree page of the next
+      ** freeblock in the chain, or zero if the freeblock is the last on the
+      ** chain. */
+      j = get2byte(&data[i]);
+      /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of
+      ** increasing offset. */
+      assert( j==0 || j>i+size );      /* Enforced by btreeInitPage() */
+      assert( (u32)j<=usableSize-4 );  /* Enforced by btreeInitPage() */
+      i = j;
+    }
+    /* Analyze the min-heap looking for overlap between cells and/or
+    ** freeblocks, and counting the number of untracked bytes in nFrag.
+    **
+    ** Each min-heap entry is of the form:  (start_address<<16)|end_address.
+    ** There is an implied first entry that covers the page header, the cell
+    ** pointer index, and the gap between the cell pointer index and the start
+    ** of cell content.
+    **
+    ** The loop below pulls entries from the min-heap in order and compares
+    ** the start_address against the previous end_address.  If there is an
+    ** overlap, that means bytes are used multiple times.  If there is a gap,
+    ** that gap is added to the fragmentation count.
+    */
+    nFrag = 0;
+    prev = contentOffset - 1;   /* Implied first min-heap entry */
+    while( btreeHeapPull(heap,&x) ){
+      if( (prev&0xffff)>=(x>>16) ){
+        checkAppendMsg(pCheck,
+          "Multiple uses for byte %u of page %d", x>>16, iPage);
+        break;
+      }else{
+        nFrag += (x>>16) - (prev&0xffff) - 1;
+        prev = x;
+      }
+    }
+    nFrag += usableSize - (prev&0xffff) - 1;
+    /* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments
+    ** is stored in the fifth field of the b-tree page header.
+    ** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the
+    ** number of fragmented free bytes within the cell content area.
+    */
+    if( heap[0]==0 && nFrag!=data[hdr+7] ){
+      checkAppendMsg(pCheck,
+          "Fragmentation of %d bytes reported as %d on page %d",
+          nFrag, data[hdr+7], iPage);
+    }
+  }
+
+end_of_check:
+  if( !doCoverageCheck ) pPage->isInit = savedIsInit;
+  releasePage(pPage);
+  pCheck->zPfx = saved_zPfx;
+  pCheck->v1 = saved_v1;
+  pCheck->v2 = saved_v2;
+  return depth+1;
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+/*
+** This routine does a complete check of the given BTree file.  aRoot[] is
+** an array of page numbers where each page number is the root page of
+** a table.  nRoot is the number of entries in aRoot.
+**
+** A read-only or read-write transaction must be opened before calling
+** this function.
+**
+** Write the number of errors seen in *pnErr.  Except for some memory
+** allocation errors, an error message held in memory obtained from
+** malloc is returned if *pnErr is non-zero.  If *pnErr==0 then NULL is
+** returned.  If a memory allocation error occurs, NULL is returned.
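+**
+** At the SQL level this checker is reached through "PRAGMA integrity_check"
+** (and "PRAGMA quick_check"); the caller is expected to free the returned
+** error message when it is done with it.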
+*/
+SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
+  Btree *p,     /* The btree to be checked */
+  int *aRoot,   /* An array of root page numbers for individual trees */
+  int nRoot,    /* Number of entries in aRoot[] */
+  int mxErr,    /* Stop reporting errors after this many */
+  int *pnErr    /* Write number of errors seen to this variable */
+){
+  Pgno i;
+  IntegrityCk sCheck;
+  BtShared *pBt = p->pBt;
+  int savedDbFlags = pBt->db->flags;
+  char zErr[100];
+  VVA_ONLY( int nRef );
+
+  sqlite3BtreeEnter(p);
+  assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE );
+  VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) );
+  assert( nRef>=0 );
+  sCheck.pBt = pBt;
+  sCheck.pPager = pBt->pPager;
+  sCheck.nPage = btreePagecount(sCheck.pBt);
+  sCheck.mxErr = mxErr;
+  sCheck.nErr = 0;
+  sCheck.mallocFailed = 0;
+  sCheck.zPfx = 0;
+  sCheck.v1 = 0;
+  sCheck.v2 = 0;
+  sCheck.aPgRef = 0;
+  sCheck.heap = 0;
+  sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
+  sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
+  if( sCheck.nPage==0 ){
+    goto integrity_ck_cleanup;
+  }
+
+  sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
+  if( !sCheck.aPgRef ){
+    sCheck.mallocFailed = 1;
+    goto integrity_ck_cleanup;
+  }
+  sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize );
+  if( sCheck.heap==0 ){
+    sCheck.mallocFailed = 1;
+    goto integrity_ck_cleanup;
+  }
+
+  i = PENDING_BYTE_PAGE(pBt);
+  if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);
+
+  /* Check the integrity of the freelist
+  */
+  sCheck.zPfx = "Main freelist: ";
+  checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]),
+            get4byte(&pBt->pPage1->aData[36]));
+  sCheck.zPfx = 0;
+
+  /* Check all the tables.
+  */
+  testcase( pBt->db->flags & SQLITE_CellSizeCk );
+  pBt->db->flags &= ~SQLITE_CellSizeCk;
+  for(i=0; (int)i<nRoot && sCheck.mxErr; i++){
+    i64 notUsed;
+    if( aRoot[i]==0 ) continue;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( pBt->autoVacuum && aRoot[i]>1 ){
+      checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0);
+    }
+#endif
+    checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64);
+  }
+  pBt->db->flags = savedDbFlags;
+
+  /* Make sure every page in the file is referenced
+  */
+  for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+    if( getPageReferenced(&sCheck, i)==0 ){
+      checkAppendMsg(&sCheck, "Page %d is never used", i);
+    }
+#else
+    /* If the database supports auto-vacuum, make sure no tables contain
+    ** references to pointer-map pages.
+    */
+    if( getPageReferenced(&sCheck, i)==0 &&
+       (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){
+      checkAppendMsg(&sCheck, "Page %d is never used", i);
+    }
+    if( getPageReferenced(&sCheck, i)!=0 &&
+       (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){
+      checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i);
+    }
+#endif
+  }
+
+  /* Clean up and report errors.
+  */
+integrity_ck_cleanup:
+  sqlite3PageFree(sCheck.heap);
+  sqlite3_free(sCheck.aPgRef);
+  if( sCheck.mallocFailed ){
+    sqlite3StrAccumReset(&sCheck.errMsg);
+    sCheck.nErr++;
+  }
+  *pnErr = sCheck.nErr;
+  if( sCheck.nErr==0 ) sqlite3StrAccumReset(&sCheck.errMsg);
+  /* Make sure this analysis did not leave any unref() pages. */
+  assert( nRef==sqlite3PagerRefcount(pBt->pPager) );
+  sqlite3BtreeLeave(p);
+  return sqlite3StrAccumFinish(&sCheck.errMsg);
+}
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+/*
+** Return the full pathname of the underlying database file.  Return
+** an empty string if the database is in-memory or a TEMP database.
+**
+** The pager filename is invariant as long as the pager is
+** open so it is safe to access without the BtShared mutex.
+*/ +SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *p){ + assert( p->pBt->pPager!=0 ); + return sqlite3PagerFilename(p->pBt->pPager, 1); +} + +/* +** Return the pathname of the journal file for this database. The return +** value of this routine is the same regardless of whether the journal file +** has been created or not. +** +** The pager journal filename is invariant as long as the pager is +** open so it is safe to access without the BtShared mutex. +*/ +SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *p){ + assert( p->pBt->pPager!=0 ); + return sqlite3PagerJournalname(p->pBt->pPager); +} + +/* +** Return non-zero if a transaction is active. +*/ +SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree *p){ + assert( p==0 || sqlite3_mutex_held(p->db->mutex) ); + return (p && (p->inTrans==TRANS_WRITE)); +} + +#ifndef SQLITE_OMIT_WAL +/* +** Run a checkpoint on the Btree passed as the first argument. +** +** Return SQLITE_LOCKED if this or any other connection has an open +** transaction on the shared-cache the argument Btree is connected to. +** +** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. +*/ +SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *pnCkpt){ + int rc = SQLITE_OK; + if( p ){ + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + if( pBt->inTransaction!=TRANS_NONE ){ + rc = SQLITE_LOCKED; + }else{ + rc = sqlite3PagerCheckpoint(pBt->pPager, p->db, eMode, pnLog, pnCkpt); + } + sqlite3BtreeLeave(p); + } + return rc; +} +#endif + +/* +** Return non-zero if a read (or write) transaction is active. +*/ +SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree *p){ + assert( p ); + assert( sqlite3_mutex_held(p->db->mutex) ); + return p->inTrans!=TRANS_NONE; +} + +SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){ + assert( p ); + assert( sqlite3_mutex_held(p->db->mutex) ); + return p->nBackup!=0; +} + +/* +** This function returns a pointer to a blob of memory associated with +** a single shared-btree. The memory is used by client code for its own +** purposes (for example, to store a high-level schema associated with +** the shared-btree). The btree layer manages reference counting issues. +** +** The first time this is called on a shared-btree, nBytes bytes of memory +** are allocated, zeroed, and returned to the caller. For each subsequent +** call the nBytes parameter is ignored and a pointer to the same blob +** of memory returned. +** +** If the nBytes parameter is 0 and the blob of memory has not yet been +** allocated, a null pointer is returned. If the blob has already been +** allocated, it is returned as normal. +** +** Just before the shared-btree is closed, the function passed as the +** xFree argument when the memory allocation was made is invoked on the +** blob of allocated memory. The xFree function should not call sqlite3_free() +** on the memory, the btree layer does that. +*/ +SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ + BtShared *pBt = p->pBt; + sqlite3BtreeEnter(p); + if( !pBt->pSchema && nBytes ){ + pBt->pSchema = sqlite3DbMallocZero(0, nBytes); + pBt->xFreeSchema = xFree; + } + sqlite3BtreeLeave(p); + return pBt->pSchema; +} + +/* +** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared +** btree as the argument handle holds an exclusive lock on the +** sqlite_master table. Otherwise SQLITE_OK. 
+*/
+SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *p){
+  int rc;
+  assert( sqlite3_mutex_held(p->db->mutex) );
+  sqlite3BtreeEnter(p);
+  rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK);
+  assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE );
+  sqlite3BtreeLeave(p);
+  return rc;
+}
+
+
+#ifndef SQLITE_OMIT_SHARED_CACHE
+/*
+** Obtain a lock on the table whose root page is iTab.  The
+** lock is a write lock if isWritelock is true or a read lock
+** if it is false.
+*/
+SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){
+  int rc = SQLITE_OK;
+  assert( p->inTrans!=TRANS_NONE );
+  if( p->sharable ){
+    u8 lockType = READ_LOCK + isWriteLock;
+    assert( READ_LOCK+1==WRITE_LOCK );
+    assert( isWriteLock==0 || isWriteLock==1 );
+
+    sqlite3BtreeEnter(p);
+    rc = querySharedCacheTableLock(p, iTab, lockType);
+    if( rc==SQLITE_OK ){
+      rc = setSharedCacheTableLock(p, iTab, lockType);
+    }
+    sqlite3BtreeLeave(p);
+  }
+  return rc;
+}
+#endif
+
+#ifndef SQLITE_OMIT_INCRBLOB
+/*
+** Argument pCsr must be a cursor opened for writing on an
+** INTKEY table currently pointing at a valid table entry.
+** This function modifies the data stored as part of that entry.
+**
+** Only the data content may be modified; it is not possible to
+** change the length of the data stored.  If this function is called with
+** parameters that attempt to write past the end of the existing data,
+** no modifications are made and SQLITE_CORRUPT is returned.
+*/
+SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){
+  int rc;
+  assert( cursorOwnsBtShared(pCsr) );
+  assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) );
+  assert( pCsr->curFlags & BTCF_Incrblob );
+
+  rc = restoreCursorPosition(pCsr);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+  assert( pCsr->eState!=CURSOR_REQUIRESEEK );
+  if( pCsr->eState!=CURSOR_VALID ){
+    return SQLITE_ABORT;
+  }
+
+  /* Save the positions of all other cursors open on this table. This is
+  ** required in case any of them are holding references to an xFetch
+  ** version of the b-tree page modified by the accessPayload call below.
+  **
+  ** Note that pCsr must be open on an INTKEY table, and saveCursorPosition()
+  ** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, so
+  ** saveAllCursors can only return SQLITE_OK.
+  */
+  VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr);
+  assert( rc==SQLITE_OK );
+
+  /* Check some assumptions:
+  **   (a) the cursor is open for writing,
+  **   (b) there is a read/write transaction open,
+  **   (c) the connection holds a write-lock on the table (if required),
+  **   (d) there are no conflicting read-locks, and
+  **   (e) the cursor points at a valid row of an intKey table.
+  */
+  if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){
+    return SQLITE_READONLY;
+  }
+  assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0
+              && pCsr->pBt->inTransaction==TRANS_WRITE );
+  assert( hasSharedCacheTableLock(pCsr->pBtree, pCsr->pgnoRoot, 0, 2) );
+  assert( !hasReadConflicts(pCsr->pBtree, pCsr->pgnoRoot) );
+  assert( pCsr->apPage[pCsr->iPage]->intKey );
+
+  return accessPayload(pCsr, offset, amt, (unsigned char *)z, 1);
+}
+
+/*
+** Mark this cursor as an incremental blob cursor.
+*/
+SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *pCur){
+  pCur->curFlags |= BTCF_Incrblob;
+  pCur->pBtree->hasIncrblobCur = 1;
+}
+#endif
+
+/*
+** Set both the "read version" (single byte at byte offset 18) and
+** "write version" (single byte at byte offset 19) fields in the database
+** header to iVersion.
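+**
+** In the database file format a version of 1 selects the legacy
+** rollback-journal scheme and 2 selects write-ahead logging, so this
+** routine is the low-level mechanism used when switching a database
+** into or out of WAL mode.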
+*/ +SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ + BtShared *pBt = pBtree->pBt; + int rc; /* Return code */ + + assert( iVersion==1 || iVersion==2 ); + + /* If setting the version fields to 1, do not automatically open the + ** WAL connection, even if the version fields are currently set to 2. + */ + pBt->btsFlags &= ~BTS_NO_WAL; + if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL; + + rc = sqlite3BtreeBeginTrans(pBtree, 0); + if( rc==SQLITE_OK ){ + u8 *aData = pBt->pPage1->aData; + if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){ + rc = sqlite3BtreeBeginTrans(pBtree, 2); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); + if( rc==SQLITE_OK ){ + aData[18] = (u8)iVersion; + aData[19] = (u8)iVersion; + } + } + } + } + + pBt->btsFlags &= ~BTS_NO_WAL; + return rc; +} + +/* +** Return true if the cursor has a hint specified. This routine is +** only used from within assert() statements +*/ +SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor *pCsr, unsigned int mask){ + return (pCsr->hints & mask)!=0; +} + +/* +** Return true if the given Btree is read-only. +*/ +SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){ + return (p->pBt->btsFlags & BTS_READ_ONLY)!=0; +} + +/* +** Return the size of the header added to each page by this module. +*/ +SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); } + +#if !defined(SQLITE_OMIT_SHARED_CACHE) +/* +** Return true if the Btree passed as the only argument is sharable. +*/ +SQLITE_PRIVATE int sqlite3BtreeSharable(Btree *p){ + return p->sharable; +} + +/* +** Return the number of connections to the BtShared object accessed by +** the Btree handle passed as the only argument. For private caches +** this is always 1. For shared caches it may be 1 or greater. +*/ +SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree *p){ + testcase( p->sharable ); + return p->pBt->nRef; +} +#endif + +/************** End of btree.c ***********************************************/ +/************** Begin file backup.c ******************************************/ +/* +** 2009 January 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the implementation of the sqlite3_backup_XXX() +** API functions and the related features. +*/ +/* #include "sqliteInt.h" */ +/* #include "btreeInt.h" */ + +/* +** Structure allocated for each backup operation. +*/ +struct sqlite3_backup { + sqlite3* pDestDb; /* Destination database handle */ + Btree *pDest; /* Destination b-tree file */ + u32 iDestSchema; /* Original schema cookie in destination */ + int bDestLocked; /* True once a write-transaction is open on pDest */ + + Pgno iNext; /* Page number of the next source page to copy */ + sqlite3* pSrcDb; /* Source database handle */ + Btree *pSrc; /* Source b-tree file */ + + int rc; /* Backup process error code */ + + /* These two variables are set by every call to backup_step(). They are + ** read by calls to backup_remaining() and backup_pagecount(). 
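+  **
+  ** They are snapshots rather than live counters: between calls to
+  ** backup_step() they keep reporting the state as of the previous step,
+  ** even if the source database changes in the meantime.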
+ */ + Pgno nRemaining; /* Number of pages left to copy */ + Pgno nPagecount; /* Total number of pages to copy */ + + int isAttached; /* True once backup has been registered with pager */ + sqlite3_backup *pNext; /* Next backup associated with source pager */ +}; + +/* +** THREAD SAFETY NOTES: +** +** Once it has been created using backup_init(), a single sqlite3_backup +** structure may be accessed via two groups of thread-safe entry points: +** +** * Via the sqlite3_backup_XXX() API function backup_step() and +** backup_finish(). Both these functions obtain the source database +** handle mutex and the mutex associated with the source BtShared +** structure, in that order. +** +** * Via the BackupUpdate() and BackupRestart() functions, which are +** invoked by the pager layer to report various state changes in +** the page cache associated with the source database. The mutex +** associated with the source database BtShared structure will always +** be held when either of these functions are invoked. +** +** The other sqlite3_backup_XXX() API functions, backup_remaining() and +** backup_pagecount() are not thread-safe functions. If they are called +** while some other thread is calling backup_step() or backup_finish(), +** the values returned may be invalid. There is no way for a call to +** BackupUpdate() or BackupRestart() to interfere with backup_remaining() +** or backup_pagecount(). +** +** Depending on the SQLite configuration, the database handles and/or +** the Btree objects may have their own mutexes that require locking. +** Non-sharable Btrees (in-memory databases for example), do not have +** associated mutexes. +*/ + +/* +** Return a pointer corresponding to database zDb (i.e. "main", "temp") +** in connection handle pDb. If such a database cannot be found, return +** a NULL pointer and write an error message to pErrorDb. +** +** If the "temp" database is requested, it may need to be opened by this +** function. If an error occurs while doing so, return 0 and write an +** error message to pErrorDb. +*/ +static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){ + int i = sqlite3FindDbName(pDb, zDb); + + if( i==1 ){ + Parse sParse; + int rc = 0; + memset(&sParse, 0, sizeof(sParse)); + sParse.db = pDb; + if( sqlite3OpenTempDatabase(&sParse) ){ + sqlite3ErrorWithMsg(pErrorDb, sParse.rc, "%s", sParse.zErrMsg); + rc = SQLITE_ERROR; + } + sqlite3DbFree(pErrorDb, sParse.zErrMsg); + sqlite3ParserReset(&sParse); + if( rc ){ + return 0; + } + } + + if( i<0 ){ + sqlite3ErrorWithMsg(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb); + return 0; + } + + return pDb->aDb[i].pBt; +} + +/* +** Attempt to set the page size of the destination to match the page size +** of the source. +*/ +static int setDestPgsz(sqlite3_backup *p){ + int rc; + rc = sqlite3BtreeSetPageSize(p->pDest,sqlite3BtreeGetPageSize(p->pSrc),-1,0); + return rc; +} + +/* +** Check that there is no open read-transaction on the b-tree passed as the +** second argument. If there is not, return SQLITE_OK. Otherwise, if there +** is an open read-transaction, return SQLITE_ERROR and leave an error +** message in database handle db. +*/ +static int checkReadTransaction(sqlite3 *db, Btree *p){ + if( sqlite3BtreeIsInReadTrans(p) ){ + sqlite3ErrorWithMsg(db, SQLITE_ERROR, "destination database is in use"); + return SQLITE_ERROR; + } + return SQLITE_OK; +} + +/* +** Create an sqlite3_backup process to copy the contents of zSrcDb from +** connection handle pSrcDb to zDestDb in pDestDb. 
If successful, return +** a pointer to the new sqlite3_backup object. +** +** If an error occurs, NULL is returned and an error code and error message +** stored in database handle pDestDb. +*/ +SQLITE_API sqlite3_backup *sqlite3_backup_init( + sqlite3* pDestDb, /* Database to write to */ + const char *zDestDb, /* Name of database within pDestDb */ + sqlite3* pSrcDb, /* Database connection to read from */ + const char *zSrcDb /* Name of database within pSrcDb */ +){ + sqlite3_backup *p; /* Value to return */ + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(pSrcDb)||!sqlite3SafetyCheckOk(pDestDb) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + + /* Lock the source database handle. The destination database + ** handle is not locked in this routine, but it is locked in + ** sqlite3_backup_step(). The user is required to ensure that no + ** other thread accesses the destination handle for the duration + ** of the backup operation. Any attempt to use the destination + ** database connection while a backup is in progress may cause + ** a malfunction or a deadlock. + */ + sqlite3_mutex_enter(pSrcDb->mutex); + sqlite3_mutex_enter(pDestDb->mutex); + + if( pSrcDb==pDestDb ){ + sqlite3ErrorWithMsg( + pDestDb, SQLITE_ERROR, "source and destination must be distinct" + ); + p = 0; + }else { + /* Allocate space for a new sqlite3_backup object... + ** EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a + ** call to sqlite3_backup_init() and is destroyed by a call to + ** sqlite3_backup_finish(). */ + p = (sqlite3_backup *)sqlite3MallocZero(sizeof(sqlite3_backup)); + if( !p ){ + sqlite3Error(pDestDb, SQLITE_NOMEM_BKPT); + } + } + + /* If the allocation succeeded, populate the new object. */ + if( p ){ + p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb); + p->pDest = findBtree(pDestDb, pDestDb, zDestDb); + p->pDestDb = pDestDb; + p->pSrcDb = pSrcDb; + p->iNext = 1; + p->isAttached = 0; + + if( 0==p->pSrc || 0==p->pDest + || checkReadTransaction(pDestDb, p->pDest)!=SQLITE_OK + ){ + /* One (or both) of the named databases did not exist or an OOM + ** error was hit. Or there is a transaction open on the destination + ** database. The error has already been written into the pDestDb + ** handle. All that is left to do here is free the sqlite3_backup + ** structure. */ + sqlite3_free(p); + p = 0; + } + } + if( p ){ + p->pSrc->nBackup++; + } + + sqlite3_mutex_leave(pDestDb->mutex); + sqlite3_mutex_leave(pSrcDb->mutex); + return p; +} + +/* +** Argument rc is an SQLite error code. Return true if this error is +** considered fatal if encountered during a backup operation. All errors +** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED. +*/ +static int isFatalError(int rc){ + return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && ALWAYS(rc!=SQLITE_LOCKED)); +} + +/* +** Parameter zSrcData points to a buffer containing the data for +** page iSrcPg from the source database. Copy this data into the +** destination database. 
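+**
+** The source and destination page sizes need not match. With a 4096-byte
+** source page and a 1024-byte destination page size, for instance, a
+** single call writes the source page across four destination pages,
+** skipping the destination's PENDING_BYTE page if it falls in that range.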
+*/
+static int backupOnePage(
+  sqlite3_backup *p,          /* Backup handle */
+  Pgno iSrcPg,                /* Source database page to backup */
+  const u8 *zSrcData,         /* Source database page data */
+  int bUpdate                 /* True for an update, false otherwise */
+){
+  Pager * const pDestPager = sqlite3BtreePager(p->pDest);
+  const int nSrcPgsz = sqlite3BtreeGetPageSize(p->pSrc);
+  int nDestPgsz = sqlite3BtreeGetPageSize(p->pDest);
+  const int nCopy = MIN(nSrcPgsz, nDestPgsz);
+  const i64 iEnd = (i64)iSrcPg*(i64)nSrcPgsz;
+#ifdef SQLITE_HAS_CODEC
+  /* Use BtreeGetReserveNoMutex() for the source b-tree, as although it is
+  ** guaranteed that the shared-mutex is held by this thread, handle
+  ** p->pSrc may not actually be the owner.  */
+  int nSrcReserve = sqlite3BtreeGetReserveNoMutex(p->pSrc);
+  int nDestReserve = sqlite3BtreeGetOptimalReserve(p->pDest);
+#endif
+  int rc = SQLITE_OK;
+  i64 iOff;
+
+  assert( sqlite3BtreeGetReserveNoMutex(p->pSrc)>=0 );
+  assert( p->bDestLocked );
+  assert( !isFatalError(p->rc) );
+  assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) );
+  assert( zSrcData );
+
+  /* Catch the case where the destination is an in-memory database and the
+  ** page sizes of the source and destination differ.
+  */
+  if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){
+    rc = SQLITE_READONLY;
+  }
+
+#ifdef SQLITE_HAS_CODEC
+  /* Backup is not possible if the page size of the destination is changing
+  ** and a codec is in use.
+  */
+  if( nSrcPgsz!=nDestPgsz && sqlite3PagerGetCodec(pDestPager)!=0 ){
+    rc = SQLITE_READONLY;
+  }
+
+  /* Backup is not possible if the number of bytes of reserve space differ
+  ** between source and destination.  If there is a difference, try to
+  ** fix the destination to agree with the source. If that is not possible,
+  ** then the backup cannot proceed.
+  */
+  if( nSrcReserve!=nDestReserve ){
+    u32 newPgsz = nSrcPgsz;
+    rc = sqlite3PagerSetPagesize(pDestPager, &newPgsz, nSrcReserve);
+    if( rc==SQLITE_OK && newPgsz!=nSrcPgsz ) rc = SQLITE_READONLY;
+  }
+#endif
+
+  /* This loop runs once for each destination page spanned by the source
+  ** page. For each iteration, variable iOff is set to the byte offset
+  ** of the destination page.
+  */
+  for(iOff=iEnd-(i64)nSrcPgsz; rc==SQLITE_OK && iOff<iEnd; iOff+=nDestPgsz){
+    DbPage *pDestPg = 0;
+    Pgno iDest = (Pgno)(iOff/nDestPgsz)+1;
+    if( iDest==PENDING_BYTE_PAGE(p->pDest->pBt) ) continue;
+    if( SQLITE_OK==(rc = sqlite3PagerGet(pDestPager, iDest, &pDestPg, 0))
+     && SQLITE_OK==(rc = sqlite3PagerWrite(pDestPg))
+    ){
+      const u8 *zIn = &zSrcData[iOff%nSrcPgsz];
+      u8 *zDestData = sqlite3PagerGetData(pDestPg);
+      u8 *zOut = &zDestData[iOff%nDestPgsz];
+
+      /* Copy the data from the source page into the destination page.
+      ** Then clear the Btree layer MemPage.isInit flag. Both this module
+      ** and the pager code use this trick (clearing the first byte
+      ** of the page 'extra' space to invalidate the Btree layers
+      ** cached parse of the page). MemPage.isInit is marked
+      ** "MUST BE FIRST" for this purpose.
+      */
+      memcpy(zOut, zIn, nCopy);
+      ((u8 *)sqlite3PagerGetExtra(pDestPg))[0] = 0;
+      if( iOff==0 && bUpdate==0 ){
+        sqlite3Put4byte(&zOut[28], sqlite3BtreeLastPage(p->pSrc));
+      }
+    }
+    sqlite3PagerUnref(pDestPg);
+  }
+
+  return rc;
+}
+
+/*
+** If pFile is currently larger than iSize bytes, then truncate it to
+** exactly iSize bytes. If pFile is not larger than iSize bytes, then
+** this function is a no-op.
+**
+** Return SQLITE_OK if everything is successful, or an SQLite error
+** code if an error occurs.
+*/ +static int backupTruncateFile(sqlite3_file *pFile, i64 iSize){ + i64 iCurrent; + int rc = sqlite3OsFileSize(pFile, &iCurrent); + if( rc==SQLITE_OK && iCurrent>iSize ){ + rc = sqlite3OsTruncate(pFile, iSize); + } + return rc; +} + +/* +** Register this backup object with the associated source pager for +** callbacks when pages are changed or the cache invalidated. +*/ +static void attachBackupObject(sqlite3_backup *p){ + sqlite3_backup **pp; + assert( sqlite3BtreeHoldsMutex(p->pSrc) ); + pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); + p->pNext = *pp; + *pp = p; + p->isAttached = 1; +} + +/* +** Copy nPage pages from the source b-tree to the destination. +*/ +SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){ + int rc; + int destMode; /* Destination journal mode */ + int pgszSrc = 0; /* Source page size */ + int pgszDest = 0; /* Destination page size */ + +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(p->pSrcDb->mutex); + sqlite3BtreeEnter(p->pSrc); + if( p->pDestDb ){ + sqlite3_mutex_enter(p->pDestDb->mutex); + } + + rc = p->rc; + if( !isFatalError(rc) ){ + Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */ + Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */ + int ii; /* Iterator variable */ + int nSrcPage = -1; /* Size of source db in pages */ + int bCloseTrans = 0; /* True if src db requires unlocking */ + + /* If the source pager is currently in a write-transaction, return + ** SQLITE_BUSY immediately. + */ + if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){ + rc = SQLITE_BUSY; + }else{ + rc = SQLITE_OK; + } + + /* If there is no open read-transaction on the source database, open + ** one now. If a transaction is opened here, then it will be closed + ** before this function exits. + */ + if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ + rc = sqlite3BtreeBeginTrans(p->pSrc, 0); + bCloseTrans = 1; + } + + /* If the destination database has not yet been locked (i.e. if this + ** is the first call to backup_step() for the current backup operation), + ** try to set its page size to the same as the source database. This + ** is especially important on ZipVFS systems, as in that case it is + ** not possible to create a database file that uses one page size by + ** writing to it with another. */ + if( p->bDestLocked==0 && rc==SQLITE_OK && setDestPgsz(p)==SQLITE_NOMEM ){ + rc = SQLITE_NOMEM; + } + + /* Lock the destination database, if it is not locked already. */ + if( SQLITE_OK==rc && p->bDestLocked==0 + && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2)) + ){ + p->bDestLocked = 1; + sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema); + } + + /* Do not allow backup if the destination database is in WAL mode + ** and the page sizes are different between source and destination */ + pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); + pgszDest = sqlite3BtreeGetPageSize(p->pDest); + destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); + if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ + rc = SQLITE_READONLY; + } + + /* Now that there is a read-lock on the source database, query the + ** source pager for the number of pages in the database. 
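+    **
+    ** Note that a negative nPage argument means "copy everything": with
+    ** nPage<0 the loop below stops only once p->iNext has passed the last
+    ** page of the source database.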
+    */
+    nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc);
+    assert( nSrcPage>=0 );
+    for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){
+      const Pgno iSrcPg = p->iNext;                 /* Source page number */
+      if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
+        DbPage *pSrcPg;                             /* Source page object */
+        rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg,PAGER_GET_READONLY);
+        if( rc==SQLITE_OK ){
+          rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0);
+          sqlite3PagerUnref(pSrcPg);
+        }
+      }
+      p->iNext++;
+    }
+    if( rc==SQLITE_OK ){
+      p->nPagecount = nSrcPage;
+      p->nRemaining = nSrcPage+1-p->iNext;
+      if( p->iNext>(Pgno)nSrcPage ){
+        rc = SQLITE_DONE;
+      }else if( !p->isAttached ){
+        attachBackupObject(p);
+      }
+    }
+
+    /* Update the schema version field in the destination database. This
+    ** is to make sure that the schema-version really does change in
+    ** the case where the source and destination databases have the
+    ** same schema version.
+    */
+    if( rc==SQLITE_DONE ){
+      if( nSrcPage==0 ){
+        rc = sqlite3BtreeNewDb(p->pDest);
+        nSrcPage = 1;
+      }
+      if( rc==SQLITE_OK || rc==SQLITE_DONE ){
+        rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1);
+      }
+      if( rc==SQLITE_OK ){
+        if( p->pDestDb ){
+          sqlite3ResetAllSchemasOfConnection(p->pDestDb);
+        }
+        if( destMode==PAGER_JOURNALMODE_WAL ){
+          rc = sqlite3BtreeSetVersion(p->pDest, 2);
+        }
+      }
+      if( rc==SQLITE_OK ){
+        int nDestTruncate;
+        /* Set nDestTruncate to the final number of pages in the destination
+        ** database. The complication here is that the destination page
+        ** size may be different to the source page size.
+        **
+        ** If the source page size is smaller than the destination page size,
+        ** round up. In this case the call to sqlite3OsTruncate() below will
+        ** fix the size of the file. However it is important to call
+        ** sqlite3PagerTruncateImage() here so that any pages in the
+        ** destination file that lie beyond the nDestTruncate page mark are
+        ** journalled by PagerCommitPhaseOne() before they are destroyed
+        ** by the file truncation.
+        */
+        assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) );
+        assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) );
+        if( pgszSrc<pgszDest ){
+          int ratio = pgszDest/pgszSrc;
+          nDestTruncate = (nSrcPage+ratio-1)/ratio;
+          if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){
+            nDestTruncate--;
+          }
+        }else{
+          nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
+        }
+        assert( nDestTruncate>0 );
+
+        if( pgszSrc<pgszDest ){
+          /* If the source page-size is smaller than the destination page-size,
+          ** two extra things may need to happen:
+          **
+          **   * The destination may need to be truncated, and
+          **
+          **   * Data stored on the pages immediately following the
+          **     pending-byte page in the source database may need to be
+          **     copied into the destination database.
+          */
+          const i64 iSize = (i64)pgszSrc * (i64)nSrcPage;
+          sqlite3_file * const pFile = sqlite3PagerFile(pDestPager);
+          Pgno iPg;
+          int nDstPage;
+          i64 iOff;
+          i64 iEnd;
+
+          assert( pFile );
+          assert( nDestTruncate==0
+              || (i64)nDestTruncate*(i64)pgszDest >= iSize || (
+                nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1)
+             && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest
+          ));
+
+          /* This block ensures that all data required to recreate the original
+          ** database has been stored in the journal for pDestPager and the
+          ** journal synced to disk. So at this point we may safely modify
+          ** the database file in any way, knowing that if a power failure
+          ** occurs, the original database will be reconstructed from the
+          ** journal file.  */
+          sqlite3PagerPagecount(pDestPager, &nDstPage);
+          for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){
+            if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){
+              DbPage *pPg;
+              rc = sqlite3PagerGet(pDestPager, iPg, &pPg, 0);
+              if( rc==SQLITE_OK ){
+                rc = sqlite3PagerWrite(pPg);
+                sqlite3PagerUnref(pPg);
+              }
+            }
+          }
+          if( rc==SQLITE_OK ){
+            rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1);
+          }
+
+          /* Write the extra pages and truncate the database file as required */
+          iEnd = MIN(PENDING_BYTE + pgszDest, iSize);
+          for(
+            iOff=PENDING_BYTE+pgszSrc;
+            rc==SQLITE_OK && iOff<iEnd;
+            iOff+=pgszSrc
+          ){
+            PgHdr *pSrcPg = 0;
+            const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1);
+            rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg, 0);
+            if( rc==SQLITE_OK ){
+              u8 *zData = sqlite3PagerGetData(pSrcPg);
+              rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff);
+            }
+            sqlite3PagerUnref(pSrcPg);
+          }
+          if( rc==SQLITE_OK ){
+            rc = backupTruncateFile(pFile, iSize);
+          }
+
+          /* Sync the database file to disk. */
+          if( rc==SQLITE_OK ){
+            rc = sqlite3PagerSync(pDestPager, 0);
+          }
+        }else{
+          nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
+          sqlite3PagerTruncateImage(pDestPager, nDestTruncate);
+        }
+
+        /* Finish committing the transaction to the destination database. */
+        if( SQLITE_OK==rc
+         && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest, 0))
+        ){
+          rc = SQLITE_DONE;
+        }
+      }
+    }
+
+    /* If bCloseTrans is true, then this function opened a read transaction
+    ** on the source database.  Close the read transaction here.  There is
+    ** no need to check the return values of the btree methods here, as
+    ** "committing" a read-only transaction cannot fail.
+    */
+    if( bCloseTrans ){
+      TESTONLY( int rc2 );
+      TESTONLY( rc2  = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0);
+      TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0);
+      assert( rc2==SQLITE_OK );
+    }
+
+    if( rc==SQLITE_IOERR_NOMEM ){
+      rc = SQLITE_NOMEM_BKPT;
+    }
+    p->rc = rc;
+  }
+  if( p->pDestDb ){
+    sqlite3_mutex_leave(p->pDestDb->mutex);
+  }
+  sqlite3BtreeLeave(p->pSrc);
+  sqlite3_mutex_leave(p->pSrcDb->mutex);
+  return rc;
+}
+
+/*
+** Release all resources associated with an sqlite3_backup* handle.
+*/
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p){
+  sqlite3_backup **pp;                 /* Ptr to head of pager's backup list */
+  sqlite3 *pSrcDb;                     /* Source database connection */
+  int rc;                              /* Value to return */
+
+  /* Enter the mutexes */
+  if( p==0 ) return SQLITE_OK;
+  pSrcDb = p->pSrcDb;
+  sqlite3_mutex_enter(pSrcDb->mutex);
+  sqlite3BtreeEnter(p->pSrc);
+  if( p->pDestDb ){
+    sqlite3_mutex_enter(p->pDestDb->mutex);
+  }
+
+  /* Detach this backup from the source pager. */
+  if( p->pDestDb ){
+    p->pSrc->nBackup--;
+  }
+  if( p->isAttached ){
+    pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc));
+    while( *pp!=p ){
+      pp = &(*pp)->pNext;
+    }
+    *pp = p->pNext;
+  }
+
+  /* If a transaction is still open on the Btree, roll it back. */
+  sqlite3BtreeRollback(p->pDest, SQLITE_OK, 0);
+
+  /* Set the error code of the destination database handle. */
+  rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc;
+  if( p->pDestDb ){
+    sqlite3Error(p->pDestDb, rc);
+
+    /* Exit the mutexes and free the backup context structure. */
+    sqlite3LeaveMutexAndCloseZombie(p->pDestDb);
+  }
+  sqlite3BtreeLeave(p->pSrc);
+  if( p->pDestDb ){
+    /* EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a
+    ** call to sqlite3_backup_init() and is destroyed by a call to
+    ** sqlite3_backup_finish(). */
+    sqlite3_free(p);
+  }
+  sqlite3LeaveMutexAndCloseZombie(pSrcDb);
+  return rc;
+}
+
+/*
+** Return the number of pages still to be backed up as of the most recent
+** call to sqlite3_backup_step().
+*/
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( p==0 ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  return p->nRemaining;
+}
+
+/*
+** Return the total number of pages in the source database as of the most
+** recent call to sqlite3_backup_step().
+*/
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( p==0 ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  return p->nPagecount;
+}
+
+/*
+** This function is called after the contents of page iPage of the
+** source database have been modified. If page iPage has already been
+** copied into the destination database, then the data written to the
+** destination is now invalidated. The destination copy of iPage needs
+** to be updated with the new data before the backup operation is
+** complete.
+**
+** It is assumed that the mutex associated with the BtShared object
+** corresponding to the source database is held when this function is
+** called.
+*/
+static SQLITE_NOINLINE void backupUpdate(
+  sqlite3_backup *p,
+  Pgno iPage,
+  const u8 *aData
+){
+  assert( p!=0 );
+  do{
+    assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
+    if( !isFatalError(p->rc) && iPage<p->iNext ){
+      /* The backup process p has already copied page iPage. But now it
+      ** has been modified by a transaction on the source pager. Copy
+      ** the new data into the backup.
+      */
+      int rc;
+      assert( p->pDestDb );
+      sqlite3_mutex_enter(p->pDestDb->mutex);
+      rc = backupOnePage(p, iPage, aData, 1);
+      sqlite3_mutex_leave(p->pDestDb->mutex);
+      assert( rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED );
+      if( rc!=SQLITE_OK ){
+        p->rc = rc;
+      }
+    }
+  }while( (p = p->pNext)!=0 );
+}
+SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){
+  if( pBackup ) backupUpdate(pBackup, iPage, aData);
+}
+
+/*
+** Restart the backup process. This is called when the pager layer
+** detects that the database has been modified by an external database
+** connection. In this case there is no way of knowing which of the
+** pages that have been copied into the destination database are still
+** valid and which are not, so the entire process needs to be restarted.
+**
+** It is assumed that the mutex associated with the BtShared object
+** corresponding to the source database is held when this function is
+** called.
+*/
+SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *pBackup){
+  sqlite3_backup *p;                   /* Iterator variable */
+  for(p=pBackup; p; p=p->pNext){
+    assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) );
+    p->iNext = 1;
+  }
+}
+
+#ifndef SQLITE_OMIT_VACUUM
+/*
+** Copy the complete content of pBtFrom into pBtTo. A transaction
+** must be active for both files.
+**
+** The size of file pTo may be reduced by this operation. If anything
+** goes wrong, the transaction on pTo is rolled back. If successful, the
+** transaction is committed before returning.
+*/
+SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){
+  int rc;
+  sqlite3_file *pFd;              /* File descriptor for database pTo */
+  sqlite3_backup b;
+  sqlite3BtreeEnter(pTo);
+  sqlite3BtreeEnter(pFrom);
+
+  assert( sqlite3BtreeIsInTrans(pTo) );
+  pFd = sqlite3PagerFile(sqlite3BtreePager(pTo));
+  if( pFd->pMethods ){
+    i64 nByte = sqlite3BtreeGetPageSize(pFrom)*(i64)sqlite3BtreeLastPage(pFrom);
+    rc = sqlite3OsFileControl(pFd, SQLITE_FCNTL_OVERWRITE, &nByte);
+    if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK;
+    if( rc ) goto copy_finished;
+  }
+
+  /* Set up an sqlite3_backup object. sqlite3_backup.pDestDb must be set
+  ** to 0. This is used by the implementations of sqlite3_backup_step()
+  ** and sqlite3_backup_finish() to detect that they are being called
+  ** from this function, not directly by the user.
+  */
+  memset(&b, 0, sizeof(b));
+  b.pSrcDb = pFrom->db;
+  b.pSrc = pFrom;
+  b.pDest = pTo;
+  b.iNext = 1;
+
+#ifdef SQLITE_HAS_CODEC
+  sqlite3PagerAlignReserve(sqlite3BtreePager(pTo), sqlite3BtreePager(pFrom));
+#endif
+
+  /* 0x7FFFFFFF is the hard limit for the number of pages in a database
+  ** file. By passing this as the number of pages to copy to
+  ** sqlite3_backup_step(), we can guarantee that the copy finishes
+  ** within a single call (unless an error occurs). The assert() statement
+  ** checks this assumption - (p->rc) should be set to either SQLITE_DONE
+  ** or an error code.
+  */
+  sqlite3_backup_step(&b, 0x7FFFFFFF);
+  assert( b.rc!=SQLITE_OK );
+
+  rc = sqlite3_backup_finish(&b);
+  if( rc==SQLITE_OK ){
+    pTo->pBt->btsFlags &= ~BTS_PAGESIZE_FIXED;
+  }else{
+    sqlite3PagerClearCache(sqlite3BtreePager(b.pDest));
+  }
+
+  assert( sqlite3BtreeIsInTrans(pTo)==0 );
+copy_finished:
+  sqlite3BtreeLeave(pFrom);
+  sqlite3BtreeLeave(pTo);
+  return rc;
+}
+#endif /* SQLITE_OMIT_VACUUM */
+
+/************** End of backup.c **********************************************/
+/************** Begin file vdbemem.c *****************************************/
+/*
+** 2004 May 26
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to manipulate the "Mem" structure.  A "Mem"
+** stores a single value in the VDBE.  Mem is an opaque structure visible
+** only within the VDBE.  Interface routines refer to a Mem using the
+** name sqlite_value
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifdef SQLITE_DEBUG
+/*
+** Check invariants on a Mem object.
+**
+** This routine is intended for use inside of assert() statements, like
+** this:    assert( sqlite3VdbeCheckMemInvariants(pMem) );
+*/
+SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){
+  /* If MEM_Dyn is set then Mem.xDel!=0.
+  ** Mem.xDel might not be initialized if MEM_Dyn is clear.
+  */
+  assert( (p->flags & MEM_Dyn)==0 || p->xDel!=0 );
+
+  /* MEM_Dyn may only be set if Mem.szMalloc==0.  In this way we
+  ** ensure that if Mem.szMalloc>0 then it is safe to do
+  ** Mem.z = Mem.zMalloc without having to check Mem.flags&MEM_Dyn.
+  ** That saves a few cycles in inner loops. */
+  assert( (p->flags & MEM_Dyn)==0 || p->szMalloc==0 );
+
+  /* Cannot be both MEM_Int and MEM_Real at the same time */
+  assert( (p->flags & (MEM_Int|MEM_Real))!=(MEM_Int|MEM_Real) );
+
+  /* The szMalloc field holds the correct memory allocation size */
+  assert( p->szMalloc==0
+       || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc) );
+
+  /* If p holds a string or blob, the Mem.z must point to exactly
+  ** one of the following:
+  **
+  **   (1) Memory in Mem.zMalloc and managed by the Mem object
+  **   (2) Memory to be freed using Mem.xDel
+  **   (3) An ephemeral string or blob
+  **   (4) A static string or blob
+  */
+  if( (p->flags & (MEM_Str|MEM_Blob)) && p->n>0 ){
+    assert(
+      ((p->szMalloc>0 && p->z==p->zMalloc)? 1 : 0) +
+      ((p->flags&MEM_Dyn)!=0 ? 1 : 0) +
+      ((p->flags&MEM_Ephem)!=0 ? 1 : 0) +
+      ((p->flags&MEM_Static)!=0 ? 1 : 0) == 1
+    );
+  }
+  return 1;
+}
+#endif
+
+
+/*
+** If pMem is an object with a valid string representation, this routine
+** ensures the internal encoding for the string representation is
+** 'desiredEnc', one of SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE.
+**
+** If pMem is not a string object, or the encoding of the string
+** representation is already stored using the requested encoding, then this
+** routine is a no-op.
+**
+** SQLITE_OK is returned if the conversion is successful (or not required).
+** SQLITE_NOMEM may be returned if a malloc() fails during conversion
+** between formats.
+*/
+SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){
+#ifndef SQLITE_OMIT_UTF16
+  int rc;
+#endif
+  assert( (pMem->flags&MEM_RowSet)==0 );
+  assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE
+       || desiredEnc==SQLITE_UTF16BE );
+  if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){
+    return SQLITE_OK;
+  }
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+#ifdef SQLITE_OMIT_UTF16
+  return SQLITE_ERROR;
+#else
+
+  /* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. If NOMEM is returned,
+  ** then the encoding of the value may not have changed.
+  */
+  rc = sqlite3VdbeMemTranslate(pMem, (u8)desiredEnc);
+  assert(rc==SQLITE_OK    || rc==SQLITE_NOMEM);
+  assert(rc==SQLITE_OK    || pMem->enc!=desiredEnc);
+  assert(rc==SQLITE_NOMEM || pMem->enc==desiredEnc);
+  return rc;
+#endif
+}
+
+/*
+** Make sure pMem->z points to a writable allocation of at least
+** min(n,32) bytes.
+**
+** If the bPreserve argument is true, then the content of pMem->z is
+** copied into the new allocation.  pMem must be either a string or
+** blob if bPreserve is true.  If bPreserve is false, any prior content
+** in pMem->z is discarded.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){
+  assert( sqlite3VdbeCheckMemInvariants(pMem) );
+  assert( (pMem->flags&MEM_RowSet)==0 );
+  testcase( pMem->db==0 );
+
+  /* If the bPreserve flag is set to true, then the memory cell must already
+  ** contain a valid string or blob value.  */
+  assert( bPreserve==0 || pMem->flags&(MEM_Blob|MEM_Str) );
+  testcase( bPreserve && pMem->z==0 );
+
+  assert( pMem->szMalloc==0
+       || pMem->szMalloc==sqlite3DbMallocSize(pMem->db, pMem->zMalloc) );
+  if( pMem->szMalloc<n ){
+    if( bPreserve && pMem->szMalloc>0 && pMem->z==pMem->zMalloc ){
+      pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n);
+      bPreserve = 0;
+    }else{
+      if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc);
+      pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n);
+    }
+    if( pMem->zMalloc==0 ){
+      sqlite3VdbeMemSetNull(pMem);
+      pMem->z = 0;
+      pMem->szMalloc = 0;
+      return SQLITE_NOMEM_BKPT;
+    }else{
+      pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc);
+    }
+  }
+
+  if( bPreserve && pMem->z && pMem->z!=pMem->zMalloc ){
+    memcpy(pMem->zMalloc, pMem->z, pMem->n);
+  }
+  if( (pMem->flags&MEM_Dyn)!=0 ){
+    assert( pMem->xDel!=0 && pMem->xDel!=SQLITE_DYNAMIC );
+    pMem->xDel((void *)(pMem->z));
+  }
+
+  pMem->z = pMem->zMalloc;
+  pMem->flags &= ~(MEM_Dyn|MEM_Ephem|MEM_Static);
+  return SQLITE_OK;
+}
+
+/*
+** Change the pMem->zMalloc allocation to be at least szNew bytes.
+** If pMem->zMalloc already meets or exceeds the requested size, this
+** routine is a no-op.
+**
+** Any prior string or blob content in the pMem object may be discarded.
+** The pMem->xDel destructor is called, if it exists.  Though MEM_Str
+** and MEM_Blob values may be discarded, MEM_Int, MEM_Real, and MEM_Null
+** values are preserved.
+**
+** Return SQLITE_OK on success or an error code (probably SQLITE_NOMEM)
+** if unable to complete the resizing.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
+  assert( szNew>0 );
+  assert( (pMem->flags & MEM_Dyn)==0 || pMem->szMalloc==0 );
+  if( pMem->szMalloc<szNew ){
+    return sqlite3VdbeMemGrow(pMem, szNew, 0);
+  }
+  assert( (pMem->flags & MEM_Dyn)==0 );
+  pMem->z = pMem->zMalloc;
+  pMem->flags &= (MEM_Null|MEM_Int|MEM_Real);
+  return SQLITE_OK;
+}
+
+/*
+** Change pMem so that its MEM_Str or MEM_Blob value is stored in
+** MEM.zMalloc, where it can be safely written.
+**
+** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails.
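+**
+** Illustrative sketch of the intended call pattern (assumed, not taken
+** from the original sources) before editing a value in place:
+**
+**     if( sqlite3VdbeMemMakeWriteable(pMem) ) return SQLITE_NOMEM;
+**     pMem->z[0] = 'x';    (safe: pMem->z now lives in pMem->zMalloc)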
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( (pMem->flags&MEM_RowSet)==0 );
+  if( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ){
+    if( ExpandBlob(pMem) ) return SQLITE_NOMEM;
+    if( pMem->szMalloc==0 || pMem->z!=pMem->zMalloc ){
+      if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){
+        return SQLITE_NOMEM_BKPT;
+      }
+      pMem->z[pMem->n] = 0;
+      pMem->z[pMem->n+1] = 0;
+      pMem->flags |= MEM_Term;
+    }
+  }
+  pMem->flags &= ~MEM_Ephem;
+#ifdef SQLITE_DEBUG
+  pMem->pScopyFrom = 0;
+#endif
+
+  return SQLITE_OK;
+}
+
+/*
+** If the given Mem* has a zero-filled tail, turn it into an ordinary
+** blob stored in dynamically allocated space.
+*/
+#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){
+  int nByte;
+  assert( pMem->flags & MEM_Zero );
+  assert( pMem->flags&MEM_Blob );
+  assert( (pMem->flags&MEM_RowSet)==0 );
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+
+  /* Set nByte to the number of bytes required to store the expanded blob. */
+  nByte = pMem->n + pMem->u.nZero;
+  if( nByte<=0 ){
+    nByte = 1;
+  }
+  if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){
+    return SQLITE_NOMEM_BKPT;
+  }
+
+  memset(&pMem->z[pMem->n], 0, pMem->u.nZero);
+  pMem->n += pMem->u.nZero;
+  pMem->flags &= ~(MEM_Zero|MEM_Term);
+  return SQLITE_OK;
+}
+#endif
+
+/*
+** It is already known that pMem contains an unterminated string.
+** Add the zero terminator.
+*/
+static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){
+  if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  pMem->z[pMem->n] = 0;
+  pMem->z[pMem->n+1] = 0;
+  pMem->flags |= MEM_Term;
+  return SQLITE_OK;
+}
+
+/*
+** Make sure the given Mem is \u0000 terminated.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) );
+  testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 );
+  if( (pMem->flags & (MEM_Term|MEM_Str))!=MEM_Str ){
+    return SQLITE_OK;   /* Nothing to do */
+  }else{
+    return vdbeMemAddTerminator(pMem);
+  }
+}
+
+/*
+** Add MEM_Str to the set of representations for the given Mem.  Numbers
+** are converted using sqlite3_snprintf().  Converting a BLOB to a string
+** is a no-op.
+**
+** Existing representations MEM_Int and MEM_Real are invalidated if
+** bForce is true but are retained if bForce is false.
+**
+** A MEM_Null value will never be passed to this function.  This function is
+** used for converting values to text for returning to the user (i.e. via
+** sqlite3_value_text()), or for ensuring that values to be used as btree
+** keys are strings.  In the former case a NULL pointer is returned to the
+** user and the latter is an internal programming error.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
+  int fg = pMem->flags;
+  const int nByte = 32;
+
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( !(fg&MEM_Zero) );
+  assert( !(fg&(MEM_Str|MEM_Blob)) );
+  assert( fg&(MEM_Int|MEM_Real) );
+  assert( (pMem->flags&MEM_RowSet)==0 );
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+
+  if( sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+    pMem->enc = 0;
+    return SQLITE_NOMEM_BKPT;
+  }
+
+  /* For a Real or Integer, use sqlite3_snprintf() to produce the UTF-8
+  ** string representation of the value.  Then, if the required encoding
+  ** is UTF-16le or UTF-16be do a translation.
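+  ** ("%lld" renders the 64-bit integer in decimal; "%!.15g" renders the
+  ** double with up to 15 significant digits, the '!' being an extension
+  ** flag understood only by SQLite's own printf implementation.)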
+  **
+  ** FIX ME: It would be better if sqlite3_snprintf() could do UTF-16.
+  */
+  if( fg & MEM_Int ){
+    sqlite3_snprintf(nByte, pMem->z, "%lld", pMem->u.i);
+  }else{
+    assert( fg & MEM_Real );
+    sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->u.r);
+  }
+  pMem->n = sqlite3Strlen30(pMem->z);
+  pMem->enc = SQLITE_UTF8;
+  pMem->flags |= MEM_Str|MEM_Term;
+  if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real);
+  sqlite3VdbeChangeEncoding(pMem, enc);
+  return SQLITE_OK;
+}
+
+/*
+** Memory cell pMem contains the context of an aggregate function.
+** This routine calls the finalize method for that function.  The
+** result of the aggregate is stored back into pMem.
+**
+** Return SQLITE_ERROR if the finalizer reports an error.  SQLITE_OK
+** otherwise.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
+  int rc = SQLITE_OK;
+  if( ALWAYS(pFunc && pFunc->xFinalize) ){
+    sqlite3_context ctx;
+    Mem t;
+    assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef );
+    assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+    memset(&ctx, 0, sizeof(ctx));
+    memset(&t, 0, sizeof(t));
+    t.flags = MEM_Null;
+    t.db = pMem->db;
+    ctx.pOut = &t;
+    ctx.pMem = pMem;
+    ctx.pFunc = pFunc;
+    pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */
+    assert( (pMem->flags & MEM_Dyn)==0 );
+    if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc);
+    memcpy(pMem, &t, sizeof(t));
+    rc = ctx.isError;
+  }
+  return rc;
+}
+
+/*
+** If the memory cell contains a value that must be freed by
+** invoking the external callback in Mem.xDel, then this routine
+** will free that value.  It also sets Mem.flags to MEM_Null.
+**
+** This is a helper routine for sqlite3VdbeMemSetNull() and
+** for sqlite3VdbeMemRelease().  Use those other routines as the
+** entry point for releasing Mem resources.
+*/
+static SQLITE_NOINLINE void vdbeMemClearExternAndSetNull(Mem *p){
+  assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) );
+  assert( VdbeMemDynamic(p) );
+  if( p->flags&MEM_Agg ){
+    sqlite3VdbeMemFinalize(p, p->u.pDef);
+    assert( (p->flags & MEM_Agg)==0 );
+    testcase( p->flags & MEM_Dyn );
+  }
+  if( p->flags&MEM_Dyn ){
+    assert( (p->flags&MEM_RowSet)==0 );
+    assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 );
+    p->xDel((void *)p->z);
+  }else if( p->flags&MEM_RowSet ){
+    sqlite3RowSetClear(p->u.pRowSet);
+  }else if( p->flags&MEM_Frame ){
+    VdbeFrame *pFrame = p->u.pFrame;
+    pFrame->pParent = pFrame->v->pDelFrame;
+    pFrame->v->pDelFrame = pFrame;
+  }
+  p->flags = MEM_Null;
+}
+
+/*
+** Release memory held by the Mem p, both external memory cleared
+** by p->xDel and memory in p->zMalloc.
+**
+** This is a helper routine invoked by sqlite3VdbeMemRelease() in
+** the unusual case where there really is memory in p that needs
+** to be freed.
+*/
+static SQLITE_NOINLINE void vdbeMemClear(Mem *p){
+  if( VdbeMemDynamic(p) ){
+    vdbeMemClearExternAndSetNull(p);
+  }
+  if( p->szMalloc ){
+    sqlite3DbFree(p->db, p->zMalloc);
+    p->szMalloc = 0;
+  }
+  p->z = 0;
+}
+
+/*
+** Release any memory resources held by the Mem.  Both the memory that is
+** freed by Mem.xDel and the Mem.zMalloc allocation are freed.
+**
+** Use this routine to clean up prior to abandoning a Mem, or to
+** reset a Mem back to its minimum memory utilization.
+**
+** Use sqlite3VdbeMemSetNull() to release just the Mem.xDel space
+** prior to inserting new content into the Mem.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){
+  assert( sqlite3VdbeCheckMemInvariants(p) );
+  if( VdbeMemDynamic(p) || p->szMalloc ){
+    vdbeMemClear(p);
+  }
+}
+
+/*
+** Convert a 64-bit IEEE double into a 64-bit signed integer.
+** If the double is out of range of a 64-bit signed integer then
+** return the closest available 64-bit signed integer.
+*/
+static i64 doubleToInt64(double r){
+#ifdef SQLITE_OMIT_FLOATING_POINT
+  /* When floating-point is omitted, double and int64 are the same thing */
+  return r;
+#else
+  /*
+  ** Many compilers we encounter do not define constants for the
+  ** minimum and maximum 64-bit integers, or they define them
+  ** inconsistently.  And many do not understand the "LL" notation.
+  ** So we define our own static constants here using nothing
+  ** larger than a 32-bit integer constant.
+  */
+  static const i64 maxInt = LARGEST_INT64;
+  static const i64 minInt = SMALLEST_INT64;
+
+  if( r<=(double)minInt ){
+    return minInt;
+  }else if( r>=(double)maxInt ){
+    return maxInt;
+  }else{
+    return (i64)r;
+  }
+#endif
+}
+
+/*
+** Return some kind of integer value which is the best we can do
+** at representing the value that *pMem describes as an integer.
+** If pMem is an integer, then the value is exact.  If pMem is
+** a floating-point then the value returned is the integer part.
+** If pMem is a string or blob, then we make an attempt to convert
+** it into an integer and return that.  If pMem represents an
+** SQL-NULL value, return 0.
+**
+** If pMem represents a string value, its encoding might be changed.
+*/
+SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){
+  int flags;
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+  flags = pMem->flags;
+  if( flags & MEM_Int ){
+    return pMem->u.i;
+  }else if( flags & MEM_Real ){
+    return doubleToInt64(pMem->u.r);
+  }else if( flags & (MEM_Str|MEM_Blob) ){
+    i64 value = 0;
+    assert( pMem->z || pMem->n==0 );
+    sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc);
+    return value;
+  }else{
+    return 0;
+  }
+}
+
+/*
+** Return the best representation of pMem that we can get into a
+** double.  If pMem is already a double or an integer, return its
+** value.  If it is a string or blob, try to convert it to a double.
+** If it is a NULL, return 0.0.
+*/
+SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+  if( pMem->flags & MEM_Real ){
+    return pMem->u.r;
+  }else if( pMem->flags & MEM_Int ){
+    return (double)pMem->u.i;
+  }else if( pMem->flags & (MEM_Str|MEM_Blob) ){
+    /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+    double val = (double)0;
+    sqlite3AtoF(pMem->z, &val, pMem->n, pMem->enc);
+    return val;
+  }else{
+    /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */
+    return (double)0;
+  }
+}
+
+/*
+** The MEM structure is already a MEM_Real.  Try to also make it a
+** MEM_Int if we can.
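+**
+** For example, a Mem holding the real value 3.0 is converted to the
+** integer 3, while 3.5 (the round-trip check fails) or 9.2e18 (clamped
+** by doubleToInt64()) is left as MEM_Real.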
+*/
+SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
+  i64 ix;
+  assert( pMem->flags & MEM_Real );
+  assert( (pMem->flags & MEM_RowSet)==0 );
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+  ix = doubleToInt64(pMem->u.r);
+
+  /* Only mark the value as an integer if
+  **
+  **    (1) the round-trip conversion real->int->real is a no-op, and
+  **    (2) The integer is neither the largest nor the smallest
+  **        possible integer (ticket #3922)
+  **
+  ** The second and third terms in the following conditional enforce
+  ** the second condition under the assumption that addition overflow causes
+  ** values to wrap around.
+  */
+  if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){
+    pMem->u.i = ix;
+    MemSetTypeFlag(pMem, MEM_Int);
+  }
+}
+
+/*
+** Convert pMem to type integer.  Invalidate any prior representations.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( (pMem->flags & MEM_RowSet)==0 );
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+  pMem->u.i = sqlite3VdbeIntValue(pMem);
+  MemSetTypeFlag(pMem, MEM_Int);
+  return SQLITE_OK;
+}
+
+/*
+** Convert pMem so that it is of type MEM_Real.
+** Invalidate any prior representations.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){
+  assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+
+  pMem->u.r = sqlite3VdbeRealValue(pMem);
+  MemSetTypeFlag(pMem, MEM_Real);
+  return SQLITE_OK;
+}
+
+/*
+** Convert pMem so that it has type MEM_Real or MEM_Int or both.
+** Invalidate any prior representations.
+**
+** Every effort is made to force the conversion, even if the input
+** is a string that does not look completely like a number.  Convert
+** as much of the string as we can and ignore the rest.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
+  if( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))==0 ){
+    assert( (pMem->flags & (MEM_Blob|MEM_Str))!=0 );
+    assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+    if( 0==sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc) ){
+      MemSetTypeFlag(pMem, MEM_Int);
+    }else{
+      pMem->u.r = sqlite3VdbeRealValue(pMem);
+      MemSetTypeFlag(pMem, MEM_Real);
+      sqlite3VdbeIntegerAffinity(pMem);
+    }
+  }
+  assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))!=0 );
+  pMem->flags &= ~(MEM_Str|MEM_Blob|MEM_Zero);
+  return SQLITE_OK;
+}
+
+/*
+** Cast the datatype of the value in pMem according to the affinity
+** "aff".  Casting is different from applying affinity in that a cast
+** is forced.  In other words, the value is converted into the desired
+** affinity even if that results in loss of data.  This routine is
+** used (for example) to implement the SQL "cast()" operator.
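+** For example, CAST('123abc' AS INTEGER) forces the conversion and
+** yields 123, whereas merely applying NUMERIC affinity would have left
+** the text value unchanged.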
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
+  if( pMem->flags & MEM_Null ) return;
+  switch( aff ){
+    case SQLITE_AFF_BLOB: {   /* Really a cast to BLOB */
+      if( (pMem->flags & MEM_Blob)==0 ){
+        sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
+        assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
+        if( pMem->flags & MEM_Str ) MemSetTypeFlag(pMem, MEM_Blob);
+      }else{
+        pMem->flags &= ~(MEM_TypeMask&~MEM_Blob);
+      }
+      break;
+    }
+    case SQLITE_AFF_NUMERIC: {
+      sqlite3VdbeMemNumerify(pMem);
+      break;
+    }
+    case SQLITE_AFF_INTEGER: {
+      sqlite3VdbeMemIntegerify(pMem);
+      break;
+    }
+    case SQLITE_AFF_REAL: {
+      sqlite3VdbeMemRealify(pMem);
+      break;
+    }
+    default: {
+      assert( aff==SQLITE_AFF_TEXT );
+      assert( MEM_Str==(MEM_Blob>>3) );
+      pMem->flags |= (pMem->flags&MEM_Blob)>>3;
+      sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
+      assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
+      pMem->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero);
+      break;
+    }
+  }
+}
+
+/*
+** Initialize bulk memory to be a consistent Mem object.
+**
+** The minimum amount of initialization feasible is performed.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem *pMem, sqlite3 *db, u16 flags){
+  assert( (flags & ~MEM_TypeMask)==0 );
+  pMem->flags = flags;
+  pMem->db = db;
+  pMem->szMalloc = 0;
+}
+
+
+/*
+** Delete any previous value and set the value stored in *pMem to NULL.
+**
+** This routine calls the Mem.xDel destructor to dispose of values that
+** require the destructor.  But it preserves the Mem.zMalloc memory allocation.
+** To free all resources, use sqlite3VdbeMemRelease(), which both calls this
+** routine to invoke the destructor and deallocates Mem.zMalloc.
+**
+** Use this routine to reset the Mem prior to inserting a new value.
+**
+** Use sqlite3VdbeMemRelease() to completely erase the Mem prior to
+** abandoning it.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem *pMem){
+  if( VdbeMemDynamic(pMem) ){
+    vdbeMemClearExternAndSetNull(pMem);
+  }else{
+    pMem->flags = MEM_Null;
+  }
+}
+SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){
+  sqlite3VdbeMemSetNull((Mem*)p);
+}
+
+/*
+** Delete any previous value and set the value to be a BLOB of length
+** n containing all zeros.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){
+  sqlite3VdbeMemRelease(pMem);
+  pMem->flags = MEM_Blob|MEM_Zero;
+  pMem->n = 0;
+  if( n<0 ) n = 0;
+  pMem->u.nZero = n;
+  pMem->enc = SQLITE_UTF8;
+  pMem->z = 0;
+}
+
+/*
+** The pMem is known to contain content that needs to be destroyed prior
+** to a value change.  So invoke the destructor, then set the value to
+** a 64-bit integer.
+*/
+static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){
+  sqlite3VdbeMemSetNull(pMem);
+  pMem->u.i = val;
+  pMem->flags = MEM_Int;
+}
+
+/*
+** Delete any previous value and set the value stored in *pMem to val,
+** manifest type INTEGER.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){
+  if( VdbeMemDynamic(pMem) ){
+    vdbeReleaseAndSetInt64(pMem, val);
+  }else{
+    pMem->u.i = val;
+    pMem->flags = MEM_Int;
+  }
+}
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+/*
+** Delete any previous value and set the value stored in *pMem to val,
+** manifest type REAL.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){
+  sqlite3VdbeMemSetNull(pMem);
+  if( !sqlite3IsNaN(val) ){
+    pMem->u.r = val;
+    pMem->flags = MEM_Real;
+  }
+}
+#endif
+
+/*
+** Delete any previous value and set the value of pMem to be an
+** empty boolean index.
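+** (A RowSet is the VDBE's internal structure for accumulating and then
+** testing sets of rowids, as used by the OP_RowSetAdd and OP_RowSetTest
+** opcodes.)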
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem *pMem){
+  sqlite3 *db = pMem->db;
+  assert( db!=0 );
+  assert( (pMem->flags & MEM_RowSet)==0 );
+  sqlite3VdbeMemRelease(pMem);
+  pMem->zMalloc = sqlite3DbMallocRawNN(db, 64);
+  if( db->mallocFailed ){
+    pMem->flags = MEM_Null;
+    pMem->szMalloc = 0;
+  }else{
+    assert( pMem->zMalloc );
+    pMem->szMalloc = sqlite3DbMallocSize(db, pMem->zMalloc);
+    pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, pMem->szMalloc);
+    assert( pMem->u.pRowSet!=0 );
+    pMem->flags = MEM_RowSet;
+  }
+}
+
+/*
+** Return true if the Mem object contains a TEXT or BLOB that is
+** too large - whose size exceeds SQLITE_MAX_LENGTH.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){
+  assert( p->db!=0 );
+  if( p->flags & (MEM_Str|MEM_Blob) ){
+    int n = p->n;
+    if( p->flags & MEM_Zero ){
+      n += p->u.nZero;
+    }
+    return n>p->db->aLimit[SQLITE_LIMIT_LENGTH];
+  }
+  return 0;
+}
+
+#ifdef SQLITE_DEBUG
+/*
+** This routine prepares a memory cell for modification by breaking
+** its link to a shallow copy and by marking any current shallow
+** copies of this cell as invalid.
+**
+** This is used for testing and debugging only - to make sure shallow
+** copies are not misused.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){
+  int i;
+  Mem *pX;
+  for(i=0, pX=pVdbe->aMem; i<pVdbe->nMem; i++, pX++){
+    if( pX->pScopyFrom==pMem ){
+      pX->flags |= MEM_Undefined;
+      pX->pScopyFrom = 0;
+    }
+  }
+  pMem->pScopyFrom = 0;
+}
+#endif /* SQLITE_DEBUG */
+
+
+/*
+** Make a shallow copy of pFrom into pTo.  Prior contents of
+** pTo are freed.  The pFrom->z field is not duplicated.  If
+** pFrom->z is used, then pTo->z points to the same thing as pFrom->z
+** and flags gets srcType (either MEM_Ephem or MEM_Static).
+*/
+static SQLITE_NOINLINE void vdbeClrCopy(Mem *pTo, const Mem *pFrom, int eType){
+  vdbeMemClearExternAndSetNull(pTo);
+  assert( !VdbeMemDynamic(pTo) );
+  sqlite3VdbeMemShallowCopy(pTo, pFrom, eType);
+}
+SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){
+  assert( (pFrom->flags & MEM_RowSet)==0 );
+  assert( pTo->db==pFrom->db );
+  if( VdbeMemDynamic(pTo) ){ vdbeClrCopy(pTo,pFrom,srcType); return; }
+  memcpy(pTo, pFrom, MEMCELLSIZE);
+  if( (pFrom->flags&MEM_Static)==0 ){
+    pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem);
+    assert( srcType==MEM_Ephem || srcType==MEM_Static );
+    pTo->flags |= srcType;
+  }
+}
+
+/*
+** Make a full copy of pFrom into pTo.  Prior contents of pTo are
+** freed before the copy is made.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){
+  int rc = SQLITE_OK;
+
+  assert( (pFrom->flags & MEM_RowSet)==0 );
+  if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo);
+  memcpy(pTo, pFrom, MEMCELLSIZE);
+  pTo->flags &= ~MEM_Dyn;
+  if( pTo->flags&(MEM_Str|MEM_Blob) ){
+    if( 0==(pFrom->flags&MEM_Static) ){
+      pTo->flags |= MEM_Ephem;
+      rc = sqlite3VdbeMemMakeWriteable(pTo);
+    }
+  }
+
+  return rc;
+}
+
+/*
+** Transfer the contents of pFrom to pTo.  Any existing value in pTo is
+** freed.  If pFrom contains ephemeral data, a copy is made.
+**
+** pFrom contains an SQL NULL when this routine returns.
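+**
+** The effect is move semantics.  After the sketch below, pTo owns
+** whatever pFrom held and pFrom is a NULL value:
+**
+**     sqlite3VdbeMemMove(pTo, pFrom);
+**     assert( pFrom->flags==MEM_Null );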
+*/ +SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){ + assert( pFrom->db==0 || sqlite3_mutex_held(pFrom->db->mutex) ); + assert( pTo->db==0 || sqlite3_mutex_held(pTo->db->mutex) ); + assert( pFrom->db==0 || pTo->db==0 || pFrom->db==pTo->db ); + + sqlite3VdbeMemRelease(pTo); + memcpy(pTo, pFrom, sizeof(Mem)); + pFrom->flags = MEM_Null; + pFrom->szMalloc = 0; +} + +/* +** Change the value of a Mem to be a string or a BLOB. +** +** The memory management strategy depends on the value of the xDel +** parameter. If the value passed is SQLITE_TRANSIENT, then the +** string is copied into a (possibly existing) buffer managed by the +** Mem structure. Otherwise, any existing buffer is freed and the +** pointer copied. +** +** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH +** size limit) then no memory allocation occurs. If the string can be +** stored without allocating memory, then it is. If a memory allocation +** is required to store the string, then value of pMem is unchanged. In +** either case, SQLITE_TOOBIG is returned. +*/ +SQLITE_PRIVATE int sqlite3VdbeMemSetStr( + Mem *pMem, /* Memory cell to set to string value */ + const char *z, /* String pointer */ + int n, /* Bytes in string, or negative */ + u8 enc, /* Encoding of z. 0 for BLOBs */ + void (*xDel)(void*) /* Destructor function */ +){ + int nByte = n; /* New value for pMem->n */ + int iLimit; /* Maximum allowed string or blob size */ + u16 flags = 0; /* New value for pMem->flags */ + + assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); + assert( (pMem->flags & MEM_RowSet)==0 ); + + /* If z is a NULL pointer, set pMem to contain an SQL NULL. */ + if( !z ){ + sqlite3VdbeMemSetNull(pMem); + return SQLITE_OK; + } + + if( pMem->db ){ + iLimit = pMem->db->aLimit[SQLITE_LIMIT_LENGTH]; + }else{ + iLimit = SQLITE_MAX_LENGTH; + } + flags = (enc==0?MEM_Blob:MEM_Str); + if( nByte<0 ){ + assert( enc!=0 ); + if( enc==SQLITE_UTF8 ){ + nByte = sqlite3Strlen30(z); + if( nByte>iLimit ) nByte = iLimit+1; + }else{ + for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){} + } + flags |= MEM_Term; + } + + /* The following block sets the new values of Mem.z and Mem.xDel. It + ** also sets a flag in local variable "flags" to indicate the memory + ** management (one of MEM_Dyn or MEM_Static). + */ + if( xDel==SQLITE_TRANSIENT ){ + int nAlloc = nByte; + if( flags&MEM_Term ){ + nAlloc += (enc==SQLITE_UTF8?1:2); + } + if( nByte>iLimit ){ + return SQLITE_TOOBIG; + } + testcase( nAlloc==0 ); + testcase( nAlloc==31 ); + testcase( nAlloc==32 ); + if( sqlite3VdbeMemClearAndResize(pMem, MAX(nAlloc,32)) ){ + return SQLITE_NOMEM_BKPT; + } + memcpy(pMem->z, z, nAlloc); + }else if( xDel==SQLITE_DYNAMIC ){ + sqlite3VdbeMemRelease(pMem); + pMem->zMalloc = pMem->z = (char *)z; + pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc); + }else{ + sqlite3VdbeMemRelease(pMem); + pMem->z = (char *)z; + pMem->xDel = xDel; + flags |= ((xDel==SQLITE_STATIC)?MEM_Static:MEM_Dyn); + } + + pMem->n = nByte; + pMem->flags = flags; + pMem->enc = (enc==0 ? SQLITE_UTF8 : enc); + +#ifndef SQLITE_OMIT_UTF16 + if( pMem->enc!=SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){ + return SQLITE_NOMEM_BKPT; + } +#endif + + if( nByte>iLimit ){ + return SQLITE_TOOBIG; + } + + return SQLITE_OK; +} + +/* +** Move data out of a btree key or data field and into a Mem structure. +** The data is payload from the entry that pCur is currently pointing +** to. offset and amt determine what portion of the data or key to retrieve. 
+** The result is written into the pMem element. +** +** The pMem object must have been initialized. This routine will use +** pMem->zMalloc to hold the content from the btree, if possible. New +** pMem->zMalloc space will be allocated if necessary. The calling routine +** is responsible for making sure that the pMem object is eventually +** destroyed. +** +** If this routine fails for any reason (malloc returns NULL or unable +** to read from the disk) then the pMem is left in an inconsistent state. +*/ +static SQLITE_NOINLINE int vdbeMemFromBtreeResize( + BtCursor *pCur, /* Cursor pointing at record to retrieve. */ + u32 offset, /* Offset from the start of data to return bytes from. */ + u32 amt, /* Number of bytes to return. */ + Mem *pMem /* OUT: Return data in this Mem structure. */ +){ + int rc; + pMem->flags = MEM_Null; + if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+2)) ){ + rc = sqlite3BtreePayload(pCur, offset, amt, pMem->z); + if( rc==SQLITE_OK ){ + pMem->z[amt] = 0; + pMem->z[amt+1] = 0; + pMem->flags = MEM_Blob|MEM_Term; + pMem->n = (int)amt; + }else{ + sqlite3VdbeMemRelease(pMem); + } + } + return rc; +} +SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( + BtCursor *pCur, /* Cursor pointing at record to retrieve. */ + u32 offset, /* Offset from the start of data to return bytes from. */ + u32 amt, /* Number of bytes to return. */ + Mem *pMem /* OUT: Return data in this Mem structure. */ +){ + char *zData; /* Data from the btree layer */ + u32 available = 0; /* Number of bytes available on the local btree page */ + int rc = SQLITE_OK; /* Return code */ + + assert( sqlite3BtreeCursorIsValid(pCur) ); + assert( !VdbeMemDynamic(pMem) ); + + /* Note: the calls to BtreeKeyFetch() and DataFetch() below assert() + ** that both the BtShared and database handle mutexes are held. */ + assert( (pMem->flags & MEM_RowSet)==0 ); + zData = (char *)sqlite3BtreePayloadFetch(pCur, &available); + assert( zData!=0 ); + + if( offset+amt<=available ){ + pMem->z = &zData[offset]; + pMem->flags = MEM_Blob|MEM_Ephem; + pMem->n = (int)amt; + }else{ + rc = vdbeMemFromBtreeResize(pCur, offset, amt, pMem); + } + + return rc; +} + +/* +** The pVal argument is known to be a value other than NULL. +** Convert it into a string with encoding enc and return a pointer +** to a zero-terminated version of that string. +*/ +static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){ + assert( pVal!=0 ); + assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); + assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); + assert( (pVal->flags & MEM_RowSet)==0 ); + assert( (pVal->flags & (MEM_Null))==0 ); + if( pVal->flags & (MEM_Blob|MEM_Str) ){ + if( ExpandBlob(pVal) ) return 0; + pVal->flags |= MEM_Str; + if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){ + sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); + } + if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){ + assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 ); + if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){ + return 0; + } + } + sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */ + }else{ + sqlite3VdbeMemStringify(pVal, enc, 0); + assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) ); + } + assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0 + || pVal->db->mallocFailed ); + if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){ + return pVal->z; + }else{ + return 0; + } +} + +/* This function is only available internally, it is not part of the +** external API. 
It works in a similar way to sqlite3_value_text(),
+** except the data returned is in the encoding specified by the second
+** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or
+** SQLITE_UTF8.
+**
+** (2006-02-16:)  The enc value can be or-ed with SQLITE_UTF16_ALIGNED.
+** If that is the case, then the result must be aligned on an even byte
+** boundary.
+*/
+SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){
+  if( !pVal ) return 0;
+  assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) );
+  assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) );
+  assert( (pVal->flags & MEM_RowSet)==0 );
+  if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){
+    return pVal->z;
+  }
+  if( pVal->flags&MEM_Null ){
+    return 0;
+  }
+  return valueToText(pVal, enc);
+}
+
+/*
+** Create a new sqlite3_value object.
+*/
+SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *db){
+  Mem *p = sqlite3DbMallocZero(db, sizeof(*p));
+  if( p ){
+    p->flags = MEM_Null;
+    p->db = db;
+  }
+  return p;
+}
+
+/*
+** Context object passed by sqlite3Stat4ProbeSetValue() through to
+** valueNew(). See comments above valueNew() for details.
+*/
+struct ValueNewStat4Ctx {
+  Parse *pParse;
+  Index *pIdx;
+  UnpackedRecord **ppRec;
+  int iVal;
+};
+
+/*
+** Allocate and return a pointer to a new sqlite3_value object. If
+** the second argument to this function is NULL, the object is allocated
+** by calling sqlite3ValueNew().
+**
+** Otherwise, if the second argument is non-zero, then this function is
+** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not
+** already been allocated, allocate the UnpackedRecord structure that
+** that function will return to its caller here. Then return a pointer to
+** an sqlite3_value within the UnpackedRecord.a[] array.
+*/
+static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  if( p ){
+    UnpackedRecord *pRec = p->ppRec[0];
+
+    if( pRec==0 ){
+      Index *pIdx = p->pIdx;      /* Index being probed */
+      int nByte;                  /* Bytes of space to allocate */
+      int i;                      /* Counter variable */
+      int nCol = pIdx->nColumn;   /* Number of index columns including rowid */
+
+      nByte = sizeof(Mem) * nCol + ROUND8(sizeof(UnpackedRecord));
+      pRec = (UnpackedRecord*)sqlite3DbMallocZero(db, nByte);
+      if( pRec ){
+        pRec->pKeyInfo = sqlite3KeyInfoOfIndex(p->pParse, pIdx);
+        if( pRec->pKeyInfo ){
+          assert( pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField==nCol );
+          assert( pRec->pKeyInfo->enc==ENC(db) );
+          pRec->aMem = (Mem *)((u8*)pRec + ROUND8(sizeof(UnpackedRecord)));
+          for(i=0; i<nCol; i++){
+            pRec->aMem[i].flags = MEM_Null;
+            pRec->aMem[i].db = db;
+          }
+        }else{
+          sqlite3DbFree(db, pRec);
+          pRec = 0;
+        }
+      }
+      if( pRec==0 ) return 0;
+      p->ppRec[0] = pRec;
+    }
+
+    pRec->nField = p->iVal+1;
+    return &pRec->aMem[p->iVal];
+  }
+#else
+  UNUSED_PARAMETER(p);
+#endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */
+  return sqlite3ValueNew(db);
+}
+
+/*
+** The expression object indicated by the second argument is guaranteed
+** to be a scalar SQL function. If
+**
+**   * all function arguments are SQL literals,
+**   * one of the SQLITE_FUNC_CONSTANT or _SLOCHNG function flags is set, and
+**   * the SQLITE_FUNC_NEEDCOLL function flag is not set,
+**
+** then this routine attempts to invoke the SQL function. Assuming no
+** error occurs, output parameter (*ppVal) is set to point to a value
+** object containing the result before returning SQLITE_OK.
+**
+** Affinity aff is applied to the result of the function before returning.
+** If the result is a text value, the sqlite3_value object uses encoding
+** enc.
+**
+** If the conditions above are not met, this function returns SQLITE_OK
+** and sets (*ppVal) to NULL. Or, if an error occurs, (*ppVal) is set to
+** NULL and an SQLite error code is returned.
+*/
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+static int valueFromFunction(
+  sqlite3 *db,                    /* The database connection */
+  Expr *p,                        /* The expression to evaluate */
+  u8 enc,                         /* Encoding to use */
+  u8 aff,                         /* Affinity to use */
+  sqlite3_value **ppVal,          /* Write the new value here */
+  struct ValueNewStat4Ctx *pCtx   /* Second argument for valueNew() */
+){
+  sqlite3_context ctx;            /* Context object for function invocation */
+  sqlite3_value **apVal = 0;      /* Function arguments */
+  int nVal = 0;                   /* Size of apVal[] array */
+  FuncDef *pFunc = 0;             /* Function definition */
+  sqlite3_value *pVal = 0;        /* New value */
+  int rc = SQLITE_OK;             /* Return code */
+  ExprList *pList = 0;            /* Function arguments */
+  int i;                          /* Iterator variable */
+
+  assert( pCtx!=0 );
+  assert( (p->flags & EP_TokenOnly)==0 );
+  pList = p->x.pList;
+  if( pList ) nVal = pList->nExpr;
+  pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0);
+  assert( pFunc );
+  if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0
+   || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)
+  ){
+    return SQLITE_OK;
+  }
+
+  if( pList ){
+    apVal = (sqlite3_value**)sqlite3DbMallocZero(db, sizeof(apVal[0]) * nVal);
+    if( apVal==0 ){
+      rc = SQLITE_NOMEM_BKPT;
+      goto value_from_function_out;
+    }
+    for(i=0; i<nVal; i++){
+      rc = valueFromExpr(db, pList->a[i].pExpr, enc, aff, &apVal[i]);
+      if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out;
+    }
+  }
+
+  pVal = valueNew(db, pCtx);
+  if( pVal==0 ){
+    rc = SQLITE_NOMEM_BKPT;
+    goto value_from_function_out;
+  }
+
+  assert( pCtx->pParse->rc==SQLITE_OK );
+  memset(&ctx, 0, sizeof(ctx));
+  ctx.pOut = pVal;
+  ctx.pFunc = pFunc;
+  pFunc->xSFunc(&ctx, nVal, apVal);
+  if( ctx.isError ){
+    rc = ctx.isError;
+    sqlite3ErrorMsg(pCtx->pParse, "%s", sqlite3_value_text(pVal));
+  }else{
+    sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8);
+    assert( rc==SQLITE_OK );
+    rc = sqlite3VdbeChangeEncoding(pVal, enc);
+    if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){
+      rc = SQLITE_TOOBIG;
+      pCtx->pParse->nErr++;
+    }
+  }
+  pCtx->pParse->rc = rc;
+
+ value_from_function_out:
+  if( rc!=SQLITE_OK ){
+    pVal = 0;
+  }
+  if( apVal ){
+    for(i=0; i<nVal; i++){
+      sqlite3ValueFree(apVal[i]);
+    }
+    sqlite3DbFree(db, apVal);
+  }
+
+  *ppVal = pVal;
+  return rc;
+}
+#else
+# define valueFromFunction(a,b,c,d,e,f) SQLITE_OK
+#endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */
+
+/*
+** Extract a value from the supplied expression in the manner described
+** above sqlite3ValueFromExpr(). Allocate the sqlite3_value object
+** returned by passing pCtx to valueNew(). If pCtx is NULL, the value
+** is allocated by calling sqlite3ValueNew().
+*/
+static int valueFromExpr(
+  sqlite3 *db,                    /* The database connection */
+  Expr *pExpr,                    /* The expression to evaluate */
+  u8 enc,                         /* Encoding to use */
+  u8 affinity,                    /* Affinity to use */
+  sqlite3_value **ppVal,          /* Write the new value here */
+  struct ValueNewStat4Ctx *pCtx   /* Second argument for valueNew() */
+){
+  int op;
+  char *zVal = 0;
+  sqlite3_value *pVal = 0;
+  int negInt = 1;
+  const char *zNeg = "";
+  int rc = SQLITE_OK;
+
+  assert( pExpr!=0 );
+  while( (op = pExpr->op)==TK_UPLUS || op==TK_SPAN ) pExpr = pExpr->pLeft;
+  if( NEVER(op==TK_REGISTER) ) op = pExpr->op2;
+
+  /* Compressed expressions only appear when parsing the DEFAULT clause
+  ** on a table column definition, and hence only when pCtx==0.  This
+  ** check ensures that an EP_TokenOnly expression is never passed down
+  ** into valueFromFunction(). */
+  assert( (pExpr->flags & EP_TokenOnly)==0 || pCtx==0 );
+
+  if( op==TK_CAST ){
+    u8 aff = sqlite3AffinityType(pExpr->u.zToken,0);
+    rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx);
+    testcase( rc!=SQLITE_OK );
+    if( *ppVal ){
+      sqlite3VdbeMemCast(*ppVal, aff, SQLITE_UTF8);
+      sqlite3ValueApplyAffinity(*ppVal, affinity, SQLITE_UTF8);
+    }
+    return rc;
+  }
+
+  /* Handle negative integers in a single step.  This is needed in the
+  ** case when the value is -9223372036854775808.
+ */ + if( op==TK_UMINUS + && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){ + pExpr = pExpr->pLeft; + op = pExpr->op; + negInt = -1; + zNeg = "-"; + } + + if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ + pVal = valueNew(db, pCtx); + if( pVal==0 ) goto no_mem; + if( ExprHasProperty(pExpr, EP_IntValue) ){ + sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt); + }else{ + zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); + if( zVal==0 ) goto no_mem; + sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); + } + if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){ + sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); + }else{ + sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8); + } + if( pVal->flags & (MEM_Int|MEM_Real) ) pVal->flags &= ~MEM_Str; + if( enc!=SQLITE_UTF8 ){ + rc = sqlite3VdbeChangeEncoding(pVal, enc); + } + }else if( op==TK_UMINUS ) { + /* This branch happens for multiple negative signs. Ex: -(-5) */ + if( SQLITE_OK==sqlite3ValueFromExpr(db,pExpr->pLeft,enc,affinity,&pVal) + && pVal!=0 + ){ + sqlite3VdbeMemNumerify(pVal); + if( pVal->flags & MEM_Real ){ + pVal->u.r = -pVal->u.r; + }else if( pVal->u.i==SMALLEST_INT64 ){ + pVal->u.r = -(double)SMALLEST_INT64; + MemSetTypeFlag(pVal, MEM_Real); + }else{ + pVal->u.i = -pVal->u.i; + } + sqlite3ValueApplyAffinity(pVal, affinity, enc); + } + }else if( op==TK_NULL ){ + pVal = valueNew(db, pCtx); + if( pVal==0 ) goto no_mem; + sqlite3VdbeMemNumerify(pVal); + } +#ifndef SQLITE_OMIT_BLOB_LITERAL + else if( op==TK_BLOB ){ + int nVal; + assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); + assert( pExpr->u.zToken[1]=='\'' ); + pVal = valueNew(db, pCtx); + if( !pVal ) goto no_mem; + zVal = &pExpr->u.zToken[2]; + nVal = sqlite3Strlen30(zVal)-1; + assert( zVal[nVal]=='\'' ); + sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(db, zVal, nVal), nVal/2, + 0, SQLITE_DYNAMIC); + } +#endif + +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + else if( op==TK_FUNCTION && pCtx!=0 ){ + rc = valueFromFunction(db, pExpr, enc, affinity, &pVal, pCtx); + } +#endif + + *ppVal = pVal; + return rc; + +no_mem: + sqlite3OomFault(db); + sqlite3DbFree(db, zVal); + assert( *ppVal==0 ); +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( pCtx==0 ) sqlite3ValueFree(pVal); +#else + assert( pCtx==0 ); sqlite3ValueFree(pVal); +#endif + return SQLITE_NOMEM_BKPT; +} + +/* +** Create a new sqlite3_value object, containing the value of pExpr. +** +** This only works for very simple expressions that consist of one constant +** token (i.e. "5", "5.1", "'a string'"). If the expression can +** be converted directly into a value, then the value is allocated and +** a pointer written to *ppVal. The caller is responsible for deallocating +** the value by passing it to sqlite3ValueFree() later on. If the expression +** cannot be converted to a value, then *ppVal is set to NULL. +*/ +SQLITE_PRIVATE int sqlite3ValueFromExpr( + sqlite3 *db, /* The database connection */ + Expr *pExpr, /* The expression to evaluate */ + u8 enc, /* Encoding to use */ + u8 affinity, /* Affinity to use */ + sqlite3_value **ppVal /* Write the new value here */ +){ + return pExpr ? valueFromExpr(db, pExpr, enc, affinity, ppVal, 0) : 0; +} + +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 +/* +** The implementation of the sqlite_record() function. This function accepts +** a single argument of any type. The return value is a formatted database +** record (a blob) containing the argument value. 
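+** (Illustrative use, assuming a STAT3/4-enabled build in which the
+** function is registered:  SELECT sqlite_record(sample) FROM sqlite_stat3;)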
+** +** This is used to convert the value stored in the 'sample' column of the +** sqlite_stat3 table to the record format SQLite uses internally. +*/ +static void recordFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const int file_format = 1; + u32 iSerial; /* Serial type */ + int nSerial; /* Bytes of space for iSerial as varint */ + u32 nVal; /* Bytes of space required for argv[0] */ + int nRet; + sqlite3 *db; + u8 *aRet; + + UNUSED_PARAMETER( argc ); + iSerial = sqlite3VdbeSerialType(argv[0], file_format, &nVal); + nSerial = sqlite3VarintLen(iSerial); + db = sqlite3_context_db_handle(context); + + nRet = 1 + nSerial + nVal; + aRet = sqlite3DbMallocRawNN(db, nRet); + if( aRet==0 ){ + sqlite3_result_error_nomem(context); + }else{ + aRet[0] = nSerial+1; + putVarint32(&aRet[1], iSerial); + sqlite3VdbeSerialPut(&aRet[1+nSerial], argv[0], iSerial); + sqlite3_result_blob(context, aRet, nRet, SQLITE_TRANSIENT); + sqlite3DbFree(db, aRet); + } +} + +/* +** Register built-in functions used to help read ANALYZE data. +*/ +SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void){ + static FuncDef aAnalyzeTableFuncs[] = { + FUNCTION(sqlite_record, 1, 0, 0, recordFunc), + }; + sqlite3InsertBuiltinFuncs(aAnalyzeTableFuncs, ArraySize(aAnalyzeTableFuncs)); +} + +/* +** Attempt to extract a value from pExpr and use it to construct *ppVal. +** +** If pAlloc is not NULL, then an UnpackedRecord object is created for +** pAlloc if one does not exist and the new value is added to the +** UnpackedRecord object. +** +** A value is extracted in the following cases: +** +** * (pExpr==0). In this case the value is assumed to be an SQL NULL, +** +** * The expression is a bound variable, and this is a reprepare, or +** +** * The expression is a literal value. +** +** On success, *ppVal is made to point to the extracted value. The caller +** is responsible for ensuring that the value is eventually freed. +*/ +static int stat4ValueFromExpr( + Parse *pParse, /* Parse context */ + Expr *pExpr, /* The expression to extract a value from */ + u8 affinity, /* Affinity to use */ + struct ValueNewStat4Ctx *pAlloc,/* How to allocate space. Or NULL */ + sqlite3_value **ppVal /* OUT: New value object (or NULL) */ +){ + int rc = SQLITE_OK; + sqlite3_value *pVal = 0; + sqlite3 *db = pParse->db; + + /* Skip over any TK_COLLATE nodes */ + pExpr = sqlite3ExprSkipCollate(pExpr); + + if( !pExpr ){ + pVal = valueNew(db, pAlloc); + if( pVal ){ + sqlite3VdbeMemSetNull((Mem*)pVal); + } + }else if( pExpr->op==TK_VARIABLE + || NEVER(pExpr->op==TK_REGISTER && pExpr->op2==TK_VARIABLE) + ){ + Vdbe *v; + int iBindVar = pExpr->iColumn; + sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar); + if( (v = pParse->pReprepare)!=0 ){ + pVal = valueNew(db, pAlloc); + if( pVal ){ + rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]); + if( rc==SQLITE_OK ){ + sqlite3ValueApplyAffinity(pVal, affinity, ENC(db)); + } + pVal->db = pParse->db; + } + } + }else{ + rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, pAlloc); + } + + assert( pVal==0 || pVal->db==db ); + *ppVal = pVal; + return rc; +} + +/* +** This function is used to allocate and populate UnpackedRecord +** structures intended to be compared against sample index keys stored +** in the sqlite_stat4 table. +** +** A single call to this function populates zero or more fields of the +** record starting with field iVal (fields are numbered from left to +** right starting with 0). A single field is populated if: +** +** * (pExpr==0). 
In this case the value is assumed to be an SQL NULL,
+**
+**  * The expression is a bound variable, and this is a reprepare, or
+**
+**  * The sqlite3ValueFromExpr() function is able to extract a value
+**    from the expression (i.e. the expression is a literal value).
+**
+** Or, if pExpr is a TK_VECTOR, one field is populated for each of the
+** vector components that match either of the two latter criteria listed
+** above.
+**
+** Before any value is appended to the record, the affinity of the
+** corresponding column within index pIdx is applied to it. Before
+** this function returns, output parameter *pnExtract is set to the
+** number of values appended to the record.
+**
+** When this function is called, *ppRec must either point to an object
+** allocated by an earlier call to this function, or must be NULL. If it
+** is NULL and a value can be successfully extracted, a new UnpackedRecord
+** is allocated (and *ppRec set to point to it) before returning.
+**
+** Unless an error is encountered, SQLITE_OK is returned. It is not an
+** error if a value cannot be extracted from pExpr. If an error does
+** occur, an SQLite error code is returned.
+*/
+SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
+  Parse *pParse,                  /* Parse context */
+  Index *pIdx,                    /* Index being probed */
+  UnpackedRecord **ppRec,         /* IN/OUT: Probe record */
+  Expr *pExpr,                    /* The expression to extract a value from */
+  int nElem,                      /* Maximum number of values to append */
+  int iVal,                       /* Array element to populate */
+  int *pnExtract                  /* OUT: Values appended to the record */
+){
+  int rc = SQLITE_OK;
+  int nExtract = 0;
+
+  if( pExpr==0 || pExpr->op!=TK_SELECT ){
+    int i;
+    struct ValueNewStat4Ctx alloc;
+
+    alloc.pParse = pParse;
+    alloc.pIdx = pIdx;
+    alloc.ppRec = ppRec;
+
+    for(i=0; i<nElem; i++){
+      sqlite3_value *pVal = 0;
+      Expr *pElem = (pExpr ? sqlite3VectorFieldSubexpr(pExpr, i) : 0);
+      u8 aff = sqlite3IndexColumnAffinity(pParse->db, pIdx, iVal+i);
+      alloc.iVal = iVal+i;
+      rc = stat4ValueFromExpr(pParse, pElem, aff, &alloc, &pVal);
+      if( !pVal ) break;
+      nExtract++;
+    }
+  }
+
+  *pnExtract = nExtract;
+  return rc;
+}
+
+/*
+** Attempt to extract a value from expression pExpr using the methods
+** as described for sqlite3Stat4ProbeSetValue() above.
+**
+** If successful, set *ppVal to point to a new value object and return
+** SQLITE_OK. If no value can be extracted, but no other error occurs
+** (e.g. OOM), return SQLITE_OK and set *ppVal to NULL. Or, if an error
+** does occur, return an SQLite error code. The final value of *ppVal
+** is undefined in this case.
+*/
+SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(
+  Parse *pParse,                  /* Parse context */
+  Expr *pExpr,                    /* The expression to extract a value from */
+  u8 affinity,                    /* Affinity to use */
+  sqlite3_value **ppVal           /* OUT: New value object (or NULL) */
+){
+  return stat4ValueFromExpr(pParse, pExpr, affinity, 0, ppVal);
+}
+
+/*
+** Extract the iCol-th column from the nRec-byte record in pRec.  Write
+** the column value into *ppVal.  If *ppVal is initially NULL then a new
+** sqlite3_value object is allocated.
+**
+** If *ppVal is initially NULL then the caller is responsible for
+** ensuring that the value written into *ppVal is eventually freed.
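+**
+** Sketch of a call, with placeholder names for the record buffer:
+**
+**     sqlite3_value *pVal = 0;
+**     rc = sqlite3Stat4Column(db, aRec, nRec, iCol, &pVal);
+**     ...
+**     sqlite3ValueFree(pVal);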
+*/
+SQLITE_PRIVATE int sqlite3Stat4Column(
+  sqlite3 *db,                    /* Database handle */
+  const void *pRec,               /* Pointer to buffer containing record */
+  int nRec,                       /* Size of buffer pRec in bytes */
+  int iCol,                       /* Column to extract */
+  sqlite3_value **ppVal           /* OUT: Extracted value */
+){
+  u32 t;                          /* a column type code */
+  int nHdr;                       /* Size of the header in the record */
+  int iHdr;                       /* Next unread header byte */
+  int iField;                     /* Next unread data byte */
+  int szField;                    /* Size of the current data field */
+  int i;                          /* Column index */
+  u8 *a = (u8*)pRec;              /* Typecast byte array */
+  Mem *pMem = *ppVal;             /* Write result into this Mem object */
+
+  assert( iCol>0 );
+  iHdr = getVarint32(a, nHdr);
+  if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT;
+  iField = nHdr;
+  for(i=0; i<=iCol; i++){
+    iHdr += getVarint32(&a[iHdr], t);
+    testcase( iHdr==nHdr );
+    testcase( iHdr==nHdr+1 );
+    if( iHdr>nHdr ) return SQLITE_CORRUPT_BKPT;
+    szField = sqlite3VdbeSerialTypeLen(t);
+    iField += szField;
+  }
+  testcase( iField==nRec );
+  testcase( iField==nRec+1 );
+  if( iField>nRec ) return SQLITE_CORRUPT_BKPT;
+  if( pMem==0 ){
+    pMem = *ppVal = sqlite3ValueNew(db);
+    if( pMem==0 ) return SQLITE_NOMEM_BKPT;
+  }
+  sqlite3VdbeSerialGet(&a[iField-szField], t, pMem);
+  pMem->enc = ENC(db);
+  return SQLITE_OK;
+}
+
+/*
+** Unless it is NULL, the argument must be an UnpackedRecord object returned
+** by an earlier call to sqlite3Stat4ProbeSetValue().  This call deletes
+** the object.
+*/
+SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord *pRec){
+  if( pRec ){
+    int i;
+    int nCol = pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField;
+    Mem *aMem = pRec->aMem;
+    sqlite3 *db = aMem[0].db;
+    for(i=0; i<nCol; i++){
+      sqlite3VdbeMemRelease(&aMem[i]);
+    }
+    sqlite3KeyInfoUnref(pRec->pKeyInfo);
+    sqlite3DbFree(db, pRec);
+  }
+}
+#endif /* ifdef SQLITE_ENABLE_STAT4 */
+
+/*
+** Change the string value of an sqlite3_value object
+*/
+SQLITE_PRIVATE void sqlite3ValueSetStr(
+  sqlite3_value *v,     /* Value to be set */
+  int n,                /* Length of string z */
+  const void *z,        /* Text of the new string */
+  u8 enc,               /* Encoding to use */
+  void (*xDel)(void*)   /* Destructor for the string */
+){
+  if( v ) sqlite3VdbeMemSetStr((Mem *)v, z, n, enc, xDel);
+}
+
+/*
+** Free an sqlite3_value object
+*/
+SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value *v){
+  if( !v ) return;
+  sqlite3VdbeMemRelease((Mem *)v);
+  sqlite3DbFree(((Mem*)v)->db, v);
+}
+
+/*
+** The sqlite3ValueBytes() routine returns the number of bytes in the
+** sqlite3_value object assuming that it uses the encoding "enc".
+** The valueBytes() routine is a helper function.
+*/
+static SQLITE_NOINLINE int valueBytes(sqlite3_value *pVal, u8 enc){
+  return valueToText(pVal, enc)!=0 ? pVal->n : 0;
+}
+SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
+  Mem *p = (Mem*)pVal;
+  assert( (p->flags & MEM_Null)==0 || (p->flags & (MEM_Str|MEM_Blob))==0 );
+  if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){
+    return p->n;
+  }
+  if( (p->flags & MEM_Blob)!=0 ){
+    if( p->flags & MEM_Zero ){
+      return p->n + p->u.nZero;
+    }else{
+      return p->n;
+    }
+  }
+  if( p->flags & MEM_Null ) return 0;
+  return valueBytes(pVal, enc);
+}
+
+/************** End of vdbemem.c *********************************************/
+/************** Begin file vdbeaux.c *****************************************/
+/*
+** 2003 September 6
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used for creating, destroying, and populating +** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.) +*/ +/* #include "sqliteInt.h" */ +/* #include "vdbeInt.h" */ + +/* +** Create a new virtual database engine. +*/ +SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){ + sqlite3 *db = pParse->db; + Vdbe *p; + p = sqlite3DbMallocRawNN(db, sizeof(Vdbe) ); + if( p==0 ) return 0; + memset(&p->aOp, 0, sizeof(Vdbe)-offsetof(Vdbe,aOp)); + p->db = db; + if( db->pVdbe ){ + db->pVdbe->pPrev = p; + } + p->pNext = db->pVdbe; + p->pPrev = 0; + db->pVdbe = p; + p->magic = VDBE_MAGIC_INIT; + p->pParse = pParse; + assert( pParse->aLabel==0 ); + assert( pParse->nLabel==0 ); + assert( pParse->nOpAlloc==0 ); + assert( pParse->szOpAlloc==0 ); + return p; +} + +/* +** Change the error string stored in Vdbe.zErrMsg +*/ +SQLITE_PRIVATE void sqlite3VdbeError(Vdbe *p, const char *zFormat, ...){ + va_list ap; + sqlite3DbFree(p->db, p->zErrMsg); + va_start(ap, zFormat); + p->zErrMsg = sqlite3VMPrintf(p->db, zFormat, ap); + va_end(ap); +} + +/* +** Remember the SQL string for a prepared statement. +*/ +SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepareV2){ + assert( isPrepareV2==1 || isPrepareV2==0 ); + if( p==0 ) return; +#if defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_ENABLE_SQLLOG) + if( !isPrepareV2 ) return; +#endif + assert( p->zSql==0 ); + p->zSql = sqlite3DbStrNDup(p->db, z, n); + p->isPrepareV2 = (u8)isPrepareV2; +} + +/* +** Swap all content between two VDBE structures. +*/ +SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ + Vdbe tmp, *pTmp; + char *zTmp; + assert( pA->db==pB->db ); + tmp = *pA; + *pA = *pB; + *pB = tmp; + pTmp = pA->pNext; + pA->pNext = pB->pNext; + pB->pNext = pTmp; + pTmp = pA->pPrev; + pA->pPrev = pB->pPrev; + pB->pPrev = pTmp; + zTmp = pA->zSql; + pA->zSql = pB->zSql; + pB->zSql = zTmp; + pB->isPrepareV2 = pA->isPrepareV2; +} + +/* +** Resize the Vdbe.aOp array so that it is at least nOp elements larger +** than its current size. nOp is guaranteed to be less than or equal +** to 1024/sizeof(Op). +** +** If an out-of-memory error occurs while resizing the array, return +** SQLITE_NOMEM. In this case Vdbe.aOp and Parse.nOpAlloc remain +** unchanged (this is so that any opcodes already allocated can be +** correctly deallocated along with the rest of the Vdbe). +*/ +static int growOpArray(Vdbe *v, int nOp){ + VdbeOp *pNew; + Parse *p = v->pParse; + + /* The SQLITE_TEST_REALLOC_STRESS compile-time option is designed to force + ** more frequent reallocs and hence provide more opportunities for + ** simulated OOM faults. SQLITE_TEST_REALLOC_STRESS is generally used + ** during testing only. With SQLITE_TEST_REALLOC_STRESS grow the op array + ** by the minimum* amount required until the size reaches 512. Normal + ** operation (without SQLITE_TEST_REALLOC_STRESS) is to double the current + ** size of the op array or add 1KB of space, whichever is smaller. */ +#ifdef SQLITE_TEST_REALLOC_STRESS + int nNew = (p->nOpAlloc>=512 ? p->nOpAlloc*2 : p->nOpAlloc+nOp); +#else + int nNew = (p->nOpAlloc ? 
p->nOpAlloc*2 : (int)(1024/sizeof(Op)));
+  UNUSED_PARAMETER(nOp);
+#endif
+
+  assert( nOp<=(1024/sizeof(Op)) );
+  assert( nNew>=(p->nOpAlloc+nOp) );
+  pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op));
+  if( pNew ){
+    p->szOpAlloc = sqlite3DbMallocSize(p->db, pNew);
+    p->nOpAlloc = p->szOpAlloc/sizeof(Op);
+    v->aOp = pNew;
+  }
+  return (pNew ? SQLITE_OK : SQLITE_NOMEM_BKPT);
+}
+
+#ifdef SQLITE_DEBUG
+/* This routine is just a convenient place to set a breakpoint that will
+** fire after each opcode is inserted and displayed using
+** "PRAGMA vdbe_addoptrace=on".
+*/
+static void test_addop_breakpoint(void){
+  static int n = 0;
+  n++;
+}
+#endif
+
+/*
+** Add a new instruction to the list of instructions currently in the
+** VDBE.  Return the address of the new instruction.
+**
+** Parameters:
+**
+**    p               Pointer to the VDBE
+**
+**    op              The opcode for this instruction
+**
+**    p1, p2, p3      Operands
+**
+** Use the sqlite3VdbeResolveLabel() function to fix an address and
+** the sqlite3VdbeChangeP4() function to change the value of the P4
+** operand.
+*/
+static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){
+  assert( p->pParse->nOpAlloc<=p->nOp );
+  if( growOpArray(p, 1) ) return 1;
+  assert( p->pParse->nOpAlloc>p->nOp );
+  return sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
+  int i;
+  VdbeOp *pOp;
+
+  i = p->nOp;
+  assert( p->magic==VDBE_MAGIC_INIT );
+  assert( op>=0 && op<0xff );
+  if( p->pParse->nOpAlloc<=i ){
+    return growOp3(p, op, p1, p2, p3);
+  }
+  p->nOp++;
+  pOp = &p->aOp[i];
+  pOp->opcode = (u8)op;
+  pOp->p5 = 0;
+  pOp->p1 = p1;
+  pOp->p2 = p2;
+  pOp->p3 = p3;
+  pOp->p4.p = 0;
+  pOp->p4type = P4_NOTUSED;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+  pOp->zComment = 0;
+#endif
+#ifdef SQLITE_DEBUG
+  if( p->db->flags & SQLITE_VdbeAddopTrace ){
+    int jj, kk;
+    Parse *pParse = p->pParse;
+    for(jj=kk=0; jj<pParse->nColCache; jj++){
+      struct yColCache *x = pParse->aColCache + jj;
+      printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn);
+      kk++;
+    }
+    if( kk ) printf("\n");
+    sqlite3VdbePrintOp(0, i, &p->aOp[i]);
+    test_addop_breakpoint();
+  }
+#endif
+#ifdef VDBE_PROFILE
+  pOp->cycles = 0;
+  pOp->cnt = 0;
+#endif
+#ifdef SQLITE_VDBE_COVERAGE
+  pOp->iSrcLine = 0;
+#endif
+  return i;
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){
+  return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){
+  return sqlite3VdbeAddOp3(p, op, p1, 0, 0);
+}
+SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){
+  return sqlite3VdbeAddOp3(p, op, p1, p2, 0);
+}
+
+/* Generate code for an unconditional jump to instruction iDest
+*/
+SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe *p, int iDest){
+  return sqlite3VdbeAddOp3(p, OP_Goto, 0, iDest, 0);
+}
+
+/* Generate code to cause the string zStr to be loaded into
+** register iDest
+*/
+SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe *p, int iDest, const char *zStr){
+  return sqlite3VdbeAddOp4(p, OP_String8, 0, iDest, 0, zStr, 0);
+}
+
+/*
+** Generate code that initializes multiple registers to string or integer
+** constants.  The registers begin with iDest and increase consecutively.
+** One register is initialized for each character in zTypes[].  For each
+** "s" character in zTypes[], the register is a string if the argument is
+** not NULL, or OP_Null if the value is a null pointer.  For each "i" character
+** in zTypes[], the register is initialized to an integer.
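+**
+** For example (sketch), to load a string, another string, and then an
+** integer into registers iDest, iDest+1 and iDest+2:
+**
+**     sqlite3VdbeMultiLoad(v, iDest, "ssi", zName, zType, 0);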
+*/
+SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe *p, int iDest, const char *zTypes, ...){
+  va_list ap;
+  int i;
+  char c;
+  va_start(ap, zTypes);
+  for(i=0; (c = zTypes[i])!=0; i++){
+    if( c=='s' ){
+      const char *z = va_arg(ap, const char*);
+      sqlite3VdbeAddOp4(p, z==0 ? OP_Null : OP_String8, 0, iDest++, 0, z, 0);
+    }else{
+      assert( c=='i' );
+      sqlite3VdbeAddOp2(p, OP_Integer, va_arg(ap, int), iDest++);
+    }
+  }
+  va_end(ap);
+}
+
+/*
+** Add an opcode that includes the p4 value as a pointer.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4(
+  Vdbe *p,            /* Add the opcode to this VM */
+  int op,             /* The new opcode */
+  int p1,             /* The P1 operand */
+  int p2,             /* The P2 operand */
+  int p3,             /* The P3 operand */
+  const char *zP4,    /* The P4 operand */
+  int p4type          /* P4 operand type */
+){
+  int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+  sqlite3VdbeChangeP4(p, addr, zP4, p4type);
+  return addr;
+}
+
+/*
+** Add an opcode that includes the p4 value with a P4_INT64 or
+** P4_REAL type.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(
+  Vdbe *p,            /* Add the opcode to this VM */
+  int op,             /* The new opcode */
+  int p1,             /* The P1 operand */
+  int p2,             /* The P2 operand */
+  int p3,             /* The P3 operand */
+  const u8 *zP4,      /* The P4 operand */
+  int p4type          /* P4 operand type */
+){
+  char *p4copy = sqlite3DbMallocRawNN(sqlite3VdbeDb(p), 8);
+  if( p4copy ) memcpy(p4copy, zP4, 8);
+  return sqlite3VdbeAddOp4(p, op, p1, p2, p3, p4copy, p4type);
+}
+
+/*
+** Add an OP_ParseSchema opcode.  This routine is broken out from
+** sqlite3VdbeAddOp4() since it also needs to mark all btrees as having
+** been used.
+**
+** The zWhere string must have been obtained from sqlite3_malloc().
+** This routine will take ownership of the allocated memory.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){
+  int j;
+  sqlite3VdbeAddOp4(p, OP_ParseSchema, iDb, 0, 0, zWhere, P4_DYNAMIC);
+  for(j=0; j<p->db->nDb; j++) sqlite3VdbeUsesBtree(p, j);
+}
+
+/*
+** Add an opcode that includes the p4 value as an integer.
+*/
+SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
+  Vdbe *p,            /* Add the opcode to this VM */
+  int op,             /* The new opcode */
+  int p1,             /* The P1 operand */
+  int p2,             /* The P2 operand */
+  int p3,             /* The P3 operand */
+  int p4              /* The P4 operand as an integer */
+){
+  int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
+  if( p->db->mallocFailed==0 ){
+    VdbeOp *pOp = &p->aOp[addr];
+    pOp->p4type = P4_INT32;
+    pOp->p4.i = p4;
+  }
+  return addr;
+}
+
+/* Insert the end of a co-routine
+*/
+SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){
+  sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield);
+
+  /* Clear the temporary register cache, thereby ensuring that each
+  ** co-routine has its own independent set of registers, because co-routines
+  ** might expect their registers to be preserved across an OP_Yield, and
+  ** that could cause problems if two or more co-routines are using the same
+  ** temporary register.
+  */
+  v->pParse->nTempReg = 0;
+  v->pParse->nRangeReg = 0;
+}
+
+/*
+** Create a new symbolic label for an instruction that has yet to be
+** coded.  The symbolic label is really just a negative number.  The
+** label can be used as the P2 value of an operation.  Later, when
+** the label is resolved to a specific address, the VDBE will scan
+** through its operation list and change all values of P2 which match
+** the label into the resolved address.
+**
+** The VDBE knows that a P2 value is a label because labels are
+** always negative and P2 values are supposed to be non-negative.
+** Hence, a negative P2 value is a label that has yet to be resolved.
+**
+** Zero is returned if a malloc() fails.
+*/
+SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *v){
+  Parse *p = v->pParse;
+  int i = p->nLabel++;
+  assert( v->magic==VDBE_MAGIC_INIT );
+  if( (i & (i-1))==0 ){
+    p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel,
+                                       (i*2+1)*sizeof(p->aLabel[0]));
+  }
+  if( p->aLabel ){
+    p->aLabel[i] = -1;
+  }
+  return ADDR(i);
+}
+
+/*
+** Resolve label "x" to be the address of the next instruction to
+** be inserted.  The parameter "x" must have been obtained from
+** a prior call to sqlite3VdbeMakeLabel().
+*/
+SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
+  Parse *p = v->pParse;
+  int j = ADDR(x);
+  assert( v->magic==VDBE_MAGIC_INIT );
+  assert( j<p->nLabel );
+  assert( j>=0 );
+  if( p->aLabel ){
+    p->aLabel[j] = v->nOp;
+  }
+}
+
+/*
+** Mark the VDBE as one that can only be run one time.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe *p){
+  p->runOnlyOnce = 1;
+}
+
+/*
+** Mark the VDBE as one that can be run multiple times.
+*/
+SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe *p){
+  p->runOnlyOnce = 0;
+}
+
+#ifdef SQLITE_DEBUG /* sqlite3AssertMayAbort() logic */
+
+/*
+** The following type and function are used to iterate through all opcodes
+** in a Vdbe main program and each of the sub-programs (triggers) it may
+** invoke directly or indirectly.  It should be used as follows:
+**
+**   Op *pOp;
+**   VdbeOpIter sIter;
+**
+**   memset(&sIter, 0, sizeof(sIter));
+**   sIter.v = v;                            // v is of type Vdbe*
+**   while( (pOp = opIterNext(&sIter)) ){
+**     // Do something with pOp
+**   }
+**   sqlite3DbFree(v->db, sIter.apSub);
+*/
+typedef struct VdbeOpIter VdbeOpIter;
+struct VdbeOpIter {
+  Vdbe *v;                   /* Vdbe to iterate through the opcodes of */
+  SubProgram **apSub;        /* Array of subprograms */
+  int nSub;                  /* Number of entries in apSub */
+  int iAddr;                 /* Address of next instruction to return */
+  int iSub;                  /* 0 = main program, 1 = first sub-program etc. */
+};
+static Op *opIterNext(VdbeOpIter *p){
+  Vdbe *v = p->v;
+  Op *pRet = 0;
+  Op *aOp;
+  int nOp;
+
+  if( p->iSub<=p->nSub ){
+
+    if( p->iSub==0 ){
+      aOp = v->aOp;
+      nOp = v->nOp;
+    }else{
+      aOp = p->apSub[p->iSub-1]->aOp;
+      nOp = p->apSub[p->iSub-1]->nOp;
+    }
+    assert( p->iAddr<nOp );
+
+    pRet = &aOp[p->iAddr];
+    p->iAddr++;
+    if( p->iAddr==nOp ){
+      p->iSub++;
+      p->iAddr = 0;
+    }
+
+    if( pRet->p4type==P4_SUBPROGRAM ){
+      int nByte = (p->nSub+1)*sizeof(SubProgram*);
+      int j;
+      for(j=0; j<p->nSub; j++){
+        if( p->apSub[j]==pRet->p4.pProgram ) break;
+      }
+      if( j==p->nSub ){
+        p->apSub = sqlite3DbReallocOrFree(v->db, p->apSub, nByte);
+        if( !p->apSub ){
+          pRet = 0;
+        }else{
+          p->apSub[p->nSub++] = pRet->p4.pProgram;
+        }
+      }
+    }
+  }
+
+  return pRet;
+}
+
+/*
+** Check if the program stored in the VM associated with pParse may
+** throw an ABORT exception (causing the statement, but not the entire
+** transaction, to be rolled back).  This condition is true if the main
+** program or any sub-programs contains any of the following:
+**
+**   *  OP_Halt with P1=SQLITE_CONSTRAINT and P2=OE_Abort.
+**   *  OP_HaltIfNull with P1=SQLITE_CONSTRAINT and P2=OE_Abort.
+**   *  OP_Destroy
+**   *  OP_VUpdate
+**   *  OP_VRename
+**   *  OP_FkCounter with P2==0 (immediate foreign key constraint)
+**   *  OP_CreateTable and OP_InitCoroutine (for CREATE TABLE AS SELECT ...)
+**
+** Then check that the value of Parse.mayAbort is true if an
+** ABORT may be thrown, or false otherwise.  Return true if it does
+** match, or false otherwise.
This function is intended to be used as +** part of an assert statement in the compiler. Similar to: +** +** assert( sqlite3VdbeAssertMayAbort(pParse->pVdbe, pParse->mayAbort) ); +*/ +SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){ + int hasAbort = 0; + int hasFkCounter = 0; + int hasCreateTable = 0; + int hasInitCoroutine = 0; + Op *pOp; + VdbeOpIter sIter; + memset(&sIter, 0, sizeof(sIter)); + sIter.v = v; + + while( (pOp = opIterNext(&sIter))!=0 ){ + int opcode = pOp->opcode; + if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename + || ((opcode==OP_Halt || opcode==OP_HaltIfNull) + && ((pOp->p1&0xff)==SQLITE_CONSTRAINT && pOp->p2==OE_Abort)) + ){ + hasAbort = 1; + break; + } + if( opcode==OP_CreateTable ) hasCreateTable = 1; + if( opcode==OP_InitCoroutine ) hasInitCoroutine = 1; +#ifndef SQLITE_OMIT_FOREIGN_KEY + if( opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1 ){ + hasFkCounter = 1; + } +#endif + } + sqlite3DbFree(v->db, sIter.apSub); + + /* Return true if hasAbort==mayAbort. Or if a malloc failure occurred. + ** If malloc failed, then the while() loop above may not have iterated + ** through all opcodes and hasAbort may be set incorrectly. Return + ** true for this case to prevent the assert() in the callers frame + ** from failing. */ + return ( v->db->mallocFailed || hasAbort==mayAbort || hasFkCounter + || (hasCreateTable && hasInitCoroutine) ); +} +#endif /* SQLITE_DEBUG - the sqlite3AssertMayAbort() function */ + +/* +** This routine is called after all opcodes have been inserted. It loops +** through all the opcodes and fixes up some details. +** +** (1) For each jump instruction with a negative P2 value (a label) +** resolve the P2 value to an actual address. +** +** (2) Compute the maximum number of arguments used by any SQL function +** and store that value in *pMaxFuncArgs. +** +** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately +** indicate what the prepared statement actually does. +** +** (4) Initialize the p4.xAdvance pointer on opcodes that use it. +** +** (5) Reclaim the memory allocated for storing labels. +** +** This routine will only function correctly if the mkopcodeh.tcl generator +** script numbers the opcodes correctly. Changes to this routine must be +** coordinated with changes to mkopcodeh.tcl. +*/ +static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ + int nMaxArgs = *pMaxFuncArgs; + Op *pOp; + Parse *pParse = p->pParse; + int *aLabel = pParse->aLabel; + p->readOnly = 1; + p->bIsReader = 0; + pOp = &p->aOp[p->nOp-1]; + while(1){ + + /* Only JUMP opcodes and the short list of special opcodes in the switch + ** below need to be considered. The mkopcodeh.tcl generator script groups + ** all these opcodes together near the front of the opcode list. Skip + ** any opcode that does not need processing by virtual of the fact that + ** it is larger than SQLITE_MX_JUMP_OPCODE, as a performance optimization. + */ + if( pOp->opcode<=SQLITE_MX_JUMP_OPCODE ){ + /* NOTE: Be sure to update mkopcodeh.tcl when adding or removing + ** cases from this switch! 
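+**
+** To illustrate the label mechanism this pass resolves (the label value
+** -1 below is hypothetical): a forward jump is typically coded as
+**
+**       int lbl = sqlite3VdbeMakeLabel(v);      .. suppose lbl==-1
+**       sqlite3VdbeAddOp2(v, OP_Goto, 0, lbl);  .. P2 holds -1 for now
+**       ...                                     .. code the jump target
+**       sqlite3VdbeResolveLabel(v, lbl);        .. records v->nOp in aLabel
+**
+** and the loop here then rewrites that negative P2 into the recorded
+** address, aLabel[ADDR(lbl)].  (sqlite3VdbeJumpHere(), later in this
+** file, is the analogous helper for backpatching an explicit opcode
+** address instead of a label.)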
*/ + switch( pOp->opcode ){ + case OP_Transaction: { + if( pOp->p2!=0 ) p->readOnly = 0; + /* fall thru */ + } + case OP_AutoCommit: + case OP_Savepoint: { + p->bIsReader = 1; + break; + } +#ifndef SQLITE_OMIT_WAL + case OP_Checkpoint: +#endif + case OP_Vacuum: + case OP_JournalMode: { + p->readOnly = 0; + p->bIsReader = 1; + break; + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + case OP_VUpdate: { + if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; + break; + } + case OP_VFilter: { + int n; + assert( (pOp - p->aOp) >= 3 ); + assert( pOp[-1].opcode==OP_Integer ); + n = pOp[-1].p1; + if( n>nMaxArgs ) nMaxArgs = n; + break; + } +#endif + case OP_Next: + case OP_NextIfOpen: + case OP_SorterNext: { + pOp->p4.xAdvance = sqlite3BtreeNext; + pOp->p4type = P4_ADVANCE; + break; + } + case OP_Prev: + case OP_PrevIfOpen: { + pOp->p4.xAdvance = sqlite3BtreePrevious; + pOp->p4type = P4_ADVANCE; + break; + } + } + if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 && pOp->p2<0 ){ + assert( ADDR(pOp->p2)nLabel ); + pOp->p2 = aLabel[ADDR(pOp->p2)]; + } + } + if( pOp==p->aOp ) break; + pOp--; + } + sqlite3DbFree(p->db, pParse->aLabel); + pParse->aLabel = 0; + pParse->nLabel = 0; + *pMaxFuncArgs = nMaxArgs; + assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) ); +} + +/* +** Return the address of the next instruction to be inserted. +*/ +SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){ + assert( p->magic==VDBE_MAGIC_INIT ); + return p->nOp; +} + +/* +** Verify that at least N opcode slots are available in p without +** having to malloc for more space (except when compiled using +** SQLITE_TEST_REALLOC_STRESS). This interface is used during testing +** to verify that certain calls to sqlite3VdbeAddOpList() can never +** fail due to a OOM fault and hence that the return value from +** sqlite3VdbeAddOpList() will always be non-NULL. +*/ +#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS) +SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){ + assert( p->nOp + N <= p->pParse->nOpAlloc ); +} +#endif + +/* +** Verify that the VM passed as the only argument does not contain +** an OP_ResultRow opcode. Fail an assert() if it does. This is used +** by code in pragma.c to ensure that the implementation of certain +** pragmas comports with the flags specified in the mkpragmatab.tcl +** script. +*/ +#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS) +SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p){ + int i; + for(i=0; inOp; i++){ + assert( p->aOp[i].opcode!=OP_ResultRow ); + } +} +#endif + +/* +** This function returns a pointer to the array of opcodes associated with +** the Vdbe passed as the first argument. It is the callers responsibility +** to arrange for the returned array to be eventually freed using the +** vdbeFreeOpArray() function. +** +** Before returning, *pnOp is set to the number of entries in the returned +** array. Also, *pnMaxArg is set to the larger of its current value and +** the number of entries in the Vdbe.apArg[] array required to execute the +** returned program. +*/ +SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg){ + VdbeOp *aOp = p->aOp; + assert( aOp && !p->db->mallocFailed ); + + /* Check that sqlite3VdbeUsesBtree() was not called on this VM */ + assert( DbMaskAllZero(p->btreeMask) ); + + resolveP2Values(p, pnMaxArg); + *pnOp = p->nOp; + p->aOp = 0; + return aOp; +} + +/* +** Add a whole list of operations to the operation stack. Return a +** pointer to the first operation inserted. 
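+**
+** A sketch of typical use (the template below is invented for
+** illustration; real callers pass tables of real opcodes):
+**
+**       static const VdbeOpList tmpl[] = {
+**         { OP_Integer, 1, 2, 0 },    .. 0: r[2]=1
+**         { OP_Goto,    0, 2, 0 },    .. 1: jump to template entry 2
+**         { OP_Noop,    0, 0, 0 },    .. 2: jump target
+**       };
+**       VdbeOp *pFirst = sqlite3VdbeAddOpList(v, 3, tmpl, __LINE__);
+**
+** Because OP_Goto is a jump opcode with P2>0, its P2 is rebased by the
+** insertion point (p->nOp) so that the jump still lands on entry 2 of
+** the template wherever the template is copied into the program.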
+** +** Non-zero P2 arguments to jump instructions are automatically adjusted +** so that the jump target is relative to the first operation inserted. +*/ +SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList( + Vdbe *p, /* Add opcodes to the prepared statement */ + int nOp, /* Number of opcodes to add */ + VdbeOpList const *aOp, /* The opcodes to be added */ + int iLineno /* Source-file line number of first opcode */ +){ + int i; + VdbeOp *pOut, *pFirst; + assert( nOp>0 ); + assert( p->magic==VDBE_MAGIC_INIT ); + if( p->nOp + nOp > p->pParse->nOpAlloc && growOpArray(p, nOp) ){ + return 0; + } + pFirst = pOut = &p->aOp[p->nOp]; + for(i=0; iopcode = aOp->opcode; + pOut->p1 = aOp->p1; + pOut->p2 = aOp->p2; + assert( aOp->p2>=0 ); + if( (sqlite3OpcodeProperty[aOp->opcode] & OPFLG_JUMP)!=0 && aOp->p2>0 ){ + pOut->p2 += p->nOp; + } + pOut->p3 = aOp->p3; + pOut->p4type = P4_NOTUSED; + pOut->p4.p = 0; + pOut->p5 = 0; +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + pOut->zComment = 0; +#endif +#ifdef SQLITE_VDBE_COVERAGE + pOut->iSrcLine = iLineno+i; +#else + (void)iLineno; +#endif +#ifdef SQLITE_DEBUG + if( p->db->flags & SQLITE_VdbeAddopTrace ){ + sqlite3VdbePrintOp(0, i+p->nOp, &p->aOp[i+p->nOp]); + } +#endif + } + p->nOp += nOp; + return pFirst; +} + +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) +/* +** Add an entry to the array of counters managed by sqlite3_stmt_scanstatus(). +*/ +SQLITE_PRIVATE void sqlite3VdbeScanStatus( + Vdbe *p, /* VM to add scanstatus() to */ + int addrExplain, /* Address of OP_Explain (or 0) */ + int addrLoop, /* Address of loop counter */ + int addrVisit, /* Address of rows visited counter */ + LogEst nEst, /* Estimated number of output rows */ + const char *zName /* Name of table or index being scanned */ +){ + int nByte = (p->nScan+1) * sizeof(ScanStatus); + ScanStatus *aNew; + aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); + if( aNew ){ + ScanStatus *pNew = &aNew[p->nScan++]; + pNew->addrExplain = addrExplain; + pNew->addrLoop = addrLoop; + pNew->addrVisit = addrVisit; + pNew->nEst = nEst; + pNew->zName = sqlite3DbStrDup(p->db, zName); + p->aScan = aNew; + } +} +#endif + + +/* +** Change the value of the opcode, or P1, P2, P3, or P5 operands +** for a specific instruction. +*/ +SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe *p, u32 addr, u8 iNewOpcode){ + sqlite3VdbeGetOp(p,addr)->opcode = iNewOpcode; +} +SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, u32 addr, int val){ + sqlite3VdbeGetOp(p,addr)->p1 = val; +} +SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){ + sqlite3VdbeGetOp(p,addr)->p2 = val; +} +SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){ + sqlite3VdbeGetOp(p,addr)->p3 = val; +} +SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ + assert( p->nOp>0 || p->db->mallocFailed ); + if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5; +} + +/* +** Change the P2 operand of instruction addr so that it points to +** the address of the next instruction to be coded. +*/ +SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){ + sqlite3VdbeChangeP2(p, addr, p->nOp); +} + + +/* +** If the input FuncDef structure is ephemeral, then free it. If +** the FuncDef is not ephermal, then do nothing. +*/ +static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ + if( (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){ + sqlite3DbFree(db, pDef); + } +} + +static void vdbeFreeOpArray(sqlite3 *, Op *, int); + +/* +** Delete a P4 value if necessary. 
+*/ +static SQLITE_NOINLINE void freeP4Mem(sqlite3 *db, Mem *p){ + if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); + sqlite3DbFree(db, p); +} +static SQLITE_NOINLINE void freeP4FuncCtx(sqlite3 *db, sqlite3_context *p){ + freeEphemeralFunction(db, p->pFunc); + sqlite3DbFree(db, p); +} +static void freeP4(sqlite3 *db, int p4type, void *p4){ + assert( db ); + switch( p4type ){ + case P4_FUNCCTX: { + freeP4FuncCtx(db, (sqlite3_context*)p4); + break; + } + case P4_REAL: + case P4_INT64: + case P4_DYNAMIC: + case P4_INTARRAY: { + sqlite3DbFree(db, p4); + break; + } + case P4_KEYINFO: { + if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4); + break; + } +#ifdef SQLITE_ENABLE_CURSOR_HINTS + case P4_EXPR: { + sqlite3ExprDelete(db, (Expr*)p4); + break; + } +#endif + case P4_FUNCDEF: { + freeEphemeralFunction(db, (FuncDef*)p4); + break; + } + case P4_MEM: { + if( db->pnBytesFreed==0 ){ + sqlite3ValueFree((sqlite3_value*)p4); + }else{ + freeP4Mem(db, (Mem*)p4); + } + break; + } + case P4_VTAB : { + if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); + break; + } + } +} + +/* +** Free the space allocated for aOp and any p4 values allocated for the +** opcodes contained within. If aOp is not NULL it is assumed to contain +** nOp entries. +*/ +static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){ + if( aOp ){ + Op *pOp; + for(pOp=aOp; pOp<&aOp[nOp]; pOp++){ + if( pOp->p4type ) freeP4(db, pOp->p4type, pOp->p4.p); +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + sqlite3DbFree(db, pOp->zComment); +#endif + } + } + sqlite3DbFree(db, aOp); +} + +/* +** Link the SubProgram object passed as the second argument into the linked +** list at Vdbe.pSubProgram. This list is used to delete all sub-program +** objects when the VM is no longer required. +*/ +SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *pVdbe, SubProgram *p){ + p->pNext = pVdbe->pProgram; + pVdbe->pProgram = p; +} + +/* +** Change the opcode at addr into OP_Noop +*/ +SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe *p, int addr){ + VdbeOp *pOp; + if( p->db->mallocFailed ) return 0; + assert( addr>=0 && addrnOp ); + pOp = &p->aOp[addr]; + freeP4(p->db, pOp->p4type, pOp->p4.p); + pOp->p4type = P4_NOTUSED; + pOp->p4.z = 0; + pOp->opcode = OP_Noop; + return 1; +} + +/* +** If the last opcode is "op" and it is not a jump destination, +** then remove it. Return true if and only if an opcode was removed. +*/ +SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){ + if( p->nOp>0 && p->aOp[p->nOp-1].opcode==op ){ + return sqlite3VdbeChangeToNoop(p, p->nOp-1); + }else{ + return 0; + } +} + +/* +** Change the value of the P4 operand for a specific instruction. +** This routine is useful when a large program is loaded from a +** static array using sqlite3VdbeAddOpList but we want to make a +** few minor changes to the program. +** +** If n>=0 then the P4 operand is dynamic, meaning that a copy of +** the string is made into memory obtained from sqlite3_malloc(). +** A value of n==0 means copy bytes of zP4 up to and including the +** first null byte. If n>0 then copy n+1 bytes of zP4. +** +** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points +** to a string or structure that is guaranteed to exist for the lifetime of +** the Vdbe. In these cases we can just copy the pointer. +** +** If addr<0 then change P4 on the most recently inserted instruction. 
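+**
+** Illustrative calls (addr, zName and pColl are hypothetical):
+**
+**       sqlite3VdbeChangeP4(v, addr, "main", 0);   .. copy through the NUL
+**       sqlite3VdbeChangeP4(v, addr, zName, 4);    .. copy 4 bytes plus a NUL
+**       sqlite3VdbeChangeP4(v, addr, (const char*)pColl, P4_COLLSEQ);
+**                                                  .. pointer only, no copy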
+*/ +static void SQLITE_NOINLINE vdbeChangeP4Full( + Vdbe *p, + Op *pOp, + const char *zP4, + int n +){ + if( pOp->p4type ){ + freeP4(p->db, pOp->p4type, pOp->p4.p); + pOp->p4type = 0; + pOp->p4.p = 0; + } + if( n<0 ){ + sqlite3VdbeChangeP4(p, (int)(pOp - p->aOp), zP4, n); + }else{ + if( n==0 ) n = sqlite3Strlen30(zP4); + pOp->p4.z = sqlite3DbStrNDup(p->db, zP4, n); + pOp->p4type = P4_DYNAMIC; + } +} +SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){ + Op *pOp; + sqlite3 *db; + assert( p!=0 ); + db = p->db; + assert( p->magic==VDBE_MAGIC_INIT ); + assert( p->aOp!=0 || db->mallocFailed ); + if( db->mallocFailed ){ + if( n!=P4_VTAB ) freeP4(db, n, (void*)*(char**)&zP4); + return; + } + assert( p->nOp>0 ); + assert( addrnOp ); + if( addr<0 ){ + addr = p->nOp - 1; + } + pOp = &p->aOp[addr]; + if( n>=0 || pOp->p4type ){ + vdbeChangeP4Full(p, pOp, zP4, n); + return; + } + if( n==P4_INT32 ){ + /* Note: this cast is safe, because the origin data point was an int + ** that was cast to a (const char *). */ + pOp->p4.i = SQLITE_PTR_TO_INT(zP4); + pOp->p4type = P4_INT32; + }else if( zP4!=0 ){ + assert( n<0 ); + pOp->p4.p = (void*)zP4; + pOp->p4type = (signed char)n; + if( n==P4_VTAB ) sqlite3VtabLock((VTable*)zP4); + } +} + +/* +** Change the P4 operand of the most recently coded instruction +** to the value defined by the arguments. This is a high-speed +** version of sqlite3VdbeChangeP4(). +** +** The P4 operand must not have been previously defined. And the new +** P4 must not be P4_INT32. Use sqlite3VdbeChangeP4() in either of +** those cases. +*/ +SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){ + VdbeOp *pOp; + assert( n!=P4_INT32 && n!=P4_VTAB ); + assert( n<=0 ); + if( p->db->mallocFailed ){ + freeP4(p->db, n, pP4); + }else{ + assert( pP4!=0 ); + assert( p->nOp>0 ); + pOp = &p->aOp[p->nOp-1]; + assert( pOp->p4type==P4_NOTUSED ); + pOp->p4type = n; + pOp->p4.p = pP4; + } +} + +/* +** Set the P4 on the most recently added opcode to the KeyInfo for the +** index given. +*/ +SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){ + Vdbe *v = pParse->pVdbe; + KeyInfo *pKeyInfo; + assert( v!=0 ); + assert( pIdx!=0 ); + pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pIdx); + if( pKeyInfo ) sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO); +} + +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS +/* +** Change the comment on the most recently coded instruction. Or +** insert a No-op and add the comment to that new instruction. This +** makes the code easier to read during debugging. None of this happens +** in a production build. +*/ +static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){ + assert( p->nOp>0 || p->aOp==0 ); + assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); + if( p->nOp ){ + assert( p->aOp ); + sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment); + p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap); + } +} +SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){ + va_list ap; + if( p ){ + va_start(ap, zFormat); + vdbeVComment(p, zFormat, ap); + va_end(ap); + } +} +SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ + va_list ap; + if( p ){ + sqlite3VdbeAddOp0(p, OP_Noop); + va_start(ap, zFormat); + vdbeVComment(p, zFormat, ap); + va_end(ap); + } +} +#endif /* NDEBUG */ + +#ifdef SQLITE_VDBE_COVERAGE +/* +** Set the value if the iSrcLine field for the previously coded instruction. 
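+**
+** This entry point is normally reached through the VdbeCoverage() family
+** of macros, which (when SQLITE_VDBE_COVERAGE is defined) expand to
+** something like
+**
+**       sqlite3VdbeSetLineNumber(v, __LINE__);
+**
+** so that each jump opcode remembers the source line of the code
+** generator that created it.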
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){
+  sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine;
+}
+#endif /* SQLITE_VDBE_COVERAGE */
+
+/*
+** Return the opcode for a given address.  If the address is -1, then
+** return the most recently inserted opcode.
+**
+** If a memory allocation error has occurred prior to the calling of this
+** routine, then a pointer to a dummy VdbeOp will be returned.  That opcode
+** is readable but not writable, though it is cast to a writable value.
+** The return of a dummy opcode allows the call to continue functioning
+** after an OOM fault without having to check to see if the return from
+** this routine is a valid pointer.  But because the dummy.opcode is 0,
+** dummy will never be written to.  This is verified by code inspection and
+** by running with Valgrind.
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
+  /* C89 specifies that the constant "dummy" will be initialized to all
+  ** zeros, which is correct.  MSVC generates a warning, nevertheless. */
+  static VdbeOp dummy;  /* Ignore the MSVC warning about no initializer */
+  assert( p->magic==VDBE_MAGIC_INIT );
+  if( addr<0 ){
+    addr = p->nOp - 1;
+  }
+  assert( (addr>=0 && addr<p->nOp) || p->db->mallocFailed );
+  if( p->db->mallocFailed ){
+    return (VdbeOp*)&dummy;
+  }else{
+    return &p->aOp[addr];
+  }
+}
+
+#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS)
+/*
+** Return an integer value for one of the parameters to the opcode pOp
+** determined by character c.
+*/
+static int translateP(char c, const Op *pOp){
+  if( c=='1' ) return pOp->p1;
+  if( c=='2' ) return pOp->p2;
+  if( c=='3' ) return pOp->p3;
+  if( c=='4' ) return pOp->p4.i;
+  return pOp->p5;
+}
+
+/*
+** Compute a string for the "comment" field of a VDBE opcode listing.
+**
+** The Synopsis: field in comments in the vdbe.c source file gets converted
+** to an extra string that is appended to the sqlite3OpcodeName().  In the
+** absence of other comments, this synopsis becomes the comment on the opcode.
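+**
+** For example (illustrative): if OP_Add carries the synopsis string
+** "r[P3]=r[P2]+r[P1]" and an instruction has P1=2, P2=3, P3=5, the
+** generated comment reads "r[5]=r[3]+r[2]", using the translations
+** listed below.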
+** Some translation occurs: +** +** "PX" -> "r[X]" +** "PX@PY" -> "r[X..X+Y-1]" or "r[x]" if y is 0 or 1 +** "PX@PY+1" -> "r[X..X+Y]" or "r[x]" if y is 0 +** "PY..PY" -> "r[X..Y]" or "r[x]" if y<=x +*/ +static int displayComment( + const Op *pOp, /* The opcode to be commented */ + const char *zP4, /* Previously obtained value for P4 */ + char *zTemp, /* Write result here */ + int nTemp /* Space available in zTemp[] */ +){ + const char *zOpName; + const char *zSynopsis; + int nOpName; + int ii, jj; + char zAlt[50]; + zOpName = sqlite3OpcodeName(pOp->opcode); + nOpName = sqlite3Strlen30(zOpName); + if( zOpName[nOpName+1] ){ + int seenCom = 0; + char c; + zSynopsis = zOpName += nOpName + 1; + if( strncmp(zSynopsis,"IF ",3)==0 ){ + if( pOp->p5 & SQLITE_STOREP2 ){ + sqlite3_snprintf(sizeof(zAlt), zAlt, "r[P2] = (%s)", zSynopsis+3); + }else{ + sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3); + } + zSynopsis = zAlt; + } + for(ii=jj=0; jjzComment); + seenCom = 1; + }else{ + int v1 = translateP(c, pOp); + int v2; + sqlite3_snprintf(nTemp-jj, zTemp+jj, "%d", v1); + if( strncmp(zSynopsis+ii+1, "@P", 2)==0 ){ + ii += 3; + jj += sqlite3Strlen30(zTemp+jj); + v2 = translateP(zSynopsis[ii], pOp); + if( strncmp(zSynopsis+ii+1,"+1",2)==0 ){ + ii += 2; + v2++; + } + if( v2>1 ){ + sqlite3_snprintf(nTemp-jj, zTemp+jj, "..%d", v1+v2-1); + } + }else if( strncmp(zSynopsis+ii+1, "..P3", 4)==0 && pOp->p3==0 ){ + ii += 4; + } + } + jj += sqlite3Strlen30(zTemp+jj); + }else{ + zTemp[jj++] = c; + } + } + if( !seenCom && jjzComment ){ + sqlite3_snprintf(nTemp-jj, zTemp+jj, "; %s", pOp->zComment); + jj += sqlite3Strlen30(zTemp+jj); + } + if( jjzComment ){ + sqlite3_snprintf(nTemp, zTemp, "%s", pOp->zComment); + jj = sqlite3Strlen30(zTemp); + }else{ + zTemp[0] = 0; + jj = 0; + } + return jj; +} +#endif /* SQLITE_DEBUG */ + +#if VDBE_DISPLAY_P4 && defined(SQLITE_ENABLE_CURSOR_HINTS) +/* +** Translate the P4.pExpr value for an OP_CursorHint opcode into text +** that can be displayed in the P4 column of EXPLAIN output. 
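+**
+** For instance (hypothetical expression), a hint comparing column 2 of
+** the cursor against register 5 could render as:
+**
+**       AND(EQ(c2,r[5]),NOTNULL(c2))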
+*/ +static void displayP4Expr(StrAccum *p, Expr *pExpr){ + const char *zOp = 0; + switch( pExpr->op ){ + case TK_STRING: + sqlite3XPrintf(p, "%Q", pExpr->u.zToken); + break; + case TK_INTEGER: + sqlite3XPrintf(p, "%d", pExpr->u.iValue); + break; + case TK_NULL: + sqlite3XPrintf(p, "NULL"); + break; + case TK_REGISTER: { + sqlite3XPrintf(p, "r[%d]", pExpr->iTable); + break; + } + case TK_COLUMN: { + if( pExpr->iColumn<0 ){ + sqlite3XPrintf(p, "rowid"); + }else{ + sqlite3XPrintf(p, "c%d", (int)pExpr->iColumn); + } + break; + } + case TK_LT: zOp = "LT"; break; + case TK_LE: zOp = "LE"; break; + case TK_GT: zOp = "GT"; break; + case TK_GE: zOp = "GE"; break; + case TK_NE: zOp = "NE"; break; + case TK_EQ: zOp = "EQ"; break; + case TK_IS: zOp = "IS"; break; + case TK_ISNOT: zOp = "ISNOT"; break; + case TK_AND: zOp = "AND"; break; + case TK_OR: zOp = "OR"; break; + case TK_PLUS: zOp = "ADD"; break; + case TK_STAR: zOp = "MUL"; break; + case TK_MINUS: zOp = "SUB"; break; + case TK_REM: zOp = "REM"; break; + case TK_BITAND: zOp = "BITAND"; break; + case TK_BITOR: zOp = "BITOR"; break; + case TK_SLASH: zOp = "DIV"; break; + case TK_LSHIFT: zOp = "LSHIFT"; break; + case TK_RSHIFT: zOp = "RSHIFT"; break; + case TK_CONCAT: zOp = "CONCAT"; break; + case TK_UMINUS: zOp = "MINUS"; break; + case TK_UPLUS: zOp = "PLUS"; break; + case TK_BITNOT: zOp = "BITNOT"; break; + case TK_NOT: zOp = "NOT"; break; + case TK_ISNULL: zOp = "ISNULL"; break; + case TK_NOTNULL: zOp = "NOTNULL"; break; + + default: + sqlite3XPrintf(p, "%s", "expr"); + break; + } + + if( zOp ){ + sqlite3XPrintf(p, "%s(", zOp); + displayP4Expr(p, pExpr->pLeft); + if( pExpr->pRight ){ + sqlite3StrAccumAppend(p, ",", 1); + displayP4Expr(p, pExpr->pRight); + } + sqlite3StrAccumAppend(p, ")", 1); + } +} +#endif /* VDBE_DISPLAY_P4 && defined(SQLITE_ENABLE_CURSOR_HINTS) */ + + +#if VDBE_DISPLAY_P4 +/* +** Compute a string that describes the P4 parameter for an opcode. +** Use zTemp for any required temporary buffer space. +*/ +static char *displayP4(Op *pOp, char *zTemp, int nTemp){ + char *zP4 = zTemp; + StrAccum x; + assert( nTemp>=20 ); + sqlite3StrAccumInit(&x, 0, zTemp, nTemp, 0); + switch( pOp->p4type ){ + case P4_KEYINFO: { + int j; + KeyInfo *pKeyInfo = pOp->p4.pKeyInfo; + assert( pKeyInfo->aSortOrder!=0 ); + sqlite3XPrintf(&x, "k(%d", pKeyInfo->nField); + for(j=0; jnField; j++){ + CollSeq *pColl = pKeyInfo->aColl[j]; + const char *zColl = pColl ? pColl->zName : ""; + if( strcmp(zColl, "BINARY")==0 ) zColl = "B"; + sqlite3XPrintf(&x, ",%s%s", pKeyInfo->aSortOrder[j] ? 
"-" : "", zColl); + } + sqlite3StrAccumAppend(&x, ")", 1); + break; + } +#ifdef SQLITE_ENABLE_CURSOR_HINTS + case P4_EXPR: { + displayP4Expr(&x, pOp->p4.pExpr); + break; + } +#endif + case P4_COLLSEQ: { + CollSeq *pColl = pOp->p4.pColl; + sqlite3XPrintf(&x, "(%.20s)", pColl->zName); + break; + } + case P4_FUNCDEF: { + FuncDef *pDef = pOp->p4.pFunc; + sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg); + break; + } +#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) + case P4_FUNCCTX: { + FuncDef *pDef = pOp->p4.pCtx->pFunc; + sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg); + break; + } +#endif + case P4_INT64: { + sqlite3XPrintf(&x, "%lld", *pOp->p4.pI64); + break; + } + case P4_INT32: { + sqlite3XPrintf(&x, "%d", pOp->p4.i); + break; + } + case P4_REAL: { + sqlite3XPrintf(&x, "%.16g", *pOp->p4.pReal); + break; + } + case P4_MEM: { + Mem *pMem = pOp->p4.pMem; + if( pMem->flags & MEM_Str ){ + zP4 = pMem->z; + }else if( pMem->flags & MEM_Int ){ + sqlite3XPrintf(&x, "%lld", pMem->u.i); + }else if( pMem->flags & MEM_Real ){ + sqlite3XPrintf(&x, "%.16g", pMem->u.r); + }else if( pMem->flags & MEM_Null ){ + zP4 = "NULL"; + }else{ + assert( pMem->flags & MEM_Blob ); + zP4 = "(blob)"; + } + break; + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + case P4_VTAB: { + sqlite3_vtab *pVtab = pOp->p4.pVtab->pVtab; + sqlite3XPrintf(&x, "vtab:%p", pVtab); + break; + } +#endif + case P4_INTARRAY: { + int i; + int *ai = pOp->p4.ai; + int n = ai[0]; /* The first element of an INTARRAY is always the + ** count of the number of elements to follow */ + for(i=1; ip4.pTab->zName); + break; + } + default: { + zP4 = pOp->p4.z; + if( zP4==0 ){ + zP4 = zTemp; + zTemp[0] = 0; + } + } + } + sqlite3StrAccumFinish(&x); + assert( zP4!=0 ); + return zP4; +} +#endif /* VDBE_DISPLAY_P4 */ + +/* +** Declare to the Vdbe that the BTree object at db->aDb[i] is used. +** +** The prepared statements need to know in advance the complete set of +** attached databases that will be use. A mask of these databases +** is maintained in p->btreeMask. The p->lockMask value is the subset of +** p->btreeMask of databases that will require a lock. +*/ +SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){ + assert( i>=0 && idb->nDb && i<(int)sizeof(yDbMask)*8 ); + assert( i<(int)sizeof(p->btreeMask)*8 ); + DbMaskSet(p->btreeMask, i); + if( i!=1 && sqlite3BtreeSharable(p->db->aDb[i].pBt) ){ + DbMaskSet(p->lockMask, i); + } +} + +#if !defined(SQLITE_OMIT_SHARED_CACHE) +/* +** If SQLite is compiled to support shared-cache mode and to be threadsafe, +** this routine obtains the mutex associated with each BtShared structure +** that may be accessed by the VM passed as an argument. In doing so it also +** sets the BtShared.db member of each of the BtShared structures, ensuring +** that the correct busy-handler callback is invoked if required. +** +** If SQLite is not threadsafe but does support shared-cache mode, then +** sqlite3BtreeEnter() is invoked to set the BtShared.db variables +** of all of BtShared structures accessible via the database handle +** associated with the VM. +** +** If SQLite is not threadsafe and does not support shared-cache mode, this +** function is a no-op. +** +** The p->btreeMask field is a bitmask of all btrees that the prepared +** statement p will ever use. Let N be the number of bits in p->btreeMask +** corresponding to btrees that use shared cache. Then the runtime of +** this routine is N*N. But as N is rarely more than 1, this should not +** be a problem. 
+*/ +SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){ + int i; + sqlite3 *db; + Db *aDb; + int nDb; + if( DbMaskAllZero(p->lockMask) ) return; /* The common case */ + db = p->db; + aDb = db->aDb; + nDb = db->nDb; + for(i=0; ilockMask,i) && ALWAYS(aDb[i].pBt!=0) ){ + sqlite3BtreeEnter(aDb[i].pBt); + } + } +} +#endif + +#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 +/* +** Unlock all of the btrees previously locked by a call to sqlite3VdbeEnter(). +*/ +static SQLITE_NOINLINE void vdbeLeave(Vdbe *p){ + int i; + sqlite3 *db; + Db *aDb; + int nDb; + db = p->db; + aDb = db->aDb; + nDb = db->nDb; + for(i=0; ilockMask,i) && ALWAYS(aDb[i].pBt!=0) ){ + sqlite3BtreeLeave(aDb[i].pBt); + } + } +} +SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){ + if( DbMaskAllZero(p->lockMask) ) return; /* The common case */ + vdbeLeave(p); +} +#endif + +#if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) +/* +** Print a single opcode. This routine is used for debugging only. +*/ +SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){ + char *zP4; + char zPtr[50]; + char zCom[100]; + static const char *zFormat1 = "%4d %-13s %4d %4d %4d %-13s %.2X %s\n"; + if( pOut==0 ) pOut = stdout; + zP4 = displayP4(pOp, zPtr, sizeof(zPtr)); +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + displayComment(pOp, zP4, zCom, sizeof(zCom)); +#else + zCom[0] = 0; +#endif + /* NB: The sqlite3OpcodeName() function is implemented by code created + ** by the mkopcodeh.awk and mkopcodec.awk scripts which extract the + ** information from the vdbe.c source text */ + fprintf(pOut, zFormat1, pc, + sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3, zP4, pOp->p5, + zCom + ); + fflush(pOut); +} +#endif + +/* +** Initialize an array of N Mem element. +*/ +static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){ + while( (N--)>0 ){ + p->db = db; + p->flags = flags; + p->szMalloc = 0; +#ifdef SQLITE_DEBUG + p->pScopyFrom = 0; +#endif + p++; + } +} + +/* +** Release an array of N Mem elements +*/ +static void releaseMemArray(Mem *p, int N){ + if( p && N ){ + Mem *pEnd = &p[N]; + sqlite3 *db = p->db; + if( db->pnBytesFreed ){ + do{ + if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); + }while( (++p)flags & MEM_Agg ); + testcase( p->flags & MEM_Dyn ); + testcase( p->flags & MEM_Frame ); + testcase( p->flags & MEM_RowSet ); + if( p->flags&(MEM_Agg|MEM_Dyn|MEM_Frame|MEM_RowSet) ){ + sqlite3VdbeMemRelease(p); + }else if( p->szMalloc ){ + sqlite3DbFree(db, p->zMalloc); + p->szMalloc = 0; + } + + p->flags = MEM_Undefined; + }while( (++p)nChildMem]; + for(i=0; inChildCsr; i++){ + sqlite3VdbeFreeCursor(p->v, apCsr[i]); + } + releaseMemArray(aMem, p->nChildMem); + sqlite3VdbeDeleteAuxData(p->v->db, &p->pAuxData, -1, 0); + sqlite3DbFree(p->v->db, p); +} + +#ifndef SQLITE_OMIT_EXPLAIN +/* +** Give a listing of the program in the virtual machine. +** +** The interface is the same as sqlite3VdbeExec(). But instead of +** running the code, it invokes the callback once for each instruction. +** This feature is used to implement "EXPLAIN". +** +** When p->explain==1, each instruction is listed. When +** p->explain==2, only OP_Explain instructions are listed and these +** are shown in a different format. p->explain==2 is used to implement +** EXPLAIN QUERY PLAN. +** +** When p->explain==1, first the main program is listed, then each of +** the trigger subprograms are listed one by one. 
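+**
+** With p->explain==1 the eight result columns are, in order:
+**
+**       addr  opcode  p1  p2  p3  p4  p5  comment
+**
+** so one listed row might look like (values illustrative):
+**
+**       2  OpenRead  0  2  0  k(1,B)  00  root=2 iDb=0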
+*/ +SQLITE_PRIVATE int sqlite3VdbeList( + Vdbe *p /* The VDBE */ +){ + int nRow; /* Stop when row count reaches this */ + int nSub = 0; /* Number of sub-vdbes seen so far */ + SubProgram **apSub = 0; /* Array of sub-vdbes */ + Mem *pSub = 0; /* Memory cell hold array of subprogs */ + sqlite3 *db = p->db; /* The database connection */ + int i; /* Loop counter */ + int rc = SQLITE_OK; /* Return code */ + Mem *pMem = &p->aMem[1]; /* First Mem of result set */ + + assert( p->explain ); + assert( p->magic==VDBE_MAGIC_RUN ); + assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM ); + + /* Even though this opcode does not use dynamic strings for + ** the result, result columns may become dynamic if the user calls + ** sqlite3_column_text16(), causing a translation to UTF-16 encoding. + */ + releaseMemArray(pMem, 8); + p->pResultSet = 0; + + if( p->rc==SQLITE_NOMEM_BKPT ){ + /* This happens if a malloc() inside a call to sqlite3_column_text() or + ** sqlite3_column_text16() failed. */ + sqlite3OomFault(db); + return SQLITE_ERROR; + } + + /* When the number of output rows reaches nRow, that means the + ** listing has finished and sqlite3_step() should return SQLITE_DONE. + ** nRow is the sum of the number of rows in the main program, plus + ** the sum of the number of rows in all trigger subprograms encountered + ** so far. The nRow value will increase as new trigger subprograms are + ** encountered, but p->pc will eventually catch up to nRow. + */ + nRow = p->nOp; + if( p->explain==1 ){ + /* The first 8 memory cells are used for the result set. So we will + ** commandeer the 9th cell to use as storage for an array of pointers + ** to trigger subprograms. The VDBE is guaranteed to have at least 9 + ** cells. */ + assert( p->nMem>9 ); + pSub = &p->aMem[9]; + if( pSub->flags&MEM_Blob ){ + /* On the first call to sqlite3_step(), pSub will hold a NULL. It is + ** initialized to a BLOB by the P4_SUBPROGRAM processing logic below */ + nSub = pSub->n/sizeof(Vdbe*); + apSub = (SubProgram **)pSub->z; + } + for(i=0; inOp; + } + } + + do{ + i = p->pc++; + }while( iexplain==2 && p->aOp[i].opcode!=OP_Explain ); + if( i>=nRow ){ + p->rc = SQLITE_OK; + rc = SQLITE_DONE; + }else if( db->u1.isInterrupted ){ + p->rc = SQLITE_INTERRUPT; + rc = SQLITE_ERROR; + sqlite3VdbeError(p, sqlite3ErrStr(p->rc)); + }else{ + char *zP4; + Op *pOp; + if( inOp ){ + /* The output line number is small enough that we are still in the + ** main program. */ + pOp = &p->aOp[i]; + }else{ + /* We are currently listing subprograms. Figure out which one and + ** pick up the appropriate opcode. */ + int j; + i -= p->nOp; + for(j=0; i>=apSub[j]->nOp; j++){ + i -= apSub[j]->nOp; + } + pOp = &apSub[j]->aOp[i]; + } + if( p->explain==1 ){ + pMem->flags = MEM_Int; + pMem->u.i = i; /* Program counter */ + pMem++; + + pMem->flags = MEM_Static|MEM_Str|MEM_Term; + pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */ + assert( pMem->z!=0 ); + pMem->n = sqlite3Strlen30(pMem->z); + pMem->enc = SQLITE_UTF8; + pMem++; + + /* When an OP_Program opcode is encounter (the only opcode that has + ** a P4_SUBPROGRAM argument), expand the size of the array of subprograms + ** kept in p->aMem[9].z to hold the new program - assuming this subprogram + ** has not already been seen. 
+ */ + if( pOp->p4type==P4_SUBPROGRAM ){ + int nByte = (nSub+1)*sizeof(SubProgram*); + int j; + for(j=0; jp4.pProgram ) break; + } + if( j==nSub && SQLITE_OK==sqlite3VdbeMemGrow(pSub, nByte, nSub!=0) ){ + apSub = (SubProgram **)pSub->z; + apSub[nSub++] = pOp->p4.pProgram; + pSub->flags |= MEM_Blob; + pSub->n = nSub*sizeof(SubProgram*); + } + } + } + + pMem->flags = MEM_Int; + pMem->u.i = pOp->p1; /* P1 */ + pMem++; + + pMem->flags = MEM_Int; + pMem->u.i = pOp->p2; /* P2 */ + pMem++; + + pMem->flags = MEM_Int; + pMem->u.i = pOp->p3; /* P3 */ + pMem++; + + if( sqlite3VdbeMemClearAndResize(pMem, 100) ){ /* P4 */ + assert( p->db->mallocFailed ); + return SQLITE_ERROR; + } + pMem->flags = MEM_Str|MEM_Term; + zP4 = displayP4(pOp, pMem->z, pMem->szMalloc); + if( zP4!=pMem->z ){ + pMem->n = 0; + sqlite3VdbeMemSetStr(pMem, zP4, -1, SQLITE_UTF8, 0); + }else{ + assert( pMem->z!=0 ); + pMem->n = sqlite3Strlen30(pMem->z); + pMem->enc = SQLITE_UTF8; + } + pMem++; + + if( p->explain==1 ){ + if( sqlite3VdbeMemClearAndResize(pMem, 4) ){ + assert( p->db->mallocFailed ); + return SQLITE_ERROR; + } + pMem->flags = MEM_Str|MEM_Term; + pMem->n = 2; + sqlite3_snprintf(3, pMem->z, "%.2x", pOp->p5); /* P5 */ + pMem->enc = SQLITE_UTF8; + pMem++; + +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + if( sqlite3VdbeMemClearAndResize(pMem, 500) ){ + assert( p->db->mallocFailed ); + return SQLITE_ERROR; + } + pMem->flags = MEM_Str|MEM_Term; + pMem->n = displayComment(pOp, zP4, pMem->z, 500); + pMem->enc = SQLITE_UTF8; +#else + pMem->flags = MEM_Null; /* Comment */ +#endif + } + + p->nResColumn = 8 - 4*(p->explain-1); + p->pResultSet = &p->aMem[1]; + p->rc = SQLITE_OK; + rc = SQLITE_ROW; + } + return rc; +} +#endif /* SQLITE_OMIT_EXPLAIN */ + +#ifdef SQLITE_DEBUG +/* +** Print the SQL that was used to generate a VDBE program. +*/ +SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe *p){ + const char *z = 0; + if( p->zSql ){ + z = p->zSql; + }else if( p->nOp>=1 ){ + const VdbeOp *pOp = &p->aOp[0]; + if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){ + z = pOp->p4.z; + while( sqlite3Isspace(*z) ) z++; + } + } + if( z ) printf("SQL: [%s]\n", z); +} +#endif + +#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) +/* +** Print an IOTRACE message showing SQL content. +*/ +SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe *p){ + int nOp = p->nOp; + VdbeOp *pOp; + if( sqlite3IoTrace==0 ) return; + if( nOp<1 ) return; + pOp = &p->aOp[0]; + if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){ + int i, j; + char z[1000]; + sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z); + for(i=0; sqlite3Isspace(z[i]); i++){} + for(j=0; z[i]; i++){ + if( sqlite3Isspace(z[i]) ){ + if( z[i-1]!=' ' ){ + z[j++] = ' '; + } + }else{ + z[j++] = z[i]; + } + } + z[j] = 0; + sqlite3IoTrace("SQL %s\n", z); + } +} +#endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */ + +/* An instance of this object describes bulk memory available for use +** by subcomponents of a prepared statement. Space is allocated out +** of a ReusableSpace object by the allocSpace() routine below. +*/ +struct ReusableSpace { + u8 *pSpace; /* Available memory */ + int nFree; /* Bytes of available memory */ + int nNeeded; /* Total bytes that could not be allocated */ +}; + +/* Try to allocate nByte bytes of 8-byte aligned bulk memory for pBuf +** from the ReusableSpace object. Return a pointer to the allocated +** memory on success. If insufficient memory is available in the +** ReusableSpace object, increase the ReusableSpace.nNeeded +** value by the amount needed and return NULL. 
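+**
+** A condensed sketch of the intended calling pattern (this mirrors
+** sqlite3VdbeMakeReady() below):
+**
+**       do {
+**         x.nNeeded = 0;
+**         p->aMem = allocSpace(&x, p->aMem, nMem*sizeof(Mem));
+**         .. more allocSpace() calls ..
+**         if( x.nNeeded==0 ) break;
+**         x.pSpace = sqlite3DbMallocRawNN(db, x.nNeeded);
+**         x.nFree = x.nNeeded;
+**       } while( !db->mallocFailed );
+**
+** On the first pass the pBuf arguments are NULL and leftover opcode-array
+** space is handed out.  Anything that did not fit is totalled in nNeeded
+** and is satisfied from a single fresh allocation on the second pass.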
+** +** If pBuf is not initially NULL, that means that the memory has already +** been allocated by a prior call to this routine, so just return a copy +** of pBuf and leave ReusableSpace unchanged. +** +** This allocator is employed to repurpose unused slots at the end of the +** opcode array of prepared state for other memory needs of the prepared +** statement. +*/ +static void *allocSpace( + struct ReusableSpace *p, /* Bulk memory available for allocation */ + void *pBuf, /* Pointer to a prior allocation */ + int nByte /* Bytes of memory needed */ +){ + assert( EIGHT_BYTE_ALIGNMENT(p->pSpace) ); + if( pBuf==0 ){ + nByte = ROUND8(nByte); + if( nByte <= p->nFree ){ + p->nFree -= nByte; + pBuf = &p->pSpace[p->nFree]; + }else{ + p->nNeeded += nByte; + } + } + assert( EIGHT_BYTE_ALIGNMENT(pBuf) ); + return pBuf; +} + +/* +** Rewind the VDBE back to the beginning in preparation for +** running it. +*/ +SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ +#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) + int i; +#endif + assert( p!=0 ); + assert( p->magic==VDBE_MAGIC_INIT || p->magic==VDBE_MAGIC_RESET ); + + /* There should be at least one opcode. + */ + assert( p->nOp>0 ); + + /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */ + p->magic = VDBE_MAGIC_RUN; + +#ifdef SQLITE_DEBUG + for(i=0; inMem; i++){ + assert( p->aMem[i].db==p->db ); + } +#endif + p->pc = -1; + p->rc = SQLITE_OK; + p->errorAction = OE_Abort; + p->nChange = 0; + p->cacheCtr = 1; + p->minWriteFileFormat = 255; + p->iStatement = 0; + p->nFkConstraint = 0; +#ifdef VDBE_PROFILE + for(i=0; inOp; i++){ + p->aOp[i].cnt = 0; + p->aOp[i].cycles = 0; + } +#endif +} + +/* +** Prepare a virtual machine for execution for the first time after +** creating the virtual machine. This involves things such +** as allocating registers and initializing the program counter. +** After the VDBE has be prepped, it can be executed by one or more +** calls to sqlite3VdbeExec(). +** +** This function may be called exactly once on each virtual machine. +** After this routine is called the VM has been "packaged" and is ready +** to run. After this routine is called, further calls to +** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects +** the Vdbe from the Parse object that helped generate it so that the +** the Vdbe becomes an independent entity and the Parse object can be +** destroyed. +** +** Use the sqlite3VdbeRewind() procedure to restore a virtual machine back +** to its initial state after it has been run. +*/ +SQLITE_PRIVATE void sqlite3VdbeMakeReady( + Vdbe *p, /* The VDBE */ + Parse *pParse /* Parsing context */ +){ + sqlite3 *db; /* The database connection */ + int nVar; /* Number of parameters */ + int nMem; /* Number of VM memory registers */ + int nCursor; /* Number of cursors required */ + int nArg; /* Number of arguments in subprograms */ + int n; /* Loop counter */ + struct ReusableSpace x; /* Reusable bulk memory */ + + assert( p!=0 ); + assert( p->nOp>0 ); + assert( pParse!=0 ); + assert( p->magic==VDBE_MAGIC_INIT ); + assert( pParse==p->pParse ); + db = p->db; + assert( db->mallocFailed==0 ); + nVar = pParse->nVar; + nMem = pParse->nMem; + nCursor = pParse->nTab; + nArg = pParse->nMaxArg; + + /* Each cursor uses a memory cell. The first cursor (cursor 0) can + ** use aMem[0] which is not otherwise used by the VDBE program. Allocate + ** space at the end of aMem[] for cursors 1 and greater. + ** See also: allocateCursor(). 
+ */ + nMem += nCursor; + if( nCursor==0 && nMem>0 ) nMem++; /* Space for aMem[0] even if not used */ + + /* Figure out how much reusable memory is available at the end of the + ** opcode array. This extra memory will be reallocated for other elements + ** of the prepared statement. + */ + n = ROUND8(sizeof(Op)*p->nOp); /* Bytes of opcode memory used */ + x.pSpace = &((u8*)p->aOp)[n]; /* Unused opcode memory */ + assert( EIGHT_BYTE_ALIGNMENT(x.pSpace) ); + x.nFree = ROUNDDOWN8(pParse->szOpAlloc - n); /* Bytes of unused memory */ + assert( x.nFree>=0 ); + assert( EIGHT_BYTE_ALIGNMENT(&x.pSpace[x.nFree]) ); + + resolveP2Values(p, &nArg); + p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort); + if( pParse->explain && nMem<10 ){ + nMem = 10; + } + p->expired = 0; + + /* Memory for registers, parameters, cursor, etc, is allocated in one or two + ** passes. On the first pass, we try to reuse unused memory at the + ** end of the opcode array. If we are unable to satisfy all memory + ** requirements by reusing the opcode array tail, then the second + ** pass will fill in the remainder using a fresh memory allocation. + ** + ** This two-pass approach that reuses as much memory as possible from + ** the leftover memory at the end of the opcode array. This can significantly + ** reduce the amount of memory held by a prepared statement. + */ + do { + x.nNeeded = 0; + p->aMem = allocSpace(&x, p->aMem, nMem*sizeof(Mem)); + p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem)); + p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*)); + p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*)); +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64)); +#endif + if( x.nNeeded==0 ) break; + x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded); + x.nFree = x.nNeeded; + }while( !db->mallocFailed ); + + p->pVList = pParse->pVList; + pParse->pVList = 0; + p->explain = pParse->explain; + if( db->mallocFailed ){ + p->nVar = 0; + p->nCursor = 0; + p->nMem = 0; + }else{ + p->nCursor = nCursor; + p->nVar = (ynVar)nVar; + initMemArray(p->aVar, nVar, db, MEM_Null); + p->nMem = nMem; + initMemArray(p->aMem, nMem, db, MEM_Undefined); + memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*)); +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + memset(p->anExec, 0, p->nOp*sizeof(i64)); +#endif + } + sqlite3VdbeRewind(p); +} + +/* +** Close a VDBE cursor and release all the resources that cursor +** happens to hold. +*/ +SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ + if( pCx==0 ){ + return; + } + assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE ); + switch( pCx->eCurType ){ + case CURTYPE_SORTER: { + sqlite3VdbeSorterClose(p->db, pCx); + break; + } + case CURTYPE_BTREE: { + if( pCx->pBtx ){ + sqlite3BtreeClose(pCx->pBtx); + /* The pCx->pCursor will be close automatically, if it exists, by + ** the call above. */ + }else{ + assert( pCx->uc.pCursor!=0 ); + sqlite3BtreeCloseCursor(pCx->uc.pCursor); + } + break; + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + case CURTYPE_VTAB: { + sqlite3_vtab_cursor *pVCur = pCx->uc.pVCur; + const sqlite3_module *pModule = pVCur->pVtab->pModule; + assert( pVCur->pVtab->nRef>0 ); + pVCur->pVtab->nRef--; + pModule->xClose(pVCur); + break; + } +#endif + } +} + +/* +** Close all cursors in the current frame. 
+*/ +static void closeCursorsInFrame(Vdbe *p){ + if( p->apCsr ){ + int i; + for(i=0; inCursor; i++){ + VdbeCursor *pC = p->apCsr[i]; + if( pC ){ + sqlite3VdbeFreeCursor(p, pC); + p->apCsr[i] = 0; + } + } + } +} + +/* +** Copy the values stored in the VdbeFrame structure to its Vdbe. This +** is used, for example, when a trigger sub-program is halted to restore +** control to the main program. +*/ +SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){ + Vdbe *v = pFrame->v; + closeCursorsInFrame(v); +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + v->anExec = pFrame->anExec; +#endif + v->aOp = pFrame->aOp; + v->nOp = pFrame->nOp; + v->aMem = pFrame->aMem; + v->nMem = pFrame->nMem; + v->apCsr = pFrame->apCsr; + v->nCursor = pFrame->nCursor; + v->db->lastRowid = pFrame->lastRowid; + v->nChange = pFrame->nChange; + v->db->nChange = pFrame->nDbChange; + sqlite3VdbeDeleteAuxData(v->db, &v->pAuxData, -1, 0); + v->pAuxData = pFrame->pAuxData; + pFrame->pAuxData = 0; + return pFrame->pc; +} + +/* +** Close all cursors. +** +** Also release any dynamic memory held by the VM in the Vdbe.aMem memory +** cell array. This is necessary as the memory cell array may contain +** pointers to VdbeFrame objects, which may in turn contain pointers to +** open cursors. +*/ +static void closeAllCursors(Vdbe *p){ + if( p->pFrame ){ + VdbeFrame *pFrame; + for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); + sqlite3VdbeFrameRestore(pFrame); + p->pFrame = 0; + p->nFrame = 0; + } + assert( p->nFrame==0 ); + closeCursorsInFrame(p); + if( p->aMem ){ + releaseMemArray(p->aMem, p->nMem); + } + while( p->pDelFrame ){ + VdbeFrame *pDel = p->pDelFrame; + p->pDelFrame = pDel->pParent; + sqlite3VdbeFrameDelete(pDel); + } + + /* Delete any auxdata allocations made by the VM */ + if( p->pAuxData ) sqlite3VdbeDeleteAuxData(p->db, &p->pAuxData, -1, 0); + assert( p->pAuxData==0 ); +} + +/* +** Clean up the VM after a single run. +*/ +static void Cleanup(Vdbe *p){ + sqlite3 *db = p->db; + +#ifdef SQLITE_DEBUG + /* Execute assert() statements to ensure that the Vdbe.apCsr[] and + ** Vdbe.aMem[] arrays have already been cleaned up. */ + int i; + if( p->apCsr ) for(i=0; inCursor; i++) assert( p->apCsr[i]==0 ); + if( p->aMem ){ + for(i=0; inMem; i++) assert( p->aMem[i].flags==MEM_Undefined ); + } +#endif + + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; + p->pResultSet = 0; +} + +/* +** Set the number of result columns that will be returned by this SQL +** statement. This is now set at compile time, rather than during +** execution of the vdbe program so that sqlite3_column_count() can +** be called on an SQL statement before sqlite3_step(). +*/ +SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){ + Mem *pColName; + int n; + sqlite3 *db = p->db; + + releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); + sqlite3DbFree(db, p->aColName); + n = nResColumn*COLNAME_N; + p->nResColumn = (u16)nResColumn; + p->aColName = pColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n ); + if( p->aColName==0 ) return; + initMemArray(p->aColName, n, p->db, MEM_Null); +} + +/* +** Set the name of the idx'th column to be returned by the SQL statement. +** zName must be a pointer to a nul terminated string. +** +** This call must be made after a call to sqlite3VdbeSetNumCols(). +** +** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC +** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed +** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed. 
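+**
+** Illustrative use (the column names are hypothetical):
+**
+**       sqlite3VdbeSetNumCols(v, 2);
+**       sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq",  SQLITE_STATIC);
+**       sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);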
+*/ +SQLITE_PRIVATE int sqlite3VdbeSetColName( + Vdbe *p, /* Vdbe being configured */ + int idx, /* Index of column zName applies to */ + int var, /* One of the COLNAME_* constants */ + const char *zName, /* Pointer to buffer containing name */ + void (*xDel)(void*) /* Memory management strategy for zName */ +){ + int rc; + Mem *pColName; + assert( idxnResColumn ); + assert( vardb->mallocFailed ){ + assert( !zName || xDel!=SQLITE_DYNAMIC ); + return SQLITE_NOMEM_BKPT; + } + assert( p->aColName!=0 ); + pColName = &(p->aColName[idx+var*p->nResColumn]); + rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel); + assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 ); + return rc; +} + +/* +** A read or write transaction may or may not be active on database handle +** db. If a transaction is active, commit it. If there is a +** write-transaction spanning more than one database file, this routine +** takes care of the master journal trickery. +*/ +static int vdbeCommit(sqlite3 *db, Vdbe *p){ + int i; + int nTrans = 0; /* Number of databases with an active write-transaction + ** that are candidates for a two-phase commit using a + ** master-journal */ + int rc = SQLITE_OK; + int needXcommit = 0; + +#ifdef SQLITE_OMIT_VIRTUALTABLE + /* With this option, sqlite3VtabSync() is defined to be simply + ** SQLITE_OK so p is not used. + */ + UNUSED_PARAMETER(p); +#endif + + /* Before doing anything else, call the xSync() callback for any + ** virtual module tables written in this transaction. This has to + ** be done before determining whether a master journal file is + ** required, as an xSync() callback may add an attached database + ** to the transaction. + */ + rc = sqlite3VtabSync(db, p); + + /* This loop determines (a) if the commit hook should be invoked and + ** (b) how many database files have open write transactions, not + ** including the temp database. (b) is important because if more than + ** one database file has an open write transaction, a master journal + ** file is required for an atomic commit. + */ + for(i=0; rc==SQLITE_OK && inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( sqlite3BtreeIsInTrans(pBt) ){ + /* Whether or not a database might need a master journal depends upon + ** its journal mode (among other things). This matrix determines which + ** journal modes use a master journal and which do not */ + static const u8 aMJNeeded[] = { + /* DELETE */ 1, + /* PERSIST */ 1, + /* OFF */ 0, + /* TRUNCATE */ 1, + /* MEMORY */ 0, + /* WAL */ 0 + }; + Pager *pPager; /* Pager associated with pBt */ + needXcommit = 1; + sqlite3BtreeEnter(pBt); + pPager = sqlite3BtreePager(pBt); + if( db->aDb[i].safety_level!=PAGER_SYNCHRONOUS_OFF + && aMJNeeded[sqlite3PagerGetJournalMode(pPager)] + ){ + assert( i!=1 ); + nTrans++; + } + rc = sqlite3PagerExclusiveLock(pPager); + sqlite3BtreeLeave(pBt); + } + } + if( rc!=SQLITE_OK ){ + return rc; + } + + /* If there are any write-transactions at all, invoke the commit hook */ + if( needXcommit && db->xCommitCallback ){ + rc = db->xCommitCallback(db->pCommitArg); + if( rc ){ + return SQLITE_CONSTRAINT_COMMITHOOK; + } + } + + /* The simple case - no more than one database file (not counting the + ** TEMP database) has a transaction active. There is no need for the + ** master-journal. + ** + ** If the return value of sqlite3BtreeGetFilename() is a zero length + ** string, it means the main database is :memory: or a temp file. In + ** that case we do not support atomic multi-file commits, so use the + ** simple case then too. 
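+**
+** In brief, the decision made below is:
+**
+**       nTrans<=1, or the main db is :memory: or a temp file
+**           ->  simple two-phase commit, no master journal
+**       otherwise
+**           ->  atomic multi-file commit via a master journal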
+ */ + if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt)) + || nTrans<=1 + ){ + for(i=0; rc==SQLITE_OK && inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + rc = sqlite3BtreeCommitPhaseOne(pBt, 0); + } + } + + /* Do the commit only if all databases successfully complete phase 1. + ** If one of the BtreeCommitPhaseOne() calls fails, this indicates an + ** IO error while deleting or truncating a journal file. It is unlikely, + ** but could happen. In this case abandon processing and return the error. + */ + for(i=0; rc==SQLITE_OK && inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + rc = sqlite3BtreeCommitPhaseTwo(pBt, 0); + } + } + if( rc==SQLITE_OK ){ + sqlite3VtabCommit(db); + } + } + + /* The complex case - There is a multi-file write-transaction active. + ** This requires a master journal file to ensure the transaction is + ** committed atomically. + */ +#ifndef SQLITE_OMIT_DISKIO + else{ + sqlite3_vfs *pVfs = db->pVfs; + char *zMaster = 0; /* File-name for the master journal */ + char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt); + sqlite3_file *pMaster = 0; + i64 offset = 0; + int res; + int retryCount = 0; + int nMainFile; + + /* Select a master journal file name */ + nMainFile = sqlite3Strlen30(zMainFile); + zMaster = sqlite3MPrintf(db, "%s-mjXXXXXX9XXz", zMainFile); + if( zMaster==0 ) return SQLITE_NOMEM_BKPT; + do { + u32 iRandom; + if( retryCount ){ + if( retryCount>100 ){ + sqlite3_log(SQLITE_FULL, "MJ delete: %s", zMaster); + sqlite3OsDelete(pVfs, zMaster, 0); + break; + }else if( retryCount==1 ){ + sqlite3_log(SQLITE_FULL, "MJ collide: %s", zMaster); + } + } + retryCount++; + sqlite3_randomness(sizeof(iRandom), &iRandom); + sqlite3_snprintf(13, &zMaster[nMainFile], "-mj%06X9%02X", + (iRandom>>8)&0xffffff, iRandom&0xff); + /* The antipenultimate character of the master journal name must + ** be "9" to avoid name collisions when using 8+3 filenames. */ + assert( zMaster[sqlite3Strlen30(zMaster)-3]=='9' ); + sqlite3FileSuffix3(zMainFile, zMaster); + rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); + }while( rc==SQLITE_OK && res ); + if( rc==SQLITE_OK ){ + /* Open the master journal. */ + rc = sqlite3OsOpenMalloc(pVfs, zMaster, &pMaster, + SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| + SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_MASTER_JOURNAL, 0 + ); + } + if( rc!=SQLITE_OK ){ + sqlite3DbFree(db, zMaster); + return rc; + } + + /* Write the name of each database file in the transaction into the new + ** master journal file. If an error occurs at this point close + ** and delete the master journal file. All the individual journal files + ** still have 'null' as the master journal pointer, so they will roll + ** back independently if a failure occurs. + */ + for(i=0; inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( sqlite3BtreeIsInTrans(pBt) ){ + char const *zFile = sqlite3BtreeGetJournalname(pBt); + if( zFile==0 ){ + continue; /* Ignore TEMP and :memory: databases */ + } + assert( zFile[0]!=0 ); + rc = sqlite3OsWrite(pMaster, zFile, sqlite3Strlen30(zFile)+1, offset); + offset += sqlite3Strlen30(zFile)+1; + if( rc!=SQLITE_OK ){ + sqlite3OsCloseFree(pMaster); + sqlite3OsDelete(pVfs, zMaster, 0); + sqlite3DbFree(db, zMaster); + return rc; + } + } + } + + /* Sync the master journal file. If the IOCAP_SEQUENTIAL device + ** flag is set this is not required. 
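+    **
+    ** Editor's aside: the loop above always produces a name of the form
+    ** "<main-db>-mj<6 hex digits>9<2 hex digits>".  The standalone,
+    ** hypothetical helper below shows the same construction.
+    */
+#if 0
+    /* Lift to file scope to compile. */
+    #include <stdio.h>
+    #include <stdint.h>
+    static void demoMasterJournalName(char *zOut, size_t nOut,
+                                      const char *zMainFile, uint32_t iRand){
+      snprintf(zOut, nOut, "%s-mj%06X9%02X", zMainFile,
+               (unsigned)((iRand>>8)&0xffffff), (unsigned)(iRand&0xff));
+    }
+#endif
+    /*
+    ** The sync itself follows.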
+ */ + if( 0==(sqlite3OsDeviceCharacteristics(pMaster)&SQLITE_IOCAP_SEQUENTIAL) + && SQLITE_OK!=(rc = sqlite3OsSync(pMaster, SQLITE_SYNC_NORMAL)) + ){ + sqlite3OsCloseFree(pMaster); + sqlite3OsDelete(pVfs, zMaster, 0); + sqlite3DbFree(db, zMaster); + return rc; + } + + /* Sync all the db files involved in the transaction. The same call + ** sets the master journal pointer in each individual journal. If + ** an error occurs here, do not delete the master journal file. + ** + ** If the error occurs during the first call to + ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the + ** master journal file will be orphaned. But we cannot delete it, + ** in case the master journal file name was written into the journal + ** file before the failure occurred. + */ + for(i=0; rc==SQLITE_OK && inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + rc = sqlite3BtreeCommitPhaseOne(pBt, zMaster); + } + } + sqlite3OsCloseFree(pMaster); + assert( rc!=SQLITE_BUSY ); + if( rc!=SQLITE_OK ){ + sqlite3DbFree(db, zMaster); + return rc; + } + + /* Delete the master journal file. This commits the transaction. After + ** doing this the directory is synced again before any individual + ** transaction files are deleted. + */ + rc = sqlite3OsDelete(pVfs, zMaster, 1); + sqlite3DbFree(db, zMaster); + zMaster = 0; + if( rc ){ + return rc; + } + + /* All files and directories have already been synced, so the following + ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and + ** deleting or truncating journals. If something goes wrong while + ** this is happening we don't really care. The integrity of the + ** transaction is already guaranteed, but some stray 'cold' journals + ** may be lying around. Returning an error code won't help matters. + */ + disable_simulated_io_errors(); + sqlite3BeginBenignMalloc(); + for(i=0; inDb; i++){ + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + sqlite3BtreeCommitPhaseTwo(pBt, 1); + } + } + sqlite3EndBenignMalloc(); + enable_simulated_io_errors(); + + sqlite3VtabCommit(db); + } +#endif + + return rc; +} + +/* +** This routine checks that the sqlite3.nVdbeActive count variable +** matches the number of vdbe's in the list sqlite3.pVdbe that are +** currently active. An assertion fails if the two counts do not match. +** This is an internal self-check only - it is not an essential processing +** step. +** +** This is a no-op if NDEBUG is defined. +*/ +#ifndef NDEBUG +static void checkActiveVdbeCnt(sqlite3 *db){ + Vdbe *p; + int cnt = 0; + int nWrite = 0; + int nRead = 0; + p = db->pVdbe; + while( p ){ + if( sqlite3_stmt_busy((sqlite3_stmt*)p) ){ + cnt++; + if( p->readOnly==0 ) nWrite++; + if( p->bIsReader ) nRead++; + } + p = p->pNext; + } + assert( cnt==db->nVdbeActive ); + assert( nWrite==db->nVdbeWrite ); + assert( nRead==db->nVdbeRead ); +} +#else +#define checkActiveVdbeCnt(x) +#endif + +/* +** If the Vdbe passed as the first argument opened a statement-transaction, +** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or +** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement +** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the +** statement transaction is committed. +** +** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned. +** Otherwise SQLITE_OK. 
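+**
+** Editor's sketch of the eOp contract (hypothetical code, not part of
+** SQLite): a statement savepoint that is rolled back is still released
+** afterwards, so the savepoint itself is gone in either case.
+*/
+#if 0  /* illustrative sketch; excluded from compilation */
+enum { DEMO_SAVEPOINT_RELEASE = 1, DEMO_SAVEPOINT_ROLLBACK = 2 };
+static void demoCloseStatement(int eOp, void (*xSavepointOp)(int)){
+  if( eOp==DEMO_SAVEPOINT_ROLLBACK ){
+    xSavepointOp(DEMO_SAVEPOINT_ROLLBACK);  /* undo the statement's work */
+  }
+  xSavepointOp(DEMO_SAVEPOINT_RELEASE);     /* then discard the savepoint */
+}
+#endif
+/*
+** vdbeCloseStatement() itself follows.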
+*/ +static SQLITE_NOINLINE int vdbeCloseStatement(Vdbe *p, int eOp){ + sqlite3 *const db = p->db; + int rc = SQLITE_OK; + int i; + const int iSavepoint = p->iStatement-1; + + assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE); + assert( db->nStatement>0 ); + assert( p->iStatement==(db->nStatement+db->nSavepoint) ); + + for(i=0; inDb; i++){ + int rc2 = SQLITE_OK; + Btree *pBt = db->aDb[i].pBt; + if( pBt ){ + if( eOp==SAVEPOINT_ROLLBACK ){ + rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint); + } + if( rc2==SQLITE_OK ){ + rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint); + } + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + } + db->nStatement--; + p->iStatement = 0; + + if( rc==SQLITE_OK ){ + if( eOp==SAVEPOINT_ROLLBACK ){ + rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint); + } + if( rc==SQLITE_OK ){ + rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint); + } + } + + /* If the statement transaction is being rolled back, also restore the + ** database handles deferred constraint counter to the value it had when + ** the statement transaction was opened. */ + if( eOp==SAVEPOINT_ROLLBACK ){ + db->nDeferredCons = p->nStmtDefCons; + db->nDeferredImmCons = p->nStmtDefImmCons; + } + return rc; +} +SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ + if( p->db->nStatement && p->iStatement ){ + return vdbeCloseStatement(p, eOp); + } + return SQLITE_OK; +} + + +/* +** This function is called when a transaction opened by the database +** handle associated with the VM passed as an argument is about to be +** committed. If there are outstanding deferred foreign key constraint +** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK. +** +** If there are outstanding FK violations and this function returns +** SQLITE_ERROR, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY +** and write an error message to it. Then return SQLITE_ERROR. +*/ +#ifndef SQLITE_OMIT_FOREIGN_KEY +SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){ + sqlite3 *db = p->db; + if( (deferred && (db->nDeferredCons+db->nDeferredImmCons)>0) + || (!deferred && p->nFkConstraint>0) + ){ + p->rc = SQLITE_CONSTRAINT_FOREIGNKEY; + p->errorAction = OE_Abort; + sqlite3VdbeError(p, "FOREIGN KEY constraint failed"); + return SQLITE_ERROR; + } + return SQLITE_OK; +} +#endif + +/* +** This routine is called the when a VDBE tries to halt. If the VDBE +** has made changes and is in autocommit mode, then commit those +** changes. If a rollback is needed, then do the rollback. +** +** This routine is the only way to move the state of a VM from +** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. It is harmless to +** call this on a VM that is in the SQLITE_MAGIC_HALT state. +** +** Return an error code. If the commit could not complete because of +** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it +** means the close did not happen and needs to be repeated. +*/ +SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ + int rc; /* Used to store transient return codes */ + sqlite3 *db = p->db; + + /* This function contains the logic that determines if a statement or + ** transaction will be committed or rolled back as a result of the + ** execution of this virtual machine. + ** + ** If any of the following errors occur: + ** + ** SQLITE_NOMEM + ** SQLITE_IOERR + ** SQLITE_FULL + ** SQLITE_INTERRUPT + ** + ** Then the internal cache might have been left in an inconsistent + ** state. 
We need to rollback the statement transaction, if there is + ** one, or the complete transaction if there is no statement transaction. + */ + + if( db->mallocFailed ){ + p->rc = SQLITE_NOMEM_BKPT; + } + closeAllCursors(p); + if( p->magic!=VDBE_MAGIC_RUN ){ + return SQLITE_OK; + } + checkActiveVdbeCnt(db); + + /* No commit or rollback needed if the program never started or if the + ** SQL statement does not read or write a database file. */ + if( p->pc>=0 && p->bIsReader ){ + int mrc; /* Primary error code from p->rc */ + int eStatementOp = 0; + int isSpecialError; /* Set to true if a 'special' error */ + + /* Lock all btrees used by the statement */ + sqlite3VdbeEnter(p); + + /* Check for one of the special errors */ + mrc = p->rc & 0xff; + isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR + || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL; + if( isSpecialError ){ + /* If the query was read-only and the error code is SQLITE_INTERRUPT, + ** no rollback is necessary. Otherwise, at least a savepoint + ** transaction must be rolled back to restore the database to a + ** consistent state. + ** + ** Even if the statement is read-only, it is important to perform + ** a statement or transaction rollback operation. If the error + ** occurred while writing to the journal, sub-journal or database + ** file as part of an effort to free up cache space (see function + ** pagerStress() in pager.c), the rollback is required to restore + ** the pager to a consistent state. + */ + if( !p->readOnly || mrc!=SQLITE_INTERRUPT ){ + if( (mrc==SQLITE_NOMEM || mrc==SQLITE_FULL) && p->usesStmtJournal ){ + eStatementOp = SAVEPOINT_ROLLBACK; + }else{ + /* We are forced to roll back the active transaction. Before doing + ** so, abort any other statements this handle currently has active. + */ + sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); + sqlite3CloseSavepoints(db); + db->autoCommit = 1; + p->nChange = 0; + } + } + } + + /* Check for immediate foreign key violations. */ + if( p->rc==SQLITE_OK ){ + sqlite3VdbeCheckFk(p, 0); + } + + /* If the auto-commit flag is set and this is the only active writer + ** VM, then we do either a commit or rollback of the current transaction. + ** + ** Note: This block also runs if one of the special errors handled + ** above has occurred. + */ + if( !sqlite3VtabInSync(db) + && db->autoCommit + && db->nVdbeWrite==(p->readOnly==0) + ){ + if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ + rc = sqlite3VdbeCheckFk(p, 1); + if( rc!=SQLITE_OK ){ + if( NEVER(p->readOnly) ){ + sqlite3VdbeLeave(p); + return SQLITE_ERROR; + } + rc = SQLITE_CONSTRAINT_FOREIGNKEY; + }else{ + /* The auto-commit flag is true, the vdbe program was successful + ** or hit an 'OR FAIL' constraint and there are no deferred foreign + ** key constraints to hold up the transaction. This means a commit + ** is required. 
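+        **
+        ** Editor's aside: in the enclosing guard,
+        ** db->nVdbeWrite==(p->readOnly==0) reads as "if this VM writes,
+        ** it is the only writer; if it is read-only, there are no writers
+        ** at all", as the hypothetical predicate below spells out.
+        */
+#if 0
+        /* Lift to file scope to compile. */
+        static int demoIsLoneWriter(int nVdbeWrite, int readOnly){
+          /* readOnly==0: this VM is a writer, so exactly one writer     */
+          /* readOnly==1: this VM only reads, so there must be no writer */
+          return nVdbeWrite==(readOnly==0);
+        }
+#endif
+        /*
+        ** The commit attempt follows.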
*/ + rc = vdbeCommit(db, p); + } + if( rc==SQLITE_BUSY && p->readOnly ){ + sqlite3VdbeLeave(p); + return SQLITE_BUSY; + }else if( rc!=SQLITE_OK ){ + p->rc = rc; + sqlite3RollbackAll(db, SQLITE_OK); + p->nChange = 0; + }else{ + db->nDeferredCons = 0; + db->nDeferredImmCons = 0; + db->flags &= ~SQLITE_DeferFKs; + sqlite3CommitInternalChanges(db); + } + }else{ + sqlite3RollbackAll(db, SQLITE_OK); + p->nChange = 0; + } + db->nStatement = 0; + }else if( eStatementOp==0 ){ + if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){ + eStatementOp = SAVEPOINT_RELEASE; + }else if( p->errorAction==OE_Abort ){ + eStatementOp = SAVEPOINT_ROLLBACK; + }else{ + sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); + sqlite3CloseSavepoints(db); + db->autoCommit = 1; + p->nChange = 0; + } + } + + /* If eStatementOp is non-zero, then a statement transaction needs to + ** be committed or rolled back. Call sqlite3VdbeCloseStatement() to + ** do so. If this operation returns an error, and the current statement + ** error code is SQLITE_OK or SQLITE_CONSTRAINT, then promote the + ** current statement error code. + */ + if( eStatementOp ){ + rc = sqlite3VdbeCloseStatement(p, eStatementOp); + if( rc ){ + if( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ){ + p->rc = rc; + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; + } + sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); + sqlite3CloseSavepoints(db); + db->autoCommit = 1; + p->nChange = 0; + } + } + + /* If this was an INSERT, UPDATE or DELETE and no statement transaction + ** has been rolled back, update the database connection change-counter. + */ + if( p->changeCntOn ){ + if( eStatementOp!=SAVEPOINT_ROLLBACK ){ + sqlite3VdbeSetChanges(db, p->nChange); + }else{ + sqlite3VdbeSetChanges(db, 0); + } + p->nChange = 0; + } + + /* Release the locks */ + sqlite3VdbeLeave(p); + } + + /* We have successfully halted and closed the VM. Record this fact. */ + if( p->pc>=0 ){ + db->nVdbeActive--; + if( !p->readOnly ) db->nVdbeWrite--; + if( p->bIsReader ) db->nVdbeRead--; + assert( db->nVdbeActive>=db->nVdbeRead ); + assert( db->nVdbeRead>=db->nVdbeWrite ); + assert( db->nVdbeWrite>=0 ); + } + p->magic = VDBE_MAGIC_HALT; + checkActiveVdbeCnt(db); + if( db->mallocFailed ){ + p->rc = SQLITE_NOMEM_BKPT; + } + + /* If the auto-commit flag is set to true, then any locks that were held + ** by connection db have now been released. Call sqlite3ConnectionUnlocked() + ** to invoke any required unlock-notify callbacks. + */ + if( db->autoCommit ){ + sqlite3ConnectionUnlocked(db); + } + + assert( db->nVdbeActive>0 || db->autoCommit==0 || db->nStatement==0 ); + return (p->rc==SQLITE_BUSY ? SQLITE_BUSY : SQLITE_OK); +} + + +/* +** Each VDBE holds the result of the most recent sqlite3_step() call +** in p->rc. This routine sets that result back to SQLITE_OK. +*/ +SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe *p){ + p->rc = SQLITE_OK; +} + +/* +** Copy the error code and error message belonging to the VDBE passed +** as the first argument to its database handle (so that they will be +** returned by calls to sqlite3_errcode() and sqlite3_errmsg()). +** +** This function does not clear the VDBE error code or message, just +** copies them to the database handle. 
+*/ +SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){ + sqlite3 *db = p->db; + int rc = p->rc; + if( p->zErrMsg ){ + db->bBenignMalloc++; + sqlite3BeginBenignMalloc(); + if( db->pErr==0 ) db->pErr = sqlite3ValueNew(db); + sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT); + sqlite3EndBenignMalloc(); + db->bBenignMalloc--; + db->errCode = rc; + }else{ + sqlite3Error(db, rc); + } + return rc; +} + +#ifdef SQLITE_ENABLE_SQLLOG +/* +** If an SQLITE_CONFIG_SQLLOG hook is registered and the VM has been run, +** invoke it. +*/ +static void vdbeInvokeSqllog(Vdbe *v){ + if( sqlite3GlobalConfig.xSqllog && v->rc==SQLITE_OK && v->zSql && v->pc>=0 ){ + char *zExpanded = sqlite3VdbeExpandSql(v, v->zSql); + assert( v->db->init.busy==0 ); + if( zExpanded ){ + sqlite3GlobalConfig.xSqllog( + sqlite3GlobalConfig.pSqllogArg, v->db, zExpanded, 1 + ); + sqlite3DbFree(v->db, zExpanded); + } + } +} +#else +# define vdbeInvokeSqllog(x) +#endif + +/* +** Clean up a VDBE after execution but do not delete the VDBE just yet. +** Write any error messages into *pzErrMsg. Return the result code. +** +** After this routine is run, the VDBE should be ready to be executed +** again. +** +** To look at it another way, this routine resets the state of the +** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to +** VDBE_MAGIC_INIT. +*/ +SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ + sqlite3 *db; + db = p->db; + + /* If the VM did not run to completion or if it encountered an + ** error, then it might not have been halted properly. So halt + ** it now. + */ + sqlite3VdbeHalt(p); + + /* If the VDBE has be run even partially, then transfer the error code + ** and error message from the VDBE into the main database structure. But + ** if the VDBE has just been set to run but has not actually executed any + ** instructions yet, leave the main database error information unchanged. + */ + if( p->pc>=0 ){ + vdbeInvokeSqllog(p); + sqlite3VdbeTransferError(p); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; + if( p->runOnlyOnce ) p->expired = 1; + }else if( p->rc && p->expired ){ + /* The expired flag was set on the VDBE before the first call + ** to sqlite3_step(). For consistency (since sqlite3_step() was + ** called), set the database error in this case as well. + */ + sqlite3ErrorWithMsg(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg); + sqlite3DbFree(db, p->zErrMsg); + p->zErrMsg = 0; + } + + /* Reclaim all memory used by the VDBE + */ + Cleanup(p); + + /* Save profiling information from this VDBE run. + */ +#ifdef VDBE_PROFILE + { + FILE *out = fopen("vdbe_profile.out", "a"); + if( out ){ + int i; + fprintf(out, "---- "); + for(i=0; inOp; i++){ + fprintf(out, "%02x", p->aOp[i].opcode); + } + fprintf(out, "\n"); + if( p->zSql ){ + char c, pc = 0; + fprintf(out, "-- "); + for(i=0; (c = p->zSql[i])!=0; i++){ + if( pc=='\n' ) fprintf(out, "-- "); + putc(c, out); + pc = c; + } + if( pc!='\n' ) fprintf(out, "\n"); + } + for(i=0; inOp; i++){ + char zHdr[100]; + sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ", + p->aOp[i].cnt, + p->aOp[i].cycles, + p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0 + ); + fprintf(out, "%s", zHdr); + sqlite3VdbePrintOp(out, i, &p->aOp[i]); + } + fclose(out); + } + } +#endif + p->iCurrentTime = 0; + p->magic = VDBE_MAGIC_RESET; + return p->rc & db->errMask; +} + +/* +** Clean up and delete a VDBE after execution. Return an integer which is +** the result code. Write any error message text into *pzErrMsg. 
+*/ +SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){ + int rc = SQLITE_OK; + if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){ + rc = sqlite3VdbeReset(p); + assert( (rc & p->db->errMask)==rc ); + } + sqlite3VdbeDelete(p); + return rc; +} + +/* +** If parameter iOp is less than zero, then invoke the destructor for +** all auxiliary data pointers currently cached by the VM passed as +** the first argument. +** +** Or, if iOp is greater than or equal to zero, then the destructor is +** only invoked for those auxiliary data pointers created by the user +** function invoked by the OP_Function opcode at instruction iOp of +** VM pVdbe, and only then if: +** +** * the associated function parameter is the 32nd or later (counting +** from left to right), or +** +** * the corresponding bit in argument mask is clear (where the first +** function parameter corresponds to bit 0 etc.). +*/ +SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp, int mask){ + while( *pp ){ + AuxData *pAux = *pp; + if( (iOp<0) + || (pAux->iOp==iOp && (pAux->iArg>31 || !(mask & MASKBIT32(pAux->iArg)))) + ){ + testcase( pAux->iArg==31 ); + if( pAux->xDelete ){ + pAux->xDelete(pAux->pAux); + } + *pp = pAux->pNext; + sqlite3DbFree(db, pAux); + }else{ + pp= &pAux->pNext; + } + } +} + +/* +** Free all memory associated with the Vdbe passed as the second argument, +** except for object itself, which is preserved. +** +** The difference between this function and sqlite3VdbeDelete() is that +** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with +** the database connection and frees the object itself. +*/ +SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ + SubProgram *pSub, *pNext; + assert( p->db==0 || p->db==db ); + releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); + for(pSub=p->pProgram; pSub; pSub=pNext){ + pNext = pSub->pNext; + vdbeFreeOpArray(db, pSub->aOp, pSub->nOp); + sqlite3DbFree(db, pSub); + } + if( p->magic!=VDBE_MAGIC_INIT ){ + releaseMemArray(p->aVar, p->nVar); + sqlite3DbFree(db, p->pVList); + sqlite3DbFree(db, p->pFree); + } + vdbeFreeOpArray(db, p->aOp, p->nOp); + sqlite3DbFree(db, p->aColName); + sqlite3DbFree(db, p->zSql); +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + { + int i; + for(i=0; inScan; i++){ + sqlite3DbFree(db, p->aScan[i].zName); + } + sqlite3DbFree(db, p->aScan); + } +#endif +} + +/* +** Delete an entire VDBE. +*/ +SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){ + sqlite3 *db; + + if( NEVER(p==0) ) return; + db = p->db; + assert( sqlite3_mutex_held(db->mutex) ); + sqlite3VdbeClearObject(db, p); + if( p->pPrev ){ + p->pPrev->pNext = p->pNext; + }else{ + assert( db->pVdbe==p ); + db->pVdbe = p->pNext; + } + if( p->pNext ){ + p->pNext->pPrev = p->pPrev; + } + p->magic = VDBE_MAGIC_DEAD; + p->db = 0; + sqlite3DbFree(db, p); +} + +/* +** The cursor "p" has a pending seek operation that has not yet been +** carried out. Seek the cursor now. If an error occurs, return +** the appropriate error code. 
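+**
+** Editor's sketch (hypothetical, standalone): the deferred-seek pattern.
+** Opcodes record only the target rowid in movetoTarget; the btree seek
+** happens lazily, the first time row content is actually needed, so a
+** seek whose row is never read costs nothing.
+*/
+#if 0  /* illustrative sketch; excluded from compilation */
+#include <stdint.h>
+typedef struct DemoCursor DemoCursor;
+struct DemoCursor {
+  int deferredMoveto;     /* True if a seek is pending */
+  int64_t movetoTarget;   /* Rowid to seek to when needed */
+};
+static int demoEnsurePositioned(DemoCursor *p, int (*xSeek)(int64_t)){
+  if( p->deferredMoveto ){
+    int rc = xSeek(p->movetoTarget);  /* do the postponed seek */
+    if( rc ) return rc;
+    p->deferredMoveto = 0;
+  }
+  return 0;
+}
+#endif
+/*
+** handleDeferredMoveto() itself follows.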
+*/ +static int SQLITE_NOINLINE handleDeferredMoveto(VdbeCursor *p){ + int res, rc; +#ifdef SQLITE_TEST + extern int sqlite3_search_count; +#endif + assert( p->deferredMoveto ); + assert( p->isTable ); + assert( p->eCurType==CURTYPE_BTREE ); + rc = sqlite3BtreeMovetoUnpacked(p->uc.pCursor, 0, p->movetoTarget, 0, &res); + if( rc ) return rc; + if( res!=0 ) return SQLITE_CORRUPT_BKPT; +#ifdef SQLITE_TEST + sqlite3_search_count++; +#endif + p->deferredMoveto = 0; + p->cacheStatus = CACHE_STALE; + return SQLITE_OK; +} + +/* +** Something has moved cursor "p" out of place. Maybe the row it was +** pointed to was deleted out from under it. Or maybe the btree was +** rebalanced. Whatever the cause, try to restore "p" to the place it +** is supposed to be pointing. If the row was deleted out from under the +** cursor, set the cursor to point to a NULL row. +*/ +static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){ + int isDifferentRow, rc; + assert( p->eCurType==CURTYPE_BTREE ); + assert( p->uc.pCursor!=0 ); + assert( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ); + rc = sqlite3BtreeCursorRestore(p->uc.pCursor, &isDifferentRow); + p->cacheStatus = CACHE_STALE; + if( isDifferentRow ) p->nullRow = 1; + return rc; +} + +/* +** Check to ensure that the cursor is valid. Restore the cursor +** if need be. Return any I/O error from the restore operation. +*/ +SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){ + assert( p->eCurType==CURTYPE_BTREE ); + if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){ + return handleMovedCursor(p); + } + return SQLITE_OK; +} + +/* +** Make sure the cursor p is ready to read or write the row to which it +** was last positioned. Return an error code if an OOM fault or I/O error +** prevents us from positioning the cursor to its correct position. +** +** If a MoveTo operation is pending on the given cursor, then do that +** MoveTo now. If no move is pending, check to see if the row has been +** deleted out from under the cursor and if it has, mark the row as +** a NULL row. +** +** If the cursor is already pointing to the correct row and that row has +** not been deleted out from under the cursor, then this routine is a no-op. +*/ +SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, int *piCol){ + VdbeCursor *p = *pp; + if( p->eCurType==CURTYPE_BTREE ){ + if( p->deferredMoveto ){ + int iMap; + if( p->aAltMap && (iMap = p->aAltMap[1+*piCol])>0 ){ + *pp = p->pAltCursor; + *piCol = iMap - 1; + return SQLITE_OK; + } + return handleDeferredMoveto(p); + } + if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){ + return handleMovedCursor(p); + } + } + return SQLITE_OK; +} + +/* +** The following functions: +** +** sqlite3VdbeSerialType() +** sqlite3VdbeSerialTypeLen() +** sqlite3VdbeSerialLen() +** sqlite3VdbeSerialPut() +** sqlite3VdbeSerialGet() +** +** encapsulate the code that serializes values for storage in SQLite +** data and index records. Each serialized value consists of a +** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned +** integer, stored as a varint. +** +** In an SQLite index record, the serial type is stored directly before +** the blob of data that it corresponds to. In a table record, all serial +** types are stored at the start of the record, and the blobs of data at +** the end. Hence these functions allow the caller to handle the +** serial-type and data blob separately. 
+** +** The following table describes the various storage classes for data: +** +** serial type bytes of data type +** -------------- --------------- --------------- +** 0 0 NULL +** 1 1 signed integer +** 2 2 signed integer +** 3 3 signed integer +** 4 4 signed integer +** 5 6 signed integer +** 6 8 signed integer +** 7 8 IEEE float +** 8 0 Integer constant 0 +** 9 0 Integer constant 1 +** 10,11 reserved for expansion +** N>=12 and even (N-12)/2 BLOB +** N>=13 and odd (N-13)/2 text +** +** The 8 and 9 types were added in 3.3.0, file format 4. Prior versions +** of SQLite will not understand those serial types. +*/ + +/* +** Return the serial-type for the value stored in pMem. +*/ +SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format, u32 *pLen){ + int flags = pMem->flags; + u32 n; + + assert( pLen!=0 ); + if( flags&MEM_Null ){ + *pLen = 0; + return 0; + } + if( flags&MEM_Int ){ + /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */ +# define MAX_6BYTE ((((i64)0x00008000)<<32)-1) + i64 i = pMem->u.i; + u64 u; + if( i<0 ){ + u = ~i; + }else{ + u = i; + } + if( u<=127 ){ + if( (i&1)==i && file_format>=4 ){ + *pLen = 0; + return 8+(u32)u; + }else{ + *pLen = 1; + return 1; + } + } + if( u<=32767 ){ *pLen = 2; return 2; } + if( u<=8388607 ){ *pLen = 3; return 3; } + if( u<=2147483647 ){ *pLen = 4; return 4; } + if( u<=MAX_6BYTE ){ *pLen = 6; return 5; } + *pLen = 8; + return 6; + } + if( flags&MEM_Real ){ + *pLen = 8; + return 7; + } + assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) ); + assert( pMem->n>=0 ); + n = (u32)pMem->n; + if( flags & MEM_Zero ){ + n += pMem->u.nZero; + } + *pLen = n; + return ((n*2) + 12 + ((flags&MEM_Str)!=0)); +} + +/* +** The sizes for serial types less than 128 +*/ +static const u8 sqlite3SmallTypeSizes[] = { + /* 0 1 2 3 4 5 6 7 8 9 */ +/* 0 */ 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, +/* 10 */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, +/* 20 */ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, +/* 30 */ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, +/* 40 */ 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, +/* 50 */ 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, +/* 60 */ 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, +/* 70 */ 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, +/* 80 */ 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, +/* 90 */ 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, +/* 100 */ 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, +/* 110 */ 49, 49, 50, 50, 51, 51, 52, 52, 53, 53, +/* 120 */ 54, 54, 55, 55, 56, 56, 57, 57 +}; + +/* +** Return the length of the data corresponding to the supplied serial-type. +*/ +SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32 serial_type){ + if( serial_type>=128 ){ + return (serial_type-12)/2; + }else{ + assert( serial_type<12 + || sqlite3SmallTypeSizes[serial_type]==(serial_type - 12)/2 ); + return sqlite3SmallTypeSizes[serial_type]; + } +} +SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8 serial_type){ + assert( serial_type<128 ); + return sqlite3SmallTypeSizes[serial_type]; +} + +/* +** If we are on an architecture with mixed-endian floating +** points (ex: ARM7) then swap the lower 4 bytes with the +** upper 4 bytes. Return the result. +** +** For most architectures, this is a no-op. +** +** (later): It is reported to me that the mixed-endian problem +** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems +** that early versions of GCC stored the two words of a 64-bit +** float in the wrong order. And that error has been propagated +** ever since. The blame is not necessarily with GCC, though. +** GCC might have just copying the problem from a prior compiler. 
+** I am also told that newer versions of GCC that follow a different +** ABI get the byte order right. +** +** Developers using SQLite on an ARM7 should compile and run their +** application using -DSQLITE_DEBUG=1 at least once. With DEBUG +** enabled, some asserts below will ensure that the byte order of +** floating point values is correct. +** +** (2007-08-30) Frank van Vugt has studied this problem closely +** and has send his findings to the SQLite developers. Frank +** writes that some Linux kernels offer floating point hardware +** emulation that uses only 32-bit mantissas instead of a full +** 48-bits as required by the IEEE standard. (This is the +** CONFIG_FPE_FASTFPE option.) On such systems, floating point +** byte swapping becomes very complicated. To avoid problems, +** the necessary byte swapping is carried out using a 64-bit integer +** rather than a 64-bit float. Frank assures us that the code here +** works for him. We, the developers, have no way to independently +** verify this, but Frank seems to know what he is talking about +** so we trust him. +*/ +#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT +static u64 floatSwap(u64 in){ + union { + u64 r; + u32 i[2]; + } u; + u32 t; + + u.r = in; + t = u.i[0]; + u.i[0] = u.i[1]; + u.i[1] = t; + return u.r; +} +# define swapMixedEndianFloat(X) X = floatSwap(X) +#else +# define swapMixedEndianFloat(X) +#endif + +/* +** Write the serialized data blob for the value stored in pMem into +** buf. It is assumed that the caller has allocated sufficient space. +** Return the number of bytes written. +** +** nBuf is the amount of space left in buf[]. The caller is responsible +** for allocating enough space to buf[] to hold the entire field, exclusive +** of the pMem->u.nZero bytes for a MEM_Zero value. +** +** Return the number of bytes actually written into buf[]. The number +** of bytes in the zero-filled tail is included in the return value only +** if those bytes were zeroed in buf[]. +*/ +SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){ + u32 len; + + /* Integer and Real */ + if( serial_type<=7 && serial_type>0 ){ + u64 v; + u32 i; + if( serial_type==7 ){ + assert( sizeof(v)==sizeof(pMem->u.r) ); + memcpy(&v, &pMem->u.r, sizeof(v)); + swapMixedEndianFloat(v); + }else{ + v = pMem->u.i; + } + len = i = sqlite3SmallTypeSizes[serial_type]; + assert( i>0 ); + do{ + buf[--i] = (u8)(v&0xFF); + v >>= 8; + }while( i ); + return len; + } + + /* String or blob */ + if( serial_type>=12 ){ + assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0) + == (int)sqlite3VdbeSerialTypeLen(serial_type) ); + len = pMem->n; + if( len>0 ) memcpy(buf, pMem->z, len); + return len; + } + + /* NULL or constants 0 or 1 */ + return 0; +} + +/* Input "x" is a sequence of unsigned characters that represent a +** big-endian integer. Return the equivalent native integer +*/ +#define ONE_BYTE_INT(x) ((i8)(x)[0]) +#define TWO_BYTE_INT(x) (256*(i8)((x)[0])|(x)[1]) +#define THREE_BYTE_INT(x) (65536*(i8)((x)[0])|((x)[1]<<8)|(x)[2]) +#define FOUR_BYTE_UINT(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3]) +#define FOUR_BYTE_INT(x) (16777216*(i8)((x)[0])|((x)[1]<<16)|((x)[2]<<8)|(x)[3]) + +/* +** Deserialize the data blob pointed to by buf as serial type serial_type +** and store the result in pMem. Return the number of bytes read. +** +** This function is implemented as two separate routines for performance. 
+** The few cases that require local variables are broken out into a separate +** routine so that in most cases the overhead of moving the stack pointer +** is avoided. +*/ +static u32 SQLITE_NOINLINE serialGet( + const unsigned char *buf, /* Buffer to deserialize from */ + u32 serial_type, /* Serial type to deserialize */ + Mem *pMem /* Memory cell to write value into */ +){ + u64 x = FOUR_BYTE_UINT(buf); + u32 y = FOUR_BYTE_UINT(buf+4); + x = (x<<32) + y; + if( serial_type==6 ){ + /* EVIDENCE-OF: R-29851-52272 Value is a big-endian 64-bit + ** twos-complement integer. */ + pMem->u.i = *(i64*)&x; + pMem->flags = MEM_Int; + testcase( pMem->u.i<0 ); + }else{ + /* EVIDENCE-OF: R-57343-49114 Value is a big-endian IEEE 754-2008 64-bit + ** floating point number. */ +#if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT) + /* Verify that integers and floating point values use the same + ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is + ** defined that 64-bit floating point values really are mixed + ** endian. + */ + static const u64 t1 = ((u64)0x3ff00000)<<32; + static const double r1 = 1.0; + u64 t2 = t1; + swapMixedEndianFloat(t2); + assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 ); +#endif + assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 ); + swapMixedEndianFloat(x); + memcpy(&pMem->u.r, &x, sizeof(x)); + pMem->flags = sqlite3IsNaN(pMem->u.r) ? MEM_Null : MEM_Real; + } + return 8; +} +SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( + const unsigned char *buf, /* Buffer to deserialize from */ + u32 serial_type, /* Serial type to deserialize */ + Mem *pMem /* Memory cell to write value into */ +){ + switch( serial_type ){ + case 10: /* Reserved for future use */ + case 11: /* Reserved for future use */ + case 0: { /* Null */ + /* EVIDENCE-OF: R-24078-09375 Value is a NULL. */ + pMem->flags = MEM_Null; + break; + } + case 1: { + /* EVIDENCE-OF: R-44885-25196 Value is an 8-bit twos-complement + ** integer. */ + pMem->u.i = ONE_BYTE_INT(buf); + pMem->flags = MEM_Int; + testcase( pMem->u.i<0 ); + return 1; + } + case 2: { /* 2-byte signed integer */ + /* EVIDENCE-OF: R-49794-35026 Value is a big-endian 16-bit + ** twos-complement integer. */ + pMem->u.i = TWO_BYTE_INT(buf); + pMem->flags = MEM_Int; + testcase( pMem->u.i<0 ); + return 2; + } + case 3: { /* 3-byte signed integer */ + /* EVIDENCE-OF: R-37839-54301 Value is a big-endian 24-bit + ** twos-complement integer. */ + pMem->u.i = THREE_BYTE_INT(buf); + pMem->flags = MEM_Int; + testcase( pMem->u.i<0 ); + return 3; + } + case 4: { /* 4-byte signed integer */ + /* EVIDENCE-OF: R-01849-26079 Value is a big-endian 32-bit + ** twos-complement integer. */ + pMem->u.i = FOUR_BYTE_INT(buf); +#ifdef __HP_cc + /* Work around a sign-extension bug in the HP compiler for HP/UX */ + if( buf[0]&0x80 ) pMem->u.i |= 0xffffffff80000000LL; +#endif + pMem->flags = MEM_Int; + testcase( pMem->u.i<0 ); + return 4; + } + case 5: { /* 6-byte signed integer */ + /* EVIDENCE-OF: R-50385-09674 Value is a big-endian 48-bit + ** twos-complement integer. 
+ */
+      pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf);
+      pMem->flags = MEM_Int;
+      testcase( pMem->u.i<0 );
+      return 6;
+    }
+    case 6:   /* 8-byte signed integer */
+    case 7: { /* IEEE floating point */
+      /* These use local variables, so do them in a separate routine
+      ** to avoid having to move the frame pointer in the common case */
+      return serialGet(buf,serial_type,pMem);
+    }
+    case 8:    /* Integer 0 */
+    case 9: {  /* Integer 1 */
+      /* EVIDENCE-OF: R-12976-22893 Value is the integer 0. */
+      /* EVIDENCE-OF: R-18143-12121 Value is the integer 1. */
+      pMem->u.i = serial_type-8;
+      pMem->flags = MEM_Int;
+      return 0;
+    }
+    default: {
+      /* EVIDENCE-OF: R-14606-31564 Value is a BLOB that is (N-12)/2 bytes in
+      ** length.
+      ** EVIDENCE-OF: R-28401-00140 Value is a string in the text encoding and
+      ** (N-13)/2 bytes in length. */
+      static const u16 aFlag[] = { MEM_Blob|MEM_Ephem, MEM_Str|MEM_Ephem };
+      pMem->z = (char *)buf;
+      pMem->n = (serial_type-12)/2;
+      pMem->flags = aFlag[serial_type&1];
+      return pMem->n;
+    }
+  }
+  return 0;
+}
+/*
+** This routine is used to allocate sufficient space for an UnpackedRecord
+** structure large enough to be used with sqlite3VdbeRecordUnpack() if
+** the first argument is a pointer to the KeyInfo structure pKeyInfo.
+**
+** The space is allocated using sqlite3DbMallocRaw().  The caller is
+** responsible for eventually freeing the returned object using
+** sqlite3DbFree().
+**
+** If an OOM error occurs, NULL is returned.
+*/
+SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(
+  KeyInfo *pKeyInfo               /* Description of the record */
+){
+  UnpackedRecord *p;              /* Unpacked record to return */
+  int nByte;                      /* Number of bytes required for *p */
+  nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1);
+  p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte);
+  if( !p ) return 0;
+  p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))];
+  assert( pKeyInfo->aSortOrder!=0 );
+  p->pKeyInfo = pKeyInfo;
+  p->nField = pKeyInfo->nField + 1;
+  return p;
+}
+
+/*
+** Given the nKey-byte encoding of a record in pKey[], populate the
+** UnpackedRecord structure indicated by the fourth argument with the
+** contents of the decoded record.
+*/
+SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(
+  KeyInfo *pKeyInfo,     /* Information about the record format */
+  int nKey,              /* Size of the binary record */
+  const void *pKey,      /* The binary record */
+  UnpackedRecord *p      /* Populate this structure before returning. */
+){
+  const unsigned char *aKey = (const unsigned char *)pKey;
+  int d;
+  u32 idx;                        /* Offset in aKey[] to read from */
+  u16 u;                          /* Unsigned loop counter */
+  u32 szHdr;
+  Mem *pMem = p->aMem;
+
+  p->default_rc = 0;
+  assert( EIGHT_BYTE_ALIGNMENT(pMem) );
+  idx = getVarint32(aKey, szHdr);
+  d = szHdr;
+  u = 0;
+  while( idx<szHdr && d<=nKey ){
+    u32 serial_type;
+
+    idx += getVarint32(&aKey[idx], serial_type);
+    pMem->enc = pKeyInfo->enc;
+    pMem->db = pKeyInfo->db;
+    /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */
+    pMem->szMalloc = 0;
+    pMem->z = 0;
+    d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem);
+    pMem++;
+    if( (++u)>=p->nField ) break;
+  }
+  assert( u<=pKeyInfo->nField + 1 );
+  p->nField = u;
+}
+
+#if SQLITE_DEBUG
+/*
+** This function compares two index or table record keys in the same way
+** as the sqlite3VdbeRecordCompare() routine.
Unlike VdbeRecordCompare(), +** this function deserializes and compares values using the +** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used +** in assert() statements to ensure that the optimized code in +** sqlite3VdbeRecordCompare() returns results with these two primitives. +** +** Return true if the result of comparison is equivalent to desiredResult. +** Return false if there is a disagreement. +*/ +static int vdbeRecordCompareDebug( + int nKey1, const void *pKey1, /* Left key */ + const UnpackedRecord *pPKey2, /* Right key */ + int desiredResult /* Correct answer */ +){ + u32 d1; /* Offset into aKey[] of next data element */ + u32 idx1; /* Offset into aKey[] of next header element */ + u32 szHdr1; /* Number of bytes in header */ + int i = 0; + int rc = 0; + const unsigned char *aKey1 = (const unsigned char *)pKey1; + KeyInfo *pKeyInfo; + Mem mem1; + + pKeyInfo = pPKey2->pKeyInfo; + if( pKeyInfo->db==0 ) return 1; + mem1.enc = pKeyInfo->enc; + mem1.db = pKeyInfo->db; + /* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */ + VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */ + + /* Compilers may complain that mem1.u.i is potentially uninitialized. + ** We could initialize it, as shown here, to silence those complaints. + ** But in fact, mem1.u.i will never actually be used uninitialized, and doing + ** the unnecessary initialization has a measurable negative performance + ** impact, since this routine is a very high runner. And so, we choose + ** to ignore the compiler warnings and leave this variable uninitialized. + */ + /* mem1.u.i = 0; // not needed, here to silence compiler warning */ + + idx1 = getVarint32(aKey1, szHdr1); + if( szHdr1>98307 ) return SQLITE_CORRUPT; + d1 = szHdr1; + assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB ); + assert( pKeyInfo->aSortOrder!=0 ); + assert( pKeyInfo->nField>0 ); + assert( idx1<=szHdr1 || CORRUPT_DB ); + do{ + u32 serial_type1; + + /* Read the serial types for the next element in each key. */ + idx1 += getVarint32( aKey1+idx1, serial_type1 ); + + /* Verify that there is enough key space remaining to avoid + ** a buffer overread. The "d1+serial_type1+2" subexpression will + ** always be greater than or equal to the amount of required key space. + ** Use that approximation to avoid the more expensive call to + ** sqlite3VdbeSerialTypeLen() in the common case. + */ + if( d1+serial_type1+2>(u32)nKey1 + && d1+sqlite3VdbeSerialTypeLen(serial_type1)>(u32)nKey1 + ){ + break; + } + + /* Extract the values to be compared. + */ + d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); + + /* Do the comparison + */ + rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i], pKeyInfo->aColl[i]); + if( rc!=0 ){ + assert( mem1.szMalloc==0 ); /* See comment below */ + if( pKeyInfo->aSortOrder[i] ){ + rc = -rc; /* Invert the result for DESC sort order. */ + } + goto debugCompareEnd; + } + i++; + }while( idx1nField ); + + /* No memory allocation is ever used on mem1. Prove this using + ** the following assert(). If the assert() fails, it indicates a + ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). + */ + assert( mem1.szMalloc==0 ); + + /* rc==0 here means that one of the keys ran out of fields and + ** all the fields up to that point were equal. Return the default_rc + ** value. 
+  */
+  rc = pPKey2->default_rc;
+
+debugCompareEnd:
+  if( desiredResult==0 && rc==0 ) return 1;
+  if( desiredResult<0 && rc<0 ) return 1;
+  if( desiredResult>0 && rc>0 ) return 1;
+  if( CORRUPT_DB ) return 1;
+  if( pKeyInfo->db->mallocFailed ) return 1;
+  return 0;
+}
+#endif
+
+#if SQLITE_DEBUG
+/*
+** Count the number of fields (a.k.a. columns) in the record given by
+** pKey,nKey.  Then verify that this count is less than or equal to the
+** limit given by pKeyInfo->nField + pKeyInfo->nXField.
+**
+** If this constraint is not satisfied, it means that the high-speed
+** vdbeRecordCompareInt() and vdbeRecordCompareString() routines will
+** not work correctly.  If this assert() ever fires, it probably means
+** that the KeyInfo.nField or KeyInfo.nXField values were computed
+** incorrectly.
+*/
+static void vdbeAssertFieldCountWithinLimits(
+  int nKey, const void *pKey,   /* The record to verify */
+  const KeyInfo *pKeyInfo       /* Compare size with this KeyInfo */
+){
+  int nField = 0;
+  u32 szHdr;
+  u32 idx;
+  u32 notUsed;
+  const unsigned char *aKey = (const unsigned char*)pKey;
+
+  if( CORRUPT_DB ) return;
+  idx = getVarint32(aKey, szHdr);
+  assert( nKey>=0 );
+  assert( szHdr<=(u32)nKey );
+  while( idx<szHdr ){
+    idx += getVarint32(aKey+idx, notUsed);
+    nField++;
+  }
+  assert( nField <= pKeyInfo->nField+pKeyInfo->nXField );
+}
+#else
+# define vdbeAssertFieldCountWithinLimits(A,B,C)
+#endif
+
+/*
+** Both *pMem1 and *pMem2 contain string values.  Compare the two values
+** using the collation sequence pColl.  As usual, return a negative, zero
+** or positive value if *pMem1 is less than, equal to or greater than
+** *pMem2, respectively.  Similar in spirit to "rc = (*pMem1) - (*pMem2);".
+*/
+static int vdbeCompareMemString(
+  const Mem *pMem1,
+  const Mem *pMem2,
+  const CollSeq *pColl,
+  u8 *prcErr                      /* If an OOM occurs, set to SQLITE_NOMEM */
+){
+  if( pMem1->enc==pColl->enc ){
+    /* The strings are already in the correct encoding.  Call the
+    ** comparison function directly */
+    return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z);
+  }else{
+    int rc;
+    const void *v1, *v2;
+    int n1, n2;
+    Mem c1;
+    Mem c2;
+    sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null);
+    sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null);
+    sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem);
+    sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem);
+    v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc);
+    n1 = v1==0 ? 0 : c1.n;
+    v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc);
+    n2 = v2==0 ? 0 : c2.n;
+    rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2);
+    if( (v1==0 || v2==0) && prcErr ) *prcErr = SQLITE_NOMEM_BKPT;
+    sqlite3VdbeMemRelease(&c1);
+    sqlite3VdbeMemRelease(&c2);
+    return rc;
+  }
+}
+
+/*
+** The input pBlob is guaranteed to be a Blob that is not marked
+** with MEM_Zero.  Return true if it could be a zero-blob.
+*/
+static int isAllZero(const char *z, int n){
+  int i;
+  for(i=0; i<n; i++){
+    if( z[i] ) return 0;
+  }
+  return 1;
+}
+
+/*
+** Compare two blobs.  Return negative, zero, or positive if the first
+** is less than, equal to, or greater than the second, respectively.
+** If one blob is a prefix of the other, then the shorter is the lessor.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
+  int c;
+  int n1 = pB1->n;
+  int n2 = pB2->n;
+
+  /* It is possible to have a Blob value that has some non-zero content
+  ** followed by zero content.  But that only comes up for Blobs formed
+  ** by the OP_MakeRecord opcode, and such Blobs never get passed into
+  ** sqlite3MemCompare(). */
+  assert( (pB1->flags & MEM_Zero)==0 || n1==0 );
+  assert( (pB2->flags & MEM_Zero)==0 || n2==0 );
+
+  if( (pB1->flags|pB2->flags) & MEM_Zero ){
+    if( pB1->flags & pB2->flags & MEM_Zero ){
+      return pB1->u.nZero - pB2->u.nZero;
+    }else if( pB1->flags & MEM_Zero ){
+      if( !isAllZero(pB2->z, pB2->n) ) return -1;
+      return pB1->u.nZero - n2;
+    }else{
+      if( !isAllZero(pB1->z, pB1->n) ) return +1;
+      return n1 - pB2->u.nZero;
+    }
+  }
+  c = memcmp(pB1->z, pB2->z, n1>n2 ? n2 : n1);
+  if( c ) return c;
+  return n1 - n2;
+}
+
+/*
+** Do a comparison between a 64-bit signed integer and a 64-bit floating-point
+** number.  Return negative, zero, or positive if the first (i64) is less than,
+** equal to, or greater than the second (double).
+*/
+static int sqlite3IntFloatCompare(i64 i, double r){
+  if( sizeof(LONGDOUBLE_TYPE)>8 ){
+    LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i;
+    if( x<r ) return -1;
+    if( x>r ) return +1;
+    return 0;
+  }else{
+    i64 y;
+    double s;
+    if( r<-9223372036854775808.0 ) return +1;
+    if( r>9223372036854775807.0 ) return -1;
+    y = (i64)r;
+    if( i<y ) return -1;
+    if( i>y ){
+      if( y==SMALLEST_INT64 && r>0.0 ) return -1;
+      return +1;
+    }
+    s = (double)i;
+    if( s<r ) return -1;
+    if( s>r ) return +1;
+    return 0;
+  }
+}
+
+/*
+** Compare the values contained by the two memory cells, returning
+** negative, zero or positive if pMem1 is less than, equal to, or greater
+** than pMem2.  Sorting order is NULLs first, followed by numbers (integers
+** and reals) sorted numerically, followed by text ordered by the collating
+** sequence pColl and finally blobs ordered by memcmp().
+**
+** Two NULL values are considered equal by this function.
+*/
+SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){
+  int f1, f2;
+  int combined_flags;
+
+  f1 = pMem1->flags;
+  f2 = pMem2->flags;
+  combined_flags = f1|f2;
+  assert( (combined_flags & MEM_RowSet)==0 );
+
+  /* If one value is NULL, it is less than the other. If both values
+  ** are NULL, return 0.
+  */
+  if( combined_flags&MEM_Null ){
+    return (f2&MEM_Null) - (f1&MEM_Null);
+  }
+
+  /* At least one of the two values is a number
+  */
+  if( combined_flags&(MEM_Int|MEM_Real) ){
+    if( (f1 & f2 & MEM_Int)!=0 ){
+      if( pMem1->u.i < pMem2->u.i ) return -1;
+      if( pMem1->u.i > pMem2->u.i ) return +1;
+      return 0;
+    }
+    if( (f1 & f2 & MEM_Real)!=0 ){
+      if( pMem1->u.r < pMem2->u.r ) return -1;
+      if( pMem1->u.r > pMem2->u.r ) return +1;
+      return 0;
+    }
+    if( (f1&MEM_Int)!=0 ){
+      if( (f2&MEM_Real)!=0 ){
+        return sqlite3IntFloatCompare(pMem1->u.i, pMem2->u.r);
+      }else{
+        return -1;
+      }
+    }
+    if( (f1&MEM_Real)!=0 ){
+      if( (f2&MEM_Int)!=0 ){
+        return -sqlite3IntFloatCompare(pMem2->u.i, pMem1->u.r);
+      }else{
+        return -1;
+      }
+    }
+    return +1;
+  }
+
+  /* If one value is a string and the other is a blob, the string is less.
+  ** If both are strings, compare using the collating functions.
+  */
+  if( combined_flags&MEM_Str ){
+    if( (f1 & MEM_Str)==0 ){
+      return 1;
+    }
+    if( (f2 & MEM_Str)==0 ){
+      return -1;
+    }
+
+    assert( pMem1->enc==pMem2->enc || pMem1->db->mallocFailed );
+    assert( pMem1->enc==SQLITE_UTF8 ||
+            pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE );
+
+    /* The collation sequence must be defined at this point, even if
+    ** the user deletes the collation sequence after the vdbe program is
+    ** compiled (this was not always the case).
+    */
+    assert( !pColl || pColl->xCmp );
+
+    if( pColl ){
+      return vdbeCompareMemString(pMem1, pMem2, pColl, 0);
+    }
+    /* If a NULL pointer was passed as the collate function, fall through
+    ** to the blob case and use memcmp(). */
+  }
+
+  /* Both values must be blobs.  Compare using memcmp(). */
+  return sqlite3BlobCompare(pMem1, pMem2);
+}
+
+
+/*
+** The first argument passed to this function is a serial-type that
+** corresponds to an integer - all values between 1 and 9 inclusive
+** except 7.  The second points to a buffer containing an integer value
+** serialized according to serial_type.  This function deserializes
+** and returns the value.
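+**
+** Editor's sketch (standalone): the same decode written as one loop --
+** sign-extend the most significant byte, then shift in the rest -- which
+** is what the ONE/TWO/THREE/FOUR_BYTE_INT macros unroll for the 1-, 2-,
+** 3-, 4-, 6- and 8-byte serial types.
+*/
+#if 0  /* illustrative sketch; excluded from compilation */
+#include <stdint.h>
+static int64_t demoDecodeBigEndianInt(const uint8_t *a, int nByte){
+  int64_t v = (int8_t)a[0];   /* sign-extend the most significant byte */
+  int i;
+  for(i=1; i<nByte; i++){
+    v = v*256 + a[i];         /* shift in the remaining bytes */
+  }
+  return v;
+}
+#endif
+/*
+** vdbeRecordDecodeInt() itself follows.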
+*/ +static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){ + u32 y; + assert( CORRUPT_DB || (serial_type>=1 && serial_type<=9 && serial_type!=7) ); + switch( serial_type ){ + case 0: + case 1: + testcase( aKey[0]&0x80 ); + return ONE_BYTE_INT(aKey); + case 2: + testcase( aKey[0]&0x80 ); + return TWO_BYTE_INT(aKey); + case 3: + testcase( aKey[0]&0x80 ); + return THREE_BYTE_INT(aKey); + case 4: { + testcase( aKey[0]&0x80 ); + y = FOUR_BYTE_UINT(aKey); + return (i64)*(int*)&y; + } + case 5: { + testcase( aKey[0]&0x80 ); + return FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey); + } + case 6: { + u64 x = FOUR_BYTE_UINT(aKey); + testcase( aKey[0]&0x80 ); + x = (x<<32) | FOUR_BYTE_UINT(aKey+4); + return (i64)*(i64*)&x; + } + } + + return (serial_type - 8); +} + +/* +** This function compares the two table rows or index records +** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero +** or positive integer if key1 is less than, equal to or +** greater than key2. The {nKey1, pKey1} key must be a blob +** created by the OP_MakeRecord opcode of the VDBE. The pPKey2 +** key must be a parsed key such as obtained from +** sqlite3VdbeParseRecord. +** +** If argument bSkip is non-zero, it is assumed that the caller has already +** determined that the first fields of the keys are equal. +** +** Key1 and Key2 do not have to contain the same number of fields. If all +** fields that appear in both keys are equal, then pPKey2->default_rc is +** returned. +** +** If database corruption is discovered, set pPKey2->errCode to +** SQLITE_CORRUPT and return 0. If an OOM error is encountered, +** pPKey2->errCode is set to SQLITE_NOMEM and, if it is not NULL, the +** malloc-failed flag set on database handle (pPKey2->pKeyInfo->db). +*/ +SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( + int nKey1, const void *pKey1, /* Left key */ + UnpackedRecord *pPKey2, /* Right key */ + int bSkip /* If true, skip the first field */ +){ + u32 d1; /* Offset into aKey[] of next data element */ + int i; /* Index of next field to compare */ + u32 szHdr1; /* Size of record header in bytes */ + u32 idx1; /* Offset of first type in header */ + int rc = 0; /* Return value */ + Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */ + KeyInfo *pKeyInfo = pPKey2->pKeyInfo; + const unsigned char *aKey1 = (const unsigned char *)pKey1; + Mem mem1; + + /* If bSkip is true, then the caller has already determined that the first + ** two elements in the keys are equal. Fix the various stack variables so + ** that this routine begins comparing at the second field. 
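+  **
+  ** Editor's aside: the record being walked here has the layout
+  **    [szHdr varint][serial-type varints ...][field data ...]
+  ** so idx1 advances through the header while d1 advances through the
+  ** data area.  The hypothetical sketch below counts fields the same way
+  ** under a single-byte-varint assumption.
+  */
+#if 0
+  /* Lift to file scope to compile. */
+  #include <stdint.h>
+  static int demoCountFields(const uint8_t *aRec, uint32_t nRec){
+    uint32_t szHdr = aRec[0];  /* assumes the header-size varint is 1 byte */
+    uint32_t iHdr = 1;         /* serial types start after that varint */
+    int nField = 0;
+    while( iHdr<szHdr && iHdr<nRec ){
+      iHdr++;                  /* assumes each serial type is 1 byte */
+      nField++;
+    }
+    return nField;
+  }
+#endif
+  /*
+  ** The field-by-field comparison follows.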
+  */
+  if( bSkip ){
+    u32 s1;
+    idx1 = 1 + getVarint32(&aKey1[1], s1);
+    szHdr1 = aKey1[0];
+    d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1);
+    i = 1;
+    pRhs++;
+  }else{
+    idx1 = getVarint32(aKey1, szHdr1);
+    d1 = szHdr1;
+    if( d1>(unsigned)nKey1 ){
+      pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+      return 0;  /* Corruption */
+    }
+    i = 0;
+  }
+
+  VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */
+  assert( pPKey2->pKeyInfo->nField+pPKey2->pKeyInfo->nXField>=pPKey2->nField
+       || CORRUPT_DB );
+  assert( pPKey2->pKeyInfo->aSortOrder!=0 );
+  assert( pPKey2->pKeyInfo->nField>0 );
+  assert( idx1<=szHdr1 || CORRUPT_DB );
+  do{
+    u32 serial_type;
+
+    /* RHS is an integer */
+    if( pRhs->flags & MEM_Int ){
+      serial_type = aKey1[idx1];
+      testcase( serial_type==12 );
+      if( serial_type>=10 ){
+        rc = +1;
+      }else if( serial_type==0 ){
+        rc = -1;
+      }else if( serial_type==7 ){
+        sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+        rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r);
+      }else{
+        i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]);
+        i64 rhs = pRhs->u.i;
+        if( lhs<rhs ){
+          rc = -1;
+        }else if( lhs>rhs ){
+          rc = +1;
+        }
+      }
+    }
+
+    /* RHS is real */
+    else if( pRhs->flags & MEM_Real ){
+      serial_type = aKey1[idx1];
+      if( serial_type>=10 ){
+        /* Serial types 12 or greater are strings and blobs (greater than
+        ** numbers). Types 10 and 11 are currently "reserved for future
+        ** use", so it doesn't really matter what the results of comparing
+        ** them to numeric values are. */
+        rc = +1;
+      }else if( serial_type==0 ){
+        rc = -1;
+      }else{
+        sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1);
+        if( serial_type==7 ){
+          if( mem1.u.r<pRhs->u.r ){
+            rc = -1;
+          }else if( mem1.u.r>pRhs->u.r ){
+            rc = +1;
+          }
+        }else{
+          rc = sqlite3IntFloatCompare(mem1.u.i, pRhs->u.r);
+        }
+      }
+    }
+
+    /* RHS is a string */
+    else if( pRhs->flags & MEM_Str ){
+      getVarint32(&aKey1[idx1], serial_type);
+      testcase( serial_type==12 );
+      if( serial_type<12 ){
+        rc = -1;
+      }else if( !(serial_type & 0x01) ){
+        rc = +1;
+      }else{
+        mem1.n = (serial_type - 12) / 2;
+        testcase( (d1+mem1.n)==(unsigned)nKey1 );
+        testcase( (d1+mem1.n+1)==(unsigned)nKey1 );
+        if( (d1+mem1.n) > (unsigned)nKey1 ){
+          pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+          return 0;                /* Corruption */
+        }else if( pKeyInfo->aColl[i] ){
+          mem1.enc = pKeyInfo->enc;
+          mem1.db = pKeyInfo->db;
+          mem1.flags = MEM_Str;
+          mem1.z = (char*)&aKey1[d1];
+          rc = vdbeCompareMemString(
+              &mem1, pRhs, pKeyInfo->aColl[i], &pPKey2->errCode
+          );
+        }else{
+          int nCmp = MIN(mem1.n, pRhs->n);
+          rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
+          if( rc==0 ) rc = mem1.n - pRhs->n;
+        }
+      }
+    }
+
+    /* RHS is a blob */
+    else if( pRhs->flags & MEM_Blob ){
+      assert( (pRhs->flags & MEM_Zero)==0 || pRhs->n==0 );
+      getVarint32(&aKey1[idx1], serial_type);
+      testcase( serial_type==12 );
+      if( serial_type<12 || (serial_type & 0x01) ){
+        rc = -1;
+      }else{
+        int nStr = (serial_type - 12) / 2;
+        testcase( (d1+nStr)==(unsigned)nKey1 );
+        testcase( (d1+nStr+1)==(unsigned)nKey1 );
+        if( (d1+nStr) > (unsigned)nKey1 ){
+          pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+          return 0;                /* Corruption */
+        }else if( pRhs->flags & MEM_Zero ){
+          if( !isAllZero((const char*)&aKey1[d1],nStr) ){
+            rc = 1;
+          }else{
+            rc = nStr - pRhs->u.nZero;
+          }
+        }else{
+          int nCmp = MIN(nStr, pRhs->n);
+          rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
+          if( rc==0 ) rc = nStr - pRhs->n;
+        }
+      }
+    }
+
+    /* RHS is null */
+    else{
+      serial_type = aKey1[idx1];
+      rc = (serial_type!=0);
+    }
+
+    if( rc!=0 ){
+      if( pKeyInfo->aSortOrder[i] ){
+        rc = -rc;
+      }
+      assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) );
+      assert( mem1.szMalloc==0 );  /* See comment below */
+      return rc;
+    }
+
+    i++;
+    pRhs++;
+    d1 += sqlite3VdbeSerialTypeLen(serial_type);
+    idx1 += sqlite3VarintLen(serial_type);
+  }while( idx1<(unsigned)szHdr1 && i<pPKey2->nField && d1<=(unsigned)nKey1 );
+
+  /* No memory allocation is ever used on mem1.  Prove this using
+  ** the following assert().  If the assert() fails, it indicates a
+  ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */
+  assert( mem1.szMalloc==0 );
+
+  /* rc==0 here means that one or both of the keys ran out of fields and
+  ** all the fields up to that point were equal. Return the default_rc
+  ** value. */
+  assert( CORRUPT_DB
+       || vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc)
+       || pKeyInfo->db->mallocFailed
+  );
+  pPKey2->eqSeen = 1;
+  return pPKey2->default_rc;
+}
+SQLITE_PRIVATE int sqlite3VdbeRecordCompare(
+  int nKey1, const void *pKey1,   /* Left key */
+  UnpackedRecord *pPKey2          /* Right key */
+){
+  return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 0);
+}
+
+
+/*
+** This function is an optimized version of sqlite3VdbeRecordCompare()
+** that (a) the first field of pPKey2 is an integer, and (b) the
+** size-of-header varint at the start of (pKey1/nKey1) fits in a single
+** byte (i.e. is less than 128).
+**
+** To avoid concerns about buffer overreads, this routine is only used
+** on schemas where the maximum valid header size is 63 bytes or less.
+*/
+static int vdbeRecordCompareInt(
+  int nKey1, const void *pKey1, /* Left key */
+  UnpackedRecord *pPKey2        /* Right key */
+){
+  const u8 *aKey = &((const u8*)pKey1)[*(const u8*)pKey1 & 0x3F];
+  int serial_type = ((const u8*)pKey1)[1];
+  int res;
+  u32 y;
+  u64 x;
+  i64 v;
+  i64 lhs;
+
+  vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
+  assert( (*(u8*)pKey1)<=0x3F || CORRUPT_DB );
+  switch( serial_type ){
+    case 1: { /* 1-byte signed integer */
+      lhs = ONE_BYTE_INT(aKey);
+      testcase( lhs<0 );
+      break;
+    }
+    case 2: { /* 2-byte signed integer */
+      lhs = TWO_BYTE_INT(aKey);
+      testcase( lhs<0 );
+      break;
+    }
+    case 3: { /* 3-byte signed integer */
+      lhs = THREE_BYTE_INT(aKey);
+      testcase( lhs<0 );
+      break;
+    }
+    case 4: { /* 4-byte signed integer */
+      y = FOUR_BYTE_UINT(aKey);
+      lhs = (i64)*(int*)&y;
+      testcase( lhs<0 );
+      break;
+    }
+    case 5: { /* 6-byte signed integer */
+      lhs = FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey);
+      testcase( lhs<0 );
+      break;
+    }
+    case 6: { /* 8-byte signed integer */
+      x = FOUR_BYTE_UINT(aKey);
+      x = (x<<32) | FOUR_BYTE_UINT(aKey+4);
+      lhs = *(i64*)&x;
+      testcase( lhs<0 );
+      break;
+    }
+    case 8:
+      lhs = 0;
+      break;
+    case 9:
+      lhs = 1;
+      break;
+
+    /* This case could be removed without changing the results of running
+    ** this code. Including it causes gcc to generate a faster switch
+    ** statement (since the range of switch targets now starts at zero and
+    ** is contiguous) but does not cause any duplicate code to be generated
+    ** (as gcc is clever enough to combine the two like cases). Other
+    ** compilers might be similar. */
+    case 0: case 7:
+      return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
+
+    default:
+      return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
+  }
+
+  v = pPKey2->aMem[0].u.i;
+  if( v>lhs ){
+    res = pPKey2->r1;
+  }else if( v<lhs ){
+    res = pPKey2->r2;
+  }else if( pPKey2->nField>1 ){
+    /* The first fields of the two keys are equal. Compare the trailing
+    ** fields.
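+    **
+    ** Editor's aside: field 0 was already compared by the fast path above,
+    ** so the general comparator is entered with bSkip=1 and resumes at
+    ** field 1, per the hypothetical shape below.
+    */
+#if 0
+    /* Lift to file scope to compile. */
+    static int demoCompareFrom(int iFirst, int nField, int (*xField)(int)){
+      int i, rc = 0;
+      for(i=iFirst; rc==0 && i<nField; i++) rc = xField(i);
+      return rc;
+    }
+#endif
+    /*
+    ** The skip-one comparison call follows.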
*/ + res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1); + }else{ + /* The first fields of the two keys are equal and there are no trailing + ** fields. Return pPKey2->default_rc in this case. */ + res = pPKey2->default_rc; + pPKey2->eqSeen = 1; + } + + assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) ); + return res; +} + +/* +** This function is an optimized version of sqlite3VdbeRecordCompare() +** that (a) the first field of pPKey2 is a string, that (b) the first field +** uses the collation sequence BINARY and (c) that the size-of-header varint +** at the start of (pKey1/nKey1) fits in a single byte. +*/ +static int vdbeRecordCompareString( + int nKey1, const void *pKey1, /* Left key */ + UnpackedRecord *pPKey2 /* Right key */ +){ + const u8 *aKey1 = (const u8*)pKey1; + int serial_type; + int res; + + assert( pPKey2->aMem[0].flags & MEM_Str ); + vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo); + getVarint32(&aKey1[1], serial_type); + if( serial_type<12 ){ + res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */ + }else if( !(serial_type & 0x01) ){ + res = pPKey2->r2; /* (pKey1/nKey1) is a blob */ + }else{ + int nCmp; + int nStr; + int szHdr = aKey1[0]; + + nStr = (serial_type-12) / 2; + if( (szHdr + nStr) > nKey1 ){ + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; + return 0; /* Corruption */ + } + nCmp = MIN( pPKey2->aMem[0].n, nStr ); + res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp); + + if( res==0 ){ + res = nStr - pPKey2->aMem[0].n; + if( res==0 ){ + if( pPKey2->nField>1 ){ + res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1); + }else{ + res = pPKey2->default_rc; + pPKey2->eqSeen = 1; + } + }else if( res>0 ){ + res = pPKey2->r2; + }else{ + res = pPKey2->r1; + } + }else if( res>0 ){ + res = pPKey2->r2; + }else{ + res = pPKey2->r1; + } + } + + assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) + || CORRUPT_DB + || pPKey2->pKeyInfo->db->mallocFailed + ); + return res; +} + +/* +** Return a pointer to an sqlite3VdbeRecordCompare() compatible function +** suitable for comparing serialized records to the unpacked record passed +** as the only argument. +*/ +SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){ + /* varintRecordCompareInt() and varintRecordCompareString() both assume + ** that the size-of-header varint that occurs at the start of each record + ** fits in a single byte (i.e. is 127 or less). varintRecordCompareInt() + ** also assumes that it is safe to overread a buffer by at least the + ** maximum possible legal header size plus 8 bytes. Because there is + ** guaranteed to be at least 74 (but not 136) bytes of padding following each + ** buffer passed to varintRecordCompareInt() this makes it convenient to + ** limit the size of the header to 64 bytes in cases where the first field + ** is an integer. + ** + ** The easiest way to enforce this limit is to consider only records with + ** 13 fields or less. If the first field is an integer, the maximum legal + ** header size is (12*5 + 1 + 1) bytes. 
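+**
+** Spelled out, the bound works as follows: with 13 fields, the first of
+** which is an integer, the header consists of 1 byte for the
+** size-of-header varint, 1 byte for the integer's serial type, and at
+** most 5 bytes for each of the 12 remaining serial-type varints:
+**
+**        1 + 1 + 12*5 = 62 bytes
+**
+** which stays within the 63-byte maximum header size that
+** vdbeRecordCompareInt() is safe to use with.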
+*/
+  if( (p->pKeyInfo->nField + p->pKeyInfo->nXField)<=13 ){
+    int flags = p->aMem[0].flags;
+    if( p->pKeyInfo->aSortOrder[0] ){
+      p->r1 = 1;
+      p->r2 = -1;
+    }else{
+      p->r1 = -1;
+      p->r2 = 1;
+    }
+    if( (flags & MEM_Int) ){
+      return vdbeRecordCompareInt;
+    }
+    testcase( flags & MEM_Real );
+    testcase( flags & MEM_Null );
+    testcase( flags & MEM_Blob );
+    if( (flags & (MEM_Real|MEM_Null|MEM_Blob))==0 && p->pKeyInfo->aColl[0]==0 ){
+      assert( flags & MEM_Str );
+      return vdbeRecordCompareString;
+    }
+  }
+
+  return sqlite3VdbeRecordCompare;
+}
+
+/*
+** pCur points at an index entry created using the OP_MakeRecord opcode.
+** Read the rowid (the last field in the record) and store it in *rowid.
+** Return SQLITE_OK if everything works, or an error code otherwise.
+**
+** pCur might be pointing to text obtained from a corrupt database file.
+** So the content cannot be trusted. Do appropriate checks on the content.
+*/
+SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
+  i64 nCellKey = 0;
+  int rc;
+  u32 szHdr;        /* Size of the header */
+  u32 typeRowid;    /* Serial type of the rowid */
+  u32 lenRowid;     /* Size of the rowid */
+  Mem m, v;
+
+  /* Get the size of the index entry.  Only index entries of less
+  ** than 2GiB are supported - anything larger must be database corruption.
+  ** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so
+  ** this code can safely assume that nCellKey is 32-bits
+  */
+  assert( sqlite3BtreeCursorIsValid(pCur) );
+  nCellKey = sqlite3BtreePayloadSize(pCur);
+  assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey );
+
+  /* Read in the complete content of the index entry */
+  sqlite3VdbeMemInit(&m, db, 0);
+  rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m);
+  if( rc ){
+    return rc;
+  }
+
+  /* The index entry must begin with a header size */
+  (void)getVarint32((u8*)m.z, szHdr);
+  testcase( szHdr==3 );
+  testcase( szHdr==m.n );
+  if( unlikely(szHdr<3 || (int)szHdr>m.n) ){
+    goto idx_rowid_corruption;
+  }
+
+  /* The last field of the index should be an integer - the ROWID.
+  ** Verify that the last entry really is an integer. */
+  (void)getVarint32((u8*)&m.z[szHdr-1], typeRowid);
+  testcase( typeRowid==1 );
+  testcase( typeRowid==2 );
+  testcase( typeRowid==3 );
+  testcase( typeRowid==4 );
+  testcase( typeRowid==5 );
+  testcase( typeRowid==6 );
+  testcase( typeRowid==8 );
+  testcase( typeRowid==9 );
+  if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){
+    goto idx_rowid_corruption;
+  }
+  lenRowid = sqlite3SmallTypeSizes[typeRowid];
+  testcase( (u32)m.n==szHdr+lenRowid );
+  if( unlikely((u32)m.n<szHdr+lenRowid) ){
+    goto idx_rowid_corruption;
+  }
+
+  /* Fetch the integer off the end of the index record */
+  sqlite3VdbeSerialGet((u8*)&m.z[m.n-lenRowid], typeRowid, &v);
+  *rowid = v.u.i;
+  sqlite3VdbeMemRelease(&m);
+  return SQLITE_OK;
+
+  /* Jump here if database corruption is detected after m has been
+  ** allocated.  Free the m object and return SQLITE_CORRUPT. */
+idx_rowid_corruption:
+  testcase( m.szMalloc!=0 );
+  sqlite3VdbeMemRelease(&m);
+  return SQLITE_CORRUPT_BKPT;
+}
+
+/*
+** Compare the key of the index entry that cursor pC is pointing to against
+** the key string in pUnpacked.  Write into *res a number
+** that is negative, zero, or positive if pC is less than, equal to,
+** or greater than pUnpacked.  Return SQLITE_OK on success.
+**
+** pUnpacked is either created without a rowid or is truncated so that it
+** omits the rowid at the end.  The rowid at the end of the index entry
+** is ignored as well.  Hence, this routine only compares the prefixes
+** of the keys prior to the final rowid, not the entire key.
+*/
+SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
+  sqlite3 *db,                     /* Database connection */
+  VdbeCursor *pC,                  /* The cursor to compare against */
+  UnpackedRecord *pUnpacked,       /* Unpacked version of key */
+  int *res                         /* Write the comparison result here */
+){
+  i64 nCellKey = 0;
+  int rc;
+  BtCursor *pCur;
+  Mem m;
+
+  assert( pC->eCurType==CURTYPE_BTREE );
+  pCur = pC->uc.pCursor;
+  assert( sqlite3BtreeCursorIsValid(pCur) );
+  nCellKey = sqlite3BtreePayloadSize(pCur);
+  /* nCellKey will always be between 0 and 0xffffffff because of the way
+  ** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */
+  if( nCellKey<=0 || nCellKey>0x7fffffff ){
+    *res = 0;
+    return SQLITE_CORRUPT_BKPT;
+  }
+  sqlite3VdbeMemInit(&m, db, 0);
+  rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m);
+  if( rc ){
+    return rc;
+  }
+  *res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked);
+  sqlite3VdbeMemRelease(&m);
+  return SQLITE_OK;
+}
+
+/*
+** This routine sets the value to be returned by subsequent calls to
+** sqlite3_changes() on the database handle 'db'.
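+**
+** The value maintained here is what the public sqlite3_changes() interface
+** reports.  A minimal sketch of how an application observes it (the SQL
+** text and table name are hypothetical):
+**
+**        sqlite3_stmt *pStmt;
+**        sqlite3_prepare_v2(db, "UPDATE t1 SET x=x+1", -1, &pStmt, 0);
+**        while( sqlite3_step(pStmt)==SQLITE_ROW ){}
+**        sqlite3_finalize(pStmt);
+**        printf("%d row(s) changed\n", sqlite3_changes(db));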
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){
+  assert( sqlite3_mutex_held(db->mutex) );
+  db->nChange = nChange;
+  db->nTotalChange += nChange;
+}
+
+/*
+** Set a flag in the vdbe to update the change counter when it is finalised
+** or reset.
+*/
+SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){
+  v->changeCntOn = 1;
+}
+
+/*
+** Mark every prepared statement associated with a database connection
+** as expired.
+**
+** An expired statement means that recompilation of the statement is
+** recommended.  Statements expire when things happen that make their
+** programs obsolete.  Removing user-defined functions or collating
+** sequences, or changing an authorization function are the types of
+** things that make prepared statements obsolete.
+*/
+SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db){
+  Vdbe *p;
+  for(p = db->pVdbe; p; p=p->pNext){
+    p->expired = 1;
+  }
+}
+
+/*
+** Return the database associated with the Vdbe.
+*/
+SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe *v){
+  return v->db;
+}
+
+/*
+** Return a pointer to an sqlite3_value structure containing the value bound
+** to parameter iVar of VM v.  Except, if the value is an SQL NULL, return
+** 0 instead.  Unless it is NULL, apply affinity aff (one of the SQLITE_AFF_*
+** constants) to the value before returning it.
+**
+** The returned value must be freed by the caller using sqlite3ValueFree().
+*/
+SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff){
+  assert( iVar>0 );
+  if( v ){
+    Mem *pMem = &v->aVar[iVar-1];
+    if( 0==(pMem->flags & MEM_Null) ){
+      sqlite3_value *pRet = sqlite3ValueNew(v->db);
+      if( pRet ){
+        sqlite3VdbeMemCopy((Mem *)pRet, pMem);
+        sqlite3ValueApplyAffinity(pRet, aff, SQLITE_UTF8);
+      }
+      return pRet;
+    }
+  }
+  return 0;
+}
+
+/*
+** Configure SQL variable iVar so that binding a new value to it signals
+** to sqlite3_reoptimize() that re-preparing the statement may result
+** in a better query plan.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){
+  assert( iVar>0 );
+  if( iVar>32 ){
+    v->expmask = 0xffffffff;
+  }else{
+    v->expmask |= ((u32)1 << (iVar-1));
+  }
+}
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored
+** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored
+** in memory obtained from sqlite3DbMalloc).
+*/
+SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
+  if( pVtab->zErrMsg ){
+    sqlite3 *db = p->db;
+    sqlite3DbFree(db, p->zErrMsg);
+    p->zErrMsg = sqlite3DbStrDup(db, pVtab->zErrMsg);
+    sqlite3_free(pVtab->zErrMsg);
+    pVtab->zErrMsg = 0;
+  }
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+
+/*
+** If the second argument is not NULL, release any allocations associated
+** with the memory cells in the p->aMem[] array. Also free the UnpackedRecord
+** structure itself, using sqlite3DbFree().
+**
+** This function is used to free UnpackedRecord structures allocated by
+** the vdbeUnpackRecord() function found in vdbeapi.c.
+*/
+static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){
+  if( p ){
+    int i;
+    for(i=0; i<nField; i++){
+      Mem *pMem = &p->aMem[i];
+      if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem);
+    }
+    sqlite3DbFree(db, p);
+  }
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+/*
+** Invoke the pre-update hook.
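+**
+** Applications arrange for this hook to run by registering a callback
+** with sqlite3_preupdate_hook(), which is available when the library is
+** built with SQLITE_ENABLE_PREUPDATE_HOOK.  A minimal sketch, using a
+** hypothetical callback name:
+**
+**        static void xPreUpdate(void *pCtx, sqlite3 *db, int op,
+**                               const char *zDb, const char *zTbl,
+**                               sqlite3_int64 iKey1, sqlite3_int64 iKey2){
+**          ...  (inspect the change via sqlite3_preupdate_old()/_new())
+**        }
+**        sqlite3_preupdate_hook(db, xPreUpdate, 0);
+**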
+** If this is an UPDATE or DELETE pre-update call, then the cursor passed
+** as the second argument should point to the row about to be updated or
+** deleted.  If the application calls sqlite3_preupdate_old(), the required
+** value will be read from the row the cursor points to.
+*/
+SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
+  Vdbe *v,                        /* Vdbe pre-update hook is invoked by */
+  VdbeCursor *pCsr,               /* Cursor to grab old.* values from */
+  int op,                         /* SQLITE_INSERT, UPDATE or DELETE */
+  const char *zDb,                /* Database name */
+  Table *pTab,                    /* Modified table */
+  i64 iKey1,                      /* Initial key value */
+  int iReg                        /* Register for new.* record */
+){
+  sqlite3 *db = v->db;
+  i64 iKey2;
+  PreUpdate preupdate;
+  const char *zTbl = pTab->zName;
+  static const u8 fakeSortOrder = 0;
+
+  assert( db->pPreUpdate==0 );
+  memset(&preupdate, 0, sizeof(PreUpdate));
+  if( HasRowid(pTab)==0 ){
+    iKey1 = iKey2 = 0;
+    preupdate.pPk = sqlite3PrimaryKeyIndex(pTab);
+  }else{
+    if( op==SQLITE_UPDATE ){
+      iKey2 = v->aMem[iReg].u.i;
+    }else{
+      iKey2 = iKey1;
+    }
+  }
+
+  assert( pCsr->nField==pTab->nCol
+       || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1)
+  );
+
+  preupdate.v = v;
+  preupdate.pCsr = pCsr;
+  preupdate.op = op;
+  preupdate.iNewReg = iReg;
+  preupdate.keyinfo.db = db;
+  preupdate.keyinfo.enc = ENC(db);
+  preupdate.keyinfo.nField = pTab->nCol;
+  preupdate.keyinfo.aSortOrder = (u8*)&fakeSortOrder;
+  preupdate.iKey1 = iKey1;
+  preupdate.iKey2 = iKey2;
+  preupdate.pTab = pTab;
+
+  db->pPreUpdate = &preupdate;
+  db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2);
+  db->pPreUpdate = 0;
+  sqlite3DbFree(db, preupdate.aRecord);
+  vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pUnpacked);
+  vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pNewUnpacked);
+  if( preupdate.aNew ){
+    int i;
+    for(i=0; i<pCsr->nField; i++){
+      sqlite3VdbeMemRelease(&preupdate.aNew[i]);
+    }
+    sqlite3DbFree(db, preupdate.aNew);
+  }
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+/************** End of vdbeaux.c *********************************************/
+/************** Begin file vdbeapi.c *****************************************/
+/*
+** 2004 May 26
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement APIs that are part of the
+** VDBE.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Return TRUE (non-zero) if the statement supplied as an argument needs
+** to be recompiled.  A statement needs to be recompiled whenever the
+** execution environment changes in a way that would alter the program
+** that sqlite3_prepare() generates.  For example, if new functions or
+** collating sequences are registered or if an authorizer function is
+** added or changed.
+*/
+SQLITE_API int sqlite3_expired(sqlite3_stmt *pStmt){
+  Vdbe *p = (Vdbe*)pStmt;
+  return p==0 || p->expired;
+}
+#endif
+
+/*
+** Check on a Vdbe to make sure it has not been finalized.  Log
+** an error and return true if it has been finalized (or is otherwise
+** invalid).  Return false if it is ok.
+*/ +static int vdbeSafety(Vdbe *p){ + if( p->db==0 ){ + sqlite3_log(SQLITE_MISUSE, "API called with finalized prepared statement"); + return 1; + }else{ + return 0; + } +} +static int vdbeSafetyNotNull(Vdbe *p){ + if( p==0 ){ + sqlite3_log(SQLITE_MISUSE, "API called with NULL prepared statement"); + return 1; + }else{ + return vdbeSafety(p); + } +} + +#ifndef SQLITE_OMIT_TRACE +/* +** Invoke the profile callback. This routine is only called if we already +** know that the profile callback is defined and needs to be invoked. +*/ +static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){ + sqlite3_int64 iNow; + sqlite3_int64 iElapse; + assert( p->startTime>0 ); + assert( db->xProfile!=0 || (db->mTrace & SQLITE_TRACE_PROFILE)!=0 ); + assert( db->init.busy==0 ); + assert( p->zSql!=0 ); + sqlite3OsCurrentTimeInt64(db->pVfs, &iNow); + iElapse = (iNow - p->startTime)*1000000; + if( db->xProfile ){ + db->xProfile(db->pProfileArg, p->zSql, iElapse); + } + if( db->mTrace & SQLITE_TRACE_PROFILE ){ + db->xTrace(SQLITE_TRACE_PROFILE, db->pTraceArg, p, (void*)&iElapse); + } + p->startTime = 0; +} +/* +** The checkProfileCallback(DB,P) macro checks to see if a profile callback +** is needed, and it invokes the callback if it is needed. +*/ +# define checkProfileCallback(DB,P) \ + if( ((P)->startTime)>0 ){ invokeProfileCallback(DB,P); } +#else +# define checkProfileCallback(DB,P) /*no-op*/ +#endif + +/* +** The following routine destroys a virtual machine that is created by +** the sqlite3_compile() routine. The integer returned is an SQLITE_ +** success/failure code that describes the result of executing the virtual +** machine. +** +** This routine sets the error code and string returned by +** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). +*/ +SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){ + int rc; + if( pStmt==0 ){ + /* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL + ** pointer is a harmless no-op. */ + rc = SQLITE_OK; + }else{ + Vdbe *v = (Vdbe*)pStmt; + sqlite3 *db = v->db; + if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT; + sqlite3_mutex_enter(db->mutex); + checkProfileCallback(db, v); + rc = sqlite3VdbeFinalize(v); + rc = sqlite3ApiExit(db, rc); + sqlite3LeaveMutexAndCloseZombie(db); + } + return rc; +} + +/* +** Terminate the current execution of an SQL statement and reset it +** back to its starting state so that it can be reused. A success code from +** the prior execution is returned. +** +** This routine sets the error code and string returned by +** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). +*/ +SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt){ + int rc; + if( pStmt==0 ){ + rc = SQLITE_OK; + }else{ + Vdbe *v = (Vdbe*)pStmt; + sqlite3 *db = v->db; + sqlite3_mutex_enter(db->mutex); + checkProfileCallback(db, v); + rc = sqlite3VdbeReset(v); + sqlite3VdbeRewind(v); + assert( (rc & (db->errMask))==rc ); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + } + return rc; +} + +/* +** Set all the parameters in the compiled SQL statement to NULL. 
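+**
+** A typical reuse cycle pairs this interface with sqlite3_reset(); a
+** minimal sketch (the parameter index and value are hypothetical):
+**
+**        sqlite3_bind_int(pStmt, 1, 42);
+**        while( sqlite3_step(pStmt)==SQLITE_ROW ){ ... }
+**        sqlite3_reset(pStmt);
+**        sqlite3_clear_bindings(pStmt);
+**
+** After the sqlite3_reset() call the statement can be stepped again;
+** after sqlite3_clear_bindings() every parameter is NULL until rebound.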
+*/
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
+  int i;
+  int rc = SQLITE_OK;
+  Vdbe *p = (Vdbe*)pStmt;
+#if SQLITE_THREADSAFE
+  sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex;
+#endif
+  sqlite3_mutex_enter(mutex);
+  for(i=0; i<p->nVar; i++){
+    sqlite3VdbeMemRelease(&p->aVar[i]);
+    p->aVar[i].flags = MEM_Null;
+  }
+  if( p->isPrepareV2 && p->expmask ){
+    p->expired = 1;
+  }
+  sqlite3_mutex_leave(mutex);
+  return rc;
+}
+
+
+/**************************** sqlite3_value_  *******************************
+** The following routines extract information from a Mem or sqlite3_value
+** structure.
+*/
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value *pVal){
+  Mem *p = (Mem*)pVal;
+  if( p->flags & (MEM_Blob|MEM_Str) ){
+    if( ExpandBlob(p)!=SQLITE_OK ){
+      assert( p->flags==MEM_Null && p->z==0 );
+      return 0;
+    }
+    p->flags |= MEM_Blob;
+    return p->n ? p->z : 0;
+  }else{
+    return sqlite3_value_text(pVal);
+  }
+}
+SQLITE_API int sqlite3_value_bytes(sqlite3_value *pVal){
+  return sqlite3ValueBytes(pVal, SQLITE_UTF8);
+}
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value *pVal){
+  return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE);
+}
+SQLITE_API double sqlite3_value_double(sqlite3_value *pVal){
+  return sqlite3VdbeRealValue((Mem*)pVal);
+}
+SQLITE_API int sqlite3_value_int(sqlite3_value *pVal){
+  return (int)sqlite3VdbeIntValue((Mem*)pVal);
+}
+SQLITE_API sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){
+  return sqlite3VdbeIntValue((Mem*)pVal);
+}
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value *pVal){
+  Mem *pMem = (Mem*)pVal;
+  return ((pMem->flags & MEM_Subtype) ? pMem->eSubtype : 0);
+}
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value *pVal){
+  return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value* pVal){
+  return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE);
+}
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value *pVal){
+  return sqlite3ValueText(pVal, SQLITE_UTF16BE);
+}
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value *pVal){
+  return sqlite3ValueText(pVal, SQLITE_UTF16LE);
+}
+#endif /* SQLITE_OMIT_UTF16 */
+/* EVIDENCE-OF: R-12793-43283 Every value in SQLite has one of five
+** fundamental datatypes: 64-bit signed integer 64-bit IEEE floating
+** point number string BLOB NULL
+*/
+SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){
+  static const u8 aType[] = {
+     SQLITE_BLOB,     /* 0x00 */
+     SQLITE_NULL,     /* 0x01 */
+     SQLITE_TEXT,     /* 0x02 */
+     SQLITE_NULL,     /* 0x03 */
+     SQLITE_INTEGER,  /* 0x04 */
+     SQLITE_NULL,     /* 0x05 */
+     SQLITE_INTEGER,  /* 0x06 */
+     SQLITE_NULL,     /* 0x07 */
+     SQLITE_FLOAT,    /* 0x08 */
+     SQLITE_NULL,     /* 0x09 */
+     SQLITE_FLOAT,    /* 0x0a */
+     SQLITE_NULL,     /* 0x0b */
+     SQLITE_INTEGER,  /* 0x0c */
+     SQLITE_NULL,     /* 0x0d */
+     SQLITE_INTEGER,  /* 0x0e */
+     SQLITE_NULL,     /* 0x0f */
+     SQLITE_BLOB,     /* 0x10 */
+     SQLITE_NULL,     /* 0x11 */
+     SQLITE_TEXT,     /* 0x12 */
+     SQLITE_NULL,     /* 0x13 */
+     SQLITE_INTEGER,  /* 0x14 */
+     SQLITE_NULL,     /* 0x15 */
+     SQLITE_INTEGER,  /* 0x16 */
+     SQLITE_NULL,     /* 0x17 */
+     SQLITE_FLOAT,    /* 0x18 */
+     SQLITE_NULL,     /* 0x19 */
+     SQLITE_FLOAT,    /* 0x1a */
+     SQLITE_NULL,     /* 0x1b */
+     SQLITE_INTEGER,  /* 0x1c */
+     SQLITE_NULL,     /* 0x1d */
+     SQLITE_INTEGER,  /* 0x1e */
+     SQLITE_NULL,     /* 0x1f */
+  };
+  return aType[pVal->flags&MEM_AffMask];
+}
+
+/* Make a copy of an sqlite3_value object
+*/
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value *pOrig){
+  sqlite3_value *pNew;
+  if( pOrig==0 ) return 0;
+  pNew = sqlite3_malloc( sizeof(*pNew) );
+  if( pNew==0 ) return 0;
+  memset(pNew, 0, sizeof(*pNew));
+  memcpy(pNew, pOrig, MEMCELLSIZE);
+  pNew->flags &= ~MEM_Dyn;
+  pNew->db = 0;
+  if( pNew->flags&(MEM_Str|MEM_Blob) ){
+    pNew->flags &= ~(MEM_Static|MEM_Dyn);
+    pNew->flags |= MEM_Ephem;
+    if( sqlite3VdbeMemMakeWriteable(pNew)!=SQLITE_OK ){
+      sqlite3ValueFree(pNew);
+      pNew = 0;
+    }
+  }
+  return pNew;
+}
+
+/* Destroy an sqlite3_value object previously obtained from
+** sqlite3_value_dup().
+*/
+SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){
+  sqlite3ValueFree(pOld);
+}
+
+
+/**************************** sqlite3_result_  *******************************
+** The following routines are used by user-defined functions to specify
+** the function result.
+**
+** The setStrOrError() function calls sqlite3VdbeMemSetStr() to store the
+** result as a string or blob.  But if the string or blob is too large, it
+** then sets the error code to SQLITE_TOOBIG.
+**
+** The invokeValueDestructor(P,X) routine invokes destructor function X()
+** on value P when P is not going to be used and needs to be destroyed.
+*/
+static void setResultStrOrError(
+  sqlite3_context *pCtx,  /* Function context */
+  const char *z,          /* String pointer */
+  int n,                  /* Bytes in string, or negative */
+  u8 enc,                 /* Encoding of z.  0 for BLOBs */
+  void (*xDel)(void*)     /* Destructor function */
+){
+  if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){
+    sqlite3_result_error_toobig(pCtx);
+  }
+}
+static int invokeValueDestructor(
+  const void *p,             /* Value to destroy */
+  void (*xDel)(void*),       /* The destructor */
+  sqlite3_context *pCtx      /* Set a SQLITE_TOOBIG error if not NULL */
+){
+  assert( xDel!=SQLITE_DYNAMIC );
+  if( xDel==0 ){
+    /* noop */
+  }else if( xDel==SQLITE_TRANSIENT ){
+    /* noop */
+  }else{
+    xDel((void*)p);
+  }
+  if( pCtx ) sqlite3_result_error_toobig(pCtx);
+  return SQLITE_TOOBIG;
+}
+SQLITE_API void sqlite3_result_blob(
+  sqlite3_context *pCtx,
+  const void *z,
+  int n,
+  void (*xDel)(void *)
+){
+  assert( n>=0 );
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  setResultStrOrError(pCtx, z, n, 0, xDel);
+}
+SQLITE_API void sqlite3_result_blob64(
+  sqlite3_context *pCtx,
+  const void *z,
+  sqlite3_uint64 n,
+  void (*xDel)(void *)
+){
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  assert( xDel!=SQLITE_DYNAMIC );
+  if( n>0x7fffffff ){
+    (void)invokeValueDestructor(z, xDel, pCtx);
+  }else{
+    setResultStrOrError(pCtx, z, (int)n, 0, xDel);
+  }
+}
+SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
+}
+SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  pCtx->isError = SQLITE_ERROR;
+  pCtx->fErrorOrAux = 1;
+  sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  pCtx->isError = SQLITE_ERROR;
+  pCtx->fErrorOrAux = 1;
+  sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
+}
+#endif
+SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
+}
+SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+  assert(
sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetInt64(pCtx->pOut, iVal); +} +SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetNull(pCtx->pOut); +} +SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){ + Mem *pOut = pCtx->pOut; + assert( sqlite3_mutex_held(pOut->db->mutex) ); + pOut->eSubtype = eSubtype & 0xff; + pOut->flags |= MEM_Subtype; +} +SQLITE_API void sqlite3_result_text( + sqlite3_context *pCtx, + const char *z, + int n, + void (*xDel)(void *) +){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); +} +SQLITE_API void sqlite3_result_text64( + sqlite3_context *pCtx, + const char *z, + sqlite3_uint64 n, + void (*xDel)(void *), + unsigned char enc +){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + assert( xDel!=SQLITE_DYNAMIC ); + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + if( n>0x7fffffff ){ + (void)invokeValueDestructor(z, xDel, pCtx); + }else{ + setResultStrOrError(pCtx, z, (int)n, enc, xDel); + } +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API void sqlite3_result_text16( + sqlite3_context *pCtx, + const void *z, + int n, + void (*xDel)(void *) +){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); +} +SQLITE_API void sqlite3_result_text16be( + sqlite3_context *pCtx, + const void *z, + int n, + void (*xDel)(void *) +){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); +} +SQLITE_API void sqlite3_result_text16le( + sqlite3_context *pCtx, + const void *z, + int n, + void (*xDel)(void *) +){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); +} +#endif /* SQLITE_OMIT_UTF16 */ +SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemCopy(pCtx->pOut, pValue); +} +SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n); +} +SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ + Mem *pOut = pCtx->pOut; + assert( sqlite3_mutex_held(pOut->db->mutex) ); + if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){ + return SQLITE_TOOBIG; + } + sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n); + return SQLITE_OK; +} +SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ + pCtx->isError = errCode; + pCtx->fErrorOrAux = 1; +#ifdef SQLITE_DEBUG + if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode; +#endif + if( pCtx->pOut->flags & MEM_Null ){ + sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1, + SQLITE_UTF8, SQLITE_STATIC); + } +} + +/* Force an SQLITE_TOOBIG error. */ +SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){ + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + pCtx->isError = SQLITE_TOOBIG; + pCtx->fErrorOrAux = 1; + sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1, + SQLITE_UTF8, SQLITE_STATIC); +} + +/* An SQLITE_NOMEM error. 
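+** A user-defined function implementation would typically raise this error
+** when its own allocation fails.  An illustrative sketch (the function
+** name is hypothetical):
+**
+**        static void myFunc(sqlite3_context *ctx, int n, sqlite3_value **v){
+**          char *z = sqlite3_malloc(100);
+**          if( z==0 ){
+**            sqlite3_result_error_nomem(ctx);
+**            return;
+**          }
+**          ...  (build a result in z)
+**          sqlite3_result_text(ctx, z, -1, sqlite3_free);
+**        }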
+*/
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
+  assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
+  sqlite3VdbeMemSetNull(pCtx->pOut);
+  pCtx->isError = SQLITE_NOMEM_BKPT;
+  pCtx->fErrorOrAux = 1;
+  sqlite3OomFault(pCtx->pOut->db);
+}
+
+/*
+** This function is called after a transaction has been committed. It
+** invokes callbacks registered with sqlite3_wal_hook() as required.
+*/
+static int doWalCallbacks(sqlite3 *db){
+  int rc = SQLITE_OK;
+#ifndef SQLITE_OMIT_WAL
+  int i;
+  for(i=0; i<db->nDb; i++){
+    Btree *pBt = db->aDb[i].pBt;
+    if( pBt ){
+      int nEntry;
+      sqlite3BtreeEnter(pBt);
+      nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt));
+      sqlite3BtreeLeave(pBt);
+      if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){
+        rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zDbSName, nEntry);
+      }
+    }
+  }
+#endif
+  return rc;
+}
+
+
+/*
+** Execute the statement pStmt, either until a row of data is ready, the
+** statement is completely executed or an error occurs.
+**
+** This routine implements the bulk of the logic behind the sqlite3_step()
+** API.  The only thing omitted is the automatic recompile if a
+** schema change has occurred.  That detail is handled by the
+** outer sqlite3_step() wrapper procedure.
+*/
+static int sqlite3Step(Vdbe *p){
+  sqlite3 *db;
+  int rc;
+
+  assert(p);
+  if( p->magic!=VDBE_MAGIC_RUN ){
+    /* We used to require that sqlite3_reset() be called before retrying
+    ** sqlite3_step() after any error or after SQLITE_DONE.  But beginning
+    ** with version 3.7.0, we changed this so that sqlite3_reset() would
+    ** be called automatically instead of throwing the SQLITE_MISUSE error.
+    ** This "automatic-reset" change is not technically an incompatibility,
+    ** since any application that receives an SQLITE_MISUSE is broken by
+    ** definition.
+    **
+    ** Nevertheless, some published applications that were originally written
+    ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE
+    ** returns, and those were broken by the automatic-reset change.  As a
+    ** work-around, the SQLITE_OMIT_AUTORESET compile-time option restores
+    ** the legacy behavior of returning SQLITE_MISUSE for cases where the
+    ** previous sqlite3_step() returned something other than a SQLITE_LOCKED
+    ** or SQLITE_BUSY error.
+    */
+#ifdef SQLITE_OMIT_AUTORESET
+    if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+      sqlite3_reset((sqlite3_stmt*)p);
+    }else{
+      return SQLITE_MISUSE_BKPT;
+    }
+#else
+    sqlite3_reset((sqlite3_stmt*)p);
+#endif
+  }
+
+  /* Check that malloc() has not failed. If it has, return early. */
+  db = p->db;
+  if( db->mallocFailed ){
+    p->rc = SQLITE_NOMEM;
+    return SQLITE_NOMEM_BKPT;
+  }
+
+  if( p->pc<=0 && p->expired ){
+    p->rc = SQLITE_SCHEMA;
+    rc = SQLITE_ERROR;
+    goto end_of_step;
+  }
+  if( p->pc<0 ){
+    /* If there are no other statements currently running, then
+    ** reset the interrupt flag.  This prevents a call to sqlite3_interrupt
+    ** from interrupting a statement that has not yet started.
+ */ + if( db->nVdbeActive==0 ){ + db->u1.isInterrupted = 0; + } + + assert( db->nVdbeWrite>0 || db->autoCommit==0 + || (db->nDeferredCons==0 && db->nDeferredImmCons==0) + ); + +#ifndef SQLITE_OMIT_TRACE + if( (db->xProfile || (db->mTrace & SQLITE_TRACE_PROFILE)!=0) + && !db->init.busy && p->zSql ){ + sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime); + }else{ + assert( p->startTime==0 ); + } +#endif + + db->nVdbeActive++; + if( p->readOnly==0 ) db->nVdbeWrite++; + if( p->bIsReader ) db->nVdbeRead++; + p->pc = 0; + } +#ifdef SQLITE_DEBUG + p->rcApp = SQLITE_OK; +#endif +#ifndef SQLITE_OMIT_EXPLAIN + if( p->explain ){ + rc = sqlite3VdbeList(p); + }else +#endif /* SQLITE_OMIT_EXPLAIN */ + { + db->nVdbeExec++; + rc = sqlite3VdbeExec(p); + db->nVdbeExec--; + } + +#ifndef SQLITE_OMIT_TRACE + /* If the statement completed successfully, invoke the profile callback */ + if( rc!=SQLITE_ROW ) checkProfileCallback(db, p); +#endif + + if( rc==SQLITE_DONE ){ + assert( p->rc==SQLITE_OK ); + p->rc = doWalCallbacks(db); + if( p->rc!=SQLITE_OK ){ + rc = SQLITE_ERROR; + } + } + + db->errCode = rc; + if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){ + p->rc = SQLITE_NOMEM_BKPT; + } +end_of_step: + /* At this point local variable rc holds the value that should be + ** returned if this statement was compiled using the legacy + ** sqlite3_prepare() interface. According to the docs, this can only + ** be one of the values in the first assert() below. Variable p->rc + ** contains the value that would be returned if sqlite3_finalize() + ** were called on statement p. + */ + assert( rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR + || (rc&0xff)==SQLITE_BUSY || rc==SQLITE_MISUSE + ); + assert( (p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE) || p->rc==p->rcApp ); + if( p->isPrepareV2 && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ + /* If this statement was prepared using sqlite3_prepare_v2(), and an + ** error has occurred, then return the error code in p->rc to the + ** caller. Set the error code in the database handle to the same value. + */ + rc = sqlite3VdbeTransferError(p); + } + return (rc&db->errMask); +} + +/* +** This is the top-level implementation of sqlite3_step(). Call +** sqlite3Step() to do most of the work. If a schema error occurs, +** call sqlite3Reprepare() and try again. +*/ +SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){ + int rc = SQLITE_OK; /* Result from sqlite3Step() */ + int rc2 = SQLITE_OK; /* Result from sqlite3Reprepare() */ + Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */ + int cnt = 0; /* Counter to prevent infinite loop of reprepares */ + sqlite3 *db; /* The database connection */ + + if( vdbeSafetyNotNull(v) ){ + return SQLITE_MISUSE_BKPT; + } + db = v->db; + sqlite3_mutex_enter(db->mutex); + v->doingRerun = 0; + while( (rc = sqlite3Step(v))==SQLITE_SCHEMA + && cnt++ < SQLITE_MAX_SCHEMA_RETRY ){ + int savedPc = v->pc; + rc2 = rc = sqlite3Reprepare(v); + if( rc!=SQLITE_OK) break; + sqlite3_reset(pStmt); + if( savedPc>=0 ) v->doingRerun = 1; + assert( v->expired==0 ); + } + if( rc2!=SQLITE_OK ){ + /* This case occurs after failing to recompile an sql statement. + ** The error message from the SQL compiler has already been loaded + ** into the database handle. This block copies the error message + ** from the database handle into the statement and sets the statement + ** program counter to 0 to ensure that when the statement is + ** finalized or reset the parser error message is available via + ** sqlite3_errmsg() and sqlite3_errcode(). 
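+**
+** From the application's point of view, this is what makes the following
+** diagnostic idiom work after a failed step (an illustrative sketch):
+**
+**        if( sqlite3_step(pStmt)==SQLITE_ERROR ){
+**          fprintf(stderr, "step failed: %s\n", sqlite3_errmsg(db));
+**        }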
+ */ + const char *zErr = (const char *)sqlite3_value_text(db->pErr); + sqlite3DbFree(db, v->zErrMsg); + if( !db->mallocFailed ){ + v->zErrMsg = sqlite3DbStrDup(db, zErr); + v->rc = rc2; + } else { + v->zErrMsg = 0; + v->rc = rc = SQLITE_NOMEM_BKPT; + } + } + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + + +/* +** Extract the user data from a sqlite3_context structure and return a +** pointer to it. +*/ +SQLITE_API void *sqlite3_user_data(sqlite3_context *p){ + assert( p && p->pFunc ); + return p->pFunc->pUserData; +} + +/* +** Extract the user data from a sqlite3_context structure and return a +** pointer to it. +** +** IMPLEMENTATION-OF: R-46798-50301 The sqlite3_context_db_handle() interface +** returns a copy of the pointer to the database connection (the 1st +** parameter) of the sqlite3_create_function() and +** sqlite3_create_function16() routines that originally registered the +** application defined function. +*/ +SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ + assert( p && p->pOut ); + return p->pOut->db; +} + +/* +** Return the current time for a statement. If the current time +** is requested more than once within the same run of a single prepared +** statement, the exact same time is returned for each invocation regardless +** of the amount of time that elapses between invocations. In other words, +** the time returned is always the time of the first call. +*/ +SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){ + int rc; +#ifndef SQLITE_ENABLE_STAT3_OR_STAT4 + sqlite3_int64 *piTime = &p->pVdbe->iCurrentTime; + assert( p->pVdbe!=0 ); +#else + sqlite3_int64 iTime = 0; + sqlite3_int64 *piTime = p->pVdbe!=0 ? &p->pVdbe->iCurrentTime : &iTime; +#endif + if( *piTime==0 ){ + rc = sqlite3OsCurrentTimeInt64(p->pOut->db->pVfs, piTime); + if( rc ) *piTime = 0; + } + return *piTime; +} + +/* +** The following is the implementation of an SQL function that always +** fails with an error message stating that the function is used in the +** wrong context. The sqlite3_overload_function() API might construct +** SQL function that use this routine so that the functions will exist +** for name resolution but are actually overloaded by the xFindFunction +** method of virtual tables. +*/ +SQLITE_PRIVATE void sqlite3InvalidFunction( + sqlite3_context *context, /* The function calling context */ + int NotUsed, /* Number of arguments to the function */ + sqlite3_value **NotUsed2 /* Value of each argument */ +){ + const char *zName = context->pFunc->zName; + char *zErr; + UNUSED_PARAMETER2(NotUsed, NotUsed2); + zErr = sqlite3_mprintf( + "unable to use function %s in the requested context", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); +} + +/* +** Create a new aggregate context for p and return a pointer to +** its pMem->z element. +*/ +static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){ + Mem *pMem = p->pMem; + assert( (pMem->flags & MEM_Agg)==0 ); + if( nByte<=0 ){ + sqlite3VdbeMemSetNull(pMem); + pMem->z = 0; + }else{ + sqlite3VdbeMemClearAndResize(pMem, nByte); + pMem->flags = MEM_Agg; + pMem->u.pDef = p->pFunc; + if( pMem->z ){ + memset(pMem->z, 0, nByte); + } + } + return (void*)pMem->z; +} + +/* +** Allocate or return the aggregate context for a user function. A new +** context is allocated on the first call. Subsequent calls return the +** same context that was returned on prior calls. 
+*/ +SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){ + assert( p && p->pFunc && p->pFunc->xFinalize ); + assert( sqlite3_mutex_held(p->pOut->db->mutex) ); + testcase( nByte<0 ); + if( (p->pMem->flags & MEM_Agg)==0 ){ + return createAggContext(p, nByte); + }else{ + return (void*)p->pMem->z; + } +} + +/* +** Return the auxiliary data pointer, if any, for the iArg'th argument to +** the user-function defined by pCtx. +*/ +SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ + AuxData *pAuxData; + + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); +#if SQLITE_ENABLE_STAT3_OR_STAT4 + if( pCtx->pVdbe==0 ) return 0; +#else + assert( pCtx->pVdbe!=0 ); +#endif + for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ + if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; + } + + return (pAuxData ? pAuxData->pAux : 0); +} + +/* +** Set the auxiliary data pointer and delete function, for the iArg'th +** argument to the user-function defined by pCtx. Any previous value is +** deleted by calling the delete function specified when it was set. +*/ +SQLITE_API void sqlite3_set_auxdata( + sqlite3_context *pCtx, + int iArg, + void *pAux, + void (*xDelete)(void*) +){ + AuxData *pAuxData; + Vdbe *pVdbe = pCtx->pVdbe; + + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); + if( iArg<0 ) goto failed; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( pVdbe==0 ) goto failed; +#else + assert( pVdbe!=0 ); +#endif + + for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ + if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; + } + if( pAuxData==0 ){ + pAuxData = sqlite3DbMallocZero(pVdbe->db, sizeof(AuxData)); + if( !pAuxData ) goto failed; + pAuxData->iOp = pCtx->iOp; + pAuxData->iArg = iArg; + pAuxData->pNext = pVdbe->pAuxData; + pVdbe->pAuxData = pAuxData; + if( pCtx->fErrorOrAux==0 ){ + pCtx->isError = 0; + pCtx->fErrorOrAux = 1; + } + }else if( pAuxData->xDelete ){ + pAuxData->xDelete(pAuxData->pAux); + } + + pAuxData->pAux = pAux; + pAuxData->xDelete = xDelete; + return; + +failed: + if( xDelete ){ + xDelete(pAux); + } +} + +#ifndef SQLITE_OMIT_DEPRECATED +/* +** Return the number of times the Step function of an aggregate has been +** called. +** +** This function is deprecated. Do not use it for new code. It is +** provide only to avoid breaking legacy code. New aggregate function +** implementations should keep their own counts within their aggregate +** context. +*/ +SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){ + assert( p && p->pMem && p->pFunc && p->pFunc->xFinalize ); + return p->pMem->n; +} +#endif + +/* +** Return the number of columns in the result set for the statement pStmt. +*/ +SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){ + Vdbe *pVm = (Vdbe *)pStmt; + return pVm ? pVm->nResColumn : 0; +} + +/* +** Return the number of values available from the current row of the +** currently executing statement pStmt. +*/ +SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){ + Vdbe *pVm = (Vdbe *)pStmt; + if( pVm==0 || pVm->pResultSet==0 ) return 0; + return pVm->nResColumn; +} + +/* +** Return a pointer to static memory containing an SQL NULL value. +*/ +static const Mem *columnNullValue(void){ + /* Even though the Mem structure contains an element + ** of type i64, on certain architectures (x86) with certain compiler + ** switches (-Os), gcc may align this Mem object on a 4-byte boundary + ** instead of an 8-byte one. 
+** This all works fine, except that when
+** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s
+** that a Mem structure is located on an 8-byte boundary.  To prevent
+** these assert()s from failing, when building with SQLITE_DEBUG defined
+** using gcc, we force nullMem to be 8-byte aligned using the magical
+** __attribute__((aligned(8))) macro.  */
+  static const Mem nullMem
+#if defined(SQLITE_DEBUG) && defined(__GNUC__)
+    __attribute__((aligned(8)))
+#endif
+    = {
+        /* .u          = */ {0},
+        /* .flags      = */ (u16)MEM_Null,
+        /* .enc        = */ (u8)0,
+        /* .eSubtype   = */ (u8)0,
+        /* .n          = */ (int)0,
+        /* .z          = */ (char*)0,
+        /* .zMalloc    = */ (char*)0,
+        /* .szMalloc   = */ (int)0,
+        /* .uTemp      = */ (u32)0,
+        /* .db         = */ (sqlite3*)0,
+        /* .xDel       = */ (void(*)(void*))0,
+#ifdef SQLITE_DEBUG
+        /* .pScopyFrom = */ (Mem*)0,
+        /* .pFiller    = */ (void*)0,
+#endif
+      };
+  return &nullMem;
+}
+
+/*
+** Check to see if column iCol of the given statement is valid.  If
+** it is, return a pointer to the Mem for the value of that column.
+** If iCol is not valid, return a pointer to a Mem which has a value
+** of NULL.
+*/
+static Mem *columnMem(sqlite3_stmt *pStmt, int i){
+  Vdbe *pVm;
+  Mem *pOut;
+
+  pVm = (Vdbe *)pStmt;
+  if( pVm==0 ) return (Mem*)columnNullValue();
+  assert( pVm->db );
+  sqlite3_mutex_enter(pVm->db->mutex);
+  if( pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){
+    pOut = &pVm->pResultSet[i];
+  }else{
+    sqlite3Error(pVm->db, SQLITE_RANGE);
+    pOut = (Mem*)columnNullValue();
+  }
+  return pOut;
+}
+
+/*
+** This function is called after invoking an sqlite3_value_XXX function on a
+** column value (i.e. a value returned by evaluating an SQL expression in the
+** select list of a SELECT statement) that may cause a malloc() failure. If
+** malloc() has failed, the thread's mallocFailed flag is cleared and the
+** result code of statement pStmt is set to SQLITE_NOMEM.
+**
+** Specifically, this is called from within:
+**
+**     sqlite3_column_int()
+**     sqlite3_column_int64()
+**     sqlite3_column_text()
+**     sqlite3_column_text16()
+**     sqlite3_column_real()
+**     sqlite3_column_bytes()
+**     sqlite3_column_bytes16()
+**     sqlite3_column_blob()
+*/
+static void columnMallocFailure(sqlite3_stmt *pStmt)
+{
+  /* If malloc() failed during an encoding conversion within an
+  ** sqlite3_column_XXX API, then set the return code of the statement to
+  ** SQLITE_NOMEM. The next call to _step() (if any) will return SQLITE_ERROR
+  ** and _finalize() will return NOMEM.
+  */
+  Vdbe *p = (Vdbe *)pStmt;
+  if( p ){
+    assert( p->db!=0 );
+    assert( sqlite3_mutex_held(p->db->mutex) );
+    p->rc = sqlite3ApiExit(p->db, p->rc);
+    sqlite3_mutex_leave(p->db->mutex);
+  }
+}
+
+/**************************** sqlite3_column_  *******************************
+** The following routines are used to access elements of the current row
+** in the result set.
+*/
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
+  const void *val;
+  val = sqlite3_value_blob( columnMem(pStmt,i) );
+  /* Even though there is no encoding conversion, value_blob() might
+  ** need to call malloc() to expand the result of a zeroblob()
+  ** expression.
+  */
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
+  int val = sqlite3_value_bytes( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
+  int val = sqlite3_value_bytes16( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API double sqlite3_column_double(sqlite3_stmt *pStmt, int i){
+  double val = sqlite3_value_double( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API int sqlite3_column_int(sqlite3_stmt *pStmt, int i){
+  int val = sqlite3_value_int( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API sqlite_int64 sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
+  sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt *pStmt, int i){
+  const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){
+  Mem *pOut = columnMem(pStmt, i);
+  if( pOut->flags&MEM_Static ){
+    pOut->flags &= ~MEM_Static;
+    pOut->flags |= MEM_Ephem;
+  }
+  columnMallocFailure(pStmt);
+  return (sqlite3_value *)pOut;
+}
+#ifndef SQLITE_OMIT_UTF16
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
+  const void *val = sqlite3_value_text16( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return val;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
+  int iType = sqlite3_value_type( columnMem(pStmt,i) );
+  columnMallocFailure(pStmt);
+  return iType;
+}
+
+/*
+** Convert the N-th element of pStmt->pColName[] into a string using
+** xFunc() then return that string.  If N is out of range, return 0.
+**
+** There are up to 5 names for each column.  useType determines which
+** name is returned.  Here are the names:
+**
+**    0      The column name as it should be displayed for output
+**    1      The datatype name for the column
+**    2      The name of the database that the column derives from
+**    3      The name of the table that the column derives from
+**    4      The name of the table column that the result column derives from
+**
+** If the result is not a simple column reference (if it is an expression
+** or a constant) then useTypes 2, 3, and 4 return NULL.
+*/
+static const void *columnName(
+  sqlite3_stmt *pStmt,
+  int N,
+  const void *(*xFunc)(Mem*),
+  int useType
+){
+  const void *ret;
+  Vdbe *p;
+  int n;
+  sqlite3 *db;
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( pStmt==0 ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  ret = 0;
+  p = (Vdbe *)pStmt;
+  db = p->db;
+  assert( db!=0 );
+  n = sqlite3_column_count(pStmt);
+  if( N<n && N>=0 ){
+    N += useType*n;
+    sqlite3_mutex_enter(db->mutex);
+    assert( db->mallocFailed==0 );
+    ret = xFunc(&p->aColName[N]);
+    /* A malloc may have failed inside of the xFunc() call. If this
+    ** is the case, clear the mallocFailed flag and return NULL.
+    */
+    if( db->mallocFailed ){
+      sqlite3OomClear(db);
+      ret = 0;
+    }
+    sqlite3_mutex_leave(db->mutex);
+  }
+  return ret;
+}
+
+/*
+** Return the name of the Nth column of the result set returned by SQL
+** statement pStmt.
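+**
+** These variants back the public metadata accessors.  For example (an
+** illustrative sketch; note that the declared type may be NULL for an
+** expression column):
+**
+**        const char *zName = sqlite3_column_name(pStmt, 0);
+**        const char *zDecl = sqlite3_column_decltype(pStmt, 0);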
+*/ +SQLITE_API const char *sqlite3_column_name(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_NAME); +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_NAME); +} +#endif + +/* +** Constraint: If you have ENABLE_COLUMN_METADATA then you must +** not define OMIT_DECLTYPE. +*/ +#if defined(SQLITE_OMIT_DECLTYPE) && defined(SQLITE_ENABLE_COLUMN_METADATA) +# error "Must not define both SQLITE_OMIT_DECLTYPE \ + and SQLITE_ENABLE_COLUMN_METADATA" +#endif + +#ifndef SQLITE_OMIT_DECLTYPE +/* +** Return the column declaration type (if applicable) of the 'i'th column +** of the result set of SQL statement pStmt. +*/ +SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DECLTYPE); +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE); +} +#endif /* SQLITE_OMIT_UTF16 */ +#endif /* SQLITE_OMIT_DECLTYPE */ + +#ifdef SQLITE_ENABLE_COLUMN_METADATA +/* +** Return the name of the database from which a result column derives. +** NULL is returned if the result column is an expression or constant or +** anything else which is not an unambiguous reference to a database column. +*/ +SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DATABASE); +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DATABASE); +} +#endif /* SQLITE_OMIT_UTF16 */ + +/* +** Return the name of the table from which a result column derives. +** NULL is returned if the result column is an expression or constant or +** anything else which is not an unambiguous reference to a database column. +*/ +SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_TABLE); +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_TABLE); +} +#endif /* SQLITE_OMIT_UTF16 */ + +/* +** Return the name of the table column from which a result column derives. +** NULL is returned if the result column is an expression or constant or +** anything else which is not an unambiguous reference to a database column. +*/ +SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_COLUMN); +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){ + return columnName( + pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_COLUMN); +} +#endif /* SQLITE_OMIT_UTF16 */ +#endif /* SQLITE_ENABLE_COLUMN_METADATA */ + + +/******************************* sqlite3_bind_ *************************** +** +** Routines used to attach values to wildcards in a compiled SQL statement. +*/ +/* +** Unbind the value bound to variable i in virtual machine p. 
This is the +** the same as binding a NULL value to the column. If the "i" parameter is +** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK. +** +** A successful evaluation of this routine acquires the mutex on p. +** the mutex is released if any kind of error occurs. +** +** The error code stored in database p->db is overwritten with the return +** value in any case. +*/ +static int vdbeUnbind(Vdbe *p, int i){ + Mem *pVar; + if( vdbeSafetyNotNull(p) ){ + return SQLITE_MISUSE_BKPT; + } + sqlite3_mutex_enter(p->db->mutex); + if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ + sqlite3Error(p->db, SQLITE_MISUSE); + sqlite3_mutex_leave(p->db->mutex); + sqlite3_log(SQLITE_MISUSE, + "bind on a busy prepared statement: [%s]", p->zSql); + return SQLITE_MISUSE_BKPT; + } + if( i<1 || i>p->nVar ){ + sqlite3Error(p->db, SQLITE_RANGE); + sqlite3_mutex_leave(p->db->mutex); + return SQLITE_RANGE; + } + i--; + pVar = &p->aVar[i]; + sqlite3VdbeMemRelease(pVar); + pVar->flags = MEM_Null; + sqlite3Error(p->db, SQLITE_OK); + + /* If the bit corresponding to this variable in Vdbe.expmask is set, then + ** binding a new value to this variable invalidates the current query plan. + ** + ** IMPLEMENTATION-OF: R-48440-37595 If the specific value bound to host + ** parameter in the WHERE clause might influence the choice of query plan + ** for a statement, then the statement will be automatically recompiled, + ** as if there had been a schema change, on the first sqlite3_step() call + ** following any change to the bindings of that parameter. + */ + if( p->isPrepareV2 && + ((i<32 && p->expmask & ((u32)1 << i)) || p->expmask==0xffffffff) + ){ + p->expired = 1; + } + return SQLITE_OK; +} + +/* +** Bind a text or BLOB value. +*/ +static int bindText( + sqlite3_stmt *pStmt, /* The statement to bind against */ + int i, /* Index of the parameter to bind */ + const void *zData, /* Pointer to the data to be bound */ + int nData, /* Number of bytes of data to be bound */ + void (*xDel)(void*), /* Destructor for the data */ + u8 encoding /* Encoding for the data */ +){ + Vdbe *p = (Vdbe *)pStmt; + Mem *pVar; + int rc; + + rc = vdbeUnbind(p, i); + if( rc==SQLITE_OK ){ + if( zData!=0 ){ + pVar = &p->aVar[i-1]; + rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); + if( rc==SQLITE_OK && encoding!=0 ){ + rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); + } + sqlite3Error(p->db, rc); + rc = sqlite3ApiExit(p->db, rc); + } + sqlite3_mutex_leave(p->db->mutex); + }else if( xDel!=SQLITE_STATIC && xDel!=SQLITE_TRANSIENT ){ + xDel((void*)zData); + } + return rc; +} + + +/* +** Bind a blob value to an SQL statement variable. 
+*/ +SQLITE_API int sqlite3_bind_blob( + sqlite3_stmt *pStmt, + int i, + const void *zData, + int nData, + void (*xDel)(void*) +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( nData<0 ) return SQLITE_MISUSE_BKPT; +#endif + return bindText(pStmt, i, zData, nData, xDel, 0); +} +SQLITE_API int sqlite3_bind_blob64( + sqlite3_stmt *pStmt, + int i, + const void *zData, + sqlite3_uint64 nData, + void (*xDel)(void*) +){ + assert( xDel!=SQLITE_DYNAMIC ); + if( nData>0x7fffffff ){ + return invokeValueDestructor(zData, xDel, 0); + }else{ + return bindText(pStmt, i, zData, (int)nData, xDel, 0); + } +} +SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ + int rc; + Vdbe *p = (Vdbe *)pStmt; + rc = vdbeUnbind(p, i); + if( rc==SQLITE_OK ){ + sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); + sqlite3_mutex_leave(p->db->mutex); + } + return rc; +} +SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){ + return sqlite3_bind_int64(p, i, (i64)iValue); +} +SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){ + int rc; + Vdbe *p = (Vdbe *)pStmt; + rc = vdbeUnbind(p, i); + if( rc==SQLITE_OK ){ + sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); + sqlite3_mutex_leave(p->db->mutex); + } + return rc; +} +SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ + int rc; + Vdbe *p = (Vdbe*)pStmt; + rc = vdbeUnbind(p, i); + if( rc==SQLITE_OK ){ + sqlite3_mutex_leave(p->db->mutex); + } + return rc; +} +SQLITE_API int sqlite3_bind_text( + sqlite3_stmt *pStmt, + int i, + const char *zData, + int nData, + void (*xDel)(void*) +){ + return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8); +} +SQLITE_API int sqlite3_bind_text64( + sqlite3_stmt *pStmt, + int i, + const char *zData, + sqlite3_uint64 nData, + void (*xDel)(void*), + unsigned char enc +){ + assert( xDel!=SQLITE_DYNAMIC ); + if( nData>0x7fffffff ){ + return invokeValueDestructor(zData, xDel, 0); + }else{ + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + return bindText(pStmt, i, zData, (int)nData, xDel, enc); + } +} +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API int sqlite3_bind_text16( + sqlite3_stmt *pStmt, + int i, + const void *zData, + int nData, + void (*xDel)(void*) +){ + return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE); +} +#endif /* SQLITE_OMIT_UTF16 */ +SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){ + int rc; + switch( sqlite3_value_type((sqlite3_value*)pValue) ){ + case SQLITE_INTEGER: { + rc = sqlite3_bind_int64(pStmt, i, pValue->u.i); + break; + } + case SQLITE_FLOAT: { + rc = sqlite3_bind_double(pStmt, i, pValue->u.r); + break; + } + case SQLITE_BLOB: { + if( pValue->flags & MEM_Zero ){ + rc = sqlite3_bind_zeroblob(pStmt, i, pValue->u.nZero); + }else{ + rc = sqlite3_bind_blob(pStmt, i, pValue->z, pValue->n,SQLITE_TRANSIENT); + } + break; + } + case SQLITE_TEXT: { + rc = bindText(pStmt,i, pValue->z, pValue->n, SQLITE_TRANSIENT, + pValue->enc); + break; + } + default: { + rc = sqlite3_bind_null(pStmt, i); + break; + } + } + return rc; +} +SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ + int rc; + Vdbe *p = (Vdbe *)pStmt; + rc = vdbeUnbind(p, i); + if( rc==SQLITE_OK ){ + sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); + sqlite3_mutex_leave(p->db->mutex); + } + return rc; +} +SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){ + int rc; + Vdbe *p = (Vdbe *)pStmt; + sqlite3_mutex_enter(p->db->mutex); + if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){ + rc = SQLITE_TOOBIG; 
+  }else{
+    assert( (n & 0x7FFFFFFF)==n );
+    rc = sqlite3_bind_zeroblob(pStmt, i, n);
+  }
+  rc = sqlite3ApiExit(p->db, rc);
+  sqlite3_mutex_leave(p->db->mutex);
+  return rc;
+}
+
+/*
+** Return the number of wildcards that can be potentially bound to.
+** This routine is added to support DBD::SQLite.
+*/
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
+  Vdbe *p = (Vdbe*)pStmt;
+  return p ? p->nVar : 0;
+}
+
+/*
+** Return the name of a wildcard parameter.  Return NULL if the index
+** is out of range or if the wildcard is unnamed.
+**
+** The result is always UTF-8.
+*/
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
+  Vdbe *p = (Vdbe*)pStmt;
+  if( p==0 ) return 0;
+  return sqlite3VListNumToName(p->pVList, i);
+}
+
+/*
+** Given a wildcard parameter name, return the index of the variable
+** with that name.  If there is no variable with the given name,
+** return 0.
+*/
+SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){
+  if( p==0 || zName==0 ) return 0;
+  return sqlite3VListNameToNum(p->pVList, zName, nName);
+}
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
+  return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName));
+}
+
+/*
+** Transfer all bindings from the first statement over to the second.
+*/
+SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+  Vdbe *pFrom = (Vdbe*)pFromStmt;
+  Vdbe *pTo = (Vdbe*)pToStmt;
+  int i;
+  assert( pTo->db==pFrom->db );
+  assert( pTo->nVar==pFrom->nVar );
+  sqlite3_mutex_enter(pTo->db->mutex);
+  for(i=0; i<pTo->nVar; i++){
+    sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]);
+  }
+  sqlite3_mutex_leave(pTo->db->mutex);
+  return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Deprecated external interface.  Internal/core SQLite code
+** should call sqlite3TransferBindings.
+**
+** It is misuse to call this routine with statements from different
+** database connections.  But as this is a deprecated interface, we
+** will not bother to check for that condition.
+**
+** If the two statements contain a different number of bindings, then
+** an SQLITE_ERROR is returned.  Nothing else can go wrong, so otherwise
+** SQLITE_OK is returned.
+*/
+SQLITE_API int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+  Vdbe *pFrom = (Vdbe*)pFromStmt;
+  Vdbe *pTo = (Vdbe*)pToStmt;
+  if( pFrom->nVar!=pTo->nVar ){
+    return SQLITE_ERROR;
+  }
+  if( pTo->isPrepareV2 && pTo->expmask ){
+    pTo->expired = 1;
+  }
+  if( pFrom->isPrepareV2 && pFrom->expmask ){
+    pFrom->expired = 1;
+  }
+  return sqlite3TransferBindings(pFromStmt, pToStmt);
+}
+#endif
+
+/*
+** Return the sqlite3* database handle to which the prepared statement given
+** in the argument belongs.  This is the same database handle that was
+** the first argument to the sqlite3_prepare() that was used to create
+** the statement in the first place.
+*/
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){
+  return pStmt ? ((Vdbe*)pStmt)->db : 0;
+}
+
+/*
+** Return true if the prepared statement is guaranteed to not modify the
+** database.
+*/
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
+  return pStmt ? ((Vdbe*)pStmt)->readOnly : 1;
+}
+
+/*
+** Return true if the prepared statement is in need of being reset.
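+**
+** Combined with sqlite3_next_stmt(), this interface can be used to find
+** and reset all busy statements on a connection (an illustrative sketch):
+**
+**        sqlite3_stmt *p;
+**        for(p=sqlite3_next_stmt(db, 0); p; p=sqlite3_next_stmt(db, p)){
+**          if( sqlite3_stmt_busy(p) ) sqlite3_reset(p);
+**        }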
+*/ +SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){ + Vdbe *v = (Vdbe*)pStmt; + return v!=0 && v->magic==VDBE_MAGIC_RUN && v->pc>=0; +} + +/* +** Return a pointer to the next prepared statement after pStmt associated +** with database connection pDb. If pStmt is NULL, return the first +** prepared statement for the database connection. Return NULL if there +** are no more. +*/ +SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){ + sqlite3_stmt *pNext; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(pDb) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + sqlite3_mutex_enter(pDb->mutex); + if( pStmt==0 ){ + pNext = (sqlite3_stmt*)pDb->pVdbe; + }else{ + pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext; + } + sqlite3_mutex_leave(pDb->mutex); + return pNext; +} + +/* +** Return the value of a status counter for a prepared statement +*/ +SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){ + Vdbe *pVdbe = (Vdbe*)pStmt; + u32 v; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !pStmt ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + v = pVdbe->aCounter[op]; + if( resetFlag ) pVdbe->aCounter[op] = 0; + return (int)v; +} + +/* +** Return the SQL associated with a prepared statement +*/ +SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt){ + Vdbe *p = (Vdbe *)pStmt; + return p ? p->zSql : 0; +} + +/* +** Return the SQL associated with a prepared statement with +** bound parameters expanded. Space to hold the returned string is +** obtained from sqlite3_malloc(). The caller is responsible for +** freeing the returned string by passing it to sqlite3_free(). +** +** The SQLITE_TRACE_SIZE_LIMIT puts an upper bound on the size of +** expanded bound parameters. +*/ +SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt){ +#ifdef SQLITE_OMIT_TRACE + return 0; +#else + char *z = 0; + const char *zSql = sqlite3_sql(pStmt); + if( zSql ){ + Vdbe *p = (Vdbe *)pStmt; + sqlite3_mutex_enter(p->db->mutex); + z = sqlite3VdbeExpandSql(p, zSql); + sqlite3_mutex_leave(p->db->mutex); + } + return z; +#endif +} + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** Allocate and populate an UnpackedRecord structure based on the serialized +** record in nKey/pKey. Return a pointer to the new UnpackedRecord structure +** if successful, or a NULL pointer if an OOM error is encountered. +*/ +static UnpackedRecord *vdbeUnpackRecord( + KeyInfo *pKeyInfo, + int nKey, + const void *pKey +){ + UnpackedRecord *pRet; /* Return value */ + + pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo); + if( pRet ){ + memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nField+1)); + sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet); + } + return pRet; +} + +/* +** This function is called from within a pre-update callback to retrieve +** a field of the row currently being updated or deleted. +*/ +SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){ + PreUpdate *p = db->pPreUpdate; + Mem *pMem; + int rc = SQLITE_OK; + + /* Test that this call is being made from within an SQLITE_DELETE or + ** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */ + if( !p || p->op==SQLITE_INSERT ){ + rc = SQLITE_MISUSE_BKPT; + goto preupdate_old_out; + } + if( p->pPk ){ + iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx); + } + if( iIdx>=p->pCsr->nField || iIdx<0 ){ + rc = SQLITE_RANGE; + goto preupdate_old_out; + } + + /* If the old.* record has not yet been loaded into memory, do so now. 
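+  ** (Sketch, assuming an application callback registered with the real
+  ** sqlite3_preupdate_hook() interface: inside the hook, a call such as
+  **     sqlite3_value *pVal;
+  **     sqlite3_preupdate_old(db, 0, &pVal);
+  ** fetches column 0 of the row being updated or deleted.)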
*/ + if( p->pUnpacked==0 ){ + u32 nRec; + u8 *aRec; + + nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); + aRec = sqlite3DbMallocRaw(db, nRec); + if( !aRec ) goto preupdate_old_out; + rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec); + if( rc==SQLITE_OK ){ + p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); + if( !p->pUnpacked ) rc = SQLITE_NOMEM; + } + if( rc!=SQLITE_OK ){ + sqlite3DbFree(db, aRec); + goto preupdate_old_out; + } + p->aRecord = aRec; + } + + pMem = *ppValue = &p->pUnpacked->aMem[iIdx]; + if( iIdx==p->pTab->iPKey ){ + sqlite3VdbeMemSetInt64(pMem, p->iKey1); + }else if( iIdx>=p->pUnpacked->nField ){ + *ppValue = (sqlite3_value *)columnNullValue(); + }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){ + if( pMem->flags & MEM_Int ){ + sqlite3VdbeMemRealify(pMem); + } + } + + preupdate_old_out: + sqlite3Error(db, rc); + return sqlite3ApiExit(db, rc); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is called from within a pre-update callback to retrieve +** the number of columns in the row being updated, deleted or inserted. +*/ +SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){ + PreUpdate *p = db->pPreUpdate; + return (p ? p->keyinfo.nField : 0); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is designed to be called from within a pre-update callback +** only. It returns zero if the change that caused the callback was made +** immediately by a user SQL statement. Or, if the change was made by a +** trigger program, it returns the number of trigger programs currently +** on the stack (1 for a top-level trigger, 2 for a trigger fired by a +** top-level trigger etc.). +** +** For the purposes of the previous paragraph, a foreign key CASCADE, SET NULL +** or SET DEFAULT action is considered a trigger. +*/ +SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){ + PreUpdate *p = db->pPreUpdate; + return (p ? p->v->nFrame : 0); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is called from within a pre-update callback to retrieve +** a field of the row currently being updated or inserted. +*/ +SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){ + PreUpdate *p = db->pPreUpdate; + int rc = SQLITE_OK; + Mem *pMem; + + if( !p || p->op==SQLITE_DELETE ){ + rc = SQLITE_MISUSE_BKPT; + goto preupdate_new_out; + } + if( p->pPk && p->op!=SQLITE_UPDATE ){ + iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx); + } + if( iIdx>=p->pCsr->nField || iIdx<0 ){ + rc = SQLITE_RANGE; + goto preupdate_new_out; + } + + if( p->op==SQLITE_INSERT ){ + /* For an INSERT, memory cell p->iNewReg contains the serialized record + ** that is being inserted. Deserialize it. */ + UnpackedRecord *pUnpack = p->pNewUnpacked; + if( !pUnpack ){ + Mem *pData = &p->v->aMem[p->iNewReg]; + rc = ExpandBlob(pData); + if( rc!=SQLITE_OK ) goto preupdate_new_out; + pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z); + if( !pUnpack ){ + rc = SQLITE_NOMEM; + goto preupdate_new_out; + } + p->pNewUnpacked = pUnpack; + } + pMem = &pUnpack->aMem[iIdx]; + if( iIdx==p->pTab->iPKey ){ + sqlite3VdbeMemSetInt64(pMem, p->iKey2); + }else if( iIdx>=pUnpack->nField ){ + pMem = (sqlite3_value *)columnNullValue(); + } + }else{ + /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required + ** value. Make a copy of the cell contents and return a pointer to it. 
+    ** It is not safe to return a pointer to the memory cell itself as the
+    ** caller may modify the value text encoding.
+    */
+    assert( p->op==SQLITE_UPDATE );
+    if( !p->aNew ){
+      p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField);
+      if( !p->aNew ){
+        rc = SQLITE_NOMEM;
+        goto preupdate_new_out;
+      }
+    }
+    assert( iIdx>=0 && iIdx<p->pCsr->nField );
+    pMem = &p->aNew[iIdx];
+    if( pMem->flags==0 ){
+      if( iIdx==p->pTab->iPKey ){
+        sqlite3VdbeMemSetInt64(pMem, p->iKey2);
+      }else{
+        rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]);
+        if( rc!=SQLITE_OK ) goto preupdate_new_out;
+      }
+    }
+  }
+  *ppValue = pMem;
+
+ preupdate_new_out:
+  sqlite3Error(db, rc);
+  return sqlite3ApiExit(db, rc);
+}
+#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+/*
+** Return status data for a single loop within query pStmt.
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+  sqlite3_stmt *pStmt,            /* Prepared statement being queried */
+  int idx,                        /* Index of loop to report on */
+  int iScanStatusOp,              /* Which metric to return */
+  void *pOut                      /* OUT: Write the answer here */
+){
+  Vdbe *p = (Vdbe*)pStmt;
+  ScanStatus *pScan;
+  if( idx<0 || idx>=p->nScan ) return 1;
+  pScan = &p->aScan[idx];
+  switch( iScanStatusOp ){
+    case SQLITE_SCANSTAT_NLOOP: {
+      *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop];
+      break;
+    }
+    case SQLITE_SCANSTAT_NVISIT: {
+      *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit];
+      break;
+    }
+    case SQLITE_SCANSTAT_EST: {
+      double r = 1.0;
+      LogEst x = pScan->nEst;
+      while( x<100 ){
+        x += 10;
+        r *= 0.5;
+      }
+      *(double*)pOut = r*sqlite3LogEstToInt(x);
+      break;
+    }
+    case SQLITE_SCANSTAT_NAME: {
+      *(const char**)pOut = pScan->zName;
+      break;
+    }
+    case SQLITE_SCANSTAT_EXPLAIN: {
+      if( pScan->addrExplain ){
+        *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z;
+      }else{
+        *(const char**)pOut = 0;
+      }
+      break;
+    }
+    case SQLITE_SCANSTAT_SELECTID: {
+      if( pScan->addrExplain ){
+        *(int*)pOut = p->aOp[ pScan->addrExplain ].p1;
+      }else{
+        *(int*)pOut = -1;
+      }
+      break;
+    }
+    default: {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/*
+** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
+*/
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
+  Vdbe *p = (Vdbe*)pStmt;
+  memset(p->anExec, 0, p->nOp * sizeof(i64));
+}
+#endif /* SQLITE_ENABLE_STMT_SCANSTATUS */
+
+/************** End of vdbeapi.c *********************************************/
+/************** Begin file vdbetrace.c ***************************************/
+/*
+** 2009 November 25
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to insert the values of host parameters
+** (aka "wildcards") into the SQL text output by sqlite3_trace().
+**
+** The Vdbe parse-tree explainer is also found here.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#ifndef SQLITE_OMIT_TRACE
+
+/*
+** zSql is a zero-terminated string of UTF-8 SQL text.  Return the number of
+** bytes in this text up to but excluding the first character in
+** a host parameter.  If the text contains no host parameters, return
+** the total number of bytes in the text.
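+**
+** For example (illustration only): given "WHERE x=?66 AND y=3", this
+** routine returns 8, the length of "WHERE x=", and sets *pnToken to 3,
+** the length of the "?66" token.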
+*/ +static int findNextHostParameter(const char *zSql, int *pnToken){ + int tokenType; + int nTotal = 0; + int n; + + *pnToken = 0; + while( zSql[0] ){ + n = sqlite3GetToken((u8*)zSql, &tokenType); + assert( n>0 && tokenType!=TK_ILLEGAL ); + if( tokenType==TK_VARIABLE ){ + *pnToken = n; + break; + } + nTotal += n; + zSql += n; + } + return nTotal; +} + +/* +** This function returns a pointer to a nul-terminated string in memory +** obtained from sqlite3DbMalloc(). If sqlite3.nVdbeExec is 1, then the +** string contains a copy of zRawSql but with host parameters expanded to +** their current bindings. Or, if sqlite3.nVdbeExec is greater than 1, +** then the returned string holds a copy of zRawSql with "-- " prepended +** to each line of text. +** +** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then +** then long strings and blobs are truncated to that many bytes. This +** can be used to prevent unreasonably large trace strings when dealing +** with large (multi-megabyte) strings and blobs. +** +** The calling function is responsible for making sure the memory returned +** is eventually freed. +** +** ALGORITHM: Scan the input string looking for host parameters in any of +** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within +** string literals, quoted identifier names, and comments. For text forms, +** the host parameter index is found by scanning the prepared +** statement for the corresponding OP_Variable opcode. Once the host +** parameter index is known, locate the value in p->aVar[]. Then render +** the value as a literal in place of the host parameter name. +*/ +SQLITE_PRIVATE char *sqlite3VdbeExpandSql( + Vdbe *p, /* The prepared statement being evaluated */ + const char *zRawSql /* Raw text of the SQL statement */ +){ + sqlite3 *db; /* The database connection */ + int idx = 0; /* Index of a host parameter */ + int nextIndex = 1; /* Index of next ? host parameter */ + int n; /* Length of a token prefix */ + int nToken; /* Length of the parameter token */ + int i; /* Loop counter */ + Mem *pVar; /* Value of a host parameter */ + StrAccum out; /* Accumulate the output here */ +#ifndef SQLITE_OMIT_UTF16 + Mem utf8; /* Used to convert UTF16 parameters into UTF8 for display */ +#endif + char zBase[100]; /* Initial working space */ + + db = p->db; + sqlite3StrAccumInit(&out, 0, zBase, sizeof(zBase), + db->aLimit[SQLITE_LIMIT_LENGTH]); + if( db->nVdbeExec>1 ){ + while( *zRawSql ){ + const char *zStart = zRawSql; + while( *(zRawSql++)!='\n' && *zRawSql ); + sqlite3StrAccumAppend(&out, "-- ", 3); + assert( (zRawSql - zStart) > 0 ); + sqlite3StrAccumAppend(&out, zStart, (int)(zRawSql-zStart)); + } + }else if( p->nVar==0 ){ + sqlite3StrAccumAppend(&out, zRawSql, sqlite3Strlen30(zRawSql)); + }else{ + while( zRawSql[0] ){ + n = findNextHostParameter(zRawSql, &nToken); + assert( n>0 ); + sqlite3StrAccumAppend(&out, zRawSql, n); + zRawSql += n; + assert( zRawSql[0] || nToken==0 ); + if( nToken==0 ) break; + if( zRawSql[0]=='?' 
){
+        if( nToken>1 ){
+          assert( sqlite3Isdigit(zRawSql[1]) );
+          sqlite3GetInt32(&zRawSql[1], &idx);
+        }else{
+          idx = nextIndex;
+        }
+      }else{
+        assert( zRawSql[0]==':' || zRawSql[0]=='$' ||
+                zRawSql[0]=='@' || zRawSql[0]=='#' );
+        testcase( zRawSql[0]==':' );
+        testcase( zRawSql[0]=='$' );
+        testcase( zRawSql[0]=='@' );
+        testcase( zRawSql[0]=='#' );
+        idx = sqlite3VdbeParameterIndex(p, zRawSql, nToken);
+        assert( idx>0 );
+      }
+      zRawSql += nToken;
+      nextIndex = idx + 1;
+      assert( idx>0 && idx<=p->nVar );
+      pVar = &p->aVar[idx-1];
+      if( pVar->flags & MEM_Null ){
+        sqlite3StrAccumAppend(&out, "NULL", 4);
+      }else if( pVar->flags & MEM_Int ){
+        sqlite3XPrintf(&out, "%lld", pVar->u.i);
+      }else if( pVar->flags & MEM_Real ){
+        sqlite3XPrintf(&out, "%!.15g", pVar->u.r);
+      }else if( pVar->flags & MEM_Str ){
+        int nOut;  /* Number of bytes of the string text to include in output */
+#ifndef SQLITE_OMIT_UTF16
+        u8 enc = ENC(db);
+        if( enc!=SQLITE_UTF8 ){
+          memset(&utf8, 0, sizeof(utf8));
+          utf8.db = db;
+          sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC);
+          if( SQLITE_NOMEM==sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8) ){
+            out.accError = STRACCUM_NOMEM;
+            out.nAlloc = 0;
+          }
+          pVar = &utf8;
+        }
+#endif
+        nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+        if( nOut>SQLITE_TRACE_SIZE_LIMIT ){
+          nOut = SQLITE_TRACE_SIZE_LIMIT;
+          while( nOut<pVar->n && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; }
+        }
+#endif
+        sqlite3XPrintf(&out, "'%.*q'", nOut, pVar->z);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+        if( nOut<pVar->n ){
+          sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
+        }
+#endif
+#ifndef SQLITE_OMIT_UTF16
+        if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8);
+#endif
+      }else if( pVar->flags & MEM_Zero ){
+        sqlite3XPrintf(&out, "zeroblob(%d)", pVar->u.nZero);
+      }else{
+        int nOut;  /* Number of bytes of the blob to include in output */
+        assert( pVar->flags & MEM_Blob );
+        sqlite3StrAccumAppend(&out, "x'", 2);
+        nOut = pVar->n;
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+        if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT;
+#endif
+        for(i=0; i<nOut; i++){
+          sqlite3XPrintf(&out, "%02x", pVar->z[i]&0xff);
+        }
+        sqlite3StrAccumAppend(&out, "'", 1);
+#ifdef SQLITE_TRACE_SIZE_LIMIT
+        if( nOut<pVar->n ){
+          sqlite3XPrintf(&out, "/*+%d bytes*/", pVar->n-nOut);
+        }
+#endif
+      }
+    }
+  }
+  if( out.accError ) sqlite3StrAccumReset(&out);
+  return sqlite3StrAccumFinish(&out);
+}
+
+#endif /* #ifndef SQLITE_OMIT_TRACE */
+
+/************** End of vdbetrace.c *******************************************/
+/************** Begin file vdbe.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** The code in this file implements the function that runs the
+** bytecode of a prepared statement.
+**
+** Various scripts scan this source file in order to generate HTML
+** documentation, header files, or other derived files.  The formatting
+** of the code in this file is, therefore, important.  See other comments
+** in this file for details.  If in doubt, do not deviate from existing
+** commenting and indentation practices when changing or adding code.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+/*
+** Invoke this macro on memory cells just prior to changing the
+** value of the cell.
This macro verifies that shallow copies are +** not misused. A shallow copy of a string or blob just copies a +** pointer to the string or blob, not the content. If the original +** is changed while the copy is still in use, the string or blob might +** be changed out from under the copy. This macro verifies that nothing +** like that ever happens. +*/ +#ifdef SQLITE_DEBUG +# define memAboutToChange(P,M) sqlite3VdbeMemAboutToChange(P,M) +#else +# define memAboutToChange(P,M) +#endif + +/* +** The following global variable is incremented every time a cursor +** moves, either by the OP_SeekXX, OP_Next, or OP_Prev opcodes. The test +** procedures use this information to make sure that indices are +** working correctly. This variable has no function other than to +** help verify the correct operation of the library. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_search_count = 0; +#endif + +/* +** When this global variable is positive, it gets decremented once before +** each instruction in the VDBE. When it reaches zero, the u1.isInterrupted +** field of the sqlite3 structure is set in order to simulate an interrupt. +** +** This facility is used for testing purposes only. It does not function +** in an ordinary build. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_interrupt_count = 0; +#endif + +/* +** The next global variable is incremented each type the OP_Sort opcode +** is executed. The test procedures use this information to make sure that +** sorting is occurring or not occurring at appropriate times. This variable +** has no function other than to help verify the correct operation of the +** library. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_sort_count = 0; +#endif + +/* +** The next global variable records the size of the largest MEM_Blob +** or MEM_Str that has been used by a VDBE opcode. The test procedures +** use this information to make sure that the zero-blob functionality +** is working correctly. This variable has no function other than to +** help verify the correct operation of the library. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_max_blobsize = 0; +static void updateMaxBlobsize(Mem *p){ + if( (p->flags & (MEM_Str|MEM_Blob))!=0 && p->n>sqlite3_max_blobsize ){ + sqlite3_max_blobsize = p->n; + } +} +#endif + +/* +** This macro evaluates to true if either the update hook or the preupdate +** hook are enabled for database connect DB. +*/ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +# define HAS_UPDATE_HOOK(DB) ((DB)->xPreUpdateCallback||(DB)->xUpdateCallback) +#else +# define HAS_UPDATE_HOOK(DB) ((DB)->xUpdateCallback) +#endif + +/* +** The next global variable is incremented each time the OP_Found opcode +** is executed. This is used to test whether or not the foreign key +** operation implemented using OP_FkIsZero is working. This variable +** has no function other than to help verify the correct operation of the +** library. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_found_count = 0; +#endif + +/* +** Test a register to see if it exceeds the current maximum blob size. +** If it does, record the new maximum blob size. +*/ +#if defined(SQLITE_TEST) && !defined(SQLITE_UNTESTABLE) +# define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P) +#else +# define UPDATE_MAX_BLOBSIZE(P) +#endif + +/* +** Invoke the VDBE coverage callback, if that callback is defined. This +** feature is used for test suite validation only and does not appear an +** production builds. +** +** M is an integer, 2 or 3, that indices how many different ways the +** branch can go. It is usually 2. 
"I" is the direction the branch +** goes. 0 means falls through. 1 means branch is taken. 2 means the +** second alternative branch is taken. +** +** iSrcLine is the source code line (from the __LINE__ macro) that +** generated the VDBE instruction. This instrumentation assumes that all +** source code is in a single file (the amalgamation). Special values 1 +** and 2 for the iSrcLine parameter mean that this particular branch is +** always taken or never taken, respectively. +*/ +#if !defined(SQLITE_VDBE_COVERAGE) +# define VdbeBranchTaken(I,M) +#else +# define VdbeBranchTaken(I,M) vdbeTakeBranch(pOp->iSrcLine,I,M) + static void vdbeTakeBranch(int iSrcLine, u8 I, u8 M){ + if( iSrcLine<=2 && ALWAYS(iSrcLine>0) ){ + M = iSrcLine; + /* Assert the truth of VdbeCoverageAlwaysTaken() and + ** VdbeCoverageNeverTaken() */ + assert( (M & I)==I ); + }else{ + if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/ + sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg, + iSrcLine,I,M); + } + } +#endif + +/* +** Convert the given register into a string if it isn't one +** already. Return non-zero if a malloc() fails. +*/ +#define Stringify(P, enc) \ + if(((P)->flags&(MEM_Str|MEM_Blob))==0 && sqlite3VdbeMemStringify(P,enc,0)) \ + { goto no_mem; } + +/* +** An ephemeral string value (signified by the MEM_Ephem flag) contains +** a pointer to a dynamically allocated string where some other entity +** is responsible for deallocating that string. Because the register +** does not control the string, it might be deleted without the register +** knowing it. +** +** This routine converts an ephemeral string into a dynamically allocated +** string that the register itself controls. In other words, it +** converts an MEM_Ephem string into a string with P.z==P.zMalloc. +*/ +#define Deephemeralize(P) \ + if( ((P)->flags&MEM_Ephem)!=0 \ + && sqlite3VdbeMemMakeWriteable(P) ){ goto no_mem;} + +/* Return true if the cursor was opened using the OP_OpenSorter opcode. */ +#define isSorter(x) ((x)->eCurType==CURTYPE_SORTER) + +/* +** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL +** if we run out of memory. +*/ +static VdbeCursor *allocateCursor( + Vdbe *p, /* The virtual machine */ + int iCur, /* Index of the new VdbeCursor */ + int nField, /* Number of fields in the table or index */ + int iDb, /* Database the cursor belongs to, or -1 */ + u8 eCurType /* Type of the new cursor */ +){ + /* Find the memory cell that will be used to store the blob of memory + ** required for this VdbeCursor structure. It is convenient to use a + ** vdbe memory cell to manage the memory allocation required for a + ** VdbeCursor structure for the following reasons: + ** + ** * Sometimes cursor numbers are used for a couple of different + ** purposes in a vdbe program. The different uses might require + ** different sized allocations. Memory cells provide growable + ** allocations. + ** + ** * When using ENABLE_MEMORY_MANAGEMENT, memory cell buffers can + ** be freed lazily via the sqlite3_release_memory() API. This + ** minimizes the number of malloc calls made by the system. + ** + ** The memory cell for cursor 0 is aMem[0]. The rest are allocated from + ** the top of the register space. Cursor 1 is at Mem[p->nMem-1]. + ** Cursor 2 is at Mem[p->nMem-2]. And so forth. + */ + Mem *pMem = iCur>0 ? 
&p->aMem[p->nMem-iCur] : p->aMem;
+
+  int nByte;
+  VdbeCursor *pCx = 0;
+  nByte =
+      ROUND8(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField +
+      (eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0);
+
+  assert( iCur>=0 && iCur<p->nCursor );
+  if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/
+    sqlite3VdbeFreeCursor(p, p->apCsr[iCur]);
+    p->apCsr[iCur] = 0;
+  }
+  if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+    p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z;
+    memset(pCx, 0, offsetof(VdbeCursor,pAltCursor));
+    pCx->eCurType = eCurType;
+    pCx->iDb = iDb;
+    pCx->nField = nField;
+    pCx->aOffset = &pCx->aType[nField];
+    if( eCurType==CURTYPE_BTREE ){
+      pCx->uc.pCursor = (BtCursor*)
+          &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField];
+      sqlite3BtreeCursorZero(pCx->uc.pCursor);
+    }
+  }
+  return pCx;
+}
+
+/*
+** Try to convert a value into a numeric representation if we can
+** do so without loss of information.  In other words, if the string
+** looks like a number, convert it into a number.  If it does not
+** look like a number, leave it alone.
+**
+** If the bTryForInt flag is true, then extra effort is made to give
+** an integer representation.  Strings that look like floating point
+** values but which have no fractional component (example: '48.00')
+** will have a MEM_Int representation when bTryForInt is true.
+**
+** If bTryForInt is false, then if the input string contains a decimal
+** point or exponential notation, the result is only MEM_Real, even
+** if there is an exact integer representation of the quantity.
+*/
+static void applyNumericAffinity(Mem *pRec, int bTryForInt){
+  double rValue;
+  i64 iValue;
+  u8 enc = pRec->enc;
+  assert( (pRec->flags & (MEM_Str|MEM_Int|MEM_Real))==MEM_Str );
+  if( sqlite3AtoF(pRec->z, &rValue, pRec->n, enc)==0 ) return;
+  if( 0==sqlite3Atoi64(pRec->z, &iValue, pRec->n, enc) ){
+    pRec->u.i = iValue;
+    pRec->flags |= MEM_Int;
+  }else{
+    pRec->u.r = rValue;
+    pRec->flags |= MEM_Real;
+    if( bTryForInt ) sqlite3VdbeIntegerAffinity(pRec);
+  }
+}
+
+/*
+** Processing is determined by the affinity parameter:
+**
+** SQLITE_AFF_INTEGER:
+** SQLITE_AFF_REAL:
+** SQLITE_AFF_NUMERIC:
+**    Try to convert pRec to an integer representation or a
+**    floating-point representation if an integer representation
+**    is not possible.  Note that the integer representation is
+**    always preferred, even if the affinity is REAL, because
+**    an integer representation is more space efficient on disk.
+**
+** SQLITE_AFF_TEXT:
+**    Convert pRec to a text representation.
+**
+** SQLITE_AFF_BLOB:
+**    No-op.  pRec is unchanged.
+*/
+static void applyAffinity(
+  Mem *pRec,          /* The value to apply affinity to */
+  char affinity,      /* The affinity to be applied */
+  u8 enc              /* Use this text encoding */
+){
+  if( affinity>=SQLITE_AFF_NUMERIC ){
+    assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL
+         || affinity==SQLITE_AFF_NUMERIC );
+    if( (pRec->flags & MEM_Int)==0 ){ /*OPTIMIZATION-IF-FALSE*/
+      if( (pRec->flags & MEM_Real)==0 ){
+        if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1);
+      }else{
+        sqlite3VdbeIntegerAffinity(pRec);
+      }
+    }
+  }else if( affinity==SQLITE_AFF_TEXT ){
+    /* Only attempt the conversion to TEXT if there is an integer or real
+    ** representation (blob and NULL do not get converted) but no string
+    ** representation.  It would be harmless to repeat the conversion if
+    ** there is already a string rep, but it is pointless to waste those
+    ** CPU cycles.
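+    ** (Illustration: an integer cell holding 123 gains the text
+    ** representation "123" and drops its MEM_Int flag below, much as if
+    ** CAST(123 AS TEXT) had been evaluated.)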
*/ + if( 0==(pRec->flags&MEM_Str) ){ /*OPTIMIZATION-IF-FALSE*/ + if( (pRec->flags&(MEM_Real|MEM_Int)) ){ + sqlite3VdbeMemStringify(pRec, enc, 1); + } + } + pRec->flags &= ~(MEM_Real|MEM_Int); + } +} + +/* +** Try to convert the type of a function argument or a result column +** into a numeric representation. Use either INTEGER or REAL whichever +** is appropriate. But only do the conversion if it is possible without +** loss of information and return the revised type of the argument. +*/ +SQLITE_API int sqlite3_value_numeric_type(sqlite3_value *pVal){ + int eType = sqlite3_value_type(pVal); + if( eType==SQLITE_TEXT ){ + Mem *pMem = (Mem*)pVal; + applyNumericAffinity(pMem, 0); + eType = sqlite3_value_type(pVal); + } + return eType; +} + +/* +** Exported version of applyAffinity(). This one works on sqlite3_value*, +** not the internal Mem* type. +*/ +SQLITE_PRIVATE void sqlite3ValueApplyAffinity( + sqlite3_value *pVal, + u8 affinity, + u8 enc +){ + applyAffinity((Mem *)pVal, affinity, enc); +} + +/* +** pMem currently only holds a string type (or maybe a BLOB that we can +** interpret as a string if we want to). Compute its corresponding +** numeric type, if has one. Set the pMem->u.r and pMem->u.i fields +** accordingly. +*/ +static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){ + assert( (pMem->flags & (MEM_Int|MEM_Real))==0 ); + assert( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ); + if( sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc)==0 ){ + return 0; + } + if( sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc)==SQLITE_OK ){ + return MEM_Int; + } + return MEM_Real; +} + +/* +** Return the numeric type for pMem, either MEM_Int or MEM_Real or both or +** none. +** +** Unlike applyNumericAffinity(), this routine does not modify pMem->flags. +** But it does set pMem->u.r and pMem->u.i appropriately. +*/ +static u16 numericType(Mem *pMem){ + if( pMem->flags & (MEM_Int|MEM_Real) ){ + return pMem->flags & (MEM_Int|MEM_Real); + } + if( pMem->flags & (MEM_Str|MEM_Blob) ){ + return computeNumericType(pMem); + } + return 0; +} + +#ifdef SQLITE_DEBUG +/* +** Write a nice string representation of the contents of cell pMem +** into buffer zBuf, length nBuf. 
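+**
+** For example (illustration only), a 5-byte transient UTF-8 string "hello"
+** renders as " s5[hello](8)": a type letter, the byte count, up to 15
+** characters of content, then the encoding name.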
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf){
+  char *zCsr = zBuf;
+  int f = pMem->flags;
+
+  static const char *const encnames[] = {"(X)", "(8)", "(16LE)", "(16BE)"};
+
+  if( f&MEM_Blob ){
+    int i;
+    char c;
+    if( f & MEM_Dyn ){
+      c = 'z';
+      assert( (f & (MEM_Static|MEM_Ephem))==0 );
+    }else if( f & MEM_Static ){
+      c = 't';
+      assert( (f & (MEM_Dyn|MEM_Ephem))==0 );
+    }else if( f & MEM_Ephem ){
+      c = 'e';
+      assert( (f & (MEM_Static|MEM_Dyn))==0 );
+    }else{
+      c = 's';
+    }
+
+    sqlite3_snprintf(100, zCsr, "%c", c);
+    zCsr += sqlite3Strlen30(zCsr);
+    sqlite3_snprintf(100, zCsr, "%d[", pMem->n);
+    zCsr += sqlite3Strlen30(zCsr);
+    for(i=0; i<16 && i<pMem->n; i++){
+      sqlite3_snprintf(100, zCsr, "%02X", ((int)pMem->z[i] & 0xFF));
+      zCsr += sqlite3Strlen30(zCsr);
+    }
+    for(i=0; i<16 && i<pMem->n; i++){
+      char z = pMem->z[i];
+      if( z<32 || z>126 ) *zCsr++ = '.';
+      else *zCsr++ = z;
+    }
+
+    sqlite3_snprintf(100, zCsr, "]%s", encnames[pMem->enc]);
+    zCsr += sqlite3Strlen30(zCsr);
+    if( f & MEM_Zero ){
+      sqlite3_snprintf(100, zCsr,"+%dz",pMem->u.nZero);
+      zCsr += sqlite3Strlen30(zCsr);
+    }
+    *zCsr = '\0';
+  }else if( f & MEM_Str ){
+    int j, k;
+    zBuf[0] = ' ';
+    if( f & MEM_Dyn ){
+      zBuf[1] = 'z';
+      assert( (f & (MEM_Static|MEM_Ephem))==0 );
+    }else if( f & MEM_Static ){
+      zBuf[1] = 't';
+      assert( (f & (MEM_Dyn|MEM_Ephem))==0 );
+    }else if( f & MEM_Ephem ){
+      zBuf[1] = 'e';
+      assert( (f & (MEM_Static|MEM_Dyn))==0 );
+    }else{
+      zBuf[1] = 's';
+    }
+    k = 2;
+    sqlite3_snprintf(100, &zBuf[k], "%d", pMem->n);
+    k += sqlite3Strlen30(&zBuf[k]);
+    zBuf[k++] = '[';
+    for(j=0; j<15 && j<pMem->n; j++){
+      u8 c = pMem->z[j];
+      if( c>=0x20 && c<0x7f ){
+        zBuf[k++] = c;
+      }else{
+        zBuf[k++] = '.';
+      }
+    }
+    zBuf[k++] = ']';
+    sqlite3_snprintf(100,&zBuf[k], encnames[pMem->enc]);
+    k += sqlite3Strlen30(&zBuf[k]);
+    zBuf[k++] = 0;
+  }
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+/*
+** Print the value of a register for tracing purposes:
+*/
+static void memTracePrint(Mem *p){
+  if( p->flags & MEM_Undefined ){
+    printf(" undefined");
+  }else if( p->flags & MEM_Null ){
+    printf(" NULL");
+  }else if( (p->flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){
+    printf(" si:%lld", p->u.i);
+  }else if( p->flags & MEM_Int ){
+    printf(" i:%lld", p->u.i);
+#ifndef SQLITE_OMIT_FLOATING_POINT
+  }else if( p->flags & MEM_Real ){
+    printf(" r:%g", p->u.r);
+#endif
+  }else if( p->flags & MEM_RowSet ){
+    printf(" (rowset)");
+  }else{
+    char zBuf[200];
+    sqlite3VdbeMemPrettyPrint(p, zBuf);
+    printf(" %s", zBuf);
+  }
+  if( p->flags & MEM_Subtype ) printf(" subtype=0x%02x", p->eSubtype);
+}
+static void registerTrace(int iReg, Mem *p){
+  printf("REG[%d] = ", iReg);
+  memTracePrint(p);
+  printf("\n");
+}
+#endif
+
+#ifdef SQLITE_DEBUG
+# define REGISTER_TRACE(R,M) if(db->flags&SQLITE_VdbeTrace)registerTrace(R,M)
+#else
+# define REGISTER_TRACE(R,M)
+#endif
+
+
+#ifdef VDBE_PROFILE
+
+/*
+** hwtime.h contains inline assembler code for implementing
+** high-performance timing routines.
+*/
+/************** Include hwtime.h in the middle of vdbe.c *********************/
+/************** Begin file hwtime.h ******************************************/
+/*
+** 2008 May 27
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long val; + __asm__ __volatile__ ("rdtsc" : "=A" (val)); + return val; + } + +#elif (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + #error Need implementation of sqlite3Hwtime() for your platform. + + /* + ** To compile without implementing sqlite3Hwtime() for your platform, + ** you can remove the above #error and use the following + ** stub function. You will lose timing support for many + ** of the debugging and testing utilities, but it should at + ** least compile and run. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in vdbe.c ***********************/ + +#endif + +#ifndef NDEBUG +/* +** This function is only called from within an assert() expression. It +** checks that the sqlite3.nTransaction variable is correctly set to +** the number of non-transaction savepoints currently in the +** linked list starting at sqlite3.pSavepoint. +** +** Usage: +** +** assert( checkSavepointCount(db) ); +*/ +static int checkSavepointCount(sqlite3 *db){ + int n = 0; + Savepoint *p; + for(p=db->pSavepoint; p; p=p->pNext) n++; + assert( n==(db->nSavepoint + db->isTransactionSavepoint) ); + return 1; +} +#endif + +/* +** Return the register of pOp->p2 after first preparing it to be +** overwritten with an integer value. +*/ +static SQLITE_NOINLINE Mem *out2PrereleaseWithClear(Mem *pOut){ + sqlite3VdbeMemSetNull(pOut); + pOut->flags = MEM_Int; + return pOut; +} +static Mem *out2Prerelease(Vdbe *p, VdbeOp *pOp){ + Mem *pOut; + assert( pOp->p2>0 ); + assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); + pOut = &p->aMem[pOp->p2]; + memAboutToChange(p, pOut); + if( VdbeMemDynamic(pOut) ){ /*OPTIMIZATION-IF-FALSE*/ + return out2PrereleaseWithClear(pOut); + }else{ + pOut->flags = MEM_Int; + return pOut; + } +} + + +/* +** Execute as much of a VDBE program as we can. +** This is the core of sqlite3_step(). 
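+**
+** Applications never call this routine directly; it runs underneath
+** sqlite3_step().  A typical driving loop looks like this (illustration
+** only, not part of the upstream file):
+**
+**    while( sqlite3_step(pStmt)==SQLITE_ROW ){
+**      ... read values out with sqlite3_column_int() and friends ...
+**    }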
+*/
+SQLITE_PRIVATE int sqlite3VdbeExec(
+  Vdbe *p                    /* The VDBE */
+){
+  Op *aOp = p->aOp;          /* Copy of p->aOp */
+  Op *pOp = aOp;             /* Current operation */
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+  Op *pOrigOp;               /* Value of pOp at the top of the loop */
+#endif
+#ifdef SQLITE_DEBUG
+  int nExtraDelete = 0;      /* Verifies FORDELETE and AUXDELETE flags */
+#endif
+  int rc = SQLITE_OK;        /* Value to return */
+  sqlite3 *db = p->db;       /* The database */
+  u8 resetSchemaOnFault = 0; /* Reset schema after an error if positive */
+  u8 encoding = ENC(db);     /* The database encoding */
+  int iCompare = 0;          /* Result of last comparison */
+  unsigned nVmStep = 0;      /* Number of virtual machine steps */
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+  unsigned nProgressLimit = 0;/* Invoke xProgress() when nVmStep reaches this */
+#endif
+  Mem *aMem = p->aMem;       /* Copy of p->aMem */
+  Mem *pIn1 = 0;             /* 1st input operand */
+  Mem *pIn2 = 0;             /* 2nd input operand */
+  Mem *pIn3 = 0;             /* 3rd input operand */
+  Mem *pOut = 0;             /* Output operand */
+#ifdef VDBE_PROFILE
+  u64 start;                 /* CPU clock count at start of opcode */
+#endif
+  /*** INSERT STACK UNION HERE ***/
+
+  assert( p->magic==VDBE_MAGIC_RUN );  /* sqlite3_step() verifies this */
+  sqlite3VdbeEnter(p);
+  if( p->rc==SQLITE_NOMEM ){
+    /* This happens if a malloc() inside a call to sqlite3_column_text() or
+    ** sqlite3_column_text16() failed.  */
+    goto no_mem;
+  }
+  assert( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_BUSY );
+  assert( p->bIsReader || p->readOnly!=0 );
+  p->iCurrentTime = 0;
+  assert( p->explain==0 );
+  p->pResultSet = 0;
+  db->busyHandler.nBusy = 0;
+  if( db->u1.isInterrupted ) goto abort_due_to_interrupt;
+  sqlite3VdbeIOTraceSql(p);
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+  if( db->xProgress ){
+    u32 iPrior = p->aCounter[SQLITE_STMTSTATUS_VM_STEP];
+    assert( 0 < db->nProgressOps );
+    nProgressLimit = db->nProgressOps - (iPrior % db->nProgressOps);
+  }
+#endif
+#ifdef SQLITE_DEBUG
+  sqlite3BeginBenignMalloc();
+  if( p->pc==0
+   && (p->db->flags & (SQLITE_VdbeListing|SQLITE_VdbeEQP|SQLITE_VdbeTrace))!=0
+  ){
+    int i;
+    int once = 1;
+    sqlite3VdbePrintSql(p);
+    if( p->db->flags & SQLITE_VdbeListing ){
+      printf("VDBE Program Listing:\n");
+      for(i=0; i<p->nOp; i++){
+        sqlite3VdbePrintOp(stdout, i, &aOp[i]);
+      }
+    }
+    if( p->db->flags & SQLITE_VdbeEQP ){
+      for(i=0; i<p->nOp; i++){
+        if( aOp[i].opcode==OP_Explain ){
+          if( once ) printf("VDBE Query Plan:\n");
+          printf("%s\n", aOp[i].p4.z);
+          once = 0;
+        }
+      }
+    }
+    if( p->db->flags & SQLITE_VdbeTrace )  printf("VDBE Trace:\n");
+  }
+  sqlite3EndBenignMalloc();
+#endif
+  for(pOp=&aOp[p->pc]; 1; pOp++){
+    /* Errors are detected by individual opcodes, with an immediate
+    ** jump to abort_due_to_error. */
+    assert( rc==SQLITE_OK );
+
+    assert( pOp>=aOp && pOp<&aOp[p->nOp]);
+#ifdef VDBE_PROFILE
+    start = sqlite3Hwtime();
+#endif
+    nVmStep++;
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+    if( p->anExec ) p->anExec[(int)(pOp-aOp)]++;
+#endif
+
+    /* Only allow tracing if SQLITE_DEBUG is defined.
+    */
+#ifdef SQLITE_DEBUG
+    if( db->flags & SQLITE_VdbeTrace ){
+      sqlite3VdbePrintOp(stdout, (int)(pOp - aOp), pOp);
+    }
+#endif
+
+
+    /* Check to see if we need to simulate an interrupt.  This only happens
+    ** if we have a special test build.
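+    ** (Illustration: a test harness sets the global sqlite3_interrupt_count,
+    ** declared earlier in this file, to N so that sqlite3_interrupt() fires
+    ** after N more opcodes execute.)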
+ */ +#ifdef SQLITE_TEST + if( sqlite3_interrupt_count>0 ){ + sqlite3_interrupt_count--; + if( sqlite3_interrupt_count==0 ){ + sqlite3_interrupt(db); + } + } +#endif + + /* Sanity checking on other operands */ +#ifdef SQLITE_DEBUG + { + u8 opProperty = sqlite3OpcodeProperty[pOp->opcode]; + if( (opProperty & OPFLG_IN1)!=0 ){ + assert( pOp->p1>0 ); + assert( pOp->p1<=(p->nMem+1 - p->nCursor) ); + assert( memIsValid(&aMem[pOp->p1]) ); + assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) ); + REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]); + } + if( (opProperty & OPFLG_IN2)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); + assert( memIsValid(&aMem[pOp->p2]) ); + assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) ); + REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]); + } + if( (opProperty & OPFLG_IN3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); + assert( memIsValid(&aMem[pOp->p3]) ); + assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) ); + REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]); + } + if( (opProperty & OPFLG_OUT2)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); + memAboutToChange(p, &aMem[pOp->p2]); + } + if( (opProperty & OPFLG_OUT3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); + memAboutToChange(p, &aMem[pOp->p3]); + } + } +#endif +#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) + pOrigOp = pOp; +#endif + + switch( pOp->opcode ){ + +/***************************************************************************** +** What follows is a massive switch statement where each case implements a +** separate instruction in the virtual machine. If we follow the usual +** indentation conventions, each case should be indented by 6 spaces. But +** that is a lot of wasted space on the left margin. So the code within +** the switch statement will break with convention and be flush-left. Another +** big comment (similar to this one) will mark the point in the code where +** we transition back to normal indentation. +** +** The formatting of each case is important. The makefile for SQLite +** generates two C files "opcodes.h" and "opcodes.c" by scanning this +** file looking for lines that begin with "case OP_". The opcodes.h files +** will be filled with #defines that give unique integer values to each +** opcode and the opcodes.c file is filled with an array of strings where +** each string is the symbolic name for the corresponding opcode. If the +** case statement is followed by a comment of the form "/# same as ... #/" +** that comment is used to determine the particular value of the opcode. +** +** Other keywords in the comment that follows each case are used to +** construct the OPFLG_INITIALIZER value that initializes opcodeProperty[]. +** Keywords include: in1, in2, in3, out2, out3. See +** the mkopcodeh.awk script for additional information. +** +** Documentation about VDBE opcodes is generated by scanning this file +** for lines of that contain "Opcode:". That line and all subsequent +** comment lines are used in the generation of the opcode.html documentation +** file. +** +** SUMMARY: +** +** Formatting is important to scripts that scan this file. +** Do not deviate from the formatting style currently in use. +** +*****************************************************************************/ + +/* Opcode: Goto * P2 * * * +** +** An unconditional jump to address P2. +** The next instruction executed will be +** the one at index P2 from the beginning of +** the program. 
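+**
+** In an EXPLAIN listing this opcode shows up as, for example (illustration
+** only, addresses invented):
+**
+**    addr  opcode  p1  p2  p3  p4  p5  comment
+**    ----  ------  --  --  --  --  --  -------
+**    7     Goto    0   2   0       00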
+**
+** The P1 parameter is not actually used by this opcode.  However, it
+** is sometimes set to 1 instead of 0 as a hint to the command-line shell
+** that this Goto is the bottom of a loop and that the lines from P2 down
+** to the current line should be indented for EXPLAIN output.
+*/
+case OP_Goto: {             /* jump */
+jump_to_p2_and_check_for_interrupt:
+  pOp = &aOp[pOp->p2 - 1];
+
+  /* Opcodes that are used as the bottom of a loop (OP_Next, OP_Prev,
+  ** OP_VNext, OP_RowSetNext, or OP_SorterNext) all jump here upon
+  ** completion.  Check to see if sqlite3_interrupt() has been called
+  ** or if the progress callback needs to be invoked.
+  **
+  ** This code uses unstructured "goto" statements and does not look clean.
+  ** But that is not due to sloppy coding habits.  The code is written this
+  ** way for performance, to avoid having to run the interrupt and progress
+  ** checks on every opcode.  This helps sqlite3_step() to run about 1.5%
+  ** faster according to "valgrind --tool=cachegrind" */
+check_for_interrupt:
+  if( db->u1.isInterrupted ) goto abort_due_to_interrupt;
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+  /* Call the progress callback if it is configured and the required number
+  ** of VDBE ops have been executed (either since this invocation of
+  ** sqlite3VdbeExec() or since last time the progress callback was called).
+  ** If the progress callback returns non-zero, exit the virtual machine with
+  ** a return code SQLITE_ABORT.
+  */
+  if( db->xProgress!=0 && nVmStep>=nProgressLimit ){
+    assert( db->nProgressOps!=0 );
+    nProgressLimit = nVmStep + db->nProgressOps - (nVmStep%db->nProgressOps);
+    if( db->xProgress(db->pProgressArg) ){
+      rc = SQLITE_INTERRUPT;
+      goto abort_due_to_error;
+    }
+  }
+#endif
+
+  break;
+}
+
+/* Opcode:  Gosub P1 P2 * * *
+**
+** Write the current address onto register P1
+** and then jump to address P2.
+*/
+case OP_Gosub: {            /* jump */
+  assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) );
+  pIn1 = &aMem[pOp->p1];
+  assert( VdbeMemDynamic(pIn1)==0 );
+  memAboutToChange(p, pIn1);
+  pIn1->flags = MEM_Int;
+  pIn1->u.i = (int)(pOp-aOp);
+  REGISTER_TRACE(pOp->p1, pIn1);
+
+  /* Most jump operations do a goto to this spot in order to update
+  ** the pOp pointer. */
+jump_to_p2:
+  pOp = &aOp[pOp->p2 - 1];
+  break;
+}
+
+/* Opcode:  Return P1 * * * *
+**
+** Jump to the next instruction after the address in register P1.  After
+** the jump, register P1 becomes undefined.
+*/
+case OP_Return: {           /* in1 */
+  pIn1 = &aMem[pOp->p1];
+  assert( pIn1->flags==MEM_Int );
+  pOp = &aOp[pIn1->u.i];
+  pIn1->flags = MEM_Undefined;
+  break;
+}
+
+/* Opcode: InitCoroutine P1 P2 P3 * *
+**
+** Set up register P1 so that it will Yield to the coroutine
+** located at address P3.
+**
+** If P2!=0 then the coroutine implementation immediately follows
+** this opcode.  So jump over the coroutine implementation to
+** address P2.
+**
+** See also: EndCoroutine
+*/
+case OP_InitCoroutine: {     /* jump */
+  assert( pOp->p1>0 &&  pOp->p1<=(p->nMem+1 - p->nCursor) );
+  assert( pOp->p2>=0 && pOp->p2<p->nOp );
+  assert( pOp->p3>=0 && pOp->p3<p->nOp );
+  pOut = &aMem[pOp->p1];
+  assert( !VdbeMemDynamic(pOut) );
+  pOut->u.i = pOp->p3 - 1;
+  pOut->flags = MEM_Int;
+  if( pOp->p2 ) goto jump_to_p2;
+  break;
+}
+
+/* Opcode:  EndCoroutine P1 * * * *
+**
+** The instruction at the address in register P1 is a Yield.
+** Jump to the P2 parameter of that Yield.
+** After the jump, register P1 becomes undefined.
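+**
+** (Illustration) The generated pattern is: InitCoroutine jumps over the
+** co-routine body; the consumer runs Yield on register P1 to execute the
+** body; the body finishes with EndCoroutine, which lands the program
+** counter on the P2 operand of that consumer Yield.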
+**
+** See also: InitCoroutine
+*/
+case OP_EndCoroutine: {           /* in1 */
+  VdbeOp *pCaller;
+  pIn1 = &aMem[pOp->p1];
+  assert( pIn1->flags==MEM_Int );
+  assert( pIn1->u.i>=0 && pIn1->u.i<p->nOp );
+  pCaller = &aOp[pIn1->u.i];
+  assert( pCaller->opcode==OP_Yield );
+  assert( pCaller->p2>=0 && pCaller->p2<p->nOp );
+  pOp = &aOp[pCaller->p2 - 1];
+  pIn1->flags = MEM_Undefined;
+  break;
+}
+
+/* Opcode:  Yield P1 P2 * * *
+**
+** Swap the program counter with the value in register P1.  This
+** has the effect of yielding to a coroutine.
+**
+** If the coroutine that is launched by this instruction ends with
+** Yield or Return then continue to the next instruction.  But if
+** the coroutine launched by this instruction ends with
+** EndCoroutine, then jump to P2 rather than continuing with the
+** next instruction.
+**
+** See also: InitCoroutine
+*/
+case OP_Yield: {            /* in1, jump */
+  int pcDest;
+  pIn1 = &aMem[pOp->p1];
+  assert( VdbeMemDynamic(pIn1)==0 );
+  pIn1->flags = MEM_Int;
+  pcDest = (int)pIn1->u.i;
+  pIn1->u.i = (int)(pOp - aOp);
+  REGISTER_TRACE(pOp->p1, pIn1);
+  pOp = &aOp[pcDest];
+  break;
+}
+
+/* Opcode:  HaltIfNull  P1 P2 P3 P4 P5
+** Synopsis: if r[P3]=null halt
+**
+** Check the value in register P3.  If it is NULL then Halt using
+** parameter P1, P2, and P4 as if this were a Halt instruction.  If the
+** value in register P3 is not NULL, then this routine is a no-op.
+** The P5 parameter should be 1.
+*/
+case OP_HaltIfNull: {      /* in3 */
+  pIn3 = &aMem[pOp->p3];
+  if( (pIn3->flags & MEM_Null)==0 ) break;
+  /* Fall through into OP_Halt */
+}
+
+/* Opcode:  Halt P1 P2 * P4 P5
+**
+** Exit immediately.  All open cursors, etc are closed
+** automatically.
+**
+** P1 is the result code returned by sqlite3_exec(), sqlite3_reset(),
+** or sqlite3_finalize().  For a normal halt, this should be SQLITE_OK (0).
+** For errors, it can be some other value.  If P1!=0 then P2 will determine
+** whether or not to rollback the current transaction.  Do not rollback
+** if P2==OE_Fail. Do the rollback if P2==OE_Rollback.  If P2==OE_Abort,
+** then back out all changes that have occurred during this execution of the
+** VDBE, but do not rollback the transaction.
+**
+** If P4 is not null then it is an error message string.
+**
+** P5 is a value between 0 and 4, inclusive, that modifies the P4 string.
+**
+**    0:  (no change)
+**    1:  NOT NULL constraint failed: P4
+**    2:  UNIQUE constraint failed: P4
+**    3:  CHECK constraint failed: P4
+**    4:  FOREIGN KEY constraint failed: P4
+**
+** If P5 is not zero and P4 is NULL, then everything after the ":" is
+** omitted.
+**
+** There is an implied "Halt 0 0 0" instruction inserted at the very end of
+** every program.  So a jump past the last instruction of the program
+** is the same as executing Halt.
+*/
+case OP_Halt: {
+  VdbeFrame *pFrame;
+  int pcx;
+
+  pcx = (int)(pOp - aOp);
+  if( pOp->p1==SQLITE_OK && p->pFrame ){
+    /* Halt the sub-program.  Return control to the parent frame. */
+    pFrame = p->pFrame;
+    p->pFrame = pFrame->pParent;
+    p->nFrame--;
+    sqlite3VdbeSetChanges(db, p->nChange);
+    pcx = sqlite3VdbeFrameRestore(pFrame);
+    if( pOp->p2==OE_Ignore ){
+      /* Instruction pcx is the OP_Program that invoked the sub-program
+      ** currently being halted.  If the p2 instruction of this OP_Halt
+      ** instruction is set to OE_Ignore, then the sub-program is throwing
+      ** an IGNORE exception.  In this case jump to the address specified
+      ** as the p2 of the calling OP_Program.
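+      ** (This is the path a trigger body takes when it executes
+      ** RAISE(IGNORE): control resumes at the caller's p2 without rolling
+      ** anything back.)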
*/ + pcx = p->aOp[pcx].p2-1; + } + aOp = p->aOp; + aMem = p->aMem; + pOp = &aOp[pcx]; + break; + } + p->rc = pOp->p1; + p->errorAction = (u8)pOp->p2; + p->pc = pcx; + assert( pOp->p5<=4 ); + if( p->rc ){ + if( pOp->p5 ){ + static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", + "FOREIGN KEY" }; + testcase( pOp->p5==1 ); + testcase( pOp->p5==2 ); + testcase( pOp->p5==3 ); + testcase( pOp->p5==4 ); + sqlite3VdbeError(p, "%s constraint failed", azType[pOp->p5-1]); + if( pOp->p4.z ){ + p->zErrMsg = sqlite3MPrintf(db, "%z: %s", p->zErrMsg, pOp->p4.z); + } + }else{ + sqlite3VdbeError(p, "%s", pOp->p4.z); + } + sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg); + } + rc = sqlite3VdbeHalt(p); + assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); + if( rc==SQLITE_BUSY ){ + p->rc = SQLITE_BUSY; + }else{ + assert( rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ); + assert( rc==SQLITE_OK || db->nDeferredCons>0 || db->nDeferredImmCons>0 ); + rc = p->rc ? SQLITE_ERROR : SQLITE_DONE; + } + goto vdbe_return; +} + +/* Opcode: Integer P1 P2 * * * +** Synopsis: r[P2]=P1 +** +** The 32-bit integer value P1 is written into register P2. +*/ +case OP_Integer: { /* out2 */ + pOut = out2Prerelease(p, pOp); + pOut->u.i = pOp->p1; + break; +} + +/* Opcode: Int64 * P2 * P4 * +** Synopsis: r[P2]=P4 +** +** P4 is a pointer to a 64-bit integer value. +** Write that value into register P2. +*/ +case OP_Int64: { /* out2 */ + pOut = out2Prerelease(p, pOp); + assert( pOp->p4.pI64!=0 ); + pOut->u.i = *pOp->p4.pI64; + break; +} + +#ifndef SQLITE_OMIT_FLOATING_POINT +/* Opcode: Real * P2 * P4 * +** Synopsis: r[P2]=P4 +** +** P4 is a pointer to a 64-bit floating point value. +** Write that value into register P2. +*/ +case OP_Real: { /* same as TK_FLOAT, out2 */ + pOut = out2Prerelease(p, pOp); + pOut->flags = MEM_Real; + assert( !sqlite3IsNaN(*pOp->p4.pReal) ); + pOut->u.r = *pOp->p4.pReal; + break; +} +#endif + +/* Opcode: String8 * P2 * P4 * +** Synopsis: r[P2]='P4' +** +** P4 points to a nul terminated UTF-8 string. This opcode is transformed +** into a String opcode before it is executed for the first time. During +** this transformation, the length of string P4 is computed and stored +** as the P1 parameter. +*/ +case OP_String8: { /* same as TK_STRING, out2 */ + assert( pOp->p4.z!=0 ); + pOut = out2Prerelease(p, pOp); + pOp->opcode = OP_String; + pOp->p1 = sqlite3Strlen30(pOp->p4.z); + +#ifndef SQLITE_OMIT_UTF16 + if( encoding!=SQLITE_UTF8 ){ + rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC); + assert( rc==SQLITE_OK || rc==SQLITE_TOOBIG ); + if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem; + assert( pOut->szMalloc>0 && pOut->zMalloc==pOut->z ); + assert( VdbeMemDynamic(pOut)==0 ); + pOut->szMalloc = 0; + pOut->flags |= MEM_Static; + if( pOp->p4type==P4_DYNAMIC ){ + sqlite3DbFree(db, pOp->p4.z); + } + pOp->p4type = P4_DYNAMIC; + pOp->p4.z = pOut->z; + pOp->p1 = pOut->n; + } + testcase( rc==SQLITE_TOOBIG ); +#endif + if( pOp->p1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ + goto too_big; + } + assert( rc==SQLITE_OK ); + /* Fall through to the next case, OP_String */ +} + +/* Opcode: String P1 P2 P3 P4 P5 +** Synopsis: r[P2]='P4' (len=P1) +** +** The string value P4 of length P1 (bytes) is stored in register P2. +** +** If P3 is not zero and the content of register P3 is equal to P5, then +** the datatype of the register P2 is converted to BLOB. 
The content is +** the same sequence of bytes, it is merely interpreted as a BLOB instead +** of a string, as if it had been CAST. In other words: +** +** if( P3!=0 and reg[P3]==P5 ) reg[P2] := CAST(reg[P2] as BLOB) +*/ +case OP_String: { /* out2 */ + assert( pOp->p4.z!=0 ); + pOut = out2Prerelease(p, pOp); + pOut->flags = MEM_Str|MEM_Static|MEM_Term; + pOut->z = pOp->p4.z; + pOut->n = pOp->p1; + pOut->enc = encoding; + UPDATE_MAX_BLOBSIZE(pOut); +#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS + if( pOp->p3>0 ){ + assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); + pIn3 = &aMem[pOp->p3]; + assert( pIn3->flags & MEM_Int ); + if( pIn3->u.i==pOp->p5 ) pOut->flags = MEM_Blob|MEM_Static|MEM_Term; + } +#endif + break; +} + +/* Opcode: Null P1 P2 P3 * * +** Synopsis: r[P2..P3]=NULL +** +** Write a NULL into registers P2. If P3 greater than P2, then also write +** NULL into register P3 and every register in between P2 and P3. If P3 +** is less than P2 (typically P3 is zero) then only register P2 is +** set to NULL. +** +** If the P1 value is non-zero, then also set the MEM_Cleared flag so that +** NULL values will not compare equal even if SQLITE_NULLEQ is set on +** OP_Ne or OP_Eq. +*/ +case OP_Null: { /* out2 */ + int cnt; + u16 nullFlag; + pOut = out2Prerelease(p, pOp); + cnt = pOp->p3-pOp->p2; + assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); + pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null; + pOut->n = 0; + while( cnt>0 ){ + pOut++; + memAboutToChange(p, pOut); + sqlite3VdbeMemSetNull(pOut); + pOut->flags = nullFlag; + pOut->n = 0; + cnt--; + } + break; +} + +/* Opcode: SoftNull P1 * * * * +** Synopsis: r[P1]=NULL +** +** Set register P1 to have the value NULL as seen by the OP_MakeRecord +** instruction, but do not free any string or blob memory associated with +** the register, so that if the value was a string or blob that was +** previously copied using OP_SCopy, the copies will continue to be valid. +*/ +case OP_SoftNull: { + assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) ); + pOut = &aMem[pOp->p1]; + pOut->flags = (pOut->flags|MEM_Null)&~MEM_Undefined; + break; +} + +/* Opcode: Blob P1 P2 * P4 * +** Synopsis: r[P2]=P4 (len=P1) +** +** P4 points to a blob of data P1 bytes long. Store this +** blob in register P2. +*/ +case OP_Blob: { /* out2 */ + assert( pOp->p1 <= SQLITE_MAX_LENGTH ); + pOut = out2Prerelease(p, pOp); + sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0); + pOut->enc = encoding; + UPDATE_MAX_BLOBSIZE(pOut); + break; +} + +/* Opcode: Variable P1 P2 * P4 * +** Synopsis: r[P2]=parameter(P1,P4) +** +** Transfer the values of bound parameter P1 into register P2 +** +** If the parameter is named, then its name appears in P4. +** The P4 value is used by sqlite3_bind_parameter_name(). +*/ +case OP_Variable: { /* out2 */ + Mem *pVar; /* Value being transferred */ + + assert( pOp->p1>0 && pOp->p1<=p->nVar ); + assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) ); + pVar = &p->aVar[pOp->p1 - 1]; + if( sqlite3VdbeMemTooBig(pVar) ){ + goto too_big; + } + pOut = &aMem[pOp->p2]; + sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); + UPDATE_MAX_BLOBSIZE(pOut); + break; +} + +/* Opcode: Move P1 P2 P3 * * +** Synopsis: r[P2@P3]=r[P1@P3] +** +** Move the P3 values in register P1..P1+P3-1 over into +** registers P2..P2+P3-1. Registers P1..P1+P3-1 are +** left holding a NULL. It is an error for register ranges +** P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error +** for P3 to be less than 1. 
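+**
+** For example (illustration only): "Move 1 4 2" moves the values in r[1]
+** and r[2] into r[4] and r[5], leaving r[1] and r[2] holding NULL.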
+*/
+case OP_Move: {
+  int n;           /* Number of registers left to copy */
+  int p1;          /* Register to copy from */
+  int p2;          /* Register to copy to */
+
+  n = pOp->p3;
+  p1 = pOp->p1;
+  p2 = pOp->p2;
+  assert( n>0 && p1>0 && p2>0 );
+  assert( p1+n<=p2 || p2+n<=p1 );
+
+  pIn1 = &aMem[p1];
+  pOut = &aMem[p2];
+  do{
+    assert( pOut<=&aMem[(p->nMem+1 - p->nCursor)] );
+    assert( pIn1<=&aMem[(p->nMem+1 - p->nCursor)] );
+    assert( memIsValid(pIn1) );
+    memAboutToChange(p, pOut);
+    sqlite3VdbeMemMove(pOut, pIn1);
+#ifdef SQLITE_DEBUG
+    if( pOut->pScopyFrom>=&aMem[p1] && pOut->pScopyFrom<pIn1 ){
+      pOut->pScopyFrom += pOp->p2 - p1;
+    }
+#endif
+    Deephemeralize(pOut);
+    REGISTER_TRACE(p2++, pOut);
+    pIn1++;
+    pOut++;
+  }while( --n );
+  break;
+}
+
+/* Opcode: Copy P1 P2 P3 * *
+** Synopsis: r[P2@P3+1]=r[P1@P3+1]
+**
+** Make a copy of registers P1..P1+P3 into registers P2..P2+P3.
+**
+** This instruction makes a deep copy of the value.  A duplicate
+** is made of any string or blob constant.  See also OP_SCopy.
+*/
+case OP_Copy: {
+  int n;
+
+  n = pOp->p3;
+  pIn1 = &aMem[pOp->p1];
+  pOut = &aMem[pOp->p2];
+  assert( pOut!=pIn1 );
+  while( 1 ){
+    sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
+    Deephemeralize(pOut);
+#ifdef SQLITE_DEBUG
+    pOut->pScopyFrom = 0;
+#endif
+    REGISTER_TRACE(pOp->p2+pOp->p3-n, pOut);
+    if( (n--)==0 ) break;
+    pOut++;
+    pIn1++;
+  }
+  break;
+}
+
+/* Opcode: SCopy P1 P2 * * *
+** Synopsis: r[P2]=r[P1]
+**
+** Make a shallow copy of register P1 into register P2.
+**
+** This instruction makes a shallow copy of the value.  If the value
+** is a string or blob, then the copy is only a pointer to the
+** original and hence if the original changes so will the copy.
+** Worse, if the original is deallocated, the copy becomes invalid.
+** Thus the program must guarantee that the original will not change
+** during the lifetime of the copy.  Use OP_Copy to make a complete
+** copy.
+*/
+case OP_SCopy: {            /* out2 */
+  pIn1 = &aMem[pOp->p1];
+  pOut = &aMem[pOp->p2];
+  assert( pOut!=pIn1 );
+  sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
+#ifdef SQLITE_DEBUG
+  if( pOut->pScopyFrom==0 ) pOut->pScopyFrom = pIn1;
+#endif
+  break;
+}
+
+/* Opcode: IntCopy P1 P2 * * *
+** Synopsis: r[P2]=r[P1]
+**
+** Transfer the integer value held in register P1 into register P2.
+**
+** This is an optimized version of SCopy that works only for integer
+** values.
+*/
+case OP_IntCopy: {            /* out2 */
+  pIn1 = &aMem[pOp->p1];
+  assert( (pIn1->flags & MEM_Int)!=0 );
+  pOut = &aMem[pOp->p2];
+  sqlite3VdbeMemSetInt64(pOut, pIn1->u.i);
+  break;
+}
+
+/* Opcode: ResultRow P1 P2 * * *
+** Synopsis: output=r[P1@P2]
+**
+** The registers P1 through P1+P2-1 contain a single row of
+** results. This opcode causes the sqlite3_step() call to terminate
+** with an SQLITE_ROW return code and it sets up the sqlite3_stmt
+** structure to provide access to the r(P1)..r(P1+P2-1) values as
+** the result row.
+*/
+case OP_ResultRow: {
+  Mem *pMem;
+  int i;
+  assert( p->nResColumn==pOp->p2 );
+  assert( pOp->p1>0 );
+  assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 );
+
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+  /* Run the progress counter just before returning.
+  */
+  if( db->xProgress!=0
+   && nVmStep>=nProgressLimit
+   && db->xProgress(db->pProgressArg)!=0
+  ){
+    rc = SQLITE_INTERRUPT;
+    goto abort_due_to_error;
+  }
+#endif
+
+  /* If this statement has violated immediate foreign key constraints, do
+  ** not return the number of rows modified. And do not RELEASE the statement
+  ** transaction. It needs to be rolled back.
+  */
+  if( SQLITE_OK!=(rc = sqlite3VdbeCheckFk(p, 0)) ){
+    assert( db->flags&SQLITE_CountRows );
+    assert( p->usesStmtJournal );
+    goto abort_due_to_error;
+  }
+
+  /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then
+  ** DML statements invoke this opcode to return the number of rows
+  ** modified to the user. This is the only way that a VM that
+  ** opens a statement transaction may invoke this opcode.
+  **
+  ** In case this is such a statement, close any statement transaction
+  ** opened by this VM before returning control to the user. This is to
+  ** ensure that statement-transactions are always nested, not overlapping.
+  ** If the open statement-transaction is not closed here, then the user
+  ** may step another VM that opens its own statement transaction. This
+  ** may lead to overlapping statement transactions.
+  **
+  ** The statement transaction is never a top-level transaction.  Hence
+  ** the RELEASE call below can never fail.
+  */
+  assert( p->iStatement==0 || db->flags&SQLITE_CountRows );
+  rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE);
+  assert( rc==SQLITE_OK );
+
+  /* Invalidate all ephemeral cursor row caches */
+  p->cacheCtr = (p->cacheCtr + 2)|1;
+
+  /* Make sure the results of the current row are \000 terminated
+  ** and have an assigned type.  The results are de-ephemeralized as
+  ** a side effect.
+  */
+  pMem = p->pResultSet = &aMem[pOp->p1];
+  for(i=0; i<pOp->p2; i++){
+    assert( memIsValid(&pMem[i]) );
+    Deephemeralize(&pMem[i]);
+    assert( (pMem[i].flags & MEM_Ephem)==0
+            || (pMem[i].flags & (MEM_Str|MEM_Blob))==0 );
+    sqlite3VdbeMemNulTerminate(&pMem[i]);
+    REGISTER_TRACE(pOp->p1+i, &pMem[i]);
+  }
+  if( db->mallocFailed ) goto no_mem;
+
+  if( db->mTrace & SQLITE_TRACE_ROW ){
+    db->xTrace(SQLITE_TRACE_ROW, db->pTraceArg, p, 0);
+  }
+
+  /* Return SQLITE_ROW
+  */
+  p->pc = (int)(pOp - aOp) + 1;
+  rc = SQLITE_ROW;
+  goto vdbe_return;
+}
+
+/* Opcode: Concat P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]+r[P1]
+**
+** Add the text in register P1 onto the end of the text in
+** register P2 and store the result in register P3.
+** If either the P1 or P2 text are NULL then store NULL in P3.
+**
+**   P3 = P2 || P1
+**
+** It is illegal for P1 and P3 to be the same register. Sometimes,
+** if P3 is the same register as P2, the implementation is able
+** to avoid a memcpy().
+*/
+case OP_Concat: {           /* same as TK_CONCAT, in1, in2, out3 */
+  i64 nByte;
+
+  pIn1 = &aMem[pOp->p1];
+  pIn2 = &aMem[pOp->p2];
+  pOut = &aMem[pOp->p3];
+  assert( pIn1!=pOut );
+  if( (pIn1->flags | pIn2->flags) & MEM_Null ){
+    sqlite3VdbeMemSetNull(pOut);
+    break;
+  }
+  if( ExpandBlob(pIn1) || ExpandBlob(pIn2) ) goto no_mem;
+  Stringify(pIn1, encoding);
+  Stringify(pIn2, encoding);
+  nByte = pIn1->n + pIn2->n;
+  if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+    goto too_big;
+  }
+  if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){
+    goto no_mem;
+  }
+  MemSetTypeFlag(pOut, MEM_Str);
+  if( pOut!=pIn2 ){
+    memcpy(pOut->z, pIn2->z, pIn2->n);
+  }
+  memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n);
+  pOut->z[nByte]=0;
+  pOut->z[nByte+1] = 0;
+  pOut->flags |= MEM_Term;
+  pOut->n = (int)nByte;
+  pOut->enc = encoding;
+  UPDATE_MAX_BLOBSIZE(pOut);
+  break;
+}
+
+/* Opcode: Add P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]+r[P2]
+**
+** Add the value in register P1 to the value in register P2
+** and store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: Multiply P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]*r[P2]
+**
+** Multiply the value in register P1 by the value in register P2
+** and store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: Subtract P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]-r[P1]
+**
+** Subtract the value in register P1 from the value in register P2
+** and store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: Divide P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]/r[P1]
+**
+** Divide the value in register P2 by the value in register P1
+** and store the result in register P3 (P3=P2/P1). If the value in
+** register P1 is zero, then the result is NULL. If either input is
+** NULL, the result is NULL.
+*/
+/* Opcode: Remainder P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]%r[P1]
+**
+** Compute the remainder after integer register P2 is divided by
+** register P1 and store the result in register P3.
+** If the value in register P1 is zero the result is NULL.
+** If either operand is NULL, the result is NULL.
+*/
+case OP_Add:                   /* same as TK_PLUS, in1, in2, out3 */
+case OP_Subtract:              /* same as TK_MINUS, in1, in2, out3 */
+case OP_Multiply:              /* same as TK_STAR, in1, in2, out3 */
+case OP_Divide:                /* same as TK_SLASH, in1, in2, out3 */
+case OP_Remainder: {           /* same as TK_REM, in1, in2, out3 */
+  char bIntint;      /* Started out as two integer operands */
+  u16 flags;         /* Combined MEM_* flags from both inputs */
+  u16 type1;         /* Numeric type of left operand */
+  u16 type2;         /* Numeric type of right operand */
+  i64 iA;            /* Integer value of left operand */
+  i64 iB;            /* Integer value of right operand */
+  double rA;         /* Real value of left operand */
+  double rB;         /* Real value of right operand */
+
+  pIn1 = &aMem[pOp->p1];
+  type1 = numericType(pIn1);
+  pIn2 = &aMem[pOp->p2];
+  type2 = numericType(pIn2);
+  pOut = &aMem[pOp->p3];
+  flags = pIn1->flags | pIn2->flags;
+  if( (flags & MEM_Null)!=0 ) goto arithmetic_result_is_null;
+  if( (type1 & type2 & MEM_Int)!=0 ){
+    iA = pIn1->u.i;
+    iB = pIn2->u.i;
+    bIntint = 1;
+    switch( pOp->opcode ){
+      case OP_Add:       if( sqlite3AddInt64(&iB,iA) ) goto fp_math;  break;
+      case OP_Subtract:  if( sqlite3SubInt64(&iB,iA) ) goto fp_math;  break;
+      case OP_Multiply:  if( sqlite3MulInt64(&iB,iA) ) goto fp_math;  break;
+      case OP_Divide: {
+        if( iA==0 ) goto arithmetic_result_is_null;
+        if( iA==-1 && iB==SMALLEST_INT64 ) goto fp_math;
+        iB /= iA;
+        break;
+      }
+      default: {
+        if( iA==0 ) goto arithmetic_result_is_null;
+        if( iA==-1 ) iA = 1;
+        iB %= iA;
+        break;
+      }
+    }
+    pOut->u.i = iB;
+    MemSetTypeFlag(pOut, MEM_Int);
+  }else{
+    bIntint = 0;
+fp_math:
+    rA = sqlite3VdbeRealValue(pIn1);
+    rB = sqlite3VdbeRealValue(pIn2);
+    switch( pOp->opcode ){
+      case OP_Add:         rB += rA;       break;
+      case OP_Subtract:    rB -= rA;       break;
+      case OP_Multiply:    rB *= rA;       break;
+      case OP_Divide: {
+        /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT...
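+        ** (editor's note, not in the original source: when
+        ** SQLITE_OMIT_FLOATING_POINT is defined, the amalgamation
+        ** typedefs "double" to a 64-bit integer type, so this
+        ** divide-by-zero test remains exact)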
*/ + if( rA==(double)0 ) goto arithmetic_result_is_null; + rB /= rA; + break; + } + default: { + iA = (i64)rA; + iB = (i64)rB; + if( iA==0 ) goto arithmetic_result_is_null; + if( iA==-1 ) iA = 1; + rB = (double)(iB % iA); + break; + } + } +#ifdef SQLITE_OMIT_FLOATING_POINT + pOut->u.i = rB; + MemSetTypeFlag(pOut, MEM_Int); +#else + if( sqlite3IsNaN(rB) ){ + goto arithmetic_result_is_null; + } + pOut->u.r = rB; + MemSetTypeFlag(pOut, MEM_Real); + if( ((type1|type2)&MEM_Real)==0 && !bIntint ){ + sqlite3VdbeIntegerAffinity(pOut); + } +#endif + } + break; + +arithmetic_result_is_null: + sqlite3VdbeMemSetNull(pOut); + break; +} + +/* Opcode: CollSeq P1 * * P4 +** +** P4 is a pointer to a CollSeq struct. If the next call to a user function +** or aggregate calls sqlite3GetFuncCollSeq(), this collation sequence will +** be returned. This is used by the built-in min(), max() and nullif() +** functions. +** +** If P1 is not zero, then it is a register that a subsequent min() or +** max() aggregate will set to 1 if the current row is not the minimum or +** maximum. The P1 register is initialized to 0 by this instruction. +** +** The interface used by the implementation of the aforementioned functions +** to retrieve the collation sequence set by this opcode is not available +** publicly. Only built-in functions have access to this feature. +*/ +case OP_CollSeq: { + assert( pOp->p4type==P4_COLLSEQ ); + if( pOp->p1 ){ + sqlite3VdbeMemSetInt64(&aMem[pOp->p1], 0); + } + break; +} + +/* Opcode: Function0 P1 P2 P3 P4 P5 +** Synopsis: r[P3]=func(r[P2@P5]) +** +** Invoke a user function (P4 is a pointer to a FuncDef object that +** defines the function) with P5 arguments taken from register P2 and +** successors. The result of the function is stored in register P3. +** Register P3 must not be one of the function inputs. +** +** P1 is a 32-bit bitmask indicating whether or not each argument to the +** function was determined to be constant at compile time. If the first +** argument was constant then bit 0 of P1 is set. This is used to determine +** whether meta data associated with a user function argument using the +** sqlite3_set_auxdata() API may be safely retained until the next +** invocation of this opcode. +** +** See also: Function, AggStep, AggFinal +*/ +/* Opcode: Function P1 P2 P3 P4 P5 +** Synopsis: r[P3]=func(r[P2@P5]) +** +** Invoke a user function (P4 is a pointer to an sqlite3_context object that +** contains a pointer to the function to be run) with P5 arguments taken +** from register P2 and successors. The result of the function is stored +** in register P3. Register P3 must not be one of the function inputs. +** +** P1 is a 32-bit bitmask indicating whether or not each argument to the +** function was determined to be constant at compile time. If the first +** argument was constant then bit 0 of P1 is set. This is used to determine +** whether meta data associated with a user function argument using the +** sqlite3_set_auxdata() API may be safely retained until the next +** invocation of this opcode. +** +** SQL functions are initially coded as OP_Function0 with P4 pointing +** to a FuncDef object. But on first evaluation, the P4 operand is +** automatically converted into an sqlite3_context object and the operation +** changed to this OP_Function opcode. In this way, the initialization of +** the sqlite3_context object occurs only once, rather than once for each +** evaluation of the function. 
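+**
+** (Editor's illustration, not in the original source:) for an expression
+** such as like('a%', x), the pattern argument is a compile-time constant,
+** so bit 0 of P1 is set; this tells the like() implementation that any
+** compiled pattern it stores with sqlite3_set_auxdata() on argument 0
+** can safely be reused on the next row.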
+**
+** See also: Function0, AggStep, AggFinal
+*/
+case OP_Function0: {
+  int n;
+  sqlite3_context *pCtx;
+
+  assert( pOp->p4type==P4_FUNCDEF );
+  n = pOp->p5;
+  assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+  assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem+1 - p->nCursor)+1) );
+  assert( pOp->p3<pOp->p2 || pOp->p3>=pOp->p2+n );
+  pCtx = sqlite3DbMallocRawNN(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*));
+  if( pCtx==0 ) goto no_mem;
+  pCtx->pOut = 0;
+  pCtx->pFunc = pOp->p4.pFunc;
+  pCtx->iOp = (int)(pOp - aOp);
+  pCtx->pVdbe = p;
+  pCtx->argc = n;
+  pOp->p4type = P4_FUNCCTX;
+  pOp->p4.pCtx = pCtx;
+  pOp->opcode = OP_Function;
+  /* Fall through into OP_Function */
+}
+case OP_Function: {
+  int i;
+  sqlite3_context *pCtx;
+
+  assert( pOp->p4type==P4_FUNCCTX );
+  pCtx = pOp->p4.pCtx;
+
+  /* If this function is inside of a trigger, the register array in aMem[]
+  ** might change from one evaluation to the next.  The next block of code
+  ** checks to see if the register array has changed, and if so it
+  ** reinitializes the relevant parts of the sqlite3_context object */
+  pOut = &aMem[pOp->p3];
+  if( pCtx->pOut != pOut ){
+    pCtx->pOut = pOut;
+    for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
+  }
+
+  memAboutToChange(p, pCtx->pOut);
+#ifdef SQLITE_DEBUG
+  for(i=0; i<pCtx->argc; i++){
+    assert( memIsValid(pCtx->argv[i]) );
+    REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]);
+  }
+#endif
+  MemSetTypeFlag(pCtx->pOut, MEM_Null);
+  pCtx->fErrorOrAux = 0;
+  (*pCtx->pFunc->xSFunc)(pCtx, pCtx->argc, pCtx->argv);/* IMP: R-24505-23230 */
+
+  /* If the function returned an error, throw an exception */
+  if( pCtx->fErrorOrAux ){
+    if( pCtx->isError ){
+      sqlite3VdbeError(p, "%s", sqlite3_value_text(pCtx->pOut));
+      rc = pCtx->isError;
+    }
+    sqlite3VdbeDeleteAuxData(db, &p->pAuxData, pCtx->iOp, pOp->p1);
+    if( rc ) goto abort_due_to_error;
+  }
+
+  /* Copy the result of the function into register P3 */
+  if( pOut->flags & (MEM_Str|MEM_Blob) ){
+    sqlite3VdbeChangeEncoding(pCtx->pOut, encoding);
+    if( sqlite3VdbeMemTooBig(pCtx->pOut) ) goto too_big;
+  }
+
+  REGISTER_TRACE(pOp->p3, pCtx->pOut);
+  UPDATE_MAX_BLOBSIZE(pCtx->pOut);
+  break;
+}
+
+/* Opcode: BitAnd P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]&r[P2]
+**
+** Take the bit-wise AND of the values in register P1 and P2 and
+** store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: BitOr P1 P2 P3 * *
+** Synopsis: r[P3]=r[P1]|r[P2]
+**
+** Take the bit-wise OR of the values in register P1 and P2 and
+** store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: ShiftLeft P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]<<r[P1]
+**
+** Shift the integer value in register P2 to the left by the
+** number of bits specified by the integer in register P1.
+** Store the result in register P3.
+** If either input is NULL, the result is NULL.
+*/
+/* Opcode: ShiftRight P1 P2 P3 * *
+** Synopsis: r[P3]=r[P2]>>r[P1]
+**
+** Shift the integer value in register P2 to the right by the
+** number of bits specified by the integer in register P1.
+** Store the result in register P3.
+** If either input is NULL, the result is NULL.
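+**
+** (Editor's illustration, not part of the original source:)
+**
+**     r[P2]=5,  r[P1]=2  ->  ShiftLeft  gives r[P3]=20
+**     r[P2]=-8, r[P1]=1  ->  ShiftRight gives r[P3]=-4 (sign-extended)
+**
+** A shift amount of 64 or more yields 0, or -1 for a ShiftRight of a
+** negative value; a negative shift amount shifts in the other direction.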
+*/ +case OP_BitAnd: /* same as TK_BITAND, in1, in2, out3 */ +case OP_BitOr: /* same as TK_BITOR, in1, in2, out3 */ +case OP_ShiftLeft: /* same as TK_LSHIFT, in1, in2, out3 */ +case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */ + i64 iA; + u64 uA; + i64 iB; + u8 op; + + pIn1 = &aMem[pOp->p1]; + pIn2 = &aMem[pOp->p2]; + pOut = &aMem[pOp->p3]; + if( (pIn1->flags | pIn2->flags) & MEM_Null ){ + sqlite3VdbeMemSetNull(pOut); + break; + } + iA = sqlite3VdbeIntValue(pIn2); + iB = sqlite3VdbeIntValue(pIn1); + op = pOp->opcode; + if( op==OP_BitAnd ){ + iA &= iB; + }else if( op==OP_BitOr ){ + iA |= iB; + }else if( iB!=0 ){ + assert( op==OP_ShiftRight || op==OP_ShiftLeft ); + + /* If shifting by a negative amount, shift in the other direction */ + if( iB<0 ){ + assert( OP_ShiftRight==OP_ShiftLeft+1 ); + op = 2*OP_ShiftLeft + 1 - op; + iB = iB>(-64) ? -iB : 64; + } + + if( iB>=64 ){ + iA = (iA>=0 || op==OP_ShiftLeft) ? 0 : -1; + }else{ + memcpy(&uA, &iA, sizeof(uA)); + if( op==OP_ShiftLeft ){ + uA <<= iB; + }else{ + uA >>= iB; + /* Sign-extend on a right shift of a negative number */ + if( iA<0 ) uA |= ((((u64)0xffffffff)<<32)|0xffffffff) << (64-iB); + } + memcpy(&iA, &uA, sizeof(iA)); + } + } + pOut->u.i = iA; + MemSetTypeFlag(pOut, MEM_Int); + break; +} + +/* Opcode: AddImm P1 P2 * * * +** Synopsis: r[P1]=r[P1]+P2 +** +** Add the constant P2 to the value in register P1. +** The result is always an integer. +** +** To force any register to be an integer, just add 0. +*/ +case OP_AddImm: { /* in1 */ + pIn1 = &aMem[pOp->p1]; + memAboutToChange(p, pIn1); + sqlite3VdbeMemIntegerify(pIn1); + pIn1->u.i += pOp->p2; + break; +} + +/* Opcode: MustBeInt P1 P2 * * * +** +** Force the value in register P1 to be an integer. If the value +** in P1 is not an integer and cannot be converted into an integer +** without data loss, then jump immediately to P2, or if P2==0 +** raise an SQLITE_MISMATCH exception. +*/ +case OP_MustBeInt: { /* jump, in1 */ + pIn1 = &aMem[pOp->p1]; + if( (pIn1->flags & MEM_Int)==0 ){ + applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); + VdbeBranchTaken((pIn1->flags&MEM_Int)==0, 2); + if( (pIn1->flags & MEM_Int)==0 ){ + if( pOp->p2==0 ){ + rc = SQLITE_MISMATCH; + goto abort_due_to_error; + }else{ + goto jump_to_p2; + } + } + } + MemSetTypeFlag(pIn1, MEM_Int); + break; +} + +#ifndef SQLITE_OMIT_FLOATING_POINT +/* Opcode: RealAffinity P1 * * * * +** +** If register P1 holds an integer convert it to a real value. +** +** This opcode is used when extracting information from a column that +** has REAL affinity. Such column values may still be stored as +** integers, for space efficiency, but after extraction we want them +** to have only a real value. +*/ +case OP_RealAffinity: { /* in1 */ + pIn1 = &aMem[pOp->p1]; + if( pIn1->flags & MEM_Int ){ + sqlite3VdbeMemRealify(pIn1); + } + break; +} +#endif + +#ifndef SQLITE_OMIT_CAST +/* Opcode: Cast P1 P2 * * * +** Synopsis: affinity(r[P1]) +** +** Force the value in register P1 to be the type defined by P2. +** +**
+**     - TEXT
+**     - BLOB
+**     - NUMERIC
+**     - INTEGER
+**     - REAL
+**
+** A NULL value is not changed by this routine.  It remains NULL.
+*/
+case OP_Cast: {                  /* in1 */
+  assert( pOp->p2>=SQLITE_AFF_BLOB && pOp->p2<=SQLITE_AFF_REAL );
+  testcase( pOp->p2==SQLITE_AFF_TEXT );
+  testcase( pOp->p2==SQLITE_AFF_BLOB );
+  testcase( pOp->p2==SQLITE_AFF_NUMERIC );
+  testcase( pOp->p2==SQLITE_AFF_INTEGER );
+  testcase( pOp->p2==SQLITE_AFF_REAL );
+  pIn1 = &aMem[pOp->p1];
+  memAboutToChange(p, pIn1);
+  rc = ExpandBlob(pIn1);
+  sqlite3VdbeMemCast(pIn1, pOp->p2, encoding);
+  UPDATE_MAX_BLOBSIZE(pIn1);
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+#endif /* SQLITE_OMIT_CAST */
+
+/* Opcode: Eq P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]==r[P1]
+**
+** Compare the values in register P1 and P3.  If reg(P3)==reg(P1) then
+** jump to address P2.  Or if the SQLITE_STOREP2 flag is set in P5, then
+** store the result of comparison in register P2.
+**
+** The SQLITE_AFF_MASK portion of P5 must be an affinity character -
+** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made
+** to coerce both inputs according to this affinity before the
+** comparison is made. If the SQLITE_AFF_MASK is 0x00, then numeric
+** affinity is used. Note that the affinity conversions are stored
+** back into the input registers P1 and P3.  So this opcode can cause
+** persistent changes to registers P1 and P3.
+**
+** Once any conversions have taken place, and neither value is NULL,
+** the values are compared. If both values are blobs then memcmp() is
+** used to determine the results of the comparison.  If both values
+** are text, then the appropriate collating function specified in
+** P4 is used to do the comparison.  If P4 is not specified then
+** memcmp() is used to compare text strings.  If both values are
+** numeric, then a numeric comparison is used. If the two values
+** are of different types, then numbers are considered less than
+** strings and strings are considered less than blobs.
+**
+** If SQLITE_NULLEQ is set in P5 then the result of comparison is always either
+** true or false and is never NULL.  If both operands are NULL then the result
+** of comparison is true.  If either operand is NULL then the result is false.
+** If neither operand is NULL the result is the same as it would be if
+** the SQLITE_NULLEQ flag were omitted from P5.
+**
+** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the
+** content of r[P2] is only changed if the new value is NULL or 0 (false).
+** In other words, a prior r[P2] value will not be overwritten by 1 (true).
+*/
+/* Opcode: Ne P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]!=r[P1]
+**
+** This works just like the Eq opcode except that the jump is taken if
+** the operands in registers P1 and P3 are not equal.  See the Eq opcode for
+** additional information.
+**
+** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the
+** content of r[P2] is only changed if the new value is NULL or 1 (true).
+** In other words, a prior r[P2] value will not be overwritten by 0 (false).
+*/
+/* Opcode: Lt P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]<r[P1]
+**
+** Compare the values in register P1 and P3.  If reg(P3)<reg(P1) then
+** jump to address P2.  Or if the SQLITE_STOREP2 flag is set in P5 store
+** the result of comparison (0 or 1 or NULL) into register P2.
+**
+** If the SQLITE_JUMPIFNULL bit of P5 is set and either reg(P1) or
+** reg(P3) is NULL then the jump is taken.  If the SQLITE_JUMPIFNULL
+** bit is clear then fall through if either operand is NULL.
+**
+** Affinity coercion, collation, and the cross-type ordering rules are
+** the same as described for the Eq opcode above.
+*/
+/* Opcode: Le P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]<=r[P1]
+**
+** This works just like the Lt opcode except that the jump is taken if
+** the content of register P3 is less than or equal to the content of
+** register P1.  See the Lt opcode for additional information.
+*/
+/* Opcode: Gt P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]>r[P1]
+**
+** This works just like the Lt opcode except that the jump is taken if
+** the content of register P3 is greater than the content of
+** register P1.  See the Lt opcode for additional information.
+*/
+/* Opcode: Ge P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]>=r[P1]
+**
+** This works just like the Lt opcode except that the jump is taken if
+** the content of register P3 is greater than or equal to the content of
+** register P1.  See the Lt opcode for additional information.
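+**
+** (Editor's illustration, not in the original source:) with numeric
+** affinity in P5, comparing text '42' in r[P3] against integer 42 in
+** r[P1] first coerces '42' to the integer 42, so Eq would jump; with
+** SQLITE_AFF_TEXT instead, the integer operand is rendered as text and
+** the strings are compared using the P4 collating function.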
+*/ +case OP_Eq: /* same as TK_EQ, jump, in1, in3 */ +case OP_Ne: /* same as TK_NE, jump, in1, in3 */ +case OP_Lt: /* same as TK_LT, jump, in1, in3 */ +case OP_Le: /* same as TK_LE, jump, in1, in3 */ +case OP_Gt: /* same as TK_GT, jump, in1, in3 */ +case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ + int res, res2; /* Result of the comparison of pIn1 against pIn3 */ + char affinity; /* Affinity to use for comparison */ + u16 flags1; /* Copy of initial value of pIn1->flags */ + u16 flags3; /* Copy of initial value of pIn3->flags */ + + pIn1 = &aMem[pOp->p1]; + pIn3 = &aMem[pOp->p3]; + flags1 = pIn1->flags; + flags3 = pIn3->flags; + if( (flags1 | flags3)&MEM_Null ){ + /* One or both operands are NULL */ + if( pOp->p5 & SQLITE_NULLEQ ){ + /* If SQLITE_NULLEQ is set (which will only happen if the operator is + ** OP_Eq or OP_Ne) then take the jump or not depending on whether + ** or not both operands are null. + */ + assert( pOp->opcode==OP_Eq || pOp->opcode==OP_Ne ); + assert( (flags1 & MEM_Cleared)==0 ); + assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 ); + if( (flags1&flags3&MEM_Null)!=0 + && (flags3&MEM_Cleared)==0 + ){ + res = 0; /* Operands are equal */ + }else{ + res = 1; /* Operands are not equal */ + } + }else{ + /* SQLITE_NULLEQ is clear and at least one operand is NULL, + ** then the result is always NULL. + ** The jump is taken if the SQLITE_JUMPIFNULL bit is set. + */ + if( pOp->p5 & SQLITE_STOREP2 ){ + pOut = &aMem[pOp->p2]; + iCompare = 1; /* Operands are not equal */ + memAboutToChange(p, pOut); + MemSetTypeFlag(pOut, MEM_Null); + REGISTER_TRACE(pOp->p2, pOut); + }else{ + VdbeBranchTaken(2,3); + if( pOp->p5 & SQLITE_JUMPIFNULL ){ + goto jump_to_p2; + } + } + break; + } + }else{ + /* Neither operand is NULL. Do a comparison. */ + affinity = pOp->p5 & SQLITE_AFF_MASK; + if( affinity>=SQLITE_AFF_NUMERIC ){ + if( (flags1 | flags3)&MEM_Str ){ + if( (flags1 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ + applyNumericAffinity(pIn1,0); + testcase( flags3!=pIn3->flags ); /* Possible if pIn1==pIn3 */ + flags3 = pIn3->flags; + } + if( (flags3 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ + applyNumericAffinity(pIn3,0); + } + } + /* Handle the common case of integer comparison here, as an + ** optimization, to avoid a call to sqlite3MemCompare() */ + if( (pIn1->flags & pIn3->flags & MEM_Int)!=0 ){ + if( pIn3->u.i > pIn1->u.i ){ res = +1; goto compare_op; } + if( pIn3->u.i < pIn1->u.i ){ res = -1; goto compare_op; } + res = 0; + goto compare_op; + } + }else if( affinity==SQLITE_AFF_TEXT ){ + if( (flags1 & MEM_Str)==0 && (flags1 & (MEM_Int|MEM_Real))!=0 ){ + testcase( pIn1->flags & MEM_Int ); + testcase( pIn1->flags & MEM_Real ); + sqlite3VdbeMemStringify(pIn1, encoding, 1); + testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) ); + flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask); + assert( pIn1!=pIn3 ); + } + if( (flags3 & MEM_Str)==0 && (flags3 & (MEM_Int|MEM_Real))!=0 ){ + testcase( pIn3->flags & MEM_Int ); + testcase( pIn3->flags & MEM_Real ); + sqlite3VdbeMemStringify(pIn3, encoding, 1); + testcase( (flags3&MEM_Dyn) != (pIn3->flags&MEM_Dyn) ); + flags3 = (pIn3->flags & ~MEM_TypeMask) | (flags3 & MEM_TypeMask); + } + } + assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 ); + res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl); + } +compare_op: + switch( pOp->opcode ){ + case OP_Eq: res2 = res==0; break; + case OP_Ne: res2 = res; break; + case OP_Lt: res2 = res<0; break; + case OP_Le: res2 = res<=0; break; + case OP_Gt: res2 = res>0; break; + default: res2 = res>=0; break; + } + + /* 
Undo any changes made by applyAffinity() to the input registers. */
+  assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) );
+  pIn1->flags = flags1;
+  assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) );
+  pIn3->flags = flags3;
+
+  if( pOp->p5 & SQLITE_STOREP2 ){
+    pOut = &aMem[pOp->p2];
+    iCompare = res;
+    res2 = res2!=0;  /* For this path res2 must be exactly 0 or 1 */
+    if( (pOp->p5 & SQLITE_KEEPNULL)!=0 ){
+      /* The KEEPNULL flag prevents OP_Eq from overwriting a NULL with 1
+      ** and prevents OP_Ne from overwriting NULL with 0.  This flag
+      ** is only used in contexts where either:
+      **   (1) op==OP_Eq && (r[P2]==NULL || r[P2]==0)
+      **   (2) op==OP_Ne && (r[P2]==NULL || r[P2]==1)
+      ** Therefore it is not necessary to check the content of r[P2] for
+      ** NULL. */
+      assert( pOp->opcode==OP_Ne || pOp->opcode==OP_Eq );
+      assert( res2==0 || res2==1 );
+      testcase( res2==0 && pOp->opcode==OP_Eq );
+      testcase( res2==1 && pOp->opcode==OP_Eq );
+      testcase( res2==0 && pOp->opcode==OP_Ne );
+      testcase( res2==1 && pOp->opcode==OP_Ne );
+      if( (pOp->opcode==OP_Eq)==res2 ) break;
+    }
+    memAboutToChange(p, pOut);
+    MemSetTypeFlag(pOut, MEM_Int);
+    pOut->u.i = res2;
+    REGISTER_TRACE(pOp->p2, pOut);
+  }else{
+    VdbeBranchTaken(res!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
+    if( res2 ){
+      goto jump_to_p2;
+    }
+  }
+  break;
+}
+
+/* Opcode: ElseNotEq * P2 * * *
+**
+** This opcode must immediately follow an OP_Lt or OP_Gt comparison operator.
+** If the result of an OP_Eq comparison on the same two operands
+** would have been NULL or false (0), then jump to P2.
+** If the result of an OP_Eq comparison on the two previous operands
+** would have been true (1), then fall through.
+*/
+case OP_ElseNotEq: {       /* same as TK_ESCAPE, jump */
+  assert( pOp>aOp );
+  assert( pOp[-1].opcode==OP_Lt || pOp[-1].opcode==OP_Gt );
+  assert( pOp[-1].p5 & SQLITE_STOREP2 );
+  VdbeBranchTaken(iCompare!=0, 2);
+  if( iCompare!=0 ) goto jump_to_p2;
+  break;
+}
+
+
+/* Opcode: Permutation * * * P4 *
+**
+** Set the permutation used by the OP_Compare operator in the next
+** instruction.  The permutation is stored in the P4 operand.
+**
+** The permutation is only valid until the next OP_Compare that has
+** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should
+** occur immediately prior to the OP_Compare.
+**
+** The first integer in the P4 integer array is the length of the array
+** and does not become part of the permutation.
+*/
+case OP_Permutation: {
+  assert( pOp->p4type==P4_INTARRAY );
+  assert( pOp->p4.ai );
+  assert( pOp[1].opcode==OP_Compare );
+  assert( pOp[1].p5 & OPFLAG_PERMUTE );
+  break;
+}
+
+/* Opcode: Compare P1 P2 P3 P4 P5
+** Synopsis: r[P1@P3] <-> r[P2@P3]
+**
+** Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this
+** vector "A") and in reg(P2)..reg(P2+P3-1) ("B").  Save the result of
+** the comparison for use by the next OP_Jump instruction.
+**
+** If P5 has the OPFLAG_PERMUTE bit set, then the order of comparison is
+** determined by the most recent OP_Permutation operator.  If the
+** OPFLAG_PERMUTE bit is clear, then registers are compared in sequential
+** order.
+**
+** P4 is a KeyInfo structure that defines collating sequences and sort
+** orders for the comparison.  The permutation applies to registers
+** only.  The KeyInfo elements are used sequentially.
+**
+** The comparison is a sort comparison, so NULLs compare equal,
+** NULLs are less than numbers, numbers are less than strings,
+** and strings are less than blobs.
+*/
+case OP_Compare: {
+  int n;
+  int i;
+  int p1;
+  int p2;
+  const KeyInfo *pKeyInfo;
+  int idx;
+  CollSeq *pColl;    /* Collating sequence to use on this term */
+  int bRev;          /* True for DESCENDING sort order */
+  int *aPermute;     /* The permutation */
+
+  if( (pOp->p5 & OPFLAG_PERMUTE)==0 ){
+    aPermute = 0;
+  }else{
+    assert( pOp>aOp );
+    assert( pOp[-1].opcode==OP_Permutation );
+    assert( pOp[-1].p4type==P4_INTARRAY );
+    aPermute = pOp[-1].p4.ai + 1;
+    assert( aPermute!=0 );
+  }
+  n = pOp->p3;
+  pKeyInfo = pOp->p4.pKeyInfo;
+  assert( n>0 );
+  assert( pKeyInfo!=0 );
+  p1 = pOp->p1;
+  p2 = pOp->p2;
+#if SQLITE_DEBUG
+  if( aPermute ){
+    int k, mx = 0;
+    for(k=0; k<n; k++) if( aPermute[k]>mx ) mx = aPermute[k];
+    assert( p1>0 && p1+mx<=(p->nMem+1 - p->nCursor)+1 );
+    assert( p2>0 && p2+mx<=(p->nMem+1 - p->nCursor)+1 );
+  }else{
+    assert( p1>0 && p1+n<=(p->nMem+1 - p->nCursor)+1 );
+    assert( p2>0 && p2+n<=(p->nMem+1 - p->nCursor)+1 );
+  }
+#endif /* SQLITE_DEBUG */
+  for(i=0; i<n; i++){
+    idx = aPermute ? aPermute[i] : i;
+    assert( memIsValid(&aMem[p1+idx]) );
+    assert( memIsValid(&aMem[p2+idx]) );
+    assert( i<pKeyInfo->nField );
+    pColl = pKeyInfo->aColl[i];
+    bRev = pKeyInfo->aSortOrder[i];
+    iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl);
+    if( iCompare ){
+      if( bRev ) iCompare = -iCompare;
+      break;
+    }
+  }
+  break;
+}
+
+/* Opcode: Jump P1 P2 P3 * *
+**
+** Jump to the instruction at address P1, P2, or P3 depending on whether
+** in the most recent OP_Compare instruction the P1 vector was less than,
+** equal to, or greater than the P2 vector, respectively.
+*/
+case OP_Jump: {             /* jump */
+  if( iCompare<0 ){
+    VdbeBranchTaken(0,3); pOp = &aOp[pOp->p1 - 1];
+  }else if( iCompare==0 ){
+    VdbeBranchTaken(1,3); pOp = &aOp[pOp->p2 - 1];
+  }else{
+    VdbeBranchTaken(2,3); pOp = &aOp[pOp->p3 - 1];
+  }
+  break;
+}
+
+/* Opcode: And P1 P2 P3 * *
+** Synopsis: r[P3]=(r[P1] && r[P2])
+**
+** Take the logical AND of the values in registers P1 and P2 and
+** write the result into register P3.
+**
+** If either P1 or P2 is 0 (false) then the result is 0 even if
+** the other input is NULL.  A NULL and true or two NULLs give
+** a NULL output.
+*/
+/* Opcode: Or P1 P2 P3 * *
+** Synopsis: r[P3]=(r[P1] || r[P2])
+**
+** Take the logical OR of the values in register P1 and P2 and
+** store the answer in register P3.
+**
+** If either P1 or P2 is nonzero (true) then the result is 1 (true)
+** even if the other input is NULL.  A NULL and false or two NULLs
+** give a NULL output.
+*/
+case OP_And:              /* same as TK_AND, in1, in2, out3 */
+case OP_Or: {             /* same as TK_OR, in1, in2, out3 */
+  int v1;    /* Left operand:  0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+  int v2;    /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */
+
+  pIn1 = &aMem[pOp->p1];
+  if( pIn1->flags & MEM_Null ){
+    v1 = 2;
+  }else{
+    v1 = sqlite3VdbeIntValue(pIn1)!=0;
+  }
+  pIn2 = &aMem[pOp->p2];
+  if( pIn2->flags & MEM_Null ){
+    v2 = 2;
+  }else{
+    v2 = sqlite3VdbeIntValue(pIn2)!=0;
+  }
+  if( pOp->opcode==OP_And ){
+    static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 };
+    v1 = and_logic[v1*3+v2];
+  }else{
+    static const unsigned char or_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 };
+    v1 = or_logic[v1*3+v2];
+  }
+  pOut = &aMem[pOp->p3];
+  if( v1==2 ){
+    MemSetTypeFlag(pOut, MEM_Null);
+  }else{
+    pOut->u.i = v1;
+    MemSetTypeFlag(pOut, MEM_Int);
+  }
+  break;
+}
+
+/* Opcode: Not P1 P2 * * *
+** Synopsis: r[P2]= !r[P1]
+**
+** Interpret the value in register P1 as a boolean value.  Store the
+** boolean complement in register P2.  If the value in register P1 is
+** NULL, then a NULL is stored in P2.
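+**
+** (Editor's note, not in the original source:) together with the And/Or
+** opcodes above, this implements SQL three-valued logic: NOT NULL is
+** NULL, while the truth tables give (NULL AND 0)==0 and (NULL OR 1)==1.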
+*/ +case OP_Not: { /* same as TK_NOT, in1, out2 */ + pIn1 = &aMem[pOp->p1]; + pOut = &aMem[pOp->p2]; + sqlite3VdbeMemSetNull(pOut); + if( (pIn1->flags & MEM_Null)==0 ){ + pOut->flags = MEM_Int; + pOut->u.i = !sqlite3VdbeIntValue(pIn1); + } + break; +} + +/* Opcode: BitNot P1 P2 * * * +** Synopsis: r[P1]= ~r[P1] +** +** Interpret the content of register P1 as an integer. Store the +** ones-complement of the P1 value into register P2. If P1 holds +** a NULL then store a NULL in P2. +*/ +case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ + pIn1 = &aMem[pOp->p1]; + pOut = &aMem[pOp->p2]; + sqlite3VdbeMemSetNull(pOut); + if( (pIn1->flags & MEM_Null)==0 ){ + pOut->flags = MEM_Int; + pOut->u.i = ~sqlite3VdbeIntValue(pIn1); + } + break; +} + +/* Opcode: Once P1 P2 * * * +** +** If the P1 value is equal to the P1 value on the OP_Init opcode at +** instruction 0, then jump to P2. If the two P1 values differ, then +** set the P1 value on this opcode to equal the P1 value on the OP_Init +** and fall through. +*/ +case OP_Once: { /* jump */ + assert( p->aOp[0].opcode==OP_Init ); + VdbeBranchTaken(p->aOp[0].p1==pOp->p1, 2); + if( p->aOp[0].p1==pOp->p1 ){ + goto jump_to_p2; + }else{ + pOp->p1 = p->aOp[0].p1; + } + break; +} + +/* Opcode: If P1 P2 P3 * * +** +** Jump to P2 if the value in register P1 is true. The value +** is considered true if it is numeric and non-zero. If the value +** in P1 is NULL then take the jump if and only if P3 is non-zero. +*/ +/* Opcode: IfNot P1 P2 P3 * * +** +** Jump to P2 if the value in register P1 is False. The value +** is considered false if it has a numeric value of zero. If the value +** in P1 is NULL then take the jump if and only if P3 is non-zero. +*/ +case OP_If: /* jump, in1 */ +case OP_IfNot: { /* jump, in1 */ + int c; + pIn1 = &aMem[pOp->p1]; + if( pIn1->flags & MEM_Null ){ + c = pOp->p3; + }else{ +#ifdef SQLITE_OMIT_FLOATING_POINT + c = sqlite3VdbeIntValue(pIn1)!=0; +#else + c = sqlite3VdbeRealValue(pIn1)!=0.0; +#endif + if( pOp->opcode==OP_IfNot ) c = !c; + } + VdbeBranchTaken(c!=0, 2); + if( c ){ + goto jump_to_p2; + } + break; +} + +/* Opcode: IsNull P1 P2 * * * +** Synopsis: if r[P1]==NULL goto P2 +** +** Jump to P2 if the value in register P1 is NULL. +*/ +case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ + pIn1 = &aMem[pOp->p1]; + VdbeBranchTaken( (pIn1->flags & MEM_Null)!=0, 2); + if( (pIn1->flags & MEM_Null)!=0 ){ + goto jump_to_p2; + } + break; +} + +/* Opcode: NotNull P1 P2 * * * +** Synopsis: if r[P1]!=NULL goto P2 +** +** Jump to P2 if the value in register P1 is not NULL. +*/ +case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */ + pIn1 = &aMem[pOp->p1]; + VdbeBranchTaken( (pIn1->flags & MEM_Null)==0, 2); + if( (pIn1->flags & MEM_Null)==0 ){ + goto jump_to_p2; + } + break; +} + +/* Opcode: Column P1 P2 P3 P4 P5 +** Synopsis: r[P3]=PX +** +** Interpret the data that cursor P1 points to as a structure built using +** the MakeRecord instruction. (See the MakeRecord opcode for additional +** information about the format of the data.) Extract the P2-th column +** from this record. If there are less that (P2+1) +** values in the record, extract a NULL. +** +** The value extracted is stored in register P3. +** +** If the column contains fewer than P2 fields, then extract a NULL. Or, +** if the P4 argument is a P4_MEM use the value of the P4 argument as +** the result. +** +** If the OPFLAG_CLEARCACHE bit is set on P5 and P1 is a pseudo-table cursor, +** then the cache of the cursor is reset prior to extracting the column. 
+** The first OP_Column against a pseudo-table after the value of the content
+** register has changed should have this bit set.
+**
+** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 when
+** the result is guaranteed to only be used as the argument of a length()
+** or typeof() function, respectively.  The loading of large blobs can be
+** skipped for length() and all content loading can be skipped for typeof().
+*/
+case OP_Column: {
+  int p2;            /* column number to retrieve */
+  VdbeCursor *pC;    /* The VDBE cursor */
+  BtCursor *pCrsr;   /* The BTree cursor */
+  u32 *aOffset;      /* aOffset[i] is offset to start of data for i-th column */
+  int len;           /* The length of the serialized data for the column */
+  int i;             /* Loop counter */
+  Mem *pDest;        /* Where to write the extracted value */
+  Mem sMem;          /* For storing the record being decoded */
+  const u8 *zData;   /* Part of the record being decoded */
+  const u8 *zHdr;    /* Next unparsed byte of the header */
+  const u8 *zEndHdr; /* Pointer to first byte after the header */
+  u32 offset;        /* Offset into the data */
+  u64 offset64;      /* 64-bit offset */
+  u32 avail;         /* Number of bytes of available data */
+  u32 t;             /* A type code from the record header */
+  Mem *pReg;         /* PseudoTable input register */
+
+  pC = p->apCsr[pOp->p1];
+  p2 = pOp->p2;
+
+  /* If the cursor cache is stale, bring it up-to-date */
+  rc = sqlite3VdbeCursorMoveto(&pC, &p2);
+  if( rc ) goto abort_due_to_error;
+
+  assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+  pDest = &aMem[pOp->p3];
+  memAboutToChange(p, pDest);
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( pC!=0 );
+  assert( p2<pC->nField );
+  aOffset = pC->aOffset;
+  assert( pC->eCurType!=CURTYPE_VTAB );
+  assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow );
+  assert( pC->eCurType!=CURTYPE_SORTER );
+
+  if( pC->cacheStatus!=p->cacheCtr ){                /*OPTIMIZATION-IF-FALSE*/
+    if( pC->nullRow ){
+      if( pC->eCurType==CURTYPE_PSEUDO ){
+        assert( pC->uc.pseudoTableReg>0 );
+        pReg = &aMem[pC->uc.pseudoTableReg];
+        assert( pReg->flags & MEM_Blob );
+        assert( memIsValid(pReg) );
+        pC->payloadSize = pC->szRow = avail = pReg->n;
+        pC->aRow = (u8*)pReg->z;
+      }else{
+        sqlite3VdbeMemSetNull(pDest);
+        goto op_column_out;
+      }
+    }else{
+      pCrsr = pC->uc.pCursor;
+      assert( pC->eCurType==CURTYPE_BTREE );
+      assert( pCrsr );
+      assert( sqlite3BtreeCursorIsValid(pCrsr) );
+      pC->payloadSize = sqlite3BtreePayloadSize(pCrsr);
+      pC->aRow = sqlite3BtreePayloadFetch(pCrsr, &avail);
+      assert( avail<=65536 );  /* Maximum page size is 64KiB */
+      if( pC->payloadSize <= (u32)avail ){
+        pC->szRow = pC->payloadSize;
+      }else if( pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+        goto too_big;
+      }else{
+        pC->szRow = avail;
+      }
+    }
+    pC->cacheStatus = p->cacheCtr;
+    pC->iHdrOffset = getVarint32(pC->aRow, offset);
+    pC->nHdrParsed = 0;
+    aOffset[0] = offset;
+
+    if( avail<offset ){
+      /* pC->aRow does not have to hold the entire row, but it does at least
+      ** need to cover the header of the record.  If pC->aRow does not contain
+      ** the complete header, then set it to zero, forcing the header to be
+      ** dynamically allocated. */
+      pC->aRow = 0;
+      pC->szRow = 0;
+
+      /* Make sure a corrupt database has not given us an oversize header.
+      ** Do this now to avoid an oversize memory allocation.
+      **
+      ** Type entries can be between 1 and 5 bytes each.  But 4 and 5 byte
+      ** types use so much data space that there can only be 4096 and 32 of
+      ** them, respectively.  So the maximum header length results from a
+      ** 3-byte type for each of the maximum of 32768 columns plus three
+      ** extra bytes for the header length itself.  32768*3 + 3 = 98307.
+      */
+      if( offset > 98307 || offset > pC->payloadSize ){
+        rc = SQLITE_CORRUPT_BKPT;
+        goto abort_due_to_error;
+      }
+    }else if( offset>0 ){ /*OPTIMIZATION-IF-TRUE*/
+      /* The following goto is an optimization.  It can be omitted and
+      ** everything will still work.  But OP_Column is measurably faster
+      ** by skipping the subsequent conditional, which is always true.
+      */
+      zData = pC->aRow;
+      assert( pC->nHdrParsed<=p2 );         /* Conditional skipped */
+      goto op_column_read_header;
+    }
+  }
+
+  /* Make sure at least the first p2+1 entries of the header have been
+  ** parsed and valid information is in aOffset[] and pC->aType[].
+  */
+  if( pC->nHdrParsed<=p2 ){
+    /* If there is more header available for parsing in the record, try
+    ** to extract additional fields up through the p2+1-th field
+    */
+    if( pC->iHdrOffset<aOffset[0] ){
+      /* Make sure zData points to enough of the record to cover the header. */
+      if( pC->aRow==0 ){
+        memset(&sMem, 0, sizeof(sMem));
+        rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, 0, aOffset[0], &sMem);
+        if( rc!=SQLITE_OK ) goto abort_due_to_error;
+        zData = (u8*)sMem.z;
+      }else{
+        zData = pC->aRow;
+      }
+
+      /* Fill in pC->aType[i] and aOffset[i] values through the p2-th field. */
+    op_column_read_header:
+      i = pC->nHdrParsed;
+      offset64 = aOffset[i];
+      zHdr = zData + pC->iHdrOffset;
+      zEndHdr = zData + aOffset[0];
+      do{
+        if( (t = zHdr[0])<0x80 ){
+          zHdr++;
+          offset64 += sqlite3VdbeOneByteSerialTypeLen(t);
+        }else{
+          zHdr += sqlite3GetVarint32(zHdr, &t);
+          offset64 += sqlite3VdbeSerialTypeLen(t);
+        }
+        pC->aType[i++] = t;
+        aOffset[i] = (u32)(offset64 & 0xffffffff);
+      }while( i<=p2 && zHdr<zEndHdr );
+
+      /* The record is corrupt if any of the following are true:
+      ** (1) the bytes of the header extend past the declared header size
+      ** (2) the entire header was used but not all data was used
+      ** (3) the end of the data extends beyond the end of the record.
+      */
+      if( (zHdr>=zEndHdr && (zHdr>zEndHdr || offset64!=pC->payloadSize))
+       || (offset64 > pC->payloadSize)
+      ){
+        if( pC->aRow==0 ) sqlite3VdbeMemRelease(&sMem);
+        rc = SQLITE_CORRUPT_BKPT;
+        goto abort_due_to_error;
+      }
+
+      pC->nHdrParsed = i;
+      pC->iHdrOffset = (u32)(zHdr - zData);
+      if( pC->aRow==0 ) sqlite3VdbeMemRelease(&sMem);
+    }else{
+      t = 0;
+    }
+
+    /* If after trying to extract new entries from the header, nHdrParsed is
+    ** still not up to p2, that means that the record has fewer than p2
+    ** columns.  So the result will be either the default value or a NULL.
+    */
+    if( pC->nHdrParsed<=p2 ){
+      if( pOp->p4type==P4_MEM ){
+        sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static);
+      }else{
+        sqlite3VdbeMemSetNull(pDest);
+      }
+      goto op_column_out;
+    }
+  }else{
+    t = pC->aType[p2];
+  }
+
+  /* Extract the content for the p2+1-th column.  Control can only
+  ** reach this point if aOffset[p2], aOffset[p2+1], and pC->aType[p2] are
+  ** all valid.
+  */
+  assert( p2<pC->nHdrParsed );
+  assert( rc==SQLITE_OK );
+  assert( sqlite3VdbeCheckMemInvariants(pDest) );
+  if( VdbeMemDynamic(pDest) ){
+    sqlite3VdbeMemSetNull(pDest);
+  }
+  assert( t==pC->aType[p2] );
+  if( pC->szRow>=aOffset[p2+1] ){
+    /* This is the common case where the desired content fits on the original
+    ** page - where the content is not on an overflow page */
+    zData = pC->aRow + aOffset[p2];
+    if( t<12 ){
+      sqlite3VdbeSerialGet(zData, t, pDest);
+    }else{
+      /* If the column value is a string, we need a persistent value, not
+      ** a MEM_Ephem value.  This branch is a fast short-cut that is equivalent
+      ** to calling sqlite3VdbeSerialGet() and sqlite3VdbeDeephemeralize().
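+      ** (Editor's note, not in the original source: for serial types
+      ** t>=12, an even t denotes a BLOB and an odd t denotes TEXT, and
+      ** the payload is (t-12)/2 bytes -- e.g. t==25 is 6 bytes of text --
+      ** which is why aFlag[] below is indexed by t&1.)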
+ */ + static const u16 aFlag[] = { MEM_Blob, MEM_Str|MEM_Term }; + pDest->n = len = (t-12)/2; + pDest->enc = encoding; + if( pDest->szMalloc < len+2 ){ + pDest->flags = MEM_Null; + if( sqlite3VdbeMemGrow(pDest, len+2, 0) ) goto no_mem; + }else{ + pDest->z = pDest->zMalloc; + } + memcpy(pDest->z, zData, len); + pDest->z[len] = 0; + pDest->z[len+1] = 0; + pDest->flags = aFlag[t&1]; + } + }else{ + pDest->enc = encoding; + /* This branch happens only when content is on overflow pages */ + if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0 + && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0)) + || (len = sqlite3VdbeSerialTypeLen(t))==0 + ){ + /* Content is irrelevant for + ** 1. the typeof() function, + ** 2. the length(X) function if X is a blob, and + ** 3. if the content length is zero. + ** So we might as well use bogus content rather than reading + ** content from disk. */ + static u8 aZero[8]; /* This is the bogus content */ + sqlite3VdbeSerialGet(aZero, t, pDest); + }else{ + rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest); + if( rc!=SQLITE_OK ) goto abort_due_to_error; + sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest); + pDest->flags &= ~MEM_Ephem; + } + } + +op_column_out: + UPDATE_MAX_BLOBSIZE(pDest); + REGISTER_TRACE(pOp->p3, pDest); + break; +} + +/* Opcode: Affinity P1 P2 * P4 * +** Synopsis: affinity(r[P1@P2]) +** +** Apply affinities to a range of P2 registers starting with P1. +** +** P4 is a string that is P2 characters long. The nth character of the +** string indicates the column affinity that should be used for the nth +** memory cell in the range. +*/ +case OP_Affinity: { + const char *zAffinity; /* The affinity to be applied */ + char cAff; /* A single character of affinity */ + + zAffinity = pOp->p4.z; + assert( zAffinity!=0 ); + assert( zAffinity[pOp->p2]==0 ); + pIn1 = &aMem[pOp->p1]; + while( (cAff = *(zAffinity++))!=0 ){ + assert( pIn1 <= &p->aMem[(p->nMem+1 - p->nCursor)] ); + assert( memIsValid(pIn1) ); + applyAffinity(pIn1, cAff, encoding); + pIn1++; + } + break; +} + +/* Opcode: MakeRecord P1 P2 P3 P4 * +** Synopsis: r[P3]=mkrec(r[P1@P2]) +** +** Convert P2 registers beginning with P1 into the [record format] +** use as a data record in a database table or as a key +** in an index. The OP_Column opcode can decode the record later. +** +** P4 may be a string that is P2 characters long. The nth character of the +** string indicates the column affinity that should be used for the nth +** field of the index key. +** +** The mapping from character to affinity is given by the SQLITE_AFF_ +** macros defined in sqliteInt.h. +** +** If P4 is NULL then all index fields have the affinity BLOB. 
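+**
+** (Editor's illustration, not part of the original source:) the record
+** for the two values (7, 'hi') is the 6-byte sequence
+**
+**     0x03 0x01 0x11 | 0x07 'h' 'i'
+**
+** a 3-byte header (header-size varint 3, serial type 1 = one-byte
+** integer, serial type 17 = two-byte text) followed by the data.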
+*/
+case OP_MakeRecord: {
+  u8 *zNewRecord;        /* A buffer to hold the data for the new record */
+  Mem *pRec;             /* The new record */
+  u64 nData;             /* Number of bytes of data space */
+  int nHdr;              /* Number of bytes of header space */
+  i64 nByte;             /* Data space required for this record */
+  i64 nZero;             /* Number of zero bytes at the end of the record */
+  int nVarint;           /* Number of bytes in a varint */
+  u32 serial_type;       /* Type field */
+  Mem *pData0;           /* First field to be combined into the record */
+  Mem *pLast;            /* Last field of the record */
+  int nField;            /* Number of fields in the record */
+  char *zAffinity;       /* The affinity string for the record */
+  int file_format;       /* File format to use for encoding */
+  int i;                 /* Space used in zNewRecord[] header */
+  int j;                 /* Space used in zNewRecord[] content */
+  u32 len;               /* Length of a field */
+
+  /* Assuming the record contains N fields, the record format looks
+  ** like this:
+  **
+  ** ------------------------------------------------------------------------
+  ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... | data N-1 |
+  ** ------------------------------------------------------------------------
+  **
+  ** Data(0) is taken from register P1.  Data(1) comes from register P1+1
+  ** and so forth.
+  **
+  ** Each type field is a varint representing the serial type of the
+  ** corresponding data element (see sqlite3VdbeSerialType()). The
+  ** hdr-size field is also a varint which is the offset from the beginning
+  ** of the record to data0.
+  */
+  nData = 0;         /* Number of bytes of data space */
+  nHdr = 0;          /* Number of bytes of header space */
+  nZero = 0;         /* Number of zero bytes at the end of the record */
+  nField = pOp->p1;
+  zAffinity = pOp->p4.z;
+  assert( nField>0 && pOp->p2>0 && pOp->p2+nField<=(p->nMem+1 - p->nCursor)+1 );
+  pData0 = &aMem[nField];
+  nField = pOp->p2;
+  pLast = &pData0[nField-1];
+  file_format = p->minWriteFileFormat;
+
+  /* Identify the output register */
+  assert( pOp->p3<pOp->p1 || pOp->p3>=pOp->p1+pOp->p2 );
+  pOut = &aMem[pOp->p3];
+  memAboutToChange(p, pOut);
+
+  /* Apply the requested affinity to all inputs
+  */
+  assert( pData0<=pLast );
+  if( zAffinity ){
+    pRec = pData0;
+    do{
+      applyAffinity(pRec++, *(zAffinity++), encoding);
+      assert( zAffinity[0]==0 || pRec<=pLast );
+    }while( zAffinity[0] );
+  }
+
+#ifdef SQLITE_ENABLE_NULL_TRIM
+  /* NULLs can be safely trimmed from the end of the record, as long
+  ** as the schema format is 2 or more and none of the omitted columns
+  ** have a non-NULL default value.  Also, the record must be left with
+  ** at least one field.  If P5>0 then it will be one more than the
+  ** index of the right-most column with a non-NULL default value */
+  if( pOp->p5 ){
+    while( (pLast->flags & MEM_Null)!=0 && nField>pOp->p5 ){
+      pLast--;
+      nField--;
+    }
+  }
+#endif
+
+  /* Loop through the elements that will make up the record to figure
+  ** out how much space is required for the new record.
+  */
+  pRec = pLast;
+  do{
+    assert( memIsValid(pRec) );
+    pRec->uTemp = serial_type = sqlite3VdbeSerialType(pRec, file_format, &len);
+    if( pRec->flags & MEM_Zero ){
+      if( nData ){
+        if( sqlite3VdbeMemExpandBlob(pRec) ) goto no_mem;
+      }else{
+        nZero += pRec->u.nZero;
+        len -= pRec->u.nZero;
+      }
+    }
+    nData += len;
+    testcase( serial_type==127 );
+    testcase( serial_type==128 );
+    nHdr += serial_type<=127 ? 1 : sqlite3VarintLen(serial_type);
+    if( pRec==pData0 ) break;
+    pRec--;
+  }while(1);
+
+  /* EVIDENCE-OF: R-22564-11647 The header begins with a single varint
+  ** which determines the total number of bytes in the header. The varint
+  ** value is the size of the header in bytes including the size varint
+  ** itself. */
+  testcase( nHdr==126 );
+  testcase( nHdr==127 );
+  if( nHdr<=126 ){
+    /* The common case */
+    nHdr += 1;
+  }else{
+    /* Rare case of a really large header */
+    nVarint = sqlite3VarintLen(nHdr);
+    nHdr += nVarint;
+    if( nVarint<sqlite3VarintLen(nHdr) ) nHdr++;
+  }
+  nByte = nHdr+nData;
+  if( nByte+nZero>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+    goto too_big;
+  }
+
+  /* Make sure the output register has a buffer large enough to store
+  ** the new record. The output register (pOp->p3) is not allowed to
+  ** be one of the input registers (because the following call to
+  ** sqlite3VdbeMemClearAndResize() could clobber the value before it is used).
+  */
+  if( sqlite3VdbeMemClearAndResize(pOut, (int)nByte) ){
+    goto no_mem;
+  }
+  zNewRecord = (u8 *)pOut->z;
+
+  /* Write the record */
+  i = putVarint32(zNewRecord, nHdr);
+  j = nHdr;
+  assert( pData0<=pLast );
+  pRec = pData0;
+  do{
+    serial_type = pRec->uTemp;
+    /* EVIDENCE-OF: R-06529-47362 Following the size varint are one or more
+    ** additional varints, one per column. */
+    i += putVarint32(&zNewRecord[i], serial_type);            /* serial type */
+    /* EVIDENCE-OF: R-64536-51728 The values for each column in the record
+    ** immediately follow the header. */
+    j += sqlite3VdbeSerialPut(&zNewRecord[j], pRec, serial_type); /* content */
+  }while( (++pRec)<=pLast );
+  assert( i==nHdr );
+  assert( j==nByte );
+
+  assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+  pOut->n = (int)nByte;
+  pOut->flags = MEM_Blob;
+  if( nZero ){
+    pOut->u.nZero = nZero;
+    pOut->flags |= MEM_Zero;
+  }
+  pOut->enc = SQLITE_UTF8;  /* In case the blob is ever converted to text */
+  REGISTER_TRACE(pOp->p3, pOut);
+  UPDATE_MAX_BLOBSIZE(pOut);
+  break;
+}
+
+/* Opcode: Count P1 P2 * * *
+** Synopsis: r[P2]=count()
+**
+** Store the number of entries (an integer value) in the table or index
+** opened by cursor P1 in register P2.
+*/
+#ifndef SQLITE_OMIT_BTREECOUNT
+case OP_Count: {         /* out2 */
+  i64 nEntry;
+  BtCursor *pCrsr;
+
+  assert( p->apCsr[pOp->p1]->eCurType==CURTYPE_BTREE );
+  pCrsr = p->apCsr[pOp->p1]->uc.pCursor;
+  assert( pCrsr );
+  nEntry = 0;  /* Not needed.  Only used to silence a warning. */
+  rc = sqlite3BtreeCount(pCrsr, &nEntry);
+  if( rc ) goto abort_due_to_error;
+  pOut = out2Prerelease(p, pOp);
+  pOut->u.i = nEntry;
+  break;
+}
+#endif
+
+/* Opcode: Savepoint P1 * * P4 *
+**
+** Open, release or rollback the savepoint named by parameter P4, depending
+** on the value of P1. To open a new savepoint, P1==0. To release (commit) an
+** existing savepoint, P1==1, or to rollback an existing savepoint P1==2.
+*/
+case OP_Savepoint: {
+  int p1;                         /* Value of P1 operand */
+  char *zName;                    /* Name of savepoint */
+  int nName;
+  Savepoint *pNew;
+  Savepoint *pSavepoint;
+  Savepoint *pTmp;
+  int iSavepoint;
+  int ii;
+
+  p1 = pOp->p1;
+  zName = pOp->p4.z;
+
+  /* Assert that the p1 parameter is valid. Also that if there is no open
+  ** transaction, then there cannot be any savepoints.
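+  ** (Editor's note, not in the original source: "SAVEPOINT s1" compiles
+  ** to this opcode with P1==SAVEPOINT_BEGIN (0), while "RELEASE s1" and
+  ** "ROLLBACK TO s1" re-enter it with P1==1 and P1==2 respectively.)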
+ */ + assert( db->pSavepoint==0 || db->autoCommit==0 ); + assert( p1==SAVEPOINT_BEGIN||p1==SAVEPOINT_RELEASE||p1==SAVEPOINT_ROLLBACK ); + assert( db->pSavepoint || db->isTransactionSavepoint==0 ); + assert( checkSavepointCount(db) ); + assert( p->bIsReader ); + + if( p1==SAVEPOINT_BEGIN ){ + if( db->nVdbeWrite>0 ){ + /* A new savepoint cannot be created if there are active write + ** statements (i.e. open read/write incremental blob handles). + */ + sqlite3VdbeError(p, "cannot open savepoint - SQL statements in progress"); + rc = SQLITE_BUSY; + }else{ + nName = sqlite3Strlen30(zName); + +#ifndef SQLITE_OMIT_VIRTUALTABLE + /* This call is Ok even if this savepoint is actually a transaction + ** savepoint (and therefore should not prompt xSavepoint()) callbacks. + ** If this is a transaction savepoint being opened, it is guaranteed + ** that the db->aVTrans[] array is empty. */ + assert( db->autoCommit==0 || db->nVTrans==0 ); + rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, + db->nStatement+db->nSavepoint); + if( rc!=SQLITE_OK ) goto abort_due_to_error; +#endif + + /* Create a new savepoint structure. */ + pNew = sqlite3DbMallocRawNN(db, sizeof(Savepoint)+nName+1); + if( pNew ){ + pNew->zName = (char *)&pNew[1]; + memcpy(pNew->zName, zName, nName+1); + + /* If there is no open transaction, then mark this as a special + ** "transaction savepoint". */ + if( db->autoCommit ){ + db->autoCommit = 0; + db->isTransactionSavepoint = 1; + }else{ + db->nSavepoint++; + } + + /* Link the new savepoint into the database handle's list. */ + pNew->pNext = db->pSavepoint; + db->pSavepoint = pNew; + pNew->nDeferredCons = db->nDeferredCons; + pNew->nDeferredImmCons = db->nDeferredImmCons; + } + } + }else{ + iSavepoint = 0; + + /* Find the named savepoint. If there is no such savepoint, then an + ** an error is returned to the user. */ + for( + pSavepoint = db->pSavepoint; + pSavepoint && sqlite3StrICmp(pSavepoint->zName, zName); + pSavepoint = pSavepoint->pNext + ){ + iSavepoint++; + } + if( !pSavepoint ){ + sqlite3VdbeError(p, "no such savepoint: %s", zName); + rc = SQLITE_ERROR; + }else if( db->nVdbeWrite>0 && p1==SAVEPOINT_RELEASE ){ + /* It is not possible to release (commit) a savepoint if there are + ** active write statements. + */ + sqlite3VdbeError(p, "cannot release savepoint - " + "SQL statements in progress"); + rc = SQLITE_BUSY; + }else{ + + /* Determine whether or not this is a transaction savepoint. If so, + ** and this is a RELEASE command, then the current transaction + ** is committed. 
+      */
+      int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint;
+      if( isTransaction && p1==SAVEPOINT_RELEASE ){
+        if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){
+          goto vdbe_return;
+        }
+        db->autoCommit = 1;
+        if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
+          p->pc = (int)(pOp - aOp);
+          db->autoCommit = 0;
+          p->rc = rc = SQLITE_BUSY;
+          goto vdbe_return;
+        }
+        db->isTransactionSavepoint = 0;
+        rc = p->rc;
+      }else{
+        int isSchemaChange;
+        iSavepoint = db->nSavepoint - iSavepoint - 1;
+        if( p1==SAVEPOINT_ROLLBACK ){
+          isSchemaChange = (db->flags & SQLITE_InternChanges)!=0;
+          for(ii=0; ii<db->nDb; ii++){
+            rc = sqlite3BtreeTripAllCursors(db->aDb[ii].pBt,
+                                       SQLITE_ABORT_ROLLBACK,
+                                       isSchemaChange==0);
+            if( rc!=SQLITE_OK ) goto abort_due_to_error;
+          }
+        }else{
+          isSchemaChange = 0;
+        }
+        for(ii=0; ii<db->nDb; ii++){
+          rc = sqlite3BtreeSavepoint(db->aDb[ii].pBt, p1, iSavepoint);
+          if( rc!=SQLITE_OK ){
+            goto abort_due_to_error;
+          }
+        }
+        if( isSchemaChange ){
+          sqlite3ExpirePreparedStatements(db);
+          sqlite3ResetAllSchemasOfConnection(db);
+          db->flags = (db->flags | SQLITE_InternChanges);
+        }
+      }
+
+      /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all
+      ** savepoints nested inside of the savepoint being operated on. */
+      while( db->pSavepoint!=pSavepoint ){
+        pTmp = db->pSavepoint;
+        db->pSavepoint = pTmp->pNext;
+        sqlite3DbFree(db, pTmp);
+        db->nSavepoint--;
+      }
+
+      /* If it is a RELEASE, then destroy the savepoint being operated on
+      ** too. If it is a ROLLBACK TO, then set the number of deferred
+      ** constraint violations present in the database to the value stored
+      ** when the savepoint was created.  */
+      if( p1==SAVEPOINT_RELEASE ){
+        assert( pSavepoint==db->pSavepoint );
+        db->pSavepoint = pSavepoint->pNext;
+        sqlite3DbFree(db, pSavepoint);
+        if( !isTransaction ){
+          db->nSavepoint--;
+        }
+      }else{
+        db->nDeferredCons = pSavepoint->nDeferredCons;
+        db->nDeferredImmCons = pSavepoint->nDeferredImmCons;
+      }
+
+      if( !isTransaction || p1==SAVEPOINT_ROLLBACK ){
+        rc = sqlite3VtabSavepoint(db, p1, iSavepoint);
+        if( rc!=SQLITE_OK ) goto abort_due_to_error;
+      }
+    }
+  }
+  if( rc ) goto abort_due_to_error;
+
+  break;
+}
+
+/* Opcode: AutoCommit P1 P2 * * *
+**
+** Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll
+** back any currently active btree transactions. If there are any active
+** VMs (apart from this one), then a ROLLBACK fails.  A COMMIT fails if
+** there are active writing VMs or active VMs that use shared cache.
+**
+** This instruction causes the VM to halt.
+*/
+case OP_AutoCommit: {
+  int desiredAutoCommit;
+  int iRollback;
+
+  desiredAutoCommit = pOp->p1;
+  iRollback = pOp->p2;
+  assert( desiredAutoCommit==1 || desiredAutoCommit==0 );
+  assert( desiredAutoCommit==1 || iRollback==0 );
+  assert( db->nVdbeActive>0 );  /* At least this one VM is active */
+  assert( p->bIsReader );
+
+  if( desiredAutoCommit!=db->autoCommit ){
+    if( iRollback ){
+      assert( desiredAutoCommit==1 );
+      sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK);
+      db->autoCommit = 1;
+    }else if( desiredAutoCommit && db->nVdbeWrite>0 ){
+      /* If this instruction implements a COMMIT and other VMs are writing
+      ** return an error indicating that the other VMs must complete first.
+      */
+      sqlite3VdbeError(p, "cannot commit transaction - "
+                          "SQL statements in progress");
+      rc = SQLITE_BUSY;
+      goto abort_due_to_error;
+    }else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){
+      goto vdbe_return;
+    }else{
+      db->autoCommit = (u8)desiredAutoCommit;
+    }
+    if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){
+      p->pc = (int)(pOp - aOp);
+      db->autoCommit = (u8)(1-desiredAutoCommit);
+      p->rc = rc = SQLITE_BUSY;
+      goto vdbe_return;
+    }
+    assert( db->nStatement==0 );
+    sqlite3CloseSavepoints(db);
+    if( p->rc==SQLITE_OK ){
+      rc = SQLITE_DONE;
+    }else{
+      rc = SQLITE_ERROR;
+    }
+    goto vdbe_return;
+  }else{
+    sqlite3VdbeError(p,
+        (!desiredAutoCommit)?"cannot start a transaction within a transaction":(
+        (iRollback)?"cannot rollback - no transaction is active":
+                    "cannot commit - no transaction is active"));
+
+    rc = SQLITE_ERROR;
+    goto abort_due_to_error;
+  }
+  break;
+}
+
+/* Opcode: Transaction P1 P2 P3 P4 P5
+**
+** Begin a transaction on database P1 if a transaction is not already
+** active.
+** If P2 is non-zero, then a write-transaction is started, or if a
+** read-transaction is already active, it is upgraded to a write-transaction.
+** If P2 is zero, then a read-transaction is started.
+**
+** P1 is the index of the database file on which the transaction is
+** started. Index 0 is the main database file and index 1 is the
+** file used for temporary tables. Indices of 2 or more are used for
+** attached databases.
+**
+** If a write-transaction is started and the Vdbe.usesStmtJournal flag is
+** true (this flag is set if the Vdbe may modify more than one row and may
+** throw an ABORT exception), a statement transaction may also be opened.
+** More specifically, a statement transaction is opened iff the database
+** connection is currently not in autocommit mode, or if there are other
+** active statements. A statement transaction allows the changes made by this
+** VDBE to be rolled back after an error without having to roll back the
+** entire transaction. If no error is encountered, the statement transaction
+** will automatically commit when the VDBE halts.
+**
+** If P5!=0 then this opcode also checks the schema cookie against P3
+** and the schema generation counter against P4.
+** The cookie changes its value whenever the database schema changes.
+** This operation is used to detect that the cookie has changed
+** and that the current process needs to reread the schema. If the schema
+** cookie in P3 differs from the schema cookie in the database header or
+** if the schema generation counter in P4 differs from the current
+** generation counter, then an SQLITE_SCHEMA error is raised and execution
+** halts. The sqlite3_step() wrapper function might then reprepare the
+** statement and rerun it from the beginning.
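+**
+** For example (a sketch): if one connection runs "CREATE INDEX ..." after
+** another connection has prepared a query, the cookie stored in the second
+** statement's Transaction opcode no longer matches the database header,
+** this opcode raises SQLITE_SCHEMA, and statements prepared with
+** sqlite3_prepare_v2() are transparently re-prepared and re-run.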
+*/
+case OP_Transaction: {
+  Btree *pBt;
+  int iMeta;
+  int iGen;
+
+  assert( p->bIsReader );
+  assert( p->readOnly==0 || pOp->p2==0 );
+  assert( pOp->p1>=0 && pOp->p1<db->nDb );
+  assert( DbMaskTest(p->btreeMask, pOp->p1) );
+  if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){
+    rc = SQLITE_READONLY;
+    goto abort_due_to_error;
+  }
+  pBt = db->aDb[pOp->p1].pBt;
+
+  if( pBt ){
+    rc = sqlite3BtreeBeginTrans(pBt, pOp->p2);
+    testcase( rc==SQLITE_BUSY_SNAPSHOT );
+    testcase( rc==SQLITE_BUSY_RECOVERY );
+    if( rc!=SQLITE_OK ){
+      if( (rc&0xff)==SQLITE_BUSY ){
+        p->pc = (int)(pOp - aOp);
+        p->rc = rc;
+        goto vdbe_return;
+      }
+      goto abort_due_to_error;
+    }
+
+    if( pOp->p2 && p->usesStmtJournal
+     && (db->autoCommit==0 || db->nVdbeRead>1)
+    ){
+      assert( sqlite3BtreeIsInTrans(pBt) );
+      if( p->iStatement==0 ){
+        assert( db->nStatement>=0 && db->nSavepoint>=0 );
+        db->nStatement++;
+        p->iStatement = db->nSavepoint + db->nStatement;
+      }
+
+      rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, p->iStatement-1);
+      if( rc==SQLITE_OK ){
+        rc = sqlite3BtreeBeginStmt(pBt, p->iStatement);
+      }
+
+      /* Store the current value of the database handle's deferred constraint
+      ** counter. If the statement transaction needs to be rolled back,
+      ** the value of this counter needs to be restored too. */
+      p->nStmtDefCons = db->nDeferredCons;
+      p->nStmtDefImmCons = db->nDeferredImmCons;
+    }
+
+    /* Gather the schema version number for checking:
+    ** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema
+    ** version is checked to ensure that the schema has not changed since the
+    ** SQL statement was prepared.
+    */
+    sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta);
+    iGen = db->aDb[pOp->p1].pSchema->iGeneration;
+  }else{
+    iGen = iMeta = 0;
+  }
+  assert( pOp->p5==0 || pOp->p4type==P4_INT32 );
+  if( pOp->p5 && (iMeta!=pOp->p3 || iGen!=pOp->p4.i) ){
+    sqlite3DbFree(db, p->zErrMsg);
+    p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed");
+    /* If the schema-cookie from the database file matches the cookie
+    ** stored with the in-memory representation of the schema, do
+    ** not reload the schema from the database file.
+    **
+    ** If virtual-tables are in use, this is not just an optimization.
+    ** Often, v-tables store their data in other SQLite tables, which
+    ** are queried from within xNext() and other v-table methods using
+    ** prepared queries. If such a query is out-of-date, we do not want to
+    ** discard the database schema, as the user code implementing the
+    ** v-table would have to be ready for the sqlite3_vtab structure itself
+    ** to be invalidated whenever sqlite3_step() is called from within
+    ** a v-table method.
+    */
+    if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){
+      sqlite3ResetOneSchema(db, pOp->p1);
+    }
+    p->expired = 1;
+    rc = SQLITE_SCHEMA;
+  }
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+
+/* Opcode: ReadCookie P1 P2 P3 * *
+**
+** Read cookie number P3 from database P1 and write it into register P2.
+** P3==1 is the schema version. P3==2 is the database format.
+** P3==3 is the recommended pager cache size, and so forth. P1==0 is
+** the main database file and P1==1 is the database file used to store
+** temporary tables.
+**
+** There must be a read-lock on the database (either a transaction
+** must be started or there must be an open cursor) before
+** executing this instruction.
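+**
+** For example, "PRAGMA schema_version" is typically compiled down to a
+** ReadCookie with P3==1 (a sketch of the usual code generation, not a
+** guarantee of the opcode sequence).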
+*/
+case OP_ReadCookie: {               /* out2 */
+  int iMeta;
+  int iDb;
+  int iCookie;
+
+  assert( p->bIsReader );
+  iDb = pOp->p1;
+  iCookie = pOp->p3;
+  assert( pOp->p3<SQLITE_N_BTREE_META );
+  assert( iDb>=0 && iDb<db->nDb );
+  assert( db->aDb[iDb].pBt!=0 );
+  assert( DbMaskTest(p->btreeMask, iDb) );
+
+  sqlite3BtreeGetMeta(db->aDb[iDb].pBt, iCookie, (u32 *)&iMeta);
+  pOut = out2Prerelease(p, pOp);
+  pOut->u.i = iMeta;
+  break;
+}
+
+/* Opcode: SetCookie P1 P2 P3 * *
+**
+** Write the integer value P3 into cookie number P2 of database P1.
+** P2==1 is the schema version. P2==2 is the database format.
+** P2==3 is the recommended pager cache
+** size, and so forth. P1==0 is the main database file and P1==1 is the
+** database file used to store temporary tables.
+**
+** A transaction must be started before executing this opcode.
+*/
+case OP_SetCookie: {
+  Db *pDb;
+  assert( pOp->p2<SQLITE_N_BTREE_META );
+  assert( pOp->p1>=0 && pOp->p1<db->nDb );
+  assert( DbMaskTest(p->btreeMask, pOp->p1) );
+  assert( p->readOnly==0 );
+  pDb = &db->aDb[pOp->p1];
+  assert( pDb->pBt!=0 );
+  assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) );
+  /* See note about index shifting on OP_ReadCookie */
+  rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, pOp->p3);
+  if( pOp->p2==BTREE_SCHEMA_VERSION ){
+    /* When the schema cookie changes, record the new cookie internally */
+    pDb->pSchema->schema_cookie = pOp->p3;
+    db->flags |= SQLITE_InternChanges;
+  }else if( pOp->p2==BTREE_FILE_FORMAT ){
+    /* Record changes in the file format */
+    pDb->pSchema->file_format = pOp->p3;
+  }
+  if( pOp->p1==1 ){
+    /* Invalidate all prepared statements whenever the TEMP database
+    ** schema is changed. Ticket #1644 */
+    sqlite3ExpirePreparedStatements(db);
+    p->expired = 0;
+  }
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+
+/* Opcode: OpenRead P1 P2 P3 P4 P5
+** Synopsis: root=P2 iDb=P3
+**
+** Open a read-only cursor for the database table whose root page is
+** P2 in a database file. The database file is determined by P3.
+** P3==0 means the main database, P3==1 means the database used for
+** temporary tables, and P3>1 means use the corresponding attached
+** database. Give the new cursor an identifier of P1. The P1
+** values need not be contiguous but all P1 values should be small integers.
+** It is an error for P1 to be negative.
+**
+** If P5!=0 then use the content of register P2 as the root page, not
+** the value of P2 itself.
+**
+** There will be a read lock on the database whenever there is an
+** open cursor. If the database was unlocked prior to this instruction
+** then a read lock is acquired as part of this instruction. A read
+** lock allows other processes to read the database but prohibits
+** any other process from modifying the database. The read lock is
+** released when all cursors are closed. If this instruction attempts
+** to get a read lock but fails, the script terminates with an
+** SQLITE_BUSY error code.
+**
+** The P4 value may be either an integer (P4_INT32) or a pointer to
+** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
+** structure, then said structure defines the content and collating
+** sequence of the index being opened. Otherwise, if P4 is an integer
+** value, it is set to the number of columns in the table.
+**
+** See also: OpenWrite, ReopenIdx
+*/
+/* Opcode: ReopenIdx P1 P2 P3 P4 P5
+** Synopsis: root=P2 iDb=P3
+**
+** The ReopenIdx opcode works exactly like OpenRead except that it first
+** checks to see if the cursor on P1 is already open with a root page
+** number of P2 and if it is this opcode becomes a no-op. In other words,
+** if the cursor is already open, do not reopen it.
+**
+** The ReopenIdx opcode may only be used with P5==0 and with P4 being
+** a P4_KEYINFO object. Furthermore, the P3 value must be the same as
+** every other ReopenIdx or OpenRead for the same cursor number.
+**
+** See the OpenRead opcode documentation for additional information.
+*/
+/* Opcode: OpenWrite P1 P2 P3 P4 P5
+** Synopsis: root=P2 iDb=P3
+**
+** Open a read/write cursor named P1 on the table or index whose root
+** page is P2. Or if P5!=0 use the content of register P2 to find the
+** root page.
+**
+** The P4 value may be either an integer (P4_INT32) or a pointer to
+** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo
+** structure, then said structure defines the content and collating
+** sequence of the index being opened. Otherwise, if P4 is an integer
+** value, it is set to the number of columns in the table, or to the
+** largest index of any column of the table that is actually used.
+**
+** This instruction works just like OpenRead except that it opens the cursor
+** in read/write mode. For a given table, there can be one or more read-only
+** cursors or a single read/write cursor but not both.
+**
+** See also OpenRead.
+*/
+case OP_ReopenIdx: {
+  int nField;
+  KeyInfo *pKeyInfo;
+  int p2;
+  int iDb;
+  int wrFlag;
+  Btree *pX;
+  VdbeCursor *pCur;
+  Db *pDb;
+
+  assert( pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
+  assert( pOp->p4type==P4_KEYINFO );
+  pCur = p->apCsr[pOp->p1];
+  if( pCur && pCur->pgnoRoot==(u32)pOp->p2 ){
+    assert( pCur->iDb==pOp->p3 );      /* Guaranteed by the code generator */
+    goto open_cursor_set_hints;
+  }
+  /* If the cursor is not currently open or is open on a different
+  ** index, then fall through into OP_OpenRead to force a reopen */
+case OP_OpenRead:
+case OP_OpenWrite:
+
+  assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
+  assert( p->bIsReader );
+  assert( pOp->opcode==OP_OpenRead || pOp->opcode==OP_ReopenIdx
+          || p->readOnly==0 );
+
+  if( p->expired ){
+    rc = SQLITE_ABORT_ROLLBACK;
+    goto abort_due_to_error;
+  }
+
+  nField = 0;
+  pKeyInfo = 0;
+  p2 = pOp->p2;
+  iDb = pOp->p3;
+  assert( iDb>=0 && iDb<db->nDb );
+  assert( DbMaskTest(p->btreeMask, iDb) );
+  pDb = &db->aDb[iDb];
+  pX = pDb->pBt;
+  assert( pX!=0 );
+  if( pOp->opcode==OP_OpenWrite ){
+    assert( OPFLAG_FORDELETE==BTREE_FORDELETE );
+    wrFlag = BTREE_WRCSR | (pOp->p5 & OPFLAG_FORDELETE);
+    assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+    if( pDb->pSchema->file_format < p->minWriteFileFormat ){
+      p->minWriteFileFormat = pDb->pSchema->file_format;
+    }
+  }else{
+    wrFlag = 0;
+  }
+  if( pOp->p5 & OPFLAG_P2ISREG ){
+    assert( p2>0 );
+    assert( p2<=(p->nMem+1 - p->nCursor) );
+    pIn2 = &aMem[p2];
+    assert( memIsValid(pIn2) );
+    assert( (pIn2->flags & MEM_Int)!=0 );
+    sqlite3VdbeMemIntegerify(pIn2);
+    p2 = (int)pIn2->u.i;
+    /* The p2 value always comes from a prior OP_CreateTable opcode and
+    ** that opcode will always set the p2 value to 2 or more or else fail.
+    ** If there were a failure, the prepared statement would have halted
+    ** before reaching this instruction.
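+    ** (Hence the assert() just below: page 1 is reserved for the schema
+    ** master table, so any root page allocated for another btree is
+    ** always 2 or greater.)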
+    */
+    assert( p2>=2 );
+  }
+  if( pOp->p4type==P4_KEYINFO ){
+    pKeyInfo = pOp->p4.pKeyInfo;
+    assert( pKeyInfo->enc==ENC(db) );
+    assert( pKeyInfo->db==db );
+    nField = pKeyInfo->nField+pKeyInfo->nXField;
+  }else if( pOp->p4type==P4_INT32 ){
+    nField = pOp->p4.i;
+  }
+  assert( pOp->p1>=0 );
+  assert( nField>=0 );
+  testcase( nField==0 );  /* Table with INTEGER PRIMARY KEY and nothing else */
+  pCur = allocateCursor(p, pOp->p1, nField, iDb, CURTYPE_BTREE);
+  if( pCur==0 ) goto no_mem;
+  pCur->nullRow = 1;
+  pCur->isOrdered = 1;
+  pCur->pgnoRoot = p2;
+#ifdef SQLITE_DEBUG
+  pCur->wrFlag = wrFlag;
+#endif
+  rc = sqlite3BtreeCursor(pX, p2, wrFlag, pKeyInfo, pCur->uc.pCursor);
+  pCur->pKeyInfo = pKeyInfo;
+  /* Set the VdbeCursor.isTable variable. Previous versions of
+  ** SQLite used to check if the root-page flags were sane at this point
+  ** and report database corruption if they were not, but this check has
+  ** since moved into the btree layer. */
+  pCur->isTable = pOp->p4type!=P4_KEYINFO;
+
+open_cursor_set_hints:
+  assert( OPFLAG_BULKCSR==BTREE_BULKLOAD );
+  assert( OPFLAG_SEEKEQ==BTREE_SEEK_EQ );
+  testcase( pOp->p5 & OPFLAG_BULKCSR );
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+  testcase( pOp->p2 & OPFLAG_SEEKEQ );
+#endif
+  sqlite3BtreeCursorHintFlags(pCur->uc.pCursor,
+                              (pOp->p5 & (OPFLAG_BULKCSR|OPFLAG_SEEKEQ)));
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+
+/* Opcode: OpenEphemeral P1 P2 * P4 P5
+** Synopsis: nColumn=P2
+**
+** Open a new cursor P1 to a transient table.
+** The cursor is always opened read/write even if
+** the main database is read-only. The ephemeral
+** table is deleted automatically when the cursor is closed.
+**
+** P2 is the number of columns in the ephemeral table.
+** The cursor points to a BTree table if P4==0 and to a BTree index
+** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure
+** that defines the format of keys in the index.
+**
+** The P5 parameter can be a mask of the BTREE_* flags defined
+** in btree.h. These flags control aspects of the operation of
+** the btree. The BTREE_OMIT_JOURNAL and BTREE_SINGLE flags are
+** added automatically.
+*/
+/* Opcode: OpenAutoindex P1 P2 * P4 *
+** Synopsis: nColumn=P2
+**
+** This opcode works the same as OP_OpenEphemeral. It has a
+** different name to distinguish its use. Tables created by
+** this opcode will be used for automatically created transient
+** indices in joins.
+*/
+case OP_OpenAutoindex:
+case OP_OpenEphemeral: {
+  VdbeCursor *pCx;
+  KeyInfo *pKeyInfo;
+
+  static const int vfsFlags =
+      SQLITE_OPEN_READWRITE |
+      SQLITE_OPEN_CREATE |
+      SQLITE_OPEN_EXCLUSIVE |
+      SQLITE_OPEN_DELETEONCLOSE |
+      SQLITE_OPEN_TRANSIENT_DB;
+  assert( pOp->p1>=0 );
+  assert( pOp->p2>=0 );
+  pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, CURTYPE_BTREE);
+  if( pCx==0 ) goto no_mem;
+  pCx->nullRow = 1;
+  pCx->isEphemeral = 1;
+  rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBtx,
+                        BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1);
+  }
+  if( rc==SQLITE_OK ){
+    /* If a transient index is required, create it by calling
+    ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before
+    ** opening it. If a transient table is required, just use the
+    ** automatically created table with root-page 1 (a BLOB_INTKEY table).
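+    ** (Ephemeral btrees like this one also back, for example, materialized
+    ** subqueries; exactly which statements get one is a code-generator
+    ** decision, so treat this as a sketch of typical uses.)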
+    */
+    if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){
+      int pgno;
+      assert( pOp->p4type==P4_KEYINFO );
+      rc = sqlite3BtreeCreateTable(pCx->pBtx, &pgno, BTREE_BLOBKEY | pOp->p5);
+      if( rc==SQLITE_OK ){
+        assert( pgno==MASTER_ROOT+1 );
+        assert( pKeyInfo->db==db );
+        assert( pKeyInfo->enc==ENC(db) );
+        rc = sqlite3BtreeCursor(pCx->pBtx, pgno, BTREE_WRCSR,
+                                pKeyInfo, pCx->uc.pCursor);
+      }
+      pCx->isTable = 0;
+    }else{
+      rc = sqlite3BtreeCursor(pCx->pBtx, MASTER_ROOT, BTREE_WRCSR,
+                              0, pCx->uc.pCursor);
+      pCx->isTable = 1;
+    }
+  }
+  if( rc ) goto abort_due_to_error;
+  pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED);
+  break;
+}
+
+/* Opcode: SorterOpen P1 P2 P3 P4 *
+**
+** This opcode works like OP_OpenEphemeral except that it opens
+** a transient index that is specifically designed to sort large
+** tables using an external merge-sort algorithm.
+**
+** If argument P3 is non-zero, then it indicates that the sorter may
+** assume that a stable sort considering the first P3 fields of each
+** key is sufficient to produce the required results.
+*/
+case OP_SorterOpen: {
+  VdbeCursor *pCx;
+
+  assert( pOp->p1>=0 );
+  assert( pOp->p2>=0 );
+  pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, CURTYPE_SORTER);
+  if( pCx==0 ) goto no_mem;
+  pCx->pKeyInfo = pOp->p4.pKeyInfo;
+  assert( pCx->pKeyInfo->db==db );
+  assert( pCx->pKeyInfo->enc==ENC(db) );
+  rc = sqlite3VdbeSorterInit(db, pOp->p3, pCx);
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+
+/* Opcode: SequenceTest P1 P2 * * *
+** Synopsis: if( cursor[P1].ctr++ ) pc = P2
+**
+** P1 is a sorter cursor. If the sequence counter is currently zero, jump
+** to P2. Regardless of whether or not the jump is taken, increment the
+** sequence value.
+*/
+case OP_SequenceTest: {
+  VdbeCursor *pC;
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( isSorter(pC) );
+  if( (pC->seqCount++)==0 ){
+    goto jump_to_p2;
+  }
+  break;
+}
+
+/* Opcode: OpenPseudo P1 P2 P3 * *
+** Synopsis: P3 columns in r[P2]
+**
+** Open a new cursor that points to a fake table that contains a single
+** row of data. The content of that one row is the content of memory
+** register P2. In other words, cursor P1 becomes an alias for the
+** MEM_Blob content contained in register P2.
+**
+** A pseudo-table created by this opcode is used to hold a single
+** row output from the sorter so that the row can be decomposed into
+** individual columns using the OP_Column opcode. The OP_Column opcode
+** is the only cursor opcode that works with a pseudo-table.
+**
+** P3 is the number of fields in the records that will be stored by
+** the pseudo-table.
+*/
+case OP_OpenPseudo: {
+  VdbeCursor *pCx;
+
+  assert( pOp->p1>=0 );
+  assert( pOp->p3>=0 );
+  pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, CURTYPE_PSEUDO);
+  if( pCx==0 ) goto no_mem;
+  pCx->nullRow = 1;
+  pCx->uc.pseudoTableReg = pOp->p2;
+  pCx->isTable = 1;
+  assert( pOp->p5==0 );
+  break;
+}
+
+/* Opcode: Close P1 * * * *
+**
+** Close a cursor previously opened as P1. If P1 is not
+** currently open, this instruction is a no-op.
+*/
+case OP_Close: {
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]);
+  p->apCsr[pOp->p1] = 0;
+  break;
+}
+
+#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
+/* Opcode: ColumnsUsed P1 * * P4 *
+**
+** This opcode (which only exists if SQLite was compiled with
+** SQLITE_ENABLE_COLUMN_USED_MASK) identifies which columns of the
+** table or index for cursor P1 are used.
+** P4 is a 64-bit integer (P4_INT64) in which the first 63 bits are one
+** for each of the first 63 columns of the table or index that are
+** actually used by the cursor. The high-order bit is set if any column
+** after the 64th is used.
+*/
+case OP_ColumnsUsed: {
+  VdbeCursor *pC;
+  pC = p->apCsr[pOp->p1];
+  assert( pC->eCurType==CURTYPE_BTREE );
+  pC->maskUsed = *(u64*)pOp->p4.pI64;
+  break;
+}
+#endif
+
+/* Opcode: SeekGE P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as the key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the smallest entry that
+** is greater than or equal to the key value. If there are no records
+** greater than or equal to the key and P2 is not zero, then jump to P2.
+**
+** If the cursor P1 was opened using the OPFLAG_SEEKEQ flag, then this
+** opcode will always land on a record that exactly equals the key, or
+** else jump immediately to P2. When the cursor is OPFLAG_SEEKEQ, this
+** opcode must be followed by an IdxLE opcode with the same arguments.
+** The IdxLE opcode will be skipped if this opcode succeeds, but the
+** IdxLE opcode will be used on subsequent loop iterations.
+**
+** This opcode leaves the cursor configured to move in forward order,
+** from the beginning toward the end. In other words, the cursor is
+** configured to use Next, not Prev.
+**
+** See also: Found, NotFound, SeekLt, SeekGt, SeekLe
+*/
+/* Opcode: SeekGT P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as a key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the smallest entry that
+** is greater than the key value. If there are no records greater than
+** the key and P2 is not zero, then jump to P2.
+**
+** This opcode leaves the cursor configured to move in forward order,
+** from the beginning toward the end. In other words, the cursor is
+** configured to use Next, not Prev.
+**
+** See also: Found, NotFound, SeekLt, SeekGe, SeekLe
+*/
+/* Opcode: SeekLT P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as a key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the largest entry that
+** is less than the key value. If there are no records less than
+** the key and P2 is not zero, then jump to P2.
+**
+** This opcode leaves the cursor configured to move in reverse order,
+** from the end toward the beginning. In other words, the cursor is
+** configured to use Prev, not Next.
+**
+** See also: Found, NotFound, SeekGt, SeekGe, SeekLe
+*/
+/* Opcode: SeekLE P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If cursor P1 refers to an SQL table (B-Tree that uses integer keys),
+** use the value in register P3 as a key. If cursor P1 refers
+** to an SQL index, then P3 is the first in an array of P4 registers
+** that are used as an unpacked index key.
+**
+** Reposition cursor P1 so that it points to the largest entry that
+** is less than or equal to the key value. If there are no records
+** less than or equal to the key and P2 is not zero, then jump to P2.
+**
+** This opcode leaves the cursor configured to move in reverse order,
+** from the end toward the beginning. In other words, the cursor is
+** configured to use Prev, not Next.
+**
+** If the cursor P1 was opened using the OPFLAG_SEEKEQ flag, then this
+** opcode will always land on a record that exactly equals the key, or
+** else jump immediately to P2. When the cursor is OPFLAG_SEEKEQ, this
+** opcode must be followed by an IdxGE opcode with the same arguments.
+** The IdxGE opcode will be skipped if this opcode succeeds, but the
+** IdxGE opcode will be used on subsequent loop iterations.
+**
+** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
+*/
+case OP_SeekLT:         /* jump, in3 */
+case OP_SeekLE:         /* jump, in3 */
+case OP_SeekGE:         /* jump, in3 */
+case OP_SeekGT: {       /* jump, in3 */
+  int res;           /* Comparison result */
+  int oc;            /* Opcode */
+  VdbeCursor *pC;    /* The cursor to seek */
+  UnpackedRecord r;  /* The key to seek for */
+  int nField;        /* Number of columns or fields in the key */
+  i64 iKey;          /* The rowid we are to seek to */
+  int eqOnly;        /* Only interested in == results */
+
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( pOp->p2!=0 );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( OP_SeekLE == OP_SeekLT+1 );
+  assert( OP_SeekGE == OP_SeekLT+2 );
+  assert( OP_SeekGT == OP_SeekLT+3 );
+  assert( pC->isOrdered );
+  assert( pC->uc.pCursor!=0 );
+  oc = pOp->opcode;
+  eqOnly = 0;
+  pC->nullRow = 0;
+#ifdef SQLITE_DEBUG
+  pC->seekOp = pOp->opcode;
+#endif
+
+  if( pC->isTable ){
+    /* The BTREE_SEEK_EQ flag is only set on index cursors */
+    assert( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ)==0
+              || CORRUPT_DB );
+
+    /* The input value in P3 might be of any type: integer, real, string,
+    ** blob, or NULL. But it needs to be an integer before we can do
+    ** the seek, so convert it. */
+    pIn3 = &aMem[pOp->p3];
+    if( (pIn3->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
+      applyNumericAffinity(pIn3, 0);
+    }
+    iKey = sqlite3VdbeIntValue(pIn3);
+
+    /* If the P3 value could not be converted into an integer without
+    ** loss of information, then special processing is required... */
+    if( (pIn3->flags & MEM_Int)==0 ){
+      if( (pIn3->flags & MEM_Real)==0 ){
+        /* If the P3 value cannot be converted into any kind of a number,
+        ** then the seek is not possible, so jump to P2 */
+        VdbeBranchTaken(1,2); goto jump_to_p2;
+        break;
+      }
+
+      /* If the approximation iKey is larger than the actual real search
+      ** term, substitute >= for > and < for <=. e.g. if the search term
+      ** is 4.9 and the integer approximation 5:
+      **
+      **        (x >  4.9)    ->     (x >= 5)
+      **        (x <= 4.9)    ->     (x <  5)
+      */
+      if( pIn3->u.r<(double)iKey ){
+        assert( OP_SeekGE==(OP_SeekGT-1) );
+        assert( OP_SeekLT==(OP_SeekLE-1) );
+        assert( (OP_SeekLE & 0x0001)==(OP_SeekGT & 0x0001) );
+        if( (oc & 0x0001)==(OP_SeekGT & 0x0001) ) oc--;
+      }
+
+      /* If the approximation iKey is smaller than the actual real search
+      ** term, substitute <= for < and > for >=.
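+      ** e.g. if the search term is 4.9 and the integer approximation 4:
+      **
+      **        (x <  4.9)    ->     (x <= 4)
+      **        (x >= 4.9)    ->     (x >  4)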
+      */
+      else if( pIn3->u.r>(double)iKey ){
+        assert( OP_SeekLE==(OP_SeekLT+1) );
+        assert( OP_SeekGT==(OP_SeekGE+1) );
+        assert( (OP_SeekLT & 0x0001)==(OP_SeekGE & 0x0001) );
+        if( (oc & 0x0001)==(OP_SeekLT & 0x0001) ) oc++;
+      }
+    }
+    rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, 0, (u64)iKey, 0, &res);
+    pC->movetoTarget = iKey;  /* Used by OP_Delete */
+    if( rc!=SQLITE_OK ){
+      goto abort_due_to_error;
+    }
+  }else{
+    /* For a cursor with the BTREE_SEEK_EQ hint, only the OP_SeekGE and
+    ** OP_SeekLE opcodes are allowed, and these must be immediately followed
+    ** by an OP_IdxGT or OP_IdxLT opcode, respectively, with the same key.
+    */
+    if( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ) ){
+      eqOnly = 1;
+      assert( pOp->opcode==OP_SeekGE || pOp->opcode==OP_SeekLE );
+      assert( pOp[1].opcode==OP_IdxLT || pOp[1].opcode==OP_IdxGT );
+      assert( pOp[1].p1==pOp[0].p1 );
+      assert( pOp[1].p2==pOp[0].p2 );
+      assert( pOp[1].p3==pOp[0].p3 );
+      assert( pOp[1].p4.i==pOp[0].p4.i );
+    }
+
+    nField = pOp->p4.i;
+    assert( pOp->p4type==P4_INT32 );
+    assert( nField>0 );
+    r.pKeyInfo = pC->pKeyInfo;
+    r.nField = (u16)nField;
+
+    /* The next line of code computes as follows, only faster:
+    **   if( oc==OP_SeekGT || oc==OP_SeekLE ){
+    **     r.default_rc = -1;
+    **   }else{
+    **     r.default_rc = +1;
+    **   }
+    */
+    r.default_rc = ((1 & (oc - OP_SeekLT)) ? -1 : +1);
+    assert( oc!=OP_SeekGT || r.default_rc==-1 );
+    assert( oc!=OP_SeekLE || r.default_rc==-1 );
+    assert( oc!=OP_SeekGE || r.default_rc==+1 );
+    assert( oc!=OP_SeekLT || r.default_rc==+1 );
+
+    r.aMem = &aMem[pOp->p3];
+#ifdef SQLITE_DEBUG
+    { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+#endif
+    r.eqSeen = 0;
+    rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, &r, 0, 0, &res);
+    if( rc!=SQLITE_OK ){
+      goto abort_due_to_error;
+    }
+    if( eqOnly && r.eqSeen==0 ){
+      assert( res!=0 );
+      goto seek_not_found;
+    }
+  }
+  pC->deferredMoveto = 0;
+  pC->cacheStatus = CACHE_STALE;
+#ifdef SQLITE_TEST
+  sqlite3_search_count++;
+#endif
+  if( oc>=OP_SeekGE ){  assert( oc==OP_SeekGE || oc==OP_SeekGT );
+    if( res<0 || (res==0 && oc==OP_SeekGT) ){
+      res = 0;
+      rc = sqlite3BtreeNext(pC->uc.pCursor, &res);
+      if( rc!=SQLITE_OK ) goto abort_due_to_error;
+    }else{
+      res = 0;
+    }
+  }else{
+    assert( oc==OP_SeekLT || oc==OP_SeekLE );
+    if( res>0 || (res==0 && oc==OP_SeekLT) ){
+      res = 0;
+      rc = sqlite3BtreePrevious(pC->uc.pCursor, &res);
+      if( rc!=SQLITE_OK ) goto abort_due_to_error;
+    }else{
+      /* res might be negative because the table is empty. Check to
+      ** see if this is the case.
+      */
+      res = sqlite3BtreeEof(pC->uc.pCursor);
+    }
+  }
+seek_not_found:
+  assert( pOp->p2>0 );
+  VdbeBranchTaken(res!=0,2);
+  if( res ){
+    goto jump_to_p2;
+  }else if( eqOnly ){
+    assert( pOp[1].opcode==OP_IdxLT || pOp[1].opcode==OP_IdxGT );
+    pOp++; /* Skip the OP_IdxLt or OP_IdxGT that follows */
+  }
+  break;
+}
+
+/* Opcode: Found P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
+** P4>0 then register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the record identified by P3 and P4
+** is a prefix of any entry in P1 then a jump is made to P2 and
+** P1 is left pointing at the matching entry.
+**
+** This operation leaves the cursor in a state where it can be
+** advanced in the forward direction. The Next instruction will work,
+** but not the Prev instruction.
+**
+** See also: NotFound, NoConflict, NotExists, SeekGe
+*/
+/* Opcode: NotFound P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
+** P4>0 then register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the record identified by P3 and P4
+** is not the prefix of any entry in P1 then a jump is made to P2. If P1
+** does contain an entry whose prefix matches the P3/P4 record then control
+** falls through to the next instruction and P1 is left pointing at the
+** matching entry.
+**
+** This operation leaves the cursor in a state where it cannot be
+** advanced in either direction. In other words, the Next and Prev
+** opcodes do not work after this operation.
+**
+** See also: Found, NotExists, NoConflict
+*/
+/* Opcode: NoConflict P1 P2 P3 P4 *
+** Synopsis: key=r[P3@P4]
+**
+** If P4==0 then register P3 holds a blob constructed by MakeRecord. If
+** P4>0 then register P3 is the first of P4 registers that form an unpacked
+** record.
+**
+** Cursor P1 is on an index btree. If the record identified by P3 and P4
+** contains any NULL value, jump immediately to P2. If all terms of the
+** record are not-NULL then a check is done to determine if any row in the
+** P1 index btree has a matching key prefix. If there are no matches, jump
+** immediately to P2. If there is a match, fall through and leave the P1
+** cursor pointing to the matching row.
+**
+** This opcode is similar to OP_NotFound with the exception that the
+** branch is always taken if any part of the search key input is NULL.
+**
+** This operation leaves the cursor in a state where it cannot be
+** advanced in either direction. In other words, the Next and Prev
+** opcodes do not work after this operation.
+**
+** See also: NotFound, Found, NotExists
+*/
+case OP_NoConflict:     /* jump, in3 */
+case OP_NotFound:       /* jump, in3 */
+case OP_Found: {        /* jump, in3 */
+  int alreadyExists;
+  int takeJump;
+  int ii;
+  VdbeCursor *pC;
+  int res;
+  UnpackedRecord *pFree;
+  UnpackedRecord *pIdxKey;
+  UnpackedRecord r;
+
+#ifdef SQLITE_TEST
+  if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++;
+#endif
+
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( pOp->p4type==P4_INT32 );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+#ifdef SQLITE_DEBUG
+  pC->seekOp = pOp->opcode;
+#endif
+  pIn3 = &aMem[pOp->p3];
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( pC->uc.pCursor!=0 );
+  assert( pC->isTable==0 );
+  if( pOp->p4.i>0 ){
+    r.pKeyInfo = pC->pKeyInfo;
+    r.nField = (u16)pOp->p4.i;
+    r.aMem = pIn3;
+#ifdef SQLITE_DEBUG
+    for(ii=0; ii<r.nField; ii++){
+      assert( memIsValid(&r.aMem[ii]) );
+      assert( (r.aMem[ii].flags & MEM_Zero)==0 || r.aMem[ii].n==0 );
+      if( ii ) REGISTER_TRACE(pOp->p3+ii, &r.aMem[ii]);
+    }
+#endif
+    pIdxKey = &r;
+    pFree = 0;
+  }else{
+    pFree = pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo);
+    if( pIdxKey==0 ) goto no_mem;
+    assert( pIn3->flags & MEM_Blob );
+    (void)ExpandBlob(pIn3);
+    sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey);
+  }
+  pIdxKey->default_rc = 0;
+  takeJump = 0;
+  if( pOp->opcode==OP_NoConflict ){
+    /* For the OP_NoConflict opcode, take the jump if any of the
+    ** input fields are NULL, since any key with a NULL will not
+    ** conflict */
+    for(ii=0; ii<pIdxKey->nField; ii++){
+      if( pIdxKey->aMem[ii].flags & MEM_Null ){
+        takeJump = 1;
+        break;
+      }
+    }
+  }
+  rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, pIdxKey, 0, 0, &res);
+  if( pFree ) sqlite3DbFree(db, pFree);
+  if( rc!=SQLITE_OK ){
+    goto abort_due_to_error;
+  }
+  pC->seekResult = res;
+  alreadyExists = (res==0);
+  pC->nullRow = 1-alreadyExists;
+  pC->deferredMoveto = 0;
+  pC->cacheStatus = CACHE_STALE;
+  if( pOp->opcode==OP_Found ){
+    VdbeBranchTaken(alreadyExists!=0,2);
+    if( alreadyExists ) goto jump_to_p2;
+  }else{
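+    /* For OP_NotFound and OP_NoConflict: jump to P2 when no row matches
+    ** the key prefix (or, for NoConflict, when any key field is NULL).
+    ** This is, for example, how UNIQUE constraints are probed before an
+    ** insert (a sketch of typical codegen, not a guarantee). */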
+    VdbeBranchTaken(takeJump||alreadyExists==0,2);
+    if( takeJump || !alreadyExists ) goto jump_to_p2;
+  }
+  break;
+}
+
+/* Opcode: SeekRowid P1 P2 P3 * *
+** Synopsis: intkey=r[P3]
+**
+** P1 is the index of a cursor open on an SQL table btree (with integer
+** keys). If register P3 does not contain an integer or if P1 does not
+** contain a record with rowid P3 then jump immediately to P2.
+** Or, if P2 is 0, raise an SQLITE_CORRUPT error. If P1 does contain
+** a record with rowid P3 then
+** leave the cursor pointing at that record and fall through to the next
+** instruction.
+**
+** The OP_NotExists opcode performs the same operation, but with OP_NotExists
+** the P3 register must be guaranteed to contain an integer value. With this
+** opcode, register P3 might not contain an integer.
+**
+** The OP_NotFound opcode performs the same operation on index btrees
+** (with arbitrary multi-value keys).
+**
+** This opcode leaves the cursor in a state where it cannot be advanced
+** in either direction. In other words, the Next and Prev opcodes will
+** not work following this opcode.
+**
+** See also: Found, NotFound, NoConflict, SeekRowid
+*/
+/* Opcode: NotExists P1 P2 P3 * *
+** Synopsis: intkey=r[P3]
+**
+** P1 is the index of a cursor open on an SQL table btree (with integer
+** keys). P3 is an integer rowid. If P1 does not contain a record with
+** rowid P3 then jump immediately to P2. Or, if P2 is 0, raise an
+** SQLITE_CORRUPT error. If P1 does contain a record with rowid P3 then
+** leave the cursor pointing at that record and fall through to the next
+** instruction.
+**
+** The OP_SeekRowid opcode performs the same operation but also allows the
+** P3 register to contain a non-integer value, in which case the jump is
+** always taken. This opcode requires that P3 always contain an integer.
+**
+** The OP_NotFound opcode performs the same operation on index btrees
+** (with arbitrary multi-value keys).
+**
+** This opcode leaves the cursor in a state where it cannot be advanced
+** in either direction. In other words, the Next and Prev opcodes will
+** not work following this opcode.
+**
+** See also: Found, NotFound, NoConflict, SeekRowid
+*/
+case OP_SeekRowid: {        /* jump, in3 */
+  VdbeCursor *pC;
+  BtCursor *pCrsr;
+  int res;
+  u64 iKey;
+
+  pIn3 = &aMem[pOp->p3];
+  if( (pIn3->flags & MEM_Int)==0 ){
+    applyAffinity(pIn3, SQLITE_AFF_NUMERIC, encoding);
+    if( (pIn3->flags & MEM_Int)==0 ) goto jump_to_p2;
+  }
+  /* Fall through into OP_NotExists */
+case OP_NotExists:          /* jump, in3 */
+  pIn3 = &aMem[pOp->p3];
+  assert( pIn3->flags & MEM_Int );
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+#ifdef SQLITE_DEBUG
+  pC->seekOp = 0;
+#endif
+  assert( pC->isTable );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  pCrsr = pC->uc.pCursor;
+  assert( pCrsr!=0 );
+  res = 0;
+  iKey = pIn3->u.i;
+  rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res);
+  assert( rc==SQLITE_OK || res==0 );
+  pC->movetoTarget = iKey;  /* Used by OP_Delete */
+  pC->nullRow = 0;
+  pC->cacheStatus = CACHE_STALE;
+  pC->deferredMoveto = 0;
+  VdbeBranchTaken(res!=0,2);
+  pC->seekResult = res;
+  if( res!=0 ){
+    assert( rc==SQLITE_OK );
+    if( pOp->p2==0 ){
+      rc = SQLITE_CORRUPT_BKPT;
+    }else{
+      goto jump_to_p2;
+    }
+  }
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+
+/* Opcode: Sequence P1 P2 * * *
+** Synopsis: r[P2]=cursor[P1].ctr++
+**
+** Find the next available sequence number for cursor P1.
+** Write the sequence number into register P2.
+** The sequence number on the cursor is incremented after this
+** instruction.
+*/
+case OP_Sequence: {           /* out2 */
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( p->apCsr[pOp->p1]!=0 );
+  assert( p->apCsr[pOp->p1]->eCurType!=CURTYPE_VTAB );
+  pOut = out2Prerelease(p, pOp);
+  pOut->u.i = p->apCsr[pOp->p1]->seqCount++;
+  break;
+}
+
+
+/* Opcode: NewRowid P1 P2 P3 * *
+** Synopsis: r[P2]=rowid
+**
+** Get a new integer record number (a.k.a "rowid") used as the key to a table.
+** The record number is not previously used as a key in the database
+** table that cursor P1 points to. The new record number is written
+** to register P2.
+**
+** If P3>0 then P3 is a register in the root frame of this VDBE that holds
+** the largest previously generated record number. No new record numbers are
+** allowed to be less than this value. When this value reaches its maximum,
+** an SQLITE_FULL error is generated. The P3 register is updated with the
+** generated record number. This P3 mechanism is used to help implement the
+** AUTOINCREMENT feature.
+*/
+case OP_NewRowid: {           /* out2 */
+  i64 v;                 /* The new rowid */
+  VdbeCursor *pC;        /* Cursor of table to get the new rowid */
+  int res;               /* Result of an sqlite3BtreeLast() */
+  int cnt;               /* Counter to limit the number of searches */
+  Mem *pMem;             /* Register holding largest rowid for AUTOINCREMENT */
+  VdbeFrame *pFrame;     /* Root frame of VDBE */
+
+  v = 0;
+  res = 0;
+  pOut = out2Prerelease(p, pOp);
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( pC->uc.pCursor!=0 );
+  {
+    /* The next rowid or record number (different terms for the same
+    ** thing) is obtained in a two-step algorithm.
+    **
+    ** First we attempt to find the largest existing rowid and add one
+    ** to that. But if the largest existing rowid is already the maximum
+    ** positive integer, we have to fall through to the second
+    ** probabilistic algorithm.
+    **
+    ** The second algorithm is to select a rowid at random and see if
+    ** it already exists in the table. If it does not exist, we have
+    ** succeeded. If the random rowid does exist, we select a new one
+    ** and try again, up to 100 times.
+    */
+    assert( pC->isTable );
+
+#ifdef SQLITE_32BIT_ROWID
+# define MAX_ROWID 0x7fffffff
+#else
+    /* Some compilers complain about constants of the form 0x7fffffffffffffff.
+    ** Others complain about 0x7ffffffffffffffffLL. The following macro seems
+    ** to provide the constant while making all compilers happy.
+    */
+# define MAX_ROWID  (i64)( (((u64)0x7fffffff)<<32) | (u64)0xffffffff )
+#endif
+
+    if( !pC->useRandomRowid ){
+      rc = sqlite3BtreeLast(pC->uc.pCursor, &res);
+      if( rc!=SQLITE_OK ){
+        goto abort_due_to_error;
+      }
+      if( res ){
+        v = 1;   /* IMP: R-61914-48074 */
+      }else{
+        assert( sqlite3BtreeCursorIsValid(pC->uc.pCursor) );
+        v = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+        if( v>=MAX_ROWID ){
+          pC->useRandomRowid = 1;
+        }else{
+          v++;   /* IMP: R-29538-34987 */
+        }
+      }
+    }
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+    if( pOp->p3 ){
+      /* Assert that P3 is a valid memory cell. */
+      assert( pOp->p3>0 );
+      if( p->pFrame ){
+        for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+        /* Assert that P3 is a valid memory cell. */
+        assert( pOp->p3<=pFrame->nMem );
+        pMem = &pFrame->aMem[pOp->p3];
+      }else{
+        /* Assert that P3 is a valid memory cell. */
+        assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
+        pMem = &aMem[pOp->p3];
+        memAboutToChange(p, pMem);
+      }
+      assert( memIsValid(pMem) );
+
+      REGISTER_TRACE(pOp->p3, pMem);
+      sqlite3VdbeMemIntegerify(pMem);
+      assert( (pMem->flags & MEM_Int)!=0 );  /* mem(P3) holds an integer */
+      if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){
+        rc = SQLITE_FULL;   /* IMP: R-17817-00630 */
+        goto abort_due_to_error;
+      }
+      if( v<pMem->u.i+1 ){
+        v = pMem->u.i + 1;
+      }
+      pMem->u.i = v;
+    }
+#endif
+    if( pC->useRandomRowid ){
+      /* IMPLEMENTATION-OF: R-07677-41881 If the largest ROWID is equal to the
+      ** largest possible integer (9223372036854775807) then the database
+      ** engine starts picking positive candidate ROWIDs at random until
+      ** it finds one that is not previously used. */
+      assert( pOp->p3==0 );  /* We cannot be in random rowid mode if this is
+                             ** an AUTOINCREMENT table. */
+      cnt = 0;
+      do{
+        sqlite3_randomness(sizeof(v), &v);
+        v &= (MAX_ROWID>>1); v++;  /* Ensure that v is greater than zero */
+      }while(  ((rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, 0, (u64)v,
+                                                 0, &res))==SQLITE_OK)
+            && (res==0)
+            && (++cnt<100));
+      if( rc ) goto abort_due_to_error;
+      if( res==0 ){
+        rc = SQLITE_FULL;   /* IMP: R-38219-53002 */
+        goto abort_due_to_error;
+      }
+      assert( v>0 );  /* EV: R-40812-03570 */
+    }
+    pC->deferredMoveto = 0;
+    pC->cacheStatus = CACHE_STALE;
+  }
+  pOut->u.i = v;
+  break;
+}
+
+/* Opcode: Insert P1 P2 P3 P4 P5
+** Synopsis: intkey=r[P3] data=r[P2]
+**
+** Write an entry into the table of cursor P1. A new entry is
+** created if it doesn't already exist or the data for an existing
+** entry is overwritten. The data is the value MEM_Blob stored in register
+** number P2. The key is stored in register P3. The key must
+** be a MEM_Int.
+**
+** If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is
+** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set,
+** then rowid is stored for subsequent return by the
+** sqlite3_last_insert_rowid() function (otherwise it is unmodified).
+**
+** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
+** run faster by avoiding an unnecessary seek on cursor P1. However,
+** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
+** seeks on the cursor or if the most recent seek used a key equal to P3.
+**
+** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an
+** UPDATE operation. Otherwise (if the flag is clear) then this opcode
+** is part of an INSERT operation. The difference is only important to
+** the update hook.
+**
+** Parameter P4 may point to a Table structure, or may be NULL. If it is
+** not NULL, then the update-hook (sqlite3.xUpdateCallback) is invoked
+** following a successful insert.
+**
+** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically
+** allocated, then ownership of P2 is transferred to the pseudo-cursor
+** and register P2 becomes ephemeral. If the cursor is changed, the
+** value of register P2 will then change. Make sure this does not
+** cause any problems.)
+**
+** This instruction only works on tables. The equivalent instruction
+** for indices is OP_IdxInsert.
+*/
+/* Opcode: InsertInt P1 P2 P3 P4 P5
+** Synopsis: intkey=P3 data=r[P2]
+**
+** This works exactly like OP_Insert except that the key is the
+** integer value P3, not the value of the integer stored in register P3.
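+**
+** A minimal sketch of the user-visible effect of OPFLAG_LASTROWID, using
+** the public API (the table name is hypothetical):
+**
+**    sqlite3_exec(db, "INSERT INTO t(x) VALUES(1)", 0, 0, 0);
+**    sqlite3_int64 id = sqlite3_last_insert_rowid(db);
+**    /* id is the rowid that OP_Insert just wrote */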
+*/
+case OP_Insert:
+case OP_InsertInt: {
+  Mem *pData;       /* MEM cell holding data for the record to be inserted */
+  Mem *pKey;        /* MEM cell holding key for the record */
+  VdbeCursor *pC;   /* Cursor to table into which insert is written */
+  int seekResult;   /* Result of prior seek or 0 if no USESEEKRESULT flag */
+  const char *zDb;  /* database name - used by the update hook */
+  Table *pTab;      /* Table structure - used by update and pre-update hooks */
+  int op;           /* Opcode for update hook: SQLITE_UPDATE or SQLITE_INSERT */
+  BtreePayload x;   /* Payload to be inserted */
+
+  op = 0;
+  pData = &aMem[pOp->p2];
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( memIsValid(pData) );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( pC->uc.pCursor!=0 );
+  assert( (pOp->p5 & OPFLAG_ISNOOP) || pC->isTable );
+  assert( pOp->p4type==P4_TABLE || pOp->p4type>=P4_STATIC );
+  REGISTER_TRACE(pOp->p2, pData);
+
+  if( pOp->opcode==OP_Insert ){
+    pKey = &aMem[pOp->p3];
+    assert( pKey->flags & MEM_Int );
+    assert( memIsValid(pKey) );
+    REGISTER_TRACE(pOp->p3, pKey);
+    x.nKey = pKey->u.i;
+  }else{
+    assert( pOp->opcode==OP_InsertInt );
+    x.nKey = pOp->p3;
+  }
+
+  if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
+    assert( pC->iDb>=0 );
+    zDb = db->aDb[pC->iDb].zDbSName;
+    pTab = pOp->p4.pTab;
+    assert( (pOp->p5 & OPFLAG_ISNOOP) || HasRowid(pTab) );
+    op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT);
+  }else{
+    pTab = 0; /* Not needed. Silence a compiler warning. */
+    zDb = 0;  /* Not needed. Silence a compiler warning. */
+  }
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+  /* Invoke the pre-update hook, if any */
+  if( db->xPreUpdateCallback
+   && pOp->p4type==P4_TABLE
+   && !(pOp->p5 & OPFLAG_ISUPDATE)
+  ){
+    sqlite3VdbePreUpdateHook(p, pC, SQLITE_INSERT, zDb, pTab, x.nKey, pOp->p2);
+  }
+  if( pOp->p5 & OPFLAG_ISNOOP ) break;
+#endif
+
+  if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
+  if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
+  if( pData->flags & MEM_Null ){
+    x.pData = 0;
+    x.nData = 0;
+  }else{
+    assert( pData->flags & (MEM_Blob|MEM_Str) );
+    x.pData = pData->z;
+    x.nData = pData->n;
+  }
+  seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0);
+  if( pData->flags & MEM_Zero ){
+    x.nZero = pData->u.nZero;
+  }else{
+    x.nZero = 0;
+  }
+  x.pKey = 0;
+  rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
+      (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), seekResult
+  );
+  pC->deferredMoveto = 0;
+  pC->cacheStatus = CACHE_STALE;
+
+  /* Invoke the update-hook if required. */
+  if( rc ) goto abort_due_to_error;
+  if( db->xUpdateCallback && op ){
+    db->xUpdateCallback(db->pUpdateArg, op, zDb, pTab->zName, x.nKey);
+  }
+  break;
+}
+
+/* Opcode: Delete P1 P2 P3 P4 P5
+**
+** Delete the record at which the P1 cursor is currently pointing.
+**
+** If the OPFLAG_SAVEPOSITION bit of the P5 parameter is set, then
+** the cursor will be left pointing at either the next or the previous
+** record in the table. If it is left pointing at the next record, then
+** the next Next instruction will be a no-op. As a result, in this case
+** it is ok to delete a record from within a Next loop. If the
+** OPFLAG_SAVEPOSITION bit of P5 is clear, then the cursor will be
+** left in an undefined state.
+**
+** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this
+** delete is one of several associated with deleting a table row and
+** all its associated index entries. Exactly one of those deletes is
+** the "primary" delete.
+** The others are all on OPFLAG_FORDELETE cursors or else are
+** marked with the AUXDELETE flag.
+**
+** If the OPFLAG_NCHANGE flag of P2 (NB: P2 not P5) is set, then the row
+** change count is incremented (otherwise not).
+**
+** P1 must not be a pseudo-table. It has to be a real table with
+** multiple rows.
+**
+** If P4 is not NULL then it points to a Table object. In this case either
+** the update or pre-update hook, or both, may be invoked. The P1 cursor must
+** have been positioned using OP_NotFound prior to invoking this opcode in
+** this case. Specifically, if one is configured, the pre-update hook is
+** invoked if P4 is not NULL. The update-hook is invoked if one is configured,
+** P4 is not NULL, and the OPFLAG_NCHANGE flag is set in P2.
+**
+** If the OPFLAG_ISUPDATE flag is set in P2, then P3 contains the address
+** of the memory cell that contains the value that the rowid of the row will
+** be set to by the update.
+*/
+case OP_Delete: {
+  VdbeCursor *pC;
+  const char *zDb;
+  Table *pTab;
+  int opflags;
+
+  opflags = pOp->p2;
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( pC->uc.pCursor!=0 );
+  assert( pC->deferredMoveto==0 );
+
+#ifdef SQLITE_DEBUG
+  if( pOp->p4type==P4_TABLE && HasRowid(pOp->p4.pTab) && pOp->p5==0 ){
+    /* If p5 is zero, the seek operation that positioned the cursor prior to
+    ** OP_Delete will have also set the pC->movetoTarget field to the rowid of
+    ** the row that is being deleted */
+    i64 iKey = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+    assert( pC->movetoTarget==iKey );
+  }
+#endif
+
+  /* If the update-hook or pre-update-hook will be invoked, set zDb to
+  ** the name of the db to pass to it. Also set local pTab to a copy
+  ** of p4.pTab. Finally, if p5 is true, indicating that this cursor was
+  ** last moved with OP_Next or OP_Prev, not Seek or NotFound, set
+  ** VdbeCursor.movetoTarget to the current rowid. */
+  if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
+    assert( pC->iDb>=0 );
+    assert( pOp->p4.pTab!=0 );
+    zDb = db->aDb[pC->iDb].zDbSName;
+    pTab = pOp->p4.pTab;
+    if( (pOp->p5 & OPFLAG_SAVEPOSITION)!=0 && pC->isTable ){
+      pC->movetoTarget = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+    }
+  }else{
+    zDb = 0;   /* Not needed. Silence a compiler warning. */
+    pTab = 0;  /* Not needed. Silence a compiler warning. */
+  }
+
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+  /* Invoke the pre-update-hook if required. */
+  if( db->xPreUpdateCallback && pOp->p4.pTab ){
+    assert( !(opflags & OPFLAG_ISUPDATE)
+         || HasRowid(pTab)==0
+         || (aMem[pOp->p3].flags & MEM_Int)
+    );
+    sqlite3VdbePreUpdateHook(p, pC,
+        (opflags & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_DELETE,
+        zDb, pTab, pC->movetoTarget,
+        pOp->p3
+    );
+  }
+  if( opflags & OPFLAG_ISNOOP ) break;
+#endif
+
+  /* Only flags that can be set are SAVEPOSITION and AUXDELETE */
+  assert( (pOp->p5 & ~(OPFLAG_SAVEPOSITION|OPFLAG_AUXDELETE))==0 );
+  assert( OPFLAG_SAVEPOSITION==BTREE_SAVEPOSITION );
+  assert( OPFLAG_AUXDELETE==BTREE_AUXDELETE );
+
+#ifdef SQLITE_DEBUG
+  if( p->pFrame==0 ){
+    if( pC->isEphemeral==0
+     && (pOp->p5 & OPFLAG_AUXDELETE)==0
+     && (pC->wrFlag & OPFLAG_FORDELETE)==0
+    ){
+      nExtraDelete++;
+    }
+    if( pOp->p2 & OPFLAG_NCHANGE ){
+      nExtraDelete--;
+    }
+  }
+#endif
+
+  rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5);
+  pC->cacheStatus = CACHE_STALE;
+  pC->seekResult = 0;
+  if( rc ) goto abort_due_to_error;
+
+  /* Invoke the update-hook if required. */
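+  /* (Sketch of the callback shape, using the public API: a hook registered
+  ** with sqlite3_update_hook() receives SQLITE_DELETE, the database and
+  ** table names, and the rowid saved in pC->movetoTarget.) */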
+  if( opflags & OPFLAG_NCHANGE ){
+    p->nChange++;
+    if( db->xUpdateCallback && HasRowid(pTab) ){
+      db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, pTab->zName,
+          pC->movetoTarget);
+      assert( pC->iDb>=0 );
+    }
+  }
+
+  break;
+}
+/* Opcode: ResetCount * * * * *
+**
+** The value of the change counter is copied to the database handle
+** change counter (returned by subsequent calls to sqlite3_changes()).
+** Then the VM's internal change counter resets to 0.
+** This is used by trigger programs.
+*/
+case OP_ResetCount: {
+  sqlite3VdbeSetChanges(db, p->nChange);
+  p->nChange = 0;
+  break;
+}
+
+/* Opcode: SorterCompare P1 P2 P3 P4
+** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2
+**
+** P1 is a sorter cursor. This instruction compares a prefix of the
+** record blob in register P3 against a prefix of the entry that
+** the sorter cursor currently points to. Only the first P4 fields
+** of r[P3] and the sorter record are compared.
+**
+** If either P3 or the sorter contains a NULL in one of their significant
+** fields (not counting the P4 fields at the end which are ignored) then
+** the comparison is assumed to be equal.
+**
+** Fall through to next instruction if the two records compare equal to
+** each other. Jump to P2 if they are different.
+*/
+case OP_SorterCompare: {
+  VdbeCursor *pC;
+  int res;
+  int nKeyCol;
+
+  pC = p->apCsr[pOp->p1];
+  assert( isSorter(pC) );
+  assert( pOp->p4type==P4_INT32 );
+  pIn3 = &aMem[pOp->p3];
+  nKeyCol = pOp->p4.i;
+  res = 0;
+  rc = sqlite3VdbeSorterCompare(pC, pIn3, nKeyCol, &res);
+  VdbeBranchTaken(res!=0,2);
+  if( rc ) goto abort_due_to_error;
+  if( res ) goto jump_to_p2;
+  break;
+};
+
+/* Opcode: SorterData P1 P2 P3 * *
+** Synopsis: r[P2]=data
+**
+** Write into register P2 the current sorter data for sorter cursor P1.
+** Then clear the column header cache on cursor P3.
+**
+** This opcode is normally used to move a record out of the sorter and into
+** a register that is the source for a pseudo-table cursor created using
+** OpenPseudo. That pseudo-table cursor is the one that is identified by
+** parameter P3. Clearing the P3 column cache as part of this opcode saves
+** us from having to issue a separate NullRow instruction to clear that cache.
+*/
+case OP_SorterData: {
+  VdbeCursor *pC;
+
+  pOut = &aMem[pOp->p2];
+  pC = p->apCsr[pOp->p1];
+  assert( isSorter(pC) );
+  rc = sqlite3VdbeSorterRowkey(pC, pOut);
+  assert( rc!=SQLITE_OK || (pOut->flags & MEM_Blob) );
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  if( rc ) goto abort_due_to_error;
+  p->apCsr[pOp->p3]->cacheStatus = CACHE_STALE;
+  break;
+}
+
+/* Opcode: RowData P1 P2 P3 * *
+** Synopsis: r[P2]=data
+**
+** Write into register P2 the complete row content for the row at
+** which cursor P1 is currently pointing.
+** There is no interpretation of the data.
+** It is just copied onto the P2 register exactly as
+** it is found in the database file.
+**
+** If cursor P1 is an index, then the content is the key of the row.
+** If cursor P1 is a table, then the content extracted is the data.
+**
+** The P1 cursor must be pointing to a valid row (not a NULL row)
+** of a real table, not a pseudo-table.
+**
+** If P3!=0 then this opcode is allowed to make an ephemeral pointer
+** into the database page. That means that the content of the output
+** register will be invalidated as soon as the cursor moves - including
+** moves caused by other cursors that "save" the current cursor's
+** position in order that they can write to the same table. If P3==0
+** then a copy of the data is made into memory. P3!=0 is faster, but
+** P3==0 is safer.
+**
+** If P3!=0 then the content of the P2 register is unsuitable for use
+** in OP_Result and any OP_Result will invalidate the P2 register content.
+** The P2 register content is invalidated by opcodes like OP_Function or
+** by any use of another cursor pointing to the same table.
+*/
+case OP_RowData: {
+  VdbeCursor *pC;
+  BtCursor *pCrsr;
+  u32 n;
+
+  pOut = out2Prerelease(p, pOp);
+
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( isSorter(pC)==0 );
+  assert( pC->nullRow==0 );
+  assert( pC->uc.pCursor!=0 );
+  pCrsr = pC->uc.pCursor;
+
+  /* The OP_RowData opcodes always follow OP_NotExists or
+  ** OP_SeekRowid or OP_Rewind/OP_Next with no intervening instructions
+  ** that might invalidate the cursor.
+  ** If this were not the case, one of the following assert()s
+  ** would fail. Should this ever change (because of changes in the code
+  ** generator) then the fix would be to insert a call to
+  ** sqlite3VdbeCursorMoveto().
+  */
+  assert( pC->deferredMoveto==0 );
+  assert( sqlite3BtreeCursorIsValid(pCrsr) );
+#if 0  /* Not required due to the previous two assert() statements */
+  rc = sqlite3VdbeCursorMoveto(pC);
+  if( rc!=SQLITE_OK ) goto abort_due_to_error;
+#endif
+
+  n = sqlite3BtreePayloadSize(pCrsr);
+  if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
+    goto too_big;
+  }
+  testcase( n==0 );
+  rc = sqlite3VdbeMemFromBtree(pCrsr, 0, n, pOut);
+  if( rc ) goto abort_due_to_error;
+  if( !pOp->p3 ) Deephemeralize(pOut);
+  UPDATE_MAX_BLOBSIZE(pOut);
+  REGISTER_TRACE(pOp->p2, pOut);
+  break;
+}
+
+/* Opcode: Rowid P1 P2 * * *
+** Synopsis: r[P2]=rowid
+**
+** Store in register P2 an integer which is the key of the table entry that
+** P1 is currently pointing to.
+**
+** P1 can be either an ordinary table or a virtual table. There used to
+** be a separate OP_VRowid opcode for use with virtual tables, but this
+** one opcode now works for both table types.
+*/
+case OP_Rowid: {                 /* out2 */
+  VdbeCursor *pC;
+  i64 v;
+  sqlite3_vtab *pVtab;
+  const sqlite3_module *pModule;
+
+  pOut = out2Prerelease(p, pOp);
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow );
+  if( pC->nullRow ){
+    pOut->flags = MEM_Null;
+    break;
+  }else if( pC->deferredMoveto ){
+    v = pC->movetoTarget;
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  }else if( pC->eCurType==CURTYPE_VTAB ){
+    assert( pC->uc.pVCur!=0 );
+    pVtab = pC->uc.pVCur->pVtab;
+    pModule = pVtab->pModule;
+    assert( pModule->xRowid );
+    rc = pModule->xRowid(pC->uc.pVCur, &v);
+    sqlite3VtabImportErrmsg(p, pVtab);
+    if( rc ) goto abort_due_to_error;
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+  }else{
+    assert( pC->eCurType==CURTYPE_BTREE );
+    assert( pC->uc.pCursor!=0 );
+    rc = sqlite3VdbeCursorRestore(pC);
+    if( rc ) goto abort_due_to_error;
+    if( pC->nullRow ){
+      pOut->flags = MEM_Null;
+      break;
+    }
+    v = sqlite3BtreeIntegerKey(pC->uc.pCursor);
+  }
+  pOut->u.i = v;
+  break;
+}
+
+/* Opcode: NullRow P1 * * * *
+**
+** Move the cursor P1 to a null row. Any OP_Column operations
+** that occur while the cursor is on the null row will always
+** write a NULL.
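+**
+** (This is, for example, how the unmatched right-hand side of a
+** LEFT JOIN yields its all-NULL columns.)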
+*/ +case OP_NullRow: { + VdbeCursor *pC; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + pC->nullRow = 1; + pC->cacheStatus = CACHE_STALE; + if( pC->eCurType==CURTYPE_BTREE ){ + assert( pC->uc.pCursor!=0 ); + sqlite3BtreeClearCursor(pC->uc.pCursor); + } + break; +} + +/* Opcode: Last P1 P2 P3 * * +** +** The next use of the Rowid or Column or Prev instruction for P1 +** will refer to the last entry in the database table or index. +** If the table or index is empty and P2>0, then jump immediately to P2. +** If P2 is 0 or if the table or index is not empty, fall through +** to the following instruction. +** +** This opcode leaves the cursor configured to move in reverse order, +** from the end toward the beginning. In other words, the cursor is +** configured to use Prev, not Next. +** +** If P3 is -1, then the cursor is positioned at the end of the btree +** for the purpose of appending a new entry onto the btree. In that +** case P2 must be 0. It is assumed that the cursor is used only for +** appending and so if the cursor is valid, then the cursor must already +** be pointing at the end of the btree and so no changes are made to +** the cursor. +*/ +case OP_Last: { /* jump */ + VdbeCursor *pC; + BtCursor *pCrsr; + int res; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + pCrsr = pC->uc.pCursor; + res = 0; + assert( pCrsr!=0 ); + pC->seekResult = pOp->p3; +#ifdef SQLITE_DEBUG + pC->seekOp = OP_Last; +#endif + if( pOp->p3==0 || !sqlite3BtreeCursorIsValidNN(pCrsr) ){ + rc = sqlite3BtreeLast(pCrsr, &res); + pC->nullRow = (u8)res; + pC->deferredMoveto = 0; + pC->cacheStatus = CACHE_STALE; + if( rc ) goto abort_due_to_error; + if( pOp->p2>0 ){ + VdbeBranchTaken(res!=0,2); + if( res ) goto jump_to_p2; + } + }else{ + assert( pOp->p2==0 ); + } + break; +} + + +/* Opcode: SorterSort P1 P2 * * * +** +** After all records have been inserted into the Sorter object +** identified by P1, invoke this opcode to actually do the sorting. +** Jump to P2 if there are no records to be sorted. +** +** This opcode is an alias for OP_Sort and OP_Rewind that is used +** for Sorter objects. +*/ +/* Opcode: Sort P1 P2 * * * +** +** This opcode does exactly the same thing as OP_Rewind except that +** it increments an undocumented global variable used for testing. +** +** Sorting is accomplished by writing records into a sorting index, +** then rewinding that index and playing it back from beginning to +** end. We use the OP_Sort opcode instead of OP_Rewind to do the +** rewinding so that the global variable will be incremented and +** regression tests can determine whether or not the optimizer is +** correctly optimizing out sorts. +*/ +case OP_SorterSort: /* jump */ +case OP_Sort: { /* jump */ +#ifdef SQLITE_TEST + sqlite3_sort_count++; + sqlite3_search_count--; +#endif + p->aCounter[SQLITE_STMTSTATUS_SORT]++; + /* Fall through into OP_Rewind */ +} +/* Opcode: Rewind P1 P2 * * * +** +** The next use of the Rowid or Column or Next instruction for P1 +** will refer to the first entry in the database table or index. +** If the table or index is empty, jump immediately to P2. +** If the table or index is not empty, fall through to the following +** instruction. +** +** This opcode leaves the cursor configured to move in forward order, +** from the beginning toward the end. In other words, the cursor is +** configured to use Next, not Prev. 
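+**
+** A minimal full-scan sketch (illustrative only; actual code generation
+** may differ) for "SELECT x FROM t1" with table cursor c:
+**
+**     0: Rewind    c, 4          # jump to 4 if t1 is empty
+**     1: Column    c, 0, r1      # r1 = t1.x of the current row
+**     2: ResultRow r1, 1
+**     3: Next      c, 1          # loop back to 1 while rows remain
+**     4: Halt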
+*/
+case OP_Rewind: {        /* jump */
+  VdbeCursor *pC;
+  BtCursor *pCrsr;
+  int res;
+
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) );
+  res = 1;
+#ifdef SQLITE_DEBUG
+  pC->seekOp = OP_Rewind;
+#endif
+  if( isSorter(pC) ){
+    rc = sqlite3VdbeSorterRewind(pC, &res);
+  }else{
+    assert( pC->eCurType==CURTYPE_BTREE );
+    pCrsr = pC->uc.pCursor;
+    assert( pCrsr );
+    rc = sqlite3BtreeFirst(pCrsr, &res);
+    pC->deferredMoveto = 0;
+    pC->cacheStatus = CACHE_STALE;
+  }
+  if( rc ) goto abort_due_to_error;
+  pC->nullRow = (u8)res;
+  assert( pOp->p2>0 && pOp->p2<p->nOp );
+  VdbeBranchTaken(res!=0,2);
+  if( res ) goto jump_to_p2;
+  break;
+}
+
+/* Opcode: Next P1 P2 P3 P4 P5
+**
+** Advance cursor P1 so that it points to the next key/data pair in its
+** table or index.  If there are no more key/value pairs then fall through
+** to the following instruction.  But if the cursor advance was successful,
+** jump immediately to P2.
+**
+** The Next opcode is only valid following a SeekGT, SeekGE, or
+** OP_Rewind opcode used to position the cursor.  Next is not allowed
+** to follow SeekLT, SeekLE, or OP_Last.
+**
+** The P1 cursor must be for a real table, not a pseudo-table.  P1 must have
+** been opened prior to this opcode or the program will segfault.
+**
+** The P3 value is a hint to the btree implementation.  If P3==1, that
+** means P1 is an SQL index and that this instruction could have been
+** omitted if that index had been unique.  P3 is usually 0.  P3 is
+** always either 0 or 1.
+**
+** P4 is always of type P4_ADVANCE.  The function pointer points to
+** sqlite3BtreeNext().
+**
+** If P5 is positive and the jump is taken, then event counter
+** number P5-1 in the prepared statement is incremented.
+**
+** See also: Prev, NextIfOpen
+*/
+/* Opcode: NextIfOpen P1 P2 P3 P4 P5
+**
+** This opcode works just like Next except that if cursor P1 is not
+** open it behaves as a no-op.
+*/
+/* Opcode: Prev P1 P2 P3 P4 P5
+**
+** Back up cursor P1 so that it points to the previous key/data pair in its
+** table or index.  If there is no previous key/value pair then fall through
+** to the following instruction.  But if the cursor backup was successful,
+** jump immediately to P2.
+**
+**
+** The Prev opcode is only valid following a SeekLT, SeekLE, or
+** OP_Last opcode used to position the cursor.  Prev is not allowed
+** to follow SeekGT, SeekGE, or OP_Rewind.
+**
+** The P1 cursor must be for a real table, not a pseudo-table.  If P1 is
+** not open then the behavior is undefined.
+**
+** The P3 value is a hint to the btree implementation.  If P3==1, that
+** means P1 is an SQL index and that this instruction could have been
+** omitted if that index had been unique.  P3 is usually 0.  P3 is
+** always either 0 or 1.
+**
+** P4 is always of type P4_ADVANCE.  The function pointer points to
+** sqlite3BtreePrevious().
+**
+** If P5 is positive and the jump is taken, then event counter
+** number P5-1 in the prepared statement is incremented.
+*/
+/* Opcode: PrevIfOpen P1 P2 P3 P4 P5
+**
+** This opcode works just like Prev except that if cursor P1 is not
+** open it behaves as a no-op.
+*/
+/* Opcode: SorterNext P1 P2 * * P5
+**
+** This opcode works just like OP_Next except that P1 must be a
+** sorter object for which the OP_SorterSort opcode has been
+** invoked.  This opcode advances the cursor to the next sorted
+** record, jumping to P2 when one exists, and falls through once
+** the sorted records are exhausted.
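+**
+** Taken together, a typical ORDER BY evaluation (sketched here under the
+** assumption of a simple single-table query; register and cursor names
+** are placeholders) threads these opcodes as:
+**
+**     SorterOpen s
+**     ...for each input row: MakeRecord -> SorterInsert s...
+**     SorterSort s, end            # sort; jump to end if no rows
+**     loop:  SorterData s, r1, ps  # copy sorted record into r1
+**            ...emit columns via pseudo-cursor ps...
+**            SorterNext s, loop    # loop while sorted records remain
+**     end: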
+*/ +case OP_SorterNext: { /* jump */ + VdbeCursor *pC; + int res; + + pC = p->apCsr[pOp->p1]; + assert( isSorter(pC) ); + res = 0; + rc = sqlite3VdbeSorterNext(db, pC, &res); + goto next_tail; +case OP_PrevIfOpen: /* jump */ +case OP_NextIfOpen: /* jump */ + if( p->apCsr[pOp->p1]==0 ) break; + /* Fall through */ +case OP_Prev: /* jump */ +case OP_Next: /* jump */ + assert( pOp->p1>=0 && pOp->p1nCursor ); + assert( pOp->p5aCounter) ); + pC = p->apCsr[pOp->p1]; + res = pOp->p3; + assert( pC!=0 ); + assert( pC->deferredMoveto==0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + assert( res==0 || (res==1 && pC->isTable==0) ); + testcase( res==1 ); + assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext ); + assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious ); + assert( pOp->opcode!=OP_NextIfOpen || pOp->p4.xAdvance==sqlite3BtreeNext ); + assert( pOp->opcode!=OP_PrevIfOpen || pOp->p4.xAdvance==sqlite3BtreePrevious); + + /* The Next opcode is only used after SeekGT, SeekGE, and Rewind. + ** The Prev opcode is only used after SeekLT, SeekLE, and Last. */ + assert( pOp->opcode!=OP_Next || pOp->opcode!=OP_NextIfOpen + || pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE + || pC->seekOp==OP_Rewind || pC->seekOp==OP_Found); + assert( pOp->opcode!=OP_Prev || pOp->opcode!=OP_PrevIfOpen + || pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE + || pC->seekOp==OP_Last ); + + rc = pOp->p4.xAdvance(pC->uc.pCursor, &res); +next_tail: + pC->cacheStatus = CACHE_STALE; + VdbeBranchTaken(res==0,2); + if( rc ) goto abort_due_to_error; + if( res==0 ){ + pC->nullRow = 0; + p->aCounter[pOp->p5]++; +#ifdef SQLITE_TEST + sqlite3_search_count++; +#endif + goto jump_to_p2_and_check_for_interrupt; + }else{ + pC->nullRow = 1; + } + goto check_for_interrupt; +} + +/* Opcode: IdxInsert P1 P2 P3 P4 P5 +** Synopsis: key=r[P2] +** +** Register P2 holds an SQL index key made using the +** MakeRecord instructions. This opcode writes that key +** into the index P1. Data for the entry is nil. +** +** If P4 is not zero, then it is the number of values in the unpacked +** key of reg(P2). In that case, P3 is the index of the first register +** for the unpacked key. The availability of the unpacked key can sometimes +** be an optimization. +** +** If P5 has the OPFLAG_APPEND bit set, that is a hint to the b-tree layer +** that this insert is likely to be an append. +** +** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is +** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear, +** then the change counter is unchanged. +** +** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might +** run faster by avoiding an unnecessary seek on cursor P1. However, +** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior +** seeks on the cursor or if the most recent seek used a key equivalent +** to P2. +** +** This instruction only works for indices. The equivalent instruction +** for tables is OP_Insert. +*/ +/* Opcode: SorterInsert P1 P2 * * * +** Synopsis: key=r[P2] +** +** Register P2 holds an SQL index key made using the +** MakeRecord instructions. This opcode writes that key +** into the sorter P1. Data for the entry is nil. 
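+**
+** For example (an illustrative sketch, not generated code), maintaining
+** an index i1 on t1(a) during an INSERT ends with roughly:
+**
+**     MakeRecord r1, 2, r3    # r1=a value, r2=rowid -> record in r3
+**     IdxInsert  i1, r3       # write the (a, rowid) key into i1
+**
+** where the record in r3 is the complete index key and the index entry
+** carries no separate data payload.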
+*/ +case OP_SorterInsert: /* in2 */ +case OP_IdxInsert: { /* in2 */ + VdbeCursor *pC; + BtreePayload x; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( isSorter(pC)==(pOp->opcode==OP_SorterInsert) ); + pIn2 = &aMem[pOp->p2]; + assert( pIn2->flags & MEM_Blob ); + if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; + assert( pC->eCurType==CURTYPE_BTREE || pOp->opcode==OP_SorterInsert ); + assert( pC->isTable==0 ); + rc = ExpandBlob(pIn2); + if( rc ) goto abort_due_to_error; + if( pOp->opcode==OP_SorterInsert ){ + rc = sqlite3VdbeSorterWrite(pC, pIn2); + }else{ + x.nKey = pIn2->n; + x.pKey = pIn2->z; + x.aMem = aMem + pOp->p3; + x.nMem = (u16)pOp->p4.i; + rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, + (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), + ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) + ); + assert( pC->deferredMoveto==0 ); + pC->cacheStatus = CACHE_STALE; + } + if( rc) goto abort_due_to_error; + break; +} + +/* Opcode: IdxDelete P1 P2 P3 * * +** Synopsis: key=r[P2@P3] +** +** The content of P3 registers starting at register P2 form +** an unpacked index key. This opcode removes that entry from the +** index opened by cursor P1. +*/ +case OP_IdxDelete: { + VdbeCursor *pC; + BtCursor *pCrsr; + int res; + UnpackedRecord r; + + assert( pOp->p3>0 ); + assert( pOp->p2>0 && pOp->p2+pOp->p3<=(p->nMem+1 - p->nCursor)+1 ); + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + pCrsr = pC->uc.pCursor; + assert( pCrsr!=0 ); + assert( pOp->p5==0 ); + r.pKeyInfo = pC->pKeyInfo; + r.nField = (u16)pOp->p3; + r.default_rc = 0; + r.aMem = &aMem[pOp->p2]; + rc = sqlite3BtreeMovetoUnpacked(pCrsr, &r, 0, 0, &res); + if( rc ) goto abort_due_to_error; + if( res==0 ){ + rc = sqlite3BtreeDelete(pCrsr, BTREE_AUXDELETE); + if( rc ) goto abort_due_to_error; + } + assert( pC->deferredMoveto==0 ); + pC->cacheStatus = CACHE_STALE; + pC->seekResult = 0; + break; +} + +/* Opcode: Seek P1 * P3 P4 * +** Synopsis: Move P3 to P1.rowid +** +** P1 is an open index cursor and P3 is a cursor on the corresponding +** table. This opcode does a deferred seek of the P3 table cursor +** to the row that corresponds to the current row of P1. +** +** This is a deferred seek. Nothing actually happens until +** the cursor is used to read a record. That way, if no reads +** occur, no unnecessary I/O happens. +** +** P4 may be an array of integers (type P4_INTARRAY) containing +** one entry for each column in the P3 table. If array entry a(i) +** is non-zero, then reading column a(i)-1 from cursor P3 is +** equivalent to performing the deferred seek and then reading column i +** from P1. This information is stored in P3 and used to redirect +** reads against P3 over to P1, thus possibly avoiding the need to +** seek and read cursor P3. +*/ +/* Opcode: IdxRowid P1 P2 * * * +** Synopsis: r[P2]=rowid +** +** Write into register P2 an integer which is the last entry in the record at +** the end of the index key pointed to by cursor P1. This integer should be +** the rowid of the table entry to which this index entry points. +** +** See also: Rowid, MakeRecord. 
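+**
+** As a sketch of why the deferred seek matters: in a hypothetical query
+** "SELECT b FROM t1 WHERE a=?" driven by an index on t1(a), the index
+** cursor finds the match and OP_Seek merely records the rowid in the
+** table cursor.  The expensive b-tree lookup on the table happens only
+** if and when OP_Column actually needs the non-indexed column b.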
+*/ +case OP_Seek: +case OP_IdxRowid: { /* out2 */ + VdbeCursor *pC; /* The P1 index cursor */ + VdbeCursor *pTabCur; /* The P2 table cursor (OP_Seek only) */ + i64 rowid; /* Rowid that P1 current points to */ + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + assert( pC->eCurType==CURTYPE_BTREE ); + assert( pC->uc.pCursor!=0 ); + assert( pC->isTable==0 ); + assert( pC->deferredMoveto==0 ); + assert( !pC->nullRow || pOp->opcode==OP_IdxRowid ); + + /* The IdxRowid and Seek opcodes are combined because of the commonality + ** of sqlite3VdbeCursorRestore() and sqlite3VdbeIdxRowid(). */ + rc = sqlite3VdbeCursorRestore(pC); + + /* sqlite3VbeCursorRestore() can only fail if the record has been deleted + ** out from under the cursor. That will never happens for an IdxRowid + ** or Seek opcode */ + if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error; + + if( !pC->nullRow ){ + rowid = 0; /* Not needed. Only used to silence a warning. */ + rc = sqlite3VdbeIdxRowid(db, pC->uc.pCursor, &rowid); + if( rc!=SQLITE_OK ){ + goto abort_due_to_error; + } + if( pOp->opcode==OP_Seek ){ + assert( pOp->p3>=0 && pOp->p3nCursor ); + pTabCur = p->apCsr[pOp->p3]; + assert( pTabCur!=0 ); + assert( pTabCur->eCurType==CURTYPE_BTREE ); + assert( pTabCur->uc.pCursor!=0 ); + assert( pTabCur->isTable ); + pTabCur->nullRow = 0; + pTabCur->movetoTarget = rowid; + pTabCur->deferredMoveto = 1; + assert( pOp->p4type==P4_INTARRAY || pOp->p4.ai==0 ); + pTabCur->aAltMap = pOp->p4.ai; + pTabCur->pAltCursor = pC; + }else{ + pOut = out2Prerelease(p, pOp); + pOut->u.i = rowid; + } + }else{ + assert( pOp->opcode==OP_IdxRowid ); + sqlite3VdbeMemSetNull(&aMem[pOp->p2]); + } + break; +} + +/* Opcode: IdxGE P1 P2 P3 P4 P5 +** Synopsis: key=r[P3@P4] +** +** The P4 register values beginning with P3 form an unpacked index +** key that omits the PRIMARY KEY. Compare this key value against the index +** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID +** fields at the end. +** +** If the P1 index entry is greater than or equal to the key value +** then jump to P2. Otherwise fall through to the next instruction. +*/ +/* Opcode: IdxGT P1 P2 P3 P4 P5 +** Synopsis: key=r[P3@P4] +** +** The P4 register values beginning with P3 form an unpacked index +** key that omits the PRIMARY KEY. Compare this key value against the index +** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID +** fields at the end. +** +** If the P1 index entry is greater than the key value +** then jump to P2. Otherwise fall through to the next instruction. +*/ +/* Opcode: IdxLT P1 P2 P3 P4 P5 +** Synopsis: key=r[P3@P4] +** +** The P4 register values beginning with P3 form an unpacked index +** key that omits the PRIMARY KEY or ROWID. Compare this key value against +** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or +** ROWID on the P1 index. +** +** If the P1 index entry is less than the key value then jump to P2. +** Otherwise fall through to the next instruction. +*/ +/* Opcode: IdxLE P1 P2 P3 P4 P5 +** Synopsis: key=r[P3@P4] +** +** The P4 register values beginning with P3 form an unpacked index +** key that omits the PRIMARY KEY or ROWID. Compare this key value against +** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or +** ROWID on the P1 index. +** +** If the P1 index entry is less than or equal to the key value then jump +** to P2. Otherwise fall through to the next instruction. 
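+**
+** These comparisons typically terminate index range scans.  An
+** illustrative sketch for "WHERE a BETWEEN 10 AND 20" over an index on
+** column a, with the bounds in registers r10 and r20:
+**
+**     SeekGE  i1, done, r10, 1        # position at first key >= 10
+**     loop:   IdxGT i1, done, r20, 1  # stop once key > 20
+**             ...use the row...
+**             Next  i1, loop
+**     done: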
+*/
+case OP_IdxLE:          /* jump */
+case OP_IdxGT:          /* jump */
+case OP_IdxLT:          /* jump */
+case OP_IdxGE:  {       /* jump */
+  VdbeCursor *pC;
+  int res;
+  UnpackedRecord r;
+
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  pC = p->apCsr[pOp->p1];
+  assert( pC!=0 );
+  assert( pC->isOrdered );
+  assert( pC->eCurType==CURTYPE_BTREE );
+  assert( pC->uc.pCursor!=0);
+  assert( pC->deferredMoveto==0 );
+  assert( pOp->p5==0 || pOp->p5==1 );
+  assert( pOp->p4type==P4_INT32 );
+  r.pKeyInfo = pC->pKeyInfo;
+  r.nField = (u16)pOp->p4.i;
+  if( pOp->opcode<OP_IdxLT ){
+    assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxGT );
+    r.default_rc = -1;
+  }else{
+    assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxLT );
+    r.default_rc = 0;
+  }
+  r.aMem = &aMem[pOp->p3];
+#ifdef SQLITE_DEBUG
+  { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+#endif
+  res = 0;  /* Not needed.  Only used to silence a warning. */
+  rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res);
+  assert( (OP_IdxLE&1)==(OP_IdxLT&1) && (OP_IdxGE&1)==(OP_IdxGT&1) );
+  if( (pOp->opcode&1)==(OP_IdxLT&1) ){
+    assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxLT );
+    res = -res;
+  }else{
+    assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxGT );
+    res++;
+  }
+  VdbeBranchTaken(res>0,2);
+  if( rc ) goto abort_due_to_error;
+  if( res>0 ) goto jump_to_p2;
+  break;
+}
+
+/* Opcode: Destroy P1 P2 P3 * *
+**
+** Delete an entire database table or index whose root page in the database
+** file is given by P1.
+**
+** The table being destroyed is in the main database file if P3==0.  If
+** P3==1 then the table to be destroyed is in the auxiliary database file
+** that is used to store tables created using CREATE TEMPORARY TABLE.
+**
+** If AUTOVACUUM is enabled then it is possible that another root page
+** might be moved into the newly deleted root page in order to keep all
+** root pages contiguous at the beginning of the database.  The former
+** value of the root page that moved - its value before the move occurred -
+** is stored in register P2.  If no page
+** movement was required (because the table being dropped was already
+** the last one in the database) then a zero is stored in register P2.
+** If AUTOVACUUM is disabled then a zero is stored in register P2.
+**
+** See also: Clear
+*/
+case OP_Destroy: {     /* out2 */
+  int iMoved;
+  int iDb;
+
+  assert( p->readOnly==0 );
+  assert( pOp->p1>1 );
+  pOut = out2Prerelease(p, pOp);
+  pOut->flags = MEM_Null;
+  if( db->nVdbeRead > db->nVDestroy+1 ){
+    rc = SQLITE_LOCKED;
+    p->errorAction = OE_Abort;
+    goto abort_due_to_error;
+  }else{
+    iDb = pOp->p3;
+    assert( DbMaskTest(p->btreeMask, iDb) );
+    iMoved = 0;  /* Not needed.  Only to silence a warning. */
+    rc = sqlite3BtreeDropTable(db->aDb[iDb].pBt, pOp->p1, &iMoved);
+    pOut->flags = MEM_Int;
+    pOut->u.i = iMoved;
+    if( rc ) goto abort_due_to_error;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    if( iMoved!=0 ){
+      sqlite3RootPageMoved(db, iDb, iMoved, pOp->p1);
+      /* All OP_Destroy operations occur on the same btree */
+      assert( resetSchemaOnFault==0 || resetSchemaOnFault==iDb+1 );
+      resetSchemaOnFault = iDb+1;
+    }
+#endif
+  }
+  break;
+}
+
+/* Opcode: Clear P1 P2 P3
+**
+** Delete all contents of the database table or index whose root page
+** in the database file is given by P1.  But, unlike Destroy, do not
+** remove the table or index from the database file.
+**
+** The table being cleared is in the main database file if P2==0.  If
+** P2==1 then the table to be cleared is in the auxiliary database file
+** that is used to store tables created using CREATE TEMPORARY TABLE.
+**
+** If the P3 value is non-zero, then the table referred to must be an
+** intkey table (an SQL table, not an index).  In this case the row change
+** count is incremented by the number of rows in the table being cleared.
+** If P3 is greater than zero, then the value stored in register P3 is +** also incremented by the number of rows in the table being cleared. +** +** See also: Destroy +*/ +case OP_Clear: { + int nChange; + + nChange = 0; + assert( p->readOnly==0 ); + assert( DbMaskTest(p->btreeMask, pOp->p2) ); + rc = sqlite3BtreeClearTable( + db->aDb[pOp->p2].pBt, pOp->p1, (pOp->p3 ? &nChange : 0) + ); + if( pOp->p3 ){ + p->nChange += nChange; + if( pOp->p3>0 ){ + assert( memIsValid(&aMem[pOp->p3]) ); + memAboutToChange(p, &aMem[pOp->p3]); + aMem[pOp->p3].u.i += nChange; + } + } + if( rc ) goto abort_due_to_error; + break; +} + +/* Opcode: ResetSorter P1 * * * * +** +** Delete all contents from the ephemeral table or sorter +** that is open on cursor P1. +** +** This opcode only works for cursors used for sorting and +** opened with OP_OpenEphemeral or OP_SorterOpen. +*/ +case OP_ResetSorter: { + VdbeCursor *pC; + + assert( pOp->p1>=0 && pOp->p1nCursor ); + pC = p->apCsr[pOp->p1]; + assert( pC!=0 ); + if( isSorter(pC) ){ + sqlite3VdbeSorterReset(db, pC->uc.pSorter); + }else{ + assert( pC->eCurType==CURTYPE_BTREE ); + assert( pC->isEphemeral ); + rc = sqlite3BtreeClearTableOfCursor(pC->uc.pCursor); + if( rc ) goto abort_due_to_error; + } + break; +} + +/* Opcode: CreateTable P1 P2 * * * +** Synopsis: r[P2]=root iDb=P1 +** +** Allocate a new table in the main database file if P1==0 or in the +** auxiliary database file if P1==1 or in an attached database if +** P1>1. Write the root page number of the new table into +** register P2 +** +** The difference between a table and an index is this: A table must +** have a 4-byte integer key and can have arbitrary data. An index +** has an arbitrary key but no data. +** +** See also: CreateIndex +*/ +/* Opcode: CreateIndex P1 P2 * * * +** Synopsis: r[P2]=root iDb=P1 +** +** Allocate a new index in the main database file if P1==0 or in the +** auxiliary database file if P1==1 or in an attached database if +** P1>1. Write the root page number of the new table into +** register P2. +** +** See documentation on OP_CreateTable for additional information. +*/ +case OP_CreateIndex: /* out2 */ +case OP_CreateTable: { /* out2 */ + int pgno; + int flags; + Db *pDb; + + pOut = out2Prerelease(p, pOp); + pgno = 0; + assert( pOp->p1>=0 && pOp->p1nDb ); + assert( DbMaskTest(p->btreeMask, pOp->p1) ); + assert( p->readOnly==0 ); + pDb = &db->aDb[pOp->p1]; + assert( pDb->pBt!=0 ); + if( pOp->opcode==OP_CreateTable ){ + /* flags = BTREE_INTKEY; */ + flags = BTREE_INTKEY; + }else{ + flags = BTREE_BLOBKEY; + } + rc = sqlite3BtreeCreateTable(pDb->pBt, &pgno, flags); + if( rc ) goto abort_due_to_error; + pOut->u.i = pgno; + break; +} + +/* Opcode: ParseSchema P1 * * P4 * +** +** Read and parse all entries from the SQLITE_MASTER table of database P1 +** that match the WHERE clause P4. +** +** This opcode invokes the parser to create a new virtual machine, +** then runs the new virtual machine. It is thus a re-entrant opcode. +*/ +case OP_ParseSchema: { + int iDb; + const char *zMaster; + char *zSql; + InitData initData; + + /* Any prepared statement that invokes this opcode will hold mutexes + ** on every btree. This is a prerequisite for invoking + ** sqlite3InitCallback(). 
+ */ +#ifdef SQLITE_DEBUG + for(iDb=0; iDbnDb; iDb++){ + assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); + } +#endif + + iDb = pOp->p1; + assert( iDb>=0 && iDbnDb ); + assert( DbHasProperty(db, iDb, DB_SchemaLoaded) ); + /* Used to be a conditional */ { + zMaster = MASTER_NAME; + initData.db = db; + initData.iDb = pOp->p1; + initData.pzErrMsg = &p->zErrMsg; + zSql = sqlite3MPrintf(db, + "SELECT name, rootpage, sql FROM '%q'.%s WHERE %s ORDER BY rowid", + db->aDb[iDb].zDbSName, zMaster, pOp->p4.z); + if( zSql==0 ){ + rc = SQLITE_NOMEM_BKPT; + }else{ + assert( db->init.busy==0 ); + db->init.busy = 1; + initData.rc = SQLITE_OK; + assert( !db->mallocFailed ); + rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); + if( rc==SQLITE_OK ) rc = initData.rc; + sqlite3DbFree(db, zSql); + db->init.busy = 0; + } + } + if( rc ){ + sqlite3ResetAllSchemasOfConnection(db); + if( rc==SQLITE_NOMEM ){ + goto no_mem; + } + goto abort_due_to_error; + } + break; +} + +#if !defined(SQLITE_OMIT_ANALYZE) +/* Opcode: LoadAnalysis P1 * * * * +** +** Read the sqlite_stat1 table for database P1 and load the content +** of that table into the internal index hash table. This will cause +** the analysis to be used when preparing all subsequent queries. +*/ +case OP_LoadAnalysis: { + assert( pOp->p1>=0 && pOp->p1nDb ); + rc = sqlite3AnalysisLoad(db, pOp->p1); + if( rc ) goto abort_due_to_error; + break; +} +#endif /* !defined(SQLITE_OMIT_ANALYZE) */ + +/* Opcode: DropTable P1 * * P4 * +** +** Remove the internal (in-memory) data structures that describe +** the table named P4 in database P1. This is called after a table +** is dropped from disk (using the Destroy opcode) in order to keep +** the internal representation of the +** schema consistent with what is on disk. +*/ +case OP_DropTable: { + sqlite3UnlinkAndDeleteTable(db, pOp->p1, pOp->p4.z); + break; +} + +/* Opcode: DropIndex P1 * * P4 * +** +** Remove the internal (in-memory) data structures that describe +** the index named P4 in database P1. This is called after an index +** is dropped from disk (using the Destroy opcode) +** in order to keep the internal representation of the +** schema consistent with what is on disk. +*/ +case OP_DropIndex: { + sqlite3UnlinkAndDeleteIndex(db, pOp->p1, pOp->p4.z); + break; +} + +/* Opcode: DropTrigger P1 * * P4 * +** +** Remove the internal (in-memory) data structures that describe +** the trigger named P4 in database P1. This is called after a trigger +** is dropped from disk (using the Destroy opcode) in order to keep +** the internal representation of the +** schema consistent with what is on disk. +*/ +case OP_DropTrigger: { + sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p4.z); + break; +} + + +#ifndef SQLITE_OMIT_INTEGRITY_CHECK +/* Opcode: IntegrityCk P1 P2 P3 P4 P5 +** +** Do an analysis of the currently open database. Store in +** register P1 the text of an error message describing any problems. +** If no problems are found, store a NULL in register P1. +** +** The register P3 contains the maximum number of allowed errors. +** At most reg(P3) errors will be reported. +** In other words, the analysis stops as soon as reg(P1) errors are +** seen. Reg(P1) is updated with the number of errors remaining. +** +** The root page numbers of all tables in the database are integers +** stored in P4_INTARRAY argument. +** +** If P5 is not zero, the check is done on the auxiliary database +** file, not the main database file. +** +** This opcode is used to implement the integrity_check pragma. 
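+**
+** For example, "PRAGMA integrity_check(10)" compiles down to this opcode
+** with reg(P3) initialized to 10, so at most ten problem reports are
+** collected before the analysis stops.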
+*/ +case OP_IntegrityCk: { + int nRoot; /* Number of tables to check. (Number of root pages.) */ + int *aRoot; /* Array of rootpage numbers for tables to be checked */ + int nErr; /* Number of errors reported */ + char *z; /* Text of the error report */ + Mem *pnErr; /* Register keeping track of errors remaining */ + + assert( p->bIsReader ); + nRoot = pOp->p2; + aRoot = pOp->p4.ai; + assert( nRoot>0 ); + assert( aRoot[nRoot]==0 ); + assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); + pnErr = &aMem[pOp->p3]; + assert( (pnErr->flags & MEM_Int)!=0 ); + assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 ); + pIn1 = &aMem[pOp->p1]; + assert( pOp->p5nDb ); + assert( DbMaskTest(p->btreeMask, pOp->p5) ); + z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, aRoot, nRoot, + (int)pnErr->u.i, &nErr); + pnErr->u.i -= nErr; + sqlite3VdbeMemSetNull(pIn1); + if( nErr==0 ){ + assert( z==0 ); + }else if( z==0 ){ + goto no_mem; + }else{ + sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free); + } + UPDATE_MAX_BLOBSIZE(pIn1); + sqlite3VdbeChangeEncoding(pIn1, encoding); + break; +} +#endif /* SQLITE_OMIT_INTEGRITY_CHECK */ + +/* Opcode: RowSetAdd P1 P2 * * * +** Synopsis: rowset(P1)=r[P2] +** +** Insert the integer value held by register P2 into a boolean index +** held in register P1. +** +** An assertion fails if P2 is not an integer. +*/ +case OP_RowSetAdd: { /* in1, in2 */ + pIn1 = &aMem[pOp->p1]; + pIn2 = &aMem[pOp->p2]; + assert( (pIn2->flags & MEM_Int)!=0 ); + if( (pIn1->flags & MEM_RowSet)==0 ){ + sqlite3VdbeMemSetRowSet(pIn1); + if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem; + } + sqlite3RowSetInsert(pIn1->u.pRowSet, pIn2->u.i); + break; +} + +/* Opcode: RowSetRead P1 P2 P3 * * +** Synopsis: r[P3]=rowset(P1) +** +** Extract the smallest value from boolean index P1 and put that value into +** register P3. Or, if boolean index P1 is initially empty, leave P3 +** unchanged and jump to instruction P2. +*/ +case OP_RowSetRead: { /* jump, in1, out3 */ + i64 val; + + pIn1 = &aMem[pOp->p1]; + if( (pIn1->flags & MEM_RowSet)==0 + || sqlite3RowSetNext(pIn1->u.pRowSet, &val)==0 + ){ + /* The boolean index is empty */ + sqlite3VdbeMemSetNull(pIn1); + VdbeBranchTaken(1,2); + goto jump_to_p2_and_check_for_interrupt; + }else{ + /* A value was pulled from the index */ + VdbeBranchTaken(0,2); + sqlite3VdbeMemSetInt64(&aMem[pOp->p3], val); + } + goto check_for_interrupt; +} + +/* Opcode: RowSetTest P1 P2 P3 P4 +** Synopsis: if r[P3] in rowset(P1) goto P2 +** +** Register P3 is assumed to hold a 64-bit integer value. If register P1 +** contains a RowSet object and that RowSet object contains +** the value held in P3, jump to register P2. Otherwise, insert the +** integer in P3 into the RowSet and continue on to the +** next opcode. +** +** The RowSet object is optimized for the case where successive sets +** of integers, where each set contains no duplicates. Each set +** of values is identified by a unique P4 value. The first set +** must have P4==0, the final set P4=-1. P4 must be either -1 or +** non-negative. For non-negative values of P4 only the lower 4 +** bits are significant. 
+** +** This allows optimizations: (a) when P4==0 there is no need to test +** the rowset object for P3, as it is guaranteed not to contain it, +** (b) when P4==-1 there is no need to insert the value, as it will +** never be tested for, and (c) when a value that is part of set X is +** inserted, there is no need to search to see if the same value was +** previously inserted as part of set X (only if it was previously +** inserted as part of some other set). +*/ +case OP_RowSetTest: { /* jump, in1, in3 */ + int iSet; + int exists; + + pIn1 = &aMem[pOp->p1]; + pIn3 = &aMem[pOp->p3]; + iSet = pOp->p4.i; + assert( pIn3->flags&MEM_Int ); + + /* If there is anything other than a rowset object in memory cell P1, + ** delete it now and initialize P1 with an empty rowset + */ + if( (pIn1->flags & MEM_RowSet)==0 ){ + sqlite3VdbeMemSetRowSet(pIn1); + if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem; + } + + assert( pOp->p4type==P4_INT32 ); + assert( iSet==-1 || iSet>=0 ); + if( iSet ){ + exists = sqlite3RowSetTest(pIn1->u.pRowSet, iSet, pIn3->u.i); + VdbeBranchTaken(exists!=0,2); + if( exists ) goto jump_to_p2; + } + if( iSet>=0 ){ + sqlite3RowSetInsert(pIn1->u.pRowSet, pIn3->u.i); + } + break; +} + + +#ifndef SQLITE_OMIT_TRIGGER + +/* Opcode: Program P1 P2 P3 P4 P5 +** +** Execute the trigger program passed as P4 (type P4_SUBPROGRAM). +** +** P1 contains the address of the memory cell that contains the first memory +** cell in an array of values used as arguments to the sub-program. P2 +** contains the address to jump to if the sub-program throws an IGNORE +** exception using the RAISE() function. Register P3 contains the address +** of a memory cell in this (the parent) VM that is used to allocate the +** memory required by the sub-vdbe at runtime. +** +** P4 is a pointer to the VM containing the trigger program. +** +** If P5 is non-zero, then recursive program invocation is enabled. +*/ +case OP_Program: { /* jump */ + int nMem; /* Number of memory registers for sub-program */ + int nByte; /* Bytes of runtime space required for sub-program */ + Mem *pRt; /* Register to allocate runtime space */ + Mem *pMem; /* Used to iterate through memory cells */ + Mem *pEnd; /* Last memory cell in new array */ + VdbeFrame *pFrame; /* New vdbe frame to execute in */ + SubProgram *pProgram; /* Sub-program to execute */ + void *t; /* Token identifying trigger */ + + pProgram = pOp->p4.pProgram; + pRt = &aMem[pOp->p3]; + assert( pProgram->nOp>0 ); + + /* If the p5 flag is clear, then recursive invocation of triggers is + ** disabled for backwards compatibility (p5 is set if this sub-program + ** is really a trigger, not a foreign key action, and the flag set + ** and cleared by the "PRAGMA recursive_triggers" command is clear). + ** + ** It is recursive invocation of triggers, at the SQL level, that is + ** disabled. In some cases a single trigger may generate more than one + ** SubProgram (if the trigger may be executed with more than one different + ** ON CONFLICT algorithm). SubProgram structures associated with a + ** single trigger all have the same value for the SubProgram.token + ** variable. 
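+  **
+  ** A sketch of the case being guarded (illustrative SQL only):
+  **
+  **     CREATE TRIGGER t1r AFTER INSERT ON t1
+  **     BEGIN INSERT INTO t1 VALUES(new.x+1); END;
+  **
+  ** With recursive triggers disabled (the default), P5 is set, and the
+  ** frame walk below finds the matching SubProgram.token already on the
+  ** stack, so the nested invocation is skipped rather than recursing.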
*/ + if( pOp->p5 ){ + t = pProgram->token; + for(pFrame=p->pFrame; pFrame && pFrame->token!=t; pFrame=pFrame->pParent); + if( pFrame ) break; + } + + if( p->nFrame>=db->aLimit[SQLITE_LIMIT_TRIGGER_DEPTH] ){ + rc = SQLITE_ERROR; + sqlite3VdbeError(p, "too many levels of trigger recursion"); + goto abort_due_to_error; + } + + /* Register pRt is used to store the memory required to save the state + ** of the current program, and the memory required at runtime to execute + ** the trigger program. If this trigger has been fired before, then pRt + ** is already allocated. Otherwise, it must be initialized. */ + if( (pRt->flags&MEM_Frame)==0 ){ + /* SubProgram.nMem is set to the number of memory cells used by the + ** program stored in SubProgram.aOp. As well as these, one memory + ** cell is required for each cursor used by the program. Set local + ** variable nMem (and later, VdbeFrame.nChildMem) to this value. + */ + nMem = pProgram->nMem + pProgram->nCsr; + assert( nMem>0 ); + if( pProgram->nCsr==0 ) nMem++; + nByte = ROUND8(sizeof(VdbeFrame)) + + nMem * sizeof(Mem) + + pProgram->nCsr * sizeof(VdbeCursor *); + pFrame = sqlite3DbMallocZero(db, nByte); + if( !pFrame ){ + goto no_mem; + } + sqlite3VdbeMemRelease(pRt); + pRt->flags = MEM_Frame; + pRt->u.pFrame = pFrame; + + pFrame->v = p; + pFrame->nChildMem = nMem; + pFrame->nChildCsr = pProgram->nCsr; + pFrame->pc = (int)(pOp - aOp); + pFrame->aMem = p->aMem; + pFrame->nMem = p->nMem; + pFrame->apCsr = p->apCsr; + pFrame->nCursor = p->nCursor; + pFrame->aOp = p->aOp; + pFrame->nOp = p->nOp; + pFrame->token = pProgram->token; +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + pFrame->anExec = p->anExec; +#endif + + pEnd = &VdbeFrameMem(pFrame)[pFrame->nChildMem]; + for(pMem=VdbeFrameMem(pFrame); pMem!=pEnd; pMem++){ + pMem->flags = MEM_Undefined; + pMem->db = db; + } + }else{ + pFrame = pRt->u.pFrame; + assert( pProgram->nMem+pProgram->nCsr==pFrame->nChildMem + || (pProgram->nCsr==0 && pProgram->nMem+1==pFrame->nChildMem) ); + assert( pProgram->nCsr==pFrame->nChildCsr ); + assert( (int)(pOp - aOp)==pFrame->pc ); + } + + p->nFrame++; + pFrame->pParent = p->pFrame; + pFrame->lastRowid = db->lastRowid; + pFrame->nChange = p->nChange; + pFrame->nDbChange = p->db->nChange; + assert( pFrame->pAuxData==0 ); + pFrame->pAuxData = p->pAuxData; + p->pAuxData = 0; + p->nChange = 0; + p->pFrame = pFrame; + p->aMem = aMem = VdbeFrameMem(pFrame); + p->nMem = pFrame->nChildMem; + p->nCursor = (u16)pFrame->nChildCsr; + p->apCsr = (VdbeCursor **)&aMem[p->nMem]; + p->aOp = aOp = pProgram->aOp; + p->nOp = pProgram->nOp; +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + p->anExec = 0; +#endif + pOp = &aOp[-1]; + + break; +} + +/* Opcode: Param P1 P2 * * * +** +** This opcode is only ever present in sub-programs called via the +** OP_Program instruction. Copy a value currently stored in a memory +** cell of the calling (parent) frame to cell P2 in the current frames +** address space. This is used by trigger programs to access the new.* +** and old.* values. +** +** The address of the cell in the parent frame is determined by adding +** the value of the P1 argument to the value of the P1 argument to the +** calling OP_Program instruction. 
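+**
+** For example (values chosen purely for illustration): if the OP_Program
+** that invoked this sub-program had P1==5 and this OP_Param has P1==2,
+** then parent cell 5+2==7 is the one copied into register P2.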
+*/
+case OP_Param: {           /* out2 */
+  VdbeFrame *pFrame;
+  Mem *pIn;
+  pOut = out2Prerelease(p, pOp);
+  pFrame = p->pFrame;
+  pIn = &pFrame->aMem[pOp->p1 + pFrame->aOp[pFrame->pc].p1];
+  sqlite3VdbeMemShallowCopy(pOut, pIn, MEM_Ephem);
+  break;
+}
+
+#endif /* #ifndef SQLITE_OMIT_TRIGGER */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+/* Opcode: FkCounter P1 P2 * * *
+** Synopsis: fkctr[P1]+=P2
+**
+** Increment a "constraint counter" by P2 (P2 may be negative or positive).
+** If P1 is non-zero, the database constraint counter is incremented
+** (deferred foreign key constraints).  Otherwise, if P1 is zero, the
+** statement counter is incremented (immediate foreign key constraints).
+*/
+case OP_FkCounter: {
+  if( db->flags & SQLITE_DeferFKs ){
+    db->nDeferredImmCons += pOp->p2;
+  }else if( pOp->p1 ){
+    db->nDeferredCons += pOp->p2;
+  }else{
+    p->nFkConstraint += pOp->p2;
+  }
+  break;
+}
+
+/* Opcode: FkIfZero P1 P2 * * *
+** Synopsis: if fkctr[P1]==0 goto P2
+**
+** This opcode tests if a foreign key constraint-counter is currently zero.
+** If so, jump to instruction P2.  Otherwise, fall through to the next
+** instruction.
+**
+** If P1 is non-zero, then the jump is taken if the database constraint-counter
+** is zero (the one that counts deferred constraint violations).  If P1 is
+** zero, the jump is taken if the statement constraint-counter is zero
+** (immediate foreign key constraint violations).
+*/
+case OP_FkIfZero: {         /* jump */
+  if( pOp->p1 ){
+    VdbeBranchTaken(db->nDeferredCons==0 && db->nDeferredImmCons==0, 2);
+    if( db->nDeferredCons==0 && db->nDeferredImmCons==0 ) goto jump_to_p2;
+  }else{
+    VdbeBranchTaken(p->nFkConstraint==0 && db->nDeferredImmCons==0, 2);
+    if( p->nFkConstraint==0 && db->nDeferredImmCons==0 ) goto jump_to_p2;
+  }
+  break;
+}
+#endif /* #ifndef SQLITE_OMIT_FOREIGN_KEY */
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+/* Opcode: MemMax P1 P2 * * *
+** Synopsis: r[P1]=max(r[P1],r[P2])
+**
+** P1 is a register in the root frame of this VM (the root frame is
+** different from the current frame if this instruction is being executed
+** within a sub-program).  Set the value of register P1 to the maximum of
+** its current value and the value in register P2.
+**
+** This instruction throws an error if the memory cell is not initially
+** an integer.
+*/
+case OP_MemMax: {        /* in2 */
+  VdbeFrame *pFrame;
+  if( p->pFrame ){
+    for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+    pIn1 = &pFrame->aMem[pOp->p1];
+  }else{
+    pIn1 = &aMem[pOp->p1];
+  }
+  assert( memIsValid(pIn1) );
+  sqlite3VdbeMemIntegerify(pIn1);
+  pIn2 = &aMem[pOp->p2];
+  sqlite3VdbeMemIntegerify(pIn2);
+  if( pIn1->u.i<pIn2->u.i){
+    pIn1->u.i = pIn2->u.i;
+  }
+  break;
+}
+#endif /* SQLITE_OMIT_AUTOINCREMENT */
+
+/* Opcode: IfPos P1 P2 P3 * *
+** Synopsis: if r[P1]>0 then r[P1]-=P3, goto P2
+**
+** Register P1 must contain an integer.
+** If the value of register P1 is 1 or greater, subtract P3 from the
+** value in P1 and jump to P2.
+**
+** If the initial value of register P1 is less than 1, then the
+** value is unchanged and control passes through to the next instruction.
+*/
+case OP_IfPos: {        /* jump, in1 */
+  pIn1 = &aMem[pOp->p1];
+  assert( pIn1->flags&MEM_Int );
+  VdbeBranchTaken( pIn1->u.i>0, 2);
+  if( pIn1->u.i>0 ){
+    pIn1->u.i -= pOp->p3;
+    goto jump_to_p2;
+  }
+  break;
+}
+
+/* Opcode: OffsetLimit P1 P2 P3 * *
+** Synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)
+**
+** This opcode performs a commonly used computation associated with
+** LIMIT and OFFSET processing.  r[P1] holds the limit counter.  r[P3]
+** holds the offset counter.  The opcode computes the combined value
+** of the LIMIT and OFFSET and stores that value in r[P2].  The r[P2]
+** value computed is the total number of rows that will need to be
+** visited in order to complete the query.
+**
+** If r[P3] is zero or negative, that means there is no OFFSET
+** and r[P2] is set to be the value of the LIMIT, r[P1].
+**
+** If r[P1] is zero or negative, that means there is no LIMIT
+** and r[P2] is set to -1.
+**
+** Otherwise, r[P2] is set to the sum of r[P1] and r[P3].
+*/
+case OP_OffsetLimit: {    /* in1, out2, in3 */
+  i64 x;
+  pIn1 = &aMem[pOp->p1];
+  pIn3 = &aMem[pOp->p3];
+  pOut = out2Prerelease(p, pOp);
+  assert( pIn1->flags & MEM_Int );
+  assert( pIn3->flags & MEM_Int );
+  x = pIn1->u.i;
+  if( x<=0 || sqlite3AddInt64(&x, pIn3->u.i>0?pIn3->u.i:0) ){
+    /* If the LIMIT is less than or equal to zero, loop forever.  This
+    ** is documented.  But also, if the LIMIT+OFFSET exceeds 2^63 then
+    ** also loop forever.  This is undocumented.  In fact, one could argue
+    ** that the loop should terminate.  But assuming 1 billion iterations
+    ** per second (far exceeding the capabilities of any current hardware)
+    ** it would take nearly 300 years to actually reach the limit.  So
+    ** looping forever is a reasonable approximation. */
+    pOut->u.i = -1;
+  }else{
+    pOut->u.i = x;
+  }
+  break;
+}
+
+/* Opcode: IfNotZero P1 P2 * * *
+** Synopsis: if r[P1]!=0 then r[P1]--, goto P2
+**
+** Register P1 must contain an integer.  If the content of register P1 is
+** initially greater than zero, then decrement the value in register P1.
+** If it is non-zero (negative or positive) then also jump to P2.
+** If register P1 is initially zero, leave it unchanged and fall through.
+*/
+case OP_IfNotZero: {        /* jump, in1 */
+  pIn1 = &aMem[pOp->p1];
+  assert( pIn1->flags&MEM_Int );
+  VdbeBranchTaken(pIn1->u.i<0, 2);
+  if( pIn1->u.i ){
+     if( pIn1->u.i>0 ) pIn1->u.i--;
+     goto jump_to_p2;
+  }
+  break;
+}
+
+/* Opcode: DecrJumpZero P1 P2 * * *
+** Synopsis: if (--r[P1])==0 goto P2
+**
+** Register P1 must hold an integer.  Decrement the value in P1
+** and jump to P2 if the new value is exactly zero.
+*/
+case OP_DecrJumpZero: {      /* jump, in1 */
+  pIn1 = &aMem[pOp->p1];
+  assert( pIn1->flags&MEM_Int );
+  if( pIn1->u.i>SMALLEST_INT64 ) pIn1->u.i--;
+  VdbeBranchTaken(pIn1->u.i==0, 2);
+  if( pIn1->u.i==0 ) goto jump_to_p2;
+  break;
+}
+
+
+/* Opcode: AggStep0 * P2 P3 P4 P5
+** Synopsis: accum=r[P3] step(r[P2@P5])
+**
+** Execute the step function for an aggregate.  The
+** function has P5 arguments.  P4 is a pointer to the FuncDef
+** structure that specifies the function.  Register P3 is the
+** accumulator.
+**
+** The P5 arguments are taken from register P2 and its
+** successors.
+*/
+/* Opcode: AggStep * P2 P3 P4 P5
+** Synopsis: accum=r[P3] step(r[P2@P5])
+**
+** Execute the step function for an aggregate.  The
+** function has P5 arguments.  P4 is a pointer to an sqlite3_context
+** object that is used to run the function.  Register P3 is the
+** accumulator.
+**
+** The P5 arguments are taken from register P2 and its
+** successors.
+**
+** This opcode is initially coded as OP_AggStep0.  On first evaluation,
+** the FuncDef stored in P4 is converted into an sqlite3_context and
+** the opcode is changed.  In this way, the initialization of the
+** sqlite3_context only happens once, instead of on each call to the
+** step function.
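+**
+** As an illustrative sketch, "SELECT count(*), max(x) FROM t1" runs one
+** AggStep0/AggStep per input row for each aggregate, accumulating into
+** two distinct P3 registers, and then a single OP_AggFinal per aggregate
+** once the scan loop ends.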
+*/ +case OP_AggStep0: { + int n; + sqlite3_context *pCtx; + + assert( pOp->p4type==P4_FUNCDEF ); + n = pOp->p5; + assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); + assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem+1 - p->nCursor)+1) ); + assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); + pCtx = sqlite3DbMallocRawNN(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*)); + if( pCtx==0 ) goto no_mem; + pCtx->pMem = 0; + pCtx->pFunc = pOp->p4.pFunc; + pCtx->iOp = (int)(pOp - aOp); + pCtx->pVdbe = p; + pCtx->argc = n; + pOp->p4type = P4_FUNCCTX; + pOp->p4.pCtx = pCtx; + pOp->opcode = OP_AggStep; + /* Fall through into OP_AggStep */ +} +case OP_AggStep: { + int i; + sqlite3_context *pCtx; + Mem *pMem; + Mem t; + + assert( pOp->p4type==P4_FUNCCTX ); + pCtx = pOp->p4.pCtx; + pMem = &aMem[pOp->p3]; + + /* If this function is inside of a trigger, the register array in aMem[] + ** might change from one evaluation to the next. The next block of code + ** checks to see if the register array has changed, and if so it + ** reinitializes the relavant parts of the sqlite3_context object */ + if( pCtx->pMem != pMem ){ + pCtx->pMem = pMem; + for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i]; + } + +#ifdef SQLITE_DEBUG + for(i=0; iargc; i++){ + assert( memIsValid(pCtx->argv[i]) ); + REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]); + } +#endif + + pMem->n++; + sqlite3VdbeMemInit(&t, db, MEM_Null); + pCtx->pOut = &t; + pCtx->fErrorOrAux = 0; + pCtx->skipFlag = 0; + (pCtx->pFunc->xSFunc)(pCtx,pCtx->argc,pCtx->argv); /* IMP: R-24505-23230 */ + if( pCtx->fErrorOrAux ){ + if( pCtx->isError ){ + sqlite3VdbeError(p, "%s", sqlite3_value_text(&t)); + rc = pCtx->isError; + } + sqlite3VdbeMemRelease(&t); + if( rc ) goto abort_due_to_error; + }else{ + assert( t.flags==MEM_Null ); + } + if( pCtx->skipFlag ){ + assert( pOp[-1].opcode==OP_CollSeq ); + i = pOp[-1].p1; + if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1); + } + break; +} + +/* Opcode: AggFinal P1 P2 * P4 * +** Synopsis: accum=r[P1] N=P2 +** +** Execute the finalizer function for an aggregate. P1 is +** the memory location that is the accumulator for the aggregate. +** +** P2 is the number of arguments that the step function takes and +** P4 is a pointer to the FuncDef for this function. The P2 +** argument is not used by this opcode. It is only there to disambiguate +** functions that can take varying numbers of arguments. The +** P4 argument is only needed for the degenerate case where +** the step function was not previously called. +*/ +case OP_AggFinal: { + Mem *pMem; + assert( pOp->p1>0 && pOp->p1<=(p->nMem+1 - p->nCursor) ); + pMem = &aMem[pOp->p1]; + assert( (pMem->flags & ~(MEM_Null|MEM_Agg))==0 ); + rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc); + if( rc ){ + sqlite3VdbeError(p, "%s", sqlite3_value_text(pMem)); + goto abort_due_to_error; + } + sqlite3VdbeChangeEncoding(pMem, encoding); + UPDATE_MAX_BLOBSIZE(pMem); + if( sqlite3VdbeMemTooBig(pMem) ){ + goto too_big; + } + break; +} + +#ifndef SQLITE_OMIT_WAL +/* Opcode: Checkpoint P1 P2 P3 * * +** +** Checkpoint database P1. This is a no-op if P1 is not currently in +** WAL mode. Parameter P2 is one of SQLITE_CHECKPOINT_PASSIVE, FULL, +** RESTART, or TRUNCATE. Write 1 or 0 into mem[P3] if the checkpoint returns +** SQLITE_BUSY or not, respectively. Write the number of pages in the +** WAL after the checkpoint into mem[P3+1] and the number of pages +** in the WAL that have been checkpointed after the checkpoint +** completes into mem[P3+2]. 
However on an error, mem[P3+1] and +** mem[P3+2] are initialized to -1. +*/ +case OP_Checkpoint: { + int i; /* Loop counter */ + int aRes[3]; /* Results */ + Mem *pMem; /* Write results here */ + + assert( p->readOnly==0 ); + aRes[0] = 0; + aRes[1] = aRes[2] = -1; + assert( pOp->p2==SQLITE_CHECKPOINT_PASSIVE + || pOp->p2==SQLITE_CHECKPOINT_FULL + || pOp->p2==SQLITE_CHECKPOINT_RESTART + || pOp->p2==SQLITE_CHECKPOINT_TRUNCATE + ); + rc = sqlite3Checkpoint(db, pOp->p1, pOp->p2, &aRes[1], &aRes[2]); + if( rc ){ + if( rc!=SQLITE_BUSY ) goto abort_due_to_error; + rc = SQLITE_OK; + aRes[0] = 1; + } + for(i=0, pMem = &aMem[pOp->p3]; i<3; i++, pMem++){ + sqlite3VdbeMemSetInt64(pMem, (i64)aRes[i]); + } + break; +}; +#endif + +#ifndef SQLITE_OMIT_PRAGMA +/* Opcode: JournalMode P1 P2 P3 * * +** +** Change the journal mode of database P1 to P3. P3 must be one of the +** PAGER_JOURNALMODE_XXX values. If changing between the various rollback +** modes (delete, truncate, persist, off and memory), this is a simple +** operation. No IO is required. +** +** If changing into or out of WAL mode the procedure is more complicated. +** +** Write a string containing the final journal-mode to register P2. +*/ +case OP_JournalMode: { /* out2 */ + Btree *pBt; /* Btree to change journal mode of */ + Pager *pPager; /* Pager associated with pBt */ + int eNew; /* New journal mode */ + int eOld; /* The old journal mode */ +#ifndef SQLITE_OMIT_WAL + const char *zFilename; /* Name of database file for pPager */ +#endif + + pOut = out2Prerelease(p, pOp); + eNew = pOp->p3; + assert( eNew==PAGER_JOURNALMODE_DELETE + || eNew==PAGER_JOURNALMODE_TRUNCATE + || eNew==PAGER_JOURNALMODE_PERSIST + || eNew==PAGER_JOURNALMODE_OFF + || eNew==PAGER_JOURNALMODE_MEMORY + || eNew==PAGER_JOURNALMODE_WAL + || eNew==PAGER_JOURNALMODE_QUERY + ); + assert( pOp->p1>=0 && pOp->p1nDb ); + assert( p->readOnly==0 ); + + pBt = db->aDb[pOp->p1].pBt; + pPager = sqlite3BtreePager(pBt); + eOld = sqlite3PagerGetJournalMode(pPager); + if( eNew==PAGER_JOURNALMODE_QUERY ) eNew = eOld; + if( !sqlite3PagerOkToChangeJournalMode(pPager) ) eNew = eOld; + +#ifndef SQLITE_OMIT_WAL + zFilename = sqlite3PagerFilename(pPager, 1); + + /* Do not allow a transition to journal_mode=WAL for a database + ** in temporary storage or if the VFS does not support shared memory + */ + if( eNew==PAGER_JOURNALMODE_WAL + && (sqlite3Strlen30(zFilename)==0 /* Temp file */ + || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */ + ){ + eNew = eOld; + } + + if( (eNew!=eOld) + && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL) + ){ + if( !db->autoCommit || db->nVdbeRead>1 ){ + rc = SQLITE_ERROR; + sqlite3VdbeError(p, + "cannot change %s wal mode from within a transaction", + (eNew==PAGER_JOURNALMODE_WAL ? "into" : "out of") + ); + goto abort_due_to_error; + }else{ + + if( eOld==PAGER_JOURNALMODE_WAL ){ + /* If leaving WAL mode, close the log file. If successful, the call + ** to PagerCloseWal() checkpoints and deletes the write-ahead-log + ** file. An EXCLUSIVE lock may still be held on the database file + ** after a successful return. + */ + rc = sqlite3PagerCloseWal(pPager, db); + if( rc==SQLITE_OK ){ + sqlite3PagerSetJournalMode(pPager, eNew); + } + }else if( eOld==PAGER_JOURNALMODE_MEMORY ){ + /* Cannot transition directly from MEMORY to WAL. Use mode OFF + ** as an intermediate */ + sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF); + } + + /* Open a transaction on the database file. 
Regardless of the journal + ** mode, this transaction always uses a rollback journal. + */ + assert( sqlite3BtreeIsInTrans(pBt)==0 ); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1)); + } + } + } +#endif /* ifndef SQLITE_OMIT_WAL */ + + if( rc ) eNew = eOld; + eNew = sqlite3PagerSetJournalMode(pPager, eNew); + + pOut->flags = MEM_Str|MEM_Static|MEM_Term; + pOut->z = (char *)sqlite3JournalModename(eNew); + pOut->n = sqlite3Strlen30(pOut->z); + pOut->enc = SQLITE_UTF8; + sqlite3VdbeChangeEncoding(pOut, encoding); + if( rc ) goto abort_due_to_error; + break; +}; +#endif /* SQLITE_OMIT_PRAGMA */ + +#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) +/* Opcode: Vacuum P1 * * * * +** +** Vacuum the entire database P1. P1 is 0 for "main", and 2 or more +** for an attached database. The "temp" database may not be vacuumed. +*/ +case OP_Vacuum: { + assert( p->readOnly==0 ); + rc = sqlite3RunVacuum(&p->zErrMsg, db, pOp->p1); + if( rc ) goto abort_due_to_error; + break; +} +#endif + +#if !defined(SQLITE_OMIT_AUTOVACUUM) +/* Opcode: IncrVacuum P1 P2 * * * +** +** Perform a single step of the incremental vacuum procedure on +** the P1 database. If the vacuum has finished, jump to instruction +** P2. Otherwise, fall through to the next instruction. +*/ +case OP_IncrVacuum: { /* jump */ + Btree *pBt; + + assert( pOp->p1>=0 && pOp->p1nDb ); + assert( DbMaskTest(p->btreeMask, pOp->p1) ); + assert( p->readOnly==0 ); + pBt = db->aDb[pOp->p1].pBt; + rc = sqlite3BtreeIncrVacuum(pBt); + VdbeBranchTaken(rc==SQLITE_DONE,2); + if( rc ){ + if( rc!=SQLITE_DONE ) goto abort_due_to_error; + rc = SQLITE_OK; + goto jump_to_p2; + } + break; +} +#endif + +/* Opcode: Expire P1 * * * * +** +** Cause precompiled statements to expire. When an expired statement +** is executed using sqlite3_step() it will either automatically +** reprepare itself (if it was originally created using sqlite3_prepare_v2()) +** or it will fail with SQLITE_SCHEMA. +** +** If P1 is 0, then all SQL statements become expired. If P1 is non-zero, +** then only the currently executing statement is expired. +*/ +case OP_Expire: { + if( !pOp->p1 ){ + sqlite3ExpirePreparedStatements(db); + }else{ + p->expired = 1; + } + break; +} + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* Opcode: TableLock P1 P2 P3 P4 * +** Synopsis: iDb=P1 root=P2 write=P3 +** +** Obtain a lock on a particular table. This instruction is only used when +** the shared-cache feature is enabled. +** +** P1 is the index of the database in sqlite3.aDb[] of the database +** on which the lock is acquired. A readlock is obtained if P3==0 or +** a write lock if P3==1. +** +** P2 contains the root-page of the table to lock. +** +** P4 contains a pointer to the name of the table being locked. This is only +** used to generate an error message if the lock cannot be obtained. +*/ +case OP_TableLock: { + u8 isWriteLock = (u8)pOp->p3; + if( isWriteLock || 0==(db->flags&SQLITE_ReadUncommitted) ){ + int p1 = pOp->p1; + assert( p1>=0 && p1nDb ); + assert( DbMaskTest(p->btreeMask, p1) ); + assert( isWriteLock==0 || isWriteLock==1 ); + rc = sqlite3BtreeLockTable(db->aDb[p1].pBt, pOp->p2, isWriteLock); + if( rc ){ + if( (rc&0xFF)==SQLITE_LOCKED ){ + const char *z = pOp->p4.z; + sqlite3VdbeError(p, "database table is locked: %s", z); + } + goto abort_due_to_error; + } + } + break; +} +#endif /* SQLITE_OMIT_SHARED_CACHE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VBegin * * * P4 * +** +** P4 may be a pointer to an sqlite3_vtab structure. 
If so, call the +** xBegin method for that table. +** +** Also, whether or not P4 is set, check that this is not being called from +** within a callback to a virtual table xSync() method. If it is, the error +** code will be set to SQLITE_LOCKED. +*/ +case OP_VBegin: { + VTable *pVTab; + pVTab = pOp->p4.pVtab; + rc = sqlite3VtabBegin(db, pVTab); + if( pVTab ) sqlite3VtabImportErrmsg(p, pVTab->pVtab); + if( rc ) goto abort_due_to_error; + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VCreate P1 P2 * * * +** +** P2 is a register that holds the name of a virtual table in database +** P1. Call the xCreate method for that table. +*/ +case OP_VCreate: { + Mem sMem; /* For storing the record being decoded */ + const char *zTab; /* Name of the virtual table */ + + memset(&sMem, 0, sizeof(sMem)); + sMem.db = db; + /* Because P2 is always a static string, it is impossible for the + ** sqlite3VdbeMemCopy() to fail */ + assert( (aMem[pOp->p2].flags & MEM_Str)!=0 ); + assert( (aMem[pOp->p2].flags & MEM_Static)!=0 ); + rc = sqlite3VdbeMemCopy(&sMem, &aMem[pOp->p2]); + assert( rc==SQLITE_OK ); + zTab = (const char*)sqlite3_value_text(&sMem); + assert( zTab || db->mallocFailed ); + if( zTab ){ + rc = sqlite3VtabCallCreate(db, pOp->p1, zTab, &p->zErrMsg); + } + sqlite3VdbeMemRelease(&sMem); + if( rc ) goto abort_due_to_error; + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VDestroy P1 * * P4 * +** +** P4 is the name of a virtual table in database P1. Call the xDestroy method +** of that table. +*/ +case OP_VDestroy: { + db->nVDestroy++; + rc = sqlite3VtabCallDestroy(db, pOp->p1, pOp->p4.z); + db->nVDestroy--; + if( rc ) goto abort_due_to_error; + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VOpen P1 * * P4 * +** +** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. +** P1 is a cursor number. This opcode opens a cursor to the virtual +** table and stores that cursor in P1. +*/ +case OP_VOpen: { + VdbeCursor *pCur; + sqlite3_vtab_cursor *pVCur; + sqlite3_vtab *pVtab; + const sqlite3_module *pModule; + + assert( p->bIsReader ); + pCur = 0; + pVCur = 0; + pVtab = pOp->p4.pVtab->pVtab; + if( pVtab==0 || NEVER(pVtab->pModule==0) ){ + rc = SQLITE_LOCKED; + goto abort_due_to_error; + } + pModule = pVtab->pModule; + rc = pModule->xOpen(pVtab, &pVCur); + sqlite3VtabImportErrmsg(p, pVtab); + if( rc ) goto abort_due_to_error; + + /* Initialize sqlite3_vtab_cursor base class */ + pVCur->pVtab = pVtab; + + /* Initialize vdbe cursor object */ + pCur = allocateCursor(p, pOp->p1, 0, -1, CURTYPE_VTAB); + if( pCur ){ + pCur->uc.pVCur = pVCur; + pVtab->nRef++; + }else{ + assert( db->mallocFailed ); + pModule->xClose(pVCur); + goto no_mem; + } + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VFilter P1 P2 P3 P4 * +** Synopsis: iplan=r[P3] zplan='P4' +** +** P1 is a cursor opened using VOpen. P2 is an address to jump to if +** the filtered result set is empty. +** +** P4 is either NULL or a string that was generated by the xBestIndex +** method of the module. The interpretation of the P4 string is left +** to the module implementation. +** +** This opcode invokes the xFilter method on the virtual table specified +** by P1. The integer query plan parameter to xFilter is stored in register +** P3. Register P3+1 stores the argc parameter to be passed to the +** xFilter method. 
Registers P3+2..P3+1+argc are the argc
+** additional parameters which are passed to xFilter as argv. Register
+** P3+2 becomes argv[0] when passed to xFilter.
+**
+** A jump is made to P2 if the result set after filtering would be empty.
+*/
+case OP_VFilter: {   /* jump */
+  int nArg;
+  int iQuery;
+  const sqlite3_module *pModule;
+  Mem *pQuery;
+  Mem *pArgc;
+  sqlite3_vtab_cursor *pVCur;
+  sqlite3_vtab *pVtab;
+  VdbeCursor *pCur;
+  int res;
+  int i;
+  Mem **apArg;
+
+  pQuery = &aMem[pOp->p3];
+  pArgc = &pQuery[1];
+  pCur = p->apCsr[pOp->p1];
+  assert( memIsValid(pQuery) );
+  REGISTER_TRACE(pOp->p3, pQuery);
+  assert( pCur->eCurType==CURTYPE_VTAB );
+  pVCur = pCur->uc.pVCur;
+  pVtab = pVCur->pVtab;
+  pModule = pVtab->pModule;
+
+  /* Grab the index number and argc parameters */
+  assert( (pQuery->flags&MEM_Int)!=0 && pArgc->flags==MEM_Int );
+  nArg = (int)pArgc->u.i;
+  iQuery = (int)pQuery->u.i;
+
+  /* Invoke the xFilter method */
+  res = 0;
+  apArg = p->apArg;
+  for(i = 0; i<nArg; i++){
+    apArg[i] = &pArgc[i+1];
+  }
+  rc = pModule->xFilter(pVCur, iQuery, pOp->p4.z, nArg, apArg);
+  sqlite3VtabImportErrmsg(p, pVtab);
+  if( rc ) goto abort_due_to_error;
+  res = pModule->xEof(pVCur);
+  pCur->nullRow = 0;
+  VdbeBranchTaken(res!=0,2);
+  if( res ) goto jump_to_p2;
+  break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VColumn P1 P2 P3 * *
+** Synopsis: r[P3]=vcolumn(P2)
+**
+** Store the value of the P2-th column of
+** the row of the virtual-table that the
+** P1 cursor is pointing to into register P3.
+*/
+case OP_VColumn: {
+  sqlite3_vtab *pVtab;
+  const sqlite3_module *pModule;
+  Mem *pDest;
+  sqlite3_context sContext;
+
+  VdbeCursor *pCur = p->apCsr[pOp->p1];
+  assert( pCur->eCurType==CURTYPE_VTAB );
+  assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
+  pDest = &aMem[pOp->p3];
+  memAboutToChange(p, pDest);
+  if( pCur->nullRow ){
+    sqlite3VdbeMemSetNull(pDest);
+    break;
+  }
+  pVtab = pCur->uc.pVCur->pVtab;
+  pModule = pVtab->pModule;
+  assert( pModule->xColumn );
+  memset(&sContext, 0, sizeof(sContext));
+  sContext.pOut = pDest;
+  MemSetTypeFlag(pDest, MEM_Null);
+  rc = pModule->xColumn(pCur->uc.pVCur, &sContext, pOp->p2);
+  sqlite3VtabImportErrmsg(p, pVtab);
+  if( sContext.isError ){
+    rc = sContext.isError;
+  }
+  sqlite3VdbeChangeEncoding(pDest, encoding);
+  REGISTER_TRACE(pOp->p3, pDest);
+  UPDATE_MAX_BLOBSIZE(pDest);
+
+  if( sqlite3VdbeMemTooBig(pDest) ){
+    goto too_big;
+  }
+  if( rc ) goto abort_due_to_error;
+  break;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Opcode: VNext P1 P2 * * *
+**
+** Advance virtual table P1 to the next row in its result set and
+** jump to instruction P2. Or, if the virtual table has reached
+** the end of its result set, then fall through to the next instruction.
+*/
+case OP_VNext: {   /* jump */
+  sqlite3_vtab *pVtab;
+  const sqlite3_module *pModule;
+  int res;
+  VdbeCursor *pCur;
+
+  res = 0;
+  pCur = p->apCsr[pOp->p1];
+  assert( pCur->eCurType==CURTYPE_VTAB );
+  if( pCur->nullRow ){
+    break;
+  }
+  pVtab = pCur->uc.pVCur->pVtab;
+  pModule = pVtab->pModule;
+  assert( pModule->xNext );
+
+  /* Invoke the xNext() method of the module. There is no way for the
+  ** underlying implementation to return an error if one occurs during
+  ** xNext(). Instead, if an error occurs, true is returned (indicating that
+  ** data is available) and the error code returned when xColumn or
+  ** some other method is next invoked on the same virtual table cursor.
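+  **
+  ** As a hedged illustration (hypothetical module code, not part of this
+  ** file), a cursor written against that contract stashes the error and
+  ** reports success from xNext(); exCursor and exFetchRow are invented
+  ** names:
+  **
+  **    static int exNext(sqlite3_vtab_cursor *cur){
+  **      exCursor *p = (exCursor*)cur;
+  **      p->rcDeferred = exFetchRow(p, ++p->iRow); /* remember, don't fail */
+  **      return SQLITE_OK;                         /* claim data exists */
+  **    }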
+ */ + rc = pModule->xNext(pCur->uc.pVCur); + sqlite3VtabImportErrmsg(p, pVtab); + if( rc ) goto abort_due_to_error; + res = pModule->xEof(pCur->uc.pVCur); + VdbeBranchTaken(!res,2); + if( !res ){ + /* If there is data, jump to P2 */ + goto jump_to_p2_and_check_for_interrupt; + } + goto check_for_interrupt; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VRename P1 * * P4 * +** +** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. +** This opcode invokes the corresponding xRename method. The value +** in register P1 is passed as the zName argument to the xRename method. +*/ +case OP_VRename: { + sqlite3_vtab *pVtab; + Mem *pName; + + pVtab = pOp->p4.pVtab->pVtab; + pName = &aMem[pOp->p1]; + assert( pVtab->pModule->xRename ); + assert( memIsValid(pName) ); + assert( p->readOnly==0 ); + REGISTER_TRACE(pOp->p1, pName); + assert( pName->flags & MEM_Str ); + testcase( pName->enc==SQLITE_UTF8 ); + testcase( pName->enc==SQLITE_UTF16BE ); + testcase( pName->enc==SQLITE_UTF16LE ); + rc = sqlite3VdbeChangeEncoding(pName, SQLITE_UTF8); + if( rc ) goto abort_due_to_error; + rc = pVtab->pModule->xRename(pVtab, pName->z); + sqlite3VtabImportErrmsg(p, pVtab); + p->expired = 0; + if( rc ) goto abort_due_to_error; + break; +} +#endif + +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VUpdate P1 P2 P3 P4 P5 +** Synopsis: data=r[P3@P2] +** +** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. +** This opcode invokes the corresponding xUpdate method. P2 values +** are contiguous memory cells starting at P3 to pass to the xUpdate +** invocation. The value in register (P3+P2-1) corresponds to the +** p2th element of the argv array passed to xUpdate. +** +** The xUpdate method will do a DELETE or an INSERT or both. +** The argv[0] element (which corresponds to memory cell P3) +** is the rowid of a row to delete. If argv[0] is NULL then no +** deletion occurs. The argv[1] element is the rowid of the new +** row. This can be NULL to have the virtual table select the new +** rowid for itself. The subsequent elements in the array are +** the values of columns in the new row. +** +** If P2==1 then no insert is performed. argv[0] is the rowid of +** a row to delete. +** +** P1 is a boolean flag. If it is set to true and the xUpdate call +** is successful, then the value returned by sqlite3_last_insert_rowid() +** is set to the value of the rowid for the row just inserted. +** +** P5 is the error actions (OE_Replace, OE_Fail, OE_Ignore, etc) to +** apply in the case of a constraint failure on an insert or update. 
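+** For illustration only (assumed column names, following the documented
+** xUpdate contract), the three shapes of the argv array for a virtual
+** table with columns (a,b) are:
+**
+**    DELETE:   argc=1  argv[0]=rowid-to-delete
+**    INSERT:   argc=4  argv[0]=NULL, argv[1]=new-rowid-or-NULL,
+**                      argv[2]=a, argv[3]=b
+**    UPDATE:   argc=4  argv[0]=old-rowid, argv[1]=new-rowid,
+**                      argv[2]=a, argv[3]=b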
+*/ +case OP_VUpdate: { + sqlite3_vtab *pVtab; + const sqlite3_module *pModule; + int nArg; + int i; + sqlite_int64 rowid; + Mem **apArg; + Mem *pX; + + assert( pOp->p2==1 || pOp->p5==OE_Fail || pOp->p5==OE_Rollback + || pOp->p5==OE_Abort || pOp->p5==OE_Ignore || pOp->p5==OE_Replace + ); + assert( p->readOnly==0 ); + pVtab = pOp->p4.pVtab->pVtab; + if( pVtab==0 || NEVER(pVtab->pModule==0) ){ + rc = SQLITE_LOCKED; + goto abort_due_to_error; + } + pModule = pVtab->pModule; + nArg = pOp->p2; + assert( pOp->p4type==P4_VTAB ); + if( ALWAYS(pModule->xUpdate) ){ + u8 vtabOnConflict = db->vtabOnConflict; + apArg = p->apArg; + pX = &aMem[pOp->p3]; + for(i=0; ivtabOnConflict = pOp->p5; + rc = pModule->xUpdate(pVtab, nArg, apArg, &rowid); + db->vtabOnConflict = vtabOnConflict; + sqlite3VtabImportErrmsg(p, pVtab); + if( rc==SQLITE_OK && pOp->p1 ){ + assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) ); + db->lastRowid = rowid; + } + if( (rc&0xff)==SQLITE_CONSTRAINT && pOp->p4.pVtab->bConstraint ){ + if( pOp->p5==OE_Ignore ){ + rc = SQLITE_OK; + }else{ + p->errorAction = ((pOp->p5==OE_Replace) ? OE_Abort : pOp->p5); + } + }else{ + p->nChange++; + } + if( rc ) goto abort_due_to_error; + } + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* Opcode: Pagecount P1 P2 * * * +** +** Write the current number of pages in database P1 to memory cell P2. +*/ +case OP_Pagecount: { /* out2 */ + pOut = out2Prerelease(p, pOp); + pOut->u.i = sqlite3BtreeLastPage(db->aDb[pOp->p1].pBt); + break; +} +#endif + + +#ifndef SQLITE_OMIT_PAGER_PRAGMAS +/* Opcode: MaxPgcnt P1 P2 P3 * * +** +** Try to set the maximum page count for database P1 to the value in P3. +** Do not let the maximum page count fall below the current page count and +** do not change the maximum page count value if P3==0. +** +** Store the maximum page count after the change in register P2. +*/ +case OP_MaxPgcnt: { /* out2 */ + unsigned int newMax; + Btree *pBt; + + pOut = out2Prerelease(p, pOp); + pBt = db->aDb[pOp->p1].pBt; + newMax = 0; + if( pOp->p3 ){ + newMax = sqlite3BtreeLastPage(pBt); + if( newMax < (unsigned)pOp->p3 ) newMax = (unsigned)pOp->p3; + } + pOut->u.i = sqlite3BtreeMaxPageCount(pBt, newMax); + break; +} +#endif + + +/* Opcode: Init P1 P2 * P4 * +** Synopsis: Start at P2 +** +** Programs contain a single instance of this opcode as the very first +** opcode. +** +** If tracing is enabled (by the sqlite3_trace()) interface, then +** the UTF-8 string contained in P4 is emitted on the trace callback. +** Or if P4 is blank, use the string returned by sqlite3_sql(). +** +** If P2 is not zero, jump to instruction P2. +** +** Increment the value of P1 so that OP_Once opcodes will jump the +** first time they are evaluated for this run. +*/ +case OP_Init: { /* jump */ + char *zTrace; + int i; + + /* If the P4 argument is not NULL, then it must be an SQL comment string. + ** The "--" string is broken up to prevent false-positives with srcck1.c. + ** + ** This assert() provides evidence for: + ** EVIDENCE-OF: R-50676-09860 The callback can compute the same text that + ** would have been returned by the legacy sqlite3_trace() interface by + ** using the X argument when X begins with "--" and invoking + ** sqlite3_expanded_sql(P) otherwise. + */ + assert( pOp->p4.z==0 || strncmp(pOp->p4.z, "-" "- ", 3)==0 ); + assert( pOp==p->aOp ); /* Always instruction 0 */ + +#ifndef SQLITE_OMIT_TRACE + if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0 + && !p->doingRerun + && (zTrace = (pOp->p4.z ? 
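+/*
+** A hedged usage sketch (illustration, not part of the VDBE): the opcode
+** above is what "PRAGMA max_page_count" compiles to, and the clamping rule
+** means a request below the current size is ignored:
+**
+**    PRAGMA main.page_count;          -- suppose this reports 100
+**    PRAGMA main.max_page_count=50;   -- replies 100: cannot go below
+**    PRAGMA main.max_page_count=200;  -- replies 200: new limit stored
+*/
+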
+
+/* Opcode: Init P1 P2 * P4 *
+** Synopsis: Start at P2
+**
+** Programs contain a single instance of this opcode as the very first
+** opcode.
+**
+** If tracing is enabled (by the sqlite3_trace() interface), then
+** the UTF-8 string contained in P4 is emitted on the trace callback.
+** Or if P4 is blank, use the string returned by sqlite3_sql().
+**
+** If P2 is not zero, jump to instruction P2.
+**
+** Increment the value of P1 so that OP_Once opcodes will jump the
+** first time they are evaluated for this run.
+*/
+case OP_Init: {          /* jump */
+  char *zTrace;
+  int i;
+
+  /* If the P4 argument is not NULL, then it must be an SQL comment string.
+  ** The "--" string is broken up to prevent false-positives with srcck1.c.
+  **
+  ** This assert() provides evidence for:
+  ** EVIDENCE-OF: R-50676-09860 The callback can compute the same text that
+  ** would have been returned by the legacy sqlite3_trace() interface by
+  ** using the X argument when X begins with "--" and invoking
+  ** sqlite3_expanded_sql(P) otherwise.
+  */
+  assert( pOp->p4.z==0 || strncmp(pOp->p4.z, "-" "- ", 3)==0 );
+  assert( pOp==p->aOp );  /* Always instruction 0 */
+
+#ifndef SQLITE_OMIT_TRACE
+  if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0
+   && !p->doingRerun
+   && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
+  ){
+#ifndef SQLITE_OMIT_DEPRECATED
+    if( db->mTrace & SQLITE_TRACE_LEGACY ){
+      void (*x)(void*,const char*) = (void(*)(void*,const char*))db->xTrace;
+      char *z = sqlite3VdbeExpandSql(p, zTrace);
+      x(db->pTraceArg, z);
+      sqlite3_free(z);
+    }else
+#endif
+    {
+      (void)db->xTrace(SQLITE_TRACE_STMT, db->pTraceArg, p, zTrace);
+    }
+  }
+#ifdef SQLITE_USE_FCNTL_TRACE
+  zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql);
+  if( zTrace ){
+    int j;
+    for(j=0; j<db->nDb; j++){
+      if( DbMaskTest(p->btreeMask, j)==0 ) continue;
+      sqlite3_file_control(db, db->aDb[j].zDbSName, SQLITE_FCNTL_TRACE, zTrace);
+    }
+  }
+#endif /* SQLITE_USE_FCNTL_TRACE */
+#ifdef SQLITE_DEBUG
+  if( (db->flags & SQLITE_SqlTrace)!=0
+   && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
+  ){
+    sqlite3DebugPrintf("SQL-trace: %s\n", zTrace);
+  }
+#endif /* SQLITE_DEBUG */
+#endif /* SQLITE_OMIT_TRACE */
+  assert( pOp->p2>0 );
+  if( pOp->p1>=sqlite3GlobalConfig.iOnceResetThreshold ){
+    for(i=1; i<p->nOp; i++){
+      if( p->aOp[i].opcode==OP_Once ) p->aOp[i].p1 = 0;
+    }
+    pOp->p1 = 0;
+  }
+  pOp->p1++;
+  goto jump_to_p2;
+}
+
+#ifdef SQLITE_ENABLE_CURSOR_HINTS
+/* Opcode: CursorHint P1 * * P4 *
+**
+** Provide a hint to cursor P1 that it only needs to return rows that
+** satisfy the Expr in P4. TK_REGISTER terms in the P4 expression refer
+** to values currently held in registers. TK_COLUMN terms in the P4
+** expression refer to columns in the b-tree to which cursor P1 is pointing.
+*/
+case OP_CursorHint: {
+  VdbeCursor *pC;
+
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+  assert( pOp->p4type==P4_EXPR );
+  pC = p->apCsr[pOp->p1];
+  if( pC ){
+    assert( pC->eCurType==CURTYPE_BTREE );
+    sqlite3BtreeCursorHint(pC->uc.pCursor, BTREE_HINT_RANGE,
+                           pOp->p4.pExpr, aMem);
+  }
+  break;
+}
+#endif /* SQLITE_ENABLE_CURSOR_HINTS */
+
+/* Opcode: Noop * * * * *
+**
+** Do nothing. This instruction is often useful as a jump
+** destination.
+*/
+/*
+** The magic Explain opcode is only inserted when explain==2 (which
+** is to say when the EXPLAIN QUERY PLAN syntax is used.)
+** This opcode records information from the optimizer. It is
+** the same as a no-op. This opcode never appears in a real VM program.
+*/
+default: {          /* This is really OP_Noop and OP_Explain */
+  assert( pOp->opcode==OP_Noop || pOp->opcode==OP_Explain );
+  break;
+}
+
+/*****************************************************************************
+** The cases of the switch statement above this line should all be indented
+** by 6 spaces. But the left-most 6 spaces have been removed to improve the
+** readability. From this point on down, the normal indentation rules are
+** restored.
+*****************************************************************************/
+  }
+
+#ifdef VDBE_PROFILE
+  {
+    u64 endTime = sqlite3Hwtime();
+    if( endTime>start ) pOrigOp->cycles += endTime - start;
+    pOrigOp->cnt++;
+  }
+#endif
+
+  /* The following code adds nothing to the actual functionality
+  ** of the program. It is only here for testing and debugging.
+  ** On the other hand, it does burn CPU cycles every time through
+  ** the evaluator loop. So we can leave it out when NDEBUG is defined.
+ */ +#ifndef NDEBUG + assert( pOp>=&aOp[-1] && pOp<&aOp[p->nOp-1] ); + +#ifdef SQLITE_DEBUG + if( db->flags & SQLITE_VdbeTrace ){ + u8 opProperty = sqlite3OpcodeProperty[pOrigOp->opcode]; + if( rc!=0 ) printf("rc=%d\n",rc); + if( opProperty & (OPFLG_OUT2) ){ + registerTrace(pOrigOp->p2, &aMem[pOrigOp->p2]); + } + if( opProperty & OPFLG_OUT3 ){ + registerTrace(pOrigOp->p3, &aMem[pOrigOp->p3]); + } + } +#endif /* SQLITE_DEBUG */ +#endif /* NDEBUG */ + } /* The end of the for(;;) loop the loops through opcodes */ + + /* If we reach this point, it means that execution is finished with + ** an error of some kind. + */ +abort_due_to_error: + if( db->mallocFailed ) rc = SQLITE_NOMEM_BKPT; + assert( rc ); + if( p->zErrMsg==0 && rc!=SQLITE_IOERR_NOMEM ){ + sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc)); + } + p->rc = rc; + sqlite3SystemError(db, rc); + testcase( sqlite3GlobalConfig.xLog!=0 ); + sqlite3_log(rc, "statement aborts at %d: [%s] %s", + (int)(pOp - aOp), p->zSql, p->zErrMsg); + sqlite3VdbeHalt(p); + if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db); + rc = SQLITE_ERROR; + if( resetSchemaOnFault>0 ){ + sqlite3ResetOneSchema(db, resetSchemaOnFault-1); + } + + /* This is the only way out of this procedure. We have to + ** release the mutexes on btrees that were acquired at the + ** top. */ +vdbe_return: + testcase( nVmStep>0 ); + p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep; + sqlite3VdbeLeave(p); + assert( rc!=SQLITE_OK || nExtraDelete==0 + || sqlite3_strlike("DELETE%",p->zSql,0)!=0 + ); + return rc; + + /* Jump to here if a string or blob larger than SQLITE_MAX_LENGTH + ** is encountered. + */ +too_big: + sqlite3VdbeError(p, "string or blob too big"); + rc = SQLITE_TOOBIG; + goto abort_due_to_error; + + /* Jump to here if a malloc() fails. + */ +no_mem: + sqlite3OomFault(db); + sqlite3VdbeError(p, "out of memory"); + rc = SQLITE_NOMEM_BKPT; + goto abort_due_to_error; + + /* Jump to here if the sqlite3_interrupt() API sets the interrupt + ** flag. + */ +abort_due_to_interrupt: + assert( db->u1.isInterrupted ); + rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; + p->rc = rc; + sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc)); + goto abort_due_to_error; +} + + +/************** End of vdbe.c ************************************************/ +/************** Begin file vdbeblob.c ****************************************/ +/* +** 2007 May 1 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains code used to implement incremental BLOB I/O. +*/ + +/* #include "sqliteInt.h" */ +/* #include "vdbeInt.h" */ + +#ifndef SQLITE_OMIT_INCRBLOB + +/* +** Valid sqlite3_blob* handles point to Incrblob structures. +*/ +typedef struct Incrblob Incrblob; +struct Incrblob { + int nByte; /* Size of open blob, in bytes */ + int iOffset; /* Byte offset of blob in cursor data */ + u16 iCol; /* Table column this handle is open on */ + BtCursor *pCsr; /* Cursor pointing at blob row */ + sqlite3_stmt *pStmt; /* Statement holding cursor open */ + sqlite3 *db; /* The associated database */ + char *zDb; /* Database name */ + Table *pTab; /* Table object */ +}; + + +/* +** This function is used by both blob_open() and blob_reopen(). 
It seeks +** the b-tree cursor associated with blob handle p to point to row iRow. +** If successful, SQLITE_OK is returned and subsequent calls to +** sqlite3_blob_read() or sqlite3_blob_write() access the specified row. +** +** If an error occurs, or if the specified row does not exist or does not +** contain a value of type TEXT or BLOB in the column nominated when the +** blob handle was opened, then an error code is returned and *pzErr may +** be set to point to a buffer containing an error message. It is the +** responsibility of the caller to free the error message buffer using +** sqlite3DbFree(). +** +** If an error does occur, then the b-tree cursor is closed. All subsequent +** calls to sqlite3_blob_read(), blob_write() or blob_reopen() will +** immediately return SQLITE_ABORT. +*/ +static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ + int rc; /* Error code */ + char *zErr = 0; /* Error message */ + Vdbe *v = (Vdbe *)p->pStmt; + + /* Set the value of register r[1] in the SQL statement to integer iRow. + ** This is done directly as a performance optimization + */ + v->aMem[1].flags = MEM_Int; + v->aMem[1].u.i = iRow; + + /* If the statement has been run before (and is paused at the OP_ResultRow) + ** then back it up to the point where it does the OP_SeekRowid. This could + ** have been down with an extra OP_Goto, but simply setting the program + ** counter is faster. */ + if( v->pc>3 ){ + v->pc = 3; + rc = sqlite3VdbeExec(v); + }else{ + rc = sqlite3_step(p->pStmt); + } + if( rc==SQLITE_ROW ){ + VdbeCursor *pC = v->apCsr[0]; + u32 type = pC->nHdrParsed>p->iCol ? pC->aType[p->iCol] : 0; + testcase( pC->nHdrParsed==p->iCol ); + testcase( pC->nHdrParsed==p->iCol+1 ); + if( type<12 ){ + zErr = sqlite3MPrintf(p->db, "cannot open value of type %s", + type==0?"null": type==7?"real": "integer" + ); + rc = SQLITE_ERROR; + sqlite3_finalize(p->pStmt); + p->pStmt = 0; + }else{ + p->iOffset = pC->aType[p->iCol + pC->nField]; + p->nByte = sqlite3VdbeSerialTypeLen(type); + p->pCsr = pC->uc.pCursor; + sqlite3BtreeIncrblobCursor(p->pCsr); + } + } + + if( rc==SQLITE_ROW ){ + rc = SQLITE_OK; + }else if( p->pStmt ){ + rc = sqlite3_finalize(p->pStmt); + p->pStmt = 0; + if( rc==SQLITE_OK ){ + zErr = sqlite3MPrintf(p->db, "no such rowid: %lld", iRow); + rc = SQLITE_ERROR; + }else{ + zErr = sqlite3MPrintf(p->db, "%s", sqlite3_errmsg(p->db)); + } + } + + assert( rc!=SQLITE_OK || zErr==0 ); + assert( rc!=SQLITE_ROW && rc!=SQLITE_DONE ); + + *pzErr = zErr; + return rc; +} + +/* +** Open a blob handle. +*/ +SQLITE_API int sqlite3_blob_open( + sqlite3* db, /* The database connection */ + const char *zDb, /* The attached database containing the blob */ + const char *zTable, /* The table containing the blob */ + const char *zColumn, /* The column containing the blob */ + sqlite_int64 iRow, /* The row containing the glob */ + int wrFlag, /* True -> read/write access, false -> read-only */ + sqlite3_blob **ppBlob /* Handle for accessing the blob returned here */ +){ + int nAttempt = 0; + int iCol; /* Index of zColumn in row-record */ + int rc = SQLITE_OK; + char *zErr = 0; + Table *pTab; + Parse *pParse = 0; + Incrblob *pBlob = 0; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( ppBlob==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + *ppBlob = 0; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) || zTable==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + wrFlag = !!wrFlag; /* wrFlag = (wrFlag ? 
1 : 0);  */
+
+  sqlite3_mutex_enter(db->mutex);
+
+  pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob));
+  if( !pBlob ) goto blob_open_out;
+  pParse = sqlite3StackAllocRaw(db, sizeof(*pParse));
+  if( !pParse ) goto blob_open_out;
+
+  do {
+    memset(pParse, 0, sizeof(Parse));
+    pParse->db = db;
+    sqlite3DbFree(db, zErr);
+    zErr = 0;
+
+    sqlite3BtreeEnterAll(db);
+    pTab = sqlite3LocateTable(pParse, 0, zTable, zDb);
+    if( pTab && IsVirtual(pTab) ){
+      pTab = 0;
+      sqlite3ErrorMsg(pParse, "cannot open virtual table: %s", zTable);
+    }
+    if( pTab && !HasRowid(pTab) ){
+      pTab = 0;
+      sqlite3ErrorMsg(pParse, "cannot open table without rowid: %s", zTable);
+    }
+#ifndef SQLITE_OMIT_VIEW
+    if( pTab && pTab->pSelect ){
+      pTab = 0;
+      sqlite3ErrorMsg(pParse, "cannot open view: %s", zTable);
+    }
+#endif
+    if( !pTab ){
+      if( pParse->zErrMsg ){
+        sqlite3DbFree(db, zErr);
+        zErr = pParse->zErrMsg;
+        pParse->zErrMsg = 0;
+      }
+      rc = SQLITE_ERROR;
+      sqlite3BtreeLeaveAll(db);
+      goto blob_open_out;
+    }
+    pBlob->pTab = pTab;
+    pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName;
+
+    /* Now search pTab for the exact column. */
+    for(iCol=0; iCol<pTab->nCol; iCol++) {
+      if( sqlite3StrICmp(pTab->aCol[iCol].zName, zColumn)==0 ){
+        break;
+      }
+    }
+    if( iCol==pTab->nCol ){
+      sqlite3DbFree(db, zErr);
+      zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn);
+      rc = SQLITE_ERROR;
+      sqlite3BtreeLeaveAll(db);
+      goto blob_open_out;
+    }
+
+    /* If the value is being opened for writing, check that the
+    ** column is not indexed, and that it is not part of a foreign key.
+    */
+    if( wrFlag ){
+      const char *zFault = 0;
+      Index *pIdx;
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+      if( db->flags&SQLITE_ForeignKeys ){
+        /* Check that the column is not part of an FK child key definition. It
+        ** is not necessary to check if it is part of a parent key, as parent
+        ** key columns must be indexed. The check below will pick up this
+        ** case. */
+        FKey *pFKey;
+        for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+          int j;
+          for(j=0; j<pFKey->nCol; j++){
+            if( pFKey->aCol[j].iFrom==iCol ){
+              zFault = "foreign key";
+            }
+          }
+        }
+      }
+#endif
+      for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+        int j;
+        for(j=0; j<pIdx->nKeyCol; j++){
+          /* FIXME: Be smarter about indexes that use expressions */
+          if( pIdx->aiColumn[j]==iCol || pIdx->aiColumn[j]==XN_EXPR ){
+            zFault = "indexed";
+          }
+        }
+      }
+      if( zFault ){
+        sqlite3DbFree(db, zErr);
+        zErr = sqlite3MPrintf(db, "cannot open %s column for writing", zFault);
+        rc = SQLITE_ERROR;
+        sqlite3BtreeLeaveAll(db);
+        goto blob_open_out;
+      }
+    }
+
+    pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(pParse);
+    assert( pBlob->pStmt || db->mallocFailed );
+    if( pBlob->pStmt ){
+
+      /* This VDBE program seeks a btree cursor to the identified
+      ** db/table/row entry. The reason for using a vdbe program instead
+      ** of writing code to use the b-tree layer directly is that the
+      ** vdbe program will take advantage of the various transaction,
+      ** locking and error handling infrastructure built into the vdbe.
+      **
+      ** After seeking the cursor, the vdbe executes an OP_ResultRow.
+      ** Code external to the Vdbe then "borrows" the b-tree cursor and
+      ** uses it to implement the blob_read(), blob_write() and
+      ** blob_bytes() functions.
+      **
+      ** The sqlite3_blob_close() function finalizes the vdbe program,
+      ** which closes the b-tree cursor and (possibly) commits the
+      ** transaction.
+      */
+      static const int iLn = VDBE_OFFSET_LINENO(2);
+      static const VdbeOpList openBlob[] = {
+        {OP_TableLock,      0, 0, 0},  /* 0: Acquire a read or write lock */
+        {OP_OpenRead,       0, 0, 0},  /* 1: Open a cursor */
+        /* blobSeekToRow() will initialize r[1] to the desired rowid */
+        {OP_NotExists,      0, 5, 1},  /* 2: Seek the cursor to rowid=r[1] */
+        {OP_Column,         0, 0, 1},  /* 3  */
+        {OP_ResultRow,      1, 0, 0},  /* 4  */
+        {OP_Halt,           0, 0, 0},  /* 5  */
+      };
+      Vdbe *v = (Vdbe *)pBlob->pStmt;
+      int iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+      VdbeOp *aOp;
+
+      sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag,
+                           pTab->pSchema->schema_cookie,
+                           pTab->pSchema->iGeneration);
+      sqlite3VdbeChangeP5(v, 1);
+      aOp = sqlite3VdbeAddOpList(v, ArraySize(openBlob), openBlob, iLn);
+
+      /* Make sure a mutex is held on the table to be accessed */
+      sqlite3VdbeUsesBtree(v, iDb);
+
+      if( db->mallocFailed==0 ){
+        assert( aOp!=0 );
+        /* Configure the OP_TableLock instruction */
+#ifdef SQLITE_OMIT_SHARED_CACHE
+        aOp[0].opcode = OP_Noop;
+#else
+        aOp[0].p1 = iDb;
+        aOp[0].p2 = pTab->tnum;
+        aOp[0].p3 = wrFlag;
+        sqlite3VdbeChangeP4(v, 1, pTab->zName, P4_TRANSIENT);
+      }
+      if( db->mallocFailed==0 ){
+#endif
+
+        /* Remove either the OP_OpenWrite or OpenRead. Set the P2
+        ** parameter of the other to pTab->tnum. */
+        if( wrFlag ) aOp[1].opcode = OP_OpenWrite;
+        aOp[1].p2 = pTab->tnum;
+        aOp[1].p3 = iDb;
+
+        /* Configure the number of columns. Configure the cursor to
+        ** think that the table has one more column than it really
+        ** does. An OP_Column to retrieve this imaginary column will
+        ** always return an SQL NULL. This is useful because it means
+        ** we can invoke OP_Column to fill in the vdbe cursors type
+        ** and offset cache without causing any IO.
+        */
+        aOp[1].p4type = P4_INT32;
+        aOp[1].p4.i = pTab->nCol+1;
+        aOp[3].p2 = pTab->nCol;
+
+        pParse->nVar = 0;
+        pParse->nMem = 1;
+        pParse->nTab = 1;
+        sqlite3VdbeMakeReady(v, pParse);
+      }
+    }
+
+    pBlob->iCol = iCol;
+    pBlob->db = db;
+    sqlite3BtreeLeaveAll(db);
+    if( db->mallocFailed ){
+      goto blob_open_out;
+    }
+    rc = blobSeekToRow(pBlob, iRow, &zErr);
+  } while( (++nAttempt)<SQLITE_MAX_SCHEMA_RETRY && rc==SQLITE_SCHEMA );
+
+blob_open_out:
+  if( rc==SQLITE_OK && db->mallocFailed==0 ){
+    *ppBlob = (sqlite3_blob *)pBlob;
+  }else{
+    if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt);
+    sqlite3DbFree(db, pBlob);
+  }
+  sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr);
+  sqlite3DbFree(db, zErr);
+  sqlite3ParserReset(pParse);
+  sqlite3StackFree(db, pParse);
+  rc = sqlite3ApiExit(db, rc);
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
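+
+/*
+** A hedged usage sketch (illustration only, not part of this file): reading
+** an existing BLOB incrementally through a handle returned by the function
+** above. The database/table/column names and rowid are hypothetical.
+**
+**    sqlite3_blob *pBlob;
+**    char buf[100];
+**    int rc = sqlite3_blob_open(db, "main", "t1", "data", 42, 0, &pBlob);
+**    if( rc==SQLITE_OK ){
+**      rc = sqlite3_blob_read(pBlob, buf, sizeof(buf), 0); /* first 100 bytes */
+**      sqlite3_blob_close(pBlob);
+**    }
+*/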
+
+/*
+** Close a blob handle that was previously created using
+** sqlite3_blob_open().
+*/
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *pBlob){
+  Incrblob *p = (Incrblob *)pBlob;
+  int rc;
+  sqlite3 *db;
+
+  if( p ){
+    db = p->db;
+    sqlite3_mutex_enter(db->mutex);
+    rc = sqlite3_finalize(p->pStmt);
+    sqlite3DbFree(db, p);
+    sqlite3_mutex_leave(db->mutex);
+  }else{
+    rc = SQLITE_OK;
+  }
+  return rc;
+}
+
+/*
+** Perform a read or write operation on a blob
+*/
+static int blobReadWrite(
+  sqlite3_blob *pBlob,
+  void *z,
+  int n,
+  int iOffset,
+  int (*xCall)(BtCursor*, u32, u32, void*)
+){
+  int rc;
+  Incrblob *p = (Incrblob *)pBlob;
+  Vdbe *v;
+  sqlite3 *db;
+
+  if( p==0 ) return SQLITE_MISUSE_BKPT;
+  db = p->db;
+  sqlite3_mutex_enter(db->mutex);
+  v = (Vdbe*)p->pStmt;
+
+  if( n<0 || iOffset<0 || ((sqlite3_int64)iOffset+n)>p->nByte ){
+    /* Request is out of range. Return a transient error.
*/ + rc = SQLITE_ERROR; + }else if( v==0 ){ + /* If there is no statement handle, then the blob-handle has + ** already been invalidated. Return SQLITE_ABORT in this case. + */ + rc = SQLITE_ABORT; + }else{ + /* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is + ** returned, clean-up the statement handle. + */ + assert( db == v->db ); + sqlite3BtreeEnterCursor(p->pCsr); + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( xCall==sqlite3BtreePutData && db->xPreUpdateCallback ){ + /* If a pre-update hook is registered and this is a write cursor, + ** invoke it here. + ** + ** TODO: The preupdate-hook is passed SQLITE_DELETE, even though this + ** operation should really be an SQLITE_UPDATE. This is probably + ** incorrect, but is convenient because at this point the new.* values + ** are not easily obtainable. And for the sessions module, an + ** SQLITE_UPDATE where the PK columns do not change is handled in the + ** same way as an SQLITE_DELETE (the SQLITE_DELETE code is actually + ** slightly more efficient). Since you cannot write to a PK column + ** using the incremental-blob API, this works. For the sessions module + ** anyhow. + */ + sqlite3_int64 iKey; + iKey = sqlite3BtreeIntegerKey(p->pCsr); + sqlite3VdbePreUpdateHook( + v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1 + ); + } +#endif + + rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); + sqlite3BtreeLeaveCursor(p->pCsr); + if( rc==SQLITE_ABORT ){ + sqlite3VdbeFinalize(v); + p->pStmt = 0; + }else{ + v->rc = rc; + } + } + sqlite3Error(db, rc); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Read data from a blob handle. +*/ +SQLITE_API int sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){ + return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreePayloadChecked); +} + +/* +** Write data to a blob handle. +*/ +SQLITE_API int sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){ + return blobReadWrite(pBlob, (void *)z, n, iOffset, sqlite3BtreePutData); +} + +/* +** Query a blob handle for the size of the data. +** +** The Incrblob.nByte field is fixed for the lifetime of the Incrblob +** so no mutex is required for access. +*/ +SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *pBlob){ + Incrblob *p = (Incrblob *)pBlob; + return (p && p->pStmt) ? p->nByte : 0; +} + +/* +** Move an existing blob handle to point to a different row of the same +** database table. +** +** If an error occurs, or if the specified row does not exist or does not +** contain a blob or text value, then an error code is returned and the +** database handle error code and message set. If this happens, then all +** subsequent calls to sqlite3_blob_xxx() functions (except blob_close()) +** immediately return SQLITE_ABORT. +*/ +SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ + int rc; + Incrblob *p = (Incrblob *)pBlob; + sqlite3 *db; + + if( p==0 ) return SQLITE_MISUSE_BKPT; + db = p->db; + sqlite3_mutex_enter(db->mutex); + + if( p->pStmt==0 ){ + /* If there is no statement handle, then the blob-handle has + ** already been invalidated. Return SQLITE_ABORT in this case. + */ + rc = SQLITE_ABORT; + }else{ + char *zErr; + rc = blobSeekToRow(p, iRow, &zErr); + if( rc!=SQLITE_OK ){ + sqlite3ErrorWithMsg(db, rc, (zErr ? 
"%s" : 0), zErr); + sqlite3DbFree(db, zErr); + } + assert( rc!=SQLITE_SCHEMA ); + } + + rc = sqlite3ApiExit(db, rc); + assert( rc==SQLITE_OK || p->pStmt==0 ); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +#endif /* #ifndef SQLITE_OMIT_INCRBLOB */ + +/************** End of vdbeblob.c ********************************************/ +/************** Begin file vdbesort.c ****************************************/ +/* +** 2011-07-09 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code for the VdbeSorter object, used in concert with +** a VdbeCursor to sort large numbers of keys for CREATE INDEX statements +** or by SELECT statements with ORDER BY clauses that cannot be satisfied +** using indexes and without LIMIT clauses. +** +** The VdbeSorter object implements a multi-threaded external merge sort +** algorithm that is efficient even if the number of elements being sorted +** exceeds the available memory. +** +** Here is the (internal, non-API) interface between this module and the +** rest of the SQLite system: +** +** sqlite3VdbeSorterInit() Create a new VdbeSorter object. +** +** sqlite3VdbeSorterWrite() Add a single new row to the VdbeSorter +** object. The row is a binary blob in the +** OP_MakeRecord format that contains both +** the ORDER BY key columns and result columns +** in the case of a SELECT w/ ORDER BY, or +** the complete record for an index entry +** in the case of a CREATE INDEX. +** +** sqlite3VdbeSorterRewind() Sort all content previously added. +** Position the read cursor on the +** first sorted element. +** +** sqlite3VdbeSorterNext() Advance the read cursor to the next sorted +** element. +** +** sqlite3VdbeSorterRowkey() Return the complete binary blob for the +** row currently under the read cursor. +** +** sqlite3VdbeSorterCompare() Compare the binary blob for the row +** currently under the read cursor against +** another binary blob X and report if +** X is strictly less than the read cursor. +** Used to enforce uniqueness in a +** CREATE UNIQUE INDEX statement. +** +** sqlite3VdbeSorterClose() Close the VdbeSorter object and reclaim +** all resources. +** +** sqlite3VdbeSorterReset() Refurbish the VdbeSorter for reuse. This +** is like Close() followed by Init() only +** much faster. +** +** The interfaces above must be called in a particular order. Write() can +** only occur in between Init()/Reset() and Rewind(). Next(), Rowkey(), and +** Compare() can only occur in between Rewind() and Close()/Reset(). i.e. +** +** Init() +** for each record: Write() +** Rewind() +** Rowkey()/Compare() +** Next() +** Close() +** +** Algorithm: +** +** Records passed to the sorter via calls to Write() are initially held +** unsorted in main memory. Assuming the amount of memory used never exceeds +** a threshold, when Rewind() is called the set of records is sorted using +** an in-memory merge sort. In this case, no temporary files are required +** and subsequent calls to Rowkey(), Next() and Compare() read records +** directly from main memory. 
+**
+** If the amount of space used to store records in main memory exceeds the
+** threshold, then the set of records currently in memory is sorted and
+** written to a temporary file in "Packed Memory Array" (PMA) format.
+** A PMA created at this point is known as a "level-0 PMA". Higher levels
+** of PMAs may be created by merging existing PMAs together - for example
+** merging two or more level-0 PMAs together creates a level-1 PMA.
+**
+** The threshold for the amount of main memory to use before flushing
+** records to a PMA is roughly the same as the limit configured for the
+** page-cache of the main database. Specifically, the threshold is set to
+** the value returned by "PRAGMA main.page_size" multiplied by
+** that returned by "PRAGMA main.cache_size", in bytes.
+**
+** If the sorter is running in single-threaded mode, then all PMAs generated
+** are appended to a single temporary file. Or, if the sorter is running in
+** multi-threaded mode then up to (N+1) temporary files may be opened, where
+** N is the configured number of worker threads. In this case, instead of
+** sorting the records and writing the PMA to a temporary file itself, the
+** calling thread usually launches a worker thread to do so. Except, if
+** there are already N worker threads running, the main thread does the work
+** itself.
+**
+** The sorter is running in multi-threaded mode if (a) the library was built
+** with pre-processor symbol SQLITE_MAX_WORKER_THREADS set to a value greater
+** than zero, and (b) worker threads have been enabled at runtime by calling
+** "PRAGMA threads=N" with some value of N greater than 0.
+**
+** When Rewind() is called, any data remaining in memory is flushed to a
+** final PMA. So at this point the data is stored in some number of sorted
+** PMAs within temporary files on disk.
+**
+** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the
+** sorter is running in single-threaded mode, then these PMAs are merged
+** incrementally as keys are retrieved from the sorter by the VDBE. The
+** MergeEngine object, described in further detail below, performs this
+** merge.
+**
+** Or, if running in multi-threaded mode, then a background thread is
+** launched to merge the existing PMAs. Once the background thread has
+** merged T bytes of data into a single sorted PMA, the main thread
+** begins reading keys from that PMA while the background thread proceeds
+** with merging the next T bytes of data. And so on.
+**
+** Parameter T is set to half the value of the memory threshold used
+** by Write() above to determine when to create a new PMA.
+**
+** If there are more than SORTER_MAX_MERGE_COUNT PMAs in total when
+** Rewind() is called, then a hierarchy of incremental-merges is used.
+** First, T bytes of data from the first SORTER_MAX_MERGE_COUNT PMAs on
+** disk are merged together. Then T bytes of data from the second set, and
+** so on, such that no operation ever merges more than SORTER_MAX_MERGE_COUNT
+** PMAs at a time. This is done to improve locality.
+**
+** If running in multi-threaded mode and there are more than
+** SORTER_MAX_MERGE_COUNT PMAs on disk when Rewind() is called, then more
+** than one background thread may be created. Specifically, there may be
+** one background thread for each temporary file on disk, and one background
+** thread to merge the output of each of the others to a single PMA for
+** the main thread to read from.
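+**
+** As a worked illustration (the numbers are assumptions, not defaults
+** guaranteed by this file): with "PRAGMA main.page_size" = 4096 and
+** "PRAGMA main.cache_size" = 2000 pages, the flush threshold works out
+** to roughly 4096*2000 = 8,192,000 bytes, and the background-merge
+** parameter T to about half of that, ~4MB.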
+*/ +/* #include "sqliteInt.h" */ +/* #include "vdbeInt.h" */ + +/* +** If SQLITE_DEBUG_SORTER_THREADS is defined, this module outputs various +** messages to stderr that may be helpful in understanding the performance +** characteristics of the sorter in multi-threaded mode. +*/ +#if 0 +# define SQLITE_DEBUG_SORTER_THREADS 1 +#endif + +/* +** Hard-coded maximum amount of data to accumulate in memory before flushing +** to a level 0 PMA. The purpose of this limit is to prevent various integer +** overflows. 512MiB. +*/ +#define SQLITE_MAX_PMASZ (1<<29) + +/* +** Private objects used by the sorter +*/ +typedef struct MergeEngine MergeEngine; /* Merge PMAs together */ +typedef struct PmaReader PmaReader; /* Incrementally read one PMA */ +typedef struct PmaWriter PmaWriter; /* Incrementally write one PMA */ +typedef struct SorterRecord SorterRecord; /* A record being sorted */ +typedef struct SortSubtask SortSubtask; /* A sub-task in the sort process */ +typedef struct SorterFile SorterFile; /* Temporary file object wrapper */ +typedef struct SorterList SorterList; /* In-memory list of records */ +typedef struct IncrMerger IncrMerger; /* Read & merge multiple PMAs */ + +/* +** A container for a temp file handle and the current amount of data +** stored in the file. +*/ +struct SorterFile { + sqlite3_file *pFd; /* File handle */ + i64 iEof; /* Bytes of data stored in pFd */ +}; + +/* +** An in-memory list of objects to be sorted. +** +** If aMemory==0 then each object is allocated separately and the objects +** are connected using SorterRecord.u.pNext. If aMemory!=0 then all objects +** are stored in the aMemory[] bulk memory, one right after the other, and +** are connected using SorterRecord.u.iNext. +*/ +struct SorterList { + SorterRecord *pList; /* Linked list of records */ + u8 *aMemory; /* If non-NULL, bulk memory to hold pList */ + int szPMA; /* Size of pList as PMA in bytes */ +}; + +/* +** The MergeEngine object is used to combine two or more smaller PMAs into +** one big PMA using a merge operation. Separate PMAs all need to be +** combined into one big PMA in order to be able to step through the sorted +** records in order. +** +** The aReadr[] array contains a PmaReader object for each of the PMAs being +** merged. An aReadr[] object either points to a valid key or else is at EOF. +** ("EOF" means "End Of File". When aReadr[] is at EOF there is no more data.) +** For the purposes of the paragraphs below, we assume that the array is +** actually N elements in size, where N is the smallest power of 2 greater +** to or equal to the number of PMAs being merged. The extra aReadr[] elements +** are treated as if they are empty (always at EOF). +** +** The aTree[] array is also N elements in size. The value of N is stored in +** the MergeEngine.nTree variable. +** +** The final (N/2) elements of aTree[] contain the results of comparing +** pairs of PMA keys together. Element i contains the result of +** comparing aReadr[2*i-N] and aReadr[2*i-N+1]. Whichever key is smaller, the +** aTree element is set to the index of it. +** +** For the purposes of this comparison, EOF is considered greater than any +** other key value. If the keys are equal (only possible with two EOF +** values), it doesn't matter which index is stored. +** +** The (N/4) elements of aTree[] that precede the final (N/2) described +** above contains the index of the smallest of each block of 4 PmaReaders +** And so on. 
So that aTree[1] contains the index of the PmaReader that +** currently points to the smallest key value. aTree[0] is unused. +** +** Example: +** +** aReadr[0] -> Banana +** aReadr[1] -> Feijoa +** aReadr[2] -> Elderberry +** aReadr[3] -> Currant +** aReadr[4] -> Grapefruit +** aReadr[5] -> Apple +** aReadr[6] -> Durian +** aReadr[7] -> EOF +** +** aTree[] = { X, 5 0, 5 0, 3, 5, 6 } +** +** The current element is "Apple" (the value of the key indicated by +** PmaReader 5). When the Next() operation is invoked, PmaReader 5 will +** be advanced to the next key in its segment. Say the next key is +** "Eggplant": +** +** aReadr[5] -> Eggplant +** +** The contents of aTree[] are updated first by comparing the new PmaReader +** 5 key to the current key of PmaReader 4 (still "Grapefruit"). The PmaReader +** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree. +** The value of PmaReader 6 - "Durian" - is now smaller than that of PmaReader +** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Bananafile2. And instead of using a +** background thread to prepare data for the PmaReader, with a single +** threaded IncrMerger the allocate part of pTask->file2 is "refilled" with +** keys from pMerger by the calling thread whenever the PmaReader runs out +** of data. +*/ +struct IncrMerger { + SortSubtask *pTask; /* Task that owns this merger */ + MergeEngine *pMerger; /* Merge engine thread reads data from */ + i64 iStartOff; /* Offset to start writing file at */ + int mxSz; /* Maximum bytes of data to store */ + int bEof; /* Set to true when merge is finished */ + int bUseThread; /* True to use a bg thread for this object */ + SorterFile aFile[2]; /* aFile[0] for reading, [1] for writing */ +}; + +/* +** An instance of this object is used for writing a PMA. +** +** The PMA is written one record at a time. Each record is of an arbitrary +** size. But I/O is more efficient if it occurs in page-sized blocks where +** each block is aligned on a page boundary. This object caches writes to +** the PMA so that aligned, page-size blocks are written. +*/ +struct PmaWriter { + int eFWErr; /* Non-zero if in an error state */ + u8 *aBuffer; /* Pointer to write buffer */ + int nBuffer; /* Size of write buffer in bytes */ + int iBufStart; /* First byte of buffer to write */ + int iBufEnd; /* Last byte of buffer to write */ + i64 iWriteOff; /* Offset of start of buffer in file */ + sqlite3_file *pFd; /* File handle to write to */ +}; + +/* +** This object is the header on a single record while that record is being +** held in memory and prior to being written out as part of a PMA. +** +** How the linked list is connected depends on how memory is being managed +** by this module. If using a separate allocation for each in-memory record +** (VdbeSorter.list.aMemory==0), then the list is always connected using the +** SorterRecord.u.pNext pointers. +** +** Or, if using the single large allocation method (VdbeSorter.list.aMemory!=0), +** then while records are being accumulated the list is linked using the +** SorterRecord.u.iNext offset. This is because the aMemory[] array may +** be sqlite3Realloc()ed while records are being accumulated. Once the VM +** has finished passing records to the sorter, or when the in-memory buffer +** is full, the list is sorted. As part of the sorting process, it is +** converted to use the SorterRecord.u.pNext pointers. See function +** vdbeSorterSort() for details. 
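+**
+** For illustration, the in-memory layout of a single record is simply the
+** header followed by the record blob, so the SRVAL() macro below is just
+** "the address one SorterRecord header past p":
+**
+**    +------------------+------------------------+
+**    |  SorterRecord    |  nVal bytes of data    |
+**    |  (nVal, u)       |  (OP_MakeRecord blob)  |
+**    +------------------+------------------------+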
+*/
+struct SorterRecord {
+  int nVal;                  /* Size of the record in bytes */
+  union {
+    SorterRecord *pNext;     /* Pointer to next record in list */
+    int iNext;               /* Offset within aMemory of next record */
+  } u;
+  /* The data for the record immediately follows this header */
+};
+
+/* Return a pointer to the buffer containing the record data for SorterRecord
+** object p. Should be used as if:
+**
+**    void *SRVAL(SorterRecord *p) { return (void*)&p[1]; }
+*/
+#define SRVAL(p) ((void*)((SorterRecord*)(p) + 1))
+
+
+/* Maximum number of PMAs that a single MergeEngine can merge */
+#define SORTER_MAX_MERGE_COUNT 16
+
+static int vdbeIncrSwap(IncrMerger*);
+static void vdbeIncrFree(IncrMerger *);
+
+/*
+** Free all memory belonging to the PmaReader object passed as the
+** argument. All structure fields are set to zero before returning.
+*/
+static void vdbePmaReaderClear(PmaReader *pReadr){
+  sqlite3_free(pReadr->aAlloc);
+  sqlite3_free(pReadr->aBuffer);
+  if( pReadr->aMap ) sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap);
+  vdbeIncrFree(pReadr->pIncr);
+  memset(pReadr, 0, sizeof(PmaReader));
+}
+
+/*
+** Read the next nByte bytes of data from the PMA p.
+** If successful, set *ppOut to point to a buffer containing the data
+** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite
+** error code.
+**
+** The buffer returned in *ppOut is only valid until the
+** next call to this function.
+*/
+static int vdbePmaReadBlob(
+  PmaReader *p,                   /* PmaReader from which to take the blob */
+  int nByte,                      /* Bytes of data to read */
+  u8 **ppOut                      /* OUT: Pointer to buffer containing data */
+){
+  int iBuf;                       /* Offset within buffer to read from */
+  int nAvail;                     /* Bytes of data available in buffer */
+
+  if( p->aMap ){
+    *ppOut = &p->aMap[p->iReadOff];
+    p->iReadOff += nByte;
+    return SQLITE_OK;
+  }
+
+  assert( p->aBuffer );
+
+  /* If there is no more data to be read from the buffer, read the next
+  ** p->nBuffer bytes of data from the file into it. Or, if there are less
+  ** than p->nBuffer bytes remaining in the PMA, read all remaining data. */
+  iBuf = p->iReadOff % p->nBuffer;
+  if( iBuf==0 ){
+    int nRead;                    /* Bytes to read from disk */
+    int rc;                       /* sqlite3OsRead() return code */
+
+    /* Determine how many bytes of data to read. */
+    if( (p->iEof - p->iReadOff) > (i64)p->nBuffer ){
+      nRead = p->nBuffer;
+    }else{
+      nRead = (int)(p->iEof - p->iReadOff);
+    }
+    assert( nRead>0 );
+
+    /* Read data from the file. Return early if an error occurs. */
+    rc = sqlite3OsRead(p->pFd, p->aBuffer, nRead, p->iReadOff);
+    assert( rc!=SQLITE_IOERR_SHORT_READ );
+    if( rc!=SQLITE_OK ) return rc;
+  }
+  nAvail = p->nBuffer - iBuf;
+
+  if( nByte<=nAvail ){
+    /* The requested data is available in the in-memory buffer. In this
+    ** case there is no need to make a copy of the data, just return a
+    ** pointer into the buffer to the caller. */
+    *ppOut = &p->aBuffer[iBuf];
+    p->iReadOff += nByte;
+  }else{
+    /* The requested data is not all available in the in-memory buffer.
+    ** In this case, allocate space at p->aAlloc[] to copy the requested
+    ** range into. Then return a copy of pointer p->aAlloc to the caller. */
+    int nRem;                     /* Bytes remaining to copy */
+
+    /* Extend the p->aAlloc[] allocation if required. */
+    if( p->nAlloc<nByte ){
+      u8 *aNew;
+      int nNew = MAX(128, p->nAlloc*2);
+      while( nByte>nNew ) nNew = nNew*2;
+      aNew = sqlite3Realloc(p->aAlloc, nNew);
+      if( !aNew ) return SQLITE_NOMEM_BKPT;
+      p->nAlloc = nNew;
+      p->aAlloc = aNew;
+    }
+
+    /* Copy as much data as is available in the buffer into the start of
+    ** p->aAlloc[].
*/ + memcpy(p->aAlloc, &p->aBuffer[iBuf], nAvail); + p->iReadOff += nAvail; + nRem = nByte - nAvail; + + /* The following loop copies up to p->nBuffer bytes per iteration into + ** the p->aAlloc[] buffer. */ + while( nRem>0 ){ + int rc; /* vdbePmaReadBlob() return code */ + int nCopy; /* Number of bytes to copy */ + u8 *aNext; /* Pointer to buffer to copy data from */ + + nCopy = nRem; + if( nRem>p->nBuffer ) nCopy = p->nBuffer; + rc = vdbePmaReadBlob(p, nCopy, &aNext); + if( rc!=SQLITE_OK ) return rc; + assert( aNext!=p->aAlloc ); + memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy); + nRem -= nCopy; + } + + *ppOut = p->aAlloc; + } + + return SQLITE_OK; +} + +/* +** Read a varint from the stream of data accessed by p. Set *pnOut to +** the value read. +*/ +static int vdbePmaReadVarint(PmaReader *p, u64 *pnOut){ + int iBuf; + + if( p->aMap ){ + p->iReadOff += sqlite3GetVarint(&p->aMap[p->iReadOff], pnOut); + }else{ + iBuf = p->iReadOff % p->nBuffer; + if( iBuf && (p->nBuffer-iBuf)>=9 ){ + p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut); + }else{ + u8 aVarint[16], *a; + int i = 0, rc; + do{ + rc = vdbePmaReadBlob(p, 1, &a); + if( rc ) return rc; + aVarint[(i++)&0xf] = a[0]; + }while( (a[0]&0x80)!=0 ); + sqlite3GetVarint(aVarint, pnOut); + } + } + + return SQLITE_OK; +} + +/* +** Attempt to memory map file pFile. If successful, set *pp to point to the +** new mapping and return SQLITE_OK. If the mapping is not attempted +** (because the file is too large or the VFS layer is configured not to use +** mmap), return SQLITE_OK and set *pp to NULL. +** +** Or, if an error occurs, return an SQLite error code. The final value of +** *pp is undefined in this case. +*/ +static int vdbeSorterMapFile(SortSubtask *pTask, SorterFile *pFile, u8 **pp){ + int rc = SQLITE_OK; + if( pFile->iEof<=(i64)(pTask->pSorter->db->nMaxSorterMmap) ){ + sqlite3_file *pFd = pFile->pFd; + if( pFd->pMethods->iVersion>=3 ){ + rc = sqlite3OsFetch(pFd, 0, (int)pFile->iEof, (void**)pp); + testcase( rc!=SQLITE_OK ); + } + } + return rc; +} + +/* +** Attach PmaReader pReadr to file pFile (if it is not already attached to +** that file) and seek it to offset iOff within the file. Return SQLITE_OK +** if successful, or an SQLite error code if an error occurs. +*/ +static int vdbePmaReaderSeek( + SortSubtask *pTask, /* Task context */ + PmaReader *pReadr, /* Reader whose cursor is to be moved */ + SorterFile *pFile, /* Sorter file to read from */ + i64 iOff /* Offset in pFile */ +){ + int rc = SQLITE_OK; + + assert( pReadr->pIncr==0 || pReadr->pIncr->bEof==0 ); + + if( sqlite3FaultSim(201) ) return SQLITE_IOERR_READ; + if( pReadr->aMap ){ + sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap); + pReadr->aMap = 0; + } + pReadr->iReadOff = iOff; + pReadr->iEof = pFile->iEof; + pReadr->pFd = pFile->pFd; + + rc = vdbeSorterMapFile(pTask, pFile, &pReadr->aMap); + if( rc==SQLITE_OK && pReadr->aMap==0 ){ + int pgsz = pTask->pSorter->pgsz; + int iBuf = pReadr->iReadOff % pgsz; + if( pReadr->aBuffer==0 ){ + pReadr->aBuffer = (u8*)sqlite3Malloc(pgsz); + if( pReadr->aBuffer==0 ) rc = SQLITE_NOMEM_BKPT; + pReadr->nBuffer = pgsz; + } + if( rc==SQLITE_OK && iBuf ){ + int nRead = pgsz - iBuf; + if( (pReadr->iReadOff + nRead) > pReadr->iEof ){ + nRead = (int)(pReadr->iEof - pReadr->iReadOff); + } + rc = sqlite3OsRead( + pReadr->pFd, &pReadr->aBuffer[iBuf], nRead, pReadr->iReadOff + ); + testcase( rc!=SQLITE_OK ); + } + } + + return rc; +} + +/* +** Advance PmaReader pReadr to the next key in its PMA. 
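+** Within a PMA each key is stored as a varint length followed by that many
+** bytes of record data, so advancing (a sketch of the logic in the body
+** below, not additional behavior) amounts to:
+**
+**    read varint        ->  nRec
+**    read nRec bytes    ->  pReadr->aKey
+**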
Return SQLITE_OK if +** no error occurs, or an SQLite error code if one does. +*/ +static int vdbePmaReaderNext(PmaReader *pReadr){ + int rc = SQLITE_OK; /* Return Code */ + u64 nRec = 0; /* Size of record in bytes */ + + + if( pReadr->iReadOff>=pReadr->iEof ){ + IncrMerger *pIncr = pReadr->pIncr; + int bEof = 1; + if( pIncr ){ + rc = vdbeIncrSwap(pIncr); + if( rc==SQLITE_OK && pIncr->bEof==0 ){ + rc = vdbePmaReaderSeek( + pIncr->pTask, pReadr, &pIncr->aFile[0], pIncr->iStartOff + ); + bEof = 0; + } + } + + if( bEof ){ + /* This is an EOF condition */ + vdbePmaReaderClear(pReadr); + testcase( rc!=SQLITE_OK ); + return rc; + } + } + + if( rc==SQLITE_OK ){ + rc = vdbePmaReadVarint(pReadr, &nRec); + } + if( rc==SQLITE_OK ){ + pReadr->nKey = (int)nRec; + rc = vdbePmaReadBlob(pReadr, (int)nRec, &pReadr->aKey); + testcase( rc!=SQLITE_OK ); + } + + return rc; +} + +/* +** Initialize PmaReader pReadr to scan through the PMA stored in file pFile +** starting at offset iStart and ending at offset iEof-1. This function +** leaves the PmaReader pointing to the first key in the PMA (or EOF if the +** PMA is empty). +** +** If the pnByte parameter is NULL, then it is assumed that the file +** contains a single PMA, and that that PMA omits the initial length varint. +*/ +static int vdbePmaReaderInit( + SortSubtask *pTask, /* Task context */ + SorterFile *pFile, /* Sorter file to read from */ + i64 iStart, /* Start offset in pFile */ + PmaReader *pReadr, /* PmaReader to populate */ + i64 *pnByte /* IN/OUT: Increment this value by PMA size */ +){ + int rc; + + assert( pFile->iEof>iStart ); + assert( pReadr->aAlloc==0 && pReadr->nAlloc==0 ); + assert( pReadr->aBuffer==0 ); + assert( pReadr->aMap==0 ); + + rc = vdbePmaReaderSeek(pTask, pReadr, pFile, iStart); + if( rc==SQLITE_OK ){ + u64 nByte = 0; /* Size of PMA in bytes */ + rc = vdbePmaReadVarint(pReadr, &nByte); + pReadr->iEof = pReadr->iReadOff + nByte; + *pnByte += nByte; + } + + if( rc==SQLITE_OK ){ + rc = vdbePmaReaderNext(pReadr); + } + return rc; +} + +/* +** A version of vdbeSorterCompare() that assumes that it has already been +** determined that the first field of key1 is equal to the first field of +** key2. +*/ +static int vdbeSorterCompareTail( + SortSubtask *pTask, /* Subtask context (for pKeyInfo) */ + int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */ + const void *pKey1, int nKey1, /* Left side of comparison */ + const void *pKey2, int nKey2 /* Right side of comparison */ +){ + UnpackedRecord *r2 = pTask->pUnpacked; + if( *pbKey2Cached==0 ){ + sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2); + *pbKey2Cached = 1; + } + return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, r2, 1); +} + +/* +** Compare key1 (buffer pKey1, size nKey1 bytes) with key2 (buffer pKey2, +** size nKey2 bytes). Use (pTask->pKeyInfo) for the collation sequences +** used by the comparison. Return the result of the comparison. +** +** If IN/OUT parameter *pbKey2Cached is true when this function is called, +** it is assumed that (pTask->pUnpacked) contains the unpacked version +** of key2. If it is false, (pTask->pUnpacked) is populated with the unpacked +** version of key2 and *pbKey2Cached set to true before returning. +** +** If an OOM error is encountered, (pTask->pUnpacked->error_rc) is set +** to SQLITE_NOMEM. 
+*/
+static int vdbeSorterCompare(
+  SortSubtask *pTask,             /* Subtask context (for pKeyInfo) */
+  int *pbKey2Cached,              /* True if pTask->pUnpacked is pKey2 */
+  const void *pKey1, int nKey1,   /* Left side of comparison */
+  const void *pKey2, int nKey2    /* Right side of comparison */
+){
+  UnpackedRecord *r2 = pTask->pUnpacked;
+  if( !*pbKey2Cached ){
+    sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2);
+    *pbKey2Cached = 1;
+  }
+  return sqlite3VdbeRecordCompare(nKey1, pKey1, r2);
+}
+
+/*
+** A specially optimized version of vdbeSorterCompare() that assumes that
+** the first field of each key is a TEXT value and that the collation
+** sequence to compare them with is BINARY.
+*/
+static int vdbeSorterCompareText(
+  SortSubtask *pTask,             /* Subtask context (for pKeyInfo) */
+  int *pbKey2Cached,              /* True if pTask->pUnpacked is pKey2 */
+  const void *pKey1, int nKey1,   /* Left side of comparison */
+  const void *pKey2, int nKey2    /* Right side of comparison */
+){
+  const u8 * const p1 = (const u8 * const)pKey1;
+  const u8 * const p2 = (const u8 * const)pKey2;
+  const u8 * const v1 = &p1[ p1[0] ];   /* Pointer to value 1 */
+  const u8 * const v2 = &p2[ p2[0] ];   /* Pointer to value 2 */
+
+  int n1;
+  int n2;
+  int res;
+
+  getVarint32(&p1[1], n1); n1 = (n1 - 13) / 2;
+  getVarint32(&p2[1], n2); n2 = (n2 - 13) / 2;
+  res = memcmp(v1, v2, MIN(n1, n2));
+  if( res==0 ){
+    res = n1 - n2;
+  }
+
+  if( res==0 ){
+    if( pTask->pSorter->pKeyInfo->nField>1 ){
+      res = vdbeSorterCompareTail(
+          pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2
+      );
+    }
+  }else{
+    if( pTask->pSorter->pKeyInfo->aSortOrder[0] ){
+      res = res * -1;
+    }
+  }
+
+  return res;
+}
+
+/*
+** A specially optimized version of vdbeSorterCompare() that assumes that
+** the first field of each key is an INTEGER value.
+*/
+static int vdbeSorterCompareInt(
+  SortSubtask *pTask,             /* Subtask context (for pKeyInfo) */
+  int *pbKey2Cached,              /* True if pTask->pUnpacked is pKey2 */
+  const void *pKey1, int nKey1,   /* Left side of comparison */
+  const void *pKey2, int nKey2    /* Right side of comparison */
+){
+  const u8 * const p1 = (const u8 * const)pKey1;
+  const u8 * const p2 = (const u8 * const)pKey2;
+  const int s1 = p1[1];                 /* Left hand serial type */
+  const int s2 = p2[1];                 /* Right hand serial type */
+  const u8 * const v1 = &p1[ p1[0] ];   /* Pointer to value 1 */
+  const u8 * const v2 = &p2[ p2[0] ];   /* Pointer to value 2 */
+  int res;                              /* Return value */
+
+  assert( (s1>0 && s1<7) || s1==8 || s1==9 );
+  assert( (s2>0 && s2<7) || s2==8 || s2==9 );
+
+  if( s1>7 && s2>7 ){
+    res = s1 - s2;
+  }else{
+    if( s1==s2 ){
+      if( (*v1 ^ *v2) & 0x80 ){
+        /* The two values have different signs */
+        res = (*v1 & 0x80) ? -1 : +1;
+      }else{
+        /* The two values have the same sign. Compare using memcmp(). */
+        static const u8 aLen[] = {0, 1, 2, 3, 4, 6, 8 };
+        int i;
+        res = 0;
+        for(i=0; i<aLen[s1]; i++){
+          if( (res = v1[i] - v2[i]) ) break;
+        }
+      }
+    }else if( s2>7 ){
+      res = +1;
+    }else if( s1>7 ){
+      res = -1;
+    }else{
+      res = s1 - s2;
+    }
+    assert( res!=0 );
+
+    if( res>0 ){
+      if( *v1 & 0x80 ) res = -1;
+    }else{
+      if( *v2 & 0x80 ) res = +1;
+    }
+  }
+
+  if( res==0 ){
+    if( pTask->pSorter->pKeyInfo->nField>1 ){
+      res = vdbeSorterCompareTail(
+          pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2
+      );
+    }
+  }else if( pTask->pSorter->pKeyInfo->aSortOrder[0] ){
+    res = res * -1;
+  }
+
+  return res;
+}
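+
+/*
+** Worked example (illustration only) of the header arithmetic used by
+** vdbeSorterCompareText() above: in the record format a TEXT value of n
+** bytes is stored with serial type st = n*2+13, so n = (st-13)/2. A
+** 5-byte string therefore carries serial type 23, and (23-13)/2 = 5.
+*/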
+**
+** Usually, the sorter module uses the value of (pCsr->pKeyInfo->nField)
+** to determine the number of fields that should be compared from the
+** records being sorted. However, if the value passed as argument nField
+** is non-zero and the sorter is able to guarantee a stable sort, nField
+** is used instead. This is used when sorting records for a CREATE INDEX
+** statement. In this case, keys are always delivered to the sorter in
+** order of the primary key, which happens to make up the final part
+** of the records being sorted. So if the sort is stable, there is never
+** any reason to compare PK fields and they can be ignored for a small
+** performance boost.
+**
+** The sorter can guarantee a stable sort when running in single-threaded
+** mode, but not in multi-threaded mode.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterInit(
+  sqlite3 *db,                    /* Database connection (for malloc()) */
+  int nField,                     /* Number of key fields in each record */
+  VdbeCursor *pCsr                /* Cursor that holds the new sorter */
+){
+  int pgsz;                       /* Page size of main database */
+  int i;                          /* Used to iterate through aTask[] */
+  VdbeSorter *pSorter;            /* The new sorter */
+  KeyInfo *pKeyInfo;              /* Copy of pCsr->pKeyInfo with db==0 */
+  int szKeyInfo;                  /* Size of pCsr->pKeyInfo in bytes */
+  int sz;                         /* Size of pSorter in bytes */
+  int rc = SQLITE_OK;
+#if SQLITE_MAX_WORKER_THREADS==0
+# define nWorker 0
+#else
+  int nWorker;
+#endif
+
+  /* Initialize the upper limit on the number of worker threads */
+#if SQLITE_MAX_WORKER_THREADS>0
+  if( sqlite3TempInMemory(db) || sqlite3GlobalConfig.bCoreMutex==0 ){
+    nWorker = 0;
+  }else{
+    nWorker = db->aLimit[SQLITE_LIMIT_WORKER_THREADS];
+  }
+#endif
+
+  /* Do not allow the total number of threads (main thread + all workers)
+  ** to exceed the maximum merge count */
+#if SQLITE_MAX_WORKER_THREADS>=SORTER_MAX_MERGE_COUNT
+  if( nWorker>=SORTER_MAX_MERGE_COUNT ){
+    nWorker = SORTER_MAX_MERGE_COUNT-1;
+  }
+#endif
+
+  assert( pCsr->pKeyInfo && pCsr->pBtx==0 );
+  assert( pCsr->eCurType==CURTYPE_SORTER );
+  szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nField-1)*sizeof(CollSeq*);
+  sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask);
+
+  pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo);
+  pCsr->uc.pSorter = pSorter;
+  if( pSorter==0 ){
+    rc = SQLITE_NOMEM_BKPT;
+  }else{
+    pSorter->pKeyInfo = pKeyInfo = (KeyInfo*)((u8*)pSorter + sz);
+    memcpy(pKeyInfo, pCsr->pKeyInfo, szKeyInfo);
+    pKeyInfo->db = 0;
+    if( nField && nWorker==0 ){
+      pKeyInfo->nXField += (pKeyInfo->nField - nField);
+      pKeyInfo->nField = nField;
+    }
+    pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt);
+    pSorter->nTask = nWorker + 1;
+    pSorter->iPrev = (u8)(nWorker - 1);
+    pSorter->bUseThreads = (pSorter->nTask>1);
+    pSorter->db = db;
+    for(i=0; i<pSorter->nTask; i++){
+      SortSubtask *pTask = &pSorter->aTask[i];
+      pTask->pSorter = pSorter;
+    }
+
+    if( !sqlite3TempInMemory(db) ){
+      i64 mxCache;                /* Cache size in bytes */
+      u32 szPma = sqlite3GlobalConfig.szPma;
+      pSorter->mnPmaSize = szPma * pgsz;
+
+      mxCache = db->aDb[0].pSchema->cache_size;
+      if( mxCache<0 ){
+        /* A negative cache-size value C indicates that the cache is abs(C)
+        ** KiB in size.
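+        ** For example, a cache_size of -2000 yields
+        ** mxCache = -2000 * -1024 = 2048000 bytes (2000 KiB).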
*/ + mxCache = mxCache * -1024; + }else{ + mxCache = mxCache * pgsz; + } + mxCache = MIN(mxCache, SQLITE_MAX_PMASZ); + pSorter->mxPmaSize = MAX(pSorter->mnPmaSize, (int)mxCache); + + /* EVIDENCE-OF: R-26747-61719 When the application provides any amount of + ** scratch memory using SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary + ** large heap allocations. + */ + if( sqlite3GlobalConfig.pScratch==0 ){ + assert( pSorter->iMemory==0 ); + pSorter->nMemory = pgsz; + pSorter->list.aMemory = (u8*)sqlite3Malloc(pgsz); + if( !pSorter->list.aMemory ) rc = SQLITE_NOMEM_BKPT; + } + } + + if( (pKeyInfo->nField+pKeyInfo->nXField)<13 + && (pKeyInfo->aColl[0]==0 || pKeyInfo->aColl[0]==db->pDfltColl) + ){ + pSorter->typeMask = SORTER_TYPE_INTEGER | SORTER_TYPE_TEXT; + } + } + + return rc; +} +#undef nWorker /* Defined at the top of this function */ + +/* +** Free the list of sorted records starting at pRecord. +*/ +static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){ + SorterRecord *p; + SorterRecord *pNext; + for(p=pRecord; p; p=pNext){ + pNext = p->u.pNext; + sqlite3DbFree(db, p); + } +} + +/* +** Free all resources owned by the object indicated by argument pTask. All +** fields of *pTask are zeroed before returning. +*/ +static void vdbeSortSubtaskCleanup(sqlite3 *db, SortSubtask *pTask){ + sqlite3DbFree(db, pTask->pUnpacked); +#if SQLITE_MAX_WORKER_THREADS>0 + /* pTask->list.aMemory can only be non-zero if it was handed memory + ** from the main thread. That only occurs SQLITE_MAX_WORKER_THREADS>0 */ + if( pTask->list.aMemory ){ + sqlite3_free(pTask->list.aMemory); + }else +#endif + { + assert( pTask->list.aMemory==0 ); + vdbeSorterRecordFree(0, pTask->list.pList); + } + if( pTask->file.pFd ){ + sqlite3OsCloseFree(pTask->file.pFd); + } + if( pTask->file2.pFd ){ + sqlite3OsCloseFree(pTask->file2.pFd); + } + memset(pTask, 0, sizeof(SortSubtask)); +} + +#ifdef SQLITE_DEBUG_SORTER_THREADS +static void vdbeSorterWorkDebug(SortSubtask *pTask, const char *zEvent){ + i64 t; + int iTask = (pTask - pTask->pSorter->aTask); + sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); + fprintf(stderr, "%lld:%d %s\n", t, iTask, zEvent); +} +static void vdbeSorterRewindDebug(const char *zEvent){ + i64 t; + sqlite3OsCurrentTimeInt64(sqlite3_vfs_find(0), &t); + fprintf(stderr, "%lld:X %s\n", t, zEvent); +} +static void vdbeSorterPopulateDebug( + SortSubtask *pTask, + const char *zEvent +){ + i64 t; + int iTask = (pTask - pTask->pSorter->aTask); + sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); + fprintf(stderr, "%lld:bg%d %s\n", t, iTask, zEvent); +} +static void vdbeSorterBlockDebug( + SortSubtask *pTask, + int bBlocked, + const char *zEvent +){ + if( bBlocked ){ + i64 t; + sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); + fprintf(stderr, "%lld:main %s\n", t, zEvent); + } +} +#else +# define vdbeSorterWorkDebug(x,y) +# define vdbeSorterRewindDebug(y) +# define vdbeSorterPopulateDebug(x,y) +# define vdbeSorterBlockDebug(x,y,z) +#endif + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** Join thread pTask->thread. 
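+** The thread's exit value is the error code of the work it carried out;
+** it is recovered below via SQLITE_PTR_TO_INT(pRet), after which
+** pTask->bDone and pTask->pThread are cleared so the slot may be reused.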
+*/
+static int vdbeSorterJoinThread(SortSubtask *pTask){
+  int rc = SQLITE_OK;
+  if( pTask->pThread ){
+#ifdef SQLITE_DEBUG_SORTER_THREADS
+    int bDone = pTask->bDone;
+#endif
+    void *pRet = SQLITE_INT_TO_PTR(SQLITE_ERROR);
+    vdbeSorterBlockDebug(pTask, !bDone, "enter");
+    (void)sqlite3ThreadJoin(pTask->pThread, &pRet);
+    vdbeSorterBlockDebug(pTask, !bDone, "exit");
+    rc = SQLITE_PTR_TO_INT(pRet);
+    assert( pTask->bDone==1 );
+    pTask->bDone = 0;
+    pTask->pThread = 0;
+  }
+  return rc;
+}
+
+/*
+** Launch a background thread to run xTask(pIn).
+*/
+static int vdbeSorterCreateThread(
+  SortSubtask *pTask,             /* Thread will use this task object */
+  void *(*xTask)(void*),          /* Routine to run in a separate thread */
+  void *pIn                       /* Argument passed into xTask() */
+){
+  assert( pTask->pThread==0 && pTask->bDone==0 );
+  return sqlite3ThreadCreate(&pTask->pThread, xTask, pIn);
+}
+
+/*
+** Join all outstanding threads launched by SorterWrite() to create
+** level-0 PMAs.
+*/
+static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){
+  int rc = rcin;
+  int i;
+
+  /* This function is always called by the main user thread.
+  **
+  ** If this function is being called after SorterRewind() has been called,
+  ** it is possible that thread pSorter->aTask[pSorter->nTask-1].pThread
+  ** is currently attempting to join one of the other threads. To avoid a
+  ** race condition where this thread also attempts to join the same object,
+  ** join thread pSorter->aTask[pSorter->nTask-1].pThread first. */
+  for(i=pSorter->nTask-1; i>=0; i--){
+    SortSubtask *pTask = &pSorter->aTask[i];
+    int rc2 = vdbeSorterJoinThread(pTask);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+  return rc;
+}
+#else
+# define vdbeSorterJoinAll(x,rcin) (rcin)
+# define vdbeSorterJoinThread(pTask) SQLITE_OK
+#endif
+
+/*
+** Allocate a new MergeEngine object capable of handling up to
+** nReader PmaReader inputs.
+**
+** nReader is automatically rounded up to the next power of two.
+** nReader may not exceed SORTER_MAX_MERGE_COUNT even after rounding up.
+*/
+static MergeEngine *vdbeMergeEngineNew(int nReader){
+  int N = 2;                      /* Smallest power of two >= nReader */
+  int nByte;                      /* Total bytes of space to allocate */
+  MergeEngine *pNew;              /* Pointer to allocated object to return */
+
+  assert( nReader<=SORTER_MAX_MERGE_COUNT );
+
+  while( N<nReader ) N += N;
+  nByte = sizeof(MergeEngine) + N * (sizeof(int) + sizeof(PmaReader));
+
+  pNew = sqlite3FaultSim(100) ? 0 : (MergeEngine*)sqlite3MallocZero(nByte);
+  if( pNew ){
+    pNew->nTree = N;
+    pNew->pTask = 0;
+    pNew->aReadr = (PmaReader*)&pNew[1];
+    pNew->aTree = (int*)&pNew->aReadr[N];
+  }
+  return pNew;
+}
+
+/*
+** Free the MergeEngine object passed as the only argument.
+*/
+static void vdbeMergeEngineFree(MergeEngine *pMerger){
+  int i;
+  if( pMerger ){
+    for(i=0; i<pMerger->nTree; i++){
+      vdbePmaReaderClear(&pMerger->aReadr[i]);
+    }
+  }
+  sqlite3_free(pMerger);
+}
+
+/*
+** Free all resources associated with the IncrMerger object indicated by
+** the first argument.
+*/
+static void vdbeIncrFree(IncrMerger *pIncr){
+  if( pIncr ){
+#if SQLITE_MAX_WORKER_THREADS>0
+    if( pIncr->bUseThread ){
+      vdbeSorterJoinThread(pIncr->pTask);
+      if( pIncr->aFile[0].pFd ) sqlite3OsCloseFree(pIncr->aFile[0].pFd);
+      if( pIncr->aFile[1].pFd ) sqlite3OsCloseFree(pIncr->aFile[1].pFd);
+    }
+#endif
+    vdbeMergeEngineFree(pIncr->pMerger);
+    sqlite3_free(pIncr);
+  }
+}
+
+/*
+** Reset a sorting cursor back to its original empty state.
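+** Note that the bulk list.aMemory allocation is deliberately retained
+** here so that the cursor can be reused cheaply; it is only released by
+** sqlite3VdbeSorterClose().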
+*/
+SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *db, VdbeSorter *pSorter){
+  int i;
+  (void)vdbeSorterJoinAll(pSorter, SQLITE_OK);
+  assert( pSorter->bUseThreads || pSorter->pReader==0 );
+#if SQLITE_MAX_WORKER_THREADS>0
+  if( pSorter->pReader ){
+    vdbePmaReaderClear(pSorter->pReader);
+    sqlite3DbFree(db, pSorter->pReader);
+    pSorter->pReader = 0;
+  }
+#endif
+  vdbeMergeEngineFree(pSorter->pMerger);
+  pSorter->pMerger = 0;
+  for(i=0; i<pSorter->nTask; i++){
+    SortSubtask *pTask = &pSorter->aTask[i];
+    vdbeSortSubtaskCleanup(db, pTask);
+    pTask->pSorter = pSorter;
+  }
+  if( pSorter->list.aMemory==0 ){
+    vdbeSorterRecordFree(0, pSorter->list.pList);
+  }
+  pSorter->list.pList = 0;
+  pSorter->list.szPMA = 0;
+  pSorter->bUsePMA = 0;
+  pSorter->iMemory = 0;
+  pSorter->mxKeysize = 0;
+  sqlite3DbFree(db, pSorter->pUnpacked);
+  pSorter->pUnpacked = 0;
+}
+
+/*
+** Free any cursor components allocated by sqlite3VdbeSorterXXX routines.
+*/
+SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){
+  VdbeSorter *pSorter;
+  assert( pCsr->eCurType==CURTYPE_SORTER );
+  pSorter = pCsr->uc.pSorter;
+  if( pSorter ){
+    sqlite3VdbeSorterReset(db, pSorter);
+    sqlite3_free(pSorter->list.aMemory);
+    sqlite3DbFree(db, pSorter);
+    pCsr->uc.pSorter = 0;
+  }
+}
+
+#if SQLITE_MAX_MMAP_SIZE>0
+/*
+** The first argument is a file-handle open on a temporary file. The file
+** is guaranteed to be nByte bytes or smaller in size. This function
+** attempts to extend the file to nByte bytes in size and to ensure that
+** the VFS has memory mapped it.
+**
+** Whether or not the file does end up memory mapped of course depends on
+** the specific VFS implementation.
+*/
+static void vdbeSorterExtendFile(sqlite3 *db, sqlite3_file *pFd, i64 nByte){
+  if( nByte<=(i64)(db->nMaxSorterMmap) && pFd->pMethods->iVersion>=3 ){
+    void *p = 0;
+    int chunksize = 4*1024;
+    sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_CHUNK_SIZE, &chunksize);
+    sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_SIZE_HINT, &nByte);
+    sqlite3OsFetch(pFd, 0, (int)nByte, &p);
+    sqlite3OsUnfetch(pFd, 0, p);
+  }
+}
+#else
+# define vdbeSorterExtendFile(x,y,z)
+#endif
+
+/*
+** Allocate space for a file-handle and open a temporary file. If successful,
+** set *ppFd to point to the malloc'd file-handle and return SQLITE_OK.
+** Otherwise, set *ppFd to 0 and return an SQLite error code.
+*/
+static int vdbeSorterOpenTempFile(
+  sqlite3 *db,                    /* Database handle doing sort */
+  i64 nExtend,                    /* Attempt to extend file to this size */
+  sqlite3_file **ppFd
+){
+  int rc;
+  if( sqlite3FaultSim(202) ) return SQLITE_IOERR_ACCESS;
+  rc = sqlite3OsOpenMalloc(db->pVfs, 0, ppFd,
+      SQLITE_OPEN_TEMP_JOURNAL |
+      SQLITE_OPEN_READWRITE    | SQLITE_OPEN_CREATE |
+      SQLITE_OPEN_EXCLUSIVE    | SQLITE_OPEN_DELETEONCLOSE, &rc
+  );
+  if( rc==SQLITE_OK ){
+    i64 max = SQLITE_MAX_MMAP_SIZE;
+    sqlite3OsFileControlHint(*ppFd, SQLITE_FCNTL_MMAP_SIZE, (void*)&max);
+    if( nExtend>0 ){
+      vdbeSorterExtendFile(db, *ppFd, nExtend);
+    }
+  }
+  return rc;
+}
+
+/*
+** If it has not already been allocated, allocate the UnpackedRecord
+** structure at pTask->pUnpacked. Return SQLITE_OK if successful (or
+** if no allocation was required), or SQLITE_NOMEM otherwise.
+*/
+static int vdbeSortAllocUnpacked(SortSubtask *pTask){
+  if( pTask->pUnpacked==0 ){
+    pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pTask->pSorter->pKeyInfo);
+    if( pTask->pUnpacked==0 ) return SQLITE_NOMEM_BKPT;
+    pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nField;
+    pTask->pUnpacked->errCode = 0;
+  }
+  return SQLITE_OK;
+}
+
+
+/*
+** Merge the two sorted lists p1 and p2 into a single list.
+*/
+static SorterRecord *vdbeSorterMerge(
+  SortSubtask *pTask,             /* Calling thread context */
+  SorterRecord *p1,               /* First list to merge */
+  SorterRecord *p2                /* Second list to merge */
+){
+  SorterRecord *pFinal = 0;
+  SorterRecord **pp = &pFinal;
+  int bCached = 0;
+
+  assert( p1!=0 && p2!=0 );
+  for(;;){
+    int res;
+    res = pTask->xCompare(
+        pTask, &bCached, SRVAL(p1), p1->nVal, SRVAL(p2), p2->nVal
+    );
+
+    if( res<=0 ){
+      *pp = p1;
+      pp = &p1->u.pNext;
+      p1 = p1->u.pNext;
+      if( p1==0 ){
+        *pp = p2;
+        break;
+      }
+    }else{
+      *pp = p2;
+      pp = &p2->u.pNext;
+      p2 = p2->u.pNext;
+      bCached = 0;
+      if( p2==0 ){
+        *pp = p1;
+        break;
+      }
+    }
+  }
+  return pFinal;
+}
+
+/*
+** Return the SorterCompare function to compare values collected by the
+** sorter object passed as the only argument.
+*/
+static SorterCompare vdbeSorterGetCompare(VdbeSorter *p){
+  if( p->typeMask==SORTER_TYPE_INTEGER ){
+    return vdbeSorterCompareInt;
+  }else if( p->typeMask==SORTER_TYPE_TEXT ){
+    return vdbeSorterCompareText;
+  }
+  return vdbeSorterCompare;
+}
+
+/*
+** Sort the linked list of records headed at pTask->pList. Return
+** SQLITE_OK if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if
+** an error occurs.
+*/
+static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){
+  int i;
+  SorterRecord **aSlot;
+  SorterRecord *p;
+  int rc;
+
+  rc = vdbeSortAllocUnpacked(pTask);
+  if( rc!=SQLITE_OK ) return rc;
+
+  p = pList->pList;
+  pTask->xCompare = vdbeSorterGetCompare(pTask->pSorter);
+
+  aSlot = (SorterRecord **)sqlite3MallocZero(64 * sizeof(SorterRecord *));
+  if( !aSlot ){
+    return SQLITE_NOMEM_BKPT;
+  }
+
+  while( p ){
+    SorterRecord *pNext;
+    if( pList->aMemory ){
+      if( (u8*)p==pList->aMemory ){
+        pNext = 0;
+      }else{
+        assert( p->u.iNext<sqlite3MallocSize(pList->aMemory) );
+        pNext = (SorterRecord*)&pList->aMemory[p->u.iNext];
+      }
+    }else{
+      pNext = p->u.pNext;
+    }
+
+    p->u.pNext = 0;
+    for(i=0; aSlot[i]; i++){
+      p = vdbeSorterMerge(pTask, p, aSlot[i]);
+      aSlot[i] = 0;
+    }
+    aSlot[i] = p;
+    p = pNext;
+  }
+
+  p = 0;
+  for(i=0; i<64; i++){
+    if( aSlot[i]==0 ) continue;
+    p = p ? vdbeSorterMerge(pTask, p, aSlot[i]) : aSlot[i];
+  }
+  pList->pList = p;
+
+  sqlite3_free(aSlot);
+  assert( pTask->pUnpacked->errCode==SQLITE_OK
+       || pTask->pUnpacked->errCode==SQLITE_NOMEM
+  );
+  return pTask->pUnpacked->errCode;
+}
+
+/*
+** Initialize a PMA-writer object.
+*/
+static void vdbePmaWriterInit(
+  sqlite3_file *pFd,              /* File handle to write to */
+  PmaWriter *p,                   /* Object to populate */
+  int nBuf,                       /* Buffer size */
+  i64 iStart                      /* Offset of pFd to begin writing at */
+){
+  memset(p, 0, sizeof(PmaWriter));
+  p->aBuffer = (u8*)sqlite3Malloc(nBuf);
+  if( !p->aBuffer ){
+    p->eFWErr = SQLITE_NOMEM_BKPT;
+  }else{
+    p->iBufEnd = p->iBufStart = (iStart % nBuf);
+    p->iWriteOff = iStart - p->iBufStart;
+    p->nBuffer = nBuf;
+    p->pFd = pFd;
+  }
+}
+
+/*
+** Write nData bytes of data to the PMA. Return SQLITE_OK
+** if successful, or an SQLite error code if an error occurs.
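+** (Strictly, the function returns void: any I/O or OOM error is latched
+** in PmaWriter.eFWErr and reported later by vdbePmaWriterFinish().)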
+*/
+static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){
+  int nRem = nData;
+  while( nRem>0 && p->eFWErr==0 ){
+    int nCopy = nRem;
+    if( nCopy>(p->nBuffer - p->iBufEnd) ){
+      nCopy = p->nBuffer - p->iBufEnd;
+    }
+
+    memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy);
+    p->iBufEnd += nCopy;
+    if( p->iBufEnd==p->nBuffer ){
+      p->eFWErr = sqlite3OsWrite(p->pFd,
+          &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
+          p->iWriteOff + p->iBufStart
+      );
+      p->iBufStart = p->iBufEnd = 0;
+      p->iWriteOff += p->nBuffer;
+    }
+    assert( p->iBufEnd<p->nBuffer );
+
+    nRem -= nCopy;
+  }
+}
+
+/*
+** Flush any buffered data to disk and clean up the PMA-writer object.
+** The results of using the PMA-writer after this call are undefined.
+** Return SQLITE_OK if flushing the buffered data succeeds or is not
+** required. Otherwise, return an SQLite error code.
+**
+** Before returning, set *piEof to the offset immediately following the
+** last byte written to the file.
+*/
+static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof){
+  int rc;
+  if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){
+    p->eFWErr = sqlite3OsWrite(p->pFd,
+        &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
+        p->iWriteOff + p->iBufStart
+    );
+  }
+  *piEof = (p->iWriteOff + p->iBufEnd);
+  sqlite3_free(p->aBuffer);
+  rc = p->eFWErr;
+  memset(p, 0, sizeof(PmaWriter));
+  return rc;
+}
+
+/*
+** Write value iVal encoded as a varint to the PMA. Return
+** SQLITE_OK if successful, or an SQLite error code if an error occurs.
+*/
+static void vdbePmaWriteVarint(PmaWriter *p, u64 iVal){
+  int nByte;
+  u8 aByte[10];
+  nByte = sqlite3PutVarint(aByte, iVal);
+  vdbePmaWriteBlob(p, aByte, nByte);
+}
+
+/*
+** Write the current contents of in-memory linked-list pList to a level-0
+** PMA in the temp file belonging to sub-task pTask. Return SQLITE_OK if
+** successful, or an SQLite error code otherwise.
+**
+** The format of a PMA is:
+**
+**     * A varint. This varint contains the total number of bytes of content
+**       in the PMA (not including the varint itself).
+**
+**     * One or more records packed end-to-end in order of ascending keys.
+**       Each record consists of a varint followed by a blob of data (the
+**       key). The varint is the number of bytes in the blob of data.
+*/
+static int vdbeSorterListToPMA(SortSubtask *pTask, SorterList *pList){
+  sqlite3 *db = pTask->pSorter->db;
+  int rc = SQLITE_OK;             /* Return code */
+  PmaWriter writer;               /* Object used to write to the file */
+
+#ifdef SQLITE_DEBUG
+  /* Set iSz to the expected size of file pTask->file after writing the PMA.
+  ** This is used by an assert() statement at the end of this function. */
+  i64 iSz = pList->szPMA + sqlite3VarintLen(pList->szPMA) + pTask->file.iEof;
+#endif
+
+  vdbeSorterWorkDebug(pTask, "enter");
+  memset(&writer, 0, sizeof(PmaWriter));
+  assert( pList->szPMA>0 );
+
+  /* If the first temporary PMA file has not been opened, open it now.
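+  ** (As a worked example of the PMA format described above: a list holding
+  ** the two keys "aa" (2 bytes) and "bcd" (3 bytes) is written as the 7
+  ** content bytes 02 'a' 'a' 03 'b' 'c' 'd', preceded by a varint holding
+  ** the value 7.)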
*/ + if( pTask->file.pFd==0 ){ + rc = vdbeSorterOpenTempFile(db, 0, &pTask->file.pFd); + assert( rc!=SQLITE_OK || pTask->file.pFd ); + assert( pTask->file.iEof==0 ); + assert( pTask->nPMA==0 ); + } + + /* Try to get the file to memory map */ + if( rc==SQLITE_OK ){ + vdbeSorterExtendFile(db, pTask->file.pFd, pTask->file.iEof+pList->szPMA+9); + } + + /* Sort the list */ + if( rc==SQLITE_OK ){ + rc = vdbeSorterSort(pTask, pList); + } + + if( rc==SQLITE_OK ){ + SorterRecord *p; + SorterRecord *pNext = 0; + + vdbePmaWriterInit(pTask->file.pFd, &writer, pTask->pSorter->pgsz, + pTask->file.iEof); + pTask->nPMA++; + vdbePmaWriteVarint(&writer, pList->szPMA); + for(p=pList->pList; p; p=pNext){ + pNext = p->u.pNext; + vdbePmaWriteVarint(&writer, p->nVal); + vdbePmaWriteBlob(&writer, SRVAL(p), p->nVal); + if( pList->aMemory==0 ) sqlite3_free(p); + } + pList->pList = p; + rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof); + } + + vdbeSorterWorkDebug(pTask, "exit"); + assert( rc!=SQLITE_OK || pList->pList==0 ); + assert( rc!=SQLITE_OK || pTask->file.iEof==iSz ); + return rc; +} + +/* +** Advance the MergeEngine to its next entry. +** Set *pbEof to true there is no next entry because +** the MergeEngine has reached the end of all its inputs. +** +** Return SQLITE_OK if successful or an error code if an error occurs. +*/ +static int vdbeMergeEngineStep( + MergeEngine *pMerger, /* The merge engine to advance to the next row */ + int *pbEof /* Set TRUE at EOF. Set false for more content */ +){ + int rc; + int iPrev = pMerger->aTree[1];/* Index of PmaReader to advance */ + SortSubtask *pTask = pMerger->pTask; + + /* Advance the current PmaReader */ + rc = vdbePmaReaderNext(&pMerger->aReadr[iPrev]); + + /* Update contents of aTree[] */ + if( rc==SQLITE_OK ){ + int i; /* Index of aTree[] to recalculate */ + PmaReader *pReadr1; /* First PmaReader to compare */ + PmaReader *pReadr2; /* Second PmaReader to compare */ + int bCached = 0; + + /* Find the first two PmaReaders to compare. The one that was just + ** advanced (iPrev) and the one next to it in the array. */ + pReadr1 = &pMerger->aReadr[(iPrev & 0xFFFE)]; + pReadr2 = &pMerger->aReadr[(iPrev | 0x0001)]; + + for(i=(pMerger->nTree+iPrev)/2; i>0; i=i/2){ + /* Compare pReadr1 and pReadr2. Store the result in variable iRes. */ + int iRes; + if( pReadr1->pFd==0 ){ + iRes = +1; + }else if( pReadr2->pFd==0 ){ + iRes = -1; + }else{ + iRes = pTask->xCompare(pTask, &bCached, + pReadr1->aKey, pReadr1->nKey, pReadr2->aKey, pReadr2->nKey + ); + } + + /* If pReadr1 contained the smaller value, set aTree[i] to its index. + ** Then set pReadr2 to the next PmaReader to compare to pReadr1. In this + ** case there is no cache of pReadr2 in pTask->pUnpacked, so set + ** pKey2 to point to the record belonging to pReadr2. + ** + ** Alternatively, if pReadr2 contains the smaller of the two values, + ** set aTree[i] to its index and update pReadr1. If vdbeSorterCompare() + ** was actually called above, then pTask->pUnpacked now contains + ** a value equivalent to pReadr2. So set pKey2 to NULL to prevent + ** vdbeSorterCompare() from decoding pReadr2 again. + ** + ** If the two values were equal, then the value from the oldest + ** PMA should be considered smaller. 
The VdbeSorter.aReadr[] array
+      ** is sorted from oldest to newest, so pReadr1 contains older values
+      ** than pReadr2 iff (pReadr1<pReadr2).  */
+      if( iRes<0 || (iRes==0 && pReadr1<pReadr2) ){
+        pMerger->aTree[i] = (int)(pReadr1 - pMerger->aReadr);
+        pReadr2 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ];
+        bCached = 0;
+      }else{
+        if( pReadr1->pFd ) bCached = 0;
+        pMerger->aTree[i] = (int)(pReadr2 - pMerger->aReadr);
+        pReadr1 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ];
+      }
+    }
+    *pbEof = (pMerger->aReadr[pMerger->aTree[1]].pFd==0);
+  }
+
+  return (rc==SQLITE_OK ? pTask->pUnpacked->errCode : rc);
+}
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** The main routine for background threads that write level-0 PMAs.
+*/
+static void *vdbeSorterFlushThread(void *pCtx){
+  SortSubtask *pTask = (SortSubtask*)pCtx;
+  int rc;                         /* Return code */
+  assert( pTask->bDone==0 );
+  rc = vdbeSorterListToPMA(pTask, &pTask->list);
+  pTask->bDone = 1;
+  return SQLITE_INT_TO_PTR(rc);
+}
+#endif /* SQLITE_MAX_WORKER_THREADS>0 */
+
+/*
+** Flush the current contents of VdbeSorter.list to a new PMA, possibly
+** using a background thread.
+*/
+static int vdbeSorterFlushPMA(VdbeSorter *pSorter){
+#if SQLITE_MAX_WORKER_THREADS==0
+  pSorter->bUsePMA = 1;
+  return vdbeSorterListToPMA(&pSorter->aTask[0], &pSorter->list);
+#else
+  int rc = SQLITE_OK;
+  int i;
+  SortSubtask *pTask = 0;    /* Thread context used to create new PMA */
+  int nWorker = (pSorter->nTask-1);
+
+  /* Set the flag to indicate that at least one PMA has been written.
+  ** Or will be, anyhow.  */
+  pSorter->bUsePMA = 1;
+
+  /* Select a sub-task to sort and flush the current list of in-memory
+  ** records to disk. If the sorter is running in multi-threaded mode,
+  ** round-robin between the first (pSorter->nTask-1) tasks. Except, if
+  ** the background thread from a sub-task's previous turn is still running,
+  ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy,
+  ** fall back to using the final sub-task. The first (pSorter->nTask-1)
+  ** sub-tasks are preferred as they use background threads - the final
+  ** sub-task uses the main thread.  */
+  for(i=0; i<nWorker; i++){
+    int iTest = (pSorter->iPrev + i + 1) % nWorker;
+    pTask = &pSorter->aTask[iTest];
+    if( pTask->bDone ){
+      rc = vdbeSorterJoinThread(pTask);
+    }
+    if( rc!=SQLITE_OK || pTask->pThread==0 ) break;
+  }
+
+  if( rc==SQLITE_OK ){
+    if( i==nWorker ){
+      /* Use the foreground thread for this operation */
+      rc = vdbeSorterListToPMA(&pSorter->aTask[nWorker], &pSorter->list);
+    }else{
+      /* Launch a background thread for this operation */
+      u8 *aMem = pTask->list.aMemory;
+      void *pCtx = (void*)pTask;
+
+      assert( pTask->pThread==0 && pTask->bDone==0 );
+      assert( pTask->list.pList==0 );
+      assert( pTask->list.aMemory==0 || pSorter->list.aMemory!=0 );
+
+      pSorter->iPrev = (u8)(pTask - pSorter->aTask);
+      pTask->list = pSorter->list;
+      pSorter->list.pList = 0;
+      pSorter->list.szPMA = 0;
+      if( aMem ){
+        pSorter->list.aMemory = aMem;
+        pSorter->nMemory = sqlite3MallocSize(aMem);
+      }else if( pSorter->list.aMemory ){
+        pSorter->list.aMemory = sqlite3Malloc(pSorter->nMemory);
+        if( !pSorter->list.aMemory ) return SQLITE_NOMEM_BKPT;
+      }
+
+      rc = vdbeSorterCreateThread(pTask, vdbeSorterFlushThread, pCtx);
+    }
+  }
+
+  return rc;
+#endif /* SQLITE_MAX_WORKER_THREADS!=0 */
+}
+
+/*
+** Add a record to the sorter.
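+** The serial type of the first key field is sniffed below: integer
+** serial types (1..6, 8 and 9) preserve the SORTER_TYPE_INTEGER bit,
+** odd serial types greater than 10 (text) preserve SORTER_TYPE_TEXT,
+** and anything else clears typeMask, disabling the specialised
+** comparators returned by vdbeSorterGetCompare().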
+*/ +SQLITE_PRIVATE int sqlite3VdbeSorterWrite( + const VdbeCursor *pCsr, /* Sorter cursor */ + Mem *pVal /* Memory cell containing record */ +){ + VdbeSorter *pSorter; + int rc = SQLITE_OK; /* Return Code */ + SorterRecord *pNew; /* New list element */ + int bFlush; /* True to flush contents of memory to PMA */ + int nReq; /* Bytes of memory required */ + int nPMA; /* Bytes of PMA space required */ + int t; /* serial type of first record field */ + + assert( pCsr->eCurType==CURTYPE_SORTER ); + pSorter = pCsr->uc.pSorter; + getVarint32((const u8*)&pVal->z[1], t); + if( t>0 && t<10 && t!=7 ){ + pSorter->typeMask &= SORTER_TYPE_INTEGER; + }else if( t>10 && (t & 0x01) ){ + pSorter->typeMask &= SORTER_TYPE_TEXT; + }else{ + pSorter->typeMask = 0; + } + + assert( pSorter ); + + /* Figure out whether or not the current contents of memory should be + ** flushed to a PMA before continuing. If so, do so. + ** + ** If using the single large allocation mode (pSorter->aMemory!=0), then + ** flush the contents of memory to a new PMA if (a) at least one value is + ** already in memory and (b) the new value will not fit in memory. + ** + ** Or, if using separate allocations for each record, flush the contents + ** of memory to a PMA if either of the following are true: + ** + ** * The total memory allocated for the in-memory list is greater + ** than (page-size * cache-size), or + ** + ** * The total memory allocated for the in-memory list is greater + ** than (page-size * 10) and sqlite3HeapNearlyFull() returns true. + */ + nReq = pVal->n + sizeof(SorterRecord); + nPMA = pVal->n + sqlite3VarintLen(pVal->n); + if( pSorter->mxPmaSize ){ + if( pSorter->list.aMemory ){ + bFlush = pSorter->iMemory && (pSorter->iMemory+nReq) > pSorter->mxPmaSize; + }else{ + bFlush = ( + (pSorter->list.szPMA > pSorter->mxPmaSize) + || (pSorter->list.szPMA > pSorter->mnPmaSize && sqlite3HeapNearlyFull()) + ); + } + if( bFlush ){ + rc = vdbeSorterFlushPMA(pSorter); + pSorter->list.szPMA = 0; + pSorter->iMemory = 0; + assert( rc!=SQLITE_OK || pSorter->list.pList==0 ); + } + } + + pSorter->list.szPMA += nPMA; + if( nPMA>pSorter->mxKeysize ){ + pSorter->mxKeysize = nPMA; + } + + if( pSorter->list.aMemory ){ + int nMin = pSorter->iMemory + nReq; + + if( nMin>pSorter->nMemory ){ + u8 *aNew; + int iListOff = (u8*)pSorter->list.pList - pSorter->list.aMemory; + int nNew = pSorter->nMemory * 2; + while( nNew < nMin ) nNew = nNew*2; + if( nNew > pSorter->mxPmaSize ) nNew = pSorter->mxPmaSize; + if( nNew < nMin ) nNew = nMin; + + aNew = sqlite3Realloc(pSorter->list.aMemory, nNew); + if( !aNew ) return SQLITE_NOMEM_BKPT; + pSorter->list.pList = (SorterRecord*)&aNew[iListOff]; + pSorter->list.aMemory = aNew; + pSorter->nMemory = nNew; + } + + pNew = (SorterRecord*)&pSorter->list.aMemory[pSorter->iMemory]; + pSorter->iMemory += ROUND8(nReq); + if( pSorter->list.pList ){ + pNew->u.iNext = (int)((u8*)(pSorter->list.pList) - pSorter->list.aMemory); + } + }else{ + pNew = (SorterRecord *)sqlite3Malloc(nReq); + if( pNew==0 ){ + return SQLITE_NOMEM_BKPT; + } + pNew->u.pNext = pSorter->list.pList; + } + + memcpy(SRVAL(pNew), pVal->z, pVal->n); + pNew->nVal = pVal->n; + pSorter->list.pList = pNew; + + return rc; +} + +/* +** Read keys from pIncr->pMerger and populate pIncr->aFile[1]. The format +** of the data stored in aFile[1] is the same as that used by regular PMAs, +** except that the number-of-bytes varint is omitted from the start. 
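+** In other words, aFile[1] begins directly with the first
+** (varint nKey, key) pair; readers rely on SorterFile.iEof to detect
+** the end of the data instead of a leading size header.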
+*/ +static int vdbeIncrPopulate(IncrMerger *pIncr){ + int rc = SQLITE_OK; + int rc2; + i64 iStart = pIncr->iStartOff; + SorterFile *pOut = &pIncr->aFile[1]; + SortSubtask *pTask = pIncr->pTask; + MergeEngine *pMerger = pIncr->pMerger; + PmaWriter writer; + assert( pIncr->bEof==0 ); + + vdbeSorterPopulateDebug(pTask, "enter"); + + vdbePmaWriterInit(pOut->pFd, &writer, pTask->pSorter->pgsz, iStart); + while( rc==SQLITE_OK ){ + int dummy; + PmaReader *pReader = &pMerger->aReadr[ pMerger->aTree[1] ]; + int nKey = pReader->nKey; + i64 iEof = writer.iWriteOff + writer.iBufEnd; + + /* Check if the output file is full or if the input has been exhausted. + ** In either case exit the loop. */ + if( pReader->pFd==0 ) break; + if( (iEof + nKey + sqlite3VarintLen(nKey))>(iStart + pIncr->mxSz) ) break; + + /* Write the next key to the output. */ + vdbePmaWriteVarint(&writer, nKey); + vdbePmaWriteBlob(&writer, pReader->aKey, nKey); + assert( pIncr->pMerger->pTask==pTask ); + rc = vdbeMergeEngineStep(pIncr->pMerger, &dummy); + } + + rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof); + if( rc==SQLITE_OK ) rc = rc2; + vdbeSorterPopulateDebug(pTask, "exit"); + return rc; +} + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** The main routine for background threads that populate aFile[1] of +** multi-threaded IncrMerger objects. +*/ +static void *vdbeIncrPopulateThread(void *pCtx){ + IncrMerger *pIncr = (IncrMerger*)pCtx; + void *pRet = SQLITE_INT_TO_PTR( vdbeIncrPopulate(pIncr) ); + pIncr->pTask->bDone = 1; + return pRet; +} + +/* +** Launch a background thread to populate aFile[1] of pIncr. +*/ +static int vdbeIncrBgPopulate(IncrMerger *pIncr){ + void *p = (void*)pIncr; + assert( pIncr->bUseThread ); + return vdbeSorterCreateThread(pIncr->pTask, vdbeIncrPopulateThread, p); +} +#endif + +/* +** This function is called when the PmaReader corresponding to pIncr has +** finished reading the contents of aFile[0]. Its purpose is to "refill" +** aFile[0] such that the PmaReader should start rereading it from the +** beginning. +** +** For single-threaded objects, this is accomplished by literally reading +** keys from pIncr->pMerger and repopulating aFile[0]. +** +** For multi-threaded objects, all that is required is to wait until the +** background thread is finished (if it is not already) and then swap +** aFile[0] and aFile[1] in place. If the contents of pMerger have not +** been exhausted, this function also launches a new background thread +** to populate the new aFile[1]. +** +** SQLITE_OK is returned on success, or an SQLite error code otherwise. +*/ +static int vdbeIncrSwap(IncrMerger *pIncr){ + int rc = SQLITE_OK; + +#if SQLITE_MAX_WORKER_THREADS>0 + if( pIncr->bUseThread ){ + rc = vdbeSorterJoinThread(pIncr->pTask); + + if( rc==SQLITE_OK ){ + SorterFile f0 = pIncr->aFile[0]; + pIncr->aFile[0] = pIncr->aFile[1]; + pIncr->aFile[1] = f0; + } + + if( rc==SQLITE_OK ){ + if( pIncr->aFile[0].iEof==pIncr->iStartOff ){ + pIncr->bEof = 1; + }else{ + rc = vdbeIncrBgPopulate(pIncr); + } + } + }else +#endif + { + rc = vdbeIncrPopulate(pIncr); + pIncr->aFile[0] = pIncr->aFile[1]; + if( pIncr->aFile[0].iEof==pIncr->iStartOff ){ + pIncr->bEof = 1; + } + } + + return rc; +} + +/* +** Allocate and return a new IncrMerger object to read data from pMerger. +** +** If an OOM condition is encountered, return NULL. In this case free the +** pMerger argument before returning. 
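+** (In this version the new object is in fact returned via *ppOut, with
+** SQLITE_OK or SQLITE_NOMEM as the return value; on OOM *ppOut is set
+** to NULL and pMerger is freed here on the caller's behalf.)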
+*/
+static int vdbeIncrMergerNew(
+  SortSubtask *pTask,     /* The thread that will be using the new IncrMerger */
+  MergeEngine *pMerger,   /* The MergeEngine that the IncrMerger will control */
+  IncrMerger **ppOut      /* Write the new IncrMerger here */
+){
+  int rc = SQLITE_OK;
+  IncrMerger *pIncr = *ppOut = (IncrMerger*)
+     (sqlite3FaultSim(100) ? 0 : sqlite3MallocZero(sizeof(*pIncr)));
+  if( pIncr ){
+    pIncr->pMerger = pMerger;
+    pIncr->pTask = pTask;
+    pIncr->mxSz = MAX(pTask->pSorter->mxKeysize+9,pTask->pSorter->mxPmaSize/2);
+    pTask->file2.iEof += pIncr->mxSz;
+  }else{
+    vdbeMergeEngineFree(pMerger);
+    rc = SQLITE_NOMEM_BKPT;
+  }
+  return rc;
+}
+
+#if SQLITE_MAX_WORKER_THREADS>0
+/*
+** Set the "use-threads" flag on object pIncr.
+*/
+static void vdbeIncrMergerSetThreads(IncrMerger *pIncr){
+  pIncr->bUseThread = 1;
+  pIncr->pTask->file2.iEof -= pIncr->mxSz;
+}
+#endif /* SQLITE_MAX_WORKER_THREADS>0 */
+
+
+
+/*
+** Recompute pMerger->aTree[iOut] by comparing the next keys on the
+** two PmaReaders that feed that entry. Neither of the PmaReaders
+** are advanced. This routine merely does the comparison.
+*/
+static void vdbeMergeEngineCompare(
+  MergeEngine *pMerger,  /* Merge engine containing PmaReaders to compare */
+  int iOut               /* Store the result in pMerger->aTree[iOut] */
+){
+  int i1;
+  int i2;
+  int iRes;
+  PmaReader *p1;
+  PmaReader *p2;
+
+  assert( iOut<pMerger->nTree && iOut>0 );
+
+  if( iOut>=(pMerger->nTree/2) ){
+    i1 = (iOut - pMerger->nTree/2) * 2;
+    i2 = i1 + 1;
+  }else{
+    i1 = pMerger->aTree[iOut*2];
+    i2 = pMerger->aTree[iOut*2+1];
+  }
+
+  p1 = &pMerger->aReadr[i1];
+  p2 = &pMerger->aReadr[i2];
+
+  if( p1->pFd==0 ){
+    iRes = i2;
+  }else if( p2->pFd==0 ){
+    iRes = i1;
+  }else{
+    SortSubtask *pTask = pMerger->pTask;
+    int bCached = 0;
+    int res;
+    assert( pTask->pUnpacked!=0 );  /* from vdbeSortSubtaskMain() */
+    res = pTask->xCompare(
+        pTask, &bCached, p1->aKey, p1->nKey, p2->aKey, p2->nKey
+    );
+    if( res<=0 ){
+      iRes = i1;
+    }else{
+      iRes = i2;
+    }
+  }
+
+  pMerger->aTree[iOut] = iRes;
+}
+
+/*
+** Allowed values for the eMode parameter to vdbeMergeEngineInit()
+** and vdbePmaReaderIncrMergeInit().
+**
+** Only INCRINIT_NORMAL is valid in single-threaded builds (when
+** SQLITE_MAX_WORKER_THREADS==0). The other values are only used
+** when there exists one or more separate worker threads.
+*/
+#define INCRINIT_NORMAL 0
+#define INCRINIT_TASK   1
+#define INCRINIT_ROOT   2
+
+/*
+** Forward reference required as the vdbeIncrMergeInit() and
+** vdbePmaReaderIncrInit() routines are called mutually recursively when
+** building a merge tree.
+*/
+static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode);
+
+/*
+** Initialize the MergeEngine object passed as the second argument. Once this
+** function returns, the first key of merged data may be read from the
+** MergeEngine object in the usual fashion.
+**
+** If argument eMode is INCRINIT_ROOT, then it is assumed that any IncrMerge
+** objects attached to the PmaReader objects that the merger reads from have
+** already been populated, but that they have not yet populated aFile[0] and
+** set the PmaReader objects up to read from it. In this case all that is
+** required is to call vdbePmaReaderNext() on each PmaReader to point it at
+** its first key.
+**
+** Otherwise, if eMode is any value other than INCRINIT_ROOT, then use
+** vdbePmaReaderIncrMergeInit() to initialize each PmaReader that feeds data
+** to pMerger.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int vdbeMergeEngineInit(
+  SortSubtask *pTask,             /* Thread that will run pMerger */
+  MergeEngine *pMerger,           /* MergeEngine to initialize */
+  int eMode                       /* One of the INCRINIT_XXX constants */
+){
+  int rc = SQLITE_OK;             /* Return code */
+  int i;                          /* For looping over PmaReader objects */
+  int nTree = pMerger->nTree;
+
+  /* eMode is always INCRINIT_NORMAL in single-threaded mode */
+  assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL );
+
+  /* Verify that the MergeEngine is assigned to a single thread */
+  assert( pMerger->pTask==0 );
+  pMerger->pTask = pTask;
+
+  for(i=0; i<nTree; i++){
+    if( SQLITE_MAX_WORKER_THREADS>0 && eMode==INCRINIT_ROOT ){
+      /* PmaReaders should be normally initialized in order, as if they are
+      ** reading from the same temp file this makes for more linear file IO.
+      ** However, in the INCRINIT_ROOT case, if PmaReader aReadr[nTask-1] is
+      ** in use it will block the vdbePmaReaderNext() call while it uses
+      ** the main thread to fill its buffer. So calling PmaReaderNext()
+      ** on this PmaReader before any of the multi-threaded PmaReaders takes
+      ** better advantage of multi-processor hardware. */
+      rc = vdbePmaReaderNext(&pMerger->aReadr[nTree-i-1]);
+    }else{
+      rc = vdbePmaReaderIncrInit(&pMerger->aReadr[i], INCRINIT_NORMAL);
+    }
+    if( rc!=SQLITE_OK ) return rc;
+  }
+
+  for(i=pMerger->nTree-1; i>0; i--){
+    vdbeMergeEngineCompare(pMerger, i);
+  }
+  return pTask->pUnpacked->errCode;
+}
+
+/*
+** The PmaReader passed as the first argument is guaranteed to be an
+** incremental-reader (pReadr->pIncr!=0). This function serves to open
+** and/or initialize the temp file related fields of the IncrMerge
+** object at (pReadr->pIncr).
+**
+** If argument eMode is set to INCRINIT_NORMAL, then all PmaReaders
+** in the sub-tree headed by pReadr are also initialized. Data is then
+** loaded into the buffers belonging to pReadr and it is set to point to
+** the first key in its range.
+**
+** If argument eMode is set to INCRINIT_TASK, then pReadr is guaranteed
+** to be a multi-threaded PmaReader and this function is being called in a
+** background thread. In this case all PmaReaders in the sub-tree are
+** initialized as for INCRINIT_NORMAL and the aFile[1] buffer belonging to
+** pReadr is populated. However, pReadr itself is not set up to point
+** to its first key. A call to vdbePmaReaderNext() is still required to do
+** that.
+**
+** The reason this function does not call vdbePmaReaderNext() immediately
+** in the INCRINIT_TASK case is that vdbePmaReaderNext() assumes that it has
+** to block on thread (pTask->thread) before accessing aFile[1]. But, since
+** this entire function is being run by thread (pTask->thread), that will
+** lead to the current background thread attempting to join itself.
+**
+** Finally, if argument eMode is set to INCRINIT_ROOT, it may be assumed
+** that pReadr->pIncr is a multi-threaded IncrMerge object, and that all
+** child-trees have already been initialized using IncrInit(INCRINIT_TASK).
+** In this case vdbePmaReaderNext() is called on all child PmaReaders and
+** the current PmaReader set to point to the first key in its range.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/ +static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){ + int rc = SQLITE_OK; + IncrMerger *pIncr = pReadr->pIncr; + SortSubtask *pTask = pIncr->pTask; + sqlite3 *db = pTask->pSorter->db; + + /* eMode is always INCRINIT_NORMAL in single-threaded mode */ + assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL ); + + rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode); + + /* Set up the required files for pIncr. A multi-theaded IncrMerge object + ** requires two temp files to itself, whereas a single-threaded object + ** only requires a region of pTask->file2. */ + if( rc==SQLITE_OK ){ + int mxSz = pIncr->mxSz; +#if SQLITE_MAX_WORKER_THREADS>0 + if( pIncr->bUseThread ){ + rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[0].pFd); + if( rc==SQLITE_OK ){ + rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[1].pFd); + } + }else +#endif + /*if( !pIncr->bUseThread )*/{ + if( pTask->file2.pFd==0 ){ + assert( pTask->file2.iEof>0 ); + rc = vdbeSorterOpenTempFile(db, pTask->file2.iEof, &pTask->file2.pFd); + pTask->file2.iEof = 0; + } + if( rc==SQLITE_OK ){ + pIncr->aFile[1].pFd = pTask->file2.pFd; + pIncr->iStartOff = pTask->file2.iEof; + pTask->file2.iEof += mxSz; + } + } + } + +#if SQLITE_MAX_WORKER_THREADS>0 + if( rc==SQLITE_OK && pIncr->bUseThread ){ + /* Use the current thread to populate aFile[1], even though this + ** PmaReader is multi-threaded. If this is an INCRINIT_TASK object, + ** then this function is already running in background thread + ** pIncr->pTask->thread. + ** + ** If this is the INCRINIT_ROOT object, then it is running in the + ** main VDBE thread. But that is Ok, as that thread cannot return + ** control to the VDBE or proceed with anything useful until the + ** first results are ready from this merger object anyway. + */ + assert( eMode==INCRINIT_ROOT || eMode==INCRINIT_TASK ); + rc = vdbeIncrPopulate(pIncr); + } +#endif + + if( rc==SQLITE_OK && (SQLITE_MAX_WORKER_THREADS==0 || eMode!=INCRINIT_TASK) ){ + rc = vdbePmaReaderNext(pReadr); + } + + return rc; +} + +#if SQLITE_MAX_WORKER_THREADS>0 +/* +** The main routine for vdbePmaReaderIncrMergeInit() operations run in +** background threads. +*/ +static void *vdbePmaReaderBgIncrInit(void *pCtx){ + PmaReader *pReader = (PmaReader*)pCtx; + void *pRet = SQLITE_INT_TO_PTR( + vdbePmaReaderIncrMergeInit(pReader,INCRINIT_TASK) + ); + pReader->pIncr->pTask->bDone = 1; + return pRet; +} +#endif + +/* +** If the PmaReader passed as the first argument is not an incremental-reader +** (if pReadr->pIncr==0), then this function is a no-op. Otherwise, it invokes +** the vdbePmaReaderIncrMergeInit() function with the parameters passed to +** this routine to initialize the incremental merge. +** +** If the IncrMerger object is multi-threaded (IncrMerger.bUseThread==1), +** then a background thread is launched to call vdbePmaReaderIncrMergeInit(). +** Or, if the IncrMerger is single threaded, the same function is called +** using the current thread. 
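+** In the threaded case the launched thread is joined later, by
+** vdbeSorterJoinThread() from within vdbeIncrSwap() (or by
+** vdbeSorterJoinAll()), before aFile[1] is touched by the reader.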
+*/
+static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode){
+  IncrMerger *pIncr = pReadr->pIncr;   /* Incremental merger */
+  int rc = SQLITE_OK;                  /* Return code */
+  if( pIncr ){
+#if SQLITE_MAX_WORKER_THREADS>0
+    assert( pIncr->bUseThread==0 || eMode==INCRINIT_TASK );
+    if( pIncr->bUseThread ){
+      void *pCtx = (void*)pReadr;
+      rc = vdbeSorterCreateThread(pIncr->pTask, vdbePmaReaderBgIncrInit, pCtx);
+    }else
+#endif
+    {
+      rc = vdbePmaReaderIncrMergeInit(pReadr, eMode);
+    }
+  }
+  return rc;
+}
+
+/*
+** Allocate a new MergeEngine object to merge the contents of nPMA level-0
+** PMAs from pTask->file. If no error occurs, set *ppOut to point to
+** the new object and return SQLITE_OK. Or, if an error does occur, set *ppOut
+** to NULL and return an SQLite error code.
+**
+** When this function is called, *piOffset is set to the offset of the
+** first PMA to read from pTask->file. Assuming no error occurs, it is
+** set to the offset immediately following the last byte of the last
+** PMA before returning. If an error does occur, then the final value of
+** *piOffset is undefined.
+*/
+static int vdbeMergeEngineLevel0(
+  SortSubtask *pTask,             /* Sorter task to read from */
+  int nPMA,                       /* Number of PMAs to read */
+  i64 *piOffset,                  /* IN/OUT: Readr offset in pTask->file */
+  MergeEngine **ppOut             /* OUT: New merge-engine */
+){
+  MergeEngine *pNew;              /* Merge engine to return */
+  i64 iOff = *piOffset;
+  int i;
+  int rc = SQLITE_OK;
+
+  *ppOut = pNew = vdbeMergeEngineNew(nPMA);
+  if( pNew==0 ) rc = SQLITE_NOMEM_BKPT;
+
+  for(i=0; i<nPMA && rc==SQLITE_OK; i++){
+    i64 nDummy = 0;
+    PmaReader *pReadr = &pNew->aReadr[i];
+    rc = vdbePmaReaderInit(pTask, &pTask->file, iOff, pReadr, &nDummy);
+    iOff = pReadr->iEof;
+  }
+
+  if( rc!=SQLITE_OK ){
+    vdbeMergeEngineFree(pNew);
+    *ppOut = 0;
+  }
+  *piOffset = iOff;
+  return rc;
+}
+
+/*
+** Return the depth of a tree comprising nPMA PMAs, assuming a fanout of
+** SORTER_MAX_MERGE_COUNT. The returned value does not include leaf nodes.
+**
+** i.e.
+**
+**   nPMA<=16    -> TreeDepth() == 0
+**   nPMA<=256   -> TreeDepth() == 1
+**   nPMA<=65536 -> TreeDepth() == 2
+*/
+static int vdbeSorterTreeDepth(int nPMA){
+  int nDepth = 0;
+  i64 nDiv = SORTER_MAX_MERGE_COUNT;
+  while( nDiv < (i64)nPMA ){
+    nDiv = nDiv * SORTER_MAX_MERGE_COUNT;
+    nDepth++;
+  }
+  return nDepth;
+}
+
+/*
+** pRoot is the root of an incremental merge-tree with depth nDepth (according
+** to vdbeSorterTreeDepth()). pLeaf is the iSeq'th leaf to be added to the
+** tree, counting from zero. This function adds pLeaf to the tree.
+**
+** If successful, SQLITE_OK is returned. If an error occurs, an SQLite error
+** code is returned and pLeaf is freed.
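+** For example, with SORTER_MAX_MERGE_COUNT==16 and nDepth==2, leaf
+** iSeq==21 descends through the root's sub-merger (21/16)%16 == 1 and
+** is attached at slot 21%16 == 5 of that sub-merger.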
+*/
+static int vdbeSorterAddToTree(
+  SortSubtask *pTask,             /* Task context */
+  int nDepth,                     /* Depth of tree according to TreeDepth() */
+  int iSeq,                       /* Sequence number of leaf within tree */
+  MergeEngine *pRoot,             /* Root of tree */
+  MergeEngine *pLeaf              /* Leaf to add to tree */
+){
+  int rc = SQLITE_OK;
+  int nDiv = 1;
+  int i;
+  MergeEngine *p = pRoot;
+  IncrMerger *pIncr;
+
+  rc = vdbeIncrMergerNew(pTask, pLeaf, &pIncr);
+
+  for(i=1; i<nDepth; i++){
+    nDiv = nDiv * SORTER_MAX_MERGE_COUNT;
+  }
+
+  for(i=1; i<nDepth && rc==SQLITE_OK; i++){
+    int iIter = (iSeq / nDiv) % SORTER_MAX_MERGE_COUNT;
+    PmaReader *pReadr = &p->aReadr[iIter];
+
+    if( pReadr->pIncr==0 ){
+      MergeEngine *pNew = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT);
+      if( pNew==0 ){
+        rc = SQLITE_NOMEM_BKPT;
+      }else{
+        rc = vdbeIncrMergerNew(pTask, pNew, &pReadr->pIncr);
+      }
+    }
+    if( rc==SQLITE_OK ){
+      p = pReadr->pIncr->pMerger;
+      nDiv = nDiv / SORTER_MAX_MERGE_COUNT;
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    p->aReadr[iSeq % SORTER_MAX_MERGE_COUNT].pIncr = pIncr;
+  }else{
+    vdbeIncrFree(pIncr);
+  }
+  return rc;
+}
+
+/*
+** This function is called as part of a SorterRewind() operation on a sorter
+** that has already written two or more level-0 PMAs to one or more temp
+** files. It builds a tree of MergeEngine/IncrMerger/PmaReader objects that
+** can be used to incrementally merge all PMAs on disk.
+**
+** If successful, SQLITE_OK is returned and *ppOut set to point to the
+** MergeEngine object at the root of the tree before returning. Or, if an
+** error occurs, an SQLite error code is returned and the final value
+** of *ppOut is undefined.
+*/
+static int vdbeSorterMergeTreeBuild(
+  VdbeSorter *pSorter,       /* The VDBE cursor that implements the sort */
+  MergeEngine **ppOut        /* Write the MergeEngine here */
+){
+  MergeEngine *pMain = 0;
+  int rc = SQLITE_OK;
+  int iTask;
+
+#if SQLITE_MAX_WORKER_THREADS>0
+  /* If the sorter uses more than one task, then create the top-level
+  ** MergeEngine here. This MergeEngine will read data from exactly
+  ** one PmaReader per sub-task.
*/
+  assert( pSorter->bUseThreads || pSorter->nTask==1 );
+  if( pSorter->nTask>1 ){
+    pMain = vdbeMergeEngineNew(pSorter->nTask);
+    if( pMain==0 ) rc = SQLITE_NOMEM_BKPT;
+  }
+#endif
+
+  for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){
+    SortSubtask *pTask = &pSorter->aTask[iTask];
+    assert( pTask->nPMA>0 || SQLITE_MAX_WORKER_THREADS>0 );
+    if( SQLITE_MAX_WORKER_THREADS==0 || pTask->nPMA ){
+      MergeEngine *pRoot = 0;     /* Root node of tree for this task */
+      int nDepth = vdbeSorterTreeDepth(pTask->nPMA);
+      i64 iReadOff = 0;
+
+      if( pTask->nPMA<=SORTER_MAX_MERGE_COUNT ){
+        rc = vdbeMergeEngineLevel0(pTask, pTask->nPMA, &iReadOff, &pRoot);
+      }else{
+        int i;
+        int iSeq = 0;
+        pRoot = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT);
+        if( pRoot==0 ) rc = SQLITE_NOMEM_BKPT;
+        for(i=0; i<pTask->nPMA && rc==SQLITE_OK; i += SORTER_MAX_MERGE_COUNT){
+          MergeEngine *pMerger = 0; /* New level-0 PMA merger */
+          int nReader;              /* Number of level-0 PMAs to merge */
+
+          nReader = MIN(pTask->nPMA - i, SORTER_MAX_MERGE_COUNT);
+          rc = vdbeMergeEngineLevel0(pTask, nReader, &iReadOff, &pMerger);
+          if( rc==SQLITE_OK ){
+            rc = vdbeSorterAddToTree(pTask, nDepth, iSeq++, pRoot, pMerger);
+          }
+        }
+      }
+
+      if( rc==SQLITE_OK ){
+#if SQLITE_MAX_WORKER_THREADS>0
+        if( pMain!=0 ){
+          rc = vdbeIncrMergerNew(pTask, pRoot, &pMain->aReadr[iTask].pIncr);
+        }else
+#endif
+        {
+          assert( pMain==0 );
+          pMain = pRoot;
+        }
+      }else{
+        vdbeMergeEngineFree(pRoot);
+      }
+    }
+  }
+
+  if( rc!=SQLITE_OK ){
+    vdbeMergeEngineFree(pMain);
+    pMain = 0;
+  }
+  *ppOut = pMain;
+  return rc;
+}
+
+/*
+** This function is called as part of an sqlite3VdbeSorterRewind() operation
+** on a sorter that has written two or more PMAs to temporary files. It sets
+** up either VdbeSorter.pMerger (for single threaded sorters) or pReader
+** (for multi-threaded sorters) so that it can be used to iterate through
+** all records stored in the sorter.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+static int vdbeSorterSetupMerge(VdbeSorter *pSorter){
+  int rc;                         /* Return code */
+  SortSubtask *pTask0 = &pSorter->aTask[0];
+  MergeEngine *pMain = 0;
+#if SQLITE_MAX_WORKER_THREADS
+  sqlite3 *db = pTask0->pSorter->db;
+  int i;
+  SorterCompare xCompare = vdbeSorterGetCompare(pSorter);
+  for(i=0; i<pSorter->nTask; i++){
+    pSorter->aTask[i].xCompare = xCompare;
+  }
+#endif
+
+  rc = vdbeSorterMergeTreeBuild(pSorter, &pMain);
+  if( rc==SQLITE_OK ){
+#if SQLITE_MAX_WORKER_THREADS
+    assert( pSorter->bUseThreads==0 || pSorter->nTask>1 );
+    if( pSorter->bUseThreads ){
+      int iTask;
+      PmaReader *pReadr = 0;
+      SortSubtask *pLast = &pSorter->aTask[pSorter->nTask-1];
+      rc = vdbeSortAllocUnpacked(pLast);
+      if( rc==SQLITE_OK ){
+        pReadr = (PmaReader*)sqlite3DbMallocZero(db, sizeof(PmaReader));
+        pSorter->pReader = pReadr;
+        if( pReadr==0 ) rc = SQLITE_NOMEM_BKPT;
+      }
+      if( rc==SQLITE_OK ){
+        rc = vdbeIncrMergerNew(pLast, pMain, &pReadr->pIncr);
+        if( rc==SQLITE_OK ){
+          vdbeIncrMergerSetThreads(pReadr->pIncr);
+          for(iTask=0; iTask<(pSorter->nTask-1); iTask++){
+            IncrMerger *pIncr;
+            if( (pIncr = pMain->aReadr[iTask].pIncr) ){
+              vdbeIncrMergerSetThreads(pIncr);
+              assert( pIncr->pTask!=pLast );
+            }
+          }
+          for(iTask=0; rc==SQLITE_OK && iTask<pSorter->nTask; iTask++){
+            /* Check that:
+            **
+            **   a) The incremental merge object is configured to use the
+            **      right task, and
+            **   b) If it is using task (nTask-1), it is configured to run
+            **      in single-threaded mode. This is important, as the
+            **      root merge (INCRINIT_ROOT) will be using the same task
+            **      object.
+ */ + PmaReader *p = &pMain->aReadr[iTask]; + assert( p->pIncr==0 || ( + (p->pIncr->pTask==&pSorter->aTask[iTask]) /* a */ + && (iTask!=pSorter->nTask-1 || p->pIncr->bUseThread==0) /* b */ + )); + rc = vdbePmaReaderIncrInit(p, INCRINIT_TASK); + } + } + pMain = 0; + } + if( rc==SQLITE_OK ){ + rc = vdbePmaReaderIncrMergeInit(pReadr, INCRINIT_ROOT); + } + }else +#endif + { + rc = vdbeMergeEngineInit(pTask0, pMain, INCRINIT_NORMAL); + pSorter->pMerger = pMain; + pMain = 0; + } + } + + if( rc!=SQLITE_OK ){ + vdbeMergeEngineFree(pMain); + } + return rc; +} + + +/* +** Once the sorter has been populated by calls to sqlite3VdbeSorterWrite, +** this function is called to prepare for iterating through the records +** in sorted order. +*/ +SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *pCsr, int *pbEof){ + VdbeSorter *pSorter; + int rc = SQLITE_OK; /* Return code */ + + assert( pCsr->eCurType==CURTYPE_SORTER ); + pSorter = pCsr->uc.pSorter; + assert( pSorter ); + + /* If no data has been written to disk, then do not do so now. Instead, + ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly + ** from the in-memory list. */ + if( pSorter->bUsePMA==0 ){ + if( pSorter->list.pList ){ + *pbEof = 0; + rc = vdbeSorterSort(&pSorter->aTask[0], &pSorter->list); + }else{ + *pbEof = 1; + } + return rc; + } + + /* Write the current in-memory list to a PMA. When the VdbeSorterWrite() + ** function flushes the contents of memory to disk, it immediately always + ** creates a new list consisting of a single key immediately afterwards. + ** So the list is never empty at this point. */ + assert( pSorter->list.pList ); + rc = vdbeSorterFlushPMA(pSorter); + + /* Join all threads */ + rc = vdbeSorterJoinAll(pSorter, rc); + + vdbeSorterRewindDebug("rewind"); + + /* Assuming no errors have occurred, set up a merger structure to + ** incrementally read and merge all remaining PMAs. */ + assert( pSorter->pReader==0 ); + if( rc==SQLITE_OK ){ + rc = vdbeSorterSetupMerge(pSorter); + *pbEof = 0; + } + + vdbeSorterRewindDebug("rewinddone"); + return rc; +} + +/* +** Advance to the next element in the sorter. +*/ +SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){ + VdbeSorter *pSorter; + int rc; /* Return code */ + + assert( pCsr->eCurType==CURTYPE_SORTER ); + pSorter = pCsr->uc.pSorter; + assert( pSorter->bUsePMA || (pSorter->pReader==0 && pSorter->pMerger==0) ); + if( pSorter->bUsePMA ){ + assert( pSorter->pReader==0 || pSorter->pMerger==0 ); + assert( pSorter->bUseThreads==0 || pSorter->pReader ); + assert( pSorter->bUseThreads==1 || pSorter->pMerger ); +#if SQLITE_MAX_WORKER_THREADS>0 + if( pSorter->bUseThreads ){ + rc = vdbePmaReaderNext(pSorter->pReader); + *pbEof = (pSorter->pReader->pFd==0); + }else +#endif + /*if( !pSorter->bUseThreads )*/ { + assert( pSorter->pMerger!=0 ); + assert( pSorter->pMerger->pTask==(&pSorter->aTask[0]) ); + rc = vdbeMergeEngineStep(pSorter->pMerger, pbEof); + } + }else{ + SorterRecord *pFree = pSorter->list.pList; + pSorter->list.pList = pFree->u.pNext; + pFree->u.pNext = 0; + if( pSorter->list.aMemory==0 ) vdbeSorterRecordFree(db, pFree); + *pbEof = !pSorter->list.pList; + rc = SQLITE_OK; + } + return rc; +} + +/* +** Return a pointer to a buffer owned by the sorter that contains the +** current key. 
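+** The returned buffer remains valid only until the sorter is advanced or
+** reset, which is why sqlite3VdbeSorterRowkey() below copies it into a
+** Mem cell rather than retaining the pointer.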
+*/
+static void *vdbeSorterRowkey(
+  const VdbeSorter *pSorter,      /* Sorter object */
+  int *pnKey                      /* OUT: Size of current key in bytes */
+){
+  void *pKey;
+  if( pSorter->bUsePMA ){
+    PmaReader *pReader;
+#if SQLITE_MAX_WORKER_THREADS>0
+    if( pSorter->bUseThreads ){
+      pReader = pSorter->pReader;
+    }else
+#endif
+    /*if( !pSorter->bUseThreads )*/{
+      pReader = &pSorter->pMerger->aReadr[pSorter->pMerger->aTree[1]];
+    }
+    *pnKey = pReader->nKey;
+    pKey = pReader->aKey;
+  }else{
+    *pnKey = pSorter->list.pList->nVal;
+    pKey = SRVAL(pSorter->list.pList);
+  }
+  return pKey;
+}
+
+/*
+** Copy the current sorter key into the memory cell pOut.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){
+  VdbeSorter *pSorter;
+  void *pKey; int nKey;           /* Sorter key to copy into pOut */
+
+  assert( pCsr->eCurType==CURTYPE_SORTER );
+  pSorter = pCsr->uc.pSorter;
+  pKey = vdbeSorterRowkey(pSorter, &nKey);
+  if( sqlite3VdbeMemClearAndResize(pOut, nKey) ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  pOut->n = nKey;
+  MemSetTypeFlag(pOut, MEM_Blob);
+  memcpy(pOut->z, pKey, nKey);
+
+  return SQLITE_OK;
+}
+
+/*
+** Compare the key in memory cell pVal with the key that the sorter cursor
+** passed as the first argument currently points to. For the purposes of
+** the comparison, ignore the rowid field at the end of each record.
+**
+** If the sorter cursor key contains any NULL values, consider it to be
+** less than pVal. Even if pVal also contains NULL values.
+**
+** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM).
+** Otherwise, set *pRes to a negative, zero or positive value if the
+** key in pVal is smaller than, equal to or larger than the current sorter
+** key.
+**
+** This routine forms the core of the OP_SorterCompare opcode, which in
+** turn is used to verify uniqueness when constructing a UNIQUE INDEX.
+*/
+SQLITE_PRIVATE int sqlite3VdbeSorterCompare(
+  const VdbeCursor *pCsr,         /* Sorter cursor */
+  Mem *pVal,                      /* Value to compare to current sorter key */
+  int nKeyCol,                    /* Compare this many columns */
+  int *pRes                       /* OUT: Result of comparison */
+){
+  VdbeSorter *pSorter;
+  UnpackedRecord *r2;
+  KeyInfo *pKeyInfo;
+  int i;
+  void *pKey; int nKey;           /* Sorter key to compare pVal with */
+
+  assert( pCsr->eCurType==CURTYPE_SORTER );
+  pSorter = pCsr->uc.pSorter;
+  r2 = pSorter->pUnpacked;
+  pKeyInfo = pCsr->pKeyInfo;
+  if( r2==0 ){
+    r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
+    if( r2==0 ) return SQLITE_NOMEM_BKPT;
+    r2->nField = nKeyCol;
+  }
+  assert( r2->nField==nKeyCol );
+
+  pKey = vdbeSorterRowkey(pSorter, &nKey);
+  sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, r2);
+  for(i=0; i<nKeyCol; i++){
+    if( r2->aMem[i].flags & MEM_Null ){
+      *pRes = -1;
+      return SQLITE_OK;
+    }
+  }
+
+  *pRes = sqlite3VdbeRecordCompare(pVal->n, pVal->z, r2);
+  return SQLITE_OK;
+}
+
+/************** End of vdbesort.c ********************************************/
+/************** Begin file memjournal.c **************************************/
+/*
+** 2008 October 7
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains code used to implement an in-memory rollback journal.
+** The in-memory rollback journal is used to journal transactions for +** ":memory:" databases and when the journal_mode=MEMORY pragma is used. +** +** Update: The in-memory journal is also used to temporarily cache +** smaller journals that are not critical for power-loss recovery. +** For example, statement journals that are not too big will be held +** entirely in memory, thus reducing the number of file I/O calls, and +** more importantly, reducing temporary file creation events. If these +** journals become too large for memory, they are spilled to disk. But +** in the common case, they are usually small and no file I/O needs to +** occur. +*/ +/* #include "sqliteInt.h" */ + +/* Forward references to internal structures */ +typedef struct MemJournal MemJournal; +typedef struct FilePoint FilePoint; +typedef struct FileChunk FileChunk; + +/* +** The rollback journal is composed of a linked list of these structures. +** +** The zChunk array is always at least 8 bytes in size - usually much more. +** Its actual size is stored in the MemJournal.nChunkSize variable. +*/ +struct FileChunk { + FileChunk *pNext; /* Next chunk in the journal */ + u8 zChunk[8]; /* Content of this chunk */ +}; + +/* +** By default, allocate this many bytes of memory for each FileChunk object. +*/ +#define MEMJOURNAL_DFLT_FILECHUNKSIZE 1024 + +/* +** For chunk size nChunkSize, return the number of bytes that should +** be allocated for each FileChunk structure. +*/ +#define fileChunkSize(nChunkSize) (sizeof(FileChunk) + ((nChunkSize)-8)) + +/* +** An instance of this object serves as a cursor into the rollback journal. +** The cursor can be either for reading or writing. +*/ +struct FilePoint { + sqlite3_int64 iOffset; /* Offset from the beginning of the file */ + FileChunk *pChunk; /* Specific chunk into which cursor points */ +}; + +/* +** This structure is a subclass of sqlite3_file. Each open memory-journal +** is an instance of this class. +*/ +struct MemJournal { + const sqlite3_io_methods *pMethod; /* Parent class. MUST BE FIRST */ + int nChunkSize; /* In-memory chunk-size */ + + int nSpill; /* Bytes of data before flushing */ + int nSize; /* Bytes of data currently in memory */ + FileChunk *pFirst; /* Head of in-memory chunk-list */ + FilePoint endpoint; /* Pointer to the end of the file */ + FilePoint readpoint; /* Pointer to the end of the last xRead() */ + + int flags; /* xOpen flags */ + sqlite3_vfs *pVfs; /* The "real" underlying VFS */ + const char *zJournal; /* Name of the journal file */ +}; + +/* +** Read data from the in-memory journal file. This is the implementation +** of the sqlite3_vfs.xRead method. 
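+** +** As a worked example of the chunk arithmetic below: if nChunkSize is 1024, +** a read at iOfst==1500 walks past the first chunk (iOff advances to 1024) +** and then begins copying at iChunkOffset = 1500%1024 = 476 within the +** second chunk.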
+*/ +static int memjrnlRead( + sqlite3_file *pJfd, /* The journal file from which to read */ + void *zBuf, /* Put the results here */ + int iAmt, /* Number of bytes to read */ + sqlite_int64 iOfst /* Begin reading at this offset */ +){ + MemJournal *p = (MemJournal *)pJfd; + u8 *zOut = zBuf; + int nRead = iAmt; + int iChunkOffset; + FileChunk *pChunk; + +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + if( (iAmt+iOfst)>p->endpoint.iOffset ){ + return SQLITE_IOERR_SHORT_READ; + } +#endif + + assert( (iAmt+iOfst)<=p->endpoint.iOffset ); + assert( p->readpoint.iOffset==0 || p->readpoint.pChunk!=0 ); + if( p->readpoint.iOffset!=iOfst || iOfst==0 ){ + sqlite3_int64 iOff = 0; + for(pChunk=p->pFirst; + ALWAYS(pChunk) && (iOff+p->nChunkSize)<=iOfst; + pChunk=pChunk->pNext + ){ + iOff += p->nChunkSize; + } + }else{ + pChunk = p->readpoint.pChunk; + assert( pChunk!=0 ); + } + + iChunkOffset = (int)(iOfst%p->nChunkSize); + do { + int iSpace = p->nChunkSize - iChunkOffset; + int nCopy = MIN(nRead, (p->nChunkSize - iChunkOffset)); + memcpy(zOut, (u8*)pChunk->zChunk + iChunkOffset, nCopy); + zOut += nCopy; + nRead -= iSpace; + iChunkOffset = 0; + } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 ); + p->readpoint.iOffset = pChunk ? iOfst+iAmt : 0; + p->readpoint.pChunk = pChunk; + + return SQLITE_OK; +} + +/* +** Free the list of FileChunk structures headed at MemJournal.pFirst. +*/ +static void memjrnlFreeChunks(MemJournal *p){ + FileChunk *pIter; + FileChunk *pNext; + for(pIter=p->pFirst; pIter; pIter=pNext){ + pNext = pIter->pNext; + sqlite3_free(pIter); + } + p->pFirst = 0; +} + +/* +** Flush the contents of memory to a real file on disk. +*/ +static int memjrnlCreateFile(MemJournal *p){ + int rc; + sqlite3_file *pReal = (sqlite3_file*)p; + MemJournal copy = *p; + + memset(p, 0, sizeof(MemJournal)); + rc = sqlite3OsOpen(copy.pVfs, copy.zJournal, pReal, copy.flags, 0); + if( rc==SQLITE_OK ){ + int nChunk = copy.nChunkSize; + i64 iOff = 0; + FileChunk *pIter; + for(pIter=copy.pFirst; pIter; pIter=pIter->pNext){ + if( iOff + nChunk > copy.endpoint.iOffset ){ + nChunk = copy.endpoint.iOffset - iOff; + } + rc = sqlite3OsWrite(pReal, (u8*)pIter->zChunk, nChunk, iOff); + if( rc ) break; + iOff += nChunk; + } + if( rc==SQLITE_OK ){ + /* No error has occurred. Free the in-memory buffers. */ + memjrnlFreeChunks(&copy); + } + } + if( rc!=SQLITE_OK ){ + /* If an error occurred while creating or writing to the file, restore + ** the original before returning. This way, SQLite uses the in-memory + ** journal data to roll back changes made to the internal page-cache + ** before this function was called. */ + sqlite3OsClose(pReal); + *p = copy; + } + return rc; +} + + +/* +** Write data to the file. +*/ +static int memjrnlWrite( + sqlite3_file *pJfd, /* The journal file into which to write */ + const void *zBuf, /* Take data to be written from here */ + int iAmt, /* Number of bytes to write */ + sqlite_int64 iOfst /* Begin writing at this offset into the file */ +){ + MemJournal *p = (MemJournal *)pJfd; + int nWrite = iAmt; + u8 *zWrite = (u8 *)zBuf; + + /* If the file should be created now, create it and write the new data + ** into the file on disk. */ + if( p->nSpill>0 && (iAmt+iOfst)>p->nSpill ){ + int rc = memjrnlCreateFile(p); + if( rc==SQLITE_OK ){ + rc = sqlite3OsWrite(pJfd, zBuf, iAmt, iOfst); + } + return rc; + } + + /* If the contents of this write should be stored in memory */ + else{ + /* An in-memory journal file should only ever be appended to. Random + ** access writes are not required.
The only exception to this is when + ** the in-memory journal is being used by a connection using the + ** atomic-write optimization. In this case the first 28 bytes of the + ** journal file may be written as part of committing the transaction. */ + assert( iOfst==p->endpoint.iOffset || iOfst==0 ); +#ifdef SQLITE_ENABLE_ATOMIC_WRITE + if( iOfst==0 && p->pFirst ){ + assert( p->nChunkSize>iAmt ); + memcpy((u8*)p->pFirst->zChunk, zBuf, iAmt); + }else +#else + assert( iOfst>0 || p->pFirst==0 ); +#endif + { + while( nWrite>0 ){ + FileChunk *pChunk = p->endpoint.pChunk; + int iChunkOffset = (int)(p->endpoint.iOffset%p->nChunkSize); + int iSpace = MIN(nWrite, p->nChunkSize - iChunkOffset); + + if( iChunkOffset==0 ){ + /* New chunk is required to extend the file. */ + FileChunk *pNew = sqlite3_malloc(fileChunkSize(p->nChunkSize)); + if( !pNew ){ + return SQLITE_IOERR_NOMEM_BKPT; + } + pNew->pNext = 0; + if( pChunk ){ + assert( p->pFirst ); + pChunk->pNext = pNew; + }else{ + assert( !p->pFirst ); + p->pFirst = pNew; + } + p->endpoint.pChunk = pNew; + } + + memcpy((u8*)p->endpoint.pChunk->zChunk + iChunkOffset, zWrite, iSpace); + zWrite += iSpace; + nWrite -= iSpace; + p->endpoint.iOffset += iSpace; + } + p->nSize = iAmt + iOfst; + } + } + + return SQLITE_OK; +} + +/* +** Truncate the file. +** +** If the journal file is already on disk, truncate it there. Or, if it +** is still in main memory but is being truncated to zero bytes in size, +** simply free the in-memory chunks and reset the file to empty. +*/ +static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ + MemJournal *p = (MemJournal *)pJfd; + if( ALWAYS(size==0) ){ + memjrnlFreeChunks(p); + p->nSize = 0; + p->endpoint.pChunk = 0; + p->endpoint.iOffset = 0; + p->readpoint.pChunk = 0; + p->readpoint.iOffset = 0; + } + return SQLITE_OK; +} + +/* +** Close the file. +*/ +static int memjrnlClose(sqlite3_file *pJfd){ + MemJournal *p = (MemJournal *)pJfd; + memjrnlFreeChunks(p); + return SQLITE_OK; +} + +/* +** Sync the file. +** +** If the real file has been created, call its xSync method. Otherwise, +** syncing an in-memory journal is a no-op. +*/ +static int memjrnlSync(sqlite3_file *pJfd, int flags){ + UNUSED_PARAMETER2(pJfd, flags); + return SQLITE_OK; +} + +/* +** Query the size of the file in bytes. +*/ +static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ + MemJournal *p = (MemJournal *)pJfd; + *pSize = (sqlite_int64) p->endpoint.iOffset; + return SQLITE_OK; +} + +/* +** Table of methods for MemJournal sqlite3_file object. +*/ +static const struct sqlite3_io_methods MemJournalMethods = { + 1, /* iVersion */ + memjrnlClose, /* xClose */ + memjrnlRead, /* xRead */ + memjrnlWrite, /* xWrite */ + memjrnlTruncate, /* xTruncate */ + memjrnlSync, /* xSync */ + memjrnlFileSize, /* xFileSize */ + 0, /* xLock */ + 0, /* xUnlock */ + 0, /* xCheckReservedLock */ + 0, /* xFileControl */ + 0, /* xSectorSize */ + 0, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0, /* xShmUnmap */ + 0, /* xFetch */ + 0 /* xUnfetch */ +}; + +/* +** Open a journal file. +** +** The behaviour of the journal file depends on the value of parameter +** nSpill. If nSpill is 0, then the journal file is always created and +** accessed using the underlying VFS. If nSpill is less than zero, then +** all content is always stored in main-memory. Finally, if nSpill is a +** positive value, then the journal file is initially created in-memory +** but may be flushed to disk later on.
In this case the journal file is +** flushed to disk either when it grows larger than nSpill bytes in size, +** or when sqlite3JournalCreate() is called. +*/ +SQLITE_PRIVATE int sqlite3JournalOpen( + sqlite3_vfs *pVfs, /* The VFS to use for actual file I/O */ + const char *zName, /* Name of the journal file */ + sqlite3_file *pJfd, /* Preallocated, blank file handle */ + int flags, /* Opening flags */ + int nSpill /* Bytes buffered before opening the file */ +){ + MemJournal *p = (MemJournal*)pJfd; + + /* Zero the file-handle object. If nSpill was passed zero, initialize + ** it using the sqlite3OsOpen() function of the underlying VFS. In this + ** case none of the code in this module is executed as a result of calls + ** made on the journal file-handle. */ + memset(p, 0, sizeof(MemJournal)); + if( nSpill==0 ){ + return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0); + } + + if( nSpill>0 ){ + p->nChunkSize = nSpill; + }else{ + p->nChunkSize = 8 + MEMJOURNAL_DFLT_FILECHUNKSIZE - sizeof(FileChunk); + assert( MEMJOURNAL_DFLT_FILECHUNKSIZE==fileChunkSize(p->nChunkSize) ); + } + + p->pMethod = (const sqlite3_io_methods*)&MemJournalMethods; + p->nSpill = nSpill; + p->flags = flags; + p->zJournal = zName; + p->pVfs = pVfs; + return SQLITE_OK; +} + +/* +** Open an in-memory journal file. +*/ +SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *pJfd){ + sqlite3JournalOpen(0, 0, pJfd, 0, -1); +} + +#ifdef SQLITE_ENABLE_ATOMIC_WRITE +/* +** If the argument p points to a MemJournal structure that is not an +** in-memory-only journal file (i.e. is one that was opened with a +ve +** nSpill parameter), and the underlying file has not yet been created, +** create it now. +*/ +SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *p){ + int rc = SQLITE_OK; + if( p->pMethods==&MemJournalMethods && ((MemJournal*)p)->nSpill>0 ){ + rc = memjrnlCreateFile((MemJournal*)p); + } + return rc; +} +#endif + +/* +** The file-handle passed as the only argument is open on a journal file. +** Return true if this "journal file" is currently stored in heap memory, +** or false otherwise. +*/ +SQLITE_PRIVATE int sqlite3JournalIsInMemory(sqlite3_file *p){ + return p->pMethods==&MemJournalMethods; +} + +/* +** Return the number of bytes required to store a JournalFile that uses vfs +** pVfs to create the underlying on-disk files. +*/ +SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){ + return MAX(pVfs->szOsFile, (int)sizeof(MemJournal)); +} + +/************** End of memjournal.c ******************************************/ +/************** Begin file walker.c ******************************************/ +/* +** 2008 August 16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains routines used for walking the parser tree for +** an SQL statement. +*/ +/* #include "sqliteInt.h" */ +/* #include <stdlib.h> */ +/* #include <string.h> */ + + +/* +** Walk an expression tree. Invoke the callback once for each node +** of the expression, while descending. (In other words, the callback +** is invoked before visiting children.) +** +** The return value from the callback should be one of the WRC_* +** constants to specify how to proceed with the walk. +** +** WRC_Continue Continue descending down the tree.
+** +** WRC_Prune Do not descend into child nodes. But allow +** the walk to continue with sibling nodes. +** +** WRC_Abort Do no more callbacks. Unwind the stack and +** return the top-level walk call. +** +** The return value from this routine is WRC_Abort to abandon the tree walk +** and WRC_Continue to continue. +*/ +static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ + int rc; + testcase( ExprHasProperty(pExpr, EP_TokenOnly) ); + testcase( ExprHasProperty(pExpr, EP_Reduced) ); + rc = pWalker->xExprCallback(pWalker, pExpr); + if( rc || ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){ + return rc & WRC_Abort; + } + if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort; + if( pExpr->pRight && walkExpr(pWalker, pExpr->pRight) ) return WRC_Abort; + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort; + }else if( pExpr->x.pList ){ + if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort; + } + return WRC_Continue; +} +SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){ + return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue; +} + +/* +** Call sqlite3WalkExpr() for every expression in list p or until +** an abort request is seen. +*/ +SQLITE_PRIVATE int sqlite3WalkExprList(Walker *pWalker, ExprList *p){ + int i; + struct ExprList_item *pItem; + if( p ){ + for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){ + if( sqlite3WalkExpr(pWalker, pItem->pExpr) ) return WRC_Abort; + } + } + return WRC_Continue; +} + +/* +** Walk all expressions associated with SELECT statement p. Do +** not invoke the SELECT callback on p, but do (of course) invoke +** any expr callbacks and SELECT callbacks that come from subqueries. +** Return WRC_Abort or WRC_Continue. +*/ +SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ + if( sqlite3WalkExprList(pWalker, p->pEList) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pWhere) ) return WRC_Abort; + if( sqlite3WalkExprList(pWalker, p->pGroupBy) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pHaving) ) return WRC_Abort; + if( sqlite3WalkExprList(pWalker, p->pOrderBy) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pLimit) ) return WRC_Abort; + if( sqlite3WalkExpr(pWalker, p->pOffset) ) return WRC_Abort; + return WRC_Continue; +} + +/* +** Walk the parse trees associated with all subqueries in the +** FROM clause of SELECT statement p. Do not invoke the select +** callback on p, but do invoke it on each FROM clause subquery +** and on any subqueries further down in the tree. Return +** WRC_Abort or WRC_Continue; +*/ +SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ + SrcList *pSrc; + int i; + struct SrcList_item *pItem; + + pSrc = p->pSrc; + if( ALWAYS(pSrc) ){ + for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ + if( sqlite3WalkSelect(pWalker, pItem->pSelect) ){ + return WRC_Abort; + } + if( pItem->fg.isTabFunc + && sqlite3WalkExprList(pWalker, pItem->u1.pFuncArg) + ){ + return WRC_Abort; + } + } + } + return WRC_Continue; +} + +/* +** Call sqlite3WalkExpr() for every expression in Select statement p. +** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and +** on the compound select chain, p->pPrior. +** +** If it is not NULL, the xSelectCallback() callback is invoked before +** the walk of the expressions and FROM clause. The xSelectCallback2() +** method, if it is not NULL, is invoked following the walk of the +** expressions and FROM clause. 
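+** +** A typical caller zeroes a Walker, fills in the callbacks, and starts the +** walk. A minimal sketch (xMyExprCb and xMySelectCb are hypothetical +** callbacks, not part of this file): +** +** Walker w; +** memset(&w, 0, sizeof(w)); +** w.xExprCallback = xMyExprCb; +** w.xSelectCallback = xMySelectCb; +** w.xSelectCallback2 = 0; +** sqlite3WalkSelect(&w, pSelect);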
+** +** Return WRC_Continue under normal conditions. Return WRC_Abort if +** there is an abort request. +** +** If the Walker does not have an xSelectCallback() then this routine +** is a no-op returning WRC_Continue. +*/ +SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){ + int rc; + if( p==0 || (pWalker->xSelectCallback==0 && pWalker->xSelectCallback2==0) ){ + return WRC_Continue; + } + rc = WRC_Continue; + pWalker->walkerDepth++; + while( p ){ + if( pWalker->xSelectCallback ){ + rc = pWalker->xSelectCallback(pWalker, p); + if( rc ) break; + } + if( sqlite3WalkSelectExpr(pWalker, p) + || sqlite3WalkSelectFrom(pWalker, p) + ){ + pWalker->walkerDepth--; + return WRC_Abort; + } + if( pWalker->xSelectCallback2 ){ + pWalker->xSelectCallback2(pWalker, p); + } + p = p->pPrior; + } + pWalker->walkerDepth--; + return rc & WRC_Abort; +} + +/************** End of walker.c **********************************************/ +/************** Begin file resolve.c *****************************************/ +/* +** 2008 August 18 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains routines used for walking the parser tree and +** resolve all identifiers by associating them with a particular +** table and column. +*/ +/* #include "sqliteInt.h" */ + +/* +** Walk the expression tree pExpr and increase the aggregate function +** depth (the Expr.op2 field) by N on every TK_AGG_FUNCTION node. +** This needs to occur when copying a TK_AGG_FUNCTION node from an +** outer query into an inner subquery. +** +** incrAggFunctionDepth(pExpr,n) is the main routine. incrAggDepth(..) +** is a helper function - a callback for the tree walker. +*/ +static int incrAggDepth(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.n; + return WRC_Continue; +} +static void incrAggFunctionDepth(Expr *pExpr, int N){ + if( N>0 ){ + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = incrAggDepth; + w.u.n = N; + sqlite3WalkExpr(&w, pExpr); + } +} + +/* +** Turn the pExpr expression into an alias for the iCol-th column of the +** result set in pEList. +** +** If the reference is followed by a COLLATE operator, then make sure +** the COLLATE operator is preserved. For example: +** +** SELECT a+b, c+d FROM t1 ORDER BY 1 COLLATE nocase; +** +** Should be transformed into: +** +** SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase; +** +** The nSubquery parameter specifies how many levels of subquery the +** alias is removed from the original expression. The usual value is +** zero but it might be more if the alias is contained within a subquery +** of the original expression. The Expr.op2 field of TK_AGG_FUNCTION +** structures must be increased by the nSubquery amount. +*/ +static void resolveAlias( + Parse *pParse, /* Parsing context */ + ExprList *pEList, /* A result set */ + int iCol, /* A column in the result set. 
0..pEList->nExpr-1 */ + Expr *pExpr, /* Transform this into an alias to the result set */ + const char *zType, /* "GROUP" or "ORDER" or "" */ + int nSubquery /* Number of subqueries that the label is moving */ +){ + Expr *pOrig; /* The iCol-th column of the result set */ + Expr *pDup; /* Copy of pOrig */ + sqlite3 *db; /* The database connection */ + + assert( iCol>=0 && iCol<pEList->nExpr ); + pOrig = pEList->a[iCol].pExpr; + assert( pOrig!=0 ); + db = pParse->db; + pDup = sqlite3ExprDup(db, pOrig, 0); + if( pDup==0 ) return; + if( zType[0]!='G' ) incrAggFunctionDepth(pDup, nSubquery); + if( pExpr->op==TK_COLLATE ){ + pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken); + } + ExprSetProperty(pDup, EP_Alias); + + /* Before calling sqlite3ExprDelete(), set the EP_Static flag. This + ** prevents ExprDelete() from deleting the Expr structure itself, + ** allowing it to be repopulated by the memcpy() on the following line. + ** The pExpr->u.zToken might point into memory that will be freed by the + ** sqlite3DbFree(db, pDup) on the last line of this block, so be sure to + ** make a copy of the token before doing the sqlite3DbFree(). + */ + ExprSetProperty(pExpr, EP_Static); + sqlite3ExprDelete(db, pExpr); + memcpy(pExpr, pDup, sizeof(*pExpr)); + if( !ExprHasProperty(pExpr, EP_IntValue) && pExpr->u.zToken!=0 ){ + assert( (pExpr->flags & (EP_Reduced|EP_TokenOnly))==0 ); + pExpr->u.zToken = sqlite3DbStrDup(db, pExpr->u.zToken); + pExpr->flags |= EP_MemToken; + } + sqlite3DbFree(db, pDup); +} + + +/* +** Return TRUE if the name zCol occurs anywhere in the USING clause. +** +** Return FALSE if the USING clause is NULL or if it does not contain +** zCol. +*/ +static int nameInUsingClause(IdList *pUsing, const char *zCol){ + if( pUsing ){ + int k; + for(k=0; k<pUsing->nId; k++){ + if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ) return 1; + } + } + return 0; +} + +/* +** Subqueries store the original database, table and column names for their +** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN". +** Check to see if the zSpan given to this routine matches the zDb, zTab, +** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will +** match anything. +*/ +SQLITE_PRIVATE int sqlite3MatchSpanName( + const char *zSpan, + const char *zCol, + const char *zTab, + const char *zDb +){ + int n; + for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} + if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){ + return 0; + } + zSpan += n+1; + for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} + if( zTab && (sqlite3StrNICmp(zSpan, zTab, n)!=0 || zTab[n]!=0) ){ + return 0; + } + zSpan += n+1; + if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){ + return 0; + } + return 1; +} + +/* +** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up +** that name in the set of source tables in pSrcList and make the pExpr +** expression node refer back to that source column. The following changes +** are made to pExpr: +** +** pExpr->iDb Set the index in db->aDb[] of the database X +** (even if X is implied). +** pExpr->iTable Set to the cursor number for the table obtained +** from pSrcList. +** pExpr->pTab Points to the Table structure of X.Y (even if +** X and/or Y are implied.) +** pExpr->iColumn Set to the column number within the table. +** pExpr->op Set to TK_COLUMN. +** pExpr->pLeft Any expression this points to is deleted +** pExpr->pRight Any expression this points to is deleted. +** +** The zDb variable is the name of the database (the "X").
This value may be +** NULL meaning that name is of the form Y.Z or Z. Any available database +** can be used. The zTable variable is the name of the table (the "Y"). This +** value can be NULL if zDb is also NULL. If zTable is NULL it +** means that the form of the name is Z and that columns from any table +** can be used. +** +** If the name cannot be resolved unambiguously, leave an error message +** in pParse and return WRC_Abort. Return WRC_Prune on success. +*/ +static int lookupName( + Parse *pParse, /* The parsing context */ + const char *zDb, /* Name of the database containing table, or NULL */ + const char *zTab, /* Name of table containing column, or NULL */ + const char *zCol, /* Name of the column. */ + NameContext *pNC, /* The name context used to resolve the name */ + Expr *pExpr /* Make this EXPR node point to the selected column */ +){ + int i, j; /* Loop counters */ + int cnt = 0; /* Number of matching column names */ + int cntTab = 0; /* Number of matching table names */ + int nSubquery = 0; /* How many levels of subquery */ + sqlite3 *db = pParse->db; /* The database connection */ + struct SrcList_item *pItem; /* Use for looping over pSrcList items */ + struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ + NameContext *pTopNC = pNC; /* First namecontext in the list */ + Schema *pSchema = 0; /* Schema of the expression */ + int isTrigger = 0; /* True if resolved to a trigger column */ + Table *pTab = 0; /* Table holding the row */ + Column *pCol; /* A column of pTab */ + + assert( pNC ); /* the name context cannot be NULL. */ + assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ + assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); + + /* Initialize the node to no-match */ + pExpr->iTable = -1; + pExpr->pTab = 0; + ExprSetVVAProperty(pExpr, EP_NoReduce); + + /* Translate the schema name in zDb into a pointer to the corresponding + ** schema. If not found, pSchema will remain NULL and nothing will match + ** resulting in an appropriate error message toward the end of this routine + */ + if( zDb ){ + testcase( pNC->ncFlags & NC_PartIdx ); + testcase( pNC->ncFlags & NC_IsCheck ); + if( (pNC->ncFlags & (NC_PartIdx|NC_IsCheck))!=0 ){ + /* Silently ignore database qualifiers inside CHECK constraints and + ** partial indices. Do not raise errors because that might break + ** legacy and because it does not hurt anything to just ignore the + ** database name. */ + zDb = 0; + }else{ + for(i=0; i<db->nDb; i++){ + assert( db->aDb[i].zDbSName ); + if( sqlite3StrICmp(db->aDb[i].zDbSName,zDb)==0 ){ + pSchema = db->aDb[i].pSchema; + break; + } + } + } + } + + /* Start at the inner-most context and move outward until a match is found */ + while( pNC && cnt==0 ){ + ExprList *pEList; + SrcList *pSrcList = pNC->pSrcList; + + if( pSrcList ){ + for(i=0, pItem=pSrcList->a; i<pSrcList->nSrc; i++, pItem++){ + pTab = pItem->pTab; + assert( pTab!=0 && pTab->zName!=0 ); + assert( pTab->nCol>0 ); + if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){ + int hit = 0; + pEList = pItem->pSelect->pEList; + for(j=0; j<pEList->nExpr; j++){ + if( sqlite3MatchSpanName(pEList->a[j].zSpan, zCol, zTab, zDb) ){ + cnt++; + cntTab = 2; + pMatch = pItem; + pExpr->iColumn = j; + hit = 1; + } + } + if( hit || zTab==0 ) continue; + } + if( zDb && pTab->pSchema!=pSchema ){ + continue; + } + if( zTab ){ + const char *zTabName = pItem->zAlias ?
pItem->zAlias : pTab->zName; + assert( zTabName!=0 ); + if( sqlite3StrICmp(zTabName, zTab)!=0 ){ + continue; + } + } + if( 0==(cntTab++) ){ + pMatch = pItem; + } + for(j=0, pCol=pTab->aCol; j<pTab->nCol; j++, pCol++){ + if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ + /* If there has been exactly one prior match and this match + ** is for the right-hand table of a NATURAL JOIN or is in a + ** USING clause, then skip this match. + */ + if( cnt==1 ){ + if( pItem->fg.jointype & JT_NATURAL ) continue; + if( nameInUsingClause(pItem->pUsing, zCol) ) continue; + } + cnt++; + pMatch = pItem; + /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ + pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; + break; + } + } + } + if( pMatch ){ + pExpr->iTable = pMatch->iCursor; + pExpr->pTab = pMatch->pTab; + /* RIGHT JOIN not (yet) supported */ + assert( (pMatch->fg.jointype & JT_RIGHT)==0 ); + if( (pMatch->fg.jointype & JT_LEFT)!=0 ){ + ExprSetProperty(pExpr, EP_CanBeNull); + } + pSchema = pExpr->pTab->pSchema; + } + } /* if( pSrcList ) */ + +#ifndef SQLITE_OMIT_TRIGGER + /* If we have not already resolved the name, then maybe + ** it is a new.* or old.* trigger argument reference + */ + if( zDb==0 && zTab!=0 && cntTab==0 && pParse->pTriggerTab!=0 ){ + int op = pParse->eTriggerOp; + assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); + if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){ + pExpr->iTable = 1; + pTab = pParse->pTriggerTab; + }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){ + pExpr->iTable = 0; + pTab = pParse->pTriggerTab; + }else{ + pTab = 0; + } + + if( pTab ){ + int iCol; + pSchema = pTab->pSchema; + cntTab++; + for(iCol=0, pCol=pTab->aCol; iCol<pTab->nCol; iCol++, pCol++){ + if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ + if( iCol==pTab->iPKey ){ + iCol = -1; + } + break; + } + } + if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){ + /* IMP: R-51414-32910 */ + iCol = -1; + } + if( iCol<pTab->nCol ){ + cnt++; + if( iCol<0 ){ + pExpr->affinity = SQLITE_AFF_INTEGER; + }else if( pExpr->iTable==0 ){ + testcase( iCol==31 ); + testcase( iCol==32 ); + pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol)); + }else{ + testcase( iCol==31 ); + testcase( iCol==32 ); + pParse->newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol)); + } + pExpr->iColumn = (i16)iCol; + pExpr->pTab = pTab; + isTrigger = 1; + } + } + } +#endif /* !defined(SQLITE_OMIT_TRIGGER) */ + + /* + ** Perhaps the name is a reference to the ROWID + */ + if( cnt==0 + && cntTab==1 + && pMatch + && (pNC->ncFlags & NC_IdxExpr)==0 + && sqlite3IsRowid(zCol) + && VisibleRowid(pMatch->pTab) + ){ + cnt = 1; + pExpr->iColumn = -1; + pExpr->affinity = SQLITE_AFF_INTEGER; + } + + /* + ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z + ** might refer to a result-set alias. This happens, for example, when + ** we are resolving names in the WHERE clause of the following command: + ** + ** SELECT a+b AS x FROM table WHERE x<10; + ** + ** In cases like this, replace pExpr with a copy of the expression that + ** forms the result set entry ("a+b" in the example) and return immediately. + ** Note that the expression in the result set should have already been + ** resolved by the time the WHERE clause is resolved. + ** + ** The ability to use an output result-set column in the WHERE, GROUP BY, + ** or HAVING clauses, or as part of a larger expression in the ORDER BY + ** clause is not standard SQL. This is a (goofy) SQLite extension that + ** is supported for backwards compatibility only. Hence, we issue a warning + ** on sqlite3_log() whenever the capability is used.
+ */ + if( (pEList = pNC->pEList)!=0 + && zTab==0 + && cnt==0 + ){ + for(j=0; j<pEList->nExpr; j++){ + char *zAs = pEList->a[j].zName; + if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ + Expr *pOrig; + assert( pExpr->pLeft==0 && pExpr->pRight==0 ); + assert( pExpr->x.pList==0 ); + assert( pExpr->x.pSelect==0 ); + pOrig = pEList->a[j].pExpr; + if( (pNC->ncFlags&NC_AllowAgg)==0 && ExprHasProperty(pOrig, EP_Agg) ){ + sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); + return WRC_Abort; + } + if( sqlite3ExprVectorSize(pOrig)!=1 ){ + sqlite3ErrorMsg(pParse, "row value misused"); + return WRC_Abort; + } + resolveAlias(pParse, pEList, j, pExpr, "", nSubquery); + cnt = 1; + pMatch = 0; + assert( zTab==0 && zDb==0 ); + goto lookupname_end; + } + } + } + + /* Advance to the next name context. The loop will exit when either + ** we have a match (cnt>0) or when we run out of name contexts. + */ + if( cnt==0 ){ + pNC = pNC->pNext; + nSubquery++; + } + } + + /* + ** If X and Y are NULL (in other words if only the column name Z is + ** supplied) and the value of Z is enclosed in double-quotes, then + ** Z is a string literal if it doesn't match any column names. In that + ** case, we need to return right away and not make any changes to + ** pExpr. + ** + ** Because no reference was made to outer contexts, the pNC->nRef + ** fields are not changed in any context. + */ + if( cnt==0 && zTab==0 && ExprHasProperty(pExpr,EP_DblQuoted) ){ + pExpr->op = TK_STRING; + pExpr->pTab = 0; + return WRC_Prune; + } + + /* + ** cnt==0 means there was no match. cnt>1 means there were two or + ** more matches. Either way, we have an error. + */ + if( cnt!=1 ){ + const char *zErr; + zErr = cnt==0 ? "no such column" : "ambiguous column name"; + if( zDb ){ + sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); + }else if( zTab ){ + sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); + }else{ + sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); + } + pParse->checkSchema = 1; + pTopNC->nErr++; + } + + /* If a column from a table in pSrcList is referenced, then record + ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes + ** bit 0 to be set. Column 1 sets bit 1. And so forth. If the + ** column number is greater than the number of bits in the bitmask + ** then set the high-order bit of the bitmask. + */ + if( pExpr->iColumn>=0 && pMatch!=0 ){ + int n = pExpr->iColumn; + testcase( n==BMS-1 ); + if( n>=BMS ){ + n = BMS-1; + } + assert( pMatch->iCursor==pExpr->iTable ); + pMatch->colUsed |= ((Bitmask)1)<<n; + } + + /* Clean up and return + */ + sqlite3ExprDelete(db, pExpr->pLeft); + pExpr->pLeft = 0; + sqlite3ExprDelete(db, pExpr->pRight); + pExpr->pRight = 0; + pExpr->op = (isTrigger ? TK_TRIGGER : TK_COLUMN); +lookupname_end: + if( cnt==1 ){ + assert( pNC!=0 ); + if( !ExprHasProperty(pExpr, EP_Alias) ){ + sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); + } + /* Increment the nRef value on all name contexts from TopNC up to + ** the point where the name matched. */ + for(;;){ + assert( pTopNC!=0 ); + pTopNC->nRef++; + if( pTopNC==pNC ) break; + pTopNC = pTopNC->pNext; + } + return WRC_Prune; + } else { + return WRC_Abort; + } +} + +/* +** Allocate and return a pointer to an expression to load the column iCol +** from datasource iSrc in SrcList pSrc.
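+** +** The returned node is a TK_COLUMN Expr wired to the iSrc-th cursor. If iCol +** is the table's INTEGER PRIMARY KEY, Expr.iColumn is set to -1 so that the +** reference resolves to the rowid; otherwise the corresponding bit of +** SrcList_item.colUsed is set (columns at or beyond BMS all map to the +** high-order bit).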
+*/ +SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ + Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); + if( p ){ + struct SrcList_item *pItem = &pSrc->a[iSrc]; + p->pTab = pItem->pTab; + p->iTable = pItem->iCursor; + if( p->pTab->iPKey==iCol ){ + p->iColumn = -1; + }else{ + p->iColumn = (ynVar)iCol; + testcase( iCol==BMS ); + testcase( iCol==BMS-1 ); + pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); + } + ExprSetProperty(p, EP_Resolved); + } + return p; +} + +/* +** Report an error that an expression is not valid for some set of +** pNC->ncFlags values determined by validMask. +*/ +static void notValid( + Parse *pParse, /* Leave error message here */ + NameContext *pNC, /* The name context */ + const char *zMsg, /* Type of error */ + int validMask /* Set of contexts for which prohibited */ +){ + assert( (validMask&~(NC_IsCheck|NC_PartIdx|NC_IdxExpr))==0 ); + if( (pNC->ncFlags & validMask)!=0 ){ + const char *zIn = "partial index WHERE clauses"; + if( pNC->ncFlags & NC_IdxExpr ) zIn = "index expressions"; +#ifndef SQLITE_OMIT_CHECK + else if( pNC->ncFlags & NC_IsCheck ) zIn = "CHECK constraints"; +#endif + sqlite3ErrorMsg(pParse, "%s prohibited in %s", zMsg, zIn); + } +} + +/* +** Expression p should encode a floating point value between 0.0 and 1.0. +** Return 134217728 (2^27) times this value. Or return -1 if p is not a +** floating point value between 0.0 and 1.0. +*/ +static int exprProbability(Expr *p){ + double r = -1.0; + if( p->op!=TK_FLOAT ) return -1; + sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8); + assert( r>=0.0 ); + if( r>1.0 ) return -1; + return (int)(r*134217728.0); +} + +/* +** This routine is a callback for sqlite3WalkExpr(). +** +** Resolve symbolic names into TK_COLUMN operators for the current +** node in the expression tree. Return 0 to continue the search down +** the tree or 2 to abort the tree walk. +** +** This routine also does error checking and name resolution for +** function names. The operator for aggregate functions is changed +** to TK_AGG_FUNCTION. +*/ +static int resolveExprStep(Walker *pWalker, Expr *pExpr){ + NameContext *pNC; + Parse *pParse; + + pNC = pWalker->u.pNC; + assert( pNC!=0 ); + pParse = pNC->pParse; + assert( pParse==pWalker->pParse ); + + if( ExprHasProperty(pExpr, EP_Resolved) ) return WRC_Prune; + ExprSetProperty(pExpr, EP_Resolved); +#ifndef NDEBUG + if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){ + SrcList *pSrcList = pNC->pSrcList; + int i; + for(i=0; i<pNC->pSrcList->nSrc; i++){ + assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursor<pParse->nTab); + } + } +#endif + switch( pExpr->op ){ + +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) + /* The special operator TK_ROW means use the rowid for the first + ** column in the FROM clause. This is used by the LIMIT and ORDER BY + ** clause processing on UPDATE and DELETE statements. + */ + case TK_ROW: { + SrcList *pSrcList = pNC->pSrcList; + struct SrcList_item *pItem; + assert( pSrcList && pSrcList->nSrc==1 ); + pItem = pSrcList->a; + pExpr->op = TK_COLUMN; + pExpr->pTab = pItem->pTab; + pExpr->iTable = pItem->iCursor; + pExpr->iColumn = -1; + pExpr->affinity = SQLITE_AFF_INTEGER; + break; + } +#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) + && !defined(SQLITE_OMIT_SUBQUERY) */ + + /* A lone identifier is the name of a column.
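+ ** For example, in "SELECT x FROM t1" the identifier "x" reaches this case + ** and is handed to lookupName() with both the database name and the table + ** name passed as NULL.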
+ */ + case TK_ID: { + return lookupName(pParse, 0, 0, pExpr->u.zToken, pNC, pExpr); + } + + /* A table name and column name: ID.ID + ** Or a database, table and column: ID.ID.ID + */ + case TK_DOT: { + const char *zColumn; + const char *zTable; + const char *zDb; + Expr *pRight; + + /* if( pSrcList==0 ) break; */ + notValid(pParse, pNC, "the \".\" operator", NC_IdxExpr); + pRight = pExpr->pRight; + if( pRight->op==TK_ID ){ + zDb = 0; + zTable = pExpr->pLeft->u.zToken; + zColumn = pRight->u.zToken; + }else{ + assert( pRight->op==TK_DOT ); + zDb = pExpr->pLeft->u.zToken; + zTable = pRight->pLeft->u.zToken; + zColumn = pRight->pRight->u.zToken; + } + return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); + } + + /* Resolve function names + */ + case TK_FUNCTION: { + ExprList *pList = pExpr->x.pList; /* The argument list */ + int n = pList ? pList->nExpr : 0; /* Number of arguments */ + int no_such_func = 0; /* True if no such function exists */ + int wrong_num_args = 0; /* True if wrong number of arguments */ + int is_agg = 0; /* True if is an aggregate function */ + int nId; /* Number of characters in function name */ + const char *zId; /* The function name. */ + FuncDef *pDef; /* Information about the function */ + u8 enc = ENC(pParse->db); /* The database encoding */ + + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + zId = pExpr->u.zToken; + nId = sqlite3Strlen30(zId); + pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0); + if( pDef==0 ){ + pDef = sqlite3FindFunction(pParse->db, zId, -2, enc, 0); + if( pDef==0 ){ + no_such_func = 1; + }else{ + wrong_num_args = 1; + } + }else{ + is_agg = pDef->xFinalize!=0; + if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){ + ExprSetProperty(pExpr, EP_Unlikely|EP_Skip); + if( n==2 ){ + pExpr->iTable = exprProbability(pList->a[1].pExpr); + if( pExpr->iTable<0 ){ + sqlite3ErrorMsg(pParse, + "second argument to likelihood() must be a " + "constant between 0.0 and 1.0"); + pNC->nErr++; + } + }else{ + /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is + ** equivalent to likelihood(X, 0.0625). + ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is + ** short-hand for likelihood(X,0.0625). + ** EVIDENCE-OF: R-36850-34127 The likely(X) function is short-hand + ** for likelihood(X,0.9375). + ** EVIDENCE-OF: R-53436-40973 The likely(X) function is equivalent + ** to likelihood(X,0.9375). */ + /* TUNING: unlikely() probability is 0.0625. likely() is 0.9375 */ + pExpr->iTable = pDef->zName[0]=='u' ? 8388608 : 125829120; + } + } +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0,pDef->zName,0); + if( auth!=SQLITE_OK ){ + if( auth==SQLITE_DENY ){ + sqlite3ErrorMsg(pParse, "not authorized to use function: %s", + pDef->zName); + pNC->nErr++; + } + pExpr->op = TK_NULL; + return WRC_Prune; + } + } +#endif + if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){ + /* For the purposes of the EP_ConstFunc flag, date and time + ** functions and other functions that change slowly are considered + ** constant because they are constant for the duration of one query */ + ExprSetProperty(pExpr,EP_ConstFunc); + } + if( (pDef->funcFlags & SQLITE_FUNC_CONSTANT)==0 ){ + /* Date/time functions that use 'now', and other functions like + ** sqlite_version() that might change over time cannot be used + ** in an index. 
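+ ** (As an aside on the probability constants assigned above: they use + ** exprProbability()'s fixed-point scale of 134217728 (2^27), so + ** unlikely() maps to 0.0625*134217728==8388608 and likely() to + ** 0.9375*134217728==125829120.)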
*/ + notValid(pParse, pNC, "non-deterministic functions", + NC_IdxExpr|NC_PartIdx); + } + } + if( is_agg && (pNC->ncFlags & NC_AllowAgg)==0 ){ + sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); + pNC->nErr++; + is_agg = 0; + }else if( no_such_func && pParse->db->init.busy==0 +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + && pParse->explain==0 +#endif + ){ + sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); + pNC->nErr++; + }else if( wrong_num_args ){ + sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", + nId, zId); + pNC->nErr++; + } + if( is_agg ) pNC->ncFlags &= ~NC_AllowAgg; + sqlite3WalkExprList(pWalker, pList); + if( is_agg ){ + NameContext *pNC2 = pNC; + pExpr->op = TK_AGG_FUNCTION; + pExpr->op2 = 0; + while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){ + pExpr->op2++; + pNC2 = pNC2->pNext; + } + assert( pDef!=0 ); + if( pNC2 ){ + assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg ); + testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 ); + pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX); + + } + pNC->ncFlags |= NC_AllowAgg; + } + /* FIX ME: Compute pExpr->affinity based on the expected return + ** type of the function + */ + return WRC_Prune; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_SELECT: + case TK_EXISTS: testcase( pExpr->op==TK_EXISTS ); +#endif + case TK_IN: { + testcase( pExpr->op==TK_IN ); + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + int nRef = pNC->nRef; + notValid(pParse, pNC, "subqueries", NC_IsCheck|NC_PartIdx|NC_IdxExpr); + sqlite3WalkSelect(pWalker, pExpr->x.pSelect); + assert( pNC->nRef>=nRef ); + if( nRef!=pNC->nRef ){ + ExprSetProperty(pExpr, EP_VarSelect); + pNC->ncFlags |= NC_VarSelect; + } + } + break; + } + case TK_VARIABLE: { + notValid(pParse, pNC, "parameters", NC_IsCheck|NC_PartIdx|NC_IdxExpr); + break; + } + case TK_BETWEEN: + case TK_EQ: + case TK_NE: + case TK_LT: + case TK_LE: + case TK_GT: + case TK_GE: + case TK_IS: + case TK_ISNOT: { + int nLeft, nRight; + if( pParse->db->mallocFailed ) break; + assert( pExpr->pLeft!=0 ); + nLeft = sqlite3ExprVectorSize(pExpr->pLeft); + if( pExpr->op==TK_BETWEEN ){ + nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[0].pExpr); + if( nRight==nLeft ){ + nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[1].pExpr); + } + }else{ + assert( pExpr->pRight!=0 ); + nRight = sqlite3ExprVectorSize(pExpr->pRight); + } + if( nLeft!=nRight ){ + testcase( pExpr->op==TK_EQ ); + testcase( pExpr->op==TK_NE ); + testcase( pExpr->op==TK_LT ); + testcase( pExpr->op==TK_LE ); + testcase( pExpr->op==TK_GT ); + testcase( pExpr->op==TK_GE ); + testcase( pExpr->op==TK_IS ); + testcase( pExpr->op==TK_ISNOT ); + testcase( pExpr->op==TK_BETWEEN ); + sqlite3ErrorMsg(pParse, "row value misused"); + } + break; + } + } + return (pParse->nErr || pParse->db->mallocFailed) ? WRC_Abort : WRC_Continue; +} + +/* +** pEList is a list of expressions which are really the result set of +** a SELECT statement. pE is a term in an ORDER BY or GROUP BY clause. +** This routine checks to see if pE is a simple identifier which corresponds +** to the AS-name of one of the terms of the expression list. If it is, +** this routine returns an integer between 1 and N where N is the number of +** elements in pEList, corresponding to the matching entry. If there is +** no match, or if pE is not a simple identifier, then this routine +** returns 0. +** +** pEList has been resolved. pE has not.
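+** +** For example, given "SELECT a+b AS x FROM t1 ORDER BY x", calling this +** routine on the ORDER BY term "x" returns 1, the 1-based position of the +** aliased expression in the result set.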
+*/ +static int resolveAsName( + Parse *pParse, /* Parsing context for error messages */ + ExprList *pEList, /* List of expressions to scan */ + Expr *pE /* Expression we are trying to match */ +){ + int i; /* Loop counter */ + + UNUSED_PARAMETER(pParse); + + if( pE->op==TK_ID ){ + char *zCol = pE->u.zToken; + for(i=0; i<pEList->nExpr; i++){ + char *zAs = pEList->a[i].zName; + if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ + return i+1; + } + } + } + return 0; +} + +/* +** pE is a pointer to an expression which is a single term in the +** ORDER BY of a compound SELECT. The expression has not been +** name resolved. +** +** At the point this routine is called, we already know that the +** ORDER BY term is not an integer index into the result set. That +** case is handled by the calling routine. +** +** Attempt to match pE against result set columns in the left-most +** SELECT statement. Return the index i of the matching column, +** as an indication to the caller that it should sort by the i-th column. +** The left-most column is 1. In other words, the value returned is the +** same integer value that would be used in the SQL statement to indicate +** the column. +** +** If there is no match, return 0. Return -1 if an error occurs. +*/ +static int resolveOrderByTermToExprList( + Parse *pParse, /* Parsing context for error messages */ + Select *pSelect, /* The SELECT statement with the ORDER BY clause */ + Expr *pE /* The specific ORDER BY term */ +){ + int i; /* Loop counter */ + ExprList *pEList; /* The columns of the result set */ + NameContext nc; /* Name context for resolving pE */ + sqlite3 *db; /* Database connection */ + int rc; /* Return code from subprocedures */ + u8 savedSuppErr; /* Saved value of db->suppressErr */ + + assert( sqlite3ExprIsInteger(pE, &i)==0 ); + pEList = pSelect->pEList; + + /* Resolve all names in the ORDER BY term expression + */ + memset(&nc, 0, sizeof(nc)); + nc.pParse = pParse; + nc.pSrcList = pSelect->pSrc; + nc.pEList = pEList; + nc.ncFlags = NC_AllowAgg; + nc.nErr = 0; + db = pParse->db; + savedSuppErr = db->suppressErr; + db->suppressErr = 1; + rc = sqlite3ResolveExprNames(&nc, pE); + db->suppressErr = savedSuppErr; + if( rc ) return 0; + + /* Try to match the ORDER BY expression against an expression + ** in the result set. Return a 1-based index of the matching + ** result-set entry. + */ + for(i=0; i<pEList->nExpr; i++){ + if( sqlite3ExprCompare(pEList->a[i].pExpr, pE, -1)<2 ){ + return i+1; + } + } + + /* If no match, return 0. */ + return 0; +} + +/* +** Generate an ORDER BY or GROUP BY term out-of-range error. +*/ +static void resolveOutOfRangeError( + Parse *pParse, /* The error context into which to write the error */ + const char *zType, /* "ORDER" or "GROUP" */ + int i, /* The index (1-based) of the term out of range */ + int mx /* Largest permissible value of i */ +){ + sqlite3ErrorMsg(pParse, + "%r %s BY term out of range - should be " + "between 1 and %d", i, zType, mx); +} + +/* +** Analyze the ORDER BY clause in a compound SELECT statement. Modify +** each term of the ORDER BY clause so that it is a constant integer between +** 1 and N where N is the number of columns in the compound SELECT. +** +** ORDER BY terms that are already an integer between 1 and N are +** unmodified. ORDER BY terms that are integers outside the range of +** 1 through N generate an error. ORDER BY terms that are expressions +** are matched against result set expressions of compound SELECT +** beginning with the left-most SELECT and working toward the right.
+** At the first match, the ORDER BY expression is transformed into +** the integer column number. +** +** Return the number of errors seen. +*/ +static int resolveCompoundOrderBy( + Parse *pParse, /* Parsing context. Leave error messages here */ + Select *pSelect /* The SELECT statement containing the ORDER BY */ +){ + int i; + ExprList *pOrderBy; + ExprList *pEList; + sqlite3 *db; + int moreToDo = 1; + + pOrderBy = pSelect->pOrderBy; + if( pOrderBy==0 ) return 0; + db = pParse->db; +#if SQLITE_MAX_COLUMN + if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause"); + return 1; + } +#endif + for(i=0; i<pOrderBy->nExpr; i++){ + pOrderBy->a[i].done = 0; + } + pSelect->pNext = 0; + while( pSelect->pPrior ){ + pSelect->pPrior->pNext = pSelect; + pSelect = pSelect->pPrior; + } + while( pSelect && moreToDo ){ + struct ExprList_item *pItem; + moreToDo = 0; + pEList = pSelect->pEList; + assert( pEList!=0 ); + for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){ + int iCol = -1; + Expr *pE, *pDup; + if( pItem->done ) continue; + pE = sqlite3ExprSkipCollate(pItem->pExpr); + if( sqlite3ExprIsInteger(pE, &iCol) ){ + if( iCol<=0 || iCol>pEList->nExpr ){ + resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr); + return 1; + } + }else{ + iCol = resolveAsName(pParse, pEList, pE); + if( iCol==0 ){ + pDup = sqlite3ExprDup(db, pE, 0); + if( !db->mallocFailed ){ + assert(pDup); + iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup); + } + sqlite3ExprDelete(db, pDup); + } + } + if( iCol>0 ){ + /* Convert the ORDER BY term into an integer column number iCol, + ** taking care to preserve the COLLATE clause if it exists */ + Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); + if( pNew==0 ) return 1; + pNew->flags |= EP_IntValue; + pNew->u.iValue = iCol; + if( pItem->pExpr==pE ){ + pItem->pExpr = pNew; + }else{ + Expr *pParent = pItem->pExpr; + assert( pParent->op==TK_COLLATE ); + while( pParent->pLeft->op==TK_COLLATE ) pParent = pParent->pLeft; + assert( pParent->pLeft==pE ); + pParent->pLeft = pNew; + } + sqlite3ExprDelete(db, pE); + pItem->u.x.iOrderByCol = (u16)iCol; + pItem->done = 1; + }else{ + moreToDo = 1; + } + } + pSelect = pSelect->pNext; + } + for(i=0; i<pOrderBy->nExpr; i++){ + if( pOrderBy->a[i].done==0 ){ + sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any " + "column in the result set", i+1); + return 1; + } + } + return 0; +} + +/* +** Check every term in the ORDER BY or GROUP BY clause pOrderBy of +** the SELECT statement pSelect. If any term is a reference to a +** result set expression (as determined by the ExprList.a.u.x.iOrderByCol +** field) then convert that term into a copy of the corresponding result set +** column. +** +** If any errors are detected, add an error message to pParse and +** return non-zero. Return zero if no errors are seen. +*/ +SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy( + Parse *pParse, /* Parsing context.
Leave error messages here */ + Select *pSelect, /* The SELECT statement containing the clause */ + ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */ + const char *zType /* "ORDER" or "GROUP" */ +){ + int i; + sqlite3 *db = pParse->db; + ExprList *pEList; + struct ExprList_item *pItem; + + if( pOrderBy==0 || pParse->db->mallocFailed ) return 0; +#if SQLITE_MAX_COLUMN + if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType); + return 1; + } +#endif + pEList = pSelect->pEList; + assert( pEList!=0 ); /* sqlite3SelectNew() guarantees this */ + for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){ + if( pItem->u.x.iOrderByCol ){ + if( pItem->u.x.iOrderByCol>pEList->nExpr ){ + resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr); + return 1; + } + resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr, + zType,0); + } + } + return 0; +} + +/* +** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect. +** The Name context of the SELECT statement is pNC. zType is either +** "ORDER" or "GROUP" depending on which type of clause pOrderBy is. +** +** This routine resolves each term of the clause into an expression. +** If the order-by term is an integer I between 1 and N (where N is the +** number of columns in the result set of the SELECT) then the expression +** in the resolution is a copy of the I-th result-set expression. If +** the order-by term is an identifier that corresponds to the AS-name of +** a result-set expression, then the term resolves to a copy of the +** result-set expression. Otherwise, the expression is resolved in +** the usual way - using sqlite3ResolveExprNames(). +** +** This routine returns the number of errors. If errors occur, then +** an appropriate error message might be left in pParse. (OOM errors +** excepted.) +*/ +static int resolveOrderGroupBy( + NameContext *pNC, /* The name context of the SELECT statement */ + Select *pSelect, /* The SELECT statement holding pOrderBy */ + ExprList *pOrderBy, /* An ORDER BY or GROUP BY clause to resolve */ + const char *zType /* Either "ORDER" or "GROUP", as appropriate */ +){ + int i, j; /* Loop counters */ + int iCol; /* Column number */ + struct ExprList_item *pItem; /* A term of the ORDER BY clause */ + Parse *pParse; /* Parsing context */ + int nResult; /* Number of terms in the result set */ + + if( pOrderBy==0 ) return 0; + nResult = pSelect->pEList->nExpr; + pParse = pNC->pParse; + for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){ + Expr *pE = pItem->pExpr; + Expr *pE2 = sqlite3ExprSkipCollate(pE); + if( zType[0]!='G' ){ + iCol = resolveAsName(pParse, pSelect->pEList, pE2); + if( iCol>0 ){ + /* If an AS-name match is found, mark this ORDER BY column as being + ** a copy of the iCol-th result-set column. The subsequent call to + ** sqlite3ResolveOrderGroupBy() will convert the expression to a + ** copy of the iCol-th result-set expression. */ + pItem->u.x.iOrderByCol = (u16)iCol; + continue; + } + } + if( sqlite3ExprIsInteger(pE2, &iCol) ){ + /* The ORDER BY term is an integer constant.
Again, set the column + ** number so that sqlite3ResolveOrderGroupBy() will convert the + ** order-by term to a copy of the result-set expression */ + if( iCol<1 || iCol>0xffff ){ + resolveOutOfRangeError(pParse, zType, i+1, nResult); + return 1; + } + pItem->u.x.iOrderByCol = (u16)iCol; + continue; + } + + /* Otherwise, treat the ORDER BY term as an ordinary expression */ + pItem->u.x.iOrderByCol = 0; + if( sqlite3ResolveExprNames(pNC, pE) ){ + return 1; + } + for(j=0; j<pSelect->pEList->nExpr; j++){ + if( sqlite3ExprCompare(pE, pSelect->pEList->a[j].pExpr, -1)==0 ){ + pItem->u.x.iOrderByCol = j+1; + } + } + } + return sqlite3ResolveOrderGroupBy(pParse, pSelect, pOrderBy, zType); +} + +/* +** Resolve names in the SELECT statement p and all of its descendants. +*/ +static int resolveSelectStep(Walker *pWalker, Select *p){ + NameContext *pOuterNC; /* Context that contains this SELECT */ + NameContext sNC; /* Name context of this SELECT */ + int isCompound; /* True if p is a compound select */ + int nCompound; /* Number of compound terms processed so far */ + Parse *pParse; /* Parsing context */ + int i; /* Loop counter */ + ExprList *pGroupBy; /* The GROUP BY clause */ + Select *pLeftmost; /* Left-most SELECT of a compound */ + sqlite3 *db; /* Database connection */ + + + assert( p!=0 ); + if( p->selFlags & SF_Resolved ){ + return WRC_Prune; + } + pOuterNC = pWalker->u.pNC; + pParse = pWalker->pParse; + db = pParse->db; + + /* Normally sqlite3SelectExpand() will be called first and will have + ** already expanded this SELECT. However, if this is a subquery within + ** an expression, sqlite3ResolveExprNames() will be called without a + ** prior call to sqlite3SelectExpand(). When that happens, let + ** sqlite3SelectPrep() do all of the processing for this SELECT. + ** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and + ** this routine in the correct order. + */ + if( (p->selFlags & SF_Expanded)==0 ){ + sqlite3SelectPrep(pParse, p, pOuterNC); + return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune; + } + + isCompound = p->pPrior!=0; + nCompound = 0; + pLeftmost = p; + while( p ){ + assert( (p->selFlags & SF_Expanded)!=0 ); + assert( (p->selFlags & SF_Resolved)==0 ); + p->selFlags |= SF_Resolved; + + /* Resolve the expressions in the LIMIT and OFFSET clauses. These + ** are not allowed to refer to any names, so pass an empty NameContext. + */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + if( sqlite3ResolveExprNames(&sNC, p->pLimit) || + sqlite3ResolveExprNames(&sNC, p->pOffset) ){ + return WRC_Abort; + } + + /* If the SF_Converted flag is set, then this Select object was + ** created by the convertCompoundSelectToSubquery() function. + ** In this case the ORDER BY clause (p->pOrderBy) should be resolved + ** as if it were part of the sub-query, not the parent. This block + ** moves the pOrderBy down to the sub-query. It will be moved back + ** after the names have been resolved.
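+ ** (The matching move-back happens in the second SF_Converted block + ** further down this loop, once resolveOrderGroupBy() can substitute the + ** result-set expressions for the integer terms.)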
*/ + if( p->selFlags & SF_Converted ){ + Select *pSub = p->pSrc->a[0].pSelect; + assert( p->pSrc->nSrc==1 && p->pOrderBy ); + assert( pSub->pPrior && pSub->pOrderBy==0 ); + pSub->pOrderBy = p->pOrderBy; + p->pOrderBy = 0; + } + + /* Recursively resolve names in all subqueries + */ + for(i=0; i<p->pSrc->nSrc; i++){ + struct SrcList_item *pItem = &p->pSrc->a[i]; + if( pItem->pSelect ){ + NameContext *pNC; /* Used to iterate name contexts */ + int nRef = 0; /* Refcount for pOuterNC and outer contexts */ + const char *zSavedContext = pParse->zAuthContext; + + /* Count the total number of references to pOuterNC and all of its + ** parent contexts. After resolving references to expressions in + ** pItem->pSelect, check if this value has changed. If so, then + ** SELECT statement pItem->pSelect must be correlated. Set the + ** pItem->fg.isCorrelated flag if this is the case. */ + for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef; + + if( pItem->zName ) pParse->zAuthContext = pItem->zName; + sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); + pParse->zAuthContext = zSavedContext; + if( pParse->nErr || db->mallocFailed ) return WRC_Abort; + + for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef; + assert( pItem->fg.isCorrelated==0 && nRef<=0 ); + pItem->fg.isCorrelated = (nRef!=0); + } + } + + /* Set up the local name-context to pass to sqlite3ResolveExprNames() to + ** resolve the result-set expression list. + */ + sNC.ncFlags = NC_AllowAgg; + sNC.pSrcList = p->pSrc; + sNC.pNext = pOuterNC; + + /* Resolve names in the result set. */ + if( sqlite3ResolveExprListNames(&sNC, p->pEList) ) return WRC_Abort; + + /* If there are no aggregate functions in the result-set, and no GROUP BY + ** expression, do not allow aggregates in any of the other expressions. + */ + assert( (p->selFlags & SF_Aggregate)==0 ); + pGroupBy = p->pGroupBy; + if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){ + assert( NC_MinMaxAgg==SF_MinMaxAgg ); + p->selFlags |= SF_Aggregate | (sNC.ncFlags&NC_MinMaxAgg); + }else{ + sNC.ncFlags &= ~NC_AllowAgg; + } + + /* If a HAVING clause is present, then there must be a GROUP BY clause. + */ + if( p->pHaving && !pGroupBy ){ + sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); + return WRC_Abort; + } + + /* Add the output column list to the name-context before parsing the + ** other expressions in the SELECT statement. This is so that + ** expressions in the WHERE clause (etc.) can refer to expressions by + ** aliases in the result set. + ** + ** Minor point: If this is the case, then the expression will be + ** re-evaluated for each reference to it. + */ + sNC.pEList = p->pEList; + if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; + if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort; + + /* Resolve names in table-valued-function arguments */ + for(i=0; i<p->pSrc->nSrc; i++){ + struct SrcList_item *pItem = &p->pSrc->a[i]; + if( pItem->fg.isTabFunc + && sqlite3ResolveExprListNames(&sNC, pItem->u1.pFuncArg) + ){ + return WRC_Abort; + } + } + + /* The ORDER BY and GROUP BY clauses may not refer to terms in + ** outer queries + */ + sNC.pNext = 0; + sNC.ncFlags |= NC_AllowAgg; + + /* If this is a converted compound query, move the ORDER BY clause from + ** the sub-query back to the parent query. At this point each term + ** within the ORDER BY clause has been transformed to an integer value. + ** These integers will be replaced by copies of the corresponding result + ** set expressions by the call to resolveOrderGroupBy() below.
*/ + if( p->selFlags & SF_Converted ){ + Select *pSub = p->pSrc->a[0].pSelect; + p->pOrderBy = pSub->pOrderBy; + pSub->pOrderBy = 0; + } + + /* Process the ORDER BY clause for singleton SELECT statements. + ** The ORDER BY clause for compounds SELECT statements is handled + ** below, after all of the result-sets for all of the elements of + ** the compound have been resolved. + ** + ** If there is an ORDER BY clause on a term of a compound-select other + ** than the right-most term, then that is a syntax error. But the error + ** is not detected until much later, and so we need to go ahead and + ** resolve those symbols on the incorrect ORDER BY for consistency. + */ + if( isCompound<=nCompound /* Defer right-most ORDER BY of a compound */ + && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER") + ){ + return WRC_Abort; + } + if( db->mallocFailed ){ + return WRC_Abort; + } + + /* Resolve the GROUP BY clause. At the same time, make sure + ** the GROUP BY clause does not contain aggregate functions. + */ + if( pGroupBy ){ + struct ExprList_item *pItem; + + if( resolveOrderGroupBy(&sNC, p, pGroupBy, "GROUP") || db->mallocFailed ){ + return WRC_Abort; + } + for(i=0, pItem=pGroupBy->a; inExpr; i++, pItem++){ + if( ExprHasProperty(pItem->pExpr, EP_Agg) ){ + sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in " + "the GROUP BY clause"); + return WRC_Abort; + } + } + } + + /* If this is part of a compound SELECT, check that it has the right + ** number of expressions in the select list. */ + if( p->pNext && p->pEList->nExpr!=p->pNext->pEList->nExpr ){ + sqlite3SelectWrongNumTermsError(pParse, p->pNext); + return WRC_Abort; + } + + /* Advance to the next term of the compound + */ + p = p->pPrior; + nCompound++; + } + + /* Resolve the ORDER BY on a compound SELECT after all terms of + ** the compound have been resolved. + */ + if( isCompound && resolveCompoundOrderBy(pParse, pLeftmost) ){ + return WRC_Abort; + } + + return WRC_Prune; +} + +/* +** This routine walks an expression tree and resolves references to +** table columns and result-set columns. At the same time, do error +** checking on function usage and set a flag if any aggregate functions +** are seen. +** +** To resolve table columns references we look for nodes (or subtrees) of the +** form X.Y.Z or Y.Z or just Z where +** +** X: The name of a database. Ex: "main" or "temp" or +** the symbolic name assigned to an ATTACH-ed database. +** +** Y: The name of a table in a FROM clause. Or in a trigger +** one of the special names "old" or "new". +** +** Z: The name of a column in table Y. +** +** The node at the root of the subtree is modified as follows: +** +** Expr.op Changed to TK_COLUMN +** Expr.pTab Points to the Table object for X.Y +** Expr.iColumn The column index in X.Y. -1 for the rowid. +** Expr.iTable The VDBE cursor number for X.Y +** +** +** To resolve result-set references, look for expression nodes of the +** form Z (with no X and Y prefix) where the Z matches the right-hand +** size of an AS clause in the result-set of a SELECT. The Z expression +** is replaced by a copy of the left-hand side of the result-set expression. +** Table-name and function resolution occurs on the substituted expression +** tree. 
For example, in:
+**
+**     SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x;
+**
+** The "x" term of the order by is replaced by "a+b" to render:
+**
+**     SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b;
+**
+** Function calls are checked to make sure that the function is
+** defined and that the correct number of arguments are specified.
+** If the function is an aggregate function, then the NC_HasAgg flag is
+** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION.
+** If an expression contains aggregate functions then the EP_Agg
+** property on the expression is set.
+**
+** An error message is left in pParse if anything is amiss.  The number
+** of errors is returned.
+*/
+SQLITE_PRIVATE int sqlite3ResolveExprNames(
+  NameContext *pNC,       /* Namespace to resolve expressions in. */
+  Expr *pExpr             /* The expression to be analyzed. */
+){
+  u16 savedHasAgg;
+  Walker w;
+
+  if( pExpr==0 ) return 0;
+#if SQLITE_MAX_EXPR_DEPTH>0
+  {
+    Parse *pParse = pNC->pParse;
+    if( sqlite3ExprCheckHeight(pParse, pExpr->nHeight+pNC->pParse->nHeight) ){
+      return 1;
+    }
+    pParse->nHeight += pExpr->nHeight;
+  }
+#endif
+  savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg);
+  pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg);
+  w.pParse = pNC->pParse;
+  w.xExprCallback = resolveExprStep;
+  w.xSelectCallback = resolveSelectStep;
+  w.xSelectCallback2 = 0;
+  w.walkerDepth = 0;
+  w.eCode = 0;
+  w.u.pNC = pNC;
+  sqlite3WalkExpr(&w, pExpr);
+#if SQLITE_MAX_EXPR_DEPTH>0
+  pNC->pParse->nHeight -= pExpr->nHeight;
+#endif
+  if( pNC->nErr>0 || w.pParse->nErr>0 ){
+    ExprSetProperty(pExpr, EP_Error);
+  }
+  if( pNC->ncFlags & NC_HasAgg ){
+    ExprSetProperty(pExpr, EP_Agg);
+  }
+  pNC->ncFlags |= savedHasAgg;
+  return ExprHasProperty(pExpr, EP_Error);
+}
+
+/*
+** Resolve all names for all expressions in an expression list.  This is
+** just like sqlite3ResolveExprNames() except that it works for an expression
+** list rather than a single expression.
+*/
+SQLITE_PRIVATE int sqlite3ResolveExprListNames(
+  NameContext *pNC,       /* Namespace to resolve expressions in. */
+  ExprList *pList         /* The expression list to be analyzed. */
+){
+  int i;
+  if( pList ){
+    for(i=0; i<pList->nExpr; i++){
+      if( sqlite3ResolveExprNames(pNC, pList->a[i].pExpr) ) return WRC_Abort;
+    }
+  }
+  return WRC_Continue;
+}
+
+/*
+** Resolve all names in all expressions of a SELECT and in all
+** descendants of the SELECT, including compounds off of p->pPrior,
+** subqueries in expressions, and subqueries used as FROM clause
+** terms.
+**
+** See sqlite3ResolveExprNames() for a description of the kinds of
+** transformations that occur.
+**
+** All SELECT statements should have been expanded using
+** sqlite3SelectExpand() prior to invoking this routine.
+*/
+SQLITE_PRIVATE void sqlite3ResolveSelectNames(
+  Parse *pParse,         /* The parser context */
+  Select *p,             /* The SELECT statement being coded. */
+  NameContext *pOuterNC  /* Name context for parent SELECT statement */
+){
+  Walker w;
+
+  assert( p!=0 );
+  memset(&w, 0, sizeof(w));
+  w.xExprCallback = resolveExprStep;
+  w.xSelectCallback = resolveSelectStep;
+  w.pParse = pParse;
+  w.u.pNC = pOuterNC;
+  sqlite3WalkSelect(&w, p);
+}
+
+/*
+** Resolve names in expressions that can only reference a single table:
+**
+**    *   CHECK constraints
+**    *   WHERE clauses on partial indices
+**
+** The Expr.iTable value for Expr.op==TK_COLUMN nodes of the expression
+** is set to -1 and the Expr.iColumn value is set to the column number.
+**
+** Any errors cause an error message to be set in pParse.
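+**
+** (Illustrative sketch, not from the original sources; pParse->pNewTable
+** and pCheckExpr stand in for the caller's state.)  Resolving a CHECK
+** constraint on the table under construction might look like:
+*/
+#if 0  /* illustrative only; never compiled */
+  sqlite3ResolveSelfReference(pParse, pParse->pNewTable, NC_IsCheck,
+                              pCheckExpr, 0);
+#endif
+/*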
+*/
+SQLITE_PRIVATE void sqlite3ResolveSelfReference(
+  Parse *pParse,      /* Parsing context */
+  Table *pTab,        /* The table being referenced */
+  int type,           /* NC_IsCheck or NC_PartIdx or NC_IdxExpr */
+  Expr *pExpr,        /* Expression to resolve.  May be NULL. */
+  ExprList *pList     /* Expression list to resolve.  May be NULL. */
+){
+  SrcList sSrc;                   /* Fake SrcList for pParse->pNewTable */
+  NameContext sNC;                /* Name context for pParse->pNewTable */
+
+  assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr );
+  memset(&sNC, 0, sizeof(sNC));
+  memset(&sSrc, 0, sizeof(sSrc));
+  sSrc.nSrc = 1;
+  sSrc.a[0].zName = pTab->zName;
+  sSrc.a[0].pTab = pTab;
+  sSrc.a[0].iCursor = -1;
+  sNC.pParse = pParse;
+  sNC.pSrcList = &sSrc;
+  sNC.ncFlags = type;
+  if( sqlite3ResolveExprNames(&sNC, pExpr) ) return;
+  if( pList ) sqlite3ResolveExprListNames(&sNC, pList);
+}
+
+/************** End of resolve.c *********************************************/
+/************** Begin file expr.c ********************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains routines used for analyzing expressions and
+** for generating VDBE code that evaluates expressions in SQLite.
+*/
+/* #include "sqliteInt.h" */
+
+/* Forward declarations */
+static void exprCodeBetween(Parse*,Expr*,int,void(*)(Parse*,Expr*,int,int),int);
+static int exprCodeVector(Parse *pParse, Expr *p, int *piToFree);
+
+/*
+** Return the affinity character for a single column of a table.
+*/
+SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table *pTab, int iCol){
+  assert( iCol<pTab->nCol );
+  return iCol>=0 ? pTab->aCol[iCol].affinity : SQLITE_AFF_INTEGER;
+}
+
+/*
+** Return the 'affinity' of the expression pExpr if any.
+**
+** If pExpr is a column, a reference to a column via an 'AS' alias,
+** or a sub-select with a column as the return value, then the
+** affinity of that column is returned. Otherwise, 0x00 is returned,
+** indicating no affinity for the expression.
+**
+** i.e. the WHERE clause expressions in the following statements all
+** have an affinity:
+**
+** CREATE TABLE t1(a);
+** SELECT * FROM t1 WHERE a;
+** SELECT a AS b FROM t1 WHERE b;
+** SELECT * FROM t1 WHERE (select a from t1);
+*/
+SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr){
+  int op;
+  pExpr = sqlite3ExprSkipCollate(pExpr);
+  if( pExpr->flags & EP_Generic ) return 0;
+  op = pExpr->op;
+  if( op==TK_SELECT ){
+    assert( pExpr->flags&EP_xIsSelect );
+    return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr);
+  }
+  if( op==TK_REGISTER ) op = pExpr->op2;
+#ifndef SQLITE_OMIT_CAST
+  if( op==TK_CAST ){
+    assert( !ExprHasProperty(pExpr, EP_IntValue) );
+    return sqlite3AffinityType(pExpr->u.zToken, 0);
+  }
+#endif
+  if( op==TK_AGG_COLUMN || op==TK_COLUMN ){
+    return sqlite3TableColumnAffinity(pExpr->pTab, pExpr->iColumn);
+  }
+  if( op==TK_SELECT_COLUMN ){
+    assert( pExpr->pLeft->flags&EP_xIsSelect );
+    return sqlite3ExprAffinity(
+        pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
+    );
+  }
+  return pExpr->affinity;
+}
+
+/*
+** Set the collating sequence for expression pExpr to be the collating
+** sequence named by pToken.   Return a pointer to a new Expr node that
+** implements the COLLATE operator.
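+**
+** (Illustrative addition, derived from the code below.)  Parsing
+** "x COLLATE nocase" produces a TK_COLLATE node that carries the
+** collation name, with the expression for "x" as its left child:
+**
+**        TK_COLLATE (u.zToken="nocase")
+**            |
+**        (expr for "x")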
+** +** If a memory allocation error occurs, that fact is recorded in pParse->db +** and the pExpr parameter is returned unchanged. +*/ +SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* Add the "COLLATE" clause to this expression */ + const Token *pCollName, /* Name of collating sequence */ + int dequote /* True to dequote pCollName */ +){ + if( pCollName->n>0 ){ + Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, dequote); + if( pNew ){ + pNew->pLeft = pExpr; + pNew->flags |= EP_Collate|EP_Skip; + pExpr = pNew; + } + } + return pExpr; +} +SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, const char *zC){ + Token s; + assert( zC!=0 ); + sqlite3TokenInit(&s, (char*)zC); + return sqlite3ExprAddCollateToken(pParse, pExpr, &s, 0); +} + +/* +** Skip over any TK_COLLATE operators and any unlikely() +** or likelihood() function at the root of an expression. +*/ +SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){ + while( pExpr && ExprHasProperty(pExpr, EP_Skip) ){ + if( ExprHasProperty(pExpr, EP_Unlikely) ){ + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + assert( pExpr->x.pList->nExpr>0 ); + assert( pExpr->op==TK_FUNCTION ); + pExpr = pExpr->x.pList->a[0].pExpr; + }else{ + assert( pExpr->op==TK_COLLATE ); + pExpr = pExpr->pLeft; + } + } + return pExpr; +} + +/* +** Return the collation sequence for the expression pExpr. If +** there is no defined collating sequence, return NULL. +** +** The collating sequence might be determined by a COLLATE operator +** or by the presence of a column with a defined collating sequence. +** COLLATE operators take first precedence. Left operands take +** precedence over right operands. +*/ +SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){ + sqlite3 *db = pParse->db; + CollSeq *pColl = 0; + Expr *p = pExpr; + while( p ){ + int op = p->op; + if( p->flags & EP_Generic ) break; + if( op==TK_CAST || op==TK_UPLUS ){ + p = p->pLeft; + continue; + } + if( op==TK_COLLATE || (op==TK_REGISTER && p->op2==TK_COLLATE) ){ + pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken); + break; + } + if( (op==TK_AGG_COLUMN || op==TK_COLUMN + || op==TK_REGISTER || op==TK_TRIGGER) + && p->pTab!=0 + ){ + /* op==TK_REGISTER && p->pTab!=0 happens when pExpr was originally + ** a TK_COLUMN but was previously evaluated and cached in a register */ + int j = p->iColumn; + if( j>=0 ){ + const char *zColl = p->pTab->aCol[j].zColl; + pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); + } + break; + } + if( p->flags & EP_Collate ){ + if( p->pLeft && (p->pLeft->flags & EP_Collate)!=0 ){ + p = p->pLeft; + }else{ + Expr *pNext = p->pRight; + /* The Expr.x union is never used at the same time as Expr.pRight */ + assert( p->x.pList==0 || p->pRight==0 ); + /* p->flags holds EP_Collate and p->pLeft->flags does not. And + ** p->x.pSelect cannot. So if p->x.pLeft exists, it must hold at + ** least one EP_Collate. Thus the following two ALWAYS. */ + if( p->x.pList!=0 && ALWAYS(!ExprHasProperty(p, EP_xIsSelect)) ){ + int i; + for(i=0; ALWAYS(ix.pList->nExpr); i++){ + if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){ + pNext = p->x.pList->a[i].pExpr; + break; + } + } + } + p = pNext; + } + }else{ + break; + } + } + if( sqlite3CheckCollSeq(pParse, pColl) ){ + pColl = 0; + } + return pColl; +} + +/* +** pExpr is an operand of a comparison operator. aff2 is the +** type affinity of the other operand. 
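+** (Illustrative addition, derived from the rules implemented below.)
+** For example, given
+**
+**     CREATE TABLE t1(a TEXT, b INTEGER);
+**     SELECT * FROM t1 WHERE a=b;
+**
+** one operand of "a=b" has TEXT affinity and the other has INTEGER
+** affinity; because one side is numeric, the comparison is performed
+** with NUMERIC affinity.
+**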
This routine returns the +** type affinity that should be used for the comparison operator. +*/ +SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2){ + char aff1 = sqlite3ExprAffinity(pExpr); + if( aff1 && aff2 ){ + /* Both sides of the comparison are columns. If one has numeric + ** affinity, use that. Otherwise use no affinity. + */ + if( sqlite3IsNumericAffinity(aff1) || sqlite3IsNumericAffinity(aff2) ){ + return SQLITE_AFF_NUMERIC; + }else{ + return SQLITE_AFF_BLOB; + } + }else if( !aff1 && !aff2 ){ + /* Neither side of the comparison is a column. Compare the + ** results directly. + */ + return SQLITE_AFF_BLOB; + }else{ + /* One side is a column, the other is not. Use the columns affinity. */ + assert( aff1==0 || aff2==0 ); + return (aff1 + aff2); + } +} + +/* +** pExpr is a comparison operator. Return the type affinity that should +** be applied to both operands prior to doing the comparison. +*/ +static char comparisonAffinity(Expr *pExpr){ + char aff; + assert( pExpr->op==TK_EQ || pExpr->op==TK_IN || pExpr->op==TK_LT || + pExpr->op==TK_GT || pExpr->op==TK_GE || pExpr->op==TK_LE || + pExpr->op==TK_NE || pExpr->op==TK_IS || pExpr->op==TK_ISNOT ); + assert( pExpr->pLeft ); + aff = sqlite3ExprAffinity(pExpr->pLeft); + if( pExpr->pRight ){ + aff = sqlite3CompareAffinity(pExpr->pRight, aff); + }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff); + }else if( aff==0 ){ + aff = SQLITE_AFF_BLOB; + } + return aff; +} + +/* +** pExpr is a comparison expression, eg. '=', '<', IN(...) etc. +** idx_affinity is the affinity of an indexed column. Return true +** if the index with affinity idx_affinity may be used to implement +** the comparison in pExpr. +*/ +SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity){ + char aff = comparisonAffinity(pExpr); + switch( aff ){ + case SQLITE_AFF_BLOB: + return 1; + case SQLITE_AFF_TEXT: + return idx_affinity==SQLITE_AFF_TEXT; + default: + return sqlite3IsNumericAffinity(idx_affinity); + } +} + +/* +** Return the P5 value that should be used for a binary comparison +** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2. +*/ +static u8 binaryCompareP5(Expr *pExpr1, Expr *pExpr2, int jumpIfNull){ + u8 aff = (char)sqlite3ExprAffinity(pExpr2); + aff = (u8)sqlite3CompareAffinity(pExpr1, aff) | (u8)jumpIfNull; + return aff; +} + +/* +** Return a pointer to the collation sequence that should be used by +** a binary comparison operator comparing pLeft and pRight. +** +** If the left hand expression has a collating sequence type, then it is +** used. Otherwise the collation sequence for the right hand expression +** is used, or the default (BINARY) if neither expression has a collating +** type. +** +** Argument pRight (but not pLeft) may be a null pointer. In this case, +** it is not considered. +*/ +SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq( + Parse *pParse, + Expr *pLeft, + Expr *pRight +){ + CollSeq *pColl; + assert( pLeft ); + if( pLeft->flags & EP_Collate ){ + pColl = sqlite3ExprCollSeq(pParse, pLeft); + }else if( pRight && (pRight->flags & EP_Collate)!=0 ){ + pColl = sqlite3ExprCollSeq(pParse, pRight); + }else{ + pColl = sqlite3ExprCollSeq(pParse, pLeft); + if( !pColl ){ + pColl = sqlite3ExprCollSeq(pParse, pRight); + } + } + return pColl; +} + +/* +** Generate code for a comparison operator. 
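+**
+** (Note derived from the code below.)  Observe the operand order in the
+** generated opcode: the right-hand register is passed as P1 and the
+** left-hand register as P3, with the jump target in P2, the collating
+** sequence in P4, and the affinity plus NULL-handling bits in P5.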
+*/
+static int codeCompare(
+  Parse *pParse,    /* The parsing (and code generating) context */
+  Expr *pLeft,      /* The left operand */
+  Expr *pRight,     /* The right operand */
+  int opcode,       /* The comparison opcode */
+  int in1, int in2, /* Register holding operands */
+  int dest,         /* Jump here if true.  */
+  int jumpIfNull    /* If true, jump if either operand is NULL */
+){
+  int p5;
+  int addr;
+  CollSeq *p4;
+
+  p4 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight);
+  p5 = binaryCompareP5(pLeft, pRight, jumpIfNull);
+  addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1,
+                           (void*)p4, P4_COLLSEQ);
+  sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5);
+  return addr;
+}
+
+/*
+** Return true if expression pExpr is a vector, or false otherwise.
+**
+** A vector is defined as any expression that results in two or more
+** columns of result.  Every TK_VECTOR node is a vector because the
+** parser will not generate a TK_VECTOR with fewer than two entries.
+** But a TK_SELECT might be either a vector or a scalar.  It is only
+** considered a vector if it has two or more result columns.
+*/
+SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr){
+  return sqlite3ExprVectorSize(pExpr)>1;
+}
+
+/*
+** If the expression passed as the only argument is of type TK_VECTOR
+** return the number of expressions in the vector. Or, if the expression
+** is a sub-select, return the number of columns in the sub-select. For
+** any other type of expression, return 1.
+*/
+SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr){
+  u8 op = pExpr->op;
+  if( op==TK_REGISTER ) op = pExpr->op2;
+  if( op==TK_VECTOR ){
+    return pExpr->x.pList->nExpr;
+  }else if( op==TK_SELECT ){
+    return pExpr->x.pSelect->pEList->nExpr;
+  }else{
+    return 1;
+  }
+}
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Return a pointer to a subexpression of pVector that is the i-th
+** column of the vector (numbered starting with 0).  The caller must
+** ensure that i is within range.
+**
+** If pVector is really a scalar (and "scalar" here includes subqueries
+** that return a single column!) then return pVector unmodified.
+**
+** pVector retains ownership of the returned subexpression.
+**
+** If the vector is a (SELECT ...) then the expression returned is
+** just the expression for the i-th term of the result set, and may
+** not be ready for evaluation because the table cursor has not yet
+** been positioned.
+*/
+SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr *pVector, int i){
+  assert( i<sqlite3ExprVectorSize(pVector) );
+  if( sqlite3ExprIsVector(pVector) ){
+    assert( pVector->op2==0 || pVector->op==TK_REGISTER );
+    if( pVector->op==TK_SELECT || pVector->op2==TK_SELECT ){
+      return pVector->x.pSelect->pEList->a[i].pExpr;
+    }else{
+      return pVector->x.pList->a[i].pExpr;
+    }
+  }
+  return pVector;
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) */
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Compute and return a new Expr object which when passed to
+** sqlite3ExprCode() will generate all necessary code to compute
+** the iField-th column of the vector expression pVector.
+**
+** It is ok for pVector to be a scalar (as long as iField==0).
+** In that case, this routine works like sqlite3ExprDup().
+**
+** The caller owns the returned Expr object and is responsible for
+** ensuring that the returned value eventually gets freed.
+**
+** The caller retains ownership of pVector.  If pVector is a TK_SELECT,
+** then the returned object will reference pVector and so pVector must remain
+** valid for the life of the returned object.  If pVector is a TK_VECTOR
+** or a scalar expression, then it can be deleted as soon as this routine
+** returns.
+** +** A trick to cause a TK_SELECT pVector to be deleted together with +** the returned Expr object is to attach the pVector to the pRight field +** of the returned TK_SELECT_COLUMN Expr object. +*/ +SQLITE_PRIVATE Expr *sqlite3ExprForVectorField( + Parse *pParse, /* Parsing context */ + Expr *pVector, /* The vector. List of expressions or a sub-SELECT */ + int iField /* Which column of the vector to return */ +){ + Expr *pRet; + if( pVector->op==TK_SELECT ){ + assert( pVector->flags & EP_xIsSelect ); + /* The TK_SELECT_COLUMN Expr node: + ** + ** pLeft: pVector containing TK_SELECT. Not deleted. + ** pRight: not used. But recursively deleted. + ** iColumn: Index of a column in pVector + ** iTable: 0 or the number of columns on the LHS of an assignment + ** pLeft->iTable: First in an array of register holding result, or 0 + ** if the result is not yet computed. + ** + ** sqlite3ExprDelete() specifically skips the recursive delete of + ** pLeft on TK_SELECT_COLUMN nodes. But pRight is followed, so pVector + ** can be attached to pRight to cause this node to take ownership of + ** pVector. Typically there will be multiple TK_SELECT_COLUMN nodes + ** with the same pLeft pointer to the pVector, but only one of them + ** will own the pVector. + */ + pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0); + if( pRet ){ + pRet->iColumn = iField; + pRet->pLeft = pVector; + } + assert( pRet==0 || pRet->iTable==0 ); + }else{ + if( pVector->op==TK_VECTOR ) pVector = pVector->x.pList->a[iField].pExpr; + pRet = sqlite3ExprDup(pParse->db, pVector, 0); + } + return pRet; +} +#endif /* !define(SQLITE_OMIT_SUBQUERY) */ + +/* +** If expression pExpr is of type TK_SELECT, generate code to evaluate +** it. Return the register in which the result is stored (or, if the +** sub-select returns more than one column, the first in an array +** of registers in which the result is stored). +** +** If pExpr is not a TK_SELECT expression, return 0. +*/ +static int exprCodeSubselect(Parse *pParse, Expr *pExpr){ + int reg = 0; +#ifndef SQLITE_OMIT_SUBQUERY + if( pExpr->op==TK_SELECT ){ + reg = sqlite3CodeSubselect(pParse, pExpr, 0, 0); + } +#endif + return reg; +} + +/* +** Argument pVector points to a vector expression - either a TK_VECTOR +** or TK_SELECT that returns more than one column. This function returns +** the register number of a register that contains the value of +** element iField of the vector. +** +** If pVector is a TK_SELECT expression, then code for it must have +** already been generated using the exprCodeSubselect() routine. In this +** case parameter regSelect should be the first in an array of registers +** containing the results of the sub-select. +** +** If pVector is of type TK_VECTOR, then code for the requested field +** is generated. In this case (*pRegFree) may be set to the number of +** a temporary register to be freed by the caller before returning. +** +** Before returning, output parameter (*ppExpr) is set to point to the +** Expr object corresponding to element iElem of the vector. 
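+**
+** (Illustrative sketch, not from the original sources; pVec and
+** regSelect stand in for the caller's state.)  Typical usage:
+*/
+#if 0  /* illustrative only; never compiled */
+  Expr *pElem;      /* OUT: expression for the chosen vector field */
+  int regFree = 0;  /* OUT: temp register to release, if one was used */
+  int r = exprVectorRegister(pParse, pVec, 1, regSelect, &pElem, &regFree);
+  /* ... emit code that reads register r ... */
+  sqlite3ReleaseTempReg(pParse, regFree);
+#endif
+/*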
+*/
+static int exprVectorRegister(
+  Parse *pParse,                  /* Parse context */
+  Expr *pVector,                  /* Vector to extract element from */
+  int iField,                     /* Field to extract from pVector */
+  int regSelect,                  /* First in array of registers */
+  Expr **ppExpr,                  /* OUT: Expression element */
+  int *pRegFree                   /* OUT: Temp register to free */
+){
+  u8 op = pVector->op;
+  assert( op==TK_VECTOR || op==TK_REGISTER || op==TK_SELECT );
+  if( op==TK_REGISTER ){
+    *ppExpr = sqlite3VectorFieldSubexpr(pVector, iField);
+    return pVector->iTable+iField;
+  }
+  if( op==TK_SELECT ){
+    *ppExpr = pVector->x.pSelect->pEList->a[iField].pExpr;
+    return regSelect+iField;
+  }
+  *ppExpr = pVector->x.pList->a[iField].pExpr;
+  return sqlite3ExprCodeTemp(pParse, *ppExpr, pRegFree);
+}
+
+/*
+** Expression pExpr is a comparison between two vector values. Compute
+** the result of the comparison (1, 0, or NULL) and write that
+** result into register dest.
+**
+** The caller must satisfy the following preconditions:
+**
+**    if pExpr->op==TK_IS:      op==TK_EQ and p5==SQLITE_NULLEQ
+**    if pExpr->op==TK_ISNOT:   op==TK_NE and p5==SQLITE_NULLEQ
+**    otherwise:                op==pExpr->op and p5==0
+*/
+static void codeVectorCompare(
+  Parse *pParse,        /* Code generator context */
+  Expr *pExpr,          /* The comparison operation */
+  int dest,             /* Write results into this register */
+  u8 op,                /* Comparison operator */
+  u8 p5                 /* SQLITE_NULLEQ or zero */
+){
+  Vdbe *v = pParse->pVdbe;
+  Expr *pLeft = pExpr->pLeft;
+  Expr *pRight = pExpr->pRight;
+  int nLeft = sqlite3ExprVectorSize(pLeft);
+  int i;
+  int regLeft = 0;
+  int regRight = 0;
+  u8 opx = op;
+  int addrDone = sqlite3VdbeMakeLabel(v);
+
+  if( nLeft!=sqlite3ExprVectorSize(pRight) ){
+    sqlite3ErrorMsg(pParse, "row value misused");
+    return;
+  }
+  assert( pExpr->op==TK_EQ || pExpr->op==TK_NE
+       || pExpr->op==TK_IS || pExpr->op==TK_ISNOT
+       || pExpr->op==TK_LT || pExpr->op==TK_GT
+       || pExpr->op==TK_LE || pExpr->op==TK_GE
+  );
+  assert( pExpr->op==op || (pExpr->op==TK_IS && op==TK_EQ)
+            || (pExpr->op==TK_ISNOT && op==TK_NE) );
+  assert( p5==0 || pExpr->op!=op );
+  assert( p5==SQLITE_NULLEQ || pExpr->op==op );
+
+  p5 |= SQLITE_STOREP2;
+  if( opx==TK_LE ) opx = TK_LT;
+  if( opx==TK_GE ) opx = TK_GT;
+
+  regLeft = exprCodeSubselect(pParse, pLeft);
+  regRight = exprCodeSubselect(pParse, pRight);
+
+  for(i=0; 1 /*Loop exits by "break"*/; i++){
+    int regFree1 = 0, regFree2 = 0;
+    Expr *pL, *pR;
+    int r1, r2;
+    assert( i>=0 && i<nLeft );
+    if( i>0 ) sqlite3ExprCachePush(pParse);
+    r1 = exprVectorRegister(pParse, pLeft, i, regLeft, &pL, &regFree1);
+    r2 = exprVectorRegister(pParse, pRight, i, regRight, &pR, &regFree2);
+    codeCompare(pParse, pL, pR, opx, r1, r2, dest, p5);
+    testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+    testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+    testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+    testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+    testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
+    testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
+    sqlite3ReleaseTempReg(pParse, regFree1);
+    sqlite3ReleaseTempReg(pParse, regFree2);
+    if( i>0 ) sqlite3ExprCachePop(pParse);
+    if( i==nLeft-1 ){
+      break;
+    }
+    if( opx==TK_EQ ){
+      sqlite3VdbeAddOp2(v, OP_IfNot, dest, addrDone); VdbeCoverage(v);
+      p5 |= SQLITE_KEEPNULL;
+    }else if( opx==TK_NE ){
+      sqlite3VdbeAddOp2(v, OP_If, dest, addrDone); VdbeCoverage(v);
+      p5 |= SQLITE_KEEPNULL;
+    }else{
+      assert( op==TK_LT || op==TK_GT || op==TK_LE || op==TK_GE );
+      sqlite3VdbeAddOp2(v, OP_ElseNotEq, 0, addrDone);
+      VdbeCoverageIf(v, op==TK_LT);
+      VdbeCoverageIf(v, op==TK_GT);
VdbeCoverageIf(v, op==TK_LE); + VdbeCoverageIf(v, op==TK_GE); + if( i==nLeft-2 ) opx = op; + } + } + sqlite3VdbeResolveLabel(v, addrDone); +} + +#if SQLITE_MAX_EXPR_DEPTH>0 +/* +** Check that argument nHeight is less than or equal to the maximum +** expression depth allowed. If it is not, leave an error message in +** pParse. +*/ +SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse *pParse, int nHeight){ + int rc = SQLITE_OK; + int mxHeight = pParse->db->aLimit[SQLITE_LIMIT_EXPR_DEPTH]; + if( nHeight>mxHeight ){ + sqlite3ErrorMsg(pParse, + "Expression tree is too large (maximum depth %d)", mxHeight + ); + rc = SQLITE_ERROR; + } + return rc; +} + +/* The following three functions, heightOfExpr(), heightOfExprList() +** and heightOfSelect(), are used to determine the maximum height +** of any expression tree referenced by the structure passed as the +** first argument. +** +** If this maximum height is greater than the current value pointed +** to by pnHeight, the second parameter, then set *pnHeight to that +** value. +*/ +static void heightOfExpr(Expr *p, int *pnHeight){ + if( p ){ + if( p->nHeight>*pnHeight ){ + *pnHeight = p->nHeight; + } + } +} +static void heightOfExprList(ExprList *p, int *pnHeight){ + if( p ){ + int i; + for(i=0; inExpr; i++){ + heightOfExpr(p->a[i].pExpr, pnHeight); + } + } +} +static void heightOfSelect(Select *p, int *pnHeight){ + if( p ){ + heightOfExpr(p->pWhere, pnHeight); + heightOfExpr(p->pHaving, pnHeight); + heightOfExpr(p->pLimit, pnHeight); + heightOfExpr(p->pOffset, pnHeight); + heightOfExprList(p->pEList, pnHeight); + heightOfExprList(p->pGroupBy, pnHeight); + heightOfExprList(p->pOrderBy, pnHeight); + heightOfSelect(p->pPrior, pnHeight); + } +} + +/* +** Set the Expr.nHeight variable in the structure passed as an +** argument. An expression with no children, Expr.pList or +** Expr.pSelect member has a height of 1. Any other expression +** has a height equal to the maximum height of any other +** referenced Expr plus one. +** +** Also propagate EP_Propagate flags up from Expr.x.pList to Expr.flags, +** if appropriate. +*/ +static void exprSetHeight(Expr *p){ + int nHeight = 0; + heightOfExpr(p->pLeft, &nHeight); + heightOfExpr(p->pRight, &nHeight); + if( ExprHasProperty(p, EP_xIsSelect) ){ + heightOfSelect(p->x.pSelect, &nHeight); + }else if( p->x.pList ){ + heightOfExprList(p->x.pList, &nHeight); + p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList); + } + p->nHeight = nHeight + 1; +} + +/* +** Set the Expr.nHeight variable using the exprSetHeight() function. If +** the height is greater than the maximum allowed expression depth, +** leave an error in pParse. +** +** Also propagate all EP_Propagate flags from the Expr.x.pList into +** Expr.flags. +*/ +SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ + if( pParse->nErr ) return; + exprSetHeight(p); + sqlite3ExprCheckHeight(pParse, p->nHeight); +} + +/* +** Return the maximum height of any expression tree referenced +** by the select statement passed as an argument. +*/ +SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){ + int nHeight = 0; + heightOfSelect(p, &nHeight); + return nHeight; +} +#else /* ABOVE: Height enforcement enabled. BELOW: Height enforcement off */ +/* +** Propagate all EP_Propagate flags from the Expr.x.pList into +** Expr.flags. 
+*/ +SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ + if( p && p->x.pList && !ExprHasProperty(p, EP_xIsSelect) ){ + p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList); + } +} +#define exprSetHeight(y) +#endif /* SQLITE_MAX_EXPR_DEPTH>0 */ + +/* +** This routine is the core allocator for Expr nodes. +** +** Construct a new expression node and return a pointer to it. Memory +** for this node and for the pToken argument is a single allocation +** obtained from sqlite3DbMalloc(). The calling function +** is responsible for making sure the node eventually gets freed. +** +** If dequote is true, then the token (if it exists) is dequoted. +** If dequote is false, no dequoting is performed. The deQuote +** parameter is ignored if pToken is NULL or if the token does not +** appear to be quoted. If the quotes were of the form "..." (double-quotes) +** then the EP_DblQuoted flag is set on the expression node. +** +** Special case: If op==TK_INTEGER and pToken points to a string that +** can be translated into a 32-bit integer, then the token is not +** stored in u.zToken. Instead, the integer values is written +** into u.iValue and the EP_IntValue flag is set. No extra storage +** is allocated to hold the integer text and the dequote flag is ignored. +*/ +SQLITE_PRIVATE Expr *sqlite3ExprAlloc( + sqlite3 *db, /* Handle for sqlite3DbMallocRawNN() */ + int op, /* Expression opcode */ + const Token *pToken, /* Token argument. Might be NULL */ + int dequote /* True to dequote */ +){ + Expr *pNew; + int nExtra = 0; + int iValue = 0; + + assert( db!=0 ); + if( pToken ){ + if( op!=TK_INTEGER || pToken->z==0 + || sqlite3GetInt32(pToken->z, &iValue)==0 ){ + nExtra = pToken->n+1; + assert( iValue>=0 ); + } + } + pNew = sqlite3DbMallocRawNN(db, sizeof(Expr)+nExtra); + if( pNew ){ + memset(pNew, 0, sizeof(Expr)); + pNew->op = (u8)op; + pNew->iAgg = -1; + if( pToken ){ + if( nExtra==0 ){ + pNew->flags |= EP_IntValue; + pNew->u.iValue = iValue; + }else{ + pNew->u.zToken = (char*)&pNew[1]; + assert( pToken->z!=0 || pToken->n==0 ); + if( pToken->n ) memcpy(pNew->u.zToken, pToken->z, pToken->n); + pNew->u.zToken[pToken->n] = 0; + if( dequote && sqlite3Isquote(pNew->u.zToken[0]) ){ + if( pNew->u.zToken[0]=='"' ) pNew->flags |= EP_DblQuoted; + sqlite3Dequote(pNew->u.zToken); + } + } + } +#if SQLITE_MAX_EXPR_DEPTH>0 + pNew->nHeight = 1; +#endif + } + return pNew; +} + +/* +** Allocate a new expression node from a zero-terminated token that has +** already been dequoted. +*/ +SQLITE_PRIVATE Expr *sqlite3Expr( + sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ + int op, /* Expression opcode */ + const char *zToken /* Token argument. Might be NULL */ +){ + Token x; + x.z = zToken; + x.n = zToken ? sqlite3Strlen30(zToken) : 0; + return sqlite3ExprAlloc(db, op, &x, 0); +} + +/* +** Attach subtrees pLeft and pRight to the Expr node pRoot. +** +** If pRoot==NULL that means that a memory allocation error has occurred. +** In that case, delete the subtrees pLeft and pRight. 
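+**
+** (Note with illustrative sketch; the sketch is hypothetical and never
+** compiled.)  This convention lets expression-building code run without
+** an OOM check after every allocation: a NULL node propagates upward and
+** the subtrees are reclaimed here rather than leaked.
+*/
+#if 0
+  Expr *pA = sqlite3Expr(db, TK_ID, "a");      /* may be NULL on OOM */
+  Expr *pB = sqlite3Expr(db, TK_ID, "b");      /* may be NULL on OOM */
+  Expr *pEq = sqlite3PExpr(pParse, TK_EQ, pA, pB);
+  /* If any allocation failed, pEq is NULL, db->mallocFailed is set, and
+  ** pA/pB have already been freed -- no cleanup needed at the call site. */
+#endif
+/*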
+*/ +SQLITE_PRIVATE void sqlite3ExprAttachSubtrees( + sqlite3 *db, + Expr *pRoot, + Expr *pLeft, + Expr *pRight +){ + if( pRoot==0 ){ + assert( db->mallocFailed ); + sqlite3ExprDelete(db, pLeft); + sqlite3ExprDelete(db, pRight); + }else{ + if( pRight ){ + pRoot->pRight = pRight; + pRoot->flags |= EP_Propagate & pRight->flags; + } + if( pLeft ){ + pRoot->pLeft = pLeft; + pRoot->flags |= EP_Propagate & pLeft->flags; + } + exprSetHeight(pRoot); + } +} + +/* +** Allocate an Expr node which joins as many as two subtrees. +** +** One or both of the subtrees can be NULL. Return a pointer to the new +** Expr node. Or, if an OOM error occurs, set pParse->db->mallocFailed, +** free the subtrees and return NULL. +*/ +SQLITE_PRIVATE Expr *sqlite3PExpr( + Parse *pParse, /* Parsing context */ + int op, /* Expression opcode */ + Expr *pLeft, /* Left operand */ + Expr *pRight /* Right operand */ +){ + Expr *p; + if( op==TK_AND && pParse->nErr==0 ){ + /* Take advantage of short-circuit false optimization for AND */ + p = sqlite3ExprAnd(pParse->db, pLeft, pRight); + }else{ + p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)); + if( p ){ + memset(p, 0, sizeof(Expr)); + p->op = op & TKFLG_MASK; + p->iAgg = -1; + } + sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight); + } + if( p ) { + sqlite3ExprCheckHeight(pParse, p->nHeight); + } + return p; +} + +/* +** Add pSelect to the Expr.x.pSelect field. Or, if pExpr is NULL (due +** do a memory allocation failure) then delete the pSelect object. +*/ +SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse *pParse, Expr *pExpr, Select *pSelect){ + if( pExpr ){ + pExpr->x.pSelect = pSelect; + ExprSetProperty(pExpr, EP_xIsSelect|EP_Subquery); + sqlite3ExprSetHeightAndFlags(pParse, pExpr); + }else{ + assert( pParse->db->mallocFailed ); + sqlite3SelectDelete(pParse->db, pSelect); + } +} + + +/* +** If the expression is always either TRUE or FALSE (respectively), +** then return 1. If one cannot determine the truth value of the +** expression at compile-time return 0. +** +** This is an optimization. If is OK to return 0 here even if +** the expression really is always false or false (a false negative). +** But it is a bug to return 1 if the expression might have different +** boolean values in different circumstances (a false positive.) +** +** Note that if the expression is part of conditional for a +** LEFT JOIN, then we cannot determine at compile-time whether or not +** is it true or false, so always return 0. +*/ +static int exprAlwaysTrue(Expr *p){ + int v = 0; + if( ExprHasProperty(p, EP_FromJoin) ) return 0; + if( !sqlite3ExprIsInteger(p, &v) ) return 0; + return v!=0; +} +static int exprAlwaysFalse(Expr *p){ + int v = 0; + if( ExprHasProperty(p, EP_FromJoin) ) return 0; + if( !sqlite3ExprIsInteger(p, &v) ) return 0; + return v==0; +} + +/* +** Join two expressions using an AND operator. If either expression is +** NULL, then just return the other expression. +** +** If one side or the other of the AND is known to be false, then instead +** of returning an AND expression, just return a constant expression with +** a value of false. 
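+**
+** (Illustrative addition, derived from the code below.)  For example,
+** ANDing "x>5" with the integer literal 0 does not build a TK_AND node
+** at all; both operands are freed and a single constant-false (integer 0)
+** expression is returned:
+**
+**       x>5 AND 0       becomes       0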
+*/ +SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3 *db, Expr *pLeft, Expr *pRight){ + if( pLeft==0 ){ + return pRight; + }else if( pRight==0 ){ + return pLeft; + }else if( exprAlwaysFalse(pLeft) || exprAlwaysFalse(pRight) ){ + sqlite3ExprDelete(db, pLeft); + sqlite3ExprDelete(db, pRight); + return sqlite3ExprAlloc(db, TK_INTEGER, &sqlite3IntTokens[0], 0); + }else{ + Expr *pNew = sqlite3ExprAlloc(db, TK_AND, 0, 0); + sqlite3ExprAttachSubtrees(db, pNew, pLeft, pRight); + return pNew; + } +} + +/* +** Construct a new expression node for a function with multiple +** arguments. +*/ +SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *pToken){ + Expr *pNew; + sqlite3 *db = pParse->db; + assert( pToken ); + pNew = sqlite3ExprAlloc(db, TK_FUNCTION, pToken, 1); + if( pNew==0 ){ + sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */ + return 0; + } + pNew->x.pList = pList; + assert( !ExprHasProperty(pNew, EP_xIsSelect) ); + sqlite3ExprSetHeightAndFlags(pParse, pNew); + return pNew; +} + +/* +** Assign a variable number to an expression that encodes a wildcard +** in the original SQL statement. +** +** Wildcards consisting of a single "?" are assigned the next sequential +** variable number. +** +** Wildcards of the form "?nnn" are assigned the number "nnn". We make +** sure "nnn" is not too big to avoid a denial of service attack when +** the SQL statement comes from an external source. +** +** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number +** as the previous instance of the same wildcard. Or if this is the first +** instance of the wildcard, the next sequential variable number is +** assigned. +*/ +SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n){ + sqlite3 *db = pParse->db; + const char *z; + ynVar x; + + if( pExpr==0 ) return; + assert( !ExprHasProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) ); + z = pExpr->u.zToken; + assert( z!=0 ); + assert( z[0]!=0 ); + assert( n==sqlite3Strlen30(z) ); + if( z[1]==0 ){ + /* Wildcard of the form "?". Assign the next variable number */ + assert( z[0]=='?' ); + x = (ynVar)(++pParse->nVar); + }else{ + int doAdd = 0; + if( z[0]=='?' ){ + /* Wildcard of the form "?nnn". Convert "nnn" to an integer and + ** use it as the variable number */ + i64 i; + int bOk; + if( n==2 ){ /*OPTIMIZATION-IF-TRUE*/ + i = z[1]-'0'; /* The common case of ?N for a single digit N */ + bOk = 1; + }else{ + bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8); + } + testcase( i==0 ); + testcase( i==1 ); + testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 ); + testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ); + if( bOk==0 || i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ + sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d", + db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]); + return; + } + x = (ynVar)i; + if( x>pParse->nVar ){ + pParse->nVar = (int)x; + doAdd = 1; + }else if( sqlite3VListNumToName(pParse->pVList, x)==0 ){ + doAdd = 1; + } + }else{ + /* Wildcards like ":aaa", "$aaa" or "@aaa". 
Reuse the same variable + ** number as the prior appearance of the same name, or if the name + ** has never appeared before, reuse the same variable number + */ + x = (ynVar)sqlite3VListNameToNum(pParse->pVList, z, n); + if( x==0 ){ + x = (ynVar)(++pParse->nVar); + doAdd = 1; + } + } + if( doAdd ){ + pParse->pVList = sqlite3VListAdd(db, pParse->pVList, z, n, x); + } + } + pExpr->iColumn = x; + if( x>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ + sqlite3ErrorMsg(pParse, "too many SQL variables"); + } +} + +/* +** Recursively delete an expression tree. +*/ +static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ + assert( p!=0 ); + /* Sanity check: Assert that the IntValue is non-negative if it exists */ + assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 ); +#ifdef SQLITE_DEBUG + if( ExprHasProperty(p, EP_Leaf) && !ExprHasProperty(p, EP_TokenOnly) ){ + assert( p->pLeft==0 ); + assert( p->pRight==0 ); + assert( p->x.pSelect==0 ); + } +#endif + if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){ + /* The Expr.x union is never used at the same time as Expr.pRight */ + assert( p->x.pList==0 || p->pRight==0 ); + if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft); + sqlite3ExprDelete(db, p->pRight); + if( ExprHasProperty(p, EP_xIsSelect) ){ + sqlite3SelectDelete(db, p->x.pSelect); + }else{ + sqlite3ExprListDelete(db, p->x.pList); + } + } + if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken); + if( !ExprHasProperty(p, EP_Static) ){ + sqlite3DbFree(db, p); + } +} +SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ + if( p ) sqlite3ExprDeleteNN(db, p); +} + +/* +** Return the number of bytes allocated for the expression structure +** passed as the first argument. This is always one of EXPR_FULLSIZE, +** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE. +*/ +static int exprStructSize(Expr *p){ + if( ExprHasProperty(p, EP_TokenOnly) ) return EXPR_TOKENONLYSIZE; + if( ExprHasProperty(p, EP_Reduced) ) return EXPR_REDUCEDSIZE; + return EXPR_FULLSIZE; +} + +/* +** The dupedExpr*Size() routines each return the number of bytes required +** to store a copy of an expression or expression tree. They differ in +** how much of the tree is measured. +** +** dupedExprStructSize() Size of only the Expr structure +** dupedExprNodeSize() Size of Expr + space for token +** dupedExprSize() Expr + token + subtree components +** +*************************************************************************** +** +** The dupedExprStructSize() function returns two values OR-ed together: +** (1) the space required for a copy of the Expr structure only and +** (2) the EP_xxx flags that indicate what the structure size should be. +** The return values is always one of: +** +** EXPR_FULLSIZE +** EXPR_REDUCEDSIZE | EP_Reduced +** EXPR_TOKENONLYSIZE | EP_TokenOnly +** +** The size of the structure can be found by masking the return value +** of this routine with 0xfff. The flags can be found by masking the +** return value with EP_Reduced|EP_TokenOnly. +** +** Note that with flags==EXPRDUP_REDUCE, this routines works on full-size +** (unreduced) Expr objects as they or originally constructed by the parser. +** During expression analysis, extra information is computed and moved into +** later parts of teh Expr object and that extra information might get chopped +** off if the expression is reduced. Note also that it does not work to +** make an EXPRDUP_REDUCE copy of a reduced expression. It is only legal +** to reduce a pristine expression tree from the parser. 
The implementation +** of dupedExprStructSize() contain multiple assert() statements that attempt +** to enforce this constraint. +*/ +static int dupedExprStructSize(Expr *p, int flags){ + int nSize; + assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ + assert( EXPR_FULLSIZE<=0xfff ); + assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); + if( 0==flags || p->op==TK_SELECT_COLUMN ){ + nSize = EXPR_FULLSIZE; + }else{ + assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); + assert( !ExprHasProperty(p, EP_FromJoin) ); + assert( !ExprHasProperty(p, EP_MemToken) ); + assert( !ExprHasProperty(p, EP_NoReduce) ); + if( p->pLeft || p->x.pList ){ + nSize = EXPR_REDUCEDSIZE | EP_Reduced; + }else{ + assert( p->pRight==0 ); + nSize = EXPR_TOKENONLYSIZE | EP_TokenOnly; + } + } + return nSize; +} + +/* +** This function returns the space in bytes required to store the copy +** of the Expr structure and a copy of the Expr.u.zToken string (if that +** string is defined.) +*/ +static int dupedExprNodeSize(Expr *p, int flags){ + int nByte = dupedExprStructSize(p, flags) & 0xfff; + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nByte += sqlite3Strlen30(p->u.zToken)+1; + } + return ROUND8(nByte); +} + +/* +** Return the number of bytes required to create a duplicate of the +** expression passed as the first argument. The second argument is a +** mask containing EXPRDUP_XXX flags. +** +** The value returned includes space to create a copy of the Expr struct +** itself and the buffer referred to by Expr.u.zToken, if any. +** +** If the EXPRDUP_REDUCE flag is set, then the return value includes +** space to duplicate all Expr nodes in the tree formed by Expr.pLeft +** and Expr.pRight variables (but not for any structures pointed to or +** descended from the Expr.x.pList or Expr.x.pSelect variables). +*/ +static int dupedExprSize(Expr *p, int flags){ + int nByte = 0; + if( p ){ + nByte = dupedExprNodeSize(p, flags); + if( flags&EXPRDUP_REDUCE ){ + nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags); + } + } + return nByte; +} + +/* +** This function is similar to sqlite3ExprDup(), except that if pzBuffer +** is not NULL then *pzBuffer is assumed to point to a buffer large enough +** to store the copy of expression p, the copies of p->u.zToken +** (if applicable), and the copies of the p->pLeft and p->pRight expressions, +** if any. Before returning, *pzBuffer is set to the first byte past the +** portion of the buffer copied into by this function. +*/ +static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){ + Expr *pNew; /* Value to return */ + u8 *zAlloc; /* Memory space from which to build Expr object */ + u32 staticFlag; /* EP_Static if space not obtained from malloc */ + + assert( db!=0 ); + assert( p ); + assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE ); + assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE ); + + /* Figure out where to write the new Expr structure. */ + if( pzBuffer ){ + zAlloc = *pzBuffer; + staticFlag = EP_Static; + }else{ + zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags)); + staticFlag = 0; + } + pNew = (Expr *)zAlloc; + + if( pNew ){ + /* Set nNewSize to the size allocated for the structure pointed to + ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or + ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed + ** by the copy of the p->u.zToken string (if any). 
+    */
+    const unsigned nStructSize = dupedExprStructSize(p, dupFlags);
+    const int nNewSize = nStructSize & 0xfff;
+    int nToken;
+    if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){
+      nToken = sqlite3Strlen30(p->u.zToken) + 1;
+    }else{
+      nToken = 0;
+    }
+    if( dupFlags ){
+      assert( ExprHasProperty(p, EP_Reduced)==0 );
+      memcpy(zAlloc, p, nNewSize);
+    }else{
+      u32 nSize = (u32)exprStructSize(p);
+      memcpy(zAlloc, p, nSize);
+      if( nSize<EXPR_FULLSIZE ){
+        memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize);
+      }
+    }
+
+    /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */
+    pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken);
+    pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly);
+    pNew->flags |= staticFlag;
+
+    /* Copy the p->u.zToken string, if any. */
+    if( nToken ){
+      char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize];
+      memcpy(zToken, p->u.zToken, nToken);
+    }
+
+    if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){
+      /* Fill in the pNew->x.pSelect or pNew->x.pList member. */
+      if( ExprHasProperty(p, EP_xIsSelect) ){
+        pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags);
+      }else{
+        pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags);
+      }
+    }
+
+    /* Fill in pNew->pLeft and pNew->pRight. */
+    if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){
+      zAlloc += dupedExprNodeSize(p, dupFlags);
+      if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){
+        pNew->pLeft = p->pLeft ?
+                      exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0;
+        pNew->pRight = p->pRight ?
+                       exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0;
+      }
+      if( pzBuffer ){
+        *pzBuffer = zAlloc;
+      }
+    }else{
+      if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
+        if( pNew->op==TK_SELECT_COLUMN ){
+          pNew->pLeft = p->pLeft;
+          assert( p->iColumn==0 || p->pRight==0 );
+          assert( p->pRight==0 || p->pRight==p->pLeft );
+        }else{
+          pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
+        }
+        pNew->pRight = sqlite3ExprDup(db, p->pRight, 0);
+      }
+    }
+  }
+  return pNew;
+}
+
+/*
+** Create and return a deep copy of the object passed as the second
+** argument. If an OOM condition is encountered, NULL is returned
+** and the db->mallocFailed flag set.
+*/
+#ifndef SQLITE_OMIT_CTE
+static With *withDup(sqlite3 *db, With *p){
+  With *pRet = 0;
+  if( p ){
+    int nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1);
+    pRet = sqlite3DbMallocZero(db, nByte);
+    if( pRet ){
+      int i;
+      pRet->nCte = p->nCte;
+      for(i=0; i<p->nCte; i++){
+        pRet->a[i].pSelect = sqlite3SelectDup(db, p->a[i].pSelect, 0);
+        pRet->a[i].pCols = sqlite3ExprListDup(db, p->a[i].pCols, 0);
+        pRet->a[i].zName = sqlite3DbStrDup(db, p->a[i].zName);
+      }
+    }
+  }
+  return pRet;
+}
+#else
+# define withDup(x,y) 0
+#endif
+
+/*
+** The following group of routines make deep copies of expressions,
+** expression lists, ID lists, and select statements.  The copies can
+** be deleted (by being passed to their respective ...Delete() routines)
+** without affecting the originals.
+**
+** The expression list, ID, and source lists returned by sqlite3ExprListDup(),
+** sqlite3IdListDup(), and sqlite3SrcListDup() can not be further expanded
+** by subsequent calls to sqlite*ListAppend() routines.
+**
+** Any tables that the SrcList might point to are not duplicated.
+**
+** The flags parameter contains a combination of the EXPRDUP_XXX flags.
+** If the EXPRDUP_REDUCE flag is set, then the structure returned is a
+** truncated version of the usual Expr structure that will be stored as
+** part of the in-memory representation of the database schema.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){
+  assert( flags==0 || flags==EXPRDUP_REDUCE );
+  return p ? exprDup(db, p, flags, 0) : 0;
+}
+SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){
+  ExprList *pNew;
+  struct ExprList_item *pItem, *pOldItem;
+  int i;
+  Expr *pPriorSelectCol = 0;
+  assert( db!=0 );
+  if( p==0 ) return 0;
+  pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) );
+  if( pNew==0 ) return 0;
+  pNew->nExpr = i = p->nExpr;
+  if( (flags & EXPRDUP_REDUCE)==0 ) for(i=1; i<p->nExpr; i+=i){}
+  pNew->a = pItem = sqlite3DbMallocRawNN(db, i*sizeof(p->a[0]) );
+  if( pItem==0 ){
+    sqlite3DbFree(db, pNew);
+    return 0;
+  }
+  pOldItem = p->a;
+  for(i=0; i<p->nExpr; i++, pItem++, pOldItem++){
+    Expr *pOldExpr = pOldItem->pExpr;
+    Expr *pNewExpr;
+    pItem->pExpr = sqlite3ExprDup(db, pOldExpr, flags);
+    if( pOldExpr
+     && pOldExpr->op==TK_SELECT_COLUMN
+     && (pNewExpr = pItem->pExpr)!=0
+    ){
+      assert( pNewExpr->iColumn==0 || i>0 );
+      if( pNewExpr->iColumn==0 ){
+        assert( pOldExpr->pLeft==pOldExpr->pRight );
+        pPriorSelectCol = pNewExpr->pLeft = pNewExpr->pRight;
+      }else{
+        assert( i>0 );
+        assert( pItem[-1].pExpr!=0 );
+        assert( pNewExpr->iColumn==pItem[-1].pExpr->iColumn+1 );
+        assert( pPriorSelectCol==pItem[-1].pExpr->pLeft );
+        pNewExpr->pLeft = pPriorSelectCol;
+      }
+    }
+    pItem->zName = sqlite3DbStrDup(db, pOldItem->zName);
+    pItem->zSpan = sqlite3DbStrDup(db, pOldItem->zSpan);
+    pItem->sortOrder = pOldItem->sortOrder;
+    pItem->done = 0;
+    pItem->bSpanIsTab = pOldItem->bSpanIsTab;
+    pItem->u = pOldItem->u;
+  }
+  return pNew;
+}
+
+/*
+** If cursors, triggers, views and subqueries are all omitted from
+** the build, then none of the following routines, except for
+** sqlite3SelectDup(), can be called. sqlite3SelectDup() is sometimes
+** called with a NULL argument.
+*/
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \
+ || !defined(SQLITE_OMIT_SUBQUERY)
+SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){
+  SrcList *pNew;
+  int i;
+  int nByte;
+  assert( db!=0 );
+  if( p==0 ) return 0;
+  nByte = sizeof(*p) + (p->nSrc>0 ?
sizeof(p->a[0]) * (p->nSrc-1) : 0); + pNew = sqlite3DbMallocRawNN(db, nByte ); + if( pNew==0 ) return 0; + pNew->nSrc = pNew->nAlloc = p->nSrc; + for(i=0; inSrc; i++){ + struct SrcList_item *pNewItem = &pNew->a[i]; + struct SrcList_item *pOldItem = &p->a[i]; + Table *pTab; + pNewItem->pSchema = pOldItem->pSchema; + pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); + pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); + pNewItem->fg = pOldItem->fg; + pNewItem->iCursor = pOldItem->iCursor; + pNewItem->addrFillSub = pOldItem->addrFillSub; + pNewItem->regReturn = pOldItem->regReturn; + if( pNewItem->fg.isIndexedBy ){ + pNewItem->u1.zIndexedBy = sqlite3DbStrDup(db, pOldItem->u1.zIndexedBy); + } + pNewItem->pIBIndex = pOldItem->pIBIndex; + if( pNewItem->fg.isTabFunc ){ + pNewItem->u1.pFuncArg = + sqlite3ExprListDup(db, pOldItem->u1.pFuncArg, flags); + } + pTab = pNewItem->pTab = pOldItem->pTab; + if( pTab ){ + pTab->nTabRef++; + } + pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); + pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags); + pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing); + pNewItem->colUsed = pOldItem->colUsed; + } + return pNew; +} +SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ + IdList *pNew; + int i; + assert( db!=0 ); + if( p==0 ) return 0; + pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) ); + if( pNew==0 ) return 0; + pNew->nId = p->nId; + pNew->a = sqlite3DbMallocRawNN(db, p->nId*sizeof(p->a[0]) ); + if( pNew->a==0 ){ + sqlite3DbFree(db, pNew); + return 0; + } + /* Note that because the size of the allocation for p->a[] is not + ** necessarily a power of two, sqlite3IdListAppend() may not be called + ** on the duplicate created by this function. */ + for(i=0; inId; i++){ + struct IdList_item *pNewItem = &pNew->a[i]; + struct IdList_item *pOldItem = &p->a[i]; + pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); + pNewItem->idx = pOldItem->idx; + } + return pNew; +} +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){ + Select *pRet = 0; + Select *pNext = 0; + Select **pp = &pRet; + Select *p; + + assert( db!=0 ); + for(p=pDup; p; p=p->pPrior){ + Select *pNew = sqlite3DbMallocRawNN(db, sizeof(*p) ); + if( pNew==0 ) break; + pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags); + pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags); + pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags); + pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags); + pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags); + pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags); + pNew->op = p->op; + pNew->pNext = pNext; + pNew->pPrior = 0; + pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); + pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags); + pNew->iLimit = 0; + pNew->iOffset = 0; + pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; + pNew->addrOpenEphm[0] = -1; + pNew->addrOpenEphm[1] = -1; + pNew->nSelectRow = p->nSelectRow; + pNew->pWith = withDup(db, p->pWith); + sqlite3SelectSetName(pNew, p->zSelName); + *pp = pNew; + pp = &pNew->pPrior; + pNext = pNew; + } + + return pRet; +} +#else +SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ + assert( p==0 ); + return 0; +} +#endif + + +/* +** Add a new element to the end of an expression list. If pList is +** initially NULL, then create a new expression list. 
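+**
+** (Note derived from the code below.)  The pList->a array grows by
+** doubling: whenever nExpr reaches a power of two the array is
+** reallocated to twice its current size, so a sequence of appends costs
+** amortized O(1) per element.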
+** +** If a memory allocation error occurs, the entire list is freed and +** NULL is returned. If non-NULL is returned, then it is guaranteed +** that the new entry was successfully appended. +*/ +SQLITE_PRIVATE ExprList *sqlite3ExprListAppend( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to append. Might be NULL */ + Expr *pExpr /* Expression to be appended. Might be NULL */ +){ + sqlite3 *db = pParse->db; + assert( db!=0 ); + if( pList==0 ){ + pList = sqlite3DbMallocRawNN(db, sizeof(ExprList) ); + if( pList==0 ){ + goto no_mem; + } + pList->nExpr = 0; + pList->a = sqlite3DbMallocRawNN(db, sizeof(pList->a[0])); + if( pList->a==0 ) goto no_mem; + }else if( (pList->nExpr & (pList->nExpr-1))==0 ){ + struct ExprList_item *a; + assert( pList->nExpr>0 ); + a = sqlite3DbRealloc(db, pList->a, pList->nExpr*2*sizeof(pList->a[0])); + if( a==0 ){ + goto no_mem; + } + pList->a = a; + } + assert( pList->a!=0 ); + if( 1 ){ + struct ExprList_item *pItem = &pList->a[pList->nExpr++]; + memset(pItem, 0, sizeof(*pItem)); + pItem->pExpr = pExpr; + } + return pList; + +no_mem: + /* Avoid leaking memory if malloc has failed. */ + sqlite3ExprDelete(db, pExpr); + sqlite3ExprListDelete(db, pList); + return 0; +} + +/* +** pColumns and pExpr form a vector assignment which is part of the SET +** clause of an UPDATE statement. Like this: +** +** (a,b,c) = (expr1,expr2,expr3) +** Or: (a,b,c) = (SELECT x,y,z FROM ....) +** +** For each term of the vector assignment, append new entries to the +** expression list pList. In the case of a subquery on the RHS, append +** TK_SELECT_COLUMN expressions. +*/ +SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to append. Might be NULL */ + IdList *pColumns, /* List of names of LHS of the assignment */ + Expr *pExpr /* Vector expression to be appended. Might be NULL */ +){ + sqlite3 *db = pParse->db; + int n; + int i; + int iFirst = pList ? pList->nExpr : 0; + /* pColumns can only be NULL due to an OOM but an OOM will cause an + ** exit prior to this routine being invoked */ + if( NEVER(pColumns==0) ) goto vector_append_error; + if( pExpr==0 ) goto vector_append_error; + + /* If the RHS is a vector, then we can immediately check to see that + ** the size of the RHS and LHS match. But if the RHS is a SELECT, + ** wildcards ("*") in the result set of the SELECT must be expanded before + ** we can do the size check, so defer the size check until code generation. + */ + if( pExpr->op!=TK_SELECT && pColumns->nId!=(n=sqlite3ExprVectorSize(pExpr)) ){ + sqlite3ErrorMsg(pParse, "%d columns assigned %d values", + pColumns->nId, n); + goto vector_append_error; + } + + for(i=0; inId; i++){ + Expr *pSubExpr = sqlite3ExprForVectorField(pParse, pExpr, i); + pList = sqlite3ExprListAppend(pParse, pList, pSubExpr); + if( pList ){ + assert( pList->nExpr==iFirst+i+1 ); + pList->a[pList->nExpr-1].zName = pColumns->a[i].zName; + pColumns->a[i].zName = 0; + } + } + + if( pExpr->op==TK_SELECT ){ + if( pList && pList->a[iFirst].pExpr ){ + Expr *pFirst = pList->a[iFirst].pExpr; + assert( pFirst->op==TK_SELECT_COLUMN ); + + /* Store the SELECT statement in pRight so it will be deleted when + ** sqlite3ExprListDelete() is called */ + pFirst->pRight = pExpr; + pExpr = 0; + + /* Remember the size of the LHS in iTable so that we can check that + ** the RHS and LHS sizes match during code generation. 
*/ + pFirst->iTable = pColumns->nId; + } + } + +vector_append_error: + sqlite3ExprDelete(db, pExpr); + sqlite3IdListDelete(db, pColumns); + return pList; +} + +/* +** Set the sort order for the last element on the given ExprList. +*/ +SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList *p, int iSortOrder){ + if( p==0 ) return; + assert( SQLITE_SO_UNDEFINED<0 && SQLITE_SO_ASC>=0 && SQLITE_SO_DESC>0 ); + assert( p->nExpr>0 ); + if( iSortOrder<0 ){ + assert( p->a[p->nExpr-1].sortOrder==SQLITE_SO_ASC ); + return; + } + p->a[p->nExpr-1].sortOrder = (u8)iSortOrder; +} + +/* +** Set the ExprList.a[].zName element of the most recently added item +** on the expression list. +** +** pList might be NULL following an OOM error. But pName should never be +** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag +** is set. +*/ +SQLITE_PRIVATE void sqlite3ExprListSetName( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to add the span. */ + Token *pName, /* Name to be added */ + int dequote /* True to cause the name to be dequoted */ +){ + assert( pList!=0 || pParse->db->mallocFailed!=0 ); + if( pList ){ + struct ExprList_item *pItem; + assert( pList->nExpr>0 ); + pItem = &pList->a[pList->nExpr-1]; + assert( pItem->zName==0 ); + pItem->zName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n); + if( dequote ) sqlite3Dequote(pItem->zName); + } +} + +/* +** Set the ExprList.a[].zSpan element of the most recently added item +** on the expression list. +** +** pList might be NULL following an OOM error. But pSpan should never be +** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag +** is set. +*/ +SQLITE_PRIVATE void sqlite3ExprListSetSpan( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* List to which to add the span. */ + ExprSpan *pSpan /* The span to be added */ +){ + sqlite3 *db = pParse->db; + assert( pList!=0 || db->mallocFailed!=0 ); + if( pList ){ + struct ExprList_item *pItem = &pList->a[pList->nExpr-1]; + assert( pList->nExpr>0 ); + assert( db->mallocFailed || pItem->pExpr==pSpan->pExpr ); + sqlite3DbFree(db, pItem->zSpan); + pItem->zSpan = sqlite3DbStrNDup(db, (char*)pSpan->zStart, + (int)(pSpan->zEnd - pSpan->zStart)); + } +} + +/* +** If the expression list pEList contains more than iLimit elements, +** leave an error message in pParse. +*/ +SQLITE_PRIVATE void sqlite3ExprListCheckLength( + Parse *pParse, + ExprList *pEList, + const char *zObject +){ + int mx = pParse->db->aLimit[SQLITE_LIMIT_COLUMN]; + testcase( pEList && pEList->nExpr==mx ); + testcase( pEList && pEList->nExpr==mx+1 ); + if( pEList && pEList->nExpr>mx ){ + sqlite3ErrorMsg(pParse, "too many columns in %s", zObject); + } +} + +/* +** Delete an entire expression list. +*/ +static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){ + int i; + struct ExprList_item *pItem; + assert( pList->a!=0 || pList->nExpr==0 ); + for(pItem=pList->a, i=0; inExpr; i++, pItem++){ + sqlite3ExprDelete(db, pItem->pExpr); + sqlite3DbFree(db, pItem->zName); + sqlite3DbFree(db, pItem->zSpan); + } + sqlite3DbFree(db, pList->a); + sqlite3DbFree(db, pList); +} +SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ + if( pList ) exprListDeleteNN(db, pList); +} + +/* +** Return the bitwise-OR of all Expr.flags fields in the given +** ExprList. 
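+**
+** Callers typically test the combined mask instead of walking the list
+** themselves, e.g. (an illustrative sketch using flags defined in this
+** file):
+**
+**     if( sqlite3ExprListFlags(pEList) & (EP_Subquery|EP_Agg) ){
+**       /* at least one term contains a subquery or an aggregate */
+**     }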
+*/
+SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList *pList){
+  int i;
+  u32 m = 0;
+  if( pList ){
+    for(i=0; i<pList->nExpr; i++){
+       Expr *pExpr = pList->a[i].pExpr;
+       assert( pExpr!=0 );
+       m |= pExpr->flags;
+    }
+  }
+  return m;
+}
+
+/*
+** These routines are Walker callbacks used to check expressions to
+** see if they are "constant" for some definition of constant.  The
+** Walker.eCode value determines the type of "constant" we are looking
+** for.
+**
+** These callback routines are used to implement the following:
+**
+**     sqlite3ExprIsConstant()                  pWalker->eCode==1
+**     sqlite3ExprIsConstantNotJoin()           pWalker->eCode==2
+**     sqlite3ExprIsTableConstant()             pWalker->eCode==3
+**     sqlite3ExprIsConstantOrFunction()        pWalker->eCode==4 or 5
+**
+** In all cases, the callbacks set Walker.eCode=0 and abort if the expression
+** is found to not be a constant.
+**
+** The sqlite3ExprIsConstantOrFunction() is used for evaluating expressions
+** in a CREATE TABLE statement.  The Walker.eCode value is 5 when parsing
+** an existing schema and 4 when processing a new statement.  A bound
+** parameter raises an error for new statements, but is silently converted
+** to NULL for existing schemas.  This allows sqlite_master tables that
+** contain a bound parameter because they were generated by older versions
+** of SQLite to be parsed by newer versions of SQLite without raising a
+** malformed schema error.
+*/
+static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
+
+  /* If pWalker->eCode is 2 then any term of the expression that comes from
+  ** the ON or USING clauses of a left join disqualifies the expression
+  ** from being considered constant. */
+  if( pWalker->eCode==2 && ExprHasProperty(pExpr, EP_FromJoin) ){
+    pWalker->eCode = 0;
+    return WRC_Abort;
+  }
+
+  switch( pExpr->op ){
+    /* Consider functions to be constant if all their arguments are constant
+    ** and either pWalker->eCode==4 or 5 or the function has the
+    ** SQLITE_FUNC_CONST flag.
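+    ** For example, abs(5) can be treated as constant because abs() is
+    ** a built-in marked SQLITE_FUNC_CONST, whereas random() is not so
+    ** marked and therefore disqualifies any expression containing it.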
*/ + case TK_FUNCTION: + if( pWalker->eCode>=4 || ExprHasProperty(pExpr,EP_ConstFunc) ){ + return WRC_Continue; + }else{ + pWalker->eCode = 0; + return WRC_Abort; + } + case TK_ID: + case TK_COLUMN: + case TK_AGG_FUNCTION: + case TK_AGG_COLUMN: + testcase( pExpr->op==TK_ID ); + testcase( pExpr->op==TK_COLUMN ); + testcase( pExpr->op==TK_AGG_FUNCTION ); + testcase( pExpr->op==TK_AGG_COLUMN ); + if( pWalker->eCode==3 && pExpr->iTable==pWalker->u.iCur ){ + return WRC_Continue; + }else{ + pWalker->eCode = 0; + return WRC_Abort; + } + case TK_VARIABLE: + if( pWalker->eCode==5 ){ + /* Silently convert bound parameters that appear inside of CREATE + ** statements into a NULL when parsing the CREATE statement text out + ** of the sqlite_master table */ + pExpr->op = TK_NULL; + }else if( pWalker->eCode==4 ){ + /* A bound parameter in a CREATE statement that originates from + ** sqlite3_prepare() causes an error */ + pWalker->eCode = 0; + return WRC_Abort; + } + /* Fall through */ + default: + testcase( pExpr->op==TK_SELECT ); /* selectNodeIsConstant will disallow */ + testcase( pExpr->op==TK_EXISTS ); /* selectNodeIsConstant will disallow */ + return WRC_Continue; + } +} +static int selectNodeIsConstant(Walker *pWalker, Select *NotUsed){ + UNUSED_PARAMETER(NotUsed); + pWalker->eCode = 0; + return WRC_Abort; +} +static int exprIsConst(Expr *p, int initFlag, int iCur){ + Walker w; + memset(&w, 0, sizeof(w)); + w.eCode = initFlag; + w.xExprCallback = exprNodeIsConstant; + w.xSelectCallback = selectNodeIsConstant; + w.u.iCur = iCur; + sqlite3WalkExpr(&w, p); + return w.eCode; +} + +/* +** Walk an expression tree. Return non-zero if the expression is constant +** and 0 if it involves variables or function calls. +** +** For the purposes of this function, a double-quoted string (ex: "abc") +** is considered a variable but a single-quoted string (ex: 'abc') is +** a constant. +*/ +SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ + return exprIsConst(p, 1, 0); +} + +/* +** Walk an expression tree. Return non-zero if the expression is constant +** that does no originate from the ON or USING clauses of a join. +** Return 0 if it involves variables or function calls or terms from +** an ON or USING clause. +*/ +SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ + return exprIsConst(p, 2, 0); +} + +/* +** Walk an expression tree. Return non-zero if the expression is constant +** for any single row of the table with cursor iCur. In other words, the +** expression must not refer to any non-deterministic function nor any +** table other than iCur. +*/ +SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ + return exprIsConst(p, 3, iCur); +} + +/* +** Walk an expression tree. Return non-zero if the expression is constant +** or a function call with constant arguments. Return and 0 if there +** are any variables. +** +** For the purposes of this function, a double-quoted string (ex: "abc") +** is considered a variable but a single-quoted string (ex: 'abc') is +** a constant. +*/ +SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){ + assert( isInit==0 || isInit==1 ); + return exprIsConst(p, 4+isInit, 0); +} + +#ifdef SQLITE_ENABLE_CURSOR_HINTS +/* +** Walk an expression tree. Return 1 if the expression contains a +** subquery of some kind. Return 0 if there are no subqueries. 
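+** For example, "x IN (SELECT a FROM t)" and "EXISTS(SELECT 1 FROM t)"
+** both contain subqueries, while "x IN (1,2,3)" does not.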
+*/ +SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr *p){ + Walker w; + memset(&w, 0, sizeof(w)); + w.eCode = 1; + w.xExprCallback = sqlite3ExprWalkNoop; + w.xSelectCallback = selectNodeIsConstant; + sqlite3WalkExpr(&w, p); + return w.eCode==0; +} +#endif + +/* +** If the expression p codes a constant integer that is small enough +** to fit in a 32-bit integer, return 1 and put the value of the integer +** in *pValue. If the expression is not an integer or if it is too big +** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. +*/ +SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){ + int rc = 0; + + /* If an expression is an integer literal that fits in a signed 32-bit + ** integer, then the EP_IntValue flag will have already been set */ + assert( p->op!=TK_INTEGER || (p->flags & EP_IntValue)!=0 + || sqlite3GetInt32(p->u.zToken, &rc)==0 ); + + if( p->flags & EP_IntValue ){ + *pValue = p->u.iValue; + return 1; + } + switch( p->op ){ + case TK_UPLUS: { + rc = sqlite3ExprIsInteger(p->pLeft, pValue); + break; + } + case TK_UMINUS: { + int v; + if( sqlite3ExprIsInteger(p->pLeft, &v) ){ + assert( v!=(-2147483647-1) ); + *pValue = -v; + rc = 1; + } + break; + } + default: break; + } + return rc; +} + +/* +** Return FALSE if there is no chance that the expression can be NULL. +** +** If the expression might be NULL or if the expression is too complex +** to tell return TRUE. +** +** This routine is used as an optimization, to skip OP_IsNull opcodes +** when we know that a value cannot be NULL. Hence, a false positive +** (returning TRUE when in fact the expression can never be NULL) might +** be a small performance hit but is otherwise harmless. On the other +** hand, a false negative (returning FALSE when the result could be NULL) +** will likely result in an incorrect answer. So when in doubt, return +** TRUE. +*/ +SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ + u8 op; + while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; } + op = p->op; + if( op==TK_REGISTER ) op = p->op2; + switch( op ){ + case TK_INTEGER: + case TK_STRING: + case TK_FLOAT: + case TK_BLOB: + return 0; + case TK_COLUMN: + assert( p->pTab!=0 ); + return ExprHasProperty(p, EP_CanBeNull) || + (p->iColumn>=0 && p->pTab->aCol[p->iColumn].notNull==0); + default: + return 1; + } +} + +/* +** Return TRUE if the given expression is a constant which would be +** unchanged by OP_Affinity with the affinity given in the second +** argument. +** +** This routine is used to determine if the OP_Affinity operation +** can be omitted. When in doubt return FALSE. A false negative +** is harmless. A false positive, however, can result in the wrong +** answer. +*/ +SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr *p, char aff){ + u8 op; + if( aff==SQLITE_AFF_BLOB ) return 1; + while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; } + op = p->op; + if( op==TK_REGISTER ) op = p->op2; + switch( op ){ + case TK_INTEGER: { + return aff==SQLITE_AFF_INTEGER || aff==SQLITE_AFF_NUMERIC; + } + case TK_FLOAT: { + return aff==SQLITE_AFF_REAL || aff==SQLITE_AFF_NUMERIC; + } + case TK_STRING: { + return aff==SQLITE_AFF_TEXT; + } + case TK_BLOB: { + return 1; + } + case TK_COLUMN: { + assert( p->iTable>=0 ); /* p cannot be part of a CHECK constraint */ + return p->iColumn<0 + && (aff==SQLITE_AFF_INTEGER || aff==SQLITE_AFF_NUMERIC); + } + default: { + return 0; + } + } +} + +/* +** Return TRUE if the given string is a row-id column name. 
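+** The comparison is case-insensitive, so "rowid", "RowId" and "oid"
+** all qualify.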
+*/ +SQLITE_PRIVATE int sqlite3IsRowid(const char *z){ + if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; + if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; + if( sqlite3StrICmp(z, "OID")==0 ) return 1; + return 0; +} + +/* +** pX is the RHS of an IN operator. If pX is a SELECT statement +** that can be simplified to a direct table access, then return +** a pointer to the SELECT statement. If pX is not a SELECT statement, +** or if the SELECT statement needs to be manifested into a transient +** table, then return NULL. +*/ +#ifndef SQLITE_OMIT_SUBQUERY +static Select *isCandidateForInOpt(Expr *pX){ + Select *p; + SrcList *pSrc; + ExprList *pEList; + Table *pTab; + int i; + if( !ExprHasProperty(pX, EP_xIsSelect) ) return 0; /* Not a subquery */ + if( ExprHasProperty(pX, EP_VarSelect) ) return 0; /* Correlated subq */ + p = pX->x.pSelect; + if( p->pPrior ) return 0; /* Not a compound SELECT */ + if( p->selFlags & (SF_Distinct|SF_Aggregate) ){ + testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); + testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); + return 0; /* No DISTINCT keyword and no aggregate functions */ + } + assert( p->pGroupBy==0 ); /* Has no GROUP BY clause */ + if( p->pLimit ) return 0; /* Has no LIMIT clause */ + assert( p->pOffset==0 ); /* No LIMIT means no OFFSET */ + if( p->pWhere ) return 0; /* Has no WHERE clause */ + pSrc = p->pSrc; + assert( pSrc!=0 ); + if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ + if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ + pTab = pSrc->a[0].pTab; + assert( pTab!=0 ); + assert( pTab->pSelect==0 ); /* FROM clause is not a view */ + if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ + pEList = p->pEList; + assert( pEList!=0 ); + /* All SELECT results must be columns. */ + for(i=0; inExpr; i++){ + Expr *pRes = pEList->a[i].pExpr; + if( pRes->op!=TK_COLUMN ) return 0; + assert( pRes->iTable==pSrc->a[0].iCursor ); /* Not a correlated subquery */ + } + return p; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + +#ifndef SQLITE_OMIT_SUBQUERY +/* +** Generate code that checks the left-most column of index table iCur to see if +** it contains any NULL entries. Cause the register at regHasNull to be set +** to a non-NULL value if iCur contains no NULLs. Cause register regHasNull +** to be set to NULL if iCur contains one or more NULL values. +*/ +static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){ + int addr1; + sqlite3VdbeAddOp2(v, OP_Integer, 0, regHasNull); + addr1 = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_Column, iCur, 0, regHasNull); + sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); + VdbeComment((v, "first_entry_in(%d)", iCur)); + sqlite3VdbeJumpHere(v, addr1); +} +#endif + + +#ifndef SQLITE_OMIT_SUBQUERY +/* +** The argument is an IN operator with a list (not a subquery) on the +** right-hand side. Return TRUE if that list is constant. +*/ +static int sqlite3InRhsIsConstant(Expr *pIn){ + Expr *pLHS; + int res; + assert( !ExprHasProperty(pIn, EP_xIsSelect) ); + pLHS = pIn->pLeft; + pIn->pLeft = 0; + res = sqlite3ExprIsConstant(pIn); + pIn->pLeft = pLHS; + return res; +} +#endif + +/* +** This function is used by the implementation of the IN (...) operator. +** The pX parameter is the expression on the RHS of the IN operator, which +** might be either a list of expressions or a subquery. 
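+** For example:
+**
+**     x IN (SELECT a FROM t1)      -- RHS is a subquery
+**     x IN (1,2,3)                 -- RHS is a list of expressions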
+**
+** The job of this routine is to find or create a b-tree object that can
+** be used either to test for membership in the RHS set or to iterate through
+** all members of the RHS set, skipping duplicates.
+**
+** A cursor is opened on the b-tree object that is the RHS of the IN operator
+** and pX->iTable is set to the index of that cursor.
+**
+** The returned value of this function indicates the b-tree type, as follows:
+**
+**   IN_INDEX_ROWID      - The cursor was opened on a database table.
+**   IN_INDEX_INDEX_ASC  - The cursor was opened on an ascending index.
+**   IN_INDEX_INDEX_DESC - The cursor was opened on a descending index.
+**   IN_INDEX_EPH        - The cursor was opened on a specially created and
+**                         populated epheremal table.
+**   IN_INDEX_NOOP       - No cursor was allocated.  The IN operator must be
+**                         implemented as a sequence of comparisons.
+**
+** An existing b-tree might be used if the RHS expression pX is a simple
+** subquery such as:
+**
+**     SELECT <column>, ... FROM <table>
+**
+** If the RHS of the IN operator is a list or a more complex subquery, then
+** an ephemeral table might need to be generated from the RHS and then
+** pX->iTable made to point to the ephemeral table instead of an
+** existing table.
+**
+** The inFlags parameter must contain exactly one of the bits
+** IN_INDEX_MEMBERSHIP or IN_INDEX_LOOP.  If inFlags contains
+** IN_INDEX_MEMBERSHIP, then the generated table will be used for a
+** fast membership test.  When the IN_INDEX_LOOP bit is set, the
+** IN index will be used to loop over all values of the RHS of the
+** IN operator.
+**
+** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate
+** through the set members) then the b-tree must not contain duplicates.
+** An epheremal table must be used unless the selected columns are guaranteed
+** to be unique - either because it is an INTEGER PRIMARY KEY or due to
+** a UNIQUE constraint or index.
+**
+** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used
+** for fast set membership tests) then an epheremal table must
+** be used unless <columns> is a single INTEGER PRIMARY KEY column or an
+** index can be found with the specified <columns> as its left-most.
+**
+** If the IN_INDEX_NOOP_OK and IN_INDEX_MEMBERSHIP are both set and
+** if the RHS of the IN operator is a list (not a subquery) then this
+** routine might decide that creating an ephemeral b-tree for membership
+** testing is too expensive and return IN_INDEX_NOOP.  In that case, the
+** calling routine should implement the IN operator using a sequence
+** of Eq or Ne comparison operations.
+**
+** When the b-tree is being used for membership tests, the calling function
+** might need to know whether or not the RHS side of the IN operator
+** contains a NULL.  If prRhsHasNull is not a NULL pointer and
+** if there is any chance that the (...) might contain a NULL value at
+** runtime, then a register is allocated and the register number written
+** to *prRhsHasNull.  If there is no chance that the (...) contains a
+** NULL value, then *prRhsHasNull is left unchanged.
+**
+** If a register is allocated and its location stored in *prRhsHasNull, then
+** the value in that register will be NULL if the b-tree contains one or more
+** NULL values, and it will be some non-NULL value if the b-tree contains no
+** NULL values.
+**
+** If the aiMap parameter is not NULL, it must point to an array containing
+** one element for each column returned by the SELECT statement on the RHS
+** of the IN(...) operator.  The i'th entry of the array is populated with the
+** offset of the index column that matches the i'th column returned by the
+** SELECT.  For example, if the expression and selected index are:
+**
+**   (?,?,?) IN (SELECT a, b, c FROM t1)
+**   CREATE INDEX i1 ON t1(b, c, a);
+**
+** then aiMap[] is populated with {2, 0, 1}.
+*/
+#ifndef SQLITE_OMIT_SUBQUERY
+SQLITE_PRIVATE int sqlite3FindInIndex(
+  Parse *pParse,             /* Parsing context */
+  Expr *pX,                  /* The right-hand side (RHS) of the IN operator */
+  u32 inFlags,               /* IN_INDEX_LOOP, _MEMBERSHIP, and/or _NOOP_OK */
+  int *prRhsHasNull,         /* Register holding NULL status.  See notes */
+  int *aiMap                 /* Mapping from Index fields to RHS fields */
+){
+  Select *p;                            /* SELECT to the right of IN operator */
+  int eType = 0;                        /* Type of RHS table. IN_INDEX_* */
+  int iTab = pParse->nTab++;            /* Cursor of the RHS table */
+  int mustBeUnique;                     /* True if RHS must be unique */
+  Vdbe *v = sqlite3GetVdbe(pParse);     /* Virtual machine being coded */
+
+  assert( pX->op==TK_IN );
+  mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;
+
+  /* If the RHS of this IN(...) operator is a SELECT, and if it matters
+  ** whether or not the SELECT result contains NULL values, check whether
+  ** or not NULL is actually possible (it may not be, for example, due
+  ** to NOT NULL constraints in the schema).  If no NULL values are possible,
+  ** set prRhsHasNull to 0 before continuing.  */
+  if( prRhsHasNull && (pX->flags & EP_xIsSelect) ){
+    int i;
+    ExprList *pEList = pX->x.pSelect->pEList;
+    for(i=0; i<pEList->nExpr; i++){
+      if( sqlite3ExprCanBeNull(pEList->a[i].pExpr) ) break;
+    }
+    if( i==pEList->nExpr ){
+      prRhsHasNull = 0;
+    }
+  }
+
+  /* Check to see if an existing table or index can be used to
+  ** satisfy the query.  This is preferable to generating a new
+  ** ephemeral table.  */
+  if( pParse->nErr==0 && (p = isCandidateForInOpt(pX))!=0 ){
+    sqlite3 *db = pParse->db;              /* Database connection */
+    Table *pTab;                           /* Table <table>. */
+    i16 iDb;                               /* Database idx for pTab */
+    ExprList *pEList = p->pEList;
+    int nExpr = pEList->nExpr;
+
+    assert( p->pEList!=0 );             /* Because of isCandidateForInOpt(p) */
+    assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */
+    assert( p->pSrc!=0 );               /* Because of isCandidateForInOpt(p) */
+    pTab = p->pSrc->a[0].pTab;
+
+    /* Code an OP_Transaction and OP_TableLock for <table>
    . */ + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + sqlite3CodeVerifySchema(pParse, iDb); + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + + assert(v); /* sqlite3GetVdbe() has always been previously called */ + if( nExpr==1 && pEList->a[0].pExpr->iColumn<0 ){ + /* The "x IN (SELECT rowid FROM table)" case */ + int iAddr = sqlite3VdbeAddOp0(v, OP_Once); + VdbeCoverage(v); + + sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); + eType = IN_INDEX_ROWID; + + sqlite3VdbeJumpHere(v, iAddr); + }else{ + Index *pIdx; /* Iterator variable */ + int affinity_ok = 1; + int i; + + /* Check that the affinity that will be used to perform each + ** comparison is the same as the affinity of each column in table + ** on the RHS of the IN operator. If it not, it is not possible to + ** use any index of the RHS table. */ + for(i=0; ipLeft, i); + int iCol = pEList->a[i].pExpr->iColumn; + char idxaff = sqlite3TableColumnAffinity(pTab,iCol); /* RHS table */ + char cmpaff = sqlite3CompareAffinity(pLhs, idxaff); + testcase( cmpaff==SQLITE_AFF_BLOB ); + testcase( cmpaff==SQLITE_AFF_TEXT ); + switch( cmpaff ){ + case SQLITE_AFF_BLOB: + break; + case SQLITE_AFF_TEXT: + /* sqlite3CompareAffinity() only returns TEXT if one side or the + ** other has no affinity and the other side is TEXT. Hence, + ** the only way for cmpaff to be TEXT is for idxaff to be TEXT + ** and for the term on the LHS of the IN to have no affinity. */ + assert( idxaff==SQLITE_AFF_TEXT ); + break; + default: + affinity_ok = sqlite3IsNumericAffinity(idxaff); + } + } + + if( affinity_ok ){ + /* Search for an existing index that will work for this IN operator */ + for(pIdx=pTab->pIndex; pIdx && eType==0; pIdx=pIdx->pNext){ + Bitmask colUsed; /* Columns of the index used */ + Bitmask mCol; /* Mask for the current column */ + if( pIdx->nColumnnColumn==BMS-2 ); + testcase( pIdx->nColumn==BMS-1 ); + if( pIdx->nColumn>=BMS-1 ) continue; + if( mustBeUnique ){ + if( pIdx->nKeyCol>nExpr + ||(pIdx->nColumn>nExpr && !IsUniqueIndex(pIdx)) + ){ + continue; /* This index is not unique over the IN RHS columns */ + } + } + + colUsed = 0; /* Columns of index used so far */ + for(i=0; ipLeft, i); + Expr *pRhs = pEList->a[i].pExpr; + CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs); + int j; + + assert( pReq!=0 || pRhs->iColumn==XN_ROWID || pParse->nErr ); + for(j=0; jaiColumn[j]!=pRhs->iColumn ) continue; + assert( pIdx->azColl[j] ); + if( pReq!=0 && sqlite3StrICmp(pReq->zName, pIdx->azColl[j])!=0 ){ + continue; + } + break; + } + if( j==nExpr ) break; + mCol = MASKBIT(j); + if( mCol & colUsed ) break; /* Each column used only once */ + colUsed |= mCol; + if( aiMap ) aiMap[i] = j; + } + + assert( i==nExpr || colUsed!=(MASKBIT(nExpr)-1) ); + if( colUsed==(MASKBIT(nExpr)-1) ){ + /* If we reach this point, that means the index pIdx is usable */ + int iAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); +#ifndef SQLITE_OMIT_EXPLAIN + sqlite3VdbeAddOp4(v, OP_Explain, 0, 0, 0, + sqlite3MPrintf(db, "USING INDEX %s FOR IN-OPERATOR",pIdx->zName), + P4_DYNAMIC); +#endif + sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb); + sqlite3VdbeSetP4KeyInfo(pParse, pIdx); + VdbeComment((v, "%s", pIdx->zName)); + assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 ); + eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0]; + + if( prRhsHasNull ){ +#ifdef SQLITE_ENABLE_COLUMN_USED_MASK + i64 mask = (1<nMem; + if( nExpr==1 ){ + sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull); + } + } + sqlite3VdbeJumpHere(v, iAddr); + } + } /* End loop over 
indexes */ + } /* End if( affinity_ok ) */ + } /* End if not an rowid index */ + } /* End attempt to optimize using an index */ + + /* If no preexisting index is available for the IN clause + ** and IN_INDEX_NOOP is an allowed reply + ** and the RHS of the IN operator is a list, not a subquery + ** and the RHS is not constant or has two or fewer terms, + ** then it is not worth creating an ephemeral table to evaluate + ** the IN operator so return IN_INDEX_NOOP. + */ + if( eType==0 + && (inFlags & IN_INDEX_NOOP_OK) + && !ExprHasProperty(pX, EP_xIsSelect) + && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2) + ){ + eType = IN_INDEX_NOOP; + } + + if( eType==0 ){ + /* Could not find an existing table or index to use as the RHS b-tree. + ** We will have to generate an ephemeral table to do the job. + */ + u32 savedNQueryLoop = pParse->nQueryLoop; + int rMayHaveNull = 0; + eType = IN_INDEX_EPH; + if( inFlags & IN_INDEX_LOOP ){ + pParse->nQueryLoop = 0; + if( pX->pLeft->iColumn<0 && !ExprHasProperty(pX, EP_xIsSelect) ){ + eType = IN_INDEX_ROWID; + } + }else if( prRhsHasNull ){ + *prRhsHasNull = rMayHaveNull = ++pParse->nMem; + } + sqlite3CodeSubselect(pParse, pX, rMayHaveNull, eType==IN_INDEX_ROWID); + pParse->nQueryLoop = savedNQueryLoop; + }else{ + pX->iTable = iTab; + } + + if( aiMap && eType!=IN_INDEX_INDEX_ASC && eType!=IN_INDEX_INDEX_DESC ){ + int i, n; + n = sqlite3ExprVectorSize(pX->pLeft); + for(i=0; ipLeft; + int nVal = sqlite3ExprVectorSize(pLeft); + Select *pSelect = (pExpr->flags & EP_xIsSelect) ? pExpr->x.pSelect : 0; + char *zRet; + + assert( pExpr->op==TK_IN ); + zRet = sqlite3DbMallocZero(pParse->db, nVal+1); + if( zRet ){ + int i; + for(i=0; ipEList->a[i].pExpr, a); + }else{ + zRet[i] = a; + } + } + zRet[nVal] = '\0'; + } + return zRet; +} +#endif + +#ifndef SQLITE_OMIT_SUBQUERY +/* +** Load the Parse object passed as the first argument with an error +** message of the form: +** +** "sub-select returns N columns - expected M" +*/ +SQLITE_PRIVATE void sqlite3SubselectError(Parse *pParse, int nActual, int nExpect){ + const char *zFmt = "sub-select returns %d columns - expected %d"; + sqlite3ErrorMsg(pParse, zFmt, nActual, nExpect); +} +#endif + +/* +** Expression pExpr is a vector that has been used in a context where +** it is not permitted. If pExpr is a sub-select vector, this routine +** loads the Parse object with a message of the form: +** +** "sub-select returns N columns - expected 1" +** +** Or, if it is a regular scalar vector: +** +** "row value misused" +*/ +SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){ +#ifndef SQLITE_OMIT_SUBQUERY + if( pExpr->flags & EP_xIsSelect ){ + sqlite3SubselectError(pParse, pExpr->x.pSelect->pEList->nExpr, 1); + }else +#endif + { + sqlite3ErrorMsg(pParse, "row value misused"); + } +} + +/* +** Generate code for scalar subqueries used as a subquery expression, EXISTS, +** or IN operators. Examples: +** +** (SELECT a FROM b) -- subquery +** EXISTS (SELECT a FROM b) -- EXISTS subquery +** x IN (4,5,11) -- IN operator with list on right-hand side +** x IN (SELECT a FROM b) -- IN operator with subquery on the right +** +** The pExpr parameter describes the expression that contains the IN +** operator or subquery. +** +** If parameter isRowid is non-zero, then expression pExpr is guaranteed +** to be of the form " IN (?, ?, ?)", where is a reference +** to some integer key column of a table B-Tree. In this case, use an +** intkey B-Tree to store the set of IN(...) 
values instead of the usual +** (slower) variable length keys B-Tree. +** +** If rMayHaveNull is non-zero, that means that the operation is an IN +** (not a SELECT or EXISTS) and that the RHS might contains NULLs. +** All this routine does is initialize the register given by rMayHaveNull +** to NULL. Calling routines will take care of changing this register +** value to non-NULL if the RHS is NULL-free. +** +** For a SELECT or EXISTS operator, return the register that holds the +** result. For a multi-column SELECT, the result is stored in a contiguous +** array of registers and the return value is the register of the left-most +** result column. Return 0 for IN operators or if an error occurs. +*/ +#ifndef SQLITE_OMIT_SUBQUERY +SQLITE_PRIVATE int sqlite3CodeSubselect( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* The IN, SELECT, or EXISTS operator */ + int rHasNullFlag, /* Register that records whether NULLs exist in RHS */ + int isRowid /* If true, LHS of IN operator is a rowid */ +){ + int jmpIfDynamic = -1; /* One-time test address */ + int rReg = 0; /* Register storing resulting */ + Vdbe *v = sqlite3GetVdbe(pParse); + if( NEVER(v==0) ) return 0; + sqlite3ExprCachePush(pParse); + + /* The evaluation of the IN/EXISTS/SELECT must be repeated every time it + ** is encountered if any of the following is true: + ** + ** * The right-hand side is a correlated subquery + ** * The right-hand side is an expression list containing variables + ** * We are inside a trigger + ** + ** If all of the above are false, then we can run this code just once + ** save the results, and reuse the same result on subsequent invocations. + */ + if( !ExprHasProperty(pExpr, EP_VarSelect) ){ + jmpIfDynamic = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); + } + +#ifndef SQLITE_OMIT_EXPLAIN + if( pParse->explain==2 ){ + char *zMsg = sqlite3MPrintf(pParse->db, "EXECUTE %s%s SUBQUERY %d", + jmpIfDynamic>=0?"":"CORRELATED ", + pExpr->op==TK_IN?"LIST":"SCALAR", + pParse->iNextSelectId + ); + sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); + } +#endif + + switch( pExpr->op ){ + case TK_IN: { + int addr; /* Address of OP_OpenEphemeral instruction */ + Expr *pLeft = pExpr->pLeft; /* the LHS of the IN operator */ + KeyInfo *pKeyInfo = 0; /* Key information */ + int nVal; /* Size of vector pLeft */ + + nVal = sqlite3ExprVectorSize(pLeft); + assert( !isRowid || nVal==1 ); + + /* Whether this is an 'x IN(SELECT...)' or an 'x IN()' + ** expression it is handled the same way. An ephemeral table is + ** filled with index keys representing the results from the + ** SELECT or the . + ** + ** If the 'x' expression is a column value, or the SELECT... + ** statement returns a column value, then the affinity of that + ** column is used to build the index keys. If both 'x' and the + ** SELECT... statement are columns, then numeric affinity is used + ** if either column has NUMERIC or INTEGER affinity. If neither + ** 'x' nor the SELECT... statement are columns, then numeric affinity + ** is used. + */ + pExpr->iTable = pParse->nTab++; + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, + pExpr->iTable, (isRowid?0:nVal)); + pKeyInfo = isRowid ? 0 : sqlite3KeyInfoAlloc(pParse->db, nVal, 1); + + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + /* Case 1: expr IN (SELECT ...) + ** + ** Generate code to write the results of the select into the temporary + ** table allocated and opened above. 
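+      **
+      ** For example, for "x IN (SELECT a FROM t1)" the SELECT below is
+      ** run with an SRT_Set destination, so each result row is encoded
+      ** as an index key and inserted into the ephemeral table opened
+      ** above.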
+ */ + Select *pSelect = pExpr->x.pSelect; + ExprList *pEList = pSelect->pEList; + + assert( !isRowid ); + /* If the LHS and RHS of the IN operator do not match, that + ** error will have been caught long before we reach this point. */ + if( ALWAYS(pEList->nExpr==nVal) ){ + SelectDest dest; + int i; + sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable); + dest.zAffSdst = exprINAffinity(pParse, pExpr); + assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable ); + pSelect->iLimit = 0; + testcase( pSelect->selFlags & SF_Distinct ); + testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ + if( sqlite3Select(pParse, pSelect, &dest) ){ + sqlite3DbFree(pParse->db, dest.zAffSdst); + sqlite3KeyInfoUnref(pKeyInfo); + return 0; + } + sqlite3DbFree(pParse->db, dest.zAffSdst); + assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */ + assert( pEList!=0 ); + assert( pEList->nExpr>0 ); + assert( sqlite3KeyInfoIsWriteable(pKeyInfo) ); + for(i=0; iaColl[i] = sqlite3BinaryCompareCollSeq( + pParse, p, pEList->a[i].pExpr + ); + } + } + }else if( ALWAYS(pExpr->x.pList!=0) ){ + /* Case 2: expr IN (exprlist) + ** + ** For each expression, build an index key from the evaluation and + ** store it in the temporary table. If is a column, then use + ** that columns affinity when building index keys. If is not + ** a column, use numeric affinity. + */ + char affinity; /* Affinity of the LHS of the IN */ + int i; + ExprList *pList = pExpr->x.pList; + struct ExprList_item *pItem; + int r1, r2, r3; + + affinity = sqlite3ExprAffinity(pLeft); + if( !affinity ){ + affinity = SQLITE_AFF_BLOB; + } + if( pKeyInfo ){ + assert( sqlite3KeyInfoIsWriteable(pKeyInfo) ); + pKeyInfo->aColl[0] = sqlite3ExprCollSeq(pParse, pExpr->pLeft); + } + + /* Loop through each expression in . */ + r1 = sqlite3GetTempReg(pParse); + r2 = sqlite3GetTempReg(pParse); + if( isRowid ) sqlite3VdbeAddOp2(v, OP_Null, 0, r2); + for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){ + Expr *pE2 = pItem->pExpr; + int iValToIns; + + /* If the expression is not constant then we will need to + ** disable the test that was generated above that makes sure + ** this code only executes once. Because for a non-constant + ** expression we need to rerun this code each time. + */ + if( jmpIfDynamic>=0 && !sqlite3ExprIsConstant(pE2) ){ + sqlite3VdbeChangeToNoop(v, jmpIfDynamic); + jmpIfDynamic = -1; + } + + /* Evaluate the expression and insert it into the temp table */ + if( isRowid && sqlite3ExprIsInteger(pE2, &iValToIns) ){ + sqlite3VdbeAddOp3(v, OP_InsertInt, pExpr->iTable, r2, iValToIns); + }else{ + r3 = sqlite3ExprCodeTarget(pParse, pE2, r1); + if( isRowid ){ + sqlite3VdbeAddOp2(v, OP_MustBeInt, r3, + sqlite3VdbeCurrentAddr(v)+2); + VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_Insert, pExpr->iTable, r2, r3); + }else{ + sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1); + sqlite3ExprCacheAffinityChange(pParse, r3, 1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pExpr->iTable, r2, r3, 1); + } + } + } + sqlite3ReleaseTempReg(pParse, r1); + sqlite3ReleaseTempReg(pParse, r2); + } + if( pKeyInfo ){ + sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); + } + break; + } + + case TK_EXISTS: + case TK_SELECT: + default: { + /* Case 3: (SELECT ... FROM ...) + ** or: EXISTS(SELECT ... FROM ...) + ** + ** For a SELECT, generate code to put the values for all columns of + ** the first row into an array of registers and return the index of + ** the first register. 
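+      ** For example, the row value "(SELECT a, b FROM t1)" occupies two
+      ** consecutive registers, and the register holding "a" is the one
+      ** returned.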
+ ** + ** If this is an EXISTS, write an integer 0 (not exists) or 1 (exists) + ** into a register and return that register number. + ** + ** In both cases, the query is augmented with "LIMIT 1". Any + ** preexisting limit is discarded in place of the new LIMIT 1. + */ + Select *pSel; /* SELECT statement to encode */ + SelectDest dest; /* How to deal with SELECT result */ + int nReg; /* Registers to allocate */ + + testcase( pExpr->op==TK_EXISTS ); + testcase( pExpr->op==TK_SELECT ); + assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT ); + assert( ExprHasProperty(pExpr, EP_xIsSelect) ); + + pSel = pExpr->x.pSelect; + nReg = pExpr->op==TK_SELECT ? pSel->pEList->nExpr : 1; + sqlite3SelectDestInit(&dest, 0, pParse->nMem+1); + pParse->nMem += nReg; + if( pExpr->op==TK_SELECT ){ + dest.eDest = SRT_Mem; + dest.iSdst = dest.iSDParm; + dest.nSdst = nReg; + sqlite3VdbeAddOp3(v, OP_Null, 0, dest.iSDParm, dest.iSDParm+nReg-1); + VdbeComment((v, "Init subquery result")); + }else{ + dest.eDest = SRT_Exists; + sqlite3VdbeAddOp2(v, OP_Integer, 0, dest.iSDParm); + VdbeComment((v, "Init EXISTS result")); + } + sqlite3ExprDelete(pParse->db, pSel->pLimit); + pSel->pLimit = sqlite3ExprAlloc(pParse->db, TK_INTEGER, + &sqlite3IntTokens[1], 0); + pSel->iLimit = 0; + pSel->selFlags &= ~SF_MultiValue; + if( sqlite3Select(pParse, pSel, &dest) ){ + return 0; + } + rReg = dest.iSDParm; + ExprSetVVAProperty(pExpr, EP_NoReduce); + break; + } + } + + if( rHasNullFlag ){ + sqlite3SetHasNullFlag(v, pExpr->iTable, rHasNullFlag); + } + + if( jmpIfDynamic>=0 ){ + sqlite3VdbeJumpHere(v, jmpIfDynamic); + } + sqlite3ExprCachePop(pParse); + + return rReg; +} +#endif /* SQLITE_OMIT_SUBQUERY */ + +#ifndef SQLITE_OMIT_SUBQUERY +/* +** Expr pIn is an IN(...) expression. This function checks that the +** sub-select on the RHS of the IN() operator has the same number of +** columns as the vector on the LHS. Or, if the RHS of the IN() is not +** a sub-query, that the LHS is a vector of size 1. +*/ +SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){ + int nVector = sqlite3ExprVectorSize(pIn->pLeft); + if( (pIn->flags & EP_xIsSelect) ){ + if( nVector!=pIn->x.pSelect->pEList->nExpr ){ + sqlite3SubselectError(pParse, pIn->x.pSelect->pEList->nExpr, nVector); + return 1; + } + }else if( nVector!=1 ){ + sqlite3VectorErrorMsg(pParse, pIn->pLeft); + return 1; + } + return 0; +} +#endif + +#ifndef SQLITE_OMIT_SUBQUERY +/* +** Generate code for an IN expression. +** +** x IN (SELECT ...) +** x IN (value, value, ...) +** +** The left-hand side (LHS) is a scalar or vector expression. The +** right-hand side (RHS) is an array of zero or more scalar values, or a +** subquery. If the RHS is a subquery, the number of result columns must +** match the number of columns in the vector on the LHS. If the RHS is +** a list of values, the LHS must be a scalar. +** +** The IN operator is true if the LHS value is contained within the RHS. +** The result is false if the LHS is definitely not in the RHS. The +** result is NULL if the presence of the LHS in the RHS cannot be +** determined due to NULLs. +** +** This routine generates code that jumps to destIfFalse if the LHS is not +** contained within the RHS. If due to NULLs we cannot determine if the LHS +** is contained in the RHS then jump to destIfNull. If the LHS is contained +** within the RHS then fall through. +** +** See the separate in-operator.md documentation file in the canonical +** SQLite source tree for additional information. 
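+**
+** An illustrative sketch of the calling convention (the two labels are
+** hypothetical jump targets owned by the caller):
+**
+**     int destIfFalse = sqlite3VdbeMakeLabel(v);
+**     int destIfNull  = sqlite3VdbeMakeLabel(v);
+**     sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull);
+**     /* execution falls through to here when the LHS is in the RHS */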
+*/ +static void sqlite3ExprCodeIN( + Parse *pParse, /* Parsing and code generating context */ + Expr *pExpr, /* The IN expression */ + int destIfFalse, /* Jump here if LHS is not contained in the RHS */ + int destIfNull /* Jump here if the results are unknown due to NULLs */ +){ + int rRhsHasNull = 0; /* Register that is true if RHS contains NULL values */ + int eType; /* Type of the RHS */ + int rLhs; /* Register(s) holding the LHS values */ + int rLhsOrig; /* LHS values prior to reordering by aiMap[] */ + Vdbe *v; /* Statement under construction */ + int *aiMap = 0; /* Map from vector field to index column */ + char *zAff = 0; /* Affinity string for comparisons */ + int nVector; /* Size of vectors for this IN operator */ + int iDummy; /* Dummy parameter to exprCodeVector() */ + Expr *pLeft; /* The LHS of the IN operator */ + int i; /* loop counter */ + int destStep2; /* Where to jump when NULLs seen in step 2 */ + int destStep6 = 0; /* Start of code for Step 6 */ + int addrTruthOp; /* Address of opcode that determines the IN is true */ + int destNotNull; /* Jump here if a comparison is not true in step 6 */ + int addrTop; /* Top of the step-6 loop */ + + pLeft = pExpr->pLeft; + if( sqlite3ExprCheckIN(pParse, pExpr) ) return; + zAff = exprINAffinity(pParse, pExpr); + nVector = sqlite3ExprVectorSize(pExpr->pLeft); + aiMap = (int*)sqlite3DbMallocZero( + pParse->db, nVector*(sizeof(int) + sizeof(char)) + 1 + ); + if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error; + + /* Attempt to compute the RHS. After this step, if anything other than + ** IN_INDEX_NOOP is returned, the table opened ith cursor pExpr->iTable + ** contains the values that make up the RHS. If IN_INDEX_NOOP is returned, + ** the RHS has not yet been coded. */ + v = pParse->pVdbe; + assert( v!=0 ); /* OOM detected prior to this routine */ + VdbeNoopComment((v, "begin IN expr")); + eType = sqlite3FindInIndex(pParse, pExpr, + IN_INDEX_MEMBERSHIP | IN_INDEX_NOOP_OK, + destIfFalse==destIfNull ? 0 : &rRhsHasNull, aiMap); + + assert( pParse->nErr || nVector==1 || eType==IN_INDEX_EPH + || eType==IN_INDEX_INDEX_ASC || eType==IN_INDEX_INDEX_DESC + ); +#ifdef SQLITE_DEBUG + /* Confirm that aiMap[] contains nVector integer values between 0 and + ** nVector-1. */ + for(i=0; i from " IN (...)". If the LHS is a + ** vector, then it is stored in an array of nVector registers starting + ** at r1. + ** + ** sqlite3FindInIndex() might have reordered the fields of the LHS vector + ** so that the fields are in the same order as an existing index. The + ** aiMap[] array contains a mapping from the original LHS field order to + ** the field order that matches the RHS index. 
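+  ** Continuing the example from the sqlite3FindInIndex() header comment,
+  ** with index t1(b,c,a) and LHS vector (?,?,?), aiMap[] holds {2,0,1}
+  ** and the code below copies the LHS registers into that order.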
+ */ + sqlite3ExprCachePush(pParse); + rLhsOrig = exprCodeVector(pParse, pLeft, &iDummy); + for(i=0; ix.pList; + CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); + int labelOk = sqlite3VdbeMakeLabel(v); + int r2, regToFree; + int regCkNull = 0; + int ii; + assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); + if( destIfNull!=destIfFalse ){ + regCkNull = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_BitAnd, rLhs, rLhs, regCkNull); + } + for(ii=0; iinExpr; ii++){ + r2 = sqlite3ExprCodeTemp(pParse, pList->a[ii].pExpr, ®ToFree); + if( regCkNull && sqlite3ExprCanBeNull(pList->a[ii].pExpr) ){ + sqlite3VdbeAddOp3(v, OP_BitAnd, regCkNull, r2, regCkNull); + } + if( iinExpr-1 || destIfNull!=destIfFalse ){ + sqlite3VdbeAddOp4(v, OP_Eq, rLhs, labelOk, r2, + (void*)pColl, P4_COLLSEQ); + VdbeCoverageIf(v, iinExpr-1); + VdbeCoverageIf(v, ii==pList->nExpr-1); + sqlite3VdbeChangeP5(v, zAff[0]); + }else{ + assert( destIfNull==destIfFalse ); + sqlite3VdbeAddOp4(v, OP_Ne, rLhs, destIfFalse, r2, + (void*)pColl, P4_COLLSEQ); VdbeCoverage(v); + sqlite3VdbeChangeP5(v, zAff[0] | SQLITE_JUMPIFNULL); + } + sqlite3ReleaseTempReg(pParse, regToFree); + } + if( regCkNull ){ + sqlite3VdbeAddOp2(v, OP_IsNull, regCkNull, destIfNull); VdbeCoverage(v); + sqlite3VdbeGoto(v, destIfFalse); + } + sqlite3VdbeResolveLabel(v, labelOk); + sqlite3ReleaseTempReg(pParse, regCkNull); + goto sqlite3ExprCodeIN_finished; + } + + /* Step 2: Check to see if the LHS contains any NULL columns. If the + ** LHS does contain NULLs then the result must be either FALSE or NULL. + ** We will then skip the binary search of the RHS. + */ + if( destIfNull==destIfFalse ){ + destStep2 = destIfFalse; + }else{ + destStep2 = destStep6 = sqlite3VdbeMakeLabel(v); + } + for(i=0; ipLeft, i); + if( sqlite3ExprCanBeNull(p) ){ + sqlite3VdbeAddOp2(v, OP_IsNull, rLhs+i, destStep2); + VdbeCoverage(v); + } + } + + /* Step 3. The LHS is now known to be non-NULL. Do the binary search + ** of the RHS using the LHS as a probe. If found, the result is + ** true. + */ + if( eType==IN_INDEX_ROWID ){ + /* In this case, the RHS is the ROWID of table b-tree and so we also + ** know that the RHS is non-NULL. Hence, we combine steps 3 and 4 + ** into a single opcode. */ + sqlite3VdbeAddOp3(v, OP_SeekRowid, pExpr->iTable, destIfFalse, rLhs); + VdbeCoverage(v); + addrTruthOp = sqlite3VdbeAddOp0(v, OP_Goto); /* Return True */ + }else{ + sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector); + if( destIfFalse==destIfNull ){ + /* Combine Step 3 and Step 5 into a single opcode */ + sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse, + rLhs, nVector); VdbeCoverage(v); + goto sqlite3ExprCodeIN_finished; + } + /* Ordinary Step 3, for the case where FALSE and NULL are distinct */ + addrTruthOp = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, + rLhs, nVector); VdbeCoverage(v); + } + + /* Step 4. If the RHS is known to be non-NULL and we did not find + ** an match on the search above, then the result must be FALSE. + */ + if( rRhsHasNull && nVector==1 ){ + sqlite3VdbeAddOp2(v, OP_NotNull, rRhsHasNull, destIfFalse); + VdbeCoverage(v); + } + + /* Step 5. If we do not care about the difference between NULL and + ** FALSE, then just return false. + */ + if( destIfFalse==destIfNull ) sqlite3VdbeGoto(v, destIfFalse); + + /* Step 6: Loop through rows of the RHS. Compare each row to the LHS. + ** If any comparison is NULL, then the result is NULL. If all + ** comparisons are FALSE then the final result is FALSE. 
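+  ** For example, (1,2) IN (VALUES(1,NULL),(3,4)) yields NULL: row (1,NULL)
+  ** compares NULL on its second field and row (3,4) compares FALSE, so
+  ** NULL, not FALSE, is the overall result.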
+ ** + ** For a scalar LHS, it is sufficient to check just the first row + ** of the RHS. + */ + if( destStep6 ) sqlite3VdbeResolveLabel(v, destStep6); + addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse); + VdbeCoverage(v); + if( nVector>1 ){ + destNotNull = sqlite3VdbeMakeLabel(v); + }else{ + /* For nVector==1, combine steps 6 and 7 by immediately returning + ** FALSE if the first comparison is not NULL */ + destNotNull = destIfFalse; + } + for(i=0; iiTable, i, r3); + sqlite3VdbeAddOp4(v, OP_Ne, rLhs+i, destNotNull, r3, + (void*)pColl, P4_COLLSEQ); + VdbeCoverage(v); + sqlite3ReleaseTempReg(pParse, r3); + } + sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull); + if( nVector>1 ){ + sqlite3VdbeResolveLabel(v, destNotNull); + sqlite3VdbeAddOp2(v, OP_Next, pExpr->iTable, addrTop+1); + VdbeCoverage(v); + + /* Step 7: If we reach this point, we know that the result must + ** be false. */ + sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse); + } + + /* Jumps here in order to return true. */ + sqlite3VdbeJumpHere(v, addrTruthOp); + +sqlite3ExprCodeIN_finished: + if( rLhs!=rLhsOrig ) sqlite3ReleaseTempReg(pParse, rLhs); + sqlite3ExprCachePop(pParse); + VdbeComment((v, "end IN expr")); +sqlite3ExprCodeIN_oom_error: + sqlite3DbFree(pParse->db, aiMap); + sqlite3DbFree(pParse->db, zAff); +} +#endif /* SQLITE_OMIT_SUBQUERY */ + +#ifndef SQLITE_OMIT_FLOATING_POINT +/* +** Generate an instruction that will put the floating point +** value described by z[0..n-1] into register iMem. +** +** The z[] string will probably not be zero-terminated. But the +** z[n] character is guaranteed to be something that does not look +** like the continuation of the number. +*/ +static void codeReal(Vdbe *v, const char *z, int negateFlag, int iMem){ + if( ALWAYS(z!=0) ){ + double value; + sqlite3AtoF(z, &value, sqlite3Strlen30(z), SQLITE_UTF8); + assert( !sqlite3IsNaN(value) ); /* The new AtoF never returns NaN */ + if( negateFlag ) value = -value; + sqlite3VdbeAddOp4Dup8(v, OP_Real, 0, iMem, 0, (u8*)&value, P4_REAL); + } +} +#endif + + +/* +** Generate an instruction that will put the integer describe by +** text z[0..n-1] into register iMem. +** +** Expr.u.zToken is always UTF8 and zero-terminated. +*/ +static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){ + Vdbe *v = pParse->pVdbe; + if( pExpr->flags & EP_IntValue ){ + int i = pExpr->u.iValue; + assert( i>=0 ); + if( negFlag ) i = -i; + sqlite3VdbeAddOp2(v, OP_Integer, i, iMem); + }else{ + int c; + i64 value; + const char *z = pExpr->u.zToken; + assert( z!=0 ); + c = sqlite3DecOrHexToI64(z, &value); + if( c==1 || (c==2 && !negFlag) || (negFlag && value==SMALLEST_INT64)){ +#ifdef SQLITE_OMIT_FLOATING_POINT + sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z); +#else +#ifndef SQLITE_OMIT_HEX_INTEGER + if( sqlite3_strnicmp(z,"0x",2)==0 ){ + sqlite3ErrorMsg(pParse, "hex literal too big: %s%s", negFlag?"-":"",z); + }else +#endif + { + codeReal(v, z, negFlag, iMem); + } +#endif + }else{ + if( negFlag ){ value = c==2 ? 
SMALLEST_INT64 : -value; } + sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64); + } + } +} + +/* +** Erase column-cache entry number i +*/ +static void cacheEntryClear(Parse *pParse, int i){ + if( pParse->aColCache[i].tempReg ){ + if( pParse->nTempRegaTempReg) ){ + pParse->aTempReg[pParse->nTempReg++] = pParse->aColCache[i].iReg; + } + } + pParse->nColCache--; + if( inColCache ){ + pParse->aColCache[i] = pParse->aColCache[pParse->nColCache]; + } +} + + +/* +** Record in the column cache that a particular column from a +** particular table is stored in a particular register. +*/ +SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int iReg){ + int i; + int minLru; + int idxLru; + struct yColCache *p; + + /* Unless an error has occurred, register numbers are always positive. */ + assert( iReg>0 || pParse->nErr || pParse->db->mallocFailed ); + assert( iCol>=-1 && iCol<32768 ); /* Finite column numbers */ + + /* The SQLITE_ColumnCache flag disables the column cache. This is used + ** for testing only - to verify that SQLite always gets the same answer + ** with and without the column cache. + */ + if( OptimizationDisabled(pParse->db, SQLITE_ColumnCache) ) return; + + /* First replace any existing entry. + ** + ** Actually, the way the column cache is currently used, we are guaranteed + ** that the object will never already be in cache. Verify this guarantee. + */ +#ifndef NDEBUG + for(i=0, p=pParse->aColCache; inColCache; i++, p++){ + assert( p->iTable!=iTab || p->iColumn!=iCol ); + } +#endif + + /* If the cache is already full, delete the least recently used entry */ + if( pParse->nColCache>=SQLITE_N_COLCACHE ){ + minLru = 0x7fffffff; + idxLru = -1; + for(i=0, p=pParse->aColCache; ilrulru; + } + } + p = &pParse->aColCache[idxLru]; + }else{ + p = &pParse->aColCache[pParse->nColCache++]; + } + + /* Add the new entry to the end of the cache */ + p->iLevel = pParse->iCacheLevel; + p->iTable = iTab; + p->iColumn = iCol; + p->iReg = iReg; + p->tempReg = 0; + p->lru = pParse->iCacheCnt++; +} + +/* +** Indicate that registers between iReg..iReg+nReg-1 are being overwritten. +** Purge the range of registers from the column cache. +*/ +SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){ + int i = 0; + while( inColCache ){ + struct yColCache *p = &pParse->aColCache[i]; + if( p->iReg >= iReg && p->iReg < iReg+nReg ){ + cacheEntryClear(pParse, i); + }else{ + i++; + } + } +} + +/* +** Remember the current column cache context. Any new entries added +** added to the column cache after this call are removed when the +** corresponding pop occurs. +*/ +SQLITE_PRIVATE void sqlite3ExprCachePush(Parse *pParse){ + pParse->iCacheLevel++; +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("PUSH to %d\n", pParse->iCacheLevel); + } +#endif +} + +/* +** Remove from the column cache any entries that were added since the +** the previous sqlite3ExprCachePush operation. In other words, restore +** the cache to the state it was in prior the most recent Push. 
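+**
+** Push and pop operations always nest, as in this sketch:
+**
+**     sqlite3ExprCachePush(pParse);
+**     /* ... code a subexpression; cache entries accumulate ... */
+**     sqlite3ExprCachePop(pParse);   /* those entries are discarded */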
+*/ +SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){ + int i = 0; + assert( pParse->iCacheLevel>=1 ); + pParse->iCacheLevel--; +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("POP to %d\n", pParse->iCacheLevel); + } +#endif + while( inColCache ){ + if( pParse->aColCache[i].iLevel>pParse->iCacheLevel ){ + cacheEntryClear(pParse, i); + }else{ + i++; + } + } +} + +/* +** When a cached column is reused, make sure that its register is +** no longer available as a temp register. ticket #3879: that same +** register might be in the cache in multiple places, so be sure to +** get them all. +*/ +static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){ + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; inColCache; i++, p++){ + if( p->iReg==iReg ){ + p->tempReg = 0; + } + } +} + +/* Generate code that will load into register regOut a value that is +** appropriate for the iIdxCol-th column of index pIdx. +*/ +SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn( + Parse *pParse, /* The parsing context */ + Index *pIdx, /* The index whose column is to be loaded */ + int iTabCur, /* Cursor pointing to a table row */ + int iIdxCol, /* The column of the index to be loaded */ + int regOut /* Store the index column value in this register */ +){ + i16 iTabCol = pIdx->aiColumn[iIdxCol]; + if( iTabCol==XN_EXPR ){ + assert( pIdx->aColExpr ); + assert( pIdx->aColExpr->nExpr>iIdxCol ); + pParse->iSelfTab = iTabCur; + sqlite3ExprCodeCopy(pParse, pIdx->aColExpr->a[iIdxCol].pExpr, regOut); + }else{ + sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pIdx->pTable, iTabCur, + iTabCol, regOut); + } +} + +/* +** Generate code to extract the value of the iCol-th column of a table. +*/ +SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable( + Vdbe *v, /* The VDBE under construction */ + Table *pTab, /* The table containing the value */ + int iTabCur, /* The table cursor. Or the PK cursor for WITHOUT ROWID */ + int iCol, /* Index of the column to extract */ + int regOut /* Extract the value into this register */ +){ + if( iCol<0 || iCol==pTab->iPKey ){ + sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut); + }else{ + int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; + int x = iCol; + if( !HasRowid(pTab) && !IsVirtual(pTab) ){ + x = sqlite3ColumnOfIndex(sqlite3PrimaryKeyIndex(pTab), iCol); + } + sqlite3VdbeAddOp3(v, op, iTabCur, x, regOut); + } + if( iCol>=0 ){ + sqlite3ColumnDefault(v, pTab, iCol, regOut); + } +} + +/* +** Generate code that will extract the iColumn-th column from +** table pTab and store the column value in a register. +** +** An effort is made to store the column value in register iReg. This +** is not garanteeed for GetColumn() - the result can be stored in +** any register. But the result is guaranteed to land in register iReg +** for GetColumnToReg(). +** +** There must be an open cursor to pTab in iTable when this routine +** is called. If iColumn<0 then code is generated that extracts the rowid. 
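+**
+** A minimal sketch of a typical call (cursor and register numbers are
+** hypothetical):
+**
+**     r = sqlite3ExprCodeGetColumn(pParse, pTab, 2, iCur, regTmp, 0);
+**
+** The returned register r can differ from regTmp when the value was
+** already available in the column cache.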
+*/ +SQLITE_PRIVATE int sqlite3ExprCodeGetColumn( + Parse *pParse, /* Parsing and code generating context */ + Table *pTab, /* Description of the table we are reading from */ + int iColumn, /* Index of the table column */ + int iTable, /* The cursor pointing to the table */ + int iReg, /* Store results here */ + u8 p5 /* P5 value for OP_Column + FLAGS */ +){ + Vdbe *v = pParse->pVdbe; + int i; + struct yColCache *p; + + for(i=0, p=pParse->aColCache; inColCache; i++, p++){ + if( p->iTable==iTable && p->iColumn==iColumn ){ + p->lru = pParse->iCacheCnt++; + sqlite3ExprCachePinRegister(pParse, p->iReg); + return p->iReg; + } + } + assert( v!=0 ); + sqlite3ExprCodeGetColumnOfTable(v, pTab, iTable, iColumn, iReg); + if( p5 ){ + sqlite3VdbeChangeP5(v, p5); + }else{ + sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg); + } + return iReg; +} +SQLITE_PRIVATE void sqlite3ExprCodeGetColumnToReg( + Parse *pParse, /* Parsing and code generating context */ + Table *pTab, /* Description of the table we are reading from */ + int iColumn, /* Index of the table column */ + int iTable, /* The cursor pointing to the table */ + int iReg /* Store results here */ +){ + int r1 = sqlite3ExprCodeGetColumn(pParse, pTab, iColumn, iTable, iReg, 0); + if( r1!=iReg ) sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, r1, iReg); +} + + +/* +** Clear all column cache entries. +*/ +SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){ + int i; + +#if SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("CLEAR\n"); + } +#endif + for(i=0; inColCache; i++){ + if( pParse->aColCache[i].tempReg + && pParse->nTempRegaTempReg) + ){ + pParse->aTempReg[pParse->nTempReg++] = pParse->aColCache[i].iReg; + } + } + pParse->nColCache = 0; +} + +/* +** Record the fact that an affinity change has occurred on iCount +** registers starting with iStart. +*/ +SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse *pParse, int iStart, int iCount){ + sqlite3ExprCacheRemove(pParse, iStart, iCount); +} + +/* +** Generate code to move content from registers iFrom...iFrom+nReg-1 +** over to iTo..iTo+nReg-1. Keep the column cache up-to-date. +*/ +SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){ + assert( iFrom>=iTo+nReg || iFrom+nReg<=iTo ); + sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg); + sqlite3ExprCacheRemove(pParse, iFrom, nReg); +} + +#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) +/* +** Return true if any register in the range iFrom..iTo (inclusive) +** is used as part of the column cache. +** +** This routine is used within assert() and testcase() macros only +** and does not appear in a normal build. +*/ +static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){ + int i; + struct yColCache *p; + for(i=0, p=pParse->aColCache; inColCache; i++, p++){ + int r = p->iReg; + if( r>=iFrom && r<=iTo ) return 1; /*NO_TEST*/ + } + return 0; +} +#endif /* SQLITE_DEBUG || SQLITE_COVERAGE_TEST */ + + +/* +** Convert a scalar expression node to a TK_REGISTER referencing +** register iReg. The caller must ensure that iReg already contains +** the correct value for the expression. +*/ +static void exprToRegister(Expr *p, int iReg){ + p->op2 = p->op; + p->op = TK_REGISTER; + p->iTable = iReg; + ExprClearProperty(p, EP_Skip); +} + +/* +** Evaluate an expression (either a vector or a scalar expression) and store +** the result in continguous temporary registers. Return the index of +** the first register used to store the result. 
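+** For example, coding the row value (a,b,c) consumes three consecutive
+** registers and returns the index of the register that holds "a".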
+/*
+** Evaluate an expression (either a vector or a scalar expression) and store
+** the result in contiguous temporary registers.  Return the index of
+** the first register used to store the result.
+**
+** If the returned result register is a temporary scalar, then also write
+** that register number into *piFreeable.  If the returned result register
+** is not a temporary or if the expression is a vector set *piFreeable
+** to 0.
+*/
+static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
+  int iResult;
+  int nResult = sqlite3ExprVectorSize(p);
+  if( nResult==1 ){
+    iResult = sqlite3ExprCodeTemp(pParse, p, piFreeable);
+  }else{
+    *piFreeable = 0;
+    if( p->op==TK_SELECT ){
+      iResult = sqlite3CodeSubselect(pParse, p, 0, 0);
+    }else{
+      int i;
+      iResult = pParse->nMem+1;
+      pParse->nMem += nResult;
+      for(i=0; i<nResult; i++){
+        sqlite3ExprCodeFactorable(pParse, p->x.pList->a[i].pExpr, i+iResult);
+      }
+    }
+  }
+  return iResult;
+}
+
+
+/*
+** Generate code into the current Vdbe to evaluate the given
+** expression.  Attempt to store the results in register "target".
+** Return the register where results are stored.
+**
+** With this routine, there is no guarantee that results will
+** be stored in target.  The result might be stored in some other
+** register if it is convenient to do so.  The calling function
+** must check the return code and move the results to the desired
+** register.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target){
+  Vdbe *v = pParse->pVdbe;  /* The VM under construction */
+  int op;                   /* The opcode being coded */
+  int inReg = target;       /* Results stored in register inReg */
+  int regFree1 = 0;         /* If non-zero free this temporary register */
+  int regFree2 = 0;         /* If non-zero free this temporary register */
+  int r1, r2;               /* Various register numbers */
+  Expr tempX;               /* Temporary expression node */
+  int p5 = 0;
+
+  assert( target>0 && target<=pParse->nMem );
+  if( v==0 ){
+    assert( pParse->db->mallocFailed );
+    return 0;
+  }
+
+  if( pExpr==0 ){
+    op = TK_NULL;
+  }else{
+    op = pExpr->op;
+  }
+  switch( op ){
+    case TK_AGG_COLUMN: {
+      AggInfo *pAggInfo = pExpr->pAggInfo;
+      struct AggInfo_col *pCol = &pAggInfo->aCol[pExpr->iAgg];
+      if( !pAggInfo->directMode ){
+        assert( pCol->iMem>0 );
+        return pCol->iMem;
+      }else if( pAggInfo->useSortingIdx ){
+        sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab,
+                          pCol->iSorterColumn, target);
+        return target;
+      }
+      /* Otherwise, fall thru into the TK_COLUMN case */
+    }
+    case TK_COLUMN: {
+      int iTab = pExpr->iTable;
+      if( iTab<0 ){
+        if( pParse->ckBase>0 ){
+          /* Generating CHECK constraints or inserting into partial index */
+          return pExpr->iColumn + pParse->ckBase;
+        }else{
+          /* Coding an expression that is part of an index where column names
+          ** in the index refer to the table to which the index belongs */
+          iTab = pParse->iSelfTab;
+        }
+      }
+      return sqlite3ExprCodeGetColumn(pParse, pExpr->pTab,
+                                      pExpr->iColumn, iTab, target,
+                                      pExpr->op2);
+    }
+    case TK_INTEGER: {
+      codeInteger(pParse, pExpr, 0, target);
+      return target;
+    }
+#ifndef SQLITE_OMIT_FLOATING_POINT
+    case TK_FLOAT: {
+      assert( !ExprHasProperty(pExpr, EP_IntValue) );
+      codeReal(v, pExpr->u.zToken, 0, target);
+      return target;
+    }
+#endif
+    case TK_STRING: {
+      assert( !ExprHasProperty(pExpr, EP_IntValue) );
+      sqlite3VdbeLoadString(v, target, pExpr->u.zToken);
+      return target;
+    }
+    case TK_NULL: {
+      sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+      return target;
+    }
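+
+    /* Illustrative note (not in the original): a blob literal reaches this
+    ** switch as its source token, e.g. x'53514C'.  The leading x' and the
+    ** trailing quote are stripped below and the remaining hex digits
+    ** decode to n/2 bytes -- 3 bytes ("SQL") for x'53514C'. */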
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+    case TK_BLOB: {
+      int n;
+      const char *z;
+      char *zBlob;
+      assert( !ExprHasProperty(pExpr, EP_IntValue) );
+      assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' );
+      assert( pExpr->u.zToken[1]=='\'' );
+      z = &pExpr->u.zToken[2];
+      n = sqlite3Strlen30(z) - 1;
+      assert( z[n]=='\'' );
+      zBlob = sqlite3HexToBlob(sqlite3VdbeDb(v), z, n);
+      sqlite3VdbeAddOp4(v, OP_Blob, n/2, target, 0, zBlob, P4_DYNAMIC);
+      return target;
+    }
+#endif
+    case TK_VARIABLE: {
+      assert( !ExprHasProperty(pExpr, EP_IntValue) );
+      assert( pExpr->u.zToken!=0 );
+      assert( pExpr->u.zToken[0]!=0 );
+      sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target);
+      if( pExpr->u.zToken[1]!=0 ){
+        const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn);
+        assert( pExpr->u.zToken[0]=='?' || strcmp(pExpr->u.zToken, z)==0 );
+        pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */
+        sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC);
+      }
+      return target;
+    }
+    case TK_REGISTER: {
+      return pExpr->iTable;
+    }
+#ifndef SQLITE_OMIT_CAST
+    case TK_CAST: {
+      /* Expressions of the form:   CAST(pLeft AS token) */
+      inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+      if( inReg!=target ){
+        sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target);
+        inReg = target;
+      }
+      sqlite3VdbeAddOp2(v, OP_Cast, target,
+                        sqlite3AffinityType(pExpr->u.zToken, 0));
+      testcase( usedAsColumnCache(pParse, inReg, inReg) );
+      sqlite3ExprCacheAffinityChange(pParse, inReg, 1);
+      return inReg;
+    }
+#endif /* SQLITE_OMIT_CAST */
+    case TK_IS:
+    case TK_ISNOT:
+      op = (op==TK_IS) ? TK_EQ : TK_NE;
+      p5 = SQLITE_NULLEQ;
+      /* fall-through */
+    case TK_LT:
+    case TK_LE:
+    case TK_GT:
+    case TK_GE:
+    case TK_NE:
+    case TK_EQ: {
+      Expr *pLeft = pExpr->pLeft;
+      if( sqlite3ExprIsVector(pLeft) ){
+        codeVectorCompare(pParse, pExpr, target, op, p5);
+      }else{
+        r1 = sqlite3ExprCodeTemp(pParse, pLeft, &regFree1);
+        r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+        codeCompare(pParse, pLeft, pExpr->pRight, op,
+                    r1, r2, inReg, SQLITE_STOREP2 | p5);
+        assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+        assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+        assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+        assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+        assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
+        assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
+        testcase( regFree1==0 );
+        testcase( regFree2==0 );
+      }
+      break;
+    }
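+
+    /* Illustrative note (not in the original): IS and IS NOT are coded
+    ** above as EQ/NE with the SQLITE_NULLEQ flag, which makes NULL compare
+    ** equal to NULL:
+    **
+    **     NULL IS NULL  ->  1        NULL = NULL  ->  NULL
+    **     1 IS NULL     ->  0        1 = NULL     ->  NULL
+    */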
+    case TK_AND:
+    case TK_OR:
+    case TK_PLUS:
+    case TK_STAR:
+    case TK_MINUS:
+    case TK_REM:
+    case TK_BITAND:
+    case TK_BITOR:
+    case TK_SLASH:
+    case TK_LSHIFT:
+    case TK_RSHIFT:
+    case TK_CONCAT: {
+      assert( TK_AND==OP_And );            testcase( op==TK_AND );
+      assert( TK_OR==OP_Or );              testcase( op==TK_OR );
+      assert( TK_PLUS==OP_Add );           testcase( op==TK_PLUS );
+      assert( TK_MINUS==OP_Subtract );     testcase( op==TK_MINUS );
+      assert( TK_REM==OP_Remainder );      testcase( op==TK_REM );
+      assert( TK_BITAND==OP_BitAnd );      testcase( op==TK_BITAND );
+      assert( TK_BITOR==OP_BitOr );        testcase( op==TK_BITOR );
+      assert( TK_SLASH==OP_Divide );       testcase( op==TK_SLASH );
+      assert( TK_LSHIFT==OP_ShiftLeft );   testcase( op==TK_LSHIFT );
+      assert( TK_RSHIFT==OP_ShiftRight );  testcase( op==TK_RSHIFT );
+      assert( TK_CONCAT==OP_Concat );      testcase( op==TK_CONCAT );
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+      sqlite3VdbeAddOp3(v, op, r2, r1, target);
+      testcase( regFree1==0 );
+      testcase( regFree2==0 );
+      break;
+    }
+    case TK_UMINUS: {
+      Expr *pLeft = pExpr->pLeft;
+      assert( pLeft );
+      if( pLeft->op==TK_INTEGER ){
+        codeInteger(pParse, pLeft, 1, target);
+        return target;
+#ifndef SQLITE_OMIT_FLOATING_POINT
+      }else if( pLeft->op==TK_FLOAT ){
+        assert( !ExprHasProperty(pExpr, EP_IntValue) );
+        codeReal(v, pLeft->u.zToken, 1, target);
+        return target;
+#endif
+      }else{
+        tempX.op = TK_INTEGER;
+        tempX.flags = EP_IntValue|EP_TokenOnly;
+        tempX.u.iValue = 0;
+        r1 = sqlite3ExprCodeTemp(pParse, &tempX, &regFree1);
+        r2 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree2);
+        sqlite3VdbeAddOp3(v, OP_Subtract, r2, r1, target);
+        testcase( regFree2==0 );
+      }
+      break;
+    }
+    case TK_BITNOT:
+    case TK_NOT: {
+      assert( TK_BITNOT==OP_BitNot );   testcase( op==TK_BITNOT );
+      assert( TK_NOT==OP_Not );         testcase( op==TK_NOT );
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      testcase( regFree1==0 );
+      sqlite3VdbeAddOp2(v, op, r1, inReg);
+      break;
+    }
+    case TK_ISNULL:
+    case TK_NOTNULL: {
+      int addr;
+      assert( TK_ISNULL==OP_IsNull );   testcase( op==TK_ISNULL );
+      assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
+      sqlite3VdbeAddOp2(v, OP_Integer, 1, target);
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      testcase( regFree1==0 );
+      addr = sqlite3VdbeAddOp1(v, op, r1);
+      VdbeCoverageIf(v, op==TK_ISNULL);
+      VdbeCoverageIf(v, op==TK_NOTNULL);
+      sqlite3VdbeAddOp2(v, OP_Integer, 0, target);
+      sqlite3VdbeJumpHere(v, addr);
+      break;
+    }
+    case TK_AGG_FUNCTION: {
+      AggInfo *pInfo = pExpr->pAggInfo;
+      if( pInfo==0 ){
+        assert( !ExprHasProperty(pExpr, EP_IntValue) );
+        sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken);
+      }else{
+        return pInfo->aFunc[pExpr->iAgg].iMem;
+      }
+      break;
+    }
+    case TK_FUNCTION: {
+      ExprList *pFarg;       /* List of function arguments */
+      int nFarg;             /* Number of function arguments */
+      FuncDef *pDef;         /* The function definition object */
+      const char *zId;       /* The function name */
+      u32 constMask = 0;     /* Mask of function arguments that are constant */
+      int i;                 /* Loop counter */
+      sqlite3 *db = pParse->db;  /* The database connection */
+      u8 enc = ENC(db);      /* The text encoding used by this database */
+      CollSeq *pColl = 0;    /* A collating sequence */
+
+      if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){
+        /* SQL functions can be expensive. So try to move constant functions
+        ** out of the inner loop, even if that means an extra OP_Copy. */
+        return sqlite3ExprCodeAtInit(pParse, pExpr, -1);
+      }
+      assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+      if( ExprHasProperty(pExpr, EP_TokenOnly) ){
+        pFarg = 0;
+      }else{
+        pFarg = pExpr->x.pList;
+      }
+      nFarg = pFarg ? pFarg->nExpr : 0;
+      assert( !ExprHasProperty(pExpr, EP_IntValue) );
+      zId = pExpr->u.zToken;
+      pDef = sqlite3FindFunction(db, zId, nFarg, enc, 0);
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+      if( pDef==0 && pParse->explain ){
+        pDef = sqlite3FindFunction(db, "unknown", nFarg, enc, 0);
+      }
+#endif
+      if( pDef==0 || pDef->xFinalize!=0 ){
+        sqlite3ErrorMsg(pParse, "unknown function: %s()", zId);
+        break;
+      }
+
+      /* Attempt a direct implementation of the built-in COALESCE() and
+      ** IFNULL() functions.  This avoids unnecessary evaluation of
+      ** arguments past the first non-NULL argument.
+      */
+      if( pDef->funcFlags & SQLITE_FUNC_COALESCE ){
+        int endCoalesce = sqlite3VdbeMakeLabel(v);
+        assert( nFarg>=2 );
+        sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target);
+        for(i=1; i<nFarg; i++){
+          sqlite3VdbeAddOp2(v, OP_NotNull, target, endCoalesce);
+          VdbeCoverage(v);
+          sqlite3ExprCacheRemove(pParse, target, 1);
+          sqlite3ExprCachePush(pParse);
+          sqlite3ExprCode(pParse, pFarg->a[i].pExpr, target);
+          sqlite3ExprCachePop(pParse);
+        }
+        sqlite3VdbeResolveLabel(v, endCoalesce);
+        break;
+      }
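+
+      /* Illustrative note (not in the original): the OP_NotNull jump above
+      ** gives COALESCE short-circuit evaluation.  Code is generated for
+      ** every argument, but once an argument yields a non-NULL value at
+      ** run time the remaining arguments are skipped.  So in
+      **
+      **     SELECT coalesce(a, expensive_udf(b)) FROM t;
+      **
+      ** expensive_udf() (a hypothetical user function) never actually runs
+      ** on rows where a is not NULL. */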
+      /* The UNLIKELY() function is a no-op.  The result is the value
+      ** of the first argument.
+      */
+      if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){
+        assert( nFarg>=1 );
+        return sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target);
+      }
+
+#ifdef SQLITE_DEBUG
+      /* The AFFINITY() function evaluates to a string that describes
+      ** the type affinity of the argument.  This is used for testing of
+      ** the SQLite type logic.
+      */
+      if( pDef->funcFlags & SQLITE_FUNC_AFFINITY ){
+        const char *azAff[] = { "blob", "text", "numeric", "integer", "real" };
+        char aff;
+        assert( nFarg==1 );
+        aff = sqlite3ExprAffinity(pFarg->a[0].pExpr);
+        sqlite3VdbeLoadString(v, target,
+                              aff ? azAff[aff-SQLITE_AFF_BLOB] : "none");
+        return target;
+      }
+#endif
+
+      for(i=0; i<nFarg; i++){
+        if( i<32 && sqlite3ExprIsConstant(pFarg->a[i].pExpr) ){
+          testcase( i==31 );
+          constMask |= MASKBIT32(i);
+        }
+        if( (pDef->funcFlags & SQLITE_FUNC_NEEDCOLL)!=0 && !pColl ){
+          pColl = sqlite3ExprCollSeq(pParse, pFarg->a[i].pExpr);
+        }
+      }
+      if( pFarg ){
+        if( constMask ){
+          r1 = pParse->nMem+1;
+          pParse->nMem += nFarg;
+        }else{
+          r1 = sqlite3GetTempRange(pParse, nFarg);
+        }
+
+        /* For length() and typeof() functions with a column argument,
+        ** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG
+        ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data
+        ** loading.
+        */
+        if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){
+          u8 exprOp;
+          assert( nFarg==1 );
+          assert( pFarg->a[0].pExpr!=0 );
+          exprOp = pFarg->a[0].pExpr->op;
+          if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){
+            assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG );
+            assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG );
+            testcase( pDef->funcFlags & OPFLAG_LENGTHARG );
+            pFarg->a[0].pExpr->op2 =
+                  pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG);
+          }
+        }
+
+        sqlite3ExprCachePush(pParse);     /* Ticket 2ea2425d34be */
+        sqlite3ExprCodeExprList(pParse, pFarg, r1, 0,
+                                SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR);
+        sqlite3ExprCachePop(pParse);      /* Ticket 2ea2425d34be */
+      }else{
+        r1 = 0;
+      }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+      /* Possibly overload the function if the first argument is
+      ** a virtual table column.
+      **
+      ** For infix functions (LIKE, GLOB, REGEXP, and MATCH) use the
+      ** second argument, not the first, as the argument to test to
+      ** see if it is a column in a virtual table.  This is done because
+      ** the left operand of infix functions (the operand we want to
+      ** control overloading) ends up as the second argument to the
+      ** function.  The expression "A glob B" is equivalent to
+      ** "glob(B,A)".  We want to use the A in "A glob B" to test
+      ** for function overloading.  But we use the B term in "glob(B,A)".
+ */ + if( nFarg>=2 && (pExpr->flags & EP_InfixFunc) ){ + pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[1].pExpr); + }else if( nFarg>0 ){ + pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[0].pExpr); + } +#endif + if( pDef->funcFlags & SQLITE_FUNC_NEEDCOLL ){ + if( !pColl ) pColl = db->pDfltColl; + sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ); + } + sqlite3VdbeAddOp4(v, OP_Function0, constMask, r1, target, + (char*)pDef, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nFarg); + if( nFarg && constMask==0 ){ + sqlite3ReleaseTempRange(pParse, r1, nFarg); + } + return target; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_EXISTS: + case TK_SELECT: { + int nCol; + testcase( op==TK_EXISTS ); + testcase( op==TK_SELECT ); + if( op==TK_SELECT && (nCol = pExpr->x.pSelect->pEList->nExpr)!=1 ){ + sqlite3SubselectError(pParse, nCol, 1); + }else{ + return sqlite3CodeSubselect(pParse, pExpr, 0, 0); + } + break; + } + case TK_SELECT_COLUMN: { + int n; + if( pExpr->pLeft->iTable==0 ){ + pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft, 0, 0); + } + assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT ); + if( pExpr->iTable + && pExpr->iTable!=(n = sqlite3ExprVectorSize(pExpr->pLeft)) + ){ + sqlite3ErrorMsg(pParse, "%d columns assigned %d values", + pExpr->iTable, n); + } + return pExpr->pLeft->iTable + pExpr->iColumn; + } + case TK_IN: { + int destIfFalse = sqlite3VdbeMakeLabel(v); + int destIfNull = sqlite3VdbeMakeLabel(v); + sqlite3VdbeAddOp2(v, OP_Null, 0, target); + sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull); + sqlite3VdbeAddOp2(v, OP_Integer, 1, target); + sqlite3VdbeResolveLabel(v, destIfFalse); + sqlite3VdbeAddOp2(v, OP_AddImm, target, 0); + sqlite3VdbeResolveLabel(v, destIfNull); + return target; + } +#endif /* SQLITE_OMIT_SUBQUERY */ + + + /* + ** x BETWEEN y AND z + ** + ** This is equivalent to + ** + ** x>=y AND x<=z + ** + ** X is stored in pExpr->pLeft. + ** Y is stored in pExpr->pList->a[0].pExpr. + ** Z is stored in pExpr->pList->a[1].pExpr. + */ + case TK_BETWEEN: { + exprCodeBetween(pParse, pExpr, target, 0, 0); + return target; + } + case TK_SPAN: + case TK_COLLATE: + case TK_UPLUS: { + return sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); + } + + case TK_TRIGGER: { + /* If the opcode is TK_TRIGGER, then the expression is a reference + ** to a column in the new.* or old.* pseudo-tables available to + ** trigger programs. In this case Expr.iTable is set to 1 for the + ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn + ** is set to the column of the pseudo-table to read, or to -1 to + ** read the rowid field. + ** + ** The expression is implemented using an OP_Param opcode. The p1 + ** parameter is set to 0 for an old.rowid reference, or to (i+1) + ** to reference another column of the old.* pseudo-table, where + ** i is the index of the column. For a new.rowid reference, p1 is + ** set to (n+1), where n is the number of columns in each pseudo-table. + ** For a reference to any other column in the new.* pseudo-table, p1 + ** is set to (n+2+i), where n and i are as defined previously. 
+      ** For example, if the table on which triggers are being fired is
+      ** declared as:
+      **
+      **   CREATE TABLE t1(a, b);
+      **
+      ** Then p1 is interpreted as follows:
+      **
+      **   p1==0   ->    old.rowid     p1==3   ->    new.rowid
+      **   p1==1   ->    old.a         p1==4   ->    new.a
+      **   p1==2   ->    old.b         p1==5   ->    new.b
+      */
+      Table *pTab = pExpr->pTab;
+      int p1 = pExpr->iTable * (pTab->nCol+1) + 1 + pExpr->iColumn;
+
+      assert( pExpr->iTable==0 || pExpr->iTable==1 );
+      assert( pExpr->iColumn>=-1 && pExpr->iColumn<pTab->nCol );
+      assert( pTab->iPKey<0 || pExpr->iColumn!=pTab->iPKey );
+      assert( p1>=0 && p1<(pTab->nCol*2+2) );
+
+      sqlite3VdbeAddOp2(v, OP_Param, p1, target);
+      VdbeComment((v, "%s.%s -> $%d",
+        (pExpr->iTable ? "new" : "old"),
+        (pExpr->iColumn<0 ? "rowid" : pExpr->pTab->aCol[pExpr->iColumn].zName),
+        target
+      ));
+
+#ifndef SQLITE_OMIT_FLOATING_POINT
+      /* If the column has REAL affinity, it may currently be stored as an
+      ** integer.  Use OP_RealAffinity to make sure it is really real.
+      **
+      ** EVIDENCE-OF: R-60985-57662 SQLite will convert the value back to
+      ** floating point when extracting it from the record.  */
+      if( pExpr->iColumn>=0
+       && pTab->aCol[pExpr->iColumn].affinity==SQLITE_AFF_REAL
+      ){
+        sqlite3VdbeAddOp1(v, OP_RealAffinity, target);
+      }
+#endif
+      break;
+    }
+
+    case TK_VECTOR: {
+      sqlite3ErrorMsg(pParse, "row value misused");
+      break;
+    }
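+
+    /* Illustrative note (not in the original): the TK_TRIGGER case above
+    ** computes p1 = iTable*(nCol+1) + 1 + iColumn.  For CREATE TABLE
+    ** t1(a, b) (nCol==2) this reproduces the table in the comment:
+    **
+    **     old.rowid -> 0*3 + 1 + (-1) == 0
+    **     old.b     -> 0*3 + 1 + 1    == 2
+    **     new.a     -> 1*3 + 1 + 0    == 4
+    */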
+    /*
+    ** Form A:
+    **   CASE x WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END
+    **
+    ** Form B:
+    **   CASE WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END
+    **
+    ** Form A can be transformed into the equivalent form B as follows:
+    **   CASE WHEN x=e1 THEN r1 WHEN x=e2 THEN r2 ...
+    **        WHEN x=eN THEN rN ELSE y END
+    **
+    ** X (if it exists) is in pExpr->pLeft.
+    ** Y is in the last element of pExpr->x.pList if pExpr->x.pList->nExpr is
+    ** odd.  The Y is also optional.  If the number of elements in x.pList
+    ** is even, then Y is omitted and the "otherwise" result is NULL.
+    ** Ei is in pExpr->pList->a[i*2] and Ri is pExpr->pList->a[i*2+1].
+    **
+    ** The result of the expression is the Ri for the first matching Ei,
+    ** or if there is no matching Ei, the ELSE term Y, or if there is
+    ** no ELSE term, NULL.
+    */
+    default: assert( op==TK_CASE ); {
+      int endLabel;                     /* GOTO label for end of CASE stmt */
+      int nextCase;                     /* GOTO label for next WHEN clause */
+      int nExpr;                        /* 2x number of WHEN terms */
+      int i;                            /* Loop counter */
+      ExprList *pEList;                 /* List of WHEN terms */
+      struct ExprList_item *aListelem;  /* Array of WHEN terms */
+      Expr opCompare;                   /* The X==Ei expression */
+      Expr *pX;                         /* The X expression */
+      Expr *pTest = 0;                  /* X==Ei (form A) or just Ei (form B) */
+      VVA_ONLY( int iCacheLevel = pParse->iCacheLevel; )
+
+      assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList );
+      assert(pExpr->x.pList->nExpr > 0);
+      pEList = pExpr->x.pList;
+      aListelem = pEList->a;
+      nExpr = pEList->nExpr;
+      endLabel = sqlite3VdbeMakeLabel(v);
+      if( (pX = pExpr->pLeft)!=0 ){
+        tempX = *pX;
+        testcase( pX->op==TK_COLUMN );
+        exprToRegister(&tempX, exprCodeVector(pParse, &tempX, &regFree1));
+        testcase( regFree1==0 );
+        memset(&opCompare, 0, sizeof(opCompare));
+        opCompare.op = TK_EQ;
+        opCompare.pLeft = &tempX;
+        pTest = &opCompare;
+        /* Ticket b351d95f9cd5ef17e9d9dbae18f5ca8611190001:
+        ** The value in regFree1 might get SCopy-ed into the final result.
+        ** So make sure that the regFree1 register is not reused for other
+        ** purposes and possibly overwritten.  */
+        regFree1 = 0;
+      }
+      for(i=0; i<nExpr-1; i=i+2){
+        sqlite3ExprCachePush(pParse);
+        if( pX ){
+          assert( pTest!=0 );
+          opCompare.pRight = aListelem[i].pExpr;
+        }else{
+          pTest = aListelem[i].pExpr;
+        }
+        nextCase = sqlite3VdbeMakeLabel(v);
+        testcase( pTest->op==TK_COLUMN );
+        sqlite3ExprIfFalse(pParse, pTest, nextCase, SQLITE_JUMPIFNULL);
+        testcase( aListelem[i+1].pExpr->op==TK_COLUMN );
+        sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target);
+        sqlite3VdbeGoto(v, endLabel);
+        sqlite3ExprCachePop(pParse);
+        sqlite3VdbeResolveLabel(v, nextCase);
+      }
+      if( (nExpr&1)!=0 ){
+        sqlite3ExprCachePush(pParse);
+        sqlite3ExprCode(pParse, pEList->a[nExpr-1].pExpr, target);
+        sqlite3ExprCachePop(pParse);
+      }else{
+        sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+      }
+      assert( pParse->db->mallocFailed || pParse->nErr>0
+           || pParse->iCacheLevel==iCacheLevel );
+      sqlite3VdbeResolveLabel(v, endLabel);
+      break;
+    }
+#ifndef SQLITE_OMIT_TRIGGER
+    case TK_RAISE: {
+      assert( pExpr->affinity==OE_Rollback
+           || pExpr->affinity==OE_Abort
+           || pExpr->affinity==OE_Fail
+           || pExpr->affinity==OE_Ignore
+      );
+      if( !pParse->pTriggerTab ){
+        sqlite3ErrorMsg(pParse,
+                       "RAISE() may only be used within a trigger-program");
+        return 0;
+      }
+      if( pExpr->affinity==OE_Abort ){
+        sqlite3MayAbort(pParse);
+      }
+      assert( !ExprHasProperty(pExpr, EP_IntValue) );
+      if( pExpr->affinity==OE_Ignore ){
+        sqlite3VdbeAddOp4(
+            v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0);
+        VdbeCoverage(v);
+      }else{
+        sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_TRIGGER,
+                              pExpr->affinity, pExpr->u.zToken, 0, 0);
+      }
+
+      break;
+    }
+#endif
+  }
+  sqlite3ReleaseTempReg(pParse, regFree1);
+  sqlite3ReleaseTempReg(pParse, regFree2);
+  return inReg;
+}
+
+/*
+** Factor out the code of the given expression to initialization time.
+**
+** If regDest>=0 then the result is always stored in that register and the
+** result is not reusable.  If regDest<0 then this routine is free to
+** store the value wherever it wants.  The register where the expression
+** is stored is returned.  When regDest<0, two identical expressions will
+** code to the same register.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeAtInit(
+  Parse *pParse,    /* Parsing context */
+  Expr *pExpr,      /* The expression to code when the VDBE initializes */
+  int regDest       /* Store the value in this register */
+){
+  ExprList *p;
+  assert( ConstFactorOk(pParse) );
+  p = pParse->pConstExpr;
+  if( regDest<0 && p ){
+    struct ExprList_item *pItem;
+    int i;
+    for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){
+      if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){
+        return pItem->u.iConstExprReg;
+      }
+    }
+  }
+  pExpr = sqlite3ExprDup(pParse->db, pExpr, 0);
+  p = sqlite3ExprListAppend(pParse, p, pExpr);
+  if( p ){
+    struct ExprList_item *pItem = &p->a[p->nExpr-1];
+    pItem->reusable = regDest<0;
+    if( regDest<0 ) regDest = ++pParse->nMem;
+    pItem->u.iConstExprReg = regDest;
+  }
+  pParse->pConstExpr = p;
+  return regDest;
+}
+
+/*
+** Generate code to evaluate an expression and store the results
+** into a register.  Return the register number where the results
+** are stored.
+**
+** If the register is a temporary register that can be deallocated,
+** then write its number into *pReg.  If the result register is not
+** a temporary, then set *pReg to zero.
+**
+** If pExpr is a constant, then this routine might generate this
+** code to fill the register in the initialization section of the
+** VDBE program, in order to factor it out of the evaluation loop.
+*/ +SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ + int r2; + pExpr = sqlite3ExprSkipCollate(pExpr); + if( ConstFactorOk(pParse) + && pExpr->op!=TK_REGISTER + && sqlite3ExprIsConstantNotJoin(pExpr) + ){ + *pReg = 0; + r2 = sqlite3ExprCodeAtInit(pParse, pExpr, -1); + }else{ + int r1 = sqlite3GetTempReg(pParse); + r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1); + if( r2==r1 ){ + *pReg = r1; + }else{ + sqlite3ReleaseTempReg(pParse, r1); + *pReg = 0; + } + } + return r2; +} + +/* +** Generate code that will evaluate expression pExpr and store the +** results in register target. The results are guaranteed to appear +** in register target. +*/ +SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ + int inReg; + + assert( target>0 && target<=pParse->nMem ); + if( pExpr && pExpr->op==TK_REGISTER ){ + sqlite3VdbeAddOp2(pParse->pVdbe, OP_Copy, pExpr->iTable, target); + }else{ + inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); + assert( pParse->pVdbe!=0 || pParse->db->mallocFailed ); + if( inReg!=target && pParse->pVdbe ){ + sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, inReg, target); + } + } +} + +/* +** Make a transient copy of expression pExpr and then code it using +** sqlite3ExprCode(). This routine works just like sqlite3ExprCode() +** except that the input expression is guaranteed to be unchanged. +*/ +SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){ + sqlite3 *db = pParse->db; + pExpr = sqlite3ExprDup(db, pExpr, 0); + if( !db->mallocFailed ) sqlite3ExprCode(pParse, pExpr, target); + sqlite3ExprDelete(db, pExpr); +} + +/* +** Generate code that will evaluate expression pExpr and store the +** results in register target. The results are guaranteed to appear +** in register target. If the expression is constant, then this routine +** might choose to code the expression at initialization time. +*/ +SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ + if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){ + sqlite3ExprCodeAtInit(pParse, pExpr, target); + }else{ + sqlite3ExprCode(pParse, pExpr, target); + } +} + +/* +** Generate code that evaluates the given expression and puts the result +** in register target. +** +** Also make a copy of the expression results into another "cache" register +** and modify the expression so that the next time it is evaluated, +** the result is a copy of the cache register. +** +** This routine is used for expressions that are used multiple +** times. They are evaluated once and the results of the expression +** are reused. +*/ +SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr, int target){ + Vdbe *v = pParse->pVdbe; + int iMem; + + assert( target>0 ); + assert( pExpr->op!=TK_REGISTER ); + sqlite3ExprCode(pParse, pExpr, target); + iMem = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Copy, target, iMem); + exprToRegister(pExpr, iMem); +} + +/* +** Generate code that pushes the value of every element of the given +** expression list into a sequence of registers beginning at target. +** +** Return the number of elements evaluated. +** +** The SQLITE_ECEL_DUP flag prevents the arguments from being +** filled using OP_SCopy. OP_Copy must be used instead. +** +** The SQLITE_ECEL_FACTOR argument allows constant arguments to be +** factored out into initialization code. 
+**
+** The SQLITE_ECEL_REF flag means that expressions in the list with
+** ExprList.a[].u.x.iOrderByCol>0 have already been evaluated and stored
+** in registers at srcReg, and so the value can be copied from there.
+*/
+SQLITE_PRIVATE int sqlite3ExprCodeExprList(
+  Parse *pParse,     /* Parsing context */
+  ExprList *pList,   /* The expression list to be coded */
+  int target,        /* Where to write results */
+  int srcReg,        /* Source registers if SQLITE_ECEL_REF */
+  u8 flags           /* SQLITE_ECEL_* flags */
+){
+  struct ExprList_item *pItem;
+  int i, j, n;
+  u8 copyOp = (flags & SQLITE_ECEL_DUP) ? OP_Copy : OP_SCopy;
+  Vdbe *v = pParse->pVdbe;
+  assert( pList!=0 );
+  assert( target>0 );
+  assert( pParse->pVdbe!=0 );  /* Never gets this far otherwise */
+  n = pList->nExpr;
+  if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR;
+  for(pItem=pList->a, i=0; i<n; i++, pItem++){
+    Expr *pExpr = pItem->pExpr;
+    if( (flags & SQLITE_ECEL_REF)!=0 && (j = pItem->u.x.iOrderByCol)>0 ){
+      if( flags & SQLITE_ECEL_OMITREF ){
+        i--;
+        n--;
+      }else{
+        sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i);
+      }
+    }else if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){
+      sqlite3ExprCodeAtInit(pParse, pExpr, target+i);
+    }else{
+      int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i);
+      if( inReg!=target+i ){
+        VdbeOp *pOp;
+        if( copyOp==OP_Copy
+         && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy
+         && pOp->p1+pOp->p3+1==inReg
+         && pOp->p2+pOp->p3+1==target+i
+        ){
+          pOp->p3++;
+        }else{
+          sqlite3VdbeAddOp2(v, copyOp, inReg, target+i);
+        }
+      }
+    }
+  }
+  return n;
+}
+
+/*
+** Generate code for a BETWEEN operator.
+**
+**    x BETWEEN y AND z
+**
+** The above is equivalent to
+**
+**    x>=y AND x<=z
+**
+** Code it as such, taking care to do the common subexpression
+** elimination of x.
+**
+** The xJumpIf parameter determines details:
+**
+**    NULL:                   Store the boolean result in reg[dest]
+**    sqlite3ExprIfTrue:      Jump to dest if true
+**    sqlite3ExprIfFalse:     Jump to dest if false
+**
+** The jumpIfNull parameter is ignored if xJumpIf is NULL.
+*/
+static void exprCodeBetween(
+  Parse *pParse,    /* Parsing and code generating context */
+  Expr *pExpr,      /* The BETWEEN expression */
+  int dest,         /* Jump destination or storage location */
+  void (*xJump)(Parse*,Expr*,int,int), /* Action to take */
+  int jumpIfNull    /* Take the jump if the BETWEEN is NULL */
+){
+  Expr exprAnd;     /* The AND operator in  x>=y AND x<=z  */
+  Expr compLeft;    /* The  x>=y  term */
+  Expr compRight;   /* The  x<=z  term */
+  Expr exprX;       /* The  x  subexpression */
+  int regFree1 = 0; /* Temporary use register */
+
+
+  memset(&compLeft, 0, sizeof(Expr));
+  memset(&compRight, 0, sizeof(Expr));
+  memset(&exprAnd, 0, sizeof(Expr));
+
+  assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+  exprX = *pExpr->pLeft;
+  exprAnd.op = TK_AND;
+  exprAnd.pLeft = &compLeft;
+  exprAnd.pRight = &compRight;
+  compLeft.op = TK_GE;
+  compLeft.pLeft = &exprX;
+  compLeft.pRight = pExpr->x.pList->a[0].pExpr;
+  compRight.op = TK_LE;
+  compRight.pLeft = &exprX;
+  compRight.pRight = pExpr->x.pList->a[1].pExpr;
+  exprToRegister(&exprX, exprCodeVector(pParse, &exprX, &regFree1));
+  if( xJump ){
+    xJump(pParse, &exprAnd, dest, jumpIfNull);
+  }else{
+    /* Mark the expression as being from the ON or USING clause of a join
+    ** so that the sqlite3ExprCodeTarget() routine will not attempt to move
+    ** it into the Parse.pConstExpr list.  We should use a new bit for this,
+    ** for clarity, but we are out of bits in the Expr.flags field so we
+    ** have to reuse the EP_FromJoin bit.  Bummer.
*/ + exprX.flags |= EP_FromJoin; + sqlite3ExprCodeTarget(pParse, &exprAnd, dest); + } + sqlite3ReleaseTempReg(pParse, regFree1); + + /* Ensure adequate test coverage */ + testcase( xJump==sqlite3ExprIfTrue && jumpIfNull==0 && regFree1==0 ); + testcase( xJump==sqlite3ExprIfTrue && jumpIfNull==0 && regFree1!=0 ); + testcase( xJump==sqlite3ExprIfTrue && jumpIfNull!=0 && regFree1==0 ); + testcase( xJump==sqlite3ExprIfTrue && jumpIfNull!=0 && regFree1!=0 ); + testcase( xJump==sqlite3ExprIfFalse && jumpIfNull==0 && regFree1==0 ); + testcase( xJump==sqlite3ExprIfFalse && jumpIfNull==0 && regFree1!=0 ); + testcase( xJump==sqlite3ExprIfFalse && jumpIfNull!=0 && regFree1==0 ); + testcase( xJump==sqlite3ExprIfFalse && jumpIfNull!=0 && regFree1!=0 ); + testcase( xJump==0 ); +} + +/* +** Generate code for a boolean expression such that a jump is made +** to the label "dest" if the expression is true but execution +** continues straight thru if the expression is false. +** +** If the expression evaluates to NULL (neither true nor false), then +** take the jump if the jumpIfNull flag is SQLITE_JUMPIFNULL. +** +** This code depends on the fact that certain token values (ex: TK_EQ) +** are the same as opcode values (ex: OP_Eq) that implement the corresponding +** operation. Special comments in vdbe.c and the mkopcodeh.awk script in +** the make process cause these values to align. Assert()s in the code +** below verify that the numbers are aligned correctly. +*/ +SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ + Vdbe *v = pParse->pVdbe; + int op = 0; + int regFree1 = 0; + int regFree2 = 0; + int r1, r2; + + assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); + if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */ + if( NEVER(pExpr==0) ) return; /* No way this can happen */ + op = pExpr->op; + switch( op ){ + case TK_AND: { + int d2 = sqlite3VdbeMakeLabel(v); + testcase( jumpIfNull==0 ); + sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL); + sqlite3ExprCachePush(pParse); + sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); + sqlite3VdbeResolveLabel(v, d2); + sqlite3ExprCachePop(pParse); + break; + } + case TK_OR: { + testcase( jumpIfNull==0 ); + sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); + sqlite3ExprCachePush(pParse); + sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); + sqlite3ExprCachePop(pParse); + break; + } + case TK_NOT: { + testcase( jumpIfNull==0 ); + sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); + break; + } + case TK_IS: + case TK_ISNOT: + testcase( op==TK_IS ); + testcase( op==TK_ISNOT ); + op = (op==TK_IS) ? 
TK_EQ : TK_NE;
+      jumpIfNull = SQLITE_NULLEQ;
+      /* Fall thru */
+    case TK_LT:
+    case TK_LE:
+    case TK_GT:
+    case TK_GE:
+    case TK_NE:
+    case TK_EQ: {
+      if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr;
+      testcase( jumpIfNull==0 );
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+      codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
+                  r1, r2, dest, jumpIfNull);
+      assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+      assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+      assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+      assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+      assert(TK_EQ==OP_Eq); testcase(op==OP_Eq);
+      VdbeCoverageIf(v, op==OP_Eq && jumpIfNull==SQLITE_NULLEQ);
+      VdbeCoverageIf(v, op==OP_Eq && jumpIfNull!=SQLITE_NULLEQ);
+      assert(TK_NE==OP_Ne); testcase(op==OP_Ne);
+      VdbeCoverageIf(v, op==OP_Ne && jumpIfNull==SQLITE_NULLEQ);
+      VdbeCoverageIf(v, op==OP_Ne && jumpIfNull!=SQLITE_NULLEQ);
+      testcase( regFree1==0 );
+      testcase( regFree2==0 );
+      break;
+    }
+    case TK_ISNULL:
+    case TK_NOTNULL: {
+      assert( TK_ISNULL==OP_IsNull );   testcase( op==TK_ISNULL );
+      assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      sqlite3VdbeAddOp2(v, op, r1, dest);
+      VdbeCoverageIf(v, op==TK_ISNULL);
+      VdbeCoverageIf(v, op==TK_NOTNULL);
+      testcase( regFree1==0 );
+      break;
+    }
+    case TK_BETWEEN: {
+      testcase( jumpIfNull==0 );
+      exprCodeBetween(pParse, pExpr, dest, sqlite3ExprIfTrue, jumpIfNull);
+      break;
+    }
+#ifndef SQLITE_OMIT_SUBQUERY
+    case TK_IN: {
+      int destIfFalse = sqlite3VdbeMakeLabel(v);
+      int destIfNull = jumpIfNull ? dest : destIfFalse;
+      sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull);
+      sqlite3VdbeGoto(v, dest);
+      sqlite3VdbeResolveLabel(v, destIfFalse);
+      break;
+    }
+#endif
+    default: {
+      default_expr:
+      if( exprAlwaysTrue(pExpr) ){
+        sqlite3VdbeGoto(v, dest);
+      }else if( exprAlwaysFalse(pExpr) ){
+        /* No-op */
+      }else{
+        r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+        sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0);
+        VdbeCoverage(v);
+        testcase( regFree1==0 );
+        testcase( jumpIfNull==0 );
+      }
+      break;
+    }
+  }
+  sqlite3ReleaseTempReg(pParse, regFree1);
+  sqlite3ReleaseTempReg(pParse, regFree2);
+}
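+
+/* Illustrative note (not part of the original SQLite sources): the
+** jumpIfNull parameter decides how a NULL result behaves in a jump
+** context.  For sqlite3ExprIfTrue(pParse, x, dest, jumpIfNull):
+**
+**     x evaluates to   jumpIfNull==0    jumpIfNull==SQLITE_JUMPIFNULL
+**     --------------   --------------   ------------------------------
+**     true             jump to dest     jump to dest
+**     false            fall through     fall through
+**     NULL             fall through     jump to dest
+*/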
+
+/*
+** Generate code for a boolean expression such that a jump is made
+** to the label "dest" if the expression is false but execution
+** continues straight thru if the expression is true.
+**
+** If the expression evaluates to NULL (neither true nor false) then
+** jump if jumpIfNull is SQLITE_JUMPIFNULL or fall through if jumpIfNull
+** is 0.
+*/
+SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){
+  Vdbe *v = pParse->pVdbe;
+  int op = 0;
+  int regFree1 = 0;
+  int regFree2 = 0;
+  int r1, r2;
+
+  assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 );
+  if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */
+  if( pExpr==0 )    return;
+
+  /* The value of pExpr->op and op are related as follows:
+  **
+  **       pExpr->op            op
+  **       ---------          ----------
+  **       TK_ISNULL          OP_NotNull
+  **       TK_NOTNULL         OP_IsNull
+  **       TK_NE              OP_Eq
+  **       TK_EQ              OP_Ne
+  **       TK_GT              OP_Le
+  **       TK_LE              OP_Gt
+  **       TK_GE              OP_Lt
+  **       TK_LT              OP_Ge
+  **
+  ** For other values of pExpr->op, op is undefined and unused.
+  ** The value of TK_ and OP_ constants are arranged such that we
+  ** can compute the mapping above using the following expression.
+  ** Assert()s verify that the computation is correct.
+  */
+  op = ((pExpr->op+(TK_ISNULL&1))^1)-(TK_ISNULL&1);
+
+  /* Verify correct alignment of TK_ and OP_ constants
+  */
+  assert( pExpr->op!=TK_ISNULL || op==OP_NotNull );
+  assert( pExpr->op!=TK_NOTNULL || op==OP_IsNull );
+  assert( pExpr->op!=TK_NE || op==OP_Eq );
+  assert( pExpr->op!=TK_EQ || op==OP_Ne );
+  assert( pExpr->op!=TK_LT || op==OP_Ge );
+  assert( pExpr->op!=TK_LE || op==OP_Gt );
+  assert( pExpr->op!=TK_GT || op==OP_Le );
+  assert( pExpr->op!=TK_GE || op==OP_Lt );
+
+  switch( pExpr->op ){
+    case TK_AND: {
+      testcase( jumpIfNull==0 );
+      sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull);
+      sqlite3ExprCachePush(pParse);
+      sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+      sqlite3ExprCachePop(pParse);
+      break;
+    }
+    case TK_OR: {
+      int d2 = sqlite3VdbeMakeLabel(v);
+      testcase( jumpIfNull==0 );
+      sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL);
+      sqlite3ExprCachePush(pParse);
+      sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull);
+      sqlite3VdbeResolveLabel(v, d2);
+      sqlite3ExprCachePop(pParse);
+      break;
+    }
+    case TK_NOT: {
+      testcase( jumpIfNull==0 );
+      sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull);
+      break;
+    }
+    case TK_IS:
+    case TK_ISNOT:
+      testcase( pExpr->op==TK_IS );
+      testcase( pExpr->op==TK_ISNOT );
+      op = (pExpr->op==TK_IS) ? TK_NE : TK_EQ;
+      jumpIfNull = SQLITE_NULLEQ;
+      /* Fall thru */
+    case TK_LT:
+    case TK_LE:
+    case TK_GT:
+    case TK_GE:
+    case TK_NE:
+    case TK_EQ: {
+      if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr;
+      testcase( jumpIfNull==0 );
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+      codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
+                  r1, r2, dest, jumpIfNull);
+      assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+      assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+      assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+      assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+      assert(TK_EQ==OP_Eq); testcase(op==OP_Eq);
+      VdbeCoverageIf(v, op==OP_Eq && jumpIfNull!=SQLITE_NULLEQ);
+      VdbeCoverageIf(v, op==OP_Eq && jumpIfNull==SQLITE_NULLEQ);
+      assert(TK_NE==OP_Ne); testcase(op==OP_Ne);
+      VdbeCoverageIf(v, op==OP_Ne && jumpIfNull!=SQLITE_NULLEQ);
+      VdbeCoverageIf(v, op==OP_Ne && jumpIfNull==SQLITE_NULLEQ);
+      testcase( regFree1==0 );
+      testcase( regFree2==0 );
+      break;
+    }
+    case TK_ISNULL:
+    case TK_NOTNULL: {
+      r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
+      sqlite3VdbeAddOp2(v, op, r1, dest);
+      testcase( op==TK_ISNULL );   VdbeCoverageIf(v, op==TK_ISNULL);
+      testcase( op==TK_NOTNULL );  VdbeCoverageIf(v, op==TK_NOTNULL);
+      testcase( regFree1==0 );
+      break;
+    }
+    case TK_BETWEEN: {
+      testcase( jumpIfNull==0 );
+      exprCodeBetween(pParse, pExpr, dest, sqlite3ExprIfFalse, jumpIfNull);
+      break;
+    }
+#ifndef SQLITE_OMIT_SUBQUERY
+    case TK_IN: {
+      if( jumpIfNull ){
+        sqlite3ExprCodeIN(pParse, pExpr, dest, dest);
+      }else{
+        int destIfNull = sqlite3VdbeMakeLabel(v);
+        sqlite3ExprCodeIN(pParse, pExpr, dest, destIfNull);
+        sqlite3VdbeResolveLabel(v, destIfNull);
+      }
+      break;
+    }
+#endif
+    default: {
+      default_expr:
+      if( exprAlwaysFalse(pExpr) ){
+        sqlite3VdbeGoto(v, dest);
+      }else if( exprAlwaysTrue(pExpr) ){
+        /* no-op */
+      }else{
+        r1 = sqlite3ExprCodeTemp(pParse, pExpr, &regFree1);
+        sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0);
+        VdbeCoverage(v);
+        testcase( regFree1==0 );
testcase( jumpIfNull==0 ); + } + break; + } + } + sqlite3ReleaseTempReg(pParse, regFree1); + sqlite3ReleaseTempReg(pParse, regFree2); +} + +/* +** Like sqlite3ExprIfFalse() except that a copy is made of pExpr before +** code generation, and that copy is deleted after code generation. This +** ensures that the original pExpr is unchanged. +*/ +SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,int jumpIfNull){ + sqlite3 *db = pParse->db; + Expr *pCopy = sqlite3ExprDup(db, pExpr, 0); + if( db->mallocFailed==0 ){ + sqlite3ExprIfFalse(pParse, pCopy, dest, jumpIfNull); + } + sqlite3ExprDelete(db, pCopy); +} + + +/* +** Do a deep comparison of two expression trees. Return 0 if the two +** expressions are completely identical. Return 1 if they differ only +** by a COLLATE operator at the top level. Return 2 if there are differences +** other than the top-level COLLATE operator. +** +** If any subelement of pB has Expr.iTable==(-1) then it is allowed +** to compare equal to an equivalent element in pA with Expr.iTable==iTab. +** +** The pA side might be using TK_REGISTER. If that is the case and pB is +** not using TK_REGISTER but is otherwise equivalent, then still return 0. +** +** Sometimes this routine will return 2 even if the two expressions +** really are equivalent. If we cannot prove that the expressions are +** identical, we return 2 just to be safe. So if this routine +** returns 2, then you do not really know for certain if the two +** expressions are the same. But if you get a 0 or 1 return, then you +** can be sure the expressions are the same. In the places where +** this routine is used, it does not hurt to get an extra 2 - that +** just might result in some slightly slower code. But returning +** an incorrect 0 or 1 could lead to a malfunction. +*/ +SQLITE_PRIVATE int sqlite3ExprCompare(Expr *pA, Expr *pB, int iTab){ + u32 combinedFlags; + if( pA==0 || pB==0 ){ + return pB==pA ? 0 : 2; + } + combinedFlags = pA->flags | pB->flags; + if( combinedFlags & EP_IntValue ){ + if( (pA->flags&pB->flags&EP_IntValue)!=0 && pA->u.iValue==pB->u.iValue ){ + return 0; + } + return 2; + } + if( pA->op!=pB->op ){ + if( pA->op==TK_COLLATE && sqlite3ExprCompare(pA->pLeft, pB, iTab)<2 ){ + return 1; + } + if( pB->op==TK_COLLATE && sqlite3ExprCompare(pA, pB->pLeft, iTab)<2 ){ + return 1; + } + return 2; + } + if( pA->op!=TK_COLUMN && pA->op!=TK_AGG_COLUMN && pA->u.zToken ){ + if( pA->op==TK_FUNCTION ){ + if( sqlite3StrICmp(pA->u.zToken,pB->u.zToken)!=0 ) return 2; + }else if( strcmp(pA->u.zToken,pB->u.zToken)!=0 ){ + return pA->op==TK_COLLATE ? 1 : 2; + } + } + if( (pA->flags & EP_Distinct)!=(pB->flags & EP_Distinct) ) return 2; + if( ALWAYS((combinedFlags & EP_TokenOnly)==0) ){ + if( combinedFlags & EP_xIsSelect ) return 2; + if( sqlite3ExprCompare(pA->pLeft, pB->pLeft, iTab) ) return 2; + if( sqlite3ExprCompare(pA->pRight, pB->pRight, iTab) ) return 2; + if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList, iTab) ) return 2; + if( ALWAYS((combinedFlags & EP_Reduced)==0) && pA->op!=TK_STRING ){ + if( pA->iColumn!=pB->iColumn ) return 2; + if( pA->iTable!=pB->iTable + && (pA->iTable!=iTab || NEVER(pB->iTable>=0)) ) return 2; + } + } + return 0; +} + +/* +** Compare two ExprList objects. Return 0 if they are identical and +** non-zero if they differ in any way. +** +** If any subelement of pB has Expr.iTable==(-1) then it is allowed +** to compare equal to an equivalent element in pA with Expr.iTable==iTab. +** +** This routine might return non-zero for equivalent ExprLists. 
+** The only consequence will be disabled optimizations.  But this routine
+** must never return 0 if the two ExprList objects are different, or
+** a malfunction will result.
+**
+** Two NULL pointers are considered to be the same.  But a NULL pointer
+** always differs from a non-NULL pointer.
+*/
+SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList *pA, ExprList *pB, int iTab){
+  int i;
+  if( pA==0 && pB==0 ) return 0;
+  if( pA==0 || pB==0 ) return 1;
+  if( pA->nExpr!=pB->nExpr ) return 1;
+  for(i=0; i<pA->nExpr; i++){
+    Expr *pExprA = pA->a[i].pExpr;
+    Expr *pExprB = pB->a[i].pExpr;
+    if( pA->a[i].sortOrder!=pB->a[i].sortOrder ) return 1;
+    if( sqlite3ExprCompare(pExprA, pExprB, iTab) ) return 1;
+  }
+  return 0;
+}
+
+/*
+** Return true if we can prove the pE2 will always be true if pE1 is
+** true.  Return false if we cannot complete the proof or if pE2 might
+** be false.  Examples:
+**
+**     pE1: x==5       pE2: x==5             Result: true
+**     pE1: x>0        pE2: x==5             Result: false
+**     pE1: x=21       pE2: x=21 OR y=43     Result: true
+**     pE1: x!=123     pE2: x IS NOT NULL    Result: true
+**     pE1: x!=?1      pE2: x IS NOT NULL    Result: true
+**     pE1: x IS NULL  pE2: x IS NOT NULL    Result: false
+**     pE1: x IS ?2    pE2: x IS NOT NULL    Result: false
+**
+** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has
+** Expr.iTable<0 then assume a table number given by iTab.
+**
+** When in doubt, return false.  Returning true might give a performance
+** improvement.  Returning false might cause a performance reduction, but
+** it will always give the correct answer and is hence always safe.
+*/
+SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr *pE1, Expr *pE2, int iTab){
+  if( sqlite3ExprCompare(pE1, pE2, iTab)==0 ){
+    return 1;
+  }
+  if( pE2->op==TK_OR
+   && (sqlite3ExprImpliesExpr(pE1, pE2->pLeft, iTab)
+             || sqlite3ExprImpliesExpr(pE1, pE2->pRight, iTab) )
+  ){
+    return 1;
+  }
+  if( pE2->op==TK_NOTNULL && pE1->op!=TK_ISNULL && pE1->op!=TK_IS ){
+    Expr *pX = sqlite3ExprSkipCollate(pE1->pLeft);
+    testcase( pX!=pE1->pLeft );
+    if( sqlite3ExprCompare(pX, pE2->pLeft, iTab)==0 ) return 1;
+  }
+  return 0;
+}
+
+/*
+** An instance of the following structure is used by the tree walker
+** to determine if an expression can be evaluated by reference to the
+** index only, without having to do a search for the corresponding
+** table entry.  The IdxCover.pIdx field is the index.  IdxCover.iCur
+** is the cursor for the table.
+*/
+struct IdxCover {
+  Index *pIdx;     /* The index to be tested for coverage */
+  int iCur;        /* Cursor number for the table corresponding to the index */
+};
+
+/*
+** Check to see whether references to columns in table
+** pWalker->u.pIdxCover->iCur can be satisfied using the index
+** pWalker->u.pIdxCover->pIdx.  pWalker->eCode is set if they cannot.
+*/
+static int exprIdxCover(Walker *pWalker, Expr *pExpr){
+  if( pExpr->op==TK_COLUMN
+   && pExpr->iTable==pWalker->u.pIdxCover->iCur
+   && sqlite3ColumnOfIndex(pWalker->u.pIdxCover->pIdx, pExpr->iColumn)<0
+  ){
+    pWalker->eCode = 1;
+    return WRC_Abort;
+  }
+  return WRC_Continue;
+}
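+
+/* Illustrative note (not part of the original SQLite sources): given
+**
+**     CREATE TABLE t1(a, b, c);
+**     CREATE INDEX i1 ON t1(a, b);
+**
+** the expression a+b is covered by i1, since every column it references
+** is a column of the index, but a+c is not covered, because c can only
+** be found by looking up the full table row.
+*/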
+/*
+** Determine whether an index pIdx on the table with cursor iCur covers
+** the expression pExpr.  Return true if the index does cover the
+** expression and false if the pExpr expression references table columns
+** that are not found in the index pIdx.
+**
+** An index covering an expression means that the expression can be
+** evaluated using only the index and without having to lookup the
+** corresponding table entry.
+*/
+SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(
+  Expr *pExpr,        /* The index to be tested */
+  int iCur,           /* The cursor number for the corresponding table */
+  Index *pIdx         /* The index that might be used for coverage */
+){
+  Walker w;
+  struct IdxCover xcov;
+  memset(&w, 0, sizeof(w));
+  xcov.iCur = iCur;
+  xcov.pIdx = pIdx;
+  w.xExprCallback = exprIdxCover;
+  w.u.pIdxCover = &xcov;
+  sqlite3WalkExpr(&w, pExpr);
+  return !w.eCode;
+}
+
+
+/*
+** An instance of the following structure is used by the tree walker
+** to count references to table columns in the arguments of an
+** aggregate function, in order to implement the
+** sqlite3FunctionUsesThisSrc() routine.
+*/
+struct SrcCount {
+  SrcList *pSrc;   /* One particular FROM clause in a nested query */
+  int nThis;       /* Number of references to columns in pSrcList */
+  int nOther;      /* Number of references to columns in other FROM clauses */
+};
+
+/*
+** Count the number of references to columns.
+*/
+static int exprSrcCount(Walker *pWalker, Expr *pExpr){
+  /* The NEVER() on the second term is because sqlite3FunctionUsesThisSrc()
+  ** is always called before sqlite3ExprAnalyzeAggregates() and so the
+  ** TK_COLUMNs have not yet been converted into TK_AGG_COLUMN.  If
+  ** sqlite3FunctionUsesThisSrc() is used differently in the future, the
+  ** NEVER() will need to be removed. */
+  if( pExpr->op==TK_COLUMN || NEVER(pExpr->op==TK_AGG_COLUMN) ){
+    int i;
+    struct SrcCount *p = pWalker->u.pSrcCount;
+    SrcList *pSrc = p->pSrc;
+    int nSrc = pSrc ? pSrc->nSrc : 0;
+    for(i=0; i<nSrc; i++){
+      if( pExpr->iTable==pSrc->a[i].iCursor ) break;
+    }
+    if( i<nSrc ){
+      p->nThis++;
+    }else{
+      p->nOther++;
+    }
+  }
+  return WRC_Continue;
+}
+
+/*
+** Determine if any of the arguments to the pExpr Function reference
+** pSrcList.  Return true if they do.  Also return true if the function
+** has no arguments or has only constant arguments.  Return false if pExpr
+** references columns but not columns of tables found in pSrcList.
+*/
+SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr *pExpr, SrcList *pSrcList){
+  Walker w;
+  struct SrcCount cnt;
+  assert( pExpr->op==TK_AGG_FUNCTION );
+  memset(&w, 0, sizeof(w));
+  w.xExprCallback = exprSrcCount;
+  w.u.pSrcCount = &cnt;
+  cnt.pSrc = pSrcList;
+  cnt.nThis = 0;
+  cnt.nOther = 0;
+  sqlite3WalkExprList(&w, pExpr->x.pList);
+  return cnt.nThis>0 || cnt.nOther==0;
+}
+
+/*
+** Add a new element to the pAggInfo->aCol[] array.  Return the index of
+** the new element.  Return a negative number if malloc fails.
+*/
+static int addAggInfoColumn(sqlite3 *db, AggInfo *pInfo){
+  int i;
+  pInfo->aCol = sqlite3ArrayAllocate(
+       db,
+       pInfo->aCol,
+       sizeof(pInfo->aCol[0]),
+       &pInfo->nColumn,
+       &i
+  );
+  return i;
+}
+
+/*
+** Add a new element to the pAggInfo->aFunc[] array.  Return the index of
+** the new element.  Return a negative number if malloc fails.
+*/
+static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){
+  int i;
+  pInfo->aFunc = sqlite3ArrayAllocate(
+       db,
+       pInfo->aFunc,
+       sizeof(pInfo->aFunc[0]),
+       &pInfo->nFunc,
+       &i
+  );
+  return i;
+}
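+
+/* Illustrative note (not part of the original SQLite sources): in a
+** correlated query such as
+**
+**     SELECT (SELECT count(x.a) FROM y) FROM x;
+**
+** sqlite3FunctionUsesThisSrc() reports that count(x.a) does not use the
+** subquery's FROM clause (y), so the aggregate is charged to the outer
+** query over x rather than to the subquery.
+*/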
+/*
+** This is the xExprCallback for a tree walker.  It is used to
+** implement sqlite3ExprAnalyzeAggregates().  See sqlite3ExprAnalyzeAggregates
+** for additional information.
+*/
+static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
+  int i;
+  NameContext *pNC = pWalker->u.pNC;
+  Parse *pParse = pNC->pParse;
+  SrcList *pSrcList = pNC->pSrcList;
+  AggInfo *pAggInfo = pNC->pAggInfo;
+
+  switch( pExpr->op ){
+    case TK_AGG_COLUMN:
+    case TK_COLUMN: {
+      testcase( pExpr->op==TK_AGG_COLUMN );
+      testcase( pExpr->op==TK_COLUMN );
+      /* Check to see if the column is in one of the tables in the FROM
+      ** clause of the aggregate query */
+      if( ALWAYS(pSrcList!=0) ){
+        struct SrcList_item *pItem = pSrcList->a;
+        for(i=0; i<pSrcList->nSrc; i++, pItem++){
+          struct AggInfo_col *pCol;
+          assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
+          if( pExpr->iTable==pItem->iCursor ){
+            /* If we reach this point, it means that pExpr refers to a table
+            ** that is in the FROM clause of the aggregate query.
+            **
+            ** Make an entry for the column in pAggInfo->aCol[] if there
+            ** is not an entry there already.
+            */
+            int k;
+            pCol = pAggInfo->aCol;
+            for(k=0; k<pAggInfo->nColumn; k++, pCol++){
+              if( pCol->iTable==pExpr->iTable &&
+                  pCol->iColumn==pExpr->iColumn ){
+                break;
+              }
+            }
+            if( (k>=pAggInfo->nColumn)
+             && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0
+            ){
+              pCol = &pAggInfo->aCol[k];
+              pCol->pTab = pExpr->pTab;
+              pCol->iTable = pExpr->iTable;
+              pCol->iColumn = pExpr->iColumn;
+              pCol->iMem = ++pParse->nMem;
+              pCol->iSorterColumn = -1;
+              pCol->pExpr = pExpr;
+              if( pAggInfo->pGroupBy ){
+                int j, n;
+                ExprList *pGB = pAggInfo->pGroupBy;
+                struct ExprList_item *pTerm = pGB->a;
+                n = pGB->nExpr;
+                for(j=0; j<n; j++, pTerm++){
+                  Expr *pE = pTerm->pExpr;
+                  if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable &&
+                      pE->iColumn==pExpr->iColumn ){
+                    pCol->iSorterColumn = j;
+                    break;
+                  }
+                }
+              }
+              if( pCol->iSorterColumn<0 ){
+                pCol->iSorterColumn = pAggInfo->nSortingColumn++;
+              }
+            }
+            /* There is now an entry for pExpr in pAggInfo->aCol[] (either
+            ** because it was there before or because we just created it).
+            ** Convert the pExpr to be a TK_AGG_COLUMN referring to that
+            ** pAggInfo->aCol[] entry.
+            */
+            ExprSetVVAProperty(pExpr, EP_NoReduce);
+            pExpr->pAggInfo = pAggInfo;
+            pExpr->op = TK_AGG_COLUMN;
+            pExpr->iAgg = (i16)k;
+            break;
+          } /* endif pExpr->iTable==pItem->iCursor */
+        } /* end loop over pSrcList */
+      }
+      return WRC_Prune;
+    }
+    case TK_AGG_FUNCTION: {
+      if( (pNC->ncFlags & NC_InAggFunc)==0
+       && pWalker->walkerDepth==pExpr->op2
+      ){
+        /* Check to see if pExpr is a duplicate of another aggregate
+        ** function that is already in the pAggInfo structure
+        */
+        struct AggInfo_func *pItem = pAggInfo->aFunc;
+        for(i=0; i<pAggInfo->nFunc; i++, pItem++){
+          if( sqlite3ExprCompare(pItem->pExpr, pExpr, -1)==0 ){
+            break;
+          }
+        }
+        if( i>=pAggInfo->nFunc ){
+          /* pExpr is original.  Make a new entry in pAggInfo->aFunc[]
+          */
+          u8 enc = ENC(pParse->db);
+          i = addAggInfoFunc(pParse->db, pAggInfo);
+          if( i>=0 ){
+            assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+            pItem = &pAggInfo->aFunc[i];
+            pItem->pExpr = pExpr;
+            pItem->iMem = ++pParse->nMem;
+            assert( !ExprHasProperty(pExpr, EP_IntValue) );
+            pItem->pFunc = sqlite3FindFunction(pParse->db,
+                   pExpr->u.zToken,
+                   pExpr->x.pList ? pExpr->x.pList->nExpr : 0, enc, 0);
+            if( pExpr->flags & EP_Distinct ){
+              pItem->iDistinct = pParse->nTab++;
+            }else{
+              pItem->iDistinct = -1;
+            }
+          }
+        }
+        /* Make pExpr point to the appropriate pAggInfo->aFunc[] entry
+        */
+        assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
+        ExprSetVVAProperty(pExpr, EP_NoReduce);
+        pExpr->iAgg = (i16)i;
+        pExpr->pAggInfo = pAggInfo;
+        return WRC_Prune;
+      }else{
+        return WRC_Continue;
+      }
+    }
+  }
+  return WRC_Continue;
+}
+static int analyzeAggregatesInSelect(Walker *pWalker, Select *pSelect){
+  UNUSED_PARAMETER(pWalker);
+  UNUSED_PARAMETER(pSelect);
+  return WRC_Continue;
+}
+
+/*
+** Analyze the pExpr expression looking for aggregate functions and
+** for variables that need to be added to the AggInfo object that
+** pNC->pAggInfo points to.  Additional entries are made on the AggInfo
+** object as necessary.
+**
+** This routine should only be called after the expression has been
+** analyzed by sqlite3ResolveExprNames().
+*/
+SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){
+  Walker w;
+  memset(&w, 0, sizeof(w));
+  w.xExprCallback = analyzeAggregate;
+  w.xSelectCallback = analyzeAggregatesInSelect;
+  w.u.pNC = pNC;
+  assert( pNC->pSrcList!=0 );
+  sqlite3WalkExpr(&w, pExpr);
+}
+
+/*
+** Call sqlite3ExprAnalyzeAggregates() for every expression in an
+** expression list.
+**
+** If an error is found, the analysis is cut short.
+*/
+SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){
+  struct ExprList_item *pItem;
+  int i;
+  if( pList ){
+    for(pItem=pList->a, i=0; i<pList->nExpr; i++, pItem++){
+      sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr);
+    }
+  }
+}
+
+/*
+** Allocate a single new register for use to hold some intermediate result.
+*/
+SQLITE_PRIVATE int sqlite3GetTempReg(Parse *pParse){
+  if( pParse->nTempReg==0 ){
+    return ++pParse->nMem;
+  }
+  return pParse->aTempReg[--pParse->nTempReg];
+}
+
+/*
+** Deallocate a register, making available for reuse for some other
+** purpose.
+**
+** If a register is currently being used by the column cache, then
+** the deallocation is deferred until the column cache line that uses
+** the register becomes stale.
+*/
+SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){
+  if( iReg && pParse->nTempReg<ArraySize(pParse->aTempReg) ){
+    int i;
+    struct yColCache *p;
+    for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+      if( p->iReg==iReg ){
+        p->tempReg = 1;
+        return;
+      }
+    }
+    pParse->aTempReg[pParse->nTempReg++] = iReg;
+  }
+}
+
+/*
+** Allocate or deallocate a block of nReg consecutive registers.
+*/
+SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){
+  int i, n;
+  if( nReg==1 ) return sqlite3GetTempReg(pParse);
+  i = pParse->iRangeReg;
+  n = pParse->nRangeReg;
+  if( nReg<=n ){
+    assert( !usedAsColumnCache(pParse, i, i+n-1) );
+    pParse->iRangeReg += nReg;
+    pParse->nRangeReg -= nReg;
+  }else{
+    i = pParse->nMem+1;
+    pParse->nMem += nReg;
+  }
+  return i;
+}
+SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){
+  if( nReg==1 ){
+    sqlite3ReleaseTempReg(pParse, iReg);
+    return;
+  }
+  sqlite3ExprCacheRemove(pParse, iReg, nReg);
+  if( nReg>pParse->nRangeReg ){
+    pParse->nRangeReg = nReg;
+    pParse->iRangeReg = iReg;
+  }
+}
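+
+/* Illustrative sketch (not part of the original SQLite sources) of the
+** acquire/release discipline for the temporary-register routines above.
+** The #if 0 keeps it out of the build:
+*/
+#if 0
+static void exampleTempRegUsage(Parse *pParse){
+  int r = sqlite3GetTempReg(pParse);         /* grab a scratch register */
+  int rFirst;
+  sqlite3VdbeAddOp2(pParse->pVdbe, OP_Integer, 42, r);
+  /* ... emit opcodes that read register r ... */
+  sqlite3ReleaseTempReg(pParse, r);          /* make it reusable again */
+
+  rFirst = sqlite3GetTempRange(pParse, 3);   /* three consecutive registers */
+  /* ... use rFirst, rFirst+1, rFirst+2 ... */
+  sqlite3ReleaseTempRange(pParse, rFirst, 3);
+}
+#endif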
+/*
+** Mark all temporary registers as being unavailable for reuse.
+*/
+SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){
+  pParse->nTempReg = 0;
+  pParse->nRangeReg = 0;
+}
+
+/*
+** Validate that no temporary register falls within the range of
+** iFirst..iLast, inclusive.  This routine is only called from within
+** assert() statements.
+*/
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse *pParse, int iFirst, int iLast){
+  int i;
+  if( pParse->nRangeReg>0
+   && pParse->iRangeReg+pParse->nRangeReg<iLast
+   && pParse->iRangeReg>=iFirst
+  ){
+    return 0;
+  }
+  for(i=0; i<pParse->nTempReg; i++){
+    if( pParse->aTempReg[i]>=iFirst && pParse->aTempReg[i]<=iLast ){
+      return 0;
+    }
+  }
+  return 1;
+}
+#endif /* SQLITE_DEBUG */
+
+/************** End of expr.c ************************************************/
+/************** Begin file alter.c *******************************************/
+/*
+** 2005 February 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**     May you do good and not evil.
+**     May you find forgiveness for yourself and forgive others.
+**     May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines used to generate VDBE code
+** that implements the ALTER TABLE command.
+*/
+/* #include "sqliteInt.h" */
+
+/*
+** The code in this file only exists if we are not omitting the
+** ALTER TABLE logic from the build.
+*/
+#ifndef SQLITE_OMIT_ALTERTABLE
+
+
+/*
+** This function is used by SQL generated to implement the
+** ALTER TABLE command.  The first argument is the text of a CREATE TABLE or
+** CREATE INDEX command.  The second is a table name.  The table name in
+** the CREATE TABLE or CREATE INDEX statement is replaced with the third
+** argument and the result returned.  Examples:
+**
+** sqlite_rename_table('CREATE TABLE abc(a, b, c)', 'def')
+**     -> 'CREATE TABLE def(a, b, c)'
+**
+** sqlite_rename_table('CREATE INDEX i ON abc(a)', 'def')
+**     -> 'CREATE INDEX i ON def(a)'
+*/
+static void renameTableFunc(
+  sqlite3_context *context,
+  int NotUsed,
+  sqlite3_value **argv
+){
+  unsigned char const *zSql = sqlite3_value_text(argv[0]);
+  unsigned char const *zTableName = sqlite3_value_text(argv[1]);
+
+  int token;
+  Token tname;
+  unsigned char const *zCsr = zSql;
+  int len = 0;
+  char *zRet;
+
+  sqlite3 *db = sqlite3_context_db_handle(context);
+
+  UNUSED_PARAMETER(NotUsed);
+
+  /* The principle used to locate the table name in the CREATE TABLE
+  ** statement is that the table name is the first non-space token that
+  ** is immediately followed by a TK_LP or TK_USING token.
+  */
+  if( zSql ){
+    do {
+      if( !*zCsr ){
+        /* Ran out of input before finding an opening bracket. Return NULL. */
+        return;
+      }
+
+      /* Store the token that zCsr points to in tname. */
+      tname.z = (char*)zCsr;
+      tname.n = len;
+
+      /* Advance zCsr to the next token. Store that token type in 'token',
+      ** and its length in 'len' (to be used next iteration of this loop).
+      */
+      do {
+        zCsr += len;
+        len = sqlite3GetToken(zCsr, &token);
+      } while( token==TK_SPACE );
+      assert( len>0 );
+    } while( token!=TK_LP && token!=TK_USING );
+
+    zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql),
+       zSql, zTableName, tname.z+tname.n);
+    sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC);
+  }
+}
+
+/*
+** This C function implements an SQL user function that is used by SQL code
+** generated by the ALTER TABLE ...
RENAME command to modify the definition +** of any foreign key constraints that use the table being renamed as the +** parent table. It is passed three arguments: +** +** 1) The complete text of the CREATE TABLE statement being modified, +** 2) The old name of the table being renamed, and +** 3) The new name of the table being renamed. +** +** It returns the new CREATE TABLE statement. For example: +** +** sqlite_rename_parent('CREATE TABLE t1(a REFERENCES t2)', 't2', 't3') +** -> 'CREATE TABLE t1(a REFERENCES t3)' +*/ +#ifndef SQLITE_OMIT_FOREIGN_KEY +static void renameParentFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + sqlite3 *db = sqlite3_context_db_handle(context); + char *zOutput = 0; + char *zResult; + unsigned char const *zInput = sqlite3_value_text(argv[0]); + unsigned char const *zOld = sqlite3_value_text(argv[1]); + unsigned char const *zNew = sqlite3_value_text(argv[2]); + + unsigned const char *z; /* Pointer to token */ + int n; /* Length of token z */ + int token; /* Type of token */ + + UNUSED_PARAMETER(NotUsed); + if( zInput==0 || zOld==0 ) return; + for(z=zInput; *z; z=z+n){ + n = sqlite3GetToken(z, &token); + if( token==TK_REFERENCES ){ + char *zParent; + do { + z += n; + n = sqlite3GetToken(z, &token); + }while( token==TK_SPACE ); + + if( token==TK_ILLEGAL ) break; + zParent = sqlite3DbStrNDup(db, (const char *)z, n); + if( zParent==0 ) break; + sqlite3Dequote(zParent); + if( 0==sqlite3StrICmp((const char *)zOld, zParent) ){ + char *zOut = sqlite3MPrintf(db, "%s%.*s\"%w\"", + (zOutput?zOutput:""), (int)(z-zInput), zInput, (const char *)zNew + ); + sqlite3DbFree(db, zOutput); + zOutput = zOut; + zInput = &z[n]; + } + sqlite3DbFree(db, zParent); + } + } + + zResult = sqlite3MPrintf(db, "%s%s", (zOutput?zOutput:""), zInput), + sqlite3_result_text(context, zResult, -1, SQLITE_DYNAMIC); + sqlite3DbFree(db, zOutput); +} +#endif + +#ifndef SQLITE_OMIT_TRIGGER +/* This function is used by SQL generated to implement the +** ALTER TABLE command. The first argument is the text of a CREATE TRIGGER +** statement. The second is a table name. The table name in the CREATE +** TRIGGER statement is replaced with the third argument and the result +** returned. This is analagous to renameTableFunc() above, except for CREATE +** TRIGGER, not CREATE INDEX and CREATE TABLE. +*/ +static void renameTriggerFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + unsigned char const *zSql = sqlite3_value_text(argv[0]); + unsigned char const *zTableName = sqlite3_value_text(argv[1]); + + int token; + Token tname; + int dist = 3; + unsigned char const *zCsr = zSql; + int len = 0; + char *zRet; + sqlite3 *db = sqlite3_context_db_handle(context); + + UNUSED_PARAMETER(NotUsed); + + /* The principle used to locate the table name in the CREATE TRIGGER + ** statement is that the table name is the first token that is immediately + ** preceded by either TK_ON or TK_DOT and immediately followed by one + ** of TK_WHEN, TK_BEGIN or TK_FOR. + */ + if( zSql ){ + do { + + if( !*zCsr ){ + /* Ran out of input before finding the table name. Return NULL. */ + return; + } + + /* Store the token that zCsr points to in tname. */ + tname.z = (char*)zCsr; + tname.n = len; + + /* Advance zCsr to the next token. Store that token type in 'token', + ** and its length in 'len' (to be used next iteration of this loop). 
+      */
+      do {
+        zCsr += len;
+        len = sqlite3GetToken(zCsr, &token);
+      }while( token==TK_SPACE );
+      assert( len>0 );
+
+      /* Variable 'dist' stores the number of tokens read since the most
+      ** recent TK_DOT or TK_ON. This means that when a WHEN, FOR or BEGIN
+      ** token is read and 'dist' equals 2, the condition stated above
+      ** is met.
+      **
+      ** Note that ON cannot be a database, table or column name, so
+      ** there is no need to worry about syntax like
+      ** "CREATE TRIGGER ... ON ON.ON BEGIN ..." etc.
+      */
+      dist++;
+      if( token==TK_DOT || token==TK_ON ){
+        dist = 0;
+      }
+    } while( dist!=2 || (token!=TK_WHEN && token!=TK_FOR && token!=TK_BEGIN) );
+
+    /* Variable tname now contains the token that is the old table-name
+    ** in the CREATE TRIGGER statement.
+    */
+    zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql),
+       zSql, zTableName, tname.z+tname.n);
+    sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC);
+  }
+}
+#endif   /* !SQLITE_OMIT_TRIGGER */
+
+/*
+** Register built-in functions used to help implement ALTER TABLE
+*/
+SQLITE_PRIVATE void sqlite3AlterFunctions(void){
+  static FuncDef aAlterTableFuncs[] = {
+    FUNCTION(sqlite_rename_table,   2, 0, 0, renameTableFunc),
+#ifndef SQLITE_OMIT_TRIGGER
+    FUNCTION(sqlite_rename_trigger, 2, 0, 0, renameTriggerFunc),
+#endif
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+    FUNCTION(sqlite_rename_parent,  3, 0, 0, renameParentFunc),
+#endif
+  };
+  sqlite3InsertBuiltinFuncs(aAlterTableFuncs, ArraySize(aAlterTableFuncs));
+}
+
+/*
+** This function is used to create the text of expressions of the form:
+**
+**   name=<constant1> OR name=<constant2> OR ...
+**
+** If argument zWhere is NULL, then a pointer string containing the text
+** "name=<constant>" is returned, where <constant> is the quoted version
+** of the string passed as argument zConstant. The returned buffer is
+** allocated using sqlite3DbMalloc(). It is the responsibility of the
+** caller to ensure that it is eventually freed.
+**
+** If argument zWhere is not NULL, then the string returned is
+** "<where> OR name=<constant>", where <where> is the contents of zWhere.
+** In this case zWhere is passed to sqlite3DbFree() before returning.
+**
+*/
+static char *whereOrName(sqlite3 *db, char *zWhere, char *zConstant){
+  char *zNew;
+  if( !zWhere ){
+    zNew = sqlite3MPrintf(db, "name=%Q", zConstant);
+  }else{
+    zNew = sqlite3MPrintf(db, "%s OR name=%Q", zWhere, zConstant);
+    sqlite3DbFree(db, zWhere);
+  }
+  return zNew;
+}
+
+#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
+/*
+** Generate the text of a WHERE expression which can be used to select all
+** tables that have foreign key constraints that refer to table pTab (i.e.
+** constraints for which pTab is the parent table) from the sqlite_master
+** table.
+*/
+static char *whereForeignKeys(Parse *pParse, Table *pTab){
+  FKey *p;
+  char *zWhere = 0;
+  for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+    zWhere = whereOrName(pParse->db, zWhere, p->pFrom->zName);
+  }
+  return zWhere;
+}
+#endif
+
+/*
+** Generate the text of a WHERE expression which can be used to select all
+** temporary triggers on table pTab from the sqlite_temp_master table. If
+** table pTab has no temporary triggers, or is itself stored in the
+** temporary database, NULL is returned.
+*/
+static char *whereTempTriggers(Parse *pParse, Table *pTab){
+  Trigger *pTrig;
+  char *zWhere = 0;
+  const Schema *pTempSchema = pParse->db->aDb[1].pSchema;  /* Temp db schema */
+
+  /* If the table is not located in the temp-db (in which case NULL is
+  ** returned), loop through the table's list of triggers.
For each trigger + ** that is not part of the temp-db schema, add a clause to the WHERE + ** expression being built up in zWhere. + */ + if( pTab->pSchema!=pTempSchema ){ + sqlite3 *db = pParse->db; + for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ + if( pTrig->pSchema==pTempSchema ){ + zWhere = whereOrName(db, zWhere, pTrig->zName); + } + } + } + if( zWhere ){ + char *zNew = sqlite3MPrintf(pParse->db, "type='trigger' AND (%s)", zWhere); + sqlite3DbFree(pParse->db, zWhere); + zWhere = zNew; + } + return zWhere; +} + +/* +** Generate code to drop and reload the internal representation of table +** pTab from the database, including triggers and temporary triggers. +** Argument zName is the name of the table in the database schema at +** the time the generated code is executed. This can be different from +** pTab->zName if this function is being called to code part of an +** "ALTER TABLE RENAME TO" statement. +*/ +static void reloadTableSchema(Parse *pParse, Table *pTab, const char *zName){ + Vdbe *v; + char *zWhere; + int iDb; /* Index of database containing pTab */ +#ifndef SQLITE_OMIT_TRIGGER + Trigger *pTrig; +#endif + + v = sqlite3GetVdbe(pParse); + if( NEVER(v==0) ) return; + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + assert( iDb>=0 ); + +#ifndef SQLITE_OMIT_TRIGGER + /* Drop any table triggers from the internal schema. */ + for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ + int iTrigDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); + assert( iTrigDb==iDb || iTrigDb==1 ); + sqlite3VdbeAddOp4(v, OP_DropTrigger, iTrigDb, 0, 0, pTrig->zName, 0); + } +#endif + + /* Drop the table and index from the internal schema. */ + sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); + + /* Reload the table, index and permanent trigger schemas. */ + zWhere = sqlite3MPrintf(pParse->db, "tbl_name=%Q", zName); + if( !zWhere ) return; + sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); + +#ifndef SQLITE_OMIT_TRIGGER + /* Now, if the table is not stored in the temp database, reload any temp + ** triggers. Don't use IN(...) in case SQLITE_OMIT_SUBQUERY is defined. + */ + if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ + sqlite3VdbeAddParseSchemaOp(v, 1, zWhere); + } +#endif +} + +/* +** Parameter zName is the name of a table that is about to be altered +** (either with ALTER TABLE ... RENAME TO or ALTER TABLE ... ADD COLUMN). +** If the table is a system table, this function leaves an error message +** in pParse->zErr (system tables may not be altered) and returns non-zero. +** +** Or, if zName is not a system table, zero is returned. +*/ +static int isSystemTable(Parse *pParse, const char *zName){ + if( sqlite3Strlen30(zName)>6 && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){ + sqlite3ErrorMsg(pParse, "table %s may not be altered", zName); + return 1; + } + return 0; +} + +/* +** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy" +** command. +*/ +SQLITE_PRIVATE void sqlite3AlterRenameTable( + Parse *pParse, /* Parser context. */ + SrcList *pSrc, /* The table to rename. */ + Token *pName /* The new table name. 
*/ +){ + int iDb; /* Database that contains the table */ + char *zDb; /* Name of database iDb */ + Table *pTab; /* Table being renamed */ + char *zName = 0; /* NULL-terminated version of pName */ + sqlite3 *db = pParse->db; /* Database connection */ + int nTabName; /* Number of UTF-8 characters in zTabName */ + const char *zTabName; /* Original name of the table */ + Vdbe *v; +#ifndef SQLITE_OMIT_TRIGGER + char *zWhere = 0; /* Where clause to locate temp triggers */ +#endif + VTable *pVTab = 0; /* Non-zero if this is a v-tab with an xRename() */ + int savedDbFlags; /* Saved value of db->flags */ + + savedDbFlags = db->flags; + if( NEVER(db->mallocFailed) ) goto exit_rename_table; + assert( pSrc->nSrc==1 ); + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + + pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); + if( !pTab ) goto exit_rename_table; + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + zDb = db->aDb[iDb].zDbSName; + db->flags |= SQLITE_PreferBuiltin; + + /* Get a NULL terminated version of the new table name. */ + zName = sqlite3NameFromToken(db, pName); + if( !zName ) goto exit_rename_table; + + /* Check that a table or index named 'zName' does not already exist + ** in database iDb. If so, this is an error. + */ + if( sqlite3FindTable(db, zName, zDb) || sqlite3FindIndex(db, zName, zDb) ){ + sqlite3ErrorMsg(pParse, + "there is already another table or index with this name: %s", zName); + goto exit_rename_table; + } + + /* Make sure it is not a system table being altered, or a reserved name + ** that the table is being renamed to. + */ + if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ){ + goto exit_rename_table; + } + if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto + exit_rename_table; + } + +#ifndef SQLITE_OMIT_VIEW + if( pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "view %s may not be altered", pTab->zName); + goto exit_rename_table; + } +#endif + +#ifndef SQLITE_OMIT_AUTHORIZATION + /* Invoke the authorization callback. */ + if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ + goto exit_rename_table; + } +#endif + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto exit_rename_table; + } + if( IsVirtual(pTab) ){ + pVTab = sqlite3GetVTable(db, pTab); + if( pVTab->pVtab->pModule->xRename==0 ){ + pVTab = 0; + } + } +#endif + + /* Begin a transaction for database iDb. + ** Then modify the schema cookie (since the ALTER TABLE modifies the + ** schema). Open a statement transaction if the table is a virtual + ** table. + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ){ + goto exit_rename_table; + } + sqlite3BeginWriteOperation(pParse, pVTab!=0, iDb); + sqlite3ChangeCookie(pParse, iDb); + + /* If this is a virtual table, invoke the xRename() function if + ** one is defined. The xRename() callback will modify the names + ** of any resources used by the v-table implementation (including other + ** SQLite tables) that are identified by the name of the virtual table. 
+ */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( pVTab ){ + int i = ++pParse->nMem; + sqlite3VdbeLoadString(v, i, zName); + sqlite3VdbeAddOp4(v, OP_VRename, i, 0, 0,(const char*)pVTab, P4_VTAB); + sqlite3MayAbort(pParse); + } +#endif + + /* figure out how many UTF-8 characters are in zName */ + zTabName = pTab->zName; + nTabName = sqlite3Utf8CharLen(zTabName, -1); + +#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) + if( db->flags&SQLITE_ForeignKeys ){ + /* If foreign-key support is enabled, rewrite the CREATE TABLE + ** statements corresponding to all child tables of foreign key constraints + ** for which the renamed table is the parent table. */ + if( (zWhere=whereForeignKeys(pParse, pTab))!=0 ){ + sqlite3NestedParse(pParse, + "UPDATE \"%w\".%s SET " + "sql = sqlite_rename_parent(sql, %Q, %Q) " + "WHERE %s;", zDb, MASTER_NAME, zTabName, zName, zWhere); + sqlite3DbFree(db, zWhere); + } + } +#endif + + /* Modify the sqlite_master table to use the new table name. */ + sqlite3NestedParse(pParse, + "UPDATE %Q.%s SET " +#ifdef SQLITE_OMIT_TRIGGER + "sql = sqlite_rename_table(sql, %Q), " +#else + "sql = CASE " + "WHEN type = 'trigger' THEN sqlite_rename_trigger(sql, %Q)" + "ELSE sqlite_rename_table(sql, %Q) END, " +#endif + "tbl_name = %Q, " + "name = CASE " + "WHEN type='table' THEN %Q " + "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN " + "'sqlite_autoindex_' || %Q || substr(name,%d+18) " + "ELSE name END " + "WHERE tbl_name=%Q COLLATE nocase AND " + "(type='table' OR type='index' OR type='trigger');", + zDb, MASTER_NAME, zName, zName, zName, +#ifndef SQLITE_OMIT_TRIGGER + zName, +#endif + zName, nTabName, zTabName + ); + +#ifndef SQLITE_OMIT_AUTOINCREMENT + /* If the sqlite_sequence table exists in this database, then update + ** it with the new table name. + */ + if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){ + sqlite3NestedParse(pParse, + "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q", + zDb, zName, pTab->zName); + } +#endif + +#ifndef SQLITE_OMIT_TRIGGER + /* If there are TEMP triggers on this table, modify the sqlite_temp_master + ** table. Don't do this if the table being ALTERed is itself located in + ** the temp database. + */ + if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ + sqlite3NestedParse(pParse, + "UPDATE sqlite_temp_master SET " + "sql = sqlite_rename_trigger(sql, %Q), " + "tbl_name = %Q " + "WHERE %s;", zName, zName, zWhere); + sqlite3DbFree(db, zWhere); + } +#endif + +#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) + if( db->flags&SQLITE_ForeignKeys ){ + FKey *p; + for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ + Table *pFrom = p->pFrom; + if( pFrom!=pTab ){ + reloadTableSchema(pParse, p->pFrom, pFrom->zName); + } + } + } +#endif + + /* Drop and reload the internal table schema. */ + reloadTableSchema(pParse, pTab, zName); + +exit_rename_table: + sqlite3SrcListDelete(db, pSrc); + sqlite3DbFree(db, zName); + db->flags = savedDbFlags; +} + +/* +** This function is called after an "ALTER TABLE ... ADD" statement +** has been parsed. Argument pColDef contains the text of the new +** column definition. +** +** The Table structure pParse->pNewTable was extended to include +** the new column during parsing. 
+*/ +SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ + Table *pNew; /* Copy of pParse->pNewTable */ + Table *pTab; /* Table being altered */ + int iDb; /* Database number */ + const char *zDb; /* Database name */ + const char *zTab; /* Table name */ + char *zCol; /* Null-terminated column definition */ + Column *pCol; /* The new column */ + Expr *pDflt; /* Default value for the new column */ + sqlite3 *db; /* The database connection; */ + Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ + int r1; /* Temporary registers */ + + db = pParse->db; + if( pParse->nErr || db->mallocFailed ) return; + assert( v!=0 ); + pNew = pParse->pNewTable; + assert( pNew ); + + assert( sqlite3BtreeHoldsAllMutexes(db) ); + iDb = sqlite3SchemaToIndex(db, pNew->pSchema); + zDb = db->aDb[iDb].zDbSName; + zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */ + pCol = &pNew->aCol[pNew->nCol-1]; + pDflt = pCol->pDflt; + pTab = sqlite3FindTable(db, zTab, zDb); + assert( pTab ); + +#ifndef SQLITE_OMIT_AUTHORIZATION + /* Invoke the authorization callback. */ + if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ + return; + } +#endif + + /* If the default value for the new column was specified with a + ** literal NULL, then set pDflt to 0. This simplifies checking + ** for an SQL NULL default below. + */ + assert( pDflt==0 || pDflt->op==TK_SPAN ); + if( pDflt && pDflt->pLeft->op==TK_NULL ){ + pDflt = 0; + } + + /* Check that the new column is not specified as PRIMARY KEY or UNIQUE. + ** If there is a NOT NULL constraint, then the default value for the + ** column must not be NULL. + */ + if( pCol->colFlags & COLFLAG_PRIMKEY ){ + sqlite3ErrorMsg(pParse, "Cannot add a PRIMARY KEY column"); + return; + } + if( pNew->pIndex ){ + sqlite3ErrorMsg(pParse, "Cannot add a UNIQUE column"); + return; + } + if( (db->flags&SQLITE_ForeignKeys) && pNew->pFKey && pDflt ){ + sqlite3ErrorMsg(pParse, + "Cannot add a REFERENCES column with non-NULL default value"); + return; + } + if( pCol->notNull && !pDflt ){ + sqlite3ErrorMsg(pParse, + "Cannot add a NOT NULL column with default value NULL"); + return; + } + + /* Ensure the default expression is something that sqlite3ValueFromExpr() + ** can handle (i.e. not CURRENT_TIME etc.) + */ + if( pDflt ){ + sqlite3_value *pVal = 0; + int rc; + rc = sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_BLOB, &pVal); + assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); + if( rc!=SQLITE_OK ){ + assert( db->mallocFailed == 1 ); + return; + } + if( !pVal ){ + sqlite3ErrorMsg(pParse, "Cannot add a column with non-constant default"); + return; + } + sqlite3ValueFree(pVal); + } + + /* Modify the CREATE TABLE statement. */ + zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); + if( zCol ){ + char *zEnd = &zCol[pColDef->n-1]; + int savedDbFlags = db->flags; + while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){ + *zEnd-- = '\0'; + } + db->flags |= SQLITE_PreferBuiltin; + sqlite3NestedParse(pParse, + "UPDATE \"%w\".%s SET " + "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " + "WHERE type = 'table' AND name = %Q", + zDb, MASTER_NAME, pNew->addColOffset, zCol, pNew->addColOffset+1, + zTab + ); + sqlite3DbFree(db, zCol); + db->flags = savedDbFlags; + } + + /* Make sure the schema version is at least 3. But do not upgrade + ** from less than 3 to 4, as that will corrupt any preexisting DESC + ** index. 
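+  ** (File format 3 is the first format that allows an added column to
+  ** carry a non-NULL default; file format 4 also changes the on-disk
+  ** meaning of DESC in index definitions, which is why the OP_SetCookie
+  ** below writes 3, never 4, when upgrading from an older format.)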
+  */
+  r1 = sqlite3GetTempReg(pParse);
+  sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT);
+  sqlite3VdbeUsesBtree(v, iDb);
+  sqlite3VdbeAddOp2(v, OP_AddImm, r1, -2);
+  sqlite3VdbeAddOp2(v, OP_IfPos, r1, sqlite3VdbeCurrentAddr(v)+2);
+  VdbeCoverage(v);
+  sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, 3);
+  sqlite3ReleaseTempReg(pParse, r1);
+
+  /* Reload the schema of the modified table. */
+  reloadTableSchema(pParse, pTab, pTab->zName);
+}
+
+/*
+** This function is called by the parser after the table-name in
+** an "ALTER TABLE <table-name> ADD" statement is parsed. Argument
+** pSrc is the full-name of the table being altered.
+**
+** This routine makes a (partial) copy of the Table structure
+** for the table being altered and sets Parse.pNewTable to point
+** to it. Routines called by the parser as the column definition
+** is parsed (i.e. sqlite3AddColumn()) add the new Column data to
+** the copy. The copy of the Table structure is deleted by tokenize.c
+** after parsing is finished.
+**
+** Routine sqlite3AlterFinishAddColumn() will be called to complete
+** coding the "ALTER TABLE ... ADD" statement.
+*/
+SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
+  Table *pNew;
+  Table *pTab;
+  Vdbe *v;
+  int iDb;
+  int i;
+  int nAlloc;
+  sqlite3 *db = pParse->db;
+
+  /* Look up the table being altered. */
+  assert( pParse->pNewTable==0 );
+  assert( sqlite3BtreeHoldsAllMutexes(db) );
+  if( db->mallocFailed ) goto exit_begin_add_column;
+  pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
+  if( !pTab ) goto exit_begin_add_column;
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  if( IsVirtual(pTab) ){
+    sqlite3ErrorMsg(pParse, "virtual tables may not be altered");
+    goto exit_begin_add_column;
+  }
+#endif
+
+  /* Make sure this is not an attempt to ALTER a view. */
+  if( pTab->pSelect ){
+    sqlite3ErrorMsg(pParse, "Cannot add a column to a view");
+    goto exit_begin_add_column;
+  }
+  if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ){
+    goto exit_begin_add_column;
+  }
+
+  assert( pTab->addColOffset>0 );
+  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+
+  /* Put a copy of the Table struct in Parse.pNewTable for the
+  ** sqlite3AddColumn() function and friends to modify.  But modify
+  ** the name by adding an "sqlite_altertab_" prefix.  By adding this
+  ** prefix, we ensure that the name will not collide with an existing
+  ** table because user tables are not allowed to have the "sqlite_"
+  ** prefix on their name.
+  */
+  pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table));
+  if( !pNew ) goto exit_begin_add_column;
+  pParse->pNewTable = pNew;
+  pNew->nTabRef = 1;
+  pNew->nCol = pTab->nCol;
+  assert( pNew->nCol>0 );
+  nAlloc = (((pNew->nCol-1)/8)*8)+8;
+  assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 );
+  pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc);
+  pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName);
+  if( !pNew->aCol || !pNew->zName ){
+    assert( db->mallocFailed );
+    goto exit_begin_add_column;
+  }
+  memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol);
+  for(i=0; i<pNew->nCol; i++){
+    Column *pCol = &pNew->aCol[i];
+    pCol->zName = sqlite3DbStrDup(db, pCol->zName);
+    pCol->zColl = 0;
+    pCol->pDflt = 0;
+  }
+  pNew->pSchema = db->aDb[iDb].pSchema;
+  pNew->addColOffset = pTab->addColOffset;
+  pNew->nTabRef = 1;
+
+  /* Begin a transaction and increment the schema cookie.
*/ + sqlite3BeginWriteOperation(pParse, 0, iDb); + v = sqlite3GetVdbe(pParse); + if( !v ) goto exit_begin_add_column; + sqlite3ChangeCookie(pParse, iDb); + +exit_begin_add_column: + sqlite3SrcListDelete(db, pSrc); + return; +} +#endif /* SQLITE_ALTER_TABLE */ + +/************** End of alter.c ***********************************************/ +/************** Begin file analyze.c *****************************************/ +/* +** 2005-07-08 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code associated with the ANALYZE command. +** +** The ANALYZE command gather statistics about the content of tables +** and indices. These statistics are made available to the query planner +** to help it make better decisions about how to perform queries. +** +** The following system tables are or have been supported: +** +** CREATE TABLE sqlite_stat1(tbl, idx, stat); +** CREATE TABLE sqlite_stat2(tbl, idx, sampleno, sample); +** CREATE TABLE sqlite_stat3(tbl, idx, nEq, nLt, nDLt, sample); +** CREATE TABLE sqlite_stat4(tbl, idx, nEq, nLt, nDLt, sample); +** +** Additional tables might be added in future releases of SQLite. +** The sqlite_stat2 table is not created or used unless the SQLite version +** is between 3.6.18 and 3.7.8, inclusive, and unless SQLite is compiled +** with SQLITE_ENABLE_STAT2. The sqlite_stat2 table is deprecated. +** The sqlite_stat2 table is superseded by sqlite_stat3, which is only +** created and used by SQLite versions 3.7.9 and later and with +** SQLITE_ENABLE_STAT3 defined. The functionality of sqlite_stat3 +** is a superset of sqlite_stat2. The sqlite_stat4 is an enhanced +** version of sqlite_stat3 and is only available when compiled with +** SQLITE_ENABLE_STAT4 and in SQLite versions 3.8.1 and later. It is +** not possible to enable both STAT3 and STAT4 at the same time. If they +** are both enabled, then STAT4 takes precedence. +** +** For most applications, sqlite_stat1 provides all the statistics required +** for the query planner to make good choices. +** +** Format of sqlite_stat1: +** +** There is normally one row per index, with the index identified by the +** name in the idx column. The tbl column is the name of the table to +** which the index belongs. In each such row, the stat column will be +** a string consisting of a list of integers. The first integer in this +** list is the number of rows in the index. (This is the same as the +** number of rows in the table, except for partial indices.) The second +** integer is the average number of rows in the index that have the same +** value in the first column of the index. The third integer is the average +** number of rows in the index that have the same value for the first two +** columns. The N-th integer (for N>1) is the average number of rows in +** the index which have the same value for the first N-1 columns. For +** a K-column index, there will be K+1 integers in the stat column. If +** the index is unique, then the last integer will be 1. +** +** The list of integers in the stat column can optionally be followed +** by the keyword "unordered". The "unordered" keyword, if it is present, +** must be separated from the last integer by a single space. 
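+** For example, the stat value "1000 25 1 unordered" describes a
+** two-column index over 1000 rows whose first column matches 25 rows
+** on average, whose complete key is unique, and which the planner
+** should treat as unsorted.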
If the +** "unordered" keyword is present, then the query planner assumes that +** the index is unordered and will not use the index for a range query. +** +** If the sqlite_stat1.idx column is NULL, then the sqlite_stat1.stat +** column contains a single integer which is the (estimated) number of +** rows in the table identified by sqlite_stat1.tbl. +** +** Format of sqlite_stat2: +** +** The sqlite_stat2 is only created and is only used if SQLite is compiled +** with SQLITE_ENABLE_STAT2 and if the SQLite version number is between +** 3.6.18 and 3.7.8. The "stat2" table contains additional information +** about the distribution of keys within an index. The index is identified by +** the "idx" column and the "tbl" column is the name of the table to which +** the index belongs. There are usually 10 rows in the sqlite_stat2 +** table for each index. +** +** The sqlite_stat2 entries for an index that have sampleno between 0 and 9 +** inclusive are samples of the left-most key value in the index taken at +** evenly spaced points along the index. Let the number of samples be S +** (10 in the standard build) and let C be the number of rows in the index. +** Then the sampled rows are given by: +** +** rownumber = (i*C*2 + C)/(S*2) +** +** For i between 0 and S-1. Conceptually, the index space is divided into +** S uniform buckets and the samples are the middle row from each bucket. +** +** The format for sqlite_stat2 is recorded here for legacy reference. This +** version of SQLite does not support sqlite_stat2. It neither reads nor +** writes the sqlite_stat2 table. This version of SQLite only supports +** sqlite_stat3. +** +** Format for sqlite_stat3: +** +** The sqlite_stat3 format is a subset of sqlite_stat4. Hence, the +** sqlite_stat4 format will be described first. Further information +** about sqlite_stat3 follows the sqlite_stat4 description. +** +** Format for sqlite_stat4: +** +** As with sqlite_stat2, the sqlite_stat4 table contains histogram data +** to aid the query planner in choosing good indices based on the values +** that indexed columns are compared against in the WHERE clauses of +** queries. +** +** The sqlite_stat4 table contains multiple entries for each index. +** The idx column names the index and the tbl column is the table of the +** index. If the idx and tbl columns are the same, then the sample is +** of the INTEGER PRIMARY KEY. The sample column is a blob which is the +** binary encoding of a key from the index. The nEq column is a +** list of integers. The first integer is the approximate number +** of entries in the index whose left-most column exactly matches +** the left-most column of the sample. The second integer in nEq +** is the approximate number of entries in the index where the +** first two columns match the first two columns of the sample. +** And so forth. nLt is another list of integers that show the approximate +** number of entries that are strictly less than the sample. The first +** integer in nLt contains the number of entries in the index where the +** left-most column is less than the left-most column of the sample. +** The K-th integer in the nLt entry is the number of index entries +** where the first K columns are less than the first K columns of the +** sample. The nDLt column is like nLt except that it contains the +** number of distinct entries in the index that are less than the +** sample. +** +** There can be an arbitrary number of sqlite_stat4 entries per index. 
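+** For example, for a single-column index over the values
+** 1, 1, 2, 3, 3, 3, a sample taken at the last row has nEq=3 (three
+** entries equal the sample), nLt=3 (entries 1, 1 and 2 sort before it)
+** and nDLt=2 (two distinct smaller values).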
+** The ANALYZE command will typically generate sqlite_stat4 tables
+** that contain between 10 and 40 samples which are distributed across
+** the key space, though not uniformly, and which include samples with
+** large nEq values.
+**
+** Format for sqlite_stat3 redux:
+**
+** The sqlite_stat3 table is like sqlite_stat4 except that it only
+** looks at the left-most column of the index.  The sqlite_stat3.sample
+** column contains the actual value of the left-most column instead
+** of a blob encoding of the complete index key as is found in
+** sqlite_stat4.sample.  The nEq, nLt, and nDLt entries of sqlite_stat3
+** all contain just a single integer which is the same as the first
+** integer in the equivalent columns in sqlite_stat4.
+*/
+#ifndef SQLITE_OMIT_ANALYZE
+/* #include "sqliteInt.h" */
+
+#if defined(SQLITE_ENABLE_STAT4)
+# define IsStat4     1
+# define IsStat3     0
+#elif defined(SQLITE_ENABLE_STAT3)
+# define IsStat4     0
+# define IsStat3     1
+#else
+# define IsStat4     0
+# define IsStat3     0
+# undef SQLITE_STAT4_SAMPLES
+# define SQLITE_STAT4_SAMPLES 1
+#endif
+#define IsStat34    (IsStat3+IsStat4) /* 1 for STAT3 or STAT4. 0 otherwise */
+
+/*
+** This routine generates code that opens the sqlite_statN tables.
+** The sqlite_stat1 table is always relevant.  sqlite_stat2 is now
+** obsolete.  sqlite_stat3 and sqlite_stat4 are only opened when
+** appropriate compile-time options are provided.
+**
+** If the sqlite_statN tables do not previously exist, they are created.
+**
+** Argument zWhere may be a pointer to a buffer containing a table name,
+** or it may be a NULL pointer. If it is not NULL, then all entries in
+** the sqlite_statN tables associated with the named table are deleted.
+** If zWhere==0, then code is generated to delete all stat table entries.
+*/
+static void openStatTable(
+  Parse *pParse,          /* Parsing context */
+  int iDb,                /* The database we are looking in */
+  int iStatCur,           /* Open the sqlite_stat1 table on this cursor */
+  const char *zWhere,     /* Delete entries for this table or index */
+  const char *zWhereType  /* Either "tbl" or "idx" */
+){
+  static const struct {
+    const char *zName;
+    const char *zCols;
+  } aTable[] = {
+    { "sqlite_stat1", "tbl,idx,stat" },
+#if defined(SQLITE_ENABLE_STAT4)
+    { "sqlite_stat4", "tbl,idx,neq,nlt,ndlt,sample" },
+    { "sqlite_stat3", 0 },
+#elif defined(SQLITE_ENABLE_STAT3)
+    { "sqlite_stat3", "tbl,idx,neq,nlt,ndlt,sample" },
+    { "sqlite_stat4", 0 },
+#else
+    { "sqlite_stat3", 0 },
+    { "sqlite_stat4", 0 },
+#endif
+  };
+  int i;
+  sqlite3 *db = pParse->db;
+  Db *pDb;
+  Vdbe *v = sqlite3GetVdbe(pParse);
+  int aRoot[ArraySize(aTable)];
+  u8 aCreateTbl[ArraySize(aTable)];
+
+  if( v==0 ) return;
+  assert( sqlite3BtreeHoldsAllMutexes(db) );
+  assert( sqlite3VdbeDb(v)==db );
+  pDb = &db->aDb[iDb];
+
+  /* Create new statistic tables if they do not exist, or clear them
+  ** if they do already exist.
+  */
+  for(i=0; i<ArraySize(aTable); i++){
+    const char *zTab = aTable[i].zName;
+    Table *pStat;
+    if( (pStat = sqlite3FindTable(db, zTab, pDb->zDbSName))==0 ){
+      if( aTable[i].zCols ){
+        /* The sqlite_statN table does not exist. Create it. Note that a
+        ** side-effect of the CREATE TABLE statement is to leave the rootpage
+        ** of the new table in register pParse->regRoot. This is important
+        ** because the OpenWrite opcode below will be needing it. */
+        sqlite3NestedParse(pParse,
+            "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols
+        );
+        aRoot[i] = pParse->regRoot;
+        aCreateTbl[i] = OPFLAG_P2ISREG;
+      }
+    }else{
+      /* The table already exists. If zWhere is not NULL, delete all entries
+      ** associated with the table zWhere.
+      ** If zWhere is NULL, delete the entire contents of the table. */
+      aRoot[i] = pStat->tnum;
+      aCreateTbl[i] = 0;
+      sqlite3TableLock(pParse, iDb, aRoot[i], 1, zTab);
+      if( zWhere ){
+        sqlite3NestedParse(pParse,
+           "DELETE FROM %Q.%s WHERE %s=%Q",
+           pDb->zDbSName, zTab, zWhereType, zWhere
+        );
+      }else{
+        /* The sqlite_stat[134] table already exists.  Delete all rows. */
+        sqlite3VdbeAddOp2(v, OP_Clear, aRoot[i], iDb);
+      }
+    }
+  }
+
+  /* Open the sqlite_stat[134] tables for writing. */
+  for(i=0; aTable[i].zCols; i++){
+    assert( i<ArraySize(aTable) );
+    sqlite3VdbeAddOp3(v, OP_OpenWrite, iStatCur+i, aRoot[i], iDb);
+    sqlite3VdbeChangeP5(v, aCreateTbl[i]);
+    VdbeComment((v, aTable[i].zName));
+  }
+}
+
+/*
+** Recommended number of samples for sqlite_stat4
+*/
+#ifndef SQLITE_STAT4_SAMPLES
+# define SQLITE_STAT4_SAMPLES 24
+#endif
+
+/*
+** Three SQL functions - stat_init(), stat_push(), and stat_get() -
+** share an instance of the following structure to hold their state
+** information.
+*/
+typedef struct Stat4Accum Stat4Accum;
+typedef struct Stat4Sample Stat4Sample;
+struct Stat4Sample {
+  tRowcnt *anEq;                  /* sqlite_stat4.nEq */
+  tRowcnt *anDLt;                 /* sqlite_stat4.nDLt */
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  tRowcnt *anLt;                  /* sqlite_stat4.nLt */
+  union {
+    i64 iRowid;                   /* Rowid in main table of the key */
+    u8 *aRowid;                   /* Key for WITHOUT ROWID tables */
+  } u;
+  u32 nRowid;                     /* Sizeof aRowid[] */
+  u8 isPSample;                   /* True if a periodic sample */
+  int iCol;                       /* If !isPSample, the reason for inclusion */
+  u32 iHash;                      /* Tiebreaker hash */
+#endif
+};
+struct Stat4Accum {
+  tRowcnt nRow;             /* Number of rows in the entire table */
+  tRowcnt nPSample;         /* How often to do a periodic sample */
+  int nCol;                 /* Number of columns in index + pk/rowid */
+  int nKeyCol;              /* Number of index columns w/o the pk/rowid */
+  int mxSample;             /* Maximum number of samples to accumulate */
+  Stat4Sample current;      /* Current row as a Stat4Sample */
+  u32 iPrn;                 /* Pseudo-random number used for sampling */
+  Stat4Sample *aBest;       /* Array of nCol best samples */
+  int iMin;                 /* Index in a[] of entry with minimum score */
+  int nSample;              /* Current number of samples */
+  int iGet;                 /* Index of current sample accessed by stat_get() */
+  Stat4Sample *a;           /* Array of mxSample Stat4Sample objects */
+  sqlite3 *db;              /* Database connection, for malloc() */
+};
+
+/* Reclaim memory used by a Stat4Sample
+*/
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+static void sampleClear(sqlite3 *db, Stat4Sample *p){
+  assert( db!=0 );
+  if( p->nRowid ){
+    sqlite3DbFree(db, p->u.aRowid);
+    p->nRowid = 0;
+  }
+}
+#endif
+
+/* Initialize the BLOB value of a ROWID
+*/
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+static void sampleSetRowid(sqlite3 *db, Stat4Sample *p, int n, const u8 *pData){
+  assert( db!=0 );
+  if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid);
+  p->u.aRowid = sqlite3DbMallocRawNN(db, n);
+  if( p->u.aRowid ){
+    p->nRowid = n;
+    memcpy(p->u.aRowid, pData, n);
+  }else{
+    p->nRowid = 0;
+  }
+}
+#endif
+
+/* Initialize the INTEGER value of a ROWID.
+*/
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+static void sampleSetRowidInt64(sqlite3 *db, Stat4Sample *p, i64 iRowid){
+  assert( db!=0 );
+  if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid);
+  p->nRowid = 0;
+  p->u.iRowid = iRowid;
+}
+#endif
+
+
+/*
+** Copy the contents of object (*pFrom) into (*pTo).
+*/
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+static void sampleCopy(Stat4Accum *p, Stat4Sample *pTo, Stat4Sample *pFrom){
+  pTo->isPSample = pFrom->isPSample;
+  pTo->iCol = pFrom->iCol;
+  pTo->iHash = pFrom->iHash;
+  memcpy(pTo->anEq, pFrom->anEq, sizeof(tRowcnt)*p->nCol);
+  memcpy(pTo->anLt, pFrom->anLt, sizeof(tRowcnt)*p->nCol);
+  memcpy(pTo->anDLt, pFrom->anDLt, sizeof(tRowcnt)*p->nCol);
+  if( pFrom->nRowid ){
+    sampleSetRowid(p->db, pTo, pFrom->nRowid, pFrom->u.aRowid);
+  }else{
+    sampleSetRowidInt64(p->db, pTo, pFrom->u.iRowid);
+  }
+}
+#endif
+
+/*
+** Reclaim all memory of a Stat4Accum structure.
+*/
+static void stat4Destructor(void *pOld){
+  Stat4Accum *p = (Stat4Accum*)pOld;
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  int i;
+  for(i=0; i<p->nCol; i++) sampleClear(p->db, p->aBest+i);
+  for(i=0; i<p->mxSample; i++) sampleClear(p->db, p->a+i);
+  sampleClear(p->db, &p->current);
+#endif
+  sqlite3DbFree(p->db, p);
+}
+
+/*
+** Implementation of the stat_init(N,K,C) SQL function.  The three parameters
+** are:
+**     N:    The number of columns in the index including the rowid/pk (note 1)
+**     K:    The number of columns in the index excluding the rowid/pk.
+**     C:    The number of rows in the index (note 2)
+**
+** Note 1:  In the special case of the covering index that implements a
+** WITHOUT ROWID table, N is the number of PRIMARY KEY columns, not the
+** total number of columns in the table.
+**
+** Note 2:  C is only used for STAT3 and STAT4.
+**
+** For indexes on ordinary rowid tables, N==K+1.  But for indexes on
+** WITHOUT ROWID tables, N=K+P where P is the number of columns in the
+** PRIMARY KEY of the table.  The covering index that implements the
+** original WITHOUT ROWID table has N==K as a special case.
+**
+** This routine allocates the Stat4Accum object in heap memory. The return
+** value is a pointer to the Stat4Accum object.  The datatype of the
+** return value is BLOB, but it is really just a pointer to the Stat4Accum
+** object.
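+** For example, for a two-column index on an ordinary rowid table that
+** holds 1000 rows, the generated code effectively evaluates
+** stat_init(3, 2, 1000): three columns counting the rowid, two key
+** columns, and (in STAT3/STAT4 builds only) the row count.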
+*/
+static void statInit(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  Stat4Accum *p;
+  int nCol;                 /* Number of columns in index being sampled */
+  int nKeyCol;              /* Number of key columns */
+  int nColUp;               /* nCol rounded up for alignment */
+  int n;                    /* Bytes of space to allocate */
+  sqlite3 *db;              /* Database connection */
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  int mxSample = SQLITE_STAT4_SAMPLES;
+#endif
+
+  /* Decode the three function arguments */
+  UNUSED_PARAMETER(argc);
+  nCol = sqlite3_value_int(argv[0]);
+  assert( nCol>0 );
+  nColUp = sizeof(tRowcnt)<8 ? (nCol+1)&~1 : nCol;
+  nKeyCol = sqlite3_value_int(argv[1]);
+  assert( nKeyCol<=nCol );
+  assert( nKeyCol>0 );
+
+  /* Allocate the space required for the Stat4Accum object */
+  n = sizeof(*p)
+    + sizeof(tRowcnt)*nColUp                  /* Stat4Accum.anEq */
+    + sizeof(tRowcnt)*nColUp                  /* Stat4Accum.anDLt */
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+    + sizeof(tRowcnt)*nColUp                  /* Stat4Accum.anLt */
+    + sizeof(Stat4Sample)*(nCol+mxSample)     /* Stat4Accum.aBest[], a[] */
+    + sizeof(tRowcnt)*3*nColUp*(nCol+mxSample)
+#endif
+  ;
+  db = sqlite3_context_db_handle(context);
+  p = sqlite3DbMallocZero(db, n);
+  if( p==0 ){
+    sqlite3_result_error_nomem(context);
+    return;
+  }
+
+  p->db = db;
+  p->nRow = 0;
+  p->nCol = nCol;
+  p->nKeyCol = nKeyCol;
+  p->current.anDLt = (tRowcnt*)&p[1];
+  p->current.anEq = &p->current.anDLt[nColUp];
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  {
+    u8 *pSpace;                     /* Allocated space not yet assigned */
+    int i;                          /* Used to iterate through p->aSample[] */
+
+    p->iGet = -1;
+    p->mxSample = mxSample;
+    p->nPSample = (tRowcnt)(sqlite3_value_int64(argv[2])/(mxSample/3+1) + 1);
+    p->current.anLt = &p->current.anEq[nColUp];
+    p->iPrn = 0x689e962d*(u32)nCol ^ 0xd0944565*(u32)sqlite3_value_int(argv[2]);
+
+    /* Set up the Stat4Accum.a[] and aBest[] arrays */
+    p->a = (struct Stat4Sample*)&p->current.anLt[nColUp];
+    p->aBest = &p->a[mxSample];
+    pSpace = (u8*)(&p->a[mxSample+nCol]);
+    for(i=0; i<(mxSample+nCol); i++){
+      p->a[i].anEq = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
+      p->a[i].anLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
+      p->a[i].anDLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp);
+    }
+    assert( (pSpace - (u8*)p)==n );
+
+    for(i=0; i<nCol; i++){
+      p->aBest[i].iCol = i;
+    }
+  }
+#endif
+
+  /* Return a pointer to the allocated object to the caller.  Note that
+  ** only the pointer (the 2nd parameter) matters.  The size of the object
+  ** (given by the 3rd parameter) is never used and can be any positive
+  ** value. */
+  sqlite3_result_blob(context, p, sizeof(*p), stat4Destructor);
+}
+static const FuncDef statInitFuncdef = {
+  2+IsStat34,      /* nArg */
+  SQLITE_UTF8,     /* funcFlags */
+  0,               /* pUserData */
+  0,               /* pNext */
+  statInit,        /* xSFunc */
+  0,               /* xFinalize */
+  "stat_init",     /* zName */
+  {0}
+};
+
+#ifdef SQLITE_ENABLE_STAT4
+/*
+** pNew and pOld are both candidate non-periodic samples selected for
+** the same column (pNew->iCol==pOld->iCol). Ignoring this column and
+** considering only any trailing columns and the sample hash value, this
+** function returns true if sample pNew is to be preferred over pOld.
+** In other words, if we assume that the cardinalities of the selected
+** column for pNew and pOld are equal, is pNew to be preferred over pOld.
+**
+** This function assumes that for each argument sample, the contents of
+** the anEq[] array from pSample->anEq[pSample->iCol+1] onwards are valid.
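+** For example, with nCol==3 and pNew->iCol==pOld->iCol==1, pNew wins
+** as soon as pNew->anEq[2] exceeds pOld->anEq[2]; if all trailing
+** counts are equal, the pseudo-random iHash values break the tie so
+** that no particular row is systematically favored.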
+*/ +static int sampleIsBetterPost( + Stat4Accum *pAccum, + Stat4Sample *pNew, + Stat4Sample *pOld +){ + int nCol = pAccum->nCol; + int i; + assert( pNew->iCol==pOld->iCol ); + for(i=pNew->iCol+1; ianEq[i]>pOld->anEq[i] ) return 1; + if( pNew->anEq[i]anEq[i] ) return 0; + } + if( pNew->iHash>pOld->iHash ) return 1; + return 0; +} +#endif + +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 +/* +** Return true if pNew is to be preferred over pOld. +** +** This function assumes that for each argument sample, the contents of +** the anEq[] array from pSample->anEq[pSample->iCol] onwards are valid. +*/ +static int sampleIsBetter( + Stat4Accum *pAccum, + Stat4Sample *pNew, + Stat4Sample *pOld +){ + tRowcnt nEqNew = pNew->anEq[pNew->iCol]; + tRowcnt nEqOld = pOld->anEq[pOld->iCol]; + + assert( pOld->isPSample==0 && pNew->isPSample==0 ); + assert( IsStat4 || (pNew->iCol==0 && pOld->iCol==0) ); + + if( (nEqNew>nEqOld) ) return 1; +#ifdef SQLITE_ENABLE_STAT4 + if( nEqNew==nEqOld ){ + if( pNew->iColiCol ) return 1; + return (pNew->iCol==pOld->iCol && sampleIsBetterPost(pAccum, pNew, pOld)); + } + return 0; +#else + return (nEqNew==nEqOld && pNew->iHash>pOld->iHash); +#endif +} + +/* +** Copy the contents of sample *pNew into the p->a[] array. If necessary, +** remove the least desirable sample from p->a[] to make room. +*/ +static void sampleInsert(Stat4Accum *p, Stat4Sample *pNew, int nEqZero){ + Stat4Sample *pSample = 0; + int i; + + assert( IsStat4 || nEqZero==0 ); + +#ifdef SQLITE_ENABLE_STAT4 + if( pNew->isPSample==0 ){ + Stat4Sample *pUpgrade = 0; + assert( pNew->anEq[pNew->iCol]>0 ); + + /* This sample is being added because the prefix that ends in column + ** iCol occurs many times in the table. However, if we have already + ** added a sample that shares this prefix, there is no need to add + ** this one. Instead, upgrade the priority of the highest priority + ** existing sample that shares this prefix. */ + for(i=p->nSample-1; i>=0; i--){ + Stat4Sample *pOld = &p->a[i]; + if( pOld->anEq[pNew->iCol]==0 ){ + if( pOld->isPSample ) return; + assert( pOld->iCol>pNew->iCol ); + assert( sampleIsBetter(p, pNew, pOld) ); + if( pUpgrade==0 || sampleIsBetter(p, pOld, pUpgrade) ){ + pUpgrade = pOld; + } + } + } + if( pUpgrade ){ + pUpgrade->iCol = pNew->iCol; + pUpgrade->anEq[pUpgrade->iCol] = pNew->anEq[pUpgrade->iCol]; + goto find_new_min; + } + } +#endif + + /* If necessary, remove sample iMin to make room for the new sample. */ + if( p->nSample>=p->mxSample ){ + Stat4Sample *pMin = &p->a[p->iMin]; + tRowcnt *anEq = pMin->anEq; + tRowcnt *anLt = pMin->anLt; + tRowcnt *anDLt = pMin->anDLt; + sampleClear(p->db, pMin); + memmove(pMin, &pMin[1], sizeof(p->a[0])*(p->nSample-p->iMin-1)); + pSample = &p->a[p->nSample-1]; + pSample->nRowid = 0; + pSample->anEq = anEq; + pSample->anDLt = anDLt; + pSample->anLt = anLt; + p->nSample = p->mxSample-1; + } + + /* The "rows less-than" for the rowid column must be greater than that + ** for the last sample in the p->a[] array. Otherwise, the samples would + ** be out of order. */ +#ifdef SQLITE_ENABLE_STAT4 + assert( p->nSample==0 + || pNew->anLt[p->nCol-1] > p->a[p->nSample-1].anLt[p->nCol-1] ); +#endif + + /* Insert the new sample */ + pSample = &p->a[p->nSample]; + sampleCopy(p, pSample, pNew); + p->nSample++; + + /* Zero the first nEqZero entries in the anEq[] array. 
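+  ** A zero entry marks the count as not yet known: samplePushPrevious()
+  ** later fills any anEq[] slot that is still zero from p->current once
+  ** the scan has passed the last row sharing the prefix.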
*/ + memset(pSample->anEq, 0, sizeof(tRowcnt)*nEqZero); + +#ifdef SQLITE_ENABLE_STAT4 + find_new_min: +#endif + if( p->nSample>=p->mxSample ){ + int iMin = -1; + for(i=0; imxSample; i++){ + if( p->a[i].isPSample ) continue; + if( iMin<0 || sampleIsBetter(p, &p->a[iMin], &p->a[i]) ){ + iMin = i; + } + } + assert( iMin>=0 ); + p->iMin = iMin; + } +} +#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ + +/* +** Field iChng of the index being scanned has changed. So at this point +** p->current contains a sample that reflects the previous row of the +** index. The value of anEq[iChng] and subsequent anEq[] elements are +** correct at this point. +*/ +static void samplePushPrevious(Stat4Accum *p, int iChng){ +#ifdef SQLITE_ENABLE_STAT4 + int i; + + /* Check if any samples from the aBest[] array should be pushed + ** into IndexSample.a[] at this point. */ + for(i=(p->nCol-2); i>=iChng; i--){ + Stat4Sample *pBest = &p->aBest[i]; + pBest->anEq[i] = p->current.anEq[i]; + if( p->nSamplemxSample || sampleIsBetter(p, pBest, &p->a[p->iMin]) ){ + sampleInsert(p, pBest, i); + } + } + + /* Update the anEq[] fields of any samples already collected. */ + for(i=p->nSample-1; i>=0; i--){ + int j; + for(j=iChng; jnCol; j++){ + if( p->a[i].anEq[j]==0 ) p->a[i].anEq[j] = p->current.anEq[j]; + } + } +#endif + +#if defined(SQLITE_ENABLE_STAT3) && !defined(SQLITE_ENABLE_STAT4) + if( iChng==0 ){ + tRowcnt nLt = p->current.anLt[0]; + tRowcnt nEq = p->current.anEq[0]; + + /* Check if this is to be a periodic sample. If so, add it. */ + if( (nLt/p->nPSample)!=(nLt+nEq)/p->nPSample ){ + p->current.isPSample = 1; + sampleInsert(p, &p->current, 0); + p->current.isPSample = 0; + }else + + /* Or if it is a non-periodic sample. Add it in this case too. */ + if( p->nSamplemxSample + || sampleIsBetter(p, &p->current, &p->a[p->iMin]) + ){ + sampleInsert(p, &p->current, 0); + } + } +#endif + +#ifndef SQLITE_ENABLE_STAT3_OR_STAT4 + UNUSED_PARAMETER( p ); + UNUSED_PARAMETER( iChng ); +#endif +} + +/* +** Implementation of the stat_push SQL function: stat_push(P,C,R) +** Arguments: +** +** P Pointer to the Stat4Accum object created by stat_init() +** C Index of left-most column to differ from previous row +** R Rowid for the current row. Might be a key record for +** WITHOUT ROWID tables. +** +** This SQL function always returns NULL. It's purpose it to accumulate +** statistical data and/or samples in the Stat4Accum object about the +** index being analyzed. The stat_get() SQL function will later be used to +** extract relevant information for constructing the sqlite_statN tables. +** +** The R parameter is only used for STAT3 and STAT4 +*/ +static void statPush( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int i; + + /* The three function arguments */ + Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]); + int iChng = sqlite3_value_int(argv[1]); + + UNUSED_PARAMETER( argc ); + UNUSED_PARAMETER( context ); + assert( p->nCol>0 ); + assert( iChngnCol ); + + if( p->nRow==0 ){ + /* This is the first call to this function. Do initialization. */ + for(i=0; inCol; i++) p->current.anEq[i] = 1; + }else{ + /* Second and subsequent calls get processed here */ + samplePushPrevious(p, iChng); + + /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply + ** to the current row of the index. 
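+    ** For example, if the previous index row was (a=1, b=2) and the
+    ** current row is (a=1, b=3), then iChng==1: anEq[0] is incremented
+    ** because the first column still matches, while anDLt[1] (and, for
+    ** STAT3/4, anLt[1]) are stepped and anEq[1] restarts at 1.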
*/ + for(i=0; icurrent.anEq[i]++; + } + for(i=iChng; inCol; i++){ + p->current.anDLt[i]++; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + p->current.anLt[i] += p->current.anEq[i]; +#endif + p->current.anEq[i] = 1; + } + } + p->nRow++; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( sqlite3_value_type(argv[2])==SQLITE_INTEGER ){ + sampleSetRowidInt64(p->db, &p->current, sqlite3_value_int64(argv[2])); + }else{ + sampleSetRowid(p->db, &p->current, sqlite3_value_bytes(argv[2]), + sqlite3_value_blob(argv[2])); + } + p->current.iHash = p->iPrn = p->iPrn*1103515245 + 12345; +#endif + +#ifdef SQLITE_ENABLE_STAT4 + { + tRowcnt nLt = p->current.anLt[p->nCol-1]; + + /* Check if this is to be a periodic sample. If so, add it. */ + if( (nLt/p->nPSample)!=(nLt+1)/p->nPSample ){ + p->current.isPSample = 1; + p->current.iCol = 0; + sampleInsert(p, &p->current, p->nCol-1); + p->current.isPSample = 0; + } + + /* Update the aBest[] array. */ + for(i=0; i<(p->nCol-1); i++){ + p->current.iCol = i; + if( i>=iChng || sampleIsBetterPost(p, &p->current, &p->aBest[i]) ){ + sampleCopy(p, &p->aBest[i], &p->current); + } + } + } +#endif +} +static const FuncDef statPushFuncdef = { + 2+IsStat34, /* nArg */ + SQLITE_UTF8, /* funcFlags */ + 0, /* pUserData */ + 0, /* pNext */ + statPush, /* xSFunc */ + 0, /* xFinalize */ + "stat_push", /* zName */ + {0} +}; + +#define STAT_GET_STAT1 0 /* "stat" column of stat1 table */ +#define STAT_GET_ROWID 1 /* "rowid" column of stat[34] entry */ +#define STAT_GET_NEQ 2 /* "neq" column of stat[34] entry */ +#define STAT_GET_NLT 3 /* "nlt" column of stat[34] entry */ +#define STAT_GET_NDLT 4 /* "ndlt" column of stat[34] entry */ + +/* +** Implementation of the stat_get(P,J) SQL function. This routine is +** used to query statistical information that has been gathered into +** the Stat4Accum object by prior calls to stat_push(). The P parameter +** has type BLOB but it is really just a pointer to the Stat4Accum object. +** The content to returned is determined by the parameter J +** which is one of the STAT_GET_xxxx values defined above. +** +** The stat_get(P,J) function is not available to generic SQL. It is +** inserted as part of a manually constructed bytecode program. (See +** the callStatGet() routine below.) It is guaranteed that the P +** parameter will always be a poiner to a Stat4Accum object, never a +** NULL. +** +** If neither STAT3 nor STAT4 are enabled, then J is always +** STAT_GET_STAT1 and is hence omitted and this routine becomes +** a one-parameter function, stat_get(P), that always returns the +** stat1 table entry information. +*/ +static void statGet( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]); +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + /* STAT3 and STAT4 have a parameter on this routine. */ + int eCall = sqlite3_value_int(argv[1]); + assert( argc==2 ); + assert( eCall==STAT_GET_STAT1 || eCall==STAT_GET_NEQ + || eCall==STAT_GET_ROWID || eCall==STAT_GET_NLT + || eCall==STAT_GET_NDLT + ); + if( eCall==STAT_GET_STAT1 ) +#else + assert( argc==1 ); +#endif + { + /* Return the value to store in the "stat" column of the sqlite_stat1 + ** table for this index. + ** + ** The value is a string composed of a list of integers describing + ** the index. The first integer in the list is the total number of + ** entries in the index. There is one additional integer in the list + ** for each indexed column. 
This additional integer is an estimate of + ** the number of rows matched by a stabbing query on the index using + ** a key with the corresponding number of fields. In other words, + ** if the index is on columns (a,b) and the sqlite_stat1 value is + ** "100 10 2", then SQLite estimates that: + ** + ** * the index contains 100 rows, + ** * "WHERE a=?" matches 10 rows, and + ** * "WHERE a=? AND b=?" matches 2 rows. + ** + ** If D is the count of distinct values and K is the total number of + ** rows, then each estimate is computed as: + ** + ** I = (K+D-1)/D + */ + char *z; + int i; + + char *zRet = sqlite3MallocZero( (p->nKeyCol+1)*25 ); + if( zRet==0 ){ + sqlite3_result_error_nomem(context); + return; + } + + sqlite3_snprintf(24, zRet, "%llu", (u64)p->nRow); + z = zRet + sqlite3Strlen30(zRet); + for(i=0; inKeyCol; i++){ + u64 nDistinct = p->current.anDLt[i] + 1; + u64 iVal = (p->nRow + nDistinct - 1) / nDistinct; + sqlite3_snprintf(24, z, " %llu", iVal); + z += sqlite3Strlen30(z); + assert( p->current.anEq[i] ); + } + assert( z[0]=='\0' && z>zRet ); + + sqlite3_result_text(context, zRet, -1, sqlite3_free); + } +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + else if( eCall==STAT_GET_ROWID ){ + if( p->iGet<0 ){ + samplePushPrevious(p, 0); + p->iGet = 0; + } + if( p->iGetnSample ){ + Stat4Sample *pS = p->a + p->iGet; + if( pS->nRowid==0 ){ + sqlite3_result_int64(context, pS->u.iRowid); + }else{ + sqlite3_result_blob(context, pS->u.aRowid, pS->nRowid, + SQLITE_TRANSIENT); + } + } + }else{ + tRowcnt *aCnt = 0; + + assert( p->iGetnSample ); + switch( eCall ){ + case STAT_GET_NEQ: aCnt = p->a[p->iGet].anEq; break; + case STAT_GET_NLT: aCnt = p->a[p->iGet].anLt; break; + default: { + aCnt = p->a[p->iGet].anDLt; + p->iGet++; + break; + } + } + + if( IsStat3 ){ + sqlite3_result_int64(context, (i64)aCnt[0]); + }else{ + char *zRet = sqlite3MallocZero(p->nCol * 25); + if( zRet==0 ){ + sqlite3_result_error_nomem(context); + }else{ + int i; + char *z = zRet; + for(i=0; inCol; i++){ + sqlite3_snprintf(24, z, "%llu ", (u64)aCnt[i]); + z += sqlite3Strlen30(z); + } + assert( z[0]=='\0' && z>zRet ); + z[-1] = '\0'; + sqlite3_result_text(context, zRet, -1, sqlite3_free); + } + } + } +#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ +#ifndef SQLITE_DEBUG + UNUSED_PARAMETER( argc ); +#endif +} +static const FuncDef statGetFuncdef = { + 1+IsStat34, /* nArg */ + SQLITE_UTF8, /* funcFlags */ + 0, /* pUserData */ + 0, /* pNext */ + statGet, /* xSFunc */ + 0, /* xFinalize */ + "stat_get", /* zName */ + {0} +}; + +static void callStatGet(Vdbe *v, int regStat4, int iParam, int regOut){ + assert( regOut!=regStat4 && regOut!=regStat4+1 ); +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + sqlite3VdbeAddOp2(v, OP_Integer, iParam, regStat4+1); +#elif SQLITE_DEBUG + assert( iParam==STAT_GET_STAT1 ); +#else + UNUSED_PARAMETER( iParam ); +#endif + sqlite3VdbeAddOp4(v, OP_Function0, 0, regStat4, regOut, + (char*)&statGetFuncdef, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, 1 + IsStat34); +} + +/* +** Generate code to do an analysis of all indices associated with +** a single table. 
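+** For example, "ANALYZE t1" reaches this routine once for table t1; the
+** generated code scans each index of t1 in turn, feeding every row
+** through stat_push() and then writing one sqlite_stat1 row per index
+** (plus sqlite_stat3/stat4 samples, when enabled) via stat_get().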
+*/ +static void analyzeOneTable( + Parse *pParse, /* Parser context */ + Table *pTab, /* Table whose indices are to be analyzed */ + Index *pOnlyIdx, /* If not NULL, only analyze this one index */ + int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */ + int iMem, /* Available memory locations begin here */ + int iTab /* Next available cursor */ +){ + sqlite3 *db = pParse->db; /* Database handle */ + Index *pIdx; /* An index to being analyzed */ + int iIdxCur; /* Cursor open on index being analyzed */ + int iTabCur; /* Table cursor */ + Vdbe *v; /* The virtual machine being built up */ + int i; /* Loop counter */ + int jZeroRows = -1; /* Jump from here if number of rows is zero */ + int iDb; /* Index of database containing pTab */ + u8 needTableCnt = 1; /* True to count the table */ + int regNewRowid = iMem++; /* Rowid for the inserted record */ + int regStat4 = iMem++; /* Register to hold Stat4Accum object */ + int regChng = iMem++; /* Index of changed index field */ +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + int regRowid = iMem++; /* Rowid argument passed to stat_push() */ +#endif + int regTemp = iMem++; /* Temporary use register */ + int regTabname = iMem++; /* Register containing table name */ + int regIdxname = iMem++; /* Register containing index name */ + int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ + int regPrev = iMem; /* MUST BE LAST (see below) */ + + pParse->nMem = MAX(pParse->nMem, iMem); + v = sqlite3GetVdbe(pParse); + if( v==0 || NEVER(pTab==0) ){ + return; + } + if( pTab->tnum==0 ){ + /* Do not gather statistics on views or virtual tables */ + return; + } + if( sqlite3_strlike("sqlite_%", pTab->zName, 0)==0 ){ + /* Do not gather statistics on system tables */ + return; + } + assert( sqlite3BtreeHoldsAllMutexes(db) ); + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDb>=0 ); + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); +#ifndef SQLITE_OMIT_AUTHORIZATION + if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0, + db->aDb[iDb].zDbSName ) ){ + return; + } +#endif + + /* Establish a read-lock on the table at the shared-cache level. + ** Open a read-only cursor on the table. Also allocate a cursor number + ** to use for scanning indexes (iIdxCur). No index cursor is opened at + ** this time though. */ + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + iTabCur = iTab++; + iIdxCur = iTab++; + pParse->nTab = MAX(pParse->nTab, iTab); + sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); + sqlite3VdbeLoadString(v, regTabname, pTab->zName); + + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + int nCol; /* Number of columns in pIdx. "N" */ + int addrRewind; /* Address of "OP_Rewind iIdxCur" */ + int addrNextRow; /* Address of "next_row:" */ + const char *zIdxName; /* Name of the index */ + int nColTest; /* Number of columns to test for changes */ + + if( pOnlyIdx && pOnlyIdx!=pIdx ) continue; + if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0; + if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){ + nCol = pIdx->nKeyCol; + zIdxName = pTab->zName; + nColTest = nCol - 1; + }else{ + nCol = pIdx->nColumn; + zIdxName = pIdx->zName; + nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1; + } + + /* Populate the register containing the index name. 
*/ + sqlite3VdbeLoadString(v, regIdxname, zIdxName); + VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName)); + + /* + ** Pseudo-code for loop that calls stat_push(): + ** + ** Rewind csr + ** if eof(csr) goto end_of_scan; + ** regChng = 0 + ** goto chng_addr_0; + ** + ** next_row: + ** regChng = 0 + ** if( idx(0) != regPrev(0) ) goto chng_addr_0 + ** regChng = 1 + ** if( idx(1) != regPrev(1) ) goto chng_addr_1 + ** ... + ** regChng = N + ** goto chng_addr_N + ** + ** chng_addr_0: + ** regPrev(0) = idx(0) + ** chng_addr_1: + ** regPrev(1) = idx(1) + ** ... + ** + ** endDistinctTest: + ** regRowid = idx(rowid) + ** stat_push(P, regChng, regRowid) + ** Next csr + ** if !eof(csr) goto next_row; + ** + ** end_of_scan: + */ + + /* Make sure there are enough memory cells allocated to accommodate + ** the regPrev array and a trailing rowid (the rowid slot is required + ** when building a record to insert into the sample column of + ** the sqlite_stat4 table. */ + pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); + + /* Open a read-only cursor on the index being analyzed. */ + assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); + sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb); + sqlite3VdbeSetP4KeyInfo(pParse, pIdx); + VdbeComment((v, "%s", pIdx->zName)); + + /* Invoke the stat_init() function. The arguments are: + ** + ** (1) the number of columns in the index including the rowid + ** (or for a WITHOUT ROWID table, the number of PK columns), + ** (2) the number of columns in the key without the rowid/pk + ** (3) the number of rows in the index, + ** + ** + ** The third argument is only used for STAT3 and STAT4 + */ +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+3); +#endif + sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat4+1); + sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regStat4+2); + sqlite3VdbeAddOp4(v, OP_Function0, 0, regStat4+1, regStat4, + (char*)&statInitFuncdef, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, 2+IsStat34); + + /* Implementation of the following: + ** + ** Rewind csr + ** if eof(csr) goto end_of_scan; + ** regChng = 0 + ** goto next_push_0; + ** + */ + addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); + VdbeCoverage(v); + sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); + addrNextRow = sqlite3VdbeCurrentAddr(v); + + if( nColTest>0 ){ + int endDistinctTest = sqlite3VdbeMakeLabel(v); + int *aGotoChng; /* Array of jump instruction addresses */ + aGotoChng = sqlite3DbMallocRawNN(db, sizeof(int)*nColTest); + if( aGotoChng==0 ) continue; + + /* + ** next_row: + ** regChng = 0 + ** if( idx(0) != regPrev(0) ) goto chng_addr_0 + ** regChng = 1 + ** if( idx(1) != regPrev(1) ) goto chng_addr_1 + ** ... + ** regChng = N + ** goto endDistinctTest + */ + sqlite3VdbeAddOp0(v, OP_Goto); + addrNextRow = sqlite3VdbeCurrentAddr(v); + if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){ + /* For a single-column UNIQUE index, once we have found a non-NULL + ** row, we know that all the rest will be distinct, so skip + ** subsequent distinctness tests. 
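+        ** A NULL in regPrev means the previous key was NULL.  Because a
+        ** UNIQUE index may contain any number of NULLs, rows are not yet
+        ** known to be distinct in that case, so the jump below is taken
+        ** only if regPrev is not NULL; otherwise the ordinary
+        ** column-compare test further down must still run.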
*/ + sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest); + VdbeCoverage(v); + } + for(i=0; iazColl[i]); + sqlite3VdbeAddOp2(v, OP_Integer, i, regChng); + sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp); + aGotoChng[i] = + sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ); + sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); + VdbeCoverage(v); + } + sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng); + sqlite3VdbeGoto(v, endDistinctTest); + + + /* + ** chng_addr_0: + ** regPrev(0) = idx(0) + ** chng_addr_1: + ** regPrev(1) = idx(1) + ** ... + */ + sqlite3VdbeJumpHere(v, addrNextRow-1); + for(i=0; ipTable); + int j, k, regKey; + regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol); + for(j=0; jnKeyCol; j++){ + k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); + assert( k>=0 && knCol ); + sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j); + VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName)); + } + sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid); + sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol); + } +#endif + assert( regChng==(regStat4+1) ); + sqlite3VdbeAddOp4(v, OP_Function0, 1, regStat4, regTemp, + (char*)&statPushFuncdef, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, 2+IsStat34); + sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v); + + /* Add the entry to the stat1 table. */ + callStatGet(v, regStat4, STAT_GET_STAT1, regStat1); + assert( "BBB"[0]==SQLITE_AFF_TEXT ); + sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); + sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + + /* Add the entries to the stat3 or stat4 table. */ +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + { + int regEq = regStat1; + int regLt = regStat1+1; + int regDLt = regStat1+2; + int regSample = regStat1+3; + int regCol = regStat1+4; + int regSampleRowid = regCol + nCol; + int addrNext; + int addrIsNull; + u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; + + pParse->nMem = MAX(pParse->nMem, regCol+nCol); + + addrNext = sqlite3VdbeCurrentAddr(v); + callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid); + addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid); + VdbeCoverage(v); + callStatGet(v, regStat4, STAT_GET_NEQ, regEq); + callStatGet(v, regStat4, STAT_GET_NLT, regLt); + callStatGet(v, regStat4, STAT_GET_NDLT, regDLt); + sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0); + /* We know that the regSampleRowid row exists because it was read by + ** the previous loop. Thus the not-found jump of seekOp will never + ** be taken */ + VdbeCoverageNeverTaken(v); +#ifdef SQLITE_ENABLE_STAT3 + sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iTabCur, 0, regSample); +#else + for(i=0; izName)); + sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1); + jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v); + sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname); + assert( "BBB"[0]==SQLITE_AFF_TEXT ); + sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); + sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); + sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3VdbeJumpHere(v, jZeroRows); + } +} + + +/* +** Generate code that will cause the most recent index analysis to +** be loaded into internal hash tables where is can be used. 
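+**
+** At run-time the OP_LoadAnalysis opcode invokes sqlite3AnalysisLoad()
+** (defined later in this file) for database iDb, re-reading
+** sqlite_stat1 and, if STAT3/4 support is compiled in, the
+** sqlite_stat3/4 tables as well.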
+*/ +static void loadAnalysis(Parse *pParse, int iDb){ + Vdbe *v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp1(v, OP_LoadAnalysis, iDb); + } +} + +/* +** Generate code that will do an analysis of an entire database +*/ +static void analyzeDatabase(Parse *pParse, int iDb){ + sqlite3 *db = pParse->db; + Schema *pSchema = db->aDb[iDb].pSchema; /* Schema of database iDb */ + HashElem *k; + int iStatCur; + int iMem; + int iTab; + + sqlite3BeginWriteOperation(pParse, 0, iDb); + iStatCur = pParse->nTab; + pParse->nTab += 3; + openStatTable(pParse, iDb, iStatCur, 0, 0); + iMem = pParse->nMem+1; + iTab = pParse->nTab; + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ + Table *pTab = (Table*)sqliteHashData(k); + analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab); + } + loadAnalysis(pParse, iDb); +} + +/* +** Generate code that will do an analysis of a single table in +** a database. If pOnlyIdx is not NULL then it is a single index +** in pTab that should be analyzed. +*/ +static void analyzeTable(Parse *pParse, Table *pTab, Index *pOnlyIdx){ + int iDb; + int iStatCur; + + assert( pTab!=0 ); + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + sqlite3BeginWriteOperation(pParse, 0, iDb); + iStatCur = pParse->nTab; + pParse->nTab += 3; + if( pOnlyIdx ){ + openStatTable(pParse, iDb, iStatCur, pOnlyIdx->zName, "idx"); + }else{ + openStatTable(pParse, iDb, iStatCur, pTab->zName, "tbl"); + } + analyzeOneTable(pParse, pTab, pOnlyIdx, iStatCur,pParse->nMem+1,pParse->nTab); + loadAnalysis(pParse, iDb); +} + +/* +** Generate code for the ANALYZE command. The parser calls this routine +** when it recognizes an ANALYZE command. +** +** ANALYZE -- 1 +** ANALYZE -- 2 +** ANALYZE ?.? -- 3 +** +** Form 1 causes all indices in all attached databases to be analyzed. +** Form 2 analyzes all indices the single database named. +** Form 3 analyzes all indices associated with the named table. +*/ +SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){ + sqlite3 *db = pParse->db; + int iDb; + int i; + char *z, *zDb; + Table *pTab; + Index *pIdx; + Token *pTableName; + Vdbe *v; + + /* Read the database schema. If an error occurs, leave an error message + ** and code in pParse and return NULL. 
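+  ** The three forms described in the header comment above are
+  ** dispatched below: a NULL pName1 selects form 1, an empty pName2
+  ** selects form 2 (which also handles a bare table or index name),
+  ** and a two-part name selects form 3.  For example:
+  **
+  **     ANALYZE;             -- form 1: every attached database
+  **     ANALYZE main;        -- form 2: one database (or one table)
+  **     ANALYZE main.t1;     -- form 3: one table or index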
*/ + assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + return; + } + + assert( pName2!=0 || pName1==0 ); + if( pName1==0 ){ + /* Form 1: Analyze everything */ + for(i=0; inDb; i++){ + if( i==1 ) continue; /* Do not analyze the TEMP database */ + analyzeDatabase(pParse, i); + } + }else if( pName2->n==0 ){ + /* Form 2: Analyze the database or table named */ + iDb = sqlite3FindDb(db, pName1); + if( iDb>=0 ){ + analyzeDatabase(pParse, iDb); + }else{ + z = sqlite3NameFromToken(db, pName1); + if( z ){ + if( (pIdx = sqlite3FindIndex(db, z, 0))!=0 ){ + analyzeTable(pParse, pIdx->pTable, pIdx); + }else if( (pTab = sqlite3LocateTable(pParse, 0, z, 0))!=0 ){ + analyzeTable(pParse, pTab, 0); + } + sqlite3DbFree(db, z); + } + } + }else{ + /* Form 3: Analyze the fully qualified table name */ + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName); + if( iDb>=0 ){ + zDb = db->aDb[iDb].zDbSName; + z = sqlite3NameFromToken(db, pTableName); + if( z ){ + if( (pIdx = sqlite3FindIndex(db, z, zDb))!=0 ){ + analyzeTable(pParse, pIdx->pTable, pIdx); + }else if( (pTab = sqlite3LocateTable(pParse, 0, z, zDb))!=0 ){ + analyzeTable(pParse, pTab, 0); + } + sqlite3DbFree(db, z); + } + } + } + v = sqlite3GetVdbe(pParse); + if( v ) sqlite3VdbeAddOp0(v, OP_Expire); +} + +/* +** Used to pass information from the analyzer reader through to the +** callback routine. +*/ +typedef struct analysisInfo analysisInfo; +struct analysisInfo { + sqlite3 *db; + const char *zDatabase; +}; + +/* +** The first argument points to a nul-terminated string containing a +** list of space separated integers. Read the first nOut of these into +** the array aOut[]. +*/ +static void decodeIntArray( + char *zIntArray, /* String containing int array to decode */ + int nOut, /* Number of slots in aOut[] */ + tRowcnt *aOut, /* Store integers here */ + LogEst *aLog, /* Or, if aOut==0, here */ + Index *pIndex /* Handle extra flags for this index, if not NULL */ +){ + char *z = zIntArray; + int c; + int i; + tRowcnt v; + +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( z==0 ) z = ""; +#else + assert( z!=0 ); +#endif + for(i=0; *z && i='0' && c<='9' ){ + v = v*10 + c - '0'; + z++; + } +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( aOut ) aOut[i] = v; + if( aLog ) aLog[i] = sqlite3LogEst(v); +#else + assert( aOut==0 ); + UNUSED_PARAMETER(aOut); + assert( aLog!=0 ); + aLog[i] = sqlite3LogEst(v); +#endif + if( *z==' ' ) z++; + } +#ifndef SQLITE_ENABLE_STAT3_OR_STAT4 + assert( pIndex!=0 ); { +#else + if( pIndex ){ +#endif + pIndex->bUnordered = 0; + pIndex->noSkipScan = 0; + while( z[0] ){ + if( sqlite3_strglob("unordered*", z)==0 ){ + pIndex->bUnordered = 1; + }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){ + pIndex->szIdxRow = sqlite3LogEst(sqlite3Atoi(z+3)); + }else if( sqlite3_strglob("noskipscan*", z)==0 ){ + pIndex->noSkipScan = 1; + } +#ifdef SQLITE_ENABLE_COSTMULT + else if( sqlite3_strglob("costmult=[0-9]*",z)==0 ){ + pIndex->pTable->costMult = sqlite3LogEst(sqlite3Atoi(z+9)); + } +#endif + while( z[0]!=0 && z[0]!=' ' ) z++; + while( z[0]==' ' ) z++; + } + } +} + +/* +** This callback is invoked once for each index when reading the +** sqlite_stat1 table. +** +** argv[0] = name of the table +** argv[1] = name of the index (might be NULL) +** argv[2] = results of analysis - on integer for each column +** +** Entries for which argv[1]==NULL simply record the number of rows in +** the table. 
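+**
+** For example (table and index names invented for illustration),
+** typical sqlite_stat1 content for a table t1 with an index i1 on
+** (a,b) might be:
+**
+**     tbl    idx    stat
+**     ---    ---    ----------
+**     t1     NULL   "100"          -- t1 holds roughly 100 rows
+**     t1     i1     "100 10 2"     -- decoded by decodeIntArray() above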
+*/ +static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){ + analysisInfo *pInfo = (analysisInfo*)pData; + Index *pIndex; + Table *pTable; + const char *z; + + assert( argc==3 ); + UNUSED_PARAMETER2(NotUsed, argc); + + if( argv==0 || argv[0]==0 || argv[2]==0 ){ + return 0; + } + pTable = sqlite3FindTable(pInfo->db, argv[0], pInfo->zDatabase); + if( pTable==0 ){ + return 0; + } + if( argv[1]==0 ){ + pIndex = 0; + }else if( sqlite3_stricmp(argv[0],argv[1])==0 ){ + pIndex = sqlite3PrimaryKeyIndex(pTable); + }else{ + pIndex = sqlite3FindIndex(pInfo->db, argv[1], pInfo->zDatabase); + } + z = argv[2]; + + if( pIndex ){ + tRowcnt *aiRowEst = 0; + int nCol = pIndex->nKeyCol+1; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + /* Index.aiRowEst may already be set here if there are duplicate + ** sqlite_stat1 entries for this index. In that case just clobber + ** the old data with the new instead of allocating a new array. */ + if( pIndex->aiRowEst==0 ){ + pIndex->aiRowEst = (tRowcnt*)sqlite3MallocZero(sizeof(tRowcnt) * nCol); + if( pIndex->aiRowEst==0 ) sqlite3OomFault(pInfo->db); + } + aiRowEst = pIndex->aiRowEst; +#endif + pIndex->bUnordered = 0; + decodeIntArray((char*)z, nCol, aiRowEst, pIndex->aiRowLogEst, pIndex); + if( pIndex->pPartIdxWhere==0 ) pTable->nRowLogEst = pIndex->aiRowLogEst[0]; + }else{ + Index fakeIdx; + fakeIdx.szIdxRow = pTable->szTabRow; +#ifdef SQLITE_ENABLE_COSTMULT + fakeIdx.pTable = pTable; +#endif + decodeIntArray((char*)z, 1, 0, &pTable->nRowLogEst, &fakeIdx); + pTable->szTabRow = fakeIdx.szIdxRow; + } + + return 0; +} + +/* +** If the Index.aSample variable is not NULL, delete the aSample[] array +** and its contents. +*/ +SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){ +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( pIdx->aSample ){ + int j; + for(j=0; jnSample; j++){ + IndexSample *p = &pIdx->aSample[j]; + sqlite3DbFree(db, p->p); + } + sqlite3DbFree(db, pIdx->aSample); + } + if( db && db->pnBytesFreed==0 ){ + pIdx->nSample = 0; + pIdx->aSample = 0; + } +#else + UNUSED_PARAMETER(db); + UNUSED_PARAMETER(pIdx); +#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ +} + +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 +/* +** Populate the pIdx->aAvgEq[] array based on the samples currently +** stored in pIdx->aSample[]. +*/ +static void initAvgEq(Index *pIdx){ + if( pIdx ){ + IndexSample *aSample = pIdx->aSample; + IndexSample *pFinal = &aSample[pIdx->nSample-1]; + int iCol; + int nCol = 1; + if( pIdx->nSampleCol>1 ){ + /* If this is stat4 data, then calculate aAvgEq[] values for all + ** sample columns except the last. The last is always set to 1, as + ** once the trailing PK fields are considered all index keys are + ** unique. */ + nCol = pIdx->nSampleCol-1; + pIdx->aAvgEq[nCol] = 1; + } + for(iCol=0; iColnSample; + int i; /* Used to iterate through samples */ + tRowcnt sumEq = 0; /* Sum of the nEq values */ + tRowcnt avgEq = 0; + tRowcnt nRow; /* Number of rows in index */ + i64 nSum100 = 0; /* Number of terms contributing to sumEq */ + i64 nDist100; /* Number of distinct values in index */ + + if( !pIdx->aiRowEst || iCol>=pIdx->nKeyCol || pIdx->aiRowEst[iCol+1]==0 ){ + nRow = pFinal->anLt[iCol]; + nDist100 = (i64)100 * pFinal->anDLt[iCol]; + nSample--; + }else{ + nRow = pIdx->aiRowEst[0]; + nDist100 = ((i64)100 * pIdx->aiRowEst[0]) / pIdx->aiRowEst[iCol+1]; + } + pIdx->nRowEst0 = nRow; + + /* Set nSum to the number of distinct (iCol+1) field prefixes that + ** occur in the stat4 table for this index. 
Set sumEq to the sum of + ** the nEq values for column iCol for the same set (adding the value + ** only once where there exist duplicate prefixes). */ + for(i=0; inSample-1) + || aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol] + ){ + sumEq += aSample[i].anEq[iCol]; + nSum100 += 100; + } + } + + if( nDist100>nSum100 && sumEqaAvgEq[iCol] = avgEq; + } + } +} + +/* +** Look up an index by name. Or, if the name of a WITHOUT ROWID table +** is supplied instead, find the PRIMARY KEY index for that table. +*/ +static Index *findIndexOrPrimaryKey( + sqlite3 *db, + const char *zName, + const char *zDb +){ + Index *pIdx = sqlite3FindIndex(db, zName, zDb); + if( pIdx==0 ){ + Table *pTab = sqlite3FindTable(db, zName, zDb); + if( pTab && !HasRowid(pTab) ) pIdx = sqlite3PrimaryKeyIndex(pTab); + } + return pIdx; +} + +/* +** Load the content from either the sqlite_stat4 or sqlite_stat3 table +** into the relevant Index.aSample[] arrays. +** +** Arguments zSql1 and zSql2 must point to SQL statements that return +** data equivalent to the following (statements are different for stat3, +** see the caller of this function for details): +** +** zSql1: SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx +** zSql2: SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4 +** +** where %Q is replaced with the database name before the SQL is executed. +*/ +static int loadStatTbl( + sqlite3 *db, /* Database handle */ + int bStat3, /* Assume single column records only */ + const char *zSql1, /* SQL statement 1 (see above) */ + const char *zSql2, /* SQL statement 2 (see above) */ + const char *zDb /* Database name (e.g. "main") */ +){ + int rc; /* Result codes from subroutines */ + sqlite3_stmt *pStmt = 0; /* An SQL statement being run */ + char *zSql; /* Text of the SQL statement */ + Index *pPrevIdx = 0; /* Previous index in the loop */ + IndexSample *pSample; /* A slot in pIdx->aSample[] */ + + assert( db->lookaside.bDisable ); + zSql = sqlite3MPrintf(db, zSql1, zDb); + if( !zSql ){ + return SQLITE_NOMEM_BKPT; + } + rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); + sqlite3DbFree(db, zSql); + if( rc ) return rc; + + while( sqlite3_step(pStmt)==SQLITE_ROW ){ + int nIdxCol = 1; /* Number of columns in stat4 records */ + + char *zIndex; /* Index name */ + Index *pIdx; /* Pointer to the index object */ + int nSample; /* Number of samples */ + int nByte; /* Bytes of space required */ + int i; /* Bytes of space required */ + tRowcnt *pSpace; + + zIndex = (char *)sqlite3_column_text(pStmt, 0); + if( zIndex==0 ) continue; + nSample = sqlite3_column_int(pStmt, 1); + pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); + assert( pIdx==0 || bStat3 || pIdx->nSample==0 ); + /* Index.nSample is non-zero at this point if data has already been + ** loaded from the stat4 table. In this case ignore stat3 data. 
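+    ** (The caller always loads sqlite_stat4 before sqlite_stat3 - see
+    ** loadStat4() below - so any index that already has samples must
+    ** have received them from the stat4 table.)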
*/ + if( pIdx==0 || pIdx->nSample ) continue; + if( bStat3==0 ){ + assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 ); + if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ + nIdxCol = pIdx->nKeyCol; + }else{ + nIdxCol = pIdx->nColumn; + } + } + pIdx->nSampleCol = nIdxCol; + nByte = sizeof(IndexSample) * nSample; + nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; + nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ + + pIdx->aSample = sqlite3DbMallocZero(db, nByte); + if( pIdx->aSample==0 ){ + sqlite3_finalize(pStmt); + return SQLITE_NOMEM_BKPT; + } + pSpace = (tRowcnt*)&pIdx->aSample[nSample]; + pIdx->aAvgEq = pSpace; pSpace += nIdxCol; + for(i=0; iaSample[i].anEq = pSpace; pSpace += nIdxCol; + pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol; + pIdx->aSample[i].anDLt = pSpace; pSpace += nIdxCol; + } + assert( ((u8*)pSpace)-nByte==(u8*)(pIdx->aSample) ); + } + rc = sqlite3_finalize(pStmt); + if( rc ) return rc; + + zSql = sqlite3MPrintf(db, zSql2, zDb); + if( !zSql ){ + return SQLITE_NOMEM_BKPT; + } + rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); + sqlite3DbFree(db, zSql); + if( rc ) return rc; + + while( sqlite3_step(pStmt)==SQLITE_ROW ){ + char *zIndex; /* Index name */ + Index *pIdx; /* Pointer to the index object */ + int nCol = 1; /* Number of columns in index */ + + zIndex = (char *)sqlite3_column_text(pStmt, 0); + if( zIndex==0 ) continue; + pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); + if( pIdx==0 ) continue; + /* This next condition is true if data has already been loaded from + ** the sqlite_stat4 table. In this case ignore stat3 data. */ + nCol = pIdx->nSampleCol; + if( bStat3 && nCol>1 ) continue; + if( pIdx!=pPrevIdx ){ + initAvgEq(pPrevIdx); + pPrevIdx = pIdx; + } + pSample = &pIdx->aSample[pIdx->nSample]; + decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0); + decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0); + decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0); + + /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer. + ** This is in case the sample record is corrupted. In that case, the + ** sqlite3VdbeRecordCompare() may read up to two varints past the + ** end of the allocated buffer before it realizes it is dealing with + ** a corrupt record. Adding the two 0x00 bytes prevents this from causing + ** a buffer overread. */ + pSample->n = sqlite3_column_bytes(pStmt, 4); + pSample->p = sqlite3DbMallocZero(db, pSample->n + 2); + if( pSample->p==0 ){ + sqlite3_finalize(pStmt); + return SQLITE_NOMEM_BKPT; + } + if( pSample->n ){ + memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n); + } + pIdx->nSample++; + } + rc = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ) initAvgEq(pPrevIdx); + return rc; +} + +/* +** Load content from the sqlite_stat4 and sqlite_stat3 tables into +** the Index.aSample[] arrays of all indices. 
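+**
+** sqlite_stat4 is consulted first.  sqlite_stat3 data is then used only
+** for indexes that obtained no stat4 samples, since a stat3 record
+** describes just the left-most column of its index while a stat4
+** record covers every column.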
+*/ +static int loadStat4(sqlite3 *db, const char *zDb){ + int rc = SQLITE_OK; /* Result codes from subroutines */ + + assert( db->lookaside.bDisable ); + if( sqlite3FindTable(db, "sqlite_stat4", zDb) ){ + rc = loadStatTbl(db, 0, + "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", + "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", + zDb + ); + } + + if( rc==SQLITE_OK && sqlite3FindTable(db, "sqlite_stat3", zDb) ){ + rc = loadStatTbl(db, 1, + "SELECT idx,count(*) FROM %Q.sqlite_stat3 GROUP BY idx", + "SELECT idx,neq,nlt,ndlt,sqlite_record(sample) FROM %Q.sqlite_stat3", + zDb + ); + } + + return rc; +} +#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ + +/* +** Load the content of the sqlite_stat1 and sqlite_stat3/4 tables. The +** contents of sqlite_stat1 are used to populate the Index.aiRowEst[] +** arrays. The contents of sqlite_stat3/4 are used to populate the +** Index.aSample[] arrays. +** +** If the sqlite_stat1 table is not present in the database, SQLITE_ERROR +** is returned. In this case, even if SQLITE_ENABLE_STAT3/4 was defined +** during compilation and the sqlite_stat3/4 table is present, no data is +** read from it. +** +** If SQLITE_ENABLE_STAT3/4 was defined during compilation and the +** sqlite_stat4 table is not present in the database, SQLITE_ERROR is +** returned. However, in this case, data is read from the sqlite_stat1 +** table (if it is present) before returning. +** +** If an OOM error occurs, this function always sets db->mallocFailed. +** This means if the caller does not care about other errors, the return +** code may be ignored. +*/ +SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ + analysisInfo sInfo; + HashElem *i; + char *zSql; + int rc = SQLITE_OK; + + assert( iDb>=0 && iDbnDb ); + assert( db->aDb[iDb].pBt!=0 ); + + /* Clear any prior statistics */ + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ + Index *pIdx = sqliteHashData(i); + pIdx->aiRowLogEst[0] = 0; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + sqlite3DeleteIndexSamples(db, pIdx); + pIdx->aSample = 0; +#endif + } + + /* Load new statistics out of the sqlite_stat1 table */ + sInfo.db = db; + sInfo.zDatabase = db->aDb[iDb].zDbSName; + if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)!=0 ){ + zSql = sqlite3MPrintf(db, + "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase); + if( zSql==0 ){ + rc = SQLITE_NOMEM_BKPT; + }else{ + rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); + sqlite3DbFree(db, zSql); + } + } + + /* Set appropriate defaults on all indexes not in the sqlite_stat1 table */ + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ + Index *pIdx = sqliteHashData(i); + if( pIdx->aiRowLogEst[0]==0 ) sqlite3DefaultRowEst(pIdx); + } + + /* Load the statistics from the sqlite_stat4 table. 
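+  ** Lookaside is disabled while the samples are loaded (hence the
+  ** assert()s on db->lookaside.bDisable in loadStatTbl() above) so
+  ** that the long-lived sample buffers are drawn from the general
+  ** heap instead of the connection's small lookaside pool.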
*/ +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + if( rc==SQLITE_OK && OptimizationEnabled(db, SQLITE_Stat34) ){ + db->lookaside.bDisable++; + rc = loadStat4(db, sInfo.zDatabase); + db->lookaside.bDisable--; + } + for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ + Index *pIdx = sqliteHashData(i); + sqlite3_free(pIdx->aiRowEst); + pIdx->aiRowEst = 0; + } +#endif + + if( rc==SQLITE_NOMEM ){ + sqlite3OomFault(db); + } + return rc; +} + + +#endif /* SQLITE_OMIT_ANALYZE */ + +/************** End of analyze.c *********************************************/ +/************** Begin file attach.c ******************************************/ +/* +** 2003 April 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used to implement the ATTACH and DETACH commands. +*/ +/* #include "sqliteInt.h" */ + +#ifndef SQLITE_OMIT_ATTACH +/* +** Resolve an expression that was part of an ATTACH or DETACH statement. This +** is slightly different from resolving a normal SQL expression, because simple +** identifiers are treated as strings, not possible column names or aliases. +** +** i.e. if the parser sees: +** +** ATTACH DATABASE abc AS def +** +** it treats the two expressions as literal strings 'abc' and 'def' instead of +** looking for columns of the same name. +** +** This only applies to the root node of pExpr, so the statement: +** +** ATTACH DATABASE abc||def AS 'db2' +** +** will fail because neither abc or def can be resolved. +*/ +static int resolveAttachExpr(NameContext *pName, Expr *pExpr) +{ + int rc = SQLITE_OK; + if( pExpr ){ + if( pExpr->op!=TK_ID ){ + rc = sqlite3ResolveExprNames(pName, pExpr); + }else{ + pExpr->op = TK_STRING; + } + } + return rc; +} + +/* +** An SQL user-function registered to do the work of an ATTACH statement. The +** three arguments to the function come directly from an attach statement: +** +** ATTACH DATABASE x AS y KEY z +** +** SELECT sqlite_attach(x, y, z) +** +** If the optional "KEY z" syntax is omitted, an SQL NULL is passed as the +** third argument. +*/ +static void attachFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + int i; + int rc = 0; + sqlite3 *db = sqlite3_context_db_handle(context); + const char *zName; + const char *zFile; + char *zPath = 0; + char *zErr = 0; + unsigned int flags; + Db *aNew; + char *zErrDyn = 0; + sqlite3_vfs *pVfs; + + UNUSED_PARAMETER(NotUsed); + + zFile = (const char *)sqlite3_value_text(argv[0]); + zName = (const char *)sqlite3_value_text(argv[1]); + if( zFile==0 ) zFile = ""; + if( zName==0 ) zName = ""; + + /* Check for the following errors: + ** + ** * Too many attached databases, + ** * Transaction currently open + ** * Specified database name already being used. 
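+  **
+  ** In the first test below, the "+2" allows for the "main" and "temp"
+  ** databases, which always occupy the first two slots of db->aDb[]
+  ** and do not count against the SQLITE_LIMIT_ATTACHED limit.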
+ */ + if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){ + zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d", + db->aLimit[SQLITE_LIMIT_ATTACHED] + ); + goto attach_error; + } + if( !db->autoCommit ){ + zErrDyn = sqlite3MPrintf(db, "cannot ATTACH database within transaction"); + goto attach_error; + } + for(i=0; inDb; i++){ + char *z = db->aDb[i].zDbSName; + assert( z && zName ); + if( sqlite3StrICmp(z, zName)==0 ){ + zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName); + goto attach_error; + } + } + + /* Allocate the new entry in the db->aDb[] array and initialize the schema + ** hash tables. + */ + if( db->aDb==db->aDbStatic ){ + aNew = sqlite3DbMallocRawNN(db, sizeof(db->aDb[0])*3 ); + if( aNew==0 ) return; + memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); + }else{ + aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); + if( aNew==0 ) return; + } + db->aDb = aNew; + aNew = &db->aDb[db->nDb]; + memset(aNew, 0, sizeof(*aNew)); + + /* Open the database file. If the btree is successfully opened, use + ** it to obtain the database schema. At this point the schema may + ** or may not be initialized. + */ + flags = db->openFlags; + rc = sqlite3ParseUri(db->pVfs->zName, zFile, &flags, &pVfs, &zPath, &zErr); + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + assert( pVfs ); + flags |= SQLITE_OPEN_MAIN_DB; + rc = sqlite3BtreeOpen(pVfs, zPath, db, &aNew->pBt, 0, flags); + sqlite3_free( zPath ); + db->nDb++; + db->skipBtreeMutex = 0; + if( rc==SQLITE_CONSTRAINT ){ + rc = SQLITE_ERROR; + zErrDyn = sqlite3MPrintf(db, "database is already attached"); + }else if( rc==SQLITE_OK ){ + Pager *pPager; + aNew->pSchema = sqlite3SchemaGet(db, aNew->pBt); + if( !aNew->pSchema ){ + rc = SQLITE_NOMEM_BKPT; + }else if( aNew->pSchema->file_format && aNew->pSchema->enc!=ENC(db) ){ + zErrDyn = sqlite3MPrintf(db, + "attached databases must use the same text encoding as main database"); + rc = SQLITE_ERROR; + } + sqlite3BtreeEnter(aNew->pBt); + pPager = sqlite3BtreePager(aNew->pBt); + sqlite3PagerLockingMode(pPager, db->dfltLockMode); + sqlite3BtreeSecureDelete(aNew->pBt, + sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) ); +#ifndef SQLITE_OMIT_PAGER_PRAGMAS + sqlite3BtreeSetPagerFlags(aNew->pBt, + PAGER_SYNCHRONOUS_FULL | (db->flags & PAGER_FLAGS_MASK)); +#endif + sqlite3BtreeLeave(aNew->pBt); + } + aNew->safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1; + aNew->zDbSName = sqlite3DbStrDup(db, zName); + if( rc==SQLITE_OK && aNew->zDbSName==0 ){ + rc = SQLITE_NOMEM_BKPT; + } + + +#ifdef SQLITE_HAS_CODEC + if( rc==SQLITE_OK ){ + extern int sqlite3CodecAttach(sqlite3*, int, const void*, int); + extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); + int nKey; + char *zKey; + int t = sqlite3_value_type(argv[2]); + switch( t ){ + case SQLITE_INTEGER: + case SQLITE_FLOAT: + zErrDyn = sqlite3DbStrDup(db, "Invalid key value"); + rc = SQLITE_ERROR; + break; + + case SQLITE_TEXT: + case SQLITE_BLOB: + nKey = sqlite3_value_bytes(argv[2]); + zKey = (char *)sqlite3_value_blob(argv[2]); + rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); + break; + + case SQLITE_NULL: + /* No key specified. 
Use the key from the main database */ + sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); + if( nKey || sqlite3BtreeGetOptimalReserve(db->aDb[0].pBt)>0 ){ + rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); + } + break; + } + } +#endif + + /* If the file was opened successfully, read the schema for the new database. + ** If this fails, or if opening the file failed, then close the file and + ** remove the entry from the db->aDb[] array. i.e. put everything back the way + ** we found it. + */ + if( rc==SQLITE_OK ){ + sqlite3BtreeEnterAll(db); + rc = sqlite3Init(db, &zErrDyn); + sqlite3BtreeLeaveAll(db); + } +#ifdef SQLITE_USER_AUTHENTICATION + if( rc==SQLITE_OK ){ + u8 newAuth = 0; + rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth); + if( newAuthauth.authLevel ){ + rc = SQLITE_AUTH_USER; + } + } +#endif + if( rc ){ + int iDb = db->nDb - 1; + assert( iDb>=2 ); + if( db->aDb[iDb].pBt ){ + sqlite3BtreeClose(db->aDb[iDb].pBt); + db->aDb[iDb].pBt = 0; + db->aDb[iDb].pSchema = 0; + } + sqlite3ResetAllSchemasOfConnection(db); + db->nDb = iDb; + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + sqlite3OomFault(db); + sqlite3DbFree(db, zErrDyn); + zErrDyn = sqlite3MPrintf(db, "out of memory"); + }else if( zErrDyn==0 ){ + zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile); + } + goto attach_error; + } + + return; + +attach_error: + /* Return an error if we get here */ + if( zErrDyn ){ + sqlite3_result_error(context, zErrDyn, -1); + sqlite3DbFree(db, zErrDyn); + } + if( rc ) sqlite3_result_error_code(context, rc); +} + +/* +** An SQL user-function registered to do the work of an DETACH statement. The +** three arguments to the function come directly from a detach statement: +** +** DETACH DATABASE x +** +** SELECT sqlite_detach(x) +*/ +static void detachFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + const char *zName = (const char *)sqlite3_value_text(argv[0]); + sqlite3 *db = sqlite3_context_db_handle(context); + int i; + Db *pDb = 0; + char zErr[128]; + + UNUSED_PARAMETER(NotUsed); + + if( zName==0 ) zName = ""; + for(i=0; inDb; i++){ + pDb = &db->aDb[i]; + if( pDb->pBt==0 ) continue; + if( sqlite3StrICmp(pDb->zDbSName, zName)==0 ) break; + } + + if( i>=db->nDb ){ + sqlite3_snprintf(sizeof(zErr),zErr, "no such database: %s", zName); + goto detach_error; + } + if( i<2 ){ + sqlite3_snprintf(sizeof(zErr),zErr, "cannot detach database %s", zName); + goto detach_error; + } + if( !db->autoCommit ){ + sqlite3_snprintf(sizeof(zErr), zErr, + "cannot DETACH database within transaction"); + goto detach_error; + } + if( sqlite3BtreeIsInReadTrans(pDb->pBt) || sqlite3BtreeIsInBackup(pDb->pBt) ){ + sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName); + goto detach_error; + } + + sqlite3BtreeClose(pDb->pBt); + pDb->pBt = 0; + pDb->pSchema = 0; + sqlite3CollapseDatabaseArray(db); + return; + +detach_error: + sqlite3_result_error(context, zErr, -1); +} + +/* +** This procedure generates VDBE code for a single invocation of either the +** sqlite_detach() or sqlite_attach() SQL user functions. 
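+**
+** The arguments are coded into consecutive registers ending at
+** regArgs+2, right-justified so that the final argument always lands
+** in regArgs+2 - hence the "regArgs+3-pFunc->nArg" first-argument
+** register passed to OP_Function0 below.  This is also why
+** sqlite3Detach() supplies the database name as the pKey expression:
+** sqlite_detach() takes a single argument and so reads only regArgs+2.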
+*/ +static void codeAttach( + Parse *pParse, /* The parser context */ + int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */ + FuncDef const *pFunc,/* FuncDef wrapper for detachFunc() or attachFunc() */ + Expr *pAuthArg, /* Expression to pass to authorization callback */ + Expr *pFilename, /* Name of database file */ + Expr *pDbname, /* Name of the database to use internally */ + Expr *pKey /* Database key for encryption extension */ +){ + int rc; + NameContext sName; + Vdbe *v; + sqlite3* db = pParse->db; + int regArgs; + + if( pParse->nErr ) goto attach_end; + memset(&sName, 0, sizeof(NameContext)); + sName.pParse = pParse; + + if( + SQLITE_OK!=(rc = resolveAttachExpr(&sName, pFilename)) || + SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) || + SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey)) + ){ + goto attach_end; + } + +#ifndef SQLITE_OMIT_AUTHORIZATION + if( pAuthArg ){ + char *zAuthArg; + if( pAuthArg->op==TK_STRING ){ + zAuthArg = pAuthArg->u.zToken; + }else{ + zAuthArg = 0; + } + rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0); + if(rc!=SQLITE_OK ){ + goto attach_end; + } + } +#endif /* SQLITE_OMIT_AUTHORIZATION */ + + + v = sqlite3GetVdbe(pParse); + regArgs = sqlite3GetTempRange(pParse, 4); + sqlite3ExprCode(pParse, pFilename, regArgs); + sqlite3ExprCode(pParse, pDbname, regArgs+1); + sqlite3ExprCode(pParse, pKey, regArgs+2); + + assert( v || db->mallocFailed ); + if( v ){ + sqlite3VdbeAddOp4(v, OP_Function0, 0, regArgs+3-pFunc->nArg, regArgs+3, + (char *)pFunc, P4_FUNCDEF); + assert( pFunc->nArg==-1 || (pFunc->nArg&0xff)==pFunc->nArg ); + sqlite3VdbeChangeP5(v, (u8)(pFunc->nArg)); + + /* Code an OP_Expire. For an ATTACH statement, set P1 to true (expire this + ** statement only). For DETACH, set it to false (expire all existing + ** statements). + */ + sqlite3VdbeAddOp1(v, OP_Expire, (type==SQLITE_ATTACH)); + } + +attach_end: + sqlite3ExprDelete(db, pFilename); + sqlite3ExprDelete(db, pDbname); + sqlite3ExprDelete(db, pKey); +} + +/* +** Called by the parser to compile a DETACH statement. +** +** DETACH pDbname +*/ +SQLITE_PRIVATE void sqlite3Detach(Parse *pParse, Expr *pDbname){ + static const FuncDef detach_func = { + 1, /* nArg */ + SQLITE_UTF8, /* funcFlags */ + 0, /* pUserData */ + 0, /* pNext */ + detachFunc, /* xSFunc */ + 0, /* xFinalize */ + "sqlite_detach", /* zName */ + {0} + }; + codeAttach(pParse, SQLITE_DETACH, &detach_func, pDbname, 0, 0, pDbname); +} + +/* +** Called by the parser to compile an ATTACH statement. +** +** ATTACH p AS pDbname KEY pKey +*/ +SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){ + static const FuncDef attach_func = { + 3, /* nArg */ + SQLITE_UTF8, /* funcFlags */ + 0, /* pUserData */ + 0, /* pNext */ + attachFunc, /* xSFunc */ + 0, /* xFinalize */ + "sqlite_attach", /* zName */ + {0} + }; + codeAttach(pParse, SQLITE_ATTACH, &attach_func, p, p, pDbname, pKey); +} +#endif /* SQLITE_OMIT_ATTACH */ + +/* +** Initialize a DbFixer structure. This routine must be called prior +** to passing the structure to one of the sqliteFixAAAA() routines below. 
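+**
+** For example (names invented for illustration), the fixer machinery
+** causes a statement such as
+**
+**     CREATE VIEW main.v1 AS SELECT * FROM aux.t1;
+**
+** to fail with "view v1 cannot reference objects in database aux",
+** while the same view created in the TEMP database may reference any
+** attached database (the bVarOnly exception below).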
+*/ +SQLITE_PRIVATE void sqlite3FixInit( + DbFixer *pFix, /* The fixer to be initialized */ + Parse *pParse, /* Error messages will be written here */ + int iDb, /* This is the database that must be used */ + const char *zType, /* "view", "trigger", or "index" */ + const Token *pName /* Name of the view, trigger, or index */ +){ + sqlite3 *db; + + db = pParse->db; + assert( db->nDb>iDb ); + pFix->pParse = pParse; + pFix->zDb = db->aDb[iDb].zDbSName; + pFix->pSchema = db->aDb[iDb].pSchema; + pFix->zType = zType; + pFix->pName = pName; + pFix->bVarOnly = (iDb==1); +} + +/* +** The following set of routines walk through the parse tree and assign +** a specific database to all table references where the database name +** was left unspecified in the original SQL statement. The pFix structure +** must have been initialized by a prior call to sqlite3FixInit(). +** +** These routines are used to make sure that an index, trigger, or +** view in one database does not refer to objects in a different database. +** (Exception: indices, triggers, and views in the TEMP database are +** allowed to refer to anything.) If a reference is explicitly made +** to an object in a different database, an error message is added to +** pParse->zErrMsg and these routines return non-zero. If everything +** checks out, these routines return 0. +*/ +SQLITE_PRIVATE int sqlite3FixSrcList( + DbFixer *pFix, /* Context of the fixation */ + SrcList *pList /* The Source list to check and modify */ +){ + int i; + const char *zDb; + struct SrcList_item *pItem; + + if( NEVER(pList==0) ) return 0; + zDb = pFix->zDb; + for(i=0, pItem=pList->a; inSrc; i++, pItem++){ + if( pFix->bVarOnly==0 ){ + if( pItem->zDatabase && sqlite3StrICmp(pItem->zDatabase, zDb) ){ + sqlite3ErrorMsg(pFix->pParse, + "%s %T cannot reference objects in database %s", + pFix->zType, pFix->pName, pItem->zDatabase); + return 1; + } + sqlite3DbFree(pFix->pParse->db, pItem->zDatabase); + pItem->zDatabase = 0; + pItem->pSchema = pFix->pSchema; + } +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) + if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1; + if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1; +#endif + } + return 0; +} +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) +SQLITE_PRIVATE int sqlite3FixSelect( + DbFixer *pFix, /* Context of the fixation */ + Select *pSelect /* The SELECT statement to be fixed to one database */ +){ + while( pSelect ){ + if( sqlite3FixExprList(pFix, pSelect->pEList) ){ + return 1; + } + if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pSelect->pWhere) ){ + return 1; + } + if( sqlite3FixExprList(pFix, pSelect->pGroupBy) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pSelect->pHaving) ){ + return 1; + } + if( sqlite3FixExprList(pFix, pSelect->pOrderBy) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pSelect->pLimit) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pSelect->pOffset) ){ + return 1; + } + pSelect = pSelect->pPrior; + } + return 0; +} +SQLITE_PRIVATE int sqlite3FixExpr( + DbFixer *pFix, /* Context of the fixation */ + Expr *pExpr /* The expression to be fixed to one database */ +){ + while( pExpr ){ + if( pExpr->op==TK_VARIABLE ){ + if( pFix->pParse->db->init.busy ){ + pExpr->op = TK_NULL; + }else{ + sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); + return 1; + } + } + if( ExprHasProperty(pExpr, EP_TokenOnly|EP_Leaf) ) break; + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1; + 
}else{ + if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1; + } + if( sqlite3FixExpr(pFix, pExpr->pRight) ){ + return 1; + } + pExpr = pExpr->pLeft; + } + return 0; +} +SQLITE_PRIVATE int sqlite3FixExprList( + DbFixer *pFix, /* Context of the fixation */ + ExprList *pList /* The expression to be fixed to one database */ +){ + int i; + struct ExprList_item *pItem; + if( pList==0 ) return 0; + for(i=0, pItem=pList->a; inExpr; i++, pItem++){ + if( sqlite3FixExpr(pFix, pItem->pExpr) ){ + return 1; + } + } + return 0; +} +#endif + +#ifndef SQLITE_OMIT_TRIGGER +SQLITE_PRIVATE int sqlite3FixTriggerStep( + DbFixer *pFix, /* Context of the fixation */ + TriggerStep *pStep /* The trigger step be fixed to one database */ +){ + while( pStep ){ + if( sqlite3FixSelect(pFix, pStep->pSelect) ){ + return 1; + } + if( sqlite3FixExpr(pFix, pStep->pWhere) ){ + return 1; + } + if( sqlite3FixExprList(pFix, pStep->pExprList) ){ + return 1; + } + pStep = pStep->pNext; + } + return 0; +} +#endif + +/************** End of attach.c **********************************************/ +/************** Begin file auth.c ********************************************/ +/* +** 2003 January 11 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains code used to implement the sqlite3_set_authorizer() +** API. This facility is an optional feature of the library. Embedded +** systems that do not need this facility may omit it by recompiling +** the library with -DSQLITE_OMIT_AUTHORIZATION=1 +*/ +/* #include "sqliteInt.h" */ + +/* +** All of the code in this file may be omitted by defining a single +** macro. +*/ +#ifndef SQLITE_OMIT_AUTHORIZATION + +/* +** Set or clear the access authorization function. +** +** The access authorization function is be called during the compilation +** phase to verify that the user has read and/or write access permission on +** various fields of the database. The first argument to the auth function +** is a copy of the 3rd argument to this routine. The second argument +** to the auth function is one of these constants: +** +** SQLITE_CREATE_INDEX +** SQLITE_CREATE_TABLE +** SQLITE_CREATE_TEMP_INDEX +** SQLITE_CREATE_TEMP_TABLE +** SQLITE_CREATE_TEMP_TRIGGER +** SQLITE_CREATE_TEMP_VIEW +** SQLITE_CREATE_TRIGGER +** SQLITE_CREATE_VIEW +** SQLITE_DELETE +** SQLITE_DROP_INDEX +** SQLITE_DROP_TABLE +** SQLITE_DROP_TEMP_INDEX +** SQLITE_DROP_TEMP_TABLE +** SQLITE_DROP_TEMP_TRIGGER +** SQLITE_DROP_TEMP_VIEW +** SQLITE_DROP_TRIGGER +** SQLITE_DROP_VIEW +** SQLITE_INSERT +** SQLITE_PRAGMA +** SQLITE_READ +** SQLITE_SELECT +** SQLITE_TRANSACTION +** SQLITE_UPDATE +** +** The third and fourth arguments to the auth function are the name of +** the table and the column that are being accessed. The auth function +** should return either SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. If +** SQLITE_OK is returned, it means that access is allowed. SQLITE_DENY +** means that the SQL statement will never-run - the sqlite3_exec() call +** will return with an error. SQLITE_IGNORE means that the SQL statement +** should run but attempts to read the specified column will return NULL +** and attempts to write the column will be ignored. +** +** Setting the auth function to NULL disables this hook. 
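+**
+** As an illustrative sketch only (the callback name and its
+** deny-all-deletes policy are invented for this example), an
+** application might register:
+**
+**     static int xAuth(void *pArg, int op, const char *z1,
+**                      const char *z2, const char *z3, const char *z4){
+**       if( op==SQLITE_DELETE ) return SQLITE_DENY;
+**       return SQLITE_OK;
+**     }
+**     sqlite3_set_authorizer(db, xAuth, 0);
+**
+** after which any attempt to compile a DELETE statement fails.
+**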
The default +** setting of the auth function is NULL. +*/ +SQLITE_API int sqlite3_set_authorizer( + sqlite3 *db, + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), + void *pArg +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + db->xAuth = (sqlite3_xauth)xAuth; + db->pAuthArg = pArg; + sqlite3ExpirePreparedStatements(db); + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +/* +** Write an error message into pParse->zErrMsg that explains that the +** user-supplied authorization function returned an illegal value. +*/ +static void sqliteAuthBadReturnCode(Parse *pParse){ + sqlite3ErrorMsg(pParse, "authorizer malfunction"); + pParse->rc = SQLITE_ERROR; +} + +/* +** Invoke the authorization callback for permission to read column zCol from +** table zTab in database zDb. This function assumes that an authorization +** callback has been registered (i.e. that sqlite3.xAuth is not NULL). +** +** If SQLITE_IGNORE is returned and pExpr is not NULL, then pExpr is changed +** to an SQL NULL expression. Otherwise, if pExpr is NULL, then SQLITE_IGNORE +** is treated as SQLITE_DENY. In this case an error is left in pParse. +*/ +SQLITE_PRIVATE int sqlite3AuthReadCol( + Parse *pParse, /* The parser context */ + const char *zTab, /* Table name */ + const char *zCol, /* Column name */ + int iDb /* Index of containing database. */ +){ + sqlite3 *db = pParse->db; /* Database handle */ + char *zDb = db->aDb[iDb].zDbSName; /* Schema name of attached database */ + int rc; /* Auth callback return code */ + + if( db->init.busy ) return SQLITE_OK; + rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext +#ifdef SQLITE_USER_AUTHENTICATION + ,db->auth.zAuthUser +#endif + ); + if( rc==SQLITE_DENY ){ + if( db->nDb>2 || iDb!=0 ){ + sqlite3ErrorMsg(pParse, "access to %s.%s.%s is prohibited",zDb,zTab,zCol); + }else{ + sqlite3ErrorMsg(pParse, "access to %s.%s is prohibited", zTab, zCol); + } + pParse->rc = SQLITE_AUTH; + }else if( rc!=SQLITE_IGNORE && rc!=SQLITE_OK ){ + sqliteAuthBadReturnCode(pParse); + } + return rc; +} + +/* +** The pExpr should be a TK_COLUMN expression. The table referred to +** is in pTabList or else it is the NEW or OLD table of a trigger. +** Check to see if it is OK to read this particular column. +** +** If the auth function returns SQLITE_IGNORE, change the TK_COLUMN +** instruction into a TK_NULL. If the auth function returns SQLITE_DENY, +** then generate an error. +*/ +SQLITE_PRIVATE void sqlite3AuthRead( + Parse *pParse, /* The parser context */ + Expr *pExpr, /* The expression to check authorization on */ + Schema *pSchema, /* The schema of the expression */ + SrcList *pTabList /* All table that pExpr might refer to */ +){ + sqlite3 *db = pParse->db; + Table *pTab = 0; /* The table being read */ + const char *zCol; /* Name of the column of the table */ + int iSrc; /* Index in pTabList->a[] of table being read */ + int iDb; /* The index of the database the expression refers to */ + int iCol; /* Index of column in table */ + + if( db->xAuth==0 ) return; + iDb = sqlite3SchemaToIndex(pParse->db, pSchema); + if( iDb<0 ){ + /* An attempt to read a column out of a subquery or other + ** temporary table. 
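+    ** Such tables have no entry in db->aDb[], so the authorizer
+    ** callback is never consulted for reads from them.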
*/ + return; + } + + assert( pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER ); + if( pExpr->op==TK_TRIGGER ){ + pTab = pParse->pTriggerTab; + }else{ + assert( pTabList ); + for(iSrc=0; ALWAYS(iSrcnSrc); iSrc++){ + if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ + pTab = pTabList->a[iSrc].pTab; + break; + } + } + } + iCol = pExpr->iColumn; + if( NEVER(pTab==0) ) return; + + if( iCol>=0 ){ + assert( iColnCol ); + zCol = pTab->aCol[iCol].zName; + }else if( pTab->iPKey>=0 ){ + assert( pTab->iPKeynCol ); + zCol = pTab->aCol[pTab->iPKey].zName; + }else{ + zCol = "ROWID"; + } + assert( iDb>=0 && iDbnDb ); + if( SQLITE_IGNORE==sqlite3AuthReadCol(pParse, pTab->zName, zCol, iDb) ){ + pExpr->op = TK_NULL; + } +} + +/* +** Do an authorization check using the code and arguments given. Return +** either SQLITE_OK (zero) or SQLITE_IGNORE or SQLITE_DENY. If SQLITE_DENY +** is returned, then the error count and error message in pParse are +** modified appropriately. +*/ +SQLITE_PRIVATE int sqlite3AuthCheck( + Parse *pParse, + int code, + const char *zArg1, + const char *zArg2, + const char *zArg3 +){ + sqlite3 *db = pParse->db; + int rc; + + /* Don't do any authorization checks if the database is initialising + ** or if the parser is being invoked from within sqlite3_declare_vtab. + */ + if( db->init.busy || IN_DECLARE_VTAB ){ + return SQLITE_OK; + } + + if( db->xAuth==0 ){ + return SQLITE_OK; + } + rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext +#ifdef SQLITE_USER_AUTHENTICATION + ,db->auth.zAuthUser +#endif + ); + if( rc==SQLITE_DENY ){ + sqlite3ErrorMsg(pParse, "not authorized"); + pParse->rc = SQLITE_AUTH; + }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){ + rc = SQLITE_DENY; + sqliteAuthBadReturnCode(pParse); + } + return rc; +} + +/* +** Push an authorization context. After this routine is called, the +** zArg3 argument to authorization callbacks will be zContext until +** popped. Or if pParse==0, this routine is a no-op. +*/ +SQLITE_PRIVATE void sqlite3AuthContextPush( + Parse *pParse, + AuthContext *pContext, + const char *zContext +){ + assert( pParse ); + pContext->pParse = pParse; + pContext->zAuthContext = pParse->zAuthContext; + pParse->zAuthContext = zContext; +} + +/* +** Pop an authorization context that was previously pushed +** by sqlite3AuthContextPush +*/ +SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){ + if( pContext->pParse ){ + pContext->pParse->zAuthContext = pContext->zAuthContext; + pContext->pParse = 0; + } +} + +#endif /* SQLITE_OMIT_AUTHORIZATION */ + +/************** End of auth.c ************************************************/ +/************** Begin file build.c *******************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that are called by the SQLite parser +** when syntax rules are reduced. 
The routines in this file handle the +** following kinds of SQL syntax: +** +** CREATE TABLE +** DROP TABLE +** CREATE INDEX +** DROP INDEX +** creating ID lists +** BEGIN TRANSACTION +** COMMIT +** ROLLBACK +*/ +/* #include "sqliteInt.h" */ + +#ifndef SQLITE_OMIT_SHARED_CACHE +/* +** The TableLock structure is only used by the sqlite3TableLock() and +** codeTableLocks() functions. +*/ +struct TableLock { + int iDb; /* The database containing the table to be locked */ + int iTab; /* The root page of the table to be locked */ + u8 isWriteLock; /* True for write lock. False for a read lock */ + const char *zLockName; /* Name of the table */ +}; + +/* +** Record the fact that we want to lock a table at run-time. +** +** The table to be locked has root page iTab and is found in database iDb. +** A read or a write lock can be taken depending on isWritelock. +** +** This routine just records the fact that the lock is desired. The +** code to make the lock occur is generated by a later call to +** codeTableLocks() which occurs during sqlite3FinishCoding(). +*/ +SQLITE_PRIVATE void sqlite3TableLock( + Parse *pParse, /* Parsing context */ + int iDb, /* Index of the database containing the table to lock */ + int iTab, /* Root page number of the table to be locked */ + u8 isWriteLock, /* True for a write lock */ + const char *zName /* Name of the table to be locked */ +){ + Parse *pToplevel = sqlite3ParseToplevel(pParse); + int i; + int nBytes; + TableLock *p; + assert( iDb>=0 ); + + if( iDb==1 ) return; + if( !sqlite3BtreeSharable(pParse->db->aDb[iDb].pBt) ) return; + for(i=0; inTableLock; i++){ + p = &pToplevel->aTableLock[i]; + if( p->iDb==iDb && p->iTab==iTab ){ + p->isWriteLock = (p->isWriteLock || isWriteLock); + return; + } + } + + nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1); + pToplevel->aTableLock = + sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes); + if( pToplevel->aTableLock ){ + p = &pToplevel->aTableLock[pToplevel->nTableLock++]; + p->iDb = iDb; + p->iTab = iTab; + p->isWriteLock = isWriteLock; + p->zLockName = zName; + }else{ + pToplevel->nTableLock = 0; + sqlite3OomFault(pToplevel->db); + } +} + +/* +** Code an OP_TableLock instruction for each table locked by the +** statement (configured by calls to sqlite3TableLock()). +*/ +static void codeTableLocks(Parse *pParse){ + int i; + Vdbe *pVdbe; + + pVdbe = sqlite3GetVdbe(pParse); + assert( pVdbe!=0 ); /* sqlite3GetVdbe cannot fail: VDBE already allocated */ + + for(i=0; inTableLock; i++){ + TableLock *p = &pParse->aTableLock[i]; + int p1 = p->iDb; + sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock, + p->zLockName, P4_STATIC); + } +} +#else + #define codeTableLocks(x) +#endif + +/* +** Return TRUE if the given yDbMask object is empty - if it contains no +** 1 bits. This routine is used by the DbMaskAllZero() and DbMaskNotZero() +** macros when SQLITE_MAX_ATTACHED is greater than 30. 
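+**
+** When SQLITE_MAX_ATTACHED is 30 or less, yDbMask is an ordinary
+** unsigned integer and these mask tests reduce to simple bitwise
+** expressions, so the out-of-line helpers below are not compiled in.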
+*/ +#if SQLITE_MAX_ATTACHED>30 +SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask m){ + int i; + for(i=0; ipToplevel==0 ); + db = pParse->db; + if( pParse->nested ) return; + if( db->mallocFailed || pParse->nErr ){ + if( pParse->rc==SQLITE_OK ) pParse->rc = SQLITE_ERROR; + return; + } + + /* Begin by generating some termination code at the end of the + ** vdbe program + */ + v = sqlite3GetVdbe(pParse); + assert( !pParse->isMultiWrite + || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort)); + if( v ){ + sqlite3VdbeAddOp0(v, OP_Halt); + +#if SQLITE_USER_AUTHENTICATION + if( pParse->nTableLock>0 && db->init.busy==0 ){ + sqlite3UserAuthInit(db); + if( db->auth.authLevelrc = SQLITE_AUTH_USER; + return; + } + } +#endif + + /* The cookie mask contains one bit for each database file open. + ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are + ** set for each database that is used. Generate code to start a + ** transaction on each used database and to verify the schema cookie + ** on each used database. + */ + if( db->mallocFailed==0 + && (DbMaskNonZero(pParse->cookieMask) || pParse->pConstExpr) + ){ + int iDb, i; + assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init ); + sqlite3VdbeJumpHere(v, 0); + for(iDb=0; iDbnDb; iDb++){ + Schema *pSchema; + if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue; + sqlite3VdbeUsesBtree(v, iDb); + pSchema = db->aDb[iDb].pSchema; + sqlite3VdbeAddOp4Int(v, + OP_Transaction, /* Opcode */ + iDb, /* P1 */ + DbMaskTest(pParse->writeMask,iDb), /* P2 */ + pSchema->schema_cookie, /* P3 */ + pSchema->iGeneration /* P4 */ + ); + if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1); + VdbeComment((v, + "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite)); + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + for(i=0; inVtabLock; i++){ + char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]); + sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB); + } + pParse->nVtabLock = 0; +#endif + + /* Once all the cookies have been verified and transactions opened, + ** obtain the required table-locks. This is a no-op unless the + ** shared-cache feature is enabled. + */ + codeTableLocks(pParse); + + /* Initialize any AUTOINCREMENT data structures required. + */ + sqlite3AutoincrementBegin(pParse); + + /* Code constant expressions that where factored out of inner loops */ + if( pParse->pConstExpr ){ + ExprList *pEL = pParse->pConstExpr; + pParse->okConstFactor = 0; + for(i=0; inExpr; i++){ + sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg); + } + } + + /* Finally, jump back to the beginning of the executable code. */ + sqlite3VdbeGoto(v, 1); + } + } + + + /* Get the VDBE program ready for execution + */ + if( v && pParse->nErr==0 && !db->mallocFailed ){ + assert( pParse->iCacheLevel==0 ); /* Disables and re-enables match */ + /* A minimum of one cursor is required if autoincrement is used + * See ticket [a696379c1f08866] */ + if( pParse->pAinc!=0 && pParse->nTab==0 ) pParse->nTab = 1; + sqlite3VdbeMakeReady(v, pParse); + pParse->rc = SQLITE_DONE; + }else{ + pParse->rc = SQLITE_ERROR; + } +} + +/* +** Run the parser and code generator recursively in order to generate +** code for the SQL statement given onto the end of the pParse context +** currently under construction. When the parser is run recursively +** this way, the final OP_Halt is not appended and other initialization +** and finalization steps are omitted because those are handling by the +** outermost parser. +** +** Not everything is nestable. 
+** INSERT, UPDATE, and DELETE operations against SQLITE_MASTER.  Use
+** care if you decide to try to use this routine for some other purposes.
+*/
+SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
+  va_list ap;
+  char *zSql;
+  char *zErrMsg = 0;
+  sqlite3 *db = pParse->db;
+  char saveBuf[PARSE_TAIL_SZ];
+
+  if( pParse->nErr ) return;
+  assert( pParse->nested<10 );  /* Nesting should only be of limited depth */
+  va_start(ap, zFormat);
+  zSql = sqlite3VMPrintf(db, zFormat, ap);
+  va_end(ap);
+  if( zSql==0 ){
+    return;   /* A malloc must have failed */
+  }
+  pParse->nested++;
+  memcpy(saveBuf, PARSE_TAIL(pParse), PARSE_TAIL_SZ);
+  memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ);
+  sqlite3RunParser(pParse, zSql, &zErrMsg);
+  sqlite3DbFree(db, zErrMsg);
+  sqlite3DbFree(db, zSql);
+  memcpy(PARSE_TAIL(pParse), saveBuf, PARSE_TAIL_SZ);
+  pParse->nested--;
+}
+
+#if SQLITE_USER_AUTHENTICATION
+/*
+** Return TRUE if zTable is the name of the system table that stores the
+** list of users and their access credentials.
+*/
+SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){
+  return sqlite3_stricmp(zTable, "sqlite_user")==0;
+}
+#endif
+
+/*
+** Locate the in-memory structure that describes a particular database
+** table given the name of that table and (optionally) the name of the
+** database containing the table.  Return NULL if not found.
+**
+** If zDatabase is 0, all databases are searched for the table and the
+** first matching table is returned.  (No checking for duplicate table
+** names is done.)  The search order is TEMP first, then MAIN, then any
+** auxiliary databases added using the ATTACH command.
+**
+** See also sqlite3LocateTable().
+*/
+SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){
+  Table *p = 0;
+  int i;
+
+  /* All mutexes are required for schema access.  Make sure we hold them. */
+  assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) );
+#if SQLITE_USER_AUTHENTICATION
+  /* Only the admin user is allowed to know that the sqlite_user table
+  ** exists */
+  if( db->auth.authLevel<UAUTH_Admin && sqlite3UserAuthTable(zName)!=0 ){
+    return 0;
+  }
+#endif
+  while(1){
+    for(i=OMIT_TEMPDB; i<db->nDb; i++){
+      int j = (i<2) ? i^1 : i;   /* Search TEMP before MAIN */
+      if( zDatabase==0 || sqlite3StrICmp(zDatabase, db->aDb[j].zDbSName)==0 ){
+        assert( sqlite3SchemaMutexHeld(db, j, 0) );
+        p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName);
+        if( p ) return p;
+      }
+    }
+    /* Not found.  If the name we were looking for was temp.sqlite_master
+    ** then change the name to sqlite_temp_master and try again. */
+    if( sqlite3StrICmp(zName, MASTER_NAME)!=0 ) break;
+    if( sqlite3_stricmp(zDatabase, db->aDb[1].zDbSName)!=0 ) break;
+    zName = TEMP_MASTER_NAME;
+  }
+  return 0;
+}
+
+/*
+** Locate the in-memory structure that describes a particular database
+** table given the name of that table and (optionally) the name of the
+** database containing the table.  Return NULL if not found.  Also leave an
+** error message in pParse->zErrMsg.
+**
+** The difference between this routine and sqlite3FindTable() is that this
+** routine leaves an error message in pParse->zErrMsg where
+** sqlite3FindTable() does not.
+*/
+SQLITE_PRIVATE Table *sqlite3LocateTable(
+  Parse *pParse,         /* context in which to report errors */
+  u32 flags,             /* LOCATE_VIEW or LOCATE_NOERR */
+  const char *zName,     /* Name of the table we are looking for */
+  const char *zDbase     /* Name of the database.  Might be NULL */
+){
+  Table *p;
+
+  /* Read the database schema. If an error occurs, leave an error message
+  ** and code in pParse and return NULL. */
+  if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+    return 0;
+  }
+
+  p = sqlite3FindTable(pParse->db, zName, zDbase);
+  if( p==0 ){
+    const char *zMsg = flags & LOCATE_VIEW ? "no such view" : "no such table";
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+    if( sqlite3FindDbName(pParse->db, zDbase)<1 ){
+      /* If zName is not the name of a table in the schema created using
+      ** CREATE, then check to see if it is the name of a virtual table
+      ** that can be an eponymous virtual table. */
+      Module *pMod = (Module*)sqlite3HashFind(&pParse->db->aModule, zName);
+      if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){
+        pMod = sqlite3PragmaVtabRegister(pParse->db, zName);
+      }
+      if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){
+        return pMod->pEpoTab;
+      }
+    }
+#endif
+    if( (flags & LOCATE_NOERR)==0 ){
+      if( zDbase ){
+        sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName);
+      }else{
+        sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName);
+      }
+      pParse->checkSchema = 1;
+    }
+  }
+
+  return p;
+}
+
+/*
+** Locate the table identified by *p.
+**
+** This is a wrapper around sqlite3LocateTable(). The difference between
+** sqlite3LocateTable() and this function is that this function restricts
+** the search to schema (p->pSchema) if it is not NULL. p->pSchema may be
+** non-NULL if it is part of a view or trigger program definition. See
+** sqlite3FixSrcList() for details.
+*/
+SQLITE_PRIVATE Table *sqlite3LocateTableItem(
+  Parse *pParse, 
+  u32 flags,
+  struct SrcList_item *p
+){
+  const char *zDb;
+  assert( p->pSchema==0 || p->zDatabase==0 );
+  if( p->pSchema ){
+    int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema);
+    zDb = pParse->db->aDb[iDb].zDbSName;
+  }else{
+    zDb = p->zDatabase;
+  }
+  return sqlite3LocateTable(pParse, flags, p->zName, zDb);
+}
+
+/*
+** Locate the in-memory structure that describes 
+** a particular index given the name of that index
+** and the name of the database that contains the index.
+** Return NULL if not found.
+**
+** If zDatabase is 0, all databases are searched for the
+** table and the first matching index is returned.  (No checking
+** for duplicate index names is done.)  The search order is
+** TEMP first, then MAIN, then any auxiliary databases added
+** using the ATTACH command.
+*/
+SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){
+  Index *p = 0;
+  int i;
+  /* All mutexes are required for schema access.  Make sure we hold them. */
+  assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
+  for(i=OMIT_TEMPDB; i<db->nDb; i++){
+    int j = (i<2) ? i^1 : i;  /* Search TEMP before MAIN */
+    Schema *pSchema = db->aDb[j].pSchema;
+    assert( pSchema );
+    if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zDbSName) ) continue;
+    assert( sqlite3SchemaMutexHeld(db, j, 0) );
+    p = sqlite3HashFind(&pSchema->idxHash, zName);
+    if( p ) break;
+  }
+  return p;
+}
+
+/*
+** Reclaim the memory used by an index
+*/
+static void freeIndex(sqlite3 *db, Index *p){
+#ifndef SQLITE_OMIT_ANALYZE
+  sqlite3DeleteIndexSamples(db, p);
+#endif
+  sqlite3ExprDelete(db, p->pPartIdxWhere);
+  sqlite3ExprListDelete(db, p->aColExpr);
+  sqlite3DbFree(db, p->zColAff);
+  if( p->isResized ) sqlite3DbFree(db, (void *)p->azColl);
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  sqlite3_free(p->aiRowEst);
+#endif
+  sqlite3DbFree(db, p);
+}
+
+/*
+** For the index called zIdxName which is found in the database iDb,
+** unlink that index from its Table, remove the index from
+** the index hash table, and free all memory structures associated
+** with the index.
+*/
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char *zIdxName){
+  Index *pIndex;
+  Hash *pHash;
+
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+  pHash = &db->aDb[iDb].pSchema->idxHash;
+  pIndex = sqlite3HashInsert(pHash, zIdxName, 0);
+  if( ALWAYS(pIndex) ){
+    if( pIndex->pTable->pIndex==pIndex ){
+      pIndex->pTable->pIndex = pIndex->pNext;
+    }else{
+      Index *p;
+      /* Justification of ALWAYS();  The index must be on the list of
+      ** indices. */
+      p = pIndex->pTable->pIndex;
+      while( ALWAYS(p) && p->pNext!=pIndex ){ p = p->pNext; }
+      if( ALWAYS(p && p->pNext==pIndex) ){
+        p->pNext = pIndex->pNext;
+      }
+    }
+    freeIndex(db, pIndex);
+  }
+  db->flags |= SQLITE_InternChanges;
+}
+
+/*
+** Look through the list of open database files in db->aDb[] and if
+** any have been closed, remove them from the list.  Reallocate the
+** db->aDb[] structure to a smaller size, if possible.
+**
+** Entry 0 (the "main" database) and entry 1 (the "temp" database)
+** are never candidates for being collapsed.
+*/
+SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3 *db){
+  int i, j;
+  for(i=j=2; i<db->nDb; i++){
+    struct Db *pDb = &db->aDb[i];
+    if( pDb->pBt==0 ){
+      sqlite3DbFree(db, pDb->zDbSName);
+      pDb->zDbSName = 0;
+      continue;
+    }
+    if( j<i ){
+      db->aDb[j] = db->aDb[i];
+    }
+    j++;
+  }
+  db->nDb = j;
+  if( db->nDb<=2 && db->aDb!=db->aDbStatic ){
+    memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0]));
+    sqlite3DbFree(db, db->aDb);
+    db->aDb = db->aDbStatic;
+  }
+}
+
+/*
+** Reset the schema for the database at index iDb.  Also reset the
+** TEMP schema.
+*/
+SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3 *db, int iDb){
+  Db *pDb;
+  assert( iDb<db->nDb );
+
+  /* Case 1:  Reset the single schema identified by iDb */
+  pDb = &db->aDb[iDb];
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+  assert( pDb->pSchema!=0 );
+  sqlite3SchemaClear(pDb->pSchema);
+
+  /* If any database other than TEMP is reset, then also reset TEMP
+  ** since TEMP might be holding triggers that reference tables in the
+  ** other database.
+  */
+  if( iDb!=1 ){
+    pDb = &db->aDb[1];
+    assert( pDb->pSchema!=0 );
+    sqlite3SchemaClear(pDb->pSchema);
+  }
+  return;
+}
+
+/*
+** Erase all schema information from all attached databases (including
+** "main" and "temp") for a single database connection.
+*/
+SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3 *db){
+  int i;
+  sqlite3BtreeEnterAll(db);
+  for(i=0; i<db->nDb; i++){
+    Db *pDb = &db->aDb[i];
+    if( pDb->pSchema ){
+      sqlite3SchemaClear(pDb->pSchema);
+    }
+  }
+  db->flags &= ~SQLITE_InternChanges;
+  sqlite3VtabUnlockList(db);
+  sqlite3BtreeLeaveAll(db);
+  sqlite3CollapseDatabaseArray(db);
+}
+
+/*
+** This routine is called when a commit occurs.
+*/
+SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3 *db){
+  db->flags &= ~SQLITE_InternChanges;
+}
+
+/*
+** Delete memory allocated for the column names of a table or view (the
+** Table.aCol[] array).
+*/
+SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){
+  int i;
+  Column *pCol;
+  assert( pTable!=0 );
+  if( (pCol = pTable->aCol)!=0 ){
+    for(i=0; i<pTable->nCol; i++, pCol++){
+      sqlite3DbFree(db, pCol->zName);
+      sqlite3ExprDelete(db, pCol->pDflt);
+      sqlite3DbFree(db, pCol->zColl);
+    }
+    sqlite3DbFree(db, pTable->aCol);
+  }
+}
+
+/*
+** Remove the memory data structures associated with the given
+** Table.  No changes are made to disk by this routine.
+**
+** This routine just deletes the data structure.  It does not unlink
+** the table data structure from the hash table.  But it does destroy
+** memory structures of the indices and foreign keys associated with 
+** the table.
+**
+** The db parameter is optional.  It is needed if the Table object 
+** contains lookaside memory.  (Table objects in the schema do not use
+** lookaside memory, but some ephemeral Table objects do.)  Or the
+** db parameter can be used with db->pnBytesFreed to measure the memory
+** used by the Table object.
+*/
+static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
+  Index *pIndex, *pNext;
+  TESTONLY( int nLookaside; ) /* Used to verify lookaside not used for schema */
+
+  /* Record the number of outstanding lookaside allocations in schema Tables
+  ** prior to doing any free() operations.  Since schema Tables do not use
+  ** lookaside, this number should not change. */
+  TESTONLY( nLookaside = (db && (pTable->tabFlags & TF_Ephemeral)==0) ?
+                         db->lookaside.nOut : 0 );
+
+  /* Delete all indices associated with this table. */
+  for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){
+    pNext = pIndex->pNext;
+    assert( pIndex->pSchema==pTable->pSchema
+         || (IsVirtual(pTable) && pIndex->idxType!=SQLITE_IDXTYPE_APPDEF) );
+    if( (db==0 || db->pnBytesFreed==0) && !IsVirtual(pTable) ){
+      char *zName = pIndex->zName; 
+      TESTONLY ( Index *pOld = ) sqlite3HashInsert(
+         &pIndex->pSchema->idxHash, zName, 0
+      );
+      assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) );
+      assert( pOld==pIndex || pOld==0 );
+    }
+    freeIndex(db, pIndex);
+  }
+
+  /* Delete any foreign keys attached to this table. */
+  sqlite3FkDelete(db, pTable);
+
+  /* Delete the Table structure itself.
+  */
+  sqlite3DeleteColumnNames(db, pTable);
+  sqlite3DbFree(db, pTable->zName);
+  sqlite3DbFree(db, pTable->zColAff);
+  sqlite3SelectDelete(db, pTable->pSelect);
+  sqlite3ExprListDelete(db, pTable->pCheck);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  sqlite3VtabClear(db, pTable);
+#endif
+  sqlite3DbFree(db, pTable);
+
+  /* Verify that no lookaside memory was used by schema tables */
+  assert( nLookaside==0 || nLookaside==db->lookaside.nOut );
+}
+SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
+  /* Do not delete the table until the reference count reaches zero. */
+  if( !pTable ) return;
+  if( ((!db || db->pnBytesFreed==0) && (--pTable->nTabRef)>0) ) return;
+  deleteTable(db, pTable);
+}
+
+
+/*
+** Unlink the given table from the hash tables and then delete the
+** table structure with all its indices and foreign keys.
+*/
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char *zTabName){
+  Table *p;
+  Db *pDb;
+
+  assert( db!=0 );
+  assert( iDb>=0 && iDb<db->nDb );
+  assert( zTabName );
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+  testcase( zTabName[0]==0 );  /* Zero-length table names are allowed */
+  pDb = &db->aDb[iDb];
+  p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, 0);
+  sqlite3DeleteTable(db, p);
+  db->flags |= SQLITE_InternChanges;
+}
+
+/*
+** Given a token, return a string that consists of the text of that
+** token.  Space to hold the returned string
+** is obtained from sqliteMalloc() and must be freed by the calling
+** function.
+**
+** Any quotation marks (ex:  "name", 'name', [name], or `name`) that
+** surround the body of the token are removed.
+**
+** Tokens are often just pointers into the original SQL text and so
+** are not \000 terminated and are not persistent.  The returned string
+** is \000 terminated and is persistent.
+*/
+SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){
+  char *zName;
+  if( pName ){
+    zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n);
+    sqlite3Dequote(zName);
+  }else{
+    zName = 0;
+  }
+  return zName;
+}
+
+/*
+** Open the sqlite_master table stored in database number iDb for
+** writing. The table is opened using cursor 0.
+*/
+SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *p, int iDb){
+  Vdbe *v  = sqlite3GetVdbe(p);
+  sqlite3TableLock(p, iDb, MASTER_ROOT, 1, MASTER_NAME);
+  sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, MASTER_ROOT, iDb, 5);
+  if( p->nTab==0 ){
+    p->nTab = 1;
+  }
+}
+
+/*
+** Parameter zName points to a nul-terminated buffer containing the name
+** of a database ("main", "temp" or the name of an attached db). This
+** function returns the index of the named database in db->aDb[], or
+** -1 if the named db cannot be found.
+*/
+SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){
+  int i = -1;         /* Database number */
+  if( zName ){
+    Db *pDb;
+    for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){
+      if( 0==sqlite3_stricmp(pDb->zDbSName, zName) ) break;
+      /* "main" is always an acceptable alias for the primary database
+      ** even if it has been renamed using SQLITE_DBCONFIG_MAINDBNAME. */
+      if( i==0 && 0==sqlite3_stricmp("main", zName) ) break;
+    }
+  }
+  return i;
+}
+
+/*
+** The token *pName contains the name of a database (either "main" or
+** "temp" or the name of an attached db). This routine returns the
+** index of the named database in db->aDb[], or -1 if the named db 
+** does not exist.
+*/
+SQLITE_PRIVATE int sqlite3FindDb(sqlite3 *db, Token *pName){
+  int i;                               /* Database number */
+  char *zName;                         /* Name we are searching for */
+  zName = sqlite3NameFromToken(db, pName);
+  i = sqlite3FindDbName(db, zName);
+  sqlite3DbFree(db, zName);
+  return i;
+}
+
+/* The table or view or trigger name is passed to this routine via tokens
+** pName1 and pName2. If the table name was fully qualified, for example:
+**
+** CREATE TABLE xxx.yyy (...);
+**
+** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if
+** the table name is not fully qualified, i.e.:
+**
+** CREATE TABLE yyy(...);
+**
+** Then pName1 is set to "yyy" and pName2 is "".
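+**
+** For example (illustrative statements, assuming "aux1" was attached
+** earlier with ATTACH DATABASE 'aux1.db' AS aux1):
+**
+**    CREATE TABLE aux1.t1(x);   -- pName1=="aux1", pName2=="t1"
+**    CREATE TABLE t1(x);        -- pName1=="t1",   pName2==""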
+**
+** This routine sets the *pUnqual pointer to point at the token (pName1 or
+** pName2) that stores the unqualified table name.  The index of the
+** database "xxx" is returned.
+*/
+SQLITE_PRIVATE int sqlite3TwoPartName(
+  Parse *pParse,      /* Parsing and code generating context */
+  Token *pName1,      /* The "xxx" in the name "xxx.yyy" or "xxx" */
+  Token *pName2,      /* The "yyy" in the name "xxx.yyy" */
+  Token **pUnqual     /* Write the unqualified object name here */
+){
+  int iDb;                    /* Database holding the object */
+  sqlite3 *db = pParse->db;
+
+  assert( pName2!=0 );
+  if( pName2->n>0 ){
+    if( db->init.busy ) {
+      sqlite3ErrorMsg(pParse, "corrupt database");
+      return -1;
+    }
+    *pUnqual = pName2;
+    iDb = sqlite3FindDb(db, pName1);
+    if( iDb<0 ){
+      sqlite3ErrorMsg(pParse, "unknown database %T", pName1);
+      return -1;
+    }
+  }else{
+    assert( db->init.iDb==0 || db->init.busy || (db->flags & SQLITE_Vacuum)!=0);
+    iDb = db->init.iDb;
+    *pUnqual = pName1;
+  }
+  return iDb;
+}
+
+/*
+** This routine is used to check if the UTF-8 string zName is a legal
+** unqualified name for a new schema object (table, index, view or
+** trigger). All names are legal except those that begin with the string
+** "sqlite_" (in upper, lower or mixed case). This portion of the namespace
+** is reserved for internal use.
+*/
+SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *pParse, const char *zName){
+  if( !pParse->db->init.busy && pParse->nested==0 
+          && (pParse->db->flags & SQLITE_WriteSchema)==0
+          && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){
+    sqlite3ErrorMsg(pParse, "object name reserved for internal use: %s", zName);
+    return SQLITE_ERROR;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Return the PRIMARY KEY index of a table
+*/
+SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){
+  Index *p;
+  for(p=pTab->pIndex; p && !IsPrimaryKeyIndex(p); p=p->pNext){}
+  return p;
+}
+
+/*
+** Return the column of index pIdx that corresponds to table
+** column iCol.  Return -1 if not found.
+*/
+SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index *pIdx, i16 iCol){
+  int i;
+  for(i=0; i<pIdx->nColumn; i++){
+    if( iCol==pIdx->aiColumn[i] ) return i;
+  }
+  return -1;
+}
+
+/*
+** Begin constructing a new table representation in memory.  This is
+** the first of several action routines that get called in response
+** to a CREATE TABLE statement.  In particular, this routine is called
+** after seeing tokens "CREATE" and "TABLE" and the table name. The isTemp
+** flag is true if the table should be stored in the auxiliary database
+** file instead of in the main database file.  This is normally the case
+** when the "TEMP" or "TEMPORARY" keyword occurs in between
+** CREATE and TABLE.
+**
+** The new table record is initialized and put in pParse->pNewTable.
+** As more of the CREATE TABLE statement is parsed, additional action
+** routines will be called to add more information to this record.
+** At the end of the CREATE TABLE statement, the sqlite3EndTable() routine
+** is called to complete the construction of the new table record.
+*/ +SQLITE_PRIVATE void sqlite3StartTable( + Parse *pParse, /* Parser context */ + Token *pName1, /* First part of the name of the table or view */ + Token *pName2, /* Second part of the name of the table or view */ + int isTemp, /* True if this is a TEMP table */ + int isView, /* True if this is a VIEW */ + int isVirtual, /* True if this is a VIRTUAL table */ + int noErr /* Do nothing if table already exists */ +){ + Table *pTable; + char *zName = 0; /* The name of the new table */ + sqlite3 *db = pParse->db; + Vdbe *v; + int iDb; /* Database number to create the table in */ + Token *pName; /* Unqualified name of the table to create */ + + if( db->init.busy && db->init.newTnum==1 ){ + /* Special case: Parsing the sqlite_master or sqlite_temp_master schema */ + iDb = db->init.iDb; + zName = sqlite3DbStrDup(db, SCHEMA_TABLE(iDb)); + pName = pName1; + }else{ + /* The common case */ + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); + if( iDb<0 ) return; + if( !OMIT_TEMPDB && isTemp && pName2->n>0 && iDb!=1 ){ + /* If creating a temp table, the name may not be qualified. Unless + ** the database name is "temp" anyway. */ + sqlite3ErrorMsg(pParse, "temporary table name must be unqualified"); + return; + } + if( !OMIT_TEMPDB && isTemp ) iDb = 1; + zName = sqlite3NameFromToken(db, pName); + } + pParse->sNameToken = *pName; + if( zName==0 ) return; + if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ + goto begin_table_error; + } + if( db->init.iDb==1 ) isTemp = 1; +#ifndef SQLITE_OMIT_AUTHORIZATION + assert( isTemp==0 || isTemp==1 ); + assert( isView==0 || isView==1 ); + { + static const u8 aCode[] = { + SQLITE_CREATE_TABLE, + SQLITE_CREATE_TEMP_TABLE, + SQLITE_CREATE_VIEW, + SQLITE_CREATE_TEMP_VIEW + }; + char *zDb = db->aDb[iDb].zDbSName; + if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){ + goto begin_table_error; + } + if( !isVirtual && sqlite3AuthCheck(pParse, (int)aCode[isTemp+2*isView], + zName, 0, zDb) ){ + goto begin_table_error; + } + } +#endif + + /* Make sure the new table name does not collide with an existing + ** index or table name in the same database. Issue an error message if + ** it does. The exception is if the statement being parsed was passed + ** to an sqlite3_declare_vtab() call. In that case only the column names + ** and types will be used, so there is no need to test for namespace + ** collisions. 
+  */
+  if( !IN_DECLARE_VTAB ){
+    char *zDb = db->aDb[iDb].zDbSName;
+    if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+      goto begin_table_error;
+    }
+    pTable = sqlite3FindTable(db, zName, zDb);
+    if( pTable ){
+      if( !noErr ){
+        sqlite3ErrorMsg(pParse, "table %T already exists", pName);
+      }else{
+        assert( !db->init.busy || CORRUPT_DB );
+        sqlite3CodeVerifySchema(pParse, iDb);
+      }
+      goto begin_table_error;
+    }
+    if( sqlite3FindIndex(db, zName, zDb)!=0 ){
+      sqlite3ErrorMsg(pParse, "there is already an index named %s", zName);
+      goto begin_table_error;
+    }
+  }
+
+  pTable = sqlite3DbMallocZero(db, sizeof(Table));
+  if( pTable==0 ){
+    assert( db->mallocFailed );
+    pParse->rc = SQLITE_NOMEM_BKPT;
+    pParse->nErr++;
+    goto begin_table_error;
+  }
+  pTable->zName = zName;
+  pTable->iPKey = -1;
+  pTable->pSchema = db->aDb[iDb].pSchema;
+  pTable->nTabRef = 1;
+  pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+  assert( pParse->pNewTable==0 );
+  pParse->pNewTable = pTable;
+
+  /* If this is the magic sqlite_sequence table used by autoincrement,
+  ** then record a pointer to this table in the main database structure
+  ** so that INSERT can find the table easily.
+  */
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+  if( !pParse->nested && strcmp(zName, "sqlite_sequence")==0 ){
+    assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+    pTable->pSchema->pSeqTab = pTable;
+  }
+#endif
+
+  /* Begin generating the code that will insert the table record into
+  ** the SQLITE_MASTER table.  Note in particular that we must go ahead
+  ** and allocate the record number for the table entry now, before any
+  ** PRIMARY KEY or UNIQUE keywords are parsed.  Those keywords will cause
+  ** indices to be created and the table record must come before the
+  ** indices.  Hence, the record number for the table must be allocated
+  ** now.
+  */
+  if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){
+    int addr1;
+    int fileFormat;
+    int reg1, reg2, reg3;
+    /* nullRow[] is an OP_Record encoding of a row containing 5 NULLs */
+    static const char nullRow[] = { 6, 0, 0, 0, 0, 0 };
+    sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+    if( isVirtual ){
+      sqlite3VdbeAddOp0(v, OP_VBegin);
+    }
+#endif
+
+    /* If the file format and encoding in the database have not been set,
+    ** set them now.
+    */
+    reg1 = pParse->regRowid = ++pParse->nMem;
+    reg2 = pParse->regRoot = ++pParse->nMem;
+    reg3 = ++pParse->nMem;
+    sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT);
+    sqlite3VdbeUsesBtree(v, iDb);
+    addr1 = sqlite3VdbeAddOp1(v, OP_If, reg3); VdbeCoverage(v);
+    fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ?
+                  1 : SQLITE_MAX_FILE_FORMAT;
+    sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, fileFormat);
+    sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_TEXT_ENCODING, ENC(db));
+    sqlite3VdbeJumpHere(v, addr1);
+
+    /* This just creates a place-holder record in the sqlite_master table.
+    ** The record created does not contain anything yet.  It will be replaced
+    ** by the real entry in code generated at sqlite3EndTable().
+    **
+    ** The rowid for the new entry is left in register pParse->regRowid.
+    ** The root page number of the new table is left in reg pParse->regRoot.
+    ** The rowid and root page number values are needed by the code that
+    ** sqlite3EndTable will generate.
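+    **
+    ** As a rough, illustrative sketch (opcode addresses invented), the
+    ** bootstrap sequence generated here and just below is:
+    **
+    **    OP_CreateTable iDb  reg2      -- allocate root page; number -> reg2
+    **    OP_OpenWrite   0    1    iDb  -- cursor 0 on sqlite_master
+    **    OP_NewRowid    0    reg1      -- rowid for the placeholder -> reg1
+    **    OP_Blob        6    reg3      -- record of 5 NULLs
+    **    OP_Insert      0    reg3 reg1 -- write the placeholder row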
+    */
+#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
+    if( isView || isVirtual ){
+      sqlite3VdbeAddOp2(v, OP_Integer, 0, reg2);
+    }else
+#endif
+    {
+      pParse->addrCrTab = sqlite3VdbeAddOp2(v, OP_CreateTable, iDb, reg2);
+    }
+    sqlite3OpenMasterTable(pParse, iDb);
+    sqlite3VdbeAddOp2(v, OP_NewRowid, 0, reg1);
+    sqlite3VdbeAddOp4(v, OP_Blob, 6, reg3, 0, nullRow, P4_STATIC);
+    sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1);
+    sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
+    sqlite3VdbeAddOp0(v, OP_Close);
+  }
+
+  /* Normal (non-error) return. */
+  return;
+
+  /* If an error occurs, we jump here */
+begin_table_error:
+  sqlite3DbFree(db, zName);
+  return;
+}
+
+/* Set properties of a table column based on the (magical)
+** name of the column.
+*/
+#if SQLITE_ENABLE_HIDDEN_COLUMNS
+SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){
+  if( sqlite3_strnicmp(pCol->zName, "__hidden__", 10)==0 ){
+    pCol->colFlags |= COLFLAG_HIDDEN;
+  }else if( pTab && pCol!=pTab->aCol && (pCol[-1].colFlags & COLFLAG_HIDDEN) ){
+    pTab->tabFlags |= TF_OOOHidden;
+  }
+}
+#endif
+
+
+/*
+** Add a new column to the table currently being constructed.
+**
+** The parser calls this routine once for each column declaration
+** in a CREATE TABLE statement.  sqlite3StartTable() gets called
+** first to get things going.  Then this routine is called for each
+** column.
+*/
+SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName, Token *pType){
+  Table *p;
+  int i;
+  char *z;
+  char *zType;
+  Column *pCol;
+  sqlite3 *db = pParse->db;
+  if( (p = pParse->pNewTable)==0 ) return;
+#if SQLITE_MAX_COLUMN
+  if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){
+    sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName);
+    return;
+  }
+#endif
+  z = sqlite3DbMallocRaw(db, pName->n + pType->n + 2);
+  if( z==0 ) return;
+  memcpy(z, pName->z, pName->n);
+  z[pName->n] = 0;
+  sqlite3Dequote(z);
+  for(i=0; i<p->nCol; i++){
+    if( sqlite3_stricmp(z, p->aCol[i].zName)==0 ){
+      sqlite3ErrorMsg(pParse, "duplicate column name: %s", z);
+      sqlite3DbFree(db, z);
+      return;
+    }
+  }
+  if( (p->nCol & 0x7)==0 ){
+    Column *aNew;
+    aNew = sqlite3DbRealloc(db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0]));
+    if( aNew==0 ){
+      sqlite3DbFree(db, z);
+      return;
+    }
+    p->aCol = aNew;
+  }
+  pCol = &p->aCol[p->nCol];
+  memset(pCol, 0, sizeof(p->aCol[0]));
+  pCol->zName = z;
+  sqlite3ColumnPropertiesFromName(p, pCol);
+ 
+  if( pType->n==0 ){
+    /* If there is no type specified, columns have the default affinity
+    ** 'BLOB'. */
+    pCol->affinity = SQLITE_AFF_BLOB;
+    pCol->szEst = 1;
+  }else{
+    zType = z + sqlite3Strlen30(z) + 1;
+    memcpy(zType, pType->z, pType->n);
+    zType[pType->n] = 0;
+    sqlite3Dequote(zType);
+    pCol->affinity = sqlite3AffinityType(zType, &pCol->szEst);
+    pCol->colFlags |= COLFLAG_HASTYPE;
+  }
+  p->nCol++;
+  pParse->constraintName.n = 0;
+}
+
+/*
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement.  A "NOT NULL" constraint has
+** been seen on a column.  This routine sets the notNull flag on
+** the column currently under construction.
+*/
+SQLITE_PRIVATE void sqlite3AddNotNull(Parse *pParse, int onError){
+  Table *p;
+  p = pParse->pNewTable;
+  if( p==0 || NEVER(p->nCol<1) ) return;
+  p->aCol[p->nCol-1].notNull = (u8)onError;
+}
+
+/*
+** Scan the column type name zType (length nType) and return the
+** associated affinity type.
+**
+** This routine does a case-independent search of zType for the 
+** substrings in the following table.  If one of the substrings is
+** found, the corresponding affinity is returned. If zType contains
+** more than one of the substrings, entries toward the top of 
+** the table take priority. For example, if zType is 'BLOBINT', 
+** SQLITE_AFF_INTEGER is returned.
+**
+** Substring     | Affinity
+** --------------------------------
+** 'INT'         | SQLITE_AFF_INTEGER
+** 'CHAR'        | SQLITE_AFF_TEXT
+** 'CLOB'        | SQLITE_AFF_TEXT
+** 'TEXT'        | SQLITE_AFF_TEXT
+** 'BLOB'        | SQLITE_AFF_BLOB
+** 'REAL'        | SQLITE_AFF_REAL
+** 'FLOA'        | SQLITE_AFF_REAL
+** 'DOUB'        | SQLITE_AFF_REAL
+**
+** If none of the substrings in the above table are found,
+** SQLITE_AFF_NUMERIC is returned.
+*/
+SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, u8 *pszEst){
+  u32 h = 0;
+  char aff = SQLITE_AFF_NUMERIC;
+  const char *zChar = 0;
+
+  assert( zIn!=0 );
+  while( zIn[0] ){
+    h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
+    zIn++;
+    if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){             /* CHAR */
+      aff = SQLITE_AFF_TEXT;
+      zChar = zIn;
+    }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){       /* CLOB */
+      aff = SQLITE_AFF_TEXT;
+    }else if( h==(('t'<<24)+('e'<<16)+('x'<<8)+'t') ){       /* TEXT */
+      aff = SQLITE_AFF_TEXT;
+    }else if( h==(('b'<<24)+('l'<<16)+('o'<<8)+'b')          /* BLOB */
+        && (aff==SQLITE_AFF_NUMERIC || aff==SQLITE_AFF_REAL) ){
+      aff = SQLITE_AFF_BLOB;
+      if( zIn[0]=='(' ) zChar = zIn;
+#ifndef SQLITE_OMIT_FLOATING_POINT
+    }else if( h==(('r'<<24)+('e'<<16)+('a'<<8)+'l')          /* REAL */
+        && aff==SQLITE_AFF_NUMERIC ){
+      aff = SQLITE_AFF_REAL;
+    }else if( h==(('f'<<24)+('l'<<16)+('o'<<8)+'a')          /* FLOA */
+        && aff==SQLITE_AFF_NUMERIC ){
+      aff = SQLITE_AFF_REAL;
+    }else if( h==(('d'<<24)+('o'<<16)+('u'<<8)+'b')          /* DOUB */
+        && aff==SQLITE_AFF_NUMERIC ){
+      aff = SQLITE_AFF_REAL;
+#endif
+    }else if( (h&0x00FFFFFF)==(('i'<<16)+('n'<<8)+'t') ){    /* INT */
+      aff = SQLITE_AFF_INTEGER;
+      break;
+    }
+  }
+
+  /* If pszEst is not NULL, store an estimate of the field size.  The
+  ** estimate is scaled so that the size of an integer is 1.  */
+  if( pszEst ){
+    *pszEst = 1;   /* default size is approx 4 bytes */
+    if( aff<SQLITE_AFF_NUMERIC ){
+      if( zChar ){
+        while( zChar[0] ){
+          if( sqlite3Isdigit(zChar[0]) ){
+            int v = 0;
+            sqlite3GetInt32(zChar, &v);
+            v = v/4 + 1;
+            if( v>255 ) v = 255;
+            *pszEst = v; /* BLOB(k), VARCHAR(k), CHAR(k) -> r=(k/4+1) */
+            break;
+          }
+          zChar++;
+        }
+      }else{
+        *pszEst = 5;   /* BLOB, TEXT, CLOB -> r=5  (approx 20 bytes)*/
+      }
+    }
+  }
+  return aff;
+}
+
+/*
+** The expression is the default value for the most recently added column
+** of the table currently under construction.
+**
+** Default value expressions must be constant.  Raise an exception if this
+** is not the case.
+**
+** This routine is called by the parser while in the middle of
+** parsing a CREATE TABLE statement.
+*/
+SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse *pParse, ExprSpan *pSpan){
+  Table *p;
+  Column *pCol;
+  sqlite3 *db = pParse->db;
+  p = pParse->pNewTable;
+  if( p!=0 ){
+    pCol = &(p->aCol[p->nCol-1]);
+    if( !sqlite3ExprIsConstantOrFunction(pSpan->pExpr, db->init.busy) ){
+      sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant",
+          pCol->zName);
+    }else{
+      /* A copy of pExpr is used instead of the original, as pExpr contains
+      ** tokens that point to volatile memory. The 'span' of the expression
+      ** is required by pragma table_info.
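+      **
+      ** For example, given
+      **
+      **    CREATE TABLE t1(a INTEGER DEFAULT 1+2);
+      **
+      ** the stored default is the span "1+2" wrapped around the parsed
+      ** expression, so "PRAGMA table_info(t1)" can report dflt_value as
+      ** the original text '1+2' even though INSERT evaluates it to 3.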
+      */
+      Expr x;
+      sqlite3ExprDelete(db, pCol->pDflt);
+      memset(&x, 0, sizeof(x));
+      x.op = TK_SPAN;
+      x.u.zToken = sqlite3DbStrNDup(db, (char*)pSpan->zStart,
+                                    (int)(pSpan->zEnd - pSpan->zStart));
+      x.pLeft = pSpan->pExpr;
+      x.flags = EP_Skip;
+      pCol->pDflt = sqlite3ExprDup(db, &x, EXPRDUP_REDUCE);
+      sqlite3DbFree(db, x.u.zToken);
+    }
+  }
+  sqlite3ExprDelete(db, pSpan->pExpr);
+}
+
+/*
+** Backwards Compatibility Hack:
+** 
+** Historical versions of SQLite accepted strings as column names in
+** indexes and PRIMARY KEY constraints and in UNIQUE constraints.  Example:
+**
+**     CREATE TABLE xyz(a,b,c,d,e,PRIMARY KEY('a'),UNIQUE('b','c' COLLATE trim));
+**     CREATE INDEX abc ON xyz('c','d' DESC,'e' COLLATE nocase DESC);
+**
+** This is goofy.  But to preserve backwards compatibility we continue to
+** accept it.  This routine does the necessary conversion.  It converts
+** the expression given in its argument from a TK_STRING into a TK_ID
+** if the expression is just a TK_STRING with an optional COLLATE clause.
+** If the expression is anything other than TK_STRING, the expression is
+** unchanged.
+*/
+static void sqlite3StringToId(Expr *p){
+  if( p->op==TK_STRING ){
+    p->op = TK_ID;
+  }else if( p->op==TK_COLLATE && p->pLeft->op==TK_STRING ){
+    p->pLeft->op = TK_ID;
+  }
+}
+
+/*
+** Designate the PRIMARY KEY for the table.  pList is a list of names 
+** of columns that form the primary key.  If pList is NULL, then the
+** most recently added column of the table is the primary key.
+**
+** A table can have at most one primary key.  If the table already has
+** a primary key (and this is the second primary key) then create an
+** error.
+**
+** If the PRIMARY KEY is on a single column whose datatype is INTEGER,
+** then we will try to use that column as the rowid.  Set the Table.iPKey
+** field of the table under construction to be the index of the
+** INTEGER PRIMARY KEY column.  Table.iPKey is set to -1 if there is
+** no INTEGER PRIMARY KEY.
+**
+** If the key is not an INTEGER PRIMARY KEY, then create a unique
+** index for the key.  No index is created for INTEGER PRIMARY KEYs.
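+**
+** For example:
+**
+**    CREATE TABLE t1(a INTEGER PRIMARY KEY, b);
+**        -- "a" becomes an alias for the rowid; iPKey is set and no
+**        -- extra index is created
+**    CREATE TABLE t2(a INT PRIMARY KEY, b);
+**        -- "INT" is not exactly "INTEGER", so "a" is an ordinary column
+**        -- backed by an automatic UNIQUE index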
+*/
+SQLITE_PRIVATE void sqlite3AddPrimaryKey(
+  Parse *pParse,    /* Parsing context */
+  ExprList *pList,  /* List of field names to be indexed */
+  int onError,      /* What to do with a uniqueness conflict */
+  int autoInc,      /* True if the AUTOINCREMENT keyword is present */
+  int sortOrder     /* SQLITE_SO_ASC or SQLITE_SO_DESC */
+){
+  Table *pTab = pParse->pNewTable;
+  Column *pCol = 0;
+  int iCol = -1, i;
+  int nTerm;
+  if( pTab==0 ) goto primary_key_exit;
+  if( pTab->tabFlags & TF_HasPrimaryKey ){
+    sqlite3ErrorMsg(pParse,
+      "table \"%s\" has more than one primary key", pTab->zName);
+    goto primary_key_exit;
+  }
+  pTab->tabFlags |= TF_HasPrimaryKey;
+  if( pList==0 ){
+    iCol = pTab->nCol - 1;
+    pCol = &pTab->aCol[iCol];
+    pCol->colFlags |= COLFLAG_PRIMKEY;
+    nTerm = 1;
+  }else{
+    nTerm = pList->nExpr;
+    for(i=0; i<nTerm; i++){
+      Expr *pCExpr = sqlite3ExprSkipCollate(pList->a[i].pExpr);
+      assert( pCExpr!=0 );
+      sqlite3StringToId(pCExpr);
+      if( pCExpr->op==TK_ID ){
+        const char *zCName = pCExpr->u.zToken;
+        for(iCol=0; iCol<pTab->nCol; iCol++){
+          if( sqlite3StrICmp(zCName, pTab->aCol[iCol].zName)==0 ){
+            pCol = &pTab->aCol[iCol];
+            pCol->colFlags |= COLFLAG_PRIMKEY;
+            break;
+          }
+        }
+      }
+    }
+  }
+  if( nTerm==1
+   && pCol
+   && sqlite3StrICmp(sqlite3ColumnType(pCol,""), "INTEGER")==0
+   && sortOrder!=SQLITE_SO_DESC
+  ){
+    pTab->iPKey = iCol;
+    pTab->keyConf = (u8)onError;
+    assert( autoInc==0 || autoInc==1 );
+    pTab->tabFlags |= autoInc*TF_Autoincrement;
+    if( pList ) pParse->iPkSortOrder = pList->a[0].sortOrder;
+  }else if( autoInc ){
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+    sqlite3ErrorMsg(pParse, "AUTOINCREMENT is only allowed on an "
+       "INTEGER PRIMARY KEY");
+#endif
+  }else{
+    sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0,
+                           0, sortOrder, 0, SQLITE_IDXTYPE_PRIMARYKEY);
+    pList = 0;
+  }
+
+primary_key_exit:
+  sqlite3ExprListDelete(pParse->db, pList);
+  return;
+}
+
+/*
+** Add a new CHECK constraint to the table currently under construction.
+*/
+SQLITE_PRIVATE void sqlite3AddCheckConstraint(
+  Parse *pParse,    /* Parsing context */
+  Expr *pCheckExpr  /* The check expression */
+){
+#ifndef SQLITE_OMIT_CHECK
+  Table *pTab = pParse->pNewTable;
+  sqlite3 *db = pParse->db;
+  if( pTab && !IN_DECLARE_VTAB
+   && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt)
+  ){
+    pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr);
+    if( pParse->constraintName.n ){
+      sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1);
+    }
+  }else
+#endif
+  {
+    sqlite3ExprDelete(pParse->db, pCheckExpr);
+  }
+}
+
+/*
+** Set the collation function of the most recently parsed table column
+** to the CollSeq given.
+*/
+SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){
+  Table *p;
+  int i;
+  char *zColl;              /* Dequoted name of collation sequence */
+  sqlite3 *db;
+
+  if( (p = pParse->pNewTable)==0 ) return;
+  i = p->nCol-1;
+  db = pParse->db;
+  zColl = sqlite3NameFromToken(db, pToken);
+  if( !zColl ) return;
+
+  if( sqlite3LocateCollSeq(pParse, zColl) ){
+    Index *pIdx;
+    sqlite3DbFree(db, p->aCol[i].zColl);
+    p->aCol[i].zColl = zColl;
+  
+    /* If the column is declared as "<name> PRIMARY KEY COLLATE <type>",
+    ** then an index may have been created on this column before the
+    ** collation type was added. Correct this if it is the case.
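+    **
+    ** For example, in
+    **
+    **    CREATE TABLE t1(a TEXT PRIMARY KEY COLLATE nocase);
+    **
+    ** the PRIMARY KEY index is built while "a" still carries the default
+    ** BINARY collation, so the loop below repoints the index at nocase.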
+    */
+    for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){
+      assert( pIdx->nKeyCol==1 );
+      if( pIdx->aiColumn[0]==i ){
+        pIdx->azColl[0] = p->aCol[i].zColl;
+      }
+    }
+  }else{
+    sqlite3DbFree(db, zColl);
+  }
+}
+
+/*
+** This function returns the collation sequence for database native text
+** encoding identified by the string zName, length nName.
+**
+** If the requested collation sequence is not available, or not available
+** in the database native encoding, the collation factory is invoked to
+** request it. If the collation factory does not supply such a sequence,
+** and the sequence is available in another text encoding, then that is
+** returned instead.
+**
+** If no versions of the requested collations sequence are available, or
+** another error occurs, NULL is returned and an error message written into
+** pParse.
+**
+** This routine is a wrapper around sqlite3FindCollSeq().  This routine
+** invokes the collation factory if the named collation cannot be found
+** and generates an error message.
+**
+** See also: sqlite3FindCollSeq(), sqlite3GetCollSeq()
+*/
+SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName){
+  sqlite3 *db = pParse->db;
+  u8 enc = ENC(db);
+  u8 initbusy = db->init.busy;
+  CollSeq *pColl;
+
+  pColl = sqlite3FindCollSeq(db, enc, zName, initbusy);
+  if( !initbusy && (!pColl || !pColl->xCmp) ){
+    pColl = sqlite3GetCollSeq(pParse, enc, pColl, zName);
+  }
+
+  return pColl;
+}
+
+
+/*
+** Generate code that will increment the schema cookie.
+**
+** The schema cookie is used to determine when the schema for the
+** database changes.  After each schema change, the cookie value
+** changes.  When a process first reads the schema it records the
+** cookie.  Thereafter, whenever it goes to access the database,
+** it checks the cookie to make sure the schema has not changed
+** since it was last read.
+**
+** This plan is not completely bullet-proof.  It is possible for
+** the schema to change multiple times and for the cookie to be
+** set back to prior value.  But schema changes are infrequent
+** and the probability of hitting the same cookie value is only
+** 1 chance in 2^32.  So we're safe enough.
+**
+** IMPLEMENTATION-OF: R-34230-56049 SQLite automatically increments
+** the schema-version whenever the schema changes.
+*/
+SQLITE_PRIVATE void sqlite3ChangeCookie(Parse *pParse, int iDb){
+  sqlite3 *db = pParse->db;
+  Vdbe *v = pParse->pVdbe;
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+  sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_SCHEMA_VERSION, 
+                    db->aDb[iDb].pSchema->schema_cookie+1);
+}
+
+/*
+** Measure the number of characters needed to output the given
+** identifier.  The number returned includes any quotes used
+** but does not include the null terminator.
+**
+** The estimate is conservative.  It might be larger than what is
+** really needed.
+*/
+static int identLength(const char *z){
+  int n;
+  for(n=0; *z; n++, z++){
+    if( *z=='"' ){ n++; }
+  }
+  return n + 2;
+}
+
+/*
+** The first parameter is a pointer to an output buffer. The second 
+** parameter is a pointer to an integer that contains the offset at
+** which to write into the output buffer. This function copies the
+** nul-terminated string pointed to by the third parameter, zSignedIdent,
+** to the specified offset in the buffer and updates *pIdx to refer
+** to the first byte after the last byte written before returning.
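+**
+** For example (hypothetical buffer contents):
+**
+**    identPut(z, &i, "t1");      appends   t1         (no quoting needed)
+**    identPut(z, &i, "order");   appends   "order"    (SQL keyword)
+**    identPut(z, &i, "a b");     appends   "a b"      (not alphanumeric)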
+** 
+** If the string zSignedIdent consists entirely of alpha-numeric
+** characters, does not begin with a digit and is not an SQL keyword,
+** then it is copied to the output buffer exactly as it is. Otherwise,
+** it is quoted using double-quotes.
+*/
+static void identPut(char *z, int *pIdx, char *zSignedIdent){
+  unsigned char *zIdent = (unsigned char*)zSignedIdent;
+  int i, j, needQuote;
+  i = *pIdx;
+
+  for(j=0; zIdent[j]; j++){
+    if( !sqlite3Isalnum(zIdent[j]) && zIdent[j]!='_' ) break;
+  }
+  needQuote = sqlite3Isdigit(zIdent[0])
+            || sqlite3KeywordCode(zIdent, j)!=TK_ID
+            || zIdent[j]!=0
+            || j==0;
+
+  if( needQuote ) z[i++] = '"';
+  for(j=0; zIdent[j]; j++){
+    z[i++] = zIdent[j];
+    if( zIdent[j]=='"' ) z[i++] = '"';
+  }
+  if( needQuote ) z[i++] = '"';
+  z[i] = 0;
+  *pIdx = i;
+}
+
+/*
+** Generate a CREATE TABLE statement appropriate for the given
+** table.  Memory to hold the text of the statement is obtained
+** from sqliteMalloc() and must be freed by the calling function.
+*/
+static char *createTableStmt(sqlite3 *db, Table *p){
+  int i, k, n;
+  char *zStmt;
+  char *zSep, *zSep2, *zEnd;
+  Column *pCol;
+  n = 0;
+  for(pCol = p->aCol, i=0; i<p->nCol; i++, pCol++){
+    n += identLength(pCol->zName) + 5;
+  }
+  n += identLength(p->zName);
+  if( n<50 ){ 
+    zSep = "";
+    zSep2 = ",";
+    zEnd = ")";
+  }else{
+    zSep = "\n  ";
+    zSep2 = ",\n  ";
+    zEnd = "\n)";
+  }
+  n += 35 + 6*p->nCol;
+  zStmt = sqlite3DbMallocRaw(0, n);
+  if( zStmt==0 ){
+    sqlite3OomFault(db);
+    return 0;
+  }
+  sqlite3_snprintf(n, zStmt, "CREATE TABLE ");
+  k = sqlite3Strlen30(zStmt);
+  identPut(zStmt, &k, p->zName);
+  zStmt[k++] = '(';
+  for(pCol=p->aCol, i=0; i<p->nCol; i++, pCol++){
+    static const char * const azType[] = {
+        /* SQLITE_AFF_BLOB    */ "",
+        /* SQLITE_AFF_TEXT    */ " TEXT",
+        /* SQLITE_AFF_NUMERIC */ " NUM",
+        /* SQLITE_AFF_INTEGER */ " INT",
+        /* SQLITE_AFF_REAL    */ " REAL"
+    };
+    int len;
+    const char *zType;
+
+    sqlite3_snprintf(n-k, &zStmt[k], zSep);
+    k += sqlite3Strlen30(&zStmt[k]);
+    zSep = zSep2;
+    identPut(zStmt, &k, pCol->zName);
+    assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 );
+    assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) );
+    testcase( pCol->affinity==SQLITE_AFF_BLOB );
+    testcase( pCol->affinity==SQLITE_AFF_TEXT );
+    testcase( pCol->affinity==SQLITE_AFF_NUMERIC );
+    testcase( pCol->affinity==SQLITE_AFF_INTEGER );
+    testcase( pCol->affinity==SQLITE_AFF_REAL );
+    
+    zType = azType[pCol->affinity - SQLITE_AFF_BLOB];
+    len = sqlite3Strlen30(zType);
+    assert( pCol->affinity==SQLITE_AFF_BLOB
+            || pCol->affinity==sqlite3AffinityType(zType, 0) );
+    memcpy(&zStmt[k], zType, len);
+    k += len;
+    assert( k<=n );
+  }
+  sqlite3_snprintf(n-k, &zStmt[k], "%s", zEnd);
+  return zStmt;
+}
+
+/*
+** Resize an Index object to hold N columns total.  Return SQLITE_OK
+** on success and SQLITE_NOMEM on an OOM error.
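+**
+** The three per-column arrays travel in a single allocation, laid out
+** roughly as:
+**
+**    [ char *azColl[N] ][ i16 aiColumn[N] ][ u8 aSortOrder[N] ]
+**
+** so the routine copies each old array into its slice of the new buffer
+** and repoints the Index fields at those slices.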
+*/
+static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){
+  char *zExtra;
+  int nByte;
+  if( pIdx->nColumn>=N ) return SQLITE_OK;
+  assert( pIdx->isResized==0 );
+  nByte = (sizeof(char*) + sizeof(i16) + 1)*N;
+  zExtra = sqlite3DbMallocZero(db, nByte);
+  if( zExtra==0 ) return SQLITE_NOMEM_BKPT;
+  memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn);
+  pIdx->azColl = (const char**)zExtra;
+  zExtra += sizeof(char*)*N;
+  memcpy(zExtra, pIdx->aiColumn, sizeof(i16)*pIdx->nColumn);
+  pIdx->aiColumn = (i16*)zExtra;
+  zExtra += sizeof(i16)*N;
+  memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn);
+  pIdx->aSortOrder = (u8*)zExtra;
+  pIdx->nColumn = N;
+  pIdx->isResized = 1;
+  return SQLITE_OK;
+}
+
+/*
+** Estimate the total row width for a table.
+*/
+static void estimateTableWidth(Table *pTab){
+  unsigned wTable = 0;
+  const Column *pTabCol;
+  int i;
+  for(i=pTab->nCol, pTabCol=pTab->aCol; i>0; i--, pTabCol++){
+    wTable += pTabCol->szEst;
+  }
+  if( pTab->iPKey<0 ) wTable++;
+  pTab->szTabRow = sqlite3LogEst(wTable*4);
+}
+
+/*
+** Estimate the average size of a row for an index.
+*/
+static void estimateIndexWidth(Index *pIdx){
+  unsigned wIndex = 0;
+  int i;
+  const Column *aCol = pIdx->pTable->aCol;
+  for(i=0; i<pIdx->nColumn; i++){
+    i16 x = pIdx->aiColumn[i];
+    assert( x<pIdx->pTable->nCol );
+    wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst;
+  }
+  pIdx->szIdxRow = sqlite3LogEst(wIndex*4);
+}
+
+/* Return true if value x is found in any of the first nCol entries of
+** aiCol[].
+*/
+static int hasColumn(const i16 *aiCol, int nCol, int x){
+  while( nCol-- > 0 ) if( x==*(aiCol++) ) return 1;
+  return 0;
+}
+
+/*
+** This routine runs at the end of parsing a CREATE TABLE statement that
+** has a WITHOUT ROWID clause.  The job of this routine is to convert both
+** internal schema data structures and the generated VDBE code so that they
+** are appropriate for a WITHOUT ROWID table instead of a rowid table.
+** Changes include:
+**
+**     (1)  Set all columns of the PRIMARY KEY schema object to be NOT NULL.
+**     (2)  Convert the OP_CreateTable into an OP_CreateIndex.  There is
+**          no rowid btree for a WITHOUT ROWID.  Instead, the canonical
+**          data storage is a covering index btree.
+**     (3)  Bypass the creation of the sqlite_master table entry
+**          for the PRIMARY KEY as the primary key index is now
+**          identified by the sqlite_master table entry of the table itself.
+**     (4)  Set the Index.tnum of the PRIMARY KEY Index object in the
+**          schema to the rootpage from the main table.
+**     (5)  Add all table columns to the PRIMARY KEY Index object
+**          so that the PRIMARY KEY is a covering index.  The surplus
+**          columns are part of KeyInfo.nXField and are not used for
+**          sorting or lookup or uniqueness checks.
+**     (6)  Replace the rowid tail on all automatically
+**          generated UNIQUE indices with the PRIMARY KEY columns.
+**
+** For virtual tables, only (1) is performed.
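+**
+** For example, with
+**
+**    CREATE TABLE t1(a,b,c, PRIMARY KEY(b,a)) WITHOUT ROWID;
+**
+** the table btree is keyed on (b,a), the PRIMARY KEY Index object becomes
+** a covering index over (b,a,c), columns b and a are forced NOT NULL, and
+** no separate primary-key btree or sqlite_master row is created for it.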
+*/
+static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
+  Index *pIdx;
+  Index *pPk;
+  int nPk;
+  int i, j;
+  sqlite3 *db = pParse->db;
+  Vdbe *v = pParse->pVdbe;
+
+  /* Mark every PRIMARY KEY column as NOT NULL (except for imposter tables)
+  */
+  if( !db->init.imposterTable ){
+    for(i=0; i<pTab->nCol; i++){
+      if( (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 ){
+        pTab->aCol[i].notNull = OE_Abort;
+      }
+    }
+  }
+
+  /* The remaining transformations only apply to b-tree tables, not to
+  ** virtual tables */
+  if( IN_DECLARE_VTAB ) return;
+
+  /* Convert the OP_CreateTable opcode that would normally create the
+  ** root-page for the table into an OP_CreateIndex opcode.  The index
+  ** created will become the PRIMARY KEY index.
+  */
+  if( pParse->addrCrTab ){
+    assert( v );
+    sqlite3VdbeChangeOpcode(v, pParse->addrCrTab, OP_CreateIndex);
+  }
+
+  /* Locate the PRIMARY KEY index.  Or, if this table was originally
+  ** an INTEGER PRIMARY KEY table, create a new PRIMARY KEY index. 
+  */
+  if( pTab->iPKey>=0 ){
+    ExprList *pList;
+    Token ipkToken;
+    sqlite3TokenInit(&ipkToken, pTab->aCol[pTab->iPKey].zName);
+    pList = sqlite3ExprListAppend(pParse, 0, 
+                  sqlite3ExprAlloc(db, TK_ID, &ipkToken, 0));
+    if( pList==0 ) return;
+    pList->a[0].sortOrder = pParse->iPkSortOrder;
+    assert( pParse->pNewTable==pTab );
+    sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0,
+                       SQLITE_IDXTYPE_PRIMARYKEY);
+    if( db->mallocFailed ) return;
+    pPk = sqlite3PrimaryKeyIndex(pTab);
+    pTab->iPKey = -1;
+  }else{
+    pPk = sqlite3PrimaryKeyIndex(pTab);
+
+    /* Bypass the creation of the PRIMARY KEY btree and the sqlite_master
+    ** table entry. This is only required if currently generating VDBE
+    ** code for a CREATE TABLE (not when parsing one as part of reading
+    ** a database schema).  */
+    if( v ){
+      assert( db->init.busy==0 );
+      sqlite3VdbeChangeOpcode(v, pPk->tnum, OP_Goto);
+    }
+
+    /*
+    ** Remove all redundant columns from the PRIMARY KEY.  For example, change
+    ** "PRIMARY KEY(a,b,a,b,c,b,c,d)" into just "PRIMARY KEY(a,b,c,d)".  Later
+    ** code assumes the PRIMARY KEY contains no repeated columns.
+    */
+    for(i=j=1; i<pPk->nKeyCol; i++){
+      if( hasColumn(pPk->aiColumn, j, pPk->aiColumn[i]) ){
+        pPk->nColumn--;
+      }else{
+        pPk->aiColumn[j++] = pPk->aiColumn[i];
+      }
+    }
+    pPk->nKeyCol = j;
+  }
+  assert( pPk!=0 );
+  pPk->isCovering = 1;
+  if( !db->init.imposterTable ) pPk->uniqNotNull = 1;
+  nPk = pPk->nKeyCol;
+
+  /* The root page of the PRIMARY KEY is the table root page */
+  pPk->tnum = pTab->tnum;
+
+  /* Update the in-memory representation of all UNIQUE indices by converting
+  ** the final rowid column into one or more columns of the PRIMARY KEY.
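+  **
+  ** Continuing the example above, an automatic index for "UNIQUE(c)" on
+  ** t1 would have ended in a rowid column in a rowid table; here that
+  ** tail is replaced by the primary-key columns, giving a key of (c,b,a).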
+  */
+  for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+    int n;
+    if( IsPrimaryKeyIndex(pIdx) ) continue;
+    for(i=n=0; i<nPk; i++){
+      if( !hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ) n++;
+    }
+    if( n==0 ){
+      /* This index is a superset of the primary key */
+      pIdx->nColumn = pIdx->nKeyCol;
+      continue;
+    }
+    if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return;
+    for(i=0, j=pIdx->nKeyCol; i<nPk; i++){
+      if( !hasColumn(pIdx->aiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ){
+        pIdx->aiColumn[j] = pPk->aiColumn[i];
+        pIdx->azColl[j] = pPk->azColl[i];
+        j++;
+      }
+    }
+    assert( pIdx->nColumn>=pIdx->nKeyCol+n );
+    assert( pIdx->nColumn>=j );
+  }
+
+  /* Add all table columns to the PRIMARY KEY index
+  */
+  if( nPk<pTab->nCol ){
+    if( resizeIndexObject(db, pPk, pTab->nCol) ) return;
+    for(i=0, j=nPk; i<pTab->nCol; i++){
+      if( !hasColumn(pPk->aiColumn, j, i) ){
+        assert( j<pPk->nColumn );
+        pPk->aiColumn[j] = i;
+        pPk->azColl[j] = sqlite3StrBINARY;
+        j++;
+      }
+    }
+    assert( pPk->nColumn==j );
+    assert( pTab->nCol==j );
+  }else{
+    pPk->nColumn = pTab->nCol;
+  }
+}
+
+/*
+** This routine is called to report the final ")" that terminates
+** a CREATE TABLE statement.
+**
+** The table structure that other action routines have been building
+** is added to the internal hash tables, assuming no errors have
+** occurred.
+**
+** An entry for the table is made in the master table on disk, unless
+** this is a temporary table or db->init.busy==1.  When db->init.busy==1
+** it means we are reading the sqlite_master table because we just
+** connected to the database or because the sqlite_master table has
+** recently changed, so the entry for this table already exists in
+** the sqlite_master table.  We do not want to create it again.
+**
+** If the pSelect argument is not NULL, it means that this routine
+** was called to create a table generated from a 
+** "CREATE TABLE ... AS SELECT ..." statement.  The column names of
+** the new table will match the result set of the SELECT.
+*/
+SQLITE_PRIVATE void sqlite3EndTable(
+  Parse *pParse,          /* Parse context */
+  Token *pCons,           /* The ',' token after the last column defn. */
+  Token *pEnd,            /* The ')' before options in the CREATE TABLE */
+  u8 tabOpts,             /* Extra table options. Usually 0. */
+  Select *pSelect         /* Select from a "CREATE ... AS SELECT" */
+){
+  Table *p;                 /* The new table */
+  sqlite3 *db = pParse->db; /* The database connection */
+  int iDb;                  /* Database in which the table lives */
+  Index *pIdx;              /* An implied index of the table */
+
+  if( pEnd==0 && pSelect==0 ){
+    return;
+  }
+  assert( !db->mallocFailed );
+  p = pParse->pNewTable;
+  if( p==0 ) return;
+
+  assert( !db->init.busy || !pSelect );
+
+  /* If the db->init.busy is 1 it means we are reading the SQL off the
+  ** "sqlite_master" or "sqlite_temp_master" table on the disk.
+  ** So do not write to the disk again.  Extract the root page number
+  ** for the table from the db->init.newTnum field.  (The page number
+  ** should have been put there by the sqliteOpenCb routine.)
+  **
+  ** If the root page number is 1, that means this is the sqlite_master
+  ** table itself.  So mark it read-only.
+ */ + if( db->init.busy ){ + p->tnum = db->init.newTnum; + if( p->tnum==1 ) p->tabFlags |= TF_Readonly; + } + + /* Special processing for WITHOUT ROWID Tables */ + if( tabOpts & TF_WithoutRowid ){ + if( (p->tabFlags & TF_Autoincrement) ){ + sqlite3ErrorMsg(pParse, + "AUTOINCREMENT not allowed on WITHOUT ROWID tables"); + return; + } + if( (p->tabFlags & TF_HasPrimaryKey)==0 ){ + sqlite3ErrorMsg(pParse, "PRIMARY KEY missing on table %s", p->zName); + }else{ + p->tabFlags |= TF_WithoutRowid | TF_NoVisibleRowid; + convertToWithoutRowidTable(pParse, p); + } + } + + iDb = sqlite3SchemaToIndex(db, p->pSchema); + +#ifndef SQLITE_OMIT_CHECK + /* Resolve names in all CHECK constraint expressions. + */ + if( p->pCheck ){ + sqlite3ResolveSelfReference(pParse, p, NC_IsCheck, 0, p->pCheck); + } +#endif /* !defined(SQLITE_OMIT_CHECK) */ + + /* Estimate the average row size for the table and for all implied indices */ + estimateTableWidth(p); + for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ + estimateIndexWidth(pIdx); + } + + /* If not initializing, then create a record for the new table + ** in the SQLITE_MASTER table of the database. + ** + ** If this is a TEMPORARY table, write the entry into the auxiliary + ** file instead of into the main database file. + */ + if( !db->init.busy ){ + int n; + Vdbe *v; + char *zType; /* "view" or "table" */ + char *zType2; /* "VIEW" or "TABLE" */ + char *zStmt; /* Text of the CREATE TABLE or CREATE VIEW statement */ + + v = sqlite3GetVdbe(pParse); + if( NEVER(v==0) ) return; + + sqlite3VdbeAddOp1(v, OP_Close, 0); + + /* + ** Initialize zType for the new view or table. + */ + if( p->pSelect==0 ){ + /* A regular table */ + zType = "table"; + zType2 = "TABLE"; +#ifndef SQLITE_OMIT_VIEW + }else{ + /* A view */ + zType = "view"; + zType2 = "VIEW"; +#endif + } + + /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT + ** statement to populate the new table. The root-page number for the + ** new table is in register pParse->regRoot. + ** + ** Once the SELECT has been coded by sqlite3Select(), it is in a + ** suitable state to query for the column names and types to be used + ** by the new table. + ** + ** A shared-cache write-lock is not required to write to the new table, + ** as a schema-lock must have already been obtained to create it. Since + ** a schema-lock excludes all other database users, the write-lock would + ** be redundant. 
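+    **
+    ** In outline (a rough sketch only, with an invented label), the code
+    ** generated below is:
+    **
+    **        OP_InitCoroutine regYield 0 addrTop
+    **          ...                       -- SELECT body, yielding rows
+    **        OP_EndCoroutine  regYield
+    **   L:   OP_Yield         regYield   -- get next row, or fall through
+    **        OP_MakeRecord / OP_NewRowid / OP_Insert  -- into cursor 1
+    **        OP_Goto L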
+    */
+    if( pSelect ){
+      SelectDest dest;    /* Where the SELECT should store results */
+      int regYield;       /* Register holding co-routine entry-point */
+      int addrTop;        /* Top of the co-routine */
+      int regRec;         /* A record to be inserted into the new table */
+      int regRowid;       /* Rowid of the next row to insert */
+      int addrInsLoop;    /* Top of the loop for inserting rows */
+      Table *pSelTab;     /* A table that describes the SELECT results */
+
+      regYield = ++pParse->nMem;
+      regRec = ++pParse->nMem;
+      regRowid = ++pParse->nMem;
+      assert(pParse->nTab==1);
+      sqlite3MayAbort(pParse);
+      sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb);
+      sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG);
+      pParse->nTab = 2;
+      addrTop = sqlite3VdbeCurrentAddr(v) + 1;
+      sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
+      sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield);
+      sqlite3Select(pParse, pSelect, &dest);
+      sqlite3VdbeEndCoroutine(v, regYield);
+      sqlite3VdbeJumpHere(v, addrTop - 1);
+      if( pParse->nErr ) return;
+      pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect);
+      if( pSelTab==0 ) return;
+      assert( p->aCol==0 );
+      p->nCol = pSelTab->nCol;
+      p->aCol = pSelTab->aCol;
+      pSelTab->nCol = 0;
+      pSelTab->aCol = 0;
+      sqlite3DeleteTable(db, pSelTab);
+      addrInsLoop = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
+      VdbeCoverage(v);
+      sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec);
+      sqlite3TableAffinity(v, p, 0);
+      sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid);
+      sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid);
+      sqlite3VdbeGoto(v, addrInsLoop);
+      sqlite3VdbeJumpHere(v, addrInsLoop);
+      sqlite3VdbeAddOp1(v, OP_Close, 1);
+    }
+
+    /* Compute the complete text of the CREATE statement */
+    if( pSelect ){
+      zStmt = createTableStmt(db, p);
+    }else{
+      Token *pEnd2 = tabOpts ? &pParse->sLastToken : pEnd;
+      n = (int)(pEnd2->z - pParse->sNameToken.z);
+      if( pEnd2->z[0]!=';' ) n += pEnd2->n;
+      zStmt = sqlite3MPrintf(db, 
+          "CREATE %s %.*s", zType2, n, pParse->sNameToken.z
+      );
+    }
+
+    /* A slot for the record has already been allocated in the 
+    ** SQLITE_MASTER table.  We just need to update that slot with all
+    ** the information we've collected.
+    */
+    sqlite3NestedParse(pParse,
+      "UPDATE %Q.%s "
+         "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q "
+       "WHERE rowid=#%d",
+      db->aDb[iDb].zDbSName, MASTER_NAME,
+      zType,
+      p->zName,
+      p->zName,
+      pParse->regRoot,
+      zStmt,
+      pParse->regRowid
+    );
+    sqlite3DbFree(db, zStmt);
+    sqlite3ChangeCookie(pParse, iDb);
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+    /* Check to see if we need to create an sqlite_sequence table for
+    ** keeping track of autoincrement keys.
+    */
+    if( (p->tabFlags & TF_Autoincrement)!=0 ){
+      Db *pDb = &db->aDb[iDb];
+      assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+      if( pDb->pSchema->pSeqTab==0 ){
+        sqlite3NestedParse(pParse,
+          "CREATE TABLE %Q.sqlite_sequence(name,seq)",
+          pDb->zDbSName
+        );
+      }
+    }
+#endif
+
+    /* Reparse everything to update our internal data structures */
+    sqlite3VdbeAddParseSchemaOp(v, iDb,
+           sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName));
+  }
+
+
+  /* Add the table to the in-memory representation of the database.
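+  **
+  ** By this point the placeholder row written by sqlite3StartTable() has
+  ** been rewritten so that, for example, "CREATE TABLE t1(a,b)" yields a
+  ** schema row of roughly (illustrative rootpage value):
+  **
+  **    type='table', name='t1', tbl_name='t1', rootpage=2,
+  **    sql='CREATE TABLE t1(a,b)'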
+ */ + if( db->init.busy ){ + Table *pOld; + Schema *pSchema = p->pSchema; + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, p); + if( pOld ){ + assert( p==pOld ); /* Malloc must have failed inside HashInsert() */ + sqlite3OomFault(db); + return; + } + pParse->pNewTable = 0; + db->flags |= SQLITE_InternChanges; + +#ifndef SQLITE_OMIT_ALTERTABLE + if( !p->pSelect ){ + const char *zName = (const char *)pParse->sNameToken.z; + int nName; + assert( !pSelect && pCons && pEnd ); + if( pCons->z==0 ){ + pCons = pEnd; + } + nName = (int)((const char *)pCons->z - zName); + p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); + } +#endif + } +} + +#ifndef SQLITE_OMIT_VIEW +/* +** The parser calls this routine in order to create a new VIEW +*/ +SQLITE_PRIVATE void sqlite3CreateView( + Parse *pParse, /* The parsing context */ + Token *pBegin, /* The CREATE token that begins the statement */ + Token *pName1, /* The token that holds the name of the view */ + Token *pName2, /* The token that holds the name of the view */ + ExprList *pCNames, /* Optional list of view column names */ + Select *pSelect, /* A SELECT statement that will become the new view */ + int isTemp, /* TRUE for a TEMPORARY view */ + int noErr /* Suppress error messages if VIEW already exists */ +){ + Table *p; + int n; + const char *z; + Token sEnd; + DbFixer sFix; + Token *pName = 0; + int iDb; + sqlite3 *db = pParse->db; + + if( pParse->nVar>0 ){ + sqlite3ErrorMsg(pParse, "parameters are not allowed in views"); + goto create_view_fail; + } + sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr); + p = pParse->pNewTable; + if( p==0 || pParse->nErr ) goto create_view_fail; + sqlite3TwoPartName(pParse, pName1, pName2, &pName); + iDb = sqlite3SchemaToIndex(db, p->pSchema); + sqlite3FixInit(&sFix, pParse, iDb, "view", pName); + if( sqlite3FixSelect(&sFix, pSelect) ) goto create_view_fail; + + /* Make a copy of the entire SELECT statement that defines the view. + ** This will force all the Expr.token.z values to be dynamically + ** allocated rather than point to the input string - which means that + ** they will persist after the current sqlite3_exec() call returns. + */ + p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); + p->pCheck = sqlite3ExprListDup(db, pCNames, EXPRDUP_REDUCE); + if( db->mallocFailed ) goto create_view_fail; + + /* Locate the end of the CREATE VIEW statement. Make sEnd point to + ** the end. + */ + sEnd = pParse->sLastToken; + assert( sEnd.z[0]!=0 ); + if( sEnd.z[0]!=';' ){ + sEnd.z += sEnd.n; + } + sEnd.n = 0; + n = (int)(sEnd.z - pBegin->z); + assert( n>0 ); + z = pBegin->z; + while( sqlite3Isspace(z[n-1]) ){ n--; } + sEnd.z = &z[n-1]; + sEnd.n = 1; + + /* Use sqlite3EndTable() to add the view to the SQLITE_MASTER table */ + sqlite3EndTable(pParse, 0, &sEnd, 0, 0); + +create_view_fail: + sqlite3SelectDelete(db, pSelect); + sqlite3ExprListDelete(db, pCNames); + return; +} +#endif /* SQLITE_OMIT_VIEW */ + +#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) +/* +** The Table structure pTable is really a VIEW. Fill in the names of +** the columns of the view in the pTable structure. Return the number +** of errors. If an error is seen leave an error message in pParse->zErrMsg. 
+*/
+SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
+  Table *pSelTab;   /* A fake table from which we get the result set */
+  Select *pSel;     /* Copy of the SELECT that implements the view */
+  int nErr = 0;     /* Number of errors encountered */
+  int n;            /* Temporarily holds the number of cursors assigned */
+  sqlite3 *db = pParse->db;  /* Database connection for malloc errors */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+  sqlite3_xauth xAuth;       /* Saved xAuth pointer */
+#endif
+
+  assert( pTable );
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  if( sqlite3VtabCallConnect(pParse, pTable) ){
+    return SQLITE_ERROR;
+  }
+  if( IsVirtual(pTable) ) return 0;
+#endif
+
+#ifndef SQLITE_OMIT_VIEW
+  /* A positive nCol means the column names for this view are
+  ** already known.
+  */
+  if( pTable->nCol>0 ) return 0;
+
+  /* A negative nCol is a special marker meaning that we are currently
+  ** trying to compute the column names.  If we enter this routine with
+  ** a negative nCol, it means two or more views form a loop, like this:
+  **
+  **     CREATE VIEW one AS SELECT * FROM two;
+  **     CREATE VIEW two AS SELECT * FROM one;
+  **
+  ** Actually, the error above is now caught prior to reaching this point.
+  ** But the following test is still important as it does come up
+  ** in the following:
+  **
+  **     CREATE TABLE main.ex1(a);
+  **     CREATE TEMP VIEW ex1 AS SELECT a FROM ex1;
+  **     SELECT * FROM temp.ex1;
+  */
+  if( pTable->nCol<0 ){
+    sqlite3ErrorMsg(pParse, "view %s is circularly defined", pTable->zName);
+    return 1;
+  }
+  assert( pTable->nCol>=0 );
+
+  /* If we get this far, it means we need to compute the column names.
+  ** Note that the call to sqlite3ResultSetOfSelect() will expand any
+  ** "*" elements in the results set of the view and will assign cursors
+  ** to the elements of the FROM clause.  But we do not want these changes
+  ** to be permanent.  So the computation is done on a copy of the SELECT
+  ** statement that defines the view.
+  */
+  assert( pTable->pSelect );
+  pSel = sqlite3SelectDup(db, pTable->pSelect, 0);
+  if( pSel ){
+    n = pParse->nTab;
+    sqlite3SrcListAssignCursors(pParse, pSel->pSrc);
+    pTable->nCol = -1;
+    db->lookaside.bDisable++;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+    xAuth = db->xAuth;
+    db->xAuth = 0;
+    pSelTab = sqlite3ResultSetOfSelect(pParse, pSel);
+    db->xAuth = xAuth;
+#else
+    pSelTab = sqlite3ResultSetOfSelect(pParse, pSel);
+#endif
+    pParse->nTab = n;
+    if( pTable->pCheck ){
+      /* CREATE VIEW name(arglist) AS ...
+      ** The names of the columns in the table are taken from
+      ** arglist which is stored in pTable->pCheck.  The pCheck field
+      ** normally holds CHECK constraints on an ordinary table, but for
+      ** a VIEW it holds the list of column names.
+      */
+      sqlite3ColumnsFromExprList(pParse, pTable->pCheck,
+                                 &pTable->nCol, &pTable->aCol);
+      if( db->mallocFailed==0
+       && pParse->nErr==0
+       && pTable->nCol==pSel->pEList->nExpr
+      ){
+        sqlite3SelectAddColumnTypeAndCollation(pParse, pTable, pSel);
+      }
+    }else if( pSelTab ){
+      /* CREATE VIEW name AS...  without an argument list.  Construct
+      ** the column names from the SELECT statement that defines the view.
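
The shadowing example quoted in the comment above can be reproduced verbatim through the public API; the cycle is only detected when the view's columns are first resolved. An illustrative sketch (helper name invented, db assumed open):

    #include "sqlite3.h"

    static void demo_circular_view(sqlite3 *db){
      char *zErr = 0;
      sqlite3_exec(db, "CREATE TABLE main.ex1(a)", 0, 0, 0);
      sqlite3_exec(db, "CREATE TEMP VIEW ex1 AS SELECT a FROM ex1", 0, 0, 0);
      if( sqlite3_exec(db, "SELECT * FROM temp.ex1", 0, 0, &zErr) ){
        /* zErr: "view ex1 is circularly defined" */
        sqlite3_free(zErr);
      }
    }
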
+  */
+      assert( pTable->aCol==0 );
+      pTable->nCol = pSelTab->nCol;
+      pTable->aCol = pSelTab->aCol;
+      pSelTab->nCol = 0;
+      pSelTab->aCol = 0;
+      assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) );
+    }else{
+      pTable->nCol = 0;
+      nErr++;
+    }
+    sqlite3DeleteTable(db, pSelTab);
+    sqlite3SelectDelete(db, pSel);
+    db->lookaside.bDisable--;
+  } else {
+    nErr++;
+  }
+  pTable->pSchema->schemaFlags |= DB_UnresetViews;
+#endif /* SQLITE_OMIT_VIEW */
+  return nErr;
+}
+#endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */
+
+#ifndef SQLITE_OMIT_VIEW
+/*
+** Clear the column names from every VIEW in database idx.
+*/
+static void sqliteViewResetAll(sqlite3 *db, int idx){
+  HashElem *i;
+  assert( sqlite3SchemaMutexHeld(db, idx, 0) );
+  if( !DbHasProperty(db, idx, DB_UnresetViews) ) return;
+  for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){
+    Table *pTab = sqliteHashData(i);
+    if( pTab->pSelect ){
+      sqlite3DeleteColumnNames(db, pTab);
+      pTab->aCol = 0;
+      pTab->nCol = 0;
+    }
+  }
+  DbClearProperty(db, idx, DB_UnresetViews);
+}
+#else
+# define sqliteViewResetAll(A,B)
+#endif /* SQLITE_OMIT_VIEW */
+
+/*
+** This function is called by the VDBE to adjust the internal schema
+** used by SQLite when the btree layer moves a table root page. The
+** root-page of a table or index in database iDb has changed from iFrom
+** to iTo.
+**
+** Ticket #1728:  The symbol table might still contain information
+** on tables and/or indices that are in the process of being deleted.
+** If you are unlucky, one of those deleted indices or tables might
+** have the same rootpage number as the real table or index that is
+** being moved.  So we cannot stop searching after the first match
+** because the first match might be for one of the deleted indices
+** or tables and not the table/index that is actually being moved.
+** We must continue looping until all tables and indices with
+** rootpage==iFrom have been converted to have a rootpage of iTo
+** in order to be certain that we got the right one.
+*/
+#ifndef SQLITE_OMIT_AUTOVACUUM
+SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3 *db, int iDb, int iFrom, int iTo){
+  HashElem *pElem;
+  Hash *pHash;
+  Db *pDb;
+
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+  pDb = &db->aDb[iDb];
+  pHash = &pDb->pSchema->tblHash;
+  for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){
+    Table *pTab = sqliteHashData(pElem);
+    if( pTab->tnum==iFrom ){
+      pTab->tnum = iTo;
+    }
+  }
+  pHash = &pDb->pSchema->idxHash;
+  for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){
+    Index *pIdx = sqliteHashData(pElem);
+    if( pIdx->tnum==iFrom ){
+      pIdx->tnum = iTo;
+    }
+  }
+}
+#endif
+
+/*
+** Write code to erase the table with root-page iTable from database iDb.
+** Also write code to modify the sqlite_master table and internal schema
+** if a root-page of another table is moved by the btree-layer whilst
+** erasing iTable (this can happen with an auto-vacuum database).
+*/
+static void destroyRootPage(Parse *pParse, int iTable, int iDb){
+  Vdbe *v = sqlite3GetVdbe(pParse);
+  int r1 = sqlite3GetTempReg(pParse);
+  assert( iTable>1 );
+  sqlite3VdbeAddOp3(v, OP_Destroy, iTable, r1, iDb);
+  sqlite3MayAbort(pParse);
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  /* OP_Destroy stores an integer in r1. If this integer
+  ** is non-zero, then it is the root page number of a table moved to
+  ** location iTable. The following code modifies the sqlite_master table to
+  ** reflect this.
+  **
+  ** The "#NNN" in the SQL is a special constant that means whatever value
+  ** is in register NNN.  See grammar rules associated with the TK_REGISTER
+  ** token for additional information.
+  */
+  sqlite3NestedParse(pParse,
+     "UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d",
+     pParse->db->aDb[iDb].zDbSName, MASTER_NAME, iTable, r1, r1);
+#endif
+  sqlite3ReleaseTempReg(pParse, r1);
+}
+
+/*
+** Write VDBE code to erase table pTab and all associated indices on disk.
+** Code to update the sqlite_master tables and internal schema definitions
+** in case a root-page belonging to another table is moved by the btree layer
+** is also added (this can happen with an auto-vacuum database).
+*/
+static void destroyTable(Parse *pParse, Table *pTab){
+#ifdef SQLITE_OMIT_AUTOVACUUM
+  Index *pIdx;
+  int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+  destroyRootPage(pParse, pTab->tnum, iDb);
+  for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+    destroyRootPage(pParse, pIdx->tnum, iDb);
+  }
+#else
+  /* If the database may be auto-vacuum capable (if SQLITE_OMIT_AUTOVACUUM
+  ** is not defined), then it is important to call OP_Destroy on the
+  ** table and index root-pages in order, starting with the numerically
+  ** largest root-page number. This guarantees that none of the root-pages
+  ** to be destroyed is relocated by an earlier OP_Destroy. i.e. if the
+  ** following were coded:
+  **
+  ** OP_Destroy 4 0
+  ** ...
+  ** OP_Destroy 5 0
+  **
+  ** and root page 5 happened to be the largest root-page number in the
+  ** database, then root page 5 would be moved to page 4 by the
+  ** "OP_Destroy 4 0" opcode. The subsequent "OP_Destroy 5 0" would hit
+  ** a free-list page.
+  */
+  int iTab = pTab->tnum;
+  int iDestroyed = 0;
+
+  while( 1 ){
+    Index *pIdx;
+    int iLargest = 0;
+
+    if( iDestroyed==0 || iTab<iDestroyed ){
+      iLargest = iTab;
+    }
+    for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+      int iIdx = pIdx->tnum;
+      assert( pIdx->pSchema==pTab->pSchema );
+      if( (iDestroyed==0 || (iIdx<iDestroyed)) && iIdx>iLargest ){
+        iLargest = iIdx;
+      }
+    }
+    if( iLargest==0 ){
+      return;
+    }else{
+      int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+      assert( iDb>=0 && iDb<pParse->db->nDb );
+      destroyRootPage(pParse, iLargest, iDb);
+      iDestroyed = iLargest;
+    }
+  }
+#endif
+}
+
+/*
+** Remove entries from the sqlite_statN tables (for N in (1,2,3,4))
+** after a DROP INDEX or DROP TABLE command.
+*/
+static void sqlite3ClearStatTables(
+  Parse *pParse,         /* The parsing context */
+  int iDb,               /* The database number */
+  const char *zType,     /* "idx" or "tbl" */
+  const char *zName      /* Name of index or table */
+){
+  int i;
+  const char *zDbName = pParse->db->aDb[iDb].zDbSName;
+  for(i=1; i<=4; i++){
+    char zTab[24];
+    sqlite3_snprintf(sizeof(zTab),zTab,"sqlite_stat%d",i);
+    if( sqlite3FindTable(pParse->db, zTab, zDbName) ){
+      sqlite3NestedParse(pParse,
+        "DELETE FROM %Q.%s WHERE %s=%Q",
+        zDbName, zTab, zType, zName
+      );
+    }
+  }
+}
+
+/*
+** Generate code to drop a table.
+*/
+SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, int isView){
+  Vdbe *v;
+  sqlite3 *db = pParse->db;
+  Trigger *pTrigger;
+  Db *pDb = &db->aDb[iDb];
+
+  v = sqlite3GetVdbe(pParse);
+  assert( v!=0 );
+  sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  if( IsVirtual(pTab) ){
+    sqlite3VdbeAddOp0(v, OP_VBegin);
+  }
+#endif
+
+  /* Drop all triggers associated with the table being dropped. Code
+  ** is generated to remove entries from sqlite_master and/or
+  ** sqlite_temp_master if required.
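
Seen from SQL, sqlite3ClearStatTables() means statistics rows never outlive the object they describe. A sketch, assuming an open connection db (helper name invented; exactly which sqlite_stat tables exist depends on the build):

    #include "sqlite3.h"

    static void demo_stat_cleanup(sqlite3 *db){
      sqlite3_exec(db,
        "CREATE TABLE t(a); INSERT INTO t VALUES(1);"
        "CREATE INDEX ti ON t(a); ANALYZE;", 0, 0, 0);
      /* sqlite_stat1 now mentions t and ti ... */
      sqlite3_exec(db, "DROP TABLE t", 0, 0, 0);
      /* ... and the nested "DELETE FROM sqlite_stat%d" has removed them */
    }
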
+  */
+  pTrigger = sqlite3TriggerList(pParse, pTab);
+  while( pTrigger ){
+    assert( pTrigger->pSchema==pTab->pSchema ||
+        pTrigger->pSchema==db->aDb[1].pSchema );
+    sqlite3DropTriggerPtr(pParse, pTrigger);
+    pTrigger = pTrigger->pNext;
+  }
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+  /* Remove any entries of the sqlite_sequence table associated with
+  ** the table being dropped. This is done before the table is dropped
+  ** at the btree level, in case the sqlite_sequence table needs to
+  ** move as a result of the drop (can happen in auto-vacuum mode).
+  */
+  if( pTab->tabFlags & TF_Autoincrement ){
+    sqlite3NestedParse(pParse,
+      "DELETE FROM %Q.sqlite_sequence WHERE name=%Q",
+      pDb->zDbSName, pTab->zName
+    );
+  }
+#endif
+
+  /* Drop all SQLITE_MASTER table and index entries that refer to the
+  ** table. The program loops through the master table and deletes
+  ** every row that refers to a table of the same name as the one being
+  ** dropped. Triggers are handled separately because a trigger can be
+  ** created in the temp database that refers to a table in another
+  ** database.
+  */
+  sqlite3NestedParse(pParse,
+      "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'",
+      pDb->zDbSName, MASTER_NAME, pTab->zName);
+  if( !isView && !IsVirtual(pTab) ){
+    destroyTable(pParse, pTab);
+  }
+
+  /* Remove the table entry from SQLite's internal schema and modify
+  ** the schema cookie.
+  */
+  if( IsVirtual(pTab) ){
+    sqlite3VdbeAddOp4(v, OP_VDestroy, iDb, 0, 0, pTab->zName, 0);
+  }
+  sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0);
+  sqlite3ChangeCookie(pParse, iDb);
+  sqliteViewResetAll(db, iDb);
+}
+
+/*
+** This routine is called to do the work of a DROP TABLE statement.
+** pName is the name of the table to be dropped.
+*/
+SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){
+  Table *pTab;
+  Vdbe *v;
+  sqlite3 *db = pParse->db;
+  int iDb;
+
+  if( db->mallocFailed ){
+    goto exit_drop_table;
+  }
+  assert( pParse->nErr==0 );
+  assert( pName->nSrc==1 );
+  if( sqlite3ReadSchema(pParse) ) goto exit_drop_table;
+  if( noErr ) db->suppressErr++;
+  assert( isView==0 || isView==LOCATE_VIEW );
+  pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]);
+  if( noErr ) db->suppressErr--;
+
+  if( pTab==0 ){
+    if( noErr ) sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase);
+    goto exit_drop_table;
+  }
+  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+  assert( iDb>=0 && iDb<db->nDb );
+
+  /* If pTab is a virtual table, call ViewGetColumnNames() to ensure
+  ** it is initialized.
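
The sqlite_sequence cleanup above can be observed directly. A minimal sketch, with an invented helper name and db assumed open:

    #include "sqlite3.h"

    static void demo_sequence_cleanup(sqlite3 *db){
      sqlite3_exec(db,
        "CREATE TABLE a1(id INTEGER PRIMARY KEY AUTOINCREMENT, v);"
        "INSERT INTO a1(v) VALUES(1);", 0, 0, 0);
      /* sqlite_sequence now holds a row with name='a1' */
      sqlite3_exec(db, "DROP TABLE a1", 0, 0, 0);
      /* the nested DELETE coded above has removed that row again */
    }
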
+ */ + if( IsVirtual(pTab) && sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto exit_drop_table; + } +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int code; + const char *zTab = SCHEMA_TABLE(iDb); + const char *zDb = db->aDb[iDb].zDbSName; + const char *zArg2 = 0; + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){ + goto exit_drop_table; + } + if( isView ){ + if( !OMIT_TEMPDB && iDb==1 ){ + code = SQLITE_DROP_TEMP_VIEW; + }else{ + code = SQLITE_DROP_VIEW; + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + }else if( IsVirtual(pTab) ){ + code = SQLITE_DROP_VTABLE; + zArg2 = sqlite3GetVTable(db, pTab)->pMod->zName; +#endif + }else{ + if( !OMIT_TEMPDB && iDb==1 ){ + code = SQLITE_DROP_TEMP_TABLE; + }else{ + code = SQLITE_DROP_TABLE; + } + } + if( sqlite3AuthCheck(pParse, code, pTab->zName, zArg2, zDb) ){ + goto exit_drop_table; + } + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){ + goto exit_drop_table; + } + } +#endif + if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 + && sqlite3StrNICmp(pTab->zName, "sqlite_stat", 11)!=0 ){ + sqlite3ErrorMsg(pParse, "table %s may not be dropped", pTab->zName); + goto exit_drop_table; + } + +#ifndef SQLITE_OMIT_VIEW + /* Ensure DROP TABLE is not used on a view, and DROP VIEW is not used + ** on a table. + */ + if( isView && pTab->pSelect==0 ){ + sqlite3ErrorMsg(pParse, "use DROP TABLE to delete table %s", pTab->zName); + goto exit_drop_table; + } + if( !isView && pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "use DROP VIEW to delete view %s", pTab->zName); + goto exit_drop_table; + } +#endif + + /* Generate code to remove the table from the master table + ** on disk. + */ + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3BeginWriteOperation(pParse, 1, iDb); + sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName); + sqlite3FkDropTable(pParse, pName, pTab); + sqlite3CodeDropTable(pParse, pTab, iDb, isView); + } + +exit_drop_table: + sqlite3SrcListDelete(db, pName); +} + +/* +** This routine is called to create a new foreign key on the table +** currently under construction. pFromCol determines which columns +** in the current table point to the foreign key. If pFromCol==0 then +** connect the key to the last column inserted. pTo is the name of +** the table referred to (a.k.a the "parent" table). pToCol is a list +** of tables in the parent pTo table. flags contains all +** information about the conflict resolution algorithms specified +** in the ON DELETE, ON UPDATE and ON INSERT clauses. +** +** An FKey structure is created and added to the table currently +** under construction in the pParse->pNewTable field. +** +** The foreign key is set for IMMEDIATE processing. A subsequent call +** to sqlite3DeferForeignKey() might change this to DEFERRED. +*/ +SQLITE_PRIVATE void sqlite3CreateForeignKey( + Parse *pParse, /* Parsing context */ + ExprList *pFromCol, /* Columns in this table that point to other table */ + Token *pTo, /* Name of the other table */ + ExprList *pToCol, /* Columns in the other table */ + int flags /* Conflict resolution algorithms. 
*/
+){
+  sqlite3 *db = pParse->db;
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+  FKey *pFKey = 0;
+  FKey *pNextTo;
+  Table *p = pParse->pNewTable;
+  int nByte;
+  int i;
+  int nCol;
+  char *z;
+
+  assert( pTo!=0 );
+  if( p==0 || IN_DECLARE_VTAB ) goto fk_end;
+  if( pFromCol==0 ){
+    int iCol = p->nCol-1;
+    if( NEVER(iCol<0) ) goto fk_end;
+    if( pToCol && pToCol->nExpr!=1 ){
+      sqlite3ErrorMsg(pParse, "foreign key on %s"
+         " should reference only one column of table %T",
+         p->aCol[iCol].zName, pTo);
+      goto fk_end;
+    }
+    nCol = 1;
+  }else if( pToCol && pToCol->nExpr!=pFromCol->nExpr ){
+    sqlite3ErrorMsg(pParse,
+        "number of columns in foreign key does not match the number of "
+        "columns in the referenced table");
+    goto fk_end;
+  }else{
+    nCol = pFromCol->nExpr;
+  }
+  nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1;
+  if( pToCol ){
+    for(i=0; i<pToCol->nExpr; i++){
+      nByte += sqlite3Strlen30(pToCol->a[i].zName) + 1;
+    }
+  }
+  pFKey = sqlite3DbMallocZero(db, nByte );
+  if( pFKey==0 ){
+    goto fk_end;
+  }
+  pFKey->pFrom = p;
+  pFKey->pNextFrom = p->pFKey;
+  z = (char*)&pFKey->aCol[nCol];
+  pFKey->zTo = z;
+  memcpy(z, pTo->z, pTo->n);
+  z[pTo->n] = 0;
+  sqlite3Dequote(z);
+  z += pTo->n+1;
+  pFKey->nCol = nCol;
+  if( pFromCol==0 ){
+    pFKey->aCol[0].iFrom = p->nCol-1;
+  }else{
+    for(i=0; i<nCol; i++){
+      int j;
+      for(j=0; j<p->nCol; j++){
+        if( sqlite3StrICmp(p->aCol[j].zName, pFromCol->a[i].zName)==0 ){
+          pFKey->aCol[i].iFrom = j;
+          break;
+        }
+      }
+      if( j>=p->nCol ){
+        sqlite3ErrorMsg(pParse,
+          "unknown column \"%s\" in foreign key definition",
+          pFromCol->a[i].zName);
+        goto fk_end;
+      }
+    }
+  }
+  if( pToCol ){
+    for(i=0; i<nCol; i++){
+      int n = sqlite3Strlen30(pToCol->a[i].zName);
+      pFKey->aCol[i].zCol = z;
+      memcpy(z, pToCol->a[i].zName, n);
+      z[n] = 0;
+      z += n+1;
+    }
+  }
+  pFKey->isDeferred = 0;
+  pFKey->aAction[0] = (u8)(flags & 0xff);            /* ON DELETE action */
+  pFKey->aAction[1] = (u8)((flags >> 8 ) & 0xff);    /* ON UPDATE action */
+
+  assert( sqlite3SchemaMutexHeld(db, 0, p->pSchema) );
+  pNextTo = (FKey *)sqlite3HashInsert(&p->pSchema->fkeyHash,
+      pFKey->zTo, (void *)pFKey
+  );
+  if( pNextTo==pFKey ){
+    sqlite3OomFault(db);
+    goto fk_end;
+  }
+  if( pNextTo ){
+    assert( pNextTo->pPrevTo==0 );
+    pFKey->pNextTo = pNextTo;
+    pNextTo->pPrevTo = pFKey;
+  }
+
+  /* Link the foreign key to the table as the last step.
+  */
+  p->pFKey = pFKey;
+  pFKey = 0;
+
+fk_end:
+  sqlite3DbFree(db, pFKey);
+#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */
+  sqlite3ExprListDelete(db, pFromCol);
+  sqlite3ExprListDelete(db, pToCol);
+}
+
+/*
+** This routine is called when an INITIALLY IMMEDIATE or INITIALLY DEFERRED
+** clause is seen as part of a foreign key definition.  The isDeferred
+** parameter is 1 for INITIALLY DEFERRED and 0 for INITIALLY IMMEDIATE.
+** The behavior of the most recently created foreign key is adjusted
+** accordingly.
+*/
+SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse *pParse, int isDeferred){
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+  Table *pTab;
+  FKey *pFKey;
+  if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return;
+  assert( isDeferred==0 || isDeferred==1 ); /* EV: R-30323-21917 */
+  pFKey->isDeferred = (u8)isDeferred;
+#endif
+}
+
+/*
+** Generate code that will erase and refill index *pIdx.  This is
+** used to initialize a newly created index or to recompute the
+** content of an index in response to a REINDEX command.
+**
+** If memRootPage is not negative, it means that the index is newly
+** created.  The register specified by memRootPage contains the
+** root page number of the index.
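
The isDeferred flag set by sqlite3DeferForeignKey() changes when the constraint built above is checked. An illustrative sketch, assuming an open connection db with nothing else in it (helper name invented):

    #include "sqlite3.h"

    static void demo_deferred_fk(sqlite3 *db){
      sqlite3_exec(db, "PRAGMA foreign_keys=ON", 0, 0, 0);
      sqlite3_exec(db,
        "CREATE TABLE parent(id PRIMARY KEY);"
        "CREATE TABLE child(pid REFERENCES parent(id)"
        " DEFERRABLE INITIALLY DEFERRED);", 0, 0, 0);
      /* With FKey.isDeferred set, the violation is tolerated until COMMIT */
      sqlite3_exec(db, "BEGIN; INSERT INTO child VALUES(99);", 0, 0, 0);
      sqlite3_exec(db, "INSERT INTO parent VALUES(99); COMMIT;", 0, 0, 0);
    }
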
If memRootPage is negative, then +** the index already exists and must be cleared before being refilled and +** the root page number of the index is taken from pIndex->tnum. +*/ +static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ + Table *pTab = pIndex->pTable; /* The table that is indexed */ + int iTab = pParse->nTab++; /* Btree cursor used for pTab */ + int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */ + int iSorter; /* Cursor opened by OpenSorter (if in use) */ + int addr1; /* Address of top of loop */ + int addr2; /* Address to jump to for next iteration */ + int tnum; /* Root page of index */ + int iPartIdxLabel; /* Jump to this label to skip a row */ + Vdbe *v; /* Generate code into this virtual machine */ + KeyInfo *pKey; /* KeyInfo for index */ + int regRecord; /* Register holding assembled index record */ + sqlite3 *db = pParse->db; /* The database connection */ + int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); + +#ifndef SQLITE_OMIT_AUTHORIZATION + if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, + db->aDb[iDb].zDbSName ) ){ + return; + } +#endif + + /* Require a write-lock on the table to perform this operation */ + sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); + + v = sqlite3GetVdbe(pParse); + if( v==0 ) return; + if( memRootPage>=0 ){ + tnum = memRootPage; + }else{ + tnum = pIndex->tnum; + } + pKey = sqlite3KeyInfoOfIndex(pParse, pIndex); + assert( pKey!=0 || db->mallocFailed || pParse->nErr ); + + /* Open the sorter cursor if we are to use one. */ + iSorter = pParse->nTab++; + sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, pIndex->nKeyCol, (char*) + sqlite3KeyInfoRef(pKey), P4_KEYINFO); + + /* Open the table. Loop through all rows of the table, inserting index + ** records into the sorter. */ + sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); + addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); VdbeCoverage(v); + regRecord = sqlite3GetTempReg(pParse); + + sqlite3GenerateIndexKey(pParse,pIndex,iTab,regRecord,0,&iPartIdxLabel,0,0); + sqlite3VdbeAddOp2(v, OP_SorterInsert, iSorter, regRecord); + sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel); + sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addr1); + if( memRootPage<0 ) sqlite3VdbeAddOp2(v, OP_Clear, tnum, iDb); + sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb, + (char *)pKey, P4_KEYINFO); + sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR|((memRootPage>=0)?OPFLAG_P2ISREG:0)); + + addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0); VdbeCoverage(v); + if( IsUniqueIndex(pIndex) ){ + int j2 = sqlite3VdbeCurrentAddr(v) + 3; + sqlite3VdbeGoto(v, j2); + addr2 = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp4Int(v, OP_SorterCompare, iSorter, j2, regRecord, + pIndex->nKeyCol); VdbeCoverage(v); + sqlite3UniqueConstraint(pParse, OE_Abort, pIndex); + }else{ + addr2 = sqlite3VdbeCurrentAddr(v); + } + sqlite3VdbeAddOp3(v, OP_SorterData, iSorter, regRecord, iIdx); + sqlite3VdbeAddOp3(v, OP_Last, iIdx, 0, -1); + sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdx, regRecord); + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + sqlite3ReleaseTempReg(pParse, regRecord); + sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addr1); + + sqlite3VdbeAddOp1(v, OP_Close, iTab); + sqlite3VdbeAddOp1(v, OP_Close, iIdx); + sqlite3VdbeAddOp1(v, OP_Close, iSorter); +} + +/* +** Allocate heap space to hold an Index object with nCol columns. 
+** +** Increase the allocation size to provide an extra nExtra bytes +** of 8-byte aligned space after the Index object and return a +** pointer to this extra space in *ppExtra. +*/ +SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( + sqlite3 *db, /* Database connection */ + i16 nCol, /* Total number of columns in the index */ + int nExtra, /* Number of bytes of extra space to alloc */ + char **ppExtra /* Pointer to the "extra" space */ +){ + Index *p; /* Allocated index object */ + int nByte; /* Bytes of space for Index object + arrays */ + + nByte = ROUND8(sizeof(Index)) + /* Index structure */ + ROUND8(sizeof(char*)*nCol) + /* Index.azColl */ + ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */ + sizeof(i16)*nCol + /* Index.aiColumn */ + sizeof(u8)*nCol); /* Index.aSortOrder */ + p = sqlite3DbMallocZero(db, nByte + nExtra); + if( p ){ + char *pExtra = ((char*)p)+ROUND8(sizeof(Index)); + p->azColl = (const char**)pExtra; pExtra += ROUND8(sizeof(char*)*nCol); + p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1); + p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol; + p->aSortOrder = (u8*)pExtra; + p->nColumn = nCol; + p->nKeyCol = nCol - 1; + *ppExtra = ((char*)p) + nByte; + } + return p; +} + +/* +** Create a new index for an SQL table. pName1.pName2 is the name of the index +** and pTblList is the name of the table that is to be indexed. Both will +** be NULL for a primary key or an index that is created to satisfy a +** UNIQUE constraint. If pTable and pIndex are NULL, use pParse->pNewTable +** as the table to be indexed. pParse->pNewTable is a table that is +** currently being constructed by a CREATE TABLE statement. +** +** pList is a list of columns to be indexed. pList will be NULL if this +** is a primary key or unique-constraint on the most recent column added +** to the table currently under construction. +*/ +SQLITE_PRIVATE void sqlite3CreateIndex( + Parse *pParse, /* All information about this parse */ + Token *pName1, /* First part of index name. May be NULL */ + Token *pName2, /* Second part of index name. May be NULL */ + SrcList *pTblName, /* Table to index. Use pParse->pNewTable if 0 */ + ExprList *pList, /* A list of columns to be indexed */ + int onError, /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ + Token *pStart, /* The CREATE token that begins this statement */ + Expr *pPIWhere, /* WHERE clause for partial indices */ + int sortOrder, /* Sort order of primary key when pList==NULL */ + int ifNotExist, /* Omit error if index already exists */ + u8 idxType /* The index type */ +){ + Table *pTab = 0; /* Table to be indexed */ + Index *pIndex = 0; /* The index to be created */ + char *zName = 0; /* Name of the index */ + int nName; /* Number of characters in zName */ + int i, j; + DbFixer sFix; /* For assigning database names to pTable */ + int sortOrderMask; /* 1 to honor DESC in index. 0 to ignore. 
*/ + sqlite3 *db = pParse->db; + Db *pDb; /* The specific table containing the indexed database */ + int iDb; /* Index of the database that is being written */ + Token *pName = 0; /* Unqualified name of the index to create */ + struct ExprList_item *pListItem; /* For looping over pList */ + int nExtra = 0; /* Space allocated for zExtra[] */ + int nExtraCol; /* Number of extra columns needed */ + char *zExtra = 0; /* Extra space after the Index object */ + Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */ + + if( db->mallocFailed || pParse->nErr>0 ){ + goto exit_create_index; + } + if( IN_DECLARE_VTAB && idxType!=SQLITE_IDXTYPE_PRIMARYKEY ){ + goto exit_create_index; + } + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + goto exit_create_index; + } + + /* + ** Find the table that is to be indexed. Return early if not found. + */ + if( pTblName!=0 ){ + + /* Use the two-part index name to determine the database + ** to search for the table. 'Fix' the table name to this db + ** before looking up the table. + */ + assert( pName1 && pName2 ); + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); + if( iDb<0 ) goto exit_create_index; + assert( pName && pName->z ); + +#ifndef SQLITE_OMIT_TEMPDB + /* If the index name was unqualified, check if the table + ** is a temp table. If so, set the database to 1. Do not do this + ** if initialising a database schema. + */ + if( !db->init.busy ){ + pTab = sqlite3SrcListLookup(pParse, pTblName); + if( pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ + iDb = 1; + } + } +#endif + + sqlite3FixInit(&sFix, pParse, iDb, "index", pName); + if( sqlite3FixSrcList(&sFix, pTblName) ){ + /* Because the parser constructs pTblName from a single identifier, + ** sqlite3FixSrcList can never fail. */ + assert(0); + } + pTab = sqlite3LocateTableItem(pParse, 0, &pTblName->a[0]); + assert( db->mallocFailed==0 || pTab==0 ); + if( pTab==0 ) goto exit_create_index; + if( iDb==1 && db->aDb[iDb].pSchema!=pTab->pSchema ){ + sqlite3ErrorMsg(pParse, + "cannot create a TEMP index on non-TEMP table \"%s\"", + pTab->zName); + goto exit_create_index; + } + if( !HasRowid(pTab) ) pPk = sqlite3PrimaryKeyIndex(pTab); + }else{ + assert( pName==0 ); + assert( pStart==0 ); + pTab = pParse->pNewTable; + if( !pTab ) goto exit_create_index; + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + } + pDb = &db->aDb[iDb]; + + assert( pTab!=0 ); + assert( pParse->nErr==0 ); + if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 + && db->init.busy==0 +#if SQLITE_USER_AUTHENTICATION + && sqlite3UserAuthTable(pTab->zName)==0 +#endif + && sqlite3StrNICmp(&pTab->zName[7],"altertab_",9)!=0 ){ + sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); + goto exit_create_index; + } +#ifndef SQLITE_OMIT_VIEW + if( pTab->pSelect ){ + sqlite3ErrorMsg(pParse, "views may not be indexed"); + goto exit_create_index; + } +#endif +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + sqlite3ErrorMsg(pParse, "virtual tables may not be indexed"); + goto exit_create_index; + } +#endif + + /* + ** Find the name of the index. Make sure there is not already another + ** index or table with the same name. + ** + ** Exception: If we are reading the names of permanent indices from the + ** sqlite_master table (because some other process changed the schema) and + ** one of the index names collides with the name of a temporary table or + ** index, then we will continue to process this index. + ** + ** If pName==0 it means that we are + ** dealing with a primary key or UNIQUE constraint. 
We have to invent our
+  ** own name.
+  */
+  if( pName ){
+    zName = sqlite3NameFromToken(db, pName);
+    if( zName==0 ) goto exit_create_index;
+    assert( pName->z!=0 );
+    if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){
+      goto exit_create_index;
+    }
+    if( !db->init.busy ){
+      if( sqlite3FindTable(db, zName, 0)!=0 ){
+        sqlite3ErrorMsg(pParse, "there is already a table named %s", zName);
+        goto exit_create_index;
+      }
+    }
+    if( sqlite3FindIndex(db, zName, pDb->zDbSName)!=0 ){
+      if( !ifNotExist ){
+        sqlite3ErrorMsg(pParse, "index %s already exists", zName);
+      }else{
+        assert( !db->init.busy );
+        sqlite3CodeVerifySchema(pParse, iDb);
+      }
+      goto exit_create_index;
+    }
+  }else{
+    int n;
+    Index *pLoop;
+    for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){}
+    zName = sqlite3MPrintf(db, "sqlite_autoindex_%s_%d", pTab->zName, n);
+    if( zName==0 ){
+      goto exit_create_index;
+    }
+
+    /* Automatic index names generated from within sqlite3_declare_vtab()
+    ** must have names that are distinct from normal automatic index names.
+    ** The following statement converts "sqlite_autoindex..." into
+    ** "sqlite_butoindex..." in order to make the names distinct.
+    ** The "vtab_err.test" test demonstrates the need of this statement. */
+    if( IN_DECLARE_VTAB ) zName[7]++;
+  }
+
+  /* Check for authorization to create an index.
+  */
+#ifndef SQLITE_OMIT_AUTHORIZATION
+  {
+    const char *zDb = pDb->zDbSName;
+    if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){
+      goto exit_create_index;
+    }
+    i = SQLITE_CREATE_INDEX;
+    if( !OMIT_TEMPDB && iDb==1 ) i = SQLITE_CREATE_TEMP_INDEX;
+    if( sqlite3AuthCheck(pParse, i, zName, pTab->zName, zDb) ){
+      goto exit_create_index;
+    }
+  }
+#endif
+
+  /* If pList==0, it means this routine was called to make a primary
+  ** key out of the last column added to the table under construction.
+  ** So create a fake list to simulate this.
+  */
+  if( pList==0 ){
+    Token prevCol;
+    sqlite3TokenInit(&prevCol, pTab->aCol[pTab->nCol-1].zName);
+    pList = sqlite3ExprListAppend(pParse, 0,
+              sqlite3ExprAlloc(db, TK_ID, &prevCol, 0));
+    if( pList==0 ) goto exit_create_index;
+    assert( pList->nExpr==1 );
+    sqlite3ExprListSetSortOrder(pList, sortOrder);
+  }else{
+    sqlite3ExprListCheckLength(pParse, pList, "index");
+  }
+
+  /* Figure out how many bytes of space are required to store explicitly
+  ** specified collation sequence names.
+  */
+  for(i=0; i<pList->nExpr; i++){
+    Expr *pExpr = pList->a[i].pExpr;
+    assert( pExpr!=0 );
+    if( pExpr->op==TK_COLLATE ){
+      nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken));
+    }
+  }
+
+  /*
+  ** Allocate the index structure.
+  */
+  nName = sqlite3Strlen30(zName);
+  nExtraCol = pPk ?
pPk->nKeyCol : 1;
+  pIndex = sqlite3AllocateIndexObject(db, pList->nExpr + nExtraCol,
+                                      nName + nExtra + 1, &zExtra);
+  if( db->mallocFailed ){
+    goto exit_create_index;
+  }
+  assert( EIGHT_BYTE_ALIGNMENT(pIndex->aiRowLogEst) );
+  assert( EIGHT_BYTE_ALIGNMENT(pIndex->azColl) );
+  pIndex->zName = zExtra;
+  zExtra += nName + 1;
+  memcpy(pIndex->zName, zName, nName+1);
+  pIndex->pTable = pTab;
+  pIndex->onError = (u8)onError;
+  pIndex->uniqNotNull = onError!=OE_None;
+  pIndex->idxType = idxType;
+  pIndex->pSchema = db->aDb[iDb].pSchema;
+  pIndex->nKeyCol = pList->nExpr;
+  if( pPIWhere ){
+    sqlite3ResolveSelfReference(pParse, pTab, NC_PartIdx, pPIWhere, 0);
+    pIndex->pPartIdxWhere = pPIWhere;
+    pPIWhere = 0;
+  }
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+
+  /* Check to see if we should honor DESC requests on index columns
+  */
+  if( pDb->pSchema->file_format>=4 ){
+    sortOrderMask = -1;   /* Honor DESC */
+  }else{
+    sortOrderMask = 0;    /* Ignore DESC */
+  }
+
+  /* Analyze the list of expressions that form the terms of the index and
+  ** report any errors.  In the common case where the expression is exactly
+  ** a table column, store that column in aiColumn[].  For general expressions,
+  ** populate pIndex->aColExpr and store XN_EXPR (-2) in aiColumn[].
+  **
+  ** TODO: Issue a warning if two or more columns of the index are identical.
+  ** TODO: Issue a warning if the table primary key is used as part of the
+  ** index key.
+  */
+  for(i=0, pListItem=pList->a; i<pList->nExpr; i++, pListItem++){
+    Expr *pCExpr;                  /* The i-th index expression */
+    int requestedSortOrder;        /* ASC or DESC on the i-th expression */
+    const char *zColl;             /* Collation sequence name */
+
+    sqlite3StringToId(pListItem->pExpr);
+    sqlite3ResolveSelfReference(pParse, pTab, NC_IdxExpr, pListItem->pExpr, 0);
+    if( pParse->nErr ) goto exit_create_index;
+    pCExpr = sqlite3ExprSkipCollate(pListItem->pExpr);
+    if( pCExpr->op!=TK_COLUMN ){
+      if( pTab==pParse->pNewTable ){
+        sqlite3ErrorMsg(pParse, "expressions prohibited in PRIMARY KEY and "
+                                "UNIQUE constraints");
+        goto exit_create_index;
+      }
+      if( pIndex->aColExpr==0 ){
+        ExprList *pCopy = sqlite3ExprListDup(db, pList, 0);
+        pIndex->aColExpr = pCopy;
+        if( !db->mallocFailed ){
+          assert( pCopy!=0 );
+          pListItem = &pCopy->a[i];
+        }
+      }
+      j = XN_EXPR;
+      pIndex->aiColumn[i] = XN_EXPR;
+      pIndex->uniqNotNull = 0;
+    }else{
+      j = pCExpr->iColumn;
+      assert( j<=0x7fff );
+      if( j<0 ){
+        j = pTab->iPKey;
+      }else if( pTab->aCol[j].notNull==0 ){
+        pIndex->uniqNotNull = 0;
+      }
+      pIndex->aiColumn[i] = (i16)j;
+    }
+    zColl = 0;
+    if( pListItem->pExpr->op==TK_COLLATE ){
+      int nColl;
+      zColl = pListItem->pExpr->u.zToken;
+      nColl = sqlite3Strlen30(zColl) + 1;
+      assert( nExtra>=nColl );
+      memcpy(zExtra, zColl, nColl);
+      zColl = zExtra;
+      zExtra += nColl;
+      nExtra -= nColl;
+    }else if( j>=0 ){
+      zColl = pTab->aCol[j].zColl;
+    }
+    if( !zColl ) zColl = sqlite3StrBINARY;
+    if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){
+      goto exit_create_index;
+    }
+    pIndex->azColl[i] = zColl;
+    requestedSortOrder = pListItem->sortOrder & sortOrderMask;
+    pIndex->aSortOrder[i] = (u8)requestedSortOrder;
+  }
+
+  /* Append the table key to the end of the index.  For WITHOUT ROWID
+  ** tables (when pPk!=0) this will be the declared PRIMARY KEY.  For
+  ** normal tables (when pPk==0) this will be the rowid.
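
For a concrete instance of the per-term analysis above, an explicit COLLATE lands in azColl[] (copied into the zExtra space) and DESC in aSortOrder[]. A sketch assuming an open db and the modern file format (file_format>=4, the long-standing default); the helper name is invented:

    #include "sqlite3.h"

    static void demo_index_terms(sqlite3 *db){
      sqlite3_exec(db, "CREATE TABLE n(name TEXT)", 0, 0, 0);
      sqlite3_exec(db, "CREATE INDEX n_i ON n(name COLLATE NOCASE DESC)",
                   0, 0, 0);
    }
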
+  */
+  if( pPk ){
+    for(j=0; j<pPk->nKeyCol; j++){
+      int x = pPk->aiColumn[j];
+      assert( x>=0 );
+      if( hasColumn(pIndex->aiColumn, pIndex->nKeyCol, x) ){
+        pIndex->nColumn--;
+      }else{
+        pIndex->aiColumn[i] = x;
+        pIndex->azColl[i] = pPk->azColl[j];
+        pIndex->aSortOrder[i] = pPk->aSortOrder[j];
+        i++;
+      }
+    }
+    assert( i==pIndex->nColumn );
+  }else{
+    pIndex->aiColumn[i] = XN_ROWID;
+    pIndex->azColl[i] = sqlite3StrBINARY;
+  }
+  sqlite3DefaultRowEst(pIndex);
+  if( pParse->pNewTable==0 ) estimateIndexWidth(pIndex);
+
+  /* If this index contains every column of its table, then mark
+  ** it as a covering index */
+  assert( HasRowid(pTab)
+      || pTab->iPKey<0 || sqlite3ColumnOfIndex(pIndex, pTab->iPKey)>=0 );
+  if( pTblName!=0 && pIndex->nColumn>=pTab->nCol ){
+    pIndex->isCovering = 1;
+    for(j=0; j<pTab->nCol; j++){
+      if( j==pTab->iPKey ) continue;
+      if( sqlite3ColumnOfIndex(pIndex,j)>=0 ) continue;
+      pIndex->isCovering = 0;
+      break;
+    }
+  }
+
+  if( pTab==pParse->pNewTable ){
+    /* This routine has been called to create an automatic index as a
+    ** result of a PRIMARY KEY or UNIQUE clause on a column definition, or
+    ** a PRIMARY KEY or UNIQUE clause following the column definitions.
+    ** i.e. one of:
+    **
+    ** CREATE TABLE t(x PRIMARY KEY, y);
+    ** CREATE TABLE t(x, y, UNIQUE(x, y));
+    **
+    ** Either way, check to see if the table already has such an index. If
+    ** so, don't bother creating this one. This only applies to
+    ** automatically created indices. Users can do as they wish with
+    ** explicit indices.
+    **
+    ** Two UNIQUE or PRIMARY KEY constraints are considered equivalent
+    ** (and the second one is thus suppressed) even if they have different
+    ** sort orders.
+    **
+    ** If there are different collating sequences or if the columns of
+    ** the constraint occur in different orders, then the constraints are
+    ** considered distinct and both result in separate indices.
+    */
+    Index *pIdx;
+    for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+      int k;
+      assert( IsUniqueIndex(pIdx) );
+      assert( pIdx->idxType!=SQLITE_IDXTYPE_APPDEF );
+      assert( IsUniqueIndex(pIndex) );
+
+      if( pIdx->nKeyCol!=pIndex->nKeyCol ) continue;
+      for(k=0; k<pIdx->nKeyCol; k++){
+        const char *z1;
+        const char *z2;
+        assert( pIdx->aiColumn[k]>=0 );
+        if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break;
+        z1 = pIdx->azColl[k];
+        z2 = pIndex->azColl[k];
+        if( sqlite3StrICmp(z1, z2) ) break;
+      }
+      if( k==pIdx->nKeyCol ){
+        if( pIdx->onError!=pIndex->onError ){
+          /* This constraint creates the same index as a previous
+          ** constraint specified somewhere in the CREATE TABLE statement.
+          ** However the ON CONFLICT clauses are different. If both this
+          ** constraint and the previous equivalent constraint have explicit
+          ** ON CONFLICT clauses this is an error. Otherwise, use the
+          ** explicitly specified behavior for the index.
+          */
+          if( !(pIdx->onError==OE_Default || pIndex->onError==OE_Default) ){
+            sqlite3ErrorMsg(pParse,
+                "conflicting ON CONFLICT clauses specified", 0);
+          }
+          if( pIdx->onError==OE_Default ){
+            pIdx->onError = pIndex->onError;
+          }
+        }
+        if( idxType==SQLITE_IDXTYPE_PRIMARYKEY ) pIdx->idxType = idxType;
+        goto exit_create_index;
+      }
+    }
+  }
+
+  /* Link the new Index structure to its table and to the other
+  ** in-memory database structures.
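
The equivalent-constraint check above can be triggered deliberately; whether two constraints are "equivalent" follows exactly the column and collation comparison coded there. A sketch (helper name invented, db assumed open):

    #include "sqlite3.h"

    static void demo_conflicting_onconflict(sqlite3 *db){
      char *zErr = 0;
      if( sqlite3_exec(db,
            "CREATE TABLE u(x UNIQUE ON CONFLICT IGNORE,"
            " UNIQUE(x) ON CONFLICT REPLACE)", 0, 0, &zErr) ){
        /* zErr: "conflicting ON CONFLICT clauses specified" */
        sqlite3_free(zErr);
      }
    }
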
+ */ + assert( pParse->nErr==0 ); + if( db->init.busy ){ + Index *p; + assert( !IN_DECLARE_VTAB ); + assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) ); + p = sqlite3HashInsert(&pIndex->pSchema->idxHash, + pIndex->zName, pIndex); + if( p ){ + assert( p==pIndex ); /* Malloc must have failed */ + sqlite3OomFault(db); + goto exit_create_index; + } + db->flags |= SQLITE_InternChanges; + if( pTblName!=0 ){ + pIndex->tnum = db->init.newTnum; + } + } + + /* If this is the initial CREATE INDEX statement (or CREATE TABLE if the + ** index is an implied index for a UNIQUE or PRIMARY KEY constraint) then + ** emit code to allocate the index rootpage on disk and make an entry for + ** the index in the sqlite_master table and populate the index with + ** content. But, do not do this if we are simply reading the sqlite_master + ** table to parse the schema, or if this index is the PRIMARY KEY index + ** of a WITHOUT ROWID table. + ** + ** If pTblName==0 it means this index is generated as an implied PRIMARY KEY + ** or UNIQUE index in a CREATE TABLE statement. Since the table + ** has just been created, it contains no data and the index initialization + ** step can be skipped. + */ + else if( HasRowid(pTab) || pTblName!=0 ){ + Vdbe *v; + char *zStmt; + int iMem = ++pParse->nMem; + + v = sqlite3GetVdbe(pParse); + if( v==0 ) goto exit_create_index; + + sqlite3BeginWriteOperation(pParse, 1, iDb); + + /* Create the rootpage for the index using CreateIndex. But before + ** doing so, code a Noop instruction and store its address in + ** Index.tnum. This is required in case this index is actually a + ** PRIMARY KEY and the table is actually a WITHOUT ROWID table. In + ** that case the convertToWithoutRowidTable() routine will replace + ** the Noop with a Goto to jump over the VDBE code generated below. */ + pIndex->tnum = sqlite3VdbeAddOp0(v, OP_Noop); + sqlite3VdbeAddOp2(v, OP_CreateIndex, iDb, iMem); + + /* Gather the complete text of the CREATE INDEX statement into + ** the zStmt variable + */ + if( pStart ){ + int n = (int)(pParse->sLastToken.z - pName->z) + pParse->sLastToken.n; + if( pName->z[n-1]==';' ) n--; + /* A named index with an explicit CREATE INDEX statement */ + zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s", + onError==OE_None ? "" : " UNIQUE", n, pName->z); + }else{ + /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */ + /* zStmt = sqlite3MPrintf(""); */ + zStmt = 0; + } + + /* Add an entry in sqlite_master for this index + */ + sqlite3NestedParse(pParse, + "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);", + db->aDb[iDb].zDbSName, MASTER_NAME, + pIndex->zName, + pTab->zName, + iMem, + zStmt + ); + sqlite3DbFree(db, zStmt); + + /* Fill the index with data and reparse the schema. Code an OP_Expire + ** to invalidate all pre-compiled statements. + */ + if( pTblName ){ + sqlite3RefillIndex(pParse, pIndex, iMem); + sqlite3ChangeCookie(pParse, iDb); + sqlite3VdbeAddParseSchemaOp(v, iDb, + sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName)); + sqlite3VdbeAddOp0(v, OP_Expire); + } + + sqlite3VdbeJumpHere(v, pIndex->tnum); + } + + /* When adding an index to the list of indices for a table, make + ** sure all indices labeled OE_Replace come after all those labeled + ** OE_Ignore. This is necessary for the correct constraint check + ** processing (in sqlite3GenerateConstraintChecks()) as part of + ** UPDATE and INSERT statements. 
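
The zStmt==0 branch above is why implied indexes show a NULL sql column in sqlite_master, under the invented "sqlite_autoindex_..." name. An illustrative sketch using the public prepared-statement API (helper name invented, db assumed open):

    #include <stdio.h>
    #include "sqlite3.h"

    static void demo_autoindex_row(sqlite3 *db){
      sqlite3_stmt *pStmt;
      sqlite3_exec(db, "CREATE TABLE q(a UNIQUE)", 0, 0, 0);
      sqlite3_prepare_v2(db,
        "SELECT name, sql IS NULL FROM sqlite_master WHERE type='index'",
        -1, &pStmt, 0);
      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        /* prints: sqlite_autoindex_q_1 1 */
        printf("%s %d\n", sqlite3_column_text(pStmt,0),
               sqlite3_column_int(pStmt,1));
      }
      sqlite3_finalize(pStmt);
    }
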
+ */ + if( db->init.busy || pTblName==0 ){ + if( onError!=OE_Replace || pTab->pIndex==0 + || pTab->pIndex->onError==OE_Replace){ + pIndex->pNext = pTab->pIndex; + pTab->pIndex = pIndex; + }else{ + Index *pOther = pTab->pIndex; + while( pOther->pNext && pOther->pNext->onError!=OE_Replace ){ + pOther = pOther->pNext; + } + pIndex->pNext = pOther->pNext; + pOther->pNext = pIndex; + } + pIndex = 0; + } + + /* Clean up before exiting */ +exit_create_index: + if( pIndex ) freeIndex(db, pIndex); + sqlite3ExprDelete(db, pPIWhere); + sqlite3ExprListDelete(db, pList); + sqlite3SrcListDelete(db, pTblName); + sqlite3DbFree(db, zName); +} + +/* +** Fill the Index.aiRowEst[] array with default information - information +** to be used when we have not run the ANALYZE command. +** +** aiRowEst[0] is supposed to contain the number of elements in the index. +** Since we do not know, guess 1 million. aiRowEst[1] is an estimate of the +** number of rows in the table that match any particular value of the +** first column of the index. aiRowEst[2] is an estimate of the number +** of rows that match any particular combination of the first 2 columns +** of the index. And so forth. It must always be the case that +* +** aiRowEst[N]<=aiRowEst[N-1] +** aiRowEst[N]>=1 +** +** Apart from that, we have little to go on besides intuition as to +** how aiRowEst[] should be initialized. The numbers generated here +** are based on typical values found in actual indices. +*/ +SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){ + /* 10, 9, 8, 7, 6 */ + LogEst aVal[] = { 33, 32, 30, 28, 26 }; + LogEst *a = pIdx->aiRowLogEst; + int nCopy = MIN(ArraySize(aVal), pIdx->nKeyCol); + int i; + + /* Set the first entry (number of rows in the index) to the estimated + ** number of rows in the table, or half the number of rows in the table + ** for a partial index. But do not let the estimate drop below 10. */ + a[0] = pIdx->pTable->nRowLogEst; + if( pIdx->pPartIdxWhere!=0 ) a[0] -= 10; assert( 10==sqlite3LogEst(2) ); + if( a[0]<33 ) a[0] = 33; assert( 33==sqlite3LogEst(10) ); + + /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is + ** 6 and each subsequent value (if any) is 5. */ + memcpy(&a[1], aVal, nCopy*sizeof(LogEst)); + for(i=nCopy+1; i<=pIdx->nKeyCol; i++){ + a[i] = 23; assert( 23==sqlite3LogEst(5) ); + } + + assert( 0==sqlite3LogEst(1) ); + if( IsUniqueIndex(pIdx) ) a[pIdx->nKeyCol] = 0; +} + +/* +** This routine will drop an existing named index. This routine +** implements the DROP INDEX statement. 
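
The constants above are LogEst values, integer approximations of 10*log2(x). A tiny standalone check of the decoded row counts (plain C arithmetic, not SQLite code; link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void){
      /* 33~10 rows, 32~9, 30~8, 28~7, 26~6, 23~5, 10=2, 0=1 */
      int aVal[] = { 33, 32, 30, 28, 26, 23, 10, 0 };
      for(int i=0; i<8; i++){
        printf("LogEst %2d ~ %.1f rows\n", aVal[i], pow(2.0, aVal[i]/10.0));
      }
      return 0;  /* prints roughly 9.8, 9.2, 8.0, 7.0, 6.1, 4.9, 2.0, 1.0 */
    }
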
+*/ +SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists){ + Index *pIndex; + Vdbe *v; + sqlite3 *db = pParse->db; + int iDb; + + assert( pParse->nErr==0 ); /* Never called with prior errors */ + if( db->mallocFailed ){ + goto exit_drop_index; + } + assert( pName->nSrc==1 ); + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ + goto exit_drop_index; + } + pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); + if( pIndex==0 ){ + if( !ifExists ){ + sqlite3ErrorMsg(pParse, "no such index: %S", pName, 0); + }else{ + sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); + } + pParse->checkSchema = 1; + goto exit_drop_index; + } + if( pIndex->idxType!=SQLITE_IDXTYPE_APPDEF ){ + sqlite3ErrorMsg(pParse, "index associated with UNIQUE " + "or PRIMARY KEY constraint cannot be dropped", 0); + goto exit_drop_index; + } + iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int code = SQLITE_DROP_INDEX; + Table *pTab = pIndex->pTable; + const char *zDb = db->aDb[iDb].zDbSName; + const char *zTab = SCHEMA_TABLE(iDb); + if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ + goto exit_drop_index; + } + if( !OMIT_TEMPDB && iDb ) code = SQLITE_DROP_TEMP_INDEX; + if( sqlite3AuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){ + goto exit_drop_index; + } + } +#endif + + /* Generate code to remove the index and from the master table */ + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3BeginWriteOperation(pParse, 1, iDb); + sqlite3NestedParse(pParse, + "DELETE FROM %Q.%s WHERE name=%Q AND type='index'", + db->aDb[iDb].zDbSName, MASTER_NAME, pIndex->zName + ); + sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName); + sqlite3ChangeCookie(pParse, iDb); + destroyRootPage(pParse, pIndex->tnum, iDb); + sqlite3VdbeAddOp4(v, OP_DropIndex, iDb, 0, 0, pIndex->zName, 0); + } + +exit_drop_index: + sqlite3SrcListDelete(db, pName); +} + +/* +** pArray is a pointer to an array of objects. Each object in the +** array is szEntry bytes in size. This routine uses sqlite3DbRealloc() +** to extend the array so that there is space for a new object at the end. +** +** When this function is called, *pnEntry contains the current size of +** the array (in entries - so the allocation is ((*pnEntry) * szEntry) bytes +** in total). +** +** If the realloc() is successful (i.e. if no OOM condition occurs), the +** space allocated for the new object is zeroed, *pnEntry updated to +** reflect the new size of the array and a pointer to the new allocation +** returned. *pIdx is set to the index of the new array entry in this case. +** +** Otherwise, if the realloc() fails, *pIdx is set to -1, *pnEntry remains +** unchanged and a copy of pArray returned. +*/ +SQLITE_PRIVATE void *sqlite3ArrayAllocate( + sqlite3 *db, /* Connection to notify of malloc failures */ + void *pArray, /* Array of objects. Might be reallocated */ + int szEntry, /* Size of each object in the array */ + int *pnEntry, /* Number of objects currently in use */ + int *pIdx /* Write the index of a new slot here */ +){ + char *z; + int n = *pnEntry; + if( (n & (n-1))==0 ){ + int sz = (n==0) ? 1 : 2*n; + void *pNew = sqlite3DbRealloc(db, pArray, sz*szEntry); + if( pNew==0 ){ + *pIdx = -1; + return pArray; + } + pArray = pNew; + } + z = (char*)pArray; + memset(&z[n * szEntry], 0, szEntry); + *pIdx = n; + ++*pnEntry; + return pArray; +} + +/* +** Append a new element to the given IdList. Create a new IdList if +** need be. 
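
The (n & (n-1))==0 test in sqlite3ArrayAllocate() above reallocates only when the current count is zero or a power of two, so the array doubles at sizes 1,2,4,8,... and appends are amortized O(1). A standalone sketch of just that predicate:

    #include <stdio.h>

    int main(void){
      for(int n=0; n<=9; n++){
        /* realloc happens exactly at n = 0,1,2,4,8,... */
        printf("n=%d -> realloc %s\n", n, ((n & (n-1))==0) ? "yes" : "no");
      }
      return 0;
    }
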
+**
+** A new IdList is returned, or NULL if malloc() fails.
+*/
+SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3 *db, IdList *pList, Token *pToken){
+  int i;
+  if( pList==0 ){
+    pList = sqlite3DbMallocZero(db, sizeof(IdList) );
+    if( pList==0 ) return 0;
+  }
+  pList->a = sqlite3ArrayAllocate(
+      db,
+      pList->a,
+      sizeof(pList->a[0]),
+      &pList->nId,
+      &i
+  );
+  if( i<0 ){
+    sqlite3IdListDelete(db, pList);
+    return 0;
+  }
+  pList->a[i].zName = sqlite3NameFromToken(db, pToken);
+  return pList;
+}
+
+/*
+** Delete an IdList.
+*/
+SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){
+  int i;
+  if( pList==0 ) return;
+  for(i=0; i<pList->nId; i++){
+    sqlite3DbFree(db, pList->a[i].zName);
+  }
+  sqlite3DbFree(db, pList->a);
+  sqlite3DbFree(db, pList);
+}
+
+/*
+** Return the index in pList of the identifier named zId.  Return -1
+** if not found.
+*/
+SQLITE_PRIVATE int sqlite3IdListIndex(IdList *pList, const char *zName){
+  int i;
+  if( pList==0 ) return -1;
+  for(i=0; i<pList->nId; i++){
+    if( sqlite3StrICmp(pList->a[i].zName, zName)==0 ) return i;
+  }
+  return -1;
+}
+
+/*
+** Expand the space allocated for the given SrcList object by
+** creating nExtra new slots beginning at iStart.  iStart is zero-based.
+** New slots are zeroed.
+**
+** For example, suppose a SrcList initially contains two entries:  A,B.
+** To append 3 new entries onto the end, do this:
+**
+**    sqlite3SrcListEnlarge(db, pSrclist, 3, 2);
+**
+** After the call above it would contain:  A, B, nil, nil, nil.
+** If the iStart argument had been 1 instead of 2, then the result
+** would have been:  A, nil, nil, nil, B.  To prepend the new slots,
+** the iStart value would be 0.  The result then would
+** be: nil, nil, nil, A, B.
+**
+** If a memory allocation fails the SrcList is unchanged.  The
+** db->mallocFailed flag will be set to true.
+*/
+SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
+  sqlite3 *db,       /* Database connection to notify of OOM errors */
+  SrcList *pSrc,     /* The SrcList to be enlarged */
+  int nExtra,        /* Number of new slots to add to pSrc->a[] */
+  int iStart         /* Index in pSrc->a[] of first new slot */
+){
+  int i;
+
+  /* Sanity checking on calling parameters */
+  assert( iStart>=0 );
+  assert( nExtra>=1 );
+  assert( pSrc!=0 );
+  assert( iStart<=pSrc->nSrc );
+
+  /* Allocate additional space if needed */
+  if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){
+    SrcList *pNew;
+    int nAlloc = pSrc->nSrc*2+nExtra;
+    int nGot;
+    pNew = sqlite3DbRealloc(db, pSrc,
+               sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) );
+    if( pNew==0 ){
+      assert( db->mallocFailed );
+      return pSrc;
+    }
+    pSrc = pNew;
+    nGot = (sqlite3DbMallocSize(db, pNew) - sizeof(*pSrc))/sizeof(pSrc->a[0])+1;
+    pSrc->nAlloc = nGot;
+  }
+
+  /* Move existing slots that come after the newly inserted slots
+  ** out of the way */
+  for(i=pSrc->nSrc-1; i>=iStart; i--){
+    pSrc->a[i+nExtra] = pSrc->a[i];
+  }
+  pSrc->nSrc += nExtra;
+
+  /* Zero the newly allocated slots */
+  memset(&pSrc->a[iStart], 0, sizeof(pSrc->a[0])*nExtra);
+  for(i=iStart; i<iStart+nExtra; i++){
+    pSrc->a[i].iCursor = -1;
+  }
+
+  /* Return a pointer to the enlarged SrcList */
+  return pSrc;
+}
+
+
+/*
+** Append a new table name to the given SrcList.  Create a new SrcList if
+** need be.  A new entry is created in the SrcList even if pTable is NULL.
+**
+** A SrcList is returned, or NULL if there is an OOM error.  The returned
+** SrcList might be the same as the SrcList that was input or it might be
+** a new one.  If an OOM error does occur, then the prior value of pList
+** that is input to this routine is automatically freed.
+**
+** If pDatabase is not null, it means that the table has an optional
+** database name prefix.  Like this:  "database.table".  The pDatabase
+** points to the table name and the pTable points to the database name.
+** The SrcList.a[].zName field is filled with the table name which might
+** come from pTable (if pDatabase is NULL) or from pDatabase.
+** SrcList.a[].zDatabase is filled with the database name from pTable,
+** or with NULL if no database is specified.
+**
+** In other words, if called like this:
+**
+**         sqlite3SrcListAppend(D,A,B,0);
+**
+** Then B is a table name and the database name is unspecified.  If called
+** like this:
+**
+**         sqlite3SrcListAppend(D,A,B,C);
+**
+** Then C is the table name and B is the database name.  If C is defined
+** then so is B.  In other words, we never have a case where:
+**
+**         sqlite3SrcListAppend(D,A,0,C);
+**
+** Both pTable and pDatabase are assumed to be quoted.  They are dequoted
+** before being added to the SrcList.
+*/
+SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(
+  sqlite3 *db,        /* Connection to notify of malloc failures */
+  SrcList *pList,     /* Append to this SrcList. NULL creates a new SrcList */
+  Token *pTable,      /* Table to append */
+  Token *pDatabase    /* Database of the table */
+){
+  struct SrcList_item *pItem;
+  assert( pDatabase==0 || pTable!=0 );  /* Cannot have C without B */
+  assert( db!=0 );
+  if( pList==0 ){
+    pList = sqlite3DbMallocRawNN(db, sizeof(SrcList) );
+    if( pList==0 ) return 0;
+    pList->nAlloc = 1;
+    pList->nSrc = 1;
+    memset(&pList->a[0], 0, sizeof(pList->a[0]));
+    pList->a[0].iCursor = -1;
+  }else{
+    pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc);
+  }
+  if( db->mallocFailed ){
+    sqlite3SrcListDelete(db, pList);
+    return 0;
+  }
+  pItem = &pList->a[pList->nSrc-1];
+  if( pDatabase && pDatabase->z==0 ){
+    pDatabase = 0;
+  }
+  if( pDatabase ){
+    Token *pTemp = pDatabase;
+    pDatabase = pTable;
+    pTable = pTemp;
+  }
+  pItem->zName = sqlite3NameFromToken(db, pTable);
+  pItem->zDatabase = sqlite3NameFromToken(db, pDatabase);
+  return pList;
+}
+
+/*
+** Assign VdbeCursor index numbers to all tables in a SrcList
+*/
+SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){
+  int i;
+  struct SrcList_item *pItem;
+  assert(pList || pParse->db->mallocFailed );
+  if( pList ){
+    for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
+      if( pItem->iCursor>=0 ) break;
+      pItem->iCursor = pParse->nTab++;
+      if( pItem->pSelect ){
+        sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
+      }
+    }
+  }
+}
+
+/*
+** Delete an entire SrcList including all its substructure.
+*/
+SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
+  int i;
+  struct SrcList_item *pItem;
+  if( pList==0 ) return;
+  for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){
+    sqlite3DbFree(db, pItem->zDatabase);
+    sqlite3DbFree(db, pItem->zName);
+    sqlite3DbFree(db, pItem->zAlias);
+    if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy);
+    if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg);
+    sqlite3DeleteTable(db, pItem->pTab);
+    sqlite3SelectDelete(db, pItem->pSelect);
+    sqlite3ExprDelete(db, pItem->pOn);
+    sqlite3IdListDelete(db, pItem->pUsing);
+  }
+  sqlite3DbFree(db, pList);
+}
+
+/*
+** This routine is called by the parser to add a new term to the
+** end of a growing FROM clause.  The "p" parameter is the part of
+** the FROM clause that has already been constructed.  "p" is NULL
+** if this is the first term of the FROM clause.
pTable and pDatabase +** are the name of the table and database named in the FROM clause term. +** pDatabase is NULL if the database name qualifier is missing - the +** usual case. If the term has an alias, then pAlias points to the +** alias token. If the term is a subquery, then pSubquery is the +** SELECT statement that the subquery encodes. The pTable and +** pDatabase parameters are NULL for subqueries. The pOn and pUsing +** parameters are the content of the ON and USING clauses. +** +** Return a new SrcList which encodes is the FROM with the new +** term added. +*/ +SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( + Parse *pParse, /* Parsing context */ + SrcList *p, /* The left part of the FROM clause already seen */ + Token *pTable, /* Name of the table to add to the FROM clause */ + Token *pDatabase, /* Name of the database containing pTable */ + Token *pAlias, /* The right-hand side of the AS subexpression */ + Select *pSubquery, /* A subquery used in place of a table name */ + Expr *pOn, /* The ON clause of a join */ + IdList *pUsing /* The USING clause of a join */ +){ + struct SrcList_item *pItem; + sqlite3 *db = pParse->db; + if( !p && (pOn || pUsing) ){ + sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s", + (pOn ? "ON" : "USING") + ); + goto append_from_error; + } + p = sqlite3SrcListAppend(db, p, pTable, pDatabase); + if( p==0 || NEVER(p->nSrc==0) ){ + goto append_from_error; + } + pItem = &p->a[p->nSrc-1]; + assert( pAlias!=0 ); + if( pAlias->n ){ + pItem->zAlias = sqlite3NameFromToken(db, pAlias); + } + pItem->pSelect = pSubquery; + pItem->pOn = pOn; + pItem->pUsing = pUsing; + return p; + + append_from_error: + assert( p==0 ); + sqlite3ExprDelete(db, pOn); + sqlite3IdListDelete(db, pUsing); + sqlite3SelectDelete(db, pSubquery); + return 0; +} + +/* +** Add an INDEXED BY or NOT INDEXED clause to the most recently added +** element of the source-list passed as the second argument. +*/ +SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){ + assert( pIndexedBy!=0 ); + if( p && ALWAYS(p->nSrc>0) ){ + struct SrcList_item *pItem = &p->a[p->nSrc-1]; + assert( pItem->fg.notIndexed==0 ); + assert( pItem->fg.isIndexedBy==0 ); + assert( pItem->fg.isTabFunc==0 ); + if( pIndexedBy->n==1 && !pIndexedBy->z ){ + /* A "NOT INDEXED" clause was supplied. See parse.y + ** construct "indexed_opt" for details. */ + pItem->fg.notIndexed = 1; + }else{ + pItem->u1.zIndexedBy = sqlite3NameFromToken(pParse->db, pIndexedBy); + pItem->fg.isIndexedBy = (pItem->u1.zIndexedBy!=0); + } + } +} + +/* +** Add the list of function arguments to the SrcList entry for a +** table-valued-function. +*/ +SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *pList){ + if( p ){ + struct SrcList_item *pItem = &p->a[p->nSrc-1]; + assert( pItem->fg.notIndexed==0 ); + assert( pItem->fg.isIndexedBy==0 ); + assert( pItem->fg.isTabFunc==0 ); + pItem->u1.pFuncArg = pList; + pItem->fg.isTabFunc = 1; + }else{ + sqlite3ExprListDelete(pParse->db, pList); + } +} + +/* +** When building up a FROM clause in the parser, the join operator +** is initially attached to the left operand. But the code generator +** expects the join operator to be on the right operand. This routine +** Shifts all join operators from left to right for an entire FROM +** clause. +** +** Example: Suppose the join is like this: +** +** A natural cross join B +** +** The operator is "natural cross join". The A and B operands are stored +** in p->a[0] and p->a[1], respectively. 
The parser initially stores the +** operator with A. This routine shifts that operator over to B. +*/ +SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList *p){ + if( p ){ + int i; + for(i=p->nSrc-1; i>0; i--){ + p->a[i].fg.jointype = p->a[i-1].fg.jointype; + } + p->a[0].fg.jointype = 0; + } +} + +/* +** Generate VDBE code for a BEGIN statement. +*/ +SQLITE_PRIVATE void sqlite3BeginTransaction(Parse *pParse, int type){ + sqlite3 *db; + Vdbe *v; + int i; + + assert( pParse!=0 ); + db = pParse->db; + assert( db!=0 ); + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ){ + return; + } + v = sqlite3GetVdbe(pParse); + if( !v ) return; + if( type!=TK_DEFERRED ){ + for(i=0; inDb; i++){ + sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); + sqlite3VdbeUsesBtree(v, i); + } + } + sqlite3VdbeAddOp0(v, OP_AutoCommit); +} + +/* +** Generate VDBE code for a COMMIT statement. +*/ +SQLITE_PRIVATE void sqlite3CommitTransaction(Parse *pParse){ + Vdbe *v; + + assert( pParse!=0 ); + assert( pParse->db!=0 ); + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ){ + return; + } + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp1(v, OP_AutoCommit, 1); + } +} + +/* +** Generate VDBE code for a ROLLBACK statement. +*/ +SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse *pParse){ + Vdbe *v; + + assert( pParse!=0 ); + assert( pParse->db!=0 ); + if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ){ + return; + } + v = sqlite3GetVdbe(pParse); + if( v ){ + sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 1); + } +} + +/* +** This function is called by the parser when it parses a command to create, +** release or rollback an SQL savepoint. +*/ +SQLITE_PRIVATE void sqlite3Savepoint(Parse *pParse, int op, Token *pName){ + char *zName = sqlite3NameFromToken(pParse->db, pName); + if( zName ){ + Vdbe *v = sqlite3GetVdbe(pParse); +#ifndef SQLITE_OMIT_AUTHORIZATION + static const char * const az[] = { "BEGIN", "RELEASE", "ROLLBACK" }; + assert( !SAVEPOINT_BEGIN && SAVEPOINT_RELEASE==1 && SAVEPOINT_ROLLBACK==2 ); +#endif + if( !v || sqlite3AuthCheck(pParse, SQLITE_SAVEPOINT, az[op], zName, 0) ){ + sqlite3DbFree(pParse->db, zName); + return; + } + sqlite3VdbeAddOp4(v, OP_Savepoint, op, 0, 0, zName, P4_DYNAMIC); + } +} + +/* +** Make sure the TEMP database is open and available for use. Return +** the number of errors. Leave any error messages in the pParse structure. +*/ +SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){ + sqlite3 *db = pParse->db; + if( db->aDb[1].pBt==0 && !pParse->explain ){ + int rc; + Btree *pBt; + static const int flags = + SQLITE_OPEN_READWRITE | + SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | + SQLITE_OPEN_DELETEONCLOSE | + SQLITE_OPEN_TEMP_DB; + + rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pBt, 0, flags); + if( rc!=SQLITE_OK ){ + sqlite3ErrorMsg(pParse, "unable to open a temporary database " + "file for storing temporary tables"); + pParse->rc = rc; + return 1; + } + db->aDb[1].pBt = pBt; + assert( db->aDb[1].pSchema ); + if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize, -1, 0) ){ + sqlite3OomFault(db); + return 1; + } + } + return 0; +} + +/* +** Record the fact that the schema cookie will need to be verified +** for database iDb. The code to actually verify the schema cookie +** will occur at the end of the top-level VDBE and will be generated +** later, by sqlite3FinishCoding(). 
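+**
+** (Editor's note, hedged: this cookie check is what invalidates stale
+** prepared statements. With the legacy sqlite3_prepare() interface a
+** schema change surfaces as SQLITE_SCHEMA at step time, while a
+** statement from sqlite3_prepare_v2() recompiles itself transparently:
+**
+**        sqlite3_stmt *pStmt;
+**        sqlite3_prepare_v2(db, "SELECT a FROM t1", -1, &pStmt, 0);
+**        sqlite3_step(pStmt);   -- re-prepared automatically if the
+**                               -- schema cookie no longer matches.)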
+*/
+SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){
+  Parse *pToplevel = sqlite3ParseToplevel(pParse);
+
+  assert( iDb>=0 && iDb<pParse->db->nDb );
+  assert( pParse->db->aDb[iDb].pBt!=0 || iDb==1 );
+  assert( iDb<SQLITE_MAX_ATTACHED+2 );
+  assert( sqlite3SchemaMutexHeld(pParse->db, iDb, 0) );
+  if( DbMaskTest(pToplevel->cookieMask, iDb)==0 ){
+    DbMaskSet(pToplevel->cookieMask, iDb);
+    if( !OMIT_TEMPDB && iDb==1 ){
+      sqlite3OpenTempDatabase(pToplevel);
+    }
+  }
+}
+
+/*
+** If argument zDb is NULL, then call sqlite3CodeVerifySchema() for each
+** attached database. Otherwise, invoke it for the database named zDb only.
+*/
+SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb){
+  sqlite3 *db = pParse->db;
+  int i;
+  for(i=0; i<db->nDb; i++){
+    Db *pDb = &db->aDb[i];
+    if( pDb->pBt && (!zDb || 0==sqlite3StrICmp(zDb, pDb->zDbSName)) ){
+      sqlite3CodeVerifySchema(pParse, i);
+    }
+  }
+}
+
+/*
+** Generate VDBE code that prepares for doing an operation that
+** might change the database.
+**
+** This routine starts a new transaction if we are not already within
+** a transaction. If we are already within a transaction, then a checkpoint
+** is set if the setStatement parameter is true. A checkpoint should
+** be set for operations that might fail (due to a constraint) part of
+** the way through and which will need to undo some writes without having to
+** rollback the whole transaction. For operations where all constraints
+** can be checked before any changes are made to the database, it is never
+** necessary to undo a write and the checkpoint should not be set.
+*/
+SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){
+  Parse *pToplevel = sqlite3ParseToplevel(pParse);
+  sqlite3CodeVerifySchema(pParse, iDb);
+  DbMaskSet(pToplevel->writeMask, iDb);
+  pToplevel->isMultiWrite |= setStatement;
+}
+
+/*
+** Indicate that the statement currently under construction might write
+** more than one entry (example: deleting one row then inserting another,
+** inserting multiple rows in a table, or inserting a row and index entries.)
+** If an abort occurs after some of these writes have completed, then it will
+** be necessary to undo the completed writes.
+*/
+SQLITE_PRIVATE void sqlite3MultiWrite(Parse *pParse){
+  Parse *pToplevel = sqlite3ParseToplevel(pParse);
+  pToplevel->isMultiWrite = 1;
+}
+
+/*
+** The code generator calls this routine if it discovers that it is
+** possible to abort a statement prior to completion. In order to
+** perform this abort without corrupting the database, we need to make
+** sure that the statement is protected by a statement transaction.
+**
+** Technically, we only need to set the mayAbort flag if the
+** isMultiWrite flag was previously set. There is a time dependency
+** such that the abort must occur after the multiwrite. This makes
+** some statements involving the REPLACE conflict resolution algorithm
+** go a little faster. But taking advantage of this time dependency
+** makes it more difficult to prove that the code is correct (in
+** particular, it prevents us from writing an effective
+** implementation of sqlite3AssertMayAbort()) and so we have chosen
+** to take the safe route and skip the optimization.
+*/
+SQLITE_PRIVATE void sqlite3MayAbort(Parse *pParse){
+  Parse *pToplevel = sqlite3ParseToplevel(pParse);
+  pToplevel->mayAbort = 1;
+}
+
+/*
+** Code an OP_Halt that causes the vdbe to return an SQLITE_CONSTRAINT
+** error. The onError parameter determines which (if any) of the statement
+** and/or current transaction is rolled back.
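+**
+** (Editor's note: an illustrative mapping, hedged. The onError values
+** correspond to the SQL-level conflict clauses, for example:
+**
+**        INSERT OR FAIL INTO t1 VALUES(1);      -- OE_Fail: halt, but
+**                                               -- keep prior changes
+**        INSERT OR ABORT INTO t1 VALUES(1);     -- OE_Abort: undo this
+**                                               -- statement (default)
+**        INSERT OR ROLLBACK INTO t1 VALUES(1);  -- OE_Rollback: roll back
+**                                               -- the whole transaction.)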
+*/ +SQLITE_PRIVATE void sqlite3HaltConstraint( + Parse *pParse, /* Parsing context */ + int errCode, /* extended error code */ + int onError, /* Constraint type */ + char *p4, /* Error message */ + i8 p4type, /* P4_STATIC or P4_TRANSIENT */ + u8 p5Errmsg /* P5_ErrMsg type */ +){ + Vdbe *v = sqlite3GetVdbe(pParse); + assert( (errCode&0xff)==SQLITE_CONSTRAINT ); + if( onError==OE_Abort ){ + sqlite3MayAbort(pParse); + } + sqlite3VdbeAddOp4(v, OP_Halt, errCode, onError, 0, p4, p4type); + sqlite3VdbeChangeP5(v, p5Errmsg); +} + +/* +** Code an OP_Halt due to UNIQUE or PRIMARY KEY constraint violation. +*/ +SQLITE_PRIVATE void sqlite3UniqueConstraint( + Parse *pParse, /* Parsing context */ + int onError, /* Constraint type */ + Index *pIdx /* The index that triggers the constraint */ +){ + char *zErr; + int j; + StrAccum errMsg; + Table *pTab = pIdx->pTable; + + sqlite3StrAccumInit(&errMsg, pParse->db, 0, 0, 200); + if( pIdx->aColExpr ){ + sqlite3XPrintf(&errMsg, "index '%q'", pIdx->zName); + }else{ + for(j=0; jnKeyCol; j++){ + char *zCol; + assert( pIdx->aiColumn[j]>=0 ); + zCol = pTab->aCol[pIdx->aiColumn[j]].zName; + if( j ) sqlite3StrAccumAppend(&errMsg, ", ", 2); + sqlite3XPrintf(&errMsg, "%s.%s", pTab->zName, zCol); + } + } + zErr = sqlite3StrAccumFinish(&errMsg); + sqlite3HaltConstraint(pParse, + IsPrimaryKeyIndex(pIdx) ? SQLITE_CONSTRAINT_PRIMARYKEY + : SQLITE_CONSTRAINT_UNIQUE, + onError, zErr, P4_DYNAMIC, P5_ConstraintUnique); +} + + +/* +** Code an OP_Halt due to non-unique rowid. +*/ +SQLITE_PRIVATE void sqlite3RowidConstraint( + Parse *pParse, /* Parsing context */ + int onError, /* Conflict resolution algorithm */ + Table *pTab /* The table with the non-unique rowid */ +){ + char *zMsg; + int rc; + if( pTab->iPKey>=0 ){ + zMsg = sqlite3MPrintf(pParse->db, "%s.%s", pTab->zName, + pTab->aCol[pTab->iPKey].zName); + rc = SQLITE_CONSTRAINT_PRIMARYKEY; + }else{ + zMsg = sqlite3MPrintf(pParse->db, "%s.rowid", pTab->zName); + rc = SQLITE_CONSTRAINT_ROWID; + } + sqlite3HaltConstraint(pParse, rc, onError, zMsg, P4_DYNAMIC, + P5_ConstraintUnique); +} + +/* +** Check to see if pIndex uses the collating sequence pColl. Return +** true if it does and false if it does not. +*/ +#ifndef SQLITE_OMIT_REINDEX +static int collationMatch(const char *zColl, Index *pIndex){ + int i; + assert( zColl!=0 ); + for(i=0; inColumn; i++){ + const char *z = pIndex->azColl[i]; + assert( z!=0 || pIndex->aiColumn[i]<0 ); + if( pIndex->aiColumn[i]>=0 && 0==sqlite3StrICmp(z, zColl) ){ + return 1; + } + } + return 0; +} +#endif + +/* +** Recompute all indices of pTab that use the collating sequence pColl. +** If pColl==0 then recompute all indices of pTab. +*/ +#ifndef SQLITE_OMIT_REINDEX +static void reindexTable(Parse *pParse, Table *pTab, char const *zColl){ + Index *pIndex; /* An index associated with pTab */ + + for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){ + if( zColl==0 || collationMatch(zColl, pIndex) ){ + int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + sqlite3BeginWriteOperation(pParse, 0, iDb); + sqlite3RefillIndex(pParse, pIndex, -1); + } + } +} +#endif + +/* +** Recompute all indices of all tables in all databases where the +** indices use the collating sequence pColl. If pColl==0 then recompute +** all indices everywhere. 
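+**
+** (Editor's note: a usage example, hedged. An application that redefines
+** a collation via sqlite3_create_collation() is expected to rebuild the
+** affected indices, which is exactly what this helper implements:
+**
+**        REINDEX nocase;   -- only indices that use collation "nocase"
+**        REINDEX;          -- every index in every attached database.)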
+*/
+#ifndef SQLITE_OMIT_REINDEX
+static void reindexDatabases(Parse *pParse, char const *zColl){
+  Db *pDb;                    /* A single database */
+  int iDb;                    /* The database index number */
+  sqlite3 *db = pParse->db;   /* The database connection */
+  HashElem *k;                /* For looping over tables in pDb */
+  Table *pTab;                /* A table in the database */
+
+  assert( sqlite3BtreeHoldsAllMutexes(db) );  /* Needed for schema access */
+  for(iDb=0, pDb=db->aDb; iDb<db->nDb; iDb++, pDb++){
+    assert( pDb!=0 );
+    for(k=sqliteHashFirst(&pDb->pSchema->tblHash);  k; k=sqliteHashNext(k)){
+      pTab = (Table*)sqliteHashData(k);
+      reindexTable(pParse, pTab, zColl);
+    }
+  }
+}
+#endif
+
+/*
+** Generate code for the REINDEX command.
+**
+**        REINDEX                             -- 1
+**        REINDEX  <collation>                -- 2
+**        REINDEX  ?<database>.?<tablename>   -- 3
+**        REINDEX  ?<database>.?<indexname>   -- 4
+**
+** Form 1 causes all indices in all attached databases to be rebuilt.
+** Form 2 rebuilds all indices in all databases that use the named
+** collating function. Forms 3 and 4 rebuild the named index or all
+** indices associated with the named table.
+*/
+#ifndef SQLITE_OMIT_REINDEX
+SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
+  CollSeq *pColl;             /* Collating sequence to be reindexed, or NULL */
+  char *z;                    /* Name of a table or index */
+  const char *zDb;            /* Name of the database */
+  Table *pTab;                /* A table in the database */
+  Index *pIndex;              /* An index associated with pTab */
+  int iDb;                    /* The database index number */
+  sqlite3 *db = pParse->db;   /* The database connection */
+  Token *pObjName;            /* Name of the table or index to be reindexed */
+
+  /* Read the database schema. If an error occurs, leave an error message
+  ** and code in pParse and return NULL. */
+  if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+    return;
+  }
+
+  if( pName1==0 ){
+    reindexDatabases(pParse, 0);
+    return;
+  }else if( NEVER(pName2==0) || pName2->z==0 ){
+    char *zColl;
+    assert( pName1->z );
+    zColl = sqlite3NameFromToken(pParse->db, pName1);
+    if( !zColl ) return;
+    pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0);
+    if( pColl ){
+      reindexDatabases(pParse, zColl);
+      sqlite3DbFree(db, zColl);
+      return;
+    }
+    sqlite3DbFree(db, zColl);
+  }
+  iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName);
+  if( iDb<0 ) return;
+  z = sqlite3NameFromToken(db, pObjName);
+  if( z==0 ) return;
+  zDb = db->aDb[iDb].zDbSName;
+  pTab = sqlite3FindTable(db, z, zDb);
+  if( pTab ){
+    reindexTable(pParse, pTab, 0);
+    sqlite3DbFree(db, z);
+    return;
+  }
+  pIndex = sqlite3FindIndex(db, z, zDb);
+  sqlite3DbFree(db, z);
+  if( pIndex ){
+    sqlite3BeginWriteOperation(pParse, 0, iDb);
+    sqlite3RefillIndex(pParse, pIndex, -1);
+    return;
+  }
+  sqlite3ErrorMsg(pParse, "unable to identify the object to be reindexed");
+}
+#endif
+
+/*
+** Return a KeyInfo structure that is appropriate for the given Index.
+**
+** The caller should invoke sqlite3KeyInfoUnref() on the returned object
+** when it has finished using it.
+*/
+SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){
+  int i;
+  int nCol = pIdx->nColumn;
+  int nKey = pIdx->nKeyCol;
+  KeyInfo *pKey;
+  if( pParse->nErr ) return 0;
+  if( pIdx->uniqNotNull ){
+    pKey = sqlite3KeyInfoAlloc(pParse->db, nKey, nCol-nKey);
+  }else{
+    pKey = sqlite3KeyInfoAlloc(pParse->db, nCol, 0);
+  }
+  if( pKey ){
+    assert( sqlite3KeyInfoIsWriteable(pKey) );
+    for(i=0; i<nCol; i++){
+      const char *zColl = pIdx->azColl[i];
+      pKey->aColl[i] = zColl==sqlite3StrBINARY ?
0 : + sqlite3LocateCollSeq(pParse, zColl); + pKey->aSortOrder[i] = pIdx->aSortOrder[i]; + } + if( pParse->nErr ){ + sqlite3KeyInfoUnref(pKey); + pKey = 0; + } + } + return pKey; +} + +#ifndef SQLITE_OMIT_CTE +/* +** This routine is invoked once per CTE by the parser while parsing a +** WITH clause. +*/ +SQLITE_PRIVATE With *sqlite3WithAdd( + Parse *pParse, /* Parsing context */ + With *pWith, /* Existing WITH clause, or NULL */ + Token *pName, /* Name of the common-table */ + ExprList *pArglist, /* Optional column name list for the table */ + Select *pQuery /* Query used to initialize the table */ +){ + sqlite3 *db = pParse->db; + With *pNew; + char *zName; + + /* Check that the CTE name is unique within this WITH clause. If + ** not, store an error in the Parse structure. */ + zName = sqlite3NameFromToken(pParse->db, pName); + if( zName && pWith ){ + int i; + for(i=0; inCte; i++){ + if( sqlite3StrICmp(zName, pWith->a[i].zName)==0 ){ + sqlite3ErrorMsg(pParse, "duplicate WITH table name: %s", zName); + } + } + } + + if( pWith ){ + int nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte); + pNew = sqlite3DbRealloc(db, pWith, nByte); + }else{ + pNew = sqlite3DbMallocZero(db, sizeof(*pWith)); + } + assert( (pNew!=0 && zName!=0) || db->mallocFailed ); + + if( db->mallocFailed ){ + sqlite3ExprListDelete(db, pArglist); + sqlite3SelectDelete(db, pQuery); + sqlite3DbFree(db, zName); + pNew = pWith; + }else{ + pNew->a[pNew->nCte].pSelect = pQuery; + pNew->a[pNew->nCte].pCols = pArglist; + pNew->a[pNew->nCte].zName = zName; + pNew->a[pNew->nCte].zCteErr = 0; + pNew->nCte++; + } + + return pNew; +} + +/* +** Free the contents of the With object passed as the second argument. +*/ +SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ + if( pWith ){ + int i; + for(i=0; inCte; i++){ + struct Cte *pCte = &pWith->a[i]; + sqlite3ExprListDelete(db, pCte->pCols); + sqlite3SelectDelete(db, pCte->pSelect); + sqlite3DbFree(db, pCte->zName); + } + sqlite3DbFree(db, pWith); + } +} +#endif /* !defined(SQLITE_OMIT_CTE) */ + +/************** End of build.c ***********************************************/ +/************** Begin file callback.c ****************************************/ +/* +** 2005 May 23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains functions used to access the internal hash tables +** of user defined functions and collation sequences. +*/ + +/* #include "sqliteInt.h" */ + +/* +** Invoke the 'collation needed' callback to request a collation sequence +** in the encoding enc of name zName, length nName. 
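+**
+** (Editor's note: a hedged sketch of the public API that this serves.
+** An application registers the factory with sqlite3_collation_needed();
+** collNeeded() and myCollCmp() below are hypothetical names:
+**
+**        static void collNeeded(void *pArg, sqlite3 *db,
+**                               int eTextRep, const char *zName){
+**          if( strcmp(zName, "mycoll")==0 ){
+**            sqlite3_create_collation(db, "mycoll", SQLITE_UTF8,
+**                                     0, myCollCmp);
+**          }
+**        }
+**        sqlite3_collation_needed(db, 0, collNeeded);
+**  )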
+*/ +static void callCollNeeded(sqlite3 *db, int enc, const char *zName){ + assert( !db->xCollNeeded || !db->xCollNeeded16 ); + if( db->xCollNeeded ){ + char *zExternal = sqlite3DbStrDup(db, zName); + if( !zExternal ) return; + db->xCollNeeded(db->pCollNeededArg, db, enc, zExternal); + sqlite3DbFree(db, zExternal); + } +#ifndef SQLITE_OMIT_UTF16 + if( db->xCollNeeded16 ){ + char const *zExternal; + sqlite3_value *pTmp = sqlite3ValueNew(db); + sqlite3ValueSetStr(pTmp, -1, zName, SQLITE_UTF8, SQLITE_STATIC); + zExternal = sqlite3ValueText(pTmp, SQLITE_UTF16NATIVE); + if( zExternal ){ + db->xCollNeeded16(db->pCollNeededArg, db, (int)ENC(db), zExternal); + } + sqlite3ValueFree(pTmp); + } +#endif +} + +/* +** This routine is called if the collation factory fails to deliver a +** collation function in the best encoding but there may be other versions +** of this collation function (for other text encodings) available. Use one +** of these instead if they exist. Avoid a UTF-8 <-> UTF-16 conversion if +** possible. +*/ +static int synthCollSeq(sqlite3 *db, CollSeq *pColl){ + CollSeq *pColl2; + char *z = pColl->zName; + int i; + static const u8 aEnc[] = { SQLITE_UTF16BE, SQLITE_UTF16LE, SQLITE_UTF8 }; + for(i=0; i<3; i++){ + pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, 0); + if( pColl2->xCmp!=0 ){ + memcpy(pColl, pColl2, sizeof(CollSeq)); + pColl->xDel = 0; /* Do not copy the destructor */ + return SQLITE_OK; + } + } + return SQLITE_ERROR; +} + +/* +** This function is responsible for invoking the collation factory callback +** or substituting a collation sequence of a different encoding when the +** requested collation sequence is not available in the desired encoding. +** +** If it is not NULL, then pColl must point to the database native encoding +** collation sequence with name zName, length nName. +** +** The return value is either the collation sequence to be used in database +** db for collation type name zName, length nName, or NULL, if no collation +** sequence can be found. If no collation is found, leave an error message. +** +** See also: sqlite3LocateCollSeq(), sqlite3FindCollSeq() +*/ +SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq( + Parse *pParse, /* Parsing context */ + u8 enc, /* The desired encoding for the collating sequence */ + CollSeq *pColl, /* Collating sequence with native encoding, or NULL */ + const char *zName /* Collating sequence name */ +){ + CollSeq *p; + sqlite3 *db = pParse->db; + + p = pColl; + if( !p ){ + p = sqlite3FindCollSeq(db, enc, zName, 0); + } + if( !p || !p->xCmp ){ + /* No collation sequence of this type for this encoding is registered. + ** Call the collation factory to see if it can supply us with one. + */ + callCollNeeded(db, enc, zName); + p = sqlite3FindCollSeq(db, enc, zName, 0); + } + if( p && !p->xCmp && synthCollSeq(db, p) ){ + p = 0; + } + assert( !p || p->xCmp ); + if( p==0 ){ + sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); + } + return p; +} + +/* +** This routine is called on a collation sequence before it is used to +** check that it is defined. An undefined collation sequence exists when +** a database is loaded that contains references to collation sequences +** that have not been defined by sqlite3_create_collation() etc. +** +** If required, this routine calls the 'collation needed' callback to +** request a definition of the collating sequence. If this doesn't work, +** an equivalent collating sequence that uses a text encoding different +** from the main database is substituted, if one is available. 
+*/ +SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){ + if( pColl ){ + const char *zName = pColl->zName; + sqlite3 *db = pParse->db; + CollSeq *p = sqlite3GetCollSeq(pParse, ENC(db), pColl, zName); + if( !p ){ + return SQLITE_ERROR; + } + assert( p==pColl ); + } + return SQLITE_OK; +} + + + +/* +** Locate and return an entry from the db.aCollSeq hash table. If the entry +** specified by zName and nName is not found and parameter 'create' is +** true, then create a new entry. Otherwise return NULL. +** +** Each pointer stored in the sqlite3.aCollSeq hash table contains an +** array of three CollSeq structures. The first is the collation sequence +** preferred for UTF-8, the second UTF-16le, and the third UTF-16be. +** +** Stored immediately after the three collation sequences is a copy of +** the collation sequence name. A pointer to this string is stored in +** each collation sequence structure. +*/ +static CollSeq *findCollSeqEntry( + sqlite3 *db, /* Database connection */ + const char *zName, /* Name of the collating sequence */ + int create /* Create a new entry if true */ +){ + CollSeq *pColl; + pColl = sqlite3HashFind(&db->aCollSeq, zName); + + if( 0==pColl && create ){ + int nName = sqlite3Strlen30(zName); + pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1); + if( pColl ){ + CollSeq *pDel = 0; + pColl[0].zName = (char*)&pColl[3]; + pColl[0].enc = SQLITE_UTF8; + pColl[1].zName = (char*)&pColl[3]; + pColl[1].enc = SQLITE_UTF16LE; + pColl[2].zName = (char*)&pColl[3]; + pColl[2].enc = SQLITE_UTF16BE; + memcpy(pColl[0].zName, zName, nName); + pColl[0].zName[nName] = 0; + pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, pColl); + + /* If a malloc() failure occurred in sqlite3HashInsert(), it will + ** return the pColl pointer to be deleted (because it wasn't added + ** to the hash table). + */ + assert( pDel==0 || pDel==pColl ); + if( pDel!=0 ){ + sqlite3OomFault(db); + sqlite3DbFree(db, pDel); + pColl = 0; + } + } + } + return pColl; +} + +/* +** Parameter zName points to a UTF-8 encoded string nName bytes long. +** Return the CollSeq* pointer for the collation sequence named zName +** for the encoding 'enc' from the database 'db'. +** +** If the entry specified is not found and 'create' is true, then create a +** new entry. Otherwise return NULL. +** +** A separate function sqlite3LocateCollSeq() is a wrapper around +** this routine. sqlite3LocateCollSeq() invokes the collation factory +** if necessary and generates an error message if the collating sequence +** cannot be found. +** +** See also: sqlite3LocateCollSeq(), sqlite3GetCollSeq() +*/ +SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq( + sqlite3 *db, + u8 enc, + const char *zName, + int create +){ + CollSeq *pColl; + if( zName ){ + pColl = findCollSeqEntry(db, zName, create); + }else{ + pColl = db->pDfltColl; + } + assert( SQLITE_UTF8==1 && SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); + assert( enc>=SQLITE_UTF8 && enc<=SQLITE_UTF16BE ); + if( pColl ) pColl += enc-1; + return pColl; +} + +/* During the search for the best function definition, this procedure +** is called to test how well the function passed as the first argument +** matches the request for a function with nArg arguments in a system +** that uses encoding enc. The value returned indicates how well the +** request is matched. A higher value indicates a better match. +** +** If nArg is -1 that means to only return a match (non-zero) if p->nArg +** is also -1. 
In other words, we are searching for a function that
+** takes a variable number of arguments.
+**
+** If nArg is -2 that means that we are searching for any function
+** regardless of the number of arguments it uses, so return a positive
+** match score for any function.
+**
+** The returned value is always between 0 and 6, as follows:
+**
+** 0: Not a match.
+** 1: UTF8/16 conversion required and function takes any number of arguments.
+** 2: UTF16 byte order change required and function takes any number of args.
+** 3: encoding matches and function takes any number of arguments
+** 4: UTF8/16 conversion required - argument count matches exactly
+** 5: UTF16 byte order conversion required - argument count matches exactly
+** 6: Perfect match: encoding and argument count match exactly.
+**
+** If nArg==(-2) then any function with a non-null xSFunc is
+** a perfect match and any function with xSFunc NULL is
+** a non-match.
+*/
+#define FUNC_PERFECT_MATCH 6  /* The score for a perfect match */
+static int matchQuality(
+  FuncDef *p,     /* The function we are evaluating for match quality */
+  int nArg,       /* Desired number of arguments. (-1)==any */
+  u8 enc          /* Desired text encoding */
+){
+  int match;
+
+  /* nArg of -2 is a special case */
+  if( nArg==(-2) ) return (p->xSFunc==0) ? 0 : FUNC_PERFECT_MATCH;
+
+  /* Wrong number of arguments means "no match" */
+  if( p->nArg!=nArg && p->nArg>=0 ) return 0;
+
+  /* Give a better score to a function with a specific number of arguments
+  ** than to function that accepts any number of arguments. */
+  if( p->nArg==nArg ){
+    match = 4;
+  }else{
+    match = 1;
+  }
+
+  /* Bonus points if the text encoding matches */
+  if( enc==(p->funcFlags & SQLITE_FUNC_ENCMASK) ){
+    match += 2;  /* Exact encoding match */
+  }else if( (enc & p->funcFlags & 2)!=0 ){
+    match += 1;  /* Both are UTF16, but with different byte orders */
+  }
+
+  return match;
+}
+
+/*
+** Search a FuncDefHash for a function with the given name. Return
+** a pointer to the matching FuncDef if found, or 0 if there is no match.
+*/
+static FuncDef *functionSearch(
+  int h,               /* Hash of the name */
+  const char *zFunc    /* Name of function */
+){
+  FuncDef *p;
+  for(p=sqlite3BuiltinFunctions.a[h]; p; p=p->u.pHash){
+    if( sqlite3StrICmp(p->zName, zFunc)==0 ){
+      return p;
+    }
+  }
+  return 0;
+}
+
+/*
+** Insert a new FuncDef into a FuncDefHash hash table.
+*/
+SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(
+  FuncDef *aDef,      /* List of global functions to be inserted */
+  int nDef            /* Length of the apDef[] list */
+){
+  int i;
+  for(i=0; i<nDef; i++){
+    FuncDef *pOther;
+    const char *zName = aDef[i].zName;
+    int nName = sqlite3Strlen30(zName);
+    int h = (sqlite3UpperToLower[(u8)zName[0]] + nName) % SQLITE_FUNC_HASH_SZ;
+    pOther = functionSearch(h, zName);
+    if( pOther ){
+      assert( pOther!=&aDef[i] && pOther->pNext!=&aDef[i] );
+      aDef[i].pNext = pOther->pNext;
+      pOther->pNext = &aDef[i];
+    }else{
+      aDef[i].pNext = 0;
+      aDef[i].u.pHash = sqlite3BuiltinFunctions.a[h];
+      sqlite3BuiltinFunctions.a[h] = &aDef[i];
+    }
+  }
+}
+
+
+
+/*
+** Locate a user function given a name, a number of arguments and a flag
+** indicating whether the function prefers UTF-16 over UTF-8. Return a
+** pointer to the FuncDef structure that defines that function, or return
+** NULL if the function does not exist.
+**
+** If the createFlag argument is true, then a new (blank) FuncDef
+** structure is created and linked into the "db" structure if
+** no matching function previously existed.
+**
+** If nArg is -2, then the first valid function found is returned. A
+** function is valid if xSFunc is non-zero. The nArg==(-2)
+** case is used to see if zName is a valid function name for some number
+** of arguments. If nArg is -2, then createFlag must be 0.
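+**
+** (Editor's note: a worked example, hedged. Registering
+**
+**        sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0,
+**                                halfFunc, 0, 0);
+**
+** takes the createFlag path and installs a FuncDef for ("half",1,UTF8);
+** halfFunc is a hypothetical scalar implementation. A later lookup with
+** nArg==1 and SQLITE_UTF8 scores 4+2=6 (perfect); the same lookup with
+** SQLITE_UTF16LE scores only 4, since UTF8-vs-UTF16 earns no encoding
+** bonus; nArg==2 scores 0 and the entry is passed over.)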
+**
+** If createFlag is false, then a function with the required name and
+** number of arguments may be returned even if the eTextRep flag does not
+** match that requested.
+*/
+SQLITE_PRIVATE FuncDef *sqlite3FindFunction(
+  sqlite3 *db,          /* An open database */
+  const char *zName,    /* Name of the function. zero-terminated */
+  int nArg,             /* Number of arguments. -1 means any number */
+  u8 enc,               /* Preferred text encoding */
+  u8 createFlag         /* Create new entry if true and does not otherwise exist */
+){
+  FuncDef *p;           /* Iterator variable */
+  FuncDef *pBest = 0;   /* Best match found so far */
+  int bestScore = 0;    /* Score of best match */
+  int h;                /* Hash value */
+  int nName;            /* Length of the name */
+
+  assert( nArg>=(-2) );
+  assert( nArg>=(-1) || createFlag==0 );
+  nName = sqlite3Strlen30(zName);
+
+  /* First search for a match amongst the application-defined functions.
+  */
+  p = (FuncDef*)sqlite3HashFind(&db->aFunc, zName);
+  while( p ){
+    int score = matchQuality(p, nArg, enc);
+    if( score>bestScore ){
+      pBest = p;
+      bestScore = score;
+    }
+    p = p->pNext;
+  }
+
+  /* If no match is found, search the built-in functions.
+  **
+  ** If the SQLITE_PreferBuiltin flag is set, then search the built-in
+  ** functions even if a prior app-defined function was found. And give
+  ** priority to built-in functions.
+  **
+  ** Except, if createFlag is true, that means that we are trying to
+  ** install a new function. Whatever FuncDef structure is returned it will
+  ** have fields overwritten with new information appropriate for the
+  ** new function. But the FuncDefs for built-in functions are read-only.
+  ** So we must not search for built-ins when creating a new function.
+  */
+  if( !createFlag && (pBest==0 || (db->flags & SQLITE_PreferBuiltin)!=0) ){
+    bestScore = 0;
+    h = (sqlite3UpperToLower[(u8)zName[0]] + nName) % SQLITE_FUNC_HASH_SZ;
+    p = functionSearch(h, zName);
+    while( p ){
+      int score = matchQuality(p, nArg, enc);
+      if( score>bestScore ){
+        pBest = p;
+        bestScore = score;
+      }
+      p = p->pNext;
+    }
+  }
+
+  /* If the createFlag parameter is true and the search did not reveal an
+  ** exact match for the name, number of arguments and encoding, then add a
+  ** new entry to the hash table and return it.
+  */
+  if( createFlag && bestScore<FUNC_PERFECT_MATCH &&
+      (pBest = sqlite3DbMallocZero(db, sizeof(*pBest)+nName+1))!=0 ){
+    FuncDef *pOther;
+    pBest->zName = (const char*)&pBest[1];
+    pBest->nArg = (u16)nArg;
+    pBest->funcFlags = enc;
+    memcpy((char*)&pBest[1], zName, nName+1);
+    pOther = (FuncDef*)sqlite3HashInsert(&db->aFunc, pBest->zName, pBest);
+    if( pOther==pBest ){
+      sqlite3DbFree(db, pBest);
+      sqlite3OomFault(db);
+      return 0;
+    }else{
+      pBest->pNext = pOther;
+    }
+  }
+
+  if( pBest && (pBest->xSFunc || createFlag) ){
+    return pBest;
+  }
+  return 0;
+}
+
+/*
+** Free all resources held by the schema structure. The void* argument points
+** at a Schema struct. This function does not call sqlite3DbFree(db, ) on the
+** pointer itself, it just cleans up subsidiary resources (i.e. the contents
+** of the schema hash tables).
+**
+** The Schema.cache_size variable is not cleared.
+*/ +SQLITE_PRIVATE void sqlite3SchemaClear(void *p){ + Hash temp1; + Hash temp2; + HashElem *pElem; + Schema *pSchema = (Schema *)p; + + temp1 = pSchema->tblHash; + temp2 = pSchema->trigHash; + sqlite3HashInit(&pSchema->trigHash); + sqlite3HashClear(&pSchema->idxHash); + for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ + sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem)); + } + sqlite3HashClear(&temp2); + sqlite3HashInit(&pSchema->tblHash); + for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ + Table *pTab = sqliteHashData(pElem); + sqlite3DeleteTable(0, pTab); + } + sqlite3HashClear(&temp1); + sqlite3HashClear(&pSchema->fkeyHash); + pSchema->pSeqTab = 0; + if( pSchema->schemaFlags & DB_SchemaLoaded ){ + pSchema->iGeneration++; + pSchema->schemaFlags &= ~DB_SchemaLoaded; + } +} + +/* +** Find and return the schema associated with a BTree. Create +** a new one if necessary. +*/ +SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ + Schema * p; + if( pBt ){ + p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaClear); + }else{ + p = (Schema *)sqlite3DbMallocZero(0, sizeof(Schema)); + } + if( !p ){ + sqlite3OomFault(db); + }else if ( 0==p->file_format ){ + sqlite3HashInit(&p->tblHash); + sqlite3HashInit(&p->idxHash); + sqlite3HashInit(&p->trigHash); + sqlite3HashInit(&p->fkeyHash); + p->enc = SQLITE_UTF8; + } + return p; +} + +/************** End of callback.c ********************************************/ +/************** Begin file delete.c ******************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that are called by the parser +** in order to generate code for DELETE FROM statements. +*/ +/* #include "sqliteInt.h" */ + +/* +** While a SrcList can in general represent multiple tables and subqueries +** (as in the FROM clause of a SELECT statement) in this case it contains +** the name of a single table, as one might find in an INSERT, DELETE, +** or UPDATE statement. Look up that table in the symbol table and +** return a pointer. Set an error message and return NULL if the table +** name is not found or if any other error occurs. +** +** The following fields are initialized appropriate in pSrc: +** +** pSrc->a[0].pTab Pointer to the Table object +** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one +** +*/ +SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ + struct SrcList_item *pItem = pSrc->a; + Table *pTab; + assert( pItem && pSrc->nSrc==1 ); + pTab = sqlite3LocateTableItem(pParse, 0, pItem); + sqlite3DeleteTable(pParse->db, pItem->pTab); + pItem->pTab = pTab; + if( pTab ){ + pTab->nTabRef++; + } + if( sqlite3IndexedByLookup(pParse, pItem) ){ + pTab = 0; + } + return pTab; +} + +/* +** Check to make sure the given table is writable. If it is not +** writable, generate an error message and return 1. 
If it is +** writable return 0; +*/ +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ + /* A table is not writable under the following circumstances: + ** + ** 1) It is a virtual table and no implementation of the xUpdate method + ** has been provided, or + ** 2) It is a system table (i.e. sqlite_master), this call is not + ** part of a nested parse and writable_schema pragma has not + ** been specified. + ** + ** In either case leave an error message in pParse and return non-zero. + */ + if( ( IsVirtual(pTab) + && sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ) + || ( (pTab->tabFlags & TF_Readonly)!=0 + && (pParse->db->flags & SQLITE_WriteSchema)==0 + && pParse->nested==0 ) + ){ + sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); + return 1; + } + +#ifndef SQLITE_OMIT_VIEW + if( !viewOk && pTab->pSelect ){ + sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); + return 1; + } +#endif + return 0; +} + + +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) +/* +** Evaluate a view and store its result in an ephemeral table. The +** pWhere argument is an optional WHERE clause that restricts the +** set of rows in the view that are to be added to the ephemeral table. +*/ +SQLITE_PRIVATE void sqlite3MaterializeView( + Parse *pParse, /* Parsing context */ + Table *pView, /* View definition */ + Expr *pWhere, /* Optional WHERE clause to be added */ + int iCur /* Cursor number for ephemeral table */ +){ + SelectDest dest; + Select *pSel; + SrcList *pFrom; + sqlite3 *db = pParse->db; + int iDb = sqlite3SchemaToIndex(db, pView->pSchema); + pWhere = sqlite3ExprDup(db, pWhere, 0); + pFrom = sqlite3SrcListAppend(db, 0, 0, 0); + if( pFrom ){ + assert( pFrom->nSrc==1 ); + pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName); + pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + assert( pFrom->a[0].pOn==0 ); + assert( pFrom->a[0].pUsing==0 ); + } + pSel = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, 0, + SF_IncludeHidden, 0, 0); + sqlite3SelectDestInit(&dest, SRT_EphemTab, iCur); + sqlite3Select(pParse, pSel, &dest); + sqlite3SelectDelete(db, pSel); +} +#endif /* !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) */ + +#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) +/* +** Generate an expression tree to implement the WHERE, ORDER BY, +** and LIMIT/OFFSET portion of DELETE and UPDATE statements. +** +** DELETE FROM table_wxyz WHERE a<5 ORDER BY a LIMIT 1; +** \__________________________/ +** pLimitWhere (pInClause) +*/ +SQLITE_PRIVATE Expr *sqlite3LimitWhere( + Parse *pParse, /* The parser context */ + SrcList *pSrc, /* the FROM clause -- which tables to scan */ + Expr *pWhere, /* The WHERE clause. May be null */ + ExprList *pOrderBy, /* The ORDER BY clause. May be null */ + Expr *pLimit, /* The LIMIT clause. May be null */ + Expr *pOffset, /* The OFFSET clause. May be null */ + char *zStmtType /* Either DELETE or UPDATE. For err msgs. */ +){ + Expr *pWhereRowid = NULL; /* WHERE rowid .. */ + Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */ + Expr *pSelectRowid = NULL; /* SELECT rowid ... */ + ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */ + SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */ + Select *pSelect = NULL; /* Complete SELECT tree */ + + /* Check that there isn't an ORDER BY without a LIMIT clause. 
+ */ + if( pOrderBy && (pLimit == 0) ) { + sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on %s", zStmtType); + goto limit_where_cleanup; + } + + /* We only need to generate a select expression if there + ** is a limit/offset term to enforce. + */ + if( pLimit == 0 ) { + /* if pLimit is null, pOffset will always be null as well. */ + assert( pOffset == 0 ); + return pWhere; + } + + /* Generate a select expression tree to enforce the limit/offset + ** term for the DELETE or UPDATE statement. For example: + ** DELETE FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 + ** becomes: + ** DELETE FROM table_a WHERE rowid IN ( + ** SELECT rowid FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 + ** ); + */ + + pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0); + if( pSelectRowid == 0 ) goto limit_where_cleanup; + pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid); + if( pEList == 0 ) goto limit_where_cleanup; + + /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree + ** and the SELECT subtree. */ + pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); + if( pSelectSrc == 0 ) { + sqlite3ExprListDelete(pParse->db, pEList); + goto limit_where_cleanup; + } + + /* generate the SELECT expression tree. */ + pSelect = sqlite3SelectNew(pParse,pEList,pSelectSrc,pWhere,0,0, + pOrderBy,0,pLimit,pOffset); + if( pSelect == 0 ) return 0; + + /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ + pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0); + pInClause = pWhereRowid ? sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0) : 0; + sqlite3PExprAddSelect(pParse, pInClause, pSelect); + return pInClause; + +limit_where_cleanup: + sqlite3ExprDelete(pParse->db, pWhere); + sqlite3ExprListDelete(pParse->db, pOrderBy); + sqlite3ExprDelete(pParse->db, pLimit); + sqlite3ExprDelete(pParse->db, pOffset); + return 0; +} +#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) */ + /* && !defined(SQLITE_OMIT_SUBQUERY) */ + +/* +** Generate code for a DELETE FROM statement. +** +** DELETE FROM table_wxyz WHERE a<5 AND b NOT NULL; +** \________/ \________________/ +** pTabList pWhere +*/ +SQLITE_PRIVATE void sqlite3DeleteFrom( + Parse *pParse, /* The parser context */ + SrcList *pTabList, /* The table from which we should delete things */ + Expr *pWhere /* The WHERE clause. 
May be null */ +){ + Vdbe *v; /* The virtual database engine */ + Table *pTab; /* The table from which records will be deleted */ + int i; /* Loop counter */ + WhereInfo *pWInfo; /* Information about the WHERE clause */ + Index *pIdx; /* For looping over indices of the table */ + int iTabCur; /* Cursor number for the table */ + int iDataCur = 0; /* VDBE cursor for the canonical data source */ + int iIdxCur = 0; /* Cursor number of the first index */ + int nIdx; /* Number of indices */ + sqlite3 *db; /* Main database structure */ + AuthContext sContext; /* Authorization context */ + NameContext sNC; /* Name context to resolve expressions in */ + int iDb; /* Database number */ + int memCnt = -1; /* Memory cell used for change counting */ + int rcauth; /* Value returned by authorization callback */ + int eOnePass; /* ONEPASS_OFF or _SINGLE or _MULTI */ + int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */ + u8 *aToOpen = 0; /* Open cursor iTabCur+j if aToOpen[j] is true */ + Index *pPk; /* The PRIMARY KEY index on the table */ + int iPk = 0; /* First of nPk registers holding PRIMARY KEY value */ + i16 nPk = 1; /* Number of columns in the PRIMARY KEY */ + int iKey; /* Memory cell holding key of row to be deleted */ + i16 nKey; /* Number of memory cells in the row key */ + int iEphCur = 0; /* Ephemeral table holding all primary key values */ + int iRowSet = 0; /* Register for rowset of rows to delete */ + int addrBypass = 0; /* Address of jump over the delete logic */ + int addrLoop = 0; /* Top of the delete loop */ + int addrEphOpen = 0; /* Instruction to open the Ephemeral table */ + int bComplex; /* True if there are triggers or FKs or + ** subqueries in the WHERE clause */ + +#ifndef SQLITE_OMIT_TRIGGER + int isView; /* True if attempting to delete from a view */ + Trigger *pTrigger; /* List of table triggers, if required */ +#endif + + memset(&sContext, 0, sizeof(sContext)); + db = pParse->db; + if( pParse->nErr || db->mallocFailed ){ + goto delete_from_cleanup; + } + assert( pTabList->nSrc==1 ); + + /* Locate the table which we want to delete. This table has to be + ** put in an SrcList structure because some of the subroutines we + ** will be calling are designed to work with multiple tables and expect + ** an SrcList* parameter instead of just a Table* parameter. + */ + pTab = sqlite3SrcListLookup(pParse, pTabList); + if( pTab==0 ) goto delete_from_cleanup; + + /* Figure out if we have any triggers and if the table being + ** deleted from is a view + */ +#ifndef SQLITE_OMIT_TRIGGER + pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); + isView = pTab->pSelect!=0; + bComplex = pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0); +#else +# define pTrigger 0 +# define isView 0 +#endif +#ifdef SQLITE_OMIT_VIEW +# undef isView +# define isView 0 +#endif + + /* If pTab is really a view, make sure it has been initialized. + */ + if( sqlite3ViewGetColumnNames(pParse, pTab) ){ + goto delete_from_cleanup; + } + + if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ + goto delete_from_cleanup; + } + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + assert( iDbnDb ); + rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, + db->aDb[iDb].zDbSName); + assert( rcauth==SQLITE_OK || rcauth==SQLITE_DENY || rcauth==SQLITE_IGNORE ); + if( rcauth==SQLITE_DENY ){ + goto delete_from_cleanup; + } + assert(!isView || pTrigger); + + /* Assign cursor numbers to the table and all its indices. 
+ */ + assert( pTabList->nSrc==1 ); + iTabCur = pTabList->a[0].iCursor = pParse->nTab++; + for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ + pParse->nTab++; + } + + /* Start the view context + */ + if( isView ){ + sqlite3AuthContextPush(pParse, &sContext, pTab->zName); + } + + /* Begin generating code. + */ + v = sqlite3GetVdbe(pParse); + if( v==0 ){ + goto delete_from_cleanup; + } + if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); + sqlite3BeginWriteOperation(pParse, 1, iDb); + + /* If we are trying to delete from a view, realize that view into + ** an ephemeral table. + */ +#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) + if( isView ){ + sqlite3MaterializeView(pParse, pTab, pWhere, iTabCur); + iDataCur = iIdxCur = iTabCur; + } +#endif + + /* Resolve the column names in the WHERE clause. + */ + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pParse; + sNC.pSrcList = pTabList; + if( sqlite3ResolveExprNames(&sNC, pWhere) ){ + goto delete_from_cleanup; + } + + /* Initialize the counter of the number of rows deleted, if + ** we are counting rows. + */ + if( db->flags & SQLITE_CountRows ){ + memCnt = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt); + } + +#ifndef SQLITE_OMIT_TRUNCATE_OPTIMIZATION + /* Special case: A DELETE without a WHERE clause deletes everything. + ** It is easier just to erase the whole table. Prior to version 3.6.5, + ** this optimization caused the row change count (the value returned by + ** API function sqlite3_count_changes) to be set incorrectly. */ + if( rcauth==SQLITE_OK + && pWhere==0 + && !bComplex + && !IsVirtual(pTab) +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + && db->xPreUpdateCallback==0 +#endif + ){ + assert( !isView ); + sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); + if( HasRowid(pTab) ){ + sqlite3VdbeAddOp4(v, OP_Clear, pTab->tnum, iDb, memCnt, + pTab->zName, P4_STATIC); + } + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + assert( pIdx->pSchema==pTab->pSchema ); + sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb); + } + }else +#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ + { + u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK|WHERE_SEEK_TABLE; + if( sNC.ncFlags & NC_VarSelect ) bComplex = 1; + wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW); + if( HasRowid(pTab) ){ + /* For a rowid table, initialize the RowSet to an empty set */ + pPk = 0; + nPk = 1; + iRowSet = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); + }else{ + /* For a WITHOUT ROWID table, create an ephemeral table used to + ** hold all primary keys for rows to be deleted. */ + pPk = sqlite3PrimaryKeyIndex(pTab); + assert( pPk!=0 ); + nPk = pPk->nKeyCol; + iPk = pParse->nMem+1; + pParse->nMem += nPk; + iEphCur = pParse->nTab++; + addrEphOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEphCur, nPk); + sqlite3VdbeSetP4KeyInfo(pParse, pPk); + } + + /* Construct a query to find the rowid or primary key for every row + ** to be deleted, based on the WHERE clause. Set variable eOnePass + ** to indicate the strategy used to implement this delete: + ** + ** ONEPASS_OFF: Two-pass approach - use a FIFO for rowids/PK values. + ** ONEPASS_SINGLE: One-pass approach - at most one row deleted. + ** ONEPASS_MULTI: One-pass approach - any number of rows may be deleted. 
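+    **
+    ** (Editor's note: a hedged illustration. "DELETE FROM t1 WHERE
+    ** rowid=5" can typically run as ONEPASS_SINGLE, deleting the row
+    ** where the scan finds it; a range delete on a plain table may
+    ** qualify for ONEPASS_MULTI; triggers, foreign keys, or a subquery
+    ** in the WHERE clause set bComplex, which rules out ONEPASS_MULTI.)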
+    */
+    pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, wcf, iTabCur+1);
+    if( pWInfo==0 ) goto delete_from_cleanup;
+    eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
+    assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI );
+    assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF );
+
+    /* Keep track of the number of rows to be deleted */
+    if( db->flags & SQLITE_CountRows ){
+      sqlite3VdbeAddOp2(v, OP_AddImm, memCnt, 1);
+    }
+
+    /* Extract the rowid or primary key for the current row */
+    if( pPk ){
+      for(i=0; i<nPk; i++){
+        assert( pPk->aiColumn[i]>=0 );
+        sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur,
+                                        pPk->aiColumn[i], iPk+i);
+      }
+      iKey = iPk;
+    }else{
+      iKey = pParse->nMem + 1;
+      iKey = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iTabCur, iKey, 0);
+      if( iKey>pParse->nMem ) pParse->nMem = iKey;
+    }
+
+    if( eOnePass!=ONEPASS_OFF ){
+      /* For ONEPASS, no need to store the rowid/primary-key. There is only
+      ** one, so just keep it in its register(s) and fall through to the
+      ** delete code. */
+      nKey = nPk;  /* OP_Found will use an unpacked key */
+      aToOpen = sqlite3DbMallocRawNN(db, nIdx+2);
+      if( aToOpen==0 ){
+        sqlite3WhereEnd(pWInfo);
+        goto delete_from_cleanup;
+      }
+      memset(aToOpen, 1, nIdx+1);
+      aToOpen[nIdx+1] = 0;
+      if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iTabCur] = 0;
+      if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iTabCur] = 0;
+      if( addrEphOpen ) sqlite3VdbeChangeToNoop(v, addrEphOpen);
+    }else{
+      if( pPk ){
+        /* Add the PK key for this row to the temporary table */
+        iKey = ++pParse->nMem;
+        nKey = 0;   /* Zero tells OP_Found to use a composite key */
+        sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey,
+            sqlite3IndexAffinityStr(pParse->db, pPk), nPk);
+        sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEphCur, iKey, iPk, nPk);
+      }else{
+        /* Add the rowid of the row to be deleted to the RowSet */
+        nKey = 1;  /* OP_Seek always uses a single rowid */
+        sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey);
+      }
+    }
+
+    /* If this DELETE cannot use the ONEPASS strategy, this is the
+    ** end of the WHERE loop */
+    if( eOnePass!=ONEPASS_OFF ){
+      addrBypass = sqlite3VdbeMakeLabel(v);
+    }else{
+      sqlite3WhereEnd(pWInfo);
+    }
+
+    /* Unless this is a view, open cursors for the table we are
+    ** deleting from and all its indices. If this is a view, then the
+    ** only effect this statement has is to fire the INSTEAD OF
+    ** triggers.
+    */
+    if( !isView ){
+      int iAddrOnce = 0;
+      if( eOnePass==ONEPASS_MULTI ){
+        iAddrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+      }
+      testcase( IsVirtual(pTab) );
+      sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, OPFLAG_FORDELETE,
+                                 iTabCur, aToOpen, &iDataCur, &iIdxCur);
+      assert( pPk || IsVirtual(pTab) || iDataCur==iTabCur );
+      assert( pPk || IsVirtual(pTab) || iIdxCur==iDataCur+1 );
+      if( eOnePass==ONEPASS_MULTI ) sqlite3VdbeJumpHere(v, iAddrOnce);
+    }
+
+    /* Set up a loop over the rowids/primary-keys that were found in the
+    ** where-clause loop above.
+ */ + if( eOnePass!=ONEPASS_OFF ){ + assert( nKey==nPk ); /* OP_Found will use an unpacked key */ + if( !IsVirtual(pTab) && aToOpen[iDataCur-iTabCur] ){ + assert( pPk!=0 || pTab->pSelect!=0 ); + sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, addrBypass, iKey, nKey); + VdbeCoverage(v); + } + }else if( pPk ){ + addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v); + sqlite3VdbeAddOp2(v, OP_RowData, iEphCur, iKey); + assert( nKey==0 ); /* OP_Found will use a composite key */ + }else{ + addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey); + VdbeCoverage(v); + assert( nKey==1 ); + } + + /* Delete the row */ +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( IsVirtual(pTab) ){ + const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); + sqlite3VtabMakeWritable(pParse, pTab); + sqlite3VdbeAddOp4(v, OP_VUpdate, 0, 1, iKey, pVTab, P4_VTAB); + sqlite3VdbeChangeP5(v, OE_Abort); + assert( eOnePass==ONEPASS_OFF || eOnePass==ONEPASS_SINGLE ); + sqlite3MayAbort(pParse); + if( eOnePass==ONEPASS_SINGLE && sqlite3IsToplevel(pParse) ){ + pParse->isMultiWrite = 0; + } + }else +#endif + { + int count = (pParse->nested==0); /* True to count changes */ + sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, + iKey, nKey, count, OE_Default, eOnePass, aiCurOnePass[1]); + } + + /* End of the loop over all rowids/primary-keys. */ + if( eOnePass!=ONEPASS_OFF ){ + sqlite3VdbeResolveLabel(v, addrBypass); + sqlite3WhereEnd(pWInfo); + }else if( pPk ){ + sqlite3VdbeAddOp2(v, OP_Next, iEphCur, addrLoop+1); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addrLoop); + }else{ + sqlite3VdbeGoto(v, addrLoop); + sqlite3VdbeJumpHere(v, addrLoop); + } + } /* End non-truncate path */ + + /* Update the sqlite_sequence table by storing the content of the + ** maximum rowid counter values recorded while inserting into + ** autoincrement tables. + */ + if( pParse->nested==0 && pParse->pTriggerTab==0 ){ + sqlite3AutoincrementEnd(pParse); + } + + /* Return the number of rows that were deleted. If this routine is + ** generating code because of a call to sqlite3NestedParse(), do not + ** invoke the callback function. + */ + if( (db->flags&SQLITE_CountRows) && !pParse->nested && !pParse->pTriggerTab ){ + sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); + } + +delete_from_cleanup: + sqlite3AuthContextPop(&sContext); + sqlite3SrcListDelete(db, pTabList); + sqlite3ExprDelete(db, pWhere); + sqlite3DbFree(db, aToOpen); + return; +} +/* Make sure "isView" and other macros defined above are undefined. Otherwise +** they may interfere with compilation of other functions in this file +** (or in another file, if this file becomes part of the amalgamation). */ +#ifdef isView + #undef isView +#endif +#ifdef pTrigger + #undef pTrigger +#endif + +/* +** This routine generates VDBE code that causes a single row of a +** single table to be deleted. Both the original table entry and +** all indices are removed. +** +** Preconditions: +** +** 1. iDataCur is an open cursor on the btree that is the canonical data +** store for the table. (This will be either the table itself, +** in the case of a rowid table, or the PRIMARY KEY index in the case +** of a WITHOUT ROWID table.) +** +** 2. Read/write cursors for all indices of pTab must be open as +** cursor number iIdxCur+i for the i-th index. +** +** 3. The primary key for the row to be deleted must be stored in a +** sequence of nPk memory cells starting at iPk. 
If nPk==0 that means +** that a search record formed from OP_MakeRecord is contained in the +** single memory location iPk. +** +** eMode: +** Parameter eMode may be passed either ONEPASS_OFF (0), ONEPASS_SINGLE, or +** ONEPASS_MULTI. If eMode is not ONEPASS_OFF, then the cursor +** iDataCur already points to the row to delete. If eMode is ONEPASS_OFF +** then this function must seek iDataCur to the entry identified by iPk +** and nPk before reading from it. +** +** If eMode is ONEPASS_MULTI, then this call is being made as part +** of a ONEPASS delete that affects multiple rows. In this case, if +** iIdxNoSeek is a valid cursor number (>=0) and is not the same as +** iDataCur, then its position should be preserved following the delete +** operation. Or, if iIdxNoSeek is not a valid cursor number, the +** position of iDataCur should be preserved instead. +** +** iIdxNoSeek: +** If iIdxNoSeek is a valid cursor number (>=0) not equal to iDataCur, +** then it identifies an index cursor (from within array of cursors +** starting at iIdxCur) that already points to the index entry to be deleted. +** Except, this optimization is disabled if there are BEFORE triggers since +** the trigger body might have moved the cursor. +*/ +SQLITE_PRIVATE void sqlite3GenerateRowDelete( + Parse *pParse, /* Parsing context */ + Table *pTab, /* Table containing the row to be deleted */ + Trigger *pTrigger, /* List of triggers to (potentially) fire */ + int iDataCur, /* Cursor from which column data is extracted */ + int iIdxCur, /* First index cursor */ + int iPk, /* First memory cell containing the PRIMARY KEY */ + i16 nPk, /* Number of PRIMARY KEY memory cells */ + u8 count, /* If non-zero, increment the row change counter */ + u8 onconf, /* Default ON CONFLICT policy for triggers */ + u8 eMode, /* ONEPASS_OFF, _SINGLE, or _MULTI. See above */ + int iIdxNoSeek /* Cursor number of cursor that does not need seeking */ +){ + Vdbe *v = pParse->pVdbe; /* Vdbe */ + int iOld = 0; /* First register in OLD.* array */ + int iLabel; /* Label resolved to end of generated code */ + u8 opSeek; /* Seek opcode */ + + /* Vdbe is guaranteed to have been allocated by this stage. */ + assert( v ); + VdbeModuleComment((v, "BEGIN: GenRowDel(%d,%d,%d,%d)", + iDataCur, iIdxCur, iPk, (int)nPk)); + + /* Seek cursor iCur to the row to delete. If this row no longer exists + ** (this can happen if a trigger program has already deleted it), do + ** not attempt to delete it or fire any DELETE triggers. */ + iLabel = sqlite3VdbeMakeLabel(v); + opSeek = HasRowid(pTab) ? OP_NotExists : OP_NotFound; + if( eMode==ONEPASS_OFF ){ + sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk); + VdbeCoverageIf(v, opSeek==OP_NotExists); + VdbeCoverageIf(v, opSeek==OP_NotFound); + } + + /* If there are any triggers to fire, allocate a range of registers to + ** use for the old.* references in the triggers. */ + if( sqlite3FkRequired(pParse, pTab, 0, 0) || pTrigger ){ + u32 mask; /* Mask of OLD.* columns in use */ + int iCol; /* Iterator used while populating OLD.* */ + int addrStart; /* Start of BEFORE trigger programs */ + + /* TODO: Could use temporary registers here. Also could attempt to + ** avoid copying the contents of the rowid register. */ + mask = sqlite3TriggerColmask( + pParse, pTrigger, 0, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onconf + ); + mask |= sqlite3FkOldmask(pParse, pTab); + iOld = pParse->nMem+1; + pParse->nMem += (1 + pTab->nCol); + + /* Populate the OLD.* pseudo-table register array. 
These values will be
+    ** used by any BEFORE and AFTER triggers that exist.  */
+    sqlite3VdbeAddOp2(v, OP_Copy, iPk, iOld);
+    for(iCol=0; iCol<pTab->nCol; iCol++){
+      testcase( mask!=0xffffffff && iCol==31 );
+      testcase( mask!=0xffffffff && iCol==32 );
+      if( mask==0xffffffff || (iCol<=31 && (mask & MASKBIT32(iCol))!=0) ){
+        sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, iCol, iOld+iCol+1);
+      }
+    }
+
+    /* Invoke BEFORE DELETE trigger programs. */
+    addrStart = sqlite3VdbeCurrentAddr(v);
+    sqlite3CodeRowTrigger(pParse, pTrigger,
+        TK_DELETE, 0, TRIGGER_BEFORE, pTab, iOld, onconf, iLabel
+    );
+
+    /* If any BEFORE triggers were coded, then seek the cursor to the
+    ** row to be deleted again. It may be that the BEFORE triggers moved
+    ** the cursor or already deleted the row that the cursor was
+    ** pointing to.
+    **
+    ** Also disable the iIdxNoSeek optimization since the BEFORE trigger
+    ** may have moved that cursor.
+    */
+    if( addrStart<sqlite3VdbeCurrentAddr(v) ){
+      sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
+      VdbeCoverageIf(v, opSeek==OP_NotExists);
+      VdbeCoverageIf(v, opSeek==OP_NotFound);
+      testcase( iIdxNoSeek>=0 );
+      iIdxNoSeek = -1;
+    }
+
+    /* Do FK processing. This call checks that any FK constraints that
+    ** refer to this table (i.e. constraints attached to other tables)
+    ** are not violated by deleting this row. */
+    sqlite3FkCheck(pParse, pTab, iOld, 0, 0, 0);
+  }
+
+  /* Delete the index and table entries. Skip this step if pTab is really
+  ** a view (in which case the only effect of the DELETE statement is to
+  ** fire the INSTEAD OF triggers).
+  **
+  ** If variable 'count' is non-zero, then this OP_Delete instruction should
+  ** invoke the update-hook. The pre-update-hook, on the other hand should
+  ** be invoked unless table pTab is a system table. The difference is that
+  ** the update-hook is not invoked for rows removed by REPLACE, but the
+  ** pre-update-hook is.
+  */
+  if( pTab->pSelect==0 ){
+    u8 p5 = 0;
+    sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek);
+    sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0));
+    if( pParse->nested==0 ){
+      sqlite3VdbeAppendP4(v, (char*)pTab, P4_TABLE);
+    }
+    if( eMode!=ONEPASS_OFF ){
+      sqlite3VdbeChangeP5(v, OPFLAG_AUXDELETE);
+    }
+    if( iIdxNoSeek>=0 && iIdxNoSeek!=iDataCur ){
+      sqlite3VdbeAddOp1(v, OP_Delete, iIdxNoSeek);
+    }
+    if( eMode==ONEPASS_MULTI ) p5 |= OPFLAG_SAVEPOSITION;
+    sqlite3VdbeChangeP5(v, p5);
+  }
+
+  /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to
+  ** handle rows (possibly in other tables) that refer via a foreign key
+  ** to the row just deleted. */
+  sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0);
+
+  /* Invoke AFTER DELETE trigger programs. */
+  sqlite3CodeRowTrigger(pParse, pTrigger,
+      TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel
+  );
+
+  /* Jump here if the row had already been deleted before any BEFORE
+  ** trigger programs were invoked. Or if a trigger program throws a
+  ** RAISE(IGNORE) exception. */
+  sqlite3VdbeResolveLabel(v, iLabel);
+  VdbeModuleComment((v, "END: GenRowDel()"));
+}
+
+/*
+** This routine generates VDBE code that causes the deletion of all
+** index entries associated with a single row of a single table, pTab.
+**
+** Preconditions:
+**
+** 1. A read/write cursor "iDataCur" must be open on the canonical storage
+**    btree for the table pTab. (This will be either the table itself
+**    for rowid tables or the primary key index for WITHOUT ROWID
+**    tables.)
+**
+** 2. Read/write cursors for all indices of pTab must be open as
+**    cursor number iIdxCur+i for the i-th index. (The pTab->pIndex
+**    index is the 0-th index.)
+**
+** 3.
The "iDataCur" cursor must already be positioned on the row
+**    that is to be deleted.
+*/
+SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(
+  Parse *pParse,     /* Parsing and code generating context */
+  Table *pTab,       /* Table containing the row to be deleted */
+  int iDataCur,      /* Cursor of table holding data. */
+  int iIdxCur,       /* First index cursor */
+  int *aRegIdx,      /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */
+  int iIdxNoSeek     /* Do not delete from this cursor */
+){
+  int i;             /* Index loop counter */
+  int r1 = -1;       /* Register holding an index key */
+  int iPartIdxLabel; /* Jump destination for skipping partial index entries */
+  Index *pIdx;       /* Current index */
+  Index *pPrior = 0; /* Prior index */
+  Vdbe *v;           /* The prepared statement under construction */
+  Index *pPk;        /* PRIMARY KEY index, or NULL for rowid tables */
+
+  v = pParse->pVdbe;
+  pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+  for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){
+    assert( iIdxCur+i!=iDataCur || pPk==pIdx );
+    if( aRegIdx!=0 && aRegIdx[i]==0 ) continue;
+    if( pIdx==pPk ) continue;
+    if( iIdxCur+i==iIdxNoSeek ) continue;
+    VdbeModuleComment((v, "GenRowIdxDel for %s", pIdx->zName));
+    r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 1,
+        &iPartIdxLabel, pPrior, r1);
+    sqlite3VdbeAddOp3(v, OP_IdxDelete, iIdxCur+i, r1,
+        pIdx->uniqNotNull ? pIdx->nKeyCol : pIdx->nColumn);
+    sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel);
+    pPrior = pIdx;
+  }
+}
+
+/*
+** Generate code that will assemble an index key and store it in register
+** regOut.  The key will be for index pIdx which is an index on pTab.
+** iDataCur is the index of a cursor open on the pTab table and pointing
+** to the entry that needs indexing.  If pTab is a WITHOUT ROWID table,
+** then iDataCur must be the cursor of the PRIMARY KEY index.
+**
+** Return a register number which is the first in a block of
+** registers that holds the elements of the index key.  The
+** block of registers has already been deallocated by the time
+** this routine returns.
+**
+** If *piPartIdxLabel is not NULL, fill it in with a label and jump
+** to that label if pIdx is a partial index that should be skipped.
+** The label should be resolved using sqlite3ResolvePartIdxLabel().
+** A partial index should be skipped if its WHERE clause evaluates
+** to false or null.  If pIdx is not a partial index, *piPartIdxLabel
+** will be set to zero which is an empty label that is ignored by
+** sqlite3ResolvePartIdxLabel().
+**
+** The pPrior and regPrior parameters are used to implement a cache to
+** avoid unnecessary register loads.  If pPrior is not NULL, then it is
+** a pointer to a different index for which an index key has just been
+** computed into register regPrior.  If the current pIdx index is generating
+** its key into the same sequence of registers and if pPrior and pIdx share
+** a column in common, then the register corresponding to that column already
+** holds the correct value and the loading of that register is skipped.
+** This optimization is helpful when doing a DELETE or an INTEGRITY_CHECK
+** on a table with multiple indices, and especially with the ROWID or
+** PRIMARY KEY columns of the index.
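+**
+** As an illustration of the pPrior/regPrior cache (hypothetical schema,
+** not taken from this file): given CREATE INDEX i1 ON t(a,b) and
+** CREATE INDEX i2 ON t(a,c), a DELETE on t builds a key for i1 and then
+** one for i2.  Because both keys are built into the same register block
+** and share column "a" in position 0, the second key generation can skip
+** reloading "a" and only load "c".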
+*/
+SQLITE_PRIVATE int sqlite3GenerateIndexKey(
+  Parse *pParse,       /* Parsing context */
+  Index *pIdx,         /* The index for which to generate a key */
+  int iDataCur,        /* Cursor number from which to take column data */
+  int regOut,          /* Put the new key into this register if not 0 */
+  int prefixOnly,      /* Compute only a unique prefix of the key */
+  int *piPartIdxLabel, /* OUT: Jump to this label to skip partial index */
+  Index *pPrior,       /* Previously generated index key */
+  int regPrior         /* Register holding previous generated key */
+){
+  Vdbe *v = pParse->pVdbe;
+  int j;
+  int regBase;
+  int nCol;
+
+  if( piPartIdxLabel ){
+    if( pIdx->pPartIdxWhere ){
+      *piPartIdxLabel = sqlite3VdbeMakeLabel(v);
+      pParse->iSelfTab = iDataCur;
+      sqlite3ExprCachePush(pParse);
+      sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel,
+                            SQLITE_JUMPIFNULL);
+    }else{
+      *piPartIdxLabel = 0;
+    }
+  }
+  nCol = (prefixOnly && pIdx->uniqNotNull) ? pIdx->nKeyCol : pIdx->nColumn;
+  regBase = sqlite3GetTempRange(pParse, nCol);
+  if( pPrior && (regBase!=regPrior || pPrior->pPartIdxWhere) ) pPrior = 0;
+  for(j=0; j<nCol; j++){
+    if( pPrior
+     && pPrior->aiColumn[j]==pIdx->aiColumn[j]
+     && pPrior->aiColumn[j]!=XN_EXPR
+    ){
+      /* This column was already computed by the previous index */
+      continue;
+    }
+    sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iDataCur, j, regBase+j);
+    /* If the column affinity is REAL but the number is an integer, then it
+    ** might be stored in the table as an integer (using a compact
+    ** representation) then converted to REAL by an OP_RealAffinity opcode.
+    ** But we are getting ready to store this value back into an index, where
+    ** it should be converted back to INTEGER again.  So omit the
+    ** OP_RealAffinity opcode if it is present */
+    sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity);
+  }
+  if( regOut ){
+    sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
+    if( pIdx->pTable->pSelect ){
+      const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx);
+      sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT);
+    }
+  }
+  sqlite3ReleaseTempRange(pParse, regBase, nCol);
+  return regBase;
+}
+
+/*
+** If a prior call to sqlite3GenerateIndexKey() generated a jump-over label
+** because it was a partial index, then this routine should be called to
+** resolve that label.
+*/
+SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){
+  if( iLabel ){
+    sqlite3VdbeResolveLabel(pParse->pVdbe, iLabel);
+    sqlite3ExprCachePop(pParse);
+  }
+}
+
+/************** End of delete.c **********************************************/
+/************** Begin file func.c ********************************************/
+/*
+** 2002 February 23
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the C-language implementations for many of the SQL
+** functions of SQLite.  (Some functions, and in particular the date and
+** time functions, are implemented separately.)
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+/* #include <assert.h> */
+/* #include "vdbeInt.h" */
+
+/*
+** Return the collating function associated with a function.
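+**
+** The code generator emits an OP_CollSeq instruction immediately before
+** the OP_Function of any collation-aware SQL function, so the CollSeq
+** can be recovered from the previous opcode.  Informal layout sketch:
+**
+**     addr-1:   OP_CollSeq   P4 = pColl   <- read by this routine
+**     addr:     OP_Function               <- the function being run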
+*/
+static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){
+  VdbeOp *pOp;
+  assert( context->pVdbe!=0 );
+  pOp = &context->pVdbe->aOp[context->iOp-1];
+  assert( pOp->opcode==OP_CollSeq );
+  assert( pOp->p4type==P4_COLLSEQ );
+  return pOp->p4.pColl;
+}
+
+/*
+** Indicate that the accumulator load should be skipped on this
+** iteration of the aggregate loop.
+*/
+static void sqlite3SkipAccumulatorLoad(sqlite3_context *context){
+  context->skipFlag = 1;
+}
+
+/*
+** Implementation of the non-aggregate min() and max() functions
+*/
+static void minmaxFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int i;
+  int mask;    /* 0 for min() or 0xffffffff for max() */
+  int iBest;
+  CollSeq *pColl;
+
+  assert( argc>1 );
+  mask = sqlite3_user_data(context)==0 ? 0 : -1;
+  pColl = sqlite3GetFuncCollSeq(context);
+  assert( pColl );
+  assert( mask==-1 || mask==0 );
+  iBest = 0;
+  if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
+  for(i=1; i<argc; i++){
+    if( sqlite3_value_type(argv[i])==SQLITE_NULL ) return;
+    if( (sqlite3MemCompare(argv[iBest], argv[i], pColl)^mask)>=0 ){
+      testcase( mask==0 );
+      iBest = i;
+    }
+  }
+  sqlite3_result_value(context, argv[iBest]);
+}
+
+/*
+** Return the type of the argument.
+*/
+static void typeofFunc(
+  sqlite3_context *context,
+  int NotUsed,
+  sqlite3_value **argv
+){
+  const char *z = 0;
+  UNUSED_PARAMETER(NotUsed);
+  switch( sqlite3_value_type(argv[0]) ){
+    case SQLITE_INTEGER: z = "integer"; break;
+    case SQLITE_TEXT:    z = "text";    break;
+    case SQLITE_FLOAT:   z = "real";    break;
+    case SQLITE_BLOB:    z = "blob";    break;
+    default:             z = "null";    break;
+  }
+  sqlite3_result_text(context, z, -1, SQLITE_STATIC);
+}
+
+
+/*
+** Implementation of the length() function
+*/
+static void lengthFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int len;
+
+  assert( argc==1 );
+  UNUSED_PARAMETER(argc);
+  switch( sqlite3_value_type(argv[0]) ){
+    case SQLITE_BLOB:
+    case SQLITE_INTEGER:
+    case SQLITE_FLOAT: {
+      sqlite3_result_int(context, sqlite3_value_bytes(argv[0]));
+      break;
+    }
+    case SQLITE_TEXT: {
+      const unsigned char *z = sqlite3_value_text(argv[0]);
+      if( z==0 ) return;
+      len = 0;
+      while( *z ){
+        len++;
+        SQLITE_SKIP_UTF8(z);
+      }
+      sqlite3_result_int(context, len);
+      break;
+    }
+    default: {
+      sqlite3_result_null(context);
+      break;
+    }
+  }
+}
+
+/*
+** Implementation of the abs() function.
+**
+** IMP: R-23979-26855 The abs(X) function returns the absolute value of
+** the numeric argument X.
+*/
+static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+  assert( argc==1 );
+  UNUSED_PARAMETER(argc);
+  switch( sqlite3_value_type(argv[0]) ){
+    case SQLITE_INTEGER: {
+      i64 iVal = sqlite3_value_int64(argv[0]);
+      if( iVal<0 ){
+        if( iVal==SMALLEST_INT64 ){
+          /* IMP: R-31676-45509 If X is the integer -9223372036854775808
+          ** then abs(X) throws an integer overflow error since there is no
+          ** equivalent positive 64-bit two's complement value. */
+          sqlite3_result_error(context, "integer overflow", -1);
+          return;
+        }
+        iVal = -iVal;
+      }
+      sqlite3_result_int64(context, iVal);
+      break;
+    }
+    case SQLITE_NULL: {
+      /* IMP: R-37434-19929 Abs(X) returns NULL if X is NULL. */
+      sqlite3_result_null(context);
+      break;
+    }
+    default: {
+      /* Because sqlite3_value_double() returns 0.0 if the argument is not
+      ** something that can be converted into a number, we have:
+      ** IMP: R-01992-00519 Abs(X) returns 0.0 if X is a string or blob
+      ** that cannot be converted to a numeric value.
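+      **
+      ** For example (illustrative): abs('xyz') and abs(x'3130') both
+      ** evaluate to 0.0, while abs('-5') evaluates to 5.0 because the
+      ** text converts cleanly to a number.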
+ */ + double rVal = sqlite3_value_double(argv[0]); + if( rVal<0 ) rVal = -rVal; + sqlite3_result_double(context, rVal); + break; + } + } +} + +/* +** Implementation of the instr() function. +** +** instr(haystack,needle) finds the first occurrence of needle +** in haystack and returns the number of previous characters plus 1, +** or 0 if needle does not occur within haystack. +** +** If both haystack and needle are BLOBs, then the result is one more than +** the number of bytes in haystack prior to the first occurrence of needle, +** or 0 if needle never occurs in haystack. +*/ +static void instrFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zHaystack; + const unsigned char *zNeedle; + int nHaystack; + int nNeedle; + int typeHaystack, typeNeedle; + int N = 1; + int isText; + + UNUSED_PARAMETER(argc); + typeHaystack = sqlite3_value_type(argv[0]); + typeNeedle = sqlite3_value_type(argv[1]); + if( typeHaystack==SQLITE_NULL || typeNeedle==SQLITE_NULL ) return; + nHaystack = sqlite3_value_bytes(argv[0]); + nNeedle = sqlite3_value_bytes(argv[1]); + if( nNeedle>0 ){ + if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){ + zHaystack = sqlite3_value_blob(argv[0]); + zNeedle = sqlite3_value_blob(argv[1]); + assert( zNeedle!=0 ); + assert( zHaystack!=0 || nHaystack==0 ); + isText = 0; + }else{ + zHaystack = sqlite3_value_text(argv[0]); + zNeedle = sqlite3_value_text(argv[1]); + isText = 1; + if( zHaystack==0 || zNeedle==0 ) return; + } + while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){ + N++; + do{ + nHaystack--; + zHaystack++; + }while( isText && (zHaystack[0]&0xc0)==0x80 ); + } + if( nNeedle>nHaystack ) N = 0; + } + sqlite3_result_int(context, N); +} + +/* +** Implementation of the printf() function. +*/ +static void printfFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + PrintfArguments x; + StrAccum str; + const char *zFormat; + int n; + sqlite3 *db = sqlite3_context_db_handle(context); + + if( argc>=1 && (zFormat = (const char*)sqlite3_value_text(argv[0]))!=0 ){ + x.nArg = argc-1; + x.nUsed = 0; + x.apArg = argv+1; + sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); + str.printfFlags = SQLITE_PRINTF_SQLFUNC; + sqlite3XPrintf(&str, zFormat, &x); + n = str.nChar; + sqlite3_result_text(context, sqlite3StrAccumFinish(&str), n, + SQLITE_DYNAMIC); + } +} + +/* +** Implementation of the substr() function. +** +** substr(x,p1,p2) returns p2 characters of x[] beginning with p1. +** p1 is 1-indexed. So substr(x,1,1) returns the first character +** of x. If x is text, then we actually count UTF-8 characters. +** If x is a blob, then we count bytes. +** +** If p1 is negative, then we begin abs(p1) from the end of x[]. +** +** If p2 is negative, return the p2 characters preceding p1. 
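+**
+** Examples (illustrative): substr('sqlite',2,3) is 'qli',
+** substr('sqlite',-3,3) is 'ite', and substr('abcdef',4,-2) is 'bc',
+** i.e. the two characters immediately preceding the 4th character.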
+*/
+static void substrFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  const unsigned char *z;
+  const unsigned char *z2;
+  int len;
+  int p0type;
+  i64 p1, p2;
+  int negP2 = 0;
+
+  assert( argc==3 || argc==2 );
+  if( sqlite3_value_type(argv[1])==SQLITE_NULL
+   || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL)
+  ){
+    return;
+  }
+  p0type = sqlite3_value_type(argv[0]);
+  p1 = sqlite3_value_int(argv[1]);
+  if( p0type==SQLITE_BLOB ){
+    len = sqlite3_value_bytes(argv[0]);
+    z = sqlite3_value_blob(argv[0]);
+    if( z==0 ) return;
+    assert( len==sqlite3_value_bytes(argv[0]) );
+  }else{
+    z = sqlite3_value_text(argv[0]);
+    if( z==0 ) return;
+    len = 0;
+    if( p1<0 ){
+      for(z2=z; *z2; len++){
+        SQLITE_SKIP_UTF8(z2);
+      }
+    }
+  }
+#ifdef SQLITE_SUBSTR_COMPATIBILITY
+  /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) works the same
+  ** as substr(X,1,N) - it returns the first N characters of X.  This
+  ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8]
+  ** from 2009-02-02 for compatibility of applications that exploited the
+  ** old buggy behavior. */
+  if( p1==0 ) p1 = 1; /* <rdar://problem/6778339> */
+#endif
+  if( argc==3 ){
+    p2 = sqlite3_value_int(argv[2]);
+    if( p2<0 ){
+      p2 = -p2;
+      negP2 = 1;
+    }
+  }else{
+    p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH];
+  }
+  if( p1<0 ){
+    p1 += len;
+    if( p1<0 ){
+      p2 += p1;
+      if( p2<0 ) p2 = 0;
+      p1 = 0;
+    }
+  }else if( p1>0 ){
+    p1--;
+  }else if( p2>0 ){
+    p2--;
+  }
+  if( negP2 ){
+    p1 -= p2;
+    if( p1<0 ){
+      p2 += p1;
+      p1 = 0;
+    }
+  }
+  assert( p1>=0 && p2>=0 );
+  if( p0type!=SQLITE_BLOB ){
+    while( *z && p1 ){
+      SQLITE_SKIP_UTF8(z);
+      p1--;
+    }
+    for(z2=z; *z2 && p2; p2--){
+      SQLITE_SKIP_UTF8(z2);
+    }
+    sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT,
+                          SQLITE_UTF8);
+  }else{
+    if( p1+p2>len ){
+      p2 = len-p1;
+      if( p2<0 ) p2 = 0;
+    }
+    sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT);
+  }
+}
+
+/*
+** Implementation of the round() function
+*/
+#ifndef SQLITE_OMIT_FLOATING_POINT
+static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+  int n = 0;
+  double r;
+  char *zBuf;
+  assert( argc==1 || argc==2 );
+  if( argc==2 ){
+    if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return;
+    n = sqlite3_value_int(argv[1]);
+    if( n>30 ) n = 30;
+    if( n<0 ) n = 0;
+  }
+  if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return;
+  r = sqlite3_value_double(argv[0]);
+  /* If Y==0 and X will fit in a 64-bit int,
+  ** handle the rounding directly,
+  ** otherwise use printf.
+  */
+  if( n==0 && r>=0 && r<LARGEST_INT64-1 ){
+    r = (double)((sqlite_int64)(r+0.5));
+  }else if( n==0 && r<0 && (-r)<LARGEST_INT64-1 ){
+    r = -(double)((sqlite_int64)((-r)+0.5));
+  }else{
+    zBuf = sqlite3_mprintf("%.*f",n,r);
+    if( zBuf==0 ){
+      sqlite3_result_error_nomem(context);
+      return;
+    }
+    sqlite3AtoF(zBuf, &r, sqlite3Strlen30(zBuf), SQLITE_UTF8);
+    sqlite3_free(zBuf);
+  }
+  sqlite3_result_double(context, r);
+}
+#endif
+
+/*
+** Allocate nByte bytes of space using sqlite3Malloc(). If the
+** allocation fails, call sqlite3_result_error_nomem() to notify
+** the database handle that malloc() has failed and return NULL.
+** If nByte is larger than the maximum string or blob length, then
+** raise an SQLITE_TOOBIG exception and return NULL.
+*/
+static void *contextMalloc(sqlite3_context *context, i64 nByte){
+  char *z;
+  sqlite3 *db = sqlite3_context_db_handle(context);
+  assert( nByte>0 );
+  testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH] );
+  testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH]+1 );
+  if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+    sqlite3_result_error_toobig(context);
+    z = 0;
+  }else{
+    z = sqlite3Malloc(nByte);
+    if( !z ){
+      sqlite3_result_error_nomem(context);
+    }
+  }
+  return z;
+}
+
+/*
+** Implementation of the upper() and lower() SQL functions.
+*/
+static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+  char *z1;
+  const char *z2;
+  int i, n;
+  UNUSED_PARAMETER(argc);
+  z2 = (char*)sqlite3_value_text(argv[0]);
+  n = sqlite3_value_bytes(argv[0]);
+  /* Verify that the call to _bytes() does not invalidate the _text() pointer */
+  assert( z2==(char*)sqlite3_value_text(argv[0]) );
+  if( z2 ){
+    z1 = contextMalloc(context, ((i64)n)+1);
+    if( z1 ){
+      for(i=0; i<n; i++){
+        z1[i] = (char)sqlite3Toupper(z2[i]);
+      }
+      sqlite3_result_text(context, z1, n, sqlite3_free);
+    }
+  }
+}
+static void lowerFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+  char *z1;
+  const char *z2;
+  int i, n;
+  UNUSED_PARAMETER(argc);
+  z2 = (char*)sqlite3_value_text(argv[0]);
+  n = sqlite3_value_bytes(argv[0]);
+  /* Verify that the call to _bytes() does not invalidate the _text() pointer */
+  assert( z2==(char*)sqlite3_value_text(argv[0]) );
+  if( z2 ){
+    z1 = contextMalloc(context, ((i64)n)+1);
+    if( z1 ){
+      for(i=0; i<n; i++){
+        z1[i] = sqlite3Tolower(z2[i]);
+      }
+      sqlite3_result_text(context, z1, n, sqlite3_free);
+    }
+  }
+}
+
+/*
+** Some functions like COALESCE() and IFNULL() and UNLIKELY() are implemented
+** as VDBE code so that unused argument values do not have to be computed.
+** However, we still need some kind of function implementation for these
+** routines in the function table.  The noopFunc macro provides this.
+** noopFunc will never be called so it doesn't matter what the implementation
+** is.  We might as well use the "version()" function as a substitute.
+*/
+#define noopFunc versionFunc   /* Substitute function - never called */
+
+/*
+** Implementation of random().  Return a random integer.
+*/
+static void randomFunc(
+  sqlite3_context *context,
+  int NotUsed,
+  sqlite3_value **NotUsed2
+){
+  sqlite_int64 r;
+  UNUSED_PARAMETER2(NotUsed, NotUsed2);
+  sqlite3_randomness(sizeof(r), &r);
+  if( r<0 ){
+    /* We need to prevent a random number of 0x8000000000000000
+    ** (or -9223372036854775808) since when you do abs() of that
+    ** number you get the same value back again.  To do this
+    ** in a way that is testable, mask the sign bit off of negative
+    ** values, resulting in a positive value.  Then take the
+    ** 2s complement of that positive value.  The end result can
+    ** always be represented by a 64-bit signed integer.
+    */
+    r = -(r & LARGEST_INT64);
+  }
+  sqlite3_result_int64(context, r);
+}
+
+/*
+** Implementation of randomblob(N).  Return a random blob
+** that is N bytes long.
+*/
+static void randomBlob(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int n;
+  unsigned char *p;
+  assert( argc==1 );
+  UNUSED_PARAMETER(argc);
+  n = sqlite3_value_int(argv[0]);
+  if( n<1 ){
+    n = 1;
+  }
+  p = contextMalloc(context, n);
+  if( p ){
+    sqlite3_randomness(n, p);
+    sqlite3_result_blob(context, (char*)p, n, sqlite3_free);
+  }
+}
+
+/*
+** Implementation of the last_insert_rowid() SQL function.  The return
+** value is the same as the sqlite3_last_insert_rowid() API function.
+*/
+static void last_insert_rowid(
+  sqlite3_context *context,
+  int NotUsed,
+  sqlite3_value **NotUsed2
+){
+  sqlite3 *db = sqlite3_context_db_handle(context);
+  UNUSED_PARAMETER2(NotUsed, NotUsed2);
+  /* IMP: R-51513-12026 The last_insert_rowid() SQL function is a
+  ** wrapper around the sqlite3_last_insert_rowid() C/C++ interface
+  ** function. */
+  sqlite3_result_int64(context, sqlite3_last_insert_rowid(db));
+}
+
+/*
+** Implementation of the changes() SQL function.
+**
+** IMP: R-62073-11209 The changes() SQL function is a wrapper
+** around the sqlite3_changes() C/C++ function and hence follows the same
+** rules for counting changes.
+*/
+static void changes(
+  sqlite3_context *context,
+  int NotUsed,
+  sqlite3_value **NotUsed2
+){
+  sqlite3 *db = sqlite3_context_db_handle(context);
+  UNUSED_PARAMETER2(NotUsed, NotUsed2);
+  sqlite3_result_int(context, sqlite3_changes(db));
+}
+
+/*
+** Implementation of the total_changes() SQL function.  The return value is
+** the same as the sqlite3_total_changes() API function.
+*/
+static void total_changes(
+  sqlite3_context *context,
+  int NotUsed,
+  sqlite3_value **NotUsed2
+){
+  sqlite3 *db = sqlite3_context_db_handle(context);
+  UNUSED_PARAMETER2(NotUsed, NotUsed2);
+  /* IMP: R-52756-41993 This function is a wrapper around the
+  ** sqlite3_total_changes() C/C++ interface. */
+  sqlite3_result_int(context, sqlite3_total_changes(db));
+}
+
+/*
+** A structure defining how to do GLOB-style comparisons.
+*/
+struct compareInfo {
+  u8 matchAll;          /* "*" or "%" */
+  u8 matchOne;          /* "?" or "_" */
+  u8 matchSet;          /* "[" or 0 */
+  u8 noCase;            /* true to ignore case differences */
+};
+
+/*
+** For LIKE and GLOB matching on EBCDIC machines, assume that every
+** character is exactly one byte in size.  Also, provide the Utf8Read()
+** macro for fast reading of the next character in the common case where
+** the next character is ASCII.
+*/
+#if defined(SQLITE_EBCDIC)
+# define sqlite3Utf8Read(A)        (*((*A)++))
+# define Utf8Read(A)               (*(A++))
+#else
+# define Utf8Read(A)               (A[0]<0x80?*(A++):sqlite3Utf8Read(&A))
+#endif
+
+static const struct compareInfo globInfo = { '*', '?', '[', 0 };
+/* The correct SQL-92 behavior is for the LIKE operator to ignore
+** case.  Thus  'a' LIKE 'A' would be true. */
+static const struct compareInfo likeInfoNorm = { '%', '_',   0, 1 };
+/* If SQLITE_CASE_SENSITIVE_LIKE is defined, then the LIKE operator
+** is case sensitive causing 'a' LIKE 'A' to be false */
+static const struct compareInfo likeInfoAlt = { '%', '_',   0, 0 };
+
+/*
+** Possible error returns from patternMatch()
+*/
+#define SQLITE_MATCH             0
+#define SQLITE_NOMATCH           1
+#define SQLITE_NOWILDCARDMATCH   2
+
+/*
+** Compare two UTF-8 strings for equality where the first string is
+** a GLOB or LIKE expression.  Return values:
+**
+**    SQLITE_MATCH:            Match
+**    SQLITE_NOMATCH:          No match
+**    SQLITE_NOWILDCARDMATCH:  No match in spite of having * or % wildcards.
+**
+** Globbing rules:
+**
+**      '*'       Matches any sequence of zero or more characters.
+**
+**      '?'       Matches exactly one character.
+**
+**     [...]      Matches one character from the enclosed list of
+**                characters.
+**
+**     [^...]     Matches one character not in the enclosed list.
+**
+** With the [...] and [^...] matching, a ']' character can be included
+** in the list by making it the first character after '[' or '^'.  A
+** range of characters can be specified using '-'.  Example:
+** "[a-z]" matches any single lower-case letter.  To match a '-', make
+** it the last character in the list.
+**
+** Like matching rules:
+**
+**      '%'       Matches any sequence of zero or more characters.
+**
+**      '_'       Matches any one character.
+**
+**      Ec        Where E is the "esc" character and c is any other
+**                character, including '%', '_', and esc, match exactly c.
+**
+** The comments within this routine usually assume glob matching.
+**
+** This routine is usually quick, but can be N**2 in the worst case.
+*/
+static int patternCompare(
+  const u8 *zPattern,              /* The glob pattern */
+  const u8 *zString,               /* The string to compare against the glob */
+  const struct compareInfo *pInfo, /* Information about how to do the compare */
+  u32 matchOther                   /* The escape char (LIKE) or '[' (GLOB) */
+){
+  u32 c, c2;                       /* Next pattern and input string chars */
+  u32 matchOne = pInfo->matchOne;  /* "?" 
or "_" */ + u32 matchAll = pInfo->matchAll; /* "*" or "%" */ + u8 noCase = pInfo->noCase; /* True if uppercase==lowercase */ + const u8 *zEscaped = 0; /* One past the last escaped input char */ + + while( (c = Utf8Read(zPattern))!=0 ){ + if( c==matchAll ){ /* Match "*" */ + /* Skip over multiple "*" characters in the pattern. If there + ** are also "?" characters, skip those as well, but consume a + ** single character of the input string for each "?" skipped */ + while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){ + if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){ + return SQLITE_NOWILDCARDMATCH; + } + } + if( c==0 ){ + return SQLITE_MATCH; /* "*" at the end of the pattern matches */ + }else if( c==matchOther ){ + if( pInfo->matchSet==0 ){ + c = sqlite3Utf8Read(&zPattern); + if( c==0 ) return SQLITE_NOWILDCARDMATCH; + }else{ + /* "[...]" immediately follows the "*". We have to do a slow + ** recursive search in this case, but it is an unusual case. */ + assert( matchOther<0x80 ); /* '[' is a single-byte character */ + while( *zString ){ + int bMatch = patternCompare(&zPattern[-1],zString,pInfo,matchOther); + if( bMatch!=SQLITE_NOMATCH ) return bMatch; + SQLITE_SKIP_UTF8(zString); + } + return SQLITE_NOWILDCARDMATCH; + } + } + + /* At this point variable c contains the first character of the + ** pattern string past the "*". Search in the input string for the + ** first matching character and recursively continue the match from + ** that point. + ** + ** For a case-insensitive search, set variable cx to be the same as + ** c but in the other case and search the input string for either + ** c or cx. + */ + if( c<=0x80 ){ + u32 cx; + int bMatch; + if( noCase ){ + cx = sqlite3Toupper(c); + c = sqlite3Tolower(c); + }else{ + cx = c; + } + while( (c2 = *(zString++))!=0 ){ + if( c2!=c && c2!=cx ) continue; + bMatch = patternCompare(zPattern,zString,pInfo,matchOther); + if( bMatch!=SQLITE_NOMATCH ) return bMatch; + } + }else{ + int bMatch; + while( (c2 = Utf8Read(zString))!=0 ){ + if( c2!=c ) continue; + bMatch = patternCompare(zPattern,zString,pInfo,matchOther); + if( bMatch!=SQLITE_NOMATCH ) return bMatch; + } + } + return SQLITE_NOWILDCARDMATCH; + } + if( c==matchOther ){ + if( pInfo->matchSet==0 ){ + c = sqlite3Utf8Read(&zPattern); + if( c==0 ) return SQLITE_NOMATCH; + zEscaped = zPattern; + }else{ + u32 prior_c = 0; + int seen = 0; + int invert = 0; + c = sqlite3Utf8Read(&zString); + if( c==0 ) return SQLITE_NOMATCH; + c2 = sqlite3Utf8Read(&zPattern); + if( c2=='^' ){ + invert = 1; + c2 = sqlite3Utf8Read(&zPattern); + } + if( c2==']' ){ + if( c==']' ) seen = 1; + c2 = sqlite3Utf8Read(&zPattern); + } + while( c2 && c2!=']' ){ + if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){ + c2 = sqlite3Utf8Read(&zPattern); + if( c>=prior_c && c<=c2 ) seen = 1; + prior_c = 0; + }else{ + if( c==c2 ){ + seen = 1; + } + prior_c = c2; + } + c2 = sqlite3Utf8Read(&zPattern); + } + if( c2==0 || (seen ^ invert)==0 ){ + return SQLITE_NOMATCH; + } + continue; + } + } + c2 = Utf8Read(zString); + if( c==c2 ) continue; + if( noCase && sqlite3Tolower(c)==sqlite3Tolower(c2) && c<0x80 && c2<0x80 ){ + continue; + } + if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue; + return SQLITE_NOMATCH; + } + return *zString==0 ? SQLITE_MATCH : SQLITE_NOMATCH; +} + +/* +** The sqlite3_strglob() interface. Return 0 on a match (like strcmp()) and +** non-zero if there is no match. 
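+**
+** A brief usage sketch (zName is a hypothetical caller-supplied string,
+** not a name used elsewhere in this file):
+**
+**     if( sqlite3_strglob("sqlite_*", zName)==0 ){
+**       ... zName begins with the reserved prefix "sqlite_" ...
+**     }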
+*/ +SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){ + return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '['); +} + +/* +** The sqlite3_strlike() interface. Return 0 on a match and non-zero for +** a miss - like strcmp(). +*/ +SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){ + return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc); +} + +/* +** Count the number of times that the LIKE operator (or GLOB which is +** just a variation of LIKE) gets called. This is used for testing +** only. +*/ +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_like_count = 0; +#endif + + +/* +** Implementation of the like() SQL function. This function implements +** the build-in LIKE operator. The first argument to the function is the +** pattern and the second argument is the string. So, the SQL statements: +** +** A LIKE B +** +** is implemented as like(B,A). +** +** This same function (with a different compareInfo structure) computes +** the GLOB operator. +*/ +static void likeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const unsigned char *zA, *zB; + u32 escape; + int nPat; + sqlite3 *db = sqlite3_context_db_handle(context); + struct compareInfo *pInfo = sqlite3_user_data(context); + +#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS + if( sqlite3_value_type(argv[0])==SQLITE_BLOB + || sqlite3_value_type(argv[1])==SQLITE_BLOB + ){ +#ifdef SQLITE_TEST + sqlite3_like_count++; +#endif + sqlite3_result_int(context, 0); + return; + } +#endif + zB = sqlite3_value_text(argv[0]); + zA = sqlite3_value_text(argv[1]); + + /* Limit the length of the LIKE or GLOB pattern to avoid problems + ** of deep recursion and N*N behavior in patternCompare(). + */ + nPat = sqlite3_value_bytes(argv[0]); + testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ); + testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]+1 ); + if( nPat > db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ){ + sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); + return; + } + assert( zB==sqlite3_value_text(argv[0]) ); /* Encoding did not change */ + + if( argc==3 ){ + /* The escape character string must consist of a single UTF-8 character. + ** Otherwise, return an error. + */ + const unsigned char *zEsc = sqlite3_value_text(argv[2]); + if( zEsc==0 ) return; + if( sqlite3Utf8CharLen((char*)zEsc, -1)!=1 ){ + sqlite3_result_error(context, + "ESCAPE expression must be a single character", -1); + return; + } + escape = sqlite3Utf8Read(&zEsc); + }else{ + escape = pInfo->matchSet; + } + if( zA && zB ){ +#ifdef SQLITE_TEST + sqlite3_like_count++; +#endif + sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)==SQLITE_MATCH); + } +} + +/* +** Implementation of the NULLIF(x,y) function. The result is the first +** argument if the arguments are different. The result is NULL if the +** arguments are equal to each other. +*/ +static void nullifFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + CollSeq *pColl = sqlite3GetFuncCollSeq(context); + UNUSED_PARAMETER(NotUsed); + if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){ + sqlite3_result_value(context, argv[0]); + } +} + +/* +** Implementation of the sqlite_version() function. The result is the version +** of the SQLite library that is running. 
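+**
+** For example (output is illustrative and depends on the build):
+**
+**     SELECT sqlite_version();   -- e.g. '3.19.3'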
+*/ +static void versionFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **NotUsed2 +){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + /* IMP: R-48699-48617 This function is an SQL wrapper around the + ** sqlite3_libversion() C-interface. */ + sqlite3_result_text(context, sqlite3_libversion(), -1, SQLITE_STATIC); +} + +/* +** Implementation of the sqlite_source_id() function. The result is a string +** that identifies the particular version of the source code used to build +** SQLite. +*/ +static void sourceidFunc( + sqlite3_context *context, + int NotUsed, + sqlite3_value **NotUsed2 +){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + /* IMP: R-24470-31136 This function is an SQL wrapper around the + ** sqlite3_sourceid() C interface. */ + sqlite3_result_text(context, sqlite3_sourceid(), -1, SQLITE_STATIC); +} + +/* +** Implementation of the sqlite_log() function. This is a wrapper around +** sqlite3_log(). The return value is NULL. The function exists purely for +** its side-effects. +*/ +static void errlogFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(context); + sqlite3_log(sqlite3_value_int(argv[0]), "%s", sqlite3_value_text(argv[1])); +} + +/* +** Implementation of the sqlite_compileoption_used() function. +** The result is an integer that identifies if the compiler option +** was used to build SQLite. +*/ +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS +static void compileoptionusedFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *zOptName; + assert( argc==1 ); + UNUSED_PARAMETER(argc); + /* IMP: R-39564-36305 The sqlite_compileoption_used() SQL + ** function is a wrapper around the sqlite3_compileoption_used() C/C++ + ** function. + */ + if( (zOptName = (const char*)sqlite3_value_text(argv[0]))!=0 ){ + sqlite3_result_int(context, sqlite3_compileoption_used(zOptName)); + } +} +#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ + +/* +** Implementation of the sqlite_compileoption_get() function. +** The result is a string that identifies the compiler options +** used to build SQLite. +*/ +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS +static void compileoptiongetFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int n; + assert( argc==1 ); + UNUSED_PARAMETER(argc); + /* IMP: R-04922-24076 The sqlite_compileoption_get() SQL function + ** is a wrapper around the sqlite3_compileoption_get() C/C++ function. + */ + n = sqlite3_value_int(argv[0]); + sqlite3_result_text(context, sqlite3_compileoption_get(n), -1, SQLITE_STATIC); +} +#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ + +/* Array for converting from half-bytes (nybbles) into ASCII hex +** digits. */ +static const char hexdigits[] = { + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' +}; + +/* +** Implementation of the QUOTE() function. This function takes a single +** argument. If the argument is numeric, the return value is the same as +** the argument. If the argument is NULL, the return value is the string +** "NULL". Otherwise, the argument is enclosed in single quotes with +** single-quote escapes. 
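+**
+** Examples (illustrative): quote(NULL) yields the four-character text
+** NULL; quote(x'4142') yields X'4142'; and quoting a text value that
+** contains a single quote doubles that quote, so the three characters
+** o'k come back as 'o''k'.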
+*/
+static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){
+  assert( argc==1 );
+  UNUSED_PARAMETER(argc);
+  switch( sqlite3_value_type(argv[0]) ){
+    case SQLITE_FLOAT: {
+      double r1, r2;
+      char zBuf[50];
+      r1 = sqlite3_value_double(argv[0]);
+      sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.15g", r1);
+      sqlite3AtoF(zBuf, &r2, 20, SQLITE_UTF8);
+      if( r1!=r2 ){
+        sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.20e", r1);
+      }
+      sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+      break;
+    }
+    case SQLITE_INTEGER: {
+      sqlite3_result_value(context, argv[0]);
+      break;
+    }
+    case SQLITE_BLOB: {
+      char *zText = 0;
+      char const *zBlob = sqlite3_value_blob(argv[0]);
+      int nBlob = sqlite3_value_bytes(argv[0]);
+      assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */
+      zText = (char *)contextMalloc(context, (2*(i64)nBlob)+4);
+      if( zText ){
+        int i;
+        for(i=0; i<nBlob; i++){
+          zText[(i*2)+2] = hexdigits[(zBlob[i]>>4)&0x0F];
+          zText[(i*2)+3] = hexdigits[(zBlob[i])&0x0F];
+        }
+        zText[(nBlob*2)+2] = '\'';
+        zText[(nBlob*2)+3] = '\0';
+        zText[0] = 'X';
+        zText[1] = '\'';
+        sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT);
+        sqlite3_free(zText);
+      }
+      break;
+    }
+    case SQLITE_TEXT: {
+      int i,j;
+      u64 n;
+      const unsigned char *zArg = sqlite3_value_text(argv[0]);
+      char *z;
+
+      if( zArg==0 ) return;
+      for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; }
+      z = contextMalloc(context, ((i64)i)+((i64)n)+3);
+      if( z ){
+        z[0] = '\'';
+        for(i=0, j=1; zArg[i]; i++){
+          z[j++] = zArg[i];
+          if( zArg[i]=='\'' ){
+            z[j++] = '\'';
+          }
+        }
+        z[j++] = '\'';
+        z[j] = 0;
+        sqlite3_result_text(context, z, j, sqlite3_free);
+      }
+      break;
+    }
+    default: {
+      assert( sqlite3_value_type(argv[0])==SQLITE_NULL );
+      sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC);
+      break;
+    }
+  }
+}
+
+/*
+** The unicode() function.  Return the integer unicode code-point value
+** for the first character of the input string.
+*/
+static void unicodeFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  const unsigned char *z = sqlite3_value_text(argv[0]);
+  (void)argc;
+  if( z && z[0] ) sqlite3_result_int(context, sqlite3Utf8Read(&z));
+}
+
+/*
+** The char() function takes zero or more arguments, each of which is
+** an integer.  It constructs a string where each character of the string
+** is the unicode character for the corresponding integer argument.
+*/
+static void charFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  unsigned char *z, *zOut;
+  int i;
+  zOut = z = sqlite3_malloc64( argc*4+1 );
+  if( z==0 ){
+    sqlite3_result_error_nomem(context);
+    return;
+  }
+  for(i=0; i<argc; i++){
+    sqlite3_int64 x;
+    unsigned c;
+    x = sqlite3_value_int64(argv[i]);
+    if( x<0 || x>0x10ffff ) x = 0xfffd;
+    c = (unsigned)(x & 0x1fffff);
+    if( c<0x00080 ){
+      *zOut++ = (u8)(c&0xFF);
+    }else if( c<0x00800 ){
+      *zOut++ = 0xC0 + (u8)((c>>6)&0x1F);
+      *zOut++ = 0x80 + (u8)(c & 0x3F);
+    }else if( c<0x10000 ){
+      *zOut++ = 0xE0 + (u8)((c>>12)&0x0F);
+      *zOut++ = 0x80 + (u8)((c>>6) & 0x3F);
+      *zOut++ = 0x80 + (u8)(c & 0x3F);
+    }else{
+      *zOut++ = 0xF0 + (u8)((c>>18) & 0x07);
+      *zOut++ = 0x80 + (u8)((c>>12) & 0x3F);
+      *zOut++ = 0x80 + (u8)((c>>6) & 0x3F);
+      *zOut++ = 0x80 + (u8)(c & 0x3F);
+    }
+  }
+  sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8);
+}
+
+/*
+** The hex() function.  Interpret the argument as a blob.  Return
+** a hexadecimal rendering as text.
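+**
+** For example (illustrative): hex(x'0123') is '0123', and hex('abc')
+** is '616263' since the text argument is interpreted as its UTF-8
+** byte sequence.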
+*/
+static void hexFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  int i, n;
+  const unsigned char *pBlob;
+  char *zHex, *z;
+  assert( argc==1 );
+  UNUSED_PARAMETER(argc);
+  pBlob = sqlite3_value_blob(argv[0]);
+  n = sqlite3_value_bytes(argv[0]);
+  assert( pBlob==sqlite3_value_blob(argv[0]) );  /* No encoding change */
+  z = zHex = contextMalloc(context, ((i64)n)*2 + 1);
+  if( zHex ){
+    for(i=0; i<n; i++){
+      unsigned char c = pBlob[i];
+      *(z++) = hexdigits[(c>>4)&0xf];
+      *(z++) = hexdigits[c&0xf];
+    }
+    *z = 0;
+    sqlite3_result_text(context, zHex, n*2, sqlite3_free);
+  }
+}
+
+/*
+** The zeroblob(N) function returns a zero-filled blob of size N bytes.
+*/
+static void zeroblobFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  i64 n;
+  int rc;
+  assert( argc==1 );
+  UNUSED_PARAMETER(argc);
+  n = sqlite3_value_int64(argv[0]);
+  if( n<0 ) n = 0;
+  rc = sqlite3_result_zeroblob64(context, n); /* IMP: R-00293-64994 */
+  if( rc ){
+    sqlite3_result_error_code(context, rc);
+  }
+}
+
+/*
+** The replace() function.  Three arguments are all strings: call
+** them A, B, and C. The result is also a string which is derived
+** from A by replacing every occurrence of B with C.  The match
+** must be exact.  Collating sequences are not used.
+*/
+static void replaceFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  const unsigned char *zStr;        /* The input string A */
+  const unsigned char *zPattern;    /* The pattern string B */
+  const unsigned char *zRep;        /* The replacement string C */
+  unsigned char *zOut;              /* The output */
+  int nStr;                /* Size of zStr */
+  int nPattern;            /* Size of zPattern */
+  int nRep;                /* Size of zRep */
+  i64 nOut;                /* Maximum size of zOut */
+  int loopLimit;           /* Last zStr[] that might match zPattern[] */
+  int i, j;                /* Loop counters */
+
+  assert( argc==3 );
+  UNUSED_PARAMETER(argc);
+  zStr = sqlite3_value_text(argv[0]);
+  if( zStr==0 ) return;
+  nStr = sqlite3_value_bytes(argv[0]);
+  assert( zStr==sqlite3_value_text(argv[0]) );  /* No encoding change */
+  zPattern = sqlite3_value_text(argv[1]);
+  if( zPattern==0 ){
+    assert( sqlite3_value_type(argv[1])==SQLITE_NULL
+            || sqlite3_context_db_handle(context)->mallocFailed );
+    return;
+  }
+  if( zPattern[0]==0 ){
+    assert( sqlite3_value_type(argv[1])!=SQLITE_NULL );
+    sqlite3_result_value(context, argv[0]);
+    return;
+  }
+  nPattern = sqlite3_value_bytes(argv[1]);
+  assert( zPattern==sqlite3_value_text(argv[1]) );  /* No encoding change */
+  zRep = sqlite3_value_text(argv[2]);
+  if( zRep==0 ) return;
+  nRep = sqlite3_value_bytes(argv[2]);
+  assert( zRep==sqlite3_value_text(argv[2]) );
+  nOut = nStr + 1;
+  assert( nOut<SQLITE_MAX_LENGTH );
+  zOut = contextMalloc(context, (i64)nOut);
+  if( zOut==0 ){
+    return;
+  }
+  loopLimit = nStr - nPattern;
+  for(i=j=0; i<=loopLimit; i++){
+    if( zStr[i]!=zPattern[0] || memcmp(&zStr[i], zPattern, nPattern) ){
+      zOut[j++] = zStr[i];
+    }else{
+      u8 *zOld;
+      sqlite3 *db = sqlite3_context_db_handle(context);
+      nOut += nRep - nPattern;
+      testcase( nOut-1==db->aLimit[SQLITE_LIMIT_LENGTH] );
+      testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] );
+      if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){
+        sqlite3_result_error_toobig(context);
+        sqlite3_free(zOut);
+        return;
+      }
+      zOld = zOut;
+      zOut = sqlite3_realloc64(zOut, (int)nOut);
+      if( zOut==0 ){
+        sqlite3_result_error_nomem(context);
+        sqlite3_free(zOld);
+        return;
+      }
+      memcpy(&zOut[j], zRep, nRep);
+      j += nRep;
+      i += nPattern-1;
+    }
+  }
+  assert( j+nStr-i+1==nOut );
+  memcpy(&zOut[j], &zStr[i], nStr-i);
+  j += nStr - i;
+  assert( j<=nOut );
+  zOut[j] = 0;
+  sqlite3_result_text(context, (char*)zOut, j, sqlite3_free);
+}
+
+/*
+** Implementation of the TRIM(), LTRIM(), and RTRIM() functions.
+** The userdata is 0x1 for left trim, 0x2 for right trim, 0x3 for both.
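+**
+** Examples (illustrative): trim('  hi  ') is 'hi', ltrim('  hi  ')
+** is 'hi  ', and trim('xyxhixx','xy') is 'hi', since the second
+** argument names a set of individual characters to strip rather than
+** a literal prefix or suffix string.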
+*/
+static void trimFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  const unsigned char *zIn;         /* Input string */
+  const unsigned char *zCharSet;    /* Set of characters to trim */
+  int nIn;                          /* Number of bytes in input */
+  int flags;                        /* 1: trimleft  2: trimright  3: trim */
+  int i;                            /* Loop counter */
+  unsigned char *aLen = 0;          /* Length of each character in zCharSet */
+  unsigned char **azChar = 0;       /* Individual characters in zCharSet */
+  int nChar;                        /* Number of characters in zCharSet */
+
+  if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
+    return;
+  }
+  zIn = sqlite3_value_text(argv[0]);
+  if( zIn==0 ) return;
+  nIn = sqlite3_value_bytes(argv[0]);
+  assert( zIn==sqlite3_value_text(argv[0]) );
+  if( argc==1 ){
+    static const unsigned char lenOne[] = { 1 };
+    static unsigned char * const azOne[] = { (u8*)" " };
+    nChar = 1;
+    aLen = (u8*)lenOne;
+    azChar = (unsigned char **)azOne;
+    zCharSet = 0;
+  }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){
+    return;
+  }else{
+    const unsigned char *z;
+    for(z=zCharSet, nChar=0; *z; nChar++){
+      SQLITE_SKIP_UTF8(z);
+    }
+    if( nChar>0 ){
+      azChar = contextMalloc(context, ((i64)nChar)*(sizeof(char*)+1));
+      if( azChar==0 ){
+        return;
+      }
+      aLen = (unsigned char*)&azChar[nChar];
+      for(z=zCharSet, nChar=0; *z; nChar++){
+        azChar[nChar] = (unsigned char *)z;
+        SQLITE_SKIP_UTF8(z);
+        aLen[nChar] = (u8)(z - azChar[nChar]);
+      }
+    }
+  }
+  if( nChar>0 ){
+    flags = SQLITE_PTR_TO_INT(sqlite3_user_data(context));
+    if( flags & 1 ){
+      while( nIn>0 ){
+        int len = 0;
+        for(i=0; i<nChar; i++){
+          len = aLen[i];
+          if( len<=nIn && memcmp(zIn, azChar[i], len)==0 ) break;
+        }
+        if( i>=nChar ) break;
+        zIn += len;
+        nIn -= len;
+      }
+    }
+    if( flags & 2 ){
+      while( nIn>0 ){
+        int len = 0;
+        for(i=0; i<nChar; i++){
+          len = aLen[i];
+          if( len<=nIn && memcmp(&zIn[nIn-len],azChar[i],len)==0 ) break;
+        }
+        if( i>=nChar ) break;
+        nIn -= len;
+      }
+    }
+    if( zCharSet ){
+      sqlite3_free(azChar);
+    }
+  }
+  sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT);
+}
+
+
+#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
+/*
+** The "unknown" function is automatically substituted in place of
+** any unrecognized function name when doing an EXPLAIN or EXPLAIN QUERY PLAN
+** when the SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION compile-time option is used.
+** When the "sqlite3" command-line shell is built using this functionality,
+** that allows an EXPLAIN or EXPLAIN QUERY PLAN for complex queries
+** involving application-defined functions to be examined in a generic
+** sqlite3 shell.
+*/
+static void unknownFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  /* no-op */
+}
+#endif /*SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION*/
+
+
+/* IMP: R-25361-16150 This function is omitted from SQLite by default. It
+** is only available if the SQLITE_SOUNDEX compile-time option is used
+** when SQLite is built.
+*/
+#ifdef SQLITE_SOUNDEX
+/*
+** Compute the soundex encoding of a word.
+**
+** IMP: R-59782-00072 The soundex(X) function returns a string that is the
+** soundex encoding of the string X.
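+**
+** For example (illustrative): soundex('Robert') and soundex('Rupert')
+** both evaluate to 'R163'.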
+*/ +static void soundexFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + char zResult[8]; + const u8 *zIn; + int i, j; + static const unsigned char iCode[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, + 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, + 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, + 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, + }; + assert( argc==1 ); + zIn = (u8*)sqlite3_value_text(argv[0]); + if( zIn==0 ) zIn = (u8*)""; + for(i=0; zIn[i] && !sqlite3Isalpha(zIn[i]); i++){} + if( zIn[i] ){ + u8 prevcode = iCode[zIn[i]&0x7f]; + zResult[0] = sqlite3Toupper(zIn[i]); + for(j=1; j<4 && zIn[i]; i++){ + int code = iCode[zIn[i]&0x7f]; + if( code>0 ){ + if( code!=prevcode ){ + prevcode = code; + zResult[j++] = code + '0'; + } + }else{ + prevcode = 0; + } + } + while( j<4 ){ + zResult[j++] = '0'; + } + zResult[j] = 0; + sqlite3_result_text(context, zResult, 4, SQLITE_TRANSIENT); + }else{ + /* IMP: R-64894-50321 The string "?000" is returned if the argument + ** is NULL or contains no ASCII alphabetic characters. */ + sqlite3_result_text(context, "?000", 4, SQLITE_STATIC); + } +} +#endif /* SQLITE_SOUNDEX */ + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** A function that loads a shared-library extension then returns NULL. +*/ +static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ + const char *zFile = (const char *)sqlite3_value_text(argv[0]); + const char *zProc; + sqlite3 *db = sqlite3_context_db_handle(context); + char *zErrMsg = 0; + + /* Disallow the load_extension() SQL function unless the SQLITE_LoadExtFunc + ** flag is set. See the sqlite3_enable_load_extension() API. + */ + if( (db->flags & SQLITE_LoadExtFunc)==0 ){ + sqlite3_result_error(context, "not authorized", -1); + return; + } + + if( argc==2 ){ + zProc = (const char *)sqlite3_value_text(argv[1]); + }else{ + zProc = 0; + } + if( zFile && sqlite3_load_extension(db, zFile, zProc, &zErrMsg) ){ + sqlite3_result_error(context, zErrMsg, -1); + sqlite3_free(zErrMsg); + } +} +#endif + + +/* +** An instance of the following structure holds the context of a +** sum() or avg() aggregate computation. +*/ +typedef struct SumCtx SumCtx; +struct SumCtx { + double rSum; /* Floating point sum */ + i64 iSum; /* Integer sum */ + i64 cnt; /* Number of elements summed */ + u8 overflow; /* True if integer overflow seen */ + u8 approx; /* True if non-integer value was input to the sum */ +}; + +/* +** Routines used to compute the sum, average, and total. +** +** The SUM() function follows the (broken) SQL standard which means +** that it returns NULL if it sums over no inputs. TOTAL returns +** 0.0 in that case. In addition, TOTAL always returns a float where +** SUM might return an integer if it never encounters a floating point +** value. TOTAL never fails, but SUM might through an exception if +** it overflows an integer. 
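+**
+** For example (illustrative), when t is empty:
+**
+**     SELECT sum(x), total(x) FROM t;   -- yields NULL and 0.0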
+*/ +static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){ + SumCtx *p; + int type; + assert( argc==1 ); + UNUSED_PARAMETER(argc); + p = sqlite3_aggregate_context(context, sizeof(*p)); + type = sqlite3_value_numeric_type(argv[0]); + if( p && type!=SQLITE_NULL ){ + p->cnt++; + if( type==SQLITE_INTEGER ){ + i64 v = sqlite3_value_int64(argv[0]); + p->rSum += v; + if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){ + p->overflow = 1; + } + }else{ + p->rSum += sqlite3_value_double(argv[0]); + p->approx = 1; + } + } +} +static void sumFinalize(sqlite3_context *context){ + SumCtx *p; + p = sqlite3_aggregate_context(context, 0); + if( p && p->cnt>0 ){ + if( p->overflow ){ + sqlite3_result_error(context,"integer overflow",-1); + }else if( p->approx ){ + sqlite3_result_double(context, p->rSum); + }else{ + sqlite3_result_int64(context, p->iSum); + } + } +} +static void avgFinalize(sqlite3_context *context){ + SumCtx *p; + p = sqlite3_aggregate_context(context, 0); + if( p && p->cnt>0 ){ + sqlite3_result_double(context, p->rSum/(double)p->cnt); + } +} +static void totalFinalize(sqlite3_context *context){ + SumCtx *p; + p = sqlite3_aggregate_context(context, 0); + /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ + sqlite3_result_double(context, p ? p->rSum : (double)0); +} + +/* +** The following structure keeps track of state information for the +** count() aggregate function. +*/ +typedef struct CountCtx CountCtx; +struct CountCtx { + i64 n; +}; + +/* +** Routines to implement the count() aggregate function. +*/ +static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){ + CountCtx *p; + p = sqlite3_aggregate_context(context, sizeof(*p)); + if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){ + p->n++; + } + +#ifndef SQLITE_OMIT_DEPRECATED + /* The sqlite3_aggregate_count() function is deprecated. But just to make + ** sure it still operates correctly, verify that its count agrees with our + ** internal count when using count(*) and when the total count can be + ** expressed as a 32-bit integer. */ + assert( argc==1 || p==0 || p->n>0x7fffffff + || p->n==sqlite3_aggregate_count(context) ); +#endif +} +static void countFinalize(sqlite3_context *context){ + CountCtx *p; + p = sqlite3_aggregate_context(context, 0); + sqlite3_result_int64(context, p ? p->n : 0); +} + +/* +** Routines to implement min() and max() aggregate functions. +*/ +static void minmaxStep( + sqlite3_context *context, + int NotUsed, + sqlite3_value **argv +){ + Mem *pArg = (Mem *)argv[0]; + Mem *pBest; + UNUSED_PARAMETER(NotUsed); + + pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest)); + if( !pBest ) return; + + if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ + if( pBest->flags ) sqlite3SkipAccumulatorLoad(context); + }else if( pBest->flags ){ + int max; + int cmp; + CollSeq *pColl = sqlite3GetFuncCollSeq(context); + /* This step function is used for both the min() and max() aggregates, + ** the only difference between the two being that the sense of the + ** comparison is inverted. For the max() aggregate, the + ** sqlite3_user_data() function returns (void *)-1. For min() it + ** returns (void *)db, where db is the sqlite3* database pointer. + ** Therefore the next statement sets variable 'max' to 1 for the max() + ** aggregate, or 0 for min(). 
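+    **
+    ** So for max(), cmp<0 below means the stored best is less than the
+    ** new value and must be replaced; for min() the test is inverted.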
+ */ + max = sqlite3_user_data(context)!=0; + cmp = sqlite3MemCompare(pBest, pArg, pColl); + if( (max && cmp<0) || (!max && cmp>0) ){ + sqlite3VdbeMemCopy(pBest, pArg); + }else{ + sqlite3SkipAccumulatorLoad(context); + } + }else{ + pBest->db = sqlite3_context_db_handle(context); + sqlite3VdbeMemCopy(pBest, pArg); + } +} +static void minMaxFinalize(sqlite3_context *context){ + sqlite3_value *pRes; + pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0); + if( pRes ){ + if( pRes->flags ){ + sqlite3_result_value(context, pRes); + } + sqlite3VdbeMemRelease(pRes); + } +} + +/* +** group_concat(EXPR, ?SEPARATOR?) +*/ +static void groupConcatStep( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *zVal; + StrAccum *pAccum; + const char *zSep; + int nVal, nSep; + assert( argc==1 || argc==2 ); + if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; + pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum)); + + if( pAccum ){ + sqlite3 *db = sqlite3_context_db_handle(context); + int firstTerm = pAccum->mxAlloc==0; + pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH]; + if( !firstTerm ){ + if( argc==2 ){ + zSep = (char*)sqlite3_value_text(argv[1]); + nSep = sqlite3_value_bytes(argv[1]); + }else{ + zSep = ","; + nSep = 1; + } + if( zSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep); + } + zVal = (char*)sqlite3_value_text(argv[0]); + nVal = sqlite3_value_bytes(argv[0]); + if( zVal ) sqlite3StrAccumAppend(pAccum, zVal, nVal); + } +} +static void groupConcatFinalize(sqlite3_context *context){ + StrAccum *pAccum; + pAccum = sqlite3_aggregate_context(context, 0); + if( pAccum ){ + if( pAccum->accError==STRACCUM_TOOBIG ){ + sqlite3_result_error_toobig(context); + }else if( pAccum->accError==STRACCUM_NOMEM ){ + sqlite3_result_error_nomem(context); + }else{ + sqlite3_result_text(context, sqlite3StrAccumFinish(pAccum), -1, + sqlite3_free); + } + } +} + +/* +** This routine does per-connection function registration. Most +** of the built-in functions above are part of the global function set. +** This routine only deals with those that are not global. +*/ +SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3 *db){ + int rc = sqlite3_overload_function(db, "MATCH", 2); + assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); + if( rc==SQLITE_NOMEM ){ + sqlite3OomFault(db); + } +} + +/* +** Set the LIKEOPT flag on the 2-argument function with the given name. +*/ +static void setLikeOptFlag(sqlite3 *db, const char *zName, u8 flagVal){ + FuncDef *pDef; + pDef = sqlite3FindFunction(db, zName, 2, SQLITE_UTF8, 0); + if( ALWAYS(pDef) ){ + pDef->funcFlags |= flagVal; + } +} + +/* +** Register the built-in LIKE and GLOB functions. The caseSensitive +** parameter determines whether or not the LIKE operator is case +** sensitive. GLOB is always case sensitive. +*/ +SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ + struct compareInfo *pInfo; + if( caseSensitive ){ + pInfo = (struct compareInfo*)&likeInfoAlt; + }else{ + pInfo = (struct compareInfo*)&likeInfoNorm; + } + sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0); + sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0); + sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8, + (struct compareInfo*)&globInfo, likeFunc, 0, 0, 0); + setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE); + setLikeOptFlag(db, "like", + caseSensitive ? 
(SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE) : SQLITE_FUNC_LIKE);
+}
+
+/*
+** pExpr points to an expression which implements a function.  If
+** it is appropriate to apply the LIKE optimization to that function
+** then set aWc[0] through aWc[2] to the wildcard characters and
+** return TRUE.  If the function is not a LIKE-style function then
+** return FALSE.
+**
+** *pIsNocase is set to true if uppercase and lowercase are equivalent for
+** the function (default for LIKE).  If the function makes the distinction
+** between uppercase and lowercase (as does GLOB) then *pIsNocase is set to
+** false.
+*/
+SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){
+  FuncDef *pDef;
+  if( pExpr->op!=TK_FUNCTION
+   || !pExpr->x.pList
+   || pExpr->x.pList->nExpr!=2
+  ){
+    return 0;
+  }
+  assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
+  pDef = sqlite3FindFunction(db, pExpr->u.zToken, 2, SQLITE_UTF8, 0);
+  if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_FUNC_LIKE)==0 ){
+    return 0;
+  }
+
+  /* The memcpy() statement assumes that the wildcard characters are
+  ** the first three fields of the compareInfo structure.  The
+  ** asserts() that follow verify that assumption.
+  */
+  memcpy(aWc, pDef->pUserData, 3);
+  assert( (char*)&likeInfoAlt == (char*)&likeInfoAlt.matchAll );
+  assert( &((char*)&likeInfoAlt)[1] == (char*)&likeInfoAlt.matchOne );
+  assert( &((char*)&likeInfoAlt)[2] == (char*)&likeInfoAlt.matchSet );
+  *pIsNocase = (pDef->funcFlags & SQLITE_FUNC_CASE)==0;
+  return 1;
+}
+
+/*
+** Register all of the FuncDef structures in the aBuiltinFunc[] array below
+** with the global function hash table.  This occurs at start-time (as
+** a consequence of calling sqlite3_initialize()).
+**
+** After this routine runs, all of the built-in functions are available
+** from the global function set.
+*/
+SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
+  /*
+  ** The following array holds FuncDef structures for all of the functions
+  ** defined in this file.
+  **
+  ** The array cannot be constant since changes are made to the
+  ** FuncDef.pHash elements at start-time.  The elements of this array
+  ** are read-only after initialization is complete.
+  **
+  ** For peak efficiency, put the most frequently used function last.
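+  ** (Later array entries are pushed toward the front of their bucket's
+  ** collision chain by sqlite3InsertBuiltinFuncs(), so they are found
+  ** with fewer comparisons at name-lookup time.)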
+ */ + static FuncDef aBuiltinFunc[] = { +#ifdef SQLITE_SOUNDEX + FUNCTION(soundex, 1, 0, 0, soundexFunc ), +#endif +#ifndef SQLITE_OMIT_LOAD_EXTENSION + VFUNCTION(load_extension, 1, 0, 0, loadExt ), + VFUNCTION(load_extension, 2, 0, 0, loadExt ), +#endif +#if SQLITE_USER_AUTHENTICATION + FUNCTION(sqlite_crypt, 2, 0, 0, sqlite3CryptFunc ), +#endif +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS + DFUNCTION(sqlite_compileoption_used,1, 0, 0, compileoptionusedFunc ), + DFUNCTION(sqlite_compileoption_get, 1, 0, 0, compileoptiongetFunc ), +#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ + FUNCTION2(unlikely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), + FUNCTION2(likelihood, 2, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), + FUNCTION2(likely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), +#ifdef SQLITE_DEBUG + FUNCTION2(affinity, 1, 0, 0, noopFunc, SQLITE_FUNC_AFFINITY), +#endif + FUNCTION(ltrim, 1, 1, 0, trimFunc ), + FUNCTION(ltrim, 2, 1, 0, trimFunc ), + FUNCTION(rtrim, 1, 2, 0, trimFunc ), + FUNCTION(rtrim, 2, 2, 0, trimFunc ), + FUNCTION(trim, 1, 3, 0, trimFunc ), + FUNCTION(trim, 2, 3, 0, trimFunc ), + FUNCTION(min, -1, 0, 1, minmaxFunc ), + FUNCTION(min, 0, 0, 1, 0 ), + AGGREGATE2(min, 1, 0, 1, minmaxStep, minMaxFinalize, + SQLITE_FUNC_MINMAX ), + FUNCTION(max, -1, 1, 1, minmaxFunc ), + FUNCTION(max, 0, 1, 1, 0 ), + AGGREGATE2(max, 1, 1, 1, minmaxStep, minMaxFinalize, + SQLITE_FUNC_MINMAX ), + FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), + FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), + FUNCTION(instr, 2, 0, 0, instrFunc ), + FUNCTION(printf, -1, 0, 0, printfFunc ), + FUNCTION(unicode, 1, 0, 0, unicodeFunc ), + FUNCTION(char, -1, 0, 0, charFunc ), + FUNCTION(abs, 1, 0, 0, absFunc ), +#ifndef SQLITE_OMIT_FLOATING_POINT + FUNCTION(round, 1, 0, 0, roundFunc ), + FUNCTION(round, 2, 0, 0, roundFunc ), +#endif + FUNCTION(upper, 1, 0, 0, upperFunc ), + FUNCTION(lower, 1, 0, 0, lowerFunc ), + FUNCTION(hex, 1, 0, 0, hexFunc ), + FUNCTION2(ifnull, 2, 0, 0, noopFunc, SQLITE_FUNC_COALESCE), + VFUNCTION(random, 0, 0, 0, randomFunc ), + VFUNCTION(randomblob, 1, 0, 0, randomBlob ), + FUNCTION(nullif, 2, 0, 1, nullifFunc ), + DFUNCTION(sqlite_version, 0, 0, 0, versionFunc ), + DFUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ), + FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ), + FUNCTION(quote, 1, 0, 0, quoteFunc ), + VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid), + VFUNCTION(changes, 0, 0, 0, changes ), + VFUNCTION(total_changes, 0, 0, 0, total_changes ), + FUNCTION(replace, 3, 0, 0, replaceFunc ), + FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ), + FUNCTION(substr, 2, 0, 0, substrFunc ), + FUNCTION(substr, 3, 0, 0, substrFunc ), + AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize ), + AGGREGATE(total, 1, 0, 0, sumStep, totalFinalize ), + AGGREGATE(avg, 1, 0, 0, sumStep, avgFinalize ), + AGGREGATE2(count, 0, 0, 0, countStep, countFinalize, + SQLITE_FUNC_COUNT ), + AGGREGATE(count, 1, 0, 0, countStep, countFinalize ), + AGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize), + AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize), + + LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), +#ifdef SQLITE_CASE_SENSITIVE_LIKE + LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), + LIKEFUNC(like, 3, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), +#else + LIKEFUNC(like, 2, &likeInfoNorm, SQLITE_FUNC_LIKE), + LIKEFUNC(like, 3, &likeInfoNorm, SQLITE_FUNC_LIKE), +#endif +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + FUNCTION(unknown, -1, 0, 0, 
unknownFunc ),
+#endif
+  FUNCTION(coalesce,           1, 0, 0, 0                ),
+  FUNCTION(coalesce,           0, 0, 0, 0                ),
+  FUNCTION2(coalesce,         -1, 0, 0, noopFunc,  SQLITE_FUNC_COALESCE),
+  };
+#ifndef SQLITE_OMIT_ALTERTABLE
+  sqlite3AlterFunctions();
+#endif
+#if defined(SQLITE_ENABLE_STAT3) || defined(SQLITE_ENABLE_STAT4)
+  sqlite3AnalyzeFunctions();
+#endif
+  sqlite3RegisterDateTimeFunctions();
+  sqlite3InsertBuiltinFuncs(aBuiltinFunc, ArraySize(aBuiltinFunc));
+
+#if 0  /* Enable to print out how the built-in functions are hashed */
+  {
+    int i;
+    FuncDef *p;
+    for(i=0; i<SQLITE_FUNC_HASH_SZ; i++){
+      printf("FUNC-HASH %02d:", i);
+      for(p=sqlite3BuiltinFunctions.a[i]; p; p=p->u.pHash){
+        int n = sqlite3Strlen30(p->zName);
+        int h = p->zName[0] + n;
+        printf(" %s(%d)", p->zName, h);
+      }
+      printf("\n");
+    }
+  }
+#endif
+}
+
+/************** End of func.c ************************************************/
+/************** Begin file fkey.c ********************************************/
+/*
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used by the compiler to add foreign key
+** support to compiled SQL statements.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+#ifndef SQLITE_OMIT_TRIGGER
+
+/*
+** Deferred and Immediate FKs
+** --------------------------
+**
+** Foreign keys in SQLite come in two flavours: deferred and immediate.
+** If an immediate foreign key constraint is violated,
+** SQLITE_CONSTRAINT_FOREIGNKEY is returned and the current
+** statement transaction is rolled back.  If a
+** deferred foreign key constraint is violated, no action is taken
+** immediately.  However if the application attempts to commit the
+** transaction before fixing the constraint violation, the attempt fails.
+**
+** Deferred constraints are implemented using a simple counter associated
+** with the database handle.  The counter is set to zero each time a
+** database transaction is opened.  Each time a statement is executed
+** that causes a foreign key violation, the counter is incremented.  Each
+** time a statement is executed that removes an existing violation from
+** the database, the counter is decremented.  When the transaction is
+** committed, the commit fails if the current value of the counter is
+** greater than zero.  This scheme has two big drawbacks:
+**
+**   * When a commit fails due to a deferred foreign key constraint,
+**     there is no way to tell which foreign constraint is not satisfied,
+**     or which row it is not satisfied for.
+**
+**   * If the database contains foreign key violations when the
+**     transaction is opened, this may cause the mechanism to malfunction.
+**
+** Despite these problems, this approach is adopted as it seems simpler
+** than the alternatives.
+**
+** INSERT operations:
+**
+**   I.1) For each FK for which the table is the child table, search
+**        the parent table for a match.  If none is found increment the
+**        constraint counter.
+**
+**   I.2) For each FK for which the table is the parent table,
+**        search the child table for rows that correspond to the new
+**        row in the parent table.  Decrement the counter for each row
+**        found (as the constraint is now satisfied).
+**
+** DELETE operations:
+**
+**   D.1) For each FK for which the table is the child table,
+**        search the parent table for a row that corresponds to the
+**        deleted row in the child table.
If such a row is not found, +** decrement the counter. +** +** D.2) For each FK for which the table is the parent table, search +** the child table for rows that correspond to the deleted row +** in the parent table. For each found increment the counter. +** +** UPDATE operations: +** +** An UPDATE command requires that all 4 steps above are taken, but only +** for FK constraints for which the affected columns are actually +** modified (values must be compared at runtime). +** +** Note that I.1 and D.1 are very similar operations, as are I.2 and D.2. +** This simplifies the implementation a bit. +** +** For the purposes of immediate FK constraints, the OR REPLACE conflict +** resolution is considered to delete rows before the new row is inserted. +** If a delete caused by OR REPLACE violates an FK constraint, an exception +** is thrown, even if the FK constraint would be satisfied after the new +** row is inserted. +** +** Immediate constraints are usually handled similarly. The only difference +** is that the counter used is stored as part of each individual statement +** object (struct Vdbe). If, after the statement has run, its immediate +** constraint counter is greater than zero, +** it returns SQLITE_CONSTRAINT_FOREIGNKEY +** and the statement transaction is rolled back. An exception is an INSERT +** statement that inserts a single row only (no triggers). In this case, +** instead of using a counter, an exception is thrown immediately if the +** INSERT violates a foreign key constraint. This is necessary as such +** an INSERT does not open a statement transaction. +** +** TODO: How should dropping a table be handled? How should renaming a +** table be handled? +** +** +** Query API Notes +** --------------- +** +** Before coding an UPDATE or DELETE row operation, the code-generator +** for those two operations needs to know whether or not the operation +** requires any FK processing and, if so, which columns of the original +** row are required by the FK processing VDBE code (i.e. if FKs were +** implemented using triggers, which of the old.* columns would be +** accessed). No information is required by the code-generator before +** coding an INSERT operation. The functions used by the UPDATE/DELETE +** generation code to query for this information are: +** +** sqlite3FkRequired() - Test to see if FK processing is required. +** sqlite3FkOldmask() - Query for the set of required old.* columns. +** +** +** Externally accessible module functions +** -------------------------------------- +** +** sqlite3FkCheck() - Check for foreign key violations. +** sqlite3FkActions() - Code triggers for ON UPDATE/ON DELETE actions. +** sqlite3FkDelete() - Delete an FKey structure. +*/ + +/* +** VDBE Calling Convention +** ----------------------- +** +** Example: +** +** For the following INSERT statement: +** +** CREATE TABLE t1(a, b INTEGER PRIMARY KEY, c); +** INSERT INTO t1 VALUES(1, 2, 3.1); +** +** Register (x): 2 (type integer) +** Register (x+1): 1 (type integer) +** Register (x+2): NULL (type NULL) +** Register (x+3): 3.1 (type real) +*/ + +/* +** A foreign key constraint requires that the key columns in the parent +** table are collectively subject to a UNIQUE or PRIMARY KEY constraint. +** Given that pParent is the parent table for foreign key constraint pFKey, +** search the schema for a unique index on the parent key columns. +** +** If successful, zero is returned. If the parent key is an INTEGER PRIMARY +** KEY column, then output variable *ppIdx is set to NULL. 
Otherwise, *ppIdx +** is set to point to the unique index. +** +** If the parent key consists of a single column (the foreign key constraint +** is not a composite foreign key), output variable *paiCol is set to NULL. +** Otherwise, it is set to point to an allocated array of size N, where +** N is the number of columns in the parent key. The first element of the +** array is the index of the child table column that is mapped by the FK +** constraint to the parent table column stored in the left-most column +** of index *ppIdx. The second element of the array is the index of the +** child table column that corresponds to the second left-most column of +** *ppIdx, and so on. +** +** If the required index cannot be found, either because: +** +** 1) The named parent key columns do not exist, or +** +** 2) The named parent key columns do exist, but are not subject to a +** UNIQUE or PRIMARY KEY constraint, or +** +** 3) No parent key columns were provided explicitly as part of the +** foreign key definition, and the parent table does not have a +** PRIMARY KEY, or +** +** 4) No parent key columns were provided explicitly as part of the +** foreign key definition, and the PRIMARY KEY of the parent table +** consists of a different number of columns to the child key in +** the child table. +** +** then non-zero is returned, and a "foreign key mismatch" error loaded +** into pParse. If an OOM error occurs, non-zero is returned and the +** pParse->db->mallocFailed flag is set. +*/ +SQLITE_PRIVATE int sqlite3FkLocateIndex( + Parse *pParse, /* Parse context to store any error in */ + Table *pParent, /* Parent table of FK constraint pFKey */ + FKey *pFKey, /* Foreign key to find index for */ + Index **ppIdx, /* OUT: Unique index on parent table */ + int **paiCol /* OUT: Map of index columns in pFKey */ +){ + Index *pIdx = 0; /* Value to return via *ppIdx */ + int *aiCol = 0; /* Value to return via *paiCol */ + int nCol = pFKey->nCol; /* Number of columns in parent key */ + char *zKey = pFKey->aCol[0].zCol; /* Name of left-most parent key column */ + + /* The caller is responsible for zeroing output parameters. */ + assert( ppIdx && *ppIdx==0 ); + assert( !paiCol || *paiCol==0 ); + assert( pParse ); + + /* If this is a non-composite (single column) foreign key, check if it + ** maps to the INTEGER PRIMARY KEY of table pParent. If so, leave *ppIdx + ** and *paiCol set to zero and return early. + ** + ** Otherwise, for a composite foreign key (more than one column), allocate + ** space for the aiCol array (returned via output parameter *paiCol). + ** Non-composite foreign keys do not require the aiCol array. + */ + if( nCol==1 ){ + /* The FK maps to the IPK if any of the following are true: + ** + ** 1) There is an INTEGER PRIMARY KEY column and the FK is implicitly + ** mapped to the primary key of table pParent, or + ** 2) The FK is explicitly mapped to a column declared as INTEGER + ** PRIMARY KEY. + */ + if( pParent->iPKey>=0 ){ + if( !zKey ) return 0; + if( !sqlite3StrICmp(pParent->aCol[pParent->iPKey].zName, zKey) ) return 0; + } + }else if( paiCol ){ + assert( nCol>1 ); + aiCol = (int *)sqlite3DbMallocRawNN(pParse->db, nCol*sizeof(int)); + if( !aiCol ) return 1; + *paiCol = aiCol; + } + + for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) && pIdx->pPartIdxWhere==0 ){ + /* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number + ** of columns. 
+      ** If each indexed column corresponds to a foreign key
+      ** column of pFKey, then this index is a winner.  */
+
+      if( zKey==0 ){
+        /* If zKey is NULL, then this foreign key is implicitly mapped to
+        ** the PRIMARY KEY of table pParent. The PRIMARY KEY index may be
+        ** identified by the IsPrimaryKeyIndex() test. */
+        if( IsPrimaryKeyIndex(pIdx) ){
+          if( aiCol ){
+            int i;
+            for(i=0; i<nCol; i++) aiCol[i] = pFKey->aCol[i].iFrom;
+          }
+          break;
+        }
+      }else{
+        /* If zKey is non-NULL, then this foreign key was declared to
+        ** map to an explicit list of columns in table pParent. Check if this
+        ** index matches those columns. Also, check that the index uses
+        ** the default collation sequences for each column. */
+        int i, j;
+        for(i=0; i<nCol; i++){
+          i16 iCol = pIdx->aiColumn[i];   /* Index of column in parent tbl */
+          const char *zDfltColl;          /* Def. collation for column */
+          char *zIdxCol;                  /* Name of indexed column */
+
+          if( iCol<0 ) break;  /* No foreign keys against expression indexes */
+
+          /* If the index uses a collation sequence that is different from
+          ** the default collation sequence for the column, this index is
+          ** unusable. Bail out early in this case. */
+          zDfltColl = pParent->aCol[iCol].zColl;
+          if( !zDfltColl ) zDfltColl = sqlite3StrBINARY;
+          if( sqlite3StrICmp(pIdx->azColl[i], zDfltColl) ) break;
+
+          zIdxCol = pParent->aCol[iCol].zName;
+          for(j=0; j<nCol; j++){
+            if( sqlite3StrICmp(pFKey->aCol[j].zCol, zIdxCol)==0 ){
+              if( aiCol ) aiCol[i] = pFKey->aCol[j].iFrom;
+              break;
+            }
+          }
+          if( j==nCol ) break;
+        }
+        if( i==nCol ) break;      /* pIdx is usable */
+      }
+    }
+  }
+
+  if( !pIdx ){
+    if( !pParse->disableTriggers ){
+      sqlite3ErrorMsg(pParse,
+        "foreign key mismatch - \"%w\" referencing \"%w\"",
+        pFKey->pFrom->zName, pFKey->zTo);
+    }
+    sqlite3DbFree(pParse->db, aiCol);
+    return 1;
+  }
+
+  *ppIdx = pIdx;
+  return 0;
+}
+
+/*
+** This function is called when a row is inserted into or deleted from the
+** child table of foreign key constraint pFKey. If an SQL UPDATE is executed
+** on the child table of pFKey, this function is invoked twice for each row
+** affected - once to "delete" the old row, and then again to "insert" the
+** new row.
+**
+** Each time it is called, this function generates VDBE code to locate the
+** row in the parent table that corresponds to the row being inserted into
+** or deleted from the child table. If the parent row can be found, no
+** special action is taken. Otherwise, if the parent row can *not* be
+** found in the parent table:
+**
+**   Operation | FK type   | Action taken
+**   --------------------------------------------------------------------
+**   INSERT      immediate   Increment the "immediate constraint counter".
+**
+**   DELETE      immediate   Decrement the "immediate constraint counter".
+**
+**   INSERT      deferred    Increment the "deferred constraint counter".
+**
+**   DELETE      deferred    Decrement the "deferred constraint counter".
+**
+** These operations are identified in the comment at the top of this file
+** (fkey.c) as "I.1" and "D.1".
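+**
+** For example, given a schema like:
+**
+**   CREATE TABLE parent(a PRIMARY KEY);
+**   CREATE TABLE child(b REFERENCES parent(a));
+**
+** inserting a row into "child" whose "b" value has no match in "parent"
+** causes the code generated here to increment the relevant constraint
+** counter (nIncr==+1); deleting that child row again generates code that
+** decrements the same counter (nIncr==-1).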
+*/
+static void fkLookupParent(
+  Parse *pParse,        /* Parse context */
+  int iDb,              /* Index of database housing pTab */
+  Table *pTab,          /* Parent table of FK pFKey */
+  Index *pIdx,          /* Unique index on parent key columns in pTab */
+  FKey *pFKey,          /* Foreign key constraint */
+  int *aiCol,           /* Map from parent key columns to child table columns */
+  int regData,          /* Address of array containing child table row */
+  int nIncr,            /* Increment constraint counter by this */
+  int isIgnore          /* If true, pretend pTab contains all NULL values */
+){
+  int i;                                  /* Iterator variable */
+  Vdbe *v = sqlite3GetVdbe(pParse);       /* Vdbe to add code to */
+  int iCur = pParse->nTab - 1;            /* Cursor number to use */
+  int iOk = sqlite3VdbeMakeLabel(v);      /* jump here if parent key found */
+
+  /* If nIncr is less than zero, then check at runtime if there are any
+  ** outstanding constraints to resolve. If there are not, there is no need
+  ** to check if deleting this row resolves any outstanding violations.
+  **
+  ** Check if any of the key columns in the child table row are NULL. If
+  ** any are, then the constraint is considered satisfied. No need to
+  ** search for a matching row in the parent table. */
+  if( nIncr<0 ){
+    sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, iOk);
+    VdbeCoverage(v);
+  }
+  for(i=0; i<pFKey->nCol; i++){
+    int iReg = aiCol[i] + regData + 1;
+    sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iOk); VdbeCoverage(v);
+  }
+
+  if( isIgnore==0 ){
+    if( pIdx==0 ){
+      /* If pIdx is NULL, then the parent key is the INTEGER PRIMARY KEY
+      ** column of the parent table (table pTab). */
+      int iMustBeInt;               /* Address of MustBeInt instruction */
+      int regTemp = sqlite3GetTempReg(pParse);
+
+      /* Invoke MustBeInt to coerce the child key value to an integer (i.e.
+      ** apply the affinity of the parent key). If this fails, then there
+      ** is no matching parent key. Before using MustBeInt, make a copy of
+      ** the value. Otherwise, the value inserted into the child key column
+      ** will have INTEGER affinity applied to it, which may not be correct. */
+      sqlite3VdbeAddOp2(v, OP_SCopy, aiCol[0]+1+regData, regTemp);
+      iMustBeInt = sqlite3VdbeAddOp2(v, OP_MustBeInt, regTemp, 0);
+      VdbeCoverage(v);
+
+      /* If the parent table is the same as the child table, and we are about
+      ** to increment the constraint-counter (i.e. this is an INSERT operation),
+      ** then check if the row being inserted matches itself. If so, do not
+      ** increment the constraint-counter.
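+      **
+      ** For example, with a self-referential schema such as
+      ** "CREATE TABLE node(id INTEGER PRIMARY KEY, parent REFERENCES node)",
+      ** the statement "INSERT INTO node VALUES(1, 1)" inserts a row that is
+      ** its own parent and must not be counted as an FK violation.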
+      */
+      if( pTab==pFKey->pFrom && nIncr==1 ){
+        sqlite3VdbeAddOp3(v, OP_Eq, regData, iOk, regTemp); VdbeCoverage(v);
+        sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+      }
+
+      sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead);
+      sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regTemp); VdbeCoverage(v);
+      sqlite3VdbeGoto(v, iOk);
+      sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
+      sqlite3VdbeJumpHere(v, iMustBeInt);
+      sqlite3ReleaseTempReg(pParse, regTemp);
+    }else{
+      int nCol = pFKey->nCol;
+      int regTemp = sqlite3GetTempRange(pParse, nCol);
+      int regRec = sqlite3GetTempReg(pParse);
+
+      sqlite3VdbeAddOp3(v, OP_OpenRead, iCur, pIdx->tnum, iDb);
+      sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+      for(i=0; i<nCol; i++){
+        sqlite3VdbeAddOp2(v, OP_Copy, aiCol[i]+1+regData, regTemp+i);
+      }
+
+      /* If the parent table is the same as the child table, and we are about
+      ** to increment the constraint-counter (i.e. this is an INSERT operation),
+      ** then check if the row being inserted matches itself. If so, do not
+      ** increment the constraint-counter.
+      **
+      ** If any of the parent-key values are NULL, then the row cannot match
+      ** itself. So set JUMPIFNULL to make sure we do the OP_Found if any
+      ** of the parent-key values are NULL (at this point it is known that
+      ** none of the child key values are).
+      */
+      if( pTab==pFKey->pFrom && nIncr==1 ){
+        int iJump = sqlite3VdbeCurrentAddr(v) + nCol + 1;
+        for(i=0; i<nCol; i++){
+          int iChild = aiCol[i]+1+regData;
+          int iParent = pIdx->aiColumn[i]+1+regData;
+          assert( pIdx->aiColumn[i]>=0 );
+          assert( aiCol[i]!=pTab->iPKey );
+          if( pIdx->aiColumn[i]==pTab->iPKey ){
+            /* The parent key is a composite key that includes the IPK column */
+            iParent = regData;
+          }
+          sqlite3VdbeAddOp3(v, OP_Ne, iChild, iJump, iParent); VdbeCoverage(v);
+          sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL);
+        }
+        sqlite3VdbeGoto(v, iOk);
+      }
+
+      sqlite3VdbeAddOp4(v, OP_MakeRecord, regTemp, nCol, regRec,
+                        sqlite3IndexAffinityStr(pParse->db,pIdx), nCol);
+      sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0); VdbeCoverage(v);
+
+      sqlite3ReleaseTempReg(pParse, regRec);
+      sqlite3ReleaseTempRange(pParse, regTemp, nCol);
+    }
+  }
+
+  if( !pFKey->isDeferred && !(pParse->db->flags & SQLITE_DeferFKs)
+   && !pParse->pToplevel
+   && !pParse->isMultiWrite
+  ){
+    /* Special case: If this is an INSERT statement that will insert exactly
+    ** one row into the table, raise a constraint immediately instead of
+    ** incrementing a counter. This is necessary as the VM code being
+    ** generated will not open a statement transaction. */
+    assert( nIncr==1 );
+    sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY,
+        OE_Abort, 0, P4_STATIC, P5_ConstraintFK);
+  }else{
+    if( nIncr>0 && pFKey->isDeferred==0 ){
+      sqlite3MayAbort(pParse);
+    }
+    sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr);
+  }
+
+  sqlite3VdbeResolveLabel(v, iOk);
+  sqlite3VdbeAddOp1(v, OP_Close, iCur);
+}
+
+
+/*
+** Return an Expr object that refers to a memory register corresponding
+** to column iCol of table pTab.
+**
+** regBase is the first of an array of registers that contains the data
+** for pTab. regBase itself holds the rowid. regBase+1 holds the first
+** column. regBase+2 holds the second column, and so forth.
+*/
+static Expr *exprTableRegister(
+  Parse *pParse,     /* Parsing and code generating context */
+  Table *pTab,       /* The table whose content is at r[regBase]... */
+  int regBase,       /* Contents of table pTab */
+  i16 iCol           /* Which column of pTab is desired */
+){
+  Expr *pExpr;
+  Column *pCol;
+  const char *zColl;
+  sqlite3 *db = pParse->db;
+
+  pExpr = sqlite3Expr(db, TK_REGISTER, 0);
+  if( pExpr ){
+    if( iCol>=0 && iCol!=pTab->iPKey ){
+      pCol = &pTab->aCol[iCol];
+      pExpr->iTable = regBase + iCol + 1;
+      pExpr->affinity = pCol->affinity;
+      zColl = pCol->zColl;
+      if( zColl==0 ) zColl = db->pDfltColl->zName;
+      pExpr = sqlite3ExprAddCollateString(pParse, pExpr, zColl);
+    }else{
+      pExpr->iTable = regBase;
+      pExpr->affinity = SQLITE_AFF_INTEGER;
+    }
+  }
+  return pExpr;
+}
+
+/*
+** Return an Expr object that refers to column iCol of table pTab which
+** has cursor iCur.
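+**
+** For example, exprTableColumn(db, pTab, 3, 0) builds a TK_COLUMN Expr
+** with iTable==3 and iColumn==0 - a reference to the left-most column
+** of the table opened on cursor 3 (the cursor and column numbers here
+** are illustrative only).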
+*/
+static Expr *exprTableColumn(
+  sqlite3 *db,      /* The database connection */
+  Table *pTab,      /* The table whose column is desired */
+  int iCursor,      /* The open cursor on the table */
+  i16 iCol          /* The column that is wanted */
+){
+  Expr *pExpr = sqlite3Expr(db, TK_COLUMN, 0);
+  if( pExpr ){
+    pExpr->pTab = pTab;
+    pExpr->iTable = iCursor;
+    pExpr->iColumn = iCol;
+  }
+  return pExpr;
+}
+
+/*
+** This function is called to generate code executed when a row is deleted
+** from the parent table of foreign key constraint pFKey and, if pFKey is
+** deferred, when a row is inserted into the same table. When generating
+** code for an SQL UPDATE operation, this function may be called twice -
+** once to "delete" the old row and once to "insert" the new row.
+**
+** Parameter nIncr is passed -1 when inserting a row (as this may decrease
+** the number of FK violations in the db) or +1 when deleting one (as this
+** may increase the number of FK constraint problems).
+**
+** The code generated by this function scans through the rows in the child
+** table that correspond to the parent table row being deleted or inserted.
+** For each child row found, one of the following actions is taken:
+**
+**   Operation | FK type   | Action taken
+**   --------------------------------------------------------------------
+**   DELETE      immediate   Increment the "immediate constraint counter".
+**                           Or, if the ON (UPDATE|DELETE) action is RESTRICT,
+**                           throw a "FOREIGN KEY constraint failed" exception.
+**
+**   INSERT      immediate   Decrement the "immediate constraint counter".
+**
+**   DELETE      deferred    Increment the "deferred constraint counter".
+**                           Or, if the ON (UPDATE|DELETE) action is RESTRICT,
+**                           throw a "FOREIGN KEY constraint failed" exception.
+**
+**   INSERT      deferred    Decrement the "deferred constraint counter".
+**
+** These operations are identified in the comment at the top of this file
+** (fkey.c) as "I.2" and "D.2".
+*/
+static void fkScanChildren(
+  Parse *pParse,                  /* Parse context */
+  SrcList *pSrc,                  /* The child table to be scanned */
+  Table *pTab,                    /* The parent table */
+  Index *pIdx,                    /* Index on parent covering the foreign key */
+  FKey *pFKey,                    /* The foreign key linking pSrc to pTab */
+  int *aiCol,                     /* Map from pIdx cols to child table cols */
+  int regData,                    /* Parent row data starts here */
+  int nIncr                       /* Amount to increment deferred counter by */
+){
+  sqlite3 *db = pParse->db;       /* Database handle */
+  int i;                          /* Iterator variable */
+  Expr *pWhere = 0;               /* WHERE clause to scan with */
+  NameContext sNameContext;       /* Context used to resolve WHERE clause */
+  WhereInfo *pWInfo;              /* Context used by sqlite3WhereXXX() */
+  int iFkIfZero = 0;              /* Address of OP_FkIfZero */
+  Vdbe *v = sqlite3GetVdbe(pParse);
+
+  assert( pIdx==0 || pIdx->pTable==pTab );
+  assert( pIdx==0 || pIdx->nKeyCol==pFKey->nCol );
+  assert( pIdx!=0 || pFKey->nCol==1 );
+  assert( pIdx!=0 || HasRowid(pTab) );
+
+  if( nIncr<0 ){
+    iFkIfZero = sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, 0);
+    VdbeCoverage(v);
+  }
+
+  /* Create an Expr object representing an SQL expression like:
+  **
+  **   <parent-key1> = <child-key1> AND <parent-key2> = <child-key2> ...
+  **
+  ** The collation sequence used for the comparison should be that of
+  ** the parent key columns. The affinity of the parent key column should
+  ** be applied to each child key value before the comparison takes place.
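+  **
+  ** For example, given a hypothetical schema
+  **
+  **   CREATE TABLE p(a PRIMARY KEY);
+  **   CREATE TABLE c(x REFERENCES p(a));
+  **
+  ** the child table is scanned using a WHERE clause of the form "x = $a",
+  ** where $a stands for the value of parent column "a" held in the
+  ** register array starting at regData.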
+  */
+  for(i=0; i<pFKey->nCol; i++){
+    Expr *pLeft;               /* Value from parent table row */
+    Expr *pRight;              /* Column ref to child table */
+    Expr *pEq;                 /* Expression (pLeft = pRight) */
+    i16 iCol;                  /* Index of column in child table */
+    const char *zCol;          /* Name of column in child table */
+
+    iCol = pIdx ? pIdx->aiColumn[i] : -1;
+    pLeft = exprTableRegister(pParse, pTab, regData, iCol);
+    iCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom;
+    assert( iCol>=0 );
+    zCol = pFKey->pFrom->aCol[iCol].zName;
+    pRight = sqlite3Expr(db, TK_ID, zCol);
+    pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight);
+    pWhere = sqlite3ExprAnd(db, pWhere, pEq);
+  }
+
+  /* If the child table is the same as the parent table, then add terms
+  ** to the WHERE clause that prevent this entry from being scanned.
+  ** The added WHERE clause terms are like this:
+  **
+  **     $current_rowid!=rowid
+  **     NOT( $current_a==a AND $current_b==b AND ... )
+  **
+  ** The first form is used for rowid tables. The second form is used
+  ** for WITHOUT ROWID tables. In the second form, the primary key is
+  ** (a,b,...)
+  */
+  if( pTab==pFKey->pFrom && nIncr>0 ){
+    Expr *pNe;                 /* Expression (pLeft != pRight) */
+    Expr *pLeft;               /* Value from parent table row */
+    Expr *pRight;              /* Column ref to child table */
+    if( HasRowid(pTab) ){
+      pLeft = exprTableRegister(pParse, pTab, regData, -1);
+      pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, -1);
+      pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight);
+    }else{
+      Expr *pEq, *pAll = 0;
+      Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+      assert( pIdx!=0 );
+      for(i=0; i<pPk->nKeyCol; i++){
+        i16 iCol = pIdx->aiColumn[i];
+        assert( iCol>=0 );
+        pLeft = exprTableRegister(pParse, pTab, regData, iCol);
+        pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, iCol);
+        pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight);
+        pAll = sqlite3ExprAnd(db, pAll, pEq);
+      }
+      pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0);
+    }
+    pWhere = sqlite3ExprAnd(db, pWhere, pNe);
+  }
+
+  /* Resolve the references in the WHERE clause. */
+  memset(&sNameContext, 0, sizeof(NameContext));
+  sNameContext.pSrcList = pSrc;
+  sNameContext.pParse = pParse;
+  sqlite3ResolveExprNames(&sNameContext, pWhere);
+
+  /* Create VDBE to loop through the entries in pSrc that match the WHERE
+  ** clause. For each row found, increment either the deferred or immediate
+  ** foreign key constraint counter. */
+  pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0);
+  sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr);
+  if( pWInfo ){
+    sqlite3WhereEnd(pWInfo);
+  }
+
+  /* Clean up the WHERE clause constructed above. */
+  sqlite3ExprDelete(db, pWhere);
+  if( iFkIfZero ){
+    sqlite3VdbeJumpHere(v, iFkIfZero);
+  }
+}
+
+/*
+** This function returns a linked list of FKey objects (connected by
+** FKey.pNextTo) holding all children of table pTab. For example,
+** given the following schema:
+**
+**   CREATE TABLE t1(a PRIMARY KEY);
+**   CREATE TABLE t2(b REFERENCES t1(a));
+**
+** Calling this function with table "t1" as an argument returns a pointer
+** to the FKey structure representing the foreign key constraint on table
+** "t2". Calling this function with "t2" as the argument would return a
+** NULL pointer (as there are no FK constraints for which t2 is the parent
+** table).
+*/
+SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *pTab){
+  return (FKey *)sqlite3HashFind(&pTab->pSchema->fkeyHash, pTab->zName);
+}
+
+/*
+** The second argument is a Trigger structure allocated by the
+** fkActionTrigger() routine.  This function deletes the Trigger structure
+** and all of its sub-components.
+**
+** The Trigger structure or any of its sub-components may be allocated from
+** the lookaside buffer belonging to database handle dbMem.
+*/
+static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){
+  if( p ){
+    TriggerStep *pStep = p->step_list;
+    sqlite3ExprDelete(dbMem, pStep->pWhere);
+    sqlite3ExprListDelete(dbMem, pStep->pExprList);
+    sqlite3SelectDelete(dbMem, pStep->pSelect);
+    sqlite3ExprDelete(dbMem, p->pWhen);
+    sqlite3DbFree(dbMem, p);
+  }
+}
+
+/*
+** This function is called to generate code that runs when table pTab is
+** being dropped from the database. The SrcList passed as the second argument
+** to this function contains a single entry guaranteed to resolve to
+** table pTab.
+**
+** Normally, no code is required. However, if either
+**
+**   (a) The table is the parent table of a FK constraint, or
+**   (b) The table is the child table of a deferred FK constraint and it is
+**       determined at runtime that there are outstanding deferred FK
+**       constraint violations in the database,
+**
+** then the equivalent of "DELETE FROM <tbl>" is executed before dropping
+** the table from the database. Triggers are disabled while running this
+** DELETE, but foreign key actions are not.
+*/
+SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTab){
+  sqlite3 *db = pParse->db;
+  if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) && !pTab->pSelect ){
+    int iSkip = 0;
+    Vdbe *v = sqlite3GetVdbe(pParse);
+
+    assert( v );                  /* VDBE has already been allocated */
+    if( sqlite3FkReferences(pTab)==0 ){
+      /* Search for a deferred foreign key constraint for which this table
+      ** is the child table. If one cannot be found, return without
+      ** generating any VDBE code. If one can be found, then jump over
+      ** the entire DELETE if there are no outstanding deferred constraints
+      ** when this statement is run. */
+      FKey *p;
+      for(p=pTab->pFKey; p; p=p->pNextFrom){
+        if( p->isDeferred || (db->flags & SQLITE_DeferFKs) ) break;
+      }
+      if( !p ) return;
+      iSkip = sqlite3VdbeMakeLabel(v);
+      sqlite3VdbeAddOp2(v, OP_FkIfZero, 1, iSkip); VdbeCoverage(v);
+    }
+
+    pParse->disableTriggers = 1;
+    sqlite3DeleteFrom(pParse, sqlite3SrcListDup(db, pName, 0), 0);
+    pParse->disableTriggers = 0;
+
+    /* If the DELETE has generated immediate foreign key constraint
+    ** violations, halt the VDBE and return an error at this point, before
+    ** any modifications to the schema are made. This is because statement
+    ** transactions are not able to rollback schema changes.
+    **
+    ** If the SQLITE_DeferFKs flag is set, then this is not required, as
+    ** the statement transaction will not be rolled back even if FK
+    ** constraints are violated.
+    */
+    if( (db->flags & SQLITE_DeferFKs)==0 ){
+      sqlite3VdbeAddOp2(v, OP_FkIfZero, 0, sqlite3VdbeCurrentAddr(v)+2);
+      VdbeCoverage(v);
+      sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY,
+          OE_Abort, 0, P4_STATIC, P5_ConstraintFK);
+    }
+
+    if( iSkip ){
+      sqlite3VdbeResolveLabel(v, iSkip);
+    }
+  }
+}
+
+
+/*
+** The second argument points to an FKey object representing a foreign key
+** for which pTab is the child table. An UPDATE statement against pTab
+** is currently being processed. For each column of the table that is
+** actually updated, the corresponding element in the aChange[] array
+** is zero or greater (if a column is unmodified the corresponding element
+** is set to -1). If the rowid column is modified by the UPDATE statement
+** the bChngRowid argument is non-zero.
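+**
+** For example, for a child table declared along the lines of
+** "CREATE TABLE c(a, b, FOREIGN KEY(b) REFERENCES p)", the statement
+** "UPDATE c SET b=?" is passed an aChange[] array whose entry for
+** column "b" is zero or greater and whose other entries are -1.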
+**
+** This function returns true if any of the columns that are part of the
+** child key for FK constraint *p are modified.
+*/
+static int fkChildIsModified(
+  Table *pTab,                    /* Table being updated */
+  FKey *p,                        /* Foreign key for which pTab is the child */
+  int *aChange,                   /* Array indicating modified columns */
+  int bChngRowid                  /* True if rowid is modified by this update */
+){
+  int i;
+  for(i=0; i<p->nCol; i++){
+    int iChildKey = p->aCol[i].iFrom;
+    if( aChange[iChildKey]>=0 ) return 1;
+    if( iChildKey==pTab->iPKey && bChngRowid ) return 1;
+  }
+  return 0;
+}
+
+/*
+** The second argument points to an FKey object representing a foreign key
+** for which pTab is the parent table. An UPDATE statement against pTab
+** is currently being processed. For each column of the table that is
+** actually updated, the corresponding element in the aChange[] array
+** is zero or greater (if a column is unmodified the corresponding element
+** is set to -1). If the rowid column is modified by the UPDATE statement
+** the bChngRowid argument is non-zero.
+**
+** This function returns true if any of the columns that are part of the
+** parent key for FK constraint *p are modified.
+*/
+static int fkParentIsModified(
+  Table *pTab,
+  FKey *p,
+  int *aChange,
+  int bChngRowid
+){
+  int i;
+  for(i=0; i<p->nCol; i++){
+    char *zKey = p->aCol[i].zCol;
+    int iKey;
+    for(iKey=0; iKey<pTab->nCol; iKey++){
+      if( aChange[iKey]>=0 || (iKey==pTab->iPKey && bChngRowid) ){
+        Column *pCol = &pTab->aCol[iKey];
+        if( zKey ){
+          if( 0==sqlite3StrICmp(pCol->zName, zKey) ) return 1;
+        }else if( pCol->colFlags & COLFLAG_PRIMKEY ){
+          return 1;
+        }
+      }
+    }
+  }
+  return 0;
+}
+
+/*
+** Return true if the parser passed as the first argument is being
+** used to code a trigger that is really a "SET NULL" action belonging
+** to foreign key pFKey.
+*/
+static int isSetNullAction(Parse *pParse, FKey *pFKey){
+  Parse *pTop = sqlite3ParseToplevel(pParse);
+  if( pTop->pTriggerPrg ){
+    Trigger *p = pTop->pTriggerPrg->pTrigger;
+    if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull)
+     || (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull)
+    ){
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/*
+** This function is called when inserting, deleting or updating a row of
+** table pTab to generate VDBE code to perform foreign key constraint
+** processing for the operation.
+**
+** For a DELETE operation, parameter regOld is passed the index of the
+** first register in an array of (pTab->nCol+1) registers containing the
+** rowid of the row being deleted, followed by each of the column values
+** of the row being deleted, from left to right. Parameter regNew is passed
+** zero in this case.
+**
+** For an INSERT operation, regOld is passed zero and regNew is passed the
+** first register of an array of (pTab->nCol+1) registers containing the new
+** row data.
+**
+** For an UPDATE operation, this function is called twice. Once before
+** the original record is deleted from the table using the calling convention
+** described for DELETE. Then again after the original record is deleted
+** but before the new record is inserted using the INSERT convention.
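+**
+** For example, for a table with three columns, if regOld==10 (an
+** arbitrary register number), then register 10 holds the rowid of the
+** row being deleted and registers 11 through 13 hold its three column
+** values, from left to right.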
+*/
+SQLITE_PRIVATE void sqlite3FkCheck(
+  Parse *pParse,                  /* Parse context */
+  Table *pTab,                    /* Row is being deleted from this table */
+  int regOld,                     /* Previous row data is stored here */
+  int regNew,                     /* New row data is stored here */
+  int *aChange,                   /* Array indicating UPDATEd columns (or 0) */
+  int bChngRowid                  /* True if rowid is UPDATEd */
+){
+  sqlite3 *db = pParse->db;       /* Database handle */
+  FKey *pFKey;                    /* Used to iterate through FKs */
+  int iDb;                        /* Index of database containing pTab */
+  const char *zDb;                /* Name of database containing pTab */
+  int isIgnoreErrors = pParse->disableTriggers;
+
+  /* Exactly one of regOld and regNew should be non-zero. */
+  assert( (regOld==0)!=(regNew==0) );
+
+  /* If foreign-keys are disabled, this function is a no-op. */
+  if( (db->flags&SQLITE_ForeignKeys)==0 ) return;
+
+  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+  zDb = db->aDb[iDb].zDbSName;
+
+  /* Loop through all the foreign key constraints for which pTab is the
+  ** child table (the table that the foreign key definition is part of). */
+  for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){
+    Table *pTo;                   /* Parent table of foreign key pFKey */
+    Index *pIdx = 0;              /* Index on key columns in pTo */
+    int *aiFree = 0;
+    int *aiCol;
+    int iCol;
+    int i;
+    int bIgnore = 0;
+
+    if( aChange
+     && sqlite3_stricmp(pTab->zName, pFKey->zTo)!=0
+     && fkChildIsModified(pTab, pFKey, aChange, bChngRowid)==0
+    ){
+      continue;
+    }
+
+    /* Find the parent table of this foreign key. Also find a unique index
+    ** on the parent key columns in the parent table. If either of these
+    ** schema items cannot be located, set an error in pParse and return
+    ** early. */
+    if( pParse->disableTriggers ){
+      pTo = sqlite3FindTable(db, pFKey->zTo, zDb);
+    }else{
+      pTo = sqlite3LocateTable(pParse, 0, pFKey->zTo, zDb);
+    }
+    if( !pTo || sqlite3FkLocateIndex(pParse, pTo, pFKey, &pIdx, &aiFree) ){
+      assert( isIgnoreErrors==0 || (regOld!=0 && regNew==0) );
+      if( !isIgnoreErrors || db->mallocFailed ) return;
+      if( pTo==0 ){
+        /* If isIgnoreErrors is true, then a table is being dropped. In this
+        ** case SQLite runs a "DELETE FROM xxx" on the table being dropped
+        ** before actually dropping it in order to check FK constraints.
+        ** If the parent table of an FK constraint on the current table is
+        ** missing, behave as if it is empty. i.e. decrement the relevant
+        ** FK counter for each row of the current table with non-NULL keys.
+        */
+        Vdbe *v = sqlite3GetVdbe(pParse);
+        int iJump = sqlite3VdbeCurrentAddr(v) + pFKey->nCol + 1;
+        for(i=0; i<pFKey->nCol; i++){
+          int iReg = pFKey->aCol[i].iFrom + regOld + 1;
+          sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump); VdbeCoverage(v);
+        }
+        sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, -1);
+      }
+      continue;
+    }
+    assert( pFKey->nCol==1 || (aiFree && pIdx) );
+
+    if( aiFree ){
+      aiCol = aiFree;
+    }else{
+      iCol = pFKey->aCol[0].iFrom;
+      aiCol = &iCol;
+    }
+    for(i=0; i<pFKey->nCol; i++){
+      if( aiCol[i]==pTab->iPKey ){
+        aiCol[i] = -1;
+      }
+      assert( pIdx==0 || pIdx->aiColumn[i]>=0 );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+      /* Request permission to read the parent key columns. If the
+      ** authorization callback returns SQLITE_IGNORE, behave as if any
+      ** values read from the parent table are NULL. */
+      if( db->xAuth ){
+        int rcauth;
+        char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zName;
+        rcauth = sqlite3AuthReadCol(pParse, pTo->zName, zCol, iDb);
+        bIgnore = (rcauth==SQLITE_IGNORE);
+      }
+#endif
+    }
+
+    /* Take a shared-cache advisory read-lock on the parent table.
Allocate + ** a cursor to use to search the unique index on the parent key columns + ** in the parent table. */ + sqlite3TableLock(pParse, iDb, pTo->tnum, 0, pTo->zName); + pParse->nTab++; + + if( regOld!=0 ){ + /* A row is being removed from the child table. Search for the parent. + ** If the parent does not exist, removing the child row resolves an + ** outstanding foreign key constraint violation. */ + fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regOld, -1, bIgnore); + } + if( regNew!=0 && !isSetNullAction(pParse, pFKey) ){ + /* A row is being added to the child table. If a parent row cannot + ** be found, adding the child row has violated the FK constraint. + ** + ** If this operation is being performed as part of a trigger program + ** that is actually a "SET NULL" action belonging to this very + ** foreign key, then omit this scan altogether. As all child key + ** values are guaranteed to be NULL, it is not possible for adding + ** this row to cause an FK violation. */ + fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regNew, +1, bIgnore); + } + + sqlite3DbFree(db, aiFree); + } + + /* Loop through all the foreign key constraints that refer to this table. + ** (the "child" constraints) */ + for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){ + Index *pIdx = 0; /* Foreign key index for pFKey */ + SrcList *pSrc; + int *aiCol = 0; + + if( aChange && fkParentIsModified(pTab, pFKey, aChange, bChngRowid)==0 ){ + continue; + } + + if( !pFKey->isDeferred && !(db->flags & SQLITE_DeferFKs) + && !pParse->pToplevel && !pParse->isMultiWrite + ){ + assert( regOld==0 && regNew!=0 ); + /* Inserting a single row into a parent table cannot cause (or fix) + ** an immediate foreign key violation. So do nothing in this case. */ + continue; + } + + if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ){ + if( !isIgnoreErrors || db->mallocFailed ) return; + continue; + } + assert( aiCol || pFKey->nCol==1 ); + + /* Create a SrcList structure containing the child table. We need the + ** child table as a SrcList for sqlite3WhereBegin() */ + pSrc = sqlite3SrcListAppend(db, 0, 0, 0); + if( pSrc ){ + struct SrcList_item *pItem = pSrc->a; + pItem->pTab = pFKey->pFrom; + pItem->zName = pFKey->pFrom->zName; + pItem->pTab->nTabRef++; + pItem->iCursor = pParse->nTab++; + + if( regNew!=0 ){ + fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regNew, -1); + } + if( regOld!=0 ){ + int eAction = pFKey->aAction[aChange!=0]; + fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1); + /* If this is a deferred FK constraint, or a CASCADE or SET NULL + ** action applies, then any foreign key violations caused by + ** removing the parent key will be rectified by the action trigger. + ** So do not set the "may-abort" flag in this case. + ** + ** Note 1: If the FK is declared "ON UPDATE CASCADE", then the + ** may-abort flag will eventually be set on this statement anyway + ** (when this function is called as part of processing the UPDATE + ** within the action trigger). + ** + ** Note 2: At first glance it may seem like SQLite could simply omit + ** all OP_FkCounter related scans when either CASCADE or SET NULL + ** applies. The trouble starts if the CASCADE or SET NULL action + ** trigger causes other triggers or action rules attached to the + ** child table to fire. In these cases the fk constraint counters + ** might be set incorrectly if any OP_FkCounter related scans are + ** omitted. 
+        */
+        if( !pFKey->isDeferred && eAction!=OE_Cascade && eAction!=OE_SetNull ){
+          sqlite3MayAbort(pParse);
+        }
+      }
+      pItem->zName = 0;
+      sqlite3SrcListDelete(db, pSrc);
+    }
+    sqlite3DbFree(db, aiCol);
+  }
+}
+
+#define COLUMN_MASK(x) (((x)>31) ? 0xffffffff : ((u32)1<<(x)))
+
+/*
+** This function is called before generating code to update or delete a
+** row contained in table pTab.
+*/
+SQLITE_PRIVATE u32 sqlite3FkOldmask(
+  Parse *pParse,                  /* Parse context */
+  Table *pTab                     /* Table being modified */
+){
+  u32 mask = 0;
+  if( pParse->db->flags&SQLITE_ForeignKeys ){
+    FKey *p;
+    int i;
+    for(p=pTab->pFKey; p; p=p->pNextFrom){
+      for(i=0; i<p->nCol; i++) mask |= COLUMN_MASK(p->aCol[i].iFrom);
+    }
+    for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+      Index *pIdx = 0;
+      sqlite3FkLocateIndex(pParse, pTab, p, &pIdx, 0);
+      if( pIdx ){
+        for(i=0; i<pIdx->nKeyCol; i++){
+          assert( pIdx->aiColumn[i]>=0 );
+          mask |= COLUMN_MASK(pIdx->aiColumn[i]);
+        }
+      }
+    }
+  }
+  return mask;
+}
+
+
+/*
+** This function is called before generating code to update or delete a
+** row contained in table pTab. If the operation is a DELETE, then
+** parameter aChange is passed a NULL value. For an UPDATE, aChange points
+** to an array of size N, where N is the number of columns in table pTab.
+** If the i'th column is not modified by the UPDATE, then the corresponding
+** entry in the aChange[] array is set to -1. If the column is modified,
+** the value is 0 or greater. Parameter chngRowid is set to true if the
+** UPDATE statement modifies the rowid fields of the table.
+**
+** If any foreign key processing will be required, this function returns
+** true. If there is no foreign key related processing, this function
+** returns false.
+*/
+SQLITE_PRIVATE int sqlite3FkRequired(
+  Parse *pParse,                  /* Parse context */
+  Table *pTab,                    /* Table being modified */
+  int *aChange,                   /* Non-NULL for UPDATE operations */
+  int chngRowid                   /* True for UPDATE that affects rowid */
+){
+  if( pParse->db->flags&SQLITE_ForeignKeys ){
+    if( !aChange ){
+      /* A DELETE operation. Foreign key processing is required if the
+      ** table in question is either the child or parent table for any
+      ** foreign key constraint. */
+      return (sqlite3FkReferences(pTab) || pTab->pFKey);
+    }else{
+      /* This is an UPDATE. Foreign key processing is only required if the
+      ** operation modifies one or more child or parent key columns. */
+      FKey *p;
+
+      /* Check if any child key columns are being modified. */
+      for(p=pTab->pFKey; p; p=p->pNextFrom){
+        if( fkChildIsModified(pTab, p, aChange, chngRowid) ) return 1;
+      }
+
+      /* Check if any parent key columns are being modified. */
+      for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){
+        if( fkParentIsModified(pTab, p, aChange, chngRowid) ) return 1;
+      }
+    }
+  }
+  return 0;
+}
+
+/*
+** This function is called when an UPDATE or DELETE operation is being
+** compiled on table pTab, which is the parent table of foreign-key pFKey.
+** If the current operation is an UPDATE, then the pChanges parameter is
+** passed a pointer to the list of columns being modified. If it is a
+** DELETE, pChanges is passed a NULL pointer.
+**
+** It returns a pointer to a Trigger structure containing a trigger
+** equivalent to the ON UPDATE or ON DELETE action specified by pFKey.
+** If the action is "NO ACTION" or "RESTRICT", then a NULL pointer is
+** returned (these actions require no special handling by the triggers
+** sub-system, code for them is created by fkScanChildren()).
+**
+** For example, if pFKey is the foreign key and pTab is table "p" in
+** the following schema:
+**
+**   CREATE TABLE p(pk PRIMARY KEY);
+**   CREATE TABLE c(ck REFERENCES p ON DELETE CASCADE);
+**
+** then the returned trigger structure is equivalent to:
+**
+**   CREATE TRIGGER ... DELETE ON p BEGIN
+**     DELETE FROM c WHERE ck = old.pk;
+**   END;
+**
+** The returned pointer is cached as part of the foreign key object. It
+** is eventually freed along with the rest of the foreign key object by
+** sqlite3FkDelete().
+*/
+static Trigger *fkActionTrigger(
+  Parse *pParse,                  /* Parse context */
+  Table *pTab,                    /* Table being updated or deleted from */
+  FKey *pFKey,                    /* Foreign key to get action for */
+  ExprList *pChanges              /* Change-list for UPDATE, NULL for DELETE */
+){
+  sqlite3 *db = pParse->db;       /* Database handle */
+  int action;                     /* One of OE_None, OE_Cascade etc. */
+  Trigger *pTrigger;              /* Trigger definition to return */
+  int iAction = (pChanges!=0);    /* 1 for UPDATE, 0 for DELETE */
+
+  action = pFKey->aAction[iAction];
+  if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){
+    return 0;
+  }
+  pTrigger = pFKey->apTrigger[iAction];
+
+  if( action!=OE_None && !pTrigger ){
+    char const *zFrom;            /* Name of child table */
+    int nFrom;                    /* Length in bytes of zFrom */
+    Index *pIdx = 0;              /* Parent key index for this FK */
+    int *aiCol = 0;               /* child table cols -> parent key cols */
+    TriggerStep *pStep = 0;       /* First (only) step of trigger program */
+    Expr *pWhere = 0;             /* WHERE clause of trigger step */
+    ExprList *pList = 0;          /* Changes list if ON UPDATE CASCADE */
+    Select *pSelect = 0;          /* If RESTRICT, "SELECT RAISE(...)" */
+    int i;                        /* Iterator variable */
+    Expr *pWhen = 0;              /* WHEN clause for the trigger */
+
+    if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ) return 0;
+    assert( aiCol || pFKey->nCol==1 );
+
+    for(i=0; i<pFKey->nCol; i++){
+      Token tOld = { "old", 3 };  /* Literal "old" token */
+      Token tNew = { "new", 3 };  /* Literal "new" token */
+      Token tFromCol;             /* Name of column in child table */
+      Token tToCol;               /* Name of column in parent table */
+      int iFromCol;               /* Idx of column in child table */
+      Expr *pEq;                  /* tFromCol = OLD.tToCol */
+
+      iFromCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom;
+      assert( iFromCol>=0 );
+      assert( pIdx!=0 || (pTab->iPKey>=0 && pTab->iPKey<pTab->nCol) );
+      assert( pIdx==0 || pIdx->aiColumn[i]>=0 );
+      sqlite3TokenInit(&tToCol,
+                   pTab->aCol[pIdx ? pIdx->aiColumn[i] : pTab->iPKey].zName);
+      sqlite3TokenInit(&tFromCol, pFKey->pFrom->aCol[iFromCol].zName);
+
+      /* Create the expression "OLD.zToCol = zFromCol". It is important
+      ** that the "OLD.zToCol" term is on the LHS of the = operator, so
+      ** that the affinity and collation sequence associated with the
+      ** parent table are used for the comparison. */
+      pEq = sqlite3PExpr(pParse, TK_EQ,
+          sqlite3PExpr(pParse, TK_DOT,
+            sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
+            sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)),
+          sqlite3ExprAlloc(db, TK_ID, &tFromCol, 0)
+      );
+      pWhere = sqlite3ExprAnd(db, pWhere, pEq);
+
+      /* For ON UPDATE, construct the next term of the WHEN clause.
+      ** The final WHEN clause will be like this:
+      **
+      **    WHEN NOT(old.col1 IS new.col1 AND ...
AND old.colN IS new.colN) + */ + if( pChanges ){ + pEq = sqlite3PExpr(pParse, TK_IS, + sqlite3PExpr(pParse, TK_DOT, + sqlite3ExprAlloc(db, TK_ID, &tOld, 0), + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)), + sqlite3PExpr(pParse, TK_DOT, + sqlite3ExprAlloc(db, TK_ID, &tNew, 0), + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)) + ); + pWhen = sqlite3ExprAnd(db, pWhen, pEq); + } + + if( action!=OE_Restrict && (action!=OE_Cascade || pChanges) ){ + Expr *pNew; + if( action==OE_Cascade ){ + pNew = sqlite3PExpr(pParse, TK_DOT, + sqlite3ExprAlloc(db, TK_ID, &tNew, 0), + sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)); + }else if( action==OE_SetDflt ){ + Expr *pDflt = pFKey->pFrom->aCol[iFromCol].pDflt; + if( pDflt ){ + pNew = sqlite3ExprDup(db, pDflt, 0); + }else{ + pNew = sqlite3ExprAlloc(db, TK_NULL, 0, 0); + } + }else{ + pNew = sqlite3ExprAlloc(db, TK_NULL, 0, 0); + } + pList = sqlite3ExprListAppend(pParse, pList, pNew); + sqlite3ExprListSetName(pParse, pList, &tFromCol, 0); + } + } + sqlite3DbFree(db, aiCol); + + zFrom = pFKey->pFrom->zName; + nFrom = sqlite3Strlen30(zFrom); + + if( action==OE_Restrict ){ + Token tFrom; + Expr *pRaise; + + tFrom.z = zFrom; + tFrom.n = nFrom; + pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); + if( pRaise ){ + pRaise->affinity = OE_Abort; + } + pSelect = sqlite3SelectNew(pParse, + sqlite3ExprListAppend(pParse, 0, pRaise), + sqlite3SrcListAppend(db, 0, &tFrom, 0), + pWhere, + 0, 0, 0, 0, 0, 0 + ); + pWhere = 0; + } + + /* Disable lookaside memory allocation */ + db->lookaside.bDisable++; + + pTrigger = (Trigger *)sqlite3DbMallocZero(db, + sizeof(Trigger) + /* struct Trigger */ + sizeof(TriggerStep) + /* Single step in trigger program */ + nFrom + 1 /* Space for pStep->zTarget */ + ); + if( pTrigger ){ + pStep = pTrigger->step_list = (TriggerStep *)&pTrigger[1]; + pStep->zTarget = (char *)&pStep[1]; + memcpy((char *)pStep->zTarget, zFrom, nFrom); + + pStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); + pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE); + pStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); + if( pWhen ){ + pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0); + pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); + } + } + + /* Re-enable the lookaside buffer, if it was disabled earlier. */ + db->lookaside.bDisable--; + + sqlite3ExprDelete(db, pWhere); + sqlite3ExprDelete(db, pWhen); + sqlite3ExprListDelete(db, pList); + sqlite3SelectDelete(db, pSelect); + if( db->mallocFailed==1 ){ + fkTriggerDelete(db, pTrigger); + return 0; + } + assert( pStep!=0 ); + + switch( action ){ + case OE_Restrict: + pStep->op = TK_SELECT; + break; + case OE_Cascade: + if( !pChanges ){ + pStep->op = TK_DELETE; + break; + } + default: + pStep->op = TK_UPDATE; + } + pStep->pTrig = pTrigger; + pTrigger->pSchema = pTab->pSchema; + pTrigger->pTabSchema = pTab->pSchema; + pFKey->apTrigger[iAction] = pTrigger; + pTrigger->op = (pChanges ? TK_UPDATE : TK_DELETE); + } + + return pTrigger; +} + +/* +** This function is called when deleting or updating a row to implement +** any required CASCADE, SET NULL or SET DEFAULT actions. 
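+**
+** For example, given a schema like:
+**
+**   CREATE TABLE p(a PRIMARY KEY);
+**   CREATE TABLE c(x REFERENCES p(a) ON DELETE CASCADE);
+**
+** a DELETE on table "p" causes this function to invoke the trigger
+** sub-program that deletes the corresponding rows from table "c".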
+*/ +SQLITE_PRIVATE void sqlite3FkActions( + Parse *pParse, /* Parse context */ + Table *pTab, /* Table being updated or deleted from */ + ExprList *pChanges, /* Change-list for UPDATE, NULL for DELETE */ + int regOld, /* Address of array containing old row */ + int *aChange, /* Array indicating UPDATEd columns (or 0) */ + int bChngRowid /* True if rowid is UPDATEd */ +){ + /* If foreign-key support is enabled, iterate through all FKs that + ** refer to table pTab. If there is an action associated with the FK + ** for this operation (either update or delete), invoke the associated + ** trigger sub-program. */ + if( pParse->db->flags&SQLITE_ForeignKeys ){ + FKey *pFKey; /* Iterator variable */ + for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){ + if( aChange==0 || fkParentIsModified(pTab, pFKey, aChange, bChngRowid) ){ + Trigger *pAct = fkActionTrigger(pParse, pTab, pFKey, pChanges); + if( pAct ){ + sqlite3CodeRowTriggerDirect(pParse, pAct, pTab, regOld, OE_Abort, 0); + } + } + } + } +} + +#endif /* ifndef SQLITE_OMIT_TRIGGER */ + +/* +** Free all memory associated with foreign key definitions attached to +** table pTab. Remove the deleted foreign keys from the Schema.fkeyHash +** hash table. +*/ +SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){ + FKey *pFKey; /* Iterator variable */ + FKey *pNext; /* Copy of pFKey->pNextFrom */ + + assert( db==0 || IsVirtual(pTab) + || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); + for(pFKey=pTab->pFKey; pFKey; pFKey=pNext){ + + /* Remove the FK from the fkeyHash hash table. */ + if( !db || db->pnBytesFreed==0 ){ + if( pFKey->pPrevTo ){ + pFKey->pPrevTo->pNextTo = pFKey->pNextTo; + }else{ + void *p = (void *)pFKey->pNextTo; + const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo); + sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p); + } + if( pFKey->pNextTo ){ + pFKey->pNextTo->pPrevTo = pFKey->pPrevTo; + } + } + + /* EV: R-30323-21917 Each foreign key constraint in SQLite is + ** classified as either immediate or deferred. + */ + assert( pFKey->isDeferred==0 || pFKey->isDeferred==1 ); + + /* Delete any triggers created to implement actions for this FK. */ +#ifndef SQLITE_OMIT_TRIGGER + fkTriggerDelete(db, pFKey->apTrigger[0]); + fkTriggerDelete(db, pFKey->apTrigger[1]); +#endif + + pNext = pFKey->pNextFrom; + sqlite3DbFree(db, pFKey); + } +} +#endif /* ifndef SQLITE_OMIT_FOREIGN_KEY */ + +/************** End of fkey.c ************************************************/ +/************** Begin file insert.c ******************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains C code routines that are called by the parser +** to handle INSERT statements in SQLite. +*/ +/* #include "sqliteInt.h" */ + +/* +** Generate code that will +** +** (1) acquire a lock for table pTab then +** (2) open pTab as cursor iCur. +** +** If pTab is a WITHOUT ROWID table, then it is the PRIMARY KEY index +** for that table that is actually opened. 
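+**
+** For example, for a table declared as
+** "CREATE TABLE t(a PRIMARY KEY, b) WITHOUT ROWID", opening "t" really
+** opens a cursor on its PRIMARY KEY index, because a WITHOUT ROWID table
+** is stored as that index.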
+*/
+SQLITE_PRIVATE void sqlite3OpenTable(
+  Parse *pParse,  /* Generate code into this VDBE */
+  int iCur,       /* The cursor number of the table */
+  int iDb,        /* The database index in sqlite3.aDb[] */
+  Table *pTab,    /* The table to be opened */
+  int opcode      /* OP_OpenRead or OP_OpenWrite */
+){
+  Vdbe *v;
+  assert( !IsVirtual(pTab) );
+  v = sqlite3GetVdbe(pParse);
+  assert( opcode==OP_OpenWrite || opcode==OP_OpenRead );
+  sqlite3TableLock(pParse, iDb, pTab->tnum,
+                   (opcode==OP_OpenWrite)?1:0, pTab->zName);
+  if( HasRowid(pTab) ){
+    sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nCol);
+    VdbeComment((v, "%s", pTab->zName));
+  }else{
+    Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+    assert( pPk!=0 );
+    assert( pPk->tnum==pTab->tnum );
+    sqlite3VdbeAddOp3(v, opcode, iCur, pPk->tnum, iDb);
+    sqlite3VdbeSetP4KeyInfo(pParse, pPk);
+    VdbeComment((v, "%s", pTab->zName));
+  }
+}
+
+/*
+** Return a pointer to the column affinity string associated with index
+** pIdx. A column affinity string has one character for each column in
+** the table, according to the affinity of the column:
+**
+**  Character      Column affinity
+**  ------------------------------
+**  'A'            BLOB
+**  'B'            TEXT
+**  'C'            NUMERIC
+**  'D'            INTEGER
+**  'E'            REAL
+**
+** An extra 'D' is appended to the end of the string to cover the
+** rowid that appears as the last column in every index.
+**
+** Memory for the buffer containing the column index affinity string
+** is managed along with the rest of the Index structure. It will be
+** released when sqlite3DeleteIndex() is called.
+*/
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
+  if( !pIdx->zColAff ){
+    /* The first time a column affinity string for a particular index is
+    ** required, it is allocated and populated here. It is then stored as
+    ** a member of the Index structure for subsequent use.
+    **
+    ** The column affinity string will eventually be deleted by
+    ** sqlite3DeleteIndex() when the Index structure itself is cleaned
+    ** up.
+    */
+    int n;
+    Table *pTab = pIdx->pTable;
+    pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1);
+    if( !pIdx->zColAff ){
+      sqlite3OomFault(db);
+      return 0;
+    }
+    for(n=0; n<pIdx->nColumn; n++){
+      i16 x = pIdx->aiColumn[n];
+      if( x>=0 ){
+        pIdx->zColAff[n] = pTab->aCol[x].affinity;
+      }else if( x==XN_ROWID ){
+        pIdx->zColAff[n] = SQLITE_AFF_INTEGER;
+      }else{
+        char aff;
+        assert( x==XN_EXPR );
+        assert( pIdx->aColExpr!=0 );
+        aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr);
+        if( aff==0 ) aff = SQLITE_AFF_BLOB;
+        pIdx->zColAff[n] = aff;
+      }
+    }
+    pIdx->zColAff[n] = 0;
+  }
+
+  return pIdx->zColAff;
+}
+
+/*
+** Compute the affinity string for table pTab, if it has not already been
+** computed. As an optimization, omit trailing SQLITE_AFF_BLOB affinities.
+**
+** If the affinity string exists (if it is not entirely SQLITE_AFF_BLOB
+** values) and if iReg>0 then code an OP_Affinity opcode that will set the
+** affinities for register iReg and following. Or if the affinity string
+** exists and iReg==0, then just set the P4 operand of the previous opcode
+** (which should be an OP_MakeRecord) to the affinity string.
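+**
+** For example, for a table declared as "CREATE TABLE t1(a INTEGER, b TEXT,
+** c BLOB)", the affinity string is "DB": 'D' for the INTEGER column "a",
+** 'B' for the TEXT column "b", with the trailing SQLITE_AFF_BLOB ('A')
+** affinity of column "c" omitted.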
+**
+** A column affinity string has one character per column:
+**
+**  Character      Column affinity
+**  ------------------------------
+**  'A'            BLOB
+**  'B'            TEXT
+**  'C'            NUMERIC
+**  'D'            INTEGER
+**  'E'            REAL
+*/
+SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
+  int i;
+  char *zColAff = pTab->zColAff;
+  if( zColAff==0 ){
+    sqlite3 *db = sqlite3VdbeDb(v);
+    zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1);
+    if( !zColAff ){
+      sqlite3OomFault(db);
+      return;
+    }
+
+    for(i=0; i<pTab->nCol; i++){
+      zColAff[i] = pTab->aCol[i].affinity;
+    }
+    do{
+      zColAff[i--] = 0;
+    }while( i>=0 && zColAff[i]==SQLITE_AFF_BLOB );
+    pTab->zColAff = zColAff;
+  }
+  i = sqlite3Strlen30(zColAff);
+  if( i ){
+    if( iReg ){
+      sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i);
+    }else{
+      sqlite3VdbeChangeP4(v, -1, zColAff, i);
+    }
+  }
+}
+
+/*
+** Return non-zero if the table pTab in database iDb or any of its indices
+** have been opened at any point in the VDBE program. This is used to see if
+** a statement of the form "INSERT INTO <iDb, pTab> SELECT ..." can
+** run without using a temporary table for the results of the SELECT.
+*/
+static int readsTable(Parse *p, int iDb, Table *pTab){
+  Vdbe *v = sqlite3GetVdbe(p);
+  int i;
+  int iEnd = sqlite3VdbeCurrentAddr(v);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  VTable *pVTab = IsVirtual(pTab) ? sqlite3GetVTable(p->db, pTab) : 0;
+#endif
+
+  for(i=1; i<iEnd; i++){
+    VdbeOp *pOp = sqlite3VdbeGetOp(v, i);
+    assert( pOp!=0 );
+    if( pOp->opcode==OP_OpenRead && pOp->p3==iDb ){
+      Index *pIndex;
+      int tnum = pOp->p2;
+      if( tnum==pTab->tnum ){
+        return 1;
+      }
+      for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){
+        if( tnum==pIndex->tnum ){
+          return 1;
+        }
+      }
+    }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+    if( pOp->opcode==OP_VOpen && pOp->p4.pVtab==pVTab ){
+      assert( pOp->p4.pVtab!=0 );
+      assert( pOp->p4type==P4_VTAB );
+      return 1;
+    }
+#endif
+  }
+  return 0;
+}
+
+#ifndef SQLITE_OMIT_AUTOINCREMENT
+/*
+** Locate or create an AutoincInfo structure associated with table pTab
+** which is in database iDb. Return the register number for the register
+** that holds the maximum rowid. Return zero if pTab is not an AUTOINCREMENT
+** table. (Also return zero when doing a VACUUM since we do not want to
+** update the AUTOINCREMENT counters during a VACUUM.)
+**
+** There is at most one AutoincInfo structure per table even if the
+** same table is autoincremented multiple times due to inserts within
+** triggers. A new AutoincInfo structure is created if this is the
+** first use of table pTab. On 2nd and subsequent uses, the original
+** AutoincInfo structure is used.
+**
+** Three memory locations are allocated:
+**
+**   (1) Register to hold the name of the pTab table.
+**   (2) Register to hold the maximum ROWID of pTab.
+**   (3) Register to hold the rowid in sqlite_sequence of pTab
+**
+** The 2nd register is the one that is returned. That is all the
+** insert routine needs to know about.
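+**
+** For example, for a table declared as
+** "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT, x)", the largest
+** rowid ever used is persisted in the sqlite_sequence table as a row
+** with name=='t'; the registers allocated here are used to read and
+** write back that row.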
+*/ +static int autoIncBegin( + Parse *pParse, /* Parsing context */ + int iDb, /* Index of the database holding pTab */ + Table *pTab /* The table we are writing to */ +){ + int memId = 0; /* Register holding maximum rowid */ + if( (pTab->tabFlags & TF_Autoincrement)!=0 + && (pParse->db->flags & SQLITE_Vacuum)==0 + ){ + Parse *pToplevel = sqlite3ParseToplevel(pParse); + AutoincInfo *pInfo; + + pInfo = pToplevel->pAinc; + while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; } + if( pInfo==0 ){ + pInfo = sqlite3DbMallocRawNN(pParse->db, sizeof(*pInfo)); + if( pInfo==0 ) return 0; + pInfo->pNext = pToplevel->pAinc; + pToplevel->pAinc = pInfo; + pInfo->pTab = pTab; + pInfo->iDb = iDb; + pToplevel->nMem++; /* Register to hold name of table */ + pInfo->regCtr = ++pToplevel->nMem; /* Max rowid register */ + pToplevel->nMem++; /* Rowid in sqlite_sequence */ + } + memId = pInfo->regCtr; + } + return memId; +} + +/* +** This routine generates code that will initialize all of the +** register used by the autoincrement tracker. +*/ +SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){ + AutoincInfo *p; /* Information about an AUTOINCREMENT */ + sqlite3 *db = pParse->db; /* The database connection */ + Db *pDb; /* Database only autoinc table */ + int memId; /* Register holding max rowid */ + Vdbe *v = pParse->pVdbe; /* VDBE under construction */ + + /* This routine is never called during trigger-generation. It is + ** only called from the top-level */ + assert( pParse->pTriggerTab==0 ); + assert( sqlite3IsToplevel(pParse) ); + + assert( v ); /* We failed long ago if this is not so */ + for(p = pParse->pAinc; p; p = p->pNext){ + static const int iLn = VDBE_OFFSET_LINENO(2); + static const VdbeOpList autoInc[] = { + /* 0 */ {OP_Null, 0, 0, 0}, + /* 1 */ {OP_Rewind, 0, 9, 0}, + /* 2 */ {OP_Column, 0, 0, 0}, + /* 3 */ {OP_Ne, 0, 7, 0}, + /* 4 */ {OP_Rowid, 0, 0, 0}, + /* 5 */ {OP_Column, 0, 1, 0}, + /* 6 */ {OP_Goto, 0, 9, 0}, + /* 7 */ {OP_Next, 0, 2, 0}, + /* 8 */ {OP_Integer, 0, 0, 0}, + /* 9 */ {OP_Close, 0, 0, 0} + }; + VdbeOp *aOp; + pDb = &db->aDb[p->iDb]; + memId = p->regCtr; + assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); + sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenRead); + sqlite3VdbeLoadString(v, memId-1, p->pTab->zName); + aOp = sqlite3VdbeAddOpList(v, ArraySize(autoInc), autoInc, iLn); + if( aOp==0 ) break; + aOp[0].p2 = memId; + aOp[0].p3 = memId+1; + aOp[2].p3 = memId; + aOp[3].p1 = memId-1; + aOp[3].p3 = memId; + aOp[3].p5 = SQLITE_JUMPIFNULL; + aOp[4].p2 = memId+1; + aOp[5].p3 = memId; + aOp[8].p2 = memId; + } +} + +/* +** Update the maximum rowid for an autoincrement calculation. +** +** This routine should be called when the regRowid register holds a +** new rowid that is about to be inserted. If that new rowid is +** larger than the maximum rowid in the memId memory cell, then the +** memory cell is updated. +*/ +static void autoIncStep(Parse *pParse, int memId, int regRowid){ + if( memId>0 ){ + sqlite3VdbeAddOp2(pParse->pVdbe, OP_MemMax, memId, regRowid); + } +} + +/* +** This routine generates the code needed to write autoincrement +** maximum rowid values back into the sqlite_sequence register. +** Every statement that might do an INSERT into an autoincrement +** table (either directly or through triggers) needs to call this +** routine just before the "exit" code. 
+*/ +static SQLITE_NOINLINE void autoIncrementEnd(Parse *pParse){ + AutoincInfo *p; + Vdbe *v = pParse->pVdbe; + sqlite3 *db = pParse->db; + + assert( v ); + for(p = pParse->pAinc; p; p = p->pNext){ + static const int iLn = VDBE_OFFSET_LINENO(2); + static const VdbeOpList autoIncEnd[] = { + /* 0 */ {OP_NotNull, 0, 2, 0}, + /* 1 */ {OP_NewRowid, 0, 0, 0}, + /* 2 */ {OP_MakeRecord, 0, 2, 0}, + /* 3 */ {OP_Insert, 0, 0, 0}, + /* 4 */ {OP_Close, 0, 0, 0} + }; + VdbeOp *aOp; + Db *pDb = &db->aDb[p->iDb]; + int iRec; + int memId = p->regCtr; + + iRec = sqlite3GetTempReg(pParse); + assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); + sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); + aOp = sqlite3VdbeAddOpList(v, ArraySize(autoIncEnd), autoIncEnd, iLn); + if( aOp==0 ) break; + aOp[0].p1 = memId+1; + aOp[1].p2 = memId+1; + aOp[2].p1 = memId-1; + aOp[2].p3 = iRec; + aOp[3].p2 = iRec; + aOp[3].p3 = memId+1; + aOp[3].p5 = OPFLAG_APPEND; + sqlite3ReleaseTempReg(pParse, iRec); + } +} +SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ + if( pParse->pAinc ) autoIncrementEnd(pParse); +} +#else +/* +** If SQLITE_OMIT_AUTOINCREMENT is defined, then the three routines +** above are all no-ops +*/ +# define autoIncBegin(A,B,C) (0) +# define autoIncStep(A,B,C) +#endif /* SQLITE_OMIT_AUTOINCREMENT */ + + +/* Forward declaration */ +static int xferOptimization( + Parse *pParse, /* Parser context */ + Table *pDest, /* The table we are inserting into */ + Select *pSelect, /* A SELECT statement to use as the data source */ + int onError, /* How to handle constraint errors */ + int iDbDest /* The database of pDest */ +); + +/* +** This routine is called to handle SQL of the following forms: +** +** insert into TABLE (IDLIST) values(EXPRLIST),(EXPRLIST),... +** insert into TABLE (IDLIST) select +** insert into TABLE (IDLIST) default values +** +** The IDLIST following the table name is always optional. If omitted, +** then a list of all (non-hidden) columns for the table is substituted. +** The IDLIST appears in the pColumn parameter. pColumn is NULL if IDLIST +** is omitted. +** +** For the pSelect parameter holds the values to be inserted for the +** first two forms shown above. A VALUES clause is really just short-hand +** for a SELECT statement that omits the FROM clause and everything else +** that follows. If the pSelect parameter is NULL, that means that the +** DEFAULT VALUES form of the INSERT statement is intended. +** +** The code generated follows one of four templates. For a simple +** insert with data coming from a single-row VALUES clause, the code executes +** once straight down through. Pseudo-code follows (we call this +** the "1st template"): +** +** open write cursor to
<table> and its indices
+**         put VALUES clause expressions into registers
+**         write the resulting record into <table>
+**         cleanup
+**
+** The three remaining templates assume the statement is of the form
+**
+**   INSERT INTO <table> SELECT ...
+**
+** If the SELECT clause is of the restricted form "SELECT * FROM <table2>" -
+** in other words if the SELECT pulls all columns from a single table
+** and there is no WHERE or LIMIT or GROUP BY or ORDER BY clauses, and
+** if <table2> and <table1> are distinct tables but have identical
+** schemas, including all the same indices, then a special optimization
+** is invoked that copies raw records from <table2> over to <table1>.
+** See the xferOptimization() function for the implementation of this
+** template.  This is the 2nd template.
+**
+**         open a write cursor to <table>
+**         open read cursor on <table2>
+**         transfer all records in <table2> over to <table>
+**         close cursors
+**         foreach index on <table>
+**           open a write cursor on the <table> index
+**           open a read cursor on the corresponding <table2> index
+**           transfer all records from the read to the write cursors
+**           close cursors
+**         end foreach
+**
+** The 3rd template is for when the second template does not apply
+** and the SELECT clause does not read from <table> at any time.
+** The generated code follows this template:
+**
+**         X <- A
+**         goto B
+**      A: setup for the SELECT
+**         loop over the rows in the SELECT
+**           load values into registers R..R+n
+**           yield X
+**         end loop
+**         cleanup after the SELECT
+**         end-coroutine X
+**      B: open write cursor to <table> and its indices
+**      C: yield X, at EOF goto D
+**         insert the select result into <table> from R..R+n
+**         goto C
+**      D: cleanup
+**
+** The 4th template is used if the insert statement takes its
+** values from a SELECT but the data is being inserted into a table
+** that is also read as part of the SELECT.  In this case, we have
+** to use an intermediate table to store the results of the select.
+** The template is like this:
+**
+**         X <- A
+**         goto B
+**      A: setup for the SELECT
+**         loop over the tables in the SELECT
+**           load value into register R..R+n
+**           yield X
+**         end loop
+**         cleanup after the SELECT
+**         end co-routine R
+**      B: open temp table
+**      L: yield X, at EOF goto M
+**         insert row from R..R+n into temp table
+**         goto L
+**      M: open write cursor to <table> and its indices
+**         rewind temp table
+**      C: loop over rows of intermediate table
+**           transfer values from intermediate table into <table>
    +** end loop +** D: cleanup +*/ +SQLITE_PRIVATE void sqlite3Insert( + Parse *pParse, /* Parser context */ + SrcList *pTabList, /* Name of table into which we are inserting */ + Select *pSelect, /* A SELECT statement to use as the data source */ + IdList *pColumn, /* Column names corresponding to IDLIST. */ + int onError /* How to handle constraint errors */ +){ + sqlite3 *db; /* The main database structure */ + Table *pTab; /* The table to insert into. aka TABLE */ + char *zTab; /* Name of the table into which we are inserting */ + int i, j; /* Loop counters */ + Vdbe *v; /* Generate code into this virtual machine */ + Index *pIdx; /* For looping over indices of the table */ + int nColumn; /* Number of columns in the data */ + int nHidden = 0; /* Number of hidden columns if TABLE is virtual */ + int iDataCur = 0; /* VDBE cursor that is the main data repository */ + int iIdxCur = 0; /* First index cursor */ + int ipkColumn = -1; /* Column that is the INTEGER PRIMARY KEY */ + int endOfLoop; /* Label for the end of the insertion loop */ + int srcTab = 0; /* Data comes from this temporary cursor if >=0 */ + int addrInsTop = 0; /* Jump to label "D" */ + int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */ + SelectDest dest; /* Destination for SELECT on rhs of INSERT */ + int iDb; /* Index of database holding TABLE */ + u8 useTempTable = 0; /* Store SELECT results in intermediate table */ + u8 appendFlag = 0; /* True if the insert is likely to be an append */ + u8 withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */ + u8 bIdListInOrder; /* True if IDLIST is in table order */ + ExprList *pList = 0; /* List of VALUES() to be inserted */ + + /* Register allocations */ + int regFromSelect = 0;/* Base register for data coming from SELECT */ + int regAutoinc = 0; /* Register holding the AUTOINCREMENT counter */ + int regRowCount = 0; /* Memory cell used for the row counter */ + int regIns; /* Block of regs holding rowid+data being inserted */ + int regRowid; /* registers holding insert rowid */ + int regData; /* register holding first column to insert */ + int *aRegIdx = 0; /* One register allocated to each index */ + +#ifndef SQLITE_OMIT_TRIGGER + int isView; /* True if attempting to insert into a view */ + Trigger *pTrigger; /* List of triggers on pTab, if required */ + int tmask; /* Mask of trigger times */ +#endif + + db = pParse->db; + memset(&dest, 0, sizeof(dest)); + if( pParse->nErr || db->mallocFailed ){ + goto insert_cleanup; + } + + /* If the Select object is really just a simple VALUES() list with a + ** single row (the common case) then keep that one row of values + ** and discard the other (unused) parts of the pSelect object + */ + if( pSelect && (pSelect->selFlags & SF_Values)!=0 && pSelect->pPrior==0 ){ + pList = pSelect->pEList; + pSelect->pEList = 0; + sqlite3SelectDelete(db, pSelect); + pSelect = 0; + } + + /* Locate the table into which we will be inserting new information. 
+  */
+  assert( pTabList->nSrc==1 );
+  zTab = pTabList->a[0].zName;
+  if( NEVER(zTab==0) ) goto insert_cleanup;
+  pTab = sqlite3SrcListLookup(pParse, pTabList);
+  if( pTab==0 ){
+    goto insert_cleanup;
+  }
+  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+  assert( iDb<db->nDb );
+  if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0,
+                       db->aDb[iDb].zDbSName) ){
+    goto insert_cleanup;
+  }
+  withoutRowid = !HasRowid(pTab);
+
+  /* Figure out if we have any triggers and if the table being
+  ** inserted into is a view
+  */
+#ifndef SQLITE_OMIT_TRIGGER
+  pTrigger = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0, &tmask);
+  isView = pTab->pSelect!=0;
+#else
+# define pTrigger 0
+# define tmask 0
+# define isView 0
+#endif
+#ifdef SQLITE_OMIT_VIEW
+# undef isView
+# define isView 0
+#endif
+  assert( (pTrigger && tmask) || (pTrigger==0 && tmask==0) );
+
+  /* If pTab is really a view, make sure it has been initialized.
+  ** ViewGetColumnNames() is a no-op if pTab is not a view.
+  */
+  if( sqlite3ViewGetColumnNames(pParse, pTab) ){
+    goto insert_cleanup;
+  }
+
+  /* Cannot insert into a read-only table.
+  */
+  if( sqlite3IsReadOnly(pParse, pTab, tmask) ){
+    goto insert_cleanup;
+  }
+
+  /* Allocate a VDBE
+  */
+  v = sqlite3GetVdbe(pParse);
+  if( v==0 ) goto insert_cleanup;
+  if( pParse->nested==0 ) sqlite3VdbeCountChanges(v);
+  sqlite3BeginWriteOperation(pParse, pSelect || pTrigger, iDb);
+
+#ifndef SQLITE_OMIT_XFER_OPT
+  /* If the statement is of the form
+  **
+  **     INSERT INTO <table1> SELECT * FROM <table2>;
+  **
+  ** Then special optimizations can be applied that make the transfer
+  ** very fast and which reduce fragmentation of indices.
+  **
+  ** This is the 2nd template.
+  */
+  if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){
+    assert( !pTrigger );
+    assert( pList==0 );
+    goto insert_end;
+  }
+#endif /* SQLITE_OMIT_XFER_OPT */
+
+  /* If this is an AUTOINCREMENT table, look up the sequence number in the
+  ** sqlite_sequence table and store it in memory cell regAutoinc.
+  */
+  regAutoinc = autoIncBegin(pParse, iDb, pTab);
+
+  /* Allocate registers for holding the rowid of the new row,
+  ** the content of the new row, and the assembled row record.
+  */
+  regRowid = regIns = pParse->nMem+1;
+  pParse->nMem += pTab->nCol + 1;
+  if( IsVirtual(pTab) ){
+    regRowid++;
+    pParse->nMem++;
+  }
+  regData = regRowid+1;
+
+  /* If the INSERT statement included an IDLIST term, then make sure
+  ** all elements of the IDLIST really are columns of the table and
+  ** remember the column indices.
+  **
+  ** If the table has an INTEGER PRIMARY KEY column and that column
+  ** is named in the IDLIST, then record in the ipkColumn variable
+  ** the index into IDLIST of the primary key column.  ipkColumn is
+  ** the index of the primary key as it appears in IDLIST, not as
+  ** it appears in the original table.  (The index of the INTEGER
+  ** PRIMARY KEY in the original table is pTab->iPKey.)
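+  **
+  ** A brief example (an illustrative sketch added here, not upstream
+  ** commentary): for
+  **
+  **     CREATE TABLE t1(a, b INTEGER PRIMARY KEY);
+  **     INSERT INTO t1(b, a) VALUES(1, 2);
+  **
+  ** ipkColumn ends up as 0, the position of "b" within the IDLIST,
+  ** while pTab->iPKey is 1, the position of "b" in the table definition.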
+  */
+  bIdListInOrder = (pTab->tabFlags & TF_OOOHidden)==0;
+  if( pColumn ){
+    for(i=0; i<pColumn->nId; i++){
+      pColumn->a[i].idx = -1;
+    }
+    for(i=0; i<pColumn->nId; i++){
+      for(j=0; j<pTab->nCol; j++){
+        if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){
+          pColumn->a[i].idx = j;
+          if( i!=j ) bIdListInOrder = 0;
+          if( j==pTab->iPKey ){
+            ipkColumn = i;  assert( !withoutRowid );
+          }
+          break;
+        }
+      }
+      if( j>=pTab->nCol ){
+        if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){
+          ipkColumn = i;
+          bIdListInOrder = 0;
+        }else{
+          sqlite3ErrorMsg(pParse, "table %S has no column named %s",
+              pTabList, 0, pColumn->a[i].zName);
+          pParse->checkSchema = 1;
+          goto insert_cleanup;
+        }
+      }
+    }
+  }
+
+  /* Figure out how many columns of data are supplied.  If the data
+  ** is coming from a SELECT statement, then generate a co-routine that
+  ** produces a single row of the SELECT on each invocation.  The
+  ** co-routine is the common header to the 3rd and 4th templates.
+  */
+  if( pSelect ){
+    /* Data is coming from a SELECT or from a multi-row VALUES clause.
+    ** Generate a co-routine to run the SELECT. */
+    int regYield;       /* Register holding co-routine entry-point */
+    int addrTop;        /* Top of the co-routine */
+    int rc;             /* Result code */
+
+    regYield = ++pParse->nMem;
+    addrTop = sqlite3VdbeCurrentAddr(v) + 1;
+    sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop);
+    sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield);
+    dest.iSdst = bIdListInOrder ? regData : 0;
+    dest.nSdst = pTab->nCol;
+    rc = sqlite3Select(pParse, pSelect, &dest);
+    regFromSelect = dest.iSdst;
+    if( rc || db->mallocFailed || pParse->nErr ) goto insert_cleanup;
+    sqlite3VdbeEndCoroutine(v, regYield);
+    sqlite3VdbeJumpHere(v, addrTop - 1);                       /* label B: */
+    assert( pSelect->pEList );
+    nColumn = pSelect->pEList->nExpr;
+
+    /* Set useTempTable to TRUE if the result of the SELECT statement
+    ** should be written into a temporary table (template 4).  Set to
+    ** FALSE if each output row of the SELECT can be written directly into
+    ** the destination table (template 3).
+    **
+    ** A temp table must be used if the table being updated is also one
+    ** of the tables being read by the SELECT statement.  Also use a
+    ** temp table in the case of row triggers.
+    */
+    if( pTrigger || readsTable(pParse, iDb, pTab) ){
+      useTempTable = 1;
+    }
+
+    if( useTempTable ){
+      /* Invoke the coroutine to extract information from the SELECT
+      ** and add it to a transient table srcTab.  The code generated
+      ** here is from the 4th template:
+      **
+      **      B: open temp table
+      **      L: yield X, goto M at EOF
+      **         insert row from R..R+n into temp table
+      **         goto L
+      **      M: ...
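+      **
+      ** For example (an illustrative sketch added here):
+      **
+      **     INSERT INTO t1 SELECT b, a FROM t1;
+      **
+      ** must buffer the whole SELECT in the transient table first, because
+      ** the SELECT reads t1 while the INSERT writes it.  By contrast,
+      ** assuming t1 has no row triggers,
+      **
+      **     INSERT INTO t1 SELECT b, a FROM t2;
+      **
+      ** can consume the co-routine one row at a time (template 3).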
+      */
+      int regRec;          /* Register to hold packed record */
+      int regTempRowid;    /* Register to hold temp table ROWID */
+      int addrL;           /* Label "L" */
+
+      srcTab = pParse->nTab++;
+      regRec = sqlite3GetTempReg(pParse);
+      regTempRowid = sqlite3GetTempReg(pParse);
+      sqlite3VdbeAddOp2(v, OP_OpenEphemeral, srcTab, nColumn);
+      addrL = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v);
+      sqlite3VdbeAddOp3(v, OP_MakeRecord, regFromSelect, nColumn, regRec);
+      sqlite3VdbeAddOp2(v, OP_NewRowid, srcTab, regTempRowid);
+      sqlite3VdbeAddOp3(v, OP_Insert, srcTab, regRec, regTempRowid);
+      sqlite3VdbeGoto(v, addrL);
+      sqlite3VdbeJumpHere(v, addrL);
+      sqlite3ReleaseTempReg(pParse, regRec);
+      sqlite3ReleaseTempReg(pParse, regTempRowid);
+    }
+  }else{
+    /* This is the case if the data for the INSERT is coming from a
+    ** single-row VALUES clause
+    */
+    NameContext sNC;
+    memset(&sNC, 0, sizeof(sNC));
+    sNC.pParse = pParse;
+    srcTab = -1;
+    assert( useTempTable==0 );
+    if( pList ){
+      nColumn = pList->nExpr;
+      if( sqlite3ResolveExprListNames(&sNC, pList) ){
+        goto insert_cleanup;
+      }
+    }else{
+      nColumn = 0;
+    }
+  }
+
+  /* If there is no IDLIST term but the table has an integer primary
+  ** key, then set the ipkColumn variable to the integer primary key
+  ** column index in the original table definition.
+  */
+  if( pColumn==0 && nColumn>0 ){
+    ipkColumn = pTab->iPKey;
+  }
+
+  /* Make sure the number of columns in the source data matches the number
+  ** of columns to be inserted into the table.
+  */
+  for(i=0; i<pTab->nCol; i++){
+    nHidden += (IsHiddenColumn(&pTab->aCol[i]) ? 1 : 0);
+  }
+  if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){
+    sqlite3ErrorMsg(pParse,
+       "table %S has %d columns but %d values were supplied",
+       pTabList, 0, pTab->nCol-nHidden, nColumn);
+    goto insert_cleanup;
+  }
+  if( pColumn!=0 && nColumn!=pColumn->nId ){
+    sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId);
+    goto insert_cleanup;
+  }
+
+  /* Initialize the count of rows to be inserted
+  */
+  if( db->flags & SQLITE_CountRows ){
+    regRowCount = ++pParse->nMem;
+    sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
+  }
+
+  /* If this is not a view, open the table and all indices */
+  if( !isView ){
+    int nIdx;
+    nIdx = sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, -1, 0,
+                                      &iDataCur, &iIdxCur);
+    aRegIdx = sqlite3DbMallocRawNN(db, sizeof(int)*(nIdx+1));
+    if( aRegIdx==0 ){
+      goto insert_cleanup;
+    }
+    for(i=0, pIdx=pTab->pIndex; i<nIdx; pIdx=pIdx->pNext, i++){
+      assert( pIdx );
+      aRegIdx[i] = ++pParse->nMem;
+      pParse->nMem += pIdx->nColumn;
+    }
+  }
+
+  /* This is the top of the main insertion loop */
+  if( useTempTable ){
+    /* This block codes the top of loop only.  The complete loop is the
+    ** following pseudocode (template 4):
+    **
+    **         rewind temp table, if empty goto D
+    **      C: loop over rows of intermediate table
+    **           transfer values from intermediate table into <table>
+    **         end loop
+    **      D: ...
+    */
+    addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab); VdbeCoverage(v);
+    addrCont = sqlite3VdbeCurrentAddr(v);
+  }else if( pSelect ){
+    /* This block codes the top of loop only.  The complete loop is the
+    ** following pseudocode (template 3):
+    **
+    **      C: yield X, at EOF goto D
+    **         insert the select result into <table> from R..R+n
+    **         goto C
+    **      D: ...
+    */
+    addrInsTop = addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm);
+    VdbeCoverage(v);
+  }
+
+  /* Run the BEFORE and INSTEAD OF triggers, if there are any
+  */
+  endOfLoop = sqlite3VdbeMakeLabel(v);
+  if( tmask & TRIGGER_BEFORE ){
+    int regCols = sqlite3GetTempRange(pParse, pTab->nCol+1);
+
+    /* build the NEW.* reference row.  Note that if there is an INTEGER
+    ** PRIMARY KEY into which a NULL is being inserted, that NULL will be
+    ** translated into a unique ID for the row.  But on a BEFORE trigger,
+    ** we do not know what the unique ID will be (because the insert has
+    ** not happened yet) so we substitute a rowid of -1
+    */
+    if( ipkColumn<0 ){
+      sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols);
+    }else{
+      int addr1;
+      assert( !withoutRowid );
+      if( useTempTable ){
+        sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regCols);
+      }else{
+        assert( pSelect==0 );  /* Otherwise useTempTable is true */
+        sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regCols);
+      }
+      addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regCols); VdbeCoverage(v);
+      sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols);
+      sqlite3VdbeJumpHere(v, addr1);
+      sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v);
+    }
+
+    /* Cannot have triggers on a virtual table. If it were possible,
+    ** this block would have to account for hidden column.
+    */
+    assert( !IsVirtual(pTab) );
+
+    /* Create the new column data
+    */
+    for(i=j=0; i<pTab->nCol; i++){
+      if( pColumn ){
+        for(j=0; j<pColumn->nId; j++){
+          if( pColumn->a[j].idx==i ) break;
+        }
+      }
+      if( (!useTempTable && !pList) || (pColumn && j>=pColumn->nId)
+            || (pColumn==0 && IsOrdinaryHiddenColumn(&pTab->aCol[i])) ){
+        sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regCols+i+1);
+      }else if( useTempTable ){
+        sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, regCols+i+1);
+      }else{
+        assert( pSelect==0 ); /* Otherwise useTempTable is true */
+        sqlite3ExprCodeAndCache(pParse, pList->a[j].pExpr, regCols+i+1);
+      }
+      if( pColumn==0 && !IsOrdinaryHiddenColumn(&pTab->aCol[i]) ) j++;
+    }
+
+    /* If this is an INSERT on a view with an INSTEAD OF INSERT trigger,
+    ** do not attempt any conversions before assembling the record.
+    ** If this is a real table, attempt conversions as required by the
+    ** table column affinities.
+    */
+    if( !isView ){
+      sqlite3TableAffinity(v, pTab, regCols+1);
+    }
+
+    /* Fire BEFORE or INSTEAD OF triggers */
+    sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_BEFORE,
+                          pTab, regCols-pTab->nCol-1, onError, endOfLoop);
+
+    sqlite3ReleaseTempRange(pParse, regCols, pTab->nCol+1);
+  }
+
+  /* Compute the content of the next row to insert into a range of
+  ** registers beginning at regIns.
+  */
+  if( !isView ){
+    if( IsVirtual(pTab) ){
+      /* The row that the VUpdate opcode will delete: none */
+      sqlite3VdbeAddOp2(v, OP_Null, 0, regIns);
+    }
+    if( ipkColumn>=0 ){
+      if( useTempTable ){
+        sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regRowid);
+      }else if( pSelect ){
+        sqlite3VdbeAddOp2(v, OP_Copy, regFromSelect+ipkColumn, regRowid);
+      }else{
+        VdbeOp *pOp;
+        sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regRowid);
+        pOp = sqlite3VdbeGetOp(v, -1);
+        if( ALWAYS(pOp) && pOp->opcode==OP_Null && !IsVirtual(pTab) ){
+          appendFlag = 1;
+          pOp->opcode = OP_NewRowid;
+          pOp->p1 = iDataCur;
+          pOp->p2 = regRowid;
+          pOp->p3 = regAutoinc;
+        }
+      }
+      /* If the PRIMARY KEY expression is NULL, then use OP_NewRowid
+      ** to generate a unique primary key value.
+      */
+      if( !appendFlag ){
+        int addr1;
+        if( !IsVirtual(pTab) ){
+          addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid); VdbeCoverage(v);
+          sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc);
+          sqlite3VdbeJumpHere(v, addr1);
+        }else{
+          addr1 = sqlite3VdbeCurrentAddr(v);
+          sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, addr1+2); VdbeCoverage(v);
+        }
+        sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); VdbeCoverage(v);
+      }
+    }else if( IsVirtual(pTab) || withoutRowid ){
+      sqlite3VdbeAddOp2(v, OP_Null, 0, regRowid);
+    }else{
+      sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc);
+      appendFlag = 1;
+    }
+    autoIncStep(pParse, regAutoinc, regRowid);
+
+    /* Compute data for all columns of the new entry, beginning
+    ** with the first column.
+    */
+    nHidden = 0;
+    for(i=0; i<pTab->nCol; i++){
+      int iRegStore = regRowid+1+i;
+      if( i==pTab->iPKey ){
+        /* The value of the INTEGER PRIMARY KEY column is always a NULL.
+        ** Whenever this column is read, the rowid will be substituted
+        ** in its place.  Hence, fill this column with a NULL to avoid
+        ** taking up data space with information that will never be used.
+        ** As there may be shallow copies of this value, make it a soft-NULL */
+        sqlite3VdbeAddOp1(v, OP_SoftNull, iRegStore);
+        continue;
+      }
+      if( pColumn==0 ){
+        if( IsHiddenColumn(&pTab->aCol[i]) ){
+          j = -1;
+          nHidden++;
+        }else{
+          j = i - nHidden;
+        }
+      }else{
+        for(j=0; j<pColumn->nId; j++){
+          if( pColumn->a[j].idx==i ) break;
+        }
+      }
+      if( j<0 || nColumn==0 || (pColumn && j>=pColumn->nId) ){
+        sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore);
+      }else if( useTempTable ){
+        sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, iRegStore);
+      }else if( pSelect ){
+        if( regFromSelect!=regData ){
+          sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+j, iRegStore);
+        }
+      }else{
+        sqlite3ExprCode(pParse, pList->a[j].pExpr, iRegStore);
+      }
+    }
+
+    /* Generate code to check constraints and generate index keys and
+    ** do the insertion.
+    */
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+    if( IsVirtual(pTab) ){
+      const char *pVTab = (const char *)sqlite3GetVTable(db, pTab);
+      sqlite3VtabMakeWritable(pParse, pTab);
+      sqlite3VdbeAddOp4(v, OP_VUpdate, 1, pTab->nCol+2, regIns, pVTab, P4_VTAB);
+      sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError);
+      sqlite3MayAbort(pParse);
+    }else
+#endif
+    {
+      int isReplace;    /* Set to true if constraints may cause a replace */
+      int bUseSeek;     /* True to use OPFLAG_SEEKRESULT */
+      sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
+          regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0
+      );
+      sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0);
+
+      /* Set the OPFLAG_USESEEKRESULT flag if either (a) there are no REPLACE
+      ** constraints or (b) there are no triggers and this table is not a
+      ** parent table in a foreign key constraint. It is safe to set the
+      ** flag in the second case as if any REPLACE constraint is hit, an
+      ** OP_Delete or OP_IdxDelete instruction will be executed on each
+      ** cursor that is disturbed. And these instructions both clear the
+      ** VdbeCursor.seekResult variable, disabling the OPFLAG_USESEEKRESULT
+      ** functionality.
*/ + bUseSeek = (isReplace==0 || (pTrigger==0 && + ((db->flags & SQLITE_ForeignKeys)==0 || sqlite3FkReferences(pTab)==0) + )); + sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, + regIns, aRegIdx, 0, appendFlag, bUseSeek + ); + } + } + + /* Update the count of rows that are inserted + */ + if( (db->flags & SQLITE_CountRows)!=0 ){ + sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); + } + + if( pTrigger ){ + /* Code AFTER triggers */ + sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_AFTER, + pTab, regData-2-pTab->nCol, onError, endOfLoop); + } + + /* The bottom of the main insertion loop, if the data source + ** is a SELECT statement. + */ + sqlite3VdbeResolveLabel(v, endOfLoop); + if( useTempTable ){ + sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addrInsTop); + sqlite3VdbeAddOp1(v, OP_Close, srcTab); + }else if( pSelect ){ + sqlite3VdbeGoto(v, addrCont); + sqlite3VdbeJumpHere(v, addrInsTop); + } + +insert_end: + /* Update the sqlite_sequence table by storing the content of the + ** maximum rowid counter values recorded while inserting into + ** autoincrement tables. + */ + if( pParse->nested==0 && pParse->pTriggerTab==0 ){ + sqlite3AutoincrementEnd(pParse); + } + + /* + ** Return the number of rows inserted. If this routine is + ** generating code because of a call to sqlite3NestedParse(), do not + ** invoke the callback function. + */ + if( (db->flags&SQLITE_CountRows) && !pParse->nested && !pParse->pTriggerTab ){ + sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); + sqlite3VdbeSetNumCols(v, 1); + sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); + } + +insert_cleanup: + sqlite3SrcListDelete(db, pTabList); + sqlite3ExprListDelete(db, pList); + sqlite3SelectDelete(db, pSelect); + sqlite3IdListDelete(db, pColumn); + sqlite3DbFree(db, aRegIdx); +} + +/* Make sure "isView" and other macros defined above are undefined. Otherwise +** they may interfere with compilation of other functions in this file +** (or in another file, if this file becomes part of the amalgamation). */ +#ifdef isView + #undef isView +#endif +#ifdef pTrigger + #undef pTrigger +#endif +#ifdef tmask + #undef tmask +#endif + +/* +** Meanings of bits in of pWalker->eCode for checkConstraintUnchanged() +*/ +#define CKCNSTRNT_COLUMN 0x01 /* CHECK constraint uses a changing column */ +#define CKCNSTRNT_ROWID 0x02 /* CHECK constraint references the ROWID */ + +/* This is the Walker callback from checkConstraintUnchanged(). Set +** bit 0x01 of pWalker->eCode if +** pWalker->eCode to 0 if this expression node references any of the +** columns that are being modifed by an UPDATE statement. +*/ +static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_COLUMN ){ + assert( pExpr->iColumn>=0 || pExpr->iColumn==-1 ); + if( pExpr->iColumn>=0 ){ + if( pWalker->u.aiCol[pExpr->iColumn]>=0 ){ + pWalker->eCode |= CKCNSTRNT_COLUMN; + } + }else{ + pWalker->eCode |= CKCNSTRNT_ROWID; + } + } + return WRC_Continue; +} + +/* +** pExpr is a CHECK constraint on a row that is being UPDATE-ed. The +** only columns that are modified by the UPDATE are those for which +** aiChng[i]>=0, and also the ROWID is modified if chngRowid is true. +** +** Return true if CHECK constraint pExpr does not use any of the +** changing columns (or the rowid if it is changing). In other words, +** return true if this CHECK constraint can be skipped when validating +** the new row in the UPDATE statement. 
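+**
+** For example (an illustrative sketch added here): given
+**
+**     CREATE TABLE t1(a, b, c, CHECK(b>0));
+**     UPDATE t1 SET a=a+1, c=0;
+**
+** the CHECK constraint mentions only "b", which this UPDATE leaves
+** unchanged, so the constraint need not be re-evaluated on any row.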
+*/ +static int checkConstraintUnchanged(Expr *pExpr, int *aiChng, int chngRowid){ + Walker w; + memset(&w, 0, sizeof(w)); + w.eCode = 0; + w.xExprCallback = checkConstraintExprNode; + w.u.aiCol = aiChng; + sqlite3WalkExpr(&w, pExpr); + if( !chngRowid ){ + testcase( (w.eCode & CKCNSTRNT_ROWID)!=0 ); + w.eCode &= ~CKCNSTRNT_ROWID; + } + testcase( w.eCode==0 ); + testcase( w.eCode==CKCNSTRNT_COLUMN ); + testcase( w.eCode==CKCNSTRNT_ROWID ); + testcase( w.eCode==(CKCNSTRNT_ROWID|CKCNSTRNT_COLUMN) ); + return !w.eCode; +} + +/* +** Generate code to do constraint checks prior to an INSERT or an UPDATE +** on table pTab. +** +** The regNewData parameter is the first register in a range that contains +** the data to be inserted or the data after the update. There will be +** pTab->nCol+1 registers in this range. The first register (the one +** that regNewData points to) will contain the new rowid, or NULL in the +** case of a WITHOUT ROWID table. The second register in the range will +** contain the content of the first table column. The third register will +** contain the content of the second table column. And so forth. +** +** The regOldData parameter is similar to regNewData except that it contains +** the data prior to an UPDATE rather than afterwards. regOldData is zero +** for an INSERT. This routine can distinguish between UPDATE and INSERT by +** checking regOldData for zero. +** +** For an UPDATE, the pkChng boolean is true if the true primary key (the +** rowid for a normal table or the PRIMARY KEY for a WITHOUT ROWID table) +** might be modified by the UPDATE. If pkChng is false, then the key of +** the iDataCur content table is guaranteed to be unchanged by the UPDATE. +** +** For an INSERT, the pkChng boolean indicates whether or not the rowid +** was explicitly specified as part of the INSERT statement. If pkChng +** is zero, it means that the either rowid is computed automatically or +** that the table is a WITHOUT ROWID table and has no rowid. On an INSERT, +** pkChng will only be true if the INSERT statement provides an integer +** value for either the rowid column or its INTEGER PRIMARY KEY alias. +** +** The code generated by this routine will store new index entries into +** registers identified by aRegIdx[]. No index entry is created for +** indices where aRegIdx[i]==0. The order of indices in aRegIdx[] is +** the same as the order of indices on the linked list of indices +** at pTab->pIndex. +** +** The caller must have already opened writeable cursors on the main +** table and all applicable indices (that is to say, all indices for which +** aRegIdx[] is not zero). iDataCur is the cursor for the main table when +** inserting or updating a rowid table, or the cursor for the PRIMARY KEY +** index when operating on a WITHOUT ROWID table. iIdxCur is the cursor +** for the first index in the pTab->pIndex list. Cursors for other indices +** are at iIdxCur+N for the N-th element of the pTab->pIndex list. +** +** This routine also generates code to check constraints. NOT NULL, +** CHECK, and UNIQUE constraints are all checked. If a constraint fails, +** then the appropriate action is performed. There are five possible +** actions: ROLLBACK, ABORT, FAIL, REPLACE, and IGNORE. +** +** Constraint type Action What Happens +** --------------- ---------- ---------------------------------------- +** any ROLLBACK The current transaction is rolled back and +** sqlite3_step() returns immediately with a +** return code of SQLITE_CONSTRAINT. 
+** +** any ABORT Back out changes from the current command +** only (do not do a complete rollback) then +** cause sqlite3_step() to return immediately +** with SQLITE_CONSTRAINT. +** +** any FAIL Sqlite3_step() returns immediately with a +** return code of SQLITE_CONSTRAINT. The +** transaction is not rolled back and any +** changes to prior rows are retained. +** +** any IGNORE The attempt in insert or update the current +** row is skipped, without throwing an error. +** Processing continues with the next row. +** (There is an immediate jump to ignoreDest.) +** +** NOT NULL REPLACE The NULL value is replace by the default +** value for that column. If the default value +** is NULL, the action is the same as ABORT. +** +** UNIQUE REPLACE The other row that conflicts with the row +** being inserted is removed. +** +** CHECK REPLACE Illegal. The results in an exception. +** +** Which action to take is determined by the overrideError parameter. +** Or if overrideError==OE_Default, then the pParse->onError parameter +** is used. Or if pParse->onError==OE_Default then the onError value +** for the constraint is used. +*/ +SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( + Parse *pParse, /* The parser context */ + Table *pTab, /* The table being inserted or updated */ + int *aRegIdx, /* Use register aRegIdx[i] for index i. 0 for unused */ + int iDataCur, /* Canonical data cursor (main table or PK index) */ + int iIdxCur, /* First index cursor */ + int regNewData, /* First register in a range holding values to insert */ + int regOldData, /* Previous content. 0 for INSERTs */ + u8 pkChng, /* Non-zero if the rowid or PRIMARY KEY changed */ + u8 overrideError, /* Override onError to this if not OE_Default */ + int ignoreDest, /* Jump to this label on an OE_Ignore resolution */ + int *pbMayReplace, /* OUT: Set to true if constraint may cause a replace */ + int *aiChng /* column i is unchanged if aiChng[i]<0 */ +){ + Vdbe *v; /* VDBE under constrution */ + Index *pIdx; /* Pointer to one of the indices */ + Index *pPk = 0; /* The PRIMARY KEY index */ + sqlite3 *db; /* Database connection */ + int i; /* loop counter */ + int ix; /* Index loop counter */ + int nCol; /* Number of columns */ + int onError; /* Conflict resolution strategy */ + int addr1; /* Address of jump instruction */ + int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ + int nPkField; /* Number of fields in PRIMARY KEY. 1 for ROWID tables */ + int ipkTop = 0; /* Top of the rowid change constraint check */ + int ipkBottom = 0; /* Bottom of the rowid change constraint check */ + u8 isUpdate; /* True if this is an UPDATE operation */ + u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */ + + isUpdate = regOldData!=0; + db = pParse->db; + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + assert( pTab->pSelect==0 ); /* This table is not a VIEW */ + nCol = pTab->nCol; + + /* pPk is the PRIMARY KEY index for WITHOUT ROWID tables and NULL for + ** normal rowid tables. nPkField is the number of key fields in the + ** pPk index or 1 for a rowid table. In other words, nPkField is the + ** number of fields in the true primary key of the table. */ + if( HasRowid(pTab) ){ + pPk = 0; + nPkField = 1; + }else{ + pPk = sqlite3PrimaryKeyIndex(pTab); + nPkField = pPk->nKeyCol; + } + + /* Record that this module has started */ + VdbeModuleComment((v, "BEGIN: GenCnstCks(%d,%d,%d,%d,%d)", + iDataCur, iIdxCur, regNewData, regOldData, pkChng)); + + /* Test all NOT NULL constraints. 
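+  **
+  ** For example (an illustrative sketch added here): with
+  **
+  **     CREATE TABLE t1(a NOT NULL ON CONFLICT REPLACE DEFAULT 'x');
+  **     INSERT INTO t1(a) VALUES(NULL);
+  **
+  ** the OE_Replace branch below silently stores 'x' instead of raising
+  ** SQLITE_CONSTRAINT_NOTNULL, because the column has a usable DEFAULT.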
+  */
+  for(i=0; i<nCol; i++){
+    if( i==pTab->iPKey ){
+      continue;        /* ROWID is never NULL */
+    }
+    if( aiChng && aiChng[i]<0 ){
+      /* Don't bother checking for NOT NULL on columns that do not change */
+      continue;
+    }
+    onError = pTab->aCol[i].notNull;
+    if( onError==OE_None ) continue;  /* This column is allowed to be NULL */
+    if( overrideError!=OE_Default ){
+      onError = overrideError;
+    }else if( onError==OE_Default ){
+      onError = OE_Abort;
+    }
+    if( onError==OE_Replace && pTab->aCol[i].pDflt==0 ){
+      onError = OE_Abort;
+    }
+    assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail
+        || onError==OE_Ignore || onError==OE_Replace );
+    switch( onError ){
+      case OE_Abort:
+        sqlite3MayAbort(pParse);
+        /* Fall through */
+      case OE_Rollback:
+      case OE_Fail: {
+        char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName,
+                                    pTab->aCol[i].zName);
+        sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError,
+                          regNewData+1+i);
+        sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC);
+        sqlite3VdbeChangeP5(v, P5_ConstraintNotNull);
+        VdbeCoverage(v);
+        break;
+      }
+      case OE_Ignore: {
+        sqlite3VdbeAddOp2(v, OP_IsNull, regNewData+1+i, ignoreDest);
+        VdbeCoverage(v);
+        break;
+      }
+      default: {
+        assert( onError==OE_Replace );
+        addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, regNewData+1+i);
+        VdbeCoverage(v);
+        sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regNewData+1+i);
+        sqlite3VdbeJumpHere(v, addr1);
+        break;
+      }
+    }
+  }
+
+  /* Test all CHECK constraints
+  */
+#ifndef SQLITE_OMIT_CHECK
+  if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){
+    ExprList *pCheck = pTab->pCheck;
+    pParse->ckBase = regNewData+1;
+    onError = overrideError!=OE_Default ? overrideError : OE_Abort;
+    for(i=0; i<pCheck->nExpr; i++){
+      int allOk;
+      Expr *pExpr = pCheck->a[i].pExpr;
+      if( aiChng && checkConstraintUnchanged(pExpr, aiChng, pkChng) ) continue;
+      allOk = sqlite3VdbeMakeLabel(v);
+      sqlite3ExprIfTrue(pParse, pExpr, allOk, SQLITE_JUMPIFNULL);
+      if( onError==OE_Ignore ){
+        sqlite3VdbeGoto(v, ignoreDest);
+      }else{
+        char *zName = pCheck->a[i].zName;
+        if( zName==0 ) zName = pTab->zName;
+        if( onError==OE_Replace ) onError = OE_Abort; /* IMP: R-15569-63625 */
+        sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_CHECK,
+                              onError, zName, P4_TRANSIENT,
+                              P5_ConstraintCheck);
+      }
+      sqlite3VdbeResolveLabel(v, allOk);
+    }
+  }
+#endif /* !defined(SQLITE_OMIT_CHECK) */
+
+  /* If rowid is changing, make sure the new rowid does not previously
+  ** exist in the table.
+  */
+  if( pkChng && pPk==0 ){
+    int addrRowidOk = sqlite3VdbeMakeLabel(v);
+
+    /* Figure out what action to take in case of a rowid collision */
+    onError = pTab->keyConf;
+    if( overrideError!=OE_Default ){
+      onError = overrideError;
+    }else if( onError==OE_Default ){
+      onError = OE_Abort;
+    }
+
+    if( isUpdate ){
+      /* pkChng!=0 does not mean that the rowid has changed, only that
+      ** it might have changed.  Skip the conflict logic below if the rowid
+      ** is unchanged. */
+      sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData);
+      sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+      VdbeCoverage(v);
+    }
+
+    /* If the response to a rowid conflict is REPLACE but the response
+    ** to some other UNIQUE constraint is FAIL or IGNORE, then we need
+    ** to defer the running of the rowid conflict checking until after
+    ** the UNIQUE constraints have run.
+ */ + if( onError==OE_Replace && overrideError!=OE_Replace ){ + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->onError==OE_Ignore || pIdx->onError==OE_Fail ){ + ipkTop = sqlite3VdbeAddOp0(v, OP_Goto); + break; + } + } + } + + /* Check to see if the new rowid already exists in the table. Skip + ** the following conflict logic if it does not. */ + sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, addrRowidOk, regNewData); + VdbeCoverage(v); + + /* Generate code that deals with a rowid collision */ + switch( onError ){ + default: { + onError = OE_Abort; + /* Fall thru into the next case */ + } + case OE_Rollback: + case OE_Abort: + case OE_Fail: { + sqlite3RowidConstraint(pParse, onError, pTab); + break; + } + case OE_Replace: { + /* If there are DELETE triggers on this table and the + ** recursive-triggers flag is set, call GenerateRowDelete() to + ** remove the conflicting row from the table. This will fire + ** the triggers and remove both the table and index b-tree entries. + ** + ** Otherwise, if there are no triggers or the recursive-triggers + ** flag is not set, but the table has one or more indexes, call + ** GenerateRowIndexDelete(). This removes the index b-tree entries + ** only. The table b-tree entry will be replaced by the new entry + ** when it is inserted. + ** + ** If either GenerateRowDelete() or GenerateRowIndexDelete() is called, + ** also invoke MultiWrite() to indicate that this VDBE may require + ** statement rollback (if the statement is aborted after the delete + ** takes place). Earlier versions called sqlite3MultiWrite() regardless, + ** but being more selective here allows statements like: + ** + ** REPLACE INTO t(rowid) VALUES($newrowid) + ** + ** to run without a statement journal if there are no indexes on the + ** table. + */ + Trigger *pTrigger = 0; + if( db->flags&SQLITE_RecTriggers ){ + pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); + } + if( pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0) ){ + sqlite3MultiWrite(pParse); + sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, + regNewData, 1, 0, OE_Replace, 1, -1); + }else{ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( HasRowid(pTab) ){ + /* This OP_Delete opcode fires the pre-update-hook only. It does + ** not modify the b-tree. It is more efficient to let the coming + ** OP_Insert replace the existing entry than it is to delete the + ** existing entry and then insert a new one. */ + sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, OPFLAG_ISNOOP); + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + } +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + if( pTab->pIndex ){ + sqlite3MultiWrite(pParse); + sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,-1); + } + } + seenReplace = 1; + break; + } + case OE_Ignore: { + /*assert( seenReplace==0 );*/ + sqlite3VdbeGoto(v, ignoreDest); + break; + } + } + sqlite3VdbeResolveLabel(v, addrRowidOk); + if( ipkTop ){ + ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeJumpHere(v, ipkTop); + } + } + + /* Test all UNIQUE constraints by creating entries for each UNIQUE + ** index and making sure that duplicate entries do not already exist. + ** Compute the revised record entries for indices as we go. + ** + ** This loop also handles the case of the PRIMARY KEY index for a + ** WITHOUT ROWID table. 
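+  **
+  ** For example (an illustrative sketch added here):
+  **
+  **     CREATE TABLE t1(a UNIQUE ON CONFLICT IGNORE, b);
+  **     INSERT INTO t1 VALUES(1, 2);
+  **     INSERT INTO t1 VALUES(1, 3);    -- silently skipped
+  **
+  ** Each UNIQUE index gets its own OP_NoConflict probe in the loop below;
+  ** the second INSERT jumps to ignoreDest when the probe on "a" finds a
+  ** matching entry.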
+  */
+  for(ix=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, ix++){
+    int regIdx;          /* Range of registers holding content for pIdx */
+    int regR;            /* Range of registers holding conflicting PK */
+    int iThisCur;        /* Cursor for this UNIQUE index */
+    int addrUniqueOk;    /* Jump here if the UNIQUE constraint is satisfied */
+
+    if( aRegIdx[ix]==0 ) continue;  /* Skip indices that do not change */
+    if( bAffinityDone==0 ){
+      sqlite3TableAffinity(v, pTab, regNewData+1);
+      bAffinityDone = 1;
+    }
+    iThisCur = iIdxCur+ix;
+    addrUniqueOk = sqlite3VdbeMakeLabel(v);
+
+    /* Skip partial indices for which the WHERE clause is not true */
+    if( pIdx->pPartIdxWhere ){
+      sqlite3VdbeAddOp2(v, OP_Null, 0, aRegIdx[ix]);
+      pParse->ckBase = regNewData+1;
+      sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, addrUniqueOk,
+                            SQLITE_JUMPIFNULL);
+      pParse->ckBase = 0;
+    }
+
+    /* Create a record for this index entry as it should appear after
+    ** the insert or update.  Store that record in the aRegIdx[ix] register
+    */
+    regIdx = aRegIdx[ix]+1;
+    for(i=0; i<pIdx->nColumn; i++){
+      int iField = pIdx->aiColumn[i];
+      int x;
+      if( iField==XN_EXPR ){
+        pParse->ckBase = regNewData+1;
+        sqlite3ExprCodeCopy(pParse, pIdx->aColExpr->a[i].pExpr, regIdx+i);
+        pParse->ckBase = 0;
+        VdbeComment((v, "%s column %d", pIdx->zName, i));
+      }else{
+        if( iField==XN_ROWID || iField==pTab->iPKey ){
+          x = regNewData;
+        }else{
+          x = iField + regNewData + 1;
+        }
+        sqlite3VdbeAddOp2(v, iField<0 ? OP_IntCopy : OP_SCopy, x, regIdx+i);
+        VdbeComment((v, "%s", iField<0 ? "rowid" : pTab->aCol[iField].zName));
+      }
+    }
+    sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]);
+    VdbeComment((v, "for %s", pIdx->zName));
+
+    /* In an UPDATE operation, if this index is the PRIMARY KEY index
+    ** of a WITHOUT ROWID table and there has been no change to the
+    ** primary key, then no collision is possible.  The collision detection
+    ** logic below can all be skipped. */
+    if( isUpdate && pPk==pIdx && pkChng==0 ){
+      sqlite3VdbeResolveLabel(v, addrUniqueOk);
+      continue;
+    }
+
+    /* Find out what action to take in case there is a uniqueness conflict */
+    onError = pIdx->onError;
+    if( onError==OE_None ){
+      sqlite3VdbeResolveLabel(v, addrUniqueOk);
+      continue;  /* pIdx is not a UNIQUE index */
+    }
+    if( overrideError!=OE_Default ){
+      onError = overrideError;
+    }else if( onError==OE_Default ){
+      onError = OE_Abort;
+    }
+
+    /* Collision detection may be omitted if all of the following are true:
+    **   (1) The conflict resolution algorithm is REPLACE
+    **   (2) The table is a WITHOUT ROWID table
+    **   (3) There are no secondary indexes on the table
+    **   (4) No delete triggers need to be fired if there is a conflict
+    **   (5) No FK constraint counters need to be updated if a conflict occurs.
+    */
+    if( (ix==0 && pIdx->pNext==0)                   /* Condition 3 */
+     && pPk==pIdx                                   /* Condition 2 */
+     && onError==OE_Replace                         /* Condition 1 */
+     && ( 0==(db->flags&SQLITE_RecTriggers) ||      /* Condition 4 */
+          0==sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0))
+     && ( 0==(db->flags&SQLITE_ForeignKeys) ||      /* Condition 5 */
+         (0==pTab->pFKey && 0==sqlite3FkReferences(pTab)))
+    ){
+      sqlite3VdbeResolveLabel(v, addrUniqueOk);
+      continue;
+    }
+
+    /* Check to see if the new index entry will be unique */
+    sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk,
+                         regIdx, pIdx->nKeyCol); VdbeCoverage(v);
+
+    /* Generate code to handle collisions */
+    regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField);
+    if( isUpdate || onError==OE_Replace ){
+      if( HasRowid(pTab) ){
+        sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR);
+        /* Conflict only if the rowid of the existing index entry
+        ** is different from old-rowid */
+        if( isUpdate ){
+          sqlite3VdbeAddOp3(v, OP_Eq, regR, addrUniqueOk, regOldData);
+          sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+          VdbeCoverage(v);
+        }
+      }else{
+        int x;
+        /* Extract the PRIMARY KEY from the end of the index entry and
+        ** store it in registers regR..regR+nPk-1 */
+        if( pIdx!=pPk ){
+          for(i=0; i<pPk->nKeyCol; i++){
+            assert( pPk->aiColumn[i]>=0 );
+            x = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[i]);
+            sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i);
+            VdbeComment((v, "%s.%s", pTab->zName,
+                         pTab->aCol[pPk->aiColumn[i]].zName));
+          }
+        }
+        if( isUpdate ){
+          /* If currently processing the PRIMARY KEY of a WITHOUT ROWID
+          ** table, only conflict if the new PRIMARY KEY values are actually
+          ** different from the old.
+          **
+          ** For a UNIQUE index, only conflict if the PRIMARY KEY values
+          ** of the matched index row are different from the original PRIMARY
+          ** KEY values of this row before the update.  */
+          int addrJump = sqlite3VdbeCurrentAddr(v)+pPk->nKeyCol;
+          int op = OP_Ne;
+          int regCmp = (IsPrimaryKeyIndex(pIdx) ? regIdx : regR);
+
+          for(i=0; i<pPk->nKeyCol; i++){
+            char *p4 = (char*)sqlite3LocateCollSeq(pParse, pPk->azColl[i]);
+            x = pPk->aiColumn[i];
+            assert( x>=0 );
+            if( i==(pPk->nKeyCol-1) ){
+              addrJump = addrUniqueOk;
+              op = OP_Eq;
+            }
+            sqlite3VdbeAddOp4(v, op,
+                regOldData+1+x, addrJump, regCmp+i, p4, P4_COLLSEQ
+            );
+            sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+            VdbeCoverageIf(v, op==OP_Eq);
+            VdbeCoverageIf(v, op==OP_Ne);
+          }
+        }
+      }
+    }
+
+    /* Generate code that executes if the new index entry is not unique */
+    assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail
+        || onError==OE_Ignore || onError==OE_Replace );
+    switch( onError ){
+      case OE_Rollback:
+      case OE_Abort:
+      case OE_Fail: {
+        sqlite3UniqueConstraint(pParse, onError, pIdx);
+        break;
+      }
+      case OE_Ignore: {
+        sqlite3VdbeGoto(v, ignoreDest);
+        break;
+      }
+      default: {
+        Trigger *pTrigger = 0;
+        assert( onError==OE_Replace );
+        sqlite3MultiWrite(pParse);
+        if( db->flags&SQLITE_RecTriggers ){
+          pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0);
+        }
+        sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
+            regR, nPkField, 0, OE_Replace,
+            (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), iThisCur);
+        seenReplace = 1;
+        break;
+      }
+    }
+    sqlite3VdbeResolveLabel(v, addrUniqueOk);
+    if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField);
+  }
+  if( ipkTop ){
+    sqlite3VdbeGoto(v, ipkTop+1);
+    sqlite3VdbeJumpHere(v, ipkBottom);
+  }
+
+  *pbMayReplace = seenReplace;
+  VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace));
+}
+
+#ifdef SQLITE_ENABLE_NULL_TRIM
+/*
+** Change the P5 operand on the last opcode (which should be an OP_MakeRecord)
+** to be the number of columns in table pTab that must not be NULL-trimmed.
+**
+** Or if no columns of pTab may be NULL-trimmed, leave P5 at zero.
+*/
+SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){
+  u16 i;
+
+  /* Records with omitted columns are only allowed for schema format
+  ** version 2 and later (SQLite version 3.1.4, 2005-02-20).
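+  **
+  ** For example (an illustrative sketch added here): with
+  **
+  **     CREATE TABLE t1(a, b, c, d);
+  **     INSERT INTO t1(a) VALUES(1);
+  **
+  ** columns b, c and d are trailing NULLs with no DEFAULT, so the stored
+  ** record can safely be trimmed down to just the value for "a".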
*/ + if( pTab->pSchema->file_format<2 ) return; + + for(i=pTab->nCol; i>1 && pTab->aCol[i-1].pDflt==0; i--){} + sqlite3VdbeChangeP5(v, i); +} +#endif + +/* +** This routine generates code to finish the INSERT or UPDATE operation +** that was started by a prior call to sqlite3GenerateConstraintChecks. +** A consecutive range of registers starting at regNewData contains the +** rowid and the content to be inserted. +** +** The arguments to this routine should be the same as the first six +** arguments to sqlite3GenerateConstraintChecks. +*/ +SQLITE_PRIVATE void sqlite3CompleteInsertion( + Parse *pParse, /* The parser context */ + Table *pTab, /* the table into which we are inserting */ + int iDataCur, /* Cursor of the canonical data source */ + int iIdxCur, /* First index cursor */ + int regNewData, /* Range of content */ + int *aRegIdx, /* Register used by each index. 0 for unused indices */ + int update_flags, /* True for UPDATE, False for INSERT */ + int appendBias, /* True if this is likely to be an append */ + int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */ +){ + Vdbe *v; /* Prepared statements under construction */ + Index *pIdx; /* An index being inserted or updated */ + u8 pik_flags; /* flag values passed to the btree insert */ + int regData; /* Content registers (after the rowid) */ + int regRec; /* Register holding assembled record for the table */ + int i; /* Loop counter */ + u8 bAffinityDone = 0; /* True if OP_Affinity has been run already */ + + assert( update_flags==0 + || update_flags==OPFLAG_ISUPDATE + || update_flags==(OPFLAG_ISUPDATE|OPFLAG_SAVEPOSITION) + ); + + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + assert( pTab->pSelect==0 ); /* This table is not a VIEW */ + for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ + if( aRegIdx[i]==0 ) continue; + bAffinityDone = 1; + if( pIdx->pPartIdxWhere ){ + sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2); + VdbeCoverage(v); + } + pik_flags = (useSeekResult ? OPFLAG_USESEEKRESULT : 0); + if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ + assert( pParse->nested==0 ); + pik_flags |= OPFLAG_NCHANGE; + pik_flags |= (update_flags & OPFLAG_SAVEPOSITION); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( update_flags==0 ){ + sqlite3VdbeAddOp4(v, OP_InsertInt, + iIdxCur+i, aRegIdx[i], 0, (char*)pTab, P4_TABLE + ); + sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP); + } +#endif + } + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i], + aRegIdx[i]+1, + pIdx->uniqNotNull ? pIdx->nKeyCol: pIdx->nColumn); + sqlite3VdbeChangeP5(v, pik_flags); + } + if( !HasRowid(pTab) ) return; + regData = regNewData + 1; + regRec = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec); + sqlite3SetMakeRecordP5(v, pTab); + if( !bAffinityDone ){ + sqlite3TableAffinity(v, pTab, 0); + sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol); + } + if( pParse->nested ){ + pik_flags = 0; + }else{ + pik_flags = OPFLAG_NCHANGE; + pik_flags |= (update_flags?update_flags:OPFLAG_LASTROWID); + } + if( appendBias ){ + pik_flags |= OPFLAG_APPEND; + } + if( useSeekResult ){ + pik_flags |= OPFLAG_USESEEKRESULT; + } + sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, regRec, regNewData); + if( !pParse->nested ){ + sqlite3VdbeAppendP4(v, pTab, P4_TABLE); + } + sqlite3VdbeChangeP5(v, pik_flags); +} + +/* +** Allocate cursors for the pTab table and all its indices and generate +** code to open and initialized those cursors. 
+** +** The cursor for the object that contains the complete data (normally +** the table itself, but the PRIMARY KEY index in the case of a WITHOUT +** ROWID table) is returned in *piDataCur. The first index cursor is +** returned in *piIdxCur. The number of indices is returned. +** +** Use iBase as the first cursor (either the *piDataCur for rowid tables +** or the first index for WITHOUT ROWID tables) if it is non-negative. +** If iBase is negative, then allocate the next available cursor. +** +** For a rowid table, *piDataCur will be exactly one less than *piIdxCur. +** For a WITHOUT ROWID table, *piDataCur will be somewhere in the range +** of *piIdxCurs, depending on where the PRIMARY KEY index appears on the +** pTab->pIndex list. +** +** If pTab is a virtual table, then this routine is a no-op and the +** *piDataCur and *piIdxCur values are left uninitialized. +*/ +SQLITE_PRIVATE int sqlite3OpenTableAndIndices( + Parse *pParse, /* Parsing context */ + Table *pTab, /* Table to be opened */ + int op, /* OP_OpenRead or OP_OpenWrite */ + u8 p5, /* P5 value for OP_Open* opcodes (except on WITHOUT ROWID) */ + int iBase, /* Use this for the table cursor, if there is one */ + u8 *aToOpen, /* If not NULL: boolean for each table and index */ + int *piDataCur, /* Write the database source cursor number here */ + int *piIdxCur /* Write the first index cursor number here */ +){ + int i; + int iDb; + int iDataCur; + Index *pIdx; + Vdbe *v; + + assert( op==OP_OpenRead || op==OP_OpenWrite ); + assert( op==OP_OpenWrite || p5==0 ); + if( IsVirtual(pTab) ){ + /* This routine is a no-op for virtual tables. Leave the output + ** variables *piDataCur and *piIdxCur uninitialized so that valgrind + ** can detect if they are used by mistake in the caller. */ + return 0; + } + iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + if( iBase<0 ) iBase = pParse->nTab; + iDataCur = iBase++; + if( piDataCur ) *piDataCur = iDataCur; + if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){ + sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op); + }else{ + sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName); + } + if( piIdxCur ) *piIdxCur = iBase; + for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ + int iIdxCur = iBase++; + assert( pIdx->pSchema==pTab->pSchema ); + if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ + if( piDataCur ) *piDataCur = iIdxCur; + p5 = 0; + } + if( aToOpen==0 || aToOpen[i+1] ){ + sqlite3VdbeAddOp3(v, op, iIdxCur, pIdx->tnum, iDb); + sqlite3VdbeSetP4KeyInfo(pParse, pIdx); + sqlite3VdbeChangeP5(v, p5); + VdbeComment((v, "%s", pIdx->zName)); + } + } + if( iBase>pParse->nTab ) pParse->nTab = iBase; + return i; +} + + +#ifdef SQLITE_TEST +/* +** The following global variable is incremented whenever the +** transfer optimization is used. This is used for testing +** purposes only - to make sure the transfer optimization really +** is happening when it is supposed to. +*/ +SQLITE_API int sqlite3_xferopt_count; +#endif /* SQLITE_TEST */ + + +#ifndef SQLITE_OMIT_XFER_OPT +/* +** Check to see if index pSrc is compatible as a source of data +** for index pDest in an insert transfer optimization. 
The rules
+** for a compatible index:
+**
+**    *   The index is over the same set of columns
+**    *   The same DESC and ASC markings occur on all columns
+**    *   The same onError processing (OE_Abort, OE_Ignore, etc)
+**    *   The same collating sequence on each column
+**    *   The index has the exact same WHERE clause
+*/
+static int xferCompatibleIndex(Index *pDest, Index *pSrc){
+  int i;
+  assert( pDest && pSrc );
+  assert( pDest->pTable!=pSrc->pTable );
+  if( pDest->nKeyCol!=pSrc->nKeyCol ){
+    return 0;   /* Different number of columns */
+  }
+  if( pDest->onError!=pSrc->onError ){
+    return 0;   /* Different conflict resolution strategies */
+  }
+  for(i=0; i<pSrc->nKeyCol; i++){
+    if( pSrc->aiColumn[i]!=pDest->aiColumn[i] ){
+      return 0;   /* Different columns indexed */
+    }
+    if( pSrc->aiColumn[i]==XN_EXPR ){
+      assert( pSrc->aColExpr!=0 && pDest->aColExpr!=0 );
+      if( sqlite3ExprCompare(pSrc->aColExpr->a[i].pExpr,
+                             pDest->aColExpr->a[i].pExpr, -1)!=0 ){
+        return 0;   /* Different expressions in the index */
+      }
+    }
+    if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){
+      return 0;   /* Different sort orders */
+    }
+    if( sqlite3_stricmp(pSrc->azColl[i],pDest->azColl[i])!=0 ){
+      return 0;   /* Different collating sequences */
+    }
+  }
+  if( sqlite3ExprCompare(pSrc->pPartIdxWhere, pDest->pPartIdxWhere, -1) ){
+    return 0;     /* Different WHERE clauses */
+  }
+
+  /* If no test above fails then the indices must be compatible */
+  return 1;
+}
+
+/*
+** Attempt the transfer optimization on INSERTs of the form
+**
+**     INSERT INTO tab1 SELECT * FROM tab2;
+**
+** The xfer optimization transfers raw records from tab2 over to tab1.
+** Columns are not decoded and reassembled, which greatly improves
+** performance.  Raw index records are transferred in the same way.
+**
+** The xfer optimization is only attempted if tab1 and tab2 are compatible.
+** There are lots of rules for determining compatibility - see comments
+** embedded in the code for details.
+**
+** This routine returns TRUE if the optimization is guaranteed to be used.
+** Sometimes the xfer optimization will only work if the destination table
+** is empty - a factor that can only be determined at run-time.  In that
+** case, this routine generates code for the xfer optimization but also
+** does a test to see if the destination table is empty and jumps over the
+** xfer optimization code if the test fails.  In that case, this routine
+** returns FALSE so that the caller will know to go ahead and generate
+** an unoptimized transfer.  This routine also returns FALSE if there
+** is no chance that the xfer optimization can be applied.
+**
+** This optimization is particularly useful at making VACUUM run faster.
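+**
+** For example (an illustrative sketch added here): assuming t1 and t2
+** have identical schemas and t1 has no triggers,
+**
+**     INSERT INTO t1 SELECT * FROM t2;            -- xfer candidate
+**     INSERT INTO t1 SELECT a, b FROM t2;         -- not a candidate
+**     INSERT INTO t1 SELECT * FROM t2 WHERE a>0;  -- not a candidate
+**
+** Only the first form can copy raw records; the others fall back to the
+** general INSERT ... SELECT code paths above.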
+*/ +static int xferOptimization( + Parse *pParse, /* Parser context */ + Table *pDest, /* The table we are inserting into */ + Select *pSelect, /* A SELECT statement to use as the data source */ + int onError, /* How to handle constraint errors */ + int iDbDest /* The database of pDest */ +){ + sqlite3 *db = pParse->db; + ExprList *pEList; /* The result set of the SELECT */ + Table *pSrc; /* The table in the FROM clause of SELECT */ + Index *pSrcIdx, *pDestIdx; /* Source and destination indices */ + struct SrcList_item *pItem; /* An element of pSelect->pSrc */ + int i; /* Loop counter */ + int iDbSrc; /* The database of pSrc */ + int iSrc, iDest; /* Cursors from source and destination */ + int addr1, addr2; /* Loop addresses */ + int emptyDestTest = 0; /* Address of test for empty pDest */ + int emptySrcTest = 0; /* Address of test for empty pSrc */ + Vdbe *v; /* The VDBE we are building */ + int regAutoinc; /* Memory register used by AUTOINC */ + int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */ + int regData, regRowid; /* Registers holding data and rowid */ + + if( pSelect==0 ){ + return 0; /* Must be of the form INSERT INTO ... SELECT ... */ + } + if( pParse->pWith || pSelect->pWith ){ + /* Do not attempt to process this query if there are an WITH clauses + ** attached to it. Proceeding may generate a false "no such table: xxx" + ** error if pSelect reads from a CTE named "xxx". */ + return 0; + } + if( sqlite3TriggerList(pParse, pDest) ){ + return 0; /* tab1 must not have triggers */ + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( pDest->tabFlags & TF_Virtual ){ + return 0; /* tab1 must not be a virtual table */ + } +#endif + if( onError==OE_Default ){ + if( pDest->iPKey>=0 ) onError = pDest->keyConf; + if( onError==OE_Default ) onError = OE_Abort; + } + assert(pSelect->pSrc); /* allocated even if there is no FROM clause */ + if( pSelect->pSrc->nSrc!=1 ){ + return 0; /* FROM clause must have exactly one term */ + } + if( pSelect->pSrc->a[0].pSelect ){ + return 0; /* FROM clause cannot contain a subquery */ + } + if( pSelect->pWhere ){ + return 0; /* SELECT may not have a WHERE clause */ + } + if( pSelect->pOrderBy ){ + return 0; /* SELECT may not have an ORDER BY clause */ + } + /* Do not need to test for a HAVING clause. If HAVING is present but + ** there is no ORDER BY, we will get an error. */ + if( pSelect->pGroupBy ){ + return 0; /* SELECT may not have a GROUP BY clause */ + } + if( pSelect->pLimit ){ + return 0; /* SELECT may not have a LIMIT clause */ + } + assert( pSelect->pOffset==0 ); /* Must be so if pLimit==0 */ + if( pSelect->pPrior ){ + return 0; /* SELECT may not be a compound query */ + } + if( pSelect->selFlags & SF_Distinct ){ + return 0; /* SELECT may not be DISTINCT */ + } + pEList = pSelect->pEList; + assert( pEList!=0 ); + if( pEList->nExpr!=1 ){ + return 0; /* The result set must have exactly one column */ + } + assert( pEList->a[0].pExpr ); + if( pEList->a[0].pExpr->op!=TK_ASTERISK ){ + return 0; /* The result set must be the special operator "*" */ + } + + /* At this point we have established that the statement is of the + ** correct syntactic form to participate in this optimization. Now + ** we have to check the semantics. 
+  */
+  pItem = pSelect->pSrc->a;
+  pSrc = sqlite3LocateTableItem(pParse, 0, pItem);
+  if( pSrc==0 ){
+    return 0;   /* FROM clause does not contain a real table */
+  }
+  if( pSrc==pDest ){
+    return 0;   /* tab1 and tab2 may not be the same table */
+  }
+  if( HasRowid(pDest)!=HasRowid(pSrc) ){
+    return 0;   /* source and destination must both be WITHOUT ROWID or not */
+  }
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  if( pSrc->tabFlags & TF_Virtual ){
+    return 0;   /* tab2 must not be a virtual table */
+  }
+#endif
+  if( pSrc->pSelect ){
+    return 0;   /* tab2 may not be a view */
+  }
+  if( pDest->nCol!=pSrc->nCol ){
+    return 0;   /* Number of columns must be the same in tab1 and tab2 */
+  }
+  if( pDest->iPKey!=pSrc->iPKey ){
+    return 0;   /* Both tables must have the same INTEGER PRIMARY KEY */
+  }
+  for(i=0; i<pDest->nCol; i++){
+    Column *pDestCol = &pDest->aCol[i];
+    Column *pSrcCol = &pSrc->aCol[i];
+#ifdef SQLITE_ENABLE_HIDDEN_COLUMNS
+    if( (db->flags & SQLITE_Vacuum)==0
+     && (pDestCol->colFlags | pSrcCol->colFlags) & COLFLAG_HIDDEN
+    ){
+      return 0;    /* Neither table may have __hidden__ columns */
+    }
+#endif
+    if( pDestCol->affinity!=pSrcCol->affinity ){
+      return 0;    /* Affinity must be the same on all columns */
+    }
+    if( sqlite3_stricmp(pDestCol->zColl, pSrcCol->zColl)!=0 ){
+      return 0;    /* Collating sequence must be the same on all columns */
+    }
+    if( pDestCol->notNull && !pSrcCol->notNull ){
+      return 0;    /* tab2 must be NOT NULL if tab1 is */
+    }
+    /* Default values for second and subsequent columns need to match. */
+    if( i>0 ){
+      assert( pDestCol->pDflt==0 || pDestCol->pDflt->op==TK_SPAN );
+      assert( pSrcCol->pDflt==0 || pSrcCol->pDflt->op==TK_SPAN );
+      if( (pDestCol->pDflt==0)!=(pSrcCol->pDflt==0)
+       || (pDestCol->pDflt && strcmp(pDestCol->pDflt->u.zToken,
+                                     pSrcCol->pDflt->u.zToken)!=0)
+      ){
+        return 0;    /* Default values must be the same for all columns */
+      }
+    }
+  }
+  for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){
+    if( IsUniqueIndex(pDestIdx) ){
+      destHasUniqueIdx = 1;
+    }
+    for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){
+      if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break;
+    }
+    if( pSrcIdx==0 ){
+      return 0;    /* pDestIdx has no corresponding index in pSrc */
+    }
+  }
+#ifndef SQLITE_OMIT_CHECK
+  if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) ){
+    return 0;   /* Tables have different CHECK constraints.  Ticket #2252 */
+  }
+#endif
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+  /* Disallow the transfer optimization if the destination table contains
+  ** any foreign key constraints.  This is more restrictive than necessary.
+  ** But the main beneficiary of the transfer optimization is the VACUUM
+  ** command, and the VACUUM command disables foreign key constraints.  So
+  ** the extra complication to make this rule less restrictive is probably
+  ** not worth the effort.  Ticket [6284df89debdfa61db8073e062908af0c9b6118e]
+  */
+  if( (db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){
+    return 0;
+  }
+#endif
+  if( (db->flags & SQLITE_CountRows)!=0 ){
+    return 0;  /* xfer opt does not play well with PRAGMA count_changes */
+  }
+
+  /* If we get this far, it means that the xfer optimization is at
+  ** least a possibility, though it might only work if the destination
+  ** table (tab1) is initially empty.
+ */ +#ifdef SQLITE_TEST + sqlite3_xferopt_count++; +#endif + iDbSrc = sqlite3SchemaToIndex(db, pSrc->pSchema); + v = sqlite3GetVdbe(pParse); + sqlite3CodeVerifySchema(pParse, iDbSrc); + iSrc = pParse->nTab++; + iDest = pParse->nTab++; + regAutoinc = autoIncBegin(pParse, iDbDest, pDest); + regData = sqlite3GetTempReg(pParse); + regRowid = sqlite3GetTempReg(pParse); + sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); + assert( HasRowid(pDest) || destHasUniqueIdx ); + if( (db->flags & SQLITE_Vacuum)==0 && ( + (pDest->iPKey<0 && pDest->pIndex!=0) /* (1) */ + || destHasUniqueIdx /* (2) */ + || (onError!=OE_Abort && onError!=OE_Rollback) /* (3) */ + )){ + /* In some circumstances, we are able to run the xfer optimization + ** only if the destination table is initially empty. Unless the + ** SQLITE_Vacuum flag is set, this block generates code to make + ** that determination. If SQLITE_Vacuum is set, then the destination + ** table is always empty. + ** + ** Conditions under which the destination must be empty: + ** + ** (1) There is no INTEGER PRIMARY KEY but there are indices. + ** (If the destination is not initially empty, the rowid fields + ** of index entries might need to change.) + ** + ** (2) The destination has a unique index. (The xfer optimization + ** is unable to test uniqueness.) + ** + ** (3) onError is something other than OE_Abort and OE_Rollback. + */ + addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0); VdbeCoverage(v); + emptyDestTest = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeJumpHere(v, addr1); + } + if( HasRowid(pSrc) ){ + u8 insFlags; + sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead); + emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); + if( pDest->iPKey>=0 ){ + addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); + addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); + VdbeCoverage(v); + sqlite3RowidConstraint(pParse, onError, pDest); + sqlite3VdbeJumpHere(v, addr2); + autoIncStep(pParse, regAutoinc, regRowid); + }else if( pDest->pIndex==0 ){ + addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid); + }else{ + addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); + assert( (pDest->tabFlags & TF_Autoincrement)==0 ); + } + sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1); + if( db->flags & SQLITE_Vacuum ){ + sqlite3VdbeAddOp3(v, OP_Last, iDest, 0, -1); + insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID| + OPFLAG_APPEND|OPFLAG_USESEEKRESULT; + }else{ + insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND; + } + sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid, + (char*)pDest, P4_TABLE); + sqlite3VdbeChangeP5(v, insFlags); + sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v); + sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); + sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); + }else{ + sqlite3TableLock(pParse, iDbDest, pDest->tnum, 1, pDest->zName); + sqlite3TableLock(pParse, iDbSrc, pSrc->tnum, 0, pSrc->zName); + } + for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ + u8 idxInsFlags = 0; + for(pSrcIdx=pSrc->pIndex; ALWAYS(pSrcIdx); pSrcIdx=pSrcIdx->pNext){ + if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; + } + assert( pSrcIdx ); + sqlite3VdbeAddOp3(v, OP_OpenRead, iSrc, pSrcIdx->tnum, iDbSrc); + sqlite3VdbeSetP4KeyInfo(pParse, pSrcIdx); + VdbeComment((v, "%s", pSrcIdx->zName)); + sqlite3VdbeAddOp3(v, OP_OpenWrite, iDest, pDestIdx->tnum, iDbDest); + sqlite3VdbeSetP4KeyInfo(pParse, pDestIdx); + sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR); + VdbeComment((v, "%s", pDestIdx->zName)); + addr1 
= sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
+    sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1);
+    if( db->flags & SQLITE_Vacuum ){
+      /* This INSERT command is part of a VACUUM operation, which guarantees
+      ** that the destination table is empty. If all indexed columns use
+      ** collation sequence BINARY, then it can also be assumed that the
+      ** index will be populated by inserting keys in strictly sorted
+      ** order. In this case, instead of seeking within the b-tree as part
+      ** of every OP_IdxInsert opcode, an OP_Last is added before the
+      ** OP_IdxInsert to seek to the point within the b-tree where each key
+      ** should be inserted. This is faster.
+      **
+      ** If any of the indexed columns use a collation sequence other than
+      ** BINARY, this optimization is disabled. This is because the user
+      ** might change the definition of a collation sequence and then run
+      ** a VACUUM command. In that case keys may not be written in strictly
+      ** sorted order.  */
+      for(i=0; i<pSrcIdx->nColumn; i++){
+        const char *zColl = pSrcIdx->azColl[i];
+        assert( sqlite3_stricmp(sqlite3StrBINARY, zColl)!=0
+                    || sqlite3StrBINARY==zColl );
+        if( sqlite3_stricmp(sqlite3StrBINARY, zColl) ) break;
+      }
+      if( i==pSrcIdx->nColumn ){
+        idxInsFlags = OPFLAG_USESEEKRESULT;
+        sqlite3VdbeAddOp3(v, OP_Last, iDest, 0, -1);
+      }
+    }
+    if( !HasRowid(pSrc) && pDestIdx->idxType==2 ){
+      idxInsFlags |= OPFLAG_NCHANGE;
+    }
+    sqlite3VdbeAddOp2(v, OP_IdxInsert, iDest, regData);
+    sqlite3VdbeChangeP5(v, idxInsFlags|OPFLAG_APPEND);
+    sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v);
+    sqlite3VdbeJumpHere(v, addr1);
+    sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
+    sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
+  }
+  if( emptySrcTest ) sqlite3VdbeJumpHere(v, emptySrcTest);
+  sqlite3ReleaseTempReg(pParse, regRowid);
+  sqlite3ReleaseTempReg(pParse, regData);
+  if( emptyDestTest ){
+    sqlite3AutoincrementEnd(pParse);
+    sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, 0);
+    sqlite3VdbeJumpHere(v, emptyDestTest);
+    sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
+    return 0;
+  }else{
+    return 1;
+  }
+}
+#endif /* SQLITE_OMIT_XFER_OPT */
+
+/************** End of insert.c **********************************************/
+/************** Begin file legacy.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** Main file for the SQLite library.  The routines in this file
+** implement the programmer interface to the library.  Routines in
+** other files are for internal use by SQLite and should not be
+** accessed by users of the library.
+*/
+
+/* #include "sqliteInt.h" */
+
+/*
+** Execute SQL code.  Return one of the SQLITE_ success/failure
+** codes.  Also write an error message into memory obtained from
+** malloc() and make *pzErrMsg point to that message.
+**
+** If the SQL is a query, then for each row in the query result
+** the xCallback() function is called.  pArg becomes the first
+** argument to xCallback().  If xCallback=NULL then no callback
+** is invoked, even for queries.
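+**
+** A minimal usage sketch (added for illustration; "cb", "zErr" and the
+** SQL text are invented names, and NULL column values arrive as NULL
+** pointers in azVal):
+**
+**     static int cb(void *pArg, int nCol, char **azVal, char **azCol){
+**       int i;
+**       for(i=0; i<nCol; i++){
+**         printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
+**       }
+**       return 0;    (returning non-zero makes sqlite3_exec() give SQLITE_ABORT)
+**     }
+**     ...
+**     char *zErr = 0;
+**     if( sqlite3_exec(db, "SELECT * FROM t1;", cb, 0, &zErr)!=SQLITE_OK ){
+**       fprintf(stderr, "error: %s\n", zErr);
+**       sqlite3_free(zErr);
+**     }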
+*/
+SQLITE_API int sqlite3_exec(
+  sqlite3 *db,                /* The database on which the SQL executes */
+  const char *zSql,           /* The SQL to be executed */
+  sqlite3_callback xCallback, /* Invoke this callback routine */
+  void *pArg,                 /* First argument to xCallback() */
+  char **pzErrMsg             /* Write error messages here */
+){
+  int rc = SQLITE_OK;         /* Return code */
+  const char *zLeftover;      /* Tail of unprocessed SQL */
+  sqlite3_stmt *pStmt = 0;    /* The current SQL statement */
+  char **azCols = 0;          /* Names of result columns */
+  int callbackIsInit;         /* True if callback data is initialized */
+
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+  if( zSql==0 ) zSql = "";
+
+  sqlite3_mutex_enter(db->mutex);
+  sqlite3Error(db, SQLITE_OK);
+  while( rc==SQLITE_OK && zSql[0] ){
+    int nCol;
+    char **azVals = 0;
+
+    pStmt = 0;
+    rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover);
+    assert( rc==SQLITE_OK || pStmt==0 );
+    if( rc!=SQLITE_OK ){
+      continue;
+    }
+    if( !pStmt ){
+      /* this happens for a comment or white-space */
+      zSql = zLeftover;
+      continue;
+    }
+
+    callbackIsInit = 0;
+    nCol = sqlite3_column_count(pStmt);
+
+    while( 1 ){
+      int i;
+      rc = sqlite3_step(pStmt);
+
+      /* Invoke the callback function if required */
+      if( xCallback && (SQLITE_ROW==rc ||
+          (SQLITE_DONE==rc && !callbackIsInit
+                           && db->flags&SQLITE_NullCallback)) ){
+        if( !callbackIsInit ){
+          azCols = sqlite3DbMallocZero(db, 2*nCol*sizeof(const char*) + 1);
+          if( azCols==0 ){
+            goto exec_out;
+          }
+          for(i=0; i<nCol; i++){
+            azCols[i] = (char *)sqlite3_column_name(pStmt, i);
+            /* sqlite3VdbeSetColName() installs column names as UTF8
+            ** strings so there is no way for sqlite3_column_name() to fail. */
+            assert( azCols[i]!=0 );
+          }
+          callbackIsInit = 1;
+        }
+        if( rc==SQLITE_ROW ){
+          azVals = &azCols[nCol];
+          for(i=0; i<nCol; i++){
+            azVals[i] = (char *)sqlite3_column_text(pStmt, i);
+            if( !azVals[i] && sqlite3_column_type(pStmt, i)!=SQLITE_NULL ){
+              sqlite3OomFault(db);
+              goto exec_out;
+            }
+          }
+        }
+        if( xCallback(pArg, nCol, azVals, azCols) ){
+          /* If the callback function returns non-zero, then sqlite3_exec()
+          ** returns SQLITE_ABORT. */
+          rc = SQLITE_ABORT;
+          sqlite3VdbeFinalize((Vdbe *)pStmt);
+          pStmt = 0;
+          sqlite3Error(db, SQLITE_ABORT);
+          goto exec_out;
+        }
+      }
+
+      if( rc!=SQLITE_ROW ){
+        rc = sqlite3VdbeFinalize((Vdbe *)pStmt);
+        pStmt = 0;
+        zSql = zLeftover;
+        while( sqlite3Isspace(zSql[0]) ) zSql++;
+        break;
+      }
+    }
+
+    sqlite3DbFree(db, azCols);
+    azCols = 0;
+  }
+
+exec_out:
+  if( pStmt ) sqlite3VdbeFinalize((Vdbe *)pStmt);
+  sqlite3DbFree(db, azCols);
+
+  rc = sqlite3ApiExit(db, rc);
+  if( rc!=SQLITE_OK && pzErrMsg ){
+    int nErrMsg = 1 + sqlite3Strlen30(sqlite3_errmsg(db));
+    *pzErrMsg = sqlite3Malloc(nErrMsg);
+    if( *pzErrMsg ){
+      memcpy(*pzErrMsg, sqlite3_errmsg(db), nErrMsg);
+    }else{
+      rc = SQLITE_NOMEM_BKPT;
+      sqlite3Error(db, SQLITE_NOMEM);
+    }
+  }else if( pzErrMsg ){
+    *pzErrMsg = 0;
+  }
+
+  assert( (rc&db->errMask)==rc );
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+/************** End of legacy.c **********************************************/
+/************** Begin file loadext.c *****************************************/
+/*
+** 2006 June 7
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to dynamically load extensions into
+** the SQLite library.
+*/
+
+#ifndef SQLITE_CORE
+  #define SQLITE_CORE 1  /* Disable the API redefinition in sqlite3ext.h */
+#endif
+/************** Include sqlite3ext.h in the middle of loadext.c **************/
+/************** Begin file sqlite3ext.h **************************************/
+/*
+** 2006 June 7
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the SQLite interface for use by
+** shared libraries that want to be imported as extensions into
+** an SQLite instance.  Shared libraries that intend to be loaded
+** as extensions by SQLite should #include this file instead of
+** sqlite3.h.
+*/
+#ifndef SQLITE3EXT_H
+#define SQLITE3EXT_H
+/* #include "sqlite3.h" */
+
+/*
+** The following structure holds pointers to all of the SQLite API
+** routines.
+**
+** WARNING:  In order to maintain backwards compatibility, add new
+** interfaces to the end of this structure only.  If you insert new
+** interfaces in the middle of this structure, then different
+** versions of SQLite will not be able to load each other's shared
+** libraries!
+*/ +struct sqlite3_api_routines { + void * (*aggregate_context)(sqlite3_context*,int nBytes); + int (*aggregate_count)(sqlite3_context*); + int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); + int (*bind_double)(sqlite3_stmt*,int,double); + int (*bind_int)(sqlite3_stmt*,int,int); + int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); + int (*bind_null)(sqlite3_stmt*,int); + int (*bind_parameter_count)(sqlite3_stmt*); + int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); + const char * (*bind_parameter_name)(sqlite3_stmt*,int); + int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); + int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); + int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); + int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); + int (*busy_timeout)(sqlite3*,int ms); + int (*changes)(sqlite3*); + int (*close)(sqlite3*); + int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, + int eTextRep,const char*)); + int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, + int eTextRep,const void*)); + const void * (*column_blob)(sqlite3_stmt*,int iCol); + int (*column_bytes)(sqlite3_stmt*,int iCol); + int (*column_bytes16)(sqlite3_stmt*,int iCol); + int (*column_count)(sqlite3_stmt*pStmt); + const char * (*column_database_name)(sqlite3_stmt*,int); + const void * (*column_database_name16)(sqlite3_stmt*,int); + const char * (*column_decltype)(sqlite3_stmt*,int i); + const void * (*column_decltype16)(sqlite3_stmt*,int); + double (*column_double)(sqlite3_stmt*,int iCol); + int (*column_int)(sqlite3_stmt*,int iCol); + sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); + const char * (*column_name)(sqlite3_stmt*,int); + const void * (*column_name16)(sqlite3_stmt*,int); + const char * (*column_origin_name)(sqlite3_stmt*,int); + const void * (*column_origin_name16)(sqlite3_stmt*,int); + const char * (*column_table_name)(sqlite3_stmt*,int); + const void * (*column_table_name16)(sqlite3_stmt*,int); + const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); + const void * (*column_text16)(sqlite3_stmt*,int iCol); + int (*column_type)(sqlite3_stmt*,int iCol); + sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); + void * (*commit_hook)(sqlite3*,int(*)(void*),void*); + int (*complete)(const char*sql); + int (*complete16)(const void*sql); + int (*create_collation)(sqlite3*,const char*,int,void*, + int(*)(void*,int,const void*,int,const void*)); + int (*create_collation16)(sqlite3*,const void*,int,void*, + int(*)(void*,int,const void*,int,const void*)); + int (*create_function)(sqlite3*,const char*,int,int,void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*)); + int (*create_function16)(sqlite3*,const void*,int,int,void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*)); + int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); + int (*data_count)(sqlite3_stmt*pStmt); + sqlite3 * (*db_handle)(sqlite3_stmt*); + int (*declare_vtab)(sqlite3*,const char*); + int (*enable_shared_cache)(int); + int (*errcode)(sqlite3*db); + const char * (*errmsg)(sqlite3*); + const void * (*errmsg16)(sqlite3*); + int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); + int (*expired)(sqlite3_stmt*); + int (*finalize)(sqlite3_stmt*pStmt); + void (*free)(void*); + void (*free_table)(char**result); + int 
(*get_autocommit)(sqlite3*); + void * (*get_auxdata)(sqlite3_context*,int); + int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); + int (*global_recover)(void); + void (*interruptx)(sqlite3*); + sqlite_int64 (*last_insert_rowid)(sqlite3*); + const char * (*libversion)(void); + int (*libversion_number)(void); + void *(*malloc)(int); + char * (*mprintf)(const char*,...); + int (*open)(const char*,sqlite3**); + int (*open16)(const void*,sqlite3**); + int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); + int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); + void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); + void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); + void *(*realloc)(void*,int); + int (*reset)(sqlite3_stmt*pStmt); + void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_double)(sqlite3_context*,double); + void (*result_error)(sqlite3_context*,const char*,int); + void (*result_error16)(sqlite3_context*,const void*,int); + void (*result_int)(sqlite3_context*,int); + void (*result_int64)(sqlite3_context*,sqlite_int64); + void (*result_null)(sqlite3_context*); + void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); + void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); + void (*result_value)(sqlite3_context*,sqlite3_value*); + void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); + int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, + const char*,const char*),void*); + void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); + char * (*snprintf)(int,char*,const char*,...); + int (*step)(sqlite3_stmt*); + int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, + char const**,char const**,int*,int*,int*); + void (*thread_cleanup)(void); + int (*total_changes)(sqlite3*); + void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); + int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); + void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, + sqlite_int64),void*); + void * (*user_data)(sqlite3_context*); + const void * (*value_blob)(sqlite3_value*); + int (*value_bytes)(sqlite3_value*); + int (*value_bytes16)(sqlite3_value*); + double (*value_double)(sqlite3_value*); + int (*value_int)(sqlite3_value*); + sqlite_int64 (*value_int64)(sqlite3_value*); + int (*value_numeric_type)(sqlite3_value*); + const unsigned char * (*value_text)(sqlite3_value*); + const void * (*value_text16)(sqlite3_value*); + const void * (*value_text16be)(sqlite3_value*); + const void * (*value_text16le)(sqlite3_value*); + int (*value_type)(sqlite3_value*); + char *(*vmprintf)(const char*,va_list); + /* Added ??? 
*/ + int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); + /* Added by 3.3.13 */ + int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); + int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); + int (*clear_bindings)(sqlite3_stmt*); + /* Added by 3.4.1 */ + int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, + void (*xDestroy)(void *)); + /* Added by 3.5.0 */ + int (*bind_zeroblob)(sqlite3_stmt*,int,int); + int (*blob_bytes)(sqlite3_blob*); + int (*blob_close)(sqlite3_blob*); + int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, + int,sqlite3_blob**); + int (*blob_read)(sqlite3_blob*,void*,int,int); + int (*blob_write)(sqlite3_blob*,const void*,int,int); + int (*create_collation_v2)(sqlite3*,const char*,int,void*, + int(*)(void*,int,const void*,int,const void*), + void(*)(void*)); + int (*file_control)(sqlite3*,const char*,int,void*); + sqlite3_int64 (*memory_highwater)(int); + sqlite3_int64 (*memory_used)(void); + sqlite3_mutex *(*mutex_alloc)(int); + void (*mutex_enter)(sqlite3_mutex*); + void (*mutex_free)(sqlite3_mutex*); + void (*mutex_leave)(sqlite3_mutex*); + int (*mutex_try)(sqlite3_mutex*); + int (*open_v2)(const char*,sqlite3**,int,const char*); + int (*release_memory)(int); + void (*result_error_nomem)(sqlite3_context*); + void (*result_error_toobig)(sqlite3_context*); + int (*sleep)(int); + void (*soft_heap_limit)(int); + sqlite3_vfs *(*vfs_find)(const char*); + int (*vfs_register)(sqlite3_vfs*,int); + int (*vfs_unregister)(sqlite3_vfs*); + int (*xthreadsafe)(void); + void (*result_zeroblob)(sqlite3_context*,int); + void (*result_error_code)(sqlite3_context*,int); + int (*test_control)(int, ...); + void (*randomness)(int,void*); + sqlite3 *(*context_db_handle)(sqlite3_context*); + int (*extended_result_codes)(sqlite3*,int); + int (*limit)(sqlite3*,int,int); + sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); + const char *(*sql)(sqlite3_stmt*); + int (*status)(int,int*,int*,int); + int (*backup_finish)(sqlite3_backup*); + sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); + int (*backup_pagecount)(sqlite3_backup*); + int (*backup_remaining)(sqlite3_backup*); + int (*backup_step)(sqlite3_backup*,int); + const char *(*compileoption_get)(int); + int (*compileoption_used)(const char*); + int (*create_function_v2)(sqlite3*,const char*,int,int,void*, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*), + void(*xDestroy)(void*)); + int (*db_config)(sqlite3*,int,...); + sqlite3_mutex *(*db_mutex)(sqlite3*); + int (*db_status)(sqlite3*,int,int*,int*,int); + int (*extended_errcode)(sqlite3*); + void (*log)(int,const char*,...); + sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); + const char *(*sourceid)(void); + int (*stmt_status)(sqlite3_stmt*,int,int); + int (*strnicmp)(const char*,const char*,int); + int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); + int (*wal_autocheckpoint)(sqlite3*,int); + int (*wal_checkpoint)(sqlite3*,const char*); + void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); + int (*blob_reopen)(sqlite3_blob*,sqlite3_int64); + int (*vtab_config)(sqlite3*,int op,...); + int (*vtab_on_conflict)(sqlite3*); + /* Version 3.7.16 and later */ + int (*close_v2)(sqlite3*); + const char *(*db_filename)(sqlite3*,const char*); + int (*db_readonly)(sqlite3*,const char*); + int (*db_release_memory)(sqlite3*); + const char 
*(*errstr)(int); + int (*stmt_busy)(sqlite3_stmt*); + int (*stmt_readonly)(sqlite3_stmt*); + int (*stricmp)(const char*,const char*); + int (*uri_boolean)(const char*,const char*,int); + sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); + const char *(*uri_parameter)(const char*,const char*); + char *(*vsnprintf)(int,char*,const char*,va_list); + int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); + /* Version 3.8.7 and later */ + int (*auto_extension)(void(*)(void)); + int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64, + void(*)(void*)); + int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64, + void(*)(void*),unsigned char); + int (*cancel_auto_extension)(void(*)(void)); + int (*load_extension)(sqlite3*,const char*,const char*,char**); + void *(*malloc64)(sqlite3_uint64); + sqlite3_uint64 (*msize)(void*); + void *(*realloc64)(void*,sqlite3_uint64); + void (*reset_auto_extension)(void); + void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64, + void(*)(void*)); + void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64, + void(*)(void*), unsigned char); + int (*strglob)(const char*,const char*); + /* Version 3.8.11 and later */ + sqlite3_value *(*value_dup)(const sqlite3_value*); + void (*value_free)(sqlite3_value*); + int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64); + int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64); + /* Version 3.9.0 and later */ + unsigned int (*value_subtype)(sqlite3_value*); + void (*result_subtype)(sqlite3_context*,unsigned int); + /* Version 3.10.0 and later */ + int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int); + int (*strlike)(const char*,const char*,unsigned int); + int (*db_cacheflush)(sqlite3*); + /* Version 3.12.0 and later */ + int (*system_errno)(sqlite3*); + /* Version 3.14.0 and later */ + int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*); + char *(*expanded_sql)(sqlite3_stmt*); +}; + +/* +** This is the function signature used for all extension entry points. It +** is also defined in the file "loadext.c". +*/ +typedef int (*sqlite3_loadext_entry)( + sqlite3 *db, /* Handle to the database. */ + char **pzErrMsg, /* Used to set error string on failure. */ + const sqlite3_api_routines *pThunk /* Extension API function pointers. */ +); + +/* +** The following macros redefine the API routines so that they are +** redirected through the global sqlite3_api structure. +** +** This header file is also used by the loadext.c source file +** (part of the main SQLite library - not an extension) so that +** it can get access to the sqlite3_api_routines structure +** definition. But the main library does not want to redefine +** the API. So the redefinition macros are only valid if the +** SQLITE_CORE macros is undefined. 
+*/ +#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) +#define sqlite3_aggregate_context sqlite3_api->aggregate_context +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_aggregate_count sqlite3_api->aggregate_count +#endif +#define sqlite3_bind_blob sqlite3_api->bind_blob +#define sqlite3_bind_double sqlite3_api->bind_double +#define sqlite3_bind_int sqlite3_api->bind_int +#define sqlite3_bind_int64 sqlite3_api->bind_int64 +#define sqlite3_bind_null sqlite3_api->bind_null +#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count +#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index +#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name +#define sqlite3_bind_text sqlite3_api->bind_text +#define sqlite3_bind_text16 sqlite3_api->bind_text16 +#define sqlite3_bind_value sqlite3_api->bind_value +#define sqlite3_busy_handler sqlite3_api->busy_handler +#define sqlite3_busy_timeout sqlite3_api->busy_timeout +#define sqlite3_changes sqlite3_api->changes +#define sqlite3_close sqlite3_api->close +#define sqlite3_collation_needed sqlite3_api->collation_needed +#define sqlite3_collation_needed16 sqlite3_api->collation_needed16 +#define sqlite3_column_blob sqlite3_api->column_blob +#define sqlite3_column_bytes sqlite3_api->column_bytes +#define sqlite3_column_bytes16 sqlite3_api->column_bytes16 +#define sqlite3_column_count sqlite3_api->column_count +#define sqlite3_column_database_name sqlite3_api->column_database_name +#define sqlite3_column_database_name16 sqlite3_api->column_database_name16 +#define sqlite3_column_decltype sqlite3_api->column_decltype +#define sqlite3_column_decltype16 sqlite3_api->column_decltype16 +#define sqlite3_column_double sqlite3_api->column_double +#define sqlite3_column_int sqlite3_api->column_int +#define sqlite3_column_int64 sqlite3_api->column_int64 +#define sqlite3_column_name sqlite3_api->column_name +#define sqlite3_column_name16 sqlite3_api->column_name16 +#define sqlite3_column_origin_name sqlite3_api->column_origin_name +#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 +#define sqlite3_column_table_name sqlite3_api->column_table_name +#define sqlite3_column_table_name16 sqlite3_api->column_table_name16 +#define sqlite3_column_text sqlite3_api->column_text +#define sqlite3_column_text16 sqlite3_api->column_text16 +#define sqlite3_column_type sqlite3_api->column_type +#define sqlite3_column_value sqlite3_api->column_value +#define sqlite3_commit_hook sqlite3_api->commit_hook +#define sqlite3_complete sqlite3_api->complete +#define sqlite3_complete16 sqlite3_api->complete16 +#define sqlite3_create_collation sqlite3_api->create_collation +#define sqlite3_create_collation16 sqlite3_api->create_collation16 +#define sqlite3_create_function sqlite3_api->create_function +#define sqlite3_create_function16 sqlite3_api->create_function16 +#define sqlite3_create_module sqlite3_api->create_module +#define sqlite3_create_module_v2 sqlite3_api->create_module_v2 +#define sqlite3_data_count sqlite3_api->data_count +#define sqlite3_db_handle sqlite3_api->db_handle +#define sqlite3_declare_vtab sqlite3_api->declare_vtab +#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache +#define sqlite3_errcode sqlite3_api->errcode +#define sqlite3_errmsg sqlite3_api->errmsg +#define sqlite3_errmsg16 sqlite3_api->errmsg16 +#define sqlite3_exec sqlite3_api->exec +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_expired sqlite3_api->expired +#endif +#define sqlite3_finalize sqlite3_api->finalize 
+#define sqlite3_free sqlite3_api->free +#define sqlite3_free_table sqlite3_api->free_table +#define sqlite3_get_autocommit sqlite3_api->get_autocommit +#define sqlite3_get_auxdata sqlite3_api->get_auxdata +#define sqlite3_get_table sqlite3_api->get_table +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_global_recover sqlite3_api->global_recover +#endif +#define sqlite3_interrupt sqlite3_api->interruptx +#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid +#define sqlite3_libversion sqlite3_api->libversion +#define sqlite3_libversion_number sqlite3_api->libversion_number +#define sqlite3_malloc sqlite3_api->malloc +#define sqlite3_mprintf sqlite3_api->mprintf +#define sqlite3_open sqlite3_api->open +#define sqlite3_open16 sqlite3_api->open16 +#define sqlite3_prepare sqlite3_api->prepare +#define sqlite3_prepare16 sqlite3_api->prepare16 +#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 +#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 +#define sqlite3_profile sqlite3_api->profile +#define sqlite3_progress_handler sqlite3_api->progress_handler +#define sqlite3_realloc sqlite3_api->realloc +#define sqlite3_reset sqlite3_api->reset +#define sqlite3_result_blob sqlite3_api->result_blob +#define sqlite3_result_double sqlite3_api->result_double +#define sqlite3_result_error sqlite3_api->result_error +#define sqlite3_result_error16 sqlite3_api->result_error16 +#define sqlite3_result_int sqlite3_api->result_int +#define sqlite3_result_int64 sqlite3_api->result_int64 +#define sqlite3_result_null sqlite3_api->result_null +#define sqlite3_result_text sqlite3_api->result_text +#define sqlite3_result_text16 sqlite3_api->result_text16 +#define sqlite3_result_text16be sqlite3_api->result_text16be +#define sqlite3_result_text16le sqlite3_api->result_text16le +#define sqlite3_result_value sqlite3_api->result_value +#define sqlite3_rollback_hook sqlite3_api->rollback_hook +#define sqlite3_set_authorizer sqlite3_api->set_authorizer +#define sqlite3_set_auxdata sqlite3_api->set_auxdata +#define sqlite3_snprintf sqlite3_api->snprintf +#define sqlite3_step sqlite3_api->step +#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata +#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup +#define sqlite3_total_changes sqlite3_api->total_changes +#define sqlite3_trace sqlite3_api->trace +#ifndef SQLITE_OMIT_DEPRECATED +#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings +#endif +#define sqlite3_update_hook sqlite3_api->update_hook +#define sqlite3_user_data sqlite3_api->user_data +#define sqlite3_value_blob sqlite3_api->value_blob +#define sqlite3_value_bytes sqlite3_api->value_bytes +#define sqlite3_value_bytes16 sqlite3_api->value_bytes16 +#define sqlite3_value_double sqlite3_api->value_double +#define sqlite3_value_int sqlite3_api->value_int +#define sqlite3_value_int64 sqlite3_api->value_int64 +#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type +#define sqlite3_value_text sqlite3_api->value_text +#define sqlite3_value_text16 sqlite3_api->value_text16 +#define sqlite3_value_text16be sqlite3_api->value_text16be +#define sqlite3_value_text16le sqlite3_api->value_text16le +#define sqlite3_value_type sqlite3_api->value_type +#define sqlite3_vmprintf sqlite3_api->vmprintf +#define sqlite3_vsnprintf sqlite3_api->vsnprintf +#define sqlite3_overload_function sqlite3_api->overload_function +#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 +#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 +#define sqlite3_clear_bindings 
sqlite3_api->clear_bindings +#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob +#define sqlite3_blob_bytes sqlite3_api->blob_bytes +#define sqlite3_blob_close sqlite3_api->blob_close +#define sqlite3_blob_open sqlite3_api->blob_open +#define sqlite3_blob_read sqlite3_api->blob_read +#define sqlite3_blob_write sqlite3_api->blob_write +#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 +#define sqlite3_file_control sqlite3_api->file_control +#define sqlite3_memory_highwater sqlite3_api->memory_highwater +#define sqlite3_memory_used sqlite3_api->memory_used +#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc +#define sqlite3_mutex_enter sqlite3_api->mutex_enter +#define sqlite3_mutex_free sqlite3_api->mutex_free +#define sqlite3_mutex_leave sqlite3_api->mutex_leave +#define sqlite3_mutex_try sqlite3_api->mutex_try +#define sqlite3_open_v2 sqlite3_api->open_v2 +#define sqlite3_release_memory sqlite3_api->release_memory +#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem +#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig +#define sqlite3_sleep sqlite3_api->sleep +#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit +#define sqlite3_vfs_find sqlite3_api->vfs_find +#define sqlite3_vfs_register sqlite3_api->vfs_register +#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister +#define sqlite3_threadsafe sqlite3_api->xthreadsafe +#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob +#define sqlite3_result_error_code sqlite3_api->result_error_code +#define sqlite3_test_control sqlite3_api->test_control +#define sqlite3_randomness sqlite3_api->randomness +#define sqlite3_context_db_handle sqlite3_api->context_db_handle +#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes +#define sqlite3_limit sqlite3_api->limit +#define sqlite3_next_stmt sqlite3_api->next_stmt +#define sqlite3_sql sqlite3_api->sql +#define sqlite3_status sqlite3_api->status +#define sqlite3_backup_finish sqlite3_api->backup_finish +#define sqlite3_backup_init sqlite3_api->backup_init +#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount +#define sqlite3_backup_remaining sqlite3_api->backup_remaining +#define sqlite3_backup_step sqlite3_api->backup_step +#define sqlite3_compileoption_get sqlite3_api->compileoption_get +#define sqlite3_compileoption_used sqlite3_api->compileoption_used +#define sqlite3_create_function_v2 sqlite3_api->create_function_v2 +#define sqlite3_db_config sqlite3_api->db_config +#define sqlite3_db_mutex sqlite3_api->db_mutex +#define sqlite3_db_status sqlite3_api->db_status +#define sqlite3_extended_errcode sqlite3_api->extended_errcode +#define sqlite3_log sqlite3_api->log +#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 +#define sqlite3_sourceid sqlite3_api->sourceid +#define sqlite3_stmt_status sqlite3_api->stmt_status +#define sqlite3_strnicmp sqlite3_api->strnicmp +#define sqlite3_unlock_notify sqlite3_api->unlock_notify +#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint +#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint +#define sqlite3_wal_hook sqlite3_api->wal_hook +#define sqlite3_blob_reopen sqlite3_api->blob_reopen +#define sqlite3_vtab_config sqlite3_api->vtab_config +#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict +/* Version 3.7.16 and later */ +#define sqlite3_close_v2 sqlite3_api->close_v2 +#define sqlite3_db_filename sqlite3_api->db_filename +#define sqlite3_db_readonly sqlite3_api->db_readonly +#define 
sqlite3_db_release_memory sqlite3_api->db_release_memory +#define sqlite3_errstr sqlite3_api->errstr +#define sqlite3_stmt_busy sqlite3_api->stmt_busy +#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly +#define sqlite3_stricmp sqlite3_api->stricmp +#define sqlite3_uri_boolean sqlite3_api->uri_boolean +#define sqlite3_uri_int64 sqlite3_api->uri_int64 +#define sqlite3_uri_parameter sqlite3_api->uri_parameter +#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf +#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 +/* Version 3.8.7 and later */ +#define sqlite3_auto_extension sqlite3_api->auto_extension +#define sqlite3_bind_blob64 sqlite3_api->bind_blob64 +#define sqlite3_bind_text64 sqlite3_api->bind_text64 +#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension +#define sqlite3_load_extension sqlite3_api->load_extension +#define sqlite3_malloc64 sqlite3_api->malloc64 +#define sqlite3_msize sqlite3_api->msize +#define sqlite3_realloc64 sqlite3_api->realloc64 +#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension +#define sqlite3_result_blob64 sqlite3_api->result_blob64 +#define sqlite3_result_text64 sqlite3_api->result_text64 +#define sqlite3_strglob sqlite3_api->strglob +/* Version 3.8.11 and later */ +#define sqlite3_value_dup sqlite3_api->value_dup +#define sqlite3_value_free sqlite3_api->value_free +#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64 +#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64 +/* Version 3.9.0 and later */ +#define sqlite3_value_subtype sqlite3_api->value_subtype +#define sqlite3_result_subtype sqlite3_api->result_subtype +/* Version 3.10.0 and later */ +#define sqlite3_status64 sqlite3_api->status64 +#define sqlite3_strlike sqlite3_api->strlike +#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush +/* Version 3.12.0 and later */ +#define sqlite3_system_errno sqlite3_api->system_errno +/* Version 3.14.0 and later */ +#define sqlite3_trace_v2 sqlite3_api->trace_v2 +#define sqlite3_expanded_sql sqlite3_api->expanded_sql +#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ + +#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) + /* This case when the file really is being compiled as a loadable + ** extension */ +# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; +# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; +# define SQLITE_EXTENSION_INIT3 \ + extern const sqlite3_api_routines *sqlite3_api; +#else + /* This case when the file is being statically linked into the + ** application */ +# define SQLITE_EXTENSION_INIT1 /*no-op*/ +# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ +# define SQLITE_EXTENSION_INIT3 /*no-op*/ +#endif + +#endif /* SQLITE3EXT_H */ + +/************** End of sqlite3ext.h ******************************************/ +/************** Continuing where we left off in loadext.c ********************/ +/* #include "sqliteInt.h" */ + +#ifndef SQLITE_OMIT_LOAD_EXTENSION +/* +** Some API routines are omitted when various features are +** excluded from a build of SQLite. Substitute a NULL pointer +** for any missing APIs. 
+*/ +#ifndef SQLITE_ENABLE_COLUMN_METADATA +# define sqlite3_column_database_name 0 +# define sqlite3_column_database_name16 0 +# define sqlite3_column_table_name 0 +# define sqlite3_column_table_name16 0 +# define sqlite3_column_origin_name 0 +# define sqlite3_column_origin_name16 0 +#endif + +#ifdef SQLITE_OMIT_AUTHORIZATION +# define sqlite3_set_authorizer 0 +#endif + +#ifdef SQLITE_OMIT_UTF16 +# define sqlite3_bind_text16 0 +# define sqlite3_collation_needed16 0 +# define sqlite3_column_decltype16 0 +# define sqlite3_column_name16 0 +# define sqlite3_column_text16 0 +# define sqlite3_complete16 0 +# define sqlite3_create_collation16 0 +# define sqlite3_create_function16 0 +# define sqlite3_errmsg16 0 +# define sqlite3_open16 0 +# define sqlite3_prepare16 0 +# define sqlite3_prepare16_v2 0 +# define sqlite3_result_error16 0 +# define sqlite3_result_text16 0 +# define sqlite3_result_text16be 0 +# define sqlite3_result_text16le 0 +# define sqlite3_value_text16 0 +# define sqlite3_value_text16be 0 +# define sqlite3_value_text16le 0 +# define sqlite3_column_database_name16 0 +# define sqlite3_column_table_name16 0 +# define sqlite3_column_origin_name16 0 +#endif + +#ifdef SQLITE_OMIT_COMPLETE +# define sqlite3_complete 0 +# define sqlite3_complete16 0 +#endif + +#ifdef SQLITE_OMIT_DECLTYPE +# define sqlite3_column_decltype16 0 +# define sqlite3_column_decltype 0 +#endif + +#ifdef SQLITE_OMIT_PROGRESS_CALLBACK +# define sqlite3_progress_handler 0 +#endif + +#ifdef SQLITE_OMIT_VIRTUALTABLE +# define sqlite3_create_module 0 +# define sqlite3_create_module_v2 0 +# define sqlite3_declare_vtab 0 +# define sqlite3_vtab_config 0 +# define sqlite3_vtab_on_conflict 0 +#endif + +#ifdef SQLITE_OMIT_SHARED_CACHE +# define sqlite3_enable_shared_cache 0 +#endif + +#if defined(SQLITE_OMIT_TRACE) || defined(SQLITE_OMIT_DEPRECATED) +# define sqlite3_profile 0 +# define sqlite3_trace 0 +#endif + +#ifdef SQLITE_OMIT_GET_TABLE +# define sqlite3_free_table 0 +# define sqlite3_get_table 0 +#endif + +#ifdef SQLITE_OMIT_INCRBLOB +#define sqlite3_bind_zeroblob 0 +#define sqlite3_blob_bytes 0 +#define sqlite3_blob_close 0 +#define sqlite3_blob_open 0 +#define sqlite3_blob_read 0 +#define sqlite3_blob_write 0 +#define sqlite3_blob_reopen 0 +#endif + +#if defined(SQLITE_OMIT_TRACE) +# define sqlite3_trace_v2 0 +#endif + +/* +** The following structure contains pointers to all SQLite API routines. +** A pointer to this structure is passed into extensions when they are +** loaded so that the extension can make calls back into the SQLite +** library. +** +** When adding new APIs, add them to the bottom of this structure +** in order to preserve backwards compatibility. +** +** Extensions that use newer APIs should first call the +** sqlite3_libversion_number() to make sure that the API they +** intend to use is supported by the library. Extensions should +** also check to make sure that the pointer to the function is +** not NULL before calling it. 
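+**
+** For example (an illustrative sketch, not from the original sources),
+** an extension that wants to use sqlite3_strlike(), which was added in
+** version 3.10.0, can guard the call like this:
+**
+**     if( sqlite3_libversion_number()>=3010000 && sqlite3_strlike!=0 ){
+**       isMatch = sqlite3_strlike("abc%", zText, 0)==0;
+**     }
+**
+** (Inside an extension the sqlite3_strlike name is a macro that
+** redirects through sqlite3_api->strlike, so the NULL test above
+** checks the function pointer itself.)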
+*/ +static const sqlite3_api_routines sqlite3Apis = { + sqlite3_aggregate_context, +#ifndef SQLITE_OMIT_DEPRECATED + sqlite3_aggregate_count, +#else + 0, +#endif + sqlite3_bind_blob, + sqlite3_bind_double, + sqlite3_bind_int, + sqlite3_bind_int64, + sqlite3_bind_null, + sqlite3_bind_parameter_count, + sqlite3_bind_parameter_index, + sqlite3_bind_parameter_name, + sqlite3_bind_text, + sqlite3_bind_text16, + sqlite3_bind_value, + sqlite3_busy_handler, + sqlite3_busy_timeout, + sqlite3_changes, + sqlite3_close, + sqlite3_collation_needed, + sqlite3_collation_needed16, + sqlite3_column_blob, + sqlite3_column_bytes, + sqlite3_column_bytes16, + sqlite3_column_count, + sqlite3_column_database_name, + sqlite3_column_database_name16, + sqlite3_column_decltype, + sqlite3_column_decltype16, + sqlite3_column_double, + sqlite3_column_int, + sqlite3_column_int64, + sqlite3_column_name, + sqlite3_column_name16, + sqlite3_column_origin_name, + sqlite3_column_origin_name16, + sqlite3_column_table_name, + sqlite3_column_table_name16, + sqlite3_column_text, + sqlite3_column_text16, + sqlite3_column_type, + sqlite3_column_value, + sqlite3_commit_hook, + sqlite3_complete, + sqlite3_complete16, + sqlite3_create_collation, + sqlite3_create_collation16, + sqlite3_create_function, + sqlite3_create_function16, + sqlite3_create_module, + sqlite3_data_count, + sqlite3_db_handle, + sqlite3_declare_vtab, + sqlite3_enable_shared_cache, + sqlite3_errcode, + sqlite3_errmsg, + sqlite3_errmsg16, + sqlite3_exec, +#ifndef SQLITE_OMIT_DEPRECATED + sqlite3_expired, +#else + 0, +#endif + sqlite3_finalize, + sqlite3_free, + sqlite3_free_table, + sqlite3_get_autocommit, + sqlite3_get_auxdata, + sqlite3_get_table, + 0, /* Was sqlite3_global_recover(), but that function is deprecated */ + sqlite3_interrupt, + sqlite3_last_insert_rowid, + sqlite3_libversion, + sqlite3_libversion_number, + sqlite3_malloc, + sqlite3_mprintf, + sqlite3_open, + sqlite3_open16, + sqlite3_prepare, + sqlite3_prepare16, + sqlite3_profile, + sqlite3_progress_handler, + sqlite3_realloc, + sqlite3_reset, + sqlite3_result_blob, + sqlite3_result_double, + sqlite3_result_error, + sqlite3_result_error16, + sqlite3_result_int, + sqlite3_result_int64, + sqlite3_result_null, + sqlite3_result_text, + sqlite3_result_text16, + sqlite3_result_text16be, + sqlite3_result_text16le, + sqlite3_result_value, + sqlite3_rollback_hook, + sqlite3_set_authorizer, + sqlite3_set_auxdata, + sqlite3_snprintf, + sqlite3_step, + sqlite3_table_column_metadata, +#ifndef SQLITE_OMIT_DEPRECATED + sqlite3_thread_cleanup, +#else + 0, +#endif + sqlite3_total_changes, + sqlite3_trace, +#ifndef SQLITE_OMIT_DEPRECATED + sqlite3_transfer_bindings, +#else + 0, +#endif + sqlite3_update_hook, + sqlite3_user_data, + sqlite3_value_blob, + sqlite3_value_bytes, + sqlite3_value_bytes16, + sqlite3_value_double, + sqlite3_value_int, + sqlite3_value_int64, + sqlite3_value_numeric_type, + sqlite3_value_text, + sqlite3_value_text16, + sqlite3_value_text16be, + sqlite3_value_text16le, + sqlite3_value_type, + sqlite3_vmprintf, + /* + ** The original API set ends here. All extensions can call any + ** of the APIs above provided that the pointer is not NULL. But + ** before calling APIs that follow, extension should check the + ** sqlite3_libversion_number() to make sure they are dealing with + ** a library that is new enough to support that API. 
+ ************************************************************************* + */ + sqlite3_overload_function, + + /* + ** Added after 3.3.13 + */ + sqlite3_prepare_v2, + sqlite3_prepare16_v2, + sqlite3_clear_bindings, + + /* + ** Added for 3.4.1 + */ + sqlite3_create_module_v2, + + /* + ** Added for 3.5.0 + */ + sqlite3_bind_zeroblob, + sqlite3_blob_bytes, + sqlite3_blob_close, + sqlite3_blob_open, + sqlite3_blob_read, + sqlite3_blob_write, + sqlite3_create_collation_v2, + sqlite3_file_control, + sqlite3_memory_highwater, + sqlite3_memory_used, +#ifdef SQLITE_MUTEX_OMIT + 0, + 0, + 0, + 0, + 0, +#else + sqlite3_mutex_alloc, + sqlite3_mutex_enter, + sqlite3_mutex_free, + sqlite3_mutex_leave, + sqlite3_mutex_try, +#endif + sqlite3_open_v2, + sqlite3_release_memory, + sqlite3_result_error_nomem, + sqlite3_result_error_toobig, + sqlite3_sleep, + sqlite3_soft_heap_limit, + sqlite3_vfs_find, + sqlite3_vfs_register, + sqlite3_vfs_unregister, + + /* + ** Added for 3.5.8 + */ + sqlite3_threadsafe, + sqlite3_result_zeroblob, + sqlite3_result_error_code, + sqlite3_test_control, + sqlite3_randomness, + sqlite3_context_db_handle, + + /* + ** Added for 3.6.0 + */ + sqlite3_extended_result_codes, + sqlite3_limit, + sqlite3_next_stmt, + sqlite3_sql, + sqlite3_status, + + /* + ** Added for 3.7.4 + */ + sqlite3_backup_finish, + sqlite3_backup_init, + sqlite3_backup_pagecount, + sqlite3_backup_remaining, + sqlite3_backup_step, +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS + sqlite3_compileoption_get, + sqlite3_compileoption_used, +#else + 0, + 0, +#endif + sqlite3_create_function_v2, + sqlite3_db_config, + sqlite3_db_mutex, + sqlite3_db_status, + sqlite3_extended_errcode, + sqlite3_log, + sqlite3_soft_heap_limit64, + sqlite3_sourceid, + sqlite3_stmt_status, + sqlite3_strnicmp, +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY + sqlite3_unlock_notify, +#else + 0, +#endif +#ifndef SQLITE_OMIT_WAL + sqlite3_wal_autocheckpoint, + sqlite3_wal_checkpoint, + sqlite3_wal_hook, +#else + 0, + 0, + 0, +#endif + sqlite3_blob_reopen, + sqlite3_vtab_config, + sqlite3_vtab_on_conflict, + sqlite3_close_v2, + sqlite3_db_filename, + sqlite3_db_readonly, + sqlite3_db_release_memory, + sqlite3_errstr, + sqlite3_stmt_busy, + sqlite3_stmt_readonly, + sqlite3_stricmp, + sqlite3_uri_boolean, + sqlite3_uri_int64, + sqlite3_uri_parameter, + sqlite3_vsnprintf, + sqlite3_wal_checkpoint_v2, + /* Version 3.8.7 and later */ + sqlite3_auto_extension, + sqlite3_bind_blob64, + sqlite3_bind_text64, + sqlite3_cancel_auto_extension, + sqlite3_load_extension, + sqlite3_malloc64, + sqlite3_msize, + sqlite3_realloc64, + sqlite3_reset_auto_extension, + sqlite3_result_blob64, + sqlite3_result_text64, + sqlite3_strglob, + /* Version 3.8.11 and later */ + (sqlite3_value*(*)(const sqlite3_value*))sqlite3_value_dup, + sqlite3_value_free, + sqlite3_result_zeroblob64, + sqlite3_bind_zeroblob64, + /* Version 3.9.0 and later */ + sqlite3_value_subtype, + sqlite3_result_subtype, + /* Version 3.10.0 and later */ + sqlite3_status64, + sqlite3_strlike, + sqlite3_db_cacheflush, + /* Version 3.12.0 and later */ + sqlite3_system_errno, + /* Version 3.14.0 and later */ + sqlite3_trace_v2, + sqlite3_expanded_sql +}; + +/* +** Attempt to load an SQLite extension library contained in the file +** zFile. The entry point is zProc. zProc may be 0 in which case a +** default entry point name (sqlite3_extension_init) is used. Use +** of the default name is recommended. +** +** Return SQLITE_OK on success and SQLITE_ERROR if something goes wrong. 
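+**
+** A typical call reaches this routine through the public wrapper
+** sqlite3_load_extension() defined below; a usage sketch, with an
+** invented file name and assuming extension loading has been enabled:
+**
+**     char *zErr = 0;
+**     sqlite3_enable_load_extension(db, 1);
+**     if( sqlite3_load_extension(db, "./myext", 0, &zErr)!=SQLITE_OK ){
+**       fprintf(stderr, "load failed: %s\n", zErr);
+**       sqlite3_free(zErr);
+**     }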
+**
+** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with
+** error message text.  The calling function should free this memory
+** by calling sqlite3DbFree(db, ).
+*/
+static int sqlite3LoadExtension(
+  sqlite3 *db,          /* Load the extension into this database connection */
+  const char *zFile,    /* Name of the shared library containing extension */
+  const char *zProc,    /* Entry point.  Use "sqlite3_extension_init" if 0 */
+  char **pzErrMsg       /* Put error message here if not 0 */
+){
+  sqlite3_vfs *pVfs = db->pVfs;
+  void *handle;
+  sqlite3_loadext_entry xInit;
+  char *zErrmsg = 0;
+  const char *zEntry;
+  char *zAltEntry = 0;
+  void **aHandle;
+  u64 nMsg = 300 + sqlite3Strlen30(zFile);
+  int ii;
+  int rc;
+
+  /* Shared library endings to try if zFile cannot be loaded as written */
+  static const char *azEndings[] = {
+#if SQLITE_OS_WIN
+     "dll"
+#elif defined(__APPLE__)
+     "dylib"
+#else
+     "so"
+#endif
+  };
+
+
+  if( pzErrMsg ) *pzErrMsg = 0;
+
+  /* Ticket #1863.  To avoid creating security problems for older
+  ** applications that relink against newer versions of SQLite, the
+  ** ability to run load_extension is turned off by default.  One
+  ** must call either sqlite3_enable_load_extension(db) or
+  ** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, 0)
+  ** to turn on extension loading.
+  */
+  if( (db->flags & SQLITE_LoadExtension)==0 ){
+    if( pzErrMsg ){
+      *pzErrMsg = sqlite3_mprintf("not authorized");
+    }
+    return SQLITE_ERROR;
+  }
+
+  zEntry = zProc ? zProc : "sqlite3_extension_init";
+
+  handle = sqlite3OsDlOpen(pVfs, zFile);
+#if SQLITE_OS_UNIX || SQLITE_OS_WIN
+  for(ii=0; ii<ArraySize(azEndings) && handle==0; ii++){
+    char *zAltFile = sqlite3_mprintf("%s.%s", zFile, azEndings[ii]);
+    if( zAltFile==0 ) return SQLITE_NOMEM_BKPT;
+    handle = sqlite3OsDlOpen(pVfs, zAltFile);
+    sqlite3_free(zAltFile);
+  }
+#endif
+  if( handle==0 ){
+    if( pzErrMsg ){
+      *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg);
+      if( zErrmsg ){
+        sqlite3_snprintf(nMsg, zErrmsg,
+            "unable to open shared library [%s]", zFile);
+        sqlite3OsDlError(pVfs, nMsg-1, zErrmsg);
+      }
+    }
+    return SQLITE_ERROR;
+  }
+  xInit = (sqlite3_loadext_entry)sqlite3OsDlSym(pVfs, handle, zEntry);
+
+  /* If no entry point was found and the default legacy
+  ** entry point name "sqlite3_extension_init" was used, then check
+  ** for an entry point named "sqlite3_X_init" where the X is
+  ** replaced by the lowercase value of every ASCII alphabetic
+  ** character in the filename after the last "/" upto the first ".",
+  ** and eliding the first three characters if they are "lib".
+  ** Examples:
+  **
+  **    /usr/local/lib/libExample5.4.3.so ==>  sqlite3_example_init
+  **    C:/lib/mathfuncs.dll              ==>  sqlite3_mathfuncs_init
+  */
+  if( xInit==0 && zProc==0 ){
+    int iFile, iEntry, c;
+    int ncFile = sqlite3Strlen30(zFile);
+    zAltEntry = sqlite3_malloc64(ncFile+30);
+    if( zAltEntry==0 ){
+      sqlite3OsDlClose(pVfs, handle);
+      return SQLITE_NOMEM_BKPT;
+    }
+    memcpy(zAltEntry, "sqlite3_", 8);
+    for(iFile=ncFile-1; iFile>=0 && zFile[iFile]!='/'; iFile--){}
+    iFile++;
+    if( sqlite3_strnicmp(zFile+iFile, "lib", 3)==0 ) iFile += 3;
+    for(iEntry=8; (c = zFile[iFile])!=0 && c!='.'; iFile++){
+      if( sqlite3Isalpha(c) ){
+        zAltEntry[iEntry++] = (char)sqlite3UpperToLower[(unsigned)c];
+      }
+    }
+    memcpy(zAltEntry+iEntry, "_init", 6);
+    zEntry = zAltEntry;
+    xInit = (sqlite3_loadext_entry)sqlite3OsDlSym(pVfs, handle, zEntry);
+  }
+  if( xInit==0 ){
+    if( pzErrMsg ){
+      nMsg += sqlite3Strlen30(zEntry);
+      *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg);
+      if( zErrmsg ){
+        sqlite3_snprintf(nMsg, zErrmsg,
+            "no entry point [%s] in shared library [%s]", zEntry, zFile);
+        sqlite3OsDlError(pVfs, nMsg-1, zErrmsg);
+      }
+    }
+    sqlite3OsDlClose(pVfs, handle);
+    sqlite3_free(zAltEntry);
+    return SQLITE_ERROR;
+  }
+  sqlite3_free(zAltEntry);
+  rc = xInit(db, &zErrmsg, &sqlite3Apis);
+  if( rc ){
+    if( rc==SQLITE_OK_LOAD_PERMANENTLY ) return SQLITE_OK;
+    if( pzErrMsg ){
+      *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg);
+    }
+    sqlite3_free(zErrmsg);
+    sqlite3OsDlClose(pVfs, handle);
+    return SQLITE_ERROR;
+  }
+
+  /* Append the new shared library handle to the db->aExtension array.
*/
+  aHandle = sqlite3DbMallocZero(db, sizeof(handle)*(db->nExtension+1));
+  if( aHandle==0 ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  if( db->nExtension>0 ){
+    memcpy(aHandle, db->aExtension, sizeof(handle)*db->nExtension);
+  }
+  sqlite3DbFree(db, db->aExtension);
+  db->aExtension = aHandle;
+
+  db->aExtension[db->nExtension++] = handle;
+  return SQLITE_OK;
+}
+SQLITE_API int sqlite3_load_extension(
+  sqlite3 *db,          /* Load the extension into this database connection */
+  const char *zFile,    /* Name of the shared library containing extension */
+  const char *zProc,    /* Entry point.  Use "sqlite3_extension_init" if 0 */
+  char **pzErrMsg       /* Put error message here if not 0 */
+){
+  int rc;
+  sqlite3_mutex_enter(db->mutex);
+  rc = sqlite3LoadExtension(db, zFile, zProc, pzErrMsg);
+  rc = sqlite3ApiExit(db, rc);
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+/*
+** Call this routine when the database connection is closing in order
+** to clean up loaded extensions
+*/
+SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){
+  int i;
+  assert( sqlite3_mutex_held(db->mutex) );
+  for(i=0; i<db->nExtension; i++){
+    sqlite3OsDlClose(db->pVfs, db->aExtension[i]);
+  }
+  sqlite3DbFree(db, db->aExtension);
+}
+
+/*
+** Enable or disable extension loading.  Extension loading is disabled by
+** default so as not to open security holes in older applications.
+*/
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){
+  sqlite3_mutex_enter(db->mutex);
+  if( onoff ){
+    db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc;
+  }else{
+    db->flags &= ~(SQLITE_LoadExtension|SQLITE_LoadExtFunc);
+  }
+  sqlite3_mutex_leave(db->mutex);
+  return SQLITE_OK;
+}
+
+#endif /* !defined(SQLITE_OMIT_LOAD_EXTENSION) */
+
+/*
+** The following object holds the list of automatically loaded
+** extensions.
+**
+** This list is shared across threads.  The SQLITE_MUTEX_STATIC_MASTER
+** mutex must be held while accessing this list.
+*/
+typedef struct sqlite3AutoExtList sqlite3AutoExtList;
+static SQLITE_WSD struct sqlite3AutoExtList {
+  u32 nExt;              /* Number of entries in aExt[] */
+  void (**aExt)(void);   /* Pointers to the extension init functions */
+} sqlite3Autoext = { 0, 0 };
+
+/* The "wsdAutoext" macro will resolve to the autoextension
+** state vector.  If writable static data is unsupported on the target,
+** we have to locate the state vector at run-time.  In the more common
+** case where writable static data is supported, wsdAutoext can refer
+** directly to the "sqlite3Autoext" state vector declared above.
+*/
+#ifdef SQLITE_OMIT_WSD
+# define wsdAutoextInit \
+  sqlite3AutoExtList *x = &GLOBAL(sqlite3AutoExtList,sqlite3Autoext)
+# define wsdAutoext x[0]
+#else
+# define wsdAutoextInit
+# define wsdAutoext sqlite3Autoext
+#endif
+
+
+/*
+** Register a statically linked extension that is automatically
+** loaded by every new database connection.
+*/
+SQLITE_API int sqlite3_auto_extension(
+  void (*xInit)(void)
+){
+  int rc = SQLITE_OK;
+#ifndef SQLITE_OMIT_AUTOINIT
+  rc = sqlite3_initialize();
+  if( rc ){
+    return rc;
+  }else
+#endif
+  {
+    u32 i;
+#if SQLITE_THREADSAFE
+    sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
+#endif
+    wsdAutoextInit;
+    sqlite3_mutex_enter(mutex);
+    for(i=0; i<wsdAutoext.nExt; i++){
+      if( wsdAutoext.aExt[i]==xInit ) break;
+    }
+    if( i==wsdAutoext.nExt ){
+      u64 nByte = (wsdAutoext.nExt+1)*sizeof(wsdAutoext.aExt[0]);
+      void (**aNew)(void);
+      aNew = sqlite3_realloc64(wsdAutoext.aExt, nByte);
+      if( aNew==0 ){
+        rc = SQLITE_NOMEM_BKPT;
+      }else{
+        wsdAutoext.aExt = aNew;
+        wsdAutoext.aExt[wsdAutoext.nExt] = xInit;
+        wsdAutoext.nExt++;
+      }
+    }
+    sqlite3_mutex_leave(mutex);
+    assert( (rc&0xff)==rc );
+    return rc;
+  }
+}
+
+/*
+** Cancel a prior call to sqlite3_auto_extension.  Remove xInit from the
+** list of automatic extensions.  Return 1 if xInit was found on the list
+** and removed.  Return 0 if xInit was not on the list.
+*/
+SQLITE_API int sqlite3_cancel_auto_extension(
+  void (*xInit)(void)
+){
+#if SQLITE_THREADSAFE
+  sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
+#endif
+  int i;
+  int n = 0;
+  wsdAutoextInit;
+  sqlite3_mutex_enter(mutex);
+  for(i=(int)wsdAutoext.nExt-1; i>=0; i--){
+    if( wsdAutoext.aExt[i]==xInit ){
+      wsdAutoext.nExt--;
+      wsdAutoext.aExt[i] = wsdAutoext.aExt[wsdAutoext.nExt];
+      n++;
+      break;
+    }
+  }
+  sqlite3_mutex_leave(mutex);
+  return n;
+}
+
+/*
+** Reset the automatic extension loading mechanism.
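+**
+** A usage sketch of these three interfaces (myExtInit is an invented
+** entry point with the usual extension signature, cast as these
+** interfaces require):
+**
+**     sqlite3_auto_extension((void(*)(void))myExtInit);    (register)
+**     ... every sqlite3_open() from here on runs myExtInit ...
+**     sqlite3_cancel_auto_extension((void(*)(void))myExtInit);
+**     sqlite3_reset_auto_extension();                      (forget all)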
+*/
+SQLITE_API void sqlite3_reset_auto_extension(void){
+#ifndef SQLITE_OMIT_AUTOINIT
+  if( sqlite3_initialize()==SQLITE_OK )
+#endif
+  {
+#if SQLITE_THREADSAFE
+    sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
+#endif
+    wsdAutoextInit;
+    sqlite3_mutex_enter(mutex);
+    sqlite3_free(wsdAutoext.aExt);
+    wsdAutoext.aExt = 0;
+    wsdAutoext.nExt = 0;
+    sqlite3_mutex_leave(mutex);
+  }
+}
+
+/*
+** Load all automatic extensions.
+**
+** If anything goes wrong, set an error in the database connection.
+*/
+SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
+  u32 i;
+  int go = 1;
+  int rc;
+  sqlite3_loadext_entry xInit;
+
+  wsdAutoextInit;
+  if( wsdAutoext.nExt==0 ){
+    /* Common case: early out without ever having to acquire a mutex */
+    return;
+  }
+  for(i=0; go; i++){
+    char *zErrmsg;
+#if SQLITE_THREADSAFE
+    sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
+#endif
+#ifdef SQLITE_OMIT_LOAD_EXTENSION
+    const sqlite3_api_routines *pThunk = 0;
+#else
+    const sqlite3_api_routines *pThunk = &sqlite3Apis;
+#endif
+    sqlite3_mutex_enter(mutex);
+    if( i>=wsdAutoext.nExt ){
+      xInit = 0;
+      go = 0;
+    }else{
+      xInit = (sqlite3_loadext_entry)wsdAutoext.aExt[i];
+    }
+    sqlite3_mutex_leave(mutex);
+    zErrmsg = 0;
+    if( xInit && (rc = xInit(db, &zErrmsg, pThunk))!=0 ){
+      sqlite3ErrorWithMsg(db, rc,
+            "automatic extension loading failed: %s", zErrmsg);
+      go = 0;
+    }
+    sqlite3_free(zErrmsg);
+  }
+}
+
+/************** End of loadext.c *********************************************/
+/************** Begin file pragma.c ******************************************/
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the PRAGMA command.
+*/
+/* #include "sqliteInt.h" */
+
+#if !defined(SQLITE_ENABLE_LOCKING_STYLE)
+#  if defined(__APPLE__)
+#    define SQLITE_ENABLE_LOCKING_STYLE 1
+#  else
+#    define SQLITE_ENABLE_LOCKING_STYLE 0
+#  endif
+#endif
+
+/***************************************************************************
+** The "pragma.h" include file is an automatically generated file that
+** includes the PragTyp_XXXX macro definitions and the aPragmaName[]
+** object.  This ensures that the aPragmaName[] table is arranged in
+** lexicographical order to facilitate a binary search of the pragma name.
+** Do not edit pragma.h directly.  Edit and rerun the script at
+** ../tool/mkpragmatab.tcl. */
+/************** Include pragma.h in the middle of pragma.c *******************/
+/************** Begin file pragma.h ******************************************/
+/* DO NOT EDIT!
+** This file is automatically generated by the script at
+** ../tool/mkpragmatab.tcl.  To update the set of pragmas, edit
+** that script and rerun it.
+*/ + +/* The various pragma types */ +#define PragTyp_HEADER_VALUE 0 +#define PragTyp_AUTO_VACUUM 1 +#define PragTyp_FLAG 2 +#define PragTyp_BUSY_TIMEOUT 3 +#define PragTyp_CACHE_SIZE 4 +#define PragTyp_CACHE_SPILL 5 +#define PragTyp_CASE_SENSITIVE_LIKE 6 +#define PragTyp_COLLATION_LIST 7 +#define PragTyp_COMPILE_OPTIONS 8 +#define PragTyp_DATA_STORE_DIRECTORY 9 +#define PragTyp_DATABASE_LIST 10 +#define PragTyp_DEFAULT_CACHE_SIZE 11 +#define PragTyp_ENCODING 12 +#define PragTyp_FOREIGN_KEY_CHECK 13 +#define PragTyp_FOREIGN_KEY_LIST 14 +#define PragTyp_INCREMENTAL_VACUUM 15 +#define PragTyp_INDEX_INFO 16 +#define PragTyp_INDEX_LIST 17 +#define PragTyp_INTEGRITY_CHECK 18 +#define PragTyp_JOURNAL_MODE 19 +#define PragTyp_JOURNAL_SIZE_LIMIT 20 +#define PragTyp_LOCK_PROXY_FILE 21 +#define PragTyp_LOCKING_MODE 22 +#define PragTyp_PAGE_COUNT 23 +#define PragTyp_MMAP_SIZE 24 +#define PragTyp_PAGE_SIZE 25 +#define PragTyp_SECURE_DELETE 26 +#define PragTyp_SHRINK_MEMORY 27 +#define PragTyp_SOFT_HEAP_LIMIT 28 +#define PragTyp_STATS 29 +#define PragTyp_SYNCHRONOUS 30 +#define PragTyp_TABLE_INFO 31 +#define PragTyp_TEMP_STORE 32 +#define PragTyp_TEMP_STORE_DIRECTORY 33 +#define PragTyp_THREADS 34 +#define PragTyp_WAL_AUTOCHECKPOINT 35 +#define PragTyp_WAL_CHECKPOINT 36 +#define PragTyp_ACTIVATE_EXTENSIONS 37 +#define PragTyp_HEXKEY 38 +#define PragTyp_KEY 39 +#define PragTyp_REKEY 40 +#define PragTyp_LOCK_STATUS 41 +#define PragTyp_PARSER_TRACE 42 + +/* Property flags associated with various pragma. */ +#define PragFlg_NeedSchema 0x01 /* Force schema load before running */ +#define PragFlg_NoColumns 0x02 /* OP_ResultRow called with zero columns */ +#define PragFlg_NoColumns1 0x04 /* zero columns if RHS argument is present */ +#define PragFlg_ReadOnly 0x08 /* Read-only HEADER_VALUE */ +#define PragFlg_Result0 0x10 /* Acts as query when no argument */ +#define PragFlg_Result1 0x20 /* Acts as query when has one argument */ +#define PragFlg_SchemaOpt 0x40 /* Schema restricts name search if present */ +#define PragFlg_SchemaReq 0x80 /* Schema required - "main" is default */ + +/* Names of columns for pragmas that return multi-column result +** or that return single-column results where the name of the +** result column is different from the name of the pragma +*/ +static const char *const pragCName[] = { + /* 0 */ "cache_size", /* Used by: default_cache_size */ + /* 1 */ "cid", /* Used by: table_info */ + /* 2 */ "name", + /* 3 */ "type", + /* 4 */ "notnull", + /* 5 */ "dflt_value", + /* 6 */ "pk", + /* 7 */ "table", /* Used by: stats */ + /* 8 */ "index", + /* 9 */ "width", + /* 10 */ "height", + /* 11 */ "seqno", /* Used by: index_info */ + /* 12 */ "cid", + /* 13 */ "name", + /* 14 */ "seqno", /* Used by: index_xinfo */ + /* 15 */ "cid", + /* 16 */ "name", + /* 17 */ "desc", + /* 18 */ "coll", + /* 19 */ "key", + /* 20 */ "seq", /* Used by: index_list */ + /* 21 */ "name", + /* 22 */ "unique", + /* 23 */ "origin", + /* 24 */ "partial", + /* 25 */ "seq", /* Used by: database_list */ + /* 26 */ "name", + /* 27 */ "file", + /* 28 */ "seq", /* Used by: collation_list */ + /* 29 */ "name", + /* 30 */ "id", /* Used by: foreign_key_list */ + /* 31 */ "seq", + /* 32 */ "table", + /* 33 */ "from", + /* 34 */ "to", + /* 35 */ "on_update", + /* 36 */ "on_delete", + /* 37 */ "match", + /* 38 */ "table", /* Used by: foreign_key_check */ + /* 39 */ "rowid", + /* 40 */ "parent", + /* 41 */ "fkid", + /* 42 */ "busy", /* Used by: wal_checkpoint */ + /* 43 */ "log", + /* 44 */ "checkpointed", + /* 45 */ "timeout", /* 
Used by: busy_timeout */ + /* 46 */ "database", /* Used by: lock_status */ + /* 47 */ "status", +}; + +/* Definitions of all built-in pragmas */ +typedef struct PragmaName { + const char *const zName; /* Name of pragma */ + u8 ePragTyp; /* PragTyp_XXX value */ + u8 mPragFlg; /* Zero or more PragFlg_XXX values */ + u8 iPragCName; /* Start of column names in pragCName[] */ + u8 nPragCName; /* Num of col names. 0 means use pragma name */ + u32 iArg; /* Extra argument */ +} PragmaName; +static const PragmaName aPragmaName[] = { +#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD) + {/* zName: */ "activate_extensions", + /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) + {/* zName: */ "application_id", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_APPLICATION_ID }, +#endif +#if !defined(SQLITE_OMIT_AUTOVACUUM) + {/* zName: */ "auto_vacuum", + /* ePragTyp: */ PragTyp_AUTO_VACUUM, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) +#if !defined(SQLITE_OMIT_AUTOMATIC_INDEX) + {/* zName: */ "automatic_index", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_AutoIndex }, +#endif +#endif + {/* zName: */ "busy_timeout", + /* ePragTyp: */ PragTyp_BUSY_TIMEOUT, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 45, 1, + /* iArg: */ 0 }, +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) + {/* zName: */ "cache_size", + /* ePragTyp: */ PragTyp_CACHE_SIZE, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "cache_spill", + /* ePragTyp: */ PragTyp_CACHE_SPILL, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif + {/* zName: */ "case_sensitive_like", + /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE, + /* ePragFlg: */ PragFlg_NoColumns, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "cell_size_check", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_CellSizeCk }, +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "checkpoint_fullfsync", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_CkptFullFSync }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) + {/* zName: */ "collation_list", + /* ePragTyp: */ PragTyp_COLLATION_LIST, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 28, 2, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) + {/* zName: */ "compile_options", + /* ePragTyp: */ PragTyp_COMPILE_OPTIONS, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "count_changes", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_CountRows }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN + {/* zName: */ "data_store_directory", + /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY, + /* ePragFlg: */ 
PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) + {/* zName: */ "data_version", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_DATA_VERSION }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) + {/* zName: */ "database_list", + /* ePragTyp: */ PragTyp_DATABASE_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0, + /* ColNames: */ 25, 3, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) + {/* zName: */ "default_cache_size", + /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 1, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) +#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) + {/* zName: */ "defer_foreign_keys", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_DeferFKs }, +#endif +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "empty_result_callbacks", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_NullCallback }, +#endif +#if !defined(SQLITE_OMIT_UTF16) + {/* zName: */ "encoding", + /* ePragTyp: */ PragTyp_ENCODING, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) + {/* zName: */ "foreign_key_check", + /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 38, 4, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FOREIGN_KEY) + {/* zName: */ "foreign_key_list", + /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 30, 8, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) +#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) + {/* zName: */ "foreign_keys", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ForeignKeys }, +#endif +#endif +#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) + {/* zName: */ "freelist_count", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_FREE_PAGE_COUNT }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "full_column_names", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_FullColNames }, + {/* zName: */ "fullfsync", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_FullFSync }, +#endif +#if defined(SQLITE_HAS_CODEC) + {/* zName: */ "hexkey", + /* ePragTyp: */ PragTyp_HEXKEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "hexrekey", + /* ePragTyp: */ PragTyp_HEXKEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) +#if !defined(SQLITE_OMIT_CHECK) + {/* zName: */ "ignore_check_constraints", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* 
iArg: */ SQLITE_IgnoreChecks }, +#endif +#endif +#if !defined(SQLITE_OMIT_AUTOVACUUM) + {/* zName: */ "incremental_vacuum", + /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_NoColumns, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) + {/* zName: */ "index_info", + /* ePragTyp: */ PragTyp_INDEX_INFO, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 11, 3, + /* iArg: */ 0 }, + {/* zName: */ "index_list", + /* ePragTyp: */ PragTyp_INDEX_LIST, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 20, 5, + /* iArg: */ 0 }, + {/* zName: */ "index_xinfo", + /* ePragTyp: */ PragTyp_INDEX_INFO, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 14, 6, + /* iArg: */ 1 }, +#endif +#if !defined(SQLITE_OMIT_INTEGRITY_CHECK) + {/* zName: */ "integrity_check", + /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) + {/* zName: */ "journal_mode", + /* ePragTyp: */ PragTyp_JOURNAL_MODE, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "journal_size_limit", + /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if defined(SQLITE_HAS_CODEC) + {/* zName: */ "key", + /* ePragTyp: */ PragTyp_KEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "legacy_file_format", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_LegacyFileFmt }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE + {/* zName: */ "lock_proxy_file", + /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE, + /* ePragFlg: */ PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) + {/* zName: */ "lock_status", + /* ePragTyp: */ PragTyp_LOCK_STATUS, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 46, 2, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) + {/* zName: */ "locking_mode", + /* ePragTyp: */ PragTyp_LOCKING_MODE, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "max_page_count", + /* ePragTyp: */ PragTyp_PAGE_COUNT, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "mmap_size", + /* ePragTyp: */ PragTyp_MMAP_SIZE, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "page_count", + /* ePragTyp: */ PragTyp_PAGE_COUNT, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "page_size", + /* ePragTyp: */ PragTyp_PAGE_SIZE, + /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_PARSER_TRACE) + {/* zName: */ "parser_trace", + /* ePragTyp: */ PragTyp_PARSER_TRACE, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "query_only", + /* ePragTyp: */ PragTyp_FLAG, + 
/* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_QueryOnly }, +#endif +#if !defined(SQLITE_OMIT_INTEGRITY_CHECK) + {/* zName: */ "quick_check", + /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, + /* ePragFlg: */ PragFlg_NeedSchema, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "read_uncommitted", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ReadUncommitted }, + {/* zName: */ "recursive_triggers", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_RecTriggers }, +#endif +#if defined(SQLITE_HAS_CODEC) + {/* zName: */ "rekey", + /* ePragTyp: */ PragTyp_REKEY, + /* ePragFlg: */ 0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "reverse_unordered_selects", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ReverseOrder }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) + {/* zName: */ "schema_version", + /* ePragTyp: */ PragTyp_HEADER_VALUE, + /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ BTREE_SCHEMA_VERSION }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) + {/* zName: */ "secure_delete", + /* ePragTyp: */ PragTyp_SECURE_DELETE, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) + {/* zName: */ "short_column_names", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_ShortColNames }, +#endif + {/* zName: */ "shrink_memory", + /* ePragTyp: */ PragTyp_SHRINK_MEMORY, + /* ePragFlg: */ PragFlg_NoColumns, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "soft_heap_limit", + /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT, + /* ePragFlg: */ PragFlg_Result0, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#if !defined(SQLITE_OMIT_FLAG_PRAGMAS) +#if defined(SQLITE_DEBUG) + {/* zName: */ "sql_trace", + /* ePragTyp: */ PragTyp_FLAG, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ SQLITE_SqlTrace }, +#endif +#endif +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) + {/* zName: */ "stats", + /* ePragTyp: */ PragTyp_STATS, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq, + /* ColNames: */ 7, 4, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) + {/* zName: */ "synchronous", + /* ePragTyp: */ PragTyp_SYNCHRONOUS, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) + {/* zName: */ "table_info", + /* ePragTyp: */ PragTyp_TABLE_INFO, + /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt, + /* ColNames: */ 1, 6, + /* iArg: */ 0 }, +#endif +#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) + {/* zName: */ "temp_store", + /* ePragTyp: */ PragTyp_TEMP_STORE, + /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, + {/* zName: */ "temp_store_directory", + /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY, + /* ePragFlg: */ PragFlg_NoColumns1, + /* ColNames: */ 0, 0, + /* iArg: */ 0 }, +#endif + {/* zName: */ "threads", + /* ePragTyp: */ PragTyp_THREADS, + /* 
ePragFlg: */ PragFlg_Result0,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ 0 },
+#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
+ {/* zName: */ "user_version",
+  /* ePragTyp: */ PragTyp_HEADER_VALUE,
+  /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ BTREE_USER_VERSION },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+#if defined(SQLITE_DEBUG)
+ {/* zName: */ "vdbe_addoptrace",
+  /* ePragTyp: */ PragTyp_FLAG,
+  /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ SQLITE_VdbeAddopTrace },
+ {/* zName: */ "vdbe_debug",
+  /* ePragTyp: */ PragTyp_FLAG,
+  /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace },
+ {/* zName: */ "vdbe_eqp",
+  /* ePragTyp: */ PragTyp_FLAG,
+  /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ SQLITE_VdbeEQP },
+ {/* zName: */ "vdbe_listing",
+  /* ePragTyp: */ PragTyp_FLAG,
+  /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ SQLITE_VdbeListing },
+ {/* zName: */ "vdbe_trace",
+  /* ePragTyp: */ PragTyp_FLAG,
+  /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ SQLITE_VdbeTrace },
+#endif
+#endif
+#if !defined(SQLITE_OMIT_WAL)
+ {/* zName: */ "wal_autocheckpoint",
+  /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT,
+  /* ePragFlg: */ 0,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ 0 },
+ {/* zName: */ "wal_checkpoint",
+  /* ePragTyp: */ PragTyp_WAL_CHECKPOINT,
+  /* ePragFlg: */ PragFlg_NeedSchema,
+  /* ColNames: */ 42, 3,
+  /* iArg: */ 0 },
+#endif
+#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
+ {/* zName: */ "writable_schema",
+  /* ePragTyp: */ PragTyp_FLAG,
+  /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+  /* ColNames: */ 0, 0,
+  /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode },
+#endif
+};
+/* Number of pragmas: 60 on by default, 73 total. */
+
+/************** End of pragma.h **********************************************/
+/************** Continuing where we left off in pragma.c *********************/
+
+/*
+** Interpret the given string as a safety level.  Return 0 for OFF,
+** 1 for ON or NORMAL, 2 for FULL, and 3 for EXTRA.  Return 1 for an empty
+** or unrecognized string argument.  The FULL and EXTRA options are
+** disallowed if the omitFull parameter is 1.
+**
+** Note that the values returned are one less than the values that should
+** be passed into sqlite3BtreeSetSafetyLevel().  This is done to support
+** legacy SQL code.  The safety level used to be boolean and older scripts
+** may have used numbers 0 for OFF and 1 for ON.
+*/
+static u8 getSafetyLevel(const char *z, int omitFull, u8 dflt){
+                             /* 123456789 123456789 123 */
+  static const char zText[] = "onoffalseyestruextrafull";
+  static const u8 iOffset[] = {0, 1, 2,  4,    9,  12,  15,   20};
+  static const u8 iLength[] = {2, 2, 3,  5,    3,   4,   5,    4};
+  static const u8 iValue[] =  {1, 0, 0,  0,    1,   1,   3,    2};
+                            /* on no off false yes true extra full */
+  int i, n;
+  if( sqlite3Isdigit(*z) ){
+    return (u8)sqlite3Atoi(z);
+  }
+  n = sqlite3Strlen30(z);
+  for(i=0; i<ArraySize(iLength); i++){
+    if( iLength[i]==n && sqlite3StrNICmp(&zText[iOffset[i]],z,n)==0
+     && (!omitFull || iValue[i]<=1)
+    ){
+      return iValue[i];
+    }
+  }
+  return dflt;
+}
+
+/*
+** Interpret the given string as a boolean value.
+*/
+SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z, u8 dflt){
+  return getSafetyLevel(z,1,dflt)!=0;
+}
+
+/* The sqlite3GetBoolean() function is used by other modules but the
+** remainder of this file is specific to PRAGMA processing.  So omit
+** the rest of the file if PRAGMAs are omitted from the build.
+*/
+#if !defined(SQLITE_OMIT_PRAGMA)
+
+/*
+** Interpret the given string as a locking mode value.
+*/
+static int getLockingMode(const char *z){
+  if( z ){
+    if( 0==sqlite3StrICmp(z, "exclusive") ) return PAGER_LOCKINGMODE_EXCLUSIVE;
+    if( 0==sqlite3StrICmp(z, "normal") ) return PAGER_LOCKINGMODE_NORMAL;
+  }
+  return PAGER_LOCKINGMODE_QUERY;
+}
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+/*
+** Interpret the given string as an auto-vacuum mode value.
+**
+** The following strings, "none", "full" and "incremental" are
+** acceptable, as are their numeric equivalents: 0, 1 and 2 respectively.
+*/
+static int getAutoVacuum(const char *z){
+  int i;
+  if( 0==sqlite3StrICmp(z, "none") ) return BTREE_AUTOVACUUM_NONE;
+  if( 0==sqlite3StrICmp(z, "full") ) return BTREE_AUTOVACUUM_FULL;
+  if( 0==sqlite3StrICmp(z, "incremental") ) return BTREE_AUTOVACUUM_INCR;
+  i = sqlite3Atoi(z);
+  return (u8)((i>=0&&i<=2)?i:0);
+}
+#endif /* ifndef SQLITE_OMIT_AUTOVACUUM */
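+
+/* Editor's note: the zText/iOffset/iLength/iValue tables in getSafetyLevel()
+** above pack the keywords "on", "no", "off", "false", "yes", "true",
+** "extra" and "full" into one overlapping string.  A standalone sketch of
+** the same decoding trick (illustrative only, not part of the amalgamation):
+*/
+#if 0
+#include <stdio.h>
+int main(void){
+  static const char zText[] = "onoffalseyestruextrafull";
+  static const unsigned char iOffset[] = {0, 1, 2, 4,  9, 12, 15, 20};
+  static const unsigned char iLength[] = {2, 2, 3, 5,  3,  4,  5,  4};
+  int i;
+  for(i=0; i<8; i++){
+    /* prints: on no off false yes true extra full */
+    printf("%.*s\n", (int)iLength[i], &zText[iOffset[i]]);
+  }
+  return 0;
+}
+#endif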
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/*
+** Interpret the given string as a temp db location.  Return 1 for file-
+** backed temporary databases, 2 for the Red-Black tree in-memory database
+** and 0 to use the compile-time default.
+*/
+static int getTempStore(const char *z){
+  if( z[0]>='0' && z[0]<='2' ){
+    return z[0] - '0';
+  }else if( sqlite3StrICmp(z, "file")==0 ){
+    return 1;
+  }else if( sqlite3StrICmp(z, "memory")==0 ){
+    return 2;
+  }else{
+    return 0;
+  }
+}
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/*
+** Invalidate temp storage, either when the temp storage is changed
+** from default, or when 'file' and the temp_store_directory has changed
+*/
+static int invalidateTempStorage(Parse *pParse){
+  sqlite3 *db = pParse->db;
+  if( db->aDb[1].pBt!=0 ){
+    if( !db->autoCommit || sqlite3BtreeIsInReadTrans(db->aDb[1].pBt) ){
+      sqlite3ErrorMsg(pParse, "temporary storage cannot be changed "
+        "from within a transaction");
+      return SQLITE_ERROR;
+    }
+    sqlite3BtreeClose(db->aDb[1].pBt);
+    db->aDb[1].pBt = 0;
+    sqlite3ResetAllSchemasOfConnection(db);
+  }
+  return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+/*
+** If the TEMP database is open, close it and mark the database schema
+** as needing reloading.  This must be done when using the SQLITE_TEMP_STORE
+** or DEFAULT_TEMP_STORE pragmas.
+*/
+static int changeTempStorage(Parse *pParse, const char *zStorageType){
+  int ts = getTempStore(zStorageType);
+  sqlite3 *db = pParse->db;
+  if( db->temp_store==ts ) return SQLITE_OK;
+  if( invalidateTempStorage( pParse ) != SQLITE_OK ){
+    return SQLITE_ERROR;
+  }
+  db->temp_store = (u8)ts;
+  return SQLITE_OK;
+}
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+/*
+** Set result column names for a pragma.
+*/
+static void setPragmaResultColumnNames(
+  Vdbe *v,                     /* The query under construction */
+  const PragmaName *pPragma    /* The pragma */
+){
+  u8 n = pPragma->nPragCName;
+  sqlite3VdbeSetNumCols(v, n==0 ? 1 : n);
+  if( n==0 ){
+    sqlite3VdbeSetColName(v, 0, COLNAME_NAME, pPragma->zName, SQLITE_STATIC);
+  }else{
+    int i, j;
+    for(i=0, j=pPragma->iPragCName; i<n; i++, j++){
+      sqlite3VdbeSetColName(v, i, COLNAME_NAME, pragCName[j], SQLITE_STATIC);
+    }
+  }
+}
+
+/*
+** Generate code to return a single integer value.
+*/
+static void returnSingleInt(Vdbe *v, i64 value){
+  sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, 1, 0, (const u8*)&value, P4_INT64);
+  sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+}
+
+/*
+** Generate code to return a single text value.
+*/
+static void returnSingleText(
+  Vdbe *v,                /* Prepared statement under construction */
+  const char *zValue      /* Value to be returned */
+){
+  if( zValue ){
+    sqlite3VdbeLoadString(v, 1, (const char*)zValue);
+    sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+  }
+}
+
+
+/*
+** Set the safety_level and pager flags for pager iDb.  Or if iDb<0
+** set scope of the changes to all attached pagers.
+*/
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+static void setAllPagerFlags(sqlite3 *db){
+  if( db->autoCommit ){
+    Db *pDb = db->aDb;
+    int n = db->nDb;
+    assert( SQLITE_FullFSync==PAGER_FULLFSYNC );
+    assert( SQLITE_CkptFullFSync==PAGER_CKPT_FULLFSYNC );
+    assert( SQLITE_CacheSpill==PAGER_CACHESPILL );
+    assert( (PAGER_FULLFSYNC | PAGER_CKPT_FULLFSYNC | PAGER_CACHESPILL)
+             ==  PAGER_FLAGS_MASK );
+    assert( (pDb->safety_level & PAGER_SYNCHRONOUS_MASK)==pDb->safety_level );
+    while( (n--) > 0 ){
+      if( pDb->pBt ){
+        sqlite3BtreeSetPagerFlags(pDb->pBt,
+                 pDb->safety_level | (db->flags & PAGER_FLAGS_MASK) );
+      }
+      pDb++;
+    }
+  }
+}
+#else
+# define setAllPagerFlags(X)  /* no-op */
+#endif
+
+
+/*
+** Return a human-readable name for a constraint resolution action.
+*/
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+static const char *actionName(u8 action){
+  const char *zName;
+  switch( action ){
+    case OE_SetNull:  zName = "SET NULL";        break;
+    case OE_SetDflt:  zName = "SET DEFAULT";     break;
+    case OE_Cascade:  zName = "CASCADE";         break;
+    case OE_Restrict: zName = "RESTRICT";        break;
+    default:          zName = "NO ACTION";
+                      assert( action==OE_None ); break;
+  }
+  return zName;
+}
+#endif
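+
+/* Editor's note: the iPragCName/nPragCName fields of PragmaName slice the
+** shared pragCName[] array defined in pragma.h above.  For example the
+** "busy_timeout" entry carries ColNames 45,1 and therefore exposes the
+** single result column "timeout", while "wal_checkpoint" (42,3) exposes
+** "busy", "log" and "checkpointed".  An illustrative helper, not part of
+** the amalgamation, that mirrors setPragmaResultColumnNames():
+*/
+#if 0
+static void listPragmaColumns(const PragmaName *p){
+  if( p->nPragCName==0 ){
+    printf("%s\n", p->zName);   /* the column is named after the pragma */
+  }else{
+    int i;
+    for(i=0; i<p->nPragCName; i++){
+      printf("%s\n", pragCName[p->iPragCName + i]);
+    }
+  }
+}
+#endif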
+/*
+** Parameter eMode must be one of the PAGER_JOURNALMODE_XXX constants
+** defined in pager.h.  This function returns the associated lowercase
+** journal-mode name.
+*/
+SQLITE_PRIVATE const char *sqlite3JournalModename(int eMode){
+  static char * const azModeName[] = {
+    "delete", "persist", "off", "truncate", "memory"
+#ifndef SQLITE_OMIT_WAL
+    , "wal"
+#endif
+  };
+  assert( PAGER_JOURNALMODE_DELETE==0 );
+  assert( PAGER_JOURNALMODE_PERSIST==1 );
+  assert( PAGER_JOURNALMODE_OFF==2 );
+  assert( PAGER_JOURNALMODE_TRUNCATE==3 );
+  assert( PAGER_JOURNALMODE_MEMORY==4 );
+  assert( PAGER_JOURNALMODE_WAL==5 );
+  assert( eMode>=0 && eMode<=ArraySize(azModeName) );
+
+  if( eMode==ArraySize(azModeName) ) return 0;
+  return azModeName[eMode];
+}
+
+/*
+** Locate a pragma in the aPragmaName[] array.
+*/
+static const PragmaName *pragmaLocate(const char *zName){
+  int upr, lwr, mid = 0, rc;
+  lwr = 0;
+  upr = ArraySize(aPragmaName)-1;
+  while( lwr<=upr ){
+    mid = (lwr+upr)/2;
+    rc = sqlite3_stricmp(zName, aPragmaName[mid].zName);
+    if( rc==0 ) break;
+    if( rc<0 ){
+      upr = mid - 1;
+    }else{
+      lwr = mid + 1;
+    }
+  }
+  return lwr>upr ? 0 : &aPragmaName[mid];
+}
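+
+/* Editor's note: pragmaLocate() above relies on aPragmaName[] being sorted
+** lexicographically (which mkpragmatab.tcl guarantees), so a pragma name is
+** found in O(log N) string comparisons.  A self-contained sketch of the same
+** search over a plain string table (illustrative only, not SQLite code):
+*/
+#if 0
+#include <string.h>
+static int locate(const char *const *azSorted, int n, const char *zName){
+  int lwr = 0, upr = n-1;
+  while( lwr<=upr ){
+    int mid = (lwr+upr)/2;
+    int rc = strcmp(zName, azSorted[mid]);  /* sqlite3_stricmp in SQLite */
+    if( rc==0 ) return mid;                 /* found */
+    if( rc<0 ) upr = mid-1; else lwr = mid+1;
+  }
+  return -1;                                /* not a built-in pragma */
+}
+#endif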
+/*
+** Process a pragma statement.
+**
+** Pragmas are of this form:
+**
+**      PRAGMA [schema.]id [= value]
+**
+** The identifier might also be a string.  The value is a string, an
+** identifier, or a number.  If minusFlag is true, then the value is
+** a number that was preceded by a minus sign.
+**
+** If the left side is "database.id" then pId1 is the database name
+** and pId2 is the id.  If the left side is just "id" then pId1 is the
+** id and pId2 is an empty string.
+*/
+SQLITE_PRIVATE void sqlite3Pragma(
+  Parse *pParse,
+  Token *pId1,        /* First part of [schema.]id field */
+  Token *pId2,        /* Second part of [schema.]id field, or NULL */
+  Token *pValue,      /* Token for <value>, or NULL */
+  int minusFlag       /* True if a '-' sign preceded <value> */
+){
+  char *zLeft = 0;       /* Nul-terminated UTF-8 string <id> */
+  char *zRight = 0;      /* Nul-terminated UTF-8 string <value>, or NULL */
+  const char *zDb = 0;   /* The database name */
+  Token *pId;            /* Pointer to <id> token */
+  char *aFcntl[4];       /* Argument to SQLITE_FCNTL_PRAGMA */
+  int iDb;               /* Database index for <database> */
+  int rc;                /* return value from SQLITE_FCNTL_PRAGMA */
+  sqlite3 *db = pParse->db;          /* The database connection */
+  Db *pDb;                           /* The specific database being pragmaed */
+  Vdbe *v = sqlite3GetVdbe(pParse);  /* Prepared statement */
+  const PragmaName *pPragma;         /* The pragma */
+
+  if( v==0 ) return;
+  sqlite3VdbeRunOnlyOnce(v);
+  pParse->nMem = 2;
+
+  /* Interpret the [schema.] part of the pragma statement. iDb is the
+  ** index of the database this pragma is being applied to in db.aDb[]. */
+  iDb = sqlite3TwoPartName(pParse, pId1, pId2, &pId);
+  if( iDb<0 ) return;
+  pDb = &db->aDb[iDb];
+
+  /* If the temp database has been explicitly named as part of the
+  ** pragma, make sure it is open.
+  */
+  if( iDb==1 && sqlite3OpenTempDatabase(pParse) ){
+    return;
+  }
+
+  zLeft = sqlite3NameFromToken(db, pId);
+  if( !zLeft ) return;
+  if( minusFlag ){
+    zRight = sqlite3MPrintf(db, "-%T", pValue);
+  }else{
+    zRight = sqlite3NameFromToken(db, pValue);
+  }
+
+  assert( pId2 );
+  zDb = pId2->n>0 ? pDb->zDbSName : 0;
+  if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){
+    goto pragma_out;
+  }
+
+  /* Send an SQLITE_FCNTL_PRAGMA file-control to the underlying VFS
+  ** connection.  If it returns SQLITE_OK, then assume that the VFS
+  ** handled the pragma and generate a no-op prepared statement.
+  **
+  ** IMPLEMENTATION-OF: R-12238-55120 Whenever a PRAGMA statement is parsed,
+  ** an SQLITE_FCNTL_PRAGMA file control is sent to the open sqlite3_file
+  ** object corresponding to the database file to which the pragma
+  ** statement refers.
+  **
+  ** IMPLEMENTATION-OF: R-29875-31678 The argument to the SQLITE_FCNTL_PRAGMA
+  ** file control is an array of pointers to strings (char**) in which the
+  ** second element of the array is the name of the pragma and the third
+  ** element is the argument to the pragma or NULL if the pragma has no
+  ** argument.
+  */
+  aFcntl[0] = 0;
+  aFcntl[1] = zLeft;
+  aFcntl[2] = zRight;
+  aFcntl[3] = 0;
+  db->busyHandler.nBusy = 0;
+  rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl);
+  if( rc==SQLITE_OK ){
+    sqlite3VdbeSetNumCols(v, 1);
+    sqlite3VdbeSetColName(v, 0, COLNAME_NAME, aFcntl[0], SQLITE_TRANSIENT);
+    returnSingleText(v, aFcntl[0]);
+    sqlite3_free(aFcntl[0]);
+    goto pragma_out;
+  }
+  if( rc!=SQLITE_NOTFOUND ){
+    if( aFcntl[0] ){
+      sqlite3ErrorMsg(pParse, "%s", aFcntl[0]);
+      sqlite3_free(aFcntl[0]);
+    }
+    pParse->nErr++;
+    pParse->rc = rc;
+    goto pragma_out;
+  }
+
+  /* Locate the pragma in the lookup table */
+  pPragma = pragmaLocate(zLeft);
+  if( pPragma==0 ) goto pragma_out;
+
+  /* Make sure the database schema is loaded if the pragma requires that */
+  if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){
+    if( sqlite3ReadSchema(pParse) ) goto pragma_out;
+  }
+
+  /* Register the result column names for pragmas that return results */
+  if( (pPragma->mPragFlg & PragFlg_NoColumns)==0
+   && ((pPragma->mPragFlg & PragFlg_NoColumns1)==0 || zRight==0)
+  ){
+    setPragmaResultColumnNames(v, pPragma);
+  }
+
+  /* Jump to the appropriate pragma handler */
+  switch( pPragma->ePragTyp ){
+
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
+  /*
+  **  PRAGMA [schema.]default_cache_size
+  **  PRAGMA [schema.]default_cache_size=N
+  **
+  ** The first form reports the current persistent setting for the
+  ** page cache size.  The value returned is the maximum number of
+  ** pages in the page cache.  The second form sets both the current
+  ** page cache size value and the persistent page cache size value
+  ** stored in the database file.
+  **
+  ** Older versions of SQLite would set the default cache size to a
+  ** negative number to indicate synchronous=OFF.  These days, synchronous
+  ** is always on by default regardless of the sign of the default cache
+  ** size.  But continue to take the absolute value of the default cache
+  ** size for historical compatibility.
+  */
+  case PragTyp_DEFAULT_CACHE_SIZE: {
+    static const int iLn = VDBE_OFFSET_LINENO(2);
+    static const VdbeOpList getCacheSize[] = {
+      { OP_Transaction, 0, 0, 0},                         /* 0 */
+      { OP_ReadCookie,  0, 1, BTREE_DEFAULT_CACHE_SIZE},  /* 1 */
+      { OP_IfPos,       1, 8, 0},
+      { OP_Integer,     0, 2, 0},
+      { OP_Subtract,    1, 2, 1},
+      { OP_IfPos,       1, 8, 0},
+      { OP_Integer,     0, 1, 0},                         /* 6 */
+      { OP_Noop,        0, 0, 0},
+      { OP_ResultRow,   1, 1, 0},
+    };
+    VdbeOp *aOp;
+    sqlite3VdbeUsesBtree(v, iDb);
+    if( !zRight ){
+      pParse->nMem += 2;
+      sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(getCacheSize));
+      aOp = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize, iLn);
+      if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break;
+      aOp[0].p1 = iDb;
+      aOp[1].p1 = iDb;
+      aOp[6].p1 = SQLITE_DEFAULT_CACHE_SIZE;
+    }else{
+      int size = sqlite3AbsInt32(sqlite3Atoi(zRight));
+      sqlite3BeginWriteOperation(pParse, 0, iDb);
+      sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, size);
+      assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+      pDb->pSchema->cache_size = size;
+      sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
+    }
+    break;
+  }
+#endif /* !SQLITE_OMIT_PAGER_PRAGMAS && !SQLITE_OMIT_DEPRECATED */
+
+#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
+  /*
+  **  PRAGMA [schema.]page_size
+  **  PRAGMA [schema.]page_size=N
+  **
+  ** The first form reports the current setting for the
+  ** database page size in bytes.  The second form sets the
+  ** database page size value.  The value can only be set if
+  ** the database has not yet been created.
+  */
+  case PragTyp_PAGE_SIZE: {
+    Btree *pBt = pDb->pBt;
+    assert( pBt!=0 );
+    if( !zRight ){
+      int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0;
+      returnSingleInt(v, size);
+    }else{
+      /* Malloc may fail when setting the page-size, as there is an internal
+      ** buffer that the pager module resizes using sqlite3_realloc().
+      */
+      db->nextPagesize = sqlite3Atoi(zRight);
+      if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize,-1,0) ){
+        sqlite3OomFault(db);
+      }
+    }
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]secure_delete
+  **  PRAGMA [schema.]secure_delete=ON/OFF
+  **
+  ** The first form reports the current setting for the
+  ** secure_delete flag.  The second form changes the secure_delete
+  ** flag setting and reports the new value.
+  */
+  case PragTyp_SECURE_DELETE: {
+    Btree *pBt = pDb->pBt;
+    int b = -1;
+    assert( pBt!=0 );
+    if( zRight ){
+      b = sqlite3GetBoolean(zRight, 0);
+    }
+    if( pId2->n==0 && b>=0 ){
+      int ii;
+      for(ii=0; ii<db->nDb; ii++){
+        sqlite3BtreeSecureDelete(db->aDb[ii].pBt, b);
+      }
+    }
+    b = sqlite3BtreeSecureDelete(pBt, b);
+    returnSingleInt(v, b);
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]max_page_count
+  **  PRAGMA [schema.]max_page_count=N
+  **
+  ** The first form reports the current setting for the
+  ** maximum number of pages in the database file.  The
+  ** second form attempts to change this setting.  Both
+  ** forms return the current setting.
+  **
+  ** The absolute value of N is used.  This is undocumented and might
+  ** change.  The only purpose is to provide an easy way to test
+  ** the sqlite3AbsInt32() function.
+  **
+  **  PRAGMA [schema.]page_count
+  **
+  ** Return the number of pages in the specified database.
+  */
+  case PragTyp_PAGE_COUNT: {
+    int iReg;
+    sqlite3CodeVerifySchema(pParse, iDb);
+    iReg = ++pParse->nMem;
+    if( sqlite3Tolower(zLeft[0])=='p' ){
+      sqlite3VdbeAddOp2(v, OP_Pagecount, iDb, iReg);
+    }else{
+      sqlite3VdbeAddOp3(v, OP_MaxPgcnt, iDb, iReg,
+                        sqlite3AbsInt32(sqlite3Atoi(zRight)));
+    }
+    sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1);
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]locking_mode
+  **  PRAGMA [schema.]locking_mode = (normal|exclusive)
+  */
+  case PragTyp_LOCKING_MODE: {
+    const char *zRet = "normal";
+    int eMode = getLockingMode(zRight);
+
+    if( pId2->n==0 && eMode==PAGER_LOCKINGMODE_QUERY ){
+      /* Simple "PRAGMA locking_mode;" statement. This is a query for
+      ** the current default locking mode (which may be different from
+      ** the locking-mode of the main database).
+      */
+      eMode = db->dfltLockMode;
+    }else{
+      Pager *pPager;
+      if( pId2->n==0 ){
+        /* This indicates that no database name was specified as part
+        ** of the PRAGMA command. In this case the locking-mode must be
+        ** set on all attached databases, as well as the main db file.
+        **
+        ** Also, the sqlite3.dfltLockMode variable is set so that
+        ** any subsequently attached databases also use the specified
+        ** locking mode.
+        */
+        int ii;
+        assert(pDb==&db->aDb[0]);
+        for(ii=2; ii<db->nDb; ii++){
+          pPager = sqlite3BtreePager(db->aDb[ii].pBt);
+          sqlite3PagerLockingMode(pPager, eMode);
+        }
+        db->dfltLockMode = (u8)eMode;
+      }
+      pPager = sqlite3BtreePager(pDb->pBt);
+      eMode = sqlite3PagerLockingMode(pPager, eMode);
+    }
+
+    assert( eMode==PAGER_LOCKINGMODE_NORMAL
+            || eMode==PAGER_LOCKINGMODE_EXCLUSIVE );
+    if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){
+      zRet = "exclusive";
+    }
+    returnSingleText(v, zRet);
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]journal_mode
+  **  PRAGMA [schema.]journal_mode =
+  **                      (delete|persist|off|truncate|memory|wal)
+  */
+  case PragTyp_JOURNAL_MODE: {
+    int eMode;        /* One of the PAGER_JOURNALMODE_XXX symbols */
+    int ii;           /* Loop counter */
+
+    if( zRight==0 ){
+      /* If there is no "=MODE" part of the pragma, do a query for the
+      ** current mode */
+      eMode = PAGER_JOURNALMODE_QUERY;
+    }else{
+      const char *zMode;
+      int n = sqlite3Strlen30(zRight);
+      for(eMode=0; (zMode = sqlite3JournalModename(eMode))!=0; eMode++){
+        if( sqlite3StrNICmp(zRight, zMode, n)==0 ) break;
+      }
+      if( !zMode ){
+        /* If the "=MODE" part does not match any known journal mode,
+        ** then do a query */
+        eMode = PAGER_JOURNALMODE_QUERY;
+      }
+    }
+    if( eMode==PAGER_JOURNALMODE_QUERY && pId2->n==0 ){
+      /* Convert "PRAGMA journal_mode" into "PRAGMA main.journal_mode" */
+      iDb = 0;
+      pId2->n = 1;
+    }
+    for(ii=db->nDb-1; ii>=0; ii--){
+      if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){
+        sqlite3VdbeUsesBtree(v, ii);
+        sqlite3VdbeAddOp3(v, OP_JournalMode, ii, 1, eMode);
+      }
+    }
+    sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]journal_size_limit
+  **  PRAGMA [schema.]journal_size_limit=N
+  **
+  ** Get or set the size limit on rollback journal files.
+  */
+  case PragTyp_JOURNAL_SIZE_LIMIT: {
+    Pager *pPager = sqlite3BtreePager(pDb->pBt);
+    i64 iLimit = -2;
+    if( zRight ){
+      sqlite3DecOrHexToI64(zRight, &iLimit);
+      if( iLimit<-1 ) iLimit = -1;
+    }
+    iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit);
+    returnSingleInt(v, iLimit);
+    break;
+  }
+
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+  /*
+  **  PRAGMA [schema.]auto_vacuum
+  **  PRAGMA [schema.]auto_vacuum=N
+  **
+  ** Get or set the value of the database 'auto-vacuum' parameter.
+  ** The value is one of:  0 NONE 1 FULL 2 INCREMENTAL
+  */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  case PragTyp_AUTO_VACUUM: {
+    Btree *pBt = pDb->pBt;
+    assert( pBt!=0 );
+    if( !zRight ){
+      returnSingleInt(v, sqlite3BtreeGetAutoVacuum(pBt));
+    }else{
+      int eAuto = getAutoVacuum(zRight);
+      assert( eAuto>=0 && eAuto<=2 );
+      db->nextAutovac = (u8)eAuto;
+      /* Call SetAutoVacuum() to initialize the internal auto- and
+      ** incr-vacuum flags.  This is required in case this connection
+      ** creates the database file.  It is important that it is created
+      ** as an auto-vacuum capable db.
+      */
+      rc = sqlite3BtreeSetAutoVacuum(pBt, eAuto);
+      if( rc==SQLITE_OK && (eAuto==1 || eAuto==2) ){
+        /* When setting the auto_vacuum mode to either "full" or
+        ** "incremental", write the value of meta[6] in the database
+        ** file. Before writing to meta[6], check that meta[3] indicates
+        ** that this really is an auto-vacuum capable database.
+        */
+        static const int iLn = VDBE_OFFSET_LINENO(2);
+        static const VdbeOpList setMeta6[] = {
+          { OP_Transaction,  0,         1,                 0},    /* 0 */
+          { OP_ReadCookie,   0,         1, BTREE_LARGEST_ROOT_PAGE},
+          { OP_If,           1,         0,                 0},    /* 2 */
+          { OP_Halt,         SQLITE_OK, OE_Abort,          0},    /* 3 */
+          { OP_SetCookie,    0,         BTREE_INCR_VACUUM, 0},    /* 4 */
+        };
+        VdbeOp *aOp;
+        int iAddr = sqlite3VdbeCurrentAddr(v);
+        sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(setMeta6));
+        aOp = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6, iLn);
+        if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break;
+        aOp[0].p1 = iDb;
+        aOp[1].p1 = iDb;
+        aOp[2].p2 = iAddr+4;
+        aOp[4].p1 = iDb;
+        aOp[4].p3 = eAuto - 1;
+        sqlite3VdbeUsesBtree(v, iDb);
+      }
+    }
+    break;
+  }
+#endif
+
+  /*
+  **  PRAGMA [schema.]incremental_vacuum(N)
+  **
+  ** Do N steps of incremental vacuuming on a database.
+  */
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  case PragTyp_INCREMENTAL_VACUUM: {
+    int iLimit, addr;
+    if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){
+      iLimit = 0x7fffffff;
+    }
+    sqlite3BeginWriteOperation(pParse, 0, iDb);
+    sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1);
+    addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); VdbeCoverage(v);
+    sqlite3VdbeAddOp1(v, OP_ResultRow, 1);
+    sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1);
+    sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); VdbeCoverage(v);
+    sqlite3VdbeJumpHere(v, addr);
+    break;
+  }
+#endif
+
+#ifndef SQLITE_OMIT_PAGER_PRAGMAS
+  /*
+  **  PRAGMA [schema.]cache_size
+  **  PRAGMA [schema.]cache_size=N
+  **
+  ** The first form reports the current local setting for the
+  ** page cache size.  The second form sets the local
+  ** page cache size value.  If N is positive then that is the
+  ** number of pages in the cache.  If N is negative, then the
+  ** number of pages is adjusted so that the cache uses -N kibibytes
+  ** of memory.
+  */
+  case PragTyp_CACHE_SIZE: {
+    assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+    if( !zRight ){
+      returnSingleInt(v, pDb->pSchema->cache_size);
+    }else{
+      int size = sqlite3Atoi(zRight);
+      pDb->pSchema->cache_size = size;
+      sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
+    }
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]cache_spill
+  **  PRAGMA cache_spill=BOOLEAN
+  **  PRAGMA [schema.]cache_spill=N
+  **
+  ** The first form reports the current local setting for the
+  ** page cache spill size.  The second form turns cache spill on
+  ** or off.  When turning cache spill on, the size is set to the
+  ** current cache_size.  The third form sets a spill size that
+  ** may be different from the cache size.
+  ** If N is positive then that is the
+  ** number of pages in the cache.  If N is negative, then the
+  ** number of pages is adjusted so that the cache uses -N kibibytes
+  ** of memory.
+  **
+  ** If the number of cache_spill pages is less than the number of
+  ** cache_size pages, no spilling occurs until the page count exceeds
+  ** the number of cache_size pages.
+  **
+  ** The cache_spill=BOOLEAN setting applies to all attached schemas,
+  ** not just the schema specified.
+  */
+  case PragTyp_CACHE_SPILL: {
+    assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+    if( !zRight ){
+      returnSingleInt(v,
+         (db->flags & SQLITE_CacheSpill)==0 ? 0 :
+            sqlite3BtreeSetSpillSize(pDb->pBt,0));
+    }else{
+      int size = 1;
+      if( sqlite3GetInt32(zRight, &size) ){
+        sqlite3BtreeSetSpillSize(pDb->pBt, size);
+      }
+      if( sqlite3GetBoolean(zRight, size!=0) ){
+        db->flags |= SQLITE_CacheSpill;
+      }else{
+        db->flags &= ~SQLITE_CacheSpill;
+      }
+      setAllPagerFlags(db);
+    }
+    break;
+  }
+
+  /*
+  **  PRAGMA [schema.]mmap_size(N)
+  **
+  ** Used to set mapping size limit. The mapping size limit is
+  ** used to limit the aggregate size of all memory mapped regions of the
+  ** database file. If this parameter is set to zero, then memory mapping
+  ** is not used at all.  If N is negative, then the default memory map
+  ** limit determined by sqlite3_config(SQLITE_CONFIG_MMAP_SIZE) is set.
+  ** The parameter N is measured in bytes.
+  **
+  ** This value is advisory.  The underlying VFS is free to memory map
+  ** as little or as much as it wants.  Except, if N is set to 0 then the
+  ** upper layers will never invoke the xFetch interfaces to the VFS.
+  */
+  case PragTyp_MMAP_SIZE: {
+    sqlite3_int64 sz;
+#if SQLITE_MAX_MMAP_SIZE>0
+    assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+    if( zRight ){
+      int ii;
+      sqlite3DecOrHexToI64(zRight, &sz);
+      if( sz<0 ) sz = sqlite3GlobalConfig.szMmap;
+      if( pId2->n==0 ) db->szMmap = sz;
+      for(ii=db->nDb-1; ii>=0; ii--){
+        if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){
+          sqlite3BtreeSetMmapLimit(db->aDb[ii].pBt, sz);
+        }
+      }
+    }
+    sz = -1;
+    rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_MMAP_SIZE, &sz);
+#else
+    sz = 0;
+    rc = SQLITE_OK;
+#endif
+    if( rc==SQLITE_OK ){
+      returnSingleInt(v, sz);
+    }else if( rc!=SQLITE_NOTFOUND ){
+      pParse->nErr++;
+      pParse->rc = rc;
+    }
+    break;
+  }
+
+  /*
+  **   PRAGMA temp_store
+  **   PRAGMA temp_store = "default"|"memory"|"file"
+  **
+  ** Return or set the local value of the temp_store flag.  Changing
+  ** the local value does not make changes to the disk file and the default
+  ** value will be restored the next time the database is opened.
+  **
+  ** Note that it is possible for the library compile-time options to
+  ** override this setting.
+  */
+  case PragTyp_TEMP_STORE: {
+    if( !zRight ){
+      returnSingleInt(v, db->temp_store);
+    }else{
+      changeTempStorage(pParse, zRight);
+    }
+    break;
+  }
+
+  /*
+  **   PRAGMA temp_store_directory
+  **   PRAGMA temp_store_directory = ""|"directory_name"
+  **
+  ** Return or set the local value of the temp_store_directory flag.  Changing
+  ** the value sets a specific directory to be used for temporary files.
+  ** Setting to a null string reverts to the default temporary directory
+  ** search.  If the temporary directory is changed, then invalidateTempStorage.
+  **
+  */
+  case PragTyp_TEMP_STORE_DIRECTORY: {
+    if( !zRight ){
+      returnSingleText(v, sqlite3_temp_directory);
+    }else{
+#ifndef SQLITE_OMIT_WSD
+      if( zRight[0] ){
+        int res;
+        rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
+        if( rc!=SQLITE_OK || res==0 ){
+          sqlite3ErrorMsg(pParse, "not a writable directory");
+          goto pragma_out;
+        }
+      }
+      if( SQLITE_TEMP_STORE==0
+       || (SQLITE_TEMP_STORE==1 && db->temp_store<=1)
+       || (SQLITE_TEMP_STORE==2 && db->temp_store==1)
+      ){
+        invalidateTempStorage(pParse);
+      }
+      sqlite3_free(sqlite3_temp_directory);
+      if( zRight[0] ){
+        sqlite3_temp_directory = sqlite3_mprintf("%s", zRight);
+      }else{
+        sqlite3_temp_directory = 0;
+      }
+#endif /* SQLITE_OMIT_WSD */
+    }
+    break;
+  }
+
+#if SQLITE_OS_WIN
+  /*
+  **   PRAGMA data_store_directory
+  **   PRAGMA data_store_directory = ""|"directory_name"
+  **
+  ** Return or set the local value of the data_store_directory flag.  Changing
+  ** the value sets a specific directory to be used for database files that
+  ** were specified with a relative pathname.  Setting to a null string reverts
+  ** to the default database directory, which for database files specified with
+  ** a relative path will probably be based on the current directory for the
+  ** process.  Database files specified with an absolute path are not affected
+  ** by this setting, regardless of its value.
+  **
+  */
+  case PragTyp_DATA_STORE_DIRECTORY: {
+    if( !zRight ){
+      returnSingleText(v, sqlite3_data_directory);
+    }else{
+#ifndef SQLITE_OMIT_WSD
+      if( zRight[0] ){
+        int res;
+        rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
+        if( rc!=SQLITE_OK || res==0 ){
+          sqlite3ErrorMsg(pParse, "not a writable directory");
+          goto pragma_out;
+        }
+      }
+      sqlite3_free(sqlite3_data_directory);
+      if( zRight[0] ){
+        sqlite3_data_directory = sqlite3_mprintf("%s", zRight);
+      }else{
+        sqlite3_data_directory = 0;
+      }
+#endif /* SQLITE_OMIT_WSD */
+    }
+    break;
+  }
+#endif
+
+#if SQLITE_ENABLE_LOCKING_STYLE
+  /*
+  **   PRAGMA [schema.]lock_proxy_file
+  **   PRAGMA [schema.]lock_proxy_file = ":auto:"|"lock_file_path"
+  **
+  ** Return or set the value of the lock_proxy_file flag.  Changing
+  ** the value sets a specific file to be used for database access locks.
+  **
+  */
+  case PragTyp_LOCK_PROXY_FILE: {
+    if( !zRight ){
+      Pager *pPager = sqlite3BtreePager(pDb->pBt);
+      char *proxy_file_path = NULL;
+      sqlite3_file *pFile = sqlite3PagerFile(pPager);
+      sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE,
+                               &proxy_file_path);
+      returnSingleText(v, proxy_file_path);
+    }else{
+      Pager *pPager = sqlite3BtreePager(pDb->pBt);
+      sqlite3_file *pFile = sqlite3PagerFile(pPager);
+      int res;
+      if( zRight[0] ){
+        res = sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE,
+                                   zRight);
+      } else {
+        res = sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE,
+                                   NULL);
+      }
+      if( res!=SQLITE_OK ){
+        sqlite3ErrorMsg(pParse, "failed to set lock proxy file");
+        goto pragma_out;
+      }
+    }
+    break;
+  }
+#endif /* SQLITE_ENABLE_LOCKING_STYLE */
+
+  /*
+  **   PRAGMA [schema.]synchronous
+  **   PRAGMA [schema.]synchronous=OFF|ON|NORMAL|FULL|EXTRA
+  **
+  ** Return or set the local value of the synchronous flag.  Changing
+  ** the local value does not make changes to the disk file and the
+  ** default value will be restored the next time the database is
+  ** opened.
+  */
+  case PragTyp_SYNCHRONOUS: {
+    if( !zRight ){
+      returnSingleInt(v, pDb->safety_level-1);
+    }else{
+      if( !db->autoCommit ){
+        sqlite3ErrorMsg(pParse,
+            "Safety level may not be changed inside a transaction");
+      }else{
+        int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK;
+        if( iLevel==0 ) iLevel = 1;
+        pDb->safety_level = iLevel;
+        pDb->bSyncSet = 1;
+        setAllPagerFlags(db);
+      }
+    }
+    break;
+  }
+#endif /* SQLITE_OMIT_PAGER_PRAGMAS */
+
+#ifndef SQLITE_OMIT_FLAG_PRAGMAS
+  case PragTyp_FLAG: {
+    if( zRight==0 ){
+      setPragmaResultColumnNames(v, pPragma);
+      returnSingleInt(v, (db->flags & pPragma->iArg)!=0 );
+    }else{
+      int mask = pPragma->iArg;    /* Mask of bits to set or clear. */
+      if( db->autoCommit==0 ){
+        /* Foreign key support may not be enabled or disabled while not
+        ** in auto-commit mode.  */
+        mask &= ~(SQLITE_ForeignKeys);
+      }
+#if SQLITE_USER_AUTHENTICATION
+      if( db->auth.authLevel==UAUTH_User ){
+        /* Do not allow non-admin users to modify the schema arbitrarily */
+        mask &= ~(SQLITE_WriteSchema);
+      }
+#endif
+
+      if( sqlite3GetBoolean(zRight, 0) ){
+        db->flags |= mask;
+      }else{
+        db->flags &= ~mask;
+        if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0;
+      }
+
+      /* Many of the flag-pragmas modify the code generated by the SQL
+      ** compiler (eg. count_changes). So add an opcode to expire all
+      ** compiled SQL statements after modifying a pragma value.
+      */
+      sqlite3VdbeAddOp0(v, OP_Expire);
+      setAllPagerFlags(db);
+    }
+    break;
+  }
+#endif /* SQLITE_OMIT_FLAG_PRAGMAS */
+
+#ifndef SQLITE_OMIT_SCHEMA_PRAGMAS
+  /*
+  **   PRAGMA table_info(<table>)
+  **
+  ** Return a single row for each column of the named table. The columns of
+  ** the returned data set are:
+  **
+  **   cid:        Column id (numbered from left to right, starting at 0)
+  **   name:       Column name
+  **   type:       Column declaration type.
+  **   notnull:    True if 'NOT NULL' is part of column declaration
+  **   dflt_value: The default value for the column, if any.
+  **   pk:         Non-zero for columns that are part of the primary key.
+  */
+  case PragTyp_TABLE_INFO: if( zRight ){
+    Table *pTab;
+    pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb);
+    if( pTab ){
+      int i, k;
+      int nHidden = 0;
+      Column *pCol;
+      Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+      pParse->nMem = 6;
+      sqlite3CodeVerifySchema(pParse, iDb);
+      sqlite3ViewGetColumnNames(pParse, pTab);
+      for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
+        if( IsHiddenColumn(pCol) ){
+          nHidden++;
+          continue;
+        }
+        if( (pCol->colFlags & COLFLAG_PRIMKEY)==0 ){
+          k = 0;
+        }else if( pPk==0 ){
+          k = 1;
+        }else{
+          for(k=1; k<=pTab->nCol && pPk->aiColumn[k-1]!=i; k++){}
+        }
+        assert( pCol->pDflt==0 || pCol->pDflt->op==TK_SPAN );
+        sqlite3VdbeMultiLoad(v, 1, "issisi",
+               i-nHidden,
+               pCol->zName,
+               sqlite3ColumnType(pCol,""),
+               pCol->notNull ? 1 : 0,
+               pCol->pDflt ? pCol->pDflt->u.zToken : 0,
+               k);
+        sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 6);
+      }
+    }
+  }
+  break;
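+
+  /* Editor's example (illustrative): given
+  **   CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT NOT NULL DEFAULT 'x');
+  ** "PRAGMA table_info(t1)", handled by the case above, returns:
+  **   cid | name | type    | notnull | dflt_value | pk
+  **    0  | a    | INTEGER |    0    | NULL       | 1
+  **    1  | b    | TEXT    |    1    | 'x'        | 0
+  */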
+  case PragTyp_STATS: {
+    Index *pIdx;
+    HashElem *i;
+    pParse->nMem = 4;
+    sqlite3CodeVerifySchema(pParse, iDb);
+    for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){
+      Table *pTab = sqliteHashData(i);
+      sqlite3VdbeMultiLoad(v, 1, "ssii",
+           pTab->zName,
+           0,
+           pTab->szTabRow,
+           pTab->nRowLogEst);
+      sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4);
+      for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+        sqlite3VdbeMultiLoad(v, 2, "sii",
+           pIdx->zName,
+           pIdx->szIdxRow,
+           pIdx->aiRowLogEst[0]);
+        sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4);
+      }
+    }
+  }
+  break;
+
+  case PragTyp_INDEX_INFO: if( zRight ){
+    Index *pIdx;
+    Table *pTab;
+    pIdx = sqlite3FindIndex(db, zRight, zDb);
+    if( pIdx ){
+      int i;
+      int mx;
+      if( pPragma->iArg ){
+        /* PRAGMA index_xinfo (newer version with more rows and columns) */
+        mx = pIdx->nColumn;
+        pParse->nMem = 6;
+      }else{
+        /* PRAGMA index_info (legacy version) */
+        mx = pIdx->nKeyCol;
+        pParse->nMem = 3;
+      }
+      pTab = pIdx->pTable;
+      sqlite3CodeVerifySchema(pParse, iDb);
+      assert( pParse->nMem<=pPragma->nPragCName );
+      for(i=0; i<mx; i++){
+        i16 cnum = pIdx->aiColumn[i];
+        sqlite3VdbeMultiLoad(v, 1, "iis", i, cnum,
+                             cnum<0 ? 0 : pTab->aCol[cnum].zName);
+        if( pPragma->iArg ){
+          sqlite3VdbeMultiLoad(v, 4, "isi",
+            pIdx->aSortOrder[i],
+            pIdx->azColl[i],
+            i<pIdx->nKeyCol);
+        }
+        sqlite3VdbeAddOp2(v, OP_ResultRow, 1, pParse->nMem);
+      }
+    }
+  }
+  break;
+
+  case PragTyp_INDEX_LIST: if( zRight ){
+    Index *pIdx;
+    Table *pTab;
+    int i;
+    pTab = sqlite3FindTable(db, zRight, zDb);
+    if( pTab ){
+      pParse->nMem = 5;
+      sqlite3CodeVerifySchema(pParse, iDb);
+      for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){
+        const char *azOrigin[] = { "c", "u", "pk" };
+        sqlite3VdbeMultiLoad(v, 1, "isisi",
+           i,
+           pIdx->zName,
+           IsUniqueIndex(pIdx),
+           azOrigin[pIdx->idxType],
+           pIdx->pPartIdxWhere!=0);
+        sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 5);
+      }
+    }
+  }
+  break;
+
+  case PragTyp_DATABASE_LIST: {
+    int i;
+    pParse->nMem = 3;
+    for(i=0; i<db->nDb; i++){
+      if( db->aDb[i].pBt==0 ) continue;
+      assert( db->aDb[i].zDbSName!=0 );
+      sqlite3VdbeMultiLoad(v, 1, "iss",
+         i,
+         db->aDb[i].zDbSName,
+         sqlite3BtreeGetFilename(db->aDb[i].pBt));
+      sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
+    }
+  }
+  break;
+
+  case PragTyp_COLLATION_LIST: {
+    int i = 0;
+    HashElem *p;
+    pParse->nMem = 2;
+    for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){
+      CollSeq *pColl = (CollSeq *)sqliteHashData(p);
+      sqlite3VdbeMultiLoad(v, 1, "is", i++, pColl->zName);
+      sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2);
+    }
+  }
+  break;
+#endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */
+
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+  case PragTyp_FOREIGN_KEY_LIST: if( zRight ){
+    FKey *pFK;
+    Table *pTab;
+    pTab = sqlite3FindTable(db, zRight, zDb);
+    if( pTab ){
+      pFK = pTab->pFKey;
+      if( pFK ){
+        int i = 0;
+        pParse->nMem = 8;
+        sqlite3CodeVerifySchema(pParse, iDb);
+        while(pFK){
+          int j;
+          for(j=0; j<pFK->nCol; j++){
+            sqlite3VdbeMultiLoad(v, 1, "iissssss",
+                   i,
+                   j,
+                   pFK->zTo,
+                   pTab->aCol[pFK->aCol[j].iFrom].zName,
+                   pFK->aCol[j].zCol,
+                   actionName(pFK->aAction[1]),  /* ON UPDATE */
+                   actionName(pFK->aAction[0]),  /* ON DELETE */
+                   "NONE");
+            sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 8);
+          }
+          ++i;
+          pFK = pFK->pNextFrom;
+        }
+      }
+    }
+  }
+  break;
+#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */
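+
+  /* Editor's example (illustrative): given
+  **   CREATE TABLE parent(x PRIMARY KEY);
+  **   CREATE TABLE child(y REFERENCES parent(x) ON DELETE CASCADE);
+  ** "PRAGMA foreign_key_list(child)", handled by the case above, returns
+  ** one row:
+  **   id=0, seq=0, table='parent', from='y', to='x',
+  **   on_update='NO ACTION', on_delete='CASCADE', match='NONE'
+  */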
+#ifndef SQLITE_OMIT_FOREIGN_KEY
+#ifndef SQLITE_OMIT_TRIGGER
+  case PragTyp_FOREIGN_KEY_CHECK: {
+    FKey *pFK;             /* A foreign key constraint */
+    Table *pTab;           /* Child table containing "REFERENCES" keyword */
+    Table *pParent;        /* Parent table that child points to */
+    Index *pIdx;           /* Index in the parent table */
+    int i;                 /* Loop counter:  Foreign key number for pTab */
+    int j;                 /* Loop counter:  Field of the foreign key */
+    HashElem *k;           /* Loop counter:  Next table in schema */
+    int x;                 /* result variable */
+    int regResult;         /* 3 registers to hold a result row */
+    int regKey;            /* Register to hold key for checking the FK */
+    int regRow;            /* Registers to hold a row from pTab */
+    int addrTop;           /* Top of a loop checking foreign keys */
+    int addrOk;            /* Jump here if the key is OK */
+    int *aiCols;           /* child to parent column mapping */
+
+    regResult = pParse->nMem+1;
+    pParse->nMem += 4;
+    regKey = ++pParse->nMem;
+    regRow = ++pParse->nMem;
+    sqlite3CodeVerifySchema(pParse, iDb);
+    k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash);
+    while( k ){
+      if( zRight ){
+        pTab = sqlite3LocateTable(pParse, 0, zRight, zDb);
+        k = 0;
+      }else{
+        pTab = (Table*)sqliteHashData(k);
+        k = sqliteHashNext(k);
+      }
+      if( pTab==0 || pTab->pFKey==0 ) continue;
+      sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+      if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow;
+      sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead);
+      sqlite3VdbeLoadString(v, regResult, pTab->zName);
+      for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){
+        pParent = sqlite3FindTable(db, pFK->zTo, zDb);
+        if( pParent==0 ) continue;
+        pIdx = 0;
+        sqlite3TableLock(pParse, iDb, pParent->tnum, 0, pParent->zName);
+        x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, 0);
+        if( x==0 ){
+          if( pIdx==0 ){
+            sqlite3OpenTable(pParse, i, iDb, pParent, OP_OpenRead);
+          }else{
+            sqlite3VdbeAddOp3(v, OP_OpenRead, i, pIdx->tnum, iDb);
+            sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+          }
+        }else{
+          k = 0;
+          break;
+        }
+      }
+      assert( pParse->nErr>0 || pFK==0 );
+      if( pFK ) break;
+      if( pParse->nTab<i ) pParse->nTab = i;
+      addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v);
+      for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){
+        pParent = sqlite3FindTable(db, pFK->zTo, zDb);
+        pIdx = 0;
+        aiCols = 0;
+        if( pParent ){
+          x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, &aiCols);
+          assert( x==0 );
+        }
+        addrOk = sqlite3VdbeMakeLabel(v);
+        if( pParent && pIdx==0 ){
+          int iKey = pFK->aCol[0].iFrom;
+          assert( iKey>=0 && iKey<pTab->nCol );
+          if( iKey!=pTab->iPKey ){
+            sqlite3VdbeAddOp3(v, OP_Column, 0, iKey, regRow);
+            sqlite3ColumnDefault(v, pTab, iKey, regRow);
+            sqlite3VdbeAddOp2(v, OP_IsNull, regRow, addrOk); VdbeCoverage(v);
+          }else{
+            sqlite3VdbeAddOp2(v, OP_Rowid, 0, regRow);
+          }
+          sqlite3VdbeAddOp3(v, OP_SeekRowid, i, 0, regRow); VdbeCoverage(v);
+          sqlite3VdbeGoto(v, addrOk);
+          sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2);
+        }else{
+          for(j=0; j<pFK->nCol; j++){
+            sqlite3ExprCodeGetColumnOfTable(v, pTab, 0,
+                          aiCols ? aiCols[j] : pFK->aCol[j].iFrom, regRow+j);
+            sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk); VdbeCoverage(v);
+          }
+          if( pParent ){
+            sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey,
+                              sqlite3IndexAffinityStr(db,pIdx), pFK->nCol);
+            sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0);
+            VdbeCoverage(v);
+          }
+        }
+        sqlite3VdbeAddOp2(v, OP_Rowid, 0, regResult+1);
+        sqlite3VdbeMultiLoad(v, regResult+2, "si", pFK->zTo, i-1);
+        sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, 4);
+        sqlite3VdbeResolveLabel(v, addrOk);
+        sqlite3DbFree(db, aiCols);
+      }
+      sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1); VdbeCoverage(v);
+      sqlite3VdbeJumpHere(v, addrTop);
+    }
+  }
+  break;
+#endif /* !defined(SQLITE_OMIT_TRIGGER) */
+#endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */
+
+#ifndef NDEBUG
+  case PragTyp_PARSER_TRACE: {
+    if( zRight ){
+      if( sqlite3GetBoolean(zRight, 0) ){
+        sqlite3ParserTrace(stdout, "parser: ");
+      }else{
+        sqlite3ParserTrace(0, 0);
+      }
+    }
+  }
+  break;
+#endif
+
+  /* Reinstall the LIKE and GLOB functions.  The variant of LIKE
+  ** used will be case sensitive or not depending on the RHS.
+  */
+  case PragTyp_CASE_SENSITIVE_LIKE: {
+    if( zRight ){
+      sqlite3RegisterLikeFunctions(db, sqlite3GetBoolean(zRight, 0));
+    }
+  }
+  break;
+
+#ifndef SQLITE_INTEGRITY_CHECK_ERROR_MAX
+# define SQLITE_INTEGRITY_CHECK_ERROR_MAX 100
+#endif
+
+#ifndef SQLITE_OMIT_INTEGRITY_CHECK
+  /* Pragma "quick_check" is a reduced version of
+  ** integrity_check designed to detect most database corruption
+  ** without most of the overhead of a full integrity-check.
+  */
+  case PragTyp_INTEGRITY_CHECK: {
+    int i, j, addr, mxErr;
+
+    int isQuick = (sqlite3Tolower(zLeft[0])=='q');
+
+    /* If the PRAGMA command was of the form "PRAGMA <db>.integrity_check",
+    ** then iDb is set to the index of the database identified by <db>.
+    ** In this case, the integrity of database iDb only is verified by
+    ** the VDBE created below.
+    **
+    ** Otherwise, if the command was simply "PRAGMA integrity_check" (or
+    ** "PRAGMA quick_check"), then iDb is set to 0. In this case, set iDb
+    ** to -1 here, to indicate that the VDBE should verify the integrity
+    ** of all attached databases.  */
+    assert( iDb>=0 );
+    assert( iDb==0 || pId2->z );
+    if( pId2->z==0 ) iDb = -1;
+
+    /* Initialize the VDBE program */
+    pParse->nMem = 6;
+
+    /* Set the maximum error count */
+    mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX;
+    if( zRight ){
+      sqlite3GetInt32(zRight, &mxErr);
+      if( mxErr<=0 ){
+        mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX;
+      }
+    }
+    sqlite3VdbeAddOp2(v, OP_Integer, mxErr, 1); /* reg[1] holds errors left */
+
+    /* Do an integrity check on each database file */
+    for(i=0; i<db->nDb; i++){
+      HashElem *x;
+      Hash *pTbls;
+      int *aRoot;
+      int cnt = 0;
+      int mxIdx = 0;
+      int nIdx;
+
+      if( OMIT_TEMPDB && i==1 ) continue;
+      if( iDb>=0 && i!=iDb ) continue;
+
+      sqlite3CodeVerifySchema(pParse, i);
+      addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */
+      VdbeCoverage(v);
+      sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
+      sqlite3VdbeJumpHere(v, addr);
+
+      /* Do an integrity check of the B-Tree
+      **
+      ** Begin by finding the root page numbers
+      ** for all tables and indices in the database.
+      */
+      assert( sqlite3SchemaMutexHeld(db, i, 0) );
+      pTbls = &db->aDb[i].pSchema->tblHash;
+      for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
+        Table *pTab = sqliteHashData(x);
+        Index *pIdx;
+        if( HasRowid(pTab) ) cnt++;
+        for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; }
+        if( nIdx>mxIdx ) mxIdx = nIdx;
+      }
+      aRoot = sqlite3DbMallocRawNN(db, sizeof(int)*(cnt+1));
+      if( aRoot==0 ) break;
+      for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
+        Table *pTab = sqliteHashData(x);
+        Index *pIdx;
+        if( HasRowid(pTab) ) aRoot[cnt++] = pTab->tnum;
+        for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+          aRoot[cnt++] = pIdx->tnum;
+        }
+      }
+      aRoot[cnt] = 0;
+
+      /* Make sure sufficient number of registers have been allocated */
+      pParse->nMem = MAX( pParse->nMem, 8+mxIdx );
+
+      /* Do the b-tree integrity checks */
+      sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY);
+      sqlite3VdbeChangeP5(v, (u8)i);
+      addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v);
+      sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
+         sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName),
+         P4_DYNAMIC);
+      sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1);
+      sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2);
+      sqlite3VdbeAddOp2(v, OP_ResultRow, 2, 1);
+      sqlite3VdbeJumpHere(v, addr);
+
+      /* Make sure all the indices are constructed correctly.
+      */
+      for(x=sqliteHashFirst(pTbls); x && !isQuick; x=sqliteHashNext(x)){
+        Table *pTab = sqliteHashData(x);
+        Index *pIdx, *pPk;
+        Index *pPrior = 0;
+        int loopTop;
+        int iDataCur, iIdxCur;
+        int r1 = -1;
+
+        if( pTab->pIndex==0 ) continue;
+        pPk = HasRowid(pTab) ?
0 : sqlite3PrimaryKeyIndex(pTab);
+        addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1);  /* Stop if out of errors */
+        VdbeCoverage(v);
+        sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
+        sqlite3VdbeJumpHere(v, addr);
+        sqlite3ExprCacheClear(pParse);
+        sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0,
+                                   1, 0, &iDataCur, &iIdxCur);
+        sqlite3VdbeAddOp2(v, OP_Integer, 0, 7);
+        for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+          sqlite3VdbeAddOp2(v, OP_Integer, 0, 8+j); /* index entries counter */
+        }
+        assert( pParse->nMem>=8+j );
+        assert( sqlite3NoTempsInRange(pParse,1,7+j) );
+        sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v);
+        loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1);
+        /* Verify that all NOT NULL columns really are NOT NULL */
+        for(j=0; j<pTab->nCol; j++){
+          char *zErr;
+          int jmp2, jmp3;
+          if( j==pTab->iPKey ) continue;
+          if( pTab->aCol[j].notNull==0 ) continue;
+          sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3);
+          sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
+          jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v);
+          sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */
+          zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName,
+                              pTab->aCol[j].zName);
+          sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
+          sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1);
+          jmp3 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v);
+          sqlite3VdbeAddOp0(v, OP_Halt);
+          sqlite3VdbeJumpHere(v, jmp2);
+          sqlite3VdbeJumpHere(v, jmp3);
+        }
+        /* Validate index entries for the current row */
+        for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+          int jmp2, jmp3, jmp4, jmp5;
+          int ckUniq = sqlite3VdbeMakeLabel(v);
+          if( pPk==pIdx ) continue;
+          r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3,
+                                       pPrior, r1);
+          pPrior = pIdx;
+          sqlite3VdbeAddOp2(v, OP_AddImm, 8+j, 1);  /* increment entry count */
+          /* Verify that an index entry exists for the current table row */
+          jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, ckUniq, r1,
+                                      pIdx->nColumn); VdbeCoverage(v);
+          sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */
+          sqlite3VdbeLoadString(v, 3, "row ");
+          sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3);
+          sqlite3VdbeLoadString(v, 4, " missing from index ");
+          sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
+          jmp5 = sqlite3VdbeLoadString(v, 4, pIdx->zName);
+          sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
+          sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1);
+          jmp4 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v);
+          sqlite3VdbeAddOp0(v, OP_Halt);
+          sqlite3VdbeJumpHere(v, jmp2);
+          /* For UNIQUE indexes, verify that only one entry exists with the
+          ** current key.  The entry is unique if (1) any column is NULL
+          ** or (2) the next entry has a different key */
+          if( IsUniqueIndex(pIdx) ){
+            int uniqOk = sqlite3VdbeMakeLabel(v);
+            int jmp6;
+            int kk;
+            for(kk=0; kk<pIdx->nKeyCol; kk++){
+              int iCol = pIdx->aiColumn[kk];
+              assert( iCol!=XN_ROWID && iCol<pTab->nCol );
+              if( iCol>=0 && pTab->aCol[iCol].notNull ) continue;
+              sqlite3VdbeAddOp2(v, OP_IsNull, r1+kk, uniqOk);
+              VdbeCoverage(v);
+            }
+            jmp6 = sqlite3VdbeAddOp1(v, OP_Next, iIdxCur+j); VdbeCoverage(v);
+            sqlite3VdbeGoto(v, uniqOk);
+            sqlite3VdbeJumpHere(v, jmp6);
+            sqlite3VdbeAddOp4Int(v, OP_IdxGT, iIdxCur+j, uniqOk, r1,
+                                 pIdx->nKeyCol); VdbeCoverage(v);
+            sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */
+            sqlite3VdbeLoadString(v, 3, "non-unique entry in index ");
+            sqlite3VdbeGoto(v, jmp5);
+            sqlite3VdbeResolveLabel(v, uniqOk);
+          }
+          sqlite3VdbeJumpHere(v, jmp4);
+          sqlite3ResolvePartIdxLabel(pParse, jmp3);
+        }
+        sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v);
+        sqlite3VdbeJumpHere(v, loopTop-1);
+#ifndef SQLITE_OMIT_BTREECOUNT
+        sqlite3VdbeLoadString(v, 2, "wrong # of entries in index ");
+        for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+          if( pPk==pIdx ) continue;
+          addr = sqlite3VdbeCurrentAddr(v);
+          sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr+2); VdbeCoverage(v);
+          sqlite3VdbeAddOp2(v, OP_Halt, 0, 0);
+          sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3);
+          sqlite3VdbeAddOp3(v, OP_Eq, 8+j, addr+8, 3); VdbeCoverage(v);
+          sqlite3VdbeChangeP5(v, SQLITE_NOTNULL);
+          sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1);
+          sqlite3VdbeLoadString(v, 3, pIdx->zName);
+          sqlite3VdbeAddOp3(v, OP_Concat, 3, 2, 7);
+          sqlite3VdbeAddOp2(v, OP_ResultRow, 7, 1);
+        }
+#endif /* SQLITE_OMIT_BTREECOUNT */
+      }
+    }
+    {
+      static const int iLn = VDBE_OFFSET_LINENO(2);
+      static const VdbeOpList endCode[] = {
+        { OP_AddImm,      1, 0,        0},    /* 0 */
+        { OP_If,          1, 4,        0},    /* 1 */
+        { OP_String8,     0, 3,        0},    /* 2 */
+        { OP_ResultRow,   3, 1,        0},    /* 3 */
+      };
+      VdbeOp *aOp;
+
+      aOp = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode, iLn);
+      if( aOp ){
+        aOp[0].p2 = -mxErr;
+        aOp[2].p4type = P4_STATIC;
+        aOp[2].p4.z = "ok";
+      }
+    }
+  }
+  break;
+#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
+
+#ifndef SQLITE_OMIT_UTF16
+  /*
+  **   PRAGMA encoding
+  **   PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be"
+  **
+  ** In its first form, this pragma returns the encoding of the main
+  ** database. If the database is not initialized, it is initialized now.
+  **
+  ** The second form of this pragma is a no-op if the main database file
+  ** has not already been initialized. In this case it sets the default
+  ** encoding that will be used for the main database file if a new file
+  ** is created. If an existing main database file is opened, then the
+  ** default text encoding for the existing database is used.
+  **
+  ** In all cases new databases created using the ATTACH command are
+  ** created to use the same default text encoding as the main database. If
+  ** the main database has not been initialized and/or created when ATTACH
+  ** is executed, this is done before the ATTACH operation.
+  **
+  ** In the second form this pragma sets the text encoding to be used in
+  ** new database files created using this database handle.
+  ** It is only useful if invoked immediately after the main database
+  ** is created.
+  */
+  case PragTyp_ENCODING: {
+    static const struct EncName {
+      char *zName;
+      u8 enc;
+    } encnames[] = {
+      { "UTF8",     SQLITE_UTF8        },
+      { "UTF-8",    SQLITE_UTF8        },  /* Must be element [1] */
+      { "UTF-16le", SQLITE_UTF16LE     },  /* Must be element [2] */
+      { "UTF-16be", SQLITE_UTF16BE     },  /* Must be element [3] */
+      { "UTF16le",  SQLITE_UTF16LE     },
+      { "UTF16be",  SQLITE_UTF16BE     },
+      { "UTF-16",   0                  },  /* SQLITE_UTF16NATIVE */
+      { "UTF16",    0                  },  /* SQLITE_UTF16NATIVE */
+      { 0, 0 }
+    };
+    const struct EncName *pEnc;
+    if( !zRight ){    /* "PRAGMA encoding" */
+      if( sqlite3ReadSchema(pParse) ) goto pragma_out;
+      assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 );
+      assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE );
+      assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE );
+      returnSingleText(v, encnames[ENC(pParse->db)].zName);
+    }else{                        /* "PRAGMA encoding = XXX" */
+      /* Only change the value of sqlite.enc if the database handle is not
+      ** initialized. If the main database exists, the new sqlite.enc value
+      ** will be overwritten when the schema is next loaded. If it does not
+      ** already exist, it will be created to use the new encoding value.
+      */
+      if(
+        !(DbHasProperty(db, 0, DB_SchemaLoaded)) ||
+        DbHasProperty(db, 0, DB_Empty)
+      ){
+        for(pEnc=&encnames[0]; pEnc->zName; pEnc++){
+          if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){
+            SCHEMA_ENC(db) = ENC(db) =
+                pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE;
+            break;
+          }
+        }
+        if( !pEnc->zName ){
+          sqlite3ErrorMsg(pParse, "unsupported encoding: %s", zRight);
+        }
+      }
+    }
+  }
+  break;
+#endif /* SQLITE_OMIT_UTF16 */
+
+#ifndef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
+  /*
+  **   PRAGMA [schema.]schema_version
+  **   PRAGMA [schema.]schema_version = <integer>
+  **
+  **   PRAGMA [schema.]user_version
+  **   PRAGMA [schema.]user_version = <integer>
+  **
+  **   PRAGMA [schema.]freelist_count
+  **
+  **   PRAGMA [schema.]data_version
+  **
+  **   PRAGMA [schema.]application_id
+  **   PRAGMA [schema.]application_id = <integer>
+  **
+  ** The pragmas schema_version and user_version are used to set or get
+  ** the value of the schema-version and user-version, respectively. Both
+  ** the schema-version and the user-version are 32-bit signed integers
+  ** stored in the database header.
+  **
+  ** The schema-cookie is usually only manipulated internally by SQLite. It
+  ** is incremented by SQLite whenever the database schema is modified (by
+  ** creating or dropping a table or index). The schema version is used by
+  ** SQLite each time a query is executed to ensure that the internal cache
+  ** of the schema used when compiling the SQL query matches the schema of
+  ** the database against which the compiled query is actually executed.
+  ** Subverting this mechanism by using "PRAGMA schema_version" to modify
+  ** the schema-version is potentially dangerous and may lead to program
+  ** crashes or database corruption. Use with caution!
+  **
+  ** The user-version is not used internally by SQLite. It may be used by
+  ** applications for any purpose.
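+  **
+  ** For example, an application might track its own schema migrations
+  ** with (illustrative values):
+  **
+  **     PRAGMA user_version;       -- 0 on a freshly created database
+  **     PRAGMA user_version = 7;   -- record that migration 7 has run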
+ */ + case PragTyp_HEADER_VALUE: { + int iCookie = pPragma->iArg; /* Which cookie to read or write */ + sqlite3VdbeUsesBtree(v, iDb); + if( zRight && (pPragma->mPragFlg & PragFlg_ReadOnly)==0 ){ + /* Write the specified cookie value */ + static const VdbeOpList setCookie[] = { + { OP_Transaction, 0, 1, 0}, /* 0 */ + { OP_SetCookie, 0, 0, 0}, /* 1 */ + }; + VdbeOp *aOp; + sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(setCookie)); + aOp = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie, 0); + if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break; + aOp[0].p1 = iDb; + aOp[1].p1 = iDb; + aOp[1].p2 = iCookie; + aOp[1].p3 = sqlite3Atoi(zRight); + }else{ + /* Read the specified cookie value */ + static const VdbeOpList readCookie[] = { + { OP_Transaction, 0, 0, 0}, /* 0 */ + { OP_ReadCookie, 0, 1, 0}, /* 1 */ + { OP_ResultRow, 1, 1, 0} + }; + VdbeOp *aOp; + sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(readCookie)); + aOp = sqlite3VdbeAddOpList(v, ArraySize(readCookie),readCookie,0); + if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break; + aOp[0].p1 = iDb; + aOp[1].p1 = iDb; + aOp[1].p3 = iCookie; + sqlite3VdbeReusable(v); + } + } + break; +#endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */ + +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS + /* + ** PRAGMA compile_options + ** + ** Return the names of all compile-time options used in this build, + ** one option per row. + */ + case PragTyp_COMPILE_OPTIONS: { + int i = 0; + const char *zOpt; + pParse->nMem = 1; + while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){ + sqlite3VdbeLoadString(v, 1, zOpt); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); + } + sqlite3VdbeReusable(v); + } + break; +#endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ + +#ifndef SQLITE_OMIT_WAL + /* + ** PRAGMA [schema.]wal_checkpoint = passive|full|restart|truncate + ** + ** Checkpoint the database. + */ + case PragTyp_WAL_CHECKPOINT: { + int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); + int eMode = SQLITE_CHECKPOINT_PASSIVE; + if( zRight ){ + if( sqlite3StrICmp(zRight, "full")==0 ){ + eMode = SQLITE_CHECKPOINT_FULL; + }else if( sqlite3StrICmp(zRight, "restart")==0 ){ + eMode = SQLITE_CHECKPOINT_RESTART; + }else if( sqlite3StrICmp(zRight, "truncate")==0 ){ + eMode = SQLITE_CHECKPOINT_TRUNCATE; + } + } + pParse->nMem = 3; + sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1); + sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); + } + break; + + /* + ** PRAGMA wal_autocheckpoint + ** PRAGMA wal_autocheckpoint = N + ** + ** Configure a database connection to automatically checkpoint a database + ** after accumulating N frames in the log. Or query for the current value + ** of N. + */ + case PragTyp_WAL_AUTOCHECKPOINT: { + if( zRight ){ + sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight)); + } + returnSingleInt(v, + db->xWalCallback==sqlite3WalDefaultHook ? + SQLITE_PTR_TO_INT(db->pWalArg) : 0); + } + break; +#endif + + /* + ** PRAGMA shrink_memory + ** + ** IMPLEMENTATION-OF: R-23445-46109 This pragma causes the database + ** connection on which it is invoked to free up as much memory as it + ** can, by calling sqlite3_db_release_memory(). + */ + case PragTyp_SHRINK_MEMORY: { + sqlite3_db_release_memory(db); + break; + } + + /* + ** PRAGMA busy_timeout + ** PRAGMA busy_timeout = N + ** + ** Call sqlite3_busy_timeout(db, N). Return the current timeout value + ** if one is set. If no busy handler or a different busy handler is set + ** then 0 is returned. Setting the busy_timeout to 0 or negative + ** disables the timeout. 
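+  **
+  ** For example, the following two calls are equivalent ways of making
+  ** this connection wait up to two seconds for locks (sketch):
+  **
+  **     sqlite3_exec(db, "PRAGMA busy_timeout=2000", 0, 0, 0);
+  **     sqlite3_busy_timeout(db, 2000);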
+  */
+  /*case PragTyp_BUSY_TIMEOUT*/ default: {
+    assert( pPragma->ePragTyp==PragTyp_BUSY_TIMEOUT );
+    if( zRight ){
+      sqlite3_busy_timeout(db, sqlite3Atoi(zRight));
+    }
+    returnSingleInt(v, db->busyTimeout);
+    break;
+  }
+
+  /*
+  **   PRAGMA soft_heap_limit
+  **   PRAGMA soft_heap_limit = N
+  **
+  ** IMPLEMENTATION-OF: R-26343-45930 This pragma invokes the
+  ** sqlite3_soft_heap_limit64() interface with the argument N, if N is
+  ** specified and is a non-negative integer.
+  ** IMPLEMENTATION-OF: R-64451-07163 The soft_heap_limit pragma always
+  ** returns the same integer that would be returned by the
+  ** sqlite3_soft_heap_limit64(-1) C-language function.
+  */
+  case PragTyp_SOFT_HEAP_LIMIT: {
+    sqlite3_int64 N;
+    if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){
+      sqlite3_soft_heap_limit64(N);
+    }
+    returnSingleInt(v, sqlite3_soft_heap_limit64(-1));
+    break;
+  }
+
+  /*
+  **   PRAGMA threads
+  **   PRAGMA threads = N
+  **
+  ** Configure the maximum number of worker threads.  Return the new
+  ** maximum, which might be less than requested.
+  */
+  case PragTyp_THREADS: {
+    sqlite3_int64 N;
+    if( zRight
+     && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK
+     && N>=0
+    ){
+      sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff));
+    }
+    returnSingleInt(v, sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1));
+    break;
+  }
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
+  /*
+  ** Report the current state of file logs for all databases
+  */
+  case PragTyp_LOCK_STATUS: {
+    static const char *const azLockName[] = {
+      "unlocked", "shared", "reserved", "pending", "exclusive"
+    };
+    int i;
+    pParse->nMem = 2;
+    for(i=0; i<db->nDb; i++){
+      Btree *pBt;
+      const char *zState = "unknown";
+      int j;
+      if( db->aDb[i].zDbSName==0 ) continue;
+      pBt = db->aDb[i].pBt;
+      if( pBt==0 || sqlite3BtreePager(pBt)==0 ){
+        zState = "closed";
+      }else if( sqlite3_file_control(db, i ? db->aDb[i].zDbSName : 0,
+                                     SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){
+        zState = azLockName[j];
+      }
+      sqlite3VdbeMultiLoad(v, 1, "ss", db->aDb[i].zDbSName, zState);
+      sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2);
+    }
+    break;
+  }
+#endif
+
+#ifdef SQLITE_HAS_CODEC
+  case PragTyp_KEY: {
+    if( zRight ) sqlite3_key_v2(db, zDb, zRight, sqlite3Strlen30(zRight));
+    break;
+  }
+  case PragTyp_REKEY: {
+    if( zRight ) sqlite3_rekey_v2(db, zDb, zRight, sqlite3Strlen30(zRight));
+    break;
+  }
+  case PragTyp_HEXKEY: {
+    if( zRight ){
+      u8 iByte;
+      int i;
+      char zKey[40];
+      for(i=0, iByte=0; i<sizeof(zKey)*2 && sqlite3Isxdigit(zRight[i]); i++){
+        iByte = (iByte<<4) + sqlite3HexToInt(zRight[i]);
+        if( (i&1)!=0 ) zKey[i/2] = iByte;
+      }
+      if( (zLeft[3] & 0xf)==0xb ){
+        sqlite3_key_v2(db, zDb, zKey, i/2);
+      }else{
+        sqlite3_rekey_v2(db, zDb, zKey, i/2);
+      }
+    }
+    break;
+  }
+#endif
+#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD)
+  case PragTyp_ACTIVATE_EXTENSIONS: if( zRight ){
+#ifdef SQLITE_HAS_CODEC
+    if( sqlite3StrNICmp(zRight, "see-", 4)==0 ){
+      sqlite3_activate_see(&zRight[4]);
+    }
+#endif
+#ifdef SQLITE_ENABLE_CEROD
+    if( sqlite3StrNICmp(zRight, "cerod-", 6)==0 ){
+      sqlite3_activate_cerod(&zRight[6]);
+    }
+#endif
+  }
+  break;
+#endif
+
+  } /* End of the PRAGMA switch */
+
+  /* The following block is a no-op unless SQLITE_DEBUG is defined. Its only
+  ** purpose is to execute assert() statements to verify that if the
+  ** PragFlg_NoColumns1 flag is set and the caller specified an argument
+  ** to the pragma, no result rows are returned by the pragma.
+  */
+  if( (pPragma->mPragFlg & PragFlg_NoColumns1) && zRight ){
+    sqlite3VdbeVerifyNoResultRow(v);
+  }
+
+pragma_out:
+  sqlite3DbFree(db, zLeft);
+  sqlite3DbFree(db, zRight);
+}
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*****************************************************************************
+** Implementation of an eponymous virtual table that runs a pragma.
+**
+*/
+typedef struct PragmaVtab PragmaVtab;
+typedef struct PragmaVtabCursor PragmaVtabCursor;
+struct PragmaVtab {
+  sqlite3_vtab base;        /* Base class.  Must be first */
+  sqlite3 *db;              /* The database connection to which it belongs */
+  const PragmaName *pName;  /* Name of the pragma */
+  u8 nHidden;               /* Number of hidden columns */
+  u8 iHidden;               /* Index of the first hidden column */
+};
+struct PragmaVtabCursor {
+  sqlite3_vtab_cursor base; /* Base class.  Must be first */
+  sqlite3_stmt *pPragma;    /* The pragma statement to run */
+  sqlite_int64 iRowid;      /* Current rowid */
+  char *azArg[2];           /* Value of the argument and schema */
+};
+
+/*
+** Pragma virtual table module xConnect method.
+*/
+static int pragmaVtabConnect(
+  sqlite3 *db,
+  void *pAux,
+  int argc, const char *const*argv,
+  sqlite3_vtab **ppVtab,
+  char **pzErr
+){
+  const PragmaName *pPragma = (const PragmaName*)pAux;
+  PragmaVtab *pTab = 0;
+  int rc;
+  int i, j;
+  char cSep = '(';
+  StrAccum acc;
+  char zBuf[200];
+
+  UNUSED_PARAMETER(argc);
+  UNUSED_PARAMETER(argv);
+  sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+  sqlite3StrAccumAppendAll(&acc, "CREATE TABLE x");
+  for(i=0, j=pPragma->iPragCName; i<pPragma->nPragCName; i++, j++){
+    sqlite3XPrintf(&acc, "%c\"%s\"", cSep, pragCName[j]);
+    cSep = ',';
+  }
+  if( i==0 ){
+    sqlite3XPrintf(&acc, "(\"%s\"", pPragma->zName);
+    cSep = ',';
+    i++;
+  }
+  j = 0;
+  if( pPragma->mPragFlg & PragFlg_Result1 ){
+    sqlite3StrAccumAppendAll(&acc, ",arg HIDDEN");
+    j++;
+  }
+  if( pPragma->mPragFlg & (PragFlg_SchemaOpt|PragFlg_SchemaReq) ){
+    sqlite3StrAccumAppendAll(&acc, ",schema HIDDEN");
+    j++;
+  }
+  sqlite3StrAccumAppend(&acc, ")", 1);
+  sqlite3StrAccumFinish(&acc);
+  assert( strlen(zBuf) < sizeof(zBuf)-1 );
+  rc = sqlite3_declare_vtab(db, zBuf);
+  if( rc==SQLITE_OK ){
+    pTab = (PragmaVtab*)sqlite3_malloc(sizeof(PragmaVtab));
+    if( pTab==0 ){
+      rc = SQLITE_NOMEM;
+    }else{
+      memset(pTab, 0, sizeof(PragmaVtab));
+      pTab->pName = pPragma;
+      pTab->db = db;
+      pTab->iHidden = i;
+      pTab->nHidden = j;
+    }
+  }else{
+    *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+  }
+
+  *ppVtab = (sqlite3_vtab*)pTab;
+  return rc;
+}
+
+/*
+** Pragma virtual table module xDisconnect method.
+*/
+static int pragmaVtabDisconnect(sqlite3_vtab *pVtab){
+  PragmaVtab *pTab = (PragmaVtab*)pVtab;
+  sqlite3_free(pTab);
+  return SQLITE_OK;
+}
+
+/* Figure out the best index to use to search a pragma virtual table.
+**
+** There are not really any index choices.  But we want to encourage the
+** query planner to give == constraints on as many hidden parameters as
+** possible, and especially on the first hidden parameter.  So return a
+** high cost if hidden parameters are unconstrained.
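+** For example, "SELECT * FROM pragma_table_info WHERE arg='t1'"
+** (equivalently, pragma_table_info('t1')) supplies an == constraint on
+** the first hidden column and so gets the cheap plan; omitting the
+** argument yields the large estimated cost below.  ('t1' is an
+** illustrative table name.)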
+*/
+static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+  PragmaVtab *pTab = (PragmaVtab*)tab;
+  const struct sqlite3_index_constraint *pConstraint;
+  int i, j;
+  int seen[2];
+
+  pIdxInfo->estimatedCost = (double)1;
+  if( pTab->nHidden==0 ){ return SQLITE_OK; }
+  pConstraint = pIdxInfo->aConstraint;
+  seen[0] = 0;
+  seen[1] = 0;
+  for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
+    if( pConstraint->usable==0 ) continue;
+    if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
+    if( pConstraint->iColumn < pTab->iHidden ) continue;
+    j = pConstraint->iColumn - pTab->iHidden;
+    assert( j < 2 );
+    seen[j] = i+1;
+  }
+  if( seen[0]==0 ){
+    pIdxInfo->estimatedCost = (double)2147483647;
+    pIdxInfo->estimatedRows = 2147483647;
+    return SQLITE_OK;
+  }
+  j = seen[0]-1;
+  pIdxInfo->aConstraintUsage[j].argvIndex = 1;
+  pIdxInfo->aConstraintUsage[j].omit = 1;
+  if( seen[1]==0 ) return SQLITE_OK;
+  pIdxInfo->estimatedCost = (double)20;
+  pIdxInfo->estimatedRows = 20;
+  j = seen[1]-1;
+  pIdxInfo->aConstraintUsage[j].argvIndex = 2;
+  pIdxInfo->aConstraintUsage[j].omit = 1;
+  return SQLITE_OK;
+}
+
+/* Create a new cursor for the pragma virtual table */
+static int pragmaVtabOpen(sqlite3_vtab *pVtab, sqlite3_vtab_cursor **ppCursor){
+  PragmaVtabCursor *pCsr;
+  pCsr = (PragmaVtabCursor*)sqlite3_malloc(sizeof(*pCsr));
+  if( pCsr==0 ) return SQLITE_NOMEM;
+  memset(pCsr, 0, sizeof(PragmaVtabCursor));
+  pCsr->base.pVtab = pVtab;
+  *ppCursor = &pCsr->base;
+  return SQLITE_OK;
+}
+
+/* Clear all content from pragma virtual table cursor. */
+static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){
+  int i;
+  sqlite3_finalize(pCsr->pPragma);
+  pCsr->pPragma = 0;
+  for(i=0; i<ArraySize(pCsr->azArg); i++){
+    sqlite3_free(pCsr->azArg[i]);
+    pCsr->azArg[i] = 0;
+  }
+}
+
+/* Close a pragma virtual table cursor */
+static int pragmaVtabClose(sqlite3_vtab_cursor *cur){
+  PragmaVtabCursor *pCsr = (PragmaVtabCursor*)cur;
+  pragmaVtabCursorClear(pCsr);
+  sqlite3_free(pCsr);
+  return SQLITE_OK;
+}
+
+/* Advance the pragma virtual table cursor to the next row */
+static int pragmaVtabNext(sqlite3_vtab_cursor *pVtabCursor){
+  PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+  int rc = SQLITE_OK;
+
+  /* Increment the xRowid value */
+  pCsr->iRowid++;
+  assert( pCsr->pPragma );
+  if( SQLITE_ROW!=sqlite3_step(pCsr->pPragma) ){
+    rc = sqlite3_finalize(pCsr->pPragma);
+    pCsr->pPragma = 0;
+    pragmaVtabCursorClear(pCsr);
+  }
+  return rc;
+}
+
+/*
+** Pragma virtual table module xFilter method.
+*/
+static int pragmaVtabFilter(
+  sqlite3_vtab_cursor *pVtabCursor,
+  int idxNum, const char *idxStr,
+  int argc, sqlite3_value **argv
+){
+  PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+  PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab);
+  int rc;
+  int i, j;
+  StrAccum acc;
+  char *zSql;
+
+  UNUSED_PARAMETER(idxNum);
+  UNUSED_PARAMETER(idxStr);
+  pragmaVtabCursorClear(pCsr);
+  j = (pTab->pName->mPragFlg & PragFlg_Result1)!=0 ? 0 : 1;
+  for(i=0; i<argc; i++, j++){
+    assert( j<ArraySize(pCsr->azArg) );
+    pCsr->azArg[j] = sqlite3_mprintf("%s", sqlite3_value_text(argv[i]));
+    if( pCsr->azArg[j]==0 ){
+      return SQLITE_NOMEM;
+    }
+  }
+  sqlite3StrAccumInit(&acc, 0, 0, 0, pTab->db->aLimit[SQLITE_LIMIT_SQL_LENGTH]);
+  sqlite3StrAccumAppendAll(&acc, "PRAGMA ");
+  if( pCsr->azArg[1] ){
+    sqlite3XPrintf(&acc, "%Q.", pCsr->azArg[1]);
+  }
+  sqlite3StrAccumAppendAll(&acc, pTab->pName->zName);
+  if( pCsr->azArg[0] ){
+    sqlite3XPrintf(&acc, "=%Q", pCsr->azArg[0]);
+  }
+  zSql = sqlite3StrAccumFinish(&acc);
+  if( zSql==0 ) return SQLITE_NOMEM;
+  rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pPragma, 0);
+  sqlite3_free(zSql);
+  if( rc!=SQLITE_OK ){
+    pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db));
+    return rc;
+  }
+  return pragmaVtabNext(pVtabCursor);
+}
+
+/*
+** Pragma virtual table module xEof method.
+*/
+static int pragmaVtabEof(sqlite3_vtab_cursor *pVtabCursor){
+  PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+  return (pCsr->pPragma==0);
+}
+
+/* The xColumn method simply returns the corresponding column from
+** the PRAGMA.
+*/
+static int pragmaVtabColumn(
+  sqlite3_vtab_cursor *pVtabCursor,
+  sqlite3_context *ctx,
+  int i
+){
+  PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+  PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab);
+  if( i<pTab->iHidden ){
+    sqlite3_result_value(ctx, sqlite3_column_value(pCsr->pPragma, i));
+  }else{
+    sqlite3_result_text(ctx, pCsr->azArg[i-pTab->iHidden],-1,SQLITE_TRANSIENT);
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Pragma virtual table module xRowid method.
+*/
+static int pragmaVtabRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *p){
+  PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+  *p = pCsr->iRowid;
+  return SQLITE_OK;
+}
+
+/* The pragma virtual table object */
+static const sqlite3_module pragmaVtabModule = {
+  0,                           /* iVersion */
+  0,                           /* xCreate - create a table */
+  pragmaVtabConnect,           /* xConnect - connect to an existing table */
+  pragmaVtabBestIndex,         /* xBestIndex - Determine search strategy */
+  pragmaVtabDisconnect,        /* xDisconnect - Disconnect from a table */
+  0,                           /* xDestroy - Drop a table */
+  pragmaVtabOpen,              /* xOpen - open a cursor */
+  pragmaVtabClose,             /* xClose - close a cursor */
+  pragmaVtabFilter,            /* xFilter - configure scan constraints */
+  pragmaVtabNext,              /* xNext - advance a cursor */
+  pragmaVtabEof,               /* xEof */
+  pragmaVtabColumn,            /* xColumn - read data */
+  pragmaVtabRowid,             /* xRowid - read data */
+  0,                           /* xUpdate - write data */
+  0,                           /* xBegin - begin transaction */
+  0,                           /* xSync - sync transaction */
+  0,                           /* xCommit - commit transaction */
+  0,                           /* xRollback - rollback transaction */
+  0,                           /* xFindFunction - function overloading */
+  0,                           /* xRename - rename the table */
+  0,                           /* xSavepoint */
+  0,                           /* xRelease */
+  0                            /* xRollbackTo */
+};
+
+/*
+** Check to see if zTabName is really the name of a pragma.  If it is,
+** then register an eponymous virtual table for that pragma and return
+** a pointer to the Module object for the new virtual table.
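+** For example, a query against "pragma_index_list" reaches this
+** function, which strips the "pragma_" prefix and looks up "index_list"
+** via pragmaLocate() below.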
+*/ +SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3 *db, const char *zName){ + const PragmaName *pName; + assert( sqlite3_strnicmp(zName, "pragma_", 7)==0 ); + pName = pragmaLocate(zName+7); + if( pName==0 ) return 0; + if( (pName->mPragFlg & (PragFlg_Result0|PragFlg_Result1))==0 ) return 0; + assert( sqlite3HashFind(&db->aModule, zName)==0 ); + return sqlite3VtabCreateModule(db, zName, &pragmaVtabModule, (void*)pName, 0); +} + +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +#endif /* SQLITE_OMIT_PRAGMA */ + +/************** End of pragma.c **********************************************/ +/************** Begin file prepare.c *****************************************/ +/* +** 2005 May 25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the implementation of the sqlite3_prepare() +** interface, and routines that contribute to loading the database schema +** from disk. +*/ +/* #include "sqliteInt.h" */ + +/* +** Fill the InitData structure with an error message that indicates +** that the database is corrupt. +*/ +static void corruptSchema( + InitData *pData, /* Initialization context */ + const char *zObj, /* Object being parsed at the point of error */ + const char *zExtra /* Error information */ +){ + sqlite3 *db = pData->db; + if( !db->mallocFailed && (db->flags & SQLITE_RecoveryMode)==0 ){ + char *z; + if( zObj==0 ) zObj = "?"; + z = sqlite3MPrintf(db, "malformed database schema (%s)", zObj); + if( zExtra ) z = sqlite3MPrintf(db, "%z - %s", z, zExtra); + sqlite3DbFree(db, *pData->pzErrMsg); + *pData->pzErrMsg = z; + } + pData->rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_CORRUPT_BKPT; +} + +/* +** This is the callback routine for the code that initializes the +** database. See sqlite3Init() below for additional information. +** This routine is also called from the OP_ParseSchema opcode of the VDBE. +** +** Each callback contains the following information: +** +** argv[0] = name of thing being created +** argv[1] = root page number for table or index. 0 for trigger or view. +** argv[2] = SQL text for the CREATE statement. +** +*/ +SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){ + InitData *pData = (InitData*)pInit; + sqlite3 *db = pData->db; + int iDb = pData->iDb; + + assert( argc==3 ); + UNUSED_PARAMETER2(NotUsed, argc); + assert( sqlite3_mutex_held(db->mutex) ); + DbClearProperty(db, iDb, DB_Empty); + if( db->mallocFailed ){ + corruptSchema(pData, argv[0], 0); + return 1; + } + + assert( iDb>=0 && iDbnDb ); + if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ + if( argv[1]==0 ){ + corruptSchema(pData, argv[0], 0); + }else if( sqlite3_strnicmp(argv[2],"create ",7)==0 ){ + /* Call the parser to process a CREATE TABLE, INDEX or VIEW. + ** But because db->init.busy is set to 1, no VDBE code is generated + ** or executed. All the parser does is build the internal data + ** structures that describe the table, index, or view. 
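+      ** For example, argv might arrive here as {"t1", "2",
+      ** "CREATE TABLE t1(a,b)"} (an illustrative sqlite_master row), in
+      ** which case the prepare call below only registers the Table object.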
+ */ + int rc; + u8 saved_iDb = db->init.iDb; + sqlite3_stmt *pStmt; + TESTONLY(int rcp); /* Return code from sqlite3_prepare() */ + + assert( db->init.busy ); + db->init.iDb = iDb; + db->init.newTnum = sqlite3Atoi(argv[1]); + db->init.orphanTrigger = 0; + TESTONLY(rcp = ) sqlite3_prepare(db, argv[2], -1, &pStmt, 0); + rc = db->errCode; + assert( (rc&0xFF)==(rcp&0xFF) ); + db->init.iDb = saved_iDb; + assert( saved_iDb==0 || (db->flags & SQLITE_Vacuum)!=0 ); + if( SQLITE_OK!=rc ){ + if( db->init.orphanTrigger ){ + assert( iDb==1 ); + }else{ + pData->rc = rc; + if( rc==SQLITE_NOMEM ){ + sqlite3OomFault(db); + }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ + corruptSchema(pData, argv[0], sqlite3_errmsg(db)); + } + } + } + sqlite3_finalize(pStmt); + }else if( argv[0]==0 || (argv[2]!=0 && argv[2][0]!=0) ){ + corruptSchema(pData, argv[0], 0); + }else{ + /* If the SQL column is blank it means this is an index that + ** was created to be the PRIMARY KEY or to fulfill a UNIQUE + ** constraint for a CREATE TABLE. The index should have already + ** been created when we processed the CREATE TABLE. All we have + ** to do here is record the root page number for that index. + */ + Index *pIndex; + pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zDbSName); + if( pIndex==0 ){ + /* This can occur if there exists an index on a TEMP table which + ** has the same name as another index on a permanent index. Since + ** the permanent table is hidden by the TEMP table, we can also + ** safely ignore the index on the permanent table. + */ + /* Do Nothing */; + }else if( sqlite3GetInt32(argv[1], &pIndex->tnum)==0 ){ + corruptSchema(pData, argv[0], "invalid rootpage"); + } + } + return 0; +} + +/* +** Attempt to read the database schema and initialize internal +** data structures for a single database file. The index of the +** database file is given by iDb. iDb==0 is used for the main +** database. iDb==1 should never be used. iDb>=2 is used for +** auxiliary databases. Return one of the SQLITE_ error codes to +** indicate success or failure. +*/ +static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){ + int rc; + int i; +#ifndef SQLITE_OMIT_DEPRECATED + int size; +#endif + Db *pDb; + char const *azArg[4]; + int meta[5]; + InitData initData; + const char *zMasterName; + int openedTransaction = 0; + + assert( iDb>=0 && iDbnDb ); + assert( db->aDb[iDb].pSchema ); + assert( sqlite3_mutex_held(db->mutex) ); + assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); + + /* Construct the in-memory representation schema tables (sqlite_master or + ** sqlite_temp_master) by invoking the parser directly. The appropriate + ** table name will be inserted automatically by the parser so we can just + ** use the abbreviation "x" here. The parser will also automatically tag + ** the schema table as read-only. 
*/
+  azArg[0] = zMasterName = SCHEMA_TABLE(iDb);
+  azArg[1] = "1";
+  azArg[2] = "CREATE TABLE x(type text,name text,tbl_name text,"
+                            "rootpage integer,sql text)";
+  azArg[3] = 0;
+  initData.db = db;
+  initData.iDb = iDb;
+  initData.rc = SQLITE_OK;
+  initData.pzErrMsg = pzErrMsg;
+  sqlite3InitCallback(&initData, 3, (char **)azArg, 0);
+  if( initData.rc ){
+    rc = initData.rc;
+    goto error_out;
+  }
+
+  /* Create a cursor to hold the database open
+  */
+  pDb = &db->aDb[iDb];
+  if( pDb->pBt==0 ){
+    if( !OMIT_TEMPDB && ALWAYS(iDb==1) ){
+      DbSetProperty(db, 1, DB_SchemaLoaded);
+    }
+    return SQLITE_OK;
+  }
+
+  /* If there is not already a read-only (or read-write) transaction opened
+  ** on the b-tree database, open one now. If a transaction is opened, it
+  ** will be closed before this function returns.  */
+  sqlite3BtreeEnter(pDb->pBt);
+  if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){
+    rc = sqlite3BtreeBeginTrans(pDb->pBt, 0);
+    if( rc!=SQLITE_OK ){
+      sqlite3SetString(pzErrMsg, db, sqlite3ErrStr(rc));
+      goto initone_error_out;
+    }
+    openedTransaction = 1;
+  }
+
+  /* Get the database meta information.
+  **
+  ** Meta values are as follows:
+  **    meta[0]   Schema cookie.  Changes with each schema change.
+  **    meta[1]   File format of schema layer.
+  **    meta[2]   Size of the page cache.
+  **    meta[3]   Largest rootpage (auto/incr_vacuum mode)
+  **    meta[4]   Db text encoding. 1:UTF-8 2:UTF-16LE 3:UTF-16BE
+  **    meta[5]   User version
+  **    meta[6]   Incremental vacuum mode
+  **    meta[7]   unused
+  **    meta[8]   unused
+  **    meta[9]   unused
+  **
+  ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to
+  ** the possible values of meta[4].
+  */
+  for(i=0; i<ArraySize(meta); i++){
+    sqlite3BtreeGetMeta(pDb->pBt, i+1, (u32 *)&meta[i]);
+  }
+  pDb->pSchema->schema_cookie = meta[BTREE_SCHEMA_VERSION-1];
+
+  /* If opening a non-empty database, check the text encoding. For the
+  ** main database, set sqlite3.enc to the encoding of the main database.
+  ** For an attached db, it is an error if the encoding is not the same
+  ** as sqlite3.enc.
+  */
+  if( meta[BTREE_TEXT_ENCODING-1] ){  /* text encoding */
+    if( iDb==0 ){
+#ifndef SQLITE_OMIT_UTF16
+      u8 encoding;
+      /* If opening the main database, set ENC(db). */
+      encoding = (u8)meta[BTREE_TEXT_ENCODING-1] & 3;
+      if( encoding==0 ) encoding = SQLITE_UTF8;
+      ENC(db) = encoding;
+#else
+      ENC(db) = SQLITE_UTF8;
+#endif
+    }else{
+      /* If opening an attached database, the encoding must match ENC(db) */
+      if( meta[BTREE_TEXT_ENCODING-1]!=ENC(db) ){
+        sqlite3SetString(pzErrMsg, db, "attached databases must use the same"
+            " text encoding as main database");
+        rc = SQLITE_ERROR;
+        goto initone_error_out;
+      }
+    }
+  }else{
+    DbSetProperty(db, iDb, DB_Empty);
+  }
+  pDb->pSchema->enc = ENC(db);
+
+  if( pDb->pSchema->cache_size==0 ){
+#ifndef SQLITE_OMIT_DEPRECATED
+    size = sqlite3AbsInt32(meta[BTREE_DEFAULT_CACHE_SIZE-1]);
+    if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; }
+    pDb->pSchema->cache_size = size;
+#else
+    pDb->pSchema->cache_size = SQLITE_DEFAULT_CACHE_SIZE;
+#endif
+    sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size);
+  }
+
+  /*
+  ** file_format==1    Version 3.0.0.
+  ** file_format==2    Version 3.1.3.  // ALTER TABLE ADD COLUMN
+  ** file_format==3    Version 3.1.4.  // ditto but with non-NULL defaults
+  ** file_format==4    Version 3.3.0.  // DESC indices.
Boolean constants + */ + pDb->pSchema->file_format = (u8)meta[BTREE_FILE_FORMAT-1]; + if( pDb->pSchema->file_format==0 ){ + pDb->pSchema->file_format = 1; + } + if( pDb->pSchema->file_format>SQLITE_MAX_FILE_FORMAT ){ + sqlite3SetString(pzErrMsg, db, "unsupported file format"); + rc = SQLITE_ERROR; + goto initone_error_out; + } + + /* Ticket #2804: When we open a database in the newer file format, + ** clear the legacy_file_format pragma flag so that a VACUUM will + ** not downgrade the database and thus invalidate any descending + ** indices that the user might have created. + */ + if( iDb==0 && meta[BTREE_FILE_FORMAT-1]>=4 ){ + db->flags &= ~SQLITE_LegacyFileFmt; + } + + /* Read the schema information out of the schema tables + */ + assert( db->init.busy ); + { + char *zSql; + zSql = sqlite3MPrintf(db, + "SELECT name, rootpage, sql FROM \"%w\".%s ORDER BY rowid", + db->aDb[iDb].zDbSName, zMasterName); +#ifndef SQLITE_OMIT_AUTHORIZATION + { + sqlite3_xauth xAuth; + xAuth = db->xAuth; + db->xAuth = 0; +#endif + rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); +#ifndef SQLITE_OMIT_AUTHORIZATION + db->xAuth = xAuth; + } +#endif + if( rc==SQLITE_OK ) rc = initData.rc; + sqlite3DbFree(db, zSql); +#ifndef SQLITE_OMIT_ANALYZE + if( rc==SQLITE_OK ){ + sqlite3AnalysisLoad(db, iDb); + } +#endif + } + if( db->mallocFailed ){ + rc = SQLITE_NOMEM_BKPT; + sqlite3ResetAllSchemasOfConnection(db); + } + if( rc==SQLITE_OK || (db->flags&SQLITE_RecoveryMode)){ + /* Black magic: If the SQLITE_RecoveryMode flag is set, then consider + ** the schema loaded, even if errors occurred. In this situation the + ** current sqlite3_prepare() operation will fail, but the following one + ** will attempt to compile the supplied statement against whatever subset + ** of the schema was loaded before the error occurred. The primary + ** purpose of this is to allow access to the sqlite_master table + ** even when its contents have been corrupted. + */ + DbSetProperty(db, iDb, DB_SchemaLoaded); + rc = SQLITE_OK; + } + + /* Jump here for an error that occurs after successfully allocating + ** curMain and calling sqlite3BtreeEnter(). For an error that occurs + ** before that point, jump to error_out. + */ +initone_error_out: + if( openedTransaction ){ + sqlite3BtreeCommit(pDb->pBt); + } + sqlite3BtreeLeave(pDb->pBt); + +error_out: + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + sqlite3OomFault(db); + } + return rc; +} + +/* +** Initialize all database files - the main database file, the file +** used to store temporary tables, and any additional database files +** created using ATTACH statements. Return a success code. If an +** error occurs, write an error message into *pzErrMsg. +** +** After a database is initialized, the DB_SchemaLoaded bit is set +** bit is set in the flags field of the Db structure. If the database +** file was of zero-length, then the DB_Empty flag is also set. +*/ +SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){ + int i, rc; + int commit_internal = !(db->flags&SQLITE_InternChanges); + + assert( sqlite3_mutex_held(db->mutex) ); + assert( sqlite3BtreeHoldsMutex(db->aDb[0].pBt) ); + assert( db->init.busy==0 ); + rc = SQLITE_OK; + db->init.busy = 1; + ENC(db) = SCHEMA_ENC(db); + for(i=0; rc==SQLITE_OK && inDb; i++){ + if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue; + rc = sqlite3InitOne(db, i, pzErrMsg); + if( rc ){ + sqlite3ResetOneSchema(db, i); + } + } + + /* Once all the other databases have been initialized, load the schema + ** for the TEMP database. 
This is loaded last, as the TEMP database + ** schema may contain references to objects in other databases. + */ +#ifndef SQLITE_OMIT_TEMPDB + assert( db->nDb>1 ); + if( rc==SQLITE_OK && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ + rc = sqlite3InitOne(db, 1, pzErrMsg); + if( rc ){ + sqlite3ResetOneSchema(db, 1); + } + } +#endif + + db->init.busy = 0; + if( rc==SQLITE_OK && commit_internal ){ + sqlite3CommitInternalChanges(db); + } + + return rc; +} + +/* +** This routine is a no-op if the database schema is already initialized. +** Otherwise, the schema is loaded. An error code is returned. +*/ +SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse){ + int rc = SQLITE_OK; + sqlite3 *db = pParse->db; + assert( sqlite3_mutex_held(db->mutex) ); + if( !db->init.busy ){ + rc = sqlite3Init(db, &pParse->zErrMsg); + } + if( rc!=SQLITE_OK ){ + pParse->rc = rc; + pParse->nErr++; + } + return rc; +} + + +/* +** Check schema cookies in all databases. If any cookie is out +** of date set pParse->rc to SQLITE_SCHEMA. If all schema cookies +** make no changes to pParse->rc. +*/ +static void schemaIsValid(Parse *pParse){ + sqlite3 *db = pParse->db; + int iDb; + int rc; + int cookie; + + assert( pParse->checkSchema ); + assert( sqlite3_mutex_held(db->mutex) ); + for(iDb=0; iDbnDb; iDb++){ + int openedTransaction = 0; /* True if a transaction is opened */ + Btree *pBt = db->aDb[iDb].pBt; /* Btree database to read cookie from */ + if( pBt==0 ) continue; + + /* If there is not already a read-only (or read-write) transaction opened + ** on the b-tree database, open one now. If a transaction is opened, it + ** will be closed immediately after reading the meta-value. */ + if( !sqlite3BtreeIsInReadTrans(pBt) ){ + rc = sqlite3BtreeBeginTrans(pBt, 0); + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ + sqlite3OomFault(db); + } + if( rc!=SQLITE_OK ) return; + openedTransaction = 1; + } + + /* Read the schema cookie from the database. If it does not match the + ** value stored as part of the in-memory schema representation, + ** set Parse.rc to SQLITE_SCHEMA. */ + sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie); + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){ + sqlite3ResetOneSchema(db, iDb); + pParse->rc = SQLITE_SCHEMA; + } + + /* Close the transaction, if one was opened. */ + if( openedTransaction ){ + sqlite3BtreeCommit(pBt); + } + } +} + +/* +** Convert a schema pointer into the iDb index that indicates +** which database file in db->aDb[] the schema refers to. +** +** If the same database is attached more than once, the first +** attached database is returned. +*/ +SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ + int i = -1000000; + + /* If pSchema is NULL, then return -1000000. This happens when code in + ** expr.c is trying to resolve a reference to a transient table (i.e. one + ** created by a sub-select). In this case the return value of this + ** function should never be used. + ** + ** We return -1000000 instead of the more usual -1 simply because using + ** -1000000 as the incorrect index into db->aDb[] is much + ** more likely to cause a segfault than -1 (of course there are assert() + ** statements too, but it never hurts to play the odds). 
+ */ + assert( sqlite3_mutex_held(db->mutex) ); + if( pSchema ){ + for(i=0; ALWAYS(inDb); i++){ + if( db->aDb[i].pSchema==pSchema ){ + break; + } + } + assert( i>=0 && inDb ); + } + return i; +} + +/* +** Free all memory allocations in the pParse object +*/ +SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ + if( pParse ){ + sqlite3 *db = pParse->db; + sqlite3DbFree(db, pParse->aLabel); + sqlite3ExprListDelete(db, pParse->pConstExpr); + if( db ){ + assert( db->lookaside.bDisable >= pParse->disableLookaside ); + db->lookaside.bDisable -= pParse->disableLookaside; + } + pParse->disableLookaside = 0; + } +} + +/* +** Compile the UTF-8 encoded SQL statement zSql into a statement handle. +*/ +static int sqlite3Prepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ + Vdbe *pReprepare, /* VM being reprepared */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + char *zErrMsg = 0; /* Error message */ + int rc = SQLITE_OK; /* Result code */ + int i; /* Loop counter */ + Parse sParse; /* Parsing context */ + + memset(&sParse, 0, PARSE_HDR_SZ); + memset(PARSE_TAIL(&sParse), 0, PARSE_TAIL_SZ); + sParse.pReprepare = pReprepare; + assert( ppStmt && *ppStmt==0 ); + /* assert( !db->mallocFailed ); // not true with SQLITE_USE_ALLOCA */ + assert( sqlite3_mutex_held(db->mutex) ); + + /* Check to verify that it is possible to get a read lock on all + ** database schemas. The inability to get a read lock indicates that + ** some other database connection is holding a write-lock, which in + ** turn means that the other connection has made uncommitted changes + ** to the schema. + ** + ** Were we to proceed and prepare the statement against the uncommitted + ** schema changes and if those schema changes are subsequently rolled + ** back and different changes are made in their place, then when this + ** prepared statement goes to run the schema cookie would fail to detect + ** the schema change. Disaster would follow. + ** + ** This thread is currently holding mutexes on all Btrees (because + ** of the sqlite3BtreeEnterAll() in sqlite3LockAndPrepare()) so it + ** is not possible for another thread to start a new schema change + ** while this routine is running. Hence, we do not need to hold + ** locks on the schema, we just need to make sure nobody else is + ** holding them. + ** + ** Note that setting READ_UNCOMMITTED overrides most lock detection, + ** but it does *not* override schema lock detection, so this all still + ** works even if READ_UNCOMMITTED is set. 
+  */
+  for(i=0; i<db->nDb; i++) {
+    Btree *pBt = db->aDb[i].pBt;
+    if( pBt ){
+      assert( sqlite3BtreeHoldsMutex(pBt) );
+      rc = sqlite3BtreeSchemaLocked(pBt);
+      if( rc ){
+        const char *zDb = db->aDb[i].zDbSName;
+        sqlite3ErrorWithMsg(db, rc, "database schema is locked: %s", zDb);
+        testcase( db->flags & SQLITE_ReadUncommitted );
+        goto end_prepare;
+      }
+    }
+  }
+
+  sqlite3VtabUnlockList(db);
+
+  sParse.db = db;
+  if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){
+    char *zSqlCopy;
+    int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH];
+    testcase( nBytes==mxLen );
+    testcase( nBytes==mxLen+1 );
+    if( nBytes>mxLen ){
+      sqlite3ErrorWithMsg(db, SQLITE_TOOBIG, "statement too long");
+      rc = sqlite3ApiExit(db, SQLITE_TOOBIG);
+      goto end_prepare;
+    }
+    zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes);
+    if( zSqlCopy ){
+      sqlite3RunParser(&sParse, zSqlCopy, &zErrMsg);
+      sParse.zTail = &zSql[sParse.zTail-zSqlCopy];
+      sqlite3DbFree(db, zSqlCopy);
+    }else{
+      sParse.zTail = &zSql[nBytes];
+    }
+  }else{
+    sqlite3RunParser(&sParse, zSql, &zErrMsg);
+  }
+  assert( 0==sParse.nQueryLoop );
+
+  if( sParse.rc==SQLITE_DONE ) sParse.rc = SQLITE_OK;
+  if( sParse.checkSchema ){
+    schemaIsValid(&sParse);
+  }
+  if( db->mallocFailed ){
+    sParse.rc = SQLITE_NOMEM_BKPT;
+  }
+  if( pzTail ){
+    *pzTail = sParse.zTail;
+  }
+  rc = sParse.rc;
+
+#ifndef SQLITE_OMIT_EXPLAIN
+  if( rc==SQLITE_OK && sParse.pVdbe && sParse.explain ){
+    static const char * const azColName[] = {
+       "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment",
+       "selectid", "order", "from", "detail"
+    };
+    int iFirst, mx;
+    if( sParse.explain==2 ){
+      sqlite3VdbeSetNumCols(sParse.pVdbe, 4);
+      iFirst = 8;
+      mx = 12;
+    }else{
+      sqlite3VdbeSetNumCols(sParse.pVdbe, 8);
+      iFirst = 0;
+      mx = 8;
+    }
+    for(i=iFirst; i<mx; i++){
+      sqlite3VdbeSetColName(sParse.pVdbe, i-iFirst, COLNAME_NAME,
+                            azColName[i], SQLITE_STATIC);
+    }
+  }
+#endif
+
+  if( db->init.busy==0 ){
+    Vdbe *pVdbe = sParse.pVdbe;
+    sqlite3VdbeSetSql(pVdbe, zSql, (int)(sParse.zTail-zSql), saveSqlFlag);
+  }
+  if( sParse.pVdbe && (rc!=SQLITE_OK || db->mallocFailed) ){
+    sqlite3VdbeFinalize(sParse.pVdbe);
+    assert(!(*ppStmt));
+  }else{
+    *ppStmt = (sqlite3_stmt*)sParse.pVdbe;
+  }
+
+  if( zErrMsg ){
+    sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg);
+    sqlite3DbFree(db, zErrMsg);
+  }else{
+    sqlite3Error(db, rc);
+  }
+
+  /* Delete any TriggerPrg structures allocated while parsing this statement. */
+  while( sParse.pTriggerPrg ){
+    TriggerPrg *pT = sParse.pTriggerPrg;
+    sParse.pTriggerPrg = pT->pNext;
+    sqlite3DbFree(db, pT);
+  }
+
+end_prepare:
+
+  sqlite3ParserReset(&sParse);
+  rc = sqlite3ApiExit(db, rc);
+  assert( (rc&db->errMask)==rc );
+  return rc;
+}
+static int sqlite3LockAndPrepare(
+  sqlite3 *db,              /* Database handle. */
+  const char *zSql,         /* UTF-8 encoded SQL statement. */
+  int nBytes,               /* Length of zSql in bytes.
*/ + int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ + Vdbe *pOld, /* VM being reprepared */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + int rc; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( ppStmt==0 ) return SQLITE_MISUSE_BKPT; +#endif + *ppStmt = 0; + if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ + return SQLITE_MISUSE_BKPT; + } + sqlite3_mutex_enter(db->mutex); + sqlite3BtreeEnterAll(db); + rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, pOld, ppStmt, pzTail); + if( rc==SQLITE_SCHEMA ){ + sqlite3_finalize(*ppStmt); + rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, pOld, ppStmt, pzTail); + } + sqlite3BtreeLeaveAll(db); + sqlite3_mutex_leave(db->mutex); + assert( rc==SQLITE_OK || *ppStmt==0 ); + return rc; +} + +/* +** Rerun the compilation of a statement after a schema change. +** +** If the statement is successfully recompiled, return SQLITE_OK. Otherwise, +** if the statement cannot be recompiled because another connection has +** locked the sqlite3_master table, return SQLITE_LOCKED. If any other error +** occurs, return SQLITE_SCHEMA. +*/ +SQLITE_PRIVATE int sqlite3Reprepare(Vdbe *p){ + int rc; + sqlite3_stmt *pNew; + const char *zSql; + sqlite3 *db; + + assert( sqlite3_mutex_held(sqlite3VdbeDb(p)->mutex) ); + zSql = sqlite3_sql((sqlite3_stmt *)p); + assert( zSql!=0 ); /* Reprepare only called for prepare_v2() statements */ + db = sqlite3VdbeDb(p); + assert( sqlite3_mutex_held(db->mutex) ); + rc = sqlite3LockAndPrepare(db, zSql, -1, 0, p, &pNew, 0); + if( rc ){ + if( rc==SQLITE_NOMEM ){ + sqlite3OomFault(db); + } + assert( pNew==0 ); + return rc; + }else{ + assert( pNew!=0 ); + } + sqlite3VdbeSwap((Vdbe*)pNew, p); + sqlite3TransferBindings(pNew, (sqlite3_stmt*)p); + sqlite3VdbeResetStepResult((Vdbe*)pNew); + sqlite3VdbeFinalize((Vdbe*)pNew); + return SQLITE_OK; +} + + +/* +** Two versions of the official API. Legacy and new use. In the legacy +** version, the original SQL text is not saved in the prepared statement +** and so if a schema change occurs, SQLITE_SCHEMA is returned by +** sqlite3_step(). In the new version, the original SQL text is retained +** and the statement is automatically recompiled if an schema change +** occurs. +*/ +SQLITE_API int sqlite3_prepare( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + int rc; + rc = sqlite3LockAndPrepare(db,zSql,nBytes,0,0,ppStmt,pzTail); + assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ + return rc; +} +SQLITE_API int sqlite3_prepare_v2( + sqlite3 *db, /* Database handle. */ + const char *zSql, /* UTF-8 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ + const char **pzTail /* OUT: End of parsed string */ +){ + int rc; + rc = sqlite3LockAndPrepare(db,zSql,nBytes,1,0,ppStmt,pzTail); + assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ + return rc; +} + + +#ifndef SQLITE_OMIT_UTF16 +/* +** Compile the UTF-16 encoded SQL statement zSql into a statement handle. +*/ +static int sqlite3Prepare16( + sqlite3 *db, /* Database handle. */ + const void *zSql, /* UTF-16 encoded SQL statement. */ + int nBytes, /* Length of zSql in bytes. 
*/
+  int saveSqlFlag,          /* True to save SQL text into the sqlite3_stmt */
+  sqlite3_stmt **ppStmt,    /* OUT: A pointer to the prepared statement */
+  const void **pzTail       /* OUT: End of parsed string */
+){
+  /* This function currently works by first transforming the UTF-16
+  ** encoded string to UTF-8, then invoking sqlite3_prepare(). The
+  ** tricky bit is figuring out the pointer to return in *pzTail.
+  */
+  char *zSql8;
+  const char *zTail8 = 0;
+  int rc = SQLITE_OK;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( ppStmt==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  *ppStmt = 0;
+  if( !sqlite3SafetyCheckOk(db)||zSql==0 ){
+    return SQLITE_MISUSE_BKPT;
+  }
+  if( nBytes>=0 ){
+    int sz;
+    const char *z = (const char*)zSql;
+    for(sz=0; sz<nBytes && (z[sz]!=0 || z[sz+1]!=0); sz+=2){}
+    nBytes = sz;
+  }
+  sqlite3_mutex_enter(db->mutex);
+  zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE);
+  if( zSql8 ){
+    rc = sqlite3LockAndPrepare(db, zSql8, -1, saveSqlFlag, 0, ppStmt, &zTail8);
+  }
+
+  if( zTail8 && pzTail ){
+    /* If sqlite3_prepare returns a tail pointer, we calculate the
+    ** equivalent pointer into the UTF-16 string by counting the unicode
+    ** characters between zSql8 and zTail8, and then returning a pointer
+    ** the same number of characters into the UTF-16 string.
+    */
+    int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8));
+    *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed);
+  }
+  sqlite3DbFree(db, zSql8);
+  rc = sqlite3ApiExit(db, rc);
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+/*
+** Two versions of the official API.  Legacy and new use.  In the legacy
+** version, the original SQL text is not saved in the prepared statement
+** and so if a schema change occurs, SQLITE_SCHEMA is returned by
+** sqlite3_step().  In the new version, the original SQL text is retained
+** and the statement is automatically recompiled if a schema change
+** occurs.
+*/
+SQLITE_API int sqlite3_prepare16(
+  sqlite3 *db,              /* Database handle. */
+  const void *zSql,         /* UTF-16 encoded SQL statement. */
+  int nBytes,               /* Length of zSql in bytes. */
+  sqlite3_stmt **ppStmt,    /* OUT: A pointer to the prepared statement */
+  const void **pzTail       /* OUT: End of parsed string */
+){
+  int rc;
+  rc = sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail);
+  assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 );  /* VERIFY: F13021 */
+  return rc;
+}
+SQLITE_API int sqlite3_prepare16_v2(
+  sqlite3 *db,              /* Database handle. */
+  const void *zSql,         /* UTF-16 encoded SQL statement. */
+  int nBytes,               /* Length of zSql in bytes. */
+  sqlite3_stmt **ppStmt,    /* OUT: A pointer to the prepared statement */
+  const void **pzTail       /* OUT: End of parsed string */
+){
+  int rc;
+  rc = sqlite3Prepare16(db,zSql,nBytes,1,ppStmt,pzTail);
+  assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 );  /* VERIFY: F13021 */
+  return rc;
+}
+
+#endif /* SQLITE_OMIT_UTF16 */
+
+/************** End of prepare.c *********************************************/
+/************** Begin file select.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle SELECT statements in SQLite.
+*/ +/* #include "sqliteInt.h" */ + +/* +** Trace output macros +*/ +#if SELECTTRACE_ENABLED +/***/ int sqlite3SelectTrace = 0; +# define SELECTTRACE(K,P,S,X) \ + if(sqlite3SelectTrace&(K)) \ + sqlite3DebugPrintf("%*s%s.%p: ",(P)->nSelectIndent*2-2,"",\ + (S)->zSelName,(S)),\ + sqlite3DebugPrintf X +#else +# define SELECTTRACE(K,P,S,X) +#endif + + +/* +** An instance of the following object is used to record information about +** how to process the DISTINCT keyword, to simplify passing that information +** into the selectInnerLoop() routine. +*/ +typedef struct DistinctCtx DistinctCtx; +struct DistinctCtx { + u8 isTnct; /* True if the DISTINCT keyword is present */ + u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */ + int tabTnct; /* Ephemeral table used for DISTINCT processing */ + int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */ +}; + +/* +** An instance of the following object is used to record information about +** the ORDER BY (or GROUP BY) clause of query is being coded. +*/ +typedef struct SortCtx SortCtx; +struct SortCtx { + ExprList *pOrderBy; /* The ORDER BY (or GROUP BY clause) */ + int nOBSat; /* Number of ORDER BY terms satisfied by indices */ + int iECursor; /* Cursor number for the sorter */ + int regReturn; /* Register holding block-output return address */ + int labelBkOut; /* Start label for the block-output subroutine */ + int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */ + int labelDone; /* Jump here when done, ex: LIMIT reached */ + u8 sortFlags; /* Zero or more SORTFLAG_* bits */ + u8 bOrderedInnerLoop; /* ORDER BY correctly sorts the inner loop */ +}; +#define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */ + +/* +** Delete all the content of a Select structure. Deallocate the structure +** itself only if bFree is true. +*/ +static void clearSelect(sqlite3 *db, Select *p, int bFree){ + while( p ){ + Select *pPrior = p->pPrior; + sqlite3ExprListDelete(db, p->pEList); + sqlite3SrcListDelete(db, p->pSrc); + sqlite3ExprDelete(db, p->pWhere); + sqlite3ExprListDelete(db, p->pGroupBy); + sqlite3ExprDelete(db, p->pHaving); + sqlite3ExprListDelete(db, p->pOrderBy); + sqlite3ExprDelete(db, p->pLimit); + sqlite3ExprDelete(db, p->pOffset); + if( p->pWith ) sqlite3WithDelete(db, p->pWith); + if( bFree ) sqlite3DbFree(db, p); + p = pPrior; + bFree = 1; + } +} + +/* +** Initialize a SelectDest structure. +*/ +SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){ + pDest->eDest = (u8)eDest; + pDest->iSDParm = iParm; + pDest->zAffSdst = 0; + pDest->iSdst = 0; + pDest->nSdst = 0; +} + + +/* +** Allocate a new Select structure and return a pointer to that +** structure. +*/ +SQLITE_PRIVATE Select *sqlite3SelectNew( + Parse *pParse, /* Parsing context */ + ExprList *pEList, /* which columns to include in the result */ + SrcList *pSrc, /* the FROM clause -- which tables to scan */ + Expr *pWhere, /* the WHERE clause */ + ExprList *pGroupBy, /* the GROUP BY clause */ + Expr *pHaving, /* the HAVING clause */ + ExprList *pOrderBy, /* the ORDER BY clause */ + u32 selFlags, /* Flag parameters, such as SF_Distinct */ + Expr *pLimit, /* LIMIT value. NULL means not used */ + Expr *pOffset /* OFFSET value. 
NULL means no offset */ +){ + Select *pNew; + Select standin; + sqlite3 *db = pParse->db; + pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) ); + if( pNew==0 ){ + assert( db->mallocFailed ); + pNew = &standin; + } + if( pEList==0 ){ + pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db,TK_ASTERISK,0)); + } + pNew->pEList = pEList; + pNew->op = TK_SELECT; + pNew->selFlags = selFlags; + pNew->iLimit = 0; + pNew->iOffset = 0; +#if SELECTTRACE_ENABLED + pNew->zSelName[0] = 0; +#endif + pNew->addrOpenEphm[0] = -1; + pNew->addrOpenEphm[1] = -1; + pNew->nSelectRow = 0; + if( pSrc==0 ) pSrc = sqlite3DbMallocZero(db, sizeof(*pSrc)); + pNew->pSrc = pSrc; + pNew->pWhere = pWhere; + pNew->pGroupBy = pGroupBy; + pNew->pHaving = pHaving; + pNew->pOrderBy = pOrderBy; + pNew->pPrior = 0; + pNew->pNext = 0; + pNew->pLimit = pLimit; + pNew->pOffset = pOffset; + pNew->pWith = 0; + assert( pOffset==0 || pLimit!=0 || pParse->nErr>0 || db->mallocFailed!=0 ); + if( db->mallocFailed ) { + clearSelect(db, pNew, pNew!=&standin); + pNew = 0; + }else{ + assert( pNew->pSrc!=0 || pParse->nErr>0 ); + } + assert( pNew!=&standin ); + return pNew; +} + +#if SELECTTRACE_ENABLED +/* +** Set the name of a Select object +*/ +SQLITE_PRIVATE void sqlite3SelectSetName(Select *p, const char *zName){ + if( p && zName ){ + sqlite3_snprintf(sizeof(p->zSelName), p->zSelName, "%s", zName); + } +} +#endif + + +/* +** Delete the given Select structure and all of its substructures. +*/ +SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ + if( p ) clearSelect(db, p, 1); +} + +/* +** Return a pointer to the right-most SELECT statement in a compound. +*/ +static Select *findRightmost(Select *p){ + while( p->pNext ) p = p->pNext; + return p; +} + +/* +** Given 1 to 3 identifiers preceding the JOIN keyword, determine the +** type of join. Return an integer constant that expresses that type +** in terms of the following bit values: +** +** JT_INNER +** JT_CROSS +** JT_OUTER +** JT_NATURAL +** JT_LEFT +** JT_RIGHT +** +** A full outer join is the combination of JT_LEFT and JT_RIGHT. +** +** If an illegal or unsupported join type is seen, then still return +** a join type, but put an error in the pParse structure. 
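+**
+** For example, "LEFT OUTER JOIN" yields JT_LEFT|JT_OUTER, "NATURAL LEFT
+** JOIN" yields JT_NATURAL|JT_LEFT|JT_OUTER, and "CROSS JOIN" yields
+** JT_INNER|JT_CROSS.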
+*/
+SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){
+  int jointype = 0;
+  Token *apAll[3];
+  Token *p;
+                             /*   0123456789 123456789 123456789 123 */
+  static const char zKeyText[] = "naturaleftouterightfullinnercross";
+  static const struct {
+    u8 i;        /* Beginning of keyword text in zKeyText[] */
+    u8 nChar;    /* Length of the keyword in characters */
+    u8 code;     /* Join type mask */
+  } aKeyword[] = {
+    /* natural */ { 0,  7, JT_NATURAL                },
+    /* left    */ { 6,  4, JT_LEFT|JT_OUTER          },
+    /* outer   */ { 10, 5, JT_OUTER                  },
+    /* right   */ { 14, 5, JT_RIGHT|JT_OUTER         },
+    /* full    */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER },
+    /* inner   */ { 23, 5, JT_INNER                  },
+    /* cross   */ { 28, 5, JT_INNER|JT_CROSS         },
+  };
+  int i, j;
+  apAll[0] = pA;
+  apAll[1] = pB;
+  apAll[2] = pC;
+  for(i=0; i<3 && apAll[i]; i++){
+    p = apAll[i];
+    for(j=0; j<ArraySize(aKeyword); j++){
+      if( p->n==aKeyword[j].nChar
+          && sqlite3StrNICmp((char*)p->z, &zKeyText[aKeyword[j].i], p->n)==0 ){
+        jointype |= aKeyword[j].code;
+        break;
+      }
+    }
+    testcase( j==0 || j==1 || j==2 || j==3 || j==4 || j==5 || j==6 );
+    if( j>=ArraySize(aKeyword) ){
+      jointype |= JT_ERROR;
+      break;
+    }
+  }
+  if(
+     (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) ||
+     (jointype & JT_ERROR)!=0
+  ){
+    const char *zSp = " ";
+    assert( pB!=0 );
+    if( pC==0 ){ zSp++; }
+    sqlite3ErrorMsg(pParse, "unknown or unsupported join type: "
+       "%T %T%s%T", pA, pB, zSp, pC);
+    jointype = JT_INNER;
+  }else if( (jointype & JT_OUTER)!=0
+         && (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){
+    sqlite3ErrorMsg(pParse,
+      "RIGHT and FULL OUTER JOINs are not currently supported");
+    jointype = JT_INNER;
+  }
+  return jointype;
+}
+
+/*
+** Return the index of a column in a table.  Return -1 if the column
+** is not contained in the table.
+*/
+static int columnIndex(Table *pTab, const char *zCol){
+  int i;
+  for(i=0; i<pTab->nCol; i++){
+    if( sqlite3StrICmp(pTab->aCol[i].zName, zCol)==0 ) return i;
+  }
+  return -1;
+}
+
+/*
+** Search the first N tables in pSrc, from left to right, looking for a
+** table that has a column named zCol.
+**
+** When found, set *piTab and *piCol to the table index and column index
+** of the matching column and return TRUE.
+**
+** If not found, return FALSE.
+*/
+static int tableAndColumnIndex(
+  SrcList *pSrc,       /* Array of tables to search */
+  int N,               /* Number of tables in pSrc->a[] to search */
+  const char *zCol,    /* Name of the column we are looking for */
+  int *piTab,          /* Write index of pSrc->a[] here */
+  int *piCol           /* Write index of pSrc->a[*piTab].pTab->aCol[] here */
+){
+  int i;               /* For looping over tables in pSrc */
+  int iCol;            /* Index of column matching zCol */
+
+  assert( (piTab==0)==(piCol==0) );  /* Both or neither are NULL */
+  for(i=0; i<N; i++){
+    iCol = columnIndex(pSrc->a[i].pTab, zCol);
+    if( iCol>=0 ){
+      if( piTab ){
+        *piTab = i;
+        *piCol = iCol;
+      }
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/*
+** This function is used to add terms implied by JOIN syntax to the
+** WHERE clause expression of a SELECT statement. The new term, which
+** is ANDed with the existing WHERE clause, is of the form:
+**
+**    (tab1.col1 = tab2.col2)
+**
+** where tab1 is the iSrc'th table in SrcList pSrc and tab2 is the
+** (iSrc+1)'th. Column col1 is column iColLeft of tab1, and col2 is
+** column iColRight of tab2.
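+**
+** For example, when processing "t1 JOIN t2 USING(x)", this routine is
+** invoked to AND the term (t1.x = t2.x) onto the WHERE clause.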
+*/ +static void addWhereTerm( + Parse *pParse, /* Parsing context */ + SrcList *pSrc, /* List of tables in FROM clause */ + int iLeft, /* Index of first table to join in pSrc */ + int iColLeft, /* Index of column in first table */ + int iRight, /* Index of second table in pSrc */ + int iColRight, /* Index of column in second table */ + int isOuterJoin, /* True if this is an OUTER join */ + Expr **ppWhere /* IN/OUT: The WHERE clause to add to */ +){ + sqlite3 *db = pParse->db; + Expr *pE1; + Expr *pE2; + Expr *pEq; + + assert( iLeftnSrc>iRight ); + assert( pSrc->a[iLeft].pTab ); + assert( pSrc->a[iRight].pTab ); + + pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft); + pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight); + + pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2); + if( pEq && isOuterJoin ){ + ExprSetProperty(pEq, EP_FromJoin); + assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) ); + ExprSetVVAProperty(pEq, EP_NoReduce); + pEq->iRightJoinTable = (i16)pE2->iTable; + } + *ppWhere = sqlite3ExprAnd(db, *ppWhere, pEq); +} + +/* +** Set the EP_FromJoin property on all terms of the given expression. +** And set the Expr.iRightJoinTable to iTable for every term in the +** expression. +** +** The EP_FromJoin property is used on terms of an expression to tell +** the LEFT OUTER JOIN processing logic that this term is part of the +** join restriction specified in the ON or USING clause and not a part +** of the more general WHERE clause. These terms are moved over to the +** WHERE clause during join processing but we need to remember that they +** originated in the ON or USING clause. +** +** The Expr.iRightJoinTable tells the WHERE clause processing that the +** expression depends on table iRightJoinTable even if that table is not +** explicitly mentioned in the expression. That information is needed +** for cases like this: +** +** SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5 +** +** The where clause needs to defer the handling of the t1.x=5 +** term until after the t2 loop of the join. In that way, a +** NULL t2 row will be inserted whenever t1.x!=5. If we do not +** defer the handling of t1.x=5, it will be processed immediately +** after the t1 loop and rows with t1.x!=5 will never appear in +** the output, which is incorrect. +*/ +static void setJoinExpr(Expr *p, int iTable){ + while( p ){ + ExprSetProperty(p, EP_FromJoin); + assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); + ExprSetVVAProperty(p, EP_NoReduce); + p->iRightJoinTable = (i16)iTable; + if( p->op==TK_FUNCTION && p->x.pList ){ + int i; + for(i=0; ix.pList->nExpr; i++){ + setJoinExpr(p->x.pList->a[i].pExpr, iTable); + } + } + setJoinExpr(p->pLeft, iTable); + p = p->pRight; + } +} + +/* +** This routine processes the join information for a SELECT statement. +** ON and USING clauses are converted into extra terms of the WHERE clause. +** NATURAL joins also create extra WHERE clause terms. +** +** The terms of a FROM clause are contained in the Select.pSrc structure. +** The left most table is the first entry in Select.pSrc. The right-most +** table is the last entry. The join operator is held in the entry to +** the left. Thus entry 0 contains the join operator for the join between +** entries 0 and 1. Any ON or USING clauses associated with the join are +** also attached to the left entry. +** +** This routine returns the number of errors encountered. 
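+**
+** For example, if tables t1 and t2 share only the column "x", then
+**
+**     SELECT * FROM t1 NATURAL JOIN t2
+**
+** is processed, in effect, as if it had been written
+**
+**     SELECT * FROM t1 JOIN t2 WHERE t1.x=t2.x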
+*/ +static int sqliteProcessJoin(Parse *pParse, Select *p){ + SrcList *pSrc; /* All tables in the FROM clause */ + int i, j; /* Loop counters */ + struct SrcList_item *pLeft; /* Left table being joined */ + struct SrcList_item *pRight; /* Right table being joined */ + + pSrc = p->pSrc; + pLeft = &pSrc->a[0]; + pRight = &pLeft[1]; + for(i=0; inSrc-1; i++, pRight++, pLeft++){ + Table *pLeftTab = pLeft->pTab; + Table *pRightTab = pRight->pTab; + int isOuter; + + if( NEVER(pLeftTab==0 || pRightTab==0) ) continue; + isOuter = (pRight->fg.jointype & JT_OUTER)!=0; + + /* When the NATURAL keyword is present, add WHERE clause terms for + ** every column that the two tables have in common. + */ + if( pRight->fg.jointype & JT_NATURAL ){ + if( pRight->pOn || pRight->pUsing ){ + sqlite3ErrorMsg(pParse, "a NATURAL join may not have " + "an ON or USING clause", 0); + return 1; + } + for(j=0; jnCol; j++){ + char *zName; /* Name of column in the right table */ + int iLeft; /* Matching left table */ + int iLeftCol; /* Matching column in the left table */ + + zName = pRightTab->aCol[j].zName; + if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) ){ + addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j, + isOuter, &p->pWhere); + } + } + } + + /* Disallow both ON and USING clauses in the same join + */ + if( pRight->pOn && pRight->pUsing ){ + sqlite3ErrorMsg(pParse, "cannot have both ON and USING " + "clauses in the same join"); + return 1; + } + + /* Add the ON clause to the end of the WHERE clause, connected by + ** an AND operator. + */ + if( pRight->pOn ){ + if( isOuter ) setJoinExpr(pRight->pOn, pRight->iCursor); + p->pWhere = sqlite3ExprAnd(pParse->db, p->pWhere, pRight->pOn); + pRight->pOn = 0; + } + + /* Create extra terms on the WHERE clause for each column named + ** in the USING clause. Example: If the two tables to be joined are + ** A and B and the USING clause names X, Y, and Z, then add this + ** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z + ** Report an error if any column mentioned in the USING clause is + ** not contained in both tables to be joined. + */ + if( pRight->pUsing ){ + IdList *pList = pRight->pUsing; + for(j=0; jnId; j++){ + char *zName; /* Name of the term in the USING clause */ + int iLeft; /* Table on the left with matching column name */ + int iLeftCol; /* Column number of matching column on the left */ + int iRightCol; /* Column number of matching column on the right */ + + zName = pList->a[j].zName; + iRightCol = columnIndex(pRightTab, zName); + if( iRightCol<0 + || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) + ){ + sqlite3ErrorMsg(pParse, "cannot join using column %s - column " + "not present in both tables", zName); + return 1; + } + addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, iRightCol, + isOuter, &p->pWhere); + } + } + } + return 0; +} + +/* Forward reference */ +static KeyInfo *keyInfoFromExprList( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* Form the KeyInfo object from this ExprList */ + int iStart, /* Begin with this column of pList */ + int nExtra /* Add this many extra columns to the end */ +); + +/* +** Generate code that will push the record in registers regData +** through regData+nData-1 onto the sorter. 
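+**
+** Each record pushed onto the sorter is laid out, in effect, as
+**
+**     [ORDER BY key columns] [sequence number, if used] [data columns]
+**
+** so that reading the records back in sorter order yields the rows in
+** the order requested by the ORDER BY clause.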
+*/ +static void pushOntoSorter( + Parse *pParse, /* Parser context */ + SortCtx *pSort, /* Information about the ORDER BY clause */ + Select *pSelect, /* The whole SELECT statement */ + int regData, /* First register holding data to be sorted */ + int regOrigData, /* First register holding data before packing */ + int nData, /* Number of elements in the data array */ + int nPrefixReg /* No. of reg prior to regData available for use */ +){ + Vdbe *v = pParse->pVdbe; /* Stmt under construction */ + int bSeq = ((pSort->sortFlags & SORTFLAG_UseSorter)==0); + int nExpr = pSort->pOrderBy->nExpr; /* No. of ORDER BY terms */ + int nBase = nExpr + bSeq + nData; /* Fields in sorter record */ + int regBase; /* Regs for sorter record */ + int regRecord = ++pParse->nMem; /* Assembled sorter record */ + int nOBSat = pSort->nOBSat; /* ORDER BY terms to skip */ + int op; /* Opcode to add sorter record to sorter */ + int iLimit; /* LIMIT counter */ + + assert( bSeq==0 || bSeq==1 ); + assert( nData==1 || regData==regOrigData || regOrigData==0 ); + if( nPrefixReg ){ + assert( nPrefixReg==nExpr+bSeq ); + regBase = regData - nExpr - bSeq; + }else{ + regBase = pParse->nMem + 1; + pParse->nMem += nBase; + } + assert( pSelect->iOffset==0 || pSelect->iLimit!=0 ); + iLimit = pSelect->iOffset ? pSelect->iOffset+1 : pSelect->iLimit; + pSort->labelDone = sqlite3VdbeMakeLabel(v); + sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, regOrigData, + SQLITE_ECEL_DUP | (regOrigData? SQLITE_ECEL_REF : 0)); + if( bSeq ){ + sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr); + } + if( nPrefixReg==0 && nData>0 ){ + sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData); + } + sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regRecord); + if( nOBSat>0 ){ + int regPrevKey; /* The first nOBSat columns of the previous row */ + int addrFirst; /* Address of the OP_IfNot opcode */ + int addrJmp; /* Address of the OP_Jump opcode */ + VdbeOp *pOp; /* Opcode that opens the sorter */ + int nKey; /* Number of sorting key columns, including OP_Sequence */ + KeyInfo *pKI; /* Original KeyInfo on the sorter table */ + + regPrevKey = pParse->nMem+1; + pParse->nMem += pSort->nOBSat; + nKey = nExpr - pSort->nOBSat + bSeq; + if( bSeq ){ + addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr); + }else{ + addrFirst = sqlite3VdbeAddOp1(v, OP_SequenceTest, pSort->iECursor); + } + VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat); + pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex); + if( pParse->db->mallocFailed ) return; + pOp->p2 = nKey + nData; + pKI = pOp->p4.pKeyInfo; + memset(pKI->aSortOrder, 0, pKI->nField); /* Makes OP_Jump below testable */ + sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO); + testcase( pKI->nXField>2 ); + pOp->p4.pKeyInfo = keyInfoFromExprList(pParse, pSort->pOrderBy, nOBSat, + pKI->nXField-1); + addrJmp = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v); + pSort->labelBkOut = sqlite3VdbeMakeLabel(v); + pSort->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut); + sqlite3VdbeAddOp1(v, OP_ResetSorter, pSort->iECursor); + if( iLimit ){ + sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, pSort->labelDone); + VdbeCoverage(v); + } + sqlite3VdbeJumpHere(v, addrFirst); + sqlite3ExprCodeMove(pParse, regBase, regPrevKey, pSort->nOBSat); + sqlite3VdbeJumpHere(v, addrJmp); + } + if( pSort->sortFlags & SORTFLAG_UseSorter ){ + op = OP_SorterInsert; + }else{ + op 
= OP_IdxInsert; + } + sqlite3VdbeAddOp4Int(v, op, pSort->iECursor, regRecord, + regBase+nOBSat, nBase-nOBSat); + if( iLimit ){ + int addr; + int r1 = 0; + /* Fill the sorter until it contains LIMIT+OFFSET entries. (The iLimit + ** register is initialized with value of LIMIT+OFFSET.) After the sorter + ** fills up, delete the least entry in the sorter after each insert. + ** Thus we never hold more than the LIMIT+OFFSET rows in memory at once */ + addr = sqlite3VdbeAddOp1(v, OP_IfNotZero, iLimit); VdbeCoverage(v); + sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor); + if( pSort->bOrderedInnerLoop ){ + r1 = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_Column, pSort->iECursor, nExpr, r1); + VdbeComment((v, "seq")); + } + sqlite3VdbeAddOp1(v, OP_Delete, pSort->iECursor); + if( pSort->bOrderedInnerLoop ){ + /* If the inner loop is driven by an index such that values from + ** the same iteration of the inner loop are in sorted order, then + ** immediately jump to the next iteration of an inner loop if the + ** entry from the current iteration does not fit into the top + ** LIMIT+OFFSET entries of the sorter. */ + int iBrk = sqlite3VdbeCurrentAddr(v) + 2; + sqlite3VdbeAddOp3(v, OP_Eq, regBase+nExpr, iBrk, r1); + sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); + VdbeCoverage(v); + } + sqlite3VdbeJumpHere(v, addr); + } +} + +/* +** Add code to implement the OFFSET +*/ +static void codeOffset( + Vdbe *v, /* Generate code into this VM */ + int iOffset, /* Register holding the offset counter */ + int iContinue /* Jump here to skip the current record */ +){ + if( iOffset>0 ){ + sqlite3VdbeAddOp3(v, OP_IfPos, iOffset, iContinue, 1); VdbeCoverage(v); + VdbeComment((v, "OFFSET")); + } +} + +/* +** Add code that will check to make sure the N registers starting at iMem +** form a distinct entry. iTab is a sorting index that holds previously +** seen combinations of the N values. A new entry is made in iTab +** if the current N values are new. +** +** A jump to addrRepeat is made and the N+1 values are popped from the +** stack if the top N elements are not distinct. +*/ +static void codeDistinct( + Parse *pParse, /* Parsing and code generating context */ + int iTab, /* A sorting index used to test for distinctness */ + int addrRepeat, /* Jump to here if not distinct */ + int N, /* Number of elements */ + int iMem /* First element */ +){ + Vdbe *v; + int r1; + + v = pParse->pVdbe; + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, iMem, N); + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + sqlite3ReleaseTempReg(pParse, r1); +} + +/* +** This routine generates the code for the inside of the inner loop +** of a SELECT. +** +** If srcTab is negative, then the pEList expressions +** are evaluated in order to get the data for this row. If srcTab is +** zero or more, then data is pulled from srcTab and pEList is used only +** to get the number of columns and the collation sequence for each column. 
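+**
+** For example, for a simple top-level query such as
+**
+**     SELECT a, b FROM t1 WHERE a>5;
+**
+** the code generated here runs once for each row of t1 that satisfies
+** the WHERE clause, evaluating "a" and "b" into registers and then
+** disposing of them as directed by pDest (SRT_Output in this case).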
+*/ +static void selectInnerLoop( + Parse *pParse, /* The parser context */ + Select *p, /* The complete select statement being coded */ + ExprList *pEList, /* List of values being extracted */ + int srcTab, /* Pull data from this table */ + SortCtx *pSort, /* If not NULL, info on how to process ORDER BY */ + DistinctCtx *pDistinct, /* If not NULL, info on how to process DISTINCT */ + SelectDest *pDest, /* How to dispose of the results */ + int iContinue, /* Jump here to continue with next row */ + int iBreak /* Jump here to break out of the inner loop */ +){ + Vdbe *v = pParse->pVdbe; + int i; + int hasDistinct; /* True if the DISTINCT keyword is present */ + int eDest = pDest->eDest; /* How to dispose of results */ + int iParm = pDest->iSDParm; /* First argument to disposal method */ + int nResultCol; /* Number of result columns */ + int nPrefixReg = 0; /* Number of extra registers before regResult */ + + /* Usually, regResult is the first cell in an array of memory cells + ** containing the current result row. In this case regOrig is set to the + ** same value. However, if the results are being sent to the sorter, the + ** values for any expressions that are also part of the sort-key are omitted + ** from this array. In this case regOrig is set to zero. */ + int regResult; /* Start of memory holding current results */ + int regOrig; /* Start of memory holding full result (or 0) */ + + assert( v ); + assert( pEList!=0 ); + hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP; + if( pSort && pSort->pOrderBy==0 ) pSort = 0; + if( pSort==0 && !hasDistinct ){ + assert( iContinue!=0 ); + codeOffset(v, p->iOffset, iContinue); + } + + /* Pull the requested columns. + */ + nResultCol = pEList->nExpr; + + if( pDest->iSdst==0 ){ + if( pSort ){ + nPrefixReg = pSort->pOrderBy->nExpr; + if( !(pSort->sortFlags & SORTFLAG_UseSorter) ) nPrefixReg++; + pParse->nMem += nPrefixReg; + } + pDest->iSdst = pParse->nMem+1; + pParse->nMem += nResultCol; + }else if( pDest->iSdst+nResultCol > pParse->nMem ){ + /* This is an error condition that can result, for example, when a SELECT + ** on the right-hand side of an INSERT contains more result columns than + ** there are columns in the table on the left. The error will be caught + ** and reported later. But we need to make sure enough memory is allocated + ** to avoid other spurious errors in the meantime. */ + pParse->nMem += nResultCol; + } + pDest->nSdst = nResultCol; + regOrig = regResult = pDest->iSdst; + if( srcTab>=0 ){ + for(i=0; ia[i].zName)); + } + }else if( eDest!=SRT_Exists ){ + /* If the destination is an EXISTS(...) expression, the actual + ** values returned by the SELECT are not required. + */ + u8 ecelFlags; + if( eDest==SRT_Mem || eDest==SRT_Output || eDest==SRT_Coroutine ){ + ecelFlags = SQLITE_ECEL_DUP; + }else{ + ecelFlags = 0; + } + if( pSort && hasDistinct==0 && eDest!=SRT_EphemTab && eDest!=SRT_Table ){ + /* For each expression in pEList that is a copy of an expression in + ** the ORDER BY clause (pSort->pOrderBy), set the associated + ** iOrderByCol value to one more than the index of the ORDER BY + ** expression within the sort-key that pushOntoSorter() will generate. + ** This allows the pEList field to be omitted from the sorted record, + ** saving space and CPU cycles. 
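+      ** For example, in "SELECT a, b FROM t1 ORDER BY b", the result
+      ** column "b" is already present in the sort-key, so only "a" needs
+      ** to be carried as payload in each sorter record.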
*/ + ecelFlags |= (SQLITE_ECEL_OMITREF|SQLITE_ECEL_REF); + for(i=pSort->nOBSat; ipOrderBy->nExpr; i++){ + int j; + if( (j = pSort->pOrderBy->a[i].u.x.iOrderByCol)>0 ){ + pEList->a[j-1].u.x.iOrderByCol = i+1-pSort->nOBSat; + } + } + regOrig = 0; + assert( eDest==SRT_Set || eDest==SRT_Mem + || eDest==SRT_Coroutine || eDest==SRT_Output ); + } + nResultCol = sqlite3ExprCodeExprList(pParse,pEList,regResult,0,ecelFlags); + } + + /* If the DISTINCT keyword was present on the SELECT statement + ** and this row has been seen before, then do not make this row + ** part of the result. + */ + if( hasDistinct ){ + switch( pDistinct->eTnctType ){ + case WHERE_DISTINCT_ORDERED: { + VdbeOp *pOp; /* No longer required OpenEphemeral instr. */ + int iJump; /* Jump destination */ + int regPrev; /* Previous row content */ + + /* Allocate space for the previous row */ + regPrev = pParse->nMem+1; + pParse->nMem += nResultCol; + + /* Change the OP_OpenEphemeral coded earlier to an OP_Null + ** sets the MEM_Cleared bit on the first register of the + ** previous value. This will cause the OP_Ne below to always + ** fail on the first iteration of the loop even if the first + ** row is all NULLs. + */ + sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct); + pOp = sqlite3VdbeGetOp(v, pDistinct->addrTnct); + pOp->opcode = OP_Null; + pOp->p1 = 1; + pOp->p2 = regPrev; + + iJump = sqlite3VdbeCurrentAddr(v) + nResultCol; + for(i=0; ia[i].pExpr); + if( idb->mallocFailed ); + sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nResultCol-1); + break; + } + + case WHERE_DISTINCT_UNIQUE: { + sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct); + break; + } + + default: { + assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED ); + codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol, + regResult); + break; + } + } + if( pSort==0 ){ + codeOffset(v, p->iOffset, iContinue); + } + } + + switch( eDest ){ + /* In this mode, write each query result to the key of the temporary + ** table iParm. + */ +#ifndef SQLITE_OMIT_COMPOUND_SELECT + case SRT_Union: { + int r1; + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); + sqlite3ReleaseTempReg(pParse, r1); + break; + } + + /* Construct a record from the query result, but instead of + ** saving that record, use it as a key to delete elements from + ** the temporary table iParm. + */ + case SRT_Except: { + sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nResultCol); + break; + } +#endif /* SQLITE_OMIT_COMPOUND_SELECT */ + + /* Store the result as data using a unique key. + */ + case SRT_Fifo: + case SRT_DistFifo: + case SRT_Table: + case SRT_EphemTab: { + int r1 = sqlite3GetTempRange(pParse, nPrefixReg+1); + testcase( eDest==SRT_Table ); + testcase( eDest==SRT_EphemTab ); + testcase( eDest==SRT_Fifo ); + testcase( eDest==SRT_DistFifo ); + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg); +#ifndef SQLITE_OMIT_CTE + if( eDest==SRT_DistFifo ){ + /* If the destination is DistFifo, then cursor (iParm+1) is open + ** on an ephemeral index. If the current row is already present + ** in the index, do not write it to the output. If not, add the + ** current row to the index and proceed with writing it to the + ** output table as well. 
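+        ** (This arises for recursive common table expressions that use
+        ** UNION rather than UNION ALL, where each row may enter the
+        ** recursive queue at most once.)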
*/ + int addr = sqlite3VdbeCurrentAddr(v) + 4; + sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0); + VdbeCoverage(v); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm+1, r1,regResult,nResultCol); + assert( pSort==0 ); + } +#endif + if( pSort ){ + pushOntoSorter(pParse, pSort, p, r1+nPrefixReg,regResult,1,nPrefixReg); + }else{ + int r2 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2); + sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, r2); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3ReleaseTempReg(pParse, r2); + } + sqlite3ReleaseTempRange(pParse, r1, nPrefixReg+1); + break; + } + +#ifndef SQLITE_OMIT_SUBQUERY + /* If we are creating a set for an "expr IN (SELECT ...)" construct, + ** then there should be a single item on the stack. Write this + ** item into the set table with bogus data. + */ + case SRT_Set: { + if( pSort ){ + /* At first glance you would think we could optimize out the + ** ORDER BY in this case since the order of entries in the set + ** does not matter. But there might be a LIMIT clause, in which + ** case the order does matter */ + pushOntoSorter( + pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); + }else{ + int r1 = sqlite3GetTempReg(pParse); + assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol ); + sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol, + r1, pDest->zAffSdst, nResultCol); + sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol); + sqlite3ReleaseTempReg(pParse, r1); + } + break; + } + + /* If any row exist in the result set, record that fact and abort. + */ + case SRT_Exists: { + sqlite3VdbeAddOp2(v, OP_Integer, 1, iParm); + /* The LIMIT clause will terminate the loop for us */ + break; + } + + /* If this is a scalar select that is part of an expression, then + ** store the results in the appropriate memory cell or array of + ** memory cells and break out of the scan loop. + */ + case SRT_Mem: { + if( pSort ){ + assert( nResultCol<=pDest->nSdst ); + pushOntoSorter( + pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg); + }else{ + assert( nResultCol==pDest->nSdst ); + assert( regResult==iParm ); + /* The LIMIT clause will jump out of the loop for us */ + } + break; + } +#endif /* #ifndef SQLITE_OMIT_SUBQUERY */ + + case SRT_Coroutine: /* Send data to a co-routine */ + case SRT_Output: { /* Return the results */ + testcase( eDest==SRT_Coroutine ); + testcase( eDest==SRT_Output ); + if( pSort ){ + pushOntoSorter(pParse, pSort, p, regResult, regOrig, nResultCol, + nPrefixReg); + }else if( eDest==SRT_Coroutine ){ + sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); + }else{ + sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol); + sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol); + } + break; + } + +#ifndef SQLITE_OMIT_CTE + /* Write the results into a priority queue that is order according to + ** pDest->pOrderBy (in pSO). pDest->iSDParm (in iParm) is the cursor for an + ** index with pSO->nExpr+2 columns. Build a key using pSO for the first + ** pSO->nExpr columns, then make sure all keys are unique by adding a + ** final OP_Sequence column. The last column is the record as a blob. 
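+    ** Each Queue entry is therefore laid out as
+    **
+    **     [pSO->nExpr priority columns] [sequence number] [row as a blob]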
+ */ + case SRT_DistQueue: + case SRT_Queue: { + int nKey; + int r1, r2, r3; + int addrTest = 0; + ExprList *pSO; + pSO = pDest->pOrderBy; + assert( pSO ); + nKey = pSO->nExpr; + r1 = sqlite3GetTempReg(pParse); + r2 = sqlite3GetTempRange(pParse, nKey+2); + r3 = r2+nKey+1; + if( eDest==SRT_DistQueue ){ + /* If the destination is DistQueue, then cursor (iParm+1) is open + ** on a second ephemeral index that holds all values every previously + ** added to the queue. */ + addrTest = sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, 0, + regResult, nResultCol); + VdbeCoverage(v); + } + sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r3); + if( eDest==SRT_DistQueue ){ + sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r3); + sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + } + for(i=0; ia[i].u.x.iOrderByCol - 1, + r2+i); + } + sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey); + sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, r2, nKey+2); + if( addrTest ) sqlite3VdbeJumpHere(v, addrTest); + sqlite3ReleaseTempReg(pParse, r1); + sqlite3ReleaseTempRange(pParse, r2, nKey+2); + break; + } +#endif /* SQLITE_OMIT_CTE */ + + + +#if !defined(SQLITE_OMIT_TRIGGER) + /* Discard the results. This is used for SELECT statements inside + ** the body of a TRIGGER. The purpose of such selects is to call + ** user-defined functions that have side effects. We do not care + ** about the actual results of the select. + */ + default: { + assert( eDest==SRT_Discard ); + break; + } +#endif + } + + /* Jump to the end of the loop if the LIMIT is reached. Except, if + ** there is a sorter, in which case the sorter has already limited + ** the output for us. + */ + if( pSort==0 && p->iLimit ){ + sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v); + } +} + +/* +** Allocate a KeyInfo object sufficient for an index of N key columns and +** X extra columns. +*/ +SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ + int nExtra = (N+X)*(sizeof(CollSeq*)+1); + KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra); + if( p ){ + p->aSortOrder = (u8*)&p->aColl[N+X]; + p->nField = (u16)N; + p->nXField = (u16)X; + p->enc = ENC(db); + p->db = db; + p->nRef = 1; + memset(&p[1], 0, nExtra); + }else{ + sqlite3OomFault(db); + } + return p; +} + +/* +** Deallocate a KeyInfo object +*/ +SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){ + if( p ){ + assert( p->nRef>0 ); + p->nRef--; + if( p->nRef==0 ) sqlite3DbFree(p->db, p); + } +} + +/* +** Make a new pointer to a KeyInfo object +*/ +SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo *p){ + if( p ){ + assert( p->nRef>0 ); + p->nRef++; + } + return p; +} + +#ifdef SQLITE_DEBUG +/* +** Return TRUE if a KeyInfo object can be change. The KeyInfo object +** can only be changed if this is just a single reference to the object. +** +** This routine is used only inside of assert() statements. +*/ +SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; } +#endif /* SQLITE_DEBUG */ + +/* +** Given an expression list, generate a KeyInfo structure that records +** the collating sequence for each expression in that expression list. +** +** If the ExprList is an ORDER BY or GROUP BY clause then the resulting +** KeyInfo structure is appropriate for initializing a virtual index to +** implement that clause. If the ExprList is the result set of a SELECT +** then the KeyInfo structure is appropriate for initializing a virtual +** index to implement a DISTINCT test. 
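+**
+** For example, for "SELECT a FROM t1 ORDER BY b COLLATE NOCASE DESC",
+** the KeyInfo built from the ORDER BY list records the NOCASE collating
+** sequence and the descending sort order for its single key column.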
+** +** Space to hold the KeyInfo structure is obtained from malloc. The calling +** function is responsible for seeing that this structure is eventually +** freed. +*/ +static KeyInfo *keyInfoFromExprList( + Parse *pParse, /* Parsing context */ + ExprList *pList, /* Form the KeyInfo object from this ExprList */ + int iStart, /* Begin with this column of pList */ + int nExtra /* Add this many extra columns to the end */ +){ + int nExpr; + KeyInfo *pInfo; + struct ExprList_item *pItem; + sqlite3 *db = pParse->db; + int i; + + nExpr = pList->nExpr; + pInfo = sqlite3KeyInfoAlloc(db, nExpr-iStart, nExtra+1); + if( pInfo ){ + assert( sqlite3KeyInfoIsWriteable(pInfo) ); + for(i=iStart, pItem=pList->a+iStart; ipExpr); + if( !pColl ) pColl = db->pDfltColl; + pInfo->aColl[i-iStart] = pColl; + pInfo->aSortOrder[i-iStart] = pItem->sortOrder; + } + } + return pInfo; +} + +/* +** Name of the connection operator, used for error messages. +*/ +static const char *selectOpName(int id){ + char *z; + switch( id ){ + case TK_ALL: z = "UNION ALL"; break; + case TK_INTERSECT: z = "INTERSECT"; break; + case TK_EXCEPT: z = "EXCEPT"; break; + default: z = "UNION"; break; + } + return z; +} + +#ifndef SQLITE_OMIT_EXPLAIN +/* +** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function +** is a no-op. Otherwise, it adds a single row of output to the EQP result, +** where the caption is of the form: +** +** "USE TEMP B-TREE FOR xxx" +** +** where xxx is one of "DISTINCT", "ORDER BY" or "GROUP BY". Exactly which +** is determined by the zUsage argument. +*/ +static void explainTempTable(Parse *pParse, const char *zUsage){ + if( pParse->explain==2 ){ + Vdbe *v = pParse->pVdbe; + char *zMsg = sqlite3MPrintf(pParse->db, "USE TEMP B-TREE FOR %s", zUsage); + sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); + } +} + +/* +** Assign expression b to lvalue a. A second, no-op, version of this macro +** is provided when SQLITE_OMIT_EXPLAIN is defined. This allows the code +** in sqlite3Select() to assign values to structure member variables that +** only exist if SQLITE_OMIT_EXPLAIN is not defined without polluting the +** code with #ifndef directives. +*/ +# define explainSetInteger(a, b) a = b + +#else +/* No-op versions of the explainXXX() functions and macros. */ +# define explainTempTable(y,z) +# define explainSetInteger(y,z) +#endif + +#if !defined(SQLITE_OMIT_EXPLAIN) && !defined(SQLITE_OMIT_COMPOUND_SELECT) +/* +** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function +** is a no-op. Otherwise, it adds a single row of output to the EQP result, +** where the caption is of one of the two forms: +** +** "COMPOSITE SUBQUERIES iSub1 and iSub2 (op)" +** "COMPOSITE SUBQUERIES iSub1 and iSub2 USING TEMP B-TREE (op)" +** +** where iSub1 and iSub2 are the integers passed as the corresponding +** function parameters, and op is the text representation of the parameter +** of the same name. The parameter "op" must be one of TK_UNION, TK_EXCEPT, +** TK_INTERSECT or TK_ALL. The first form is used if argument bUseTmp is +** false, or the second form if it is true. +*/ +static void explainComposite( + Parse *pParse, /* Parse context */ + int op, /* One of TK_UNION, TK_EXCEPT etc. 
*/ + int iSub1, /* Subquery id 1 */ + int iSub2, /* Subquery id 2 */ + int bUseTmp /* True if a temp table was used */ +){ + assert( op==TK_UNION || op==TK_EXCEPT || op==TK_INTERSECT || op==TK_ALL ); + if( pParse->explain==2 ){ + Vdbe *v = pParse->pVdbe; + char *zMsg = sqlite3MPrintf( + pParse->db, "COMPOUND SUBQUERIES %d AND %d %s(%s)", iSub1, iSub2, + bUseTmp?"USING TEMP B-TREE ":"", selectOpName(op) + ); + sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); + } +} +#else +/* No-op versions of the explainXXX() functions and macros. */ +# define explainComposite(v,w,x,y,z) +#endif + +/* +** If the inner loop was generated using a non-null pOrderBy argument, +** then the results were placed in a sorter. After the loop is terminated +** we need to run the sorter and output the results. The following +** routine generates the code needed to do that. +*/ +static void generateSortTail( + Parse *pParse, /* Parsing context */ + Select *p, /* The SELECT statement */ + SortCtx *pSort, /* Information on the ORDER BY clause */ + int nColumn, /* Number of columns of data */ + SelectDest *pDest /* Write the sorted results here */ +){ + Vdbe *v = pParse->pVdbe; /* The prepared statement */ + int addrBreak = pSort->labelDone; /* Jump here to exit loop */ + int addrContinue = sqlite3VdbeMakeLabel(v); /* Jump here for next cycle */ + int addr; + int addrOnce = 0; + int iTab; + ExprList *pOrderBy = pSort->pOrderBy; + int eDest = pDest->eDest; + int iParm = pDest->iSDParm; + int regRow; + int regRowid; + int iCol; + int nKey; + int iSortTab; /* Sorter cursor to read from */ + int nSortData; /* Trailing values to read from sorter */ + int i; + int bSeq; /* True if sorter record includes seq. no. */ + struct ExprList_item *aOutEx = p->pEList->a; + + assert( addrBreak<0 ); + if( pSort->labelBkOut ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut); + sqlite3VdbeGoto(v, addrBreak); + sqlite3VdbeResolveLabel(v, pSort->labelBkOut); + } + iTab = pSort->iECursor; + if( eDest==SRT_Output || eDest==SRT_Coroutine || eDest==SRT_Mem ){ + regRowid = 0; + regRow = pDest->iSdst; + nSortData = nColumn; + }else{ + regRowid = sqlite3GetTempReg(pParse); + regRow = sqlite3GetTempRange(pParse, nColumn); + nSortData = nColumn; + } + nKey = pOrderBy->nExpr - pSort->nOBSat; + if( pSort->sortFlags & SORTFLAG_UseSorter ){ + int regSortOut = ++pParse->nMem; + iSortTab = pParse->nTab++; + if( pSort->labelBkOut ){ + addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); + } + sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut, nKey+1+nSortData); + if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); + addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak); + VdbeCoverage(v); + codeOffset(v, p->iOffset, addrContinue); + sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab); + bSeq = 0; + }else{ + addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v); + codeOffset(v, p->iOffset, addrContinue); + iSortTab = iTab; + bSeq = 1; + } + for(i=0, iCol=nKey+bSeq; izAffSdst) ); + sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, nColumn, regRowid, + pDest->zAffSdst, nColumn); + sqlite3ExprCacheAffinityChange(pParse, regRow, nColumn); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, regRowid, regRow, nColumn); + break; + } + case SRT_Mem: { + /* The LIMIT clause will terminate the loop for us */ + break; + } +#endif + default: { + assert( eDest==SRT_Output || eDest==SRT_Coroutine ); + testcase( eDest==SRT_Output ); + testcase( eDest==SRT_Coroutine ); + if( 
eDest==SRT_Output ){ + sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn); + sqlite3ExprCacheAffinityChange(pParse, pDest->iSdst, nColumn); + }else{ + sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); + } + break; + } + } + if( regRowid ){ + if( eDest==SRT_Set ){ + sqlite3ReleaseTempRange(pParse, regRow, nColumn); + }else{ + sqlite3ReleaseTempReg(pParse, regRow); + } + sqlite3ReleaseTempReg(pParse, regRowid); + } + /* The bottom of the loop + */ + sqlite3VdbeResolveLabel(v, addrContinue); + if( pSort->sortFlags & SORTFLAG_UseSorter ){ + sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v); + }else{ + sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v); + } + if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn); + sqlite3VdbeResolveLabel(v, addrBreak); +} + +/* +** Return a pointer to a string containing the 'declaration type' of the +** expression pExpr. The string may be treated as static by the caller. +** +** Also try to estimate the size of the returned value and return that +** result in *pEstWidth. +** +** The declaration type is the exact datatype definition extracted from the +** original CREATE TABLE statement if the expression is a column. The +** declaration type for a ROWID field is INTEGER. Exactly when an expression +** is considered a column can be complex in the presence of subqueries. The +** result-set expression in all of the following SELECT statements is +** considered a column by this function. +** +** SELECT col FROM tbl; +** SELECT (SELECT col FROM tbl; +** SELECT (SELECT col FROM tbl); +** SELECT abc FROM (SELECT col AS abc FROM tbl); +** +** The declaration type for any expression other than a column is NULL. +** +** This routine has either 3 or 6 parameters depending on whether or not +** the SQLITE_ENABLE_COLUMN_METADATA compile-time option is used. +*/ +#ifdef SQLITE_ENABLE_COLUMN_METADATA +# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,C,D,E,F) +#else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */ +# define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,F) +#endif +static const char *columnTypeImpl( + NameContext *pNC, + Expr *pExpr, +#ifdef SQLITE_ENABLE_COLUMN_METADATA + const char **pzOrigDb, + const char **pzOrigTab, + const char **pzOrigCol, +#endif + u8 *pEstWidth +){ + char const *zType = 0; + int j; + u8 estWidth = 1; +#ifdef SQLITE_ENABLE_COLUMN_METADATA + char const *zOrigDb = 0; + char const *zOrigTab = 0; + char const *zOrigCol = 0; +#endif + + assert( pExpr!=0 ); + assert( pNC->pSrcList!=0 ); + switch( pExpr->op ){ + case TK_AGG_COLUMN: + case TK_COLUMN: { + /* The expression is a column. Locate the table the column is being + ** extracted from in NameContext.pSrcList. This table may be real + ** database table or a subquery. + */ + Table *pTab = 0; /* Table structure column is extracted from */ + Select *pS = 0; /* Select the column is extracted from */ + int iCol = pExpr->iColumn; /* Index of column in pTab */ + testcase( pExpr->op==TK_AGG_COLUMN ); + testcase( pExpr->op==TK_COLUMN ); + while( pNC && !pTab ){ + SrcList *pTabList = pNC->pSrcList; + for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); + if( jnSrc ){ + pTab = pTabList->a[j].pTab; + pS = pTabList->a[j].pSelect; + }else{ + pNC = pNC->pNext; + } + } + + if( pTab==0 ){ + /* At one time, code such as "SELECT new.x" within a trigger would + ** cause this condition to run. Since then, we have restructured how + ** trigger code is generated and so this condition is no longer + ** possible. 
However, it can still be true for statements like + ** the following: + ** + ** CREATE TABLE t1(col INTEGER); + ** SELECT (SELECT t1.col) FROM FROM t1; + ** + ** when columnType() is called on the expression "t1.col" in the + ** sub-select. In this case, set the column type to NULL, even + ** though it should really be "INTEGER". + ** + ** This is not a problem, as the column type of "t1.col" is never + ** used. When columnType() is called on the expression + ** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT + ** branch below. */ + break; + } + + assert( pTab && pExpr->pTab==pTab ); + if( pS ){ + /* The "table" is actually a sub-select or a view in the FROM clause + ** of the SELECT statement. Return the declaration type and origin + ** data for the result-set column of the sub-select. + */ + if( iCol>=0 && ALWAYS(iColpEList->nExpr) ){ + /* If iCol is less than zero, then the expression requests the + ** rowid of the sub-select or view. This expression is legal (see + ** test case misc2.2.2) - it always evaluates to NULL. + ** + ** The ALWAYS() is because iCol>=pS->pEList->nExpr will have been + ** caught already by name resolution. + */ + NameContext sNC; + Expr *p = pS->pEList->a[iCol].pExpr; + sNC.pSrcList = pS->pSrc; + sNC.pNext = pNC; + sNC.pParse = pNC->pParse; + zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol, &estWidth); + } + }else if( pTab->pSchema ){ + /* A real table */ + assert( !pS ); + if( iCol<0 ) iCol = pTab->iPKey; + assert( iCol==-1 || (iCol>=0 && iColnCol) ); +#ifdef SQLITE_ENABLE_COLUMN_METADATA + if( iCol<0 ){ + zType = "INTEGER"; + zOrigCol = "rowid"; + }else{ + zOrigCol = pTab->aCol[iCol].zName; + zType = sqlite3ColumnType(&pTab->aCol[iCol],0); + estWidth = pTab->aCol[iCol].szEst; + } + zOrigTab = pTab->zName; + if( pNC->pParse ){ + int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema); + zOrigDb = pNC->pParse->db->aDb[iDb].zDbSName; + } +#else + if( iCol<0 ){ + zType = "INTEGER"; + }else{ + zType = sqlite3ColumnType(&pTab->aCol[iCol],0); + estWidth = pTab->aCol[iCol].szEst; + } +#endif + } + break; + } +#ifndef SQLITE_OMIT_SUBQUERY + case TK_SELECT: { + /* The expression is a sub-select. Return the declaration type and + ** origin info for the single column in the result set of the SELECT + ** statement. + */ + NameContext sNC; + Select *pS = pExpr->x.pSelect; + Expr *p = pS->pEList->a[0].pExpr; + assert( ExprHasProperty(pExpr, EP_xIsSelect) ); + sNC.pSrcList = pS->pSrc; + sNC.pNext = pNC; + sNC.pParse = pNC->pParse; + zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol, &estWidth); + break; + } +#endif + } + +#ifdef SQLITE_ENABLE_COLUMN_METADATA + if( pzOrigDb ){ + assert( pzOrigTab && pzOrigCol ); + *pzOrigDb = zOrigDb; + *pzOrigTab = zOrigTab; + *pzOrigCol = zOrigCol; + } +#endif + if( pEstWidth ) *pEstWidth = estWidth; + return zType; +} + +/* +** Generate code that will tell the VDBE the declaration types of columns +** in the result set. 
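+**
+** These are the strings reported by sqlite3_column_decltype().  For
+** example, after preparing "SELECT x FROM t1" against a table created
+** with "CREATE TABLE t1(x VARCHAR(10))", sqlite3_column_decltype()
+** returns "VARCHAR(10)" for column 0.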
+*/
+static void generateColumnTypes(
+  Parse *pParse,      /* Parser context */
+  SrcList *pTabList,  /* List of tables */
+  ExprList *pEList    /* Expressions defining the result set */
+){
+#ifndef SQLITE_OMIT_DECLTYPE
+  Vdbe *v = pParse->pVdbe;
+  int i;
+  NameContext sNC;
+  sNC.pSrcList = pTabList;
+  sNC.pParse = pParse;
+  for(i=0; i<pEList->nExpr; i++){
+    Expr *p = pEList->a[i].pExpr;
+    const char *zType;
+#ifdef SQLITE_ENABLE_COLUMN_METADATA
+    const char *zOrigDb = 0;
+    const char *zOrigTab = 0;
+    const char *zOrigCol = 0;
+    zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol, 0);
+
+    /* The vdbe must make its own copy of the column-type and other
+    ** column specific strings, in case the schema is reset before this
+    ** virtual machine is deleted.
+    */
+    sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, SQLITE_TRANSIENT);
+    sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, SQLITE_TRANSIENT);
+    sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, SQLITE_TRANSIENT);
+#else
+    zType = columnType(&sNC, p, 0, 0, 0, 0);
+#endif
+    sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT);
+  }
+#endif /* !defined(SQLITE_OMIT_DECLTYPE) */
+}
+
+/*
+** Generate code that will tell the VDBE the names of columns
+** in the result set.  This information is used to provide the
+** azCol[] values in the callback.
+*/
+static void generateColumnNames(
+  Parse *pParse,      /* Parser context */
+  SrcList *pTabList,  /* List of tables */
+  ExprList *pEList    /* Expressions defining the result set */
+){
+  Vdbe *v = pParse->pVdbe;
+  int i, j;
+  sqlite3 *db = pParse->db;
+  int fullNames, shortNames;
+
+#ifndef SQLITE_OMIT_EXPLAIN
+  /* If this is an EXPLAIN, skip this step */
+  if( pParse->explain ){
+    return;
+  }
+#endif
+
+  if( pParse->colNamesSet || db->mallocFailed ) return;
+  assert( v!=0 );
+  assert( pTabList!=0 );
+  pParse->colNamesSet = 1;
+  fullNames = (db->flags & SQLITE_FullColNames)!=0;
+  shortNames = (db->flags & SQLITE_ShortColNames)!=0;
+  sqlite3VdbeSetNumCols(v, pEList->nExpr);
+  for(i=0; i<pEList->nExpr; i++){
+    Expr *p;
+    p = pEList->a[i].pExpr;
+    if( NEVER(p==0) ) continue;
+    if( pEList->a[i].zName ){
+      char *zName = pEList->a[i].zName;
+      sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT);
+    }else if( p->op==TK_COLUMN || p->op==TK_AGG_COLUMN ){
+      Table *pTab;
+      char *zCol;
+      int iCol = p->iColumn;
+      for(j=0; ALWAYS(j<pTabList->nSrc); j++){
+        if( pTabList->a[j].iCursor==p->iTable ) break;
+      }
+      assert( j<pTabList->nSrc );
+      pTab = pTabList->a[j].pTab;
+      if( iCol<0 ) iCol = pTab->iPKey;
+      assert( iCol==-1 || (iCol>=0 && iCol<pTab->nCol) );
+      if( iCol<0 ){
+        zCol = "rowid";
+      }else{
+        zCol = pTab->aCol[iCol].zName;
+      }
+      if( !shortNames && !fullNames ){
+        sqlite3VdbeSetColName(v, i, COLNAME_NAME,
+            sqlite3DbStrDup(db, pEList->a[i].zSpan), SQLITE_DYNAMIC);
+      }else if( fullNames ){
+        char *zName = 0;
+        zName = sqlite3MPrintf(db, "%s.%s", pTab->zName, zCol);
+        sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_DYNAMIC);
+      }else{
+        sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT);
+      }
+    }else{
+      const char *z = pEList->a[i].zSpan;
+      z = z==0 ? sqlite3MPrintf(db, "column%d", i+1) : sqlite3DbStrDup(db, z);
+      sqlite3VdbeSetColName(v, i, COLNAME_NAME, z, SQLITE_DYNAMIC);
+    }
+  }
+  generateColumnTypes(pParse, pTabList, pEList);
+}
+
+/*
+** Given an expression list (which is really the list of expressions
+** that form the result set of a SELECT statement) compute appropriate
+** column names for a table that would hold the expression list.
+**
+** All column names will be unique.
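+**
+** A column name is chosen, in order of preference, from (1) an "AS"
+** alias on the expression, (2) the column name itself if the expression
+** is a column reference, or (3) the original text of the expression.
+** For example, "SELECT a, a AS b, a+1 FROM t1" produces the column
+** names "a", "b", and "a+1".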
+**
+** Only the column names are computed.  Column.zType, Column.zColl,
+** and other fields of Column are zeroed.
+**
+** Return SQLITE_OK on success.  If a memory allocation error occurs,
+** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM.
+*/
+SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
+  Parse *pParse,          /* Parsing context */
+  ExprList *pEList,       /* Expr list from which to derive column names */
+  i16 *pnCol,             /* Write the number of columns here */
+  Column **paCol          /* Write the new column list here */
+){
+  sqlite3 *db = pParse->db;   /* Database connection */
+  int i, j;                   /* Loop counters */
+  u32 cnt;                    /* Index added to make the name unique */
+  Column *aCol, *pCol;        /* For looping over result columns */
+  int nCol;                   /* Number of columns in the result set */
+  Expr *p;                    /* Expression for a single result column */
+  char *zName;                /* Column name */
+  int nName;                  /* Size of name in zName[] */
+  Hash ht;                    /* Hash table of column names */
+
+  sqlite3HashInit(&ht);
+  if( pEList ){
+    nCol = pEList->nExpr;
+    aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol);
+    testcase( aCol==0 );
+  }else{
+    nCol = 0;
+    aCol = 0;
+  }
+  assert( nCol==(i16)nCol );
+  *pnCol = nCol;
+  *paCol = aCol;
+
+  for(i=0, pCol=aCol; i<nCol && !db->mallocFailed; i++, pCol++){
+    /* Get an appropriate name for the column
+    */
+    p = sqlite3ExprSkipCollate(pEList->a[i].pExpr);
+    if( (zName = pEList->a[i].zName)!=0 ){
+      /* If the column contains an "AS <name>" phrase, use <name> as the name */
+    }else{
+      Expr *pColExpr = p;  /* The expression that is the result column name */
+      Table *pTab;         /* Table associated with this expression */
+      while( pColExpr->op==TK_DOT ){
+        pColExpr = pColExpr->pRight;
+        assert( pColExpr!=0 );
+      }
+      if( pColExpr->op==TK_COLUMN && ALWAYS(pColExpr->pTab!=0) ){
+        /* For columns use the column name */
+        int iCol = pColExpr->iColumn;
+        pTab = pColExpr->pTab;
+        if( iCol<0 ) iCol = pTab->iPKey;
+        zName = iCol>=0 ? pTab->aCol[iCol].zName : "rowid";
+      }else if( pColExpr->op==TK_ID ){
+        assert( !ExprHasProperty(pColExpr, EP_IntValue) );
+        zName = pColExpr->u.zToken;
+      }else{
+        /* Use the original text of the column expression as its name */
+        zName = pEList->a[i].zSpan;
+      }
+    }
+    zName = sqlite3MPrintf(db, "%s", zName);
+
+    /* Make sure the column name is unique.  If the name is not unique,
+    ** append an integer to the name so that it becomes unique.
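+    ** For example, "SELECT x, x, x FROM t1" yields the result column
+    ** names "x", "x:1", and "x:2".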
+    */
+    cnt = 0;
+    while( zName && sqlite3HashFind(&ht, zName)!=0 ){
+      nName = sqlite3Strlen30(zName);
+      if( nName>0 ){
+        for(j=nName-1; j>0 && sqlite3Isdigit(zName[j]); j--){}
+        if( zName[j]==':' ) nName = j;
+      }
+      zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt);
+      if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt);
+    }
+    pCol->zName = zName;
+    sqlite3ColumnPropertiesFromName(0, pCol);
+    if( zName && sqlite3HashInsert(&ht, zName, pCol)==pCol ){
+      sqlite3OomFault(db);
+    }
+  }
+  sqlite3HashClear(&ht);
+  if( db->mallocFailed ){
+    for(j=0; j<i; j++){
+      sqlite3DbFree(db, aCol[j].zName);
+    }
+    sqlite3DbFree(db, aCol);
+    *paCol = 0;
+    *pnCol = 0;
+    return SQLITE_NOMEM_BKPT;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Add type and collation information to a column list based on
+** a SELECT statement.
+**
+** The column list presumably came from sqlite3ColumnsFromExprList().
+** The column list has only names, not types or collations.  This
+** routine goes through and adds the types and collations.
+**
+** This routine requires that all identifiers in the SELECT
+** statement be resolved.
+*/
+SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(
+  Parse *pParse,        /* Parsing context */
+  Table *pTab,          /* Add column type information to this table */
+  Select *pSelect       /* SELECT used to determine types and collations */
+){
+  sqlite3 *db = pParse->db;
+  NameContext sNC;
+  Column *pCol;
+  CollSeq *pColl;
+  int i;
+  Expr *p;
+  struct ExprList_item *a;
+  u64 szAll = 0;
+
+  assert( pSelect!=0 );
+  assert( (pSelect->selFlags & SF_Resolved)!=0 );
+  assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed );
+  if( db->mallocFailed ) return;
+  memset(&sNC, 0, sizeof(sNC));
+  sNC.pSrcList = pSelect->pSrc;
+  a = pSelect->pEList->a;
+  for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
+    const char *zType;
+    int n, m;
+    p = a[i].pExpr;
+    zType = columnType(&sNC, p, 0, 0, 0, &pCol->szEst);
+    szAll += pCol->szEst;
+    pCol->affinity = sqlite3ExprAffinity(p);
+    if( zType && (m = sqlite3Strlen30(zType))>0 ){
+      n = sqlite3Strlen30(pCol->zName);
+      pCol->zName = sqlite3DbReallocOrFree(db, pCol->zName, n+m+2);
+      if( pCol->zName ){
+        memcpy(&pCol->zName[n+1], zType, m+1);
+        pCol->colFlags |= COLFLAG_HASTYPE;
+      }
+    }
+    if( pCol->affinity==0 ) pCol->affinity = SQLITE_AFF_BLOB;
+    pColl = sqlite3ExprCollSeq(pParse, p);
+    if( pColl && pCol->zColl==0 ){
+      pCol->zColl = sqlite3DbStrDup(db, pColl->zName);
+    }
+  }
+  pTab->szTabRow = sqlite3LogEst(szAll*4);
+}
+
+/*
+** Given a SELECT statement, generate a Table structure that describes
+** the result set of that SELECT.
+*/
+SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){
+  Table *pTab;
+  sqlite3 *db = pParse->db;
+  int savedFlags;
+
+  savedFlags = db->flags;
+  db->flags &= ~SQLITE_FullColNames;
+  db->flags |= SQLITE_ShortColNames;
+  sqlite3SelectPrep(pParse, pSelect, 0);
+  if( pParse->nErr ) return 0;
+  while( pSelect->pPrior ) pSelect = pSelect->pPrior;
+  db->flags = savedFlags;
+  pTab = sqlite3DbMallocZero(db, sizeof(Table) );
+  if( pTab==0 ){
+    return 0;
+  }
+  /* The sqlite3ResultSetOfSelect() is only used in contexts where lookaside
+  ** is disabled */
+  assert( db->lookaside.bDisable );
+  pTab->nTabRef = 1;
+  pTab->zName = 0;
+  pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
+  sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
+  sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSelect);
+  pTab->iPKey = -1;
+  if( db->mallocFailed ){
+    sqlite3DeleteTable(db, pTab);
+    return 0;
+  }
+  return pTab;
+}
+
+/*
+** Get a VDBE for the given parser context.  Create a new one if necessary.
+** If an error occurs, return NULL and leave a message in pParse.
+*/
+static SQLITE_NOINLINE Vdbe *allocVdbe(Parse *pParse){
+  Vdbe *v = pParse->pVdbe = sqlite3VdbeCreate(pParse);
+  if( v ) sqlite3VdbeAddOp2(v, OP_Init, 0, 1);
+  if( pParse->pToplevel==0
+   && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst)
+  ){
+    pParse->okConstFactor = 1;
+  }
+  return v;
+}
+SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){
+  Vdbe *v = pParse->pVdbe;
+  return v ? v : allocVdbe(pParse);
+}
+
+
+/*
+** Compute the iLimit and iOffset fields of the SELECT based on the
+** pLimit and pOffset expressions.
pLimit and pOffset hold the expressions +** that appear in the original SQL statement after the LIMIT and OFFSET +** keywords. Or NULL if those keywords are omitted. iLimit and iOffset +** are the integer memory register numbers for counters used to compute +** the limit and offset. If there is no limit and/or offset, then +** iLimit and iOffset are negative. +** +** This routine changes the values of iLimit and iOffset only if +** a limit or offset is defined by pLimit and pOffset. iLimit and +** iOffset should have been preset to appropriate default values (zero) +** prior to calling this routine. +** +** The iOffset register (if it exists) is initialized to the value +** of the OFFSET. The iLimit register is initialized to LIMIT. Register +** iOffset+1 is initialized to LIMIT+OFFSET. +** +** Only if pLimit!=0 or pOffset!=0 do the limit registers get +** redefined. The UNION ALL operator uses this property to force +** the reuse of the same limit and offset registers across multiple +** SELECT statements. +*/ +static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ + Vdbe *v = 0; + int iLimit = 0; + int iOffset; + int n; + if( p->iLimit ) return; + + /* + ** "LIMIT -1" always shows all rows. There is some + ** controversy about what the correct behavior should be. + ** The current implementation interprets "LIMIT 0" to mean + ** no rows. + */ + sqlite3ExprCacheClear(pParse); + assert( p->pOffset==0 || p->pLimit!=0 ); + if( p->pLimit ){ + p->iLimit = iLimit = ++pParse->nMem; + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); + if( sqlite3ExprIsInteger(p->pLimit, &n) ){ + sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); + VdbeComment((v, "LIMIT counter")); + if( n==0 ){ + sqlite3VdbeGoto(v, iBreak); + }else if( n>=0 && p->nSelectRow>sqlite3LogEst((u64)n) ){ + p->nSelectRow = sqlite3LogEst((u64)n); + p->selFlags |= SF_FixedLimit; + } + }else{ + sqlite3ExprCode(pParse, p->pLimit, iLimit); + sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeCoverage(v); + VdbeComment((v, "LIMIT counter")); + sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, iBreak); VdbeCoverage(v); + } + if( p->pOffset ){ + p->iOffset = iOffset = ++pParse->nMem; + pParse->nMem++; /* Allocate an extra register for limit+offset */ + sqlite3ExprCode(pParse, p->pOffset, iOffset); + sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); VdbeCoverage(v); + VdbeComment((v, "OFFSET counter")); + sqlite3VdbeAddOp3(v, OP_OffsetLimit, iLimit, iOffset+1, iOffset); + VdbeComment((v, "LIMIT+OFFSET")); + } + } +} + +#ifndef SQLITE_OMIT_COMPOUND_SELECT +/* +** Return the appropriate collating sequence for the iCol-th column of +** the result set for the compound-select statement "p". Return NULL if +** the column has no default collating sequence. +** +** The collating sequence for the compound select is taken from the +** left-most term of the select that has a collating sequence. +*/ +static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){ + CollSeq *pRet; + if( p->pPrior ){ + pRet = multiSelectCollSeq(pParse, p->pPrior, iCol); + }else{ + pRet = 0; + } + assert( iCol>=0 ); + /* iCol must be less than p->pEList->nExpr. Otherwise an error would + ** have been thrown during name resolution and we would not have gotten + ** this far */ + if( pRet==0 && ALWAYS(iColpEList->nExpr) ){ + pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr); + } + return pRet; +} + +/* +** The select statement passed as the second parameter is a compound SELECT +** with an ORDER BY clause. 
This function allocates and returns a KeyInfo +** structure suitable for implementing the ORDER BY. +** +** Space to hold the KeyInfo structure is obtained from malloc. The calling +** function is responsible for ensuring that this structure is eventually +** freed. +*/ +static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){ + ExprList *pOrderBy = p->pOrderBy; + int nOrderBy = p->pOrderBy->nExpr; + sqlite3 *db = pParse->db; + KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1); + if( pRet ){ + int i; + for(i=0; i<nOrderBy; i++){ + struct ExprList_item *pItem = &pOrderBy->a[i]; + Expr *pTerm = pItem->pExpr; + CollSeq *pColl; + + if( pTerm->flags & EP_Collate ){ + pColl = sqlite3ExprCollSeq(pParse, pTerm); + }else{ + pColl = multiSelectCollSeq(pParse, p, pItem->u.x.iOrderByCol-1); + if( pColl==0 ) pColl = db->pDfltColl; + pOrderBy->a[i].pExpr = + sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName); + } + assert( sqlite3KeyInfoIsWriteable(pRet) ); + pRet->aColl[i] = pColl; + pRet->aSortOrder[i] = pOrderBy->a[i].sortOrder; + } + } + + return pRet; +} + +#ifndef SQLITE_OMIT_CTE +/* +** This routine generates VDBE code to compute the content of a WITH RECURSIVE +** query of the form: +** +** <recursive-table> AS (<setup-query> UNION [ALL] <recursive-query>) +** \___________/ \_______________/ +** p->pPrior p +** +** +** There is exactly one reference to the recursive-table in the FROM clause +** of recursive-query, marked with the SrcList->a[].fg.isRecursive flag. +** +** The setup-query runs once to generate an initial set of rows that go +** into a Queue table. Rows are extracted from the Queue table one by +** one. Each row extracted from Queue is output to pDest. Then the single +** extracted row (now in the iCurrent table) becomes the content of the +** recursive-table for a recursive-query run. The output of the recursive-query +** is added back into the Queue table. Then another row is extracted from Queue +** and the iteration continues until the Queue table is empty. +** +** If the compound query operator is UNION then no duplicate rows are ever +** inserted into the Queue table. The iDistinct table keeps a copy of all rows +** that have ever been inserted into Queue and causes duplicates to be +** discarded. If the operator is UNION ALL, then duplicates are allowed. +** +** If the query has an ORDER BY, then entries in the Queue table are kept in +** ORDER BY order and the first entry is extracted for each cycle. Without +** an ORDER BY, the Queue table is just a FIFO. +** +** If a LIMIT clause is provided, then the iteration stops after LIMIT rows +** have been output to pDest. A LIMIT of zero means to output no rows and a +** negative LIMIT means to output all rows. If there is also an OFFSET clause +** with a positive value, then the first OFFSET outputs are discarded rather +** than being sent to pDest. The LIMIT count does not begin until after OFFSET +** rows have been skipped.
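+** +** As an illustration (a hypothetical query, not taken from any test), for +** +** WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt WHERE x<10) +** SELECT x FROM cnt; +** +** the setup-query "SELECT 1" seeds the Queue with a single row. Each pass of +** the loop below moves one row from Queue into Current, sends it to pDest, +** and reruns "SELECT x+1 FROM cnt WHERE x<10" with Current standing in for +** cnt, appending the result to Queue; iteration stops once x reaches 10 and +** the Queue drains empty.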
+*/ +static void generateWithRecursiveQuery( + Parse *pParse, /* Parsing context */ + Select *p, /* The recursive SELECT to be coded */ + SelectDest *pDest /* What to do with query results */ +){ + SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */ + int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */ + Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ + Select *pSetup = p->pPrior; /* The setup query */ + int addrTop; /* Top of the loop */ + int addrCont, addrBreak; /* CONTINUE and BREAK addresses */ + int iCurrent = 0; /* The Current table */ + int regCurrent; /* Register holding Current table */ + int iQueue; /* The Queue table */ + int iDistinct = 0; /* To ensure unique results if UNION */ + int eDest = SRT_Fifo; /* How to write to Queue */ + SelectDest destQueue; /* SelectDest targeting the Queue table */ + int i; /* Loop counter */ + int rc; /* Result code */ + ExprList *pOrderBy; /* The ORDER BY clause */ + Expr *pLimit, *pOffset; /* Saved LIMIT and OFFSET */ + int regLimit, regOffset; /* Registers used by LIMIT and OFFSET */ + + /* Obtain authorization to do a recursive query */ + if( sqlite3AuthCheck(pParse, SQLITE_RECURSIVE, 0, 0, 0) ) return; + + /* Process the LIMIT and OFFSET clauses, if they exist */ + addrBreak = sqlite3VdbeMakeLabel(v); + p->nSelectRow = 320; /* 4 billion rows */ + computeLimitRegisters(pParse, p, addrBreak); + pLimit = p->pLimit; + pOffset = p->pOffset; + regLimit = p->iLimit; + regOffset = p->iOffset; + p->pLimit = p->pOffset = 0; + p->iLimit = p->iOffset = 0; + pOrderBy = p->pOrderBy; + + /* Locate the cursor number of the Current table */ + for(i=0; ALWAYS(i<pSrc->nSrc); i++){ + if( pSrc->a[i].fg.isRecursive ){ + iCurrent = pSrc->a[i].iCursor; + break; + } + } + + /* Allocate cursor numbers for Queue and Distinct. The cursor number for + ** the Distinct table must be exactly one greater than Queue in order + ** for the SRT_DistFifo and SRT_DistQueue destinations to work. */ + iQueue = pParse->nTab++; + if( p->op==TK_UNION ){ + eDest = pOrderBy ? SRT_DistQueue : SRT_DistFifo; + iDistinct = pParse->nTab++; + }else{ + eDest = pOrderBy ? SRT_Queue : SRT_Fifo; + } + sqlite3SelectDestInit(&destQueue, eDest, iQueue); + + /* Allocate cursors for Current, Queue, and Distinct. */ + regCurrent = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_OpenPseudo, iCurrent, regCurrent, nCol); + if( pOrderBy ){ + KeyInfo *pKeyInfo = multiSelectOrderByKeyInfo(pParse, p, 1); + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, iQueue, pOrderBy->nExpr+2, 0, + (char*)pKeyInfo, P4_KEYINFO); + destQueue.pOrderBy = pOrderBy; + }else{ + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iQueue, nCol); + } + VdbeComment((v, "Queue table")); + if( iDistinct ){ + p->addrOpenEphm[0] = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iDistinct, 0); + p->selFlags |= SF_UsesEphemeral; + } + + /* Detach the ORDER BY clause from the compound SELECT */ + p->pOrderBy = 0; + + /* Store the results of the setup-query in Queue.
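+ ** (In the WITH RECURSIVE sketch above, this is where the single seed row + ** produced by "SELECT 1" lands in the Queue table before the loop below + ** begins draining it.)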
*/ + pSetup->pNext = 0; + rc = sqlite3Select(pParse, pSetup, &destQueue); + pSetup->pNext = p; + if( rc ) goto end_of_recursive_query; + + /* Find the next row in the Queue and output that row */ + addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iQueue, addrBreak); VdbeCoverage(v); + + /* Transfer the next row in Queue over to Current */ + sqlite3VdbeAddOp1(v, OP_NullRow, iCurrent); /* To reset column cache */ + if( pOrderBy ){ + sqlite3VdbeAddOp3(v, OP_Column, iQueue, pOrderBy->nExpr+1, regCurrent); + }else{ + sqlite3VdbeAddOp2(v, OP_RowData, iQueue, regCurrent); + } + sqlite3VdbeAddOp1(v, OP_Delete, iQueue); + + /* Output the single row in Current */ + addrCont = sqlite3VdbeMakeLabel(v); + codeOffset(v, regOffset, addrCont); + selectInnerLoop(pParse, p, p->pEList, iCurrent, + 0, 0, pDest, addrCont, addrBreak); + if( regLimit ){ + sqlite3VdbeAddOp2(v, OP_DecrJumpZero, regLimit, addrBreak); + VdbeCoverage(v); + } + sqlite3VdbeResolveLabel(v, addrCont); + + /* Execute the recursive SELECT taking the single row in Current as + ** the value for the recursive-table. Store the results in the Queue. + */ + if( p->selFlags & SF_Aggregate ){ + sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported"); + }else{ + p->pPrior = 0; + sqlite3Select(pParse, p, &destQueue); + assert( p->pPrior==0 ); + p->pPrior = pSetup; + } + + /* Keep running the loop until the Queue is empty */ + sqlite3VdbeGoto(v, addrTop); + sqlite3VdbeResolveLabel(v, addrBreak); + +end_of_recursive_query: + sqlite3ExprListDelete(pParse->db, p->pOrderBy); + p->pOrderBy = pOrderBy; + p->pLimit = pLimit; + p->pOffset = pOffset; + return; +} +#endif /* SQLITE_OMIT_CTE */ + +/* Forward references */ +static int multiSelectOrderBy( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + SelectDest *pDest /* What to do with query results */ +); + +/* +** Handle the special case of a compound-select that originates from a +** VALUES clause. By handling this as a special case, we avoid deep +** recursion, and thus do not need to enforce the SQLITE_LIMIT_COMPOUND_SELECT +** on a VALUES clause. +** +** Because the Select object originates from a VALUES clause: +** (1) It has no LIMIT or OFFSET +** (2) All terms are UNION ALL +** (3) There is no ORDER BY clause +*/ +static int multiSelectValues( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + SelectDest *pDest /* What to do with query results */ +){ + Select *pPrior; + int nRow = 1; + int rc = 0; + assert( p->selFlags & SF_MultiValue ); + do{ + assert( p->selFlags & SF_Values ); + assert( p->op==TK_ALL || (p->op==TK_SELECT && p->pPrior==0) ); + assert( p->pLimit==0 ); + assert( p->pOffset==0 ); + assert( p->pNext==0 || p->pEList->nExpr==p->pNext->pEList->nExpr ); + if( p->pPrior==0 ) break; + assert( p->pPrior->pNext==p ); + p = p->pPrior; + nRow++; + }while(1); + while( p ){ + pPrior = p->pPrior; + p->pPrior = 0; + rc = sqlite3Select(pParse, p, pDest); + p->pPrior = pPrior; + if( rc ) break; + p->nSelectRow = nRow; + p = p->pNext; + } + return rc; +} + +/* +** This routine is called to process a compound query form from +** two or more separate queries using UNION, UNION ALL, EXCEPT, or +** INTERSECT +** +** "p" points to the right-most of the two queries. the query on the +** left is p->pPrior. The left query could also be a compound query +** in which case this routine will be called recursively. 
+** +** The results of the total query are to be written into a destination +** of type eDest with parameter iParm. +** +** Example 1: Consider a three-way compound SQL statement. +** +** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3 +** +** This statement is parsed up as follows: +** +** SELECT c FROM t3 +** | +** `-----> SELECT b FROM t2 +** | +** `------> SELECT a FROM t1 +** +** The arrows in the diagram above represent the Select.pPrior pointer. +** So if this routine is called with p equal to the t3 query, then +** pPrior will be the t2 query. p->op will be TK_UNION in this case. +** +** Notice that because of the way SQLite parses compound SELECTs, the +** individual selects always group from left to right. +*/ +static int multiSelect( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + SelectDest *pDest /* What to do with query results */ +){ + int rc = SQLITE_OK; /* Success code from a subroutine */ + Select *pPrior; /* Another SELECT immediately to our left */ + Vdbe *v; /* Generate code to this VDBE */ + SelectDest dest; /* Alternative data destination */ + Select *pDelete = 0; /* Chain of simple selects to delete */ + sqlite3 *db; /* Database connection */ +#ifndef SQLITE_OMIT_EXPLAIN + int iSub1 = 0; /* EQP id of left-hand query */ + int iSub2 = 0; /* EQP id of right-hand query */ +#endif + + /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only + ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT. + */ + assert( p && p->pPrior ); /* Calling function guarantees this much */ + assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION ); + db = pParse->db; + pPrior = p->pPrior; + dest = *pDest; + if( pPrior->pOrderBy ){ + sqlite3ErrorMsg(pParse,"ORDER BY clause should come after %s not before", + selectOpName(p->op)); + rc = 1; + goto multi_select_end; + } + if( pPrior->pLimit ){ + sqlite3ErrorMsg(pParse,"LIMIT clause should come after %s not before", + selectOpName(p->op)); + rc = 1; + goto multi_select_end; + } + + v = sqlite3GetVdbe(pParse); + assert( v!=0 ); /* The VDBE already created by calling function */ + + /* Create the destination temporary table if necessary + */ + if( dest.eDest==SRT_EphemTab ){ + assert( p->pEList ); + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iSDParm, p->pEList->nExpr); + dest.eDest = SRT_Table; + } + + /* Special handling for a compound-select that originates as a VALUES clause. + */ + if( p->selFlags & SF_MultiValue ){ + rc = multiSelectValues(pParse, p, &dest); + goto multi_select_end; + } + + /* Make sure all SELECTs in the statement have the same number of elements + ** in their result sets. + */ + assert( p->pEList && pPrior->pEList ); + assert( p->pEList->nExpr==pPrior->pEList->nExpr ); + +#ifndef SQLITE_OMIT_CTE + if( p->selFlags & SF_Recursive ){ + generateWithRecursiveQuery(pParse, p, &dest); + }else +#endif + + /* Compound SELECTs that have an ORDER BY clause are handled separately. + */ + if( p->pOrderBy ){ + return multiSelectOrderBy(pParse, p, pDest); + }else + + /* Generate code for the left and right SELECT statements. 
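+ ** (For example, given "SELECT a FROM t1 UNION ALL SELECT b FROM t2 LIMIT 3", + ** the TK_ALL case below codes the t1 SELECT first, handing it p->pLimit; + ** the OP_IfNot test coded between the two SELECTs then skips the t2 SELECT + ** entirely whenever t1 by itself satisfies the LIMIT.)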
+ */ + switch( p->op ){ + case TK_ALL: { + int addr = 0; + int nLimit; + assert( !pPrior->pLimit ); + pPrior->iLimit = p->iLimit; + pPrior->iOffset = p->iOffset; + pPrior->pLimit = p->pLimit; + pPrior->pOffset = p->pOffset; + explainSetInteger(iSub1, pParse->iNextSelectId); + rc = sqlite3Select(pParse, pPrior, &dest); + p->pLimit = 0; + p->pOffset = 0; + if( rc ){ + goto multi_select_end; + } + p->pPrior = 0; + p->iLimit = pPrior->iLimit; + p->iOffset = pPrior->iOffset; + if( p->iLimit ){ + addr = sqlite3VdbeAddOp1(v, OP_IfNot, p->iLimit); VdbeCoverage(v); + VdbeComment((v, "Jump ahead if LIMIT reached")); + if( p->iOffset ){ + sqlite3VdbeAddOp3(v, OP_OffsetLimit, + p->iLimit, p->iOffset+1, p->iOffset); + } + } + explainSetInteger(iSub2, pParse->iNextSelectId); + rc = sqlite3Select(pParse, p, &dest); + testcase( rc!=SQLITE_OK ); + pDelete = p->pPrior; + p->pPrior = pPrior; + p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); + if( pPrior->pLimit + && sqlite3ExprIsInteger(pPrior->pLimit, &nLimit) + && nLimit>0 && p->nSelectRow > sqlite3LogEst((u64)nLimit) + ){ + p->nSelectRow = sqlite3LogEst((u64)nLimit); + } + if( addr ){ + sqlite3VdbeJumpHere(v, addr); + } + break; + } + case TK_EXCEPT: + case TK_UNION: { + int unionTab; /* Cursor number of the temporary table holding result */ + u8 op = 0; /* One of the SRT_ operations to apply to self */ + int priorOp; /* The SRT_ operation to apply to prior selects */ + Expr *pLimit, *pOffset; /* Saved values of p->nLimit and p->nOffset */ + int addr; + SelectDest uniondest; + + testcase( p->op==TK_EXCEPT ); + testcase( p->op==TK_UNION ); + priorOp = SRT_Union; + if( dest.eDest==priorOp ){ + /* We can reuse a temporary table generated by a SELECT to our + ** right. + */ + assert( p->pLimit==0 ); /* Not allowed on leftward elements */ + assert( p->pOffset==0 ); /* Not allowed on leftward elements */ + unionTab = dest.iSDParm; + }else{ + /* We will need to create our own temporary table to hold the + ** intermediate results. + */ + unionTab = pParse->nTab++; + assert( p->pOrderBy==0 ); + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0); + assert( p->addrOpenEphm[0] == -1 ); + p->addrOpenEphm[0] = addr; + findRightmost(p)->selFlags |= SF_UsesEphemeral; + assert( p->pEList ); + } + + /* Code the SELECT statements to our left + */ + assert( !pPrior->pOrderBy ); + sqlite3SelectDestInit(&uniondest, priorOp, unionTab); + explainSetInteger(iSub1, pParse->iNextSelectId); + rc = sqlite3Select(pParse, pPrior, &uniondest); + if( rc ){ + goto multi_select_end; + } + + /* Code the current SELECT statement + */ + if( p->op==TK_EXCEPT ){ + op = SRT_Except; + }else{ + assert( p->op==TK_UNION ); + op = SRT_Union; + } + p->pPrior = 0; + pLimit = p->pLimit; + p->pLimit = 0; + pOffset = p->pOffset; + p->pOffset = 0; + uniondest.eDest = op; + explainSetInteger(iSub2, pParse->iNextSelectId); + rc = sqlite3Select(pParse, p, &uniondest); + testcase( rc!=SQLITE_OK ); + /* Query flattening in sqlite3Select() might refill p->pOrderBy. + ** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */ + sqlite3ExprListDelete(db, p->pOrderBy); + pDelete = p->pPrior; + p->pPrior = pPrior; + p->pOrderBy = 0; + if( p->op==TK_UNION ){ + p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); + } + sqlite3ExprDelete(db, p->pLimit); + p->pLimit = pLimit; + p->pOffset = pOffset; + p->iLimit = 0; + p->iOffset = 0; + + /* Convert the data in the temporary table into whatever form + ** it is that we currently need. 
+ */ + assert( unionTab==dest.iSDParm || dest.eDest!=priorOp ); + if( dest.eDest!=priorOp ){ + int iCont, iBreak, iStart; + assert( p->pEList ); + if( dest.eDest==SRT_Output ){ + Select *pFirst = p; + while( pFirst->pPrior ) pFirst = pFirst->pPrior; + generateColumnNames(pParse, pFirst->pSrc, pFirst->pEList); + } + iBreak = sqlite3VdbeMakeLabel(v); + iCont = sqlite3VdbeMakeLabel(v); + computeLimitRegisters(pParse, p, iBreak); + sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v); + iStart = sqlite3VdbeCurrentAddr(v); + selectInnerLoop(pParse, p, p->pEList, unionTab, + 0, 0, &dest, iCont, iBreak); + sqlite3VdbeResolveLabel(v, iCont); + sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v); + sqlite3VdbeResolveLabel(v, iBreak); + sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0); + } + break; + } + default: assert( p->op==TK_INTERSECT ); { + int tab1, tab2; + int iCont, iBreak, iStart; + Expr *pLimit, *pOffset; + int addr; + SelectDest intersectdest; + int r1; + + /* INTERSECT is different from the others since it requires + ** two temporary tables. Hence it has its own case. Begin + ** by allocating the tables we will need. + */ + tab1 = pParse->nTab++; + tab2 = pParse->nTab++; + assert( p->pOrderBy==0 ); + + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0); + assert( p->addrOpenEphm[0] == -1 ); + p->addrOpenEphm[0] = addr; + findRightmost(p)->selFlags |= SF_UsesEphemeral; + assert( p->pEList ); + + /* Code the SELECTs to our left into temporary table "tab1". + */ + sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); + explainSetInteger(iSub1, pParse->iNextSelectId); + rc = sqlite3Select(pParse, pPrior, &intersectdest); + if( rc ){ + goto multi_select_end; + } + + /* Code the current SELECT into temporary table "tab2" + */ + addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0); + assert( p->addrOpenEphm[1] == -1 ); + p->addrOpenEphm[1] = addr; + p->pPrior = 0; + pLimit = p->pLimit; + p->pLimit = 0; + pOffset = p->pOffset; + p->pOffset = 0; + intersectdest.iSDParm = tab2; + explainSetInteger(iSub2, pParse->iNextSelectId); + rc = sqlite3Select(pParse, p, &intersectdest); + testcase( rc!=SQLITE_OK ); + pDelete = p->pPrior; + p->pPrior = pPrior; + if( p->nSelectRow>pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; + sqlite3ExprDelete(db, p->pLimit); + p->pLimit = pLimit; + p->pOffset = pOffset; + + /* Generate code to take the intersection of the two temporary + ** tables. + */ + assert( p->pEList ); + if( dest.eDest==SRT_Output ){ + Select *pFirst = p; + while( pFirst->pPrior ) pFirst = pFirst->pPrior; + generateColumnNames(pParse, pFirst->pSrc, pFirst->pEList); + } + iBreak = sqlite3VdbeMakeLabel(v); + iCont = sqlite3VdbeMakeLabel(v); + computeLimitRegisters(pParse, p, iBreak); + sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v); + r1 = sqlite3GetTempReg(pParse); + iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1); + sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v); + sqlite3ReleaseTempReg(pParse, r1); + selectInnerLoop(pParse, p, p->pEList, tab1, + 0, 0, &dest, iCont, iBreak); + sqlite3VdbeResolveLabel(v, iCont); + sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v); + sqlite3VdbeResolveLabel(v, iBreak); + sqlite3VdbeAddOp2(v, OP_Close, tab2, 0); + sqlite3VdbeAddOp2(v, OP_Close, tab1, 0); + break; + } + } + + explainComposite(pParse, p->op, iSub1, iSub2, p->op!=TK_ALL); + + /* Compute collating sequences used by + ** temporary tables needed to implement the compound select. 
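+ ** (Hypothetical example: in "SELECT x COLLATE NOCASE FROM t1 UNION SELECT + ** y FROM t2", multiSelectCollSeq() takes NOCASE from the left-most term, + ** so the ephemeral table that removes duplicates treats 'abc' and 'ABC' + ** as the same row.)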
+ ** Attach the KeyInfo structure to all temporary tables. + ** + ** This section is run by the right-most SELECT statement only. + ** SELECT statements to the left always skip this part. The right-most + ** SELECT might also skip this part if it has no ORDER BY clause and + ** no temp tables are required. + */ + if( p->selFlags & SF_UsesEphemeral ){ + int i; /* Loop counter */ + KeyInfo *pKeyInfo; /* Collating sequence for the result set */ + Select *pLoop; /* For looping through SELECT statements */ + CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */ + int nCol; /* Number of columns in result set */ + + assert( p->pNext==0 ); + nCol = p->pEList->nExpr; + pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1); + if( !pKeyInfo ){ + rc = SQLITE_NOMEM_BKPT; + goto multi_select_end; + } + for(i=0, apColl=pKeyInfo->aColl; i<nCol; i++, apColl++){ + *apColl = multiSelectCollSeq(pParse, p, i); + if( 0==*apColl ){ + *apColl = db->pDfltColl; + } + } + + for(pLoop=p; pLoop; pLoop=pLoop->pPrior){ + for(i=0; i<2; i++){ + int addr = pLoop->addrOpenEphm[i]; + if( addr<0 ){ + /* If [0] is unused then [1] is also unused. So we can + ** always safely abort as soon as the first unused slot is found */ + assert( pLoop->addrOpenEphm[1]<0 ); + break; + } + sqlite3VdbeChangeP2(v, addr, nCol); + sqlite3VdbeChangeP4(v, addr, (char*)sqlite3KeyInfoRef(pKeyInfo), + P4_KEYINFO); + pLoop->addrOpenEphm[i] = -1; + } + } + sqlite3KeyInfoUnref(pKeyInfo); + } + +multi_select_end: + pDest->iSdst = dest.iSdst; + pDest->nSdst = dest.nSdst; + sqlite3SelectDelete(db, pDelete); + return rc; +} +#endif /* SQLITE_OMIT_COMPOUND_SELECT */ + +/* +** Error message for when two or more terms of a compound select have different +** size result sets. +*/ +SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){ + if( p->selFlags & SF_Values ){ + sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms"); + }else{ + sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" + " do not have the same number of result columns", selectOpName(p->op)); + } +} + +/* +** Code an output subroutine for a coroutine implementation of a +** SELECT statement. +** +** The data to be output is contained in pIn->iSdst. There are +** pIn->nSdst columns to be output. pDest is where the output should +** be sent. +** +** regReturn is the number of the register holding the subroutine +** return address. +** +** If regPrev>0 then it is the first register in a vector that +** records the previous output. mem[regPrev] is a flag that is false +** if there has been no previous output. If regPrev>0 then code is +** generated to suppress duplicates. pKeyInfo is used for comparing +** keys. +** +** If the LIMIT found in p->iLimit is reached, jump immediately to +** iBreak. +*/ +static int generateOutputSubroutine( + Parse *pParse, /* Parsing context */ + Select *p, /* The SELECT statement */ + SelectDest *pIn, /* Coroutine supplying data */ + SelectDest *pDest, /* Where to send the data */ + int regReturn, /* The return address register */ + int regPrev, /* Previous result register.
No uniqueness if 0 */ + KeyInfo *pKeyInfo, /* For comparing with previous entry */ + int iBreak /* Jump here if we hit the LIMIT */ +){ + Vdbe *v = pParse->pVdbe; + int iContinue; + int addr; + + addr = sqlite3VdbeCurrentAddr(v); + iContinue = sqlite3VdbeMakeLabel(v); + + /* Suppress duplicates for UNION, EXCEPT, and INTERSECT + */ + if( regPrev ){ + int addr1, addr2; + addr1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); VdbeCoverage(v); + addr2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst, + (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); + sqlite3VdbeAddOp3(v, OP_Jump, addr2+2, iContinue, addr2+2); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, addr1); + sqlite3VdbeAddOp3(v, OP_Copy, pIn->iSdst, regPrev+1, pIn->nSdst-1); + sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev); + } + if( pParse->db->mallocFailed ) return 0; + + /* Suppress the first OFFSET entries if there is an OFFSET clause + */ + codeOffset(v, p->iOffset, iContinue); + + assert( pDest->eDest!=SRT_Exists ); + assert( pDest->eDest!=SRT_Table ); + switch( pDest->eDest ){ + /* Store the result as data using a unique key. + */ + case SRT_EphemTab: { + int r1 = sqlite3GetTempReg(pParse); + int r2 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1); + sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iSDParm, r2); + sqlite3VdbeAddOp3(v, OP_Insert, pDest->iSDParm, r1, r2); + sqlite3VdbeChangeP5(v, OPFLAG_APPEND); + sqlite3ReleaseTempReg(pParse, r2); + sqlite3ReleaseTempReg(pParse, r1); + break; + } + +#ifndef SQLITE_OMIT_SUBQUERY + /* If we are creating a set for an "expr IN (SELECT ...)". + */ + case SRT_Set: { + int r1; + testcase( pIn->nSdst>1 ); + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, + r1, pDest->zAffSdst, pIn->nSdst); + sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1, + pIn->iSdst, pIn->nSdst); + sqlite3ReleaseTempReg(pParse, r1); + break; + } + + /* If this is a scalar select that is part of an expression, then + ** store the results in the appropriate memory cell and break out + ** of the scan loop. + */ + case SRT_Mem: { + assert( pIn->nSdst==1 || pParse->nErr>0 ); testcase( pIn->nSdst!=1 ); + sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, 1); + /* The LIMIT clause will jump out of the loop for us */ + break; + } +#endif /* #ifndef SQLITE_OMIT_SUBQUERY */ + + /* The results are stored in a sequence of registers + ** starting at pDest->iSdst. Then the co-routine yields. + */ + case SRT_Coroutine: { + if( pDest->iSdst==0 ){ + pDest->iSdst = sqlite3GetTempRange(pParse, pIn->nSdst); + pDest->nSdst = pIn->nSdst; + } + sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pIn->nSdst); + sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); + break; + } + + /* If none of the above, then the result destination must be + ** SRT_Output. This routine is never called with any other + ** destination other than the ones handled above or SRT_Output. + ** + ** For SRT_Output, results are stored in a sequence of registers. + ** Then the OP_ResultRow opcode is used to cause sqlite3_step() to + ** return the next row of result. + */ + default: { + assert( pDest->eDest==SRT_Output ); + sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iSdst, pIn->nSdst); + sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst); + break; + } + } + + /* Jump to the end of the loop if the LIMIT is reached. 
+ */ + if( p->iLimit ){ + sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v); + } + + /* Generate the subroutine return + */ + sqlite3VdbeResolveLabel(v, iContinue); + sqlite3VdbeAddOp1(v, OP_Return, regReturn); + + return addr; +} + +/* +** Alternative compound select code generator for cases when there +** is an ORDER BY clause. +** +** We assume a query of the following form: +** +** <selectA> <operator> <selectB> ORDER BY <orderbylist> +** +** <operator> is one of UNION ALL, UNION, EXCEPT, or INTERSECT. The idea +** is to code both <selectA> and <selectB> with the ORDER BY clause as +** co-routines. Then run the co-routines in parallel and merge the results +** into the output. In addition to the two coroutines (called selectA and +** selectB) there are 7 subroutines: +** +** outA: Move the output of the selectA coroutine into the output +** of the compound query. +** +** outB: Move the output of the selectB coroutine into the output +** of the compound query. (Only generated for UNION and +** UNION ALL. EXCEPT and INTERSECT never output a row that +** appears only in B.) +** +** AltB: Called when there is data from both coroutines and A<B. +** +** AeqB: Called when there is data from both coroutines and A==B. +** +** AgtB: Called when there is data from both coroutines and A>B. +** +** EofA: Called when data is exhausted from selectA. +** +** EofB: Called when data is exhausted from selectB. +** +** The implementation of the latter five subroutines depends on which +** <operator> is used: +** +** +** UNION ALL UNION EXCEPT INTERSECT +** ------------- ----------------- -------------- ----------------- +** AltB: outA, nextA outA, nextA outA, nextA nextA +** +** AeqB: outA, nextA nextA nextA outA, nextA +** +** AgtB: outB, nextB outB, nextB nextB nextB +** +** EofA: outB, nextB outB, nextB halt halt +** +** EofB: outA, nextA outA, nextA outA, nextA halt +** +** In the AltB, AeqB, and AgtB subroutines, an EOF on A following nextA +** causes an immediate jump to EofA and an EOF on B following nextB causes +** an immediate jump to EofB. Within EofA and EofB, an EOF on entry or +** following nextX causes a jump to the end of the select processing. +** +** Duplicate removal in the UNION, EXCEPT, and INTERSECT cases is handled +** within the output subroutine. The regPrev register set holds the previously +** output value. A comparison is made against this value and the output +** is skipped if the next results would be the same as the previous. +** +** The implementation plan is to implement the two coroutines and seven +** subroutines first, then put the control logic at the bottom. Like this: +** +** goto Init +** coA: coroutine for left query (A) +** coB: coroutine for right query (B) +** outA: output one row of A +** outB: output one row of B (UNION and UNION ALL only) +** EofA: ... +** EofB: ... +** AltB: ... +** AeqB: ... +** AgtB: ... +** Init: initialize coroutine registers +** yield coA +** if eof(A) goto EofA +** yield coB +** if eof(B) goto EofB +** Cmpr: Compare A, B +** Jump AltB, AeqB, AgtB +** End: ... +** +** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not +** actually called using Gosub and they do not Return. EofA and EofB loop +** until all data is exhausted then jump to the "end" label. AltB, AeqB, +** and AgtB jump to either Cmpr or to one of EofA or EofB.
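+** +** For a hypothetical "SELECT x FROM t1 UNION SELECT x FROM t2 ORDER BY x", +** coroutine A yields t1 rows in x order and coroutine B yields t2 rows in +** x order. Each trip through Cmpr dispatches to AltB, AeqB, or AgtB, so +** matching keys are output only once (for UNION, AeqB does nextA without +** outA) and the merged result emerges already in ORDER BY order, with no +** sorter pass required.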
+*/ +#ifndef SQLITE_OMIT_COMPOUND_SELECT +static int multiSelectOrderBy( + Parse *pParse, /* Parsing context */ + Select *p, /* The right-most of SELECTs to be coded */ + SelectDest *pDest /* What to do with query results */ +){ + int i, j; /* Loop counters */ + Select *pPrior; /* Another SELECT immediately to our left */ + Vdbe *v; /* Generate code to this VDBE */ + SelectDest destA; /* Destination for coroutine A */ + SelectDest destB; /* Destination for coroutine B */ + int regAddrA; /* Address register for select-A coroutine */ + int regAddrB; /* Address register for select-B coroutine */ + int addrSelectA; /* Address of the select-A coroutine */ + int addrSelectB; /* Address of the select-B coroutine */ + int regOutA; /* Address register for the output-A subroutine */ + int regOutB; /* Address register for the output-B subroutine */ + int addrOutA; /* Address of the output-A subroutine */ + int addrOutB = 0; /* Address of the output-B subroutine */ + int addrEofA; /* Address of the select-A-exhausted subroutine */ + int addrEofA_noB; /* Alternate addrEofA if B is uninitialized */ + int addrEofB; /* Address of the select-B-exhausted subroutine */ + int addrAltB; /* Address of the A<B subroutine */ + int addrAeqB; /* Address of the A==B subroutine */ + int addrAgtB; /* Address of the A>B subroutine */ + int regLimitA; /* Limit register for select-A */ + int regLimitB; /* Limit register for select-B */ + int regPrev; /* A range of registers to hold previous output */ + int savedLimit; /* Saved value of p->iLimit */ + int savedOffset; /* Saved value of p->iOffset */ + int labelCmpr; /* Label for the start of the merge algorithm */ + int labelEnd; /* Label for the end of the overall SELECT stmt */ + int addr1; /* Jump instructions that get retargeted */ + int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */ + KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */ + KeyInfo *pKeyMerge; /* Comparison information for merging rows */ + sqlite3 *db; /* Database connection */ + ExprList *pOrderBy; /* The ORDER BY clause */ + int nOrderBy; /* Number of terms in the ORDER BY clause */ + int *aPermute; /* Mapping from ORDER BY terms to result set columns */ +#ifndef SQLITE_OMIT_EXPLAIN + int iSub1; /* EQP id of left-hand query */ + int iSub2; /* EQP id of right-hand query */ +#endif + + assert( p->pOrderBy!=0 ); + assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */ + db = pParse->db; + v = pParse->pVdbe; + assert( v!=0 ); /* Already thrown the error if VDBE alloc failed */ + labelEnd = sqlite3VdbeMakeLabel(v); + labelCmpr = sqlite3VdbeMakeLabel(v); + + + /* Patch up the ORDER BY clause + */ + op = p->op; + pPrior = p->pPrior; + assert( pPrior->pOrderBy==0 ); + pOrderBy = p->pOrderBy; + assert( pOrderBy ); + nOrderBy = pOrderBy->nExpr; + + /* For operators other than UNION ALL we have to make sure that + ** the ORDER BY clause covers every term of the result set. Add + ** terms to the ORDER BY clause as necessary.
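+** (For instance, a hypothetical "SELECT a, b FROM t1 UNION SELECT a, b +** FROM t2 ORDER BY a" covers only the first result column; the loop below +** appends an integer term for column 2, so the merge effectively runs over +** "ORDER BY a, 2" and whole rows compare deterministically.)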
+ */ + if( op!=TK_ALL ){ + for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){ + struct ExprList_item *pItem; + for(j=0, pItem=pOrderBy->a; j<nOrderBy; j++, pItem++){ + assert( pItem->u.x.iOrderByCol>0 ); + if( pItem->u.x.iOrderByCol==i ) break; + } + if( j==nOrderBy ){ + Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); + if( pNew==0 ) return SQLITE_NOMEM_BKPT; + pNew->flags |= EP_IntValue; + pNew->u.iValue = i; + pOrderBy = sqlite3ExprListAppend(pParse, pOrderBy, pNew); + if( pOrderBy ) pOrderBy->a[nOrderBy++].u.x.iOrderByCol = (u16)i; + } + } + } + + /* Compute the comparison permutation and the keyinfo that are used to + ** determine if the next + ** row of results comes from selectA or selectB. Also add explicit + ** collations to the ORDER BY clause terms so that when the subqueries + ** to the right and the left are evaluated, they use the correct + ** collation. + */ + aPermute = sqlite3DbMallocRawNN(db, sizeof(int)*(nOrderBy + 1)); + if( aPermute ){ + struct ExprList_item *pItem; + aPermute[0] = nOrderBy; + for(i=1, pItem=pOrderBy->a; i<=nOrderBy; i++, pItem++){ + assert( pItem->u.x.iOrderByCol>0 ); + assert( pItem->u.x.iOrderByCol<=p->pEList->nExpr ); + aPermute[i] = pItem->u.x.iOrderByCol - 1; + } + pKeyMerge = multiSelectOrderByKeyInfo(pParse, p, 1); + }else{ + pKeyMerge = 0; + } + + /* Reattach the ORDER BY clause to the query. + */ + p->pOrderBy = pOrderBy; + pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0); + + /* Allocate a range of temporary registers and the KeyInfo needed + ** for the logic that removes duplicate result rows when the + ** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL). + */ + if( op==TK_ALL ){ + regPrev = 0; + }else{ + int nExpr = p->pEList->nExpr; + assert( nOrderBy>=nExpr || db->mallocFailed ); + regPrev = pParse->nMem+1; + pParse->nMem += nExpr+1; + sqlite3VdbeAddOp2(v, OP_Integer, 0, regPrev); + pKeyDup = sqlite3KeyInfoAlloc(db, nExpr, 1); + if( pKeyDup ){ + assert( sqlite3KeyInfoIsWriteable(pKeyDup) ); + for(i=0; i<nExpr; i++){ + pKeyDup->aColl[i] = multiSelectCollSeq(pParse, p, i); + pKeyDup->aSortOrder[i] = 0; + } + } + } + + /* Separate the left and the right query from one another + */ + p->pPrior = 0; + pPrior->pNext = 0; + sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER"); + if( pPrior->pPrior==0 ){ + sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER"); + } + + /* Compute the limit registers */ + computeLimitRegisters(pParse, p, labelEnd); + if( p->iLimit && op==TK_ALL ){ + regLimitA = ++pParse->nMem; + regLimitB = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Copy, p->iOffset ? p->iOffset+1 : p->iLimit, + regLimitA); + sqlite3VdbeAddOp2(v, OP_Copy, regLimitA, regLimitB); + }else{ + regLimitA = regLimitB = 0; + } + sqlite3ExprDelete(db, p->pLimit); + p->pLimit = 0; + sqlite3ExprDelete(db, p->pOffset); + p->pOffset = 0; + + regAddrA = ++pParse->nMem; + regAddrB = ++pParse->nMem; + regOutA = ++pParse->nMem; + regOutB = ++pParse->nMem; + sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA); + sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB); + + /* Generate a coroutine to evaluate the SELECT statement to the + ** left of the compound operator - the "A" select.
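+ ** (Mechanism sketch: OP_InitCoroutine below stores the coroutine's entry + ** address in regAddrA; each SRT_Coroutine row ends in an OP_Yield that + ** suspends the sub-select, and the merge logic resumes it with its own + ** OP_Yield on regAddrA whenever the next A row is needed.)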
+ */ + addrSelectA = sqlite3VdbeCurrentAddr(v) + 1; + addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrA, 0, addrSelectA); + VdbeComment((v, "left SELECT")); + pPrior->iLimit = regLimitA; + explainSetInteger(iSub1, pParse->iNextSelectId); + sqlite3Select(pParse, pPrior, &destA); + sqlite3VdbeEndCoroutine(v, regAddrA); + sqlite3VdbeJumpHere(v, addr1); + + /* Generate a coroutine to evaluate the SELECT statement on + ** the right - the "B" select + */ + addrSelectB = sqlite3VdbeCurrentAddr(v) + 1; + addr1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrB, 0, addrSelectB); + VdbeComment((v, "right SELECT")); + savedLimit = p->iLimit; + savedOffset = p->iOffset; + p->iLimit = regLimitB; + p->iOffset = 0; + explainSetInteger(iSub2, pParse->iNextSelectId); + sqlite3Select(pParse, p, &destB); + p->iLimit = savedLimit; + p->iOffset = savedOffset; + sqlite3VdbeEndCoroutine(v, regAddrB); + + /* Generate a subroutine that outputs the current row of the A + ** select as the next output row of the compound select. + */ + VdbeNoopComment((v, "Output routine for A")); + addrOutA = generateOutputSubroutine(pParse, + p, &destA, pDest, regOutA, + regPrev, pKeyDup, labelEnd); + + /* Generate a subroutine that outputs the current row of the B + ** select as the next output row of the compound select. + */ + if( op==TK_ALL || op==TK_UNION ){ + VdbeNoopComment((v, "Output routine for B")); + addrOutB = generateOutputSubroutine(pParse, + p, &destB, pDest, regOutB, + regPrev, pKeyDup, labelEnd); + } + sqlite3KeyInfoUnref(pKeyDup); + + /* Generate a subroutine to run when the results from select A + ** are exhausted and only data in select B remains. + */ + if( op==TK_EXCEPT || op==TK_INTERSECT ){ + addrEofA_noB = addrEofA = labelEnd; + }else{ + VdbeNoopComment((v, "eof-A subroutine")); + addrEofA = sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); + addrEofA_noB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, labelEnd); + VdbeCoverage(v); + sqlite3VdbeGoto(v, addrEofA); + p->nSelectRow = sqlite3LogEstAdd(p->nSelectRow, pPrior->nSelectRow); + } + + /* Generate a subroutine to run when the results from select B + ** are exhausted and only data in select A remains. + */ + if( op==TK_INTERSECT ){ + addrEofB = addrEofA; + if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; + }else{ + VdbeNoopComment((v, "eof-B subroutine")); + addrEofB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA); + sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, labelEnd); VdbeCoverage(v); + sqlite3VdbeGoto(v, addrEofB); + } + + /* Generate code to handle the case of A<B + */ + VdbeNoopComment((v, "A-lt-B subroutine")); + addrAltB = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA); + sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v); + sqlite3VdbeGoto(v, labelCmpr); + + /* Generate code to handle the case of A==B + */ + if( op==TK_ALL ){ + addrAeqB = addrAltB; + }else if( op==TK_INTERSECT ){ + addrAeqB = addrAltB; + addrAltB++; + }else{ + VdbeNoopComment((v, "A-eq-B subroutine")); + addrAeqB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v); + sqlite3VdbeGoto(v, labelCmpr); + } + + /* Generate code to handle the case of A>B + */ + VdbeNoopComment((v, "A-gt-B subroutine")); + addrAgtB = sqlite3VdbeCurrentAddr(v); + if( op==TK_ALL || op==TK_UNION ){ + sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); + } + sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v); + sqlite3VdbeGoto(v, labelCmpr); + + /* This code runs once to initialize everything.
+ */ + sqlite3VdbeJumpHere(v, addr1); + sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA_noB); VdbeCoverage(v); + sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v); + + /* Implement the main merge loop + */ + sqlite3VdbeResolveLabel(v, labelCmpr); + sqlite3VdbeAddOp4(v, OP_Permutation, 0, 0, 0, (char*)aPermute, P4_INTARRAY); + sqlite3VdbeAddOp4(v, OP_Compare, destA.iSdst, destB.iSdst, nOrderBy, + (char*)pKeyMerge, P4_KEYINFO); + sqlite3VdbeChangeP5(v, OPFLAG_PERMUTE); + sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); VdbeCoverage(v); + + /* Jump to this point in order to terminate the query. + */ + sqlite3VdbeResolveLabel(v, labelEnd); + + /* Set the number of output columns + */ + if( pDest->eDest==SRT_Output ){ + Select *pFirst = pPrior; + while( pFirst->pPrior ) pFirst = pFirst->pPrior; + generateColumnNames(pParse, pFirst->pSrc, pFirst->pEList); + } + + /* Reassemble the compound query so that it will be freed correctly + ** by the calling function */ + if( p->pPrior ){ + sqlite3SelectDelete(db, p->pPrior); + } + p->pPrior = pPrior; + pPrior->pNext = p; + + /*** TBD: Insert subroutine calls to close cursors on incomplete + **** subqueries ****/ + explainComposite(pParse, p->op, iSub1, iSub2, 0); + return pParse->nErr!=0; +} +#endif + +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +/* Forward Declarations */ +static void substExprList(Parse*, ExprList*, int, ExprList*); +static void substSelect(Parse*, Select *, int, ExprList*, int); + +/* +** Scan through the expression pExpr. Replace every reference to +** a column in table number iTable with a copy of the iColumn-th +** entry in pEList. (But leave references to the ROWID column +** unchanged.) +** +** This routine is part of the flattening procedure. A subquery +** whose result set is defined by pEList appears as an entry in the +** FROM clause of a SELECT such that the VDBE cursor assigned to that +** FROM clause entry is iTable. This routine makes the necessary +** changes to pExpr so that it refers directly to the source table +** of the subquery rather than the result set of the subquery.
+*/ +static Expr *substExpr( + Parse *pParse, /* Report errors here */ + Expr *pExpr, /* Expr in which substitution occurs */ + int iTable, /* Table to be substituted */ + ExprList *pEList /* Substitute expressions */ +){ + sqlite3 *db = pParse->db; + if( pExpr==0 ) return 0; + if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ + if( pExpr->iColumn<0 ){ + pExpr->op = TK_NULL; + }else{ + Expr *pNew; + Expr *pCopy = pEList->a[pExpr->iColumn].pExpr; + assert( pEList!=0 && pExpr->iColumn<pEList->nExpr ); + assert( pExpr->pLeft==0 && pExpr->pRight==0 ); + if( sqlite3ExprIsVector(pCopy) ){ + sqlite3VectorErrorMsg(pParse, pCopy); + }else{ + pNew = sqlite3ExprDup(db, pCopy, 0); + if( pNew && (pExpr->flags & EP_FromJoin) ){ + pNew->iRightJoinTable = pExpr->iRightJoinTable; + pNew->flags |= EP_FromJoin; + } + sqlite3ExprDelete(db, pExpr); + pExpr = pNew; + } + } + }else{ + pExpr->pLeft = substExpr(pParse, pExpr->pLeft, iTable, pEList); + pExpr->pRight = substExpr(pParse, pExpr->pRight, iTable, pEList); + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + substSelect(pParse, pExpr->x.pSelect, iTable, pEList, 1); + }else{ + substExprList(pParse, pExpr->x.pList, iTable, pEList); + } + } + return pExpr; +} +static void substExprList( + Parse *pParse, /* Report errors here */ + ExprList *pList, /* List to scan and in which to make substitutes */ + int iTable, /* Table to be substituted */ + ExprList *pEList /* Substitute values */ +){ + int i; + if( pList==0 ) return; + for(i=0; i<pList->nExpr; i++){ + pList->a[i].pExpr = substExpr(pParse, pList->a[i].pExpr, iTable, pEList); + } +} +static void substSelect( + Parse *pParse, /* Report errors here */ + Select *p, /* SELECT statement in which to make substitutions */ + int iTable, /* Table to be replaced */ + ExprList *pEList, /* Substitute values */ + int doPrior /* Do substitutes on p->pPrior too */ +){ + SrcList *pSrc; + struct SrcList_item *pItem; + int i; + if( !p ) return; + do{ + substExprList(pParse, p->pEList, iTable, pEList); + substExprList(pParse, p->pGroupBy, iTable, pEList); + substExprList(pParse, p->pOrderBy, iTable, pEList); + p->pHaving = substExpr(pParse, p->pHaving, iTable, pEList); + p->pWhere = substExpr(pParse, p->pWhere, iTable, pEList); + pSrc = p->pSrc; + assert( pSrc!=0 ); + for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ + substSelect(pParse, pItem->pSelect, iTable, pEList, 1); + if( pItem->fg.isTabFunc ){ + substExprList(pParse, pItem->u1.pFuncArg, iTable, pEList); + } + } + }while( doPrior && (p = p->pPrior)!=0 ); +} +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) +/* +** This routine attempts to flatten subqueries as a performance optimization. +** This routine returns 1 if it makes changes and 0 if no flattening occurs. +** +** To understand the concept of flattening, consider the following +** query: +** +** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 +** +** The default way of implementing this query is to execute the +** subquery first and store the results in a temporary table, then +** run the outer query on that temporary table. This requires two +** passes over the data. Furthermore, because the temporary table +** has no indices, the WHERE clause on the outer query cannot be +** optimized.
+** +** This routine attempts to rewrite queries such as the above into +** a single flat select, like this: +** +** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 +** +** The code generated for this simplification gives the same result +** but only has to scan the data once. And because indices might +** exist on the table t1, a complete scan of the data might be +** avoided. +** +** Flattening is only attempted if all of the following are true: +** +** (1) The subquery and the outer query do not both use aggregates. +** +** (2) The subquery is not an aggregate or (2a) the outer query is not a join +** and (2b) the outer query does not use subqueries other than the one +** FROM-clause subquery that is a candidate for flattening. (2b is +** due to ticket [2f7170d73bf9abf80] from 2015-02-09.) +** +** (3) The subquery is not the right operand of a left outer join +** (Originally ticket #306. Strengthened by ticket #3300) +** +** (4) The subquery is not DISTINCT. +** +** (**) At one point restrictions (4) and (5) defined a subset of DISTINCT +** sub-queries that were excluded from this optimization. Restriction +** (4) has since been expanded to exclude all DISTINCT subqueries. +** +** (6) The subquery does not use aggregates or the outer query is not +** DISTINCT. +** +** (7) The subquery has a FROM clause. TODO: For subqueries without +** A FROM clause, consider adding a FROM close with the special +** table sqlite_once that consists of a single row containing a +** single NULL. +** +** (8) The subquery does not use LIMIT or the outer query is not a join. +** +** (9) The subquery does not use LIMIT or the outer query does not use +** aggregates. +** +** (**) Restriction (10) was removed from the code on 2005-02-05 but we +** accidently carried the comment forward until 2014-09-15. Original +** text: "The subquery does not use aggregates or the outer query +** does not use LIMIT." +** +** (11) The subquery and the outer query do not both have ORDER BY clauses. +** +** (**) Not implemented. Subsumed into restriction (3). Was previously +** a separate restriction deriving from ticket #350. +** +** (13) The subquery and outer query do not both use LIMIT. +** +** (14) The subquery does not use OFFSET. +** +** (15) The outer query is not part of a compound select or the +** subquery does not have a LIMIT clause. +** (See ticket #2339 and ticket [02a8e81d44]). +** +** (16) The outer query is not an aggregate or the subquery does +** not contain ORDER BY. (Ticket #2942) This used to not matter +** until we introduced the group_concat() function. +** +** (17) The sub-query is not a compound select, or it is a UNION ALL +** compound clause made up entirely of non-aggregate queries, and +** the parent query: +** +** * is not itself part of a compound select, +** * is not an aggregate or DISTINCT query, and +** * is not a join +** +** The parent and sub-query may contain WHERE clauses. Subject to +** rules (11), (13) and (14), they may also contain ORDER BY, +** LIMIT and OFFSET clauses. The subquery cannot use any compound +** operator other than UNION ALL because all the other compound +** operators have an implied DISTINCT which is disallowed by +** restriction (4). +** +** Also, each component of the sub-query must return the same number +** of result columns. This is actually a requirement for any compound +** SELECT statement, but all the code here does is make sure that no +** such (illegal) sub-query is flattened. The caller will detect the +** syntax error and return a detailed message. 
+** +** (18) If the sub-query is a compound select, then all terms of the +** ORDER by clause of the parent must be simple references to +** columns of the sub-query. +** +** (19) The subquery does not use LIMIT or the outer query does not +** have a WHERE clause. +** +** (20) If the sub-query is a compound select, then it must not use +** an ORDER BY clause. Ticket #3773. We could relax this constraint +** somewhat by saying that the terms of the ORDER BY clause must +** appear as unmodified result columns in the outer query. But we +** have other optimizations in mind to deal with that case. +** +** (21) The subquery does not use LIMIT or the outer query is not +** DISTINCT. (See ticket [752e1646fc]). +** +** (22) The subquery is not a recursive CTE. +** +** (23) The parent is not a recursive CTE, or the sub-query is not a +** compound query. This restriction is because transforming the +** parent to a compound query confuses the code that handles +** recursive queries in multiSelect(). +** +** (24) The subquery is not an aggregate that uses the built-in min() or +** or max() functions. (Without this restriction, a query like: +** "SELECT x FROM (SELECT max(y), x FROM t1)" would not necessarily +** return the value X for which Y was maximal.) +** +** +** In this routine, the "p" parameter is a pointer to the outer query. +** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query +** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates. +** +** If flattening is not attempted, this routine is a no-op and returns 0. +** If flattening is attempted this routine returns 1. +** +** All of the expression analysis must occur on both the outer query and +** the subquery before this routine runs. +*/ +static int flattenSubquery( + Parse *pParse, /* Parsing context */ + Select *p, /* The parent or outer SELECT statement */ + int iFrom, /* Index in p->pSrc->a[] of the inner subquery */ + int isAgg, /* True if outer SELECT uses aggregate functions */ + int subqueryIsAgg /* True if the subquery uses aggregate functions */ +){ + const char *zSavedAuthContext = pParse->zAuthContext; + Select *pParent; /* Current UNION ALL term of the other query */ + Select *pSub; /* The inner query or "subquery" */ + Select *pSub1; /* Pointer to the rightmost select in sub-query */ + SrcList *pSrc; /* The FROM clause of the outer query */ + SrcList *pSubSrc; /* The FROM clause of the subquery */ + ExprList *pList; /* The result set of the outer query */ + int iParent; /* VDBE cursor number of the pSub result set temp table */ + int i; /* Loop counter */ + Expr *pWhere; /* The WHERE clause */ + struct SrcList_item *pSubitem; /* The subquery */ + sqlite3 *db = pParse->db; + + /* Check to see if flattening is permitted. Return 0 if not. 
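+ ** (Example of a query the tests below reject: in "SELECT * FROM t1 LEFT + ** JOIN (SELECT a FROM t2) ON t1.x=a" the subquery is the right operand of + ** a LEFT JOIN, so the JT_OUTER test, restriction (3), returns 0 and the + ** subquery is evaluated into an ephemeral table instead.)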
+ */ + assert( p!=0 ); + assert( p->pPrior==0 ); /* Unable to flatten compound queries */ + if( OptimizationDisabled(db, SQLITE_QueryFlattener) ) return 0; + pSrc = p->pSrc; + assert( pSrc && iFrom>=0 && iFromnSrc ); + pSubitem = &pSrc->a[iFrom]; + iParent = pSubitem->iCursor; + pSub = pSubitem->pSelect; + assert( pSub!=0 ); + if( subqueryIsAgg ){ + if( isAgg ) return 0; /* Restriction (1) */ + if( pSrc->nSrc>1 ) return 0; /* Restriction (2a) */ + if( (p->pWhere && ExprHasProperty(p->pWhere,EP_Subquery)) + || (sqlite3ExprListFlags(p->pEList) & EP_Subquery)!=0 + || (sqlite3ExprListFlags(p->pOrderBy) & EP_Subquery)!=0 + ){ + return 0; /* Restriction (2b) */ + } + } + + pSubSrc = pSub->pSrc; + assert( pSubSrc ); + /* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants, + ** not arbitrary expressions, we allowed some combining of LIMIT and OFFSET + ** because they could be computed at compile-time. But when LIMIT and OFFSET + ** became arbitrary expressions, we were forced to add restrictions (13) + ** and (14). */ + if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */ + if( pSub->pOffset ) return 0; /* Restriction (14) */ + if( (p->selFlags & SF_Compound)!=0 && pSub->pLimit ){ + return 0; /* Restriction (15) */ + } + if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */ + if( pSub->selFlags & SF_Distinct ) return 0; /* Restriction (5) */ + if( pSub->pLimit && (pSrc->nSrc>1 || isAgg) ){ + return 0; /* Restrictions (8)(9) */ + } + if( (p->selFlags & SF_Distinct)!=0 && subqueryIsAgg ){ + return 0; /* Restriction (6) */ + } + if( p->pOrderBy && pSub->pOrderBy ){ + return 0; /* Restriction (11) */ + } + if( isAgg && pSub->pOrderBy ) return 0; /* Restriction (16) */ + if( pSub->pLimit && p->pWhere ) return 0; /* Restriction (19) */ + if( pSub->pLimit && (p->selFlags & SF_Distinct)!=0 ){ + return 0; /* Restriction (21) */ + } + testcase( pSub->selFlags & SF_Recursive ); + testcase( pSub->selFlags & SF_MinMaxAgg ); + if( pSub->selFlags & (SF_Recursive|SF_MinMaxAgg) ){ + return 0; /* Restrictions (22) and (24) */ + } + if( (p->selFlags & SF_Recursive) && pSub->pPrior ){ + return 0; /* Restriction (23) */ + } + + /* OBSOLETE COMMENT 1: + ** Restriction 3: If the subquery is a join, make sure the subquery is + ** not used as the right operand of an outer join. Examples of why this + ** is not allowed: + ** + ** t1 LEFT OUTER JOIN (t2 JOIN t3) + ** + ** If we flatten the above, we would get + ** + ** (t1 LEFT OUTER JOIN t2) JOIN t3 + ** + ** which is not at all the same thing. + ** + ** OBSOLETE COMMENT 2: + ** Restriction 12: If the subquery is the right operand of a left outer + ** join, make sure the subquery has no WHERE clause. + ** An examples of why this is not allowed: + ** + ** t1 LEFT OUTER JOIN (SELECT * FROM t2 WHERE t2.x>0) + ** + ** If we flatten the above, we would get + ** + ** (t1 LEFT OUTER JOIN t2) WHERE t2.x>0 + ** + ** But the t2.x>0 test will always fail on a NULL row of t2, which + ** effectively converts the OUTER JOIN into an INNER JOIN. + ** + ** THIS OVERRIDES OBSOLETE COMMENTS 1 AND 2 ABOVE: + ** Ticket #3300 shows that flattening the right term of a LEFT JOIN + ** is fraught with danger. Best to avoid the whole thing. If the + ** subquery is the right term of a LEFT JOIN, then do not flatten. + */ + if( (pSubitem->fg.jointype & JT_OUTER)!=0 ){ + return 0; + } + + /* Restriction 17: If the sub-query is a compound SELECT, then it must + ** use only the UNION ALL operator. 
And none of the simple select queries + ** that make up the compound SELECT are allowed to be aggregate or distinct + ** queries. + */ + if( pSub->pPrior ){ + if( pSub->pOrderBy ){ + return 0; /* Restriction 20 */ + } + if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){ + return 0; + } + for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){ + testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); + testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); + assert( pSub->pSrc!=0 ); + assert( pSub->pEList->nExpr==pSub1->pEList->nExpr ); + if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 + || (pSub1->pPrior && pSub1->op!=TK_ALL) + || pSub1->pSrc->nSrc<1 + ){ + return 0; + } + testcase( pSub1->pSrc->nSrc>1 ); + } + + /* Restriction 18. */ + if( p->pOrderBy ){ + int ii; + for(ii=0; ii<p->pOrderBy->nExpr; ii++){ + if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; + } + } + } + + /***** If we reach this point, flattening is permitted. *****/ + SELECTTRACE(1,pParse,p,("flatten %s.%p from term %d\n", + pSub->zSelName, pSub, iFrom)); + + /* Authorize the subquery */ + pParse->zAuthContext = pSubitem->zName; + TESTONLY(i =) sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0); + testcase( i==SQLITE_DENY ); + pParse->zAuthContext = zSavedAuthContext; + + /* If the sub-query is a compound SELECT statement, then (by restrictions + ** 17 and 18 above) it must be a UNION ALL and the parent query must + ** be of the form: + ** + ** SELECT <expr-list> FROM (<sub-query>) <where-clause> + ** + ** followed by any ORDER BY, LIMIT and/or OFFSET clauses. This block + ** creates N-1 copies of the parent query without any ORDER BY, LIMIT or + ** OFFSET clauses and joins them to the left-hand-side of the original + ** using UNION ALL operators. In this case N is the number of simple + ** select statements in the compound sub-query. + ** + ** Example: + ** + ** SELECT a+1 FROM ( + ** SELECT x FROM tab + ** UNION ALL + ** SELECT y FROM tab + ** UNION ALL + ** SELECT abs(z*2) FROM tab2 + ** ) WHERE a!=5 ORDER BY 1 + ** + ** Transformed into: + ** + ** SELECT x+1 FROM tab WHERE x+1!=5 + ** UNION ALL + ** SELECT y+1 FROM tab WHERE y+1!=5 + ** UNION ALL + ** SELECT abs(z*2)+1 FROM tab2 WHERE abs(z*2)+1!=5 + ** ORDER BY 1 + ** + ** We call this the "compound-subquery flattening". + */ + for(pSub=pSub->pPrior; pSub; pSub=pSub->pPrior){ + Select *pNew; + ExprList *pOrderBy = p->pOrderBy; + Expr *pLimit = p->pLimit; + Expr *pOffset = p->pOffset; + Select *pPrior = p->pPrior; + p->pOrderBy = 0; + p->pSrc = 0; + p->pPrior = 0; + p->pLimit = 0; + p->pOffset = 0; + pNew = sqlite3SelectDup(db, p, 0); + sqlite3SelectSetName(pNew, pSub->zSelName); + p->pOffset = pOffset; + p->pLimit = pLimit; + p->pOrderBy = pOrderBy; + p->pSrc = pSrc; + p->op = TK_ALL; + if( pNew==0 ){ + p->pPrior = pPrior; + }else{ + pNew->pPrior = pPrior; + if( pPrior ) pPrior->pNext = pNew; + pNew->pNext = p; + p->pPrior = pNew; + SELECTTRACE(2,pParse,p, + ("compound-subquery flattener creates %s.%p as peer\n", + pNew->zSelName, pNew)); + } + if( db->mallocFailed ) return 1; + } + + /* Begin flattening the iFrom-th entry of the FROM clause + ** in the outer query.
+ */ + pSub = pSub1 = pSubitem->pSelect; + + /* Delete the transient table structure associated with the + ** subquery + */ + sqlite3DbFree(db, pSubitem->zDatabase); + sqlite3DbFree(db, pSubitem->zName); + sqlite3DbFree(db, pSubitem->zAlias); + pSubitem->zDatabase = 0; + pSubitem->zName = 0; + pSubitem->zAlias = 0; + pSubitem->pSelect = 0; + + /* Defer deleting the Table object associated with the + ** subquery until code generation is + ** complete, since there may still exist Expr.pTab entries that + ** refer to the subquery even after flattening. Ticket #3346. + ** + ** pSubitem->pTab is always non-NULL by test restrictions and tests above. + */ + if( ALWAYS(pSubitem->pTab!=0) ){ + Table *pTabToDel = pSubitem->pTab; + if( pTabToDel->nTabRef==1 ){ + Parse *pToplevel = sqlite3ParseToplevel(pParse); + pTabToDel->pNextZombie = pToplevel->pZombieTab; + pToplevel->pZombieTab = pTabToDel; + }else{ + pTabToDel->nTabRef--; + } + pSubitem->pTab = 0; + } + + /* The following loop runs once for each term in a compound-subquery + ** flattening (as described above). If we are doing a different kind + ** of flattening - a flattening other than a compound-subquery flattening - + ** then this loop only runs once. + ** + ** This loop moves all of the FROM elements of the subquery into the + ** FROM clause of the outer query. Before doing this, remember + ** the cursor number for the original outer query FROM element in + ** iParent. The iParent cursor will never be used. Subsequent code + ** will scan expressions looking for iParent references and replace + ** those references with expressions that resolve to the subquery FROM + ** elements we are now copying in. + */ + for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ + int nSubSrc; + u8 jointype = 0; + pSubSrc = pSub->pSrc; /* FROM clause of subquery */ + nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ + pSrc = pParent->pSrc; /* FROM clause of the outer query */ + + if( pSrc ){ + assert( pParent==p ); /* First time through the loop */ + jointype = pSubitem->fg.jointype; + }else{ + assert( pParent!=p ); /* 2nd and subsequent times through the loop */ + pSrc = pParent->pSrc = sqlite3SrcListAppend(db, 0, 0, 0); + if( pSrc==0 ){ + assert( db->mallocFailed ); + break; + } + } + + /* The subquery uses a single slot of the FROM clause of the outer + ** query. If the subquery has more than one element in its FROM clause, + ** then expand the outer query to make space for it to hold all elements + ** of the subquery. + ** + ** Example: + ** + ** SELECT * FROM tabA, (SELECT * FROM sub1, sub2), tabB; + ** + ** The outer query has 3 slots in its FROM clause. One slot of the + ** outer query (the middle slot) is used by the subquery. The next + ** block of code will expand the outer query FROM clause to 4 slots. + ** The middle slot is expanded to two slots in order to make space + ** for the two elements in the FROM clause of the subquery. + */ + if( nSubSrc>1 ){ + pParent->pSrc = pSrc = sqlite3SrcListEnlarge(db, pSrc, nSubSrc-1,iFrom+1); + if( db->mallocFailed ){ + break; + } + } + + /* Transfer the FROM clause terms from the subquery into the + ** outer query. + */ + for(i=0; i<nSubSrc; i++){ + sqlite3IdListDelete(db, pSrc->a[i+iFrom].pUsing); + assert( pSrc->a[i+iFrom].fg.isTabFunc==0 ); + pSrc->a[i+iFrom] = pSubSrc->a[i]; + memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); + } + pSrc->a[iFrom].fg.jointype = jointype; + + /* Now begin substituting subquery result set expressions for + ** references to the iParent in the outer query.
+ **
+ ** Example:
+ **
+ ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b;
+ ** \ \_____________ subquery __________/ /
+ ** \_____________________ outer query ______________________________/
+ **
+ ** We look at every expression in the outer query and every place we see
+ ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10".
+ */
+ pList = pParent->pEList;
+ for(i=0; i<pList->nExpr; i++){
+ if( pList->a[i].zName==0 ){
+ char *zName = sqlite3DbStrDup(db, pList->a[i].zSpan);
+ sqlite3Dequote(zName);
+ pList->a[i].zName = zName;
+ }
+ }
+ if( pSub->pOrderBy ){
+ /* At this point, any non-zero iOrderByCol values indicate that the
+ ** ORDER BY column expression is identical to the iOrderByCol'th
+ ** expression returned by SELECT statement pSub. Since these values
+ ** do not necessarily correspond to columns in SELECT statement pParent,
+ ** zero them before transferring the ORDER BY clause.
+ **
+ ** Not doing this may cause an error if a subsequent call to this
+ ** function attempts to flatten a compound sub-query into pParent
+ ** (the only way this can happen is if the compound sub-query is
+ ** currently part of pSub->pSrc). See ticket [d11a6e908f]. */
+ ExprList *pOrderBy = pSub->pOrderBy;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].u.x.iOrderByCol = 0;
+ }
+ assert( pParent->pOrderBy==0 );
+ assert( pSub->pPrior==0 );
+ pParent->pOrderBy = pOrderBy;
+ pSub->pOrderBy = 0;
+ }
+ pWhere = sqlite3ExprDup(db, pSub->pWhere, 0);
+ if( subqueryIsAgg ){
+ assert( pParent->pHaving==0 );
+ pParent->pHaving = pParent->pWhere;
+ pParent->pWhere = pWhere;
+ pParent->pHaving = sqlite3ExprAnd(db,
+ sqlite3ExprDup(db, pSub->pHaving, 0), pParent->pHaving
+ );
+ assert( pParent->pGroupBy==0 );
+ pParent->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy, 0);
+ }else{
+ pParent->pWhere = sqlite3ExprAnd(db, pWhere, pParent->pWhere);
+ }
+ substSelect(pParse, pParent, iParent, pSub->pEList, 0);
+
+ /* The flattened query is distinct if either the inner or the
+ ** outer query is distinct.
+ */
+ pParent->selFlags |= pSub->selFlags & SF_Distinct;
+
+ /*
+ ** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y;
+ **
+ ** One is tempted to try to add a and b to combine the limits. But this
+ ** does not work if either limit is negative.
+ */
+ if( pSub->pLimit ){
+ pParent->pLimit = pSub->pLimit;
+ pSub->pLimit = 0;
+ }
+ }
+
+ /* Finally, delete what is left of the subquery and return
+ ** success.
+ */
+ sqlite3SelectDelete(db, pSub1);
+
+#if SELECTTRACE_ENABLED
+ if( sqlite3SelectTrace & 0x100 ){
+ SELECTTRACE(0x100,pParse,p,("After flattening:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
+
+ return 1;
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
+
+
+
+#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
+/*
+** Make copies of relevant WHERE clause terms of the outer query into
+** the WHERE clause of subquery. Example:
+**
+** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1) WHERE x=5 AND y=10;
+**
+** Transformed into:
+**
+** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1 WHERE a=5 AND c-d=10)
+** WHERE x=5 AND y=10;
+**
+** The hope is that the terms added to the inner query will make it more
+** efficient.
+**
+** Do not attempt this optimization if:
+**
+** (1) The inner query is an aggregate. (In that case, we'd really want
+** to copy the outer WHERE-clause terms onto the HAVING clause of the
+** inner query. But they probably won't help there so do not bother.)
+** +** (2) The inner query is the recursive part of a common table expression. +** +** (3) The inner query has a LIMIT clause (since the changes to the WHERE +** close would change the meaning of the LIMIT). +** +** (4) The inner query is the right operand of a LEFT JOIN. (The caller +** enforces this restriction since this routine does not have enough +** information to know.) +** +** (5) The WHERE clause expression originates in the ON or USING clause +** of a LEFT JOIN. +** +** Return 0 if no changes are made and non-zero if one or more WHERE clause +** terms are duplicated into the subquery. +*/ +static int pushDownWhereTerms( + Parse *pParse, /* Parse context (for malloc() and error reporting) */ + Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ + Expr *pWhere, /* The WHERE clause of the outer query */ + int iCursor /* Cursor number of the subquery */ +){ + Expr *pNew; + int nChng = 0; + Select *pX; /* For looping over compound SELECTs in pSubq */ + if( pWhere==0 ) return 0; + for(pX=pSubq; pX; pX=pX->pPrior){ + if( (pX->selFlags & (SF_Aggregate|SF_Recursive))!=0 ){ + testcase( pX->selFlags & SF_Aggregate ); + testcase( pX->selFlags & SF_Recursive ); + testcase( pX!=pSubq ); + return 0; /* restrictions (1) and (2) */ + } + } + if( pSubq->pLimit!=0 ){ + return 0; /* restriction (3) */ + } + while( pWhere->op==TK_AND ){ + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, iCursor); + pWhere = pWhere->pLeft; + } + if( ExprHasProperty(pWhere,EP_FromJoin) ) return 0; /* restriction 5 */ + if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ + nChng++; + while( pSubq ){ + pNew = sqlite3ExprDup(pParse->db, pWhere, 0); + pNew = substExpr(pParse, pNew, iCursor, pSubq->pEList); + pSubq->pWhere = sqlite3ExprAnd(pParse->db, pSubq->pWhere, pNew); + pSubq = pSubq->pPrior; + } + } + return nChng; +} +#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ + +/* +** Based on the contents of the AggInfo structure indicated by the first +** argument, this function checks if the following are true: +** +** * the query contains just a single aggregate function, +** * the aggregate function is either min() or max(), and +** * the argument to the aggregate function is a column value. +** +** If all of the above are true, then WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX +** is returned as appropriate. Also, *ppMinMax is set to point to the +** list of arguments passed to the aggregate before returning. +** +** Or, if the conditions above are not met, *ppMinMax is set to 0 and +** WHERE_ORDERBY_NORMAL is returned. +*/ +static u8 minMaxQuery(AggInfo *pAggInfo, ExprList **ppMinMax){ + int eRet = WHERE_ORDERBY_NORMAL; /* Return value */ + + *ppMinMax = 0; + if( pAggInfo->nFunc==1 ){ + Expr *pExpr = pAggInfo->aFunc[0].pExpr; /* Aggregate function */ + ExprList *pEList = pExpr->x.pList; /* Arguments to agg function */ + + assert( pExpr->op==TK_AGG_FUNCTION ); + if( pEList && pEList->nExpr==1 && pEList->a[0].pExpr->op==TK_AGG_COLUMN ){ + const char *zFunc = pExpr->u.zToken; + if( sqlite3StrICmp(zFunc, "min")==0 ){ + eRet = WHERE_ORDERBY_MIN; + *ppMinMax = pEList; + }else if( sqlite3StrICmp(zFunc, "max")==0 ){ + eRet = WHERE_ORDERBY_MAX; + *ppMinMax = pEList; + } + } + } + + assert( *ppMinMax==0 || (*ppMinMax)->nExpr==1 ); + return eRet; +} + +/* +** The select statement passed as the first argument is an aggregate query. +** The second argument is the associated aggregate-info object. 
This +** function tests if the SELECT is of the form: +** +** SELECT count(*) FROM +** +** where table is a database table, not a sub-select or view. If the query +** does match this pattern, then a pointer to the Table object representing +** is returned. Otherwise, 0 is returned. +*/ +static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ + Table *pTab; + Expr *pExpr; + + assert( !p->pGroupBy ); + + if( p->pWhere || p->pEList->nExpr!=1 + || p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect + ){ + return 0; + } + pTab = p->pSrc->a[0].pTab; + pExpr = p->pEList->a[0].pExpr; + assert( pTab && !pTab->pSelect && pExpr ); + + if( IsVirtual(pTab) ) return 0; + if( pExpr->op!=TK_AGG_FUNCTION ) return 0; + if( NEVER(pAggInfo->nFunc==0) ) return 0; + if( (pAggInfo->aFunc[0].pFunc->funcFlags&SQLITE_FUNC_COUNT)==0 ) return 0; + if( pExpr->flags&EP_Distinct ) return 0; + + return pTab; +} + +/* +** If the source-list item passed as an argument was augmented with an +** INDEXED BY clause, then try to locate the specified index. If there +** was such a clause and the named index cannot be found, return +** SQLITE_ERROR and leave an error in pParse. Otherwise, populate +** pFrom->pIndex and return SQLITE_OK. +*/ +SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ + if( pFrom->pTab && pFrom->fg.isIndexedBy ){ + Table *pTab = pFrom->pTab; + char *zIndexedBy = pFrom->u1.zIndexedBy; + Index *pIdx; + for(pIdx=pTab->pIndex; + pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy); + pIdx=pIdx->pNext + ); + if( !pIdx ){ + sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0); + pParse->checkSchema = 1; + return SQLITE_ERROR; + } + pFrom->pIBIndex = pIdx; + } + return SQLITE_OK; +} +/* +** Detect compound SELECT statements that use an ORDER BY clause with +** an alternative collating sequence. +** +** SELECT ... FROM t1 EXCEPT SELECT ... FROM t2 ORDER BY .. COLLATE ... +** +** These are rewritten as a subquery: +** +** SELECT * FROM (SELECT ... FROM t1 EXCEPT SELECT ... FROM t2) +** ORDER BY ... COLLATE ... +** +** This transformation is necessary because the multiSelectOrderBy() routine +** above that generates the code for a compound SELECT with an ORDER BY clause +** uses a merge algorithm that requires the same collating sequence on the +** result columns as on the ORDER BY clause. See ticket +** http://www.sqlite.org/src/info/6709574d2a +** +** This transformation is only needed for EXCEPT, INTERSECT, and UNION. +** The UNION ALL operator works fine with multiSelectOrderBy() even when +** there are COLLATE terms in the ORDER BY. +*/ +static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ + int i; + Select *pNew; + Select *pX; + sqlite3 *db; + struct ExprList_item *a; + SrcList *pNewSrc; + Parse *pParse; + Token dummy; + + if( p->pPrior==0 ) return WRC_Continue; + if( p->pOrderBy==0 ) return WRC_Continue; + for(pX=p; pX && (pX->op==TK_ALL || pX->op==TK_SELECT); pX=pX->pPrior){} + if( pX==0 ) return WRC_Continue; + a = p->pOrderBy->a; + for(i=p->pOrderBy->nExpr-1; i>=0; i--){ + if( a[i].pExpr->flags & EP_Collate ) break; + } + if( i<0 ) return WRC_Continue; + + /* If we reach this point, that means the transformation is required. 
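+ ** For example, a query such as
+ **
+ **     SELECT a FROM t1 EXCEPT SELECT b FROM t2 ORDER BY 1 COLLATE nocase
+ **
+ ** is rewritten as
+ **
+ **     SELECT * FROM (SELECT a FROM t1 EXCEPT SELECT b FROM t2)
+ **     ORDER BY 1 COLLATE nocase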
*/ + + pParse = pWalker->pParse; + db = pParse->db; + pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); + if( pNew==0 ) return WRC_Abort; + memset(&dummy, 0, sizeof(dummy)); + pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0); + if( pNewSrc==0 ) return WRC_Abort; + *pNew = *p; + p->pSrc = pNewSrc; + p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ASTERISK, 0)); + p->op = TK_SELECT; + p->pWhere = 0; + pNew->pGroupBy = 0; + pNew->pHaving = 0; + pNew->pOrderBy = 0; + p->pPrior = 0; + p->pNext = 0; + p->pWith = 0; + p->selFlags &= ~SF_Compound; + assert( (p->selFlags & SF_Converted)==0 ); + p->selFlags |= SF_Converted; + assert( pNew->pPrior!=0 ); + pNew->pPrior->pNext = pNew; + pNew->pLimit = 0; + pNew->pOffset = 0; + return WRC_Continue; +} + +/* +** Check to see if the FROM clause term pFrom has table-valued function +** arguments. If it does, leave an error message in pParse and return +** non-zero, since pFrom is not allowed to be a table-valued function. +*/ +static int cannotBeFunction(Parse *pParse, struct SrcList_item *pFrom){ + if( pFrom->fg.isTabFunc ){ + sqlite3ErrorMsg(pParse, "'%s' is not a function", pFrom->zName); + return 1; + } + return 0; +} + +#ifndef SQLITE_OMIT_CTE +/* +** Argument pWith (which may be NULL) points to a linked list of nested +** WITH contexts, from inner to outermost. If the table identified by +** FROM clause element pItem is really a common-table-expression (CTE) +** then return a pointer to the CTE definition for that table. Otherwise +** return NULL. +** +** If a non-NULL value is returned, set *ppContext to point to the With +** object that the returned CTE belongs to. +*/ +static struct Cte *searchWith( + With *pWith, /* Current innermost WITH clause */ + struct SrcList_item *pItem, /* FROM clause element to resolve */ + With **ppContext /* OUT: WITH clause return value belongs to */ +){ + const char *zName; + if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){ + With *p; + for(p=pWith; p; p=p->pOuter){ + int i; + for(i=0; inCte; i++){ + if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ + *ppContext = p; + return &p->a[i]; + } + } + } + } + return 0; +} + +/* The code generator maintains a stack of active WITH clauses +** with the inner-most WITH clause being at the top of the stack. +** +** This routine pushes the WITH clause passed as the second argument +** onto the top of the stack. If argument bFree is true, then this +** WITH clause will never be popped from the stack. In this case it +** should be freed along with the Parse object. In other cases, when +** bFree==0, the With object will be freed along with the SELECT +** statement with which it is associated. +*/ +SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ + assert( bFree==0 || (pParse->pWith==0 && pParse->pWithToFree==0) ); + if( pWith ){ + assert( pParse->pWith!=pWith ); + pWith->pOuter = pParse->pWith; + pParse->pWith = pWith; + if( bFree ) pParse->pWithToFree = pWith; + } +} + +/* +** This function checks if argument pFrom refers to a CTE declared by +** a WITH clause on the stack currently maintained by the parser. And, +** if currently processing a CTE expression, if it is a recursive +** reference to the current CTE. +** +** If pFrom falls into either of the two categories above, pFrom->pTab +** and other fields are populated accordingly. The caller should check +** (pFrom->pTab!=0) to determine whether or not a successful match +** was found. 
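+ ** For example, given "WITH c(x) AS (SELECT 1) SELECT x FROM c", the
+ ** FROM-clause term "c" matches the CTE named "c", and pFrom->pTab is
+ ** populated from that CTE definition.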
+** +** Whether or not a match is found, SQLITE_OK is returned if no error +** occurs. If an error does occur, an error message is stored in the +** parser and some error code other than SQLITE_OK returned. +*/ +static int withExpand( + Walker *pWalker, + struct SrcList_item *pFrom +){ + Parse *pParse = pWalker->pParse; + sqlite3 *db = pParse->db; + struct Cte *pCte; /* Matched CTE (or NULL if no match) */ + With *pWith; /* WITH clause that pCte belongs to */ + + assert( pFrom->pTab==0 ); + + pCte = searchWith(pParse->pWith, pFrom, &pWith); + if( pCte ){ + Table *pTab; + ExprList *pEList; + Select *pSel; + Select *pLeft; /* Left-most SELECT statement */ + int bMayRecursive; /* True if compound joined by UNION [ALL] */ + With *pSavedWith; /* Initial value of pParse->pWith */ + + /* If pCte->zCteErr is non-NULL at this point, then this is an illegal + ** recursive reference to CTE pCte. Leave an error in pParse and return + ** early. If pCte->zCteErr is NULL, then this is not a recursive reference. + ** In this case, proceed. */ + if( pCte->zCteErr ){ + sqlite3ErrorMsg(pParse, pCte->zCteErr, pCte->zName); + return SQLITE_ERROR; + } + if( cannotBeFunction(pParse, pFrom) ) return SQLITE_ERROR; + + assert( pFrom->pTab==0 ); + pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); + if( pTab==0 ) return WRC_Abort; + pTab->nTabRef = 1; + pTab->zName = sqlite3DbStrDup(db, pCte->zName); + pTab->iPKey = -1; + pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); + pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; + pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); + if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; + assert( pFrom->pSelect ); + + /* Check if this is a recursive CTE. */ + pSel = pFrom->pSelect; + bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); + if( bMayRecursive ){ + int i; + SrcList *pSrc = pFrom->pSelect->pSrc; + for(i=0; inSrc; i++){ + struct SrcList_item *pItem = &pSrc->a[i]; + if( pItem->zDatabase==0 + && pItem->zName!=0 + && 0==sqlite3StrICmp(pItem->zName, pCte->zName) + ){ + pItem->pTab = pTab; + pItem->fg.isRecursive = 1; + pTab->nTabRef++; + pSel->selFlags |= SF_Recursive; + } + } + } + + /* Only one recursive reference is permitted. 
*/ + if( pTab->nTabRef>2 ){ + sqlite3ErrorMsg( + pParse, "multiple references to recursive table: %s", pCte->zName + ); + return SQLITE_ERROR; + } + assert( pTab->nTabRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 )); + + pCte->zCteErr = "circular reference: %s"; + pSavedWith = pParse->pWith; + pParse->pWith = pWith; + if( bMayRecursive ){ + Select *pPrior = pSel->pPrior; + assert( pPrior->pWith==0 ); + pPrior->pWith = pSel->pWith; + sqlite3WalkSelect(pWalker, pPrior); + pPrior->pWith = 0; + }else{ + sqlite3WalkSelect(pWalker, pSel); + } + pParse->pWith = pWith; + + for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior); + pEList = pLeft->pEList; + if( pCte->pCols ){ + if( pEList && pEList->nExpr!=pCte->pCols->nExpr ){ + sqlite3ErrorMsg(pParse, "table %s has %d values for %d columns", + pCte->zName, pEList->nExpr, pCte->pCols->nExpr + ); + pParse->pWith = pSavedWith; + return SQLITE_ERROR; + } + pEList = pCte->pCols; + } + + sqlite3ColumnsFromExprList(pParse, pEList, &pTab->nCol, &pTab->aCol); + if( bMayRecursive ){ + if( pSel->selFlags & SF_Recursive ){ + pCte->zCteErr = "multiple recursive references: %s"; + }else{ + pCte->zCteErr = "recursive reference in a subquery: %s"; + } + sqlite3WalkSelect(pWalker, pSel); + } + pCte->zCteErr = 0; + pParse->pWith = pSavedWith; + } + + return SQLITE_OK; +} +#endif + +#ifndef SQLITE_OMIT_CTE +/* +** If the SELECT passed as the second argument has an associated WITH +** clause, pop it from the stack stored as part of the Parse object. +** +** This function is used as the xSelectCallback2() callback by +** sqlite3SelectExpand() when walking a SELECT tree to resolve table +** names and other FROM clause elements. +*/ +static void selectPopWith(Walker *pWalker, Select *p){ + Parse *pParse = pWalker->pParse; + if( pParse->pWith && p->pPrior==0 ){ + With *pWith = findRightmost(p)->pWith; + if( pWith!=0 ){ + assert( pParse->pWith==pWith ); + pParse->pWith = pWith->pOuter; + } + } +} +#else +#define selectPopWith 0 +#endif + +/* +** This routine is a Walker callback for "expanding" a SELECT statement. +** "Expanding" means to do the following: +** +** (1) Make sure VDBE cursor numbers have been assigned to every +** element of the FROM clause. +** +** (2) Fill in the pTabList->a[].pTab fields in the SrcList that +** defines FROM clause. When views appear in the FROM clause, +** fill pTabList->a[].pSelect with a copy of the SELECT statement +** that implements the view. A copy is made of the view's SELECT +** statement so that we can freely modify or delete that statement +** without worrying about messing up the persistent representation +** of the view. +** +** (3) Add terms to the WHERE clause to accommodate the NATURAL keyword +** on joins and the ON and USING clause of joins. +** +** (4) Scan the list of columns in the result set (pEList) looking +** for instances of the "*" operator or the TABLE.* operator. +** If found, expand each "*" to be every column in every table +** and TABLE.* to be every column in TABLE. 
+** +*/ +static int selectExpander(Walker *pWalker, Select *p){ + Parse *pParse = pWalker->pParse; + int i, j, k; + SrcList *pTabList; + ExprList *pEList; + struct SrcList_item *pFrom; + sqlite3 *db = pParse->db; + Expr *pE, *pRight, *pExpr; + u16 selFlags = p->selFlags; + + p->selFlags |= SF_Expanded; + if( db->mallocFailed ){ + return WRC_Abort; + } + if( NEVER(p->pSrc==0) || (selFlags & SF_Expanded)!=0 ){ + return WRC_Prune; + } + pTabList = p->pSrc; + pEList = p->pEList; + if( p->pWith ){ + sqlite3WithPush(pParse, p->pWith, 0); + } + + /* Make sure cursor numbers have been assigned to all entries in + ** the FROM clause of the SELECT statement. + */ + sqlite3SrcListAssignCursors(pParse, pTabList); + + /* Look up every table named in the FROM clause of the select. If + ** an entry of the FROM clause is a subquery instead of a table or view, + ** then create a transient table structure to describe the subquery. + */ + for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ + Table *pTab; + assert( pFrom->fg.isRecursive==0 || pFrom->pTab!=0 ); + if( pFrom->fg.isRecursive ) continue; + assert( pFrom->pTab==0 ); +#ifndef SQLITE_OMIT_CTE + if( withExpand(pWalker, pFrom) ) return WRC_Abort; + if( pFrom->pTab ) {} else +#endif + if( pFrom->zName==0 ){ +#ifndef SQLITE_OMIT_SUBQUERY + Select *pSel = pFrom->pSelect; + /* A sub-query in the FROM clause of a SELECT */ + assert( pSel!=0 ); + assert( pFrom->pTab==0 ); + if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; + pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); + if( pTab==0 ) return WRC_Abort; + pTab->nTabRef = 1; + pTab->zName = sqlite3MPrintf(db, "sqlite_sq_%p", (void*)pTab); + while( pSel->pPrior ){ pSel = pSel->pPrior; } + sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol); + pTab->iPKey = -1; + pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); + pTab->tabFlags |= TF_Ephemeral; +#endif + }else{ + /* An ordinary table or view name in the FROM clause */ + assert( pFrom->pTab==0 ); + pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); + if( pTab==0 ) return WRC_Abort; + if( pTab->nTabRef>=0xffff ){ + sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", + pTab->zName); + pFrom->pTab = 0; + return WRC_Abort; + } + pTab->nTabRef++; + if( !IsVirtual(pTab) && cannotBeFunction(pParse, pFrom) ){ + return WRC_Abort; + } +#if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE) + if( IsVirtual(pTab) || pTab->pSelect ){ + i16 nCol; + if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; + assert( pFrom->pSelect==0 ); + pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0); + sqlite3SelectSetName(pFrom->pSelect, pTab->zName); + nCol = pTab->nCol; + pTab->nCol = -1; + sqlite3WalkSelect(pWalker, pFrom->pSelect); + pTab->nCol = nCol; + } +#endif + } + + /* Locate the index named by the INDEXED BY clause, if any. */ + if( sqlite3IndexedByLookup(pParse, pFrom) ){ + return WRC_Abort; + } + } + + /* Process NATURAL keywords, and ON and USING clauses of joins. + */ + if( db->mallocFailed || sqliteProcessJoin(pParse, p) ){ + return WRC_Abort; + } + + /* For every "*" that occurs in the column list, insert the names of + ** all columns in all tables. And for every TABLE.* insert the names + ** of all columns in TABLE. The parser inserted a special expression + ** with the TK_ASTERISK operator for each "*" that it found in the column + ** list. 
The following code just has to locate the TK_ASTERISK
+ ** expressions and expand each one to the list of all columns in
+ ** all tables.
+ **
+ ** The first loop just checks to see if there are any "*" operators
+ ** that need expanding.
+ */
+ for(k=0; k<pEList->nExpr; k++){
+ pE = pEList->a[k].pExpr;
+ if( pE->op==TK_ASTERISK ) break;
+ assert( pE->op!=TK_DOT || pE->pRight!=0 );
+ assert( pE->op!=TK_DOT || (pE->pLeft!=0 && pE->pLeft->op==TK_ID) );
+ if( pE->op==TK_DOT && pE->pRight->op==TK_ASTERISK ) break;
+ }
+ if( k<pEList->nExpr ){
+ /*
+ ** If we get here it means the result set contains one or more "*"
+ ** operators that need to be expanded. Loop through each expression
+ ** in the result set and expand them one by one.
+ */
+ struct ExprList_item *a = pEList->a;
+ ExprList *pNew = 0;
+ int flags = pParse->db->flags;
+ int longNames = (flags & SQLITE_FullColNames)!=0
+ && (flags & SQLITE_ShortColNames)==0;
+
+ for(k=0; k<pEList->nExpr; k++){
+ pE = a[k].pExpr;
+ pRight = pE->pRight;
+ assert( pE->op!=TK_DOT || pRight!=0 );
+ if( pE->op!=TK_ASTERISK
+ && (pE->op!=TK_DOT || pRight->op!=TK_ASTERISK)
+ ){
+ /* This particular expression does not need to be expanded.
+ */
+ pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr);
+ if( pNew ){
+ pNew->a[pNew->nExpr-1].zName = a[k].zName;
+ pNew->a[pNew->nExpr-1].zSpan = a[k].zSpan;
+ a[k].zName = 0;
+ a[k].zSpan = 0;
+ }
+ a[k].pExpr = 0;
+ }else{
+ /* This expression is a "*" or a "TABLE.*" and needs to be
+ ** expanded. */
+ int tableSeen = 0; /* Set to 1 when TABLE matches */
+ char *zTName = 0; /* text of name of TABLE */
+ if( pE->op==TK_DOT ){
+ assert( pE->pLeft!=0 );
+ assert( !ExprHasProperty(pE->pLeft, EP_IntValue) );
+ zTName = pE->pLeft->u.zToken;
+ }
+ for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
+ Table *pTab = pFrom->pTab;
+ Select *pSub = pFrom->pSelect;
+ char *zTabName = pFrom->zAlias;
+ const char *zSchemaName = 0;
+ int iDb;
+ if( zTabName==0 ){
+ zTabName = pTab->zName;
+ }
+ if( db->mallocFailed ) break;
+ if( pSub==0 || (pSub->selFlags & SF_NestedFrom)==0 ){
+ pSub = 0;
+ if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
+ continue;
+ }
+ iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+ zSchemaName = iDb>=0 ? db->aDb[iDb].zDbSName : "*";
+ }
+ for(j=0; j<pTab->nCol; j++){
+ char *zName = pTab->aCol[j].zName;
+ char *zColname; /* The computed column name */
+ char *zToFree; /* Malloced string that needs to be freed */
+ Token sColname; /* Computed column name as a token */
+
+ assert( zName );
+ if( zTName && pSub
+ && sqlite3MatchSpanName(pSub->pEList->a[j].zSpan, 0, zTName, 0)==0
+ ){
+ continue;
+ }
+
+ /* If a column is marked as 'hidden', omit it from the expanded
+ ** result-set list unless the SELECT has the SF_IncludeHidden
+ ** bit set.
+ */
+ if( (p->selFlags & SF_IncludeHidden)==0
+ && IsHiddenColumn(&pTab->aCol[j])
+ ){
+ continue;
+ }
+ tableSeen = 1;
+
+ if( i>0 && zTName==0 ){
+ if( (pFrom->fg.jointype & JT_NATURAL)!=0
+ && tableAndColumnIndex(pTabList, i, zName, 0, 0)
+ ){
+ /* In a NATURAL join, omit the join columns from the
+ ** table to the right of the join */
+ continue;
+ }
+ if( sqlite3IdListIndex(pFrom->pUsing, zName)>=0 ){
+ /* In a join with a USING clause, omit columns in the
+ ** using clause from the table on the right.
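+ ** For example, in "t1 JOIN t2 USING(b)", the b column of t2 is
+ ** omitted when expanding "*".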
*/ + continue; + } + } + pRight = sqlite3Expr(db, TK_ID, zName); + zColname = zName; + zToFree = 0; + if( longNames || pTabList->nSrc>1 ){ + Expr *pLeft; + pLeft = sqlite3Expr(db, TK_ID, zTabName); + pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); + if( zSchemaName ){ + pLeft = sqlite3Expr(db, TK_ID, zSchemaName); + pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr); + } + if( longNames ){ + zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName); + zToFree = zColname; + } + }else{ + pExpr = pRight; + } + pNew = sqlite3ExprListAppend(pParse, pNew, pExpr); + sqlite3TokenInit(&sColname, zColname); + sqlite3ExprListSetName(pParse, pNew, &sColname, 0); + if( pNew && (p->selFlags & SF_NestedFrom)!=0 ){ + struct ExprList_item *pX = &pNew->a[pNew->nExpr-1]; + if( pSub ){ + pX->zSpan = sqlite3DbStrDup(db, pSub->pEList->a[j].zSpan); + testcase( pX->zSpan==0 ); + }else{ + pX->zSpan = sqlite3MPrintf(db, "%s.%s.%s", + zSchemaName, zTabName, zColname); + testcase( pX->zSpan==0 ); + } + pX->bSpanIsTab = 1; + } + sqlite3DbFree(db, zToFree); + } + } + if( !tableSeen ){ + if( zTName ){ + sqlite3ErrorMsg(pParse, "no such table: %s", zTName); + }else{ + sqlite3ErrorMsg(pParse, "no tables specified"); + } + } + } + } + sqlite3ExprListDelete(db, pEList); + p->pEList = pNew; + } +#if SQLITE_MAX_COLUMN + if( p->pEList && p->pEList->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ + sqlite3ErrorMsg(pParse, "too many columns in result set"); + return WRC_Abort; + } +#endif + return WRC_Continue; +} + +/* +** No-op routine for the parse-tree walker. +** +** When this routine is the Walker.xExprCallback then expression trees +** are walked without any actions being taken at each node. Presumably, +** when this routine is used for Walker.xExprCallback then +** Walker.xSelectCallback is set to do something useful for every +** subquery in the parser tree. +*/ +SQLITE_PRIVATE int sqlite3ExprWalkNoop(Walker *NotUsed, Expr *NotUsed2){ + UNUSED_PARAMETER2(NotUsed, NotUsed2); + return WRC_Continue; +} + +/* +** This routine "expands" a SELECT statement and all of its subqueries. +** For additional information on what it means to "expand" a SELECT +** statement, see the comment on the selectExpand worker callback above. +** +** Expanding a SELECT statement is the first step in processing a +** SELECT statement. The SELECT statement must be expanded before +** name resolution is performed. +** +** If anything goes wrong, an error message is written into pParse. +** The calling function can detect the problem by looking at pParse->nErr +** and/or pParse->db->mallocFailed. +*/ +static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = sqlite3ExprWalkNoop; + w.pParse = pParse; + if( pParse->hasCompound ){ + w.xSelectCallback = convertCompoundSelectToSubquery; + sqlite3WalkSelect(&w, pSelect); + } + w.xSelectCallback = selectExpander; + w.xSelectCallback2 = selectPopWith; + sqlite3WalkSelect(&w, pSelect); +} + + +#ifndef SQLITE_OMIT_SUBQUERY +/* +** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo() +** interface. +** +** For each FROM-clause subquery, add Column.zType and Column.zColl +** information to the Table structure that represents the result set +** of that subquery. +** +** The Table structure that represents the result set was constructed +** by selectExpander() but the type and collation information was omitted +** at that point because identifiers had not yet been resolved. This +** routine is called after identifier resolution. 
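+** For example, for "SELECT x FROM (SELECT a AS x FROM t1)", the column x
+** in the Table describing the subquery result takes its declared type and
+** collating sequence from column a of t1.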
+*/ +static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ + Parse *pParse; + int i; + SrcList *pTabList; + struct SrcList_item *pFrom; + + assert( p->selFlags & SF_Resolved ); + assert( (p->selFlags & SF_HasTypeInfo)==0 ); + p->selFlags |= SF_HasTypeInfo; + pParse = pWalker->pParse; + pTabList = p->pSrc; + for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ + Table *pTab = pFrom->pTab; + assert( pTab!=0 ); + if( (pTab->tabFlags & TF_Ephemeral)!=0 ){ + /* A sub-query in the FROM clause of a SELECT */ + Select *pSel = pFrom->pSelect; + if( pSel ){ + while( pSel->pPrior ) pSel = pSel->pPrior; + sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSel); + } + } + } +} +#endif + + +/* +** This routine adds datatype and collating sequence information to +** the Table structures of all FROM-clause subqueries in a +** SELECT statement. +** +** Use this routine after name resolution. +*/ +static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){ +#ifndef SQLITE_OMIT_SUBQUERY + Walker w; + memset(&w, 0, sizeof(w)); + w.xSelectCallback2 = selectAddSubqueryTypeInfo; + w.xExprCallback = sqlite3ExprWalkNoop; + w.pParse = pParse; + sqlite3WalkSelect(&w, pSelect); +#endif +} + + +/* +** This routine sets up a SELECT statement for processing. The +** following is accomplished: +** +** * VDBE Cursor numbers are assigned to all FROM-clause terms. +** * Ephemeral Table objects are created for all FROM-clause subqueries. +** * ON and USING clauses are shifted into WHERE statements +** * Wildcards "*" and "TABLE.*" in result sets are expanded. +** * Identifiers in expression are matched to tables. +** +** This routine acts recursively on all subqueries within the SELECT. +*/ +SQLITE_PRIVATE void sqlite3SelectPrep( + Parse *pParse, /* The parser context */ + Select *p, /* The SELECT statement being coded. */ + NameContext *pOuterNC /* Name context for container */ +){ + sqlite3 *db; + if( NEVER(p==0) ) return; + db = pParse->db; + if( db->mallocFailed ) return; + if( p->selFlags & SF_HasTypeInfo ) return; + sqlite3SelectExpand(pParse, p); + if( pParse->nErr || db->mallocFailed ) return; + sqlite3ResolveSelectNames(pParse, p, pOuterNC); + if( pParse->nErr || db->mallocFailed ) return; + sqlite3SelectAddTypeInfo(pParse, p); +} + +/* +** Reset the aggregate accumulator. +** +** The aggregate accumulator is a set of memory cells that hold +** intermediate results while calculating an aggregate. This +** routine generates code that stores NULLs in all of those memory +** cells. 
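+** All accumulator registers occupy the contiguous range
+** AggInfo.mnReg..AggInfo.mxReg, so a single OP_Null instruction
+** suffices to clear them.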
+*/
+static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ struct AggInfo_func *pFunc;
+ int nReg = pAggInfo->nFunc + pAggInfo->nColumn;
+ if( nReg==0 ) return;
+#ifdef SQLITE_DEBUG
+ /* Verify that all AggInfo registers are within the range specified by
+ ** AggInfo.mnReg..AggInfo.mxReg */
+ assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 );
+ for(i=0; i<pAggInfo->nColumn; i++){
+ assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg
+ && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg );
+ }
+ for(i=0; i<pAggInfo->nFunc; i++){
+ assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg
+ && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg );
+ }
+#endif
+ sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg);
+ for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){
+ if( pFunc->iDistinct>=0 ){
+ Expr *pE = pFunc->pExpr;
+ assert( !ExprHasProperty(pE, EP_xIsSelect) );
+ if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){
+ sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one "
+ "argument");
+ pFunc->iDistinct = -1;
+ }else{
+ KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList, 0, 0);
+ sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0,
+ (char*)pKeyInfo, P4_KEYINFO);
+ }
+ }
+ }
+}
+
+/*
+** Invoke the OP_AggFinalize opcode for every aggregate function
+** in the AggInfo structure.
+*/
+static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ struct AggInfo_func *pF;
+ for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
+ ExprList *pList = pF->pExpr->x.pList;
+ assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) );
+ sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0);
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ }
+}
+
+/*
+** Update the accumulator memory cells for an aggregate based on
+** the current cursor position.
+*/
+static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
+ Vdbe *v = pParse->pVdbe;
+ int i;
+ int regHit = 0;
+ int addrHitTest = 0;
+ struct AggInfo_func *pF;
+ struct AggInfo_col *pC;
+
+ pAggInfo->directMode = 1;
+ for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
+ int nArg;
+ int addrNext = 0;
+ int regAgg;
+ ExprList *pList = pF->pExpr->x.pList;
+ assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) );
+ if( pList ){
+ nArg = pList->nExpr;
+ regAgg = sqlite3GetTempRange(pParse, nArg);
+ sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP);
+ }else{
+ nArg = 0;
+ regAgg = 0;
+ }
+ if( pF->iDistinct>=0 ){
+ addrNext = sqlite3VdbeMakeLabel(v);
+ testcase( nArg==0 ); /* Error condition */
+ testcase( nArg>1 ); /* Also an error */
+ codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg);
+ }
+ if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){
+ CollSeq *pColl = 0;
+ struct ExprList_item *pItem;
+ int j;
+ assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */
+ for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){
+ pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr);
+ }
+ if( !pColl ){
+ pColl = pParse->db->pDfltColl;
+ }
+ if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
+ sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
+ }
+ sqlite3VdbeAddOp3(v, OP_AggStep0, 0, regAgg, pF->iMem);
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeChangeP5(v, (u8)nArg);
+ sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg);
+ sqlite3ReleaseTempRange(pParse, regAgg, nArg);
+ if( addrNext ){
+ sqlite3VdbeResolveLabel(v, addrNext);
+ sqlite3ExprCacheClear(pParse);
+ }
+ }
+
+ /* Before populating the accumulator registers, clear the column cache.
+ ** Otherwise, if any of the required column values are already present + ** in registers, sqlite3ExprCode() may use OP_SCopy to copy the value + ** to pC->iMem. But by the time the value is used, the original register + ** may have been used, invalidating the underlying buffer holding the + ** text or blob value. See ticket [883034dcb5]. + ** + ** Another solution would be to change the OP_SCopy used to copy cached + ** values to an OP_Copy. + */ + if( regHit ){ + addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v); + } + sqlite3ExprCacheClear(pParse); + for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ + sqlite3ExprCode(pParse, pC->pExpr, pC->iMem); + } + pAggInfo->directMode = 0; + sqlite3ExprCacheClear(pParse); + if( addrHitTest ){ + sqlite3VdbeJumpHere(v, addrHitTest); + } +} + +/* +** Add a single OP_Explain instruction to the VDBE to explain a simple +** count(*) query ("SELECT count(*) FROM pTab"). +*/ +#ifndef SQLITE_OMIT_EXPLAIN +static void explainSimpleCount( + Parse *pParse, /* Parse context */ + Table *pTab, /* Table being queried */ + Index *pIdx /* Index used to optimize scan, or NULL */ +){ + if( pParse->explain==2 ){ + int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx))); + char *zEqp = sqlite3MPrintf(pParse->db, "SCAN TABLE %s%s%s", + pTab->zName, + bCover ? " USING COVERING INDEX " : "", + bCover ? pIdx->zName : "" + ); + sqlite3VdbeAddOp4( + pParse->pVdbe, OP_Explain, pParse->iSelectId, 0, 0, zEqp, P4_DYNAMIC + ); + } +} +#else +# define explainSimpleCount(a,b,c) +#endif + +/* +** Generate code for the SELECT statement given in the p argument. +** +** The results are returned according to the SelectDest structure. +** See comments in sqliteInt.h for further information. +** +** This routine returns the number of errors. If any errors are +** encountered, then an appropriate error message is left in +** pParse->zErrMsg. +** +** This routine does NOT free the Select structure passed in. The +** calling function needs to do that. +*/ +SQLITE_PRIVATE int sqlite3Select( + Parse *pParse, /* The parser context */ + Select *p, /* The SELECT statement being coded. */ + SelectDest *pDest /* What to do with the query results */ +){ + int i, j; /* Loop counters */ + WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */ + Vdbe *v; /* The virtual machine under construction */ + int isAgg; /* True for select lists like "count(*)" */ + ExprList *pEList = 0; /* List of columns to extract. */ + SrcList *pTabList; /* List of tables to select from */ + Expr *pWhere; /* The WHERE clause. May be NULL */ + ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */ + Expr *pHaving; /* The HAVING clause. 
May be NULL */ + int rc = 1; /* Value to return from this function */ + DistinctCtx sDistinct; /* Info on how to code the DISTINCT keyword */ + SortCtx sSort; /* Info on how to code the ORDER BY clause */ + AggInfo sAggInfo; /* Information used by aggregate queries */ + int iEnd; /* Address of the end of the query */ + sqlite3 *db; /* The database connection */ + +#ifndef SQLITE_OMIT_EXPLAIN + int iRestoreSelectId = pParse->iSelectId; + pParse->iSelectId = pParse->iNextSelectId++; +#endif + + db = pParse->db; + if( p==0 || db->mallocFailed || pParse->nErr ){ + return 1; + } + if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; + memset(&sAggInfo, 0, sizeof(sAggInfo)); +#if SELECTTRACE_ENABLED + pParse->nSelectIndent++; + SELECTTRACE(1,pParse,p, ("begin processing:\n")); + if( sqlite3SelectTrace & 0x100 ){ + sqlite3TreeViewSelect(0, p, 0); + } +#endif + + assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo ); + assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo ); + assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue ); + assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue ); + if( IgnorableOrderby(pDest) ){ + assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union || + pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard || + pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo || + pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo); + /* If ORDER BY makes no difference in the output then neither does + ** DISTINCT so it can be removed too. */ + sqlite3ExprListDelete(db, p->pOrderBy); + p->pOrderBy = 0; + p->selFlags &= ~SF_Distinct; + } + sqlite3SelectPrep(pParse, p, 0); + memset(&sSort, 0, sizeof(sSort)); + sSort.pOrderBy = p->pOrderBy; + pTabList = p->pSrc; + if( pParse->nErr || db->mallocFailed ){ + goto select_end; + } + assert( p->pEList!=0 ); + isAgg = (p->selFlags & SF_Aggregate)!=0; +#if SELECTTRACE_ENABLED + if( sqlite3SelectTrace & 0x100 ){ + SELECTTRACE(0x100,pParse,p, ("after name resolution:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif + + /* Try to flatten subqueries in the FROM clause up into the main query + */ +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) + for(i=0; !p->pPrior && inSrc; i++){ + struct SrcList_item *pItem = &pTabList->a[i]; + Select *pSub = pItem->pSelect; + int isAggSub; + Table *pTab = pItem->pTab; + if( pSub==0 ) continue; + + /* Catch mismatch in the declared columns of a view and the number of + ** columns in the SELECT on the RHS */ + if( pTab->nCol!=pSub->pEList->nExpr ){ + sqlite3ErrorMsg(pParse, "expected %d columns for '%s' but got %d", + pTab->nCol, pTab->zName, pSub->pEList->nExpr); + goto select_end; + } + + isAggSub = (pSub->selFlags & SF_Aggregate)!=0; + if( flattenSubquery(pParse, p, i, isAgg, isAggSub) ){ + /* This subquery can be absorbed into its parent. */ + if( isAggSub ){ + isAgg = 1; + p->selFlags |= SF_Aggregate; + } + i = -1; + } + pTabList = p->pSrc; + if( db->mallocFailed ) goto select_end; + if( !IgnorableOrderby(pDest) ){ + sSort.pOrderBy = p->pOrderBy; + } + } +#endif + + /* Get a pointer the VDBE under construction, allocating a new VDBE if one + ** does not already exist */ + v = sqlite3GetVdbe(pParse); + if( v==0 ) goto select_end; + +#ifndef SQLITE_OMIT_COMPOUND_SELECT + /* Handle compound SELECT statements using the separate multiSelect() + ** procedure. 
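+ ** A compound SELECT is two or more simple SELECTs connected by UNION,
+ ** UNION ALL, INTERSECT, or EXCEPT, as in
+ ** "SELECT a FROM t1 UNION SELECT b FROM t2".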
+ */ + if( p->pPrior ){ + rc = multiSelect(pParse, p, pDest); + explainSetInteger(pParse->iSelectId, iRestoreSelectId); +#if SELECTTRACE_ENABLED + SELECTTRACE(1,pParse,p,("end compound-select processing\n")); + pParse->nSelectIndent--; +#endif + return rc; + } +#endif + + /* Generate code for all sub-queries in the FROM clause + */ +#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) + for(i=0; inSrc; i++){ + struct SrcList_item *pItem = &pTabList->a[i]; + SelectDest dest; + Select *pSub = pItem->pSelect; + if( pSub==0 ) continue; + + /* Sometimes the code for a subquery will be generated more than + ** once, if the subquery is part of the WHERE clause in a LEFT JOIN, + ** for example. In that case, do not regenerate the code to manifest + ** a view or the co-routine to implement a view. The first instance + ** is sufficient, though the subroutine to manifest the view does need + ** to be invoked again. */ + if( pItem->addrFillSub ){ + if( pItem->fg.viaCoroutine==0 ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pItem->regReturn, pItem->addrFillSub); + } + continue; + } + + /* Increment Parse.nHeight by the height of the largest expression + ** tree referred to by this, the parent select. The child select + ** may contain expression trees of at most + ** (SQLITE_MAX_EXPR_DEPTH-Parse.nHeight) height. This is a bit + ** more conservative than necessary, but much easier than enforcing + ** an exact limit. + */ + pParse->nHeight += sqlite3SelectExprHeight(p); + + /* Make copies of constant WHERE-clause terms in the outer query down + ** inside the subquery. This can help the subquery to run more efficiently. + */ + if( (pItem->fg.jointype & JT_OUTER)==0 + && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor) + ){ +#if SELECTTRACE_ENABLED + if( sqlite3SelectTrace & 0x100 ){ + SELECTTRACE(0x100,pParse,p,("After WHERE-clause push-down:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif + } + + /* Generate code to implement the subquery + ** + ** The subquery is implemented as a co-routine if all of these are true: + ** (1) The subquery is guaranteed to be the outer loop (so that it + ** does not need to be computed more than once) + ** (2) The ALL keyword after SELECT is omitted. (Applications are + ** allowed to say "SELECT ALL" instead of just "SELECT" to disable + ** the use of co-routines.) + ** (3) Co-routines are not disabled using sqlite3_test_control() + ** with SQLITE_TESTCTRL_OPTIMIZATIONS. + ** + ** TODO: Are there other reasons beside (1) to use a co-routine + ** implementation? + */ + if( i==0 + && (pTabList->nSrc==1 + || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0) /* (1) */ + && (p->selFlags & SF_All)==0 /* (2) */ + && OptimizationEnabled(db, SQLITE_SubqCoroutine) /* (3) */ + ){ + /* Implement a co-routine that will return a single row of the result + ** set on each invocation. 
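+ ** Roughly: OP_InitCoroutine records the entry point; each time the
+ ** outer query needs the next subquery row it resumes the co-routine,
+ ** which computes one row, yields it, and suspends itself.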
+ */ + int addrTop = sqlite3VdbeCurrentAddr(v)+1; + pItem->regReturn = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); + VdbeComment((v, "%s", pItem->pTab->zName)); + pItem->addrFillSub = addrTop; + sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); + explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId); + sqlite3Select(pParse, pSub, &dest); + pItem->pTab->nRowLogEst = pSub->nSelectRow; + pItem->fg.viaCoroutine = 1; + pItem->regResult = dest.iSdst; + sqlite3VdbeEndCoroutine(v, pItem->regReturn); + sqlite3VdbeJumpHere(v, addrTop-1); + sqlite3ClearTempRegCache(pParse); + }else{ + /* Generate a subroutine that will fill an ephemeral table with + ** the content of this subquery. pItem->addrFillSub will point + ** to the address of the generated subroutine. pItem->regReturn + ** is a register allocated to hold the subroutine return address + */ + int topAddr; + int onceAddr = 0; + int retAddr; + assert( pItem->addrFillSub==0 ); + pItem->regReturn = ++pParse->nMem; + topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn); + pItem->addrFillSub = topAddr+1; + if( pItem->fg.isCorrelated==0 ){ + /* If the subquery is not correlated and if we are not inside of + ** a trigger, then we only need to compute the value of the subquery + ** once. */ + onceAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); + VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName)); + }else{ + VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName)); + } + sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); + explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId); + sqlite3Select(pParse, pSub, &dest); + pItem->pTab->nRowLogEst = pSub->nSelectRow; + if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); + retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn); + VdbeComment((v, "end %s", pItem->pTab->zName)); + sqlite3VdbeChangeP1(v, topAddr, retAddr); + sqlite3ClearTempRegCache(pParse); + } + if( db->mallocFailed ) goto select_end; + pParse->nHeight -= sqlite3SelectExprHeight(p); + } +#endif + + /* Various elements of the SELECT copied into local variables for + ** convenience */ + pEList = p->pEList; + pWhere = p->pWhere; + pGroupBy = p->pGroupBy; + pHaving = p->pHaving; + sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; + +#if SELECTTRACE_ENABLED + if( sqlite3SelectTrace & 0x400 ){ + SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif + + /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and + ** if the select-list is the same as the ORDER BY list, then this query + ** can be rewritten as a GROUP BY. In other words, this: + ** + ** SELECT DISTINCT xyz FROM ... ORDER BY xyz + ** + ** is transformed to: + ** + ** SELECT xyz FROM ... GROUP BY xyz ORDER BY xyz + ** + ** The second form is preferred as a single index (or temp-table) may be + ** used for both the ORDER BY and DISTINCT processing. As originally + ** written the query must use a temp-table for at least one of the ORDER + ** BY and DISTINCT, and an index or separate temp-table for the other. + */ + if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct + && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0 + ){ + p->selFlags &= ~SF_Distinct; + pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0); + /* Notice that even thought SF_Distinct has been cleared from p->selFlags, + ** the sDistinct.isTnct is still set. 
Hence, isTnct represents the + ** original setting of the SF_Distinct flag, not the current setting */ + assert( sDistinct.isTnct ); + +#if SELECTTRACE_ENABLED + if( sqlite3SelectTrace & 0x400 ){ + SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif + } + + /* If there is an ORDER BY clause, then create an ephemeral index to + ** do the sorting. But this sorting ephemeral index might end up + ** being unused if the data can be extracted in pre-sorted order. + ** If that is the case, then the OP_OpenEphemeral instruction will be + ** changed to an OP_Noop once we figure out that the sorting index is + ** not needed. The sSort.addrSortIndex variable is used to facilitate + ** that change. + */ + if( sSort.pOrderBy ){ + KeyInfo *pKeyInfo; + pKeyInfo = keyInfoFromExprList(pParse, sSort.pOrderBy, 0, pEList->nExpr); + sSort.iECursor = pParse->nTab++; + sSort.addrSortIndex = + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + sSort.iECursor, sSort.pOrderBy->nExpr+1+pEList->nExpr, 0, + (char*)pKeyInfo, P4_KEYINFO + ); + }else{ + sSort.addrSortIndex = -1; + } + + /* If the output is destined for a temporary table, open that table. + */ + if( pDest->eDest==SRT_EphemTab ){ + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr); + } + + /* Set the limiter. + */ + iEnd = sqlite3VdbeMakeLabel(v); + if( (p->selFlags & SF_FixedLimit)==0 ){ + p->nSelectRow = 320; /* 4 billion rows */ + } + computeLimitRegisters(pParse, p, iEnd); + if( p->iLimit==0 && sSort.addrSortIndex>=0 ){ + sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen); + sSort.sortFlags |= SORTFLAG_UseSorter; + } + + /* Open an ephemeral index to use for the distinct set. + */ + if( p->selFlags & SF_Distinct ){ + sDistinct.tabTnct = pParse->nTab++; + sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + sDistinct.tabTnct, 0, 0, + (char*)keyInfoFromExprList(pParse, p->pEList,0,0), + P4_KEYINFO); + sqlite3VdbeChangeP5(v, BTREE_UNORDERED); + sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED; + }else{ + sDistinct.eTnctType = WHERE_DISTINCT_NOOP; + } + + if( !isAgg && pGroupBy==0 ){ + /* No aggregate functions and no GROUP BY clause */ + u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0); + assert( WHERE_USE_LIMIT==SF_FixedLimit ); + wctrlFlags |= p->selFlags & SF_FixedLimit; + + /* Begin the database scan. */ + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, + p->pEList, wctrlFlags, p->nSelectRow); + if( pWInfo==0 ) goto select_end; + if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){ + p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo); + } + if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){ + sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo); + } + if( sSort.pOrderBy ){ + sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo); + sSort.bOrderedInnerLoop = sqlite3WhereOrderedInnerLoop(pWInfo); + if( sSort.nOBSat==sSort.pOrderBy->nExpr ){ + sSort.pOrderBy = 0; + } + } + + /* If sorting index that was created by a prior OP_OpenEphemeral + ** instruction ended up not being needed, then change the OP_OpenEphemeral + ** into an OP_Noop. + */ + if( sSort.addrSortIndex>=0 && sSort.pOrderBy==0 ){ + sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex); + } + + /* Use the standard inner loop. */ + selectInnerLoop(pParse, p, pEList, -1, &sSort, &sDistinct, pDest, + sqlite3WhereContinueLabel(pWInfo), + sqlite3WhereBreakLabel(pWInfo)); + + /* End the database scan loop. 
+ */
+ sqlite3WhereEnd(pWInfo);
+ }else{
+ /* This is the case when there exist aggregate functions or a GROUP BY
+ ** clause, or both */
+ NameContext sNC; /* Name context for processing aggregate information */
+ int iAMem; /* First Mem address for storing current GROUP BY */
+ int iBMem; /* First Mem address for previous GROUP BY */
+ int iUseFlag; /* Mem address holding flag indicating that at least
+ ** one row of the input to the aggregator has been
+ ** processed */
+ int iAbortFlag; /* Mem address which causes query abort if positive */
+ int groupBySort; /* Rows come from source in GROUP BY order */
+ int addrEnd; /* End of processing for this SELECT */
+ int sortPTab = 0; /* Pseudotable used to decode sorting results */
+ int sortOut = 0; /* Output register from the sorter */
+ int orderByGrp = 0; /* True if the GROUP BY and ORDER BY are the same */
+
+ /* Remove any and all aliases between the result set and the
+ ** GROUP BY clause.
+ */
+ if( pGroupBy ){
+ int k; /* Loop counter */
+ struct ExprList_item *pItem; /* For looping over expression in a list */
+
+ for(k=p->pEList->nExpr, pItem=p->pEList->a; k>0; k--, pItem++){
+ pItem->u.x.iAlias = 0;
+ }
+ for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){
+ pItem->u.x.iAlias = 0;
+ }
+ assert( 66==sqlite3LogEst(100) );
+ if( p->nSelectRow>66 ) p->nSelectRow = 66;
+ }else{
+ assert( 0==sqlite3LogEst(1) );
+ p->nSelectRow = 0;
+ }
+
+ /* If there is both a GROUP BY and an ORDER BY clause and they are
+ ** identical, then it may be possible to disable the ORDER BY clause
+ ** on the grounds that the GROUP BY will cause elements to come out
+ ** in the correct order. It also may not - the GROUP BY might use a
+ ** database index that causes rows to be grouped together as required
+ ** but not actually sorted. Either way, record the fact that the
+ ** ORDER BY and GROUP BY clauses are the same by setting the orderByGrp
+ ** variable. */
+ if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){
+ orderByGrp = 1;
+ }
+
+ /* Create a label to jump to when we want to abort the query */
+ addrEnd = sqlite3VdbeMakeLabel(v);
+
+ /* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in
+ ** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the
+ ** SELECT statement.
+ */
+ memset(&sNC, 0, sizeof(sNC));
+ sNC.pParse = pParse;
+ sNC.pSrcList = pTabList;
+ sNC.pAggInfo = &sAggInfo;
+ sAggInfo.mnReg = pParse->nMem+1;
+ sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0;
+ sAggInfo.pGroupBy = pGroupBy;
+ sqlite3ExprAnalyzeAggList(&sNC, pEList);
+ sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy);
+ if( pHaving ){
+ sqlite3ExprAnalyzeAggregates(&sNC, pHaving);
+ }
+ sAggInfo.nAccumulator = sAggInfo.nColumn;
+ for(i=0; i<sAggInfo.nFunc; i++){
+ assert( !ExprHasProperty(sAggInfo.aFunc[i].pExpr, EP_xIsSelect) );
+ sNC.ncFlags |= NC_InAggFunc;
+ sqlite3ExprAnalyzeAggList(&sNC, sAggInfo.aFunc[i].pExpr->x.pList);
+ sNC.ncFlags &= ~NC_InAggFunc;
+ }
+ sAggInfo.mxReg = pParse->nMem;
+ if( db->mallocFailed ) goto select_end;
+
+ /* Processing for aggregates with GROUP BY is very different and
+ ** much more complex than aggregates without a GROUP BY.
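+ ** Compare "SELECT count(*) FROM t1", which needs only a single pass
+ ** and one accumulator, with "SELECT b, count(*) FROM t1 GROUP BY b",
+ ** which must deliver rows in GROUP BY order, detect group boundaries,
+ ** and output one row per group.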
+ */ + if( pGroupBy ){ + KeyInfo *pKeyInfo; /* Keying information for the group by clause */ + int addr1; /* A-vs-B comparision jump */ + int addrOutputRow; /* Start of subroutine that outputs a result row */ + int regOutputRow; /* Return address register for output subroutine */ + int addrSetAbort; /* Set the abort flag and return */ + int addrTopOfLoop; /* Top of the input loop */ + int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ + int addrReset; /* Subroutine for resetting the accumulator */ + int regReset; /* Return address register for reset subroutine */ + + /* If there is a GROUP BY clause we might need a sorting index to + ** implement it. Allocate that sorting index now. If it turns out + ** that we do not need it after all, the OP_SorterOpen instruction + ** will be converted into a Noop. + */ + sAggInfo.sortingIdx = pParse->nTab++; + pKeyInfo = keyInfoFromExprList(pParse, pGroupBy, 0, sAggInfo.nColumn); + addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen, + sAggInfo.sortingIdx, sAggInfo.nSortingColumn, + 0, (char*)pKeyInfo, P4_KEYINFO); + + /* Initialize memory locations used by GROUP BY aggregate processing + */ + iUseFlag = ++pParse->nMem; + iAbortFlag = ++pParse->nMem; + regOutputRow = ++pParse->nMem; + addrOutputRow = sqlite3VdbeMakeLabel(v); + regReset = ++pParse->nMem; + addrReset = sqlite3VdbeMakeLabel(v); + iAMem = pParse->nMem + 1; + pParse->nMem += pGroupBy->nExpr; + iBMem = pParse->nMem + 1; + pParse->nMem += pGroupBy->nExpr; + sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag); + VdbeComment((v, "clear abort flag")); + sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag); + VdbeComment((v, "indicate accumulator empty")); + sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1); + + /* Begin a loop that will extract all source rows in GROUP BY order. + ** This might involve two separate loops with an OP_Sort in between, or + ** it might be a single loop that uses an index to extract information + ** in the right order to begin with. + */ + sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0, + WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0 + ); + if( pWInfo==0 ) goto select_end; + if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ + /* The optimizer is able to deliver rows in group by order so + ** we do not have to sort. The OP_OpenEphemeral table will be + ** cancelled later because we still need to use the pKeyInfo + */ + groupBySort = 0; + }else{ + /* Rows are coming out in undetermined order. We have to push + ** each row into a sorting index, terminate the first loop, + ** then loop over the sorting index in order to get the output + ** in sorted order + */ + int regBase; + int regRecord; + int nCol; + int nGroupBy; + + explainTempTable(pParse, + (sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ? 
+        "DISTINCT" : "GROUP BY");
+
+      groupBySort = 1;
+      nGroupBy = pGroupBy->nExpr;
+      nCol = nGroupBy;
+      j = nGroupBy;
+      for(i=0; i<sAggInfo.nColumn; i++){
+        if( sAggInfo.aCol[i].iSorterColumn>=j ){
+          nCol++;
+          j++;
+        }
+      }
+      regBase = sqlite3GetTempRange(pParse, nCol);
+      sqlite3ExprCacheClear(pParse);
+      sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0);
+      j = nGroupBy;
+      for(i=0; i<sAggInfo.nColumn; i++){
+        struct AggInfo_col *pCol = &sAggInfo.aCol[i];
+        if( pCol->iSorterColumn>=j ){
+          int r1 = j + regBase;
+          sqlite3ExprCodeGetColumnToReg(pParse,
+                          pCol->pTab, pCol->iColumn, pCol->iTable, r1);
+          j++;
+        }
+      }
+      regRecord = sqlite3GetTempReg(pParse);
+      sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
+      sqlite3VdbeAddOp2(v, OP_SorterInsert, sAggInfo.sortingIdx, regRecord);
+      sqlite3ReleaseTempReg(pParse, regRecord);
+      sqlite3ReleaseTempRange(pParse, regBase, nCol);
+      sqlite3WhereEnd(pWInfo);
+      sAggInfo.sortingIdxPTab = sortPTab = pParse->nTab++;
+      sortOut = sqlite3GetTempReg(pParse);
+      sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol);
+      sqlite3VdbeAddOp2(v, OP_SorterSort, sAggInfo.sortingIdx, addrEnd);
+      VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v);
+      sAggInfo.useSortingIdx = 1;
+      sqlite3ExprCacheClear(pParse);
+
+    }
+
+    /* If the index or temporary table used by the GROUP BY sort
+    ** will naturally deliver rows in the order required by the ORDER BY
+    ** clause, cancel the ephemeral table open coded earlier.
+    **
+    ** This is an optimization - the correct answer should result regardless.
+    ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER to
+    ** disable this optimization for testing purposes. */
+    if( orderByGrp && OptimizationEnabled(db, SQLITE_GroupByOrder)
+     && (groupBySort || sqlite3WhereIsSorted(pWInfo))
+    ){
+      sSort.pOrderBy = 0;
+      sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex);
+    }
+
+    /* Evaluate the current GROUP BY terms and store in b0, b1, b2...
+    ** (b0 is memory location iBMem+0, b1 is iBMem+1, and so forth)
+    ** Then compare the current GROUP BY terms against the GROUP BY terms
+    ** from the previous row currently stored in a0, a1, a2...
+    */
+    addrTopOfLoop = sqlite3VdbeCurrentAddr(v);
+    sqlite3ExprCacheClear(pParse);
+    if( groupBySort ){
+      sqlite3VdbeAddOp3(v, OP_SorterData, sAggInfo.sortingIdx,
+                        sortOut, sortPTab);
+    }
+    for(j=0; j<pGroupBy->nExpr; j++){
+      if( groupBySort ){
+        sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j);
+      }else{
+        sAggInfo.directMode = 1;
+        sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j);
+      }
+    }
+    sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr,
+                      (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO);
+    addr1 = sqlite3VdbeCurrentAddr(v);
+    sqlite3VdbeAddOp3(v, OP_Jump, addr1+1, 0, addr1+1); VdbeCoverage(v);
+
+    /* Generate code that runs whenever the GROUP BY changes.
+    ** Changes in the GROUP BY are detected by the previous code
+    ** block.  If there were no changes, this block is skipped.
+    **
+    ** This code copies current group by terms in b0,b1,b2,...
+    ** over to a0,a1,a2.  It then calls the output subroutine
+    ** and resets the aggregate accumulator registers in preparation
+    ** for the next GROUP BY batch.
+    */
+    sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr);
+    sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
+    VdbeComment((v, "output one row"));
+    sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v);
+    VdbeComment((v, "check abort flag"));
+    sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
+    VdbeComment((v, "reset accumulator"));
+
+    /* Update the aggregate accumulators based on the content of
+    ** the current row
+    */
+    sqlite3VdbeJumpHere(v, addr1);
+    updateAccumulator(pParse, &sAggInfo);
+    sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag);
+    VdbeComment((v, "indicate data in accumulator"));
+
+    /* End of the loop
+    */
+    if( groupBySort ){
+      sqlite3VdbeAddOp2(v, OP_SorterNext, sAggInfo.sortingIdx, addrTopOfLoop);
+      VdbeCoverage(v);
+    }else{
+      sqlite3WhereEnd(pWInfo);
+      sqlite3VdbeChangeToNoop(v, addrSortingIdx);
+    }
+
+    /* Output the final row of result
+    */
+    sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow);
+    VdbeComment((v, "output final row"));
+
+    /* Jump over the subroutines
+    */
+    sqlite3VdbeGoto(v, addrEnd);
+
+    /* Generate a subroutine that outputs a single row of the result
+    ** set.  This subroutine first looks at the iUseFlag.  If iUseFlag
+    ** is less than or equal to zero, the subroutine is a no-op.  If
+    ** the processing calls for the query to abort, this subroutine
+    ** increments the iAbortFlag memory location before returning in
+    ** order to signal the caller to abort.
+    */
+    addrSetAbort = sqlite3VdbeCurrentAddr(v);
+    sqlite3VdbeAddOp2(v, OP_Integer, 1, iAbortFlag);
+    VdbeComment((v, "set abort flag"));
+    sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
+    sqlite3VdbeResolveLabel(v, addrOutputRow);
+    addrOutputRow = sqlite3VdbeCurrentAddr(v);
+    sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2);
+    VdbeCoverage(v);
+    VdbeComment((v, "Groupby result generator entry point"));
+    sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
+    finalizeAggFunctions(pParse, &sAggInfo);
+    sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL);
+    selectInnerLoop(pParse, p, p->pEList, -1, &sSort,
+                    &sDistinct, pDest,
+                    addrOutputRow+1, addrSetAbort);
+    sqlite3VdbeAddOp1(v, OP_Return, regOutputRow);
+    VdbeComment((v, "end groupby result generator"));
+
+    /* Generate a subroutine that will reset the group-by accumulator
+    */
+    sqlite3VdbeResolveLabel(v, addrReset);
+    resetAccumulator(pParse, &sAggInfo);
+    sqlite3VdbeAddOp1(v, OP_Return, regReset);
+
+  } /* endif pGroupBy.  Begin aggregate queries without GROUP BY: */
+  else {
+    ExprList *pDel = 0;
+#ifndef SQLITE_OMIT_BTREECOUNT
+    Table *pTab;
+    if( (pTab = isSimpleCount(p, &sAggInfo))!=0 ){
+      /* If isSimpleCount() returns a pointer to a Table structure, then
+      ** the SQL statement is of the form:
+      **
+      **      SELECT count(*) FROM <tbl>
+      **
+      ** where the Table structure returned represents table <tbl>.
+      **
+      ** This statement is so common that it is optimized specially. The
+      ** OP_Count instruction is executed either on the intkey table that
+      ** contains the data for table <tbl> or on one of its indexes. It
+      ** is better to execute the op on an index, as indexes are almost
+      ** always spread across fewer pages than their corresponding tables.
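+      **
+      ** [Editor's note - an illustrative sketch, not original text: given
+      ** "CREATE TABLE t1(a,b,c)" and "CREATE INDEX i1 ON t1(a)",
+      **
+      **     SELECT count(*) FROM t1;
+      **
+      ** is answered by a single OP_Count against the i1 b-tree rather
+      ** than a scan of the wider t1 table b-tree.]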
+      */
+      const int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+      const int iCsr = pParse->nTab++;     /* Cursor to scan b-tree */
+      Index *pIdx;                         /* Iterator variable */
+      KeyInfo *pKeyInfo = 0;               /* Keyinfo for scanned index */
+      Index *pBest = 0;                    /* Best index found so far */
+      int iRoot = pTab->tnum;              /* Root page of scanned b-tree */
+
+      sqlite3CodeVerifySchema(pParse, iDb);
+      sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
+
+      /* Search for the index that has the lowest scan cost.
+      **
+      ** (2011-04-15) Do not do a full scan of an unordered index.
+      **
+      ** (2013-10-03) Do not count the entries in a partial index.
+      **
+      ** In practice the KeyInfo structure will not be used. It is only
+      ** passed to keep OP_OpenRead happy.
+      */
+      if( !HasRowid(pTab) ) pBest = sqlite3PrimaryKeyIndex(pTab);
+      for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+        if( pIdx->bUnordered==0
+         && pIdx->szIdxRow<pTab->szTabRow
+         && pIdx->pPartIdxWhere==0
+         && (!pBest || pIdx->szIdxRow<pBest->szIdxRow)
+        ){
+          pBest = pIdx;
+        }
+      }
+      if( pBest ){
+        iRoot = pBest->tnum;
+        pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pBest);
+      }
+
+      /* Open a read-only cursor, execute the OP_Count, close the cursor. */
+      sqlite3VdbeAddOp4Int(v, OP_OpenRead, iCsr, iRoot, iDb, 1);
+      if( pKeyInfo ){
+        sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO);
+      }
+      sqlite3VdbeAddOp2(v, OP_Count, iCsr, sAggInfo.aFunc[0].iMem);
+      sqlite3VdbeAddOp1(v, OP_Close, iCsr);
+      explainSimpleCount(pParse, pTab, pBest);
+    }else
+#endif /* SQLITE_OMIT_BTREECOUNT */
+    {
+      /* Check if the query is of one of the following forms:
+      **
+      **   SELECT min(x) FROM ...
+      **   SELECT max(x) FROM ...
+      **
+      ** If it is, then ask the code in where.c to attempt to sort results
+      ** as if there was an "ORDER ON x" or "ORDER ON x DESC" clause.
+      ** If where.c is able to produce results sorted in this order, then
+      ** add vdbe code to break out of the processing loop after the
+      ** first iteration (since the first iteration of the loop is
+      ** guaranteed to operate on the row with the minimum or maximum
+      ** value of x, the only row required).
+      **
+      ** A special flag must be passed to sqlite3WhereBegin() to slightly
+      ** modify behavior as follows:
+      **
+      **   + If the query is a "SELECT min(x)", then the loop coded by
+      **     where.c should not iterate over any values with a NULL value
+      **     for x.
+      **
+      **   + The optimizer code in where.c (the thing that decides which
+      **     index or indices to use) should place a different priority on
+      **     satisfying the 'ORDER BY' clause than it does in other cases.
+      **     Refer to code and comments in where.c for details.
+      */
+      ExprList *pMinMax = 0;
+      u8 flag = WHERE_ORDERBY_NORMAL;
+
+      assert( p->pGroupBy==0 );
+      assert( flag==0 );
+      if( p->pHaving==0 ){
+        flag = minMaxQuery(&sAggInfo, &pMinMax);
+      }
+      assert( flag==0 || (pMinMax!=0 && pMinMax->nExpr==1) );
+
+      if( flag ){
+        pMinMax = sqlite3ExprListDup(db, pMinMax, 0);
+        pDel = pMinMax;
+        assert( db->mallocFailed || pMinMax!=0 );
+        if( !db->mallocFailed ){
+          pMinMax->a[0].sortOrder = flag!=WHERE_ORDERBY_MIN ?1:0;
+          pMinMax->a[0].pExpr->op = TK_COLUMN;
+        }
+      }
+
+      /* This case runs if the aggregate has no GROUP BY clause. The
+      ** processing is much simpler since there is only a single row
+      ** of output.
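+      **
+      ** [Editor's note - a sketch, not original text: the min/max path
+      ** above means that, with an index on t1(x),
+      **
+      **     SELECT min(x) FROM t1;
+      **
+      ** behaves much like "SELECT x FROM t1 WHERE x NOT NULL
+      ** ORDER BY x LIMIT 1" - a single index probe instead of a scan.]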
+ */ + resetAccumulator(pParse, &sAggInfo); + pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax, 0,flag,0); + if( pWInfo==0 ){ + sqlite3ExprListDelete(db, pDel); + goto select_end; + } + updateAccumulator(pParse, &sAggInfo); + assert( pMinMax==0 || pMinMax->nExpr==1 ); + if( sqlite3WhereIsOrdered(pWInfo)>0 ){ + sqlite3VdbeGoto(v, sqlite3WhereBreakLabel(pWInfo)); + VdbeComment((v, "%s() by index", + (flag==WHERE_ORDERBY_MIN?"min":"max"))); + } + sqlite3WhereEnd(pWInfo); + finalizeAggFunctions(pParse, &sAggInfo); + } + + sSort.pOrderBy = 0; + sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL); + selectInnerLoop(pParse, p, p->pEList, -1, 0, 0, + pDest, addrEnd, addrEnd); + sqlite3ExprListDelete(db, pDel); + } + sqlite3VdbeResolveLabel(v, addrEnd); + + } /* endif aggregate query */ + + if( sDistinct.eTnctType==WHERE_DISTINCT_UNORDERED ){ + explainTempTable(pParse, "DISTINCT"); + } + + /* If there is an ORDER BY clause, then we need to sort the results + ** and send them to the callback one by one. + */ + if( sSort.pOrderBy ){ + explainTempTable(pParse, + sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY"); + generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest); + } + + /* Jump here to skip this query + */ + sqlite3VdbeResolveLabel(v, iEnd); + + /* The SELECT has been coded. If there is an error in the Parse structure, + ** set the return code to 1. Otherwise 0. */ + rc = (pParse->nErr>0); + + /* Control jumps to here if an error is encountered above, or upon + ** successful coding of the SELECT. + */ +select_end: + explainSetInteger(pParse->iSelectId, iRestoreSelectId); + + /* Identify column names if results of the SELECT are to be output. + */ + if( rc==SQLITE_OK && pDest->eDest==SRT_Output ){ + generateColumnNames(pParse, pTabList, pEList); + } + + sqlite3DbFree(db, sAggInfo.aCol); + sqlite3DbFree(db, sAggInfo.aFunc); +#if SELECTTRACE_ENABLED + SELECTTRACE(1,pParse,p,("end processing\n")); + pParse->nSelectIndent--; +#endif + return rc; +} + +/************** End of select.c **********************************************/ +/************** Begin file table.c *******************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the sqlite3_get_table() and sqlite3_free_table() +** interface routines. These are just wrappers around the main +** interface routine of sqlite3_exec(). +** +** These routines are in a separate files so that they will not be linked +** if they are not used. +*/ +/* #include "sqliteInt.h" */ + +#ifndef SQLITE_OMIT_GET_TABLE + +/* +** This structure is used to pass data from sqlite3_get_table() through +** to the callback function is uses to build the result. +*/ +typedef struct TabResult { + char **azResult; /* Accumulated output */ + char *zErrMsg; /* Error message text, if an error occurs */ + u32 nAlloc; /* Slots allocated for azResult[] */ + u32 nRow; /* Number of rows in the result */ + u32 nColumn; /* Number of columns in the result */ + u32 nData; /* Slots used in azResult[]. (nRow+1)*nColumn */ + int rc; /* Return code from sqlite3_exec() */ +} TabResult; + +/* +** This routine is called once for each row in the result table. 
Its job
+** is to fill in the TabResult structure appropriately, allocating new
+** memory as necessary.
+*/
+static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){
+  TabResult *p = (TabResult*)pArg;  /* Result accumulator */
+  int need;                         /* Slots needed in p->azResult[] */
+  int i;                            /* Loop counter */
+  char *z;                          /* A single column of result */
+
+  /* Make sure there is enough space in p->azResult to hold everything
+  ** we need to remember from this invocation of the callback.
+  */
+  if( p->nRow==0 && argv!=0 ){
+    need = nCol*2;
+  }else{
+    need = nCol;
+  }
+  if( p->nData + need > p->nAlloc ){
+    char **azNew;
+    p->nAlloc = p->nAlloc*2 + need;
+    azNew = sqlite3_realloc64( p->azResult, sizeof(char*)*p->nAlloc );
+    if( azNew==0 ) goto malloc_failed;
+    p->azResult = azNew;
+  }
+
+  /* If this is the first row, then generate an extra row containing
+  ** the names of all columns.
+  */
+  if( p->nRow==0 ){
+    p->nColumn = nCol;
+    for(i=0; i<nCol; i++){
+      z = sqlite3_mprintf("%s", colv[i]);
+      if( z==0 ) goto malloc_failed;
+      p->azResult[p->nData++] = z;
+    }
+  }else if( (int)p->nColumn!=nCol ){
+    sqlite3_free(p->zErrMsg);
+    p->zErrMsg = sqlite3_mprintf(
+       "sqlite3_get_table() called with two or more incompatible queries"
+    );
+    p->rc = SQLITE_ERROR;
+    return 1;
+  }
+
+  /* Copy over the row data
+  */
+  if( argv!=0 ){
+    for(i=0; i<nCol; i++){
+      if( argv[i]==0 ){
+        z = 0;
+      }else{
+        int n = sqlite3Strlen30(argv[i])+1;
+        z = sqlite3_malloc64( n );
+        if( z==0 ) goto malloc_failed;
+        memcpy(z, argv[i], n);
+      }
+      p->azResult[p->nData++] = z;
+    }
+    p->nRow++;
+  }
+  return 0;
+
+malloc_failed:
+  p->rc = SQLITE_NOMEM_BKPT;
+  return 1;
+}
+
+/*
+** Query the database.  But instead of invoking a callback for each row,
+** malloc() for space to hold the result and return the entire results
+** at the conclusion of the call.
+**
+** The result that is written to ***pazResult is held in memory obtained
+** from malloc().  But the caller cannot free this memory directly.
+** Instead, the entire table should be passed to sqlite3_free_table() when
+** the calling procedure is finished using it.
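+**
+** [Editor's usage sketch - not part of the original file; assumes an open
+** database handle "db" and abbreviates error handling:
+**
+**     char **azResult; char *zErr = 0; int nRow, nCol, i, j;
+**     if( sqlite3_get_table(db, "SELECT a,b FROM t1", &azResult,
+**                           &nRow, &nCol, &zErr)==SQLITE_OK ){
+**       // azResult[0..nCol-1] hold the column names; the value of row
+**       // i (1-based), column j is azResult[i*nCol + j], possibly NULL
+**       for(i=1; i<=nRow; i++){
+**         for(j=0; j<nCol; j++){
+**           const char *zVal = azResult[i*nCol+j];
+**           printf("%s ", zVal ? zVal : "NULL");
+**         }
+**       }
+**     }
+**     sqlite3_free_table(azResult);   // always free with this routine
+**     sqlite3_free(zErr);]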
+*/
+SQLITE_API int sqlite3_get_table(
+  sqlite3 *db,          /* The database on which the SQL executes */
+  const char *zSql,     /* The SQL to be executed */
+  char ***pazResult,    /* Write the result table here */
+  int *pnRow,           /* Write the number of rows in the result here */
+  int *pnColumn,        /* Write the number of columns of result here */
+  char **pzErrMsg       /* Write error messages here */
+){
+  int rc;
+  TabResult res;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || pazResult==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  *pazResult = 0;
+  if( pnColumn ) *pnColumn = 0;
+  if( pnRow ) *pnRow = 0;
+  if( pzErrMsg ) *pzErrMsg = 0;
+  res.zErrMsg = 0;
+  res.nRow = 0;
+  res.nColumn = 0;
+  res.nData = 1;
+  res.nAlloc = 20;
+  res.rc = SQLITE_OK;
+  res.azResult = sqlite3_malloc64(sizeof(char*)*res.nAlloc );
+  if( res.azResult==0 ){
+     db->errCode = SQLITE_NOMEM;
+     return SQLITE_NOMEM_BKPT;
+  }
+  res.azResult[0] = 0;
+  rc = sqlite3_exec(db, zSql, sqlite3_get_table_cb, &res, pzErrMsg);
+  assert( sizeof(res.azResult[0])>= sizeof(res.nData) );
+  res.azResult[0] = SQLITE_INT_TO_PTR(res.nData);
+  if( (rc&0xff)==SQLITE_ABORT ){
+    sqlite3_free_table(&res.azResult[1]);
+    if( res.zErrMsg ){
+      if( pzErrMsg ){
+        sqlite3_free(*pzErrMsg);
+        *pzErrMsg = sqlite3_mprintf("%s",res.zErrMsg);
+      }
+      sqlite3_free(res.zErrMsg);
+    }
+    db->errCode = res.rc;  /* Assume 32-bit assignment is atomic */
+    return res.rc;
+  }
+  sqlite3_free(res.zErrMsg);
+  if( rc!=SQLITE_OK ){
+    sqlite3_free_table(&res.azResult[1]);
+    return rc;
+  }
+  if( res.nAlloc>res.nData ){
+    char **azNew;
+    azNew = sqlite3_realloc64( res.azResult, sizeof(char*)*res.nData );
+    if( azNew==0 ){
+      sqlite3_free_table(&res.azResult[1]);
+      db->errCode = SQLITE_NOMEM;
+      return SQLITE_NOMEM_BKPT;
+    }
+    res.azResult = azNew;
+  }
+  *pazResult = &res.azResult[1];
+  if( pnColumn ) *pnColumn = res.nColumn;
+  if( pnRow ) *pnRow = res.nRow;
+  return rc;
+}
+
+/*
+** This routine frees the space the sqlite3_get_table() malloced.
+*/
+SQLITE_API void sqlite3_free_table(
+  char **azResult            /* Result returned from sqlite3_get_table() */
+){
+  if( azResult ){
+    int i, n;
+    azResult--;
+    assert( azResult!=0 );
+    n = SQLITE_PTR_TO_INT(azResult[0]);
+    for(i=1; i<n; i++){ if( azResult[i] ) sqlite3_free(azResult[i]); }
+    sqlite3_free(azResult);
+  }
+}
+
+#endif /* SQLITE_OMIT_GET_TABLE */
+
+/************** End of table.c ***********************************************/
+/************** Begin file trigger.c *****************************************/
+/*
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains the implementation for TRIGGERs
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_TRIGGER
+/*
+** Delete a linked list of TriggerStep structures.
+*/
+SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3 *db, TriggerStep *pTriggerStep){
+  while( pTriggerStep ){
+    TriggerStep * pTmp = pTriggerStep;
+    pTriggerStep = pTriggerStep->pNext;
+
+    sqlite3ExprDelete(db, pTmp->pWhere);
+    sqlite3ExprListDelete(db, pTmp->pExprList);
+    sqlite3SelectDelete(db, pTmp->pSelect);
+    sqlite3IdListDelete(db, pTmp->pIdList);
+
+    sqlite3DbFree(db, pTmp);
+  }
+}
+
+/*
+** Given table pTab, return a list of all the triggers attached to
+** the table. The list is connected by Trigger.pNext pointers.
+**
+** All of the triggers on pTab that are in the same database as pTab
+** are already attached to pTab->pTrigger.  But there might be additional
+** triggers on pTab in the TEMP schema.  This routine prepends all
+** TEMP triggers on pTab to the beginning of the pTab->pTrigger list
+** and returns the combined list.
+**
+** To state it another way:  This routine returns a list of all triggers
+** that fire off of pTab.  The list will include any TEMP triggers on
+** pTab as well as the triggers listed in pTab->pTrigger.
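+**
+** [Editor's illustration - not original text: a TEMP trigger may attach
+** to a persistent table, which is why the TEMP schema is consulted here:
+**
+**     CREATE TABLE main.t1(a);
+**     CREATE TEMP TRIGGER tr1 AFTER INSERT ON t1 BEGIN SELECT 1; END;
+**
+** For t1, this routine returns tr1 prepended to any triggers already on
+** pTab->pTrigger.]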
+*/ +SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ + Schema * const pTmpSchema = pParse->db->aDb[1].pSchema; + Trigger *pList = 0; /* List of triggers to return */ + + if( pParse->disableTriggers ){ + return 0; + } + + if( pTmpSchema!=pTab->pSchema ){ + HashElem *p; + assert( sqlite3SchemaMutexHeld(pParse->db, 0, pTmpSchema) ); + for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){ + Trigger *pTrig = (Trigger *)sqliteHashData(p); + if( pTrig->pTabSchema==pTab->pSchema + && 0==sqlite3StrICmp(pTrig->table, pTab->zName) + ){ + pTrig->pNext = (pList ? pList : pTab->pTrigger); + pList = pTrig; + } + } + } + + return (pList ? pList : pTab->pTrigger); +} + +/* +** This is called by the parser when it sees a CREATE TRIGGER statement +** up to the point of the BEGIN before the trigger actions. A Trigger +** structure is generated based on the information available and stored +** in pParse->pNewTrigger. After the trigger actions have been parsed, the +** sqlite3FinishTrigger() function is called to complete the trigger +** construction process. +*/ +SQLITE_PRIVATE void sqlite3BeginTrigger( + Parse *pParse, /* The parse context of the CREATE TRIGGER statement */ + Token *pName1, /* The name of the trigger */ + Token *pName2, /* The name of the trigger */ + int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */ + int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */ + IdList *pColumns, /* column list if this is an UPDATE OF trigger */ + SrcList *pTableName,/* The name of the table/view the trigger applies to */ + Expr *pWhen, /* WHEN clause */ + int isTemp, /* True if the TEMPORARY keyword is present */ + int noErr /* Suppress errors if the trigger already exists */ +){ + Trigger *pTrigger = 0; /* The new trigger */ + Table *pTab; /* Table that the trigger fires off of */ + char *zName = 0; /* Name of the trigger */ + sqlite3 *db = pParse->db; /* The database connection */ + int iDb; /* The database to store the trigger in */ + Token *pName; /* The unqualified db name */ + DbFixer sFix; /* State vector for the DB fixer */ + + assert( pName1!=0 ); /* pName1->z might be NULL, but not pName1 itself */ + assert( pName2!=0 ); + assert( op==TK_INSERT || op==TK_UPDATE || op==TK_DELETE ); + assert( op>0 && op<0xff ); + if( isTemp ){ + /* If TEMP was specified, then the trigger name may not be qualified. */ + if( pName2->n>0 ){ + sqlite3ErrorMsg(pParse, "temporary trigger may not have qualified name"); + goto trigger_cleanup; + } + iDb = 1; + pName = pName1; + }else{ + /* Figure out the db that the trigger will be created in */ + iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); + if( iDb<0 ){ + goto trigger_cleanup; + } + } + if( !pTableName || db->mallocFailed ){ + goto trigger_cleanup; + } + + /* A long-standing parser bug is that this syntax was allowed: + ** + ** CREATE TRIGGER attached.demo AFTER INSERT ON attached.tab .... + ** ^^^^^^^^ + ** + ** To maintain backwards compatibility, ignore the database + ** name on pTableName if we are reparsing out of SQLITE_MASTER. + */ + if( db->init.busy && iDb!=1 ){ + sqlite3DbFree(db, pTableName->a[0].zDatabase); + pTableName->a[0].zDatabase = 0; + } + + /* If the trigger name was unqualified, and the table is a temp table, + ** then set iDb to 1 to create the trigger in the temporary database. + ** If sqlite3SrcListLookup() returns 0, indicating the table does not + ** exist, the error is caught by the block below. 
+ */ + pTab = sqlite3SrcListLookup(pParse, pTableName); + if( db->init.busy==0 && pName2->n==0 && pTab + && pTab->pSchema==db->aDb[1].pSchema ){ + iDb = 1; + } + + /* Ensure the table name matches database name and that the table exists */ + if( db->mallocFailed ) goto trigger_cleanup; + assert( pTableName->nSrc==1 ); + sqlite3FixInit(&sFix, pParse, iDb, "trigger", pName); + if( sqlite3FixSrcList(&sFix, pTableName) ){ + goto trigger_cleanup; + } + pTab = sqlite3SrcListLookup(pParse, pTableName); + if( !pTab ){ + /* The table does not exist. */ + if( db->init.iDb==1 ){ + /* Ticket #3810. + ** Normally, whenever a table is dropped, all associated triggers are + ** dropped too. But if a TEMP trigger is created on a non-TEMP table + ** and the table is dropped by a different database connection, the + ** trigger is not visible to the database connection that does the + ** drop so the trigger cannot be dropped. This results in an + ** "orphaned trigger" - a trigger whose associated table is missing. + */ + db->init.orphanTrigger = 1; + } + goto trigger_cleanup; + } + if( IsVirtual(pTab) ){ + sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables"); + goto trigger_cleanup; + } + + /* Check that the trigger name is not reserved and that no trigger of the + ** specified name exists */ + zName = sqlite3NameFromToken(db, pName); + if( !zName || SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ + goto trigger_cleanup; + } + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),zName) ){ + if( !noErr ){ + sqlite3ErrorMsg(pParse, "trigger %T already exists", pName); + }else{ + assert( !db->init.busy ); + sqlite3CodeVerifySchema(pParse, iDb); + } + goto trigger_cleanup; + } + + /* Do not create a trigger on a system table */ + if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){ + sqlite3ErrorMsg(pParse, "cannot create trigger on system table"); + goto trigger_cleanup; + } + + /* INSTEAD of triggers are only for views and views only support INSTEAD + ** of triggers. + */ + if( pTab->pSelect && tr_tm!=TK_INSTEAD ){ + sqlite3ErrorMsg(pParse, "cannot create %s trigger on view: %S", + (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName, 0); + goto trigger_cleanup; + } + if( !pTab->pSelect && tr_tm==TK_INSTEAD ){ + sqlite3ErrorMsg(pParse, "cannot create INSTEAD OF" + " trigger on table: %S", pTableName, 0); + goto trigger_cleanup; + } + +#ifndef SQLITE_OMIT_AUTHORIZATION + { + int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); + int code = SQLITE_CREATE_TRIGGER; + const char *zDb = db->aDb[iTabDb].zDbSName; + const char *zDbTrig = isTemp ? db->aDb[1].zDbSName : zDb; + if( iTabDb==1 || isTemp ) code = SQLITE_CREATE_TEMP_TRIGGER; + if( sqlite3AuthCheck(pParse, code, zName, pTab->zName, zDbTrig) ){ + goto trigger_cleanup; + } + if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iTabDb),0,zDb)){ + goto trigger_cleanup; + } + } +#endif + + /* INSTEAD OF triggers can only appear on views and BEFORE triggers + ** cannot appear on views. So we might as well translate every + ** INSTEAD OF trigger into a BEFORE trigger. It simplifies code + ** elsewhere. 
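+  **
+  ** [Editor's example - not original text: the only trigger type legal
+  ** on a view is INSTEAD OF, e.g.
+  **
+  **     CREATE VIEW v1 AS SELECT a, b FROM t1;
+  **     CREATE TRIGGER vtr INSTEAD OF INSERT ON v1 BEGIN
+  **       INSERT INTO t1(a,b) VALUES(new.a, new.b);
+  **     END;
+  **
+  ** and the code below stores it internally as a BEFORE trigger.]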
+ */ + if (tr_tm == TK_INSTEAD){ + tr_tm = TK_BEFORE; + } + + /* Build the Trigger object */ + pTrigger = (Trigger*)sqlite3DbMallocZero(db, sizeof(Trigger)); + if( pTrigger==0 ) goto trigger_cleanup; + pTrigger->zName = zName; + zName = 0; + pTrigger->table = sqlite3DbStrDup(db, pTableName->a[0].zName); + pTrigger->pSchema = db->aDb[iDb].pSchema; + pTrigger->pTabSchema = pTab->pSchema; + pTrigger->op = (u8)op; + pTrigger->tr_tm = tr_tm==TK_BEFORE ? TRIGGER_BEFORE : TRIGGER_AFTER; + pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); + pTrigger->pColumns = sqlite3IdListDup(db, pColumns); + assert( pParse->pNewTrigger==0 ); + pParse->pNewTrigger = pTrigger; + +trigger_cleanup: + sqlite3DbFree(db, zName); + sqlite3SrcListDelete(db, pTableName); + sqlite3IdListDelete(db, pColumns); + sqlite3ExprDelete(db, pWhen); + if( !pParse->pNewTrigger ){ + sqlite3DeleteTrigger(db, pTrigger); + }else{ + assert( pParse->pNewTrigger==pTrigger ); + } +} + +/* +** This routine is called after all of the trigger actions have been parsed +** in order to complete the process of building the trigger. +*/ +SQLITE_PRIVATE void sqlite3FinishTrigger( + Parse *pParse, /* Parser context */ + TriggerStep *pStepList, /* The triggered program */ + Token *pAll /* Token that describes the complete CREATE TRIGGER */ +){ + Trigger *pTrig = pParse->pNewTrigger; /* Trigger being finished */ + char *zName; /* Name of trigger */ + sqlite3 *db = pParse->db; /* The database */ + DbFixer sFix; /* Fixer object */ + int iDb; /* Database containing the trigger */ + Token nameToken; /* Trigger name for error reporting */ + + pParse->pNewTrigger = 0; + if( NEVER(pParse->nErr) || !pTrig ) goto triggerfinish_cleanup; + zName = pTrig->zName; + iDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); + pTrig->step_list = pStepList; + while( pStepList ){ + pStepList->pTrig = pTrig; + pStepList = pStepList->pNext; + } + sqlite3TokenInit(&nameToken, pTrig->zName); + sqlite3FixInit(&sFix, pParse, iDb, "trigger", &nameToken); + if( sqlite3FixTriggerStep(&sFix, pTrig->step_list) + || sqlite3FixExpr(&sFix, pTrig->pWhen) + ){ + goto triggerfinish_cleanup; + } + + /* if we are not initializing, + ** build the sqlite_master entry + */ + if( !db->init.busy ){ + Vdbe *v; + char *z; + + /* Make an entry in the sqlite_master table */ + v = sqlite3GetVdbe(pParse); + if( v==0 ) goto triggerfinish_cleanup; + sqlite3BeginWriteOperation(pParse, 0, iDb); + z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n); + sqlite3NestedParse(pParse, + "INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", + db->aDb[iDb].zDbSName, MASTER_NAME, zName, + pTrig->table, z); + sqlite3DbFree(db, z); + sqlite3ChangeCookie(pParse, iDb); + sqlite3VdbeAddParseSchemaOp(v, iDb, + sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName)); + } + + if( db->init.busy ){ + Trigger *pLink = pTrig; + Hash *pHash = &db->aDb[iDb].pSchema->trigHash; + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + pTrig = sqlite3HashInsert(pHash, zName, pTrig); + if( pTrig ){ + sqlite3OomFault(db); + }else if( pLink->pSchema==pLink->pTabSchema ){ + Table *pTab; + pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table); + assert( pTab!=0 ); + pLink->pNext = pTab->pTrigger; + pTab->pTrigger = pLink; + } + } + +triggerfinish_cleanup: + sqlite3DeleteTrigger(db, pTrig); + assert( !pParse->pNewTrigger ); + sqlite3DeleteTriggerStep(db, pStepList); +} + +/* +** Turn a SELECT statement (that the pSelect parameter points to) into +** a trigger step. Return a pointer to a TriggerStep structure. 
+** +** The parser calls this routine when it finds a SELECT statement in +** body of a TRIGGER. +*/ +SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3 *db, Select *pSelect){ + TriggerStep *pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep)); + if( pTriggerStep==0 ) { + sqlite3SelectDelete(db, pSelect); + return 0; + } + pTriggerStep->op = TK_SELECT; + pTriggerStep->pSelect = pSelect; + pTriggerStep->orconf = OE_Default; + return pTriggerStep; +} + +/* +** Allocate space to hold a new trigger step. The allocated space +** holds both the TriggerStep object and the TriggerStep.target.z string. +** +** If an OOM error occurs, NULL is returned and db->mallocFailed is set. +*/ +static TriggerStep *triggerStepAllocate( + sqlite3 *db, /* Database connection */ + u8 op, /* Trigger opcode */ + Token *pName /* The target name */ +){ + TriggerStep *pTriggerStep; + + pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n + 1); + if( pTriggerStep ){ + char *z = (char*)&pTriggerStep[1]; + memcpy(z, pName->z, pName->n); + sqlite3Dequote(z); + pTriggerStep->zTarget = z; + pTriggerStep->op = op; + } + return pTriggerStep; +} + +/* +** Build a trigger step out of an INSERT statement. Return a pointer +** to the new trigger step. +** +** The parser calls this routine when it sees an INSERT inside the +** body of a trigger. +*/ +SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep( + sqlite3 *db, /* The database connection */ + Token *pTableName, /* Name of the table into which we insert */ + IdList *pColumn, /* List of columns in pTableName to insert into */ + Select *pSelect, /* A SELECT statement that supplies values */ + u8 orconf /* The conflict algorithm (OE_Abort, OE_Replace, etc.) */ +){ + TriggerStep *pTriggerStep; + + assert(pSelect != 0 || db->mallocFailed); + + pTriggerStep = triggerStepAllocate(db, TK_INSERT, pTableName); + if( pTriggerStep ){ + pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); + pTriggerStep->pIdList = pColumn; + pTriggerStep->orconf = orconf; + }else{ + sqlite3IdListDelete(db, pColumn); + } + sqlite3SelectDelete(db, pSelect); + + return pTriggerStep; +} + +/* +** Construct a trigger step that implements an UPDATE statement and return +** a pointer to that trigger step. The parser calls this routine when it +** sees an UPDATE statement inside the body of a CREATE TRIGGER. +*/ +SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep( + sqlite3 *db, /* The database connection */ + Token *pTableName, /* Name of the table to be updated */ + ExprList *pEList, /* The SET clause: list of column and new values */ + Expr *pWhere, /* The WHERE clause */ + u8 orconf /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */ +){ + TriggerStep *pTriggerStep; + + pTriggerStep = triggerStepAllocate(db, TK_UPDATE, pTableName); + if( pTriggerStep ){ + pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE); + pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); + pTriggerStep->orconf = orconf; + } + sqlite3ExprListDelete(db, pEList); + sqlite3ExprDelete(db, pWhere); + return pTriggerStep; +} + +/* +** Construct a trigger step that implements a DELETE statement and return +** a pointer to that trigger step. The parser calls this routine when it +** sees a DELETE statement inside the body of a CREATE TRIGGER. 
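+**
+** [Editor's note - not original text: taken together, the constructors
+** above turn a body such as
+**
+**     CREATE TRIGGER tr1 AFTER UPDATE ON t1 BEGIN
+**       UPDATE t2 SET x = new.a WHERE y = old.a;
+**       DELETE FROM t3 WHERE y = old.a;
+**     END;
+**
+** into a two-element TriggerStep list (a TK_UPDATE step then a TK_DELETE
+** step) linked through TriggerStep.pNext.]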
+*/
+SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(
+  sqlite3 *db,            /* Database connection */
+  Token *pTableName,      /* The table from which rows are deleted */
+  Expr *pWhere            /* The WHERE clause */
+){
+  TriggerStep *pTriggerStep;
+
+  pTriggerStep = triggerStepAllocate(db, TK_DELETE, pTableName);
+  if( pTriggerStep ){
+    pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE);
+    pTriggerStep->orconf = OE_Default;
+  }
+  sqlite3ExprDelete(db, pWhere);
+  return pTriggerStep;
+}
+
+/*
+** Recursively delete a Trigger structure
+*/
+SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){
+  if( pTrigger==0 ) return;
+  sqlite3DeleteTriggerStep(db, pTrigger->step_list);
+  sqlite3DbFree(db, pTrigger->zName);
+  sqlite3DbFree(db, pTrigger->table);
+  sqlite3ExprDelete(db, pTrigger->pWhen);
+  sqlite3IdListDelete(db, pTrigger->pColumns);
+  sqlite3DbFree(db, pTrigger);
+}
+
+/*
+** This function is called to drop a trigger from the database schema.
+**
+** This may be called directly from the parser and therefore identifies
+** the trigger by name.  The sqlite3DropTriggerPtr() routine does the
+** same job as this routine except it takes a pointer to the trigger
+** instead of the trigger name.
+**/
+SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr){
+  Trigger *pTrigger = 0;
+  int i;
+  const char *zDb;
+  const char *zName;
+  sqlite3 *db = pParse->db;
+
+  if( db->mallocFailed ) goto drop_trigger_cleanup;
+  if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
+    goto drop_trigger_cleanup;
+  }
+
+  assert( pName->nSrc==1 );
+  zDb = pName->a[0].zDatabase;
+  zName = pName->a[0].zName;
+  assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
+  for(i=OMIT_TEMPDB; i<db->nDb; i++){
+    int j = (i<2) ? i^1 : i;  /* Search TEMP before MAIN */
+    if( zDb && sqlite3StrICmp(db->aDb[j].zDbSName, zDb) ) continue;
+    assert( sqlite3SchemaMutexHeld(db, j, 0) );
+    pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName);
+    if( pTrigger ) break;
+  }
+  if( !pTrigger ){
+    if( !noErr ){
+      sqlite3ErrorMsg(pParse, "no such trigger: %S", pName, 0);
+    }else{
+      sqlite3CodeVerifyNamedSchema(pParse, zDb);
+    }
+    pParse->checkSchema = 1;
+    goto drop_trigger_cleanup;
+  }
+  sqlite3DropTriggerPtr(pParse, pTrigger);
+
+drop_trigger_cleanup:
+  sqlite3SrcListDelete(db, pName);
+}
+
+/*
+** Return a pointer to the Table structure for the table that a trigger
+** is set on.
+*/
+static Table *tableOfTrigger(Trigger *pTrigger){
+  return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table);
+}
+
+
+/*
+** Drop a trigger given a pointer to that trigger.
+*/
+SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
+  Table   *pTable;
+  Vdbe *v;
+  sqlite3 *db = pParse->db;
+  int iDb;
+
+  iDb = sqlite3SchemaToIndex(pParse->db, pTrigger->pSchema);
+  assert( iDb>=0 && iDb<db->nDb );
+  pTable = tableOfTrigger(pTrigger);
+  assert( pTable );
+  assert( pTable->pSchema==pTrigger->pSchema || iDb==1 );
+#ifndef SQLITE_OMIT_AUTHORIZATION
+  {
+    int code = SQLITE_DROP_TRIGGER;
+    const char *zDb = db->aDb[iDb].zDbSName;
+    const char *zTab = SCHEMA_TABLE(iDb);
+    if( iDb==1 ) code = SQLITE_DROP_TEMP_TRIGGER;
+    if( sqlite3AuthCheck(pParse, code, pTrigger->zName, pTable->zName, zDb) ||
+      sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){
+      return;
+    }
+  }
+#endif
+
+  /* Generate code to destroy the database record of the trigger.
+  */
+  assert( pTable!=0 );
+  if( (v = sqlite3GetVdbe(pParse))!=0 ){
+    sqlite3NestedParse(pParse,
+       "DELETE FROM %Q.%s WHERE name=%Q AND type='trigger'",
+       db->aDb[iDb].zDbSName, MASTER_NAME, pTrigger->zName
+    );
+    sqlite3ChangeCookie(pParse, iDb);
+    sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0);
+  }
+}
+
+/*
+** Remove a trigger from the hash tables of the sqlite* pointer.
+*/
+SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3 *db, int iDb, const char *zName){
+  Trigger *pTrigger;
+  Hash *pHash;
+
+  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+  pHash = &(db->aDb[iDb].pSchema->trigHash);
+  pTrigger = sqlite3HashInsert(pHash, zName, 0);
+  if( ALWAYS(pTrigger) ){
+    if( pTrigger->pSchema==pTrigger->pTabSchema ){
+      Table *pTab = tableOfTrigger(pTrigger);
+      Trigger **pp;
+      for(pp=&pTab->pTrigger; *pp!=pTrigger; pp=&((*pp)->pNext));
+      *pp = (*pp)->pNext;
+    }
+    sqlite3DeleteTrigger(db, pTrigger);
+    db->flags |= SQLITE_InternChanges;
+  }
+}
+
+/*
+** pEList is the SET clause of an UPDATE statement.  Each entry
+** in pEList is of the format <id>=<expr>.  If any of the entries
+** in pEList have an <id> which matches an identifier in pIdList,
+** then return TRUE.  If pIdList==NULL, then it is considered a
+** wildcard that matches anything.  Likewise if pEList==NULL then
+** it matches anything so always return true.  Return false only
+** if there is no match.
+*/
+static int checkColumnOverlap(IdList *pIdList, ExprList *pEList){
+  int e;
+  if( pIdList==0 || NEVER(pEList==0) ) return 1;
+  for(e=0; e<pEList->nExpr; e++){
+    if( sqlite3IdListIndex(pIdList, pEList->a[e].zName)>=0 ) return 1;
+  }
+  return 0;
+}
+
+/*
+** Return a list of all triggers on table pTab if there exists at least
+** one trigger that must be fired when an operation of type 'op' is
+** performed on the table, and, if that operation is an UPDATE, if at
+** least one of the columns in pChanges is being modified.
+*/
+SQLITE_PRIVATE Trigger *sqlite3TriggersExist(
+  Parse *pParse,          /* Parse context */
+  Table *pTab,            /* The table that contains the triggers */
+  int op,                 /* one of TK_DELETE, TK_INSERT, TK_UPDATE */
+  ExprList *pChanges,     /* Columns that change in an UPDATE statement */
+  int *pMask              /* OUT: Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+){
+  int mask = 0;
+  Trigger *pList = 0;
+  Trigger *p;
+
+  if( (pParse->db->flags & SQLITE_EnableTrigger)!=0 ){
+    pList = sqlite3TriggerList(pParse, pTab);
+  }
+  assert( pList==0 || IsVirtual(pTab)==0 );
+  for(p=pList; p; p=p->pNext){
+    if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){
+      mask |= p->tr_tm;
+    }
+  }
+  if( pMask ){
+    *pMask = mask;
+  }
+  return (mask ? pList : 0);
+}
+
+/*
+** Convert the pStep->zTarget string into a SrcList and return a pointer
+** to that SrcList.
+**
+** This routine adds a specific database name, if needed, to the target when
+** forming the SrcList.  This prevents a trigger in one database from
+** referring to a target in another database.  An exception is when the
+** trigger is in TEMP in which case it can refer to any other database it
+** wants.
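+**
+** [Editor's illustration - not original text: if a trigger stored in
+** attached database "aux" executes "INSERT INTO t2 ...", the step's
+** target resolves to "aux.t2" here, so a like-named table in another
+** database cannot be written by accident; a TEMP trigger's steps are
+** left unqualified and resolve through the normal search order.]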
+*/
+static SrcList *targetSrcList(
+  Parse *pParse,       /* The parsing context */
+  TriggerStep *pStep   /* The trigger containing the target token */
+){
+  sqlite3 *db = pParse->db;
+  int iDb;             /* Index of the database to use */
+  SrcList *pSrc;       /* SrcList to be returned */
+
+  pSrc = sqlite3SrcListAppend(db, 0, 0, 0);
+  if( pSrc ){
+    assert( pSrc->nSrc>0 );
+    pSrc->a[pSrc->nSrc-1].zName = sqlite3DbStrDup(db, pStep->zTarget);
+    iDb = sqlite3SchemaToIndex(db, pStep->pTrig->pSchema);
+    if( iDb==0 || iDb>=2 ){
+      const char *zDb;
+      assert( iDb<db->nDb );
+      zDb = db->aDb[iDb].zDbSName;
+      pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, zDb);
+    }
+  }
+  return pSrc;
+}
+
+/*
+** Generate VDBE code for the statements inside the body of a single
+** trigger.
+*/
+static int codeTriggerProgram(
+  Parse *pParse,            /* The parser context */
+  TriggerStep *pStepList,   /* List of statements inside the trigger body */
+  int orconf                /* Conflict algorithm. (OE_Abort, etc) */
+){
+  TriggerStep *pStep;
+  Vdbe *v = pParse->pVdbe;
+  sqlite3 *db = pParse->db;
+
+  assert( pParse->pTriggerTab && pParse->pToplevel );
+  assert( pStepList );
+  assert( v!=0 );
+  for(pStep=pStepList; pStep; pStep=pStep->pNext){
+    /* Figure out the ON CONFLICT policy that will be used for this step
+    ** of the trigger program. If the statement that caused this trigger
+    ** to fire had an explicit ON CONFLICT, then use it. Otherwise, use
+    ** the ON CONFLICT policy that was specified as part of the trigger
+    ** step statement. Example:
+    **
+    **   CREATE TRIGGER AFTER INSERT ON t1 BEGIN;
+    **     INSERT OR REPLACE INTO t2 VALUES(new.a, new.b);
+    **   END;
+    **
+    **   INSERT INTO t1 ... ;            -- insert into t2 uses REPLACE policy
+    **   INSERT OR IGNORE INTO t1 ... ;  -- insert into t2 uses IGNORE policy
+    */
+    pParse->eOrconf = (orconf==OE_Default)?pStep->orconf:(u8)orconf;
+    assert( pParse->okConstFactor==0 );
+
+    switch( pStep->op ){
+      case TK_UPDATE: {
+        sqlite3Update(pParse,
+          targetSrcList(pParse, pStep),
+          sqlite3ExprListDup(db, pStep->pExprList, 0),
+          sqlite3ExprDup(db, pStep->pWhere, 0),
+          pParse->eOrconf
+        );
+        break;
+      }
+      case TK_INSERT: {
+        sqlite3Insert(pParse,
+          targetSrcList(pParse, pStep),
+          sqlite3SelectDup(db, pStep->pSelect, 0),
+          sqlite3IdListDup(db, pStep->pIdList),
+          pParse->eOrconf
+        );
+        break;
+      }
+      case TK_DELETE: {
+        sqlite3DeleteFrom(pParse,
+          targetSrcList(pParse, pStep),
+          sqlite3ExprDup(db, pStep->pWhere, 0)
+        );
+        break;
+      }
+      default: assert( pStep->op==TK_SELECT ); {
+        SelectDest sDest;
+        Select *pSelect = sqlite3SelectDup(db, pStep->pSelect, 0);
+        sqlite3SelectDestInit(&sDest, SRT_Discard, 0);
+        sqlite3Select(pParse, pSelect, &sDest);
+        sqlite3SelectDelete(db, pSelect);
+        break;
+      }
+    }
+    if( pStep->op!=TK_SELECT ){
+      sqlite3VdbeAddOp0(v, OP_ResetCount);
+    }
+  }
+
+  return 0;
+}
+
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+/*
+** This function is used to add VdbeComment() annotations to a VDBE
+** program. It is not used in production code, only for debugging.
+*/
+static const char *onErrorText(int onError){
+  switch( onError ){
+    case OE_Abort:    return "abort";
+    case OE_Rollback: return "rollback";
+    case OE_Fail:     return "fail";
+    case OE_Replace:  return "replace";
+    case OE_Ignore:   return "ignore";
+    case OE_Default:  return "default";
+  }
+  return "n/a";
+}
+#endif
+
+/*
+** Parse context structure pFrom has just been used to create a sub-vdbe
+** (trigger program). If an error has occurred, transfer error information
+** from pFrom to pTo.
+*/ +static void transferParseError(Parse *pTo, Parse *pFrom){ + assert( pFrom->zErrMsg==0 || pFrom->nErr ); + assert( pTo->zErrMsg==0 || pTo->nErr ); + if( pTo->nErr==0 ){ + pTo->zErrMsg = pFrom->zErrMsg; + pTo->nErr = pFrom->nErr; + pTo->rc = pFrom->rc; + }else{ + sqlite3DbFree(pFrom->db, pFrom->zErrMsg); + } +} + +/* +** Create and populate a new TriggerPrg object with a sub-program +** implementing trigger pTrigger with ON CONFLICT policy orconf. +*/ +static TriggerPrg *codeRowTrigger( + Parse *pParse, /* Current parse context */ + Trigger *pTrigger, /* Trigger to code */ + Table *pTab, /* The table pTrigger is attached to */ + int orconf /* ON CONFLICT policy to code trigger program with */ +){ + Parse *pTop = sqlite3ParseToplevel(pParse); + sqlite3 *db = pParse->db; /* Database handle */ + TriggerPrg *pPrg; /* Value to return */ + Expr *pWhen = 0; /* Duplicate of trigger WHEN expression */ + Vdbe *v; /* Temporary VM */ + NameContext sNC; /* Name context for sub-vdbe */ + SubProgram *pProgram = 0; /* Sub-vdbe for trigger program */ + Parse *pSubParse; /* Parse context for sub-vdbe */ + int iEndTrigger = 0; /* Label to jump to if WHEN is false */ + + assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); + assert( pTop->pVdbe ); + + /* Allocate the TriggerPrg and SubProgram objects. To ensure that they + ** are freed if an error occurs, link them into the Parse.pTriggerPrg + ** list of the top-level Parse object sooner rather than later. */ + pPrg = sqlite3DbMallocZero(db, sizeof(TriggerPrg)); + if( !pPrg ) return 0; + pPrg->pNext = pTop->pTriggerPrg; + pTop->pTriggerPrg = pPrg; + pPrg->pProgram = pProgram = sqlite3DbMallocZero(db, sizeof(SubProgram)); + if( !pProgram ) return 0; + sqlite3VdbeLinkSubProgram(pTop->pVdbe, pProgram); + pPrg->pTrigger = pTrigger; + pPrg->orconf = orconf; + pPrg->aColmask[0] = 0xffffffff; + pPrg->aColmask[1] = 0xffffffff; + + /* Allocate and populate a new Parse context to use for coding the + ** trigger sub-program. */ + pSubParse = sqlite3StackAllocZero(db, sizeof(Parse)); + if( !pSubParse ) return 0; + memset(&sNC, 0, sizeof(sNC)); + sNC.pParse = pSubParse; + pSubParse->db = db; + pSubParse->pTriggerTab = pTab; + pSubParse->pToplevel = pTop; + pSubParse->zAuthContext = pTrigger->zName; + pSubParse->eTriggerOp = pTrigger->op; + pSubParse->nQueryLoop = pParse->nQueryLoop; + + v = sqlite3GetVdbe(pSubParse); + if( v ){ + VdbeComment((v, "Start: %s.%s (%s %s%s%s ON %s)", + pTrigger->zName, onErrorText(orconf), + (pTrigger->tr_tm==TRIGGER_BEFORE ? "BEFORE" : "AFTER"), + (pTrigger->op==TK_UPDATE ? "UPDATE" : ""), + (pTrigger->op==TK_INSERT ? "INSERT" : ""), + (pTrigger->op==TK_DELETE ? "DELETE" : ""), + pTab->zName + )); +#ifndef SQLITE_OMIT_TRACE + sqlite3VdbeChangeP4(v, -1, + sqlite3MPrintf(db, "-- TRIGGER %s", pTrigger->zName), P4_DYNAMIC + ); +#endif + + /* If one was specified, code the WHEN clause. If it evaluates to false + ** (or NULL) the sub-vdbe is immediately halted by jumping to the + ** OP_Halt inserted at the end of the program. */ + if( pTrigger->pWhen ){ + pWhen = sqlite3ExprDup(db, pTrigger->pWhen, 0); + if( SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen) + && db->mallocFailed==0 + ){ + iEndTrigger = sqlite3VdbeMakeLabel(v); + sqlite3ExprIfFalse(pSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL); + } + sqlite3ExprDelete(db, pWhen); + } + + /* Code the trigger program into the sub-vdbe. */ + codeTriggerProgram(pSubParse, pTrigger->step_list, orconf); + + /* Insert an OP_Halt at the end of the sub-program. 
*/ + if( iEndTrigger ){ + sqlite3VdbeResolveLabel(v, iEndTrigger); + } + sqlite3VdbeAddOp0(v, OP_Halt); + VdbeComment((v, "End: %s.%s", pTrigger->zName, onErrorText(orconf))); + + transferParseError(pParse, pSubParse); + if( db->mallocFailed==0 ){ + pProgram->aOp = sqlite3VdbeTakeOpArray(v, &pProgram->nOp, &pTop->nMaxArg); + } + pProgram->nMem = pSubParse->nMem; + pProgram->nCsr = pSubParse->nTab; + pProgram->token = (void *)pTrigger; + pPrg->aColmask[0] = pSubParse->oldmask; + pPrg->aColmask[1] = pSubParse->newmask; + sqlite3VdbeDelete(v); + } + + assert( !pSubParse->pAinc && !pSubParse->pZombieTab ); + assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg ); + sqlite3ParserReset(pSubParse); + sqlite3StackFree(db, pSubParse); + + return pPrg; +} + +/* +** Return a pointer to a TriggerPrg object containing the sub-program for +** trigger pTrigger with default ON CONFLICT algorithm orconf. If no such +** TriggerPrg object exists, a new object is allocated and populated before +** being returned. +*/ +static TriggerPrg *getRowTrigger( + Parse *pParse, /* Current parse context */ + Trigger *pTrigger, /* Trigger to code */ + Table *pTab, /* The table trigger pTrigger is attached to */ + int orconf /* ON CONFLICT algorithm. */ +){ + Parse *pRoot = sqlite3ParseToplevel(pParse); + TriggerPrg *pPrg; + + assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); + + /* It may be that this trigger has already been coded (or is in the + ** process of being coded). If this is the case, then an entry with + ** a matching TriggerPrg.pTrigger field will be present somewhere + ** in the Parse.pTriggerPrg list. Search for such an entry. */ + for(pPrg=pRoot->pTriggerPrg; + pPrg && (pPrg->pTrigger!=pTrigger || pPrg->orconf!=orconf); + pPrg=pPrg->pNext + ); + + /* If an existing TriggerPrg could not be located, create a new one. */ + if( !pPrg ){ + pPrg = codeRowTrigger(pParse, pTrigger, pTab, orconf); + } + + return pPrg; +} + +/* +** Generate code for the trigger program associated with trigger p on +** table pTab. The reg, orconf and ignoreJump parameters passed to this +** function are the same as those described in the header function for +** sqlite3CodeRowTrigger() +*/ +SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( + Parse *pParse, /* Parse context */ + Trigger *p, /* Trigger to code */ + Table *pTab, /* The table to code triggers from */ + int reg, /* Reg array containing OLD.* and NEW.* values */ + int orconf, /* ON CONFLICT policy */ + int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ +){ + Vdbe *v = sqlite3GetVdbe(pParse); /* Main VM */ + TriggerPrg *pPrg; + pPrg = getRowTrigger(pParse, p, pTab, orconf); + assert( pPrg || pParse->nErr || pParse->db->mallocFailed ); + + /* Code the OP_Program opcode in the parent VDBE. P4 of the OP_Program + ** is a pointer to the sub-vdbe containing the trigger program. */ + if( pPrg ){ + int bRecursive = (p->zName && 0==(pParse->db->flags&SQLITE_RecTriggers)); + + sqlite3VdbeAddOp4(v, OP_Program, reg, ignoreJump, ++pParse->nMem, + (const char *)pPrg->pProgram, P4_SUBPROGRAM); + VdbeComment( + (v, "Call: %s.%s", (p->zName?p->zName:"fkey"), onErrorText(orconf))); + + /* Set the P5 operand of the OP_Program instruction to non-zero if + ** recursive invocation of this trigger program is disallowed. Recursive + ** invocation is disallowed if (a) the sub-program is really a trigger, + ** not a foreign key action, and (b) the flag to enable recursive triggers + ** is clear. 
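+    **
+    ** [Editor's note - not original text: the flag corresponds to
+    ** "PRAGMA recursive_triggers".  With the pragma ON, SQLITE_RecTriggers
+    ** is set, bRecursive evaluates to 0 and the trigger program may
+    ** re-enter itself; foreign-key actions (zName==0) may always recurse
+    ** regardless of the pragma.]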
*/ + sqlite3VdbeChangeP5(v, (u8)bRecursive); + } +} + +/* +** This is called to code the required FOR EACH ROW triggers for an operation +** on table pTab. The operation to code triggers for (INSERT, UPDATE or DELETE) +** is given by the op parameter. The tr_tm parameter determines whether the +** BEFORE or AFTER triggers are coded. If the operation is an UPDATE, then +** parameter pChanges is passed the list of columns being modified. +** +** If there are no triggers that fire at the specified time for the specified +** operation on pTab, this function is a no-op. +** +** The reg argument is the address of the first in an array of registers +** that contain the values substituted for the new.* and old.* references +** in the trigger program. If N is the number of columns in table pTab +** (a copy of pTab->nCol), then registers are populated as follows: +** +** Register Contains +** ------------------------------------------------------ +** reg+0 OLD.rowid +** reg+1 OLD.* value of left-most column of pTab +** ... ... +** reg+N OLD.* value of right-most column of pTab +** reg+N+1 NEW.rowid +** reg+N+2 OLD.* value of left-most column of pTab +** ... ... +** reg+N+N+1 NEW.* value of right-most column of pTab +** +** For ON DELETE triggers, the registers containing the NEW.* values will +** never be accessed by the trigger program, so they are not allocated or +** populated by the caller (there is no data to populate them with anyway). +** Similarly, for ON INSERT triggers the values stored in the OLD.* registers +** are never accessed, and so are not allocated by the caller. So, for an +** ON INSERT trigger, the value passed to this function as parameter reg +** is not a readable register, although registers (reg+N) through +** (reg+N+N+1) are. +** +** Parameter orconf is the default conflict resolution algorithm for the +** trigger program to use (REPLACE, IGNORE etc.). Parameter ignoreJump +** is the instruction that control should jump to if a trigger program +** raises an IGNORE exception. +*/ +SQLITE_PRIVATE void sqlite3CodeRowTrigger( + Parse *pParse, /* Parse context */ + Trigger *pTrigger, /* List of triggers on table pTab */ + int op, /* One of TK_UPDATE, TK_INSERT, TK_DELETE */ + ExprList *pChanges, /* Changes list for any UPDATE OF triggers */ + int tr_tm, /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ + Table *pTab, /* The table to code triggers from */ + int reg, /* The first in an array of registers (see above) */ + int orconf, /* ON CONFLICT policy */ + int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ +){ + Trigger *p; /* Used to iterate through pTrigger list */ + + assert( op==TK_UPDATE || op==TK_INSERT || op==TK_DELETE ); + assert( tr_tm==TRIGGER_BEFORE || tr_tm==TRIGGER_AFTER ); + assert( (op==TK_UPDATE)==(pChanges!=0) ); + + for(p=pTrigger; p; p=p->pNext){ + + /* Sanity checking: The schema for the trigger and for the table are + ** always defined. The trigger must be in the same schema as the table + ** or else it must be a TEMP trigger. */ + assert( p->pSchema!=0 ); + assert( p->pTabSchema!=0 ); + assert( p->pSchema==p->pTabSchema + || p->pSchema==pParse->db->aDb[1].pSchema ); + + /* Determine whether we should code this trigger */ + if( p->op==op + && p->tr_tm==tr_tm + && checkColumnOverlap(p->pColumns, pChanges) + ){ + sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); + } + } +} + +/* +** Triggers may access values stored in the old.* or new.* pseudo-table. 
+** This function returns a 32-bit bitmask indicating which columns of the
+** old.* or new.* tables actually are used by triggers. This information
+** may be used by the caller, for example, to avoid having to load the entire
+** old.* record into memory when executing an UPDATE or DELETE command.
+**
+** Bit 0 of the returned mask is set if the left-most column of the
+** table may be accessed using an [old|new].<column> reference. Bit 1 is set
+** if the second leftmost column value is required, and so on. If there
+** are more than 32 columns in the table, and at least one of the columns
+** with an index greater than 32 may be accessed, 0xffffffff is returned.
+**
+** It is not possible to determine if the old.rowid or new.rowid column is
+** accessed by triggers. The caller must always assume that it is.
+**
+** Parameter isNew must be either 1 or 0. If it is 0, then the mask returned
+** applies to the old.* table. If 1, the new.* table.
+**
+** Parameter tr_tm must be a mask with one or both of the TRIGGER_BEFORE
+** and TRIGGER_AFTER bits set. Values accessed by BEFORE triggers are only
+** included in the returned mask if the TRIGGER_BEFORE bit is set in the
+** tr_tm parameter. Similarly, values accessed by AFTER triggers are only
+** included in the returned mask if the TRIGGER_AFTER bit is set in tr_tm.
+*/
+SQLITE_PRIVATE u32 sqlite3TriggerColmask(
+  Parse *pParse,       /* Parse context */
+  Trigger *pTrigger,   /* List of triggers on table pTab */
+  ExprList *pChanges,  /* Changes list for any UPDATE OF triggers */
+  int isNew,           /* 1 for new.* ref mask, 0 for old.* ref mask */
+  int tr_tm,           /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+  Table *pTab,         /* The table to code triggers from */
+  int orconf           /* Default ON CONFLICT policy for trigger steps */
+){
+  const int op = pChanges ? TK_UPDATE : TK_DELETE;
+  u32 mask = 0;
+  Trigger *p;
+
+  assert( isNew==1 || isNew==0 );
+  for(p=pTrigger; p; p=p->pNext){
+    if( p->op==op && (tr_tm&p->tr_tm)
+     && checkColumnOverlap(p->pColumns,pChanges)
+    ){
+      TriggerPrg *pPrg;
+      pPrg = getRowTrigger(pParse, p, pTab, orconf);
+      if( pPrg ){
+        mask |= pPrg->aColmask[isNew];
+      }
+    }
+  }
+
+  return mask;
+}
+
+#endif /* !defined(SQLITE_OMIT_TRIGGER) */
+
+/************** End of trigger.c *********************************************/
+/************** Begin file update.c ******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains C code routines that are called by the parser
+** to handle UPDATE statements.
+*/
+/* #include "sqliteInt.h" */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* Forward declaration */
+static void updateVirtualTable(
+  Parse *pParse,       /* The parsing context */
+  SrcList *pSrc,       /* The virtual table to be modified */
+  Table *pTab,         /* The virtual table */
+  ExprList *pChanges,  /* The columns to change in the UPDATE statement */
+  Expr *pRowidExpr,    /* Expression used to recompute the rowid */
+  int *aXRef,          /* Mapping from columns of pTab to entries in pChanges */
+  Expr *pWhere,        /* WHERE clause of the UPDATE statement */
+  int onError          /* ON CONFLICT strategy */
+);
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/*
+** The most recently coded instruction was an OP_Column to retrieve the
+** i-th column of table pTab. This routine sets the P4 parameter of the
+** OP_Column to the default value, if any.
+**
+** The default value of a column is specified by a DEFAULT clause in the
+** column definition. This was either supplied by the user when the table
+** was created, or added later to the table definition by an ALTER TABLE
+** command. If the latter, then the row-records in the table btree on disk
+** may not contain a value for the column and the default value, taken
+** from the P4 parameter of the OP_Column instruction, is returned instead.
+** If the former, then all row-records are guaranteed to include a value
+** for the column and the P4 value is not required.
+**
+** Column definitions created by an ALTER TABLE command may only have
+** literal default values specified: a number, null or a string. (If a more
+** complicated default expression value was provided, it is evaluated
+** when the ALTER TABLE is executed and one of the literal values written
+** into the sqlite_master table.)
+**
+** Therefore, the P4 parameter is only required if the default value for
+** the column is a literal number, string or null. The sqlite3ValueFromExpr()
+** function is capable of transforming these types of expressions into
+** sqlite3_value objects.
+**
+** If parameter iReg is not negative, code an OP_RealAffinity instruction
+** on register iReg. This is used when an equivalent integer value is
+** stored in place of an 8-byte floating point value in order to save
+** space.
+*/
+SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){
+  assert( pTab!=0 );
+  if( !pTab->pSelect ){
+    sqlite3_value *pValue = 0;
+    u8 enc = ENC(sqlite3VdbeDb(v));
+    Column *pCol = &pTab->aCol[i];
+    VdbeComment((v, "%s.%s", pTab->zName, pCol->zName));
+    assert( i<pTab->nCol );
+    sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc,
+                         pCol->affinity, &pValue);
+    if( pValue ){
+      sqlite3VdbeAppendP4(v, pValue, P4_MEM);
+    }
+  }
+#ifndef SQLITE_OMIT_FLOATING_POINT
+  if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){
+    sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
+  }
+#endif
+}
+
+/*
+** Process an UPDATE statement.
+**
+**   UPDATE OR IGNORE table_wxyz SET a=b, c=d WHERE e<5 AND f NOT NULL;
+**          \_______/ \________/     \______/       \________________/
+**            onError   pTabList      pChanges           pWhere
+*/
+SQLITE_PRIVATE void sqlite3Update(
+  Parse *pParse,         /* The parser context */
+  SrcList *pTabList,     /* The table in which we should change things */
+  ExprList *pChanges,    /* Things to be changed */
+  Expr *pWhere,          /* The WHERE clause.
+
+/*
+** Process an UPDATE statement.
+**
+**   UPDATE OR IGNORE table_wxyz SET a=b, c=d WHERE e<5 AND f NOT NULL;
+**          \_______/ \________/     \______/       \________________/
+**            onError   pTabList      pChanges          pWhere
+*/
+SQLITE_PRIVATE void sqlite3Update(
+  Parse *pParse,         /* The parser context */
+  SrcList *pTabList,     /* The table in which we should change things */
+  ExprList *pChanges,    /* Things to be changed */
+  Expr *pWhere,          /* The WHERE clause.  May be null */
+  int onError            /* How to handle constraint errors */
+){
+  int i, j;              /* Loop counters */
+  Table *pTab;           /* The table to be updated */
+  int addrTop = 0;       /* VDBE instruction address of the start of the loop */
+  WhereInfo *pWInfo;     /* Information about the WHERE clause */
+  Vdbe *v;               /* The virtual database engine */
+  Index *pIdx;           /* For looping over indices */
+  Index *pPk;            /* The PRIMARY KEY index for WITHOUT ROWID tables */
+  int nIdx;              /* Number of indices that need updating */
+  int iBaseCur;          /* Base cursor number */
+  int iDataCur;          /* Cursor for the canonical data btree */
+  int iIdxCur;           /* Cursor for the first index */
+  sqlite3 *db;           /* The database structure */
+  int *aRegIdx = 0;      /* First register in array assigned to each index */
+  int *aXRef = 0;        /* aXRef[i] is the index in pChanges->a[] of the
+                         ** expression for the i-th column of the table.
+                         ** aXRef[i]==-1 if the i-th column is not changed. */
+  u8 *aToOpen;           /* 1 for tables and indices to be opened */
+  u8 chngPk;             /* PRIMARY KEY changed in a WITHOUT ROWID table */
+  u8 chngRowid;          /* Rowid changed in a normal table */
+  u8 chngKey;            /* Either chngPk or chngRowid */
+  Expr *pRowidExpr = 0;  /* Expression defining the new record number */
+  AuthContext sContext;  /* The authorization context */
+  NameContext sNC;       /* The name-context to resolve expressions in */
+  int iDb;               /* Database containing the table being updated */
+  int eOnePass;          /* ONEPASS_XXX value from where.c */
+  int hasFK;             /* True if foreign key processing is required */
+  int labelBreak;        /* Jump here to break out of UPDATE loop */
+  int labelContinue;     /* Jump here to continue next step of UPDATE loop */
+  int flags;             /* Flags for sqlite3WhereBegin() */
+
+#ifndef SQLITE_OMIT_TRIGGER
+  int isView;            /* True when updating a view (INSTEAD OF trigger) */
+  Trigger *pTrigger;     /* List of triggers on pTab, if required */
+  int tmask;             /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+#endif
+  int newmask;           /* Mask of NEW.* columns accessed by BEFORE triggers */
+  int iEph = 0;          /* Ephemeral table holding all primary key values */
+  int nKey = 0;          /* Number of elements in regKey for WITHOUT ROWID */
+  int aiCurOnePass[2];   /* The write cursors opened by WHERE_ONEPASS */
+  int addrOpen = 0;      /* Address of OP_OpenEphemeral */
+  int iPk = 0;           /* First of nPk cells holding PRIMARY KEY value */
+  i16 nPk = 0;           /* Number of components of the PRIMARY KEY */
+  int bReplace = 0;      /* True if REPLACE conflict resolution might happen */
+
+  /* Register Allocations */
+  int regRowCount = 0;   /* A count of rows changed */
+  int regOldRowid = 0;   /* The old rowid */
+  int regNewRowid = 0;   /* The new rowid */
+  int regNew = 0;        /* Content of the NEW.* table in triggers */
+  int regOld = 0;        /* Content of OLD.* table in triggers */
+  int regRowSet = 0;     /* Rowset of rows to be updated */
+  int regKey = 0;        /* composite PRIMARY KEY value */
+
+  memset(&sContext, 0, sizeof(sContext));
+  db = pParse->db;
+  if( pParse->nErr || db->mallocFailed ){
+    goto update_cleanup;
+  }
+  assert( pTabList->nSrc==1 );
+
+  /* Locate the table which we want to update.
+  */
+  pTab = sqlite3SrcListLookup(pParse, pTabList);
+  if( pTab==0 ) goto update_cleanup;
+  iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
+
+  /* Figure out if we have any triggers and if the table being
+  ** updated is a view.
+  */
+#ifndef SQLITE_OMIT_TRIGGER
+  pTrigger = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges, &tmask);
+  isView = pTab->pSelect!=0;
+  assert( pTrigger || tmask==0 );
+#else
+# define pTrigger 0
+# define isView 0
+# define tmask 0
+#endif
+#ifdef SQLITE_OMIT_VIEW
+# undef isView
+# define isView 0
+#endif
+
+  if( sqlite3ViewGetColumnNames(pParse, pTab) ){
+    goto update_cleanup;
+  }
+  if( sqlite3IsReadOnly(pParse, pTab, tmask) ){
+    goto update_cleanup;
+  }
+
+  /* Allocate cursors for the main database table and for all indices.
+  ** The index cursors might not be used, but if they are used they
+  ** need to occur right after the database cursor. So go ahead and
+  ** allocate enough space, just in case.
+  */
+  pTabList->a[0].iCursor = iBaseCur = iDataCur = pParse->nTab++;
+  iIdxCur = iDataCur+1;
+  pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+  for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){
+    if( IsPrimaryKeyIndex(pIdx) && pPk!=0 ){
+      iDataCur = pParse->nTab;
+      pTabList->a[0].iCursor = iDataCur;
+    }
+    pParse->nTab++;
+  }
+
+  /* Allocate space for aXRef[], aRegIdx[], and aToOpen[].
+  ** Initialize aXRef[] and aToOpen[] to their default values.
+  */
+  aXRef = sqlite3DbMallocRawNN(db, sizeof(int) * (pTab->nCol+nIdx) + nIdx+2 );
+  if( aXRef==0 ) goto update_cleanup;
+  aRegIdx = aXRef+pTab->nCol;
+  aToOpen = (u8*)(aRegIdx+nIdx);
+  memset(aToOpen, 1, nIdx+1);
+  aToOpen[nIdx+1] = 0;
+  for(i=0; i<pTab->nCol; i++) aXRef[i] = -1;
+
+  /* Initialize the name-context */
+  memset(&sNC, 0, sizeof(sNC));
+  sNC.pParse = pParse;
+  sNC.pSrcList = pTabList;
+
+  /* Resolve the column names in all the expressions of the
+  ** UPDATE statement. Also find the column index
+  ** for each column to be updated in the pChanges array. For each
+  ** column to be updated, make sure we have authorization to change
+  ** that column.
+  */
+  chngRowid = chngPk = 0;
+  for(i=0; i<pChanges->nExpr; i++){
+    if( sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){
+      goto update_cleanup;
+    }
+    for(j=0; j<pTab->nCol; j++){
+      if( sqlite3StrICmp(pTab->aCol[j].zName, pChanges->a[i].zName)==0 ){
+        if( j==pTab->iPKey ){
+          chngRowid = 1;
+          pRowidExpr = pChanges->a[i].pExpr;
+        }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){
+          chngPk = 1;
+        }
+        aXRef[j] = i;
+        break;
+      }
+    }
+    if( j>=pTab->nCol ){
+      if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zName) ){
+        j = -1;
+        chngRowid = 1;
+        pRowidExpr = pChanges->a[i].pExpr;
+      }else{
+        sqlite3ErrorMsg(pParse, "no such column: %s", pChanges->a[i].zName);
+        pParse->checkSchema = 1;
+        goto update_cleanup;
+      }
+    }
+#ifndef SQLITE_OMIT_AUTHORIZATION
+    {
+      int rc;
+      rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName,
+                            j<0 ? "ROWID" : pTab->aCol[j].zName,
+                            db->aDb[iDb].zDbSName);
+      if( rc==SQLITE_DENY ){
+        goto update_cleanup;
+      }else if( rc==SQLITE_IGNORE ){
+        aXRef[j] = -1;
+      }
+    }
+#endif
+  }
+  assert( (chngRowid & chngPk)==0 );
+  assert( chngRowid==0 || chngRowid==1 );
+  assert( chngPk==0 || chngPk==1 );
+  chngKey = chngRowid + chngPk;
+
+  /* The SET expressions are not actually used inside the WHERE loop.
+  ** So reset the colUsed mask. Unless this is a virtual table. In that
+  ** case, set all bits of the colUsed mask (to ensure that the virtual
+  ** table implementation makes all columns available).
+  */
+  pTabList->a[0].colUsed = IsVirtual(pTab) ? ALLBITS : 0;
+
+  hasFK = sqlite3FkRequired(pParse, pTab, aXRef, chngKey);
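+
+#if 0
+  /* Illustrative sketch (not compiled): for a hypothetical table t1(a,b,c)
+  ** and the statement "UPDATE t1 SET c=1, a=2", the resolution loop above
+  ** leaves aXRef[] = {1, -1, 0}: "a" is set by pChanges->a[1], "b" is
+  ** unchanged, and "c" is set by pChanges->a[0].  The code that follows
+  ** interprets aXRef[] like this: */
+  for(i=0; i<pTab->nCol; i++){
+    if( aXRef[i]>=0 ){
+      /* pChanges->a[aXRef[i]].pExpr computes the new value of column i */
+    }else{
+      /* column i keeps its current value */
+    }
+  }
+#endif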
+
+  /* There is one entry in the aRegIdx[] array for each index on the table
+  ** being updated. Fill in aRegIdx[] with a register number that will hold
+  ** the key for accessing each index.
+  **
+  ** FIXME: Be smarter about omitting indexes that use expressions.
+  */
+  for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
+    int reg;
+    if( chngKey || hasFK || pIdx->pPartIdxWhere || pIdx==pPk ){
+      reg = ++pParse->nMem;
+      pParse->nMem += pIdx->nColumn;
+    }else{
+      reg = 0;
+      for(i=0; i<pIdx->nKeyCol; i++){
+        i16 iIdxCol = pIdx->aiColumn[i];
+        if( iIdxCol<0 || aXRef[iIdxCol]>=0 ){
+          reg = ++pParse->nMem;
+          pParse->nMem += pIdx->nColumn;
+          if( (onError==OE_Replace)
+           || (onError==OE_Default && pIdx->onError==OE_Replace)
+          ){
+            bReplace = 1;
+          }
+          break;
+        }
+      }
+    }
+    if( reg==0 ) aToOpen[j+1] = 0;
+    aRegIdx[j] = reg;
+  }
+  if( bReplace ){
+    /* If REPLACE conflict resolution might be invoked, open cursors on all
+    ** indexes in case they are needed to delete records. */
+    memset(aToOpen, 1, nIdx+1);
+  }
+
+  /* Begin generating code. */
+  v = sqlite3GetVdbe(pParse);
+  if( v==0 ) goto update_cleanup;
+  if( pParse->nested==0 ) sqlite3VdbeCountChanges(v);
+  sqlite3BeginWriteOperation(pParse, 1, iDb);
+
+  /* Allocate required registers. */
+  if( !IsVirtual(pTab) ){
+    regRowSet = ++pParse->nMem;
+    regOldRowid = regNewRowid = ++pParse->nMem;
+    if( chngPk || pTrigger || hasFK ){
+      regOld = pParse->nMem + 1;
+      pParse->nMem += pTab->nCol;
+    }
+    if( chngKey || pTrigger || hasFK ){
+      regNewRowid = ++pParse->nMem;
+    }
+    regNew = pParse->nMem + 1;
+    pParse->nMem += pTab->nCol;
+  }
+
+  /* Start the view context. */
+  if( isView ){
+    sqlite3AuthContextPush(pParse, &sContext, pTab->zName);
+  }
+
+  /* If we are trying to update a view, realize that view into
+  ** an ephemeral table.
+  */
+#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
+  if( isView ){
+    sqlite3MaterializeView(pParse, pTab, pWhere, iDataCur);
+  }
+#endif
+
+  /* Resolve the column names in all the expressions in the
+  ** WHERE clause.
+  */
+  if( sqlite3ResolveExprNames(&sNC, pWhere) ){
+    goto update_cleanup;
+  }
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  /* Virtual tables must be handled separately */
+  if( IsVirtual(pTab) ){
+    updateVirtualTable(pParse, pTabList, pTab, pChanges, pRowidExpr, aXRef,
+                       pWhere, onError);
+    goto update_cleanup;
+  }
+#endif
+
+  /* Initialize the count of updated rows */
+  if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){
+    regRowCount = ++pParse->nMem;
+    sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
+  }
+
+  if( HasRowid(pTab) ){
+    sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid);
+  }else{
+    assert( pPk!=0 );
+    nPk = pPk->nKeyCol;
+    iPk = pParse->nMem+1;
+    pParse->nMem += nPk;
+    regKey = ++pParse->nMem;
+    iEph = pParse->nTab++;
+
+    sqlite3VdbeAddOp2(v, OP_Null, 0, iPk);
+    addrOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEph, nPk);
+    sqlite3VdbeSetP4KeyInfo(pParse, pPk);
+  }
+
+  /* Begin the database scan.
+  **
+  ** Do not consider a single-pass strategy for a multi-row update if
+  ** there are any triggers or foreign keys to process, or rows may
+  ** be deleted as a result of REPLACE conflict handling. Any of these
+  ** things might disturb a cursor being used to scan through the table
+  ** or index, causing a single-pass approach to malfunction.
+  */
+  flags = WHERE_ONEPASS_DESIRED|WHERE_SEEK_UNIQ_TABLE;
+  if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){
+    flags |= WHERE_ONEPASS_MULTIROW;
+  }
+  pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, flags, iIdxCur);
+  if( pWInfo==0 ) goto update_cleanup;
+
+  /* A one-pass strategy that might update more than one row may not
+  ** be used if any column of the index used for the scan is being
+  ** updated. Otherwise, if there is an index on "b", statements like
+  ** the following could create an infinite loop:
+  **
+  **     UPDATE t1 SET b=b+1 WHERE b>?
+  **
+  ** Fall back to ONEPASS_OFF if where.c has selected a ONEPASS_MULTI
+  ** strategy that uses an index for which one or more columns are being
+  ** updated. */
+  eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
+  if( eOnePass==ONEPASS_MULTI ){
+    int iCur = aiCurOnePass[1];
+    if( iCur>=0 && iCur!=iDataCur && aToOpen[iCur-iBaseCur] ){
+      eOnePass = ONEPASS_OFF;
+    }
+    assert( iCur!=iDataCur || !HasRowid(pTab) );
+  }
+
+  if( HasRowid(pTab) ){
+    /* Read the rowid of the current row of the WHERE scan. In ONEPASS_OFF
+    ** mode, write the rowid into the FIFO. In either of the one-pass modes,
+    ** leave it in register regOldRowid. */
+    sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid);
+    if( eOnePass==ONEPASS_OFF ){
+      sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid);
+    }
+  }else{
+    /* Read the PK of the current row into an array of registers. In
+    ** ONEPASS_OFF mode, serialize the array into a record and store it in
+    ** the ephemeral table. Or, in ONEPASS_SINGLE or MULTI mode, change
+    ** the OP_OpenEphemeral instruction to a Noop (the ephemeral table
+    ** is not required) and leave the PK fields in the array of registers. */
+    for(i=0; i<nPk; i++){
+      assert( pPk->aiColumn[i]>=0 );
+      sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur,pPk->aiColumn[i],iPk+i);
+    }
+    if( eOnePass ){
+      sqlite3VdbeChangeToNoop(v, addrOpen);
+      nKey = nPk;
+      regKey = iPk;
+    }else{
+      sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey,
+                        sqlite3IndexAffinityStr(db, pPk), nPk);
+      sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEph, regKey, iPk, nPk);
+    }
+  }
+
+  if( eOnePass!=ONEPASS_MULTI ){
+    sqlite3WhereEnd(pWInfo);
+  }
+
+  labelBreak = sqlite3VdbeMakeLabel(v);
+  if( !isView ){
+    int addrOnce = 0;
+
+    /* Open every index that needs updating. */
+    if( eOnePass!=ONEPASS_OFF ){
+      if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iBaseCur] = 0;
+      if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iBaseCur] = 0;
+    }
+
+    if( eOnePass==ONEPASS_MULTI && (nIdx-(aiCurOnePass[1]>=0))>0 ){
+      addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+    }
+    sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur, aToOpen,
+                               0, 0);
+    if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
+  }
+
+  /* Top of the update loop */
+  if( eOnePass!=ONEPASS_OFF ){
+    if( !isView && aiCurOnePass[0]!=iDataCur && aiCurOnePass[1]!=iDataCur ){
+      assert( pPk );
+      sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey, nKey);
+      VdbeCoverageNeverTaken(v);
+    }
+    if( eOnePass==ONEPASS_SINGLE ){
+      labelContinue = labelBreak;
+    }else{
+      labelContinue = sqlite3VdbeMakeLabel(v);
+    }
+    sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak);
+    VdbeCoverageIf(v, pPk==0);
+    VdbeCoverageIf(v, pPk!=0);
+  }else if( pPk ){
+    labelContinue = sqlite3VdbeMakeLabel(v);
+    sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v);
+    addrTop = sqlite3VdbeAddOp2(v, OP_RowData, iEph, regKey);
+    sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey, 0);
+    VdbeCoverage(v);
+  }else{
+    labelContinue = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet, labelBreak,
+                                      regOldRowid);
+    VdbeCoverage(v);
+    sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid);
+    VdbeCoverage(v);
+  }
+
+  /* If the record number will change, set register regNewRowid to
+  ** contain the new value. If the record number is not being modified,
+  ** then regNewRowid is the same register as regOldRowid, which is
+  ** already populated. */
+  assert( chngKey || pTrigger || hasFK || regOldRowid==regNewRowid );
+  if( chngRowid ){
+    sqlite3ExprCode(pParse, pRowidExpr, regNewRowid);
+    sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid); VdbeCoverage(v);
+  }
+
+  /* Compute the old pre-UPDATE content of the row being changed, if that
+  ** information is needed */
+  if( chngPk || hasFK || pTrigger ){
+    u32 oldmask = (hasFK ? sqlite3FkOldmask(pParse, pTab) : 0);
+    oldmask |= sqlite3TriggerColmask(pParse,
+        pTrigger, pChanges, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onError
+    );
+    for(i=0; i<pTab->nCol; i++){
+      if( oldmask==0xffffffff
+       || (i<32 && (oldmask & MASKBIT32(i))!=0)
+       || (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0
+      ){
+        testcase( oldmask!=0xffffffff && i==31 );
+        sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regOld+i);
+      }else{
+        sqlite3VdbeAddOp2(v, OP_Null, 0, regOld+i);
+      }
+    }
+    if( chngRowid==0 && pPk==0 ){
+      sqlite3VdbeAddOp2(v, OP_Copy, regOldRowid, regNewRowid);
+    }
+  }
+
+  /* Populate the array of registers beginning at regNew with the new
+  ** row data. This array is used to check constraints, create the new
+  ** table and index records, and as the values for any new.* references
+  ** made by triggers.
+  **
+  ** If there are one or more BEFORE triggers, then do not populate the
+  ** registers associated with columns that are (a) not modified by
+  ** this UPDATE statement and (b) not accessed by new.* references. The
+  ** values for registers not modified by the UPDATE must be reloaded from
+  ** the database after the BEFORE triggers are fired anyway (as the trigger
+  ** may have modified them). So not loading those that are not going to
+  ** be used eliminates some redundant opcodes.
+  */
+  newmask = sqlite3TriggerColmask(
+      pParse, pTrigger, pChanges, 1, TRIGGER_BEFORE, pTab, onError
+  );
+  for(i=0; i<pTab->nCol; i++){
+    if( i==pTab->iPKey ){
+      sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i);
+    }else{
+      j = aXRef[i];
+      if( j>=0 ){
+        sqlite3ExprCode(pParse, pChanges->a[j].pExpr, regNew+i);
+      }else if( 0==(tmask&TRIGGER_BEFORE) || i>31 || (newmask & MASKBIT32(i)) ){
+        /* This branch loads the value of a column that will not be changed
+        ** into a register. This is done if there are no BEFORE triggers, or
+        ** if there are one or more BEFORE triggers that use this value via
+        ** a new.* reference in a trigger program.
+        */
+        testcase( i==31 );
+        testcase( i==32 );
+        sqlite3ExprCodeGetColumnToReg(pParse, pTab, i, iDataCur, regNew+i);
+      }else{
+        sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i);
+      }
+    }
+  }
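+
+#if 0
+  /* Illustrative sketch (not compiled): with a hypothetical table t1(a,b,c),
+  ** the statement "UPDATE t1 SET a=1", and a BEFORE trigger whose body reads
+  ** only new.b, the call above yields newmask==MASKBIT32(1).  The loop above
+  ** therefore codes the new value for "a", loads "b" because the trigger
+  ** reads it, and leaves the register for "c" NULL until after the trigger
+  ** has run. */
+  if( newmask & MASKBIT32(1) ){
+    /* new.b is needed by a BEFORE trigger */
+  }
+#endif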
+
+  /* Fire any BEFORE UPDATE triggers. This happens before constraints are
+  ** verified. One could argue that this is wrong.
+  */
+  if( tmask&TRIGGER_BEFORE ){
+    sqlite3TableAffinity(v, pTab, regNew);
+    sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
+        TRIGGER_BEFORE, pTab, regOldRowid, onError, labelContinue);
+
+    /* The row-trigger may have deleted the row being updated. In this
+    ** case, jump to the next row. No updates or AFTER triggers are
+    ** required. This behavior - what happens when the row being updated
+    ** is deleted or renamed by a BEFORE trigger - is left undefined in the
+    ** documentation.
+    */
+    if( pPk ){
+      sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue,regKey,nKey);
+      VdbeCoverage(v);
+    }else{
+      sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid);
+      VdbeCoverage(v);
+    }
+
+    /* If it did not delete it, the row-trigger may still have modified
+    ** some of the columns of the row being updated. Load the values for
+    ** all columns not modified by the update statement into their
+    ** registers in case this has happened.
+    */
+    for(i=0; i<pTab->nCol; i++){
+      if( aXRef[i]<0 && i!=pTab->iPKey ){
+        sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i);
+      }
+    }
+  }
+
+  if( !isView ){
+    int addr1 = 0;        /* Address of jump instruction */
+
+    /* Do constraint checks. */
+    assert( regOldRowid>0 );
+    sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
+        regNewRowid, regOldRowid, chngKey, onError, labelContinue, &bReplace,
+        aXRef);
+
+    /* Do FK constraint checks. */
+    if( hasFK ){
+      sqlite3FkCheck(pParse, pTab, regOldRowid, 0, aXRef, chngKey);
+    }
+
+    /* Delete the index entries associated with the current record. */
+    if( bReplace || chngKey ){
+      if( pPk ){
+        addr1 = sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, 0, regKey, nKey);
+      }else{
+        addr1 = sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, 0, regOldRowid);
+      }
+      VdbeCoverageNeverTaken(v);
+    }
+    sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx, -1);
+
+    /* If changing the rowid value, or if there are foreign key constraints
+    ** to process, delete the old record. Otherwise, add a noop OP_Delete
+    ** to invoke the pre-update hook.
+    **
+    ** That (regNew==regNewRowid+1) is true is also important for the
+    ** pre-update hook. If the caller invokes preupdate_new(), the returned
+    ** value is copied from memory cell (regNewRowid+1+iCol), where iCol
+    ** is the column index supplied by the user.
+    */
+    assert( regNew==regNewRowid+1 );
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+    sqlite3VdbeAddOp3(v, OP_Delete, iDataCur,
+        OPFLAG_ISUPDATE | ((hasFK || chngKey) ? 0 : OPFLAG_ISNOOP),
+        regNewRowid
+    );
+    if( eOnePass==ONEPASS_MULTI ){
+      assert( hasFK==0 && chngKey==0 );
+      sqlite3VdbeChangeP5(v, OPFLAG_SAVEPOSITION);
+    }
+    if( !pParse->nested ){
+      sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
+    }
+#else
+    if( hasFK || chngKey ){
+      sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0);
+    }
+#endif
+    if( bReplace || chngKey ){
+      sqlite3VdbeJumpHere(v, addr1);
+    }
+
+    if( hasFK ){
+      sqlite3FkCheck(pParse, pTab, 0, regNewRowid, aXRef, chngKey);
+    }
+
+    /* Insert the new index entries and the new record. */
+    sqlite3CompleteInsertion(
+        pParse, pTab, iDataCur, iIdxCur, regNewRowid, aRegIdx,
+        OPFLAG_ISUPDATE | (eOnePass==ONEPASS_MULTI ? OPFLAG_SAVEPOSITION : 0),
+        0, 0
+    );
+
+    /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to
+    ** handle rows (possibly in other tables) that refer via a foreign key
+    ** to the row just updated. */
+    if( hasFK ){
+      sqlite3FkActions(pParse, pTab, pChanges, regOldRowid, aXRef, chngKey);
+    }
+  }
+
+  /* Increment the row counter
+  */
+  if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){
+    sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1);
+  }
+
+  sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges,
+      TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue);
+
+  /* Repeat the above with the next record to be updated, until
+  ** all records selected by the WHERE clause have been updated.
+  */
+  if( eOnePass==ONEPASS_SINGLE ){
+    /* Nothing to do at end-of-loop for a single-pass */
+  }else if( eOnePass==ONEPASS_MULTI ){
+    sqlite3VdbeResolveLabel(v, labelContinue);
+    sqlite3WhereEnd(pWInfo);
+  }else if( pPk ){
+    sqlite3VdbeResolveLabel(v, labelContinue);
+    sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v);
+  }else{
+    sqlite3VdbeGoto(v, labelContinue);
+  }
+  sqlite3VdbeResolveLabel(v, labelBreak);
+
+  /* Update the sqlite_sequence table by storing the content of the
+  ** maximum rowid counter values recorded while inserting into
+  ** autoincrement tables.
+  */
+  if( pParse->nested==0 && pParse->pTriggerTab==0 ){
+    sqlite3AutoincrementEnd(pParse);
+  }
+
+  /*
+  ** Return the number of rows that were changed. If this routine is
+  ** generating code because of a call to sqlite3NestedParse(), do not
+  ** invoke the callback function.
+  */
+  if( (db->flags&SQLITE_CountRows) && !pParse->pTriggerTab && !pParse->nested ){
+    sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1);
+    sqlite3VdbeSetNumCols(v, 1);
+    sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC);
+  }
+
+update_cleanup:
+  sqlite3AuthContextPop(&sContext);
+  sqlite3DbFree(db, aXRef);   /* Also frees aRegIdx[] and aToOpen[] */
+  sqlite3SrcListDelete(db, pTabList);
+  sqlite3ExprListDelete(db, pChanges);
+  sqlite3ExprDelete(db, pWhere);
+  return;
+}
+/* Make sure "isView" and other macros defined above are undefined. Otherwise
+** they may interfere with compilation of other functions in this file
+** (or in another file, if this file becomes part of the amalgamation). */
+#ifdef isView
+ #undef isView
+#endif
+#ifdef pTrigger
+ #undef pTrigger
+#endif
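+
+/* Illustrative sketch (not compiled): the "rows updated" counter coded above
+** is produced when the SQLITE_CountRows flag is set (PRAGMA count_changes);
+** from the public API the same number is available through
+** sqlite3_changes().  Error handling is elided. */
+#if 0
+#include <sqlite3.h>
+#include <stdio.h>
+static void exampleRowsUpdated(sqlite3 *db){
+  sqlite3_exec(db, "UPDATE t1 SET b=b+1 WHERE a>10;", 0, 0, 0);
+  printf("rows updated: %d\n", sqlite3_changes(db));
+}
+#endif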
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Generate code for an UPDATE of a virtual table.
+**
+** There are two possible strategies - the default and the special
+** "onepass" strategy. Onepass is only used if the virtual table
+** implementation indicates that pWhere may match at most one row.
+**
+** The default strategy is to create an ephemeral table that contains
+** for each row to be changed:
+**
+**   (A)  The original rowid of that row.
+**   (B)  The revised rowid for the row.
+**   (C)  The content of every column in the row.
+**
+** Then loop through the contents of this ephemeral table executing a
+** VUpdate for each row. When finished, drop the ephemeral table.
+**
+** The "onepass" strategy does not use an ephemeral table. Instead, it
+** stores the same values (A, B and C above) in a register array and
+** makes a single invocation of VUpdate.
+*/
+static void updateVirtualTable(
+  Parse *pParse,       /* The parsing context */
+  SrcList *pSrc,       /* The virtual table to be modified */
+  Table *pTab,         /* The virtual table */
+  ExprList *pChanges,  /* The columns to change in the UPDATE statement */
+  Expr *pRowid,        /* Expression used to recompute the rowid */
+  int *aXRef,          /* Mapping from columns of pTab to entries in pChanges */
+  Expr *pWhere,        /* WHERE clause of the UPDATE statement */
+  int onError          /* ON CONFLICT strategy */
+){
+  Vdbe *v = pParse->pVdbe;  /* Virtual machine under construction */
+  int ephemTab;             /* Table holding the result of the SELECT */
+  int i;                    /* Loop counter */
+  sqlite3 *db = pParse->db; /* Database connection */
+  const char *pVTab = (const char*)sqlite3GetVTable(db, pTab);
+  WhereInfo *pWInfo;
+  int nArg = 2 + pTab->nCol;      /* Number of arguments to VUpdate */
+  int regArg;                     /* First register in VUpdate arg array */
+  int regRec;                     /* Register in which to assemble record */
+  int regRowid;                   /* Register for ephem table rowid */
+  int iCsr = pSrc->a[0].iCursor;  /* Cursor used for virtual table scan */
+  int aDummy[2];                  /* Unused arg for sqlite3WhereOkOnePass() */
+  int bOnePass;                   /* True to use onepass strategy */
+  int addr;                       /* Address of OP_OpenEphemeral */
+
+  /* Allocate nArg registers to marshal the arguments to VUpdate. Then
+  ** create and open the ephemeral table in which the records created from
+  ** these arguments will be temporarily stored. */
+  assert( v );
+  ephemTab = pParse->nTab++;
+  addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, nArg);
+  regArg = pParse->nMem + 1;
+  pParse->nMem += nArg;
+  regRec = ++pParse->nMem;
+  regRowid = ++pParse->nMem;
+
+  /* Start scanning the virtual table */
+  pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0,0,WHERE_ONEPASS_DESIRED,0);
+  if( pWInfo==0 ) return;
+
+  /* Populate the argument registers. */
+  sqlite3VdbeAddOp2(v, OP_Rowid, iCsr, regArg);
+  if( pRowid ){
+    sqlite3ExprCode(pParse, pRowid, regArg+1);
+  }else{
+    sqlite3VdbeAddOp2(v, OP_Rowid, iCsr, regArg+1);
+  }
+  for(i=0; i<pTab->nCol; i++){
+    if( aXRef[i]>=0 ){
+      sqlite3ExprCode(pParse, pChanges->a[aXRef[i]].pExpr, regArg+2+i);
+    }else{
+      sqlite3VdbeAddOp3(v, OP_VColumn, iCsr, i, regArg+2+i);
+    }
+  }
+
+  bOnePass = sqlite3WhereOkOnePass(pWInfo, aDummy);
+
+  if( bOnePass ){
+    /* If using the onepass strategy, no-op out the OP_OpenEphemeral coded
+    ** above. Also, if this is a top-level parse (not a trigger), clear the
+    ** multi-write flag so that the VM does not open a statement journal */
+    sqlite3VdbeChangeToNoop(v, addr);
+    if( sqlite3IsToplevel(pParse) ){
+      pParse->isMultiWrite = 0;
+    }
+  }else{
+    /* Create a record from the argument register contents and insert it into
+    ** the ephemeral table. */
+    sqlite3VdbeAddOp3(v, OP_MakeRecord, regArg, nArg, regRec);
+    sqlite3VdbeAddOp2(v, OP_NewRowid, ephemTab, regRowid);
+    sqlite3VdbeAddOp3(v, OP_Insert, ephemTab, regRec, regRowid);
+  }
+
+
+  if( bOnePass==0 ){
+    /* End the virtual table scan */
+    sqlite3WhereEnd(pWInfo);
+
+    /* Begin scanning through the ephemeral table. */
+    addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v);
+
+    /* Extract arguments from the current row of the ephemeral table and
+    ** invoke the VUpdate method. */
+    for(i=0; i<nArg; i++){
+      sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i, regArg+i);
+    }
+  }
+  sqlite3VtabMakeWritable(pParse, pTab);
+  sqlite3VdbeAddOp4(v, OP_VUpdate, 0, nArg, regArg, pVTab, P4_VTAB);
+  sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError);
+  sqlite3MayAbort(pParse);
+
+  /* End of the ephemeral table scan. Or, if using the onepass strategy,
+  ** jump to here if the scan visited zero rows. */
+  if( bOnePass==0 ){
+    sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1); VdbeCoverage(v);
+    sqlite3VdbeJumpHere(v, addr);
+    sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0);
+  }else{
+    sqlite3WhereEnd(pWInfo);
+  }
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/************** End of update.c **********************************************/
+/************** Begin file vacuum.c ******************************************/
+/*
+** 2003 April 6
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to implement the VACUUM command.
+**
+** Most of the code in this file may be omitted by defining the
+** SQLITE_OMIT_VACUUM macro.
+*/
+/* #include "sqliteInt.h" */
+/* #include "vdbeInt.h" */
+
+#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)
+
+/*
+** Execute zSql on database db.
+**
+** If zSql returns rows, then each row will have exactly one column.
+** (This will only happen if zSql begins with "SELECT".)  Take each row
+** of result and call execSql() again recursively.
+**
+** The execSqlF() routine does the same thing, except it accepts
+** a format string as its third argument
+*/
+static int execSql(sqlite3 *db, char **pzErrMsg, const char *zSql){
+  sqlite3_stmt *pStmt;
+  int rc;
+
+  /* printf("SQL: [%s]\n", zSql); fflush(stdout); */
+  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  if( rc!=SQLITE_OK ) return rc;
+  while( SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){
+    const char *zSubSql = (const char*)sqlite3_column_text(pStmt,0);
+    assert( sqlite3_strnicmp(zSql,"SELECT",6)==0 );
+    if( zSubSql ){
+      assert( zSubSql[0]!='S' );
+      rc = execSql(db, pzErrMsg, zSubSql);
+      if( rc!=SQLITE_OK ) break;
+    }
+  }
+  assert( rc!=SQLITE_ROW );
+  if( rc==SQLITE_ROW ) rc = SQLITE_ERROR;
+  if( rc ){
+    sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db));
+  }
+  (void)sqlite3_finalize(pStmt);
+  return rc;
+}
+static int execSqlF(sqlite3 *db, char **pzErrMsg, const char *zSql, ...){
+  char *z;
+  va_list ap;
+  int rc;
+  va_start(ap, zSql);
+  z = sqlite3VMPrintf(db, zSql, ap);
+  va_end(ap);
+  if( z==0 ) return SQLITE_NOMEM;
+  rc = execSql(db, pzErrMsg, z);
+  sqlite3DbFree(db, z);
+  return rc;
+}
+
+/*
+** The VACUUM command is used to clean up the database,
+** collapse free space, etc.  It is modelled after the VACUUM command
+** in PostgreSQL.
+*/
+SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse, Token *pNm){
+  Vdbe *v = sqlite3GetVdbe(pParse);
+  int iDb = 0;
+  if( v==0 ) return;
+  if( pNm ){
+    iDb = sqlite3TwoPartName(pParse, pNm, pNm, &pNm);
+    if( iDb<0 ) return;
+  }
+  if( iDb!=1 && (iDb>=2 || iDb==0) ){
+    sqlite3VdbeAddOp1(v, OP_Vacuum, iDb);
+    sqlite3VdbeUsesBtree(v, iDb);
+  }
+  return;
+}
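+
+/* Illustrative sketch (not compiled): how VACUUM is typically driven from
+** the public API, including a page-size change, which the rebuild performed
+** by sqlite3RunVacuum() below honors via db->nextPagesize.  A database
+** handle "db" is assumed and error handling is elided. */
+#if 0
+#include <sqlite3.h>
+static int exampleVacuum(sqlite3 *db){
+  /* Takes effect on the next VACUUM, because the whole file is rebuilt */
+  sqlite3_exec(db, "PRAGMA page_size=8192;", 0, 0, 0);
+  return sqlite3_exec(db, "VACUUM;", 0, 0, 0);
+}
+#endif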
+
+/*
+** This routine implements the OP_Vacuum opcode of the VDBE.
+*/
+SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db, int iDb){
+  int rc = SQLITE_OK;     /* Return code from service routines */
+  Btree *pMain;           /* The database being vacuumed */
+  Btree *pTemp;           /* The temporary database we vacuum into */
+  int saved_flags;        /* Saved value of the db->flags */
+  int saved_nChange;      /* Saved value of db->nChange */
+  int saved_nTotalChange; /* Saved value of db->nTotalChange */
+  u8 saved_mTrace;        /* Saved trace settings */
+  Db *pDb = 0;            /* Database to detach at end of vacuum */
+  int isMemDb;            /* True if vacuuming a :memory: database */
+  int nRes;               /* Bytes of reserved space at the end of each page */
+  int nDb;                /* Number of attached databases */
+  const char *zDbMain;    /* Schema name of database to vacuum */
+
+  if( !db->autoCommit ){
+    sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
+    return SQLITE_ERROR;
+  }
+  if( db->nVdbeActive>1 ){
+    sqlite3SetString(pzErrMsg, db,"cannot VACUUM - SQL statements in progress");
+    return SQLITE_ERROR;
+  }
+
+  /* Save the current value of the database flags so that it can be
+  ** restored before returning. Then set the writable-schema flag, and
+  ** disable CHECK and foreign key constraints. */
+  saved_flags = db->flags;
+  saved_nChange = db->nChange;
+  saved_nTotalChange = db->nTotalChange;
+  saved_mTrace = db->mTrace;
+  db->flags |= (SQLITE_WriteSchema | SQLITE_IgnoreChecks
+                 | SQLITE_PreferBuiltin | SQLITE_Vacuum);
+  db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_CountRows);
+  db->mTrace = 0;
+
+  zDbMain = db->aDb[iDb].zDbSName;
+  pMain = db->aDb[iDb].pBt;
+  isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain));
+
+  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
+  ** can be set to 'off' for this file, as it is not recovered if a crash
+  ** occurs anyway. The integrity of the database is maintained by a
+  ** (possibly synchronous) transaction opened on the main database before
+  ** sqlite3BtreeCopyFile() is called.
+  **
+  ** An optimisation would be to use a non-journaled pager.
+  ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
+  ** that actually made the VACUUM run slower. Very little journalling
+  ** actually occurs when doing a vacuum since the vacuum_db is initially
+  ** empty. Only the journal header is written. Apparently it takes more
+  ** time to parse and run the PRAGMA to turn journalling off than it does
+  ** to write the journal header file.
+  */
+  nDb = db->nDb;
+  rc = execSql(db, pzErrMsg, "ATTACH''AS vacuum_db");
+  if( rc!=SQLITE_OK ) goto end_of_vacuum;
+  assert( (db->nDb-1)==nDb );
+  pDb = &db->aDb[nDb];
+  assert( strcmp(pDb->zDbSName,"vacuum_db")==0 );
+  pTemp = pDb->pBt;
+
+  /* The call to execSql() to attach the temp database has left the file
+  ** locked (as there was more than one active statement when the transaction
+  ** to read the schema was concluded). Unlock it here so that this doesn't
+  ** cause problems for the call to BtreeSetPageSize() below. */
+  sqlite3BtreeCommit(pTemp);
+
+  nRes = sqlite3BtreeGetOptimalReserve(pMain);
+
+  /* A VACUUM cannot change the pagesize of an encrypted database. */
+#ifdef SQLITE_HAS_CODEC
+  if( db->nextPagesize ){
+    extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*);
+    int nKey;
+    char *zKey;
+    sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey);
+    if( nKey ) db->nextPagesize = 0;
+  }
+#endif
+
+  sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size);
+  sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0));
+  sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF|PAGER_CACHESPILL);
+
+  /* Begin a transaction and take an exclusive lock on the main database
+  ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below,
+  ** to ensure that we do not try to change the page-size on a WAL database.
+  */
+  rc = execSql(db, pzErrMsg, "BEGIN");
+  if( rc!=SQLITE_OK ) goto end_of_vacuum;
+  rc = sqlite3BtreeBeginTrans(pMain, 2);
+  if( rc!=SQLITE_OK ) goto end_of_vacuum;
+
+  /* Do not attempt to change the page size for a WAL database */
+  if( sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain))
+                                               ==PAGER_JOURNALMODE_WAL ){
+    db->nextPagesize = 0;
+  }
+
+  if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0)
+   || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0))
+   || NEVER(db->mallocFailed)
+  ){
+    rc = SQLITE_NOMEM_BKPT;
+    goto end_of_vacuum;
+  }
+
+#ifndef SQLITE_OMIT_AUTOVACUUM
+  sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac :
+                                           sqlite3BtreeGetAutoVacuum(pMain));
+#endif
+
+  /* Query the schema of the main database. Create a mirror schema
+  ** in the temporary database.
+  */
+  db->init.iDb = nDb; /* force new CREATE statements into vacuum_db */
+  rc = execSqlF(db, pzErrMsg,
+      "SELECT sql FROM \"%w\".sqlite_master"
+      " WHERE type='table'AND name<>'sqlite_sequence'"
+      " AND coalesce(rootpage,1)>0",
+      zDbMain
+  );
+  if( rc!=SQLITE_OK ) goto end_of_vacuum;
+  rc = execSqlF(db, pzErrMsg,
+      "SELECT sql FROM \"%w\".sqlite_master"
+      " WHERE type='index' AND length(sql)>10",
+      zDbMain
+  );
+  if( rc!=SQLITE_OK ) goto end_of_vacuum;
+  db->init.iDb = 0;
+
+  /* Loop through the tables in the main database. For each, do
+  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
+  ** the contents to the temporary database.
+  */
+  rc = execSqlF(db, pzErrMsg,
+      "SELECT'INSERT INTO vacuum_db.'||quote(name)"
+      "||' SELECT*FROM\"%w\".'||quote(name)"
+      "FROM vacuum_db.sqlite_master "
+      "WHERE type='table'AND coalesce(rootpage,1)>0",
+      zDbMain
+  );
+  assert( (db->flags & SQLITE_Vacuum)!=0 );
+  db->flags &= ~SQLITE_Vacuum;
+  if( rc!=SQLITE_OK ) goto end_of_vacuum;
+
+  /* Copy the triggers, views, and virtual tables from the main database
+  ** over to the temporary database. None of these objects has any
+  ** associated storage, so all we have to do is copy their entries
+  ** from the SQLITE_MASTER table.
+  */
+  rc = execSqlF(db, pzErrMsg,
+      "INSERT INTO vacuum_db.sqlite_master"
+      " SELECT*FROM \"%w\".sqlite_master"
+      " WHERE type IN('view','trigger')"
+      " OR(type='table'AND rootpage=0)",
+      zDbMain
+  );
+  if( rc ) goto end_of_vacuum;
+
+  /* At this point, there is a write transaction open on both the
+  ** vacuum database and the main database. Assuming no error occurs,
+  ** both transactions are closed by this block - the main database
+  ** transaction by sqlite3BtreeCopyFile() and the other by an explicit
+  ** call to sqlite3BtreeCommit().
+  */
+  {
+    u32 meta;
+    int i;
+
+    /* This array determines which meta values are preserved in the
+    ** vacuum. Even entries are the meta value number and odd entries
+    ** are an increment to apply to the meta value after the vacuum.
+    ** The increment is used to increase the schema cookie so that other
+    ** connections to the same database will know to reread the schema.
+    */
+    static const unsigned char aCopy[] = {
+       BTREE_SCHEMA_VERSION,     1,  /* Add one to the old schema cookie */
+       BTREE_DEFAULT_CACHE_SIZE, 0,  /* Preserve the default page cache size */
+       BTREE_TEXT_ENCODING,      0,  /* Preserve the text encoding */
+       BTREE_USER_VERSION,       0,  /* Preserve the user version */
+       BTREE_APPLICATION_ID,     0,  /* Preserve the application id */
+    };
+
+    assert( 1==sqlite3BtreeIsInTrans(pTemp) );
+    assert( 1==sqlite3BtreeIsInTrans(pMain) );
+
+    /* Copy Btree meta values */
+    for(i=0; i<ArraySize(aCopy); i+=2){
+      /* GetMeta() and UpdateMeta() cannot fail in this context because
+      ** we already have a write-transaction open on both databases */
+      sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
+      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
+      if( NEVER(rc!=SQLITE_OK) ) goto end_of_vacuum;
+    }
+
+    rc = sqlite3BtreeCopyFile(pMain, pTemp);
+    if( rc!=SQLITE_OK ) goto end_of_vacuum;
+    rc = sqlite3BtreeCommit(pTemp);
+    if( rc!=SQLITE_OK ) goto end_of_vacuum;
+#ifndef SQLITE_OMIT_AUTOVACUUM
+    sqlite3BtreeSetAutoVacuum(pMain, sqlite3BtreeGetAutoVacuum(pTemp));
+#endif
+  }
+
+  assert( rc==SQLITE_OK );
+  rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes,1);
+
+end_of_vacuum:
+  /* Restore the original value of db->flags */
+  db->init.iDb = 0;
+  db->flags = saved_flags;
+  db->nChange = saved_nChange;
+  db->nTotalChange = saved_nTotalChange;
+  db->mTrace = saved_mTrace;
+  sqlite3BtreeSetPageSize(pMain, -1, -1, 1);
+
+  /* Currently there is an SQL level transaction open on the vacuum
+  ** database. No locks are held on any other files (since the main file
+  ** was committed at the btree level). So it is safe to end the transaction
+  ** by manually setting the autoCommit flag to true and detaching the
+  ** vacuum database. The vacuum_db journal file is deleted when the pager
+  ** is closed by the DETACH.
+  */
+  db->autoCommit = 1;
+
+  if( pDb ){
+    sqlite3BtreeClose(pDb->pBt);
+    pDb->pBt = 0;
+    pDb->pSchema = 0;
+  }
+
+  /* This both clears the schemas and reduces the size of the db->aDb[]
+  ** array. */
+  sqlite3ResetAllSchemasOfConnection(db);
+
+  return rc;
+}
+
+#endif /* SQLITE_OMIT_VACUUM && SQLITE_OMIT_ATTACH */
+
+/************** End of vacuum.c **********************************************/
+/************** Begin file vtab.c ********************************************/
+/*
+** 2006 June 10
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code used to help implement virtual tables.
+*/
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/* #include "sqliteInt.h" */
+
+/*
+** Before a virtual table xCreate() or xConnect() method is invoked, the
+** sqlite3.pVtabCtx member variable is set to point to an instance of
+** this struct allocated on the stack. It is used by the implementation of
+** the sqlite3_declare_vtab() and sqlite3_vtab_config() APIs, both of which
+** are invoked only from within xCreate and xConnect methods.
+*/
+struct VtabCtx {
+  VTable *pVTable;    /* The virtual table being constructed */
+  Table *pTab;        /* The Table object to which the virtual table belongs */
+  VtabCtx *pPrior;    /* Parent context (if any) */
+  int bDeclared;      /* True after sqlite3_declare_vtab() is called */
+};
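+
+/* Illustrative sketch (not compiled): registering a module through the
+** public entry points defined below.  "my_module" and exampleAuxDestroy
+** are hypothetical; a real sqlite3_module must also supply xBestIndex,
+** xOpen, xClose, xFilter, xNext, xEof and xColumn/xRowid callbacks. */
+#if 0
+#include <sqlite3.h>
+static void exampleAuxDestroy(void *pAux){ sqlite3_free(pAux); }
+static int exampleRegister(sqlite3 *db, const sqlite3_module *pMyModule){
+  void *pAux = sqlite3_malloc(16);   /* context handed to xCreate/xConnect */
+  return sqlite3_create_module_v2(db, "my_module", pMyModule, pAux,
+                                  exampleAuxDestroy);
+}
+#endif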
+
+/*
+** Construct and install a Module object for a virtual table. When this
+** routine is called, it is guaranteed that all appropriate locks are held
+** and the module is not already part of the connection.
+*/
+SQLITE_PRIVATE Module *sqlite3VtabCreateModule(
+  sqlite3 *db,                    /* Database in which module is registered */
+  const char *zName,              /* Name assigned to this module */
+  const sqlite3_module *pModule,  /* The definition of the module */
+  void *pAux,                     /* Context pointer for xCreate/xConnect */
+  void (*xDestroy)(void *)        /* Module destructor function */
+){
+  Module *pMod;
+  int nName = sqlite3Strlen30(zName);
+  pMod = (Module *)sqlite3DbMallocRawNN(db, sizeof(Module) + nName + 1);
+  if( pMod ){
+    Module *pDel;
+    char *zCopy = (char *)(&pMod[1]);
+    memcpy(zCopy, zName, nName+1);
+    pMod->zName = zCopy;
+    pMod->pModule = pModule;
+    pMod->pAux = pAux;
+    pMod->xDestroy = xDestroy;
+    pMod->pEpoTab = 0;
+    pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod);
+    assert( pDel==0 || pDel==pMod );
+    if( pDel ){
+      sqlite3OomFault(db);
+      sqlite3DbFree(db, pDel);
+      pMod = 0;
+    }
+  }
+  return pMod;
+}
+
+/*
+** The actual function that does the work of creating a new module.
+** This function implements the sqlite3_create_module() and
+** sqlite3_create_module_v2() interfaces.
+*/
+static int createModule(
+  sqlite3 *db,                    /* Database in which module is registered */
+  const char *zName,              /* Name assigned to this module */
+  const sqlite3_module *pModule,  /* The definition of the module */
+  void *pAux,                     /* Context pointer for xCreate/xConnect */
+  void (*xDestroy)(void *)        /* Module destructor function */
+){
+  int rc = SQLITE_OK;
+
+  sqlite3_mutex_enter(db->mutex);
+  if( sqlite3HashFind(&db->aModule, zName) ){
+    rc = SQLITE_MISUSE_BKPT;
+  }else{
+    (void)sqlite3VtabCreateModule(db, zName, pModule, pAux, xDestroy);
+  }
+  rc = sqlite3ApiExit(db, rc);
+  if( rc!=SQLITE_OK && xDestroy ) xDestroy(pAux);
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+
+/*
+** External API function used to create a new virtual-table module.
+*/
+SQLITE_API int sqlite3_create_module(
+  sqlite3 *db,                    /* Database in which module is registered */
+  const char *zName,              /* Name assigned to this module */
+  const sqlite3_module *pModule,  /* The definition of the module */
+  void *pAux                      /* Context pointer for xCreate/xConnect */
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  return createModule(db, zName, pModule, pAux, 0);
+}
+
+/*
+** External API function used to create a new virtual-table module.
+*/
+SQLITE_API int sqlite3_create_module_v2(
+  sqlite3 *db,                    /* Database in which module is registered */
+  const char *zName,              /* Name assigned to this module */
+  const sqlite3_module *pModule,  /* The definition of the module */
+  void *pAux,                     /* Context pointer for xCreate/xConnect */
+  void (*xDestroy)(void *)        /* Module destructor function */
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  return createModule(db, zName, pModule, pAux, xDestroy);
+}
+
+/*
+** Lock the virtual table so that it cannot be disconnected.
+** Locks nest. Every lock should have a corresponding unlock.
+** If an unlock is omitted, resource leaks will occur.
+**
+** If a disconnect is attempted while a virtual table is locked,
+** the disconnect is deferred until all locks have been removed.
+*/
+SQLITE_PRIVATE void sqlite3VtabLock(VTable *pVTab){
+  pVTab->nRef++;
+}
+
+
+/*
+** pTab is a pointer to a Table structure representing a virtual-table.
+** Return a pointer to the VTable object used by connection db to access
+** this virtual-table, if one has been created, or NULL otherwise.
+*/ +SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3 *db, Table *pTab){ + VTable *pVtab; + assert( IsVirtual(pTab) ); + for(pVtab=pTab->pVTable; pVtab && pVtab->db!=db; pVtab=pVtab->pNext); + return pVtab; +} + +/* +** Decrement the ref-count on a virtual table object. When the ref-count +** reaches zero, call the xDisconnect() method to delete the object. +*/ +SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){ + sqlite3 *db = pVTab->db; + + assert( db ); + assert( pVTab->nRef>0 ); + assert( db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_ZOMBIE ); + + pVTab->nRef--; + if( pVTab->nRef==0 ){ + sqlite3_vtab *p = pVTab->pVtab; + if( p ){ + p->pModule->xDisconnect(p); + } + sqlite3DbFree(db, pVTab); + } +} + +/* +** Table p is a virtual table. This function moves all elements in the +** p->pVTable list to the sqlite3.pDisconnect lists of their associated +** database connections to be disconnected at the next opportunity. +** Except, if argument db is not NULL, then the entry associated with +** connection db is left in the p->pVTable list. +*/ +static VTable *vtabDisconnectAll(sqlite3 *db, Table *p){ + VTable *pRet = 0; + VTable *pVTable = p->pVTable; + p->pVTable = 0; + + /* Assert that the mutex (if any) associated with the BtShared database + ** that contains table p is held by the caller. See header comments + ** above function sqlite3VtabUnlockList() for an explanation of why + ** this makes it safe to access the sqlite3.pDisconnect list of any + ** database connection that may have an entry in the p->pVTable list. + */ + assert( db==0 || sqlite3SchemaMutexHeld(db, 0, p->pSchema) ); + + while( pVTable ){ + sqlite3 *db2 = pVTable->db; + VTable *pNext = pVTable->pNext; + assert( db2 ); + if( db2==db ){ + pRet = pVTable; + p->pVTable = pRet; + pRet->pNext = 0; + }else{ + pVTable->pNext = db2->pDisconnect; + db2->pDisconnect = pVTable; + } + pVTable = pNext; + } + + assert( !db || pRet ); + return pRet; +} + +/* +** Table *p is a virtual table. This function removes the VTable object +** for table *p associated with database connection db from the linked +** list in p->pVTab. It also decrements the VTable ref count. This is +** used when closing database connection db to free all of its VTable +** objects without disturbing the rest of the Schema object (which may +** be being used by other shared-cache connections). +*/ +SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p){ + VTable **ppVTab; + + assert( IsVirtual(p) ); + assert( sqlite3BtreeHoldsAllMutexes(db) ); + assert( sqlite3_mutex_held(db->mutex) ); + + for(ppVTab=&p->pVTable; *ppVTab; ppVTab=&(*ppVTab)->pNext){ + if( (*ppVTab)->db==db ){ + VTable *pVTab = *ppVTab; + *ppVTab = pVTab->pNext; + sqlite3VtabUnlock(pVTab); + break; + } + } +} + + +/* +** Disconnect all the virtual table objects in the sqlite3.pDisconnect list. +** +** This function may only be called when the mutexes associated with all +** shared b-tree databases opened using connection db are held by the +** caller. This is done to protect the sqlite3.pDisconnect list. The +** sqlite3.pDisconnect list is accessed only as follows: +** +** 1) By this function. In this case, all BtShared mutexes and the mutex +** associated with the database handle itself must be held. +** +** 2) By function vtabDisconnectAll(), when it adds a VTable entry to +** the sqlite3.pDisconnect list. 
+** In this case either the BtShared mutex
+** associated with the database the virtual table is stored in is held
+** or, if the virtual table is stored in a non-sharable database, then
+** the database handle mutex is held.
+**
+** As a result, a sqlite3.pDisconnect cannot be accessed simultaneously
+** by multiple threads. It is thread-safe.
+*/
+SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){
+  VTable *p = db->pDisconnect;
+  db->pDisconnect = 0;
+
+  assert( sqlite3BtreeHoldsAllMutexes(db) );
+  assert( sqlite3_mutex_held(db->mutex) );
+
+  if( p ){
+    sqlite3ExpirePreparedStatements(db);
+    do {
+      VTable *pNext = p->pNext;
+      sqlite3VtabUnlock(p);
+      p = pNext;
+    }while( p );
+  }
+}
+
+/*
+** Clear any and all virtual-table information from the Table record.
+** This routine is called, for example, just before deleting the Table
+** record.
+**
+** Since it is a virtual-table, the Table structure contains a pointer
+** to the head of a linked list of VTable structures. Each VTable
+** structure is associated with a single sqlite3* user of the schema.
+** The reference count of the VTable structure associated with database
+** connection db is decremented immediately (which may lead to the
+** structure being xDisconnected and freed). Any other VTable structures
+** in the list are moved to the sqlite3.pDisconnect list of the associated
+** database connection.
+*/
+SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){
+  if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p);
+  if( p->azModuleArg ){
+    int i;
+    for(i=0; i<p->nModuleArg; i++){
+      if( i!=1 ) sqlite3DbFree(db, p->azModuleArg[i]);
+    }
+    sqlite3DbFree(db, p->azModuleArg);
+  }
+}
+
+/*
+** Add a new module argument to pTable->azModuleArg[].
+** The string is not copied - the pointer is stored. The
+** string will be freed automatically when the table is
+** deleted.
+*/
+static void addModuleArgument(sqlite3 *db, Table *pTable, char *zArg){
+  int nBytes = sizeof(char *)*(2+pTable->nModuleArg);
+  char **azModuleArg;
+  azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes);
+  if( azModuleArg==0 ){
+    sqlite3DbFree(db, zArg);
+  }else{
+    int i = pTable->nModuleArg++;
+    azModuleArg[i] = zArg;
+    azModuleArg[i+1] = 0;
+    pTable->azModuleArg = azModuleArg;
+  }
+}
+
+/*
+** The parser calls this routine when it first sees a CREATE VIRTUAL TABLE
+** statement. The module name has been parsed, but the optional list
+** of parameters that follow the module name are still pending.
+*/ +SQLITE_PRIVATE void sqlite3VtabBeginParse( + Parse *pParse, /* Parsing context */ + Token *pName1, /* Name of new table, or database name */ + Token *pName2, /* Name of new table or NULL */ + Token *pModuleName, /* Name of the module for the virtual table */ + int ifNotExists /* No error if the table already exists */ +){ + int iDb; /* The database the table is being created in */ + Table *pTable; /* The new virtual table */ + sqlite3 *db; /* Database connection */ + + sqlite3StartTable(pParse, pName1, pName2, 0, 0, 1, ifNotExists); + pTable = pParse->pNewTable; + if( pTable==0 ) return; + assert( 0==pTable->pIndex ); + + db = pParse->db; + iDb = sqlite3SchemaToIndex(db, pTable->pSchema); + assert( iDb>=0 ); + + pTable->tabFlags |= TF_Virtual; + pTable->nModuleArg = 0; + addModuleArgument(db, pTable, sqlite3NameFromToken(db, pModuleName)); + addModuleArgument(db, pTable, 0); + addModuleArgument(db, pTable, sqlite3DbStrDup(db, pTable->zName)); + assert( (pParse->sNameToken.z==pName2->z && pName2->z!=0) + || (pParse->sNameToken.z==pName1->z && pName2->z==0) + ); + pParse->sNameToken.n = (int)( + &pModuleName->z[pModuleName->n] - pParse->sNameToken.z + ); + +#ifndef SQLITE_OMIT_AUTHORIZATION + /* Creating a virtual table invokes the authorization callback twice. + ** The first invocation, to obtain permission to INSERT a row into the + ** sqlite_master table, has already been made by sqlite3StartTable(). + ** The second call, to obtain permission to create the table, is made now. + */ + if( pTable->azModuleArg ){ + sqlite3AuthCheck(pParse, SQLITE_CREATE_VTABLE, pTable->zName, + pTable->azModuleArg[0], pParse->db->aDb[iDb].zDbSName); + } +#endif +} + +/* +** This routine takes the module argument that has been accumulating +** in pParse->zArg[] and appends it to the list of arguments on the +** virtual table currently under construction in pParse->pTable. +*/ +static void addArgumentToVtab(Parse *pParse){ + if( pParse->sArg.z && pParse->pNewTable ){ + const char *z = (const char*)pParse->sArg.z; + int n = pParse->sArg.n; + sqlite3 *db = pParse->db; + addModuleArgument(db, pParse->pNewTable, sqlite3DbStrNDup(db, z, n)); + } +} + +/* +** The parser calls this routine after the CREATE VIRTUAL TABLE statement +** has been completely parsed. +*/ +SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ + Table *pTab = pParse->pNewTable; /* The table being constructed */ + sqlite3 *db = pParse->db; /* The database connection */ + + if( pTab==0 ) return; + addArgumentToVtab(pParse); + pParse->sArg.z = 0; + if( pTab->nModuleArg<1 ) return; + + /* If the CREATE VIRTUAL TABLE statement is being entered for the + ** first time (in other words if the virtual table is actually being + ** created now instead of just being read out of sqlite_master) then + ** do additional initialization work and store the statement text + ** in the sqlite_master table. + */ + if( !db->init.busy ){ + char *zStmt; + char *zWhere; + int iDb; + int iReg; + Vdbe *v; + + /* Compute the complete text of the CREATE VIRTUAL TABLE statement */ + if( pEnd ){ + pParse->sNameToken.n = (int)(pEnd->z - pParse->sNameToken.z) + pEnd->n; + } + zStmt = sqlite3MPrintf(db, "CREATE VIRTUAL TABLE %T", &pParse->sNameToken); + + /* A slot for the record has already been allocated in the + ** SQLITE_MASTER table. We just need to update that slot with all + ** the information we've collected. 
+  **
+  ** The VM register number pParse->regRowid holds the rowid of an
+  ** entry in the sqlite_master table that was created for this vtab
+  ** by sqlite3StartTable().
+  */
+    iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+    sqlite3NestedParse(pParse,
+      "UPDATE %Q.%s "
+         "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q "
+       "WHERE rowid=#%d",
+      db->aDb[iDb].zDbSName, MASTER_NAME,
+      pTab->zName,
+      pTab->zName,
+      zStmt,
+      pParse->regRowid
+    );
+    sqlite3DbFree(db, zStmt);
+    v = sqlite3GetVdbe(pParse);
+    sqlite3ChangeCookie(pParse, iDb);
+
+    sqlite3VdbeAddOp0(v, OP_Expire);
+    zWhere = sqlite3MPrintf(db, "name='%q' AND type='table'", pTab->zName);
+    sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere);
+
+    iReg = ++pParse->nMem;
+    sqlite3VdbeLoadString(v, iReg, pTab->zName);
+    sqlite3VdbeAddOp2(v, OP_VCreate, iDb, iReg);
+  }
+
+  /* If we are rereading the sqlite_master table create the in-memory
+  ** record of the table. The xConnect() method is not called until
+  ** the first time the virtual table is used in an SQL statement. This
+  ** allows a schema that contains virtual tables to be loaded before
+  ** the required virtual table implementations are registered. */
+  else {
+    Table *pOld;
+    Schema *pSchema = pTab->pSchema;
+    const char *zName = pTab->zName;
+    assert( sqlite3SchemaMutexHeld(db, 0, pSchema) );
+    pOld = sqlite3HashInsert(&pSchema->tblHash, zName, pTab);
+    if( pOld ){
+      sqlite3OomFault(db);
+      assert( pTab==pOld );  /* Malloc must have failed inside HashInsert() */
+      return;
+    }
+    pParse->pNewTable = 0;
+  }
+}
+
+/*
+** The parser calls this routine when it sees the first token
+** of an argument to the module name in a CREATE VIRTUAL TABLE statement.
+*/
+SQLITE_PRIVATE void sqlite3VtabArgInit(Parse *pParse){
+  addArgumentToVtab(pParse);
+  pParse->sArg.z = 0;
+  pParse->sArg.n = 0;
+}
+
+/*
+** The parser calls this routine for each token after the first token
+** in an argument to the module name in a CREATE VIRTUAL TABLE statement.
+*/
+SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse *pParse, Token *p){
+  Token *pArg = &pParse->sArg;
+  if( pArg->z==0 ){
+    pArg->z = p->z;
+    pArg->n = p->n;
+  }else{
+    assert(pArg->z <= p->z);
+    pArg->n = (int)(&p->z[p->n] - pArg->z);
+  }
+}
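+
+/* Illustrative sketch (not compiled): inside a module's xConnect/xCreate,
+** the constructor invoked by vtabCallConstructor() below is expected to
+** call sqlite3_declare_vtab() to establish the table's schema.  The HIDDEN
+** token in the column type is the one stripped out by the constructor
+** wrapper below.  The function and schema names here are hypothetical. */
+#if 0
+#include <sqlite3.h>
+#include <string.h>
+static int exampleConnect(
+  sqlite3 *db, void *pAux,
+  int argc, const char *const*argv,
+  sqlite3_vtab **ppVtab, char **pzErr
+){
+  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b HIDDEN)");
+  if( rc!=SQLITE_OK ) return rc;
+  *ppVtab = sqlite3_malloc(sizeof(**ppVtab));
+  if( *ppVtab==0 ) return SQLITE_NOMEM;
+  memset(*ppVtab, 0, sizeof(**ppVtab));
+  return SQLITE_OK;
+}
+#endif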
+
+/*
+** Invoke a virtual table constructor (either xCreate or xConnect). The
+** pointer to the function to invoke is passed as the fourth parameter
+** to this procedure.
+*/
+static int vtabCallConstructor(
+  sqlite3 *db,
+  Table *pTab,
+  Module *pMod,
+  int (*xConstruct)(sqlite3*,void*,int,const char*const*,sqlite3_vtab**,char**),
+  char **pzErr
+){
+  VtabCtx sCtx;
+  VTable *pVTable;
+  int rc;
+  const char *const*azArg = (const char *const*)pTab->azModuleArg;
+  int nArg = pTab->nModuleArg;
+  char *zErr = 0;
+  char *zModuleName;
+  int iDb;
+  VtabCtx *pCtx;
+
+  /* Check that the virtual-table is not already being initialized */
+  for(pCtx=db->pVtabCtx; pCtx; pCtx=pCtx->pPrior){
+    if( pCtx->pTab==pTab ){
+      *pzErr = sqlite3MPrintf(db,
+          "vtable constructor called recursively: %s", pTab->zName
+      );
+      return SQLITE_LOCKED;
+    }
+  }
+
+  zModuleName = sqlite3MPrintf(db, "%s", pTab->zName);
+  if( !zModuleName ){
+    return SQLITE_NOMEM_BKPT;
+  }
+
+  pVTable = sqlite3DbMallocZero(db, sizeof(VTable));
+  if( !pVTable ){
+    sqlite3DbFree(db, zModuleName);
+    return SQLITE_NOMEM_BKPT;
+  }
+  pVTable->db = db;
+  pVTable->pMod = pMod;
+
+  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
+  pTab->azModuleArg[1] = db->aDb[iDb].zDbSName;
+
+  /* Invoke the virtual table constructor */
+  assert( &db->pVtabCtx );
+  assert( xConstruct );
+  sCtx.pTab = pTab;
+  sCtx.pVTable = pVTable;
+  sCtx.pPrior = db->pVtabCtx;
+  sCtx.bDeclared = 0;
+  db->pVtabCtx = &sCtx;
+  rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr);
+  db->pVtabCtx = sCtx.pPrior;
+  if( rc==SQLITE_NOMEM ) sqlite3OomFault(db);
+  assert( sCtx.pTab==pTab );
+
+  if( SQLITE_OK!=rc ){
+    if( zErr==0 ){
+      *pzErr = sqlite3MPrintf(db, "vtable constructor failed: %s", zModuleName);
+    }else {
+      *pzErr = sqlite3MPrintf(db, "%s", zErr);
+      sqlite3_free(zErr);
+    }
+    sqlite3DbFree(db, pVTable);
+  }else if( ALWAYS(pVTable->pVtab) ){
+    /* Justification of ALWAYS():  A correct vtab constructor must allocate
+    ** the sqlite3_vtab object if successful. */
+    memset(pVTable->pVtab, 0, sizeof(pVTable->pVtab[0]));
+    pVTable->pVtab->pModule = pMod->pModule;
+    pVTable->nRef = 1;
+    if( sCtx.bDeclared==0 ){
+      const char *zFormat = "vtable constructor did not declare schema: %s";
+      *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName);
+      sqlite3VtabUnlock(pVTable);
+      rc = SQLITE_ERROR;
+    }else{
+      int iCol;
+      u8 oooHidden = 0;
+      /* If everything went according to plan, link the new VTable structure
+      ** into the linked list headed by pTab->pVTable. Then loop through the
+      ** columns of the table to see if any of them contain the token "hidden".
+      ** If so, set the Column COLFLAG_HIDDEN flag and remove the token from
+      ** the type string. */
+      pVTable->pNext = pTab->pVTable;
+      pTab->pVTable = pVTable;
+
+      for(iCol=0; iCol<pTab->nCol; iCol++){
+        char *zType = sqlite3ColumnType(&pTab->aCol[iCol], "");
+        int nType;
+        int i = 0;
+        nType = sqlite3Strlen30(zType);
+        for(i=0; i<nType; i++){
+          if( 0==sqlite3StrNICmp("hidden", &zType[i], 6)
+           && (i==0 || zType[i-1]==' ')
+           && (zType[i+6]=='\0' || zType[i+6]==' ')
+          ){
+            break;
+          }
+        }
+        if( i<nType ){
+          int j;
+          int nDel = 6 + (zType[i+6] ? 1 : 0);
+          for(j=i; (j+nDel)<=nType; j++){
+            zType[j] = zType[j+nDel];
+          }
+          if( zType[i]=='\0' && i>0 ){
+            assert(zType[i-1]==' ');
+            zType[i-1] = '\0';
+          }
+          pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN;
+          oooHidden = TF_OOOHidden;
+        }else{
+          pTab->tabFlags |= oooHidden;
+        }
+      }
+    }
+  }
+
+  sqlite3DbFree(db, zModuleName);
+  return rc;
+}
+
+/*
+** This function is invoked by the parser to call the xConnect() method
+** of the virtual table pTab. If an error occurs, an error code is returned
+** and an error left in pParse.
+**
+** This call is a no-op if table pTab is not a virtual table.
+*/ +SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse *pParse, Table *pTab){ + sqlite3 *db = pParse->db; + const char *zMod; + Module *pMod; + int rc; + + assert( pTab ); + if( (pTab->tabFlags & TF_Virtual)==0 || sqlite3GetVTable(db, pTab) ){ + return SQLITE_OK; + } + + /* Locate the required virtual table module */ + zMod = pTab->azModuleArg[0]; + pMod = (Module*)sqlite3HashFind(&db->aModule, zMod); + + if( !pMod ){ + const char *zModule = pTab->azModuleArg[0]; + sqlite3ErrorMsg(pParse, "no such module: %s", zModule); + rc = SQLITE_ERROR; + }else{ + char *zErr = 0; + rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xConnect, &zErr); + if( rc!=SQLITE_OK ){ + sqlite3ErrorMsg(pParse, "%s", zErr); + } + sqlite3DbFree(db, zErr); + } + + return rc; +} +/* +** Grow the db->aVTrans[] array so that there is room for at least one +** more v-table. Return SQLITE_NOMEM if a malloc fails, or SQLITE_OK otherwise. +*/ +static int growVTrans(sqlite3 *db){ + const int ARRAY_INCR = 5; + + /* Grow the sqlite3.aVTrans array if required */ + if( (db->nVTrans%ARRAY_INCR)==0 ){ + VTable **aVTrans; + int nBytes = sizeof(sqlite3_vtab *) * (db->nVTrans + ARRAY_INCR); + aVTrans = sqlite3DbRealloc(db, (void *)db->aVTrans, nBytes); + if( !aVTrans ){ + return SQLITE_NOMEM_BKPT; + } + memset(&aVTrans[db->nVTrans], 0, sizeof(sqlite3_vtab *)*ARRAY_INCR); + db->aVTrans = aVTrans; + } + + return SQLITE_OK; +} + +/* +** Add the virtual table pVTab to the array sqlite3.aVTrans[]. Space should +** have already been reserved using growVTrans(). +*/ +static void addToVTrans(sqlite3 *db, VTable *pVTab){ + /* Add pVtab to the end of sqlite3.aVTrans */ + db->aVTrans[db->nVTrans++] = pVTab; + sqlite3VtabLock(pVTab); +} + +/* +** This function is invoked by the vdbe to call the xCreate method +** of the virtual table named zTab in database iDb. +** +** If an error occurs, *pzErr is set to point to an English language +** description of the error and an SQLITE_XXX error code is returned. +** In this case the caller must call sqlite3DbFree(db, ) on *pzErr. +*/ +SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab, char **pzErr){ + int rc = SQLITE_OK; + Table *pTab; + Module *pMod; + const char *zMod; + + pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName); + assert( pTab && (pTab->tabFlags & TF_Virtual)!=0 && !pTab->pVTable ); + + /* Locate the required virtual table module */ + zMod = pTab->azModuleArg[0]; + pMod = (Module*)sqlite3HashFind(&db->aModule, zMod); + + /* If the module has been registered and includes a Create method, + ** invoke it now. If the module has not been registered, return an + ** error. Otherwise, do nothing. + */ + if( pMod==0 || pMod->pModule->xCreate==0 || pMod->pModule->xDestroy==0 ){ + *pzErr = sqlite3MPrintf(db, "no such module: %s", zMod); + rc = SQLITE_ERROR; + }else{ + rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xCreate, pzErr); + } + + /* Justification of ALWAYS(): The xConstructor method is required to + ** create a valid sqlite3_vtab if it returns SQLITE_OK. */ + if( rc==SQLITE_OK && ALWAYS(sqlite3GetVTable(db, pTab)) ){ + rc = growVTrans(db); + if( rc==SQLITE_OK ){ + addToVTrans(db, sqlite3GetVTable(db, pTab)); + } + } + + return rc; +} + +/* +** This function is used to set the schema of a virtual table. It is only +** valid to call this function from within the xCreate() or xConnect() of a +** virtual table module. 
+*/ +SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ + VtabCtx *pCtx; + Parse *pParse; + int rc = SQLITE_OK; + Table *pTab; + char *zErr = 0; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + sqlite3_mutex_enter(db->mutex); + pCtx = db->pVtabCtx; + if( !pCtx || pCtx->bDeclared ){ + sqlite3Error(db, SQLITE_MISUSE); + sqlite3_mutex_leave(db->mutex); + return SQLITE_MISUSE_BKPT; + } + pTab = pCtx->pTab; + assert( (pTab->tabFlags & TF_Virtual)!=0 ); + + pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); + if( pParse==0 ){ + rc = SQLITE_NOMEM_BKPT; + }else{ + pParse->declareVtab = 1; + pParse->db = db; + pParse->nQueryLoop = 1; + + if( SQLITE_OK==sqlite3RunParser(pParse, zCreateTable, &zErr) + && pParse->pNewTable + && !db->mallocFailed + && !pParse->pNewTable->pSelect + && (pParse->pNewTable->tabFlags & TF_Virtual)==0 + ){ + if( !pTab->aCol ){ + Table *pNew = pParse->pNewTable; + Index *pIdx; + pTab->aCol = pNew->aCol; + pTab->nCol = pNew->nCol; + pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); + pNew->nCol = 0; + pNew->aCol = 0; + assert( pTab->pIndex==0 ); + if( !HasRowid(pNew) && pCtx->pVTable->pMod->pModule->xUpdate!=0 ){ + rc = SQLITE_ERROR; + } + pIdx = pNew->pIndex; + if( pIdx ){ + assert( pIdx->pNext==0 ); + pTab->pIndex = pIdx; + pNew->pIndex = 0; + pIdx->pTable = pTab; + } + } + pCtx->bDeclared = 1; + }else{ + sqlite3ErrorWithMsg(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr); + sqlite3DbFree(db, zErr); + rc = SQLITE_ERROR; + } + pParse->declareVtab = 0; + + if( pParse->pVdbe ){ + sqlite3VdbeFinalize(pParse->pVdbe); + } + sqlite3DeleteTable(db, pParse->pNewTable); + sqlite3ParserReset(pParse); + sqlite3StackFree(db, pParse); + } + + assert( (rc&0xff)==rc ); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** This function is invoked by the vdbe to call the xDestroy method +** of the virtual table named zTab in database iDb. This occurs +** when a DROP TABLE is mentioned. +** +** This call is a no-op if zTab is not a virtual table. +*/ +SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab){ + int rc = SQLITE_OK; + Table *pTab; + + pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName); + if( pTab!=0 && ALWAYS(pTab->pVTable!=0) ){ + VTable *p; + int (*xDestroy)(sqlite3_vtab *); + for(p=pTab->pVTable; p; p=p->pNext){ + assert( p->pVtab ); + if( p->pVtab->nRef>0 ){ + return SQLITE_LOCKED; + } + } + p = vtabDisconnectAll(db, pTab); + xDestroy = p->pMod->pModule->xDestroy; + assert( xDestroy!=0 ); /* Checked before the virtual table is created */ + rc = xDestroy(p->pVtab); + /* Remove the sqlite3_vtab* from the aVTrans[] array, if applicable */ + if( rc==SQLITE_OK ){ + assert( pTab->pVTable==p && p->pNext==0 ); + p->pVtab = 0; + pTab->pVTable = 0; + sqlite3VtabUnlock(p); + } + } + + return rc; +} + +/* +** This function invokes either the xRollback or xCommit method +** of each of the virtual tables in the sqlite3.aVTrans array. The method +** called is identified by the second argument, "offset", which is +** the offset of the method to call in the sqlite3_module structure. +** +** The array is cleared after invoking the callbacks. 
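+**
+** The only two "offset" values ever passed are offsetof(sqlite3_module,
+** xCommit) and offsetof(sqlite3_module, xRollback); see sqlite3VtabCommit()
+** and sqlite3VtabRollback() below.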
+*/
+static void callFinaliser(sqlite3 *db, int offset){
+  int i;
+  if( db->aVTrans ){
+    VTable **aVTrans = db->aVTrans;
+    db->aVTrans = 0;
+    for(i=0; i<db->nVTrans; i++){
+      VTable *pVTab = aVTrans[i];
+      sqlite3_vtab *p = pVTab->pVtab;
+      if( p ){
+        int (*x)(sqlite3_vtab *);
+        x = *(int (**)(sqlite3_vtab *))((char *)p->pModule + offset);
+        if( x ) x(p);
+      }
+      pVTab->iSavepoint = 0;
+      sqlite3VtabUnlock(pVTab);
+    }
+    sqlite3DbFree(db, aVTrans);
+    db->nVTrans = 0;
+  }
+}
+
+/*
+** Invoke the xSync method of all virtual tables in the sqlite3.aVTrans
+** array. Return the error code for the first error that occurs, or
+** SQLITE_OK if all xSync operations are successful.
+**
+** If an error message is available, leave it in p->zErrMsg.
+*/
+SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe *p){
+  int i;
+  int rc = SQLITE_OK;
+  VTable **aVTrans = db->aVTrans;
+
+  db->aVTrans = 0;
+  for(i=0; rc==SQLITE_OK && i<db->nVTrans; i++){
+    int (*x)(sqlite3_vtab *);
+    sqlite3_vtab *pVtab = aVTrans[i]->pVtab;
+    if( pVtab && (x = pVtab->pModule->xSync)!=0 ){
+      rc = x(pVtab);
+      sqlite3VtabImportErrmsg(p, pVtab);
+    }
+  }
+  db->aVTrans = aVTrans;
+  return rc;
+}
+
+/*
+** Invoke the xRollback method of all virtual tables in the
+** sqlite3.aVTrans array. Then clear the array itself.
+*/
+SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db){
+  callFinaliser(db, offsetof(sqlite3_module,xRollback));
+  return SQLITE_OK;
+}
+
+/*
+** Invoke the xCommit method of all virtual tables in the
+** sqlite3.aVTrans array. Then clear the array itself.
+*/
+SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db){
+  callFinaliser(db, offsetof(sqlite3_module,xCommit));
+  return SQLITE_OK;
+}
+
+/*
+** If the virtual table pVtab supports the transaction interface
+** (xBegin/xRollback/xCommit and optionally xSync) and a transaction is
+** not currently open, invoke the xBegin method now.
+**
+** If the xBegin call is successful, place the sqlite3_vtab pointer
+** in the sqlite3.aVTrans array.
+*/
+SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *db, VTable *pVTab){
+  int rc = SQLITE_OK;
+  const sqlite3_module *pModule;
+
+  /* Special case: If db->aVTrans is NULL and db->nVTrans is greater
+  ** than zero, then this function is being called from within a
+  ** virtual module xSync() callback. It is illegal to write to
+  ** virtual module tables in this case, so return SQLITE_LOCKED.
+  */
+  if( sqlite3VtabInSync(db) ){
+    return SQLITE_LOCKED;
+  }
+  if( !pVTab ){
+    return SQLITE_OK;
+  }
+  pModule = pVTab->pVtab->pModule;
+
+  if( pModule->xBegin ){
+    int i;
+
+    /* If pVtab is already in the aVTrans array, return early */
+    for(i=0; i<db->nVTrans; i++){
+      if( db->aVTrans[i]==pVTab ){
+        return SQLITE_OK;
+      }
+    }
+
+    /* Invoke the xBegin method. If successful, add the vtab to the
+    ** sqlite3.aVTrans[] array. */
+    rc = growVTrans(db);
+    if( rc==SQLITE_OK ){
+      rc = pModule->xBegin(pVTab->pVtab);
+      if( rc==SQLITE_OK ){
+        int iSvpt = db->nStatement + db->nSavepoint;
+        addToVTrans(db, pVTab);
+        if( iSvpt && pModule->xSavepoint ){
+          pVTab->iSavepoint = iSvpt;
+          rc = pModule->xSavepoint(pVTab->pVtab, iSvpt-1);
+        }
+      }
+    }
+  }
+  return rc;
+}
+
+/*
+** Invoke either the xSavepoint, xRollbackTo or xRelease method of all
+** virtual tables that currently have an open transaction. Pass iSavepoint
+** as the second argument to the virtual table method invoked.
+**
+** If op is SAVEPOINT_BEGIN, the xSavepoint method is invoked. If it is
+** SAVEPOINT_ROLLBACK, the xRollbackTo method. Otherwise, if op is
+** SAVEPOINT_RELEASE, then the xRelease method of each virtual table with
+** an open transaction is invoked.
+**
+** If any virtual table method returns an error code other than SQLITE_OK,
+** processing is abandoned and the error returned to the caller of this
+** function immediately. If all calls to virtual table methods are
+** successful, SQLITE_OK is returned.
+*/
+SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){
+  int rc = SQLITE_OK;
+
+  assert( op==SAVEPOINT_RELEASE||op==SAVEPOINT_ROLLBACK||op==SAVEPOINT_BEGIN );
+  assert( iSavepoint>=-1 );
+  if( db->aVTrans ){
+    int i;
+    for(i=0; rc==SQLITE_OK && i<db->nVTrans; i++){
+      VTable *pVTab = db->aVTrans[i];
+      const sqlite3_module *pMod = pVTab->pMod->pModule;
+      if( pVTab->pVtab && pMod->iVersion>=2 ){
+        int (*xMethod)(sqlite3_vtab *, int);
+        switch( op ){
+          case SAVEPOINT_BEGIN:
+            xMethod = pMod->xSavepoint;
+            pVTab->iSavepoint = iSavepoint+1;
+            break;
+          case SAVEPOINT_ROLLBACK:
+            xMethod = pMod->xRollbackTo;
+            break;
+          default:
+            xMethod = pMod->xRelease;
+            break;
+        }
+        if( xMethod && pVTab->iSavepoint>iSavepoint ){
+          rc = xMethod(pVTab->pVtab, iSavepoint);
+        }
+      }
+    }
+  }
+  return rc;
+}
+
+/*
+** The first parameter (pDef) is a function implementation. The
+** second parameter (pExpr) is the first argument to this function.
+** If pExpr is a column in a virtual table, then let the virtual
+** table implementation have an opportunity to overload the function.
+**
+** This routine is used to allow virtual table implementations to
+** overload MATCH, LIKE, GLOB, and REGEXP operators.
+**
+** Return either the pDef argument (indicating no change) or a
+** new FuncDef structure that is marked as ephemeral using the
+** SQLITE_FUNC_EPHEM flag.
+*/
+SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(
+  sqlite3 *db,    /* Database connection for reporting malloc problems */
+  FuncDef *pDef,  /* Function to possibly overload */
+  int nArg,       /* Number of arguments to the function */
+  Expr *pExpr     /* First argument to the function */
+){
+  Table *pTab;
+  sqlite3_vtab *pVtab;
+  sqlite3_module *pMod;
+  void (*xSFunc)(sqlite3_context*,int,sqlite3_value**) = 0;
+  void *pArg = 0;
+  FuncDef *pNew;
+  int rc = 0;
+  char *zLowerName;
+  unsigned char *z;
+
+  /* Check to see if the left operand is a column in a virtual table */
+  if( NEVER(pExpr==0) ) return pDef;
+  if( pExpr->op!=TK_COLUMN ) return pDef;
+  pTab = pExpr->pTab;
+  if( NEVER(pTab==0) ) return pDef;
+  if( (pTab->tabFlags & TF_Virtual)==0 ) return pDef;
+  pVtab = sqlite3GetVTable(db, pTab)->pVtab;
+  assert( pVtab!=0 );
+  assert( pVtab->pModule!=0 );
+  pMod = (sqlite3_module *)pVtab->pModule;
+  if( pMod->xFindFunction==0 ) return pDef;
+
+  /* Call the xFindFunction method on the virtual table implementation
+  ** to see if the implementation wants to overload this function.
+  */
+  zLowerName = sqlite3DbStrDup(db, pDef->zName);
+  if( zLowerName ){
+    for(z=(unsigned char*)zLowerName; *z; z++){
+      *z = sqlite3UpperToLower[*z];
+    }
+    rc = pMod->xFindFunction(pVtab, nArg, zLowerName, &xSFunc, &pArg);
+    sqlite3DbFree(db, zLowerName);
+  }
+  if( rc==0 ){
+    return pDef;
+  }
+
+  /* Create a new ephemeral function definition for the overloaded
+  ** function */
+  pNew = sqlite3DbMallocZero(db, sizeof(*pNew)
+                             + sqlite3Strlen30(pDef->zName) + 1);
+  if( pNew==0 ){
+    return pDef;
+  }
+  *pNew = *pDef;
+  pNew->zName = (const char*)&pNew[1];
+  memcpy((char*)&pNew[1], pDef->zName, sqlite3Strlen30(pDef->zName)+1);
+  pNew->xSFunc = xSFunc;
+  pNew->pUserData = pArg;
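+  /* Mark the overload as ephemeral: the new FuncDef belongs to the
+  ** statement being compiled and is deleted along with its VDBE, rather
+  ** than being registered permanently with the database connection. */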
+  pNew->funcFlags |= SQLITE_FUNC_EPHEM;
+  return pNew;
+}
+
+/*
+** Make sure virtual table pTab is contained in the pParse->apVirtualLock[]
+** array so that an OP_VBegin will get generated for it. Add pTab to the
+** array if it is missing. If pTab is already in the array, this routine
+** is a no-op.
+*/
+SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){
+  Parse *pToplevel = sqlite3ParseToplevel(pParse);
+  int i, n;
+  Table **apVtabLock;
+
+  assert( IsVirtual(pTab) );
+  for(i=0; i<pToplevel->nVtabLock; i++){
+    if( pTab==pToplevel->apVtabLock[i] ) return;
+  }
+  n = (pToplevel->nVtabLock+1)*sizeof(pToplevel->apVtabLock[0]);
+  apVtabLock = sqlite3_realloc64(pToplevel->apVtabLock, n);
+  if( apVtabLock ){
+    pToplevel->apVtabLock = apVtabLock;
+    pToplevel->apVtabLock[pToplevel->nVtabLock++] = pTab;
+  }else{
+    sqlite3OomFault(pToplevel->db);
+  }
+}
+
+/*
+** Check to see if virtual table module pMod can have an eponymous
+** virtual table instance. If it can, create one if one does not already
+** exist. Return non-zero if the eponymous virtual table instance exists
+** when this routine returns, and return zero if it does not exist.
+**
+** An eponymous virtual table instance is one that is named after its
+** module, and more importantly, does not require a CREATE VIRTUAL TABLE
+** statement in order to come into existence. Eponymous virtual table
+** instances always exist. They cannot be DROP-ed.
+**
+** Any virtual table module for which xConnect and xCreate are the same
+** method can have an eponymous virtual table instance.
+*/
+SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){
+  const sqlite3_module *pModule = pMod->pModule;
+  Table *pTab;
+  char *zErr = 0;
+  int rc;
+  sqlite3 *db = pParse->db;
+  if( pMod->pEpoTab ) return 1;
+  if( pModule->xCreate!=0 && pModule->xCreate!=pModule->xConnect ) return 0;
+  pTab = sqlite3DbMallocZero(db, sizeof(Table));
+  if( pTab==0 ) return 0;
+  pTab->zName = sqlite3DbStrDup(db, pMod->zName);
+  if( pTab->zName==0 ){
+    sqlite3DbFree(db, pTab);
+    return 0;
+  }
+  pMod->pEpoTab = pTab;
+  pTab->nTabRef = 1;
+  pTab->pSchema = db->aDb[0].pSchema;
+  pTab->tabFlags |= TF_Virtual;
+  pTab->nModuleArg = 0;
+  pTab->iPKey = -1;
+  addModuleArgument(db, pTab, sqlite3DbStrDup(db, pTab->zName));
+  addModuleArgument(db, pTab, 0);
+  addModuleArgument(db, pTab, sqlite3DbStrDup(db, pTab->zName));
+  rc = vtabCallConstructor(db, pTab, pMod, pModule->xConnect, &zErr);
+  if( rc ){
+    sqlite3ErrorMsg(pParse, "%s", zErr);
+    sqlite3DbFree(db, zErr);
+    sqlite3VtabEponymousTableClear(db, pMod);
+    return 0;
+  }
+  return 1;
+}
+
+/*
+** Erase the eponymous virtual table instance associated with
+** virtual table module pMod, if it exists.
+*/
+SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3 *db, Module *pMod){
+  Table *pTab = pMod->pEpoTab;
+  if( pTab!=0 ){
+    /* Mark the table as Ephemeral prior to deleting it, so that the
+    ** sqlite3DeleteTable() routine will know that it is not stored in
+    ** the schema. */
+    pTab->tabFlags |= TF_Ephemeral;
+    sqlite3DeleteTable(db, pTab);
+    pMod->pEpoTab = 0;
+  }
+}
+
+/*
+** Return the ON CONFLICT resolution mode in effect for the virtual
+** table update operation currently in progress.
+**
+** The results of this routine are undefined unless it is called from
+** within an xUpdate method.
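+**
+** For example (a sketch), an xUpdate implementation might branch on the
+** result:
+**
+**     switch( sqlite3_vtab_on_conflict(db) ){
+**       case SQLITE_REPLACE: ...delete the conflicting row, then retry...
+**       case SQLITE_IGNORE:  ...skip the row and return SQLITE_OK...
+**       default:             ...return an SQLITE_CONSTRAINT error...
+**     }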
+*/ +SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *db){ + static const unsigned char aMap[] = { + SQLITE_ROLLBACK, SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE + }; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + assert( OE_Rollback==1 && OE_Abort==2 && OE_Fail==3 ); + assert( OE_Ignore==4 && OE_Replace==5 ); + assert( db->vtabOnConflict>=1 && db->vtabOnConflict<=5 ); + return (int)aMap[db->vtabOnConflict-1]; +} + +/* +** Call from within the xCreate() or xConnect() methods to provide +** the SQLite core with additional information about the behavior +** of the virtual table being implemented. +*/ +SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){ + va_list ap; + int rc = SQLITE_OK; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + va_start(ap, op); + switch( op ){ + case SQLITE_VTAB_CONSTRAINT_SUPPORT: { + VtabCtx *p = db->pVtabCtx; + if( !p ){ + rc = SQLITE_MISUSE_BKPT; + }else{ + assert( p->pTab==0 || (p->pTab->tabFlags & TF_Virtual)!=0 ); + p->pVTable->bConstraint = (u8)va_arg(ap, int); + } + break; + } + default: + rc = SQLITE_MISUSE_BKPT; + break; + } + va_end(ap); + + if( rc!=SQLITE_OK ) sqlite3Error(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + +/************** End of vtab.c ************************************************/ +/************** Begin file wherecode.c ***************************************/ +/* +** 2015-06-06 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This module contains C code that generates VDBE code used to process +** the WHERE clause of SQL statements. +** +** This file was split off from where.c on 2015-06-06 in order to reduce the +** size of where.c and make it easier to edit. This file contains the routines +** that actually generate the bulk of the WHERE loop code. The original where.c +** file retains the code that does query planning and analysis. +*/ +/* #include "sqliteInt.h" */ +/************** Include whereInt.h in the middle of wherecode.c **************/ +/************** Begin file whereInt.h ****************************************/ +/* +** 2013-11-12 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains structure and macro definitions for the query +** planner logic in "where.c". These definitions are broken out into +** a separate source file for easier editing. 
+*/
+
+/*
+** Trace output macros
+*/
+#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
+/***/ int sqlite3WhereTrace;
+#endif
+#if defined(SQLITE_DEBUG) \
+    && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE))
+# define WHERETRACE(K,X)  if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X
+# define WHERETRACE_ENABLED 1
+#else
+# define WHERETRACE(K,X)
+#endif
+
+/* Forward references
+*/
+typedef struct WhereClause WhereClause;
+typedef struct WhereMaskSet WhereMaskSet;
+typedef struct WhereOrInfo WhereOrInfo;
+typedef struct WhereAndInfo WhereAndInfo;
+typedef struct WhereLevel WhereLevel;
+typedef struct WhereLoop WhereLoop;
+typedef struct WherePath WherePath;
+typedef struct WhereTerm WhereTerm;
+typedef struct WhereLoopBuilder WhereLoopBuilder;
+typedef struct WhereScan WhereScan;
+typedef struct WhereOrCost WhereOrCost;
+typedef struct WhereOrSet WhereOrSet;
+
+/*
+** This object contains information needed to implement a single nested
+** loop in the WHERE clause.
+**
+** Contrast this object with WhereLoop. This object describes the
+** implementation of the loop. WhereLoop describes the algorithm.
+** This object contains a pointer to the WhereLoop algorithm as one of
+** its elements.
+**
+** The WhereInfo object contains a single instance of this object for
+** each term in the FROM clause (which is to say, for each of the
+** nested loops as implemented). The order of WhereLevel objects determines
+** the loop nesting order, with WhereInfo.a[0] being the outer loop and
+** WhereInfo.a[WhereInfo.nLevel-1] being the inner loop.
+*/
+struct WhereLevel {
+  int iLeftJoin;        /* Memory cell used to implement LEFT OUTER JOIN */
+  int iTabCur;          /* The VDBE cursor used to access the table */
+  int iIdxCur;          /* The VDBE cursor used to access pIdx */
+  int addrBrk;          /* Jump here to break out of the loop */
+  int addrNxt;          /* Jump here to start the next IN combination */
+  int addrSkip;         /* Jump here for next iteration of skip-scan */
+  int addrCont;         /* Jump here to continue with the next loop cycle */
+  int addrFirst;        /* First instruction of interior of the loop */
+  int addrBody;         /* Beginning of the body of this loop */
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+  u32 iLikeRepCntr;     /* LIKE range processing counter register (times 2) */
+  int addrLikeRep;      /* LIKE range processing address */
+#endif
+  u8 iFrom;             /* Which entry in the FROM clause */
+  u8 op, p3, p5;        /* Opcode, P3 & P5 of the opcode that ends the loop */
+  int p1, p2;           /* Operands of the opcode used to end the loop */
+  union {               /* Information that depends on pWLoop->wsFlags */
+    struct {
+      int nIn;              /* Number of entries in aInLoop[] */
+      struct InLoop {
+        int iCur;              /* The VDBE cursor used by this IN operator */
+        int addrInTop;         /* Top of the IN loop */
+        u8 eEndLoopOp;         /* IN Loop terminator. OP_Next or OP_Prev */
+      } *aInLoop;           /* Information about each nested IN operator */
+    } in;                 /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */
+    Index *pCovidx;       /* Possible covering index for WHERE_MULTI_OR */
+  } u;
+  struct WhereLoop *pWLoop;  /* The selected WhereLoop object */
+  Bitmask notReady;          /* FROM entries not usable at this level */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+  int addrVisit;        /* Address at which row is visited */
+#endif
+};
+
+/*
+** Each instance of this object represents an algorithm for evaluating one
+** term of a join.
Every term of the FROM clause will have at least +** one corresponding WhereLoop object (unless INDEXED BY constraints +** prevent a query solution - which is an error) and many terms of the +** FROM clause will have multiple WhereLoop objects, each describing a +** potential way of implementing that FROM-clause term, together with +** dependencies and cost estimates for using the chosen algorithm. +** +** Query planning consists of building up a collection of these WhereLoop +** objects, then computing a particular sequence of WhereLoop objects, with +** one WhereLoop object per FROM clause term, that satisfy all dependencies +** and that minimize the overall cost. +*/ +struct WhereLoop { + Bitmask prereq; /* Bitmask of other loops that must run first */ + Bitmask maskSelf; /* Bitmask identifying table iTab */ +#ifdef SQLITE_DEBUG + char cId; /* Symbolic ID of this loop for debugging use */ +#endif + u8 iTab; /* Position in FROM clause of table for this loop */ + u8 iSortIdx; /* Sorting index number. 0==None */ + LogEst rSetup; /* One-time setup cost (ex: create transient index) */ + LogEst rRun; /* Cost of running each loop */ + LogEst nOut; /* Estimated number of output rows */ + union { + struct { /* Information for internal btree tables */ + u16 nEq; /* Number of equality constraints */ + u16 nBtm; /* Size of BTM vector */ + u16 nTop; /* Size of TOP vector */ + Index *pIndex; /* Index used, or NULL */ + } btree; + struct { /* Information for virtual tables */ + int idxNum; /* Index number */ + u8 needFree; /* True if sqlite3_free(idxStr) is needed */ + i8 isOrdered; /* True if satisfies ORDER BY */ + u16 omitMask; /* Terms that may be omitted */ + char *idxStr; /* Index identifier string */ + } vtab; + } u; + u32 wsFlags; /* WHERE_* flags describing the plan */ + u16 nLTerm; /* Number of entries in aLTerm[] */ + u16 nSkip; /* Number of NULL aLTerm[] entries */ + /**** whereLoopXfer() copies fields above ***********************/ +# define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot) + u16 nLSlot; /* Number of slots allocated for aLTerm[] */ + WhereTerm **aLTerm; /* WhereTerms used */ + WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */ + WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */ +}; + +/* This object holds the prerequisites and the cost of running a +** subquery on one operand of an OR operator in the WHERE clause. +** See WhereOrSet for additional information +*/ +struct WhereOrCost { + Bitmask prereq; /* Prerequisites */ + LogEst rRun; /* Cost of running this subquery */ + LogEst nOut; /* Number of outputs for this subquery */ +}; + +/* The WhereOrSet object holds a set of possible WhereOrCosts that +** correspond to the subquery(s) of OR-clause processing. Only the +** best N_OR_COST elements are retained. +*/ +#define N_OR_COST 3 +struct WhereOrSet { + u16 n; /* Number of valid a[] entries */ + WhereOrCost a[N_OR_COST]; /* Set of best costs */ +}; + +/* +** Each instance of this object holds a sequence of WhereLoop objects +** that implement some or all of a query plan. +** +** Think of each WhereLoop object as a node in a graph with arcs +** showing dependencies and costs for travelling between nodes. (That is +** not a completely accurate description because WhereLoop costs are a +** vector, not a scalar, and because dependencies are many-to-one, not +** one-to-one as are graph nodes. But it is a useful visualization aid.) +** Then a WherePath object is a path through the graph that visits some +** or all of the WhereLoop objects once. 
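+** (In the completed plan for, say, a three-way join, the winning WherePath
+** holds three aLoop[] entries, one for each FROM clause term.)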
+**
+** The "solver" works by creating the N best WherePath objects of length
+** 1. Then using those as a basis to compute the N best WherePath objects
+** of length 2. And so forth until the length of WherePaths equals the
+** number of nodes in the FROM clause. The best (lowest cost) WherePath
+** at the end is the chosen query plan.
+*/
+struct WherePath {
+  Bitmask maskLoop;     /* Bitmask of all WhereLoop objects in this path */
+  Bitmask revLoop;      /* aLoop[]s that should be reversed for ORDER BY */
+  LogEst nRow;          /* Estimated number of rows generated by this path */
+  LogEst rCost;         /* Total cost of this path */
+  LogEst rUnsorted;     /* Total cost of this path ignoring sorting costs */
+  i8 isOrdered;         /* No. of ORDER BY terms satisfied. -1 for unknown */
+  WhereLoop **aLoop;    /* Array of WhereLoop objects implementing this path */
+};
+
+/*
+** The query generator uses an array of instances of this structure to
+** help it analyze the subexpressions of the WHERE clause. Each WHERE
+** clause subexpression is separated from the others by AND operators,
+** usually, or sometimes subexpressions separated by OR.
+**
+** All WhereTerms are collected into a single WhereClause structure.
+** The following identity holds:
+**
+**        WhereTerm.pWC->a[WhereTerm.idx] == WhereTerm
+**
+** When a term is of the form:
+**
+**              X <op> <expr>
+**
+** where X is a column name and <op> is one of certain operators,
+** then WhereTerm.leftCursor and WhereTerm.u.leftColumn record the
+** cursor number and column number for X. WhereTerm.eOperator records
+** the <op> using a bitmask encoding defined by WO_xxx below. The
+** use of a bitmask encoding for the operator allows us to search
+** quickly for terms that match any of several different operators.
+**
+** A WhereTerm might also be two or more subterms connected by OR:
+**
+**         (t1.X <op> <expr>) OR (t1.Y <op> <expr>) OR ....
+**
+** In this second case, wtFlag has the TERM_ORINFO bit set and eOperator==WO_OR
+** and the WhereTerm.u.pOrInfo field points to auxiliary information that
+** is collected about the OR clause.
+**
+** If a term in the WHERE clause does not match either of the two previous
+** categories, then eOperator==0. The WhereTerm.pExpr field is still set
+** to the original subexpression content and wtFlags is set up appropriately
+** but no other fields in the WhereTerm object are meaningful.
+**
+** When eOperator!=0, prereqRight and prereqAll record sets of cursor numbers,
+** but they do so indirectly. A single WhereMaskSet structure translates
+** cursor number into bits and the translated bit is stored in the prereq
+** fields. The translation is used in order to maximize the number of
+** bits that will fit in a Bitmask. The VDBE cursor numbers might be
+** spread out over the non-negative integers. For example, the cursor
+** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The WhereMaskSet
+** translates these sparse cursor numbers into consecutive integers
+** beginning with 0 in order to make the best possible use of the available
+** bits in the Bitmask. So, in the example above, the cursor numbers
+** would be mapped into integers 0 through 7.
+**
+** The number of terms in a join is limited by the number of bits
+** in prereqRight and prereqAll. The default is 64 bits, hence SQLite
+** is only able to process joins with 64 or fewer tables.
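+**
+** For example, given "t1.a=5" (with "a" the first column of t1), the
+** corresponding WhereTerm records t1's cursor number in leftCursor,
+** 0 in u.leftColumn, and WO_EQ in eOperator; a term such as "t1.b>?"
+** would record WO_GT instead.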
+*/
+struct WhereTerm {
+  Expr *pExpr;            /* Pointer to the subexpression that is this term */
+  WhereClause *pWC;       /* The clause this term is part of */
+  LogEst truthProb;       /* Probability of truth for this expression */
+  u16 wtFlags;            /* TERM_xxx bit flags. See below */
+  u16 eOperator;          /* A WO_xx value describing <op> */
+  u8 nChild;              /* Number of children that must disable us */
+  u8 eMatchOp;            /* Op for vtab MATCH/LIKE/GLOB/REGEXP terms */
+  int iParent;            /* Disable pWC->a[iParent] when this term disabled */
+  int leftCursor;         /* Cursor number of X in "X <op> <expr>" */
+  int iField;             /* Field in (?,?,?) IN (SELECT...) vector */
+  union {
+    int leftColumn;         /* Column number of X in "X <op> <expr>" */
+    WhereOrInfo *pOrInfo;   /* Extra information if (eOperator & WO_OR)!=0 */
+    WhereAndInfo *pAndInfo; /* Extra information if (eOperator & WO_AND)!=0 */
+  } u;
+  Bitmask prereqRight;    /* Bitmask of tables used by pExpr->pRight */
+  Bitmask prereqAll;      /* Bitmask of tables referenced by pExpr */
+};
+
+/*
+** Allowed values of WhereTerm.wtFlags
+*/
+#define TERM_DYNAMIC    0x01   /* Need to call sqlite3ExprDelete(db, pExpr) */
+#define TERM_VIRTUAL    0x02   /* Added by the optimizer. Do not code */
+#define TERM_CODED      0x04   /* This term is already coded */
+#define TERM_COPIED     0x08   /* Has a child */
+#define TERM_ORINFO     0x10   /* Need to free the WhereTerm.u.pOrInfo object */
+#define TERM_ANDINFO    0x20   /* Need to free the WhereTerm.u.pAndInfo obj */
+#define TERM_OR_OK      0x40   /* Used during OR-clause processing */
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+#  define TERM_VNULL   0x80    /* Manufactured x>NULL or x<=NULL term */
+#else
+#  define TERM_VNULL   0x00    /* Disabled if not using stat3 */
+#endif
+#define TERM_LIKEOPT    0x100  /* Virtual terms from the LIKE optimization */
+#define TERM_LIKECOND   0x200  /* Conditionally this LIKE operator term */
+#define TERM_LIKE       0x400  /* The original LIKE operator */
+#define TERM_IS         0x800  /* Term.pExpr is an IS operator */
+
+/*
+** An instance of the WhereScan object is used as an iterator for locating
+** terms in the WHERE clause that are useful to the query planner.
+*/
+struct WhereScan {
+  WhereClause *pOrigWC;      /* Original, innermost WhereClause */
+  WhereClause *pWC;          /* WhereClause currently being scanned */
+  const char *zCollName;     /* Required collating sequence, if not NULL */
+  Expr *pIdxExpr;            /* Search for this index expression */
+  char idxaff;               /* Must match this affinity, if zCollName!=NULL */
+  unsigned char nEquiv;      /* Number of entries in aEquiv[] */
+  unsigned char iEquiv;      /* Next unused slot in aEquiv[] */
+  u32 opMask;                /* Acceptable operators */
+  int k;                     /* Resume scanning at this->pWC->a[this->k] */
+  int aiCur[11];             /* Cursors in the equivalence class */
+  i16 aiColumn[11];          /* Corresponding column number in the eq-class */
+};
+
+/*
+** An instance of the following structure holds all information about a
+** WHERE clause. Mostly this is a container for one or more WhereTerms.
+**
+** Explanation of pOuter: For a WHERE clause of the form
+**
+**           a AND ((b AND c) OR (d AND e)) AND f
+**
+** There are separate WhereClause objects for the whole clause and for
+** the subclauses "(b AND c)" and "(d AND e)". The pOuter field of the
+** subclauses points to the WhereClause object for the whole clause.
+*/
+struct WhereClause {
+  WhereInfo *pWInfo;       /* WHERE clause processing context */
+  WhereClause *pOuter;     /* Outer conjunction */
+  u8 op;                   /* Split operator. TK_AND or TK_OR */
+  int nTerm;               /* Number of terms */
+  int nSlot;               /* Number of entries in a[] */
+  WhereTerm *a;            /* Each a[] describes a term of the WHERE clause */
+#if defined(SQLITE_SMALL_STACK)
+  WhereTerm aStatic[1];    /* Initial static space for a[] */
+#else
+  WhereTerm aStatic[8];    /* Initial static space for a[] */
+#endif
+};
+
+/*
+** A WhereTerm with eOperator==WO_OR has its u.pOrInfo pointer set to
+** a dynamically allocated instance of the following structure.
+*/
+struct WhereOrInfo {
+  WhereClause wc;          /* Decomposition into subterms */
+  Bitmask indexable;       /* Bitmask of all indexable tables in the clause */
+};
+
+/*
+** A WhereTerm with eOperator==WO_AND has its u.pAndInfo pointer set to
+** a dynamically allocated instance of the following structure.
+*/
+struct WhereAndInfo {
+  WhereClause wc;          /* The subexpression broken out */
+};
+
+/*
+** An instance of the following structure keeps track of a mapping
+** between VDBE cursor numbers and bits of the bitmasks in WhereTerm.
+**
+** The VDBE cursor numbers are small integers contained in
+** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE
+** clause, the cursor numbers might not begin with 0 and they might
+** contain gaps in the numbering sequence. But we want to make maximum
+** use of the bits in our bitmasks. This structure provides a mapping
+** from the sparse cursor numbers into consecutive integers beginning
+** with 0.
+**
+** If WhereMaskSet.ix[A]==B it means that the A-th bit of a Bitmask
+** corresponds to VDBE cursor number B. The A-th bit of a bitmask is 1<<A.
+**
+** For example, if the WHERE clause expression used these VDBE
+** cursors: 4, 5, 8, 29, 57, 73. Then the WhereMaskSet structure
+** would map those cursor numbers into bits 0 through 5.
+**
+** Note that the mapping is not necessarily ordered. In the example
+** above, the mapping might go like this: 4->3, 5->1, 8->2, 29->0,
+** 57->5, 73->4. Or one of 719 other combinations might be used. It
+** does not really matter. What is important is that sparse cursor
+** numbers all get mapped into bit numbers that begin with 0 and contain
+** no gaps.
+*/
+struct WhereMaskSet {
+  int n;                        /* Number of assigned cursor values */
+  int ix[BMS];                  /* Cursor assigned to each bit */
+};
+
+/*
+** Initialize a WhereMaskSet object
+*/
+#define initMaskSet(P)  (P)->n=0
+
+/*
+** This object is a convenience wrapper holding all information needed
+** to construct WhereLoop objects for a particular query.
+*/
+struct WhereLoopBuilder {
+  WhereInfo *pWInfo;        /* Information about this WHERE */
+  WhereClause *pWC;         /* WHERE clause terms */
+  ExprList *pOrderBy;       /* ORDER BY clause */
+  WhereLoop *pNew;          /* Template WhereLoop */
+  WhereOrSet *pOrSet;       /* Record best loops here, if not NULL */
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  UnpackedRecord *pRec;     /* Probe for stat4 (if required) */
+  int nRecValid;            /* Number of valid fields currently in pRec */
+#endif
+};
+
+/*
+** The WHERE clause processing routine has two halves. The
+** first part does the start of the WHERE loop and the second
+** half does the tail of the WHERE loop. An instance of
+** this structure is returned by the first half and passed
+** into the second half to give some continuity.
+**
+** An instance of this object holds the complete state of the query
+** planner.
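+**
+** In outline, the code generators elsewhere in SQLite use the two halves
+** like this (a simplified sketch; additional arguments omitted):
+**
+**     pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, ...);
+**     ... generate code for the loop body ...
+**     sqlite3WhereEnd(pWInfo);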
+*/
+struct WhereInfo {
+  Parse *pParse;            /* Parsing and code generating context */
+  SrcList *pTabList;        /* List of tables in the join */
+  ExprList *pOrderBy;       /* The ORDER BY clause or NULL */
+  ExprList *pDistinctSet;   /* DISTINCT over all these values */
+  LogEst iLimit;            /* LIMIT if wctrlFlags has WHERE_USE_LIMIT */
+  int aiCurOnePass[2];      /* OP_OpenWrite cursors for the ONEPASS opt */
+  int iContinue;            /* Jump here to continue with next record */
+  int iBreak;               /* Jump here to break out of the loop */
+  int savedNQueryLoop;      /* pParse->nQueryLoop outside the WHERE loop */
+  u16 wctrlFlags;           /* Flags originally passed to sqlite3WhereBegin() */
+  u8 nLevel;                /* Number of nested loops */
+  i8 nOBSat;                /* Number of ORDER BY terms satisfied by indices */
+  u8 sorted;                /* True if really sorted (not just grouped) */
+  u8 eOnePass;              /* ONEPASS_OFF, or _SINGLE, or _MULTI */
+  u8 untestedTerms;         /* Not all WHERE terms resolved by outer loop */
+  u8 eDistinct;             /* One of the WHERE_DISTINCT_* values */
+  u8 bOrderedInnerLoop;     /* True if only the inner-most loop is ordered */
+  int iTop;                 /* The very beginning of the WHERE loop */
+  WhereLoop *pLoops;        /* List of all WhereLoop objects */
+  Bitmask revMask;          /* Mask of ORDER BY terms that need reversing */
+  LogEst nRowOut;           /* Estimated number of output rows */
+  WhereClause sWC;          /* Decomposition of the WHERE clause */
+  WhereMaskSet sMaskSet;    /* Map cursor numbers to bitmasks */
+  WhereLevel a[1];          /* Information about each nested loop in WHERE */
+};
+
+/*
+** Private interfaces - callable only by other where.c routines.
+**
+** where.c:
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int);
+#ifdef WHERETRACE_ENABLED
+SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC);
+#endif
+SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
+  WhereClause *pWC,     /* The WHERE clause to be searched */
+  int iCur,             /* Cursor number of LHS */
+  int iColumn,          /* Column number of LHS */
+  Bitmask notReady,     /* RHS must not overlap with this mask */
+  u32 op,               /* Mask of WO_xx values describing operator */
+  Index *pIdx           /* Must be compatible with this index, if not NULL */
+);
+
+/* wherecode.c: */
+#ifndef SQLITE_OMIT_EXPLAIN
+SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
+  Parse *pParse,                  /* Parse context */
+  SrcList *pTabList,              /* Table list this loop refers to */
+  WhereLevel *pLevel,             /* Scan to write OP_Explain opcode for */
+  int iLevel,                     /* Value for "level" column of output */
+  int iFrom,                      /* Value for "from" column of output */
+  u16 wctrlFlags                  /* Flags passed to sqlite3WhereBegin() */
+);
+#else
+# define sqlite3WhereExplainOneScan(u,v,w,x,y,z) 0
+#endif /* SQLITE_OMIT_EXPLAIN */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
+  Vdbe *v,                        /* Vdbe to add scanstatus entry to */
+  SrcList *pSrclist,              /* FROM clause pLvl reads data from */
+  WhereLevel *pLvl,               /* Level to add scanstatus() entry for */
+  int addrExplain                 /* Address of OP_Explain (or 0) */
+);
+#else
+# define sqlite3WhereAddScanStatus(a, b, c, d) ((void)d)
+#endif
+SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
+  WhereInfo *pWInfo,    /* Complete information about the WHERE clause */
+  int iLevel,           /* Which level of pWInfo->a[] should be coded */
+  Bitmask notReady      /* Which tables are currently available */
+);
+
+/* whereexpr.c: */
+SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*);
+SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause*);
+SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause*,Expr*,u8);
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*);
+SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*);
+SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*);
+SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereClause*);
+
+
+
+
+
+/*
+** Bitmasks for the operators on WhereTerm objects. These are all
+** operators that are of interest to the query planner. An
+** OR-ed combination of these values can be used when searching for
+** particular WhereTerms within a WhereClause.
+**
+** Value constraints:
+**     WO_EQ    == SQLITE_INDEX_CONSTRAINT_EQ
+**     WO_LT    == SQLITE_INDEX_CONSTRAINT_LT
+**     WO_LE    == SQLITE_INDEX_CONSTRAINT_LE
+**     WO_GT    == SQLITE_INDEX_CONSTRAINT_GT
+**     WO_GE    == SQLITE_INDEX_CONSTRAINT_GE
+**     WO_MATCH == SQLITE_INDEX_CONSTRAINT_MATCH
+*/
+#define WO_IN     0x0001
+#define WO_EQ     0x0002
+#define WO_LT     (WO_EQ<<(TK_LT-TK_EQ))
+#define WO_LE     (WO_EQ<<(TK_LE-TK_EQ))
+#define WO_GT     (WO_EQ<<(TK_GT-TK_EQ))
+#define WO_GE     (WO_EQ<<(TK_GE-TK_EQ))
+#define WO_MATCH  0x0040
+#define WO_IS     0x0080
+#define WO_ISNULL 0x0100
+#define WO_OR     0x0200       /* Two or more OR-connected terms */
+#define WO_AND    0x0400       /* Two or more AND-connected terms */
+#define WO_EQUIV  0x0800       /* Of the form A==B, both columns */
+#define WO_NOOP   0x1000       /* This term does not restrict search space */
+
+#define WO_ALL    0x1fff       /* Mask of all possible WO_* values */
+#define WO_SINGLE 0x01ff       /* Mask of all non-compound WO_* values */
+
+/*
+** These are definitions of bits in the WhereLoop.wsFlags field.
+** The particular combination of bits in each WhereLoop help to
+** determine the algorithm that WhereLoop represents.
+*/
+#define WHERE_COLUMN_EQ    0x00000001  /* x=EXPR */
+#define WHERE_COLUMN_RANGE 0x00000002  /* x<EXPR and/or x>EXPR */
+#define WHERE_COLUMN_IN    0x00000004  /* x IN (...) */
+#define WHERE_COLUMN_NULL  0x00000008  /* x IS NULL */
+#define WHERE_CONSTRAINT   0x0000000f  /* Any of the WHERE_COLUMN_xxx values */
+#define WHERE_TOP_LIMIT    0x00000010  /* x<EXPR or x<=EXPR constraint */
+#define WHERE_BTM_LIMIT    0x00000020  /* x>EXPR or x>=EXPR constraint */
+#define WHERE_BOTH_LIMIT   0x00000030  /* Both x>EXPR and x<EXPR */
+#define WHERE_IDX_ONLY     0x00000040  /* Use index only - omit table */
+#define WHERE_IPK          0x00000100  /* x is the INTEGER PRIMARY KEY */
+#define WHERE_INDEXED      0x00000200  /* WhereLoop.u.btree.pIndex is valid */
+#define WHERE_VIRTUALTABLE 0x00000400  /* WhereLoop.u.vtab is valid */
+#define WHERE_IN_ABLE      0x00000800  /* Able to support an IN operator */
+#define WHERE_ONEROW       0x00001000  /* Selects no more than one row */
+#define WHERE_MULTI_OR     0x00002000  /* OR using multiple indices */
+#define WHERE_AUTO_INDEX   0x00004000  /* Uses an ephemeral index */
+#define WHERE_SKIPSCAN     0x00008000  /* Uses the skip-scan algorithm */
+#define WHERE_UNQ_WANTED   0x00010000  /* WHERE_ONEROW would have been helpful*/
+#define WHERE_PARTIALIDX   0x00020000  /* The automatic index is partial */
+
+/************** End of whereInt.h ********************************************/
+/************** Continuing where we left off in wherecode.c ******************/
+
+#ifndef SQLITE_OMIT_EXPLAIN
+
+/*
+** Return the name of the i-th column of the pIdx index.
+*/
+static const char *explainIndexColumnName(Index *pIdx, int i){
+  i = pIdx->aiColumn[i];
+  if( i==XN_EXPR ) return "<expr>";
+  if( i==XN_ROWID ) return "rowid";
+  return pIdx->pTable->aCol[i].zName;
+}
+
+/*
+** This routine is a helper for explainIndexRange() below
+**
+** pStr holds the text of an expression that we are building up one term
+** at a time. This routine adds a new term to the end of the expression.
+** Terms are separated by AND so add the "AND" text for second and subsequent
+** terms only.
+*/
+static void explainAppendTerm(
+  StrAccum *pStr,             /* The text expression being built */
+  Index *pIdx,                /* Index to read column names from */
+  int nTerm,                  /* Number of terms */
+  int iTerm,                  /* Zero-based index of first term. */
+  int bAnd,                   /* Non-zero to append " AND " */
+  const char *zOp             /* Name of the operator */
+){
+  int i;
+
+  assert( nTerm>=1 );
+  if( bAnd ) sqlite3StrAccumAppend(pStr, " AND ", 5);
+
+  if( nTerm>1 ) sqlite3StrAccumAppend(pStr, "(", 1);
+  for(i=0; i<nTerm; i++){
+    if( i ) sqlite3StrAccumAppend(pStr, ",", 1);
+    sqlite3StrAccumAppendAll(pStr, explainIndexColumnName(pIdx, iTerm+i));
+  }
+  if( nTerm>1 ) sqlite3StrAccumAppend(pStr, ")", 1);
+
+  sqlite3StrAccumAppend(pStr, zOp, 1);
+
+  if( nTerm>1 ) sqlite3StrAccumAppend(pStr, "(", 1);
+  for(i=0; i<nTerm; i++){
+    if( i ) sqlite3StrAccumAppend(pStr, ",", 1);
+    sqlite3StrAccumAppend(pStr, "?", 1);
+  }
+  if( nTerm>1 ) sqlite3StrAccumAppend(pStr, ")", 1);
+}
+
+/*
+** Argument pLevel describes a strategy for scanning table pTab. This
+** function appends text to pStr that describes the subset of table
+** rows scanned by the strategy in the form of an SQL expression.
+**
+** For example, if the query:
+**
+**   SELECT * FROM t1 WHERE a=1 AND b>2;
+**
+** is run and there is an index on (a, b), then this function returns a
+** string similar to:
+**
+**   "a=? AND b>?"
+*/
+static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){
+  Index *pIndex = pLoop->u.btree.pIndex;
+  u16 nEq = pLoop->u.btree.nEq;
+  u16 nSkip = pLoop->nSkip;
+  int i, j;
+
+  if( nEq==0 && (pLoop->wsFlags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ) return;
+  sqlite3StrAccumAppend(pStr, " (", 2);
+  for(i=0; i<nEq; i++){
+    const char *z = explainIndexColumnName(pIndex, i);
+    if( i ) sqlite3StrAccumAppend(pStr, " AND ", 5);
+    sqlite3XPrintf(pStr, i>=nSkip ? "%s=?" : "ANY(%s)", z);
+  }
+
+  j = i;
+  if( pLoop->wsFlags&WHERE_BTM_LIMIT ){
+    explainAppendTerm(pStr, pIndex, pLoop->u.btree.nBtm, j, i, ">");
+    i = 1;
+  }
+  if( pLoop->wsFlags&WHERE_TOP_LIMIT ){
+    explainAppendTerm(pStr, pIndex, pLoop->u.btree.nTop, j, i, "<");
+  }
+  sqlite3StrAccumAppend(pStr, ")", 1);
+}
+
+/*
+** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN
+** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was
+** defined at compile-time. If it is not a no-op, a single OP_Explain opcode
+** is added to the output to describe the table scan strategy in pLevel.
+**
+** If an OP_Explain opcode is added to the VM, its address is returned.
+** Otherwise, if no OP_Explain is coded, zero is returned.
+*/
+SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
+  Parse *pParse,                  /* Parse context */
+  SrcList *pTabList,              /* Table list this loop refers to */
+  WhereLevel *pLevel,             /* Scan to write OP_Explain opcode for */
+  int iLevel,                     /* Value for "level" column of output */
+  int iFrom,                      /* Value for "from" column of output */
+  u16 wctrlFlags                  /* Flags passed to sqlite3WhereBegin() */
+){
+  int ret = 0;
+#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+  if( pParse->explain==2 )
+#endif
+  {
+    struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom];
+    Vdbe *v = pParse->pVdbe;      /* VM being constructed */
+    sqlite3 *db = pParse->db;     /* Database handle */
+    int iId = pParse->iSelectId;  /* Select id (left-most output column) */
+    int isSearch;                 /* True for a SEARCH. False for SCAN. */
+    WhereLoop *pLoop;             /* The controlling WhereLoop object */
+    u32 flags;                    /* Flags that describe this loop */
+    char *zMsg;                   /* Text to add to EQP output */
+    StrAccum str;                 /* EQP output string */
+    char zBuf[100];               /* Initial space for EQP output string */
+
+    pLoop = pLevel->pWLoop;
+    flags = pLoop->wsFlags;
+    if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0;
+
+    isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0
+            || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0))
+            || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX));
+
+    sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH);
+    sqlite3StrAccumAppendAll(&str, isSearch ?
"SEARCH" : "SCAN"); + if( pItem->pSelect ){ + sqlite3XPrintf(&str, " SUBQUERY %d", pItem->iSelectId); + }else{ + sqlite3XPrintf(&str, " TABLE %s", pItem->zName); + } + + if( pItem->zAlias ){ + sqlite3XPrintf(&str, " AS %s", pItem->zAlias); + } + if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){ + const char *zFmt = 0; + Index *pIdx; + + assert( pLoop->u.btree.pIndex!=0 ); + pIdx = pLoop->u.btree.pIndex; + assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) ); + if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){ + if( isSearch ){ + zFmt = "PRIMARY KEY"; + } + }else if( flags & WHERE_PARTIALIDX ){ + zFmt = "AUTOMATIC PARTIAL COVERING INDEX"; + }else if( flags & WHERE_AUTO_INDEX ){ + zFmt = "AUTOMATIC COVERING INDEX"; + }else if( flags & WHERE_IDX_ONLY ){ + zFmt = "COVERING INDEX %s"; + }else{ + zFmt = "INDEX %s"; + } + if( zFmt ){ + sqlite3StrAccumAppend(&str, " USING ", 7); + sqlite3XPrintf(&str, zFmt, pIdx->zName); + explainIndexRange(&str, pLoop); + } + }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){ + const char *zRangeOp; + if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){ + zRangeOp = "="; + }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){ + zRangeOp = ">? AND rowid<"; + }else if( flags&WHERE_BTM_LIMIT ){ + zRangeOp = ">"; + }else{ + assert( flags&WHERE_TOP_LIMIT); + zRangeOp = "<"; + } + sqlite3XPrintf(&str, " USING INTEGER PRIMARY KEY (rowid%s?)",zRangeOp); + } +#ifndef SQLITE_OMIT_VIRTUALTABLE + else if( (flags & WHERE_VIRTUALTABLE)!=0 ){ + sqlite3XPrintf(&str, " VIRTUAL TABLE INDEX %d:%s", + pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr); + } +#endif +#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS + if( pLoop->nOut>=10 ){ + sqlite3XPrintf(&str, " (~%llu rows)", sqlite3LogEstToInt(pLoop->nOut)); + }else{ + sqlite3StrAccumAppend(&str, " (~1 row)", 9); + } +#endif + zMsg = sqlite3StrAccumFinish(&str); + ret = sqlite3VdbeAddOp4(v, OP_Explain, iId, iLevel, iFrom, zMsg,P4_DYNAMIC); + } + return ret; +} +#endif /* SQLITE_OMIT_EXPLAIN */ + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +/* +** Configure the VM passed as the first argument with an +** sqlite3_stmt_scanstatus() entry corresponding to the scan used to +** implement level pLvl. Argument pSrclist is a pointer to the FROM +** clause that the scan reads data from. +** +** If argument addrExplain is not 0, it must be the address of an +** OP_Explain instruction that describes the same loop. +*/ +SQLITE_PRIVATE void sqlite3WhereAddScanStatus( + Vdbe *v, /* Vdbe to add scanstatus entry to */ + SrcList *pSrclist, /* FROM clause pLvl reads data from */ + WhereLevel *pLvl, /* Level to add scanstatus() entry for */ + int addrExplain /* Address of OP_Explain (or 0) */ +){ + const char *zObj = 0; + WhereLoop *pLoop = pLvl->pWLoop; + if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ + zObj = pLoop->u.btree.pIndex->zName; + }else{ + zObj = pSrclist->a[pLvl->iFrom].zName; + } + sqlite3VdbeScanStatus( + v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj + ); +} +#endif + + +/* +** Disable a term in the WHERE clause. Except, do not disable the term +** if it controls a LEFT OUTER JOIN and it did not originate in the ON +** or USING clause of that join. 
+**
+** Consider the term t2.z='ok' in the following queries:
+**
+**   (1)  SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok'
+**   (2)  SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok'
+**   (3)  SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok'
+**
+** The t2.z='ok' is disabled in (2) because it originates
+** in the ON clause. The term is disabled in (3) because it is not part
+** of a LEFT OUTER JOIN. In (1), the term is not disabled.
+**
+** Disabling a term causes that term to not be tested in the inner loop
+** of the join. Disabling is an optimization. When terms are satisfied
+** by indices, we disable them to prevent redundant tests in the inner
+** loop. We would get the correct results if nothing were ever disabled,
+** but joins might run a little slower. The trick is to disable as much
+** as we can without disabling too much. If we disabled in (1), we'd get
+** the wrong answer. See ticket #813.
+**
+** If all the children of a term are disabled, then that term is also
+** automatically disabled. In this way, terms get disabled if derived
+** virtual terms are tested first. For example:
+**
+**      x GLOB 'abc*' AND x>='abc' AND x<'acd'
+**      \___________/     \______/     \_____/
+**         parent          child1       child2
+**
+** Only the parent term was in the original WHERE clause. The child1
+** and child2 terms were added by the LIKE optimization. If both of
+** the virtual child terms are valid, then testing of the parent can be
+** skipped.
+**
+** Usually the parent term is marked as TERM_CODED. But if the parent
+** term was originally TERM_LIKE, then the parent gets TERM_LIKECOND instead.
+** The TERM_LIKECOND marking indicates that the term should be coded inside
+** a conditional such that it is only evaluated on the second pass of a
+** LIKE-optimization loop, when scanning BLOBs instead of strings.
+*/
+static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
+  int nLoop = 0;
+  while( ALWAYS(pTerm!=0)
+      && (pTerm->wtFlags & TERM_CODED)==0
+      && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+      && (pLevel->notReady & pTerm->prereqAll)==0
+  ){
+    if( nLoop && (pTerm->wtFlags & TERM_LIKE)!=0 ){
+      pTerm->wtFlags |= TERM_LIKECOND;
+    }else{
+      pTerm->wtFlags |= TERM_CODED;
+    }
+    if( pTerm->iParent<0 ) break;
+    pTerm = &pTerm->pWC->a[pTerm->iParent];
+    pTerm->nChild--;
+    if( pTerm->nChild!=0 ) break;
+    nLoop++;
+  }
+}
+
+/*
+** Code an OP_Affinity opcode to apply the column affinity string zAff
+** to the n registers starting at base.
+**
+** As an optimization, SQLITE_AFF_BLOB entries (which are no-ops) at the
+** beginning and end of zAff are ignored. If all entries in zAff are
+** SQLITE_AFF_BLOB, then no code gets generated.
+**
+** This routine makes its own copy of zAff so that the caller is free
+** to modify zAff after this routine returns.
+*/
+static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
+  Vdbe *v = pParse->pVdbe;
+  if( zAff==0 ){
+    assert( pParse->db->mallocFailed );
+    return;
+  }
+  assert( v!=0 );
+
+  /* Adjust base and n to skip over SQLITE_AFF_BLOB entries at the beginning
+  ** and end of the affinity string.
+  */
+  while( n>0 && zAff[0]==SQLITE_AFF_BLOB ){
+    n--;
+    base++;
+    zAff++;
+  }
+  while( n>1 && zAff[n-1]==SQLITE_AFF_BLOB ){
+    n--;
+  }
+
+  /* Code the OP_Affinity opcode if there is anything left to do. */
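+  /* At this point any leading or trailing SQLITE_AFF_BLOB entries have been
+  ** trimmed away, so whatever remains in zAff[0..n-1] represents a real
+  ** affinity conversion. */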
*/ + if( n>0 ){ + sqlite3VdbeAddOp4(v, OP_Affinity, base, n, 0, zAff, n); + sqlite3ExprCacheAffinityChange(pParse, base, n); + } +} + +/* +** Expression pRight, which is the RHS of a comparison operation, is +** either a vector of n elements or, if n==1, a scalar expression. +** Before the comparison operation, affinity zAff is to be applied +** to the pRight values. This function modifies characters within the +** affinity string to SQLITE_AFF_BLOB if either: +** +** * the comparison will be performed with no affinity, or +** * the affinity change in zAff is guaranteed not to change the value. +*/ +static void updateRangeAffinityStr( + Expr *pRight, /* RHS of comparison */ + int n, /* Number of vector elements in comparison */ + char *zAff /* Affinity string to modify */ +){ + int i; + for(i=0; ipExpr; + Vdbe *v = pParse->pVdbe; + int iReg; /* Register holding results */ + + assert( pLevel->pWLoop->aLTerm[iEq]==pTerm ); + assert( iTarget>0 ); + if( pX->op==TK_EQ || pX->op==TK_IS ){ + iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); + }else if( pX->op==TK_ISNULL ){ + iReg = iTarget; + sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); +#ifndef SQLITE_OMIT_SUBQUERY + }else{ + int eType = IN_INDEX_NOOP; + int iTab; + struct InLoop *pIn; + WhereLoop *pLoop = pLevel->pWLoop; + int i; + int nEq = 0; + int *aiMap = 0; + + if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 + && pLoop->u.btree.pIndex!=0 + && pLoop->u.btree.pIndex->aSortOrder[iEq] + ){ + testcase( iEq==0 ); + testcase( bRev ); + bRev = !bRev; + } + assert( pX->op==TK_IN ); + iReg = iTarget; + + for(i=0; iaLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){ + disableTerm(pLevel, pTerm); + return iTarget; + } + } + for(i=iEq;inLTerm; i++){ + if( ALWAYS(pLoop->aLTerm[i]) && pLoop->aLTerm[i]->pExpr==pX ) nEq++; + } + + if( (pX->flags & EP_xIsSelect)==0 || pX->x.pSelect->pEList->nExpr==1 ){ + eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0); + }else{ + Select *pSelect = pX->x.pSelect; + sqlite3 *db = pParse->db; + u16 savedDbOptFlags = db->dbOptFlags; + ExprList *pOrigRhs = pSelect->pEList; + ExprList *pOrigLhs = pX->pLeft->x.pList; + ExprList *pRhs = 0; /* New Select.pEList for RHS */ + ExprList *pLhs = 0; /* New pX->pLeft vector */ + + for(i=iEq;inLTerm; i++){ + if( pLoop->aLTerm[i]->pExpr==pX ){ + int iField = pLoop->aLTerm[i]->iField - 1; + Expr *pNewRhs = sqlite3ExprDup(db, pOrigRhs->a[iField].pExpr, 0); + Expr *pNewLhs = sqlite3ExprDup(db, pOrigLhs->a[iField].pExpr, 0); + + pRhs = sqlite3ExprListAppend(pParse, pRhs, pNewRhs); + pLhs = sqlite3ExprListAppend(pParse, pLhs, pNewLhs); + } + } + if( !db->mallocFailed ){ + Expr *pLeft = pX->pLeft; + + if( pSelect->pOrderBy ){ + /* If the SELECT statement has an ORDER BY clause, zero the + ** iOrderByCol variables. These are set to non-zero when an + ** ORDER BY term exactly matches one of the terms of the + ** result-set. Since the result-set of the SELECT statement may + ** have been modified or reordered, these variables are no longer + ** set correctly. Since setting them is just an optimization, + ** it's easiest just to zero them here. */ + ExprList *pOrderBy = pSelect->pOrderBy; + for(i=0; inExpr; i++){ + pOrderBy->a[i].u.x.iOrderByCol = 0; + } + } + + /* Take care here not to generate a TK_VECTOR containing only a + ** single value. Since the parser never creates such a vector, some + ** of the subroutines do not handle this case. 
+        if( pLhs->nExpr==1 ){
+          pX->pLeft = pLhs->a[0].pExpr;
+        }else{
+          pLeft->x.pList = pLhs;
+          aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int) * nEq);
+          testcase( aiMap==0 );
+        }
+        pSelect->pEList = pRhs;
+        db->dbOptFlags |= SQLITE_QueryFlattener;
+        eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap);
+        db->dbOptFlags = savedDbOptFlags;
+        testcase( aiMap!=0 && aiMap[0]!=0 );
+        pSelect->pEList = pOrigRhs;
+        pLeft->x.pList = pOrigLhs;
+        pX->pLeft = pLeft;
+      }
+      sqlite3ExprListDelete(pParse->db, pLhs);
+      sqlite3ExprListDelete(pParse->db, pRhs);
+    }
+
+    if( eType==IN_INDEX_INDEX_DESC ){
+      testcase( bRev );
+      bRev = !bRev;
+    }
+    iTab = pX->iTable;
+    sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0);
+    VdbeCoverageIf(v, bRev);
+    VdbeCoverageIf(v, !bRev);
+    assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
+
+    pLoop->wsFlags |= WHERE_IN_ABLE;
+    if( pLevel->u.in.nIn==0 ){
+      pLevel->addrNxt = sqlite3VdbeMakeLabel(v);
+    }
+
+    i = pLevel->u.in.nIn;
+    pLevel->u.in.nIn += nEq;
+    pLevel->u.in.aInLoop =
+       sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
+                              sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
+    pIn = pLevel->u.in.aInLoop;
+    if( pIn ){
+      int iMap = 0;               /* Index in aiMap[] */
+      pIn += i;
+      for(i=iEq; i<pLoop->nLTerm; i++){
+        if( pLoop->aLTerm[i]->pExpr==pX ){
+          int iOut = iReg + i - iEq;
+          if( eType==IN_INDEX_ROWID ){
+            testcase( nEq>1 );  /* Happens with a UNIQUE index on ROWID */
+            pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut);
+          }else{
+            int iCol = aiMap ? aiMap[iMap++] : 0;
+            pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut);
+          }
+          sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v);
+          if( i==iEq ){
+            pIn->iCur = iTab;
+            pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen;
+          }else{
+            pIn->eEndLoopOp = OP_Noop;
+          }
+          pIn++;
+        }
+      }
+    }else{
+      pLevel->u.in.nIn = 0;
+    }
+    sqlite3DbFree(pParse->db, aiMap);
+#endif
+  }
+  disableTerm(pLevel, pTerm);
+  return iReg;
+}
+
+/*
+** Generate code that will evaluate all == and IN constraints for an
+** index scan.
+**
+** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c).
+** Suppose the WHERE clause is this:  a==5 AND b IN (1,2,3) AND c>5 AND c<10
+** The index has as many as three equality constraints, but in this
+** example, the third "c" value is an inequality. So only two
+** constraints are coded. This routine will generate code to evaluate
+** a==5 and b IN (1,2,3). The current values for a and b will be stored
+** in consecutive registers and the index of the first register is returned.
+**
+** In the example above nEq==2. But this subroutine works for any value
+** of nEq including 0. If nEq==0, this routine is nearly a no-op.
+** The only thing it does is allocate the pLevel->iMem memory cell and
+** compute the affinity string.
+**
+** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints
+** are == or IN and are covered by the nEq. nExtraReg is 1 if there is
+** an inequality constraint (such as the "c>=5 AND c<10" in the example) that
+** occurs after the nEq equality constraints.
+**
+** This routine allocates a range of nEq+nExtraReg memory cells and returns
+** the index of the first memory cell in that range. The code that
+** calls this routine will use that memory range to store keys for
+** the start and termination conditions of the loop.
+** If one or more IN operators appear, then this routine allocates an
+** additional nEq memory cells for internal use.
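+**
+** Continuing the example above: with nEq==2 and nExtraReg==1, three
+** consecutive registers are allocated and the index of the first one is
+** returned; the caller stores the "a" and "b" key values in the first two
+** and uses the third for the inequality bound on "c".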
+**
+** Before returning, *pzAff is set to point to a buffer containing a
+** copy of the column affinity string of the index allocated using
+** sqlite3DbMalloc(). Except, entries in the copy of the string associated
+** with equality constraints that use BLOB or NONE affinity are set to
+** SQLITE_AFF_BLOB. This is to deal with SQL such as the following:
+**
+**   CREATE TABLE t1(a TEXT PRIMARY KEY, b);
+**   SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b;
+**
+** In the example above, the index on t1(a) has TEXT affinity. But since
+** the right-hand side of the equality constraint (t2.b) has BLOB/NONE
+** affinity, no conversion should be attempted before using a t2.b value
+** as part of a key to search the index. Hence the first byte in the
+** returned affinity string in this example would be set to SQLITE_AFF_BLOB.
+*/
+static int codeAllEqualityTerms(
+  Parse *pParse,        /* Parsing context */
+  WhereLevel *pLevel,   /* Which nested loop of the FROM we are coding */
+  int bRev,             /* Reverse the order of IN operators */
+  int nExtraReg,        /* Number of extra registers to allocate */
+  char **pzAff          /* OUT: Set to point to affinity string */
+){
+  u16 nEq;                      /* The number of == or IN constraints to code */
+  u16 nSkip;                    /* Number of left-most columns to skip */
+  Vdbe *v = pParse->pVdbe;      /* The vm under construction */
+  Index *pIdx;                  /* The index being used for this loop */
+  WhereTerm *pTerm;             /* A single constraint term */
+  WhereLoop *pLoop;             /* The WhereLoop object */
+  int j;                        /* Loop counter */
+  int regBase;                  /* Base register */
+  int nReg;                     /* Number of registers to allocate */
+  char *zAff;                   /* Affinity string to return */
+
+  /* This module is only called on query plans that use an index. */
+  pLoop = pLevel->pWLoop;
+  assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 );
+  nEq = pLoop->u.btree.nEq;
+  nSkip = pLoop->nSkip;
+  pIdx = pLoop->u.btree.pIndex;
+  assert( pIdx!=0 );
+
+  /* Figure out how many memory cells we will need then allocate them.
+  */
+  regBase = pParse->nMem + 1;
+  nReg = pLoop->u.btree.nEq + nExtraReg;
+  pParse->nMem += nReg;
+
+  zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));
+  assert( zAff!=0 || pParse->db->mallocFailed );
+
+  if( nSkip ){
+    int iIdxCur = pLevel->iIdxCur;
+    sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur);
+    VdbeCoverageIf(v, bRev==0);
+    VdbeCoverageIf(v, bRev!=0);
+    VdbeComment((v, "begin skip-scan on %s", pIdx->zName));
+    j = sqlite3VdbeAddOp0(v, OP_Goto);
+    pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT),
+                            iIdxCur, 0, regBase, nSkip);
+    VdbeCoverageIf(v, bRev==0);
+    VdbeCoverageIf(v, bRev!=0);
+    sqlite3VdbeJumpHere(v, j);
+    for(j=0; j<nSkip; j++){
+      sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, j, regBase+j);
+      testcase( pIdx->aiColumn[j]==XN_EXPR );
+      VdbeComment((v, "%s", explainIndexColumnName(pIdx, j)));
+    }
+  }
+
+  /* Evaluate the equality constraints
+  */
+  assert( zAff==0 || (int)strlen(zAff)>=nEq );
+  for(j=nSkip; j<nEq; j++){
+    int r1;
+    pTerm = pLoop->aLTerm[j];
+    assert( pTerm!=0 );
+    /* The following testcase is true for indices with redundant columns.
+    ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */
+    testcase( (pTerm->wtFlags & TERM_CODED)!=0 );
+    testcase( pTerm->wtFlags & TERM_VIRTUAL );
+    r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j);
+    if( r1!=regBase+j ){
+      if( nReg==1 ){
+        sqlite3ReleaseTempReg(pParse, regBase);
+        regBase = r1;
+      }else{
+        sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j);
+      }
+    }
+    if( pTerm->eOperator & WO_IN ){
+      if( pTerm->pExpr->flags & EP_xIsSelect ){
+        /* No affinity ever needs to be (or should be) applied to a value
+        ** from the RHS of an "? IN (SELECT ...)" expression.
IN (SELECT ...)" expression. The + ** sqlite3FindInIndex() routine has already ensured that the + ** affinity of the comparison has been applied to the value. */ + if( zAff ) zAff[j] = SQLITE_AFF_BLOB; + } + }else if( (pTerm->eOperator & WO_ISNULL)==0 ){ + Expr *pRight = pTerm->pExpr->pRight; + if( (pTerm->wtFlags & TERM_IS)==0 && sqlite3ExprCanBeNull(pRight) ){ + sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk); + VdbeCoverage(v); + } + if( zAff ){ + if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){ + zAff[j] = SQLITE_AFF_BLOB; + } + if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){ + zAff[j] = SQLITE_AFF_BLOB; + } + } + } + } + *pzAff = zAff; + return regBase; +} + +#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS +/* +** If the most recently coded instruction is a constant range constraint +** (a string literal) that originated from the LIKE optimization, then +** set P3 and P5 on the OP_String opcode so that the string will be cast +** to a BLOB at appropriate times. +** +** The LIKE optimization trys to evaluate "x LIKE 'abc%'" as a range +** expression: "x>='ABC' AND x<'abd'". But this requires that the range +** scan loop run twice, once for strings and a second time for BLOBs. +** The OP_String opcodes on the second pass convert the upper and lower +** bound string constants to blobs. This routine makes the necessary changes +** to the OP_String opcodes for that to happen. +** +** Except, of course, if SQLITE_LIKE_DOESNT_MATCH_BLOBS is defined, then +** only the one pass through the string space is required, so this routine +** becomes a no-op. +*/ +static void whereLikeOptimizationStringFixup( + Vdbe *v, /* prepared statement under construction */ + WhereLevel *pLevel, /* The loop that contains the LIKE operator */ + WhereTerm *pTerm /* The upper or lower bound just coded */ +){ + if( pTerm->wtFlags & TERM_LIKEOPT ){ + VdbeOp *pOp; + assert( pLevel->iLikeRepCntr>0 ); + pOp = sqlite3VdbeGetOp(v, -1); + assert( pOp!=0 ); + assert( pOp->opcode==OP_String8 + || pTerm->pWC->pWInfo->pParse->db->mallocFailed ); + pOp->p3 = (int)(pLevel->iLikeRepCntr>>1); /* Register holding counter */ + pOp->p5 = (u8)(pLevel->iLikeRepCntr&1); /* ASC or DESC */ + } +} +#else +# define whereLikeOptimizationStringFixup(A,B,C) +#endif + +#ifdef SQLITE_ENABLE_CURSOR_HINTS +/* +** Information is passed from codeCursorHint() down to individual nodes of +** the expression tree (by sqlite3WalkExpr()) using an instance of this +** structure. +*/ +struct CCurHint { + int iTabCur; /* Cursor for the main table */ + int iIdxCur; /* Cursor for the index, if pIdx!=0. Unused otherwise */ + Index *pIdx; /* The index used to access the table */ +}; + +/* +** This function is called for every node of an expression that is a candidate +** for a cursor hint on an index cursor. For TK_COLUMN nodes that reference +** the table CCurHint.iTabCur, verify that the same column can be +** accessed through the index. If it cannot, then set pWalker->eCode to 1. +*/ +static int codeCursorHintCheckExpr(Walker *pWalker, Expr *pExpr){ + struct CCurHint *pHint = pWalker->u.pCCurHint; + assert( pHint->pIdx!=0 ); + if( pExpr->op==TK_COLUMN + && pExpr->iTable==pHint->iTabCur + && sqlite3ColumnOfIndex(pHint->pIdx, pExpr->iColumn)<0 + ){ + pWalker->eCode = 1; + } + return WRC_Continue; +} + +/* +** Test whether or not expression pExpr, which was part of a WHERE clause, +** should be included in the cursor-hint for a table that is on the rhs +** of a LEFT JOIN. 
+** expression is not suitable.
+**
+** An expression is unsuitable if it might evaluate to non-NULL even if
+** a TK_COLUMN node that does affect the value of the expression is set
+** to NULL. For example:
+**
+**   col IS NULL
+**   col IS NOT NULL
+**   coalesce(col, 1)
+**   CASE WHEN col THEN 0 ELSE 1 END
+*/
+static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){
+  if( pExpr->op==TK_IS
+   || pExpr->op==TK_ISNULL || pExpr->op==TK_ISNOT
+   || pExpr->op==TK_NOTNULL || pExpr->op==TK_CASE
+  ){
+    pWalker->eCode = 1;
+  }else if( pExpr->op==TK_FUNCTION ){
+    int d1;
+    char d2[3];
+    if( 0==sqlite3IsLikeFunction(pWalker->pParse->db, pExpr, &d1, d2) ){
+      pWalker->eCode = 1;
+    }
+  }
+
+  return WRC_Continue;
+}
+
+
+/*
+** This function is called on every node of an expression tree used as an
+** argument to the OP_CursorHint instruction. If the node is a TK_COLUMN
+** that accesses any table other than the one identified by
+** CCurHint.iTabCur, then do the following:
+**
+**   1) allocate a register and code an OP_Column instruction to read
+**      the specified column into the new register, and
+**
+**   2) transform the expression node to a TK_REGISTER node that reads
+**      from the newly populated register.
+**
+** Also, if the node is a TK_COLUMN that does access the table identified
+** by pCCurHint.iTabCur, and an index is being used (which we will
+** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into
+** an access of the index rather than the original table.
+*/
+static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){
+  int rc = WRC_Continue;
+  struct CCurHint *pHint = pWalker->u.pCCurHint;
+  if( pExpr->op==TK_COLUMN ){
+    if( pExpr->iTable!=pHint->iTabCur ){
+      Vdbe *v = pWalker->pParse->pVdbe;
+      int reg = ++pWalker->pParse->nMem;   /* Register for column value */
+      sqlite3ExprCodeGetColumnOfTable(
+          v, pExpr->pTab, pExpr->iTable, pExpr->iColumn, reg
+      );
+      pExpr->op = TK_REGISTER;
+      pExpr->iTable = reg;
+    }else if( pHint->pIdx!=0 ){
+      pExpr->iTable = pHint->iIdxCur;
+      pExpr->iColumn = sqlite3ColumnOfIndex(pHint->pIdx, pExpr->iColumn);
+      assert( pExpr->iColumn>=0 );
+    }
+  }else if( pExpr->op==TK_AGG_FUNCTION ){
+    /* An aggregate function in the WHERE clause of a query means this must
+    ** be a correlated sub-query, and expression pExpr is an aggregate from
+    ** the parent context. Do not walk the function arguments in this case.
+    **
+    ** todo: It should be possible to replace this node with a TK_REGISTER
+    ** expression, as the result of the expression must be stored in a
+    ** register at this point. The same holds for TK_AGG_COLUMN nodes. */
+    rc = WRC_Prune;
+  }
+  return rc;
+}
+
+/*
+** Insert an OP_CursorHint instruction if it is appropriate to do so.
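+**
+** For illustration (a hypothetical query, not from the original comment):
+** given "SELECT * FROM t1 WHERE a=10 AND b>5" scanned with an index on
+** t1(a,b), the terms a=10 AND b>5 would survive the checks below and be
+** pushed down to the b-tree layer as the P4_EXPR argument of OP_CursorHint.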
+*/
+static void codeCursorHint(
+  struct SrcList_item *pTabItem,  /* FROM clause item */
+  WhereInfo *pWInfo,    /* The where clause */
+  WhereLevel *pLevel,   /* Which loop to provide hints for */
+  WhereTerm *pEndRange  /* Hint this end-of-scan boundary term if not NULL */
+){
+  Parse *pParse = pWInfo->pParse;
+  sqlite3 *db = pParse->db;
+  Vdbe *v = pParse->pVdbe;
+  Expr *pExpr = 0;
+  WhereLoop *pLoop = pLevel->pWLoop;
+  int iCur;
+  WhereClause *pWC;
+  WhereTerm *pTerm;
+  int i, j;
+  struct CCurHint sHint;
+  Walker sWalker;
+
+  if( OptimizationDisabled(db, SQLITE_CursorHints) ) return;
+  iCur = pLevel->iTabCur;
+  assert( iCur==pWInfo->pTabList->a[pLevel->iFrom].iCursor );
+  sHint.iTabCur = iCur;
+  sHint.iIdxCur = pLevel->iIdxCur;
+  sHint.pIdx = pLoop->u.btree.pIndex;
+  memset(&sWalker, 0, sizeof(sWalker));
+  sWalker.pParse = pParse;
+  sWalker.u.pCCurHint = &sHint;
+  pWC = &pWInfo->sWC;
+  for(i=0; i<pWC->nTerm; i++){
+    pTerm = &pWC->a[i];
+    if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+    if( pTerm->prereqAll & pLevel->notReady ) continue;
+
+    /* Any terms specified as part of the ON(...) clause for any LEFT
+    ** JOIN for which the current table is not the rhs are omitted
+    ** from the cursor-hint.
+    **
+    ** If this table is the rhs of a LEFT JOIN, "IS" or "IS NULL" terms
+    ** that were specified as part of the WHERE clause must be excluded.
+    ** This is to address the following:
+    **
+    **   SELECT ... t1 LEFT JOIN t2 ON (t1.a=t2.b) WHERE t2.c IS NULL;
+    **
+    ** Say there is a single row in t2 that matches (t1.a=t2.b), but its
+    ** t2.c value is not NULL. If the (t2.c IS NULL) constraint is
+    ** pushed down to the cursor, this row is filtered out, causing
+    ** SQLite to synthesize a row of NULL values. That row does match the
+    ** WHERE clause, and so the query returns a row. Which is incorrect.
+    **
+    ** For the same reason, WHERE terms such as:
+    **
+    **   WHERE 1 = (t2.c IS NULL)
+    **
+    ** are also excluded. See codeCursorHintIsOrFunction() for details.
+    */
+    if( pTabItem->fg.jointype & JT_LEFT ){
+      Expr *pExpr = pTerm->pExpr;
+      if( !ExprHasProperty(pExpr, EP_FromJoin)
+       || pExpr->iRightJoinTable!=pTabItem->iCursor
+      ){
+        sWalker.eCode = 0;
+        sWalker.xExprCallback = codeCursorHintIsOrFunction;
+        sqlite3WalkExpr(&sWalker, pTerm->pExpr);
+        if( sWalker.eCode ) continue;
+      }
+    }else{
+      if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue;
+    }
+
+    /* All terms in pWLoop->aLTerm[] except pEndRange are used to initialize
+    ** the cursor. These terms are not needed as hints for a pure range
+    ** scan (that has no == terms) so omit them. */
+    if( pLoop->u.btree.nEq==0 && pTerm!=pEndRange ){
+      for(j=0; j<pLoop->nLTerm && pLoop->aLTerm[j]!=pTerm; j++){}
+      if( j<pLoop->nLTerm ) continue;
+    }
+
+    /* No subqueries or non-deterministic functions allowed */
+    if( sqlite3ExprContainsSubquery(pTerm->pExpr) ) continue;
+
+    /* For an index scan, make sure referenced columns are actually in
+    ** the index. */
+    if( sHint.pIdx!=0 ){
+      sWalker.eCode = 0;
+      sWalker.xExprCallback = codeCursorHintCheckExpr;
+      sqlite3WalkExpr(&sWalker, pTerm->pExpr);
+      if( sWalker.eCode ) continue;
+    }
+
+    /* If we survive all prior tests, that means this term is worth hinting */
+    pExpr = sqlite3ExprAnd(db, pExpr, sqlite3ExprDup(db, pTerm->pExpr, 0));
+  }
+  if( pExpr!=0 ){
+    sWalker.xExprCallback = codeCursorHintFixExpr;
+    sqlite3WalkExpr(&sWalker, pExpr);
+    sqlite3VdbeAddOp4(v, OP_CursorHint,
+                      (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0,
+                      (const char*)pExpr, P4_EXPR);
+  }
+}
+#else
+# define codeCursorHint(A,B,C,D)  /* No-op */
+#endif /* SQLITE_ENABLE_CURSOR_HINTS */
+
+/*
+** Cursor iCur is open on an intkey b-tree (a table). Register iRowid contains
+** a rowid value just read from cursor iIdxCur, open on index pIdx. This
+** function generates code to do a deferred seek of cursor iCur to the
+** rowid stored in register iRowid.
+**
+** Normally, this is just:
+**
+**   OP_Seek $iCur $iRowid
+**
+** However, if the scan currently being coded is a branch of an OR-loop and
+** the statement currently being coded is a SELECT, then P3 of the OP_Seek
+** is set to iIdxCur and P4 is set to point to an array of integers
+** containing one entry for each column of the table cursor iCur is open
+** on. For each table column, if the column is the i'th column of the
+** index, then the corresponding array entry is set to (i+1). If the column
+** does not appear in the index at all, the array entry is set to 0.
+*/
+static void codeDeferredSeek(
+  WhereInfo *pWInfo,              /* Where clause context */
+  Index *pIdx,                    /* Index scan is using */
+  int iCur,                       /* Cursor for IPK b-tree */
+  int iIdxCur                     /* Index cursor */
+){
+  Parse *pParse = pWInfo->pParse; /* Parse context */
+  Vdbe *v = pParse->pVdbe;        /* Vdbe to generate code within */
+
+  assert( iIdxCur>0 );
+  assert( pIdx->aiColumn[pIdx->nColumn-1]==-1 );
+
+  sqlite3VdbeAddOp3(v, OP_Seek, iIdxCur, 0, iCur);
+  if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
+   && DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask)
+  ){
+    int i;
+    Table *pTab = pIdx->pTable;
+    int *ai = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*(pTab->nCol+1));
+    if( ai ){
+      ai[0] = pTab->nCol;
+      for(i=0; i<pIdx->nColumn-1; i++){
+        assert( pIdx->aiColumn[i]<pTab->nCol );
+        if( pIdx->aiColumn[i]>=0 ) ai[pIdx->aiColumn[i]+1] = i+1;
+      }
+      sqlite3VdbeChangeP4(v, -1, (char*)ai, P4_INTARRAY);
+    }
+  }
+}
+
+/*
+** If the expression passed as the second argument is a vector, generate
+** code to write the first nReg elements of the vector into an array
+** of registers starting with iReg.
+**
+** If the expression is not a vector, then nReg must be passed 1. In
+** this case, generate code to evaluate the expression and leave the
+** result in register iReg.
+*/
+static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){
+  assert( nReg>0 );
+  if( sqlite3ExprIsVector(p) ){
+#ifndef SQLITE_OMIT_SUBQUERY
+    if( (p->flags & EP_xIsSelect) ){
+      Vdbe *v = pParse->pVdbe;
+      int iSelect = sqlite3CodeSubselect(pParse, p, 0, 0);
+      sqlite3VdbeAddOp3(v, OP_Copy, iSelect, iReg, nReg-1);
+    }else
+#endif
+    {
+      int i;
+      ExprList *pList = p->x.pList;
+      assert( nReg<=pList->nExpr );
+      for(i=0; i<nReg; i++){
+        sqlite3ExprCode(pParse, pList->a[i].pExpr, iReg+i);
+      }
+    }
+  }else{
+    assert( nReg==1 );
+    sqlite3ExprCode(pParse, p, iReg);
+  }
+}
+
+/*
+** Generate code for the start of the iLevel-th loop in the WHERE clause
+** implementation described by pWInfo.
+*/ +SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( + WhereInfo *pWInfo, /* Complete information about the WHERE clause */ + int iLevel, /* Which level of pWInfo->a[] should be coded */ + Bitmask notReady /* Which tables are currently available */ +){ + int j, k; /* Loop counters */ + int iCur; /* The VDBE cursor for the table */ + int addrNxt; /* Where to jump to continue with the next IN case */ + int omitTable; /* True if we use the index only */ + int bRev; /* True if we need to scan in reverse order */ + WhereLevel *pLevel; /* The where level to be coded */ + WhereLoop *pLoop; /* The WhereLoop object being coded */ + WhereClause *pWC; /* Decomposition of the entire WHERE clause */ + WhereTerm *pTerm; /* A WHERE clause term */ + Parse *pParse; /* Parsing context */ + sqlite3 *db; /* Database connection */ + Vdbe *v; /* The prepared stmt under constructions */ + struct SrcList_item *pTabItem; /* FROM clause term being coded */ + int addrBrk; /* Jump here to break out of the loop */ + int addrCont; /* Jump here to continue with next cycle */ + int iRowidReg = 0; /* Rowid is stored in this register, if not zero */ + int iReleaseReg = 0; /* Temp register to free before returning */ + + pParse = pWInfo->pParse; + v = pParse->pVdbe; + pWC = &pWInfo->sWC; + db = pParse->db; + pLevel = &pWInfo->a[iLevel]; + pLoop = pLevel->pWLoop; + pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; + iCur = pTabItem->iCursor; + pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); + bRev = (pWInfo->revMask>>iLevel)&1; + omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0 + && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0; + VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); + + /* Create labels for the "break" and "continue" instructions + ** for the current loop. Jump to addrBrk to break out of a loop. + ** Jump to cont to go immediately to the next iteration of the + ** loop. + ** + ** When there is an IN operator, we also have a "addrNxt" label that + ** means to continue with the next IN value combination. When + ** there are no IN operators in the constraints, the "addrNxt" label + ** is the same as "addrBrk". + */ + addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(v); + addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(v); + + /* If this is the right table of a LEFT OUTER JOIN, allocate and + ** initialize a memory cell that records if this table matches any + ** row of the left table of the join. + */ + if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){ + pLevel->iLeftJoin = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); + VdbeComment((v, "init LEFT JOIN no-match flag")); + } + + /* Special case of a FROM clause subquery implemented as a co-routine */ + if( pTabItem->fg.viaCoroutine ){ + int regYield = pTabItem->regReturn; + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk); + VdbeCoverage(v); + VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName)); + pLevel->op = OP_Goto; + }else + +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ + /* Case 1: The table is a virtual-table. Use the VFilter and VNext + ** to access the data. 
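+    ** (Illustrative sketch, not from the original comment: the generated
+    ** loop is essentially
+    **
+    **        OP_VFilter <cursor> <jump-if-empty> <argv-base>
+    **     T: ...loop body...
+    **        OP_VNext   <cursor> T
+    **
+    ** with the xFilter arguments assembled into registers beforehand.)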
+    */
+    int iReg;   /* P3 Value for OP_VFilter */
+    int addrNotFound;
+    int nConstraint = pLoop->nLTerm;
+    int iIn;    /* Counter for IN constraints */
+
+    sqlite3ExprCachePush(pParse);
+    iReg = sqlite3GetTempRange(pParse, nConstraint+2);
+    addrNotFound = pLevel->addrBrk;
+    for(j=0; j<nConstraint; j++){
+      int iTarget = iReg+j+2;
+      pTerm = pLoop->aLTerm[j];
+      if( NEVER(pTerm==0) ) continue;
+      if( pTerm->eOperator & WO_IN ){
+        codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
+        addrNotFound = pLevel->addrNxt;
+      }else{
+        Expr *pRight = pTerm->pExpr->pRight;
+        codeExprOrVector(pParse, pRight, iTarget, 1);
+      }
+    }
+    sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg);
+    sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1);
+    sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg,
+                      pLoop->u.vtab.idxStr,
+                      pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC);
+    VdbeCoverage(v);
+    pLoop->u.vtab.needFree = 0;
+    pLevel->p1 = iCur;
+    pLevel->op = pWInfo->eOnePass ? OP_Noop : OP_VNext;
+    pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+    iIn = pLevel->u.in.nIn;
+    for(j=nConstraint-1; j>=0; j--){
+      pTerm = pLoop->aLTerm[j];
+      if( j<16 && (pLoop->u.vtab.omitMask>>j)&1 ){
+        disableTerm(pLevel, pTerm);
+      }else if( (pTerm->eOperator & WO_IN)!=0 ){
+        Expr *pCompare;  /* The comparison operator */
+        Expr *pRight;    /* RHS of the comparison */
+        VdbeOp *pOp;     /* Opcode to access the value of the IN constraint */
+
+        /* Reload the constraint value into reg[iReg+j+2]. The same value
+        ** was loaded into the same register prior to the OP_VFilter, but
+        ** the xFilter implementation might have changed the datatype or
+        ** encoding of the value in the register, so it *must* be reloaded. */
+        assert( pLevel->u.in.aInLoop!=0 || db->mallocFailed );
+        if( !db->mallocFailed ){
+          assert( iIn>0 );
+          pOp = sqlite3VdbeGetOp(v, pLevel->u.in.aInLoop[--iIn].addrInTop);
+          assert( pOp->opcode==OP_Column || pOp->opcode==OP_Rowid );
+          assert( pOp->opcode!=OP_Column || pOp->p3==iReg+j+2 );
+          assert( pOp->opcode!=OP_Rowid || pOp->p2==iReg+j+2 );
+          testcase( pOp->opcode==OP_Rowid );
+          sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3);
+        }
+
+        /* Generate code that will continue to the next row if
+        ** the IN constraint is not satisfied */
+        pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0);
+        assert( pCompare!=0 || db->mallocFailed );
+        if( pCompare ){
+          pCompare->pLeft = pTerm->pExpr->pLeft;
+          pCompare->pRight = pRight = sqlite3Expr(db, TK_REGISTER, 0);
+          if( pRight ){
+            pRight->iTable = iReg+j+2;
+            sqlite3ExprIfFalse(pParse, pCompare, pLevel->addrCont, 0);
+          }
+          pCompare->pLeft = 0;
+          sqlite3ExprDelete(db, pCompare);
+        }
+      }
+    }
+    /* These registers need to be preserved in case there is an IN operator
+    ** loop.  So we could deallocate the registers here (and potentially
+    ** reuse them later) if (pLoop->wsFlags & WHERE_IN_ABLE)==0.  But it seems
+    ** simpler and safer to simply not reuse the registers.
+    **
+    **    sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
+    */
+    sqlite3ExprCachePop(pParse);
+  }else
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+  if( (pLoop->wsFlags & WHERE_IPK)!=0
+   && (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0
+  ){
+    /* Case 2:  We can directly reference a single row using an
+    **          equality comparison against the ROWID field.  Or
+    **          we reference multiple rows using a "rowid IN (...)"
+    **          construct.
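+    **
+    **          (For example -- an editorial illustration: the query
+    **          "SELECT * FROM t1 WHERE rowid=27" is coded as a single
+    **          OP_SeekRowid, while "rowid IN (1,2,3)" drives the same
+    **          seek from inside an IN-driven loop.)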
+    */
+    assert( pLoop->u.btree.nEq==1 );
+    pTerm = pLoop->aLTerm[0];
+    assert( pTerm!=0 );
+    assert( pTerm->pExpr!=0 );
+    assert( omitTable==0 );
+    testcase( pTerm->wtFlags & TERM_VIRTUAL );
+    iReleaseReg = ++pParse->nMem;
+    iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg);
+    if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg);
+    addrNxt = pLevel->addrNxt;
+    sqlite3VdbeAddOp3(v, OP_SeekRowid, iCur, addrNxt, iRowidReg);
+    VdbeCoverage(v);
+    sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1);
+    sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
+    VdbeComment((v, "pk"));
+    pLevel->op = OP_Noop;
+  }else if( (pLoop->wsFlags & WHERE_IPK)!=0
+         && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0
+  ){
+    /* Case 3:  We have an inequality comparison against the ROWID field.
+    */
+    int testOp = OP_Noop;
+    int start;
+    int memEndValue = 0;
+    WhereTerm *pStart, *pEnd;
+
+    assert( omitTable==0 );
+    j = 0;
+    pStart = pEnd = 0;
+    if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++];
+    if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++];
+    assert( pStart!=0 || pEnd!=0 );
+    if( bRev ){
+      pTerm = pStart;
+      pStart = pEnd;
+      pEnd = pTerm;
+    }
+    codeCursorHint(pTabItem, pWInfo, pLevel, pEnd);
+    if( pStart ){
+      Expr *pX;             /* The expression that defines the start bound */
+      int r1, rTemp;        /* Registers for holding the start boundary */
+      int op;               /* Cursor seek operation */
+
+      /* The following constant maps TK_xx codes into corresponding
+      ** seek opcodes.  It depends on a particular ordering of TK_xx
+      */
+      const u8 aMoveOp[] = {
+           /* TK_GT */  OP_SeekGT,
+           /* TK_LE */  OP_SeekLE,
+           /* TK_LT */  OP_SeekLT,
+           /* TK_GE */  OP_SeekGE
+      };
+      assert( TK_LE==TK_GT+1 );      /* Make sure the ordering.. */
+      assert( TK_LT==TK_GT+2 );      /*  ... of the TK_xx values... */
+      assert( TK_GE==TK_GT+3 );      /*  ... is correct. */
+
+      assert( (pStart->wtFlags & TERM_VNULL)==0 );
+      testcase( pStart->wtFlags & TERM_VIRTUAL );
+      pX = pStart->pExpr;
+      assert( pX!=0 );
+      testcase( pStart->leftCursor!=iCur ); /* transitive constraints */
+      if( sqlite3ExprIsVector(pX->pRight) ){
+        r1 = rTemp = sqlite3GetTempReg(pParse);
+        codeExprOrVector(pParse, pX->pRight, r1, 1);
+        op = aMoveOp[(pX->op - TK_GT) | 0x0001];
+      }else{
+        r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
+        disableTerm(pLevel, pStart);
+        op = aMoveOp[(pX->op - TK_GT)];
+      }
+      sqlite3VdbeAddOp3(v, op, iCur, addrBrk, r1);
+      VdbeComment((v, "pk"));
+      VdbeCoverageIf(v, pX->op==TK_GT);
+      VdbeCoverageIf(v, pX->op==TK_LE);
+      VdbeCoverageIf(v, pX->op==TK_LT);
+      VdbeCoverageIf(v, pX->op==TK_GE);
+      sqlite3ExprCacheAffinityChange(pParse, r1, 1);
+      sqlite3ReleaseTempReg(pParse, rTemp);
+    }else{
+      sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk);
+      VdbeCoverageIf(v, bRev==0);
+      VdbeCoverageIf(v, bRev!=0);
+    }
+    if( pEnd ){
+      Expr *pX;
+      pX = pEnd->pExpr;
+      assert( pX!=0 );
+      assert( (pEnd->wtFlags & TERM_VNULL)==0 );
+      testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */
+      testcase( pEnd->wtFlags & TERM_VIRTUAL );
+      memEndValue = ++pParse->nMem;
+      codeExprOrVector(pParse, pX->pRight, memEndValue, 1);
+      if( 0==sqlite3ExprIsVector(pX->pRight)
+       && (pX->op==TK_LT || pX->op==TK_GT)
+      ){
+        testOp = bRev ? OP_Le : OP_Ge;
+      }else{
+        testOp = bRev ? OP_Lt : OP_Gt;
+      }
+      if( 0==sqlite3ExprIsVector(pX->pRight) ){
+        disableTerm(pLevel, pEnd);
+      }
+    }
+    start = sqlite3VdbeCurrentAddr(v);
+    pLevel->op = bRev ?
OP_Prev : OP_Next; + pLevel->p1 = iCur; + pLevel->p2 = start; + assert( pLevel->p5==0 ); + if( testOp!=OP_Noop ){ + iRowidReg = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg); + sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); + sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg); + VdbeCoverageIf(v, testOp==OP_Le); + VdbeCoverageIf(v, testOp==OP_Lt); + VdbeCoverageIf(v, testOp==OP_Ge); + VdbeCoverageIf(v, testOp==OP_Gt); + sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL); + } + }else if( pLoop->wsFlags & WHERE_INDEXED ){ + /* Case 4: A scan using an index. + ** + ** The WHERE clause may contain zero or more equality + ** terms ("==" or "IN" operators) that refer to the N + ** left-most columns of the index. It may also contain + ** inequality constraints (>, <, >= or <=) on the indexed + ** column that immediately follows the N equalities. Only + ** the right-most column can be an inequality - the rest must + ** use the "==" and "IN" operators. For example, if the + ** index is on (x,y,z), then the following clauses are all + ** optimized: + ** + ** x=5 + ** x=5 AND y=10 + ** x=5 AND y<10 + ** x=5 AND y>5 AND y<10 + ** x=5 AND y=5 AND z<=10 + ** + ** The z<10 term of the following cannot be used, only + ** the x=5 term: + ** + ** x=5 AND z<10 + ** + ** N may be zero if there are inequality constraints. + ** If there are no inequality constraints, then N is at + ** least one. + ** + ** This case is also used when there are no WHERE clause + ** constraints but an index is selected anyway, in order + ** to force the output order to conform to an ORDER BY. + */ + static const u8 aStartOp[] = { + 0, + 0, + OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */ + OP_Last, /* 3: (!start_constraints && startEq && bRev) */ + OP_SeekGT, /* 4: (start_constraints && !startEq && !bRev) */ + OP_SeekLT, /* 5: (start_constraints && !startEq && bRev) */ + OP_SeekGE, /* 6: (start_constraints && startEq && !bRev) */ + OP_SeekLE /* 7: (start_constraints && startEq && bRev) */ + }; + static const u8 aEndOp[] = { + OP_IdxGE, /* 0: (end_constraints && !bRev && !endEq) */ + OP_IdxGT, /* 1: (end_constraints && !bRev && endEq) */ + OP_IdxLE, /* 2: (end_constraints && bRev && !endEq) */ + OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */ + }; + u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */ + u16 nBtm = pLoop->u.btree.nBtm; /* Length of BTM vector */ + u16 nTop = pLoop->u.btree.nTop; /* Length of TOP vector */ + int regBase; /* Base register holding constraint values */ + WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */ + WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */ + int startEq; /* True if range start uses ==, >= or <= */ + int endEq; /* True if range end uses ==, >= or <= */ + int start_constraints; /* Start of range is constrained */ + int nConstraint; /* Number of constraint terms */ + Index *pIdx; /* The index we will be using */ + int iIdxCur; /* The VDBE cursor for the index */ + int nExtraReg = 0; /* Number of extra registers needed */ + int op; /* Instruction opcode */ + char *zStartAff; /* Affinity for start of range constraint */ + char *zEndAff = 0; /* Affinity for end of range constraint */ + u8 bSeekPastNull = 0; /* True to seek past initial nulls */ + u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */ + + pIdx = pLoop->u.btree.pIndex; + iIdxCur = pLevel->iIdxCur; + assert( nEq>=pLoop->nSkip ); + + /* If this loop satisfies a sort order (pOrderBy) request that + ** was passed 
to this function to implement a "SELECT min(x) ..."
+    ** query, then the caller will only allow the loop to run for
+    ** a single iteration. This means that the first row returned
+    ** should not have a NULL value stored in 'x'. If column 'x' is
+    ** the first one after the nEq equality constraints in the index,
+    ** this requires some special handling.
+    */
+    assert( pWInfo->pOrderBy==0
+         || pWInfo->pOrderBy->nExpr==1
+         || (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 );
+    if( (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)!=0
+     && pWInfo->nOBSat>0
+     && (pIdx->nKeyCol>nEq)
+    ){
+      assert( pLoop->nSkip==0 );
+      bSeekPastNull = 1;
+      nExtraReg = 1;
+    }
+
+    /* Find any inequality constraint terms for the start and end
+    ** of the range.
+    */
+    j = nEq;
+    if( pLoop->wsFlags & WHERE_BTM_LIMIT ){
+      pRangeStart = pLoop->aLTerm[j++];
+      nExtraReg = MAX(nExtraReg, pLoop->u.btree.nBtm);
+      /* Like optimization range constraints always occur in pairs */
+      assert( (pRangeStart->wtFlags & TERM_LIKEOPT)==0 ||
+              (pLoop->wsFlags & WHERE_TOP_LIMIT)!=0 );
+    }
+    if( pLoop->wsFlags & WHERE_TOP_LIMIT ){
+      pRangeEnd = pLoop->aLTerm[j++];
+      nExtraReg = MAX(nExtraReg, pLoop->u.btree.nTop);
+#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
+      if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){
+        assert( pRangeStart!=0 );                     /* LIKE opt constraints */
+        assert( pRangeStart->wtFlags & TERM_LIKEOPT );   /* occur in pairs */
+        pLevel->iLikeRepCntr = (u32)++pParse->nMem;
+        sqlite3VdbeAddOp2(v, OP_Integer, 1, (int)pLevel->iLikeRepCntr);
+        VdbeComment((v, "LIKE loop counter"));
+        pLevel->addrLikeRep = sqlite3VdbeCurrentAddr(v);
+        /* iLikeRepCntr actually stores 2x the counter register number.  The
+        ** bottom bit indicates whether the search order is ASC or DESC. */
+        testcase( bRev );
+        testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC );
+        assert( (bRev & ~1)==0 );
+        pLevel->iLikeRepCntr <<=1;
+        pLevel->iLikeRepCntr |= bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC);
+      }
+#endif
+      if( pRangeStart==0 ){
+        j = pIdx->aiColumn[nEq];
+        if( (j>=0 && pIdx->pTable->aCol[j].notNull==0) || j==XN_EXPR ){
+          bSeekPastNull = 1;
+        }
+      }
+    }
+    assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 );
+
+    /* If we are doing a reverse order scan on an ascending index, or
+    ** a forward order scan on a descending index, interchange the
+    ** start and end terms (pRangeStart and pRangeEnd).
+    */
+    if( (nEq<pIdx->nKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC))
+     || (bRev && pIdx->nKeyCol==nEq)
+    ){
+      SWAP(WhereTerm *, pRangeEnd, pRangeStart);
+      SWAP(u8, bSeekPastNull, bStopAtNull);
+      SWAP(u8, nBtm, nTop);
+    }
+
+    /* Generate code to evaluate all constraint terms using == or IN
+    ** and store the values of those terms in an array of registers
+    ** starting at regBase.
+    */
+    codeCursorHint(pTabItem, pWInfo, pLevel, pRangeEnd);
+    regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff);
+    assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq );
+    if( zStartAff && nTop ){
+      zEndAff = sqlite3DbStrDup(db, &zStartAff[nEq]);
+    }
+    addrNxt = pLevel->addrNxt;
+
+    testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 );
+    testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 );
+    testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 );
+    testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 );
+    startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE);
+    endEq =   !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE);
+    start_constraints = pRangeStart || nEq>0;
+
+    /* Seek the index cursor to the start of the range.
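+    **
+    ** (Illustrative sketch, hypothetical schema: with an ascending index
+    ** on t1(x) and the constraint "x>10", start_constraints==1, startEq==0
+    ** and bRev==0, so aStartOp[] below selects OP_SeekGT; for "x>=10" the
+    ** startEq bit is set and OP_SeekGE is used instead.)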
*/
+    nConstraint = nEq;
+    if( pRangeStart ){
+      Expr *pRight = pRangeStart->pExpr->pRight;
+      codeExprOrVector(pParse, pRight, regBase+nEq, nBtm);
+      whereLikeOptimizationStringFixup(v, pLevel, pRangeStart);
+      if( (pRangeStart->wtFlags & TERM_VNULL)==0
+       && sqlite3ExprCanBeNull(pRight)
+      ){
+        sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
+        VdbeCoverage(v);
+      }
+      if( zStartAff ){
+        updateRangeAffinityStr(pRight, nBtm, &zStartAff[nEq]);
+      }
+      nConstraint += nBtm;
+      testcase( pRangeStart->wtFlags & TERM_VIRTUAL );
+      if( sqlite3ExprIsVector(pRight)==0 ){
+        disableTerm(pLevel, pRangeStart);
+      }else{
+        startEq = 1;
+      }
+      bSeekPastNull = 0;
+    }else if( bSeekPastNull ){
+      sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+      nConstraint++;
+      startEq = 0;
+      start_constraints = 1;
+    }
+    codeApplyAffinity(pParse, regBase, nConstraint - bSeekPastNull, zStartAff);
+    if( pLoop->nSkip>0 && nConstraint==pLoop->nSkip ){
+      /* The skip-scan logic inside the call to codeAllEqualityTerms()
+      ** above has already left the cursor sitting on the correct row,
+      ** so no further seeking is needed */
+    }else{
+      op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
+      assert( op!=0 );
+      sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
+      VdbeCoverage(v);
+      VdbeCoverageIf(v, op==OP_Rewind);  testcase( op==OP_Rewind );
+      VdbeCoverageIf(v, op==OP_Last);    testcase( op==OP_Last );
+      VdbeCoverageIf(v, op==OP_SeekGT);  testcase( op==OP_SeekGT );
+      VdbeCoverageIf(v, op==OP_SeekGE);  testcase( op==OP_SeekGE );
+      VdbeCoverageIf(v, op==OP_SeekLE);  testcase( op==OP_SeekLE );
+      VdbeCoverageIf(v, op==OP_SeekLT);  testcase( op==OP_SeekLT );
+    }
+
+    /* Load the value for the inequality constraint at the end of the
+    ** range (if any).
+    */
+    nConstraint = nEq;
+    if( pRangeEnd ){
+      Expr *pRight = pRangeEnd->pExpr->pRight;
+      sqlite3ExprCacheRemove(pParse, regBase+nEq, 1);
+      codeExprOrVector(pParse, pRight, regBase+nEq, nTop);
+      whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd);
+      if( (pRangeEnd->wtFlags & TERM_VNULL)==0
+       && sqlite3ExprCanBeNull(pRight)
+      ){
+        sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
+        VdbeCoverage(v);
+      }
+      if( zEndAff ){
+        updateRangeAffinityStr(pRight, nTop, zEndAff);
+        codeApplyAffinity(pParse, regBase+nEq, nTop, zEndAff);
+      }else{
+        assert( pParse->db->mallocFailed );
+      }
+      nConstraint += nTop;
+      testcase( pRangeEnd->wtFlags & TERM_VIRTUAL );
+
+      if( sqlite3ExprIsVector(pRight)==0 ){
+        disableTerm(pLevel, pRangeEnd);
+      }else{
+        endEq = 1;
+      }
+    }else if( bStopAtNull ){
+      sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
+      endEq = 0;
+      nConstraint++;
+    }
+    sqlite3DbFree(db, zStartAff);
+    sqlite3DbFree(db, zEndAff);
+
+    /* Top of the loop body */
+    pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+
+    /* Check if the index cursor is past the end of the range. */
+    if( nConstraint ){
+      op = aEndOp[bRev*2 + endEq];
+      sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
+      testcase( op==OP_IdxGT );  VdbeCoverageIf(v, op==OP_IdxGT );
+      testcase( op==OP_IdxGE );  VdbeCoverageIf(v, op==OP_IdxGE );
+      testcase( op==OP_IdxLT );  VdbeCoverageIf(v, op==OP_IdxLT );
+      testcase( op==OP_IdxLE );  VdbeCoverageIf(v, op==OP_IdxLE );
+    }
+
+    /* Seek the table cursor, if required */
+    if( omitTable ){
+      /* pIdx is a covering index.  No need to access the main table.
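+      ** (Editorial example: for CREATE INDEX i1 ON t1(a,b) and the query
+      ** "SELECT a,b FROM t1 WHERE a=5", every referenced column is
+      ** available in i1, so no OP_IdxRowid/OP_NotExists or deferred seek
+      ** against the main table is generated.)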
*/
+    }else if( HasRowid(pIdx->pTable) ){
+      if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE) || (
+          (pWInfo->wctrlFlags & WHERE_SEEK_UNIQ_TABLE)
+       && (pWInfo->eOnePass==ONEPASS_SINGLE)
+      )){
+        iRowidReg = ++pParse->nMem;
+        sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
+        sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
+        sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowidReg);
+        VdbeCoverage(v);
+      }else{
+        codeDeferredSeek(pWInfo, pIdx, iCur, iIdxCur);
+      }
+    }else if( iCur!=iIdxCur ){
+      Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
+      iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol);
+      for(j=0; j<pPk->nKeyCol; j++){
+        k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]);
+        sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j);
+      }
+      sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont,
+                           iRowidReg, pPk->nKeyCol); VdbeCoverage(v);
+    }
+
+    /* Record the instruction used to terminate the loop. */
+    if( pLoop->wsFlags & WHERE_ONEROW ){
+      pLevel->op = OP_Noop;
+    }else if( bRev ){
+      pLevel->op = OP_Prev;
+    }else{
+      pLevel->op = OP_Next;
+    }
+    pLevel->p1 = iIdxCur;
+    pLevel->p3 = (pLoop->wsFlags&WHERE_UNQ_WANTED)!=0 ? 1:0;
+    if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){
+      pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
+    }else{
+      assert( pLevel->p5==0 );
+    }
+  }else
+
+#ifndef SQLITE_OMIT_OR_OPTIMIZATION
+  if( pLoop->wsFlags & WHERE_MULTI_OR ){
+    /* Case 5:  Two or more separately indexed terms connected by OR
+    **
+    ** Example:
+    **
+    **   CREATE TABLE t1(a,b,c,d);
+    **   CREATE INDEX i1 ON t1(a);
+    **   CREATE INDEX i2 ON t1(b);
+    **   CREATE INDEX i3 ON t1(c);
+    **
+    **   SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13)
+    **
+    ** In the example, there are three indexed terms connected by OR.
+    ** The top of the loop looks like this:
+    **
+    **          Null       1                # Zero the rowset in reg 1
+    **
+    ** Then, for each indexed term, the following. The arguments to
+    ** RowSetTest are such that the rowid of the current row is inserted
+    ** into the RowSet. If it is already present, control skips the
+    ** Gosub opcode and jumps straight to the code generated by WhereEnd().
+    **
+    **        sqlite3WhereBegin(<term>)
+    **          RowSetTest                  # Insert rowid into rowset
+    **          Gosub      2 A
+    **        sqlite3WhereEnd()
+    **
+    ** Following the above, code to terminate the loop. Label A, the target
+    ** of the Gosub above, jumps to the instruction right after the Goto.
+    **
+    **          Null       1                # Zero the rowset in reg 1
+    **          Goto       B                # The loop is finished.
+    **
+    **       A: <loop body>                 # Return data, whatever.
+    **
+    **          Return     2                # Jump back to the Gosub
+    **
+    **       B: <after the loop>
+    **
+    ** Added 2014-05-26: If the table is a WITHOUT ROWID table, then
+    ** use an ephemeral index instead of a RowSet to record the primary
+    ** keys of the rows we have already seen.
+    **
+    */
+    WhereClause *pOrWc;    /* The OR-clause broken out into subterms */
+    SrcList *pOrTab;       /* Shortened table list for OR-clause generation */
+    Index *pCov = 0;             /* Potential covering index (or NULL) */
+    int iCovCur = pParse->nTab++;  /* Cursor used for index scans (if any) */
+
+    int regReturn = ++pParse->nMem;           /* Register used with OP_Gosub */
+    int regRowset = 0;                        /* Register for RowSet object */
+    int regRowid = 0;                         /* Register holding rowid */
+    int iLoopBody = sqlite3VdbeMakeLabel(v);  /* Start of loop body */
+    int iRetInit;                             /* Address of regReturn init */
+    int untestedTerms = 0;             /* Some terms not completely tested */
+    int ii;                            /* Loop counter */
+    u16 wctrlFlags;                    /* Flags for sub-WHERE clause */
+    Expr *pAndExpr = 0;                /* An "..
AND (...)" expression */ + Table *pTab = pTabItem->pTab; + + pTerm = pLoop->aLTerm[0]; + assert( pTerm!=0 ); + assert( pTerm->eOperator & WO_OR ); + assert( (pTerm->wtFlags & TERM_ORINFO)!=0 ); + pOrWc = &pTerm->u.pOrInfo->wc; + pLevel->op = OP_Return; + pLevel->p1 = regReturn; + + /* Set up a new SrcList in pOrTab containing the table being scanned + ** by this loop in the a[0] slot and all notReady tables in a[1..] slots. + ** This becomes the SrcList in the recursive call to sqlite3WhereBegin(). + */ + if( pWInfo->nLevel>1 ){ + int nNotReady; /* The number of notReady tables */ + struct SrcList_item *origSrc; /* Original list of tables */ + nNotReady = pWInfo->nLevel - iLevel - 1; + pOrTab = sqlite3StackAllocRaw(db, + sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); + if( pOrTab==0 ) return notReady; + pOrTab->nAlloc = (u8)(nNotReady + 1); + pOrTab->nSrc = pOrTab->nAlloc; + memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem)); + origSrc = pWInfo->pTabList->a; + for(k=1; k<=nNotReady; k++){ + memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k])); + } + }else{ + pOrTab = pWInfo->pTabList; + } + + /* Initialize the rowset register to contain NULL. An SQL NULL is + ** equivalent to an empty rowset. Or, create an ephemeral index + ** capable of holding primary keys in the case of a WITHOUT ROWID. + ** + ** Also initialize regReturn to contain the address of the instruction + ** immediately following the OP_Return at the bottom of the loop. This + ** is required in a few obscure LEFT JOIN cases where control jumps + ** over the top of the loop into the body of it. In this case the + ** correct response for the end-of-loop code (the OP_Return) is to + ** fall through to the next instruction, just as an OP_Next does if + ** called on an uninitialized cursor. + */ + if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ + if( HasRowid(pTab) ){ + regRowset = ++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset); + }else{ + Index *pPk = sqlite3PrimaryKeyIndex(pTab); + regRowset = pParse->nTab++; + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, regRowset, pPk->nKeyCol); + sqlite3VdbeSetP4KeyInfo(pParse, pPk); + } + regRowid = ++pParse->nMem; + } + iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); + + /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y + ** Then for every term xN, evaluate as the subexpression: xN AND z + ** That way, terms in y that are factored into the disjunction will + ** be picked up by the recursive calls to sqlite3WhereBegin() below. + ** + ** Actually, each subexpression is converted to "xN AND w" where w is + ** the "interesting" terms of z - terms that did not originate in the + ** ON or USING clause of a LEFT JOIN, and terms that are usable as + ** indices. + ** + ** This optimization also only applies if the (x1 OR x2 OR ...) term + ** is not contained in the ON clause of a LEFT JOIN. 
+    ** See ticket http://www.sqlite.org/src/info/f2369304e4
+    */
+    if( pWC->nTerm>1 ){
+      int iTerm;
+      for(iTerm=0; iTerm<pWC->nTerm; iTerm++){
+        Expr *pExpr = pWC->a[iTerm].pExpr;
+        if( &pWC->a[iTerm] == pTerm ) continue;
+        if( ExprHasProperty(pExpr, EP_FromJoin) ) continue;
+        testcase( pWC->a[iTerm].wtFlags & TERM_VIRTUAL );
+        testcase( pWC->a[iTerm].wtFlags & TERM_CODED );
+        if( (pWC->a[iTerm].wtFlags & (TERM_VIRTUAL|TERM_CODED))!=0 ) continue;
+        if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue;
+        testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO );
+        pExpr = sqlite3ExprDup(db, pExpr, 0);
+        pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr);
+      }
+      if( pAndExpr ){
+        pAndExpr = sqlite3PExpr(pParse, TK_AND|TKFLG_DONTFOLD, 0, pAndExpr);
+      }
+    }
+
+    /* Run a separate WHERE clause for each term of the OR clause.  After
+    ** eliminating duplicates from other WHERE clauses, the action for each
+    ** sub-WHERE clause is to invoke the main loop body as a subroutine.
+    */
+    wctrlFlags =  WHERE_OR_SUBCLAUSE | (pWInfo->wctrlFlags & WHERE_SEEK_TABLE);
+    for(ii=0; ii<pOrWc->nTerm; ii++){
+      WhereTerm *pOrTerm = &pOrWc->a[ii];
+      if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){
+        WhereInfo *pSubWInfo;            /* Info for single OR-term scan */
+        Expr *pOrExpr = pOrTerm->pExpr;  /* Current OR clause term */
+        int jmp1 = 0;                    /* Address of jump operation */
+        if( pAndExpr && !ExprHasProperty(pOrExpr, EP_FromJoin) ){
+          pAndExpr->pLeft = pOrExpr;
+          pOrExpr = pAndExpr;
+        }
+        /* Loop through table entries that match term pOrTerm. */
+        WHERETRACE(0xffff, ("Subplan for OR-clause:\n"));
+        pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0,
+                                      wctrlFlags, iCovCur);
+        assert( pSubWInfo || pParse->nErr || db->mallocFailed );
+        if( pSubWInfo ){
+          WhereLoop *pSubLoop;
+          int addrExplain = sqlite3WhereExplainOneScan(
+              pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0
+          );
+          sqlite3WhereAddScanStatus(v, pOrTab, &pSubWInfo->a[0], addrExplain);
+
+          /* This is the sub-WHERE clause body.  First skip over
+          ** duplicate rows from prior sub-WHERE clauses, and record the
+          ** rowid (or PRIMARY KEY) for the current row so that the same
+          ** row will be skipped in subsequent sub-WHERE clauses.
+          */
+          if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
+            int r;
+            int iSet = ((ii==pOrWc->nTerm-1)?-1:ii);
+            if( HasRowid(pTab) ){
+              r = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, regRowid, 0);
+              jmp1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0,
+                                           r,iSet);
+              VdbeCoverage(v);
+            }else{
+              Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+              int nPk = pPk->nKeyCol;
+              int iPk;
+
+              /* Read the PK into an array of temp registers. */
+              r = sqlite3GetTempRange(pParse, nPk);
+              for(iPk=0; iPk<nPk; iPk++){
+                int iCol = pPk->aiColumn[iPk];
+                sqlite3ExprCodeGetColumnToReg(pParse, pTab, iCol, iCur, r+iPk);
+              }
+
+              /* Check if the temp table already contains this key. If so,
+              ** the row has already been included in the result set and
+              ** can be ignored (by jumping past the Gosub below). Otherwise,
+              ** insert the key into the temp table and proceed with processing
+              ** the row.
+              **
+              ** Use some of the same optimizations as OP_RowSetTest: If iSet
+              ** is zero, assume that the key cannot already be present in
+              ** the temp table. And if iSet is -1, assume that there is no
+              ** need to insert the key into the temp table, as it will never
+              ** be tested for.
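+              **
+              ** (Illustrative sketch: for WHERE a=5 OR b=7 on a WITHOUT
+              ** ROWID table, the composite PRIMARY KEY of each row found
+              ** by the a=5 scan is inserted into the ephemeral index, and
+              ** the OP_Found test coded below skips any b=7 match whose
+              ** key is already present.)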
*/ + if( iSet ){ + jmp1 = sqlite3VdbeAddOp4Int(v, OP_Found, regRowset, 0, r, nPk); + VdbeCoverage(v); + } + if( iSet>=0 ){ + sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, regRowset, regRowid, + r, nPk); + if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); + } + + /* Release the array of temp registers */ + sqlite3ReleaseTempRange(pParse, r, nPk); + } + } + + /* Invoke the main loop body as a subroutine */ + sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody); + + /* Jump here (skipping the main loop body subroutine) if the + ** current sub-WHERE row is a duplicate from prior sub-WHEREs. */ + if( jmp1 ) sqlite3VdbeJumpHere(v, jmp1); + + /* The pSubWInfo->untestedTerms flag means that this OR term + ** contained one or more AND term from a notReady table. The + ** terms from the notReady table could not be tested and will + ** need to be tested later. + */ + if( pSubWInfo->untestedTerms ) untestedTerms = 1; + + /* If all of the OR-connected terms are optimized using the same + ** index, and the index is opened using the same cursor number + ** by each call to sqlite3WhereBegin() made by this loop, it may + ** be possible to use that index as a covering index. + ** + ** If the call to sqlite3WhereBegin() above resulted in a scan that + ** uses an index, and this is either the first OR-connected term + ** processed or the index is the same as that used by all previous + ** terms, set pCov to the candidate covering index. Otherwise, set + ** pCov to NULL to indicate that no candidate covering index will + ** be available. + */ + pSubLoop = pSubWInfo->a[0].pWLoop; + assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 ); + if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0 + && (ii==0 || pSubLoop->u.btree.pIndex==pCov) + && (HasRowid(pTab) || !IsPrimaryKeyIndex(pSubLoop->u.btree.pIndex)) + ){ + assert( pSubWInfo->a[0].iIdxCur==iCovCur ); + pCov = pSubLoop->u.btree.pIndex; + }else{ + pCov = 0; + } + + /* Finish the loop through table entries that match term pOrTerm. */ + sqlite3WhereEnd(pSubWInfo); + } + } + } + pLevel->u.pCovidx = pCov; + if( pCov ) pLevel->iIdxCur = iCovCur; + if( pAndExpr ){ + pAndExpr->pLeft = 0; + sqlite3ExprDelete(db, pAndExpr); + } + sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v)); + sqlite3VdbeGoto(v, pLevel->addrBrk); + sqlite3VdbeResolveLabel(v, iLoopBody); + + if( pWInfo->nLevel>1 ) sqlite3StackFree(db, pOrTab); + if( !untestedTerms ) disableTerm(pLevel, pTerm); + }else +#endif /* SQLITE_OMIT_OR_OPTIMIZATION */ + + { + /* Case 6: There is no usable index. We must do a complete + ** scan of the entire table. + */ + static const u8 aStep[] = { OP_Next, OP_Prev }; + static const u8 aStart[] = { OP_Rewind, OP_Last }; + assert( bRev==0 || bRev==1 ); + if( pTabItem->fg.isRecursive ){ + /* Tables marked isRecursive have only a single row that is stored in + ** a pseudo-cursor. No need to Rewind or Next such cursors. */ + pLevel->op = OP_Noop; + }else{ + codeCursorHint(pTabItem, pWInfo, pLevel, 0); + pLevel->op = aStep[bRev]; + pLevel->p1 = iCur; + pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk); + VdbeCoverageIf(v, bRev==0); + VdbeCoverageIf(v, bRev!=0); + pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; + } + } + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + pLevel->addrVisit = sqlite3VdbeCurrentAddr(v); +#endif + + /* Insert code to test every subexpression that can be completely + ** computed using the current set of tables. 
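+  **
+  ** (For example -- editorial illustration: when coding the t2 loop of
+  ** "SELECT * FROM t1, t2 WHERE t1.a=5 AND t1.b=t2.c", the term
+  ** t1.b=t2.c first becomes codable here, since both cursors are now
+  ** positioned; t1.a=5 was already coded in the outer t1 loop.)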
+ */ + for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ + Expr *pE; + int skipLikeAddr = 0; + testcase( pTerm->wtFlags & TERM_VIRTUAL ); + testcase( pTerm->wtFlags & TERM_CODED ); + if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; + if( (pTerm->prereqAll & pLevel->notReady)!=0 ){ + testcase( pWInfo->untestedTerms==0 + && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 ); + pWInfo->untestedTerms = 1; + continue; + } + pE = pTerm->pExpr; + assert( pE!=0 ); + if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){ + continue; + } + if( pTerm->wtFlags & TERM_LIKECOND ){ + /* If the TERM_LIKECOND flag is set, that means that the range search + ** is sufficient to guarantee that the LIKE operator is true, so we + ** can skip the call to the like(A,B) function. But this only works + ** for strings. So do not skip the call to the function on the pass + ** that compares BLOBs. */ +#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS + continue; +#else + u32 x = pLevel->iLikeRepCntr; + assert( x>0 ); + skipLikeAddr = sqlite3VdbeAddOp1(v, (x&1)? OP_IfNot : OP_If, (int)(x>>1)); + VdbeCoverage(v); +#endif + } + sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL); + if( skipLikeAddr ) sqlite3VdbeJumpHere(v, skipLikeAddr); + pTerm->wtFlags |= TERM_CODED; + } + + /* Insert code to test for implied constraints based on transitivity + ** of the "==" operator. + ** + ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123" + ** and we are coding the t1 loop and the t2 loop has not yet coded, + ** then we cannot use the "t1.a=t2.b" constraint, but we can code + ** the implied "t1.a=123" constraint. + */ + for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ + Expr *pE, sEAlt; + WhereTerm *pAlt; + if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; + if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue; + if( (pTerm->eOperator & WO_EQUIV)==0 ) continue; + if( pTerm->leftCursor!=iCur ) continue; + if( pLevel->iLeftJoin ) continue; + pE = pTerm->pExpr; + assert( !ExprHasProperty(pE, EP_FromJoin) ); + assert( (pTerm->prereqRight & pLevel->notReady)!=0 ); + pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.leftColumn, notReady, + WO_EQ|WO_IN|WO_IS, 0); + if( pAlt==0 ) continue; + if( pAlt->wtFlags & (TERM_CODED) ) continue; + testcase( pAlt->eOperator & WO_EQ ); + testcase( pAlt->eOperator & WO_IS ); + testcase( pAlt->eOperator & WO_IN ); + VdbeModuleComment((v, "begin transitive constraint")); + sEAlt = *pAlt->pExpr; + sEAlt.pLeft = pE->pLeft; + sqlite3ExprIfFalse(pParse, &sEAlt, addrCont, SQLITE_JUMPIFNULL); + } + + /* For a LEFT OUTER JOIN, generate code that will record the fact that + ** at least one row of the right table has matched the left table. 
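+  **
+  ** (Sketch: the flag lives in memory cell pLevel->iLeftJoin, which was
+  ** zeroed when the loop was opened; setting it to 1 here lets the
+  ** loop-end logic decide whether a row of NULLs must be synthesized
+  ** for an unmatched left-table row.)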
+  */
+  if( pLevel->iLeftJoin ){
+    pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
+    sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
+    VdbeComment((v, "record LEFT JOIN hit"));
+    sqlite3ExprCacheClear(pParse);
+    for(pTerm=pWC->a, j=0; j<pWC->nTerm; j++, pTerm++){
+      testcase( pTerm->wtFlags & TERM_VIRTUAL );
+      testcase( pTerm->wtFlags & TERM_CODED );
+      if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
+      if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
+        assert( pWInfo->untestedTerms );
+        continue;
+      }
+      assert( pTerm->pExpr );
+      sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL);
+      pTerm->wtFlags |= TERM_CODED;
+    }
+  }
+
+  return pLevel->notReady;
+}
+
+/************** End of wherecode.c *******************************************/
+/************** Begin file whereexpr.c ***************************************/
+/*
+** 2015-06-08
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This module contains C code that generates VDBE code used to process
+** the WHERE clause of SQL statements.
+**
+** This file was originally part of where.c but was split out to improve
+** readability and editability.  This file contains utility routines for
+** analyzing Expr objects in the WHERE clause.
+*/
+/* #include "sqliteInt.h" */
+/* #include "whereInt.h" */
+
+/* Forward declarations */
+static void exprAnalyze(SrcList*, WhereClause*, int);
+
+/*
+** Deallocate all memory associated with a WhereOrInfo object.
+*/
+static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){
+  sqlite3WhereClauseClear(&p->wc);
+  sqlite3DbFree(db, p);
+}
+
+/*
+** Deallocate all memory associated with a WhereAndInfo object.
+*/
+static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){
+  sqlite3WhereClauseClear(&p->wc);
+  sqlite3DbFree(db, p);
+}
+
+/*
+** Add a single new WhereTerm entry to the WhereClause object pWC.
+** The new WhereTerm object is constructed from Expr p and with wtFlags.
+** The index in pWC->a[] of the new WhereTerm is returned on success.
+** 0 is returned if the new WhereTerm could not be added due to a memory
+** allocation error.  The memory allocation failure will be recorded in
+** the db->mallocFailed flag so that higher-level functions can detect it.
+**
+** This routine will increase the size of the pWC->a[] array as necessary.
+**
+** If the wtFlags argument includes TERM_DYNAMIC, then responsibility
+** for freeing the expression p is assumed by the WhereClause object pWC.
+** This is true even if this routine fails to allocate a new WhereTerm.
+**
+** WARNING:  This routine might reallocate the space used to store
+** WhereTerms.  All pointers to WhereTerms should be invalidated after
+** calling this routine.  Such pointers may be reinitialized by referencing
+** the pWC->a[] array.
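+**
+** (Illustrative usage pattern, as seen elsewhere in this file: keep the
+** returned index rather than a WhereTerm pointer across the call --
+**
+**     idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
+**     ...
+**     pTerm = &pWC->a[idxNew];   -- re-derive the pointer after insert
+** )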
+*/
+static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
+  WhereTerm *pTerm;
+  int idx;
+  testcase( wtFlags & TERM_VIRTUAL );
+  if( pWC->nTerm>=pWC->nSlot ){
+    WhereTerm *pOld = pWC->a;
+    sqlite3 *db = pWC->pWInfo->pParse->db;
+    pWC->a = sqlite3DbMallocRawNN(db, sizeof(pWC->a[0])*pWC->nSlot*2 );
+    if( pWC->a==0 ){
+      if( wtFlags & TERM_DYNAMIC ){
+        sqlite3ExprDelete(db, p);
+      }
+      pWC->a = pOld;
+      return 0;
+    }
+    memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm);
+    if( pOld!=pWC->aStatic ){
+      sqlite3DbFree(db, pOld);
+    }
+    pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]);
+  }
+  pTerm = &pWC->a[idx = pWC->nTerm++];
+  if( p && ExprHasProperty(p, EP_Unlikely) ){
+    pTerm->truthProb = sqlite3LogEst(p->iTable) - 270;
+  }else{
+    pTerm->truthProb = 1;
+  }
+  pTerm->pExpr = sqlite3ExprSkipCollate(p);
+  pTerm->wtFlags = wtFlags;
+  pTerm->pWC = pWC;
+  pTerm->iParent = -1;
+  memset(&pTerm->eOperator, 0,
+         sizeof(WhereTerm) - offsetof(WhereTerm,eOperator));
+  return idx;
+}
+
+/*
+** Return TRUE if the given operator is one of the operators that is
+** allowed for an indexable WHERE clause term.  The allowed operators are
+** "=", "<", ">", "<=", ">=", "IN", "IS", and "IS NULL"
+*/
+static int allowedOp(int op){
+  assert( TK_GT>TK_EQ && TK_GT<TK_GE );
+  assert( TK_LT>TK_EQ && TK_LT<TK_GE );
+  assert( TK_LE>TK_EQ && TK_LE<TK_GE );
+  assert( TK_GE<=TK_GE );
+  return op==TK_IN || (op>=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS;
+}
+
+/*
+** Commute a comparison operator.  Expressions of the form "X op Y"
+** are converted into "Y op X".
+**
+** If left/right precedence rules come into play when determining the
+** collating sequence, then COLLATE operators are adjusted to ensure
+** that the collating sequence does not change.  For example:
+** "Y collate NOCASE op X" becomes "X op Y" because any collation sequence on
+** the left hand side of a comparison overrides any collation sequence
+** attached to the right. For the same reason the EP_Collate flag
+** is not commuted.
+*/
+static void exprCommute(Parse *pParse, Expr *pExpr){
+  u16 expRight = (pExpr->pRight->flags & EP_Collate);
+  u16 expLeft = (pExpr->pLeft->flags & EP_Collate);
+  assert( allowedOp(pExpr->op) && pExpr->op!=TK_IN );
+  if( expRight==expLeft ){
+    /* Either X and Y both have COLLATE operator or neither do */
+    if( expRight ){
+      /* Both X and Y have COLLATE operators.  Make sure X is always
+      ** used by clearing the EP_Collate flag from Y. */
+      pExpr->pRight->flags &= ~EP_Collate;
+    }else if( sqlite3ExprCollSeq(pParse, pExpr->pLeft)!=0 ){
+      /* Neither X nor Y have COLLATE operators, but X has a non-default
+      ** collating sequence.  So add the EP_Collate marker on X to cause
+      ** it to be searched first. */
+      pExpr->pLeft->flags |= EP_Collate;
+    }
+  }
+  SWAP(Expr*,pExpr->pRight,pExpr->pLeft);
+  if( pExpr->op>=TK_GT ){
+    assert( TK_LT==TK_GT+2 );
+    assert( TK_GE==TK_LE+2 );
+    assert( TK_GT>TK_EQ );
+    assert( TK_GT<TK_LE );
+    assert( pExpr->op>=TK_GT && pExpr->op<=TK_GE );
+    pExpr->op = ((pExpr->op-TK_GT)^2)+TK_GT;
+  }
+}
+
+/*
+** Translate from TK_xx operator to WO_xx bitmask.
+*/
+static u16 operatorMask(int op){
+  u16 c;
+  assert( allowedOp(op) );
+  if( op==TK_IN ){
+    c = WO_IN;
+  }else if( op==TK_ISNULL ){
+    c = WO_ISNULL;
+  }else if( op==TK_IS ){
+    c = WO_IS;
+  }else{
+    assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff );
+    c = (u16)(WO_EQ<<(op-TK_EQ));
+  }
+  assert( op!=TK_ISNULL || c==WO_ISNULL );
+  assert( op!=TK_IN || c==WO_IN );
+  assert( op!=TK_EQ || c==WO_EQ );
+  assert( op!=TK_LT || c==WO_LT );
+  assert( op!=TK_LE || c==WO_LE );
+  assert( op!=TK_GT || c==WO_GT );
+  assert( op!=TK_GE || c==WO_GE );
+  assert( op!=TK_IS || c==WO_IS );
+  return c;
+}
+
+
+#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION
+/*
+** Check to see if the given expression is a LIKE or GLOB operator that
+** can be optimized using inequality constraints.  Return TRUE if it is
+** so and false if not.
+**
+** In order for the operator to be optimizable, the RHS must be a string
+** literal that does not begin with a wildcard.  The LHS must be a column
+** that may only be NULL, a string, or a BLOB, never a number. (This means
+** that virtual tables cannot participate in the LIKE optimization.)  The
+** collating sequence for the column on the LHS must be appropriate for
+** the operator.
+*/
+static int isLikeOrGlob(
+  Parse *pParse,    /* Parsing and code generating context */
+  Expr *pExpr,      /* Test this expression */
+  Expr **ppPrefix,  /* Pointer to TK_STRING expression with pattern prefix */
+  int *pisComplete, /* True if the only wildcard is % in the last character */
+  int *pnoCase      /* True if uppercase is equivalent to lowercase */
+){
+  const char *z = 0;         /* String on RHS of LIKE operator */
+  Expr *pRight, *pLeft;      /* Right and left sides of LIKE operator */
+  ExprList *pList;           /* List of operands to the LIKE operator */
+  int c;                     /* One character in z[] */
+  int cnt;                   /* Number of non-wildcard prefix characters */
+  char wc[3];                /* Wildcard characters */
+  sqlite3 *db = pParse->db;  /* Database connection */
+  sqlite3_value *pVal = 0;
+  int op;                    /* Opcode of pRight */
+  int rc;                    /* Result code to return */
+
+  if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, wc) ){
+    return 0;
+  }
+#ifdef SQLITE_EBCDIC
+  if( *pnoCase ) return 0;
+#endif
+  pList = pExpr->x.pList;
+  pLeft = pList->a[1].pExpr;
+  if( pLeft->op!=TK_COLUMN
+   || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT
+   || IsVirtual(pLeft->pTab)  /* Value might be numeric */
+  ){
+    /* IMP: R-02065-49465 The left-hand side of the LIKE or GLOB operator must
+    ** be the name of an indexed column with TEXT affinity.
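+    **
+    ** (Illustrative example, hypothetical schema: given
+    ** CREATE TABLE t1(a TEXT, n INTEGER), "a LIKE 'abc%'" is a candidate
+    ** for the range rewrite, but "n LIKE '12%'" is rejected here because
+    ** n does not have TEXT affinity.)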
+    */
+    return 0;
+  }
+  assert( pLeft->iColumn!=(-1) ); /* Because IPK never has AFF_TEXT */
+
+  pRight = sqlite3ExprSkipCollate(pList->a[0].pExpr);
+  op = pRight->op;
+  if( op==TK_VARIABLE ){
+    Vdbe *pReprepare = pParse->pReprepare;
+    int iCol = pRight->iColumn;
+    pVal = sqlite3VdbeGetBoundValue(pReprepare, iCol, SQLITE_AFF_BLOB);
+    if( pVal && sqlite3_value_type(pVal)==SQLITE_TEXT ){
+      z = (char *)sqlite3_value_text(pVal);
+    }
+    sqlite3VdbeSetVarmask(pParse->pVdbe, iCol);
+    assert( pRight->op==TK_VARIABLE || pRight->op==TK_REGISTER );
+  }else if( op==TK_STRING ){
+    z = pRight->u.zToken;
+  }
+  if( z ){
+    cnt = 0;
+    while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){
+      cnt++;
+    }
+    if( cnt!=0 && 255!=(u8)z[cnt-1] ){
+      Expr *pPrefix;
+      *pisComplete = c==wc[0] && z[cnt+1]==0;
+      pPrefix = sqlite3Expr(db, TK_STRING, z);
+      if( pPrefix ) pPrefix->u.zToken[cnt] = 0;
+      *ppPrefix = pPrefix;
+      if( op==TK_VARIABLE ){
+        Vdbe *v = pParse->pVdbe;
+        sqlite3VdbeSetVarmask(v, pRight->iColumn);
+        if( *pisComplete && pRight->u.zToken[1] ){
+          /* If the rhs of the LIKE expression is a variable, and the current
+          ** value of the variable means there is no need to invoke the LIKE
+          ** function, then no OP_Variable will be added to the program.
+          ** This causes problems for the sqlite3_bind_parameter_name()
+          ** API. To work around them, add a dummy OP_Variable here.
+          */
+          int r1 = sqlite3GetTempReg(pParse);
+          sqlite3ExprCodeTarget(pParse, pRight, r1);
+          sqlite3VdbeChangeP3(v, sqlite3VdbeCurrentAddr(v)-1, 0);
+          sqlite3ReleaseTempReg(pParse, r1);
+        }
+      }
+    }else{
+      z = 0;
+    }
+  }
+
+  rc = (z!=0);
+  sqlite3ValueFree(pVal);
+  return rc;
+}
+#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */
+
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Check to see if the given expression is of the form
+**
+**         column OP expr
+**
+** where OP is one of MATCH, GLOB, LIKE or REGEXP and "column" is a
+** column of a virtual table.
+**
+** If it is then return TRUE.  If not, return FALSE.
+*/
+static int isMatchOfColumn(
+  Expr *pExpr,                    /* Test this expression */
+  unsigned char *peOp2            /* OUT: 0 for MATCH, or else an op2 value */
+){
+  static const struct Op2 {
+    const char *zOp;
+    unsigned char eOp2;
+  } aOp[] = {
+    { "match",  SQLITE_INDEX_CONSTRAINT_MATCH },
+    { "glob",   SQLITE_INDEX_CONSTRAINT_GLOB },
+    { "like",   SQLITE_INDEX_CONSTRAINT_LIKE },
+    { "regexp", SQLITE_INDEX_CONSTRAINT_REGEXP }
+  };
+  ExprList *pList;
+  Expr *pCol;                     /* Column reference */
+  int i;
+
+  if( pExpr->op!=TK_FUNCTION ){
+    return 0;
+  }
+  pList = pExpr->x.pList;
+  if( pList==0 || pList->nExpr!=2 ){
+    return 0;
+  }
+  pCol = pList->a[1].pExpr;
+  if( pCol->op!=TK_COLUMN || !IsVirtual(pCol->pTab) ){
+    return 0;
+  }
+  for(i=0; i<ArraySize(aOp); i++){
+    if( sqlite3StrICmp(pExpr->u.zToken, aOp[i].zOp)==0 ){
+      *peOp2 = aOp[i].eOp2;
+      return 1;
+    }
+  }
+  return 0;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/*
+** If the pBase expression originated in the ON or USING clause of
+** a join, then transfer the appropriate markings over to derived.
+*/
+static void transferJoinMarkings(Expr *pDerived, Expr *pBase){
+  if( pDerived ){
+    pDerived->flags |= pBase->flags & EP_FromJoin;
+    pDerived->iRightJoinTable = pBase->iRightJoinTable;
+  }
+}
+
+/*
+** Mark term iChild as being a child of term iParent
+*/
+static void markTermAsChild(WhereClause *pWC, int iChild, int iParent){
+  pWC->a[iChild].iParent = iParent;
+  pWC->a[iChild].truthProb = pWC->a[iParent].truthProb;
+  pWC->a[iParent].nChild++;
+}
+
+/*
+** Return the N-th AND-connected subterm of pTerm.  Or if pTerm is not
+** a conjunction, then return just pTerm when N==0.  If N exceeds
+** the number of available subterms, return NULL.
+*/
+static WhereTerm *whereNthSubterm(WhereTerm *pTerm, int N){
+  if( pTerm->eOperator!=WO_AND ){
+    return N==0 ? pTerm : 0;
+  }
+  if( N<pTerm->u.pAndInfo->wc.nTerm ){
+    return &pTerm->u.pAndInfo->wc.a[N];
+  }
+  return 0;
+}
+
+/*
+** Subterms pOne and pTwo are contained within WHERE clause pWC.  The
+** two subterms are in disjunction - they are OR-ed together.
+**
+** If these two terms are both of the form:  "A op B" with the same
+** A and B values but different operators and if the operators are
+** compatible (if one is = and the other is <, for example) then
+** add a new virtual AND term to pWC that is the combination of the
+** two.
+**
+** Some examples:
+**
+**    x<y OR x=y    -->     x<=y
+**    x=y OR x=y    -->     x=y
+**    x<=y OR x<y   -->     x<=y
+**
+** The following is NOT generated:
+**
+**    x<y OR x>y    -->     x!=y
+*/
+static void whereCombineDisjuncts(
+  SrcList *pSrc,         /* the FROM clause */
+  WhereClause *pWC,      /* The complete WHERE clause */
+  WhereTerm *pOne,       /* First disjunct */
+  WhereTerm *pTwo        /* Second disjunct */
+){
+  u16 eOp = pOne->eOperator | pTwo->eOperator;
+  sqlite3 *db;           /* Database connection (for malloc) */
+  Expr *pNew;            /* New virtual expression */
+  int op;                /* Operator for the combined expression */
+  int idxNew;            /* Index in pWC of the next virtual term */
+
+  if( (pOne->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return;
+  if( (pTwo->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return;
+  if( (eOp & (WO_EQ|WO_LT|WO_LE))!=eOp
+   && (eOp & (WO_EQ|WO_GT|WO_GE))!=eOp ) return;
+  assert( pOne->pExpr->pLeft!=0 && pOne->pExpr->pRight!=0 );
+  assert( pTwo->pExpr->pLeft!=0 && pTwo->pExpr->pRight!=0 );
+  if( sqlite3ExprCompare(pOne->pExpr->pLeft, pTwo->pExpr->pLeft, -1) ) return;
+  if( sqlite3ExprCompare(pOne->pExpr->pRight, pTwo->pExpr->pRight, -1) )return;
+  /* If we reach this point, it means the two subterms can be combined */
+  if( (eOp & (eOp-1))!=0 ){
+    if( eOp & (WO_LT|WO_LE) ){
+      eOp = WO_LE;
+    }else{
+      assert( eOp & (WO_GT|WO_GE) );
+      eOp = WO_GE;
+    }
+  }
+  db = pWC->pWInfo->pParse->db;
+  pNew = sqlite3ExprDup(db, pOne->pExpr, 0);
+  if( pNew==0 ) return;
+  for(op=TK_EQ; eOp!=(WO_EQ<<(op-TK_EQ)); op++){ assert( op<TK_GE ); }
+  pNew->op = op;
+  idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
+  exprAnalyze(pSrc, pWC, idxNew);
+}
+
+#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY)
+/*
+** Analyze a term that consists of two or more OR-connected
+** subterms.  So in:
+**
+**     ... WHERE  (a=5) AND (b=7 OR c=9 OR d=13) AND (d=13)
+**                          ^^^^^^^^^^^^^^^^^^^^
+**
+** This routine analyzes terms such as the middle term in the above example.
+** A WhereOrTerm object is computed and attached to the term under
+** analysis, regardless of the outcome of the analysis.  Hence:
+**
+**     WhereTerm.wtFlags |=   TERM_ORINFO
+**     WhereTerm.u.pOrInfo  =  a dynamically allocated WhereOrTerm object
+**
+** The term being analyzed must have two or more OR-connected subterms.
+** A single subterm might be a set of AND-connected sub-subterms.
+** Examples of terms under analysis:
+**
+**     (A)     t1.x=t2.y OR t1.x=t2.z OR t1.y=15 OR t1.z=t3.a+5
+**     (B)     x=expr1 OR expr2=x OR x=expr3
+**     (C)     t1.x=t2.y OR (t1.x=t2.z AND t1.y=15)
+**     (D)     x=expr1 OR (y>11 AND y<22 AND z LIKE '*hello*')
+**     (E)     (p.a=1 AND q.b=2 AND r.c=3) OR (p.x=4 AND q.y=5 AND r.z=6)
+**     (F)     x>A OR (x=A AND y>=B)
+**
+** CASE 1:
+**
+** If all subterms are of the form T.C=expr for some single column of C and
+** a single table T (as shown in example B above) then create a new virtual
+** term that is an equivalent IN expression.  In other words, if the term
+** being analyzed is:
+**
+**      x = expr1  OR  expr2 = x  OR  x = expr3
+**
+** then create a new virtual term like this:
+**
+**      x IN (expr1,expr2,expr3)
+**
+** CASE 2:
+**
+** If there are exactly two disjuncts and one side has x>A and the other side
+** has x=A (for the same x and A) then add a new virtual conjunct term to the
+** WHERE clause of the form "x>=A".  Example:
+**
+**      x>A OR (x=A AND y>B)    adds:    x>=A
+**
+** The added conjunct can sometimes be helpful in query planning.
+**
+** CASE 3:
+**
+** If all subterms are indexable by a single table T, then set
+**
+**     WhereTerm.eOperator              =  WO_OR
+**     WhereTerm.u.pOrInfo->indexable  |=  the cursor number for table T
+**
+** A subterm is "indexable" if it is of the form
+** "T.C <op> <expr>" where C is any column of table T and <op>
+** is one of "=", "<", "<=", ">", ">=", "IS NULL", or "IN".
+** A subterm is also indexable if it is an AND of two or more
+** subsubterms at least one of which is indexable.  Indexable AND
+** subterms have their eOperator set to WO_AND and they have
+** u.pAndInfo set to a dynamically allocated WhereAndTerm object.
+**
+** From another point of view, "indexable" means that the subterm could
+** potentially be used with an index if an appropriate index exists.
+** This analysis does not consider whether or not the index exists; that
+** is decided elsewhere.  This analysis only looks at whether subterms
+** appropriate for indexing exist.
+**
+** All examples A through E above satisfy case 3.  But if a term
+** also satisfies case 1 (such as B) we know that the optimizer will
+** always prefer case 1, so in that case we pretend that case 3 is not
+** satisfied.
+**
+** It might be the case that multiple tables are indexable.  For example,
+** (E) above is indexable on tables P, Q, and R.
+**
+** Terms that satisfy case 3 are candidates for lookup by using
+** separate indices to find rowids for each subterm and composing
+** the union of all rowids using a RowSet object.  This is similar
+** to "bitmap indices" in other database engines.
+**
+** OTHERWISE:
+**
+** If none of cases 1, 2, or 3 apply, then leave the eOperator set to
+** zero.  This term is not useful for search.
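+**
+** (Editor's illustration, not part of the original comment: over a single
+** table t1, the term "t1.a=1 OR t1.a=2 OR t1.a=3" satisfies case 1 and is
+** rewritten as the virtual term "t1.a IN (1,2,3)", whereas
+** "t1.a=1 OR t1.b>5" satisfies only case 3.)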
+*/
+static void exprAnalyzeOrTerm(
+  SrcList *pSrc,            /* the FROM clause */
+  WhereClause *pWC,         /* the complete WHERE clause */
+  int idxTerm               /* Index of the OR-term to be analyzed */
+){
+  WhereInfo *pWInfo = pWC->pWInfo;        /* WHERE clause processing context */
+  Parse *pParse = pWInfo->pParse;         /* Parser context */
+  sqlite3 *db = pParse->db;               /* Database connection */
+  WhereTerm *pTerm = &pWC->a[idxTerm];    /* The term to be analyzed */
+  Expr *pExpr = pTerm->pExpr;             /* The expression of the term */
+  int i;                                  /* Loop counters */
+  WhereClause *pOrWc;       /* Breakup of pTerm into subterms */
+  WhereTerm *pOrTerm;       /* A Sub-term within the pOrWc */
+  WhereOrInfo *pOrInfo;     /* Additional information associated with pTerm */
+  Bitmask chngToIN;         /* Tables that might satisfy case 1 */
+  Bitmask indexable;        /* Tables that are indexable, satisfying case 3 */
+
+  /*
+  ** Break the OR clause into its separate subterms.  The subterms are
+  ** stored in a WhereClause structure contained within the WhereOrInfo
+  ** object that is attached to the original OR clause term.
+  */
+  assert( (pTerm->wtFlags & (TERM_DYNAMIC|TERM_ORINFO|TERM_ANDINFO))==0 );
+  assert( pExpr->op==TK_OR );
+  pTerm->u.pOrInfo = pOrInfo = sqlite3DbMallocZero(db, sizeof(*pOrInfo));
+  if( pOrInfo==0 ) return;
+  pTerm->wtFlags |= TERM_ORINFO;
+  pOrWc = &pOrInfo->wc;
+  memset(pOrWc->aStatic, 0, sizeof(pOrWc->aStatic));
+  sqlite3WhereClauseInit(pOrWc, pWInfo);
+  sqlite3WhereSplit(pOrWc, pExpr, TK_OR);
+  sqlite3WhereExprAnalyze(pSrc, pOrWc);
+  if( db->mallocFailed ) return;
+  assert( pOrWc->nTerm>=2 );
+
+  /*
+  ** Compute the set of tables that might satisfy cases 1 or 3.
+  */
+  indexable = ~(Bitmask)0;
+  chngToIN = ~(Bitmask)0;
+  for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0 && indexable; i--, pOrTerm++){
+    if( (pOrTerm->eOperator & WO_SINGLE)==0 ){
+      WhereAndInfo *pAndInfo;
+      assert( (pOrTerm->wtFlags & (TERM_ANDINFO|TERM_ORINFO))==0 );
+      chngToIN = 0;
+      pAndInfo = sqlite3DbMallocRawNN(db, sizeof(*pAndInfo));
+      if( pAndInfo ){
+        WhereClause *pAndWC;
+        WhereTerm *pAndTerm;
+        int j;
+        Bitmask b = 0;
+        pOrTerm->u.pAndInfo = pAndInfo;
+        pOrTerm->wtFlags |= TERM_ANDINFO;
+        pOrTerm->eOperator = WO_AND;
+        pAndWC = &pAndInfo->wc;
+        memset(pAndWC->aStatic, 0, sizeof(pAndWC->aStatic));
+        sqlite3WhereClauseInit(pAndWC, pWC->pWInfo);
+        sqlite3WhereSplit(pAndWC, pOrTerm->pExpr, TK_AND);
+        sqlite3WhereExprAnalyze(pSrc, pAndWC);
+        pAndWC->pOuter = pWC;
+        if( !db->mallocFailed ){
+          for(j=0, pAndTerm=pAndWC->a; j<pAndWC->nTerm; j++, pAndTerm++){
+            assert( pAndTerm->pExpr );
+            if( allowedOp(pAndTerm->pExpr->op)
+             || pAndTerm->eOperator==WO_MATCH
+            ){
+              b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pAndTerm->leftCursor);
+            }
+          }
+        }
+        indexable &= b;
+      }
+    }else if( pOrTerm->wtFlags & TERM_COPIED ){
+      /* Skip this term for now.  We revisit it when we process the
+      ** corresponding TERM_VIRTUAL term */
+    }else{
+      Bitmask b;
+      b = sqlite3WhereGetMask(&pWInfo->sMaskSet, pOrTerm->leftCursor);
+      if( pOrTerm->wtFlags & TERM_VIRTUAL ){
+        WhereTerm *pOther = &pOrWc->a[pOrTerm->iParent];
+        b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pOther->leftCursor);
+      }
+      indexable &= b;
+      if( (pOrTerm->eOperator & WO_EQ)==0 ){
+        chngToIN = 0;
+      }else{
+        chngToIN &= b;
+      }
+    }
+  }
+
+  /*
+  ** Record the set of tables that satisfy case 3.  The set might be
+  ** empty.
+  */
+  pOrInfo->indexable = indexable;
+  pTerm->eOperator = indexable==0 ? 0 : WO_OR;
+
+  /* For a two-way OR, attempt to implement case 2.
+  */
+  if( indexable && pOrWc->nTerm==2 ){
+    int iOne = 0;
+    WhereTerm *pOne;
+    while( (pOne = whereNthSubterm(&pOrWc->a[0],iOne++))!=0 ){
+      int iTwo = 0;
+      WhereTerm *pTwo;
+      while( (pTwo = whereNthSubterm(&pOrWc->a[1],iTwo++))!=0 ){
+        whereCombineDisjuncts(pSrc, pWC, pOne, pTwo);
+      }
+    }
+  }
+
+  /*
+  ** chngToIN holds a set of tables that *might* satisfy case 1.  But
+  ** we have to do some additional checking to see if case 1 really
+  ** is satisfied.
+  **
+  ** chngToIN will hold either 0, 1, or 2 bits.  The 0-bit case means
+  ** that there is no possibility of transforming the OR clause into an
+  ** IN operator because one or more terms in the OR clause contain
+  ** something other than == on a column in the single table.  The 1-bit
+  ** case means that every term of the OR clause is of the form
+  ** "table.column=expr" for some single table.  The one bit that is set
+  ** will correspond to the common table.  We still need to check to make
+  ** sure the same column is used on all terms.  The 2-bit case is when
+  ** all terms are of the form "table1.column=table2.column".  It
+  ** might be possible to form an IN operator with either table1.column
+  ** or table2.column as the LHS if either is common to every term of
+  ** the OR clause.
+  **
+  ** Note that terms of the form "table.column1=table.column2" (the
+  ** same table on both sides of the ==) cannot be optimized.
+  */
+  if( chngToIN ){
+    int okToChngToIN = 0;     /* True if the conversion to IN is valid */
+    int iColumn = -1;         /* Column index on lhs of IN operator */
+    int iCursor = -1;         /* Table cursor common to all terms */
+    int j = 0;                /* Loop counter */
+
+    /* Search for a table and column that appears on one side or the
+    ** other of the == operator in every subterm.  That table and column
+    ** will be recorded in iCursor and iColumn.  There might not be any
+    ** such table and column.  Set okToChngToIN if an appropriate table
+    ** and column is found but leave okToChngToIN false if not found.
+    */
+    for(j=0; j<2 && !okToChngToIN; j++){
+      pOrTerm = pOrWc->a;
+      for(i=pOrWc->nTerm-1; i>=0; i--, pOrTerm++){
+        assert( pOrTerm->eOperator & WO_EQ );
+        pOrTerm->wtFlags &= ~TERM_OR_OK;
+        if( pOrTerm->leftCursor==iCursor ){
+          /* This is the 2-bit case and we are on the second iteration and
+          ** current term is from the first iteration.  So skip this term. */
+          assert( j==1 );
+          continue;
+        }
+        if( (chngToIN & sqlite3WhereGetMask(&pWInfo->sMaskSet,
+                                            pOrTerm->leftCursor))==0 ){
+          /* This term must be of the form t1.a==t2.b where t2 is in the
+          ** chngToIN set but t1 is not.  This term will be either preceded
+          ** or followed by an inverted copy (t2.b==t1.a).  Skip this term
+          ** and use its inversion. */
+          testcase( pOrTerm->wtFlags & TERM_COPIED );
+          testcase( pOrTerm->wtFlags & TERM_VIRTUAL );
+          assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) );
+          continue;
+        }
+        iColumn = pOrTerm->u.leftColumn;
+        iCursor = pOrTerm->leftCursor;
+        break;
+      }
+      if( i<0 ){
+        /* No candidate table+column was found.  This can only occur
+        ** on the second iteration */
+        assert( j==1 );
+        assert( IsPowerOfTwo(chngToIN) );
+        assert( chngToIN==sqlite3WhereGetMask(&pWInfo->sMaskSet, iCursor) );
+        break;
+      }
+      testcase( j==1 );
+
+      /* We have found a candidate table and column.  Check to see if that
+      ** table and column is common to every term in the OR clause */
+      okToChngToIN = 1;
+      for(; i>=0 && okToChngToIN; i--, pOrTerm++){
+        assert( pOrTerm->eOperator & WO_EQ );
+        if( pOrTerm->leftCursor!=iCursor ){
+          pOrTerm->wtFlags &= ~TERM_OR_OK;
+        }else if( pOrTerm->u.leftColumn!=iColumn ){
+          okToChngToIN = 0;
+        }else{
+          int affLeft, affRight;
+          /* If the right-hand side is also a column, then the affinities
+          ** of both right and left sides must be such that no type
+          ** conversions are required on the right.  (Ticket #2249)
+          */
+          affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight);
+          affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft);
+          if( affRight!=0 && affRight!=affLeft ){
+            okToChngToIN = 0;
+          }else{
+            pOrTerm->wtFlags |= TERM_OR_OK;
+          }
+        }
+      }
+    }
+
+    /* At this point, okToChngToIN is true if original pTerm satisfies
+    ** case 1.  In that case, construct a new virtual term that is
+    ** pTerm converted into an IN operator.
+    */
+    if( okToChngToIN ){
+      Expr *pDup;            /* A transient duplicate expression */
+      ExprList *pList = 0;   /* The RHS of the IN operator */
+      Expr *pLeft = 0;       /* The LHS of the IN operator */
+      Expr *pNew;            /* The complete IN operator */
+
+      for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0; i--, pOrTerm++){
+        if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue;
+        assert( pOrTerm->eOperator & WO_EQ );
+        assert( pOrTerm->leftCursor==iCursor );
+        assert( pOrTerm->u.leftColumn==iColumn );
+        pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0);
+        pList = sqlite3ExprListAppend(pWInfo->pParse, pList, pDup);
+        pLeft = pOrTerm->pExpr->pLeft;
+      }
+      assert( pLeft!=0 );
+      pDup = sqlite3ExprDup(db, pLeft, 0);
+      pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0);
+      if( pNew ){
+        int idxNew;
+        transferJoinMarkings(pNew, pExpr);
+        assert( !ExprHasProperty(pNew, EP_xIsSelect) );
+        pNew->x.pList = pList;
+        idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC);
+        testcase( idxNew==0 );
+        exprAnalyze(pSrc, pWC, idxNew);
+        pTerm = &pWC->a[idxTerm];
+        markTermAsChild(pWC, idxNew, idxTerm);
+      }else{
+        sqlite3ExprListDelete(db, pList);
+      }
+      pTerm->eOperator = WO_NOOP;  /* case 1 trumps case 3 */
+    }
+  }
+}
+#endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */
+
+/*
+** We already know that pExpr is a binary operator where both operands are
+** column references.  This routine checks to see if pExpr is an equivalence
+** relation:
+**   1.  The SQLITE_Transitive optimization must be enabled
+**   2.  Must be either an == or an IS operator
+**   3.  Not originating in the ON clause of an OUTER JOIN
+**   4.  The affinities of A and B must be compatible
+**   5a. Both operands use the same collating sequence OR
+**   5b. The overall collating sequence is BINARY
+** If this routine returns TRUE, that means that the RHS can be substituted
+** for the LHS anyplace else in the WHERE clause where the LHS column occurs.
+** This is an optimization.  No harm comes from returning 0.  But if 1 is
+** returned when it should not be, then incorrect answers might result.
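+**
+** (Editor's illustration: given "t1.a=t2.b AND t2.b>10", if t1.a=t2.b is
+** an equivalence as defined above, the planner may treat "t1.a>10" as an
+** implied constraint as well.)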
+*/
+static int termIsEquivalence(Parse *pParse, Expr *pExpr){
+  char aff1, aff2;
+  CollSeq *pColl;
+  const char *zColl1, *zColl2;
+  if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0;
+  if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0;
+  if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0;
+  aff1 = sqlite3ExprAffinity(pExpr->pLeft);
+  aff2 = sqlite3ExprAffinity(pExpr->pRight);
+  if( aff1!=aff2
+   && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2))
+  ){
+    return 0;
+  }
+  pColl = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pExpr->pRight);
+  if( pColl==0 || sqlite3StrICmp(pColl->zName, "BINARY")==0 ) return 1;
+  pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft);
+  zColl1 = pColl ? pColl->zName : 0;
+  pColl = sqlite3ExprCollSeq(pParse, pExpr->pRight);
+  zColl2 = pColl ? pColl->zName : 0;
+  return sqlite3_stricmp(zColl1, zColl2)==0;
+}
+
+/*
+** Recursively walk the expressions of a SELECT statement and generate
+** a bitmask indicating which tables are used in that expression
+** tree.
+*/
+static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
+  Bitmask mask = 0;
+  while( pS ){
+    SrcList *pSrc = pS->pSrc;
+    mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pEList);
+    mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pGroupBy);
+    mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pOrderBy);
+    mask |= sqlite3WhereExprUsage(pMaskSet, pS->pWhere);
+    mask |= sqlite3WhereExprUsage(pMaskSet, pS->pHaving);
+    if( ALWAYS(pSrc!=0) ){
+      int i;
+      for(i=0; i<pSrc->nSrc; i++){
+        mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect);
+        mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].pOn);
+      }
+    }
+    pS = pS->pPrior;
+  }
+  return mask;
+}
+
+/*
+** Expression pExpr is one operand of a comparison operator that might
+** be useful for indexing.  This routine checks to see if pExpr appears
+** in any index.  Return TRUE (1) if pExpr is an indexed term and return
+** FALSE (0) if not.  If TRUE is returned, also set *piCur to the cursor
+** number of the table that is indexed and *piColumn to the column number
+** of the column that is indexed, or XN_EXPR (-2) if an expression is being
+** indexed.
+**
+** If pExpr is a TK_COLUMN column reference, then this routine always returns
+** true even if that particular column is not indexed, because the column
+** might be added to an automatic index later.
+*/
+static int exprMightBeIndexed(
+  SrcList *pFrom,        /* The FROM clause */
+  int op,                /* The specific comparison operator */
+  Bitmask mPrereq,       /* Bitmask of FROM clause terms referenced by pExpr */
+  Expr *pExpr,           /* An operand of a comparison operator */
+  int *piCur,            /* Write the referenced table cursor number here */
+  int *piColumn          /* Write the referenced table column number here */
+){
+  Index *pIdx;
+  int i;
+  int iCur;
+
+  /* If this expression is a vector to the left or right of an
+  ** inequality constraint (>, <, >= or <=), perform the processing
+  ** on the first element of the vector.
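+  ** For example (editor's note): in "(a,b) < (?1,?2)" only column "a" is
+  ** examined here, since the first vector element decides whether an
+  ** index might be usable.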
+  */
+  assert( TK_GT+1==TK_LE && TK_GT+2==TK_LT && TK_GT+3==TK_GE );
+  assert( TK_IS<TK_GE && TK_ISNULL<TK_GE && TK_IN<TK_GE );
+  assert( op<=TK_GE );
+  if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){
+    pExpr = pExpr->x.pList->a[0].pExpr;
+  }
+
+  if( pExpr->op==TK_COLUMN ){
+    *piCur = pExpr->iTable;
+    *piColumn = pExpr->iColumn;
+    return 1;
+  }
+  if( mPrereq==0 ) return 0;                 /* No table references */
+  if( (mPrereq&(mPrereq-1))!=0 ) return 0;   /* Refs more than one table */
+  for(i=0; mPrereq>1; i++, mPrereq>>=1){}
+  iCur = pFrom->a[i].iCursor;
+  for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+    if( pIdx->aColExpr==0 ) continue;
+    for(i=0; i<pIdx->nKeyCol; i++){
+      if( pIdx->aiColumn[i]!=XN_EXPR ) continue;
+      if( sqlite3ExprCompare(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){
+        *piCur = iCur;
+        *piColumn = XN_EXPR;
+        return 1;
+      }
+    }
+  }
+  return 0;
+}
+
+/*
+** The input to this routine is a WhereTerm structure with only the
+** "pExpr" field filled in.  The job of this routine is to analyze the
+** subexpression and populate all the other fields of the WhereTerm
+** structure.
+**
+** If the expression is of the form "<expr> <op> X" it gets commuted
+** to the standard form of "X <op> <expr>".
+**
+** If the expression is of the form "X <op> Y" where both X and Y are
+** columns, then the original expression is unchanged and a new virtual
+** term of the form "Y <op> X" is added to the WHERE clause and
+** analyzed separately.  The original term is marked with TERM_COPIED
+** and the new term is marked with TERM_DYNAMIC (because its pExpr
+** needs to be freed with the WhereClause) and TERM_VIRTUAL (because it
+** is a commuted copy of a prior term.)  The original term has nChild=1
+** and the copy has idxParent set to the index of the original term.
+*/
+static void exprAnalyze(
+  SrcList *pSrc,            /* the FROM clause */
+  WhereClause *pWC,         /* the WHERE clause */
+  int idxTerm               /* Index of the term to be analyzed */
+){
+  WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */
+  WhereTerm *pTerm;                /* The term to be analyzed */
+  WhereMaskSet *pMaskSet;          /* Set of table index masks */
+  Expr *pExpr;                     /* The expression to be analyzed */
+  Bitmask prereqLeft;              /* Prerequisites of the pExpr->pLeft */
+  Bitmask prereqAll;               /* Prerequisites of pExpr */
+  Bitmask extraRight = 0;          /* Extra dependencies on LEFT JOIN */
+  Expr *pStr1 = 0;                 /* RHS of LIKE/GLOB operator */
+  int isComplete = 0;              /* RHS of LIKE/GLOB ends with wildcard */
+  int noCase = 0;                  /* uppercase equivalent to lowercase */
+  int op;                          /* Top-level operator.  pExpr->op */
+  Parse *pParse = pWInfo->pParse;  /* Parsing context */
+  sqlite3 *db = pParse->db;        /* Database connection */
+  unsigned char eOp2;              /* op2 value for LIKE/REGEXP/GLOB */
+  int nLeft;                       /* Number of elements on left side vector */
+
+  if( db->mallocFailed ){
+    return;
+  }
+  pTerm = &pWC->a[idxTerm];
+  pMaskSet = &pWInfo->sMaskSet;
+  pExpr = pTerm->pExpr;
+  assert( pExpr->op!=TK_AS && pExpr->op!=TK_COLLATE );
+  prereqLeft = sqlite3WhereExprUsage(pMaskSet, pExpr->pLeft);
+  op = pExpr->op;
+  if( op==TK_IN ){
+    assert( pExpr->pRight==0 );
+    if( sqlite3ExprCheckIN(pParse, pExpr) ) return;
+    if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+      pTerm->prereqRight = exprSelectUsage(pMaskSet, pExpr->x.pSelect);
+    }else{
+      pTerm->prereqRight = sqlite3WhereExprListUsage(pMaskSet, pExpr->x.pList);
+    }
+  }else if( op==TK_ISNULL ){
+    pTerm->prereqRight = 0;
+  }else{
+    pTerm->prereqRight = sqlite3WhereExprUsage(pMaskSet, pExpr->pRight);
+  }
+  prereqAll = sqlite3WhereExprUsage(pMaskSet, pExpr);
+  if( ExprHasProperty(pExpr, EP_FromJoin) ){
+    Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->iRightJoinTable);
+    prereqAll |= x;
+    extraRight = x-1;  /* ON clause terms may not be used with an index
+                       ** on left table of a LEFT JOIN.  Ticket #3015 */
+    if( (prereqAll>>1)>=x ){
+      sqlite3ErrorMsg(pParse, "ON clause references tables to its right");
+      return;
+    }
+  }
+  pTerm->prereqAll = prereqAll;
+  pTerm->leftCursor = -1;
+  pTerm->iParent = -1;
+  pTerm->eOperator = 0;
+  if( allowedOp(op) ){
+    int iCur, iColumn;
+    Expr *pLeft = sqlite3ExprSkipCollate(pExpr->pLeft);
+    Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight);
+    u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? WO_ALL : WO_EQUIV;
+
+    if( pTerm->iField>0 ){
+      assert( op==TK_IN );
+      assert( pLeft->op==TK_VECTOR );
+      pLeft = pLeft->x.pList->a[pTerm->iField-1].pExpr;
+    }
+
+    if( exprMightBeIndexed(pSrc, op, prereqLeft, pLeft, &iCur, &iColumn) ){
+      pTerm->leftCursor = iCur;
+      pTerm->u.leftColumn = iColumn;
+      pTerm->eOperator = operatorMask(op) & opMask;
+    }
+    if( op==TK_IS ) pTerm->wtFlags |= TERM_IS;
+    if( pRight
+     && exprMightBeIndexed(pSrc, op, pTerm->prereqRight, pRight, &iCur,&iColumn)
+    ){
+      WhereTerm *pNew;
+      Expr *pDup;
+      u16 eExtraOp = 0;        /* Extra bits for pNew->eOperator */
+      assert( pTerm->iField==0 );
+      if( pTerm->leftCursor>=0 ){
+        int idxNew;
+        pDup = sqlite3ExprDup(db, pExpr, 0);
+        if( db->mallocFailed ){
+          sqlite3ExprDelete(db, pDup);
+          return;
+        }
+        idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC);
+        if( idxNew==0 ) return;
+        pNew = &pWC->a[idxNew];
+        markTermAsChild(pWC, idxNew, idxTerm);
+        if( op==TK_IS ) pNew->wtFlags |= TERM_IS;
+        pTerm = &pWC->a[idxTerm];
+        pTerm->wtFlags |= TERM_COPIED;
+
+        if( termIsEquivalence(pParse, pDup) ){
+          pTerm->eOperator |= WO_EQUIV;
+          eExtraOp = WO_EQUIV;
+        }
+      }else{
+        pDup = pExpr;
+        pNew = pTerm;
+      }
+      exprCommute(pParse, pDup);
+      pNew->leftCursor = iCur;
+      pNew->u.leftColumn = iColumn;
+      testcase( (prereqLeft | extraRight) != prereqLeft );
+      pNew->prereqRight = prereqLeft | extraRight;
+      pNew->prereqAll = prereqAll;
+      pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask;
+    }
+  }
+
+#ifndef SQLITE_OMIT_BETWEEN_OPTIMIZATION
+  /* If a term is the BETWEEN operator, create two new virtual terms
+  ** that define the range that the BETWEEN implements.  For example:
+  **
+  **      a BETWEEN b AND c
+  **
+  ** is converted into:
+  **
+  **      (a BETWEEN b AND c) AND (a>=b) AND (a<=c)
+  **
+  ** The two new terms are added onto the end of the WhereClause object.
+  ** The new terms are "dynamic" and are children of the original BETWEEN
+  ** term.  That means that if the BETWEEN term is coded, the children are
+  ** skipped.  Or, if the children are satisfied by an index, the original
+  ** BETWEEN term is skipped.
+  */
+  else if( pExpr->op==TK_BETWEEN && pWC->op==TK_AND ){
+    ExprList *pList = pExpr->x.pList;
+    int i;
+    static const u8 ops[] = {TK_GE, TK_LE};
+    assert( pList!=0 );
+    assert( pList->nExpr==2 );
+    for(i=0; i<2; i++){
+      Expr *pNewExpr;
+      int idxNew;
+      pNewExpr = sqlite3PExpr(pParse, ops[i],
+                             sqlite3ExprDup(db, pExpr->pLeft, 0),
+                             sqlite3ExprDup(db, pList->a[i].pExpr, 0));
+      transferJoinMarkings(pNewExpr, pExpr);
+      idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
+      testcase( idxNew==0 );
+      exprAnalyze(pSrc, pWC, idxNew);
+      pTerm = &pWC->a[idxTerm];
+      markTermAsChild(pWC, idxNew, idxTerm);
+    }
+  }
+#endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */
+
+#if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY)
+  /* Analyze a term that is composed of two or more subterms connected by
+  ** an OR operator.
+  */
+  else if( pExpr->op==TK_OR ){
+    assert( pWC->op==TK_AND );
+    exprAnalyzeOrTerm(pSrc, pWC, idxTerm);
+    pTerm = &pWC->a[idxTerm];
+  }
+#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
+
+#ifndef SQLITE_OMIT_LIKE_OPTIMIZATION
+  /* Add constraints to reduce the search space on a LIKE or GLOB
+  ** operator.
+  **
+  ** A like pattern of the form "x LIKE 'aBc%'" is changed into constraints
+  **
+  **          x>='ABC' AND x<'abd' AND x LIKE 'aBc%'
+  **
+  ** The last character of the prefix "abc" is incremented to form the
+  ** termination condition "abd".  If case is not significant (the default
+  ** for LIKE) then the lower-bound is made all uppercase and the upper-
+  ** bound is made all lowercase so that the bounds also work when comparing
+  ** BLOBs.
+  */
+  if( pWC->op==TK_AND
+   && isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase)
+  ){
+    Expr *pLeft;             /* LHS of LIKE/GLOB operator */
+    Expr *pStr2;             /* Copy of pStr1 - RHS of LIKE/GLOB operator */
+    Expr *pNewExpr1;
+    Expr *pNewExpr2;
+    int idxNew1;
+    int idxNew2;
+    const char *zCollSeqName;     /* Name of collating sequence */
+    const u16 wtFlags = TERM_LIKEOPT | TERM_VIRTUAL | TERM_DYNAMIC;
+
+    pLeft = pExpr->x.pList->a[1].pExpr;
+    pStr2 = sqlite3ExprDup(db, pStr1, 0);
+
+    /* Convert the lower bound to upper-case and the upper bound to
+    ** lower-case (upper-case is less than lower-case in ASCII) so that
+    ** the range constraints also work for BLOBs
+    */
+    if( noCase && !pParse->db->mallocFailed ){
+      int i;
+      char c;
+      pTerm->wtFlags |= TERM_LIKE;
+      for(i=0; (c = pStr1->u.zToken[i])!=0; i++){
+        pStr1->u.zToken[i] = sqlite3Toupper(c);
+        pStr2->u.zToken[i] = sqlite3Tolower(c);
+      }
+    }
+
+    if( !db->mallocFailed ){
+      u8 c, *pC;       /* Last character before the first wildcard */
+      pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1];
+      c = *pC;
+      if( noCase ){
+        /* The point is to increment the last character before the first
+        ** wildcard.  But if we increment '@', that will push it into the
+        ** alphabetic range where case conversions will mess up the
+        ** inequality.  To avoid this, make sure to also run the full
+        ** LIKE on all candidate expressions by clearing the isComplete flag
+        */
+        if( c=='A'-1 ) isComplete = 0;
+        c = sqlite3UpperToLower[c];
+      }
+      *pC = c + 1;
+    }
+    zCollSeqName = noCase ? "NOCASE" : "BINARY";
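+    /* Illustrative note (editor's addition): for "x LIKE 'aBc%'" with
+    ** case-insensitive matching, pStr1 now holds "ABC" and pStr2 holds
+    ** "abd", so the two terms built below implement x>='ABC' AND x<'abd'. */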
"NOCASE" : "BINARY"; + pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); + pNewExpr1 = sqlite3PExpr(pParse, TK_GE, + sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName), + pStr1); + transferJoinMarkings(pNewExpr1, pExpr); + idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags); + testcase( idxNew1==0 ); + exprAnalyze(pSrc, pWC, idxNew1); + pNewExpr2 = sqlite3ExprDup(db, pLeft, 0); + pNewExpr2 = sqlite3PExpr(pParse, TK_LT, + sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName), + pStr2); + transferJoinMarkings(pNewExpr2, pExpr); + idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags); + testcase( idxNew2==0 ); + exprAnalyze(pSrc, pWC, idxNew2); + pTerm = &pWC->a[idxTerm]; + if( isComplete ){ + markTermAsChild(pWC, idxNew1, idxTerm); + markTermAsChild(pWC, idxNew2, idxTerm); + } + } +#endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ + +#ifndef SQLITE_OMIT_VIRTUALTABLE + /* Add a WO_MATCH auxiliary term to the constraint set if the + ** current expression is of the form: column MATCH expr. + ** This information is used by the xBestIndex methods of + ** virtual tables. The native query optimizer does not attempt + ** to do anything with MATCH functions. + */ + if( pWC->op==TK_AND && isMatchOfColumn(pExpr, &eOp2) ){ + int idxNew; + Expr *pRight, *pLeft; + WhereTerm *pNewTerm; + Bitmask prereqColumn, prereqExpr; + + pRight = pExpr->x.pList->a[0].pExpr; + pLeft = pExpr->x.pList->a[1].pExpr; + prereqExpr = sqlite3WhereExprUsage(pMaskSet, pRight); + prereqColumn = sqlite3WhereExprUsage(pMaskSet, pLeft); + if( (prereqExpr & prereqColumn)==0 ){ + Expr *pNewExpr; + pNewExpr = sqlite3PExpr(pParse, TK_MATCH, + 0, sqlite3ExprDup(db, pRight, 0)); + idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); + testcase( idxNew==0 ); + pNewTerm = &pWC->a[idxNew]; + pNewTerm->prereqRight = prereqExpr; + pNewTerm->leftCursor = pLeft->iTable; + pNewTerm->u.leftColumn = pLeft->iColumn; + pNewTerm->eOperator = WO_MATCH; + pNewTerm->eMatchOp = eOp2; + markTermAsChild(pWC, idxNew, idxTerm); + pTerm = &pWC->a[idxTerm]; + pTerm->wtFlags |= TERM_COPIED; + pNewTerm->prereqAll = pTerm->prereqAll; + } + } +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + + /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create + ** new terms for each component comparison - "a = ?" and "b = ?". The + ** new terms completely replace the original vector comparison, which is + ** no longer used. + ** + ** This is only required if at least one side of the comparison operation + ** is not a sub-select. */ + if( pWC->op==TK_AND + && (pExpr->op==TK_EQ || pExpr->op==TK_IS) + && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1 + && sqlite3ExprVectorSize(pExpr->pRight)==nLeft + && ( (pExpr->pLeft->flags & EP_xIsSelect)==0 + || (pExpr->pRight->flags & EP_xIsSelect)==0) + ){ + int i; + for(i=0; ipLeft, i); + Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i); + + pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight); + transferJoinMarkings(pNew, pExpr); + idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC); + exprAnalyze(pSrc, pWC, idxNew); + } + pTerm = &pWC->a[idxTerm]; + pTerm->wtFlags = TERM_CODED|TERM_VIRTUAL; /* Disable the original */ + pTerm->eOperator = 0; + } + + /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create + ** a virtual term for each vector component. The expression object + ** used by each such virtual term is pExpr (the full vector IN(...) + ** expression). The WhereTerm.iField variable identifies the index within + ** the vector on the LHS that the virtual term represents. 
+  **
+  ** This only works if the RHS is a simple SELECT, not a compound
+  */
+  if( pWC->op==TK_AND && pExpr->op==TK_IN && pTerm->iField==0
+   && pExpr->pLeft->op==TK_VECTOR
+   && pExpr->x.pSelect->pPrior==0
+  ){
+    int i;
+    for(i=0; i<sqlite3ExprVectorSize(pExpr->pLeft); i++){
+      int idxNew;
+      idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL);
+      pWC->a[idxNew].iField = i+1;
+      exprAnalyze(pSrc, pWC, idxNew);
+      markTermAsChild(pWC, idxNew, idxTerm);
+    }
+  }
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  /* When sqlite_stat3 histogram data is available an operator of the
+  ** form "x IS NOT NULL" can sometimes be evaluated more efficiently
+  ** as "x>NULL" if x is not an INTEGER PRIMARY KEY.  So construct a
+  ** virtual term of that form.
+  **
+  ** Note that the virtual term must be tagged with TERM_VNULL.
+  */
+  if( pExpr->op==TK_NOTNULL
+   && pExpr->pLeft->op==TK_COLUMN
+   && pExpr->pLeft->iColumn>=0
+   && OptimizationEnabled(db, SQLITE_Stat34)
+  ){
+    Expr *pNewExpr;
+    Expr *pLeft = pExpr->pLeft;
+    int idxNew;
+    WhereTerm *pNewTerm;
+
+    pNewExpr = sqlite3PExpr(pParse, TK_GT,
+                            sqlite3ExprDup(db, pLeft, 0),
+                            sqlite3ExprAlloc(db, TK_NULL, 0, 0));
+
+    idxNew = whereClauseInsert(pWC, pNewExpr,
+                              TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL);
+    if( idxNew ){
+      pNewTerm = &pWC->a[idxNew];
+      pNewTerm->prereqRight = 0;
+      pNewTerm->leftCursor = pLeft->iTable;
+      pNewTerm->u.leftColumn = pLeft->iColumn;
+      pNewTerm->eOperator = WO_GT;
+      markTermAsChild(pWC, idxNew, idxTerm);
+      pTerm = &pWC->a[idxTerm];
+      pTerm->wtFlags |= TERM_COPIED;
+      pNewTerm->prereqAll = pTerm->prereqAll;
+    }
+  }
+#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
+
+  /* Prevent ON clause terms of a LEFT JOIN from being used to drive
+  ** an index for tables to the left of the join.
+  */
+  testcase( pTerm!=&pWC->a[idxTerm] );
+  pTerm = &pWC->a[idxTerm];
+  pTerm->prereqRight |= extraRight;
+}
+
+/***************************************************************************
+** Routines with file scope above.  Interface to the rest of the where.c
+** subsystem follows.
+***************************************************************************/
+
+/*
+** This routine identifies subexpressions in the WHERE clause where
+** each subexpression is separated by the AND operator or some other
+** operator specified in the op parameter.  The WhereClause structure
+** is filled with pointers to subexpressions.  For example:
+**
+**    WHERE  a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22)
+**           \________/     \_______________/     \________________/
+**            slot[0]            slot[1]               slot[2]
+**
+** The original WHERE clause in pExpr is unaltered.  All this routine
+** does is make slot[] entries point to substructure within pExpr.
+**
+** In the previous sentence and in the diagram, "slot[]" refers to
+** the WhereClause.a[] array.  The slot[] array grows as needed to contain
+** all terms of the WHERE clause.
+*/
+SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause *pWC, Expr *pExpr, u8 op){
+  Expr *pE2 = sqlite3ExprSkipCollate(pExpr);
+  pWC->op = op;
+  if( pE2==0 ) return;
+  if( pE2->op!=op ){
+    whereClauseInsert(pWC, pExpr, 0);
+  }else{
+    sqlite3WhereSplit(pWC, pE2->pLeft, op);
+    sqlite3WhereSplit(pWC, pE2->pRight, op);
+  }
+}
+
+/*
+** Initialize a preallocated WhereClause structure.
+*/
+SQLITE_PRIVATE void sqlite3WhereClauseInit(
+  WhereClause *pWC,        /* The WhereClause to be initialized */
+  WhereInfo *pWInfo        /* The WHERE processing context */
+){
+  pWC->pWInfo = pWInfo;
+  pWC->pOuter = 0;
+  pWC->nTerm = 0;
+  pWC->nSlot = ArraySize(pWC->aStatic);
+  pWC->a = pWC->aStatic;
+}
+
+/*
+** Deallocate a WhereClause structure.  The WhereClause structure
+** itself is not freed.  This routine is the inverse of
+** sqlite3WhereClauseInit().
+*/
+SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){
+  int i;
+  WhereTerm *a;
+  sqlite3 *db = pWC->pWInfo->pParse->db;
+  for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){
+    if( a->wtFlags & TERM_DYNAMIC ){
+      sqlite3ExprDelete(db, a->pExpr);
+    }
+    if( a->wtFlags & TERM_ORINFO ){
+      whereOrInfoDelete(db, a->u.pOrInfo);
+    }else if( a->wtFlags & TERM_ANDINFO ){
+      whereAndInfoDelete(db, a->u.pAndInfo);
+    }
+  }
+  if( pWC->a!=pWC->aStatic ){
+    sqlite3DbFree(db, pWC->a);
+  }
+}
+
+
+/*
+** These routines walk (recursively) an expression tree and generate
+** a bitmask indicating which tables are used in that expression
+** tree.
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){
+  Bitmask mask;
+  if( p==0 ) return 0;
+  if( p->op==TK_COLUMN ){
+    mask = sqlite3WhereGetMask(pMaskSet, p->iTable);
+    return mask;
+  }
+  assert( !ExprHasProperty(p, EP_TokenOnly) );
+  mask = p->pRight ? sqlite3WhereExprUsage(pMaskSet, p->pRight) : 0;
+  if( p->pLeft ) mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft);
+  if( ExprHasProperty(p, EP_xIsSelect) ){
+    mask |= exprSelectUsage(pMaskSet, p->x.pSelect);
+  }else if( p->x.pList ){
+    mask |= sqlite3WhereExprListUsage(pMaskSet, p->x.pList);
+  }
+  return mask;
+}
+SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet *pMaskSet, ExprList *pList){
+  int i;
+  Bitmask mask = 0;
+  if( pList ){
+    for(i=0; i<pList->nExpr; i++){
+      mask |= sqlite3WhereExprUsage(pMaskSet, pList->a[i].pExpr);
+    }
+  }
+  return mask;
+}
+
+
+/*
+** Call exprAnalyze on all terms in a WHERE clause.
+**
+** Note that exprAnalyze() might add new virtual terms onto the
+** end of the WHERE clause.  We do not want to analyze these new
+** virtual terms, so start analyzing at the end and work forward
+** so that the added virtual terms are never processed.
+*/
+SQLITE_PRIVATE void sqlite3WhereExprAnalyze(
+  SrcList *pTabList,       /* the FROM clause */
+  WhereClause *pWC         /* the WHERE clause to be analyzed */
+){
+  int i;
+  for(i=pWC->nTerm-1; i>=0; i--){
+    exprAnalyze(pTabList, pWC, i);
+  }
+}
+
+/*
+** For table-valued-functions, transform the function arguments into
+** new WHERE clause terms.
+**
+** Each function argument translates into an equality constraint against
+** a HIDDEN column in the table.
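+**
+** For example (editor's illustration): with a table-valued function such
+** as generate_series, "SELECT value FROM generate_series(1,10)" is handled
+** as if the WHERE clause contained equality constraints on the HIDDEN
+** "start" and "stop" columns.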
+*/
+SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
+  Parse *pParse,                    /* Parsing context */
+  struct SrcList_item *pItem,       /* The FROM clause term to process */
+  WhereClause *pWC                  /* Xfer function arguments to here */
+){
+  Table *pTab;
+  int j, k;
+  ExprList *pArgs;
+  Expr *pColRef;
+  Expr *pTerm;
+  if( pItem->fg.isTabFunc==0 ) return;
+  pTab = pItem->pTab;
+  assert( pTab!=0 );
+  pArgs = pItem->u1.pFuncArg;
+  if( pArgs==0 ) return;
+  for(j=k=0; j<pArgs->nExpr; j++){
+    while( k<pTab->nCol && (pTab->aCol[k].colFlags & COLFLAG_HIDDEN)==0 ){k++;}
+    if( k>=pTab->nCol ){
+      sqlite3ErrorMsg(pParse, "too many arguments on %s() - max %d",
+                      pTab->zName, j);
+      return;
+    }
+    pColRef = sqlite3ExprAlloc(pParse->db, TK_COLUMN, 0, 0);
+    if( pColRef==0 ) return;
+    pColRef->iTable = pItem->iCursor;
+    pColRef->iColumn = k++;
+    pColRef->pTab = pTab;
+    pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef,
+                         sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0));
+    whereClauseInsert(pWC, pTerm, TERM_DYNAMIC);
+  }
+}
+
+/************** End of whereexpr.c *******************************************/
+/************** Begin file where.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This module contains C code that generates VDBE code used to process
+** the WHERE clause of SQL statements.  This module is responsible for
+** generating the code that loops through a table looking for applicable
+** rows.  Indices are selected and used to speed the search when doing
+** so is applicable.  Because this module is responsible for selecting
+** indices, you might also think of this module as the "query optimizer".
+*/
+/* #include "sqliteInt.h" */
+/* #include "whereInt.h" */
+
+/* Forward declaration of methods */
+static int whereLoopResize(sqlite3*, WhereLoop*, int);
+
+/* Test variable that can be set to enable WHERE tracing */
+#if defined(SQLITE_TEST) || defined(SQLITE_DEBUG)
+/***/ int sqlite3WhereTrace = 0;
+#endif
+
+
+/*
+** Return the estimated number of output rows from a WHERE clause
+*/
+SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo *pWInfo){
+  return pWInfo->nRowOut;
+}
+
+/*
+** Return one of the WHERE_DISTINCT_xxxxx values to indicate how this
+** WHERE clause returns outputs for DISTINCT processing.
+*/
+SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){
+  return pWInfo->eDistinct;
+}
+
+/*
+** Return TRUE if the WHERE clause returns rows in ORDER BY order.
+** Return FALSE if the output needs to be sorted.
+*/
+SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){
+  return pWInfo->nOBSat;
+}
+
+/*
+** Return TRUE if the innermost loop of the WHERE clause implementation
+** returns rows in ORDER BY order for complete run of the inner loop.
+**
+** Across multiple iterations of outer loops, the output rows need not be
+** sorted.  As long as rows are sorted for just the innermost loop, this
+** routine can return TRUE.
+*/
+SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo *pWInfo){
+  return pWInfo->bOrderedInnerLoop;
+}
+
+/*
+** Return the VDBE address or label to jump to in order to continue
+** immediately with the next row of a WHERE clause.
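+** (Editor's note: UPDATE and DELETE code generators, among others, jump
+** to this label to skip over the current row.)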
+*/
+SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo *pWInfo){
+  assert( pWInfo->iContinue!=0 );
+  return pWInfo->iContinue;
+}
+
+/*
+** Return the VDBE address or label to jump to in order to break
+** out of a WHERE loop.
+*/
+SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo *pWInfo){
+  return pWInfo->iBreak;
+}
+
+/*
+** Return ONEPASS_OFF (0) if an UPDATE or DELETE statement is unable to
+** operate directly on the rowids returned by a WHERE clause.  Return
+** ONEPASS_SINGLE (1) if the statement can operate directly because only
+** a single row is to be changed.  Return ONEPASS_MULTI (2) if the
+** one-pass optimization can be used on multiple rows.
+**
+** If the ONEPASS optimization is used (if this routine returns true)
+** then also write the indices of open cursors used by ONEPASS
+** into aiCur[0] and aiCur[1].  aiCur[0] gets the cursor of the data
+** table and aiCur[1] gets the cursor used by an auxiliary index.
+** Either value may be -1, indicating that cursor is not used.
+** Any cursors returned will have been opened for writing.
+**
+** aiCur[0] and aiCur[1] both get -1 if the where-clause logic is
+** unable to use the ONEPASS optimization.
+*/
+SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo *pWInfo, int *aiCur){
+  memcpy(aiCur, pWInfo->aiCurOnePass, sizeof(int)*2);
+#ifdef WHERETRACE_ENABLED
+  if( sqlite3WhereTrace && pWInfo->eOnePass!=ONEPASS_OFF ){
+    sqlite3DebugPrintf("%s cursors: %d %d\n",
+         pWInfo->eOnePass==ONEPASS_SINGLE ? "ONEPASS_SINGLE" : "ONEPASS_MULTI",
+         aiCur[0], aiCur[1]);
+  }
+#endif
+  return pWInfo->eOnePass;
+}
+
+/*
+** Move the content of pSrc into pDest
+*/
+static void whereOrMove(WhereOrSet *pDest, WhereOrSet *pSrc){
+  pDest->n = pSrc->n;
+  memcpy(pDest->a, pSrc->a, pDest->n*sizeof(pDest->a[0]));
+}
+
+/*
+** Try to insert a new prerequisite/cost entry into the WhereOrSet pSet.
+**
+** The new entry might overwrite an existing entry, or it might be
+** appended, or it might be discarded.  Do whatever is the right thing
+** so that pSet keeps the N_OR_COST best entries seen so far.
+*/
+static int whereOrInsert(
+  WhereOrSet *pSet,      /* The WhereOrSet to be updated */
+  Bitmask prereq,        /* Prerequisites of the new entry */
+  LogEst rRun,           /* Run-cost of the new entry */
+  LogEst nOut            /* Number of outputs for the new entry */
+){
+  u16 i;
+  WhereOrCost *p;
+  for(i=pSet->n, p=pSet->a; i>0; i--, p++){
+    if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){
+      goto whereOrInsert_done;
+    }
+    if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){
+      return 0;
+    }
+  }
+  if( pSet->n<N_OR_COST ){
+    p = &pSet->a[pSet->n++];
+    p->nOut = nOut;
+  }else{
+    p = pSet->a;
+    for(i=1; i<pSet->n; i++){
+      if( p->rRun>pSet->a[i].rRun ) p = pSet->a + i;
+    }
+    if( p->rRun<=rRun ) return 0;
+  }
+whereOrInsert_done:
+  p->prereq = prereq;
+  p->rRun = rRun;
+  if( p->nOut>nOut ) p->nOut = nOut;
+  return 1;
+}
+
+/*
+** Return the bitmask for the given cursor number.  Return 0 if
+** iCursor is not in the set.
+*/
+SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet *pMaskSet, int iCursor){
+  int i;
+  assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 );
+  for(i=0; i<pMaskSet->n; i++){
+    if( pMaskSet->ix[i]==iCursor ){
+      return MASKBIT(i);
+    }
+  }
+  return 0;
+}
+
+/*
+** Create a new mask for cursor iCursor.
+**
+** There is one cursor per table in the FROM clause.  The number of
+** tables in the FROM clause is limited by a test early in the
+** sqlite3WhereBegin() routine.  So we know that the pMaskSet->ix[]
+** array will never overflow.
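+**
+** (Editor's illustration: if cursors 3, 7, and 5 are registered in that
+** order, sqlite3WhereGetMask() returns 0x1 for cursor 3, 0x2 for cursor 7,
+** and 0x4 for cursor 5.)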
+*/
+static void createMask(WhereMaskSet *pMaskSet, int iCursor){
+  assert( pMaskSet->n < ArraySize(pMaskSet->ix) );
+  pMaskSet->ix[pMaskSet->n++] = iCursor;
+}
+
+/*
+** Advance to the next WhereTerm that matches according to the criteria
+** established when the pScan object was initialized by whereScanInit().
+** Return NULL if there are no more matching WhereTerms.
+*/
+static WhereTerm *whereScanNext(WhereScan *pScan){
+  int iCur;            /* The cursor on the LHS of the term */
+  i16 iColumn;         /* The column on the LHS of the term.  -1 for IPK */
+  Expr *pX;            /* An expression being tested */
+  WhereClause *pWC;    /* Shorthand for pScan->pWC */
+  WhereTerm *pTerm;    /* The term being tested */
+  int k = pScan->k;    /* Where to start scanning */
+
+  assert( pScan->iEquiv<=pScan->nEquiv );
+  pWC = pScan->pWC;
+  while(1){
+    iColumn = pScan->aiColumn[pScan->iEquiv-1];
+    iCur = pScan->aiCur[pScan->iEquiv-1];
+    assert( pWC!=0 );
+    do{
+      for(pTerm=pWC->a+k; k<pWC->nTerm; k++, pTerm++){
+        if( pTerm->leftCursor==iCur
+         && pTerm->u.leftColumn==iColumn
+         && (iColumn!=XN_EXPR
+             || sqlite3ExprCompare(pTerm->pExpr->pLeft,pScan->pIdxExpr,iCur)==0)
+         && (pScan->iEquiv<=1 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+        ){
+          if( (pTerm->eOperator & WO_EQUIV)!=0
+           && pScan->nEquiv<ArraySize(pScan->aiCur)
+           && (pX = sqlite3ExprSkipCollate(pTerm->pExpr->pRight))->op==TK_COLUMN
+          ){
+            int j;
+            for(j=0; j<pScan->nEquiv; j++){
+              if( pScan->aiCur[j]==pX->iTable
+               && pScan->aiColumn[j]==pX->iColumn ){
+                  break;
+              }
+            }
+            if( j==pScan->nEquiv ){
+              pScan->aiCur[j] = pX->iTable;
+              pScan->aiColumn[j] = pX->iColumn;
+              pScan->nEquiv++;
+            }
+          }
+          if( (pTerm->eOperator & pScan->opMask)!=0 ){
+            /* Verify the affinity and collating sequence match */
+            if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){
+              CollSeq *pColl;
+              Parse *pParse = pWC->pWInfo->pParse;
+              pX = pTerm->pExpr;
+              if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){
+                continue;
+              }
+              assert(pX->pLeft);
+              pColl = sqlite3BinaryCompareCollSeq(pParse,
+                                                  pX->pLeft, pX->pRight);
+              if( pColl==0 ) pColl = pParse->db->pDfltColl;
+              if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){
+                continue;
+              }
+            }
+            if( (pTerm->eOperator & (WO_EQ|WO_IS))!=0
+             && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN
+             && pX->iTable==pScan->aiCur[0]
+             && pX->iColumn==pScan->aiColumn[0]
+            ){
+              testcase( pTerm->eOperator & WO_IS );
+              continue;
+            }
+            pScan->pWC = pWC;
+            pScan->k = k+1;
+            return pTerm;
+          }
+        }
+      }
+      pWC = pWC->pOuter;
+      k = 0;
+    }while( pWC!=0 );
+    if( pScan->iEquiv>=pScan->nEquiv ) break;
+    pWC = pScan->pOrigWC;
+    k = 0;
+    pScan->iEquiv++;
+  }
+  return 0;
+}
+
+/*
+** Initialize a WHERE clause scanner object.  Return a pointer to the
+** first match.  Return NULL if there are no matches.
+**
+** The scanner will be searching the WHERE clause pWC.  It will look
+** for terms of the form "X <op> <expr>" where X is column iColumn of table
+** iCur.  Or if pIdx!=0 then X is column iColumn of index pIdx.  pIdx
+** must be one of the indexes of table iCur.
+**
+** The <op> must be one of the operators described by opMask.
+**
+** If the search is for X and the WHERE clause contains terms of the
+** form X=Y then this routine might also return terms of the form
+** "Y <op> <expr>".  The number of levels of transitivity is limited,
+** but is enough to handle most commonly occurring SQL statements.
+**
+** If X is not the INTEGER PRIMARY KEY then X must be compatible with
+** index pIdx.
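+**
+** (Editor's illustration: when scanning for "t1.a <op> <expr>" and the
+** WHERE clause contains "t1.a=t2.b AND t2.b>5", the scan may also return
+** the term "t2.b>5" through the t1.a=t2.b equivalence.)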
+*/
+static WhereTerm *whereScanInit(
+  WhereScan *pScan,       /* The WhereScan object being initialized */
+  WhereClause *pWC,       /* The WHERE clause to be scanned */
+  int iCur,               /* Cursor to scan for */
+  int iColumn,            /* Column to scan for */
+  u32 opMask,             /* Operator(s) to scan for */
+  Index *pIdx             /* Must be compatible with this index */
+){
+  pScan->pOrigWC = pWC;
+  pScan->pWC = pWC;
+  pScan->pIdxExpr = 0;
+  pScan->idxaff = 0;
+  pScan->zCollName = 0;
+  if( pIdx ){
+    int j = iColumn;
+    iColumn = pIdx->aiColumn[j];
+    if( iColumn==XN_EXPR ){
+      pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr;
+      pScan->zCollName = pIdx->azColl[j];
+    }else if( iColumn==pIdx->pTable->iPKey ){
+      iColumn = XN_ROWID;
+    }else if( iColumn>=0 ){
+      pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity;
+      pScan->zCollName = pIdx->azColl[j];
+    }
+  }else if( iColumn==XN_EXPR ){
+    return 0;
+  }
+  pScan->opMask = opMask;
+  pScan->k = 0;
+  pScan->aiCur[0] = iCur;
+  pScan->aiColumn[0] = iColumn;
+  pScan->nEquiv = 1;
+  pScan->iEquiv = 1;
+  return whereScanNext(pScan);
+}
+
+/*
+** Search for a term in the WHERE clause that is of the form "X <op> <expr>"
+** where X is a reference to the iColumn of table iCur or of index pIdx
+** if pIdx!=0 and <op> is one of the WO_xx operator codes specified by
+** the op parameter.  Return a pointer to the term.  Return 0 if not found.
+**
+** If pIdx!=0 then it must be one of the indexes of table iCur.
+** Search for terms matching the iColumn-th column of pIdx
+** rather than the iColumn-th column of table iCur.
+**
+** The term returned might be "Y=<expr>" if there is another constraint in
+** the WHERE clause that specifies that X=Y.  Any such constraints will be
+** identified by the WO_EQUIV bit in the pTerm->eOperator field.  The
+** aiCur[]/aiColumn[] arrays hold X and all its equivalents. There are 11
+** slots in aiCur[]/aiColumn[] so that means we can look for X plus up to 10
+** other equivalent values.  Hence a search for X will return <expr> if X=A1
+** and A1=A2 and A2=A3 and ... and A9=A10 and A10=<expr>.
+**
+** If there are multiple terms in the WHERE clause of the form "X <op> <expr>"
+** then try for the one with no dependencies on <expr> - in other words where
+** <expr> is a constant expression of some kind.  Only return entries of
+** the form "X <op> Y" where Y is a column in another table if no terms of
+** the form "X <op> <expr>" exist.   If no terms with a constant RHS
+** exist, try to return a term that does not use WO_EQUIV.
+*/
+SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
+  WhereClause *pWC,     /* The WHERE clause to be searched */
+  int iCur,             /* Cursor number of LHS */
+  int iColumn,          /* Column number of LHS */
+  Bitmask notReady,     /* RHS must not overlap with this mask */
+  u32 op,               /* Mask of WO_xx values describing operator */
+  Index *pIdx           /* Must be compatible with this index, if not NULL */
+){
+  WhereTerm *pResult = 0;
+  WhereTerm *p;
+  WhereScan scan;
+
+  p = whereScanInit(&scan, pWC, iCur, iColumn, op, pIdx);
+  op &= WO_EQ|WO_IS;
+  while( p ){
+    if( (p->prereqRight & notReady)==0 ){
+      if( p->prereqRight==0 && (p->eOperator&op)!=0 ){
+        testcase( p->eOperator & WO_IS );
+        return p;
+      }
+      if( pResult==0 ) pResult = p;
+    }
+    p = whereScanNext(&scan);
+  }
+  return pResult;
+}
+
+/*
+** This function searches pList for an entry that matches the iCol-th column
+** of index pIdx.
+**
+** If such an expression is found, its index in pList->a[] is returned. If
+** no expression is found, -1 is returned.
+*/
+static int findIndexCol(
+  Parse *pParse,                  /* Parse context */
+  ExprList *pList,                /* Expression list to search */
+  int iBase,                      /* Cursor for table associated with pIdx */
+  Index *pIdx,                    /* Index to match column of */
+  int iCol                        /* Column of index to match */
+){
+  int i;
+  const char *zColl = pIdx->azColl[iCol];
+
+  for(i=0; i<pList->nExpr; i++){
+    Expr *p = sqlite3ExprSkipCollate(pList->a[i].pExpr);
+    if( p->op==TK_COLUMN
+     && p->iColumn==pIdx->aiColumn[iCol]
+     && p->iTable==iBase
+    ){
+      CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[i].pExpr);
+      if( pColl && 0==sqlite3StrICmp(pColl->zName, zColl) ){
+        return i;
+      }
+    }
+  }
+
+  return -1;
+}
+
+/*
+** Return TRUE if the iCol-th column of index pIdx is NOT NULL
+*/
+static int indexColumnNotNull(Index *pIdx, int iCol){
+  int j;
+  assert( pIdx!=0 );
+  assert( iCol>=0 && iCol<pIdx->nColumn );
+  j = pIdx->aiColumn[iCol];
+  if( j>=0 ){
+    return pIdx->pTable->aCol[j].notNull;
+  }else if( j==(-1) ){
+    return 1;
+  }else{
+    assert( j==(-2) );
+    return 0;  /* Assume an indexed expression can always yield a NULL */
+
+  }
+}
+
+/*
+** Return true if the DISTINCT expression-list passed as the third argument
+** is redundant.
+**
+** A DISTINCT list is redundant if any subset of the columns in the
+** DISTINCT list are collectively unique and individually non-null.
+*/
+static int isDistinctRedundant(
+  Parse *pParse,            /* Parsing context */
+  SrcList *pTabList,        /* The FROM clause */
+  WhereClause *pWC,         /* The WHERE clause */
+  ExprList *pDistinct       /* The result set that needs to be DISTINCT */
+){
+  Table *pTab;
+  Index *pIdx;
+  int i;
+  int iBase;
+
+  /* If there is more than one table or sub-select in the FROM clause of
+  ** this query, then it will not be possible to show that the DISTINCT
+  ** clause is redundant. */
+  if( pTabList->nSrc!=1 ) return 0;
+  iBase = pTabList->a[0].iCursor;
+  pTab = pTabList->a[0].pTab;
+
+  /* If any of the expressions is an IPK column on table iBase, then return
+  ** true. Note: The (p->iTable==iBase) part of this test may be false if the
+  ** current SELECT is a correlated sub-query.
+  */
+  for(i=0; i<pDistinct->nExpr; i++){
+    Expr *p = sqlite3ExprSkipCollate(pDistinct->a[i].pExpr);
+    if( p->op==TK_COLUMN && p->iTable==iBase && p->iColumn<0 ) return 1;
+  }
+
+  /* Loop through all indices on the table, checking each to see if it makes
+  ** the DISTINCT qualifier redundant. It does so if:
+  **
+  **   1. The index is itself UNIQUE, and
+  **
+  **   2. All of the columns in the index are either part of the pDistinct
+  **      list, or else the WHERE clause contains a term of the form "col=X",
+  **      where X is a constant value. The collation sequences of the
+  **      comparison and select-list expressions must match those of the index.
+  **
+  **   3. All of those index columns for which the WHERE clause does not
+  **      contain a "col=X" term are subject to a NOT NULL constraint.
+  */
+  for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+    if( !IsUniqueIndex(pIdx) ) continue;
+    for(i=0; i<pIdx->nKeyCol; i++){
+      if( 0==sqlite3WhereFindTerm(pWC, iBase, i, ~(Bitmask)0, WO_EQ, pIdx) ){
+        if( findIndexCol(pParse, pDistinct, iBase, pIdx, i)<0 ) break;
+        if( indexColumnNotNull(pIdx, i)==0 ) break;
+      }
+    }
+    if( i==pIdx->nKeyCol ){
+      /* This index implies that the DISTINCT qualifier is redundant. */
+      return 1;
+    }
+  }
+
+  return 0;
+}
+
+
+/*
+** Estimate the logarithm of the input value to base 2.
+*/
+static LogEst estLog(LogEst N){
+  return N<=10 ? 0 : sqlite3LogEst(N) - 33;
+}
+
+/*
+** Convert OP_Column opcodes to OP_Copy in previously generated code.
+**
+** This routine runs over generated VDBE code and translates OP_Column
+** opcodes into OP_Copy when the table is being accessed via co-routine
+** instead of via table lookup.
+**
+** If the bIncrRowid parameter is 0, then any OP_Rowid instructions on
+** cursor iTabCur are transformed into OP_Null. Or, if bIncrRowid is non-zero,
+** then each OP_Rowid is transformed into an instruction to increment the
+** value stored in its output register.
+*/
+static void translateColumnToCopy(
+  Vdbe *v,            /* The VDBE containing code to translate */
+  int iStart,         /* Translate from this opcode to the end */
+  int iTabCur,        /* OP_Column/OP_Rowid references to this table */
+  int iRegister,      /* The first column is in this register */
+  int bIncrRowid      /* If non-zero, transform OP_Rowid to OP_AddImm(1) */
+){
+  VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart);
+  int iEnd = sqlite3VdbeCurrentAddr(v);
+  for(; iStart<iEnd; iStart++, pOp++){
+    if( pOp->p1!=iTabCur ) continue;
+    if( pOp->opcode==OP_Column ){
+      pOp->opcode = OP_Copy;
+      pOp->p1 = pOp->p2 + iRegister;
+      pOp->p2 = pOp->p3;
+      pOp->p3 = 0;
+    }else if( pOp->opcode==OP_Rowid ){
+      if( bIncrRowid ){
+        /* Increment the value stored in the P2 operand of the OP_Rowid. */
+        pOp->opcode = OP_AddImm;
+        pOp->p1 = pOp->p2;
+        pOp->p2 = 1;
+      }else{
+        pOp->opcode = OP_Null;
+        pOp->p1 = 0;
+        pOp->p3 = 0;
+      }
+    }
+  }
+}
+
+/*
+** Two routines for printing the content of an sqlite3_index_info
+** structure. Used for testing and debugging only. If neither
+** SQLITE_TEST nor SQLITE_DEBUG is defined, then these routines
+** are no-ops.
+*/
+#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED)
+static void TRACE_IDX_INPUTS(sqlite3_index_info *p){
+  int i;
+  if( !sqlite3WhereTrace ) return;
+  for(i=0; i<p->nConstraint; i++){
+    sqlite3DebugPrintf("  constraint[%d]: col=%d termid=%d op=%d usable=%d\n",
+       i,
+       p->aConstraint[i].iColumn,
+       p->aConstraint[i].iTermOffset,
+       p->aConstraint[i].op,
+       p->aConstraint[i].usable);
+  }
+  for(i=0; i<p->nOrderBy; i++){
+    sqlite3DebugPrintf("  orderby[%d]: col=%d desc=%d\n",
+       i,
+       p->aOrderBy[i].iColumn,
+       p->aOrderBy[i].desc);
+  }
+}
+static void TRACE_IDX_OUTPUTS(sqlite3_index_info *p){
+  int i;
+  if( !sqlite3WhereTrace ) return;
+  for(i=0; i<p->nConstraint; i++){
+    sqlite3DebugPrintf("  usage[%d]: argvIdx=%d omit=%d\n",
+       i,
+       p->aConstraintUsage[i].argvIndex,
+       p->aConstraintUsage[i].omit);
+  }
+  sqlite3DebugPrintf("  idxNum=%d\n", p->idxNum);
+  sqlite3DebugPrintf("  idxStr=%s\n", p->idxStr);
+  sqlite3DebugPrintf("  orderByConsumed=%d\n", p->orderByConsumed);
+  sqlite3DebugPrintf("  estimatedCost=%g\n", p->estimatedCost);
+  sqlite3DebugPrintf("  estimatedRows=%lld\n", p->estimatedRows);
+}
+#else
+#define TRACE_IDX_INPUTS(A)
+#define TRACE_IDX_OUTPUTS(A)
+#endif
+
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+/*
+** Return TRUE if the WHERE clause term pTerm is of a form where it
+** could be used with an index to access pSrc, assuming an appropriate
+** index existed.
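+**
+** (Editor's example: in SELECT * FROM t1, t2 WHERE t2.b=t1.x, with t2 in
+** the inner loop, the term "t2.b=t1.x" passes all of the tests below -
+** an equality on a t2 column whose right-hand side is available from the
+** outer loop - and so could drive an automatic index on t2(b).)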
+*/
+static int termCanDriveIndex(
+  WhereTerm *pTerm,              /* WHERE clause term to check */
+  struct SrcList_item *pSrc,     /* Table we are trying to access */
+  Bitmask notReady               /* Tables in outer loops of the join */
+){
+  char aff;
+  if( pTerm->leftCursor!=pSrc->iCursor ) return 0;
+  if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0;
+  if( (pTerm->prereqRight & notReady)!=0 ) return 0;
+  if( pTerm->u.leftColumn<0 ) return 0;
+  aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity;
+  if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0;
+  testcase( pTerm->pExpr->op==TK_IS );
+  return 1;
+}
+#endif
+
+
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+/*
+** Generate code to construct the Index object for an automatic index
+** and to set up the WhereLevel object pLevel so that the code generator
+** makes use of the automatic index.
+*/
+static void constructAutomaticIndex(
+  Parse *pParse,              /* The parsing context */
+  WhereClause *pWC,           /* The WHERE clause */
+  struct SrcList_item *pSrc,  /* The FROM clause term to get the next index */
+  Bitmask notReady,           /* Mask of cursors that are not available */
+  WhereLevel *pLevel          /* Write new index here */
+){
+  int nKeyCol;                /* Number of columns in the constructed index */
+  WhereTerm *pTerm;           /* A single term of the WHERE clause */
+  WhereTerm *pWCEnd;          /* End of pWC->a[] */
+  Index *pIdx;                /* Object describing the transient index */
+  Vdbe *v;                    /* Prepared statement under construction */
+  int addrInit;               /* Address of the initialization bypass jump */
+  Table *pTable;              /* The table being indexed */
+  int addrTop;                /* Top of the index fill loop */
+  int regRecord;              /* Register holding an index record */
+  int n;                      /* Column counter */
+  int i;                      /* Loop counter */
+  int mxBitCol;               /* Maximum column in pSrc->colUsed */
+  CollSeq *pColl;             /* Collating sequence to use on a column */
+  WhereLoop *pLoop;           /* The Loop object */
+  char *zNotUsed;             /* Extra space on the end of pIdx */
+  Bitmask idxCols;            /* Bitmap of columns used for indexing */
+  Bitmask extraCols;          /* Bitmap of additional columns */
+  u8 sentWarning = 0;         /* True if a warning has been issued */
+  Expr *pPartial = 0;         /* Partial Index Expression */
+  int iContinue = 0;          /* Jump here to skip excluded rows */
+  struct SrcList_item *pTabItem;  /* FROM clause term being indexed */
+  int addrCounter = 0;        /* Address where integer counter is initialized */
+  int regBase;                /* Array of registers where record is assembled */
+
+  /* Generate code to skip over the creation and initialization of the
+  ** transient index on 2nd and subsequent iterations of the loop. */
+  v = pParse->pVdbe;
+  assert( v!=0 );
+  addrInit = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+
+  /* Count the number of columns that will be added to the index
+  ** and used to match WHERE clause constraints */
+  nKeyCol = 0;
+  pTable = pSrc->pTab;
+  pWCEnd = &pWC->a[pWC->nTerm];
+  pLoop = pLevel->pWLoop;
+  idxCols = 0;
+  for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
+    Expr *pExpr = pTerm->pExpr;
+    assert( !ExprHasProperty(pExpr, EP_FromJoin)    /* prereq always non-zero */
+         || pExpr->iRightJoinTable!=pSrc->iCursor   /* for the right-hand   */
+         || pLoop->prereq!=0 );                     /* table of a LEFT JOIN */
+    if( pLoop->prereq==0
+     && (pTerm->wtFlags & TERM_VIRTUAL)==0
+     && !ExprHasProperty(pExpr, EP_FromJoin)
+     && sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor) ){
+      pPartial = sqlite3ExprAnd(pParse->db, pPartial,
+                                sqlite3ExprDup(pParse->db, pExpr, 0));
+    }
+    if( termCanDriveIndex(pTerm, pSrc, notReady) ){
+      int iCol = pTerm->u.leftColumn;
+      Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol);
+      testcase( iCol==BMS );
+      testcase( iCol==BMS-1 );
+      if( !sentWarning ){
+        sqlite3_log(SQLITE_WARNING_AUTOINDEX,
+            "automatic index on %s(%s)", pTable->zName,
+            pTable->aCol[iCol].zName);
+        sentWarning = 1;
+      }
+      if( (idxCols & cMask)==0 ){
+        if( whereLoopResize(pParse->db, pLoop, nKeyCol+1) ){
+          goto end_auto_index_create;
+        }
+        pLoop->aLTerm[nKeyCol++] = pTerm;
+        idxCols |= cMask;
+      }
+    }
+  }
+  assert( nKeyCol>0 );
+  pLoop->u.btree.nEq = pLoop->nLTerm = nKeyCol;
+  pLoop->wsFlags = WHERE_COLUMN_EQ | WHERE_IDX_ONLY | WHERE_INDEXED
+                    | WHERE_AUTO_INDEX;
+
+  /* Count the number of additional columns needed to create a
+  ** covering index. A "covering index" is an index that contains all
+  ** columns that are needed by the query. With a covering index, the
+  ** original table never needs to be accessed. Automatic indices must
+  ** be a covering index because the index will not be updated if the
+  ** original table changes and the index and table cannot both be used
+  ** if they go out of sync.
+  */
+  extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1));
+  mxBitCol = MIN(BMS-1,pTable->nCol);
+  testcase( pTable->nCol==BMS-1 );
+  testcase( pTable->nCol==BMS-2 );
+  for(i=0; i<mxBitCol; i++){
+    if( extraCols & MASKBIT(i) ) nKeyCol++;
+  }
+  if( pSrc->colUsed & MASKBIT(BMS-1) ){
+    nKeyCol += pTable->nCol - BMS + 1;
+  }
+
+  /* Construct the Index object to describe this index */
+  pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed);
+  if( pIdx==0 ) goto end_auto_index_create;
+  pLoop->u.btree.pIndex = pIdx;
+  pIdx->zName = "auto-index";
+  pIdx->pTable = pTable;
+  n = 0;
+  idxCols = 0;
+  for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
+    if( termCanDriveIndex(pTerm, pSrc, notReady) ){
+      int iCol = pTerm->u.leftColumn;
+      Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol);
+      testcase( iCol==BMS-1 );
+      testcase( iCol==BMS );
+      if( (idxCols & cMask)==0 ){
+        Expr *pX = pTerm->pExpr;
+        idxCols |= cMask;
+        pIdx->aiColumn[n] = pTerm->u.leftColumn;
+        pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight);
+        pIdx->azColl[n] = pColl ? pColl->zName : sqlite3StrBINARY;
+        n++;
+      }
+    }
+  }
+  assert( (u32)n==pLoop->u.btree.nEq );
+
+  /* Add additional columns needed to make the automatic index into
+  ** a covering index */
+  for(i=0; i<mxBitCol; i++){
+    if( extraCols & MASKBIT(i) ){
+      pIdx->aiColumn[n] = i;
+      pIdx->azColl[n] = sqlite3StrBINARY;
+      n++;
+    }
+  }
+  if( pSrc->colUsed & MASKBIT(BMS-1) ){
+    for(i=BMS-1; i<pTable->nCol; i++){
+      pIdx->aiColumn[n] = i;
+      pIdx->azColl[n] = sqlite3StrBINARY;
+      n++;
+    }
+  }
+  assert( n==nKeyCol );
+  pIdx->aiColumn[n] = XN_ROWID;
+  pIdx->azColl[n] = sqlite3StrBINARY;
+
+  /* Create the automatic index */
+  assert( pLevel->iIdxCur>=0 );
+  pLevel->iIdxCur = pParse->nTab++;
+  sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1);
+  sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+  VdbeComment((v, "for %s", pTable->zName));
+
+  /* Fill the automatic index with content */
+  sqlite3ExprCachePush(pParse);
+  pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom];
+  if( pTabItem->fg.viaCoroutine ){
+    int regYield = pTabItem->regReturn;
+    addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0);
+    sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
+    addrTop =  sqlite3VdbeAddOp1(v, OP_Yield, regYield);
+    VdbeCoverage(v);
+    VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName));
+  }else{
+    addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v);
+  }
+  if( pPartial ){
+    iContinue = sqlite3VdbeMakeLabel(v);
+    sqlite3ExprIfFalse(pParse, pPartial, iContinue, SQLITE_JUMPIFNULL);
+    pLoop->wsFlags |= WHERE_PARTIALIDX;
+  }
+  regRecord = sqlite3GetTempReg(pParse);
+  regBase = sqlite3GenerateIndexKey(
+      pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0, 0, 0
+  );
+  sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord);
+  sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+  if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue);
+  if( pTabItem->fg.viaCoroutine ){
+    sqlite3VdbeChangeP2(v, addrCounter, regBase+n);
+    translateColumnToCopy(v, addrTop, pLevel->iTabCur, pTabItem->regResult, 1);
+    sqlite3VdbeGoto(v, addrTop);
+    pTabItem->fg.viaCoroutine = 0;
+  }else{
+    sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v);
+  }
+  sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX);
+  sqlite3VdbeJumpHere(v, addrTop);
+  sqlite3ReleaseTempReg(pParse, regRecord);
+  sqlite3ExprCachePop(pParse);
+
+  /* Jump here when skipping the initialization */
+  sqlite3VdbeJumpHere(v, addrInit);
+
+end_auto_index_create:
+  sqlite3ExprDelete(pParse->db, pPartial);
+}
+#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */
+
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*
+** Allocate and populate an sqlite3_index_info structure. It is the
+** responsibility of the caller to eventually release the structure
+** by passing the pointer returned by this function to sqlite3_free().
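+**
+** (Editor's sketch of the layout implied by the code below: everything is
+** carved out of a single allocation,
+**
+**    sqlite3_index_info | aConstraint[nTerm] | aOrderBy[nOrderBy]
+**                       | aConstraintUsage[nTerm]
+**
+** which is why one sqlite3_free() on the returned pointer releases it all.)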
+*/
+static sqlite3_index_info *allocateIndexInfo(
+  Parse *pParse,
+  WhereClause *pWC,
+  Bitmask mUnusable,              /* Ignore terms with these prereqs */
+  struct SrcList_item *pSrc,
+  ExprList *pOrderBy,
+  u16 *pmNoOmit                   /* Mask of terms not to omit */
+){
+  int i, j;
+  int nTerm;
+  struct sqlite3_index_constraint *pIdxCons;
+  struct sqlite3_index_orderby *pIdxOrderBy;
+  struct sqlite3_index_constraint_usage *pUsage;
+  WhereTerm *pTerm;
+  int nOrderBy;
+  sqlite3_index_info *pIdxInfo;
+  u16 mNoOmit = 0;
+
+  /* Count the number of possible WHERE clause constraints referring
+  ** to this virtual table */
+  for(i=nTerm=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+    if( pTerm->leftCursor != pSrc->iCursor ) continue;
+    if( pTerm->prereqRight & mUnusable ) continue;
+    assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) );
+    testcase( pTerm->eOperator & WO_IN );
+    testcase( pTerm->eOperator & WO_ISNULL );
+    testcase( pTerm->eOperator & WO_IS );
+    testcase( pTerm->eOperator & WO_ALL );
+    if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV|WO_IS))==0 ) continue;
+    if( pTerm->wtFlags & TERM_VNULL ) continue;
+    assert( pTerm->u.leftColumn>=(-1) );
+    nTerm++;
+  }
+
+  /* If the ORDER BY clause contains only columns in the current
+  ** virtual table then allocate space for the aOrderBy part of
+  ** the sqlite3_index_info structure.
+  */
+  nOrderBy = 0;
+  if( pOrderBy ){
+    int n = pOrderBy->nExpr;
+    for(i=0; i<n; i++){
+      Expr *pExpr = pOrderBy->a[i].pExpr;
+      if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break;
+    }
+    if( i==n ){
+      nOrderBy = n;
+    }
+  }
+
+  /* Allocate the sqlite3_index_info structure
+  */
+  pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo)
+                           + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm
+                           + sizeof(*pIdxOrderBy)*nOrderBy );
+  if( pIdxInfo==0 ){
+    sqlite3ErrorMsg(pParse, "out of memory");
+    return 0;
+  }
+
+  /* Initialize the structure. The sqlite3_index_info structure contains
+  ** many fields that are declared "const" to prevent xBestIndex from
+  ** changing them. We have to do some funky casting in order to
+  ** initialize those fields.
+  */
+  pIdxCons = (struct sqlite3_index_constraint*)&pIdxInfo[1];
+  pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm];
+  pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy];
+  *(int*)&pIdxInfo->nConstraint = nTerm;
+  *(int*)&pIdxInfo->nOrderBy = nOrderBy;
+  *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint = pIdxCons;
+  *(struct sqlite3_index_orderby**)&pIdxInfo->aOrderBy = pIdxOrderBy;
+  *(struct sqlite3_index_constraint_usage**)&pIdxInfo->aConstraintUsage =
+                                                                   pUsage;
+
+  for(i=j=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+    u8 op;
+    if( pTerm->leftCursor != pSrc->iCursor ) continue;
+    if( pTerm->prereqRight & mUnusable ) continue;
+    assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) );
+    testcase( pTerm->eOperator & WO_IN );
+    testcase( pTerm->eOperator & WO_IS );
+    testcase( pTerm->eOperator & WO_ISNULL );
+    testcase( pTerm->eOperator & WO_ALL );
+    if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV|WO_IS))==0 ) continue;
+    if( pTerm->wtFlags & TERM_VNULL ) continue;
+    assert( pTerm->u.leftColumn>=(-1) );
+    pIdxCons[j].iColumn = pTerm->u.leftColumn;
+    pIdxCons[j].iTermOffset = i;
+    op = (u8)pTerm->eOperator & WO_ALL;
+    if( op==WO_IN ) op = WO_EQ;
+    if( op==WO_MATCH ){
+      op = pTerm->eMatchOp;
+    }
+    pIdxCons[j].op = op;
+    /* The direct assignment in the previous line is possible only because
+    ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The
+    ** following asserts verify this fact. */
+    assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ );
+    assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT );
+    assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE );
+    assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT );
+    assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE );
+    assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH );
+    assert( pTerm->eOperator & (WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) );
+
+    if( op & (WO_LT|WO_LE|WO_GT|WO_GE)
+     && sqlite3ExprIsVector(pTerm->pExpr->pRight)
+    ){
+      if( i<16 ) mNoOmit |= (1 << i);
+      if( op==WO_LT ) pIdxCons[j].op = WO_LE;
+      if( op==WO_GT ) pIdxCons[j].op = WO_GE;
+    }
+
+    j++;
+  }
+  for(i=0; i<nOrderBy; i++){
+    Expr *pExpr = pOrderBy->a[i].pExpr;
+    pIdxOrderBy[i].iColumn = pExpr->iColumn;
+    pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder;
+  }
+
+  *pmNoOmit = mNoOmit;
+  return pIdxInfo;
+}
+
+/*
+** The table object reference passed as the second argument to this function
+** must represent a virtual table. This function invokes the xBestIndex()
+** method of the virtual table with the sqlite3_index_info object that
+** comes in as the 3rd argument to this function.
+**
+** If an error occurs, pParse is populated with an error message and a
+** non-zero value is returned. Otherwise, 0 is returned and the output
+** part of the sqlite3_index_info structure is left populated.
+**
+** Whether or not an error is returned, it is the responsibility of the
+** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates
+** that this is required.
+*/
+static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){
+  sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab;
+  int rc;
+
+  TRACE_IDX_INPUTS(p);
+  rc = pVtab->pModule->xBestIndex(pVtab, p);
+  TRACE_IDX_OUTPUTS(p);
+
+  if( rc!=SQLITE_OK ){
+    if( rc==SQLITE_NOMEM ){
+      sqlite3OomFault(pParse->db);
+    }else if( !pVtab->zErrMsg ){
+      sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc));
+    }else{
+      sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg);
+    }
+  }
+  sqlite3_free(pVtab->zErrMsg);
+  pVtab->zErrMsg = 0;
+
+#if 0
+  /* This error is now caught by the caller.
+  ** Search for "xBestIndex malfunction" below */
+  for(i=0; i<p->nConstraint; i++){
+    if( !p->aConstraint[i].usable && p->aConstraintUsage[i].argvIndex>0 ){
+      sqlite3ErrorMsg(pParse,
+          "table %s: xBestIndex returned an invalid plan", pTab->zName);
+    }
+  }
+#endif
+
+  return pParse->nErr;
+}
+#endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+/*
+** Estimate the location of a particular key among all keys in an
+** index. Store the results in aStat as follows:
+**
+**    aStat[0]      Est. number of rows less than pRec
+**    aStat[1]      Est. number of rows equal to pRec
+**
+** Return the index of the sample that is the smallest sample that
+** is greater than or equal to pRec. Note that this index is not an index
+** into the aSample[] array - it is an index into a virtual set of samples
+** based on the contents of aSample[] and the number of fields in record
+** pRec.
+*/
+static int whereKeyStats(
+  Parse *pParse,              /* Database connection */
+  Index *pIdx,                /* Index to consider domain of */
+  UnpackedRecord *pRec,       /* Vector of values to consider */
+  int roundUp,                /* Round up if true. Round down if false */
+  tRowcnt *aStat              /* OUT: stats written here */
+){
+  IndexSample *aSample = pIdx->aSample;
+  int iCol;                   /* Index of required stats in anEq[] etc. */
+  int i;                      /* Index of first sample >= pRec */
+  int iSample;                /* Smallest sample larger than or equal to pRec */
+  int iMin = 0;               /* Smallest sample not yet tested */
+  int iTest;                  /* Next sample to test */
+  int res;                    /* Result of comparison operation */
+  int nField;                 /* Number of fields in pRec */
+  tRowcnt iLower = 0;         /* anLt[] + anEq[] of largest sample pRec is > */
+
+#ifndef SQLITE_DEBUG
+  UNUSED_PARAMETER( pParse );
+#endif
+  assert( pRec!=0 );
+  assert( pIdx->nSample>0 );
+  assert( pRec->nField>0 && pRec->nField<=pIdx->nSampleCol );
+
+  /* Do a binary search to find the first sample greater than or equal
+  ** to pRec. If pRec contains a single field, the set of samples to search
+  ** is simply the aSample[] array. If the samples in aSample[] contain more
+  ** than one field, all fields following the first are ignored.
+  **
+  ** If pRec contains N fields, where N is more than one, then as well as the
+  ** samples in aSample[] (truncated to N fields), the search also has to
+  ** consider prefixes of those samples. For example, if the set of samples
+  ** in aSample is:
+  **
+  **     aSample[0] = (a, 5)
+  **     aSample[1] = (a, 10)
+  **     aSample[2] = (b, 5)
+  **     aSample[3] = (c, 100)
+  **     aSample[4] = (c, 105)
+  **
+  ** Then the search space should ideally be the samples above and the
+  ** unique prefixes [a], [b] and [c]. But since that is hard to organize,
+  ** the code actually searches this set:
+  **
+  **     0: (a)
+  **     1: (a, 5)
+  **     2: (a, 10)
+  **     3: (a, 10)
+  **     4: (b)
+  **     5: (b, 5)
+  **     6: (c)
+  **     7: (c, 100)
+  **     8: (c, 105)
+  **     9: (c, 105)
+  **
+  ** For each sample in the aSample[] array, N samples are present in the
+  ** effective sample array. In the above, samples 0 and 1 are based on
+  ** sample aSample[0]. Samples 2 and 3 on aSample[1] etc.
+  **
+  ** Often, sample i of each block of N effective samples has (i+1) fields.
+  ** Except, each sample may be extended to ensure that it is greater than or
+  ** equal to the previous sample in the array. For example, in the above,
+  ** sample 2 is the first sample of a block of N samples, so at first it
+  ** appears that it should be 1 field in size. However, that would make it
+  ** smaller than sample 1, so the binary search would not work. As a result,
+  ** it is extended to two fields. The duplicates that this creates do not
+  ** cause any problems.
+  */
+  nField = pRec->nField;
+  iCol = 0;
+  iSample = pIdx->nSample * nField;
+  do{
+    int iSamp;                    /* Index in aSample[] of test sample */
+    int n;                        /* Number of fields in test sample */
+
+    iTest = (iMin+iSample)/2;
+    iSamp = iTest / nField;
+    if( iSamp>0 ){
+      /* The proposed effective sample is a prefix of sample aSample[iSamp].
+      ** Specifically, the shortest prefix of at least (1 + iTest%nField)
+      ** fields that is greater than the previous effective sample. */
+      for(n=(iTest % nField) + 1; n<nField; n++){
+        if( aSample[iSamp-1].anLt[n-1]!=aSample[iSamp].anLt[n-1] ) break;
+      }
+    }else{
+      n = iTest + 1;
+    }
+
+    pRec->nField = n;
+    res = sqlite3VdbeRecordCompare(aSample[iSamp].n, aSample[iSamp].p, pRec);
+    if( res<0 ){
+      iLower = aSample[iSamp].anLt[n-1] + aSample[iSamp].anEq[n-1];
+      iMin = iTest+1;
+    }else if( res==0 && n<nField ){
+      iLower = aSample[iSamp].anLt[n-1];
+      iMin = iTest+1;
+      res = -1;
+    }else{
+      iSample = iTest;
+      iCol = n-1;
+    }
+  }while( res && iMin<iSample );
+  i = iSample / nField;
+
+#ifdef SQLITE_DEBUG
+  /* The following assert statements check that the binary search code
+  ** above found the right answer. This block serves no purpose other
+  ** than to invoke the asserts. */
+  if( pParse->db->mallocFailed==0 ){
+    if( res==0 ){
+      /* If (res==0) is true, then pRec must be equal to sample i. */
+      assert( i<pIdx->nSample );
+      assert( iCol==nField-1 );
+      pRec->nField = nField;
+      assert( 0==sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)
+           || pParse->db->mallocFailed
+      );
+    }else{
+      /* Unless i==pIdx->nSample, indicating that pRec is larger than
+      ** all samples in the aSample[] array, pRec must be smaller than the
+      ** (iCol+1) field prefix of sample i. */
+      assert( i<=pIdx->nSample && i>=0 );
+      pRec->nField = iCol+1;
+      assert( i==pIdx->nSample
+           || sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)>0
+           || pParse->db->mallocFailed );
+
+      /* if i==0 and iCol==0, then record pRec is smaller than all samples
+      ** in the aSample[] array. Otherwise, if (iCol>0) then pRec must
+      ** be greater than or equal to the (iCol) field prefix of sample i.
+      ** If (i>0), then pRec must also be greater than sample (i-1). */
+      if( iCol>0 ){
+        pRec->nField = iCol;
+        assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0
+             || pParse->db->mallocFailed );
+      }
+      if( i>0 ){
+        pRec->nField = nField;
+        assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0
+             || pParse->db->mallocFailed );
+      }
+    }
+  }
+#endif /* ifdef SQLITE_DEBUG */
+
+  if( res==0 ){
+    /* Record pRec is equal to sample i */
+    assert( iCol==nField-1 );
+    aStat[0] = aSample[i].anLt[iCol];
+    aStat[1] = aSample[i].anEq[iCol];
+  }else{
+    /* At this point, the (iCol+1) field prefix of aSample[i] is the first
+    ** sample that is greater than pRec. Or, if i==pIdx->nSample then pRec
+    ** is larger than all samples in the array. */
+    tRowcnt iUpper, iGap;
+    if( i>=pIdx->nSample ){
+      iUpper = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]);
+    }else{
+      iUpper = aSample[i].anLt[iCol];
+    }
+
+    if( iLower>=iUpper ){
+      iGap = 0;
+    }else{
+      iGap = iUpper - iLower;
+    }
+    if( roundUp ){
+      iGap = (iGap*2)/3;
+    }else{
+      iGap = iGap/3;
+    }
+    aStat[0] = iLower + iGap;
+    aStat[1] = pIdx->aAvgEq[iCol];
+  }
+
+  /* Restore the pRec->nField value before returning. */
+  pRec->nField = nField;
+  return i;
+}
+#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
+
+/*
+** If it is not NULL, pTerm is a term that provides an upper or lower
+** bound on a range scan. Without considering pTerm, it is estimated
+** that the scan will visit nNew rows. This function returns the number
+** estimated to be visited after taking pTerm into account.
+**
+** If the user explicitly specified a likelihood() value for this term,
+** then the return value is the likelihood multiplied by the number of
+** input rows. Otherwise, this function assumes that an "IS NOT NULL" term
+** has a likelihood of 0.50, and any other term a likelihood of 0.25.
+*/
+static LogEst whereRangeAdjust(WhereTerm *pTerm, LogEst nNew){
+  LogEst nRet = nNew;
+  if( pTerm ){
+    if( pTerm->truthProb<=0 ){
+      nRet += pTerm->truthProb;
+    }else if( (pTerm->wtFlags & TERM_VNULL)==0 ){
+      nRet -= 20;        assert( 20==sqlite3LogEst(4) );
+    }
+  }
+  return nRet;
+}
+
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+/*
+** Return the affinity for a single column of an index.
+*/
+SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCol){
+  assert( iCol>=0 && iCol<pIdx->nColumn );
+  if( !pIdx->zColAff ){
+    if( sqlite3IndexAffinityStr(db, pIdx)==0 ) return SQLITE_AFF_BLOB;
+  }
+  return pIdx->zColAff[iCol];
+}
+#endif
+
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+/*
+** This function is called to estimate the number of rows visited by a
+** range-scan on a skip-scan index. For example:
+**
+**   CREATE INDEX i1 ON t1(a, b, c);
+**   SELECT * FROM t1 WHERE a=? AND c BETWEEN ? AND ?;
+**
+** Value pLoop->nOut is currently set to the estimated number of rows
+** visited for scanning (a=? AND b=?). This function reduces that estimate
+** by some factor to account for the (c BETWEEN ? AND ?) expression based
+** on the stat4 data for the index. The fact that this scan will be
+** performed multiple times (once for each (a,b) combination that matches
+** a=?) is dealt with by the caller.
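+**
+** (Worked example added by the editor, in terms of the L, U and N defined
+** below: with N==24 samples, L==8 and U==16, nDiff is 8 and the adjustment
+** applied is sqlite3LogEst(24)-sqlite3LogEst(8), about 16 in LogEst units,
+** shrinking pLoop->nOut by roughly a factor of 3.)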
+**
+** It does this by scanning through all stat4 samples, comparing values
+** extracted from pLower and pUpper with the corresponding column in each
+** sample. If L and U are the number of samples found to be less than or
+** equal to the values extracted from pLower and pUpper respectively, and
+** N is the total number of samples, the pLoop->nOut value is adjusted
+** as follows:
+**
+**   nOut = nOut * ( min(U - L, 1) / N )
+**
+** If pLower is NULL, or a value cannot be extracted from the term, L is
+** set to zero. If pUpper is NULL, or a value cannot be extracted from it,
+** U is set to N.
+**
+** Normally, this function sets *pbDone to 1 before returning. However,
+** if no value can be extracted from either pLower or pUpper (and so the
+** estimate of the number of rows delivered remains unchanged), *pbDone
+** is left as is.
+**
+** If an error occurs, an SQLite error code is returned. Otherwise,
+** SQLITE_OK.
+*/
+static int whereRangeSkipScanEst(
+  Parse *pParse,       /* Parsing & code generating context */
+  WhereTerm *pLower,   /* Lower bound on the range. ex: "x>123" Might be NULL */
+  WhereTerm *pUpper,   /* Upper bound on the range. ex: "x<455" Might be NULL */
+  WhereLoop *pLoop,    /* Update the .nOut value of this loop */
+  int *pbDone          /* Set to true if at least one expr. value extracted */
+){
+  Index *p = pLoop->u.btree.pIndex;
+  int nEq = pLoop->u.btree.nEq;
+  sqlite3 *db = pParse->db;
+  int nLower = -1;
+  int nUpper = p->nSample+1;
+  int rc = SQLITE_OK;
+  u8 aff = sqlite3IndexColumnAffinity(db, p, nEq);
+  CollSeq *pColl;
+
+  sqlite3_value *p1 = 0;          /* Value extracted from pLower */
+  sqlite3_value *p2 = 0;          /* Value extracted from pUpper */
+  sqlite3_value *pVal = 0;        /* Value extracted from record */
+
+  pColl = sqlite3LocateCollSeq(pParse, p->azColl[nEq]);
+  if( pLower ){
+    rc = sqlite3Stat4ValueFromExpr(pParse, pLower->pExpr->pRight, aff, &p1);
+    nLower = 0;
+  }
+  if( pUpper && rc==SQLITE_OK ){
+    rc = sqlite3Stat4ValueFromExpr(pParse, pUpper->pExpr->pRight, aff, &p2);
+    nUpper = p2 ? 0 : p->nSample;
+  }
+
+  if( p1 || p2 ){
+    int i;
+    int nDiff;
+    for(i=0; rc==SQLITE_OK && i<p->nSample; i++){
+      rc = sqlite3Stat4Column(db, p->aSample[i].p, p->aSample[i].n, nEq, &pVal);
+      if( rc==SQLITE_OK && p1 ){
+        int res = sqlite3MemCompare(p1, pVal, pColl);
+        if( res>=0 ) nLower++;
+      }
+      if( rc==SQLITE_OK && p2 ){
+        int res = sqlite3MemCompare(p2, pVal, pColl);
+        if( res>=0 ) nUpper++;
+      }
+    }
+    nDiff = (nUpper - nLower);
+    if( nDiff<=0 ) nDiff = 1;
+
+    /* If there is both an upper and lower bound specified, and the
+    ** comparisons indicate that they are close together, use the fallback
+    ** method (assume that the scan visits 1/64 of the rows) for estimating
+    ** the number of rows visited. Otherwise, estimate the number of rows
+    ** using the method described in the header comment for this function. */
+    if( nDiff!=1 || pUpper==0 || pLower==0 ){
+      int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff));
+      pLoop->nOut -= nAdjust;
+      *pbDone = 1;
+      WHERETRACE(0x10, ("range skip-scan regions: %u..%u  adjust=%d est=%d\n",
+                         nLower, nUpper, nAdjust*-1, pLoop->nOut));
+    }
+
+  }else{
+    assert( *pbDone==0 );
+  }
+
+  sqlite3ValueFree(p1);
+  sqlite3ValueFree(p2);
+  sqlite3ValueFree(pVal);
+
+  return rc;
+}
+#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
+
+/*
+** This function is used to estimate the number of rows that will be visited
+** by scanning an index for a range of values. The range may have an upper
+** bound, a lower bound, or both. The WHERE clause terms that set the upper
+** and lower bounds are represented by pLower and pUpper respectively. For
+** example, assuming that index p is on t1(a):
+**
+**   ... FROM t1 WHERE a > ? AND a < ? ...
+**                    |_____|   |_____|
+**                       |         |
+**                     pLower    pUpper
+**
+** If either of the upper or lower bound is not present, then NULL is passed in
+** place of the corresponding WhereTerm.
+**
+** The value in (pBuilder->pNew->u.btree.nEq) is the number of the index
+** column subject to the range constraint. Or, equivalently, the number of
+** equality constraints optimized by the proposed index scan. For example,
+** assuming index p is on t1(a, b), and the SQL query is:
+**
+**   ... FROM t1 WHERE a = ? AND b > ? AND b < ? ...
+**
+** then nEq is set to 1 (as the range restricted column, b, is the second
+** left-most column of the index). Or, if the query is:
+**
+**   ... FROM t1 WHERE a > ? AND a < ? ...
+**
+** then nEq is set to 0.
+**
+** When this function is called, *pnOut is set to the sqlite3LogEst() of the
+** number of rows that the index scan is expected to visit without
+** considering the range constraints. If nEq is 0, then *pnOut is the number of
+** rows in the index. Assuming no error occurs, *pnOut is adjusted (reduced)
+** to account for the range constraints pLower and pUpper.
+**
+** In the absence of sqlite_stat4 ANALYZE data, or if such data cannot be
+** used, a single range inequality reduces the search space by a factor of 4,
+** and a pair of constraints (x>? AND x<?) reduces the expected number of
+** rows visited by a factor of 64.
+*/
+static int whereRangeScanEst(
+  Parse *pParse,       /* Parsing & code generating context */
+  WhereLoopBuilder *pBuilder,
+  WhereTerm *pLower,   /* Lower bound on the range. ex: "x>123" Might be NULL */
+  WhereTerm *pUpper,   /* Upper bound on the range. ex: "x<455" Might be NULL */
+  WhereLoop *pLoop     /* Modify the .nOut and maybe .rRun fields */
+){
+  int rc = SQLITE_OK;
+  int nOut = pLoop->nOut;
+  LogEst nNew;
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+  Index *p = pLoop->u.btree.pIndex;
+  int nEq = pLoop->u.btree.nEq;
+
+  if( p->nSample>0 && nEq<p->nSampleCol ){
+    if( nEq==pBuilder->nRecValid ){
+      UnpackedRecord *pRec = pBuilder->pRec;
+      tRowcnt a[2];
+      int nBtm = pLoop->u.btree.nBtm;
+      int nTop = pLoop->u.btree.nTop;
+
+      /* Variable iLower will be set to the estimate of the number of rows in
+      ** the index that are less than the lower bound of the range query. The
+      ** lower bound being the concatenation of $P and $L, where $P is the
+      ** key-prefix formed by the nEq values matched against the nEq left-most
+      ** columns of the index, and $L is the value in pLower.
+      **
+      ** Or, if pLower is NULL or $L cannot be extracted from it (because it
+      ** is not a simple variable or literal value), the lower bound of the
+      ** range is $P. Due to a quirk in the way whereKeyStats() works, even
+      ** if $L is available, whereKeyStats() is called for both ($P) and
+      ** ($P:$L) and the larger of the two returned values is used.
+      **
+      ** Similarly, iUpper is to be set to the estimate of the number of rows
+      ** less than the upper bound of the range query. Where the upper bound
+      ** is either ($P) or ($P:$U). Again, even if $U is available, both values
+      ** of iUpper are requested of whereKeyStats() and the smaller used.
+      **
+      ** The number of rows between the two bounds is then just iUpper-iLower.
+      */
+      tRowcnt iLower;     /* Rows less than the lower bound */
+      tRowcnt iUpper;     /* Rows less than the upper bound */
+      int iLwrIdx = -2;   /* aSample[] for the lower bound */
+      int iUprIdx = -1;   /* aSample[] for the upper bound */
+
+      if( pRec ){
+        testcase( pRec->nField!=pBuilder->nRecValid );
+        pRec->nField = pBuilder->nRecValid;
+      }
+      /* Determine iLower and iUpper using ($P) only. */
+      if( nEq==0 ){
+        iLower = 0;
+        iUpper = p->nRowEst0;
+      }else{
+        /* Note: this call could be optimized away - since the same values must
+        ** have been requested when testing key $P in whereEqualScanEst(). */
+        whereKeyStats(pParse, p, pRec, 0, a);
+        iLower = a[0];
+        iUpper = a[0] + a[1];
+      }
+
+      assert( pLower==0 || (pLower->eOperator & (WO_GT|WO_GE))!=0 );
+      assert( pUpper==0 || (pUpper->eOperator & (WO_LT|WO_LE))!=0 );
+      assert( p->aSortOrder!=0 );
+      if( p->aSortOrder[nEq] ){
+        /* The roles of pLower and pUpper are swapped for a DESC index */
+        SWAP(WhereTerm*, pLower, pUpper);
+        SWAP(int, nBtm, nTop);
+      }
+
+      /* If possible, improve on the iLower estimate using ($P:$L). */
+      if( pLower ){
+        int n;                    /* Values extracted from pExpr */
+        Expr *pExpr = pLower->pExpr->pRight;
+        rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, nBtm, nEq, &n);
+        if( rc==SQLITE_OK && n ){
+          tRowcnt iNew;
+          u16 mask = WO_GT|WO_LE;
+          if( sqlite3ExprVectorSize(pExpr)>n ) mask = (WO_LE|WO_LT);
+          iLwrIdx = whereKeyStats(pParse, p, pRec, 0, a);
+          iNew = a[0] + ((pLower->eOperator & mask) ? a[1] : 0);
+          if( iNew>iLower ) iLower = iNew;
+          nOut--;
+          pLower = 0;
+        }
+      }
+
+      /* If possible, improve on the iUpper estimate using ($P:$U). */
+      if( pUpper ){
+        int n;                    /* Values extracted from pExpr */
+        Expr *pExpr = pUpper->pExpr->pRight;
+        rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, nTop, nEq, &n);
+        if( rc==SQLITE_OK && n ){
+          tRowcnt iNew;
+          u16 mask = WO_GT|WO_LE;
+          if( sqlite3ExprVectorSize(pExpr)>n ) mask = (WO_LE|WO_LT);
+          iUprIdx = whereKeyStats(pParse, p, pRec, 1, a);
+          iNew = a[0] + ((pUpper->eOperator & mask) ? a[1] : 0);
+          if( iNew<iUpper ) iUpper = iNew;
+          nOut--;
+          pUpper = 0;
+        }
+      }
+
+      pBuilder->pRec = pRec;
+      if( rc==SQLITE_OK ){
+        if( iUpper>iLower ){
+          nNew = sqlite3LogEst(iUpper - iLower);
+          /* TUNING: If both iUpper and iLower are derived from the same
+          ** sample, then assume they are 4x more selective. This brings
+          ** the estimated selectivity more in line with what it would be
+          ** if estimated without the use of STAT3/4 tables. */
+          if( iLwrIdx==iUprIdx ) nNew -= 20;  assert( 20==sqlite3LogEst(4) );
+        }else{
+          nNew = 10;        assert( 10==sqlite3LogEst(2) );
+        }
+        if( nNew<nOut ) nOut = nNew;
+        WHERETRACE(0x10, ("STAT4 range scan: %u..%u  est=%d\n",
+                           (u32)iLower, (u32)iUpper, nOut));
+      }
+    }else{
+      int bDone = 0;
+      rc = whereRangeSkipScanEst(pParse, pLower, pUpper, pLoop, &bDone);
+      if( bDone ) return rc;
+    }
+  }
+#else
+  UNUSED_PARAMETER(pParse);
+  UNUSED_PARAMETER(pBuilder);
+  assert( pLower || pUpper );
+#endif
+  assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 );
+  nNew = whereRangeAdjust(pLower, nOut);
+  nNew = whereRangeAdjust(pUpper, nNew);
+
+  /* TUNING: If there is both an upper and lower limit and neither limit
+  ** has an application-defined likelihood(), assume the range is
+  ** reduced by an additional 75%. This means that, by default, an open-ended
+  ** range query (e.g. col > ?) is assumed to match 1/4 of the rows in the
+  ** index. While a closed range (e.g. col BETWEEN ? AND ?) is estimated to
+  ** match 1/64 of the index. */
+  if( pLower && pLower->truthProb>0 && pUpper && pUpper->truthProb>0 ){
+    nNew -= 20;
+  }
+
+  nOut -= (pLower!=0) + (pUpper!=0);
+  if( nNew<10 ) nNew = 10;
+  if( nNew<nOut ) nOut = nNew;
+#if defined(WHERETRACE_ENABLED)
+  if( pLoop->nOut>nOut ){
+    WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n",
+                    pLoop->nOut, nOut));
+  }
+#endif
+  pLoop->nOut = (LogEst)nOut;
+  return rc;
+}
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+/*
+** Estimate the number of rows that will be returned based on
+** an equality constraint x=VALUE and where that VALUE occurs in
+** the histogram data. This only works when x is the left-most
+** column of an index and sqlite_stat3 histogram data is available
+** for that index. When pExpr==NULL that means the constraint is
+** "x IS NULL" instead of "x=VALUE".
+**
+** Write the estimated row count into *pnRow and return SQLITE_OK.
+** If unable to make an estimate, leave *pnRow unchanged and return
+** non-zero.
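+**
+** (Editor's example: if whereKeyStats() reports aStat[] = {1000, 20} for
+** the probe key - an estimated 1000 index entries less than the key and 20
+** equal to it - then 20 is the value stored in *pnRow.)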
+**
+** This routine can fail if it is unable to load a collating sequence
+** required for string comparison, or if unable to allocate memory
+** for a UTF conversion required for comparison. The error is stored
+** in the pParse structure.
+*/
+static int whereEqualScanEst(
+  Parse *pParse,       /* Parsing & code generating context */
+  WhereLoopBuilder *pBuilder,
+  Expr *pExpr,         /* Expression for VALUE in the x=VALUE constraint */
+  tRowcnt *pnRow       /* Write the revised row estimate here */
+){
+  Index *p = pBuilder->pNew->u.btree.pIndex;
+  int nEq = pBuilder->pNew->u.btree.nEq;
+  UnpackedRecord *pRec = pBuilder->pRec;
+  int rc;                   /* Subfunction return code */
+  tRowcnt a[2];             /* Statistics */
+  int bOk;
+
+  assert( nEq>=1 );
+  assert( nEq<=p->nColumn );
+  assert( p->aSample!=0 );
+  assert( p->nSample>0 );
+  assert( pBuilder->nRecValid<nEq );
+
+  /* If values are not available for all fields of the index to the left
+  ** of this one, no estimate can be made. Return SQLITE_NOTFOUND. */
+  if( pBuilder->nRecValid<(nEq-1) ){
+    return SQLITE_NOTFOUND;
+  }
+
+  /* This is an optimization only. The call to sqlite3Stat4ProbeSetValue()
+  ** below would return the same value. */
+  if( nEq>=p->nColumn ){
+    *pnRow = 1;
+    return SQLITE_OK;
+  }
+
+  rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, 1, nEq-1, &bOk);
+  pBuilder->pRec = pRec;
+  if( rc!=SQLITE_OK ) return rc;
+  if( bOk==0 ) return SQLITE_NOTFOUND;
+  pBuilder->nRecValid = nEq;
+
+  whereKeyStats(pParse, p, pRec, 0, a);
+  WHERETRACE(0x10,("equality scan regions %s(%d): %d\n",
+                   p->zName, nEq-1, (int)a[1]));
+  *pnRow = a[1];
+
+  return rc;
+}
+#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
+
+#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
+/*
+** Estimate the number of rows that will be returned based on
+** an IN constraint where the right-hand side of the IN operator
+** is a list of values. Example:
+**
+**   WHERE x IN (1,2,3,4)
+**
+** Write the estimated row count into *pnRow and return SQLITE_OK.
+** If unable to make an estimate, leave *pnRow unchanged and return
+** non-zero.
+**
+** This routine can fail if it is unable to load a collating sequence
+** required for string comparison, or if unable to allocate memory
+** for a UTF conversion required for comparison. The error is stored
+** in the pParse structure.
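+**
+** (Editor's example: for WHERE x IN (1,2,3) each value is estimated
+** separately via whereEqualScanEst() and the results are summed, so three
+** values matching about 20 rows each give *pnRow==60, capped at the
+** estimated total number of rows in the index.)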
+*/
+static int whereInScanEst(
+  Parse *pParse,       /* Parsing & code generating context */
+  WhereLoopBuilder *pBuilder,
+  ExprList *pList,     /* The value list on the RHS of "x IN (v1,v2,v3,...)" */
+  tRowcnt *pnRow       /* Write the revised row estimate here */
+){
+  Index *p = pBuilder->pNew->u.btree.pIndex;
+  i64 nRow0 = sqlite3LogEstToInt(p->aiRowLogEst[0]);
+  int nRecValid = pBuilder->nRecValid;
+  int rc = SQLITE_OK;     /* Subfunction return code */
+  tRowcnt nEst;           /* Number of rows for a single term */
+  tRowcnt nRowEst = 0;    /* New estimate of the number of rows */
+  int i;                  /* Loop counter */
+
+  assert( p->aSample!=0 );
+  for(i=0; rc==SQLITE_OK && i<pList->nExpr; i++){
+    nEst = nRow0;
+    rc = whereEqualScanEst(pParse, pBuilder, pList->a[i].pExpr, &nEst);
+    nRowEst += nEst;
+    pBuilder->nRecValid = nRecValid;
+  }
+
+  if( rc==SQLITE_OK ){
+    if( nRowEst > nRow0 ) nRowEst = nRow0;
+    *pnRow = nRowEst;
+    WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst));
+  }
+  assert( pBuilder->nRecValid==nRecValid );
+  return rc;
+}
+#endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */
+
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Print the content of a WhereTerm object
+*/
+static void whereTermPrint(WhereTerm *pTerm, int iTerm){
+  if( pTerm==0 ){
+    sqlite3DebugPrintf("TERM-%-3d NULL\n", iTerm);
+  }else{
+    char zType[4];
+    char zLeft[50];
+    memcpy(zType, "...", 4);
+    if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V';
+    if( pTerm->eOperator & WO_EQUIV  ) zType[1] = 'E';
+    if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L';
+    if( pTerm->eOperator & WO_SINGLE ){
+      sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}",
+                       pTerm->leftCursor, pTerm->u.leftColumn);
+    }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){
+      sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%lld",
+                       pTerm->u.pOrInfo->indexable);
+    }else{
+      sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor);
+    }
+    sqlite3DebugPrintf(
+       "TERM-%-3d %p %s %-12s prob=%-3d op=0x%03x wtFlags=0x%04x",
+       iTerm, pTerm, zType, zLeft, pTerm->truthProb,
+       pTerm->eOperator, pTerm->wtFlags);
+    if( pTerm->iField ){
+      sqlite3DebugPrintf(" iField=%d\n", pTerm->iField);
+    }else{
+      sqlite3DebugPrintf("\n");
+    }
+    sqlite3TreeViewExpr(0, pTerm->pExpr, 0);
+  }
+}
+#endif
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Show the complete content of a WhereClause
+*/
+SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){
+  int i;
+  for(i=0; i<pWC->nTerm; i++){
+    whereTermPrint(&pWC->a[i], i);
+  }
+}
+#endif
+
+#ifdef WHERETRACE_ENABLED
+/*
+** Print a WhereLoop object for debugging purposes
+*/
+static void whereLoopPrint(WhereLoop *p, WhereClause *pWC){
+  WhereInfo *pWInfo = pWC->pWInfo;
+  int nb = 1+(pWInfo->pTabList->nSrc+3)/4;
+  struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab;
+  Table *pTab = pItem->pTab;
+  Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1;
+  sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId,
+                     p->iTab, nb, p->maskSelf, nb, p->prereq & mAll);
+  sqlite3DebugPrintf(" %12s",
+                     pItem->zAlias ? pItem->zAlias : pTab->zName);
+  if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
+    const char *zName;
+    if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){
+      if( strncmp(zName, "sqlite_autoindex_", 17)==0 ){
+        int i = sqlite3Strlen30(zName) - 1;
+        while( zName[i]!='_' ) i--;
+        zName += i;
+      }
+      sqlite3DebugPrintf(".%-16s %2d", zName, p->u.btree.nEq);
+    }else{
+      sqlite3DebugPrintf("%20s","");
+    }
+  }else{
+    char *z;
+    if( p->u.vtab.idxStr ){
+      z = sqlite3_mprintf("(%d,\"%s\",%x)",
+                p->u.vtab.idxNum, p->u.vtab.idxStr, p->u.vtab.omitMask);
+    }else{
+      z = sqlite3_mprintf("(%d,%x)", p->u.vtab.idxNum, p->u.vtab.omitMask);
+    }
+    sqlite3DebugPrintf(" %-19s", z);
+    sqlite3_free(z);
+  }
+  if( p->wsFlags & WHERE_SKIPSCAN ){
+    sqlite3DebugPrintf(" f %05x %d-%d", p->wsFlags, p->nLTerm,p->nSkip);
+  }else{
+    sqlite3DebugPrintf(" f %05x N %d", p->wsFlags, p->nLTerm);
+  }
+  sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut);
+  if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){
+    int i;
+    for(i=0; i<p->nLTerm; i++){
+      whereTermPrint(p->aLTerm[i], i);
+    }
+  }
+}
+#endif
+
+/*
+** Convert bulk memory into a valid WhereLoop that can be passed
+** to whereLoopClear harmlessly.
+*/
+static void whereLoopInit(WhereLoop *p){
+  p->aLTerm = p->aLTermSpace;
+  p->nLTerm = 0;
+  p->nLSlot = ArraySize(p->aLTermSpace);
+  p->wsFlags = 0;
+}
+
+/*
+** Clear the WhereLoop.u union. Leave WhereLoop.pLTerm intact.
+*/
+static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){
+  if( p->wsFlags & (WHERE_VIRTUALTABLE|WHERE_AUTO_INDEX) ){
+    if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 && p->u.vtab.needFree ){
+      sqlite3_free(p->u.vtab.idxStr);
+      p->u.vtab.needFree = 0;
+      p->u.vtab.idxStr = 0;
+    }else if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 && p->u.btree.pIndex!=0 ){
+      sqlite3DbFree(db, p->u.btree.pIndex->zColAff);
+      sqlite3DbFree(db, p->u.btree.pIndex);
+      p->u.btree.pIndex = 0;
+    }
+  }
+}
+
+/*
+** Deallocate internal memory used by a WhereLoop object
+*/
+static void whereLoopClear(sqlite3 *db, WhereLoop *p){
+  if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFree(db, p->aLTerm);
+  whereLoopClearUnion(db, p);
+  whereLoopInit(p);
+}
+
+/*
+** Increase the memory allocation for pLoop->aLTerm[] to be at least n.
+*/
+static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){
+  WhereTerm **paNew;
+  if( p->nLSlot>=n ) return SQLITE_OK;
+  n = (n+7)&~7;
+  paNew = sqlite3DbMallocRawNN(db, sizeof(p->aLTerm[0])*n);
+  if( paNew==0 ) return SQLITE_NOMEM_BKPT;
+  memcpy(paNew, p->aLTerm, sizeof(p->aLTerm[0])*p->nLSlot);
+  if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFree(db, p->aLTerm);
+  p->aLTerm = paNew;
+  p->nLSlot = n;
+  return SQLITE_OK;
+}
+
+/*
+** Transfer content from the second pLoop into the first.
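+**
+** (Editor's note: this is a shallow copy plus an ownership hand-off. After
+** the memcpy(), any u.vtab.idxStr or automatic-index u.btree.pIndex that
+** pTo now references is disowned on pFrom, so the resource is not freed
+** twice when both loops are eventually cleared.)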
+*/
+static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){
+  whereLoopClearUnion(db, pTo);
+  if( whereLoopResize(db, pTo, pFrom->nLTerm) ){
+    memset(&pTo->u, 0, sizeof(pTo->u));
+    return SQLITE_NOMEM_BKPT;
+  }
+  memcpy(pTo, pFrom, WHERE_LOOP_XFER_SZ);
+  memcpy(pTo->aLTerm, pFrom->aLTerm, pTo->nLTerm*sizeof(pTo->aLTerm[0]));
+  if( pFrom->wsFlags & WHERE_VIRTUALTABLE ){
+    pFrom->u.vtab.needFree = 0;
+  }else if( (pFrom->wsFlags & WHERE_AUTO_INDEX)!=0 ){
+    pFrom->u.btree.pIndex = 0;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Delete a WhereLoop object
+*/
+static void whereLoopDelete(sqlite3 *db, WhereLoop *p){
+  whereLoopClear(db, p);
+  sqlite3DbFree(db, p);
+}
+
+/*
+** Free a WhereInfo structure
+*/
+static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
+  if( ALWAYS(pWInfo) ){
+    int i;
+    for(i=0; i<pWInfo->nLevel; i++){
+      WhereLevel *pLevel = &pWInfo->a[i];
+      if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE) ){
+        sqlite3DbFree(db, pLevel->u.in.aInLoop);
+      }
+    }
+    sqlite3WhereClauseClear(&pWInfo->sWC);
+    while( pWInfo->pLoops ){
+      WhereLoop *p = pWInfo->pLoops;
+      pWInfo->pLoops = p->pNextLoop;
+      whereLoopDelete(db, p);
+    }
+    sqlite3DbFree(db, pWInfo);
+  }
+}
+
+/*
+** Return TRUE if all of the following are true:
+**
+**   (1)  X has the same or lower cost than Y
+**   (2)  X is a proper subset of Y
+**   (3)  X skips at least as many columns as Y
+**
+** By "proper subset" we mean that X uses fewer WHERE clause terms
+** than Y and that every WHERE clause term used by X is also used
+** by Y.
+**
+** If X is a proper subset of Y then Y is a better choice and ought
+** to have a lower cost. This routine returns TRUE when that cost
+** relationship is inverted and needs to be adjusted. The third rule
+** was added because if X uses skip-scan less than Y it still might
+** deserve a lower cost even if it is a proper subset of Y.
+*/
+static int whereLoopCheaperProperSubset(
+  const WhereLoop *pX,       /* First WhereLoop to compare */
+  const WhereLoop *pY        /* Compare against this WhereLoop */
+){
+  int i, j;
+  if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){
+    return 0; /* X is not a subset of Y */
+  }
+  if( pY->nSkip > pX->nSkip ) return 0;
+  if( pX->rRun >= pY->rRun ){
+    if( pX->rRun > pY->rRun ) return 0;    /* X costs more than Y */
+    if( pX->nOut > pY->nOut ) return 0;    /* X costs more than Y */
+  }
+  for(i=pX->nLTerm-1; i>=0; i--){
+    if( pX->aLTerm[i]==0 ) continue;
+    for(j=pY->nLTerm-1; j>=0; j--){
+      if( pY->aLTerm[j]==pX->aLTerm[i] ) break;
+    }
+    if( j<0 ) return 0;  /* X not a subset of Y since term X[i] not used by Y */
+  }
+  return 1;  /* All conditions met */
+}
+
+/*
+** Try to adjust the cost of WhereLoop pTemplate upwards or downwards so
+** that:
+**
+**   (1) pTemplate costs less than any other WhereLoops that are a proper
+**       subset of pTemplate
+**
+**   (2) pTemplate costs more than any other WhereLoops for which pTemplate
+**       is a proper subset.
+**
+** To say "WhereLoop X is a proper subset of Y" means that X uses fewer
+** WHERE clause terms than Y and that every WHERE clause term used by X is
+** also used by Y.
+*/
+static void whereLoopAdjustCost(const WhereLoop *p, WhereLoop *pTemplate){
+  if( (pTemplate->wsFlags & WHERE_INDEXED)==0 ) return;
+  for(; p; p=p->pNextLoop){
+    if( p->iTab!=pTemplate->iTab ) continue;
+    if( (p->wsFlags & WHERE_INDEXED)==0 ) continue;
+    if( whereLoopCheaperProperSubset(p, pTemplate) ){
+      /* Adjust pTemplate cost downward so that it is cheaper than its
+      ** subset p. */
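+      /* (Editor's example: if p uses terms {a=?} with rRun 40 and pTemplate
+      ** uses {a=?, b=?} with rRun 45, pTemplate is pulled down to rRun 40
+      ** and one fewer estimated output row, since the extra constraint
+      ** cannot make the scan slower or less selective.) */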
+      WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n",
+                       pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut-1));
+      pTemplate->rRun = p->rRun;
+      pTemplate->nOut = p->nOut - 1;
+    }else if( whereLoopCheaperProperSubset(pTemplate, p) ){
+      /* Adjust pTemplate cost upward so that it is costlier than p since
+      ** pTemplate is a proper subset of p */
+      WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n",
+                       pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut+1));
+      pTemplate->rRun = p->rRun;
+      pTemplate->nOut = p->nOut + 1;
+    }
+  }
+}
+
+/*
+** Search the list of WhereLoops in *ppPrev looking for one that can be
+** supplanted by pTemplate.
+**
+** Return NULL if the WhereLoop list contains an entry that can supplant
+** pTemplate, in other words if pTemplate does not belong on the list.
+**
+** If pX is a WhereLoop that pTemplate can supplant, then return the
+** link that points to pX.
+**
+** If pTemplate cannot supplant any existing element of the list but needs
+** to be added to the list, then return a pointer to the tail of the list.
+*/
+static WhereLoop **whereLoopFindLesser(
+  WhereLoop **ppPrev,
+  const WhereLoop *pTemplate
+){
+  WhereLoop *p;
+  for(p=(*ppPrev); p; ppPrev=&p->pNextLoop, p=*ppPrev){
+    if( p->iTab!=pTemplate->iTab || p->iSortIdx!=pTemplate->iSortIdx ){
+      /* If either the iTab or iSortIdx values for two WhereLoop are different
+      ** then those WhereLoops need to be considered separately. Neither is
+      ** a candidate to replace the other. */
+      continue;
+    }
+    /* In the current implementation, the rSetup value is either zero
+    ** or the cost of building an automatic index (NlogN) and the NlogN
+    ** is the same for compatible WhereLoops. */
+    assert( p->rSetup==0 || pTemplate->rSetup==0
+                 || p->rSetup==pTemplate->rSetup );
+
+    /* whereLoopAddBtree() always generates and inserts the automatic index
+    ** case first. Hence compatible candidate WhereLoops never have a larger
+    ** rSetup. Call this SETUP-INVARIANT */
+    assert( p->rSetup>=pTemplate->rSetup );
+
+    /* Any loop using an application-defined index (or PRIMARY KEY or
+    ** UNIQUE constraint) with one or more == constraints is better
+    ** than an automatic index. Unless it is a skip-scan. */
+    if( (p->wsFlags & WHERE_AUTO_INDEX)!=0
+     && (pTemplate->nSkip)==0
+     && (pTemplate->wsFlags & WHERE_INDEXED)!=0
+     && (pTemplate->wsFlags & WHERE_COLUMN_EQ)!=0
+     && (p->prereq & pTemplate->prereq)==pTemplate->prereq
+    ){
+      break;
+    }
+
+    /* If existing WhereLoop p is better than pTemplate, pTemplate can be
+    ** discarded. WhereLoop p is better if:
+    **   (1)  p has no more dependencies than pTemplate, and
+    **   (2)  p has an equal or lower cost than pTemplate
+    */
+    if( (p->prereq & pTemplate->prereq)==p->prereq    /* (1)  */
+     && p->rSetup<=pTemplate->rSetup                  /* (2a) */
+     && p->rRun<=pTemplate->rRun                      /* (2b) */
+     && p->nOut<=pTemplate->nOut                      /* (2c) */
+    ){
+      return 0;  /* Discard pTemplate */
+    }
+
+    /* If pTemplate is always better than p, then cause p to be overwritten
+    ** with pTemplate. pTemplate is better than p if:
+    **   (1)  pTemplate has no more dependencies than p, and
+    **   (2)  pTemplate has an equal or lower cost than p.
+    */
+    if( (p->prereq & pTemplate->prereq)==pTemplate->prereq   /* (1)  */
+     && p->rRun>=pTemplate->rRun                             /* (2a) */
+     && p->nOut>=pTemplate->nOut                             /* (2b) */
+    ){
+      assert( p->rSetup>=pTemplate->rSetup ); /* SETUP-INVARIANT above */
+      break;   /* Cause p to be overwritten by pTemplate */
+    }
+  }
+  return ppPrev;
+}
+
+/*
+** Insert or replace a WhereLoop entry using the template supplied.
+** +** An existing WhereLoop entry might be overwritten if the new template +** is better and has fewer dependencies. Or the template will be ignored +** and no insert will occur if an existing WhereLoop is faster and has +** fewer dependencies than the template. Otherwise a new WhereLoop is +** added based on the template. +** +** If pBuilder->pOrSet is not NULL then we care about only the +** prerequisites and rRun and nOut costs of the N best loops. That +** information is gathered in the pBuilder->pOrSet object. This special +** processing mode is used only for OR clause processing. +** +** When accumulating multiple loops (when pBuilder->pOrSet is NULL) we +** still might overwrite similar loops with the new template if the +** new template is better. Loops may be overwritten if the following +** conditions are met: +** +** (1) They have the same iTab. +** (2) They have the same iSortIdx. +** (3) The template has same or fewer dependencies than the current loop +** (4) The template has the same or lower cost than the current loop +*/ +static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){ + WhereLoop **ppPrev, *p; + WhereInfo *pWInfo = pBuilder->pWInfo; + sqlite3 *db = pWInfo->pParse->db; + int rc; + + /* If pBuilder->pOrSet is defined, then only keep track of the costs + ** and prereqs. + */ + if( pBuilder->pOrSet!=0 ){ + if( pTemplate->nLTerm ){ +#if WHERETRACE_ENABLED + u16 n = pBuilder->pOrSet->n; + int x = +#endif + whereOrInsert(pBuilder->pOrSet, pTemplate->prereq, pTemplate->rRun, + pTemplate->nOut); +#if WHERETRACE_ENABLED /* 0x8 */ + if( sqlite3WhereTrace & 0x8 ){ + sqlite3DebugPrintf(x?" or-%d: ":" or-X: ", n); + whereLoopPrint(pTemplate, pBuilder->pWC); + } +#endif + } + return SQLITE_OK; + } + + /* Look for an existing WhereLoop to replace with pTemplate + */ + whereLoopAdjustCost(pWInfo->pLoops, pTemplate); + ppPrev = whereLoopFindLesser(&pWInfo->pLoops, pTemplate); + + if( ppPrev==0 ){ + /* There already exists a WhereLoop on the list that is better + ** than pTemplate, so just ignore pTemplate */ +#if WHERETRACE_ENABLED /* 0x8 */ + if( sqlite3WhereTrace & 0x8 ){ + sqlite3DebugPrintf(" skip: "); + whereLoopPrint(pTemplate, pBuilder->pWC); + } +#endif + return SQLITE_OK; + }else{ + p = *ppPrev; + } + + /* If we reach this point it means that either p[] should be overwritten + ** with pTemplate[] if p[] exists, or if p==NULL then allocate a new + ** WhereLoop and insert it. + */ +#if WHERETRACE_ENABLED /* 0x8 */ + if( sqlite3WhereTrace & 0x8 ){ + if( p!=0 ){ + sqlite3DebugPrintf("replace: "); + whereLoopPrint(p, pBuilder->pWC); + } + sqlite3DebugPrintf(" add: "); + whereLoopPrint(pTemplate, pBuilder->pWC); + } +#endif + if( p==0 ){ + /* Allocate a new WhereLoop to add to the end of the list */ + *ppPrev = p = sqlite3DbMallocRawNN(db, sizeof(WhereLoop)); + if( p==0 ) return SQLITE_NOMEM_BKPT; + whereLoopInit(p); + p->pNextLoop = 0; + }else{ + /* We will be overwriting WhereLoop p[]. 
But before we do, first
+  ** go through the rest of the list and delete any other entries besides
+  ** p[] that are also supplanted by pTemplate */
+    WhereLoop **ppTail = &p->pNextLoop;
+    WhereLoop *pToDel;
+    while( *ppTail ){
+      ppTail = whereLoopFindLesser(ppTail, pTemplate);
+      if( ppTail==0 ) break;
+      pToDel = *ppTail;
+      if( pToDel==0 ) break;
+      *ppTail = pToDel->pNextLoop;
+#if WHERETRACE_ENABLED /* 0x8 */
+      if( sqlite3WhereTrace & 0x8 ){
+        sqlite3DebugPrintf(" delete: ");
+        whereLoopPrint(pToDel, pBuilder->pWC);
+      }
+#endif
+      whereLoopDelete(db, pToDel);
+    }
+  }
+  rc = whereLoopXfer(db, p, pTemplate);
+  if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){
+    Index *pIndex = p->u.btree.pIndex;
+    if( pIndex && pIndex->tnum==0 ){
+      p->u.btree.pIndex = 0;
+    }
+  }
+  return rc;
+}
+
+/*
+** Adjust the WhereLoop.nOut value downward to account for terms of the
+** WHERE clause that reference the loop but which are not used by an
+** index.
+**
+** For every WHERE clause term that is not used by the index
+** and which has a truth probability assigned by one of the likelihood(),
+** likely(), or unlikely() SQL functions, reduce the estimated number
+** of output rows by the probability specified.
+**
+** TUNING: For every WHERE clause term that is not used by the index
+** and which does not have an assigned truth probability, heuristics
+** described below are used to try to estimate the truth probability.
+** TODO --> Perhaps this is something that could be improved by better
+** table statistics.
+**
+** Heuristic 1: Estimate the truth probability as 93.75%. The 93.75%
+** value corresponds to -1 in LogEst notation, so this means decrement
+** the WhereLoop.nOut field for every such WHERE clause term.
+**
+** Heuristic 2: If there exists one or more WHERE clause terms of the
+** form "x==EXPR" and EXPR is not a constant 0 or 1, then make sure the
+** final output row estimate is no greater than 1/4 of the total number
+** of rows in the table. In other words, assume that x==EXPR will filter
+** out at least 3 out of 4 rows. If EXPR is -1 or 0 or 1, then maybe the
+** "x" column is boolean or else -1 or 0 or 1 is a common default value
+** on the "x" column and so in that case only cap the output row estimate
+** at 1/2 instead of 1/4.
+*/
+static void whereLoopOutputAdjust(
+  WhereClause *pWC,      /* The WHERE clause */
+  WhereLoop *pLoop,      /* The loop to adjust downward */
+  LogEst nRow            /* Number of rows in the entire table */
+){
+  WhereTerm *pTerm, *pX;
+  Bitmask notAllowed = ~(pLoop->prereq|pLoop->maskSelf);
+  int i, j, k;
+  LogEst iReduce = 0;    /* pLoop->nOut should not exceed nRow-iReduce */
+
+  assert( (pLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
+  for(i=pWC->nTerm, pTerm=pWC->a; i>0; i--, pTerm++){
+    if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) break;
+    if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue;
+    if( (pTerm->prereqAll & notAllowed)!=0 ) continue;
+    for(j=pLoop->nLTerm-1; j>=0; j--){
+      pX = pLoop->aLTerm[j];
+      if( pX==0 ) continue;
+      if( pX==pTerm ) break;
+      if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break;
+    }
+    if( j<0 ){
+      if( pTerm->truthProb<=0 ){
+        /* If a truth probability is specified using the likelihood() hints,
+        ** then use the probability provided by the application. */
+        pLoop->nOut += pTerm->truthProb;
+      }else{
+        /* In the absence of explicit truth probabilities, use heuristics to
+        ** guess a reasonable truth probability. */
+        pLoop->nOut--;
+        if( pTerm->eOperator&(WO_EQ|WO_IS) ){
+          Expr *pRight = pTerm->pExpr->pRight;
+          testcase( pTerm->pExpr->op==TK_IS );
+          if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){
+            k = 10;
+          }else{
+            k = 20;
+          }
+          if( iReduce<k ) iReduce = k;
+        }
+      }
+    }
+  }
+  if( pLoop->nOut > nRow-iReduce )  pLoop->nOut = nRow - iReduce;
+}
+
+/*
+** Term pTerm is a vector range comparison operation. The first comparison
+** in the vector can be optimized using column nEq of the index. This
+** function returns the total number of vector elements that can be used
+** as part of the range comparison.
+**
+** For example, if the query is:
+**
+**   WHERE a = ? AND (b, c, d) > (?, ?, ?)
+**
+** and the index:
+**
+**   CREATE INDEX ... ON (a, b, c, d, e)
+**
+** then this function would be invoked with nEq=1. The value returned in
+** this case is 3.
+*/
+static int whereRangeVectorLen(
+  Parse *pParse,       /* Parsing context */
+  int iCur,            /* Cursor open on pIdx */
+  Index *pIdx,         /* The index to be used for an inequality constraint */
+  int nEq,             /* Number of prior equality constraints on same index */
+  WhereTerm *pTerm     /* The vector inequality constraint */
+){
+  int nCmp = sqlite3ExprVectorSize(pTerm->pExpr->pLeft);
+  int i;
+
+  nCmp = MIN(nCmp, (pIdx->nColumn - nEq));
+  for(i=1; i<nCmp; i++){
+    /* Test if comparison i of pTerm is compatible with column (i+nEq)
+    ** of the index. If not, exit the loop. */
+    char aff;                     /* Comparison affinity */
+    char idxaff = 0;              /* Indexed columns affinity */
+    CollSeq *pColl;               /* Comparison collation sequence */
+    Expr *pLhs = pTerm->pExpr->pLeft->x.pList->a[i].pExpr;
+    Expr *pRhs = pTerm->pExpr->pRight;
+    if( pRhs->flags & EP_xIsSelect ){
+      pRhs = pRhs->x.pSelect->pEList->a[i].pExpr;
+    }else{
+      pRhs = pRhs->x.pList->a[i].pExpr;
+    }
+
+    /* Check that the LHS of the comparison is a column reference to
+    ** the right column of the right source table. And that the sort
+    ** order of the index column is the same as the sort order of the
+    ** leftmost index column. */
+    if( pLhs->op!=TK_COLUMN
+     || pLhs->iTable!=iCur
+     || pLhs->iColumn!=pIdx->aiColumn[i+nEq]
+     || pIdx->aSortOrder[i+nEq]!=pIdx->aSortOrder[nEq]
+    ){
+      break;
+    }
+
+    testcase( pLhs->iColumn==XN_ROWID );
+    aff = sqlite3CompareAffinity(pRhs, sqlite3ExprAffinity(pLhs));
+    idxaff = sqlite3TableColumnAffinity(pIdx->pTable, pLhs->iColumn);
+    if( aff!=idxaff ) break;
+
+    pColl = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs);
+    if( pColl==0 ) break;
+    if( sqlite3StrICmp(pColl->zName, pIdx->azColl[i+nEq]) ) break;
+  }
+  return i;
+}
+
+/*
+** Adjust the cost C by the costMult factor T. This only occurs if
+** compiled with -DSQLITE_ENABLE_COSTMULT
+*/
+#ifdef SQLITE_ENABLE_COSTMULT
+# define ApplyCostMultiplier(C,T)  C += T
+#else
+# define ApplyCostMultiplier(C,T)
+#endif
+
+/*
+** We have so far matched pBuilder->pNew->u.btree.nEq terms of the
+** index pIndex. Try to match one more.
+**
+** When this function is called, pBuilder->pNew->nOut contains the
+** number of rows expected to be visited by filtering using the nEq
+** terms only. If it is modified, this value is restored before this
+** function returns.
+**
+** If pProbe->tnum==0, that means pIndex is a fake index used for the
+** INTEGER PRIMARY KEY.
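+**
+** (Editor's example: probing CREATE INDEX i1 ON t1(a,b) with nEq==1 means
+** column "a" is already constrained; the loop below then looks for usable
+** terms on column "b" - b=?, b IN (...), b IS, b IS NULL, or range bounds
+** b>?/b<? - and builds a candidate WhereLoop for each one found.)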
+*/ +static int whereLoopAddBtreeIndex( + WhereLoopBuilder *pBuilder, /* The WhereLoop factory */ + struct SrcList_item *pSrc, /* FROM clause term being analyzed */ + Index *pProbe, /* An index on pSrc */ + LogEst nInMul /* log(Number of iterations due to IN) */ +){ + WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */ + Parse *pParse = pWInfo->pParse; /* Parsing context */ + sqlite3 *db = pParse->db; /* Database connection malloc context */ + WhereLoop *pNew; /* Template WhereLoop under construction */ + WhereTerm *pTerm; /* A WhereTerm under consideration */ + int opMask; /* Valid operators for constraints */ + WhereScan scan; /* Iterator for WHERE terms */ + Bitmask saved_prereq; /* Original value of pNew->prereq */ + u16 saved_nLTerm; /* Original value of pNew->nLTerm */ + u16 saved_nEq; /* Original value of pNew->u.btree.nEq */ + u16 saved_nBtm; /* Original value of pNew->u.btree.nBtm */ + u16 saved_nTop; /* Original value of pNew->u.btree.nTop */ + u16 saved_nSkip; /* Original value of pNew->nSkip */ + u32 saved_wsFlags; /* Original value of pNew->wsFlags */ + LogEst saved_nOut; /* Original value of pNew->nOut */ + int rc = SQLITE_OK; /* Return code */ + LogEst rSize; /* Number of rows in the table */ + LogEst rLogSize; /* Logarithm of table size */ + WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */ + + pNew = pBuilder->pNew; + if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; + WHERETRACE(0x800, ("BEGIN addBtreeIdx(%s), nEq=%d\n", + pProbe->zName, pNew->u.btree.nEq)); + + assert( (pNew->wsFlags & WHERE_VIRTUALTABLE)==0 ); + assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 ); + if( pNew->wsFlags & WHERE_BTM_LIMIT ){ + opMask = WO_LT|WO_LE; + }else{ + assert( pNew->u.btree.nBtm==0 ); + opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; + } + if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + + assert( pNew->u.btree.nEqnColumn ); + + saved_nEq = pNew->u.btree.nEq; + saved_nBtm = pNew->u.btree.nBtm; + saved_nTop = pNew->u.btree.nTop; + saved_nSkip = pNew->nSkip; + saved_nLTerm = pNew->nLTerm; + saved_wsFlags = pNew->wsFlags; + saved_prereq = pNew->prereq; + saved_nOut = pNew->nOut; + pTerm = whereScanInit(&scan, pBuilder->pWC, pSrc->iCursor, saved_nEq, + opMask, pProbe); + pNew->rSetup = 0; + rSize = pProbe->aiRowLogEst[0]; + rLogSize = estLog(rSize); + for(; rc==SQLITE_OK && pTerm!=0; pTerm = whereScanNext(&scan)){ + u16 eOp = pTerm->eOperator; /* Shorthand for pTerm->eOperator */ + LogEst rCostIdx; + LogEst nOutUnadjusted; /* nOut before IN() and WHERE adjustments */ + int nIn = 0; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + int nRecValid = pBuilder->nRecValid; +#endif + if( (eOp==WO_ISNULL || (pTerm->wtFlags&TERM_VNULL)!=0) + && indexColumnNotNull(pProbe, saved_nEq) + ){ + continue; /* ignore IS [NOT] NULL constraints on NOT NULL columns */ + } + if( pTerm->prereqRight & pNew->maskSelf ) continue; + + /* Do not allow the upper bound of a LIKE optimization range constraint + ** to mix with a lower range bound from some other source */ + if( pTerm->wtFlags & TERM_LIKEOPT && pTerm->eOperator==WO_LT ) continue; + + /* Do not allow IS constraints from the WHERE clause to be used by the + ** right table of a LEFT JOIN. 
Only constraints in the ON clause are + ** allowed */ + if( (pSrc->fg.jointype & JT_LEFT)!=0 + && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) + && (eOp & (WO_IS|WO_ISNULL))!=0 + ){ + testcase( eOp & WO_IS ); + testcase( eOp & WO_ISNULL ); + continue; + } + + pNew->wsFlags = saved_wsFlags; + pNew->u.btree.nEq = saved_nEq; + pNew->u.btree.nBtm = saved_nBtm; + pNew->u.btree.nTop = saved_nTop; + pNew->nLTerm = saved_nLTerm; + if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ + pNew->aLTerm[pNew->nLTerm++] = pTerm; + pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf; + + assert( nInMul==0 + || (pNew->wsFlags & WHERE_COLUMN_NULL)!=0 + || (pNew->wsFlags & WHERE_COLUMN_IN)!=0 + || (pNew->wsFlags & WHERE_SKIPSCAN)!=0 + ); + + if( eOp & WO_IN ){ + Expr *pExpr = pTerm->pExpr; + pNew->wsFlags |= WHERE_COLUMN_IN; + if( ExprHasProperty(pExpr, EP_xIsSelect) ){ + /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */ + int i; + nIn = 46; assert( 46==sqlite3LogEst(25) ); + + /* The expression may actually be of the form (x, y) IN (SELECT...). + ** In this case there is a separate term for each of (x) and (y). + ** However, the nIn multiplier should only be applied once, not once + ** for each such term. The following loop checks that pTerm is the + ** first such term in use, and sets nIn back to 0 if it is not. */ + for(i=0; inLTerm-1; i++){ + if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ) nIn = 0; + } + }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){ + /* "x IN (value, value, ...)" */ + nIn = sqlite3LogEst(pExpr->x.pList->nExpr); + assert( nIn>0 ); /* RHS always has 2 or more terms... The parser + ** changes "x IN (?)" into "x=?". */ + } + }else if( eOp & (WO_EQ|WO_IS) ){ + int iCol = pProbe->aiColumn[saved_nEq]; + pNew->wsFlags |= WHERE_COLUMN_EQ; + assert( saved_nEq==pNew->u.btree.nEq ); + if( iCol==XN_ROWID + || (iCol>0 && nInMul==0 && saved_nEq==pProbe->nKeyCol-1) + ){ + if( iCol>=0 && pProbe->uniqNotNull==0 ){ + pNew->wsFlags |= WHERE_UNQ_WANTED; + }else{ + pNew->wsFlags |= WHERE_ONEROW; + } + } + }else if( eOp & WO_ISNULL ){ + pNew->wsFlags |= WHERE_COLUMN_NULL; + }else if( eOp & (WO_GT|WO_GE) ){ + testcase( eOp & WO_GT ); + testcase( eOp & WO_GE ); + pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT; + pNew->u.btree.nBtm = whereRangeVectorLen( + pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm + ); + pBtm = pTerm; + pTop = 0; + if( pTerm->wtFlags & TERM_LIKEOPT ){ + /* Range contraints that come from the LIKE optimization are + ** always used in pairs. */ + pTop = &pTerm[1]; + assert( (pTop-(pTerm->pWC->a))pWC->nTerm ); + assert( pTop->wtFlags & TERM_LIKEOPT ); + assert( pTop->eOperator==WO_LT ); + if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ + pNew->aLTerm[pNew->nLTerm++] = pTop; + pNew->wsFlags |= WHERE_TOP_LIMIT; + pNew->u.btree.nTop = 1; + } + }else{ + assert( eOp & (WO_LT|WO_LE) ); + testcase( eOp & WO_LT ); + testcase( eOp & WO_LE ); + pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT; + pNew->u.btree.nTop = whereRangeVectorLen( + pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm + ); + pTop = pTerm; + pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ? + pNew->aLTerm[pNew->nLTerm-2] : 0; + } + + /* At this point pNew->nOut is set to the number of rows expected to + ** be visited by the index scan before considering term pTerm, or the + ** values of nIn and nInMul. In other words, assuming that all + ** "x IN(...)" terms are replaced with "x = ?". 
This block updates + ** the value of pNew->nOut to account for pTerm (but not nIn/nInMul). */ + assert( pNew->nOut==saved_nOut ); + if( pNew->wsFlags & WHERE_COLUMN_RANGE ){ + /* Adjust nOut using stat3/stat4 data. Or, if there is no stat3/stat4 + ** data, using some other estimate. */ + whereRangeScanEst(pParse, pBuilder, pBtm, pTop, pNew); + }else{ + int nEq = ++pNew->u.btree.nEq; + assert( eOp & (WO_ISNULL|WO_EQ|WO_IN|WO_IS) ); + + assert( pNew->nOut==saved_nOut ); + if( pTerm->truthProb<=0 && pProbe->aiColumn[saved_nEq]>=0 ){ + assert( (eOp & WO_IN) || nIn==0 ); + testcase( eOp & WO_IN ); + pNew->nOut += pTerm->truthProb; + pNew->nOut -= nIn; + }else{ +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + tRowcnt nOut = 0; + if( nInMul==0 + && pProbe->nSample + && pNew->u.btree.nEq<=pProbe->nSampleCol + && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect)) + ){ + Expr *pExpr = pTerm->pExpr; + if( (eOp & (WO_EQ|WO_ISNULL|WO_IS))!=0 ){ + testcase( eOp & WO_EQ ); + testcase( eOp & WO_IS ); + testcase( eOp & WO_ISNULL ); + rc = whereEqualScanEst(pParse, pBuilder, pExpr->pRight, &nOut); + }else{ + rc = whereInScanEst(pParse, pBuilder, pExpr->x.pList, &nOut); + } + if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; + if( rc!=SQLITE_OK ) break; /* Jump out of the pTerm loop */ + if( nOut ){ + pNew->nOut = sqlite3LogEst(nOut); + if( pNew->nOut>saved_nOut ) pNew->nOut = saved_nOut; + pNew->nOut -= nIn; + } + } + if( nOut==0 ) +#endif + { + pNew->nOut += (pProbe->aiRowLogEst[nEq] - pProbe->aiRowLogEst[nEq-1]); + if( eOp & WO_ISNULL ){ + /* TUNING: If there is no likelihood() value, assume that a + ** "col IS NULL" expression matches twice as many rows + ** as (col=?). */ + pNew->nOut += 10; + } + } + } + } + + /* Set rCostIdx to the cost of visiting selected rows in index. Add + ** it to pNew->rRun, which is currently set to the cost of the index + ** seek only. Then, if this is a non-covering index, add the cost of + ** visiting the rows in the main table. */ + rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); + if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){ + pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); + } + ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult); + + nOutUnadjusted = pNew->nOut; + pNew->rRun += nInMul + nIn; + pNew->nOut += nInMul + nIn; + whereLoopOutputAdjust(pBuilder->pWC, pNew, rSize); + rc = whereLoopInsert(pBuilder, pNew); + + if( pNew->wsFlags & WHERE_COLUMN_RANGE ){ + pNew->nOut = saved_nOut; + }else{ + pNew->nOut = nOutUnadjusted; + } + + if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 + && pNew->u.btree.nEqnColumn + ){ + whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn); + } + pNew->nOut = saved_nOut; +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + pBuilder->nRecValid = nRecValid; +#endif + } + pNew->prereq = saved_prereq; + pNew->u.btree.nEq = saved_nEq; + pNew->u.btree.nBtm = saved_nBtm; + pNew->u.btree.nTop = saved_nTop; + pNew->nSkip = saved_nSkip; + pNew->wsFlags = saved_wsFlags; + pNew->nOut = saved_nOut; + pNew->nLTerm = saved_nLTerm; + + /* Consider using a skip-scan if there are no WHERE clause constraints + ** available for the left-most terms of the index, and if the average + ** number of repeats in the left-most terms is at least 18. + ** + ** The magic number 18 is selected on the basis that scanning 17 rows + ** is almost always quicker than an index seek (even though if the index + ** contains fewer than 2^17 rows we assume otherwise in other parts of + ** the code). 
And, even if it is not, it should not be too much slower.
+  ** On the other hand, the extra seeks could end up being significantly
+  ** more expensive.  */
+  assert( 42==sqlite3LogEst(18) );
+  if( saved_nEq==saved_nSkip
+   && saved_nEq+1<pProbe->nKeyCol
+   && pProbe->noSkipScan==0
+   && pProbe->aiRowLogEst[saved_nEq+1]>=42  /* TUNING: Minimum for skip-scan */
+   && (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK
+  ){
+    LogEst nIter;
+    pNew->u.btree.nEq++;
+    pNew->nSkip++;
+    pNew->aLTerm[pNew->nLTerm++] = 0;
+    pNew->wsFlags |= WHERE_SKIPSCAN;
+    nIter = pProbe->aiRowLogEst[saved_nEq] - pProbe->aiRowLogEst[saved_nEq+1];
+    pNew->nOut -= nIter;
+    /* TUNING:  Because uncertainties in the estimates for skip-scan queries,
+    ** add a 1.375 fudge factor to make skip-scan slightly less likely. */
+    nIter += 5;
+    whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nIter + nInMul);
+    pNew->nOut = saved_nOut;
+    pNew->u.btree.nEq = saved_nEq;
+    pNew->nSkip = saved_nSkip;
+    pNew->wsFlags = saved_wsFlags;
+  }
+
+  WHERETRACE(0x800, ("END addBtreeIdx(%s), nEq=%d, rc=%d\n",
+                      pProbe->zName, saved_nEq, rc));
+  return rc;
+}
+
+/*
+** Return True if it is possible that pIndex might be useful in
+** implementing the ORDER BY clause in pBuilder.
+**
+** Return False if pBuilder does not contain an ORDER BY clause or
+** if there is no way for pIndex to be useful in implementing that
+** ORDER BY clause.
+*/
+static int indexMightHelpWithOrderBy(
+  WhereLoopBuilder *pBuilder,
+  Index *pIndex,
+  int iCursor
+){
+  ExprList *pOB;
+  ExprList *aColExpr;
+  int ii, jj;
+
+  if( pIndex->bUnordered ) return 0;
+  if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0;
+  for(ii=0; ii<pOB->nExpr; ii++){
+    Expr *pExpr = sqlite3ExprSkipCollate(pOB->a[ii].pExpr);
+    if( pExpr->op==TK_COLUMN && pExpr->iTable==iCursor ){
+      if( pExpr->iColumn<0 ) return 1;
+      for(jj=0; jj<pIndex->nKeyCol; jj++){
+        if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1;
+      }
+    }else if( (aColExpr = pIndex->aColExpr)!=0 ){
+      for(jj=0; jj<pIndex->nKeyCol; jj++){
+        if( pIndex->aiColumn[jj]!=XN_EXPR ) continue;
+        if( sqlite3ExprCompare(pExpr,aColExpr->a[jj].pExpr,iCursor)==0 ){
+          return 1;
+        }
+      }
+    }
+  }
+  return 0;
+}
+
+/*
+** Return a bitmask where 1s indicate that the corresponding column of
+** the table is used by an index.  Only the first 63 columns are considered.
+*/
+static Bitmask columnsInIndex(Index *pIdx){
+  Bitmask m = 0;
+  int j;
+  for(j=pIdx->nColumn-1; j>=0; j--){
+    int x = pIdx->aiColumn[j];
+    if( x>=0 ){
+      testcase( x==BMS-1 );
+      testcase( x==BMS-2 );
+      if( x<BMS-1 ) m |= MASKBIT(x);
+    }
+  }
+  return m;
+}
+
+/* Check to see if a partial index with pPartIndexWhere can be used
+** in the current query.  Return true if it can be and false if not.
+*/
+static int whereUsablePartialIndex(int iTab, WhereClause *pWC, Expr *pWhere){
+  int i;
+  WhereTerm *pTerm;
+  while( pWhere->op==TK_AND ){
+    if( !whereUsablePartialIndex(iTab,pWC,pWhere->pLeft) ) return 0;
+    pWhere = pWhere->pRight;
+  }
+  for(i=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
+    Expr *pExpr = pTerm->pExpr;
+    if( sqlite3ExprImpliesExpr(pExpr, pWhere, iTab)
+     && (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->iRightJoinTable==iTab)
+    ){
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/*
+** Add all WhereLoop objects for a single table of the join where the table
+** is identified by pBuilder->pNew->iTab.  That table is guaranteed to be
+** a b-tree table, not a virtual table.
+**
+** The costs (WhereLoop.rRun) of the b-tree loops added by this function
+** are calculated as follows:
+**
+** For a full scan, assuming the table (or index) contains nRow rows:
+**
+**     cost = nRow * 3.0                    // full-table scan
+**     cost = nRow * K                      // scan of covering index
+**     cost = nRow * (K+3.0)                // scan of non-covering index
+**
+** where K is a value between 1.1 and 3.0 set based on the relative
+** estimated average size of the index and table records.
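+**
+** For example (illustrative numbers): a table of about 1,000,000 rows
+** has nRow==199 in the LogEst units used here, so a full-table scan is
+** costed at roughly 199+16==215, 16 being approximately 10*log2(3.0).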
+** +** For an index scan, where nVisit is the number of index rows visited +** by the scan, and nSeek is the number of seek operations required on +** the index b-tree: +** +** cost = nSeek * (log(nRow) + K * nVisit) // covering index +** cost = nSeek * (log(nRow) + (K+3.0) * nVisit) // non-covering index +** +** Normally, nSeek is 1. nSeek values greater than 1 come about if the +** WHERE clause includes "x IN (....)" terms used in place of "x=?". Or when +** implicit "x IN (SELECT x FROM tbl)" terms are added for skip-scans. +** +** The estimated values (nRow, nVisit, nSeek) often contain a large amount +** of uncertainty. For this reason, scoring is designed to pick plans that +** "do the least harm" if the estimates are inaccurate. For example, a +** log(nRow) factor is omitted from a non-covering index scan in order to +** bias the scoring in favor of using an index, since the worst-case +** performance of using an index is far better than the worst-case performance +** of a full table scan. +*/ +static int whereLoopAddBtree( + WhereLoopBuilder *pBuilder, /* WHERE clause information */ + Bitmask mPrereq /* Extra prerequesites for using this table */ +){ + WhereInfo *pWInfo; /* WHERE analysis context */ + Index *pProbe; /* An index we are evaluating */ + Index sPk; /* A fake index object for the primary key */ + LogEst aiRowEstPk[2]; /* The aiRowLogEst[] value for the sPk index */ + i16 aiColumnPk = -1; /* The aColumn[] value for the sPk index */ + SrcList *pTabList; /* The FROM clause */ + struct SrcList_item *pSrc; /* The FROM clause btree term to add */ + WhereLoop *pNew; /* Template WhereLoop object */ + int rc = SQLITE_OK; /* Return code */ + int iSortIdx = 1; /* Index number */ + int b; /* A boolean value */ + LogEst rSize; /* number of rows in the table */ + LogEst rLogSize; /* Logarithm of the number of rows in the table */ + WhereClause *pWC; /* The parsed WHERE clause */ + Table *pTab; /* Table being queried */ + + pNew = pBuilder->pNew; + pWInfo = pBuilder->pWInfo; + pTabList = pWInfo->pTabList; + pSrc = pTabList->a + pNew->iTab; + pTab = pSrc->pTab; + pWC = pBuilder->pWC; + assert( !IsVirtual(pSrc->pTab) ); + + if( pSrc->pIBIndex ){ + /* An INDEXED BY clause specifies a particular index to use */ + pProbe = pSrc->pIBIndex; + }else if( !HasRowid(pTab) ){ + pProbe = pTab->pIndex; + }else{ + /* There is no INDEXED BY clause. Create a fake Index object in local + ** variable sPk to represent the rowid primary key index. 
Make this
+    ** fake index the first in a chain of Index objects with all of the real
+    ** indices to follow */
+    Index *pFirst;                  /* First of real indices on the table */
+    memset(&sPk, 0, sizeof(Index));
+    sPk.nKeyCol = 1;
+    sPk.nColumn = 1;
+    sPk.aiColumn = &aiColumnPk;
+    sPk.aiRowLogEst = aiRowEstPk;
+    sPk.onError = OE_Replace;
+    sPk.pTable = pTab;
+    sPk.szIdxRow = pTab->szTabRow;
+    aiRowEstPk[0] = pTab->nRowLogEst;
+    aiRowEstPk[1] = 0;
+    pFirst = pSrc->pTab->pIndex;
+    if( pSrc->fg.notIndexed==0 ){
+      /* The real indices of the table are only considered if the
+      ** NOT INDEXED qualifier is omitted from the FROM clause */
+      sPk.pNext = pFirst;
+    }
+    pProbe = &sPk;
+  }
+  rSize = pTab->nRowLogEst;
+  rLogSize = estLog(rSize);
+
+#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+  /* Automatic indexes */
+  if( !pBuilder->pOrSet      /* Not part of an OR optimization */
+   && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0
+   && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0
+   && pSrc->pIBIndex==0      /* Has no INDEXED BY clause */
+   && !pSrc->fg.notIndexed   /* Has no NOT INDEXED clause */
+   && HasRowid(pTab)         /* Not WITHOUT ROWID table. (FIXME: Why not?) */
+   && !pSrc->fg.isCorrelated /* Not a correlated subquery */
+   && !pSrc->fg.isRecursive  /* Not a recursive common table expression. */
+  ){
+    /* Generate auto-index WhereLoops */
+    WhereTerm *pTerm;
+    WhereTerm *pWCEnd = pWC->a + pWC->nTerm;
+    for(pTerm=pWC->a; rc==SQLITE_OK && pTerm<pWCEnd; pTerm++){
+      if( pTerm->prereqRight & pNew->maskSelf ) continue;
+      if( termCanDriveIndex(pTerm, pSrc, 0) ){
+        pNew->u.btree.nEq = 1;
+        pNew->nSkip = 0;
+        pNew->u.btree.pIndex = 0;
+        pNew->nLTerm = 1;
+        pNew->aLTerm[0] = pTerm;
+        /* TUNING: One-time cost for computing the automatic index is
+        ** estimated to be X*N*log2(N) where N is the number of rows in
+        ** the table being indexed and where X is 7 (LogEst=28) for normal
+        ** tables or 1.375 (LogEst=4) for views and subqueries.  The value
+        ** of X is smaller for views and subqueries so that the query planner
+        ** will be more aggressive about generating automatic indexes for
+        ** those objects, since there is no opportunity to add schema
+        ** indexes on subqueries and views. */
+        pNew->rSetup = rLogSize + rSize + 4;
+        if( pTab->pSelect==0 && (pTab->tabFlags & TF_Ephemeral)==0 ){
+          pNew->rSetup += 24;
+        }
+        ApplyCostMultiplier(pNew->rSetup, pTab->costMult);
+        if( pNew->rSetup<0 ) pNew->rSetup = 0;
+        /* TUNING: Each index lookup yields 20 rows in the table.  This
+        ** is more than the usual guess of 10 rows, since we have no way
+        ** of knowing how selective the index will ultimately be.  It would
+        ** not be unreasonable to make this value much larger.
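+        **
+        ** (In the LogEst units used below, that guess of 20 rows per lookup
+        ** appears as nOut==43, since 43 is approximately 10*log2(20), as the
+        ** adjacent assert confirms.)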
*/ + pNew->nOut = 43; assert( 43==sqlite3LogEst(20) ); + pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut); + pNew->wsFlags = WHERE_AUTO_INDEX; + pNew->prereq = mPrereq | pTerm->prereqRight; + rc = whereLoopInsert(pBuilder, pNew); + } + } + } +#endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ + + /* Loop over all indices + */ + for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){ + if( pProbe->pPartIdxWhere!=0 + && !whereUsablePartialIndex(pSrc->iCursor, pWC, pProbe->pPartIdxWhere) ){ + testcase( pNew->iTab!=pSrc->iCursor ); /* See ticket [98d973b8f5] */ + continue; /* Partial index inappropriate for this query */ + } + rSize = pProbe->aiRowLogEst[0]; + pNew->u.btree.nEq = 0; + pNew->u.btree.nBtm = 0; + pNew->u.btree.nTop = 0; + pNew->nSkip = 0; + pNew->nLTerm = 0; + pNew->iSortIdx = 0; + pNew->rSetup = 0; + pNew->prereq = mPrereq; + pNew->nOut = rSize; + pNew->u.btree.pIndex = pProbe; + b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor); + /* The ONEPASS_DESIRED flags never occurs together with ORDER BY */ + assert( (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || b==0 ); + if( pProbe->tnum<=0 ){ + /* Integer primary key index */ + pNew->wsFlags = WHERE_IPK; + + /* Full table scan */ + pNew->iSortIdx = b ? iSortIdx : 0; + /* TUNING: Cost of full table scan is (N*3.0). */ + pNew->rRun = rSize + 16; + ApplyCostMultiplier(pNew->rRun, pTab->costMult); + whereLoopOutputAdjust(pWC, pNew, rSize); + rc = whereLoopInsert(pBuilder, pNew); + pNew->nOut = rSize; + if( rc ) break; + }else{ + Bitmask m; + if( pProbe->isCovering ){ + pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; + m = 0; + }else{ + m = pSrc->colUsed & ~columnsInIndex(pProbe); + pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED; + } + + /* Full scan via index */ + if( b + || !HasRowid(pTab) + || pProbe->pPartIdxWhere!=0 + || ( m==0 + && pProbe->bUnordered==0 + && (pProbe->szIdxRowszTabRow) + && (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 + && sqlite3GlobalConfig.bUseCis + && OptimizationEnabled(pWInfo->pParse->db, SQLITE_CoverIdxScan) + ) + ){ + pNew->iSortIdx = b ? iSortIdx : 0; + + /* The cost of visiting the index rows is N*K, where K is + ** between 1.1 and 3.0, depending on the relative sizes of the + ** index and table rows. */ + pNew->rRun = rSize + 1 + (15*pProbe->szIdxRow)/pTab->szTabRow; + if( m!=0 ){ + /* If this is a non-covering index scan, add in the cost of + ** doing table lookups. The cost will be 3x the number of + ** lookups. Take into account WHERE clause terms that can be + ** satisfied using just the index, and that do not require a + ** table lookup. */ + LogEst nLookup = rSize + 16; /* Base cost: N*3 */ + int ii; + int iCur = pSrc->iCursor; + WhereClause *pWC2 = &pWInfo->sWC; + for(ii=0; iinTerm; ii++){ + WhereTerm *pTerm = &pWC2->a[ii]; + if( !sqlite3ExprCoveredByIndex(pTerm->pExpr, iCur, pProbe) ){ + break; + } + /* pTerm can be evaluated using just the index. 
So reduce + ** the expected number of table lookups accordingly */ + if( pTerm->truthProb<=0 ){ + nLookup += pTerm->truthProb; + }else{ + nLookup--; + if( pTerm->eOperator & (WO_EQ|WO_IS) ) nLookup -= 19; + } + } + + pNew->rRun = sqlite3LogEstAdd(pNew->rRun, nLookup); + } + ApplyCostMultiplier(pNew->rRun, pTab->costMult); + whereLoopOutputAdjust(pWC, pNew, rSize); + rc = whereLoopInsert(pBuilder, pNew); + pNew->nOut = rSize; + if( rc ) break; + } + } + + rc = whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, 0); +#ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + sqlite3Stat4ProbeFree(pBuilder->pRec); + pBuilder->nRecValid = 0; + pBuilder->pRec = 0; +#endif + + /* If there was an INDEXED BY clause, then only that one index is + ** considered. */ + if( pSrc->pIBIndex ) break; + } + return rc; +} + +#ifndef SQLITE_OMIT_VIRTUALTABLE + +/* +** Argument pIdxInfo is already populated with all constraints that may +** be used by the virtual table identified by pBuilder->pNew->iTab. This +** function marks a subset of those constraints usable, invokes the +** xBestIndex method and adds the returned plan to pBuilder. +** +** A constraint is marked usable if: +** +** * Argument mUsable indicates that its prerequisites are available, and +** +** * It is not one of the operators specified in the mExclude mask passed +** as the fourth argument (which in practice is either WO_IN or 0). +** +** Argument mPrereq is a mask of tables that must be scanned before the +** virtual table in question. These are added to the plans prerequisites +** before it is added to pBuilder. +** +** Output parameter *pbIn is set to true if the plan added to pBuilder +** uses one or more WO_IN terms, or false otherwise. +*/ +static int whereLoopAddVirtualOne( + WhereLoopBuilder *pBuilder, + Bitmask mPrereq, /* Mask of tables that must be used. */ + Bitmask mUsable, /* Mask of usable tables */ + u16 mExclude, /* Exclude terms using these operators */ + sqlite3_index_info *pIdxInfo, /* Populated object for xBestIndex */ + u16 mNoOmit, /* Do not omit these constraints */ + int *pbIn /* OUT: True if plan uses an IN(...) op */ +){ + WhereClause *pWC = pBuilder->pWC; + struct sqlite3_index_constraint *pIdxCons; + struct sqlite3_index_constraint_usage *pUsage = pIdxInfo->aConstraintUsage; + int i; + int mxTerm; + int rc = SQLITE_OK; + WhereLoop *pNew = pBuilder->pNew; + Parse *pParse = pBuilder->pWInfo->pParse; + struct SrcList_item *pSrc = &pBuilder->pWInfo->pTabList->a[pNew->iTab]; + int nConstraint = pIdxInfo->nConstraint; + + assert( (mUsable & mPrereq)==mPrereq ); + *pbIn = 0; + pNew->prereq = mPrereq; + + /* Set the usable flag on the subset of constraints identified by + ** arguments mUsable and mExclude. 
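+  **
+  ** For example (hypothetical query, for illustration only): in
+  ** "SELECT * FROM t1, vt WHERE vt.a=t1.x AND vt.b=7", a call with
+  ** mUsable covering t1 can mark both constraints usable, while a call
+  ** with mUsable==0 leaves only vt.b=7 usable, because vt.a=t1.x
+  ** requires t1 to be scanned first.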
+  */
+  pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
+  for(i=0; i<nConstraint; i++, pIdxCons++){
+    WhereTerm *pTerm = &pWC->a[pIdxCons->iTermOffset];
+    pIdxCons->usable = 0;
+    if( (pTerm->prereqRight & mUsable)==pTerm->prereqRight
+     && (pTerm->eOperator & mExclude)==0
+    ){
+      pIdxCons->usable = 1;
+    }
+  }
+
+  /* Initialize the output fields of the sqlite3_index_info structure */
+  memset(pUsage, 0, sizeof(pUsage[0])*nConstraint);
+  assert( pIdxInfo->needToFreeIdxStr==0 );
+  pIdxInfo->idxStr = 0;
+  pIdxInfo->idxNum = 0;
+  pIdxInfo->orderByConsumed = 0;
+  pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2;
+  pIdxInfo->estimatedRows = 25;
+  pIdxInfo->idxFlags = 0;
+  pIdxInfo->colUsed = (sqlite3_int64)pSrc->colUsed;
+
+  /* Invoke the virtual table xBestIndex() method */
+  rc = vtabBestIndex(pParse, pSrc->pTab, pIdxInfo);
+  if( rc ) return rc;
+
+  mxTerm = -1;
+  assert( pNew->nLSlot>=nConstraint );
+  for(i=0; i<nConstraint; i++) pNew->aLTerm[i] = 0;
+  pNew->u.vtab.omitMask = 0;
+  pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint;
+  for(i=0; i<nConstraint; i++, pIdxCons++){
+    int iTerm;
+    if( (iTerm = pUsage[i].argvIndex - 1)>=0 ){
+      WhereTerm *pTerm;
+      int j = pIdxCons->iTermOffset;
+      if( iTerm>=nConstraint
+       || j<0
+       || j>=pWC->nTerm
+       || pNew->aLTerm[iTerm]!=0
+       || pIdxCons->usable==0
+      ){
+        rc = SQLITE_ERROR;
+        sqlite3ErrorMsg(pParse,"%s.xBestIndex malfunction",pSrc->pTab->zName);
+        return rc;
+      }
+      testcase( iTerm==nConstraint-1 );
+      testcase( j==0 );
+      testcase( j==pWC->nTerm-1 );
+      pTerm = &pWC->a[j];
+      pNew->prereq |= pTerm->prereqRight;
+      assert( iTerm<pNew->nLSlot );
+      pNew->aLTerm[iTerm] = pTerm;
+      if( iTerm>mxTerm ) mxTerm = iTerm;
+      testcase( iTerm==15 );
+      testcase( iTerm==16 );
+      if( iTerm<16 && pUsage[i].omit ) pNew->u.vtab.omitMask |= 1<<iTerm;
+      if( (pTerm->eOperator & WO_IN)!=0 ){
+        /* A virtual table that is constrained by an IN clause may not
+        ** consume the ORDER BY clause because (1) the order of IN terms
+        ** is not necessarily related to the order of output terms and
+        ** (2) Multiple outputs from a single IN value will not merge
+        ** together.  */
+        pIdxInfo->orderByConsumed = 0;
+        pIdxInfo->idxFlags &= ~SQLITE_INDEX_SCAN_UNIQUE;
+        *pbIn = 1; assert( (mExclude & WO_IN)==0 );
+      }
+    }
+  }
+  pNew->u.vtab.omitMask &= ~mNoOmit;
+
+  pNew->nLTerm = mxTerm+1;
+  assert( pNew->nLTerm<=pNew->nLSlot );
+  pNew->u.vtab.idxNum = pIdxInfo->idxNum;
+  pNew->u.vtab.needFree = pIdxInfo->needToFreeIdxStr;
+  pIdxInfo->needToFreeIdxStr = 0;
+  pNew->u.vtab.idxStr = pIdxInfo->idxStr;
+  pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ?
+      pIdxInfo->nOrderBy : 0);
+  pNew->rSetup = 0;
+  pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost);
+  pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows);
+
+  /* Set the WHERE_ONEROW flag if the xBestIndex() method indicated
+  ** that the scan will visit at most one row. Clear it otherwise. */
+  if( pIdxInfo->idxFlags & SQLITE_INDEX_SCAN_UNIQUE ){
+    pNew->wsFlags |= WHERE_ONEROW;
+  }else{
+    pNew->wsFlags &= ~WHERE_ONEROW;
+  }
+  rc = whereLoopInsert(pBuilder, pNew);
+  if( pNew->u.vtab.needFree ){
+    sqlite3_free(pNew->u.vtab.idxStr);
+    pNew->u.vtab.needFree = 0;
+  }
+  WHERETRACE(0xffff, ("  bIn=%d prereqIn=%04llx prereqOut=%04llx\n",
+                      *pbIn, (sqlite3_uint64)mPrereq,
+                      (sqlite3_uint64)(pNew->prereq & ~mPrereq)));
+
+  return rc;
+}
+
+
+/*
+** Add all WhereLoop objects for a table of the join identified by
+** pBuilder->pNew->iTab.  That table is guaranteed to be a virtual table.
+**
+** If there are no LEFT or CROSS JOIN joins in the query, both mPrereq and
+** mUnusable are set to 0.
Otherwise, mPrereq is a mask of all FROM clause +** entries that occur before the virtual table in the FROM clause and are +** separated from it by at least one LEFT or CROSS JOIN. Similarly, the +** mUnusable mask contains all FROM clause entries that occur after the +** virtual table and are separated from it by at least one LEFT or +** CROSS JOIN. +** +** For example, if the query were: +** +** ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6; +** +** then mPrereq corresponds to (t1, t2) and mUnusable to (t5, t6). +** +** All the tables in mPrereq must be scanned before the current virtual +** table. So any terms for which all prerequisites are satisfied by +** mPrereq may be specified as "usable" in all calls to xBestIndex. +** Conversely, all tables in mUnusable must be scanned after the current +** virtual table, so any terms for which the prerequisites overlap with +** mUnusable should always be configured as "not-usable" for xBestIndex. +*/ +static int whereLoopAddVirtual( + WhereLoopBuilder *pBuilder, /* WHERE clause information */ + Bitmask mPrereq, /* Tables that must be scanned before this one */ + Bitmask mUnusable /* Tables that must be scanned after this one */ +){ + int rc = SQLITE_OK; /* Return code */ + WhereInfo *pWInfo; /* WHERE analysis context */ + Parse *pParse; /* The parsing context */ + WhereClause *pWC; /* The WHERE clause */ + struct SrcList_item *pSrc; /* The FROM clause term to search */ + sqlite3_index_info *p; /* Object to pass to xBestIndex() */ + int nConstraint; /* Number of constraints in p */ + int bIn; /* True if plan uses IN(...) operator */ + WhereLoop *pNew; + Bitmask mBest; /* Tables used by best possible plan */ + u16 mNoOmit; + + assert( (mPrereq & mUnusable)==0 ); + pWInfo = pBuilder->pWInfo; + pParse = pWInfo->pParse; + pWC = pBuilder->pWC; + pNew = pBuilder->pNew; + pSrc = &pWInfo->pTabList->a[pNew->iTab]; + assert( IsVirtual(pSrc->pTab) ); + p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc, pBuilder->pOrderBy, + &mNoOmit); + if( p==0 ) return SQLITE_NOMEM_BKPT; + pNew->rSetup = 0; + pNew->wsFlags = WHERE_VIRTUALTABLE; + pNew->nLTerm = 0; + pNew->u.vtab.needFree = 0; + nConstraint = p->nConstraint; + if( whereLoopResize(pParse->db, pNew, nConstraint) ){ + sqlite3DbFree(pParse->db, p); + return SQLITE_NOMEM_BKPT; + } + + /* First call xBestIndex() with all constraints usable. */ + WHERETRACE(0x40, (" VirtualOne: all usable\n")); + rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn); + + /* If the call to xBestIndex() with all terms enabled produced a plan + ** that does not require any source tables (IOW: a plan with mBest==0), + ** then there is no point in making any further calls to xBestIndex() + ** since they will all return the same result (if the xBestIndex() + ** implementation is sane). */ + if( rc==SQLITE_OK && (mBest = (pNew->prereq & ~mPrereq))!=0 ){ + int seenZero = 0; /* True if a plan with no prereqs seen */ + int seenZeroNoIN = 0; /* Plan with no prereqs and no IN(...) seen */ + Bitmask mPrev = 0; + Bitmask mBestNoIn = 0; + + /* If the plan produced by the earlier call uses an IN(...) term, call + ** xBestIndex again, this time with IN(...) terms disabled. 
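+    ** (A plan found without IN(...) terms can be preferable because, as
+    ** noted above, a plan that uses IN(...) may never consume the ORDER BY
+    ** clause.)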
+    */
+    if( bIn ){
+      WHERETRACE(0x40, ("  VirtualOne: all usable w/o IN\n"));
+      rc = whereLoopAddVirtualOne(
+          pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn);
+      assert( bIn==0 );
+      mBestNoIn = pNew->prereq & ~mPrereq;
+      if( mBestNoIn==0 ){
+        seenZero = 1;
+        seenZeroNoIN = 1;
+      }
+    }
+
+    /* Call xBestIndex once for each distinct value of (prereqRight & ~mPrereq)
+    ** in the set of terms that apply to the current virtual table.  */
+    while( rc==SQLITE_OK ){
+      int i;
+      Bitmask mNext = ALLBITS;
+      assert( mNext>0 );
+      for(i=0; i<nConstraint; i++){
+        Bitmask mThis = (
+            pWC->a[p->aConstraint[i].iTermOffset].prereqRight & ~mPrereq
+        );
+        if( mThis>mPrev && mThis<mNext ) mNext = mThis;
+      }
+      mPrev = mNext;
+      if( mNext==ALLBITS ) break;
+      if( mNext==mBest || mNext==mBestNoIn ) continue;
+      WHERETRACE(0x40, ("  VirtualOne: mPrev=%04llx mNext=%04llx\n",
+                       (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext));
+      rc = whereLoopAddVirtualOne(
+          pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn);
+      if( pNew->prereq==mPrereq ){
+        seenZero = 1;
+        if( bIn==0 ) seenZeroNoIN = 1;
+      }
+    }
+
+    /* If the calls to xBestIndex() in the above loop did not find a plan
+    ** that requires no source tables at all (i.e. one guaranteed to be
+    ** usable), make a call here with all source tables disabled */
+    if( rc==SQLITE_OK && seenZero==0 ){
+      WHERETRACE(0x40, ("  VirtualOne: all disabled\n"));
+      rc = whereLoopAddVirtualOne(
+          pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn);
+      if( bIn==0 ) seenZeroNoIN = 1;
+    }
+
+    /* If the calls to xBestIndex() have so far failed to find a plan
+    ** that requires no source tables at all and does not use an IN(...)
+    ** operator, make a final call to obtain one here.  */
+    if( rc==SQLITE_OK && seenZeroNoIN==0 ){
+      WHERETRACE(0x40, ("  VirtualOne: all disabled and w/o IN\n"));
+      rc = whereLoopAddVirtualOne(
+          pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn);
+    }
+  }
+
+  if( p->needToFreeIdxStr ) sqlite3_free(p->idxStr);
+  sqlite3DbFree(pParse->db, p);
+  return rc;
+}
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+
+/*
+** Add WhereLoop entries to handle OR terms.  This works for either
+** btrees or virtual tables.
+*/
+static int whereLoopAddOr(
+  WhereLoopBuilder *pBuilder,
+  Bitmask mPrereq,
+  Bitmask mUnusable
+){
+  WhereInfo *pWInfo = pBuilder->pWInfo;
+  WhereClause *pWC;
+  WhereLoop *pNew;
+  WhereTerm *pTerm, *pWCEnd;
+  int rc = SQLITE_OK;
+  int iCur;
+  WhereClause tempWC;
+  WhereLoopBuilder sSubBuild;
+  WhereOrSet sSum, sCur;
+  struct SrcList_item *pItem;
+
+  pWC = pBuilder->pWC;
+  pWCEnd = pWC->a + pWC->nTerm;
+  pNew = pBuilder->pNew;
+  memset(&sSum, 0, sizeof(sSum));
+  pItem = pWInfo->pTabList->a + pNew->iTab;
+  iCur = pItem->iCursor;
+
+  for(pTerm=pWC->a; pTerm<pWCEnd && rc==SQLITE_OK; pTerm++){
+    if( (pTerm->eOperator & WO_OR)!=0
+     && (pTerm->u.pOrInfo->indexable & pNew->maskSelf)!=0
+    ){
+      WhereClause * const pOrWC = &pTerm->u.pOrInfo->wc;
+      WhereTerm * const pOrWCEnd = &pOrWC->a[pOrWC->nTerm];
+      WhereTerm *pOrTerm;
+      int once = 1;
+      int i, j;
+
+      sSubBuild = *pBuilder;
+      sSubBuild.pOrderBy = 0;
+      sSubBuild.pOrSet = &sCur;
+
+      WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm));
+      for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd; pOrTerm++){
+        if( (pOrTerm->eOperator & WO_AND)!=0 ){
+          sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc;
+        }else if( pOrTerm->leftCursor==iCur ){
+          tempWC.pWInfo = pWC->pWInfo;
+          tempWC.pOuter = pWC;
+          tempWC.op = TK_AND;
+          tempWC.nTerm = 1;
+          tempWC.a = pOrTerm;
+          sSubBuild.pWC = &tempWC;
+        }else{
+          continue;
+        }
+        sCur.n = 0;
+#ifdef WHERETRACE_ENABLED
+        WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n",
+                   (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm));
+        if( sqlite3WhereTrace & 0x400 ){
+          sqlite3WhereClausePrint(sSubBuild.pWC);
+        }
+#endif
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+        if( IsVirtual(pItem->pTab) ){
+          rc = whereLoopAddVirtual(&sSubBuild, mPrereq, mUnusable);
+        }else
+#endif
+        {
+          rc = whereLoopAddBtree(&sSubBuild, mPrereq);
+        }
+        if( rc==SQLITE_OK ){
+          rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable);
+        }
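+        /* At this point sCur holds the best plan(s) found for the current
+        ** OR subterm.  The code below folds sCur into sSum, the running
+        ** cross product of plan choices over the subterms seen so far;
+        ** component costs are combined with sqlite3LogEstAdd(). */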
+        assert( rc==SQLITE_OK || sCur.n==0 );
+        if( sCur.n==0 ){
+          sSum.n = 0;
+          break;
+        }else if( once ){
+          whereOrMove(&sSum, &sCur);
+          once = 0;
+        }else{
+          WhereOrSet sPrev;
+          whereOrMove(&sPrev, &sSum);
+          sSum.n = 0;
+          for(i=0; i<sPrev.n; i++){
+            for(j=0; j<sCur.n; j++){
+              whereOrInsert(&sSum, sPrev.a[i].prereq|sCur.a[j].prereq,
+                            sqlite3LogEstAdd(sPrev.a[i].rRun, sCur.a[j].rRun),
+                            sqlite3LogEstAdd(sPrev.a[i].nOut, sCur.a[j].nOut));
+            }
+          }
+        }
+      }
+      pNew->nLTerm = 1;
+      pNew->aLTerm[0] = pTerm;
+      pNew->wsFlags = WHERE_MULTI_OR;
+      pNew->rSetup = 0;
+      pNew->iSortIdx = 0;
+      memset(&pNew->u, 0, sizeof(pNew->u));
+      for(i=0; rc==SQLITE_OK && i<sSum.n; i++){
+        pNew->rRun = sSum.a[i].rRun + 1;
+        pNew->nOut = sSum.a[i].nOut;
+        pNew->prereq = sSum.a[i].prereq;
+        rc = whereLoopInsert(pBuilder, pNew);
+      }
+      WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm));
+    }
+  }
+  return rc;
+}
+
+/*
+** Add all WhereLoop objects for all tables
+*/
+static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
+  WhereInfo *pWInfo = pBuilder->pWInfo;
+  Bitmask mPrereq = 0;
+  Bitmask mPrior = 0;
+  int iTab;
+  SrcList *pTabList = pWInfo->pTabList;
+  struct SrcList_item *pItem;
+  struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel];
+  sqlite3 *db = pWInfo->pParse->db;
+  int rc = SQLITE_OK;
+  WhereLoop *pNew;
+  u8 priorJointype = 0;
+
+  /* Loop over the tables in the join, from left to right */
+  pNew = pBuilder->pNew;
+  whereLoopInit(pNew);
+  for(iTab=0, pItem=pTabList->a; pItem<pEnd; iTab++, pItem++){
+    Bitmask mUnusable = 0;
+    pNew->iTab = iTab;
+    pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor);
+    if( ((pItem->fg.jointype|priorJointype) & (JT_LEFT|JT_CROSS))!=0 ){
+      /* This condition is true when pItem is the FROM clause term on the
+      ** right-hand-side of a LEFT or CROSS JOIN.  */
+      mPrereq = mPrior;
+    }
+    priorJointype = pItem->fg.jointype;
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+    if( IsVirtual(pItem->pTab) ){
+      struct SrcList_item *p;
+      for(p=&pItem[1]; p<pEnd; p++){
+        if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){
+          mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor);
+        }
+      }
+      rc = whereLoopAddVirtual(pBuilder, mPrereq, mUnusable);
+    }else
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
+    {
+      rc = whereLoopAddBtree(pBuilder, mPrereq);
+    }
+    if( rc==SQLITE_OK ){
+      rc = whereLoopAddOr(pBuilder, mPrereq, mUnusable);
+    }
+    mPrior |= pNew->maskSelf;
+    if( rc || db->mallocFailed ) break;
+  }
+
+  whereLoopClear(db, pNew);
+  return rc;
+}
+
+/*
+** Examine a WherePath (with the addition of the extra WhereLoop given by
+** the pLast parameter) to see if it outputs rows in the requested ORDER BY
+** (or GROUP BY) without requiring a separate sort operation.  Return N:
+**
+**   N>0:   N terms of the ORDER BY clause are satisfied
+**   N==0:  No terms of the ORDER BY clause are satisfied
+**   N<0:   Unknown yet how many terms of ORDER BY might be satisfied.
+**
+** Note that processing for WHERE_GROUPBY and WHERE_DISTINCTBY is not as
+** strict.  With GROUP BY and DISTINCT the only requirement is that
+** equivalent rows appear immediately adjacent to one another.  GROUP BY
+** and DISTINCT do not require rows to appear in any particular order as long
+** as equivalent rows are grouped together.  Thus for GROUP BY and DISTINCT
+** the pOrderBy terms can be matched in any order.  With ORDER BY, the
+** pOrderBy terms must be matched in strict left-to-right order.
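+**
+** For example, with an index on t1(a,b), "GROUP BY b,a" can be satisfied
+** by a scan of that index because rows with equal (a,b) values are
+** adjacent, but "ORDER BY b,a" cannot, since ORDER BY needs the b values
+** themselves sorted, not merely grouped.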
+*/ +static i8 wherePathSatisfiesOrderBy( + WhereInfo *pWInfo, /* The WHERE clause */ + ExprList *pOrderBy, /* ORDER BY or GROUP BY or DISTINCT clause to check */ + WherePath *pPath, /* The WherePath to check */ + u16 wctrlFlags, /* WHERE_GROUPBY or _DISTINCTBY or _ORDERBY_LIMIT */ + u16 nLoop, /* Number of entries in pPath->aLoop[] */ + WhereLoop *pLast, /* Add this WhereLoop to the end of pPath->aLoop[] */ + Bitmask *pRevMask /* OUT: Mask of WhereLoops to run in reverse order */ +){ + u8 revSet; /* True if rev is known */ + u8 rev; /* Composite sort order */ + u8 revIdx; /* Index sort order */ + u8 isOrderDistinct; /* All prior WhereLoops are order-distinct */ + u8 distinctColumns; /* True if the loop has UNIQUE NOT NULL columns */ + u8 isMatch; /* iColumn matches a term of the ORDER BY clause */ + u16 eqOpMask; /* Allowed equality operators */ + u16 nKeyCol; /* Number of key columns in pIndex */ + u16 nColumn; /* Total number of ordered columns in the index */ + u16 nOrderBy; /* Number terms in the ORDER BY clause */ + int iLoop; /* Index of WhereLoop in pPath being processed */ + int i, j; /* Loop counters */ + int iCur; /* Cursor number for current WhereLoop */ + int iColumn; /* A column number within table iCur */ + WhereLoop *pLoop = 0; /* Current WhereLoop being processed. */ + WhereTerm *pTerm; /* A single term of the WHERE clause */ + Expr *pOBExpr; /* An expression from the ORDER BY clause */ + CollSeq *pColl; /* COLLATE function from an ORDER BY clause term */ + Index *pIndex; /* The index associated with pLoop */ + sqlite3 *db = pWInfo->pParse->db; /* Database connection */ + Bitmask obSat = 0; /* Mask of ORDER BY terms satisfied so far */ + Bitmask obDone; /* Mask of all ORDER BY terms */ + Bitmask orderDistinctMask; /* Mask of all well-ordered loops */ + Bitmask ready; /* Mask of inner loops */ + + /* + ** We say the WhereLoop is "one-row" if it generates no more than one + ** row of output. A WhereLoop is one-row if all of the following are true: + ** (a) All index columns match with WHERE_COLUMN_EQ. + ** (b) The index is unique + ** Any WhereLoop with an WHERE_COLUMN_EQ constraint on the rowid is one-row. + ** Every one-row WhereLoop will have the WHERE_ONEROW bit set in wsFlags. + ** + ** We say the WhereLoop is "order-distinct" if the set of columns from + ** that WhereLoop that are in the ORDER BY clause are different for every + ** row of the WhereLoop. Every one-row WhereLoop is automatically + ** order-distinct. A WhereLoop that has no columns in the ORDER BY clause + ** is not order-distinct. To be order-distinct is not quite the same as being + ** UNIQUE since a UNIQUE column or index can have multiple rows that + ** are NULL and NULL values are equivalent for the purpose of order-distinct. + ** To be order-distinct, the columns must be UNIQUE and NOT NULL. + ** + ** The rowid for a table is always UNIQUE and NOT NULL so whenever the + ** rowid appears in the ORDER BY clause, the corresponding WhereLoop is + ** automatically order-distinct. 
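+  **
+  ** For example, if t1.x is UNIQUE but allows NULLs, an index on t1(x)
+  ** driving "ORDER BY x, y" is not order-distinct: several rows may all
+  ** have x NULL, and the tie must then be broken on y.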
+  */
+
+  assert( pOrderBy!=0 );
+  if( nLoop && OptimizationDisabled(db, SQLITE_OrderByIdxJoin) ) return 0;
+
+  nOrderBy = pOrderBy->nExpr;
+  testcase( nOrderBy==BMS-1 );
+  if( nOrderBy>BMS-1 ) return 0;  /* Cannot optimize overly large ORDER BYs */
+  isOrderDistinct = 1;
+  obDone = MASKBIT(nOrderBy)-1;
+  orderDistinctMask = 0;
+  ready = 0;
+  eqOpMask = WO_EQ | WO_IS | WO_ISNULL;
+  if( wctrlFlags & WHERE_ORDERBY_LIMIT ) eqOpMask |= WO_IN;
+  for(iLoop=0; isOrderDistinct && obSat<obDone && iLoop<=nLoop; iLoop++){
+    if( iLoop>0 ) ready |= pLoop->maskSelf;
+    if( iLoop<nLoop ){
+      pLoop = pPath->aLoop[iLoop];
+      if( wctrlFlags & WHERE_ORDERBY_LIMIT ) continue;
+    }else{
+      pLoop = pLast;
+    }
+    if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){
+      if( pLoop->u.vtab.isOrdered ) obSat = obDone;
+      break;
+    }
+    iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor;
+
+    /* Mark off any ORDER BY term X that is a column in the table of
+    ** the current loop for which there is term in the WHERE
+    ** clause of the form X IS NULL or X=? that reference only outer
+    ** loops.
+    */
+    for(i=0; i<nOrderBy; i++){
+      if( MASKBIT(i) & obSat ) continue;
+      pOBExpr = sqlite3ExprSkipCollate(pOrderBy->a[i].pExpr);
+      if( pOBExpr->op!=TK_COLUMN ) continue;
+      if( pOBExpr->iTable!=iCur ) continue;
+      pTerm = sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn,
+                       ~ready, eqOpMask, 0);
+      if( pTerm==0 ) continue;
+      if( pTerm->eOperator==WO_IN ){
+        /* IN terms are only valid for sorting in the ORDER BY LIMIT
+        ** optimization, and then only if they are actually used
+        ** by the query plan */
+        assert( wctrlFlags & WHERE_ORDERBY_LIMIT );
+        for(j=0; j<pLoop->nLTerm && pTerm!=pLoop->aLTerm[j]; j++){}
+        if( j>=pLoop->nLTerm ) continue;
+      }
+      if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0 && pOBExpr->iColumn>=0 ){
+        const char *z1, *z2;
+        pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
+        if( !pColl ) pColl = db->pDfltColl;
+        z1 = pColl->zName;
+        pColl = sqlite3ExprCollSeq(pWInfo->pParse, pTerm->pExpr);
+        if( !pColl ) pColl = db->pDfltColl;
+        z2 = pColl->zName;
+        if( sqlite3StrICmp(z1, z2)!=0 ) continue;
+        testcase( pTerm->pExpr->op==TK_IS );
+      }
+      obSat |= MASKBIT(i);
+    }
+
+    if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){
+      if( pLoop->wsFlags & WHERE_IPK ){
+        pIndex = 0;
+        nKeyCol = 0;
+        nColumn = 1;
+      }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){
+        return 0;
+      }else{
+        nKeyCol = pIndex->nKeyCol;
+        nColumn = pIndex->nColumn;
+        assert( nColumn==nKeyCol+1 || !HasRowid(pIndex->pTable) );
+        assert( pIndex->aiColumn[nColumn-1]==XN_ROWID
+                          || !HasRowid(pIndex->pTable));
+        isOrderDistinct = IsUniqueIndex(pIndex);
+      }
+
+      /* Loop through all columns of the index and deal with the ones
+      ** that are not constrained by == or IN.
+      */
+      rev = revSet = 0;
+      distinctColumns = 0;
+      for(j=0; j<nColumn; j++){
+        u8 bOnce = 1; /* True to run the ORDER BY search loop */
+
+        assert( j>=pLoop->u.btree.nEq
+            || (pLoop->aLTerm[j]==0)==(j<pLoop->nSkip)
+        );
+        if( j<pLoop->u.btree.nEq && j>=pLoop->nSkip ){
+          u16 eOp = pLoop->aLTerm[j]->eOperator;
+
+          /* Skip over == and IS and ISNULL terms.  (Also skip IN terms when
+          ** doing WHERE_ORDERBY_LIMIT processing).
+          **
+          ** If the current term is a column of an ((?,?) IN (SELECT...))
+          ** expression for which the SELECT returns more than one column,
+          ** check that it is the only column used by this loop.  Otherwise,
+          ** if it is one of two or more, none of the columns can be
+          ** considered to match an ORDER BY term.  */
+          if( (eOp & eqOpMask)!=0 ){
+            if( eOp & WO_ISNULL ){
+              testcase( isOrderDistinct );
+              isOrderDistinct = 0;
+            }
+            continue;
+          }else if( ALWAYS(eOp & WO_IN) ){
+            /* ALWAYS() justification: eOp is an equality operator due to the
+            ** j<pLoop->u.btree.nEq constraint above.  Any equality other
+            ** than WO_IN is captured by the previous "if".
So this one
+            ** always has to be WO_IN. */
+            Expr *pX = pLoop->aLTerm[j]->pExpr;
+            for(i=j+1; i<pLoop->u.btree.nEq; i++){
+              if( pLoop->aLTerm[i]->pExpr==pX ){
+                assert( (pLoop->aLTerm[i]->eOperator & WO_IN) );
+                bOnce = 0;
+                break;
+              }
+            }
+          }
+        }
+
+        /* Get the column number in the table (iColumn) and sort order
+        ** (revIdx) for the j-th column of the index.
+        */
+        if( pIndex ){
+          iColumn = pIndex->aiColumn[j];
+          revIdx = pIndex->aSortOrder[j];
+          if( iColumn==pIndex->pTable->iPKey ) iColumn = -1;
+        }else{
+          iColumn = XN_ROWID;
+          revIdx = 0;
+        }
+
+        /* An unconstrained column that might be NULL means that this
+        ** WhereLoop is not well-ordered
+        */
+        if( isOrderDistinct
+         && iColumn>=0
+         && j>=pLoop->u.btree.nEq
+         && pIndex->pTable->aCol[iColumn].notNull==0
+        ){
+          isOrderDistinct = 0;
+        }
+
+        /* Find the ORDER BY term that corresponds to the j-th column
+        ** of the index and mark that ORDER BY term off
+        */
+        isMatch = 0;
+        for(i=0; bOnce && i<nOrderBy; i++){
+          if( MASKBIT(i) & obSat ) continue;
+          pOBExpr = sqlite3ExprSkipCollate(pOrderBy->a[i].pExpr);
+          testcase( wctrlFlags & WHERE_GROUPBY );
+          testcase( wctrlFlags & WHERE_DISTINCTBY );
+          if( (wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY))==0 ) bOnce = 0;
+          if( iColumn>=(-1) ){
+            if( pOBExpr->op!=TK_COLUMN ) continue;
+            if( pOBExpr->iTable!=iCur ) continue;
+            if( pOBExpr->iColumn!=iColumn ) continue;
+          }else{
+            if( sqlite3ExprCompare(pOBExpr,pIndex->aColExpr->a[j].pExpr,iCur) ){
+              continue;
+            }
+          }
+          if( iColumn>=0 ){
+            pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
+            if( !pColl ) pColl = db->pDfltColl;
+            if( sqlite3StrICmp(pColl->zName, pIndex->azColl[j])!=0 ) continue;
+          }
+          isMatch = 1;
+          break;
+        }
+        if( isMatch && (wctrlFlags & WHERE_GROUPBY)==0 ){
+          /* Make sure the sort order is compatible in an ORDER BY clause.
+          ** Sort order is irrelevant for a GROUP BY clause. */
+          if( revSet ){
+            if( (rev ^ revIdx)!=pOrderBy->a[i].sortOrder ) isMatch = 0;
+          }else{
+            rev = revIdx ^ pOrderBy->a[i].sortOrder;
+            if( rev ) *pRevMask |= MASKBIT(iLoop);
+            revSet = 1;
+          }
+        }
+        if( isMatch ){
+          if( iColumn==XN_ROWID ){
+            testcase( distinctColumns==0 );
+            distinctColumns = 1;
+          }
+          obSat |= MASKBIT(i);
+        }else{
+          /* No match found */
+          if( j==0 || j<nKeyCol ){
+            testcase( isOrderDistinct!=0 );
+            isOrderDistinct = 0;
+          }
+          break;
+        }
+      } /* end Loop over all index columns */
+      if( distinctColumns ){
+        testcase( isOrderDistinct==0 );
+        isOrderDistinct = 1;
+      }
+    } /* end-if not one-row */
+
+    /* Mark off any other ORDER BY terms that reference pLoop */
+    if( isOrderDistinct ){
+      orderDistinctMask |= pLoop->maskSelf;
+      for(i=0; i<nOrderBy; i++){
+        Expr *p;
+        Bitmask mTerm;
+        if( MASKBIT(i) & obSat ) continue;
+        p = pOrderBy->a[i].pExpr;
+        mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p);
+        if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue;
+        if( (mTerm&~orderDistinctMask)==0 ){
+          obSat |= MASKBIT(i);
+        }
+      }
+    }
+  } /* End the loop over all WhereLoops from outer-most down to inner-most */
+  if( obSat==obDone ) return (i8)nOrderBy;
+  if( !isOrderDistinct ){
+    for(i=nOrderBy-1; i>0; i--){
+      Bitmask m = MASKBIT(i) - 1;
+      if( (obSat&m)==m ) return i;
+    }
+    return 0;
+  }
+  return -1;
+}
+
+
+/*
+** If the WHERE_GROUPBY flag is set in the mask passed to sqlite3WhereBegin(),
+** the planner assumes that the specified pOrderBy list is actually a GROUP
+** BY clause - and so any order that groups rows as required satisfies the
+** request.
+**
+** Normally, in this case it is not possible for the caller to determine
+** whether or not the rows are really being delivered in sorted order, or
+** just in some other order that provides the required grouping. However,
+** if the WHERE_SORTBYGROUP flag is also passed to sqlite3WhereBegin(), then
+** this function may be called on the returned WhereInfo object. It returns
+** true if the rows really will be sorted in the specified order, or false
+** otherwise.
+**
+** For example, assuming:
+**
+**   CREATE INDEX i1 ON t1(x, y);
+**
+** then
+**
+**   SELECT * FROM t1 GROUP BY x,y ORDER BY x,y;   -- IsSorted()==1
+**   SELECT * FROM t1 GROUP BY y,x ORDER BY y,x;   -- IsSorted()==0
+*/
+SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo *pWInfo){
+  assert( pWInfo->wctrlFlags & WHERE_GROUPBY );
+  assert( pWInfo->wctrlFlags & WHERE_SORTBYGROUP );
+  return pWInfo->sorted;
+}
+
+#ifdef WHERETRACE_ENABLED
+/* For debugging use only: */
+static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){
+  static char zName[65];
+  int i;
+  for(i=0; i<nLoop; i++){ zName[i] = pPath->aLoop[i]->cId; }
+  if( pLast ) zName[i++] = pLast->cId;
+  zName[i] = 0;
+  return zName;
+}
+#endif
+
+/*
+** Return the cost of sorting nRow rows, assuming that the keys have
+** nOrderby columns and that the first nSorted columns are already in
+** order.
+*/
+static LogEst whereSortingCost(
+  WhereInfo *pWInfo,
+  LogEst nRow,
+  int nOrderBy,
+  int nSorted
+){
+  /* TUNING: Estimated cost of a full external sort, where N is
+  ** the number of rows to sort is:
+  **
+  **   cost = (3.0 * N * log(N)).
+  **
+  ** Or, if the order-by clause has X terms but only the last Y
+  ** terms are out of order, then block-sorting will reduce the
+  ** sorting cost to:
+  **
+  **   cost = (3.0 * N * log(N)) * (Y/X)
+  **
+  ** The (Y/X) term is implemented using stack variable rScale
+  ** below.  */
+  LogEst rScale, rSortCost;
+  assert( nOrderBy>0 && 66==sqlite3LogEst(100) );
+  rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66;
+  rSortCost = nRow + rScale + 16;
+
+  /* Multiply by log(M) where M is the number of output rows.
+  ** Use the LIMIT for M if it is smaller */
+  if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 && pWInfo->iLimit<nRow ){
+    nRow = pWInfo->iLimit;
+  }
+  rSortCost += estLog(nRow);
+  return rSortCost;
+}
+
+/*
+** Given the list of WhereLoop objects at pWInfo->pLoops, this routine
+** attempts to find the lowest cost path that visits each WhereLoop
+** once.  This path is then loaded into the pWInfo->a[].pWLoop fields.
+**
+** Assume that the total number of output rows that will need to be sorted
+** will be nRowEst (in the 10*log2 representation).  Or, ignore sorting
+** costs if nRowEst==0.
+**
+** Return SQLITE_OK on success or SQLITE_NOMEM if a memory allocation
+** error occurs.
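+**
+** (The caller first invokes this routine with nRowEst==0 to obtain a
+** row-count estimate while sorting costs are ignored, then calls it a
+** second time passing that estimate, as described for the nRowEst
+** handling below.)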
+*/ +static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ + int mxChoice; /* Maximum number of simultaneous paths tracked */ + int nLoop; /* Number of terms in the join */ + Parse *pParse; /* Parsing context */ + sqlite3 *db; /* The database connection */ + int iLoop; /* Loop counter over the terms of the join */ + int ii, jj; /* Loop counters */ + int mxI = 0; /* Index of next entry to replace */ + int nOrderBy; /* Number of ORDER BY clause terms */ + LogEst mxCost = 0; /* Maximum cost of a set of paths */ + LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of path */ + int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */ + WherePath *aFrom; /* All nFrom paths at the previous level */ + WherePath *aTo; /* The nTo best paths at the current level */ + WherePath *pFrom; /* An element of aFrom[] that we are working on */ + WherePath *pTo; /* An element of aTo[] that we are working on */ + WhereLoop *pWLoop; /* One of the WhereLoop objects */ + WhereLoop **pX; /* Used to divy up the pSpace memory */ + LogEst *aSortCost = 0; /* Sorting and partial sorting costs */ + char *pSpace; /* Temporary memory used by this routine */ + int nSpace; /* Bytes of space allocated at pSpace */ + + pParse = pWInfo->pParse; + db = pParse->db; + nLoop = pWInfo->nLevel; + /* TUNING: For simple queries, only the best path is tracked. + ** For 2-way joins, the 5 best paths are followed. + ** For joins of 3 or more tables, track the 10 best paths */ + mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); + assert( nLoop<=pWInfo->pTabList->nSrc ); + WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst)); + + /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this + ** case the purpose of this call is to estimate the number of rows returned + ** by the overall query. Once this estimate has been obtained, the caller + ** will invoke this function a second time, passing the estimate as the + ** nRowEst parameter. */ + if( pWInfo->pOrderBy==0 || nRowEst==0 ){ + nOrderBy = 0; + }else{ + nOrderBy = pWInfo->pOrderBy->nExpr; + } + + /* Allocate and initialize space for aTo, aFrom and aSortCost[] */ + nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2; + nSpace += sizeof(LogEst) * nOrderBy; + pSpace = sqlite3DbMallocRawNN(db, nSpace); + if( pSpace==0 ) return SQLITE_NOMEM_BKPT; + aTo = (WherePath*)pSpace; + aFrom = aTo+mxChoice; + memset(aFrom, 0, sizeof(aFrom[0])); + pX = (WhereLoop**)(aFrom+mxChoice); + for(ii=mxChoice*2, pFrom=aTo; ii>0; ii--, pFrom++, pX += nLoop){ + pFrom->aLoop = pX; + } + if( nOrderBy ){ + /* If there is an ORDER BY clause and it is not being ignored, set up + ** space for the aSortCost[] array. Each element of the aSortCost array + ** is either zero - meaning it has not yet been initialized - or the + ** cost of sorting nRowEst rows of data where the first X terms of + ** the ORDER BY clause are already in order, where X is the array + ** index. */ + aSortCost = (LogEst*)pX; + memset(aSortCost, 0, sizeof(LogEst) * nOrderBy); + } + assert( aSortCost==0 || &pSpace[nSpace]==(char*)&aSortCost[nOrderBy] ); + assert( aSortCost!=0 || &pSpace[nSpace]==(char*)pX ); + + /* Seed the search with a single WherePath containing zero WhereLoops. + ** + ** TUNING: Do not let the number of iterations go above 28. If the cost + ** of computing an automatic index is not paid back within the first 28 + ** rows, then do not use the automatic index. 
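+  ** (That 28-row ceiling appears in the code below as the LogEst constant
+  ** 48, since 48 is approximately 10*log2(28), as the adjacent assert
+  ** confirms.)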
+  */
+  aFrom[0].nRow = MIN(pParse->nQueryLoop, 48);  assert( 48==sqlite3LogEst(28) );
+  nFrom = 1;
+  assert( aFrom[0].isOrdered==0 );
+  if( nOrderBy ){
+    /* If nLoop is zero, then there are no FROM terms in the query. Since
+    ** in this case the query may return a maximum of one row, the results
+    ** are already in the requested order. Set isOrdered to nOrderBy to
+    ** indicate this. Or, if nLoop is greater than zero, set isOrdered to
+    ** -1, indicating that the result set may or may not be ordered,
+    ** depending on the loops added to the current plan.  */
+    aFrom[0].isOrdered = nLoop>0 ? -1 : nOrderBy;
+  }
+
+  /* Compute successively longer WherePaths using the previous generation
+  ** of WherePaths as the basis for the next.  Keep track of the mxChoice
+  ** best paths at each generation */
+  for(iLoop=0; iLoop<nLoop; iLoop++){
+    nTo = 0;
+    /* Compute all possible paths extending each path in aFrom[] by one loop */
+    for(ii=0, pFrom=aFrom; ii<nFrom; ii++, pFrom++){
+      for(pWLoop=pWInfo->pLoops; pWLoop; pWLoop=pWLoop->pNextLoop){
+        LogEst nOut;                      /* Rows visited by (pFrom+pWLoop) */
+        LogEst rCost;                     /* Cost of path (pFrom+pWLoop) */
+        LogEst rUnsorted;                 /* Unsorted cost of (pFrom+pWLoop) */
+        i8 isOrdered = pFrom->isOrdered;  /* isOrdered for (pFrom+pWLoop) */
+        Bitmask maskNew;                  /* Mask of src visited by (..) */
+        Bitmask revMask = 0;              /* Mask of rev-order loops for (..) */
+
+        if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue;
+        if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue;
+        if( (pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 && pFrom->nRow<10 ){
+          /* Do not use an automatic index if this loop is expected
+          ** to run less than 2 times. */
+          assert( 10==sqlite3LogEst(2) );
+          continue;
+        }
+        /* At this point, pWLoop is a candidate to be the next loop.
+        ** Compute its cost */
+        rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow);
+        rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted);
+        nOut = pFrom->nRow + pWLoop->nOut;
+        maskNew = pFrom->maskLoop | pWLoop->maskSelf;
+        if( isOrdered<0 ){
+          isOrdered = wherePathSatisfiesOrderBy(pWInfo,
+                       pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags,
+                       iLoop, pWLoop, &revMask);
+        }else{
+          revMask = pFrom->revLoop;
+        }
+        if( isOrdered>=0 && isOrdered<nOrderBy ){
+          if( aSortCost[isOrdered]==0 ){
+            aSortCost[isOrdered] = whereSortingCost(
+                pWInfo, nRowEst, nOrderBy, isOrdered
+            );
+          }
+          rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]);
+        }else{
+          rCost = rUnsorted;
+        }
+
+        /* Check to see if pWLoop should be added to the set of
+        ** mxChoice best-so-far paths.
+        **
+        ** First look for an existing path amongst best-so-far paths
+        ** that covers the same set of loops and has the same isOrdered
+        ** setting as the current path candidate.
+        **
+        ** The term "((pTo->isOrdered^isOrdered)&0x80)==0" is equivalent
+        ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range
+        ** of legal values for isOrdered, -1..64.
+        */
+        for(jj=0, pTo=aTo; jj<nTo; jj++, pTo++){
+          if( pTo->maskLoop==maskNew
+           && ((pTo->isOrdered^isOrdered)&0x80)==0
+          ){
+            testcase( jj==nTo-1 );
+            break;
+          }
+        }
+        if( jj>=nTo ){
+          /* None of the existing best-so-far paths match the candidate. */
+          if( nTo>=mxChoice
+           && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted))
+          ){
+            /* The current candidate is no better than any of the mxChoice
+            ** paths currently in the best-so-far buffer. So discard
+            ** this candidate as not viable. */
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+            if( sqlite3WhereTrace&0x4 ){
+              sqlite3DebugPrintf("Skip   %s cost=%-3d,%3d order=%c\n",
+                  wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
+                  isOrdered>=0 ? isOrdered+'0' : '?');
+            }
+#endif
+            continue;
+          }
+          /* If we reach this point it means that the new candidate path
+          ** needs to be added to the set of best-so-far paths. */
+          if( nTo<mxChoice ){
+            /* Increase the size of the aTo set by one */
+            jj = nTo++;
+          }else{
+            /* New path replaces the prior worst to keep count below mxChoice */
+            jj = mxI;
+          }
+          pTo = &aTo[jj];
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+          if( sqlite3WhereTrace&0x4 ){
+            sqlite3DebugPrintf("New    %s cost=%-3d,%3d order=%c\n",
+                wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
+                isOrdered>=0 ? isOrdered+'0' : '?');
+          }
+#endif
+        }else{
+          /* Control reaches here if best-so-far path pTo=aTo[jj] covers the
+          ** same set of loops and has the same isOrdered setting as the
+          ** candidate path.
Check to see if the candidate should replace
+          ** pTo or if the candidate should be skipped */
+          if( pTo->rCost<rCost || (pTo->rCost==rCost && pTo->nRow<=nOut) ){
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+            if( sqlite3WhereTrace&0x4 ){
+              sqlite3DebugPrintf(
+                  "Skip   %s cost=%-3d,%3d order=%c",
+                  wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
+                  isOrdered>=0 ? isOrdered+'0' : '?');
+              sqlite3DebugPrintf("   vs %s cost=%-3d,%d order=%c\n",
+                  wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+                  pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?');
+            }
+#endif
+            /* Discard the candidate path from further consideration */
+            testcase( pTo->rCost==rCost );
+            continue;
+          }
+          testcase( pTo->rCost==rCost+1 );
+          /* Control reaches here if the candidate path is better than the
+          ** pTo path.  Replace pTo with the candidate. */
+#ifdef WHERETRACE_ENABLED /* 0x4 */
+          if( sqlite3WhereTrace&0x4 ){
+            sqlite3DebugPrintf(
+                "Update %s cost=%-3d,%3d order=%c",
+                wherePathName(pFrom, iLoop, pWLoop), rCost, nOut,
+                isOrdered>=0 ? isOrdered+'0' : '?');
+            sqlite3DebugPrintf("  was %s cost=%-3d,%3d order=%c\n",
+                wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+                pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?');
+          }
+#endif
+        }
+        /* pWLoop is a winner.  Add it to the set of best so far */
+        pTo->maskLoop = pFrom->maskLoop | pWLoop->maskSelf;
+        pTo->revLoop = revMask;
+        pTo->nRow = nOut;
+        pTo->rCost = rCost;
+        pTo->rUnsorted = rUnsorted;
+        pTo->isOrdered = isOrdered;
+        memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop);
+        pTo->aLoop[iLoop] = pWLoop;
+        if( nTo>=mxChoice ){
+          mxI = 0;
+          mxCost = aTo[0].rCost;
+          mxUnsorted = aTo[0].nRow;
+          for(jj=1, pTo=&aTo[1]; jj<nTo; jj++, pTo++){
+            if( pTo->rCost>mxCost
+             || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted)
+            ){
+              mxCost = pTo->rCost;
+              mxUnsorted = pTo->rUnsorted;
+              mxI = jj;
+            }
+          }
+        }
+      }
+    }
+
+#ifdef WHERETRACE_ENABLED  /* >=2 */
+    if( sqlite3WhereTrace & 0x02 ){
+      sqlite3DebugPrintf("---- after round %d ----\n", iLoop);
+      for(ii=0, pTo=aTo; ii<nTo; ii++, pTo++){
+        sqlite3DebugPrintf(" %s cost=%-3d nrow=%-3d order=%c",
+           wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow,
+           pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?');
+        if( pTo->isOrdered>0 ){
+          sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop);
+        }else{
+          sqlite3DebugPrintf("\n");
+        }
+      }
+    }
+#endif
+
+    /* Swap the roles of aFrom and aTo for the next generation */
+    pFrom = aTo;
+    aTo = aFrom;
+    aFrom = pFrom;
+    nFrom = nTo;
+  }
+
+  if( nFrom==0 ){
+    sqlite3ErrorMsg(pParse, "no query solution");
+    sqlite3DbFree(db, pSpace);
+    return SQLITE_ERROR;
+  }
+
+  /* Find the lowest cost path.
pFrom will be left pointing to that path */ + pFrom = aFrom; + for(ii=1; iirCost>aFrom[ii].rCost ) pFrom = &aFrom[ii]; + } + assert( pWInfo->nLevel==nLoop ); + /* Load the lowest cost path into pWInfo */ + for(iLoop=0; iLoopa + iLoop; + pLevel->pWLoop = pWLoop = pFrom->aLoop[iLoop]; + pLevel->iFrom = pWLoop->iTab; + pLevel->iTabCur = pWInfo->pTabList->a[pLevel->iFrom].iCursor; + } + if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 + && (pWInfo->wctrlFlags & WHERE_DISTINCTBY)==0 + && pWInfo->eDistinct==WHERE_DISTINCT_NOOP + && nRowEst + ){ + Bitmask notUsed; + int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pDistinctSet, pFrom, + WHERE_DISTINCTBY, nLoop-1, pFrom->aLoop[nLoop-1], ¬Used); + if( rc==pWInfo->pDistinctSet->nExpr ){ + pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; + } + } + if( pWInfo->pOrderBy ){ + if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){ + if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ + pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; + } + }else{ + pWInfo->nOBSat = pFrom->isOrdered; + pWInfo->revMask = pFrom->revLoop; + if( pWInfo->nOBSat<=0 ){ + pWInfo->nOBSat = 0; + if( nLoop>0 ){ + u32 wsFlags = pFrom->aLoop[nLoop-1]->wsFlags; + if( (wsFlags & WHERE_ONEROW)==0 + && (wsFlags&(WHERE_IPK|WHERE_COLUMN_IN))!=(WHERE_IPK|WHERE_COLUMN_IN) + ){ + Bitmask m = 0; + int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom, + WHERE_ORDERBY_LIMIT, nLoop-1, pFrom->aLoop[nLoop-1], &m); + testcase( wsFlags & WHERE_IPK ); + testcase( wsFlags & WHERE_COLUMN_IN ); + if( rc==pWInfo->pOrderBy->nExpr ){ + pWInfo->bOrderedInnerLoop = 1; + pWInfo->revMask = m; + } + } + } + } + } + if( (pWInfo->wctrlFlags & WHERE_SORTBYGROUP) + && pWInfo->nOBSat==pWInfo->pOrderBy->nExpr && nLoop>0 + ){ + Bitmask revMask = 0; + int nOrder = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, + pFrom, 0, nLoop-1, pFrom->aLoop[nLoop-1], &revMask + ); + assert( pWInfo->sorted==0 ); + if( nOrder==pWInfo->pOrderBy->nExpr ){ + pWInfo->sorted = 1; + pWInfo->revMask = revMask; + } + } + } + + + pWInfo->nRowOut = pFrom->nRow; + + /* Free temporary memory and return success */ + sqlite3DbFree(db, pSpace); + return SQLITE_OK; +} + +/* +** Most queries use only a single table (they are not joins) and have +** simple == constraints against indexed fields. This routine attempts +** to plan those simple cases using much less ceremony than the +** general-purpose query planner, and thereby yield faster sqlite3_prepare() +** times for the common case. +** +** Return non-zero on success, if this query can be handled by this +** no-frills query planner. Return zero if this query needs the +** general-purpose query planner. 
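+**
+** Editor's aside -- an illustrative example, not part of the original
+** comment (the table and column names are made up):
+**
+**     SELECT price FROM product WHERE sku=?1;   -- taken by whereShortCut()
+**                                               -- when sku is the rowid or
+**                                               -- is covered by a UNIQUE
+**                                               -- index: at most one row
+**     SELECT price FROM product WHERE sku>?1;   -- falls through to the
+**                                               -- general planner, since a
+**                                               -- range constraint can
+**                                               -- match many rows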
+*/ +static int whereShortCut(WhereLoopBuilder *pBuilder){ + WhereInfo *pWInfo; + struct SrcList_item *pItem; + WhereClause *pWC; + WhereTerm *pTerm; + WhereLoop *pLoop; + int iCur; + int j; + Table *pTab; + Index *pIdx; + + pWInfo = pBuilder->pWInfo; + if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0; + assert( pWInfo->pTabList->nSrc>=1 ); + pItem = pWInfo->pTabList->a; + pTab = pItem->pTab; + if( IsVirtual(pTab) ) return 0; + if( pItem->fg.isIndexedBy ) return 0; + iCur = pItem->iCursor; + pWC = &pWInfo->sWC; + pLoop = pBuilder->pNew; + pLoop->wsFlags = 0; + pLoop->nSkip = 0; + pTerm = sqlite3WhereFindTerm(pWC, iCur, -1, 0, WO_EQ|WO_IS, 0); + if( pTerm ){ + testcase( pTerm->eOperator & WO_IS ); + pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_IPK|WHERE_ONEROW; + pLoop->aLTerm[0] = pTerm; + pLoop->nLTerm = 1; + pLoop->u.btree.nEq = 1; + /* TUNING: Cost of a rowid lookup is 10 */ + pLoop->rRun = 33; /* 33==sqlite3LogEst(10) */ + }else{ + for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + int opMask; + assert( pLoop->aLTermSpace==pLoop->aLTerm ); + if( !IsUniqueIndex(pIdx) + || pIdx->pPartIdxWhere!=0 + || pIdx->nKeyCol>ArraySize(pLoop->aLTermSpace) + ) continue; + opMask = pIdx->uniqNotNull ? (WO_EQ|WO_IS) : WO_EQ; + for(j=0; jnKeyCol; j++){ + pTerm = sqlite3WhereFindTerm(pWC, iCur, j, 0, opMask, pIdx); + if( pTerm==0 ) break; + testcase( pTerm->eOperator & WO_IS ); + pLoop->aLTerm[j] = pTerm; + } + if( j!=pIdx->nKeyCol ) continue; + pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_ONEROW|WHERE_INDEXED; + if( pIdx->isCovering || (pItem->colUsed & ~columnsInIndex(pIdx))==0 ){ + pLoop->wsFlags |= WHERE_IDX_ONLY; + } + pLoop->nLTerm = j; + pLoop->u.btree.nEq = j; + pLoop->u.btree.pIndex = pIdx; + /* TUNING: Cost of a unique index lookup is 15 */ + pLoop->rRun = 39; /* 39==sqlite3LogEst(15) */ + break; + } + } + if( pLoop->wsFlags ){ + pLoop->nOut = (LogEst)1; + pWInfo->a[0].pWLoop = pLoop; + pLoop->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); + pWInfo->a[0].iTabCur = iCur; + pWInfo->nRowOut = 1; + if( pWInfo->pOrderBy ) pWInfo->nOBSat = pWInfo->pOrderBy->nExpr; + if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){ + pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; + } +#ifdef SQLITE_DEBUG + pLoop->cId = '0'; +#endif + return 1; + } + return 0; +} + +/* +** Generate the beginning of the loop used for WHERE clause processing. +** The return value is a pointer to an opaque structure that contains +** information needed to terminate the loop. Later, the calling routine +** should invoke sqlite3WhereEnd() with the return value of this function +** in order to complete the WHERE clause processing. +** +** If an error occurs, this routine returns NULL. +** +** The basic idea is to do a nested loop, one loop for each table in +** the FROM clause of a select. (INSERT and UPDATE statements are the +** same as a SELECT with only a single table in the FROM clause.) For +** example, if the SQL is this: +** +** SELECT * FROM t1, t2, t3 WHERE ...; +** +** Then the code generated is conceptually like the following: +** +** foreach row1 in t1 do \ Code generated +** foreach row2 in t2 do |-- by sqlite3WhereBegin() +** foreach row3 in t3 do / +** ... +** end \ Code generated +** end |-- by sqlite3WhereEnd() +** end / +** +** Note that the loops might not be nested in the order in which they +** appear in the FROM clause if a different order is better able to make +** use of indices. 
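+**
+** Editor's aside -- an illustrative model, not part of the original
+** comment; first(), seekGE() and next() are made-up helper names. With an
+** index on t2(x) and a WHERE term t2.x==t1.a, the middle loop above stops
+** being a full scan and becomes an index probe, conceptually:
+**
+**    for(row1=first(t1); row1; row1=next(t1)){
+**      for(row2=seekGE(idx_t2_x, row1->a);
+**          row2 && row2->x==row1->a; row2=next(idx_t2_x)){
+**        ...
+**      }
+**    }
+**
+** turning O(N*M) work into roughly O(N*log M) index lookups.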
Note also that when the IN operator appears in +** the WHERE clause, it might result in additional nested loops for +** scanning through all values on the right-hand side of the IN. +** +** There are Btree cursors associated with each table. t1 uses cursor +** number pTabList->a[0].iCursor. t2 uses the cursor pTabList->a[1].iCursor. +** And so forth. This routine generates code to open those VDBE cursors +** and sqlite3WhereEnd() generates the code to close them. +** +** The code that sqlite3WhereBegin() generates leaves the cursors named +** in pTabList pointing at their appropriate entries. The [...] code +** can use OP_Column and OP_Rowid opcodes on these cursors to extract +** data from the various tables of the loop. +** +** If the WHERE clause is empty, the foreach loops must each scan their +** entire tables. Thus a three-way join is an O(N^3) operation. But if +** the tables have indices and there are terms in the WHERE clause that +** refer to those indices, a complete table scan can be avoided and the +** code will run much faster. Most of the work of this routine is checking +** to see if there are indices that can be used to speed up the loop. +** +** Terms of the WHERE clause are also used to limit which rows actually +** make it to the "..." in the middle of the loop. After each "foreach", +** terms of the WHERE clause that use only terms in that loop and outer +** loops are evaluated and if false a jump is made around all subsequent +** inner loops (or around the "..." if the test occurs within the inner- +** most loop) +** +** OUTER JOINS +** +** An outer join of tables t1 and t2 is conceptally coded as follows: +** +** foreach row1 in t1 do +** flag = 0 +** foreach row2 in t2 do +** start: +** ... +** flag = 1 +** end +** if flag==0 then +** move the row2 cursor to a null row +** goto start +** fi +** end +** +** ORDER BY CLAUSE PROCESSING +** +** pOrderBy is a pointer to the ORDER BY clause (or the GROUP BY clause +** if the WHERE_GROUPBY flag is set in wctrlFlags) of a SELECT statement +** if there is one. If there is no ORDER BY clause or if this routine +** is called from an UPDATE or DELETE statement, then pOrderBy is NULL. +** +** The iIdxCur parameter is the cursor number of an index. If +** WHERE_OR_SUBCLAUSE is set, iIdxCur is the cursor number of an index +** to use for OR clause processing. The WHERE clause should use this +** specific cursor. If WHERE_ONEPASS_DESIRED is set, then iIdxCur is +** the first cursor in an array of cursors for all indices. iIdxCur should +** be used to compute the appropriate cursor depending on which index is +** used. +*/ +SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( + Parse *pParse, /* The parser context */ + SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */ + Expr *pWhere, /* The WHERE clause */ + ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ + ExprList *pDistinctSet, /* Try not to output two rows that duplicate these */ + u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */ + int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number + ** If WHERE_USE_LIMIT, then the limit amount */ +){ + int nByteWInfo; /* Num. 
bytes allocated for WhereInfo struct */ + int nTabList; /* Number of elements in pTabList */ + WhereInfo *pWInfo; /* Will become the return value of this function */ + Vdbe *v = pParse->pVdbe; /* The virtual database engine */ + Bitmask notReady; /* Cursors that are not yet positioned */ + WhereLoopBuilder sWLB; /* The WhereLoop builder */ + WhereMaskSet *pMaskSet; /* The expression mask set */ + WhereLevel *pLevel; /* A single level in pWInfo->a[] */ + WhereLoop *pLoop; /* Pointer to a single WhereLoop object */ + int ii; /* Loop counter */ + sqlite3 *db; /* Database connection */ + int rc; /* Return code */ + u8 bFordelete = 0; /* OPFLAG_FORDELETE or zero, as appropriate */ + + assert( (wctrlFlags & WHERE_ONEPASS_MULTIROW)==0 || ( + (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 + )); + + /* Only one of WHERE_OR_SUBCLAUSE or WHERE_USE_LIMIT */ + assert( (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 + || (wctrlFlags & WHERE_USE_LIMIT)==0 ); + + /* Variable initialization */ + db = pParse->db; + memset(&sWLB, 0, sizeof(sWLB)); + + /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ + testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); + if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; + sWLB.pOrderBy = pOrderBy; + + /* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via + ** sqlite3_test_ctrl(SQLITE_TESTCTRL_OPTIMIZATIONS,...) */ + if( OptimizationDisabled(db, SQLITE_DistinctOpt) ){ + wctrlFlags &= ~WHERE_WANT_DISTINCT; + } + + /* The number of tables in the FROM clause is limited by the number of + ** bits in a Bitmask + */ + testcase( pTabList->nSrc==BMS ); + if( pTabList->nSrc>BMS ){ + sqlite3ErrorMsg(pParse, "at most %d tables in a join", BMS); + return 0; + } + + /* This function normally generates a nested loop for all tables in + ** pTabList. But if the WHERE_OR_SUBCLAUSE flag is set, then we should + ** only generate code for the first table in pTabList and assume that + ** any cursors associated with subsequent tables are uninitialized. + */ + nTabList = (wctrlFlags & WHERE_OR_SUBCLAUSE) ? 1 : pTabList->nSrc; + + /* Allocate and initialize the WhereInfo structure that will become the + ** return value. A single allocation is used to store the WhereInfo + ** struct, the contents of WhereInfo.a[], the WhereClause structure + ** and the WhereMaskSet structure. Since WhereClause contains an 8-byte + ** field (type Bitmask) it must be aligned on an 8-byte boundary on + ** some architectures. Hence the ROUND8() below. 
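+  ** Editor's aside (illustrative, not part of the original comment):
+  ** ROUND8() is the usual align-up idiom,
+  **
+  **    #define ROUND8(x)  (((x)+7)&~7)
+  **
+  ** e.g. ROUND8(61)==64 and ROUND8(64)==64, so the WhereLoop placed at
+  ** ((char*)pWInfo)+nByteWInfo below starts on an 8-byte boundary.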
+ */ + nByteWInfo = ROUND8(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel)); + pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop)); + if( db->mallocFailed ){ + sqlite3DbFree(db, pWInfo); + pWInfo = 0; + goto whereBeginError; + } + pWInfo->pParse = pParse; + pWInfo->pTabList = pTabList; + pWInfo->pOrderBy = pOrderBy; + pWInfo->pDistinctSet = pDistinctSet; + pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1; + pWInfo->nLevel = nTabList; + pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(v); + pWInfo->wctrlFlags = wctrlFlags; + pWInfo->iLimit = iAuxArg; + pWInfo->savedNQueryLoop = pParse->nQueryLoop; + memset(&pWInfo->nOBSat, 0, + offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat)); + memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel)); + assert( pWInfo->eOnePass==ONEPASS_OFF ); /* ONEPASS defaults to OFF */ + pMaskSet = &pWInfo->sMaskSet; + sWLB.pWInfo = pWInfo; + sWLB.pWC = &pWInfo->sWC; + sWLB.pNew = (WhereLoop*)(((char*)pWInfo)+nByteWInfo); + assert( EIGHT_BYTE_ALIGNMENT(sWLB.pNew) ); + whereLoopInit(sWLB.pNew); +#ifdef SQLITE_DEBUG + sWLB.pNew->cId = '*'; +#endif + + /* Split the WHERE clause into separate subexpressions where each + ** subexpression is separated by an AND operator. + */ + initMaskSet(pMaskSet); + sqlite3WhereClauseInit(&pWInfo->sWC, pWInfo); + sqlite3WhereSplit(&pWInfo->sWC, pWhere, TK_AND); + + /* Special case: a WHERE clause that is constant. Evaluate the + ** expression and either jump over all of the code or fall thru. + */ + for(ii=0; iinTerm; ii++){ + if( nTabList==0 || sqlite3ExprIsConstantNotJoin(sWLB.pWC->a[ii].pExpr) ){ + sqlite3ExprIfFalse(pParse, sWLB.pWC->a[ii].pExpr, pWInfo->iBreak, + SQLITE_JUMPIFNULL); + sWLB.pWC->a[ii].wtFlags |= TERM_CODED; + } + } + + /* Special case: No FROM clause + */ + if( nTabList==0 ){ + if( pOrderBy ) pWInfo->nOBSat = pOrderBy->nExpr; + if( wctrlFlags & WHERE_WANT_DISTINCT ){ + pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; + } + } + + /* Assign a bit from the bitmask to every term in the FROM clause. + ** + ** The N-th term of the FROM clause is assigned a bitmask of 1<nSrc tables in + ** pTabList, not just the first nTabList tables. nTabList is normally + ** equal to pTabList->nSrc but might be shortened to 1 if the + ** WHERE_OR_SUBCLAUSE flag is set. + */ + for(ii=0; iinSrc; ii++){ + createMask(pMaskSet, pTabList->a[ii].iCursor); + sqlite3WhereTabFuncArgs(pParse, &pTabList->a[ii], &pWInfo->sWC); + } +#ifdef SQLITE_DEBUG + for(ii=0; iinSrc; ii++){ + Bitmask m = sqlite3WhereGetMask(pMaskSet, pTabList->a[ii].iCursor); + assert( m==MASKBIT(ii) ); + } +#endif + + /* Analyze all of the subexpressions. */ + sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC); + if( db->mallocFailed ) goto whereBeginError; + + if( wctrlFlags & WHERE_WANT_DISTINCT ){ + if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pDistinctSet) ){ + /* The DISTINCT marking is pointless. Ignore it. 
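+      ** (Editor's illustration, not part of the original comment:
+      ** DISTINCT is provably redundant when a UNIQUE, NOT NULL index of
+      ** the lone table is contained in the result columns, e.g.
+      **
+      **    SELECT DISTINCT sku, price FROM product;  -- sku UNIQUE NOT NULL
+      **
+      ** since every output row is then already distinct.)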
*/ + pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; + }else if( pOrderBy==0 ){ + /* Try to ORDER BY the result set to make distinct processing easier */ + pWInfo->wctrlFlags |= WHERE_DISTINCTBY; + pWInfo->pOrderBy = pDistinctSet; + } + } + + /* Construct the WhereLoop objects */ +#if defined(WHERETRACE_ENABLED) + if( sqlite3WhereTrace & 0xffff ){ + sqlite3DebugPrintf("*** Optimizer Start *** (wctrlFlags: 0x%x",wctrlFlags); + if( wctrlFlags & WHERE_USE_LIMIT ){ + sqlite3DebugPrintf(", limit: %d", iAuxArg); + } + sqlite3DebugPrintf(")\n"); + } + if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ + sqlite3WhereClausePrint(sWLB.pWC); + } +#endif + + if( nTabList!=1 || whereShortCut(&sWLB)==0 ){ + rc = whereLoopAddAll(&sWLB); + if( rc ) goto whereBeginError; + +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace ){ /* Display all of the WhereLoop objects */ + WhereLoop *p; + int i; + static const char zLabel[] = "0123456789abcdefghijklmnopqrstuvwyxz" + "ABCDEFGHIJKLMNOPQRSTUVWYXZ"; + for(p=pWInfo->pLoops, i=0; p; p=p->pNextLoop, i++){ + p->cId = zLabel[i%sizeof(zLabel)]; + whereLoopPrint(p, sWLB.pWC); + } + } +#endif + + wherePathSolver(pWInfo, 0); + if( db->mallocFailed ) goto whereBeginError; + if( pWInfo->pOrderBy ){ + wherePathSolver(pWInfo, pWInfo->nRowOut+1); + if( db->mallocFailed ) goto whereBeginError; + } + } + if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ + pWInfo->revMask = ALLBITS; + } + if( pParse->nErr || NEVER(db->mallocFailed) ){ + goto whereBeginError; + } +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace ){ + sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut); + if( pWInfo->nOBSat>0 ){ + sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask); + } + switch( pWInfo->eDistinct ){ + case WHERE_DISTINCT_UNIQUE: { + sqlite3DebugPrintf(" DISTINCT=unique"); + break; + } + case WHERE_DISTINCT_ORDERED: { + sqlite3DebugPrintf(" DISTINCT=ordered"); + break; + } + case WHERE_DISTINCT_UNORDERED: { + sqlite3DebugPrintf(" DISTINCT=unordered"); + break; + } + } + sqlite3DebugPrintf("\n"); + for(ii=0; iinLevel; ii++){ + whereLoopPrint(pWInfo->a[ii].pWLoop, sWLB.pWC); + } + } +#endif + /* Attempt to omit tables from the join that do not effect the result */ + if( pWInfo->nLevel>=2 + && pDistinctSet!=0 + && OptimizationEnabled(db, SQLITE_OmitNoopJoin) + ){ + Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pDistinctSet); + if( sWLB.pOrderBy ){ + tabUsed |= sqlite3WhereExprListUsage(pMaskSet, sWLB.pOrderBy); + } + while( pWInfo->nLevel>=2 ){ + WhereTerm *pTerm, *pEnd; + pLoop = pWInfo->a[pWInfo->nLevel-1].pWLoop; + if( (pWInfo->pTabList->a[pLoop->iTab].fg.jointype & JT_LEFT)==0 ) break; + if( (wctrlFlags & WHERE_WANT_DISTINCT)==0 + && (pLoop->wsFlags & WHERE_ONEROW)==0 + ){ + break; + } + if( (tabUsed & pLoop->maskSelf)!=0 ) break; + pEnd = sWLB.pWC->a + sWLB.pWC->nTerm; + for(pTerm=sWLB.pWC->a; pTermprereqAll & pLoop->maskSelf)!=0 + && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) + ){ + break; + } + } + if( pTerm drop loop %c not used\n", pLoop->cId)); + pWInfo->nLevel--; + nTabList--; + } + } + WHERETRACE(0xffff,("*** Optimizer Finished ***\n")); + pWInfo->pParse->nQueryLoop += pWInfo->nRowOut; + + /* If the caller is an UPDATE or DELETE statement that is requesting + ** to use a one-pass algorithm, determine if this is appropriate. 
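+  ** (Editor's aside, illustrative and not part of the original comment:
+  ** "one-pass" means the statement modifies rows directly as the scan
+  ** visits them, instead of first collecting keys and then re-seeking
+  ** each one. For example:
+  **
+  **    DELETE FROM log WHERE rowid=?1;  -- at most one row: ONEPASS_SINGLE
+  **    DELETE FROM log WHERE ts<?1;     -- many rows, non-virtual scan:
+  **                                     -- ONEPASS_MULTI if requested
+  **
+  ** as implemented by the checks on WHERE_ONEROW and
+  ** WHERE_ONEPASS_MULTIROW just below.)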
+ */ + assert( (wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || pWInfo->nLevel==1 ); + if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 ){ + int wsFlags = pWInfo->a[0].pWLoop->wsFlags; + int bOnerow = (wsFlags & WHERE_ONEROW)!=0; + if( bOnerow + || ((wctrlFlags & WHERE_ONEPASS_MULTIROW)!=0 + && 0==(wsFlags & WHERE_VIRTUALTABLE)) + ){ + pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI; + if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){ + if( wctrlFlags & WHERE_ONEPASS_MULTIROW ){ + bFordelete = OPFLAG_FORDELETE; + } + pWInfo->a[0].pWLoop->wsFlags = (wsFlags & ~WHERE_IDX_ONLY); + } + } + } + + /* Open all tables in the pTabList and any indices selected for + ** searching those tables. + */ + for(ii=0, pLevel=pWInfo->a; iia[pLevel->iFrom]; + pTab = pTabItem->pTab; + iDb = sqlite3SchemaToIndex(db, pTab->pSchema); + pLoop = pLevel->pWLoop; + if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){ + /* Do nothing */ + }else +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ + const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); + int iCur = pTabItem->iCursor; + sqlite3VdbeAddOp4(v, OP_VOpen, iCur, 0, 0, pVTab, P4_VTAB); + }else if( IsVirtual(pTab) ){ + /* noop */ + }else +#endif + if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){ + int op = OP_OpenRead; + if( pWInfo->eOnePass!=ONEPASS_OFF ){ + op = OP_OpenWrite; + pWInfo->aiCurOnePass[0] = pTabItem->iCursor; + }; + sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op); + assert( pTabItem->iCursor==pLevel->iTabCur ); + testcase( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol==BMS-1 ); + testcase( pWInfo->eOnePass==ONEPASS_OFF && pTab->nCol==BMS ); + if( pWInfo->eOnePass==ONEPASS_OFF && pTab->nColcolUsed; + int n = 0; + for(; b; b=b>>1, n++){} + sqlite3VdbeChangeP4(v, -1, SQLITE_INT_TO_PTR(n), P4_INT32); + assert( n<=pTab->nCol ); + } +#ifdef SQLITE_ENABLE_CURSOR_HINTS + if( pLoop->u.btree.pIndex!=0 ){ + sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ|bFordelete); + }else +#endif + { + sqlite3VdbeChangeP5(v, bFordelete); + } +#ifdef SQLITE_ENABLE_COLUMN_USED_MASK + sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, pTabItem->iCursor, 0, 0, + (const u8*)&pTabItem->colUsed, P4_INT64); +#endif + }else{ + sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); + } + if( pLoop->wsFlags & WHERE_INDEXED ){ + Index *pIx = pLoop->u.btree.pIndex; + int iIndexCur; + int op = OP_OpenRead; + /* iAuxArg is always set if to a positive value if ONEPASS is possible */ + assert( iAuxArg!=0 || (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 ); + if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIx) + && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 + ){ + /* This is one term of an OR-optimization using the PRIMARY KEY of a + ** WITHOUT ROWID table. 
No need for a separate index */ + iIndexCur = pLevel->iTabCur; + op = 0; + }else if( pWInfo->eOnePass!=ONEPASS_OFF ){ + Index *pJ = pTabItem->pTab->pIndex; + iIndexCur = iAuxArg; + assert( wctrlFlags & WHERE_ONEPASS_DESIRED ); + while( ALWAYS(pJ) && pJ!=pIx ){ + iIndexCur++; + pJ = pJ->pNext; + } + op = OP_OpenWrite; + pWInfo->aiCurOnePass[1] = iIndexCur; + }else if( iAuxArg && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 ){ + iIndexCur = iAuxArg; + op = OP_ReopenIdx; + }else{ + iIndexCur = pParse->nTab++; + } + pLevel->iIdxCur = iIndexCur; + assert( pIx->pSchema==pTab->pSchema ); + assert( iIndexCur>=0 ); + if( op ){ + sqlite3VdbeAddOp3(v, op, iIndexCur, pIx->tnum, iDb); + sqlite3VdbeSetP4KeyInfo(pParse, pIx); + if( (pLoop->wsFlags & WHERE_CONSTRAINT)!=0 + && (pLoop->wsFlags & (WHERE_COLUMN_RANGE|WHERE_SKIPSCAN))==0 + && (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 + ){ + sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ); /* Hint to COMDB2 */ + } + VdbeComment((v, "%s", pIx->zName)); +#ifdef SQLITE_ENABLE_COLUMN_USED_MASK + { + u64 colUsed = 0; + int ii, jj; + for(ii=0; iinColumn; ii++){ + jj = pIx->aiColumn[ii]; + if( jj<0 ) continue; + if( jj>63 ) jj = 63; + if( (pTabItem->colUsed & MASKBIT(jj))==0 ) continue; + colUsed |= ((u64)1)<<(ii<63 ? ii : 63); + } + sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, iIndexCur, 0, 0, + (u8*)&colUsed, P4_INT64); + } +#endif /* SQLITE_ENABLE_COLUMN_USED_MASK */ + } + } + if( iDb>=0 ) sqlite3CodeVerifySchema(pParse, iDb); + } + pWInfo->iTop = sqlite3VdbeCurrentAddr(v); + if( db->mallocFailed ) goto whereBeginError; + + /* Generate the code to do the search. Each iteration of the for + ** loop below generates code for a single nested loop of the VM + ** program. + */ + notReady = ~(Bitmask)0; + for(ii=0; iia[ii]; + wsFlags = pLevel->pWLoop->wsFlags; +#ifndef SQLITE_OMIT_AUTOMATIC_INDEX + if( (pLevel->pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 ){ + constructAutomaticIndex(pParse, &pWInfo->sWC, + &pTabList->a[pLevel->iFrom], notReady, pLevel); + if( db->mallocFailed ) goto whereBeginError; + } +#endif + addrExplain = sqlite3WhereExplainOneScan( + pParse, pTabList, pLevel, ii, pLevel->iFrom, wctrlFlags + ); + pLevel->addrBody = sqlite3VdbeCurrentAddr(v); + notReady = sqlite3WhereCodeOneLoopStart(pWInfo, ii, notReady); + pWInfo->iContinue = pLevel->addrCont; + if( (wsFlags&WHERE_MULTI_OR)==0 && (wctrlFlags&WHERE_OR_SUBCLAUSE)==0 ){ + sqlite3WhereAddScanStatus(v, pTabList, pLevel, addrExplain); + } + } + + /* Done. */ + VdbeModuleComment((v, "Begin WHERE-core")); + return pWInfo; + + /* Jump here if malloc fails */ +whereBeginError: + if( pWInfo ){ + pParse->nQueryLoop = pWInfo->savedNQueryLoop; + whereInfoFree(db, pWInfo); + } + return 0; +} + +/* +** Generate the end of the WHERE loop. See comments on +** sqlite3WhereBegin() for additional information. +*/ +SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ + Parse *pParse = pWInfo->pParse; + Vdbe *v = pParse->pVdbe; + int i; + WhereLevel *pLevel; + WhereLoop *pLoop; + SrcList *pTabList = pWInfo->pTabList; + sqlite3 *db = pParse->db; + + /* Generate loop termination code. 
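+  ** (Editor's sketch, illustrative opcodes only, not part of the original
+  ** comment -- the generated program has the shape
+  **
+  **    top1:  Rewind t1             built by sqlite3WhereBegin()
+  **    top2:  Rewind t2
+  **           ...                   loop body
+  **    cont2: Next t2 -> top2       built below, innermost level first
+  **    brk2:
+  **    cont1: Next t1 -> top1
+  **    brk1:
+  **
+  ** which is why the loop below walks i from nLevel-1 down to 0.)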
+ */ + VdbeModuleComment((v, "End WHERE-core")); + sqlite3ExprCacheClear(pParse); + for(i=pWInfo->nLevel-1; i>=0; i--){ + int addr; + pLevel = &pWInfo->a[i]; + pLoop = pLevel->pWLoop; + sqlite3VdbeResolveLabel(v, pLevel->addrCont); + if( pLevel->op!=OP_Noop ){ + sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3); + sqlite3VdbeChangeP5(v, pLevel->p5); + VdbeCoverage(v); + VdbeCoverageIf(v, pLevel->op==OP_Next); + VdbeCoverageIf(v, pLevel->op==OP_Prev); + VdbeCoverageIf(v, pLevel->op==OP_VNext); + } + if( pLoop->wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){ + struct InLoop *pIn; + int j; + sqlite3VdbeResolveLabel(v, pLevel->addrNxt); + for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){ + sqlite3VdbeJumpHere(v, pIn->addrInTop+1); + if( pIn->eEndLoopOp!=OP_Noop ){ + sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop); + VdbeCoverage(v); + VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen); + VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen); + } + sqlite3VdbeJumpHere(v, pIn->addrInTop-1); + } + } + sqlite3VdbeResolveLabel(v, pLevel->addrBrk); + if( pLevel->addrSkip ){ + sqlite3VdbeGoto(v, pLevel->addrSkip); + VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName)); + sqlite3VdbeJumpHere(v, pLevel->addrSkip); + sqlite3VdbeJumpHere(v, pLevel->addrSkip-2); + } +#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS + if( pLevel->addrLikeRep ){ + sqlite3VdbeAddOp2(v, OP_DecrJumpZero, (int)(pLevel->iLikeRepCntr>>1), + pLevel->addrLikeRep); + VdbeCoverage(v); + } +#endif + if( pLevel->iLeftJoin ){ + int ws = pLoop->wsFlags; + addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v); + assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 ); + if( (ws & WHERE_IDX_ONLY)==0 ){ + sqlite3VdbeAddOp1(v, OP_NullRow, pTabList->a[i].iCursor); + } + if( (ws & WHERE_INDEXED) + || ((ws & WHERE_MULTI_OR) && pLevel->u.pCovidx) + ){ + sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur); + } + if( pLevel->op==OP_Return ){ + sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst); + }else{ + sqlite3VdbeGoto(v, pLevel->addrFirst); + } + sqlite3VdbeJumpHere(v, addr); + } + VdbeModuleComment((v, "End WHERE-loop%d: %s", i, + pWInfo->pTabList->a[pLevel->iFrom].pTab->zName)); + } + + /* The "break" point is here, just past the end of the outer loop. + ** Set it. + */ + sqlite3VdbeResolveLabel(v, pWInfo->iBreak); + + assert( pWInfo->nLevel<=pTabList->nSrc ); + for(i=0, pLevel=pWInfo->a; inLevel; i++, pLevel++){ + int k, last; + VdbeOp *pOp; + Index *pIdx = 0; + struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; + Table *pTab = pTabItem->pTab; + assert( pTab!=0 ); + pLoop = pLevel->pWLoop; + + /* For a co-routine, change all OP_Column references to the table of + ** the co-routine into OP_Copy of result contained in a register. + ** OP_Rowid becomes OP_Null. + */ + if( pTabItem->fg.viaCoroutine && !db->mallocFailed ){ + translateColumnToCopy(v, pLevel->addrBody, pLevel->iTabCur, + pTabItem->regResult, 0); + continue; + } + + /* If this scan uses an index, make VDBE code substitutions to read data + ** from the index instead of from the table where possible. In some cases + ** this optimization prevents the table from ever being read, which can + ** yield a significant performance boost. + ** + ** Calls to the code generator in between sqlite3WhereBegin and + ** sqlite3WhereEnd will have created code that references the table + ** directly. 
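+    ** (Editor's illustration, not part of the original comment: given
+    ** t1(a,b,c) with index i1(c,a), an earlier "OP_Column t1cur 2" that
+    ** reads column c can be patched to "OP_Column i1cur 0", since c is the
+    ** first column of i1; likewise "OP_Rowid t1cur" becomes
+    ** "OP_IdxRowid i1cur". That is exactly the p1/p2 rewriting performed
+    ** in the scan below.)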
This loop scans all that code looking for opcodes + ** that reference the table and converts them into opcodes that + ** reference the index. + */ + if( pLoop->wsFlags & (WHERE_INDEXED|WHERE_IDX_ONLY) ){ + pIdx = pLoop->u.btree.pIndex; + }else if( pLoop->wsFlags & WHERE_MULTI_OR ){ + pIdx = pLevel->u.pCovidx; + } + if( pIdx + && (pWInfo->eOnePass==ONEPASS_OFF || !HasRowid(pIdx->pTable)) + && !db->mallocFailed + ){ + last = sqlite3VdbeCurrentAddr(v); + k = pLevel->addrBody; + pOp = sqlite3VdbeGetOp(v, k); + for(; kp1!=pLevel->iTabCur ) continue; + if( pOp->opcode==OP_Column ){ + int x = pOp->p2; + assert( pIdx->pTable==pTab ); + if( !HasRowid(pTab) ){ + Index *pPk = sqlite3PrimaryKeyIndex(pTab); + x = pPk->aiColumn[x]; + assert( x>=0 ); + } + x = sqlite3ColumnOfIndex(pIdx, x); + if( x>=0 ){ + pOp->p2 = x; + pOp->p1 = pLevel->iIdxCur; + } + assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 + || pWInfo->eOnePass ); + }else if( pOp->opcode==OP_Rowid ){ + pOp->p1 = pLevel->iIdxCur; + pOp->opcode = OP_IdxRowid; + } + } + } + } + + /* Final cleanup + */ + pParse->nQueryLoop = pWInfo->savedNQueryLoop; + whereInfoFree(db, pWInfo); + return; +} + +/************** End of where.c ***********************************************/ +/************** Begin file parse.c *******************************************/ +/* +** 2000-05-29 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Driver template for the LEMON parser generator. +** +** The "lemon" program processes an LALR(1) input grammar file, then uses +** this template to construct a parser. The "lemon" program inserts text +** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the +** interstitial "-" characters) contained in this template is changed into +** the value of the %name directive from the grammar. Otherwise, the content +** of this template is copied straight through into the generate parser +** source file. +** +** The following is the concatenation of all %include directives from the +** input grammar file: +*/ +/* #include */ +/************ Begin %include sections from the grammar ************************/ + +/* #include "sqliteInt.h" */ + +/* +** Disable all error recovery processing in the parser push-down +** automaton. +*/ +#define YYNOERRORRECOVERY 1 + +/* +** Make yytestcase() the same as testcase() +*/ +#define yytestcase(X) testcase(X) + +/* +** Indicate that sqlite3ParserFree() will never be called with a null +** pointer. +*/ +#define YYPARSEFREENEVERNULL 1 + +/* +** In the amalgamation, the parse.c file generated by lemon and the +** tokenize.c file are concatenated. In that case, sqlite3RunParser() +** has access to the the size of the yyParser object and so the parser +** engine can be allocated from stack. In that case, only the +** sqlite3ParserInit() and sqlite3ParserFinalize() routines are invoked +** and the sqlite3ParserAlloc() and sqlite3ParserFree() routines can be +** omitted. +*/ +#ifdef SQLITE_AMALGAMATION +# define sqlite3Parser_ENGINEALWAYSONSTACK 1 +#endif + +/* +** Alternative datatype for the argument to the malloc() routine passed +** into sqlite3ParserAlloc(). The default is size_t. 
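+**
+** (Editor's note, an inference from the surrounding code rather than the
+** original comment: SQLite overrides this to u64 below because its
+** internal allocator entry point, sqlite3Malloc(u64), takes 64-bit sizes,
+** so the parser's stack-growth path can pass them through without a
+** narrowing cast.)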
+*/ +#define YYMALLOCARGTYPE u64 + +/* +** An instance of this structure holds information about the +** LIMIT clause of a SELECT statement. +*/ +struct LimitVal { + Expr *pLimit; /* The LIMIT expression. NULL if there is no limit */ + Expr *pOffset; /* The OFFSET expression. NULL if there is none */ +}; + +/* +** An instance of the following structure describes the event of a +** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT, +** TK_DELETE, or TK_INSTEAD. If the event is of the form +** +** UPDATE ON (a,b,c) +** +** Then the "b" IdList records the list "a,b,c". +*/ +struct TrigEvent { int a; IdList * b; }; + +/* +** Disable lookaside memory allocation for objects that might be +** shared across database connections. +*/ +static void disableLookaside(Parse *pParse){ + pParse->disableLookaside++; + pParse->db->lookaside.bDisable++; +} + + + /* + ** For a compound SELECT statement, make sure p->pPrior->pNext==p for + ** all elements in the list. And make sure list length does not exceed + ** SQLITE_LIMIT_COMPOUND_SELECT. + */ + static void parserDoubleLinkSelect(Parse *pParse, Select *p){ + if( p->pPrior ){ + Select *pNext = 0, *pLoop; + int mxSelect, cnt = 0; + for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){ + pLoop->pNext = pNext; + pLoop->selFlags |= SF_Compound; + } + if( (p->selFlags & SF_MultiValue)==0 && + (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 && + cnt>mxSelect + ){ + sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); + } + } + } + + /* This is a utility routine used to set the ExprSpan.zStart and + ** ExprSpan.zEnd values of pOut so that the span covers the complete + ** range of text beginning with pStart and going to the end of pEnd. + */ + static void spanSet(ExprSpan *pOut, Token *pStart, Token *pEnd){ + pOut->zStart = pStart->z; + pOut->zEnd = &pEnd->z[pEnd->n]; + } + + /* Construct a new Expr object from a single identifier. Use the + ** new Expr to populate pOut. Set the span of pOut to be the identifier + ** that created the expression. + */ + static void spanExpr(ExprSpan *pOut, Parse *pParse, int op, Token t){ + Expr *p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)+t.n+1); + if( p ){ + memset(p, 0, sizeof(Expr)); + p->op = (u8)op; + p->flags = EP_Leaf; + p->iAgg = -1; + p->u.zToken = (char*)&p[1]; + memcpy(p->u.zToken, t.z, t.n); + p->u.zToken[t.n] = 0; + if( sqlite3Isquote(p->u.zToken[0]) ){ + if( p->u.zToken[0]=='"' ) p->flags |= EP_DblQuoted; + sqlite3Dequote(p->u.zToken); + } +#if SQLITE_MAX_EXPR_DEPTH>0 + p->nHeight = 1; +#endif + } + pOut->pExpr = p; + pOut->zStart = t.z; + pOut->zEnd = &t.z[t.n]; + } + + /* This routine constructs a binary expression node out of two ExprSpan + ** objects and uses the result to populate a new ExprSpan object. + */ + static void spanBinaryExpr( + Parse *pParse, /* The parsing context. Errors accumulate here */ + int op, /* The binary operation */ + ExprSpan *pLeft, /* The left operand, and output */ + ExprSpan *pRight /* The right operand */ + ){ + pLeft->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr); + pLeft->zEnd = pRight->zEnd; + } + + /* If doNot is true, then add a TK_NOT Expr-node wrapper around the + ** outside of *ppExpr. 
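+  ** (Editor's illustration, not part of the original comment: the grammar
+  ** uses this helper for the NOT variants of BETWEEN, IN and LIKE. For
+  ** "x NOT BETWEEN 1 AND 5" it first builds the TK_BETWEEN expression and
+  ** then calls exprNot(pParse, 1, &span) to wrap it as
+  ** TK_NOT(TK_BETWEEN(...)), leaving the span's zStart/zEnd covering the
+  ** whole original token run.)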
+ */ + static void exprNot(Parse *pParse, int doNot, ExprSpan *pSpan){ + if( doNot ){ + pSpan->pExpr = sqlite3PExpr(pParse, TK_NOT, pSpan->pExpr, 0); + } + } + + /* Construct an expression node for a unary postfix operator + */ + static void spanUnaryPostfix( + Parse *pParse, /* Parsing context to record errors */ + int op, /* The operator */ + ExprSpan *pOperand, /* The operand, and output */ + Token *pPostOp /* The operand token for setting the span */ + ){ + pOperand->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0); + pOperand->zEnd = &pPostOp->z[pPostOp->n]; + } + + /* A routine to convert a binary TK_IS or TK_ISNOT expression into a + ** unary TK_ISNULL or TK_NOTNULL expression. */ + static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){ + sqlite3 *db = pParse->db; + if( pA && pY && pY->op==TK_NULL ){ + pA->op = (u8)op; + sqlite3ExprDelete(db, pA->pRight); + pA->pRight = 0; + } + } + + /* Construct an expression node for a unary prefix operator + */ + static void spanUnaryPrefix( + ExprSpan *pOut, /* Write the new expression node here */ + Parse *pParse, /* Parsing context to record errors */ + int op, /* The operator */ + ExprSpan *pOperand, /* The operand */ + Token *pPreOp /* The operand token for setting the span */ + ){ + pOut->zStart = pPreOp->z; + pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0); + pOut->zEnd = pOperand->zEnd; + } + + /* Add a single new term to an ExprList that is used to store a + ** list of identifiers. Report an error if the ID list contains + ** a COLLATE clause or an ASC or DESC keyword, except ignore the + ** error while parsing a legacy schema. + */ + static ExprList *parserAddExprIdListTerm( + Parse *pParse, + ExprList *pPrior, + Token *pIdToken, + int hasCollate, + int sortOrder + ){ + ExprList *p = sqlite3ExprListAppend(pParse, pPrior, 0); + if( (hasCollate || sortOrder!=SQLITE_SO_UNDEFINED) + && pParse->db->init.busy==0 + ){ + sqlite3ErrorMsg(pParse, "syntax error after column name \"%.*s\"", + pIdToken->n, pIdToken->z); + } + sqlite3ExprListSetName(pParse, p, pIdToken, 1); + return p; + } +/**************** End of %include directives **********************************/ +/* These constants specify the various numeric values for terminal symbols +** in a format understandable to "makeheaders". This section is blank unless +** "lemon" is run with the "-m" command-line option. +***************** Begin makeheaders token definitions *************************/ +/**************** End makeheaders token definitions ***************************/ + +/* The next sections is a series of control #defines. +** various aspects of the generated parser. +** YYCODETYPE is the data type used to store the integer codes +** that represent terminal and non-terminal symbols. +** "unsigned char" is used if there are fewer than +** 256 symbols. Larger types otherwise. +** YYNOCODE is a number of type YYCODETYPE that is not used for +** any terminal or nonterminal symbol. +** YYFALLBACK If defined, this indicates that one or more tokens +** (also known as: "terminal symbols") have fall-back +** values which should be used if the original symbol +** would not parse. This permits keywords to sometimes +** be used as identifiers, for example. +** YYACTIONTYPE is the data type used for "action codes" - numbers +** that indicate what to do in response to the next +** token. +** sqlite3ParserTOKENTYPE is the data type used for minor type for terminal +** symbols. 
Background: A "minor type" is a semantic +** value associated with a terminal or non-terminal +** symbols. For example, for an "ID" terminal symbol, +** the minor type might be the name of the identifier. +** Each non-terminal can have a different minor type. +** Terminal symbols all have the same minor type, though. +** This macros defines the minor type for terminal +** symbols. +** YYMINORTYPE is the data type used for all minor types. +** This is typically a union of many types, one of +** which is sqlite3ParserTOKENTYPE. The entry in the union +** for terminal symbols is called "yy0". +** YYSTACKDEPTH is the maximum depth of the parser's stack. If +** zero the stack is dynamically sized using realloc() +** sqlite3ParserARG_SDECL A static variable declaration for the %extra_argument +** sqlite3ParserARG_PDECL A parameter declaration for the %extra_argument +** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser +** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser +** YYERRORSYMBOL is the code number of the error symbol. If not +** defined, then do no error processing. +** YYNSTATE the combined number of states. +** YYNRULE the number of rules in the grammar +** YY_MAX_SHIFT Maximum value for shift actions +** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions +** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions +** YY_MIN_REDUCE Maximum value for reduce actions +** YY_ERROR_ACTION The yy_action[] code for syntax error +** YY_ACCEPT_ACTION The yy_action[] code for accept +** YY_NO_ACTION The yy_action[] code for no-op +*/ +#ifndef INTERFACE +# define INTERFACE 1 +#endif +/************* Begin control #defines *****************************************/ +#define YYCODETYPE unsigned char +#define YYNOCODE 252 +#define YYACTIONTYPE unsigned short int +#define YYWILDCARD 96 +#define sqlite3ParserTOKENTYPE Token +typedef union { + int yyinit; + sqlite3ParserTOKENTYPE yy0; + Expr* yy72; + TriggerStep* yy145; + ExprList* yy148; + SrcList* yy185; + ExprSpan yy190; + int yy194; + Select* yy243; + IdList* yy254; + With* yy285; + struct TrigEvent yy332; + struct LimitVal yy354; + struct {int value; int mask;} yy497; +} YYMINORTYPE; +#ifndef YYSTACKDEPTH +#define YYSTACKDEPTH 100 +#endif +#define sqlite3ParserARG_SDECL Parse *pParse; +#define sqlite3ParserARG_PDECL ,Parse *pParse +#define sqlite3ParserARG_FETCH Parse *pParse = yypParser->pParse +#define sqlite3ParserARG_STORE yypParser->pParse = pParse +#define YYFALLBACK 1 +#define YYNSTATE 456 +#define YYNRULE 332 +#define YY_MAX_SHIFT 455 +#define YY_MIN_SHIFTREDUCE 668 +#define YY_MAX_SHIFTREDUCE 999 +#define YY_MIN_REDUCE 1000 +#define YY_MAX_REDUCE 1331 +#define YY_ERROR_ACTION 1332 +#define YY_ACCEPT_ACTION 1333 +#define YY_NO_ACTION 1334 +/************* End control #defines *******************************************/ + +/* Define the yytestcase() macro to be a no-op if is not already defined +** otherwise. +** +** Applications can choose to define yytestcase() in the %include section +** to a macro that can assist in verifying code coverage. For production +** code the yytestcase() macro should be turned off. But it is useful +** for testing. +*/ +#ifndef yytestcase +# define yytestcase(X) +#endif + + +/* Next are the tables used to determine what action to take based on the +** current state and lookahead token. These tables are used to implement +** functions that take a state number and lookahead value and return an +** action integer. +** +** Suppose the action integer is N. 
Then the action is determined as +** follows +** +** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead +** token onto the stack and goto state N. +** +** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then +** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE. +** +** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE +** and YY_MAX_REDUCE +** +** N == YY_ERROR_ACTION A syntax error has occurred. +** +** N == YY_ACCEPT_ACTION The parser accepts its input. +** +** N == YY_NO_ACTION No such action. Denotes unused +** slots in the yy_action[] table. +** +** The action table is constructed as a single large table named yy_action[]. +** Given state S and lookahead X, the action is computed as either: +** +** (A) N = yy_action[ yy_shift_ofst[S] + X ] +** (B) N = yy_default[S] +** +** The (A) formula is preferred. The B formula is used instead if: +** (1) The yy_shift_ofst[S]+X value is out of range, or +** (2) yy_lookahead[yy_shift_ofst[S]+X] is not equal to X, or +** (3) yy_shift_ofst[S] equal YY_SHIFT_USE_DFLT. +** (Implementation note: YY_SHIFT_USE_DFLT is chosen so that +** YY_SHIFT_USE_DFLT+X will be out of range for all possible lookaheads X. +** Hence only tests (1) and (2) need to be evaluated.) +** +** The formulas above are for computing the action when the lookahead is +** a terminal symbol. If the lookahead is a non-terminal (as occurs after +** a reduce action) then the yy_reduce_ofst[] array is used in place of +** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of +** YY_SHIFT_USE_DFLT. +** +** The following are the tables generated in this section: +** +** yy_action[] A single table containing all actions. +** yy_lookahead[] A table containing the lookahead for each entry in +** yy_action. Used to detect hash collisions. +** yy_shift_ofst[] For each state, the offset into yy_action for +** shifting terminals. +** yy_reduce_ofst[] For each state, the offset into yy_action for +** shifting non-terminals after a reduce. +** yy_default[] Default action for each state. 
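+**
+** Editor's aside -- an illustrative C rendering of the (A)/(B) selection
+** above (findAction is a made-up helper name; the generated parser
+** inlines this logic and also applies test (3) via YY_SHIFT_USE_DFLT):
+**
+**    static YYACTIONTYPE findAction(int S, int X){
+**      int i = yy_shift_ofst[S] + X;
+**      if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=X ){
+**        return yy_default[S];          // formula (B)
+**      }
+**      return yy_action[i];             // formula (A)
+**    }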
+** +*********** Begin parsing tables **********************************************/ +#define YY_ACTTAB_COUNT (1567) +static const YYACTIONTYPE yy_action[] = { + /* 0 */ 325, 832, 351, 825, 5, 203, 203, 819, 99, 100, + /* 10 */ 90, 842, 842, 854, 857, 846, 846, 97, 97, 98, + /* 20 */ 98, 98, 98, 301, 96, 96, 96, 96, 95, 95, + /* 30 */ 94, 94, 94, 93, 351, 325, 977, 977, 824, 824, + /* 40 */ 826, 947, 354, 99, 100, 90, 842, 842, 854, 857, + /* 50 */ 846, 846, 97, 97, 98, 98, 98, 98, 338, 96, + /* 60 */ 96, 96, 96, 95, 95, 94, 94, 94, 93, 351, + /* 70 */ 95, 95, 94, 94, 94, 93, 351, 791, 977, 977, + /* 80 */ 325, 94, 94, 94, 93, 351, 792, 75, 99, 100, + /* 90 */ 90, 842, 842, 854, 857, 846, 846, 97, 97, 98, + /* 100 */ 98, 98, 98, 450, 96, 96, 96, 96, 95, 95, + /* 110 */ 94, 94, 94, 93, 351, 1333, 155, 155, 2, 325, + /* 120 */ 275, 146, 132, 52, 52, 93, 351, 99, 100, 90, + /* 130 */ 842, 842, 854, 857, 846, 846, 97, 97, 98, 98, + /* 140 */ 98, 98, 101, 96, 96, 96, 96, 95, 95, 94, + /* 150 */ 94, 94, 93, 351, 958, 958, 325, 268, 428, 413, + /* 160 */ 411, 61, 752, 752, 99, 100, 90, 842, 842, 854, + /* 170 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 60, + /* 180 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93, + /* 190 */ 351, 325, 270, 329, 273, 277, 959, 960, 250, 99, + /* 200 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97, + /* 210 */ 98, 98, 98, 98, 301, 96, 96, 96, 96, 95, + /* 220 */ 95, 94, 94, 94, 93, 351, 325, 938, 1326, 698, + /* 230 */ 706, 1326, 242, 412, 99, 100, 90, 842, 842, 854, + /* 240 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 347, + /* 250 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93, + /* 260 */ 351, 325, 938, 1327, 384, 699, 1327, 381, 379, 99, + /* 270 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97, + /* 280 */ 98, 98, 98, 98, 701, 96, 96, 96, 96, 95, + /* 290 */ 95, 94, 94, 94, 93, 351, 325, 92, 89, 178, + /* 300 */ 833, 936, 373, 700, 99, 100, 90, 842, 842, 854, + /* 310 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 375, + /* 320 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93, + /* 330 */ 351, 325, 1276, 947, 354, 818, 936, 739, 739, 99, + /* 340 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97, + /* 350 */ 98, 98, 98, 98, 230, 96, 96, 96, 96, 95, + /* 360 */ 95, 94, 94, 94, 93, 351, 325, 969, 227, 92, + /* 370 */ 89, 178, 373, 300, 99, 100, 90, 842, 842, 854, + /* 380 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 921, + /* 390 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93, + /* 400 */ 351, 325, 449, 447, 447, 447, 147, 737, 737, 99, + /* 410 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97, + /* 420 */ 98, 98, 98, 98, 296, 96, 96, 96, 96, 95, + /* 430 */ 95, 94, 94, 94, 93, 351, 325, 419, 231, 958, + /* 440 */ 958, 158, 25, 422, 99, 100, 90, 842, 842, 854, + /* 450 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 450, + /* 460 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93, + /* 470 */ 351, 443, 224, 224, 420, 958, 958, 962, 325, 52, + /* 480 */ 52, 959, 960, 176, 415, 78, 99, 100, 90, 842, + /* 490 */ 842, 854, 857, 846, 846, 97, 97, 98, 98, 98, + /* 500 */ 98, 379, 96, 96, 96, 96, 95, 95, 94, 94, + /* 510 */ 94, 93, 351, 325, 428, 418, 298, 959, 960, 962, + /* 520 */ 81, 99, 88, 90, 842, 842, 854, 857, 846, 846, + /* 530 */ 97, 97, 98, 98, 98, 98, 717, 96, 96, 96, + /* 540 */ 96, 95, 95, 94, 94, 94, 93, 351, 325, 843, + /* 550 */ 843, 855, 858, 996, 318, 343, 379, 100, 90, 842, + /* 560 */ 842, 854, 857, 846, 846, 97, 97, 98, 98, 98, + /* 570 */ 98, 450, 96, 96, 96, 96, 95, 95, 94, 94, + /* 580 */ 94, 93, 351, 325, 350, 350, 350, 260, 377, 340, + /* 590 */ 929, 52, 52, 90, 842, 842, 854, 857, 846, 
846, + /* 600 */ 97, 97, 98, 98, 98, 98, 361, 96, 96, 96, + /* 610 */ 96, 95, 95, 94, 94, 94, 93, 351, 86, 445, + /* 620 */ 847, 3, 1203, 361, 360, 378, 344, 813, 958, 958, + /* 630 */ 1300, 86, 445, 729, 3, 212, 169, 287, 405, 282, + /* 640 */ 404, 199, 232, 450, 300, 760, 83, 84, 280, 245, + /* 650 */ 262, 365, 251, 85, 352, 352, 92, 89, 178, 83, + /* 660 */ 84, 242, 412, 52, 52, 448, 85, 352, 352, 246, + /* 670 */ 959, 960, 194, 455, 670, 402, 399, 398, 448, 243, + /* 680 */ 221, 114, 434, 776, 361, 450, 397, 268, 747, 224, + /* 690 */ 224, 132, 132, 198, 832, 434, 452, 451, 428, 427, + /* 700 */ 819, 415, 734, 713, 132, 52, 52, 832, 268, 452, + /* 710 */ 451, 734, 194, 819, 363, 402, 399, 398, 450, 1271, + /* 720 */ 1271, 23, 958, 958, 86, 445, 397, 3, 228, 429, + /* 730 */ 895, 824, 824, 826, 827, 19, 203, 720, 52, 52, + /* 740 */ 428, 408, 439, 249, 824, 824, 826, 827, 19, 229, + /* 750 */ 403, 153, 83, 84, 761, 177, 241, 450, 721, 85, + /* 760 */ 352, 352, 120, 157, 959, 960, 58, 977, 409, 355, + /* 770 */ 330, 448, 268, 428, 430, 320, 790, 32, 32, 86, + /* 780 */ 445, 776, 3, 341, 98, 98, 98, 98, 434, 96, + /* 790 */ 96, 96, 96, 95, 95, 94, 94, 94, 93, 351, + /* 800 */ 832, 120, 452, 451, 813, 887, 819, 83, 84, 977, + /* 810 */ 813, 132, 410, 920, 85, 352, 352, 132, 407, 789, + /* 820 */ 958, 958, 92, 89, 178, 917, 448, 262, 370, 261, + /* 830 */ 82, 914, 80, 262, 370, 261, 776, 824, 824, 826, + /* 840 */ 827, 19, 934, 434, 96, 96, 96, 96, 95, 95, + /* 850 */ 94, 94, 94, 93, 351, 832, 74, 452, 451, 958, + /* 860 */ 958, 819, 959, 960, 120, 92, 89, 178, 945, 2, + /* 870 */ 918, 965, 268, 1, 976, 76, 445, 762, 3, 708, + /* 880 */ 901, 901, 387, 958, 958, 757, 919, 371, 740, 778, + /* 890 */ 756, 257, 824, 824, 826, 827, 19, 417, 741, 450, + /* 900 */ 24, 959, 960, 83, 84, 369, 958, 958, 177, 226, + /* 910 */ 85, 352, 352, 885, 315, 314, 313, 215, 311, 10, + /* 920 */ 10, 683, 448, 349, 348, 959, 960, 909, 777, 157, + /* 930 */ 120, 958, 958, 337, 776, 416, 711, 310, 450, 434, + /* 940 */ 450, 321, 450, 791, 103, 200, 175, 450, 959, 960, + /* 950 */ 908, 832, 792, 452, 451, 9, 9, 819, 10, 10, + /* 960 */ 52, 52, 51, 51, 180, 716, 248, 10, 10, 171, + /* 970 */ 170, 167, 339, 959, 960, 247, 984, 702, 702, 450, + /* 980 */ 715, 233, 686, 982, 889, 983, 182, 914, 824, 824, + /* 990 */ 826, 827, 19, 183, 256, 423, 132, 181, 394, 10, + /* 1000 */ 10, 889, 891, 749, 958, 958, 917, 268, 985, 198, + /* 1010 */ 985, 349, 348, 425, 415, 299, 817, 832, 326, 825, + /* 1020 */ 120, 332, 133, 819, 268, 98, 98, 98, 98, 91, + /* 1030 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93, + /* 1040 */ 351, 157, 810, 371, 382, 359, 959, 960, 358, 268, + /* 1050 */ 450, 918, 368, 324, 824, 824, 826, 450, 709, 450, + /* 1060 */ 264, 380, 889, 450, 877, 746, 253, 919, 255, 433, + /* 1070 */ 36, 36, 234, 450, 234, 120, 269, 37, 37, 12, + /* 1080 */ 12, 334, 272, 27, 27, 450, 330, 118, 450, 162, + /* 1090 */ 742, 280, 450, 38, 38, 450, 985, 356, 985, 450, + /* 1100 */ 709, 1210, 450, 132, 450, 39, 39, 450, 40, 40, + /* 1110 */ 450, 362, 41, 41, 450, 42, 42, 450, 254, 28, + /* 1120 */ 28, 450, 29, 29, 31, 31, 450, 43, 43, 450, + /* 1130 */ 44, 44, 450, 714, 45, 45, 450, 11, 11, 767, + /* 1140 */ 450, 46, 46, 450, 268, 450, 105, 105, 450, 47, + /* 1150 */ 47, 450, 48, 48, 450, 237, 33, 33, 450, 172, + /* 1160 */ 49, 49, 450, 50, 50, 34, 34, 274, 122, 122, + /* 1170 */ 450, 123, 123, 450, 124, 124, 450, 898, 56, 56, + /* 1180 */ 450, 897, 35, 35, 450, 267, 450, 817, 450, 817, + /* 1190 */ 106, 106, 450, 53, 53, 385, 107, 
107, 450, 817, + /* 1200 */ 108, 108, 817, 450, 104, 104, 121, 121, 119, 119, + /* 1210 */ 450, 117, 112, 112, 450, 276, 450, 225, 111, 111, + /* 1220 */ 450, 730, 450, 109, 109, 450, 673, 674, 675, 912, + /* 1230 */ 110, 110, 317, 998, 55, 55, 57, 57, 692, 331, + /* 1240 */ 54, 54, 26, 26, 696, 30, 30, 317, 937, 197, + /* 1250 */ 196, 195, 335, 281, 336, 446, 331, 745, 689, 436, + /* 1260 */ 440, 444, 120, 72, 386, 223, 175, 345, 757, 933, + /* 1270 */ 20, 286, 319, 756, 815, 372, 374, 202, 202, 202, + /* 1280 */ 263, 395, 285, 74, 208, 21, 696, 719, 718, 884, + /* 1290 */ 120, 120, 120, 120, 120, 754, 278, 828, 77, 74, + /* 1300 */ 726, 727, 785, 783, 880, 202, 999, 208, 894, 893, + /* 1310 */ 894, 893, 694, 816, 763, 116, 774, 1290, 431, 432, + /* 1320 */ 302, 999, 390, 303, 823, 697, 691, 680, 159, 289, + /* 1330 */ 679, 884, 681, 952, 291, 218, 293, 7, 316, 828, + /* 1340 */ 173, 805, 259, 364, 252, 911, 376, 713, 295, 435, + /* 1350 */ 308, 168, 955, 993, 135, 400, 990, 284, 882, 881, + /* 1360 */ 205, 928, 926, 59, 333, 62, 144, 156, 130, 72, + /* 1370 */ 802, 366, 367, 393, 137, 185, 189, 160, 139, 383, + /* 1380 */ 67, 896, 140, 141, 142, 148, 389, 812, 775, 266, + /* 1390 */ 219, 190, 154, 391, 913, 876, 271, 406, 191, 322, + /* 1400 */ 682, 733, 192, 342, 732, 724, 731, 711, 723, 421, + /* 1410 */ 705, 71, 323, 6, 204, 771, 288, 79, 297, 346, + /* 1420 */ 772, 704, 290, 283, 703, 770, 292, 294, 967, 239, + /* 1430 */ 769, 102, 862, 438, 426, 240, 424, 442, 73, 213, + /* 1440 */ 688, 238, 22, 453, 953, 214, 217, 216, 454, 677, + /* 1450 */ 676, 671, 753, 125, 115, 235, 126, 669, 353, 166, + /* 1460 */ 127, 244, 179, 357, 306, 304, 305, 307, 113, 892, + /* 1470 */ 327, 890, 811, 328, 134, 128, 136, 138, 743, 258, + /* 1480 */ 907, 184, 143, 129, 910, 186, 63, 64, 145, 187, + /* 1490 */ 906, 65, 8, 66, 13, 188, 202, 899, 265, 149, + /* 1500 */ 987, 388, 150, 685, 161, 392, 285, 193, 279, 396, + /* 1510 */ 151, 401, 68, 14, 15, 722, 69, 236, 831, 131, + /* 1520 */ 830, 860, 70, 751, 16, 414, 755, 4, 174, 220, + /* 1530 */ 222, 784, 201, 152, 779, 77, 74, 17, 18, 875, + /* 1540 */ 861, 859, 916, 864, 915, 207, 206, 942, 163, 437, + /* 1550 */ 948, 943, 164, 209, 1002, 441, 863, 165, 210, 829, + /* 1560 */ 695, 87, 312, 211, 1292, 1291, 309, +}; +static const YYCODETYPE yy_lookahead[] = { + /* 0 */ 19, 95, 53, 97, 22, 24, 24, 101, 27, 28, + /* 10 */ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + /* 20 */ 39, 40, 41, 152, 43, 44, 45, 46, 47, 48, + /* 30 */ 49, 50, 51, 52, 53, 19, 55, 55, 132, 133, + /* 40 */ 134, 1, 2, 27, 28, 29, 30, 31, 32, 33, + /* 50 */ 34, 35, 36, 37, 38, 39, 40, 41, 187, 43, + /* 60 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 70 */ 47, 48, 49, 50, 51, 52, 53, 61, 97, 97, + /* 80 */ 19, 49, 50, 51, 52, 53, 70, 26, 27, 28, + /* 90 */ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + /* 100 */ 39, 40, 41, 152, 43, 44, 45, 46, 47, 48, + /* 110 */ 49, 50, 51, 52, 53, 144, 145, 146, 147, 19, + /* 120 */ 16, 22, 92, 172, 173, 52, 53, 27, 28, 29, + /* 130 */ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + /* 140 */ 40, 41, 81, 43, 44, 45, 46, 47, 48, 49, + /* 150 */ 50, 51, 52, 53, 55, 56, 19, 152, 207, 208, + /* 160 */ 115, 24, 117, 118, 27, 28, 29, 30, 31, 32, + /* 170 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 79, + /* 180 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 190 */ 53, 19, 88, 157, 90, 23, 97, 98, 193, 27, + /* 200 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 210 */ 38, 39, 40, 41, 152, 43, 44, 45, 46, 47, + /* 220 */ 48, 49, 50, 51, 52, 53, 19, 22, 23, 172, + /* 230 */ 
23, 26, 119, 120, 27, 28, 29, 30, 31, 32, + /* 240 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 187, + /* 250 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 260 */ 53, 19, 22, 23, 228, 23, 26, 231, 152, 27, + /* 270 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 280 */ 38, 39, 40, 41, 172, 43, 44, 45, 46, 47, + /* 290 */ 48, 49, 50, 51, 52, 53, 19, 221, 222, 223, + /* 300 */ 23, 96, 152, 172, 27, 28, 29, 30, 31, 32, + /* 310 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 152, + /* 320 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 330 */ 53, 19, 0, 1, 2, 23, 96, 190, 191, 27, + /* 340 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 350 */ 38, 39, 40, 41, 238, 43, 44, 45, 46, 47, + /* 360 */ 48, 49, 50, 51, 52, 53, 19, 185, 218, 221, + /* 370 */ 222, 223, 152, 152, 27, 28, 29, 30, 31, 32, + /* 380 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 241, + /* 390 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 400 */ 53, 19, 152, 168, 169, 170, 22, 190, 191, 27, + /* 410 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 420 */ 38, 39, 40, 41, 152, 43, 44, 45, 46, 47, + /* 430 */ 48, 49, 50, 51, 52, 53, 19, 19, 218, 55, + /* 440 */ 56, 24, 22, 152, 27, 28, 29, 30, 31, 32, + /* 450 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 152, + /* 460 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 470 */ 53, 250, 194, 195, 56, 55, 56, 55, 19, 172, + /* 480 */ 173, 97, 98, 152, 206, 138, 27, 28, 29, 30, + /* 490 */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + /* 500 */ 41, 152, 43, 44, 45, 46, 47, 48, 49, 50, + /* 510 */ 51, 52, 53, 19, 207, 208, 152, 97, 98, 97, + /* 520 */ 138, 27, 28, 29, 30, 31, 32, 33, 34, 35, + /* 530 */ 36, 37, 38, 39, 40, 41, 181, 43, 44, 45, + /* 540 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 30, + /* 550 */ 31, 32, 33, 247, 248, 19, 152, 28, 29, 30, + /* 560 */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + /* 570 */ 41, 152, 43, 44, 45, 46, 47, 48, 49, 50, + /* 580 */ 51, 52, 53, 19, 168, 169, 170, 238, 19, 53, + /* 590 */ 152, 172, 173, 29, 30, 31, 32, 33, 34, 35, + /* 600 */ 36, 37, 38, 39, 40, 41, 152, 43, 44, 45, + /* 610 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 20, + /* 620 */ 101, 22, 23, 169, 170, 56, 207, 85, 55, 56, + /* 630 */ 23, 19, 20, 26, 22, 99, 100, 101, 102, 103, + /* 640 */ 104, 105, 238, 152, 152, 210, 47, 48, 112, 152, + /* 650 */ 108, 109, 110, 54, 55, 56, 221, 222, 223, 47, + /* 660 */ 48, 119, 120, 172, 173, 66, 54, 55, 56, 152, + /* 670 */ 97, 98, 99, 148, 149, 102, 103, 104, 66, 154, + /* 680 */ 23, 156, 83, 26, 230, 152, 113, 152, 163, 194, + /* 690 */ 195, 92, 92, 30, 95, 83, 97, 98, 207, 208, + /* 700 */ 101, 206, 179, 180, 92, 172, 173, 95, 152, 97, + /* 710 */ 98, 188, 99, 101, 219, 102, 103, 104, 152, 119, + /* 720 */ 120, 196, 55, 56, 19, 20, 113, 22, 193, 163, + /* 730 */ 11, 132, 133, 134, 135, 136, 24, 65, 172, 173, + /* 740 */ 207, 208, 250, 152, 132, 133, 134, 135, 136, 193, + /* 750 */ 78, 84, 47, 48, 49, 98, 199, 152, 86, 54, + /* 760 */ 55, 56, 196, 152, 97, 98, 209, 55, 163, 244, + /* 770 */ 107, 66, 152, 207, 208, 164, 175, 172, 173, 19, + /* 780 */ 20, 124, 22, 111, 38, 39, 40, 41, 83, 43, + /* 790 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 800 */ 95, 196, 97, 98, 85, 152, 101, 47, 48, 97, + /* 810 */ 85, 92, 207, 193, 54, 55, 56, 92, 49, 175, + /* 820 */ 55, 56, 221, 222, 223, 12, 66, 108, 109, 110, + /* 830 */ 137, 163, 139, 108, 109, 110, 26, 132, 133, 134, + /* 840 */ 135, 136, 152, 83, 43, 44, 45, 46, 47, 48, + /* 850 */ 49, 50, 51, 52, 53, 95, 26, 97, 98, 55, + /* 860 */ 56, 101, 97, 98, 196, 221, 222, 223, 146, 147, + /* 870 */ 57, 171, 152, 22, 26, 19, 20, 49, 22, 
179, + /* 880 */ 108, 109, 110, 55, 56, 116, 73, 219, 75, 124, + /* 890 */ 121, 152, 132, 133, 134, 135, 136, 163, 85, 152, + /* 900 */ 232, 97, 98, 47, 48, 237, 55, 56, 98, 5, + /* 910 */ 54, 55, 56, 193, 10, 11, 12, 13, 14, 172, + /* 920 */ 173, 17, 66, 47, 48, 97, 98, 152, 124, 152, + /* 930 */ 196, 55, 56, 186, 124, 152, 106, 160, 152, 83, + /* 940 */ 152, 164, 152, 61, 22, 211, 212, 152, 97, 98, + /* 950 */ 152, 95, 70, 97, 98, 172, 173, 101, 172, 173, + /* 960 */ 172, 173, 172, 173, 60, 181, 62, 172, 173, 47, + /* 970 */ 48, 123, 186, 97, 98, 71, 100, 55, 56, 152, + /* 980 */ 181, 186, 21, 107, 152, 109, 82, 163, 132, 133, + /* 990 */ 134, 135, 136, 89, 16, 207, 92, 93, 19, 172, + /* 1000 */ 173, 169, 170, 195, 55, 56, 12, 152, 132, 30, + /* 1010 */ 134, 47, 48, 186, 206, 225, 152, 95, 114, 97, + /* 1020 */ 196, 245, 246, 101, 152, 38, 39, 40, 41, 42, + /* 1030 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 1040 */ 53, 152, 163, 219, 152, 141, 97, 98, 193, 152, + /* 1050 */ 152, 57, 91, 164, 132, 133, 134, 152, 55, 152, + /* 1060 */ 152, 237, 230, 152, 103, 193, 88, 73, 90, 75, + /* 1070 */ 172, 173, 183, 152, 185, 196, 152, 172, 173, 172, + /* 1080 */ 173, 217, 152, 172, 173, 152, 107, 22, 152, 24, + /* 1090 */ 193, 112, 152, 172, 173, 152, 132, 242, 134, 152, + /* 1100 */ 97, 140, 152, 92, 152, 172, 173, 152, 172, 173, + /* 1110 */ 152, 100, 172, 173, 152, 172, 173, 152, 140, 172, + /* 1120 */ 173, 152, 172, 173, 172, 173, 152, 172, 173, 152, + /* 1130 */ 172, 173, 152, 152, 172, 173, 152, 172, 173, 213, + /* 1140 */ 152, 172, 173, 152, 152, 152, 172, 173, 152, 172, + /* 1150 */ 173, 152, 172, 173, 152, 210, 172, 173, 152, 26, + /* 1160 */ 172, 173, 152, 172, 173, 172, 173, 152, 172, 173, + /* 1170 */ 152, 172, 173, 152, 172, 173, 152, 59, 172, 173, + /* 1180 */ 152, 63, 172, 173, 152, 193, 152, 152, 152, 152, + /* 1190 */ 172, 173, 152, 172, 173, 77, 172, 173, 152, 152, + /* 1200 */ 172, 173, 152, 152, 172, 173, 172, 173, 172, 173, + /* 1210 */ 152, 22, 172, 173, 152, 152, 152, 22, 172, 173, + /* 1220 */ 152, 152, 152, 172, 173, 152, 7, 8, 9, 163, + /* 1230 */ 172, 173, 22, 23, 172, 173, 172, 173, 166, 167, + /* 1240 */ 172, 173, 172, 173, 55, 172, 173, 22, 23, 108, + /* 1250 */ 109, 110, 217, 152, 217, 166, 167, 163, 163, 163, + /* 1260 */ 163, 163, 196, 130, 217, 211, 212, 217, 116, 23, + /* 1270 */ 22, 101, 26, 121, 23, 23, 23, 26, 26, 26, + /* 1280 */ 23, 23, 112, 26, 26, 37, 97, 100, 101, 55, + /* 1290 */ 196, 196, 196, 196, 196, 23, 23, 55, 26, 26, + /* 1300 */ 7, 8, 23, 152, 23, 26, 96, 26, 132, 132, + /* 1310 */ 134, 134, 23, 152, 152, 26, 152, 122, 152, 191, + /* 1320 */ 152, 96, 234, 152, 152, 152, 152, 152, 197, 210, + /* 1330 */ 152, 97, 152, 152, 210, 233, 210, 198, 150, 97, + /* 1340 */ 184, 201, 239, 214, 214, 201, 239, 180, 214, 227, + /* 1350 */ 200, 198, 155, 67, 243, 176, 69, 175, 175, 175, + /* 1360 */ 122, 159, 159, 240, 159, 240, 22, 220, 27, 130, + /* 1370 */ 201, 18, 159, 18, 189, 158, 158, 220, 192, 159, + /* 1380 */ 137, 236, 192, 192, 192, 189, 74, 189, 159, 235, + /* 1390 */ 159, 158, 22, 177, 201, 201, 159, 107, 158, 177, + /* 1400 */ 159, 174, 158, 76, 174, 182, 174, 106, 182, 125, + /* 1410 */ 174, 107, 177, 22, 159, 216, 215, 137, 159, 53, + /* 1420 */ 216, 176, 215, 174, 174, 216, 215, 215, 174, 229, + /* 1430 */ 216, 129, 224, 177, 126, 229, 127, 177, 128, 25, + /* 1440 */ 162, 226, 26, 161, 13, 153, 6, 153, 151, 151, + /* 1450 */ 151, 151, 205, 165, 178, 178, 165, 4, 3, 22, + /* 1460 */ 165, 142, 15, 94, 202, 204, 203, 201, 16, 23, + /* 1470 */ 
249, 23, 120, 249, 246, 111, 131, 123, 20, 16, + /* 1480 */ 1, 125, 123, 111, 56, 64, 37, 37, 131, 122, + /* 1490 */ 1, 37, 5, 37, 22, 107, 26, 80, 140, 80, + /* 1500 */ 87, 72, 107, 20, 24, 19, 112, 105, 23, 79, + /* 1510 */ 22, 79, 22, 22, 22, 58, 22, 79, 23, 68, + /* 1520 */ 23, 23, 26, 116, 22, 26, 23, 22, 122, 23, + /* 1530 */ 23, 56, 64, 22, 124, 26, 26, 64, 64, 23, + /* 1540 */ 23, 23, 23, 11, 23, 22, 26, 23, 22, 24, + /* 1550 */ 1, 23, 22, 26, 251, 24, 23, 22, 122, 23, + /* 1560 */ 23, 22, 15, 122, 122, 122, 23, +}; +#define YY_SHIFT_USE_DFLT (1567) +#define YY_SHIFT_COUNT (455) +#define YY_SHIFT_MIN (-94) +#define YY_SHIFT_MAX (1549) +static const short yy_shift_ofst[] = { + /* 0 */ 40, 599, 904, 612, 760, 760, 760, 760, 725, -19, + /* 10 */ 16, 16, 100, 760, 760, 760, 760, 760, 760, 760, + /* 20 */ 876, 876, 573, 542, 719, 600, 61, 137, 172, 207, + /* 30 */ 242, 277, 312, 347, 382, 417, 459, 459, 459, 459, + /* 40 */ 459, 459, 459, 459, 459, 459, 459, 459, 459, 459, + /* 50 */ 459, 459, 459, 494, 459, 529, 564, 564, 705, 760, + /* 60 */ 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, + /* 70 */ 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, + /* 80 */ 760, 760, 760, 760, 760, 760, 760, 760, 760, 760, + /* 90 */ 856, 760, 760, 760, 760, 760, 760, 760, 760, 760, + /* 100 */ 760, 760, 760, 760, 987, 746, 746, 746, 746, 746, + /* 110 */ 801, 23, 32, 949, 961, 979, 964, 964, 949, 73, + /* 120 */ 113, -51, 1567, 1567, 1567, 536, 536, 536, 99, 99, + /* 130 */ 813, 813, 667, 205, 240, 949, 949, 949, 949, 949, + /* 140 */ 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, + /* 150 */ 949, 949, 949, 949, 949, 332, 1011, 422, 422, 113, + /* 160 */ 30, 30, 30, 30, 30, 30, 1567, 1567, 1567, 922, + /* 170 */ -94, -94, 384, 613, 828, 420, 765, 804, 851, 949, + /* 180 */ 949, 949, 949, 949, 949, 949, 949, 949, 949, 949, + /* 190 */ 949, 949, 949, 949, 949, 672, 672, 672, 949, 949, + /* 200 */ 657, 949, 949, 949, -18, 949, 949, 994, 949, 949, + /* 210 */ 949, 949, 949, 949, 949, 949, 949, 949, 772, 1118, + /* 220 */ 712, 712, 712, 810, 45, 769, 1219, 1133, 418, 418, + /* 230 */ 569, 1133, 569, 830, 607, 663, 882, 418, 693, 882, + /* 240 */ 882, 848, 1152, 1065, 1286, 1238, 1238, 1287, 1287, 1238, + /* 250 */ 1344, 1341, 1239, 1353, 1353, 1353, 1353, 1238, 1355, 1239, + /* 260 */ 1344, 1341, 1341, 1239, 1238, 1355, 1243, 1312, 1238, 1238, + /* 270 */ 1355, 1370, 1238, 1355, 1238, 1355, 1370, 1290, 1290, 1290, + /* 280 */ 1327, 1370, 1290, 1301, 1290, 1327, 1290, 1290, 1284, 1304, + /* 290 */ 1284, 1304, 1284, 1304, 1284, 1304, 1238, 1391, 1238, 1280, + /* 300 */ 1370, 1366, 1366, 1370, 1302, 1308, 1310, 1309, 1239, 1414, + /* 310 */ 1416, 1431, 1431, 1440, 1440, 1440, 1440, 1567, 1567, 1567, + /* 320 */ 1567, 1567, 1567, 1567, 1567, 519, 978, 1210, 1225, 104, + /* 330 */ 1141, 1189, 1246, 1248, 1251, 1252, 1253, 1257, 1258, 1273, + /* 340 */ 1003, 1187, 1293, 1170, 1272, 1279, 1234, 1281, 1176, 1177, + /* 350 */ 1289, 1242, 1195, 1453, 1455, 1437, 1319, 1447, 1369, 1452, + /* 360 */ 1446, 1448, 1352, 1345, 1364, 1354, 1458, 1356, 1463, 1479, + /* 370 */ 1359, 1357, 1449, 1450, 1454, 1456, 1372, 1428, 1421, 1367, + /* 380 */ 1489, 1487, 1472, 1388, 1358, 1417, 1470, 1419, 1413, 1429, + /* 390 */ 1395, 1480, 1483, 1486, 1394, 1402, 1488, 1430, 1490, 1491, + /* 400 */ 1485, 1492, 1432, 1457, 1494, 1438, 1451, 1495, 1497, 1498, + /* 410 */ 1496, 1407, 1502, 1503, 1505, 1499, 1406, 1506, 1507, 1475, + /* 420 */ 1468, 1511, 1410, 1509, 1473, 1510, 1474, 1516, 1509, 1517, + /* 430 */ 1518, 1519, 
1520, 1521, 1523, 1532, 1524, 1526, 1525, 1527, + /* 440 */ 1528, 1530, 1531, 1527, 1533, 1535, 1536, 1537, 1539, 1436, + /* 450 */ 1441, 1442, 1443, 1543, 1547, 1549, +}; +#define YY_REDUCE_USE_DFLT (-130) +#define YY_REDUCE_COUNT (324) +#define YY_REDUCE_MIN (-129) +#define YY_REDUCE_MAX (1300) +static const short yy_reduce_ofst[] = { + /* 0 */ -29, 566, 525, 605, -49, 307, 491, 533, 668, 435, + /* 10 */ 601, 644, 148, 747, 786, 795, 419, 788, 827, 790, + /* 20 */ 454, 832, 889, 495, 824, 734, 76, 76, 76, 76, + /* 30 */ 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, + /* 40 */ 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, + /* 50 */ 76, 76, 76, 76, 76, 76, 76, 76, 783, 898, + /* 60 */ 905, 907, 911, 921, 933, 936, 940, 943, 947, 950, + /* 70 */ 952, 955, 958, 962, 965, 969, 974, 977, 980, 984, + /* 80 */ 988, 991, 993, 996, 999, 1002, 1006, 1010, 1018, 1021, + /* 90 */ 1024, 1028, 1032, 1034, 1036, 1040, 1046, 1051, 1058, 1062, + /* 100 */ 1064, 1068, 1070, 1073, 76, 76, 76, 76, 76, 76, + /* 110 */ 76, 76, 76, 855, 36, 523, 235, 416, 777, 76, + /* 120 */ 278, 76, 76, 76, 76, 700, 700, 700, 150, 220, + /* 130 */ 147, 217, 221, 306, 306, 611, 5, 535, 556, 620, + /* 140 */ 720, 872, 897, 116, 864, 349, 1035, 1037, 404, 1047, + /* 150 */ 992, -129, 1050, 492, 62, 722, 879, 1072, 1089, 808, + /* 160 */ 1066, 1094, 1095, 1096, 1097, 1098, 776, 1054, 557, 57, + /* 170 */ 112, 131, 167, 182, 250, 272, 291, 331, 364, 438, + /* 180 */ 497, 517, 591, 653, 690, 739, 775, 798, 892, 908, + /* 190 */ 924, 930, 1015, 1063, 1069, 355, 784, 799, 981, 1101, + /* 200 */ 926, 1151, 1161, 1162, 945, 1164, 1166, 1128, 1168, 1171, + /* 210 */ 1172, 250, 1173, 1174, 1175, 1178, 1180, 1181, 1088, 1102, + /* 220 */ 1119, 1124, 1126, 926, 1131, 1139, 1188, 1140, 1129, 1130, + /* 230 */ 1103, 1144, 1107, 1179, 1156, 1167, 1182, 1134, 1122, 1183, + /* 240 */ 1184, 1150, 1153, 1197, 1111, 1202, 1203, 1123, 1125, 1205, + /* 250 */ 1147, 1185, 1169, 1186, 1190, 1191, 1192, 1213, 1217, 1193, + /* 260 */ 1157, 1196, 1198, 1194, 1220, 1218, 1145, 1154, 1229, 1231, + /* 270 */ 1233, 1216, 1237, 1240, 1241, 1244, 1222, 1227, 1230, 1232, + /* 280 */ 1223, 1235, 1236, 1245, 1249, 1226, 1250, 1254, 1199, 1201, + /* 290 */ 1204, 1207, 1209, 1211, 1214, 1212, 1255, 1208, 1259, 1215, + /* 300 */ 1256, 1200, 1206, 1260, 1247, 1261, 1263, 1262, 1266, 1278, + /* 310 */ 1282, 1292, 1294, 1297, 1298, 1299, 1300, 1221, 1224, 1228, + /* 320 */ 1288, 1291, 1276, 1277, 1295, +}; +static const YYACTIONTYPE yy_default[] = { + /* 0 */ 1281, 1271, 1271, 1271, 1203, 1203, 1203, 1203, 1271, 1096, + /* 10 */ 1125, 1125, 1255, 1332, 1332, 1332, 1332, 1332, 1332, 1202, + /* 20 */ 1332, 1332, 1332, 1332, 1271, 1100, 1131, 1332, 1332, 1332, + /* 30 */ 1332, 1204, 1205, 1332, 1332, 1332, 1254, 1256, 1141, 1140, + /* 40 */ 1139, 1138, 1237, 1112, 1136, 1129, 1133, 1204, 1198, 1199, + /* 50 */ 1197, 1201, 1205, 1332, 1132, 1167, 1182, 1166, 1332, 1332, + /* 60 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 70 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 80 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 90 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 100 */ 1332, 1332, 1332, 1332, 1176, 1181, 1188, 1180, 1177, 1169, + /* 110 */ 1168, 1170, 1171, 1332, 1019, 1067, 1332, 1332, 1332, 1172, + /* 120 */ 1332, 1173, 1185, 1184, 1183, 1262, 1289, 1288, 1332, 1332, + /* 130 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 140 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 
1332, 1332, 1332, + /* 150 */ 1332, 1332, 1332, 1332, 1332, 1281, 1271, 1025, 1025, 1332, + /* 160 */ 1271, 1271, 1271, 1271, 1271, 1271, 1267, 1100, 1091, 1332, + /* 170 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 180 */ 1259, 1257, 1332, 1218, 1332, 1332, 1332, 1332, 1332, 1332, + /* 190 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 200 */ 1332, 1332, 1332, 1332, 1096, 1332, 1332, 1332, 1332, 1332, + /* 210 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1283, 1332, 1232, + /* 220 */ 1096, 1096, 1096, 1098, 1080, 1090, 1004, 1135, 1114, 1114, + /* 230 */ 1321, 1135, 1321, 1042, 1303, 1039, 1125, 1114, 1200, 1125, + /* 240 */ 1125, 1097, 1090, 1332, 1324, 1105, 1105, 1323, 1323, 1105, + /* 250 */ 1146, 1070, 1135, 1076, 1076, 1076, 1076, 1105, 1016, 1135, + /* 260 */ 1146, 1070, 1070, 1135, 1105, 1016, 1236, 1318, 1105, 1105, + /* 270 */ 1016, 1211, 1105, 1016, 1105, 1016, 1211, 1068, 1068, 1068, + /* 280 */ 1057, 1211, 1068, 1042, 1068, 1057, 1068, 1068, 1118, 1113, + /* 290 */ 1118, 1113, 1118, 1113, 1118, 1113, 1105, 1206, 1105, 1332, + /* 300 */ 1211, 1215, 1215, 1211, 1130, 1119, 1128, 1126, 1135, 1022, + /* 310 */ 1060, 1286, 1286, 1282, 1282, 1282, 1282, 1329, 1329, 1267, + /* 320 */ 1298, 1298, 1044, 1044, 1298, 1332, 1332, 1332, 1332, 1332, + /* 330 */ 1332, 1293, 1332, 1220, 1332, 1332, 1332, 1332, 1332, 1332, + /* 340 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 350 */ 1332, 1332, 1152, 1332, 1000, 1264, 1332, 1332, 1263, 1332, + /* 360 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 370 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1320, + /* 380 */ 1332, 1332, 1332, 1332, 1332, 1332, 1235, 1234, 1332, 1332, + /* 390 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 400 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, + /* 410 */ 1332, 1082, 1332, 1332, 1332, 1307, 1332, 1332, 1332, 1332, + /* 420 */ 1332, 1332, 1332, 1127, 1332, 1120, 1332, 1332, 1311, 1332, + /* 430 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1273, + /* 440 */ 1332, 1332, 1332, 1272, 1332, 1332, 1332, 1332, 1332, 1154, + /* 450 */ 1332, 1153, 1157, 1332, 1010, 1332, +}; +/********** End of lemon-generated parsing tables *****************************/ + +/* The next table maps tokens (terminal symbols) into fallback tokens. +** If a construct like the following: +** +** %fallback ID X Y Z. +** +** appears in the grammar, then ID becomes a fallback token for X, Y, +** and Z. Whenever one of the tokens X, Y, or Z is input to the parser +** but it does not parse, the type of the token is changed to ID and +** the parse is retried before an error is thrown. +** +** This feature can be used, for example, to cause some keywords in a language +** to revert to identifiers if they keyword does not apply in the context where +** it appears. 
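+** For example, the table below makes ID the fallback for EXPLAIN
+** (entry "EXPLAIN => ID"), so a statement such as
+**
+**     CREATE TABLE t(explain INT);
+**
+** still parses: EXPLAIN is not valid in that position, so the token
+** is retried as an ordinary identifier.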
+*/ +#ifdef YYFALLBACK +static const YYCODETYPE yyFallback[] = { + 0, /* $ => nothing */ + 0, /* SEMI => nothing */ + 55, /* EXPLAIN => ID */ + 55, /* QUERY => ID */ + 55, /* PLAN => ID */ + 55, /* BEGIN => ID */ + 0, /* TRANSACTION => nothing */ + 55, /* DEFERRED => ID */ + 55, /* IMMEDIATE => ID */ + 55, /* EXCLUSIVE => ID */ + 0, /* COMMIT => nothing */ + 55, /* END => ID */ + 55, /* ROLLBACK => ID */ + 55, /* SAVEPOINT => ID */ + 55, /* RELEASE => ID */ + 0, /* TO => nothing */ + 0, /* TABLE => nothing */ + 0, /* CREATE => nothing */ + 55, /* IF => ID */ + 0, /* NOT => nothing */ + 0, /* EXISTS => nothing */ + 55, /* TEMP => ID */ + 0, /* LP => nothing */ + 0, /* RP => nothing */ + 0, /* AS => nothing */ + 55, /* WITHOUT => ID */ + 0, /* COMMA => nothing */ + 0, /* OR => nothing */ + 0, /* AND => nothing */ + 0, /* IS => nothing */ + 55, /* MATCH => ID */ + 55, /* LIKE_KW => ID */ + 0, /* BETWEEN => nothing */ + 0, /* IN => nothing */ + 0, /* ISNULL => nothing */ + 0, /* NOTNULL => nothing */ + 0, /* NE => nothing */ + 0, /* EQ => nothing */ + 0, /* GT => nothing */ + 0, /* LE => nothing */ + 0, /* LT => nothing */ + 0, /* GE => nothing */ + 0, /* ESCAPE => nothing */ + 0, /* BITAND => nothing */ + 0, /* BITOR => nothing */ + 0, /* LSHIFT => nothing */ + 0, /* RSHIFT => nothing */ + 0, /* PLUS => nothing */ + 0, /* MINUS => nothing */ + 0, /* STAR => nothing */ + 0, /* SLASH => nothing */ + 0, /* REM => nothing */ + 0, /* CONCAT => nothing */ + 0, /* COLLATE => nothing */ + 0, /* BITNOT => nothing */ + 0, /* ID => nothing */ + 0, /* INDEXED => nothing */ + 55, /* ABORT => ID */ + 55, /* ACTION => ID */ + 55, /* AFTER => ID */ + 55, /* ANALYZE => ID */ + 55, /* ASC => ID */ + 55, /* ATTACH => ID */ + 55, /* BEFORE => ID */ + 55, /* BY => ID */ + 55, /* CASCADE => ID */ + 55, /* CAST => ID */ + 55, /* COLUMNKW => ID */ + 55, /* CONFLICT => ID */ + 55, /* DATABASE => ID */ + 55, /* DESC => ID */ + 55, /* DETACH => ID */ + 55, /* EACH => ID */ + 55, /* FAIL => ID */ + 55, /* FOR => ID */ + 55, /* IGNORE => ID */ + 55, /* INITIALLY => ID */ + 55, /* INSTEAD => ID */ + 55, /* NO => ID */ + 55, /* KEY => ID */ + 55, /* OF => ID */ + 55, /* OFFSET => ID */ + 55, /* PRAGMA => ID */ + 55, /* RAISE => ID */ + 55, /* RECURSIVE => ID */ + 55, /* REPLACE => ID */ + 55, /* RESTRICT => ID */ + 55, /* ROW => ID */ + 55, /* TRIGGER => ID */ + 55, /* VACUUM => ID */ + 55, /* VIEW => ID */ + 55, /* VIRTUAL => ID */ + 55, /* WITH => ID */ + 55, /* REINDEX => ID */ + 55, /* RENAME => ID */ + 55, /* CTIME_KW => ID */ +}; +#endif /* YYFALLBACK */ + +/* The following structure represents a single element of the +** parser's stack. Information stored includes: +** +** + The state number for the parser at this level of the stack. +** +** + The value of the token stored at this level of the stack. +** (In other words, the "major" token.) +** +** + The semantic value stored at this level of the stack. This is +** the information used by the action routines in the grammar. +** It is sometimes called the "minor" token. +** +** After the "shift" half of a SHIFTREDUCE action, the stateno field +** actually contains the reduce action for the second half of the +** SHIFTREDUCE. +*/ +struct yyStackEntry { + YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */ + YYCODETYPE major; /* The major token value. This is the code + ** number for the token at this stack level */ + YYMINORTYPE minor; /* The user-supplied minor token value. 
This
+                         ** is the value of the token  */
+};
+typedef struct yyStackEntry yyStackEntry;
+
+/* The state of the parser is completely contained in an instance of
+** the following structure */
+struct yyParser {
+  yyStackEntry *yytos;          /* Pointer to top element of the stack */
+#ifdef YYTRACKMAXSTACKDEPTH
+  int yyhwm;                    /* High-water mark of the stack */
+#endif
+#ifndef YYNOERRORRECOVERY
+  int yyerrcnt;                 /* Shifts left before leaving error-recovery mode */
+#endif
+  sqlite3ParserARG_SDECL                /* A place to hold %extra_argument */
+#if YYSTACKDEPTH<=0
+  int yystksz;                  /* Current size of the stack */
+  yyStackEntry *yystack;        /* The parser's stack */
+  yyStackEntry yystk0;          /* First stack entry */
+#else
+  yyStackEntry yystack[YYSTACKDEPTH];  /* The parser's stack */
+#endif
+};
+typedef struct yyParser yyParser;
+
+#ifndef NDEBUG
+/* #include <stdio.h> */
+static FILE *yyTraceFILE = 0;
+static char *yyTracePrompt = 0;
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+/*
+** Turn parser tracing on by giving a stream to which to write the trace
+** and a prompt to preface each trace message.  Tracing is turned off
+** by making either argument NULL.
+**
+** Inputs:
+** <ul>
+** <li> A FILE* to which trace output should be written.
+**      If NULL, then tracing is turned off.
+** <li> A prefix string written at the beginning of every
+**      line of trace output.  If NULL, then tracing is
+**      turned off.
+** </ul>
    +** +** Outputs: +** None. +*/ +SQLITE_PRIVATE void sqlite3ParserTrace(FILE *TraceFILE, char *zTracePrompt){ + yyTraceFILE = TraceFILE; + yyTracePrompt = zTracePrompt; + if( yyTraceFILE==0 ) yyTracePrompt = 0; + else if( yyTracePrompt==0 ) yyTraceFILE = 0; +} +#endif /* NDEBUG */ + +#ifndef NDEBUG +/* For tracing shifts, the names of all terminals and nonterminals +** are required. The following table supplies these names */ +static const char *const yyTokenName[] = { + "$", "SEMI", "EXPLAIN", "QUERY", + "PLAN", "BEGIN", "TRANSACTION", "DEFERRED", + "IMMEDIATE", "EXCLUSIVE", "COMMIT", "END", + "ROLLBACK", "SAVEPOINT", "RELEASE", "TO", + "TABLE", "CREATE", "IF", "NOT", + "EXISTS", "TEMP", "LP", "RP", + "AS", "WITHOUT", "COMMA", "OR", + "AND", "IS", "MATCH", "LIKE_KW", + "BETWEEN", "IN", "ISNULL", "NOTNULL", + "NE", "EQ", "GT", "LE", + "LT", "GE", "ESCAPE", "BITAND", + "BITOR", "LSHIFT", "RSHIFT", "PLUS", + "MINUS", "STAR", "SLASH", "REM", + "CONCAT", "COLLATE", "BITNOT", "ID", + "INDEXED", "ABORT", "ACTION", "AFTER", + "ANALYZE", "ASC", "ATTACH", "BEFORE", + "BY", "CASCADE", "CAST", "COLUMNKW", + "CONFLICT", "DATABASE", "DESC", "DETACH", + "EACH", "FAIL", "FOR", "IGNORE", + "INITIALLY", "INSTEAD", "NO", "KEY", + "OF", "OFFSET", "PRAGMA", "RAISE", + "RECURSIVE", "REPLACE", "RESTRICT", "ROW", + "TRIGGER", "VACUUM", "VIEW", "VIRTUAL", + "WITH", "REINDEX", "RENAME", "CTIME_KW", + "ANY", "STRING", "JOIN_KW", "CONSTRAINT", + "DEFAULT", "NULL", "PRIMARY", "UNIQUE", + "CHECK", "REFERENCES", "AUTOINCR", "ON", + "INSERT", "DELETE", "UPDATE", "SET", + "DEFERRABLE", "FOREIGN", "DROP", "UNION", + "ALL", "EXCEPT", "INTERSECT", "SELECT", + "VALUES", "DISTINCT", "DOT", "FROM", + "JOIN", "USING", "ORDER", "GROUP", + "HAVING", "LIMIT", "WHERE", "INTO", + "FLOAT", "BLOB", "INTEGER", "VARIABLE", + "CASE", "WHEN", "THEN", "ELSE", + "INDEX", "ALTER", "ADD", "error", + "input", "cmdlist", "ecmd", "explain", + "cmdx", "cmd", "transtype", "trans_opt", + "nm", "savepoint_opt", "create_table", "create_table_args", + "createkw", "temp", "ifnotexists", "dbnm", + "columnlist", "conslist_opt", "table_options", "select", + "columnname", "carglist", "typetoken", "typename", + "signed", "plus_num", "minus_num", "ccons", + "term", "expr", "onconf", "sortorder", + "autoinc", "eidlist_opt", "refargs", "defer_subclause", + "refarg", "refact", "init_deferred_pred_opt", "conslist", + "tconscomma", "tcons", "sortlist", "eidlist", + "defer_subclause_opt", "orconf", "resolvetype", "raisetype", + "ifexists", "fullname", "selectnowith", "oneselect", + "with", "multiselect_op", "distinct", "selcollist", + "from", "where_opt", "groupby_opt", "having_opt", + "orderby_opt", "limit_opt", "values", "nexprlist", + "exprlist", "sclp", "as", "seltablist", + "stl_prefix", "joinop", "indexed_opt", "on_opt", + "using_opt", "idlist", "setlist", "insert_cmd", + "idlist_opt", "likeop", "between_op", "in_op", + "paren_exprlist", "case_operand", "case_exprlist", "case_else", + "uniqueflag", "collate", "nmnum", "trigger_decl", + "trigger_cmd_list", "trigger_time", "trigger_event", "foreach_clause", + "when_clause", "trigger_cmd", "trnm", "tridxby", + "database_kw_opt", "key_opt", "add_column_fullname", "kwcolumn_opt", + "create_vtab", "vtabarglist", "vtabarg", "vtabargtoken", + "lp", "anylist", "wqlist", +}; +#endif /* NDEBUG */ + +#ifndef NDEBUG +/* For tracing reduce actions, the names of all rules are required. 
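+** They are echoed in sqlite3ParserTrace() output; for example, after
+**
+**     sqlite3ParserTrace(stderr, "parser: ");
+**
+** each reduction is reported on stderr as a line of the form
+**
+**     parser: Reduce [explain ::= EXPLAIN], go to state 123.
+**
+** (The prompt string and state number shown here are illustrative.)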
+*/ +static const char *const yyRuleName[] = { + /* 0 */ "explain ::= EXPLAIN", + /* 1 */ "explain ::= EXPLAIN QUERY PLAN", + /* 2 */ "cmdx ::= cmd", + /* 3 */ "cmd ::= BEGIN transtype trans_opt", + /* 4 */ "transtype ::=", + /* 5 */ "transtype ::= DEFERRED", + /* 6 */ "transtype ::= IMMEDIATE", + /* 7 */ "transtype ::= EXCLUSIVE", + /* 8 */ "cmd ::= COMMIT trans_opt", + /* 9 */ "cmd ::= END trans_opt", + /* 10 */ "cmd ::= ROLLBACK trans_opt", + /* 11 */ "cmd ::= SAVEPOINT nm", + /* 12 */ "cmd ::= RELEASE savepoint_opt nm", + /* 13 */ "cmd ::= ROLLBACK trans_opt TO savepoint_opt nm", + /* 14 */ "create_table ::= createkw temp TABLE ifnotexists nm dbnm", + /* 15 */ "createkw ::= CREATE", + /* 16 */ "ifnotexists ::=", + /* 17 */ "ifnotexists ::= IF NOT EXISTS", + /* 18 */ "temp ::= TEMP", + /* 19 */ "temp ::=", + /* 20 */ "create_table_args ::= LP columnlist conslist_opt RP table_options", + /* 21 */ "create_table_args ::= AS select", + /* 22 */ "table_options ::=", + /* 23 */ "table_options ::= WITHOUT nm", + /* 24 */ "columnname ::= nm typetoken", + /* 25 */ "typetoken ::=", + /* 26 */ "typetoken ::= typename LP signed RP", + /* 27 */ "typetoken ::= typename LP signed COMMA signed RP", + /* 28 */ "typename ::= typename ID|STRING", + /* 29 */ "ccons ::= CONSTRAINT nm", + /* 30 */ "ccons ::= DEFAULT term", + /* 31 */ "ccons ::= DEFAULT LP expr RP", + /* 32 */ "ccons ::= DEFAULT PLUS term", + /* 33 */ "ccons ::= DEFAULT MINUS term", + /* 34 */ "ccons ::= DEFAULT ID|INDEXED", + /* 35 */ "ccons ::= NOT NULL onconf", + /* 36 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc", + /* 37 */ "ccons ::= UNIQUE onconf", + /* 38 */ "ccons ::= CHECK LP expr RP", + /* 39 */ "ccons ::= REFERENCES nm eidlist_opt refargs", + /* 40 */ "ccons ::= defer_subclause", + /* 41 */ "ccons ::= COLLATE ID|STRING", + /* 42 */ "autoinc ::=", + /* 43 */ "autoinc ::= AUTOINCR", + /* 44 */ "refargs ::=", + /* 45 */ "refargs ::= refargs refarg", + /* 46 */ "refarg ::= MATCH nm", + /* 47 */ "refarg ::= ON INSERT refact", + /* 48 */ "refarg ::= ON DELETE refact", + /* 49 */ "refarg ::= ON UPDATE refact", + /* 50 */ "refact ::= SET NULL", + /* 51 */ "refact ::= SET DEFAULT", + /* 52 */ "refact ::= CASCADE", + /* 53 */ "refact ::= RESTRICT", + /* 54 */ "refact ::= NO ACTION", + /* 55 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt", + /* 56 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt", + /* 57 */ "init_deferred_pred_opt ::=", + /* 58 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED", + /* 59 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE", + /* 60 */ "conslist_opt ::=", + /* 61 */ "tconscomma ::= COMMA", + /* 62 */ "tcons ::= CONSTRAINT nm", + /* 63 */ "tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf", + /* 64 */ "tcons ::= UNIQUE LP sortlist RP onconf", + /* 65 */ "tcons ::= CHECK LP expr RP onconf", + /* 66 */ "tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt", + /* 67 */ "defer_subclause_opt ::=", + /* 68 */ "onconf ::=", + /* 69 */ "onconf ::= ON CONFLICT resolvetype", + /* 70 */ "orconf ::=", + /* 71 */ "orconf ::= OR resolvetype", + /* 72 */ "resolvetype ::= IGNORE", + /* 73 */ "resolvetype ::= REPLACE", + /* 74 */ "cmd ::= DROP TABLE ifexists fullname", + /* 75 */ "ifexists ::= IF EXISTS", + /* 76 */ "ifexists ::=", + /* 77 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select", + /* 78 */ "cmd ::= DROP VIEW ifexists fullname", + /* 79 */ "cmd ::= select", + /* 80 */ "select ::= with selectnowith", + /* 81 */ "selectnowith 
::= selectnowith multiselect_op oneselect", + /* 82 */ "multiselect_op ::= UNION", + /* 83 */ "multiselect_op ::= UNION ALL", + /* 84 */ "multiselect_op ::= EXCEPT|INTERSECT", + /* 85 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", + /* 86 */ "values ::= VALUES LP nexprlist RP", + /* 87 */ "values ::= values COMMA LP exprlist RP", + /* 88 */ "distinct ::= DISTINCT", + /* 89 */ "distinct ::= ALL", + /* 90 */ "distinct ::=", + /* 91 */ "sclp ::=", + /* 92 */ "selcollist ::= sclp expr as", + /* 93 */ "selcollist ::= sclp STAR", + /* 94 */ "selcollist ::= sclp nm DOT STAR", + /* 95 */ "as ::= AS nm", + /* 96 */ "as ::=", + /* 97 */ "from ::=", + /* 98 */ "from ::= FROM seltablist", + /* 99 */ "stl_prefix ::= seltablist joinop", + /* 100 */ "stl_prefix ::=", + /* 101 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt", + /* 102 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt", + /* 103 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt", + /* 104 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt", + /* 105 */ "dbnm ::=", + /* 106 */ "dbnm ::= DOT nm", + /* 107 */ "fullname ::= nm dbnm", + /* 108 */ "joinop ::= COMMA|JOIN", + /* 109 */ "joinop ::= JOIN_KW JOIN", + /* 110 */ "joinop ::= JOIN_KW nm JOIN", + /* 111 */ "joinop ::= JOIN_KW nm nm JOIN", + /* 112 */ "on_opt ::= ON expr", + /* 113 */ "on_opt ::=", + /* 114 */ "indexed_opt ::=", + /* 115 */ "indexed_opt ::= INDEXED BY nm", + /* 116 */ "indexed_opt ::= NOT INDEXED", + /* 117 */ "using_opt ::= USING LP idlist RP", + /* 118 */ "using_opt ::=", + /* 119 */ "orderby_opt ::=", + /* 120 */ "orderby_opt ::= ORDER BY sortlist", + /* 121 */ "sortlist ::= sortlist COMMA expr sortorder", + /* 122 */ "sortlist ::= expr sortorder", + /* 123 */ "sortorder ::= ASC", + /* 124 */ "sortorder ::= DESC", + /* 125 */ "sortorder ::=", + /* 126 */ "groupby_opt ::=", + /* 127 */ "groupby_opt ::= GROUP BY nexprlist", + /* 128 */ "having_opt ::=", + /* 129 */ "having_opt ::= HAVING expr", + /* 130 */ "limit_opt ::=", + /* 131 */ "limit_opt ::= LIMIT expr", + /* 132 */ "limit_opt ::= LIMIT expr OFFSET expr", + /* 133 */ "limit_opt ::= LIMIT expr COMMA expr", + /* 134 */ "cmd ::= with DELETE FROM fullname indexed_opt where_opt", + /* 135 */ "where_opt ::=", + /* 136 */ "where_opt ::= WHERE expr", + /* 137 */ "cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt", + /* 138 */ "setlist ::= setlist COMMA nm EQ expr", + /* 139 */ "setlist ::= setlist COMMA LP idlist RP EQ expr", + /* 140 */ "setlist ::= nm EQ expr", + /* 141 */ "setlist ::= LP idlist RP EQ expr", + /* 142 */ "cmd ::= with insert_cmd INTO fullname idlist_opt select", + /* 143 */ "cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES", + /* 144 */ "insert_cmd ::= INSERT orconf", + /* 145 */ "insert_cmd ::= REPLACE", + /* 146 */ "idlist_opt ::=", + /* 147 */ "idlist_opt ::= LP idlist RP", + /* 148 */ "idlist ::= idlist COMMA nm", + /* 149 */ "idlist ::= nm", + /* 150 */ "expr ::= LP expr RP", + /* 151 */ "term ::= NULL", + /* 152 */ "expr ::= ID|INDEXED", + /* 153 */ "expr ::= JOIN_KW", + /* 154 */ "expr ::= nm DOT nm", + /* 155 */ "expr ::= nm DOT nm DOT nm", + /* 156 */ "term ::= FLOAT|BLOB", + /* 157 */ "term ::= STRING", + /* 158 */ "term ::= INTEGER", + /* 159 */ "expr ::= VARIABLE", + /* 160 */ "expr ::= expr COLLATE ID|STRING", + /* 161 */ "expr ::= CAST LP expr AS typetoken RP", + /* 162 */ "expr ::= ID|INDEXED LP distinct exprlist RP", + /* 
163 */ "expr ::= ID|INDEXED LP STAR RP", + /* 164 */ "term ::= CTIME_KW", + /* 165 */ "expr ::= LP nexprlist COMMA expr RP", + /* 166 */ "expr ::= expr AND expr", + /* 167 */ "expr ::= expr OR expr", + /* 168 */ "expr ::= expr LT|GT|GE|LE expr", + /* 169 */ "expr ::= expr EQ|NE expr", + /* 170 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 171 */ "expr ::= expr PLUS|MINUS expr", + /* 172 */ "expr ::= expr STAR|SLASH|REM expr", + /* 173 */ "expr ::= expr CONCAT expr", + /* 174 */ "likeop ::= LIKE_KW|MATCH", + /* 175 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 176 */ "expr ::= expr likeop expr", + /* 177 */ "expr ::= expr likeop expr ESCAPE expr", + /* 178 */ "expr ::= expr ISNULL|NOTNULL", + /* 179 */ "expr ::= expr NOT NULL", + /* 180 */ "expr ::= expr IS expr", + /* 181 */ "expr ::= expr IS NOT expr", + /* 182 */ "expr ::= NOT expr", + /* 183 */ "expr ::= BITNOT expr", + /* 184 */ "expr ::= MINUS expr", + /* 185 */ "expr ::= PLUS expr", + /* 186 */ "between_op ::= BETWEEN", + /* 187 */ "between_op ::= NOT BETWEEN", + /* 188 */ "expr ::= expr between_op expr AND expr", + /* 189 */ "in_op ::= IN", + /* 190 */ "in_op ::= NOT IN", + /* 191 */ "expr ::= expr in_op LP exprlist RP", + /* 192 */ "expr ::= LP select RP", + /* 193 */ "expr ::= expr in_op LP select RP", + /* 194 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 195 */ "expr ::= EXISTS LP select RP", + /* 196 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 197 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 198 */ "case_exprlist ::= WHEN expr THEN expr", + /* 199 */ "case_else ::= ELSE expr", + /* 200 */ "case_else ::=", + /* 201 */ "case_operand ::= expr", + /* 202 */ "case_operand ::=", + /* 203 */ "exprlist ::=", + /* 204 */ "nexprlist ::= nexprlist COMMA expr", + /* 205 */ "nexprlist ::= expr", + /* 206 */ "paren_exprlist ::=", + /* 207 */ "paren_exprlist ::= LP exprlist RP", + /* 208 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 209 */ "uniqueflag ::= UNIQUE", + /* 210 */ "uniqueflag ::=", + /* 211 */ "eidlist_opt ::=", + /* 212 */ "eidlist_opt ::= LP eidlist RP", + /* 213 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 214 */ "eidlist ::= nm collate sortorder", + /* 215 */ "collate ::=", + /* 216 */ "collate ::= COLLATE ID|STRING", + /* 217 */ "cmd ::= DROP INDEX ifexists fullname", + /* 218 */ "cmd ::= VACUUM", + /* 219 */ "cmd ::= VACUUM nm", + /* 220 */ "cmd ::= PRAGMA nm dbnm", + /* 221 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 222 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 223 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 224 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 225 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 226 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 227 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 228 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 229 */ "trigger_time ::= BEFORE", + /* 230 */ "trigger_time ::= AFTER", + /* 231 */ "trigger_time ::= INSTEAD OF", + /* 232 */ "trigger_time ::=", + /* 233 */ "trigger_event ::= DELETE|INSERT", + /* 234 */ "trigger_event ::= UPDATE", + /* 235 */ "trigger_event ::= UPDATE OF idlist", + /* 236 */ "when_clause ::=", + /* 237 */ "when_clause ::= WHEN expr", + /* 238 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 239 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 240 */ "trnm ::= nm DOT nm", + /* 241 */ "tridxby ::= INDEXED BY nm", + /* 
242 */ "tridxby ::= NOT INDEXED", + /* 243 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt", + /* 244 */ "trigger_cmd ::= insert_cmd INTO trnm idlist_opt select", + /* 245 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt", + /* 246 */ "trigger_cmd ::= select", + /* 247 */ "expr ::= RAISE LP IGNORE RP", + /* 248 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 249 */ "raisetype ::= ROLLBACK", + /* 250 */ "raisetype ::= ABORT", + /* 251 */ "raisetype ::= FAIL", + /* 252 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 253 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 254 */ "cmd ::= DETACH database_kw_opt expr", + /* 255 */ "key_opt ::=", + /* 256 */ "key_opt ::= KEY expr", + /* 257 */ "cmd ::= REINDEX", + /* 258 */ "cmd ::= REINDEX nm dbnm", + /* 259 */ "cmd ::= ANALYZE", + /* 260 */ "cmd ::= ANALYZE nm dbnm", + /* 261 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 262 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 263 */ "add_column_fullname ::= fullname", + /* 264 */ "cmd ::= create_vtab", + /* 265 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 266 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 267 */ "vtabarg ::=", + /* 268 */ "vtabargtoken ::= ANY", + /* 269 */ "vtabargtoken ::= lp anylist RP", + /* 270 */ "lp ::= LP", + /* 271 */ "with ::=", + /* 272 */ "with ::= WITH wqlist", + /* 273 */ "with ::= WITH RECURSIVE wqlist", + /* 274 */ "wqlist ::= nm eidlist_opt AS LP select RP", + /* 275 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP", + /* 276 */ "input ::= cmdlist", + /* 277 */ "cmdlist ::= cmdlist ecmd", + /* 278 */ "cmdlist ::= ecmd", + /* 279 */ "ecmd ::= SEMI", + /* 280 */ "ecmd ::= explain cmdx SEMI", + /* 281 */ "explain ::=", + /* 282 */ "trans_opt ::=", + /* 283 */ "trans_opt ::= TRANSACTION", + /* 284 */ "trans_opt ::= TRANSACTION nm", + /* 285 */ "savepoint_opt ::= SAVEPOINT", + /* 286 */ "savepoint_opt ::=", + /* 287 */ "cmd ::= create_table create_table_args", + /* 288 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 289 */ "columnlist ::= columnname carglist", + /* 290 */ "nm ::= ID|INDEXED", + /* 291 */ "nm ::= STRING", + /* 292 */ "nm ::= JOIN_KW", + /* 293 */ "typetoken ::= typename", + /* 294 */ "typename ::= ID|STRING", + /* 295 */ "signed ::= plus_num", + /* 296 */ "signed ::= minus_num", + /* 297 */ "carglist ::= carglist ccons", + /* 298 */ "carglist ::=", + /* 299 */ "ccons ::= NULL onconf", + /* 300 */ "conslist_opt ::= COMMA conslist", + /* 301 */ "conslist ::= conslist tconscomma tcons", + /* 302 */ "conslist ::= tcons", + /* 303 */ "tconscomma ::=", + /* 304 */ "defer_subclause_opt ::= defer_subclause", + /* 305 */ "resolvetype ::= raisetype", + /* 306 */ "selectnowith ::= oneselect", + /* 307 */ "oneselect ::= values", + /* 308 */ "sclp ::= selcollist COMMA", + /* 309 */ "as ::= ID|STRING", + /* 310 */ "expr ::= term", + /* 311 */ "exprlist ::= nexprlist", + /* 312 */ "nmnum ::= plus_num", + /* 313 */ "nmnum ::= nm", + /* 314 */ "nmnum ::= ON", + /* 315 */ "nmnum ::= DELETE", + /* 316 */ "nmnum ::= DEFAULT", + /* 317 */ "plus_num ::= INTEGER|FLOAT", + /* 318 */ "foreach_clause ::=", + /* 319 */ "foreach_clause ::= FOR EACH ROW", + /* 320 */ "trnm ::= nm", + /* 321 */ "tridxby ::=", + /* 322 */ "database_kw_opt ::= DATABASE", + /* 323 */ "database_kw_opt ::=", + /* 324 */ "kwcolumn_opt ::=", + /* 325 */ "kwcolumn_opt ::= COLUMNKW", + /* 326 */ "vtabarglist ::= vtabarg", + /* 327 */ "vtabarglist ::= 
vtabarglist COMMA vtabarg", + /* 328 */ "vtabarg ::= vtabarg vtabargtoken", + /* 329 */ "anylist ::=", + /* 330 */ "anylist ::= anylist LP anylist RP", + /* 331 */ "anylist ::= anylist ANY", +}; +#endif /* NDEBUG */ + + +#if YYSTACKDEPTH<=0 +/* +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. +*/ +static int yyGrowStack(yyParser *p){ + int newSize; + int idx; + yyStackEntry *pNew; + + newSize = p->yystksz*2 + 100; + idx = p->yytos ? (int)(p->yytos - p->yystack) : 0; + if( p->yystack==&p->yystk0 ){ + pNew = malloc(newSize*sizeof(pNew[0])); + if( pNew ) pNew[0] = p->yystk0; + }else{ + pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + } + if( pNew ){ + p->yystack = pNew; + p->yytos = &p->yystack[idx]; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, p->yystksz, newSize); + } +#endif + p->yystksz = newSize; + } + return pNew==0; +} +#endif + +/* Datatype of the argument to the memory allocated passed as the +** second argument to sqlite3ParserAlloc() below. This can be changed by +** putting an appropriate #define in the %include section of the input +** grammar. +*/ +#ifndef YYMALLOCARGTYPE +# define YYMALLOCARGTYPE size_t +#endif + +/* Initialize a new parser that has already been allocated. +*/ +SQLITE_PRIVATE void sqlite3ParserInit(void *yypParser){ + yyParser *pParser = (yyParser*)yypParser; +#ifdef YYTRACKMAXSTACKDEPTH + pParser->yyhwm = 0; +#endif +#if YYSTACKDEPTH<=0 + pParser->yytos = NULL; + pParser->yystack = NULL; + pParser->yystksz = 0; + if( yyGrowStack(pParser) ){ + pParser->yystack = &pParser->yystk0; + pParser->yystksz = 1; + } +#endif +#ifndef YYNOERRORRECOVERY + pParser->yyerrcnt = -1; +#endif + pParser->yytos = pParser->yystack; + pParser->yystack[0].stateno = 0; + pParser->yystack[0].major = 0; +} + +#ifndef sqlite3Parser_ENGINEALWAYSONSTACK +/* +** This function allocates a new parser. +** The only argument is a pointer to a function which works like +** malloc. +** +** Inputs: +** A pointer to the function used to allocate memory. +** +** Outputs: +** A pointer to a parser. This pointer is used in subsequent calls +** to sqlite3Parser and sqlite3ParserFree. +*/ +SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ + yyParser *pParser; + pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); + if( pParser ) sqlite3ParserInit(pParser); + return pParser; +} +#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */ + + +/* The following function deletes the "minor type" or semantic value +** associated with a symbol. The symbol can be either a terminal +** or nonterminal. "yymajor" is the symbol code, and "yypminor" is +** a pointer to the value to be deleted. The code used to do the +** deletions is derived from the %destructor and/or %token_destructor +** directives of the input grammar. +*/ +static void yy_destructor( + yyParser *yypParser, /* The parser */ + YYCODETYPE yymajor, /* Type code for object to destroy */ + YYMINORTYPE *yypminor /* The object to be destroyed */ +){ + sqlite3ParserARG_FETCH; + switch( yymajor ){ + /* Here is inserted the actions which take place when a + ** terminal or non-terminal is destroyed. This can happen + ** when the symbol is popped from the stack during a + ** reduce or during error processing or when a parser is + ** being destroyed before it is finished parsing. 
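+  ** For example, an abandoned "select" nonterminal below is released
+  ** with sqlite3SelectDelete() and an abandoned "expr" with
+  ** sqlite3ExprDelete(), so a parse that stops early does not leak
+  ** partially-built parse trees.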
+ ** + ** Note: during a reduce, the only symbols destroyed are those + ** which appear on the RHS of the rule, but which are *not* used + ** inside the C code. + */ +/********* Begin destructor definitions ***************************************/ + case 163: /* select */ + case 194: /* selectnowith */ + case 195: /* oneselect */ + case 206: /* values */ +{ +sqlite3SelectDelete(pParse->db, (yypminor->yy243)); +} + break; + case 172: /* term */ + case 173: /* expr */ +{ +sqlite3ExprDelete(pParse->db, (yypminor->yy190).pExpr); +} + break; + case 177: /* eidlist_opt */ + case 186: /* sortlist */ + case 187: /* eidlist */ + case 199: /* selcollist */ + case 202: /* groupby_opt */ + case 204: /* orderby_opt */ + case 207: /* nexprlist */ + case 208: /* exprlist */ + case 209: /* sclp */ + case 218: /* setlist */ + case 224: /* paren_exprlist */ + case 226: /* case_exprlist */ +{ +sqlite3ExprListDelete(pParse->db, (yypminor->yy148)); +} + break; + case 193: /* fullname */ + case 200: /* from */ + case 211: /* seltablist */ + case 212: /* stl_prefix */ +{ +sqlite3SrcListDelete(pParse->db, (yypminor->yy185)); +} + break; + case 196: /* with */ + case 250: /* wqlist */ +{ +sqlite3WithDelete(pParse->db, (yypminor->yy285)); +} + break; + case 201: /* where_opt */ + case 203: /* having_opt */ + case 215: /* on_opt */ + case 225: /* case_operand */ + case 227: /* case_else */ + case 236: /* when_clause */ + case 241: /* key_opt */ +{ +sqlite3ExprDelete(pParse->db, (yypminor->yy72)); +} + break; + case 216: /* using_opt */ + case 217: /* idlist */ + case 220: /* idlist_opt */ +{ +sqlite3IdListDelete(pParse->db, (yypminor->yy254)); +} + break; + case 232: /* trigger_cmd_list */ + case 237: /* trigger_cmd */ +{ +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy145)); +} + break; + case 234: /* trigger_event */ +{ +sqlite3IdListDelete(pParse->db, (yypminor->yy332).b); +} + break; +/********* End destructor definitions *****************************************/ + default: break; /* If no destructor action specified: do nothing */ + } +} + +/* +** Pop the parser's stack once. +** +** If there is a destructor routine associated with the token which +** is popped from the stack, then call it. +*/ +static void yy_pop_parser_stack(yyParser *pParser){ + yyStackEntry *yytos; + assert( pParser->yytos!=0 ); + assert( pParser->yytos > pParser->yystack ); + yytos = pParser->yytos--; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sPopping %s\n", + yyTracePrompt, + yyTokenName[yytos->major]); + } +#endif + yy_destructor(pParser, yytos->major, &yytos->minor); +} + +/* +** Clear all secondary memory allocations from the parser +*/ +SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){ + yyParser *pParser = (yyParser*)p; + while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); +#if YYSTACKDEPTH<=0 + if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); +#endif +} + +#ifndef sqlite3Parser_ENGINEALWAYSONSTACK +/* +** Deallocate and destroy a parser. Destructors are called for +** all stack elements before shutting the parser down. +** +** If the YYPARSEFREENEVERNULL macro exists (for example because it +** is defined in a %include section of the input grammar) then it is +** assumed that the input pointer is never NULL. 
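+**
+** A minimal sketch of the intended pairing (the allocator choice here
+** is illustrative, not mandated by this file):
+**
+**     void *pParser = sqlite3ParserAlloc( malloc );
+**     ...                  feed tokens via sqlite3Parser() ...
+**     sqlite3ParserFree( pParser, free );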
+*/
+SQLITE_PRIVATE void sqlite3ParserFree(
+  void *p,                    /* The parser to be deleted */
+  void (*freeProc)(void*)     /* Function used to reclaim memory */
+){
+#ifndef YYPARSEFREENEVERNULL
+  if( p==0 ) return;
+#endif
+  sqlite3ParserFinalize(p);
+  (*freeProc)(p);
+}
+#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */
+
+/*
+** Return the peak depth of the stack for a parser.
+*/
+#ifdef YYTRACKMAXSTACKDEPTH
+SQLITE_PRIVATE int sqlite3ParserStackPeak(void *p){
+  yyParser *pParser = (yyParser*)p;
+  return pParser->yyhwm;
+}
+#endif
+
+/*
+** Find the appropriate action for a parser given the terminal
+** look-ahead token iLookAhead.
+*/
+static unsigned int yy_find_shift_action(
+  yyParser *pParser,        /* The parser */
+  YYCODETYPE iLookAhead     /* The look-ahead token */
+){
+  int i;
+  int stateno = pParser->yytos->stateno;
+
+  if( stateno>=YY_MIN_REDUCE ) return stateno;
+  assert( stateno <= YY_SHIFT_COUNT );
+  do{
+    i = yy_shift_ofst[stateno];
+    assert( iLookAhead!=YYNOCODE );
+    i += iLookAhead;
+    if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+#ifdef YYFALLBACK
+      YYCODETYPE iFallback;            /* Fallback token */
+      if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
+             && (iFallback = yyFallback[iLookAhead])!=0 ){
+#ifndef NDEBUG
+        if( yyTraceFILE ){
+          fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+             yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
+        }
+#endif
+        assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+        iLookAhead = iFallback;
+        continue;
+      }
+#endif
+#ifdef YYWILDCARD
+      {
+        int j = i - iLookAhead + YYWILDCARD;
+        if(
+#if YY_SHIFT_MIN+YYWILDCARD<0
+          j>=0 &&
+#endif
+#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
+          j<YY_ACTTAB_COUNT &&
+#endif
+          yy_lookahead[j]==YYWILDCARD && iLookAhead>0
+        ){
+#ifndef NDEBUG
+          if( yyTraceFILE ){
+            fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+               yyTracePrompt, yyTokenName[iLookAhead],
+               yyTokenName[YYWILDCARD]);
+          }
+#endif /* NDEBUG */
+          return yy_action[j];
+        }
+      }
+#endif /* YYWILDCARD */
+      return yy_default[stateno];
+    }else{
+      return yy_action[i];
+    }
+  }while(1);
+}
+
+/*
+** Find the appropriate action for a parser given the non-terminal
+** look-ahead token iLookAhead.
+*/
+static int yy_find_reduce_action(
+  int stateno,              /* Current state number */
+  YYCODETYPE iLookAhead     /* The look-ahead token */
+){
+  int i;
+#ifdef YYERRORSYMBOL
+  if( stateno>YY_REDUCE_COUNT ){
+    return yy_default[stateno];
+  }
+#else
+  assert( stateno<=YY_REDUCE_COUNT );
+#endif
+  i = yy_reduce_ofst[stateno];
+  assert( i!=YY_REDUCE_USE_DFLT );
+  assert( iLookAhead!=YYNOCODE );
+  i += iLookAhead;
+#ifdef YYERRORSYMBOL
+  if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
+    return yy_default[stateno];
+  }
+#else
+  assert( i>=0 && i<YY_ACTTAB_COUNT );
+  assert( yy_lookahead[i]==iLookAhead );
+#endif
+  return yy_action[i];
+}
+
+/*
+** The following routine is called if the stack overflows.
+*/
+static void yyStackOverflow(yyParser *yypParser){
+   sqlite3ParserARG_FETCH;
+#ifndef NDEBUG
+   if( yyTraceFILE ){
+     fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
+   }
+#endif
+   while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+   /* Here code is inserted which will execute if the parser
+   ** stack ever overflows */
+/******** Begin %stack_overflow code ******************************************/
+
+  sqlite3ErrorMsg(pParse, "parser stack overflow");
+/******** End %stack_overflow code ********************************************/
+   sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument var */
+}
+
+/*
+** Print tracing information for a SHIFT action
+*/
+#ifndef NDEBUG
+static void yyTraceShift(yyParser *yypParser, int yyNewState){
+  if( yyTraceFILE ){
+    if( yyNewState<YYNSTATE ){
+      fprintf(yyTraceFILE,"%sShift '%s', go to state %d\n",
+         yyTracePrompt,yyTokenName[yypParser->yytos->major],
+         yyNewState);
+    }else{
+      fprintf(yyTraceFILE,"%sShift '%s'\n",
+         yyTracePrompt,yyTokenName[yypParser->yytos->major]);
+    }
+  }
+}
+#else
+# define yyTraceShift(X,Y)
+#endif
+
+/*
+** Perform a shift action.
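+** Each shift pushes one yyStackEntry holding the new state number, the
+** major token code, and the token's semantic value; if the push would
+** overflow, control goes to yyStackOverflow() (or the stack is first
+** grown via yyGrowStack() when YYSTACKDEPTH<=0).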
+*/ +static void yy_shift( + yyParser *yypParser, /* The parser to be shifted */ + int yyNewState, /* The new state to shift in */ + int yyMajor, /* The major token to shift in */ + sqlite3ParserTOKENTYPE yyMinor /* The minor token to shift in */ +){ + yyStackEntry *yytos; + yypParser->yytos++; +#ifdef YYTRACKMAXSTACKDEPTH + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yytos>=&yypParser->yystack[YYSTACKDEPTH] ){ + yypParser->yytos--; + yyStackOverflow(yypParser); + return; + } +#else + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ + if( yyGrowStack(yypParser) ){ + yypParser->yytos--; + yyStackOverflow(yypParser); + return; + } + } +#endif + if( yyNewState > YY_MAX_SHIFT ){ + yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; + } + yytos = yypParser->yytos; + yytos->stateno = (YYACTIONTYPE)yyNewState; + yytos->major = (YYCODETYPE)yyMajor; + yytos->minor.yy0 = yyMinor; + yyTraceShift(yypParser, yyNewState); +} + +/* The following table contains information about every rule that +** is used during the reduce. +*/ +static const struct { + YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ + unsigned char nrhs; /* Number of right-hand side symbols in the rule */ +} yyRuleInfo[] = { + { 147, 1 }, + { 147, 3 }, + { 148, 1 }, + { 149, 3 }, + { 150, 0 }, + { 150, 1 }, + { 150, 1 }, + { 150, 1 }, + { 149, 2 }, + { 149, 2 }, + { 149, 2 }, + { 149, 2 }, + { 149, 3 }, + { 149, 5 }, + { 154, 6 }, + { 156, 1 }, + { 158, 0 }, + { 158, 3 }, + { 157, 1 }, + { 157, 0 }, + { 155, 5 }, + { 155, 2 }, + { 162, 0 }, + { 162, 2 }, + { 164, 2 }, + { 166, 0 }, + { 166, 4 }, + { 166, 6 }, + { 167, 2 }, + { 171, 2 }, + { 171, 2 }, + { 171, 4 }, + { 171, 3 }, + { 171, 3 }, + { 171, 2 }, + { 171, 3 }, + { 171, 5 }, + { 171, 2 }, + { 171, 4 }, + { 171, 4 }, + { 171, 1 }, + { 171, 2 }, + { 176, 0 }, + { 176, 1 }, + { 178, 0 }, + { 178, 2 }, + { 180, 2 }, + { 180, 3 }, + { 180, 3 }, + { 180, 3 }, + { 181, 2 }, + { 181, 2 }, + { 181, 1 }, + { 181, 1 }, + { 181, 2 }, + { 179, 3 }, + { 179, 2 }, + { 182, 0 }, + { 182, 2 }, + { 182, 2 }, + { 161, 0 }, + { 184, 1 }, + { 185, 2 }, + { 185, 7 }, + { 185, 5 }, + { 185, 5 }, + { 185, 10 }, + { 188, 0 }, + { 174, 0 }, + { 174, 3 }, + { 189, 0 }, + { 189, 2 }, + { 190, 1 }, + { 190, 1 }, + { 149, 4 }, + { 192, 2 }, + { 192, 0 }, + { 149, 9 }, + { 149, 4 }, + { 149, 1 }, + { 163, 2 }, + { 194, 3 }, + { 197, 1 }, + { 197, 2 }, + { 197, 1 }, + { 195, 9 }, + { 206, 4 }, + { 206, 5 }, + { 198, 1 }, + { 198, 1 }, + { 198, 0 }, + { 209, 0 }, + { 199, 3 }, + { 199, 2 }, + { 199, 4 }, + { 210, 2 }, + { 210, 0 }, + { 200, 0 }, + { 200, 2 }, + { 212, 2 }, + { 212, 0 }, + { 211, 7 }, + { 211, 9 }, + { 211, 7 }, + { 211, 7 }, + { 159, 0 }, + { 159, 2 }, + { 193, 2 }, + { 213, 1 }, + { 213, 2 }, + { 213, 3 }, + { 213, 4 }, + { 215, 2 }, + { 215, 0 }, + { 214, 0 }, + { 214, 3 }, + { 214, 2 }, + { 216, 4 }, + { 216, 0 }, + { 204, 0 }, + { 204, 3 }, + { 186, 4 }, + { 186, 2 }, + { 175, 1 }, + { 175, 1 }, + { 175, 0 }, + { 202, 0 }, + { 202, 3 }, + { 203, 0 }, + { 203, 2 }, + { 205, 0 }, + { 205, 2 }, + { 205, 4 }, + { 205, 4 }, + { 149, 6 }, + { 201, 0 }, + { 201, 2 }, + { 149, 8 }, + { 218, 5 }, + { 218, 7 }, + { 218, 3 }, + { 218, 5 }, + { 149, 6 }, + { 149, 7 }, + { 219, 2 }, + { 219, 1 }, + { 220, 0 }, + { 220, 3 }, + { 217, 3 }, + { 217, 1 }, + { 173, 3 }, + { 172, 1 }, + { 173, 1 }, + { 173, 1 
}, + { 173, 3 }, + { 173, 5 }, + { 172, 1 }, + { 172, 1 }, + { 172, 1 }, + { 173, 1 }, + { 173, 3 }, + { 173, 6 }, + { 173, 5 }, + { 173, 4 }, + { 172, 1 }, + { 173, 5 }, + { 173, 3 }, + { 173, 3 }, + { 173, 3 }, + { 173, 3 }, + { 173, 3 }, + { 173, 3 }, + { 173, 3 }, + { 173, 3 }, + { 221, 1 }, + { 221, 2 }, + { 173, 3 }, + { 173, 5 }, + { 173, 2 }, + { 173, 3 }, + { 173, 3 }, + { 173, 4 }, + { 173, 2 }, + { 173, 2 }, + { 173, 2 }, + { 173, 2 }, + { 222, 1 }, + { 222, 2 }, + { 173, 5 }, + { 223, 1 }, + { 223, 2 }, + { 173, 5 }, + { 173, 3 }, + { 173, 5 }, + { 173, 5 }, + { 173, 4 }, + { 173, 5 }, + { 226, 5 }, + { 226, 4 }, + { 227, 2 }, + { 227, 0 }, + { 225, 1 }, + { 225, 0 }, + { 208, 0 }, + { 207, 3 }, + { 207, 1 }, + { 224, 0 }, + { 224, 3 }, + { 149, 12 }, + { 228, 1 }, + { 228, 0 }, + { 177, 0 }, + { 177, 3 }, + { 187, 5 }, + { 187, 3 }, + { 229, 0 }, + { 229, 2 }, + { 149, 4 }, + { 149, 1 }, + { 149, 2 }, + { 149, 3 }, + { 149, 5 }, + { 149, 6 }, + { 149, 5 }, + { 149, 6 }, + { 169, 2 }, + { 170, 2 }, + { 149, 5 }, + { 231, 11 }, + { 233, 1 }, + { 233, 1 }, + { 233, 2 }, + { 233, 0 }, + { 234, 1 }, + { 234, 1 }, + { 234, 3 }, + { 236, 0 }, + { 236, 2 }, + { 232, 3 }, + { 232, 2 }, + { 238, 3 }, + { 239, 3 }, + { 239, 2 }, + { 237, 7 }, + { 237, 5 }, + { 237, 5 }, + { 237, 1 }, + { 173, 4 }, + { 173, 6 }, + { 191, 1 }, + { 191, 1 }, + { 191, 1 }, + { 149, 4 }, + { 149, 6 }, + { 149, 3 }, + { 241, 0 }, + { 241, 2 }, + { 149, 1 }, + { 149, 3 }, + { 149, 1 }, + { 149, 3 }, + { 149, 6 }, + { 149, 7 }, + { 242, 1 }, + { 149, 1 }, + { 149, 4 }, + { 244, 8 }, + { 246, 0 }, + { 247, 1 }, + { 247, 3 }, + { 248, 1 }, + { 196, 0 }, + { 196, 2 }, + { 196, 3 }, + { 250, 6 }, + { 250, 8 }, + { 144, 1 }, + { 145, 2 }, + { 145, 1 }, + { 146, 1 }, + { 146, 3 }, + { 147, 0 }, + { 151, 0 }, + { 151, 1 }, + { 151, 2 }, + { 153, 1 }, + { 153, 0 }, + { 149, 2 }, + { 160, 4 }, + { 160, 2 }, + { 152, 1 }, + { 152, 1 }, + { 152, 1 }, + { 166, 1 }, + { 167, 1 }, + { 168, 1 }, + { 168, 1 }, + { 165, 2 }, + { 165, 0 }, + { 171, 2 }, + { 161, 2 }, + { 183, 3 }, + { 183, 1 }, + { 184, 0 }, + { 188, 1 }, + { 190, 1 }, + { 194, 1 }, + { 195, 1 }, + { 209, 2 }, + { 210, 1 }, + { 173, 1 }, + { 208, 1 }, + { 230, 1 }, + { 230, 1 }, + { 230, 1 }, + { 230, 1 }, + { 230, 1 }, + { 169, 1 }, + { 235, 0 }, + { 235, 3 }, + { 238, 1 }, + { 239, 0 }, + { 240, 1 }, + { 240, 0 }, + { 243, 0 }, + { 243, 1 }, + { 245, 1 }, + { 245, 3 }, + { 246, 2 }, + { 249, 0 }, + { 249, 4 }, + { 249, 2 }, +}; + +static void yy_accept(yyParser*); /* Forward Declaration */ + +/* +** Perform a reduce action and the shift that must immediately +** follow the reduce. +*/ +static void yy_reduce( + yyParser *yypParser, /* The parser */ + unsigned int yyruleno /* Number of the rule by which to reduce */ +){ + int yygoto; /* The next state */ + int yyact; /* The next action */ + yyStackEntry *yymsp; /* The top of the parser's stack */ + int yysize; /* Amount to pop the stack */ + sqlite3ParserARG_FETCH; + yymsp = yypParser->yytos; +#ifndef NDEBUG + if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ + yysize = yyRuleInfo[yyruleno].nrhs; + fprintf(yyTraceFILE, "%sReduce [%s], go to state %d.\n", yyTracePrompt, + yyRuleName[yyruleno], yymsp[-yysize].stateno); + } +#endif /* NDEBUG */ + + /* Check that the stack is large enough to grow by a single entry + ** if the RHS of the rule is empty. 
This ensures that there is room + ** enough on the stack to push the LHS value */ + if( yyRuleInfo[yyruleno].nrhs==0 ){ +#ifdef YYTRACKMAXSTACKDEPTH + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack)); + } +#endif +#if YYSTACKDEPTH>0 + if( yypParser->yytos>=&yypParser->yystack[YYSTACKDEPTH-1] ){ + yyStackOverflow(yypParser); + return; + } +#else + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ + if( yyGrowStack(yypParser) ){ + yyStackOverflow(yypParser); + return; + } + yymsp = yypParser->yytos; + } +#endif + } + + switch( yyruleno ){ + /* Beginning here are the reduction cases. A typical example + ** follows: + ** case 0: + ** #line + ** { ... } // User supplied code + ** #line + ** break; + */ +/********** Begin reduce actions **********************************************/ + YYMINORTYPE yylhsminor; + case 0: /* explain ::= EXPLAIN */ +{ pParse->explain = 1; } + break; + case 1: /* explain ::= EXPLAIN QUERY PLAN */ +{ pParse->explain = 2; } + break; + case 2: /* cmdx ::= cmd */ +{ sqlite3FinishCoding(pParse); } + break; + case 3: /* cmd ::= BEGIN transtype trans_opt */ +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy194);} + break; + case 4: /* transtype ::= */ +{yymsp[1].minor.yy194 = TK_DEFERRED;} + break; + case 5: /* transtype ::= DEFERRED */ + case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); + case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); +{yymsp[0].minor.yy194 = yymsp[0].major; /*A-overwrites-X*/} + break; + case 8: /* cmd ::= COMMIT trans_opt */ + case 9: /* cmd ::= END trans_opt */ yytestcase(yyruleno==9); +{sqlite3CommitTransaction(pParse);} + break; + case 10: /* cmd ::= ROLLBACK trans_opt */ +{sqlite3RollbackTransaction(pParse);} + break; + case 11: /* cmd ::= SAVEPOINT nm */ +{ + sqlite3Savepoint(pParse, SAVEPOINT_BEGIN, &yymsp[0].minor.yy0); +} + break; + case 12: /* cmd ::= RELEASE savepoint_opt nm */ +{ + sqlite3Savepoint(pParse, SAVEPOINT_RELEASE, &yymsp[0].minor.yy0); +} + break; + case 13: /* cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ +{ + sqlite3Savepoint(pParse, SAVEPOINT_ROLLBACK, &yymsp[0].minor.yy0); +} + break; + case 14: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ +{ + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy194,0,0,yymsp[-2].minor.yy194); +} + break; + case 15: /* createkw ::= CREATE */ +{disableLookaside(pParse);} + break; + case 16: /* ifnotexists ::= */ + case 19: /* temp ::= */ yytestcase(yyruleno==19); + case 22: /* table_options ::= */ yytestcase(yyruleno==22); + case 42: /* autoinc ::= */ yytestcase(yyruleno==42); + case 57: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==57); + case 67: /* defer_subclause_opt ::= */ yytestcase(yyruleno==67); + case 76: /* ifexists ::= */ yytestcase(yyruleno==76); + case 90: /* distinct ::= */ yytestcase(yyruleno==90); + case 215: /* collate ::= */ yytestcase(yyruleno==215); +{yymsp[1].minor.yy194 = 0;} + break; + case 17: /* ifnotexists ::= IF NOT EXISTS */ +{yymsp[-2].minor.yy194 = 1;} + break; + case 18: /* temp ::= TEMP */ + case 43: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==43); +{yymsp[0].minor.yy194 = 1;} + break; + case 20: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ +{ + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy194,0); +} + break; + case 21: /* create_table_args ::= AS select */ +{ + 
sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy243); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy243); +} + break; + case 23: /* table_options ::= WITHOUT nm */ +{ + if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ + yymsp[-1].minor.yy194 = TF_WithoutRowid | TF_NoVisibleRowid; + }else{ + yymsp[-1].minor.yy194 = 0; + sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); + } +} + break; + case 24: /* columnname ::= nm typetoken */ +{sqlite3AddColumn(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);} + break; + case 25: /* typetoken ::= */ + case 60: /* conslist_opt ::= */ yytestcase(yyruleno==60); + case 96: /* as ::= */ yytestcase(yyruleno==96); +{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = 0;} + break; + case 26: /* typetoken ::= typename LP signed RP */ +{ + yymsp[-3].minor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-3].minor.yy0.z); +} + break; + case 27: /* typetoken ::= typename LP signed COMMA signed RP */ +{ + yymsp[-5].minor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-5].minor.yy0.z); +} + break; + case 28: /* typename ::= typename ID|STRING */ +{yymsp[-1].minor.yy0.n=yymsp[0].minor.yy0.n+(int)(yymsp[0].minor.yy0.z-yymsp[-1].minor.yy0.z);} + break; + case 29: /* ccons ::= CONSTRAINT nm */ + case 62: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==62); +{pParse->constraintName = yymsp[0].minor.yy0;} + break; + case 30: /* ccons ::= DEFAULT term */ + case 32: /* ccons ::= DEFAULT PLUS term */ yytestcase(yyruleno==32); +{sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy190);} + break; + case 31: /* ccons ::= DEFAULT LP expr RP */ +{sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy190);} + break; + case 33: /* ccons ::= DEFAULT MINUS term */ +{ + ExprSpan v; + v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy190.pExpr, 0); + v.zStart = yymsp[-1].minor.yy0.z; + v.zEnd = yymsp[0].minor.yy190.zEnd; + sqlite3AddDefaultValue(pParse,&v); +} + break; + case 34: /* ccons ::= DEFAULT ID|INDEXED */ +{ + ExprSpan v; + spanExpr(&v, pParse, TK_STRING, yymsp[0].minor.yy0); + sqlite3AddDefaultValue(pParse,&v); +} + break; + case 35: /* ccons ::= NOT NULL onconf */ +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy194);} + break; + case 36: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy194,yymsp[0].minor.yy194,yymsp[-2].minor.yy194);} + break; + case 37: /* ccons ::= UNIQUE onconf */ +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy194,0,0,0,0, + SQLITE_IDXTYPE_UNIQUE);} + break; + case 38: /* ccons ::= CHECK LP expr RP */ +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy190.pExpr);} + break; + case 39: /* ccons ::= REFERENCES nm eidlist_opt refargs */ +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy148,yymsp[0].minor.yy194);} + break; + case 40: /* ccons ::= defer_subclause */ +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy194);} + break; + case 41: /* ccons ::= COLLATE ID|STRING */ +{sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} + break; + case 44: /* refargs ::= */ +{ yymsp[1].minor.yy194 = OE_None*0x0101; /* EV: R-19803-45884 */} + break; + case 45: /* refargs ::= refargs refarg */ +{ yymsp[-1].minor.yy194 = (yymsp[-1].minor.yy194 & ~yymsp[0].minor.yy497.mask) | yymsp[0].minor.yy497.value; } + break; + case 46: /* refarg ::= MATCH nm */ +{ yymsp[-1].minor.yy497.value = 0; yymsp[-1].minor.yy497.mask = 0x000000; } + break; + case 47: /* refarg ::= ON INSERT 
refact */ +{ yymsp[-2].minor.yy497.value = 0; yymsp[-2].minor.yy497.mask = 0x000000; } + break; + case 48: /* refarg ::= ON DELETE refact */ +{ yymsp[-2].minor.yy497.value = yymsp[0].minor.yy194; yymsp[-2].minor.yy497.mask = 0x0000ff; } + break; + case 49: /* refarg ::= ON UPDATE refact */ +{ yymsp[-2].minor.yy497.value = yymsp[0].minor.yy194<<8; yymsp[-2].minor.yy497.mask = 0x00ff00; } + break; + case 50: /* refact ::= SET NULL */ +{ yymsp[-1].minor.yy194 = OE_SetNull; /* EV: R-33326-45252 */} + break; + case 51: /* refact ::= SET DEFAULT */ +{ yymsp[-1].minor.yy194 = OE_SetDflt; /* EV: R-33326-45252 */} + break; + case 52: /* refact ::= CASCADE */ +{ yymsp[0].minor.yy194 = OE_Cascade; /* EV: R-33326-45252 */} + break; + case 53: /* refact ::= RESTRICT */ +{ yymsp[0].minor.yy194 = OE_Restrict; /* EV: R-33326-45252 */} + break; + case 54: /* refact ::= NO ACTION */ +{ yymsp[-1].minor.yy194 = OE_None; /* EV: R-33326-45252 */} + break; + case 55: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ +{yymsp[-2].minor.yy194 = 0;} + break; + case 56: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ + case 71: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==71); + case 144: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==144); +{yymsp[-1].minor.yy194 = yymsp[0].minor.yy194;} + break; + case 58: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ + case 75: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==75); + case 187: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==187); + case 190: /* in_op ::= NOT IN */ yytestcase(yyruleno==190); + case 216: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==216); +{yymsp[-1].minor.yy194 = 1;} + break; + case 59: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ +{yymsp[-1].minor.yy194 = 0;} + break; + case 61: /* tconscomma ::= COMMA */ +{pParse->constraintName.n = 0;} + break; + case 63: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy148,yymsp[0].minor.yy194,yymsp[-2].minor.yy194,0);} + break; + case 64: /* tcons ::= UNIQUE LP sortlist RP onconf */ +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy148,yymsp[0].minor.yy194,0,0,0,0, + SQLITE_IDXTYPE_UNIQUE);} + break; + case 65: /* tcons ::= CHECK LP expr RP onconf */ +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy190.pExpr);} + break; + case 66: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ +{ + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy148, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy148, yymsp[-1].minor.yy194); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy194); +} + break; + case 68: /* onconf ::= */ + case 70: /* orconf ::= */ yytestcase(yyruleno==70); +{yymsp[1].minor.yy194 = OE_Default;} + break; + case 69: /* onconf ::= ON CONFLICT resolvetype */ +{yymsp[-2].minor.yy194 = yymsp[0].minor.yy194;} + break; + case 72: /* resolvetype ::= IGNORE */ +{yymsp[0].minor.yy194 = OE_Ignore;} + break; + case 73: /* resolvetype ::= REPLACE */ + case 145: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==145); +{yymsp[0].minor.yy194 = OE_Replace;} + break; + case 74: /* cmd ::= DROP TABLE ifexists fullname */ +{ + sqlite3DropTable(pParse, yymsp[0].minor.yy185, 0, yymsp[-1].minor.yy194); +} + break; + case 77: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ +{ + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy148, yymsp[0].minor.yy243, 
yymsp[-7].minor.yy194, yymsp[-5].minor.yy194); +} + break; + case 78: /* cmd ::= DROP VIEW ifexists fullname */ +{ + sqlite3DropTable(pParse, yymsp[0].minor.yy185, 1, yymsp[-1].minor.yy194); +} + break; + case 79: /* cmd ::= select */ +{ + SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0}; + sqlite3Select(pParse, yymsp[0].minor.yy243, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy243); +} + break; + case 80: /* select ::= with selectnowith */ +{ + Select *p = yymsp[0].minor.yy243; + if( p ){ + p->pWith = yymsp[-1].minor.yy285; + parserDoubleLinkSelect(pParse, p); + }else{ + sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy285); + } + yymsp[-1].minor.yy243 = p; /*A-overwrites-W*/ +} + break; + case 81: /* selectnowith ::= selectnowith multiselect_op oneselect */ +{ + Select *pRhs = yymsp[0].minor.yy243; + Select *pLhs = yymsp[-2].minor.yy243; + if( pRhs && pRhs->pPrior ){ + SrcList *pFrom; + Token x; + x.n = 0; + parserDoubleLinkSelect(pParse, pRhs); + pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0,0); + pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0,0); + } + if( pRhs ){ + pRhs->op = (u8)yymsp[-1].minor.yy194; + pRhs->pPrior = pLhs; + if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; + pRhs->selFlags &= ~SF_MultiValue; + if( yymsp[-1].minor.yy194!=TK_ALL ) pParse->hasCompound = 1; + }else{ + sqlite3SelectDelete(pParse->db, pLhs); + } + yymsp[-2].minor.yy243 = pRhs; +} + break; + case 82: /* multiselect_op ::= UNION */ + case 84: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==84); +{yymsp[0].minor.yy194 = yymsp[0].major; /*A-overwrites-OP*/} + break; + case 83: /* multiselect_op ::= UNION ALL */ +{yymsp[-1].minor.yy194 = TK_ALL;} + break; + case 85: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ +{ +#if SELECTTRACE_ENABLED + Token s = yymsp[-8].minor.yy0; /*A-overwrites-S*/ +#endif + yymsp[-8].minor.yy243 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy148,yymsp[-5].minor.yy185,yymsp[-4].minor.yy72,yymsp[-3].minor.yy148,yymsp[-2].minor.yy72,yymsp[-1].minor.yy148,yymsp[-7].minor.yy194,yymsp[0].minor.yy354.pLimit,yymsp[0].minor.yy354.pOffset); +#if SELECTTRACE_ENABLED + /* Populate the Select.zSelName[] string that is used to help with + ** query planner debugging, to differentiate between multiple Select + ** objects in a complex query. + ** + ** If the SELECT keyword is immediately followed by a C-style comment + ** then extract the first few alphanumeric characters from within that + ** comment to be the zSelName value. Otherwise, the label is #N where + ** is an integer that is incremented with each SELECT statement seen. 
+ */ + if( yymsp[-8].minor.yy243!=0 ){ + const char *z = s.z+6; + int i; + sqlite3_snprintf(sizeof(yymsp[-8].minor.yy243->zSelName), yymsp[-8].minor.yy243->zSelName, "#%d", + ++pParse->nSelect); + while( z[0]==' ' ) z++; + if( z[0]=='/' && z[1]=='*' ){ + z += 2; + while( z[0]==' ' ) z++; + for(i=0; sqlite3Isalnum(z[i]); i++){} + sqlite3_snprintf(sizeof(yymsp[-8].minor.yy243->zSelName), yymsp[-8].minor.yy243->zSelName, "%.*s", i, z); + } + } +#endif /* SELECTRACE_ENABLED */ +} + break; + case 86: /* values ::= VALUES LP nexprlist RP */ +{ + yymsp[-3].minor.yy243 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy148,0,0,0,0,0,SF_Values,0,0); +} + break; + case 87: /* values ::= values COMMA LP exprlist RP */ +{ + Select *pRight, *pLeft = yymsp[-4].minor.yy243; + pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy148,0,0,0,0,0,SF_Values|SF_MultiValue,0,0); + if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; + if( pRight ){ + pRight->op = TK_ALL; + pRight->pPrior = pLeft; + yymsp[-4].minor.yy243 = pRight; + }else{ + yymsp[-4].minor.yy243 = pLeft; + } +} + break; + case 88: /* distinct ::= DISTINCT */ +{yymsp[0].minor.yy194 = SF_Distinct;} + break; + case 89: /* distinct ::= ALL */ +{yymsp[0].minor.yy194 = SF_All;} + break; + case 91: /* sclp ::= */ + case 119: /* orderby_opt ::= */ yytestcase(yyruleno==119); + case 126: /* groupby_opt ::= */ yytestcase(yyruleno==126); + case 203: /* exprlist ::= */ yytestcase(yyruleno==203); + case 206: /* paren_exprlist ::= */ yytestcase(yyruleno==206); + case 211: /* eidlist_opt ::= */ yytestcase(yyruleno==211); +{yymsp[1].minor.yy148 = 0;} + break; + case 92: /* selcollist ::= sclp expr as */ +{ + yymsp[-2].minor.yy148 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy148, yymsp[-1].minor.yy190.pExpr); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-2].minor.yy148, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-2].minor.yy148,&yymsp[-1].minor.yy190); +} + break; + case 93: /* selcollist ::= sclp STAR */ +{ + Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); + yymsp[-1].minor.yy148 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy148, p); +} + break; + case 94: /* selcollist ::= sclp nm DOT STAR */ +{ + Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); + Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); + Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, pDot); +} + break; + case 95: /* as ::= AS nm */ + case 106: /* dbnm ::= DOT nm */ yytestcase(yyruleno==106); + case 225: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==225); + case 226: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==226); +{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} + break; + case 97: /* from ::= */ +{yymsp[1].minor.yy185 = sqlite3DbMallocZero(pParse->db, sizeof(*yymsp[1].minor.yy185));} + break; + case 98: /* from ::= FROM seltablist */ +{ + yymsp[-1].minor.yy185 = yymsp[0].minor.yy185; + sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy185); +} + break; + case 99: /* stl_prefix ::= seltablist joinop */ +{ + if( ALWAYS(yymsp[-1].minor.yy185 && yymsp[-1].minor.yy185->nSrc>0) ) yymsp[-1].minor.yy185->a[yymsp[-1].minor.yy185->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy194; +} + break; + case 100: /* stl_prefix ::= */ +{yymsp[1].minor.yy185 = 0;} + break; + case 101: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ +{ + yymsp[-6].minor.yy185 = 
sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy185, &yymsp[-2].minor.yy0); +} + break; + case 102: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ +{ + yymsp[-8].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy185,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy185, yymsp[-4].minor.yy148); +} + break; + case 103: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ +{ + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy243,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + } + break; + case 104: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ +{ + if( yymsp[-6].minor.yy185==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy72==0 && yymsp[0].minor.yy254==0 ){ + yymsp[-6].minor.yy185 = yymsp[-4].minor.yy185; + }else if( yymsp[-4].minor.yy185->nSrc==1 ){ + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + if( yymsp[-6].minor.yy185 ){ + struct SrcList_item *pNew = &yymsp[-6].minor.yy185->a[yymsp[-6].minor.yy185->nSrc-1]; + struct SrcList_item *pOld = yymsp[-4].minor.yy185->a; + pNew->zName = pOld->zName; + pNew->zDatabase = pOld->zDatabase; + pNew->pSelect = pOld->pSelect; + pOld->zName = pOld->zDatabase = 0; + pOld->pSelect = 0; + } + sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy185); + }else{ + Select *pSubquery; + sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy185); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy185,0,0,0,0,SF_NestedFrom,0,0); + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + } + } + break; + case 105: /* dbnm ::= */ + case 114: /* indexed_opt ::= */ yytestcase(yyruleno==114); +{yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;} + break; + case 107: /* fullname ::= nm dbnm */ +{yymsp[-1].minor.yy185 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} + break; + case 108: /* joinop ::= COMMA|JOIN */ +{ yymsp[0].minor.yy194 = JT_INNER; } + break; + case 109: /* joinop ::= JOIN_KW JOIN */ +{yymsp[-1].minor.yy194 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} + break; + case 110: /* joinop ::= JOIN_KW nm JOIN */ +{yymsp[-2].minor.yy194 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} + break; + case 111: /* joinop ::= JOIN_KW nm nm JOIN */ +{yymsp[-3].minor.yy194 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} + break; + case 112: /* on_opt ::= ON expr */ + case 129: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==129); + case 136: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==136); + case 199: /* case_else ::= ELSE expr */ yytestcase(yyruleno==199); +{yymsp[-1].minor.yy72 = yymsp[0].minor.yy190.pExpr;} + break; + case 113: /* on_opt ::= */ + case 128: /* having_opt ::= */ yytestcase(yyruleno==128); + case 135: /* where_opt ::= */ yytestcase(yyruleno==135); + case 200: /* case_else ::= */ yytestcase(yyruleno==200); + case 202: /* 
case_operand ::= */ yytestcase(yyruleno==202); +{yymsp[1].minor.yy72 = 0;} + break; + case 115: /* indexed_opt ::= INDEXED BY nm */ +{yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} + break; + case 116: /* indexed_opt ::= NOT INDEXED */ +{yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} + break; + case 117: /* using_opt ::= USING LP idlist RP */ +{yymsp[-3].minor.yy254 = yymsp[-1].minor.yy254;} + break; + case 118: /* using_opt ::= */ + case 146: /* idlist_opt ::= */ yytestcase(yyruleno==146); +{yymsp[1].minor.yy254 = 0;} + break; + case 120: /* orderby_opt ::= ORDER BY sortlist */ + case 127: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==127); +{yymsp[-2].minor.yy148 = yymsp[0].minor.yy148;} + break; + case 121: /* sortlist ::= sortlist COMMA expr sortorder */ +{ + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148,yymsp[-1].minor.yy190.pExpr); + sqlite3ExprListSetSortOrder(yymsp[-3].minor.yy148,yymsp[0].minor.yy194); +} + break; + case 122: /* sortlist ::= expr sortorder */ +{ + yymsp[-1].minor.yy148 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy190.pExpr); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-1].minor.yy148,yymsp[0].minor.yy194); +} + break; + case 123: /* sortorder ::= ASC */ +{yymsp[0].minor.yy194 = SQLITE_SO_ASC;} + break; + case 124: /* sortorder ::= DESC */ +{yymsp[0].minor.yy194 = SQLITE_SO_DESC;} + break; + case 125: /* sortorder ::= */ +{yymsp[1].minor.yy194 = SQLITE_SO_UNDEFINED;} + break; + case 130: /* limit_opt ::= */ +{yymsp[1].minor.yy354.pLimit = 0; yymsp[1].minor.yy354.pOffset = 0;} + break; + case 131: /* limit_opt ::= LIMIT expr */ +{yymsp[-1].minor.yy354.pLimit = yymsp[0].minor.yy190.pExpr; yymsp[-1].minor.yy354.pOffset = 0;} + break; + case 132: /* limit_opt ::= LIMIT expr OFFSET expr */ +{yymsp[-3].minor.yy354.pLimit = yymsp[-2].minor.yy190.pExpr; yymsp[-3].minor.yy354.pOffset = yymsp[0].minor.yy190.pExpr;} + break; + case 133: /* limit_opt ::= LIMIT expr COMMA expr */ +{yymsp[-3].minor.yy354.pOffset = yymsp[-2].minor.yy190.pExpr; yymsp[-3].minor.yy354.pLimit = yymsp[0].minor.yy190.pExpr;} + break; + case 134: /* cmd ::= with DELETE FROM fullname indexed_opt where_opt */ +{ + sqlite3WithPush(pParse, yymsp[-5].minor.yy285, 1); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy185, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy185,yymsp[0].minor.yy72); +} + break; + case 137: /* cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt */ +{ + sqlite3WithPush(pParse, yymsp[-7].minor.yy285, 1); + sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy185, &yymsp[-3].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy148,"set list"); + sqlite3Update(pParse,yymsp[-4].minor.yy185,yymsp[-1].minor.yy148,yymsp[0].minor.yy72,yymsp[-5].minor.yy194); +} + break; + case 138: /* setlist ::= setlist COMMA nm EQ expr */ +{ + yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy148, yymsp[0].minor.yy190.pExpr); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy148, &yymsp[-2].minor.yy0, 1); +} + break; + case 139: /* setlist ::= setlist COMMA LP idlist RP EQ expr */ +{ + yymsp[-6].minor.yy148 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy148, yymsp[-3].minor.yy254, yymsp[0].minor.yy190.pExpr); +} + break; + case 140: /* setlist ::= nm EQ expr */ +{ + yylhsminor.yy148 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy190.pExpr); + sqlite3ExprListSetName(pParse, yylhsminor.yy148, &yymsp[-2].minor.yy0, 1); +} + yymsp[-2].minor.yy148 = 
yylhsminor.yy148; + break; + case 141: /* setlist ::= LP idlist RP EQ expr */ +{ + yymsp[-4].minor.yy148 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy254, yymsp[0].minor.yy190.pExpr); +} + break; + case 142: /* cmd ::= with insert_cmd INTO fullname idlist_opt select */ +{ + sqlite3WithPush(pParse, yymsp[-5].minor.yy285, 1); + sqlite3Insert(pParse, yymsp[-2].minor.yy185, yymsp[0].minor.yy243, yymsp[-1].minor.yy254, yymsp[-4].minor.yy194); +} + break; + case 143: /* cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES */ +{ + sqlite3WithPush(pParse, yymsp[-6].minor.yy285, 1); + sqlite3Insert(pParse, yymsp[-3].minor.yy185, 0, yymsp[-2].minor.yy254, yymsp[-5].minor.yy194); +} + break; + case 147: /* idlist_opt ::= LP idlist RP */ +{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} + break; + case 148: /* idlist ::= idlist COMMA nm */ +{yymsp[-2].minor.yy254 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);} + break; + case 149: /* idlist ::= nm */ +{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} + break; + case 150: /* expr ::= LP expr RP */ +{spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ yymsp[-2].minor.yy190.pExpr = yymsp[-1].minor.yy190.pExpr;} + break; + case 151: /* term ::= NULL */ + case 156: /* term ::= FLOAT|BLOB */ yytestcase(yyruleno==156); + case 157: /* term ::= STRING */ yytestcase(yyruleno==157); +{spanExpr(&yymsp[0].minor.yy190,pParse,yymsp[0].major,yymsp[0].minor.yy0);/*A-overwrites-X*/} + break; + case 152: /* expr ::= ID|INDEXED */ + case 153: /* expr ::= JOIN_KW */ yytestcase(yyruleno==153); +{spanExpr(&yymsp[0].minor.yy190,pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} + break; + case 154: /* expr ::= nm DOT nm */ +{ + Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); + Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); + spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2); +} + break; + case 155: /* expr ::= nm DOT nm DOT nm */ +{ + Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1); + Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1); + Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1); + Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3); + spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4); +} + break; + case 158: /* term ::= INTEGER */ +{ + yylhsminor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); + yylhsminor.yy190.zStart = yymsp[0].minor.yy0.z; + yylhsminor.yy190.zEnd = yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n; + if( yylhsminor.yy190.pExpr ) yylhsminor.yy190.pExpr->flags |= EP_Leaf; +} + yymsp[0].minor.yy190 = yylhsminor.yy190; + break; + case 159: /* expr ::= VARIABLE */ +{ + if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ + u32 n = yymsp[0].minor.yy0.n; + spanExpr(&yymsp[0].minor.yy190, pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy190.pExpr, n); + }else{ + /* When doing a nested parse, one can include terms in an expression + ** that look like this: #1 #2 ... These terms refer to registers + ** in the virtual machine. 
#N is the N-th register. */ + Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/ + assert( t.n>=2 ); + spanSet(&yymsp[0].minor.yy190, &t, &t); + if( pParse->nested==0 ){ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); + yymsp[0].minor.yy190.pExpr = 0; + }else{ + yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0); + if( yymsp[0].minor.yy190.pExpr ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy190.pExpr->iTable); + } + } +} + break; + case 160: /* expr ::= expr COLLATE ID|STRING */ +{ + yymsp[-2].minor.yy190.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy190.pExpr, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; +} + break; + case 161: /* expr ::= CAST LP expr AS typetoken RP */ +{ + spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-5].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); + sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy190.pExpr, yymsp[-3].minor.yy190.pExpr, 0); +} + break; + case 162: /* expr ::= ID|INDEXED LP distinct exprlist RP */ +{ + if( yymsp[-1].minor.yy148 && yymsp[-1].minor.yy148->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ + sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0); + } + yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy148, &yymsp[-4].minor.yy0); + spanSet(&yylhsminor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); + if( yymsp[-2].minor.yy194==SF_Distinct && yylhsminor.yy190.pExpr ){ + yylhsminor.yy190.pExpr->flags |= EP_Distinct; + } +} + yymsp[-4].minor.yy190 = yylhsminor.yy190; + break; + case 163: /* expr ::= ID|INDEXED LP STAR RP */ +{ + yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0); + spanSet(&yylhsminor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); +} + yymsp[-3].minor.yy190 = yylhsminor.yy190; + break; + case 164: /* term ::= CTIME_KW */ +{ + yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0); + spanSet(&yylhsminor.yy190, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0); +} + yymsp[0].minor.yy190 = yylhsminor.yy190; + break; + case 165: /* expr ::= LP nexprlist COMMA expr RP */ +{ + ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy148, yymsp[-1].minor.yy190.pExpr); + yylhsminor.yy190.pExpr = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); + if( yylhsminor.yy190.pExpr ){ + yylhsminor.yy190.pExpr->x.pList = pList; + spanSet(&yylhsminor.yy190, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0); + }else{ + sqlite3ExprListDelete(pParse->db, pList); + } +} + yymsp[-4].minor.yy190 = yylhsminor.yy190; + break; + case 166: /* expr ::= expr AND expr */ + case 167: /* expr ::= expr OR expr */ yytestcase(yyruleno==167); + case 168: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==168); + case 169: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==169); + case 170: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==170); + case 171: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==171); + case 172: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==172); + case 173: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==173); +{spanBinaryExpr(pParse,yymsp[-1].major,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy190);} + break; + case 174: /* likeop ::= LIKE_KW|MATCH */ +{yymsp[0].minor.yy0=yymsp[0].minor.yy0;/*A-overwrites-X*/} + break; + case 175: /* likeop ::= NOT LIKE_KW|MATCH */ 
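+/* A note on the likeop encoding (explanatory gloss; the constants come
+** from the actions below): the NOT variant marks itself by setting the
+** high bit of Token.n, and cases 176/177 later recover both the flag
+** and the true token length:
+**
+**     bNot = op.n & 0x80000000;    .. nonzero for NOT LIKE / NOT MATCH
+**     op.n &= 0x7fffffff;          .. restore the real length
+**
+** ("op" stands in for the likeop token yymsp[-1].minor.yy0.)
+*/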
+{yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} + break; + case 176: /* expr ::= expr likeop expr */ +{ + ExprList *pList; + int bNot = yymsp[-1].minor.yy0.n & 0x80000000; + yymsp[-1].minor.yy0.n &= 0x7fffffff; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy190.pExpr); + yymsp[-2].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0); + exprNot(pParse, bNot, &yymsp[-2].minor.yy190); + yymsp[-2].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd; + if( yymsp[-2].minor.yy190.pExpr ) yymsp[-2].minor.yy190.pExpr->flags |= EP_InfixFunc; +} + break; + case 177: /* expr ::= expr likeop expr ESCAPE expr */ +{ + ExprList *pList; + int bNot = yymsp[-3].minor.yy0.n & 0x80000000; + yymsp[-3].minor.yy0.n &= 0x7fffffff; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0); + exprNot(pParse, bNot, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd; + if( yymsp[-4].minor.yy190.pExpr ) yymsp[-4].minor.yy190.pExpr->flags |= EP_InfixFunc; +} + break; + case 178: /* expr ::= expr ISNULL|NOTNULL */ +{spanUnaryPostfix(pParse,yymsp[0].major,&yymsp[-1].minor.yy190,&yymsp[0].minor.yy0);} + break; + case 179: /* expr ::= expr NOT NULL */ +{spanUnaryPostfix(pParse,TK_NOTNULL,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy0);} + break; + case 180: /* expr ::= expr IS expr */ +{ + spanBinaryExpr(pParse,TK_IS,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy190); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy190.pExpr, yymsp[-2].minor.yy190.pExpr, TK_ISNULL); +} + break; + case 181: /* expr ::= expr IS NOT expr */ +{ + spanBinaryExpr(pParse,TK_ISNOT,&yymsp[-3].minor.yy190,&yymsp[0].minor.yy190); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy190.pExpr, yymsp[-3].minor.yy190.pExpr, TK_NOTNULL); +} + break; + case 182: /* expr ::= NOT expr */ + case 183: /* expr ::= BITNOT expr */ yytestcase(yyruleno==183); +{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,yymsp[-1].major,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} + break; + case 184: /* expr ::= MINUS expr */ +{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,TK_UMINUS,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} + break; + case 185: /* expr ::= PLUS expr */ +{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,TK_UPLUS,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} + break; + case 186: /* between_op ::= BETWEEN */ + case 189: /* in_op ::= IN */ yytestcase(yyruleno==189); +{yymsp[0].minor.yy194 = 0;} + break; + case 188: /* expr ::= expr between_op expr AND expr */ +{ + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy190.pExpr, 0); + if( yymsp[-4].minor.yy190.pExpr ){ + yymsp[-4].minor.yy190.pExpr->x.pList = pList; + }else{ + sqlite3ExprListDelete(pParse->db, pList); + } + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd; +} + break; + case 191: /* expr ::= expr in_op LP exprlist RP */ +{ + if( 
yymsp[-1].minor.yy148==0 ){ + /* Expressions of the form + ** + ** expr1 IN () + ** expr1 NOT IN () + ** + ** simplify to constants 0 (false) and 1 (true), respectively, + ** regardless of the value of expr1. + */ + sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy190.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_INTEGER,&sqlite3IntTokens[yymsp[-3].minor.yy194],1); + }else if( yymsp[-1].minor.yy148->nExpr==1 ){ + /* Expressions of the form: + ** + ** expr1 IN (?1) + ** expr1 NOT IN (?2) + ** + ** with exactly one value on the RHS can be simplified to something + ** like this: + ** + ** expr1 == ?1 + ** expr1 <> ?2 + ** + ** But, the RHS of the == or <> is marked with the EP_Generic flag + ** so that it may not contribute to the computation of comparison + ** affinity or the collating sequence to use for comparison. Otherwise, + ** the semantics would be subtly different from IN or NOT IN. + */ + Expr *pRHS = yymsp[-1].minor.yy148->a[0].pExpr; + yymsp[-1].minor.yy148->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy148); + /* pRHS cannot be NULL because a malloc error would have been detected + ** before now and control would have never reached this point */ + if( ALWAYS(pRHS) ){ + pRHS->flags &= ~EP_Collate; + pRHS->flags |= EP_Generic; + } + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy194 ? TK_NE : TK_EQ, yymsp[-4].minor.yy190.pExpr, pRHS); + }else{ + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0); + if( yymsp[-4].minor.yy190.pExpr ){ + yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy148; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr); + }else{ + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy148); + } + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + } + yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; + } + break; + case 192: /* expr ::= LP select RP */ +{ + spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ + yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy190.pExpr, yymsp[-1].minor.yy243); + } + break; + case 193: /* expr ::= expr in_op LP select RP */ +{ + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, yymsp[-1].minor.yy243); + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; + } + break; + case 194: /* expr ::= expr in_op nm dbnm paren_exprlist */ +{ + SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); + Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); + if( yymsp[0].minor.yy148 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy148); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, pSelect); + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = yymsp[-1].minor.yy0.z ? 
&yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n] : &yymsp[-2].minor.yy0.z[yymsp[-2].minor.yy0.n]; + } + break; + case 195: /* expr ::= EXISTS LP select RP */ +{ + Expr *p; + spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ + p = yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy243); + } + break; + case 196: /* expr ::= CASE case_operand case_exprlist case_else END */ +{ + spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-C*/ + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0); + if( yymsp[-4].minor.yy190.pExpr ){ + yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy72 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[-1].minor.yy72) : yymsp[-2].minor.yy148; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr); + }else{ + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy148); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy72); + } +} + break; + case 197: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ +{ + yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy148, yymsp[-2].minor.yy190.pExpr); + yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy148, yymsp[0].minor.yy190.pExpr); +} + break; + case 198: /* case_exprlist ::= WHEN expr THEN expr */ +{ + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, yymsp[0].minor.yy190.pExpr); +} + break; + case 201: /* case_operand ::= expr */ +{yymsp[0].minor.yy72 = yymsp[0].minor.yy190.pExpr; /*A-overwrites-X*/} + break; + case 204: /* nexprlist ::= nexprlist COMMA expr */ +{yymsp[-2].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[0].minor.yy190.pExpr);} + break; + case 205: /* nexprlist ::= expr */ +{yymsp[0].minor.yy148 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy190.pExpr); /*A-overwrites-Y*/} + break; + case 207: /* paren_exprlist ::= LP exprlist RP */ + case 212: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==212); +{yymsp[-2].minor.yy148 = yymsp[-1].minor.yy148;} + break; + case 208: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ +{ + sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, + sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy148, yymsp[-10].minor.yy194, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy72, SQLITE_SO_ASC, yymsp[-8].minor.yy194, SQLITE_IDXTYPE_APPDEF); +} + break; + case 209: /* uniqueflag ::= UNIQUE */ + case 250: /* raisetype ::= ABORT */ yytestcase(yyruleno==250); +{yymsp[0].minor.yy194 = OE_Abort;} + break; + case 210: /* uniqueflag ::= */ +{yymsp[1].minor.yy194 = OE_None;} + break; + case 213: /* eidlist ::= eidlist COMMA nm collate sortorder */ +{ + yymsp[-4].minor.yy148 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy148, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy194, yymsp[0].minor.yy194); +} + break; + case 214: /* eidlist ::= nm collate sortorder */ +{ + yymsp[-2].minor.yy148 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy194, yymsp[0].minor.yy194); /*A-overwrites-Y*/ +} + break; + case 217: /* cmd ::= DROP INDEX ifexists fullname */ +{sqlite3DropIndex(pParse, yymsp[0].minor.yy185, yymsp[-1].minor.yy194);} + break; + case 218: /* cmd ::= VACUUM */ +{sqlite3Vacuum(pParse,0);} + 
break; + case 219: /* cmd ::= VACUUM nm */ +{sqlite3Vacuum(pParse,&yymsp[0].minor.yy0);} + break; + case 220: /* cmd ::= PRAGMA nm dbnm */ +{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} + break; + case 221: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ +{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} + break; + case 222: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ +{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} + break; + case 223: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ +{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} + break; + case 224: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ +{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} + break; + case 227: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ +{ + Token all; + all.z = yymsp[-3].minor.yy0.z; + all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy145, &all); +} + break; + case 228: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ +{ + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy194, yymsp[-4].minor.yy332.a, yymsp[-4].minor.yy332.b, yymsp[-2].minor.yy185, yymsp[0].minor.yy72, yymsp[-10].minor.yy194, yymsp[-8].minor.yy194); + yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ +} + break; + case 229: /* trigger_time ::= BEFORE */ +{ yymsp[0].minor.yy194 = TK_BEFORE; } + break; + case 230: /* trigger_time ::= AFTER */ +{ yymsp[0].minor.yy194 = TK_AFTER; } + break; + case 231: /* trigger_time ::= INSTEAD OF */ +{ yymsp[-1].minor.yy194 = TK_INSTEAD;} + break; + case 232: /* trigger_time ::= */ +{ yymsp[1].minor.yy194 = TK_BEFORE; } + break; + case 233: /* trigger_event ::= DELETE|INSERT */ + case 234: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==234); +{yymsp[0].minor.yy332.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy332.b = 0;} + break; + case 235: /* trigger_event ::= UPDATE OF idlist */ +{yymsp[-2].minor.yy332.a = TK_UPDATE; yymsp[-2].minor.yy332.b = yymsp[0].minor.yy254;} + break; + case 236: /* when_clause ::= */ + case 255: /* key_opt ::= */ yytestcase(yyruleno==255); +{ yymsp[1].minor.yy72 = 0; } + break; + case 237: /* when_clause ::= WHEN expr */ + case 256: /* key_opt ::= KEY expr */ yytestcase(yyruleno==256); +{ yymsp[-1].minor.yy72 = yymsp[0].minor.yy190.pExpr; } + break; + case 238: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ +{ + assert( yymsp[-2].minor.yy145!=0 ); + yymsp[-2].minor.yy145->pLast->pNext = yymsp[-1].minor.yy145; + yymsp[-2].minor.yy145->pLast = yymsp[-1].minor.yy145; +} + break; + case 239: /* trigger_cmd_list ::= trigger_cmd SEMI */ +{ + assert( yymsp[-1].minor.yy145!=0 ); + yymsp[-1].minor.yy145->pLast = yymsp[-1].minor.yy145; +} + break; + case 240: /* trnm ::= nm DOT nm */ +{ + yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; + sqlite3ErrorMsg(pParse, + "qualified table names are not allowed on INSERT, UPDATE, and DELETE " + "statements within triggers"); +} + break; + case 241: /* tridxby ::= INDEXED BY nm */ +{ + sqlite3ErrorMsg(pParse, + "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " + "within triggers"); +} + break; + case 242: /* tridxby ::= NOT INDEXED */ +{ + sqlite3ErrorMsg(pParse, + "the NOT INDEXED 
clause is not allowed on UPDATE or DELETE statements " + "within triggers"); +} + break; + case 243: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */ +{yymsp[-6].minor.yy145 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy148, yymsp[0].minor.yy72, yymsp[-5].minor.yy194);} + break; + case 244: /* trigger_cmd ::= insert_cmd INTO trnm idlist_opt select */ +{yymsp[-4].minor.yy145 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy254, yymsp[0].minor.yy243, yymsp[-4].minor.yy194);/*A-overwrites-R*/} + break; + case 245: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */ +{yymsp[-4].minor.yy145 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy72);} + break; + case 246: /* trigger_cmd ::= select */ +{yymsp[0].minor.yy145 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy243); /*A-overwrites-X*/} + break; + case 247: /* expr ::= RAISE LP IGNORE RP */ +{ + spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0); + if( yymsp[-3].minor.yy190.pExpr ){ + yymsp[-3].minor.yy190.pExpr->affinity = OE_Ignore; + } +} + break; + case 248: /* expr ::= RAISE LP raisetype COMMA nm RP */ +{ + spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-5].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1); + if( yymsp[-5].minor.yy190.pExpr ) { + yymsp[-5].minor.yy190.pExpr->affinity = (char)yymsp[-3].minor.yy194; + } +} + break; + case 249: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy194 = OE_Rollback;} + break; + case 251: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy194 = OE_Fail;} + break; + case 252: /* cmd ::= DROP TRIGGER ifexists fullname */ +{ + sqlite3DropTrigger(pParse,yymsp[0].minor.yy185,yymsp[-1].minor.yy194); +} + break; + case 253: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ +{ + sqlite3Attach(pParse, yymsp[-3].minor.yy190.pExpr, yymsp[-1].minor.yy190.pExpr, yymsp[0].minor.yy72); +} + break; + case 254: /* cmd ::= DETACH database_kw_opt expr */ +{ + sqlite3Detach(pParse, yymsp[0].minor.yy190.pExpr); +} + break; + case 257: /* cmd ::= REINDEX */ +{sqlite3Reindex(pParse, 0, 0);} + break; + case 258: /* cmd ::= REINDEX nm dbnm */ +{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} + break; + case 259: /* cmd ::= ANALYZE */ +{sqlite3Analyze(pParse, 0, 0);} + break; + case 260: /* cmd ::= ANALYZE nm dbnm */ +{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} + break; + case 261: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ +{ + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy185,&yymsp[0].minor.yy0); +} + break; + case 262: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ +{ + yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; + sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); +} + break; + case 263: /* add_column_fullname ::= fullname */ +{ + disableLookaside(pParse); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy185); +} + break; + case 264: /* cmd ::= create_vtab */ +{sqlite3VtabFinishParse(pParse,0);} + break; + case 265: /* cmd ::= create_vtab LP vtabarglist RP */ +{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} + break; + case 266: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ +{ + sqlite3VtabBeginParse(pParse, 
&yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy194); +} + break; + case 267: /* vtabarg ::= */ +{sqlite3VtabArgInit(pParse);} + break; + case 268: /* vtabargtoken ::= ANY */ + case 269: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==269); + case 270: /* lp ::= LP */ yytestcase(yyruleno==270); +{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} + break; + case 271: /* with ::= */ +{yymsp[1].minor.yy285 = 0;} + break; + case 272: /* with ::= WITH wqlist */ +{ yymsp[-1].minor.yy285 = yymsp[0].minor.yy285; } + break; + case 273: /* with ::= WITH RECURSIVE wqlist */ +{ yymsp[-2].minor.yy285 = yymsp[0].minor.yy285; } + break; + case 274: /* wqlist ::= nm eidlist_opt AS LP select RP */ +{ + yymsp[-5].minor.yy285 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy148, yymsp[-1].minor.yy243); /*A-overwrites-X*/ +} + break; + case 275: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ +{ + yymsp[-7].minor.yy285 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy285, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy148, yymsp[-1].minor.yy243); +} + break; + default: + /* (276) input ::= cmdlist */ yytestcase(yyruleno==276); + /* (277) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==277); + /* (278) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=278); + /* (279) ecmd ::= SEMI */ yytestcase(yyruleno==279); + /* (280) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==280); + /* (281) explain ::= */ yytestcase(yyruleno==281); + /* (282) trans_opt ::= */ yytestcase(yyruleno==282); + /* (283) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==283); + /* (284) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==284); + /* (285) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==285); + /* (286) savepoint_opt ::= */ yytestcase(yyruleno==286); + /* (287) cmd ::= create_table create_table_args */ yytestcase(yyruleno==287); + /* (288) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==288); + /* (289) columnlist ::= columnname carglist */ yytestcase(yyruleno==289); + /* (290) nm ::= ID|INDEXED */ yytestcase(yyruleno==290); + /* (291) nm ::= STRING */ yytestcase(yyruleno==291); + /* (292) nm ::= JOIN_KW */ yytestcase(yyruleno==292); + /* (293) typetoken ::= typename */ yytestcase(yyruleno==293); + /* (294) typename ::= ID|STRING */ yytestcase(yyruleno==294); + /* (295) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=295); + /* (296) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=296); + /* (297) carglist ::= carglist ccons */ yytestcase(yyruleno==297); + /* (298) carglist ::= */ yytestcase(yyruleno==298); + /* (299) ccons ::= NULL onconf */ yytestcase(yyruleno==299); + /* (300) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==300); + /* (301) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==301); + /* (302) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=302); + /* (303) tconscomma ::= */ yytestcase(yyruleno==303); + /* (304) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=304); + /* (305) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=305); + /* (306) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=306); + /* (307) oneselect ::= values */ yytestcase(yyruleno==307); + /* (308) sclp ::= selcollist COMMA */ yytestcase(yyruleno==308); + /* (309) as ::= ID|STRING */ yytestcase(yyruleno==309); + /* (310) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=310); + /* (311) exprlist ::= nexprlist */ yytestcase(yyruleno==311); + 
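+        /* The "(OPTIMIZED OUT)" rules below can never be reduced through
+        ** this switch -- the generator resolved them directly in the
+        ** action tables -- so each assert() documents an unreachable
+        ** case, while the yytestcase() lines keep the reachable no-op
+        ** rules visible to coverage measurement.  (Explanatory gloss;
+        ** wording is not from the generated template.) */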
+        /* (312) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=312);
+        /* (313) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=313);
+        /* (314) nmnum ::= ON */ yytestcase(yyruleno==314);
+        /* (315) nmnum ::= DELETE */ yytestcase(yyruleno==315);
+        /* (316) nmnum ::= DEFAULT */ yytestcase(yyruleno==316);
+        /* (317) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==317);
+        /* (318) foreach_clause ::= */ yytestcase(yyruleno==318);
+        /* (319) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==319);
+        /* (320) trnm ::= nm */ yytestcase(yyruleno==320);
+        /* (321) tridxby ::= */ yytestcase(yyruleno==321);
+        /* (322) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==322);
+        /* (323) database_kw_opt ::= */ yytestcase(yyruleno==323);
+        /* (324) kwcolumn_opt ::= */ yytestcase(yyruleno==324);
+        /* (325) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==325);
+        /* (326) vtabarglist ::= vtabarg */ yytestcase(yyruleno==326);
+        /* (327) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==327);
+        /* (328) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==328);
+        /* (329) anylist ::= */ yytestcase(yyruleno==329);
+        /* (330) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==330);
+        /* (331) anylist ::= anylist ANY */ yytestcase(yyruleno==331);
+        break;
+/********** End reduce actions ************************************************/
+  };
+  assert( yyruleno<sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0]) );
+  yygoto = yyRuleInfo[yyruleno].lhs;
+  yysize = yyRuleInfo[yyruleno].nrhs;
+  yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto);
+  if( yyact <= YY_MAX_SHIFTREDUCE ){
+    if( yyact>YY_MAX_SHIFT ){
+      yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
+    }
+    yymsp -= yysize-1;
+    yypParser->yytos = yymsp;
+    yymsp->stateno = (YYACTIONTYPE)yyact;
+    yymsp->major = (YYCODETYPE)yygoto;
+    yyTraceShift(yypParser, yyact);
+  }else{
+    assert( yyact == YY_ACCEPT_ACTION );
+    yypParser->yytos -= yysize;
+    yy_accept(yypParser);
+  }
+}
+
+/*
+** The following code executes when the parse fails
+*/
+#ifndef YYNOERRORRECOVERY
+static void yy_parse_failed(
+  yyParser *yypParser           /* The parser */
+){
+  sqlite3ParserARG_FETCH;
+#ifndef NDEBUG
+  if( yyTraceFILE ){
+    fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
+  }
+#endif
+  while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
+  /* Here code is inserted which will be executed whenever the
+  ** parser fails */
+/************ Begin %parse_failure code ***************************************/
+/************ End %parse_failure code *****************************************/
+  sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */
+}
+#endif /* YYNOERRORRECOVERY */
+
+/*
+** The following code executes when a syntax error first occurs.
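+**
+** Note: the surrounding driver throttles this hook through yyerrcnt, so
+** at most one report is issued until three more tokens have shifted
+** successfully.  Illustrative example (assumed behavior, not text from
+** the generated template): an input such as
+**
+**       SELECT * FROM WHERE;
+**
+** would typically be diagnosed by the %syntax_error action below as
+**
+**       near "WHERE": syntax error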
+*/ +static void yy_syntax_error( + yyParser *yypParser, /* The parser */ + int yymajor, /* The major type of the error token */ + sqlite3ParserTOKENTYPE yyminor /* The minor type of the error token */ +){ + sqlite3ParserARG_FETCH; +#define TOKEN yyminor +/************ Begin %syntax_error code ****************************************/ + + UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ + assert( TOKEN.z[0] ); /* The tokenizer always gives us a token */ + sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); +/************ End %syntax_error code ******************************************/ + sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ +} + +/* +** The following is executed when the parser accepts +*/ +static void yy_accept( + yyParser *yypParser /* The parser */ +){ + sqlite3ParserARG_FETCH; +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); + } +#endif +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + assert( yypParser->yytos==yypParser->yystack ); + /* Here code is inserted which will be executed whenever the + ** parser accepts */ +/*********** Begin %parse_accept code *****************************************/ +/*********** End %parse_accept code *******************************************/ + sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ +} + +/* The main parser program. +** The first argument is a pointer to a structure obtained from +** "sqlite3ParserAlloc" which describes the current state of the parser. +** The second argument is the major token number. The third is +** the minor token. The fourth optional argument is whatever the +** user wants (and specified in the grammar) and is available for +** use by the action routines. +** +** Inputs: +**
+** <ul>
+** <li> A pointer to the parser (an opaque structure.)
+** <li> The major token number.
+** <li> The minor token number.
+** <li> An option argument of a grammar-specified type.
+** </ul>
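+**
+** A typical driver loop, as an illustrative sketch (the real caller is
+** sqlite3RunParser() in tokenize.c later in this file, from which the
+** pEngine/pParse names are borrowed):
+**
+**       while( another token is available ){
+**         sqlite3Parser(pEngine, tokenType, pParse->sLastToken, pParse);
+**       }
+**       sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse);
+**
+** where a major token number of 0 signals end-of-input.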
    +** +** Outputs: +** None. +*/ +SQLITE_PRIVATE void sqlite3Parser( + void *yyp, /* The parser */ + int yymajor, /* The major token code number */ + sqlite3ParserTOKENTYPE yyminor /* The value for the token */ + sqlite3ParserARG_PDECL /* Optional %extra_argument parameter */ +){ + YYMINORTYPE yyminorunion; + unsigned int yyact; /* The parser action. */ +#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) + int yyendofinput; /* True if we are at the end of input */ +#endif +#ifdef YYERRORSYMBOL + int yyerrorhit = 0; /* True if yymajor has invoked an error */ +#endif + yyParser *yypParser; /* The parser */ + + yypParser = (yyParser*)yyp; + assert( yypParser->yytos!=0 ); +#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) + yyendofinput = (yymajor==0); +#endif + sqlite3ParserARG_STORE; + +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sInput '%s'\n",yyTracePrompt,yyTokenName[yymajor]); + } +#endif + + do{ + yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); + if( yyact <= YY_MAX_SHIFTREDUCE ){ + yy_shift(yypParser,yyact,yymajor,yyminor); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt--; +#endif + yymajor = YYNOCODE; + }else if( yyact <= YY_MAX_REDUCE ){ + yy_reduce(yypParser,yyact-YY_MIN_REDUCE); + }else{ + assert( yyact == YY_ERROR_ACTION ); + yyminorunion.yy0 = yyminor; +#ifdef YYERRORSYMBOL + int yymx; +#endif +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); + } +#endif +#ifdef YYERRORSYMBOL + /* A syntax error has occurred. + ** The response to an error depends upon whether or not the + ** grammar defines an error token "ERROR". + ** + ** This is what we do if the grammar does define ERROR: + ** + ** * Call the %syntax_error function. + ** + ** * Begin popping the stack until we enter a state where + ** it is legal to shift the error symbol, then shift + ** the error symbol. + ** + ** * Set the error count to three. + ** + ** * Begin accepting and shifting new tokens. No new error + ** processing will occur until three tokens have been + ** shifted successfully. + ** + */ + if( yypParser->yyerrcnt<0 ){ + yy_syntax_error(yypParser,yymajor,yyminor); + } + yymx = yypParser->yytos->major; + if( yymx==YYERRORSYMBOL || yyerrorhit ){ +#ifndef NDEBUG + if( yyTraceFILE ){ + fprintf(yyTraceFILE,"%sDiscard input token %s\n", + yyTracePrompt,yyTokenName[yymajor]); + } +#endif + yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); + yymajor = YYNOCODE; + }else{ + while( yypParser->yytos >= yypParser->yystack + && yymx != YYERRORSYMBOL + && (yyact = yy_find_reduce_action( + yypParser->yytos->stateno, + YYERRORSYMBOL)) >= YY_MIN_REDUCE + ){ + yy_pop_parser_stack(yypParser); + } + if( yypParser->yytos < yypParser->yystack || yymajor==0 ){ + yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); + yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + yymajor = YYNOCODE; + }else if( yymx!=YYERRORSYMBOL ){ + yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); + } + } + yypParser->yyerrcnt = 3; + yyerrorhit = 1; +#elif defined(YYNOERRORRECOVERY) + /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to + ** do any kind of error recovery. Instead, simply invoke the syntax + ** error routine and continue going as if nothing had happened. + ** + ** Applications can set this macro (for example inside %include) if + ** they intend to abandon the parse upon the first syntax error seen. 
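+      **
+      ** As a sketch (assumed usage, not text from the generated
+      ** template), a grammar could opt in with:
+      **
+      **     %include {
+      **       #define YYNOERRORRECOVERY 1
+      **     }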
+      */
+      yy_syntax_error(yypParser,yymajor, yyminor);
+      yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+      yymajor = YYNOCODE;
+
+#else  /* YYERRORSYMBOL is not defined */
+      /* This is what we do if the grammar does not define ERROR:
+      **
+      **  * Report an error message, and throw away the input token.
+      **
+      **  * If the input token is $, then fail the parse.
+      **
+      ** As before, subsequent error messages are suppressed until
+      ** three input tokens have been successfully shifted.
+      */
+      if( yypParser->yyerrcnt<=0 ){
+        yy_syntax_error(yypParser,yymajor, yyminor);
+      }
+      yypParser->yyerrcnt = 3;
+      yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
+      if( yyendofinput ){
+        yy_parse_failed(yypParser);
+#ifndef YYNOERRORRECOVERY
+        yypParser->yyerrcnt = -1;
+#endif
+      }
+      yymajor = YYNOCODE;
+#endif
+    }
+  }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack );
+#ifndef NDEBUG
+  if( yyTraceFILE ){
+    yyStackEntry *i;
+    char cDiv = '[';
+    fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt);
+    for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){
+      fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]);
+      cDiv = ' ';
+    }
+    fprintf(yyTraceFILE,"]\n");
+  }
+#endif
+  return;
+}
+
+/************** End of parse.c ***********************************************/
+/************** Begin file tokenize.c ****************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** An tokenizer for SQL
+**
+** This file contains C code that splits an SQL input string up into
+** individual tokens and sends those tokens one-by-one over to the
+** parser for analysis.
+*/
+/* #include "sqliteInt.h" */
+/* #include <stdlib.h> */
+
+/* Character classes for tokenizing
+**
+** In the sqlite3GetToken() function, a switch() on aiClass[c] is implemented
+** using a lookup table, whereas a switch() directly on c uses a binary search.
+** The lookup table is much faster.  To maximize speed, and to ensure that
+** a lookup table is used, all of the classes need to be small integers and
+** all of them need to be used within the switch.
+*/
+#define CC_X          0    /* The letter 'x', or start of BLOB literal */
+#define CC_KYWD       1    /* Alphabetics or '_'.  Usable in a keyword */
+#define CC_ID         2    /* unicode characters usable in IDs */
+#define CC_DIGIT      3    /* Digits */
+#define CC_DOLLAR     4    /* '$' */
+#define CC_VARALPHA   5    /* '@', '#', ':'.  Alphabetic SQL variables */
+#define CC_VARNUM     6    /* '?'.  Numeric SQL variables */
+#define CC_SPACE      7    /* Space characters */
+#define CC_QUOTE      8    /* '"', '\'', or '`'.  String literals, quoted ids */
+#define CC_QUOTE2     9    /* '['.   [...] style quoted ids */
+#define CC_PIPE      10    /* '|'.   Bitwise OR or concatenate */
+#define CC_MINUS     11    /* '-'.  Minus or SQL-style comment */
+#define CC_LT        12    /* '<'.  Part of < or <= or <> */
+#define CC_GT        13    /* '>'.  Part of > or >= */
+#define CC_EQ        14    /* '='.  Part of = or == */
+#define CC_BANG      15    /* '!'.  Part of != */
+#define CC_SLASH     16    /* '/'.
/ or c-style comment */ +#define CC_LP 17 /* '(' */ +#define CC_RP 18 /* ')' */ +#define CC_SEMI 19 /* ';' */ +#define CC_PLUS 20 /* '+' */ +#define CC_STAR 21 /* '*' */ +#define CC_PERCENT 22 /* '%' */ +#define CC_COMMA 23 /* ',' */ +#define CC_AND 24 /* '&' */ +#define CC_TILDA 25 /* '~' */ +#define CC_DOT 26 /* '.' */ +#define CC_ILLEGAL 27 /* Illegal character */ + +static const unsigned char aiClass[] = { +#ifdef SQLITE_ASCII +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */ +/* 0x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 7, 7, 27, 7, 7, 27, 27, +/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +/* 2x */ 7, 15, 8, 5, 4, 22, 24, 8, 17, 18, 21, 20, 23, 11, 26, 16, +/* 3x */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 19, 12, 14, 13, 6, +/* 4x */ 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +/* 5x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 9, 27, 27, 27, 1, +/* 6x */ 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, +/* 7x */ 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 27, 10, 27, 25, 27, +/* 8x */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* 9x */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* Ax */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* Bx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* Cx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* Dx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* Ex */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, +/* Fx */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 +#endif +#ifdef SQLITE_EBCDIC +/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xa xb xc xd xe xf */ +/* 0x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 7, 7, 27, 27, +/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +/* 2x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +/* 3x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, +/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 12, 17, 20, 10, +/* 5x */ 24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 15, 4, 21, 18, 19, 27, +/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 6, +/* 7x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 8, 5, 5, 5, 8, 14, 8, +/* 8x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, +/* 9x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, +/* Ax */ 27, 25, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27, +/* Bx */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 27, 27, 27, 27, 27, +/* Cx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, +/* Dx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27, +/* Ex */ 27, 27, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27, +/* Fx */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 27, 27, 27, 27, 27, 27, +#endif +}; + +/* +** The charMap() macro maps alphabetic characters (only) into their +** lower-case ASCII equivalent. On ASCII machines, this is just +** an upper-to-lower case map. On EBCDIC machines we also need +** to adjust the encoding. The mapping is only valid for alphabetics +** which are the only characters for which this feature is used. 
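+**
+** For example (illustrative): on an ASCII build both charMap('A') and
+** charMap('a') evaluate to 'a', while on an EBCDIC build the code point
+** 0xC1 ('A') maps through ebcdicToAscii[] to 0x61 ('a').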
+** +** Used by keywordhash.h +*/ +#ifdef SQLITE_ASCII +# define charMap(X) sqlite3UpperToLower[(unsigned char)X] +#endif +#ifdef SQLITE_EBCDIC +# define charMap(X) ebcdicToAscii[(unsigned char)X] +const unsigned char ebcdicToAscii[] = { +/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, 0, /* 6x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7x */ + 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* 8x */ + 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* 9x */ + 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ax */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */ + 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* Cx */ + 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* Dx */ + 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ex */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Fx */ +}; +#endif + +/* +** The sqlite3KeywordCode function looks up an identifier to determine if +** it is a keyword. If it is a keyword, the token code of that keyword is +** returned. If the input is not a keyword, TK_ID is returned. +** +** The implementation of this routine was generated by a program, +** mkkeywordhash.c, located in the tool subdirectory of the distribution. +** The output of the mkkeywordhash.c program is written into a file +** named keywordhash.h and then included into this source file by +** the #include below. +*/ +/************** Include keywordhash.h in the middle of tokenize.c ************/ +/************** Begin file keywordhash.h *************************************/ +/***** This file contains automatically generated code ****** +** +** The code in this file has been automatically generated by +** +** sqlite/tool/mkkeywordhash.c +** +** The code in this file implements a function that determines whether +** or not a given identifier is really an SQL keyword. The same thing +** might be implemented more directly using a hand-written hash table. +** But by using this automatically generated code, the size of the code +** is substantially reduced. This is important for embedded applications +** on platforms with limited memory. 
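+**
+** (Editorial illustration, not part of the generated comment: a lookup such
+** as keywordCode("select", 6, &t) below hashes the first character, the
+** last character and the length of the word, probes aHash[], and follows
+** the aNext[] collision chain until it finds an entry whose aLen[]/aOffset[]
+** slice of zText[] matches; it then stores that entry's aCode[] value,
+** TK_SELECT, in t.  Identifiers that match no entry fall off the chain and
+** keep the TK_ID code supplied by the caller.)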
+*/ +/* Hash score: 182 */ +static int keywordCode(const char *z, int n, int *pType){ + /* zText[] encodes 834 bytes of keywords in 554 bytes */ + /* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */ + /* ABLEFTHENDEFERRABLELSEXCEPTRANSACTIONATURALTERAISEXCLUSIVE */ + /* XISTSAVEPOINTERSECTRIGGEREFERENCESCONSTRAINTOFFSETEMPORARY */ + /* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERECURSIVE */ + /* BETWEENOTNULLIKECASCADELETECASECOLLATECREATECURRENT_DATEDETACH */ + /* IMMEDIATEJOINSERTMATCHPLANALYZEPRAGMABORTVALUESVIRTUALIMITWHEN */ + /* WHERENAMEAFTEREPLACEANDEFAULTAUTOINCREMENTCASTCOLUMNCOMMIT */ + /* CONFLICTCROSSCURRENT_TIMESTAMPRIMARYDEFERREDISTINCTDROPFAIL */ + /* FROMFULLGLOBYIFISNULLORDERESTRICTRIGHTROLLBACKROWUNIONUSING */ + /* VACUUMVIEWINITIALLY */ + static const char zText[553] = { + 'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H', + 'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G', + 'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A', + 'S','E','L','E','C','T','A','B','L','E','F','T','H','E','N','D','E','F', + 'E','R','R','A','B','L','E','L','S','E','X','C','E','P','T','R','A','N', + 'S','A','C','T','I','O','N','A','T','U','R','A','L','T','E','R','A','I', + 'S','E','X','C','L','U','S','I','V','E','X','I','S','T','S','A','V','E', + 'P','O','I','N','T','E','R','S','E','C','T','R','I','G','G','E','R','E', + 'F','E','R','E','N','C','E','S','C','O','N','S','T','R','A','I','N','T', + 'O','F','F','S','E','T','E','M','P','O','R','A','R','Y','U','N','I','Q', + 'U','E','R','Y','W','I','T','H','O','U','T','E','R','E','L','E','A','S', + 'E','A','T','T','A','C','H','A','V','I','N','G','R','O','U','P','D','A', + 'T','E','B','E','G','I','N','N','E','R','E','C','U','R','S','I','V','E', + 'B','E','T','W','E','E','N','O','T','N','U','L','L','I','K','E','C','A', + 'S','C','A','D','E','L','E','T','E','C','A','S','E','C','O','L','L','A', + 'T','E','C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A', + 'T','E','D','E','T','A','C','H','I','M','M','E','D','I','A','T','E','J', + 'O','I','N','S','E','R','T','M','A','T','C','H','P','L','A','N','A','L', + 'Y','Z','E','P','R','A','G','M','A','B','O','R','T','V','A','L','U','E', + 'S','V','I','R','T','U','A','L','I','M','I','T','W','H','E','N','W','H', + 'E','R','E','N','A','M','E','A','F','T','E','R','E','P','L','A','C','E', + 'A','N','D','E','F','A','U','L','T','A','U','T','O','I','N','C','R','E', + 'M','E','N','T','C','A','S','T','C','O','L','U','M','N','C','O','M','M', + 'I','T','C','O','N','F','L','I','C','T','C','R','O','S','S','C','U','R', + 'R','E','N','T','_','T','I','M','E','S','T','A','M','P','R','I','M','A', + 'R','Y','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','D', + 'R','O','P','F','A','I','L','F','R','O','M','F','U','L','L','G','L','O', + 'B','Y','I','F','I','S','N','U','L','L','O','R','D','E','R','E','S','T', + 'R','I','C','T','R','I','G','H','T','R','O','L','L','B','A','C','K','R', + 'O','W','U','N','I','O','N','U','S','I','N','G','V','A','C','U','U','M', + 'V','I','E','W','I','N','I','T','I','A','L','L','Y', + }; + static const unsigned char aHash[127] = { + 76, 105, 117, 74, 0, 45, 0, 0, 82, 0, 77, 0, 0, + 42, 12, 78, 15, 0, 116, 85, 54, 112, 0, 19, 0, 0, + 121, 0, 119, 115, 0, 22, 93, 0, 9, 0, 0, 70, 71, + 0, 69, 6, 0, 48, 90, 102, 0, 118, 101, 0, 0, 44, + 0, 103, 24, 0, 17, 0, 122, 53, 23, 0, 5, 110, 25, + 96, 0, 0, 124, 106, 60, 123, 57, 28, 55, 0, 91, 0, + 100, 26, 0, 99, 0, 0, 0, 95, 92, 97, 88, 109, 14, + 39, 108, 0, 
81,   0,  18,  89, 111,  32,   0, 120,  80, 113,
+     62,  46,  84,   0,   0,  94,  40,  59, 114,   0,  36,   0,   0,
+     29,   0,  86,  63,  64,   0,  20,  61,   0,  56,
+  };
+  static const unsigned char aNext[124] = {
+      0,   0,   0,   0,   4,   0,   0,   0,   0,   0,   0,   0,   0,
+      0,   2,   0,   0,   0,   0,   0,   0,  13,   0,   0,   0,   0,
+      0,   7,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+      0,   0,   0,   0,  33,   0,  21,   0,   0,   0,   0,   0,  50,
+      0,  43,   3,  47,   0,   0,   0,   0,  30,   0,  58,   0,  38,
+      0,   0,   0,   1,  66,   0,   0,  67,   0,  41,   0,   0,   0,
+      0,   0,   0,  49,  65,   0,   0,   0,   0,  31,  52,  16,  34,
+     10,   0,   0,   0,   0,   0,   0,   0,  11,  72,  79,   0,   8,
+      0, 104,  98,   0, 107,   0,  87,   0,  75,  51,   0,  27,  37,
+     73,  83,   0,  35,  68,   0,   0,
+  };
+  static const unsigned char aLen[124] = {
+      7,   7,   5,   4,   6,   4,   5,   3,   6,   7,   3,   6,   6,
+      7,   7,   3,   8,   2,   6,   5,   4,   4,   3,  10,   4,   6,
+     11,   6,   2,   7,   5,   5,   9,   6,   9,   9,   7,  10,  10,
+      4,   6,   2,   3,   9,   4,   2,   6,   5,   7,   4,   5,   7,
+      6,   6,   5,   6,   5,   5,   9,   7,   7,   3,   2,   4,   4,
+      7,   3,   6,   4,   7,   6,  12,   6,   9,   4,   6,   5,   4,
+      7,   6,   5,   6,   7,   5,   4,   5,   6,   5,   7,   3,   7,
+     13,   2,   2,   4,   6,   6,   8,   5,  17,  12,   7,   8,   8,
+      2,   4,   4,   4,   4,   4,   2,   2,   6,   5,   8,   5,   8,
+      3,   5,   5,   6,   4,   9,   3,
+  };
+  static const unsigned short int aOffset[124] = {
+      0,   2,   2,   8,   9,  14,  16,  20,  23,  25,  25,  29,  33,
+     36,  41,  46,  48,  53,  54,  59,  62,  65,  67,  69,  78,  81,
+     86,  91,  95,  96, 101, 105, 109, 117, 122, 128, 136, 142, 152,
+    159, 162, 162, 165, 167, 167, 171, 176, 179, 184, 184, 188, 192,
+    199, 204, 209, 212, 218, 221, 225, 234, 240, 240, 240, 243, 246,
+    250, 251, 255, 261, 265, 272, 278, 290, 296, 305, 307, 313, 318,
+    320, 327, 332, 337, 343, 349, 354, 358, 361, 367, 371, 378, 380,
+    387, 389, 391, 400, 404, 410, 416, 424, 429, 429, 445, 452, 459,
+    460, 467, 471, 475, 479, 483, 486, 488, 490, 496, 500, 508, 513,
+    521, 524, 529, 534, 540, 544, 549,
+  };
+  static const unsigned char aCode[124] = {
+    TK_REINDEX,    TK_INDEXED,    TK_INDEX,      TK_DESC,       TK_ESCAPE,
+    TK_EACH,       TK_CHECK,      TK_KEY,        TK_BEFORE,     TK_FOREIGN,
+    TK_FOR,        TK_IGNORE,     TK_LIKE_KW,    TK_EXPLAIN,    TK_INSTEAD,
+    TK_ADD,        TK_DATABASE,   TK_AS,         TK_SELECT,     TK_TABLE,
+    TK_JOIN_KW,    TK_THEN,       TK_END,        TK_DEFERRABLE, TK_ELSE,
+    TK_EXCEPT,     TK_TRANSACTION,TK_ACTION,     TK_ON,         TK_JOIN_KW,
+    TK_ALTER,      TK_RAISE,      TK_EXCLUSIVE,  TK_EXISTS,     TK_SAVEPOINT,
+    TK_INTERSECT,  TK_TRIGGER,    TK_REFERENCES, TK_CONSTRAINT, TK_INTO,
+    TK_OFFSET,     TK_OF,         TK_SET,        TK_TEMP,       TK_TEMP,
+    TK_OR,         TK_UNIQUE,     TK_QUERY,      TK_WITHOUT,    TK_WITH,
+    TK_JOIN_KW,    TK_RELEASE,    TK_ATTACH,     TK_HAVING,     TK_GROUP,
+    TK_UPDATE,     TK_BEGIN,      TK_JOIN_KW,    TK_RECURSIVE,  TK_BETWEEN,
+    TK_NOTNULL,    TK_NOT,        TK_NO,         TK_NULL,       TK_LIKE_KW,
+    TK_CASCADE,    TK_ASC,        TK_DELETE,     TK_CASE,       TK_COLLATE,
+    TK_CREATE,     TK_CTIME_KW,   TK_DETACH,     TK_IMMEDIATE,  TK_JOIN,
+    TK_INSERT,     TK_MATCH,      TK_PLAN,       TK_ANALYZE,    TK_PRAGMA,
+    TK_ABORT,      TK_VALUES,     TK_VIRTUAL,    TK_LIMIT,      TK_WHEN,
+    TK_WHERE,      TK_RENAME,     TK_AFTER,      TK_REPLACE,    TK_AND,
+    TK_DEFAULT,    TK_AUTOINCR,   TK_TO,         TK_IN,         TK_CAST,
+    TK_COLUMNKW,   TK_COMMIT,     TK_CONFLICT,   TK_JOIN_KW,    TK_CTIME_KW,
+    TK_CTIME_KW,   TK_PRIMARY,    TK_DEFERRED,   TK_DISTINCT,   TK_IS,
+    TK_DROP,       TK_FAIL,       TK_FROM,       TK_JOIN_KW,    TK_LIKE_KW,
+    TK_BY,         TK_IF,         TK_ISNULL,     TK_ORDER,      TK_RESTRICT,
+    TK_JOIN_KW,    TK_ROLLBACK,   TK_ROW,        TK_UNION,      TK_USING,
+    TK_VACUUM,     TK_VIEW,       TK_INITIALLY,  TK_ALL,
+  };
+  int i, j;
+  const char *zKW;
+  if( n>=2 ){
+    i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n) % 127;
+    for(i=((int)aHash[i])-1; i>=0; i=((int)aNext[i])-1){
+      if( aLen[i]!=n ) continue;
+      j = 0;
+      zKW = &zText[aOffset[i]];
+#ifdef SQLITE_ASCII
+      while( j<n && (z[j]&~0x20)==zKW[j] ){ j++; }
+#endif
+#ifdef SQLITE_EBCDIC
+      while( j<n && toupper(z[j])==zKW[j] ){ j++; }
+#endif
+      if( j<n ) continue;
+      /* (One generated testcase() macro per keyword belongs here; the
+      ** macros expand to no-ops in normal builds and are omitted from
+      ** this copy.) */
+      *pType = aCode[i];
+      break;
+    }
+  }
+  return n;
+}
+SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){
+  int id = TK_ID;
+  keywordCode((char*)z, n, &id);
+  return id;
+}
+#define SQLITE_N_KEYWORD 124
+
+/************** End of keywordhash.h *****************************************/
+/************** Continuing where we left off in tokenize.c *******************/
+
+/*
+** If X is a character that can be used in an identifier then
+** IdChar(X) will be true.  Otherwise it is false.
+**
+** For ASCII, any character with the high-order bit set is
+** allowed in an identifier.  For 7-bit characters,
+** sqlite3IsIdChar[X] must be 1.
+**
+** For EBCDIC, the rules are more complex but have the same
+** end result.
+*/
+#ifdef SQLITE_ASCII
+#define IdChar(C)  ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0)
+#endif
+#ifdef SQLITE_EBCDIC
+SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
+#define IdChar(C)  (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40]))
+#endif
+
+/* Make the IdChar function accessible from ctime.c */
+#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
+SQLITE_PRIVATE int sqlite3IsIdChar(u8 c){ return IdChar(c); } +#endif + + +/* +** Return the length (in bytes) of the token that begins at z[0]. +** Store the token type in *tokenType before returning. +*/ +SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ + int i, c; + switch( aiClass[*z] ){ /* Switch on the character-class of the first byte + ** of the token. See the comment on the CC_ defines + ** above. */ + case CC_SPACE: { + testcase( z[0]==' ' ); + testcase( z[0]=='\t' ); + testcase( z[0]=='\n' ); + testcase( z[0]=='\f' ); + testcase( z[0]=='\r' ); + for(i=1; sqlite3Isspace(z[i]); i++){} + *tokenType = TK_SPACE; + return i; + } + case CC_MINUS: { + if( z[1]=='-' ){ + for(i=2; (c=z[i])!=0 && c!='\n'; i++){} + *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + return i; + } + *tokenType = TK_MINUS; + return 1; + } + case CC_LP: { + *tokenType = TK_LP; + return 1; + } + case CC_RP: { + *tokenType = TK_RP; + return 1; + } + case CC_SEMI: { + *tokenType = TK_SEMI; + return 1; + } + case CC_PLUS: { + *tokenType = TK_PLUS; + return 1; + } + case CC_STAR: { + *tokenType = TK_STAR; + return 1; + } + case CC_SLASH: { + if( z[1]!='*' || z[2]==0 ){ + *tokenType = TK_SLASH; + return 1; + } + for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} + if( c ) i++; + *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ + return i; + } + case CC_PERCENT: { + *tokenType = TK_REM; + return 1; + } + case CC_EQ: { + *tokenType = TK_EQ; + return 1 + (z[1]=='='); + } + case CC_LT: { + if( (c=z[1])=='=' ){ + *tokenType = TK_LE; + return 2; + }else if( c=='>' ){ + *tokenType = TK_NE; + return 2; + }else if( c=='<' ){ + *tokenType = TK_LSHIFT; + return 2; + }else{ + *tokenType = TK_LT; + return 1; + } + } + case CC_GT: { + if( (c=z[1])=='=' ){ + *tokenType = TK_GE; + return 2; + }else if( c=='>' ){ + *tokenType = TK_RSHIFT; + return 2; + }else{ + *tokenType = TK_GT; + return 1; + } + } + case CC_BANG: { + if( z[1]!='=' ){ + *tokenType = TK_ILLEGAL; + return 1; + }else{ + *tokenType = TK_NE; + return 2; + } + } + case CC_PIPE: { + if( z[1]!='|' ){ + *tokenType = TK_BITOR; + return 1; + }else{ + *tokenType = TK_CONCAT; + return 2; + } + } + case CC_COMMA: { + *tokenType = TK_COMMA; + return 1; + } + case CC_AND: { + *tokenType = TK_BITAND; + return 1; + } + case CC_TILDA: { + *tokenType = TK_BITNOT; + return 1; + } + case CC_QUOTE: { + int delim = z[0]; + testcase( delim=='`' ); + testcase( delim=='\'' ); + testcase( delim=='"' ); + for(i=1; (c=z[i])!=0; i++){ + if( c==delim ){ + if( z[i+1]==delim ){ + i++; + }else{ + break; + } + } + } + if( c=='\'' ){ + *tokenType = TK_STRING; + return i+1; + }else if( c!=0 ){ + *tokenType = TK_ID; + return i+1; + }else{ + *tokenType = TK_ILLEGAL; + return i; + } + } + case CC_DOT: { +#ifndef SQLITE_OMIT_FLOATING_POINT + if( !sqlite3Isdigit(z[1]) ) +#endif + { + *tokenType = TK_DOT; + return 1; + } + /* If the next character is a digit, this is a floating point + ** number that begins with ".". 
Fall thru into the next case */
+    }
+    case CC_DIGIT: {
+      testcase( z[0]=='0' );  testcase( z[0]=='1' );  testcase( z[0]=='2' );
+      testcase( z[0]=='3' );  testcase( z[0]=='4' );  testcase( z[0]=='5' );
+      testcase( z[0]=='6' );  testcase( z[0]=='7' );  testcase( z[0]=='8' );
+      testcase( z[0]=='9' );
+      *tokenType = TK_INTEGER;
+#ifndef SQLITE_OMIT_HEX_INTEGER
+      if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){
+        for(i=3; sqlite3Isxdigit(z[i]); i++){}
+        return i;
+      }
+#endif
+      for(i=0; sqlite3Isdigit(z[i]); i++){}
+#ifndef SQLITE_OMIT_FLOATING_POINT
+      if( z[i]=='.' ){
+        i++;
+        while( sqlite3Isdigit(z[i]) ){ i++; }
+        *tokenType = TK_FLOAT;
+      }
+      if( (z[i]=='e' || z[i]=='E') &&
+           ( sqlite3Isdigit(z[i+1])
+            || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2]))
+           )
+      ){
+        i += 2;
+        while( sqlite3Isdigit(z[i]) ){ i++; }
+        *tokenType = TK_FLOAT;
+      }
+#endif
+      while( IdChar(z[i]) ){
+        *tokenType = TK_ILLEGAL;
+        i++;
+      }
+      return i;
+    }
+    case CC_QUOTE2: {
+      for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){}
+      *tokenType = c==']' ? TK_ID : TK_ILLEGAL;
+      return i;
+    }
+    case CC_VARNUM: {
+      *tokenType = TK_VARIABLE;
+      for(i=1; sqlite3Isdigit(z[i]); i++){}
+      return i;
+    }
+    case CC_DOLLAR:
+    case CC_VARALPHA: {
+      int n = 0;
+      testcase( z[0]=='$' );  testcase( z[0]=='@' );
+      testcase( z[0]==':' );  testcase( z[0]=='#' );
+      *tokenType = TK_VARIABLE;
+      for(i=1; (c=z[i])!=0; i++){
+        if( IdChar(c) ){
+          n++;
+#ifndef SQLITE_OMIT_TCL_VARIABLE
+        }else if( c=='(' && n>0 ){
+          do{
+            i++;
+          }while( (c=z[i])!=0 && !sqlite3Isspace(c) && c!=')' );
+          if( c==')' ){
+            i++;
+          }else{
+            *tokenType = TK_ILLEGAL;
+          }
+          break;
+        }else if( c==':' && z[i+1]==':' ){
+          i++;
+#endif
+        }else{
+          break;
+        }
+      }
+      if( n==0 ) *tokenType = TK_ILLEGAL;
+      return i;
+    }
+    case CC_KYWD: {
+      for(i=1; aiClass[z[i]]<=CC_KYWD; i++){}
+      if( IdChar(z[i]) ){
+        /* This token started out using characters that can appear in keywords,
+        ** but z[i] is a character not allowed within keywords, so this must
+        ** be an identifier instead */
+        i++;
+        break;
+      }
+      *tokenType = TK_ID;
+      return keywordCode((char*)z, i, tokenType);
+    }
+    case CC_X: {
+#ifndef SQLITE_OMIT_BLOB_LITERAL
+      testcase( z[0]=='x' ); testcase( z[0]=='X' );
+      if( z[1]=='\'' ){
+        *tokenType = TK_BLOB;
+        for(i=2; sqlite3Isxdigit(z[i]); i++){}
+        if( z[i]!='\'' || i%2 ){
+          *tokenType = TK_ILLEGAL;
+          while( z[i] && z[i]!='\'' ){ i++; }
+        }
+        if( z[i] ) i++;
+        return i;
+      }
+#endif
+      /* If it is not a BLOB literal, then it must be an ID, since no
+      ** SQL keywords start with the letter 'x'.  Fall through */
+    }
+    case CC_ID: {
+      i = 1;
+      break;
+    }
+    default: {
+      *tokenType = TK_ILLEGAL;
+      return 1;
+    }
+  }
+  while( IdChar(z[i]) ){ i++; }
+  *tokenType = TK_ID;
+  return i;
+}
+
+/*
+** Run the parser on the given SQL string.  The parser structure is
+** passed in.  An SQLITE_ status code is returned.  If an error occurs
+** then an attempt is made to write an error message into
+** memory obtained from sqlite3_malloc() and to make *pzErrMsg point to that
+** error message.
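+**
+** (Editorial illustration, not part of the original comment: for the input
+** "SELECT 1;" the loop below obtains TK_SELECT, TK_SPACE, TK_INTEGER and
+** TK_SEMI in turn from sqlite3GetToken(), hands everything except the
+** TK_SPACE token to the LEMON-generated parser, and finally feeds a closing
+** TK_SEMI, if one was not already seen, followed by the 0 end-of-input
+** token.)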
+*/ +SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzErrMsg){ + int nErr = 0; /* Number of errors encountered */ + int i; /* Loop counter */ + void *pEngine; /* The LEMON-generated LALR(1) parser */ + int tokenType; /* type of the next token */ + int lastTokenParsed = -1; /* type of the previous token */ + sqlite3 *db = pParse->db; /* The database connection */ + int mxSqlLen; /* Max length of an SQL string */ +#ifdef sqlite3Parser_ENGINEALWAYSONSTACK + unsigned char zSpace[sizeof(yyParser)]; /* Space for parser engine object */ +#endif + + assert( zSql!=0 ); + mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; + if( db->nVdbeActive==0 ){ + db->u1.isInterrupted = 0; + } + pParse->rc = SQLITE_OK; + pParse->zTail = zSql; + i = 0; + assert( pzErrMsg!=0 ); + /* sqlite3ParserTrace(stdout, "parser: "); */ +#ifdef sqlite3Parser_ENGINEALWAYSONSTACK + pEngine = zSpace; + sqlite3ParserInit(pEngine); +#else + pEngine = sqlite3ParserAlloc(sqlite3Malloc); + if( pEngine==0 ){ + sqlite3OomFault(db); + return SQLITE_NOMEM_BKPT; + } +#endif + assert( pParse->pNewTable==0 ); + assert( pParse->pNewTrigger==0 ); + assert( pParse->nVar==0 ); + assert( pParse->pVList==0 ); + while( 1 ){ + assert( i>=0 ); + if( zSql[i]!=0 ){ + pParse->sLastToken.z = &zSql[i]; + pParse->sLastToken.n = sqlite3GetToken((u8*)&zSql[i],&tokenType); + i += pParse->sLastToken.n; + if( i>mxSqlLen ){ + pParse->rc = SQLITE_TOOBIG; + break; + } + }else{ + /* Upon reaching the end of input, call the parser two more times + ** with tokens TK_SEMI and 0, in that order. */ + if( lastTokenParsed==TK_SEMI ){ + tokenType = 0; + }else if( lastTokenParsed==0 ){ + break; + }else{ + tokenType = TK_SEMI; + } + } + if( tokenType>=TK_SPACE ){ + assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL ); + if( db->u1.isInterrupted ){ + pParse->rc = SQLITE_INTERRUPT; + break; + } + if( tokenType==TK_ILLEGAL ){ + sqlite3ErrorMsg(pParse, "unrecognized token: \"%T\"", + &pParse->sLastToken); + break; + } + }else{ + sqlite3Parser(pEngine, tokenType, pParse->sLastToken, pParse); + lastTokenParsed = tokenType; + if( pParse->rc!=SQLITE_OK || db->mallocFailed ) break; + } + } + assert( nErr==0 ); + pParse->zTail = &zSql[i]; +#ifdef YYTRACKMAXSTACKDEPTH + sqlite3_mutex_enter(sqlite3MallocMutex()); + sqlite3StatusHighwater(SQLITE_STATUS_PARSER_STACK, + sqlite3ParserStackPeak(pEngine) + ); + sqlite3_mutex_leave(sqlite3MallocMutex()); +#endif /* YYDEBUG */ +#ifdef sqlite3Parser_ENGINEALWAYSONSTACK + sqlite3ParserFinalize(pEngine); +#else + sqlite3ParserFree(pEngine, sqlite3_free); +#endif + if( db->mallocFailed ){ + pParse->rc = SQLITE_NOMEM_BKPT; + } + if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){ + pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); + } + assert( pzErrMsg!=0 ); + if( pParse->zErrMsg ){ + *pzErrMsg = pParse->zErrMsg; + sqlite3_log(pParse->rc, "%s", *pzErrMsg); + pParse->zErrMsg = 0; + nErr++; + } + if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){ + sqlite3VdbeDelete(pParse->pVdbe); + pParse->pVdbe = 0; + } +#ifndef SQLITE_OMIT_SHARED_CACHE + if( pParse->nested==0 ){ + sqlite3DbFree(db, pParse->aTableLock); + pParse->aTableLock = 0; + pParse->nTableLock = 0; + } +#endif +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3_free(pParse->apVtabLock); +#endif + + if( !IN_DECLARE_VTAB ){ + /* If the pParse->declareVtab flag is set, do not delete any table + ** structure built up in pParse->pNewTable. 
The calling code (see vtab.c)
+    ** will take responsibility for freeing the Table structure.
+    */
+    sqlite3DeleteTable(db, pParse->pNewTable);
+  }
+
+  if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree);
+  sqlite3DeleteTrigger(db, pParse->pNewTrigger);
+  sqlite3DbFree(db, pParse->pVList);
+  while( pParse->pAinc ){
+    AutoincInfo *p = pParse->pAinc;
+    pParse->pAinc = p->pNext;
+    sqlite3DbFree(db, p);
+  }
+  while( pParse->pZombieTab ){
+    Table *p = pParse->pZombieTab;
+    pParse->pZombieTab = p->pNextZombie;
+    sqlite3DeleteTable(db, p);
+  }
+  assert( nErr==0 || pParse->rc!=SQLITE_OK );
+  return nErr;
+}
+
+/************** End of tokenize.c ********************************************/
+/************** Begin file complete.c ****************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** A tokenizer for SQL
+**
+** This file contains C code that implements the sqlite3_complete() API.
+** This code used to be part of the tokenizer.c source file.  But by
+** separating it out, the code will be automatically omitted from
+** static links that do not use it.
+*/
+/* #include "sqliteInt.h" */
+#ifndef SQLITE_OMIT_COMPLETE
+
+/*
+** This is defined in tokenize.c.  We just have to import the definition.
+*/
+#ifndef SQLITE_AMALGAMATION
+#ifdef SQLITE_ASCII
+#define IdChar(C)  ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0)
+#endif
+#ifdef SQLITE_EBCDIC
+SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
+#define IdChar(C)  (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40]))
+#endif
+#endif /* SQLITE_AMALGAMATION */
+
+
+/*
+** Token types used by the sqlite3_complete() routine.  See the header
+** comments on that procedure for additional information.
+*/
+#define tkSEMI    0
+#define tkWS      1
+#define tkOTHER   2
+#ifndef SQLITE_OMIT_TRIGGER
+#define tkEXPLAIN 3
+#define tkCREATE  4
+#define tkTEMP    5
+#define tkTRIGGER 6
+#define tkEND     7
+#endif
+
+/*
+** Return TRUE if the given SQL string ends in a semicolon.
+**
+** Special handling is required for CREATE TRIGGER statements.
+** Whenever the CREATE TRIGGER keywords are seen, the statement
+** must end with ";END;".
+**
+** This implementation uses a state machine with 8 states:
+**
+**   (0) INVALID   We have not yet seen a non-whitespace character.
+**
+**   (1) START     At the beginning or end of an SQL statement.  This routine
+**                 returns 1 if it ends in the START state and 0 if it ends
+**                 in any other state.
+**
+**   (2) NORMAL    We are in the middle of a statement which ends with a
+**                 single semicolon.
+**
+**   (3) EXPLAIN   The keyword EXPLAIN has been seen at the beginning of
+**                 a statement.
+**
+**   (4) CREATE    The keyword CREATE has been seen at the beginning of a
+**                 statement, possibly preceded by EXPLAIN and/or followed by
+**                 TEMP or TEMPORARY
+**
+**   (5) TRIGGER   We are in the middle of a trigger definition that must be
+**                 ended by a semicolon, the keyword END, and another semicolon.
+**
+**   (6) SEMI      We've seen the first semicolon in the ";END;" that occurs at
+**                 the end of a trigger definition.
+**
+**   (7) END       We've seen the ";END" of the ";END;" that occurs at the end
+**                 of a trigger definition.
+**
+** Transitions between states above are determined by tokens extracted
+** from the input.  The following tokens are significant:
+**
+**   (0) tkSEMI      A semicolon.
+**   (1) tkWS        Whitespace.
+**   (2) tkOTHER     Any other SQL token.
+**   (3) tkEXPLAIN   The "explain" keyword.
+**   (4) tkCREATE    The "create" keyword.
+**   (5) tkTEMP      The "temp" or "temporary" keyword.
+**   (6) tkTRIGGER   The "trigger" keyword.
+**   (7) tkEND       The "end" keyword.
+**
+** Whitespace never causes a state transition and is always ignored.
+** This means that a SQL string of all whitespace is invalid.
+**
+** If we compile with SQLITE_OMIT_TRIGGER, all of the computation needed
+** to recognize the end of a trigger can be omitted.  All we have to do
+** is look for a semicolon that is not part of a string or comment.
+*/
+SQLITE_API int sqlite3_complete(const char *zSql){
+  u8 state = 0;   /* Current state, using numbers defined in header comment */
+  u8 token;       /* Value of the next token */
+
+#ifndef SQLITE_OMIT_TRIGGER
+  /* A complex statement machine used to detect the end of a CREATE TRIGGER
+  ** statement.  This is the normal case.
+  */
+  static const u8 trans[8][8] = {
+                     /* Token:                                                */
+     /* State:       **  SEMI  WS  OTHER  EXPLAIN  CREATE  TEMP  TRIGGER  END */
+     /* 0 INVALID: */ {    1,  0,     2,       3,      4,    2,       2,   2, },
+     /* 1   START: */ {    1,  1,     2,       3,      4,    2,       2,   2, },
+     /* 2  NORMAL: */ {    1,  2,     2,       2,      2,    2,       2,   2, },
+     /* 3 EXPLAIN: */ {    1,  3,     3,       2,      4,    2,       2,   2, },
+     /* 4  CREATE: */ {    1,  4,     2,       2,      2,    4,       5,   2, },
+     /* 5 TRIGGER: */ {    6,  5,     5,       5,      5,    5,       5,   5, },
+     /* 6    SEMI: */ {    6,  6,     5,       5,      5,    5,       5,   7, },
+     /* 7     END: */ {    1,  7,     5,       5,      5,    5,       5,   5, },
+  };
+#else
+  /* If triggers are not supported by this compile then the statement machine
+  ** used to detect the end of a statement is much simpler
+  */
+  static const u8 trans[3][3] = {
+                     /* Token:           */
+     /* State:       **  SEMI  WS  OTHER */
+     /* 0 INVALID: */ {    1,  0,     2, },
+     /* 1   START: */ {    1,  1,     2, },
+     /* 2  NORMAL: */ {    1,  2,     2, },
+  };
+#endif /* SQLITE_OMIT_TRIGGER */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( zSql==0 ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+
+  while( *zSql ){
+    switch( *zSql ){
+      case ';': {  /* A semicolon */
+        token = tkSEMI;
+        break;
+      }
+      case ' ':
+      case '\r':
+      case '\t':
+      case '\n':
+      case '\f': {  /* White space is ignored */
+        token = tkWS;
+        break;
+      }
+      case '/': {   /* C-style comments */
+        if( zSql[1]!='*' ){
+          token = tkOTHER;
+          break;
+        }
+        zSql += 2;
+        while( zSql[0] && (zSql[0]!='*' || zSql[1]!='/') ){ zSql++; }
+        if( zSql[0]==0 ) return 0;
+        zSql++;
+        token = tkWS;
+        break;
+      }
+      case '-': {   /* SQL-style comments from "--" to end of line */
+        if( zSql[1]!='-' ){
+          token = tkOTHER;
+          break;
+        }
+        while( *zSql && *zSql!='\n' ){ zSql++; }
+        if( *zSql==0 ) return state==1;
+        token = tkWS;
+        break;
+      }
+      case '[': {   /* Microsoft-style identifiers in [...] 
*/ + zSql++; + while( *zSql && *zSql!=']' ){ zSql++; } + if( *zSql==0 ) return 0; + token = tkOTHER; + break; + } + case '`': /* Grave-accent quoted symbols used by MySQL */ + case '"': /* single- and double-quoted strings */ + case '\'': { + int c = *zSql; + zSql++; + while( *zSql && *zSql!=c ){ zSql++; } + if( *zSql==0 ) return 0; + token = tkOTHER; + break; + } + default: { +#ifdef SQLITE_EBCDIC + unsigned char c; +#endif + if( IdChar((u8)*zSql) ){ + /* Keywords and unquoted identifiers */ + int nId; + for(nId=1; IdChar(zSql[nId]); nId++){} +#ifdef SQLITE_OMIT_TRIGGER + token = tkOTHER; +#else + switch( *zSql ){ + case 'c': case 'C': { + if( nId==6 && sqlite3StrNICmp(zSql, "create", 6)==0 ){ + token = tkCREATE; + }else{ + token = tkOTHER; + } + break; + } + case 't': case 'T': { + if( nId==7 && sqlite3StrNICmp(zSql, "trigger", 7)==0 ){ + token = tkTRIGGER; + }else if( nId==4 && sqlite3StrNICmp(zSql, "temp", 4)==0 ){ + token = tkTEMP; + }else if( nId==9 && sqlite3StrNICmp(zSql, "temporary", 9)==0 ){ + token = tkTEMP; + }else{ + token = tkOTHER; + } + break; + } + case 'e': case 'E': { + if( nId==3 && sqlite3StrNICmp(zSql, "end", 3)==0 ){ + token = tkEND; + }else +#ifndef SQLITE_OMIT_EXPLAIN + if( nId==7 && sqlite3StrNICmp(zSql, "explain", 7)==0 ){ + token = tkEXPLAIN; + }else +#endif + { + token = tkOTHER; + } + break; + } + default: { + token = tkOTHER; + break; + } + } +#endif /* SQLITE_OMIT_TRIGGER */ + zSql += nId-1; + }else{ + /* Operators and special symbols */ + token = tkOTHER; + } + break; + } + } + state = trans[state][token]; + zSql++; + } + return state==1; +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** This routine is the same as the sqlite3_complete() routine described +** above, except that the parameter is required to be UTF-16 encoded, not +** UTF-8. +*/ +SQLITE_API int sqlite3_complete16(const void *zSql){ + sqlite3_value *pVal; + char const *zSql8; + int rc; + +#ifndef SQLITE_OMIT_AUTOINIT + rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + pVal = sqlite3ValueNew(0); + sqlite3ValueSetStr(pVal, -1, zSql, SQLITE_UTF16NATIVE, SQLITE_STATIC); + zSql8 = sqlite3ValueText(pVal, SQLITE_UTF8); + if( zSql8 ){ + rc = sqlite3_complete(zSql8); + }else{ + rc = SQLITE_NOMEM_BKPT; + } + sqlite3ValueFree(pVal); + return rc & 0xff; +} +#endif /* SQLITE_OMIT_UTF16 */ +#endif /* SQLITE_OMIT_COMPLETE */ + +/************** End of complete.c ********************************************/ +/************** Begin file main.c ********************************************/ +/* +** 2001 September 15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Main file for the SQLite library. The routines in this file +** implement the programmer interface to the library. Routines in +** other files are for internal use by SQLite and should not be +** accessed by users of the library. +*/ +/* #include "sqliteInt.h" */ + +#ifdef SQLITE_ENABLE_FTS3 +/************** Include fts3.h in the middle of main.c ***********************/ +/************** Begin file fts3.h ********************************************/ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. 
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** FTS3 library.  All it does is declare the sqlite3Fts3Init() interface.
+*/
+/* #include "sqlite3.h" */
+
+#if 0
+extern "C" {
+#endif  /* __cplusplus */
+
+SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db);
+
+#if 0
+}  /* extern "C" */
+#endif  /* __cplusplus */
+
+/************** End of fts3.h ************************************************/
+/************** Continuing where we left off in main.c ***********************/
+#endif
+#ifdef SQLITE_ENABLE_RTREE
+/************** Include rtree.h in the middle of main.c **********************/
+/************** Begin file rtree.h *******************************************/
+/*
+** 2008 May 26
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** RTREE library.  All it does is declare the sqlite3RtreeInit() interface.
+*/
+/* #include "sqlite3.h" */
+
+#if 0
+extern "C" {
+#endif  /* __cplusplus */
+
+SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db);
+
+#if 0
+}  /* extern "C" */
+#endif  /* __cplusplus */
+
+/************** End of rtree.h ***********************************************/
+/************** Continuing where we left off in main.c ***********************/
+#endif
+#ifdef SQLITE_ENABLE_ICU
+/************** Include sqliteicu.h in the middle of main.c ******************/
+/************** Begin file sqliteicu.h ***************************************/
+/*
+** 2008 May 26
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file is used by programs that want to link against the
+** ICU extension.  All it does is declare the sqlite3IcuInit() interface.
+*/
+/* #include "sqlite3.h" */
+
+#if 0
+extern "C" {
+#endif  /* __cplusplus */
+
+SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db);
+
+#if 0
+}  /* extern "C" */
+#endif  /* __cplusplus */
+
+
+/************** End of sqliteicu.h *******************************************/
+/************** Continuing where we left off in main.c ***********************/
+#endif
+#ifdef SQLITE_ENABLE_JSON1
+SQLITE_PRIVATE int sqlite3Json1Init(sqlite3*);
+#endif
+#ifdef SQLITE_ENABLE_FTS5
+SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*);
+#endif
+
+#ifndef SQLITE_AMALGAMATION
+/* IMPLEMENTATION-OF: R-46656-45156 The sqlite3_version[] string constant
+** contains the text of the SQLITE_VERSION macro.
+*/
+SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
+#endif
+
+/* IMPLEMENTATION-OF: R-53536-42575 The sqlite3_libversion() function returns
+** a pointer to the sqlite3_version[] string constant.
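+**
+** (Editorial illustration, not upstream text: the header and the linked
+** library are meant to agree, so a start-up check such as
+**
+**   assert( strcmp(sqlite3_libversion(), SQLITE_VERSION)==0 );
+**
+** verifies that the sqlite3.h seen at compile time matches the library
+** actually linked.)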
+*/ +SQLITE_API const char *sqlite3_libversion(void){ return sqlite3_version; } + +/* IMPLEMENTATION-OF: R-63124-39300 The sqlite3_sourceid() function returns a +** pointer to a string constant whose value is the same as the +** SQLITE_SOURCE_ID C preprocessor macro. +*/ +SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } + +/* IMPLEMENTATION-OF: R-35210-63508 The sqlite3_libversion_number() function +** returns an integer equal to SQLITE_VERSION_NUMBER. +*/ +SQLITE_API int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; } + +/* IMPLEMENTATION-OF: R-20790-14025 The sqlite3_threadsafe() function returns +** zero if and only if SQLite was compiled with mutexing code omitted due to +** the SQLITE_THREADSAFE compile-time option being set to 0. +*/ +SQLITE_API int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; } + +/* +** When compiling the test fixture or with debugging enabled (on Win32), +** this variable being set to non-zero will cause OSTRACE macros to emit +** extra diagnostic information. +*/ +#ifdef SQLITE_HAVE_OS_TRACE +# ifndef SQLITE_DEBUG_OS_TRACE +# define SQLITE_DEBUG_OS_TRACE 0 +# endif + int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE; +#endif + +#if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) +/* +** If the following function pointer is not NULL and if +** SQLITE_ENABLE_IOTRACE is enabled, then messages describing +** I/O active are written using this function. These messages +** are intended for debugging activity only. +*/ +SQLITE_API void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...) = 0; +#endif + +/* +** If the following global variable points to a string which is the +** name of a directory, then that directory will be used to store +** temporary files. +** +** See also the "PRAGMA temp_store_directory" SQL command. +*/ +SQLITE_API char *sqlite3_temp_directory = 0; + +/* +** If the following global variable points to a string which is the +** name of a directory, then that directory will be used to store +** all database files specified with a relative pathname. +** +** See also the "PRAGMA data_store_directory" SQL command. +*/ +SQLITE_API char *sqlite3_data_directory = 0; + +/* +** Initialize SQLite. +** +** This routine must be called to initialize the memory allocation, +** VFS, and mutex subsystems prior to doing any serious work with +** SQLite. But as long as you do not compile with SQLITE_OMIT_AUTOINIT +** this routine will be called automatically by key routines such as +** sqlite3_open(). +** +** This routine is a no-op except on its very first call for the process, +** or for the first call after a call to sqlite3_shutdown. +** +** The first thread to call this routine runs the initialization to +** completion. If subsequent threads call this routine before the first +** thread has finished the initialization process, then the subsequent +** threads must block until the first thread finishes with the initialization. +** +** The first thread might call this routine recursively. Recursive +** calls to this routine should not block, of course. Otherwise the +** initialization process would never complete. +** +** Let X be the first thread to enter this routine. Let Y be some other +** thread. Then while the initial invocation of this routine by X is +** incomplete, it is required that: +** +** * Calls to this routine from Y must block until the outer-most +** call by X completes. +** +** * Recursive calls to this routine from thread X return immediately +** without blocking. 
+*/
+SQLITE_API int sqlite3_initialize(void){
+  MUTEX_LOGIC( sqlite3_mutex *pMaster; )       /* The main static mutex */
+  int rc;                                      /* Result code */
+#ifdef SQLITE_EXTRA_INIT
+  int bRunExtraInit = 0;                       /* Extra initialization needed */
+#endif
+
+#ifdef SQLITE_OMIT_WSD
+  rc = sqlite3_wsd_init(4096, 24);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+#endif
+
+  /* If the following assert() fails on some obscure processor/compiler
+  ** combination, the work-around is to set the correct pointer
+  ** size at compile-time using -DSQLITE_PTRSIZE=n compile-time option */
+  assert( SQLITE_PTRSIZE==sizeof(char*) );
+
+  /* If SQLite is already completely initialized, then this call
+  ** to sqlite3_initialize() should be a no-op.  But the initialization
+  ** must be complete.  So isInit must not be set until the very end
+  ** of this routine.
+  */
+  if( sqlite3GlobalConfig.isInit ) return SQLITE_OK;
+
+  /* Make sure the mutex subsystem is initialized.  If unable to
+  ** initialize the mutex subsystem, return early with the error.
+  ** If the system is so sick that we are unable to allocate a mutex,
+  ** there is not much SQLite is going to be able to do.
+  **
+  ** The mutex subsystem must take care of serializing its own
+  ** initialization.
+  */
+  rc = sqlite3MutexInit();
+  if( rc ) return rc;
+
+  /* Initialize the malloc() system and the recursive pInitMutex mutex.
+  ** This operation is protected by the STATIC_MASTER mutex.  Note that
+  ** MutexAlloc() is called for a static mutex prior to initializing the
+  ** malloc subsystem - this implies that the allocation of a static
+  ** mutex must not require support from the malloc subsystem.
+  */
+  MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
+  sqlite3_mutex_enter(pMaster);
+  sqlite3GlobalConfig.isMutexInit = 1;
+  if( !sqlite3GlobalConfig.isMallocInit ){
+    rc = sqlite3MallocInit();
+  }
+  if( rc==SQLITE_OK ){
+    sqlite3GlobalConfig.isMallocInit = 1;
+    if( !sqlite3GlobalConfig.pInitMutex ){
+      sqlite3GlobalConfig.pInitMutex =
+           sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE);
+      if( sqlite3GlobalConfig.bCoreMutex && !sqlite3GlobalConfig.pInitMutex ){
+        rc = SQLITE_NOMEM_BKPT;
+      }
+    }
+  }
+  if( rc==SQLITE_OK ){
+    sqlite3GlobalConfig.nRefInitMutex++;
+  }
+  sqlite3_mutex_leave(pMaster);
+
+  /* If rc is not SQLITE_OK at this point, then either the malloc
+  ** subsystem could not be initialized or the system failed to allocate
+  ** the pInitMutex mutex. Return an error in either case.  */
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  /* Do the rest of the initialization under the recursive mutex so
+  ** that we will be able to handle recursive calls into
+  ** sqlite3_initialize().  The recursive calls normally come through
+  ** sqlite3_os_init() when it invokes sqlite3_vfs_register(), but other
+  ** recursive calls might also be possible.
+  **
+  ** IMPLEMENTATION-OF: R-00140-37445 SQLite automatically serializes calls
+  ** to the xInit method, so the xInit method need not be threadsafe.
+  **
+  ** The following mutex is what serializes access to the appdef pcache xInit
+  ** methods.  The sqlite3_pcache_methods.xInit() call is embedded in the
+  ** call to sqlite3PcacheInitialize().
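+  **
+  ** (Editorial illustration, not part of the original comment: builds made
+  ** with -DSQLITE_OMIT_AUTOINIT must run this routine themselves, e.g.
+  **
+  **     if( sqlite3_initialize()!=SQLITE_OK ) return 1;
+  **
+  ** before sqlite3_open() or any other library call.)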
+  */
+  sqlite3_mutex_enter(sqlite3GlobalConfig.pInitMutex);
+  if( sqlite3GlobalConfig.isInit==0 && sqlite3GlobalConfig.inProgress==0 ){
+    sqlite3GlobalConfig.inProgress = 1;
+#ifdef SQLITE_ENABLE_SQLLOG
+    {
+      extern void sqlite3_init_sqllog(void);
+      sqlite3_init_sqllog();
+    }
+#endif
+    memset(&sqlite3BuiltinFunctions, 0, sizeof(sqlite3BuiltinFunctions));
+    sqlite3RegisterBuiltinFunctions();
+    if( sqlite3GlobalConfig.isPCacheInit==0 ){
+      rc = sqlite3PcacheInitialize();
+    }
+    if( rc==SQLITE_OK ){
+      sqlite3GlobalConfig.isPCacheInit = 1;
+      rc = sqlite3OsInit();
+    }
+    if( rc==SQLITE_OK ){
+      sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage,
+          sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage);
+      sqlite3GlobalConfig.isInit = 1;
+#ifdef SQLITE_EXTRA_INIT
+      bRunExtraInit = 1;
+#endif
+    }
+    sqlite3GlobalConfig.inProgress = 0;
+  }
+  sqlite3_mutex_leave(sqlite3GlobalConfig.pInitMutex);
+
+  /* Go back under the static mutex and clean up the recursive
+  ** mutex to prevent a resource leak.
+  */
+  sqlite3_mutex_enter(pMaster);
+  sqlite3GlobalConfig.nRefInitMutex--;
+  if( sqlite3GlobalConfig.nRefInitMutex<=0 ){
+    assert( sqlite3GlobalConfig.nRefInitMutex==0 );
+    sqlite3_mutex_free(sqlite3GlobalConfig.pInitMutex);
+    sqlite3GlobalConfig.pInitMutex = 0;
+  }
+  sqlite3_mutex_leave(pMaster);
+
+  /* The following is just a sanity check to make sure SQLite has
+  ** been compiled correctly.  It is important to run this code, but
+  ** we don't want to run it too often and soak up CPU cycles for no
+  ** reason.  So we run it once during initialization.
+  */
+#ifndef NDEBUG
+#ifndef SQLITE_OMIT_FLOATING_POINT
+  /* This section of code's only "output" is via assert() statements. */
+  if( rc==SQLITE_OK ){
+    u64 x = (((u64)1)<<63)-1;
+    double y;
+    assert(sizeof(x)==8);
+    assert(sizeof(x)==sizeof(y));
+    memcpy(&y, &x, 8);
+    assert( sqlite3IsNaN(y) );
+  }
+#endif
+#endif
+
+  /* Do extra initialization steps requested by the SQLITE_EXTRA_INIT
+  ** compile-time option.
+  */
+#ifdef SQLITE_EXTRA_INIT
+  if( bRunExtraInit ){
+    int SQLITE_EXTRA_INIT(const char*);
+    rc = SQLITE_EXTRA_INIT(0);
+  }
+#endif
+
+  return rc;
+}
+
+/*
+** Undo the effects of sqlite3_initialize().  Must not be called while
+** there are outstanding database connections or memory allocations or
+** while any part of SQLite is otherwise in use in any thread.  This
+** routine is not threadsafe.  But it is safe to invoke this routine
+** when SQLite is already shut down.  If SQLite is already shut down
+** when this routine is invoked, then this routine is a harmless no-op.
+*/
+SQLITE_API int sqlite3_shutdown(void){
+#ifdef SQLITE_OMIT_WSD
+  int rc = sqlite3_wsd_init(4096, 24);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+#endif
+
+  if( sqlite3GlobalConfig.isInit ){
+#ifdef SQLITE_EXTRA_SHUTDOWN
+    void SQLITE_EXTRA_SHUTDOWN(void);
+    SQLITE_EXTRA_SHUTDOWN();
+#endif
+    sqlite3_os_end();
+    sqlite3_reset_auto_extension();
+    sqlite3GlobalConfig.isInit = 0;
+  }
+  if( sqlite3GlobalConfig.isPCacheInit ){
+    sqlite3PcacheShutdown();
+    sqlite3GlobalConfig.isPCacheInit = 0;
+  }
+  if( sqlite3GlobalConfig.isMallocInit ){
+    sqlite3MallocEnd();
+    sqlite3GlobalConfig.isMallocInit = 0;
+
+#ifndef SQLITE_OMIT_SHUTDOWN_DIRECTORIES
+    /* The heap subsystem has now been shutdown and these values are supposed
+    ** to be NULL or point to memory that was obtained from sqlite3_malloc(),
+    ** which would rely on that heap subsystem; therefore, make sure these
+    ** values cannot refer to heap memory that was just invalidated when the
+    ** heap subsystem was shutdown.  
This is only done if the current call to + ** this function resulted in the heap subsystem actually being shutdown. + */ + sqlite3_data_directory = 0; + sqlite3_temp_directory = 0; +#endif + } + if( sqlite3GlobalConfig.isMutexInit ){ + sqlite3MutexEnd(); + sqlite3GlobalConfig.isMutexInit = 0; + } + + return SQLITE_OK; +} + +/* +** This API allows applications to modify the global configuration of +** the SQLite library at run-time. +** +** This routine should only be called when there are no outstanding +** database connections or memory allocations. This routine is not +** threadsafe. Failure to heed these warnings can lead to unpredictable +** behavior. +*/ +SQLITE_API int sqlite3_config(int op, ...){ + va_list ap; + int rc = SQLITE_OK; + + /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while + ** the SQLite library is in use. */ + if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT; + + va_start(ap, op); + switch( op ){ + + /* Mutex configuration options are only available in a threadsafe + ** compile. + */ +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-54466-46756 */ + case SQLITE_CONFIG_SINGLETHREAD: { + /* EVIDENCE-OF: R-02748-19096 This option sets the threading mode to + ** Single-thread. */ + sqlite3GlobalConfig.bCoreMutex = 0; /* Disable mutex on core */ + sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */ + break; + } +#endif +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-20520-54086 */ + case SQLITE_CONFIG_MULTITHREAD: { + /* EVIDENCE-OF: R-14374-42468 This option sets the threading mode to + ** Multi-thread. */ + sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */ + sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */ + break; + } +#endif +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-59593-21810 */ + case SQLITE_CONFIG_SERIALIZED: { + /* EVIDENCE-OF: R-41220-51800 This option sets the threading mode to + ** Serialized. */ + sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */ + sqlite3GlobalConfig.bFullMutex = 1; /* Enable mutex on connections */ + break; + } +#endif +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-63666-48755 */ + case SQLITE_CONFIG_MUTEX: { + /* Specify an alternative mutex implementation */ + sqlite3GlobalConfig.mutex = *va_arg(ap, sqlite3_mutex_methods*); + break; + } +#endif +#if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-14450-37597 */ + case SQLITE_CONFIG_GETMUTEX: { + /* Retrieve the current mutex implementation */ + *va_arg(ap, sqlite3_mutex_methods*) = sqlite3GlobalConfig.mutex; + break; + } +#endif + + case SQLITE_CONFIG_MALLOC: { + /* EVIDENCE-OF: R-55594-21030 The SQLITE_CONFIG_MALLOC option takes a + ** single argument which is a pointer to an instance of the + ** sqlite3_mem_methods structure. The argument specifies alternative + ** low-level memory allocation routines to be used in place of the memory + ** allocation routines built into SQLite. */ + sqlite3GlobalConfig.m = *va_arg(ap, sqlite3_mem_methods*); + break; + } + case SQLITE_CONFIG_GETMALLOC: { + /* EVIDENCE-OF: R-51213-46414 The SQLITE_CONFIG_GETMALLOC option takes a + ** single argument which is a pointer to an instance of the + ** sqlite3_mem_methods structure. The sqlite3_mem_methods structure is + ** filled with the currently defined memory allocation routines. 
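+      **
+      ** (Editorial illustration: a typical use is wrapping the current
+      ** allocator before sqlite3_initialize() runs:
+      **
+      **   sqlite3_mem_methods base;
+      **   sqlite3_config(SQLITE_CONFIG_GETMALLOC, &base);
+      **   ...modify a copy and install it with SQLITE_CONFIG_MALLOC...
+      ** )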
+      */
+      if( sqlite3GlobalConfig.m.xMalloc==0 ) sqlite3MemSetDefault();
+      *va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m;
+      break;
+    }
+    case SQLITE_CONFIG_MEMSTATUS: {
+      /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes
+      ** a single argument of type int, interpreted as a boolean, which enables
+      ** or disables the collection of memory allocation statistics. */
+      sqlite3GlobalConfig.bMemstat = va_arg(ap, int);
+      break;
+    }
+    case SQLITE_CONFIG_SCRATCH: {
+      /* EVIDENCE-OF: R-08404-60887 There are three arguments to
+      ** SQLITE_CONFIG_SCRATCH: A pointer to an 8-byte aligned memory buffer
+      ** from which the scratch allocations will be drawn, the size of each
+      ** scratch allocation (sz), and the maximum number of scratch
+      ** allocations (N). */
+      sqlite3GlobalConfig.pScratch = va_arg(ap, void*);
+      sqlite3GlobalConfig.szScratch = va_arg(ap, int);
+      sqlite3GlobalConfig.nScratch = va_arg(ap, int);
+      break;
+    }
+    case SQLITE_CONFIG_PAGECACHE: {
+      /* EVIDENCE-OF: R-18761-36601 There are three arguments to
+      ** SQLITE_CONFIG_PAGECACHE: A pointer to 8-byte aligned memory (pMem),
+      ** the size of each page cache line (sz), and the number of cache lines
+      ** (N). */
+      sqlite3GlobalConfig.pPage = va_arg(ap, void*);
+      sqlite3GlobalConfig.szPage = va_arg(ap, int);
+      sqlite3GlobalConfig.nPage = va_arg(ap, int);
+      break;
+    }
+    case SQLITE_CONFIG_PCACHE_HDRSZ: {
+      /* EVIDENCE-OF: R-39100-27317 The SQLITE_CONFIG_PCACHE_HDRSZ option takes
+      ** a single parameter which is a pointer to an integer and writes into
+      ** that integer the number of extra bytes per page required for each page
+      ** in SQLITE_CONFIG_PAGECACHE. */
+      *va_arg(ap, int*) =
+          sqlite3HeaderSizeBtree() +
+          sqlite3HeaderSizePcache() +
+          sqlite3HeaderSizePcache1();
+      break;
+    }
+
+    case SQLITE_CONFIG_PCACHE: {
+      /* no-op */
+      break;
+    }
+    case SQLITE_CONFIG_GETPCACHE: {
+      /* now an error */
+      rc = SQLITE_ERROR;
+      break;
+    }
+
+    case SQLITE_CONFIG_PCACHE2: {
+      /* EVIDENCE-OF: R-63325-48378 The SQLITE_CONFIG_PCACHE2 option takes a
+      ** single argument which is a pointer to an sqlite3_pcache_methods2
+      ** object.  This object specifies the interface to a custom page cache
+      ** implementation. */
+      sqlite3GlobalConfig.pcache2 = *va_arg(ap, sqlite3_pcache_methods2*);
+      break;
+    }
+    case SQLITE_CONFIG_GETPCACHE2: {
+      /* EVIDENCE-OF: R-22035-46182 The SQLITE_CONFIG_GETPCACHE2 option takes a
+      ** single argument which is a pointer to an sqlite3_pcache_methods2
+      ** object.  SQLite copies the current page cache implementation into
+      ** that object. */
+      if( sqlite3GlobalConfig.pcache2.xInit==0 ){
+        sqlite3PCacheSetDefault();
+      }
+      *va_arg(ap, sqlite3_pcache_methods2*) = sqlite3GlobalConfig.pcache2;
+      break;
+    }
+
+/* EVIDENCE-OF: R-06626-12911 The SQLITE_CONFIG_HEAP option is only
+** available if SQLite is compiled with either SQLITE_ENABLE_MEMSYS3 or
+** SQLITE_ENABLE_MEMSYS5 and returns SQLITE_ERROR if invoked otherwise. */
+#if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5)
+    case SQLITE_CONFIG_HEAP: {
+      /* EVIDENCE-OF: R-19854-42126 There are three arguments to
+      ** SQLITE_CONFIG_HEAP: An 8-byte aligned pointer to the memory, the
+      ** number of bytes in the memory buffer, and the minimum allocation
+      ** size.
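+      **
+      ** (Editorial illustration, assuming a build with SQLITE_ENABLE_MEMSYS5:
+      **
+      **   static sqlite3_int64 heapBuf[1024*1024];   -- 8MB, 8-byte aligned
+      **   sqlite3_config(SQLITE_CONFIG_HEAP, heapBuf,
+      **                  (int)sizeof(heapBuf), 64);
+      **
+      ** after which every SQLite allocation is carved out of heapBuf.)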
+ */ + sqlite3GlobalConfig.pHeap = va_arg(ap, void*); + sqlite3GlobalConfig.nHeap = va_arg(ap, int); + sqlite3GlobalConfig.mnReq = va_arg(ap, int); + + if( sqlite3GlobalConfig.mnReq<1 ){ + sqlite3GlobalConfig.mnReq = 1; + }else if( sqlite3GlobalConfig.mnReq>(1<<12) ){ + /* cap min request size at 2^12 */ + sqlite3GlobalConfig.mnReq = (1<<12); + } + + if( sqlite3GlobalConfig.pHeap==0 ){ + /* EVIDENCE-OF: R-49920-60189 If the first pointer (the memory pointer) + ** is NULL, then SQLite reverts to using its default memory allocator + ** (the system malloc() implementation), undoing any prior invocation of + ** SQLITE_CONFIG_MALLOC. + ** + ** Setting sqlite3GlobalConfig.m to all zeros will cause malloc to + ** revert to its default implementation when sqlite3_initialize() is run + */ + memset(&sqlite3GlobalConfig.m, 0, sizeof(sqlite3GlobalConfig.m)); + }else{ + /* EVIDENCE-OF: R-61006-08918 If the memory pointer is not NULL then the + ** alternative memory allocator is engaged to handle all of SQLites + ** memory allocation needs. */ +#ifdef SQLITE_ENABLE_MEMSYS3 + sqlite3GlobalConfig.m = *sqlite3MemGetMemsys3(); +#endif +#ifdef SQLITE_ENABLE_MEMSYS5 + sqlite3GlobalConfig.m = *sqlite3MemGetMemsys5(); +#endif + } + break; + } +#endif + + case SQLITE_CONFIG_LOOKASIDE: { + sqlite3GlobalConfig.szLookaside = va_arg(ap, int); + sqlite3GlobalConfig.nLookaside = va_arg(ap, int); + break; + } + + /* Record a pointer to the logger function and its first argument. + ** The default is NULL. Logging is disabled if the function pointer is + ** NULL. + */ + case SQLITE_CONFIG_LOG: { + /* MSVC is picky about pulling func ptrs from va lists. + ** http://support.microsoft.com/kb/47961 + ** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*)); + */ + typedef void(*LOGFUNC_t)(void*,int,const char*); + sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t); + sqlite3GlobalConfig.pLogArg = va_arg(ap, void*); + break; + } + + /* EVIDENCE-OF: R-55548-33817 The compile-time setting for URI filenames + ** can be changed at start-time using the + ** sqlite3_config(SQLITE_CONFIG_URI,1) or + ** sqlite3_config(SQLITE_CONFIG_URI,0) configuration calls. + */ + case SQLITE_CONFIG_URI: { + /* EVIDENCE-OF: R-25451-61125 The SQLITE_CONFIG_URI option takes a single + ** argument of type int. If non-zero, then URI handling is globally + ** enabled. If the parameter is zero, then URI handling is globally + ** disabled. */ + sqlite3GlobalConfig.bOpenUri = va_arg(ap, int); + break; + } + + case SQLITE_CONFIG_COVERING_INDEX_SCAN: { + /* EVIDENCE-OF: R-36592-02772 The SQLITE_CONFIG_COVERING_INDEX_SCAN + ** option takes a single integer argument which is interpreted as a + ** boolean in order to enable or disable the use of covering indices for + ** full table scans in the query optimizer. */ + sqlite3GlobalConfig.bUseCis = va_arg(ap, int); + break; + } + +#ifdef SQLITE_ENABLE_SQLLOG + case SQLITE_CONFIG_SQLLOG: { + typedef void(*SQLLOGFUNC_t)(void*, sqlite3*, const char*, int); + sqlite3GlobalConfig.xSqllog = va_arg(ap, SQLLOGFUNC_t); + sqlite3GlobalConfig.pSqllogArg = va_arg(ap, void *); + break; + } +#endif + + case SQLITE_CONFIG_MMAP_SIZE: { + /* EVIDENCE-OF: R-58063-38258 SQLITE_CONFIG_MMAP_SIZE takes two 64-bit + ** integer (sqlite3_int64) values that are the default mmap size limit + ** (the default setting for PRAGMA mmap_size) and the maximum allowed + ** mmap size limit. 
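+      **
+      ** (Editorial illustration: passing two zeros,
+      **   sqlite3_config(SQLITE_CONFIG_MMAP_SIZE,
+      **                  (sqlite3_int64)0, (sqlite3_int64)0);
+      ** forces both limits to zero, which disables memory-mapped I/O
+      ** entirely, since PRAGMA mmap_size can never exceed the maximum
+      ** configured here.)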
*/ + sqlite3_int64 szMmap = va_arg(ap, sqlite3_int64); + sqlite3_int64 mxMmap = va_arg(ap, sqlite3_int64); + /* EVIDENCE-OF: R-53367-43190 If either argument to this option is + ** negative, then that argument is changed to its compile-time default. + ** + ** EVIDENCE-OF: R-34993-45031 The maximum allowed mmap size will be + ** silently truncated if necessary so that it does not exceed the + ** compile-time maximum mmap size set by the SQLITE_MAX_MMAP_SIZE + ** compile-time option. + */ + if( mxMmap<0 || mxMmap>SQLITE_MAX_MMAP_SIZE ){ + mxMmap = SQLITE_MAX_MMAP_SIZE; + } + if( szMmap<0 ) szMmap = SQLITE_DEFAULT_MMAP_SIZE; + if( szMmap>mxMmap) szMmap = mxMmap; + sqlite3GlobalConfig.mxMmap = mxMmap; + sqlite3GlobalConfig.szMmap = szMmap; + break; + } + +#if SQLITE_OS_WIN && defined(SQLITE_WIN32_MALLOC) /* IMP: R-04780-55815 */ + case SQLITE_CONFIG_WIN32_HEAPSIZE: { + /* EVIDENCE-OF: R-34926-03360 SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit + ** unsigned integer value that specifies the maximum size of the created + ** heap. */ + sqlite3GlobalConfig.nHeap = va_arg(ap, int); + break; + } +#endif + + case SQLITE_CONFIG_PMASZ: { + sqlite3GlobalConfig.szPma = va_arg(ap, unsigned int); + break; + } + + case SQLITE_CONFIG_STMTJRNL_SPILL: { + sqlite3GlobalConfig.nStmtSpill = va_arg(ap, int); + break; + } + + default: { + rc = SQLITE_ERROR; + break; + } + } + va_end(ap); + return rc; +} + +/* +** Set up the lookaside buffers for a database connection. +** Return SQLITE_OK on success. +** If lookaside is already active, return SQLITE_BUSY. +** +** The sz parameter is the number of bytes in each lookaside slot. +** The cnt parameter is the number of slots. If pStart is NULL the +** space for the lookaside memory is obtained from sqlite3_malloc(). +** If pStart is not NULL then it is sz*cnt bytes of memory to use for +** the lookaside memory. +*/ +static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ +#ifndef SQLITE_OMIT_LOOKASIDE + void *pStart; + if( db->lookaside.nOut ){ + return SQLITE_BUSY; + } + /* Free any existing lookaside buffer for this handle before + ** allocating a new one so we don't have to have space for + ** both at the same time. + */ + if( db->lookaside.bMalloced ){ + sqlite3_free(db->lookaside.pStart); + } + /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger + ** than a pointer to be useful. + */ + sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ + if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; + if( cnt<0 ) cnt = 0; + if( sz==0 || cnt==0 ){ + sz = 0; + pStart = 0; + }else if( pBuf==0 ){ + sqlite3BeginBenignMalloc(); + pStart = sqlite3Malloc( sz*cnt ); /* IMP: R-61949-35727 */ + sqlite3EndBenignMalloc(); + if( pStart ) cnt = sqlite3MallocSize(pStart)/sz; + }else{ + pStart = pBuf; + } + db->lookaside.pStart = pStart; + db->lookaside.pFree = 0; + db->lookaside.sz = (u16)sz; + if( pStart ){ + int i; + LookasideSlot *p; + assert( sz > (int)sizeof(LookasideSlot*) ); + p = (LookasideSlot*)pStart; + for(i=cnt-1; i>=0; i--){ + p->pNext = db->lookaside.pFree; + db->lookaside.pFree = p; + p = (LookasideSlot*)&((u8*)p)[sz]; + } + db->lookaside.pEnd = p; + db->lookaside.bDisable = 0; + db->lookaside.bMalloced = pBuf==0 ?1:0; + }else{ + db->lookaside.pStart = db; + db->lookaside.pEnd = db; + db->lookaside.bDisable = 1; + db->lookaside.bMalloced = 0; + } +#endif /* SQLITE_OMIT_LOOKASIDE */ + return SQLITE_OK; +} + +/* +** Return the mutex associated with a database connection. 
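+**
+** (Editorial illustration: a caller that needs sqlite3_errcode() and
+** sqlite3_errmsg() to describe the same error can serialize around them:
+**
+**   sqlite3_mutex_enter(sqlite3_db_mutex(db));
+**   rc = sqlite3_step(pStmt);
+**   if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) logError(sqlite3_errmsg(db));
+**   sqlite3_mutex_leave(sqlite3_db_mutex(db));
+**
+** where logError() stands in for a hypothetical application routine.)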
+*/
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  return db->mutex;
+}
+
+/*
+** Free up as much memory as we can from the given database
+** connection.
+*/
+SQLITE_API int sqlite3_db_release_memory(sqlite3 *db){
+  int i;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  sqlite3BtreeEnterAll(db);
+  for(i=0; i<db->nDb; i++){
+    Btree *pBt = db->aDb[i].pBt;
+    if( pBt ){
+      Pager *pPager = sqlite3BtreePager(pBt);
+      sqlite3PagerShrink(pPager);
+    }
+  }
+  sqlite3BtreeLeaveAll(db);
+  sqlite3_mutex_leave(db->mutex);
+  return SQLITE_OK;
+}
+
+/*
+** Flush any dirty pages in the pager-cache for any attached database
+** to disk.
+*/
+SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){
+  int i;
+  int rc = SQLITE_OK;
+  int bSeenBusy = 0;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  sqlite3BtreeEnterAll(db);
+  for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+    Btree *pBt = db->aDb[i].pBt;
+    if( pBt && sqlite3BtreeIsInTrans(pBt) ){
+      Pager *pPager = sqlite3BtreePager(pBt);
+      rc = sqlite3PagerFlush(pPager);
+      if( rc==SQLITE_BUSY ){
+        bSeenBusy = 1;
+        rc = SQLITE_OK;
+      }
+    }
+  }
+  sqlite3BtreeLeaveAll(db);
+  sqlite3_mutex_leave(db->mutex);
+  return ((rc==SQLITE_OK && bSeenBusy) ? SQLITE_BUSY : rc);
+}
+
+/*
+** Configuration settings for an individual database connection
+*/
+SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
+  va_list ap;
+  int rc;
+  va_start(ap, op);
+  switch( op ){
+    case SQLITE_DBCONFIG_MAINDBNAME: {
+      db->aDb[0].zDbSName = va_arg(ap,char*);
+      rc = SQLITE_OK;
+      break;
+    }
+    case SQLITE_DBCONFIG_LOOKASIDE: {
+      void *pBuf = va_arg(ap, void*); /* IMP: R-26835-10964 */
+      int sz = va_arg(ap, int);       /* IMP: R-47871-25994 */
+      int cnt = va_arg(ap, int);      /* IMP: R-04460-53386 */
+      rc = setupLookaside(db, pBuf, sz, cnt);
+      break;
+    }
+    default: {
+      static const struct {
+        int op;      /* The opcode */
+        u32 mask;    /* Mask of the bit in sqlite3.flags to set/clear */
+      } aFlagOp[] = {
+        { SQLITE_DBCONFIG_ENABLE_FKEY,           SQLITE_ForeignKeys    },
+        { SQLITE_DBCONFIG_ENABLE_TRIGGER,        SQLITE_EnableTrigger  },
+        { SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, SQLITE_Fts3Tokenizer  },
+        { SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_LoadExtension  },
+        { SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE,      SQLITE_NoCkptOnClose  },
+      };
+      unsigned int i;
+      rc = SQLITE_ERROR; /* IMP: R-42790-23372 */
+      for(i=0; i<ArraySize(aFlagOp); i++){
+        if( aFlagOp[i].op==op ){
+          int onoff = va_arg(ap, int);
+          int *pRes = va_arg(ap, int*);
+          u32 oldFlags = db->flags;
+          if( onoff>0 ){
+            db->flags |= aFlagOp[i].mask;
+          }else if( onoff==0 ){
+            db->flags &= ~aFlagOp[i].mask;
+          }
+          if( oldFlags!=db->flags ){
+            sqlite3ExpirePreparedStatements(db);
+          }
+          if( pRes ){
+            *pRes = (db->flags & aFlagOp[i].mask)!=0;
+          }
+          rc = SQLITE_OK;
+          break;
+        }
+      }
+      break;
+    }
+  }
+  va_end(ap);
+  return rc;
+}
+
+
+/*
+** Return true if the buffer z[0..n-1] contains all spaces.
+*/
+static int allSpaces(const char *z, int n){
+  while( n>0 && z[n-1]==' ' ){ n--; }
+  return n==0;
+}
+
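+/* (Editorial illustration, not part of the original amalgamation: the
+** padFlag path in binCollFunc() below is what separates the built-in
+** BINARY and RTRIM collations, which are two registrations of this one
+** function.  At the SQL level:
+**
+**   SELECT 'abc' = 'abc   ' COLLATE BINARY;   -- 0, trailing spaces differ
+**   SELECT 'abc' = 'abc   ' COLLATE RTRIM;    -- 1, trailing spaces ignored
+** ) */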
+
+/*
+** Return true if the buffer z[0..n-1] contains all spaces.
+*/
+static int allSpaces(const char *z, int n){
+  while( n>0 && z[n-1]==' ' ){ n--; }
+  return n==0;
+}
+
+/*
+** This is the default collating function named "BINARY" which is always
+** available.
+**
+** If the padFlag argument is not NULL then space padding at the end
+** of strings is ignored. This implements the RTRIM collation.
+*/
+static int binCollFunc(
+  void *padFlag,
+  int nKey1, const void *pKey1,
+  int nKey2, const void *pKey2
+){
+  int rc, n;
+  n = nKey1<nKey2 ? nKey1 : nKey2;
+  /* EVIDENCE-OF: R-65033-28449 The built-in BINARY collation compares
+  ** strings byte by byte using the memcmp() function from the standard C
+  ** library. */
+  rc = memcmp(pKey1, pKey2, n);
+  if( rc==0 ){
+    if( padFlag
+     && allSpaces(((char*)pKey1)+n, nKey1-n)
+     && allSpaces(((char*)pKey2)+n, nKey2-n)
+    ){
+      /* EVIDENCE-OF: R-31624-24737 RTRIM is like BINARY except that extra
+      ** spaces at the end of either string do not change the result. In other
+      ** words, strings will compare equal to one another as long as they
+      ** differ only in the number of spaces at the end.
+      */
+    }else{
+      rc = nKey1 - nKey2;
+    }
+  }
+  return rc;
+}
+
+/*
+** Another built-in collating sequence: NOCASE.
+**
+** This collating sequence is intended to be used for "case independent
+** comparison". SQLite's knowledge of upper and lower case equivalents
+** extends only to the 26 characters used in the English language.
+**
+** At its core, this collating sequence returns TRUE if the two strings
+** match case-insensitively.
+*/
+static int nocaseCollatingFunc(
+  void *NotUsed,
+  int nKey1, const void *pKey1,
+  int nKey2, const void *pKey2
+){
+  int r = sqlite3StrNICmp(
+      (const char *)pKey1, (const char *)pKey2, (nKey1<nKey2)?nKey1:nKey2);
+  UNUSED_PARAMETER(NotUsed);
+  if( r==0 ){
+    r = nKey1-nKey2;
+  }
+  return r;
+}
+
+/*
+** Return the ROWID of the most recent insert
+*/
+SQLITE_API sqlite_int64 sqlite3_last_insert_rowid(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  return db->lastRowid;
+}
+
+/*
+** Return the number of changes in the most recent call to sqlite3_exec().
+*/
+SQLITE_API int sqlite3_changes(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  return db->nChange;
+}
+
+/*
+** Return the number of changes since the database handle was opened.
+*/
+SQLITE_API int sqlite3_total_changes(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  return db->nTotalChange;
+}
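+
+/* Illustrative sketch only -- not part of the amalgamation. Shows how the
+** three counters exposed above behave after a couple of statements. Table
+** and value names are made up for the example.
+*/
+#if 0
+#include <stdio.h>
+#include "sqlite3.h"
+
+static void demoChangeCounters(sqlite3 *db){
+  sqlite3_exec(db, "CREATE TABLE t1(x)", 0, 0, 0);
+  sqlite3_exec(db, "INSERT INTO t1 VALUES(1),(2),(3)", 0, 0, 0);
+  printf("last rowid = %lld\n", (long long)sqlite3_last_insert_rowid(db));
+  printf("changes = %d, total changes = %d\n",
+         sqlite3_changes(db),         /* 3: rows touched by the INSERT */
+         sqlite3_total_changes(db));  /* 3: running total for this handle */
+}
+#endif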
+
+/*
+** Close all open savepoints. This function only manipulates fields of the
+** database handle object, it does not close any savepoints that may be open
+** at the b-tree/pager level.
+*/
+SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *db){
+  while( db->pSavepoint ){
+    Savepoint *pTmp = db->pSavepoint;
+    db->pSavepoint = pTmp->pNext;
+    sqlite3DbFree(db, pTmp);
+  }
+  db->nSavepoint = 0;
+  db->nStatement = 0;
+  db->isTransactionSavepoint = 0;
+}
+
+/*
+** Invoke the destructor function associated with FuncDef p, if any. Except,
+** if this is not the last copy of the function, do not invoke it. Multiple
+** copies of a single function are created when create_function() is called
+** with SQLITE_ANY as the encoding.
+*/
+static void functionDestroy(sqlite3 *db, FuncDef *p){
+  FuncDestructor *pDestructor = p->u.pDestructor;
+  if( pDestructor ){
+    pDestructor->nRef--;
+    if( pDestructor->nRef==0 ){
+      pDestructor->xDestroy(pDestructor->pUserData);
+      sqlite3DbFree(db, pDestructor);
+    }
+  }
+}
+
+/*
+** Disconnect all sqlite3_vtab objects that belong to database connection
+** db. This is called when db is being closed.
+*/
+static void disconnectAllVtab(sqlite3 *db){
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  int i;
+  HashElem *p;
+  sqlite3BtreeEnterAll(db);
+  for(i=0; i<db->nDb; i++){
+    Schema *pSchema = db->aDb[i].pSchema;
+    if( db->aDb[i].pSchema ){
+      for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
+        Table *pTab = (Table *)sqliteHashData(p);
+        if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab);
+      }
+    }
+  }
+  for(p=sqliteHashFirst(&db->aModule); p; p=sqliteHashNext(p)){
+    Module *pMod = (Module *)sqliteHashData(p);
+    if( pMod->pEpoTab ){
+      sqlite3VtabDisconnect(db, pMod->pEpoTab);
+    }
+  }
+  sqlite3VtabUnlockList(db);
+  sqlite3BtreeLeaveAll(db);
+#else
+  UNUSED_PARAMETER(db);
+#endif
+}
+
+/*
+** Return TRUE if database connection db has unfinalized prepared
+** statements or unfinished sqlite3_backup objects.
+*/
+static int connectionIsBusy(sqlite3 *db){
+  int j;
+  assert( sqlite3_mutex_held(db->mutex) );
+  if( db->pVdbe ) return 1;
+  for(j=0; j<db->nDb; j++){
+    Btree *pBt = db->aDb[j].pBt;
+    if( pBt && sqlite3BtreeIsInBackup(pBt) ) return 1;
+  }
+  return 0;
+}
+
+/*
+** Close an existing SQLite database
+*/
+static int sqlite3Close(sqlite3 *db, int forceZombie){
+  if( !db ){
+    /* EVIDENCE-OF: R-63257-11740 Calling sqlite3_close() or
+    ** sqlite3_close_v2() with a NULL pointer argument is a harmless no-op.
+    */
+    return SQLITE_OK;
+  }
+  if( !sqlite3SafetyCheckSickOrOk(db) ){
+    return SQLITE_MISUSE_BKPT;
+  }
+  sqlite3_mutex_enter(db->mutex);
+  if( db->mTrace & SQLITE_TRACE_CLOSE ){
+    db->xTrace(SQLITE_TRACE_CLOSE, db->pTraceArg, db, 0);
+  }
+
+  /* Force xDisconnect calls on all virtual tables */
+  disconnectAllVtab(db);
+
+  /* If a transaction is open, the disconnectAllVtab() call above
+  ** will not have called the xDisconnect() method on any virtual
+  ** tables in the db->aVTrans[] array. The following sqlite3VtabRollback()
+  ** call will do so. We need to do this before the check for active
+  ** SQL statements below, as the v-table implementation may be storing
+  ** some prepared statements internally.
+  */
+  sqlite3VtabRollback(db);
+
+  /* Legacy behavior (sqlite3_close() behavior) is to return
+  ** SQLITE_BUSY if the connection can not be closed immediately.
+  */
+  if( !forceZombie && connectionIsBusy(db) ){
+    sqlite3ErrorWithMsg(db, SQLITE_BUSY, "unable to close due to unfinalized "
+       "statements or unfinished backups");
+    sqlite3_mutex_leave(db->mutex);
+    return SQLITE_BUSY;
+  }
+
+#ifdef SQLITE_ENABLE_SQLLOG
+  if( sqlite3GlobalConfig.xSqllog ){
+    /* Closing the handle. Fourth parameter is passed the value 2. */
+    sqlite3GlobalConfig.xSqllog(sqlite3GlobalConfig.pSqllogArg, db, 0, 2);
+  }
+#endif
+
+  /* Convert the connection into a zombie and then close it.
+  */
+  db->magic = SQLITE_MAGIC_ZOMBIE;
+  sqlite3LeaveMutexAndCloseZombie(db);
+  return SQLITE_OK;
+}
+
+/*
+** Two variations on the public interface for closing a database
+** connection. The sqlite3_close() version returns SQLITE_BUSY and
+** leaves the connection open if there are unfinalized prepared
+** statements or unfinished sqlite3_backups. The sqlite3_close_v2()
+** version forces the connection to become a zombie if there are
+** unclosed resources, and arranges for deallocation when the last
+** prepared statement or sqlite3_backup closes.
+*/
+SQLITE_API int sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); }
+SQLITE_API int sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); }
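+
+/* Illustrative sketch only -- not part of the amalgamation. Contrasts the
+** two close interfaces defined above when a prepared statement is still
+** outstanding. The statement handle here is assumed to have come from an
+** earlier sqlite3_prepare_v2() call on the same connection.
+*/
+#if 0
+#include "sqlite3.h"
+
+static void demoClose(sqlite3 *db, sqlite3_stmt *pStmt){
+  if( sqlite3_close(db)==SQLITE_BUSY ){
+    /* The legacy close refused because pStmt is unfinalized. close_v2()
+    ** instead turns db into a zombie and defers the actual teardown. */
+    sqlite3_close_v2(db);
+  }
+  /* Finalizing the last statement frees the zombie connection, via
+  ** sqlite3LeaveMutexAndCloseZombie() below. */
+  sqlite3_finalize(pStmt);
+}
+#endif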
+
+/*
+** Close the mutex on database connection db.
+**
+** Furthermore, if database connection db is a zombie (meaning that there
+** has been a prior call to sqlite3_close(db) or sqlite3_close_v2(db)) and
+** every sqlite3_stmt has now been finalized and every sqlite3_backup has
+** finished, then free all resources.
+*/
+SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){
+  HashElem *i;                    /* Hash table iterator */
+  int j;
+
+  /* If there are outstanding sqlite3_stmt or sqlite3_backup objects
+  ** or if the connection has not yet been closed by sqlite3_close_v2(),
+  ** then just leave the mutex and return.
+  */
+  if( db->magic!=SQLITE_MAGIC_ZOMBIE || connectionIsBusy(db) ){
+    sqlite3_mutex_leave(db->mutex);
+    return;
+  }
+
+  /* If we reach this point, it means that the database connection has
+  ** closed all sqlite3_stmt and sqlite3_backup objects and has been
+  ** passed to sqlite3_close (meaning that it is a zombie). Therefore,
+  ** go ahead and free all resources.
+  */
+
+  /* If a transaction is open, roll it back. This also ensures that if
+  ** any database schemas have been modified by an uncommitted transaction
+  ** they are reset. And that the required b-tree mutex is held to make
+  ** the pager rollback and schema reset an atomic operation. */
+  sqlite3RollbackAll(db, SQLITE_OK);
+
+  /* Free any outstanding Savepoint structures. */
+  sqlite3CloseSavepoints(db);
+
+  /* Close all database connections */
+  for(j=0; j<db->nDb; j++){
+    struct Db *pDb = &db->aDb[j];
+    if( pDb->pBt ){
+      sqlite3BtreeClose(pDb->pBt);
+      pDb->pBt = 0;
+      if( j!=1 ){
+        pDb->pSchema = 0;
+      }
+    }
+  }
+  /* Clear the TEMP schema separately and last */
+  if( db->aDb[1].pSchema ){
+    sqlite3SchemaClear(db->aDb[1].pSchema);
+  }
+  sqlite3VtabUnlockList(db);
+
+  /* Free up the array of auxiliary databases */
+  sqlite3CollapseDatabaseArray(db);
+  assert( db->nDb<=2 );
+  assert( db->aDb==db->aDbStatic );
+
+  /* Tell the code in notify.c that the connection no longer holds any
+  ** locks and does not require any further unlock-notify callbacks.
+  */
+  sqlite3ConnectionClosed(db);
+
+  for(i=sqliteHashFirst(&db->aFunc); i; i=sqliteHashNext(i)){
+    FuncDef *pNext, *p;
+    p = sqliteHashData(i);
+    do{
+      functionDestroy(db, p);
+      pNext = p->pNext;
+      sqlite3DbFree(db, p);
+      p = pNext;
+    }while( p );
+  }
+  sqlite3HashClear(&db->aFunc);
+  for(i=sqliteHashFirst(&db->aCollSeq); i; i=sqliteHashNext(i)){
+    CollSeq *pColl = (CollSeq *)sqliteHashData(i);
+    /* Invoke any destructors registered for collation sequence user data. */
+    for(j=0; j<3; j++){
+      if( pColl[j].xDel ){
+        pColl[j].xDel(pColl[j].pUser);
+      }
+    }
+    sqlite3DbFree(db, pColl);
+  }
+  sqlite3HashClear(&db->aCollSeq);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+  for(i=sqliteHashFirst(&db->aModule); i; i=sqliteHashNext(i)){
+    Module *pMod = (Module *)sqliteHashData(i);
+    if( pMod->xDestroy ){
+      pMod->xDestroy(pMod->pAux);
+    }
+    sqlite3VtabEponymousTableClear(db, pMod);
+    sqlite3DbFree(db, pMod);
+  }
+  sqlite3HashClear(&db->aModule);
+#endif
+
+  sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */
+  sqlite3ValueFree(db->pErr);
+  sqlite3CloseExtensions(db);
+#if SQLITE_USER_AUTHENTICATION
+  sqlite3_free(db->auth.zAuthUser);
+  sqlite3_free(db->auth.zAuthPW);
+#endif
+
+  db->magic = SQLITE_MAGIC_ERROR;
+
+  /* The temp-database schema is allocated differently from the other schema
+  ** objects (using sqliteMalloc() directly, instead of sqlite3BtreeSchema()).
+  ** So it needs to be freed here. Todo: Why not roll the temp schema into
+  ** the same sqliteMalloc() as the one that allocates the database
+  ** structure?
+  */
+  sqlite3DbFree(db, db->aDb[1].pSchema);
+  sqlite3_mutex_leave(db->mutex);
+  db->magic = SQLITE_MAGIC_CLOSED;
+  sqlite3_mutex_free(db->mutex);
+  assert( db->lookaside.nOut==0 );  /* Fails on a lookaside memory leak */
+  if( db->lookaside.bMalloced ){
+    sqlite3_free(db->lookaside.pStart);
+  }
+  sqlite3_free(db);
+}
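+
+/* Illustrative sketch only -- not part of the amalgamation. The xDel loop
+** above is what eventually fires the destructor passed to
+** sqlite3_create_collation_v2(). The comparator, collation name, and
+** context buffer used here are made up for the example.
+*/
+#if 0
+#include <string.h>
+#include "sqlite3.h"
+
+static int demoCmp(void *pCtx, int n1, const void *p1, int n2, const void *p2){
+  int n = n1<n2 ? n1 : n2;      /* plain byte-wise comparison */
+  int rc = memcmp(p1, p2, n);
+  (void)pCtx;
+  return rc ? rc : n1-n2;
+}
+
+static int demoRegisterCollation(sqlite3 *db){
+  void *pCtx = sqlite3_malloc(16);
+  /* sqlite3_free() is invoked for pCtx when the collation is replaced or
+  ** when the connection is finally torn down, as shown above. */
+  return sqlite3_create_collation_v2(db, "demo", SQLITE_UTF8, pCtx,
+                                     demoCmp, sqlite3_free);
+}
+#endif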
+
+/*
+** Rollback all database files. If tripCode is not SQLITE_OK, then
+** any write cursors are invalidated ("tripped" - as in "tripping a circuit
+** breaker") and made to return tripCode if there are any further
+** attempts to use that cursor. Read cursors remain open and valid
+** but are "saved" in case the table pages are moved around.
+*/
+SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){
+  int i;
+  int inTrans = 0;
+  int schemaChange;
+  assert( sqlite3_mutex_held(db->mutex) );
+  sqlite3BeginBenignMalloc();
+
+  /* Obtain all b-tree mutexes before making any calls to BtreeRollback().
+  ** This is important in case the transaction being rolled back has
+  ** modified the database schema. If the b-tree mutexes are not taken
+  ** here, then another shared-cache connection might sneak in between
+  ** the database rollback and schema reset, which can cause false
+  ** corruption reports in some cases.
+  */
+  sqlite3BtreeEnterAll(db);
+  schemaChange = (db->flags & SQLITE_InternChanges)!=0 && db->init.busy==0;
+
+  for(i=0; i<db->nDb; i++){
+    Btree *p = db->aDb[i].pBt;
+    if( p ){
+      if( sqlite3BtreeIsInTrans(p) ){
+        inTrans = 1;
+      }
+      sqlite3BtreeRollback(p, tripCode, !schemaChange);
+    }
+  }
+  sqlite3VtabRollback(db);
+  sqlite3EndBenignMalloc();
+
+  if( (db->flags&SQLITE_InternChanges)!=0 && db->init.busy==0 ){
+    sqlite3ExpirePreparedStatements(db);
+    sqlite3ResetAllSchemasOfConnection(db);
+  }
+  sqlite3BtreeLeaveAll(db);
+
+  /* Any deferred constraint violations have now been resolved. */
+  db->nDeferredCons = 0;
+  db->nDeferredImmCons = 0;
+  db->flags &= ~SQLITE_DeferFKs;
+
+  /* If one has been configured, invoke the rollback-hook callback */
+  if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){
+    db->xRollbackCallback(db->pRollbackArg);
+  }
+}
+
+/*
+** Return a static string containing the name corresponding to the error code
+** specified in the argument.
+*/
+#if defined(SQLITE_NEED_ERR_NAME)
+SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
+  const char *zName = 0;
+  int i, origRc = rc;
+  for(i=0; i<2 && zName==0; i++, rc &= 0xff){
+    switch( rc ){
+      case SQLITE_OK:                 zName = "SQLITE_OK";                break;
+      case SQLITE_ERROR:              zName = "SQLITE_ERROR";             break;
+      case SQLITE_INTERNAL:           zName = "SQLITE_INTERNAL";          break;
+      case SQLITE_PERM:               zName = "SQLITE_PERM";              break;
+      case SQLITE_ABORT:              zName = "SQLITE_ABORT";             break;
+      case SQLITE_ABORT_ROLLBACK:     zName = "SQLITE_ABORT_ROLLBACK";    break;
+      case SQLITE_BUSY:               zName = "SQLITE_BUSY";              break;
+      case SQLITE_BUSY_RECOVERY:      zName = "SQLITE_BUSY_RECOVERY";     break;
+      case SQLITE_BUSY_SNAPSHOT:      zName = "SQLITE_BUSY_SNAPSHOT";     break;
+      case SQLITE_LOCKED:             zName = "SQLITE_LOCKED";            break;
+      case SQLITE_LOCKED_SHAREDCACHE: zName = "SQLITE_LOCKED_SHAREDCACHE";break;
+      case SQLITE_NOMEM:              zName = "SQLITE_NOMEM";             break;
+      case SQLITE_READONLY:           zName = "SQLITE_READONLY";          break;
+      case SQLITE_READONLY_RECOVERY:  zName = "SQLITE_READONLY_RECOVERY"; break;
+      case SQLITE_READONLY_CANTLOCK:  zName = "SQLITE_READONLY_CANTLOCK"; break;
+      case SQLITE_READONLY_ROLLBACK:  zName = "SQLITE_READONLY_ROLLBACK"; break;
+      case SQLITE_READONLY_DBMOVED:   zName = "SQLITE_READONLY_DBMOVED";  break;
+      case SQLITE_INTERRUPT:          zName = "SQLITE_INTERRUPT";         break;
+      case SQLITE_IOERR:              zName = "SQLITE_IOERR";             break;
+      case SQLITE_IOERR_READ:         zName = "SQLITE_IOERR_READ";        break;
+      case SQLITE_IOERR_SHORT_READ:   zName = "SQLITE_IOERR_SHORT_READ";  break;
+      case SQLITE_IOERR_WRITE:        zName = "SQLITE_IOERR_WRITE";       break;
+      case SQLITE_IOERR_FSYNC:        zName = "SQLITE_IOERR_FSYNC";       break;
+      case SQLITE_IOERR_DIR_FSYNC:    zName = "SQLITE_IOERR_DIR_FSYNC";   break;
+      case SQLITE_IOERR_TRUNCATE:     zName = "SQLITE_IOERR_TRUNCATE";    break;
+      case SQLITE_IOERR_FSTAT:        zName = "SQLITE_IOERR_FSTAT";       break;
+      case SQLITE_IOERR_UNLOCK:       zName = "SQLITE_IOERR_UNLOCK";      break;
+      case SQLITE_IOERR_RDLOCK:       zName = "SQLITE_IOERR_RDLOCK";      break;
+      case SQLITE_IOERR_DELETE:       zName = "SQLITE_IOERR_DELETE";      break;
+      case SQLITE_IOERR_NOMEM:        zName = "SQLITE_IOERR_NOMEM";       break;
+      case SQLITE_IOERR_ACCESS:       zName = "SQLITE_IOERR_ACCESS";      break;
+      case SQLITE_IOERR_CHECKRESERVEDLOCK:
+                                zName = "SQLITE_IOERR_CHECKRESERVEDLOCK"; break;
+      case SQLITE_IOERR_LOCK:         zName = "SQLITE_IOERR_LOCK";        break;
+      case SQLITE_IOERR_CLOSE:        zName = "SQLITE_IOERR_CLOSE";       break;
+      case SQLITE_IOERR_DIR_CLOSE:    zName = "SQLITE_IOERR_DIR_CLOSE";   break;
+      case SQLITE_IOERR_SHMOPEN:      zName = "SQLITE_IOERR_SHMOPEN";     break;
+      case SQLITE_IOERR_SHMSIZE:      zName =
"SQLITE_IOERR_SHMSIZE"; break; + case SQLITE_IOERR_SHMLOCK: zName = "SQLITE_IOERR_SHMLOCK"; break; + case SQLITE_IOERR_SHMMAP: zName = "SQLITE_IOERR_SHMMAP"; break; + case SQLITE_IOERR_SEEK: zName = "SQLITE_IOERR_SEEK"; break; + case SQLITE_IOERR_DELETE_NOENT: zName = "SQLITE_IOERR_DELETE_NOENT";break; + case SQLITE_IOERR_MMAP: zName = "SQLITE_IOERR_MMAP"; break; + case SQLITE_IOERR_GETTEMPPATH: zName = "SQLITE_IOERR_GETTEMPPATH"; break; + case SQLITE_IOERR_CONVPATH: zName = "SQLITE_IOERR_CONVPATH"; break; + case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break; + case SQLITE_CORRUPT_VTAB: zName = "SQLITE_CORRUPT_VTAB"; break; + case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break; + case SQLITE_FULL: zName = "SQLITE_FULL"; break; + case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break; + case SQLITE_CANTOPEN_NOTEMPDIR: zName = "SQLITE_CANTOPEN_NOTEMPDIR";break; + case SQLITE_CANTOPEN_ISDIR: zName = "SQLITE_CANTOPEN_ISDIR"; break; + case SQLITE_CANTOPEN_FULLPATH: zName = "SQLITE_CANTOPEN_FULLPATH"; break; + case SQLITE_CANTOPEN_CONVPATH: zName = "SQLITE_CANTOPEN_CONVPATH"; break; + case SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break; + case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break; + case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break; + case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break; + case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break; + case SQLITE_CONSTRAINT_UNIQUE: zName = "SQLITE_CONSTRAINT_UNIQUE"; break; + case SQLITE_CONSTRAINT_TRIGGER: zName = "SQLITE_CONSTRAINT_TRIGGER";break; + case SQLITE_CONSTRAINT_FOREIGNKEY: + zName = "SQLITE_CONSTRAINT_FOREIGNKEY"; break; + case SQLITE_CONSTRAINT_CHECK: zName = "SQLITE_CONSTRAINT_CHECK"; break; + case SQLITE_CONSTRAINT_PRIMARYKEY: + zName = "SQLITE_CONSTRAINT_PRIMARYKEY"; break; + case SQLITE_CONSTRAINT_NOTNULL: zName = "SQLITE_CONSTRAINT_NOTNULL";break; + case SQLITE_CONSTRAINT_COMMITHOOK: + zName = "SQLITE_CONSTRAINT_COMMITHOOK"; break; + case SQLITE_CONSTRAINT_VTAB: zName = "SQLITE_CONSTRAINT_VTAB"; break; + case SQLITE_CONSTRAINT_FUNCTION: + zName = "SQLITE_CONSTRAINT_FUNCTION"; break; + case SQLITE_CONSTRAINT_ROWID: zName = "SQLITE_CONSTRAINT_ROWID"; break; + case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break; + case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break; + case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break; + case SQLITE_AUTH: zName = "SQLITE_AUTH"; break; + case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break; + case SQLITE_RANGE: zName = "SQLITE_RANGE"; break; + case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break; + case SQLITE_ROW: zName = "SQLITE_ROW"; break; + case SQLITE_NOTICE: zName = "SQLITE_NOTICE"; break; + case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break; + case SQLITE_NOTICE_RECOVER_ROLLBACK: + zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break; + case SQLITE_WARNING: zName = "SQLITE_WARNING"; break; + case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break; + case SQLITE_DONE: zName = "SQLITE_DONE"; break; + } + } + if( zName==0 ){ + static char zBuf[50]; + sqlite3_snprintf(sizeof(zBuf), zBuf, "SQLITE_UNKNOWN(%d)", origRc); + zName = zBuf; + } + return zName; +} +#endif + +/* +** Return a static string that describes the kind of error specified in the +** argument. 
+*/
+SQLITE_PRIVATE const char *sqlite3ErrStr(int rc){
+  static const char* const aMsg[] = {
+    /* SQLITE_OK          */ "not an error",
+    /* SQLITE_ERROR       */ "SQL logic error or missing database",
+    /* SQLITE_INTERNAL    */ 0,
+    /* SQLITE_PERM        */ "access permission denied",
+    /* SQLITE_ABORT       */ "callback requested query abort",
+    /* SQLITE_BUSY        */ "database is locked",
+    /* SQLITE_LOCKED      */ "database table is locked",
+    /* SQLITE_NOMEM       */ "out of memory",
+    /* SQLITE_READONLY    */ "attempt to write a readonly database",
+    /* SQLITE_INTERRUPT   */ "interrupted",
+    /* SQLITE_IOERR       */ "disk I/O error",
+    /* SQLITE_CORRUPT     */ "database disk image is malformed",
+    /* SQLITE_NOTFOUND    */ "unknown operation",
+    /* SQLITE_FULL        */ "database or disk is full",
+    /* SQLITE_CANTOPEN    */ "unable to open database file",
+    /* SQLITE_PROTOCOL    */ "locking protocol",
+    /* SQLITE_EMPTY       */ "table contains no data",
+    /* SQLITE_SCHEMA      */ "database schema has changed",
+    /* SQLITE_TOOBIG      */ "string or blob too big",
+    /* SQLITE_CONSTRAINT  */ "constraint failed",
+    /* SQLITE_MISMATCH    */ "datatype mismatch",
+    /* SQLITE_MISUSE      */ "library routine called out of sequence",
+    /* SQLITE_NOLFS       */ "large file support is disabled",
+    /* SQLITE_AUTH        */ "authorization denied",
+    /* SQLITE_FORMAT      */ "auxiliary database format error",
+    /* SQLITE_RANGE       */ "bind or column index out of range",
+    /* SQLITE_NOTADB      */ "file is encrypted or is not a database",
+  };
+  const char *zErr = "unknown error";
+  switch( rc ){
+    case SQLITE_ABORT_ROLLBACK: {
+      zErr = "abort due to ROLLBACK";
+      break;
+    }
+    default: {
+      rc &= 0xff;
+      if( ALWAYS(rc>=0) && rc<ArraySize(aMsg) && aMsg[rc]!=0 ){
+        zErr = aMsg[rc];
+      }
+      break;
+    }
+  }
+  return zErr;
+}
+
+/*
+** This routine implements a busy callback that sleeps and tries
+** again until a timeout value is reached. The timeout value is
+** an integer number of milliseconds passed in as the first
+** argument.
+*/
+static int sqliteDefaultBusyCallback(
+ void *ptr,               /* Database connection */
+ int count                /* Number of times table has been busy */
+){
+#if SQLITE_OS_WIN || HAVE_USLEEP
+  static const u8 delays[] =
+     { 1, 2, 5, 10, 15, 20, 25, 25,  25,  50,  50, 100 };
+  static const u8 totals[] =
+     { 0, 1, 3,  8, 18, 33, 53, 78, 103, 128, 178, 228 };
+# define NDELAY ArraySize(delays)
+  sqlite3 *db = (sqlite3 *)ptr;
+  int timeout = db->busyTimeout;
+  int delay, prior;
+
+  assert( count>=0 );
+  if( count < NDELAY ){
+    delay = delays[count];
+    prior = totals[count];
+  }else{
+    delay = delays[NDELAY-1];
+    prior = totals[NDELAY-1] + delay*(count-(NDELAY-1));
+  }
+  if( prior + delay > timeout ){
+    delay = timeout - prior;
+    if( delay<=0 ) return 0;
+  }
+  sqlite3OsSleep(db->pVfs, delay*1000);
+  return 1;
+#else
+  sqlite3 *db = (sqlite3 *)ptr;
+  int timeout = ((sqlite3 *)ptr)->busyTimeout;
+  if( (count+1)*1000 > timeout ){
+    return 0;
+  }
+  sqlite3OsSleep(db->pVfs, 1000000);
+  return 1;
+#endif
+}
+
+/*
+** Invoke the given busy handler.
+**
+** This routine is called when an operation failed with a lock.
+** If this routine returns non-zero, the lock is retried. If it
+** returns 0, the operation aborts with an SQLITE_BUSY error.
+*/
+SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){
+  int rc;
+  if( NEVER(p==0) || p->xFunc==0 || p->nBusy<0 ) return 0;
+  rc = p->xFunc(p->pArg, p->nBusy);
+  if( rc==0 ){
+    p->nBusy = -1;
+  }else{
+    p->nBusy++;
+  }
+  return rc;
+}
+
+/*
+** This routine sets the busy callback for an SQLite database to the
+** given callback function with the given argument.
+*/
+SQLITE_API int sqlite3_busy_handler(
+  sqlite3 *db,
+  int (*xBusy)(void*,int),
+  void *pArg
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  db->busyHandler.xFunc = xBusy;
+  db->busyHandler.pArg = pArg;
+  db->busyHandler.nBusy = 0;
+  db->busyTimeout = 0;
+  sqlite3_mutex_leave(db->mutex);
+  return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+/*
+** This routine sets the progress callback for an SQLite database to the
+** given callback function with the given argument. The progress callback will
+** be invoked every nOps opcodes.
+*/ +SQLITE_API void sqlite3_progress_handler( + sqlite3 *db, + int nOps, + int (*xProgress)(void*), + void *pArg +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return; + } +#endif + sqlite3_mutex_enter(db->mutex); + if( nOps>0 ){ + db->xProgress = xProgress; + db->nProgressOps = (unsigned)nOps; + db->pProgressArg = pArg; + }else{ + db->xProgress = 0; + db->nProgressOps = 0; + db->pProgressArg = 0; + } + sqlite3_mutex_leave(db->mutex); +} +#endif + + +/* +** This routine installs a default busy handler that waits for the +** specified number of milliseconds before returning 0. +*/ +SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + if( ms>0 ){ + sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db); + db->busyTimeout = ms; + }else{ + sqlite3_busy_handler(db, 0, 0); + } + return SQLITE_OK; +} + +/* +** Cause any pending operation to stop at its earliest opportunity. +*/ +SQLITE_API void sqlite3_interrupt(sqlite3 *db){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) && (db==0 || db->magic!=SQLITE_MAGIC_ZOMBIE) ){ + (void)SQLITE_MISUSE_BKPT; + return; + } +#endif + db->u1.isInterrupted = 1; +} + + +/* +** This function is exactly the same as sqlite3_create_function(), except +** that it is designed to be called by internal code. The difference is +** that if a malloc() fails in sqlite3_create_function(), an error code +** is returned and the mallocFailed flag cleared. +*/ +SQLITE_PRIVATE int sqlite3CreateFunc( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int enc, + void *pUserData, + void (*xSFunc)(sqlite3_context*,int,sqlite3_value **), + void (*xStep)(sqlite3_context*,int,sqlite3_value **), + void (*xFinal)(sqlite3_context*), + FuncDestructor *pDestructor +){ + FuncDef *p; + int nName; + int extraFlags; + + assert( sqlite3_mutex_held(db->mutex) ); + if( zFunctionName==0 || + (xSFunc && (xFinal || xStep)) || + (!xSFunc && (xFinal && !xStep)) || + (!xSFunc && (!xFinal && xStep)) || + (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG) || + (255<(nName = sqlite3Strlen30( zFunctionName))) ){ + return SQLITE_MISUSE_BKPT; + } + + assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); + extraFlags = enc & SQLITE_DETERMINISTIC; + enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); + +#ifndef SQLITE_OMIT_UTF16 + /* If SQLITE_UTF16 is specified as the encoding type, transform this + ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the + ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. + ** + ** If SQLITE_ANY is specified, add three versions of the function + ** to the hash table. + */ + if( enc==SQLITE_UTF16 ){ + enc = SQLITE_UTF16NATIVE; + }else if( enc==SQLITE_ANY ){ + int rc; + rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8|extraFlags, + pUserData, xSFunc, xStep, xFinal, pDestructor); + if( rc==SQLITE_OK ){ + rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE|extraFlags, + pUserData, xSFunc, xStep, xFinal, pDestructor); + } + if( rc!=SQLITE_OK ){ + return rc; + } + enc = SQLITE_UTF16BE; + } +#else + enc = SQLITE_UTF8; +#endif + + /* Check if an existing function is being overridden or deleted. If so, + ** and there are active VMs, then return SQLITE_BUSY. If a function + ** is being overridden/deleted but there are no active VMs, allow the + ** operation to continue but invalidate all precompiled statements. 
+ */ + p = sqlite3FindFunction(db, zFunctionName, nArg, (u8)enc, 0); + if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==enc && p->nArg==nArg ){ + if( db->nVdbeActive ){ + sqlite3ErrorWithMsg(db, SQLITE_BUSY, + "unable to delete/modify user-function due to active statements"); + assert( !db->mallocFailed ); + return SQLITE_BUSY; + }else{ + sqlite3ExpirePreparedStatements(db); + } + } + + p = sqlite3FindFunction(db, zFunctionName, nArg, (u8)enc, 1); + assert(p || db->mallocFailed); + if( !p ){ + return SQLITE_NOMEM_BKPT; + } + + /* If an older version of the function with a configured destructor is + ** being replaced invoke the destructor function here. */ + functionDestroy(db, p); + + if( pDestructor ){ + pDestructor->nRef++; + } + p->u.pDestructor = pDestructor; + p->funcFlags = (p->funcFlags & SQLITE_FUNC_ENCMASK) | extraFlags; + testcase( p->funcFlags & SQLITE_DETERMINISTIC ); + p->xSFunc = xSFunc ? xSFunc : xStep; + p->xFinalize = xFinal; + p->pUserData = pUserData; + p->nArg = (u16)nArg; + return SQLITE_OK; +} + +/* +** Create new user functions. +*/ +SQLITE_API int sqlite3_create_function( + sqlite3 *db, + const char *zFunc, + int nArg, + int enc, + void *p, + void (*xSFunc)(sqlite3_context*,int,sqlite3_value **), + void (*xStep)(sqlite3_context*,int,sqlite3_value **), + void (*xFinal)(sqlite3_context*) +){ + return sqlite3_create_function_v2(db, zFunc, nArg, enc, p, xSFunc, xStep, + xFinal, 0); +} + +SQLITE_API int sqlite3_create_function_v2( + sqlite3 *db, + const char *zFunc, + int nArg, + int enc, + void *p, + void (*xSFunc)(sqlite3_context*,int,sqlite3_value **), + void (*xStep)(sqlite3_context*,int,sqlite3_value **), + void (*xFinal)(sqlite3_context*), + void (*xDestroy)(void *) +){ + int rc = SQLITE_ERROR; + FuncDestructor *pArg = 0; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } +#endif + sqlite3_mutex_enter(db->mutex); + if( xDestroy ){ + pArg = (FuncDestructor *)sqlite3DbMallocZero(db, sizeof(FuncDestructor)); + if( !pArg ){ + xDestroy(p); + goto out; + } + pArg->xDestroy = xDestroy; + pArg->pUserData = p; + } + rc = sqlite3CreateFunc(db, zFunc, nArg, enc, p, xSFunc, xStep, xFinal, pArg); + if( pArg && pArg->nRef==0 ){ + assert( rc!=SQLITE_OK ); + xDestroy(p); + sqlite3DbFree(db, pArg); + } + + out: + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +#ifndef SQLITE_OMIT_UTF16 +SQLITE_API int sqlite3_create_function16( + sqlite3 *db, + const void *zFunctionName, + int nArg, + int eTextRep, + void *p, + void (*xSFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +){ + int rc; + char *zFunc8; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) || zFunctionName==0 ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1, SQLITE_UTF16NATIVE); + rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xSFunc,xStep,xFinal,0); + sqlite3DbFree(db, zFunc8); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} +#endif + + +/* +** Declare that a function has been overloaded by a virtual table. +** +** If the function already exists as a regular global function, then +** this routine is a no-op. If the function does not exist, then create +** a new one that always throws a run-time error. 
+**
+** When virtual tables intend to provide an overloaded function, they
+** should call this routine to make sure the global function exists.
+** A global function must exist in order for name resolution to work
+** properly.
+*/
+SQLITE_API int sqlite3_overload_function(
+  sqlite3 *db,
+  const char *zName,
+  int nArg
+){
+  int rc = SQLITE_OK;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || zName==0 || nArg<-2 ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  if( sqlite3FindFunction(db, zName, nArg, SQLITE_UTF8, 0)==0 ){
+    rc = sqlite3CreateFunc(db, zName, nArg, SQLITE_UTF8,
+                           0, sqlite3InvalidFunction, 0, 0, 0);
+  }
+  rc = sqlite3ApiExit(db, rc);
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+#ifndef SQLITE_OMIT_TRACE
+/*
+** Register a trace function. The pArg from the previously registered trace
+** is returned.
+**
+** A NULL trace function means that no tracing is executed. A non-NULL
+** trace is a pointer to a function that is invoked at the start of each
+** SQL statement.
+*/
+#ifndef SQLITE_OMIT_DEPRECATED
+SQLITE_API void *sqlite3_trace(sqlite3 *db, void(*xTrace)(void*,const char*), void *pArg){
+  void *pOld;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  pOld = db->pTraceArg;
+  db->mTrace = xTrace ? SQLITE_TRACE_LEGACY : 0;
+  db->xTrace = (int(*)(u32,void*,void*,void*))xTrace;
+  db->pTraceArg = pArg;
+  sqlite3_mutex_leave(db->mutex);
+  return pOld;
+}
+#endif /* SQLITE_OMIT_DEPRECATED */
+
+/* Register a trace callback using the version-2 interface.
+*/
+SQLITE_API int sqlite3_trace_v2(
+  sqlite3 *db,                               /* Trace this connection */
+  unsigned mTrace,                           /* Mask of events to be traced */
+  int(*xTrace)(unsigned,void*,void*,void*),  /* Callback to invoke */
+  void *pArg                                 /* Context */
+){
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  if( mTrace==0 ) xTrace = 0;
+  if( xTrace==0 ) mTrace = 0;
+  db->mTrace = mTrace;
+  db->xTrace = xTrace;
+  db->pTraceArg = pArg;
+  sqlite3_mutex_leave(db->mutex);
+  return SQLITE_OK;
+}
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** Register a profile function. The pArg from the previously registered
+** profile function is returned.
+**
+** A NULL profile function means that no profiling is executed. A non-NULL
+** profile is a pointer to a function that is invoked at the conclusion of
+** each SQL statement that is run.
+*/
+SQLITE_API void *sqlite3_profile(
+  sqlite3 *db,
+  void (*xProfile)(void*,const char*,sqlite_uint64),
+  void *pArg
+){
+  void *pOld;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  pOld = db->pProfileArg;
+  db->xProfile = xProfile;
+  db->pProfileArg = pArg;
+  sqlite3_mutex_leave(db->mutex);
+  return pOld;
+}
+#endif /* SQLITE_OMIT_DEPRECATED */
+#endif /* SQLITE_OMIT_TRACE */
+
+/*
+** Register a function to be invoked when a transaction commits.
+** If the invoked function returns non-zero, then the commit becomes a
+** rollback.
+*/ +SQLITE_API void *sqlite3_commit_hook( + sqlite3 *db, /* Attach the hook to this database */ + int (*xCallback)(void*), /* Function to invoke on each commit */ + void *pArg /* Argument to the function */ +){ + void *pOld; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + sqlite3_mutex_enter(db->mutex); + pOld = db->pCommitArg; + db->xCommitCallback = xCallback; + db->pCommitArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pOld; +} + +/* +** Register a callback to be invoked each time a row is updated, +** inserted or deleted using this database connection. +*/ +SQLITE_API void *sqlite3_update_hook( + sqlite3 *db, /* Attach the hook to this database */ + void (*xCallback)(void*,int,char const *,char const *,sqlite_int64), + void *pArg /* Argument to the function */ +){ + void *pRet; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + sqlite3_mutex_enter(db->mutex); + pRet = db->pUpdateArg; + db->xUpdateCallback = xCallback; + db->pUpdateArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +} + +/* +** Register a callback to be invoked each time a transaction is rolled +** back by this database connection. +*/ +SQLITE_API void *sqlite3_rollback_hook( + sqlite3 *db, /* Attach the hook to this database */ + void (*xCallback)(void*), /* Callback function */ + void *pArg /* Argument to the function */ +){ + void *pRet; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + sqlite3_mutex_enter(db->mutex); + pRet = db->pRollbackArg; + db->xRollbackCallback = xCallback; + db->pRollbackArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +} + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** Register a callback to be invoked each time a row is updated, +** inserted or deleted using this database connection. +*/ +SQLITE_API void *sqlite3_preupdate_hook( + sqlite3 *db, /* Attach the hook to this database */ + void(*xCallback)( /* Callback function */ + void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64), + void *pArg /* First callback argument */ +){ + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pPreUpdateArg; + db->xPreUpdateCallback = xCallback; + db->pPreUpdateArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifndef SQLITE_OMIT_WAL +/* +** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint(). +** Invoke sqlite3_wal_checkpoint if the number of frames in the log file +** is greater than sqlite3.pWalArg cast to an integer (the value configured by +** wal_autocheckpoint()). +*/ +SQLITE_PRIVATE int sqlite3WalDefaultHook( + void *pClientData, /* Argument */ + sqlite3 *db, /* Connection */ + const char *zDb, /* Database */ + int nFrame /* Size of WAL */ +){ + if( nFrame>=SQLITE_PTR_TO_INT(pClientData) ){ + sqlite3BeginBenignMalloc(); + sqlite3_wal_checkpoint(db, zDb); + sqlite3EndBenignMalloc(); + } + return SQLITE_OK; +} +#endif /* SQLITE_OMIT_WAL */ + +/* +** Configure an sqlite3_wal_hook() callback to automatically checkpoint +** a database after committing a transaction if there are nFrame or +** more frames in the log file. Passing zero or a negative value as the +** nFrame parameter disables automatic checkpoints entirely. +** +** The callback registered by this function replaces any existing callback +** registered using sqlite3_wal_hook(). 
Likewise, registering a callback
+** using sqlite3_wal_hook() disables the automatic checkpoint mechanism
+** configured by this function.
+*/
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){
+#ifdef SQLITE_OMIT_WAL
+  UNUSED_PARAMETER(db);
+  UNUSED_PARAMETER(nFrame);
+#else
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+  if( nFrame>0 ){
+    sqlite3_wal_hook(db, sqlite3WalDefaultHook, SQLITE_INT_TO_PTR(nFrame));
+  }else{
+    sqlite3_wal_hook(db, 0, 0);
+  }
+#endif
+  return SQLITE_OK;
+}
+
+/*
+** Register a callback to be invoked each time a transaction is written
+** into the write-ahead-log by this database connection.
+*/
+SQLITE_API void *sqlite3_wal_hook(
+  sqlite3 *db,                    /* Attach the hook to this db handle */
+  int(*xCallback)(void *, sqlite3*, const char*, int),
+  void *pArg                      /* First argument passed to xCallback() */
+){
+#ifndef SQLITE_OMIT_WAL
+  void *pRet;
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ){
+    (void)SQLITE_MISUSE_BKPT;
+    return 0;
+  }
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  pRet = db->pWalArg;
+  db->xWalCallback = xCallback;
+  db->pWalArg = pArg;
+  sqlite3_mutex_leave(db->mutex);
+  return pRet;
+#else
+  return 0;
+#endif
+}
+
+/*
+** Checkpoint database zDb.
+*/
+SQLITE_API int sqlite3_wal_checkpoint_v2(
+  sqlite3 *db,                    /* Database handle */
+  const char *zDb,                /* Name of attached database (or NULL) */
+  int eMode,                      /* SQLITE_CHECKPOINT_* value */
+  int *pnLog,                     /* OUT: Size of WAL log in frames */
+  int *pnCkpt                     /* OUT: Total number of frames checkpointed */
+){
+#ifdef SQLITE_OMIT_WAL
+  return SQLITE_OK;
+#else
+  int rc;                         /* Return code */
+  int iDb = SQLITE_MAX_ATTACHED;  /* sqlite3.aDb[] index of db to checkpoint */
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
+#endif
+
+  /* Initialize the output variables to -1 in case an error occurs. */
+  if( pnLog ) *pnLog = -1;
+  if( pnCkpt ) *pnCkpt = -1;
+
+  assert( SQLITE_CHECKPOINT_PASSIVE==0 );
+  assert( SQLITE_CHECKPOINT_FULL==1 );
+  assert( SQLITE_CHECKPOINT_RESTART==2 );
+  assert( SQLITE_CHECKPOINT_TRUNCATE==3 );
+  if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){
+    /* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint
+    ** mode: */
+    return SQLITE_MISUSE;
+  }
+
+  sqlite3_mutex_enter(db->mutex);
+  if( zDb && zDb[0] ){
+    iDb = sqlite3FindDbName(db, zDb);
+  }
+  if( iDb<0 ){
+    rc = SQLITE_ERROR;
+    sqlite3ErrorWithMsg(db, SQLITE_ERROR, "unknown database: %s", zDb);
+  }else{
+    db->busyHandler.nBusy = 0;
+    rc = sqlite3Checkpoint(db, iDb, eMode, pnLog, pnCkpt);
+    sqlite3Error(db, rc);
+  }
+  rc = sqlite3ApiExit(db, rc);
+
+  /* If there are no active statements, clear the interrupt flag at this
+  ** point. */
+  if( db->nVdbeActive==0 ){
+    db->u1.isInterrupted = 0;
+  }
+
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+#endif
+}
+
+
+/*
+** Checkpoint database zDb. If zDb is NULL, or if the buffer zDb points
+** to contains a zero-length string, all attached databases are
+** checkpointed.
+*/
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
+  /* EVIDENCE-OF: R-41613-20553 The sqlite3_wal_checkpoint(D,X) is equivalent to
+  ** sqlite3_wal_checkpoint_v2(D,X,SQLITE_CHECKPOINT_PASSIVE,0,0). */
+  return sqlite3_wal_checkpoint_v2(db,zDb,SQLITE_CHECKPOINT_PASSIVE,0,0);
+}
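+
+/* Illustrative sketch only -- not part of the amalgamation. A typical use
+** of sqlite3_wal_checkpoint_v2() defined above: block until the whole WAL
+** has been checkpointed, then truncate the -wal file. "main" is simply the
+** default schema name of the primary database.
+*/
+#if 0
+#include <stdio.h>
+#include "sqlite3.h"
+
+static int demoCheckpoint(sqlite3 *db){
+  int nLog = 0, nCkpt = 0;
+  int rc = sqlite3_wal_checkpoint_v2(db, "main",
+                                     SQLITE_CHECKPOINT_TRUNCATE,
+                                     &nLog, &nCkpt);
+  if( rc==SQLITE_OK ){
+    printf("wal frames: %d, checkpointed: %d\n", nLog, nCkpt);
+  }
+  return rc;
+}
+#endif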
+
+#ifndef SQLITE_OMIT_WAL
+/*
+** Run a checkpoint on database iDb. This is a no-op if database iDb is
+** not currently open in WAL mode.
+**
+** If a transaction is open on the database being checkpointed, this
+** function returns SQLITE_LOCKED and a checkpoint is not attempted. If
+** an error occurs while running the checkpoint, an SQLite error code is
+** returned (i.e. SQLITE_IOERR). Otherwise, SQLITE_OK.
+**
+** The mutex on database handle db should be held by the caller. The mutex
+** associated with the specific b-tree being checkpointed is taken by
+** this function while the checkpoint is running.
+**
+** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are
+** checkpointed. If an error is encountered it is returned immediately -
+** no attempt is made to checkpoint any remaining databases.
+**
+** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART.
+*/
+SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3 *db, int iDb, int eMode, int *pnLog, int *pnCkpt){
+  int rc = SQLITE_OK;             /* Return code */
+  int i;                          /* Used to iterate through attached dbs */
+  int bBusy = 0;                  /* True if SQLITE_BUSY has been encountered */
+
+  assert( sqlite3_mutex_held(db->mutex) );
+  assert( !pnLog || *pnLog==-1 );
+  assert( !pnCkpt || *pnCkpt==-1 );
+
+  for(i=0; i<db->nDb && rc==SQLITE_OK; i++){
+    if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){
+      rc = sqlite3BtreeCheckpoint(db->aDb[i].pBt, eMode, pnLog, pnCkpt);
+      pnLog = 0;
+      pnCkpt = 0;
+      if( rc==SQLITE_BUSY ){
+        bBusy = 1;
+        rc = SQLITE_OK;
+      }
+    }
+  }
+
+  return (rc==SQLITE_OK && bBusy) ? SQLITE_BUSY : rc;
+}
+#endif /* SQLITE_OMIT_WAL */
+
+/*
+** This function returns true if main-memory should be used instead of
+** a temporary file for transient pager files and statement journals.
+** The value returned depends on the value of db->temp_store (runtime
+** parameter) and the compile time value of SQLITE_TEMP_STORE. The
+** following table describes the relationship between these two values
+** and this function's return value.
+**
+**   SQLITE_TEMP_STORE     db->temp_store     Location of temporary database
+**   -----------------     --------------     ------------------------------
+**   0                     any                file      (return 0)
+**   1                     1                  file      (return 0)
+**   1                     2                  memory    (return 1)
+**   1                     0                  file      (return 0)
+**   2                     1                  file      (return 0)
+**   2                     2                  memory    (return 1)
+**   2                     0                  memory    (return 1)
+**   3                     any                memory    (return 1)
+*/
+SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){
+#if SQLITE_TEMP_STORE==1
+  return ( db->temp_store==2 );
+#endif
+#if SQLITE_TEMP_STORE==2
+  return ( db->temp_store!=1 );
+#endif
+#if SQLITE_TEMP_STORE==3
+  UNUSED_PARAMETER(db);
+  return 1;
+#endif
+#if SQLITE_TEMP_STORE<1 || SQLITE_TEMP_STORE>3
+  UNUSED_PARAMETER(db);
+  return 0;
+#endif
+}
+
+/*
+** Return UTF-8 encoded English language explanation of the most recent
+** error.
+*/
+SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){
+  const char *z;
+  if( !db ){
+    return sqlite3ErrStr(SQLITE_NOMEM_BKPT);
+  }
+  if( !sqlite3SafetyCheckSickOrOk(db) ){
+    return sqlite3ErrStr(SQLITE_MISUSE_BKPT);
+  }
+  sqlite3_mutex_enter(db->mutex);
+  if( db->mallocFailed ){
+    z = sqlite3ErrStr(SQLITE_NOMEM_BKPT);
+  }else{
+    testcase( db->pErr==0 );
+    z = (char*)sqlite3_value_text(db->pErr);
+    assert( !db->mallocFailed );
+    if( z==0 ){
+      z = sqlite3ErrStr(db->errCode);
+    }
+  }
+  sqlite3_mutex_leave(db->mutex);
+  return z;
+}
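+
+/* Illustrative sketch only -- not part of the amalgamation. Demonstrates
+** the error-reporting accessors defined above and below after a failed
+** prepare. The SQL text is an arbitrary example.
+*/
+#if 0
+#include <stdio.h>
+#include "sqlite3.h"
+
+static void demoReportError(sqlite3 *db){
+  sqlite3_stmt *pStmt = 0;
+  if( sqlite3_prepare_v2(db, "SELECT * FROM no_such_table", -1,
+                         &pStmt, 0)!=SQLITE_OK ){
+    fprintf(stderr, "error %d (extended %d): %s\n",
+            sqlite3_errcode(db),
+            sqlite3_extended_errcode(db),
+            sqlite3_errmsg(db));
+  }
+  sqlite3_finalize(pStmt);
+}
+#endif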
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Return UTF-16 encoded English language explanation of the most recent
+** error.
+*/
+SQLITE_API const void *sqlite3_errmsg16(sqlite3 *db){
+  static const u16 outOfMem[] = {
+    'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0
+  };
+  static const u16 misuse[] = {
+    'l', 'i', 'b', 'r', 'a', 'r', 'y', ' ',
+    'r', 'o', 'u', 't', 'i', 'n', 'e', ' ',
+    'c', 'a', 'l', 'l', 'e', 'd', ' ',
+    'o', 'u', 't', ' ',
+    'o', 'f', ' ',
+    's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', 0
+  };
+
+  const void *z;
+  if( !db ){
+    return (void *)outOfMem;
+  }
+  if( !sqlite3SafetyCheckSickOrOk(db) ){
+    return (void *)misuse;
+  }
+  sqlite3_mutex_enter(db->mutex);
+  if( db->mallocFailed ){
+    z = (void *)outOfMem;
+  }else{
+    z = sqlite3_value_text16(db->pErr);
+    if( z==0 ){
+      sqlite3ErrorWithMsg(db, db->errCode, sqlite3ErrStr(db->errCode));
+      z = sqlite3_value_text16(db->pErr);
+    }
+    /* A malloc() may have failed within the call to sqlite3_value_text16()
+    ** above. If this is the case, then the db->mallocFailed flag needs to
+    ** be cleared before returning. Do this directly, instead of via
+    ** sqlite3ApiExit(), to avoid setting the database handle error message.
+    */
+    sqlite3OomClear(db);
+  }
+  sqlite3_mutex_leave(db->mutex);
+  return z;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Return the most recent error code generated by an SQLite routine. If NULL is
+** passed to this function, we assume a malloc() failed during sqlite3_open().
+*/
+SQLITE_API int sqlite3_errcode(sqlite3 *db){
+  if( db && !sqlite3SafetyCheckSickOrOk(db) ){
+    return SQLITE_MISUSE_BKPT;
+  }
+  if( !db || db->mallocFailed ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  return db->errCode & db->errMask;
+}
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db){
+  if( db && !sqlite3SafetyCheckSickOrOk(db) ){
+    return SQLITE_MISUSE_BKPT;
+  }
+  if( !db || db->mallocFailed ){
+    return SQLITE_NOMEM_BKPT;
+  }
+  return db->errCode;
+}
+SQLITE_API int sqlite3_system_errno(sqlite3 *db){
+  return db ? db->iSysErrno : 0;
+}
+
+/*
+** Return a string that describes the kind of error specified in the
+** argument. For now, this simply calls the internal sqlite3ErrStr()
+** function.
+*/
+SQLITE_API const char *sqlite3_errstr(int rc){
+  return sqlite3ErrStr(rc);
+}
+
+/*
+** Create a new collating function for database "db". The name is zName
+** and the encoding is enc.
+*/
+static int createCollation(
+  sqlite3* db,
+  const char *zName,
+  u8 enc,
+  void* pCtx,
+  int(*xCompare)(void*,int,const void*,int,const void*),
+  void(*xDel)(void*)
+){
+  CollSeq *pColl;
+  int enc2;
+
+  assert( sqlite3_mutex_held(db->mutex) );
+
+  /* If SQLITE_UTF16 is specified as the encoding type, transform this
+  ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the
+  ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally.
+  */
+  enc2 = enc;
+  testcase( enc2==SQLITE_UTF16 );
+  testcase( enc2==SQLITE_UTF16_ALIGNED );
+  if( enc2==SQLITE_UTF16 || enc2==SQLITE_UTF16_ALIGNED ){
+    enc2 = SQLITE_UTF16NATIVE;
+  }
+  if( enc2<SQLITE_UTF8 || enc2>SQLITE_UTF16BE ){
+    return SQLITE_MISUSE_BKPT;
+  }
+
+  /* Check if this call is removing or replacing an existing collation
+  ** sequence. If so, and there are active VMs, return busy. If there
+  ** are no active VMs, invalidate any pre-compiled statements.
+ */ + pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 0); + if( pColl && pColl->xCmp ){ + if( db->nVdbeActive ){ + sqlite3ErrorWithMsg(db, SQLITE_BUSY, + "unable to delete/modify collation sequence due to active statements"); + return SQLITE_BUSY; + } + sqlite3ExpirePreparedStatements(db); + + /* If collation sequence pColl was created directly by a call to + ** sqlite3_create_collation, and not generated by synthCollSeq(), + ** then any copies made by synthCollSeq() need to be invalidated. + ** Also, collation destructor - CollSeq.xDel() - function may need + ** to be called. + */ + if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){ + CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName); + int j; + for(j=0; j<3; j++){ + CollSeq *p = &aColl[j]; + if( p->enc==pColl->enc ){ + if( p->xDel ){ + p->xDel(p->pUser); + } + p->xCmp = 0; + } + } + } + } + + pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 1); + if( pColl==0 ) return SQLITE_NOMEM_BKPT; + pColl->xCmp = xCompare; + pColl->pUser = pCtx; + pColl->xDel = xDel; + pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED)); + sqlite3Error(db, SQLITE_OK); + return SQLITE_OK; +} + + +/* +** This array defines hard upper bounds on limit values. The +** initializer must be kept in sync with the SQLITE_LIMIT_* +** #defines in sqlite3.h. +*/ +static const int aHardLimit[] = { + SQLITE_MAX_LENGTH, + SQLITE_MAX_SQL_LENGTH, + SQLITE_MAX_COLUMN, + SQLITE_MAX_EXPR_DEPTH, + SQLITE_MAX_COMPOUND_SELECT, + SQLITE_MAX_VDBE_OP, + SQLITE_MAX_FUNCTION_ARG, + SQLITE_MAX_ATTACHED, + SQLITE_MAX_LIKE_PATTERN_LENGTH, + SQLITE_MAX_VARIABLE_NUMBER, /* IMP: R-38091-32352 */ + SQLITE_MAX_TRIGGER_DEPTH, + SQLITE_MAX_WORKER_THREADS, +}; + +/* +** Make sure the hard limits are set to reasonable values +*/ +#if SQLITE_MAX_LENGTH<100 +# error SQLITE_MAX_LENGTH must be at least 100 +#endif +#if SQLITE_MAX_SQL_LENGTH<100 +# error SQLITE_MAX_SQL_LENGTH must be at least 100 +#endif +#if SQLITE_MAX_SQL_LENGTH>SQLITE_MAX_LENGTH +# error SQLITE_MAX_SQL_LENGTH must not be greater than SQLITE_MAX_LENGTH +#endif +#if SQLITE_MAX_COMPOUND_SELECT<2 +# error SQLITE_MAX_COMPOUND_SELECT must be at least 2 +#endif +#if SQLITE_MAX_VDBE_OP<40 +# error SQLITE_MAX_VDBE_OP must be at least 40 +#endif +#if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>127 +# error SQLITE_MAX_FUNCTION_ARG must be between 0 and 127 +#endif +#if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125 +# error SQLITE_MAX_ATTACHED must be between 0 and 125 +#endif +#if SQLITE_MAX_LIKE_PATTERN_LENGTH<1 +# error SQLITE_MAX_LIKE_PATTERN_LENGTH must be at least 1 +#endif +#if SQLITE_MAX_COLUMN>32767 +# error SQLITE_MAX_COLUMN must not exceed 32767 +#endif +#if SQLITE_MAX_TRIGGER_DEPTH<1 +# error SQLITE_MAX_TRIGGER_DEPTH must be at least 1 +#endif +#if SQLITE_MAX_WORKER_THREADS<0 || SQLITE_MAX_WORKER_THREADS>50 +# error SQLITE_MAX_WORKER_THREADS must be between 0 and 50 +#endif + + +/* +** Change the value of a limit. Report the old value. +** If an invalid limit index is supplied, report -1. +** Make no changes but still report the old value if the +** new limit is negative. +** +** A new lower limit does not shrink existing constructs. +** It merely prevents new constructs that exceed the limit +** from forming. 
+*/ +SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ + int oldLimit; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return -1; + } +#endif + + /* EVIDENCE-OF: R-30189-54097 For each limit category SQLITE_LIMIT_NAME + ** there is a hard upper bound set at compile-time by a C preprocessor + ** macro called SQLITE_MAX_NAME. (The "_LIMIT_" in the name is changed to + ** "_MAX_".) + */ + assert( aHardLimit[SQLITE_LIMIT_LENGTH]==SQLITE_MAX_LENGTH ); + assert( aHardLimit[SQLITE_LIMIT_SQL_LENGTH]==SQLITE_MAX_SQL_LENGTH ); + assert( aHardLimit[SQLITE_LIMIT_COLUMN]==SQLITE_MAX_COLUMN ); + assert( aHardLimit[SQLITE_LIMIT_EXPR_DEPTH]==SQLITE_MAX_EXPR_DEPTH ); + assert( aHardLimit[SQLITE_LIMIT_COMPOUND_SELECT]==SQLITE_MAX_COMPOUND_SELECT); + assert( aHardLimit[SQLITE_LIMIT_VDBE_OP]==SQLITE_MAX_VDBE_OP ); + assert( aHardLimit[SQLITE_LIMIT_FUNCTION_ARG]==SQLITE_MAX_FUNCTION_ARG ); + assert( aHardLimit[SQLITE_LIMIT_ATTACHED]==SQLITE_MAX_ATTACHED ); + assert( aHardLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]== + SQLITE_MAX_LIKE_PATTERN_LENGTH ); + assert( aHardLimit[SQLITE_LIMIT_VARIABLE_NUMBER]==SQLITE_MAX_VARIABLE_NUMBER); + assert( aHardLimit[SQLITE_LIMIT_TRIGGER_DEPTH]==SQLITE_MAX_TRIGGER_DEPTH ); + assert( aHardLimit[SQLITE_LIMIT_WORKER_THREADS]==SQLITE_MAX_WORKER_THREADS ); + assert( SQLITE_LIMIT_WORKER_THREADS==(SQLITE_N_LIMIT-1) ); + + + if( limitId<0 || limitId>=SQLITE_N_LIMIT ){ + return -1; + } + oldLimit = db->aLimit[limitId]; + if( newLimit>=0 ){ /* IMP: R-52476-28732 */ + if( newLimit>aHardLimit[limitId] ){ + newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ + } + db->aLimit[limitId] = newLimit; + } + return oldLimit; /* IMP: R-53341-35419 */ +} + +/* +** This function is used to parse both URIs and non-URI filenames passed by the +** user to API functions sqlite3_open() or sqlite3_open_v2(), and for database +** URIs specified as part of ATTACH statements. +** +** The first argument to this function is the name of the VFS to use (or +** a NULL to signify the default VFS) if the URI does not contain a "vfs=xxx" +** query parameter. The second argument contains the URI (or non-URI filename) +** itself. When this function is called the *pFlags variable should contain +** the default flags to open the database handle with. The value stored in +** *pFlags may be updated before returning if the URI filename contains +** "cache=xxx" or "mode=xxx" query parameters. +** +** If successful, SQLITE_OK is returned. In this case *ppVfs is set to point to +** the VFS that should be used to open the database file. *pzFile is set to +** point to a buffer containing the name of the file to open. It is the +** responsibility of the caller to eventually call sqlite3_free() to release +** this buffer. +** +** If an error occurs, then an SQLite error code is returned and *pzErrMsg +** may be set to point to a buffer containing an English language error +** message. It is the responsibility of the caller to eventually release +** this buffer by calling sqlite3_free(). 
+*/
+SQLITE_PRIVATE int sqlite3ParseUri(
+  const char *zDefaultVfs,        /* VFS to use if no "vfs=xxx" query option */
+  const char *zUri,               /* Nul-terminated URI to parse */
+  unsigned int *pFlags,           /* IN/OUT: SQLITE_OPEN_XXX flags */
+  sqlite3_vfs **ppVfs,            /* OUT: VFS to use */
+  char **pzFile,                  /* OUT: Filename component of URI */
+  char **pzErrMsg                 /* OUT: Error message (if rc!=SQLITE_OK) */
+){
+  int rc = SQLITE_OK;
+  unsigned int flags = *pFlags;
+  const char *zVfs = zDefaultVfs;
+  char *zFile;
+  char c;
+  int nUri = sqlite3Strlen30(zUri);
+
+  assert( *pzErrMsg==0 );
+
+  if( ((flags & SQLITE_OPEN_URI)             /* IMP: R-48725-32206 */
+            || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */
+   && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */
+  ){
+    char *zOpt;
+    int eState;                   /* Parser state when parsing URI */
+    int iIn;                      /* Input character index */
+    int iOut = 0;                 /* Output character index */
+    u64 nByte = nUri+2;           /* Bytes of space to allocate */
+
+    /* Make sure the SQLITE_OPEN_URI flag is set to indicate to the VFS xOpen
+    ** method that there may be extra parameters following the file-name. */
+    flags |= SQLITE_OPEN_URI;
+
+    for(iIn=0; iIn<nUri; iIn++) nByte += (zUri[iIn]=='&');
+    zFile = sqlite3_malloc64(nByte);
+    if( !zFile ) return SQLITE_NOMEM_BKPT;
+
+    iIn = 5;
+#ifdef SQLITE_ALLOW_URI_AUTHORITY
+    if( strncmp(zUri+5, "///", 3)==0 ){
+      iIn = 7;
+      /* The following condition causes URIs with five leading / characters
+      ** like file://///host/path to be converted into UNCs like //host/path.
+      ** The correct URI for such a UNC has only two or four leading / characters
+      ** file://host/path or file:////host/path. But 5 leading slashes is a
+      ** common error, we are told, so we handle it as a special case. */
+      if( strncmp(zUri+7, "///", 3)==0 ){ iIn++; }
+    }else if( strncmp(zUri+5, "//localhost/", 12)==0 ){
+      iIn = 16;
+    }
+#else
+    /* Discard the scheme and authority segments of the URI. */
+    if( zUri[5]=='/' && zUri[6]=='/' ){
+      iIn = 7;
+      while( zUri[iIn] && zUri[iIn]!='/' ) iIn++;
+      if( iIn!=7 && (iIn!=16 || memcmp("localhost", &zUri[7], 9)) ){
+        *pzErrMsg = sqlite3_mprintf("invalid uri authority: %.*s",
+            iIn-7, &zUri[7]);
+        rc = SQLITE_ERROR;
+        goto parse_uri_out;
+      }
+    }
+#endif
+
+    /* Copy the filename and any query parameters into the zFile buffer.
+    ** Decode %HH escape codes along the way.
+    **
+    ** Within this loop, variable eState may be set to 0, 1 or 2, depending
+    ** on the parsing context. As follows:
+    **
+    **   0: Parsing file-name. Full URI syntax allowed.
+    **   1: Parsing name section of a name=value query parameter.
+    **   2: Parsing value section of a name=value query parameter.
+    */
+    eState = 0;
+    while( (c = zUri[iIn])!=0 && c!='#' ){
+      iIn++;
+      if( c=='%'
+       && sqlite3Isxdigit(zUri[iIn])
+       && sqlite3Isxdigit(zUri[iIn+1])
+      ){
+        int octet = (sqlite3HexToInt(zUri[iIn++]) << 4);
+        octet += sqlite3HexToInt(zUri[iIn++]);
+
+        assert( octet>=0 && octet<256 );
+        if( octet==0 ){
+#ifndef SQLITE_ENABLE_URI_00_ERROR
+          /* This branch is taken when "%00" appears within the URI. In this
+          ** case we ignore all text in the remainder of the path, name or
+          ** value currently being parsed. So ignore the current character
+          ** and skip to the next "?", "=" or "&", as appropriate. */
+          while( (c = zUri[iIn])!=0 && c!='#'
+              && (eState!=0 || c!='?')
+              && (eState!=1 || (c!='=' && c!='&'))
+              && (eState!=2 || c!='&')
+          ){
+            iIn++;
+          }
+          continue;
+#else
+          /* If ENABLE_URI_00_ERROR is defined, "%00" in a URI is an error. */
+          *pzErrMsg = sqlite3_mprintf("unexpected %%00 in uri");
+          rc = SQLITE_ERROR;
+          goto parse_uri_out;
+#endif
+        }
+        c = octet;
+      }else if( eState==1 && (c=='&' || c=='=') ){
+        if( zFile[iOut-1]==0 ){
+          /* An empty option name. Ignore this option altogether. */
+          while( zUri[iIn] && zUri[iIn]!='#' && zUri[iIn-1]!='&' ) iIn++;
+          continue;
+        }
+        if( c=='&' ){
+          zFile[iOut++] = '\0';
+        }else{
+          eState = 2;
+        }
+        c = 0;
+      }else if( (eState==0 && c=='?') || (eState==2 && c=='&') ){
+        c = 0;
+        eState = 1;
+      }
+      zFile[iOut++] = c;
+    }
+    if( eState==1 ) zFile[iOut++] = '\0';
+    zFile[iOut++] = '\0';
+    zFile[iOut++] = '\0';
+
+    /* Check if there were any options specified that should be interpreted
+    ** here. Options that are interpreted here include "vfs" and those that
+    ** correspond to flags that may be passed to the sqlite3_open_v2()
+    ** method.
*/ + zOpt = &zFile[sqlite3Strlen30(zFile)+1]; + while( zOpt[0] ){ + int nOpt = sqlite3Strlen30(zOpt); + char *zVal = &zOpt[nOpt+1]; + int nVal = sqlite3Strlen30(zVal); + + if( nOpt==3 && memcmp("vfs", zOpt, 3)==0 ){ + zVfs = zVal; + }else{ + struct OpenMode { + const char *z; + int mode; + } *aMode = 0; + char *zModeType = 0; + int mask = 0; + int limit = 0; + + if( nOpt==5 && memcmp("cache", zOpt, 5)==0 ){ + static struct OpenMode aCacheMode[] = { + { "shared", SQLITE_OPEN_SHAREDCACHE }, + { "private", SQLITE_OPEN_PRIVATECACHE }, + { 0, 0 } + }; + + mask = SQLITE_OPEN_SHAREDCACHE|SQLITE_OPEN_PRIVATECACHE; + aMode = aCacheMode; + limit = mask; + zModeType = "cache"; + } + if( nOpt==4 && memcmp("mode", zOpt, 4)==0 ){ + static struct OpenMode aOpenMode[] = { + { "ro", SQLITE_OPEN_READONLY }, + { "rw", SQLITE_OPEN_READWRITE }, + { "rwc", SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE }, + { "memory", SQLITE_OPEN_MEMORY }, + { 0, 0 } + }; + + mask = SQLITE_OPEN_READONLY | SQLITE_OPEN_READWRITE + | SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY; + aMode = aOpenMode; + limit = mask & flags; + zModeType = "access"; + } + + if( aMode ){ + int i; + int mode = 0; + for(i=0; aMode[i].z; i++){ + const char *z = aMode[i].z; + if( nVal==sqlite3Strlen30(z) && 0==memcmp(zVal, z, nVal) ){ + mode = aMode[i].mode; + break; + } + } + if( mode==0 ){ + *pzErrMsg = sqlite3_mprintf("no such %s mode: %s", zModeType, zVal); + rc = SQLITE_ERROR; + goto parse_uri_out; + } + if( (mode & ~SQLITE_OPEN_MEMORY)>limit ){ + *pzErrMsg = sqlite3_mprintf("%s mode not allowed: %s", + zModeType, zVal); + rc = SQLITE_PERM; + goto parse_uri_out; + } + flags = (flags & ~mask) | mode; + } + } + + zOpt = &zVal[nVal+1]; + } + + }else{ + zFile = sqlite3_malloc64(nUri+2); + if( !zFile ) return SQLITE_NOMEM_BKPT; + if( nUri ){ + memcpy(zFile, zUri, nUri); + } + zFile[nUri] = '\0'; + zFile[nUri+1] = '\0'; + flags &= ~SQLITE_OPEN_URI; + } + + *ppVfs = sqlite3_vfs_find(zVfs); + if( *ppVfs==0 ){ + *pzErrMsg = sqlite3_mprintf("no such vfs: %s", zVfs); + rc = SQLITE_ERROR; + } + parse_uri_out: + if( rc!=SQLITE_OK ){ + sqlite3_free(zFile); + zFile = 0; + } + *pFlags = flags; + *pzFile = zFile; + return rc; +} + + +/* +** This routine does the work of opening a database on behalf of +** sqlite3_open() and sqlite3_open16(). The database filename "zFilename" +** is UTF-8 encoded. +*/ +static int openDatabase( + const char *zFilename, /* Database filename UTF-8 encoded */ + sqlite3 **ppDb, /* OUT: Returned database handle */ + unsigned int flags, /* Operational flags */ + const char *zVfs /* Name of the VFS to use */ +){ + sqlite3 *db; /* Store allocated handle here */ + int rc; /* Return code */ + int isThreadsafe; /* True for threadsafe connections */ + char *zOpen = 0; /* Filename argument to pass to BtreeOpen() */ + char *zErrMsg = 0; /* Error message from sqlite3ParseUri() */ + +#ifdef SQLITE_ENABLE_API_ARMOR + if( ppDb==0 ) return SQLITE_MISUSE_BKPT; +#endif + *ppDb = 0; +#ifndef SQLITE_OMIT_AUTOINIT + rc = sqlite3_initialize(); + if( rc ) return rc; +#endif + + /* Only allow sensible combinations of bits in the flags argument. + ** Throw an error if any non-sense combination is used. If we + ** do not block illegal combinations here, it could trigger + ** assert() statements in deeper layers. 
Sensible combinations + ** are: + ** + ** 1: SQLITE_OPEN_READONLY + ** 2: SQLITE_OPEN_READWRITE + ** 6: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE + */ + assert( SQLITE_OPEN_READONLY == 0x01 ); + assert( SQLITE_OPEN_READWRITE == 0x02 ); + assert( SQLITE_OPEN_CREATE == 0x04 ); + testcase( (1<<(flags&7))==0x02 ); /* READONLY */ + testcase( (1<<(flags&7))==0x04 ); /* READWRITE */ + testcase( (1<<(flags&7))==0x40 ); /* READWRITE | CREATE */ + if( ((1<<(flags&7)) & 0x46)==0 ){ + return SQLITE_MISUSE_BKPT; /* IMP: R-65497-44594 */ + } + + if( sqlite3GlobalConfig.bCoreMutex==0 ){ + isThreadsafe = 0; + }else if( flags & SQLITE_OPEN_NOMUTEX ){ + isThreadsafe = 0; + }else if( flags & SQLITE_OPEN_FULLMUTEX ){ + isThreadsafe = 1; + }else{ + isThreadsafe = sqlite3GlobalConfig.bFullMutex; + } + if( flags & SQLITE_OPEN_PRIVATECACHE ){ + flags &= ~SQLITE_OPEN_SHAREDCACHE; + }else if( sqlite3GlobalConfig.sharedCacheEnabled ){ + flags |= SQLITE_OPEN_SHAREDCACHE; + } + + /* Remove harmful bits from the flags parameter + ** + ** The SQLITE_OPEN_NOMUTEX and SQLITE_OPEN_FULLMUTEX flags were + ** dealt with in the previous code block. Besides these, the only + ** valid input flags for sqlite3_open_v2() are SQLITE_OPEN_READONLY, + ** SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_SHAREDCACHE, + ** SQLITE_OPEN_PRIVATECACHE, and some reserved bits. Silently mask + ** off all other flags. + */ + flags &= ~( SQLITE_OPEN_DELETEONCLOSE | + SQLITE_OPEN_EXCLUSIVE | + SQLITE_OPEN_MAIN_DB | + SQLITE_OPEN_TEMP_DB | + SQLITE_OPEN_TRANSIENT_DB | + SQLITE_OPEN_MAIN_JOURNAL | + SQLITE_OPEN_TEMP_JOURNAL | + SQLITE_OPEN_SUBJOURNAL | + SQLITE_OPEN_MASTER_JOURNAL | + SQLITE_OPEN_NOMUTEX | + SQLITE_OPEN_FULLMUTEX | + SQLITE_OPEN_WAL + ); + + /* Allocate the sqlite data structure */ + db = sqlite3MallocZero( sizeof(sqlite3) ); + if( db==0 ) goto opendb_out; + if( isThreadsafe ){ + db->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); + if( db->mutex==0 ){ + sqlite3_free(db); + db = 0; + goto opendb_out; + } + } + sqlite3_mutex_enter(db->mutex); + db->errMask = 0xff; + db->nDb = 2; + db->magic = SQLITE_MAGIC_BUSY; + db->aDb = db->aDbStatic; + + assert( sizeof(db->aLimit)==sizeof(aHardLimit) ); + memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit)); + db->aLimit[SQLITE_LIMIT_WORKER_THREADS] = SQLITE_DEFAULT_WORKER_THREADS; + db->autoCommit = 1; + db->nextAutovac = -1; + db->szMmap = sqlite3GlobalConfig.szMmap; + db->nextPagesize = 0; + db->nMaxSorterMmap = 0x7FFFFFFF; + db->flags |= SQLITE_ShortColNames | SQLITE_EnableTrigger | SQLITE_CacheSpill +#if !defined(SQLITE_DEFAULT_AUTOMATIC_INDEX) || SQLITE_DEFAULT_AUTOMATIC_INDEX + | SQLITE_AutoIndex +#endif +#if SQLITE_DEFAULT_CKPTFULLFSYNC + | SQLITE_CkptFullFSync +#endif +#if SQLITE_DEFAULT_FILE_FORMAT<4 + | SQLITE_LegacyFileFmt +#endif +#ifdef SQLITE_ENABLE_LOAD_EXTENSION + | SQLITE_LoadExtension +#endif +#if SQLITE_DEFAULT_RECURSIVE_TRIGGERS + | SQLITE_RecTriggers +#endif +#if defined(SQLITE_DEFAULT_FOREIGN_KEYS) && SQLITE_DEFAULT_FOREIGN_KEYS + | SQLITE_ForeignKeys +#endif +#if defined(SQLITE_REVERSE_UNORDERED_SELECTS) + | SQLITE_ReverseOrder +#endif +#if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK) + | SQLITE_CellSizeCk +#endif +#if defined(SQLITE_ENABLE_FTS3_TOKENIZER) + | SQLITE_Fts3Tokenizer +#endif + ; + sqlite3HashInit(&db->aCollSeq); +#ifndef SQLITE_OMIT_VIRTUALTABLE + sqlite3HashInit(&db->aModule); +#endif + + /* Add the default collation sequence BINARY. BINARY works for both UTF-8 + ** and UTF-16, so add a version for each to avoid any unnecessary + ** conversions. 
The only error that can occur here is a malloc() failure. + ** + ** EVIDENCE-OF: R-52786-44878 SQLite defines three built-in collating + ** functions: + */ + createCollation(db, sqlite3StrBINARY, SQLITE_UTF8, 0, binCollFunc, 0); + createCollation(db, sqlite3StrBINARY, SQLITE_UTF16BE, 0, binCollFunc, 0); + createCollation(db, sqlite3StrBINARY, SQLITE_UTF16LE, 0, binCollFunc, 0); + createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0); + createCollation(db, "RTRIM", SQLITE_UTF8, (void*)1, binCollFunc, 0); + if( db->mallocFailed ){ + goto opendb_out; + } + /* EVIDENCE-OF: R-08308-17224 The default collating function for all + ** strings is BINARY. + */ + db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, sqlite3StrBINARY, 0); + assert( db->pDfltColl!=0 ); + + /* Parse the filename/URI argument. */ + db->openFlags = flags; + rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg); + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); + sqlite3ErrorWithMsg(db, rc, zErrMsg ? "%s" : 0, zErrMsg); + sqlite3_free(zErrMsg); + goto opendb_out; + } + + /* Open the backend database driver */ + rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0, + flags | SQLITE_OPEN_MAIN_DB); + if( rc!=SQLITE_OK ){ + if( rc==SQLITE_IOERR_NOMEM ){ + rc = SQLITE_NOMEM_BKPT; + } + sqlite3Error(db, rc); + goto opendb_out; + } + sqlite3BtreeEnter(db->aDb[0].pBt); + db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt); + if( !db->mallocFailed ) ENC(db) = SCHEMA_ENC(db); + sqlite3BtreeLeave(db->aDb[0].pBt); + db->aDb[1].pSchema = sqlite3SchemaGet(db, 0); + + /* The default safety_level for the main database is FULL; for the temp + ** database it is OFF. This matches the pager layer defaults. + */ + db->aDb[0].zDbSName = "main"; + db->aDb[0].safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1; + db->aDb[1].zDbSName = "temp"; + db->aDb[1].safety_level = PAGER_SYNCHRONOUS_OFF; + + db->magic = SQLITE_MAGIC_OPEN; + if( db->mallocFailed ){ + goto opendb_out; + } + + /* Register all built-in functions, but do not attempt to read the + ** database schema yet. This is delayed until the first time the database + ** is accessed. + */ + sqlite3Error(db, SQLITE_OK); + sqlite3RegisterPerConnectionBuiltinFunctions(db); + rc = sqlite3_errcode(db); + +#ifdef SQLITE_ENABLE_FTS5 + /* Register any built-in FTS5 module before loading the automatic + ** extensions. This allows automatic extensions to register FTS5 + ** tokenizers and auxiliary functions. */ + if( !db->mallocFailed && rc==SQLITE_OK ){ + rc = sqlite3Fts5Init(db); + } +#endif + + /* Load automatic extensions - extensions that have been registered + ** using the sqlite3_automatic_extension() API. 
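+  **
+  ** A minimal application-side sketch of that mechanism (illustrative
+  ** only; myExtInit is a hypothetical entry point, not part of this file):
+  **
+  **    static int myExtInit(sqlite3 *db, char **pzErrMsg,
+  **                         const sqlite3_api_routines *pApi){
+  **      return SQLITE_OK;   /* register app functions/vtabs on db here */
+  **    }
+  **    sqlite3_auto_extension((void(*)(void))myExtInit);
+  **
+  ** Every connection created by openDatabase() below then runs myExtInit()
+  ** before the open call returns.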
+  */
+  if( rc==SQLITE_OK ){
+    sqlite3AutoLoadExtensions(db);
+    rc = sqlite3_errcode(db);
+    if( rc!=SQLITE_OK ){
+      goto opendb_out;
+    }
+  }
+
+#ifdef SQLITE_ENABLE_FTS1
+  if( !db->mallocFailed ){
+    extern int sqlite3Fts1Init(sqlite3*);
+    rc = sqlite3Fts1Init(db);
+  }
+#endif
+
+#ifdef SQLITE_ENABLE_FTS2
+  if( !db->mallocFailed && rc==SQLITE_OK ){
+    extern int sqlite3Fts2Init(sqlite3*);
+    rc = sqlite3Fts2Init(db);
+  }
+#endif
+
+#ifdef SQLITE_ENABLE_FTS3 /* automatically defined by SQLITE_ENABLE_FTS4 */
+  if( !db->mallocFailed && rc==SQLITE_OK ){
+    rc = sqlite3Fts3Init(db);
+  }
+#endif
+
+#ifdef SQLITE_ENABLE_ICU
+  if( !db->mallocFailed && rc==SQLITE_OK ){
+    rc = sqlite3IcuInit(db);
+  }
+#endif
+
+#ifdef SQLITE_ENABLE_RTREE
+  if( !db->mallocFailed && rc==SQLITE_OK){
+    rc = sqlite3RtreeInit(db);
+  }
+#endif
+
+#ifdef SQLITE_ENABLE_DBSTAT_VTAB
+  if( !db->mallocFailed && rc==SQLITE_OK){
+    rc = sqlite3DbstatRegister(db);
+  }
+#endif
+
+#ifdef SQLITE_ENABLE_JSON1
+  if( !db->mallocFailed && rc==SQLITE_OK){
+    rc = sqlite3Json1Init(db);
+  }
+#endif
+
+  /* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking
+  ** mode.  -DSQLITE_DEFAULT_LOCKING_MODE=0 makes NORMAL the default locking
+  ** mode.  Doing nothing at all also makes NORMAL the default.
+  */
+#ifdef SQLITE_DEFAULT_LOCKING_MODE
+  db->dfltLockMode = SQLITE_DEFAULT_LOCKING_MODE;
+  sqlite3PagerLockingMode(sqlite3BtreePager(db->aDb[0].pBt),
+                          SQLITE_DEFAULT_LOCKING_MODE);
+#endif
+
+  if( rc ) sqlite3Error(db, rc);
+
+  /* Enable the lookaside-malloc subsystem */
+  setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside,
+                 sqlite3GlobalConfig.nLookaside);
+
+  sqlite3_wal_autocheckpoint(db, SQLITE_DEFAULT_WAL_AUTOCHECKPOINT);
+
+opendb_out:
+  if( db ){
+    assert( db->mutex!=0 || isThreadsafe==0
+            || sqlite3GlobalConfig.bFullMutex==0 );
+    sqlite3_mutex_leave(db->mutex);
+  }
+  rc = sqlite3_errcode(db);
+  assert( db!=0 || rc==SQLITE_NOMEM );
+  if( rc==SQLITE_NOMEM ){
+    sqlite3_close(db);
+    db = 0;
+  }else if( rc!=SQLITE_OK ){
+    db->magic = SQLITE_MAGIC_SICK;
+  }
+  *ppDb = db;
+#ifdef SQLITE_ENABLE_SQLLOG
+  if( sqlite3GlobalConfig.xSqllog ){
+    /* Opening a db handle. Fourth parameter is passed 0. */
+    void *pArg = sqlite3GlobalConfig.pSqllogArg;
+    sqlite3GlobalConfig.xSqllog(pArg, db, zFilename, 0);
+  }
+#endif
+#if defined(SQLITE_HAS_CODEC)
+  if( rc==SQLITE_OK ){
+    const char *zHexKey = sqlite3_uri_parameter(zOpen, "hexkey");
+    if( zHexKey && zHexKey[0] ){
+      u8 iByte;
+      int i;
+      char zKey[40];
+      for(i=0, iByte=0; i<sizeof(zKey)*2 && sqlite3Isxdigit(zHexKey[i]); i++){
+        iByte = (iByte<<4) + sqlite3HexToInt(zHexKey[i]);
+        if( (i&1)!=0 ) zKey[i/2] = iByte;
+      }
+      sqlite3_key_v2(db, 0, zKey, i/2);
+    }
+  }
+#endif
+  sqlite3_free(zOpen);
+  return rc & 0xff;
+}
+
+/*
+** Open a new database handle.
+*/
+SQLITE_API int sqlite3_open(
+  const char *zFilename,
+  sqlite3 **ppDb
+){
+  return openDatabase(zFilename, ppDb,
+                      SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
+}
+SQLITE_API int sqlite3_open_v2(
+  const char *filename,   /* Database filename (UTF-8) */
+  sqlite3 **ppDb,         /* OUT: SQLite db handle */
+  int flags,              /* Flags */
+  const char *zVfs        /* Name of VFS module to use */
+){
+  return openDatabase(filename, ppDb, (unsigned int)flags, zVfs);
+}
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Open a new database handle.
+*/
+SQLITE_API int sqlite3_open16(
+  const void *zFilename,
+  sqlite3 **ppDb
+){
+  char const *zFilename8;   /* zFilename encoded in UTF-8 instead of UTF-16 */
+  sqlite3_value *pVal;
+  int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( ppDb==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  *ppDb = 0;
+#ifndef SQLITE_OMIT_AUTOINIT
+  rc = sqlite3_initialize();
+  if( rc ) return rc;
+#endif
+  if( zFilename==0 ) zFilename = "\000\000";
+  pVal = sqlite3ValueNew(0);
+  sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC);
+  zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8);
+  if( zFilename8 ){
+    rc = openDatabase(zFilename8, ppDb,
+                      SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
+    assert( *ppDb || rc==SQLITE_NOMEM );
+    if( rc==SQLITE_OK && !DbHasProperty(*ppDb, 0, DB_SchemaLoaded) ){
+      SCHEMA_ENC(*ppDb) = ENC(*ppDb) = SQLITE_UTF16NATIVE;
+    }
+  }else{
+    rc = SQLITE_NOMEM_BKPT;
+  }
+  sqlite3ValueFree(pVal);
+
+  return rc & 0xff;
+}
+#endif /* SQLITE_OMIT_UTF16 */
+
+/*
+** Register a new collation sequence with the database handle db.
+*/
+SQLITE_API int sqlite3_create_collation(
+  sqlite3* db,
+  const char *zName,
+  int enc,
+  void* pCtx,
+  int(*xCompare)(void*,int,const void*,int,const void*)
+){
+  return sqlite3_create_collation_v2(db, zName, enc, pCtx, xCompare, 0);
+}
+
+/*
+** Register a new collation sequence with the database handle db.
+*/
+SQLITE_API int sqlite3_create_collation_v2(
+  sqlite3* db,
+  const char *zName,
+  int enc,
+  void* pCtx,
+  int(*xCompare)(void*,int,const void*,int,const void*),
+  void(*xDel)(void*)
+){
+  int rc;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT;
+#endif
+  sqlite3_mutex_enter(db->mutex);
+  assert( !db->mallocFailed );
+  rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, xDel);
+  rc = sqlite3ApiExit(db, rc);
+  sqlite3_mutex_leave(db->mutex);
+  return rc;
+}
+
+#ifndef SQLITE_OMIT_UTF16
+/*
+** Register a new collation sequence with the database handle db.
+*/ +SQLITE_API int sqlite3_create_collation16( + sqlite3* db, + const void *zName, + int enc, + void* pCtx, + int(*xCompare)(void*,int,const void*,int,const void*) +){ + int rc = SQLITE_OK; + char *zName8; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + assert( !db->mallocFailed ); + zName8 = sqlite3Utf16to8(db, zName, -1, SQLITE_UTF16NATIVE); + if( zName8 ){ + rc = createCollation(db, zName8, (u8)enc, pCtx, xCompare, 0); + sqlite3DbFree(db, zName8); + } + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} +#endif /* SQLITE_OMIT_UTF16 */ + +/* +** Register a collation sequence factory callback with the database handle +** db. Replace any previously installed collation sequence factory. +*/ +SQLITE_API int sqlite3_collation_needed( + sqlite3 *db, + void *pCollNeededArg, + void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*) +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + db->xCollNeeded = xCollNeeded; + db->xCollNeeded16 = 0; + db->pCollNeededArg = pCollNeededArg; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +#ifndef SQLITE_OMIT_UTF16 +/* +** Register a collation sequence factory callback with the database handle +** db. Replace any previously installed collation sequence factory. +*/ +SQLITE_API int sqlite3_collation_needed16( + sqlite3 *db, + void *pCollNeededArg, + void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*) +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + db->xCollNeeded = 0; + db->xCollNeeded16 = xCollNeeded16; + db->pCollNeededArg = pCollNeededArg; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} +#endif /* SQLITE_OMIT_UTF16 */ + +#ifndef SQLITE_OMIT_DEPRECATED +/* +** This function is now an anachronism. It used to be used to recover from a +** malloc() failure, but SQLite now does this automatically. +*/ +SQLITE_API int sqlite3_global_recover(void){ + return SQLITE_OK; +} +#endif + +/* +** Test to see whether or not the database connection is in autocommit +** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on +** by default. Autocommit is disabled by a BEGIN statement and reenabled +** by the next COMMIT or ROLLBACK. +*/ +SQLITE_API int sqlite3_get_autocommit(sqlite3 *db){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + return db->autoCommit; +} + +/* +** The following routines are substitutes for constants SQLITE_CORRUPT, +** SQLITE_MISUSE, SQLITE_CANTOPEN, SQLITE_NOMEM and possibly other error +** constants. They serve two purposes: +** +** 1. Serve as a convenient place to set a breakpoint in a debugger +** to detect when version error conditions occurs. +** +** 2. Invoke sqlite3_log() to provide the source code location where +** a low-level error is first detected. 
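+**
+** For example, SQLITE_CORRUPT_BKPT (defined earlier in this amalgamation)
+** expands to sqlite3CorruptError(__LINE__), so a single debugger breakpoint
+** on sqlite3CorruptError() stops on every corruption report in the library.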
+*/
+static int reportError(int iErr, int lineno, const char *zType){
+  sqlite3_log(iErr, "%s at line %d of [%.10s]",
+              zType, lineno, 20+sqlite3_sourceid());
+  return iErr;
+}
+SQLITE_PRIVATE int sqlite3CorruptError(int lineno){
+  testcase( sqlite3GlobalConfig.xLog!=0 );
+  return reportError(SQLITE_CORRUPT, lineno, "database corruption");
+}
+SQLITE_PRIVATE int sqlite3MisuseError(int lineno){
+  testcase( sqlite3GlobalConfig.xLog!=0 );
+  return reportError(SQLITE_MISUSE, lineno, "misuse");
+}
+SQLITE_PRIVATE int sqlite3CantopenError(int lineno){
+  testcase( sqlite3GlobalConfig.xLog!=0 );
+  return reportError(SQLITE_CANTOPEN, lineno, "cannot open file");
+}
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3NomemError(int lineno){
+  testcase( sqlite3GlobalConfig.xLog!=0 );
+  return reportError(SQLITE_NOMEM, lineno, "OOM");
+}
+SQLITE_PRIVATE int sqlite3IoerrnomemError(int lineno){
+  testcase( sqlite3GlobalConfig.xLog!=0 );
+  return reportError(SQLITE_IOERR_NOMEM, lineno, "I/O OOM error");
+}
+#endif
+
+#ifndef SQLITE_OMIT_DEPRECATED
+/*
+** This is a convenience routine that makes sure that all thread-specific
+** data for this thread has been deallocated.
+**
+** SQLite no longer uses thread-specific data so this routine is now a
+** no-op.  It is retained for historical compatibility.
+*/
+SQLITE_API void sqlite3_thread_cleanup(void){
+}
+#endif
+
+/*
+** Return meta information about a specific column of a database table.
+** See comment in sqlite3.h (sqlite.h.in) for details.
+*/
+SQLITE_API int sqlite3_table_column_metadata(
+  sqlite3 *db,                /* Connection handle */
+  const char *zDbName,        /* Database name or NULL */
+  const char *zTableName,     /* Table name */
+  const char *zColumnName,    /* Column name */
+  char const **pzDataType,    /* OUTPUT: Declared data type */
+  char const **pzCollSeq,     /* OUTPUT: Collation sequence name */
+  int *pNotNull,              /* OUTPUT: True if NOT NULL constraint exists */
+  int *pPrimaryKey,           /* OUTPUT: True if column part of PK */
+  int *pAutoinc               /* OUTPUT: True if column is auto-increment */
+){
+  int rc;
+  char *zErrMsg = 0;
+  Table *pTab = 0;
+  Column *pCol = 0;
+  int iCol = 0;
+  char const *zDataType = 0;
+  char const *zCollSeq = 0;
+  int notnull = 0;
+  int primarykey = 0;
+  int autoinc = 0;
+
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( !sqlite3SafetyCheckOk(db) || zTableName==0 ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#endif
+
+  /* Ensure the database schema has been loaded */
+  sqlite3_mutex_enter(db->mutex);
+  sqlite3BtreeEnterAll(db);
+  rc = sqlite3Init(db, &zErrMsg);
+  if( SQLITE_OK!=rc ){
+    goto error_out;
+  }
+
+  /* Locate the table in question */
+  pTab = sqlite3FindTable(db, zTableName, zDbName);
+  if( !pTab || pTab->pSelect ){
+    pTab = 0;
+    goto error_out;
+  }
+
+  /* Find the column for which info is requested */
+  if( zColumnName==0 ){
+    /* Query for existence of table only */
+  }else{
+    for(iCol=0; iCol<pTab->nCol; iCol++){
+      pCol = &pTab->aCol[iCol];
+      if( 0==sqlite3StrICmp(pCol->zName, zColumnName) ){
+        break;
+      }
+    }
+    if( iCol==pTab->nCol ){
+      if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){
+        iCol = pTab->iPKey;
+        pCol = iCol>=0 ? &pTab->aCol[iCol] : 0;
+      }else{
+        pTab = 0;
+        goto error_out;
+      }
+    }
+  }
+
+  /* The following block stores the meta information that will be returned
+  ** to the caller in local variables zDataType, zCollSeq, notnull, primarykey
+  ** and autoinc. At this point there are two possibilities:
+  **
+  **     1. The specified column name was "rowid", "oid" or "_rowid_"
+  **        and there is no explicitly declared IPK column.
+  **
+  **     2.
The table is not a view and the column name identified an + ** explicitly declared column. Copy meta information from *pCol. + */ + if( pCol ){ + zDataType = sqlite3ColumnType(pCol,0); + zCollSeq = pCol->zColl; + notnull = pCol->notNull!=0; + primarykey = (pCol->colFlags & COLFLAG_PRIMKEY)!=0; + autoinc = pTab->iPKey==iCol && (pTab->tabFlags & TF_Autoincrement)!=0; + }else{ + zDataType = "INTEGER"; + primarykey = 1; + } + if( !zCollSeq ){ + zCollSeq = sqlite3StrBINARY; + } + +error_out: + sqlite3BtreeLeaveAll(db); + + /* Whether the function call succeeded or failed, set the output parameters + ** to whatever their local counterparts contain. If an error did occur, + ** this has the effect of zeroing all output parameters. + */ + if( pzDataType ) *pzDataType = zDataType; + if( pzCollSeq ) *pzCollSeq = zCollSeq; + if( pNotNull ) *pNotNull = notnull; + if( pPrimaryKey ) *pPrimaryKey = primarykey; + if( pAutoinc ) *pAutoinc = autoinc; + + if( SQLITE_OK==rc && !pTab ){ + sqlite3DbFree(db, zErrMsg); + zErrMsg = sqlite3MPrintf(db, "no such table column: %s.%s", zTableName, + zColumnName); + rc = SQLITE_ERROR; + } + sqlite3ErrorWithMsg(db, rc, (zErrMsg?"%s":0), zErrMsg); + sqlite3DbFree(db, zErrMsg); + rc = sqlite3ApiExit(db, rc); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Sleep for a little while. Return the amount of time slept. +*/ +SQLITE_API int sqlite3_sleep(int ms){ + sqlite3_vfs *pVfs; + int rc; + pVfs = sqlite3_vfs_find(0); + if( pVfs==0 ) return 0; + + /* This function works in milliseconds, but the underlying OsSleep() + ** API uses microseconds. Hence the 1000's. + */ + rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); + return rc; +} + +/* +** Enable or disable the extended result codes. +*/ +SQLITE_API int sqlite3_extended_result_codes(sqlite3 *db, int onoff){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + db->errMask = onoff ? 0xffffffff : 0xff; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +/* +** Invoke the xFileControl method on a particular database. +*/ +SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ + int rc = SQLITE_ERROR; + Btree *pBtree; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); + pBtree = sqlite3DbNameToBtree(db, zDbName); + if( pBtree ){ + Pager *pPager; + sqlite3_file *fd; + sqlite3BtreeEnter(pBtree); + pPager = sqlite3BtreePager(pBtree); + assert( pPager!=0 ); + fd = sqlite3PagerFile(pPager); + assert( fd!=0 ); + if( op==SQLITE_FCNTL_FILE_POINTER ){ + *(sqlite3_file**)pArg = fd; + rc = SQLITE_OK; + }else if( op==SQLITE_FCNTL_VFS_POINTER ){ + *(sqlite3_vfs**)pArg = sqlite3PagerVfs(pPager); + rc = SQLITE_OK; + }else if( op==SQLITE_FCNTL_JOURNAL_POINTER ){ + *(sqlite3_file**)pArg = sqlite3PagerJrnlFile(pPager); + rc = SQLITE_OK; + }else if( fd->pMethods ){ + rc = sqlite3OsFileControl(fd, op, pArg); + }else{ + rc = SQLITE_NOTFOUND; + } + sqlite3BtreeLeave(pBtree); + } + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** Interface to the testing logic. +*/ +SQLITE_API int sqlite3_test_control(int op, ...){ + int rc = 0; +#ifdef SQLITE_UNTESTABLE + UNUSED_PARAMETER(op); +#else + va_list ap; + va_start(ap, op); + switch( op ){ + + /* + ** Save the current state of the PRNG. 
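+  **
+  ** A typical save/restore sequence in a test harness might look like
+  ** this (a sketch; runNoiseTest is a hypothetical helper):
+  **
+  **    sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SAVE);
+  **    runNoiseTest();   /* consumes randomness */
+  **    sqlite3_test_control(SQLITE_TESTCTRL_PRNG_RESTORE);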
+ */ + case SQLITE_TESTCTRL_PRNG_SAVE: { + sqlite3PrngSaveState(); + break; + } + + /* + ** Restore the state of the PRNG to the last state saved using + ** PRNG_SAVE. If PRNG_SAVE has never before been called, then + ** this verb acts like PRNG_RESET. + */ + case SQLITE_TESTCTRL_PRNG_RESTORE: { + sqlite3PrngRestoreState(); + break; + } + + /* + ** Reset the PRNG back to its uninitialized state. The next call + ** to sqlite3_randomness() will reseed the PRNG using a single call + ** to the xRandomness method of the default VFS. + */ + case SQLITE_TESTCTRL_PRNG_RESET: { + sqlite3_randomness(0,0); + break; + } + + /* + ** sqlite3_test_control(BITVEC_TEST, size, program) + ** + ** Run a test against a Bitvec object of size. The program argument + ** is an array of integers that defines the test. Return -1 on a + ** memory allocation error, 0 on success, or non-zero for an error. + ** See the sqlite3BitvecBuiltinTest() for additional information. + */ + case SQLITE_TESTCTRL_BITVEC_TEST: { + int sz = va_arg(ap, int); + int *aProg = va_arg(ap, int*); + rc = sqlite3BitvecBuiltinTest(sz, aProg); + break; + } + + /* + ** sqlite3_test_control(FAULT_INSTALL, xCallback) + ** + ** Arrange to invoke xCallback() whenever sqlite3FaultSim() is called, + ** if xCallback is not NULL. + ** + ** As a test of the fault simulator mechanism itself, sqlite3FaultSim(0) + ** is called immediately after installing the new callback and the return + ** value from sqlite3FaultSim(0) becomes the return from + ** sqlite3_test_control(). + */ + case SQLITE_TESTCTRL_FAULT_INSTALL: { + /* MSVC is picky about pulling func ptrs from va lists. + ** http://support.microsoft.com/kb/47961 + ** sqlite3GlobalConfig.xTestCallback = va_arg(ap, int(*)(int)); + */ + typedef int(*TESTCALLBACKFUNC_t)(int); + sqlite3GlobalConfig.xTestCallback = va_arg(ap, TESTCALLBACKFUNC_t); + rc = sqlite3FaultSim(0); + break; + } + + /* + ** sqlite3_test_control(BENIGN_MALLOC_HOOKS, xBegin, xEnd) + ** + ** Register hooks to call to indicate which malloc() failures + ** are benign. + */ + case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: { + typedef void (*void_function)(void); + void_function xBenignBegin; + void_function xBenignEnd; + xBenignBegin = va_arg(ap, void_function); + xBenignEnd = va_arg(ap, void_function); + sqlite3BenignMallocHooks(xBenignBegin, xBenignEnd); + break; + } + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, unsigned int X) + ** + ** Set the PENDING byte to the value in the argument, if X>0. + ** Make no changes if X==0. Return the value of the pending byte + ** as it existing before this routine was called. + ** + ** IMPORTANT: Changing the PENDING byte from 0x40000000 results in + ** an incompatible database file format. Changing the PENDING byte + ** while any database connection is open results in undefined and + ** deleterious behavior. + */ + case SQLITE_TESTCTRL_PENDING_BYTE: { + rc = PENDING_BYTE; +#ifndef SQLITE_OMIT_WSD + { + unsigned int newVal = va_arg(ap, unsigned int); + if( newVal ) sqlite3PendingByte = newVal; + } +#endif + break; + } + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, int X) + ** + ** This action provides a run-time test to see whether or not + ** assert() was enabled at compile-time. If X is true and assert() + ** is enabled, then the return value is true. If X is true and + ** assert() is disabled, then the return value is zero. If X is + ** false and assert() is enabled, then the assertion fires and the + ** process aborts. 
If X is false and assert() is disabled, then the + ** return value is zero. + */ + case SQLITE_TESTCTRL_ASSERT: { + volatile int x = 0; + assert( /*side-effects-ok*/ (x = va_arg(ap,int))!=0 ); + rc = x; + break; + } + + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, int X) + ** + ** This action provides a run-time test to see how the ALWAYS and + ** NEVER macros were defined at compile-time. + ** + ** The return value is ALWAYS(X). + ** + ** The recommended test is X==2. If the return value is 2, that means + ** ALWAYS() and NEVER() are both no-op pass-through macros, which is the + ** default setting. If the return value is 1, then ALWAYS() is either + ** hard-coded to true or else it asserts if its argument is false. + ** The first behavior (hard-coded to true) is the case if + ** SQLITE_TESTCTRL_ASSERT shows that assert() is disabled and the second + ** behavior (assert if the argument to ALWAYS() is false) is the case if + ** SQLITE_TESTCTRL_ASSERT shows that assert() is enabled. + ** + ** The run-time test procedure might look something like this: + ** + ** if( sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, 2)==2 ){ + ** // ALWAYS() and NEVER() are no-op pass-through macros + ** }else if( sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, 1) ){ + ** // ALWAYS(x) asserts that x is true. NEVER(x) asserts x is false. + ** }else{ + ** // ALWAYS(x) is a constant 1. NEVER(x) is a constant 0. + ** } + */ + case SQLITE_TESTCTRL_ALWAYS: { + int x = va_arg(ap,int); + rc = ALWAYS(x); + break; + } + + /* + ** sqlite3_test_control(SQLITE_TESTCTRL_BYTEORDER); + ** + ** The integer returned reveals the byte-order of the computer on which + ** SQLite is running: + ** + ** 1 big-endian, determined at run-time + ** 10 little-endian, determined at run-time + ** 432101 big-endian, determined at compile-time + ** 123410 little-endian, determined at compile-time + */ + case SQLITE_TESTCTRL_BYTEORDER: { + rc = SQLITE_BYTEORDER*100 + SQLITE_LITTLEENDIAN*10 + SQLITE_BIGENDIAN; + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_RESERVE, sqlite3 *db, int N) + ** + ** Set the nReserve size to N for the main database on the database + ** connection db. + */ + case SQLITE_TESTCTRL_RESERVE: { + sqlite3 *db = va_arg(ap, sqlite3*); + int x = va_arg(ap,int); + sqlite3_mutex_enter(db->mutex); + sqlite3BtreeSetPageSize(db->aDb[0].pBt, 0, x, 0); + sqlite3_mutex_leave(db->mutex); + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, sqlite3 *db, int N) + ** + ** Enable or disable various optimizations for testing purposes. The + ** argument N is a bitmask of optimizations to be disabled. For normal + ** operation N should be 0. The idea is that a test program (like the + ** SQL Logic Test or SLT test module) can run the same SQL multiple times + ** with various optimizations disabled to verify that the same answer + ** is obtained in every case. + */ + case SQLITE_TESTCTRL_OPTIMIZATIONS: { + sqlite3 *db = va_arg(ap, sqlite3*); + db->dbOptFlags = (u16)(va_arg(ap, int) & 0xffff); + break; + } + +#ifdef SQLITE_N_KEYWORD + /* sqlite3_test_control(SQLITE_TESTCTRL_ISKEYWORD, const char *zWord) + ** + ** If zWord is a keyword recognized by the parser, then return the + ** number of keywords. Or if zWord is not a keyword, return 0. + ** + ** This test feature is only available in the amalgamation since + ** the SQLITE_N_KEYWORD macro is not defined in this file if SQLite + ** is built using separate source files. 
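+  **
+  ** For instance (a sketch): sqlite3_test_control(SQLITE_TESTCTRL_ISKEYWORD,
+  ** "SELECT") returns SQLITE_N_KEYWORD, while passing "notakeyword"
+  ** returns 0.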
+ */ + case SQLITE_TESTCTRL_ISKEYWORD: { + const char *zWord = va_arg(ap, const char*); + int n = sqlite3Strlen30(zWord); + rc = (sqlite3KeywordCode((u8*)zWord, n)!=TK_ID) ? SQLITE_N_KEYWORD : 0; + break; + } +#endif + + /* sqlite3_test_control(SQLITE_TESTCTRL_SCRATCHMALLOC, sz, &pNew, pFree); + ** + ** Pass pFree into sqlite3ScratchFree(). + ** If sz>0 then allocate a scratch buffer into pNew. + */ + case SQLITE_TESTCTRL_SCRATCHMALLOC: { + void *pFree, **ppNew; + int sz; + sz = va_arg(ap, int); + ppNew = va_arg(ap, void**); + pFree = va_arg(ap, void*); + if( sz ) *ppNew = sqlite3ScratchMalloc(sz); + sqlite3ScratchFree(pFree); + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, int onoff); + ** + ** If parameter onoff is non-zero, configure the wrappers so that all + ** subsequent calls to localtime() and variants fail. If onoff is zero, + ** undo this setting. + */ + case SQLITE_TESTCTRL_LOCALTIME_FAULT: { + sqlite3GlobalConfig.bLocaltimeFault = va_arg(ap, int); + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_NEVER_CORRUPT, int); + ** + ** Set or clear a flag that indicates that the database file is always well- + ** formed and never corrupt. This flag is clear by default, indicating that + ** database files might have arbitrary corruption. Setting the flag during + ** testing causes certain assert() statements in the code to be activated + ** that demonstrat invariants on well-formed database files. + */ + case SQLITE_TESTCTRL_NEVER_CORRUPT: { + sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int); + break; + } + + /* Set the threshold at which OP_Once counters reset back to zero. + ** By default this is 0x7ffffffe (over 2 billion), but that value is + ** too big to test in a reasonable amount of time, so this control is + ** provided to set a small and easily reachable reset value. + */ + case SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD: { + sqlite3GlobalConfig.iOnceResetThreshold = va_arg(ap, int); + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE, xCallback, ptr); + ** + ** Set the VDBE coverage callback function to xCallback with context + ** pointer ptr. + */ + case SQLITE_TESTCTRL_VDBE_COVERAGE: { +#ifdef SQLITE_VDBE_COVERAGE + typedef void (*branch_callback)(void*,int,u8,u8); + sqlite3GlobalConfig.xVdbeBranch = va_arg(ap,branch_callback); + sqlite3GlobalConfig.pVdbeBranchArg = va_arg(ap,void*); +#endif + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_SORTER_MMAP, db, nMax); */ + case SQLITE_TESTCTRL_SORTER_MMAP: { + sqlite3 *db = va_arg(ap, sqlite3*); + db->nMaxSorterMmap = va_arg(ap, int); + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_ISINIT); + ** + ** Return SQLITE_OK if SQLite has been initialized and SQLITE_ERROR if + ** not. + */ + case SQLITE_TESTCTRL_ISINIT: { + if( sqlite3GlobalConfig.isInit==0 ) rc = SQLITE_ERROR; + break; + } + + /* sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, dbName, onOff, tnum); + ** + ** This test control is used to create imposter tables. "db" is a pointer + ** to the database connection. dbName is the database name (ex: "main" or + ** "temp") which will receive the imposter. "onOff" turns imposter mode on + ** or off. "tnum" is the root page of the b-tree to which the imposter + ** table should connect. + ** + ** Enable imposter mode only when the schema has already been parsed. Then + ** run a single CREATE TABLE statement to construct the imposter table in + ** the parsed schema. Then turn imposter mode back off again. 
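+  **
+  ** The intended call sequence is therefore (a sketch; tnum is supplied
+  ** by the caller):
+  **
+  **    sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, "main", 1, tnum);
+  **    sqlite3_exec(db, "CREATE TABLE imp(a,b)", 0, 0, 0);
+  **    sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, "main", 0, 0);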
+ ** + ** If onOff==0 and tnum>0 then reset the schema for all databases, causing + ** the schema to be reparsed the next time it is needed. This has the + ** effect of erasing all imposter tables. + */ + case SQLITE_TESTCTRL_IMPOSTER: { + sqlite3 *db = va_arg(ap, sqlite3*); + sqlite3_mutex_enter(db->mutex); + db->init.iDb = sqlite3FindDbName(db, va_arg(ap,const char*)); + db->init.busy = db->init.imposterTable = va_arg(ap,int); + db->init.newTnum = va_arg(ap,int); + if( db->init.busy==0 && db->init.newTnum>0 ){ + sqlite3ResetAllSchemasOfConnection(db); + } + sqlite3_mutex_leave(db->mutex); + break; + } + } + va_end(ap); +#endif /* SQLITE_UNTESTABLE */ + return rc; +} + +/* +** This is a utility routine, useful to VFS implementations, that checks +** to see if a database file was a URI that contained a specific query +** parameter, and if so obtains the value of the query parameter. +** +** The zFilename argument is the filename pointer passed into the xOpen() +** method of a VFS implementation. The zParam argument is the name of the +** query parameter we seek. This routine returns the value of the zParam +** parameter if it exists. If the parameter does not exist, this routine +** returns a NULL pointer. +*/ +SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam){ + if( zFilename==0 || zParam==0 ) return 0; + zFilename += sqlite3Strlen30(zFilename) + 1; + while( zFilename[0] ){ + int x = strcmp(zFilename, zParam); + zFilename += sqlite3Strlen30(zFilename) + 1; + if( x==0 ) return zFilename; + zFilename += sqlite3Strlen30(zFilename) + 1; + } + return 0; +} + +/* +** Return a boolean value for a query parameter. +*/ +SQLITE_API int sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){ + const char *z = sqlite3_uri_parameter(zFilename, zParam); + bDflt = bDflt!=0; + return z ? sqlite3GetBoolean(z, bDflt) : bDflt; +} + +/* +** Return a 64-bit integer value for a query parameter. +*/ +SQLITE_API sqlite3_int64 sqlite3_uri_int64( + const char *zFilename, /* Filename as passed to xOpen */ + const char *zParam, /* URI parameter sought */ + sqlite3_int64 bDflt /* return if parameter is missing */ +){ + const char *z = sqlite3_uri_parameter(zFilename, zParam); + sqlite3_int64 v; + if( z && sqlite3DecOrHexToI64(z, &v)==SQLITE_OK ){ + bDflt = v; + } + return bDflt; +} + +/* +** Return the Btree pointer identified by zDbName. Return NULL if not found. +*/ +SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){ + int iDb = zDbName ? sqlite3FindDbName(db, zDbName) : 0; + return iDb<0 ? 0 : db->aDb[iDb].pBt; +} + +/* +** Return the filename of the database associated with a database +** connection. +*/ +SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){ + Btree *pBt; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + pBt = sqlite3DbNameToBtree(db, zDbName); + return pBt ? sqlite3BtreeGetFilename(pBt) : 0; +} + +/* +** Return 1 if database is read-only or 0 if read/write. Return -1 if +** no such database exists. +*/ +SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName){ + Btree *pBt; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + (void)SQLITE_MISUSE_BKPT; + return -1; + } +#endif + pBt = sqlite3DbNameToBtree(db, zDbName); + return pBt ? 
sqlite3BtreeIsReadonly(pBt) : -1; +} + +#ifdef SQLITE_ENABLE_SNAPSHOT +/* +** Obtain a snapshot handle for the snapshot of database zDb currently +** being read by handle db. +*/ +SQLITE_API int sqlite3_snapshot_get( + sqlite3 *db, + const char *zDb, + sqlite3_snapshot **ppSnapshot +){ + int rc = SQLITE_ERROR; +#ifndef SQLITE_OMIT_WAL + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } +#endif + sqlite3_mutex_enter(db->mutex); + + if( db->autoCommit==0 ){ + int iDb = sqlite3FindDbName(db, zDb); + if( iDb==0 || iDb>1 ){ + Btree *pBt = db->aDb[iDb].pBt; + if( 0==sqlite3BtreeIsInTrans(pBt) ){ + rc = sqlite3BtreeBeginTrans(pBt, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot); + } + } + } + } + + sqlite3_mutex_leave(db->mutex); +#endif /* SQLITE_OMIT_WAL */ + return rc; +} + +/* +** Open a read-transaction on the snapshot idendified by pSnapshot. +*/ +SQLITE_API int sqlite3_snapshot_open( + sqlite3 *db, + const char *zDb, + sqlite3_snapshot *pSnapshot +){ + int rc = SQLITE_ERROR; +#ifndef SQLITE_OMIT_WAL + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } +#endif + sqlite3_mutex_enter(db->mutex); + if( db->autoCommit==0 ){ + int iDb; + iDb = sqlite3FindDbName(db, zDb); + if( iDb==0 || iDb>1 ){ + Btree *pBt = db->aDb[iDb].pBt; + if( 0==sqlite3BtreeIsInReadTrans(pBt) ){ + rc = sqlite3PagerSnapshotOpen(sqlite3BtreePager(pBt), pSnapshot); + if( rc==SQLITE_OK ){ + rc = sqlite3BtreeBeginTrans(pBt, 0); + sqlite3PagerSnapshotOpen(sqlite3BtreePager(pBt), 0); + } + } + } + } + + sqlite3_mutex_leave(db->mutex); +#endif /* SQLITE_OMIT_WAL */ + return rc; +} + +/* +** Recover as many snapshots as possible from the wal file associated with +** schema zDb of database db. +*/ +SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){ + int rc = SQLITE_ERROR; + int iDb; +#ifndef SQLITE_OMIT_WAL + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } +#endif + + sqlite3_mutex_enter(db->mutex); + iDb = sqlite3FindDbName(db, zDb); + if( iDb==0 || iDb>1 ){ + Btree *pBt = db->aDb[iDb].pBt; + if( 0==sqlite3BtreeIsInReadTrans(pBt) ){ + rc = sqlite3BtreeBeginTrans(pBt, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3PagerSnapshotRecover(sqlite3BtreePager(pBt)); + sqlite3BtreeCommit(pBt); + } + } + } + sqlite3_mutex_leave(db->mutex); +#endif /* SQLITE_OMIT_WAL */ + return rc; +} + +/* +** Free a snapshot handle obtained from sqlite3_snapshot_get(). +*/ +SQLITE_API void sqlite3_snapshot_free(sqlite3_snapshot *pSnapshot){ + sqlite3_free(pSnapshot); +} +#endif /* SQLITE_ENABLE_SNAPSHOT */ + +/************** End of main.c ************************************************/ +/************** Begin file notify.c ******************************************/ +/* +** 2009 March 3 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** +** This file contains the implementation of the sqlite3_unlock_notify() +** API method and its associated functionality. +*/ +/* #include "sqliteInt.h" */ +/* #include "btreeInt.h" */ + +/* Omit this entire file if SQLITE_ENABLE_UNLOCK_NOTIFY is not defined. 
*/ +#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY + +/* +** Public interfaces: +** +** sqlite3ConnectionBlocked() +** sqlite3ConnectionUnlocked() +** sqlite3ConnectionClosed() +** sqlite3_unlock_notify() +*/ + +#define assertMutexHeld() \ + assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ) + +/* +** Head of a linked list of all sqlite3 objects created by this process +** for which either sqlite3.pBlockingConnection or sqlite3.pUnlockConnection +** is not NULL. This variable may only accessed while the STATIC_MASTER +** mutex is held. +*/ +static sqlite3 *SQLITE_WSD sqlite3BlockedList = 0; + +#ifndef NDEBUG +/* +** This function is a complex assert() that verifies the following +** properties of the blocked connections list: +** +** 1) Each entry in the list has a non-NULL value for either +** pUnlockConnection or pBlockingConnection, or both. +** +** 2) All entries in the list that share a common value for +** xUnlockNotify are grouped together. +** +** 3) If the argument db is not NULL, then none of the entries in the +** blocked connections list have pUnlockConnection or pBlockingConnection +** set to db. This is used when closing connection db. +*/ +static void checkListProperties(sqlite3 *db){ + sqlite3 *p; + for(p=sqlite3BlockedList; p; p=p->pNextBlocked){ + int seen = 0; + sqlite3 *p2; + + /* Verify property (1) */ + assert( p->pUnlockConnection || p->pBlockingConnection ); + + /* Verify property (2) */ + for(p2=sqlite3BlockedList; p2!=p; p2=p2->pNextBlocked){ + if( p2->xUnlockNotify==p->xUnlockNotify ) seen = 1; + assert( p2->xUnlockNotify==p->xUnlockNotify || !seen ); + assert( db==0 || p->pUnlockConnection!=db ); + assert( db==0 || p->pBlockingConnection!=db ); + } + } +} +#else +# define checkListProperties(x) +#endif + +/* +** Remove connection db from the blocked connections list. If connection +** db is not currently a part of the list, this function is a no-op. +*/ +static void removeFromBlockedList(sqlite3 *db){ + sqlite3 **pp; + assertMutexHeld(); + for(pp=&sqlite3BlockedList; *pp; pp = &(*pp)->pNextBlocked){ + if( *pp==db ){ + *pp = (*pp)->pNextBlocked; + break; + } + } +} + +/* +** Add connection db to the blocked connections list. It is assumed +** that it is not already a part of the list. +*/ +static void addToBlockedList(sqlite3 *db){ + sqlite3 **pp; + assertMutexHeld(); + for( + pp=&sqlite3BlockedList; + *pp && (*pp)->xUnlockNotify!=db->xUnlockNotify; + pp=&(*pp)->pNextBlocked + ); + db->pNextBlocked = *pp; + *pp = db; +} + +/* +** Obtain the STATIC_MASTER mutex. +*/ +static void enterMutex(void){ + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); + checkListProperties(0); +} + +/* +** Release the STATIC_MASTER mutex. +*/ +static void leaveMutex(void){ + assertMutexHeld(); + checkListProperties(0); + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); +} + +/* +** Register an unlock-notify callback. +** +** This is called after connection "db" has attempted some operation +** but has received an SQLITE_LOCKED error because another connection +** (call it pOther) in the same process was busy using the same shared +** cache. pOther is found by looking at db->pBlockingConnection. +** +** If there is no blocking connection, the callback is invoked immediately, +** before this routine returns. +** +** If pOther is already blocked on db, then report SQLITE_LOCKED, to indicate +** a deadlock. +** +** Otherwise, make arrangements to invoke xNotify when pOther drops +** its locks. 
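+**
+** A caller-side sketch (xNotify and pNotifyArg are application-defined,
+** not part of this file):
+**
+**    rc = sqlite3_step(pStmt);
+**    if( rc==SQLITE_LOCKED ){
+**      rc = sqlite3_unlock_notify(db, xNotify, pNotifyArg);
+**      /* rc==SQLITE_LOCKED here means a deadlock was detected */
+**    }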
+** +** Each call to this routine overrides any prior callbacks registered +** on the same "db". If xNotify==0 then any prior callbacks are immediately +** cancelled. +*/ +SQLITE_API int sqlite3_unlock_notify( + sqlite3 *db, + void (*xNotify)(void **, int), + void *pArg +){ + int rc = SQLITE_OK; + + sqlite3_mutex_enter(db->mutex); + enterMutex(); + + if( xNotify==0 ){ + removeFromBlockedList(db); + db->pBlockingConnection = 0; + db->pUnlockConnection = 0; + db->xUnlockNotify = 0; + db->pUnlockArg = 0; + }else if( 0==db->pBlockingConnection ){ + /* The blocking transaction has been concluded. Or there never was a + ** blocking transaction. In either case, invoke the notify callback + ** immediately. + */ + xNotify(&pArg, 1); + }else{ + sqlite3 *p; + + for(p=db->pBlockingConnection; p && p!=db; p=p->pUnlockConnection){} + if( p ){ + rc = SQLITE_LOCKED; /* Deadlock detected. */ + }else{ + db->pUnlockConnection = db->pBlockingConnection; + db->xUnlockNotify = xNotify; + db->pUnlockArg = pArg; + removeFromBlockedList(db); + addToBlockedList(db); + } + } + + leaveMutex(); + assert( !db->mallocFailed ); + sqlite3ErrorWithMsg(db, rc, (rc?"database is deadlocked":0)); + sqlite3_mutex_leave(db->mutex); + return rc; +} + +/* +** This function is called while stepping or preparing a statement +** associated with connection db. The operation will return SQLITE_LOCKED +** to the user because it requires a lock that will not be available +** until connection pBlocker concludes its current transaction. +*/ +SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *db, sqlite3 *pBlocker){ + enterMutex(); + if( db->pBlockingConnection==0 && db->pUnlockConnection==0 ){ + addToBlockedList(db); + } + db->pBlockingConnection = pBlocker; + leaveMutex(); +} + +/* +** This function is called when +** the transaction opened by database db has just finished. Locks held +** by database connection db have been released. +** +** This function loops through each entry in the blocked connections +** list and does the following: +** +** 1) If the sqlite3.pBlockingConnection member of a list entry is +** set to db, then set pBlockingConnection=0. +** +** 2) If the sqlite3.pUnlockConnection member of a list entry is +** set to db, then invoke the configured unlock-notify callback and +** set pUnlockConnection=0. +** +** 3) If the two steps above mean that pBlockingConnection==0 and +** pUnlockConnection==0, remove the entry from the blocked connections +** list. +*/ +SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db){ + void (*xUnlockNotify)(void **, int) = 0; /* Unlock-notify cb to invoke */ + int nArg = 0; /* Number of entries in aArg[] */ + sqlite3 **pp; /* Iterator variable */ + void **aArg; /* Arguments to the unlock callback */ + void **aDyn = 0; /* Dynamically allocated space for aArg[] */ + void *aStatic[16]; /* Starter space for aArg[]. No malloc required */ + + aArg = aStatic; + enterMutex(); /* Enter STATIC_MASTER mutex */ + + /* This loop runs once for each entry in the blocked-connections list. */ + for(pp=&sqlite3BlockedList; *pp; /* no-op */ ){ + sqlite3 *p = *pp; + + /* Step 1. */ + if( p->pBlockingConnection==db ){ + p->pBlockingConnection = 0; + } + + /* Step 2. 
*/ + if( p->pUnlockConnection==db ){ + assert( p->xUnlockNotify ); + if( p->xUnlockNotify!=xUnlockNotify && nArg!=0 ){ + xUnlockNotify(aArg, nArg); + nArg = 0; + } + + sqlite3BeginBenignMalloc(); + assert( aArg==aDyn || (aDyn==0 && aArg==aStatic) ); + assert( nArg<=(int)ArraySize(aStatic) || aArg==aDyn ); + if( (!aDyn && nArg==(int)ArraySize(aStatic)) + || (aDyn && nArg==(int)(sqlite3MallocSize(aDyn)/sizeof(void*))) + ){ + /* The aArg[] array needs to grow. */ + void **pNew = (void **)sqlite3Malloc(nArg*sizeof(void *)*2); + if( pNew ){ + memcpy(pNew, aArg, nArg*sizeof(void *)); + sqlite3_free(aDyn); + aDyn = aArg = pNew; + }else{ + /* This occurs when the array of context pointers that need to + ** be passed to the unlock-notify callback is larger than the + ** aStatic[] array allocated on the stack and the attempt to + ** allocate a larger array from the heap has failed. + ** + ** This is a difficult situation to handle. Returning an error + ** code to the caller is insufficient, as even if an error code + ** is returned the transaction on connection db will still be + ** closed and the unlock-notify callbacks on blocked connections + ** will go unissued. This might cause the application to wait + ** indefinitely for an unlock-notify callback that will never + ** arrive. + ** + ** Instead, invoke the unlock-notify callback with the context + ** array already accumulated. We can then clear the array and + ** begin accumulating any further context pointers without + ** requiring any dynamic allocation. This is sub-optimal because + ** it means that instead of one callback with a large array of + ** context pointers the application will receive two or more + ** callbacks with smaller arrays of context pointers, which will + ** reduce the applications ability to prioritize multiple + ** connections. But it is the best that can be done under the + ** circumstances. + */ + xUnlockNotify(aArg, nArg); + nArg = 0; + } + } + sqlite3EndBenignMalloc(); + + aArg[nArg++] = p->pUnlockArg; + xUnlockNotify = p->xUnlockNotify; + p->pUnlockConnection = 0; + p->xUnlockNotify = 0; + p->pUnlockArg = 0; + } + + /* Step 3. */ + if( p->pBlockingConnection==0 && p->pUnlockConnection==0 ){ + /* Remove connection p from the blocked connections list. */ + *pp = p->pNextBlocked; + p->pNextBlocked = 0; + }else{ + pp = &p->pNextBlocked; + } + } + + if( nArg!=0 ){ + xUnlockNotify(aArg, nArg); + } + sqlite3_free(aDyn); + leaveMutex(); /* Leave STATIC_MASTER mutex */ +} + +/* +** This is called when the database connection passed as an argument is +** being closed. The connection is removed from the blocked list. +*/ +SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ + sqlite3ConnectionUnlocked(db); + enterMutex(); + removeFromBlockedList(db); + checkListProperties(db); + leaveMutex(); +} +#endif + +/************** End of notify.c **********************************************/ +/************** Begin file fts3.c ********************************************/ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This is an SQLite module implementing full-text search. 
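+**
+** For orientation, the module is driven from SQL roughly like this
+** (an illustrative sketch; table and column names are invented):
+**
+**    CREATE VIRTUAL TABLE pages USING fts3(title, body);
+**    INSERT INTO pages(title, body) VALUES('intro', 'full text search');
+**    SELECT title FROM pages WHERE pages MATCH 'text';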
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+**     * The FTS3 module is being built as an extension
+**       (in which case SQLITE_CORE is not defined), or
+**
+**     * The FTS3 module is being built into the core of
+**       SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/
+
+/* The full-text index is stored in a series of b+tree (-like)
+** structures called segments which map terms to doclists.  The
+** structures are like b+trees in layout, but are constructed from the
+** bottom up in optimal fashion and are not updatable.  Since trees
+** are built from the bottom up, things will be described from the
+** bottom up.
+**
+**
+**** Varints ****
+** The basic unit of encoding is a variable-length integer called a
+** varint.  We encode variable-length integers in little-endian order
+** using seven bits per byte as follows:
+**
+** KEY:
+**         A = 0xxxxxxx    7 bits of data and one flag bit
+**         B = 1xxxxxxx    7 bits of data and one flag bit
+**
+**  7 bits - A
+** 14 bits - BA
+** 21 bits - BBA
+** and so on.
+**
+** This is similar in concept to how sqlite encodes "varints" but
+** the encoding is not the same.  SQLite varints are big-endian
+** and are limited to 9 bytes in length whereas FTS3 varints are
+** little-endian and can be up to 10 bytes in length (in theory).
+**
+** Example encodings:
+**
+**     1:    0x01
+**   127:    0x7f
+**   128:    0x80 0x01
+**
+**
+**** Document lists ****
+** A doclist (document list) holds a docid-sorted list of hits for a
+** given term.  Doclists hold docids and associated token positions.
+** A docid is the unique integer identifier for a single document.
+** A position is the index of a word within the document.  The first
+** word of the document has a position of 0.
+**
+** FTS3 used to optionally store character offsets using a compile-time
+** option.  But that functionality is no longer supported.
+**
+** A doclist is stored like this:
+**
+** array {
+**   varint docid;          (delta from previous doclist)
+**   array {                (position list for column 0)
+**     varint position;     (2 more than the delta from previous position)
+**   }
+**   array {
+**     varint POS_COLUMN;   (marks start of position list for new column)
+**     varint column;       (index of new column)
+**     array {
+**       varint position;   (2 more than the delta from previous position)
+**     }
+**   }
+**   varint POS_END;        (marks end of positions for this document.)
+** }
+**
+** Here, array { X } means zero or more occurrences of X, adjacent in
+** memory.  A "position" is an index of a token in the token stream
+** generated by the tokenizer. Note that POS_END and POS_COLUMN occur
+** in the same logical place as the position element, and act as sentinels
+** ending a position list array.  POS_END is 0.  POS_COLUMN is 1.
+** The position numbers are not stored literally but rather as two more
+** than the difference from the prior position, or just the position plus
+** 2 for the first position.  Example:
+**
+**   label:       A B C D E  F  G H   I  J K
+**   value:     123 5 9 1 1 14 35 0 234 72 0
+**
+** The 123 value is the first docid.  For column zero in this document
+** there are two matches at positions 3 and 10 (5-2 and 9-2+3).  The 1
+** at D signals the start of a new column; the 1 at E indicates that the
+** new column is column number 1.  There are two positions at 12 and 45
+** (14-2 and 35-2+12).  The 0 at H indicates the end-of-document.  The
+** 234 at I is the delta to next docid (357).  It has one position 70
+** (72-2) and then terminates with the 0 at K.
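+**
+** A decoding sketch for the varints used above (illustrative only):
+**
+**    v = 0;  shift = 0;
+**    do{
+**      c = *p++;
+**      v |= (sqlite3_int64)(c & 0x7f) << shift;
+**      shift += 7;
+**    }while( c & 0x80 );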
+** +** A "position-list" is the list of positions for multiple columns for +** a single docid. A "column-list" is the set of positions for a single +** column. Hence, a position-list consists of one or more column-lists, +** a document record consists of a docid followed by a position-list and +** a doclist consists of one or more document records. +** +** A bare doclist omits the position information, becoming an +** array of varint-encoded docids. +** +**** Segment leaf nodes **** +** Segment leaf nodes store terms and doclists, ordered by term. Leaf +** nodes are written using LeafWriter, and read using LeafReader (to +** iterate through a single leaf node's data) and LeavesReader (to +** iterate through a segment's entire leaf layer). Leaf nodes have +** the format: +** +** varint iHeight; (height from leaf level, always 0) +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of prefix shared with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix];(unshared suffix of next term) +** varint nDoclist; (length of term's associated doclist) +** char pDoclist[nDoclist]; (content of doclist) +** } +** +** Here, array { X } means zero or more occurrences of X, adjacent in +** memory. +** +** Leaf nodes are broken into blocks which are stored contiguously in +** the %_segments table in sorted order. This means that when the end +** of a node is reached, the next term is in the node with the next +** greater node id. +** +** New data is spilled to a new leaf node when the current node +** exceeds LEAF_MAX bytes (default 2048). New data which itself is +** larger than STANDALONE_MIN (default 1024) is placed in a standalone +** node (a leaf node with a single term and doclist). The goal of +** these settings is to pack together groups of small doclists while +** making it efficient to directly access large doclists. The +** assumption is that large doclists represent terms which are more +** likely to be query targets. +** +** TODO(shess) It may be useful for blocking decisions to be more +** dynamic. For instance, it may make more sense to have a 2.5k leaf +** node rather than splitting into 2k and .5k nodes. My intuition is +** that this might extend through 2x or 4x the pagesize. +** +** +**** Segment interior nodes **** +** Segment interior nodes store blockids for subtree nodes and terms +** to describe what data is stored by the each subtree. Interior +** nodes are written using InteriorWriter, and read using +** InteriorReader. InteriorWriters are created as needed when +** SegmentWriter creates new leaf nodes, or when an interior node +** itself grows too big and must be split. The format of interior +** nodes: +** +** varint iHeight; (height from leaf level, always >0) +** varint iBlockid; (block id of node's leftmost subtree) +** optional { +** varint nTerm; (length of first term) +** char pTerm[nTerm]; (content of first term) +** array { +** (further terms are delta-encoded) +** varint nPrefix; (length of shared prefix with previous term) +** varint nSuffix; (length of unshared suffix) +** char pTermSuffix[nSuffix]; (unshared suffix of next term) +** } +** } +** +** Here, optional { X } means an optional element, while array { X } +** means zero or more occurrences of X, adjacent in memory. 
+** +** An interior node encodes n terms separating n+1 subtrees. The +** subtree blocks are contiguous, so only the first subtree's blockid +** is encoded. The subtree at iBlockid will contain all terms less +** than the first term encoded (or all terms if no term is encoded). +** Otherwise, for terms greater than or equal to pTerm[i] but less +** than pTerm[i+1], the subtree for that term will be rooted at +** iBlockid+i. Interior nodes only store enough term data to +** distinguish adjacent children (if the rightmost term of the left +** child is "something", and the leftmost term of the right child is +** "wicked", only "w" is stored). +** +** New data is spilled to a new interior node at the same height when +** the current node exceeds INTERIOR_MAX bytes (default 2048). +** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing +** interior nodes and making the tree too skinny. The interior nodes +** at a given height are naturally tracked by interior nodes at +** height+1, and so on. +** +** +**** Segment directory **** +** The segment directory in table %_segdir stores meta-information for +** merging and deleting segments, and also the root node of the +** segment's tree. +** +** The root node is the top node of the segment's tree after encoding +** the entire segment, restricted to ROOT_MAX bytes (default 1024). +** This could be either a leaf node or an interior node. If the top +** node requires more than ROOT_MAX bytes, it is flushed to %_segments +** and a new root interior node is generated (which should always fit +** within ROOT_MAX because it only needs space for 2 varints, the +** height and the blockid of the previous root). +** +** The meta-information in the segment directory is: +** level - segment level (see below) +** idx - index within level +** - (level,idx uniquely identify a segment) +** start_block - first leaf node +** leaves_end_block - last leaf node +** end_block - last block (including interior nodes) +** root - contents of root node +** +** If the root node is a leaf node, then start_block, +** leaves_end_block, and end_block are all 0. +** +** +**** Segment merging **** +** To amortize update costs, segments are grouped into levels and +** merged in batches. Each increase in level represents exponentially +** more documents. +** +** New documents (actually, document updates) are tokenized and +** written individually (using LeafWriter) to a level 0 segment, with +** incrementing idx. When idx reaches MERGE_COUNT (default 16), all +** level 0 segments are merged into a single level 1 segment. Level 1 +** is populated like level 0, and eventually MERGE_COUNT level 1 +** segments are merged to a single level 2 segment (representing +** MERGE_COUNT^2 updates), and so on. +** +** A segment merge traverses all segments at a given level in +** parallel, performing a straightforward sorted merge. Since segment +** leaf nodes are written in to the %_segments table in order, this +** merge traverses the underlying sqlite disk structures efficiently. +** After the merge, all segment blocks from the merged level are +** deleted. +** +** MERGE_COUNT controls how often we merge segments. 16 seems to be +** somewhat of a sweet spot for insertion performance. 32 and 64 show +** very similar performance numbers to 16 on insertion, though they're +** a tiny bit slower (perhaps due to more overhead in merge-time +** sorting). 8 is about 20% slower than 16, 4 about 50% slower than +** 16, 2 about 66% slower than 16. 
+**
+** At query time, high MERGE_COUNT increases the number of segments
+** which need to be scanned and merged.  For instance, with 100k docs
+** inserted:
+**
+**    MERGE_COUNT   segments
+**       16           25
+**        8           12
+**        4           10
+**        2            6
+**
+** This appears to have only a moderate impact on queries for very
+** frequent terms (which are somewhat dominated by segment merge
+** costs), and infrequent and non-existent terms still seem to be fast
+** even with many segments.
+**
+** TODO(shess) That said, it would be nice to have a better query-side
+** argument for MERGE_COUNT of 16.  Also, it is possible/likely that
+** optimizations to things like doclist merging will swing the sweet
+** spot around.
+**
+**
+**
+**** Handling of deletions and updates ****
+** Since we're using a segmented structure, with no docid-oriented
+** index into the term index, we clearly cannot simply update the term
+** index when a document is deleted or updated.  For deletions, we
+** write an empty doclist (varint(docid) varint(POS_END)), for updates
+** we simply write the new doclist.  Segment merges overwrite older
+** data for a particular docid with newer data, so deletes or updates
+** will eventually overtake the earlier data and knock it out.  The
+** query logic likewise merges doclists so that newer data knocks out
+** older data.
+*/
+
+/************** Include fts3Int.h in the middle of fts3.c ********************/
+/************** Begin file fts3Int.h *****************************************/
+/*
+** 2009 Nov 12
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+*/
+#ifndef _FTSINT_H
+#define _FTSINT_H
+
+#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
+# define NDEBUG 1
+#endif
+
+/* FTS3/FTS4 require virtual tables */
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+# undef SQLITE_ENABLE_FTS3
+# undef SQLITE_ENABLE_FTS4
+#endif
+
+/*
+** FTS4 is really an extension for FTS3.  It is enabled using the
+** SQLITE_ENABLE_FTS3 macro.  But to avoid confusion we also allow
+** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
+*/
+#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
+# define SQLITE_ENABLE_FTS3
+#endif
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* If not building as part of the core, include sqlite3ext.h. */
+#ifndef SQLITE_CORE
+/* # include "sqlite3ext.h" */
+SQLITE_EXTENSION_INIT3
+#endif
+
+/* #include "sqlite3.h" */
+/************** Include fts3_tokenizer.h in the middle of fts3Int.h **********/
+/************** Begin file fts3_tokenizer.h **********************************/
+/*
+** 2006 July 10
+**
+** The author disclaims copyright to this source code.
+**
+*************************************************************************
+** Defines the interface to tokenizers used by fulltext-search.  There
+** are three basic components:
+**
+** sqlite3_tokenizer_module is a singleton defining the tokenizer
+** interface functions.  This is essentially the class structure for
+** tokenizers.
+**
+** sqlite3_tokenizer is used to define a particular tokenizer, perhaps
+** including customization information defined at creation time.
+**
+** sqlite3_tokenizer_cursor is generated by a tokenizer to generate
+** tokens from a particular input.
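+**
+** Putting the three together, a consumer drives tokenization roughly as
+** follows (a sketch; pModule and pTok come from the tokenizer registry):
+**
+**    sqlite3_tokenizer_cursor *pCsr;
+**    pModule->xOpen(pTok, zText, nText, &pCsr);
+**    pCsr->pTokenizer = pTok;
+**    while( pModule->xNext(pCsr, &z, &n, &iStart, &iEnd, &iPos)==SQLITE_OK ){
+**      /* use the token z[0..n-1] */
+**    }
+**    pModule->xClose(pCsr);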
+*/ +#ifndef _FTS3_TOKENIZER_H_ +#define _FTS3_TOKENIZER_H_ + +/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. +** If tokenizers are to be allowed to call sqlite3_*() functions, then +** we will need a way to register the API consistently. +*/ +/* #include "sqlite3.h" */ + +/* +** Structures used by the tokenizer interface. When a new tokenizer +** implementation is registered, the caller provides a pointer to +** an sqlite3_tokenizer_module containing pointers to the callback +** functions that make up an implementation. +** +** When an fts3 table is created, it passes any arguments passed to +** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the +** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer +** implementation. The xCreate() function in turn returns an +** sqlite3_tokenizer structure representing the specific tokenizer to +** be used for the fts3 table (customized by the tokenizer clause arguments). +** +** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() +** method is called. It returns an sqlite3_tokenizer_cursor object +** that may be used to tokenize a specific input buffer based on +** the tokenization rules supplied by a specific sqlite3_tokenizer +** object. +*/ +typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; +typedef struct sqlite3_tokenizer sqlite3_tokenizer; +typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; + +struct sqlite3_tokenizer_module { + + /* + ** Structure version. Should always be set to 0 or 1. + */ + int iVersion; + + /* + ** Create a new tokenizer. The values in the argv[] array are the + ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL + ** TABLE statement that created the fts3 table. For example, if + ** the following SQL is executed: + ** + ** CREATE .. USING fts3( ... , tokenizer arg1 arg2) + ** + ** then argc is set to 2, and the argv[] array contains pointers + ** to the strings "arg1" and "arg2". + ** + ** This method should return either SQLITE_OK (0), or an SQLite error + ** code. If SQLITE_OK is returned, then *ppTokenizer should be set + ** to point at the newly created tokenizer structure. The generic + ** sqlite3_tokenizer.pModule variable should not be initialized by + ** this callback. The caller will do so. + */ + int (*xCreate)( + int argc, /* Size of argv array */ + const char *const*argv, /* Tokenizer argument strings */ + sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ + ); + + /* + ** Destroy an existing tokenizer. The fts3 module calls this method + ** exactly once for each successful call to xCreate(). + */ + int (*xDestroy)(sqlite3_tokenizer *pTokenizer); + + /* + ** Create a tokenizer cursor to tokenize an input buffer. The caller + ** is responsible for ensuring that the input buffer remains valid + ** until the cursor is closed (using the xClose() method). + */ + int (*xOpen)( + sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ + const char *pInput, int nBytes, /* Input buffer */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ + ); + + /* + ** Destroy an existing tokenizer cursor. The fts3 module calls this + ** method exactly once for each successful call to xOpen(). + */ + int (*xClose)(sqlite3_tokenizer_cursor *pCursor); + + /* + ** Retrieve the next token from the tokenizer cursor pCursor. 
This + ** method should either return SQLITE_OK and set the values of the + ** "OUT" variables identified below, or SQLITE_DONE to indicate that + ** the end of the buffer has been reached, or an SQLite error code. + ** + ** *ppToken should be set to point at a buffer containing the + ** normalized version of the token (i.e. after any case-folding and/or + ** stemming has been performed). *pnBytes should be set to the length + ** of this buffer in bytes. The input text that generated the token is + ** identified by the byte offsets returned in *piStartOffset and + ** *piEndOffset. *piStartOffset should be set to the index of the first + ** byte of the token in the input buffer. *piEndOffset should be set + ** to the index of the first byte just past the end of the token in + ** the input buffer. + ** + ** The buffer *ppToken is set to point at is managed by the tokenizer + ** implementation. It is only required to be valid until the next call + ** to xNext() or xClose(). + */ + /* TODO(shess) current implementation requires pInput to be + ** nul-terminated. This should either be fixed, or pInput/nBytes + ** should be converted to zInput. + */ + int (*xNext)( + sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ + const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ + int *piStartOffset, /* OUT: Byte offset of token in input buffer */ + int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ + int *piPosition /* OUT: Number of tokens returned before this one */ + ); + + /*********************************************************************** + ** Methods below this point are only available if iVersion>=1. + */ + + /* + ** Configure the language id of a tokenizer cursor. + */ + int (*xLanguageid)(sqlite3_tokenizer_cursor *pCsr, int iLangid); +}; + +struct sqlite3_tokenizer { + const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ + /* Tokenizer implementations will typically add additional fields */ +}; + +struct sqlite3_tokenizer_cursor { + sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ + /* Tokenizer implementations will typically add additional fields */ +}; + +int fts3_global_term_cnt(int iTerm, int iCol); +int fts3_term_cnt(int iTerm, int iCol); + + +#endif /* _FTS3_TOKENIZER_H_ */ + +/************** End of fts3_tokenizer.h **************************************/ +/************** Continuing where we left off in fts3Int.h ********************/ +/************** Include fts3_hash.h in the middle of fts3Int.h ***************/ +/************** Begin file fts3_hash.h ***************************************/ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the header file for the generic hash-table implementation +** used in SQLite. We've modified it slightly to serve as a standalone +** hash table implementation for the full-text indexing module. +** +*/ +#ifndef _FTS3_HASH_H_ +#define _FTS3_HASH_H_ + +/* Forward declarations of structures. */ +typedef struct Fts3Hash Fts3Hash; +typedef struct Fts3HashElem Fts3HashElem; + +/* A complete hash table is an instance of the following structure. 
+** The internals of this structure are intended to be opaque -- client +** code should not attempt to access or modify the fields of this structure +** directly. Change this structure only by using the routines below. +** However, many of the "procedures" and "functions" for modifying and +** accessing this structure are really macros, so we can't really make +** this structure opaque. +*/ +struct Fts3Hash { + char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ + char copyKey; /* True if copy of key made on insert */ + int count; /* Number of entries in this table */ + Fts3HashElem *first; /* The first element of the array */ + int htsize; /* Number of buckets in the hash table */ + struct _fts3ht { /* the hash table */ + int count; /* Number of entries with this hash */ + Fts3HashElem *chain; /* Pointer to first entry with this hash */ + } *ht; +}; + +/* Each element in the hash table is an instance of the following +** structure. All elements are stored on a single doubly-linked list. +** +** Again, this structure is intended to be opaque, but it can't really +** be opaque because it is used by macros. +*/ +struct Fts3HashElem { + Fts3HashElem *next, *prev; /* Next and previous elements in the table */ + void *data; /* Data associated with this element */ + void *pKey; int nKey; /* Key associated with this element */ +}; + +/* +** There are 2 different modes of operation for a hash table: +** +** FTS3_HASH_STRING pKey points to a string that is nKey bytes long +** (including the null-terminator, if any). Case +** is respected in comparisons. +** +** FTS3_HASH_BINARY pKey points to binary data nKey bytes long. +** memcmp() is used to compare keys. +** +** A copy of the key is made if the copyKey parameter to fts3HashInit is 1. +*/ +#define FTS3_HASH_STRING 1 +#define FTS3_HASH_BINARY 2 + +/* +** Access routines. To delete, insert a NULL pointer. +*/ +SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey); +SQLITE_PRIVATE void *sqlite3Fts3HashInsert(Fts3Hash*, const void *pKey, int nKey, void *pData); +SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash*, const void *pKey, int nKey); +SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash*); +SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(const Fts3Hash *, const void *, int); + +/* +** Shorthand for the functions above +*/ +#define fts3HashInit sqlite3Fts3HashInit +#define fts3HashInsert sqlite3Fts3HashInsert +#define fts3HashFind sqlite3Fts3HashFind +#define fts3HashClear sqlite3Fts3HashClear +#define fts3HashFindElem sqlite3Fts3HashFindElem + +/* +** Macros for looping over all elements of a hash table. The idiom is +** like this: +** +** Fts3Hash h; +** Fts3HashElem *p; +** ... +** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){ +** SomeStructure *pData = fts3HashData(p); +** // do something with pData +** } +*/ +#define fts3HashFirst(H) ((H)->first) +#define fts3HashNext(E) ((E)->next) +#define fts3HashData(E) ((E)->data) +#define fts3HashKey(E) ((E)->pKey) +#define fts3HashKeysize(E) ((E)->nKey) + +/* +** Number of entries in a hash table +*/ +#define fts3HashCount(H) ((H)->count) + +#endif /* _FTS3_HASH_H_ */ + +/************** End of fts3_hash.h *******************************************/ +/************** Continuing where we left off in fts3Int.h ********************/ + +/* +** This constant determines the maximum depth of an FTS expression tree +** that the library will create and use. 
FTS uses recursion to perform
+** various operations on the query tree, so the disadvantage of a large
+** limit is that it may allow very large queries to use large amounts
+** of stack space (perhaps causing a stack overflow).
+*/
+#ifndef SQLITE_FTS3_MAX_EXPR_DEPTH
+# define SQLITE_FTS3_MAX_EXPR_DEPTH 12
+#endif
+
+
+/*
+** This constant controls how often segments are merged. Once there are
+** FTS3_MERGE_COUNT segments of level N, they are merged into a single
+** segment of level N+1.
+*/
+#define FTS3_MERGE_COUNT 16
+
+/*
+** This is the maximum amount of data (in bytes) to store in the
+** Fts3Table.pendingTerms hash table. Normally, the hash table is
+** populated as documents are inserted/updated/deleted in a transaction
+** and used to create a new segment when the transaction is committed.
+** However if this limit is reached midway through a transaction, a new
+** segment is created and the hash table cleared immediately.
+*/
+#define FTS3_MAX_PENDING_DATA (1*1024*1024)
+
+/*
+** Macro to return the number of elements in an array. SQLite has a
+** similar macro called ArraySize(). Use a different name to avoid
+** a collision when building an amalgamation with built-in FTS3.
+*/
+#define SizeofArray(X) ((int)(sizeof(X)/sizeof(X[0])))
+
+
+#ifndef MIN
+# define MIN(x,y) ((x)<(y)?(x):(y))
+#endif
+#ifndef MAX
+# define MAX(x,y) ((x)>(y)?(x):(y))
+#endif
+
+/*
+** Maximum length of a varint encoded integer. The varint format is different
+** from that used by SQLite, so the maximum length is 10, not 9.
+*/
+#define FTS3_VARINT_MAX 10
+
+/*
+** FTS4 virtual tables may maintain multiple indexes - one index of all terms
+** in the document set and zero or more prefix indexes. All indexes are stored
+** as one or more b+-trees in the %_segments and %_segdir tables.
+**
+** It is possible to determine which index a b+-tree belongs to based on the
+** value stored in the "%_segdir.level" column. Given this value L, the index
+** that the b+-tree belongs to is (L>>10). In other words, all b+-trees with
+** level values between 0 and 1023 (inclusive) belong to index 0, all levels
+** between 1024 and 2047 to index 1, and so on. For example, a b+-tree stored
+** with level value 1500 belongs to index 1, since 1500>>10==1.
+**
+** It is considered impossible for an index to use more than 1024 levels. In
+** theory this could happen, but only after at least
+** (FTS3_MERGE_COUNT^1024) separate flushes of the pending-terms tables.
+*/
+#define FTS3_SEGDIR_MAXLEVEL      1024
+#define FTS3_SEGDIR_MAXLEVEL_STR "1024"
+
+/*
+** The testcase() macro is only used by the amalgamation. If undefined,
+** make it a no-op.
+*/
+#ifndef testcase
+# define testcase(X)
+#endif
+
+/*
+** Terminator values for position-lists and column-lists.
+*/
+#define POS_COLUMN  (1)     /* Column-list terminator */
+#define POS_END     (0)     /* Position-list terminator */
+
+/*
+** This section provides definitions to allow the
+** FTS3 extension to be compiled outside of the
+** amalgamation.
+*/
+#ifndef SQLITE_AMALGAMATION
+/*
+** Macros indicating that conditional expressions are always true or
+** false.
+*/
+#ifdef SQLITE_COVERAGE_TEST
+# define ALWAYS(x) (1)
+# define NEVER(X)  (0)
+#elif defined(SQLITE_DEBUG)
+# define ALWAYS(x) sqlite3Fts3Always((x)!=0)
+# define NEVER(x)  sqlite3Fts3Never((x)!=0)
+SQLITE_PRIVATE int sqlite3Fts3Always(int b);
+SQLITE_PRIVATE int sqlite3Fts3Never(int b);
+#else
+# define ALWAYS(x) (x)
+# define NEVER(x)  (x)
+#endif
+
+/*
+** Internal types used by SQLite.
+*/ +typedef unsigned char u8; /* 1-byte (or larger) unsigned integer */ +typedef short int i16; /* 2-byte (or larger) signed integer */ +typedef unsigned int u32; /* 4-byte unsigned integer */ +typedef sqlite3_uint64 u64; /* 8-byte unsigned integer */ +typedef sqlite3_int64 i64; /* 8-byte signed integer */ + +/* +** Macro used to suppress compiler warnings for unused parameters. +*/ +#define UNUSED_PARAMETER(x) (void)(x) + +/* +** Activate assert() only if SQLITE_TEST is enabled. +*/ +#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) +# define NDEBUG 1 +#endif + +/* +** The TESTONLY macro is used to enclose variable declarations or +** other bits of code that are needed to support the arguments +** within testcase() and assert() macros. +*/ +#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) +# define TESTONLY(X) X +#else +# define TESTONLY(X) +#endif + +#endif /* SQLITE_AMALGAMATION */ + +#ifdef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3Fts3Corrupt(void); +# define FTS_CORRUPT_VTAB sqlite3Fts3Corrupt() +#else +# define FTS_CORRUPT_VTAB SQLITE_CORRUPT_VTAB +#endif + +typedef struct Fts3Table Fts3Table; +typedef struct Fts3Cursor Fts3Cursor; +typedef struct Fts3Expr Fts3Expr; +typedef struct Fts3Phrase Fts3Phrase; +typedef struct Fts3PhraseToken Fts3PhraseToken; + +typedef struct Fts3Doclist Fts3Doclist; +typedef struct Fts3SegFilter Fts3SegFilter; +typedef struct Fts3DeferredToken Fts3DeferredToken; +typedef struct Fts3SegReader Fts3SegReader; +typedef struct Fts3MultiSegReader Fts3MultiSegReader; + +typedef struct MatchinfoBuffer MatchinfoBuffer; + +/* +** A connection to a fulltext index is an instance of the following +** structure. The xCreate and xConnect methods create an instance +** of this structure and xDestroy and xDisconnect free that instance. +** All other methods receive a pointer to the structure as one of their +** arguments. +*/ +struct Fts3Table { + sqlite3_vtab base; /* Base class used by SQLite core */ + sqlite3 *db; /* The database connection */ + const char *zDb; /* logical database name */ + const char *zName; /* virtual table name */ + int nColumn; /* number of named columns in virtual table */ + char **azColumn; /* column names. malloced */ + u8 *abNotindexed; /* True for 'notindexed' columns */ + sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ + char *zContentTbl; /* content=xxx option, or NULL */ + char *zLanguageid; /* languageid=xxx option, or NULL */ + int nAutoincrmerge; /* Value configured by 'automerge' */ + u32 nLeafAdd; /* Number of leaf blocks added this trans */ + + /* Precompiled statements used by the implementation. Each of these + ** statements is run and reset within a single virtual table API call. + */ + sqlite3_stmt *aStmt[40]; + sqlite3_stmt *pSeekStmt; /* Cache for fts3CursorSeekStmt() */ + + char *zReadExprlist; + char *zWriteExprlist; + + int nNodeSize; /* Soft limit for node size */ + u8 bFts4; /* True for FTS4, false for FTS3 */ + u8 bHasStat; /* True if %_stat table exists (2==unknown) */ + u8 bHasDocsize; /* True if %_docsize table exists */ + u8 bDescIdx; /* True if doclists are in reverse order */ + u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */ + int nPgsz; /* Page size for host database */ + char *zSegmentsTbl; /* Name of %_segments table */ + sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ + + /* + ** The following array of hash tables is used to buffer pending index + ** updates during transactions. 
All pending updates buffered at any one
+  ** time must share a common language-id (see the FTS4 langid= feature).
+  ** The current language id is stored in variable iPrevLangid.
+  **
+  ** A single FTS4 table may have multiple full-text indexes. For each index
+  ** there is an entry in the aIndex[] array. Index 0 is an index of all the
+  ** terms that appear in the document set. Each subsequent index in aIndex[]
+  ** is an index of prefixes of a specific length.
+  **
+  ** Variable nPendingData contains an estimate of the memory consumed by the
+  ** pending data structures, including hash table overhead, but not including
+  ** malloc overhead. When nPendingData exceeds nMaxPendingData, all hash
+  ** tables are flushed to disk. Variable iPrevDocid is the docid of the most
+  ** recently inserted record.
+  */
+  int nIndex;                     /* Size of aIndex[] */
+  struct Fts3Index {
+    int nPrefix;                  /* Prefix length (0 for main terms index) */
+    Fts3Hash hPending;            /* Pending terms table for this index */
+  } *aIndex;
+  int nMaxPendingData;            /* Max pending data before flush to disk */
+  int nPendingData;               /* Current bytes of pending data */
+  sqlite_int64 iPrevDocid;        /* Docid of most recently inserted document */
+  int iPrevLangid;                /* Langid of most recently inserted document */
+  int bPrevDelete;                /* True if last operation was a delete */
+
+#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
+  /* State variables used for validating that the transaction control
+  ** methods of the virtual table are called at appropriate times. These
+  ** values do not contribute to FTS functionality; they are used for
+  ** verifying the operation of the SQLite core.
+  */
+  int inTransaction;     /* True after xBegin but before xCommit/xRollback */
+  int mxSavepoint;       /* Largest valid xSavepoint integer */
+#endif
+
+#ifdef SQLITE_TEST
+  /* True to disable the incremental doclist optimization. This is controlled
+  ** by the special insert command 'test-no-incr-doclist'. */
+  int bNoIncrDoclist;
+#endif
+};
+
+/*
+** When the core wants to read from the virtual table, it creates a
+** virtual table cursor (an instance of the following structure) using
+** the xOpen method. Cursors are destroyed using the xClose method.
+*/
+struct Fts3Cursor {
+  sqlite3_vtab_cursor base;       /* Base class used by SQLite core */
+  i16 eSearch;                    /* Search strategy (see below) */
+  u8 isEof;                       /* True if at End Of Results */
+  u8 isRequireSeek;               /* True if must seek pStmt to %_content row */
+  u8 bSeekStmt;                   /* True if pStmt is a seek */
+  sqlite3_stmt *pStmt;            /* Prepared statement in use by the cursor */
+  Fts3Expr *pExpr;                /* Parsed MATCH query string */
+  int iLangid;                    /* Language being queried for */
+  int nPhrase;                    /* Number of matchable phrases in query */
+  Fts3DeferredToken *pDeferred;   /* Deferred search tokens, if any */
+  sqlite3_int64 iPrevId;          /* Previous id read from aDoclist */
+  char *pNextId;                  /* Pointer into the body of aDoclist */
+  char *aDoclist;                 /* List of docids for full-text queries */
+  int nDoclist;                   /* Size of buffer at aDoclist */
+  u8 bDesc;                       /* True to sort in descending order */
+  int eEvalmode;                  /* An FTS3_EVAL_XX constant */
+  int nRowAvg;                    /* Average size of database rows, in pages */
+  sqlite3_int64 nDoc;             /* Documents in table */
+  i64 iMinDocid;                  /* Minimum docid to return */
+  i64 iMaxDocid;                  /* Maximum docid to return */
+  int isMatchinfoNeeded;          /* True when aMatchinfo[] needs filling in */
+  MatchinfoBuffer *pMIBuffer;     /* Buffer for matchinfo data */
+};
+
+#define FTS3_EVAL_FILTER    0
+#define FTS3_EVAL_NEXT      1
+#define FTS3_EVAL_MATCHINFO 2
+
+/*
+** The Fts3Cursor.eSearch member is always set to one of the following.
+** Actually, Fts3Cursor.eSearch can be greater than or equal to
+** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index
+** of the column to be searched. For example, in
+**
+**     CREATE VIRTUAL TABLE ex1 USING fts3(a,b,c,d);
+**     SELECT docid FROM ex1 WHERE b MATCH 'one two three';
+**
+** Because the LHS of the MATCH operator is the second column "b",
+** Fts3Cursor.eSearch will be set to FTS3_FULLTEXT_SEARCH+1. (+0 for a,
+** +1 for b, +2 for c, +3 for d.) If the LHS of MATCH were "ex1",
+** indicating that all columns should be searched, then eSearch would
+** be set to FTS3_FULLTEXT_SEARCH+4.
+*/
+#define FTS3_FULLSCAN_SEARCH 0    /* Linear scan of %_content table */
+#define FTS3_DOCID_SEARCH    1    /* Lookup by rowid on %_content table */
+#define FTS3_FULLTEXT_SEARCH 2    /* Full-text index search */
+
+/*
+** The lower 16-bits of the sqlite3_index_info.idxNum value set by
+** the xBestIndex() method contains the Fts3Cursor.eSearch value described
+** above. The upper 16-bits contain a combination of the following
+** bits, used to describe extra constraints on full-text searches.
+*/
+#define FTS3_HAVE_LANGID   0x00010000    /* languageid=? */
+#define FTS3_HAVE_DOCID_GE 0x00020000    /* docid>=? */
+#define FTS3_HAVE_DOCID_LE 0x00040000    /* docid<=? */
+
+struct Fts3Doclist {
+  char *aAll;                    /* Array containing doclist (or NULL) */
+  int nAll;                      /* Size of aAll[] in bytes */
+  char *pNextDocid;              /* Pointer to next docid */
+
+  sqlite3_int64 iDocid;          /* Current docid (if pList!=0) */
+  int bFreeList;                 /* True if pList should be sqlite3_free()d */
+  char *pList;                   /* Pointer to position list following iDocid */
+  int nList;                     /* Length of position list */
+};
+
+/*
+** A "phrase" is a sequence of one or more tokens that must match in
+** sequence. A single token is the base case and the most common case.
+** For a sequence of tokens contained in double-quotes (i.e. "one two three")
+** nToken will be the number of tokens in the string.
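+**
+** For illustration (hypothetical inputs that follow the rules just
+** stated): the MATCH expression '"one two three"' is parsed into a
+** single phrase with nToken==3, while 'thr*' becomes a one-token
+** phrase whose token has isPrefix set because it ends with '*'.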
+*/ +struct Fts3PhraseToken { + char *z; /* Text of the token */ + int n; /* Number of bytes in buffer z */ + int isPrefix; /* True if token ends with a "*" character */ + int bFirst; /* True if token must appear at position 0 */ + + /* Variables above this point are populated when the expression is + ** parsed (by code in fts3_expr.c). Below this point the variables are + ** used when evaluating the expression. */ + Fts3DeferredToken *pDeferred; /* Deferred token object for this token */ + Fts3MultiSegReader *pSegcsr; /* Segment-reader for this token */ +}; + +struct Fts3Phrase { + /* Cache of doclist for this phrase. */ + Fts3Doclist doclist; + int bIncr; /* True if doclist is loaded incrementally */ + int iDoclistToken; + + /* Used by sqlite3Fts3EvalPhrasePoslist() if this is a descendent of an + ** OR condition. */ + char *pOrPoslist; + i64 iOrDocid; + + /* Variables below this point are populated by fts3_expr.c when parsing + ** a MATCH expression. Everything above is part of the evaluation phase. + */ + int nToken; /* Number of tokens in the phrase */ + int iColumn; /* Index of column this phrase must match */ + Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ +}; + +/* +** A tree of these objects forms the RHS of a MATCH operator. +** +** If Fts3Expr.eType is FTSQUERY_PHRASE and isLoaded is true, then aDoclist +** points to a malloced buffer, size nDoclist bytes, containing the results +** of this phrase query in FTS3 doclist format. As usual, the initial +** "Length" field found in doclists stored on disk is omitted from this +** buffer. +** +** Variable aMI is used only for FTSQUERY_NEAR nodes to store the global +** matchinfo data. If it is not NULL, it points to an array of size nCol*3, +** where nCol is the number of columns in the queried FTS table. The array +** is populated as follows: +** +** aMI[iCol*3 + 0] = Undefined +** aMI[iCol*3 + 1] = Number of occurrences +** aMI[iCol*3 + 2] = Number of rows containing at least one instance +** +** The aMI array is allocated using sqlite3_malloc(). It should be freed +** when the expression node is. +*/ +struct Fts3Expr { + int eType; /* One of the FTSQUERY_XXX values defined below */ + int nNear; /* Valid if eType==FTSQUERY_NEAR */ + Fts3Expr *pParent; /* pParent->pLeft==this or pParent->pRight==this */ + Fts3Expr *pLeft; /* Left operand */ + Fts3Expr *pRight; /* Right operand */ + Fts3Phrase *pPhrase; /* Valid if eType==FTSQUERY_PHRASE */ + + /* The following are used by the fts3_eval.c module. */ + sqlite3_int64 iDocid; /* Current docid */ + u8 bEof; /* True this expression is at EOF already */ + u8 bStart; /* True if iDocid is valid */ + u8 bDeferred; /* True if this expression is entirely deferred */ + + /* The following are used by the fts3_snippet.c module. */ + int iPhrase; /* Index of this phrase in matchinfo() results */ + u32 *aMI; /* See above */ +}; + +/* +** Candidate values for Fts3Query.eType. Note that the order of the first +** four values is in order of precedence when parsing expressions. 
For +** example, the following: +** +** "a OR b AND c NOT d NEAR e" +** +** is equivalent to: +** +** "a OR (b AND (c NOT (d NEAR e)))" +*/ +#define FTSQUERY_NEAR 1 +#define FTSQUERY_NOT 2 +#define FTSQUERY_AND 3 +#define FTSQUERY_OR 4 +#define FTSQUERY_PHRASE 5 + + +/* fts3_write.c */ +SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(sqlite3_vtab*,int,sqlite3_value**,sqlite3_int64*); +SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *); +SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *); +SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *); +SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(int, int, sqlite3_int64, + sqlite3_int64, sqlite3_int64, const char *, int, Fts3SegReader**); +SQLITE_PRIVATE int sqlite3Fts3SegReaderPending( + Fts3Table*,int,const char*,int,int,Fts3SegReader**); +SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *); +SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(Fts3Table*, int, int, int, sqlite3_stmt **); +SQLITE_PRIVATE int sqlite3Fts3ReadBlock(Fts3Table*, sqlite3_int64, char **, int*, int*); + +SQLITE_PRIVATE int sqlite3Fts3SelectDoctotal(Fts3Table *, sqlite3_stmt **); +SQLITE_PRIVATE int sqlite3Fts3SelectDocsize(Fts3Table *, sqlite3_int64, sqlite3_stmt **); + +#ifndef SQLITE_DISABLE_FTS4_DEFERRED +SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *); +SQLITE_PRIVATE int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int); +SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *); +SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *); +SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(Fts3DeferredToken *, char **, int *); +#else +# define sqlite3Fts3FreeDeferredTokens(x) +# define sqlite3Fts3DeferToken(x,y,z) SQLITE_OK +# define sqlite3Fts3CacheDeferredDoclists(x) SQLITE_OK +# define sqlite3Fts3FreeDeferredDoclists(x) +# define sqlite3Fts3DeferredTokenList(x,y,z) SQLITE_OK +#endif + +SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *); +SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *, int *); + +/* Special values interpreted by sqlite3SegReaderCursor() */ +#define FTS3_SEGCURSOR_PENDING -1 +#define FTS3_SEGCURSOR_ALL -2 + +SQLITE_PRIVATE int sqlite3Fts3SegReaderStart(Fts3Table*, Fts3MultiSegReader*, Fts3SegFilter*); +SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(Fts3Table *, Fts3MultiSegReader *); +SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(Fts3MultiSegReader *); + +SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor(Fts3Table *, + int, int, int, const char *, int, int, int, Fts3MultiSegReader *); + +/* Flags allowed as part of the 4th argument to SegmentReaderIterate() */ +#define FTS3_SEGMENT_REQUIRE_POS 0x00000001 +#define FTS3_SEGMENT_IGNORE_EMPTY 0x00000002 +#define FTS3_SEGMENT_COLUMN_FILTER 0x00000004 +#define FTS3_SEGMENT_PREFIX 0x00000008 +#define FTS3_SEGMENT_SCAN 0x00000010 +#define FTS3_SEGMENT_FIRST 0x00000020 + +/* Type passed as 4th argument to SegmentReaderIterate() */ +struct Fts3SegFilter { + const char *zTerm; + int nTerm; + int iCol; + int flags; +}; + +struct Fts3MultiSegReader { + /* Used internally by sqlite3Fts3SegReaderXXX() calls */ + Fts3SegReader **apSegment; /* Array of Fts3SegReader objects */ + int nSegment; /* Size of apSegment array */ + int nAdvance; /* How many seg-readers to advance */ + Fts3SegFilter *pFilter; /* Pointer to filter object */ + char *aBuffer; /* Buffer to merge doclists in */ + int nBuffer; /* Allocated size of aBuffer[] in bytes */ + + int iColFilter; /* If >=0, filter for this column */ + int bRestart; + + /* Used by fts3.c only. 
*/ + int nCost; /* Cost of running iterator */ + int bLookup; /* True if a lookup of a single entry. */ + + /* Output values. Valid only after Fts3SegReaderStep() returns SQLITE_ROW. */ + char *zTerm; /* Pointer to term buffer */ + int nTerm; /* Size of zTerm in bytes */ + char *aDoclist; /* Pointer to doclist buffer */ + int nDoclist; /* Size of aDoclist[] in bytes */ +}; + +SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table*,int,int); + +#define fts3GetVarint32(p, piVal) ( \ + (*(u8*)(p)&0x80) ? sqlite3Fts3GetVarint32(p, piVal) : (*piVal=*(u8*)(p), 1) \ +) + +/* fts3.c */ +SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char**,const char*,...); +SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *, sqlite3_int64); +SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *, sqlite_int64 *); +SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *, int *); +SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64); +SQLITE_PRIVATE void sqlite3Fts3Dequote(char *); +SQLITE_PRIVATE void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,int*,u8*); +SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *); +SQLITE_PRIVATE int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *); +SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int*, Fts3Table*); +SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc); + +/* fts3_tokenizer.c */ +SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *, int *); +SQLITE_PRIVATE int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *); +SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *, + sqlite3_tokenizer **, char ** +); +SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char); + +/* fts3_snippet.c */ +SQLITE_PRIVATE void sqlite3Fts3Offsets(sqlite3_context*, Fts3Cursor*); +SQLITE_PRIVATE void sqlite3Fts3Snippet(sqlite3_context *, Fts3Cursor *, const char *, + const char *, const char *, int, int +); +SQLITE_PRIVATE void sqlite3Fts3Matchinfo(sqlite3_context *, Fts3Cursor *, const char *); +SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p); + +/* fts3_expr.c */ +SQLITE_PRIVATE int sqlite3Fts3ExprParse(sqlite3_tokenizer *, int, + char **, int, int, int, const char *, int, Fts3Expr **, char ** +); +SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *); +#ifdef SQLITE_TEST +SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3 *db); +SQLITE_PRIVATE int sqlite3Fts3InitTerm(sqlite3 *db); +#endif + +SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(sqlite3_tokenizer *, int, const char *, int, + sqlite3_tokenizer_cursor ** +); + +/* fts3_aux.c */ +SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db); + +SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *); + +SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart( + Fts3Table*, Fts3MultiSegReader*, int, const char*, int); +SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( + Fts3Table *, Fts3MultiSegReader *, sqlite3_int64 *, char **, int *); +SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **); +SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); +SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); + +/* fts3_tokenize_vtab.c */ +SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *); + +/* fts3_unicode2.c (functions generated by parsing unicode text files) */ +#ifndef SQLITE_DISABLE_FTS3_UNICODE +SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int, int); +SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int); +SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); +#endif + +#endif /* 
!SQLITE_CORE || SQLITE_ENABLE_FTS3 */ +#endif /* _FTSINT_H */ + +/************** End of fts3Int.h *********************************************/ +/************** Continuing where we left off in fts3.c ***********************/ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +#if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE) +# define SQLITE_CORE 1 +#endif + +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ +/* #include */ + +/* #include "fts3.h" */ +#ifndef SQLITE_CORE +/* # include "sqlite3ext.h" */ + SQLITE_EXTENSION_INIT1 +#endif + +static int fts3EvalNext(Fts3Cursor *pCsr); +static int fts3EvalStart(Fts3Cursor *pCsr); +static int fts3TermSegReaderCursor( + Fts3Cursor *, const char *, int, int, Fts3MultiSegReader **); + +#ifndef SQLITE_AMALGAMATION +# if defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3Fts3Always(int b) { assert( b ); return b; } +SQLITE_PRIVATE int sqlite3Fts3Never(int b) { assert( !b ); return b; } +# endif +#endif + +/* +** Write a 64-bit variable-length integer to memory starting at p[0]. +** The length of data written will be between 1 and FTS3_VARINT_MAX bytes. +** The number of bytes written is returned. +*/ +SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *p, sqlite_int64 v){ + unsigned char *q = (unsigned char *) p; + sqlite_uint64 vu = v; + do{ + *q++ = (unsigned char) ((vu & 0x7f) | 0x80); + vu >>= 7; + }while( vu!=0 ); + q[-1] &= 0x7f; /* turn off high bit in final byte */ + assert( q - (unsigned char *)p <= FTS3_VARINT_MAX ); + return (int) (q - (unsigned char *)p); +} + +#define GETVARINT_STEP(v, ptr, shift, mask1, mask2, var, ret) \ + v = (v & mask1) | ( (*ptr++) << shift ); \ + if( (v & mask2)==0 ){ var = v; return ret; } +#define GETVARINT_INIT(v, ptr, shift, mask1, mask2, var, ret) \ + v = (*ptr++); \ + if( (v & mask2)==0 ){ var = v; return ret; } + +/* +** Read a 64-bit variable-length integer from memory starting at p[0]. +** Return the number of bytes read, or 0 on error. +** The value is stored in *v. +*/ +SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *p, sqlite_int64 *v){ + const char *pStart = p; + u32 a; + u64 b; + int shift; + + GETVARINT_INIT(a, p, 0, 0x00, 0x80, *v, 1); + GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *v, 2); + GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *v, 3); + GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *v, 4); + b = (a & 0x0FFFFFFF ); + + for(shift=28; shift<=63; shift+=7){ + u64 c = *p++; + b += (c&0x7F) << shift; + if( (c & 0x80)==0 ) break; + } + *v = b; + return (int)(p - pStart); +} + +/* +** Similar to sqlite3Fts3GetVarint(), except that the output is truncated to a +** 32-bit integer before it is returned. +*/ +SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *p, int *pi){ + u32 a; + +#ifndef fts3GetVarint32 + GETVARINT_INIT(a, p, 0, 0x00, 0x80, *pi, 1); +#else + a = (*p++); + assert( a & 0x80 ); +#endif + + GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *pi, 2); + GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *pi, 3); + GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *pi, 4); + a = (a & 0x0FFFFFFF ); + *pi = (int)(a | ((u32)(*p & 0x0F) << 28)); + return 5; +} + +/* +** Return the number of bytes required to encode v as a varint +*/ +SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64 v){ + int i = 0; + do{ + i++; + v >>= 7; + }while( v!=0 ); + return i; +} + +/* +** Convert an SQL-style quoted string into a normal string by removing +** the quote characters. The conversion is done in-place. 
If the
+** input does not begin with a quote character, then this routine
+** is a no-op.
+**
+** Examples:
+**
+**     "abc"   becomes   abc
+**     'xyz'   becomes   xyz
+**     [pqr]   becomes   pqr
+**     `mno`   becomes   mno
+**
+*/
+SQLITE_PRIVATE void sqlite3Fts3Dequote(char *z){
+  char quote;                     /* Quote character (if any) */
+
+  quote = z[0];
+  if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){
+    int iIn = 1;                  /* Index of next byte to read from input */
+    int iOut = 0;                 /* Index of next byte to write to output */
+
+    /* If the first byte was a '[', then the close-quote character is a ']' */
+    if( quote=='[' ) quote = ']';
+
+    while( z[iIn] ){
+      if( z[iIn]==quote ){
+        if( z[iIn+1]!=quote ) break;
+        z[iOut++] = quote;
+        iIn += 2;
+      }else{
+        z[iOut++] = z[iIn++];
+      }
+    }
+    z[iOut] = '\0';
+  }
+}
+
+/*
+** Read a single varint from the doclist at *pp and advance *pp to point
+** to the first byte past the end of the varint. Add the value of the varint
+** to *pVal.
+*/
+static void fts3GetDeltaVarint(char **pp, sqlite3_int64 *pVal){
+  sqlite3_int64 iVal;
+  *pp += sqlite3Fts3GetVarint(*pp, &iVal);
+  *pVal += iVal;
+}
+
+/*
+** When this function is called, *pp points to the first byte following a
+** varint that is part of a doclist (or position-list, or any other list
+** of varints). This function moves *pp to point to the start of that varint,
+** and sets *pVal to the value of that varint.
+**
+** Argument pStart points to the first byte of the doclist that the
+** varint is part of.
+*/
+static void fts3GetReverseVarint(
+  char **pp,
+  char *pStart,
+  sqlite3_int64 *pVal
+){
+  sqlite3_int64 iVal;
+  char *p;
+
+  /* Pointer p now points at the first byte past the varint we are
+  ** interested in. So, unless the doclist is corrupt, the 0x80 bit is
+  ** clear on character p[-1]. */
+  for(p = (*pp)-2; p>=pStart && *p&0x80; p--);
+  p++;
+  *pp = p;
+
+  sqlite3Fts3GetVarint(p, &iVal);
+  *pVal = iVal;
+}
+
+/*
+** The xDisconnect() virtual table method.
+*/
+static int fts3DisconnectMethod(sqlite3_vtab *pVtab){
+  Fts3Table *p = (Fts3Table *)pVtab;
+  int i;
+
+  assert( p->nPendingData==0 );
+  assert( p->pSegments==0 );
+
+  /* Free any prepared statements held */
+  sqlite3_finalize(p->pSeekStmt);
+  for(i=0; i<SizeofArray(p->aStmt); i++){
+    sqlite3_finalize(p->aStmt[i]);
+  }
+  sqlite3_free(p->zSegmentsTbl);
+  sqlite3_free(p->zReadExprlist);
+  sqlite3_free(p->zWriteExprlist);
+  sqlite3_free(p->zContentTbl);
+  sqlite3_free(p->zLanguageid);
+
+  /* Invoke the tokenizer destructor to free the tokenizer. */
+  p->pTokenizer->pModule->xDestroy(p->pTokenizer);
+
+  sqlite3_free(p);
+  return SQLITE_OK;
+}
+
+/*
+** Write an error message into *pzErr
+*/
+SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char **pzErr, const char *zFormat, ...){
+  va_list ap;
+  sqlite3_free(*pzErr);
+  va_start(ap, zFormat);
+  *pzErr = sqlite3_vmprintf(zFormat, ap);
+  va_end(ap);
+}
+
+/*
+** Construct one or more SQL statements from the format string given
+** and then evaluate those statements. The success code is written
+** into *pRc.
+**
+** If *pRc is initially non-zero then this routine is a no-op.
+*/
+static void fts3DbExec(
+  int *pRc,              /* Success code */
+  sqlite3 *db,           /* Database in which to run SQL */
+  const char *zFormat,   /* Format string for SQL */
+  ...
/* Arguments to the format string */ +){ + va_list ap; + char *zSql; + if( *pRc ) return; + va_start(ap, zFormat); + zSql = sqlite3_vmprintf(zFormat, ap); + va_end(ap); + if( zSql==0 ){ + *pRc = SQLITE_NOMEM; + }else{ + *pRc = sqlite3_exec(db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } +} + +/* +** The xDestroy() virtual table method. +*/ +static int fts3DestroyMethod(sqlite3_vtab *pVtab){ + Fts3Table *p = (Fts3Table *)pVtab; + int rc = SQLITE_OK; /* Return code */ + const char *zDb = p->zDb; /* Name of database (e.g. "main", "temp") */ + sqlite3 *db = p->db; /* Database handle */ + + /* Drop the shadow tables */ + if( p->zContentTbl==0 ){ + fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_content'", zDb, p->zName); + } + fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segments'", zDb,p->zName); + fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segdir'", zDb, p->zName); + fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_docsize'", zDb, p->zName); + fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_stat'", zDb, p->zName); + + /* If everything has worked, invoke fts3DisconnectMethod() to free the + ** memory associated with the Fts3Table structure and return SQLITE_OK. + ** Otherwise, return an SQLite error code. + */ + return (rc==SQLITE_OK ? fts3DisconnectMethod(pVtab) : rc); +} + + +/* +** Invoke sqlite3_declare_vtab() to declare the schema for the FTS3 table +** passed as the first argument. This is done as part of the xConnect() +** and xCreate() methods. +** +** If *pRc is non-zero when this function is called, it is a no-op. +** Otherwise, if an error occurs, an SQLite error code is stored in *pRc +** before returning. +*/ +static void fts3DeclareVtab(int *pRc, Fts3Table *p){ + if( *pRc==SQLITE_OK ){ + int i; /* Iterator variable */ + int rc; /* Return code */ + char *zSql; /* SQL statement passed to declare_vtab() */ + char *zCols; /* List of user defined columns */ + const char *zLanguageid; + + zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid"); + sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + + /* Create a list of user columns for the virtual table */ + zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); + for(i=1; zCols && inColumn; i++){ + zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]); + } + + /* Create the whole "CREATE TABLE" statement to pass to SQLite */ + zSql = sqlite3_mprintf( + "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN, %Q HIDDEN)", + zCols, p->zName, zLanguageid + ); + if( !zCols || !zSql ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_declare_vtab(p->db, zSql); + } + + sqlite3_free(zSql); + sqlite3_free(zCols); + *pRc = rc; + } +} + +/* +** Create the %_stat table if it does not already exist. +*/ +SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int *pRc, Fts3Table *p){ + fts3DbExec(pRc, p->db, + "CREATE TABLE IF NOT EXISTS %Q.'%q_stat'" + "(id INTEGER PRIMARY KEY, value BLOB);", + p->zDb, p->zName + ); + if( (*pRc)==SQLITE_OK ) p->bHasStat = 1; +} + +/* +** Create the backing store tables (%_content, %_segments and %_segdir) +** required by the FTS3 table passed as the only argument. This is done +** as part of the vtab xCreate() method. +** +** If the p->bHasDocsize boolean is true (indicating that this is an +** FTS4 table, not an FTS3 table) then also create the %_docsize and +** %_stat tables required by FTS4. 
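+**
+** As a concrete sketch (the table name "t1" is a stand-in used only for
+** illustration), a table created with
+** "CREATE VIRTUAL TABLE t1 USING fts4(a, b)" is backed by shadow tables
+** equivalent to:
+**
+**   t1_content(docid INTEGER PRIMARY KEY, c0a, c1b)
+**   t1_segments(blockid INTEGER PRIMARY KEY, block BLOB)
+**   t1_segdir(level, idx, start_block, leaves_end_block, end_block,
+**             root, PRIMARY KEY(level, idx))
+**   t1_docsize(docid INTEGER PRIMARY KEY, size BLOB)
+**   t1_stat(id INTEGER PRIMARY KEY, value BLOB)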
+*/
+static int fts3CreateTables(Fts3Table *p){
+  int rc = SQLITE_OK;             /* Return code */
+  int i;                          /* Iterator variable */
+  sqlite3 *db = p->db;            /* The database connection */
+
+  if( p->zContentTbl==0 ){
+    const char *zLanguageid = p->zLanguageid;
+    char *zContentCols;           /* Columns of %_content table */
+
+    /* Create a list of user columns for the content table */
+    zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY");
+    for(i=0; zContentCols && i<p->nColumn; i++){
+      char *z = p->azColumn[i];
+      zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z);
+    }
+    if( zLanguageid && zContentCols ){
+      zContentCols = sqlite3_mprintf("%z, langid", zContentCols, zLanguageid);
+    }
+    if( zContentCols==0 ) rc = SQLITE_NOMEM;
+
+    /* Create the content table */
+    fts3DbExec(&rc, db,
+       "CREATE TABLE %Q.'%q_content'(%s)",
+       p->zDb, p->zName, zContentCols
+    );
+    sqlite3_free(zContentCols);
+  }
+
+  /* Create other tables */
+  fts3DbExec(&rc, db,
+    "CREATE TABLE %Q.'%q_segments'(blockid INTEGER PRIMARY KEY, block BLOB);",
+    p->zDb, p->zName
+  );
+  fts3DbExec(&rc, db,
+    "CREATE TABLE %Q.'%q_segdir'("
+      "level INTEGER,"
+      "idx INTEGER,"
+      "start_block INTEGER,"
+      "leaves_end_block INTEGER,"
+      "end_block INTEGER,"
+      "root BLOB,"
+      "PRIMARY KEY(level, idx)"
+    ");",
+    p->zDb, p->zName
+  );
+  if( p->bHasDocsize ){
+    fts3DbExec(&rc, db,
+      "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);",
+      p->zDb, p->zName
+    );
+  }
+  assert( p->bHasStat==p->bFts4 );
+  if( p->bHasStat ){
+    sqlite3Fts3CreateStatTable(&rc, p);
+  }
+  return rc;
+}
+
+/*
+** Store the current database page-size in bytes in p->nPgsz.
+**
+** If *pRc is non-zero when this function is called, it is a no-op.
+** Otherwise, if an error occurs, an SQLite error code is stored in *pRc
+** before returning.
+*/
+static void fts3DatabasePageSize(int *pRc, Fts3Table *p){
+  if( *pRc==SQLITE_OK ){
+    int rc;                       /* Return code */
+    char *zSql;                   /* SQL text "PRAGMA %Q.page_size" */
+    sqlite3_stmt *pStmt;          /* Compiled "PRAGMA %Q.page_size" statement */
+
+    zSql = sqlite3_mprintf("PRAGMA %Q.page_size", p->zDb);
+    if( !zSql ){
+      rc = SQLITE_NOMEM;
+    }else{
+      rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0);
+      if( rc==SQLITE_OK ){
+        sqlite3_step(pStmt);
+        p->nPgsz = sqlite3_column_int(pStmt, 0);
+        rc = sqlite3_finalize(pStmt);
+      }else if( rc==SQLITE_AUTH ){
+        p->nPgsz = 1024;
+        rc = SQLITE_OK;
+      }
+    }
+    assert( p->nPgsz>0 || rc!=SQLITE_OK );
+    sqlite3_free(zSql);
+    *pRc = rc;
+  }
+}
+
+/*
+** "Special" FTS4 arguments are column specifications of the following form:
+**
+**   <key> = <value>
+**
+** There may not be whitespace surrounding the "=" character. The <value>
+** term may be quoted, but the <key> may not.
+*/
+static int fts3IsSpecialColumn(
+  const char *z,
+  int *pnKey,
+  char **pzValue
+){
+  char *zValue;
+  const char *zCsr = z;
+
+  while( *zCsr!='=' ){
+    if( *zCsr=='\0' ) return 0;
+    zCsr++;
+  }
+
+  *pnKey = (int)(zCsr-z);
+  zValue = sqlite3_mprintf("%s", &zCsr[1]);
+  if( zValue ){
+    sqlite3Fts3Dequote(zValue);
+  }
+  *pzValue = zValue;
+  return 1;
+}
+
+/*
+** Append the output of a printf() style formatting to an existing string.
+*/
+static void fts3Appendf(
+  int *pRc,                       /* IN/OUT: Error code */
+  char **pz,                      /* IN/OUT: Pointer to string buffer */
+  const char *zFormat,            /* Printf format string to append */
+  ...
/* Arguments for printf format string */
+){
+  if( *pRc==SQLITE_OK ){
+    va_list ap;
+    char *z;
+    va_start(ap, zFormat);
+    z = sqlite3_vmprintf(zFormat, ap);
+    va_end(ap);
+    if( z && *pz ){
+      char *z2 = sqlite3_mprintf("%s%s", *pz, z);
+      sqlite3_free(z);
+      z = z2;
+    }
+    if( z==0 ) *pRc = SQLITE_NOMEM;
+    sqlite3_free(*pz);
+    *pz = z;
+  }
+}
+
+/*
+** Return a copy of input string zInput enclosed in double-quotes (") and
+** with all double quote characters escaped. For example:
+**
+**     fts3QuoteId("un \"zip\"")   ->    "un \"\"zip\"\""
+**
+** The pointer returned points to memory obtained from sqlite3_malloc(). It
+** is the caller's responsibility to call sqlite3_free() to release this
+** memory.
+*/
+static char *fts3QuoteId(char const *zInput){
+  int nRet;
+  char *zRet;
+  nRet = 2 + (int)strlen(zInput)*2 + 1;
+  zRet = sqlite3_malloc(nRet);
+  if( zRet ){
+    int i;
+    char *z = zRet;
+    *(z++) = '"';
+    for(i=0; zInput[i]; i++){
+      if( zInput[i]=='"' ) *(z++) = '"';
+      *(z++) = zInput[i];
+    }
+    *(z++) = '"';
+    *(z++) = '\0';
+  }
+  return zRet;
+}
+
+/*
+** Return a list of comma separated SQL expressions and a FROM clause that
+** could be used in a SELECT statement such as the following:
+**
+**     SELECT <list of expressions> FROM %_content AS x ...
+**
+** to return the docid, followed by each column of text data in order
+** from left to right. If parameter zFunc is not NULL, then instead of
+** being returned directly each column of text data is passed to an SQL
+** function named zFunc first. For example, if zFunc is "unzip" and the
+** table has three user-defined columns "a", "b", and "c", the following
+** string is returned:
+**
+**     "docid, unzip(x.'a'), unzip(x.'b'), unzip(x.'c') FROM %_content AS x"
+**
+** The pointer returned points to a buffer allocated by sqlite3_malloc(). It
+** is the responsibility of the caller to eventually free it.
+**
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and
+** a NULL pointer is returned). Otherwise, if an OOM error is encountered
+** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If
+** no error occurs, *pRc is left unmodified.
+*/
+static char *fts3ReadExprList(Fts3Table *p, const char *zFunc, int *pRc){
+  char *zRet = 0;
+  char *zFree = 0;
+  char *zFunction;
+  int i;
+
+  if( p->zContentTbl==0 ){
+    if( !zFunc ){
+      zFunction = "";
+    }else{
+      zFree = zFunction = fts3QuoteId(zFunc);
+    }
+    fts3Appendf(pRc, &zRet, "docid");
+    for(i=0; i<p->nColumn; i++){
+      fts3Appendf(pRc, &zRet, ",%s(x.'c%d%q')", zFunction, i, p->azColumn[i]);
+    }
+    if( p->zLanguageid ){
+      fts3Appendf(pRc, &zRet, ", x.%Q", "langid");
+    }
+    sqlite3_free(zFree);
+  }else{
+    fts3Appendf(pRc, &zRet, "rowid");
+    for(i=0; i<p->nColumn; i++){
+      fts3Appendf(pRc, &zRet, ", x.'%q'", p->azColumn[i]);
+    }
+    if( p->zLanguageid ){
+      fts3Appendf(pRc, &zRet, ", x.%Q", p->zLanguageid);
+    }
+  }
+  fts3Appendf(pRc, &zRet, " FROM '%q'.'%q%s' AS x",
+      p->zDb,
+      (p->zContentTbl ? p->zContentTbl : p->zName),
+      (p->zContentTbl ? "" : "_content")
+  );
+  return zRet;
+}
+
+/*
+** Return a list of N comma separated question marks, where N is the number
+** of columns in the %_content table (one for the docid plus one for each
+** user-defined text column).
+**
+** If argument zFunc is not NULL, then all but the first question mark
+** are preceded by zFunc and an open bracket, and followed by a closed
+** bracket.
For example, if zFunc is "zip" and the FTS3 table has three
+** user-defined text columns, the following string is returned:
+**
+**     "?, zip(?), zip(?), zip(?)"
+**
+** The pointer returned points to a buffer allocated by sqlite3_malloc(). It
+** is the responsibility of the caller to eventually free it.
+**
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and
+** a NULL pointer is returned). Otherwise, if an OOM error is encountered
+** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If
+** no error occurs, *pRc is left unmodified.
+*/
+static char *fts3WriteExprList(Fts3Table *p, const char *zFunc, int *pRc){
+  char *zRet = 0;
+  char *zFree = 0;
+  char *zFunction;
+  int i;
+
+  if( !zFunc ){
+    zFunction = "";
+  }else{
+    zFree = zFunction = fts3QuoteId(zFunc);
+  }
+  fts3Appendf(pRc, &zRet, "?");
+  for(i=0; i<p->nColumn; i++){
+    fts3Appendf(pRc, &zRet, ",%s(?)", zFunction);
+  }
+  if( p->zLanguageid ){
+    fts3Appendf(pRc, &zRet, ", ?");
+  }
+  sqlite3_free(zFree);
+  return zRet;
+}
+
+/*
+** This function interprets the string at (*pp) as a non-negative integer
+** value. It reads the integer and sets *pnOut to the value read, then
+** sets *pp to point to the byte immediately following the last byte of
+** the integer value.
+**
+** Only decimal digits ('0'..'9') may be part of an integer value.
+**
+** If *pp does not begin with a decimal digit, SQLITE_ERROR is returned and
+** the output value is undefined. Otherwise SQLITE_OK is returned.
+**
+** This function is used when parsing the "prefix=" FTS4 parameter.
+*/
+static int fts3GobbleInt(const char **pp, int *pnOut){
+  const int MAX_NPREFIX = 10000000;
+  const char *p;                  /* Iterator pointer */
+  int nInt = 0;                   /* Output value */
+
+  for(p=*pp; p[0]>='0' && p[0]<='9'; p++){
+    nInt = nInt * 10 + (p[0] - '0');
+    if( nInt>MAX_NPREFIX ){
+      nInt = 0;
+      break;
+    }
+  }
+  if( p==*pp ) return SQLITE_ERROR;
+  *pnOut = nInt;
+  *pp = p;
+  return SQLITE_OK;
+}
+
+/*
+** This function is called to allocate an array of Fts3Index structures
+** representing the indexes maintained by the current FTS table. FTS tables
+** always maintain the main "terms" index, but may also maintain one or
+** more "prefix" indexes, depending on the value of the "prefix=" parameter
+** (if any) specified as part of the CREATE VIRTUAL TABLE statement.
+**
+** Argument zParam is passed the value of the "prefix=" option if one was
+** specified, or NULL otherwise.
+**
+** If no error occurs, SQLITE_OK is returned and *apIndex set to point to
+** the allocated array. *pnIndex is set to the number of elements in the
+** array. If an error does occur, an SQLite error code is returned.
+**
+** Regardless of whether or not an error is returned, it is the responsibility
+** of the caller to call sqlite3_free() on the output array to free it.
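+**
+** For example (an illustration of the rules above, not additional
+** behavior): prefix="2,4" yields *pnIndex==3, with aIndex[0].nPrefix==0
+** for the main terms index, aIndex[1].nPrefix==2 and
+** aIndex[2].nPrefix==4.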
+*/ +static int fts3PrefixParameter( + const char *zParam, /* ABC in prefix=ABC parameter to parse */ + int *pnIndex, /* OUT: size of *apIndex[] array */ + struct Fts3Index **apIndex /* OUT: Array of indexes for this table */ +){ + struct Fts3Index *aIndex; /* Allocated array */ + int nIndex = 1; /* Number of entries in array */ + + if( zParam && zParam[0] ){ + const char *p; + nIndex++; + for(p=zParam; *p; p++){ + if( *p==',' ) nIndex++; + } + } + + aIndex = sqlite3_malloc(sizeof(struct Fts3Index) * nIndex); + *apIndex = aIndex; + if( !aIndex ){ + return SQLITE_NOMEM; + } + + memset(aIndex, 0, sizeof(struct Fts3Index) * nIndex); + if( zParam ){ + const char *p = zParam; + int i; + for(i=1; i=0 ); + if( nPrefix==0 ){ + nIndex--; + i--; + }else{ + aIndex[i].nPrefix = nPrefix; + } + p++; + } + } + + *pnIndex = nIndex; + return SQLITE_OK; +} + +/* +** This function is called when initializing an FTS4 table that uses the +** content=xxx option. It determines the number of and names of the columns +** of the new FTS4 table. +** +** The third argument passed to this function is the value passed to the +** config=xxx option (i.e. "xxx"). This function queries the database for +** a table of that name. If found, the output variables are populated +** as follows: +** +** *pnCol: Set to the number of columns table xxx has, +** +** *pnStr: Set to the total amount of space required to store a copy +** of each columns name, including the nul-terminator. +** +** *pazCol: Set to point to an array of *pnCol strings. Each string is +** the name of the corresponding column in table xxx. The array +** and its contents are allocated using a single allocation. It +** is the responsibility of the caller to free this allocation +** by eventually passing the *pazCol value to sqlite3_free(). +** +** If the table cannot be found, an error code is returned and the output +** variables are undefined. Or, if an OOM is encountered, SQLITE_NOMEM is +** returned (and the output variables are undefined). +*/ +static int fts3ContentColumns( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Name of db (i.e. "main", "temp" etc.) */ + const char *zTbl, /* Name of content table */ + const char ***pazCol, /* OUT: Malloc'd array of column names */ + int *pnCol, /* OUT: Size of array *pazCol */ + int *pnStr, /* OUT: Bytes of string content */ + char **pzErr /* OUT: error message */ +){ + int rc = SQLITE_OK; /* Return code */ + char *zSql; /* "SELECT *" statement on zTbl */ + sqlite3_stmt *pStmt = 0; /* Compiled version of zSql */ + + zSql = sqlite3_mprintf("SELECT * FROM %Q.%Q", zDb, zTbl); + if( !zSql ){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + sqlite3Fts3ErrMsg(pzErr, "%s", sqlite3_errmsg(db)); + } + } + sqlite3_free(zSql); + + if( rc==SQLITE_OK ){ + const char **azCol; /* Output array */ + int nStr = 0; /* Size of all column names (incl. 0x00) */ + int nCol; /* Number of table columns */ + int i; /* Used to iterate through columns */ + + /* Loop through the returned columns. Set nStr to the number of bytes of + ** space required to store a copy of each column name, including the + ** nul-terminator byte. */ + nCol = sqlite3_column_count(pStmt); + for(i=0; i module name ("fts3" or "fts4") +** argv[1] -> database name +** argv[2] -> table name +** argv[...] -> "column name" and other module argument fields. 
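+**
+** For example (a hypothetical statement shown purely for illustration),
+** "CREATE VIRTUAL TABLE t1 USING fts4(a, b, tokenize=simple)" executed
+** against database "main" results in:
+**
+**   argv[0] = "fts4"
+**   argv[1] = "main"
+**   argv[2] = "t1"
+**   argv[3] = "a"
+**   argv[4] = "b"
+**   argv[5] = "tokenize=simple"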
+*/ +static int fts3InitVtab( + int isCreate, /* True for xCreate, false for xConnect */ + sqlite3 *db, /* The SQLite database connection */ + void *pAux, /* Hash table containing tokenizers */ + int argc, /* Number of elements in argv array */ + const char * const *argv, /* xCreate/xConnect argument array */ + sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ + char **pzErr /* Write any error message here */ +){ + Fts3Hash *pHash = (Fts3Hash *)pAux; + Fts3Table *p = 0; /* Pointer to allocated vtab */ + int rc = SQLITE_OK; /* Return code */ + int i; /* Iterator variable */ + int nByte; /* Size of allocation used for *p */ + int iCol; /* Column index */ + int nString = 0; /* Bytes required to hold all column names */ + int nCol = 0; /* Number of columns in the FTS table */ + char *zCsr; /* Space for holding column names */ + int nDb; /* Bytes required to hold database name */ + int nName; /* Bytes required to hold table name */ + int isFts4 = (argv[0][3]=='4'); /* True for FTS4, false for FTS3 */ + const char **aCol; /* Array of column names */ + sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */ + + int nIndex = 0; /* Size of aIndex[] array */ + struct Fts3Index *aIndex = 0; /* Array of indexes for this table */ + + /* The results of parsing supported FTS4 key=value options: */ + int bNoDocsize = 0; /* True to omit %_docsize table */ + int bDescIdx = 0; /* True to store descending indexes */ + char *zPrefix = 0; /* Prefix parameter value (or NULL) */ + char *zCompress = 0; /* compress=? parameter (or NULL) */ + char *zUncompress = 0; /* uncompress=? parameter (or NULL) */ + char *zContent = 0; /* content=? parameter (or NULL) */ + char *zLanguageid = 0; /* languageid=? parameter (or NULL) */ + char **azNotindexed = 0; /* The set of notindexed= columns */ + int nNotindexed = 0; /* Size of azNotindexed[] array */ + + assert( strlen(argv[0])==4 ); + assert( (sqlite3_strnicmp(argv[0], "fts4", 4)==0 && isFts4) + || (sqlite3_strnicmp(argv[0], "fts3", 4)==0 && !isFts4) + ); + + nDb = (int)strlen(argv[1]) + 1; + nName = (int)strlen(argv[2]) + 1; + + nByte = sizeof(const char *) * (argc-2); + aCol = (const char **)sqlite3_malloc(nByte); + if( aCol ){ + memset((void*)aCol, 0, nByte); + azNotindexed = (char **)sqlite3_malloc(nByte); + } + if( azNotindexed ){ + memset(azNotindexed, 0, nByte); + } + if( !aCol || !azNotindexed ){ + rc = SQLITE_NOMEM; + goto fts3_init_out; + } + + /* Loop through all of the arguments passed by the user to the FTS3/4 + ** module (i.e. all the column names and special arguments). This loop + ** does the following: + ** + ** + Figures out the number of columns the FTSX table will have, and + ** the number of bytes of space that must be allocated to store copies + ** of the column names. + ** + ** + If there is a tokenizer specification included in the arguments, + ** initializes the tokenizer pTokenizer. + */ + for(i=3; rc==SQLITE_OK && i8 + && 0==sqlite3_strnicmp(z, "tokenize", 8) + && 0==sqlite3Fts3IsIdChar(z[8]) + ){ + rc = sqlite3Fts3InitTokenizer(pHash, &z[9], &pTokenizer, pzErr); + } + + /* Check if it is an FTS4 special argument. 
+    else if( isFts4 && fts3IsSpecialColumn(z, &nKey, &zVal) ){
+      struct Fts4Option {
+        const char *zOpt;
+        int nOpt;
+      } aFts4Opt[] = {
+        { "matchinfo",   9 },     /* 0 -> MATCHINFO */
+        { "prefix",      6 },     /* 1 -> PREFIX */
+        { "compress",    8 },     /* 2 -> COMPRESS */
+        { "uncompress", 10 },     /* 3 -> UNCOMPRESS */
+        { "order",       5 },     /* 4 -> ORDER */
+        { "content",     7 },     /* 5 -> CONTENT */
+        { "languageid", 10 },     /* 6 -> LANGUAGEID */
+        { "notindexed", 10 }      /* 7 -> NOTINDEXED */
+      };
+
+      int iOpt;
+      if( !zVal ){
+        rc = SQLITE_NOMEM;
+      }else{
+        for(iOpt=0; iOpt<SizeofArray(aFts4Opt); iOpt++){
+          struct Fts4Option *pOp = &aFts4Opt[iOpt];
+          if( nKey==pOp->nOpt && !sqlite3_strnicmp(z, pOp->zOpt, pOp->nOpt) ){
+            break;
+          }
+        }
+        if( iOpt==SizeofArray(aFts4Opt) ){
+          sqlite3Fts3ErrMsg(pzErr, "unrecognized parameter: %s", z);
+          rc = SQLITE_ERROR;
+        }else{
+          switch( iOpt ){
+            case 0:               /* MATCHINFO */
+              if( strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "fts3", 4) ){
+                sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo: %s", zVal);
+                rc = SQLITE_ERROR;
+              }
+              bNoDocsize = 1;
+              break;
+
+            case 1:               /* PREFIX */
+              sqlite3_free(zPrefix);
+              zPrefix = zVal;
+              zVal = 0;
+              break;
+
+            case 2:               /* COMPRESS */
+              sqlite3_free(zCompress);
+              zCompress = zVal;
+              zVal = 0;
+              break;
+
+            case 3:               /* UNCOMPRESS */
+              sqlite3_free(zUncompress);
+              zUncompress = zVal;
+              zVal = 0;
+              break;
+
+            case 4:               /* ORDER */
+              if( (strlen(zVal)!=3 || sqlite3_strnicmp(zVal, "asc", 3))
+               && (strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "desc", 4))
+              ){
+                sqlite3Fts3ErrMsg(pzErr, "unrecognized order: %s", zVal);
+                rc = SQLITE_ERROR;
+              }
+              bDescIdx = (zVal[0]=='d' || zVal[0]=='D');
+              break;
+
+            case 5:               /* CONTENT */
+              sqlite3_free(zContent);
+              zContent = zVal;
+              zVal = 0;
+              break;
+
+            case 6:               /* LANGUAGEID */
+              assert( iOpt==6 );
+              sqlite3_free(zLanguageid);
+              zLanguageid = zVal;
+              zVal = 0;
+              break;
+
+            case 7:               /* NOTINDEXED */
+              azNotindexed[nNotindexed++] = zVal;
+              zVal = 0;
+              break;
+          }
+        }
+        sqlite3_free(zVal);
+      }
+    }
+
+    /* Otherwise, the argument is a column name. */
+    else {
+      nString += (int)(strlen(z) + 1);
+      aCol[nCol++] = z;
+    }
+  }
+
+  /* If a content=xxx option was specified, the following:
+  **
+  **   1. Ignore any compress= and uncompress= options.
+  **
+  **   2. If no column names were specified as part of the CREATE VIRTUAL
+  **      TABLE statement, use all columns from the content table.
+  */
+  if( rc==SQLITE_OK && zContent ){
+    sqlite3_free(zCompress);
+    sqlite3_free(zUncompress);
+    zCompress = 0;
+    zUncompress = 0;
+    if( nCol==0 ){
+      sqlite3_free((void*)aCol);
+      aCol = 0;
+      rc = fts3ContentColumns(db, argv[1], zContent,&aCol,&nCol,&nString,pzErr);
+
+      /* If a languageid= option was specified, remove the language id
+      ** column from the aCol[] array. */
+      if( rc==SQLITE_OK && zLanguageid ){
+        int j;
+        for(j=0; j<nCol; j++){
+          if( sqlite3_stricmp(zLanguageid, aCol[j])==0 ){
+            int k;
+            for(k=j; k<nCol; k++) aCol[k] = aCol[k+1];
+            nCol--;
+            break;
+          }
+        }
+      }
+    }
+  }
+  if( rc!=SQLITE_OK ) goto fts3_init_out;
+
+  if( nCol==0 ){
+    assert( nString==0 );
+    aCol[0] = "content";
+    nString = 8;
+    nCol = 1;
+  }
+
+  if( pTokenizer==0 ){
+    rc = sqlite3Fts3InitTokenizer(pHash, "simple", &pTokenizer, pzErr);
+    if( rc!=SQLITE_OK ) goto fts3_init_out;
+  }
+  assert( pTokenizer );
+
+  rc = fts3PrefixParameter(zPrefix, &nIndex, &aIndex);
+  if( rc==SQLITE_ERROR ){
+    assert( zPrefix );
+    sqlite3Fts3ErrMsg(pzErr, "error parsing prefix parameter: %s", zPrefix);
+  }
+  if( rc!=SQLITE_OK ) goto fts3_init_out;
+
+  /* Allocate and populate the Fts3Table structure. */
+  nByte = sizeof(Fts3Table) +                  /* Fts3Table */
+          nCol * sizeof(char *) +              /* azColumn */
+          nIndex * sizeof(struct Fts3Index) +  /* aIndex */
+          nCol * sizeof(u8) +                  /* abNotindexed */
+          nName +                              /* zName */
+          nDb +                                /* zDb */
+          nString;                             /* azColumn contents */
+  p = (Fts3Table*)sqlite3_malloc(nByte);
+  if( p==0 ){
+    rc = SQLITE_NOMEM;
+    goto fts3_init_out;
+  }
+  memset(p, 0, nByte);
+  p->db = db;
+  p->nColumn = nCol;
+  p->nPendingData = 0;
+  p->azColumn = (char **)&p[1];
+  p->pTokenizer = pTokenizer;
+  p->nMaxPendingData = FTS3_MAX_PENDING_DATA;
+  p->bHasDocsize = (isFts4 && bNoDocsize==0);
+  p->bHasStat = (u8)isFts4;
+  p->bFts4 = (u8)isFts4;
+  p->bDescIdx = (u8)bDescIdx;
+  p->nAutoincrmerge = 0xff;       /* 0xff means setting unknown */
+  p->zContentTbl = zContent;
+  p->zLanguageid = zLanguageid;
+  zContent = 0;
+  zLanguageid = 0;
+  TESTONLY( p->inTransaction = -1 );
+  TESTONLY( p->mxSavepoint = -1 );
+
+  p->aIndex = (struct Fts3Index *)&p->azColumn[nCol];
+  memcpy(p->aIndex, aIndex, sizeof(struct Fts3Index) * nIndex);
+  p->nIndex = nIndex;
+  for(i=0; i<nIndex; i++){
+    fts3HashInit(&p->aIndex[i].hPending, FTS3_HASH_STRING, 1);
+  }
+  p->abNotindexed = (u8 *)&p->aIndex[nIndex];
+
+  /* Fill in the zName and zDb fields of the vtab structure. */
+  zCsr = (char *)&p->abNotindexed[nCol];
+  p->zName = zCsr;
+  memcpy(zCsr, argv[2], nName);
+  zCsr += nName;
+  p->zDb = zCsr;
+  memcpy(zCsr, argv[1], nDb);
+  zCsr += nDb;
+
+  /* Fill in the azColumn array */
+  for(iCol=0; iCol<nCol; iCol++){
+    char *z;
+    int n = 0;
+    z = (char *)sqlite3Fts3NextToken(aCol[iCol], &n);
+    memcpy(zCsr, z, n);
+    zCsr[n] = '\0';
+    sqlite3Fts3Dequote(zCsr);
+    p->azColumn[iCol] = zCsr;
+    zCsr += n+1;
+    assert( zCsr <= &((char *)p)[nByte] );
+  }
+
+  /* Fill in the abNotindexed array */
+  for(iCol=0; iCol<nCol; iCol++){
+    int n = (int)strlen(p->azColumn[iCol]);
+    for(i=0; i<nNotindexed; i++){
+      char *zNot = azNotindexed[i];
+      if( zNot && n==(int)strlen(zNot)
+       && 0==sqlite3_strnicmp(p->azColumn[iCol], zNot, n)
+      ){
+        p->abNotindexed[iCol] = 1;
+        sqlite3_free(zNot);
+        azNotindexed[i] = 0;
+      }
+    }
+  }
+  for(i=0; i<nNotindexed; i++){
+    if( azNotindexed[i] ){
+      sqlite3Fts3ErrMsg(pzErr, "no such column: %s", azNotindexed[i]);
+      rc = SQLITE_ERROR;
+    }
+  }
+
+  if( rc==SQLITE_OK && (zCompress==0)!=(zUncompress==0) ){
+    char const *zMiss = (zCompress==0 ? "compress" : "uncompress");
+    rc = SQLITE_ERROR;
+    sqlite3Fts3ErrMsg(pzErr, "missing %s parameter in fts4 constructor", zMiss);
+  }
+  p->zReadExprlist = fts3ReadExprList(p, zUncompress, &rc);
+  p->zWriteExprlist = fts3WriteExprList(p, zCompress, &rc);
+  if( rc!=SQLITE_OK ) goto fts3_init_out;
+
+  /* If this is an xCreate call, create the underlying tables in the
+  ** database. TODO: For xConnect(), it could verify that said tables exist.
+  */
+  if( isCreate ){
+    rc = fts3CreateTables(p);
+  }
+
+  /* Check to see if a legacy fts3 table has been "upgraded" by the
+  ** addition of a %_stat table so that it can use incremental merge.
+  */
+  if( !isFts4 && !isCreate ){
+    p->bHasStat = 2;
+  }
+
+  /* Figure out the page-size for the database. This is required in order to
+  ** estimate the cost of loading large doclists from the database. */
+  fts3DatabasePageSize(&rc, p);
+  p->nNodeSize = p->nPgsz-35;
+
+  /* Declare the table schema to SQLite. */
+  fts3DeclareVtab(&rc, p);
+
+fts3_init_out:
+  sqlite3_free(zPrefix);
+  sqlite3_free(aIndex);
+  sqlite3_free(zCompress);
+  sqlite3_free(zUncompress);
+  sqlite3_free(zContent);
+  sqlite3_free(zLanguageid);
+  for(i=0; i<nNotindexed; i++) sqlite3_free(azNotindexed[i]);
+  sqlite3_free((void *)aCol);
+  sqlite3_free((void *)azNotindexed);
+  if( rc!=SQLITE_OK ){
+    if( p ){
+      fts3DisconnectMethod((sqlite3_vtab *)p);
+    }else if( pTokenizer ){
+      pTokenizer->pModule->xDestroy(pTokenizer);
+    }
+  }else{
+    assert( p->pSegments==0 );
+    *ppVTab = &p->base;
+  }
+  return rc;
+}
+
+/*
+** The xConnect() and xCreate() methods for the virtual table. All the
+** work is done in function fts3InitVtab().
+*/
+static int fts3ConnectMethod(
+  sqlite3 *db,                    /* Database connection */
+  void *pAux,                     /* Pointer to tokenizer hash table */
+  int argc,                       /* Number of elements in argv array */
+  const char * const *argv,       /* xCreate/xConnect argument array */
+  sqlite3_vtab **ppVtab,          /* OUT: New sqlite3_vtab object */
+  char **pzErr                    /* OUT: sqlite3_malloc'd error message */
+){
+  return fts3InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr);
+}
+static int fts3CreateMethod(
+  sqlite3 *db,                    /* Database connection */
+  void *pAux,                     /* Pointer to tokenizer hash table */
+  int argc,                       /* Number of elements in argv array */
+  const char * const *argv,       /* xCreate/xConnect argument array */
+  sqlite3_vtab **ppVtab,          /* OUT: New sqlite3_vtab object */
+  char **pzErr                    /* OUT: sqlite3_malloc'd error message */
+){
+  return fts3InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr);
+}
+
+/*
+** Set the pIdxInfo->estimatedRows variable to nRow. Unless this
+** extension is currently being used by a version of SQLite too old to
+** support estimatedRows. In that case this function is a no-op.
+*/
+static void fts3SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){
+#if SQLITE_VERSION_NUMBER>=3008002
+  if( sqlite3_libversion_number()>=3008002 ){
+    pIdxInfo->estimatedRows = nRow;
+  }
+#endif
+}
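+
+/* [Editor's note] Illustrative examples only - not part of the original
+** FTS3 source. The xBestIndex implementation below chooses between three
+** query strategies. Assuming a hypothetical table created with
+** "CREATE VIRTUAL TABLE mail USING fts4(subject, body)", the strategies
+** correspond to queries of the following shapes:
+**
+**   SELECT * FROM mail WHERE rowid = 15;            -- 1. docid lookup
+**   SELECT * FROM mail WHERE body MATCH 'sqlite';   -- 2. full-text search
+**   SELECT * FROM mail;                             -- 3. linear scan
+*/
+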
+/*
+** Set the SQLITE_INDEX_SCAN_UNIQUE flag in pIdxInfo->flags. Unless this
+** extension is currently being used by a version of SQLite too old to
+** support index-info flags. In that case this function is a no-op.
+*/
+static void fts3SetUniqueFlag(sqlite3_index_info *pIdxInfo){
+#if SQLITE_VERSION_NUMBER>=3008012
+  if( sqlite3_libversion_number()>=3008012 ){
+    pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
+  }
+#endif
+}
+
+/*
+** Implementation of the xBestIndex method for FTS3 tables. There
+** are three possible strategies, in order of preference:
+**
+**   1. Direct lookup by rowid or docid.
+**   2. Full-text search using a MATCH operator on a non-docid column.
+**   3. Linear scan of %_content table.
+*/
+static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){
+  Fts3Table *p = (Fts3Table *)pVTab;
+  int i;                          /* Iterator variable */
+  int iCons = -1;                 /* Index of constraint to use */
+
+  int iLangidCons = -1;           /* Index of langid=x constraint, if present */
+  int iDocidGe = -1;              /* Index of docid>=x constraint, if present */
+  int iDocidLe = -1;              /* Index of docid<=x constraint, if present */
+  int iIdx;
+
+  /* By default use a full table scan. This is an expensive option,
+  ** so search through the constraints to see if a more efficient
+  ** strategy is possible.
+  */
+  pInfo->idxNum = FTS3_FULLSCAN_SEARCH;
+  pInfo->estimatedCost = 5000000;
+  for(i=0; i<pInfo->nConstraint; i++){
+    int bDocid;                   /* True if this constraint is on docid */
+    struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i];
+    if( pCons->usable==0 ){
+      if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){
+        /* There exists an unusable MATCH constraint. This means that if
+        ** the planner does elect to use the results of this call as part
+        ** of the overall query plan the user will see an "unable to use
+        ** function MATCH in the requested context" error. To discourage
+        ** this, return a very high cost here. */
+        pInfo->idxNum = FTS3_FULLSCAN_SEARCH;
+        pInfo->estimatedCost = 1e50;
+        fts3SetEstimatedRows(pInfo, ((sqlite3_int64)1) << 50);
+        return SQLITE_OK;
+      }
+      continue;
+    }
+
+    bDocid = (pCons->iColumn<0 || pCons->iColumn==p->nColumn+1);
+
+    /* A direct lookup on the rowid or docid column. Assign a cost of 1.0. */
+    if( iCons<0 && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ && bDocid ){
+      pInfo->idxNum = FTS3_DOCID_SEARCH;
+      pInfo->estimatedCost = 1.0;
+      iCons = i;
+    }
+
+    /* A MATCH constraint. Use a full-text search.
+    **
+    ** If there is more than one MATCH constraint available, use the first
+    ** one encountered. If there is both a MATCH constraint and a direct
+    ** rowid/docid lookup, prefer the MATCH strategy. This is done even
+    ** though the rowid/docid lookup is faster than a MATCH query, since
+    ** selecting it would lead to an "unable to use function MATCH in the
+    ** requested context" error.
+    */
+    if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH
+     && pCons->iColumn>=0 && pCons->iColumn<=p->nColumn
+    ){
+      pInfo->idxNum = FTS3_FULLTEXT_SEARCH + pCons->iColumn;
+      pInfo->estimatedCost = 2.0;
+      iCons = i;
+    }
+
+    /* Equality constraint on the langid column */
+    if( pCons->op==SQLITE_INDEX_CONSTRAINT_EQ
+     && pCons->iColumn==p->nColumn + 2
+    ){
+      iLangidCons = i;
+    }
+
+    if( bDocid ){
+      switch( pCons->op ){
+        case SQLITE_INDEX_CONSTRAINT_GE:
+        case SQLITE_INDEX_CONSTRAINT_GT:
+          iDocidGe = i;
+          break;
+
+        case SQLITE_INDEX_CONSTRAINT_LE:
+        case SQLITE_INDEX_CONSTRAINT_LT:
+          iDocidLe = i;
+          break;
+      }
+    }
+  }
+
+  /* If using a docid=? or rowid=? strategy, set the UNIQUE flag. */
+  if( pInfo->idxNum==FTS3_DOCID_SEARCH ) fts3SetUniqueFlag(pInfo);
+
+  iIdx = 1;
+  if( iCons>=0 ){
+    pInfo->aConstraintUsage[iCons].argvIndex = iIdx++;
+    pInfo->aConstraintUsage[iCons].omit = 1;
+  }
+  if( iLangidCons>=0 ){
+    pInfo->idxNum |= FTS3_HAVE_LANGID;
+    pInfo->aConstraintUsage[iLangidCons].argvIndex = iIdx++;
+  }
+  if( iDocidGe>=0 ){
+    pInfo->idxNum |= FTS3_HAVE_DOCID_GE;
+    pInfo->aConstraintUsage[iDocidGe].argvIndex = iIdx++;
+  }
+  if( iDocidLe>=0 ){
+    pInfo->idxNum |= FTS3_HAVE_DOCID_LE;
+    pInfo->aConstraintUsage[iDocidLe].argvIndex = iIdx++;
+  }
+
+  /* Regardless of the strategy selected, FTS can deliver rows in rowid (or
+  ** docid) order. Both ascending and descending are possible.
+  */
+  if( pInfo->nOrderBy==1 ){
+    struct sqlite3_index_orderby *pOrder = &pInfo->aOrderBy[0];
+    if( pOrder->iColumn<0 || pOrder->iColumn==p->nColumn+1 ){
+      if( pOrder->desc ){
+        pInfo->idxStr = "DESC";
+      }else{
+        pInfo->idxStr = "ASC";
+      }
+      pInfo->orderByConsumed = 1;
+    }
+  }
+
+  assert( p->pSegments==0 );
+  return SQLITE_OK;
+}
+
+/*
+** Implementation of xOpen method.
+*/
+static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+  sqlite3_vtab_cursor *pCsr;      /* Allocated cursor */
+
+  UNUSED_PARAMETER(pVTab);
+
+  /* Allocate a buffer large enough for an Fts3Cursor structure. If the
+  ** allocation succeeds, zero it and return SQLITE_OK. Otherwise,
+  ** if the allocation fails, return SQLITE_NOMEM.
+  */
+  *ppCsr = pCsr = (sqlite3_vtab_cursor *)sqlite3_malloc(sizeof(Fts3Cursor));
+  if( !pCsr ){
+    return SQLITE_NOMEM;
+  }
+  memset(pCsr, 0, sizeof(Fts3Cursor));
+  return SQLITE_OK;
+}
+
+/*
+** Finalize the statement handle at pCsr->pStmt.
+**
+** Or, if that statement handle is one created by fts3CursorSeekStmt(),
+** and the Fts3Table.pSeekStmt slot is currently NULL, save the statement
+** pointer there instead of finalizing it.
+*/
+static void fts3CursorFinalizeStmt(Fts3Cursor *pCsr){
+  if( pCsr->bSeekStmt ){
+    Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+    if( p->pSeekStmt==0 ){
+      p->pSeekStmt = pCsr->pStmt;
+      sqlite3_reset(pCsr->pStmt);
+      pCsr->pStmt = 0;
+    }
+    pCsr->bSeekStmt = 0;
+  }
+  sqlite3_finalize(pCsr->pStmt);
+}
+
+/*
+** Close the cursor. For additional information see the documentation
+** on the xClose method of the virtual table interface.
+*/
+static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){
+  Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
+  assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+  fts3CursorFinalizeStmt(pCsr);
+  sqlite3Fts3ExprFree(pCsr->pExpr);
+  sqlite3Fts3FreeDeferredTokens(pCsr);
+  sqlite3_free(pCsr->aDoclist);
+  sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
+  assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+  sqlite3_free(pCsr);
+  return SQLITE_OK;
+}
+
+/*
+** If pCsr->pStmt has not been prepared (i.e. if pCsr->pStmt==0), then
+** compose and prepare an SQL statement of the form:
+**
+**     "SELECT <columns> FROM %_content WHERE rowid = ?"
+**
+** (or the equivalent for a content=xxx table) and set pCsr->pStmt to
+** it. If an error occurs, return an SQLite error code.
+*/ +static int fts3CursorSeekStmt(Fts3Cursor *pCsr){ + int rc = SQLITE_OK; + if( pCsr->pStmt==0 ){ + Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; + char *zSql; + if( p->pSeekStmt ){ + pCsr->pStmt = p->pSeekStmt; + p->pSeekStmt = 0; + }else{ + zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist); + if( !zSql ) return SQLITE_NOMEM; + rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); + sqlite3_free(zSql); + } + if( rc==SQLITE_OK ) pCsr->bSeekStmt = 1; + } + return rc; +} + +/* +** Position the pCsr->pStmt statement so that it is on the row +** of the %_content table that contains the last match. Return +** SQLITE_OK on success. +*/ +static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ + int rc = SQLITE_OK; + if( pCsr->isRequireSeek ){ + rc = fts3CursorSeekStmt(pCsr); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId); + pCsr->isRequireSeek = 0; + if( SQLITE_ROW==sqlite3_step(pCsr->pStmt) ){ + return SQLITE_OK; + }else{ + rc = sqlite3_reset(pCsr->pStmt); + if( rc==SQLITE_OK && ((Fts3Table *)pCsr->base.pVtab)->zContentTbl==0 ){ + /* If no row was found and no error has occurred, then the %_content + ** table is missing a row that is present in the full-text index. + ** The data structures are corrupt. */ + rc = FTS_CORRUPT_VTAB; + pCsr->isEof = 1; + } + } + } + } + + if( rc!=SQLITE_OK && pContext ){ + sqlite3_result_error_code(pContext, rc); + } + return rc; +} + +/* +** This function is used to process a single interior node when searching +** a b-tree for a term or term prefix. The node data is passed to this +** function via the zNode/nNode parameters. The term to search for is +** passed in zTerm/nTerm. +** +** If piFirst is not NULL, then this function sets *piFirst to the blockid +** of the child node that heads the sub-tree that may contain the term. +** +** If piLast is not NULL, then *piLast is set to the right-most child node +** that heads a sub-tree that may contain a term for which zTerm/nTerm is +** a prefix. +** +** If an OOM error occurs, SQLITE_NOMEM is returned. Otherwise, SQLITE_OK. +*/ +static int fts3ScanInteriorNode( + const char *zTerm, /* Term to select leaves for */ + int nTerm, /* Size of term zTerm in bytes */ + const char *zNode, /* Buffer containing segment interior node */ + int nNode, /* Size of buffer at zNode */ + sqlite3_int64 *piFirst, /* OUT: Selected child node */ + sqlite3_int64 *piLast /* OUT: Selected child node */ +){ + int rc = SQLITE_OK; /* Return code */ + const char *zCsr = zNode; /* Cursor to iterate through node */ + const char *zEnd = &zCsr[nNode];/* End of interior node buffer */ + char *zBuffer = 0; /* Buffer to load terms into */ + int nAlloc = 0; /* Size of allocated buffer */ + int isFirstTerm = 1; /* True when processing first term on page */ + sqlite3_int64 iChild; /* Block id of child node to descend to */ + + /* Skip over the 'height' varint that occurs at the start of every + ** interior node. Then load the blockid of the left-child of the b-tree + ** node into variable iChild. + ** + ** Even if the data structure on disk is corrupted, this (reading two + ** varints from the buffer) does not risk an overread. If zNode is a + ** root node, then the buffer comes from a SELECT statement. SQLite does + ** not make this guarantee explicitly, but in practice there are always + ** either more than 20 bytes of allocated space following the nNode bytes of + ** contents, or two zero bytes. 
Or, if the node is read from the %_segments
+  ** table, then there are always 20 bytes of zeroed padding following the
+  ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details).
+  */
+  zCsr += sqlite3Fts3GetVarint(zCsr, &iChild);
+  zCsr += sqlite3Fts3GetVarint(zCsr, &iChild);
+  if( zCsr>zEnd ){
+    return FTS_CORRUPT_VTAB;
+  }
+
+  while( zCsr<zEnd && (piFirst || piLast) ){
+    int cmp;                      /* memcmp() result */
+    int nSuffix;                  /* Size of term suffix */
+    int nPrefix = 0;              /* Size of term prefix */
+    int nBuffer;                  /* Total term size */
+
+    /* Load the next term on the node into zBuffer. Use realloc() to expand
+    ** the size of zBuffer if required. */
+    if( !isFirstTerm ){
+      zCsr += fts3GetVarint32(zCsr, &nPrefix);
+    }
+    isFirstTerm = 0;
+    zCsr += fts3GetVarint32(zCsr, &nSuffix);
+
+    if( nPrefix<0 || nSuffix<0 || &zCsr[nSuffix]>zEnd ){
+      rc = FTS_CORRUPT_VTAB;
+      goto finish_scan;
+    }
+    if( nPrefix+nSuffix>nAlloc ){
+      char *zNew;
+      nAlloc = (nPrefix+nSuffix) * 2;
+      zNew = (char *)sqlite3_realloc(zBuffer, nAlloc);
+      if( !zNew ){
+        rc = SQLITE_NOMEM;
+        goto finish_scan;
+      }
+      zBuffer = zNew;
+    }
+    assert( zBuffer );
+    memcpy(&zBuffer[nPrefix], zCsr, nSuffix);
+    nBuffer = nPrefix + nSuffix;
+    zCsr += nSuffix;
+
+    /* Compare the term we are searching for with the term just loaded from
+    ** the interior node. If the specified term is greater than or equal
+    ** to the term from the interior node, then all terms on the sub-tree
+    ** headed by node iChild are smaller than zTerm. No need to search
+    ** iChild.
+    **
+    ** If the interior node term is larger than the specified term, then
+    ** the tree headed by iChild may contain the specified term.
+    */
+    cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer));
+    if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){
+      *piFirst = iChild;
+      piFirst = 0;
+    }
+
+    if( piLast && cmp<0 ){
+      *piLast = iChild;
+      piLast = 0;
+    }
+
+    iChild++;
+  }
+
+  if( piFirst ) *piFirst = iChild;
+  if( piLast ) *piLast = iChild;
+
+ finish_scan:
+  sqlite3_free(zBuffer);
+  return rc;
+}
+
+
+/*
+** The buffer pointed to by argument zNode (size nNode bytes) contains an
+** interior node of a b-tree segment. The zTerm buffer (size nTerm bytes)
+** contains a term. This function searches the sub-tree headed by the zNode
+** node for the range of leaf nodes that may contain the specified term
+** or terms for which the specified term is a prefix.
+**
+** If piLeaf is not NULL, then *piLeaf is set to the blockid of the
+** left-most leaf node in the tree that may contain the specified term.
+** If piLeaf2 is not NULL, then *piLeaf2 is set to the blockid of the
+** right-most leaf node that may contain a term for which the specified
+** term is a prefix.
+**
+** It is possible that the range of returned leaf nodes does not contain
+** the specified term or any terms for which it is a prefix. However, if the
+** segment does contain any such terms, they are stored within the identified
+** range. Because this function only inspects interior segment nodes (and
+** never loads leaf nodes into memory), it is not possible to be sure.
+**
+** If an error occurs, an error code other than SQLITE_OK is returned.
+*/
+static int fts3SelectLeaf(
+  Fts3Table *p,                   /* Virtual table handle */
+  const char *zTerm,              /* Term to select leaves for */
+  int nTerm,                      /* Size of term zTerm in bytes */
+  const char *zNode,              /* Buffer containing segment interior node */
+  int nNode,                      /* Size of buffer at zNode */
+  sqlite3_int64 *piLeaf,          /* Selected leaf node */
+  sqlite3_int64 *piLeaf2          /* Selected leaf node */
+){
+  int rc = SQLITE_OK;             /* Return code */
+  int iHeight;                    /* Height of this node in tree */
+
+  assert( piLeaf || piLeaf2 );
+
+  fts3GetVarint32(zNode, &iHeight);
+  rc = fts3ScanInteriorNode(zTerm, nTerm, zNode, nNode, piLeaf, piLeaf2);
+  assert( !piLeaf2 || !piLeaf || rc!=SQLITE_OK || (*piLeaf<=*piLeaf2) );
+
+  if( rc==SQLITE_OK && iHeight>1 ){
+    char *zBlob = 0;              /* Blob read from %_segments table */
+    int nBlob = 0;                /* Size of zBlob in bytes */
+
+    if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){
+      rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob, 0);
+      if( rc==SQLITE_OK ){
+        rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, 0);
+      }
+      sqlite3_free(zBlob);
+      piLeaf = 0;
+      zBlob = 0;
+    }
+
+    if( rc==SQLITE_OK ){
+      rc = sqlite3Fts3ReadBlock(p, piLeaf?*piLeaf:*piLeaf2, &zBlob, &nBlob, 0);
+    }
+    if( rc==SQLITE_OK ){
+      rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, piLeaf2);
+    }
+    sqlite3_free(zBlob);
+  }
+
+  return rc;
+}
+
+/*
+** This function is used to create delta-encoded serialized lists of FTS3
+** varints. Each call to this function appends a single varint to a list.
+*/
+static void fts3PutDeltaVarint(
+  char **pp,                      /* IN/OUT: Output pointer */
+  sqlite3_int64 *piPrev,          /* IN/OUT: Previous value written to list */
+  sqlite3_int64 iVal              /* Write this value to the list */
+){
+  assert( iVal-*piPrev > 0 || (*piPrev==0 && iVal==0) );
+  *pp += sqlite3Fts3PutVarint(*pp, iVal-*piPrev);
+  *piPrev = iVal;
+}
+
+/*
+** When this function is called, *ppPoslist is assumed to point to the
+** start of a position-list. After it returns, *ppPoslist points to the
+** first byte after the position-list.
+**
+** A position list is a list of positions (delta encoded) and columns for
+** a single document record of a doclist. So, in other words, this
+** routine advances *ppPoslist so that it points to the next docid in
+** the doclist, or to the first byte past the end of the doclist.
+**
+** If pp is not NULL, then the contents of the position list are copied
+** to *pp. *pp is set to point to the first byte past the last byte copied
+** before this function returns.
+*/
+static void fts3PoslistCopy(char **pp, char **ppPoslist){
+  char *pEnd = *ppPoslist;
+  char c = 0;
+
+  /* The end of a position list is marked by a zero encoded as an FTS3
+  ** varint. A single POS_END (0) byte. Except, if the 0 byte is preceded by
+  ** a byte with the 0x80 bit set, then it is not a varint 0, but the tail
+  ** of some other, multi-byte, value.
+  **
+  ** The following while-loop moves pEnd to point to the first byte that is
+  ** not immediately preceded by a byte with the 0x80 bit set. It then
+  ** increments pEnd once more so that it points to the byte immediately
+  ** following the last byte in the position-list.
+  */
+  while( *pEnd | c ){
+    c = *pEnd++ & 0x80;
+    testcase( c!=0 && (*pEnd)==0 );
+  }
+  pEnd++;  /* Advance past the POS_END terminator byte */
+
+  if( pp ){
+    int n = (int)(pEnd - *ppPoslist);
+    char *p = *pp;
+    memcpy(p, *ppPoslist, n);
+    p += n;
+    *pp = p;
+  }
+  *ppPoslist = pEnd;
+}
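+
+/* [Editor's note] Worked example, not part of the original FTS3 source.
+** A doclist entry consists of a docid varint followed by a position list.
+** Within a position list, positions are delta-encoded per column, and each
+** stored value is the delta plus 2, so an encoded position byte can never
+** be 0x00 (POS_END) or 0x01 (POS_COLUMN). For a document with docid 10
+** that contains the indexed token at positions 0 and 3 of column 0 and at
+** position 1 of column 2, the serialized entry would be:
+**
+**   0x0A          docid 10
+**   0x02          column 0, position 0     (0 - prev(0) + 2)
+**   0x05          column 0, position 3     (3 - 0 + 2)
+**   0x01 0x02     POS_COLUMN, column 2
+**   0x03          column 2, position 1     (1 - prev(0) + 2)
+**   0x00          POS_END
+*/
+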
+/*
+** When this function is called, *ppPoslist is assumed to point to the
+** start of a column-list. After it returns, *ppPoslist points to the
+** terminator (POS_COLUMN or POS_END) byte of the column-list.
+**
+** A column-list is a list of delta-encoded positions for a single column
+** within a single document within a doclist.
+**
+** The column-list is terminated either by a POS_COLUMN varint (1) or
+** a POS_END varint (0). This routine leaves *ppPoslist pointing to
+** the POS_COLUMN or POS_END that terminates the column-list.
+**
+** If pp is not NULL, then the contents of the column-list are copied
+** to *pp. *pp is set to point to the first byte past the last byte copied
+** before this function returns. The POS_COLUMN or POS_END terminator
+** is not copied into *pp.
+*/
+static void fts3ColumnlistCopy(char **pp, char **ppPoslist){
+  char *pEnd = *ppPoslist;
+  char c = 0;
+
+  /* A column-list is terminated by either a 0x01 or 0x00 byte that is
+  ** not part of a multi-byte varint.
+  */
+  while( 0xFE & (*pEnd | c) ){
+    c = *pEnd++ & 0x80;
+    testcase( c!=0 && ((*pEnd)&0xfe)==0 );
+  }
+  if( pp ){
+    int n = (int)(pEnd - *ppPoslist);
+    char *p = *pp;
+    memcpy(p, *ppPoslist, n);
+    p += n;
+    *pp = p;
+  }
+  *ppPoslist = pEnd;
+}
+
+/*
+** Value used to signify the end of a position-list. This is safe because
+** it is not possible to have a document with 2^31 terms.
+*/
+#define POSITION_LIST_END 0x7fffffff
+
+/*
+** This function is used to help parse position-lists. When this function is
+** called, *pp may point to the start of the next varint in the position-list
+** being parsed, or it may point to 1 byte past the end of the position-list
+** (in which case **pp will be a terminator byte: either POS_END (0) or
+** POS_COLUMN (1)).
+**
+** If *pp points past the end of the current position-list, set *pi to
+** POSITION_LIST_END and return. Otherwise, read the next varint from *pp,
+** increment the current value of *pi by the value read, and set *pp to
+** point to the next value before returning.
+**
+** Before calling this routine *pi must be initialized to the value of
+** the previous position, or zero if we are reading the first position
+** in the position-list. Because positions are delta-encoded, the value
+** of the previous position is needed in order to compute the value of
+** the next position.
+*/
+static void fts3ReadNextPos(
+  char **pp,                      /* IN/OUT: Pointer into position-list buffer */
+  sqlite3_int64 *pi               /* IN/OUT: Value read from position-list */
+){
+  if( (**pp)&0xFE ){
+    fts3GetDeltaVarint(pp, pi);
+    *pi -= 2;
+  }else{
+    *pi = POSITION_LIST_END;
+  }
+}
+
+/*
+** If parameter iCol is not 0, write a POS_COLUMN (1) byte followed by
+** the value of iCol encoded as a varint to *pp. This will start a new
+** column list.
+**
+** Set *pp to point to the byte just after the last byte written before
+** returning (do not modify it if iCol==0). Return the total number of bytes
+** written (0 if iCol==0).
+*/
+static int fts3PutColNumber(char **pp, int iCol){
+  int n = 0;                      /* Number of bytes written */
+  if( iCol ){
+    char *p = *pp;                /* Output pointer */
+    n = 1 + sqlite3Fts3PutVarint(&p[1], iCol);
+    *p = 0x01;
+    *pp = &p[n];
+  }
+  return n;
+}
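+
+/* [Editor's note] Compiled-out sketch, not part of the original FTS3
+** source. It decodes the single-column position list { 0x02, 0x05, 0x00 }
+** (positions 0 and 3, as in the worked example above) using the same
+** "delta plus 2" convention that fts3ReadNextPos() implements. Only the
+** single-byte varint case is handled here.
+*/
+#if 0
+#include <assert.h>
+
+static void testDecodePoslist(void){
+  const unsigned char aList[] = { 0x02, 0x05, 0x00 };
+  const unsigned char *p = aList;
+  long long iPos = 0;             /* Value of the previous position */
+
+  /* Each stored byte is (position delta + 2), so decoding adds the varint
+  ** to the previous position and then subtracts 2. */
+  iPos += *p++; iPos -= 2;        /* 0 + 2 - 2  -> position 0 */
+  assert( iPos==0 );
+  iPos += *p++; iPos -= 2;        /* 0 + 5 - 2  -> position 3 */
+  assert( iPos==3 );
+  assert( *p==0x00 );             /* POS_END terminator */
+}
+#endif
+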
+/*
+** Compute the union of two position lists. The output written
+** into *pp contains all positions of both *pp1 and *pp2 in sorted
+** order and with any duplicates removed. All pointers are
+** updated appropriately. The caller is responsible for ensuring
+** that there is enough space in *pp to hold the complete output.
+*/
+static void fts3PoslistMerge(
+  char **pp,                      /* Output buffer */
+  char **pp1,                     /* Left input list */
+  char **pp2                      /* Right input list */
+){
+  char *p = *pp;
+  char *p1 = *pp1;
+  char *p2 = *pp2;
+
+  while( *p1 || *p2 ){
+    int iCol1;                    /* The current column index in pp1 */
+    int iCol2;                    /* The current column index in pp2 */
+
+    if( *p1==POS_COLUMN ) fts3GetVarint32(&p1[1], &iCol1);
+    else if( *p1==POS_END ) iCol1 = POSITION_LIST_END;
+    else iCol1 = 0;
+
+    if( *p2==POS_COLUMN ) fts3GetVarint32(&p2[1], &iCol2);
+    else if( *p2==POS_END ) iCol2 = POSITION_LIST_END;
+    else iCol2 = 0;
+
+    if( iCol1==iCol2 ){
+      sqlite3_int64 i1 = 0;       /* Last position from pp1 */
+      sqlite3_int64 i2 = 0;       /* Last position from pp2 */
+      sqlite3_int64 iPrev = 0;
+      int n = fts3PutColNumber(&p, iCol1);
+      p1 += n;
+      p2 += n;
+
+      /* At this point, both p1 and p2 point to the start of column-lists
+      ** for the same column (the column with index iCol1 and iCol2).
+      ** A column-list is a list of non-negative delta-encoded varints, each
+      ** incremented by 2 before being stored. Each list is terminated by a
+      ** POS_END (0) or POS_COLUMN (1). The following block merges the two
+      ** lists and writes the results to buffer p. p is left pointing to the
+      ** byte after the list written. No terminator (POS_END or POS_COLUMN)
+      ** is written to the output.
+      */
+      fts3GetDeltaVarint(&p1, &i1);
+      fts3GetDeltaVarint(&p2, &i2);
+      do {
+        fts3PutDeltaVarint(&p, &iPrev, (i1<i2) ? i1 : i2);
+        iPrev -= 2;
+        if( i1==i2 ){
+          fts3ReadNextPos(&p1, &i1);
+          fts3ReadNextPos(&p2, &i2);
+        }else if( i1<i2 ){
+          fts3ReadNextPos(&p1, &i1);
+        }else{
+          fts3ReadNextPos(&p2, &i2);
+        }
+      }while( i1!=POSITION_LIST_END || i2!=POSITION_LIST_END );
+    }else if( iCol1<iCol2 ){
+      p1 += fts3PutColNumber(&p, iCol1);
+      fts3PoslistCopy(&p, &p1);
+    }else{
+      p2 += fts3PutColNumber(&p, iCol2);
+      fts3PoslistCopy(&p, &p2);
+    }
+  }
+
+  *p++ = POS_END;
+  *pp = p;
+  *pp1 = p1 + 1;
+  *pp2 = p2 + 1;
+}
+
+/*
+** When this function is called, *pp1 and *pp2 must both point to position
+** lists. A position-list is the part of a doclist that follows each document
+** id. For example, if a row contains:
+**
+**     'a b c'|'x y z'|'a b b a'
+**
+** then the position list for this row for token 'b' would consist of:
+**
+**     0x03 0x01 0x02 0x03 0x03 0x00
+**
+** (position 1 of column 0, then positions 1 and 2 of column 2, stored
+** using the "delta plus 2" convention described above).
+**
+** When this function returns, both *pp1 and *pp2 are left pointing to the
+** byte following the position list they started out pointing to. Likewise,
+** *pp is left pointing to the byte following the position list written.
+**
+** If isSaveLeft is 0, an entry is added to the output position list for
+** each position in *pp2 for which there exists one or more positions in
+** *pp1 so that (pos(*pp2)>pos(*pp1) && pos(*pp2)-pos(*pp1)<=nToken). i.e.
+** when the *pp1 token appears before the *pp2 token, but not more than nToken
+** slots before it.
+**
+** e.g. nToken==1 searches for adjacent positions.
+*/
+static int fts3PoslistPhraseMerge(
+  char **pp,                      /* IN/OUT: Preallocated output buffer */
+  int nToken,                     /* Maximum difference in token positions */
+  int isSaveLeft,                 /* Save the left position */
+  int isExact,                    /* If *pp1 is exactly nTokens before *pp2 */
+  char **pp1,                     /* IN/OUT: Left input list */
+  char **pp2                      /* IN/OUT: Right input list */
+){
+  char *p = *pp;
+  char *p1 = *pp1;
+  char *p2 = *pp2;
+  int iCol1 = 0;
+  int iCol2 = 0;
+
+  /* Never set both isSaveLeft and isExact for the same invocation. */
+  assert( isSaveLeft==0 || isExact==0 );
+
+  assert( p!=0 && *p1!=0 && *p2!=0 );
+  if( *p1==POS_COLUMN ){
+    p1++;
+    p1 += fts3GetVarint32(p1, &iCol1);
+  }
+  if( *p2==POS_COLUMN ){
+    p2++;
+    p2 += fts3GetVarint32(p2, &iCol2);
+  }
+
+  while( 1 ){
+    if( iCol1==iCol2 ){
+      char *pSave = p;
+      sqlite3_int64 iPrev = 0;
+      sqlite3_int64 iPos1 = 0;
+      sqlite3_int64 iPos2 = 0;
+
+      if( iCol1 ){
+        *p++ = POS_COLUMN;
+        p += sqlite3Fts3PutVarint(p, iCol1);
+      }
+
+      assert( *p1!=POS_END && *p1!=POS_COLUMN );
+      assert( *p2!=POS_END && *p2!=POS_COLUMN );
+      fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2;
+      fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2;
+
+      while( 1 ){
+        if( iPos2==iPos1+nToken
+         || (isExact==0 && iPos2>iPos1 && iPos2<=iPos1+nToken)
+        ){
+          sqlite3_int64 iSave;
+          iSave = isSaveLeft ? iPos1 : iPos2;
+          fts3PutDeltaVarint(&p, &iPrev, iSave+2); iPrev -= 2;
+          pSave = 0;
+          assert( p );
+        }
+        if( (!isSaveLeft && iPos2<=(iPos1+nToken)) || iPos2<=iPos1 ){
+          if( (*p2&0xFE)==0 ) break;
+          fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2;
+        }else{
+          if( (*p1&0xFE)==0 ) break;
+          fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2;
+        }
+      }
+
+      if( pSave ){
+        assert( pp && p );
+        p = pSave;
+      }
+
+      fts3ColumnlistCopy(0, &p1);
+      fts3ColumnlistCopy(0, &p2);
+      assert( (*p1&0xFE)==0 && (*p2&0xFE)==0 );
+      if( 0==*p1 || 0==*p2 ) break;
+
+      p1++;
+      p1 += fts3GetVarint32(p1, &iCol1);
+      p2++;
+      p2 += fts3GetVarint32(p2, &iCol2);
+    }
+
+    /* Advance pointer p1 or p2 (whichever corresponds to the smaller of
+    ** iCol1 and iCol2) so that it points to either the 0x00 that marks the
+    ** end of the position list, or the 0x01 that precedes the next
+    ** column-number in the position list.
+    */
+    else if( iCol1<iCol2 ){
+      fts3ColumnlistCopy(0, &p1);
+      if( 0==*p1 ) break;
+      p1++;
+      p1 += fts3GetVarint32(p1, &iCol1);
+    }else{
+      fts3ColumnlistCopy(0, &p2);
+      if( 0==*p2 ) break;
+      p2++;
+      p2 += fts3GetVarint32(p2, &iCol2);
+    }
+  }
+
+  fts3PoslistCopy(0, &p2);
+  fts3PoslistCopy(0, &p1);
+  *pp1 = p1;
+  *pp2 = p2;
+  if( *pp==p ){
+    return 0;
+  }
+  *p++ = 0x00;
+  *pp = p;
+  return 1;
+}
+
+/*
+** Merge two position-lists as required by the NEAR operator. The argument
+** position lists correspond to the left and right phrases of an expression
+** like:
+**
+**     "phrase 1" NEAR "phrase 2"
+**
+** Position list *pp1 corresponds to the left-hand side of the NEAR
+** operator and *pp2 to the right. As usual, the indexes in the position
+** lists are the offsets of the last token in each phrase (tokens are counted
+** starting from 1).
+**
+** The output position list - written to *pp - is a copy of *pp2 with those
+** entries that are not sufficiently NEAR entries in *pp1 removed.
+*/
+static int fts3PoslistNearMerge(
+  char **pp,                      /* Output buffer */
+  char *aTmp,                     /* Temporary buffer space */
+  int nRight,                     /* Maximum difference in token positions */
+  int nLeft,                      /* Maximum difference in token positions */
+  char **pp1,                     /* IN/OUT: Left input list */
+  char **pp2                      /* IN/OUT: Right input list */
+){
+  char *p1 = *pp1;
+  char *p2 = *pp2;
+
+  char *pTmp1 = aTmp;
+  char *pTmp2;
+  char *aTmp2;
+  int res = 1;
+
+  fts3PoslistPhraseMerge(&pTmp1, nLeft, 1, 0, &p1, &p2);
+  aTmp2 = pTmp2 = pTmp1;
+  *pp1 = p1;
+  *pp2 = p2;
+  fts3PoslistPhraseMerge(&pTmp2, nRight, 0, 0, &p2, &p1);
+  if( pTmp1!=aTmp && pTmp2!=aTmp2 ){
+    fts3PoslistMerge(pp, &aTmp, &aTmp2);
+  }else if( pTmp1!=aTmp ){
+    fts3PoslistCopy(pp, &aTmp);
+  }else if( pTmp2!=aTmp2 ){
+    fts3PoslistCopy(pp, &aTmp2);
+  }else{
+    res = 0;
+  }
+
+  return res;
+}
+
+/*
+** An instance of this structure is used as part of merging together the
+** (potentially large number of) doclists for each term that matches a
+** prefix query. See function fts3TermSelectMerge() for details.
+*/
+typedef struct TermSelect TermSelect;
+struct TermSelect {
+  char *aaOutput[16];             /* Malloc'd output buffers */
+  int anOutput[16];               /* Number of bytes in each output buffer */
+};
+
+/*
+** This function is used to read a single varint from a buffer. Parameter
+** pEnd points 1 byte past the end of the buffer. When this function is
+** called, if *pp points to pEnd or greater, then the end of the buffer
+** has been reached. In this case *pp is set to 0 and the function returns.
+**
+** If *pp does not point to or past pEnd, then a single varint is read
+** from *pp. *pp is then set to point 1 byte past the end of the read varint.
+**
+** If bDescIdx is false, the value read is added to *pVal before returning.
+** If it is true, the value read is subtracted from *pVal before this
+** function returns.
+*/
+static void fts3GetDeltaVarint3(
+  char **pp,                      /* IN/OUT: Pointer into doclist */
+  char *pEnd,                     /* End of doclist */
+  int bDescIdx,                   /* True if doclist is descending */
+  sqlite3_int64 *pVal             /* IN/OUT: Integer value */
+){
+  if( *pp>=pEnd ){
+    *pp = 0;
+  }else{
+    sqlite3_int64 iVal;
+    *pp += sqlite3Fts3GetVarint(*pp, &iVal);
+    if( bDescIdx ){
+      *pVal -= iVal;
+    }else{
+      *pVal += iVal;
+    }
+  }
+}
+
+/*
+** This function is used to write a single varint to a buffer. The varint
+** is written to *pp. Before returning, *pp is set to point 1 byte past the
+** end of the value written.
+**
+** If *pbFirst is zero when this function is called, the value written to
+** the buffer is that of parameter iVal.
+**
+** If *pbFirst is non-zero when this function is called, then the value
+** written is either (iVal-*piPrev) (if bDescIdx is zero) or (*piPrev-iVal)
+** (if bDescIdx is non-zero).
+**
+** Before returning, this function always sets *pbFirst to 1 and *piPrev
+** to the value of parameter iVal.
+*/
+static void fts3PutDeltaVarint3(
+  char **pp,                      /* IN/OUT: Output pointer */
+  int bDescIdx,                   /* True for descending docids */
+  sqlite3_int64 *piPrev,          /* IN/OUT: Previous value written to list */
+  int *pbFirst,                   /* IN/OUT: True after first int written */
+  sqlite3_int64 iVal              /* Write this value to the list */
+){
+  sqlite3_int64 iWrite;
+  if( bDescIdx==0 || *pbFirst==0 ){
+    iWrite = iVal - *piPrev;
+  }else{
+    iWrite = *piPrev - iVal;
+  }
+  assert( *pbFirst || *piPrev==0 );
+  assert( *pbFirst==0 || iWrite>0 );
+  *pp += sqlite3Fts3PutVarint(*pp, iWrite);
+  *piPrev = iVal;
+  *pbFirst = 1;
+}
+
+
+/*
+** This macro is used by various functions that merge doclists. The two
+** arguments are 64-bit docid values. If the value of the stack variable
+** bDescDoclist is 0 when this macro is invoked, then it returns (i1-i2).
+** Otherwise, (i2-i1).
+**
+** Using this makes it easier to write code that can merge doclists that are
+** sorted in either ascending or descending order.
+*/
+#define DOCID_CMP(i1, i2) ((bDescDoclist?-1:1) * (i1-i2))
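+
+/* [Editor's note] Compiled-out sketch, not part of the original FTS3
+** source. It shows the DOCID_CMP() convention used by the merge functions
+** below: with bDescDoclist==0 the comparison behaves like (i1-i2), so a
+** negative result means docid i1 comes first; with bDescDoclist!=0 the
+** sense is inverted, so the same expression also orders descending lists.
+*/
+#if 0
+#include <assert.h>
+
+static void testDocidCmp(void){
+  sqlite3_int64 i1 = 5, i2 = 9;
+  int bDescDoclist;
+
+  bDescDoclist = 0;               /* Ascending doclists */
+  assert( DOCID_CMP(i1, i2)<0 );  /* 5 precedes 9 */
+
+  bDescDoclist = 1;               /* Descending doclists */
+  assert( DOCID_CMP(i1, i2)>0 );  /* 9 precedes 5 */
+}
+#endif
+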
+/*
+** This function does an "OR" merge of two doclists (output contains all
+** positions contained in either argument doclist). If the docids in the
+** input doclists are sorted in ascending order, parameter bDescDoclist
+** should be false. If they are sorted in descending order, it should be
+** passed a non-zero value.
+**
+** If no error occurs, *paOut is set to point at an sqlite3_malloc'd buffer
+** containing the output doclist and SQLITE_OK is returned. In this case
+** *pnOut is set to the number of bytes in the output doclist.
+**
+** If an error occurs, an SQLite error code is returned. The output values
+** are undefined in this case.
+*/
+static int fts3DoclistOrMerge(
+  int bDescDoclist,               /* True if arguments are desc */
+  char *a1, int n1,               /* First doclist */
+  char *a2, int n2,               /* Second doclist */
+  char **paOut, int *pnOut        /* OUT: Malloc'd doclist */
+){
+  sqlite3_int64 i1 = 0;
+  sqlite3_int64 i2 = 0;
+  sqlite3_int64 iPrev = 0;
+  char *pEnd1 = &a1[n1];
+  char *pEnd2 = &a2[n2];
+  char *p1 = a1;
+  char *p2 = a2;
+  char *p;
+  char *aOut;
+  int bFirstOut = 0;
+
+  *paOut = 0;
+  *pnOut = 0;
+
+  /* Allocate space for the output. Both the input and output doclists
+  ** are delta encoded. If they are in ascending order (bDescDoclist==0),
+  ** then the first docid in each list is simply encoded as a varint. For
+  ** each subsequent docid, the varint stored is the difference between the
+  ** current and previous docid (a positive number - since the list is in
+  ** ascending order).
+  **
+  ** The first docid written to the output is therefore encoded using the
+  ** same number of bytes as it is in whichever of the input lists it is
+  ** read from. And each subsequent docid read from the same input list
+  ** consumes either the same or less bytes as it did in the input (since
+  ** the difference between it and the previous value in the output must
+  ** be a positive value less than or equal to the delta value read from
+  ** the input list). The same argument applies to all but the first docid
+  ** read from the 'other' list. And to the contents of all position lists
+  ** that will be copied and merged from the input to the output.
+  **
+  ** However, if the first docid copied to the output is a negative number,
+  ** then the encoding of the first docid from the 'other' input list may
+  ** be larger in the output than it was in the input (since the delta value
+  ** may be a larger positive integer than the actual docid).
+  **
+  ** The space required to store the output is therefore the sum of the
+  ** sizes of the two inputs, plus enough space for exactly one of the input
+  ** docids to grow.
+  **
+  ** A symmetric argument may be made if the doclists are in descending
+  ** order.
+  */
+  aOut = sqlite3_malloc(n1+n2+FTS3_VARINT_MAX-1);
+  if( !aOut ) return SQLITE_NOMEM;
+
+  p = aOut;
+  fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1);
+  fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2);
+  while( p1 || p2 ){
+    sqlite3_int64 iDiff = DOCID_CMP(i1, i2);
+
+    if( p2 && p1 && iDiff==0 ){
+      fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1);
+      fts3PoslistMerge(&p, &p1, &p2);
+      fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+      fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+    }else if( !p2 || (p1 && iDiff<0) ){
+      fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1);
+      fts3PoslistCopy(&p, &p1);
+      fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+    }else{
+      fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2);
+      fts3PoslistCopy(&p, &p2);
+      fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+    }
+  }
+
+  *paOut = aOut;
+  *pnOut = (int)(p-aOut);
+  assert( *pnOut<=n1+n2+FTS3_VARINT_MAX-1 );
+  return SQLITE_OK;
+}
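+
+/* [Editor's note] Worked example, not part of the original FTS3 source.
+** To see why the extra FTS3_VARINT_MAX-1 bytes above suffice: merging the
+** ascending doclists (3, 7) and (5), the inputs store the varints {3, 4}
+** and {5}, while the output (3, 5, 7) stores {3, 2, 2}. Every delta
+** written is no larger than the corresponding input varint, except that
+** the first docid taken from the 'other' list (here 5, re-encoded as the
+** delta 2) could, for a negative first docid, grow up to the maximum
+** varint size.
+*/
+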
+/*
+** This function does a "phrase" merge of two doclists. In a phrase merge,
+** the output contains a copy of each position from the right-hand input
+** doclist for which there is a position in the left-hand input doclist
+** exactly nDist tokens before it.
+**
+** If the docids in the input doclists are sorted in ascending order,
+** parameter bDescDoclist should be false. If they are sorted in descending
+** order, it should be passed a non-zero value.
+**
+** The right-hand input doclist is overwritten by this function.
+*/
+static int fts3DoclistPhraseMerge(
+  int bDescDoclist,               /* True if arguments are desc */
+  int nDist,                      /* Distance from left to right (1=adjacent) */
+  char *aLeft, int nLeft,         /* Left doclist */
+  char **paRight, int *pnRight    /* IN/OUT: Right/output doclist */
+){
+  sqlite3_int64 i1 = 0;
+  sqlite3_int64 i2 = 0;
+  sqlite3_int64 iPrev = 0;
+  char *aRight = *paRight;
+  char *pEnd1 = &aLeft[nLeft];
+  char *pEnd2 = &aRight[*pnRight];
+  char *p1 = aLeft;
+  char *p2 = aRight;
+  char *p;
+  int bFirstOut = 0;
+  char *aOut;
+
+  assert( nDist>0 );
+  if( bDescDoclist ){
+    aOut = sqlite3_malloc(*pnRight + FTS3_VARINT_MAX);
+    if( aOut==0 ) return SQLITE_NOMEM;
+  }else{
+    aOut = aRight;
+  }
+  p = aOut;
+
+  fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1);
+  fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2);
+
+  while( p1 && p2 ){
+    sqlite3_int64 iDiff = DOCID_CMP(i1, i2);
+    if( iDiff==0 ){
+      char *pSave = p;
+      sqlite3_int64 iPrevSave = iPrev;
+      int bFirstOutSave = bFirstOut;
+
+      fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1);
+      if( 0==fts3PoslistPhraseMerge(&p, nDist, 0, 1, &p1, &p2) ){
+        p = pSave;
+        iPrev = iPrevSave;
+        bFirstOut = bFirstOutSave;
+      }
+      fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+      fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+    }else if( iDiff<0 ){
+      fts3PoslistCopy(0, &p1);
+      fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1);
+    }else{
+      fts3PoslistCopy(0, &p2);
+      fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2);
+    }
+  }
+
+  *pnRight = (int)(p - aOut);
+  if( bDescDoclist ){
+    sqlite3_free(aRight);
+    *paRight = aOut;
+  }
+
+  return SQLITE_OK;
+}
+
+/*
+** Argument pList points to a position list nList bytes in size. This
+** function checks to see if the position list contains any entries for
+** a token in position 0 (of any column). If so, it writes argument iDelta
+** to the output buffer pOut, followed by a position list consisting only
+** of the entries from pList at position 0, and terminated by an 0x00 byte.
+** The value returned is the number of bytes written to pOut (if any).
+*/
+SQLITE_PRIVATE int sqlite3Fts3FirstFilter(
+  sqlite3_int64 iDelta,           /* Varint that may be written to pOut */
+  char *pList,                    /* Position list (no 0x00 term) */
+  int nList,                      /* Size of pList in bytes */
+  char *pOut                      /* Write output here */
+){
+  int nOut = 0;
+  int bWritten = 0;               /* True once iDelta has been written */
+  char *p = pList;
+  char *pEnd = &pList[nList];
+
+  if( *p!=0x01 ){
+    if( *p==0x02 ){
+      nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta);
+      pOut[nOut++] = 0x02;
+      bWritten = 1;
+    }
+    fts3ColumnlistCopy(0, &p);
+  }
+
+  while( p<pEnd ){
+    sqlite3_int64 iCol;
+    p++;
+    p += sqlite3Fts3GetVarint(p, &iCol);
+    if( *p==0x02 ){
+      if( bWritten==0 ){
+        nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta);
+        bWritten = 1;
+      }
+      pOut[nOut++] = 0x01;
+      nOut += sqlite3Fts3PutVarint(&pOut[nOut], iCol);
+      pOut[nOut++] = 0x02;
+    }
+    fts3ColumnlistCopy(0, &p);
+  }
+  if( bWritten ){
+    pOut[nOut++] = 0x00;
+  }
+
+  return nOut;
+}
+
+
+/*
+** Merge all doclists in the TermSelect.aaOutput[] array into a single
+** doclist stored in TermSelect.aaOutput[0]. If an error occurs, an SQLite
+** error code is returned; in that case it is the responsibility of the
+** caller to free any doclists left in the TermSelect.aaOutput[] array.
+*/
+static int fts3TermSelectFinishMerge(Fts3Table *p, TermSelect *pTS){
+  char *aOut = 0;
+  int nOut = 0;
+  int i;
+
+  /* Loop through the doclists in the aaOutput[] array. Merge them all
+  ** into a single doclist.
+  */
+  for(i=0; i<SizeofArray(pTS->aaOutput); i++){
+    if( pTS->aaOutput[i] ){
+      if( !aOut ){
+        aOut = pTS->aaOutput[i];
+        nOut = pTS->anOutput[i];
+        pTS->aaOutput[i] = 0;
+      }else{
+        int nNew;
+        char *aNew;
+
+        int rc = fts3DoclistOrMerge(p->bDescIdx,
+            pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut, &aNew, &nNew
+        );
+        if( rc!=SQLITE_OK ){
+          sqlite3_free(aOut);
+          return rc;
+        }
+
+        sqlite3_free(pTS->aaOutput[i]);
+        sqlite3_free(aOut);
+        pTS->aaOutput[i] = 0;
+        aOut = aNew;
+        nOut = nNew;
+      }
+    }
+  }
+
+  pTS->aaOutput[0] = aOut;
+  pTS->anOutput[0] = nOut;
+  return SQLITE_OK;
+}
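+
+/* [Editor's note] Compiled-out sketch, not part of the original FTS3
+** source. The TermSelect.aaOutput[] array implements the classic
+** binary-counter merge pattern: slot i holds the OR-merge of roughly 2^i
+** input doclists, so N doclists are combined using O(log N) intermediate
+** buffers instead of re-merging one ever-growing buffer N times. The
+** sketch below shows the same pattern with integer counts standing in
+** for doclists; mergeCount() is a hypothetical helper.
+*/
+#if 0
+#include <assert.h>
+
+#define NSLOT 16
+
+static void mergeCount(int *aSlot, int nNew){
+  int i;
+  for(i=0; i<NSLOT; i++){
+    if( aSlot[i]==0 ){
+      aSlot[i] = nNew;            /* Empty slot: park the merge here */
+      return;
+    }
+    nNew += aSlot[i];             /* Occupied: merge and carry upwards */
+    aSlot[i] = 0;
+  }
+}
+
+static void testMergeCount(void){
+  int aSlot[NSLOT] = {0};
+  int i;
+  for(i=0; i<8; i++) mergeCount(aSlot, 1);  /* Add 8 "doclists" */
+  assert( aSlot[0]==0 && aSlot[1]==0 && aSlot[2]==0 && aSlot[3]==8 );
+}
+#endif
+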
+/*
+** Merge the doclist aDoclist/nDoclist into the TermSelect object passed
+** as the first argument. The merge is an "OR" merge (see function
+** fts3DoclistOrMerge() for details).
+**
+** This function is called with the doclist for each term that matches
+** a queried prefix. It merges all these doclists into one, the doclist
+** for the specified prefix. Since there can be a very large number of
+** doclists to merge, the merging is done pair-wise using the TermSelect
+** object.
+**
+** This function returns SQLITE_OK if the merge is successful, or an
+** SQLite error code (SQLITE_NOMEM) if an error occurs.
+*/
+static int fts3TermSelectMerge(
+  Fts3Table *p,                   /* FTS table handle */
+  TermSelect *pTS,                /* TermSelect object to merge into */
+  char *aDoclist,                 /* Pointer to doclist */
+  int nDoclist                    /* Size of aDoclist in bytes */
+){
+  if( pTS->aaOutput[0]==0 ){
+    /* If this is the first term selected, copy the doclist to the output
+    ** buffer using memcpy().
+    **
+    ** Add FTS3_VARINT_MAX bytes of unused space to the end of the
+    ** allocation. This is so as to ensure that the buffer is big enough
+    ** to hold the current doclist AND'd with any other doclist. If the
+    ** doclists are stored in order=ASC order, this padding would not be
+    ** required (since the size of [doclistA AND doclistB] is always less
+    ** than or equal to the size of [doclistA] in that case). But this is
+    ** not true for order=DESC. For example, a doclist containing (1, -1)
+    ** may be smaller than (-1), as in the first example the -1 may be stored
+    ** as a single-byte delta, whereas in the second it must be stored as a
+    ** FTS3_VARINT_MAX byte varint.
+    **
+    ** Similar padding is added in the fts3DoclistOrMerge() function.
+    */
+    pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1);
+    pTS->anOutput[0] = nDoclist;
+    if( pTS->aaOutput[0] ){
+      memcpy(pTS->aaOutput[0], aDoclist, nDoclist);
+    }else{
+      return SQLITE_NOMEM;
+    }
+  }else{
+    char *aMerge = aDoclist;
+    int nMerge = nDoclist;
+    int iOut;
+
+    for(iOut=0; iOut<SizeofArray(pTS->aaOutput); iOut++){
+      if( pTS->aaOutput[iOut]==0 ){
+        assert( iOut>0 );
+        pTS->aaOutput[iOut] = aMerge;
+        pTS->anOutput[iOut] = nMerge;
+        break;
+      }else{
+        char *aNew;
+        int nNew;
+
+        int rc = fts3DoclistOrMerge(p->bDescIdx, aMerge, nMerge,
+            pTS->aaOutput[iOut], pTS->anOutput[iOut], &aNew, &nNew
+        );
+        if( rc!=SQLITE_OK ){
+          if( aMerge!=aDoclist ) sqlite3_free(aMerge);
+          return rc;
+        }
+
+        if( aMerge!=aDoclist ) sqlite3_free(aMerge);
+        sqlite3_free(pTS->aaOutput[iOut]);
+        pTS->aaOutput[iOut] = 0;
+
+        aMerge = aNew;
+        nMerge = nNew;
+        if( (iOut+1)==SizeofArray(pTS->aaOutput) ){
+          pTS->aaOutput[iOut] = aMerge;
+          pTS->anOutput[iOut] = nMerge;
+        }
+      }
+    }
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Append SegReader object pNew to the end of the pCsr->apSegment[] array.
+*/
+static int fts3SegReaderCursorAppend(
+  Fts3MultiSegReader *pCsr,
+  Fts3SegReader *pNew
+){
+  if( (pCsr->nSegment%16)==0 ){
+    Fts3SegReader **apNew;
+    int nByte = (pCsr->nSegment + 16)*sizeof(Fts3SegReader*);
+    apNew = (Fts3SegReader **)sqlite3_realloc(pCsr->apSegment, nByte);
+    if( !apNew ){
+      sqlite3Fts3SegReaderFree(pNew);
+      return SQLITE_NOMEM;
+    }
+    pCsr->apSegment = apNew;
+  }
+  pCsr->apSegment[pCsr->nSegment++] = pNew;
+  return SQLITE_OK;
+}
+
+/*
+** Add seg-reader objects to the Fts3MultiSegReader object passed as the
+** final argument.
+**
+** This function returns SQLITE_OK if successful, or an SQLite error code
+** otherwise.
+*/ +static int fts3SegReaderCursor( + Fts3Table *p, /* FTS3 table handle */ + int iLangid, /* Language id */ + int iIndex, /* Index to search (from 0 to p->nIndex-1) */ + int iLevel, /* Level of segments to scan */ + const char *zTerm, /* Term to query for */ + int nTerm, /* Size of zTerm in bytes */ + int isPrefix, /* True for a prefix search */ + int isScan, /* True to scan from zTerm to EOF */ + Fts3MultiSegReader *pCsr /* Cursor object to populate */ +){ + int rc = SQLITE_OK; /* Error code */ + sqlite3_stmt *pStmt = 0; /* Statement to iterate through segments */ + int rc2; /* Result of sqlite3_reset() */ + + /* If iLevel is less than 0 and this is not a scan, include a seg-reader + ** for the pending-terms. If this is a scan, then this call must be being + ** made by an fts4aux module, not an FTS table. In this case calling + ** Fts3SegReaderPending might segfault, as the data structures used by + ** fts4aux are not completely populated. So it's easiest to filter these + ** calls out here. */ + if( iLevel<0 && p->aIndex ){ + Fts3SegReader *pSeg = 0; + rc = sqlite3Fts3SegReaderPending(p, iIndex, zTerm, nTerm, isPrefix||isScan, &pSeg); + if( rc==SQLITE_OK && pSeg ){ + rc = fts3SegReaderCursorAppend(pCsr, pSeg); + } + } + + if( iLevel!=FTS3_SEGCURSOR_PENDING ){ + if( rc==SQLITE_OK ){ + rc = sqlite3Fts3AllSegdirs(p, iLangid, iIndex, iLevel, &pStmt); + } + + while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ + Fts3SegReader *pSeg = 0; + + /* Read the values returned by the SELECT into local variables. */ + sqlite3_int64 iStartBlock = sqlite3_column_int64(pStmt, 1); + sqlite3_int64 iLeavesEndBlock = sqlite3_column_int64(pStmt, 2); + sqlite3_int64 iEndBlock = sqlite3_column_int64(pStmt, 3); + int nRoot = sqlite3_column_bytes(pStmt, 4); + char const *zRoot = sqlite3_column_blob(pStmt, 4); + + /* If zTerm is not NULL, and this segment is not stored entirely on its + ** root node, the range of leaves scanned can be reduced. Do this. */ + if( iStartBlock && zTerm ){ + sqlite3_int64 *pi = (isPrefix ? &iLeavesEndBlock : 0); + rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &iStartBlock, pi); + if( rc!=SQLITE_OK ) goto finished; + if( isPrefix==0 && isScan==0 ) iLeavesEndBlock = iStartBlock; + } + + rc = sqlite3Fts3SegReaderNew(pCsr->nSegment+1, + (isPrefix==0 && isScan==0), + iStartBlock, iLeavesEndBlock, + iEndBlock, zRoot, nRoot, &pSeg + ); + if( rc!=SQLITE_OK ) goto finished; + rc = fts3SegReaderCursorAppend(pCsr, pSeg); + } + } + + finished: + rc2 = sqlite3_reset(pStmt); + if( rc==SQLITE_DONE ) rc = rc2; + + return rc; +} + +/* +** Set up a cursor object for iterating through a full-text index or a +** single level therein. 
+*/
+SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor(
+  Fts3Table *p,                   /* FTS3 table handle */
+  int iLangid,                    /* Language-id to search */
+  int iIndex,                     /* Index to search (from 0 to p->nIndex-1) */
+  int iLevel,                     /* Level of segments to scan */
+  const char *zTerm,              /* Term to query for */
+  int nTerm,                      /* Size of zTerm in bytes */
+  int isPrefix,                   /* True for a prefix search */
+  int isScan,                     /* True to scan from zTerm to EOF */
+  Fts3MultiSegReader *pCsr        /* Cursor object to populate */
+){
+  assert( iIndex>=0 && iIndex<p->nIndex );
+  assert( iLevel==FTS3_SEGCURSOR_ALL
+      ||  iLevel==FTS3_SEGCURSOR_PENDING
+      ||  iLevel>=0
+  );
+  assert( iLevel<FTS3_SEGDIR_MAXLEVEL );
+  assert( FTS3_SEGCURSOR_ALL<0 && FTS3_SEGCURSOR_PENDING<0 );
+  assert( isPrefix==0 || isScan==0 );
+
+  memset(pCsr, 0, sizeof(Fts3MultiSegReader));
+  return fts3SegReaderCursor(
+      p, iLangid, iIndex, iLevel, zTerm, nTerm, isPrefix, isScan, pCsr
+  );
+}
+
+/*
+** In addition to its current configuration, have the Fts3MultiSegReader
+** passed as the final argument also scan the doclist for term zTerm/nTerm.
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+*/
+static int fts3SegReaderCursorAddZero(
+  Fts3Table *p,                   /* FTS virtual table handle */
+  int iLangid,                    /* Language id to query */
+  const char *zTerm,              /* Term to scan doclist of */
+  int nTerm,                      /* Number of bytes in zTerm */
+  Fts3MultiSegReader *pCsr        /* Fts3MultiSegReader to modify */
+){
+  return fts3SegReaderCursor(p,
+      iLangid, 0, FTS3_SEGCURSOR_PENDING, zTerm, nTerm, 1, 0, pCsr
+  );
+}
+
+/*
+** Open an Fts3MultiSegReader to scan the doclist for term zTerm/nTerm. Or,
+** if isPrefix is true, to scan the doclist for all terms for which
+** zTerm/nTerm is a prefix. If successful, return SQLITE_OK and write a
+** pointer to the new Fts3MultiSegReader to *ppSegcsr. Otherwise, return an
+** SQLite error code.
+**
+** It is the responsibility of the caller to free this object by eventually
+** passing it to fts3SegReaderCursorFree().
+**
+** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code.
+** Output parameter *ppSegcsr is set to 0 if an error occurs.
+*/
+static int fts3TermSegReaderCursor(
+  Fts3Cursor *pCsr,               /* Virtual table cursor handle */
+  const char *zTerm,              /* Term to query for */
+  int nTerm,                      /* Size of zTerm in bytes */
+  int isPrefix,                   /* True for a prefix search */
+  Fts3MultiSegReader **ppSegcsr   /* OUT: Allocated seg-reader cursor */
+){
+  Fts3MultiSegReader *pSegcsr;    /* Object to allocate and return */
+  int rc = SQLITE_NOMEM;          /* Return code */
+
+  pSegcsr = sqlite3_malloc(sizeof(Fts3MultiSegReader));
+  if( pSegcsr ){
+    int i;
+    int bFound = 0;               /* True once an index has been found */
+    Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+
+    if( isPrefix ){
+      for(i=1; bFound==0 && i<p->nIndex; i++){
+        if( p->aIndex[i].nPrefix==nTerm ){
+          bFound = 1;
+          rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid,
+              i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 0, 0, pSegcsr
+          );
+          pSegcsr->bLookup = 1;
+        }
+      }
+
+      for(i=1; bFound==0 && i<p->nIndex; i++){
+        if( p->aIndex[i].nPrefix==nTerm+1 ){
+          bFound = 1;
+          rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid,
+              i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 1, 0, pSegcsr
+          );
+          if( rc==SQLITE_OK ){
+            rc = fts3SegReaderCursorAddZero(
+                p, pCsr->iLangid, zTerm, nTerm, pSegcsr
+            );
+          }
+        }
+      }
+    }
+
+    if( bFound==0 ){
+      rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid,
+          0, FTS3_SEGCURSOR_ALL, zTerm, nTerm, isPrefix, 0, pSegcsr
+      );
+      pSegcsr->bLookup = !isPrefix;
+    }
+  }
+
+  *ppSegcsr = pSegcsr;
+  return rc;
+}
+
+/*
+** Free an Fts3MultiSegReader allocated by fts3TermSegReaderCursor().
+*/
+static void fts3SegReaderCursorFree(Fts3MultiSegReader *pSegcsr){
+  sqlite3Fts3SegReaderFinish(pSegcsr);
+  sqlite3_free(pSegcsr);
+}
+
+/*
+** This function retrieves the doclist for the specified term (or term
+** prefix) from the database.
+*/
+static int fts3TermSelect(
+  Fts3Table *p,                   /* Virtual table handle */
+  Fts3PhraseToken *pTok,          /* Token to query for */
+  int iColumn,                    /* Column to query (or -ve for all columns) */
+  int *pnOut,                     /* OUT: Size of buffer at *ppOut */
+  char **ppOut                    /* OUT: Malloced result buffer */
+){
+  int rc;                         /* Return code */
+  Fts3MultiSegReader *pSegcsr;    /* Seg-reader cursor for this term */
+  TermSelect tsc;                 /* Object for pair-wise doclist merging */
+  Fts3SegFilter filter;           /* Segment term filter configuration */
+
+  pSegcsr = pTok->pSegcsr;
+  memset(&tsc, 0, sizeof(TermSelect));
+
+  filter.flags = FTS3_SEGMENT_IGNORE_EMPTY | FTS3_SEGMENT_REQUIRE_POS
+        | (pTok->isPrefix ? FTS3_SEGMENT_PREFIX : 0)
+        | (pTok->bFirst ? FTS3_SEGMENT_FIRST : 0)
+        | (iColumn<p->nColumn ? FTS3_SEGMENT_COLUMN_FILTER : 0);
+  filter.iCol = iColumn;
+  filter.zTerm = pTok->z;
+  filter.nTerm = pTok->n;
+
+  rc = sqlite3Fts3SegReaderStart(p, pSegcsr, &filter);
+  while( SQLITE_OK==rc
+      && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pSegcsr))
+  ){
+    rc = fts3TermSelectMerge(p, &tsc, pSegcsr->aDoclist, pSegcsr->nDoclist);
+  }
+
+  if( rc==SQLITE_OK ){
+    rc = fts3TermSelectFinishMerge(p, &tsc);
+  }
+  if( rc==SQLITE_OK ){
+    *ppOut = tsc.aaOutput[0];
+    *pnOut = tsc.anOutput[0];
+  }else{
+    int i;
+    for(i=0; i<SizeofArray(tsc.aaOutput); i++){
+      sqlite3_free(tsc.aaOutput[i]);
+    }
+  }
+
+  fts3SegReaderCursorFree(pSegcsr);
+  pTok->pSegcsr = 0;
+  return rc;
+}
+
+/*
+** This function counts the total number of docids in the doclist stored
+** in buffer aList[], size nList bytes. The doclist is assumed to contain
+** a position-list following each docid (the docids and position data are
+** stored as delta encoded varints).
+*/
+static int fts3DoclistCountDocids(char *aList, int nList){
+  int nDoc = 0;                   /* Return value */
+  if( aList ){
+    char *aEnd = &aList[nList];   /* Pointer to one byte after EOF */
+    char *p = aList;              /* Cursor */
+    while( p<aEnd ){
+      nDoc++;
+      while( (*p++)&0x80 );       /* Skip over docid varint */
+      fts3PoslistCopy(0, &p);     /* Skip over position list */
+    }
+  }
+
+  return nDoc;
+}
+
+/*
+** Advance the cursor to the next row in the %_content table that
+** matches the search criteria. For a MATCH search, this will be the next
+** row that matches. For a full-table scan, this will be simply the next
+** row in the %_content table. For a docid lookup, this routine simply
+** sets the EOF flag.
+**
+** If an error occurs, set the vtab error message. Otherwise, return
+** SQLITE_OK.
+*/
+static int fts3NextMethod(sqlite3_vtab_cursor *pCursor){
+  int rc;
+  Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
+  if( pCsr->eSearch==FTS3_DOCID_SEARCH || pCsr->eSearch==FTS3_FULLSCAN_SEARCH ){
+    if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){
+      pCsr->isEof = 1;
+      rc = sqlite3_reset(pCsr->pStmt);
+    }else{
+      pCsr->iPrevId = sqlite3_column_int64(pCsr->pStmt, 0);
+      rc = SQLITE_OK;
+    }
+  }else{
+    rc = fts3EvalNext((Fts3Cursor *)pCursor);
+  }
+  assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
+  return rc;
+}
+
+/*
+** The following are copied from sqliteInt.h.
+**
+** Constants for the largest and smallest possible 64-bit signed integers.
+** These macros are designed to work correctly on both 32-bit and 64-bit
+** compilers.
+*/
+#ifndef SQLITE_AMALGAMATION
+# define LARGEST_INT64  (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32))
+# define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64)
+#endif
+
+/*
+** If the numeric type of argument pVal is "integer", then return it
+** converted to a 64-bit signed integer. Otherwise, return a copy of
+** the second parameter, iDefault.
+*/
+static sqlite3_int64 fts3DocidRange(sqlite3_value *pVal, i64 iDefault){
+  if( pVal ){
+    int eType = sqlite3_value_numeric_type(pVal);
+    if( eType==SQLITE_INTEGER ){
+      return sqlite3_value_int64(pVal);
+    }
+  }
+  return iDefault;
+}
+
+/*
+** This is the xFilter interface for the virtual table. See
+** the virtual table xFilter method documentation for additional
+** information.
+**
+** If idxNum==FTS3_FULLSCAN_SEARCH then do a full table scan against
+** the %_content table.
+**
+** If idxNum==FTS3_DOCID_SEARCH then do a docid lookup for a single entry
+** in the %_content table.
+**
+** If idxNum>=FTS3_FULLTEXT_SEARCH then use the full text index. The
+** column on the left-hand side of the MATCH operator is column
+** number idxNum-FTS3_FULLTEXT_SEARCH, 0 indexed. argv[0] is the right-hand
+** side of the MATCH operator.
+*/
+static int fts3FilterMethod(
+  sqlite3_vtab_cursor *pCursor,   /* The cursor used for this query */
+  int idxNum,                     /* Strategy index */
+  const char *idxStr,             /* Unused */
+  int nVal,                       /* Number of elements in apVal */
+  sqlite3_value **apVal           /* Arguments for the indexing scheme */
+){
+  int rc = SQLITE_OK;
+  char *zSql;                     /* SQL statement used to access %_content */
+  int eSearch;
+  Fts3Table *p = (Fts3Table *)pCursor->pVtab;
+  Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
+
+  sqlite3_value *pCons = 0;       /* The MATCH or rowid constraint, if any */
+  sqlite3_value *pLangid = 0;     /* The "langid = ?" constraint, if any */
+  sqlite3_value *pDocidGe = 0;    /* The "docid >= ?" constraint, if any */
+  sqlite3_value *pDocidLe = 0;    /* The "docid <= ?" constraint, if any */
+  int iIdx;
+
+  UNUSED_PARAMETER(idxStr);
+  UNUSED_PARAMETER(nVal);
+
+  eSearch = (idxNum & 0x0000FFFF);
+  assert( eSearch>=0 && eSearch<=(FTS3_FULLTEXT_SEARCH+p->nColumn) );
+  assert( p->pSegments==0 );
+
+  /* Collect arguments into local variables */
+  iIdx = 0;
+  if( eSearch!=FTS3_FULLSCAN_SEARCH ) pCons = apVal[iIdx++];
+  if( idxNum & FTS3_HAVE_LANGID ) pLangid = apVal[iIdx++];
+  if( idxNum & FTS3_HAVE_DOCID_GE ) pDocidGe = apVal[iIdx++];
+  if( idxNum & FTS3_HAVE_DOCID_LE ) pDocidLe = apVal[iIdx++];
+  assert( iIdx==nVal );
+
+  /* In case the cursor has been used before, clear it now.
*/ + fts3CursorFinalizeStmt(pCsr); + sqlite3_free(pCsr->aDoclist); + sqlite3Fts3MIBufferFree(pCsr->pMIBuffer); + sqlite3Fts3ExprFree(pCsr->pExpr); + memset(&pCursor[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor)); + + /* Set the lower and upper bounds on docids to return */ + pCsr->iMinDocid = fts3DocidRange(pDocidGe, SMALLEST_INT64); + pCsr->iMaxDocid = fts3DocidRange(pDocidLe, LARGEST_INT64); + + if( idxStr ){ + pCsr->bDesc = (idxStr[0]=='D'); + }else{ + pCsr->bDesc = p->bDescIdx; + } + pCsr->eSearch = (i16)eSearch; + + if( eSearch!=FTS3_DOCID_SEARCH && eSearch!=FTS3_FULLSCAN_SEARCH ){ + int iCol = eSearch-FTS3_FULLTEXT_SEARCH; + const char *zQuery = (const char *)sqlite3_value_text(pCons); + + if( zQuery==0 && sqlite3_value_type(pCons)!=SQLITE_NULL ){ + return SQLITE_NOMEM; + } + + pCsr->iLangid = 0; + if( pLangid ) pCsr->iLangid = sqlite3_value_int(pLangid); + + assert( p->base.zErrMsg==0 ); + rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid, + p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr, + &p->base.zErrMsg + ); + if( rc!=SQLITE_OK ){ + return rc; + } + + rc = fts3EvalStart(pCsr); + sqlite3Fts3SegmentsClose(p); + if( rc!=SQLITE_OK ) return rc; + pCsr->pNextId = pCsr->aDoclist; + pCsr->iPrevId = 0; + } + + /* Compile a SELECT statement for this cursor. For a full-table-scan, the + ** statement loops through all rows of the %_content table. For a + ** full-text query or docid lookup, the statement retrieves a single + ** row by docid. + */ + if( eSearch==FTS3_FULLSCAN_SEARCH ){ + if( pDocidGe || pDocidLe ){ + zSql = sqlite3_mprintf( + "SELECT %s WHERE rowid BETWEEN %lld AND %lld ORDER BY rowid %s", + p->zReadExprlist, pCsr->iMinDocid, pCsr->iMaxDocid, + (pCsr->bDesc ? "DESC" : "ASC") + ); + }else{ + zSql = sqlite3_mprintf("SELECT %s ORDER BY rowid %s", + p->zReadExprlist, (pCsr->bDesc ? "DESC" : "ASC") + ); + } + if( zSql ){ + rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); + sqlite3_free(zSql); + }else{ + rc = SQLITE_NOMEM; + } + }else if( eSearch==FTS3_DOCID_SEARCH ){ + rc = fts3CursorSeekStmt(pCsr); + if( rc==SQLITE_OK ){ + rc = sqlite3_bind_value(pCsr->pStmt, 1, pCons); + } + } + if( rc!=SQLITE_OK ) return rc; + + return fts3NextMethod(pCursor); +} + +/* +** This is the xEof method of the virtual table. SQLite calls this +** routine to find out if it has reached the end of a result set. +*/ +static int fts3EofMethod(sqlite3_vtab_cursor *pCursor){ + return ((Fts3Cursor *)pCursor)->isEof; +} + +/* +** This is the xRowid method. The SQLite core calls this routine to +** retrieve the rowid for the current row of the result set. fts3 +** exposes %_content.docid as the rowid for the virtual table. The +** rowid should be written to *pRowid. +*/ +static int fts3RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ + Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; + *pRowid = pCsr->iPrevId; + return SQLITE_OK; +} + +/* +** This is the xColumn method, called by SQLite to request a value from +** the row that the supplied cursor currently points to. +** +** If: +** +** (iCol < p->nColumn) -> The value of the iCol'th user column. +** (iCol == p->nColumn) -> Magic column with the same name as the table. 
+** (iCol == p->nColumn+1) -> Docid column +** (iCol == p->nColumn+2) -> Langid column +*/ +static int fts3ColumnMethod( + sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ + sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ + int iCol /* Index of column to read value from */ +){ + int rc = SQLITE_OK; /* Return Code */ + Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; + Fts3Table *p = (Fts3Table *)pCursor->pVtab; + + /* The column value supplied by SQLite must be in range. */ + assert( iCol>=0 && iCol<=p->nColumn+2 ); + + if( iCol==p->nColumn+1 ){ + /* This call is a request for the "docid" column. Since "docid" is an + ** alias for "rowid", use the xRowid() method to obtain the value. + */ + sqlite3_result_int64(pCtx, pCsr->iPrevId); + }else if( iCol==p->nColumn ){ + /* The extra column whose name is the same as the table. + ** Return a blob which is a pointer to the cursor. */ + sqlite3_result_blob(pCtx, &pCsr, sizeof(pCsr), SQLITE_TRANSIENT); + }else if( iCol==p->nColumn+2 && pCsr->pExpr ){ + sqlite3_result_int64(pCtx, pCsr->iLangid); + }else{ + /* The requested column is either a user column (one that contains + ** indexed data), or the language-id column. */ + rc = fts3CursorSeek(0, pCsr); + + if( rc==SQLITE_OK ){ + if( iCol==p->nColumn+2 ){ + int iLangid = 0; + if( p->zLanguageid ){ + iLangid = sqlite3_column_int(pCsr->pStmt, p->nColumn+1); + } + sqlite3_result_int(pCtx, iLangid); + }else if( sqlite3_data_count(pCsr->pStmt)>(iCol+1) ){ + sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); + } + } + } + + assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); + return rc; +} + +/* +** This function is the implementation of the xUpdate callback used by +** FTS3 virtual tables. It is invoked by SQLite each time a row is to be +** inserted, updated or deleted. +*/ +static int fts3UpdateMethod( + sqlite3_vtab *pVtab, /* Virtual table handle */ + int nArg, /* Size of argument array */ + sqlite3_value **apVal, /* Array of arguments */ + sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */ +){ + return sqlite3Fts3UpdateMethod(pVtab, nArg, apVal, pRowid); +} + +/* +** Implementation of xSync() method. Flush the contents of the pending-terms +** hash-table to the database. +*/ +static int fts3SyncMethod(sqlite3_vtab *pVtab){ + + /* Following an incremental-merge operation, assuming that the input + ** segments are not completely consumed (the usual case), they are updated + ** in place to remove the entries that have already been merged. This + ** involves updating the leaf block that contains the smallest unmerged + ** entry and each block (if any) between the leaf and the root node. So + ** if the height of the input segment b-trees is N, and input segments + ** are merged eight at a time, updating the input segments at the end + ** of an incremental-merge requires writing (8*(1+N)) blocks. N is usually + ** small - often between 0 and 2. So the overhead of the incremental + ** merge is somewhere between 8 and 24 blocks. To avoid this overhead + ** dwarfing the actual productive work accomplished, the incremental merge + ** is only attempted if it will write at least 64 leaf blocks. Hence + ** nMinMerge. + ** + ** Of course, updating the input segments also involves deleting a bunch + ** of blocks from the segments table. But this is not considered overhead + ** as it would also be required by a crisis-merge that used the same input + ** segments. 
+ */ + const u32 nMinMerge = 64; /* Minimum amount of incr-merge work to do */ + + Fts3Table *p = (Fts3Table*)pVtab; + int rc = sqlite3Fts3PendingTermsFlush(p); + + if( rc==SQLITE_OK + && p->nLeafAdd>(nMinMerge/16) + && p->nAutoincrmerge && p->nAutoincrmerge!=0xff + ){ + int mxLevel = 0; /* Maximum relative level value in db */ + int A; /* Incr-merge parameter A */ + + rc = sqlite3Fts3MaxLevel(p, &mxLevel); + assert( rc==SQLITE_OK || mxLevel==0 ); + A = p->nLeafAdd * mxLevel; + A += (A/2); + if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, p->nAutoincrmerge); + } + sqlite3Fts3SegmentsClose(p); + return rc; +} + +/* +** If it is currently unknown whether or not the FTS table has an %_stat +** table (if p->bHasStat==2), attempt to determine this (set p->bHasStat +** to 0 or 1). Return SQLITE_OK if successful, or an SQLite error code +** if an error occurs. +*/ +static int fts3SetHasStat(Fts3Table *p){ + int rc = SQLITE_OK; + if( p->bHasStat==2 ){ + const char *zFmt ="SELECT 1 FROM %Q.sqlite_master WHERE tbl_name='%q_stat'"; + char *zSql = sqlite3_mprintf(zFmt, p->zDb, p->zName); + if( zSql ){ + sqlite3_stmt *pStmt = 0; + rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); + if( rc==SQLITE_OK ){ + int bHasStat = (sqlite3_step(pStmt)==SQLITE_ROW); + rc = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ) p->bHasStat = (u8)bHasStat; + } + sqlite3_free(zSql); + }else{ + rc = SQLITE_NOMEM; + } + } + return rc; +} + +/* +** Implementation of xBegin() method. +*/ +static int fts3BeginMethod(sqlite3_vtab *pVtab){ + Fts3Table *p = (Fts3Table*)pVtab; + UNUSED_PARAMETER(pVtab); + assert( p->pSegments==0 ); + assert( p->nPendingData==0 ); + assert( p->inTransaction!=1 ); + TESTONLY( p->inTransaction = 1 ); + TESTONLY( p->mxSavepoint = -1; ); + p->nLeafAdd = 0; + return fts3SetHasStat(p); +} + +/* +** Implementation of xCommit() method. This is a no-op. The contents of +** the pending-terms hash-table have already been flushed into the database +** by fts3SyncMethod(). +*/ +static int fts3CommitMethod(sqlite3_vtab *pVtab){ + TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); + UNUSED_PARAMETER(pVtab); + assert( p->nPendingData==0 ); + assert( p->inTransaction!=0 ); + assert( p->pSegments==0 ); + TESTONLY( p->inTransaction = 0 ); + TESTONLY( p->mxSavepoint = -1; ); + return SQLITE_OK; +} + +/* +** Implementation of xRollback(). Discard the contents of the pending-terms +** hash-table. Any changes made to the database are reverted by SQLite. +*/ +static int fts3RollbackMethod(sqlite3_vtab *pVtab){ + Fts3Table *p = (Fts3Table*)pVtab; + sqlite3Fts3PendingTermsClear(p); + assert( p->inTransaction!=0 ); + TESTONLY( p->inTransaction = 0 ); + TESTONLY( p->mxSavepoint = -1; ); + return SQLITE_OK; +} + +/* +** When called, *ppPoslist must point to the byte immediately following the +** end of a position-list. i.e. ( (*ppPoslist)[-1]==POS_END ). This function +** moves *ppPoslist so that it instead points to the first byte of the +** same position list. +*/ +static void fts3ReversePoslist(char *pStart, char **ppPoslist){ + char *p = &(*ppPoslist)[-2]; + char c = 0; + + /* Skip backwards passed any trailing 0x00 bytes added by NearTrim() */ + while( p>pStart && (c=*p--)==0 ); + + /* Search backwards for a varint with value zero (the end of the previous + ** poslist). This is an 0x00 byte preceded by some byte that does not + ** have the 0x80 bit set. 
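+ ** + ** (Illustrative byte sequence, not from the original source: given the + ** bytes 0x03 0x00 0x02 0x05 0x00, scanning backwards from the final + ** 0x00 skips the varints 0x02 0x05 and stops at the earlier 0x00, whose + ** predecessor 0x03 does not have the 0x80 continuation bit set - that + ** 0x00 is the terminator of the previous poslist.)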
*/ + while( p>pStart && (*p & 0x80) | c ){ + c = *p--; + } + assert( p==pStart || c==0 ); + + /* At this point p points to that preceding byte without the 0x80 bit + ** set. So to find the start of the poslist, skip forward 2 bytes then + ** over a varint. + ** + ** Normally. The other case is that p==pStart and the poslist to return + ** is the first in the doclist. In this case do not skip forward 2 bytes. + ** The second part of the if condition (c==0 && *ppPoslist>&p[2]) + ** is required for cases where the poslist belonging to the first docid + ** in the doclist is empty. For example, if the first docid is 10, a + ** doclist that begins with: + ** + ** 0x0A 0x00 <next docid> + */ + if( p>pStart || (c==0 && *ppPoslist>&p[2]) ){ p = &p[2]; } + while( *p++&0x80 ); + *ppPoslist = p; +} + +/* +** Helper function used by the implementation of the overloaded snippet(), +** offsets() and optimize() SQL functions. +** +** If the value passed as the third argument is a blob of size +** sizeof(Fts3Cursor*), then the blob contents are copied to the +** output variable *ppCsr and SQLITE_OK is returned. Otherwise, an error +** message is written to context pContext and SQLITE_ERROR returned. The +** string passed via zFunc is used as part of the error message. +*/ +static int fts3FunctionArg( + sqlite3_context *pContext, /* SQL function call context */ + const char *zFunc, /* Function name */ + sqlite3_value *pVal, /* argv[0] passed to function */ + Fts3Cursor **ppCsr /* OUT: Store cursor handle here */ +){ + Fts3Cursor *pRet; + if( sqlite3_value_type(pVal)!=SQLITE_BLOB + || sqlite3_value_bytes(pVal)!=sizeof(Fts3Cursor *) + ){ + char *zErr = sqlite3_mprintf("illegal first argument to %s", zFunc); + sqlite3_result_error(pContext, zErr, -1); + sqlite3_free(zErr); + return SQLITE_ERROR; + } + memcpy(&pRet, sqlite3_value_blob(pVal), sizeof(Fts3Cursor *)); + *ppCsr = pRet; + return SQLITE_OK; +} + +/* +** Implementation of the snippet() function for FTS3 +*/ +static void fts3SnippetFunc( + sqlite3_context *pContext, /* SQLite function call context */ + int nVal, /* Size of apVal[] array */ + sqlite3_value **apVal /* Array of arguments */ +){ + Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ + const char *zStart = "<b>"; + const char *zEnd = "</b>"; + const char *zEllipsis = "<b>...</b>"; + int iCol = -1; + int nToken = 15; /* Default number of tokens in snippet */ + + /* There must be at least one argument passed to this function (otherwise + ** the non-overloaded version would have been called instead of this one).
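+ ** + ** (Illustrative usage, not part of the original source: assuming an FTS3 + ** table named docs, a query such as SELECT snippet(docs, '<b>', '</b>', + ** '...', -1, 15) FROM docs WHERE docs MATCH 'sqlite' overrides the + ** defaults above; with no extra arguments the markers fall back to the + ** <b>...</b> strings initialized earlier.)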
+ */ + assert( nVal>=1 ); + + if( nVal>6 ){ + sqlite3_result_error(pContext, + "wrong number of arguments to function snippet()", -1); + return; + } + if( fts3FunctionArg(pContext, "snippet", apVal[0], &pCsr) ) return; + + switch( nVal ){ + case 6: nToken = sqlite3_value_int(apVal[5]); + case 5: iCol = sqlite3_value_int(apVal[4]); + case 4: zEllipsis = (const char*)sqlite3_value_text(apVal[3]); + case 3: zEnd = (const char*)sqlite3_value_text(apVal[2]); + case 2: zStart = (const char*)sqlite3_value_text(apVal[1]); + } + if( !zEllipsis || !zEnd || !zStart ){ + sqlite3_result_error_nomem(pContext); + }else if( nToken==0 ){ + sqlite3_result_text(pContext, "", -1, SQLITE_STATIC); + }else if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ + sqlite3Fts3Snippet(pContext, pCsr, zStart, zEnd, zEllipsis, iCol, nToken); + } +} + +/* +** Implementation of the offsets() function for FTS3 +*/ +static void fts3OffsetsFunc( + sqlite3_context *pContext, /* SQLite function call context */ + int nVal, /* Size of argument array */ + sqlite3_value **apVal /* Array of arguments */ +){ + Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ + + UNUSED_PARAMETER(nVal); + + assert( nVal==1 ); + if( fts3FunctionArg(pContext, "offsets", apVal[0], &pCsr) ) return; + assert( pCsr ); + if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ + sqlite3Fts3Offsets(pContext, pCsr); + } +} + +/* +** Implementation of the special optimize() function for FTS3. This +** function merges all segments in the database to a single segment. +** Example usage is: +** +** SELECT optimize(t) FROM t LIMIT 1; +** +** where 't' is the name of an FTS3 table. +*/ +static void fts3OptimizeFunc( + sqlite3_context *pContext, /* SQLite function call context */ + int nVal, /* Size of argument array */ + sqlite3_value **apVal /* Array of arguments */ +){ + int rc; /* Return code */ + Fts3Table *p; /* Virtual table handle */ + Fts3Cursor *pCursor; /* Cursor handle passed through apVal[0] */ + + UNUSED_PARAMETER(nVal); + + assert( nVal==1 ); + if( fts3FunctionArg(pContext, "optimize", apVal[0], &pCursor) ) return; + p = (Fts3Table *)pCursor->base.pVtab; + assert( p ); + + rc = sqlite3Fts3Optimize(p); + + switch( rc ){ + case SQLITE_OK: + sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); + break; + case SQLITE_DONE: + sqlite3_result_text(pContext, "Index already optimal", -1, SQLITE_STATIC); + break; + default: + sqlite3_result_error_code(pContext, rc); + break; + } +} + +/* +** Implementation of the matchinfo() function for FTS3 +*/ +static void fts3MatchinfoFunc( + sqlite3_context *pContext, /* SQLite function call context */ + int nVal, /* Size of argument array */ + sqlite3_value **apVal /* Array of arguments */ +){ + Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ + assert( nVal==1 || nVal==2 ); + if( SQLITE_OK==fts3FunctionArg(pContext, "matchinfo", apVal[0], &pCsr) ){ + const char *zArg = 0; + if( nVal>1 ){ + zArg = (const char *)sqlite3_value_text(apVal[1]); + } + sqlite3Fts3Matchinfo(pContext, pCsr, zArg); + } +} + +/* +** This routine implements the xFindFunction method for the FTS3 +** virtual table. 
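+** +** (Illustrative, not from the original source: when a statement such as +** SELECT snippet(t) FROM t WHERE t MATCH '...' is prepared, SQLite calls +** this method to resolve the name "snippet" against the aOverload[] table +** defined below.)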
+*/ +static int fts3FindFunctionMethod( + sqlite3_vtab *pVtab, /* Virtual table handle */ + int nArg, /* Number of SQL function arguments */ + const char *zName, /* Name of SQL function */ + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */ + void **ppArg /* Unused */ +){ + struct Overloaded { + const char *zName; + void (*xFunc)(sqlite3_context*,int,sqlite3_value**); + } aOverload[] = { + { "snippet", fts3SnippetFunc }, + { "offsets", fts3OffsetsFunc }, + { "optimize", fts3OptimizeFunc }, + { "matchinfo", fts3MatchinfoFunc }, + }; + int i; /* Iterator variable */ + + UNUSED_PARAMETER(pVtab); + UNUSED_PARAMETER(nArg); + UNUSED_PARAMETER(ppArg); + + for(i=0; i<SizeofArray(aOverload); i++){ + if( strcmp(zName, aOverload[i].zName)==0 ){ + *pxFunc = aOverload[i].xFunc; + return 1; + } + } + + /* No function of the specified name was found. Return 0. */ + return 0; +} + +/* +** Implementation of FTS3 xRename method. Rename an fts3 table. +*/ +static int fts3RenameMethod( + sqlite3_vtab *pVtab, /* Virtual table handle */ + const char *zName /* New name of table */ +){ + Fts3Table *p = (Fts3Table *)pVtab; + sqlite3 *db = p->db; /* Database connection */ + int rc; /* Return Code */ + + /* At this point it must be known if the %_stat table exists or not. + ** So bHasStat may not be 2. */ + rc = fts3SetHasStat(p); + + /* As it happens, the pending terms table is always empty here. This is + ** because an "ALTER TABLE RENAME TABLE" statement inside a transaction + ** always opens a savepoint transaction. And the xSavepoint() method + ** flushes the pending terms table. But leave the (no-op) call to + ** PendingTermsFlush() in, in case that changes. + */ + assert( p->nPendingData==0 ); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts3PendingTermsFlush(p); + } + + if( p->zContentTbl==0 ){ + fts3DbExec(&rc, db, + "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", + p->zDb, p->zName, zName + ); + } + + if( p->bHasDocsize ){ + fts3DbExec(&rc, db, + "ALTER TABLE %Q.'%q_docsize' RENAME TO '%q_docsize';", + p->zDb, p->zName, zName + ); + } + if( p->bHasStat ){ + fts3DbExec(&rc, db, + "ALTER TABLE %Q.'%q_stat' RENAME TO '%q_stat';", + p->zDb, p->zName, zName + ); + } + fts3DbExec(&rc, db, + "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';", + p->zDb, p->zName, zName + ); + fts3DbExec(&rc, db, + "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", + p->zDb, p->zName, zName + ); + return rc; +} + +/* +** The xSavepoint() method. +** +** Flush the contents of the pending-terms table to disk. +*/ +static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ + int rc = SQLITE_OK; + UNUSED_PARAMETER(iSavepoint); + assert( ((Fts3Table *)pVtab)->inTransaction ); + assert( ((Fts3Table *)pVtab)->mxSavepoint < iSavepoint ); + TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); + if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ + rc = fts3SyncMethod(pVtab); + } + return rc; +} + +/* +** The xRelease() method. +** +** This is a no-op. +*/ +static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ + TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); + UNUSED_PARAMETER(iSavepoint); + UNUSED_PARAMETER(pVtab); + assert( p->inTransaction ); + assert( p->mxSavepoint >= iSavepoint ); + TESTONLY( p->mxSavepoint = iSavepoint-1 ); + return SQLITE_OK; +} + +/* +** The xRollbackTo() method. +** +** Discard the contents of the pending terms table.
+*/ +static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ + Fts3Table *p = (Fts3Table*)pVtab; + UNUSED_PARAMETER(iSavepoint); + assert( p->inTransaction ); + assert( p->mxSavepoint >= iSavepoint ); + TESTONLY( p->mxSavepoint = iSavepoint ); + sqlite3Fts3PendingTermsClear(p); + return SQLITE_OK; +} + +static const sqlite3_module fts3Module = { + /* iVersion */ 2, + /* xCreate */ fts3CreateMethod, + /* xConnect */ fts3ConnectMethod, + /* xBestIndex */ fts3BestIndexMethod, + /* xDisconnect */ fts3DisconnectMethod, + /* xDestroy */ fts3DestroyMethod, + /* xOpen */ fts3OpenMethod, + /* xClose */ fts3CloseMethod, + /* xFilter */ fts3FilterMethod, + /* xNext */ fts3NextMethod, + /* xEof */ fts3EofMethod, + /* xColumn */ fts3ColumnMethod, + /* xRowid */ fts3RowidMethod, + /* xUpdate */ fts3UpdateMethod, + /* xBegin */ fts3BeginMethod, + /* xSync */ fts3SyncMethod, + /* xCommit */ fts3CommitMethod, + /* xRollback */ fts3RollbackMethod, + /* xFindFunction */ fts3FindFunctionMethod, + /* xRename */ fts3RenameMethod, + /* xSavepoint */ fts3SavepointMethod, + /* xRelease */ fts3ReleaseMethod, + /* xRollbackTo */ fts3RollbackToMethod, +}; + +/* +** This function is registered as the module destructor (called when an +** FTS3 enabled database connection is closed). It frees the memory +** allocated for the tokenizer hash table. +*/ +static void hashDestroy(void *p){ + Fts3Hash *pHash = (Fts3Hash *)p; + sqlite3Fts3HashClear(pHash); + sqlite3_free(pHash); +} + +/* +** The fts3 built-in tokenizers - "simple", "porter" and "icu"- are +** implemented in files fts3_tokenizer1.c, fts3_porter.c and fts3_icu.c +** respectively. The following three forward declarations are for functions +** declared in these files used to retrieve the respective implementations. +** +** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed +** to by the argument to point to the "simple" tokenizer implementation. +** And so on. +*/ +SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); +SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); +#ifndef SQLITE_DISABLE_FTS3_UNICODE +SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const**ppModule); +#endif +#ifdef SQLITE_ENABLE_ICU +SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); +#endif + +/* +** Initialize the fts3 extension. If this extension is built as part +** of the sqlite library, then this function is called directly by +** SQLite. If fts3 is built as a dynamically loadable extension, this +** function is called by the sqlite3_extension_init() entry point. +*/ +SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ + int rc = SQLITE_OK; + Fts3Hash *pHash = 0; + const sqlite3_tokenizer_module *pSimple = 0; + const sqlite3_tokenizer_module *pPorter = 0; +#ifndef SQLITE_DISABLE_FTS3_UNICODE + const sqlite3_tokenizer_module *pUnicode = 0; +#endif + +#ifdef SQLITE_ENABLE_ICU + const sqlite3_tokenizer_module *pIcu = 0; + sqlite3Fts3IcuTokenizerModule(&pIcu); +#endif + +#ifndef SQLITE_DISABLE_FTS3_UNICODE + sqlite3Fts3UnicodeTokenizer(&pUnicode); +#endif + +#ifdef SQLITE_TEST + rc = sqlite3Fts3InitTerm(db); + if( rc!=SQLITE_OK ) return rc; +#endif + + rc = sqlite3Fts3InitAux(db); + if( rc!=SQLITE_OK ) return rc; + + sqlite3Fts3SimpleTokenizerModule(&pSimple); + sqlite3Fts3PorterTokenizerModule(&pPorter); + + /* Allocate and initialize the hash-table used to store tokenizers. 
*/ + pHash = sqlite3_malloc(sizeof(Fts3Hash)); + if( !pHash ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); + } + + /* Load the built-in tokenizers into the hash table */ + if( rc==SQLITE_OK ){ + if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) + || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) + +#ifndef SQLITE_DISABLE_FTS3_UNICODE + || sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode) +#endif +#ifdef SQLITE_ENABLE_ICU + || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) +#endif + ){ + rc = SQLITE_NOMEM; + } + } + +#ifdef SQLITE_TEST + if( rc==SQLITE_OK ){ + rc = sqlite3Fts3ExprInitTestInterface(db); + } +#endif + + /* Create the virtual table wrapper around the hash-table and overload + ** the two scalar functions. If this is successful, register the + ** module with sqlite. + */ + if( SQLITE_OK==rc + && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2)) + && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) + ){ + rc = sqlite3_create_module_v2( + db, "fts3", &fts3Module, (void *)pHash, hashDestroy + ); + if( rc==SQLITE_OK ){ + rc = sqlite3_create_module_v2( + db, "fts4", &fts3Module, (void *)pHash, 0 + ); + } + if( rc==SQLITE_OK ){ + rc = sqlite3Fts3InitTok(db, (void *)pHash); + } + return rc; + } + + + /* An error has occurred. Delete the hash table and return the error code. */ + assert( rc!=SQLITE_OK ); + if( pHash ){ + sqlite3Fts3HashClear(pHash); + sqlite3_free(pHash); + } + return rc; +} + +/* +** Allocate an Fts3MultiSegReader for each token in the expression headed +** by pExpr. +** +** An Fts3SegReader object is a cursor that can seek or scan a range of +** entries within a single segment b-tree. An Fts3MultiSegReader uses multiple +** Fts3SegReader objects internally to provide an interface to seek or scan +** within the union of all segments of a b-tree. Hence the name. +** +** If the allocated Fts3MultiSegReader just seeks to a single entry in a +** segment b-tree (if the term is not a prefix or it is a prefix for which +** there exists prefix b-tree of the right length) then it may be traversed +** and merged incrementally. Otherwise, it has to be merged into an in-memory +** doclist and then traversed. +*/ +static void fts3EvalAllocateReaders( + Fts3Cursor *pCsr, /* FTS cursor handle */ + Fts3Expr *pExpr, /* Allocate readers for this expression */ + int *pnToken, /* OUT: Total number of tokens in phrase. */ + int *pnOr, /* OUT: Total number of OR nodes in expr. 
*/ + int *pRc /* IN/OUT: Error code */ +){ + if( pExpr && SQLITE_OK==*pRc ){ + if( pExpr->eType==FTSQUERY_PHRASE ){ + int i; + int nToken = pExpr->pPhrase->nToken; + *pnToken += nToken; + for(i=0; i<nToken; i++){ + Fts3PhraseToken *pToken = &pExpr->pPhrase->aToken[i]; + int rc = fts3TermSegReaderCursor(pCsr, + pToken->z, pToken->n, pToken->isPrefix, &pToken->pSegcsr + ); + if( rc!=SQLITE_OK ){ + *pRc = rc; + return; + } + } + assert( pExpr->pPhrase->iDoclistToken==0 ); + pExpr->pPhrase->iDoclistToken = -1; + }else{ + *pnOr += (pExpr->eType==FTSQUERY_OR); + fts3EvalAllocateReaders(pCsr, pExpr->pLeft, pnToken, pnOr, pRc); + fts3EvalAllocateReaders(pCsr, pExpr->pRight, pnToken, pnOr, pRc); + } + } +} + +/* +** Arguments pList/nList contain the doclist for token iToken of phrase p. +** It is merged into the main doclist stored in p->doclist.aAll/nAll. +** +** This function assumes that pList points to a buffer allocated using +** sqlite3_malloc(). This function takes responsibility for eventually +** freeing the buffer. +** +** SQLITE_OK is returned if successful, or SQLITE_NOMEM if an error occurs. +*/ +static int fts3EvalPhraseMergeToken( + Fts3Table *pTab, /* FTS Table pointer */ + Fts3Phrase *p, /* Phrase to merge pList/nList into */ + int iToken, /* Token pList/nList corresponds to */ + char *pList, /* Pointer to doclist */ + int nList /* Number of bytes in pList */ +){ + int rc = SQLITE_OK; + assert( iToken!=p->iDoclistToken ); + + if( pList==0 ){ + sqlite3_free(p->doclist.aAll); + p->doclist.aAll = 0; + p->doclist.nAll = 0; + } + + else if( p->iDoclistToken<0 ){ + p->doclist.aAll = pList; + p->doclist.nAll = nList; + } + + else if( p->doclist.aAll==0 ){ + sqlite3_free(pList); + } + + else { + char *pLeft; + char *pRight; + int nLeft; + int nRight; + int nDiff; + + if( p->iDoclistToken<iToken ){ + pLeft = p->doclist.aAll; + nLeft = p->doclist.nAll; + pRight = pList; + nRight = nList; + nDiff = iToken - p->iDoclistToken; + }else{ + pRight = p->doclist.aAll; + nRight = p->doclist.nAll; + pLeft = pList; + nLeft = nList; + nDiff = p->iDoclistToken - iToken; + } + + rc = fts3DoclistPhraseMerge( + pTab->bDescIdx, nDiff, pLeft, nLeft, &pRight, &nRight + ); + sqlite3_free(pLeft); + p->doclist.aAll = pRight; + p->doclist.nAll = nRight; + } + + if( iToken>p->iDoclistToken ) p->iDoclistToken = iToken; + return rc; +} + +/* +** Load the doclist for phrase p into p->doclist.aAll/nAll. The loaded doclist +** does not take deferred tokens into account. +** +** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. +*/ +static int fts3EvalPhraseLoad( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Phrase *p /* Phrase object */ +){ + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + int iToken; + int rc = SQLITE_OK; + + for(iToken=0; rc==SQLITE_OK && iToken<p->nToken; iToken++){ + Fts3PhraseToken *pToken = &p->aToken[iToken]; + assert( pToken->pDeferred==0 || pToken->pSegcsr==0 ); + + if( pToken->pSegcsr ){ + int nThis = 0; + char *pThis = 0; + rc = fts3TermSelect(pTab, pToken, p->iColumn, &nThis, &pThis); + if( rc==SQLITE_OK ){ + rc = fts3EvalPhraseMergeToken(pTab, p, iToken, pThis, nThis); + } + } + assert( pToken->pSegcsr==0 ); + } + + return rc; +} + +/* +** This function is called on each phrase after the position lists for +** any deferred tokens have been loaded into memory. It updates the phrase's +** current position list to include only those positions that are really +** instances of the phrase (after considering deferred tokens).
If this +** means that the phrase does not appear in the current row, doclist.pList +** and doclist.nList are both zeroed. +** +** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. +*/ +static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ + int iToken; /* Used to iterate through phrase tokens */ + char *aPoslist = 0; /* Position list for deferred tokens */ + int nPoslist = 0; /* Number of bytes in aPoslist */ + int iPrev = -1; /* Token number of previous deferred token */ + + assert( pPhrase->doclist.bFreeList==0 ); + + for(iToken=0; iToken<pPhrase->nToken; iToken++){ + Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; + Fts3DeferredToken *pDeferred = pToken->pDeferred; + + if( pDeferred ){ + char *pList; + int nList; + int rc = sqlite3Fts3DeferredTokenList(pDeferred, &pList, &nList); + if( rc!=SQLITE_OK ) return rc; + + if( pList==0 ){ + sqlite3_free(aPoslist); + pPhrase->doclist.pList = 0; + pPhrase->doclist.nList = 0; + return SQLITE_OK; + + }else if( aPoslist==0 ){ + aPoslist = pList; + nPoslist = nList; + + }else{ + char *aOut = pList; + char *p1 = aPoslist; + char *p2 = aOut; + + assert( iPrev>=0 ); + fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); + sqlite3_free(aPoslist); + aPoslist = pList; + nPoslist = (int)(aOut - aPoslist); + if( nPoslist==0 ){ + sqlite3_free(aPoslist); + pPhrase->doclist.pList = 0; + pPhrase->doclist.nList = 0; + return SQLITE_OK; + } + } + iPrev = iToken; + } + } + + if( iPrev>=0 ){ + int nMaxUndeferred = pPhrase->iDoclistToken; + if( nMaxUndeferred<0 ){ + pPhrase->doclist.pList = aPoslist; + pPhrase->doclist.nList = nPoslist; + pPhrase->doclist.iDocid = pCsr->iPrevId; + pPhrase->doclist.bFreeList = 1; + }else{ + int nDistance; + char *p1; + char *p2; + char *aOut; + + if( nMaxUndeferred>iPrev ){ + p1 = aPoslist; + p2 = pPhrase->doclist.pList; + nDistance = nMaxUndeferred - iPrev; + }else{ + p1 = pPhrase->doclist.pList; + p2 = aPoslist; + nDistance = iPrev - nMaxUndeferred; + } + + aOut = (char *)sqlite3_malloc(nPoslist+8); + if( !aOut ){ + sqlite3_free(aPoslist); + return SQLITE_NOMEM; + } + + pPhrase->doclist.pList = aOut; + if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ + pPhrase->doclist.bFreeList = 1; + pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList); + }else{ + sqlite3_free(aOut); + pPhrase->doclist.pList = 0; + pPhrase->doclist.nList = 0; + } + sqlite3_free(aPoslist); + } + } + + return SQLITE_OK; +} + +/* +** Maximum number of tokens a phrase may have to be considered for the +** incremental doclists strategy. +*/ +#define MAX_INCR_PHRASE_TOKENS 4 + +/* +** This function is called for each Fts3Phrase in a full-text query +** expression to initialize the mechanism for returning rows. Once this +** function has been called successfully on an Fts3Phrase, it may be +** used with fts3EvalPhraseNext() to iterate through the matching docids. +** +** If parameter bOptOk is true, then the phrase may (or may not) use the +** incremental loading strategy. Otherwise, the entire doclist is loaded into +** memory within this call. +** +** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. +*/ +static int fts3EvalPhraseStart(Fts3Cursor *pCsr, int bOptOk, Fts3Phrase *p){ + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + int rc = SQLITE_OK; /* Error code */ + int i; + + /* Determine if doclists may be loaded from disk incrementally.
This is + ** possible if the bOptOk argument is true, the FTS doclists will be + ** scanned in forward order, and the phrase consists of + ** MAX_INCR_PHRASE_TOKENS or fewer tokens, none of which are "^first" + ** tokens or prefix tokens that cannot use a prefix-index. */ + int bHaveIncr = 0; + int bIncrOk = (bOptOk + && pCsr->bDesc==pTab->bDescIdx + && p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0 +#ifdef SQLITE_TEST + && pTab->bNoIncrDoclist==0 +#endif + ); + for(i=0; bIncrOk==1 && i<p->nToken; i++){ + Fts3PhraseToken *pToken = &p->aToken[i]; + if( pToken->bFirst || (pToken->pSegcsr!=0 && !pToken->pSegcsr->bLookup) ){ + bIncrOk = 0; + } + if( pToken->pSegcsr ) bHaveIncr = 1; + } + + if( bIncrOk && bHaveIncr ){ + /* Use the incremental approach. */ + int iCol = (p->iColumn >= pTab->nColumn ? -1 : p->iColumn); + for(i=0; rc==SQLITE_OK && i<p->nToken; i++){ + Fts3PhraseToken *pToken = &p->aToken[i]; + Fts3MultiSegReader *pSegcsr = pToken->pSegcsr; + if( pSegcsr ){ + rc = sqlite3Fts3MsrIncrStart(pTab, pSegcsr, iCol, pToken->z, pToken->n); + } + } + p->bIncr = 1; + }else{ + /* Load the full doclist for the phrase into memory. */ + rc = fts3EvalPhraseLoad(pCsr, p); + p->bIncr = 0; + } + + assert( rc!=SQLITE_OK || p->nToken<1 || p->aToken[0].pSegcsr==0 || p->bIncr ); + return rc; +} + +/* +** This function is used to iterate backwards (from the end to start) +** through doclists. It is used by this module to iterate through phrase +** doclists in reverse and by the fts3_write.c module to iterate through +** pending-terms lists when writing to databases with "order=desc". +** +** The doclist may be sorted in ascending (parameter bDescIdx==0) or +** descending (parameter bDescIdx==1) order of docid. Regardless, this +** function iterates from the end of the doclist to the beginning. +*/ +SQLITE_PRIVATE void sqlite3Fts3DoclistPrev( + int bDescIdx, /* True if the doclist is desc */ + char *aDoclist, /* Pointer to entire doclist */ + int nDoclist, /* Length of aDoclist in bytes */ + char **ppIter, /* IN/OUT: Iterator pointer */ + sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */ + int *pnList, /* OUT: List length pointer */ + u8 *pbEof /* OUT: End-of-file flag */ +){ + char *p = *ppIter; + + assert( nDoclist>0 ); + assert( *pbEof==0 ); + assert( p || *piDocid==0 ); + assert( !p || (p>aDoclist && p<&aDoclist[nDoclist]) ); + + if( p==0 ){ + sqlite3_int64 iDocid = 0; + char *pNext = 0; + char *pDocid = aDoclist; + char *pEnd = &aDoclist[nDoclist]; + int iMul = 1; + + while( pDocid<pEnd ){ + sqlite3_int64 iDelta; + pDocid += sqlite3Fts3GetVarint(pDocid, &iDelta); + iDocid += (iMul * iDelta); + pNext = pDocid; + fts3PoslistCopy(0, &pDocid); + while( pDocid<pEnd && *pDocid==0 ) pDocid++; + iMul = (bDescIdx ? -1 : 1); + } + + *pnList = (int)(pEnd - pNext); + *ppIter = pNext; + *piDocid = iDocid; + }else{ + int iMul = (bDescIdx ? -1 : 1); + sqlite3_int64 iDelta; + fts3GetReverseVarint(&p, aDoclist, &iDelta); + *piDocid -= (iMul * iDelta); + + if( p==aDoclist ){ + *pbEof = 1; + }else{ + char *pSave = p; + fts3ReversePoslist(aDoclist, &p); + *pnList = (int)(pSave - p); + } + *ppIter = p; + } +} + +/* +** Iterate forwards through a doclist. +*/ +SQLITE_PRIVATE void sqlite3Fts3DoclistNext( + int bDescIdx, /* True if the doclist is desc */ + char *aDoclist, /* Pointer to entire doclist */ + int nDoclist, /* Length of aDoclist in bytes */ + char **ppIter, /* IN/OUT: Iterator pointer */ + sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */ + u8 *pbEof /* OUT: End-of-file flag */ +){ + char *p = *ppIter; + + assert( nDoclist>0 ); + assert( *pbEof==0 ); + assert( p || *piDocid==0 ); + assert( !p || (p>=aDoclist && p<=&aDoclist[nDoclist]) ); + + if( p==0 ){ + p = aDoclist; + p += sqlite3Fts3GetVarint(p, piDocid); + }else{ + fts3PoslistCopy(0, &p); + while( p<&aDoclist[nDoclist] && *p==0 ) p++; + if( p>=&aDoclist[nDoclist] ){ + *pbEof = 1; + }else{ + sqlite3_int64 iVar; + p += sqlite3Fts3GetVarint(p, &iVar); + *piDocid += ((bDescIdx ? -1 : 1) * iVar); + } + } + + *ppIter = p; +} + +/* +** Advance the iterator pDL to the next entry in pDL->aAll/nAll. Set *pbEof +** to true if EOF is reached. +*/ +static void fts3EvalDlPhraseNext( + Fts3Table *pTab, + Fts3Doclist *pDL, + u8 *pbEof +){ + char *pIter; /* Used to iterate through aAll */ + char *pEnd = &pDL->aAll[pDL->nAll]; /* 1 byte past end of aAll */ + + if( pDL->pNextDocid ){ + pIter = pDL->pNextDocid; + }else{ + pIter = pDL->aAll; + } + + if( pIter>=pEnd ){ + /* We have already reached the end of this doclist. EOF.
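+ ** + ** (Illustrative encoding, not from the original source: an ascending + ** doclist holding docids 10, 13 and 14 stores the varint deltas 10, 3 + ** and 1, each followed by that document's position list and a 0x00 + ** terminator; the code below recovers each docid by adding iDelta to + ** pDL->iDocid.)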
*/ + *pbEof = 1; + }else{ + sqlite3_int64 iDelta; + pIter += sqlite3Fts3GetVarint(pIter, &iDelta); + if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ + pDL->iDocid += iDelta; + }else{ + pDL->iDocid -= iDelta; + } + pDL->pList = pIter; + fts3PoslistCopy(0, &pIter); + pDL->nList = (int)(pIter - pDL->pList); + + /* pIter now points just past the 0x00 that terminates the position- + ** list for document pDL->iDocid. However, if this position-list was + ** edited in place by fts3EvalNearTrim(), then pIter may not actually + ** point to the start of the next docid value. The following line deals + ** with this case by advancing pIter past the zero-padding added by + ** fts3EvalNearTrim(). */ + while( pIter<pEnd && *pIter==0 ) pIter++; + + pDL->pNextDocid = pIter; + assert( pIter>=&pDL->aAll[pDL->nAll] || *pIter ); + *pbEof = 0; + } +} + +/* +** Helper type used by fts3EvalIncrPhraseNext() and incrPhraseTokenNext(). +*/ +typedef struct TokenDoclist TokenDoclist; +struct TokenDoclist { + int bIgnore; + sqlite3_int64 iDocid; + char *pList; + int nList; +}; + +/* +** Token pToken is an incrementally loaded token that is part of a +** multi-token phrase. Advance it to the next matching document in the +** database and populate output variable *p with the details of the new +** entry. Or, if the iterator has reached EOF, set *pbEof to true. +** +** If an error occurs, return an SQLite error code. Otherwise, return +** SQLITE_OK. +*/ +static int incrPhraseTokenNext( + Fts3Table *pTab, /* Virtual table handle */ + Fts3Phrase *pPhrase, /* Phrase to advance token of */ + int iToken, /* Specific token to advance */ + TokenDoclist *p, /* OUT: Docid and doclist for new entry */ + u8 *pbEof /* OUT: True if iterator is at EOF */ +){ + int rc = SQLITE_OK; + + if( pPhrase->iDoclistToken==iToken ){ + assert( p->bIgnore==0 ); + assert( pPhrase->aToken[iToken].pSegcsr==0 ); + fts3EvalDlPhraseNext(pTab, &pPhrase->doclist, pbEof); + p->pList = pPhrase->doclist.pList; + p->nList = pPhrase->doclist.nList; + p->iDocid = pPhrase->doclist.iDocid; + }else{ + Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; + assert( pToken->pDeferred==0 ); + assert( pToken->pSegcsr || pPhrase->iDoclistToken>=0 ); + if( pToken->pSegcsr ){ + assert( p->bIgnore==0 ); + rc = sqlite3Fts3MsrIncrNext( + pTab, pToken->pSegcsr, &p->iDocid, &p->pList, &p->nList + ); + if( p->pList==0 ) *pbEof = 1; + }else{ + p->bIgnore = 1; + } + } + + return rc; +} + + +/* +** The phrase iterator passed as the second argument: +** +** * features at least one token that uses an incremental doclist, and +** +** * does not contain any deferred tokens. +** +** Advance it to the next matching document in the database and populate +** the Fts3Doclist.pList and nList fields. +** +** If there is no "next" entry and no error occurs, then *pbEof is set to +** 1 before returning. Otherwise, if no error occurs and the iterator is +** successfully advanced, *pbEof is set to 0. +** +** If an error occurs, return an SQLite error code. Otherwise, return +** SQLITE_OK. +*/ +static int fts3EvalIncrPhraseNext( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Phrase *p, /* Phrase object to advance to next docid */ + u8 *pbEof /* OUT: Set to 1 if EOF */ +){ + int rc = SQLITE_OK; + Fts3Doclist *pDL = &p->doclist; + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + u8 bEof = 0; + + /* This is only called if it is guaranteed that the phrase has at least + ** one incremental token. In which case the bIncr flag is set.
*/ + assert( p->bIncr==1 ); + + if( p->nToken==1 && p->bIncr ){ + rc = sqlite3Fts3MsrIncrNext(pTab, p->aToken[0].pSegcsr, + &pDL->iDocid, &pDL->pList, &pDL->nList + ); + if( pDL->pList==0 ) bEof = 1; + }else{ + int bDescDoclist = pCsr->bDesc; + struct TokenDoclist a[MAX_INCR_PHRASE_TOKENS]; + + memset(a, 0, sizeof(a)); + assert( p->nToken<=MAX_INCR_PHRASE_TOKENS ); + assert( p->iDoclistToken<MAX_INCR_PHRASE_TOKENS ); + + while( bEof==0 ){ + int bMaxSet = 0; + sqlite3_int64 iMax = 0; /* Largest docid for all iterators */ + int i; /* Used to iterate through tokens */ + + /* Advance the iterator for each token in the phrase once. */ + for(i=0; rc==SQLITE_OK && i<p->nToken && bEof==0; i++){ + rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof); + if( a[i].bIgnore==0 && (bMaxSet==0 || DOCID_CMP(iMax, a[i].iDocid)<0) ){ + iMax = a[i].iDocid; + bMaxSet = 1; + } + } + assert( rc!=SQLITE_OK || (p->nToken>=1 && a[p->nToken-1].bIgnore==0) ); + assert( rc!=SQLITE_OK || bMaxSet ); + + /* Keep advancing iterators until they all point to the same document */ + for(i=0; i<p->nToken; i++){ + while( rc==SQLITE_OK && bEof==0 + && a[i].bIgnore==0 && DOCID_CMP(a[i].iDocid, iMax)<0 + ){ + rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof); + if( DOCID_CMP(a[i].iDocid, iMax)>0 ){ + iMax = a[i].iDocid; + i = 0; + } + } + } + + /* Check if the current entries really are a phrase match */ + if( bEof==0 ){ + int nList = 0; + int nByte = a[p->nToken-1].nList; + char *aDoclist = sqlite3_malloc(nByte+1); + if( !aDoclist ) return SQLITE_NOMEM; + memcpy(aDoclist, a[p->nToken-1].pList, nByte+1); + + for(i=0; i<(p->nToken-1); i++){ + if( a[i].bIgnore==0 ){ + char *pL = a[i].pList; + char *pR = aDoclist; + char *pOut = aDoclist; + int nDist = p->nToken-1-i; + int res = fts3PoslistPhraseMerge(&pOut, nDist, 0, 1, &pL, &pR); + if( res==0 ) break; + nList = (int)(pOut - aDoclist); + } + } + if( i==(p->nToken-1) ){ + pDL->iDocid = iMax; + pDL->pList = aDoclist; + pDL->nList = nList; + pDL->bFreeList = 1; + break; + } + sqlite3_free(aDoclist); + } + } + } + + *pbEof = bEof; + return rc; +} + +/* +** Attempt to move the phrase iterator to point to the next matching docid. +** If an error occurs, return an SQLite error code. Otherwise, return +** SQLITE_OK. +** +** If there is no "next" entry and no error occurs, then *pbEof is set to +** 1 before returning. Otherwise, if no error occurs and the iterator is +** successfully advanced, *pbEof is set to 0. +*/ +static int fts3EvalPhraseNext( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Phrase *p, /* Phrase object to advance to next docid */ + u8 *pbEof /* OUT: Set to 1 if EOF */ +){ + int rc = SQLITE_OK; + Fts3Doclist *pDL = &p->doclist; + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + + if( p->bIncr ){ + rc = fts3EvalIncrPhraseNext(pCsr, p, pbEof); + }else if( pCsr->bDesc!=pTab->bDescIdx && pDL->nAll ){ + sqlite3Fts3DoclistPrev(pTab->bDescIdx, pDL->aAll, pDL->nAll, + &pDL->pNextDocid, &pDL->iDocid, &pDL->nList, pbEof + ); + pDL->pList = pDL->pNextDocid; + }else{ + fts3EvalDlPhraseNext(pTab, pDL, pbEof); + } + + return rc; +} + +/* +** +** If *pRc is not SQLITE_OK when this function is called, it is a no-op. +** Otherwise, fts3EvalPhraseStart() is called on all phrases within the +** expression. Also the Fts3Expr.bDeferred variable is set to true for any +** expressions for which all descendent tokens are deferred. +** +** If parameter bOptOk is zero, then it is guaranteed that the +** Fts3Phrase.doclist.aAll/nAll variables contain the entire doclist for +** each phrase in the expression (subject to deferred token processing). +** Or, if bOptOk is non-zero, then one or more tokens within the expression +** may be loaded incrementally, meaning doclist.aAll/nAll is not available.
+** +** If an error occurs within this function, *pRc is set to an SQLite error +** code before returning. +*/ +static void fts3EvalStartReaders( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Expr *pExpr, /* Expression to initialize phrases in */ + int *pRc /* IN/OUT: Error code */ +){ + if( pExpr && SQLITE_OK==*pRc ){ + if( pExpr->eType==FTSQUERY_PHRASE ){ + int nToken = pExpr->pPhrase->nToken; + if( nToken ){ + int i; + for(i=0; i<nToken; i++){ + if( pExpr->pPhrase->aToken[i].pDeferred==0 ) break; + } + pExpr->bDeferred = (i==nToken); + } + *pRc = fts3EvalPhraseStart(pCsr, 1, pExpr->pPhrase); + }else{ + fts3EvalStartReaders(pCsr, pExpr->pLeft, pRc); + fts3EvalStartReaders(pCsr, pExpr->pRight, pRc); + pExpr->bDeferred = (pExpr->pLeft->bDeferred && pExpr->pRight->bDeferred); + } + } +} + +/* +** An array of the following structures is assembled as part of the process +** of selecting tokens to defer before the query starts executing (as part +** of the xFilter() method). There is one element in the array for each +** token in the FTS expression. +** +** Tokens are divided into AND/NEAR clusters. All tokens in a cluster belong +** to phrases that are connected only by AND and NEAR operators (not OR or +** NOT). When determining tokens to defer, each AND/NEAR cluster is considered +** separately. The root of a token's AND/NEAR cluster is stored in +** Fts3TokenAndCost.pRoot. +*/ +typedef struct Fts3TokenAndCost Fts3TokenAndCost; +struct Fts3TokenAndCost { + Fts3Phrase *pPhrase; /* The phrase the token belongs to */ + int iToken; /* Position of token in phrase */ + Fts3PhraseToken *pToken; /* The token itself */ + Fts3Expr *pRoot; /* Root of NEAR/AND cluster */ + int nOvfl; /* Number of overflow pages to load doclist */ + int iCol; /* The column the token must match */ +}; + +/* +** This function is used to populate an allocated Fts3TokenAndCost array. +** +** If *pRc is not SQLITE_OK when this function is called, it is a no-op. +** Otherwise, if an error occurs during execution, *pRc is set to an +** SQLite error code. +*/ +static void fts3EvalTokenCosts( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Expr *pRoot, /* Root of current AND/NEAR cluster */ + Fts3Expr *pExpr, /* Expression to consider */ + Fts3TokenAndCost **ppTC, /* Write new entries to *(*ppTC)++ */ + Fts3Expr ***ppOr, /* Write new OR root to *(*ppOr)++ */ + int *pRc /* IN/OUT: Error code */ +){ + if( *pRc==SQLITE_OK ){ + if( pExpr->eType==FTSQUERY_PHRASE ){ + Fts3Phrase *pPhrase = pExpr->pPhrase; + int i; + for(i=0; *pRc==SQLITE_OK && i<pPhrase->nToken; i++){ + Fts3TokenAndCost *pTC = (*ppTC)++; + pTC->pPhrase = pPhrase; + pTC->iToken = i; + pTC->pRoot = pRoot; + pTC->pToken = &pPhrase->aToken[i]; + pTC->iCol = pPhrase->iColumn; + *pRc = sqlite3Fts3MsrOvfl(pCsr, pTC->pToken->pSegcsr, &pTC->nOvfl); + } + }else if( pExpr->eType!=FTSQUERY_NOT ){ + assert( pExpr->eType==FTSQUERY_OR + || pExpr->eType==FTSQUERY_AND + || pExpr->eType==FTSQUERY_NEAR + ); + assert( pExpr->pLeft && pExpr->pRight ); + if( pExpr->eType==FTSQUERY_OR ){ + pRoot = pExpr->pLeft; + **ppOr = pRoot; + (*ppOr)++; + } + fts3EvalTokenCosts(pCsr, pRoot, pExpr->pLeft, ppTC, ppOr, pRc); + if( pExpr->eType==FTSQUERY_OR ){ + pRoot = pExpr->pRight; + **ppOr = pRoot; + (*ppOr)++; + } + fts3EvalTokenCosts(pCsr, pRoot, pExpr->pRight, ppTC, ppOr, pRc); + } + } +} + +/* +** Determine the average document (row) size in pages. If successful, +** write this value to *pnPage and return SQLITE_OK. Otherwise, return +** an SQLite error code.
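+** +** (Worked example with illustrative numbers, not from the original source: +** applying the calculation described below to 1000 rows totalling +** 2,000,000 bytes gives B = 2000; with a 1024 byte page size the result +** is (2000 + 1024) / 1024 = 2 pages per row.)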
+** +** The average document size in pages is calculated by first determining +** the average size in bytes, B. If B is less than the amount +** of data that will fit on a single leaf page of an intkey table in +** this database, then the average docsize is 1. Otherwise, it is 1 plus +** the number of overflow pages consumed by a record B bytes in size. +*/ +static int fts3EvalAverageDocsize(Fts3Cursor *pCsr, int *pnPage){ + if( pCsr->nRowAvg==0 ){ + /* The average document size, which is required to calculate the cost + ** of each doclist, has not yet been determined. Read the required + ** data from the %_stat table to calculate it. + ** + ** Entry 0 of the %_stat table is a blob containing (nCol+1) FTS3 + ** varints, where nCol is the number of columns in the FTS3 table. + ** The first varint is the number of documents currently stored in + ** the table. The following nCol varints contain the total amount of + ** data stored in all rows of each column of the table, from left + ** to right. + */ + int rc; + Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; + sqlite3_stmt *pStmt; + sqlite3_int64 nDoc = 0; + sqlite3_int64 nByte = 0; + const char *pEnd; + const char *a; + + rc = sqlite3Fts3SelectDoctotal(p, &pStmt); + if( rc!=SQLITE_OK ) return rc; + a = sqlite3_column_blob(pStmt, 0); + assert( a ); + + pEnd = &a[sqlite3_column_bytes(pStmt, 0)]; + a += sqlite3Fts3GetVarint(a, &nDoc); + while( a<pEnd ){ + a += sqlite3Fts3GetVarint(a, &nByte); + } + if( nDoc==0 || nByte==0 ){ + sqlite3_reset(pStmt); + return FTS_CORRUPT_VTAB; + } + + pCsr->nDoc = nDoc; + pCsr->nRowAvg = (int)(((nByte / nDoc) + p->nPgsz) / p->nPgsz); + assert( pCsr->nRowAvg>0 ); + rc = sqlite3_reset(pStmt); + if( rc!=SQLITE_OK ) return rc; + } + + *pnPage = pCsr->nRowAvg; + return SQLITE_OK; +} + +/* +** This function is called to select the tokens (if any) that will be +** deferred. The array aTC[] has already been populated when this is +** called. +** +** This function is called once for each AND/NEAR cluster in the +** expression. Each invocation determines which tokens to defer within +** the cluster with root node pRoot. See comments above the definition +** of struct Fts3TokenAndCost for more details. +** +** If no error occurs, SQLITE_OK is returned and sqlite3Fts3DeferToken() +** called on each token to defer. Otherwise, an SQLite error code is +** returned. +*/ +static int fts3EvalSelectDeferred( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Expr *pRoot, /* Consider tokens with this root node */ + Fts3TokenAndCost *aTC, /* Array of expression tokens and costs */ + int nTC /* Number of entries in aTC[] */ +){ + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + int nDocSize = 0; /* Number of pages per doc loaded */ + int rc = SQLITE_OK; /* Return code */ + int ii; /* Iterator variable for various purposes */ + int nOvfl = 0; /* Total overflow pages used by doclists */ + int nToken = 0; /* Total number of tokens in cluster */ + + int nMinEst = 0; /* The minimum count for any phrase so far. */ + int nLoad4 = 1; /* (Phrases that will be loaded)^4. */ + + /* Tokens are never deferred for FTS tables created using the content=xxx + ** option. The reason being that it is not guaranteed that the content + ** table actually contains the same data as the index. To prevent this from + ** causing any problems, the deferred token optimization is completely + ** disabled for content=xxx tables. */ + if( pTab->zContentTbl ){ + return SQLITE_OK; + } + + /* Count the tokens in this AND/NEAR cluster. If none of the doclists + ** associated with the tokens spill onto overflow pages, or if there is + ** only 1 token, exit early. No tokens to defer in this case.
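+ ** + ** (Illustrative arithmetic for the deferral rule applied below, not from + ** the original source: if the smallest phrase estimate so far is + ** nMinEst=20 rows and two tokens have already been loaded (nLoad4=64, + ** so 4^nOther=16), a candidate token is deferred once its doclist would + ** occupy at least ((20+16-1)/16)*nDocSize = 2*nDocSize overflow pages.)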
*/ + for(ii=0; ii<nTC; ii++){ + if( aTC[ii].pRoot==pRoot ){ + nToken++; + nOvfl += aTC[ii].nOvfl; + } + } + if( nOvfl==0 || nToken<2 ) return SQLITE_OK; + + /* Obtain the average docsize (in pages). */ + rc = fts3EvalAverageDocsize(pCsr, &nDocSize); + assert( rc!=SQLITE_OK || nDocSize>0 ); + + + /* Iterate through all tokens in this AND/NEAR cluster, in ascending order + ** of the number of overflow pages that will be loaded by the pager layer + ** to retrieve the entire doclist for the token from the full-text index. + ** Load the doclists for tokens that are either: + ** + ** a. The cheapest token in the entire query (i.e. the one visited by the + ** first iteration of this loop), or + ** + ** b. Part of a multi-token phrase. + ** + ** After each token doclist is loaded, merge it with the others from the + ** same phrase and count the number of documents that the merged doclist + ** contains. Set variable "nMinEst" to the smallest number of documents in + ** any phrase doclist for which 1 or more token doclists have been loaded. + ** Let nOther be the number of other phrases for which it is certain that + ** one or more tokens will not be deferred. + ** + ** Then, for each token, defer it if loading the doclist would result in + ** loading N or more overflow pages into memory, where N is computed as: + ** + ** (nMinEst + 4^nOther - 1) / (4^nOther) + */ + for(ii=0; ii<nToken; ii++){ + int iTC; /* Used to iterate through aTC[] array. */ + Fts3TokenAndCost *pTC = 0; /* Set to cheapest remaining token. */ + + /* Set pTC to point to the cheapest remaining token. */ + for(iTC=0; iTC<nTC; iTC++){ + if( aTC[iTC].pToken && aTC[iTC].pRoot==pRoot + && (!pTC || aTC[iTC].nOvfl<pTC->nOvfl) + ){ + pTC = &aTC[iTC]; + } + } + assert( pTC ); + + if( ii && pTC->nOvfl>=((nMinEst+(nLoad4/4)-1)/(nLoad4/4))*nDocSize ){ + /* The number of overflow pages to load for this (and therefore all + ** subsequent) tokens is greater than the estimated number of pages + ** that will be loaded if all subsequent tokens are deferred. + */ + Fts3PhraseToken *pToken = pTC->pToken; + rc = sqlite3Fts3DeferToken(pCsr, pToken, pTC->iCol); + fts3SegReaderCursorFree(pToken->pSegcsr); + pToken->pSegcsr = 0; + }else{ + /* Set nLoad4 to the value of (4^nOther) for the next iteration of the + ** for-loop. Except, limit the value to 2^24 to prevent it from + ** overflowing the 32-bit integer it is stored in. */ + if( ii<12 ) nLoad4 = nLoad4*4; + + if( ii==0 || (pTC->pPhrase->nToken>1 && ii!=nToken-1) ){ + /* Either this is the cheapest token in the entire query, or it is + ** part of a multi-token phrase. Either way, the entire doclist will + ** (eventually) be loaded into memory. It may as well be now. */ + Fts3PhraseToken *pToken = pTC->pToken; + int nList = 0; + char *pList = 0; + rc = fts3TermSelect(pTab, pToken, pTC->iCol, &nList, &pList); + assert( rc==SQLITE_OK || pList==0 ); + if( rc==SQLITE_OK ){ + rc = fts3EvalPhraseMergeToken( + pTab, pTC->pPhrase, pTC->iToken,pList,nList + ); + } + if( rc==SQLITE_OK ){ + int nCount; + nCount = fts3DoclistCountDocids( + pTC->pPhrase->doclist.aAll, pTC->pPhrase->doclist.nAll + ); + if( ii==0 || nCount<nMinEst ) nMinEst = nCount; + } + } + } + pTC->pToken = 0; + } + + return rc; +} + +/* +** This function is called from within the xFilter method. It initializes +** the full-text query currently stored in pCsr->pExpr. To iterate through +** the results of a query, the caller does: +** +** fts3EvalStart(pCsr); +** while( 1 ){ +** fts3EvalNext(pCsr); +** if( pCsr->bEof ) break; +** ... return row pCsr->iPrevId to the caller ... +** } +*/ +static int fts3EvalStart(Fts3Cursor *pCsr){ + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + int rc = SQLITE_OK; + int nToken = 0; + int nOr = 0; + + /* Allocate a MultiSegReader for each token in the expression. */ + fts3EvalAllocateReaders(pCsr, pCsr->pExpr, &nToken, &nOr, &rc); + + /* Determine which, if any, tokens in the expression should be deferred.
*/ +#ifndef SQLITE_DISABLE_FTS4_DEFERRED + if( rc==SQLITE_OK && nToken>1 && pTab->bFts4 ){ + Fts3TokenAndCost *aTC; + Fts3Expr **apOr; + aTC = (Fts3TokenAndCost *)sqlite3_malloc( + sizeof(Fts3TokenAndCost) * nToken + + sizeof(Fts3Expr *) * nOr * 2 + ); + apOr = (Fts3Expr **)&aTC[nToken]; + + if( !aTC ){ + rc = SQLITE_NOMEM; + }else{ + int ii; + Fts3TokenAndCost *pTC = aTC; + Fts3Expr **ppOr = apOr; + + fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc); + nToken = (int)(pTC-aTC); + nOr = (int)(ppOr-apOr); + + if( rc==SQLITE_OK ){ + rc = fts3EvalSelectDeferred(pCsr, 0, aTC, nToken); + for(ii=0; rc==SQLITE_OK && ii<nOr; ii++){ + rc = fts3EvalSelectDeferred(pCsr, apOr[ii], aTC, nToken); + } + } + + sqlite3_free(aTC); + } + } +#endif + + fts3EvalStartReaders(pCsr, pCsr->pExpr, &rc); + return rc; +} + +/* +** Invalidate the current position list for phrase pPhrase. +*/ +static void fts3EvalInvalidatePoslist(Fts3Phrase *pPhrase){ + if( pPhrase->doclist.bFreeList ){ + sqlite3_free(pPhrase->doclist.pList); + } + pPhrase->doclist.pList = 0; + pPhrase->doclist.nList = 0; + pPhrase->doclist.bFreeList = 0; +} + +/* +** This function is called to edit the position list associated with +** the phrase object passed as the fifth argument according to a NEAR +** condition. For example: +** +** abc NEAR/5 "def ghi" +** +** Parameter nNear is passed the NEAR distance of the expression (5 in +** the example above). When this function is called, *paPoslist points to +** the position list, and *pnToken is the number of phrase tokens in the +** phrase on the other side of the NEAR operator to pPhrase. For example, +** if pPhrase refers to the "def ghi" phrase, then *paPoslist points to +** the position list associated with phrase "abc". +** +** All positions in the pPhrase position list that are not sufficiently +** close to a position in the *paPoslist position list are removed. If this +** leaves 0 positions, zero is returned. Otherwise, non-zero. +** +** Before returning, *paPoslist is set to point to the position list +** associated with pPhrase. And *pnToken is set to the number of tokens in +** pPhrase. +*/ +static int fts3EvalNearTrim( + int nNear, /* NEAR distance. As in "NEAR/nNear". */ + char *aTmp, /* Temporary space to use */ + char **paPoslist, /* IN/OUT: Position list */ + int *pnToken, /* IN/OUT: Tokens in phrase of *paPoslist */ + Fts3Phrase *pPhrase /* The phrase object to trim the doclist of */ +){ + int nParam1 = nNear + pPhrase->nToken; + int nParam2 = nNear + *pnToken; + int nNew; + char *p2; + char *pOut; + int res; + + assert( pPhrase->doclist.pList ); + + p2 = pOut = pPhrase->doclist.pList; + res = fts3PoslistNearMerge( + &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 + ); + if( res ){ + nNew = (int)(pOut - pPhrase->doclist.pList) - 1; + assert( pPhrase->doclist.pList[nNew]=='\0' ); + assert( nNew<=pPhrase->doclist.nList && nNew>0 ); + memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); + pPhrase->doclist.nList = nNew; + *paPoslist = pPhrase->doclist.pList; + *pnToken = pPhrase->nToken; + } + + return res; +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is called. +** Otherwise, it advances the expression passed as the second argument to +** point to the next matching row in the database. Expressions iterate through +** matching rows in docid order. Ascending order if Fts3Cursor.bDesc is zero, +** or descending if it is non-zero. +** +** If an error occurs, *pRc is set to an SQLite error code. Otherwise, if +** successful, the following variables in pExpr are set: +** +** Fts3Expr.bEof (non-zero if EOF - there is no next row) +** Fts3Expr.iDocid (valid if bEof==0.
The docid of the next row) +** +** If the expression is of type FTSQUERY_PHRASE, and the expression is not +** at EOF, then the following variables are populated with the position list +** for the phrase for the visited row: +** +** FTs3Expr.pPhrase->doclist.nList (length of pList in bytes) +** FTs3Expr.pPhrase->doclist.pList (pointer to position list) +** +** It says above that this function advances the expression to the next +** matching row. This is usually true, but there are the following exceptions: +** +** 1. Deferred tokens are not taken into account. If a phrase consists +** entirely of deferred tokens, it is assumed to match every row in +** the db. In this case the position-list is not populated at all. +** +** Or, if a phrase contains one or more deferred tokens and one or +** more non-deferred tokens, then the expression is advanced to the +** next possible match, considering only non-deferred tokens. In other +** words, if the phrase is "A B C", and "B" is deferred, the expression +** is advanced to the next row that contains an instance of "A * C", +** where "*" may match any single token. The position list in this case +** is populated as for "A * C" before returning. +** +** 2. NEAR is treated as AND. If the expression is "x NEAR y", it is +** advanced to point to the next row that matches "x AND y". +** +** See sqlite3Fts3EvalTestDeferred() for details on testing if a row is +** really a match, taking into account deferred tokens and NEAR operators. +*/ +static void fts3EvalNextRow( + Fts3Cursor *pCsr, /* FTS Cursor handle */ + Fts3Expr *pExpr, /* Expr. to advance to next matching row */ + int *pRc /* IN/OUT: Error code */ +){ + if( *pRc==SQLITE_OK ){ + int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */ + assert( pExpr->bEof==0 ); + pExpr->bStart = 1; + + switch( pExpr->eType ){ + case FTSQUERY_NEAR: + case FTSQUERY_AND: { + Fts3Expr *pLeft = pExpr->pLeft; + Fts3Expr *pRight = pExpr->pRight; + assert( !pLeft->bDeferred || !pRight->bDeferred ); + + if( pLeft->bDeferred ){ + /* LHS is entirely deferred. So we assume it matches every row. + ** Advance the RHS iterator to find the next row visited. */ + fts3EvalNextRow(pCsr, pRight, pRc); + pExpr->iDocid = pRight->iDocid; + pExpr->bEof = pRight->bEof; + }else if( pRight->bDeferred ){ + /* RHS is entirely deferred. So we assume it matches every row. + ** Advance the LHS iterator to find the next row visited. */ + fts3EvalNextRow(pCsr, pLeft, pRc); + pExpr->iDocid = pLeft->iDocid; + pExpr->bEof = pLeft->bEof; + }else{ + /* Neither the RHS or LHS are deferred. 
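+ ** + ** (Illustrative, not from the original source: with LHS docids {1,3,7} + ** and RHS docids {3,5,7}, the loop below always advances the side with + ** the smaller docid, so the AND visits rows 3 and 7 only.)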
*/ + fts3EvalNextRow(pCsr, pLeft, pRc); + fts3EvalNextRow(pCsr, pRight, pRc); + while( !pLeft->bEof && !pRight->bEof && *pRc==SQLITE_OK ){ + sqlite3_int64 iDiff = DOCID_CMP(pLeft->iDocid, pRight->iDocid); + if( iDiff==0 ) break; + if( iDiff<0 ){ + fts3EvalNextRow(pCsr, pLeft, pRc); + }else{ + fts3EvalNextRow(pCsr, pRight, pRc); + } + } + pExpr->iDocid = pLeft->iDocid; + pExpr->bEof = (pLeft->bEof || pRight->bEof); + if( pExpr->eType==FTSQUERY_NEAR && pExpr->bEof ){ + if( pRight->pPhrase && pRight->pPhrase->doclist.aAll ){ + Fts3Doclist *pDl = &pRight->pPhrase->doclist; + while( *pRc==SQLITE_OK && pRight->bEof==0 ){ + memset(pDl->pList, 0, pDl->nList); + fts3EvalNextRow(pCsr, pRight, pRc); + } + } + if( pLeft->pPhrase && pLeft->pPhrase->doclist.aAll ){ + Fts3Doclist *pDl = &pLeft->pPhrase->doclist; + while( *pRc==SQLITE_OK && pLeft->bEof==0 ){ + memset(pDl->pList, 0, pDl->nList); + fts3EvalNextRow(pCsr, pLeft, pRc); + } + } + } + } + break; + } + + case FTSQUERY_OR: { + Fts3Expr *pLeft = pExpr->pLeft; + Fts3Expr *pRight = pExpr->pRight; + sqlite3_int64 iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); + + assert( pLeft->bStart || pLeft->iDocid==pRight->iDocid ); + assert( pRight->bStart || pLeft->iDocid==pRight->iDocid ); + + if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ + fts3EvalNextRow(pCsr, pLeft, pRc); + }else if( pLeft->bEof || (pRight->bEof==0 && iCmp>0) ){ + fts3EvalNextRow(pCsr, pRight, pRc); + }else{ + fts3EvalNextRow(pCsr, pLeft, pRc); + fts3EvalNextRow(pCsr, pRight, pRc); + } + + pExpr->bEof = (pLeft->bEof && pRight->bEof); + iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); + if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ + pExpr->iDocid = pLeft->iDocid; + }else{ + pExpr->iDocid = pRight->iDocid; + } + + break; + } + + case FTSQUERY_NOT: { + Fts3Expr *pLeft = pExpr->pLeft; + Fts3Expr *pRight = pExpr->pRight; + + if( pRight->bStart==0 ){ + fts3EvalNextRow(pCsr, pRight, pRc); + assert( *pRc!=SQLITE_OK || pRight->bStart ); + } + + fts3EvalNextRow(pCsr, pLeft, pRc); + if( pLeft->bEof==0 ){ + while( !*pRc + && !pRight->bEof + && DOCID_CMP(pLeft->iDocid, pRight->iDocid)>0 + ){ + fts3EvalNextRow(pCsr, pRight, pRc); + } + } + pExpr->iDocid = pLeft->iDocid; + pExpr->bEof = pLeft->bEof; + break; + } + + default: { + Fts3Phrase *pPhrase = pExpr->pPhrase; + fts3EvalInvalidatePoslist(pPhrase); + *pRc = fts3EvalPhraseNext(pCsr, pPhrase, &pExpr->bEof); + pExpr->iDocid = pPhrase->doclist.iDocid; + break; + } + } + } +} + +/* +** If *pRc is not SQLITE_OK, or if pExpr is not the root node of a NEAR +** cluster, then this function returns 1 immediately. +** +** Otherwise, it checks if the current row really does match the NEAR +** expression, using the data currently stored in the position lists +** (Fts3Expr->pPhrase.doclist.pList/nList) for each phrase in the expression. +** +** If the current row is a match, the position list associated with each +** phrase in the NEAR expression is edited in place to contain only those +** phrase instances sufficiently close to their peers to satisfy all NEAR +** constraints. In this case it returns 1. If the NEAR expression does not +** match the current row, 0 is returned. The position lists may or may not +** be edited if 0 is returned. +*/ +static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ + int res = 1; + + /* The following block runs if pExpr is the root of a NEAR query. 
+ ** For example, the query: + ** + ** "w" NEAR "x" NEAR "y" NEAR "z" + ** + ** which is represented in tree form as: + ** + ** | + ** +--NEAR--+ <-- root of NEAR query + ** | | + ** +--NEAR--+ "z" + ** | | + ** +--NEAR--+ "y" + ** | | + ** "w" "x" + ** + ** The right-hand child of a NEAR node is always a phrase. The + ** left-hand child may be either a phrase or a NEAR node. There are + ** no exceptions to this - it's the way the parser in fts3_expr.c works. + */ + if( *pRc==SQLITE_OK + && pExpr->eType==FTSQUERY_NEAR + && pExpr->bEof==0 + && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR) + ){ + Fts3Expr *p; + int nTmp = 0; /* Bytes of temp space */ + char *aTmp; /* Temp space for PoslistNearMerge() */ + + /* Allocate temporary working space. */ + for(p=pExpr; p->pLeft; p=p->pLeft){ + nTmp += p->pRight->pPhrase->doclist.nList; + } + nTmp += p->pPhrase->doclist.nList; + if( nTmp==0 ){ + res = 0; + }else{ + aTmp = sqlite3_malloc(nTmp*2); + if( !aTmp ){ + *pRc = SQLITE_NOMEM; + res = 0; + }else{ + char *aPoslist = p->pPhrase->doclist.pList; + int nToken = p->pPhrase->nToken; + + for(p=p->pParent;res && p && p->eType==FTSQUERY_NEAR; p=p->pParent){ + Fts3Phrase *pPhrase = p->pRight->pPhrase; + int nNear = p->nNear; + res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase); + } + + aPoslist = pExpr->pRight->pPhrase->doclist.pList; + nToken = pExpr->pRight->pPhrase->nToken; + for(p=pExpr->pLeft; p && res; p=p->pLeft){ + int nNear; + Fts3Phrase *pPhrase; + assert( p->pParent && p->pParent->pLeft==p ); + nNear = p->pParent->nNear; + pPhrase = ( + p->eType==FTSQUERY_NEAR ? p->pRight->pPhrase : p->pPhrase + ); + res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase); + } + } + + sqlite3_free(aTmp); + } + } + + return res; +} + +/* +** This function is a helper function for sqlite3Fts3EvalTestDeferred(). +** Assuming no error occurs or has occurred, it returns non-zero if the +** expression passed as the second argument matches the row that pCsr +** currently points to, or zero if it does not. +** +** If *pRc is not SQLITE_OK when this function is called, it is a no-op. +** If an error occurs during execution of this function, *pRc is set to +** the appropriate SQLite error code. In this case the returned value is +** undefined. +*/ +static int fts3EvalTestExpr( + Fts3Cursor *pCsr, /* FTS cursor handle */ + Fts3Expr *pExpr, /* Expr to test. May or may not be root. */ + int *pRc /* IN/OUT: Error code */ +){ + int bHit = 1; /* Return value */ + if( *pRc==SQLITE_OK ){ + switch( pExpr->eType ){ + case FTSQUERY_NEAR: + case FTSQUERY_AND: + bHit = ( + fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc) + && fts3EvalTestExpr(pCsr, pExpr->pRight, pRc) + && fts3EvalNearTest(pExpr, pRc) + ); + + /* If the NEAR expression does not match any rows, zero the doclist for + ** all phrases involved in the NEAR. This is because the snippet(), + ** offsets() and matchinfo() functions are not supposed to recognize + ** any instances of phrases that are part of unmatched NEAR queries. + ** For example if this expression: + ** + ** ... MATCH 'a OR (b NEAR c)' + ** + ** is matched against a row containing: + ** + ** 'a b d e' + ** + ** then any snippet() should only highlight the "a" term, not the "b" + ** (as "b" is part of a non-matching NEAR clause).
+ */ + if( bHit==0 + && pExpr->eType==FTSQUERY_NEAR + && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR) + ){ + Fts3Expr *p; + for(p=pExpr; p->pPhrase==0; p=p->pLeft){ + if( p->pRight->iDocid==pCsr->iPrevId ){ + fts3EvalInvalidatePoslist(p->pRight->pPhrase); + } + } + if( p->iDocid==pCsr->iPrevId ){ + fts3EvalInvalidatePoslist(p->pPhrase); + } + } + + break; + + case FTSQUERY_OR: { + int bHit1 = fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc); + int bHit2 = fts3EvalTestExpr(pCsr, pExpr->pRight, pRc); + bHit = bHit1 || bHit2; + break; + } + + case FTSQUERY_NOT: + bHit = ( + fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc) + && !fts3EvalTestExpr(pCsr, pExpr->pRight, pRc) + ); + break; + + default: { +#ifndef SQLITE_DISABLE_FTS4_DEFERRED + if( pCsr->pDeferred + && (pExpr->iDocid==pCsr->iPrevId || pExpr->bDeferred) + ){ + Fts3Phrase *pPhrase = pExpr->pPhrase; + assert( pExpr->bDeferred || pPhrase->doclist.bFreeList==0 ); + if( pExpr->bDeferred ){ + fts3EvalInvalidatePoslist(pPhrase); + } + *pRc = fts3EvalDeferredPhrase(pCsr, pPhrase); + bHit = (pPhrase->doclist.pList!=0); + pExpr->iDocid = pCsr->iPrevId; + }else +#endif + { + bHit = (pExpr->bEof==0 && pExpr->iDocid==pCsr->iPrevId); + } + break; + } + } + } + return bHit; +} + +/* +** This function is called as the second part of each xNext operation when +** iterating through the results of a full-text query. At this point the +** cursor points to a row that matches the query expression, with the +** following caveats: +** +** * Up until this point, "NEAR" operators in the expression have been +** treated as "AND". +** +** * Deferred tokens have not yet been considered. +** +** If *pRc is not SQLITE_OK when this function is called, it immediately +** returns 0. Otherwise, it tests whether or not after considering NEAR +** operators and deferred tokens the current row is still a match for the +** expression. It returns 1 if both of the following are true: +** +** 1. *pRc is SQLITE_OK when this function returns, and +** +** 2. After scanning the current FTS table row for the deferred tokens, +** it is determined that the row does *not* match the query. +** +** Or, if no error occurs and it seems the current row does match the FTS +** query, return 0. +*/ +SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc){ + int rc = *pRc; + int bMiss = 0; + if( rc==SQLITE_OK ){ + + /* If there are one or more deferred tokens, load the current row into + ** memory and scan it to determine the position list for each deferred + ** token. Then, see if this row is really a match, considering deferred + ** tokens and NEAR operators (neither of which were taken into account + ** earlier, by fts3EvalNextRow()). + */ + if( pCsr->pDeferred ){ + rc = fts3CursorSeek(0, pCsr); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts3CacheDeferredDoclists(pCsr); + } + } + bMiss = (0==fts3EvalTestExpr(pCsr, pCsr->pExpr, &rc)); + + /* Free the position-lists accumulated for each deferred token above. */ + sqlite3Fts3FreeDeferredDoclists(pCsr); + *pRc = rc; + } + return (rc==SQLITE_OK && bMiss); +} + +/* +** Advance to the next document that matches the FTS expression in +** Fts3Cursor.pExpr. 
+*/
+static int fts3EvalNext(Fts3Cursor *pCsr){
+  int rc = SQLITE_OK;             /* Return Code */
+  Fts3Expr *pExpr = pCsr->pExpr;
+  assert( pCsr->isEof==0 );
+  if( pExpr==0 ){
+    pCsr->isEof = 1;
+  }else{
+    do {
+      if( pCsr->isRequireSeek==0 ){
+        sqlite3_reset(pCsr->pStmt);
+      }
+      assert( sqlite3_data_count(pCsr->pStmt)==0 );
+      fts3EvalNextRow(pCsr, pExpr, &rc);
+      pCsr->isEof = pExpr->bEof;
+      pCsr->isRequireSeek = 1;
+      pCsr->isMatchinfoNeeded = 1;
+      pCsr->iPrevId = pExpr->iDocid;
+    }while( pCsr->isEof==0 && sqlite3Fts3EvalTestDeferred(pCsr, &rc) );
+  }
+
+  /* Check if the cursor is past the end of the docid range specified
+  ** by Fts3Cursor.iMinDocid/iMaxDocid. If so, set the EOF flag.  */
+  if( rc==SQLITE_OK && (
+        (pCsr->bDesc==0 && pCsr->iPrevId>pCsr->iMaxDocid)
+     || (pCsr->bDesc!=0 && pCsr->iPrevId<pCsr->iMinDocid)
+  )){
+    pCsr->isEof = 1;
+  }
+
+  return rc;
+}
+
+/*
+** Restart iteration for expression pExpr so that the next call to
+** fts3EvalNext() visits the first row. Do not allow incremental
+** loading or merging of phrase doclists for this iteration.
+**
+** If *pRc is other than SQLITE_OK when this function is called, it is
+** a no-op. If an error occurs within this function, *pRc is set to an
+** SQLite error code before returning.
+*/
+static void fts3EvalRestart(
+  Fts3Cursor *pCsr,
+  Fts3Expr *pExpr,
+  int *pRc
+){
+  if( pExpr && *pRc==SQLITE_OK ){
+    Fts3Phrase *pPhrase = pExpr->pPhrase;
+
+    if( pPhrase ){
+      fts3EvalInvalidatePoslist(pPhrase);
+      if( pPhrase->bIncr ){
+        int i;
+        for(i=0; i<pPhrase->nToken; i++){
+          Fts3PhraseToken *pToken = &pPhrase->aToken[i];
+          assert( pToken->pDeferred==0 );
+          if( pToken->pSegcsr ){
+            sqlite3Fts3MsrIncrRestart(pToken->pSegcsr);
+          }
+        }
+        *pRc = fts3EvalPhraseStart(pCsr, 0, pPhrase);
+      }
+      pPhrase->doclist.pNextDocid = 0;
+      pPhrase->doclist.iDocid = 0;
+      pPhrase->pOrPoslist = 0;
+    }
+
+    pExpr->iDocid = 0;
+    pExpr->bEof = 0;
+    pExpr->bStart = 0;
+
+    fts3EvalRestart(pCsr, pExpr->pLeft, pRc);
+    fts3EvalRestart(pCsr, pExpr->pRight, pRc);
+  }
+}
+
+/*
+** After allocating the Fts3Expr.aMI[] array for each phrase in the
+** expression rooted at pExpr, the cursor iterates through all rows matched
+** by pExpr, calling this function for each row. This function increments
+** the values in Fts3Expr.aMI[] according to the position-list currently
+** found in Fts3Expr.pPhrase->doclist.pList for each of the phrase
+** expression nodes.
+*/
+static void fts3EvalUpdateCounts(Fts3Expr *pExpr){
+  if( pExpr ){
+    Fts3Phrase *pPhrase = pExpr->pPhrase;
+    if( pPhrase && pPhrase->doclist.pList ){
+      int iCol = 0;
+      char *p = pPhrase->doclist.pList;
+
+      assert( *p );
+      while( 1 ){
+        u8 c = 0;
+        int iCnt = 0;
+        while( 0xFE & (*p | c) ){
+          if( (c&0x80)==0 ) iCnt++;
+          c = *p++ & 0x80;
+        }
+
+        /* aMI[iCol*3 + 1] = Number of occurrences
+        ** aMI[iCol*3 + 2] = Number of rows containing at least one instance
+        */
+        pExpr->aMI[iCol*3 + 1] += iCnt;
+        pExpr->aMI[iCol*3 + 2] += (iCnt>0);
+        if( *p==0x00 ) break;
+        p++;
+        p += fts3GetVarint32(p, &iCol);
+      }
+    }
+
+    fts3EvalUpdateCounts(pExpr->pLeft);
+    fts3EvalUpdateCounts(pExpr->pRight);
+  }
+}
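fts3EvalUpdateCounts() consumes one position list per matched row: position entries are non-zero varints, a 0x01 byte introduces the next column number, and a 0x00 byte terminates the list. The following standalone sketch (not SQLite code; it assumes single-byte varints for brevity, where the real format uses the multi-byte varints read by fts3GetVarint32()) counts entries per column in the same way:

#include <stdio.h>

/* Minimal sketch, not the SQLite implementation: count the entries of an
** FTS3-style position list per column, as fts3EvalUpdateCounts() does. */
static void countOccurrences(const unsigned char *p){
  int iCol = 0;                 /* Current column */
  int nOcc = 0;                 /* Entries seen in column iCol */
  while( 1 ){
    unsigned char c = *p++;
    if( c==0x00 || c==0x01 ){
      printf("column %d: %d occurrence(s)\n", iCol, nOcc);
      if( c==0x00 ) break;
      iCol = *p++;              /* Column number follows the 0x01 marker */
      nOcc = 0;
    }else{
      nOcc++;                   /* Any other value is one position entry */
    }
  }
}

int main(void){
  /* Two positions in column 0, then one in column 2. The position values
  ** themselves are (delta+2) coded and treated as opaque here. */
  const unsigned char aList[] = { 0x04, 0x05, 0x01, 0x02, 0x07, 0x00 };
  countOccurrences(aList);
  return 0;
}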
+
+/*
+** Expression pExpr must be of type FTSQUERY_PHRASE.
+**
+** If it is not already allocated and populated, this function allocates and
+** populates the Fts3Expr.aMI[] array for expression pExpr. If pExpr is part
+** of a NEAR expression, then it also allocates and populates the same array
+** for all other phrases that are part of the NEAR expression.
+**
+** SQLITE_OK is returned if the aMI[] array is successfully allocated and
+** populated. Otherwise, if an error occurs, an SQLite error code is returned.
+*/
+static int fts3EvalGatherStats(
+  Fts3Cursor *pCsr,               /* Cursor object */
+  Fts3Expr *pExpr                 /* FTSQUERY_PHRASE expression */
+){
+  int rc = SQLITE_OK;             /* Return code */
+
+  assert( pExpr->eType==FTSQUERY_PHRASE );
+  if( pExpr->aMI==0 ){
+    Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+    Fts3Expr *pRoot;                /* Root of NEAR expression */
+    Fts3Expr *p;                    /* Iterator used for several purposes */
+
+    sqlite3_int64 iPrevId = pCsr->iPrevId;
+    sqlite3_int64 iDocid;
+    u8 bEof;
+
+    /* Find the root of the NEAR expression */
+    pRoot = pExpr;
+    while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){
+      pRoot = pRoot->pParent;
+    }
+    iDocid = pRoot->iDocid;
+    bEof = pRoot->bEof;
+    assert( pRoot->bStart );
+
+    /* Allocate space for the aMI[] array of each FTSQUERY_PHRASE node */
+    for(p=pRoot; p; p=p->pLeft){
+      Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight);
+      assert( pE->aMI==0 );
+      pE->aMI = (u32 *)sqlite3_malloc(pTab->nColumn * 3 * sizeof(u32));
+      if( !pE->aMI ) return SQLITE_NOMEM;
+      memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32));
+    }
+
+    fts3EvalRestart(pCsr, pRoot, &rc);
+
+    while( pCsr->isEof==0 && rc==SQLITE_OK ){
+
+      do {
+        /* Ensure the %_content statement is reset. */
+        if( pCsr->isRequireSeek==0 ) sqlite3_reset(pCsr->pStmt);
+        assert( sqlite3_data_count(pCsr->pStmt)==0 );
+
+        /* Advance to the next document */
+        fts3EvalNextRow(pCsr, pRoot, &rc);
+        pCsr->isEof = pRoot->bEof;
+        pCsr->isRequireSeek = 1;
+        pCsr->isMatchinfoNeeded = 1;
+        pCsr->iPrevId = pRoot->iDocid;
+      }while( pCsr->isEof==0
+           && pRoot->eType==FTSQUERY_NEAR
+           && sqlite3Fts3EvalTestDeferred(pCsr, &rc)
+      );
+
+      if( rc==SQLITE_OK && pCsr->isEof==0 ){
+        fts3EvalUpdateCounts(pRoot);
+      }
+    }
+
+    pCsr->isEof = 0;
+    pCsr->iPrevId = iPrevId;
+
+    if( bEof ){
+      pRoot->bEof = bEof;
+    }else{
+      /* Caution: pRoot may iterate through docids in ascending or descending
+      ** order. For this reason, even though it seems more defensive, the
+      ** do loop can not be written:
+      **
+      **   do {...} while( pRoot->iDocid<iDocid && rc==SQLITE_OK );
+      */
+      fts3EvalRestart(pCsr, pRoot, &rc);
+      do {
+        fts3EvalNextRow(pCsr, pRoot, &rc);
+        assert( pRoot->bEof==0 );
+      }while( pRoot->iDocid!=iDocid && rc==SQLITE_OK );
+    }
+  }
+  return rc;
+}
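The aMI[] array populated above holds three u32 slots per column: slot 0 of each triple is left unused by this accumulation, slot 1 totals phrase occurrences, and slot 2 counts rows containing at least one occurrence. A small illustration of that layout, using hypothetical values for a two-column table:

#include <stdio.h>

typedef unsigned int u32;

/* Illustration only (not SQLite code): the layout of Fts3Expr.aMI[] as
** filled in by fts3EvalUpdateCounts() and copied out by
** sqlite3Fts3EvalPhraseStats(). Values are hypothetical. */
static void printPhraseStats(const u32 *aMI, int nColumn){
  int iCol;
  for(iCol=0; iCol<nColumn; iCol++){
    printf("column %d: %u occurrence(s) across %u row(s)\n",
        iCol, aMI[iCol*3 + 1], aMI[iCol*3 + 2]);
  }
}

int main(void){
  u32 aMI[6] = { 0, 5, 3,  0, 1, 1 };   /* two-column table */
  printPhraseStats(aMI, 2);
  return 0;
}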
+
+/*
+** This function is used by the matchinfo() module to query a phrase
+** expression node for the following information:
+**
+**   1. The total number of occurrences of the phrase in each column of
+**      the FTS table (considering all rows), and
+**
+**   2. For each column, the number of rows in the table for which the
+**      column contains at least one instance of the phrase.
+**
+** If no error occurs, SQLITE_OK is returned and the values for each column
+** written into the array aiOut as follows:
+**
+**   aiOut[iCol*3 + 1] = Number of occurrences
+**   aiOut[iCol*3 + 2] = Number of rows containing at least one instance
+**
+** Caveats:
+**
+**   * If a phrase consists entirely of deferred tokens, then all output
+**     values are set to the number of documents in the table. In other
+**     words we assume that very common tokens occur exactly once in each
+**     column of each row of the table.
+**
+**   * If a phrase contains some deferred tokens (and some non-deferred
+**     tokens), count the potential occurrence identified by considering
+**     the non-deferred tokens instead of actual phrase occurrences.
+**
+**   * If the phrase is part of a NEAR expression, then only phrase instances
+**     that meet the NEAR constraint are included in the counts.
+*/
+SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(
+  Fts3Cursor *pCsr,               /* FTS cursor handle */
+  Fts3Expr *pExpr,                /* Phrase expression */
+  u32 *aiOut                      /* Array to write results into (see above) */
+){
+  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+  int rc = SQLITE_OK;
+  int iCol;
+
+  if( pExpr->bDeferred && pExpr->pParent->eType!=FTSQUERY_NEAR ){
+    assert( pCsr->nDoc>0 );
+    for(iCol=0; iCol<pTab->nColumn; iCol++){
+      aiOut[iCol*3 + 1] = (u32)pCsr->nDoc;
+      aiOut[iCol*3 + 2] = (u32)pCsr->nDoc;
+    }
+  }else{
+    rc = fts3EvalGatherStats(pCsr, pExpr);
+    if( rc==SQLITE_OK ){
+      assert( pExpr->aMI );
+      for(iCol=0; iCol<pTab->nColumn; iCol++){
+        aiOut[iCol*3 + 1] = pExpr->aMI[iCol*3 + 1];
+        aiOut[iCol*3 + 2] = pExpr->aMI[iCol*3 + 2];
+      }
+    }
+  }
+
+  return rc;
+}
+
+/*
+** The expression pExpr passed as the second argument to this function
+** must be of type FTSQUERY_PHRASE.
+**
+** The returned value is either NULL or a pointer to a buffer containing
+** a position-list indicating the occurrences of the phrase in column iCol
+** of the current row.
+**
+** More specifically, the returned buffer contains 1 varint for each
+** occurrence of the phrase in the column, stored using the normal (delta+2)
+** compression and is terminated by either an 0x01 or 0x00 byte. For example,
+** if the requested column contains "a b X c d X X" and the position-list
+** for 'X' is requested, the buffer returned may contain:
+**
+**     0x04 0x05 0x03 0x01   or   0x04 0x05 0x03 0x00
+**
+** This function works regardless of whether or not the phrase is deferred,
+** incremental, or neither.
+*/
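To make the (delta+2) coding described above concrete, this standalone sketch (not SQLite code; single-byte varints assumed, as in the example buffer) expands such a position list back into absolute token offsets:

#include <stdio.h>

/* Minimal sketch: expand a single-column FTS3 position list into absolute
** token offsets. Each entry is stored as (offset - previous offset + 2),
** which keeps the values 0x00 and 0x01 free to act as terminators. */
static void printPositions(const unsigned char *p){
  int iPrev = 0;
  while( *p!=0x00 && *p!=0x01 ){
    iPrev += (int)(*p++) - 2;
    printf("token offset %d\n", iPrev);
  }
}

int main(void){
  /* The buffer from the comment above: 'X' occurs at token offsets
  ** 2, 5 and 6 of "a b X c d X X". */
  const unsigned char aPos[] = { 0x04, 0x05, 0x03, 0x00 };
  printPositions(aPos);      /* prints 2, 5 and 6 */
  return 0;
}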
+SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
+  Fts3Cursor *pCsr,               /* FTS3 cursor object */
+  Fts3Expr *pExpr,                /* Phrase to return doclist for */
+  int iCol,                       /* Column to return position list for */
+  char **ppOut                    /* OUT: Pointer to position list */
+){
+  Fts3Phrase *pPhrase = pExpr->pPhrase;
+  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+  char *pIter;
+  int iThis;
+  sqlite3_int64 iDocid;
+
+  /* If this phrase applies specifically to some column other than
+  ** column iCol, return a NULL pointer.  */
+  *ppOut = 0;
+  assert( iCol>=0 && iCol<pTab->nColumn );
+  if( (pPhrase->iColumn<pTab->nColumn && pPhrase->iColumn!=iCol) ){
+    return SQLITE_OK;
+  }
+
+  iDocid = pExpr->iDocid;
+  pIter = pPhrase->doclist.pList;
+  if( iDocid!=pCsr->iPrevId || pExpr->bEof ){
+    int rc = SQLITE_OK;
+    int bDescDoclist = pTab->bDescIdx;      /* For DOCID_CMP macro */
+    int bOr = 0;
+    u8 bTreeEof = 0;
+    Fts3Expr *p;                /* Used to iterate from pExpr to root */
+    Fts3Expr *pNear;            /* Most senior NEAR ancestor (or pExpr) */
+    int bMatch;
+
+    /* Check if this phrase descends from an OR expression node. If not,
+    ** return NULL. Otherwise, the entry that corresponds to docid
+    ** pCsr->iPrevId may lie earlier in the doclist buffer. Or, if the
+    ** tree that the node is part of has been marked as EOF, but the node
+    ** itself is not EOF, then it may point to an earlier entry. */
+    pNear = pExpr;
+    for(p=pExpr->pParent; p; p=p->pParent){
+      if( p->eType==FTSQUERY_OR ) bOr = 1;
+      if( p->eType==FTSQUERY_NEAR ) pNear = p;
+      if( p->bEof ) bTreeEof = 1;
+    }
+    if( bOr==0 ) return SQLITE_OK;
+
+    /* This is the descendant of an OR node. In this case we cannot use
+    ** an incremental phrase. Load the entire doclist for the phrase
+    ** into memory in this case.  */
+    if( pPhrase->bIncr ){
+      int bEofSave = pNear->bEof;
+      fts3EvalRestart(pCsr, pNear, &rc);
+      while( rc==SQLITE_OK && !pNear->bEof ){
+        fts3EvalNextRow(pCsr, pNear, &rc);
+        if( bEofSave==0 && pNear->iDocid==iDocid ) break;
+      }
+      assert( rc!=SQLITE_OK || pPhrase->bIncr==0 );
+    }
+    if( bTreeEof ){
+      while( rc==SQLITE_OK && !pNear->bEof ){
+        fts3EvalNextRow(pCsr, pNear, &rc);
+      }
+    }
+    if( rc!=SQLITE_OK ) return rc;
+
+    bMatch = 1;
+    for(p=pNear; p; p=p->pLeft){
+      u8 bEof = 0;
+      Fts3Expr *pTest = p;
+      Fts3Phrase *pPh;
+      assert( pTest->eType==FTSQUERY_NEAR || pTest->eType==FTSQUERY_PHRASE );
+      if( pTest->eType==FTSQUERY_NEAR ) pTest = pTest->pRight;
+      assert( pTest->eType==FTSQUERY_PHRASE );
+      pPh = pTest->pPhrase;
+
+      pIter = pPh->pOrPoslist;
+      iDocid = pPh->iOrDocid;
+      if( pCsr->bDesc==bDescDoclist ){
+        bEof = !pPh->doclist.nAll ||
+          (pIter >= (pPh->doclist.aAll + pPh->doclist.nAll));
+        while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)<0 ) && bEof==0 ){
+          sqlite3Fts3DoclistNext(
+              bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
+              &pIter, &iDocid, &bEof
+          );
+        }
+      }else{
+        bEof = !pPh->doclist.nAll || (pIter && pIter<=pPh->doclist.aAll);
+        while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)>0 ) && bEof==0 ){
+          int dummy;
+          sqlite3Fts3DoclistPrev(
+              bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
+              &pIter, &iDocid, &dummy, &bEof
+          );
+        }
+      }
+      pPh->pOrPoslist = pIter;
+      pPh->iOrDocid = iDocid;
+      if( bEof || iDocid!=pCsr->iPrevId ) bMatch = 0;
+    }
+
+    if( bMatch ){
+      pIter = pPhrase->pOrPoslist;
+    }else{
+      pIter = 0;
+    }
+  }
+  if( pIter==0 ) return SQLITE_OK;
+
+  if( *pIter==0x01 ){
+    pIter++;
+    pIter += fts3GetVarint32(pIter, &iThis);
+  }else{
+    iThis = 0;
+  }
+  while( iThis<iCol ){
+    fts3ColumnlistCopy(0, &pIter);
+    if( *pIter==0x00 ) return SQLITE_OK;
+    pIter++;
+    pIter += fts3GetVarint32(pIter, &iThis);
+  }
+  if( *pIter==0x00 ){
+    pIter = 0;
+  }
+
+  *ppOut = ((iCol==iThis)?pIter:0);
+  return SQLITE_OK;
+}
+
+/*
+** Free all components of the Fts3Phrase structure that were allocated by
+** the eval module. Specifically, this means to free:
+**
+**   * the contents of pPhrase->doclist, and
+**   * any Fts3MultiSegReader objects held by phrase tokens.
+*/
+SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *pPhrase){
+  if( pPhrase ){
+    int i;
+    sqlite3_free(pPhrase->doclist.aAll);
+    fts3EvalInvalidatePoslist(pPhrase);
+    memset(&pPhrase->doclist, 0, sizeof(Fts3Doclist));
+    for(i=0; i<pPhrase->nToken; i++){
+      fts3SegReaderCursorFree(pPhrase->aToken[i].pSegcsr);
+      pPhrase->aToken[i].pSegcsr = 0;
+    }
+  }
+}
+
+
+/*
+** Return SQLITE_CORRUPT_VTAB.
+*/
+#ifdef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3Fts3Corrupt(){
+  return SQLITE_CORRUPT_VTAB;
+}
+#endif
+
+#if !SQLITE_CORE
+/*
+** Initialize API pointer table, if required.
+*/
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+SQLITE_API int sqlite3_fts3_init(
+  sqlite3 *db,
+  char **pzErrMsg,
+  const sqlite3_api_routines *pApi
+){
+  SQLITE_EXTENSION_INIT2(pApi)
+  return sqlite3Fts3Init(db);
+}
+#endif
+
+#endif
+
+/************** End of fts3.c ************************************************/
+/************** Begin file fts3_aux.c ****************************************/
+/*
+** 2011 Jan 27
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+** +****************************************************************************** +** +*/ +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* #include */ +/* #include */ + +typedef struct Fts3auxTable Fts3auxTable; +typedef struct Fts3auxCursor Fts3auxCursor; + +struct Fts3auxTable { + sqlite3_vtab base; /* Base class used by SQLite core */ + Fts3Table *pFts3Tab; +}; + +struct Fts3auxCursor { + sqlite3_vtab_cursor base; /* Base class used by SQLite core */ + Fts3MultiSegReader csr; /* Must be right after "base" */ + Fts3SegFilter filter; + char *zStop; + int nStop; /* Byte-length of string zStop */ + int iLangid; /* Language id to query */ + int isEof; /* True if cursor is at EOF */ + sqlite3_int64 iRowid; /* Current rowid */ + + int iCol; /* Current value of 'col' column */ + int nStat; /* Size of aStat[] array */ + struct Fts3auxColstats { + sqlite3_int64 nDoc; /* 'documents' values for current csr row */ + sqlite3_int64 nOcc; /* 'occurrences' values for current csr row */ + } *aStat; +}; + +/* +** Schema of the terms table. +*/ +#define FTS3_AUX_SCHEMA \ + "CREATE TABLE x(term, col, documents, occurrences, languageid HIDDEN)" + +/* +** This function does all the work for both the xConnect and xCreate methods. +** These tables have no persistent representation of their own, so xConnect +** and xCreate are identical operations. +*/ +static int fts3auxConnectMethod( + sqlite3 *db, /* Database connection */ + void *pUnused, /* Unused */ + int argc, /* Number of elements in argv array */ + const char * const *argv, /* xCreate/xConnect argument array */ + sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ + char **pzErr /* OUT: sqlite3_malloc'd error message */ +){ + char const *zDb; /* Name of database (e.g. "main") */ + char const *zFts3; /* Name of fts3 table */ + int nDb; /* Result of strlen(zDb) */ + int nFts3; /* Result of strlen(zFts3) */ + int nByte; /* Bytes of space to allocate here */ + int rc; /* value returned by declare_vtab() */ + Fts3auxTable *p; /* Virtual table object to return */ + + UNUSED_PARAMETER(pUnused); + + /* The user should invoke this in one of two forms: + ** + ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table); + ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table-db, fts4-table); + */ + if( argc!=4 && argc!=5 ) goto bad_args; + + zDb = argv[1]; + nDb = (int)strlen(zDb); + if( argc==5 ){ + if( nDb==4 && 0==sqlite3_strnicmp("temp", zDb, 4) ){ + zDb = argv[3]; + nDb = (int)strlen(zDb); + zFts3 = argv[4]; + }else{ + goto bad_args; + } + }else{ + zFts3 = argv[3]; + } + nFts3 = (int)strlen(zFts3); + + rc = sqlite3_declare_vtab(db, FTS3_AUX_SCHEMA); + if( rc!=SQLITE_OK ) return rc; + + nByte = sizeof(Fts3auxTable) + sizeof(Fts3Table) + nDb + nFts3 + 2; + p = (Fts3auxTable *)sqlite3_malloc(nByte); + if( !p ) return SQLITE_NOMEM; + memset(p, 0, nByte); + + p->pFts3Tab = (Fts3Table *)&p[1]; + p->pFts3Tab->zDb = (char *)&p->pFts3Tab[1]; + p->pFts3Tab->zName = &p->pFts3Tab->zDb[nDb+1]; + p->pFts3Tab->db = db; + p->pFts3Tab->nIndex = 1; + + memcpy((char *)p->pFts3Tab->zDb, zDb, nDb); + memcpy((char *)p->pFts3Tab->zName, zFts3, nFts3); + sqlite3Fts3Dequote((char *)p->pFts3Tab->zName); + + *ppVtab = (sqlite3_vtab *)p; + return SQLITE_OK; + + bad_args: + sqlite3Fts3ErrMsg(pzErr, "invalid arguments to fts4aux constructor"); + return SQLITE_ERROR; +} + +/* +** This function does the work for both the xDisconnect and xDestroy methods. 
+** These tables have no persistent representation of their own, so xDisconnect
+** and xDestroy are identical operations.
+*/
+static int fts3auxDisconnectMethod(sqlite3_vtab *pVtab){
+  Fts3auxTable *p = (Fts3auxTable *)pVtab;
+  Fts3Table *pFts3 = p->pFts3Tab;
+  int i;
+
+  /* Free any prepared statements held */
+  for(i=0; i<SizeofArray(pFts3->aStmt); i++){
+    sqlite3_finalize(pFts3->aStmt[i]);
+  }
+  sqlite3_free(pFts3->zSegmentsTbl);
+  sqlite3_free(p);
+  return SQLITE_OK;
+}
+
+#define FTS4AUX_EQ_CONSTRAINT 1
+#define FTS4AUX_GE_CONSTRAINT 2
+#define FTS4AUX_LE_CONSTRAINT 4
+
+/*
+** xBestIndex - Analyze a WHERE and ORDER BY clause.
+*/
+static int fts3auxBestIndexMethod(
+  sqlite3_vtab *pVTab,
+  sqlite3_index_info *pInfo
+){
+  int i;
+  int iEq = -1;
+  int iGe = -1;
+  int iLe = -1;
+  int iLangid = -1;
+  int iNext = 1;                  /* Next free argvIndex value */
+
+  UNUSED_PARAMETER(pVTab);
+
+  /* This vtab always delivers results in "ORDER BY term ASC" order. */
+  if( pInfo->nOrderBy==1
+   && pInfo->aOrderBy[0].iColumn==0
+   && pInfo->aOrderBy[0].desc==0
+  ){
+    pInfo->orderByConsumed = 1;
+  }
+
+  /* Search for equality and range constraints on the "term" column.
+  ** And equality constraints on the hidden "languageid" column. */
+  for(i=0; i<pInfo->nConstraint; i++){
+    if( pInfo->aConstraint[i].usable ){
+      int op = pInfo->aConstraint[i].op;
+      int iCol = pInfo->aConstraint[i].iColumn;
+
+      if( iCol==0 ){
+        if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iEq = i;
+        if( op==SQLITE_INDEX_CONSTRAINT_LT ) iLe = i;
+        if( op==SQLITE_INDEX_CONSTRAINT_LE ) iLe = i;
+        if( op==SQLITE_INDEX_CONSTRAINT_GT ) iGe = i;
+        if( op==SQLITE_INDEX_CONSTRAINT_GE ) iGe = i;
+      }
+      if( iCol==4 ){
+        if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iLangid = i;
+      }
+    }
+  }
+
+  if( iEq>=0 ){
+    pInfo->idxNum = FTS4AUX_EQ_CONSTRAINT;
+    pInfo->aConstraintUsage[iEq].argvIndex = iNext++;
+    pInfo->estimatedCost = 5;
+  }else{
+    pInfo->idxNum = 0;
+    pInfo->estimatedCost = 20000;
+    if( iGe>=0 ){
+      pInfo->idxNum += FTS4AUX_GE_CONSTRAINT;
+      pInfo->aConstraintUsage[iGe].argvIndex = iNext++;
+      pInfo->estimatedCost /= 2;
+    }
+    if( iLe>=0 ){
+      pInfo->idxNum += FTS4AUX_LE_CONSTRAINT;
+      pInfo->aConstraintUsage[iLe].argvIndex = iNext++;
+      pInfo->estimatedCost /= 2;
+    }
+  }
+  if( iLangid>=0 ){
+    pInfo->aConstraintUsage[iLangid].argvIndex = iNext++;
+    pInfo->estimatedCost--;
+  }
+
+  return SQLITE_OK;
+}
+
+/*
+** xOpen - Open a cursor.
+*/
+static int fts3auxOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
+  Fts3auxCursor *pCsr;            /* Pointer to cursor object to return */
+
+  UNUSED_PARAMETER(pVTab);
+
+  pCsr = (Fts3auxCursor *)sqlite3_malloc(sizeof(Fts3auxCursor));
+  if( !pCsr ) return SQLITE_NOMEM;
+  memset(pCsr, 0, sizeof(Fts3auxCursor));
+
+  *ppCsr = (sqlite3_vtab_cursor *)pCsr;
+  return SQLITE_OK;
+}
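As a usage sketch for the module defined here (assuming a recent SQLite built with FTS4 support and an existing FTS4 table named "ft"; error handling elided for brevity), an fts4aux table is created and queried like any other virtual table:

#include <stdio.h>
#include "sqlite3.h"

/* Sketch: enumerate the vocabulary of the FTS4 table "ft" through an
** fts4aux table. The col='*' rows aggregate statistics over all columns. */
static void dumpTerms(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  sqlite3_exec(db,
      "CREATE VIRTUAL TABLE IF NOT EXISTS ft_terms USING fts4aux(ft)",
      0, 0, 0);
  sqlite3_prepare_v2(db,
      "SELECT term, documents, occurrences FROM ft_terms WHERE col='*'",
      -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s: %lld doc(s), %lld occurrence(s)\n",
        (const char *)sqlite3_column_text(pStmt, 0),
        sqlite3_column_int64(pStmt, 1),
        sqlite3_column_int64(pStmt, 2));
  }
  sqlite3_finalize(pStmt);
}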
+
+/*
+** xClose - Close a cursor.
+*/
+static int fts3auxCloseMethod(sqlite3_vtab_cursor *pCursor){
+  Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
+  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+
+  sqlite3Fts3SegmentsClose(pFts3);
+  sqlite3Fts3SegReaderFinish(&pCsr->csr);
+  sqlite3_free((void *)pCsr->filter.zTerm);
+  sqlite3_free(pCsr->zStop);
+  sqlite3_free(pCsr->aStat);
+  sqlite3_free(pCsr);
+  return SQLITE_OK;
+}
+
+static int fts3auxGrowStatArray(Fts3auxCursor *pCsr, int nSize){
+  if( nSize>pCsr->nStat ){
+    struct Fts3auxColstats *aNew;
+    aNew = (struct Fts3auxColstats *)sqlite3_realloc(pCsr->aStat,
+        sizeof(struct Fts3auxColstats) * nSize
+    );
+    if( aNew==0 ) return SQLITE_NOMEM;
+    memset(&aNew[pCsr->nStat], 0,
+        sizeof(struct Fts3auxColstats) * (nSize - pCsr->nStat)
+    );
+    pCsr->aStat = aNew;
+    pCsr->nStat = nSize;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** xNext - Advance the cursor to the next row, if any.
+*/
+static int fts3auxNextMethod(sqlite3_vtab_cursor *pCursor){
+  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+  Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
+  int rc;
+
+  /* Increment our pretend rowid value. */
+  pCsr->iRowid++;
+
+  for(pCsr->iCol++; pCsr->iCol<pCsr->nStat; pCsr->iCol++){
+    if( pCsr->aStat[pCsr->iCol].nDoc>0 ) return SQLITE_OK;
+  }
+
+  rc = sqlite3Fts3SegReaderStep(pFts3, &pCsr->csr);
+  if( rc==SQLITE_ROW ){
+    int i = 0;
+    int nDoclist = pCsr->csr.nDoclist;
+    char *aDoclist = pCsr->csr.aDoclist;
+    int iCol;
+
+    int eState = 0;
+
+    if( pCsr->zStop ){
+      int n = (pCsr->nStop<pCsr->csr.nTerm) ? pCsr->nStop : pCsr->csr.nTerm;
+      int mc = memcmp(pCsr->zStop, pCsr->csr.zTerm, n);
+      if( mc<0 || (mc==0 && pCsr->csr.nTerm>pCsr->nStop) ){
+        pCsr->isEof = 1;
+        return SQLITE_OK;
+      }
+    }
+
+    if( fts3auxGrowStatArray(pCsr, 2) ) return SQLITE_NOMEM;
+    memset(pCsr->aStat, 0, sizeof(struct Fts3auxColstats) * pCsr->nStat);
+    iCol = 0;
+
+    while( i<nDoclist ){
+      sqlite3_int64 v = 0;
+
+      i += sqlite3Fts3GetVarint(&aDoclist[i], &v);
+      switch( eState ){
+        /* State 0. In this state the integer just read was a docid. */
+        case 0:
+          pCsr->aStat[0].nDoc++;
+          eState = 1;
+          iCol = 0;
+          break;
+
+        /* State 1. In this state we are expecting either a 1, indicating
+        ** that the following integer will be a column number, or the
+        ** start of a position list for column 0.
+        **
+        ** The only difference between state 1 and state 2 is that if the
+        ** integer encountered in state 1 is not 0 or 1, then we need to
+        ** increment the column 0 "nDoc" count for this term.
+        */
+        case 1:
+          assert( iCol==0 );
+          if( v>1 ){
+            pCsr->aStat[1].nDoc++;
+          }
+          eState = 2;
+          /* fall through */
+
+        case 2:
+          if( v==0 ){       /* 0x00. Next integer will be a docid. */
+            eState = 0;
+          }else if( v==1 ){ /* 0x01. Next integer will be a column number. */
+            eState = 3;
+          }else{            /* 2 or greater. A position. */
+            pCsr->aStat[iCol+1].nOcc++;
+            pCsr->aStat[0].nOcc++;
+          }
+          break;
+
+        /* State 3. The integer just read is a column number. */
+        default: assert( eState==3 );
+          iCol = (int)v;
+          if( fts3auxGrowStatArray(pCsr, iCol+2) ) return SQLITE_NOMEM;
+          pCsr->aStat[iCol+1].nDoc++;
+          eState = 2;
+          break;
+      }
+    }
+
+    pCsr->iCol = 0;
+    rc = SQLITE_OK;
+  }else{
+    pCsr->isEof = 1;
+  }
+  return rc;
+}
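The state machine above can be exercised in isolation. This standalone sketch (not SQLite code) runs the same four states over a hypothetical doclist, using single-byte stand-ins for varints, and accumulates the same aStat-style totals (slot 0 aggregates all columns, slot iCol+1 is per-column):

#include <stdio.h>

int main(void){
  /* Hypothetical doclist for one term: docid 5 with two positions in
  ** column 0 and one position in column 2, then docid 9 (stored as the
  ** delta 4) with one position in column 0. */
  const unsigned char a[] = {
    0x05, 0x04, 0x07, 0x01, 0x02, 0x03, 0x00,   /* docid 5 */
    0x04, 0x06, 0x00                            /* docid 9 */
  };
  long long nDoc[4] = {0}, nOcc[4] = {0};
  int eState = 0, iCol = 0;
  unsigned int i;

  for(i=0; i<sizeof(a); i++){
    int v = a[i];
    switch( eState ){
      case 0:                        /* v is a docid (or docid delta) */
        nDoc[0]++;
        eState = 1;
        iCol = 0;
        break;
      case 1:                        /* first entry of the position list */
        if( v>1 ) nDoc[1]++;         /* column 0 has at least one hit */
        eState = 2;
        /* fall through */
      case 2:
        if( v==0 ){ eState = 0; }       /* end of this doc's poslist */
        else if( v==1 ){ eState = 3; }  /* a column number follows */
        else{ nOcc[iCol+1]++; nOcc[0]++; }
        break;
      default:                       /* eState==3: v is a column number */
        iCol = v;
        nDoc[iCol+1]++;
        eState = 2;
        break;
    }
  }
  for(i=0; i<4; i++){
    printf("slot %u: nDoc=%lld nOcc=%lld\n", i, nDoc[i], nOcc[i]);
  }
  return 0;
}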
+
+/*
+** xFilter - Initialize a cursor to point at the start of its data.
+*/
+static int fts3auxFilterMethod(
+  sqlite3_vtab_cursor *pCursor,   /* The cursor used for this query */
+  int idxNum,                     /* Strategy index */
+  const char *idxStr,             /* Unused */
+  int nVal,                       /* Number of elements in apVal */
+  sqlite3_value **apVal           /* Arguments for the indexing scheme */
+){
+  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+  Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
+  int rc;
+  int isScan = 0;
+  int iLangVal = 0;               /* Language id to query */
+
+  int iEq = -1;                   /* Index of term=? value in apVal */
+  int iGe = -1;                   /* Index of term>=? value in apVal */
+  int iLe = -1;                   /* Index of term<=? value in apVal */
+  int iLangid = -1;               /* Index of languageid=? value in apVal */
+  int iNext = 0;
+
+  UNUSED_PARAMETER(nVal);
+  UNUSED_PARAMETER(idxStr);
+
+  assert( idxStr==0 );
+  assert( idxNum==FTS4AUX_EQ_CONSTRAINT || idxNum==0
+       || idxNum==FTS4AUX_LE_CONSTRAINT || idxNum==FTS4AUX_GE_CONSTRAINT
+       || idxNum==(FTS4AUX_LE_CONSTRAINT|FTS4AUX_GE_CONSTRAINT)
+  );
+
+  if( idxNum==FTS4AUX_EQ_CONSTRAINT ){
+    iEq = iNext++;
+  }else{
+    isScan = 1;
+    if( idxNum & FTS4AUX_GE_CONSTRAINT ){
+      iGe = iNext++;
+    }
+    if( idxNum & FTS4AUX_LE_CONSTRAINT ){
+      iLe = iNext++;
+    }
+  }
+  if( iNext<nVal ){
+    iLangid = iNext++;
+  }
+
+  /* In case this cursor is being reused, close and zero it. */
+  testcase(pCsr->filter.zTerm);
+  sqlite3Fts3SegReaderFinish(&pCsr->csr);
+  sqlite3_free((void *)pCsr->filter.zTerm);
+  sqlite3_free(pCsr->aStat);
+  memset(&pCsr->csr, 0, ((u8*)&pCsr[1]) - (u8*)&pCsr->csr);
+
+  pCsr->filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY;
+  if( isScan ) pCsr->filter.flags |= FTS3_SEGMENT_SCAN;
+
+  if( iEq>=0 || iGe>=0 ){
+    const unsigned char *zStr = sqlite3_value_text(apVal[0]);
+    assert( (iEq==0 && iGe==-1) || (iEq==-1 && iGe==0) );
+    if( zStr ){
+      pCsr->filter.zTerm = sqlite3_mprintf("%s", zStr);
+      pCsr->filter.nTerm = sqlite3_value_bytes(apVal[0]);
+      if( pCsr->filter.zTerm==0 ) return SQLITE_NOMEM;
+    }
+  }
+
+  if( iLe>=0 ){
+    pCsr->zStop = sqlite3_mprintf("%s", sqlite3_value_text(apVal[iLe]));
+    pCsr->nStop = sqlite3_value_bytes(apVal[iLe]);
+    if( pCsr->zStop==0 ) return SQLITE_NOMEM;
+  }
+
+  if( iLangid>=0 ){
+    iLangVal = sqlite3_value_int(apVal[iLangid]);
+
+    /* If the user specified a negative value for the languageid, use zero
+    ** instead. This works, as the "languageid=?" constraint will also
+    ** be tested by the VDBE layer. The test will always be false (since
+    ** this module will not return a row with a negative languageid), and
+    ** so the overall query will return zero rows. */
+    if( iLangVal<0 ) iLangVal = 0;
+  }
+  pCsr->iLangid = iLangVal;
+
+  rc = sqlite3Fts3SegReaderCursor(pFts3, iLangVal, 0, FTS3_SEGCURSOR_ALL,
+      pCsr->filter.zTerm, pCsr->filter.nTerm, 0, isScan, &pCsr->csr
+  );
+  if( rc==SQLITE_OK ){
+    rc = sqlite3Fts3SegReaderStart(pFts3, &pCsr->csr, &pCsr->filter);
+  }
+
+  if( rc==SQLITE_OK ) rc = fts3auxNextMethod(pCursor);
+  return rc;
+}
+
+/*
+** xEof - Return true if the cursor is at EOF, or false otherwise.
+*/
+static int fts3auxEofMethod(sqlite3_vtab_cursor *pCursor){
+  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
+  return pCsr->isEof;
+}
+
+/*
+** xColumn - Return a column value.
+*/ +static int fts3auxColumnMethod( + sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ + sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ + int iCol /* Index of column to read value from */ +){ + Fts3auxCursor *p = (Fts3auxCursor *)pCursor; + + assert( p->isEof==0 ); + switch( iCol ){ + case 0: /* term */ + sqlite3_result_text(pCtx, p->csr.zTerm, p->csr.nTerm, SQLITE_TRANSIENT); + break; + + case 1: /* col */ + if( p->iCol ){ + sqlite3_result_int(pCtx, p->iCol-1); + }else{ + sqlite3_result_text(pCtx, "*", -1, SQLITE_STATIC); + } + break; + + case 2: /* documents */ + sqlite3_result_int64(pCtx, p->aStat[p->iCol].nDoc); + break; + + case 3: /* occurrences */ + sqlite3_result_int64(pCtx, p->aStat[p->iCol].nOcc); + break; + + default: /* languageid */ + assert( iCol==4 ); + sqlite3_result_int(pCtx, p->iLangid); + break; + } + + return SQLITE_OK; +} + +/* +** xRowid - Return the current rowid for the cursor. +*/ +static int fts3auxRowidMethod( + sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ + sqlite_int64 *pRowid /* OUT: Rowid value */ +){ + Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; + *pRowid = pCsr->iRowid; + return SQLITE_OK; +} + +/* +** Register the fts3aux module with database connection db. Return SQLITE_OK +** if successful or an error code if sqlite3_create_module() fails. +*/ +SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){ + static const sqlite3_module fts3aux_module = { + 0, /* iVersion */ + fts3auxConnectMethod, /* xCreate */ + fts3auxConnectMethod, /* xConnect */ + fts3auxBestIndexMethod, /* xBestIndex */ + fts3auxDisconnectMethod, /* xDisconnect */ + fts3auxDisconnectMethod, /* xDestroy */ + fts3auxOpenMethod, /* xOpen */ + fts3auxCloseMethod, /* xClose */ + fts3auxFilterMethod, /* xFilter */ + fts3auxNextMethod, /* xNext */ + fts3auxEofMethod, /* xEof */ + fts3auxColumnMethod, /* xColumn */ + fts3auxRowidMethod, /* xRowid */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindFunction */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0 /* xRollbackTo */ + }; + int rc; /* Return code */ + + rc = sqlite3_create_module(db, "fts4aux", &fts3aux_module, 0); + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ + +/************** End of fts3_aux.c ********************************************/ +/************** Begin file fts3_expr.c ***************************************/ +/* +** 2008 Nov 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This module contains code that implements a parser for fts3 query strings +** (the right-hand argument to the MATCH operator). Because the supported +** syntax is relatively simple, the whole tokenizer/parser system is +** hand-coded. +*/ +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* +** By default, this module parses the legacy syntax that has been +** traditionally used by fts3. Or, if SQLITE_ENABLE_FTS3_PARENTHESIS +** is defined, then it uses the new syntax. The differences between +** the new and the old syntaxes are: +** +** a) The new syntax supports parenthesis. The old does not. 
+** +** b) The new syntax supports the AND and NOT operators. The old does not. +** +** c) The old syntax supports the "-" token qualifier. This is not +** supported by the new syntax (it is replaced by the NOT operator). +** +** d) When using the old syntax, the OR operator has a greater precedence +** than an implicit AND. When using the new, both implicity and explicit +** AND operators have a higher precedence than OR. +** +** If compiled with SQLITE_TEST defined, then this module exports the +** symbol "int sqlite3_fts3_enable_parentheses". Setting this variable +** to zero causes the module to use the old syntax. If it is set to +** non-zero the new syntax is activated. This is so both syntaxes can +** be tested using a single build of testfixture. +** +** The following describes the syntax supported by the fts3 MATCH +** operator in a similar format to that used by the lemon parser +** generator. This module does not use actually lemon, it uses a +** custom parser. +** +** query ::= andexpr (OR andexpr)*. +** +** andexpr ::= notexpr (AND? notexpr)*. +** +** notexpr ::= nearexpr (NOT nearexpr|-TOKEN)*. +** notexpr ::= LP query RP. +** +** nearexpr ::= phrase (NEAR distance_opt nearexpr)*. +** +** distance_opt ::= . +** distance_opt ::= / INTEGER. +** +** phrase ::= TOKEN. +** phrase ::= COLUMN:TOKEN. +** phrase ::= "TOKEN TOKEN TOKEN...". +*/ + +#ifdef SQLITE_TEST +SQLITE_API int sqlite3_fts3_enable_parentheses = 0; +#else +# ifdef SQLITE_ENABLE_FTS3_PARENTHESIS +# define sqlite3_fts3_enable_parentheses 1 +# else +# define sqlite3_fts3_enable_parentheses 0 +# endif +#endif + +/* +** Default span for NEAR operators. +*/ +#define SQLITE_FTS3_DEFAULT_NEAR_PARAM 10 + +/* #include */ +/* #include */ + +/* +** isNot: +** This variable is used by function getNextNode(). When getNextNode() is +** called, it sets ParseContext.isNot to true if the 'next node' is a +** FTSQUERY_PHRASE with a unary "-" attached to it. i.e. "mysql" in the +** FTS3 query "sqlite -mysql". Otherwise, ParseContext.isNot is set to +** zero. +*/ +typedef struct ParseContext ParseContext; +struct ParseContext { + sqlite3_tokenizer *pTokenizer; /* Tokenizer module */ + int iLangid; /* Language id used with tokenizer */ + const char **azCol; /* Array of column names for fts3 table */ + int bFts4; /* True to allow FTS4-only syntax */ + int nCol; /* Number of entries in azCol[] */ + int iDefaultCol; /* Default column to query */ + int isNot; /* True if getNextNode() sees a unary - */ + sqlite3_context *pCtx; /* Write error message here */ + int nNest; /* Number of nested brackets */ +}; + +/* +** This function is equivalent to the standard isspace() function. +** +** The standard isspace() can be awkward to use safely, because although it +** is defined to accept an argument of type int, its behavior when passed +** an integer that falls outside of the range of the unsigned char type +** is undefined (and sometimes, "undefined" means segfault). This wrapper +** is defined to accept an argument of type char, and always returns 0 for +** any values that fall outside of the range of the unsigned char type (i.e. +** negative values). +*/ +static int fts3isspace(char c){ + return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f'; +} + +/* +** Allocate nByte bytes of memory using sqlite3_malloc(). If successful, +** zero the memory before returning a pointer to it. If unsuccessful, +** return NULL. 
+*/
+static void *fts3MallocZero(int nByte){
+  void *pRet = sqlite3_malloc(nByte);
+  if( pRet ) memset(pRet, 0, nByte);
+  return pRet;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(
+  sqlite3_tokenizer *pTokenizer,
+  int iLangid,
+  const char *z,
+  int n,
+  sqlite3_tokenizer_cursor **ppCsr
+){
+  sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+  sqlite3_tokenizer_cursor *pCsr = 0;
+  int rc;
+
+  rc = pModule->xOpen(pTokenizer, z, n, &pCsr);
+  assert( rc==SQLITE_OK || pCsr==0 );
+  if( rc==SQLITE_OK ){
+    pCsr->pTokenizer = pTokenizer;
+    if( pModule->iVersion>=1 ){
+      rc = pModule->xLanguageid(pCsr, iLangid);
+      if( rc!=SQLITE_OK ){
+        pModule->xClose(pCsr);
+        pCsr = 0;
+      }
+    }
+  }
+  *ppCsr = pCsr;
+  return rc;
+}
+
+/*
+** Function getNextNode(), which is called by fts3ExprParse(), may itself
+** call fts3ExprParse(). So this forward declaration is required.
+*/
+static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *);
+
+/*
+** Extract the next token from buffer z (length n) using the tokenizer
+** and other information (column names etc.) in pParse. Create an Fts3Expr
+** structure of type FTSQUERY_PHRASE containing a phrase consisting of this
+** single token and set *ppExpr to point to it. If the end of the buffer is
+** reached before a token is found, set *ppExpr to zero. It is the
+** responsibility of the caller to eventually deallocate the allocated
+** Fts3Expr structure (if any) by passing it to sqlite3_free().
+**
+** Return SQLITE_OK if successful, or SQLITE_NOMEM if a memory allocation
+** fails.
+*/
+static int getNextToken(
+  ParseContext *pParse,           /* fts3 query parse context */
+  int iCol,                       /* Value for Fts3Phrase.iColumn */
+  const char *z, int n,           /* Input string */
+  Fts3Expr **ppExpr,              /* OUT: expression */
+  int *pnConsumed                 /* OUT: Number of bytes consumed */
+){
+  sqlite3_tokenizer *pTokenizer = pParse->pTokenizer;
+  sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+  int rc;
+  sqlite3_tokenizer_cursor *pCursor;
+  Fts3Expr *pRet = 0;
+  int i = 0;
+
+  /* Set variable i to the maximum number of bytes of input to tokenize. */
+  for(i=0; i<n; i++){
+    if( sqlite3_fts3_enable_parentheses && (z[i]=='(' || z[i]==')') ) break;
+    if( z[i]=='"' ) break;
+  }
+
+  *pnConsumed = i;
+  rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, i, &pCursor);
+  if( rc==SQLITE_OK ){
+    const char *zToken;
+    int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0;
+    int nByte;                    /* total space to allocate */
+
+    rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition);
+    if( rc==SQLITE_OK ){
+      nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken;
+      pRet = (Fts3Expr *)fts3MallocZero(nByte);
+      if( !pRet ){
+        rc = SQLITE_NOMEM;
+      }else{
+        pRet->eType = FTSQUERY_PHRASE;
+        pRet->pPhrase = (Fts3Phrase *)&pRet[1];
+        pRet->pPhrase->nToken = 1;
+        pRet->pPhrase->iColumn = iCol;
+        pRet->pPhrase->aToken[0].n = nToken;
+        pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1];
+        memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken);
+
+        if( iEnd<n && z[iEnd]=='*' ){
+          pRet->pPhrase->aToken[0].isPrefix = 1;
+          iEnd++;
+        }
+
+        while( 1 ){
+          if( !sqlite3_fts3_enable_parentheses
+           && iStart>0 && z[iStart-1]=='-'
+          ){
+            pParse->isNot = 1;
+            iStart--;
+          }else if( pParse->bFts4 && iStart>0 && z[iStart-1]=='^' ){
+            pRet->pPhrase->aToken[0].bFirst = 1;
+            iStart--;
+          }else{
+            break;
+          }
+        }
+
+      }
+      *pnConsumed = iEnd;
+    }else if( i && rc==SQLITE_DONE ){
+      rc = SQLITE_OK;
+    }
+
+    pModule->xClose(pCursor);
+  }
+
+  *ppExpr = pRet;
+  return rc;
+}
+
+
+/*
+** Enlarge a memory allocation. If an out-of-memory allocation occurs,
+** then free the old allocation.
+*/ +static void *fts3ReallocOrFree(void *pOrig, int nNew){ + void *pRet = sqlite3_realloc(pOrig, nNew); + if( !pRet ){ + sqlite3_free(pOrig); + } + return pRet; +} + +/* +** Buffer zInput, length nInput, contains the contents of a quoted string +** that appeared as part of an fts3 query expression. Neither quote character +** is included in the buffer. This function attempts to tokenize the entire +** input buffer and create an Fts3Expr structure of type FTSQUERY_PHRASE +** containing the results. +** +** If successful, SQLITE_OK is returned and *ppExpr set to point at the +** allocated Fts3Expr structure. Otherwise, either SQLITE_NOMEM (out of memory +** error) or SQLITE_ERROR (tokenization error) is returned and *ppExpr set +** to 0. +*/ +static int getNextString( + ParseContext *pParse, /* fts3 query parse context */ + const char *zInput, int nInput, /* Input string */ + Fts3Expr **ppExpr /* OUT: expression */ +){ + sqlite3_tokenizer *pTokenizer = pParse->pTokenizer; + sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; + int rc; + Fts3Expr *p = 0; + sqlite3_tokenizer_cursor *pCursor = 0; + char *zTemp = 0; + int nTemp = 0; + + const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + int nToken = 0; + + /* The final Fts3Expr data structure, including the Fts3Phrase, + ** Fts3PhraseToken structures token buffers are all stored as a single + ** allocation so that the expression can be freed with a single call to + ** sqlite3_free(). Setting this up requires a two pass approach. + ** + ** The first pass, in the block below, uses a tokenizer cursor to iterate + ** through the tokens in the expression. This pass uses fts3ReallocOrFree() + ** to assemble data in two dynamic buffers: + ** + ** Buffer p: Points to the Fts3Expr structure, followed by the Fts3Phrase + ** structure, followed by the array of Fts3PhraseToken + ** structures. This pass only populates the Fts3PhraseToken array. + ** + ** Buffer zTemp: Contains copies of all tokens. + ** + ** The second pass, in the block that begins "if( rc==SQLITE_DONE )" below, + ** appends buffer zTemp to buffer p, and fills in the Fts3Expr and Fts3Phrase + ** structures. 
+  */
+  rc = sqlite3Fts3OpenTokenizer(
+      pTokenizer, pParse->iLangid, zInput, nInput, &pCursor);
+  if( rc==SQLITE_OK ){
+    int ii;
+    for(ii=0; rc==SQLITE_OK; ii++){
+      const char *zByte;
+      int nByte = 0, iBegin = 0, iEnd = 0, iPos = 0;
+      rc = pModule->xNext(pCursor, &zByte, &nByte, &iBegin, &iEnd, &iPos);
+      if( rc==SQLITE_OK ){
+        Fts3PhraseToken *pToken;
+
+        p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken));
+        if( !p ) goto no_mem;
+
+        zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte);
+        if( !zTemp ) goto no_mem;
+
+        assert( nToken==ii );
+        pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii];
+        memset(pToken, 0, sizeof(Fts3PhraseToken));
+
+        memcpy(&zTemp[nTemp], zByte, nByte);
+        nTemp += nByte;
+
+        pToken->n = nByte;
+        pToken->isPrefix = (iEnd<nInput && zInput[iEnd]=='*');
+        pToken->bFirst = (iBegin>0 && zInput[iBegin-1]=='^');
+        nToken = ii+1;
+      }
+    }
+
+    pModule->xClose(pCursor);
+    pCursor = 0;
+  }
+
+  if( rc==SQLITE_DONE ){
+    int jj;
+    char *zBuf = 0;
+
+    p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp);
+    if( !p ) goto no_mem;
+    memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p);
+    p->eType = FTSQUERY_PHRASE;
+    p->pPhrase = (Fts3Phrase *)&p[1];
+    p->pPhrase->iColumn = pParse->iDefaultCol;
+    p->pPhrase->nToken = nToken;
+
+    zBuf = (char *)&p->pPhrase->aToken[nToken];
+    if( zTemp ){
+      memcpy(zBuf, zTemp, nTemp);
+      sqlite3_free(zTemp);
+    }else{
+      assert( nTemp==0 );
+    }
+
+    for(jj=0; jj<p->pPhrase->nToken; jj++){
+      p->pPhrase->aToken[jj].z = zBuf;
+      zBuf += p->pPhrase->aToken[jj].n;
+    }
+    rc = SQLITE_OK;
+  }
+
+  *ppExpr = p;
+  return rc;
+no_mem:
+
+  if( pCursor ){
+    pModule->xClose(pCursor);
+  }
+  sqlite3_free(zTemp);
+  sqlite3_free(p);
+  *ppExpr = 0;
+  return SQLITE_NOMEM;
+}
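The two-pass construction above ends with the whole expression in one sqlite3_malloc() block: header structures, then the token array, then all token text back to back. A minimal sketch of the same layout idea, with simplified stand-in types rather than the real Fts3Expr and Fts3Phrase structures (nWord is assumed to be at least 1):

#include <stdlib.h>
#include <string.h>

/* Sketch of the one-allocation layout used by getNextString(). */
typedef struct Token { char *z; int n; } Token;   /* z is not nul-terminated */
typedef struct Phrase { int nToken; Token aToken[1]; } Phrase;

static Phrase *phraseNew(const char **azWord, int nWord){
  int nText = 0, i;
  char *zBuf;
  Phrase *p;
  for(i=0; i<nWord; i++) nText += (int)strlen(azWord[i]);
  p = (Phrase *)malloc(sizeof(Phrase) + (nWord-1)*sizeof(Token) + nText);
  if( p==0 ) return 0;
  p->nToken = nWord;
  zBuf = (char *)&p->aToken[nWord];     /* text area follows the array */
  for(i=0; i<nWord; i++){
    int n = (int)strlen(azWord[i]);
    memcpy(zBuf, azWord[i], n);
    p->aToken[i].z = zBuf;              /* points into the same block */
    p->aToken[i].n = n;
    zBuf += n;
  }
  return p;
}

Because every internal pointer targets the same block, the whole phrase is released with a single free(p); this is the property that lets the fts3 expression code free each node with one sqlite3_free() call.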
+
+/*
+** The output variable *ppExpr is populated with an allocated Fts3Expr
+** structure, or set to 0 if the end of the input buffer is reached.
+**
+** Returns an SQLite error code. SQLITE_OK if everything works, SQLITE_NOMEM
+** if a malloc failure occurs, or SQLITE_ERROR if a parse error is encountered.
+** If SQLITE_ERROR is returned, pContext is populated with an error message.
+*/
+static int getNextNode(
+  ParseContext *pParse,           /* fts3 query parse context */
+  const char *z, int n,           /* Input string */
+  Fts3Expr **ppExpr,              /* OUT: expression */
+  int *pnConsumed                 /* OUT: Number of bytes consumed */
+){
+  static const struct Fts3Keyword {
+    char *z;                      /* Keyword text */
+    unsigned char n;              /* Length of the keyword */
+    unsigned char parenOnly;      /* Only valid in paren mode */
+    unsigned char eType;          /* Keyword code */
+  } aKeyword[] = {
+    { "OR" ,  2, 0, FTSQUERY_OR   },
+    { "AND",  3, 1, FTSQUERY_AND  },
+    { "NOT",  3, 1, FTSQUERY_NOT  },
+    { "NEAR", 4, 0, FTSQUERY_NEAR }
+  };
+  int ii;
+  int iCol;
+  int iColLen;
+  int rc;
+  Fts3Expr *pRet = 0;
+
+  const char *zInput = z;
+  int nInput = n;
+
+  pParse->isNot = 0;
+
+  /* Skip over any whitespace before checking for a keyword, an open or
+  ** close bracket, or a quoted string.
+  */
+  while( nInput>0 && fts3isspace(*zInput) ){
+    nInput--;
+    zInput++;
+  }
+  if( nInput==0 ){
+    return SQLITE_DONE;
+  }
+
+  /* See if we are dealing with a keyword. */
+  for(ii=0; ii<(int)(sizeof(aKeyword)/sizeof(struct Fts3Keyword)); ii++){
+    const struct Fts3Keyword *pKey = &aKeyword[ii];
+
+    if( (pKey->parenOnly & ~sqlite3_fts3_enable_parentheses)!=0 ){
+      continue;
+    }
+
+    if( nInput>=pKey->n && 0==memcmp(zInput, pKey->z, pKey->n) ){
+      int nNear = SQLITE_FTS3_DEFAULT_NEAR_PARAM;
+      int nKey = pKey->n;
+      char cNext;
+
+      /* If this is a "NEAR" keyword, check for an explicit nearness. */
+      if( pKey->eType==FTSQUERY_NEAR ){
+        assert( nKey==4 );
+        if( zInput[4]=='/' && zInput[5]>='0' && zInput[5]<='9' ){
+          nNear = 0;
+          for(nKey=5; zInput[nKey]>='0' && zInput[nKey]<='9'; nKey++){
+            nNear = nNear * 10 + (zInput[nKey] - '0');
+          }
+        }
+      }
+
+      /* At this point this is probably a keyword. But for that to be true,
+      ** the next byte must contain either whitespace, an open or close
+      ** parenthesis, a quote character, or EOF.
+      */
+      cNext = zInput[nKey];
+      if( fts3isspace(cNext)
+       || cNext=='"' || cNext=='(' || cNext==')' || cNext==0
+      ){
+        pRet = (Fts3Expr *)fts3MallocZero(sizeof(Fts3Expr));
+        if( !pRet ){
+          return SQLITE_NOMEM;
+        }
+        pRet->eType = pKey->eType;
+        pRet->nNear = nNear;
+        *ppExpr = pRet;
+        *pnConsumed = (int)((zInput - z) + nKey);
+        return SQLITE_OK;
+      }
+
+      /* Turns out that wasn't a keyword after all. This happens if the
+      ** user has supplied a token such as "ORacle". Continue.
+      */
+    }
+  }
+
+  /* See if we are dealing with a quoted phrase. If this is the case, then
+  ** search for the closing quote and pass the whole string to getNextString()
+  ** for processing. This is easy to do, as fts3 has no syntax for escaping
+  ** a quote character embedded in a string.
+  */
+  if( *zInput=='"' ){
+    for(ii=1; ii<nInput && zInput[ii]!='"'; ii++);
+    *pnConsumed = (int)((zInput - z) + ii + 1);
+    if( ii==nInput ){
+      return SQLITE_ERROR;
+    }
+    return getNextString(pParse, &zInput[1], ii-1, ppExpr);
+  }
+
+  if( sqlite3_fts3_enable_parentheses ){
+    if( *zInput=='(' ){
+      int nConsumed = 0;
+      pParse->nNest++;
+      rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed);
+      if( rc==SQLITE_OK && !*ppExpr ){ rc = SQLITE_DONE; }
+      *pnConsumed = (int)(zInput - z) + 1 + nConsumed;
+      return rc;
+    }else if( *zInput==')' ){
+      pParse->nNest--;
+      *pnConsumed = (int)((zInput - z) + 1);
+      *ppExpr = 0;
+      return SQLITE_DONE;
+    }
+  }
+
+  /* If control flows to this point, this must be a regular token, or
+  ** the end of the input. Read a regular token using the sqlite3_tokenizer
+  ** interface. Before doing so, figure out if there is an explicit
+  ** column specifier for the token.
+  **
+  ** TODO: Strangely, it is not possible to associate a column specifier
+  ** with a quoted phrase, only with a single token. Not sure if this was
+  ** an implementation artifact or an intentional decision when fts3 was
+  ** first implemented. Whichever it was, this module duplicates the
+  ** limitation.
+  */
+  iCol = pParse->iDefaultCol;
+  iColLen = 0;
+  for(ii=0; ii<pParse->nCol; ii++){
+    const char *zStr = pParse->azCol[ii];
+    int nStr = (int)strlen(zStr);
+    if( nInput>nStr && zInput[nStr]==':'
+     && sqlite3_strnicmp(zStr, zInput, nStr)==0
+    ){
+      iCol = ii;
+      iColLen = (int)((zInput - z) + nStr + 1);
+      break;
+    }
+  }
+  rc = getNextToken(pParse, iCol, &z[iColLen], n-iColLen, ppExpr, pnConsumed);
+  *pnConsumed += iColLen;
+  return rc;
+}
+
+/*
+** The argument is an Fts3Expr structure for a binary operator (any type
+** except an FTSQUERY_PHRASE). Return an integer value representing the
+** precedence of the operator. Lower values have a higher precedence (i.e.
+** group more tightly). For example, in the C language, the == operator
+** groups more tightly than ||, and would therefore have a higher precedence.
+**
+** When using the new fts3 query syntax (when SQLITE_ENABLE_FTS3_PARENTHESIS
+** is defined), the order of the operators in precedence from highest to
+** lowest is:
+**
+**   NEAR
+**   NOT
+**   AND (including implicit ANDs)
+**   OR
+**
+** Note that when using the old query syntax, the OR operator has a higher
+** precedence than the AND operator.
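+**
+** For example, as a direct consequence of these precedence rules: under
+** the old syntax the query [one OR two three] is grouped as
+** [(one OR two) AND three], whereas with SQLITE_ENABLE_FTS3_PARENTHESIS
+** defined the same query is grouped as [one OR (two AND three)].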
+*/ +static int opPrecedence(Fts3Expr *p){ + assert( p->eType!=FTSQUERY_PHRASE ); + if( sqlite3_fts3_enable_parentheses ){ + return p->eType; + }else if( p->eType==FTSQUERY_NEAR ){ + return 1; + }else if( p->eType==FTSQUERY_OR ){ + return 2; + } + assert( p->eType==FTSQUERY_AND ); + return 3; +} + +/* +** Argument ppHead contains a pointer to the current head of a query +** expression tree being parsed. pPrev is the expression node most recently +** inserted into the tree. This function adds pNew, which is always a binary +** operator node, into the expression tree based on the relative precedence +** of pNew and the existing nodes of the tree. This may result in the head +** of the tree changing, in which case *ppHead is set to the new root node. +*/ +static void insertBinaryOperator( + Fts3Expr **ppHead, /* Pointer to the root node of a tree */ + Fts3Expr *pPrev, /* Node most recently inserted into the tree */ + Fts3Expr *pNew /* New binary node to insert into expression tree */ +){ + Fts3Expr *pSplit = pPrev; + while( pSplit->pParent && opPrecedence(pSplit->pParent)<=opPrecedence(pNew) ){ + pSplit = pSplit->pParent; + } + + if( pSplit->pParent ){ + assert( pSplit->pParent->pRight==pSplit ); + pSplit->pParent->pRight = pNew; + pNew->pParent = pSplit->pParent; + }else{ + *ppHead = pNew; + } + pNew->pLeft = pSplit; + pSplit->pParent = pNew; +} + +/* +** Parse the fts3 query expression found in buffer z, length n. This function +** returns either when the end of the buffer is reached or an unmatched +** closing bracket - ')' - is encountered. +** +** If successful, SQLITE_OK is returned, *ppExpr is set to point to the +** parsed form of the expression and *pnConsumed is set to the number of +** bytes read from buffer z. Otherwise, *ppExpr is set to 0 and SQLITE_NOMEM +** (out of memory error) or SQLITE_ERROR (parse error) is returned. +*/ +static int fts3ExprParse( + ParseContext *pParse, /* fts3 query parse context */ + const char *z, int n, /* Text of MATCH query */ + Fts3Expr **ppExpr, /* OUT: Parsed query structure */ + int *pnConsumed /* OUT: Number of bytes consumed */ +){ + Fts3Expr *pRet = 0; + Fts3Expr *pPrev = 0; + Fts3Expr *pNotBranch = 0; /* Only used in legacy parse mode */ + int nIn = n; + const char *zIn = z; + int rc = SQLITE_OK; + int isRequirePhrase = 1; + + while( rc==SQLITE_OK ){ + Fts3Expr *p = 0; + int nByte = 0; + + rc = getNextNode(pParse, zIn, nIn, &p, &nByte); + assert( nByte>0 || (rc!=SQLITE_OK && p==0) ); + if( rc==SQLITE_OK ){ + if( p ){ + int isPhrase; + + if( !sqlite3_fts3_enable_parentheses + && p->eType==FTSQUERY_PHRASE && pParse->isNot + ){ + /* Create an implicit NOT operator. */ + Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr)); + if( !pNot ){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_NOMEM; + goto exprparse_out; + } + pNot->eType = FTSQUERY_NOT; + pNot->pRight = p; + p->pParent = pNot; + if( pNotBranch ){ + pNot->pLeft = pNotBranch; + pNotBranch->pParent = pNot; + } + pNotBranch = pNot; + p = pPrev; + }else{ + int eType = p->eType; + isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft); + + /* The isRequirePhrase variable is set to true if a phrase or + ** an expression contained in parenthesis is required. If a + ** binary operator (AND, OR, NOT or NEAR) is encounted when + ** isRequirePhrase is set, this is a syntax error. + */ + if( !isPhrase && isRequirePhrase ){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_ERROR; + goto exprparse_out; + } + + if( isPhrase && !isRequirePhrase ){ + /* Insert an implicit AND operator. 
*/ + Fts3Expr *pAnd; + assert( pRet && pPrev ); + pAnd = fts3MallocZero(sizeof(Fts3Expr)); + if( !pAnd ){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_NOMEM; + goto exprparse_out; + } + pAnd->eType = FTSQUERY_AND; + insertBinaryOperator(&pRet, pPrev, pAnd); + pPrev = pAnd; + } + + /* This test catches attempts to make either operand of a NEAR + ** operator something other than a phrase. For example, either of + ** the following: + ** + ** (bracketed expression) NEAR phrase + ** phrase NEAR (bracketed expression) + ** + ** Return an error in either case. + */ + if( pPrev && ( + (eType==FTSQUERY_NEAR && !isPhrase && pPrev->eType!=FTSQUERY_PHRASE) + || (eType!=FTSQUERY_PHRASE && isPhrase && pPrev->eType==FTSQUERY_NEAR) + )){ + sqlite3Fts3ExprFree(p); + rc = SQLITE_ERROR; + goto exprparse_out; + } + + if( isPhrase ){ + if( pRet ){ + assert( pPrev && pPrev->pLeft && pPrev->pRight==0 ); + pPrev->pRight = p; + p->pParent = pPrev; + }else{ + pRet = p; + } + }else{ + insertBinaryOperator(&pRet, pPrev, p); + } + isRequirePhrase = !isPhrase; + } + pPrev = p; + } + assert( nByte>0 ); + } + assert( rc!=SQLITE_OK || (nByte>0 && nByte<=nIn) ); + nIn -= nByte; + zIn += nByte; + } + + if( rc==SQLITE_DONE && pRet && isRequirePhrase ){ + rc = SQLITE_ERROR; + } + + if( rc==SQLITE_DONE ){ + rc = SQLITE_OK; + if( !sqlite3_fts3_enable_parentheses && pNotBranch ){ + if( !pRet ){ + rc = SQLITE_ERROR; + }else{ + Fts3Expr *pIter = pNotBranch; + while( pIter->pLeft ){ + pIter = pIter->pLeft; + } + pIter->pLeft = pRet; + pRet->pParent = pIter; + pRet = pNotBranch; + } + } + } + *pnConsumed = n - nIn; + +exprparse_out: + if( rc!=SQLITE_OK ){ + sqlite3Fts3ExprFree(pRet); + sqlite3Fts3ExprFree(pNotBranch); + pRet = 0; + } + *ppExpr = pRet; + return rc; +} + +/* +** Return SQLITE_ERROR if the maximum depth of the expression tree passed +** as the only argument is more than nMaxDepth. +*/ +static int fts3ExprCheckDepth(Fts3Expr *p, int nMaxDepth){ + int rc = SQLITE_OK; + if( p ){ + if( nMaxDepth<0 ){ + rc = SQLITE_TOOBIG; + }else{ + rc = fts3ExprCheckDepth(p->pLeft, nMaxDepth-1); + if( rc==SQLITE_OK ){ + rc = fts3ExprCheckDepth(p->pRight, nMaxDepth-1); + } + } + } + return rc; +} + +/* +** This function attempts to transform the expression tree at (*pp) to +** an equivalent but more balanced form. The tree is modified in place. +** If successful, SQLITE_OK is returned and (*pp) set to point to the +** new root expression node. +** +** nMaxDepth is the maximum allowable depth of the balanced sub-tree. +** +** Otherwise, if an error occurs, an SQLite error code is returned and +** expression (*pp) freed. +*/ +static int fts3ExprBalance(Fts3Expr **pp, int nMaxDepth){ + int rc = SQLITE_OK; /* Return code */ + Fts3Expr *pRoot = *pp; /* Initial root node */ + Fts3Expr *pFree = 0; /* List of free nodes. Linked by pParent. */ + int eType = pRoot->eType; /* Type of node in this tree */ + + if( nMaxDepth==0 ){ + rc = SQLITE_ERROR; + } + + if( rc==SQLITE_OK ){ + if( (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){ + Fts3Expr **apLeaf; + apLeaf = (Fts3Expr **)sqlite3_malloc(sizeof(Fts3Expr *) * nMaxDepth); + if( 0==apLeaf ){ + rc = SQLITE_NOMEM; + }else{ + memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth); + } + + if( rc==SQLITE_OK ){ + int i; + Fts3Expr *p; + + /* Set $p to point to the left-most leaf in the tree of eType nodes. */ + for(p=pRoot; p->eType==eType; p=p->pLeft){ + assert( p->pParent==0 || p->pParent->pLeft==p ); + assert( p->pLeft && p->pRight ); + } + + /* This loop runs once for each leaf in the tree of eType nodes. 
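+        **
+        ** The apLeaf[] array behaves like a binary counter: slot i holds a
+        ** balanced sub-tree containing 2^i leaves. As an illustration of
+        ** the loop below (not additional behavior), balancing the
+        ** left-deep input ((a AND b) AND c) AND d proceeds:
+        **
+        **   leaf a: apLeaf[0] = a
+        **   leaf b: combine -> apLeaf[1] = (a AND b), apLeaf[0] cleared
+        **   leaf c: apLeaf[0] = c
+        **   leaf d: combine -> (c AND d); apLeaf[1] occupied, so combine
+        **           again -> apLeaf[2] = ((a AND b) AND (c AND d))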
+        */
+        while( 1 ){
+          int iLvl;
+          Fts3Expr *pParent = p->pParent;     /* Current parent of p */
+
+          assert( pParent==0 || pParent->pLeft==p );
+          p->pParent = 0;
+          if( pParent ){
+            pParent->pLeft = 0;
+          }else{
+            pRoot = 0;
+          }
+          rc = fts3ExprBalance(&p, nMaxDepth-1);
+          if( rc!=SQLITE_OK ) break;
+
+          for(iLvl=0; p && iLvl<nMaxDepth; iLvl++){
+            if( apLeaf[iLvl]==0 ){
+              apLeaf[iLvl] = p;
+              p = 0;
+            }else{
+              assert( pFree );
+              pFree->pLeft = apLeaf[iLvl];
+              pFree->pRight = p;
+              pFree->pLeft->pParent = pFree;
+              pFree->pRight->pParent = pFree;
+
+              p = pFree;
+              pFree = pFree->pParent;
+              p->pParent = 0;
+              apLeaf[iLvl] = 0;
+            }
+          }
+          if( p ){
+            sqlite3Fts3ExprFree(p);
+            rc = SQLITE_TOOBIG;
+            break;
+          }
+
+          /* If that was the last leaf node, break out of the loop */
+          if( pParent==0 ) break;
+
+          /* Set $p to point to the next leaf in the tree of eType nodes */
+          for(p=pParent->pRight; p->eType==eType; p=p->pLeft);
+
+          /* Remove pParent from the original tree. */
+          assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent );
+          pParent->pRight->pParent = pParent->pParent;
+          if( pParent->pParent ){
+            pParent->pParent->pLeft = pParent->pRight;
+          }else{
+            assert( pParent==pRoot );
+            pRoot = pParent->pRight;
+          }
+
+          /* Link pParent into the free node list. It will be used as an
+          ** internal node of the new tree. */
+          pParent->pParent = pFree;
+          pFree = pParent;
+        }
+
+        if( rc==SQLITE_OK ){
+          p = 0;
+          for(i=0; i<nMaxDepth; i++){
+            if( apLeaf[i] ){
+              if( p==0 ){
+                p = apLeaf[i];
+                p->pParent = 0;
+              }else{
+                assert( pFree!=0 );
+                pFree->pRight = p;
+                pFree->pLeft = apLeaf[i];
+                pFree->pLeft->pParent = pFree;
+                pFree->pRight->pParent = pFree;
+
+                p = pFree;
+                pFree = pFree->pParent;
+                p->pParent = 0;
+              }
+            }
+          }
+          pRoot = p;
+        }else{
+          /* An error occurred. Delete the contents of the apLeaf[] array
+          ** and pFree list. Everything else is cleaned up by the call to
+          ** sqlite3Fts3ExprFree(pRoot) below. */
+          Fts3Expr *pDel;
+          for(i=0; i<nMaxDepth; i++){
+            sqlite3Fts3ExprFree(apLeaf[i]);
+          }
+          while( (pDel=pFree)!=0 ){
+            pFree = pDel->pParent;
+            sqlite3_free(pDel);
+          }
+        }
+
+        assert( pFree==0 );
+        sqlite3_free( apLeaf );
+      }
+    }else if( eType==FTSQUERY_NOT ){
+      Fts3Expr *pLeft = pRoot->pLeft;
+      Fts3Expr *pRight = pRoot->pRight;
+
+      pRoot->pLeft = 0;
+      pRoot->pRight = 0;
+      pLeft->pParent = 0;
+      pRight->pParent = 0;
+
+      rc = fts3ExprBalance(&pLeft, nMaxDepth-1);
+      if( rc==SQLITE_OK ){
+        rc = fts3ExprBalance(&pRight, nMaxDepth-1);
+      }
+
+      if( rc!=SQLITE_OK ){
+        sqlite3Fts3ExprFree(pRight);
+        sqlite3Fts3ExprFree(pLeft);
+      }else{
+        assert( pLeft && pRight );
+        pRoot->pLeft = pLeft;
+        pLeft->pParent = pRoot;
+        pRoot->pRight = pRight;
+        pRight->pParent = pRoot;
+      }
+    }
+  }
+
+  if( rc!=SQLITE_OK ){
+    sqlite3Fts3ExprFree(pRoot);
+    pRoot = 0;
+  }
+  *pp = pRoot;
+  return rc;
+}
+
+/*
+** This function is similar to sqlite3Fts3ExprParse(), with the following
+** differences:
+**
+**   1. It does not do expression rebalancing.
+**   2. It does not check that the expression does not exceed the
+**      maximum allowable depth.
+**   3. Even if it fails, *ppExpr may still be set to point to an
+**      expression tree. It should be deleted using sqlite3Fts3ExprFree()
+**      in this case.
+*/ +static int fts3ExprParseUnbalanced( + sqlite3_tokenizer *pTokenizer, /* Tokenizer module */ + int iLangid, /* Language id for tokenizer */ + char **azCol, /* Array of column names for fts3 table */ + int bFts4, /* True to allow FTS4-only syntax */ + int nCol, /* Number of entries in azCol[] */ + int iDefaultCol, /* Default column to query */ + const char *z, int n, /* Text of MATCH query */ + Fts3Expr **ppExpr /* OUT: Parsed query structure */ +){ + int nParsed; + int rc; + ParseContext sParse; + + memset(&sParse, 0, sizeof(ParseContext)); + sParse.pTokenizer = pTokenizer; + sParse.iLangid = iLangid; + sParse.azCol = (const char **)azCol; + sParse.nCol = nCol; + sParse.iDefaultCol = iDefaultCol; + sParse.bFts4 = bFts4; + if( z==0 ){ + *ppExpr = 0; + return SQLITE_OK; + } + if( n<0 ){ + n = (int)strlen(z); + } + rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed); + assert( rc==SQLITE_OK || *ppExpr==0 ); + + /* Check for mismatched parenthesis */ + if( rc==SQLITE_OK && sParse.nNest ){ + rc = SQLITE_ERROR; + } + + return rc; +} + +/* +** Parameters z and n contain a pointer to and length of a buffer containing +** an fts3 query expression, respectively. This function attempts to parse the +** query expression and create a tree of Fts3Expr structures representing the +** parsed expression. If successful, *ppExpr is set to point to the head +** of the parsed expression tree and SQLITE_OK is returned. If an error +** occurs, either SQLITE_NOMEM (out-of-memory error) or SQLITE_ERROR (parse +** error) is returned and *ppExpr is set to 0. +** +** If parameter n is a negative number, then z is assumed to point to a +** nul-terminated string and the length is determined using strlen(). +** +** The first parameter, pTokenizer, is passed the fts3 tokenizer module to +** use to normalize query tokens while parsing the expression. The azCol[] +** array, which is assumed to contain nCol entries, should contain the names +** of each column in the target fts3 table, in order from left to right. +** Column names must be nul-terminated strings. +** +** The iDefaultCol parameter should be passed the index of the table column +** that appears on the left-hand-side of the MATCH operator (the default +** column to match against for tokens for which a column name is not explicitly +** specified as part of the query string), or -1 if tokens may by default +** match any table column. +*/ +SQLITE_PRIVATE int sqlite3Fts3ExprParse( + sqlite3_tokenizer *pTokenizer, /* Tokenizer module */ + int iLangid, /* Language id for tokenizer */ + char **azCol, /* Array of column names for fts3 table */ + int bFts4, /* True to allow FTS4-only syntax */ + int nCol, /* Number of entries in azCol[] */ + int iDefaultCol, /* Default column to query */ + const char *z, int n, /* Text of MATCH query */ + Fts3Expr **ppExpr, /* OUT: Parsed query structure */ + char **pzErr /* OUT: Error message (sqlite3_malloc) */ +){ + int rc = fts3ExprParseUnbalanced( + pTokenizer, iLangid, azCol, bFts4, nCol, iDefaultCol, z, n, ppExpr + ); + + /* Rebalance the expression. And check that its depth does not exceed + ** SQLITE_FTS3_MAX_EXPR_DEPTH. 
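+  ** A query made up of N space-separated terms arrives here as a chain of
+  ** N-1 implicit AND nodes, so before balancing the tree depth grows
+  ** linearly with query length; the rebalancing and the explicit cap
+  ** below keep recursive walks over the tree (fts3ExprCheckDepth()
+  ** itself is one) within a bounded stack depth.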
*/ + if( rc==SQLITE_OK && *ppExpr ){ + rc = fts3ExprBalance(ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH); + if( rc==SQLITE_OK ){ + rc = fts3ExprCheckDepth(*ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH); + } + } + + if( rc!=SQLITE_OK ){ + sqlite3Fts3ExprFree(*ppExpr); + *ppExpr = 0; + if( rc==SQLITE_TOOBIG ){ + sqlite3Fts3ErrMsg(pzErr, + "FTS expression tree is too large (maximum depth %d)", + SQLITE_FTS3_MAX_EXPR_DEPTH + ); + rc = SQLITE_ERROR; + }else if( rc==SQLITE_ERROR ){ + sqlite3Fts3ErrMsg(pzErr, "malformed MATCH expression: [%s]", z); + } + } + + return rc; +} + +/* +** Free a single node of an expression tree. +*/ +static void fts3FreeExprNode(Fts3Expr *p){ + assert( p->eType==FTSQUERY_PHRASE || p->pPhrase==0 ); + sqlite3Fts3EvalPhraseCleanup(p->pPhrase); + sqlite3_free(p->aMI); + sqlite3_free(p); +} + +/* +** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse(). +** +** This function would be simpler if it recursively called itself. But +** that would mean passing a sufficiently large expression to ExprParse() +** could cause a stack overflow. +*/ +SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *pDel){ + Fts3Expr *p; + assert( pDel==0 || pDel->pParent==0 ); + for(p=pDel; p && (p->pLeft||p->pRight); p=(p->pLeft ? p->pLeft : p->pRight)){ + assert( p->pParent==0 || p==p->pParent->pRight || p==p->pParent->pLeft ); + } + while( p ){ + Fts3Expr *pParent = p->pParent; + fts3FreeExprNode(p); + if( pParent && p==pParent->pLeft && pParent->pRight ){ + p = pParent->pRight; + while( p && (p->pLeft || p->pRight) ){ + assert( p==p->pParent->pRight || p==p->pParent->pLeft ); + p = (p->pLeft ? p->pLeft : p->pRight); + } + }else{ + p = pParent; + } + } +} + +/**************************************************************************** +***************************************************************************** +** Everything after this point is just test code. +*/ + +#ifdef SQLITE_TEST + +/* #include */ + +/* +** Function to query the hash-table of tokenizers (see README.tokenizers). +*/ +static int queryTestTokenizer( + sqlite3 *db, + const char *zName, + const sqlite3_tokenizer_module **pp +){ + int rc; + sqlite3_stmt *pStmt; + const char zSql[] = "SELECT fts3_tokenizer(?)"; + + *pp = 0; + rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ + memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); + } + } + + return sqlite3_finalize(pStmt); +} + +/* +** Return a pointer to a buffer containing a text representation of the +** expression passed as the first argument. The buffer is obtained from +** sqlite3_malloc(). It is the responsibility of the caller to use +** sqlite3_free() to release the memory. If an OOM condition is encountered, +** NULL is returned. +** +** If the second argument is not NULL, then its contents are prepended to +** the returned expression text and then freed using sqlite3_free(). 
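+**
+** As a sketch of the format (not verbatim output of any test), an
+** expression parsed from the query [one two] on a single-column table
+** might be rendered as:
+**
+**   AND {PHRASE 0 0 one} {PHRASE 0 0 two}
+**
+** where the integers after PHRASE are the column number and a literal
+** zero, and a trailing '+' on a token marks a prefix query such as [thr*].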
+*/
+static char *exprToString(Fts3Expr *pExpr, char *zBuf){
+  if( pExpr==0 ){
+    return sqlite3_mprintf("");
+  }
+  switch( pExpr->eType ){
+    case FTSQUERY_PHRASE: {
+      Fts3Phrase *pPhrase = pExpr->pPhrase;
+      int i;
+      zBuf = sqlite3_mprintf(
+          "%zPHRASE %d 0", zBuf, pPhrase->iColumn);
+      for(i=0; zBuf && i<pPhrase->nToken; i++){
+        zBuf = sqlite3_mprintf("%z %.*s%s", zBuf,
+            pPhrase->aToken[i].n, pPhrase->aToken[i].z,
+            (pPhrase->aToken[i].isPrefix?"+":"")
+        );
+      }
+      return zBuf;
+    }
+
+    case FTSQUERY_NEAR:
+      zBuf = sqlite3_mprintf("%zNEAR/%d ", zBuf, pExpr->nNear);
+      break;
+    case FTSQUERY_NOT:
+      zBuf = sqlite3_mprintf("%zNOT ", zBuf);
+      break;
+    case FTSQUERY_AND:
+      zBuf = sqlite3_mprintf("%zAND ", zBuf);
+      break;
+    case FTSQUERY_OR:
+      zBuf = sqlite3_mprintf("%zOR ", zBuf);
+      break;
+  }
+
+  if( zBuf ) zBuf = sqlite3_mprintf("%z{", zBuf);
+  if( zBuf ) zBuf = exprToString(pExpr->pLeft, zBuf);
+  if( zBuf ) zBuf = sqlite3_mprintf("%z} {", zBuf);
+
+  if( zBuf ) zBuf = exprToString(pExpr->pRight, zBuf);
+  if( zBuf ) zBuf = sqlite3_mprintf("%z}", zBuf);
+
+  return zBuf;
+}
+
+/*
+** This is the implementation of a scalar SQL function used to test the
+** expression parser. It should be called as follows:
+**
+**   fts3_exprtest(<tokenizer>, <expr>, <column 1>, ...);
+**
+** The first argument, <tokenizer>, is the name of the fts3 tokenizer used
+** to parse the query expression (see README.tokenizers). The second argument
+** is the query expression to parse. Each subsequent argument is the name
+** of a column of the fts3 table that the query expression may refer to.
+** For example:
+**
+**   SELECT fts3_exprtest('simple', 'Bill col2:Bloggs', 'col1', 'col2');
+*/
+static void fts3ExprTest(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  sqlite3_tokenizer_module const *pModule = 0;
+  sqlite3_tokenizer *pTokenizer = 0;
+  int rc;
+  char **azCol = 0;
+  const char *zExpr;
+  int nExpr;
+  int nCol;
+  int ii;
+  Fts3Expr *pExpr;
+  char *zBuf = 0;
+  sqlite3 *db = sqlite3_context_db_handle(context);
+
+  if( argc<3 ){
+    sqlite3_result_error(context,
+        "Usage: fts3_exprtest(tokenizer, expr, col1, ...)", -1
+    );
+    return;
+  }
+
+  rc = queryTestTokenizer(db,
+      (const char *)sqlite3_value_text(argv[0]), &pModule);
+  if( rc==SQLITE_NOMEM ){
+    sqlite3_result_error_nomem(context);
+    goto exprtest_out;
+  }else if( !pModule ){
+    sqlite3_result_error(context, "No such tokenizer module", -1);
+    goto exprtest_out;
+  }
+
+  rc = pModule->xCreate(0, 0, &pTokenizer);
+  assert( rc==SQLITE_NOMEM || rc==SQLITE_OK );
+  if( rc==SQLITE_NOMEM ){
+    sqlite3_result_error_nomem(context);
+    goto exprtest_out;
+  }
+  pTokenizer->pModule = pModule;
+
+  zExpr = (const char *)sqlite3_value_text(argv[1]);
+  nExpr = sqlite3_value_bytes(argv[1]);
+  nCol = argc-2;
+  azCol = (char **)sqlite3_malloc(nCol*sizeof(char *));
+  if( !azCol ){
+    sqlite3_result_error_nomem(context);
+    goto exprtest_out;
+  }
+  for(ii=0; ii<nCol; ii++){
+    azCol[ii] = (char *)sqlite3_value_text(argv[ii+2]);
+  }
+
+  if( sqlite3_user_data(context) ){
+    char *zDummy = 0;
+    rc = sqlite3Fts3ExprParse(
+        pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr, &zDummy
+    );
+    assert( rc==SQLITE_OK || pExpr==0 );
+    sqlite3_free(zDummy);
+  }else{
+    rc = fts3ExprParseUnbalanced(
+        pTokenizer, 0, azCol, 0, nCol, nCol, zExpr, nExpr, &pExpr
+    );
+  }
+
+  if( rc==SQLITE_NOMEM ){
+    sqlite3_result_error_nomem(context);
+    goto exprtest_out;
+  }else if( rc==SQLITE_OK ){
+    if( pExpr==0 ){
+      sqlite3_result_text(context, "", 0, SQLITE_STATIC);
+    }else{
+      zBuf = exprToString(pExpr, 0);
+      if( zBuf==0 ){
+        sqlite3_result_error_nomem(context);
+      }else{
+        sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
+        sqlite3_free(zBuf);
+      }
+    }
+    sqlite3Fts3ExprFree(pExpr);
+  }else{
+    sqlite3_result_error(context, "Error parsing expression", -1);
+  }
+
+exprtest_out:
+  if( pModule && pTokenizer ){
+    pModule->xDestroy(pTokenizer);
+  }
+  sqlite3_free(azCol);
+}
+
+/*
+** Register the query expression parser test function fts3_exprtest()
+** with database connection db.
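+** The same C function, fts3ExprTest(), is registered twice below: once as
+** fts3_exprtest and once as fts3_exprtest_rebalance with a non-NULL user
+** data pointer, so that both the unbalanced and the rebalanced parsing
+** paths can be driven from SQL. For example (illustrative only):
+**
+**   SELECT fts3_exprtest('simple', 'x OR y z', 'col1');
+**   SELECT fts3_exprtest_rebalance('simple', 'x OR y z', 'col1');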
+*/ +SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3* db){ + int rc = sqlite3_create_function( + db, "fts3_exprtest", -1, SQLITE_UTF8, 0, fts3ExprTest, 0, 0 + ); + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function(db, "fts3_exprtest_rebalance", + -1, SQLITE_UTF8, (void *)1, fts3ExprTest, 0, 0 + ); + } + return rc; +} + +#endif +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ + +/************** End of fts3_expr.c *******************************************/ +/************** Begin file fts3_hash.c ***************************************/ +/* +** 2001 September 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This is the implementation of generic hash-tables used in SQLite. +** We've modified it slightly to serve as a standalone hash table +** implementation for the full-text indexing module. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* #include */ +/* #include */ +/* #include */ + +/* #include "fts3_hash.h" */ + +/* +** Malloc and Free functions +*/ +static void *fts3HashMalloc(int n){ + void *p = sqlite3_malloc(n); + if( p ){ + memset(p, 0, n); + } + return p; +} +static void fts3HashFree(void *p){ + sqlite3_free(p); +} + +/* Turn bulk memory into a hash table object by initializing the +** fields of the Hash structure. +** +** "pNew" is a pointer to the hash table that is to be initialized. +** keyClass is one of the constants +** FTS3_HASH_BINARY or FTS3_HASH_STRING. The value of keyClass +** determines what kind of key the hash table will use. "copyKey" is +** true if the hash table should make its own private copy of keys and +** false if it should just use the supplied pointer. +*/ +SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey){ + assert( pNew!=0 ); + assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY ); + pNew->keyClass = keyClass; + pNew->copyKey = copyKey; + pNew->first = 0; + pNew->count = 0; + pNew->htsize = 0; + pNew->ht = 0; +} + +/* Remove all entries from a hash table. Reclaim all memory. +** Call this routine to delete a hash table or to reset a hash table +** to the empty state. 
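+**
+** A minimal usage sketch (the variable names here are illustrative only):
+**
+**   Fts3Hash h;
+**   sqlite3Fts3HashInit(&h, FTS3_HASH_STRING, 1);
+**   sqlite3Fts3HashInsert(&h, "key", 4, pData);   (insert or replace)
+**   pData = sqlite3Fts3HashFind(&h, "key", 4);    (lookup)
+**   sqlite3Fts3HashClear(&h);                     (back to the empty state)
+**
+** Note that, as elsewhere in this file, the key length 4 includes the
+** nul-terminator of the string "key".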
+*/ +SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash *pH){ + Fts3HashElem *elem; /* For looping over all elements of the table */ + + assert( pH!=0 ); + elem = pH->first; + pH->first = 0; + fts3HashFree(pH->ht); + pH->ht = 0; + pH->htsize = 0; + while( elem ){ + Fts3HashElem *next_elem = elem->next; + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree(elem); + elem = next_elem; + } + pH->count = 0; +} + +/* +** Hash and comparison functions when the mode is FTS3_HASH_STRING +*/ +static int fts3StrHash(const void *pKey, int nKey){ + const char *z = (const char *)pKey; + unsigned h = 0; + if( nKey<=0 ) nKey = (int) strlen(z); + while( nKey > 0 ){ + h = (h<<3) ^ h ^ *z++; + nKey--; + } + return (int)(h & 0x7fffffff); +} +static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return strncmp((const char*)pKey1,(const char*)pKey2,n1); +} + +/* +** Hash and comparison functions when the mode is FTS3_HASH_BINARY +*/ +static int fts3BinHash(const void *pKey, int nKey){ + int h = 0; + const char *z = (const char *)pKey; + while( nKey-- > 0 ){ + h = (h<<3) ^ h ^ *(z++); + } + return h & 0x7fffffff; +} +static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){ + if( n1!=n2 ) return 1; + return memcmp(pKey1,pKey2,n1); +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** The C syntax in this function definition may be unfamilar to some +** programmers, so we provide the following additional explanation: +** +** The name of the function is "ftsHashFunction". The function takes a +** single parameter "keyClass". The return value of ftsHashFunction() +** is a pointer to another function. Specifically, the return value +** of ftsHashFunction() is a pointer to a function that takes two parameters +** with types "const void*" and "int" and returns an "int". +*/ +static int (*ftsHashFunction(int keyClass))(const void*,int){ + if( keyClass==FTS3_HASH_STRING ){ + return &fts3StrHash; + }else{ + assert( keyClass==FTS3_HASH_BINARY ); + return &fts3BinHash; + } +} + +/* +** Return a pointer to the appropriate hash function given the key class. +** +** For help in interpreted the obscure C code in the function definition, +** see the header comment on the previous function. +*/ +static int (*ftsCompareFunction(int keyClass))(const void*,int,const void*,int){ + if( keyClass==FTS3_HASH_STRING ){ + return &fts3StrCompare; + }else{ + assert( keyClass==FTS3_HASH_BINARY ); + return &fts3BinCompare; + } +} + +/* Link an element into the hash table +*/ +static void fts3HashInsertElement( + Fts3Hash *pH, /* The complete hash table */ + struct _fts3ht *pEntry, /* The entry into which pNew is inserted */ + Fts3HashElem *pNew /* The element to be inserted */ +){ + Fts3HashElem *pHead; /* First element already in pEntry */ + pHead = pEntry->chain; + if( pHead ){ + pNew->next = pHead; + pNew->prev = pHead->prev; + if( pHead->prev ){ pHead->prev->next = pNew; } + else { pH->first = pNew; } + pHead->prev = pNew; + }else{ + pNew->next = pH->first; + if( pH->first ){ pH->first->prev = pNew; } + pNew->prev = 0; + pH->first = pNew; + } + pEntry->count++; + pEntry->chain = pNew; +} + + +/* Resize the hash table so that it cantains "new_size" buckets. +** "new_size" must be a power of 2. The hash table might fail +** to resize if sqliteMalloc() fails. +** +** Return non-zero if a memory allocation error occurs. 
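+**
+** The caller, sqlite3Fts3HashInsert(), grows the table from 0 to 8
+** buckets on the first insert and doubles it whenever the element count
+** reaches the bucket count, so "new_size" is always a power of 2 here,
+** as the assert() below checks.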
+*/ +static int fts3Rehash(Fts3Hash *pH, int new_size){ + struct _fts3ht *new_ht; /* The new hash table */ + Fts3HashElem *elem, *next_elem; /* For looping over existing elements */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( (new_size & (new_size-1))==0 ); + new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) ); + if( new_ht==0 ) return 1; + fts3HashFree(pH->ht); + pH->ht = new_ht; + pH->htsize = new_size; + xHash = ftsHashFunction(pH->keyClass); + for(elem=pH->first, pH->first=0; elem; elem = next_elem){ + int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); + next_elem = elem->next; + fts3HashInsertElement(pH, &new_ht[h], elem); + } + return 0; +} + +/* This function (for internal use only) locates an element in an +** hash table that matches the given key. The hash for this key has +** already been computed and is passed as the 4th parameter. +*/ +static Fts3HashElem *fts3FindElementByHash( + const Fts3Hash *pH, /* The pH to be searched */ + const void *pKey, /* The key we are searching for */ + int nKey, + int h /* The hash for this key. */ +){ + Fts3HashElem *elem; /* Used to loop thru the element list */ + int count; /* Number of elements left to test */ + int (*xCompare)(const void*,int,const void*,int); /* comparison function */ + + if( pH->ht ){ + struct _fts3ht *pEntry = &pH->ht[h]; + elem = pEntry->chain; + count = pEntry->count; + xCompare = ftsCompareFunction(pH->keyClass); + while( count-- && elem ){ + if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ + return elem; + } + elem = elem->next; + } + } + return 0; +} + +/* Remove a single entry from the hash table given a pointer to that +** element and a hash on the element's key. +*/ +static void fts3RemoveElementByHash( + Fts3Hash *pH, /* The pH containing "elem" */ + Fts3HashElem* elem, /* The element to be removed from the pH */ + int h /* Hash value for the element */ +){ + struct _fts3ht *pEntry; + if( elem->prev ){ + elem->prev->next = elem->next; + }else{ + pH->first = elem->next; + } + if( elem->next ){ + elem->next->prev = elem->prev; + } + pEntry = &pH->ht[h]; + if( pEntry->chain==elem ){ + pEntry->chain = elem->next; + } + pEntry->count--; + if( pEntry->count<=0 ){ + pEntry->chain = 0; + } + if( pH->copyKey && elem->pKey ){ + fts3HashFree(elem->pKey); + } + fts3HashFree( elem ); + pH->count--; + if( pH->count<=0 ){ + assert( pH->first==0 ); + assert( pH->count==0 ); + fts3HashClear(pH); + } +} + +SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem( + const Fts3Hash *pH, + const void *pKey, + int nKey +){ + int h; /* A hash on key */ + int (*xHash)(const void*,int); /* The hash function */ + + if( pH==0 || pH->ht==0 ) return 0; + xHash = ftsHashFunction(pH->keyClass); + assert( xHash!=0 ); + h = (*xHash)(pKey,nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + return fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1)); +} + +/* +** Attempt to locate an element of the hash table pH with a key +** that matches pKey,nKey. Return the data for this element if it is +** found, or NULL if there is no match. +*/ +SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash *pH, const void *pKey, int nKey){ + Fts3HashElem *pElem; /* The element that matches key (if any) */ + + pElem = sqlite3Fts3HashFindElem(pH, pKey, nKey); + return pElem ? pElem->data : 0; +} + +/* Insert an element into the hash table pH. The key is pKey,nKey +** and the data is "data". +** +** If no element exists with a matching key, then a new +** element is created. 
A copy of the key is made if the copyKey +** flag is set. NULL is returned. +** +** If another element already exists with the same key, then the +** new data replaces the old data and the old data is returned. +** The key is not copied in this instance. If a malloc fails, then +** the new data is returned and the hash table is unchanged. +** +** If the "data" parameter to this function is NULL, then the +** element corresponding to "key" is removed from the hash table. +*/ +SQLITE_PRIVATE void *sqlite3Fts3HashInsert( + Fts3Hash *pH, /* The hash table to insert into */ + const void *pKey, /* The key */ + int nKey, /* Number of bytes in the key */ + void *data /* The data */ +){ + int hraw; /* Raw hash value of the key */ + int h; /* the hash of the key modulo hash table size */ + Fts3HashElem *elem; /* Used to loop thru the element list */ + Fts3HashElem *new_elem; /* New element added to the pH */ + int (*xHash)(const void*,int); /* The hash function */ + + assert( pH!=0 ); + xHash = ftsHashFunction(pH->keyClass); + assert( xHash!=0 ); + hraw = (*xHash)(pKey, nKey); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + elem = fts3FindElementByHash(pH,pKey,nKey,h); + if( elem ){ + void *old_data = elem->data; + if( data==0 ){ + fts3RemoveElementByHash(pH,elem,h); + }else{ + elem->data = data; + } + return old_data; + } + if( data==0 ) return 0; + if( (pH->htsize==0 && fts3Rehash(pH,8)) + || (pH->count>=pH->htsize && fts3Rehash(pH, pH->htsize*2)) + ){ + pH->count = 0; + return data; + } + assert( pH->htsize>0 ); + new_elem = (Fts3HashElem*)fts3HashMalloc( sizeof(Fts3HashElem) ); + if( new_elem==0 ) return data; + if( pH->copyKey && pKey!=0 ){ + new_elem->pKey = fts3HashMalloc( nKey ); + if( new_elem->pKey==0 ){ + fts3HashFree(new_elem); + return data; + } + memcpy((void*)new_elem->pKey, pKey, nKey); + }else{ + new_elem->pKey = (void*)pKey; + } + new_elem->nKey = nKey; + pH->count++; + assert( pH->htsize>0 ); + assert( (pH->htsize & (pH->htsize-1))==0 ); + h = hraw & (pH->htsize-1); + fts3HashInsertElement(pH, &pH->ht[h], new_elem); + new_elem->data = data; + return 0; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ + +/************** End of fts3_hash.c *******************************************/ +/************** Begin file fts3_porter.c *************************************/ +/* +** 2006 September 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Implementation of the full-text-search tokenizer that implements +** a Porter stemmer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). 
+*/ +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* #include */ +/* #include */ +/* #include */ +/* #include */ + +/* #include "fts3_tokenizer.h" */ + +/* +** Class derived from sqlite3_tokenizer +*/ +typedef struct porter_tokenizer { + sqlite3_tokenizer base; /* Base class */ +} porter_tokenizer; + +/* +** Class derived from sqlite3_tokenizer_cursor +*/ +typedef struct porter_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *zInput; /* input we are tokenizing */ + int nInput; /* size of the input */ + int iOffset; /* current position in zInput */ + int iToken; /* index of next token to be returned */ + char *zToken; /* storage for current token */ + int nAllocated; /* space allocated to zToken buffer */ +} porter_tokenizer_cursor; + + +/* +** Create a new tokenizer instance. +*/ +static int porterCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + porter_tokenizer *t; + + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); + + t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t)); + if( t==NULL ) return SQLITE_NOMEM; + memset(t, 0, sizeof(*t)); + *ppTokenizer = &t->base; + return SQLITE_OK; +} + +/* +** Destroy a tokenizer +*/ +static int porterDestroy(sqlite3_tokenizer *pTokenizer){ + sqlite3_free(pTokenizer); + return SQLITE_OK; +} + +/* +** Prepare to begin tokenizing a particular string. The input +** string to be tokenized is zInput[0..nInput-1]. A cursor +** used to incrementally tokenize this string is returned in +** *ppCursor. +*/ +static int porterOpen( + sqlite3_tokenizer *pTokenizer, /* The tokenizer */ + const char *zInput, int nInput, /* String to be tokenized */ + sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ +){ + porter_tokenizer_cursor *c; + + UNUSED_PARAMETER(pTokenizer); + + c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); + if( c==NULL ) return SQLITE_NOMEM; + + c->zInput = zInput; + if( zInput==0 ){ + c->nInput = 0; + }else if( nInput<0 ){ + c->nInput = (int)strlen(zInput); + }else{ + c->nInput = nInput; + } + c->iOffset = 0; /* start tokenizing at the beginning */ + c->iToken = 0; + c->zToken = NULL; /* no space allocated, yet. */ + c->nAllocated = 0; + + *ppCursor = &c->base; + return SQLITE_OK; +} + +/* +** Close a tokenization cursor previously opened by a call to +** porterOpen() above. +*/ +static int porterClose(sqlite3_tokenizer_cursor *pCursor){ + porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; + sqlite3_free(c->zToken); + sqlite3_free(c); + return SQLITE_OK; +} +/* +** Vowel or consonant +*/ +static const char cType[] = { + 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 1 +}; + +/* +** isConsonant() and isVowel() determine if their first character in +** the string they point to is a consonant or a vowel, according +** to Porter ruls. +** +** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. +** 'Y' is a consonant unless it follows another consonant, +** in which case it is a vowel. +** +** In these routine, the letters are in reverse order. So the 'y' rule +** is that 'y' is a consonant unless it is followed by another +** consonent. 
+*/ +static int isVowel(const char*); +static int isConsonant(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return j; + return z[1]==0 || isVowel(z + 1); +} +static int isVowel(const char *z){ + int j; + char x = *z; + if( x==0 ) return 0; + assert( x>='a' && x<='z' ); + j = cType[x-'a']; + if( j<2 ) return 1-j; + return isConsonant(z + 1); +} + +/* +** Let any sequence of one or more vowels be represented by V and let +** C be sequence of one or more consonants. Then every word can be +** represented as: +** +** [C] (VC){m} [V] +** +** In prose: A word is an optional consonant followed by zero or +** vowel-consonant pairs followed by an optional vowel. "m" is the +** number of vowel consonant pairs. This routine computes the value +** of m for the first i bytes of a word. +** +** Return true if the m-value for z is 1 or more. In other words, +** return true if z contains at least one vowel that is followed +** by a consonant. +** +** In this routine z[] is in reverse order. So we are really looking +** for an instance of a consonant followed by a vowel. +*/ +static int m_gt_0(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* Like mgt0 above except we are looking for a value of m which is +** exactly 1 +*/ +static int m_eq_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 1; + while( isConsonant(z) ){ z++; } + return *z==0; +} + +/* Like mgt0 above except we are looking for a value of m>1 instead +** or m>0 +*/ +static int m_gt_1(const char *z){ + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + if( *z==0 ) return 0; + while( isVowel(z) ){ z++; } + if( *z==0 ) return 0; + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if there is a vowel anywhere within z[0..n-1] +*/ +static int hasVowel(const char *z){ + while( isConsonant(z) ){ z++; } + return *z!=0; +} + +/* +** Return TRUE if the word ends in a double consonant. +** +** The text is reversed here. So we are really looking at +** the first two characters of z[]. +*/ +static int doubleConsonant(const char *z){ + return isConsonant(z) && z[0]==z[1]; +} + +/* +** Return TRUE if the word ends with three letters which +** are consonant-vowel-consonent and where the final consonant +** is not 'w', 'x', or 'y'. +** +** The word is reversed here. So we are really checking the +** first three letters and the first one cannot be in [wxy]. +*/ +static int star_oh(const char *z){ + return + isConsonant(z) && + z[0]!='w' && z[0]!='x' && z[0]!='y' && + isVowel(z+1) && + isConsonant(z+2); +} + +/* +** If the word ends with zFrom and xCond() is true for the stem +** of the word that preceeds the zFrom ending, then change the +** ending to zTo. +** +** The input word *pz and zFrom are both in reverse order. zTo +** is in normal order. +** +** Return TRUE if zFrom matches. Return FALSE if zFrom does not +** match. Not that TRUE is returned even if xCond() fails and +** no substitution occurs. +*/ +static int stem( + char **pz, /* The word being stemmed (Reversed) */ + const char *zFrom, /* If the ending matches this... (Reversed) */ + const char *zTo, /* ... 
change the ending to this (not reversed) */
+  int (*xCond)(const char*)   /* Condition that must be true */
+){
+  char *z = *pz;
+  while( *zFrom && *zFrom==*z ){ z++; zFrom++; }
+  if( *zFrom!=0 ) return 0;
+  if( xCond && !xCond(z) ) return 1;
+  while( *zTo ){
+    *(--z) = *(zTo++);
+  }
+  *pz = z;
+  return 1;
+}
+
+/*
+** This is the fallback stemmer used when the porter stemmer is
+** inappropriate. The input word is copied into the output with
+** US-ASCII case folding. If the input word is too long (more
+** than 20 bytes if it contains no digits or more than 6 bytes if
+** it contains digits) then the word is truncated to 20 or 6 bytes
+** by taking 10 or 3 bytes from the beginning and end.
+*/
+static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+  int i, mx, j;
+  int hasDigit = 0;
+  for(i=0; i<nIn; i++){
+    char c = zIn[i];
+    if( c>='A' && c<='Z' ){
+      zOut[i] = c - 'A' + 'a';
+    }else{
+      if( c>='0' && c<='9' ) hasDigit = 1;
+      zOut[i] = c;
+    }
+  }
+  mx = hasDigit ? 3 : 10;
+  if( nIn>mx*2 ){
+    for(j=mx, i=nIn-mx; i<nIn; i++, j++){
+      zOut[j] = zOut[i];
+    }
+    i = j;
+  }
+  zOut[i] = 0;
+  *pnOut = i;
+}
+
+/*
+** Stem the input word zIn[0..nIn-1]. Store the output in zOut.
+** zOut is at least big enough to hold nIn bytes. Write the actual
+** size of the output word (exclusive of the '\0' terminator) into *pnOut.
+**
+** Any upper-case characters in the US-ASCII character set ([A-Z])
+** are converted to lower case. Upper-case UTF characters are
+** unchanged.
+**
+** Words that are longer than about 20 bytes are stemmed by retaining
+** a few bytes from the beginning and the end of the word. If the
+** word contains digits, 3 bytes are taken from the beginning and
+** 3 bytes from the end. For long words without digits, 10 bytes
+** are taken from each end. US-ASCII case folding still applies.
+**
+** If the input word contains no digits but does contain characters not
+** in [a-zA-Z] then no stemming is attempted and this routine just
+** copies the input to the output with US-ASCII case folding.
+**
+** Stemming never increases the length of the word. So there is
+** no chance of overflowing the zOut buffer.
+*/
+static void porter_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){
+  int i, j;
+  char zReverse[28];
+  char *z, *z2;
+  if( nIn<3 || nIn>=(int)sizeof(zReverse)-7 ){
+    /* The word is too big or too small for the porter stemmer.
+    ** Fallback to the copy stemmer */
+    copy_stemmer(zIn, nIn, zOut, pnOut);
+    return;
+  }
+  for(i=0, j=sizeof(zReverse)-6; i<nIn; i++, j--){
+    char c = zIn[i];
+    if( c>='A' && c<='Z' ){
+      zReverse[j] = c + 'a' - 'A';
+    }else if( c>='a' && c<='z' ){
+      zReverse[j] = c;
+    }else{
+      /* The use of a character not in [a-zA-Z] means that we fallback
+      ** to the copy stemmer */
+      copy_stemmer(zIn, nIn, zOut, pnOut);
+      return;
+    }
+  }
+  memset(&zReverse[sizeof(zReverse)-5], 0, 5);
+  z = &zReverse[j+1];
+
+
+  /* Step 1a */
+  if( z[0]=='s' ){
+    if(
+     !stem(&z, "sess", "ss", 0) &&
+     !stem(&z, "sei", "i", 0)  &&
+     !stem(&z, "ss", "ss", 0)
+    ){
+      z++;
+    }
+  }
+
+  /* Step 1b */
+  z2 = z;
+  if( stem(&z, "dee", "ee", m_gt_0) ){
+    /* Do nothing. The work was all in the test */
+  }else if(
+     (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel))
+      && z!=z2
+  ){
+     if( stem(&z, "ta", "ate", 0)  ||
+         stem(&z, "lb", "ble", 0)  ||
+         stem(&z, "zi", "ize", 0) ){
+       /* Do nothing.
The work was all in the test */ + }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ + z++; + }else if( m_eq_1(z) && star_oh(z) ){ + *(--z) = 'e'; + } + } + + /* Step 1c */ + if( z[0]=='y' && hasVowel(z+1) ){ + z[0] = 'i'; + } + + /* Step 2 */ + switch( z[1] ){ + case 'a': + if( !stem(&z, "lanoita", "ate", m_gt_0) ){ + stem(&z, "lanoit", "tion", m_gt_0); + } + break; + case 'c': + if( !stem(&z, "icne", "ence", m_gt_0) ){ + stem(&z, "icna", "ance", m_gt_0); + } + break; + case 'e': + stem(&z, "rezi", "ize", m_gt_0); + break; + case 'g': + stem(&z, "igol", "log", m_gt_0); + break; + case 'l': + if( !stem(&z, "ilb", "ble", m_gt_0) + && !stem(&z, "illa", "al", m_gt_0) + && !stem(&z, "iltne", "ent", m_gt_0) + && !stem(&z, "ile", "e", m_gt_0) + ){ + stem(&z, "ilsuo", "ous", m_gt_0); + } + break; + case 'o': + if( !stem(&z, "noitazi", "ize", m_gt_0) + && !stem(&z, "noita", "ate", m_gt_0) + ){ + stem(&z, "rota", "ate", m_gt_0); + } + break; + case 's': + if( !stem(&z, "msila", "al", m_gt_0) + && !stem(&z, "ssenevi", "ive", m_gt_0) + && !stem(&z, "ssenluf", "ful", m_gt_0) + ){ + stem(&z, "ssensuo", "ous", m_gt_0); + } + break; + case 't': + if( !stem(&z, "itila", "al", m_gt_0) + && !stem(&z, "itivi", "ive", m_gt_0) + ){ + stem(&z, "itilib", "ble", m_gt_0); + } + break; + } + + /* Step 3 */ + switch( z[0] ){ + case 'e': + if( !stem(&z, "etaci", "ic", m_gt_0) + && !stem(&z, "evita", "", m_gt_0) + ){ + stem(&z, "ezila", "al", m_gt_0); + } + break; + case 'i': + stem(&z, "itici", "ic", m_gt_0); + break; + case 'l': + if( !stem(&z, "laci", "ic", m_gt_0) ){ + stem(&z, "luf", "", m_gt_0); + } + break; + case 's': + stem(&z, "ssen", "", m_gt_0); + break; + } + + /* Step 4 */ + switch( z[1] ){ + case 'a': + if( z[0]=='l' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'c': + if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'e': + if( z[0]=='r' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'i': + if( z[0]=='c' && m_gt_1(z+2) ){ + z += 2; + } + break; + case 'l': + if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ + z += 4; + } + break; + case 'n': + if( z[0]=='t' ){ + if( z[2]=='a' ){ + if( m_gt_1(z+3) ){ + z += 3; + } + }else if( z[2]=='e' ){ + if( !stem(&z, "tneme", "", m_gt_1) + && !stem(&z, "tnem", "", m_gt_1) + ){ + stem(&z, "tne", "", m_gt_1); + } + } + } + break; + case 'o': + if( z[0]=='u' ){ + if( m_gt_1(z+2) ){ + z += 2; + } + }else if( z[3]=='s' || z[3]=='t' ){ + stem(&z, "noi", "", m_gt_1); + } + break; + case 's': + if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 't': + if( !stem(&z, "eta", "", m_gt_1) ){ + stem(&z, "iti", "", m_gt_1); + } + break; + case 'u': + if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ + z += 3; + } + break; + case 'v': + case 'z': + if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ + z += 3; + } + break; + } + + /* Step 5a */ + if( z[0]=='e' ){ + if( m_gt_1(z+1) ){ + z++; + }else if( m_eq_1(z+1) && !star_oh(z+1) ){ + z++; + } + } + + /* Step 5b */ + if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ + z++; + } + + /* z[] is now the stemmed word in reverse order. Flip it back + ** around into forward order and return. + */ + *pnOut = i = (int)strlen(z); + zOut[i] = 0; + while( *z ){ + zOut[--i] = *(z++); + } +} + +/* +** Characters that can be part of a token. We assume any character +** whose value is greater than 0x80 (any UTF character) can be +** part of a token. In other words, delimiters all must have +** values of 0x7f or lower. 
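+**
+** For example, in the input "right-click, please" the bytes '-', ','
+** and ' ' all have values below 0x30 and are therefore delimiters, so
+** porterNext() would emit the stemmed forms of the three tokens "right",
+** "click" and "please".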
+*/
+static const char porterIdChar[] = {
+/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,  /* 3x */
+    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 4x */
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,  /* 5x */
+    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  /* 6x */
+    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,  /* 7x */
+};
+#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30]))
+
+/*
+** Extract the next token from a tokenization cursor. The cursor must
+** have been opened by a prior call to porterOpen().
+*/
+static int porterNext(
+  sqlite3_tokenizer_cursor *pCursor,  /* Cursor returned by porterOpen */
+  const char **pzToken,               /* OUT: *pzToken is the token text */
+  int *pnBytes,                       /* OUT: Number of bytes in token */
+  int *piStartOffset,                 /* OUT: Starting offset of token */
+  int *piEndOffset,                   /* OUT: Ending offset of token */
+  int *piPosition                     /* OUT: Position integer of token */
+){
+  porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor;
+  const char *z = c->zInput;
+
+  while( c->iOffset<c->nInput ){
+    int iStartOffset, ch;
+
+    /* Scan past delimiter characters */
+    while( c->iOffset<c->nInput && isDelim(z[c->iOffset]) ){
+      c->iOffset++;
+    }
+
+    /* Count non-delimiter characters. */
+    iStartOffset = c->iOffset;
+    while( c->iOffset<c->nInput && !isDelim(z[c->iOffset]) ){
+      c->iOffset++;
+    }
+
+    if( c->iOffset>iStartOffset ){
+      int n = c->iOffset-iStartOffset;
+      if( n>c->nAllocated ){
+        char *pNew;
+        c->nAllocated = n+20;
+        pNew = sqlite3_realloc(c->zToken, c->nAllocated);
+        if( !pNew ) return SQLITE_NOMEM;
+        c->zToken = pNew;
+      }
+      porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes);
+      *pzToken = c->zToken;
+      *piStartOffset = iStartOffset;
+      *piEndOffset = c->iOffset;
+      *piPosition = c->iToken++;
+      return SQLITE_OK;
+    }
+  }
+  return SQLITE_DONE;
+}
+
+/*
+** The set of routines that implement the porter-stemmer tokenizer
+*/
+static const sqlite3_tokenizer_module porterTokenizerModule = {
+  0,
+  porterCreate,
+  porterDestroy,
+  porterOpen,
+  porterClose,
+  porterNext,
+  0
+};
+
+/*
+** Allocate a new porter tokenizer. Return a pointer to the new
+** tokenizer in *ppModule
+*/
+SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(
+  sqlite3_tokenizer_module const**ppModule
+){
+  *ppModule = &porterTokenizerModule;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_porter.c *****************************************/
+/************** Begin file fts3_tokenizer.c **********************************/
+/*
+** 2007 June 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This is part of an SQLite module implementing full-text search.
+** This particular file implements the generic tokenizer interface.
+*/
+
+/*
+** The code in this file is only compiled if:
+**
+**  * The FTS3 module is being built as an extension
+**    (in which case SQLITE_CORE is not defined), or
+**
+**  * The FTS3 module is being built into the core of
+**    SQLite (in which case SQLITE_ENABLE_FTS3 is defined).
+*/ +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* #include */ +/* #include */ + +/* +** Return true if the two-argument version of fts3_tokenizer() +** has been activated via a prior call to sqlite3_db_config(db, +** SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, 1, 0); +*/ +static int fts3TokenizerEnabled(sqlite3_context *context){ + sqlite3 *db = sqlite3_context_db_handle(context); + int isEnabled = 0; + sqlite3_db_config(db,SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER,-1,&isEnabled); + return isEnabled; +} + +/* +** Implementation of the SQL scalar function for accessing the underlying +** hash table. This function may be called as follows: +** +** SELECT (); +** SELECT (, ); +** +** where is the name passed as the second argument +** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer'). +** +** If the argument is specified, it must be a blob value +** containing a pointer to be stored as the hash data corresponding +** to the string . If is not specified, then +** the string must already exist in the has table. Otherwise, +** an error is returned. +** +** Whether or not the argument is specified, the value returned +** is a blob containing the pointer stored as the hash data corresponding +** to string (after the hash-table is updated, if applicable). +*/ +static void fts3TokenizerFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + Fts3Hash *pHash; + void *pPtr = 0; + const unsigned char *zName; + int nName; + + assert( argc==1 || argc==2 ); + + pHash = (Fts3Hash *)sqlite3_user_data(context); + + zName = sqlite3_value_text(argv[0]); + nName = sqlite3_value_bytes(argv[0])+1; + + if( argc==2 ){ + if( fts3TokenizerEnabled(context) ){ + void *pOld; + int n = sqlite3_value_bytes(argv[1]); + if( zName==0 || n!=sizeof(pPtr) ){ + sqlite3_result_error(context, "argument type mismatch", -1); + return; + } + pPtr = *(void **)sqlite3_value_blob(argv[1]); + pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr); + if( pOld==pPtr ){ + sqlite3_result_error(context, "out of memory", -1); + } + }else{ + sqlite3_result_error(context, "fts3tokenize disabled", -1); + return; + } + }else{ + if( zName ){ + pPtr = sqlite3Fts3HashFind(pHash, zName, nName); + } + if( !pPtr ){ + char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); + sqlite3_result_error(context, zErr, -1); + sqlite3_free(zErr); + return; + } + } + sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); +} + +SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char c){ + static const char isFtsIdChar[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ + 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ + }; + return (c&0x80 || isFtsIdChar[(int)(c)]); +} + +SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *zStr, int *pn){ + const char *z1; + const char *z2 = 0; + + /* Find the start of the next token. 
+  */
+  z1 = zStr;
+  while( z2==0 ){
+    char c = *z1;
+    switch( c ){
+      case '\0': return 0;        /* No more tokens here */
+      case '\'':
+      case '"':
+      case '`': {
+        z2 = z1;
+        while( *++z2 && (*z2!=c || *++z2==c) );
+        break;
+      }
+      case '[':
+        z2 = &z1[1];
+        while( *z2 && z2[0]!=']' ) z2++;
+        if( *z2 ) z2++;
+        break;
+
+      default:
+        if( sqlite3Fts3IsIdChar(*z1) ){
+          z2 = &z1[1];
+          while( sqlite3Fts3IsIdChar(*z2) ) z2++;
+        }else{
+          z1++;
+        }
+    }
+  }
+
+  *pn = (int)(z2-z1);
+  return z1;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(
+  Fts3Hash *pHash,                /* Tokenizer hash table */
+  const char *zArg,               /* Tokenizer name */
+  sqlite3_tokenizer **ppTok,      /* OUT: Tokenizer (if applicable) */
+  char **pzErr                    /* OUT: Set to malloced error message */
+){
+  int rc;
+  char *z = (char *)zArg;
+  int n = 0;
+  char *zCopy;
+  char *zEnd;                     /* Pointer to nul-term of zCopy */
+  sqlite3_tokenizer_module *m;
+
+  zCopy = sqlite3_mprintf("%s", zArg);
+  if( !zCopy ) return SQLITE_NOMEM;
+  zEnd = &zCopy[strlen(zCopy)];
+
+  z = (char *)sqlite3Fts3NextToken(zCopy, &n);
+  if( z==0 ){
+    assert( n==0 );
+    z = zCopy;
+  }
+  z[n] = '\0';
+  sqlite3Fts3Dequote(z);
+
+  m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash,z,(int)strlen(z)+1);
+  if( !m ){
+    sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", z);
+    rc = SQLITE_ERROR;
+  }else{
+    char const **aArg = 0;
+    int iArg = 0;
+    z = &z[n+1];
+    while( z<zEnd && (NULL!=(z = (char *)sqlite3Fts3NextToken(z, &n))) ){
+      int nNew = sizeof(char *)*(iArg+1);
+      char const **aNew = (const char **)sqlite3_realloc((void *)aArg, nNew);
+      if( !aNew ){
+        sqlite3_free(zCopy);
+        sqlite3_free((void *)aArg);
+        return SQLITE_NOMEM;
+      }
+      aArg = aNew;
+      aArg[iArg++] = z;
+      z[n] = '\0';
+      sqlite3Fts3Dequote(z);
+      z = &z[n+1];
+    }
+    rc = m->xCreate(iArg, aArg, ppTok);
+    assert( rc!=SQLITE_OK || *ppTok );
+    if( rc!=SQLITE_OK ){
+      sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer");
+    }else{
+      (*ppTok)->pModule = m;
+    }
+    sqlite3_free((void *)aArg);
+  }
+
+  sqlite3_free(zCopy);
+  return rc;
+}
+
+
+#ifdef SQLITE_TEST
+
+#if defined(INCLUDE_SQLITE_TCL_H)
+# include "sqlite_tcl.h"
+#else
+# include "tcl.h"
+#endif
+/* #include <string.h> */
+
+/*
+** Implementation of a special SQL scalar function for testing tokenizers
+** designed to be used in concert with the Tcl testing framework. This
+** function must be called with two or more arguments:
+**
+**   SELECT <function-name>(<tokenizer-name>, ..., <input-string>);
+**
+** where <function-name> is the name passed as the second argument
+** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer')
+** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test').
+**
+** The return value is a string that may be interpreted as a Tcl
+** list. For each token in the <input-string>, three elements are
+** added to the returned list. The first is the token position, the
+** second is the token text (folded, stemmed, etc.) and the third is the
+** substring of <input-string> associated with the token.
+** For example, using the built-in "simple" tokenizer:
+**
+**   SELECT fts3_tokenizer_test('simple', 'I don''t see how');
+**
+** will return the string:
+**
+**   "{0 i I 1 dont don't 2 see see 3 how how}"
+**
+*/
+static void testFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  Fts3Hash *pHash;
+  sqlite3_tokenizer_module *p;
+  sqlite3_tokenizer *pTokenizer = 0;
+  sqlite3_tokenizer_cursor *pCsr = 0;
+
+  const char *zErr = 0;
+
+  const char *zName;
+  int nName;
+  const char *zInput;
+  int nInput;
+
+  const char *azArg[64];
+
+  const char *zToken;
+  int nToken = 0;
+  int iStart = 0;
+  int iEnd = 0;
+  int iPos = 0;
+  int i;
+
+  Tcl_Obj *pRet;
+
+  if( argc<2 ){
+    sqlite3_result_error(context, "insufficient arguments", -1);
+    return;
+  }
+
+  nName = sqlite3_value_bytes(argv[0]);
+  zName = (const char *)sqlite3_value_text(argv[0]);
+  nInput = sqlite3_value_bytes(argv[argc-1]);
+  zInput = (const char *)sqlite3_value_text(argv[argc-1]);
+
+  pHash = (Fts3Hash *)sqlite3_user_data(context);
+  p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1);
+
+  if( !p ){
+    char *zErr2 = sqlite3_mprintf("unknown tokenizer: %s", zName);
+    sqlite3_result_error(context, zErr2, -1);
+    sqlite3_free(zErr2);
+    return;
+  }
+
+  pRet = Tcl_NewObj();
+  Tcl_IncrRefCount(pRet);
+
+  for(i=1; i<argc-1; i++){
+    azArg[i-1] = (const char *)sqlite3_value_text(argv[i]);
+  }
+
+  if( SQLITE_OK!=p->xCreate(argc-2, azArg, &pTokenizer) ){
+    zErr = "error in xCreate()";
+    goto finish;
+  }
+  pTokenizer->pModule = p;
+  if( sqlite3Fts3OpenTokenizer(pTokenizer, 0, zInput, nInput, &pCsr) ){
+    zErr = "error in xOpen()";
+    goto finish;
+  }
+
+  while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){
+    Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos));
+    Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken));
+    zToken = &zInput[iStart];
+    nToken = iEnd-iStart;
+    Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken));
+  }
+
+  if( SQLITE_OK!=p->xClose(pCsr) ){
+    zErr = "error in xClose()";
+    goto finish;
+  }
+  if( SQLITE_OK!=p->xDestroy(pTokenizer) ){
+    zErr = "error in xDestroy()";
+    goto finish;
+  }
+
+finish:
+  if( zErr ){
+    sqlite3_result_error(context, zErr, -1);
+  }else{
+    sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT);
+  }
+  Tcl_DecrRefCount(pRet);
+}
+
+static
+int registerTokenizer(
+  sqlite3 *db,
+  char *zName,
+  const sqlite3_tokenizer_module *p
+){
+  int rc;
+  sqlite3_stmt *pStmt;
+  const char zSql[] = "SELECT fts3_tokenizer(?, ?)";
+
+  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC);
+  sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC);
+  sqlite3_step(pStmt);
+
+  return sqlite3_finalize(pStmt);
+}
+
+
+static
+int queryTokenizer(
+  sqlite3 *db,
+  char *zName,
+  const sqlite3_tokenizer_module **pp
+){
+  int rc;
+  sqlite3_stmt *pStmt;
+  const char zSql[] = "SELECT fts3_tokenizer(?)";
+
+  *pp = 0;
+  rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC);
+  if( SQLITE_ROW==sqlite3_step(pStmt) ){
+    if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){
+      memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp));
+    }
+  }
+
+  return sqlite3_finalize(pStmt);
+}
+
+SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule);
+
+/*
+** Implementation of the scalar function fts3_tokenizer_internal_test().
+** This function is used for testing only, it is not included in the +** build unless SQLITE_TEST is defined. +** +** The purpose of this is to test that the fts3_tokenizer() function +** can be used as designed by the C-code in the queryTokenizer and +** registerTokenizer() functions above. These two functions are repeated +** in the README.tokenizer file as an example, so it is important to +** test them. +** +** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar +** function with no arguments. An assert() will fail if a problem is +** detected. i.e.: +** +** SELECT fts3_tokenizer_internal_test(); +** +*/ +static void intTestFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int rc; + const sqlite3_tokenizer_module *p1; + const sqlite3_tokenizer_module *p2; + sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); + + UNUSED_PARAMETER(argc); + UNUSED_PARAMETER(argv); + + /* Test the query function */ + sqlite3Fts3SimpleTokenizerModule(&p1); + rc = queryTokenizer(db, "simple", &p2); + assert( rc==SQLITE_OK ); + assert( p1==p2 ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_ERROR ); + assert( p2==0 ); + assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); + + /* Test the storage function */ + if( fts3TokenizerEnabled(context) ){ + rc = registerTokenizer(db, "nosuchtokenizer", p1); + assert( rc==SQLITE_OK ); + rc = queryTokenizer(db, "nosuchtokenizer", &p2); + assert( rc==SQLITE_OK ); + assert( p2==p1 ); + } + + sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); +} + +#endif + +/* +** Set up SQL objects in database db used to access the contents of +** the hash table pointed to by argument pHash. The hash table must +** been initialized to use string keys, and to take a private copy +** of the key when a value is inserted. i.e. by a call similar to: +** +** sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); +** +** This function adds a scalar function (see header comment above +** fts3TokenizerFunc() in this file for details) and, if ENABLE_TABLE is +** defined at compilation time, a temporary virtual table (see header +** comment above struct HashTableVtab) to the database schema. Both +** provide read/write access to the contents of *pHash. +** +** The third argument to this function, zName, is used as the name +** of both the scalar and, if created, the virtual table. 
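+**
+** For example, FTS3 itself makes a call equivalent to:
+**
+**   sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer");
+**
+** which registers the one- and two-argument forms of the fts3_tokenizer()
+** scalar (and, under SQLITE_TEST, fts3_tokenizer_test() and
+** fts3_tokenizer_internal_test() as well).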
+*/ +SQLITE_PRIVATE int sqlite3Fts3InitHashTable( + sqlite3 *db, + Fts3Hash *pHash, + const char *zName +){ + int rc = SQLITE_OK; + void *p = (void *)pHash; + const int any = SQLITE_ANY; + +#ifdef SQLITE_TEST + char *zTest = 0; + char *zTest2 = 0; + void *pdb = (void *)db; + zTest = sqlite3_mprintf("%s_test", zName); + zTest2 = sqlite3_mprintf("%s_internal_test", zName); + if( !zTest || !zTest2 ){ + rc = SQLITE_NOMEM; + } +#endif + + if( SQLITE_OK==rc ){ + rc = sqlite3_create_function(db, zName, 1, any, p, fts3TokenizerFunc, 0, 0); + } + if( SQLITE_OK==rc ){ + rc = sqlite3_create_function(db, zName, 2, any, p, fts3TokenizerFunc, 0, 0); + } +#ifdef SQLITE_TEST + if( SQLITE_OK==rc ){ + rc = sqlite3_create_function(db, zTest, -1, any, p, testFunc, 0, 0); + } + if( SQLITE_OK==rc ){ + rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0); + } +#endif + +#ifdef SQLITE_TEST + sqlite3_free(zTest); + sqlite3_free(zTest2); +#endif + + return rc; +} + +#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ + +/************** End of fts3_tokenizer.c **************************************/ +/************** Begin file fts3_tokenizer1.c *********************************/ +/* +** 2006 Oct 10 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Implementation of the "simple" full-text-search tokenizer. +*/ + +/* +** The code in this file is only compiled if: +** +** * The FTS3 module is being built as an extension +** (in which case SQLITE_CORE is not defined), or +** +** * The FTS3 module is being built into the core of +** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). +*/ +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* #include */ +/* #include */ +/* #include */ +/* #include */ + +/* #include "fts3_tokenizer.h" */ + +typedef struct simple_tokenizer { + sqlite3_tokenizer base; + char delim[128]; /* flag ASCII delimiters */ +} simple_tokenizer; + +typedef struct simple_tokenizer_cursor { + sqlite3_tokenizer_cursor base; + const char *pInput; /* input we are tokenizing */ + int nBytes; /* size of the input */ + int iOffset; /* current position in pInput */ + int iToken; /* index of next token to be returned */ + char *pToken; /* storage for current token */ + int nTokenAllocated; /* space allocated to zToken buffer */ +} simple_tokenizer_cursor; + + +static int simpleDelim(simple_tokenizer *t, unsigned char c){ + return c<0x80 && t->delim[c]; +} +static int fts3_isalnum(int x){ + return (x>='0' && x<='9') || (x>='A' && x<='Z') || (x>='a' && x<='z'); +} + +/* +** Create a new tokenizer instance. +*/ +static int simpleCreate( + int argc, const char * const *argv, + sqlite3_tokenizer **ppTokenizer +){ + simple_tokenizer *t; + + t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); + if( t==NULL ) return SQLITE_NOMEM; + memset(t, 0, sizeof(*t)); + + /* TODO(shess) Delimiters need to remain the same from run to run, + ** else we need to reindex. One solution would be a meta-table to + ** track such information in the database, then we'd only want this + ** information on the initial create. 
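+**
+** Note the shape of the test below: the custom delimiter set is read
+** from argv[1], so it only takes effect when at least two arguments
+** reach simpleCreate(); with fewer, every non-alphanumeric ASCII byte
+** (per fts3_isalnum()) becomes a delimiter. As a hypothetical sketch,
+** a call simpleCreate(2, azArg, &pTok) with azArg[1]==".," would mark
+** only '.' and ',' as delimiters.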
+  */
+  if( argc>1 ){
+    int i, n = (int)strlen(argv[1]);
+    for(i=0; i<n; i++){
+      unsigned char ch = argv[1][i];
+      /* We explicitly don't support UTF-8 delimiters for now. */
+      if( ch>=0x80 ){
+        sqlite3_free(t);
+        return SQLITE_ERROR;
+      }
+      t->delim[ch] = 1;
+    }
+  } else {
+    /* Mark non-alphanumeric ASCII characters as delimiters */
+    int i;
+    for(i=1; i<0x80; i++){
+      t->delim[i] = !fts3_isalnum(i) ? -1 : 0;
+    }
+  }
+
+  *ppTokenizer = &t->base;
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int simpleDestroy(sqlite3_tokenizer *pTokenizer){
+  sqlite3_free(pTokenizer);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is pInput[0..nBytes-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int simpleOpen(
+  sqlite3_tokenizer *pTokenizer,         /* The tokenizer */
+  const char *pInput, int nBytes,        /* String to be tokenized */
+  sqlite3_tokenizer_cursor **ppCursor    /* OUT: Tokenization cursor */
+){
+  simple_tokenizer_cursor *c;
+
+  UNUSED_PARAMETER(pTokenizer);
+
+  c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c));
+  if( c==NULL ) return SQLITE_NOMEM;
+
+  c->pInput = pInput;
+  if( pInput==0 ){
+    c->nBytes = 0;
+  }else if( nBytes<0 ){
+    c->nBytes = (int)strlen(pInput);
+  }else{
+    c->nBytes = nBytes;
+  }
+  c->iOffset = 0;                 /* start tokenizing at the beginning */
+  c->iToken = 0;
+  c->pToken = NULL;               /* no space allocated, yet. */
+  c->nTokenAllocated = 0;
+
+  *ppCursor = &c->base;
+  return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** simpleOpen() above.
+*/
+static int simpleClose(sqlite3_tokenizer_cursor *pCursor){
+  simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor;
+  sqlite3_free(c->pToken);
+  sqlite3_free(c);
+  return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor. The cursor must
+** have been opened by a prior call to simpleOpen().
+*/
+static int simpleNext(
+  sqlite3_tokenizer_cursor *pCursor,  /* Cursor returned by simpleOpen */
+  const char **ppToken,               /* OUT: *ppToken is the token text */
+  int *pnBytes,                       /* OUT: Number of bytes in token */
+  int *piStartOffset,                 /* OUT: Starting offset of token */
+  int *piEndOffset,                   /* OUT: Ending offset of token */
+  int *piPosition                     /* OUT: Position integer of token */
+){
+  simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor;
+  simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer;
+  unsigned char *p = (unsigned char *)c->pInput;
+
+  while( c->iOffset<c->nBytes ){
+    int iStartOffset;
+
+    /* Scan past delimiter characters */
+    while( c->iOffset<c->nBytes && simpleDelim(t, p[c->iOffset]) ){
+      c->iOffset++;
+    }
+
+    /* Count non-delimiter characters. */
+    iStartOffset = c->iOffset;
+    while( c->iOffset<c->nBytes && !simpleDelim(t, p[c->iOffset]) ){
+      c->iOffset++;
+    }
+
+    if( c->iOffset>iStartOffset ){
+      int i, n = c->iOffset-iStartOffset;
+      if( n>c->nTokenAllocated ){
+        char *pNew;
+        c->nTokenAllocated = n+20;
+        pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated);
+        if( !pNew ) return SQLITE_NOMEM;
+        c->pToken = pNew;
+      }
+      for(i=0; i<n; i++){
+        /* TODO(shess) This needs expansion to handle UTF-8
+        ** case-insensitivity. */
+        unsigned char ch = p[iStartOffset+i];
+        c->pToken[i] = (char)((ch>='A' && ch<='Z') ? ch-'A'+'a' : ch);
+      }
+      *ppToken = c->pToken;
+      *pnBytes = n;
+      *piStartOffset = iStartOffset;
+      *piEndOffset = c->iOffset;
+      *piPosition = c->iToken++;
+
+      return SQLITE_OK;
+    }
+  }
+  return SQLITE_DONE;
+}
+
+/*
+** The set of routines that implement the simple tokenizer
+*/
+static const sqlite3_tokenizer_module simpleTokenizerModule = {
+  0,
+  simpleCreate,
+  simpleDestroy,
+  simpleOpen,
+  simpleClose,
+  simpleNext,
+  0,
+};
+
+/*
+** Allocate a new simple tokenizer. Return a pointer to the new
+** tokenizer in *ppModule
+*/
+SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(
+  sqlite3_tokenizer_module const**ppModule
+){
+  *ppModule = &simpleTokenizerModule;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_tokenizer1.c *************************************/
+/************** Begin file fts3_tokenize_vtab.c ******************************/
+/*
+** 2013 Apr 22
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains code for the "fts3tokenize" virtual table module.
+** An fts3tokenize virtual table is created as follows:
+**
+**   CREATE VIRTUAL TABLE <tbl> USING fts3tokenize(
+**       <tokenizer-name>, <arg-1>, ...
+**   );
+**
+** The table created has the following schema:
+**
+**   CREATE TABLE <tbl>(input, token, start, end, position)
+**
+** When queried, the query must include a WHERE clause of type:
+**
+**   input = <string>
+**
+** The virtual table module tokenizes this <string>, using the FTS3
+** tokenizer specified by the arguments to the CREATE VIRTUAL TABLE
+** statement and returns one row for each token in the result. With
+** fields set as follows:
+**
+**   input:    Always set to a copy of <string>
+**   token:    A token from the input.
+**   start:    Byte offset of the token within the input <string>.
+**   end:      Byte offset of the byte immediately following the end of the
+**             token within the input string.
+**   pos:      Token offset of token within input.
+**
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+typedef struct Fts3tokTable Fts3tokTable;
+typedef struct Fts3tokCursor Fts3tokCursor;
+
+/*
+** Virtual table structure.
+*/
+struct Fts3tokTable {
+  sqlite3_vtab base;              /* Base class used by SQLite core */
+  const sqlite3_tokenizer_module *pMod;
+  sqlite3_tokenizer *pTok;
+};
+
+/*
+** Virtual table cursor structure.
+*/
+struct Fts3tokCursor {
+  sqlite3_vtab_cursor base;       /* Base class used by SQLite core */
+  char *zInput;                   /* Input string */
+  sqlite3_tokenizer_cursor *pCsr; /* Cursor to iterate through zInput */
+  int iRowid;                     /* Current 'rowid' value */
+  const char *zToken;             /* Current 'token' value */
+  int nToken;                     /* Size of zToken in bytes */
+  int iStart;                     /* Current 'start' value */
+  int iEnd;                       /* Current 'end' value */
+  int iPos;                       /* Current 'pos' value */
+};
+
+/*
+** Query FTS for the tokenizer implementation named zName.
+*/ +static int fts3tokQueryTokenizer( + Fts3Hash *pHash, + const char *zName, + const sqlite3_tokenizer_module **pp, + char **pzErr +){ + sqlite3_tokenizer_module *p; + int nName = (int)strlen(zName); + + p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); + if( !p ){ + sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", zName); + return SQLITE_ERROR; + } + + *pp = p; + return SQLITE_OK; +} + +/* +** The second argument, argv[], is an array of pointers to nul-terminated +** strings. This function makes a copy of the array and strings into a +** single block of memory. It then dequotes any of the strings that appear +** to be quoted. +** +** If successful, output parameter *pazDequote is set to point at the +** array of dequoted strings and SQLITE_OK is returned. The caller is +** responsible for eventually calling sqlite3_free() to free the array +** in this case. Or, if an error occurs, an SQLite error code is returned. +** The final value of *pazDequote is undefined in this case. +*/ +static int fts3tokDequoteArray( + int argc, /* Number of elements in argv[] */ + const char * const *argv, /* Input array */ + char ***pazDequote /* Output array */ +){ + int rc = SQLITE_OK; /* Return code */ + if( argc==0 ){ + *pazDequote = 0; + }else{ + int i; + int nByte = 0; + char **azDequote; + + for(i=0; ixCreate((nDequote>1 ? nDequote-1 : 0), azArg, &pTok); + } + + if( rc==SQLITE_OK ){ + pTab = (Fts3tokTable *)sqlite3_malloc(sizeof(Fts3tokTable)); + if( pTab==0 ){ + rc = SQLITE_NOMEM; + } + } + + if( rc==SQLITE_OK ){ + memset(pTab, 0, sizeof(Fts3tokTable)); + pTab->pMod = pMod; + pTab->pTok = pTok; + *ppVtab = &pTab->base; + }else{ + if( pTok ){ + pMod->xDestroy(pTok); + } + } + + sqlite3_free(azDequote); + return rc; +} + +/* +** This function does the work for both the xDisconnect and xDestroy methods. +** These tables have no persistent representation of their own, so xDisconnect +** and xDestroy are identical operations. +*/ +static int fts3tokDisconnectMethod(sqlite3_vtab *pVtab){ + Fts3tokTable *pTab = (Fts3tokTable *)pVtab; + + pTab->pMod->xDestroy(pTab->pTok); + sqlite3_free(pTab); + return SQLITE_OK; +} + +/* +** xBestIndex - Analyze a WHERE and ORDER BY clause. +*/ +static int fts3tokBestIndexMethod( + sqlite3_vtab *pVTab, + sqlite3_index_info *pInfo +){ + int i; + UNUSED_PARAMETER(pVTab); + + for(i=0; inConstraint; i++){ + if( pInfo->aConstraint[i].usable + && pInfo->aConstraint[i].iColumn==0 + && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ + ){ + pInfo->idxNum = 1; + pInfo->aConstraintUsage[i].argvIndex = 1; + pInfo->aConstraintUsage[i].omit = 1; + pInfo->estimatedCost = 1; + return SQLITE_OK; + } + } + + pInfo->idxNum = 0; + assert( pInfo->estimatedCost>1000000.0 ); + + return SQLITE_OK; +} + +/* +** xOpen - Open a cursor. +*/ +static int fts3tokOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ + Fts3tokCursor *pCsr; + UNUSED_PARAMETER(pVTab); + + pCsr = (Fts3tokCursor *)sqlite3_malloc(sizeof(Fts3tokCursor)); + if( pCsr==0 ){ + return SQLITE_NOMEM; + } + memset(pCsr, 0, sizeof(Fts3tokCursor)); + + *ppCsr = (sqlite3_vtab_cursor *)pCsr; + return SQLITE_OK; +} + +/* +** Reset the tokenizer cursor passed as the only argument. As if it had +** just been returned by fts3tokOpenMethod(). 
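+** (Both the xClose method and each xFilter call invoke this reset, so a
+** single cursor object can be reused for any number of scans.)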
+*/ +static void fts3tokResetCursor(Fts3tokCursor *pCsr){ + if( pCsr->pCsr ){ + Fts3tokTable *pTab = (Fts3tokTable *)(pCsr->base.pVtab); + pTab->pMod->xClose(pCsr->pCsr); + pCsr->pCsr = 0; + } + sqlite3_free(pCsr->zInput); + pCsr->zInput = 0; + pCsr->zToken = 0; + pCsr->nToken = 0; + pCsr->iStart = 0; + pCsr->iEnd = 0; + pCsr->iPos = 0; + pCsr->iRowid = 0; +} + +/* +** xClose - Close a cursor. +*/ +static int fts3tokCloseMethod(sqlite3_vtab_cursor *pCursor){ + Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; + + fts3tokResetCursor(pCsr); + sqlite3_free(pCsr); + return SQLITE_OK; +} + +/* +** xNext - Advance the cursor to the next row, if any. +*/ +static int fts3tokNextMethod(sqlite3_vtab_cursor *pCursor){ + Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; + Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab); + int rc; /* Return code */ + + pCsr->iRowid++; + rc = pTab->pMod->xNext(pCsr->pCsr, + &pCsr->zToken, &pCsr->nToken, + &pCsr->iStart, &pCsr->iEnd, &pCsr->iPos + ); + + if( rc!=SQLITE_OK ){ + fts3tokResetCursor(pCsr); + if( rc==SQLITE_DONE ) rc = SQLITE_OK; + } + + return rc; +} + +/* +** xFilter - Initialize a cursor to point at the start of its data. +*/ +static int fts3tokFilterMethod( + sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ + int idxNum, /* Strategy index */ + const char *idxStr, /* Unused */ + int nVal, /* Number of elements in apVal */ + sqlite3_value **apVal /* Arguments for the indexing scheme */ +){ + int rc = SQLITE_ERROR; + Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; + Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab); + UNUSED_PARAMETER(idxStr); + UNUSED_PARAMETER(nVal); + + fts3tokResetCursor(pCsr); + if( idxNum==1 ){ + const char *zByte = (const char *)sqlite3_value_text(apVal[0]); + int nByte = sqlite3_value_bytes(apVal[0]); + pCsr->zInput = sqlite3_malloc(nByte+1); + if( pCsr->zInput==0 ){ + rc = SQLITE_NOMEM; + }else{ + memcpy(pCsr->zInput, zByte, nByte); + pCsr->zInput[nByte] = 0; + rc = pTab->pMod->xOpen(pTab->pTok, pCsr->zInput, nByte, &pCsr->pCsr); + if( rc==SQLITE_OK ){ + pCsr->pCsr->pTokenizer = pTab->pTok; + } + } + } + + if( rc!=SQLITE_OK ) return rc; + return fts3tokNextMethod(pCursor); +} + +/* +** xEof - Return true if the cursor is at EOF, or false otherwise. +*/ +static int fts3tokEofMethod(sqlite3_vtab_cursor *pCursor){ + Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; + return (pCsr->zToken==0); +} + +/* +** xColumn - Return a column value. +*/ +static int fts3tokColumnMethod( + sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ + sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ + int iCol /* Index of column to read value from */ +){ + Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; + + /* CREATE TABLE x(input, token, start, end, position) */ + switch( iCol ){ + case 0: + sqlite3_result_text(pCtx, pCsr->zInput, -1, SQLITE_TRANSIENT); + break; + case 1: + sqlite3_result_text(pCtx, pCsr->zToken, pCsr->nToken, SQLITE_TRANSIENT); + break; + case 2: + sqlite3_result_int(pCtx, pCsr->iStart); + break; + case 3: + sqlite3_result_int(pCtx, pCsr->iEnd); + break; + default: + assert( iCol==4 ); + sqlite3_result_int(pCtx, pCsr->iPos); + break; + } + return SQLITE_OK; +} + +/* +** xRowid - Return the current rowid for the cursor. 
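+** (The rowid is a simple counter: fts3tokResetCursor() zeroes it and each
+** call to fts3tokNextMethod() increments it, so the first token row of a
+** scan has rowid 1.)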
+*/
+static int fts3tokRowidMethod(
+  sqlite3_vtab_cursor *pCursor,   /* Cursor to retrieve value from */
+  sqlite_int64 *pRowid            /* OUT: Rowid value */
+){
+  Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor;
+  *pRowid = (sqlite3_int64)pCsr->iRowid;
+  return SQLITE_OK;
+}
+
+/*
+** Register the fts3tok module with database connection db. Return SQLITE_OK
+** if successful or an error code if sqlite3_create_module() fails.
+*/
+SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){
+  static const sqlite3_module fts3tok_module = {
+     0,                           /* iVersion      */
+     fts3tokConnectMethod,        /* xCreate       */
+     fts3tokConnectMethod,        /* xConnect      */
+     fts3tokBestIndexMethod,      /* xBestIndex    */
+     fts3tokDisconnectMethod,     /* xDisconnect   */
+     fts3tokDisconnectMethod,     /* xDestroy      */
+     fts3tokOpenMethod,           /* xOpen         */
+     fts3tokCloseMethod,          /* xClose        */
+     fts3tokFilterMethod,         /* xFilter       */
+     fts3tokNextMethod,           /* xNext         */
+     fts3tokEofMethod,            /* xEof          */
+     fts3tokColumnMethod,         /* xColumn       */
+     fts3tokRowidMethod,          /* xRowid        */
+     0,                           /* xUpdate       */
+     0,                           /* xBegin        */
+     0,                           /* xSync         */
+     0,                           /* xCommit       */
+     0,                           /* xRollback     */
+     0,                           /* xFindFunction */
+     0,                           /* xRename       */
+     0,                           /* xSavepoint    */
+     0,                           /* xRelease      */
+     0                            /* xRollbackTo   */
+  };
+  int rc;                         /* Return code */
+
+  rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash);
+  return rc;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_tokenize_vtab.c **********************************/
+/************** Begin file fts3_write.c **************************************/
+/*
+** 2009 Oct 23
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file is part of the SQLite FTS3 extension module. Specifically,
+** this file contains code to insert, update and delete rows from FTS3
+** tables. It also contains code to merge FTS3 b-tree segments. Some
+** of the sub-routines used to merge segments are also used by the query
+** code in fts3.c.
+*/
+
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+/* #include <stdlib.h> */
+
+
+#define FTS_MAX_APPENDABLE_HEIGHT 16
+
+/*
+** When full-text index nodes are loaded from disk, the buffer that they
+** are loaded into has the following number of bytes of padding at the end
+** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer
+** of 920 bytes is allocated for it.
+**
+** This means that if we have a pointer into a buffer containing node data,
+** it is always safe to read up to two varints from it without risking an
+** overread, even if the node data is corrupted.
+*/
+#define FTS3_NODE_PADDING (FTS3_VARINT_MAX*2)
+
+/*
+** Under certain circumstances, b-tree nodes (doclists) can be loaded into
+** memory incrementally instead of all at once. This can be a big performance
+** win (reduced IO and CPU) if SQLite stops calling the virtual table xNext()
+** method before retrieving all query results (as may happen, for example,
+** if a query has a LIMIT clause).
+**
+** Incremental loading is used for b-tree nodes FTS3_NODE_CHUNK_THRESHOLD
+** bytes and larger. Nodes are loaded in chunks of FTS3_NODE_CHUNKSIZE bytes.
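+** For example, with the default values below, a 20KB node is not read in
+** one go: the first 4KB chunk is loaded when the node is opened, and
+** further 4KB chunks are loaded on demand as the reader advances.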
+** The code is written so that the hard lower-limit for each of these values +** is 1. Clearly such small values would be inefficient, but can be useful +** for testing purposes. +** +** If this module is built with SQLITE_TEST defined, these constants may +** be overridden at runtime for testing purposes. File fts3_test.c contains +** a Tcl interface to read and write the values. +*/ +#ifdef SQLITE_TEST +int test_fts3_node_chunksize = (4*1024); +int test_fts3_node_chunk_threshold = (4*1024)*4; +# define FTS3_NODE_CHUNKSIZE test_fts3_node_chunksize +# define FTS3_NODE_CHUNK_THRESHOLD test_fts3_node_chunk_threshold +#else +# define FTS3_NODE_CHUNKSIZE (4*1024) +# define FTS3_NODE_CHUNK_THRESHOLD (FTS3_NODE_CHUNKSIZE*4) +#endif + +/* +** The two values that may be meaningfully bound to the :1 parameter in +** statements SQL_REPLACE_STAT and SQL_SELECT_STAT. +*/ +#define FTS_STAT_DOCTOTAL 0 +#define FTS_STAT_INCRMERGEHINT 1 +#define FTS_STAT_AUTOINCRMERGE 2 + +/* +** If FTS_LOG_MERGES is defined, call sqlite3_log() to report each automatic +** and incremental merge operation that takes place. This is used for +** debugging FTS only, it should not usually be turned on in production +** systems. +*/ +#ifdef FTS3_LOG_MERGES +static void fts3LogMerge(int nMerge, sqlite3_int64 iAbsLevel){ + sqlite3_log(SQLITE_OK, "%d-way merge from level %d", nMerge, (int)iAbsLevel); +} +#else +#define fts3LogMerge(x, y) +#endif + + +typedef struct PendingList PendingList; +typedef struct SegmentNode SegmentNode; +typedef struct SegmentWriter SegmentWriter; + +/* +** An instance of the following data structure is used to build doclists +** incrementally. See function fts3PendingListAppend() for details. +*/ +struct PendingList { + int nData; + char *aData; + int nSpace; + sqlite3_int64 iLastDocid; + sqlite3_int64 iLastCol; + sqlite3_int64 iLastPos; +}; + + +/* +** Each cursor has a (possibly empty) linked list of the following objects. +*/ +struct Fts3DeferredToken { + Fts3PhraseToken *pToken; /* Pointer to corresponding expr token */ + int iCol; /* Column token must occur in */ + Fts3DeferredToken *pNext; /* Next in list of deferred tokens */ + PendingList *pList; /* Doclist is assembled here */ +}; + +/* +** An instance of this structure is used to iterate through the terms on +** a contiguous set of segment b-tree leaf nodes. Although the details of +** this structure are only manipulated by code in this file, opaque handles +** of type Fts3SegReader* are also used by code in fts3.c to iterate through +** terms when querying the full-text index. 
See functions: +** +** sqlite3Fts3SegReaderNew() +** sqlite3Fts3SegReaderFree() +** sqlite3Fts3SegReaderIterate() +** +** Methods used to manipulate Fts3SegReader structures: +** +** fts3SegReaderNext() +** fts3SegReaderFirstDocid() +** fts3SegReaderNextDocid() +*/ +struct Fts3SegReader { + int iIdx; /* Index within level, or 0x7FFFFFFF for PT */ + u8 bLookup; /* True for a lookup only */ + u8 rootOnly; /* True for a root-only reader */ + + sqlite3_int64 iStartBlock; /* Rowid of first leaf block to traverse */ + sqlite3_int64 iLeafEndBlock; /* Rowid of final leaf block to traverse */ + sqlite3_int64 iEndBlock; /* Rowid of final block in segment (or 0) */ + sqlite3_int64 iCurrentBlock; /* Current leaf block (or 0) */ + + char *aNode; /* Pointer to node data (or NULL) */ + int nNode; /* Size of buffer at aNode (or 0) */ + int nPopulate; /* If >0, bytes of buffer aNode[] loaded */ + sqlite3_blob *pBlob; /* If not NULL, blob handle to read node */ + + Fts3HashElem **ppNextElem; + + /* Variables set by fts3SegReaderNext(). These may be read directly + ** by the caller. They are valid from the time SegmentReaderNew() returns + ** until SegmentReaderNext() returns something other than SQLITE_OK + ** (i.e. SQLITE_DONE). + */ + int nTerm; /* Number of bytes in current term */ + char *zTerm; /* Pointer to current term */ + int nTermAlloc; /* Allocated size of zTerm buffer */ + char *aDoclist; /* Pointer to doclist of current entry */ + int nDoclist; /* Size of doclist in current entry */ + + /* The following variables are used by fts3SegReaderNextDocid() to iterate + ** through the current doclist (aDoclist/nDoclist). + */ + char *pOffsetList; + int nOffsetList; /* For descending pending seg-readers only */ + sqlite3_int64 iDocid; +}; + +#define fts3SegReaderIsPending(p) ((p)->ppNextElem!=0) +#define fts3SegReaderIsRootOnly(p) ((p)->rootOnly!=0) + +/* +** An instance of this structure is used to create a segment b-tree in the +** database. The internal details of this type are only accessed by the +** following functions: +** +** fts3SegWriterAdd() +** fts3SegWriterFlush() +** fts3SegWriterFree() +*/ +struct SegmentWriter { + SegmentNode *pTree; /* Pointer to interior tree structure */ + sqlite3_int64 iFirst; /* First slot in %_segments written */ + sqlite3_int64 iFree; /* Next free slot in %_segments */ + char *zTerm; /* Pointer to previous term buffer */ + int nTerm; /* Number of bytes in zTerm */ + int nMalloc; /* Size of malloc'd buffer at zMalloc */ + char *zMalloc; /* Malloc'd space (possibly) used for zTerm */ + int nSize; /* Size of allocation at aData */ + int nData; /* Bytes of data in aData */ + char *aData; /* Pointer to block from malloc() */ + i64 nLeafData; /* Number of bytes of leaf data written */ +}; + +/* +** Type SegmentNode is used by the following three functions to create +** the interior part of the segment b+-tree structures (everything except +** the leaf nodes). These functions and type are only ever used by code +** within the fts3SegWriterXXX() family of functions described above. +** +** fts3NodeAddTerm() +** fts3NodeWrite() +** fts3NodeFree() +** +** When a b+tree is written to the database (either as a result of a merge +** or the pending-terms table being flushed), leaves are written into the +** database file as soon as they are completely populated. The interior of +** the tree is assembled in memory and written out only once all leaves have +** been populated and stored. 
This is Ok, as the b+-tree fanout is usually +** very large, meaning that the interior of the tree consumes relatively +** little memory. +*/ +struct SegmentNode { + SegmentNode *pParent; /* Parent node (or NULL for root node) */ + SegmentNode *pRight; /* Pointer to right-sibling */ + SegmentNode *pLeftmost; /* Pointer to left-most node of this depth */ + int nEntry; /* Number of terms written to node so far */ + char *zTerm; /* Pointer to previous term buffer */ + int nTerm; /* Number of bytes in zTerm */ + int nMalloc; /* Size of malloc'd buffer at zMalloc */ + char *zMalloc; /* Malloc'd space (possibly) used for zTerm */ + int nData; /* Bytes of valid data so far */ + char *aData; /* Node data */ +}; + +/* +** Valid values for the second argument to fts3SqlStmt(). +*/ +#define SQL_DELETE_CONTENT 0 +#define SQL_IS_EMPTY 1 +#define SQL_DELETE_ALL_CONTENT 2 +#define SQL_DELETE_ALL_SEGMENTS 3 +#define SQL_DELETE_ALL_SEGDIR 4 +#define SQL_DELETE_ALL_DOCSIZE 5 +#define SQL_DELETE_ALL_STAT 6 +#define SQL_SELECT_CONTENT_BY_ROWID 7 +#define SQL_NEXT_SEGMENT_INDEX 8 +#define SQL_INSERT_SEGMENTS 9 +#define SQL_NEXT_SEGMENTS_ID 10 +#define SQL_INSERT_SEGDIR 11 +#define SQL_SELECT_LEVEL 12 +#define SQL_SELECT_LEVEL_RANGE 13 +#define SQL_SELECT_LEVEL_COUNT 14 +#define SQL_SELECT_SEGDIR_MAX_LEVEL 15 +#define SQL_DELETE_SEGDIR_LEVEL 16 +#define SQL_DELETE_SEGMENTS_RANGE 17 +#define SQL_CONTENT_INSERT 18 +#define SQL_DELETE_DOCSIZE 19 +#define SQL_REPLACE_DOCSIZE 20 +#define SQL_SELECT_DOCSIZE 21 +#define SQL_SELECT_STAT 22 +#define SQL_REPLACE_STAT 23 + +#define SQL_SELECT_ALL_PREFIX_LEVEL 24 +#define SQL_DELETE_ALL_TERMS_SEGDIR 25 +#define SQL_DELETE_SEGDIR_RANGE 26 +#define SQL_SELECT_ALL_LANGID 27 +#define SQL_FIND_MERGE_LEVEL 28 +#define SQL_MAX_LEAF_NODE_ESTIMATE 29 +#define SQL_DELETE_SEGDIR_ENTRY 30 +#define SQL_SHIFT_SEGDIR_ENTRY 31 +#define SQL_SELECT_SEGDIR 32 +#define SQL_CHOMP_SEGDIR 33 +#define SQL_SEGMENT_IS_APPENDABLE 34 +#define SQL_SELECT_INDEXES 35 +#define SQL_SELECT_MXLEVEL 36 + +#define SQL_SELECT_LEVEL_RANGE2 37 +#define SQL_UPDATE_LEVEL_IDX 38 +#define SQL_UPDATE_LEVEL 39 + +/* +** This function is used to obtain an SQLite prepared statement handle +** for the statement identified by the second argument. If successful, +** *pp is set to the requested statement handle and SQLITE_OK returned. +** Otherwise, an SQLite error code is returned and *pp is set to 0. +** +** If argument apVal is not NULL, then it must point to an array with +** at least as many entries as the requested statement has bound +** parameters. The values are bound to the statements parameters before +** returning. +*/ +static int fts3SqlStmt( + Fts3Table *p, /* Virtual table handle */ + int eStmt, /* One of the SQL_XXX constants above */ + sqlite3_stmt **pp, /* OUT: Statement handle */ + sqlite3_value **apVal /* Values to bind to statement */ +){ + const char *azSql[] = { +/* 0 */ "DELETE FROM %Q.'%q_content' WHERE rowid = ?", +/* 1 */ "SELECT NOT EXISTS(SELECT docid FROM %Q.'%q_content' WHERE rowid!=?)", +/* 2 */ "DELETE FROM %Q.'%q_content'", +/* 3 */ "DELETE FROM %Q.'%q_segments'", +/* 4 */ "DELETE FROM %Q.'%q_segdir'", +/* 5 */ "DELETE FROM %Q.'%q_docsize'", +/* 6 */ "DELETE FROM %Q.'%q_stat'", +/* 7 */ "SELECT %s WHERE rowid=?", +/* 8 */ "SELECT (SELECT max(idx) FROM %Q.'%q_segdir' WHERE level = ?) 
+ 1", +/* 9 */ "REPLACE INTO %Q.'%q_segments'(blockid, block) VALUES(?, ?)", +/* 10 */ "SELECT coalesce((SELECT max(blockid) FROM %Q.'%q_segments') + 1, 1)", +/* 11 */ "REPLACE INTO %Q.'%q_segdir' VALUES(?,?,?,?,?,?)", + + /* Return segments in order from oldest to newest.*/ +/* 12 */ "SELECT idx, start_block, leaves_end_block, end_block, root " + "FROM %Q.'%q_segdir' WHERE level = ? ORDER BY idx ASC", +/* 13 */ "SELECT idx, start_block, leaves_end_block, end_block, root " + "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?" + "ORDER BY level DESC, idx ASC", + +/* 14 */ "SELECT count(*) FROM %Q.'%q_segdir' WHERE level = ?", +/* 15 */ "SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", + +/* 16 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ?", +/* 17 */ "DELETE FROM %Q.'%q_segments' WHERE blockid BETWEEN ? AND ?", +/* 18 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", +/* 19 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?", +/* 20 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", +/* 21 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?", +/* 22 */ "SELECT value FROM %Q.'%q_stat' WHERE id=?", +/* 23 */ "REPLACE INTO %Q.'%q_stat' VALUES(?,?)", +/* 24 */ "", +/* 25 */ "", + +/* 26 */ "DELETE FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", +/* 27 */ "SELECT ? UNION SELECT level / (1024 * ?) FROM %Q.'%q_segdir'", + +/* This statement is used to determine which level to read the input from +** when performing an incremental merge. It returns the absolute level number +** of the oldest level in the db that contains at least ? segments. Or, +** if no level in the FTS index contains more than ? segments, the statement +** returns zero rows. */ +/* 28 */ "SELECT level, count(*) AS cnt FROM %Q.'%q_segdir' " + " GROUP BY level HAVING cnt>=?" + " ORDER BY (level %% 1024) ASC LIMIT 1", + +/* Estimate the upper limit on the number of leaf nodes in a new segment +** created by merging the oldest :2 segments from absolute level :1. See +** function sqlite3Fts3Incrmerge() for details. */ +/* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) " + " FROM %Q.'%q_segdir' WHERE level = ? AND idx < ?", + +/* SQL_DELETE_SEGDIR_ENTRY +** Delete the %_segdir entry on absolute level :1 with index :2. */ +/* 30 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", + +/* SQL_SHIFT_SEGDIR_ENTRY +** Modify the idx value for the segment with idx=:3 on absolute level :2 +** to :1. */ +/* 31 */ "UPDATE %Q.'%q_segdir' SET idx = ? WHERE level=? AND idx=?", + +/* SQL_SELECT_SEGDIR +** Read a single entry from the %_segdir table. The entry from absolute +** level :1 with index value :2. */ +/* 32 */ "SELECT idx, start_block, leaves_end_block, end_block, root " + "FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", + +/* SQL_CHOMP_SEGDIR +** Update the start_block (:1) and root (:2) fields of the %_segdir +** entry located on absolute level :3 with index :4. */ +/* 33 */ "UPDATE %Q.'%q_segdir' SET start_block = ?, root = ?" + "WHERE level = ? AND idx = ?", + +/* SQL_SEGMENT_IS_APPENDABLE +** Return a single row if the segment with end_block=? is appendable. Or +** no rows otherwise. */ +/* 34 */ "SELECT 1 FROM %Q.'%q_segments' WHERE blockid=? AND block IS NULL", + +/* SQL_SELECT_INDEXES +** Return the list of valid segment indexes for absolute level ? */ +/* 35 */ "SELECT idx FROM %Q.'%q_segdir' WHERE level=? ORDER BY 1 ASC", + +/* SQL_SELECT_MXLEVEL +** Return the largest relative level in the FTS index or indexes. 
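+** Since each language-id/index combination owns a contiguous block of
+** 1024 absolute levels (see getAbsoluteLevel() below), "level % 1024"
+** recovers a segment's relative level from the stored absolute value.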
*/
+/* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'",
+
+  /* Return segments in order from oldest to newest.*/
+/* 37 */  "SELECT level, idx, end_block "
+            "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? "
+            "ORDER BY level DESC, idx ASC",
+
+  /* Update statements used while promoting segments */
+/* 38 */  "UPDATE OR FAIL %Q.'%q_segdir' SET level=-1,idx=? "
+            "WHERE level=? AND idx=?",
+/* 39 */  "UPDATE OR FAIL %Q.'%q_segdir' SET level=? WHERE level=-1"
+
+  };
+  int rc = SQLITE_OK;
+  sqlite3_stmt *pStmt;
+
+  assert( SizeofArray(azSql)==SizeofArray(p->aStmt) );
+  assert( eStmt<SizeofArray(azSql) && eStmt>=0 );
+
+  pStmt = p->aStmt[eStmt];
+  if( !pStmt ){
+    char *zSql;
+    if( eStmt==SQL_CONTENT_INSERT ){
+      zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName, p->zWriteExprlist);
+    }else if( eStmt==SQL_SELECT_CONTENT_BY_ROWID ){
+      zSql = sqlite3_mprintf(azSql[eStmt], p->zReadExprlist);
+    }else{
+      zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName);
+    }
+    if( !zSql ){
+      rc = SQLITE_NOMEM;
+    }else{
+      rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, NULL);
+      sqlite3_free(zSql);
+      assert( rc==SQLITE_OK || pStmt==0 );
+      p->aStmt[eStmt] = pStmt;
+    }
+  }
+  if( apVal ){
+    int i;
+    int nParam = sqlite3_bind_parameter_count(pStmt);
+    for(i=0; rc==SQLITE_OK && i<nParam; i++){
+      rc = sqlite3_bind_value(pStmt, i+1, apVal[i]);
+    }
+  }
+  *pp = pStmt;
+  return rc;
+}
+
+static int fts3SelectDocsize(
+  Fts3Table *pTab,                /* FTS3 table handle */
+  sqlite3_int64 iDocid,           /* Docid to bind for SQL_SELECT_DOCSIZE */
+  sqlite3_stmt **ppStmt           /* OUT: Statement handle */
+){
+  sqlite3_stmt *pStmt = 0;        /* Statement requested from fts3SqlStmt() */
+  int rc;                         /* Return code */
+
+  rc = fts3SqlStmt(pTab, SQL_SELECT_DOCSIZE, &pStmt, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pStmt, 1, iDocid);
+    rc = sqlite3_step(pStmt);
+    if( rc!=SQLITE_ROW || sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB ){
+      rc = sqlite3_reset(pStmt);
+      if( rc==SQLITE_OK ) rc = FTS_CORRUPT_VTAB;
+      pStmt = 0;
+    }else{
+      rc = SQLITE_OK;
+    }
+  }
+
+  *ppStmt = pStmt;
+  return rc;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3SelectDoctotal(
+  Fts3Table *pTab,                /* Fts3 table handle */
+  sqlite3_stmt **ppStmt           /* OUT: Statement handle */
+){
+  sqlite3_stmt *pStmt = 0;
+  int rc;
+  rc = fts3SqlStmt(pTab, SQL_SELECT_STAT, &pStmt, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL);
+    if( sqlite3_step(pStmt)!=SQLITE_ROW
+     || sqlite3_column_type(pStmt, 0)!=SQLITE_BLOB
+    ){
+      rc = sqlite3_reset(pStmt);
+      if( rc==SQLITE_OK ) rc = FTS_CORRUPT_VTAB;
+      pStmt = 0;
+    }
+  }
+  *ppStmt = pStmt;
+  return rc;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3SelectDocsize(
+  Fts3Table *pTab,                /* Fts3 table handle */
+  sqlite3_int64 iDocid,           /* Docid to read size data for */
+  sqlite3_stmt **ppStmt           /* OUT: Statement handle */
+){
+  return fts3SelectDocsize(pTab, iDocid, ppStmt);
+}
+
+/*
+** Similar to fts3SqlStmt(). Except, after binding the values in apVal
+** to the statement identified by eStmt, the statement is executed.
+**
+** If *pRC is not SQLITE_OK when this function is called, it is a no-op.
+** Otherwise, if an error occurs, an SQLite error code is left in *pRC.
+*/
+static void fts3SqlExec(
+  int *pRC,                /* Result code */
+  Fts3Table *p,            /* The FTS3 table */
+  int eStmt,               /* Index of statement to evaluate */
+  sqlite3_value **apVal    /* Parameters to bind */
+){
+  sqlite3_stmt *pStmt;
+  int rc;
+  if( *pRC ) return;
+  rc = fts3SqlStmt(p, eStmt, &pStmt, apVal);
+  if( rc==SQLITE_OK ){
+    sqlite3_step(pStmt);
+    rc = sqlite3_reset(pStmt);
+  }
+  *pRC = rc;
+}
+
+/*
+** This function ensures that the caller has obtained an exclusive
+** shared-cache table-lock on the %_segdir table. This is required before
+** writing data to the fts3 table. If this lock is not acquired first, then
+** the caller may end up attempting to take this lock as part of committing
+** a transaction, causing SQLite to return SQLITE_LOCKED or
+** LOCKED_SHAREDCACHE to a COMMIT command.
+**
+** It is best to avoid this because if FTS3 returns any error when
+** committing a transaction, the whole transaction will be rolled back.
+** And this is not what users expect when they get SQLITE_LOCKED_SHAREDCACHE.
+*/
+static int fts3Writelock(Fts3Table *p){
+  int rc = SQLITE_OK;
+
+  if( p->nPendingData==0 ){
+    sqlite3_stmt *pStmt;
+    rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pStmt, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_null(pStmt, 1);
+      sqlite3_step(pStmt);
+      rc = sqlite3_reset(pStmt);
+    }
+  }
+
+  return rc;
+}
+
+/*
+** FTS maintains a separate index for each language-id (a 32-bit integer).
+** Within each language id, a separate index is maintained to store the
+** document terms, and each configured prefix size (configured via the FTS
+** "prefix=" option). And each index consists of multiple levels ("relative
+** levels").
+**
+** All three of these values (the language id, the specific index and the
+** level within the index) are encoded in 64-bit integer values stored
+** in the %_segdir table on disk. This function is used to convert three
+** separate component values into the single 64-bit integer value that
+** can be used to query the %_segdir table.
+**
+** Specifically, each language-id/index combination is allocated 1024
+** 64-bit integer level values ("absolute levels"). The main terms index
+** for language-id 0 is allocated values 0-1023. The first prefix index
+** (if any) for language-id 0 is allocated values 1024-2047. And so on.
+** Language 1 indexes are allocated immediately following language 0.
+**
+** So, for a system with nPrefix prefix indexes configured, the block of
+** absolute levels that corresponds to language-id iLangid and index
+** iIndex starts at absolute level ((iLangid * (nPrefix+1) + iIndex) * 1024).
+*/
+static sqlite3_int64 getAbsoluteLevel(
+  Fts3Table *p,                   /* FTS3 table handle */
+  int iLangid,                    /* Language id */
+  int iIndex,                     /* Index in p->aIndex[] */
+  int iLevel                      /* Level of segments */
+){
+  sqlite3_int64 iBase;            /* First absolute level for iLangid/iIndex */
+  assert( iLangid>=0 );
+  assert( p->nIndex>0 );
+  assert( iIndex>=0 && iIndex<p->nIndex );
+
+  iBase = ((sqlite3_int64)iLangid * p->nIndex + iIndex) * FTS3_SEGDIR_MAXLEVEL;
+  return iBase + iLevel;
+}
+
+/*
+** Set *ppStmt to a statement handle that may be used to iterate through
+** all rows in the %_segdir table, from oldest to newest. If successful,
+** return SQLITE_OK. If an error occurs while preparing the statement,
+** return an SQLite error code.
+**
+** There is only ever one instance of this SQL statement compiled for
+** each FTS3 table.
+**
+** The statement returns the following columns from the %_segdir table:
+**
+**   0: idx
+**   1: start_block
+**   2: leaves_end_block
+**   3: end_block
+**   4: root
+*/
+SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(
+  Fts3Table *p,                   /* FTS3 table */
+  int iLangid,                    /* Language being queried */
+  int iIndex,                     /* Index for p->aIndex[] */
+  int iLevel,                     /* Level to select (relative level) */
+  sqlite3_stmt **ppStmt           /* OUT: Compiled statement */
+){
+  int rc;
+  sqlite3_stmt *pStmt = 0;
+
+  assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel>=0 );
+  assert( iLevel<FTS3_SEGDIR_MAXLEVEL );
+  assert( iIndex>=0 && iIndex<p->nIndex );
+
+  if( iLevel<0 ){
+    /* "SELECT * FROM %_segdir WHERE level BETWEEN ? AND ? ORDER BY ..." */
+    rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE, &pStmt, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0));
+      sqlite3_bind_int64(pStmt, 2,
+          getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1)
+      );
+    }
+  }else{
+    /* "SELECT * FROM %_segdir WHERE level = ? ORDER BY ..." */
+    rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex,iLevel));
+    }
+  }
+  *ppStmt = pStmt;
+  return rc;
+}
+
+
+/*
+** Append a single varint to a PendingList buffer. SQLITE_OK is returned
+** if successful, or an SQLite error code otherwise.
+**
+** This function also serves to allocate the PendingList structure itself.
+** For example, to create a new PendingList structure containing two
+** varints:
+**
+**   PendingList *p = 0;
+**   fts3PendingListAppendVarint(&p, 1);
+**   fts3PendingListAppendVarint(&p, 2);
+*/
+static int fts3PendingListAppendVarint(
+  PendingList **pp,               /* IN/OUT: Pointer to PendingList struct */
+  sqlite3_int64 i                 /* Value to append to data */
+){
+  PendingList *p = *pp;
+
+  /* Allocate or grow the PendingList as required. */
+  if( !p ){
+    p = sqlite3_malloc(sizeof(*p) + 100);
+    if( !p ){
+      return SQLITE_NOMEM;
+    }
+    p->nSpace = 100;
+    p->aData = (char *)&p[1];
+    p->nData = 0;
+  }
+  else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){
+    int nNew = p->nSpace * 2;
+    p = sqlite3_realloc(p, sizeof(*p) + nNew);
+    if( !p ){
+      sqlite3_free(*pp);
+      *pp = 0;
+      return SQLITE_NOMEM;
+    }
+    p->nSpace = nNew;
+    p->aData = (char *)&p[1];
+  }
+
+  /* Append the new serialized varint to the end of the list. */
+  p->nData += sqlite3Fts3PutVarint(&p->aData[p->nData], i);
+  p->aData[p->nData] = '\0';
+  *pp = p;
+  return SQLITE_OK;
+}
+
+/*
+** Add a docid/column/position entry to a PendingList structure. Non-zero
+** is returned if the structure is sqlite3_realloced as part of adding
+** the entry. Otherwise, zero.
+**
+** If an OOM error occurs, *pRc is set to SQLITE_NOMEM before returning.
+** Zero is always returned in this case. Otherwise, if no OOM error occurs,
+** it is set to SQLITE_OK.
+*/
+static int fts3PendingListAppend(
+  PendingList **pp,               /* IN/OUT: PendingList structure */
+  sqlite3_int64 iDocid,           /* Docid for entry to add */
+  sqlite3_int64 iCol,             /* Column for entry to add */
+  sqlite3_int64 iPos,             /* Position of term for entry to add */
+  int *pRc                        /* OUT: Return code */
+){
+  PendingList *p = *pp;
+  int rc = SQLITE_OK;
+
+  assert( !p || p->iLastDocid<=iDocid );
+
+  if( !p || p->iLastDocid!=iDocid ){
+    sqlite3_int64 iDelta = iDocid - (p ? p->iLastDocid : 0);
+    if( p ){
+      assert( p->nData<p->nSpace );
+      assert( p->aData[p->nData]==0 );
+      p->nData++;
+    }
+    if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iDelta)) ){
+      goto pendinglistappend_out;
+    }
+    p->iLastCol = -1;
+    p->iLastPos = 0;
+    p->iLastDocid = iDocid;
+  }
+  if( iCol>0 && p->iLastCol!=iCol ){
+    if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, 1))
+     || SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iCol))
+    ){
+      goto pendinglistappend_out;
+    }
+    p->iLastCol = iCol;
+    p->iLastPos = 0;
+  }
+  if( iCol>=0 ){
+    assert( iPos>p->iLastPos || (iPos==0 && p->iLastPos==0) );
+    rc = fts3PendingListAppendVarint(&p, 2+iPos-p->iLastPos);
+    if( rc==SQLITE_OK ){
+      p->iLastPos = iPos;
+    }
+  }
+
+ pendinglistappend_out:
+  *pRc = rc;
+  if( p!=*pp ){
+    *pp = p;
+    return 1;
+  }
+  return 0;
+}
+
+/*
+** Free a PendingList object allocated by fts3PendingListAppend().
+*/
+static void fts3PendingListDelete(PendingList *pList){
+  sqlite3_free(pList);
+}
+
+/*
+** Add an entry to one of the pending-terms hash tables.
+*/
+static int fts3PendingTermsAddOne(
+  Fts3Table *p,
+  int iCol,
+  int iPos,
+  Fts3Hash *pHash,                /* Pending terms hash table to add entry to */
+  const char *zToken,
+  int nToken
+){
+  PendingList *pList;
+  int rc = SQLITE_OK;
+
+  pList = (PendingList *)fts3HashFind(pHash, zToken, nToken);
+  if( pList ){
+    p->nPendingData -= (pList->nData + nToken + sizeof(Fts3HashElem));
+  }
+  if( fts3PendingListAppend(&pList, p->iPrevDocid, iCol, iPos, &rc) ){
+    if( pList==fts3HashInsert(pHash, zToken, nToken, pList) ){
+      /* Malloc failed while inserting the new entry. This can only
+      ** happen if there was no previous entry for this token.
+      */
+      assert( 0==fts3HashFind(pHash, zToken, nToken) );
+      sqlite3_free(pList);
+      rc = SQLITE_NOMEM;
+    }
+  }
+  if( rc==SQLITE_OK ){
+    p->nPendingData += (pList->nData + nToken + sizeof(Fts3HashElem));
+  }
+  return rc;
+}
+
+/*
+** Tokenize the nul-terminated string zText and add all tokens to the
+** pending-terms hash-table. The docid used is that currently stored in
+** p->iPrevDocid, and the column is specified by argument iCol.
+**
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code.
+*/
+static int fts3PendingTermsAdd(
+  Fts3Table *p,                   /* Table into which text will be inserted */
+  int iLangid,                    /* Language id to use */
+  const char *zText,              /* Text of document to be inserted */
+  int iCol,                       /* Column into which text is being inserted */
+  u32 *pnWord                     /* IN/OUT: Incr. by number tokens inserted */
+){
+  int rc;
+  int iStart = 0;
+  int iEnd = 0;
+  int iPos = 0;
+  int nWord = 0;
+
+  char const *zToken;
+  int nToken = 0;
+
+  sqlite3_tokenizer *pTokenizer = p->pTokenizer;
+  sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
+  sqlite3_tokenizer_cursor *pCsr;
+  int (*xNext)(sqlite3_tokenizer_cursor *pCursor,
+      const char**,int*,int*,int*,int*);
+
+  assert( pTokenizer && pModule );
+
+  /* If the user has inserted a NULL value, this function may be called with
+  ** zText==0. In this case, add zero token entries to the hash table and
+  ** return early. */
+  if( zText==0 ){
+    *pnWord = 0;
+    return SQLITE_OK;
+  }
+
+  rc = sqlite3Fts3OpenTokenizer(pTokenizer, iLangid, zText, -1, &pCsr);
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  xNext = pModule->xNext;
+  while( SQLITE_OK==rc
+      && SQLITE_OK==(rc = xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos))
+  ){
+    int i;
+    if( iPos>=nWord ) nWord = iPos+1;
+
+    /* Positions cannot be negative; we use -1 as a terminator internally.
+    ** Tokens must have a non-zero length.
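+    ** (Position values are stored in the doclists as deltas offset by 2,
+    ** leaving the values 0 and 1 free to serve as the doclist terminator
+    ** and column marker respectively - see fts3PendingListAppend() above.)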
+    */
+    if( iPos<0 || !zToken || nToken<=0 ){
+      rc = SQLITE_ERROR;
+      break;
+    }
+
+    /* Add the term to the terms index */
+    rc = fts3PendingTermsAddOne(
+        p, iCol, iPos, &p->aIndex[0].hPending, zToken, nToken
+    );
+
+    /* Add the term to each of the prefix indexes that it is not too
+    ** short for. */
+    for(i=1; rc==SQLITE_OK && i<p->nIndex; i++){
+      struct Fts3Index *pIndex = &p->aIndex[i];
+      if( nToken<pIndex->nPrefix ) continue;
+      rc = fts3PendingTermsAddOne(
+          p, iCol, iPos, &pIndex->hPending, zToken, pIndex->nPrefix
+      );
+    }
+  }
+
+  pModule->xClose(pCsr);
+  *pnWord += nWord;
+  return (rc==SQLITE_DONE ? SQLITE_OK : rc);
+}
+
+/*
+** Calling this function indicates that subsequent calls to
+** fts3PendingTermsAdd() are to add term/position-list pairs for the
+** contents of the document with docid iDocid.
+*/
+static int fts3PendingTermsDocid(
+  Fts3Table *p,                   /* Full-text table handle */
+  int bDelete,                    /* True if this op is a delete */
+  int iLangid,                    /* Language id of row being written */
+  sqlite_int64 iDocid             /* Docid of row being written */
+){
+  assert( iLangid>=0 );
+  assert( bDelete==1 || bDelete==0 );
+
+  /* TODO(shess) Explore whether partially flushing the buffer on
+  ** forced-flush would provide better performance. I suspect that if
+  ** we ordered the doclists by size and flushed the largest until the
+  ** buffer was half empty, that would let the less frequent terms
+  ** generate longer doclists.
+  */
+  if( iDocid<p->iPrevDocid
+   || (iDocid==p->iPrevDocid && p->bPrevDelete==0)
+   || p->iPrevLangid!=iLangid
+   || p->nPendingData>p->nMaxPendingData
+  ){
+    int rc = sqlite3Fts3PendingTermsFlush(p);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+  p->iPrevDocid = iDocid;
+  p->iPrevLangid = iLangid;
+  p->bPrevDelete = bDelete;
+  return SQLITE_OK;
+}
+
+/*
+** Discard the contents of the pending-terms hash tables.
+*/
+SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *p){
+  int i;
+  for(i=0; i<p->nIndex; i++){
+    Fts3HashElem *pElem;
+    Fts3Hash *pHash = &p->aIndex[i].hPending;
+    for(pElem=fts3HashFirst(pHash); pElem; pElem=fts3HashNext(pElem)){
+      PendingList *pList = (PendingList *)fts3HashData(pElem);
+      fts3PendingListDelete(pList);
+    }
+    fts3HashClear(pHash);
+  }
+  p->nPendingData = 0;
+}
+
+/*
+** This function is called by the xUpdate() method as part of an INSERT
+** operation. It adds entries for each term in the new record to the
+** pendingTerms hash table.
+**
+** Argument apVal is the same as the similarly named argument passed to
+** fts3InsertData(). Parameter iDocid is the docid of the new row.
+*/
+static int fts3InsertTerms(
+  Fts3Table *p,
+  int iLangid,
+  sqlite3_value **apVal,
+  u32 *aSz
+){
+  int i;                          /* Iterator variable */
+  for(i=2; i<p->nColumn+2; i++){
+    int iCol = i-2;
+    if( p->abNotindexed[iCol]==0 ){
+      const char *zText = (const char *)sqlite3_value_text(apVal[i]);
+      int rc = fts3PendingTermsAdd(p, iLangid, zText, iCol, &aSz[iCol]);
+      if( rc!=SQLITE_OK ){
+        return rc;
+      }
+      aSz[p->nColumn] += sqlite3_value_bytes(apVal[i]);
+    }
+  }
+  return SQLITE_OK;
+}
+
+/*
+** This function is called by the xUpdate() method for an INSERT operation.
+** The apVal parameter is passed a copy of the apVal argument passed by
+** SQLite to the xUpdate() method. i.e:
+**
+**   apVal[0]                Not used for INSERT.
+**   apVal[1]                rowid
+**   apVal[2]                Left-most user-defined column
+**   ...
+** apVal[p->nColumn+1] Right-most user-defined column +** apVal[p->nColumn+2] Hidden column with same name as table +** apVal[p->nColumn+3] Hidden "docid" column (alias for rowid) +** apVal[p->nColumn+4] Hidden languageid column +*/ +static int fts3InsertData( + Fts3Table *p, /* Full-text table */ + sqlite3_value **apVal, /* Array of values to insert */ + sqlite3_int64 *piDocid /* OUT: Docid for row just inserted */ +){ + int rc; /* Return code */ + sqlite3_stmt *pContentInsert; /* INSERT INTO %_content VALUES(...) */ + + if( p->zContentTbl ){ + sqlite3_value *pRowid = apVal[p->nColumn+3]; + if( sqlite3_value_type(pRowid)==SQLITE_NULL ){ + pRowid = apVal[1]; + } + if( sqlite3_value_type(pRowid)!=SQLITE_INTEGER ){ + return SQLITE_CONSTRAINT; + } + *piDocid = sqlite3_value_int64(pRowid); + return SQLITE_OK; + } + + /* Locate the statement handle used to insert data into the %_content + ** table. The SQL for this statement is: + ** + ** INSERT INTO %_content VALUES(?, ?, ?, ...) + ** + ** The statement features N '?' variables, where N is the number of user + ** defined columns in the FTS3 table, plus one for the docid field. + */ + rc = fts3SqlStmt(p, SQL_CONTENT_INSERT, &pContentInsert, &apVal[1]); + if( rc==SQLITE_OK && p->zLanguageid ){ + rc = sqlite3_bind_int( + pContentInsert, p->nColumn+2, + sqlite3_value_int(apVal[p->nColumn+4]) + ); + } + if( rc!=SQLITE_OK ) return rc; + + /* There is a quirk here. The users INSERT statement may have specified + ** a value for the "rowid" field, for the "docid" field, or for both. + ** Which is a problem, since "rowid" and "docid" are aliases for the + ** same value. For example: + ** + ** INSERT INTO fts3tbl(rowid, docid) VALUES(1, 2); + ** + ** In FTS3, this is an error. It is an error to specify non-NULL values + ** for both docid and some other rowid alias. + */ + if( SQLITE_NULL!=sqlite3_value_type(apVal[3+p->nColumn]) ){ + if( SQLITE_NULL==sqlite3_value_type(apVal[0]) + && SQLITE_NULL!=sqlite3_value_type(apVal[1]) + ){ + /* A rowid/docid conflict. */ + return SQLITE_ERROR; + } + rc = sqlite3_bind_value(pContentInsert, 1, apVal[3+p->nColumn]); + if( rc!=SQLITE_OK ) return rc; + } + + /* Execute the statement to insert the record. Set *piDocid to the + ** new docid value. + */ + sqlite3_step(pContentInsert); + rc = sqlite3_reset(pContentInsert); + + *piDocid = sqlite3_last_insert_rowid(p->db); + return rc; +} + + + +/* +** Remove all data from the FTS3 table. Clear the hash table containing +** pending terms. +*/ +static int fts3DeleteAll(Fts3Table *p, int bContent){ + int rc = SQLITE_OK; /* Return code */ + + /* Discard the contents of the pending-terms hash table. */ + sqlite3Fts3PendingTermsClear(p); + + /* Delete everything from the shadow tables. Except, leave %_content as + ** is if bContent is false. */ + assert( p->zContentTbl==0 || bContent==0 ); + if( bContent ) fts3SqlExec(&rc, p, SQL_DELETE_ALL_CONTENT, 0); + fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGMENTS, 0); + fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGDIR, 0); + if( p->bHasDocsize ){ + fts3SqlExec(&rc, p, SQL_DELETE_ALL_DOCSIZE, 0); + } + if( p->bHasStat ){ + fts3SqlExec(&rc, p, SQL_DELETE_ALL_STAT, 0); + } + return rc; +} + +/* +** +*/ +static int langidFromSelect(Fts3Table *p, sqlite3_stmt *pSelect){ + int iLangid = 0; + if( p->zLanguageid ) iLangid = sqlite3_column_int(pSelect, p->nColumn+1); + return iLangid; +} + +/* +** The first element in the apVal[] array is assumed to contain the docid +** (an integer) of a row about to be deleted. Remove all terms from the +** full-text index. 
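+** (The row is read back from the %_content table and each indexed column
+** is tokenized again, so that an entry for this docid can be added to the
+** pending-terms table for every term in the row being deleted.)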
+*/ +static void fts3DeleteTerms( + int *pRC, /* Result code */ + Fts3Table *p, /* The FTS table to delete from */ + sqlite3_value *pRowid, /* The docid to be deleted */ + u32 *aSz, /* Sizes of deleted document written here */ + int *pbFound /* OUT: Set to true if row really does exist */ +){ + int rc; + sqlite3_stmt *pSelect; + + assert( *pbFound==0 ); + if( *pRC ) return; + rc = fts3SqlStmt(p, SQL_SELECT_CONTENT_BY_ROWID, &pSelect, &pRowid); + if( rc==SQLITE_OK ){ + if( SQLITE_ROW==sqlite3_step(pSelect) ){ + int i; + int iLangid = langidFromSelect(p, pSelect); + i64 iDocid = sqlite3_column_int64(pSelect, 0); + rc = fts3PendingTermsDocid(p, 1, iLangid, iDocid); + for(i=1; rc==SQLITE_OK && i<=p->nColumn; i++){ + int iCol = i-1; + if( p->abNotindexed[iCol]==0 ){ + const char *zText = (const char *)sqlite3_column_text(pSelect, i); + rc = fts3PendingTermsAdd(p, iLangid, zText, -1, &aSz[iCol]); + aSz[p->nColumn] += sqlite3_column_bytes(pSelect, i); + } + } + if( rc!=SQLITE_OK ){ + sqlite3_reset(pSelect); + *pRC = rc; + return; + } + *pbFound = 1; + } + rc = sqlite3_reset(pSelect); + }else{ + sqlite3_reset(pSelect); + } + *pRC = rc; +} + +/* +** Forward declaration to account for the circular dependency between +** functions fts3SegmentMerge() and fts3AllocateSegdirIdx(). +*/ +static int fts3SegmentMerge(Fts3Table *, int, int, int); + +/* +** This function allocates a new level iLevel index in the segdir table. +** Usually, indexes are allocated within a level sequentially starting +** with 0, so the allocated index is one greater than the value returned +** by: +** +** SELECT max(idx) FROM %_segdir WHERE level = :iLevel +** +** However, if there are already FTS3_MERGE_COUNT indexes at the requested +** level, they are merged into a single level (iLevel+1) segment and the +** allocated index is 0. +** +** If successful, *piIdx is set to the allocated index slot and SQLITE_OK +** returned. Otherwise, an SQLite error code is returned. +*/ +static int fts3AllocateSegdirIdx( + Fts3Table *p, + int iLangid, /* Language id */ + int iIndex, /* Index for p->aIndex */ + int iLevel, + int *piIdx +){ + int rc; /* Return Code */ + sqlite3_stmt *pNextIdx; /* Query for next idx at level iLevel */ + int iNext = 0; /* Result of query pNextIdx */ + + assert( iLangid>=0 ); + assert( p->nIndex>=1 ); + + /* Set variable iNext to the next available segdir index at level iLevel. */ + rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pNextIdx, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64( + pNextIdx, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel) + ); + if( SQLITE_ROW==sqlite3_step(pNextIdx) ){ + iNext = sqlite3_column_int(pNextIdx, 0); + } + rc = sqlite3_reset(pNextIdx); + } + + if( rc==SQLITE_OK ){ + /* If iNext is FTS3_MERGE_COUNT, indicating that level iLevel is already + ** full, merge all segments in level iLevel into a single iLevel+1 + ** segment and allocate (newly freed) index 0 at level iLevel. Otherwise, + ** if iNext is less than FTS3_MERGE_COUNT, allocate index iNext. + */ + if( iNext>=FTS3_MERGE_COUNT ){ + fts3LogMerge(16, getAbsoluteLevel(p, iLangid, iIndex, iLevel)); + rc = fts3SegmentMerge(p, iLangid, iIndex, iLevel); + *piIdx = 0; + }else{ + *piIdx = iNext; + } + } + + return rc; +} + +/* +** The %_segments table is declared as follows: +** +** CREATE TABLE %_segments(blockid INTEGER PRIMARY KEY, block BLOB) +** +** This function reads data from a single row of the %_segments table. The +** specific row is identified by the iBlockid parameter. 
If paBlob is not +** NULL, then a buffer is allocated using sqlite3_malloc() and populated +** with the contents of the blob stored in the "block" column of the +** identified table row is. Whether or not paBlob is NULL, *pnBlob is set +** to the size of the blob in bytes before returning. +** +** If an error occurs, or the table does not contain the specified row, +** an SQLite error code is returned. Otherwise, SQLITE_OK is returned. If +** paBlob is non-NULL, then it is the responsibility of the caller to +** eventually free the returned buffer. +** +** This function may leave an open sqlite3_blob* handle in the +** Fts3Table.pSegments variable. This handle is reused by subsequent calls +** to this function. The handle may be closed by calling the +** sqlite3Fts3SegmentsClose() function. Reusing a blob handle is a handy +** performance improvement, but the blob handle should always be closed +** before control is returned to the user (to prevent a lock being held +** on the database file for longer than necessary). Thus, any virtual table +** method (xFilter etc.) that may directly or indirectly call this function +** must call sqlite3Fts3SegmentsClose() before returning. +*/ +SQLITE_PRIVATE int sqlite3Fts3ReadBlock( + Fts3Table *p, /* FTS3 table handle */ + sqlite3_int64 iBlockid, /* Access the row with blockid=$iBlockid */ + char **paBlob, /* OUT: Blob data in malloc'd buffer */ + int *pnBlob, /* OUT: Size of blob data */ + int *pnLoad /* OUT: Bytes actually loaded */ +){ + int rc; /* Return code */ + + /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. */ + assert( pnBlob ); + + if( p->pSegments ){ + rc = sqlite3_blob_reopen(p->pSegments, iBlockid); + }else{ + if( 0==p->zSegmentsTbl ){ + p->zSegmentsTbl = sqlite3_mprintf("%s_segments", p->zName); + if( 0==p->zSegmentsTbl ) return SQLITE_NOMEM; + } + rc = sqlite3_blob_open( + p->db, p->zDb, p->zSegmentsTbl, "block", iBlockid, 0, &p->pSegments + ); + } + + if( rc==SQLITE_OK ){ + int nByte = sqlite3_blob_bytes(p->pSegments); + *pnBlob = nByte; + if( paBlob ){ + char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING); + if( !aByte ){ + rc = SQLITE_NOMEM; + }else{ + if( pnLoad && nByte>(FTS3_NODE_CHUNK_THRESHOLD) ){ + nByte = FTS3_NODE_CHUNKSIZE; + *pnLoad = nByte; + } + rc = sqlite3_blob_read(p->pSegments, aByte, nByte, 0); + memset(&aByte[nByte], 0, FTS3_NODE_PADDING); + if( rc!=SQLITE_OK ){ + sqlite3_free(aByte); + aByte = 0; + } + } + *paBlob = aByte; + } + } + + return rc; +} + +/* +** Close the blob handle at p->pSegments, if it is open. See comments above +** the sqlite3Fts3ReadBlock() function for details. 
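+** (Virtual table methods that may have read segment blocks call this
+** before returning, so that no lock on the database file outlives the
+** call into FTS3.)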
+*/ +SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *p){ + sqlite3_blob_close(p->pSegments); + p->pSegments = 0; +} + +static int fts3SegReaderIncrRead(Fts3SegReader *pReader){ + int nRead; /* Number of bytes to read */ + int rc; /* Return code */ + + nRead = MIN(pReader->nNode - pReader->nPopulate, FTS3_NODE_CHUNKSIZE); + rc = sqlite3_blob_read( + pReader->pBlob, + &pReader->aNode[pReader->nPopulate], + nRead, + pReader->nPopulate + ); + + if( rc==SQLITE_OK ){ + pReader->nPopulate += nRead; + memset(&pReader->aNode[pReader->nPopulate], 0, FTS3_NODE_PADDING); + if( pReader->nPopulate==pReader->nNode ){ + sqlite3_blob_close(pReader->pBlob); + pReader->pBlob = 0; + pReader->nPopulate = 0; + } + } + return rc; +} + +static int fts3SegReaderRequire(Fts3SegReader *pReader, char *pFrom, int nByte){ + int rc = SQLITE_OK; + assert( !pReader->pBlob + || (pFrom>=pReader->aNode && pFrom<&pReader->aNode[pReader->nNode]) + ); + while( pReader->pBlob && rc==SQLITE_OK + && (pFrom - pReader->aNode + nByte)>pReader->nPopulate + ){ + rc = fts3SegReaderIncrRead(pReader); + } + return rc; +} + +/* +** Set an Fts3SegReader cursor to point at EOF. +*/ +static void fts3SegReaderSetEof(Fts3SegReader *pSeg){ + if( !fts3SegReaderIsRootOnly(pSeg) ){ + sqlite3_free(pSeg->aNode); + sqlite3_blob_close(pSeg->pBlob); + pSeg->pBlob = 0; + } + pSeg->aNode = 0; +} + +/* +** Move the iterator passed as the first argument to the next term in the +** segment. If successful, SQLITE_OK is returned. If there is no next term, +** SQLITE_DONE. Otherwise, an SQLite error code. +*/ +static int fts3SegReaderNext( + Fts3Table *p, + Fts3SegReader *pReader, + int bIncr +){ + int rc; /* Return code of various sub-routines */ + char *pNext; /* Cursor variable */ + int nPrefix; /* Number of bytes in term prefix */ + int nSuffix; /* Number of bytes in term suffix */ + + if( !pReader->aDoclist ){ + pNext = pReader->aNode; + }else{ + pNext = &pReader->aDoclist[pReader->nDoclist]; + } + + if( !pNext || pNext>=&pReader->aNode[pReader->nNode] ){ + + if( fts3SegReaderIsPending(pReader) ){ + Fts3HashElem *pElem = *(pReader->ppNextElem); + sqlite3_free(pReader->aNode); + pReader->aNode = 0; + if( pElem ){ + char *aCopy; + PendingList *pList = (PendingList *)fts3HashData(pElem); + int nCopy = pList->nData+1; + pReader->zTerm = (char *)fts3HashKey(pElem); + pReader->nTerm = fts3HashKeysize(pElem); + aCopy = (char*)sqlite3_malloc(nCopy); + if( !aCopy ) return SQLITE_NOMEM; + memcpy(aCopy, pList->aData, nCopy); + pReader->nNode = pReader->nDoclist = nCopy; + pReader->aNode = pReader->aDoclist = aCopy; + pReader->ppNextElem++; + assert( pReader->aNode ); + } + return SQLITE_OK; + } + + fts3SegReaderSetEof(pReader); + + /* If iCurrentBlock>=iLeafEndBlock, this is an EOF condition. All leaf + ** blocks have already been traversed. */ + assert( pReader->iCurrentBlock<=pReader->iLeafEndBlock ); + if( pReader->iCurrentBlock>=pReader->iLeafEndBlock ){ + return SQLITE_OK; + } + + rc = sqlite3Fts3ReadBlock( + p, ++pReader->iCurrentBlock, &pReader->aNode, &pReader->nNode, + (bIncr ? 
&pReader->nPopulate : 0)
+    );
+    if( rc!=SQLITE_OK ) return rc;
+    assert( pReader->pBlob==0 );
+    if( bIncr && pReader->nPopulate<pReader->nNode ){
+      pReader->pBlob = p->pSegments;
+      p->pSegments = 0;
+    }
+    pNext = pReader->aNode;
+  }
+
+  assert( !fts3SegReaderIsPending(pReader) );
+
+  rc = fts3SegReaderRequire(pReader, pNext, FTS3_VARINT_MAX*2);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Because of the FTS3_NODE_PADDING bytes of padding, the following is
+  ** safe (no risk of overread) even if the node data is corrupted. */
+  pNext += fts3GetVarint32(pNext, &nPrefix);
+  pNext += fts3GetVarint32(pNext, &nSuffix);
+  if( nPrefix<0 || nSuffix<=0
+   || &pNext[nSuffix]>&pReader->aNode[pReader->nNode]
+  ){
+    return FTS_CORRUPT_VTAB;
+  }
+
+  if( nPrefix+nSuffix>pReader->nTermAlloc ){
+    int nNew = (nPrefix+nSuffix)*2;
+    char *zNew = sqlite3_realloc(pReader->zTerm, nNew);
+    if( !zNew ){
+      return SQLITE_NOMEM;
+    }
+    pReader->zTerm = zNew;
+    pReader->nTermAlloc = nNew;
+  }
+
+  rc = fts3SegReaderRequire(pReader, pNext, nSuffix+FTS3_VARINT_MAX);
+  if( rc!=SQLITE_OK ) return rc;
+
+  memcpy(&pReader->zTerm[nPrefix], pNext, nSuffix);
+  pReader->nTerm = nPrefix+nSuffix;
+  pNext += nSuffix;
+  pNext += fts3GetVarint32(pNext, &pReader->nDoclist);
+  pReader->aDoclist = pNext;
+  pReader->pOffsetList = 0;
+
+  /* Check that the doclist does not appear to extend past the end of the
+  ** b-tree node. And that the final byte of the doclist is 0x00. If either
+  ** of these statements is untrue, then the data structure is corrupt.
+  */
+  if( &pReader->aDoclist[pReader->nDoclist]>&pReader->aNode[pReader->nNode]
+   || (pReader->nPopulate==0 && pReader->aDoclist[pReader->nDoclist-1])
+  ){
+    return FTS_CORRUPT_VTAB;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Set the SegReader to point to the first docid in the doclist associated
+** with the current term.
+*/
+static int fts3SegReaderFirstDocid(Fts3Table *pTab, Fts3SegReader *pReader){
+  int rc = SQLITE_OK;
+  assert( pReader->aDoclist );
+  assert( !pReader->pOffsetList );
+  if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){
+    u8 bEof = 0;
+    pReader->iDocid = 0;
+    pReader->nOffsetList = 0;
+    sqlite3Fts3DoclistPrev(0,
+        pReader->aDoclist, pReader->nDoclist, &pReader->pOffsetList,
+        &pReader->iDocid, &pReader->nOffsetList, &bEof
+    );
+  }else{
+    rc = fts3SegReaderRequire(pReader, pReader->aDoclist, FTS3_VARINT_MAX);
+    if( rc==SQLITE_OK ){
+      int n = sqlite3Fts3GetVarint(pReader->aDoclist, &pReader->iDocid);
+      pReader->pOffsetList = &pReader->aDoclist[n];
+    }
+  }
+  return rc;
+}
+
+/*
+** Advance the SegReader to point to the next docid in the doclist
+** associated with the current term.
+**
+** If arguments ppOffsetList and pnOffsetList are not NULL, then
+** *ppOffsetList is set to point to the first column-offset list
+** in the doclist entry (i.e. immediately past the docid varint).
+** *pnOffsetList is set to the length of the set of column-offset
+** lists, not including the nul-terminator byte. For example:
+*/
+static int fts3SegReaderNextDocid(
+  Fts3Table *pTab,
+  Fts3SegReader *pReader,         /* Reader to advance to next docid */
+  char **ppOffsetList,            /* OUT: Pointer to current position-list */
+  int *pnOffsetList               /* OUT: Length of *ppOffsetList in bytes */
+){
+  int rc = SQLITE_OK;
+  char *p = pReader->pOffsetList;
+  char c = 0;
+
+  assert( p );
+
+  if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){
+    /* A pending-terms seg-reader for an FTS4 table that uses order=desc.
+    ** Pending-terms doclists are always built up in ascending order, so
+    ** we have to iterate through them backwards here. */
+    u8 bEof = 0;
+    if( ppOffsetList ){
+      *ppOffsetList = pReader->pOffsetList;
+      *pnOffsetList = pReader->nOffsetList - 1;
+    }
+    sqlite3Fts3DoclistPrev(0,
+        pReader->aDoclist, pReader->nDoclist, &p, &pReader->iDocid,
+        &pReader->nOffsetList, &bEof
+    );
+    if( bEof ){
+      pReader->pOffsetList = 0;
+    }else{
+      pReader->pOffsetList = p;
+    }
+  }else{
+    char *pEnd = &pReader->aDoclist[pReader->nDoclist];
+
+    /* Pointer p currently points at the first byte of an offset list. The
+    ** following block advances it to point one byte past the end of
+    ** the same offset list. */
+    while( 1 ){
+
+      /* The following line of code (and the "p++" below the while() loop) is
+      ** normally all that is required to move pointer p to the desired
+      ** position. The exception is if this node is being loaded from disk
+      ** incrementally and pointer "p" now points to the first byte past
+      ** the populated part of pReader->aNode[].
+      */
+      while( *p | c ) c = *p++ & 0x80;
+      assert( *p==0 );
+
+      if( pReader->pBlob==0 || p<&pReader->aNode[pReader->nPopulate] ) break;
+      rc = fts3SegReaderIncrRead(pReader);
+      if( rc!=SQLITE_OK ) return rc;
+    }
+    p++;
+
+    /* If required, populate the output variables with a pointer to and the
+    ** size of the previous offset-list.
+    */
+    if( ppOffsetList ){
+      *ppOffsetList = pReader->pOffsetList;
+      *pnOffsetList = (int)(p - pReader->pOffsetList - 1);
+    }
+
+    /* List may have been edited in place by fts3EvalNearTrim() */
+    while( p<pEnd && *p==0 ) p++;
+
+    /* If there are no more entries in the doclist, set pOffsetList to
+    ** NULL. Otherwise, set Fts3SegReader.iDocid to the next docid and
+    ** pOffsetList to point to the next offset-list, before returning.
+    */
+    if( p>=pEnd ){
+      pReader->pOffsetList = 0;
+    }else{
+      rc = fts3SegReaderRequire(pReader, p, FTS3_VARINT_MAX);
+      if( rc==SQLITE_OK ){
+        sqlite3_int64 iDelta;
+        pReader->pOffsetList = p + sqlite3Fts3GetVarint(p, &iDelta);
+        if( pTab->bDescIdx ){
+          pReader->iDocid -= iDelta;
+        }else{
+          pReader->iDocid += iDelta;
+        }
+      }
+    }
+  }
+
+  return SQLITE_OK;
+}
+
+
+SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(
+  Fts3Cursor *pCsr,
+  Fts3MultiSegReader *pMsr,
+  int *pnOvfl
+){
+  Fts3Table *p = (Fts3Table*)pCsr->base.pVtab;
+  int nOvfl = 0;
+  int ii;
+  int rc = SQLITE_OK;
+  int pgsz = p->nPgsz;
+
+  assert( p->bFts4 );
+  assert( pgsz>0 );
+
+  for(ii=0; rc==SQLITE_OK && ii<pMsr->nSegment; ii++){
+    Fts3SegReader *pReader = pMsr->apSegment[ii];
+    if( !fts3SegReaderIsPending(pReader)
+     && !fts3SegReaderIsRootOnly(pReader)
+    ){
+      sqlite3_int64 jj;
+      for(jj=pReader->iStartBlock; jj<=pReader->iLeafEndBlock; jj++){
+        int nBlob;
+        rc = sqlite3Fts3ReadBlock(p, jj, 0, &nBlob, 0);
+        if( rc!=SQLITE_OK ) break;
+        if( (nBlob+35)>pgsz ){
+          nOvfl += (nBlob + 34)/pgsz;
+        }
+      }
+    }
+  }
+  *pnOvfl = nOvfl;
+  return rc;
+}
+
+/*
+** Free all allocations associated with the iterator passed as the
+** second argument.
+*/
+SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *pReader){
+  if( pReader ){
+    if( !fts3SegReaderIsPending(pReader) ){
+      sqlite3_free(pReader->zTerm);
+    }
+    if( !fts3SegReaderIsRootOnly(pReader) ){
+      sqlite3_free(pReader->aNode);
+    }
+    sqlite3_blob_close(pReader->pBlob);
+  }
+  sqlite3_free(pReader);
+}
+
+/*
+** Allocate a new SegReader object.
+*/
+SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(
+  int iAge,                       /* Segment "age". */
+  int bLookup,                    /* True for a lookup only */
+  sqlite3_int64 iStartLeaf,       /* First leaf to traverse */
+  sqlite3_int64 iEndLeaf,         /* Final leaf to traverse */
+  sqlite3_int64 iEndBlock,        /* Final block of segment */
+  const char *zRoot,              /* Buffer containing root node */
+  int nRoot,                      /* Size of buffer containing root node */
+  Fts3SegReader **ppReader        /* OUT: Allocated Fts3SegReader */
+){
+  Fts3SegReader *pReader;         /* Newly allocated SegReader object */
+  int nExtra = 0;                 /* Bytes to allocate segment root node */
+
+  assert( iStartLeaf<=iEndLeaf );
+  if( iStartLeaf==0 ){
+    nExtra = nRoot + FTS3_NODE_PADDING;
+  }
+
+  pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra);
+  if( !pReader ){
+    return SQLITE_NOMEM;
+  }
+  memset(pReader, 0, sizeof(Fts3SegReader));
+  pReader->iIdx = iAge;
+  pReader->bLookup = bLookup!=0;
+  pReader->iStartBlock = iStartLeaf;
+  pReader->iLeafEndBlock = iEndLeaf;
+  pReader->iEndBlock = iEndBlock;
+
+  if( nExtra ){
+    /* The entire segment is stored in the root node. */
+    pReader->aNode = (char *)&pReader[1];
+    pReader->rootOnly = 1;
+    pReader->nNode = nRoot;
+    memcpy(pReader->aNode, zRoot, nRoot);
+    memset(&pReader->aNode[nRoot], 0, FTS3_NODE_PADDING);
+  }else{
+    pReader->iCurrentBlock = iStartLeaf-1;
+  }
+  *ppReader = pReader;
+  return SQLITE_OK;
+}
+
+/*
+** This is a comparison function used as a qsort() callback when sorting
+** an array of pending terms by term. This occurs as part of flushing
+** the contents of the pending-terms hash table to the database.
+*/
+static int SQLITE_CDECL fts3CompareElemByTerm(
+  const void *lhs,
+  const void *rhs
+){
+  char *z1 = fts3HashKey(*(Fts3HashElem **)lhs);
+  char *z2 = fts3HashKey(*(Fts3HashElem **)rhs);
+  int n1 = fts3HashKeysize(*(Fts3HashElem **)lhs);
+  int n2 = fts3HashKeysize(*(Fts3HashElem **)rhs);
+
+  int n = (n1<n2 ? n1 : n2);
+  int c = memcmp(z1, z2, n);
+  if( c==0 ){
+    c = n1 - n2;
+  }
+  return c;
+}
+
+/*
+** This function is used to allocate an Fts3SegReader that iterates through
+** a subset of the entries in a pending-terms table.
+**
+** If bPrefix is true, the returned SegReader visits, in sorted order, each
+** term in the table that has zTerm/nTerm as a prefix. If bPrefix is false,
+** it visits at most one entry - the one that matches zTerm/nTerm exactly.
+*/
+SQLITE_PRIVATE int sqlite3Fts3SegReaderPending(
+  Fts3Table *p,                   /* Virtual table handle */
+  int iIndex,                     /* Index for p->aIndex */
+  const char *zTerm,              /* Term to search for */
+  int nTerm,                      /* Size of buffer zTerm */
+  int bPrefix,                    /* True for a prefix iterator */
+  Fts3SegReader **ppReader        /* OUT: SegReader for pending-terms */
+){
+  Fts3SegReader *pReader = 0;     /* Fts3SegReader object to return */
+  Fts3HashElem *pE;               /* Iterator variable */
+  Fts3HashElem **aElem = 0;       /* Array of term hash entries to scan */
+  int nElem = 0;                  /* Size of array at aElem */
+  int rc = SQLITE_OK;             /* Return Code */
+  Fts3Hash *pHash;
+
+  pHash = &p->aIndex[iIndex].hPending;
+  if( bPrefix ){
+    int nAlloc = 0;               /* Size of allocated array at aElem */
+
+    for(pE=fts3HashFirst(pHash); pE; pE=fts3HashNext(pE)){
+      char *zKey = (char *)fts3HashKey(pE);
+      int nKey = fts3HashKeysize(pE);
+      if( nTerm==0 || (nKey>=nTerm && 0==memcmp(zKey, zTerm, nTerm)) ){
+        if( nElem==nAlloc ){
+          Fts3HashElem **aElem2;
+          nAlloc += 16;
+          aElem2 = (Fts3HashElem **)sqlite3_realloc(
+              aElem, nAlloc*sizeof(Fts3HashElem *)
+          );
+          if( !aElem2 ){
+            rc = SQLITE_NOMEM;
+            nElem = 0;
+            break;
+          }
+          aElem = aElem2;
+        }
+
+        aElem[nElem++] = pE;
+      }
+    }
+
+    /* If more than one term matches the prefix, sort the Fts3HashElem
+    ** objects in term order using qsort(). This uses the same comparison
+    ** callback as is used when flushing terms to disk.
+    */
+    if( nElem>1 ){
+      qsort(aElem, nElem, sizeof(Fts3HashElem *), fts3CompareElemByTerm);
+    }
+
+  }else{
+    /* The query is a simple term lookup that matches at most one term in
+    ** the index. All that is required is a straight hash-lookup.
+ ** + ** Because the stack address of pE may be accessed via the aElem pointer + ** below, the "Fts3HashElem *pE" must be declared so that it is valid + ** within this entire function, not just this "else{...}" block. + */ + pE = fts3HashFindElem(pHash, zTerm, nTerm); + if( pE ){ + aElem = &pE; + nElem = 1; + } + } + + if( nElem>0 ){ + int nByte = sizeof(Fts3SegReader) + (nElem+1)*sizeof(Fts3HashElem *); + pReader = (Fts3SegReader *)sqlite3_malloc(nByte); + if( !pReader ){ + rc = SQLITE_NOMEM; + }else{ + memset(pReader, 0, nByte); + pReader->iIdx = 0x7FFFFFFF; + pReader->ppNextElem = (Fts3HashElem **)&pReader[1]; + memcpy(pReader->ppNextElem, aElem, nElem*sizeof(Fts3HashElem *)); + } + } + + if( bPrefix ){ + sqlite3_free(aElem); + } + *ppReader = pReader; + return rc; +} + +/* +** Compare the entries pointed to by two Fts3SegReader structures. +** Comparison is as follows: +** +** 1) EOF is greater than not EOF. +** +** 2) The current terms (if any) are compared using memcmp(). If one +** term is a prefix of another, the longer term is considered the +** larger. +** +** 3) By segment age. An older segment is considered larger. +*/ +static int fts3SegReaderCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ + int rc; + if( pLhs->aNode && pRhs->aNode ){ + int rc2 = pLhs->nTerm - pRhs->nTerm; + if( rc2<0 ){ + rc = memcmp(pLhs->zTerm, pRhs->zTerm, pLhs->nTerm); + }else{ + rc = memcmp(pLhs->zTerm, pRhs->zTerm, pRhs->nTerm); + } + if( rc==0 ){ + rc = rc2; + } + }else{ + rc = (pLhs->aNode==0) - (pRhs->aNode==0); + } + if( rc==0 ){ + rc = pRhs->iIdx - pLhs->iIdx; + } + assert( rc!=0 ); + return rc; +} + +/* +** A different comparison function for SegReader structures. In this +** version, it is assumed that each SegReader points to an entry in +** a doclist for identical terms. Comparison is made as follows: +** +** 1) EOF (end of doclist in this case) is greater than not EOF. +** +** 2) By current docid. +** +** 3) By segment age. An older segment is considered larger. +*/ +static int fts3SegReaderDoclistCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ + int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0); + if( rc==0 ){ + if( pLhs->iDocid==pRhs->iDocid ){ + rc = pRhs->iIdx - pLhs->iIdx; + }else{ + rc = (pLhs->iDocid > pRhs->iDocid) ? 1 : -1; + } + } + assert( pLhs->aNode && pRhs->aNode ); + return rc; +} +static int fts3SegReaderDoclistCmpRev(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ + int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0); + if( rc==0 ){ + if( pLhs->iDocid==pRhs->iDocid ){ + rc = pRhs->iIdx - pLhs->iIdx; + }else{ + rc = (pLhs->iDocid < pRhs->iDocid) ? 1 : -1; + } + } + assert( pLhs->aNode && pRhs->aNode ); + return rc; +} + +/* +** Compare the term that the Fts3SegReader object passed as the first argument +** points to with the term specified by arguments zTerm and nTerm. +** +** If the pSeg iterator is already at EOF, return 0. Otherwise, return +** -ve if the pSeg term is less than zTerm/nTerm, 0 if the two terms are +** equal, or +ve if the pSeg term is greater than zTerm/nTerm. +*/ +static int fts3SegReaderTermCmp( + Fts3SegReader *pSeg, /* Segment reader object */ + const char *zTerm, /* Term to compare to */ + int nTerm /* Size of term zTerm in bytes */ +){ + int res = 0; + if( pSeg->aNode ){ + if( pSeg->nTerm>nTerm ){ + res = memcmp(pSeg->zTerm, zTerm, nTerm); + }else{ + res = memcmp(pSeg->zTerm, zTerm, pSeg->nTerm); + } + if( res==0 ){ + res = pSeg->nTerm-nTerm; + } + } + return res; +} + +/* +** Argument apSegment is an array of nSegment elements. 
It is known that +** the final (nSegment-nSuspect) members are already in sorted order +** (according to the comparison function provided). This function shuffles +** the array around until all entries are in sorted order. +*/ +static void fts3SegReaderSort( + Fts3SegReader **apSegment, /* Array to sort entries of */ + int nSegment, /* Size of apSegment array */ + int nSuspect, /* Unsorted entry count */ + int (*xCmp)(Fts3SegReader *, Fts3SegReader *) /* Comparison function */ +){ + int i; /* Iterator variable */ + + assert( nSuspect<=nSegment ); + + if( nSuspect==nSegment ) nSuspect--; + for(i=nSuspect-1; i>=0; i--){ + int j; + for(j=i; j<(nSegment-1); j++){ + Fts3SegReader *pTmp; + if( xCmp(apSegment[j], apSegment[j+1])<0 ) break; + pTmp = apSegment[j+1]; + apSegment[j+1] = apSegment[j]; + apSegment[j] = pTmp; + } + } + +#ifndef NDEBUG + /* Check that the list really is sorted now. */ + for(i=0; i<(nSuspect-1); i++){ + assert( xCmp(apSegment[i], apSegment[i+1])<0 ); + } +#endif +} + +/* +** Insert a record into the %_segments table. +*/ +static int fts3WriteSegment( + Fts3Table *p, /* Virtual table handle */ + sqlite3_int64 iBlock, /* Block id for new block */ + char *z, /* Pointer to buffer containing block data */ + int n /* Size of buffer z in bytes */ +){ + sqlite3_stmt *pStmt; + int rc = fts3SqlStmt(p, SQL_INSERT_SEGMENTS, &pStmt, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pStmt, 1, iBlock); + sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC); + sqlite3_step(pStmt); + rc = sqlite3_reset(pStmt); + } + return rc; +} + +/* +** Find the largest relative level number in the table. If successful, set +** *pnMax to this value and return SQLITE_OK. Otherwise, if an error occurs, +** set *pnMax to zero and return an SQLite error code. +*/ +SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *p, int *pnMax){ + int rc; + int mxLevel = 0; + sqlite3_stmt *pStmt = 0; + + rc = fts3SqlStmt(p, SQL_SELECT_MXLEVEL, &pStmt, 0); + if( rc==SQLITE_OK ){ + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + mxLevel = sqlite3_column_int(pStmt, 0); + } + rc = sqlite3_reset(pStmt); + } + *pnMax = mxLevel; + return rc; +} + +/* +** Insert a record into the %_segdir table. +*/ +static int fts3WriteSegdir( + Fts3Table *p, /* Virtual table handle */ + sqlite3_int64 iLevel, /* Value for "level" field (absolute level) */ + int iIdx, /* Value for "idx" field */ + sqlite3_int64 iStartBlock, /* Value for "start_block" field */ + sqlite3_int64 iLeafEndBlock, /* Value for "leaves_end_block" field */ + sqlite3_int64 iEndBlock, /* Value for "end_block" field */ + sqlite3_int64 nLeafData, /* Bytes of leaf data in segment */ + char *zRoot, /* Blob value for "root" field */ + int nRoot /* Number of bytes in buffer zRoot */ +){ + sqlite3_stmt *pStmt; + int rc = fts3SqlStmt(p, SQL_INSERT_SEGDIR, &pStmt, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pStmt, 1, iLevel); + sqlite3_bind_int(pStmt, 2, iIdx); + sqlite3_bind_int64(pStmt, 3, iStartBlock); + sqlite3_bind_int64(pStmt, 4, iLeafEndBlock); + if( nLeafData==0 ){ + sqlite3_bind_int64(pStmt, 5, iEndBlock); + }else{ + char *zEnd = sqlite3_mprintf("%lld %lld", iEndBlock, nLeafData); + if( !zEnd ) return SQLITE_NOMEM; + sqlite3_bind_text(pStmt, 5, zEnd, -1, sqlite3_free); + } + sqlite3_bind_blob(pStmt, 6, zRoot, nRoot, SQLITE_STATIC); + sqlite3_step(pStmt); + rc = sqlite3_reset(pStmt); + } + return rc; +} + +/* +** Return the size of the common prefix (if any) shared by zPrev and +** zNext, in bytes. 
+** For example,
+**
+**   fts3PrefixCompress("abc", 3, "abcdef", 6)   // returns 3
+**   fts3PrefixCompress("abX", 3, "abcdef", 6)   // returns 2
+**   fts3PrefixCompress("abX", 3, "Xbcdef", 6)   // returns 0
+*/
+static int fts3PrefixCompress(
+  const char *zPrev,              /* Buffer containing previous term */
+  int nPrev,                      /* Size of buffer zPrev in bytes */
+  const char *zNext,              /* Buffer containing next term */
+  int nNext                       /* Size of buffer zNext in bytes */
+){
+  int n;
+  UNUSED_PARAMETER(nNext);
+  for(n=0; n<nPrev && zPrev[n]==zNext[n]; n++);
+  return n;
+}
+
+/*
+** Add term zTerm to the SegmentNode. It is guaranteed that zTerm is larger
+** than all terms written to the node so far.
+*/
+static int fts3NodeAddTerm(
+  Fts3Table *p,                   /* Virtual table handle */
+  SegmentNode **ppTree,           /* IN/OUT: SegmentNode handle */
+  int isCopyTerm,                 /* True if zTerm/nTerm is transient */
+  const char *zTerm,              /* Pointer to term buffer */
+  int nTerm                       /* Size of term in bytes */
+){
+  SegmentNode *pTree = *ppTree;
+  int rc;
+  SegmentNode *pNew;
+
+  /* First try to append the term to the current node. Return early if
+  ** this is possible.
+  */
+  if( pTree ){
+    int nData = pTree->nData;     /* Current size of node in bytes */
+    int nReq = nData;             /* Required space after adding zTerm */
+    int nPrefix;                  /* Number of bytes of prefix compression */
+    int nSuffix;                  /* Suffix length */
+
+    nPrefix = fts3PrefixCompress(pTree->zTerm, pTree->nTerm, zTerm, nTerm);
+    nSuffix = nTerm-nPrefix;
+
+    nReq += sqlite3Fts3VarintLen(nPrefix)+sqlite3Fts3VarintLen(nSuffix)+nSuffix;
+    if( nReq<=p->nNodeSize || !pTree->zTerm ){
+
+      if( nReq>p->nNodeSize ){
+        /* An unusual case: this is the first term to be added to the node
+        ** and the static node buffer (p->nNodeSize bytes) is not large
+        ** enough. Use a separately malloced buffer instead. This wastes
+        ** p->nNodeSize bytes, but since this scenario only comes about when
+        ** the database contains two terms that share a prefix of almost 2KB,
+        ** this is not expected to be a serious problem.
+        */
+        assert( pTree->aData==(char *)&pTree[1] );
+        pTree->aData = (char *)sqlite3_malloc(nReq);
+        if( !pTree->aData ){
+          return SQLITE_NOMEM;
+        }
+      }
+
+      if( pTree->zTerm ){
+        /* There is no prefix-length field for first term in a node */
+        nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nPrefix);
+      }
+
+      nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nSuffix);
+      memcpy(&pTree->aData[nData], &zTerm[nPrefix], nSuffix);
+      pTree->nData = nData + nSuffix;
+      pTree->nEntry++;
+
+      if( isCopyTerm ){
+        if( pTree->nMalloc<nTerm ){
+          char *zNew = sqlite3_realloc(pTree->zMalloc, nTerm*2);
+          if( !zNew ){
+            return SQLITE_NOMEM;
+          }
+          pTree->nMalloc = nTerm*2;
+          pTree->zMalloc = zNew;
+        }
+        pTree->zTerm = pTree->zMalloc;
+        memcpy(pTree->zTerm, zTerm, nTerm);
+        pTree->nTerm = nTerm;
+      }else{
+        pTree->zTerm = (char *)zTerm;
+        pTree->nTerm = nTerm;
+      }
+      return SQLITE_OK;
+    }
+  }
+
+  /* If control flows to here, it was not possible to append zTerm to the
+  ** current node. Create a new node (a right-sibling of the current node).
+  ** If this is the first node in the tree, the term is added to it.
+  **
+  ** Otherwise, the term is not added to the new node; it is left empty for
+  ** now. Instead, the term is inserted into the parent of pTree. If pTree
+  ** has no parent, one is created here.
+  */
+  pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize);
+  if( !pNew ){
+    return SQLITE_NOMEM;
+  }
+  memset(pNew, 0, sizeof(SegmentNode));
+  pNew->nData = 1 + FTS3_VARINT_MAX;
+  pNew->aData = (char *)&pNew[1];
+
+  if( pTree ){
+    SegmentNode *pParent = pTree->pParent;
+    rc = fts3NodeAddTerm(p, &pParent, isCopyTerm, zTerm, nTerm);
+    if( pTree->pParent==0 ){
+      pTree->pParent = pParent;
+    }
+    pTree->pRight = pNew;
+    pNew->pLeftmost = pTree->pLeftmost;
+    pNew->pParent = pParent;
+    pNew->zMalloc = pTree->zMalloc;
+    pNew->nMalloc = pTree->nMalloc;
+    pTree->zMalloc = 0;
+  }else{
+    pNew->pLeftmost = pNew;
+    rc = fts3NodeAddTerm(p, &pNew, isCopyTerm, zTerm, nTerm);
+  }
+
+  *ppTree = pNew;
+  return rc;
+}
+
+/*
+** Helper function for fts3NodeWrite().
+*/ +static int fts3TreeFinishNode( + SegmentNode *pTree, + int iHeight, + sqlite3_int64 iLeftChild +){ + int nStart; + assert( iHeight>=1 && iHeight<128 ); + nStart = FTS3_VARINT_MAX - sqlite3Fts3VarintLen(iLeftChild); + pTree->aData[nStart] = (char)iHeight; + sqlite3Fts3PutVarint(&pTree->aData[nStart+1], iLeftChild); + return nStart; +} + +/* +** Write the buffer for the segment node pTree and all of its peers to the +** database. Then call this function recursively to write the parent of +** pTree and its peers to the database. +** +** Except, if pTree is a root node, do not write it to the database. Instead, +** set output variables *paRoot and *pnRoot to contain the root node. +** +** If successful, SQLITE_OK is returned and output variable *piLast is +** set to the largest blockid written to the database (or zero if no +** blocks were written to the db). Otherwise, an SQLite error code is +** returned. +*/ +static int fts3NodeWrite( + Fts3Table *p, /* Virtual table handle */ + SegmentNode *pTree, /* SegmentNode handle */ + int iHeight, /* Height of this node in tree */ + sqlite3_int64 iLeaf, /* Block id of first leaf node */ + sqlite3_int64 iFree, /* Block id of next free slot in %_segments */ + sqlite3_int64 *piLast, /* OUT: Block id of last entry written */ + char **paRoot, /* OUT: Data for root node */ + int *pnRoot /* OUT: Size of root node in bytes */ +){ + int rc = SQLITE_OK; + + if( !pTree->pParent ){ + /* Root node of the tree. */ + int nStart = fts3TreeFinishNode(pTree, iHeight, iLeaf); + *piLast = iFree-1; + *pnRoot = pTree->nData - nStart; + *paRoot = &pTree->aData[nStart]; + }else{ + SegmentNode *pIter; + sqlite3_int64 iNextFree = iFree; + sqlite3_int64 iNextLeaf = iLeaf; + for(pIter=pTree->pLeftmost; pIter && rc==SQLITE_OK; pIter=pIter->pRight){ + int nStart = fts3TreeFinishNode(pIter, iHeight, iNextLeaf); + int nWrite = pIter->nData - nStart; + + rc = fts3WriteSegment(p, iNextFree, &pIter->aData[nStart], nWrite); + iNextFree++; + iNextLeaf += (pIter->nEntry+1); + } + if( rc==SQLITE_OK ){ + assert( iNextLeaf==iFree ); + rc = fts3NodeWrite( + p, pTree->pParent, iHeight+1, iFree, iNextFree, piLast, paRoot, pnRoot + ); + } + } + + return rc; +} + +/* +** Free all memory allocations associated with the tree pTree. +*/ +static void fts3NodeFree(SegmentNode *pTree){ + if( pTree ){ + SegmentNode *p = pTree->pLeftmost; + fts3NodeFree(p->pParent); + while( p ){ + SegmentNode *pRight = p->pRight; + if( p->aData!=(char *)&p[1] ){ + sqlite3_free(p->aData); + } + assert( pRight==0 || p->zMalloc==0 ); + sqlite3_free(p->zMalloc); + sqlite3_free(p); + p = pRight; + } + } +} + +/* +** Add a term to the segment being constructed by the SegmentWriter object +** *ppWriter. When adding the first term to a segment, *ppWriter should +** be passed NULL. This function will allocate a new SegmentWriter object +** and return it via the input/output variable *ppWriter in this case. +** +** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. 
+*/
+static int fts3SegWriterAdd(
+  Fts3Table *p,                   /* Virtual table handle */
+  SegmentWriter **ppWriter,       /* IN/OUT: SegmentWriter handle */
+  int isCopyTerm,                 /* True if buffer zTerm must be copied */
+  const char *zTerm,              /* Pointer to buffer containing term */
+  int nTerm,                      /* Size of term in bytes */
+  const char *aDoclist,           /* Pointer to buffer containing doclist */
+  int nDoclist                    /* Size of doclist in bytes */
+){
+  int nPrefix;                    /* Size of term prefix in bytes */
+  int nSuffix;                    /* Size of term suffix in bytes */
+  int nReq;                       /* Number of bytes required on leaf page */
+  int nData;
+  SegmentWriter *pWriter = *ppWriter;
+
+  if( !pWriter ){
+    int rc;
+    sqlite3_stmt *pStmt;
+
+    /* Allocate the SegmentWriter structure */
+    pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter));
+    if( !pWriter ) return SQLITE_NOMEM;
+    memset(pWriter, 0, sizeof(SegmentWriter));
+    *ppWriter = pWriter;
+
+    /* Allocate a buffer in which to accumulate data */
+    pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize);
+    if( !pWriter->aData ) return SQLITE_NOMEM;
+    pWriter->nSize = p->nNodeSize;
+
+    /* Find the next free blockid in the %_segments table */
+    rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pStmt, 0);
+    if( rc!=SQLITE_OK ) return rc;
+    if( SQLITE_ROW==sqlite3_step(pStmt) ){
+      pWriter->iFree = sqlite3_column_int64(pStmt, 0);
+      pWriter->iFirst = pWriter->iFree;
+    }
+    rc = sqlite3_reset(pStmt);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+  nData = pWriter->nData;
+
+  nPrefix = fts3PrefixCompress(pWriter->zTerm, pWriter->nTerm, zTerm, nTerm);
+  nSuffix = nTerm-nPrefix;
+
+  /* Figure out how many bytes are required by this new entry */
+  nReq = sqlite3Fts3VarintLen(nPrefix) +    /* varint containing prefix size */
+    sqlite3Fts3VarintLen(nSuffix) +         /* varint containing suffix size */
+    nSuffix +                               /* Term suffix */
+    sqlite3Fts3VarintLen(nDoclist) +        /* Size of doclist */
+    nDoclist;                               /* Doclist data */
+
+  if( nData>0 && nData+nReq>p->nNodeSize ){
+    int rc;
+
+    /* The current leaf node is full. Write it out to the database. */
+    rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, nData);
+    if( rc!=SQLITE_OK ) return rc;
+    p->nLeafAdd++;
+
+    /* Add the current term to the interior node tree. The term added to
+    ** the interior tree must:
+    **
+    **   a) be greater than the largest term on the leaf node just written
+    **      to the database (still available in pWriter->zTerm), and
+    **
+    **   b) be less than or equal to the term about to be added to the new
+    **      leaf node (zTerm/nTerm).
+    **
+    ** In other words, it must be the prefix of zTerm 1 byte longer than
+    ** the common prefix (if any) of zTerm and pWriter->zTerm.
+    */
+    assert( nPrefix<nTerm );
+    rc = fts3NodeAddTerm(p, &pWriter->pTree, isCopyTerm, zTerm, nPrefix+1);
+    if( rc!=SQLITE_OK ) return rc;
+
+    nData = 0;
+    pWriter->nTerm = 0;
+
+    nPrefix = 0;
+    nSuffix = nTerm;
+    nReq = 1 +                              /* varint containing prefix size */
+      sqlite3Fts3VarintLen(nTerm) +         /* varint containing suffix size */
+      nTerm +                               /* Term suffix */
+      sqlite3Fts3VarintLen(nDoclist) +      /* Size of doclist */
+      nDoclist;                             /* Doclist data */
+  }
+
+  /* Increase the total number of bytes written to account for the new entry. */
+  pWriter->nLeafData += nReq;
+
+  /* If the buffer currently allocated is too small for this entry, realloc
+  ** the buffer to make it large enough.
+  */
+  if( nReq>pWriter->nSize ){
+    char *aNew = sqlite3_realloc(pWriter->aData, nReq);
+    if( !aNew ) return SQLITE_NOMEM;
+    pWriter->aData = aNew;
+    pWriter->nSize = nReq;
+  }
+  assert( nData+nReq<=pWriter->nSize );
+
+  /* Append the prefix-compressed term and doclist to the buffer.
*/ + nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nPrefix); + nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nSuffix); + memcpy(&pWriter->aData[nData], &zTerm[nPrefix], nSuffix); + nData += nSuffix; + nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nDoclist); + memcpy(&pWriter->aData[nData], aDoclist, nDoclist); + pWriter->nData = nData + nDoclist; + + /* Save the current term so that it can be used to prefix-compress the next. + ** If the isCopyTerm parameter is true, then the buffer pointed to by + ** zTerm is transient, so take a copy of the term data. Otherwise, just + ** store a copy of the pointer. + */ + if( isCopyTerm ){ + if( nTerm>pWriter->nMalloc ){ + char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2); + if( !zNew ){ + return SQLITE_NOMEM; + } + pWriter->nMalloc = nTerm*2; + pWriter->zMalloc = zNew; + pWriter->zTerm = zNew; + } + assert( pWriter->zTerm==pWriter->zMalloc ); + memcpy(pWriter->zTerm, zTerm, nTerm); + }else{ + pWriter->zTerm = (char *)zTerm; + } + pWriter->nTerm = nTerm; + + return SQLITE_OK; +} + +/* +** Flush all data associated with the SegmentWriter object pWriter to the +** database. This function must be called after all terms have been added +** to the segment using fts3SegWriterAdd(). If successful, SQLITE_OK is +** returned. Otherwise, an SQLite error code. +*/ +static int fts3SegWriterFlush( + Fts3Table *p, /* Virtual table handle */ + SegmentWriter *pWriter, /* SegmentWriter to flush to the db */ + sqlite3_int64 iLevel, /* Value for 'level' column of %_segdir */ + int iIdx /* Value for 'idx' column of %_segdir */ +){ + int rc; /* Return code */ + if( pWriter->pTree ){ + sqlite3_int64 iLast = 0; /* Largest block id written to database */ + sqlite3_int64 iLastLeaf; /* Largest leaf block id written to db */ + char *zRoot = NULL; /* Pointer to buffer containing root node */ + int nRoot = 0; /* Size of buffer zRoot */ + + iLastLeaf = pWriter->iFree; + rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, pWriter->nData); + if( rc==SQLITE_OK ){ + rc = fts3NodeWrite(p, pWriter->pTree, 1, + pWriter->iFirst, pWriter->iFree, &iLast, &zRoot, &nRoot); + } + if( rc==SQLITE_OK ){ + rc = fts3WriteSegdir(p, iLevel, iIdx, + pWriter->iFirst, iLastLeaf, iLast, pWriter->nLeafData, zRoot, nRoot); + } + }else{ + /* The entire tree fits on the root node. Write it to the segdir table. */ + rc = fts3WriteSegdir(p, iLevel, iIdx, + 0, 0, 0, pWriter->nLeafData, pWriter->aData, pWriter->nData); + } + p->nLeafAdd++; + return rc; +} + +/* +** Release all memory held by the SegmentWriter object passed as the +** first argument. +*/ +static void fts3SegWriterFree(SegmentWriter *pWriter){ + if( pWriter ){ + sqlite3_free(pWriter->aData); + sqlite3_free(pWriter->zMalloc); + fts3NodeFree(pWriter->pTree); + sqlite3_free(pWriter); + } +} + +/* +** The first value in the apVal[] array is assumed to contain an integer. +** This function tests if there exist any documents with docid values that +** are different from that integer. i.e. if deleting the document with docid +** pRowid would mean the FTS3 table were empty. +** +** If successful, *pisEmpty is set to true if the table is empty except for +** document pRowid, or false otherwise, and SQLITE_OK is returned. If an +** error occurs, an SQLite error code is returned. 
+*/
+static int fts3IsEmpty(Fts3Table *p, sqlite3_value *pRowid, int *pisEmpty){
+  sqlite3_stmt *pStmt;
+  int rc;
+  if( p->zContentTbl ){
+    /* If using the content=xxx option, assume the table is never empty */
+    *pisEmpty = 0;
+    rc = SQLITE_OK;
+  }else{
+    rc = fts3SqlStmt(p, SQL_IS_EMPTY, &pStmt, &pRowid);
+    if( rc==SQLITE_OK ){
+      if( SQLITE_ROW==sqlite3_step(pStmt) ){
+        *pisEmpty = sqlite3_column_int(pStmt, 0);
+      }
+      rc = sqlite3_reset(pStmt);
+    }
+  }
+  return rc;
+}
+
+/*
+** Set *pnMax to the largest segment level in the database for the index
+** iIndex.
+**
+** Segment levels are stored in the 'level' column of the %_segdir table.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if not.
+*/
+static int fts3SegmentMaxLevel(
+  Fts3Table *p,
+  int iLangid,
+  int iIndex,
+  sqlite3_int64 *pnMax
+){
+  sqlite3_stmt *pStmt;
+  int rc;
+  assert( iIndex>=0 && iIndex<p->nIndex );
+
+  /* Set pStmt to the compiled version of:
+  **
+  **   SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?
+  **
+  ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR).
+  */
+  rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0);
+  if( rc!=SQLITE_OK ) return rc;
+  sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0));
+  sqlite3_bind_int64(pStmt, 2,
+      getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1)
+  );
+  if( SQLITE_ROW==sqlite3_step(pStmt) ){
+    *pnMax = sqlite3_column_int64(pStmt, 0);
+  }
+  return sqlite3_reset(pStmt);
+}
+
+/*
+** iAbsLevel is an absolute level that may be assumed to exist within
+** the database. This function checks if it is the largest level number
+** within its index. Assuming no error occurs, *pbMax is set to 1 if
+** iAbsLevel is indeed the largest level, or 0 otherwise, and SQLITE_OK
+** is returned. If an error occurs, an error code is returned and the
+** final value of *pbMax is undefined.
+*/
+static int fts3SegmentIsMaxLevel(Fts3Table *p, i64 iAbsLevel, int *pbMax){
+
+  /* Set pStmt to the compiled version of:
+  **
+  **   SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?
+  **
+  ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR).
+  */
+  sqlite3_stmt *pStmt;
+  int rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0);
+  if( rc!=SQLITE_OK ) return rc;
+  sqlite3_bind_int64(pStmt, 1, iAbsLevel+1);
+  sqlite3_bind_int64(pStmt, 2,
+      ((iAbsLevel/FTS3_SEGDIR_MAXLEVEL)+1) * FTS3_SEGDIR_MAXLEVEL
+  );
+
+  *pbMax = 0;
+  if( SQLITE_ROW==sqlite3_step(pStmt) ){
+    *pbMax = sqlite3_column_type(pStmt, 0)==SQLITE_NULL;
+  }
+  return sqlite3_reset(pStmt);
+}
+
+/*
+** Delete all entries in the %_segments table associated with the segment
+** opened with seg-reader pSeg. This function does not affect the contents
+** of the %_segdir table.
+*/
+static int fts3DeleteSegment(
+  Fts3Table *p,                   /* FTS table handle */
+  Fts3SegReader *pSeg             /* Segment to delete */
+){
+  int rc = SQLITE_OK;             /* Return code */
+  if( pSeg->iStartBlock ){
+    sqlite3_stmt *pDelete;        /* SQL statement to delete rows */
+    rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDelete, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pDelete, 1, pSeg->iStartBlock);
+      sqlite3_bind_int64(pDelete, 2, pSeg->iEndBlock);
+      sqlite3_step(pDelete);
+      rc = sqlite3_reset(pDelete);
+    }
+  }
+  return rc;
+}
+
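+/*
+** Background note (a simplified sketch, not normative): each FTS3/FTS4
+** index is persisted in two shadow tables. %_segments stores the b-tree
+** blocks themselves, roughly:
+**
+**     CREATE TABLE %_segments(blockid INTEGER PRIMARY KEY, block BLOB);
+**
+** and %_segdir stores one row per segment, recording its level, idx, the
+** range of %_segments rows it occupies, and the root node:
+**
+**     CREATE TABLE %_segdir(
+**       level INTEGER, idx INTEGER, start_block INTEGER,
+**       leaves_end_block INTEGER, end_block INTEGER, root BLOB,
+**       PRIMARY KEY(level, idx)
+**     );
+**
+** The helper above deletes rows from the former; the functions that follow
+** handle the corresponding %_segdir bookkeeping.
+*/
+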
+/*
+** This function is used after merging multiple segments into a single large
+** segment to delete the old, now redundant, segment b-trees. Specifically,
+** it:
+**
+**   1) Deletes all %_segments entries for the segments associated with
+**      each of the SegReader objects in the array passed as the third
+**      argument, and
+**
+**   2) deletes all %_segdir entries with level iLevel, or all %_segdir
+**      entries regardless of level if (iLevel<0).
+**
+** SQLITE_OK is returned if successful, otherwise an SQLite error code.
+*/
+static int fts3DeleteSegdir(
+  Fts3Table *p,                   /* Virtual table handle */
+  int iLangid,                    /* Language id */
+  int iIndex,                     /* Index for p->aIndex */
+  int iLevel,                     /* Level of %_segdir entries to delete */
+  Fts3SegReader **apSegment,      /* Array of SegReader objects */
+  int nReader                     /* Size of array apSegment */
+){
+  int rc = SQLITE_OK;             /* Return Code */
+  int i;                          /* Iterator variable */
+  sqlite3_stmt *pDelete = 0;      /* SQL statement to delete rows */
+
+  for(i=0; rc==SQLITE_OK && i<nReader; i++){
+    rc = fts3DeleteSegment(p, apSegment[i]);
+  }
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  assert( iLevel>=0 || iLevel==FTS3_SEGCURSOR_ALL );
+  if( iLevel==FTS3_SEGCURSOR_ALL ){
+    rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_RANGE, &pDelete, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, 0));
+      sqlite3_bind_int64(pDelete, 2,
+          getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1)
+      );
+    }
+  }else{
+    rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pDelete, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(
+          pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel)
+      );
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    sqlite3_step(pDelete);
+    rc = sqlite3_reset(pDelete);
+  }
+
+  return rc;
+}
+
+/*
+** When this function is called, buffer *ppList (size *pnList bytes) contains
+** a position list that may (or may not) feature multiple columns. This
+** function adjusts the pointer *ppList and the length *pnList so that they
+** identify the subset of the position list that corresponds to column iCol.
+**
+** If there are no entries in the input position list for column iCol, then
+** *pnList is set to zero before returning.
+**
+** If parameter bZero is non-zero, then any part of the input list following
+** the end of the output list is zeroed before returning.
+*/
+static void fts3ColumnFilter(
+  int iCol,                       /* Column to filter on */
+  int bZero,                      /* Zero out anything following *ppList */
+  char **ppList,                  /* IN/OUT: Pointer to position list */
+  int *pnList                     /* IN/OUT: Size of buffer *ppList in bytes */
+){
+  char *pList = *ppList;
+  int nList = *pnList;
+  char *pEnd = &pList[nList];
+  int iCurrent = 0;
+  char *p = pList;
+
+  assert( iCol>=0 );
+  while( 1 ){
+    char c = 0;
+    while( p<pEnd && (c | *p)&0xFE ) c = *p++ & 0x80;
+
+    if( iCol==iCurrent ){
+      nList = (int)(p - pList);
+      break;
+    }
+
+    nList -= (int)(p - pList);
+    pList = p;
+    if( nList==0 ){
+      break;
+    }
+    p = &pList[1];
+    p += fts3GetVarint32(p, &iCurrent);
+  }
+
+  if( bZero && &pList[nList]!=pEnd ){
+    memset(&pList[nList], 0, pEnd - &pList[nList]);
+  }
+  *ppList = pList;
+  *pnList = nList;
+}
+
+/*
+** Cache data in the Fts3MultiSegReader.aBuffer[] buffer (overwriting any
+** existing data). Grow the buffer if required.
+**
+** If successful, return SQLITE_OK. Otherwise, if an OOM error is encountered
+** during the grow operation, return SQLITE_NOMEM.
+*/
+static int fts3MsrBufferData(
+  Fts3MultiSegReader *pMsr,       /* Multi-segment-reader handle */
+  char *pList,
+  int nList                       /* Size of pList in bytes */
+){
+  if( nList>pMsr->nBuffer ){
+    char *pNew;
+    pMsr->nBuffer = nList*2;
+    pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer);
+    if( !pNew ) return SQLITE_NOMEM;
+    pMsr->aBuffer = pNew;
+  }
+
+  memcpy(pMsr->aBuffer, pList, nList);
+  return SQLITE_OK;
+}
+
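+/*
+** For orientation (a simplified sketch of the position-list format assumed
+** by fts3ColumnFilter() above, not normative): within a doclist entry, the
+** positions for column 0 come first. Each subsequent column is introduced
+** by a 0x01 byte followed by a varint column number, and the whole list is
+** terminated by a 0x00 byte. Schematically:
+**
+**     [poslist for column 0] 0x01 [varint iCol] [poslist for iCol] ... 0x00
+**
+** fts3ColumnFilter() therefore scans from one 0x01 marker to the next until
+** it reaches the requested column (or the 0x00 terminator).
+*/
+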
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext(
+  Fts3Table *p,                   /* Virtual table handle */
+  Fts3MultiSegReader *pMsr,       /* Multi-segment-reader handle */
+  sqlite3_int64 *piDocid,         /* OUT: Docid value */
+  char **paPoslist,               /* OUT: Pointer to position list */
+  int *pnPoslist                  /* OUT: Size of position list in bytes */
+){
+  int nMerge = pMsr->nAdvance;
+  Fts3SegReader **apSegment = pMsr->apSegment;
+  int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = (
+    p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp
+  );
+
+  if( nMerge==0 ){
+    *paPoslist = 0;
+    return SQLITE_OK;
+  }
+
+  while( 1 ){
+    Fts3SegReader *pSeg;
+    pSeg = pMsr->apSegment[0];
+
+    if( pSeg->pOffsetList==0 ){
+      *paPoslist = 0;
+      break;
+    }else{
+      int rc;
+      char *pList;
+      int nList;
+      int j;
+      sqlite3_int64 iDocid = apSegment[0]->iDocid;
+
+      rc = fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList);
+      j = 1;
+      while( rc==SQLITE_OK
+          && j<nMerge
+          && apSegment[j]->pOffsetList
+          && apSegment[j]->iDocid==iDocid
+      ){
+        rc = fts3SegReaderNextDocid(p, apSegment[j], 0, 0);
+        j++;
+      }
+      if( rc!=SQLITE_OK ) return rc;
+      fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp);
+
+      if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){
+        rc = fts3MsrBufferData(pMsr, pList, nList+1);
+        if( rc!=SQLITE_OK ) return rc;
+        assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 );
+        pList = pMsr->aBuffer;
+      }
+
+      if( pMsr->iColFilter>=0 ){
+        fts3ColumnFilter(pMsr->iColFilter, 1, &pList, &nList);
+      }
+
+      if( nList>0 ){
+        *paPoslist = pList;
+        *piDocid = iDocid;
+        *pnPoslist = nList;
+        break;
+      }
+    }
+  }
+
+  return SQLITE_OK;
+}
+
+static int fts3SegReaderStart(
+  Fts3Table *p,                   /* Virtual table handle */
+  Fts3MultiSegReader *pCsr,       /* Cursor object */
+  const char *zTerm,              /* Term searched for (or NULL) */
+  int nTerm                       /* Length of zTerm in bytes */
+){
+  int i;
+  int nSeg = pCsr->nSegment;
+
+  /* If the Fts3SegFilter defines a specific term (or term prefix) to search
+  ** for, then advance each segment iterator until it points to a term of
+  ** equal or greater value than the specified term. This prevents many
+  ** unnecessary merge/sort operations for the case where a single segment
+  ** b-tree leaf node contains more than one term.
+  */
+  for(i=0; pCsr->bRestart==0 && i<pCsr->nSegment; i++){
+    int res = 0;
+    Fts3SegReader *pSeg = pCsr->apSegment[i];
+    do {
+      int rc = fts3SegReaderNext(p, pSeg, 0);
+      if( rc!=SQLITE_OK ) return rc;
+    }while( zTerm && (res = fts3SegReaderTermCmp(pSeg, zTerm, nTerm))<0 );
+
+    if( pSeg->bLookup && res!=0 ){
+      fts3SegReaderSetEof(pSeg);
+    }
+  }
+  fts3SegReaderSort(pCsr->apSegment, nSeg, nSeg, fts3SegReaderCmp);
+
+  return SQLITE_OK;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3SegReaderStart(
+  Fts3Table *p,                   /* Virtual table handle */
+  Fts3MultiSegReader *pCsr,       /* Cursor object */
+  Fts3SegFilter *pFilter          /* Restrictions on range of iteration */
+){
+  pCsr->pFilter = pFilter;
+  return fts3SegReaderStart(p, pCsr, pFilter->zTerm, pFilter->nTerm);
+}
+
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart(
+  Fts3Table *p,                   /* Virtual table handle */
+  Fts3MultiSegReader *pCsr,       /* Cursor object */
+  int iCol,                       /* Column to match on. */
+  const char *zTerm,              /* Term to iterate through a doclist for */
+  int nTerm                       /* Number of bytes in zTerm */
+){
+  int i;
+  int rc;
+  int nSegment = pCsr->nSegment;
+  int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = (
+    p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp
+  );
+
+  assert( pCsr->pFilter==0 );
+  assert( zTerm && nTerm>0 );
+
+  /* Advance each segment iterator until it points to the term zTerm/nTerm. */
+  rc = fts3SegReaderStart(p, pCsr, zTerm, nTerm);
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Determine how many of the segments actually point to zTerm/nTerm. */
+  for(i=0; i<nSegment; i++){
+    Fts3SegReader *pSeg = pCsr->apSegment[i];
+    if( !pSeg->aNode || fts3SegReaderTermCmp(pSeg, zTerm, nTerm) ){
+      break;
+    }
+  }
+  pCsr->nAdvance = i;
+
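+  /* Note: because fts3SegReaderStart() leaves apSegment[] sorted by term,
+  ** the first nAdvance readers are exactly those positioned on zTerm/nTerm;
+  ** all remaining readers point at larger terms (or EOF) and take no part
+  ** in the doclist merge below. */
+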
+  /* Advance each of the segments to point to the first docid. */
+  for(i=0; i<pCsr->nAdvance; i++){
+    rc = fts3SegReaderFirstDocid(p, pCsr->apSegment[i]);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+  fts3SegReaderSort(pCsr->apSegment, i, i, xCmp);
+
+  assert( iCol<0 || iCol<p->nColumn );
+  pCsr->iColFilter = iCol;
+
+  return SQLITE_OK;
+}
+
+/*
+** This function is called on a MultiSegReader that has been started using
+** sqlite3Fts3MsrIncrStart(). One or more calls to MsrIncrNext() may also
+** have been made. Calling this function puts the MultiSegReader in such
+** a state that if the next two calls are:
+**
+**   sqlite3Fts3SegReaderStart()
+**   sqlite3Fts3SegReaderStep()
+**
+** then the entire doclist for the term is available in
+** MultiSegReader.aDoclist/nDoclist.
+*/
+SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){
+  int i;                          /* Used to iterate through segment-readers */
+
+  assert( pCsr->zTerm==0 );
+  assert( pCsr->nTerm==0 );
+  assert( pCsr->aDoclist==0 );
+  assert( pCsr->nDoclist==0 );
+
+  pCsr->nAdvance = 0;
+  pCsr->bRestart = 1;
+  for(i=0; i<pCsr->nSegment; i++){
+    pCsr->apSegment[i]->pOffsetList = 0;
+    pCsr->apSegment[i]->nOffsetList = 0;
+    pCsr->apSegment[i]->iDocid = 0;
+  }
+
+  return SQLITE_OK;
+}
+
+
+SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
+  Fts3Table *p,                   /* Virtual table handle */
+  Fts3MultiSegReader *pCsr        /* Cursor object */
+){
+  int rc = SQLITE_OK;
+
+  int isIgnoreEmpty =  (pCsr->pFilter->flags & FTS3_SEGMENT_IGNORE_EMPTY);
+  int isRequirePos =   (pCsr->pFilter->flags & FTS3_SEGMENT_REQUIRE_POS);
+  int isColFilter =    (pCsr->pFilter->flags & FTS3_SEGMENT_COLUMN_FILTER);
+  int isPrefix =       (pCsr->pFilter->flags & FTS3_SEGMENT_PREFIX);
+  int isScan =         (pCsr->pFilter->flags & FTS3_SEGMENT_SCAN);
+  int isFirst =        (pCsr->pFilter->flags & FTS3_SEGMENT_FIRST);
+
+  Fts3SegReader **apSegment = pCsr->apSegment;
+  int nSegment = pCsr->nSegment;
+  Fts3SegFilter *pFilter = pCsr->pFilter;
+  int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = (
+    p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp
+  );
+
+  if( pCsr->nSegment==0 ) return SQLITE_OK;
+
+  do {
+    int nMerge;
+    int i;
+
+    /* Advance the first pCsr->nAdvance entries in the apSegment[] array
+    ** forward. Then sort the list in order of current term again.
+    */
+    for(i=0; i<pCsr->nAdvance; i++){
+      Fts3SegReader *pSeg = apSegment[i];
+      if( pSeg->bLookup ){
+        fts3SegReaderSetEof(pSeg);
+      }else{
+        rc = fts3SegReaderNext(p, pSeg, 0);
+      }
+      if( rc!=SQLITE_OK ) return rc;
+    }
+    fts3SegReaderSort(apSegment, nSegment, pCsr->nAdvance, fts3SegReaderCmp);
+    pCsr->nAdvance = 0;
+
+    /* If all the seg-readers are at EOF, we're finished. return SQLITE_OK. */
+    assert( rc==SQLITE_OK );
+    if( apSegment[0]->aNode==0 ) break;
+
+    pCsr->nTerm = apSegment[0]->nTerm;
+    pCsr->zTerm = apSegment[0]->zTerm;
+
+    /* If this is a prefix-search, and if the term that apSegment[0] points
+    ** to does not share a prefix with pFilter->zTerm/nTerm, then all
+    ** required callbacks have been made. In this case exit early.
+    **
+    ** Similarly, if this is a search for an exact match, and the first term
+    ** of segment apSegment[0] is not a match, exit early.
+    */
+    if( pFilter->zTerm && !isScan ){
+      if( pCsr->nTerm<pFilter->nTerm
+       || (!isPrefix && pCsr->nTerm>pFilter->nTerm)
+       || memcmp(pCsr->zTerm, pFilter->zTerm, pFilter->nTerm)
+      ){
+        break;
+      }
+    }
+
+    nMerge = 1;
+    while( nMerge<nSegment
+        && apSegment[nMerge]->aNode
+        && apSegment[nMerge]->nTerm==pCsr->nTerm
+        && 0==memcmp(pCsr->zTerm, apSegment[nMerge]->zTerm, pCsr->nTerm)
+    ){
+      nMerge++;
+    }
+
+    assert( isIgnoreEmpty || (isRequirePos && !isColFilter) );
+    if( nMerge==1
+     && !isIgnoreEmpty
+     && !isFirst
+     && (p->bDescIdx==0 || fts3SegReaderIsPending(apSegment[0])==0)
+    ){
+      pCsr->nDoclist = apSegment[0]->nDoclist;
+      if( fts3SegReaderIsPending(apSegment[0]) ){
+        rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist);
+        pCsr->aDoclist = pCsr->aBuffer;
+      }else{
+        pCsr->aDoclist = apSegment[0]->aDoclist;
+      }
+      if( rc==SQLITE_OK ) rc = SQLITE_ROW;
+    }else{
+      int nDoclist = 0;           /* Size of doclist */
+      sqlite3_int64 iPrev = 0;    /* Previous docid stored in doclist */
+
+      /* The current term of the first nMerge entries in the array
+      ** of Fts3SegReader objects is the same. The doclists must be merged
+      ** and a single term returned with the merged doclist.
+      */
+      for(i=0; i<nMerge; i++){
+        fts3SegReaderFirstDocid(p, apSegment[i]);
+      }
+      fts3SegReaderSort(apSegment, nMerge, nMerge, xCmp);
+      while( apSegment[0]->pOffsetList ){
+        int j;                    /* Number of segments that share a docid */
+        char *pList = 0;
+        int nList = 0;
+        int nByte;
+        sqlite3_int64 iDocid = apSegment[0]->iDocid;
+        fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList);
+        j = 1;
+        while( j<nMerge
+            && apSegment[j]->pOffsetList
+            && apSegment[j]->iDocid==iDocid
+        ){
+          fts3SegReaderNextDocid(p, apSegment[j], 0, 0);
+          j++;
+        }
+
+        if( isColFilter ){
+          fts3ColumnFilter(pFilter->iCol, 0, &pList, &nList);
+        }
+
+        if( !isIgnoreEmpty || nList>0 ){
+
+          /* Calculate the 'docid' delta value to write into the merged
+          ** doclist. */
+          sqlite3_int64 iDelta;
+          if( p->bDescIdx && nDoclist>0 ){
+            iDelta = iPrev - iDocid;
+          }else{
+            iDelta = iDocid - iPrev;
+          }
+          assert( iDelta>0 || (nDoclist==0 && iDelta==iDocid) );
+          assert( nDoclist>0 || iDelta==iDocid );
+
+          nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0);
+          if( nDoclist+nByte>pCsr->nBuffer ){
+            char *aNew;
+            pCsr->nBuffer = (nDoclist+nByte)*2;
+            aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer);
+            if( !aNew ){
+              return SQLITE_NOMEM;
+            }
+            pCsr->aBuffer = aNew;
+          }
+
+          if( isFirst ){
+            char *a = &pCsr->aBuffer[nDoclist];
+            int nWrite;
+
+            nWrite = sqlite3Fts3FirstFilter(iDelta, pList, nList, a);
+            if( nWrite ){
+              iPrev = iDocid;
+              nDoclist += nWrite;
+            }
+          }else{
+            nDoclist += sqlite3Fts3PutVarint(&pCsr->aBuffer[nDoclist], iDelta);
+            iPrev = iDocid;
+            if( isRequirePos ){
+              memcpy(&pCsr->aBuffer[nDoclist], pList, nList);
+              nDoclist += nList;
+              pCsr->aBuffer[nDoclist++] = '\0';
+            }
+          }
+        }
+
+        fts3SegReaderSort(apSegment, nMerge, j, xCmp);
+      }
+      if( nDoclist>0 ){
+        pCsr->aDoclist = pCsr->aBuffer;
+        pCsr->nDoclist = nDoclist;
+        rc = SQLITE_ROW;
+      }
+    }
+    pCsr->nAdvance = nMerge;
+  }while( rc==SQLITE_OK );
+
+  return rc;
+}
+
+
+SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(
+  Fts3MultiSegReader *pCsr        /* Cursor object */
+){
+  if( pCsr ){
+    int i;
+    for(i=0; i<pCsr->nSegment; i++){
+      sqlite3Fts3SegReaderFree(pCsr->apSegment[i]);
+    }
+    sqlite3_free(pCsr->apSegment);
+    sqlite3_free(pCsr->aBuffer);
+
+    pCsr->nSegment = 0;
+    pCsr->apSegment = 0;
+    pCsr->aBuffer = 0;
+  }
+}
+
+/*
+** Decode the "end_block" field, selected by column iCol of the SELECT
+** statement passed as the first argument.
+** +** The "end_block" field may contain either an integer, or a text field +** containing the text representation of two non-negative integers separated +** by one or more space (0x20) characters. In the first case, set *piEndBlock +** to the integer value and *pnByte to zero before returning. In the second, +** set *piEndBlock to the first value and *pnByte to the second. +*/ +static void fts3ReadEndBlockField( + sqlite3_stmt *pStmt, + int iCol, + i64 *piEndBlock, + i64 *pnByte +){ + const unsigned char *zText = sqlite3_column_text(pStmt, iCol); + if( zText ){ + int i; + int iMul = 1; + i64 iVal = 0; + for(i=0; zText[i]>='0' && zText[i]<='9'; i++){ + iVal = iVal*10 + (zText[i] - '0'); + } + *piEndBlock = iVal; + while( zText[i]==' ' ) i++; + iVal = 0; + if( zText[i]=='-' ){ + i++; + iMul = -1; + } + for(/* no-op */; zText[i]>='0' && zText[i]<='9'; i++){ + iVal = iVal*10 + (zText[i] - '0'); + } + *pnByte = (iVal * (i64)iMul); + } +} + + +/* +** A segment of size nByte bytes has just been written to absolute level +** iAbsLevel. Promote any segments that should be promoted as a result. +*/ +static int fts3PromoteSegments( + Fts3Table *p, /* FTS table handle */ + sqlite3_int64 iAbsLevel, /* Absolute level just updated */ + sqlite3_int64 nByte /* Size of new segment at iAbsLevel */ +){ + int rc = SQLITE_OK; + sqlite3_stmt *pRange; + + rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE2, &pRange, 0); + + if( rc==SQLITE_OK ){ + int bOk = 0; + i64 iLast = (iAbsLevel/FTS3_SEGDIR_MAXLEVEL + 1) * FTS3_SEGDIR_MAXLEVEL - 1; + i64 nLimit = (nByte*3)/2; + + /* Loop through all entries in the %_segdir table corresponding to + ** segments in this index on levels greater than iAbsLevel. If there is + ** at least one such segment, and it is possible to determine that all + ** such segments are smaller than nLimit bytes in size, they will be + ** promoted to level iAbsLevel. */ + sqlite3_bind_int64(pRange, 1, iAbsLevel+1); + sqlite3_bind_int64(pRange, 2, iLast); + while( SQLITE_ROW==sqlite3_step(pRange) ){ + i64 nSize = 0, dummy; + fts3ReadEndBlockField(pRange, 2, &dummy, &nSize); + if( nSize<=0 || nSize>nLimit ){ + /* If nSize==0, then the %_segdir.end_block field does not not + ** contain a size value. This happens if it was written by an + ** old version of FTS. In this case it is not possible to determine + ** the size of the segment, and so segment promotion does not + ** take place. */ + bOk = 0; + break; + } + bOk = 1; + } + rc = sqlite3_reset(pRange); + + if( bOk ){ + int iIdx = 0; + sqlite3_stmt *pUpdate1 = 0; + sqlite3_stmt *pUpdate2 = 0; + + if( rc==SQLITE_OK ){ + rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL_IDX, &pUpdate1, 0); + } + if( rc==SQLITE_OK ){ + rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL, &pUpdate2, 0); + } + + if( rc==SQLITE_OK ){ + + /* Loop through all %_segdir entries for segments in this index with + ** levels equal to or greater than iAbsLevel. As each entry is visited, + ** updated it to set (level = -1) and (idx = N), where N is 0 for the + ** oldest segment in the range, 1 for the next oldest, and so on. + ** + ** In other words, move all segments being promoted to level -1, + ** setting the "idx" fields as appropriate to keep them in the same + ** order. The contents of level -1 (which is never used, except + ** transiently here), will be moved back to level iAbsLevel below. 
+        */
+        sqlite3_bind_int64(pRange, 1, iAbsLevel);
+        while( SQLITE_ROW==sqlite3_step(pRange) ){
+          sqlite3_bind_int(pUpdate1, 1, iIdx++);
+          sqlite3_bind_int(pUpdate1, 2, sqlite3_column_int(pRange, 0));
+          sqlite3_bind_int(pUpdate1, 3, sqlite3_column_int(pRange, 1));
+          sqlite3_step(pUpdate1);
+          rc = sqlite3_reset(pUpdate1);
+          if( rc!=SQLITE_OK ){
+            sqlite3_reset(pRange);
+            break;
+          }
+        }
+      }
+      if( rc==SQLITE_OK ){
+        rc = sqlite3_reset(pRange);
+      }
+
+      /* Move level -1 to level iAbsLevel */
+      if( rc==SQLITE_OK ){
+        sqlite3_bind_int64(pUpdate2, 1, iAbsLevel);
+        sqlite3_step(pUpdate2);
+        rc = sqlite3_reset(pUpdate2);
+      }
+    }
+  }
+
+  return rc;
+}
+
+/*
+** Merge all level iLevel segments in the database into a single
+** iLevel+1 segment. Or, if iLevel<0, merge all segments into a
+** single segment with a level equal to the numerically largest level
+** currently present in the database.
+**
+** If this function is called with iLevel<0, but there is only one
+** segment in the database, SQLITE_DONE is returned immediately.
+** Otherwise, if successful, SQLITE_OK is returned. If an error occurs,
+** an SQLite error code is returned.
+*/
+static int fts3SegmentMerge(
+  Fts3Table *p,
+  int iLangid,                    /* Language id to merge */
+  int iIndex,                     /* Index in p->aIndex[] to merge */
+  int iLevel                      /* Level to merge */
+){
+  int rc;                         /* Return code */
+  int iIdx = 0;                   /* Index of new segment */
+  sqlite3_int64 iNewLevel = 0;    /* Level/index to create new segment at */
+  SegmentWriter *pWriter = 0;     /* Used to write the new, merged, segment */
+  Fts3SegFilter filter;           /* Segment term filter condition */
+  Fts3MultiSegReader csr;         /* Cursor to iterate through level(s) */
+  int bIgnoreEmpty = 0;           /* True to ignore empty segments */
+  i64 iMaxLevel = 0;              /* Max level number for this index/langid */
+
+  assert( iLevel==FTS3_SEGCURSOR_ALL
+       || iLevel==FTS3_SEGCURSOR_PENDING
+       || iLevel>=0
+  );
+  assert( iLevel<FTS3_SEGDIR_MAXLEVEL );
+  assert( iIndex>=0 && iIndex<p->nIndex );
+
+  rc = sqlite3Fts3SegReaderCursor(p, iLangid, iIndex, iLevel, 0, 0, 1, 0, &csr);
+  if( rc!=SQLITE_OK || csr.nSegment==0 ) goto finished;
+
+  if( iLevel!=FTS3_SEGCURSOR_PENDING ){
+    rc = fts3SegmentMaxLevel(p, iLangid, iIndex, &iMaxLevel);
+    if( rc!=SQLITE_OK ) goto finished;
+  }
+
+  if( iLevel==FTS3_SEGCURSOR_ALL ){
+    /* This call is to merge all segments in the database to a single
+    ** segment. The level of the new segment is equal to the numerically
+    ** greatest segment level currently present in the database for this
+    ** index. The idx of the new segment is always 0. */
+    if( csr.nSegment==1 && 0==fts3SegReaderIsPending(csr.apSegment[0]) ){
+      rc = SQLITE_DONE;
+      goto finished;
+    }
+    iNewLevel = iMaxLevel;
+    bIgnoreEmpty = 1;
+
+  }else{
+    /* This call is to merge all segments at level iLevel. Find the next
+    ** available segment index at level iLevel+1. The call to
+    ** fts3AllocateSegdirIdx() will merge the segments at level iLevel+1 to
+    ** a single iLevel+2 segment if necessary. */
+    assert( FTS3_SEGCURSOR_PENDING==-1 );
+    iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, iLevel+1);
+    rc = fts3AllocateSegdirIdx(p, iLangid, iIndex, iLevel+1, &iIdx);
+    bIgnoreEmpty = (iLevel!=FTS3_SEGCURSOR_PENDING) && (iNewLevel>iMaxLevel);
+  }
+  if( rc!=SQLITE_OK ) goto finished;
+
+  assert( csr.nSegment>0 );
+  assert( iNewLevel>=getAbsoluteLevel(p, iLangid, iIndex, 0) );
+  assert( iNewLevel<getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL) );
+
+  memset(&filter, 0, sizeof(Fts3SegFilter));
+  filter.flags = FTS3_SEGMENT_REQUIRE_POS;
+  filter.flags |= (bIgnoreEmpty ? FTS3_SEGMENT_IGNORE_EMPTY : 0);
+
+  rc = sqlite3Fts3SegReaderStart(p, &csr, &filter);
+  while( SQLITE_OK==rc ){
+    rc = sqlite3Fts3SegReaderStep(p, &csr);
+    if( rc!=SQLITE_ROW ) break;
+    rc = fts3SegWriterAdd(p, &pWriter, 1,
+        csr.zTerm, csr.nTerm, csr.aDoclist, csr.nDoclist);
+  }
+  if( rc!=SQLITE_OK ) goto finished;
+  assert( pWriter || bIgnoreEmpty );
+
+  if( iLevel!=FTS3_SEGCURSOR_PENDING ){
+    rc = fts3DeleteSegdir(
+        p, iLangid, iIndex, iLevel, csr.apSegment, csr.nSegment
+    );
+    if( rc!=SQLITE_OK ) goto finished;
+  }
+  if( pWriter ){
+    rc = fts3SegWriterFlush(p, pWriter, iNewLevel, iIdx);
+    if( rc==SQLITE_OK ){
+      if( iLevel==FTS3_SEGCURSOR_PENDING || iNewLevel<iMaxLevel ){
+        rc = fts3PromoteSegments(p, iNewLevel, pWriter->nLeafData);
+      }
+    }
+  }
+
+ finished:
+  fts3SegWriterFree(pWriter);
+  sqlite3Fts3SegReaderFinish(&csr);
+  return rc;
+}
+
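+/*
+** Rough picture of the level scheme implemented by fts3SegmentMerge()
+** above (illustrative; 16 is the usual FTS3_MERGE_COUNT default): documents
+** are first accumulated in the in-memory pending-terms table, which is
+** flushed to disk as a level-0 segment. Once 16 level-0 segments exist they
+** are merged into a single level-1 segment, 16 level-1 segments into one
+** level-2 segment, and so on, so that each level holds roughly 16x the
+** data of the level below it.
+*/
+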
+/*
+** Flush the contents of pendingTerms to level 0 segments.
+*/
+SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){
+  int rc = SQLITE_OK;
+  int i;
+
+  for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){
+    rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING);
+    if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+  }
+  sqlite3Fts3PendingTermsClear(p);
+
+  /* Determine the auto-incr-merge setting if unknown. If enabled,
+  ** estimate the number of leaf blocks of content to be written
+  */
+  if( rc==SQLITE_OK && p->bHasStat
+   && p->nAutoincrmerge==0xff && p->nLeafAdd>0
+  ){
+    sqlite3_stmt *pStmt = 0;
+    rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
+      rc = sqlite3_step(pStmt);
+      if( rc==SQLITE_ROW ){
+        p->nAutoincrmerge = sqlite3_column_int(pStmt, 0);
+        if( p->nAutoincrmerge==1 ) p->nAutoincrmerge = 8;
+      }else if( rc==SQLITE_DONE ){
+        p->nAutoincrmerge = 0;
+      }
+      rc = sqlite3_reset(pStmt);
+    }
+  }
+  return rc;
+}
+
+/*
+** Encode N integers as varints into a blob.
+*/
+static void fts3EncodeIntArray(
+  int N,                          /* The number of integers to encode */
+  u32 *a,                         /* The integer values */
+  char *zBuf,                     /* Write the BLOB here */
+  int *pNBuf                      /* Write number of bytes if zBuf[] used here */
+){
+  int i, j;
+  for(i=j=0; i<N; i++){
+    j += sqlite3Fts3PutVarint(&zBuf[j], (sqlite3_int64)a[i]);
+  }
+  *pNBuf = j;
+}
+
+/*
+** Decode a blob of varints into N integers
+*/
+static void fts3DecodeIntArray(
+  int N,                          /* The number of integers to decode */
+  u32 *a,                         /* Write the integer values here */
+  const char *zBuf,               /* The BLOB containing the varints */
+  int nBuf                        /* size of the BLOB */
+){
+  int i, j;
+  UNUSED_PARAMETER(nBuf);
+  for(i=j=0; i<N; i++){
+    sqlite3_int64 x;
+    j += sqlite3Fts3GetVarint(&zBuf[j], &x);
+    assert(j<=nBuf);
+    a[i] = (u32)(x & 0xffffffff);
+  }
+}
+
+/*
+** Insert the sizes (in tokens) for each column of the document
+** with docid equal to p->iPrevDocid. The sizes are encoded as
+** a blob of varints.
+*/
+static void fts3InsertDocsize(
+  int *pRC,                       /* Result code */
+  Fts3Table *p,                   /* Table into which to insert */
+  u32 *aSz                        /* Sizes of each column, in tokens */
+){
+  char *pBlob;                    /* The BLOB encoding of the document size */
+  int nBlob;                      /* Number of bytes in the BLOB */
+  sqlite3_stmt *pStmt;            /* Statement used to insert the encoding */
+  int rc;                         /* Result code from subfunctions */
+
+  if( *pRC ) return;
+  pBlob = sqlite3_malloc( 10*p->nColumn );
+  if( pBlob==0 ){
+    *pRC = SQLITE_NOMEM;
+    return;
+  }
+  fts3EncodeIntArray(p->nColumn, aSz, pBlob, &nBlob);
+  rc = fts3SqlStmt(p, SQL_REPLACE_DOCSIZE, &pStmt, 0);
+  if( rc ){
+    sqlite3_free(pBlob);
+    *pRC = rc;
+    return;
+  }
+  sqlite3_bind_int64(pStmt, 1, p->iPrevDocid);
+  sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, sqlite3_free);
+  sqlite3_step(pStmt);
+  *pRC = sqlite3_reset(pStmt);
+}
+
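+/*
+** Worked example of the varint encoding used above (illustrative only):
+** a document with 47 tokens in column 0 and 130 tokens in column 1 is
+** recorded in %_docsize as the two varints [47, 130]. 47 fits in a single
+** byte (0x2F); 130 is stored as 0x82 0x01, since each varint byte carries
+** 7 payload bits, least-significant group first, with the high bit set on
+** all but the final byte.
+*/
+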
+/*
+** Record 0 of the %_stat table contains a blob consisting of N varints,
+** where N is the number of user defined columns in the fts3 table plus
+** two. If nCol is the number of user defined columns, then values of the
+** varints are set as follows:
+**
+**   Varint 0:       Total number of rows in the table.
+**
+**   Varint 1..nCol: For each column, the total number of tokens stored in
+**                   the column for all rows of the table.
+**
+**   Varint 1+nCol:  The total size, in bytes, of all text values in all
+**                   columns of all rows of the table.
+*/
+static void fts3UpdateDocTotals(
+  int *pRC,                       /* The result code */
+  Fts3Table *p,                   /* Table being updated */
+  u32 *aSzIns,                    /* Size increases */
+  u32 *aSzDel,                    /* Size decreases */
+  int nChng                       /* Change in the number of documents */
+){
+  char *pBlob;                    /* Storage for BLOB written into %_stat */
+  int nBlob;                      /* Size of BLOB written into %_stat */
+  u32 *a;                         /* Array of integers that becomes the BLOB */
+  sqlite3_stmt *pStmt;            /* Statement for reading and writing */
+  int i;                          /* Loop counter */
+  int rc;                         /* Result code from subfunctions */
+
+  const int nStat = p->nColumn+2;
+
+  if( *pRC ) return;
+  a = sqlite3_malloc( (sizeof(u32)+10)*nStat );
+  if( a==0 ){
+    *pRC = SQLITE_NOMEM;
+    return;
+  }
+  pBlob = (char*)&a[nStat];
+  rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0);
+  if( rc ){
+    sqlite3_free(a);
+    *pRC = rc;
+    return;
+  }
+  sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL);
+  if( sqlite3_step(pStmt)==SQLITE_ROW ){
+    fts3DecodeIntArray(nStat, a,
+         sqlite3_column_blob(pStmt, 0),
+         sqlite3_column_bytes(pStmt, 0));
+  }else{
+    memset(a, 0, sizeof(u32)*(nStat) );
+  }
+  rc = sqlite3_reset(pStmt);
+  if( rc!=SQLITE_OK ){
+    sqlite3_free(a);
+    *pRC = rc;
+    return;
+  }
+  if( nChng<0 && a[0]<(u32)(-nChng) ){
+    a[0] = 0;
+  }else{
+    a[0] += nChng;
+  }
+  for(i=0; i<p->nColumn+1; i++){
+    u32 x = a[i+1];
+    if( x+aSzIns[i] < aSzDel[i] ){
+      x = 0;
+    }else{
+      x = x + aSzIns[i] - aSzDel[i];
+    }
+    a[i+1] = x;
+  }
+  fts3EncodeIntArray(nStat, a, pBlob, &nBlob);
+  rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0);
+  if( rc ){
+    sqlite3_free(a);
+    *pRC = rc;
+    return;
+  }
+  sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL);
+  sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC);
+  sqlite3_step(pStmt);
+  *pRC = sqlite3_reset(pStmt);
+  sqlite3_free(a);
+}
+
+/*
+** Merge the entire database so that there is one segment for each
+** iIndex/iLangid combination.
+*/
+static int fts3DoOptimize(Fts3Table *p, int bReturnDone){
+  int bSeenDone = 0;
+  int rc;
+  sqlite3_stmt *pAllLangid = 0;
+
+  rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
+  if( rc==SQLITE_OK ){
+    int rc2;
+    sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid);
+    sqlite3_bind_int(pAllLangid, 2, p->nIndex);
+    while( sqlite3_step(pAllLangid)==SQLITE_ROW ){
+      int i;
+      int iLangid = sqlite3_column_int(pAllLangid, 0);
+      for(i=0; rc==SQLITE_OK && i<p->nIndex; i++){
+        rc = fts3SegmentMerge(p, iLangid, i, FTS3_SEGCURSOR_ALL);
+        if( rc==SQLITE_DONE ){
+          bSeenDone = 1;
+          rc = SQLITE_OK;
+        }
+      }
+    }
+    rc2 = sqlite3_reset(pAllLangid);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  sqlite3Fts3SegmentsClose(p);
+  sqlite3Fts3PendingTermsClear(p);
+
+  return (rc==SQLITE_OK && bReturnDone && bSeenDone) ? SQLITE_DONE : rc;
+}
+
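+/*
+** For example, both of the special commands handled here are issued
+** through ordinary INSERTs on the virtual table (assuming an FTS table
+** named "docs"; the name is purely illustrative):
+**
+**     INSERT INTO docs(docs) VALUES('optimize');   -- fts3DoOptimize()
+**     INSERT INTO docs(docs) VALUES('rebuild');    -- fts3DoRebuild()
+*/
+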
+/*
+** This function is called when the user executes the following statement:
+**
+**     INSERT INTO <tbl>(<tbl>) VALUES('rebuild');
+**
+** The entire FTS index is discarded and rebuilt. If the table is one
+** created using the content=xxx option, then the new index is based on
+** the current contents of the xxx table. Otherwise, it is rebuilt based
+** on the contents of the %_content table.
+*/
+static int fts3DoRebuild(Fts3Table *p){
+  int rc;                         /* Return Code */
+
+  rc = fts3DeleteAll(p, 0);
+  if( rc==SQLITE_OK ){
+    u32 *aSz = 0;
+    u32 *aSzIns = 0;
+    u32 *aSzDel = 0;
+    sqlite3_stmt *pStmt = 0;
+    int nEntry = 0;
+
+    /* Compose and prepare an SQL statement to loop through the content table */
+    char *zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist);
+    if( !zSql ){
+      rc = SQLITE_NOMEM;
+    }else{
+      rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+      sqlite3_free(zSql);
+    }
+
+    if( rc==SQLITE_OK ){
+      int nByte = sizeof(u32) * (p->nColumn+1)*3;
+      aSz = (u32 *)sqlite3_malloc(nByte);
+      if( aSz==0 ){
+        rc = SQLITE_NOMEM;
+      }else{
+        memset(aSz, 0, nByte);
+        aSzIns = &aSz[p->nColumn+1];
+        aSzDel = &aSzIns[p->nColumn+1];
+      }
+    }
+
+    while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+      int iCol;
+      int iLangid = langidFromSelect(p, pStmt);
+      rc = fts3PendingTermsDocid(p, 0, iLangid, sqlite3_column_int64(pStmt, 0));
+      memset(aSz, 0, sizeof(aSz[0]) * (p->nColumn+1));
+      for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){
+        if( p->abNotindexed[iCol]==0 ){
+          const char *z = (const char *) sqlite3_column_text(pStmt, iCol+1);
+          rc = fts3PendingTermsAdd(p, iLangid, z, iCol, &aSz[iCol]);
+          aSz[p->nColumn] += sqlite3_column_bytes(pStmt, iCol+1);
+        }
+      }
+      if( p->bHasDocsize ){
+        fts3InsertDocsize(&rc, p, aSz);
+      }
+      if( rc!=SQLITE_OK ){
+        sqlite3_finalize(pStmt);
+        pStmt = 0;
+      }else{
+        nEntry++;
+        for(iCol=0; iCol<=p->nColumn; iCol++){
+          aSzIns[iCol] += aSz[iCol];
+        }
+      }
+    }
+    if( p->bFts4 ){
+      fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nEntry);
+    }
+    sqlite3_free(aSz);
+
+    if( pStmt ){
+      int rc2 = sqlite3_finalize(pStmt);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+  }
+
+  return rc;
+}
+
+
+/*
+** This function opens a cursor used to read the input data for an
+** incremental merge operation. Specifically, it opens a cursor to scan
+** the oldest nSeg segments (idx=0 through idx=(nSeg-1)) in absolute
+** level iAbsLevel.
+*/
+static int fts3IncrmergeCsr(
+  Fts3Table *p,                   /* FTS3 table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute level to open */
+  int nSeg,                       /* Number of segments to merge */
+  Fts3MultiSegReader *pCsr        /* Cursor object to populate */
+){
+  int rc;                         /* Return Code */
+  sqlite3_stmt *pStmt = 0;        /* Statement used to read %_segdir entry */
+  int nByte;                      /* Bytes allocated at pCsr->apSegment[] */
+
+  /* Allocate space for the Fts3MultiSegReader.aCsr[] array */
+  memset(pCsr, 0, sizeof(*pCsr));
+  nByte = sizeof(Fts3SegReader *) * nSeg;
+  pCsr->apSegment = (Fts3SegReader **)sqlite3_malloc(nByte);
+
+  if( pCsr->apSegment==0 ){
+    rc = SQLITE_NOMEM;
+  }else{
+    memset(pCsr->apSegment, 0, nByte);
+    rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0);
+  }
+  if( rc==SQLITE_OK ){
+    int i;
+    int rc2;
+    sqlite3_bind_int64(pStmt, 1, iAbsLevel);
+    assert( pCsr->nSegment==0 );
+    for(i=0; rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW && i<nSeg; i++){
+      rc = sqlite3Fts3SegReaderNew(i, 0,
+          sqlite3_column_int64(pStmt, 1),        /* segdir.start_block */
+          sqlite3_column_int64(pStmt, 2),        /* segdir.leaves_end_block */
+          sqlite3_column_int64(pStmt, 3),        /* segdir.end_block */
+          sqlite3_column_blob(pStmt, 4),         /* segdir.root */
+          sqlite3_column_bytes(pStmt, 4),        /* segdir.root */
+          &pCsr->apSegment[i]
+      );
+      pCsr->nSegment++;
+    }
+    rc2 = sqlite3_reset(pStmt);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  return rc;
+}
+
+typedef struct IncrmergeWriter IncrmergeWriter;
+typedef struct NodeWriter NodeWriter;
+typedef struct Blob Blob;
+typedef struct NodeReader NodeReader;
+
+/*
+** An instance of the following structure is used as a dynamic buffer
+** to build up nodes or other blobs of data in.
+**
+** The function blobGrowBuffer() is used to extend the allocation.
+*/ +struct Blob { + char *a; /* Pointer to allocation */ + int n; /* Number of valid bytes of data in a[] */ + int nAlloc; /* Allocated size of a[] (nAlloc>=n) */ +}; + +/* +** This structure is used to build up buffers containing segment b-tree +** nodes (blocks). +*/ +struct NodeWriter { + sqlite3_int64 iBlock; /* Current block id */ + Blob key; /* Last key written to the current block */ + Blob block; /* Current block image */ +}; + +/* +** An object of this type contains the state required to create or append +** to an appendable b-tree segment. +*/ +struct IncrmergeWriter { + int nLeafEst; /* Space allocated for leaf blocks */ + int nWork; /* Number of leaf pages flushed */ + sqlite3_int64 iAbsLevel; /* Absolute level of input segments */ + int iIdx; /* Index of *output* segment in iAbsLevel+1 */ + sqlite3_int64 iStart; /* Block number of first allocated block */ + sqlite3_int64 iEnd; /* Block number of last allocated block */ + sqlite3_int64 nLeafData; /* Bytes of leaf page data so far */ + u8 bNoLeafData; /* If true, store 0 for segment size */ + NodeWriter aNodeWriter[FTS_MAX_APPENDABLE_HEIGHT]; +}; + +/* +** An object of the following type is used to read data from a single +** FTS segment node. See the following functions: +** +** nodeReaderInit() +** nodeReaderNext() +** nodeReaderRelease() +*/ +struct NodeReader { + const char *aNode; + int nNode; + int iOff; /* Current offset within aNode[] */ + + /* Output variables. Containing the current node entry. */ + sqlite3_int64 iChild; /* Pointer to child node */ + Blob term; /* Current term */ + const char *aDoclist; /* Pointer to doclist */ + int nDoclist; /* Size of doclist in bytes */ +}; + +/* +** If *pRc is not SQLITE_OK when this function is called, it is a no-op. +** Otherwise, if the allocation at pBlob->a is not already at least nMin +** bytes in size, extend (realloc) it to be so. +** +** If an OOM error occurs, set *pRc to SQLITE_NOMEM and leave pBlob->a +** unmodified. Otherwise, if the allocation succeeds, update pBlob->nAlloc +** to reflect the new size of the pBlob->a[] buffer. +*/ +static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){ + if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){ + int nAlloc = nMin; + char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc); + if( a ){ + pBlob->nAlloc = nAlloc; + pBlob->a = a; + }else{ + *pRc = SQLITE_NOMEM; + } + } +} + +/* +** Attempt to advance the node-reader object passed as the first argument to +** the next entry on the node. +** +** Return an error code if an error occurs (SQLITE_NOMEM is possible). +** Otherwise return SQLITE_OK. If there is no next entry on the node +** (e.g. because the current entry is the last) set NodeReader->aNode to +** NULL to indicate EOF. Otherwise, populate the NodeReader structure output +** variables for the new entry. 
+*/
+static int nodeReaderNext(NodeReader *p){
+  int bFirst = (p->term.n==0);    /* True for first term on the node */
+  int nPrefix = 0;                /* Bytes to copy from previous term */
+  int nSuffix = 0;                /* Bytes to append to the prefix */
+  int rc = SQLITE_OK;             /* Return code */
+
+  assert( p->aNode );
+  if( p->iChild && bFirst==0 ) p->iChild++;
+  if( p->iOff>=p->nNode ){
+    /* EOF */
+    p->aNode = 0;
+  }else{
+    if( bFirst==0 ){
+      p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nPrefix);
+    }
+    p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nSuffix);
+
+    blobGrowBuffer(&p->term, nPrefix+nSuffix, &rc);
+    if( rc==SQLITE_OK ){
+      memcpy(&p->term.a[nPrefix], &p->aNode[p->iOff], nSuffix);
+      p->term.n = nPrefix+nSuffix;
+      p->iOff += nSuffix;
+      if( p->iChild==0 ){
+        p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &p->nDoclist);
+        p->aDoclist = &p->aNode[p->iOff];
+        p->iOff += p->nDoclist;
+      }
+    }
+  }
+
+  assert( p->iOff<=p->nNode );
+
+  return rc;
+}
+
+/*
+** Release all dynamic resources held by node-reader object *p.
+*/
+static void nodeReaderRelease(NodeReader *p){
+  sqlite3_free(p->term.a);
+}
+
+/*
+** Initialize a node-reader object to read the node in buffer aNode/nNode.
+**
+** If successful, SQLITE_OK is returned and the NodeReader object set to
+** point to the first entry on the node (if any). Otherwise, an SQLite
+** error code is returned.
+*/
+static int nodeReaderInit(NodeReader *p, const char *aNode, int nNode){
+  memset(p, 0, sizeof(NodeReader));
+  p->aNode = aNode;
+  p->nNode = nNode;
+
+  /* Figure out if this is a leaf or an internal node. */
+  if( p->aNode[0] ){
+    /* An internal node. */
+    p->iOff = 1 + sqlite3Fts3GetVarint(&p->aNode[1], &p->iChild);
+  }else{
+    p->iOff = 1;
+  }
+
+  return nodeReaderNext(p);
+}
+
+/*
+** This function is called while writing an FTS segment each time a leaf
+** node is finished and written to disk. The key (zTerm/nTerm) is guaranteed
+** to be greater than the largest key on the node just written, but smaller
+** than or equal to the first key that will be written to the next leaf
+** node.
+**
+** The block id of the leaf node just written to disk may be found in
+** (pWriter->aNodeWriter[0].iBlock) when this function is called.
+*/
+static int fts3IncrmergePush(
+  Fts3Table *p,                   /* Fts3 table handle */
+  IncrmergeWriter *pWriter,       /* Writer object */
+  const char *zTerm,              /* Term to write to internal node */
+  int nTerm                       /* Bytes at zTerm */
+){
+  sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock;
+  int iLayer;
+
+  assert( nTerm>0 );
+  for(iLayer=1; ALWAYS(iLayer<FTS_MAX_APPENDABLE_HEIGHT); iLayer++){
+    sqlite3_int64 iNextPtr = 0;
+    NodeWriter *pNode = &pWriter->aNodeWriter[iLayer];
+    int rc = SQLITE_OK;
+    int nPrefix;
+    int nSuffix;
+    int nSpace;
+
+    /* Figure out how much space the key will consume if it is written to
+    ** the current node of layer iLayer. Due to the prefix compression,
+    ** the space required changes depending on which node the key is to
+    ** be added to. */
+    nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm);
+    nSuffix = nTerm - nPrefix;
+    nSpace  = sqlite3Fts3VarintLen(nPrefix);
+    nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
+
+    if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){
+      /* If the current node of layer iLayer contains zero keys, or if adding
+      ** the key to it will not cause it to grow to larger than nNodeSize
+      ** bytes in size, write the key here.
+
+/*
+** This function is called while writing an FTS segment each time a leaf
+** node is finished and written to disk. The key (zTerm/nTerm) is guaranteed
+** to be greater than the largest key on the node just written, but smaller
+** than or equal to the first key that will be written to the next leaf
+** node.
+**
+** The block id of the leaf node just written to disk may be found in
+** (pWriter->aNodeWriter[0].iBlock) when this function is called.
+*/
+static int fts3IncrmergePush(
+  Fts3Table *p,                   /* Fts3 table handle */
+  IncrmergeWriter *pWriter,       /* Writer object */
+  const char *zTerm,              /* Term to write to internal node */
+  int nTerm                       /* Bytes at zTerm */
+){
+  sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock;
+  int iLayer;
+
+  assert( nTerm>0 );
+  for(iLayer=1; ALWAYS(iLayer<FTS_MAX_APPENDABLE_HEIGHT); iLayer++){
+    sqlite3_int64 iNextPtr = 0;
+    NodeWriter *pNode = &pWriter->aNodeWriter[iLayer];
+    int rc = SQLITE_OK;
+    int nPrefix;
+    int nSuffix;
+    int nSpace;
+
+    /* Figure out how much space the key will consume if it is written to
+    ** the current node of layer iLayer. Due to the prefix compression,
+    ** the space required changes depending on which node the key is to
+    ** be added to.  */
+    nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm);
+    nSuffix = nTerm - nPrefix;
+    nSpace  = sqlite3Fts3VarintLen(nPrefix);
+    nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
+
+    if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){
+      /* If the current node of layer iLayer contains zero keys, or if adding
+      ** the key to it will not cause it to grow to larger than nNodeSize
+      ** bytes in size, write the key here.  */
+
+      Blob *pBlk = &pNode->block;
+      if( pBlk->n==0 ){
+        blobGrowBuffer(pBlk, p->nNodeSize, &rc);
+        if( rc==SQLITE_OK ){
+          pBlk->a[0] = (char)iLayer;
+          pBlk->n = 1 + sqlite3Fts3PutVarint(&pBlk->a[1], iPtr);
+        }
+      }
+      blobGrowBuffer(pBlk, pBlk->n + nSpace, &rc);
+      blobGrowBuffer(&pNode->key, nTerm, &rc);
+
+      if( rc==SQLITE_OK ){
+        if( pNode->key.n ){
+          pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix);
+        }
+        pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix);
+        memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix);
+        pBlk->n += nSuffix;
+
+        memcpy(pNode->key.a, zTerm, nTerm);
+        pNode->key.n = nTerm;
+      }
+    }else{
+      /* Otherwise, flush the current node of layer iLayer to disk.
+      ** Then allocate a new, empty sibling node. The key will be written
+      ** into the parent of this node. */
+      rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n);
+
+      assert( pNode->block.nAlloc>=p->nNodeSize );
+      pNode->block.a[0] = (char)iLayer;
+      pNode->block.n = 1 + sqlite3Fts3PutVarint(&pNode->block.a[1], iPtr+1);
+
+      iNextPtr = pNode->iBlock;
+      pNode->iBlock++;
+      pNode->key.n = 0;
+    }
+
+    if( rc!=SQLITE_OK || iNextPtr==0 ) return rc;
+    iPtr = iNextPtr;
+  }
+
+  assert( 0 );
+  return 0;
+}
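+
+/*
+** For illustration only (hypothetical values, not from the original
+** sources): after fts3IncrmergePush() has added the keys "ab" and "ax"
+** to an empty layer-1 node whose left-hand child is block 7, the node
+** image is:
+**
+**     0x01                        (header: height 1)
+**     varint(7)                   (block id of left-hand child)
+**     varint(2) "ab"              (first key: suffix only)
+**     varint(1) varint(1) "x"     (second key: nPrefix=1, nSuffix=1)
+**
+** Child pointers other than the left-hand one are implicit: the children
+** of an internal node occupy consecutive block ids, which is why
+** nodeReaderNext() simply increments iChild for each entry it visits.
+*/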
+
+/*
+** Append a term and (optionally) doclist to the FTS segment node currently
+** stored in blob *pNode. The node need not contain any terms, but the
+** header must be written before this function is called.
+**
+** A node header is a single 0x00 byte for a leaf node, or a height varint
+** followed by the left-hand-child varint for an internal node.
+**
+** The term to be appended is passed via arguments zTerm/nTerm. For a
+** leaf node, the doclist is passed as aDoclist/nDoclist. For an internal
+** node, both aDoclist and nDoclist must be passed 0.
+**
+** If the size of the value in blob pPrev is zero, then this is the first
+** term written to the node. Otherwise, pPrev contains a copy of the
+** previous term. Before this function returns, it is updated to contain a
+** copy of zTerm/nTerm.
+**
+** It is assumed that the buffer associated with pNode is already large
+** enough to accommodate the new entry. The buffer associated with pPrev
+** is extended by this function if required.
+**
+** If an error (i.e. OOM condition) occurs, an SQLite error code is
+** returned. Otherwise, SQLITE_OK.
+*/
+static int fts3AppendToNode(
+  Blob *pNode,                    /* Current node image to append to */
+  Blob *pPrev,                    /* Buffer containing previous term written */
+  const char *zTerm,              /* New term to write */
+  int nTerm,                      /* Size of zTerm in bytes */
+  const char *aDoclist,           /* Doclist (or NULL) to write */
+  int nDoclist                    /* Size of aDoclist in bytes */
+){
+  int rc = SQLITE_OK;             /* Return code */
+  int bFirst = (pPrev->n==0);     /* True if this is the first term written */
+  int nPrefix;                    /* Size of term prefix in bytes */
+  int nSuffix;                    /* Size of term suffix in bytes */
+
+  /* Node must have already been started. There must be a doclist for a
+  ** leaf node, and there must not be a doclist for an internal node.  */
+  assert( pNode->n>0 );
+  assert( (pNode->a[0]=='\0')==(aDoclist!=0) );
+
+  blobGrowBuffer(pPrev, nTerm, &rc);
+  if( rc!=SQLITE_OK ) return rc;
+
+  nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm);
+  nSuffix = nTerm - nPrefix;
+  memcpy(pPrev->a, zTerm, nTerm);
+  pPrev->n = nTerm;
+
+  if( bFirst==0 ){
+    pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nPrefix);
+  }
+  pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nSuffix);
+  memcpy(&pNode->a[pNode->n], &zTerm[nPrefix], nSuffix);
+  pNode->n += nSuffix;
+
+  if( aDoclist ){
+    pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nDoclist);
+    memcpy(&pNode->a[pNode->n], aDoclist, nDoclist);
+    pNode->n += nDoclist;
+  }
+
+  assert( pNode->n<=pNode->nAlloc );
+
+  return SQLITE_OK;
+}
+
+/*
+** Append the current term and doclist pointed to by cursor pCsr to the
+** appendable b-tree segment opened for writing by pWriter.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise.
+*/
+static int fts3IncrmergeAppend(
+  Fts3Table *p,                   /* Fts3 table handle */
+  IncrmergeWriter *pWriter,       /* Writer object */
+  Fts3MultiSegReader *pCsr        /* Cursor containing term and doclist */
+){
+  const char *zTerm = pCsr->zTerm;
+  int nTerm = pCsr->nTerm;
+  const char *aDoclist = pCsr->aDoclist;
+  int nDoclist = pCsr->nDoclist;
+  int rc = SQLITE_OK;             /* Return code */
+  int nSpace;                     /* Total space in bytes required on leaf */
+  int nPrefix;                    /* Size of prefix shared with previous term */
+  int nSuffix;                    /* Size of suffix (nTerm - nPrefix) */
+  NodeWriter *pLeaf;              /* Object used to write leaf nodes */
+
+  pLeaf = &pWriter->aNodeWriter[0];
+  nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm);
+  nSuffix = nTerm - nPrefix;
+
+  nSpace  = sqlite3Fts3VarintLen(nPrefix);
+  nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
+  nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist;
+
+  /* If the current block is not empty, and if adding this term/doclist
+  ** to the current block would make it larger than Fts3Table.nNodeSize
+  ** bytes, write this block out to the database. */
+  if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){
+    rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n);
+    pWriter->nWork++;
+
+    /* Add the current term to the parent node. The term added to the
+    ** parent must:
+    **
+    **   a) be greater than the largest term on the leaf node just written
+    **      to the database (still available in pLeaf->key), and
+    **
+    **   b) be less than or equal to the term about to be added to the new
+    **      leaf node (zTerm/nTerm).
+    **
+    ** In other words, it must be the prefix of zTerm 1 byte longer than
+    ** the common prefix (if any) of zTerm and pLeaf->key.
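+    **
+    ** For example (hypothetical terms, for illustration only): if the
+    ** largest term on the leaf just written is "forest" and zTerm is
+    ** "form", then nPrefix==3 ("for") and the term pushed to the parent
+    ** is the 4-byte prefix "form". It is greater than "forest" (since
+    ** 'm' sorts after 'e') and less than or equal to zTerm itself.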
+ */ + if( rc==SQLITE_OK ){ + rc = fts3IncrmergePush(p, pWriter, zTerm, nPrefix+1); + } + + /* Advance to the next output block */ + pLeaf->iBlock++; + pLeaf->key.n = 0; + pLeaf->block.n = 0; + + nSuffix = nTerm; + nSpace = 1; + nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; + nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; + } + + pWriter->nLeafData += nSpace; + blobGrowBuffer(&pLeaf->block, pLeaf->block.n + nSpace, &rc); + if( rc==SQLITE_OK ){ + if( pLeaf->block.n==0 ){ + pLeaf->block.n = 1; + pLeaf->block.a[0] = '\0'; + } + rc = fts3AppendToNode( + &pLeaf->block, &pLeaf->key, zTerm, nTerm, aDoclist, nDoclist + ); + } + + return rc; +} + +/* +** This function is called to release all dynamic resources held by the +** merge-writer object pWriter, and if no error has occurred, to flush +** all outstanding node buffers held by pWriter to disk. +** +** If *pRc is not SQLITE_OK when this function is called, then no attempt +** is made to write any data to disk. Instead, this function serves only +** to release outstanding resources. +** +** Otherwise, if *pRc is initially SQLITE_OK and an error occurs while +** flushing buffers to disk, *pRc is set to an SQLite error code before +** returning. +*/ +static void fts3IncrmergeRelease( + Fts3Table *p, /* FTS3 table handle */ + IncrmergeWriter *pWriter, /* Merge-writer object */ + int *pRc /* IN/OUT: Error code */ +){ + int i; /* Used to iterate through non-root layers */ + int iRoot; /* Index of root in pWriter->aNodeWriter */ + NodeWriter *pRoot; /* NodeWriter for root node */ + int rc = *pRc; /* Error code */ + + /* Set iRoot to the index in pWriter->aNodeWriter[] of the output segment + ** root node. If the segment fits entirely on a single leaf node, iRoot + ** will be set to 0. If the root node is the parent of the leaves, iRoot + ** will be 1. And so on. */ + for(iRoot=FTS_MAX_APPENDABLE_HEIGHT-1; iRoot>=0; iRoot--){ + NodeWriter *pNode = &pWriter->aNodeWriter[iRoot]; + if( pNode->block.n>0 ) break; + assert( *pRc || pNode->block.nAlloc==0 ); + assert( *pRc || pNode->key.nAlloc==0 ); + sqlite3_free(pNode->block.a); + sqlite3_free(pNode->key.a); + } + + /* Empty output segment. This is a no-op. */ + if( iRoot<0 ) return; + + /* The entire output segment fits on a single node. Normally, this means + ** the node would be stored as a blob in the "root" column of the %_segdir + ** table. However, this is not permitted in this case. The problem is that + ** space has already been reserved in the %_segments table, and so the + ** start_block and end_block fields of the %_segdir table must be populated. + ** And, by design or by accident, released versions of FTS cannot handle + ** segments that fit entirely on the root node with start_block!=0. + ** + ** Instead, create a synthetic root node that contains nothing but a + ** pointer to the single content node. So that the segment consists of a + ** single leaf and a single interior (root) node. + ** + ** Todo: Better might be to defer allocating space in the %_segments + ** table until we are sure it is needed. + */ + if( iRoot==0 ){ + Blob *pBlock = &pWriter->aNodeWriter[1].block; + blobGrowBuffer(pBlock, 1 + FTS3_VARINT_MAX, &rc); + if( rc==SQLITE_OK ){ + pBlock->a[0] = 0x01; + pBlock->n = 1 + sqlite3Fts3PutVarint( + &pBlock->a[1], pWriter->aNodeWriter[0].iBlock + ); + } + iRoot = 1; + } + pRoot = &pWriter->aNodeWriter[iRoot]; + + /* Flush all currently outstanding nodes to disk. 
*/
+  for(i=0; i<iRoot; i++){
+    NodeWriter *pNode = &pWriter->aNodeWriter[i];
+    if( pNode->block.n>0 && rc==SQLITE_OK ){
+      rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n);
+    }
+    sqlite3_free(pNode->block.a);
+    sqlite3_free(pNode->key.a);
+  }
+
+  /* Write the %_segdir record. */
+  if( rc==SQLITE_OK ){
+    rc = fts3WriteSegdir(p,
+        pWriter->iAbsLevel+1,               /* level */
+        pWriter->iIdx,                      /* idx */
+        pWriter->iStart,                    /* start_block */
+        pWriter->aNodeWriter[0].iBlock,     /* leaves_end_block */
+        pWriter->iEnd,                      /* end_block */
+        (pWriter->bNoLeafData==0 ? pWriter->nLeafData : 0),   /* nLeafData */
+        pRoot->block.a, pRoot->block.n      /* root */
+    );
+  }
+  sqlite3_free(pRoot->block.a);
+  sqlite3_free(pRoot->key.a);
+
+  *pRc = rc;
+}
+
+/*
+** Compare the term in buffer zLhs (size in bytes nLhs) with that in
+** zRhs (size in bytes nRhs) using memcmp. If one term is a prefix of
+** the other, it is considered to be smaller than the other.
+**
+** Return -ve if zLhs is smaller than zRhs, 0 if it is equal, or +ve
+** if it is greater.
+*/
+static int fts3TermCmp(
+  const char *zLhs, int nLhs,     /* LHS of comparison */
+  const char *zRhs, int nRhs      /* RHS of comparison */
+){
+  int nCmp = MIN(nLhs, nRhs);
+  int res;
+
+  res = memcmp(zLhs, zRhs, nCmp);
+  if( res==0 ) res = nLhs - nRhs;
+
+  return res;
+}
+
+
+/*
+** Query to see if the entry in the %_segments table with blockid iEnd is
+** NULL. If no error occurs and the entry is NULL, set *pbRes to 1 before
+** returning. Otherwise, set *pbRes to 0.
+**
+** Or, if an error occurs while querying the database, return an SQLite
+** error code. The final value of *pbRes is undefined in this case.
+**
+** This is used to test if a segment is an "appendable" segment. If it
+** is, then a NULL entry has been inserted into the %_segments table
+** with blockid %_segdir.end_block.
+*/
+static int fts3IsAppendable(Fts3Table *p, sqlite3_int64 iEnd, int *pbRes){
+  int bRes = 0;                   /* Result to set *pbRes to */
+  sqlite3_stmt *pCheck = 0;       /* Statement to query database with */
+  int rc;                         /* Return code */
+
+  rc = fts3SqlStmt(p, SQL_SEGMENT_IS_APPENDABLE, &pCheck, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pCheck, 1, iEnd);
+    if( SQLITE_ROW==sqlite3_step(pCheck) ) bRes = 1;
+    rc = sqlite3_reset(pCheck);
+  }
+
+  *pbRes = bRes;
+  return rc;
+}
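+
+/*
+** For illustration only (the example terms are hypothetical): under
+** fts3TermCmp() the following hold
+**
+**     fts3TermCmp("abc", 3, "abd", 3)  <  0     (memcmp decides)
+**     fts3TermCmp("abc", 3, "abcd", 4) <  0     (a prefix sorts first)
+**     fts3TermCmp("abc", 3, "abc", 3)  == 0
+**
+** This matches the order in which terms are stored within a segment,
+** which is why the comparison can be used to decide whether a new key
+** may be appended to an existing segment.
+*/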
+
+/*
+** This function is called when initializing an incremental-merge operation.
+** It checks if the existing segment with index value iIdx at absolute level
+** (iAbsLevel+1) can be appended to by the incremental merge. If it can, the
+** merge-writer object *pWriter is initialized to write to it.
+**
+** An existing segment can be appended to by an incremental merge if:
+**
+**   * It was initially created as an appendable segment (with all required
+**     space pre-allocated), and
+**
+**   * The first key read from the input (arguments zKey and nKey) is
+**     greater than the largest key currently stored in the potential
+**     output segment.
+*/
+static int fts3IncrmergeLoad(
+  Fts3Table *p,                   /* Fts3 table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute level of input segments */
+  int iIdx,                       /* Index of candidate output segment */
+  const char *zKey,               /* First key to write */
+  int nKey,                       /* Number of bytes in zKey */
+  IncrmergeWriter *pWriter        /* Populate this object */
+){
+  int rc;                         /* Return code */
+  sqlite3_stmt *pSelect = 0;      /* SELECT to read %_segdir entry */
+
+  rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pSelect, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_int64 iStart = 0;     /* Value of %_segdir.start_block */
+    sqlite3_int64 iLeafEnd = 0;   /* Value of %_segdir.leaves_end_block */
+    sqlite3_int64 iEnd = 0;       /* Value of %_segdir.end_block */
+    const char *aRoot = 0;        /* Pointer to %_segdir.root buffer */
+    int nRoot = 0;                /* Size of aRoot[] in bytes */
+    int rc2;                      /* Return code from sqlite3_reset() */
+    int bAppendable = 0;          /* Set to true if segment is appendable */
+
+    /* Read the %_segdir entry for index iIdx at absolute level (iAbsLevel+1) */
+    sqlite3_bind_int64(pSelect, 1, iAbsLevel+1);
+    sqlite3_bind_int(pSelect, 2, iIdx);
+    if( sqlite3_step(pSelect)==SQLITE_ROW ){
+      iStart = sqlite3_column_int64(pSelect, 1);
+      iLeafEnd = sqlite3_column_int64(pSelect, 2);
+      fts3ReadEndBlockField(pSelect, 3, &iEnd, &pWriter->nLeafData);
+      if( pWriter->nLeafData<0 ){
+        pWriter->nLeafData = pWriter->nLeafData * -1;
+      }
+      pWriter->bNoLeafData = (pWriter->nLeafData==0);
+      nRoot = sqlite3_column_bytes(pSelect, 4);
+      aRoot = sqlite3_column_blob(pSelect, 4);
+    }else{
+      return sqlite3_reset(pSelect);
+    }
+
+    /* Check for the zero-length marker in the %_segments table */
+    rc = fts3IsAppendable(p, iEnd, &bAppendable);
+
+    /* Check that zKey/nKey is larger than the largest key the candidate
+    ** output segment currently contains. */
+    if( rc==SQLITE_OK && bAppendable ){
+      char *aLeaf = 0;
+      int nLeaf = 0;
+
+      rc = sqlite3Fts3ReadBlock(p, iLeafEnd, &aLeaf, &nLeaf, 0);
+      if( rc==SQLITE_OK ){
+        NodeReader reader;
+        for(rc = nodeReaderInit(&reader, aLeaf, nLeaf);
+            rc==SQLITE_OK && reader.aNode;
+            rc = nodeReaderNext(&reader)
+        ){
+          assert( reader.aNode );
+        }
+        if( fts3TermCmp(zKey, nKey, reader.term.a, reader.term.n)<=0 ){
+          bAppendable = 0;
+        }
+        nodeReaderRelease(&reader);
+      }
+      sqlite3_free(aLeaf);
+    }
+
+    if( rc==SQLITE_OK && bAppendable ){
+      /* It is possible to append to this segment. Set up the IncrmergeWriter
+      ** object to do so.
+      */
+      int i;
+      int nHeight = (int)aRoot[0];
+      NodeWriter *pNode;
+
+      pWriter->nLeafEst = (int)((iEnd - iStart) + 1)/FTS_MAX_APPENDABLE_HEIGHT;
+      pWriter->iStart = iStart;
+      pWriter->iEnd = iEnd;
+      pWriter->iAbsLevel = iAbsLevel;
+      pWriter->iIdx = iIdx;
+
+      for(i=nHeight+1; i<FTS_MAX_APPENDABLE_HEIGHT; i++){
+        pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst;
+      }
+
+      pNode = &pWriter->aNodeWriter[nHeight];
+      pNode->iBlock = pWriter->iStart + pWriter->nLeafEst*nHeight;
+      blobGrowBuffer(&pNode->block, MAX(nRoot, p->nNodeSize), &rc);
+      if( rc==SQLITE_OK ){
+        memcpy(pNode->block.a, aRoot, nRoot);
+        pNode->block.n = nRoot;
+      }
+
+      for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){
+        NodeReader reader;
+        pNode = &pWriter->aNodeWriter[i];
+
+        rc = nodeReaderInit(&reader, pNode->block.a, pNode->block.n);
+        while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader);
+        blobGrowBuffer(&pNode->key, reader.term.n, &rc);
+        if( rc==SQLITE_OK ){
+          memcpy(pNode->key.a, reader.term.a, reader.term.n);
+          pNode->key.n = reader.term.n;
+          if( i>0 ){
+            char *aBlock = 0;
+            int nBlock = 0;
+            pNode = &pWriter->aNodeWriter[i-1];
+            pNode->iBlock = reader.iChild;
+            rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0);
+            blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize), &rc);
+            if( rc==SQLITE_OK ){
+              memcpy(pNode->block.a, aBlock, nBlock);
+              pNode->block.n = nBlock;
+            }
+            sqlite3_free(aBlock);
+          }
+        }
+        nodeReaderRelease(&reader);
+      }
+    }
+
+    rc2 = sqlite3_reset(pSelect);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  return rc;
+}
+
+/*
+** Determine the largest segment index value that exists within absolute
+** level iAbsLevel+1. If no error occurs, set *piIdx to this value plus
+** one before returning SQLITE_OK. Or, if there are no segments at all
+** within level iAbsLevel, set *piIdx to zero.
+**
+** If an error occurs, return an SQLite error code. The final value of
+** *piIdx is undefined in this case.
+*/
+static int fts3IncrmergeOutputIdx(
+  Fts3Table *p,                   /* FTS Table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute index of input segments */
+  int *piIdx                      /* OUT: Next free index at iAbsLevel+1 */
+){
+  int rc;
+  sqlite3_stmt *pOutputIdx = 0;   /* SQL used to find output index */
+
+  rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pOutputIdx, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pOutputIdx, 1, iAbsLevel+1);
+    sqlite3_step(pOutputIdx);
+    *piIdx = sqlite3_column_int(pOutputIdx, 0);
+    rc = sqlite3_reset(pOutputIdx);
+  }
+
+  return rc;
+}
+
+/*
+** Allocate an appendable output segment on absolute level iAbsLevel+1
+** with idx value iIdx.
+**
+** In the %_segdir table, a segment is defined by the values in three
+** columns:
+**
+**     start_block
+**     leaves_end_block
+**     end_block
+**
+** When an appendable segment is allocated, it is estimated that the
+** maximum number of leaf blocks that may be required is the sum of the
+** number of leaf blocks consumed by the input segments, plus the number
+** of input segments, multiplied by two. This value is stored in stack
+** variable nLeafEst.
+**
+** A total of 16*nLeafEst blocks are allocated when an appendable segment
+** is created ((1 + end_block - start_block)==16*nLeafEst). The contiguous
+** array of leaf nodes starts at the first block allocated. The array
+** of interior nodes that are parents of the leaf nodes start at block
+** (start_block + (1 + end_block - start_block) / 16). And so on.
+**
+** In the actual code below, the value "16" is replaced with the
+** pre-processor macro FTS_MAX_APPENDABLE_HEIGHT.
+*/
+static int fts3IncrmergeWriter(
+  Fts3Table *p,                   /* Fts3 table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute level of input segments */
+  int iIdx,                       /* Index of new output segment */
+  Fts3MultiSegReader *pCsr,       /* Cursor that data will be read from */
+  IncrmergeWriter *pWriter        /* Populate this object */
+){
+  int rc;                         /* Return Code */
+  int i;                          /* Iterator variable */
+  int nLeafEst = 0;               /* Blocks allocated for leaf nodes */
+  sqlite3_stmt *pLeafEst = 0;     /* SQL used to determine nLeafEst */
+  sqlite3_stmt *pFirstBlock = 0;  /* SQL used to determine first block */
+
+  /* Calculate nLeafEst. */
+  rc = fts3SqlStmt(p, SQL_MAX_LEAF_NODE_ESTIMATE, &pLeafEst, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pLeafEst, 1, iAbsLevel);
+    sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment);
+    if( SQLITE_ROW==sqlite3_step(pLeafEst) ){
+      nLeafEst = sqlite3_column_int(pLeafEst, 0);
+    }
+    rc = sqlite3_reset(pLeafEst);
+  }
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Calculate the first block to use in the output segment */
+  rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pFirstBlock, 0);
+  if( rc==SQLITE_OK ){
+    if( SQLITE_ROW==sqlite3_step(pFirstBlock) ){
+      pWriter->iStart = sqlite3_column_int64(pFirstBlock, 0);
+      pWriter->iEnd = pWriter->iStart - 1;
+      pWriter->iEnd += nLeafEst * FTS_MAX_APPENDABLE_HEIGHT;
+    }
+    rc = sqlite3_reset(pFirstBlock);
+  }
+  if( rc!=SQLITE_OK ) return rc;
+
+  /* Insert the marker in the %_segments table to make sure nobody tries
+  ** to steal the space just allocated. This is also used to identify
+  ** appendable segments. */
+  rc = fts3WriteSegment(p, pWriter->iEnd, 0, 0);
+  if( rc!=SQLITE_OK ) return rc;
+
+  pWriter->iAbsLevel = iAbsLevel;
+  pWriter->nLeafEst = nLeafEst;
+  pWriter->iIdx = iIdx;
+
+  /* Set up the array of NodeWriter objects */
+  for(i=0; i<FTS_MAX_APPENDABLE_HEIGHT; i++){
+    pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst;
+  }
+  return SQLITE_OK;
+}
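+
+/*
+** For illustration only (the numbers are hypothetical): with nLeafEst==100
+** and start_block==1000, the segment allocated above spans blocks
+** 1000..2599 (16*100 blocks in total, with FTS_MAX_APPENDABLE_HEIGHT==16):
+**
+**     blocks 1000..1099      leaf nodes (layer 0)
+**     blocks 1100..1199      parents of the leaves (layer 1)
+**     blocks 1200..1299      layer 2
+**     ...
+**     blocks 2500..2599      layer 15
+**
+** end_block is 2599, and the NULL row written to the %_segments table
+** with blockid 2599 serves as the "appendable segment" marker tested by
+** fts3IsAppendable().
+*/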
+
+/*
+** Remove an entry from the %_segdir table. This involves running the
+** following two statements:
+**
+**   DELETE FROM %_segdir WHERE level = :iAbsLevel AND idx = :iIdx
+**   UPDATE %_segdir SET idx = idx - 1 WHERE level = :iAbsLevel AND idx > :iIdx
+**
+** The DELETE statement removes the specific %_segdir entry. The UPDATE
+** statement ensures that the remaining segments have contiguously allocated
+** idx values.
+*/
+static int fts3RemoveSegdirEntry(
+  Fts3Table *p,                   /* FTS3 table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute level to delete from */
+  int iIdx                        /* Index of %_segdir entry to delete */
+){
+  int rc;                         /* Return code */
+  sqlite3_stmt *pDelete = 0;      /* DELETE statement */
+
+  rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_ENTRY, &pDelete, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pDelete, 1, iAbsLevel);
+    sqlite3_bind_int(pDelete, 2, iIdx);
+    sqlite3_step(pDelete);
+    rc = sqlite3_reset(pDelete);
+  }
+
+  return rc;
+}
+
+/*
+** One or more segments have just been removed from absolute level iAbsLevel.
+** Update the 'idx' values of the remaining segments in the level so that
+** the idx values are a contiguous sequence starting from 0.
+*/
+static int fts3RepackSegdirLevel(
+  Fts3Table *p,                   /* FTS3 table handle */
+  sqlite3_int64 iAbsLevel         /* Absolute level to repack */
+){
+  int rc;                         /* Return code */
+  int *aIdx = 0;                  /* Array of remaining idx values */
+  int nIdx = 0;                   /* Valid entries in aIdx[] */
+  int nAlloc = 0;                 /* Allocated size of aIdx[] */
+  int i;                          /* Iterator variable */
+  sqlite3_stmt *pSelect = 0;      /* Select statement to read idx values */
+  sqlite3_stmt *pUpdate = 0;      /* Update statement to modify idx values */
+
+  rc = fts3SqlStmt(p, SQL_SELECT_INDEXES, &pSelect, 0);
+  if( rc==SQLITE_OK ){
+    int rc2;
+    sqlite3_bind_int64(pSelect, 1, iAbsLevel);
+    while( SQLITE_ROW==sqlite3_step(pSelect) ){
+      if( nIdx>=nAlloc ){
+        int *aNew;
+        nAlloc += 16;
+        aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int));
+        if( !aNew ){
+          rc = SQLITE_NOMEM;
+          break;
+        }
+        aIdx = aNew;
+      }
+      aIdx[nIdx++] = sqlite3_column_int(pSelect, 0);
+    }
+    rc2 = sqlite3_reset(pSelect);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  if( rc==SQLITE_OK ){
+    rc = fts3SqlStmt(p, SQL_SHIFT_SEGDIR_ENTRY, &pUpdate, 0);
+  }
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pUpdate, 2, iAbsLevel);
+  }
+
+  assert( p->bIgnoreSavepoint==0 );
+  p->bIgnoreSavepoint = 1;
+  for(i=0; rc==SQLITE_OK && i<nIdx; i++){
+    if( aIdx[i]!=i ){
+      sqlite3_bind_int(pUpdate, 3, aIdx[i]);
+      sqlite3_bind_int(pUpdate, 1, i);
+      sqlite3_step(pUpdate);
+      rc = sqlite3_reset(pUpdate);
+    }
+  }
+  p->bIgnoreSavepoint = 0;
+
+  sqlite3_free(aIdx);
+  return rc;
+}
+
+static void fts3StartNode(Blob *pNode, int iHeight, sqlite3_int64 iChild){
+  pNode->a[0] = (char)iHeight;
+  if( iChild ){
+    assert( pNode->nAlloc>=1+sqlite3Fts3VarintLen(iChild) );
+    pNode->n = 1 + sqlite3Fts3PutVarint(&pNode->a[1], iChild);
+  }else{
+    assert( pNode->nAlloc>=1 );
+    pNode->n = 1;
+  }
+}
+
+/*
+** The first two arguments are a pointer to and the size of a segment b-tree
+** node. The node may be a leaf or an internal node.
+**
+** This function creates a new node image in blob object *pNew by copying
+** all terms that are greater than or equal to zTerm/nTerm (for leaf nodes)
+** or greater than zTerm/nTerm (for internal nodes) from aNode/nNode.
+*/
+static int fts3TruncateNode(
+  const char *aNode,              /* Current node image */
+  int nNode,                      /* Size of aNode in bytes */
+  Blob *pNew,                     /* OUT: Write new node image here */
+  const char *zTerm,              /* Omit all terms smaller than this */
+  int nTerm,                      /* Size of zTerm in bytes */
+  sqlite3_int64 *piBlock          /* OUT: Block number in next layer down */
+){
+  NodeReader reader;              /* Reader object */
+  Blob prev = {0, 0, 0};          /* Previous term written to new node */
+  int rc = SQLITE_OK;             /* Return code */
+  int bLeaf = aNode[0]=='\0';     /* True for a leaf node */
+
+  /* Allocate required output space */
+  blobGrowBuffer(pNew, nNode, &rc);
+  if( rc!=SQLITE_OK ) return rc;
+  pNew->n = 0;
+
+  /* Populate new node buffer */
+  for(rc = nodeReaderInit(&reader, aNode, nNode);
+      rc==SQLITE_OK && reader.aNode;
+      rc = nodeReaderNext(&reader)
+  ){
+    if( pNew->n==0 ){
+      int res = fts3TermCmp(reader.term.a, reader.term.n, zTerm, nTerm);
+      if( res<0 || (bLeaf==0 && res==0) ) continue;
+      fts3StartNode(pNew, (int)aNode[0], reader.iChild);
+      *piBlock = reader.iChild;
+    }
+    rc = fts3AppendToNode(
+        pNew, &prev, reader.term.a, reader.term.n,
+        reader.aDoclist, reader.nDoclist
+    );
+    if( rc!=SQLITE_OK ) break;
+  }
+  if( pNew->n==0 ){
+    fts3StartNode(pNew, (int)aNode[0], reader.iChild);
+    *piBlock = reader.iChild;
+  }
+  assert( pNew->n<=pNew->nAlloc );
+
+  nodeReaderRelease(&reader);
+  sqlite3_free(prev.a);
+  return rc;
+}
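+
+/*
+** For illustration only (hypothetical contents): if a leaf node holds the
+** terms ("cat", "cow", "dog") and zTerm/nTerm is "cow", fts3TruncateNode()
+** writes a new leaf image containing ("cow", "dog"). For an internal node
+** the comparison is strict, so an entry equal to "cow" is also dropped and
+** *piBlock is set to the child block that may still contain "cow", which
+** is where truncation continues in the next layer down.
+*/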
+
+/*
+** Remove all terms smaller than zTerm/nTerm from segment iIdx in absolute
+** level iAbsLevel. This may involve deleting entries from the %_segments
+** table, and modifying existing entries in both the %_segments and %_segdir
+** tables.
+**
+** SQLITE_OK is returned if the segment is updated successfully. Or an
+** SQLite error code otherwise.
+*/
+static int fts3TruncateSegment(
+  Fts3Table *p,                   /* FTS3 table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute level of segment to modify */
+  int iIdx,                       /* Index within level of segment to modify */
+  const char *zTerm,              /* Remove terms smaller than this */
+  int nTerm                       /* Number of bytes in buffer zTerm */
+){
+  int rc = SQLITE_OK;             /* Return code */
+  Blob root = {0,0,0};            /* New root page image */
+  Blob block = {0,0,0};           /* Buffer used for any other block */
+  sqlite3_int64 iBlock = 0;       /* Block id */
+  sqlite3_int64 iNewStart = 0;    /* New value for iStartBlock */
+  sqlite3_int64 iOldStart = 0;    /* Old value for iStartBlock */
+  sqlite3_stmt *pFetch = 0;       /* Statement used to fetch segdir */
+
+  rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pFetch, 0);
+  if( rc==SQLITE_OK ){
+    int rc2;                      /* sqlite3_reset() return code */
+    sqlite3_bind_int64(pFetch, 1, iAbsLevel);
+    sqlite3_bind_int(pFetch, 2, iIdx);
+    if( SQLITE_ROW==sqlite3_step(pFetch) ){
+      const char *aRoot = sqlite3_column_blob(pFetch, 4);
+      int nRoot = sqlite3_column_bytes(pFetch, 4);
+      iOldStart = sqlite3_column_int64(pFetch, 1);
+      rc = fts3TruncateNode(aRoot, nRoot, &root, zTerm, nTerm, &iBlock);
+    }
+    rc2 = sqlite3_reset(pFetch);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  while( rc==SQLITE_OK && iBlock ){
+    char *aBlock = 0;
+    int nBlock = 0;
+    iNewStart = iBlock;
+
+    rc = sqlite3Fts3ReadBlock(p, iBlock, &aBlock, &nBlock, 0);
+    if( rc==SQLITE_OK ){
+      rc = fts3TruncateNode(aBlock, nBlock, &block, zTerm, nTerm, &iBlock);
+    }
+    if( rc==SQLITE_OK ){
+      rc = fts3WriteSegment(p, iNewStart, block.a, block.n);
+    }
+    sqlite3_free(aBlock);
+  }
+
+  /* Variable iNewStart now contains the first valid leaf node. */
+  if( rc==SQLITE_OK && iNewStart ){
+    sqlite3_stmt *pDel = 0;
+    rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDel, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pDel, 1, iOldStart);
+      sqlite3_bind_int64(pDel, 2, iNewStart-1);
+      sqlite3_step(pDel);
+      rc = sqlite3_reset(pDel);
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    sqlite3_stmt *pChomp = 0;
+    rc = fts3SqlStmt(p, SQL_CHOMP_SEGDIR, &pChomp, 0);
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int64(pChomp, 1, iNewStart);
+      sqlite3_bind_blob(pChomp, 2, root.a, root.n, SQLITE_STATIC);
+      sqlite3_bind_int64(pChomp, 3, iAbsLevel);
+      sqlite3_bind_int(pChomp, 4, iIdx);
+      sqlite3_step(pChomp);
+      rc = sqlite3_reset(pChomp);
+    }
+  }
+
+  sqlite3_free(root.a);
+  sqlite3_free(block.a);
+  return rc;
+}
+
+/*
+** This function is called after an incremental-merge operation has run to
+** merge (or partially merge) two or more segments from absolute level
+** iAbsLevel.
+**
+** Each input segment is either removed from the db completely (if all of
+** its data was copied to the output segment by the incrmerge operation)
+** or modified in place so that it no longer contains those entries that
+** have been duplicated in the output segment.
+*/
+static int fts3IncrmergeChomp(
+  Fts3Table *p,                   /* FTS table handle */
+  sqlite3_int64 iAbsLevel,        /* Absolute level containing segments */
+  Fts3MultiSegReader *pCsr,       /* Chomp all segments opened by this cursor */
+  int *pnRem                      /* Number of segments not deleted */
+){
+  int i;
+  int nRem = 0;
+  int rc = SQLITE_OK;
+
+  for(i=pCsr->nSegment-1; i>=0 && rc==SQLITE_OK; i--){
+    Fts3SegReader *pSeg = 0;
+    int j;
+
+    /* Find the Fts3SegReader object with Fts3SegReader.iIdx==i. It is hiding
+    ** somewhere in the pCsr->apSegment[] array.  */
+    for(j=0; ALWAYS(j<pCsr->nSegment); j++){
+      pSeg = pCsr->apSegment[j];
+      if( pSeg->iIdx==i ) break;
+    }
+    assert( j<pCsr->nSegment && pSeg->iIdx==i );
+
+    if( pSeg->aNode==0 ){
+      /* Seg-reader is at EOF. Remove the entire input segment. */
+      rc = fts3DeleteSegment(p, pSeg);
+      if( rc==SQLITE_OK ){
+        rc = fts3RemoveSegdirEntry(p, iAbsLevel, pSeg->iIdx);
+      }
+      *pnRem = 0;
+    }else{
+      /* The incremental merge did not copy all the data from this
+      ** segment to the upper level. The segment is modified in place
+      ** so that it contains no keys smaller than zTerm/nTerm. */
+      const char *zTerm = pSeg->zTerm;
+      int nTerm = pSeg->nTerm;
+      rc = fts3TruncateSegment(p, iAbsLevel, pSeg->iIdx, zTerm, nTerm);
+      nRem++;
+    }
+  }
+
+  if( rc==SQLITE_OK && nRem!=pCsr->nSegment ){
+    rc = fts3RepackSegdirLevel(p, iAbsLevel);
+  }
+
+  *pnRem = nRem;
+  return rc;
+}
+
+/*
+** Store an incr-merge hint in the database.
+*/
+static int fts3IncrmergeHintStore(Fts3Table *p, Blob *pHint){
+  sqlite3_stmt *pReplace = 0;
+  int rc;                         /* Return code */
+
+  rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pReplace, 0);
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int(pReplace, 1, FTS_STAT_INCRMERGEHINT);
+    sqlite3_bind_blob(pReplace, 2, pHint->a, pHint->n, SQLITE_STATIC);
+    sqlite3_step(pReplace);
+    rc = sqlite3_reset(pReplace);
+  }
+
+  return rc;
+}
+
+/*
+** Load an incr-merge hint from the database. The incr-merge hint, if one
+** exists, is stored in the rowid==1 row of the %_stat table.
+**
+** If successful, populate blob *pHint with the value read from the %_stat
+** table and return SQLITE_OK. Otherwise, if an error occurs, return an
+** SQLite error code.
+*/
+static int fts3IncrmergeHintLoad(Fts3Table *p, Blob *pHint){
+  sqlite3_stmt *pSelect = 0;
+  int rc;
+
+  pHint->n = 0;
+  rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pSelect, 0);
+  if( rc==SQLITE_OK ){
+    int rc2;
+    sqlite3_bind_int(pSelect, 1, FTS_STAT_INCRMERGEHINT);
+    if( SQLITE_ROW==sqlite3_step(pSelect) ){
+      const char *aHint = sqlite3_column_blob(pSelect, 0);
+      int nHint = sqlite3_column_bytes(pSelect, 0);
+      if( aHint ){
+        blobGrowBuffer(pHint, nHint, &rc);
+        if( rc==SQLITE_OK ){
+          memcpy(pHint->a, aHint, nHint);
+          pHint->n = nHint;
+        }
+      }
+    }
+    rc2 = sqlite3_reset(pSelect);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  return rc;
+}
+
+/*
+** If *pRc is not SQLITE_OK when this function is called, it is a no-op.
+** Otherwise, append an entry to the hint stored in blob *pHint. Each entry
+** consists of two varints, the absolute level number of the input segments
+** and the number of input segments.
+**
+** If successful, leave *pRc set to SQLITE_OK and return. If an error occurs,
+** set *pRc to an SQLite error code before returning.
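+**
+** For illustration only (the values are hypothetical): pushing the entry
+** (iAbsLevel=1026, nInput=4) appends the bytes
+**
+**     0x82 0x08      (varint 1026: low 7 bits first, high bit set on
+**                     all but the final byte)
+**     0x04           (varint 4)
+**
+** to the blob. fts3IncrmergeHintPop() below recovers the most recent pair
+** by scanning backwards for varint boundaries.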
+*/ +static void fts3IncrmergeHintPush( + Blob *pHint, /* Hint blob to append to */ + i64 iAbsLevel, /* First varint to store in hint */ + int nInput, /* Second varint to store in hint */ + int *pRc /* IN/OUT: Error code */ +){ + blobGrowBuffer(pHint, pHint->n + 2*FTS3_VARINT_MAX, pRc); + if( *pRc==SQLITE_OK ){ + pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], iAbsLevel); + pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], (i64)nInput); + } +} + +/* +** Read the last entry (most recently pushed) from the hint blob *pHint +** and then remove the entry. Write the two values read to *piAbsLevel and +** *pnInput before returning. +** +** If no error occurs, return SQLITE_OK. If the hint blob in *pHint does +** not contain at least two valid varints, return SQLITE_CORRUPT_VTAB. +*/ +static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){ + const int nHint = pHint->n; + int i; + + i = pHint->n-2; + while( i>0 && (pHint->a[i-1] & 0x80) ) i--; + while( i>0 && (pHint->a[i-1] & 0x80) ) i--; + + pHint->n = i; + i += sqlite3Fts3GetVarint(&pHint->a[i], piAbsLevel); + i += fts3GetVarint32(&pHint->a[i], pnInput); + if( i!=nHint ) return FTS_CORRUPT_VTAB; + + return SQLITE_OK; +} + + +/* +** Attempt an incremental merge that writes nMerge leaf blocks. +** +** Incremental merges happen nMin segments at a time. The segments +** to be merged are the nMin oldest segments (the ones with the smallest +** values for the _segdir.idx field) in the highest level that contains +** at least nMin segments. Multiple merges might occur in an attempt to +** write the quota of nMerge leaf blocks. +*/ +SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){ + int rc; /* Return code */ + int nRem = nMerge; /* Number of leaf pages yet to be written */ + Fts3MultiSegReader *pCsr; /* Cursor used to read input data */ + Fts3SegFilter *pFilter; /* Filter used with cursor pCsr */ + IncrmergeWriter *pWriter; /* Writer object */ + int nSeg = 0; /* Number of input segments */ + sqlite3_int64 iAbsLevel = 0; /* Absolute level number to work on */ + Blob hint = {0, 0, 0}; /* Hint read from %_stat table */ + int bDirtyHint = 0; /* True if blob 'hint' has been modified */ + + /* Allocate space for the cursor, filter and writer objects */ + const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter); + pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc); + if( !pWriter ) return SQLITE_NOMEM; + pFilter = (Fts3SegFilter *)&pWriter[1]; + pCsr = (Fts3MultiSegReader *)&pFilter[1]; + + rc = fts3IncrmergeHintLoad(p, &hint); + while( rc==SQLITE_OK && nRem>0 ){ + const i64 nMod = FTS3_SEGDIR_MAXLEVEL * p->nIndex; + sqlite3_stmt *pFindLevel = 0; /* SQL used to determine iAbsLevel */ + int bUseHint = 0; /* True if attempting to append */ + int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */ + + /* Search the %_segdir table for the absolute level with the smallest + ** relative level number that contains at least nMin segments, if any. + ** If one is found, set iAbsLevel to the absolute level number and + ** nSeg to nMin. If no level with at least nMin segments can be found, + ** set nSeg to -1. 
+    */
+    rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0);
+    sqlite3_bind_int(pFindLevel, 1, MAX(2, nMin));
+    if( sqlite3_step(pFindLevel)==SQLITE_ROW ){
+      iAbsLevel = sqlite3_column_int64(pFindLevel, 0);
+      nSeg = sqlite3_column_int(pFindLevel, 1);
+      assert( nSeg>=2 );
+    }else{
+      nSeg = -1;
+    }
+    rc = sqlite3_reset(pFindLevel);
+
+    /* If the hint read from the %_stat table is not empty, check if the
+    ** last entry in it specifies a relative level smaller than or equal
+    ** to the level identified by the block above (if any). If so, this
+    ** iteration of the loop will work on merging at the hinted level.
+    */
+    if( rc==SQLITE_OK && hint.n ){
+      int nHint = hint.n;
+      sqlite3_int64 iHintAbsLevel = 0;      /* Hint level */
+      int nHintSeg = 0;                     /* Hint number of segments */
+
+      rc = fts3IncrmergeHintPop(&hint, &iHintAbsLevel, &nHintSeg);
+      if( nSeg<0 || (iAbsLevel % nMod) >= (iHintAbsLevel % nMod) ){
+        iAbsLevel = iHintAbsLevel;
+        nSeg = nHintSeg;
+        bUseHint = 1;
+        bDirtyHint = 1;
+      }else{
+        /* This undoes the effect of the HintPop() above - so that no entry
+        ** is removed from the hint blob. */
+        hint.n = nHint;
+      }
+    }
+
+    /* If nSeg is less than zero, then there is no level with at least
+    ** nMin segments and no hint in the %_stat table. No work to do.
+    ** Exit early in this case. */
+    if( nSeg<0 ) break;
+
+    /* Open a cursor to iterate through the contents of the oldest nSeg
+    ** indexes of absolute level iAbsLevel. If this cursor is opened using
+    ** the 'hint' parameters, it is possible that there are fewer than nSeg
+    ** segments available in level iAbsLevel. In this case, no work is
+    ** done on iAbsLevel - fall through to the next iteration of the loop
+    ** to start work on some other level. */
+    memset(pWriter, 0, nAlloc);
+    pFilter->flags = FTS3_SEGMENT_REQUIRE_POS;
+
+    if( rc==SQLITE_OK ){
+      rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx);
+      assert( bUseHint==1 || bUseHint==0 );
+      if( iIdx==0 || (bUseHint && iIdx==1) ){
+        int bIgnore = 0;
+        rc = fts3SegmentIsMaxLevel(p, iAbsLevel+1, &bIgnore);
+        if( bIgnore ){
+          pFilter->flags |= FTS3_SEGMENT_IGNORE_EMPTY;
+        }
+      }
+    }
+
+    if( rc==SQLITE_OK ){
+      rc = fts3IncrmergeCsr(p, iAbsLevel, nSeg, pCsr);
+    }
+    if( SQLITE_OK==rc && pCsr->nSegment==nSeg
+     && SQLITE_OK==(rc = sqlite3Fts3SegReaderStart(p, pCsr, pFilter))
+     && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pCsr))
+    ){
+      if( bUseHint && iIdx>0 ){
+        const char *zKey = pCsr->zTerm;
+        int nKey = pCsr->nTerm;
+        rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter);
+      }else{
+        rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter);
+      }
+
+      if( rc==SQLITE_OK && pWriter->nLeafEst ){
+        fts3LogMerge(nSeg, iAbsLevel);
+        do {
+          rc = fts3IncrmergeAppend(p, pWriter, pCsr);
+          if( rc==SQLITE_OK ) rc = sqlite3Fts3SegReaderStep(p, pCsr);
+          if( pWriter->nWork>=nRem && rc==SQLITE_ROW ) rc = SQLITE_OK;
+        }while( rc==SQLITE_ROW );
+
+        /* Update or delete the input segments */
+        if( rc==SQLITE_OK ){
+          nRem -= (1 + pWriter->nWork);
+          rc = fts3IncrmergeChomp(p, iAbsLevel, pCsr, &nSeg);
+          if( nSeg!=0 ){
+            bDirtyHint = 1;
+            fts3IncrmergeHintPush(&hint, iAbsLevel, nSeg, &rc);
+          }
+        }
+      }
+
+      if( nSeg!=0 ){
+        pWriter->nLeafData = pWriter->nLeafData * -1;
+      }
+      fts3IncrmergeRelease(p, pWriter, &rc);
+      if( nSeg==0 && pWriter->bNoLeafData==0 ){
+        fts3PromoteSegments(p, iAbsLevel+1, pWriter->nLeafData);
+      }
+    }
+
+    sqlite3Fts3SegReaderFinish(pCsr);
+  }
+
+  /* Write the hint values into the %_stat table for the next incr-merger */
+  if( bDirtyHint && rc==SQLITE_OK ){
+    rc = fts3IncrmergeHintStore(p, &hint);
+  }
+
+  sqlite3_free(pWriter);
+  sqlite3_free(hint.a);
+  return rc;
+}
+
+/*
+** Convert the text beginning at *pz into an integer and return
+** its value. Advance *pz to point to the first character past
+** the integer.
+*/
+static int fts3Getint(const char **pz){
+  const char *z = *pz;
+  int i = 0;
+  while( (*z)>='0' && (*z)<='9' ) i = 10*i + *(z++) - '0';
+  *pz = z;
+  return i;
+}
+
+/*
+** Process statements of the form:
+**
+**    INSERT INTO table(table) VALUES('merge=A,B');
+**
+** A and B are integers that decode to be the number of leaf pages
+** written for the merge, and the minimum number of segments on a level
+** before it will be selected for a merge, respectively.
+*/
+static int fts3DoIncrmerge(
+  Fts3Table *p,                   /* FTS3 table handle */
+  const char *zParam              /* Nul-terminated string containing "A,B" */
+){
+  int rc;
+  int nMin = (FTS3_MERGE_COUNT / 2);
+  int nMerge = 0;
+  const char *z = zParam;
+
+  /* Read the first integer value */
+  nMerge = fts3Getint(&z);
+
+  /* If the first integer value is followed by a ',', read the second
+  ** integer value. */
+  if( z[0]==',' && z[1]!='\0' ){
+    z++;
+    nMin = fts3Getint(&z);
+  }
+
+  if( z[0]!='\0' || nMin<2 ){
+    rc = SQLITE_ERROR;
+  }else{
+    rc = SQLITE_OK;
+    if( !p->bHasStat ){
+      assert( p->bFts4==0 );
+      sqlite3Fts3CreateStatTable(&rc, p);
+    }
+    if( rc==SQLITE_OK ){
+      rc = sqlite3Fts3Incrmerge(p, nMerge, nMin);
+    }
+    sqlite3Fts3SegmentsClose(p);
+  }
+  return rc;
+}
+
+/*
+** Process statements of the form:
+**
+**    INSERT INTO table(table) VALUES('automerge=X');
+**
+** where X is an integer. X==0 means to turn automerge off. X!=0 means
+** turn it on. The setting is persistent.
+*/
+static int fts3DoAutoincrmerge(
+  Fts3Table *p,                   /* FTS3 table handle */
+  const char *zParam              /* Nul-terminated string containing boolean */
+){
+  int rc = SQLITE_OK;
+  sqlite3_stmt *pStmt = 0;
+  p->nAutoincrmerge = fts3Getint(&zParam);
+  if( p->nAutoincrmerge==1 || p->nAutoincrmerge>FTS3_MERGE_COUNT ){
+    p->nAutoincrmerge = 8;
+  }
+  if( !p->bHasStat ){
+    assert( p->bFts4==0 );
+    sqlite3Fts3CreateStatTable(&rc, p);
+    if( rc ) return rc;
+  }
+  rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0);
+  if( rc ) return rc;
+  sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE);
+  sqlite3_bind_int(pStmt, 2, p->nAutoincrmerge);
+  sqlite3_step(pStmt);
+  rc = sqlite3_reset(pStmt);
+  return rc;
+}
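+
+/*
+** For example, assuming an FTS4 table named "pages" (the table name is
+** hypothetical; the command syntax is as described above):
+**
+**     INSERT INTO pages(pages) VALUES('merge=500,8');
+**     -- write up to 500 leaf pages, merging levels with 8 or more segments
+**
+**     INSERT INTO pages(pages) VALUES('automerge=8');
+**     -- enable persistent automatic incremental merging
+*/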
+
+/*
+** Return a 64-bit checksum for the FTS index entry specified by the
+** arguments to this function.
+*/
+static u64 fts3ChecksumEntry(
+  const char *zTerm,              /* Pointer to buffer containing term */
+  int nTerm,                      /* Size of zTerm in bytes */
+  int iLangid,                    /* Language id for current row */
+  int iIndex,                     /* Index (0..Fts3Table.nIndex-1) */
+  i64 iDocid,                     /* Docid for current row */
+  int iCol,                       /* Column number */
+  int iPos                        /* Position */
+){
+  int i;
+  u64 ret = (u64)iDocid;
+
+  ret += (ret<<3) + iLangid;
+  ret += (ret<<3) + iIndex;
+  ret += (ret<<3) + iCol;
+  ret += (ret<<3) + iPos;
+  for(i=0; i<nTerm; i++) ret += (ret<<3) + zTerm[i];
+
+  return ret;
+}
+
+/*
+** Return a checksum of all entries in the FTS index that correspond to
+** language id iLangid. The checksum is calculated by scanning the contents
+** of the FTS index (for index iIndex) and XOR-ing together the checksums
+** of the individual entries.
+**
+** If an error occurs, set *pRc to an SQLite error code before returning.
+*/
+static u64 fts3ChecksumIndex(
+  Fts3Table *p,                   /* FTS table handle */
+  int iLangid,                    /* Language id to return cksum for */
+  int iIndex,                     /* Index to checksum (0..p->nIndex-1) */
+  int *pRc                        /* OUT: Return code */
+){
+  Fts3SegFilter filter;
+  Fts3MultiSegReader csr;
+  int rc;
+  u64 cksum = 0;
+
+  assert( *pRc==SQLITE_OK );
+
+  memset(&filter, 0, sizeof(filter));
+  memset(&csr, 0, sizeof(csr));
+  filter.flags =  FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY;
+  filter.flags |= FTS3_SEGMENT_SCAN;
+
+  rc = sqlite3Fts3SegReaderCursor(
+      p, iLangid, iIndex, FTS3_SEGCURSOR_ALL, 0, 0, 0, 1,&csr
+  );
+  if( rc==SQLITE_OK ){
+    rc = sqlite3Fts3SegReaderStart(p, &csr, &filter);
+  }
+
+  if( rc==SQLITE_OK ){
+    while( SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, &csr)) ){
+      char *pCsr = csr.aDoclist;
+      char *pEnd = &pCsr[csr.nDoclist];
+
+      i64 iDocid = 0;
+      i64 iCol = 0;
+      i64 iPos = 0;
+
+      pCsr += sqlite3Fts3GetVarint(pCsr, &iDocid);
+      while( pCsr<pEnd ){
+        i64 iVal = 0;
+        pCsr += sqlite3Fts3GetVarint(pCsr, &iVal);
+        if( pCsr<pEnd ){
+          if( iVal==0 || iVal==1 ){
+            iCol = 0;
+            iPos = 0;
+            if( iVal ){
+              pCsr += sqlite3Fts3GetVarint(pCsr, &iCol);
+            }else{
+              pCsr += sqlite3Fts3GetVarint(pCsr, &iVal);
+              iDocid += iVal;
+            }
+          }else{
+            iPos += (iVal - 2);
+            cksum = cksum ^ fts3ChecksumEntry(
+                csr.zTerm, csr.nTerm, iLangid, iIndex, iDocid,
+                (int)iCol, (int)iPos
+            );
+          }
+        }
+      }
+    }
+  }
+  sqlite3Fts3SegReaderFinish(&csr);
+
+  *pRc = rc;
+  return cksum;
+}
+
+/*
+** Check if the contents of the FTS index match the current contents of the
+** content table. If no error occurs and the contents do match, set *pbOk
+** to true and return SQLITE_OK. Or if the contents do not match, set *pbOk
+** to false before returning.
+**
+** If an error occurs (e.g. an OOM or IO error), return an SQLite error
+** code. The final value of *pbOk is undefined in this case.
+*/
+static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){
+  int rc = SQLITE_OK;             /* Return code */
+  u64 cksum1 = 0;                 /* Checksum based on FTS index contents */
+  u64 cksum2 = 0;                 /* Checksum based on %_content contents */
+  sqlite3_stmt *pAllLangid = 0;   /* Statement to return all language-ids */
+
+  /* This block calculates the checksum according to the FTS index. */
+  rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0);
+  if( rc==SQLITE_OK ){
+    int rc2;
+    sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid);
+    sqlite3_bind_int(pAllLangid, 2, p->nIndex);
+    while( rc==SQLITE_OK && sqlite3_step(pAllLangid)==SQLITE_ROW ){
+      int iLangid = sqlite3_column_int(pAllLangid, 0);
+      int i;
+      for(i=0; i<p->nIndex; i++){
+        cksum1 = cksum1 ^ fts3ChecksumIndex(p, iLangid, i, &rc);
+      }
+    }
+    rc2 = sqlite3_reset(pAllLangid);
+    if( rc==SQLITE_OK ) rc = rc2;
+  }
+
+  /* This block calculates the checksum according to the %_content table */
+  if( rc==SQLITE_OK ){
+    sqlite3_tokenizer_module const *pModule = p->pTokenizer->pModule;
+    sqlite3_stmt *pStmt = 0;
+    char *zSql;
+
+    zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist);
+    if( !zSql ){
+      rc = SQLITE_NOMEM;
+    }else{
+      rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
+      sqlite3_free(zSql);
+    }
+
+    while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+      i64 iDocid = sqlite3_column_int64(pStmt, 0);
+      int iLang = langidFromSelect(p, pStmt);
+      int iCol;
+
+      for(iCol=0; rc==SQLITE_OK && iCol<p->nColumn; iCol++){
+        if( p->abNotindexed[iCol]==0 ){
+          const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1);
+          int nText = sqlite3_column_bytes(pStmt, iCol+1);
+          sqlite3_tokenizer_cursor *pT = 0;
+
+          rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, nText,&pT);
+          while( rc==SQLITE_OK ){
+            char const *zToken;       /* Buffer containing token */
+            int nToken = 0;           /* Number of bytes in token */
+            int iDum1 = 0, iDum2 = 0; /* Dummy variables */
+            int iPos = 0;             /* Position of token in zText */
+
+            rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos);
+            if( rc==SQLITE_OK ){
+              int i;
+              cksum2 = cksum2 ^ fts3ChecksumEntry(
+                  zToken, nToken, iLang, 0, iDocid, iCol, iPos
+              );
+              for(i=1; i<p->nIndex; i++){
+                if( p->aIndex[i].nPrefix<=nToken ){
+                  cksum2 = cksum2 ^ fts3ChecksumEntry(
+                      zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos
+                  );
+                }
+              }
+            }
+          }
+          if( pT ) pModule->xClose(pT);
+          if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+        }
+      }
+    }
+
+    sqlite3_finalize(pStmt);
+  }
+
+  *pbOk = (cksum1==cksum2);
+  return rc;
+}
+
+/*
+** Run the integrity-check. If no error occurs and the current contents of
+** the FTS index are correct, return SQLITE_OK. Or, if the contents of the
+** FTS index are incorrect, return SQLITE_CORRUPT_VTAB.
+**
+** Or, if an error (e.g. an OOM or IO error) occurs, return an SQLite
+** error code.
+**
+** The integrity-check works as follows.
+** For each token and indexed token prefix in the document set, a 64-bit
+** checksum is calculated (by code in fts3ChecksumEntry()) based on the
+** following:
+**
+**     + The index number (0 for the main index, 1 for the first prefix
+**       index etc.),
+**     + The token (or token prefix) text itself,
+**     + The language-id of the row it appears in,
+**     + The docid of the row it appears in,
+**     + The column it appears in, and
+**     + The token's position within that column.
+**
+** The checksums for all entries in the index are XORed together to create
+** a single checksum for the entire index.
+**
+** The integrity-check code calculates the same checksum in two ways:
+**
+**     1. By scanning the contents of the FTS index, and
+**
+**     2. By scanning and tokenizing the content table.
+**
+** If the two checksums are identical, the integrity-check is deemed to have
+** passed.
+*/
+static int fts3DoIntegrityCheck(
+  Fts3Table *p                    /* FTS3 table handle */
+){
+  int rc;
+  int bOk = 0;
+  rc = fts3IntegrityCheck(p, &bOk);
+  if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB;
+  return rc;
+}
+
+/*
+** Handle a 'special' INSERT of the form:
+**
+**   "INSERT INTO tbl(tbl) VALUES(<expr>)"
+**
+** Argument pVal contains the result of <expr>. Currently the only
+** meaningful value to insert is the text 'optimize'.
+*/
+static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){
+  int rc;                         /* Return Code */
+  const char *zVal = (const char *)sqlite3_value_text(pVal);
+  int nVal = sqlite3_value_bytes(pVal);
+
+  if( !zVal ){
+    return SQLITE_NOMEM;
+  }else if( nVal==8 && 0==sqlite3_strnicmp(zVal, "optimize", 8) ){
+    rc = fts3DoOptimize(p, 0);
+  }else if( nVal==7 && 0==sqlite3_strnicmp(zVal, "rebuild", 7) ){
+    rc = fts3DoRebuild(p);
+  }else if( nVal==15 && 0==sqlite3_strnicmp(zVal, "integrity-check", 15) ){
+    rc = fts3DoIntegrityCheck(p);
+  }else if( nVal>6 && 0==sqlite3_strnicmp(zVal, "merge=", 6) ){
+    rc = fts3DoIncrmerge(p, &zVal[6]);
+  }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){
+    rc = fts3DoAutoincrmerge(p, &zVal[10]);
+#ifdef SQLITE_TEST
+  }else if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){
+    p->nNodeSize = atoi(&zVal[9]);
+    rc = SQLITE_OK;
+  }else if( nVal>11 && 0==sqlite3_strnicmp(zVal, "maxpending=", 11) ){
+    p->nMaxPendingData = atoi(&zVal[11]);
+    rc = SQLITE_OK;
+  }else if( nVal>21 && 0==sqlite3_strnicmp(zVal, "test-no-incr-doclist=", 21) ){
+    p->bNoIncrDoclist = atoi(&zVal[21]);
+    rc = SQLITE_OK;
+#endif
+  }else{
+    rc = SQLITE_ERROR;
+  }
+
+  return rc;
+}
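+
+/*
+** For example (the table name "pages" is hypothetical; the commands are
+** those handled by fts3SpecialInsert() above):
+**
+**     INSERT INTO pages(pages) VALUES('optimize');
+**     INSERT INTO pages(pages) VALUES('rebuild');
+**     INSERT INTO pages(pages) VALUES('integrity-check');
+*/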
+
+#ifndef SQLITE_DISABLE_FTS4_DEFERRED
+/*
+** Delete all cached deferred doclists. Deferred doclists are cached
+** (allocated) by the sqlite3Fts3CacheDeferredDoclists() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *pCsr){
+  Fts3DeferredToken *pDef;
+  for(pDef=pCsr->pDeferred; pDef; pDef=pDef->pNext){
+    fts3PendingListDelete(pDef->pList);
+    pDef->pList = 0;
+  }
+}
+
+/*
+** Free all entries in the pCsr->pDeferred list. Entries are added to
+** this list using sqlite3Fts3DeferToken().
+*/
+SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *pCsr){
+  Fts3DeferredToken *pDef;
+  Fts3DeferredToken *pNext;
+  for(pDef=pCsr->pDeferred; pDef; pDef=pNext){
+    pNext = pDef->pNext;
+    fts3PendingListDelete(pDef->pList);
+    sqlite3_free(pDef);
+  }
+  pCsr->pDeferred = 0;
+}
+
+/*
+** Generate deferred-doclists for all tokens in the pCsr->pDeferred list
+** based on the row that pCsr currently points to.
+**
+** A deferred-doclist is like any other doclist with position information
+** included, except that it only contains entries for a single row of the
+** table, not for all rows.
+*/
+SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *pCsr){
+  int rc = SQLITE_OK;             /* Return code */
+  if( pCsr->pDeferred ){
+    int i;                        /* Used to iterate through table columns */
+    sqlite3_int64 iDocid;         /* Docid of the row pCsr points to */
+    Fts3DeferredToken *pDef;      /* Used to iterate through deferred tokens */
+
+    Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+    sqlite3_tokenizer *pT = p->pTokenizer;
+    sqlite3_tokenizer_module const *pModule = pT->pModule;
+
+    assert( pCsr->isRequireSeek==0 );
+    iDocid = sqlite3_column_int64(pCsr->pStmt, 0);
+
+    for(i=0; i<p->nColumn && rc==SQLITE_OK; i++){
+      if( p->abNotindexed[i]==0 ){
+        const char *zText = (const char *)sqlite3_column_text(pCsr->pStmt, i+1);
+        sqlite3_tokenizer_cursor *pTC = 0;
+
+        rc = sqlite3Fts3OpenTokenizer(pT, pCsr->iLangid, zText, -1, &pTC);
+        while( rc==SQLITE_OK ){
+          char const *zToken;       /* Buffer containing token */
+          int nToken = 0;           /* Number of bytes in token */
+          int iDum1 = 0, iDum2 = 0; /* Dummy variables */
+          int iPos = 0;             /* Position of token in zText */
+
+          rc = pModule->xNext(pTC, &zToken, &nToken, &iDum1, &iDum2, &iPos);
+          for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){
+            Fts3PhraseToken *pPT = pDef->pToken;
+            if( (pDef->iCol>=p->nColumn || pDef->iCol==i)
+             && (pPT->bFirst==0 || iPos==0)
+             && (pPT->n==nToken || (pPT->isPrefix && pPT->n<nToken))
+             && (0==memcmp(zToken, pPT->z, pPT->n))
+            ){
+              fts3PendingListAppend(&pDef->pList, iDocid, i, iPos, &rc);
+            }
+          }
+        }
+        if( pTC ) pModule->xClose(pTC);
+        if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+      }
+    }
+
+    for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){
+      if( pDef->pList ){
+        rc = fts3PendingListAppendVarint(&pDef->pList, 0);
+      }
+    }
+  }
+
+  return rc;
+}
+
+SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(
+  Fts3DeferredToken *p,
+  char **ppData,
+  int *pnData
+){
+  char *pRet;
+  int nSkip;
+  sqlite3_int64 dummy;
+
+  *ppData = 0;
+  *pnData = 0;
+
+  if( p->pList==0 ){
+    return SQLITE_OK;
+  }
+
+  pRet = (char *)sqlite3_malloc(p->pList->nData);
+  if( !pRet ) return SQLITE_NOMEM;
+
+  nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy);
+  *pnData = p->pList->nData - nSkip;
+  *ppData = pRet;
+
+  memcpy(pRet, &p->pList->aData[nSkip], *pnData);
+  return SQLITE_OK;
+}
+
+/*
+** Add an entry for token pToken to the pCsr->pDeferred list.
+*/
+SQLITE_PRIVATE int sqlite3Fts3DeferToken(
+  Fts3Cursor *pCsr,               /* Fts3 table cursor */
+  Fts3PhraseToken *pToken,        /* Token to defer */
+  int iCol                        /* Column that token must appear in (or -1) */
+){
+  Fts3DeferredToken *pDeferred;
+  pDeferred = sqlite3_malloc(sizeof(*pDeferred));
+  if( !pDeferred ){
+    return SQLITE_NOMEM;
+  }
+  memset(pDeferred, 0, sizeof(*pDeferred));
+  pDeferred->pToken = pToken;
+  pDeferred->pNext = pCsr->pDeferred;
+  pDeferred->iCol = iCol;
+  pCsr->pDeferred = pDeferred;
+
+  assert( pToken->pDeferred==0 );
+  pToken->pDeferred = pDeferred;
+
+  return SQLITE_OK;
+}
+#endif
+
+/*
+** SQLite value pRowid contains the rowid of a row that may or may not be
+** present in the FTS3 table. If it is, delete it and adjust the contents
+** of subsidiary data structures accordingly.
+*/
+static int fts3DeleteByRowid(
+  Fts3Table *p,
+  sqlite3_value *pRowid,
+  int *pnChng,                    /* IN/OUT: Decrement if row is deleted */
+  u32 *aSzDel
+){
+  int rc = SQLITE_OK;             /* Return code */
+  int bFound = 0;                 /* True if *pRowid really is in the table */
+
+  fts3DeleteTerms(&rc, p, pRowid, aSzDel, &bFound);
+  if( bFound && rc==SQLITE_OK ){
+    int isEmpty = 0;              /* Deleting *pRowid leaves the table empty */
+    rc = fts3IsEmpty(p, pRowid, &isEmpty);
+    if( rc==SQLITE_OK ){
+      if( isEmpty ){
+        /* Deleting this row means the whole table is empty. In this case
+        ** delete the contents of all three tables and throw away any
+        ** data in the pendingTerms hash table. */
+        rc = fts3DeleteAll(p, 1);
+        *pnChng = 0;
+        memset(aSzDel, 0, sizeof(u32) * (p->nColumn+1) * 2);
+      }else{
+        *pnChng = *pnChng - 1;
+        if( p->zContentTbl==0 ){
+          fts3SqlExec(&rc, p, SQL_DELETE_CONTENT, &pRowid);
+        }
+        if( p->bHasDocsize ){
+          fts3SqlExec(&rc, p, SQL_DELETE_DOCSIZE, &pRowid);
+        }
+      }
+    }
+  }
+
+  return rc;
+}
+
+/*
+** This function does the work for the xUpdate method of FTS3 virtual
+** tables. The schema of the virtual table being:
+**
+**     CREATE TABLE <table name>(
+**       <user columns>,
+**       <table name> HIDDEN,
+**       docid HIDDEN,
+**       <langid> HIDDEN
+**     );
+**
+**
+*/
+SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(
+  sqlite3_vtab *pVtab,            /* FTS3 vtab object */
+  int nArg,                       /* Size of argument array */
+  sqlite3_value **apVal,          /* Array of arguments */
+  sqlite_int64 *pRowid            /* OUT: The affected (or effected) rowid */
+){
+  Fts3Table *p = (Fts3Table *)pVtab;
+  int rc = SQLITE_OK;             /* Return Code */
+  int isRemove = 0;               /* True for an UPDATE or DELETE */
+  u32 *aSzIns = 0;                /* Sizes of inserted documents */
+  u32 *aSzDel = 0;                /* Sizes of deleted documents */
+  int nChng = 0;                  /* Net change in number of documents */
+  int bInsertDone = 0;
+
+  /* At this point it must be known if the %_stat table exists or not.
+  ** So bHasStat may not be 2.  */
+  assert( p->bHasStat==0 || p->bHasStat==1 );
+
+  assert( p->pSegments==0 );
+  assert(
+      nArg==1                     /* DELETE operations */
+   || nArg==(2 + p->nColumn + 3)  /* INSERT or UPDATE operations */
+  );
+
+  /* Check for a "special" INSERT operation. One of the form:
+  **
+  **   INSERT INTO xyz(xyz) VALUES('command');
+  */
+  if( nArg>1
+   && sqlite3_value_type(apVal[0])==SQLITE_NULL
+   && sqlite3_value_type(apVal[p->nColumn+2])!=SQLITE_NULL
+  ){
+    rc = fts3SpecialInsert(p, apVal[p->nColumn+2]);
+    goto update_out;
+  }
+
+  if( nArg>1 && sqlite3_value_int(apVal[2 + p->nColumn + 2])<0 ){
+    rc = SQLITE_CONSTRAINT;
+    goto update_out;
+  }
+
+  /* Allocate space to hold the change in document sizes */
+  aSzDel = sqlite3_malloc( sizeof(aSzDel[0])*(p->nColumn+1)*2 );
+  if( aSzDel==0 ){
+    rc = SQLITE_NOMEM;
+    goto update_out;
+  }
+  aSzIns = &aSzDel[p->nColumn+1];
+  memset(aSzDel, 0, sizeof(aSzDel[0])*(p->nColumn+1)*2);
+
+  rc = fts3Writelock(p);
+  if( rc!=SQLITE_OK ) goto update_out;
+
+  /* If this is an INSERT operation, or an UPDATE that modifies the rowid
+  ** value, then this operation requires constraint handling.
+  **
+  ** If the on-conflict mode is REPLACE, this means that the existing row
+  ** should be deleted from the database before inserting the new row. Or,
+  ** if the on-conflict mode is other than REPLACE, then this method must
+  ** detect the conflict and return SQLITE_CONSTRAINT before beginning to
+  ** modify the database file.
+  */
+  if( nArg>1 && p->zContentTbl==0 ){
+    /* Find the value object that holds the new rowid value. */
+    sqlite3_value *pNewRowid = apVal[3+p->nColumn];
+    if( sqlite3_value_type(pNewRowid)==SQLITE_NULL ){
+      pNewRowid = apVal[1];
+    }
+
+    if( sqlite3_value_type(pNewRowid)!=SQLITE_NULL && (
+        sqlite3_value_type(apVal[0])==SQLITE_NULL
+     || sqlite3_value_int64(apVal[0])!=sqlite3_value_int64(pNewRowid)
+    )){
+      /* The new rowid is not NULL (in this case the rowid will be
+      ** automatically assigned and there is no chance of a conflict), and
+      ** the statement is either an INSERT or an UPDATE that modifies the
+      ** rowid column. So if the conflict mode is REPLACE, then delete any
+      ** existing row with rowid=pNewRowid.
+      **
+      ** Or, if the conflict mode is not REPLACE, insert the new record into
+      ** the %_content table. If we hit the duplicate rowid constraint (or any
+      ** other error) while doing so, return immediately.
+      **
+      ** This branch may also run if pNewRowid contains a value that cannot
+      ** be losslessly converted to an integer. In this case, the eventual
+      ** call to fts3InsertData() (either just below or further on in this
+      ** function) will return SQLITE_MISMATCH. If fts3DeleteByRowid is
+      ** invoked, it will delete zero rows (since no row will have
+      ** docid=$pNewRowid if $pNewRowid is not an integer value).
+      */
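+      /* For example (hypothetical statement, for illustration only): for
+      **
+      **     INSERT OR REPLACE INTO tbl(docid, ...) VALUES(44, ...);
+      **
+      ** sqlite3_vtab_on_conflict() reports SQLITE_REPLACE, so any existing
+      ** row with docid=44 is deleted below before the new row is written. */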
+      if( sqlite3_vtab_on_conflict(p->db)==SQLITE_REPLACE ){
+        rc = fts3DeleteByRowid(p, pNewRowid, &nChng, aSzDel);
+      }else{
+        rc = fts3InsertData(p, apVal, pRowid);
+        bInsertDone = 1;
+      }
+    }
+  }
+  if( rc!=SQLITE_OK ){
+    goto update_out;
+  }
+
+  /* If this is a DELETE or UPDATE operation, remove the old record. */
+  if( sqlite3_value_type(apVal[0])!=SQLITE_NULL ){
+    assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER );
+    rc = fts3DeleteByRowid(p, apVal[0], &nChng, aSzDel);
+    isRemove = 1;
+  }
+
+  /* If this is an INSERT or UPDATE operation, insert the new record. */
+  if( nArg>1 && rc==SQLITE_OK ){
+    int iLangid = sqlite3_value_int(apVal[2 + p->nColumn + 2]);
+    if( bInsertDone==0 ){
+      rc = fts3InsertData(p, apVal, pRowid);
+      if( rc==SQLITE_CONSTRAINT && p->zContentTbl==0 ){
+        rc = FTS_CORRUPT_VTAB;
+      }
+    }
+    if( rc==SQLITE_OK && (!isRemove || *pRowid!=p->iPrevDocid ) ){
+      rc = fts3PendingTermsDocid(p, 0, iLangid, *pRowid);
+    }
+    if( rc==SQLITE_OK ){
+      assert( p->iPrevDocid==*pRowid );
+      rc = fts3InsertTerms(p, iLangid, apVal, aSzIns);
+    }
+    if( p->bHasDocsize ){
+      fts3InsertDocsize(&rc, p, aSzIns);
+    }
+    nChng++;
+  }
+
+  if( p->bFts4 ){
+    fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nChng);
+  }
+
+ update_out:
+  sqlite3_free(aSzDel);
+  sqlite3Fts3SegmentsClose(p);
+  return rc;
+}
+
+/*
+** Flush any data in the pending-terms hash table to disk. If successful,
+** merge all segments in the database (including the new segment, if
+** there was any data to flush) into a single segment.
+*/
+SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){
+  int rc;
+  rc = sqlite3_exec(p->db, "SAVEPOINT fts3", 0, 0, 0);
+  if( rc==SQLITE_OK ){
+    rc = fts3DoOptimize(p, 1);
+    if( rc==SQLITE_OK || rc==SQLITE_DONE ){
+      int rc2 = sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0);
+      if( rc2!=SQLITE_OK ) rc = rc2;
+    }else{
+      sqlite3_exec(p->db, "ROLLBACK TO fts3", 0, 0, 0);
+      sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0);
+    }
+  }
+  sqlite3Fts3SegmentsClose(p);
+  return rc;
+}
+
+#endif
+
+/************** End of fts3_write.c ******************************************/
+/************** Begin file fts3_snippet.c ************************************/
+/*
+** 2009 Oct 23
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+
+/* #include <string.h> */
+/* #include <assert.h> */
+
+/*
+** Characters that may appear in the second argument to matchinfo().
+*/
+#define FTS3_MATCHINFO_NPHRASE   'p'        /* 1 value */
+#define FTS3_MATCHINFO_NCOL      'c'        /* 1 value */
+#define FTS3_MATCHINFO_NDOC      'n'        /* 1 value */
+#define FTS3_MATCHINFO_AVGLENGTH 'a'        /* nCol values */
+#define FTS3_MATCHINFO_LENGTH    'l'        /* nCol values */
+#define FTS3_MATCHINFO_LCS       's'        /* nCol values */
+#define FTS3_MATCHINFO_HITS      'x'        /* 3*nCol*nPhrase values */
+#define FTS3_MATCHINFO_LHITS     'y'        /* nCol*nPhrase values */
+#define FTS3_MATCHINFO_LHITS_BM  'b'        /* nCol*nPhrase values */
+
+/*
+** The default value for the second argument to matchinfo().
+*/
+#define FTS3_MATCHINFO_DEFAULT   "pcx"
+
+
+/*
+** Used as an fts3ExprIterate() context when loading phrase doclists to
+** Fts3Expr.aDoclist[]/nDoclist.
+*/ +typedef struct LoadDoclistCtx LoadDoclistCtx; +struct LoadDoclistCtx { + Fts3Cursor *pCsr; /* FTS3 Cursor */ + int nPhrase; /* Number of phrases seen so far */ + int nToken; /* Number of tokens seen so far */ +}; + +/* +** The following types are used as part of the implementation of the +** fts3BestSnippet() routine. +*/ +typedef struct SnippetIter SnippetIter; +typedef struct SnippetPhrase SnippetPhrase; +typedef struct SnippetFragment SnippetFragment; + +struct SnippetIter { + Fts3Cursor *pCsr; /* Cursor snippet is being generated from */ + int iCol; /* Extract snippet from this column */ + int nSnippet; /* Requested snippet length (in tokens) */ + int nPhrase; /* Number of phrases in query */ + SnippetPhrase *aPhrase; /* Array of size nPhrase */ + int iCurrent; /* First token of current snippet */ +}; + +struct SnippetPhrase { + int nToken; /* Number of tokens in phrase */ + char *pList; /* Pointer to start of phrase position list */ + int iHead; /* Next value in position list */ + char *pHead; /* Position list data following iHead */ + int iTail; /* Next value in trailing position list */ + char *pTail; /* Position list data following iTail */ +}; + +struct SnippetFragment { + int iCol; /* Column snippet is extracted from */ + int iPos; /* Index of first token in snippet */ + u64 covered; /* Mask of query phrases covered */ + u64 hlmask; /* Mask of snippet terms to highlight */ +}; + +/* +** This type is used as an fts3ExprIterate() context object while +** accumulating the data returned by the matchinfo() function. +*/ +typedef struct MatchInfo MatchInfo; +struct MatchInfo { + Fts3Cursor *pCursor; /* FTS3 Cursor */ + int nCol; /* Number of columns in table */ + int nPhrase; /* Number of matchable phrases in query */ + sqlite3_int64 nDoc; /* Number of docs in database */ + char flag; + u32 *aMatchinfo; /* Pre-allocated buffer */ +}; + +/* +** An instance of this structure is used to manage a pair of buffers, each +** (nElem * sizeof(u32)) bytes in size. See the MatchinfoBuffer code below +** for details. +*/ +struct MatchinfoBuffer { + u8 aRef[3]; + int nElem; + int bGlobal; /* Set if global data is loaded */ + char *zMatchinfo; + u32 aMatchinfo[1]; +}; + + +/* +** The snippet() and offsets() functions both return text values. An instance +** of the following structure is used to accumulate those values while the +** functions are running. See fts3StringAppend() for details. +*/ +typedef struct StrBuffer StrBuffer; +struct StrBuffer { + char *z; /* Pointer to buffer containing string */ + int n; /* Length of z in bytes (excl. nul-term) */ + int nAlloc; /* Allocated size of buffer z in bytes */ +}; + + +/************************************************************************* +** Start of MatchinfoBuffer code. +*/ + +/* +** Allocate a two-slot MatchinfoBuffer object. 
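+**
+** As a rough layout sketch (informal, derived from the code below): for
+** nElem==N, a single allocation holds
+**
+**   aMatchinfo[0]            byte offset of slot 1 from the object start
+**   aMatchinfo[1..N]         slot 1 (N u32 values)
+**   aMatchinfo[N+1]          byte offset of slot 2 from the object start
+**   aMatchinfo[N+2..2N+1]    slot 2 (N u32 values)
+**   zMatchinfo               copy of the matchinfo() format string
+**
+** aRef[0] is the cursor's own reference; aRef[1] and aRef[2] are set while
+** slot 1 or slot 2, respectively, is lent out as an SQL result value.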
+*/ +static MatchinfoBuffer *fts3MIBufferNew(int nElem, const char *zMatchinfo){ + MatchinfoBuffer *pRet; + int nByte = sizeof(u32) * (2*nElem + 1) + sizeof(MatchinfoBuffer); + int nStr = (int)strlen(zMatchinfo); + + pRet = sqlite3_malloc(nByte + nStr+1); + if( pRet ){ + memset(pRet, 0, nByte); + pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; + pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + sizeof(u32)*(nElem+1); + pRet->nElem = nElem; + pRet->zMatchinfo = ((char*)pRet) + nByte; + memcpy(pRet->zMatchinfo, zMatchinfo, nStr+1); + pRet->aRef[0] = 1; + } + + return pRet; +} + +static void fts3MIBufferFree(void *p){ + MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]); + + assert( (u32*)p==&pBuf->aMatchinfo[1] + || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2] + ); + if( (u32*)p==&pBuf->aMatchinfo[1] ){ + pBuf->aRef[1] = 0; + }else{ + pBuf->aRef[2] = 0; + } + + if( pBuf->aRef[0]==0 && pBuf->aRef[1]==0 && pBuf->aRef[2]==0 ){ + sqlite3_free(pBuf); + } +} + +static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ + void (*xRet)(void*) = 0; + u32 *aOut = 0; + + if( p->aRef[1]==0 ){ + p->aRef[1] = 1; + aOut = &p->aMatchinfo[1]; + xRet = fts3MIBufferFree; + } + else if( p->aRef[2]==0 ){ + p->aRef[2] = 1; + aOut = &p->aMatchinfo[p->nElem+2]; + xRet = fts3MIBufferFree; + }else{ + aOut = (u32*)sqlite3_malloc(p->nElem * sizeof(u32)); + if( aOut ){ + xRet = sqlite3_free; + if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32)); + } + } + + *paOut = aOut; + return xRet; +} + +static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){ + p->bGlobal = 1; + memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32)); +} + +/* +** Free a MatchinfoBuffer object allocated using fts3MIBufferNew() +*/ +SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p){ + if( p ){ + assert( p->aRef[0]==1 ); + p->aRef[0] = 0; + if( p->aRef[0]==0 && p->aRef[1]==0 && p->aRef[2]==0 ){ + sqlite3_free(p); + } + } +} + +/* +** End of MatchinfoBuffer code. +*************************************************************************/ + + +/* +** This function is used to help iterate through a position-list. A position +** list is a list of unique integers, sorted from smallest to largest. Each +** element of the list is represented by an FTS3 varint that takes the value +** of the difference between the current element and the previous one plus +** two. For example, to store the position-list: +** +** 4 9 113 +** +** the three varints: +** +** 6 7 106 +** +** are encoded. +** +** When this function is called, *pp points to the start of an element of +** the list. *piPos contains the value of the previous entry in the list. +** After it returns, *piPos contains the value of the next element of the +** list and *pp is advanced to the following varint. +*/ +static void fts3GetDeltaPosition(char **pp, int *piPos){ + int iVal; + *pp += fts3GetVarint32(*pp, &iVal); + *piPos += (iVal-2); +} + +/* +** Helper function for fts3ExprIterate() (see below). 
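+**
+** For example (illustrative), for the query 'a OR (b NOT c)' the callback
+** x is invoked once for phrase "a" and once for phrase "b"; phrase "c" is
+** skipped because it is the right-hand child of a NOT node and so can
+** never contribute matches of its own.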
+*/ +static int fts3ExprIterate2( + Fts3Expr *pExpr, /* Expression to iterate phrases of */ + int *piPhrase, /* Pointer to phrase counter */ + int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ + void *pCtx /* Second argument to pass to callback */ +){ + int rc; /* Return code */ + int eType = pExpr->eType; /* Type of expression node pExpr */ + + if( eType!=FTSQUERY_PHRASE ){ + assert( pExpr->pLeft && pExpr->pRight ); + rc = fts3ExprIterate2(pExpr->pLeft, piPhrase, x, pCtx); + if( rc==SQLITE_OK && eType!=FTSQUERY_NOT ){ + rc = fts3ExprIterate2(pExpr->pRight, piPhrase, x, pCtx); + } + }else{ + rc = x(pExpr, *piPhrase, pCtx); + (*piPhrase)++; + } + return rc; +} + +/* +** Iterate through all phrase nodes in an FTS3 query, except those that +** are part of a sub-tree that is the right-hand-side of a NOT operator. +** For each phrase node found, the supplied callback function is invoked. +** +** If the callback function returns anything other than SQLITE_OK, +** the iteration is abandoned and the error code returned immediately. +** Otherwise, SQLITE_OK is returned after a callback has been made for +** all eligible phrase nodes. +*/ +static int fts3ExprIterate( + Fts3Expr *pExpr, /* Expression to iterate phrases of */ + int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ + void *pCtx /* Second argument to pass to callback */ +){ + int iPhrase = 0; /* Variable used as the phrase counter */ + return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx); +} + + +/* +** This is an fts3ExprIterate() callback used while loading the doclists +** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also +** fts3ExprLoadDoclists(). +*/ +static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ + int rc = SQLITE_OK; + Fts3Phrase *pPhrase = pExpr->pPhrase; + LoadDoclistCtx *p = (LoadDoclistCtx *)ctx; + + UNUSED_PARAMETER(iPhrase); + + p->nPhrase++; + p->nToken += pPhrase->nToken; + + return rc; +} + +/* +** Load the doclists for each phrase in the query associated with FTS3 cursor +** pCsr. +** +** If pnPhrase is not NULL, then *pnPhrase is set to the number of matchable +** phrases in the expression (all phrases except those directly or +** indirectly descended from the right-hand-side of a NOT operator). If +** pnToken is not NULL, then it is set to the number of tokens in all +** matchable phrases of the expression. +*/ +static int fts3ExprLoadDoclists( + Fts3Cursor *pCsr, /* Fts3 cursor for current query */ + int *pnPhrase, /* OUT: Number of phrases in query */ + int *pnToken /* OUT: Number of tokens in query */ +){ + int rc; /* Return Code */ + LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */ + sCtx.pCsr = pCsr; + rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx); + if( pnPhrase ) *pnPhrase = sCtx.nPhrase; + if( pnToken ) *pnToken = sCtx.nToken; + return rc; +} + +static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ + (*(int *)ctx)++; + pExpr->iPhrase = iPhrase; + return SQLITE_OK; +} +static int fts3ExprPhraseCount(Fts3Expr *pExpr){ + int nPhrase = 0; + (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); + return nPhrase; +} + +/* +** Advance the position list iterator specified by the first two +** arguments so that it points to the first element with a value greater +** than or equal to parameter iNext. 
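+**
+** Continuing the position-list example above (4 9 113): advancing an
+** iterator positioned on 4 with iNext==10 skips 9 and stops on 113;
+** with iNext==200 the list is exhausted and the iterator is marked
+** EOF (pIter set to 0, iIter to -1).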
+*/
+static void fts3SnippetAdvance(char **ppIter, int *piIter, int iNext){
+  char *pIter = *ppIter;
+  if( pIter ){
+    int iIter = *piIter;
+
+    while( iIter<iNext ){
+      if( 0==(*pIter & 0xFE) ){
+        iIter = -1;
+        pIter = 0;
+        break;
+      }
+      fts3GetDeltaPosition(&pIter, &iIter);
+    }
+
+    *piIter = iIter;
+    *ppIter = pIter;
+  }
+}
+
+/*
+** Advance the snippet iterator to the next candidate snippet.
+*/
+static int fts3SnippetNextCandidate(SnippetIter *pIter){
+  int i;                          /* Loop counter */
+
+  if( pIter->iCurrent<0 ){
+    /* The SnippetIter object has just been initialized. The first snippet
+    ** candidate always starts at offset 0 (even if this candidate has a
+    ** score of 0.0).
+    */
+    pIter->iCurrent = 0;
+
+    /* Advance the 'head' iterator of each phrase to the first offset that
+    ** is greater than or equal to (iNext+nSnippet).
+    */
+    for(i=0; i<pIter->nPhrase; i++){
+      SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+      fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, pIter->nSnippet);
+    }
+  }else{
+    int iStart;
+    int iEnd = 0x7FFFFFFF;
+
+    for(i=0; i<pIter->nPhrase; i++){
+      SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+      if( pPhrase->pHead && pPhrase->iHead<iEnd ){
+        iEnd = pPhrase->iHead;
+      }
+    }
+    if( iEnd==0x7FFFFFFF ){
+      return 1;
+    }
+
+    pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1;
+    for(i=0; i<pIter->nPhrase; i++){
+      SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+      fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, iEnd+1);
+      fts3SnippetAdvance(&pPhrase->pTail, &pPhrase->iTail, iStart);
+    }
+  }
+
+  return 0;
+}
+
+/*
+** Retrieve information about the current candidate snippet of snippet
+** iterator pIter.
+*/
+static void fts3SnippetDetails(
+  SnippetIter *pIter,             /* Snippet iterator */
+  u64 mCovered,                   /* Bitmask of phrases already covered */
+  int *piToken,                   /* OUT: First token of proposed snippet */
+  int *piScore,                   /* OUT: "Score" for this snippet */
+  u64 *pmCover,                   /* OUT: Bitmask of phrases covered */
+  u64 *pmHighlight                /* OUT: Bitmask of terms to highlight */
+){
+  int iStart = pIter->iCurrent;   /* First token of snippet */
+  int iScore = 0;                 /* Score of this snippet */
+  int i;                          /* Loop counter */
+  u64 mCover = 0;                 /* Mask of phrases covered by this snippet */
+  u64 mHighlight = 0;             /* Mask of tokens to highlight in snippet */
+
+  for(i=0; i<pIter->nPhrase; i++){
+    SnippetPhrase *pPhrase = &pIter->aPhrase[i];
+    if( pPhrase->pTail ){
+      char *pCsr = pPhrase->pTail;
+      int iCsr = pPhrase->iTail;
+
+      while( iCsr<(iStart+pIter->nSnippet) ){
+        int j;
+        u64 mPhrase = (u64)1 << i;
+        u64 mPos = (u64)1 << (iCsr - iStart);
+        assert( iCsr>=iStart );
+        if( (mCover|mCovered)&mPhrase ){
+          iScore++;
+        }else{
+          iScore += 1000;
+        }
+        mCover |= mPhrase;
+
+        for(j=0; j<pPhrase->nToken; j++){
+          mHighlight |= (mPos>>j);
+        }
+
+        if( 0==(*pCsr & 0x0FE) ) break;
+        fts3GetDeltaPosition(&pCsr, &iCsr);
+      }
+    }
+  }
+
+  /* Set the output variables before returning. */
+  *piToken = iStart;
+  *piScore = iScore;
+  *pmCover = mCover;
+  *pmHighlight = mHighlight;
+}
+
+/*
+** This function is an fts3ExprIterate() callback used by fts3BestSnippet().
+** Each invocation populates an element of the SnippetIter.aPhrase[] array.
+*/ +static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ + SnippetIter *p = (SnippetIter *)ctx; + SnippetPhrase *pPhrase = &p->aPhrase[iPhrase]; + char *pCsr; + int rc; + + pPhrase->nToken = pExpr->pPhrase->nToken; + rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pCsr); + assert( rc==SQLITE_OK || pCsr==0 ); + if( pCsr ){ + int iFirst = 0; + pPhrase->pList = pCsr; + fts3GetDeltaPosition(&pCsr, &iFirst); + assert( iFirst>=0 ); + pPhrase->pHead = pCsr; + pPhrase->pTail = pCsr; + pPhrase->iHead = iFirst; + pPhrase->iTail = iFirst; + }else{ + assert( rc!=SQLITE_OK || ( + pPhrase->pList==0 && pPhrase->pHead==0 && pPhrase->pTail==0 + )); + } + + return rc; +} + +/* +** Select the fragment of text consisting of nFragment contiguous tokens +** from column iCol that represent the "best" snippet. The best snippet +** is the snippet with the highest score, where scores are calculated +** by adding: +** +** (a) +1 point for each occurrence of a matchable phrase in the snippet. +** +** (b) +1000 points for the first occurrence of each matchable phrase in +** the snippet for which the corresponding mCovered bit is not set. +** +** The selected snippet parameters are stored in structure *pFragment before +** returning. The score of the selected snippet is stored in *piScore +** before returning. +*/ +static int fts3BestSnippet( + int nSnippet, /* Desired snippet length */ + Fts3Cursor *pCsr, /* Cursor to create snippet for */ + int iCol, /* Index of column to create snippet from */ + u64 mCovered, /* Mask of phrases already covered */ + u64 *pmSeen, /* IN/OUT: Mask of phrases seen */ + SnippetFragment *pFragment, /* OUT: Best snippet found */ + int *piScore /* OUT: Score of snippet pFragment */ +){ + int rc; /* Return Code */ + int nList; /* Number of phrases in expression */ + SnippetIter sIter; /* Iterates through snippet candidates */ + int nByte; /* Number of bytes of space to allocate */ + int iBestScore = -1; /* Best snippet score found so far */ + int i; /* Loop counter */ + + memset(&sIter, 0, sizeof(sIter)); + + /* Iterate through the phrases in the expression to count them. The same + ** callback makes sure the doclists are loaded for each phrase. + */ + rc = fts3ExprLoadDoclists(pCsr, &nList, 0); + if( rc!=SQLITE_OK ){ + return rc; + } + + /* Now that it is known how many phrases there are, allocate and zero + ** the required space using malloc(). + */ + nByte = sizeof(SnippetPhrase) * nList; + sIter.aPhrase = (SnippetPhrase *)sqlite3_malloc(nByte); + if( !sIter.aPhrase ){ + return SQLITE_NOMEM; + } + memset(sIter.aPhrase, 0, nByte); + + /* Initialize the contents of the SnippetIter object. Then iterate through + ** the set of phrases in the expression to populate the aPhrase[] array. + */ + sIter.pCsr = pCsr; + sIter.iCol = iCol; + sIter.nSnippet = nSnippet; + sIter.nPhrase = nList; + sIter.iCurrent = -1; + rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter); + if( rc==SQLITE_OK ){ + + /* Set the *pmSeen output variable. 
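+    ** (A phrase is "seen" if its position list has at least one entry in
+    ** this column; mCovered, by contrast, tracks phrases already covered
+    ** by fragments selected earlier.)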
*/
+    for(i=0; i<nList; i++){
+      if( sIter.aPhrase[i].pHead ){
+        *pmSeen |= (u64)1 << i;
+      }
+    }
+
+    /* Loop through all candidate snippets. Store the best snippet in
+    ** *pFragment. Store its associated 'score' in *piScore.
+    */
+    pFragment->iCol = iCol;
+    while( !fts3SnippetNextCandidate(&sIter) ){
+      int iPos;
+      int iScore;
+      u64 mCover;
+      u64 mHighlite;
+      fts3SnippetDetails(&sIter, mCovered, &iPos, &iScore, &mCover,&mHighlite);
+      assert( iScore>=0 );
+      if( iScore>iBestScore ){
+        pFragment->iPos = iPos;
+        pFragment->hlmask = mHighlite;
+        pFragment->covered = mCover;
+        iBestScore = iScore;
+      }
+    }
+
+    *piScore = iBestScore;
+  }
+  sqlite3_free(sIter.aPhrase);
+  return rc;
+}
+
+
+/*
+** Append a string to the string-buffer passed as the first argument.
+**
+** If nAppend is negative, then the length of the string zAppend is
+** determined using strlen().
+*/
+static int fts3StringAppend(
+  StrBuffer *pStr,                /* Buffer to append to */
+  const char *zAppend,            /* Pointer to data to append to buffer */
+  int nAppend                     /* Size of zAppend in bytes (or -1) */
+){
+  if( nAppend<0 ){
+    nAppend = (int)strlen(zAppend);
+  }
+
+  /* If there is insufficient space allocated at StrBuffer.z, use realloc()
+  ** to grow the buffer so that it is big enough to accommodate the
+  ** appended data.
+  */
+  if( pStr->n+nAppend+1>=pStr->nAlloc ){
+    int nAlloc = pStr->nAlloc+nAppend+100;
+    char *zNew = sqlite3_realloc(pStr->z, nAlloc);
+    if( !zNew ){
+      return SQLITE_NOMEM;
+    }
+    pStr->z = zNew;
+    pStr->nAlloc = nAlloc;
+  }
+  assert( pStr->z!=0 && (pStr->nAlloc >= pStr->n+nAppend+1) );
+
+  /* Append the data to the string buffer. */
+  memcpy(&pStr->z[pStr->n], zAppend, nAppend);
+  pStr->n += nAppend;
+  pStr->z[pStr->n] = '\0';
+
+  return SQLITE_OK;
+}
+
+/*
+** The fts3BestSnippet() function often selects snippets that end with a
+** query term. That is, the final term of the snippet is always a term
+** that requires highlighting. For example, if 'X' is a highlighted term
+** and '.' is a non-highlighted term, BestSnippet() may select:
+**
+**     ........X.....X
+**
+** This function "shifts" the beginning of the snippet forward in the
+** document so that there are approximately the same number of
+** non-highlighted terms to the right of the final highlighted term as there
+** are to the left of the first highlighted term. For example, the snippet
+** above is altered to:
+**
+**     ....X.....X....
+**
+** This is done as part of extracting the snippet text, not when selecting
+** the snippet. Snippet selection is done based on doclists only, so there
+** is no way for fts3BestSnippet() to know whether or not the document
+** actually contains terms that follow the final highlighted term.
+*/
+static int fts3SnippetShift(
+  Fts3Table *pTab,                /* FTS3 table snippet comes from */
+  int iLangid,                    /* Language id to use in tokenizing */
+  int nSnippet,                   /* Number of tokens desired for snippet */
+  const char *zDoc,               /* Document text to extract snippet from */
+  int nDoc,                       /* Size of buffer zDoc in bytes */
+  int *piPos,                     /* IN/OUT: First token of snippet */
+  u64 *pHlmask                    /* IN/OUT: Mask of tokens to highlight */
+){
+  u64 hlmask = *pHlmask;          /* Local copy of initial highlight-mask */
+
+  if( hlmask ){
+    int nLeft;                    /* Tokens to the left of first highlight */
+    int nRight;                   /* Tokens to the right of last highlight */
+    int nDesired;                 /* Ideal number of tokens to shift forward */
+
+    for(nLeft=0; !(hlmask & ((u64)1 << nLeft)); nLeft++);
+    for(nRight=0; !(hlmask & ((u64)1 << (nSnippet-1-nRight))); nRight++);
+    nDesired = (nLeft-nRight)/2;
+
+    /* Ideally, the start of the snippet should be pushed forward in the
+    ** document nDesired tokens. This block checks if there are actually
+    ** nDesired tokens to the right of the snippet.
If so, *piPos and + ** *pHlMask are updated to shift the snippet nDesired tokens to the + ** right. Otherwise, the snippet is shifted by the number of tokens + ** available. + */ + if( nDesired>0 ){ + int nShift; /* Number of tokens to shift snippet by */ + int iCurrent = 0; /* Token counter */ + int rc; /* Return Code */ + sqlite3_tokenizer_module *pMod; + sqlite3_tokenizer_cursor *pC; + pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule; + + /* Open a cursor on zDoc/nDoc. Check if there are (nSnippet+nDesired) + ** or more tokens in zDoc/nDoc. + */ + rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, iLangid, zDoc, nDoc, &pC); + if( rc!=SQLITE_OK ){ + return rc; + } + while( rc==SQLITE_OK && iCurrent<(nSnippet+nDesired) ){ + const char *ZDUMMY; int DUMMY1 = 0, DUMMY2 = 0, DUMMY3 = 0; + rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &DUMMY2, &DUMMY3, &iCurrent); + } + pMod->xClose(pC); + if( rc!=SQLITE_OK && rc!=SQLITE_DONE ){ return rc; } + + nShift = (rc==SQLITE_DONE)+iCurrent-nSnippet; + assert( nShift<=nDesired ); + if( nShift>0 ){ + *piPos += nShift; + *pHlmask = hlmask >> nShift; + } + } + } + return SQLITE_OK; +} + +/* +** Extract the snippet text for fragment pFragment from cursor pCsr and +** append it to string buffer pOut. +*/ +static int fts3SnippetText( + Fts3Cursor *pCsr, /* FTS3 Cursor */ + SnippetFragment *pFragment, /* Snippet to extract */ + int iFragment, /* Fragment number */ + int isLast, /* True for final fragment in snippet */ + int nSnippet, /* Number of tokens in extracted snippet */ + const char *zOpen, /* String inserted before highlighted term */ + const char *zClose, /* String inserted after highlighted term */ + const char *zEllipsis, /* String inserted between snippets */ + StrBuffer *pOut /* Write output here */ +){ + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + int rc; /* Return code */ + const char *zDoc; /* Document text to extract snippet from */ + int nDoc; /* Size of zDoc in bytes */ + int iCurrent = 0; /* Current token number of document */ + int iEnd = 0; /* Byte offset of end of current token */ + int isShiftDone = 0; /* True after snippet is shifted */ + int iPos = pFragment->iPos; /* First token of snippet */ + u64 hlmask = pFragment->hlmask; /* Highlight-mask for snippet */ + int iCol = pFragment->iCol+1; /* Query column to extract text from */ + sqlite3_tokenizer_module *pMod; /* Tokenizer module methods object */ + sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor open on zDoc/nDoc */ + + zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol); + if( zDoc==0 ){ + if( sqlite3_column_type(pCsr->pStmt, iCol)!=SQLITE_NULL ){ + return SQLITE_NOMEM; + } + return SQLITE_OK; + } + nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol); + + /* Open a token cursor on the document. */ + pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule; + rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid, zDoc,nDoc,&pC); + if( rc!=SQLITE_OK ){ + return rc; + } + + while( rc==SQLITE_OK ){ + const char *ZDUMMY; /* Dummy argument used with tokenizer */ + int DUMMY1 = -1; /* Dummy argument used with tokenizer */ + int iBegin = 0; /* Offset in zDoc of start of token */ + int iFin = 0; /* Offset in zDoc of end of token */ + int isHighlight = 0; /* True for highlighted terms */ + + /* Variable DUMMY1 is initialized to a negative value above. Elsewhere + ** in the FTS code the variable that the third argument to xNext points to + ** is initialized to zero before the first (*but not necessarily + ** subsequent*) call to xNext(). 
This is done for a particular application
+    ** that needs to know whether or not the tokenizer is being used for
+    ** snippet generation or for some other purpose.
+    **
+    ** Extreme care is required when writing code to depend on this
+    ** initialization. It is not a documented part of the tokenizer interface.
+    ** If a tokenizer is used directly by any code outside of FTS, this
+    ** convention might not be respected. */
+    rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &iBegin, &iFin, &iCurrent);
+    if( rc!=SQLITE_OK ){
+      if( rc==SQLITE_DONE ){
+        /* Special case - the last token of the snippet is also the last token
+        ** of the column. Append any punctuation that occurred between the end
+        ** of the previous token and the end of the document to the output.
+        ** Then break out of the loop. */
+        rc = fts3StringAppend(pOut, &zDoc[iEnd], -1);
+      }
+      break;
+    }
+    if( iCurrent<iPos ){ continue; }
+
+    if( !isShiftDone ){
+      int n = nDoc - iBegin;
+      rc = fts3SnippetShift(
+          pTab, pCsr->iLangid, nSnippet, &zDoc[iBegin], n, &iPos, &hlmask
+      );
+      isShiftDone = 1;
+
+      /* Now that the shift has been done, check if the initial "..." are
+      ** required. They are required if (a) this is not the first fragment,
+      ** or (b) this fragment does not begin at position 0 of its column.
+      */
+      if( rc==SQLITE_OK ){
+        if( iPos>0 || iFragment>0 ){
+          rc = fts3StringAppend(pOut, zEllipsis, -1);
+        }else if( iBegin ){
+          rc = fts3StringAppend(pOut, zDoc, iBegin);
+        }
+      }
+      if( rc!=SQLITE_OK || iCurrent<iPos ) continue;
+    }
+
+    /* If the end of the snippet has been reached, break out of the loop. */
+    if( iCurrent>=(iPos+nSnippet) ){
+      if( isLast ){
+        rc = fts3StringAppend(pOut, zEllipsis, -1);
+      }
+      break;
+    }
+
+    /* Set isHighlight to true if this term should be highlighted. */
+    isHighlight = (hlmask & ((u64)1 << (iCurrent-iPos)))!=0;
+
+    if( iCurrent>iPos ) rc = fts3StringAppend(pOut, &zDoc[iEnd], iBegin-iEnd);
+    if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zOpen, -1);
+    if( rc==SQLITE_OK ) rc = fts3StringAppend(pOut, &zDoc[iBegin], iFin-iBegin);
+    if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zClose, -1);
+
+    iEnd = iFin;
+  }
+
+  pMod->xClose(pC);
+  return rc;
+}
+
+
+/*
+** This function is used to count the entries in a column-list (a
+** delta-encoded list of term offsets within a single column of a single
+** row). When this function is called, *ppCollist should point to the
+** beginning of the first varint in the column-list (the varint that
+** contains the position of the first matching term in the column data).
+** Before returning, *ppCollist is set to point to the first byte after
+** the last varint in the column-list (either the 0x00 signifying the end
+** of the position-list, or the 0x01 that precedes the column number of
+** the next column in the position-list).
+**
+** The number of elements in the column-list is returned.
+*/
+static int fts3ColumnlistCount(char **ppCollist){
+  char *pEnd = *ppCollist;
+  char c = 0;
+  int nEntry = 0;
+
+  /* A column-list is terminated by either a 0x01 or 0x00. */
+  while( 0xFE & (*pEnd | c) ){
+    c = *pEnd++ & 0x80;
+    if( !c ) nEntry++;
+  }
+
+  *ppCollist = pEnd;
+  return nEntry;
+}
+
+/*
+** This function gathers 'y' or 'b' data for a single phrase.
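+**
+** Layout sketch (informal): for 'y', phrase p writes nCol counters at
+** aMatchinfo[p*nCol + c], one hit-count per column c of the current row.
+** For 'b', each phrase instead gets (nCol+31)/32 u32 words holding one
+** bit per column, set when that column contains at least one hit.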
+*/
+static void fts3ExprLHits(
+  Fts3Expr *pExpr,                /* Phrase expression node */
+  MatchInfo *p                    /* Matchinfo context */
+){
+  Fts3Table *pTab = (Fts3Table *)p->pCursor->base.pVtab;
+  int iStart;
+  Fts3Phrase *pPhrase = pExpr->pPhrase;
+  char *pIter = pPhrase->doclist.pList;
+  int iCol = 0;
+
+  assert( p->flag==FTS3_MATCHINFO_LHITS_BM || p->flag==FTS3_MATCHINFO_LHITS );
+  if( p->flag==FTS3_MATCHINFO_LHITS ){
+    iStart = pExpr->iPhrase * p->nCol;
+  }else{
+    iStart = pExpr->iPhrase * ((p->nCol + 31) / 32);
+  }
+
+  while( 1 ){
+    int nHit = fts3ColumnlistCount(&pIter);
+    if( (pPhrase->iColumn>=pTab->nColumn || pPhrase->iColumn==iCol) ){
+      if( p->flag==FTS3_MATCHINFO_LHITS ){
+        p->aMatchinfo[iStart + iCol] = (u32)nHit;
+      }else if( nHit ){
+        p->aMatchinfo[iStart + (iCol+1)/32] |= (1 << (iCol&0x1F));
+      }
+    }
+    assert( *pIter==0x00 || *pIter==0x01 );
+    if( *pIter!=0x01 ) break;
+    pIter++;
+    pIter += fts3GetVarint32(pIter, &iCol);
+  }
+}
+
+/*
+** Gather the results for matchinfo directives 'y' and 'b'.
+*/
+static void fts3ExprLHitGather(
+  Fts3Expr *pExpr,
+  MatchInfo *p
+){
+  assert( (pExpr->pLeft==0)==(pExpr->pRight==0) );
+  if( pExpr->bEof==0 && pExpr->iDocid==p->pCursor->iPrevId ){
+    if( pExpr->pLeft ){
+      fts3ExprLHitGather(pExpr->pLeft, p);
+      fts3ExprLHitGather(pExpr->pRight, p);
+    }else{
+      fts3ExprLHits(pExpr, p);
+    }
+  }
+}
+
+/*
+** fts3ExprIterate() callback used to collect the "global" matchinfo stats
+** for a single query.
+**
+** fts3ExprIterate() callback to load the 'global' elements of a
+** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements
+** of the matchinfo array that are constant for all rows returned by the
+** current query.
+**
+** Argument pCtx is actually a pointer to a struct of type MatchInfo. This
+** function populates Matchinfo.aMatchinfo[] as follows:
+**
+**   for(iCol=0; iCol<nCol; iCol++){
+**     aMatchinfo[3*(iCol+iPhrase*nCol)+1] = X;
+**     aMatchinfo[3*(iCol+iPhrase*nCol)+2] = Y;
+**   }
+**
+** where X is the number of matches for phrase iPhrase in column iCol of all
+** rows of the table. Y is the number of rows for which column iCol contains
+** at least one instance of phrase iPhrase.
+**
+** If the phrase pExpr consists entirely of deferred tokens, then all X and
+** Y values are set to nDoc, where nDoc is the number of documents in the
+** file system. This is done because the full-text index doclist is required
+** to calculate these values properly, but the index doclist contains no
+** data for deferred tokens.
+*/
+static int fts3ExprGlobalHitsCb(
+  Fts3Expr *pExpr,                /* Phrase expression node */
+  int iPhrase,                    /* Phrase number (numbered from zero) */
+  void *pCtx                      /* Pointer to MatchInfo structure */
+){
+  MatchInfo *p = (MatchInfo *)pCtx;
+  return sqlite3Fts3EvalPhraseStats(
+      p->pCursor, pExpr, &p->aMatchinfo[3*iPhrase*p->nCol]
+  );
+}
+
+/*
+** fts3ExprIterate() callback used to collect the "local" part of the
+** FTS3_MATCHINFO_HITS array. The local stats are those elements of the
+** array that are different for each row returned by the query.
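+**
+** Concretely (sketch): for phrase p and column c, the 'x' directive uses
+** three slots starting at aMatchinfo[3*(c + p*nCol)]:
+**
+**   [0] hits in this row/column     (filled in here)
+**   [1] hits in all rows            (global, fts3ExprGlobalHitsCb)
+**   [2] rows with at least one hit  (global, fts3ExprGlobalHitsCb)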
+*/
+static int fts3ExprLocalHitsCb(
+  Fts3Expr *pExpr,                /* Phrase expression node */
+  int iPhrase,                    /* Phrase number */
+  void *pCtx                      /* Pointer to MatchInfo structure */
+){
+  int rc = SQLITE_OK;
+  MatchInfo *p = (MatchInfo *)pCtx;
+  int iStart = iPhrase * p->nCol * 3;
+  int i;
+
+  for(i=0; i<p->nCol && rc==SQLITE_OK; i++){
+    char *pCsr;
+    rc = sqlite3Fts3EvalPhrasePoslist(p->pCursor, pExpr, i, &pCsr);
+    if( pCsr ){
+      p->aMatchinfo[iStart+i*3] = fts3ColumnlistCount(&pCsr);
+    }else{
+      p->aMatchinfo[iStart+i*3] = 0;
+    }
+  }
+
+  return rc;
+}
+
+static int fts3MatchinfoCheck(
+  Fts3Table *pTab,
+  char cArg,
+  char **pzErr
+){
+  if( (cArg==FTS3_MATCHINFO_NPHRASE)
+   || (cArg==FTS3_MATCHINFO_NCOL)
+   || (cArg==FTS3_MATCHINFO_NDOC && pTab->bFts4)
+   || (cArg==FTS3_MATCHINFO_AVGLENGTH && pTab->bFts4)
+   || (cArg==FTS3_MATCHINFO_LENGTH && pTab->bHasDocsize)
+   || (cArg==FTS3_MATCHINFO_LCS)
+   || (cArg==FTS3_MATCHINFO_HITS)
+   || (cArg==FTS3_MATCHINFO_LHITS)
+   || (cArg==FTS3_MATCHINFO_LHITS_BM)
+  ){
+    return SQLITE_OK;
+  }
+  sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo request: %c", cArg);
+  return SQLITE_ERROR;
+}
+
+static int fts3MatchinfoSize(MatchInfo *pInfo, char cArg){
+  int nVal;                       /* Number of integers output by cArg */
+
+  switch( cArg ){
+    case FTS3_MATCHINFO_NDOC:
+    case FTS3_MATCHINFO_NPHRASE:
+    case FTS3_MATCHINFO_NCOL:
+      nVal = 1;
+      break;
+
+    case FTS3_MATCHINFO_AVGLENGTH:
+    case FTS3_MATCHINFO_LENGTH:
+    case FTS3_MATCHINFO_LCS:
+      nVal = pInfo->nCol;
+      break;
+
+    case FTS3_MATCHINFO_LHITS:
+      nVal = pInfo->nCol * pInfo->nPhrase;
+      break;
+
+    case FTS3_MATCHINFO_LHITS_BM:
+      nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32);
+      break;
+
+    default:
+      assert( cArg==FTS3_MATCHINFO_HITS );
+      nVal = pInfo->nCol * pInfo->nPhrase * 3;
+      break;
+  }
+
+  return nVal;
+}
+
+static int fts3MatchinfoSelectDoctotal(
+  Fts3Table *pTab,
+  sqlite3_stmt **ppStmt,
+  sqlite3_int64 *pnDoc,
+  const char **paLen
+){
+  sqlite3_stmt *pStmt;
+  const char *a;
+  sqlite3_int64 nDoc;
+
+  if( !*ppStmt ){
+    int rc = sqlite3Fts3SelectDoctotal(pTab, ppStmt);
+    if( rc!=SQLITE_OK ) return rc;
+  }
+  pStmt = *ppStmt;
+  assert( sqlite3_data_count(pStmt)==1 );
+
+  a = sqlite3_column_blob(pStmt, 0);
+  a += sqlite3Fts3GetVarint(a, &nDoc);
+  if( nDoc==0 ) return FTS_CORRUPT_VTAB;
+  *pnDoc = (u32)nDoc;
+
+  if( paLen ) *paLen = a;
+  return SQLITE_OK;
+}
+
+/*
+** An instance of the following structure is used to store state while
+** iterating through a multi-column position-list corresponding to the
+** hits for a single phrase on a single row in order to calculate the
+** values for a matchinfo() FTS3_MATCHINFO_LCS request.
+*/
+typedef struct LcsIterator LcsIterator;
+struct LcsIterator {
+  Fts3Expr *pExpr;                /* Pointer to phrase expression */
+  int iPosOffset;                 /* Tokens count up to end of this phrase */
+  char *pRead;                    /* Cursor used to iterate through aDoclist */
+  int iPos;                       /* Current position */
+};
+
+/*
+** If LcsIterator.iCol is set to the following value, the iterator has
+** finished iterating through all offsets for all columns.
+*/
+#define LCS_ITERATOR_FINISHED 0x7FFFFFFF;
+
+static int fts3MatchinfoLcsCb(
+  Fts3Expr *pExpr,                /* Phrase expression node */
+  int iPhrase,                    /* Phrase number (numbered from zero) */
+  void *pCtx                      /* Pointer to MatchInfo structure */
+){
+  LcsIterator *aIter = (LcsIterator *)pCtx;
+  aIter[iPhrase].pExpr = pExpr;
+  return SQLITE_OK;
+}
+
+/*
+** Advance the iterator passed as an argument to the next position.
+** Return 1 if the iterator is at EOF or if it now points to the start of
+** the position list for the next column.
+*/
+static int fts3LcsIteratorAdvance(LcsIterator *pIter){
+  char *pRead = pIter->pRead;
+  sqlite3_int64 iRead;
+  int rc = 0;
+
+  pRead += sqlite3Fts3GetVarint(pRead, &iRead);
+  if( iRead==0 || iRead==1 ){
+    pRead = 0;
+    rc = 1;
+  }else{
+    pIter->iPos += (int)(iRead-2);
+  }
+
+  pIter->pRead = pRead;
+  return rc;
+}
+
+/*
+** This function implements the FTS3_MATCHINFO_LCS matchinfo() flag.
+**
+** If the call is successful, the longest-common-substring lengths for each
+** column are written into the first nCol elements of the pInfo->aMatchinfo[]
+** array before returning. SQLITE_OK is returned in this case.
+**
+** Otherwise, if an error occurs, an SQLite error code is returned and the
+** data written to the first nCol elements of pInfo->aMatchinfo[] is
+** undefined.
+*/
+static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){
+  LcsIterator *aIter;
+  int i;
+  int iCol;
+  int nToken = 0;
+
+  /* Allocate and populate the array of LcsIterator objects. The array
+  ** contains one element for each matchable phrase in the query.
+  **/
+  aIter = sqlite3_malloc(sizeof(LcsIterator) * pCsr->nPhrase);
+  if( !aIter ) return SQLITE_NOMEM;
+  memset(aIter, 0, sizeof(LcsIterator) * pCsr->nPhrase);
+  (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter);
+
+  for(i=0; i<pCsr->nPhrase; i++){
+    LcsIterator *pIter = &aIter[i];
+    nToken -= pIter->pExpr->pPhrase->nToken;
+    pIter->iPosOffset = nToken;
+  }
+
+  for(iCol=0; iCol<pInfo->nCol; iCol++){
+    int nLcs = 0;                 /* LCS value for this column */
+    int nLive = 0;                /* Number of iterators in aIter not at EOF */
+
+    for(i=0; i<pCsr->nPhrase; i++){
+      int rc;
+      LcsIterator *pIt = &aIter[i];
+      rc = sqlite3Fts3EvalPhrasePoslist(pCsr, pIt->pExpr, iCol, &pIt->pRead);
+      if( rc!=SQLITE_OK ) return rc;
+      if( pIt->pRead ){
+        pIt->iPos = pIt->iPosOffset;
+        fts3LcsIteratorAdvance(&aIter[i]);
+        nLive++;
+      }
+    }
+
+    while( nLive>0 ){
+      LcsIterator *pAdv = 0;      /* The iterator to advance by one position */
+      int nThisLcs = 0;           /* LCS for the current iterator positions */
+
+      for(i=0; i<pCsr->nPhrase; i++){
+        LcsIterator *pIter = &aIter[i];
+        if( pIter->pRead==0 ){
+          /* This iterator is already at EOF for this column. */
+          nThisLcs = 0;
+        }else{
+          if( pAdv==0 || pIter->iPos<pAdv->iPos ){
+            pAdv = pIter;
+          }
+          if( nThisLcs==0 || pIter->iPos==pIter[-1].iPos ){
+            nThisLcs++;
+          }else{
+            nThisLcs = 1;
+          }
+          if( nThisLcs>nLcs ) nLcs = nThisLcs;
+        }
+      }
+      if( fts3LcsIteratorAdvance(pAdv) ) nLive--;
+    }
+
+    pInfo->aMatchinfo[iCol] = nLcs;
+  }
+
+  sqlite3_free(aIter);
+  return SQLITE_OK;
+}
+
+/*
+** Populate the buffer pInfo->aMatchinfo[] with an array of integers to
+** be returned by the matchinfo() function. Argument zArg contains the
+** format string passed as the second argument to matchinfo (or the
+** default value "pcx" if no second argument was specified). The format
+** string has already been validated and the pInfo->aMatchinfo[] array
+** is guaranteed to be large enough for the output.
+**
+** If bGlobal is true, then populate all fields of the matchinfo() output.
+** If it is false, then assume that those fields that do not change between
+** rows (i.e. FTS3_MATCHINFO_NPHRASE, NCOL, NDOC, AVGLENGTH and part of HITS)
+** have already been populated.
+**
+** Return SQLITE_OK if successful, or an SQLite error code if an error
+** occurs. If a value other than SQLITE_OK is returned, the state the
+** pInfo->aMatchinfo[] buffer is left in is undefined.
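+**
+** For instance, matchinfo(t, 'pcx') against a 2-column table and a
+** single-phrase query yields 1 + 1 + 3*2 = 8 integers: nPhrase, nCol,
+** then the three 'x' values for each of the two columns.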
+*/
+static int fts3MatchinfoValues(
+  Fts3Cursor *pCsr,               /* FTS3 cursor object */
+  int bGlobal,                    /* True to grab the global stats */
+  MatchInfo *pInfo,               /* Matchinfo context object */
+  const char *zArg                /* Matchinfo format string */
+){
+  int rc = SQLITE_OK;
+  int i;
+  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+  sqlite3_stmt *pSelect = 0;
+
+  for(i=0; rc==SQLITE_OK && zArg[i]; i++){
+    pInfo->flag = zArg[i];
+    switch( zArg[i] ){
+      case FTS3_MATCHINFO_NPHRASE:
+        if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nPhrase;
+        break;
+
+      case FTS3_MATCHINFO_NCOL:
+        if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nCol;
+        break;
+
+      case FTS3_MATCHINFO_NDOC:
+        if( bGlobal ){
+          sqlite3_int64 nDoc = 0;
+          rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, 0);
+          pInfo->aMatchinfo[0] = (u32)nDoc;
+        }
+        break;
+
+      case FTS3_MATCHINFO_AVGLENGTH:
+        if( bGlobal ){
+          sqlite3_int64 nDoc;     /* Number of rows in table */
+          const char *a;          /* Aggregate column length array */
+
+          rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, &a);
+          if( rc==SQLITE_OK ){
+            int iCol;
+            for(iCol=0; iCol<pInfo->nCol; iCol++){
+              u32 iVal;
+              sqlite3_int64 nToken;
+              a += sqlite3Fts3GetVarint(a, &nToken);
+              iVal = (u32)(((u32)(nToken&0xffffffff)+nDoc/2)/nDoc);
+              pInfo->aMatchinfo[iCol] = iVal;
+            }
+          }
+        }
+        break;
+
+      case FTS3_MATCHINFO_LENGTH: {
+        sqlite3_stmt *pSelectDocsize = 0;
+        rc = sqlite3Fts3SelectDocsize(pTab, pCsr->iPrevId, &pSelectDocsize);
+        if( rc==SQLITE_OK ){
+          int iCol;
+          const char *a = sqlite3_column_blob(pSelectDocsize, 0);
+          for(iCol=0; iCol<pInfo->nCol; iCol++){
+            sqlite3_int64 nToken;
+            a += sqlite3Fts3GetVarint(a, &nToken);
+            pInfo->aMatchinfo[iCol] = (u32)nToken;
+          }
+        }
+        sqlite3_reset(pSelectDocsize);
+        break;
+      }
+
+      case FTS3_MATCHINFO_LCS:
+        rc = fts3ExprLoadDoclists(pCsr, 0, 0);
+        if( rc==SQLITE_OK ){
+          rc = fts3MatchinfoLcs(pCsr, pInfo);
+        }
+        break;
+
+      case FTS3_MATCHINFO_LHITS_BM:
+      case FTS3_MATCHINFO_LHITS: {
+        int nZero = fts3MatchinfoSize(pInfo, zArg[i]) * sizeof(u32);
+        memset(pInfo->aMatchinfo, 0, nZero);
+        fts3ExprLHitGather(pCsr->pExpr, pInfo);
+        break;
+      }
+
+      default: {
+        Fts3Expr *pExpr;
+        assert( zArg[i]==FTS3_MATCHINFO_HITS );
+        pExpr = pCsr->pExpr;
+        rc = fts3ExprLoadDoclists(pCsr, 0, 0);
+        if( rc!=SQLITE_OK ) break;
+        if( bGlobal ){
+          if( pCsr->pDeferred ){
+            rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc, 0);
+            if( rc!=SQLITE_OK ) break;
+          }
+          rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo);
+          sqlite3Fts3EvalTestDeferred(pCsr, &rc);
+          if( rc!=SQLITE_OK ) break;
+        }
+        (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo);
+        break;
+      }
+    }
+
+    pInfo->aMatchinfo += fts3MatchinfoSize(pInfo, zArg[i]);
+  }
+
+  sqlite3_reset(pSelect);
+  return rc;
+}
+
+
+/*
+** Populate pCsr->aMatchinfo[] with data for the current row. The
+** 'matchinfo' data is an array of 32-bit unsigned integers (C type u32).
+*/
+static void fts3GetMatchinfo(
+  sqlite3_context *pCtx,          /* Return results here */
+  Fts3Cursor *pCsr,               /* FTS3 Cursor object */
+  const char *zArg                /* Second argument to matchinfo() function */
+){
+  MatchInfo sInfo;
+  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+  int rc = SQLITE_OK;
+  int bGlobal = 0;                /* Collect 'global' stats as well as local */
+
+  u32 *aOut = 0;
+  void (*xDestroyOut)(void*) = 0;
+
+  memset(&sInfo, 0, sizeof(MatchInfo));
+  sInfo.pCursor = pCsr;
+  sInfo.nCol = pTab->nColumn;
+
+  /* If there is cached matchinfo() data, but the format string for the
+  ** cache does not match the format string for this request, discard
+  ** the cached data.
*/ + if( pCsr->pMIBuffer && strcmp(pCsr->pMIBuffer->zMatchinfo, zArg) ){ + sqlite3Fts3MIBufferFree(pCsr->pMIBuffer); + pCsr->pMIBuffer = 0; + } + + /* If Fts3Cursor.pMIBuffer is NULL, then this is the first time the + ** matchinfo function has been called for this query. In this case + ** allocate the array used to accumulate the matchinfo data and + ** initialize those elements that are constant for every row. + */ + if( pCsr->pMIBuffer==0 ){ + int nMatchinfo = 0; /* Number of u32 elements in match-info */ + int i; /* Used to iterate through zArg */ + + /* Determine the number of phrases in the query */ + pCsr->nPhrase = fts3ExprPhraseCount(pCsr->pExpr); + sInfo.nPhrase = pCsr->nPhrase; + + /* Determine the number of integers in the buffer returned by this call. */ + for(i=0; zArg[i]; i++){ + char *zErr = 0; + if( fts3MatchinfoCheck(pTab, zArg[i], &zErr) ){ + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); + return; + } + nMatchinfo += fts3MatchinfoSize(&sInfo, zArg[i]); + } + + /* Allocate space for Fts3Cursor.aMatchinfo[] and Fts3Cursor.zMatchinfo. */ + pCsr->pMIBuffer = fts3MIBufferNew(nMatchinfo, zArg); + if( !pCsr->pMIBuffer ) rc = SQLITE_NOMEM; + + pCsr->isMatchinfoNeeded = 1; + bGlobal = 1; + } + + if( rc==SQLITE_OK ){ + xDestroyOut = fts3MIBufferAlloc(pCsr->pMIBuffer, &aOut); + if( xDestroyOut==0 ){ + rc = SQLITE_NOMEM; + } + } + + if( rc==SQLITE_OK ){ + sInfo.aMatchinfo = aOut; + sInfo.nPhrase = pCsr->nPhrase; + rc = fts3MatchinfoValues(pCsr, bGlobal, &sInfo, zArg); + if( bGlobal ){ + fts3MIBufferSetGlobal(pCsr->pMIBuffer); + } + } + + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(pCtx, rc); + if( xDestroyOut ) xDestroyOut(aOut); + }else{ + int n = pCsr->pMIBuffer->nElem * sizeof(u32); + sqlite3_result_blob(pCtx, aOut, n, xDestroyOut); + } +} + +/* +** Implementation of snippet() function. +*/ +SQLITE_PRIVATE void sqlite3Fts3Snippet( + sqlite3_context *pCtx, /* SQLite function call context */ + Fts3Cursor *pCsr, /* Cursor object */ + const char *zStart, /* Snippet start text - "" */ + const char *zEnd, /* Snippet end text - "" */ + const char *zEllipsis, /* Snippet ellipsis text - "..." */ + int iCol, /* Extract snippet from this column */ + int nToken /* Approximate number of tokens in snippet */ +){ + Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; + int rc = SQLITE_OK; + int i; + StrBuffer res = {0, 0, 0}; + + /* The returned text includes up to four fragments of text extracted from + ** the data in the current row. The first iteration of the for(...) loop + ** below attempts to locate a single fragment of text nToken tokens in + ** size that contains at least one instance of all phrases in the query + ** expression that appear in the current row. If such a fragment of text + ** cannot be found, the second iteration of the loop attempts to locate + ** a pair of fragments, and so on. 
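+  **
+  ** E.g. SELECT snippet(t, '[', ']', '...', -1, 15) FROM t WHERE t MATCH q
+  ** asks for roughly 15 tokens; if two fragments are required, each one
+  ** gets about 15/2 tokens (see nFToken below).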
+  */
+  int nSnippet = 0;               /* Number of fragments in this snippet */
+  SnippetFragment aSnippet[4];    /* Maximum of 4 fragments per snippet */
+  int nFToken = -1;               /* Number of tokens in each fragment */
+
+  if( !pCsr->pExpr ){
+    sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC);
+    return;
+  }
+
+  for(nSnippet=1; 1; nSnippet++){
+
+    int iSnip;                    /* Loop counter 0..nSnippet-1 */
+    u64 mCovered = 0;             /* Bitmask of phrases covered by snippet */
+    u64 mSeen = 0;                /* Bitmask of phrases seen by BestSnippet() */
+
+    if( nToken>=0 ){
+      nFToken = (nToken+nSnippet-1) / nSnippet;
+    }else{
+      nFToken = -1 * nToken;
+    }
+
+    for(iSnip=0; iSnip<nSnippet; iSnip++){
+      int iBestScore = -1;        /* Best score of columns checked so far */
+      int iRead;                  /* Used to iterate through table columns */
+      SnippetFragment *pFragment = &aSnippet[iSnip];
+
+      memset(pFragment, 0, sizeof(*pFragment));
+
+      /* Loop through all columns of the table being considered for snippets.
+      ** If the iCol argument to this function was negative, this means all
+      ** columns of the FTS3 table. Otherwise, only column iCol is considered.
+      */
+      for(iRead=0; iRead<pTab->nColumn; iRead++){
+        SnippetFragment sF = {0, 0, 0, 0};
+        int iS = 0;
+        if( iCol>=0 && iRead!=iCol ) continue;
+
+        /* Find the best snippet of nFToken tokens in column iRead. */
+        rc = fts3BestSnippet(nFToken, pCsr, iRead, mCovered, &mSeen, &sF, &iS);
+        if( rc!=SQLITE_OK ){
+          goto snippet_out;
+        }
+        if( iS>iBestScore ){
+          *pFragment = sF;
+          iBestScore = iS;
+        }
+      }
+
+      mCovered |= pFragment->covered;
+    }
+
+    /* If all query phrases seen by fts3BestSnippet() are present in at least
+    ** one of the nSnippet snippet fragments, break out of the loop.
+    */
+    assert( (mCovered&mSeen)==mCovered );
+    if( mSeen==mCovered || nSnippet==SizeofArray(aSnippet) ) break;
+  }
+
+  assert( nFToken>0 );
+
+  for(i=0; i<nSnippet && rc==SQLITE_OK; i++){
+    rc = fts3SnippetText(pCsr, &aSnippet[i],
+        i, (i==nSnippet-1), nFToken, zStart, zEnd, zEllipsis, &res
+    );
+  }
+
+ snippet_out:
+  sqlite3Fts3SegmentsClose(pTab);
+  if( rc!=SQLITE_OK ){
+    sqlite3_result_error_code(pCtx, rc);
+    sqlite3_free(res.z);
+  }else{
+    sqlite3_result_text(pCtx, res.z, -1, sqlite3_free);
+  }
+}
+
+
+typedef struct TermOffset TermOffset;
+typedef struct TermOffsetCtx TermOffsetCtx;
+
+struct TermOffset {
+  char *pList;                    /* Position-list */
+  int iPos;                       /* Position just read from pList */
+  int iOff;                       /* Offset of this term from read positions */
+};
+
+struct TermOffsetCtx {
+  Fts3Cursor *pCsr;
+  int iCol;                       /* Column of table to populate aTerm for */
+  int iTerm;
+  sqlite3_int64 iDocid;
+  TermOffset *aTerm;
+};
+
+/*
+** This function is an fts3ExprIterate() callback used by sqlite3Fts3Offsets().
+*/
+static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){
+  TermOffsetCtx *p = (TermOffsetCtx *)ctx;
+  int nTerm;                      /* Number of tokens in phrase */
+  int iTerm;                      /* For looping through nTerm phrase terms */
+  char *pList;                    /* Phrase position list */
+  int iPos = 0;                   /* First position in position-list */
+  int rc;
+
+  UNUSED_PARAMETER(iPhrase);
+  rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pList);
+  nTerm = pExpr->pPhrase->nToken;
+  if( pList ){
+    fts3GetDeltaPosition(&pList, &iPos);
+    assert( iPos>=0 );
+  }
+
+  for(iTerm=0; iTerm<nTerm; iTerm++){
+    TermOffset *pT = &p->aTerm[p->iTerm++];
+    pT->iOff = nTerm-iTerm-1;
+    pT->pList = pList;
+    pT->iPos = iPos;
+  }
+
+  return rc;
+}
+
+/*
+** Implementation of offsets() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3Offsets(
+  sqlite3_context *pCtx,          /* SQLite function call context */
+  Fts3Cursor *pCsr                /* Cursor object */
+){
+  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+  sqlite3_tokenizer_module const *pMod = pTab->pTokenizer->pModule;
+  int rc;                         /* Return Code */
+  int nToken;                     /* Number of tokens in query */
+  int iCol;                       /* Column currently being processed */
+  StrBuffer res = {0, 0, 0};      /* Result string */
+  TermOffsetCtx sCtx;             /* Context for fts3ExprTermOffsetInit() */
+
+  if( !pCsr->pExpr ){
+    sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC);
+    return;
+  }
+
+  memset(&sCtx, 0, sizeof(sCtx));
+  assert( pCsr->isRequireSeek==0 );
+
+  /* Count the number of terms in the query */
+  rc = fts3ExprLoadDoclists(pCsr, 0, &nToken);
+  if( rc!=SQLITE_OK ) goto offsets_out;
+
+  /* Allocate the array of TermOffset iterators. */
+  sCtx.aTerm = (TermOffset *)sqlite3_malloc(sizeof(TermOffset)*nToken);
+  if( 0==sCtx.aTerm ){
+    rc = SQLITE_NOMEM;
+    goto offsets_out;
+  }
+  sCtx.iDocid = pCsr->iPrevId;
+  sCtx.pCsr = pCsr;
+
+  /* Loop through the table columns, appending offset information to
+  ** string-buffer res for each column.
+  */
+  for(iCol=0; iCol<pTab->nColumn; iCol++){
+    sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor */
+    const char *ZDUMMY;           /* Dummy argument used with xNext() */
+    int NDUMMY = 0;               /* Dummy argument used with xNext() */
+    int iStart = 0;
+    int iEnd = 0;
+    int iCurrent = 0;
+    const char *zDoc;
+    int nDoc;
+
+    /* Initialize the contents of sCtx.aTerm[] for column iCol. There is
+    ** no way that this operation can fail, so the return code from
+    ** fts3ExprIterate() can be discarded.
+    */
+    sCtx.iCol = iCol;
+    sCtx.iTerm = 0;
+    (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx);
+
+    /* Retrieve the text stored in column iCol. If an SQL NULL is stored
+    ** in column iCol, jump immediately to the next iteration of the loop.
+    ** If an OOM occurs while retrieving the data (this can happen if SQLite
+    ** needs to transform the data from utf-16 to utf-8), return SQLITE_NOMEM
+    ** to the caller.
+    */
+    zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol+1);
+    nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol+1);
+    if( zDoc==0 ){
+      if( sqlite3_column_type(pCsr->pStmt, iCol+1)==SQLITE_NULL ){
+        continue;
+      }
+      rc = SQLITE_NOMEM;
+      goto offsets_out;
+    }
+
+    /* Initialize a tokenizer iterator to iterate through column iCol. */
+    rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid,
+        zDoc, nDoc, &pC
+    );
+    if( rc!=SQLITE_OK ) goto offsets_out;
+
+    rc = pMod->xNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent);
+    while( rc==SQLITE_OK ){
+      int i;                      /* Used to loop through terms */
+      int iMinPos = 0x7FFFFFFF;   /* Position of next token */
+      TermOffset *pTerm = 0;      /* TermOffset associated with next token */
+
+      for(i=0; i<nToken; i++){
+        TermOffset *pT = &sCtx.aTerm[i];
+        if( pT->pList && (pT->iPos-pT->iOff)<iMinPos ){
+          iMinPos = pT->iPos-pT->iOff;
+          pTerm = pT;
+        }
+      }
+
+      if( !pTerm ){
+        /* All offsets for this column have been gathered. */
+        rc = SQLITE_DONE;
+      }else{
+        assert( iCurrent<=iMinPos );
+        if( 0==(0xFE&*pTerm->pList) ){
+          pTerm->pList = 0;
+        }else{
+          fts3GetDeltaPosition(&pTerm->pList, &pTerm->iPos);
+        }
+        while( rc==SQLITE_OK && iCurrent<iMinPos ){
+          rc = pMod->xNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent);
+        }
+        if( rc==SQLITE_OK ){
+          char aBuffer[64];
+          sqlite3_snprintf(sizeof(aBuffer), aBuffer,
+              "%d %d %d %d ", iCol, pTerm-sCtx.aTerm, iStart, iEnd-iStart
+          );
+          rc = fts3StringAppend(&res, aBuffer, -1);
+        }else if( rc==SQLITE_DONE && pTab->zContentTbl==0 ){
+          rc = FTS_CORRUPT_VTAB;
+        }
+      }
+    }
+    if( rc==SQLITE_DONE ){
+      rc = SQLITE_OK;
+    }
+
+    pMod->xClose(pC);
+    if( rc!=SQLITE_OK ) goto offsets_out;
+  }
+
+ offsets_out:
+  sqlite3_free(sCtx.aTerm);
+  assert( rc!=SQLITE_DONE );
+  sqlite3Fts3SegmentsClose(pTab);
+  if( rc!=SQLITE_OK ){
+    sqlite3_result_error_code(pCtx, rc);
+    sqlite3_free(res.z);
+  }else{
+    sqlite3_result_text(pCtx, res.z, res.n-1, sqlite3_free);
+  }
+  return;
+}
+
+/*
+** Implementation of matchinfo() function.
+*/
+SQLITE_PRIVATE void sqlite3Fts3Matchinfo(
+  sqlite3_context *pContext,      /* Function call context */
+  Fts3Cursor *pCsr,               /* FTS3 table cursor */
+  const char *zArg                /* Second arg to matchinfo() function */
+){
+  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
+  const char *zFormat;
+
+  if( zArg ){
+    zFormat = zArg;
+  }else{
+    zFormat = FTS3_MATCHINFO_DEFAULT;
+  }
+
+  if( !pCsr->pExpr ){
+    sqlite3_result_blob(pContext, "", 0, SQLITE_STATIC);
+    return;
+  }else{
+    /* Retrieve matchinfo() data. */
+    fts3GetMatchinfo(pContext, pCsr, zFormat);
+    sqlite3Fts3SegmentsClose(pTab);
+  }
+}
+
+#endif
+
+/************** End of fts3_snippet.c ****************************************/
+/************** Begin file fts3_unicode.c ************************************/
+/*
+** 2012 May 24
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** Implementation of the "unicode" full-text-search tokenizer.
+*/ + +#ifndef SQLITE_DISABLE_FTS3_UNICODE + +/* #include "fts3Int.h" */ +#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) + +/* #include */ +/* #include */ +/* #include */ +/* #include */ + +/* #include "fts3_tokenizer.h" */ + +/* +** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied +** from the sqlite3 source file utf.c. If this file is compiled as part +** of the amalgamation, they are not required. +*/ +#ifndef SQLITE_AMALGAMATION + +static const unsigned char sqlite3Utf8Trans1[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, +}; + +#define READ_UTF8(zIn, zTerm, c) \ + c = *(zIn++); \ + if( c>=0xc0 ){ \ + c = sqlite3Utf8Trans1[c-0xc0]; \ + while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + c = (c<<6) + (0x3f & *(zIn++)); \ + } \ + if( c<0x80 \ + || (c&0xFFFFF800)==0xD800 \ + || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \ + } + +#define WRITE_UTF8(zOut, c) { \ + if( c<0x00080 ){ \ + *zOut++ = (u8)(c&0xFF); \ + } \ + else if( c<0x00800 ){ \ + *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ + } \ + else if( c<0x10000 ){ \ + *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ + *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ + }else{ \ + *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ + *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ + *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ + *zOut++ = 0x80 + (u8)(c & 0x3F); \ + } \ +} + +#endif /* ifndef SQLITE_AMALGAMATION */ + +typedef struct unicode_tokenizer unicode_tokenizer; +typedef struct unicode_cursor unicode_cursor; + +struct unicode_tokenizer { + sqlite3_tokenizer base; + int bRemoveDiacritic; + int nException; + int *aiException; +}; + +struct unicode_cursor { + sqlite3_tokenizer_cursor base; + const unsigned char *aInput; /* Input text being tokenized */ + int nInput; /* Size of aInput[] in bytes */ + int iOff; /* Current offset within aInput[] */ + int iToken; /* Index of next token to be returned */ + char *zToken; /* storage for current token */ + int nAlloc; /* space allocated at zToken */ +}; + + +/* +** Destroy a tokenizer allocated by unicodeCreate(). +*/ +static int unicodeDestroy(sqlite3_tokenizer *pTokenizer){ + if( pTokenizer ){ + unicode_tokenizer *p = (unicode_tokenizer *)pTokenizer; + sqlite3_free(p->aiException); + sqlite3_free(p); + } + return SQLITE_OK; +} + +/* +** As part of a tokenchars= or separators= option, the CREATE VIRTUAL TABLE +** statement has specified that the tokenizer for this table shall consider +** all characters in string zIn/nIn to be separators (if bAlnum==0) or +** token characters (if bAlnum==1). +** +** For each codepoint in the zIn/nIn string, this function checks if the +** sqlite3FtsUnicodeIsalnum() function already returns the desired result. +** If so, no action is taken. Otherwise, the codepoint is added to the +** unicode_tokenizer.aiException[] array. For the purposes of tokenization, +** the return value of sqlite3FtsUnicodeIsalnum() is inverted for all +** codepoints in the aiException[] array. +** +** If a standalone diacritic mark (one that sqlite3FtsUnicodeIsdiacritic() +** identifies as a diacritic) occurs in the zIn/nIn string it is ignored. 
+** It is not possible to change the behavior of the tokenizer with respect
+** to these codepoints.
+*/
+static int unicodeAddExceptions(
+  unicode_tokenizer *p,           /* Tokenizer to add exceptions to */
+  int bAlnum,                     /* Replace Isalnum() return value with this */
+  const char *zIn,                /* Array of characters to make exceptions */
+  int nIn                         /* Length of z in bytes */
+){
+  const unsigned char *z = (const unsigned char *)zIn;
+  const unsigned char *zTerm = &z[nIn];
+  int iCode;
+  int nEntry = 0;
+
+  assert( bAlnum==0 || bAlnum==1 );
+
+  while( z<zTerm ){
+    READ_UTF8(z, zTerm, iCode);
+    if( sqlite3FtsUnicodeIsalnum(iCode)!=bAlnum
+     && sqlite3FtsUnicodeIsdiacritic(iCode)==0
+    ){
+      nEntry++;
+    }
+  }
+
+  if( nEntry ){
+    int *aNew;                    /* New aiException[] array */
+    int nNew;                     /* Number of valid entries in array aNew[] */
+
+    aNew = sqlite3_realloc(p->aiException, (p->nException+nEntry)*sizeof(int));
+    if( aNew==0 ) return SQLITE_NOMEM;
+    nNew = p->nException;
+
+    z = (const unsigned char *)zIn;
+    while( z<zTerm ){
+      READ_UTF8(z, zTerm, iCode);
+      if( sqlite3FtsUnicodeIsalnum(iCode)!=bAlnum
+       && sqlite3FtsUnicodeIsdiacritic(iCode)==0
+      ){
+        int i, j;
+        for(i=0; i<nNew && aNew[i]<iCode; i++);
+        for(j=nNew; j>i; j--) aNew[j] = aNew[j-1];
+        aNew[i] = iCode;
+        nNew++;
+      }
+    }
+    p->aiException = aNew;
+    p->nException = nNew;
+  }
+
+  return SQLITE_OK;
+}
+
+/*
+** Return true if the p->aiException[] array contains the value iCode.
+*/
+static int unicodeIsException(unicode_tokenizer *p, int iCode){
+  if( p->nException>0 ){
+    int *a = p->aiException;
+    int iLo = 0;
+    int iHi = p->nException-1;
+
+    while( iHi>=iLo ){
+      int iTest = (iHi + iLo) / 2;
+      if( iCode==a[iTest] ){
+        return 1;
+      }else if( iCode>a[iTest] ){
+        iLo = iTest+1;
+      }else{
+        iHi = iTest-1;
+      }
+    }
+  }
+
+  return 0;
+}
+
+/*
+** Return true if, for the purposes of tokenization, codepoint iCode is
+** considered a token character (not a separator).
+*/
+static int unicodeIsAlnum(unicode_tokenizer *p, int iCode){
+  assert( (sqlite3FtsUnicodeIsalnum(iCode) & 0xFFFFFFFE)==0 );
+  return sqlite3FtsUnicodeIsalnum(iCode) ^ unicodeIsException(p, iCode);
+}
+
+/*
+** Create a new tokenizer instance.
+*/
+static int unicodeCreate(
+  int nArg,                       /* Size of array argv[] */
+  const char * const *azArg,      /* Tokenizer creation arguments */
+  sqlite3_tokenizer **pp          /* OUT: New tokenizer handle */
+){
+  unicode_tokenizer *pNew;        /* New tokenizer object */
+  int i;
+  int rc = SQLITE_OK;
+
+  pNew = (unicode_tokenizer *) sqlite3_malloc(sizeof(unicode_tokenizer));
+  if( pNew==NULL ) return SQLITE_NOMEM;
+  memset(pNew, 0, sizeof(unicode_tokenizer));
+  pNew->bRemoveDiacritic = 1;
+
+  for(i=0; rc==SQLITE_OK && i<nArg; i++){
+    const char *z = azArg[i];
+    int n = (int)strlen(z);
+
+    if( n==19 && memcmp("remove_diacritics=1", z, 19)==0 ){
+      pNew->bRemoveDiacritic = 1;
+    }
+    else if( n==19 && memcmp("remove_diacritics=0", z, 19)==0 ){
+      pNew->bRemoveDiacritic = 0;
+    }
+    else if( n>=11 && memcmp("tokenchars=", z, 11)==0 ){
+      rc = unicodeAddExceptions(pNew, 1, &z[11], n-11);
+    }
+    else if( n>=11 && memcmp("separators=", z, 11)==0 ){
+      rc = unicodeAddExceptions(pNew, 0, &z[11], n-11);
+    }
+    else{
+      /* Unrecognized argument */
+      rc = SQLITE_ERROR;
+    }
+  }
+
+  if( rc!=SQLITE_OK ){
+    unicodeDestroy((sqlite3_tokenizer *)pNew);
+    pNew = 0;
+  }
+  *pp = (sqlite3_tokenizer *)pNew;
+  return rc;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is aInput[0..nInput-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
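+**
+** (The arguments parsed by unicodeCreate() above come from the table
+** declaration; e.g., hypothetically:
+**
+**   CREATE VIRTUAL TABLE t USING fts4(
+**     x, tokenize=unicode61 "remove_diacritics=0" "tokenchars=.-"
+**   );
+**
+** passes "remove_diacritics=0" and "tokenchars=.-" to unicodeCreate().)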
+*/
+static int unicodeOpen(
+  sqlite3_tokenizer *p,           /* The tokenizer */
+  const char *aInput,             /* Input string */
+  int nInput,                     /* Size of string aInput in bytes */
+  sqlite3_tokenizer_cursor **pp   /* OUT: New cursor object */
+){
+  unicode_cursor *pCsr;
+
+  pCsr = (unicode_cursor *)sqlite3_malloc(sizeof(unicode_cursor));
+  if( pCsr==0 ){
+    return SQLITE_NOMEM;
+  }
+  memset(pCsr, 0, sizeof(unicode_cursor));
+
+  pCsr->aInput = (const unsigned char *)aInput;
+  if( aInput==0 ){
+    pCsr->nInput = 0;
+  }else if( nInput<0 ){
+    pCsr->nInput = (int)strlen(aInput);
+  }else{
+    pCsr->nInput = nInput;
+  }
+
+  *pp = &pCsr->base;
+  UNUSED_PARAMETER(p);
+  return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to
+** unicodeOpen() above.
+*/
+static int unicodeClose(sqlite3_tokenizer_cursor *pCursor){
+  unicode_cursor *pCsr = (unicode_cursor *) pCursor;
+  sqlite3_free(pCsr->zToken);
+  sqlite3_free(pCsr);
+  return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor. The cursor must
+** have been opened by a prior call to unicodeOpen().
+*/
+static int unicodeNext(
+  sqlite3_tokenizer_cursor *pC,   /* Cursor returned by unicodeOpen */
+  const char **paToken,           /* OUT: Token text */
+  int *pnToken,                   /* OUT: Number of bytes at *paToken */
+  int *piStart,                   /* OUT: Starting offset of token */
+  int *piEnd,                     /* OUT: Ending offset of token */
+  int *piPos                      /* OUT: Position integer of token */
+){
+  unicode_cursor *pCsr = (unicode_cursor *)pC;
+  unicode_tokenizer *p = ((unicode_tokenizer *)pCsr->base.pTokenizer);
+  int iCode = 0;
+  char *zOut;
+  const unsigned char *z = &pCsr->aInput[pCsr->iOff];
+  const unsigned char *zStart = z;
+  const unsigned char *zEnd;
+  const unsigned char *zTerm = &pCsr->aInput[pCsr->nInput];
+
+  /* Scan past any delimiter characters before the start of the next token.
+  ** Return SQLITE_DONE early if this takes us all the way to the end of
+  ** the input. */
+  while( z<zTerm ){
+    READ_UTF8(z, zTerm, iCode);
+    if( unicodeIsAlnum(p, iCode) ) break;
+    zStart = z;
+  }
+  if( zStart>=zTerm ) return SQLITE_DONE;
+
+  zOut = pCsr->zToken;
+  do {
+    int iOut;
+
+    /* Grow the output buffer if required. */
+    if( (zOut-pCsr->zToken)>=(pCsr->nAlloc-4) ){
+      char *zNew = sqlite3_realloc(pCsr->zToken, pCsr->nAlloc+64);
+      if( !zNew ) return SQLITE_NOMEM;
+      zOut = &zNew[zOut - pCsr->zToken];
+      pCsr->zToken = zNew;
+      pCsr->nAlloc += 64;
+    }
+
+    /* Write the folded case of the last character read to the output */
+    zEnd = z;
+    iOut = sqlite3FtsUnicodeFold(iCode, p->bRemoveDiacritic);
+    if( iOut ){
+      WRITE_UTF8(zOut, iOut);
+    }
+
+    /* If the cursor is not at EOF, read the next character */
+    if( z>=zTerm ) break;
+    READ_UTF8(z, zTerm, iCode);
+  }while( unicodeIsAlnum(p, iCode)
+       || sqlite3FtsUnicodeIsdiacritic(iCode)
+  );
+
+  /* Set the output variables and return. */
+  pCsr->iOff = (int)(z - pCsr->aInput);
+  *paToken = pCsr->zToken;
+  *pnToken = (int)(zOut - pCsr->zToken);
+  *piStart = (int)(zStart - pCsr->aInput);
+  *piEnd = (int)(zEnd - pCsr->aInput);
+  *piPos = pCsr->iToken++;
+  return SQLITE_OK;
+}
+
+/*
+** Set *ppModule to a pointer to the sqlite3_tokenizer_module
+** structure for the unicode tokenizer.
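+**
+** A minimal usage sketch (illustrative, within this file only):
+**
+**   sqlite3_tokenizer_module const *pMod = 0;
+**   sqlite3Fts3UnicodeTokenizer(&pMod);
+**   rc = pMod->xCreate(0, 0, &pTok);    (pTok, rc: hypothetical locals)
+**
+** after which pMod->xOpen() and pMod->xNext() drive tokenization as above.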
+*/
+SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const **ppModule){
+  static const sqlite3_tokenizer_module module = {
+    0,
+    unicodeCreate,
+    unicodeDestroy,
+    unicodeOpen,
+    unicodeClose,
+    unicodeNext,
+    0,
+  };
+  *ppModule = &module;
+}
+
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+#endif /* ifndef SQLITE_DISABLE_FTS3_UNICODE */
+
+/************** End of fts3_unicode.c ****************************************/
+/************** Begin file fts3_unicode2.c ***********************************/
+/*
+** 2012 May 25
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+/*
+** DO NOT EDIT THIS MACHINE GENERATED FILE.
+*/
+
+#ifndef SQLITE_DISABLE_FTS3_UNICODE
+#if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4)
+
+/* #include <assert.h> */
+
+/*
+** Return true if the argument corresponds to a unicode codepoint
+** classified as either a letter or a number. Otherwise false.
+**
+** The results are undefined if the value passed to this function
+** is less than zero.
+*/
+SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int c){
+  /* Each unsigned integer in the following array corresponds to a contiguous
+  ** range of unicode codepoints that are not either letters or numbers (i.e.
+  ** codepoints for which this function should return 0).
+  **
+  ** The most significant 22 bits in each 32-bit value contain the first
+  ** codepoint in the range. The least significant 10 bits are used to store
+  ** the size of the range (always at least 1). In other words, the value
+  ** ((C<<22) + N) represents a range of N codepoints starting with codepoint
+  ** C. It is not possible to represent a range larger than 1023 codepoints
+  ** using this format.
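+  **
+  ** For example, the first entry below, 0x00000030, decodes to
+  ** C = (0x30>>10) = 0 and N = (0x30 & 0x3FF) = 48: codepoints 0 through
+  ** 47 - everything below ASCII '0' - are neither letters nor numbers.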
+ */ + static const unsigned int aEntry[] = { + 0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07, + 0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01, + 0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401, + 0x000BBC81, 0x000DD401, 0x000DF801, 0x000E1002, 0x000E1C01, + 0x000FD801, 0x00120808, 0x00156806, 0x00162402, 0x00163C01, + 0x00164437, 0x0017CC02, 0x00180005, 0x00181816, 0x00187802, + 0x00192C15, 0x0019A804, 0x0019C001, 0x001B5001, 0x001B580F, + 0x001B9C07, 0x001BF402, 0x001C000E, 0x001C3C01, 0x001C4401, + 0x001CC01B, 0x001E980B, 0x001FAC09, 0x001FD804, 0x00205804, + 0x00206C09, 0x00209403, 0x0020A405, 0x0020C00F, 0x00216403, + 0x00217801, 0x0023901B, 0x00240004, 0x0024E803, 0x0024F812, + 0x00254407, 0x00258804, 0x0025C001, 0x00260403, 0x0026F001, + 0x0026F807, 0x00271C02, 0x00272C03, 0x00275C01, 0x00278802, + 0x0027C802, 0x0027E802, 0x00280403, 0x0028F001, 0x0028F805, + 0x00291C02, 0x00292C03, 0x00294401, 0x0029C002, 0x0029D401, + 0x002A0403, 0x002AF001, 0x002AF808, 0x002B1C03, 0x002B2C03, + 0x002B8802, 0x002BC002, 0x002C0403, 0x002CF001, 0x002CF807, + 0x002D1C02, 0x002D2C03, 0x002D5802, 0x002D8802, 0x002DC001, + 0x002E0801, 0x002EF805, 0x002F1803, 0x002F2804, 0x002F5C01, + 0x002FCC08, 0x00300403, 0x0030F807, 0x00311803, 0x00312804, + 0x00315402, 0x00318802, 0x0031FC01, 0x00320802, 0x0032F001, + 0x0032F807, 0x00331803, 0x00332804, 0x00335402, 0x00338802, + 0x00340802, 0x0034F807, 0x00351803, 0x00352804, 0x00355C01, + 0x00358802, 0x0035E401, 0x00360802, 0x00372801, 0x00373C06, + 0x00375801, 0x00376008, 0x0037C803, 0x0038C401, 0x0038D007, + 0x0038FC01, 0x00391C09, 0x00396802, 0x003AC401, 0x003AD006, + 0x003AEC02, 0x003B2006, 0x003C041F, 0x003CD00C, 0x003DC417, + 0x003E340B, 0x003E6424, 0x003EF80F, 0x003F380D, 0x0040AC14, + 0x00412806, 0x00415804, 0x00417803, 0x00418803, 0x00419C07, + 0x0041C404, 0x0042080C, 0x00423C01, 0x00426806, 0x0043EC01, + 0x004D740C, 0x004E400A, 0x00500001, 0x0059B402, 0x005A0001, + 0x005A6C02, 0x005BAC03, 0x005C4803, 0x005CC805, 0x005D4802, + 0x005DC802, 0x005ED023, 0x005F6004, 0x005F7401, 0x0060000F, + 0x0062A401, 0x0064800C, 0x0064C00C, 0x00650001, 0x00651002, + 0x0066C011, 0x00672002, 0x00677822, 0x00685C05, 0x00687802, + 0x0069540A, 0x0069801D, 0x0069FC01, 0x006A8007, 0x006AA006, + 0x006C0005, 0x006CD011, 0x006D6823, 0x006E0003, 0x006E840D, + 0x006F980E, 0x006FF004, 0x00709014, 0x0070EC05, 0x0071F802, + 0x00730008, 0x00734019, 0x0073B401, 0x0073C803, 0x00770027, + 0x0077F004, 0x007EF401, 0x007EFC03, 0x007F3403, 0x007F7403, + 0x007FB403, 0x007FF402, 0x00800065, 0x0081A806, 0x0081E805, + 0x00822805, 0x0082801A, 0x00834021, 0x00840002, 0x00840C04, + 0x00842002, 0x00845001, 0x00845803, 0x00847806, 0x00849401, + 0x00849C01, 0x0084A401, 0x0084B801, 0x0084E802, 0x00850005, + 0x00852804, 0x00853C01, 0x00864264, 0x00900027, 0x0091000B, + 0x0092704E, 0x00940200, 0x009C0475, 0x009E53B9, 0x00AD400A, + 0x00B39406, 0x00B3BC03, 0x00B3E404, 0x00B3F802, 0x00B5C001, + 0x00B5FC01, 0x00B7804F, 0x00B8C00C, 0x00BA001A, 0x00BA6C59, + 0x00BC00D6, 0x00BFC00C, 0x00C00005, 0x00C02019, 0x00C0A807, + 0x00C0D802, 0x00C0F403, 0x00C26404, 0x00C28001, 0x00C3EC01, + 0x00C64002, 0x00C6580A, 0x00C70024, 0x00C8001F, 0x00C8A81E, + 0x00C94001, 0x00C98020, 0x00CA2827, 0x00CB003F, 0x00CC0100, + 0x01370040, 0x02924037, 0x0293F802, 0x02983403, 0x0299BC10, + 0x029A7C01, 0x029BC008, 0x029C0017, 0x029C8002, 0x029E2402, + 0x02A00801, 0x02A01801, 0x02A02C01, 0x02A08C09, 0x02A0D804, + 0x02A1D004, 0x02A20002, 0x02A2D011, 0x02A33802, 0x02A38012, + 0x02A3E003, 0x02A4980A, 
0x02A51C0D, 0x02A57C01, 0x02A60004,
+    0x02A6CC1B, 0x02A77802, 0x02A8A40E, 0x02A90C01, 0x02A93002,
+    0x02A97004, 0x02A9DC03, 0x02A9EC01, 0x02AAC001, 0x02AAC803,
+    0x02AADC02, 0x02AAF802, 0x02AB0401, 0x02AB7802, 0x02ABAC07,
+    0x02ABD402, 0x02AF8C0B, 0x03600001, 0x036DFC02, 0x036FFC02,
+    0x037FFC01, 0x03EC7801, 0x03ECA401, 0x03EEC810, 0x03F4F802,
+    0x03F7F002, 0x03F8001A, 0x03F88007, 0x03F8C023, 0x03F95013,
+    0x03F9A004, 0x03FBFC01, 0x03FC040F, 0x03FC6807, 0x03FCEC06,
+    0x03FD6C0B, 0x03FF8007, 0x03FFA007, 0x03FFE405, 0x04040003,
+    0x0404DC09, 0x0405E411, 0x0406400C, 0x0407402E, 0x040E7C01,
+    0x040F4001, 0x04215C01, 0x04247C01, 0x0424FC01, 0x04280403,
+    0x04281402, 0x04283004, 0x0428E003, 0x0428FC01, 0x04294009,
+    0x0429FC01, 0x042CE407, 0x04400003, 0x0440E016, 0x04420003,
+    0x0442C012, 0x04440003, 0x04449C0E, 0x04450004, 0x04460003,
+    0x0446CC0E, 0x04471404, 0x045AAC0D, 0x0491C004, 0x05BD442E,
+    0x05BE3C04, 0x074000F6, 0x07440027, 0x0744A4B5, 0x07480046,
+    0x074C0057, 0x075B0401, 0x075B6C01, 0x075BEC01, 0x075C5401,
+    0x075CD401, 0x075D3C01, 0x075DBC01, 0x075E2401, 0x075EA401,
+    0x075F0C01, 0x07BBC002, 0x07C0002C, 0x07C0C064, 0x07C2800F,
+    0x07C2C40E, 0x07C3040F, 0x07C3440F, 0x07C4401F, 0x07C4C03C,
+    0x07C5C02B, 0x07C7981D, 0x07C8402B, 0x07C90009, 0x07C94002,
+    0x07CC0021, 0x07CCC006, 0x07CCDC46, 0x07CE0014, 0x07CE8025,
+    0x07CF1805, 0x07CF8011, 0x07D0003F, 0x07D10001, 0x07D108B6,
+    0x07D3E404, 0x07D4003E, 0x07D50004, 0x07D54018, 0x07D7EC46,
+    0x07D9140B, 0x07DA0046, 0x07DC0074, 0x38000401, 0x38008060,
+    0x380400F0,
+  };
+  static const unsigned int aAscii[4] = {
+    0xFFFFFFFF, 0xFC00FFFF, 0xF8000001, 0xF8000001,
+  };
+
+  if( c<128 ){
+    return ( (aAscii[c >> 5] & (1 << (c & 0x001F)))==0 );
+  }else if( c<(1<<22) ){
+    unsigned int key = (((unsigned int)c)<<10) | 0x000003FF;
+    int iRes = 0;
+    int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1;
+    int iLo = 0;
+    while( iHi>=iLo ){
+      int iTest = (iHi + iLo) / 2;
+      if( key >= aEntry[iTest] ){
+        iRes = iTest;
+        iLo = iTest+1;
+      }else{
+        iHi = iTest-1;
+      }
+    }
+    assert( aEntry[0]<key );
+    assert( key>=aEntry[iRes] );
+    return (((unsigned int)c) >= ((aEntry[iRes]>>10) + (aEntry[iRes]&0x3FF)));
+  }
+  return 1;
+}
+
+
+/*
+** If the argument is a codepoint corresponding to a lowercase letter
+** in the ASCII range with a diacritic added, return the codepoint
+** of the ASCII letter only. For example, if passed 235 - "LATIN
+** SMALL LETTER E WITH DIAERESIS" - return 101 ("LATIN SMALL LETTER
+** E"). The results of passing a codepoint that corresponds to an
+** uppercase letter are undefined.
+*/ +static int remove_diacritic(int c){ + unsigned short aDia[] = { + 0, 1797, 1848, 1859, 1891, 1928, 1940, 1995, + 2024, 2040, 2060, 2110, 2168, 2206, 2264, 2286, + 2344, 2383, 2472, 2488, 2516, 2596, 2668, 2732, + 2782, 2842, 2894, 2954, 2984, 3000, 3028, 3336, + 3456, 3696, 3712, 3728, 3744, 3896, 3912, 3928, + 3968, 4008, 4040, 4106, 4138, 4170, 4202, 4234, + 4266, 4296, 4312, 4344, 4408, 4424, 4472, 4504, + 6148, 6198, 6264, 6280, 6360, 6429, 6505, 6529, + 61448, 61468, 61534, 61592, 61642, 61688, 61704, 61726, + 61784, 61800, 61836, 61880, 61914, 61948, 61998, 62122, + 62154, 62200, 62218, 62302, 62364, 62442, 62478, 62536, + 62554, 62584, 62604, 62640, 62648, 62656, 62664, 62730, + 62924, 63050, 63082, 63274, 63390, + }; + char aChar[] = { + '\0', 'a', 'c', 'e', 'i', 'n', 'o', 'u', 'y', 'y', 'a', 'c', + 'd', 'e', 'e', 'g', 'h', 'i', 'j', 'k', 'l', 'n', 'o', 'r', + 's', 't', 'u', 'u', 'w', 'y', 'z', 'o', 'u', 'a', 'i', 'o', + 'u', 'g', 'k', 'o', 'j', 'g', 'n', 'a', 'e', 'i', 'o', 'r', + 'u', 's', 't', 'h', 'a', 'e', 'o', 'y', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', 'a', 'b', 'd', 'd', 'e', 'f', 'g', 'h', + 'h', 'i', 'k', 'l', 'l', 'm', 'n', 'p', 'r', 'r', 's', 't', + 'u', 'v', 'w', 'w', 'x', 'y', 'z', 'h', 't', 'w', 'y', 'a', + 'e', 'i', 'o', 'u', 'y', + }; + + unsigned int key = (((unsigned int)c)<<3) | 0x00000007; + int iRes = 0; + int iHi = sizeof(aDia)/sizeof(aDia[0]) - 1; + int iLo = 0; + while( iHi>=iLo ){ + int iTest = (iHi + iLo) / 2; + if( key >= aDia[iTest] ){ + iRes = iTest; + iLo = iTest+1; + }else{ + iHi = iTest-1; + } + } + assert( key>=aDia[iRes] ); + return ((c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : (int)aChar[iRes]); +} + + +/* +** Return true if the argument interpreted as a unicode codepoint +** is a diacritical modifier character. +*/ +SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int c){ + unsigned int mask0 = 0x08029FDF; + unsigned int mask1 = 0x000361F8; + if( c<768 || c>817 ) return 0; + return (c < 768+32) ? + (mask0 & (1 << (c-768))) : + (mask1 & (1 << (c-768-32))); +} + + +/* +** Interpret the argument as a unicode codepoint. If the codepoint +** is an upper case character that has a lower case equivalent, +** return the codepoint corresponding to the lower case version. +** Otherwise, return a copy of the argument. +** +** The results are undefined if the value passed to this function +** is less than zero. +*/ +SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){ + /* Each entry in the following array defines a rule for folding a range + ** of codepoints to lower case. The rule applies to a range of nRange + ** codepoints starting at codepoint iCode. + ** + ** If the least significant bit in flags is clear, then the rule applies + ** to all nRange codepoints (i.e. all nRange codepoints are upper case and + ** need to be folded). Or, if it is set, then the rule only applies to + ** every second codepoint in the range, starting with codepoint C. + ** + ** The 7 most significant bits in flags are an index into the aiOff[] + ** array. If a specific codepoint C does require folding, then its lower + ** case equivalent is ((C + aiOff[flags>>1]) & 0xFFFF). + ** + ** The contents of this array are generated by parsing the CaseFolding.txt + ** file distributed as part of the "Unicode Character Database". See + ** http://www.unicode.org for details. 
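+  **
+  ** Worked example: the first entry below is {65, 14, 26}. Bit 0 of the
+  ** flags byte (14) is clear, so all 26 codepoints 65..90 ('A'..'Z')
+  ** fold, each by aiOff[14>>1] = aiOff[7] = 32; 'A' (65) therefore
+  ** folds to 97 ('a').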
+ */ + static const struct TableEntry { + unsigned short iCode; + unsigned char flags; + unsigned char nRange; + } aEntry[] = { + {65, 14, 26}, {181, 64, 1}, {192, 14, 23}, + {216, 14, 7}, {256, 1, 48}, {306, 1, 6}, + {313, 1, 16}, {330, 1, 46}, {376, 116, 1}, + {377, 1, 6}, {383, 104, 1}, {385, 50, 1}, + {386, 1, 4}, {390, 44, 1}, {391, 0, 1}, + {393, 42, 2}, {395, 0, 1}, {398, 32, 1}, + {399, 38, 1}, {400, 40, 1}, {401, 0, 1}, + {403, 42, 1}, {404, 46, 1}, {406, 52, 1}, + {407, 48, 1}, {408, 0, 1}, {412, 52, 1}, + {413, 54, 1}, {415, 56, 1}, {416, 1, 6}, + {422, 60, 1}, {423, 0, 1}, {425, 60, 1}, + {428, 0, 1}, {430, 60, 1}, {431, 0, 1}, + {433, 58, 2}, {435, 1, 4}, {439, 62, 1}, + {440, 0, 1}, {444, 0, 1}, {452, 2, 1}, + {453, 0, 1}, {455, 2, 1}, {456, 0, 1}, + {458, 2, 1}, {459, 1, 18}, {478, 1, 18}, + {497, 2, 1}, {498, 1, 4}, {502, 122, 1}, + {503, 134, 1}, {504, 1, 40}, {544, 110, 1}, + {546, 1, 18}, {570, 70, 1}, {571, 0, 1}, + {573, 108, 1}, {574, 68, 1}, {577, 0, 1}, + {579, 106, 1}, {580, 28, 1}, {581, 30, 1}, + {582, 1, 10}, {837, 36, 1}, {880, 1, 4}, + {886, 0, 1}, {902, 18, 1}, {904, 16, 3}, + {908, 26, 1}, {910, 24, 2}, {913, 14, 17}, + {931, 14, 9}, {962, 0, 1}, {975, 4, 1}, + {976, 140, 1}, {977, 142, 1}, {981, 146, 1}, + {982, 144, 1}, {984, 1, 24}, {1008, 136, 1}, + {1009, 138, 1}, {1012, 130, 1}, {1013, 128, 1}, + {1015, 0, 1}, {1017, 152, 1}, {1018, 0, 1}, + {1021, 110, 3}, {1024, 34, 16}, {1040, 14, 32}, + {1120, 1, 34}, {1162, 1, 54}, {1216, 6, 1}, + {1217, 1, 14}, {1232, 1, 88}, {1329, 22, 38}, + {4256, 66, 38}, {4295, 66, 1}, {4301, 66, 1}, + {7680, 1, 150}, {7835, 132, 1}, {7838, 96, 1}, + {7840, 1, 96}, {7944, 150, 8}, {7960, 150, 6}, + {7976, 150, 8}, {7992, 150, 8}, {8008, 150, 6}, + {8025, 151, 8}, {8040, 150, 8}, {8072, 150, 8}, + {8088, 150, 8}, {8104, 150, 8}, {8120, 150, 2}, + {8122, 126, 2}, {8124, 148, 1}, {8126, 100, 1}, + {8136, 124, 4}, {8140, 148, 1}, {8152, 150, 2}, + {8154, 120, 2}, {8168, 150, 2}, {8170, 118, 2}, + {8172, 152, 1}, {8184, 112, 2}, {8186, 114, 2}, + {8188, 148, 1}, {8486, 98, 1}, {8490, 92, 1}, + {8491, 94, 1}, {8498, 12, 1}, {8544, 8, 16}, + {8579, 0, 1}, {9398, 10, 26}, {11264, 22, 47}, + {11360, 0, 1}, {11362, 88, 1}, {11363, 102, 1}, + {11364, 90, 1}, {11367, 1, 6}, {11373, 84, 1}, + {11374, 86, 1}, {11375, 80, 1}, {11376, 82, 1}, + {11378, 0, 1}, {11381, 0, 1}, {11390, 78, 2}, + {11392, 1, 100}, {11499, 1, 4}, {11506, 0, 1}, + {42560, 1, 46}, {42624, 1, 24}, {42786, 1, 14}, + {42802, 1, 62}, {42873, 1, 4}, {42877, 76, 1}, + {42878, 1, 10}, {42891, 0, 1}, {42893, 74, 1}, + {42896, 1, 4}, {42912, 1, 10}, {42922, 72, 1}, + {65313, 14, 26}, + }; + static const unsigned short aiOff[] = { + 1, 2, 8, 15, 16, 26, 28, 32, + 37, 38, 40, 48, 63, 64, 69, 71, + 79, 80, 116, 202, 203, 205, 206, 207, + 209, 210, 211, 213, 214, 217, 218, 219, + 775, 7264, 10792, 10795, 23228, 23256, 30204, 54721, + 54753, 54754, 54756, 54787, 54793, 54809, 57153, 57274, + 57921, 58019, 58363, 61722, 65268, 65341, 65373, 65406, + 65408, 65410, 65415, 65424, 65436, 65439, 65450, 65462, + 65472, 65476, 65478, 65480, 65482, 65488, 65506, 65511, + 65514, 65521, 65527, 65528, 65529, + }; + + int ret = c; + + assert( c>=0 ); + assert( sizeof(unsigned short)==2 && sizeof(unsigned char)==1 ); + + if( c<128 ){ + if( c>='A' && c<='Z' ) ret = c + ('a' - 'A'); + }else if( c<65536 ){ + int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1; + int iLo = 0; + int iRes = -1; + + while( iHi>=iLo ){ + int iTest = (iHi + iLo) / 2; + int cmp = (c - aEntry[iTest].iCode); + if( cmp>=0 ){ + 
iRes = iTest;
+        iLo = iTest+1;
+      }else{
+        iHi = iTest-1;
+      }
+    }
+    assert( iRes<0 || c>=aEntry[iRes].iCode );
+
+    if( iRes>=0 ){
+      const struct TableEntry *p = &aEntry[iRes];
+      if( c<(p->iCode + p->nRange) && 0==(0x01 & p->flags & (p->iCode ^ c)) ){
+        ret = (c + (aiOff[p->flags>>1])) & 0x0000FFFF;
+        assert( ret>0 );
+      }
+    }
+
+    if( bRemoveDiacritic ) ret = remove_diacritic(ret);
+  }
+
+  else if( c>=66560 && c<66600 ){
+    ret = c + 40;
+  }
+
+  return ret;
+}
+#endif /* defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) */
+#endif /* !defined(SQLITE_DISABLE_FTS3_UNICODE) */
+
+/************** End of fts3_unicode2.c ***************************************/
+/************** Begin file rtree.c *******************************************/
+/*
+** 2001 September 15
+**
+** The author disclaims copyright to this source code.  In place of
+** a legal notice, here is a blessing:
+**
+**    May you do good and not evil.
+**    May you find forgiveness for yourself and forgive others.
+**    May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This file contains code for implementations of the r-tree and r*-tree
+** algorithms packaged as an SQLite virtual table module.
+*/
+
+/*
+** Database Format of R-Tree Tables
+** --------------------------------
+**
+** The data structure for a single virtual r-tree table is stored in three
+** native SQLite tables declared as follows. In each case, the '%' character
+** in the table name is replaced with the user-supplied name of the r-tree
+** table.
+**
+**   CREATE TABLE %_node(nodeno INTEGER PRIMARY KEY, data BLOB)
+**   CREATE TABLE %_parent(nodeno INTEGER PRIMARY KEY, parentnode INTEGER)
+**   CREATE TABLE %_rowid(rowid INTEGER PRIMARY KEY, nodeno INTEGER)
+**
+** The data for each node of the r-tree structure is stored in the %_node
+** table. For each node that is not the root node of the r-tree, there is
+** an entry in the %_parent table associating the node with its parent.
+** And for each row of data in the table, there is an entry in the %_rowid
+** table that maps from the entry's rowid to the id of the node that it
+** is stored on.
+**
+** The root node of an r-tree always exists, even if the r-tree table is
+** empty. The nodeno of the root node is always 1. All other nodes in the
+** table must be the same size as the root node. The content of each node
+** is formatted as follows:
+**
+**   1. If the node is the root node (node 1), then the first 2 bytes
+**      of the node contain the tree depth as a big-endian integer.
+**      For non-root nodes, the first 2 bytes are left unused.
+**
+**   2. The next 2 bytes contain the number of entries currently
+**      stored in the node.
+**
+**   3. The remainder of the node contains the node entries. Each entry
+**      consists of a single 8-byte integer followed by an even number
+**      of 4-byte coordinates. For leaf nodes the integer is the rowid
+**      of a record. For internal nodes it is the node number of a
+**      child page.
+*/
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RTREE)
+
+#ifndef SQLITE_CORE
+/* #include "sqlite3ext.h" */
+  SQLITE_EXTENSION_INIT1
+#else
+/* #include "sqlite3.h" */
+#endif
+
+/* #include <string.h> */
+/* #include <stdio.h> */
+/* #include <assert.h> */
+
+#ifndef SQLITE_AMALGAMATION
+#include "sqlite3rtree.h"
+typedef sqlite3_int64 i64;
+typedef sqlite3_uint64 u64;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+#endif
+
+/* The following macro is used to suppress compiler warnings.
+*/ +#ifndef UNUSED_PARAMETER +# define UNUSED_PARAMETER(x) (void)(x) +#endif + +typedef struct Rtree Rtree; +typedef struct RtreeCursor RtreeCursor; +typedef struct RtreeNode RtreeNode; +typedef struct RtreeCell RtreeCell; +typedef struct RtreeConstraint RtreeConstraint; +typedef struct RtreeMatchArg RtreeMatchArg; +typedef struct RtreeGeomCallback RtreeGeomCallback; +typedef union RtreeCoord RtreeCoord; +typedef struct RtreeSearchPoint RtreeSearchPoint; + +/* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */ +#define RTREE_MAX_DIMENSIONS 5 + +/* Size of hash table Rtree.aHash. This hash table is not expected to +** ever contain very many entries, so a fixed number of buckets is +** used. +*/ +#define HASHSIZE 97 + +/* The xBestIndex method of this virtual table requires an estimate of +** the number of rows in the virtual table to calculate the costs of +** various strategies. If possible, this estimate is loaded from the +** sqlite_stat1 table (with RTREE_MIN_ROWEST as a hard-coded minimum). +** Otherwise, if no sqlite_stat1 entry is available, use +** RTREE_DEFAULT_ROWEST. +*/ +#define RTREE_DEFAULT_ROWEST 1048576 +#define RTREE_MIN_ROWEST 100 + +/* +** An rtree virtual-table object. +*/ +struct Rtree { + sqlite3_vtab base; /* Base class. Must be first */ + sqlite3 *db; /* Host database connection */ + int iNodeSize; /* Size in bytes of each node in the node table */ + u8 nDim; /* Number of dimensions */ + u8 nDim2; /* Twice the number of dimensions */ + u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */ + u8 nBytesPerCell; /* Bytes consumed per cell */ + u8 inWrTrans; /* True if inside write transaction */ + int iDepth; /* Current depth of the r-tree structure */ + char *zDb; /* Name of database containing r-tree table */ + char *zName; /* Name of r-tree table */ + u32 nBusy; /* Current number of users of this structure */ + i64 nRowEst; /* Estimated number of rows in this table */ + u32 nCursor; /* Number of open cursors */ + + /* List of nodes removed during a CondenseTree operation. List is + ** linked together via the pointer normally used for hash chains - + ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree + ** headed by the node (leaf nodes have RtreeNode.iNode==0). + */ + RtreeNode *pDeleted; + int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ + + /* Blob I/O on xxx_node */ + sqlite3_blob *pNodeBlob; + + /* Statements to read/write/delete a record from xxx_node */ + sqlite3_stmt *pWriteNode; + sqlite3_stmt *pDeleteNode; + + /* Statements to read/write/delete a record from xxx_rowid */ + sqlite3_stmt *pReadRowid; + sqlite3_stmt *pWriteRowid; + sqlite3_stmt *pDeleteRowid; + + /* Statements to read/write/delete a record from xxx_parent */ + sqlite3_stmt *pReadParent; + sqlite3_stmt *pWriteParent; + sqlite3_stmt *pDeleteParent; + + RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */ +}; + +/* Possible values for Rtree.eCoordType: */ +#define RTREE_COORD_REAL32 0 +#define RTREE_COORD_INT32 1 + +/* +** If SQLITE_RTREE_INT_ONLY is defined, then this virtual table will +** only deal with integer coordinates. No floating point operations +** will be done. 
+*/
+#ifdef SQLITE_RTREE_INT_ONLY
+  typedef sqlite3_int64 RtreeDValue;        /* High accuracy coordinate */
+  typedef int RtreeValue;                   /* Low accuracy coordinate */
+# define RTREE_ZERO 0
+#else
+  typedef double RtreeDValue;               /* High accuracy coordinate */
+  typedef float RtreeValue;                 /* Low accuracy coordinate */
+# define RTREE_ZERO 0.0
+#endif
+
+/*
+** When doing a search of an r-tree, instances of the following structure
+** record intermediate results from the tree walk.
+**
+** The id is always a node-id. For iLevel>=1 the id is the node-id of
+** the node that the RtreeSearchPoint represents. When iLevel==0, however,
+** the id is of the parent node and the cell that RtreeSearchPoint
+** represents is the iCell-th entry in the parent node.
+*/
+struct RtreeSearchPoint {
+  RtreeDValue rScore;    /* The score for this node. Smallest goes first. */
+  sqlite3_int64 id;      /* Node ID */
+  u8 iLevel;             /* 0=entries. 1=leaf node. 2+ for higher */
+  u8 eWithin;            /* PARTLY_WITHIN or FULLY_WITHIN */
+  u8 iCell;              /* Cell index within the node */
+};
+
+/*
+** The minimum number of cells allowed for a node is a third of the
+** maximum. In Guttman's notation:
+**
+**     m = M/3
+**
+** If an R*-tree "Reinsert" operation is required, the same number of
+** cells are removed from the overfull node and reinserted into the tree.
+*/
+#define RTREE_MINCELLS(p) ((((p)->iNodeSize-4)/(p)->nBytesPerCell)/3)
+#define RTREE_REINSERT(p) RTREE_MINCELLS(p)
+#define RTREE_MAXCELLS 51
+
+/*
+** The smallest possible node-size is (512-64)==448 bytes. And the largest
+** supported cell size is 48 bytes (8 byte rowid + ten 4 byte coordinates).
+** Therefore all non-root nodes must contain at least 3 entries. Since
+** 3^40 is greater than 2^64, an r-tree structure always has a depth of
+** 40 or less.
+*/
+#define RTREE_MAX_DEPTH 40
+
+
+/*
+** Number of entries in the cursor RtreeNode cache. The first entry is
+** used to cache the RtreeNode for RtreeCursor.sPoint. The remaining
+** entries cache the RtreeNode for the first elements of the priority queue.
+*/
+#define RTREE_CACHE_SZ 5
+
+/*
+** An rtree cursor object.
+*/
+struct RtreeCursor {
+  sqlite3_vtab_cursor base;         /* Base class. Must be first */
+  u8 atEOF;                         /* True if at end of search */
+  u8 bPoint;                        /* True if sPoint is valid */
+  int iStrategy;                    /* Copy of idxNum search parameter */
+  int nConstraint;                  /* Number of entries in aConstraint */
+  RtreeConstraint *aConstraint;     /* Search constraints. */
+  int nPointAlloc;                  /* Number of slots allocated for aPoint[] */
+  int nPoint;                       /* Number of slots used in aPoint[] */
+  int mxLevel;                      /* iLevel value for root of the tree */
+  RtreeSearchPoint *aPoint;         /* Priority queue for search points */
+  RtreeSearchPoint sPoint;          /* Cached next search point */
+  RtreeNode *aNode[RTREE_CACHE_SZ]; /* Rtree node cache */
+  u32 anQueue[RTREE_MAX_DEPTH+1];   /* Number of queued entries by iLevel */
+};
+
+/* Return the Rtree of a RtreeCursor */
+#define RTREE_OF_CURSOR(X) ((Rtree*)((X)->base.pVtab))
+
+/*
+** A coordinate can be either a floating point number or an integer. All
+** coordinates within a single R-Tree are always of the same type.
+*/
+union RtreeCoord {
+  RtreeValue f;      /* Floating point value */
+  int i;             /* Integer value */
+  u32 u;             /* Unsigned for byte-order conversions */
+};
+
+/*
+** The argument is an RtreeCoord. Return the value stored within the RtreeCoord
+** formatted as a RtreeDValue (double or int64). This macro assumes that local
+** variable pRtree points to the Rtree structure associated with the
+** RtreeCoord.
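+**
+** For example, after "readCoord(pData, &c)" on a REAL32 table,
+** "RtreeDValue d = DCOORD(c);" widens c.f to a double; in an
+** SQLITE_RTREE_INT_ONLY build it reads c.i instead.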
+*/ +#ifdef SQLITE_RTREE_INT_ONLY +# define DCOORD(coord) ((RtreeDValue)coord.i) +#else +# define DCOORD(coord) ( \ + (pRtree->eCoordType==RTREE_COORD_REAL32) ? \ + ((double)coord.f) : \ + ((double)coord.i) \ + ) +#endif + +/* +** A search constraint. +*/ +struct RtreeConstraint { + int iCoord; /* Index of constrained coordinate */ + int op; /* Constraining operation */ + union { + RtreeDValue rValue; /* Constraint value. */ + int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*); + int (*xQueryFunc)(sqlite3_rtree_query_info*); + } u; + sqlite3_rtree_query_info *pInfo; /* xGeom and xQueryFunc argument */ +}; + +/* Possible values for RtreeConstraint.op */ +#define RTREE_EQ 0x41 /* A */ +#define RTREE_LE 0x42 /* B */ +#define RTREE_LT 0x43 /* C */ +#define RTREE_GE 0x44 /* D */ +#define RTREE_GT 0x45 /* E */ +#define RTREE_MATCH 0x46 /* F: Old-style sqlite3_rtree_geometry_callback() */ +#define RTREE_QUERY 0x47 /* G: New-style sqlite3_rtree_query_callback() */ + + +/* +** An rtree structure node. +*/ +struct RtreeNode { + RtreeNode *pParent; /* Parent node */ + i64 iNode; /* The node number */ + int nRef; /* Number of references to this node */ + int isDirty; /* True if the node needs to be written to disk */ + u8 *zData; /* Content of the node, as should be on disk */ + RtreeNode *pNext; /* Next node in this hash collision chain */ +}; + +/* Return the number of cells in a node */ +#define NCELL(pNode) readInt16(&(pNode)->zData[2]) + +/* +** A single cell from a node, deserialized +*/ +struct RtreeCell { + i64 iRowid; /* Node or entry ID */ + RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; /* Bounding box coordinates */ +}; + + +/* +** This object becomes the sqlite3_user_data() for the SQL functions +** that are created by sqlite3_rtree_geometry_callback() and +** sqlite3_rtree_query_callback() and which appear on the right of MATCH +** operators in order to constrain a search. +** +** xGeom and xQueryFunc are the callback functions. Exactly one of +** xGeom and xQueryFunc fields is non-NULL, depending on whether the +** SQL function was created using sqlite3_rtree_geometry_callback() or +** sqlite3_rtree_query_callback(). +** +** This object is deleted automatically by the destructor mechanism in +** sqlite3_create_function_v2(). +*/ +struct RtreeGeomCallback { + int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*); + int (*xQueryFunc)(sqlite3_rtree_query_info*); + void (*xDestructor)(void*); + void *pContext; +}; + + +/* +** Value for the first field of every RtreeMatchArg object. The MATCH +** operator tests that the first field of a blob operand matches this +** value to avoid operating on invalid blobs (which could cause a segfault). +*/ +#define RTREE_GEOMETRY_MAGIC 0x891245AB + +/* +** An instance of this structure (in the form of a BLOB) is returned by +** the SQL functions that sqlite3_rtree_geometry_callback() and +** sqlite3_rtree_query_callback() create, and is read as the right-hand +** operand to the MATCH operator of an R-Tree. +*/ +struct RtreeMatchArg { + u32 magic; /* Always RTREE_GEOMETRY_MAGIC */ + RtreeGeomCallback cb; /* Info about the callback functions */ + int nParam; /* Number of parameters to the SQL function */ + sqlite3_value **apSqlParam; /* Original SQL parameter values */ + RtreeDValue aParam[1]; /* Values for parameters to the SQL function */ +}; + +#ifndef MAX +# define MAX(x,y) ((x) < (y) ? (y) : (x)) +#endif +#ifndef MIN +# define MIN(x,y) ((x) > (y) ? (y) : (x)) +#endif + +/* What version of GCC is being used. 
0 means GCC is not being used */ +#ifndef GCC_VERSION +#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC) +# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__) +#else +# define GCC_VERSION 0 +#endif +#endif + +/* What version of CLANG is being used. 0 means CLANG is not being used */ +#ifndef CLANG_VERSION +#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC) +# define CLANG_VERSION \ + (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__) +#else +# define CLANG_VERSION 0 +#endif +#endif + +/* The testcase() macro should already be defined in the amalgamation. If +** it is not, make it a no-op. +*/ +#ifndef SQLITE_AMALGAMATION +# define testcase(X) +#endif + +/* +** Macros to determine whether the machine is big or little endian, +** and whether or not that determination is run-time or compile-time. +** +** For best performance, an attempt is made to guess at the byte-order +** using C-preprocessor macros. If that is unsuccessful, or if +** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined +** at run-time. +*/ +#ifndef SQLITE_BYTEORDER +#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ + defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ + defined(__arm__) +# define SQLITE_BYTEORDER 1234 +#elif defined(sparc) || defined(__ppc__) +# define SQLITE_BYTEORDER 4321 +#else +# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ +#endif +#endif + + +/* What version of MSVC is being used. 0 means MSVC is not being used */ +#ifndef MSVC_VERSION +#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC) +# define MSVC_VERSION _MSC_VER +#else +# define MSVC_VERSION 0 +#endif +#endif + +/* +** Functions to deserialize a 16 bit integer, 32 bit real number and +** 64 bit integer. The deserialized value is returned. +*/ +static int readInt16(u8 *p){ + return (p[0]<<8) + p[1]; +} +static void readCoord(u8 *p, RtreeCoord *pCoord){ + assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ +#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + pCoord->u = _byteswap_ulong(*(u32*)p); +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + pCoord->u = __builtin_bswap32(*(u32*)p); +#elif SQLITE_BYTEORDER==4321 + pCoord->u = *(u32*)p; +#else + pCoord->u = ( + (((u32)p[0]) << 24) + + (((u32)p[1]) << 16) + + (((u32)p[2]) << 8) + + (((u32)p[3]) << 0) + ); +#endif +} +static i64 readInt64(u8 *p){ +#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + u64 x; + memcpy(&x, p, 8); + return (i64)_byteswap_uint64(x); +#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + u64 x; + memcpy(&x, p, 8); + return (i64)__builtin_bswap64(x); +#elif SQLITE_BYTEORDER==4321 + i64 x; + memcpy(&x, p, 8); + return x; +#else + return ( + (((i64)p[0]) << 56) + + (((i64)p[1]) << 48) + + (((i64)p[2]) << 40) + + (((i64)p[3]) << 32) + + (((i64)p[4]) << 24) + + (((i64)p[5]) << 16) + + (((i64)p[6]) << 8) + + (((i64)p[7]) << 0) + ); +#endif +} + +/* +** Functions to serialize a 16 bit integer, 32 bit real number and +** 64 bit integer. The value returned is the number of bytes written +** to the argument buffer (always 2, 4 and 8 respectively). 
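+**
+** For example, writeInt16(p, 0x0102) below stores the bytes {0x01, 0x02}
+** in big-endian order, and readInt16(p) above recovers 0x0102 regardless
+** of the byte order of the host.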
+*/ +static void writeInt16(u8 *p, int i){ + p[0] = (i>> 8)&0xFF; + p[1] = (i>> 0)&0xFF; +} +static int writeCoord(u8 *p, RtreeCoord *pCoord){ + u32 i; + assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ + assert( sizeof(RtreeCoord)==4 ); + assert( sizeof(u32)==4 ); +#if SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + i = __builtin_bswap32(pCoord->u); + memcpy(p, &i, 4); +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + i = _byteswap_ulong(pCoord->u); + memcpy(p, &i, 4); +#elif SQLITE_BYTEORDER==4321 + i = pCoord->u; + memcpy(p, &i, 4); +#else + i = pCoord->u; + p[0] = (i>>24)&0xFF; + p[1] = (i>>16)&0xFF; + p[2] = (i>> 8)&0xFF; + p[3] = (i>> 0)&0xFF; +#endif + return 4; +} +static int writeInt64(u8 *p, i64 i){ +#if SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000) + i = (i64)__builtin_bswap64((u64)i); + memcpy(p, &i, 8); +#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 + i = (i64)_byteswap_uint64((u64)i); + memcpy(p, &i, 8); +#elif SQLITE_BYTEORDER==4321 + memcpy(p, &i, 8); +#else + p[0] = (i>>56)&0xFF; + p[1] = (i>>48)&0xFF; + p[2] = (i>>40)&0xFF; + p[3] = (i>>32)&0xFF; + p[4] = (i>>24)&0xFF; + p[5] = (i>>16)&0xFF; + p[6] = (i>> 8)&0xFF; + p[7] = (i>> 0)&0xFF; +#endif + return 8; +} + +/* +** Increment the reference count of node p. +*/ +static void nodeReference(RtreeNode *p){ + if( p ){ + p->nRef++; + } +} + +/* +** Clear the content of node p (set all bytes to 0x00). +*/ +static void nodeZero(Rtree *pRtree, RtreeNode *p){ + memset(&p->zData[2], 0, pRtree->iNodeSize-2); + p->isDirty = 1; +} + +/* +** Given a node number iNode, return the corresponding key to use +** in the Rtree.aHash table. +*/ +static int nodeHash(i64 iNode){ + return iNode % HASHSIZE; +} + +/* +** Search the node hash table for node iNode. If found, return a pointer +** to it. Otherwise, return 0. +*/ +static RtreeNode *nodeHashLookup(Rtree *pRtree, i64 iNode){ + RtreeNode *p; + for(p=pRtree->aHash[nodeHash(iNode)]; p && p->iNode!=iNode; p=p->pNext); + return p; +} + +/* +** Add node pNode to the node hash table. +*/ +static void nodeHashInsert(Rtree *pRtree, RtreeNode *pNode){ + int iHash; + assert( pNode->pNext==0 ); + iHash = nodeHash(pNode->iNode); + pNode->pNext = pRtree->aHash[iHash]; + pRtree->aHash[iHash] = pNode; +} + +/* +** Remove node pNode from the node hash table. +*/ +static void nodeHashDelete(Rtree *pRtree, RtreeNode *pNode){ + RtreeNode **pp; + if( pNode->iNode!=0 ){ + pp = &pRtree->aHash[nodeHash(pNode->iNode)]; + for( ; (*pp)!=pNode; pp = &(*pp)->pNext){ assert(*pp); } + *pp = pNode->pNext; + pNode->pNext = 0; + } +} + +/* +** Allocate and return new r-tree node. Initially, (RtreeNode.iNode==0), +** indicating that node has not yet been assigned a node number. It is +** assigned a node number when nodeWrite() is called to write the +** node contents out to the database. 
+*/ +static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){ + RtreeNode *pNode; + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); + if( pNode ){ + memset(pNode, 0, sizeof(RtreeNode) + pRtree->iNodeSize); + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->pParent = pParent; + pNode->isDirty = 1; + nodeReference(pParent); + } + return pNode; +} + +/* +** Clear the Rtree.pNodeBlob object +*/ +static void nodeBlobReset(Rtree *pRtree){ + if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){ + sqlite3_blob *pBlob = pRtree->pNodeBlob; + pRtree->pNodeBlob = 0; + sqlite3_blob_close(pBlob); + } +} + +/* +** Obtain a reference to an r-tree node. +*/ +static int nodeAcquire( + Rtree *pRtree, /* R-tree structure */ + i64 iNode, /* Node number to load */ + RtreeNode *pParent, /* Either the parent node or NULL */ + RtreeNode **ppNode /* OUT: Acquired node */ +){ + int rc = SQLITE_OK; + RtreeNode *pNode = 0; + + /* Check if the requested node is already in the hash table. If so, + ** increase its reference count and return it. + */ + if( (pNode = nodeHashLookup(pRtree, iNode)) ){ + assert( !pParent || !pNode->pParent || pNode->pParent==pParent ); + if( pParent && !pNode->pParent ){ + nodeReference(pParent); + pNode->pParent = pParent; + } + pNode->nRef++; + *ppNode = pNode; + return SQLITE_OK; + } + + if( pRtree->pNodeBlob ){ + sqlite3_blob *pBlob = pRtree->pNodeBlob; + pRtree->pNodeBlob = 0; + rc = sqlite3_blob_reopen(pBlob, iNode); + pRtree->pNodeBlob = pBlob; + if( rc ){ + nodeBlobReset(pRtree); + if( rc==SQLITE_NOMEM ) return SQLITE_NOMEM; + } + } + if( pRtree->pNodeBlob==0 ){ + char *zTab = sqlite3_mprintf("%s_node", pRtree->zName); + if( zTab==0 ) return SQLITE_NOMEM; + rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0, + &pRtree->pNodeBlob); + sqlite3_free(zTab); + } + if( rc ){ + nodeBlobReset(pRtree); + *ppNode = 0; + /* If unable to open an sqlite3_blob on the desired row, that can only + ** be because the shadow tables hold erroneous data. */ + if( rc==SQLITE_ERROR ) rc = SQLITE_CORRUPT_VTAB; + }else if( pRtree->iNodeSize==sqlite3_blob_bytes(pRtree->pNodeBlob) ){ + pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize); + if( !pNode ){ + rc = SQLITE_NOMEM; + }else{ + pNode->pParent = pParent; + pNode->zData = (u8 *)&pNode[1]; + pNode->nRef = 1; + pNode->iNode = iNode; + pNode->isDirty = 0; + pNode->pNext = 0; + rc = sqlite3_blob_read(pRtree->pNodeBlob, pNode->zData, + pRtree->iNodeSize, 0); + nodeReference(pParent); + } + } + + /* If the root node was just loaded, set pRtree->iDepth to the height + ** of the r-tree structure. A height of zero means all data is stored on + ** the root node. A height of one means the children of the root node + ** are the leaves, and so on. If the depth as specified on the root node + ** is greater than RTREE_MAX_DEPTH, the r-tree structure must be corrupt. + */ + if( pNode && iNode==1 ){ + pRtree->iDepth = readInt16(pNode->zData); + if( pRtree->iDepth>RTREE_MAX_DEPTH ){ + rc = SQLITE_CORRUPT_VTAB; + } + } + + /* If no error has occurred so far, check if the "number of entries" + ** field on the node is too large. If so, set the return code to + ** SQLITE_CORRUPT_VTAB. 
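+  **
+  ** For instance, supposing a 1024-byte node and a 2-dimensional REAL32
+  ** table (nBytesPerCell = 8 + 4*4 = 24), any node claiming more than
+  ** (1024-4)/24 = 42 entries is treated as corrupt.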
+  */
+  if( pNode && rc==SQLITE_OK ){
+    if( NCELL(pNode)>((pRtree->iNodeSize-4)/pRtree->nBytesPerCell) ){
+      rc = SQLITE_CORRUPT_VTAB;
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    if( pNode!=0 ){
+      nodeHashInsert(pRtree, pNode);
+    }else{
+      rc = SQLITE_CORRUPT_VTAB;
+    }
+    *ppNode = pNode;
+  }else{
+    sqlite3_free(pNode);
+    *ppNode = 0;
+  }
+
+  return rc;
+}
+
+/*
+** Overwrite cell iCell of node pNode with the contents of pCell.
+*/
+static void nodeOverwriteCell(
+  Rtree *pRtree,             /* The overall R-Tree */
+  RtreeNode *pNode,          /* The node into which the cell is to be written */
+  RtreeCell *pCell,          /* The cell to write */
+  int iCell                  /* Index into pNode into which pCell is written */
+){
+  int ii;
+  u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
+  p += writeInt64(p, pCell->iRowid);
+  for(ii=0; ii<pRtree->nDim2; ii++){
+    p += writeCoord(p, &pCell->aCoord[ii]);
+  }
+  pNode->isDirty = 1;
+}
+
+/*
+** Remove the cell with index iCell from node pNode.
+*/
+static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){
+  u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
+  u8 *pSrc = &pDst[pRtree->nBytesPerCell];
+  int nByte = (NCELL(pNode) - iCell - 1) * pRtree->nBytesPerCell;
+  memmove(pDst, pSrc, nByte);
+  writeInt16(&pNode->zData[2], NCELL(pNode)-1);
+  pNode->isDirty = 1;
+}
+
+/*
+** Insert the contents of cell pCell into node pNode. If the insert
+** is successful, return SQLITE_OK.
+**
+** If there is not enough free space in pNode, return SQLITE_FULL.
+*/
+static int nodeInsertCell(
+  Rtree *pRtree,             /* The overall R-Tree */
+  RtreeNode *pNode,          /* Write new cell into this node */
+  RtreeCell *pCell           /* The cell to be inserted */
+){
+  int nCell;                 /* Current number of cells in pNode */
+  int nMaxCell;              /* Maximum number of cells for pNode */
+
+  nMaxCell = (pRtree->iNodeSize-4)/pRtree->nBytesPerCell;
+  nCell = NCELL(pNode);
+
+  assert( nCell<=nMaxCell );
+  if( nCell<nMaxCell ){
+    nodeOverwriteCell(pRtree, pNode, pCell, nCell);
+    writeInt16(&pNode->zData[2], nCell+1);
+    pNode->isDirty = 1;
+  }
+
+  return (nCell==nMaxCell);
+}
+
+/*
+** If the node is dirty, write it out to the database.
+*/
+static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){
+  int rc = SQLITE_OK;
+  if( pNode->isDirty ){
+    sqlite3_stmt *p = pRtree->pWriteNode;
+    if( pNode->iNode ){
+      sqlite3_bind_int64(p, 1, pNode->iNode);
+    }else{
+      sqlite3_bind_null(p, 1);
+    }
+    sqlite3_bind_blob(p, 2, pNode->zData, pRtree->iNodeSize, SQLITE_STATIC);
+    sqlite3_step(p);
+    pNode->isDirty = 0;
+    rc = sqlite3_reset(p);
+    if( pNode->iNode==0 && rc==SQLITE_OK ){
+      pNode->iNode = sqlite3_last_insert_rowid(pRtree->db);
+      nodeHashInsert(pRtree, pNode);
+    }
+  }
+  return rc;
+}
+
+/*
+** Release a reference to a node. If the node is dirty and the reference
+** count drops to zero, the node data is written to the database.
+*/
+static int nodeRelease(Rtree *pRtree, RtreeNode *pNode){
+  int rc = SQLITE_OK;
+  if( pNode ){
+    assert( pNode->nRef>0 );
+    pNode->nRef--;
+    if( pNode->nRef==0 ){
+      if( pNode->iNode==1 ){
+        pRtree->iDepth = -1;
+      }
+      if( pNode->pParent ){
+        rc = nodeRelease(pRtree, pNode->pParent);
+      }
+      if( rc==SQLITE_OK ){
+        rc = nodeWrite(pRtree, pNode);
+      }
+      nodeHashDelete(pRtree, pNode);
+      sqlite3_free(pNode);
+    }
+  }
+  return rc;
+}
+
+/*
+** Return the 64-bit integer value associated with cell iCell of
+** node pNode. If pNode is a leaf node, this is a rowid. If it is
+** an internal node, then the 64-bit integer is a child page number.
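+**
+** Since each node carries a 4-byte header, cell iCell begins at byte
+** offset 4 + nBytesPerCell*iCell, and the integer is simply the first
+** 8 bytes of that cell.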
+*/
+static i64 nodeGetRowid(
+  Rtree *pRtree,       /* The overall R-Tree */
+  RtreeNode *pNode,    /* The node from which to extract the ID */
+  int iCell            /* The cell index from which to extract the ID */
+){
+  assert( iCell<NCELL(pNode) );
+  return readInt64(&pNode->zData[4 + pRtree->nBytesPerCell*iCell]);
+}
+
+/*
+** Return coordinate iCoord from cell iCell in node pNode.
+*/
+static void nodeGetCoord(
+  Rtree *pRtree,               /* The overall R-Tree */
+  RtreeNode *pNode,            /* The node from which to extract a coordinate */
+  int iCell,                   /* The index of the cell within the node */
+  int iCoord,                  /* Which coordinate to extract */
+  RtreeCoord *pCoord           /* OUT: Space to write result to */
+){
+  readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord);
+}
+
+/*
+** Deserialize cell iCell of node pNode. Populate the structure pointed
+** to by pCell with the results.
+*/
+static void nodeGetCell(
+  Rtree *pRtree,               /* The overall R-Tree */
+  RtreeNode *pNode,            /* The node containing the cell to be read */
+  int iCell,                   /* Index of the cell within the node */
+  RtreeCell *pCell             /* OUT: Write the cell contents here */
+){
+  u8 *pData;
+  RtreeCoord *pCoord;
+  int ii = 0;
+  pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell);
+  pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell);
+  pCoord = pCell->aCoord;
+  do{
+    readCoord(pData, &pCoord[ii]);
+    readCoord(pData+4, &pCoord[ii+1]);
+    pData += 8;
+    ii += 2;
+  }while( ii<pRtree->nDim2 );
+}
+
+
+/* Forward declaration for the function that does the work of
+** the virtual table module xCreate() and xConnect() methods.
+*/
+static int rtreeInit(
+  sqlite3 *, void *, int, const char *const*, sqlite3_vtab **, char **, int
+);
+
+/*
+** Rtree virtual table module xCreate method.
+*/
+static int rtreeCreate(
+  sqlite3 *db,
+  void *pAux,
+  int argc, const char *const*argv,
+  sqlite3_vtab **ppVtab,
+  char **pzErr
+){
+  return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 1);
+}
+
+/*
+** Rtree virtual table module xConnect method.
+*/
+static int rtreeConnect(
+  sqlite3 *db,
+  void *pAux,
+  int argc, const char *const*argv,
+  sqlite3_vtab **ppVtab,
+  char **pzErr
+){
+  return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 0);
+}
+
+/*
+** Increment the r-tree reference count.
+*/
+static void rtreeReference(Rtree *pRtree){
+  pRtree->nBusy++;
+}
+
+/*
+** Decrement the r-tree reference count. When the reference count reaches
+** zero the structure is deleted.
+*/
+static void rtreeRelease(Rtree *pRtree){
+  pRtree->nBusy--;
+  if( pRtree->nBusy==0 ){
+    pRtree->inWrTrans = 0;
+    pRtree->nCursor = 0;
+    nodeBlobReset(pRtree);
+    sqlite3_finalize(pRtree->pWriteNode);
+    sqlite3_finalize(pRtree->pDeleteNode);
+    sqlite3_finalize(pRtree->pReadRowid);
+    sqlite3_finalize(pRtree->pWriteRowid);
+    sqlite3_finalize(pRtree->pDeleteRowid);
+    sqlite3_finalize(pRtree->pReadParent);
+    sqlite3_finalize(pRtree->pWriteParent);
+    sqlite3_finalize(pRtree->pDeleteParent);
+    sqlite3_free(pRtree);
+  }
+}
+
+/*
+** Rtree virtual table module xDisconnect method.
+*/
+static int rtreeDisconnect(sqlite3_vtab *pVtab){
+  rtreeRelease((Rtree *)pVtab);
+  return SQLITE_OK;
+}
+
+/*
+** Rtree virtual table module xDestroy method.
+*/
+static int rtreeDestroy(sqlite3_vtab *pVtab){
+  Rtree *pRtree = (Rtree *)pVtab;
+  int rc;
+  char *zCreate = sqlite3_mprintf(
+    "DROP TABLE '%q'.'%q_node';"
+    "DROP TABLE '%q'.'%q_rowid';"
+    "DROP TABLE '%q'.'%q_parent';",
+    pRtree->zDb, pRtree->zName,
+    pRtree->zDb, pRtree->zName,
+    pRtree->zDb, pRtree->zName
+  );
+  if( !zCreate ){
+    rc = SQLITE_NOMEM;
+  }else{
+    nodeBlobReset(pRtree);
+    rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0);
+    sqlite3_free(zCreate);
+  }
+  if( rc==SQLITE_OK ){
+    rtreeRelease(pRtree);
+  }
+
+  return rc;
+}
+
+/*
+** Rtree virtual table module xOpen method.
+*/
+static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
+  int rc = SQLITE_NOMEM;
+  Rtree *pRtree = (Rtree *)pVTab;
+  RtreeCursor *pCsr;
+
+  pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor));
+  if( pCsr ){
+    memset(pCsr, 0, sizeof(RtreeCursor));
+    pCsr->base.pVtab = pVTab;
+    rc = SQLITE_OK;
+    pRtree->nCursor++;
+  }
+  *ppCursor = (sqlite3_vtab_cursor *)pCsr;
+
+  return rc;
+}
+
+
+/*
+** Free the RtreeCursor.aConstraint[] array and its contents.
+*/
+static void freeCursorConstraints(RtreeCursor *pCsr){
+  if( pCsr->aConstraint ){
+    int i;                        /* Used to iterate through constraint array */
+    for(i=0; i<pCsr->nConstraint; i++){
+      sqlite3_rtree_query_info *pInfo = pCsr->aConstraint[i].pInfo;
+      if( pInfo ){
+        if( pInfo->xDelUser ) pInfo->xDelUser(pInfo->pUser);
+        sqlite3_free(pInfo);
+      }
+    }
+    sqlite3_free(pCsr->aConstraint);
+    pCsr->aConstraint = 0;
+  }
+}
+
+/*
+** Rtree virtual table module xClose method.
+*/
+static int rtreeClose(sqlite3_vtab_cursor *cur){
+  Rtree *pRtree = (Rtree *)(cur->pVtab);
+  int ii;
+  RtreeCursor *pCsr = (RtreeCursor *)cur;
+  assert( pRtree->nCursor>0 );
+  freeCursorConstraints(pCsr);
+  sqlite3_free(pCsr->aPoint);
+  for(ii=0; ii<RTREE_CACHE_SZ; ii++) nodeRelease(pRtree, pCsr->aNode[ii]);
+  sqlite3_free(pCsr);
+  pRtree->nCursor--;
+  nodeBlobReset(pRtree);
+  return SQLITE_OK;
+}
+
+/*
+** Rtree virtual table module xEof method.
+**
+** Return non-zero if the cursor does not currently point to a valid
+** record (i.e if the scan has finished), or zero otherwise.
+*/
+static int rtreeEof(sqlite3_vtab_cursor *cur){
+  RtreeCursor *pCsr = (RtreeCursor *)cur;
+  return pCsr->atEOF;
+}
+
+/*
+** Convert raw bits from the on-disk RTree record into a coordinate value.
+** The on-disk format is big-endian and needs to be converted for little-
+** endian platforms. The on-disk record stores integer coordinates if
+** eInt is true and it stores 32-bit floating point records if eInt is
+** false. a[] is the four bytes of the on-disk record to be decoded.
+** Store the results in "r".
+**
+** There are five versions of this macro. The last one is generic. The
+** other four are various architectures-specific optimizations.
+*/
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+#define RTREE_DECODE_COORD(eInt, a, r) {                        \
+    RtreeCoord c;    /* Coordinate decoded */                   \
+    c.u = _byteswap_ulong(*(u32*)a);                            \
+    r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
+#define RTREE_DECODE_COORD(eInt, a, r) {                        \
+    RtreeCoord c;    /* Coordinate decoded */                   \
+    c.u = __builtin_bswap32(*(u32*)a);                          \
+    r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==1234
+#define RTREE_DECODE_COORD(eInt, a, r) {                        \
+    RtreeCoord c;    /* Coordinate decoded */                   \
+    memcpy(&c.u,a,4);                                           \
+    c.u = ((c.u>>24)&0xff)|((c.u>>8)&0xff00)|                   \
+          ((c.u&0xff)<<24)|((c.u&0xff00)<<8);                   \
+    r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==4321
+#define RTREE_DECODE_COORD(eInt, a, r) {                        \
+    RtreeCoord c;    /* Coordinate decoded */                   \
+    memcpy(&c.u,a,4);                                           \
+    r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#else
+#define RTREE_DECODE_COORD(eInt, a, r) {                        \
+    RtreeCoord c;    /* Coordinate decoded */                   \
+    c.u = ((u32)a[0]<<24) + ((u32)a[1]<<16)                     \
+           +((u32)a[2]<<8) + a[3];                              \
+    r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#endif
+
+/*
+** Check the RTree node or entry given by pCellData and p against the MATCH
+** constraint pConstraint.
+*/
+static int rtreeCallbackConstraint(
+  RtreeConstraint *pConstraint,  /* The constraint to test */
+  int eInt,                      /* True if RTree holding integer coordinates */
+  u8 *pCellData,                 /* Raw cell content */
+  RtreeSearchPoint *pSearch,     /* Container of this cell */
+  sqlite3_rtree_dbl *prScore,    /* OUT: score for the cell */
+  int *peWithin                  /* OUT: visibility of the cell */
+){
+  sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */
+  int nCoord = pInfo->nCoord;                           /* No. of coordinates */
+  int rc;                                            /* Callback return code */
+  RtreeCoord c;                                      /* Translator union */
+  sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2];  /* Decoded coordinates */
+
+  assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY );
+  assert( nCoord==2 || nCoord==4 || nCoord==6 || nCoord==8 || nCoord==10 );
+
+  if( pConstraint->op==RTREE_QUERY && pSearch->iLevel==1 ){
+    pInfo->iRowid = readInt64(pCellData);
+  }
+  pCellData += 8;
+#ifndef SQLITE_RTREE_INT_ONLY
+  if( eInt==0 ){
+    switch( nCoord ){
+      case 10: readCoord(pCellData+36, &c); aCoord[9] = c.f;
+               readCoord(pCellData+32, &c); aCoord[8] = c.f;
+      case 8:  readCoord(pCellData+28, &c); aCoord[7] = c.f;
+               readCoord(pCellData+24, &c); aCoord[6] = c.f;
+      case 6:  readCoord(pCellData+20, &c); aCoord[5] = c.f;
+               readCoord(pCellData+16, &c); aCoord[4] = c.f;
+      case 4:  readCoord(pCellData+12, &c); aCoord[3] = c.f;
+               readCoord(pCellData+8, &c);  aCoord[2] = c.f;
+      default: readCoord(pCellData+4, &c);  aCoord[1] = c.f;
+               readCoord(pCellData, &c);    aCoord[0] = c.f;
+    }
+  }else
+#endif
+  {
+    switch( nCoord ){
+      case 10: readCoord(pCellData+36, &c); aCoord[9] = c.i;
+               readCoord(pCellData+32, &c); aCoord[8] = c.i;
+      case 8:  readCoord(pCellData+28, &c); aCoord[7] = c.i;
+               readCoord(pCellData+24, &c); aCoord[6] = c.i;
+      case 6:  readCoord(pCellData+20, &c); aCoord[5] = c.i;
+               readCoord(pCellData+16, &c); aCoord[4] = c.i;
+      case 4:  readCoord(pCellData+12, &c); aCoord[3] = c.i;
+               readCoord(pCellData+8, &c);  aCoord[2] = c.i;
+      default: readCoord(pCellData+4, &c);  aCoord[1] = c.i;
+               readCoord(pCellData, &c);    aCoord[0] = c.i;
+    }
+  }
+  if( pConstraint->op==RTREE_MATCH ){
+    int eWithin = 0;
+    rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo,
+                              nCoord, aCoord, &eWithin);
+    if( eWithin==0 ) *peWithin = NOT_WITHIN;
+    *prScore = RTREE_ZERO;
+  }else{
+    pInfo->aCoord = aCoord;
+    pInfo->iLevel = pSearch->iLevel - 1;
+    pInfo->rScore = pInfo->rParentScore = pSearch->rScore;
+    pInfo->eWithin = pInfo->eParentWithin = pSearch->eWithin;
+    rc = pConstraint->u.xQueryFunc(pInfo);
+    if( pInfo->eWithin<*peWithin ) *peWithin = pInfo->eWithin;
+    if( pInfo->rScore<*prScore || *prScore<RTREE_ZERO ){
+      *prScore = pInfo->rScore;
+    }
+  }
+  return rc;
+}
+
+/*
+** Check the internal RTree node given by pCellData against constraint p.
+** If this constraint cannot be satisfied by any child within the node,
+** set *peWithin to NOT_WITHIN.
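+**
+** For example, given the constraint "x1 < 10" (RTREE_LT), a subtree
+** whose lower x bound is 12 cannot contain any matching entry, so the
+** cell is marked NOT_WITHIN and that subtree is never descended.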
+*/
+static void rtreeNonleafConstraint(
+  RtreeConstraint *p,        /* The constraint to test */
+  int eInt,                  /* True if RTree holds integer coordinates */
+  u8 *pCellData,             /* Raw cell content as appears on disk */
+  int *peWithin              /* Adjust downward, as appropriate */
+){
+  sqlite3_rtree_dbl val;     /* Coordinate value convert to a double */
+
+  /* p->iCoord might point to either a lower or upper bound coordinate
+  ** in a coordinate pair. But make pCellData point to the lower bound.
+  */
+  pCellData += 8 + 4*(p->iCoord&0xfe);
+
+  assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
+      || p->op==RTREE_GT || p->op==RTREE_EQ );
+  assert( ((((char*)pCellData) - (char*)0)&3)==0 );  /* 4-byte aligned */
+  switch( p->op ){
+    case RTREE_LE:
+    case RTREE_LT:
+    case RTREE_EQ:
+      RTREE_DECODE_COORD(eInt, pCellData, val);
+      /* val now holds the lower bound of the coordinate pair */
+      if( p->u.rValue>=val ) return;
+      if( p->op!=RTREE_EQ ) break;  /* RTREE_LE and RTREE_LT end here */
+      /* Fall through for the RTREE_EQ case */
+
+    default:   /* RTREE_GT or RTREE_GE, or fallthrough of RTREE_EQ */
+      pCellData += 4;
+      RTREE_DECODE_COORD(eInt, pCellData, val);
+      /* val now holds the upper bound of the coordinate pair */
+      if( p->u.rValue<=val ) return;
+  }
+  *peWithin = NOT_WITHIN;
+}
+
+/*
+** Check the leaf RTree cell given by pCellData against constraint p.
+** If this constraint is not satisfied, set *peWithin to NOT_WITHIN.
+** If the constraint is satisfied, leave *peWithin unchanged.
+**
+** The constraint is of the form:  xN op $val
+**
+** The op is given by p->op. The xN is p->iCoord-th coordinate in
+** pCellData. $val is given by p->u.rValue.
+*/
+static void rtreeLeafConstraint(
+  RtreeConstraint *p,        /* The constraint to test */
+  int eInt,                  /* True if RTree holds integer coordinates */
+  u8 *pCellData,             /* Raw cell content as appears on disk */
+  int *peWithin              /* Adjust downward, as appropriate */
+){
+  RtreeDValue xN;            /* Coordinate value converted to a double */
+
+  assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
+      || p->op==RTREE_GT || p->op==RTREE_EQ );
+  pCellData += 8 + p->iCoord*4;
+  assert( ((((char*)pCellData) - (char*)0)&3)==0 );  /* 4-byte aligned */
+  RTREE_DECODE_COORD(eInt, pCellData, xN);
+  switch( p->op ){
+    case RTREE_LE: if( xN <= p->u.rValue ) return;  break;
+    case RTREE_LT: if( xN <  p->u.rValue ) return;  break;
+    case RTREE_GE: if( xN >= p->u.rValue ) return;  break;
+    case RTREE_GT: if( xN >  p->u.rValue ) return;  break;
+    default:       if( xN == p->u.rValue ) return;  break;
+  }
+  *peWithin = NOT_WITHIN;
+}
+
+/*
+** One of the cells in node pNode is guaranteed to have a 64-bit
+** integer value equal to iRowid. Return the index of this cell.
+*/
+static int nodeRowidIndex(
+  Rtree *pRtree,
+  RtreeNode *pNode,
+  i64 iRowid,
+  int *piIndex
+){
+  int ii;
+  int nCell = NCELL(pNode);
+  assert( nCell<200 );
+  for(ii=0; ii<nCell; ii++){
+    if( nodeGetRowid(pRtree, pNode, ii)==iRowid ){
+      *piIndex = ii;
+      return SQLITE_OK;
+    }
+  }
+  return SQLITE_CORRUPT_VTAB;
+}
+
+/*
+** Return the index of the cell containing a pointer to node pNode
+** in its parent. If pNode is the root node, return -1.
+*/
+static int nodeParentIndex(Rtree *pRtree, RtreeNode *pNode, int *piIndex){
+  RtreeNode *pParent = pNode->pParent;
+  if( pParent ){
+    return nodeRowidIndex(pRtree, pParent, pNode->iNode, piIndex);
+  }
+  *piIndex = -1;
+  return SQLITE_OK;
+}
+
+/*
+** Compare two search points. Return negative, zero, or positive if the first
+** is less than, equal to, or greater than the second.
+**
+** The rScore is the primary key. Smaller rScore values come first.
+** If the rScore is a tie, then use iLevel as the tie breaker with smaller
+** iLevel values coming first. In this way, if rScore is the same for all
+** SearchPoints, then iLevel becomes the deciding factor and the result
+** is a depth-first search, which is the desired default behavior.
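+**
+** For example, two points with equal rScore but iLevel values 0 and 2
+** compare so that the iLevel==0 point (a leaf entry) is dequeued first,
+** which is what steers the scan toward results before unexplored siblings.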
+*/
+static int rtreeSearchPointCompare(
+  const RtreeSearchPoint *pA,
+  const RtreeSearchPoint *pB
+){
+  if( pA->rScore<pB->rScore ) return -1;
+  if( pA->rScore>pB->rScore ) return +1;
+  if( pA->iLevel<pB->iLevel ) return -1;
+  if( pA->iLevel>pB->iLevel ) return +1;
+  return 0;
+}
+
+/*
+** Interchange two search points in a cursor.
+*/
+static void rtreeSearchPointSwap(RtreeCursor *p, int i, int j){
+  RtreeSearchPoint t = p->aPoint[i];
+  assert( i<j );
+  p->aPoint[i] = p->aPoint[j];
+  p->aPoint[j] = t;
+  i++; j++;
+  if( i<RTREE_CACHE_SZ ){
+    if( j>=RTREE_CACHE_SZ ){
+      nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
+      p->aNode[i] = 0;
+    }else{
+      RtreeNode *pTemp = p->aNode[i];
+      p->aNode[i] = p->aNode[j];
+      p->aNode[j] = pTemp;
+    }
+  }
+}
+
+/*
+** Return the search point with the lowest current score.
+*/
+static RtreeSearchPoint *rtreeSearchPointFirst(RtreeCursor *pCur){
+  return pCur->bPoint ? &pCur->sPoint : pCur->nPoint ? pCur->aPoint : 0;
+}
+
+/*
+** Get the RtreeNode for the search point with the lowest score.
+*/
+static RtreeNode *rtreeNodeOfFirstSearchPoint(RtreeCursor *pCur, int *pRC){
+  sqlite3_int64 id;
+  int ii = 1 - pCur->bPoint;
+  assert( ii==0 || ii==1 );
+  assert( pCur->bPoint || pCur->nPoint );
+  if( pCur->aNode[ii]==0 ){
+    assert( pRC!=0 );
+    id = ii ? pCur->aPoint[0].id : pCur->sPoint.id;
+    *pRC = nodeAcquire(RTREE_OF_CURSOR(pCur), id, 0, &pCur->aNode[ii]);
+  }
+  return pCur->aNode[ii];
+}
+
+/*
+** Push a new element onto the priority queue
+*/
+static RtreeSearchPoint *rtreeEnqueue(
+  RtreeCursor *pCur,    /* The cursor */
+  RtreeDValue rScore,   /* Score for the new search point */
+  u8 iLevel             /* Level for the new search point */
+){
+  int i, j;
+  RtreeSearchPoint *pNew;
+  if( pCur->nPoint>=pCur->nPointAlloc ){
+    int nNew = pCur->nPointAlloc*2 + 8;
+    pNew = sqlite3_realloc(pCur->aPoint, nNew*sizeof(pCur->aPoint[0]));
+    if( pNew==0 ) return 0;
+    pCur->aPoint = pNew;
+    pCur->nPointAlloc = nNew;
+  }
+  i = pCur->nPoint++;
+  pNew = pCur->aPoint + i;
+  pNew->rScore = rScore;
+  pNew->iLevel = iLevel;
+  assert( iLevel<=RTREE_MAX_DEPTH );
+  while( i>0 ){
+    RtreeSearchPoint *pParent;
+    j = (i-1)/2;
+    pParent = pCur->aPoint + j;
+    if( rtreeSearchPointCompare(pNew, pParent)>=0 ) break;
+    rtreeSearchPointSwap(pCur, j, i);
+    i = j;
+    pNew = pParent;
+  }
+  return pNew;
+}
+
+/*
+** Allocate a new RtreeSearchPoint and return a pointer to it. Return
+** NULL if malloc fails.
+*/
+static RtreeSearchPoint *rtreeSearchPointNew(
+  RtreeCursor *pCur,    /* The cursor */
+  RtreeDValue rScore,   /* Score for the new search point */
+  u8 iLevel             /* Level for the new search point */
+){
+  RtreeSearchPoint *pNew, *pFirst;
+  pFirst = rtreeSearchPointFirst(pCur);
+  pCur->anQueue[iLevel]++;
+  if( pFirst==0
+   || pFirst->rScore>rScore
+   || (pFirst->rScore==rScore && pFirst->iLevel>iLevel)
+  ){
+    if( pCur->bPoint ){
+      int ii;
+      pNew = rtreeEnqueue(pCur, rScore, iLevel);
+      if( pNew==0 ) return 0;
+      ii = (int)(pNew - pCur->aPoint) + 1;
+      if( ii<RTREE_CACHE_SZ ){
+        assert( pCur->aNode[ii]==0 );
+        pCur->aNode[ii] = pCur->aNode[0];
+      }else{
+        nodeRelease(RTREE_OF_CURSOR(pCur), pCur->aNode[0]);
+      }
+      pCur->aNode[0] = 0;
+      *pNew = pCur->sPoint;
+    }
+    pCur->sPoint.rScore = rScore;
+    pCur->sPoint.iLevel = iLevel;
+    pCur->bPoint = 1;
+    return &pCur->sPoint;
+  }else{
+    return rtreeEnqueue(pCur, rScore, iLevel);
+  }
+}
+
+#if 0
+/* Tracing routines for the RtreeSearchPoint queue */
+static void tracePoint(RtreeSearchPoint *p, int idx, RtreeCursor *pCur){
+  if( idx<0 ){ printf(" s"); }else{ printf("%2d", idx); }
+  printf(" %d.%05lld.%02d %g %d",
+    p->iLevel, p->id, p->iCell, p->rScore, p->eWithin
+  );
+  idx++;
+  if( idx<RTREE_CACHE_SZ ){
+    printf(" %p\n", pCur->aNode[idx]);
+  }else{
+    printf("\n");
+  }
+}
+static void traceQueue(RtreeCursor *pCur, const char *zPrefix){
+  int ii;
+  printf("=== %9s ", zPrefix);
+  if( pCur->bPoint ){
+    tracePoint(&pCur->sPoint, -1, pCur);
+  }
+  for(ii=0; ii<pCur->nPoint; ii++){
+    if( ii>0 || pCur->bPoint ) printf("              ");
+    tracePoint(&pCur->aPoint[ii], ii, pCur);
+  }
+}
+# define RTREE_QUEUE_TRACE(A,B) traceQueue(A,B)
+#else
+# define RTREE_QUEUE_TRACE(A,B)   /* no-op */
+#endif
+
+/* Remove the search point with the lowest current score.
+*/
+static void rtreeSearchPointPop(RtreeCursor *p){
+  int i, j, k, n;
+  i = 1 - p->bPoint;
+  assert( i==0 || i==1 );
+  if( p->aNode[i] ){
+    nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]);
+    p->aNode[i] = 0;
+  }
+  if( p->bPoint ){
+    p->anQueue[p->sPoint.iLevel]--;
+    p->bPoint = 0;
+  }else if( p->nPoint ){
+    p->anQueue[p->aPoint[0].iLevel]--;
+    n = --p->nPoint;
+    p->aPoint[0] = p->aPoint[n];
+    if( n<RTREE_CACHE_SZ-1 ){
+      p->aNode[1] = p->aNode[n+1];
+      p->aNode[n+1] = 0;
+    }
+    i = 0;
+    while( (j = i*2+1)<n ){
+      k = j+1;
+      if( k<n && rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[j])<0 ){
+        if( rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[i])<0 ){
+          rtreeSearchPointSwap(p, i, k);
+          i = k;
+        }else{
+          break;
+        }
+      }else{
+        if( rtreeSearchPointCompare(&p->aPoint[j], &p->aPoint[i])<0 ){
+          rtreeSearchPointSwap(p, i, j);
+          i = j;
+        }else{
+          break;
+        }
+      }
+    }
+  }
+}
+
+
+/*
+** Continue the search on cursor pCur until the front of the queue
+** contains an entry suitable for returning as a result-set row,
+** or until the RtreeSearchPoint queue is empty, indicating that the
+** query has completed.
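+**
+** As a hedged sketch of one iteration (editorial note): the queue front
+** holds an interior node; its cells are tested one at a time, and the
+** first cell that satisfies every constraint is pushed as a new search
+** point one level down. The loop repeats until the front entry has
+** iLevel==0, i.e. it names a leaf cell usable as a result row.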
+*/
+static int rtreeStepToLeaf(RtreeCursor *pCur){
+  RtreeSearchPoint *p;
+  Rtree *pRtree = RTREE_OF_CURSOR(pCur);
+  RtreeNode *pNode;
+  int eWithin;
+  int rc = SQLITE_OK;
+  int nCell;
+  int nConstraint = pCur->nConstraint;
+  int ii;
+  int eInt;
+  RtreeSearchPoint x;
+
+  eInt = pRtree->eCoordType==RTREE_COORD_INT32;
+  while( (p = rtreeSearchPointFirst(pCur))!=0 && p->iLevel>0 ){
+    pNode = rtreeNodeOfFirstSearchPoint(pCur, &rc);
+    if( rc ) return rc;
+    nCell = NCELL(pNode);
+    assert( nCell<200 );
+    while( p->iCell<nCell ){
+      sqlite3_rtree_dbl rScore = (sqlite3_rtree_dbl)-1;
+      u8 *pCellData = pNode->zData + (4+pRtree->nBytesPerCell*p->iCell);
+      eWithin = FULLY_WITHIN;
+      for(ii=0; ii<nConstraint; ii++){
+        RtreeConstraint *pConstraint = pCur->aConstraint + ii;
+        if( pConstraint->op>=RTREE_MATCH ){
+          rc = rtreeCallbackConstraint(pConstraint, eInt, pCellData, p,
+                                       &rScore, &eWithin);
+          if( rc ) return rc;
+        }else if( p->iLevel==1 ){
+          rtreeLeafConstraint(pConstraint, eInt, pCellData, &eWithin);
+        }else{
+          rtreeNonleafConstraint(pConstraint, eInt, pCellData, &eWithin);
+        }
+        if( eWithin==NOT_WITHIN ) break;
+      }
+      p->iCell++;
+      if( eWithin==NOT_WITHIN ) continue;
+      x.iLevel = p->iLevel - 1;
+      if( x.iLevel ){
+        x.id = readInt64(pCellData);
+        x.iCell = 0;
+      }else{
+        x.id = p->id;
+        x.iCell = p->iCell - 1;
+      }
+      if( p->iCell>=nCell ){
+        RTREE_QUEUE_TRACE(pCur, "POP-S:");
+        rtreeSearchPointPop(pCur);
+      }
+      if( rScore<RTREE_ZERO ) rScore = RTREE_ZERO;
+      p = rtreeSearchPointNew(pCur, rScore, x.iLevel);
+      if( p==0 ) return SQLITE_NOMEM;
+      p->eWithin = (u8)eWithin;
+      p->id = x.id;
+      p->iCell = x.iCell;
+      RTREE_QUEUE_TRACE(pCur, "PUSH-S:");
+      break;
+    }
+    if( p->iCell>=nCell ){
+      RTREE_QUEUE_TRACE(pCur, "POP-Se:");
+      rtreeSearchPointPop(pCur);
+    }
+  }
+  pCur->atEOF = p==0;
+  return SQLITE_OK;
+}
+
+/*
+** Rtree virtual table module xNext method.
+*/
+static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){
+  RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+  int rc = SQLITE_OK;
+
+  /* Move to the next entry that matches the configured constraints. */
+  RTREE_QUEUE_TRACE(pCsr, "POP-Nx:");
+  rtreeSearchPointPop(pCsr);
+  rc = rtreeStepToLeaf(pCsr);
+  return rc;
+}
+
+/*
+** Rtree virtual table module xRowid method.
+*/
+static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){
+  RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor;
+  RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
+  int rc = SQLITE_OK;
+  RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
+  if( rc==SQLITE_OK && p ){
+    *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell);
+  }
+  return rc;
+}
+
+/*
+** Rtree virtual table module xColumn method.
+*/
+static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
+  Rtree *pRtree = (Rtree *)cur->pVtab;
+  RtreeCursor *pCsr = (RtreeCursor *)cur;
+  RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr);
+  RtreeCoord c;
+  int rc = SQLITE_OK;
+  RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc);
+
+  if( rc ) return rc;
+  if( p==0 ) return SQLITE_OK;
+  if( i==0 ){
+    sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell));
+  }else{
+    nodeGetCoord(pRtree, pNode, p->iCell, i-1, &c);
+#ifndef SQLITE_RTREE_INT_ONLY
+    if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+      sqlite3_result_double(ctx, c.f);
+    }else
+#endif
+    {
+      assert( pRtree->eCoordType==RTREE_COORD_INT32 );
+      sqlite3_result_int(ctx, c.i);
+    }
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Use nodeAcquire() to obtain the leaf node containing the record with
+** rowid iRowid. If successful, set *ppLeaf to point to the node and
+** return SQLITE_OK. If there is no such record in the table, set
+** *ppLeaf to 0 and return SQLITE_OK. If an error occurs, set *ppLeaf
+** to zero and return an SQLite error code.
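+**
+** (Editorial note:) the rowid-to-node lookup used here is the prepared
+** pReadRowid statement, conceptually:
+**
+**   SELECT nodeno FROM <rtree>_rowid WHERE rowid = :1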
+*/ +static int findLeafNode( + Rtree *pRtree, /* RTree to search */ + i64 iRowid, /* The rowid searching for */ + RtreeNode **ppLeaf, /* Write the node here */ + sqlite3_int64 *piNode /* Write the node-id here */ +){ + int rc; + *ppLeaf = 0; + sqlite3_bind_int64(pRtree->pReadRowid, 1, iRowid); + if( sqlite3_step(pRtree->pReadRowid)==SQLITE_ROW ){ + i64 iNode = sqlite3_column_int64(pRtree->pReadRowid, 0); + if( piNode ) *piNode = iNode; + rc = nodeAcquire(pRtree, iNode, 0, ppLeaf); + sqlite3_reset(pRtree->pReadRowid); + }else{ + rc = sqlite3_reset(pRtree->pReadRowid); + } + return rc; +} + +/* +** This function is called to configure the RtreeConstraint object passed +** as the second argument for a MATCH constraint. The value passed as the +** first argument to this function is the right-hand operand to the MATCH +** operator. +*/ +static int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){ + RtreeMatchArg *pBlob; /* BLOB returned by geometry function */ + sqlite3_rtree_query_info *pInfo; /* Callback information */ + int nBlob; /* Size of the geometry function blob */ + int nExpected; /* Expected size of the BLOB */ + + /* Check that value is actually a blob. */ + if( sqlite3_value_type(pValue)!=SQLITE_BLOB ) return SQLITE_ERROR; + + /* Check that the blob is roughly the right size. */ + nBlob = sqlite3_value_bytes(pValue); + if( nBlob<(int)sizeof(RtreeMatchArg) ){ + return SQLITE_ERROR; + } + + pInfo = (sqlite3_rtree_query_info*)sqlite3_malloc( sizeof(*pInfo)+nBlob ); + if( !pInfo ) return SQLITE_NOMEM; + memset(pInfo, 0, sizeof(*pInfo)); + pBlob = (RtreeMatchArg*)&pInfo[1]; + + memcpy(pBlob, sqlite3_value_blob(pValue), nBlob); + nExpected = (int)(sizeof(RtreeMatchArg) + + pBlob->nParam*sizeof(sqlite3_value*) + + (pBlob->nParam-1)*sizeof(RtreeDValue)); + if( pBlob->magic!=RTREE_GEOMETRY_MAGIC || nBlob!=nExpected ){ + sqlite3_free(pInfo); + return SQLITE_ERROR; + } + pInfo->pContext = pBlob->cb.pContext; + pInfo->nParam = pBlob->nParam; + pInfo->aParam = pBlob->aParam; + pInfo->apSqlParam = pBlob->apSqlParam; + + if( pBlob->cb.xGeom ){ + pCons->u.xGeom = pBlob->cb.xGeom; + }else{ + pCons->op = RTREE_QUERY; + pCons->u.xQueryFunc = pBlob->cb.xQueryFunc; + } + pCons->pInfo = pInfo; + return SQLITE_OK; +} + +/* +** Rtree virtual table module xFilter method. +*/ +static int rtreeFilter( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + Rtree *pRtree = (Rtree *)pVtabCursor->pVtab; + RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; + RtreeNode *pRoot = 0; + int ii; + int rc = SQLITE_OK; + int iCell = 0; + + rtreeReference(pRtree); + + /* Reset the cursor to the same state as rtreeOpen() leaves it in. */ + freeCursorConstraints(pCsr); + sqlite3_free(pCsr->aPoint); + memset(pCsr, 0, sizeof(RtreeCursor)); + pCsr->base.pVtab = (sqlite3_vtab*)pRtree; + + pCsr->iStrategy = idxNum; + if( idxNum==1 ){ + /* Special case - lookup by rowid. 
*/
+    RtreeNode *pLeaf;        /* Leaf on which the required cell resides */
+    RtreeSearchPoint *p;     /* Search point for the leaf */
+    i64 iRowid = sqlite3_value_int64(argv[0]);
+    i64 iNode = 0;
+    rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode);
+    if( rc==SQLITE_OK && pLeaf!=0 ){
+      p = rtreeSearchPointNew(pCsr, RTREE_ZERO, 0);
+      assert( p!=0 );  /* Always returns pCsr->sPoint */
+      pCsr->aNode[0] = pLeaf;
+      p->id = iNode;
+      p->eWithin = PARTLY_WITHIN;
+      rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell);
+      p->iCell = (u8)iCell;
+      RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:");
+    }else{
+      pCsr->atEOF = 1;
+    }
+  }else{
+    /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array
+    ** with the configured constraints.
+    */
+    rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+    if( rc==SQLITE_OK && argc>0 ){
+      pCsr->aConstraint = sqlite3_malloc(sizeof(RtreeConstraint)*argc);
+      pCsr->nConstraint = argc;
+      if( !pCsr->aConstraint ){
+        rc = SQLITE_NOMEM;
+      }else{
+        memset(pCsr->aConstraint, 0, sizeof(RtreeConstraint)*argc);
+        memset(pCsr->anQueue, 0, sizeof(u32)*(pRtree->iDepth + 1));
+        assert( (idxStr==0 && argc==0)
+                || (idxStr && (int)strlen(idxStr)==argc*2) );
+        for(ii=0; ii<argc; ii++){
+          RtreeConstraint *p = &pCsr->aConstraint[ii];
+          p->op = idxStr[ii*2];
+          p->iCoord = idxStr[ii*2+1]-'0';
+          if( p->op>=RTREE_MATCH ){
+            /* A MATCH operator. The right-hand-side must be a blob that
+            ** can be cast into an RtreeMatchArg object. One created using
+            ** an sqlite3_rtree_geometry_callback() SQL user function.
+            */
+            rc = deserializeGeometry(argv[ii], p);
+            if( rc!=SQLITE_OK ){
+              break;
+            }
+            p->pInfo->nCoord = pRtree->nDim2;
+            p->pInfo->anQueue = pCsr->anQueue;
+            p->pInfo->mxLevel = pRtree->iDepth + 1;
+          }else{
+#ifdef SQLITE_RTREE_INT_ONLY
+            p->u.rValue = sqlite3_value_int64(argv[ii]);
+#else
+            p->u.rValue = sqlite3_value_double(argv[ii]);
+#endif
+          }
+        }
+      }
+    }
+    if( rc==SQLITE_OK ){
+      RtreeSearchPoint *pNew;
+      pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1));
+      if( pNew==0 ) return SQLITE_NOMEM;
+      pNew->id = 1;
+      pNew->iCell = 0;
+      pNew->eWithin = PARTLY_WITHIN;
+      assert( pCsr->bPoint==1 );
+      pCsr->aNode[0] = pRoot;
+      pRoot = 0;
+      RTREE_QUEUE_TRACE(pCsr, "PUSH-Fm:");
+      rc = rtreeStepToLeaf(pCsr);
+    }
+  }
+
+  nodeRelease(pRtree, pRoot);
+  rtreeRelease(pRtree);
+  return rc;
+}
+
+/*
+** Rtree virtual table module xBestIndex method. There are three
+** table scan strategies to choose from (in order from most to
+** least desirable):
+**
+**   idxNum     idxStr        Strategy
+**   ------------------------------------------------
+**     1        Unused        Direct lookup by rowid.
+**     2        See below     R-tree query or full-table scan.
+**   ------------------------------------------------
+**
+** If strategy 1 is used, then idxStr is not meaningful. If strategy
+** 2 is used, idxStr is formatted to contain 2 bytes for each
+** constraint used. The first two bytes of idxStr correspond to
+** the constraint in sqlite3_index_info.aConstraintUsage[] with
+** (argvIndex==1) etc.
+**
+** The first of each pair of bytes in idxStr identifies the constraint
+** operator as follows:
+**
+**   Operator    Byte Value
+**   ----------------------
+**      =        0x41 ('A')
+**     <=        0x42 ('B')
+**      <        0x43 ('C')
+**     >=        0x44 ('D')
+**      >        0x45 ('E')
+**   MATCH       0x46 ('F')
+**   ----------------------
+**
+** The second of each pair of bytes identifies the coordinate column
+** to which the constraint applies. The leftmost coordinate column
+** is '0', the second from the left '1' etc.
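+**
+** Worked example (editorial, with a hypothetical table, and assuming the
+** constraints are presented in this order):
+**
+**   CREATE VIRTUAL TABLE rt USING rtree(id, minX, maxX, minY, maxY);
+**   SELECT * FROM rt WHERE maxX <= 10 AND minY >= 2;
+**
+** Strategy 2 encodes idxStr as "B1D2":  'B' (<=) on coordinate column 1
+** (maxX), then 'D' (>=) on coordinate column 2 (minY).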
+*/
+static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+  Rtree *pRtree = (Rtree*)tab;
+  int rc = SQLITE_OK;
+  int ii;
+  int bMatch = 0;                 /* True if there exists a MATCH constraint */
+  i64 nRow;                       /* Estimated rows returned by this scan */
+
+  int iIdx = 0;
+  char zIdxStr[RTREE_MAX_DIMENSIONS*8+1];
+  memset(zIdxStr, 0, sizeof(zIdxStr));
+
+  /* Check if there exists a MATCH constraint - even an unusable one. If there
+  ** is, do not consider the lookup-by-rowid plan as using such a plan would
+  ** require the VDBE to evaluate the MATCH constraint, which is not currently
+  ** possible. */
+  for(ii=0; ii<pIdxInfo->nConstraint; ii++){
+    if( pIdxInfo->aConstraint[ii].op==SQLITE_INDEX_CONSTRAINT_MATCH ){
+      bMatch = 1;
+    }
+  }
+
+  assert( pIdxInfo->idxStr==0 );
+  for(ii=0; ii<pIdxInfo->nConstraint && iIdx<(int)(sizeof(zIdxStr)-1); ii++){
+    struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];
+
+    if( bMatch==0 && p->usable
+     && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ
+    ){
+      /* We have an equality constraint on the rowid. Use strategy 1. */
+      int jj;
+      for(jj=0; jj<ii; jj++){
+        pIdxInfo->aConstraintUsage[jj].argvIndex = 0;
+        pIdxInfo->aConstraintUsage[jj].omit = 0;
+      }
+      pIdxInfo->idxNum = 1;
+      pIdxInfo->aConstraintUsage[ii].argvIndex = 1;
+      pIdxInfo->aConstraintUsage[jj].omit = 1;
+
+      /* This strategy involves two rowid lookups on B-Tree structures
+      ** and then a linear search of an R-Tree node. This should be
+      ** considered almost as quick as a direct rowid lookup (for which
+      ** sqlite uses an internal cost of 0.0). It is expected to return
+      ** a single row.
+      */
+      pIdxInfo->estimatedCost = 30.0;
+      pIdxInfo->estimatedRows = 1;
+      return SQLITE_OK;
+    }
+
+    if( p->usable && (p->iColumn>0 || p->op==SQLITE_INDEX_CONSTRAINT_MATCH) ){
+      u8 op;
+      switch( p->op ){
+        case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break;
+        case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break;
+        case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break;
+        case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break;
+        case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break;
+        default:
+          assert( p->op==SQLITE_INDEX_CONSTRAINT_MATCH );
+          op = RTREE_MATCH;
+          break;
+      }
+      zIdxStr[iIdx++] = op;
+      zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0');
+      pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
+      pIdxInfo->aConstraintUsage[ii].omit = 1;
+    }
+  }
+
+  pIdxInfo->idxNum = 2;
+  pIdxInfo->needToFreeIdxStr = 1;
+  if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){
+    return SQLITE_NOMEM;
+  }
+
+  nRow = pRtree->nRowEst >> (iIdx/2);
+  pIdxInfo->estimatedCost = (double)6.0 * (double)nRow;
+  pIdxInfo->estimatedRows = nRow;
+
+  return rc;
+}
+
+/*
+** Return the N-dimensional volume of the cell stored in *p.
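+**
+** For example (editorial): in two dimensions the cell [x1,x2]x[y1,y2]
+** has volume (x2-x1)*(y2-y1); the switch below accumulates the product
+** one dimension at a time, relying on case fall-through.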
+*/
+static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){
+  RtreeDValue area = (RtreeDValue)1;
+  assert( pRtree->nDim>=1 && pRtree->nDim<=5 );
+#ifndef SQLITE_RTREE_INT_ONLY
+  if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+    switch( pRtree->nDim ){
+      case 5:  area  = p->aCoord[9].f - p->aCoord[8].f;
+      case 4:  area *= p->aCoord[7].f - p->aCoord[6].f;
+      case 3:  area *= p->aCoord[5].f - p->aCoord[4].f;
+      case 2:  area *= p->aCoord[3].f - p->aCoord[2].f;
+      default: area *= p->aCoord[1].f - p->aCoord[0].f;
+    }
+  }else
+#endif
+  {
+    switch( pRtree->nDim ){
+      case 5:  area  = p->aCoord[9].i - p->aCoord[8].i;
+      case 4:  area *= p->aCoord[7].i - p->aCoord[6].i;
+      case 3:  area *= p->aCoord[5].i - p->aCoord[4].i;
+      case 2:  area *= p->aCoord[3].i - p->aCoord[2].i;
+      default: area *= p->aCoord[1].i - p->aCoord[0].i;
+    }
+  }
+  return area;
+}
+
+/*
+** Return the margin length of cell p. The margin length is the sum
+** of the objects size in each dimension.
+*/
+static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){
+  RtreeDValue margin = 0;
+  int ii = pRtree->nDim2 - 2;
+  do{
+    margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]));
+    ii -= 2;
+  }while( ii>=0 );
+  return margin;
+}
+
+/*
+** Store the union of cells p1 and p2 in p1.
+*/
+static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
+  int ii = 0;
+  if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+    do{
+      p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f);
+      p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f);
+      ii += 2;
+    }while( ii<pRtree->nDim2 );
+  }else{
+    do{
+      p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i);
+      p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i);
+      ii += 2;
+    }while( ii<pRtree->nDim2 );
+  }
+}
+
+/*
+** Return true if the area covered by p2 is a subset of the area covered
+** by p1. False otherwise.
+*/
+static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
+  int ii;
+  int isInt = (pRtree->eCoordType==RTREE_COORD_INT32);
+  for(ii=0; ii<pRtree->nDim2; ii+=2){
+    RtreeCoord *a1 = &p1->aCoord[ii];
+    RtreeCoord *a2 = &p2->aCoord[ii];
+    if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f))
+     || ( isInt && (a2[0].i<a1[0].i || a2[1].i>a1[1].i))
+    ){
+      return 0;
+    }
+  }
+  return 1;
+}
+
+/*
+** Return the amount cell p would grow by if it were unioned with pCell.
+*/
+static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){
+  RtreeDValue area;
+  RtreeCell cell;
+  memcpy(&cell, p, sizeof(RtreeCell));
+  area = cellArea(pRtree, &cell);
+  cellUnion(pRtree, &cell, pCell);
+  return (cellArea(pRtree, &cell)-area);
+}
+
+static RtreeDValue cellOverlap(
+  Rtree *pRtree,
+  RtreeCell *p,
+  RtreeCell *aCell,
+  int nCell
+){
+  int ii;
+  RtreeDValue overlap = RTREE_ZERO;
+  for(ii=0; ii<nCell; ii++){
+    int jj;
+    RtreeDValue o = (RtreeDValue)1;
+    for(jj=0; jj<pRtree->nDim2; jj+=2){
+      RtreeDValue x1, x2;
+      x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj]));
+      x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1]));
+      if( x2<x1 ){
+        o = (RtreeDValue)0;
+        break;
+      }else{
+        o = o * (x2-x1);
+      }
+    }
+    overlap += o;
+  }
+  return overlap;
+}
+
+/*
+** This function implements the ChooseLeaf algorithm from Gutman[84].
+** ChooseSubTree in r*tree terminology.
+*/
+static int ChooseLeaf(
+  Rtree *pRtree,               /* Rtree table */
+  RtreeCell *pCell,            /* Cell to insert into rtree */
+  int iHeight,                 /* Height of sub-tree rooted at pCell */
+  RtreeNode **ppLeaf           /* OUT: Selected leaf page */
+){
+  int rc;
+  int ii;
+  RtreeNode *pNode;
+  rc = nodeAcquire(pRtree, 1, 0, &pNode);
+
+  for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){
+    int iCell;
+    sqlite3_int64 iBest = 0;
+
+    RtreeDValue fMinGrowth = RTREE_ZERO;
+    RtreeDValue fMinArea = RTREE_ZERO;
+
+    int nCell = NCELL(pNode);
+    RtreeCell cell;
+    RtreeNode *pChild;
+
+    RtreeCell *aCell = 0;
+
+    /* Select the child node which will be enlarged the least if pCell
+    ** is inserted into it. Resolve ties by choosing the entry with
+    ** the smallest area.
+    */
+    for(iCell=0; iCell<nCell; iCell++){
+      int bBest = 0;
+      RtreeDValue growth;
+      RtreeDValue area;
+      nodeGetCell(pRtree, pNode, iCell, &cell);
+      growth = cellGrowth(pRtree, &cell, pCell);
+      area = cellArea(pRtree, &cell);
+      if( iCell==0||growth<fMinGrowth||(growth==fMinGrowth && area<fMinArea) ){
+        bBest = 1;
+      }
+      if( bBest ){
+        fMinGrowth = growth;
+        fMinArea = area;
+        iBest = cell.iRowid;
+      }
+    }
+
+    sqlite3_free(aCell);
+    rc = nodeAcquire(pRtree, iBest, pNode, &pChild);
+    nodeRelease(pRtree, pNode);
+    pNode = pChild;
+  }
+
+  *ppLeaf = pNode;
+  return rc;
+}
+
+/*
+** A cell with the same content as pCell has just been inserted into
+** node pNode. This function updates the bounding box cells in all
+** ancestor elements.
+*/
+static int AdjustTree(
+  Rtree *pRtree,                    /* Rtree table */
+  RtreeNode *pNode,                 /* Adjust ancestry of this node. */
+  RtreeCell *pCell                  /* This cell was just inserted */
+){
+  RtreeNode *p = pNode;
+  while( p->pParent ){
+    RtreeNode *pParent = p->pParent;
+    RtreeCell cell;
+    int iCell;
+
+    if( nodeParentIndex(pRtree, p, &iCell) ){
+      return SQLITE_CORRUPT_VTAB;
+    }
+
+    nodeGetCell(pRtree, pParent, iCell, &cell);
+    if( !cellContains(pRtree, &cell, pCell) ){
+      cellUnion(pRtree, &cell, pCell);
+      nodeOverwriteCell(pRtree, pParent, &cell, iCell);
+    }
+
+    p = pParent;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Write mapping (iRowid->iNode) to the <rtree>_rowid table.
+*/
+static int rowidWrite(Rtree *pRtree, sqlite3_int64 iRowid, sqlite3_int64 iNode){
+  sqlite3_bind_int64(pRtree->pWriteRowid, 1, iRowid);
+  sqlite3_bind_int64(pRtree->pWriteRowid, 2, iNode);
+  sqlite3_step(pRtree->pWriteRowid);
+  return sqlite3_reset(pRtree->pWriteRowid);
+}
+
+/*
+** Write mapping (iNode->iPar) to the <rtree>_parent table.
+*/
+static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){
+  sqlite3_bind_int64(pRtree->pWriteParent, 1, iNode);
+  sqlite3_bind_int64(pRtree->pWriteParent, 2, iPar);
+  sqlite3_step(pRtree->pWriteParent);
+  return sqlite3_reset(pRtree->pWriteParent);
+}
+
+static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int);
+
+
+/*
+** Arguments aIdx, aDistance and aSpare all point to arrays of size
+** nIdx. The aIdx array contains the set of integers from 0 to
+** (nIdx-1) in no particular order. This function sorts the values
+** in aIdx according to the indexed values in aDistance. For
+** example, assuming the inputs:
+**
+**   aIdx      = { 0,   1,   2,   3 }
+**   aDistance = { 5.0, 2.0, 7.0, 6.0 }
+**
+** this function sets the aIdx array to contain:
+**
+**   aIdx      = { 1,   0,   3,   2 }
+**
+** The aSpare array is used as temporary working space by the
+** sorting algorithm.
+*/
+static void SortByDistance(
+  int *aIdx,
+  int nIdx,
+  RtreeDValue *aDistance,
+  int *aSpare
+){
+  if( nIdx>1 ){
+    int iLeft = 0;
+    int iRight = 0;
+
+    int nLeft = nIdx/2;
+    int nRight = nIdx-nLeft;
+    int *aLeft = aIdx;
+    int *aRight = &aIdx[nLeft];
+
+    SortByDistance(aLeft, nLeft, aDistance, aSpare);
+    SortByDistance(aRight, nRight, aDistance, aSpare);
+
+    memcpy(aSpare, aLeft, sizeof(int)*nLeft);
+    aLeft = aSpare;
+
+    while( iLeft<nLeft || iRight<nRight ){
+      if( iLeft==nLeft ){
+        aIdx[iLeft+iRight] = aRight[iRight];
+        iRight++;
+      }else if( iRight==nRight ){
+        aIdx[iLeft+iRight] = aLeft[iLeft];
+        iLeft++;
+      }else{
+        RtreeDValue fLeft = aDistance[aLeft[iLeft]];
+        RtreeDValue fRight = aDistance[aRight[iRight]];
+        if( fLeft<fRight ){
+          aIdx[iLeft+iRight] = aLeft[iLeft];
+          iLeft++;
+        }else{
+          aIdx[iLeft+iRight] = aRight[iRight];
+          iRight++;
+        }
+      }
+    }
+  }
+}
+
+/*
+** Arguments aIdx, aCell and aSpare all point to arrays of size
+** nIdx. The aIdx array contains the set of integers from 0 to
+** (nIdx-1) in no particular order. This function sorts the values
+** in aIdx according to dimension iDim of the cells in aCell. The
+** minimum value of dimension iDim is considered first, the
+** maximum used to break ties.
+**
+** The aSpare array is used as temporary working space by the
+** sorting algorithm.
+*/
+static void SortByDimension(
+  Rtree *pRtree,
+  int *aIdx,
+  int nIdx,
+  int iDim,
+  RtreeCell *aCell,
+  int *aSpare
+){
+  if( nIdx>1 ){
+
+    int iLeft = 0;
+    int iRight = 0;
+
+    int nLeft = nIdx/2;
+    int nRight = nIdx-nLeft;
+    int *aLeft = aIdx;
+    int *aRight = &aIdx[nLeft];
+
+    SortByDimension(pRtree, aLeft, nLeft, iDim, aCell, aSpare);
+    SortByDimension(pRtree, aRight, nRight, iDim, aCell, aSpare);
+
+    memcpy(aSpare, aLeft, sizeof(int)*nLeft);
+    aLeft = aSpare;
+    while( iLeft<nLeft || iRight<nRight ){
+      RtreeDValue xleft1 = DCOORD(aCell[aLeft[iLeft]].aCoord[iDim*2]);
+      RtreeDValue xleft2 = DCOORD(aCell[aLeft[iLeft]].aCoord[iDim*2+1]);
+      RtreeDValue xright1 = DCOORD(aCell[aRight[iRight]].aCoord[iDim*2]);
+      RtreeDValue xright2 = DCOORD(aCell[aRight[iRight]].aCoord[iDim*2+1]);
+      if( (iLeft!=nLeft) && ((iRight==nRight)
+       || (xleft1<xright1)
+       || (xleft1==xright1 && xleft2<xright2)
+      )){
+        aIdx[iLeft+iRight] = aLeft[iLeft];
+        iLeft++;
+      }else{
+        aIdx[iLeft+iRight] = aRight[iRight];
+        iRight++;
+      }
+    }
+  }
+}
+
+/*
+** Implementation of the R*-tree variant of SplitNode from Beckman[1990].
+*/
+static int splitNodeStartree(
+  Rtree *pRtree,
+  RtreeCell *aCell,
+  int nCell,
+  RtreeNode *pLeft,
+  RtreeNode *pRight,
+  RtreeCell *pBboxLeft,
+  RtreeCell *pBboxRight
+){
+  int **aaSorted;
+  int *aSpare;
+  int ii;
+
+  int iBestDim = 0;
+  int iBestSplit = 0;
+  RtreeDValue fBestMargin = RTREE_ZERO;
+
+  int nByte = (pRtree->nDim+1)*(sizeof(int*)+nCell*sizeof(int));
+
+  aaSorted = (int **)sqlite3_malloc(nByte);
+  if( !aaSorted ){
+    return SQLITE_NOMEM;
+  }
+
+  aSpare = &((int *)&aaSorted[pRtree->nDim])[pRtree->nDim*nCell];
+  memset(aaSorted, 0, nByte);
+  for(ii=0; ii<pRtree->nDim; ii++){
+    int jj;
+    aaSorted[ii] = &((int *)&aaSorted[pRtree->nDim])[ii*nCell];
+    for(jj=0; jj<nCell; jj++){
+      aaSorted[ii][jj] = jj;
+    }
+    SortByDimension(pRtree, aaSorted[ii], nCell, ii, aCell, aSpare);
+  }
+
+  for(ii=0; ii<pRtree->nDim; ii++){
+    RtreeDValue margin = RTREE_ZERO;
+    RtreeDValue fBestOverlap = RTREE_ZERO;
+    RtreeDValue fBestArea = RTREE_ZERO;
+    int iBestLeft = 0;
+    int nLeft;
+
+    for(
+      nLeft=RTREE_MINCELLS(pRtree);
+      nLeft<=(nCell-RTREE_MINCELLS(pRtree));
+      nLeft++
+    ){
+      RtreeCell left;
+      RtreeCell right;
+      int kk;
+      RtreeDValue overlap;
+      RtreeDValue area;
+
+      memcpy(&left, &aCell[aaSorted[ii][0]], sizeof(RtreeCell));
+      memcpy(&right, &aCell[aaSorted[ii][nCell-1]], sizeof(RtreeCell));
+      for(kk=1; kk<(nCell-1); kk++){
+        if( kk<nLeft ){
+          cellUnion(pRtree, &left, &aCell[aaSorted[ii][kk]]);
+        }else{
+          cellUnion(pRtree, &right, &aCell[aaSorted[ii][kk]]);
+        }
+      }
+      margin += cellMargin(pRtree, &left);
+      margin += cellMargin(pRtree, &right);
+      overlap = cellOverlap(pRtree, &left, &right, 1);
+      area = cellArea(pRtree, &left) + cellArea(pRtree, &right);
+      if( (nLeft==RTREE_MINCELLS(pRtree))
+       || (overlap<fBestOverlap)
+       || (overlap==fBestOverlap && area<fBestArea)
+      ){
+        iBestLeft = nLeft;
+        fBestOverlap = overlap;
+        fBestArea = area;
+      }
+    }
+
+    if( ii==0 || margin<fBestMargin ){
+      iBestDim = ii;
+      fBestMargin = margin;
+      iBestSplit = iBestLeft;
+    }
+  }
+
+  memcpy(pBboxLeft, &aCell[aaSorted[iBestDim][0]], sizeof(RtreeCell));
+  memcpy(pBboxRight, &aCell[aaSorted[iBestDim][iBestSplit]], sizeof(RtreeCell));
+  for(ii=0; ii<nCell; ii++){
+    RtreeNode *pTarget = (ii<iBestSplit)?pLeft:pRight;
+    RtreeCell *pBbox = (ii<iBestSplit)?pBboxLeft:pBboxRight;
+    RtreeCell *pCell = &aCell[aaSorted[iBestDim][ii]];
+    nodeInsertCell(pRtree, pTarget, pCell);
+    cellUnion(pRtree, pBbox, pCell);
+  }
+
+  sqlite3_free(aaSorted);
+  return SQLITE_OK;
+}
+
+
+static int updateMapping(
+  Rtree *pRtree,
+  i64 iRowid,
+  RtreeNode *pNode,
+  int iHeight
+){
+  int (*xSetMapping)(Rtree *, sqlite3_int64, sqlite3_int64);
+  xSetMapping = ((iHeight==0)?rowidWrite:parentWrite);
+  if( iHeight>0 ){
+    RtreeNode *pChild = nodeHashLookup(pRtree, iRowid);
+    if( pChild ){
+      nodeRelease(pRtree, pChild->pParent);
+      nodeReference(pNode);
+      pChild->pParent = pNode;
+    }
+  }
+  return xSetMapping(pRtree, iRowid, pNode->iNode);
+}
+
+static int SplitNode(
+  Rtree *pRtree,
+  RtreeNode *pNode,
+  RtreeCell *pCell,
+  int iHeight
+){
+  int i;
+  int newCellIsRight = 0;
+
+  int rc = SQLITE_OK;
+  int nCell = NCELL(pNode);
+  RtreeCell *aCell;
+  int *aiUsed;
+
+  RtreeNode *pLeft = 0;
+  RtreeNode *pRight = 0;
+
+  RtreeCell leftbbox;
+  RtreeCell rightbbox;
+
+  /* Allocate an array and populate it with a copy of pCell and
+  ** all cells from node pLeft. Then zero the original node.
+  */
+  aCell = sqlite3_malloc((sizeof(RtreeCell)+sizeof(int))*(nCell+1));
+  if( !aCell ){
+    rc = SQLITE_NOMEM;
+    goto splitnode_out;
+  }
+  aiUsed = (int *)&aCell[nCell+1];
+  memset(aiUsed, 0, sizeof(int)*(nCell+1));
+  for(i=0; i<nCell; i++){
+    nodeGetCell(pRtree, pNode, i, &aCell[i]);
+  }
+  nodeZero(pRtree, pNode);
+  memcpy(&aCell[nCell], pCell, sizeof(RtreeCell));
+  nCell++;
+
+  if( pNode->iNode==1 ){
+    pRight = nodeNew(pRtree, pNode);
+    pLeft = nodeNew(pRtree, pNode);
+    pRtree->iDepth++;
+    pNode->isDirty = 1;
+    writeInt16(pNode->zData, pRtree->iDepth);
+  }else{
+    pLeft = pNode;
+    pRight = nodeNew(pRtree, pLeft->pParent);
+    nodeReference(pLeft);
+  }
+
+  if( !pLeft || !pRight ){
+    rc = SQLITE_NOMEM;
+    goto splitnode_out;
+  }
+
+  memset(pLeft->zData, 0, pRtree->iNodeSize);
+  memset(pRight->zData, 0, pRtree->iNodeSize);
+
+  rc = splitNodeStartree(pRtree, aCell, nCell, pLeft, pRight,
+                         &leftbbox, &rightbbox);
+  if( rc!=SQLITE_OK ){
+    goto splitnode_out;
+  }
+
+  /* Ensure both child nodes have node numbers assigned to them by calling
+  ** nodeWrite(). Node pRight always needs a node number, as it was created
+  ** by nodeNew() above. But node pLeft sometimes already has a node number.
+  ** In this case avoid the call to nodeWrite().
+  */
+  if( SQLITE_OK!=(rc = nodeWrite(pRtree, pRight))
+   || (0==pLeft->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pLeft)))
+  ){
+    goto splitnode_out;
+  }
+
+  rightbbox.iRowid = pRight->iNode;
+  leftbbox.iRowid = pLeft->iNode;
+
+  if( pNode->iNode==1 ){
+    rc = rtreeInsertCell(pRtree, pLeft->pParent, &leftbbox, iHeight+1);
+    if( rc!=SQLITE_OK ){
+      goto splitnode_out;
+    }
+  }else{
+    RtreeNode *pParent = pLeft->pParent;
+    int iCell;
+    rc = nodeParentIndex(pRtree, pLeft, &iCell);
+    if( rc==SQLITE_OK ){
+      nodeOverwriteCell(pRtree, pParent, &leftbbox, iCell);
+      rc = AdjustTree(pRtree, pParent, &leftbbox);
+    }
+    if( rc!=SQLITE_OK ){
+      goto splitnode_out;
+    }
+  }
+  if( (rc = rtreeInsertCell(pRtree, pRight->pParent, &rightbbox, iHeight+1)) ){
+    goto splitnode_out;
+  }
+
+  for(i=0; i<NCELL(pRight); i++){
+    i64 iRowid = nodeGetRowid(pRtree, pRight, i);
+    rc = updateMapping(pRtree, iRowid, pRight, iHeight);
+    if( iRowid==pCell->iRowid ){
+      newCellIsRight = 1;
+    }
+    if( rc!=SQLITE_OK ){
+      goto splitnode_out;
+    }
+  }
+  if( pNode->iNode==1 ){
+    for(i=0; i<NCELL(pLeft); i++){
+      i64 iRowid = nodeGetRowid(pRtree, pLeft, i);
+      rc = updateMapping(pRtree, iRowid, pLeft, iHeight);
+      if( rc!=SQLITE_OK ){
+        goto splitnode_out;
+      }
+    }
+  }else if( newCellIsRight==0 ){
+    rc = updateMapping(pRtree, pCell->iRowid, pLeft, iHeight);
+  }
+
+  if( rc==SQLITE_OK ){
+    rc = nodeRelease(pRtree, pRight);
+    pRight = 0;
+  }
+  if( rc==SQLITE_OK ){
+    rc = nodeRelease(pRtree, pLeft);
+    pLeft = 0;
+  }
+
+splitnode_out:
+  nodeRelease(pRtree, pRight);
+  nodeRelease(pRtree, pLeft);
+  sqlite3_free(aCell);
+  return rc;
+}
+
+/*
+** If node pLeaf is not the root of the r-tree and its pParent pointer is
+** still NULL, load all ancestor nodes of pLeaf into memory and populate
+** the pLeaf->pParent chain all the way up to the root node.
+**
+** This operation is required when a row is deleted (or updated - an update
+** is implemented as a delete followed by an insert). SQLite provides the
+** rowid of the row to delete, which can be used to find the leaf on which
+** the entry resides (argument pLeaf). Once the leaf is located, this
+** function is called to determine its ancestry.
+*/
+static int fixLeafParent(Rtree *pRtree, RtreeNode *pLeaf){
+  int rc = SQLITE_OK;
+  RtreeNode *pChild = pLeaf;
+  while( rc==SQLITE_OK && pChild->iNode!=1 && pChild->pParent==0 ){
+    int rc2 = SQLITE_OK;          /* sqlite3_reset() return code */
+    sqlite3_bind_int64(pRtree->pReadParent, 1, pChild->iNode);
+    rc = sqlite3_step(pRtree->pReadParent);
+    if( rc==SQLITE_ROW ){
+      RtreeNode *pTest;           /* Used to test for reference loops */
+      i64 iNode;                  /* Node number of parent node */
+
+      /* Before setting pChild->pParent, test that we are not creating a
+      ** loop of references (as we would if, say, pChild==pParent). We don't
+      ** want to do this as it leads to a memory leak when trying to delete
+      ** the reference counted node structures.
+      */
+      iNode = sqlite3_column_int64(pRtree->pReadParent, 0);
+      for(pTest=pLeaf; pTest && pTest->iNode!=iNode; pTest=pTest->pParent);
+      if( !pTest ){
+        rc2 = nodeAcquire(pRtree, iNode, 0, &pChild->pParent);
+      }
+    }
+    rc = sqlite3_reset(pRtree->pReadParent);
+    if( rc==SQLITE_OK ) rc = rc2;
+    if( rc==SQLITE_OK && !pChild->pParent ) rc = SQLITE_CORRUPT_VTAB;
+    pChild = pChild->pParent;
+  }
+  return rc;
+}
+
+static int deleteCell(Rtree *, RtreeNode *, int, int);
+
+static int removeNode(Rtree *pRtree, RtreeNode *pNode, int iHeight){
+  int rc;
+  int rc2;
+  RtreeNode *pParent = 0;
+  int iCell;
+
+  assert( pNode->nRef==1 );
+
+  /* Remove the entry in the parent cell. */
+  rc = nodeParentIndex(pRtree, pNode, &iCell);
+  if( rc==SQLITE_OK ){
+    pParent = pNode->pParent;
+    pNode->pParent = 0;
+    rc = deleteCell(pRtree, pParent, iCell, iHeight+1);
+  }
+  rc2 = nodeRelease(pRtree, pParent);
+  if( rc==SQLITE_OK ){
+    rc = rc2;
+  }
+  if( rc!=SQLITE_OK ){
+    return rc;
+  }
+
+  /* Remove the xxx_node entry. */
+  sqlite3_bind_int64(pRtree->pDeleteNode, 1, pNode->iNode);
+  sqlite3_step(pRtree->pDeleteNode);
+  if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteNode)) ){
+    return rc;
+  }
+
+  /* Remove the xxx_parent entry. */
+  sqlite3_bind_int64(pRtree->pDeleteParent, 1, pNode->iNode);
+  sqlite3_step(pRtree->pDeleteParent);
+  if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteParent)) ){
+    return rc;
+  }
+
+  /* Remove the node from the in-memory hash table and link it into
+  ** the Rtree.pDeleted list. Its contents will be re-inserted later on.
+  */
+  nodeHashDelete(pRtree, pNode);
+  pNode->iNode = iHeight;
+  pNode->pNext = pRtree->pDeleted;
+  pNode->nRef++;
+  pRtree->pDeleted = pNode;
+
+  return SQLITE_OK;
+}
+
+static int fixBoundingBox(Rtree *pRtree, RtreeNode *pNode){
+  RtreeNode *pParent = pNode->pParent;
+  int rc = SQLITE_OK;
+  if( pParent ){
+    int ii;
+    int nCell = NCELL(pNode);
+    RtreeCell box;                            /* Bounding box for pNode */
+    nodeGetCell(pRtree, pNode, 0, &box);
+    for(ii=1; ii<nCell; ii++){
+      RtreeCell cell;
+      nodeGetCell(pRtree, pNode, ii, &cell);
+      cellUnion(pRtree, &box, &cell);
+    }
+    box.iRowid = pNode->iNode;
+    rc = nodeParentIndex(pRtree, pNode, &ii);
+    if( rc==SQLITE_OK ){
+      nodeOverwriteCell(pRtree, pParent, &box, ii);
+      rc = fixBoundingBox(pRtree, pParent);
+    }
+  }
+  return rc;
+}
+
+/*
+** Delete the cell at index iCell of node pNode. After removing the
+** cell, adjust the r-tree data structure if required.
+*/
+static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){
+  RtreeNode *pParent;
+  int rc;
+
+  if( SQLITE_OK!=(rc = fixLeafParent(pRtree, pNode)) ){
+    return rc;
+  }
+
+  /* Remove the cell from the node. This call just moves bytes around
+  ** the in-memory node image, so it cannot fail.
+  */
+  nodeDeleteCell(pRtree, pNode, iCell);
+
+  /* If the node is not the tree root and now has less than the minimum
+  ** number of cells, remove it from the tree. Otherwise, update the
+  ** cell in the parent node so that it tightly contains the updated
+  ** node.
+  */
+  pParent = pNode->pParent;
+  assert( pParent || pNode->iNode==1 );
+  if( pParent ){
+    if( NCELL(pNode)<RTREE_MINCELLS(pRtree) ){
+      rc = removeNode(pRtree, pNode, iHeight);
+    }else{
+      rc = fixBoundingBox(pRtree, pNode);
+    }
+  }
+
+  return rc;
+}
+
+static int Reinsert(
+  Rtree *pRtree,
+  RtreeNode *pNode,
+  RtreeCell *pCell,
+  int iHeight
+){
+  int *aOrder;
+  int *aSpare;
+  RtreeCell *aCell;
+  RtreeDValue *aDistance;
+  int nCell;
+  RtreeDValue aCenterCoord[RTREE_MAX_DIMENSIONS];
+  int iDim;
+  int ii;
+  int rc = SQLITE_OK;
+  int n;
+
+  memset(aCenterCoord, 0, sizeof(RtreeDValue)*RTREE_MAX_DIMENSIONS);
+
+  nCell = NCELL(pNode)+1;
+  n = (nCell+1)&(~1);
+
+  /* Allocate the buffers used by this operation. The allocation is
+  ** relinquished before this function returns.
+  */
+  aCell = (RtreeCell *)sqlite3_malloc(n * (
+    sizeof(RtreeCell)     +       /* aCell array */
+    sizeof(int)           +       /* aOrder array */
+    sizeof(int)           +       /* aSpare array */
+    sizeof(RtreeDValue)           /* aDistance array */
+  ));
+  if( !aCell ){
+    return SQLITE_NOMEM;
+  }
+  aOrder    = (int *)&aCell[n];
+  aSpare    = (int *)&aOrder[n];
+  aDistance = (RtreeDValue *)&aSpare[n];
+
+  for(ii=0; ii<nCell; ii++){
+    if( ii==(nCell-1) ){
+      memcpy(&aCell[ii], pCell, sizeof(RtreeCell));
+    }else{
+      nodeGetCell(pRtree, pNode, ii, &aCell[ii]);
+    }
+    aOrder[ii] = ii;
+    for(iDim=0; iDim<pRtree->nDim; iDim++){
+      aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]);
+      aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]);
+    }
+  }
+  for(iDim=0; iDim<pRtree->nDim; iDim++){
+    aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2));
+  }
+
+  for(ii=0; ii<nCell; ii++){
+    aDistance[ii] = RTREE_ZERO;
+    for(iDim=0; iDim<pRtree->nDim; iDim++){
+      RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) -
+                               DCOORD(aCell[ii].aCoord[iDim*2]));
+      aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]);
+    }
+  }
+
+  SortByDistance(aOrder, nCell, aDistance, aSpare);
+  nodeZero(pRtree, pNode);
+
+  for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){
+    RtreeCell *p = &aCell[aOrder[ii]];
+    nodeInsertCell(pRtree, pNode, p);
+    if( p->iRowid==pCell->iRowid ){
+      if( iHeight==0 ){
+        rc = rowidWrite(pRtree, p->iRowid, pNode->iNode);
+      }else{
+        rc = parentWrite(pRtree, p->iRowid, pNode->iNode);
+      }
+    }
+  }
+  if( rc==SQLITE_OK ){
+    rc = fixBoundingBox(pRtree, pNode);
+  }
+  for(; rc==SQLITE_OK && ii<nCell; ii++){
+    /* Find a node to store this cell in. pNode->iNode currently contains
+    ** the height of the sub-tree headed by the cell.
+    */
+    RtreeNode *pInsert;
+    RtreeCell *p = &aCell[aOrder[ii]];
+    rc = ChooseLeaf(pRtree, p, iHeight, &pInsert);
+    if( rc==SQLITE_OK ){
+      int rc2;
+      rc = rtreeInsertCell(pRtree, pInsert, p, iHeight);
+      rc2 = nodeRelease(pRtree, pInsert);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+  }
+
+  sqlite3_free(aCell);
+  return rc;
+}
+
+/*
+** Insert cell pCell into node pNode. Node pNode is the head of a
+** subtree iHeight high (leaf nodes have iHeight==0).
+*/
+static int rtreeInsertCell(
+  Rtree *pRtree,
+  RtreeNode *pNode,
+  RtreeCell *pCell,
+  int iHeight
+){
+  int rc = SQLITE_OK;
+  if( iHeight>0 ){
+    RtreeNode *pChild = nodeHashLookup(pRtree, pCell->iRowid);
+    if( pChild ){
+      nodeRelease(pRtree, pChild->pParent);
+      nodeReference(pNode);
+      pChild->pParent = pNode;
+    }
+  }
+  if( nodeInsertCell(pRtree, pNode, pCell) ){
+    if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){
+      rc = SplitNode(pRtree, pNode, pCell, iHeight);
+    }else{
+      pRtree->iReinsertHeight = iHeight;
+      rc = Reinsert(pRtree, pNode, pCell, iHeight);
+    }
+  }else{
+    rc = AdjustTree(pRtree, pNode, pCell);
+    if( rc==SQLITE_OK ){
+      if( iHeight==0 ){
+        rc = rowidWrite(pRtree, pCell->iRowid, pNode->iNode);
+      }else{
+        rc = parentWrite(pRtree, pCell->iRowid, pNode->iNode);
+      }
+    }
+  }
+  return rc;
+}
+
+static int reinsertNodeContent(Rtree *pRtree, RtreeNode *pNode){
+  int ii;
+  int rc = SQLITE_OK;
+  int nCell = NCELL(pNode);
+
+  for(ii=0; rc==SQLITE_OK && ii<nCell; ii++){
+    RtreeNode *pInsert;
+    RtreeCell cell;
+    nodeGetCell(pRtree, pNode, ii, &cell);
+
+    /* Find a node to store this cell in. pNode->iNode currently contains
+    ** the height of the sub-tree headed by the cell.
+    */
+    rc = ChooseLeaf(pRtree, &cell, (int)pNode->iNode, &pInsert);
+    if( rc==SQLITE_OK ){
+      int rc2;
+      rc = rtreeInsertCell(pRtree, pInsert, &cell, (int)pNode->iNode);
+      rc2 = nodeRelease(pRtree, pInsert);
+      if( rc==SQLITE_OK ){
+        rc = rc2;
+      }
+    }
+  }
+  return rc;
+}
+
+/*
+** Select a currently unused rowid for a new r-tree record.
+*/
+static int newRowid(Rtree *pRtree, i64 *piRowid){
+  int rc;
+  sqlite3_bind_null(pRtree->pWriteRowid, 1);
+  sqlite3_bind_null(pRtree->pWriteRowid, 2);
+  sqlite3_step(pRtree->pWriteRowid);
+  rc = sqlite3_reset(pRtree->pWriteRowid);
+  *piRowid = sqlite3_last_insert_rowid(pRtree->db);
+  return rc;
+}
+
+/*
+** Remove the entry with rowid=iDelete from the r-tree structure.
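+**
+** (Editorial note:) this routine serves both a plain DELETE and the
+** delete half of an UPDATE on an rtree table; rtreeUpdate() below calls
+** it before inserting the replacement record.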
+*/
+static int rtreeDeleteRowid(Rtree *pRtree, sqlite3_int64 iDelete){
+  int rc;                         /* Return code */
+  RtreeNode *pLeaf = 0;           /* Leaf node containing record iDelete */
+  int iCell;                      /* Index of iDelete cell in pLeaf */
+  RtreeNode *pRoot;               /* Root node of rtree structure */
+
+
+  /* Obtain a reference to the root node to initialize Rtree.iDepth */
+  rc = nodeAcquire(pRtree, 1, 0, &pRoot);
+
+  /* Obtain a reference to the leaf node that contains the entry
+  ** about to be deleted.
+  */
+  if( rc==SQLITE_OK ){
+    rc = findLeafNode(pRtree, iDelete, &pLeaf, 0);
+  }
+
+  /* Delete the cell in question from the leaf node. */
+  if( rc==SQLITE_OK ){
+    int rc2;
+    rc = nodeRowidIndex(pRtree, pLeaf, iDelete, &iCell);
+    if( rc==SQLITE_OK ){
+      rc = deleteCell(pRtree, pLeaf, iCell, 0);
+    }
+    rc2 = nodeRelease(pRtree, pLeaf);
+    if( rc==SQLITE_OK ){
+      rc = rc2;
+    }
+  }
+
+  /* Delete the corresponding entry in the <rtree>_rowid table. */
+  if( rc==SQLITE_OK ){
+    sqlite3_bind_int64(pRtree->pDeleteRowid, 1, iDelete);
+    sqlite3_step(pRtree->pDeleteRowid);
+    rc = sqlite3_reset(pRtree->pDeleteRowid);
+  }
+
+  /* Check if the root node now has exactly one child. If so, remove
+  ** it, schedule the contents of the child for reinsertion and
+  ** reduce the tree height by one.
+  **
+  ** This is equivalent to copying the contents of the child into
+  ** the root node (the operation that Gutman's paper says to perform
+  ** in this scenario).
+  */
+  if( rc==SQLITE_OK && pRtree->iDepth>0 && NCELL(pRoot)==1 ){
+    int rc2;
+    RtreeNode *pChild;
+    i64 iChild = nodeGetRowid(pRtree, pRoot, 0);
+    rc = nodeAcquire(pRtree, iChild, pRoot, &pChild);
+    if( rc==SQLITE_OK ){
+      rc = removeNode(pRtree, pChild, pRtree->iDepth-1);
+    }
+    rc2 = nodeRelease(pRtree, pChild);
+    if( rc==SQLITE_OK ) rc = rc2;
+    if( rc==SQLITE_OK ){
+      pRtree->iDepth--;
+      writeInt16(pRoot->zData, pRtree->iDepth);
+      pRoot->isDirty = 1;
+    }
+  }
+
+  /* Re-insert the contents of any underfull nodes removed from the tree. */
+  for(pLeaf=pRtree->pDeleted; pLeaf; pLeaf=pRtree->pDeleted){
+    if( rc==SQLITE_OK ){
+      rc = reinsertNodeContent(pRtree, pLeaf);
+    }
+    pRtree->pDeleted = pLeaf->pNext;
+    sqlite3_free(pLeaf);
+  }
+
+  /* Release the reference to the root node. */
+  if( rc==SQLITE_OK ){
+    rc = nodeRelease(pRtree, pRoot);
+  }else{
+    nodeRelease(pRtree, pRoot);
+  }
+
+  return rc;
+}
+
+/*
+** Rounding constants for float->double conversion.
+*/
+#define RNDTOWARDS  (1.0 - 1.0/8388608.0)  /* Round towards zero */
+#define RNDAWAY     (1.0 + 1.0/8388608.0)  /* Round away from zero */
+
+#if !defined(SQLITE_RTREE_INT_ONLY)
+/*
+** Convert an sqlite3_value into an RtreeValue (presumably a float)
+** while taking care to round toward negative or positive, respectively.
+*/
+static RtreeValue rtreeValueDown(sqlite3_value *v){
+  double d = sqlite3_value_double(v);
+  float f = (float)d;
+  if( f>d ){
+    f = (float)(d*(d<0 ? RNDAWAY : RNDTOWARDS));
+  }
+  return f;
+}
+static RtreeValue rtreeValueUp(sqlite3_value *v){
+  double d = sqlite3_value_double(v);
+  float f = (float)d;
+  if( f<d ){
+    f = (float)(d*(d<0 ? RNDTOWARDS : RNDAWAY));
+  }
+  return f;
+}
+#endif /* !defined(SQLITE_RTREE_INT_ONLY) */
+
+/*
+** A constraint has failed while inserting a row into an rtree table.
+** Assuming no OOM error occurs, this function sets the error message
+** (at pRtree->base.zErrMsg) to an appropriate value and returns
+** SQLITE_CONSTRAINT.
+**
+** Parameter iCol is the index of the leftmost column involved in the
+** constraint failure. If it is 0, then the constraint that failed is
+** the unique constraint on the id column. Otherwise, it is the rtree
+** (c1<=c2) constraint on columns iCol and iCol+1 that has failed.
+**
+** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT.
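+**
+** Example messages (editorial, for a hypothetical table
+** "CREATE VIRTUAL TABLE demo USING rtree(id, minX, maxX)"):
+** iCol==0 yields "UNIQUE constraint failed: demo.id" and iCol==1
+** yields "rtree constraint failed: demo.(minX<=maxX)".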
+*/
+static int rtreeConstraintError(Rtree *pRtree, int iCol){
+  sqlite3_stmt *pStmt = 0;
+  char *zSql;
+  int rc;
+
+  assert( iCol==0 || iCol%2 );
+  zSql = sqlite3_mprintf("SELECT * FROM %Q.%Q", pRtree->zDb, pRtree->zName);
+  if( zSql ){
+    rc = sqlite3_prepare_v2(pRtree->db, zSql, -1, &pStmt, 0);
+  }else{
+    rc = SQLITE_NOMEM;
+  }
+  sqlite3_free(zSql);
+
+  if( rc==SQLITE_OK ){
+    if( iCol==0 ){
+      const char *zCol = sqlite3_column_name(pStmt, 0);
+      pRtree->base.zErrMsg = sqlite3_mprintf(
+          "UNIQUE constraint failed: %s.%s", pRtree->zName, zCol
+      );
+    }else{
+      const char *zCol1 = sqlite3_column_name(pStmt, iCol);
+      const char *zCol2 = sqlite3_column_name(pStmt, iCol+1);
+      pRtree->base.zErrMsg = sqlite3_mprintf(
+          "rtree constraint failed: %s.(%s<=%s)", pRtree->zName, zCol1, zCol2
+      );
+    }
+  }
+
+  sqlite3_finalize(pStmt);
+  return (rc==SQLITE_OK ? SQLITE_CONSTRAINT : rc);
+}
+
+
+
+/*
+** The xUpdate method for rtree module virtual tables.
+*/
+static int rtreeUpdate(
+  sqlite3_vtab *pVtab,
+  int nData,
+  sqlite3_value **azData,
+  sqlite_int64 *pRowid
+){
+  Rtree *pRtree = (Rtree *)pVtab;
+  int rc = SQLITE_OK;
+  RtreeCell cell;                 /* New cell to insert if nData>1 */
+  int bHaveRowid = 0;             /* Set to 1 after new rowid is determined */
+
+  rtreeReference(pRtree);
+  assert(nData>=1);
+
+  cell.iRowid = 0;  /* Used only to suppress a compiler warning */
+
+  /* Constraint handling. A write operation on an r-tree table may return
+  ** SQLITE_CONSTRAINT for two reasons:
+  **
+  **   1. A duplicate rowid value, or
+  **   2. The supplied data violates the "x2>=x1" constraint.
+  **
+  ** In the first case, if the conflict-handling mode is REPLACE, then
+  ** the conflicting row can be removed before proceeding. In the second
+  ** case, SQLITE_CONSTRAINT must be returned regardless of the
+  ** conflict-handling mode specified by the user.
+  */
+  if( nData>1 ){
+    int ii;
+
+    /* Populate the cell.aCoord[] array. The first coordinate is azData[3].
+    **
+    ** NB: nData can only be less than nDim*2+3 if the rtree is mis-declared
+    ** with "columns" that are interpreted as table constraints.
+    ** Example:  CREATE VIRTUAL TABLE bad USING rtree(x,y,CHECK(y>5));
+    ** This problem was discovered after years of use, so we silently ignore
+    ** these kinds of misdeclared tables to avoid breaking any legacy.
+    */
+    assert( nData<=(pRtree->nDim2 + 3) );
+
+#ifndef SQLITE_RTREE_INT_ONLY
+    if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+      for(ii=0; ii<nData-4; ii+=2){
+        cell.aCoord[ii].f = rtreeValueDown(azData[ii+3]);
+        cell.aCoord[ii+1].f = rtreeValueUp(azData[ii+4]);
+        if( cell.aCoord[ii].f>cell.aCoord[ii+1].f ){
+          rc = rtreeConstraintError(pRtree, ii+1);
+          goto constraint;
+        }
+      }
+    }else
+#endif
+    {
+      for(ii=0; ii<nData-4; ii+=2){
+        cell.aCoord[ii].i = sqlite3_value_int(azData[ii+3]);
+        cell.aCoord[ii+1].i = sqlite3_value_int(azData[ii+4]);
+        if( cell.aCoord[ii].i>cell.aCoord[ii+1].i ){
+          rc = rtreeConstraintError(pRtree, ii+1);
+          goto constraint;
+        }
+      }
+    }
+
+    /* If a rowid value was supplied, check if it is already present in
+    ** the table. If so, the constraint has failed. */
+    if( sqlite3_value_type(azData[2])!=SQLITE_NULL ){
+      cell.iRowid = sqlite3_value_int64(azData[2]);
+      if( sqlite3_value_type(azData[0])==SQLITE_NULL
+       || sqlite3_value_int64(azData[0])!=cell.iRowid
+      ){
+        int steprc;
+        sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid);
+        steprc = sqlite3_step(pRtree->pReadRowid);
+        rc = sqlite3_reset(pRtree->pReadRowid);
+        if( SQLITE_ROW==steprc ){
+          if( sqlite3_vtab_on_conflict(pRtree->db)==SQLITE_REPLACE ){
+            rc = rtreeDeleteRowid(pRtree, cell.iRowid);
+          }else{
+            rc = rtreeConstraintError(pRtree, 0);
+            goto constraint;
+          }
+        }
+      }
+      bHaveRowid = 1;
+    }
+  }
+
+  /* If azData[0] is not an SQL NULL value, it is the rowid of a
+  ** record to delete from the r-tree table. The following block does
+  ** just that.
+ */ + if( sqlite3_value_type(azData[0])!=SQLITE_NULL ){ + rc = rtreeDeleteRowid(pRtree, sqlite3_value_int64(azData[0])); + } + + /* If the azData[] array contains more than one element, elements + ** (azData[2]..azData[argc-1]) contain a new record to insert into + ** the r-tree structure. + */ + if( rc==SQLITE_OK && nData>1 ){ + /* Insert the new record into the r-tree */ + RtreeNode *pLeaf = 0; + + /* Figure out the rowid of the new row. */ + if( bHaveRowid==0 ){ + rc = newRowid(pRtree, &cell.iRowid); + } + *pRowid = cell.iRowid; + + if( rc==SQLITE_OK ){ + rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf); + } + if( rc==SQLITE_OK ){ + int rc2; + pRtree->iReinsertHeight = -1; + rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); + rc2 = nodeRelease(pRtree, pLeaf); + if( rc==SQLITE_OK ){ + rc = rc2; + } + } + } + +constraint: + rtreeRelease(pRtree); + return rc; +} + +/* +** Called when a transaction starts. +*/ +static int rtreeBeginTransaction(sqlite3_vtab *pVtab){ + Rtree *pRtree = (Rtree *)pVtab; + assert( pRtree->inWrTrans==0 ); + pRtree->inWrTrans++; + return SQLITE_OK; +} + +/* +** Called when a transaction completes (either by COMMIT or ROLLBACK). +** The sqlite3_blob object should be released at this point. +*/ +static int rtreeEndTransaction(sqlite3_vtab *pVtab){ + Rtree *pRtree = (Rtree *)pVtab; + pRtree->inWrTrans = 0; + nodeBlobReset(pRtree); + return SQLITE_OK; +} + +/* +** The xRename method for rtree module virtual tables. +*/ +static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){ + Rtree *pRtree = (Rtree *)pVtab; + int rc = SQLITE_NOMEM; + char *zSql = sqlite3_mprintf( + "ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";" + "ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";" + "ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";" + , pRtree->zDb, pRtree->zName, zNewName + , pRtree->zDb, pRtree->zName, zNewName + , pRtree->zDb, pRtree->zName, zNewName + ); + if( zSql ){ + rc = sqlite3_exec(pRtree->db, zSql, 0, 0, 0); + sqlite3_free(zSql); + } + return rc; +} + + +/* +** This function populates the pRtree->nRowEst variable with an estimate +** of the number of rows in the virtual table. If possible, this is based +** on sqlite_stat1 data. Otherwise, use RTREE_DEFAULT_ROWEST. +*/ +static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){ + const char *zFmt = "SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'"; + char *zSql; + sqlite3_stmt *p; + int rc; + i64 nRow = 0; + + rc = sqlite3_table_column_metadata( + db, pRtree->zDb, "sqlite_stat1",0,0,0,0,0,0 + ); + if( rc!=SQLITE_OK ){ + pRtree->nRowEst = RTREE_DEFAULT_ROWEST; + return rc==SQLITE_ERROR ? 
SQLITE_OK : rc;
+  }
+  zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName);
+  if( zSql==0 ){
+    rc = SQLITE_NOMEM;
+  }else{
+    rc = sqlite3_prepare_v2(db, zSql, -1, &p, 0);
+    if( rc==SQLITE_OK ){
+      if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0);
+      rc = sqlite3_finalize(p);
+    }else if( rc!=SQLITE_NOMEM ){
+      rc = SQLITE_OK;
+    }
+
+    if( rc==SQLITE_OK ){
+      if( nRow==0 ){
+        pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
+      }else{
+        pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST);
+      }
+    }
+    sqlite3_free(zSql);
+  }
+
+  return rc;
+}
+
+static sqlite3_module rtreeModule = {
+  0,                          /* iVersion */
+  rtreeCreate,                /* xCreate - create a table */
+  rtreeConnect,               /* xConnect - connect to an existing table */
+  rtreeBestIndex,             /* xBestIndex - Determine search strategy */
+  rtreeDisconnect,            /* xDisconnect - Disconnect from a table */
+  rtreeDestroy,               /* xDestroy - Drop a table */
+  rtreeOpen,                  /* xOpen - open a cursor */
+  rtreeClose,                 /* xClose - close a cursor */
+  rtreeFilter,                /* xFilter - configure scan constraints */
+  rtreeNext,                  /* xNext - advance a cursor */
+  rtreeEof,                   /* xEof */
+  rtreeColumn,                /* xColumn - read data */
+  rtreeRowid,                 /* xRowid - read data */
+  rtreeUpdate,                /* xUpdate - write data */
+  rtreeBeginTransaction,      /* xBegin - begin transaction */
+  rtreeEndTransaction,        /* xSync - sync transaction */
+  rtreeEndTransaction,        /* xCommit - commit transaction */
+  rtreeEndTransaction,        /* xRollback - rollback transaction */
+  0,                          /* xFindFunction - function overloading */
+  rtreeRename,                /* xRename - rename the table */
+  0,                          /* xSavepoint */
+  0,                          /* xRelease */
+  0,                          /* xRollbackTo */
+};
+
+static int rtreeSqlInit(
+  Rtree *pRtree,
+  sqlite3 *db,
+  const char *zDb,
+  const char *zPrefix,
+  int isCreate
+){
+  int rc = SQLITE_OK;
+
+  #define N_STATEMENT 8
+  static const char *azSql[N_STATEMENT] = {
+    /* Write the xxx_node table */
+    "INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(:1, :2)",
+    "DELETE FROM '%q'.'%q_node' WHERE nodeno = :1",
+
+    /* Read and write the xxx_rowid table */
+    "SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = :1",
+    "INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(:1, :2)",
+    "DELETE FROM '%q'.'%q_rowid' WHERE rowid = :1",
+
+    /* Read and write the xxx_parent table */
+    "SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = :1",
+    "INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(:1, :2)",
+    "DELETE FROM '%q'.'%q_parent' WHERE nodeno = :1"
+  };
+  sqlite3_stmt **appStmt[N_STATEMENT];
+  int i;
+
+  pRtree->db = db;
+
+  if( isCreate ){
+    char *zCreate = sqlite3_mprintf(
+"CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY, data BLOB);"
+"CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY, nodeno INTEGER);"
+"CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY,"
+                                  " parentnode INTEGER);"
+"INSERT INTO '%q'.'%q_node' VALUES(1, zeroblob(%d))",
+      zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, pRtree->iNodeSize
+    );
+    if( !zCreate ){
+      return SQLITE_NOMEM;
+    }
+    rc = sqlite3_exec(db, zCreate, 0, 0, 0);
+    sqlite3_free(zCreate);
+    if( rc!=SQLITE_OK ){
+      return rc;
+    }
+  }
+
+  appStmt[0] = &pRtree->pWriteNode;
+  appStmt[1] = &pRtree->pDeleteNode;
+  appStmt[2] = &pRtree->pReadRowid;
+  appStmt[3] = &pRtree->pWriteRowid;
+  appStmt[4] = &pRtree->pDeleteRowid;
+  appStmt[5] = &pRtree->pReadParent;
+  appStmt[6] = &pRtree->pWriteParent;
+  appStmt[7] = &pRtree->pDeleteParent;
+
+  rc = rtreeQueryStat1(db, pRtree);
+  for(i=0; i<N_STATEMENT && rc==SQLITE_OK; i++){
+    char *zSql = sqlite3_mprintf(azSql[i], zDb, zPrefix);
+    if( zSql ){
+      rc = sqlite3_prepare_v2(db, zSql, -1, appStmt[i], 0);
+    }else{
+      rc = SQLITE_NOMEM;
+    }
+    sqlite3_free(zSql);
+  }
+  return rc;
+}
+
+/*
+** The second argument to this function contains the text of an SQL statement
+** that returns a single integer value. The statement is compiled and executed
+** using database connection db. If successful, the integer value returned
+** is written to *piVal and SQLITE_OK returned. Otherwise, an SQLite error
+** code is returned and the value of *piVal after returning is not defined.
+*/
+static int getIntFromStmt(sqlite3 *db, char *zSql, int *piVal){
+  int rc = SQLITE_NOMEM;
+  if( zSql ){
+    sqlite3_stmt *pStmt = 0;
+    rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
+    if( rc==SQLITE_OK ){
+      if( SQLITE_ROW==sqlite3_step(pStmt) ){
+        *piVal = sqlite3_column_int(pStmt, 0);
+      }
+      rc = sqlite3_finalize(pStmt);
+    }
+  }
+  return rc;
+}
+
+/*
+** This function is called from within the xConnect() or xCreate() method to
+** determine the node-size used by the rtree table being created or connected
+** to. If successful, pRtree->iNodeSize is populated and SQLITE_OK returned.
+** Otherwise, an SQLite error code is returned.
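+**
+** (Editorial example, assuming the default RTREE_MAXCELLS of 51:) with a
+** 4096-byte page, xCreate picks iNodeSize = 4096-64 = 4032; a
+** one-dimensional rtree (nBytesPerCell = 8+2*4 = 16) is instead capped
+** at 4 + 16*51 = 820 bytes per node.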
+**
+** If this function is being called as part of an xConnect(), then the rtree
+** table already exists. In this case the node-size is determined by inspecting
+** the root node of the tree.
+**
+** Otherwise, for an xCreate(), use 64 bytes less than the database page-size.
+** This ensures that each node is stored on a single database page. If the
+** database page-size is so large that more than RTREE_MAXCELLS entries
+** would fit in a single node, use a smaller node-size.
+*/
+static int getNodeSize(
+  sqlite3 *db,                    /* Database handle */
+  Rtree *pRtree,                  /* Rtree handle */
+  int isCreate,                   /* True for xCreate, false for xConnect */
+  char **pzErr                    /* OUT: Error message, if any */
+){
+  int rc;
+  char *zSql;
+  if( isCreate ){
+    int iPageSize = 0;
+    zSql = sqlite3_mprintf("PRAGMA %Q.page_size", pRtree->zDb);
+    rc = getIntFromStmt(db, zSql, &iPageSize);
+    if( rc==SQLITE_OK ){
+      pRtree->iNodeSize = iPageSize-64;
+      if( (4+pRtree->nBytesPerCell*RTREE_MAXCELLS)<pRtree->iNodeSize ){
+        pRtree->iNodeSize = 4+pRtree->nBytesPerCell*RTREE_MAXCELLS;
+      }
+    }else{
+      *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+    }
+  }else{
+    zSql = sqlite3_mprintf(
+        "SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1",
+        pRtree->zDb, pRtree->zName
+    );
+    rc = getIntFromStmt(db, zSql, &pRtree->iNodeSize);
+    if( rc!=SQLITE_OK ){
+      *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+    }
+  }
+
+  sqlite3_free(zSql);
+  return rc;
+}
+
+/*
+** This function is the implementation of both the xConnect and xCreate
+** methods of the r-tree virtual table.
+**
+**   argv[0]   -> module name
+**   argv[1]   -> database name
+**   argv[2]   -> table name
+**   argv[...] -> column names...
+*/
+static int rtreeInit(
+  sqlite3 *db,                        /* Database connection */
+  void *pAux,                         /* One of the RTREE_COORD_* constants */
+  int argc, const char *const*argv,   /* Parameters to CREATE TABLE statement */
+  sqlite3_vtab **ppVtab,              /* OUT: New virtual table */
+  char **pzErr,                       /* OUT: Error message, if any */
+  int isCreate                        /* True for xCreate, false for xConnect */
+){
+  int rc = SQLITE_OK;
+  Rtree *pRtree;
+  int nDb;              /* Length of string argv[1] */
+  int nName;            /* Length of string argv[2] */
+  int eCoordType = (pAux ? RTREE_COORD_INT32 : RTREE_COORD_REAL32);
+
+  const char *aErrMsg[] = {
+    0,                                                    /* 0 */
+    "Wrong number of columns for an rtree table",         /* 1 */
+    "Too few columns for an rtree table",                 /* 2 */
+    "Too many columns for an rtree table"                 /* 3 */
+  };
+
+  int iErr = (argc<6) ? 2 : argc>(RTREE_MAX_DIMENSIONS*2+4) ? 3 : argc%2;
+  if( aErrMsg[iErr] ){
+    *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]);
+    return SQLITE_ERROR;
+  }
+
+  sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
+
+  /* Allocate the sqlite3_vtab structure */
+  nDb = (int)strlen(argv[1]);
+  nName = (int)strlen(argv[2]);
+  pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2);
+  if( !pRtree ){
+    return SQLITE_NOMEM;
+  }
+  memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2);
+  pRtree->nBusy = 1;
+  pRtree->base.pModule = &rtreeModule;
+  pRtree->zDb = (char *)&pRtree[1];
+  pRtree->zName = &pRtree->zDb[nDb+1];
+  pRtree->nDim = (u8)((argc-4)/2);
+  pRtree->nDim2 = pRtree->nDim*2;
+  pRtree->nBytesPerCell = 8 + pRtree->nDim2*4;
+  pRtree->eCoordType = (u8)eCoordType;
+  memcpy(pRtree->zDb, argv[1], nDb);
+  memcpy(pRtree->zName, argv[2], nName);
+
+  /* Figure out the node size to use. */
+  rc = getNodeSize(db, pRtree, isCreate, pzErr);
+
+  /* Create/Connect to the underlying relational database schema. If
+  ** that is successful, call sqlite3_declare_vtab() to configure
+  ** the r-tree table schema.
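+  ** (Editorial example:) for "CREATE VIRTUAL TABLE rt USING
+  ** rtree(id, minX, maxX)" the loop below builds and declares the schema
+  ** "CREATE TABLE x(id, minX, maxX);".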
+  */
+  if( rc==SQLITE_OK ){
+    if( (rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate)) ){
+      *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+    }else{
+      char *zSql = sqlite3_mprintf("CREATE TABLE x(%s", argv[3]);
+      char *zTmp;
+      int ii;
+      for(ii=4; zSql && ii<argc; ii++){
+        zTmp = zSql;
+        zSql = sqlite3_mprintf("%s, %s", zTmp, argv[ii]);
+        sqlite3_free(zTmp);
+      }
+      if( zSql ){
+        zTmp = zSql;
+        zSql = sqlite3_mprintf("%s);", zTmp);
+        sqlite3_free(zTmp);
+      }
+      if( !zSql ){
+        rc = SQLITE_NOMEM;
+      }else if( SQLITE_OK!=(rc = sqlite3_declare_vtab(db, zSql)) ){
+        *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+      }
+      sqlite3_free(zSql);
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    *ppVtab = (sqlite3_vtab *)pRtree;
+  }else{
+    assert( *ppVtab==0 );
+    assert( pRtree->nBusy==1 );
+    rtreeRelease(pRtree);
+  }
+  return rc;
+}
+
+
+/*
+** Implementation of a scalar function that decodes r-tree nodes to
+** human readable strings. This can be used for debugging and analysis.
+**
+** The scalar function takes two arguments: (1) the number of dimensions
+** to the rtree (between 1 and 5, inclusive) and (2) a blob of data containing
+** an r-tree node.  For a two-dimensional r-tree structure called "rt", to
+** deserialize all nodes, use a statement like:
+**
+**   SELECT rtreenode(2, data) FROM rt_node;
+**
+** The human readable string takes the form of a Tcl list with one
+** entry for each cell in the r-tree node. Each entry is itself a
+** list, containing the 8-byte rowid/pageno followed by the
+** <num-dimension>*2 coordinates.
+*/
+static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
+  char *zText = 0;
+  RtreeNode node;
+  Rtree tree;
+  int ii;
+
+  UNUSED_PARAMETER(nArg);
+  memset(&node, 0, sizeof(RtreeNode));
+  memset(&tree, 0, sizeof(Rtree));
+  tree.nDim = (u8)sqlite3_value_int(apArg[0]);
+  tree.nDim2 = tree.nDim*2;
+  tree.nBytesPerCell = 8 + 8 * tree.nDim;
+  node.zData = (u8 *)sqlite3_value_blob(apArg[1]);
+
+  for(ii=0; ii<NCELL(&node); ii++){
+    char zCell[512];
+    int nCell = 0;
+    RtreeCell cell;
+    int jj;
+
+    nodeGetCell(&tree, &node, ii, &cell);
+    sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid);
+    nCell = (int)strlen(zCell);
+    for(jj=0; jj<tree.nDim2; jj++){
+#ifndef SQLITE_RTREE_INT_ONLY
+      sqlite3_snprintf(512-nCell,&zCell[nCell], " %g",
+                       (double)cell.aCoord[jj].f);
+#else
+      sqlite3_snprintf(512-nCell,&zCell[nCell], " %d",
+                       cell.aCoord[jj].i);
+#endif
+      nCell = (int)strlen(zCell);
+    }
+
+    if( zText ){
+      char *zTextNew = sqlite3_mprintf("%s {%s}", zText, zCell);
+      sqlite3_free(zText);
+      zText = zTextNew;
+    }else{
+      zText = sqlite3_mprintf("{%s}", zCell);
+    }
+  }
+
+  sqlite3_result_text(ctx, zText, -1, sqlite3_free);
+}
+
+/* This routine implements an SQL function that returns the "depth" parameter
+** from the front of a blob that is an r-tree node.  For example:
+**
+**   SELECT rtreedepth(data) FROM rt_node WHERE nodeno=1;
+**
+** The depth value is 0 for all nodes other than the root node, and the root
+** node always has nodeno=1, so the example above is the primary use for this
+** routine.  This routine is intended for testing and analysis only.
+*/
+static void rtreedepth(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
+  UNUSED_PARAMETER(nArg);
+  if( sqlite3_value_type(apArg[0])!=SQLITE_BLOB
+   || sqlite3_value_bytes(apArg[0])<2
+  ){
+    sqlite3_result_error(ctx, "Invalid argument to rtreedepth()", -1);
+  }else{
+    u8 *zBlob = (u8*)sqlite3_value_blob(apArg[0]);
+    sqlite3_result_int(ctx, readInt16(zBlob));
+  }
+}
+
+/*
+** Register the r-tree module with database handle db. This creates the
+** virtual table module "rtree" and the debugging/analysis scalar
+** function "rtreenode".
+*/
+SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db){
+  const int utf8 = SQLITE_UTF8;
+  int rc;
+
+  rc = sqlite3_create_function(db, "rtreenode", 2, utf8, 0, rtreenode, 0, 0);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_create_function(db, "rtreedepth", 1, utf8, 0,rtreedepth, 0, 0);
+  }
+  if( rc==SQLITE_OK ){
+#ifdef SQLITE_RTREE_INT_ONLY
+    void *c = (void *)RTREE_COORD_INT32;
+#else
+    void *c = (void *)RTREE_COORD_REAL32;
+#endif
+    rc = sqlite3_create_module_v2(db, "rtree", &rtreeModule, c, 0);
+  }
+
+  return rc;
+}
+
+/*
+** This routine deletes the RtreeGeomCallback object that was attached
+** to one of the SQL functions created by sqlite3_rtree_geometry_callback()
+** or sqlite3_rtree_query_callback().  In other words, this routine is the
+** destructor for an RtreeGeomCallback object.  This routine is called when
+** the corresponding SQL function is deleted.
+*/
+static void rtreeFreeCallback(void *p){
+  RtreeGeomCallback *pInfo = (RtreeGeomCallback*)p;
+  if( pInfo->xDestructor ) pInfo->xDestructor(pInfo->pContext);
+  sqlite3_free(p);
+}
+
+/*
+** This routine frees the BLOB that is returned by geomCallback().
+*/
+static void rtreeMatchArgFree(void *pArg){
+  int i;
+  RtreeMatchArg *p = (RtreeMatchArg*)pArg;
+  for(i=0; i<p->nParam; i++){
+    sqlite3_value_free(p->apSqlParam[i]);
+  }
+  sqlite3_free(p);
+}
+
+/*
+** Each call to sqlite3_rtree_geometry_callback() or
+** sqlite3_rtree_query_callback() creates an ordinary SQLite
+** scalar function that is implemented by this routine.
+**
+** All this function does is construct an RtreeMatchArg object that
+** contains the geometry-checking callback routines and a list of
+** parameters to this function, then return that RtreeMatchArg object
+** as a BLOB.
+**
+** The R-Tree MATCH operator will read the returned BLOB, deserialize
+** the RtreeMatchArg object, and use the RtreeMatchArg object to figure
+** out which elements of the R-Tree should be returned by the query.
+*/
+static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){
+  RtreeGeomCallback *pGeomCtx = (RtreeGeomCallback *)sqlite3_user_data(ctx);
+  RtreeMatchArg *pBlob;
+  int nBlob;
+  int memErr = 0;
+
+  nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue)
+           + nArg*sizeof(sqlite3_value*);
+  pBlob = (RtreeMatchArg *)sqlite3_malloc(nBlob);
+  if( !pBlob ){
+    sqlite3_result_error_nomem(ctx);
+  }else{
+    int i;
+    pBlob->magic = RTREE_GEOMETRY_MAGIC;
+    pBlob->cb = pGeomCtx[0];
+    pBlob->apSqlParam = (sqlite3_value**)&pBlob->aParam[nArg];
+    pBlob->nParam = nArg;
+    for(i=0; i<nArg; i++){
+      pBlob->apSqlParam[i] = sqlite3_value_dup(aArg[i]);
+      if( pBlob->apSqlParam[i]==0 ) memErr = 1;
+#ifdef SQLITE_RTREE_INT_ONLY
+      pBlob->aParam[i] = sqlite3_value_int64(aArg[i]);
+#else
+      pBlob->aParam[i] = sqlite3_value_double(aArg[i]);
+#endif
+    }
+    if( memErr ){
+      sqlite3_result_error_nomem(ctx);
+      rtreeMatchArgFree(pBlob);
+    }else{
+      sqlite3_result_blob(ctx, pBlob, nBlob, rtreeMatchArgFree);
+    }
+  }
+}
+
+/*
+** Register a new geometry function for use with the r-tree MATCH operator.
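+**
+** Usage sketch (editorial; the circle() function and table name are
+** illustrative only):
+**
+**   sqlite3_rtree_geometry_callback(db, "circle", circleCallback, 0);
+**   SELECT id FROM demo_index WHERE id MATCH circle(45.3, 22.9, 5.0);
+**
+** Each node or leaf cell visited by the query then invokes
+** circleCallback() to decide whether its bounding box qualifies.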
+*/ +SQLITE_API int sqlite3_rtree_geometry_callback( + sqlite3 *db, /* Register SQL function on this connection */ + const char *zGeom, /* Name of the new SQL function */ + int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*), /* Callback */ + void *pContext /* Extra data associated with the callback */ +){ + RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */ + + /* Allocate and populate the context object. */ + pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback)); + if( !pGeomCtx ) return SQLITE_NOMEM; + pGeomCtx->xGeom = xGeom; + pGeomCtx->xQueryFunc = 0; + pGeomCtx->xDestructor = 0; + pGeomCtx->pContext = pContext; + return sqlite3_create_function_v2(db, zGeom, -1, SQLITE_ANY, + (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback + ); +} + +/* +** Register a new 2nd-generation geometry function for use with the +** r-tree MATCH operator. +*/ +SQLITE_API int sqlite3_rtree_query_callback( + sqlite3 *db, /* Register SQL function on this connection */ + const char *zQueryFunc, /* Name of new SQL function */ + int (*xQueryFunc)(sqlite3_rtree_query_info*), /* Callback */ + void *pContext, /* Extra data passed into the callback */ + void (*xDestructor)(void*) /* Destructor for the extra data */ +){ + RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */ + + /* Allocate and populate the context object. */ + pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback)); + if( !pGeomCtx ) return SQLITE_NOMEM; + pGeomCtx->xGeom = 0; + pGeomCtx->xQueryFunc = xQueryFunc; + pGeomCtx->xDestructor = xDestructor; + pGeomCtx->pContext = pContext; + return sqlite3_create_function_v2(db, zQueryFunc, -1, SQLITE_ANY, + (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback + ); +} + +#if !SQLITE_CORE +#ifdef _WIN32 +__declspec(dllexport) +#endif +SQLITE_API int sqlite3_rtree_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3RtreeInit(db); +} +#endif + +#endif + +/************** End of rtree.c ***********************************************/ +/************** Begin file icu.c *********************************************/ +/* +** 2007 May 6 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** $Id: icu.c,v 1.7 2007/12/13 21:54:11 drh Exp $ +** +** This file implements an integration between the ICU library +** ("International Components for Unicode", an open-source library +** for handling unicode data) and SQLite. The integration uses +** ICU to provide the following to SQLite: +** +** * An implementation of the SQL regexp() function (and hence REGEXP +** operator) using the ICU uregex_XX() APIs. +** +** * Implementations of the SQL scalar upper() and lower() functions +** for case mapping. +** +** * Integration of ICU and SQLite collation sequences. +** +** * An implementation of the LIKE operator that uses ICU to +** provide case-independent matching. 
+*/
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU)
+
+/* Include ICU headers */
+#include <unicode/utypes.h>
+#include <unicode/uregex.h>
+#include <unicode/ustring.h>
+#include <unicode/ucol.h>
+
+/* #include <assert.h> */
+
+#ifndef SQLITE_CORE
+/* #include "sqlite3ext.h" */
+  SQLITE_EXTENSION_INIT1
+#else
+/* #include "sqlite3.h" */
+#endif
+
+/*
+** Maximum length (in bytes) of the pattern in a LIKE or GLOB
+** operator.
+*/
+#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH
+# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000
+#endif
+
+/*
+** Version of sqlite3_free() that is always a function, never a macro.
+*/
+static void xFree(void *p){
+  sqlite3_free(p);
+}
+
+/*
+** This lookup table is used to help decode the first byte of
+** a multi-byte UTF8 character. It is copied here from SQLite source
+** code file utf8.c.
+*/
+static const unsigned char icuUtf8Trans1[] = {
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+  0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+  0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+  0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00,
+};
+
+#define SQLITE_ICU_READ_UTF8(zIn, c)                       \
+  c = *(zIn++);                                            \
+  if( c>=0xc0 ){                                           \
+    c = icuUtf8Trans1[c-0xc0];                             \
+    while( (*zIn & 0xc0)==0x80 ){                          \
+      c = (c<<6) + (0x3f & *(zIn++));                      \
+    }                                                      \
+  }
+
+#define SQLITE_ICU_SKIP_UTF8(zIn)                          \
+  assert( *zIn );                                          \
+  if( *(zIn++)>=0xc0 ){                                    \
+    while( (*zIn & 0xc0)==0x80 ){zIn++;}                   \
+  }
+
+
+/*
+** Compare two UTF-8 strings for equality where the first string is
+** a "LIKE" expression. Return true (1) if they are the same and
+** false (0) if they are different.
+*/
+static int icuLikeCompare(
+  const uint8_t *zPattern,   /* LIKE pattern */
+  const uint8_t *zString,    /* The UTF-8 string to compare against */
+  const UChar32 uEsc         /* The escape character */
+){
+  static const int MATCH_ONE = (UChar32)'_';
+  static const int MATCH_ALL = (UChar32)'%';
+
+  int prevEscape = 0;     /* True if the previous character was uEsc */
+
+  while( 1 ){
+
+    /* Read (and consume) the next character from the input pattern. */
+    UChar32 uPattern;
+    SQLITE_ICU_READ_UTF8(zPattern, uPattern);
+    if( uPattern==0 ) break;
+
+    /* There are now 4 possibilities:
+    **
+    **   1. uPattern is an unescaped match-all character "%",
+    **   2. uPattern is an unescaped match-one character "_",
+    **   3. uPattern is an unescaped escape character, or
+    **   4. uPattern is to be handled as an ordinary character
+    */
+    if( !prevEscape && uPattern==MATCH_ALL ){
+      /* Case 1. */
+      uint8_t c;
+
+      /* Skip any MATCH_ALL or MATCH_ONE characters that follow a
+      ** MATCH_ALL. For each MATCH_ONE, skip one character in the
+      ** test string.
+      */
+      while( (c=*zPattern) == MATCH_ALL || c == MATCH_ONE ){
+        if( c==MATCH_ONE ){
+          if( *zString==0 ) return 0;
+          SQLITE_ICU_SKIP_UTF8(zString);
+        }
+        zPattern++;
+      }
+
+      if( *zPattern==0 ) return 1;
+
+      while( *zString ){
+        if( icuLikeCompare(zPattern, zString, uEsc) ){
+          return 1;
+        }
+        SQLITE_ICU_SKIP_UTF8(zString);
+      }
+      return 0;
+
+    }else if( !prevEscape && uPattern==MATCH_ONE ){
+      /* Case 2. */
+      if( *zString==0 ) return 0;
+      SQLITE_ICU_SKIP_UTF8(zString);
+
+    }else if( !prevEscape && uPattern==uEsc){
+      /* Case 3. */
+      prevEscape = 1;
+
+    }else{
+      /* Case 4.
+      UChar32 uString;
+      SQLITE_ICU_READ_UTF8(zString, uString);
+      uString = u_foldCase(uString, U_FOLD_CASE_DEFAULT);
+      uPattern = u_foldCase(uPattern, U_FOLD_CASE_DEFAULT);
+      if( uString!=uPattern ){
+        return 0;
+      }
+      prevEscape = 0;
+    }
+  }
+
+  return *zString==0;
+}
+
+/*
+** Implementation of the like() SQL function. This function implements
+** the built-in LIKE operator. The first argument to the function is the
+** pattern and the second argument is the string. So, the SQL statement:
+**
+**       A LIKE B
+**
+** is implemented as like(B, A). If there is an escape character E,
+**
+**       A LIKE B ESCAPE E
+**
+** is mapped to like(B, A, E).
+*/
+static void icuLikeFunc(
+  sqlite3_context *context,
+  int argc,
+  sqlite3_value **argv
+){
+  const unsigned char *zA = sqlite3_value_text(argv[0]);
+  const unsigned char *zB = sqlite3_value_text(argv[1]);
+  UChar32 uEsc = 0;
+
+  /* Limit the length of the LIKE or GLOB pattern to avoid problems
+  ** of deep recursion and N*N behavior in patternCompare().
+  */
+  if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){
+    sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1);
+    return;
+  }
+
+  if( argc==3 ){
+    /* The escape character string must consist of a single UTF-8 character.
+    ** Otherwise, return an error.
+    */
+    int nE = sqlite3_value_bytes(argv[2]);
+    const unsigned char *zE = sqlite3_value_text(argv[2]);
+    int i = 0;
+    if( zE==0 ) return;
+    U8_NEXT(zE, i, nE, uEsc);
+    if( i!=nE ){
+      sqlite3_result_error(context,
+          "ESCAPE expression must be a single character", -1);
+      return;
+    }
+  }
+
+  if( zA && zB ){
+    sqlite3_result_int(context, icuLikeCompare(zA, zB, uEsc));
+  }
+}
+
+/*
+** This function is called when an ICU function called from within
+** the implementation of an SQL scalar function returns an error.
+**
+** The scalar function context passed as the first argument is
+** loaded with an error message based on the following two args.
+*/
+static void icuFunctionError(
+  sqlite3_context *pCtx,       /* SQLite scalar function context */
+  const char *zName,           /* Name of ICU function that failed */
+  UErrorCode e                 /* Error code returned by ICU function */
+){
+  char zBuf[128];
+  sqlite3_snprintf(128, zBuf, "ICU error: %s(): %s", zName, u_errorName(e));
+  zBuf[127] = '\0';
+  sqlite3_result_error(pCtx, zBuf, -1);
+}
+
+/*
+** Function to delete compiled regexp objects. Registered as
+** a destructor function with sqlite3_set_auxdata().
+*/
+static void icuRegexpDelete(void *p){
+  URegularExpression *pExpr = (URegularExpression *)p;
+  uregex_close(pExpr);
+}
+
+/*
+** Implementation of SQLite REGEXP operator. This scalar function takes
+** two arguments. The first is a regular expression pattern to compile
+** the second is a string to match against that pattern. If either
+** argument is an SQL NULL, then NULL is returned. Otherwise, the result
+** is 1 if the string matches the pattern, or 0 otherwise.
+**
+** SQLite maps the regexp() function to the regexp() operator such
+** that the following two are equivalent:
+**
+**     zString REGEXP zPattern
+**     regexp(zPattern, zString)
+**
+** Uses the following ICU regexp APIs:
+**
+**     uregex_open()
+**     uregex_matches()
+**     uregex_close()
+*/
+static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){
+  UErrorCode status = U_ZERO_ERROR;
+  URegularExpression *pExpr;
+  UBool res;
+  const UChar *zString = sqlite3_value_text16(apArg[1]);
+
+  (void)nArg;  /* Unused parameter */
+
+  /* If the left hand side of the regexp operator is NULL,
+  ** then the result is also NULL.
+  */
+  if( !zString ){
+    return;
+  }
+
+  pExpr = sqlite3_get_auxdata(p, 0);
+  if( !pExpr ){
+    const UChar *zPattern = sqlite3_value_text16(apArg[0]);
+    if( !zPattern ){
+      return;
+    }
+    pExpr = uregex_open(zPattern, -1, 0, 0, &status);
+
+    if( U_SUCCESS(status) ){
+      sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete);
+    }else{
+      assert(!pExpr);
+      icuFunctionError(p, "uregex_open", status);
+      return;
+    }
+  }
+
+  /* Configure the text that the regular expression operates on. */
+  uregex_setText(pExpr, zString, -1, &status);
+  if( !U_SUCCESS(status) ){
+    icuFunctionError(p, "uregex_setText", status);
+    return;
+  }
+
+  /* Attempt the match */
+  res = uregex_matches(pExpr, 0, &status);
+  if( !U_SUCCESS(status) ){
+    icuFunctionError(p, "uregex_matches", status);
+    return;
+  }
+
+  /* Set the text that the regular expression operates on to a NULL
+  ** pointer. This is not really necessary, but it is tidier than
+  ** leaving the regular expression object configured with an invalid
+  ** pointer after this function returns.
+  */
+  uregex_setText(pExpr, 0, 0, &status);
+
+  /* Return 1 or 0. */
+  sqlite3_result_int(p, res ? 1 : 0);
+}
+
+/*
+** Implementations of scalar functions for case mapping - upper() and
+** lower(). Function upper() converts its input to upper-case (ABC).
+** Function lower() converts to lower-case (abc).
+**
+** ICU provides two types of case mapping, "general" case mapping and
+** "language specific". Refer to ICU documentation for the differences
+** between the two.
+**
+** To utilise "general" case mapping, the upper() or lower() scalar
+** functions are invoked with one argument:
+**
+**     upper('abc') -> 'ABC'
+**     lower('ABC') -> 'abc'
+**
+** To access ICU "language specific" case mapping, upper() or lower()
+** should be invoked with two arguments. The second argument is the name
+** of the locale to use. Passing an empty string ("") or SQL NULL value
+** as the second argument is the same as invoking the 1 argument version
+** of upper() or lower().
+**
+**     lower('I', 'en_us') -> 'i'
+**     lower('I', 'tr_tr') -> '\u131' (small dotless i)
+**
+** http://www.icu-project.org/userguide/posix.html#case_mappings
+*/
+static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){
+  const UChar *zInput;            /* Pointer to input string */
+  UChar *zOutput = 0;             /* Pointer to output buffer */
+  int nInput;                     /* Size of utf-16 input string in bytes */
+  int nOut;                       /* Size of output buffer in bytes */
+  int cnt;
+  int bToUpper;                   /* True for toupper(), false for tolower() */
+  UErrorCode status;
+  const char *zLocale = 0;
+
+  assert(nArg==1 || nArg==2);
+  bToUpper = (sqlite3_user_data(p)!=0);
+  if( nArg==2 ){
+    zLocale = (const char *)sqlite3_value_text(apArg[1]);
+  }
+
+  zInput = sqlite3_value_text16(apArg[0]);
+  if( !zInput ){
+    return;
+  }
+  nOut = nInput = sqlite3_value_bytes16(apArg[0]);
+  if( nOut==0 ){
+    sqlite3_result_text16(p, "", 0, SQLITE_STATIC);
+    return;
+  }
+
+  for(cnt=0; cnt<2; cnt++){
+    UChar *zNew = sqlite3_realloc(zOutput, nOut);
+    if( zNew==0 ){
+      sqlite3_free(zOutput);
+      sqlite3_result_error_nomem(p);
+      return;
+    }
+    zOutput = zNew;
+    status = U_ZERO_ERROR;
+    if( bToUpper ){
+      nOut = 2*u_strToUpper(zOutput,nOut/2,zInput,nInput/2,zLocale,&status);
+    }else{
+      nOut = 2*u_strToLower(zOutput,nOut/2,zInput,nInput/2,zLocale,&status);
+    }
+
+    if( U_SUCCESS(status) ){
+      sqlite3_result_text16(p, zOutput, nOut, xFree);
+    }else if( status==U_BUFFER_OVERFLOW_ERROR ){
+      assert( cnt==0 );
+      continue;
+    }else{
+      icuFunctionError(p, bToUpper ? "u_strToUpper" : "u_strToLower", status);
+    }
+    return;
+  }
+  assert( 0 );     /* Unreachable */
+}
+
+/*
+** Collation sequence destructor function. The pCtx argument points to
+** a UCollator structure previously allocated using ucol_open().
+*/
+static void icuCollationDel(void *pCtx){
+  UCollator *p = (UCollator *)pCtx;
+  ucol_close(p);
+}
+
+/*
+** Collation sequence comparison function. The pCtx argument points to
+** a UCollator structure previously allocated using ucol_open().
+*/
+static int icuCollationColl(
+  void *pCtx,
+  int nLeft,
+  const void *zLeft,
+  int nRight,
+  const void *zRight
+){
+  UCollationResult res;
+  UCollator *p = (UCollator *)pCtx;
+  res = ucol_strcoll(p, (UChar *)zLeft, nLeft/2, (UChar *)zRight, nRight/2);
+  switch( res ){
+    case UCOL_LESS:    return -1;
+    case UCOL_GREATER: return +1;
+    case UCOL_EQUAL:   return 0;
+  }
+  assert(!"Unexpected return value from ucol_strcoll()");
+  return 0;
+}
+
+/*
+** Implementation of the scalar function icu_load_collation().
+**
+** This scalar function is used to add ICU collation based collation
+** types to an SQLite database connection. It is intended to be called
+** as follows:
+**
+**     SELECT icu_load_collation(<locale>, <collation-name>);
+**
+** Where <locale> is a string containing an ICU locale identifier (i.e.
+** "en_AU", "tr_TR" etc.) and <collation-name> is the name of the
+** collation sequence to create.
+*/
+static void icuLoadCollation(
+  sqlite3_context *p,
+  int nArg,
+  sqlite3_value **apArg
+){
+  sqlite3 *db = (sqlite3 *)sqlite3_user_data(p);
+  UErrorCode status = U_ZERO_ERROR;
+  const char *zLocale;      /* Locale identifier - (eg. "jp_JP") */
+  const char *zName;        /* SQL Collation sequence name (eg.
"japanese") */ + UCollator *pUCollator; /* ICU library collation object */ + int rc; /* Return code from sqlite3_create_collation_x() */ + + assert(nArg==2); + (void)nArg; /* Unused parameter */ + zLocale = (const char *)sqlite3_value_text(apArg[0]); + zName = (const char *)sqlite3_value_text(apArg[1]); + + if( !zLocale || !zName ){ + return; + } + + pUCollator = ucol_open(zLocale, &status); + if( !U_SUCCESS(status) ){ + icuFunctionError(p, "ucol_open", status); + return; + } + assert(p); + + rc = sqlite3_create_collation_v2(db, zName, SQLITE_UTF16, (void *)pUCollator, + icuCollationColl, icuCollationDel + ); + if( rc!=SQLITE_OK ){ + ucol_close(pUCollator); + sqlite3_result_error(p, "Error registering collation function", -1); + } +} + +/* +** Register the ICU extension functions with database db. +*/ +SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){ + static const struct IcuScalar { + const char *zName; /* Function name */ + unsigned char nArg; /* Number of arguments */ + unsigned short enc; /* Optimal text encoding */ + unsigned char iContext; /* sqlite3_user_data() context */ + void (*xFunc)(sqlite3_context*,int,sqlite3_value**); + } scalars[] = { + {"icu_load_collation", 2, SQLITE_UTF8, 1, icuLoadCollation}, + {"regexp", 2, SQLITE_ANY|SQLITE_DETERMINISTIC, 0, icuRegexpFunc}, + {"lower", 1, SQLITE_UTF16|SQLITE_DETERMINISTIC, 0, icuCaseFunc16}, + {"lower", 2, SQLITE_UTF16|SQLITE_DETERMINISTIC, 0, icuCaseFunc16}, + {"upper", 1, SQLITE_UTF16|SQLITE_DETERMINISTIC, 1, icuCaseFunc16}, + {"upper", 2, SQLITE_UTF16|SQLITE_DETERMINISTIC, 1, icuCaseFunc16}, + {"lower", 1, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuCaseFunc16}, + {"lower", 2, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuCaseFunc16}, + {"upper", 1, SQLITE_UTF8|SQLITE_DETERMINISTIC, 1, icuCaseFunc16}, + {"upper", 2, SQLITE_UTF8|SQLITE_DETERMINISTIC, 1, icuCaseFunc16}, + {"like", 2, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuLikeFunc}, + {"like", 3, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuLikeFunc}, + }; + int rc = SQLITE_OK; + int i; + + + for(i=0; rc==SQLITE_OK && i<(int)(sizeof(scalars)/sizeof(scalars[0])); i++){ + const struct IcuScalar *p = &scalars[i]; + rc = sqlite3_create_function( + db, p->zName, p->nArg, p->enc, + p->iContext ? (void*)db : (void*)0, + p->xFunc, 0, 0 + ); + } + + return rc; +} + +#if !SQLITE_CORE +#ifdef _WIN32 +__declspec(dllexport) +#endif +SQLITE_API int sqlite3_icu_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + SQLITE_EXTENSION_INIT2(pApi) + return sqlite3IcuInit(db); +} +#endif + +#endif + +/************** End of icu.c *************************************************/ +/************** Begin file fts3_icu.c ****************************************/ +/* +** 2007 June 22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements a tokenizer for fts3 based on the ICU library. 
+*/
+/* #include "fts3Int.h" */
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3)
+#ifdef SQLITE_ENABLE_ICU
+
+/* #include <assert.h> */
+/* #include <string.h> */
+/* #include "fts3_tokenizer.h" */
+
+#include <unicode/ubrk.h>
+/* #include <unicode/ucol.h> */
+/* #include <unicode/ustring.h> */
+#include <unicode/utf16.h>
+
+typedef struct IcuTokenizer IcuTokenizer;
+typedef struct IcuCursor IcuCursor;
+
+struct IcuTokenizer {
+  sqlite3_tokenizer base;
+  char *zLocale;
+};
+
+struct IcuCursor {
+  sqlite3_tokenizer_cursor base;
+
+  UBreakIterator *pIter;      /* ICU break-iterator object */
+  int nChar;                  /* Number of UChar elements in pInput */
+  UChar *aChar;               /* Copy of input using utf-16 encoding */
+  int *aOffset;               /* Offsets of each character in utf-8 input */
+
+  int nBuffer;
+  char *zBuffer;
+
+  int iToken;
+};
+
+/*
+** Create a new tokenizer instance.
+*/
+static int icuCreate(
+  int argc,                            /* Number of entries in argv[] */
+  const char * const *argv,            /* Tokenizer creation arguments */
+  sqlite3_tokenizer **ppTokenizer      /* OUT: Created tokenizer */
+){
+  IcuTokenizer *p;
+  int n = 0;
+
+  if( argc>0 ){
+    n = strlen(argv[0])+1;
+  }
+  p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n);
+  if( !p ){
+    return SQLITE_NOMEM;
+  }
+  memset(p, 0, sizeof(IcuTokenizer));
+
+  if( n ){
+    p->zLocale = (char *)&p[1];
+    memcpy(p->zLocale, argv[0], n);
+  }
+
+  *ppTokenizer = (sqlite3_tokenizer *)p;
+
+  return SQLITE_OK;
+}
+
+/*
+** Destroy a tokenizer
+*/
+static int icuDestroy(sqlite3_tokenizer *pTokenizer){
+  IcuTokenizer *p = (IcuTokenizer *)pTokenizer;
+  sqlite3_free(p);
+  return SQLITE_OK;
+}
+
+/*
+** Prepare to begin tokenizing a particular string. The input
+** string to be tokenized is pInput[0..nBytes-1]. A cursor
+** used to incrementally tokenize this string is returned in
+** *ppCursor.
+*/
+static int icuOpen(
+  sqlite3_tokenizer *pTokenizer,         /* The tokenizer */
+  const char *zInput,                    /* Input string */
+  int nInput,                            /* Length of zInput in bytes */
+  sqlite3_tokenizer_cursor **ppCursor    /* OUT: Tokenization cursor */
+){
+  IcuTokenizer *p = (IcuTokenizer *)pTokenizer;
+  IcuCursor *pCsr;
+
+  const int32_t opt = U_FOLD_CASE_DEFAULT;
+  UErrorCode status = U_ZERO_ERROR;
+  int nChar;
+
+  UChar32 c;
+  int iInput = 0;
+  int iOut = 0;
+
+  *ppCursor = 0;
+
+  if( zInput==0 ){
+    nInput = 0;
+    zInput = "";
+  }else if( nInput<0 ){
+    nInput = strlen(zInput);
+  }
+  nChar = nInput+1;
+  pCsr = (IcuCursor *)sqlite3_malloc(
+      sizeof(IcuCursor) +                /* IcuCursor */
+      ((nChar+3)&~3) * sizeof(UChar) +   /* IcuCursor.aChar[] */
+      (nChar+1) * sizeof(int)            /* IcuCursor.aOffset[] */
+  );
+  if( !pCsr ){
+    return SQLITE_NOMEM;
+  }
+  memset(pCsr, 0, sizeof(IcuCursor));
+  pCsr->aChar = (UChar *)&pCsr[1];
+  pCsr->aOffset = (int *)&pCsr->aChar[(nChar+3)&~3];
+
+  pCsr->aOffset[iOut] = iInput;
+  U8_NEXT(zInput, iInput, nInput, c);
+  while( c>0 ){
+    int isError = 0;
+    c = u_foldCase(c, opt);
+    U16_APPEND(pCsr->aChar, iOut, nChar, c, isError);
+    if( isError ){
+      sqlite3_free(pCsr);
+      return SQLITE_ERROR;
+    }
+    pCsr->aOffset[iOut] = iInput;
+
+    if( iInput<nInput ){
+      U8_NEXT(zInput, iInput, nInput, c);
+    }else{
+      c = 0;
+    }
+  }
+
+  pCsr->pIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status);
+  if( !U_SUCCESS(status) ){
+    sqlite3_free(pCsr);
+    return SQLITE_ERROR;
+  }
+  pCsr->nChar = iOut;
+
+  ubrk_first(pCsr->pIter);
+  *ppCursor = (sqlite3_tokenizer_cursor *)pCsr;
+  return SQLITE_OK;
+}
+
+/*
+** Close a tokenization cursor previously opened by a call to icuOpen().
+*/
+static int icuClose(sqlite3_tokenizer_cursor *pCursor){
+  IcuCursor *pCsr = (IcuCursor *)pCursor;
+  ubrk_close(pCsr->pIter);
+  sqlite3_free(pCsr->zBuffer);
+  sqlite3_free(pCsr);
+  return SQLITE_OK;
+}
+
+/*
+** Extract the next token from a tokenization cursor.
+*/
+static int icuNext(
+  sqlite3_tokenizer_cursor *pCursor,  /* Cursor returned by simpleOpen */
+  const char **ppToken,               /* OUT: *ppToken is the token text */
+  int *pnBytes,                       /* OUT: Number of bytes in token */
+  int *piStartOffset,                 /* OUT: Starting offset of token */
+  int *piEndOffset,                   /* OUT: Ending offset of token */
+  int *piPosition                     /* OUT: Position integer of token */
+){
+  IcuCursor *pCsr = (IcuCursor *)pCursor;
+
+  int iStart = 0;
+  int iEnd = 0;
+  int nByte = 0;
+
+  while( iStart==iEnd ){
+    UChar32 c;
+
+    iStart = ubrk_current(pCsr->pIter);
+    iEnd = ubrk_next(pCsr->pIter);
+    if( iEnd==UBRK_DONE ){
+      return SQLITE_DONE;
+    }
+
+    while( iStart<iEnd ){
+      int iWhite = iStart;
+      U16_NEXT(pCsr->aChar, iWhite, pCsr->nChar, c);
+      if( u_isspace(c) ){
+        iStart = iWhite;
+      }else{
+        break;
+      }
+    }
+    assert(iStart<=iEnd);
+  }
+
+  do {
+    UErrorCode status = U_ZERO_ERROR;
+    if( nByte ){
+      char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte);
+      if( !zNew ){
+        return SQLITE_NOMEM;
+      }
+      pCsr->zBuffer = zNew;
+      pCsr->nBuffer = nByte;
+    }
+
+    u_strToUTF8(
+        pCsr->zBuffer, pCsr->nBuffer, &nByte,    /* Output vars */
+        &pCsr->aChar[iStart], iEnd-iStart,       /* Input vars */
+        &status                                  /* Output success/failure */
+    );
+  } while( nByte>pCsr->nBuffer );
+
+  *ppToken = pCsr->zBuffer;
+  *pnBytes = nByte;
+  *piStartOffset = pCsr->aOffset[iStart];
+  *piEndOffset = pCsr->aOffset[iEnd];
+  *piPosition = pCsr->iToken++;
+
+  return SQLITE_OK;
+}
+
+/*
+** The set of routines that implement the simple tokenizer
+*/
+static const sqlite3_tokenizer_module icuTokenizerModule = {
+  0,                           /* iVersion    */
+  icuCreate,                   /* xCreate     */
+  icuDestroy,                  /* xDestroy    */
+  icuOpen,                     /* xOpen       */
+  icuClose,                    /* xClose      */
+  icuNext,                     /* xNext       */
+  0,                           /* xLanguageid */
+};
+
+/*
+** Set *ppModule to point at the implementation of the ICU tokenizer.
+*/
+SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(
+  sqlite3_tokenizer_module const**ppModule
+){
+  *ppModule = &icuTokenizerModule;
+}
+
+#endif /* defined(SQLITE_ENABLE_ICU) */
+#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */
+
+/************** End of fts3_icu.c ********************************************/
+/************** Begin file sqlite3rbu.c **************************************/
+/*
+** 2014 August 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**   May you do good and not evil.
+**   May you find forgiveness for yourself and forgive others.
+**   May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+**
+** OVERVIEW
+**
+**  The RBU extension requires that the RBU update be packaged as an
+**  SQLite database. The tables it expects to find are described in
+**  sqlite3rbu.h. Essentially, for each table xyz in the target database
+**  that the user wishes to write to, a corresponding data_xyz table is
+**  created in the RBU database and populated with one row for each row to
+**  update, insert or delete from the target table.
+**
+**  The update proceeds in three stages:
+**
+**  1) The database is updated. The modified database pages are written
+**     to a *-oal file. A *-oal file is just like a *-wal file, except
+**     that it is named "<database>-oal" instead of "<database>-wal".
+**     Because regular SQLite clients do not look for files named
+**     "<database>-oal", they go on using the original database in
+**     rollback mode while the *-oal file is being generated.
+**
+**     During this stage RBU does not update the database by writing
+**     directly to the target tables. Instead it creates "imposter"
+**     tables using the SQLITE_TESTCTRL_IMPOSTER interface that it uses
+**     to update each b-tree individually. All updates required by each
+**     b-tree are completed before moving on to the next, and all
+**     updates are done in sorted key order.
+**
+**  2) The "<database>-oal" file is moved to the equivalent
+**     "<database>-wal" location using a call to rename(2). Before doing
+**     this the RBU module takes an EXCLUSIVE lock on the database file,
+**     ensuring that there are no other active readers.
+**
+**     Once the EXCLUSIVE lock is released, any other database readers
+**     detect the new *-wal file and read the database in wal mode. At
+**     this point they see the new version of the database - including
+**     the updates made as part of the RBU update.
+**
+**  3) The new *-wal file is checkpointed. This proceeds in the same way
+**     as a regular database checkpoint, except that a single frame is
+**     checkpointed each time sqlite3rbu_step() is called. If the RBU
+**     handle is closed before the entire *-wal file is checkpointed,
+**     the checkpoint progress is saved in the RBU database and the
+**     checkpoint can be resumed by another RBU client at some point in
+**     the future.
+**
+** POTENTIAL PROBLEMS
+**
+**  The rename() call might not be portable. And RBU is not currently
+**  syncing the directory after renaming the file.
+**
+**  When state is saved, any commit to the *-oal file and the commit to
+**  the RBU update database are not atomic. So if the power fails at the
+**  wrong moment they might get out of sync. As the main database will be
+**  committed before the RBU update database this will likely either just
+**  pass unnoticed, or result in SQLITE_CONSTRAINT errors (due to UNIQUE
+**  constraint violations).
+**
+**  If some client does modify the target database mid RBU update, or some
+**  other error occurs, the RBU extension will keep throwing errors. It's
+**  not really clear how to get out of this state. The system could simply
+**  delete the RBU update database and *-oal file and have the device
+**  download the update again and start over.
+**
+**  At present, for an UPDATE, both the new.* and old.* records are
+**  collected in the rbu_xyz table. And for both UPDATEs and DELETEs all
+**  fields are collected. This means we're probably writing a lot more
+**  data to disk when saving the state of an ongoing update to the RBU
+**  update database than is strictly necessary.
+**
+*/
+
+/* #include <assert.h> */
+/* #include <string.h> */
+/* #include <stdio.h> */
+
+/* #include "sqlite3.h" */
+
+#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RBU)
+/************** Include sqlite3rbu.h in the middle of sqlite3rbu.c ***********/
+/************** Begin file sqlite3rbu.h **************************************/
+/*
+** 2014 August 30
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+**   May you do good and not evil.
+**   May you find forgiveness for yourself and forgive others.
+**   May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file contains the public interface for the RBU extension.
+*/
+
+/*
+** SUMMARY
+**
+** Writing a transaction containing a large number of operations on
+** b-tree indexes that are collectively larger than the available cache
+** memory can be very inefficient.
+**
+** The problem is that in order to update a b-tree, the leaf page (at least)
+** containing the entry being inserted or deleted must be modified. If the
+** working set of leaves is larger than the available cache memory, then a
+** single leaf that is modified more than once as part of the transaction
+** may be loaded from or written to the persistent media multiple times.
+** Additionally, because the index updates are likely to be applied in
+** random order, access to pages within the database is also likely to be in
+** random order, which is itself quite inefficient.
+**
+** One way to improve the situation is to sort the operations on each index
+** by index key before applying them to the b-tree. This leads to an IO
+** pattern that resembles a single linear scan through the index b-tree,
+** and all but guarantees each modified leaf page is loaded and stored
+** exactly once. SQLite uses this trick to improve the performance of
+** CREATE INDEX commands. This extension allows it to be used to improve
+** the performance of large transactions on existing databases.
+**
+** Additionally, this extension allows the work involved in writing the
+** large transaction to be broken down into sub-transactions performed
+** sequentially by separate processes. This is useful if the system cannot
+** guarantee that a single update process will run for long enough to apply
+** the entire update, for example because the update is being applied on a
+** mobile device that is frequently rebooted. Even after the writer process
+** has committed one or more sub-transactions, other database clients continue
+** to read from the original database snapshot. In other words, partially
+** applied transactions are not visible to other clients.
+**
+** "RBU" stands for "Resumable Bulk Update". As in a large database update
+** transmitted via a wireless network to a mobile device. A transaction
+** applied using this extension is hence referred to as an "RBU update".
+**
+**
+** LIMITATIONS
+**
+** An "RBU update" transaction is subject to the following limitations:
+**
+** * The transaction must consist of INSERT, UPDATE and DELETE operations
+**   only.
+**
+** * INSERT statements may not use any default values.
+**
+** * UPDATE and DELETE statements must identify their target rows by
+**   non-NULL PRIMARY KEY values. Rows with NULL values stored in PRIMARY
+**   KEY fields may not be updated or deleted. If the table being written
+**   has no PRIMARY KEY, affected rows must be identified by rowid.
+**
+** * UPDATE statements may not modify PRIMARY KEY columns.
+**
+** * No triggers will be fired.
+**
+** * No foreign key violations are detected or reported.
+**
+** * CHECK constraints are not enforced.
+**
+** * No constraint handling mode except for "OR ROLLBACK" is supported.
+**
+**
+** PREPARATION
+**
+** An "RBU update" is stored as a separate SQLite database. A database
+** containing an RBU update is an "RBU database". For each table in the
+** target database to be updated, the RBU database should contain a table
+** named "data_<target>" containing the same set of columns as the
+** target table, and one more - "rbu_control". The data_% table should
+** have no PRIMARY KEY or UNIQUE constraints, but each column should have
+** the same type as the corresponding column in the target database.
+** The "rbu_control" column should have no type at all. For example, if +** the target database contains: +** +** CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT, c UNIQUE); +** +** Then the RBU database should contain: +** +** CREATE TABLE data_t1(a INTEGER, b TEXT, c, rbu_control); +** +** The order of the columns in the data_% table does not matter. +** +** Instead of a regular table, the RBU database may also contain virtual +** tables or view named using the data_ naming scheme. +** +** Instead of the plain data_ naming scheme, RBU database tables +** may also be named data_, where is any sequence +** of zero or more numeric characters (0-9). This can be significant because +** tables within the RBU database are always processed in order sorted by +** name. By judicious selection of the portion of the names +** of the RBU tables the user can therefore control the order in which they +** are processed. This can be useful, for example, to ensure that "external +** content" FTS4 tables are updated before their underlying content tables. +** +** If the target database table is a virtual table or a table that has no +** PRIMARY KEY declaration, the data_% table must also contain a column +** named "rbu_rowid". This column is mapped to the tables implicit primary +** key column - "rowid". Virtual tables for which the "rowid" column does +** not function like a primary key value cannot be updated using RBU. For +** example, if the target db contains either of the following: +** +** CREATE VIRTUAL TABLE x1 USING fts3(a, b); +** CREATE TABLE x1(a, b) +** +** then the RBU database should contain: +** +** CREATE TABLE data_x1(a, b, rbu_rowid, rbu_control); +** +** All non-hidden columns (i.e. all columns matched by "SELECT *") of the +** target table must be present in the input table. For virtual tables, +** hidden columns are optional - they are updated by RBU if present in +** the input table, or not otherwise. For example, to write to an fts4 +** table with a hidden languageid column such as: +** +** CREATE VIRTUAL TABLE ft1 USING fts4(a, b, languageid='langid'); +** +** Either of the following input table schemas may be used: +** +** CREATE TABLE data_ft1(a, b, langid, rbu_rowid, rbu_control); +** CREATE TABLE data_ft1(a, b, rbu_rowid, rbu_control); +** +** For each row to INSERT into the target database as part of the RBU +** update, the corresponding data_% table should contain a single record +** with the "rbu_control" column set to contain integer value 0. The +** other columns should be set to the values that make up the new record +** to insert. +** +** If the target database table has an INTEGER PRIMARY KEY, it is not +** possible to insert a NULL value into the IPK column. Attempting to +** do so results in an SQLITE_MISMATCH error. +** +** For each row to DELETE from the target database as part of the RBU +** update, the corresponding data_% table should contain a single record +** with the "rbu_control" column set to contain integer value 1. The +** real primary key values of the row to delete should be stored in the +** corresponding columns of the data_% table. The values stored in the +** other columns are not used. +** +** For each row to UPDATE from the target database as part of the RBU +** update, the corresponding data_% table should contain a single record +** with the "rbu_control" column set to contain a value of type text. 
+** The real primary key values identifying the row to update should be
+** stored in the corresponding columns of the data_% table row, as should
+** the new values of all columns being updated. The text value in the
+** "rbu_control" column must contain the same number of characters as
+** there are columns in the target database table, and must consist entirely
+** of 'x' and '.' characters (or in some special cases 'd' - see below). For
+** each column that is being updated, the corresponding character is set to
+** 'x'. For those that remain as they are, the corresponding character of the
+** rbu_control value should be set to '.'. For example, given the tables
+** above, the update statement:
+**
+**   UPDATE t1 SET c = 'usa' WHERE a = 4;
+**
+** is represented by the data_t1 row created by:
+**
+**   INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..x');
+**
+** Instead of an 'x' character, characters of the rbu_control value specified
+** for UPDATEs may also be set to 'd'. In this case, instead of updating the
+** target table with the value stored in the corresponding data_% column, the
+** user-defined SQL function "rbu_delta()" is invoked and the result stored in
+** the target table column. rbu_delta() is invoked with two arguments - the
+** original value currently stored in the target table column and the
+** value specified in the data_xxx table.
+**
+** For example, this row:
+**
+**   INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..d');
+**
+** is similar to an UPDATE statement such as:
+**
+**   UPDATE t1 SET c = rbu_delta(c, 'usa') WHERE a = 4;
+**
+** Finally, if an 'f' character appears in place of a 'd' or 's' in an
+** rbu_control string, the contents of the data_xxx table column are assumed
+** to be a "fossil delta" - a patch to be applied to a blob value in the
+** format used by the fossil source-code management system. In this case
+** the existing value within the target database table must be of type BLOB.
+** It is replaced by the result of applying the specified fossil delta to
+** itself.
+**
+** If the target database table is a virtual table or a table with no PRIMARY
+** KEY, the rbu_control value should not include a character corresponding
+** to the rbu_rowid value. For example, this:
+**
+**   INSERT INTO data_ft1(a, b, rbu_rowid, rbu_control)
+**       VALUES(NULL, 'usa', 12, '.x');
+**
+** causes a result similar to:
+**
+**   UPDATE ft1 SET b = 'usa' WHERE rowid = 12;
+**
+** The data_xxx tables themselves should have no PRIMARY KEY declarations.
+** However, RBU is more efficient if reading the rows in from each data_xxx
+** table in "rowid" order is roughly the same as reading them sorted by
+** the PRIMARY KEY of the corresponding target database table. In other
+** words, rows should be sorted using the destination table PRIMARY KEY
+** fields before they are inserted into the data_xxx tables.
+**
+** USAGE
+**
+** The API declared below allows an application to apply an RBU update
+** stored on disk to an existing target database. Essentially, the
+** application:
+**
+**     1) Opens an RBU handle using the sqlite3rbu_open() function.
+**
+**     2) Registers any required virtual table modules with the database
+**        handle returned by sqlite3rbu_db(). Also, if required, register
+**        the rbu_delta() implementation.
+**
+**     3) Calls the sqlite3rbu_step() function one or more times on
+**        the new handle.
Each call to sqlite3rbu_step() performs a single +** b-tree operation, so thousands of calls may be required to apply +** a complete update. +** +** 4) Calls sqlite3rbu_close() to close the RBU update handle. If +** sqlite3rbu_step() has been called enough times to completely +** apply the update to the target database, then the RBU database +** is marked as fully applied. Otherwise, the state of the RBU +** update application is saved in the RBU database for later +** resumption. +** +** See comments below for more detail on APIs. +** +** If an update is only partially applied to the target database by the +** time sqlite3rbu_close() is called, various state information is saved +** within the RBU database. This allows subsequent processes to automatically +** resume the RBU update from where it left off. +** +** To remove all RBU extension state information, returning an RBU database +** to its original contents, it is sufficient to drop all tables that begin +** with the prefix "rbu_" +** +** DATABASE LOCKING +** +** An RBU update may not be applied to a database in WAL mode. Attempting +** to do so is an error (SQLITE_ERROR). +** +** While an RBU handle is open, a SHARED lock may be held on the target +** database file. This means it is possible for other clients to read the +** database, but not to write it. +** +** If an RBU update is started and then suspended before it is completed, +** then an external client writes to the database, then attempting to resume +** the suspended RBU update is also an error (SQLITE_BUSY). +*/ + +#ifndef _SQLITE3RBU_H +#define _SQLITE3RBU_H + +/* #include "sqlite3.h" ** Required for error code definitions ** */ + +#if 0 +extern "C" { +#endif + +typedef struct sqlite3rbu sqlite3rbu; + +/* +** Open an RBU handle. +** +** Argument zTarget is the path to the target database. Argument zRbu is +** the path to the RBU database. Each call to this function must be matched +** by a call to sqlite3rbu_close(). When opening the databases, RBU passes +** the SQLITE_CONFIG_URI flag to sqlite3_open_v2(). So if either zTarget +** or zRbu begin with "file:", it will be interpreted as an SQLite +** database URI, not a regular file name. +** +** If the zState argument is passed a NULL value, the RBU extension stores +** the current state of the update (how many rows have been updated, which +** indexes are yet to be updated etc.) within the RBU database itself. This +** can be convenient, as it means that the RBU application does not need to +** organize removing a separate state file after the update is concluded. +** Or, if zState is non-NULL, it must be a path to a database file in which +** the RBU extension can store the state of the update. +** +** When resuming an RBU update, the zState argument must be passed the same +** value as when the RBU update was started. +** +** Once the RBU update is finished, the RBU extension does not +** automatically remove any zState database file, even if it created it. +** +** By default, RBU uses the default VFS to access the files on disk. To +** use a VFS other than the default, an SQLite "file:" URI containing a +** "vfs=..." option may be passed as the zTarget option. +** +** IMPORTANT NOTE FOR ZIPVFS USERS: The RBU extension works with all of +** SQLite's built-in VFSs, including the multiplexor VFS. However it does +** not work out of the box with zipvfs. Refer to the comment describing +** the zipvfs_create_vfs() API below for details on using RBU with zipvfs. 
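+**
+** A minimal sketch of the USAGE steps described above (error handling
+** omitted; the file names are hypothetical):
+**
+**     sqlite3rbu *pRbu = sqlite3rbu_open("target.db", "update.rbu", 0);
+**     while( sqlite3rbu_step(pRbu)==SQLITE_OK ){
+**       /* one b-tree operation per call; it is safe to stop early and
+**       ** resume later with another open()/step() sequence */
+**     }
+**     char *zErr = 0;
+**     int rc = sqlite3rbu_close(pRbu, &zErr);  /* SQLITE_DONE if complete */
+**     sqlite3_free(zErr);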
+*/
+SQLITE_API sqlite3rbu *sqlite3rbu_open(
+  const char *zTarget,
+  const char *zRbu,
+  const char *zState
+);
+
+/*
+** Open an RBU handle to perform an RBU vacuum on database file zTarget.
+** An RBU vacuum is similar to SQLite's built-in VACUUM command, except
+** that it can be suspended and resumed like an RBU update.
+**
+** The second argument to this function identifies a database in which
+** to store the state of the RBU vacuum operation if it is suspended. The
+** first time sqlite3rbu_vacuum() is called, to start an RBU vacuum
+** operation, the state database should either not exist or be empty
+** (contain no tables). If an RBU vacuum is suspended by calling
+** sqlite3rbu_close() on the RBU handle before sqlite3rbu_step() has
+** returned SQLITE_DONE, the vacuum state is stored in the state database.
+** The vacuum can be resumed by calling this function to open a new RBU
+** handle specifying the same target and state databases.
+**
+** If the second argument passed to this function is NULL, then the
+** name of the state database is "<database>-vacuum", where <database>
+** is the name of the target database file. In this case, on UNIX, if the
+** state database is not already present in the file-system, it is created
+** with the same permissions as the target db.
+**
+** This function does not delete the state database after an RBU vacuum
+** is completed, even if it created it. However, if the call to
+** sqlite3rbu_close() returns any value other than SQLITE_OK, the contents
+** of the state tables within the state database are zeroed. This way,
+** the next call to sqlite3rbu_vacuum() opens a handle that starts a
+** new RBU vacuum operation.
+**
+** As with sqlite3rbu_open(), Zipvfs users should refer to the comment
+** describing the sqlite3rbu_create_vfs() API function below for
+** a description of the complications associated with using RBU with
+** zipvfs databases.
+*/
+SQLITE_API sqlite3rbu *sqlite3rbu_vacuum(
+  const char *zTarget,
+  const char *zState
+);
+
+/*
+** Internally, each RBU connection uses a separate SQLite database
+** connection to access the target and rbu update databases. This
+** API allows the application direct access to these database handles.
+**
+** The first argument passed to this function must be a valid, open, RBU
+** handle. The second argument should be passed zero to access the target
+** database handle, or non-zero to access the rbu update database handle.
+** Accessing the underlying database handles may be useful in the
+** following scenarios:
+**
+**   * If any target tables are virtual tables, it may be necessary to
+**     call sqlite3_create_module() on the target database handle to
+**     register the required virtual table implementations.
+**
+**   * If the data_xxx tables in the RBU source database are virtual
+**     tables, the application may need to call sqlite3_create_module() on
+**     the rbu update db handle to register any required virtual table
+**     implementations.
+**
+**   * If the application uses the "rbu_delta()" feature described above,
+**     it must use sqlite3_create_function() or similar to register the
+**     rbu_delta() implementation with the target database handle.
+**
+** If an error has occurred, either while opening or stepping the RBU object,
+** this function may return NULL. The error code and message may be collected
+** when sqlite3rbu_close() is called.
+**
+** Database handles returned by this function remain valid until the next
+** call to any sqlite3rbu_xxx() function other than sqlite3rbu_db().
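+**
+** For example, to register a hypothetical application-supplied
+** rbu_delta() implementation (xRbuDeltaFunc is not defined here):
+**
+**     sqlite3 *dbTarget = sqlite3rbu_db(pRbu, 0);   /* 0 == target db */
+**     if( dbTarget ){
+**       sqlite3_create_function(dbTarget, "rbu_delta", 2, SQLITE_UTF8,
+**           0, xRbuDeltaFunc, 0, 0);
+**     }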
+*/ +SQLITE_API sqlite3 *sqlite3rbu_db(sqlite3rbu*, int bRbu); + +/* +** Do some work towards applying the RBU update to the target db. +** +** Return SQLITE_DONE if the update has been completely applied, or +** SQLITE_OK if no error occurs but there remains work to do to apply +** the RBU update. If an error does occur, some other error code is +** returned. +** +** Once a call to sqlite3rbu_step() has returned a value other than +** SQLITE_OK, all subsequent calls on the same RBU handle are no-ops +** that immediately return the same value. +*/ +SQLITE_API int sqlite3rbu_step(sqlite3rbu *pRbu); + +/* +** Force RBU to save its state to disk. +** +** If a power failure or application crash occurs during an update, following +** system recovery RBU may resume the update from the point at which the state +** was last saved. In other words, from the most recent successful call to +** sqlite3rbu_close() or this function. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +*/ +SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *pRbu); + +/* +** Close an RBU handle. +** +** If the RBU update has been completely applied, mark the RBU database +** as fully applied. Otherwise, assuming no error has occurred, save the +** current state of the RBU update appliation to the RBU database. +** +** If an error has already occurred as part of an sqlite3rbu_step() +** or sqlite3rbu_open() call, or if one occurs within this function, an +** SQLite error code is returned. Additionally, *pzErrmsg may be set to +** point to a buffer containing a utf-8 formatted English language error +** message. It is the responsibility of the caller to eventually free any +** such buffer using sqlite3_free(). +** +** Otherwise, if no error occurs, this function returns SQLITE_OK if the +** update has been partially applied, or SQLITE_DONE if it has been +** completely applied. +*/ +SQLITE_API int sqlite3rbu_close(sqlite3rbu *pRbu, char **pzErrmsg); + +/* +** Return the total number of key-value operations (inserts, deletes or +** updates) that have been performed on the target database since the +** current RBU update was started. +*/ +SQLITE_API sqlite3_int64 sqlite3rbu_progress(sqlite3rbu *pRbu); + +/* +** Obtain permyriadage (permyriadage is to 10000 as percentage is to 100) +** progress indications for the two stages of an RBU update. This API may +** be useful for driving GUI progress indicators and similar. +** +** An RBU update is divided into two stages: +** +** * Stage 1, in which changes are accumulated in an oal/wal file, and +** * Stage 2, in which the contents of the wal file are copied into the +** main database. +** +** The update is visible to non-RBU clients during stage 2. During stage 1 +** non-RBU reader clients may see the original database. +** +** If this API is called during stage 2 of the update, output variable +** (*pnOne) is set to 10000 to indicate that stage 1 has finished and (*pnTwo) +** to a value between 0 and 10000 to indicate the permyriadage progress of +** stage 2. A value of 5000 indicates that stage 2 is half finished, +** 9000 indicates that it is 90% finished, and so on. +** +** If this API is called during stage 1 of the update, output variable +** (*pnTwo) is set to 0 to indicate that stage 2 has not yet started. The +** value to which (*pnOne) is set depends on whether or not the RBU +** database contains an "rbu_count" table. 
The rbu_count table, if it +** exists, must contain the same columns as the following: +** +** CREATE TABLE rbu_count(tbl TEXT PRIMARY KEY, cnt INTEGER) WITHOUT ROWID; +** +** There must be one row in the table for each source (data_xxx) table within +** the RBU database. The 'tbl' column should contain the name of the source +** table. The 'cnt' column should contain the number of rows within the +** source table. +** +** If the rbu_count table is present and populated correctly and this +** API is called during stage 1, the *pnOne output variable is set to the +** permyriadage progress of the same stage. If the rbu_count table does +** not exist, then (*pnOne) is set to -1 during stage 1. If the rbu_count +** table exists but is not correctly populated, the value of the *pnOne +** output variable during stage 1 is undefined. +*/ +SQLITE_API void sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnOne, int *pnTwo); + +/* +** Obtain an indication as to the current stage of an RBU update or vacuum. +** This function always returns one of the SQLITE_RBU_STATE_XXX constants +** defined in this file. Return values should be interpreted as follows: +** +** SQLITE_RBU_STATE_OAL: +** RBU is currently building a *-oal file. The next call to sqlite3rbu_step() +** may either add further data to the *-oal file, or compute data that will +** be added by a subsequent call. +** +** SQLITE_RBU_STATE_MOVE: +** RBU has finished building the *-oal file. The next call to sqlite3rbu_step() +** will move the *-oal file to the equivalent *-wal path. If the current +** operation is an RBU update, then the updated version of the database +** file will become visible to ordinary SQLite clients following the next +** call to sqlite3rbu_step(). +** +** SQLITE_RBU_STATE_CHECKPOINT: +** RBU is currently performing an incremental checkpoint. The next call to +** sqlite3rbu_step() will copy a page of data from the *-wal file into +** the target database file. +** +** SQLITE_RBU_STATE_DONE: +** The RBU operation has finished. Any subsequent calls to sqlite3rbu_step() +** will immediately return SQLITE_DONE. +** +** SQLITE_RBU_STATE_ERROR: +** An error has occurred. Any subsequent calls to sqlite3rbu_step() will +** immediately return the SQLite error code associated with the error. +*/ +#define SQLITE_RBU_STATE_OAL 1 +#define SQLITE_RBU_STATE_MOVE 2 +#define SQLITE_RBU_STATE_CHECKPOINT 3 +#define SQLITE_RBU_STATE_DONE 4 +#define SQLITE_RBU_STATE_ERROR 5 + +SQLITE_API int sqlite3rbu_state(sqlite3rbu *pRbu); + +/* +** Create an RBU VFS named zName that accesses the underlying file-system +** via existing VFS zParent. Or, if the zParent parameter is passed NULL, +** then the new RBU VFS uses the default system VFS to access the file-system. +** The new object is registered as a non-default VFS with SQLite before +** returning. +** +** Part of the RBU implementation uses a custom VFS object. Usually, this +** object is created and deleted automatically by RBU. +** +** The exception is for applications that also use zipvfs. In this case, +** the custom VFS must be explicitly created by the user before the RBU +** handle is opened. The RBU VFS should be installed so that the zipvfs +** VFS uses the RBU VFS, which in turn uses any other VFS layers in use +** (for example multiplexor) to access the file-system. For example, +** to assemble an RBU enabled VFS stack that uses both zipvfs and +** multiplexor (error checking omitted): +** +** // Create a VFS named "multiplex" (not the default). 
+**   sqlite3_multiplex_initialize(0, 0);
+**
+**   // Create an rbu VFS named "rbu" that uses multiplexor. If the
+**   // second argument were replaced with NULL, the "rbu" VFS would
+**   // access the file-system via the system default VFS, bypassing the
+**   // multiplexor.
+**   sqlite3rbu_create_vfs("rbu", "multiplex");
+**
+**   // Create a zipvfs VFS named "zipvfs" that uses rbu.
+**   zipvfs_create_vfs_v3("zipvfs", "rbu", 0, xCompressorAlgorithmDetector);
+**
+**   // Make zipvfs the default VFS.
+**   sqlite3_vfs_register(sqlite3_vfs_find("zipvfs"), 1);
+**
+** Because the default VFS created above includes RBU functionality, it
+** may be used by RBU clients. Attempting to use RBU with a zipvfs VFS stack
+** that does not include the RBU layer results in an error.
+**
+** The overhead of adding the "rbu" VFS to the system is negligible for
+** non-RBU users. There is no harm in an application accessing the
+** file-system via "rbu" all the time, even if it only uses RBU functionality
+** occasionally.
+*/
+SQLITE_API int sqlite3rbu_create_vfs(const char *zName, const char *zParent);
+
+/*
+** Deregister and destroy an RBU vfs created by an earlier call to
+** sqlite3rbu_create_vfs().
+**
+** VFS objects are not reference counted. If a VFS object is destroyed
+** before all database handles that use it have been closed, the results
+** are undefined.
+*/
+SQLITE_API void sqlite3rbu_destroy_vfs(const char *zName);
+
+#if 0
+}  /* end of the 'extern "C"' block */
+#endif
+
+#endif /* _SQLITE3RBU_H */
+
+/************** End of sqlite3rbu.h ******************************************/
+/************** Continuing where we left off in sqlite3rbu.c *****************/
+
+#if defined(_WIN32_WCE)
+/* #include "windows.h" */
+#endif
+
+/* Maximum number of prepared UPDATE statements held by this module */
+#define SQLITE_RBU_UPDATE_CACHESIZE 16
+
+/*
+** Swap two objects of type TYPE.
+*/
+#if !defined(SQLITE_AMALGAMATION)
+# define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
+#endif
+
+/*
+** The rbu_state table is used to save the state of a partially applied
+** update so that it can be resumed later. The table consists of integer
+** keys mapped to values as follows:
+**
+** RBU_STATE_STAGE:
+**   May be set to integer values 1, 2, 4 or 5. As follows:
+**       1: the *-oal file is currently under construction.
+**       2: the *-oal file has been constructed, but not yet moved
+**          to the *-wal path.
+**       4: the checkpoint is underway.
+**       5: the rbu update has been checkpointed.
+**
+** RBU_STATE_TBL:
+**   Only valid if STAGE==1. The target database name of the table
+**   currently being written.
+**
+** RBU_STATE_IDX:
+**   Only valid if STAGE==1. The target database name of the index
+**   currently being written, or NULL if the main table is currently being
+**   updated.
+**
+** RBU_STATE_ROW:
+**   Only valid if STAGE==1. Number of rows already processed for the current
+**   table/index.
+**
+** RBU_STATE_PROGRESS:
+**   Total number of sqlite3rbu_step() calls made so far as part of this
+**   rbu update.
+**
+** RBU_STATE_CKPT:
+**   Valid if STAGE==4. The 64-bit checksum associated with the wal-index
+**   header created by recovering the *-wal file. This is used to detect
+**   cases when another client appends frames to the *-wal file in the
+**   middle of an incremental checkpoint (an incremental checkpoint cannot
+**   be continued if this happens).
+**
+** RBU_STATE_COOKIE:
+**   Valid if STAGE==1. The current change-counter cookie value in the
+**   target db file.
+**
+** RBU_STATE_OALSZ:
+**   Valid if STAGE==1.
The size in bytes of the *-oal file. +*/ +#define RBU_STATE_STAGE 1 +#define RBU_STATE_TBL 2 +#define RBU_STATE_IDX 3 +#define RBU_STATE_ROW 4 +#define RBU_STATE_PROGRESS 5 +#define RBU_STATE_CKPT 6 +#define RBU_STATE_COOKIE 7 +#define RBU_STATE_OALSZ 8 +#define RBU_STATE_PHASEONESTEP 9 + +#define RBU_STAGE_OAL 1 +#define RBU_STAGE_MOVE 2 +#define RBU_STAGE_CAPTURE 3 +#define RBU_STAGE_CKPT 4 +#define RBU_STAGE_DONE 5 + + +#define RBU_CREATE_STATE \ + "CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)" + +typedef struct RbuFrame RbuFrame; +typedef struct RbuObjIter RbuObjIter; +typedef struct RbuState RbuState; +typedef struct rbu_vfs rbu_vfs; +typedef struct rbu_file rbu_file; +typedef struct RbuUpdateStmt RbuUpdateStmt; + +#if !defined(SQLITE_AMALGAMATION) +typedef unsigned int u32; +typedef unsigned short u16; +typedef unsigned char u8; +typedef sqlite3_int64 i64; +#endif + +/* +** These values must match the values defined in wal.c for the equivalent +** locks. These are not magic numbers as they are part of the SQLite file +** format. +*/ +#define WAL_LOCK_WRITE 0 +#define WAL_LOCK_CKPT 1 +#define WAL_LOCK_READ0 3 + +#define SQLITE_FCNTL_RBUCNT 5149216 + +/* +** A structure to store values read from the rbu_state table in memory. +*/ +struct RbuState { + int eStage; + char *zTbl; + char *zIdx; + i64 iWalCksum; + int nRow; + i64 nProgress; + u32 iCookie; + i64 iOalSz; + i64 nPhaseOneStep; +}; + +struct RbuUpdateStmt { + char *zMask; /* Copy of update mask used with pUpdate */ + sqlite3_stmt *pUpdate; /* Last update statement (or NULL) */ + RbuUpdateStmt *pNext; +}; + +/* +** An iterator of this type is used to iterate through all objects in +** the target database that require updating. For each such table, the +** iterator visits, in order: +** +** * the table itself, +** * each index of the table (zero or more points to visit), and +** * a special "cleanup table" state. +** +** abIndexed: +** If the table has no indexes on it, abIndexed is set to NULL. Otherwise, +** it points to an array of flags nTblCol elements in size. The flag is +** set for each column that is either a part of the PK or a part of an +** index. Or clear otherwise. +** +*/ +struct RbuObjIter { + sqlite3_stmt *pTblIter; /* Iterate through tables */ + sqlite3_stmt *pIdxIter; /* Index iterator */ + int nTblCol; /* Size of azTblCol[] array */ + char **azTblCol; /* Array of unquoted target column names */ + char **azTblType; /* Array of target column types */ + int *aiSrcOrder; /* src table col -> target table col */ + u8 *abTblPk; /* Array of flags, set on target PK columns */ + u8 *abNotNull; /* Array of flags, set on NOT NULL columns */ + u8 *abIndexed; /* Array of flags, set on indexed & PK cols */ + int eType; /* Table type - an RBU_PK_XXX value */ + + /* Output variables. zTbl==0 implies EOF. */ + int bCleanup; /* True in "cleanup" state */ + const char *zTbl; /* Name of target db table */ + const char *zDataTbl; /* Name of rbu db table (or null) */ + const char *zIdx; /* Name of target db index (or null) */ + int iTnum; /* Root page of current object */ + int iPkTnum; /* If eType==EXTERNAL, root of PK index */ + int bUnique; /* Current index is unique */ + int nIndex; /* Number of aux. 
indexes on table zTbl */ + + /* Statements created by rbuObjIterPrepareAll() */ + int nCol; /* Number of columns in current object */ + sqlite3_stmt *pSelect; /* Source data */ + sqlite3_stmt *pInsert; /* Statement for INSERT operations */ + sqlite3_stmt *pDelete; /* Statement for DELETE ops */ + sqlite3_stmt *pTmpInsert; /* Insert into rbu_tmp_$zDataTbl */ + + /* Last UPDATE used (for PK b-tree updates only), or NULL. */ + RbuUpdateStmt *pRbuUpdate; +}; + +/* +** Values for RbuObjIter.eType +** +** 0: Table does not exist (error) +** 1: Table has an implicit rowid. +** 2: Table has an explicit IPK column. +** 3: Table has an external PK index. +** 4: Table is WITHOUT ROWID. +** 5: Table is a virtual table. +*/ +#define RBU_PK_NOTABLE 0 +#define RBU_PK_NONE 1 +#define RBU_PK_IPK 2 +#define RBU_PK_EXTERNAL 3 +#define RBU_PK_WITHOUT_ROWID 4 +#define RBU_PK_VTAB 5 + + +/* +** Within the RBU_STAGE_OAL stage, each call to sqlite3rbu_step() performs +** one of the following operations. +*/ +#define RBU_INSERT 1 /* Insert on a main table b-tree */ +#define RBU_DELETE 2 /* Delete a row from a main table b-tree */ +#define RBU_REPLACE 3 /* Delete and then insert a row */ +#define RBU_IDX_DELETE 4 /* Delete a row from an aux. index b-tree */ +#define RBU_IDX_INSERT 5 /* Insert on an aux. index b-tree */ + +#define RBU_UPDATE 6 /* Update a row in a main table b-tree */ + +/* +** A single step of an incremental checkpoint - frame iWalFrame of the wal +** file should be copied to page iDbPage of the database file. +*/ +struct RbuFrame { + u32 iDbPage; + u32 iWalFrame; +}; + +/* +** RBU handle. +** +** nPhaseOneStep: +** If the RBU database contains an rbu_count table, this value is set to +** a running estimate of the number of b-tree operations required to +** finish populating the *-oal file. This allows the sqlite3_bp_progress() +** API to calculate the permyriadage progress of populating the *-oal file +** using the formula: +** +** permyriadage = (10000 * nProgress) / nPhaseOneStep +** +** nPhaseOneStep is initialized to the sum of: +** +** nRow * (nIndex + 1) +** +** for all source tables in the RBU database, where nRow is the number +** of rows in the source table and nIndex the number of indexes on the +** corresponding target database table. +** +** This estimate is accurate if the RBU update consists entirely of +** INSERT operations. However, it is inaccurate if: +** +** * the RBU update contains any UPDATE operations. If the PK specified +** for an UPDATE operation does not exist in the target table, then +** no b-tree operations are required on index b-trees. Or if the +** specified PK does exist, then (nIndex*2) such operations are +** required (one delete and one insert on each index b-tree). +** +** * the RBU update contains any DELETE operations for which the specified +** PK does not exist. In this case no operations are required on index +** b-trees. +** +** * the RBU update contains REPLACE operations. These are similar to +** UPDATE operations. +** +** nPhaseOneStep is updated to account for the conditions above during the +** first pass of each source table. The updated nPhaseOneStep value is +** stored in the rbu_state table if the RBU update is suspended. 
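+**
+** Worked example (illustrative figures only): an RBU database whose single
+** data_xyz table contains 1000 rows, targeting a table with two indexes,
+** yields an initial estimate of nPhaseOneStep = 1000*(2+1) = 3000. After
+** 1500 steps, stage-1 progress would be reported as (10000*1500)/3000 =
+** 5000 permyriad, i.e. 50%.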
+*/ +struct sqlite3rbu { + int eStage; /* Value of RBU_STATE_STAGE field */ + sqlite3 *dbMain; /* target database handle */ + sqlite3 *dbRbu; /* rbu database handle */ + char *zTarget; /* Path to target db */ + char *zRbu; /* Path to rbu db */ + char *zState; /* Path to state db (or NULL if zRbu) */ + char zStateDb[5]; /* Db name for state ("stat" or "main") */ + int rc; /* Value returned by last rbu_step() call */ + char *zErrmsg; /* Error message if rc!=SQLITE_OK */ + int nStep; /* Rows processed for current object */ + int nProgress; /* Rows processed for all objects */ + RbuObjIter objiter; /* Iterator for skipping through tbl/idx */ + const char *zVfsName; /* Name of automatically created rbu vfs */ + rbu_file *pTargetFd; /* File handle open on target db */ + i64 iOalSz; + i64 nPhaseOneStep; + + /* The following state variables are used as part of the incremental + ** checkpoint stage (eStage==RBU_STAGE_CKPT). See comments surrounding + ** function rbuSetupCheckpoint() for details. */ + u32 iMaxFrame; /* Largest iWalFrame value in aFrame[] */ + u32 mLock; + int nFrame; /* Entries in aFrame[] array */ + int nFrameAlloc; /* Allocated size of aFrame[] array */ + RbuFrame *aFrame; + int pgsz; + u8 *aBuf; + i64 iWalCksum; + + /* Used in RBU vacuum mode only */ + int nRbu; /* Number of RBU VFS in the stack */ + rbu_file *pRbuFd; /* Fd for main db of dbRbu */ +}; + +/* +** An rbu VFS is implemented using an instance of this structure. +*/ +struct rbu_vfs { + sqlite3_vfs base; /* rbu VFS shim methods */ + sqlite3_vfs *pRealVfs; /* Underlying VFS */ + sqlite3_mutex *mutex; /* Mutex to protect pMain */ + rbu_file *pMain; /* Linked list of main db files */ +}; + +/* +** Each file opened by an rbu VFS is represented by an instance of +** the following structure. +*/ +struct rbu_file { + sqlite3_file base; /* sqlite3_file methods */ + sqlite3_file *pReal; /* Underlying file handle */ + rbu_vfs *pRbuVfs; /* Pointer to the rbu_vfs object */ + sqlite3rbu *pRbu; /* Pointer to rbu object (rbu target only) */ + + int openFlags; /* Flags this file was opened with */ + u32 iCookie; /* Cookie value for main db files */ + u8 iWriteVer; /* "write-version" value for main db files */ + u8 bNolock; /* True to fail EXCLUSIVE locks */ + + int nShm; /* Number of entries in apShm[] array */ + char **apShm; /* Array of mmap'd *-shm regions */ + char *zDel; /* Delete this when closing file */ + + const char *zWal; /* Wal filename for this main db file */ + rbu_file *pWalFd; /* Wal file descriptor for this main db */ + rbu_file *pMainNext; /* Next MAIN_DB file */ +}; + +/* +** True for an RBU vacuum handle, or false otherwise. +*/ +#define rbuIsVacuum(p) ((p)->zTarget==0) + + +/************************************************************************* +** The following three functions, found below: +** +** rbuDeltaGetInt() +** rbuDeltaChecksum() +** rbuDeltaApply() +** +** are lifted from the fossil source code (http://fossil-scm.org). They +** are used to implement the scalar SQL function rbu_fossil_delta(). +*/ + +/* +** Read bytes from *pz and convert them into a positive integer. When +** finished, leave *pz pointing to the first character past the end of +** the integer. The *pLen parameter holds the length of the string +** in *pz and is decremented once for each character in the integer. 
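+**
+** The digits are base-64 characters drawn from the set 0-9, A-Z, "_",
+** a-z and "~" (see the zValue[] table below). Illustrative example: the
+** run "2G" decodes character by character to 2 and 16, giving a value of
+** (2<<6)+16 == 144.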
+*/ +static unsigned int rbuDeltaGetInt(const char **pz, int *pLen){ + static const signed char zValue[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, -1, -1, -1, -1, 36, + -1, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, -1, -1, -1, 63, -1, + }; + unsigned int v = 0; + int c; + unsigned char *z = (unsigned char*)*pz; + unsigned char *zStart = z; + while( (c = zValue[0x7f&*(z++)])>=0 ){ + v = (v<<6) + c; + } + z--; + *pLen -= z - zStart; + *pz = (char*)z; + return v; +} + +/* +** Compute a 32-bit checksum on the N-byte buffer. Return the result. +*/ +static unsigned int rbuDeltaChecksum(const char *zIn, size_t N){ + const unsigned char *z = (const unsigned char *)zIn; + unsigned sum0 = 0; + unsigned sum1 = 0; + unsigned sum2 = 0; + unsigned sum3 = 0; + while(N >= 16){ + sum0 += ((unsigned)z[0] + z[4] + z[8] + z[12]); + sum1 += ((unsigned)z[1] + z[5] + z[9] + z[13]); + sum2 += ((unsigned)z[2] + z[6] + z[10]+ z[14]); + sum3 += ((unsigned)z[3] + z[7] + z[11]+ z[15]); + z += 16; + N -= 16; + } + while(N >= 4){ + sum0 += z[0]; + sum1 += z[1]; + sum2 += z[2]; + sum3 += z[3]; + z += 4; + N -= 4; + } + sum3 += (sum2 << 8) + (sum1 << 16) + (sum0 << 24); + switch(N){ + case 3: sum3 += (z[2] << 8); + case 2: sum3 += (z[1] << 16); + case 1: sum3 += (z[0] << 24); + default: ; + } + return sum3; +} + +/* +** Apply a delta. +** +** The output buffer should be big enough to hold the whole output +** file and a NUL terminator at the end. The delta_output_size() +** routine will determine this size for you. +** +** The delta string should be null-terminated. But the delta string +** may contain embedded NUL characters (if the input and output are +** binary files) so we also have to pass in the length of the delta in +** the lenDelta parameter. +** +** This function returns the size of the output file in bytes (excluding +** the final NUL terminator character). Except, if the delta string is +** malformed or intended for use with a source file other than zSrc, +** then this routine returns -1. +** +** Refer to the delta_create() documentation above for a description +** of the delta file format. 
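+**
+** For reference while reading the parser below: a delta begins with the
+** target size as one of these base-64 integers followed by '\n', then a
+** series of commands. "<cnt>@<ofst>," copies cnt bytes from offset ofst
+** of the source, "<cnt>:<bytes>" inserts cnt literal bytes, and
+** "<cksum>;" terminates the delta, where <cksum> is the encoded
+** rbuDeltaChecksum() of the complete output. A (hypothetical) delta that
+** produces the three byte output "abc" from any source is therefore
+** "3\n3:abc" followed by the encoded checksum and ";".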
+*/ +static int rbuDeltaApply( + const char *zSrc, /* The source or pattern file */ + int lenSrc, /* Length of the source file */ + const char *zDelta, /* Delta to apply to the pattern */ + int lenDelta, /* Length of the delta */ + char *zOut /* Write the output into this preallocated buffer */ +){ + unsigned int limit; + unsigned int total = 0; +#ifndef FOSSIL_OMIT_DELTA_CKSUM_TEST + char *zOrigOut = zOut; +#endif + + limit = rbuDeltaGetInt(&zDelta, &lenDelta); + if( *zDelta!='\n' ){ + /* ERROR: size integer not terminated by "\n" */ + return -1; + } + zDelta++; lenDelta--; + while( *zDelta && lenDelta>0 ){ + unsigned int cnt, ofst; + cnt = rbuDeltaGetInt(&zDelta, &lenDelta); + switch( zDelta[0] ){ + case '@': { + zDelta++; lenDelta--; + ofst = rbuDeltaGetInt(&zDelta, &lenDelta); + if( lenDelta>0 && zDelta[0]!=',' ){ + /* ERROR: copy command not terminated by ',' */ + return -1; + } + zDelta++; lenDelta--; + total += cnt; + if( total>limit ){ + /* ERROR: copy exceeds output file size */ + return -1; + } + if( (int)(ofst+cnt) > lenSrc ){ + /* ERROR: copy extends past end of input */ + return -1; + } + memcpy(zOut, &zSrc[ofst], cnt); + zOut += cnt; + break; + } + case ':': { + zDelta++; lenDelta--; + total += cnt; + if( total>limit ){ + /* ERROR: insert command gives an output larger than predicted */ + return -1; + } + if( (int)cnt>lenDelta ){ + /* ERROR: insert count exceeds size of delta */ + return -1; + } + memcpy(zOut, zDelta, cnt); + zOut += cnt; + zDelta += cnt; + lenDelta -= cnt; + break; + } + case ';': { + zDelta++; lenDelta--; + zOut[0] = 0; +#ifndef FOSSIL_OMIT_DELTA_CKSUM_TEST + if( cnt!=rbuDeltaChecksum(zOrigOut, total) ){ + /* ERROR: bad checksum */ + return -1; + } +#endif + if( total!=limit ){ + /* ERROR: generated size does not match predicted size */ + return -1; + } + return total; + } + default: { + /* ERROR: unknown delta operator */ + return -1; + } + } + } + /* ERROR: unterminated delta */ + return -1; +} + +static int rbuDeltaOutputSize(const char *zDelta, int lenDelta){ + int size; + size = rbuDeltaGetInt(&zDelta, &lenDelta); + if( *zDelta!='\n' ){ + /* ERROR: size integer not terminated by "\n" */ + return -1; + } + return size; +} + +/* +** End of code taken from fossil. +*************************************************************************/ + +/* +** Implementation of SQL scalar function rbu_fossil_delta(). +** +** This function applies a fossil delta patch to a blob. Exactly two +** arguments must be passed to this function. The first is the blob to +** patch and the second the patch to apply. If no error occurs, this +** function returns the patched blob. 
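+**
+** The function is not normally called directly by users. Within an RBU
+** update, an UPDATE whose rbu_control mask contains an 'f' for some
+** column causes rbuObjIterGetSetlist() (later in this file) to generate
+** an assignment of the form:
+**
+**     "col" = rbu_fossil_delta("col", ?N)
+**
+** so the patch is applied to the existing column value as the row is
+** updated.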
+*/ +static void rbuFossilDeltaFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + const char *aDelta; + int nDelta; + const char *aOrig; + int nOrig; + + int nOut; + int nOut2; + char *aOut; + + assert( argc==2 ); + + nOrig = sqlite3_value_bytes(argv[0]); + aOrig = (const char*)sqlite3_value_blob(argv[0]); + nDelta = sqlite3_value_bytes(argv[1]); + aDelta = (const char*)sqlite3_value_blob(argv[1]); + + /* Figure out the size of the output */ + nOut = rbuDeltaOutputSize(aDelta, nDelta); + if( nOut<0 ){ + sqlite3_result_error(context, "corrupt fossil delta", -1); + return; + } + + aOut = sqlite3_malloc(nOut+1); + if( aOut==0 ){ + sqlite3_result_error_nomem(context); + }else{ + nOut2 = rbuDeltaApply(aOrig, nOrig, aDelta, nDelta, aOut); + if( nOut2!=nOut ){ + sqlite3_result_error(context, "corrupt fossil delta", -1); + }else{ + sqlite3_result_blob(context, aOut, nOut, sqlite3_free); + } + } +} + + +/* +** Prepare the SQL statement in buffer zSql against database handle db. +** If successful, set *ppStmt to point to the new statement and return +** SQLITE_OK. +** +** Otherwise, if an error does occur, set *ppStmt to NULL and return +** an SQLite error code. Additionally, set output variable *pzErrmsg to +** point to a buffer containing an error message. It is the responsibility +** of the caller to (eventually) free this buffer using sqlite3_free(). +*/ +static int prepareAndCollectError( + sqlite3 *db, + sqlite3_stmt **ppStmt, + char **pzErrmsg, + const char *zSql +){ + int rc = sqlite3_prepare_v2(db, zSql, -1, ppStmt, 0); + if( rc!=SQLITE_OK ){ + *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + *ppStmt = 0; + } + return rc; +} + +/* +** Reset the SQL statement passed as the first argument. Return a copy +** of the value returned by sqlite3_reset(). +** +** If an error has occurred, then set *pzErrmsg to point to a buffer +** containing an error message. It is the responsibility of the caller +** to eventually free this buffer using sqlite3_free(). +*/ +static int resetAndCollectError(sqlite3_stmt *pStmt, char **pzErrmsg){ + int rc = sqlite3_reset(pStmt); + if( rc!=SQLITE_OK ){ + *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(sqlite3_db_handle(pStmt))); + } + return rc; +} + +/* +** Unless it is NULL, argument zSql points to a buffer allocated using +** sqlite3_malloc containing an SQL statement. This function prepares the SQL +** statement against database db and frees the buffer. If statement +** compilation is successful, *ppStmt is set to point to the new statement +** handle and SQLITE_OK is returned. +** +** Otherwise, if an error occurs, *ppStmt is set to NULL and an error code +** returned. In this case, *pzErrmsg may also be set to point to an error +** message. It is the responsibility of the caller to free this error message +** buffer using sqlite3_free(). +** +** If argument zSql is NULL, this function assumes that an OOM has occurred. +** In this case SQLITE_NOMEM is returned and *ppStmt set to NULL. +*/ +static int prepareFreeAndCollectError( + sqlite3 *db, + sqlite3_stmt **ppStmt, + char **pzErrmsg, + char *zSql +){ + int rc; + assert( *pzErrmsg==0 ); + if( zSql==0 ){ + rc = SQLITE_NOMEM; + *ppStmt = 0; + }else{ + rc = prepareAndCollectError(db, ppStmt, pzErrmsg, zSql); + sqlite3_free(zSql); + } + return rc; +} + +/* +** Free the RbuObjIter.azTblCol[] and RbuObjIter.abTblPk[] arrays allocated +** by an earlier call to rbuObjIterCacheTableInfo(). 
+*/
+static void rbuObjIterFreeCols(RbuObjIter *pIter){
+  int i;
+  for(i=0; i<pIter->nTblCol; i++){
+    sqlite3_free(pIter->azTblCol[i]);
+    sqlite3_free(pIter->azTblType[i]);
+  }
+  sqlite3_free(pIter->azTblCol);
+  pIter->azTblCol = 0;
+  pIter->azTblType = 0;
+  pIter->aiSrcOrder = 0;
+  pIter->abTblPk = 0;
+  pIter->abNotNull = 0;
+  pIter->nTblCol = 0;
+  pIter->eType = 0;               /* Invalid value */
+}
+
+/*
+** Finalize all statements and free all allocations that are specific to
+** the current object (table/index pair).
+*/
+static void rbuObjIterClearStatements(RbuObjIter *pIter){
+  RbuUpdateStmt *pUp;
+
+  sqlite3_finalize(pIter->pSelect);
+  sqlite3_finalize(pIter->pInsert);
+  sqlite3_finalize(pIter->pDelete);
+  sqlite3_finalize(pIter->pTmpInsert);
+  pUp = pIter->pRbuUpdate;
+  while( pUp ){
+    RbuUpdateStmt *pTmp = pUp->pNext;
+    sqlite3_finalize(pUp->pUpdate);
+    sqlite3_free(pUp);
+    pUp = pTmp;
+  }
+
+  pIter->pSelect = 0;
+  pIter->pInsert = 0;
+  pIter->pDelete = 0;
+  pIter->pRbuUpdate = 0;
+  pIter->pTmpInsert = 0;
+  pIter->nCol = 0;
+}
+
+/*
+** Clean up any resources allocated as part of the iterator object passed
+** as the only argument.
+*/
+static void rbuObjIterFinalize(RbuObjIter *pIter){
+  rbuObjIterClearStatements(pIter);
+  sqlite3_finalize(pIter->pTblIter);
+  sqlite3_finalize(pIter->pIdxIter);
+  rbuObjIterFreeCols(pIter);
+  memset(pIter, 0, sizeof(RbuObjIter));
+}
+
+/*
+** Advance the iterator to the next position.
+**
+** If no error occurs, SQLITE_OK is returned and the iterator is left
+** pointing to the next entry. Otherwise, an error code and message is
+** left in the RBU handle passed as the first argument. A copy of the
+** error code is returned.
+*/
+static int rbuObjIterNext(sqlite3rbu *p, RbuObjIter *pIter){
+  int rc = p->rc;
+  if( rc==SQLITE_OK ){
+
+    /* Free any SQLite statements used while processing the previous object */
+    rbuObjIterClearStatements(pIter);
+    if( pIter->zIdx==0 ){
+      rc = sqlite3_exec(p->dbMain,
+          "DROP TRIGGER IF EXISTS temp.rbu_insert_tr;"
+          "DROP TRIGGER IF EXISTS temp.rbu_update1_tr;"
+          "DROP TRIGGER IF EXISTS temp.rbu_update2_tr;"
+          "DROP TRIGGER IF EXISTS temp.rbu_delete_tr;"
+          , 0, 0, &p->zErrmsg
+      );
+    }
+
+    if( rc==SQLITE_OK ){
+      if( pIter->bCleanup ){
+        rbuObjIterFreeCols(pIter);
+        pIter->bCleanup = 0;
+        rc = sqlite3_step(pIter->pTblIter);
+        if( rc!=SQLITE_ROW ){
+          rc = resetAndCollectError(pIter->pTblIter, &p->zErrmsg);
+          pIter->zTbl = 0;
+        }else{
+          pIter->zTbl = (const char*)sqlite3_column_text(pIter->pTblIter, 0);
+          pIter->zDataTbl = (const char*)sqlite3_column_text(pIter->pTblIter,1);
+          rc = (pIter->zDataTbl && pIter->zTbl) ? SQLITE_OK : SQLITE_NOMEM;
+        }
+      }else{
+        if( pIter->zIdx==0 ){
+          sqlite3_stmt *pIdx = pIter->pIdxIter;
+          rc = sqlite3_bind_text(pIdx, 1, pIter->zTbl, -1, SQLITE_STATIC);
+        }
+        if( rc==SQLITE_OK ){
+          rc = sqlite3_step(pIter->pIdxIter);
+          if( rc!=SQLITE_ROW ){
+            rc = resetAndCollectError(pIter->pIdxIter, &p->zErrmsg);
+            pIter->bCleanup = 1;
+            pIter->zIdx = 0;
+          }else{
+            pIter->zIdx = (const char*)sqlite3_column_text(pIter->pIdxIter, 0);
+            pIter->iTnum = sqlite3_column_int(pIter->pIdxIter, 1);
+            pIter->bUnique = sqlite3_column_int(pIter->pIdxIter, 2);
+            rc = pIter->zIdx ? SQLITE_OK : SQLITE_NOMEM;
+          }
+        }
+      }
+    }
+  }
+
+  if( rc!=SQLITE_OK ){
+    rbuObjIterFinalize(pIter);
+    p->rc = rc;
+  }
+  return rc;
+}
+
+
+/*
+** The implementation of the rbu_target_name() SQL function. This function
+** accepts one or two arguments. The first argument is the name of a table -
+** the name of a table in the RBU database. The second, if it is present, is 1
+** for a view or 0 for a table.
+**
+** For a non-vacuum RBU handle, if the table name matches the pattern:
+**
+**     data[0-9]*_<name>
+**
+** where <name> is any sequence of 1 or more characters, <name> is returned.
+** Otherwise, if the only argument does not match the above pattern, an SQL
+** NULL is returned.
+**
+**     "data_t1"     -> "t1"
+**     "data0123_t2" -> "t2"
+**     "dataAB_t3"   -> NULL
+**
+** For an rbu vacuum handle, a copy of the first argument is returned if
+** the second argument is either missing or 0 (not a view).
+*/
+static void rbuTargetNameFunc(
+  sqlite3_context *pCtx,
+  int argc,
+  sqlite3_value **argv
+){
+  sqlite3rbu *p = sqlite3_user_data(pCtx);
+  const char *zIn;
+  assert( argc==1 || argc==2 );
+
+  zIn = (const char*)sqlite3_value_text(argv[0]);
+  if( zIn ){
+    if( rbuIsVacuum(p) ){
+      if( argc==1 || 0==sqlite3_value_int(argv[1]) ){
+        sqlite3_result_text(pCtx, zIn, -1, SQLITE_STATIC);
+      }
+    }else{
+      if( strlen(zIn)>4 && memcmp("data", zIn, 4)==0 ){
+        int i;
+        for(i=4; zIn[i]>='0' && zIn[i]<='9'; i++);
+        if( zIn[i]=='_' && zIn[i+1] ){
+          sqlite3_result_text(pCtx, &zIn[i+1], -1, SQLITE_STATIC);
+        }
+      }
+    }
+  }
+}
+
+/*
+** Initialize the iterator structure passed as the second argument.
+**
+** If no error occurs, SQLITE_OK is returned and the iterator is left
+** pointing to the first entry. Otherwise, an error code and message is
+** left in the RBU handle passed as the first argument. A copy of the
+** error code is returned.
+*/
+static int rbuObjIterFirst(sqlite3rbu *p, RbuObjIter *pIter){
+  int rc;
+  memset(pIter, 0, sizeof(RbuObjIter));
+
+  rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pTblIter, &p->zErrmsg,
+    sqlite3_mprintf(
+      "SELECT rbu_target_name(name, type='view') AS target, name "
+      "FROM sqlite_master "
+      "WHERE type IN ('table', 'view') AND target IS NOT NULL "
+      " %s "
+      "ORDER BY name"
+  , rbuIsVacuum(p) ? "AND rootpage!=0 AND rootpage IS NOT NULL" : ""));
+
+  if( rc==SQLITE_OK ){
+    rc = prepareAndCollectError(p->dbMain, &pIter->pIdxIter, &p->zErrmsg,
+        "SELECT name, rootpage, sql IS NULL OR substr(sql, 8, 6)=='UNIQUE' "
+        "  FROM main.sqlite_master "
+        "  WHERE type='index' AND tbl_name = ?"
+    );
+  }
+
+  pIter->bCleanup = 1;
+  p->rc = rc;
+  return rbuObjIterNext(p, pIter);
+}
+
+/*
+** This is a wrapper around "sqlite3_mprintf(zFmt, ...)". If an OOM occurs,
+** an error code is stored in the RBU handle passed as the first argument.
+**
+** If an error has already occurred (p->rc is already set to something other
+** than SQLITE_OK), then this function returns NULL without modifying the
+** stored error code. In this case it still calls sqlite3_free() on any
+** printf() parameters associated with %z conversions.
+*/
+static char *rbuMPrintf(sqlite3rbu *p, const char *zFmt, ...){
+  char *zSql = 0;
+  va_list ap;
+  va_start(ap, zFmt);
+  zSql = sqlite3_vmprintf(zFmt, ap);
+  if( p->rc==SQLITE_OK ){
+    if( zSql==0 ) p->rc = SQLITE_NOMEM;
+  }else{
+    sqlite3_free(zSql);
+    zSql = 0;
+  }
+  va_end(ap);
+  return zSql;
+}
+
+/*
+** Argument zFmt is a sqlite3_mprintf() style format string. The trailing
+** arguments are the usual substitution values. This function performs
+** the printf() style substitutions and executes the result as an SQL
+** statement on the RBU handle's database.
+**
+** If an error occurs, an error code and error message is stored in the
+** RBU handle. If an error has already occurred when this function is
+** called, it is a no-op.
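+**
+** A representative call from later in this file is:
+**
+**     rbuMPrintfExec(p, p->dbRbu, "ATTACH %Q AS stat", p->zState);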
+*/ +static int rbuMPrintfExec(sqlite3rbu *p, sqlite3 *db, const char *zFmt, ...){ + va_list ap; + char *zSql; + va_start(ap, zFmt); + zSql = sqlite3_vmprintf(zFmt, ap); + if( p->rc==SQLITE_OK ){ + if( zSql==0 ){ + p->rc = SQLITE_NOMEM; + }else{ + p->rc = sqlite3_exec(db, zSql, 0, 0, &p->zErrmsg); + } + } + sqlite3_free(zSql); + va_end(ap); + return p->rc; +} + +/* +** Attempt to allocate and return a pointer to a zeroed block of nByte +** bytes. +** +** If an error (i.e. an OOM condition) occurs, return NULL and leave an +** error code in the rbu handle passed as the first argument. Or, if an +** error has already occurred when this function is called, return NULL +** immediately without attempting the allocation or modifying the stored +** error code. +*/ +static void *rbuMalloc(sqlite3rbu *p, int nByte){ + void *pRet = 0; + if( p->rc==SQLITE_OK ){ + assert( nByte>0 ); + pRet = sqlite3_malloc64(nByte); + if( pRet==0 ){ + p->rc = SQLITE_NOMEM; + }else{ + memset(pRet, 0, nByte); + } + } + return pRet; +} + + +/* +** Allocate and zero the pIter->azTblCol[] and abTblPk[] arrays so that +** there is room for at least nCol elements. If an OOM occurs, store an +** error code in the RBU handle passed as the first argument. +*/ +static void rbuAllocateIterArrays(sqlite3rbu *p, RbuObjIter *pIter, int nCol){ + int nByte = (2*sizeof(char*) + sizeof(int) + 3*sizeof(u8)) * nCol; + char **azNew; + + azNew = (char**)rbuMalloc(p, nByte); + if( azNew ){ + pIter->azTblCol = azNew; + pIter->azTblType = &azNew[nCol]; + pIter->aiSrcOrder = (int*)&pIter->azTblType[nCol]; + pIter->abTblPk = (u8*)&pIter->aiSrcOrder[nCol]; + pIter->abNotNull = (u8*)&pIter->abTblPk[nCol]; + pIter->abIndexed = (u8*)&pIter->abNotNull[nCol]; + } +} + +/* +** The first argument must be a nul-terminated string. This function +** returns a copy of the string in memory obtained from sqlite3_malloc(). +** It is the responsibility of the caller to eventually free this memory +** using sqlite3_free(). +** +** If an OOM condition is encountered when attempting to allocate memory, +** output variable (*pRc) is set to SQLITE_NOMEM before returning. Otherwise, +** if the allocation succeeds, (*pRc) is left unchanged. +*/ +static char *rbuStrndup(const char *zStr, int *pRc){ + char *zRet = 0; + + assert( *pRc==SQLITE_OK ); + if( zStr ){ + size_t nCopy = strlen(zStr) + 1; + zRet = (char*)sqlite3_malloc64(nCopy); + if( zRet ){ + memcpy(zRet, zStr, nCopy); + }else{ + *pRc = SQLITE_NOMEM; + } + } + + return zRet; +} + +/* +** Finalize the statement passed as the second argument. +** +** If the sqlite3_finalize() call indicates that an error occurs, and the +** rbu handle error code is not already set, set the error code and error +** message accordingly. +*/ +static void rbuFinalize(sqlite3rbu *p, sqlite3_stmt *pStmt){ + sqlite3 *db = sqlite3_db_handle(pStmt); + int rc = sqlite3_finalize(pStmt); + if( p->rc==SQLITE_OK && rc!=SQLITE_OK ){ + p->rc = rc; + p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + } +} + +/* Determine the type of a table. +** +** peType is of type (int*), a pointer to an output parameter of type +** (int). This call sets the output parameter as follows, depending +** on the type of the table specified by parameters dbName and zTbl. +** +** RBU_PK_NOTABLE: No such table. +** RBU_PK_NONE: Table has an implicit rowid. +** RBU_PK_IPK: Table has an explicit IPK column. +** RBU_PK_EXTERNAL: Table has an external PK index. +** RBU_PK_WITHOUT_ROWID: Table is WITHOUT ROWID. +** RBU_PK_VTAB: Table is a virtual table. 
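+**
+** For example (illustrative schemas only):
+**
+**     CREATE TABLE t1(a, b);                           -> RBU_PK_NONE
+**     CREATE TABLE t2(a INTEGER PRIMARY KEY, b);       -> RBU_PK_IPK
+**     CREATE TABLE t3(a, b, PRIMARY KEY(a, b));        -> RBU_PK_EXTERNAL
+**     CREATE TABLE t4(a PRIMARY KEY, b) WITHOUT ROWID; -> RBU_PK_WITHOUT_ROWID
+**     CREATE VIRTUAL TABLE t5 USING fts5(a);           -> RBU_PK_VTAB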
+**
+** Argument *piPk is also of type (int*), and also points to an output
+** parameter. Unless the table has an external primary key index
+** (i.e. unless *peType is set to 3), then *piPk is set to zero. Or,
+** if the table does have an external primary key index, then *piPk
+** is set to the root page number of the primary key index before
+** returning.
+**
+** ALGORITHM:
+**
+** if( no entry exists in sqlite_master ){
+**   return RBU_PK_NOTABLE
+** }else if( sql for the entry starts with "CREATE VIRTUAL" ){
+**   return RBU_PK_VTAB
+** }else if( "PRAGMA index_list()" for the table contains a "pk" index ){
+**   if( the index that is the pk exists in sqlite_master ){
+**     *piPK = rootpage of that index.
+**     return RBU_PK_EXTERNAL
+**   }else{
+**     return RBU_PK_WITHOUT_ROWID
+**   }
+** }else if( "PRAGMA table_info()" lists one or more "pk" columns ){
+**   return RBU_PK_IPK
+** }else{
+**   return RBU_PK_NONE
+** }
+*/
+static void rbuTableType(
+  sqlite3rbu *p,
+  const char *zTab,
+  int *peType,
+  int *piTnum,
+  int *piPk
+){
+  /*
+  ** 0) SELECT count(*) FROM sqlite_master where name=%Q AND IsVirtual(%Q)
+  ** 1) PRAGMA index_list = ?
+  ** 2) SELECT count(*) FROM sqlite_master where name=%Q
+  ** 3) PRAGMA table_info = ?
+  */
+  sqlite3_stmt *aStmt[4] = {0, 0, 0, 0};
+
+  *peType = RBU_PK_NOTABLE;
+  *piPk = 0;
+
+  assert( p->rc==SQLITE_OK );
+  p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[0], &p->zErrmsg,
+    sqlite3_mprintf(
+          "SELECT (sql LIKE 'create virtual%%'), rootpage"
+          "  FROM sqlite_master"
+          " WHERE name=%Q", zTab
+  ));
+  if( p->rc!=SQLITE_OK || sqlite3_step(aStmt[0])!=SQLITE_ROW ){
+    /* Either an error, or no such table. */
+    goto rbuTableType_end;
+  }
+  if( sqlite3_column_int(aStmt[0], 0) ){
+    *peType = RBU_PK_VTAB;                     /* virtual table */
+    goto rbuTableType_end;
+  }
+  *piTnum = sqlite3_column_int(aStmt[0], 1);
+
+  p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[1], &p->zErrmsg,
+    sqlite3_mprintf("PRAGMA index_list=%Q",zTab)
+  );
+  if( p->rc ) goto rbuTableType_end;
+  while( sqlite3_step(aStmt[1])==SQLITE_ROW ){
+    const u8 *zOrig = sqlite3_column_text(aStmt[1], 3);
+    const u8 *zIdx = sqlite3_column_text(aStmt[1], 1);
+    if( zOrig && zIdx && zOrig[0]=='p' ){
+      p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[2], &p->zErrmsg,
+        sqlite3_mprintf(
+          "SELECT rootpage FROM sqlite_master WHERE name = %Q", zIdx
+      ));
+      if( p->rc==SQLITE_OK ){
+        if( sqlite3_step(aStmt[2])==SQLITE_ROW ){
+          *piPk = sqlite3_column_int(aStmt[2], 0);
+          *peType = RBU_PK_EXTERNAL;
+        }else{
+          *peType = RBU_PK_WITHOUT_ROWID;
+        }
+      }
+      goto rbuTableType_end;
+    }
+  }
+
+  p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[3], &p->zErrmsg,
+    sqlite3_mprintf("PRAGMA table_info=%Q",zTab)
+  );
+  if( p->rc==SQLITE_OK ){
+    while( sqlite3_step(aStmt[3])==SQLITE_ROW ){
+      if( sqlite3_column_int(aStmt[3],5)>0 ){
+        *peType = RBU_PK_IPK;                /* explicit IPK column */
+        goto rbuTableType_end;
+      }
+    }
+    *peType = RBU_PK_NONE;
+  }
+
+rbuTableType_end: {
+    unsigned int i;
+    for(i=0; i<sizeof(aStmt)/sizeof(aStmt[0]); i++){
+      rbuFinalize(p, aStmt[i]);
+    }
+  }
+}
+
+/*
+** This is a helper function for rbuObjIterCacheTableInfo(). It populates
+** the pIter->abIndexed[] array.
+*/
+static void rbuObjIterCacheIndexedCols(sqlite3rbu *p, RbuObjIter *pIter){
+  sqlite3_stmt *pList = 0;
+  int bIndex = 0;
+
+  if( p->rc==SQLITE_OK ){
+    memcpy(pIter->abIndexed, pIter->abTblPk, sizeof(u8)*pIter->nTblCol);
+    p->rc = prepareFreeAndCollectError(p->dbMain, &pList, &p->zErrmsg,
+        sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl)
+    );
+  }
+
+  pIter->nIndex = 0;
+  while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pList) ){
+    const char *zIdx = (const char*)sqlite3_column_text(pList, 1);
+    sqlite3_stmt *pXInfo = 0;
+    if( zIdx==0 ) break;
+    p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+        sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
+    );
+    while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+      int iCid = sqlite3_column_int(pXInfo, 1);
+      if( iCid>=0 ) pIter->abIndexed[iCid] = 1;
+    }
+    rbuFinalize(p, pXInfo);
+    bIndex = 1;
+    pIter->nIndex++;
+  }
+
+  if( pIter->eType==RBU_PK_WITHOUT_ROWID ){
+    /* "PRAGMA index_list" includes the main PK b-tree */
+    pIter->nIndex--;
+  }
+
+  rbuFinalize(p, pList);
+  if( bIndex==0 ) pIter->abIndexed = 0;
+}
+
+
+/*
+** If they are not already populated, populate the pIter->azTblCol[],
+** pIter->abTblPk[], pIter->nTblCol and pIter->bRowid variables according to
+** the table (not index) that the iterator currently points to.
+**
+** Return SQLITE_OK if successful, or an SQLite error code otherwise. If
+** an error does occur, an error code and error message are also left in
+** the RBU handle.
+*/
+static int rbuObjIterCacheTableInfo(sqlite3rbu *p, RbuObjIter *pIter){
+  if( pIter->azTblCol==0 ){
+    sqlite3_stmt *pStmt = 0;
+    int nCol = 0;
+    int i;                        /* for() loop iterator variable */
+    int bRbuRowid = 0;            /* If input table has column "rbu_rowid" */
+    int iOrder = 0;
+    int iTnum = 0;
+
+    /* Figure out the type of table this step will deal with. */
+    assert( pIter->eType==0 );
+    rbuTableType(p, pIter->zTbl, &pIter->eType, &iTnum, &pIter->iPkTnum);
+    if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_NOTABLE ){
+      p->rc = SQLITE_ERROR;
+      p->zErrmsg = sqlite3_mprintf("no such table: %s", pIter->zTbl);
+    }
+    if( p->rc ) return p->rc;
+    if( pIter->zIdx==0 ) pIter->iTnum = iTnum;
+
+    assert( pIter->eType==RBU_PK_NONE || pIter->eType==RBU_PK_IPK
+         || pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_WITHOUT_ROWID
+         || pIter->eType==RBU_PK_VTAB
+    );
+
+    /* Populate the azTblCol[] and nTblCol variables based on the columns
+    ** of the input table. Ignore any input table columns that begin with
+    ** "rbu_". */
+    p->rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg,
+        sqlite3_mprintf("SELECT * FROM '%q'", pIter->zDataTbl)
+    );
+    if( p->rc==SQLITE_OK ){
+      nCol = sqlite3_column_count(pStmt);
+      rbuAllocateIterArrays(p, pIter, nCol);
+    }
+    for(i=0; p->rc==SQLITE_OK && i<nCol; i++){
+      const char *zName = (const char*)sqlite3_column_name(pStmt, i);
+      if( sqlite3_strnicmp("rbu_", zName, 4) ){
+        char *zCopy = rbuStrndup(zName, &p->rc);
+        pIter->aiSrcOrder[pIter->nTblCol] = pIter->nTblCol;
+        pIter->azTblCol[pIter->nTblCol++] = zCopy;
+      }
+      else if( 0==sqlite3_stricmp("rbu_rowid", zName) ){
+        bRbuRowid = 1;
+      }
+    }
+    sqlite3_finalize(pStmt);
+    pStmt = 0;
+
+    if( p->rc==SQLITE_OK
+     && rbuIsVacuum(p)==0
+     && bRbuRowid!=(pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE)
+    ){
+      p->rc = SQLITE_ERROR;
+      p->zErrmsg = sqlite3_mprintf(
+          "table %q %s rbu_rowid column", pIter->zDataTbl,
+          (bRbuRowid ? "may not have" : "requires")
+      );
+    }
+
+    /* Check that all non-HIDDEN columns in the destination table are also
+    ** present in the input table. Populate the abTblPk[], azTblType[] and
+    ** aiTblOrder[] arrays at the same time.
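+    **
+    ** The "PRAGMA table_info" columns consulted below are, per the SQLite
+    ** documentation: 1=name, 2=type, 3=notnull and 5=pk. For instance, for
+    ** the (hypothetical) table "CREATE TABLE t1(a INTEGER PRIMARY KEY,
+    ** b TEXT NOT NULL)" the pragma reports (name=a, type=INTEGER,
+    ** notnull=0, pk=1) and (name=b, type=TEXT, notnull=1, pk=0).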
+    */
+    if( p->rc==SQLITE_OK ){
+      p->rc = prepareFreeAndCollectError(p->dbMain, &pStmt, &p->zErrmsg,
+          sqlite3_mprintf("PRAGMA table_info(%Q)", pIter->zTbl)
+      );
+    }
+    while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){
+      const char *zName = (const char*)sqlite3_column_text(pStmt, 1);
+      if( zName==0 ) break;  /* An OOM - finalize() below returns S_NOMEM */
+      for(i=iOrder; i<pIter->nTblCol; i++){
+        if( 0==strcmp(zName, pIter->azTblCol[i]) ) break;
+      }
+      if( i==pIter->nTblCol ){
+        p->rc = SQLITE_ERROR;
+        p->zErrmsg = sqlite3_mprintf("column missing from %q: %s",
+            pIter->zDataTbl, zName
+        );
+      }else{
+        int iPk = sqlite3_column_int(pStmt, 5);
+        int bNotNull = sqlite3_column_int(pStmt, 3);
+        const char *zType = (const char*)sqlite3_column_text(pStmt, 2);
+
+        if( i!=iOrder ){
+          SWAP(int, pIter->aiSrcOrder[i], pIter->aiSrcOrder[iOrder]);
+          SWAP(char*, pIter->azTblCol[i], pIter->azTblCol[iOrder]);
+        }
+
+        pIter->azTblType[iOrder] = rbuStrndup(zType, &p->rc);
+        pIter->abTblPk[iOrder] = (iPk!=0);
+        pIter->abNotNull[iOrder] = (u8)bNotNull || (iPk!=0);
+        iOrder++;
+      }
+    }
+
+    rbuFinalize(p, pStmt);
+    rbuObjIterCacheIndexedCols(p, pIter);
+    assert( pIter->eType!=RBU_PK_VTAB || pIter->abIndexed==0 );
+    assert( pIter->eType!=RBU_PK_VTAB || pIter->nIndex==0 );
+  }
+
+  return p->rc;
+}
+
+/*
+** This function constructs and returns a pointer to a nul-terminated
+** string containing some SQL clause or list based on one or more of the
+** column names currently stored in the pIter->azTblCol[] array.
+*/
+static char *rbuObjIterGetCollist(
+  sqlite3rbu *p,                  /* RBU object */
+  RbuObjIter *pIter               /* Object iterator for column names */
+){
+  char *zList = 0;
+  const char *zSep = "";
+  int i;
+  for(i=0; i<pIter->nTblCol; i++){
+    const char *z = pIter->azTblCol[i];
+    zList = rbuMPrintf(p, "%z%s\"%w\"", zList, zSep, z);
+    zSep = ", ";
+  }
+  return zList;
+}
+
+/*
+** This function is used to create a SELECT list (the list of SQL
+** expressions that follows a SELECT keyword) for a SELECT statement
+** used to read from a data_xxx or rbu_tmp_xxx table while updating the
+** index object currently indicated by the iterator object passed as the
+** second argument. A "PRAGMA index_xinfo = <idx>" statement is used
+** to obtain the required information.
+**
+** If the index is of the following form:
+**
+**   CREATE INDEX i1 ON t1(c, b COLLATE nocase);
+**
+** and "t1" is a table with an explicit INTEGER PRIMARY KEY column
+** "ipk", the returned string is:
+**
+**   "`c` COLLATE 'BINARY', `b` COLLATE 'NOCASE', `ipk` COLLATE 'BINARY'"
+**
+** As well as the returned string, three other malloc'd strings are
+** returned via output parameters. As follows:
+**
+**   pzImposterCols: ...
+**   pzImposterPk: ...
+**   pzWhere: ...
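+**
+** As an illustration, for the index and table shown above the three
+** strings take approximately this shape (nBind==3):
+**
+**   *pzImposterCols: "rbu_imp_0c" TEXT COLLATE 'BINARY', ...
+**   *pzImposterPk:   "rbu_imp_0c", "rbu_imp_1b", "rbu_imp_2ipk"
+**   *pzWhere:        "rbu_imp_0c" IS ? AND "rbu_imp_1b" IS ? AND
+**                    "rbu_imp_2ipk" IS ?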
+*/
+static char *rbuObjIterGetIndexCols(
+  sqlite3rbu *p,                  /* RBU object */
+  RbuObjIter *pIter,              /* Object iterator for column names */
+  char **pzImposterCols,          /* OUT: Columns for imposter table */
+  char **pzImposterPk,            /* OUT: Imposter PK clause */
+  char **pzWhere,                 /* OUT: WHERE clause */
+  int *pnBind                     /* OUT: Total number of columns */
+){
+  int rc = p->rc;                 /* Error code */
+  int rc2;                        /* sqlite3_finalize() return code */
+  char *zRet = 0;                 /* String to return */
+  char *zImpCols = 0;             /* String to return via *pzImposterCols */
+  char *zImpPK = 0;               /* String to return via *pzImposterPK */
+  char *zWhere = 0;               /* String to return via *pzWhere */
+  int nBind = 0;                  /* Value to return via *pnBind */
+  const char *zCom = "";          /* Set to ", " later on */
+  const char *zAnd = "";          /* Set to " AND " later on */
+  sqlite3_stmt *pXInfo = 0;       /* PRAGMA index_xinfo = ? */
+
+  if( rc==SQLITE_OK ){
+    assert( p->zErrmsg==0 );
+    rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+        sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", pIter->zIdx)
+    );
+  }
+
+  while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+    int iCid = sqlite3_column_int(pXInfo, 1);
+    int bDesc = sqlite3_column_int(pXInfo, 3);
+    const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4);
+    const char *zCol;
+    const char *zType;
+
+    if( iCid<0 ){
+      /* An integer primary key. If the table has an explicit IPK, use
+      ** its name. Otherwise, use "rbu_rowid".  */
+      if( pIter->eType==RBU_PK_IPK ){
+        int i;
+        for(i=0; pIter->abTblPk[i]==0; i++);
+        assert( i<pIter->nTblCol );
+        zCol = pIter->azTblCol[i];
+      }else if( rbuIsVacuum(p) ){
+        zCol = "_rowid_";
+      }else{
+        zCol = "rbu_rowid";
+      }
+      zType = "INTEGER";
+    }else{
+      zCol = pIter->azTblCol[iCid];
+      zType = pIter->azTblType[iCid];
+    }
+
+    zRet = sqlite3_mprintf("%z%s\"%w\" COLLATE %Q", zRet, zCom, zCol, zCollate);
+    if( pIter->bUnique==0 || sqlite3_column_int(pXInfo, 5) ){
+      const char *zOrder = (bDesc ? " DESC" : "");
+      zImpPK = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\"%s",
+          zImpPK, zCom, nBind, zCol, zOrder
+      );
+    }
+    zImpCols = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\" %s COLLATE %Q",
+        zImpCols, zCom, nBind, zCol, zType, zCollate
+    );
+    zWhere = sqlite3_mprintf(
+        "%z%s\"rbu_imp_%d%w\" IS ?", zWhere, zAnd, nBind, zCol
+    );
+    if( zRet==0 || zImpPK==0 || zImpCols==0 || zWhere==0 ) rc = SQLITE_NOMEM;
+    zCom = ", ";
+    zAnd = " AND ";
+    nBind++;
+  }
+
+  rc2 = sqlite3_finalize(pXInfo);
+  if( rc==SQLITE_OK ) rc = rc2;
+
+  if( rc!=SQLITE_OK ){
+    sqlite3_free(zRet);
+    sqlite3_free(zImpCols);
+    sqlite3_free(zImpPK);
+    sqlite3_free(zWhere);
+    zRet = 0;
+    zImpCols = 0;
+    zImpPK = 0;
+    zWhere = 0;
+    p->rc = rc;
+  }
+
+  *pzImposterCols = zImpCols;
+  *pzImposterPk = zImpPK;
+  *pzWhere = zWhere;
+  *pnBind = nBind;
+  return zRet;
+}
+
+/*
+** Assuming the current table columns are "a", "b" and "c", and the zObj
+** parameter is passed "old", return a string of the form:
+**
+**     "old.a, old.b, old.c"
+**
+** With the column names escaped.
+**
+** For tables with implicit rowids - RBU_PK_EXTERNAL and RBU_PK_NONE, append
+** the text ", old._rowid_" to the returned value.
+*/
+static char *rbuObjIterGetOldlist(
+  sqlite3rbu *p,
+  RbuObjIter *pIter,
+  const char *zObj
+){
+  char *zList = 0;
+  if( p->rc==SQLITE_OK && pIter->abIndexed ){
+    const char *zS = "";
+    int i;
+    for(i=0; i<pIter->nTblCol; i++){
+      if( pIter->abIndexed[i] ){
+        const char *zCol = pIter->azTblCol[i];
+        zList = sqlite3_mprintf("%z%s%s.\"%w\"", zList, zS, zObj, zCol);
+      }else{
+        zList = sqlite3_mprintf("%z%sNULL", zList, zS);
+      }
+      zS = ", ";
+      if( zList==0 ){
+        p->rc = SQLITE_NOMEM;
+        break;
+      }
+    }
+
+    /* For a table with implicit rowids, append "old._rowid_" to the list. */
+    if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){
+      zList = rbuMPrintf(p, "%z, %s._rowid_", zList, zObj);
+    }
+  }
+  return zList;
+}
+
+/*
+** Return an expression that can be used in a WHERE clause to match the
+** primary key of the current table. For example, if the table is:
+**
+**   CREATE TABLE t1(a, b, c, PRIMARY KEY(b, c));
+**
+** Return the string:
+**
+**   "b = ?1 AND c = ?2"
+*/
+static char *rbuObjIterGetWhere(
+  sqlite3rbu *p,
+  RbuObjIter *pIter
+){
+  char *zList = 0;
+  if( pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE ){
+    zList = rbuMPrintf(p, "_rowid_ = ?%d", pIter->nTblCol+1);
+  }else if( pIter->eType==RBU_PK_EXTERNAL ){
+    const char *zSep = "";
+    int i;
+    for(i=0; i<pIter->nTblCol; i++){
+      if( pIter->abTblPk[i] ){
+        zList = rbuMPrintf(p, "%z%sc%d=?%d", zList, zSep, i, i+1);
+        zSep = " AND ";
+      }
+    }
+    zList = rbuMPrintf(p,
+        "_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)", zList
+    );
+
+  }else{
+    const char *zSep = "";
+    int i;
+    for(i=0; i<pIter->nTblCol; i++){
+      if( pIter->abTblPk[i] ){
+        const char *zCol = pIter->azTblCol[i];
+        zList = rbuMPrintf(p, "%z%s\"%w\"=?%d", zList, zSep, zCol, i+1);
+        zSep = " AND ";
+      }
+    }
+  }
+  return zList;
+}
+
+/*
+** The SELECT statement iterating through the keys for the current object
+** (p->objiter.pSelect) currently points to a valid row. However, there
+** is something wrong with the rbu_control value stored in the
+** (p->nCol+1)'th column. Set the error code and error message of the
+** RBU handle to something reflecting this.
+*/
+static void rbuBadControlError(sqlite3rbu *p){
+  p->rc = SQLITE_ERROR;
+  p->zErrmsg = sqlite3_mprintf("invalid rbu_control value");
+}
+
+
+/*
+** Return a nul-terminated string containing the comma separated list of
+** assignments that should be included following the "SET" keyword of
+** an UPDATE statement used to update the table object that the iterator
+** passed as the second argument currently points to if the rbu_control
+** column of the data_xxx table entry is set to zMask.
+**
+** The memory for the returned string is obtained from sqlite3_malloc().
+** It is the responsibility of the caller to eventually free it using
+** sqlite3_free().
+**
+** If an OOM error is encountered when allocating space for the new
+** string, an error code is left in the rbu handle passed as the first
+** argument and NULL is returned. Or, if an error has already occurred
+** when this function is called, NULL is returned immediately, without
+** attempting the allocation or modifying the stored error code.
+*/
+static char *rbuObjIterGetSetlist(
+  sqlite3rbu *p,
+  RbuObjIter *pIter,
+  const char *zMask
+){
+  char *zList = 0;
+  if( p->rc==SQLITE_OK ){
+    int i;
+
+    if( (int)strlen(zMask)!=pIter->nTblCol ){
+      rbuBadControlError(p);
+    }else{
+      const char *zSep = "";
+      for(i=0; i<pIter->nTblCol; i++){
+        char c = zMask[pIter->aiSrcOrder[i]];
+        if( c=='x' ){
+          zList = rbuMPrintf(p, "%z%s\"%w\"=?%d",
+              zList, zSep, pIter->azTblCol[i], i+1
+          );
+          zSep = ", ";
+        }
+        else if( c=='d' ){
+          zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_delta(\"%w\", ?%d)",
+              zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1
+          );
+          zSep = ", ";
+        }
+        else if( c=='f' ){
+          zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_fossil_delta(\"%w\", ?%d)",
+              zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1
+          );
+          zSep = ", ";
+        }
+      }
+    }
+  }
+  return zList;
+}
+
+/*
+** Return a nul-terminated string consisting of nByte comma separated
+** "?" expressions. For example, if nByte is 3, return a pointer to
+** a buffer containing the string "?,?,?".
+**
+** The memory for the returned string is obtained from sqlite3_malloc().
+** It is the responsibility of the caller to eventually free it using
+** sqlite3_free().
+**
+** If an OOM error is encountered when allocating space for the new
+** string, an error code is left in the rbu handle passed as the first
+** argument and NULL is returned. Or, if an error has already occurred
+** when this function is called, NULL is returned immediately, without
+** attempting the allocation or modifying the stored error code.
+*/
+static char *rbuObjIterGetBindlist(sqlite3rbu *p, int nBind){
+  char *zRet = 0;
+  int nByte = nBind*2 + 1;
+
+  zRet = (char*)rbuMalloc(p, nByte);
+  if( zRet ){
+    int i;
+    for(i=0; i<nBind; i++){
+      zRet[i*2] = '?';
+      zRet[i*2+1] = (i+1==nBind) ? '\0' : ',';
+    }
+  }
+  return zRet;
+}
+
+/*
+** The iterator currently points to a table (not index) of type
+** RBU_PK_WITHOUT_ROWID. This function creates the PRIMARY KEY
+** definition for the table.
+*/
+static char *rbuWithoutRowidPK(sqlite3rbu *p, RbuObjIter *pIter){
+  char *z = 0;
+  assert( pIter->zIdx==0 );
+  if( p->rc==SQLITE_OK ){
+    const char *zSep = "PRIMARY KEY(";
+    sqlite3_stmt *pXList = 0;     /* PRAGMA index_list = (pIter->zTbl) */
+    sqlite3_stmt *pXInfo = 0;     /* PRAGMA index_xinfo = <pk-index> */
+
+    p->rc = prepareFreeAndCollectError(p->dbMain, &pXList, &p->zErrmsg,
+        sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl)
+    );
+    while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXList) ){
+      const char *zOrig = (const char*)sqlite3_column_text(pXList,3);
+      if( zOrig && strcmp(zOrig, "pk")==0 ){
+        const char *zIdx = (const char*)sqlite3_column_text(pXList,1);
+        if( zIdx ){
+          p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg,
+              sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx)
+          );
+        }
+        break;
+      }
+    }
+    rbuFinalize(p, pXList);
+
+    while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){
+      if( sqlite3_column_int(pXInfo, 5) ){
+        /* int iCid = sqlite3_column_int(pXInfo, 0); */
+        const char *zCol = (const char*)sqlite3_column_text(pXInfo, 2);
+        const char *zDesc = sqlite3_column_int(pXInfo, 3) ? " DESC" : "";
+        z = rbuMPrintf(p, "%z%s\"%w\"%s", z, zSep, zCol, zDesc);
+        zSep = ", ";
+      }
+    }
+    z = rbuMPrintf(p, "%z)", z);
+    rbuFinalize(p, pXInfo);
+  }
+  return z;
+}
+
+/*
+** This function creates the second imposter table used when writing to
+** a table b-tree where the table has an external primary key. If the
+** iterator passed as the second argument does not currently point to
+** a table (not index) with an external primary key, this function is a
+** no-op.
+**
+** Assuming the iterator does point to a table with an external PK, this
+** function creates a WITHOUT ROWID imposter table named "rbu_imposter2"
+** used to access that PK index.
For example, if the target table is +** declared as follows: +** +** CREATE TABLE t1(a, b TEXT, c REAL, PRIMARY KEY(b, c)); +** +** then the imposter table schema is: +** +** CREATE TABLE rbu_imposter2(c1 TEXT, c2 REAL, id INTEGER) WITHOUT ROWID; +** +*/ +static void rbuCreateImposterTable2(sqlite3rbu *p, RbuObjIter *pIter){ + if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_EXTERNAL ){ + int tnum = pIter->iPkTnum; /* Root page of PK index */ + sqlite3_stmt *pQuery = 0; /* SELECT name ... WHERE rootpage = $tnum */ + const char *zIdx = 0; /* Name of PK index */ + sqlite3_stmt *pXInfo = 0; /* PRAGMA main.index_xinfo = $zIdx */ + const char *zComma = ""; + char *zCols = 0; /* Used to build up list of table cols */ + char *zPk = 0; /* Used to build up table PK declaration */ + + /* Figure out the name of the primary key index for the current table. + ** This is needed for the argument to "PRAGMA index_xinfo". Set + ** zIdx to point to a nul-terminated string containing this name. */ + p->rc = prepareAndCollectError(p->dbMain, &pQuery, &p->zErrmsg, + "SELECT name FROM sqlite_master WHERE rootpage = ?" + ); + if( p->rc==SQLITE_OK ){ + sqlite3_bind_int(pQuery, 1, tnum); + if( SQLITE_ROW==sqlite3_step(pQuery) ){ + zIdx = (const char*)sqlite3_column_text(pQuery, 0); + } + } + if( zIdx ){ + p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg, + sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx) + ); + } + rbuFinalize(p, pQuery); + + while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){ + int bKey = sqlite3_column_int(pXInfo, 5); + if( bKey ){ + int iCid = sqlite3_column_int(pXInfo, 1); + int bDesc = sqlite3_column_int(pXInfo, 3); + const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4); + zCols = rbuMPrintf(p, "%z%sc%d %s COLLATE %s", zCols, zComma, + iCid, pIter->azTblType[iCid], zCollate + ); + zPk = rbuMPrintf(p, "%z%sc%d%s", zPk, zComma, iCid, bDesc?" DESC":""); + zComma = ", "; + } + } + zCols = rbuMPrintf(p, "%z, id INTEGER", zCols); + rbuFinalize(p, pXInfo); + + sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum); + rbuMPrintfExec(p, p->dbMain, + "CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID", + zCols, zPk + ); + sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0); + } +} + +/* +** If an error has already occurred when this function is called, it +** immediately returns zero (without doing any work). Or, if an error +** occurs during the execution of this function, it sets the error code +** in the sqlite3rbu object indicated by the first argument and returns +** zero. +** +** The iterator passed as the second argument is guaranteed to point to +** a table (not an index) when this function is called. This function +** attempts to create any imposter table required to write to the main +** table b-tree of the table before returning. Non-zero is returned if +** an imposter table are created, or zero otherwise. +** +** An imposter table is required in all cases except RBU_PK_VTAB. Only +** virtual tables are written to directly. The imposter table has the +** same schema as the actual target table (less any UNIQUE constraints). +** More precisely, the "same schema" means the same columns, types, +** collation sequences. For tables that do not have an external PRIMARY +** KEY, it also means the same PRIMARY KEY declaration. 
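+**
+** For example, for a (hypothetical) target table declared as:
+**
+**     CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT NOT NULL);
+**
+** the statement constructed below is of the form:
+**
+**     CREATE TABLE "rbu_imp_t1"(
+**       "a" INTEGER PRIMARY KEY COLLATE BINARY,
+**       "b" TEXT COLLATE BINARY NOT NULL
+**     );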
+*/
+static void rbuCreateImposterTable(sqlite3rbu *p, RbuObjIter *pIter){
+  if( p->rc==SQLITE_OK && pIter->eType!=RBU_PK_VTAB ){
+    int tnum = pIter->iTnum;
+    const char *zComma = "";
+    char *zSql = 0;
+    int iCol;
+    sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1);
+
+    for(iCol=0; p->rc==SQLITE_OK && iCol<pIter->nTblCol; iCol++){
+      const char *zPk = "";
+      const char *zCol = pIter->azTblCol[iCol];
+      const char *zColl = 0;
+
+      p->rc = sqlite3_table_column_metadata(
+          p->dbMain, "main", pIter->zTbl, zCol, 0, &zColl, 0, 0, 0
+      );
+
+      if( pIter->eType==RBU_PK_IPK && pIter->abTblPk[iCol] ){
+        /* If the target table column is an "INTEGER PRIMARY KEY", add
+        ** "PRIMARY KEY" to the imposter table column declaration. */
+        zPk = "PRIMARY KEY ";
+      }
+      zSql = rbuMPrintf(p, "%z%s\"%w\" %s %sCOLLATE %s%s",
+          zSql, zComma, zCol, pIter->azTblType[iCol], zPk, zColl,
+          (pIter->abNotNull[iCol] ? " NOT NULL" : "")
+      );
+      zComma = ", ";
+    }
+
+    if( pIter->eType==RBU_PK_WITHOUT_ROWID ){
+      char *zPk = rbuWithoutRowidPK(p, pIter);
+      if( zPk ){
+        zSql = rbuMPrintf(p, "%z, %z", zSql, zPk);
+      }
+    }
+
+    sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum);
+    rbuMPrintfExec(p, p->dbMain, "CREATE TABLE \"rbu_imp_%w\"(%z)%s",
+        pIter->zTbl, zSql,
+        (pIter->eType==RBU_PK_WITHOUT_ROWID ? " WITHOUT ROWID" : "")
+    );
+    sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0);
+  }
+}
+
+/*
+** Prepare a statement used to insert rows into the "rbu_tmp_xxx" table.
+** Specifically a statement of the form:
+**
+**     INSERT INTO rbu_tmp_xxx VALUES(?, ?, ? ...);
+**
+** The number of bound variables is equal to the number of columns in
+** the target table, plus one (for the rbu_control column), plus one more
+** (for the rbu_rowid column) if the target table is an implicit IPK or
+** virtual table.
+*/
+static void rbuObjIterPrepareTmpInsert(
+  sqlite3rbu *p,
+  RbuObjIter *pIter,
+  const char *zCollist,
+  const char *zRbuRowid
+){
+  int bRbuRowid = (pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE);
+  char *zBind = rbuObjIterGetBindlist(p, pIter->nTblCol + 1 + bRbuRowid);
+  if( zBind ){
+    assert( pIter->pTmpInsert==0 );
+    p->rc = prepareFreeAndCollectError(
+        p->dbRbu, &pIter->pTmpInsert, &p->zErrmsg, sqlite3_mprintf(
+          "INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)",
+          p->zStateDb, pIter->zDataTbl, zCollist, zRbuRowid, zBind
+    ));
+  }
+}
+
+static void rbuTmpInsertFunc(
+  sqlite3_context *pCtx,
+  int nVal,
+  sqlite3_value **apVal
+){
+  sqlite3rbu *p = sqlite3_user_data(pCtx);
+  int rc = SQLITE_OK;
+  int i;
+
+  assert( sqlite3_value_int(apVal[0])!=0
+      || p->objiter.eType==RBU_PK_EXTERNAL
+      || p->objiter.eType==RBU_PK_NONE
+  );
+  if( sqlite3_value_int(apVal[0])!=0 ){
+    p->nPhaseOneStep += p->objiter.nIndex;
+  }
+
+  for(i=0; rc==SQLITE_OK && i<nVal; i++){
+    rc = sqlite3_bind_value(p->objiter.pTmpInsert, i+1, apVal[i]);
+  }
+  if( rc==SQLITE_OK ){
+    sqlite3_step(p->objiter.pTmpInsert);
+    rc = sqlite3_reset(p->objiter.pTmpInsert);
+  }
+
+  if( rc!=SQLITE_OK ){
+    sqlite3_result_error_code(pCtx, rc);
+  }
+}
+
+/*
+** Ensure that the SQLite statement handles required to update the
+** target database object currently indicated by the iterator passed
+** as the second argument are available.
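+**
+** As context for the statements prepared below: the temp triggers created
+** on the imposter table pass their first argument - 0 for the new image
+** of an INSERT, 3 for the old image of a DELETE or UPDATE, 4 for the new
+** image of an UPDATE - to rbu_tmp_insert(), which appends the row to the
+** rbu_tmp_xxx table via pIter->pTmpInsert.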
+*/ +static int rbuObjIterPrepareAll( + sqlite3rbu *p, + RbuObjIter *pIter, + int nOffset /* Add "LIMIT -1 OFFSET $nOffset" to SELECT */ +){ + assert( pIter->bCleanup==0 ); + if( pIter->pSelect==0 && rbuObjIterCacheTableInfo(p, pIter)==SQLITE_OK ){ + const int tnum = pIter->iTnum; + char *zCollist = 0; /* List of indexed columns */ + char **pz = &p->zErrmsg; + const char *zIdx = pIter->zIdx; + char *zLimit = 0; + + if( nOffset ){ + zLimit = sqlite3_mprintf(" LIMIT -1 OFFSET %d", nOffset); + if( !zLimit ) p->rc = SQLITE_NOMEM; + } + + if( zIdx ){ + const char *zTbl = pIter->zTbl; + char *zImposterCols = 0; /* Columns for imposter table */ + char *zImposterPK = 0; /* Primary key declaration for imposter */ + char *zWhere = 0; /* WHERE clause on PK columns */ + char *zBind = 0; + int nBind = 0; + + assert( pIter->eType!=RBU_PK_VTAB ); + zCollist = rbuObjIterGetIndexCols( + p, pIter, &zImposterCols, &zImposterPK, &zWhere, &nBind + ); + zBind = rbuObjIterGetBindlist(p, nBind); + + /* Create the imposter table used to write to this index. */ + sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1); + sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1,tnum); + rbuMPrintfExec(p, p->dbMain, + "CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID", + zTbl, zImposterCols, zImposterPK + ); + sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0); + + /* Create the statement to insert index entries */ + pIter->nCol = nBind; + if( p->rc==SQLITE_OK ){ + p->rc = prepareFreeAndCollectError( + p->dbMain, &pIter->pInsert, &p->zErrmsg, + sqlite3_mprintf("INSERT INTO \"rbu_imp_%w\" VALUES(%s)", zTbl, zBind) + ); + } + + /* And to delete index entries */ + if( rbuIsVacuum(p)==0 && p->rc==SQLITE_OK ){ + p->rc = prepareFreeAndCollectError( + p->dbMain, &pIter->pDelete, &p->zErrmsg, + sqlite3_mprintf("DELETE FROM \"rbu_imp_%w\" WHERE %s", zTbl, zWhere) + ); + } + + /* Create the SELECT statement to read keys in sorted order */ + if( p->rc==SQLITE_OK ){ + char *zSql; + if( rbuIsVacuum(p) ){ + zSql = sqlite3_mprintf( + "SELECT %s, 0 AS rbu_control FROM '%q' ORDER BY %s%s", + zCollist, + pIter->zDataTbl, + zCollist, zLimit + ); + }else + + if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ + zSql = sqlite3_mprintf( + "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' ORDER BY %s%s", + zCollist, p->zStateDb, pIter->zDataTbl, + zCollist, zLimit + ); + }else{ + zSql = sqlite3_mprintf( + "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' " + "UNION ALL " + "SELECT %s, rbu_control FROM '%q' " + "WHERE typeof(rbu_control)='integer' AND rbu_control!=1 " + "ORDER BY %s%s", + zCollist, p->zStateDb, pIter->zDataTbl, + zCollist, pIter->zDataTbl, + zCollist, zLimit + ); + } + p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, zSql); + } + + sqlite3_free(zImposterCols); + sqlite3_free(zImposterPK); + sqlite3_free(zWhere); + sqlite3_free(zBind); + }else{ + int bRbuRowid = (pIter->eType==RBU_PK_VTAB) + ||(pIter->eType==RBU_PK_NONE) + ||(pIter->eType==RBU_PK_EXTERNAL && rbuIsVacuum(p)); + const char *zTbl = pIter->zTbl; /* Table this step applies to */ + const char *zWrite; /* Imposter table name */ + + char *zBindings = rbuObjIterGetBindlist(p, pIter->nTblCol + bRbuRowid); + char *zWhere = rbuObjIterGetWhere(p, pIter); + char *zOldlist = rbuObjIterGetOldlist(p, pIter, "old"); + char *zNewlist = rbuObjIterGetOldlist(p, pIter, "new"); + + zCollist = rbuObjIterGetCollist(p, pIter); + pIter->nCol = pIter->nTblCol; + + /* Create the imposter table or 
tables (if required). */ + rbuCreateImposterTable(p, pIter); + rbuCreateImposterTable2(p, pIter); + zWrite = (pIter->eType==RBU_PK_VTAB ? "" : "rbu_imp_"); + + /* Create the INSERT statement to write to the target PK b-tree */ + if( p->rc==SQLITE_OK ){ + p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pInsert, pz, + sqlite3_mprintf( + "INSERT INTO \"%s%w\"(%s%s) VALUES(%s)", + zWrite, zTbl, zCollist, (bRbuRowid ? ", _rowid_" : ""), zBindings + ) + ); + } + + /* Create the DELETE statement to write to the target PK b-tree. + ** Because it only performs INSERT operations, this is not required for + ** an rbu vacuum handle. */ + if( rbuIsVacuum(p)==0 && p->rc==SQLITE_OK ){ + p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pDelete, pz, + sqlite3_mprintf( + "DELETE FROM \"%s%w\" WHERE %s", zWrite, zTbl, zWhere + ) + ); + } + + if( rbuIsVacuum(p)==0 && pIter->abIndexed ){ + const char *zRbuRowid = ""; + if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ + zRbuRowid = ", rbu_rowid"; + } + + /* Create the rbu_tmp_xxx table and the triggers to populate it. */ + rbuMPrintfExec(p, p->dbRbu, + "CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS " + "SELECT *%s FROM '%q' WHERE 0;" + , p->zStateDb, pIter->zDataTbl + , (pIter->eType==RBU_PK_EXTERNAL ? ", 0 AS rbu_rowid" : "") + , pIter->zDataTbl + ); + + rbuMPrintfExec(p, p->dbMain, + "CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" " + "BEGIN " + " SELECT rbu_tmp_insert(3, %s);" + "END;" + + "CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" " + "BEGIN " + " SELECT rbu_tmp_insert(3, %s);" + "END;" + + "CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" " + "BEGIN " + " SELECT rbu_tmp_insert(4, %s);" + "END;", + zWrite, zTbl, zOldlist, + zWrite, zTbl, zOldlist, + zWrite, zTbl, zNewlist + ); + + if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ + rbuMPrintfExec(p, p->dbMain, + "CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" " + "BEGIN " + " SELECT rbu_tmp_insert(0, %s);" + "END;", + zWrite, zTbl, zNewlist + ); + } + + rbuObjIterPrepareTmpInsert(p, pIter, zCollist, zRbuRowid); + } + + /* Create the SELECT statement to read keys from data_xxx */ + if( p->rc==SQLITE_OK ){ + const char *zRbuRowid = ""; + if( bRbuRowid ){ + zRbuRowid = rbuIsVacuum(p) ? ",_rowid_ " : ",rbu_rowid"; + } + p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, + sqlite3_mprintf( + "SELECT %s,%s rbu_control%s FROM '%q'%s", + zCollist, + (rbuIsVacuum(p) ? "0 AS " : ""), + zRbuRowid, + pIter->zDataTbl, zLimit + ) + ); + } + + sqlite3_free(zWhere); + sqlite3_free(zOldlist); + sqlite3_free(zNewlist); + sqlite3_free(zBindings); + } + sqlite3_free(zCollist); + sqlite3_free(zLimit); + } + + return p->rc; +} + +/* +** Set output variable *ppStmt to point to an UPDATE statement that may +** be used to update the imposter table for the main table b-tree of the +** table object that pIter currently points to, assuming that the +** rbu_control column of the data_xyz table contains zMask. +** +** If the zMask string does not specify any columns to update, then this +** is not an error. Output variable *ppStmt is set to NULL in this case. 
+*/ +static int rbuGetUpdateStmt( + sqlite3rbu *p, /* RBU handle */ + RbuObjIter *pIter, /* Object iterator */ + const char *zMask, /* rbu_control value ('x.x.') */ + sqlite3_stmt **ppStmt /* OUT: UPDATE statement handle */ +){ + RbuUpdateStmt **pp; + RbuUpdateStmt *pUp = 0; + int nUp = 0; + + /* In case an error occurs */ + *ppStmt = 0; + + /* Search for an existing statement. If one is found, shift it to the front + ** of the LRU queue and return immediately. Otherwise, leave nUp pointing + ** to the number of statements currently in the cache and pUp to the + ** last object in the list. */ + for(pp=&pIter->pRbuUpdate; *pp; pp=&((*pp)->pNext)){ + pUp = *pp; + if( strcmp(pUp->zMask, zMask)==0 ){ + *pp = pUp->pNext; + pUp->pNext = pIter->pRbuUpdate; + pIter->pRbuUpdate = pUp; + *ppStmt = pUp->pUpdate; + return SQLITE_OK; + } + nUp++; + } + assert( pUp==0 || pUp->pNext==0 ); + + if( nUp>=SQLITE_RBU_UPDATE_CACHESIZE ){ + for(pp=&pIter->pRbuUpdate; *pp!=pUp; pp=&((*pp)->pNext)); + *pp = 0; + sqlite3_finalize(pUp->pUpdate); + pUp->pUpdate = 0; + }else{ + pUp = (RbuUpdateStmt*)rbuMalloc(p, sizeof(RbuUpdateStmt)+pIter->nTblCol+1); + } + + if( pUp ){ + char *zWhere = rbuObjIterGetWhere(p, pIter); + char *zSet = rbuObjIterGetSetlist(p, pIter, zMask); + char *zUpdate = 0; + + pUp->zMask = (char*)&pUp[1]; + memcpy(pUp->zMask, zMask, pIter->nTblCol); + pUp->pNext = pIter->pRbuUpdate; + pIter->pRbuUpdate = pUp; + + if( zSet ){ + const char *zPrefix = ""; + + if( pIter->eType!=RBU_PK_VTAB ) zPrefix = "rbu_imp_"; + zUpdate = sqlite3_mprintf("UPDATE \"%s%w\" SET %s WHERE %s", + zPrefix, pIter->zTbl, zSet, zWhere + ); + p->rc = prepareFreeAndCollectError( + p->dbMain, &pUp->pUpdate, &p->zErrmsg, zUpdate + ); + *ppStmt = pUp->pUpdate; + } + sqlite3_free(zWhere); + sqlite3_free(zSet); + } + + return p->rc; +} + +static sqlite3 *rbuOpenDbhandle( + sqlite3rbu *p, + const char *zName, + int bUseVfs +){ + sqlite3 *db = 0; + if( p->rc==SQLITE_OK ){ + const int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI; + p->rc = sqlite3_open_v2(zName, &db, flags, bUseVfs ? p->zVfsName : 0); + if( p->rc ){ + p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); + sqlite3_close(db); + db = 0; + } + } + return db; +} + +/* +** Free an RbuState object allocated by rbuLoadState(). +*/ +static void rbuFreeState(RbuState *p){ + if( p ){ + sqlite3_free(p->zTbl); + sqlite3_free(p->zIdx); + sqlite3_free(p); + } +} + +/* +** Allocate an RbuState object and load the contents of the rbu_state +** table into it. Return a pointer to the new object. It is the +** responsibility of the caller to eventually free the object using +** sqlite3_free(). +** +** If an error occurs, leave an error code and message in the rbu handle +** and return NULL. 
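+**
+** For example, an RBU update suspended part way through its first stage
+** might leave rbu_state contents along these lines (values illustrative):
+**
+**     RBU_STATE_STAGE    -> RBU_STAGE_OAL
+**     RBU_STATE_TBL      -> 't1'       (table being processed)
+**     RBU_STATE_IDX      -> NULL       (no index being processed)
+**     RBU_STATE_ROW      -> 500        (rows already consumed)
+**     RBU_STATE_PROGRESS -> 1500       (total b-tree operations so far)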
+*/ +static RbuState *rbuLoadState(sqlite3rbu *p){ + RbuState *pRet = 0; + sqlite3_stmt *pStmt = 0; + int rc; + int rc2; + + pRet = (RbuState*)rbuMalloc(p, sizeof(RbuState)); + if( pRet==0 ) return 0; + + rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg, + sqlite3_mprintf("SELECT k, v FROM %s.rbu_state", p->zStateDb) + ); + while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + switch( sqlite3_column_int(pStmt, 0) ){ + case RBU_STATE_STAGE: + pRet->eStage = sqlite3_column_int(pStmt, 1); + if( pRet->eStage!=RBU_STAGE_OAL + && pRet->eStage!=RBU_STAGE_MOVE + && pRet->eStage!=RBU_STAGE_CKPT + ){ + p->rc = SQLITE_CORRUPT; + } + break; + + case RBU_STATE_TBL: + pRet->zTbl = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc); + break; + + case RBU_STATE_IDX: + pRet->zIdx = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc); + break; + + case RBU_STATE_ROW: + pRet->nRow = sqlite3_column_int(pStmt, 1); + break; + + case RBU_STATE_PROGRESS: + pRet->nProgress = sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_CKPT: + pRet->iWalCksum = sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_COOKIE: + pRet->iCookie = (u32)sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_OALSZ: + pRet->iOalSz = (u32)sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_PHASEONESTEP: + pRet->nPhaseOneStep = sqlite3_column_int64(pStmt, 1); + break; + + default: + rc = SQLITE_CORRUPT; + break; + } + } + rc2 = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ) rc = rc2; + + p->rc = rc; + return pRet; +} + + +/* +** Open the database handle and attach the RBU database as "rbu". If an +** error occurs, leave an error code and message in the RBU handle. +*/ +static void rbuOpenDatabase(sqlite3rbu *p, int *pbRetry){ + assert( p->rc || (p->dbMain==0 && p->dbRbu==0) ); + assert( p->rc || rbuIsVacuum(p) || p->zTarget!=0 ); + + /* Open the RBU database */ + p->dbRbu = rbuOpenDbhandle(p, p->zRbu, 1); + + if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ + sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); + if( p->zState==0 ){ + const char *zFile = sqlite3_db_filename(p->dbRbu, "main"); + p->zState = rbuMPrintf(p, "file://%s-vacuum?modeof=%s", zFile, zFile); + } + } + + /* If using separate RBU and state databases, attach the state database to + ** the RBU db handle now. 
*/ + if( p->zState ){ + rbuMPrintfExec(p, p->dbRbu, "ATTACH %Q AS stat", p->zState); + memcpy(p->zStateDb, "stat", 4); + }else{ + memcpy(p->zStateDb, "main", 4); + } + +#if 0 + if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ + p->rc = sqlite3_exec(p->dbRbu, "BEGIN", 0, 0, 0); + } +#endif + + /* If it has not already been created, create the rbu_state table */ + rbuMPrintfExec(p, p->dbRbu, RBU_CREATE_STATE, p->zStateDb); + +#if 0 + if( rbuIsVacuum(p) ){ + if( p->rc==SQLITE_OK ){ + int rc2; + int bOk = 0; + sqlite3_stmt *pCnt = 0; + p->rc = prepareAndCollectError(p->dbRbu, &pCnt, &p->zErrmsg, + "SELECT count(*) FROM stat.sqlite_master" + ); + if( p->rc==SQLITE_OK + && sqlite3_step(pCnt)==SQLITE_ROW + && 1==sqlite3_column_int(pCnt, 0) + ){ + bOk = 1; + } + rc2 = sqlite3_finalize(pCnt); + if( p->rc==SQLITE_OK ) p->rc = rc2; + + if( p->rc==SQLITE_OK && bOk==0 ){ + p->rc = SQLITE_ERROR; + p->zErrmsg = sqlite3_mprintf("invalid state database"); + } + + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, 0); + } + } + } +#endif + + if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ + int bOpen = 0; + int rc; + p->nRbu = 0; + p->pRbuFd = 0; + rc = sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); + if( rc!=SQLITE_NOTFOUND ) p->rc = rc; + if( p->eStage>=RBU_STAGE_MOVE ){ + bOpen = 1; + }else{ + RbuState *pState = rbuLoadState(p); + if( pState ){ + bOpen = (pState->eStage>=RBU_STAGE_MOVE); + rbuFreeState(pState); + } + } + if( bOpen ) p->dbMain = rbuOpenDbhandle(p, p->zRbu, p->nRbu<=1); + } + + p->eStage = 0; + if( p->rc==SQLITE_OK && p->dbMain==0 ){ + if( !rbuIsVacuum(p) ){ + p->dbMain = rbuOpenDbhandle(p, p->zTarget, 1); + }else if( p->pRbuFd->pWalFd ){ + if( pbRetry ){ + p->pRbuFd->bNolock = 0; + sqlite3_close(p->dbRbu); + sqlite3_close(p->dbMain); + p->dbMain = 0; + p->dbRbu = 0; + *pbRetry = 1; + return; + } + p->rc = SQLITE_ERROR; + p->zErrmsg = sqlite3_mprintf("cannot vacuum wal mode database"); + }else{ + char *zTarget; + char *zExtra = 0; + if( strlen(p->zRbu)>=5 && 0==memcmp("file:", p->zRbu, 5) ){ + zExtra = &p->zRbu[5]; + while( *zExtra ){ + if( *zExtra++=='?' ) break; + } + if( *zExtra=='\0' ) zExtra = 0; + } + + zTarget = sqlite3_mprintf("file:%s-vacuum?rbu_memory=1%s%s", + sqlite3_db_filename(p->dbRbu, "main"), + (zExtra==0 ? "" : "&"), (zExtra==0 ? "" : zExtra) + ); + + if( zTarget==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + p->dbMain = rbuOpenDbhandle(p, zTarget, p->nRbu<=1); + sqlite3_free(zTarget); + } + } + + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_create_function(p->dbMain, + "rbu_tmp_insert", -1, SQLITE_UTF8, (void*)p, rbuTmpInsertFunc, 0, 0 + ); + } + + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_create_function(p->dbMain, + "rbu_fossil_delta", 2, SQLITE_UTF8, 0, rbuFossilDeltaFunc, 0, 0 + ); + } + + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_create_function(p->dbRbu, + "rbu_target_name", -1, SQLITE_UTF8, (void*)p, rbuTargetNameFunc, 0, 0 + ); + } + + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p); + } + rbuMPrintfExec(p, p->dbMain, "SELECT * FROM sqlite_master"); + + /* Mark the database file just opened as an RBU target database. If + ** this call returns SQLITE_NOTFOUND, then the RBU vfs is not in use. + ** This is an error. 
*/ + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p); + } + + if( p->rc==SQLITE_NOTFOUND ){ + p->rc = SQLITE_ERROR; + p->zErrmsg = sqlite3_mprintf("rbu vfs not found"); + } +} + +/* +** This routine is a copy of the sqlite3FileSuffix3() routine from the core. +** It is a no-op unless SQLITE_ENABLE_8_3_NAMES is defined. +** +** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database +** filename in zBaseFilename is a URI with the "8_3_names=1" parameter and +** if filename in z[] has a suffix (a.k.a. "extension") that is longer than +** three characters, then shorten the suffix on z[] to be the last three +** characters of the original suffix. +** +** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always +** do the suffix shortening regardless of URI parameter. +** +** Examples: +** +** test.db-journal => test.nal +** test.db-wal => test.wal +** test.db-shm => test.shm +** test.db-mj7f3319fa => test.9fa +*/ +static void rbuFileSuffix3(const char *zBase, char *z){ +#ifdef SQLITE_ENABLE_8_3_NAMES +#if SQLITE_ENABLE_8_3_NAMES<2 + if( sqlite3_uri_boolean(zBase, "8_3_names", 0) ) +#endif + { + int i, sz; + sz = (int)strlen(z)&0xffffff; + for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} + if( z[i]=='.' && sz>i+4 ) memmove(&z[i+1], &z[sz-3], 4); + } +#endif +} + +/* +** Return the current wal-index header checksum for the target database +** as a 64-bit integer. +** +** The checksum is store in the first page of xShmMap memory as an 8-byte +** blob starting at byte offset 40. +*/ +static i64 rbuShmChecksum(sqlite3rbu *p){ + i64 iRet = 0; + if( p->rc==SQLITE_OK ){ + sqlite3_file *pDb = p->pTargetFd->pReal; + u32 volatile *ptr; + p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr); + if( p->rc==SQLITE_OK ){ + iRet = ((i64)ptr[10] << 32) + ptr[11]; + } + } + return iRet; +} + +/* +** This function is called as part of initializing or reinitializing an +** incremental checkpoint. +** +** It populates the sqlite3rbu.aFrame[] array with the set of +** (wal frame -> db page) copy operations required to checkpoint the +** current wal file, and obtains the set of shm locks required to safely +** perform the copy operations directly on the file-system. +** +** If argument pState is not NULL, then the incremental checkpoint is +** being resumed. In this case, if the checksum of the wal-index-header +** following recovery is not the same as the checksum saved in the RbuState +** object, then the rbu handle is set to DONE state. This occurs if some +** other client appends a transaction to the wal file in the middle of +** an incremental checkpoint. +*/ +static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){ + + /* If pState is NULL, then the wal file may not have been opened and + ** recovered. Running a read-statement here to ensure that doing so + ** does not interfere with the "capture" process below. */ + if( pState==0 ){ + p->eStage = 0; + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_exec(p->dbMain, "SELECT * FROM sqlite_master", 0, 0, 0); + } + } + + /* Assuming no error has occurred, run a "restart" checkpoint with the + ** sqlite3rbu.eStage variable set to CAPTURE. This turns on the following + ** special behaviour in the rbu VFS: + ** + ** * If the exclusive shm WRITER or READ0 lock cannot be obtained, + ** the checkpoint fails with SQLITE_BUSY (normally SQLite would + ** proceed with running a passive checkpoint instead of failing). 
+  **
+  ** * Attempts to read from the *-wal file or write to the database file
+  **   do not perform any IO. Instead, the frame/page combinations that
+  **   would be read/written are recorded in the sqlite3rbu.aFrame[]
+  **   array.
+  **
+  ** * Calls to xShmLock(UNLOCK) to release the exclusive shm WRITER,
+  **   READ0 and CHECKPOINT locks taken as part of the checkpoint are
+  **   no-ops. These locks will not be released until the connection
+  **   is closed.
+  **
+  ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL
+  **   error.
+  **
+  ** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the
+  ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[]
+  ** array populated with a set of (frame -> page) mappings. Because the
+  ** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy
+  ** data from the wal file into the database file according to the
+  ** contents of aFrame[].
+  */
+  if( p->rc==SQLITE_OK ){
+    int rc2;
+    p->eStage = RBU_STAGE_CAPTURE;
+    rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0, 0);
+    if( rc2!=SQLITE_INTERNAL ) p->rc = rc2;
+  }
+
+  if( p->rc==SQLITE_OK && p->nFrame>0 ){
+    p->eStage = RBU_STAGE_CKPT;
+    p->nStep = (pState ? pState->nRow : 0);
+    p->aBuf = rbuMalloc(p, p->pgsz);
+    p->iWalCksum = rbuShmChecksum(p);
+  }
+
+  if( p->rc==SQLITE_OK ){
+    if( p->nFrame==0 || (pState && pState->iWalCksum!=p->iWalCksum) ){
+      p->rc = SQLITE_DONE;
+      p->eStage = RBU_STAGE_DONE;
+    }
+  }
+}
+
+/*
+** Called when iAmt bytes are read from offset iOff of the wal file while
+** the rbu object is in capture mode. Record the frame number of the frame
+** being read in the aFrame[] array.
+*/
+static int rbuCaptureWalRead(sqlite3rbu *pRbu, i64 iOff, int iAmt){
+  const u32 mReq = (1<<WAL_LOCK_WRITE)|(1<<WAL_LOCK_CKPT)|(1<<WAL_LOCK_READ0);
+  u32 iFrame;
+
+  /* Capture mode requires that the exclusive WRITER, CHECKPOINT and
+  ** READ0 shm locks are all held. */
+  if( pRbu->mLock!=mReq ){
+    pRbu->rc = SQLITE_BUSY;
+    return SQLITE_INTERNAL;
+  }
+
+  /* Grow the aFrame[] array if it is already full. */
+  pRbu->pgsz = iAmt;
+  if( pRbu->nFrame==pRbu->nFrameAlloc ){
+    int nNew = (pRbu->nFrameAlloc ? pRbu->nFrameAlloc : 64) * 2;
+    RbuFrame *aNew;
+    aNew = (RbuFrame*)sqlite3_realloc64(pRbu->aFrame, nNew * sizeof(RbuFrame));
+    if( aNew==0 ) return SQLITE_NOMEM;
+    pRbu->aFrame = aNew;
+    pRbu->nFrameAlloc = nNew;
+  }
+
+  /* A wal file consists of a 32-byte header followed by frames, each of
+  ** which is a 24-byte frame-header followed by one page (iAmt bytes) of
+  ** data. Hence the frame number of the frame at offset iOff: */
+  iFrame = (u32)((iOff-32) / (i64)(iAmt+24)) + 1;
+  if( pRbu->iMaxFrame<iFrame ) pRbu->iMaxFrame = iFrame;
+  pRbu->aFrame[pRbu->nFrame].iWalFrame = iFrame;
+  pRbu->aFrame[pRbu->nFrame].iDbPage = 0;
+  pRbu->nFrame++;
+  return SQLITE_OK;
+}
+#errors
+Line: 1 Col: 9 Unexpected end tag (strong). Expected DOCTYPE.
+Line: 1 Col: 9 Unexpected end tag (strong) after the (implied) root element.
+Line: 1 Col: 13 Unexpected end tag (b) after the (implied) root element.
+Line: 1 Col: 18 Unexpected end tag (em) after the (implied) root element.
+Line: 1 Col: 22 Unexpected end tag (i) after the (implied) root element.
+Line: 1 Col: 26 Unexpected end tag (u) after the (implied) root element.
+Line: 1 Col: 35 Unexpected end tag (strike) after the (implied) root element.
+Line: 1 Col: 39 Unexpected end tag (s) after the (implied) root element.
+Line: 1 Col: 47 Unexpected end tag (blink) after the (implied) root element.
+Line: 1 Col: 52 Unexpected end tag (tt) after the (implied) root element.
+Line: 1 Col: 58 Unexpected end tag (pre) after the (implied) root element.
+Line: 1 Col: 64 Unexpected end tag (big) after the (implied) root element.
+Line: 1 Col: 72 Unexpected end tag (small) after the (implied) root element.
+Line: 1 Col: 79 Unexpected end tag (font) after the (implied) root element.
+Line: 1 Col: 88 Unexpected end tag (select) after the (implied) root element.
+Line: 1 Col: 93 Unexpected end tag (h1) after the (implied) root element.
+Line: 1 Col: 98 Unexpected end tag (h2) after the (implied) root element.
+Line: 1 Col: 103 Unexpected end tag (h3) after the (implied) root element. +Line: 1 Col: 108 Unexpected end tag (h4) after the (implied) root element. +Line: 1 Col: 113 Unexpected end tag (h5) after the (implied) root element. +Line: 1 Col: 118 Unexpected end tag (h6) after the (implied) root element. +Line: 1 Col: 125 Unexpected end tag (body) after the (implied) root element. +Line: 1 Col: 130 Unexpected end tag (br). Treated as br element. +Line: 1 Col: 134 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 140 This element (img) has no end tag. +Line: 1 Col: 148 Unexpected end tag (title). Ignored. +Line: 1 Col: 155 Unexpected end tag (span). Ignored. +Line: 1 Col: 163 Unexpected end tag (style). Ignored. +Line: 1 Col: 172 Unexpected end tag (script). Ignored. +Line: 1 Col: 180 Unexpected end tag (table). Ignored. +Line: 1 Col: 185 Unexpected end tag (th). Ignored. +Line: 1 Col: 190 Unexpected end tag (td). Ignored. +Line: 1 Col: 195 Unexpected end tag (tr). Ignored. +Line: 1 Col: 203 This element (frame) has no end tag. +Line: 1 Col: 210 This element (area) has no end tag. +Line: 1 Col: 217 Unexpected end tag (link). Ignored. +Line: 1 Col: 225 This element (param) has no end tag. +Line: 1 Col: 230 This element (hr) has no end tag. +Line: 1 Col: 238 This element (input) has no end tag. +Line: 1 Col: 244 Unexpected end tag (col). Ignored. +Line: 1 Col: 251 Unexpected end tag (base). Ignored. +Line: 1 Col: 258 Unexpected end tag (meta). Ignored. +Line: 1 Col: 269 This element (basefont) has no end tag. +Line: 1 Col: 279 This element (bgsound) has no end tag. +Line: 1 Col: 287 This element (embed) has no end tag. +Line: 1 Col: 296 This element (spacer) has no end tag. +Line: 1 Col: 300 Unexpected end tag (p). Ignored. +Line: 1 Col: 305 End tag (dd) seen too early. Expected other end tag. +Line: 1 Col: 310 End tag (dt) seen too early. Expected other end tag. +Line: 1 Col: 320 Unexpected end tag (caption). Ignored. +Line: 1 Col: 331 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 339 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 347 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 355 Unexpected end tag (thead). Ignored. +Line: 1 Col: 365 End tag (address) seen too early. Expected other end tag. +Line: 1 Col: 378 End tag (blockquote) seen too early. Expected other end tag. +Line: 1 Col: 387 End tag (center) seen too early. Expected other end tag. +Line: 1 Col: 393 Unexpected end tag (dir). Ignored. +Line: 1 Col: 399 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 404 End tag (dl) seen too early. Expected other end tag. +Line: 1 Col: 415 End tag (fieldset) seen too early. Expected other end tag. +Line: 1 Col: 425 End tag (listing) seen too early. Expected other end tag. +Line: 1 Col: 432 End tag (menu) seen too early. Expected other end tag. +Line: 1 Col: 437 End tag (ol) seen too early. Expected other end tag. +Line: 1 Col: 442 End tag (ul) seen too early. Expected other end tag. +Line: 1 Col: 447 End tag (li) seen too early. Expected other end tag. +Line: 1 Col: 454 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 460 This element (wbr) has no end tag. +Line: 1 Col: 476 End tag (button) seen too early. Expected other end tag. +Line: 1 Col: 486 End tag (marquee) seen too early. Expected other end tag. +Line: 1 Col: 495 End tag (object) seen too early. Expected other end tag. +Line: 1 Col: 513 Unexpected end tag (html). Ignored. +Line: 1 Col: 513 Unexpected end tag (frameset). 
Ignored. +Line: 1 Col: 520 Unexpected end tag (head). Ignored. +Line: 1 Col: 529 Unexpected end tag (iframe). Ignored. +Line: 1 Col: 537 This element (image) has no end tag. +Line: 1 Col: 547 This element (isindex) has no end tag. +Line: 1 Col: 557 Unexpected end tag (noembed). Ignored. +Line: 1 Col: 568 Unexpected end tag (noframes). Ignored. +Line: 1 Col: 579 Unexpected end tag (noscript). Ignored. +Line: 1 Col: 590 Unexpected end tag (optgroup). Ignored. +Line: 1 Col: 599 Unexpected end tag (option). Ignored. +Line: 1 Col: 611 Unexpected end tag (plaintext). Ignored. +Line: 1 Col: 622 Unexpected end tag (textarea). Ignored. +#document +| +| +| +|
    +|

    + +#data +


  • +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end tag (strong) in table context caused voodoo mode. +Line: 1 Col: 20 End tag (strong) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 24 Unexpected end tag (b) in table context caused voodoo mode. +Line: 1 Col: 24 End tag (b) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 29 Unexpected end tag (em) in table context caused voodoo mode. +Line: 1 Col: 29 End tag (em) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 33 Unexpected end tag (i) in table context caused voodoo mode. +Line: 1 Col: 33 End tag (i) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 37 Unexpected end tag (u) in table context caused voodoo mode. +Line: 1 Col: 37 End tag (u) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 46 Unexpected end tag (strike) in table context caused voodoo mode. +Line: 1 Col: 46 End tag (strike) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 50 Unexpected end tag (s) in table context caused voodoo mode. +Line: 1 Col: 50 End tag (s) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 58 Unexpected end tag (blink) in table context caused voodoo mode. +Line: 1 Col: 58 Unexpected end tag (blink). Ignored. +Line: 1 Col: 63 Unexpected end tag (tt) in table context caused voodoo mode. +Line: 1 Col: 63 End tag (tt) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 69 Unexpected end tag (pre) in table context caused voodoo mode. +Line: 1 Col: 69 End tag (pre) seen too early. Expected other end tag. +Line: 1 Col: 75 Unexpected end tag (big) in table context caused voodoo mode. +Line: 1 Col: 75 End tag (big) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 83 Unexpected end tag (small) in table context caused voodoo mode. +Line: 1 Col: 83 End tag (small) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 90 Unexpected end tag (font) in table context caused voodoo mode. +Line: 1 Col: 90 End tag (font) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 99 Unexpected end tag (select) in table context caused voodoo mode. +Line: 1 Col: 99 Unexpected end tag (select). Ignored. +Line: 1 Col: 104 Unexpected end tag (h1) in table context caused voodoo mode. +Line: 1 Col: 104 End tag (h1) seen too early. Expected other end tag. +Line: 1 Col: 109 Unexpected end tag (h2) in table context caused voodoo mode. +Line: 1 Col: 109 End tag (h2) seen too early. Expected other end tag. +Line: 1 Col: 114 Unexpected end tag (h3) in table context caused voodoo mode. +Line: 1 Col: 114 End tag (h3) seen too early. Expected other end tag. +Line: 1 Col: 119 Unexpected end tag (h4) in table context caused voodoo mode. +Line: 1 Col: 119 End tag (h4) seen too early. Expected other end tag. +Line: 1 Col: 124 Unexpected end tag (h5) in table context caused voodoo mode. +Line: 1 Col: 124 End tag (h5) seen too early. Expected other end tag. +Line: 1 Col: 129 Unexpected end tag (h6) in table context caused voodoo mode. +Line: 1 Col: 129 End tag (h6) seen too early. Expected other end tag. +Line: 1 Col: 136 Unexpected end tag (body) in the table row phase. Ignored. +Line: 1 Col: 141 Unexpected end tag (br) in table context caused voodoo mode. +Line: 1 Col: 141 Unexpected end tag (br). Treated as br element. 
+Line: 1 Col: 145 Unexpected end tag (a) in table context caused voodoo mode. +Line: 1 Col: 145 End tag (a) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 151 Unexpected end tag (img) in table context caused voodoo mode. +Line: 1 Col: 151 This element (img) has no end tag. +Line: 1 Col: 159 Unexpected end tag (title) in table context caused voodoo mode. +Line: 1 Col: 159 Unexpected end tag (title). Ignored. +Line: 1 Col: 166 Unexpected end tag (span) in table context caused voodoo mode. +Line: 1 Col: 166 Unexpected end tag (span). Ignored. +Line: 1 Col: 174 Unexpected end tag (style) in table context caused voodoo mode. +Line: 1 Col: 174 Unexpected end tag (style). Ignored. +Line: 1 Col: 183 Unexpected end tag (script) in table context caused voodoo mode. +Line: 1 Col: 183 Unexpected end tag (script). Ignored. +Line: 1 Col: 196 Unexpected end tag (th). Ignored. +Line: 1 Col: 201 Unexpected end tag (td). Ignored. +Line: 1 Col: 206 Unexpected end tag (tr). Ignored. +Line: 1 Col: 214 This element (frame) has no end tag. +Line: 1 Col: 221 This element (area) has no end tag. +Line: 1 Col: 228 Unexpected end tag (link). Ignored. +Line: 1 Col: 236 This element (param) has no end tag. +Line: 1 Col: 241 This element (hr) has no end tag. +Line: 1 Col: 249 This element (input) has no end tag. +Line: 1 Col: 255 Unexpected end tag (col). Ignored. +Line: 1 Col: 262 Unexpected end tag (base). Ignored. +Line: 1 Col: 269 Unexpected end tag (meta). Ignored. +Line: 1 Col: 280 This element (basefont) has no end tag. +Line: 1 Col: 290 This element (bgsound) has no end tag. +Line: 1 Col: 298 This element (embed) has no end tag. +Line: 1 Col: 307 This element (spacer) has no end tag. +Line: 1 Col: 311 Unexpected end tag (p). Ignored. +Line: 1 Col: 316 End tag (dd) seen too early. Expected other end tag. +Line: 1 Col: 321 End tag (dt) seen too early. Expected other end tag. +Line: 1 Col: 331 Unexpected end tag (caption). Ignored. +Line: 1 Col: 342 Unexpected end tag (colgroup). Ignored. +Line: 1 Col: 350 Unexpected end tag (tbody). Ignored. +Line: 1 Col: 358 Unexpected end tag (tfoot). Ignored. +Line: 1 Col: 366 Unexpected end tag (thead). Ignored. +Line: 1 Col: 376 End tag (address) seen too early. Expected other end tag. +Line: 1 Col: 389 End tag (blockquote) seen too early. Expected other end tag. +Line: 1 Col: 398 End tag (center) seen too early. Expected other end tag. +Line: 1 Col: 404 Unexpected end tag (dir). Ignored. +Line: 1 Col: 410 End tag (div) seen too early. Expected other end tag. +Line: 1 Col: 415 End tag (dl) seen too early. Expected other end tag. +Line: 1 Col: 426 End tag (fieldset) seen too early. Expected other end tag. +Line: 1 Col: 436 End tag (listing) seen too early. Expected other end tag. +Line: 1 Col: 443 End tag (menu) seen too early. Expected other end tag. +Line: 1 Col: 448 End tag (ol) seen too early. Expected other end tag. +Line: 1 Col: 453 End tag (ul) seen too early. Expected other end tag. +Line: 1 Col: 458 End tag (li) seen too early. Expected other end tag. +Line: 1 Col: 465 End tag (nobr) violates step 1, paragraph 1 of the adoption agency algorithm. +Line: 1 Col: 471 This element (wbr) has no end tag. +Line: 1 Col: 487 End tag (button) seen too early. Expected other end tag. +Line: 1 Col: 497 End tag (marquee) seen too early. Expected other end tag. +Line: 1 Col: 506 End tag (object) seen too early. Expected other end tag. +Line: 1 Col: 524 Unexpected end tag (html). Ignored. +Line: 1 Col: 524 Unexpected end tag (frameset). Ignored. 
+Line: 1 Col: 531 Unexpected end tag (head). Ignored. +Line: 1 Col: 540 Unexpected end tag (iframe). Ignored. +Line: 1 Col: 548 This element (image) has no end tag. +Line: 1 Col: 558 This element (isindex) has no end tag. +Line: 1 Col: 568 Unexpected end tag (noembed). Ignored. +Line: 1 Col: 579 Unexpected end tag (noframes). Ignored. +Line: 1 Col: 590 Unexpected end tag (noscript). Ignored. +Line: 1 Col: 601 Unexpected end tag (optgroup). Ignored. +Line: 1 Col: 610 Unexpected end tag (option). Ignored. +Line: 1 Col: 622 Unexpected end tag (plaintext). Ignored. +Line: 1 Col: 633 Unexpected end tag (textarea). Ignored. +#document +| +| +| +|
    +| +| +| +|

    + +#data + +#errors +Line: 1 Col: 10 Unexpected start tag (frameset). Expected DOCTYPE. +Line: 1 Col: 10 Expected closing tag. Unexpected end of file. +#document +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat new file mode 100644 index 0000000..4f8df86 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests10.dat @@ -0,0 +1,799 @@ +#data + +#errors +#document +| +| +| +| +| + +#data +a +#errors +29: Bogus comment +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +35: Stray “svg” start tag. +42: Stray end tag “svg” +#document +| +| +| +| +| +#errors +43: Stray “svg” start tag. +50: Stray end tag “svg” +#document +| +| +| +| +|

    +#errors +34: Start tag “svg” seen in “table”. +41: Stray end tag “svg”. +#document +| +| +| +| +| +| + +#data +
    foo
    +#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +53: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| + +#data +
    foobar
    +#errors +34: Start tag “svg” seen in “table”. +46: Stray end tag “g”. +58: Stray end tag “g”. +65: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| + +#data +
    foobar
    +#errors +41: Start tag “svg” seen in “table”. +53: Stray end tag “g”. +65: Stray end tag “g”. +72: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| + +#data +
    foobar
    +#errors +45: Start tag “svg” seen in “table”. +57: Stray end tag “g”. +69: Stray end tag “g”. +76: Stray end tag “svg”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +| +| +| + +#data +
    foobar
    +#errors +#document +| +| +| +| +| +| +| +|
    +| +| +| "foo" +| +| "bar" + +#data +
    foobar

    baz

    +#errors +#document +| +| +| +| +| +| +| +|
    +| +| +| "foo" +| +| "bar" +|

    +| "baz" + +#data +
    foobar

    baz

    +#errors +#document +| +| +| +| +| +|
    +| +| +| "foo" +| +| "bar" +|

    +| "baz" + +#data +
    foobar

    baz

    quux +#errors +70: HTML start tag “p” in a foreign namespace context. +81: “table” closed but “caption” was still open. +#document +| +| +| +| +| +|
    +| +| +| "foo" +| +| "bar" +|

    +| "baz" +|

    +| "quux" + +#data +
    foobarbaz

    quux +#errors +78: “table” closed but “caption” was still open. +78: Unclosed elements on stack. +#document +| +| +| +| +| +|
    +| +| +| "foo" +| +| "bar" +| "baz" +|

    +| "quux" + +#data +foobar

    baz

    quux +#errors +44: Start tag “svg” seen in “table”. +56: Stray end tag “g”. +68: Stray end tag “g”. +71: HTML start tag “p” in a foreign namespace context. +71: Start tag “p” seen in “table”. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

    +| "baz" +| +| +|

    +| "quux" + +#data +

    quux +#errors +50: Stray “svg” start tag. +54: Stray “g” start tag. +62: Stray end tag “g” +66: Stray “g” start tag. +74: Stray end tag “g” +77: Stray “p” start tag. +88: “table” end tag with “select” open. +#document +| +| +| +| +| +| +| +|
    +|

    quux +#errors +36: Start tag “select” seen in “table”. +42: Stray “svg” start tag. +46: Stray “g” start tag. +54: Stray end tag “g” +58: Stray “g” start tag. +66: Stray end tag “g” +69: Stray “p” start tag. +80: “table” end tag with “select” open. +#document +| +| +| +| +| +|

    +| "quux" + +#data +foobar

    baz +#errors +41: Stray “svg” start tag. +68: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

    +| "baz" + +#data +foobar

    baz +#errors +34: Stray “svg” start tag. +61: HTML start tag “p” in a foreign namespace context. +#document +| +| +| +| +| +| +| "foo" +| +| "bar" +|

    +| "baz" + +#data +

    +#errors +31: Stray “svg” start tag. +35: Stray “g” start tag. +40: Stray end tag “g” +44: Stray “g” start tag. +49: Stray end tag “g” +52: Stray “p” start tag. +58: Stray “span” start tag. +58: End of file seen and there were open elements. +#document +| +| +| +| + +#data +

    +#errors +42: Stray “svg” start tag. +46: Stray “g” start tag. +51: Stray end tag “g” +55: Stray “g” start tag. +60: Stray end tag “g” +63: Stray “p” start tag. +69: Stray “span” start tag. +#document +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| +| xlink href="foo" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data + +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" + +#data +bar +#errors +#document +| +| +| +| +| xlink:href="foo" +| xml:lang="en" +| +| +| xlink href="foo" +| xml lang="en" +| "bar" + +#data + +#errors +#document +| +| +| +| + +#data +

    a +#errors +#document +| +| +| +|
    +| +| "a" + +#data +
    a +#errors +#document +| +| +| +|
    +| +| +| "a" + +#data +
    +#errors +#document +| +| +| +|
    +| +| +| + +#data +
    a +#errors +#document +| +| +| +|
    +| +| +| +| +| "a" + +#data +

    a +#errors +#document +| +| +| +|

    +| +| +| +|

    +| "a" + +#data +
      a +#errors +40: HTML start tag “ul” in a foreign namespace context. +41: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +|
      +| +|
        +| "a" + +#data +
          a +#errors +35: HTML start tag “ul” in a foreign namespace context. +36: End of file in a foreign namespace context. +#document +| +| +| +| +| +| +| +|
            +| "a" + +#data +

            +#errors +#document +| +| +| +| +|

            +| +| +|

            + +#data +

            +#errors +#document +| +| +| +| +|

            +| +| +|

            + +#data +

            +#errors +#document +| +| +| +|

            +| +| +| +|

            +|

            + +#data +
            +#errors +#document +| +| +| +| +| +|
            +| +|
            +| +| + +#data +
            +#errors +#document +| +| +| +| +| +| +| +|
            +|
            +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data +

    +#errors +#document +| +| +| +| +|
    +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| + +#data +
    +#errors +#document +| +| +| +| +| +| +| +|
    +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat new file mode 100644 index 0000000..638cde4 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests11.dat @@ -0,0 +1,482 @@ +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" +| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributeName="" +| attributeType="" +| baseFrequency="" +| baseProfile="" +| calcMode="" +| clipPathUnits="" +| contentScriptType="" +| contentStyleType="" +| diffuseConstant="" +| edgeMode="" +| externalResourcesRequired="" +| filterRes="" +| filterUnits="" +| glyphRef="" +| gradientTransform="" +| gradientUnits="" +| kernelMatrix="" +| kernelUnitLength="" +| keyPoints="" +| keySplines="" +| keyTimes="" +| lengthAdjust="" +| limitingConeAngle="" +| markerHeight="" +| markerUnits="" +| markerWidth="" +| maskContentUnits="" +| maskUnits="" +| numOctaves="" +| pathLength="" +| patternContentUnits="" +| patternTransform="" +| patternUnits="" +| pointsAtX="" +| pointsAtY="" 
+| pointsAtZ="" +| preserveAlpha="" +| preserveAspectRatio="" +| primitiveUnits="" +| refX="" +| refY="" +| repeatCount="" +| repeatDur="" +| requiredExtensions="" +| requiredFeatures="" +| specularConstant="" +| specularExponent="" +| spreadMethod="" +| startOffset="" +| stdDeviation="" +| stitchTiles="" +| surfaceScale="" +| systemLanguage="" +| tableValues="" +| targetX="" +| targetY="" +| textLength="" +| viewBox="" +| viewTarget="" +| xChannelSelector="" +| yChannelSelector="" +| zoomAndPan="" + +#data + +#errors +#document +| +| +| +| +| +| attributename="" +| attributetype="" +| basefrequency="" +| baseprofile="" +| calcmode="" +| clippathunits="" +| contentscripttype="" +| contentstyletype="" +| diffuseconstant="" +| edgemode="" +| externalresourcesrequired="" +| filterres="" +| filterunits="" +| glyphref="" +| gradienttransform="" +| gradientunits="" +| kernelmatrix="" +| kernelunitlength="" +| keypoints="" +| keysplines="" +| keytimes="" +| lengthadjust="" +| limitingconeangle="" +| markerheight="" +| markerunits="" +| markerwidth="" +| maskcontentunits="" +| maskunits="" +| numoctaves="" +| pathlength="" +| patterncontentunits="" +| patterntransform="" +| patternunits="" +| pointsatx="" +| pointsaty="" +| pointsatz="" +| preservealpha="" +| preserveaspectratio="" +| primitiveunits="" +| refx="" +| refy="" +| repeatcount="" +| repeatdur="" +| requiredextensions="" +| requiredfeatures="" +| specularconstant="" +| specularexponent="" +| spreadmethod="" +| startoffset="" +| stddeviation="" +| stitchtiles="" +| surfacescale="" +| systemlanguage="" +| tablevalues="" +| targetx="" +| targety="" +| textlength="" +| viewbox="" +| viewtarget="" +| xchannelselector="" +| ychannelselector="" +| zoomandpan="" + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat new file mode 100644 index 0000000..63107d2 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests12.dat @@ -0,0 +1,62 @@ +#data +

    foobazeggs

    spam

    quuxbar +#errors +#document +| +| +| +| +|

    +| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

    +| "spam" +| +| +| +|
    +| +| +| "quux" +| "bar" + +#data +foobazeggs

    spam
    quuxbar +#errors +#document +| +| +| +| +| "foo" +| +| +| +| "baz" +| +| +| +| +| "eggs" +| +| +|

    +| "spam" +| +| +| +|
    +| +| +| "quux" +| "bar" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat new file mode 100644 index 0000000..b8713f8 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests14.dat @@ -0,0 +1,74 @@ +#data + +#errors +#document +| +| +| +| +| + +#data + +#errors +#document +| +| +| +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| abc:def="gh" +| +| +| + +#data + +#errors +15: Unexpected start tag html +#document +| +| +| xml:lang="bar" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| +| + +#data + +#errors +#document +| +| +| 123="456" +| 789="012" +| +| + +#data + +#errors +#document +| +| +| +| +| 789="012" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat new file mode 100644 index 0000000..6ce1c0d --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests15.dat @@ -0,0 +1,208 @@ +#data +

    X +#errors +Line: 1 Col: 31 Unexpected end tag (p). Ignored. +Line: 1 Col: 36 Expected closing tag. Unexpected end of file. +#document +| +| +| +| +|

    +| +| +| +| +| +| +| " " +|

    +| "X" + +#data +

    +

    X +#errors +Line: 1 Col: 3 Unexpected start tag (p). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end tag (p). Ignored. +Line: 2 Col: 4 Expected closing tag. Unexpected end of file. +#document +| +| +| +|

    +| +| +| +| +| +| +| " +" +|

    +| "X" + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| +| " " + +#data + +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| + +#data + +#errors +Line: 1 Col: 6 Unexpected start tag (html). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end tag (html) after the (implied) root element. +#document +| +| +| +| + +#data +X +#errors +Line: 1 Col: 22 Unexpected end tag (body) after the (implied) root element. +#document +| +| +| +| +| +| "X" + +#data +<!doctype html><table> X<meta></table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected start tag (meta) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " X" +| <meta> +| <table> + +#data +<!doctype html><table> x</table> +#errors +Line: 1 Col: 24 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> + +#data +<!doctype html><table> x </table> +#errors +Line: 1 Col: 25 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x " +| <table> + +#data +<!doctype html><table><tr> x</table> +#errors +Line: 1 Col: 28 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| " x" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table>X<style> <tr>x </style> </table> +#errors +Line: 1 Col: 23 Unexpected non-space characters in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" +| <table> +| <style> +| " <tr>x " +| " " + +#data +<!doctype html><div><table><a>foo</a> <tr><td>bar</td> </tr></table></div> +#errors +Line: 1 Col: 30 Unexpected start tag (a) in table context caused voodoo mode. +Line: 1 Col: 37 Unexpected end tag (a) in table context caused voodoo mode. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <a> +| "foo" +| <table> +| " " +| <tbody> +| <tr> +| <td> +| "bar" +| " " + +#data +<frame></frame></frame><frameset><frame><frameset><frame></frameset><noframes></frameset><noframes> +#errors +6: Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”. +13: Stray start tag “frame”. +21: Stray end tag “frame”. +29: Stray end tag “frame”. +39: “frameset” start tag after “body” already open. +105: End of file seen inside an [R]CDATA element. +105: End of file seen and there were open elements. +XXX: These errors are wrong, please fix me! +#document +| <html> +| <head> +| <frameset> +| <frame> +| <frameset> +| <frame> +| <noframes> +| "</frameset><noframes>" + +#data +<!DOCTYPE html><object></html> +#errors +1: Expected closing tag. Unexpected end of file +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat new file mode 100644 index 0000000..c8ef66f --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests16.dat @@ -0,0 +1,2299 @@ +#data +<!doctype html><script> +#errors +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script>a +#errors +Line: 1 Col: 24 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<!doctype html><script>< +#errors +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<!doctype html><script></ +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<!doctype html><script></S +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<!doctype html><script></SC +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<!doctype html><script></SCR +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<!doctype html><script></SCRI +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<!doctype html><script></SCRIP +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<!doctype html><script></SCRIPT +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script></s +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<!doctype html><script></sc +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<!doctype html><script></scr +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<!doctype html><script></scri +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<!doctype html><script></scrip +#errors +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<!doctype html><script></script +#errors +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| <body> + +#data +<!doctype html><script><! +#errors +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<!doctype html><script><!a +#errors +Line: 1 Col: 26 Unexpected end of file. 
Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<!doctype html><script><!- +#errors +Line: 1 Col: 26 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<!doctype html><script><!-a +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<!doctype html><script><!-- +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--a +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<!doctype html><script><!--< +#errors +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<!doctype html><script><!--<a +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<!doctype html><script><!--</ +#errors +Line: 1 Col: 27 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<!doctype html><script><!--</script +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<!doctype html><script><!--<s +#errors +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<!doctype html><script><!--<script +#errors +Line: 1 Col: 35 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<!doctype html><script><!--<script < +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<!doctype html><script><!--<script <a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<!doctype html><script><!--<script </ +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<!doctype html><script><!--<script </s +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 43 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<!doctype html><script><!--<script </scripta +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<!doctype html><script><!--<script </script +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script> +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<!doctype html><script><!--<script </script/ +#errors +Line: 1 Col: 44 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<!doctype html><script><!--<script </script < +#errors +Line: 1 Col: 45 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<!doctype html><script><!--<script </script <a +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<!doctype html><script><!--<script </script </ +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<!doctype html><script><!--<script </script </script +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script/ +#errors +Line: 1 Col: 53 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script </script </script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<!doctype html><script><!--<script - +#errors +Line: 1 Col: 36 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<!doctype html><script><!--<script -a +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<!doctype html><script><!--<script -< +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -<" +| <body> + +#data +<!doctype html><script><!--<script -- +#errors +Line: 1 Col: 37 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<!doctype html><script><!--<script --a +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<!doctype html><script><!--<script --< +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --<" +| <body> + +#data +<!doctype html><script><!--<script --> +#errors +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script -->< +#errors +Line: 1 Col: 39 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<!doctype html><script><!--<script --></ +#errors +Line: 1 Col: 40 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 46 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<!doctype html><script><!--<script --></script +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script/ +#errors +Line: 1 Col: 47 Unexpected end of file. Expected end tag (script). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script --></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<!doctype html><script><!--<script><\/script>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<!doctype html><script><!--<script></scr'+'ipt>--></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>--><!--</script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-- ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- -></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>- - ></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<!doctype html><script><!--<script></script><script></script>-></script> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<!doctype html><script><!--<script>--!></script>X +#errors +Line: 1 Col: 49 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<!doctype html><script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 59 Unexpected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<!doctype html><script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 57 Unexpected end of file. Expected end tag (script). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<!doctype html><style><!--<style></style>--></style> +#errors +Line: 1 Col: 52 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<!doctype html><style><!--</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<!doctype html><style><!--...</style>...--></style> +#errors +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." 
+| <body> +| "...-->" + +#data +<!doctype html><style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<!doctype html><style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 66 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<!doctype html><style><!--...</style><!-- --><style>@import ...</style> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<!doctype html><style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 63 Unexpected end tag (style). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<!doctype html><style>...<!--[if IE]><style>...</style>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<!doctype html><title><!--<title>--> +#errors +Line: 1 Col: 52 Unexpected end tag (title). +#document +| +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<!doctype html><title></title> +#errors +#document +| +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 52 Unexpected end of file. Expected end tag (title). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<!doctype html><noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 64 Unexpected end tag (noscript). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<!doctype html><noscript><!--</noscript>X<noscript>--></noscript> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<!doctype html><noscript><iframe></noscript>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<!doctype html><noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 64 Unexpected end tag (noframes). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<!doctype html><noframes><body><script><!--...</script></body></noframes></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<!doctype html><textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 64 Unexpected end tag (textarea). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<!doctype html><textarea></textarea></textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<!doctype html><textarea><</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "<" + +#data +<!doctype html><textarea>a<b</textarea> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> +| "a<b" + +#data +<!doctype html><iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 56 Unexpected end tag (iframe). 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<!doctype html><iframe>...<!--X->...<!--/X->...</iframe> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<!doctype html><xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 44 Unexpected end tag (xmp). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<!doctype html><noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 60 Unexpected end tag (noembed). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script>a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "a" +| <body> + +#data +<script>< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 9 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<" +| <body> + +#data +<script></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</" +| <body> + +#data +<script></S +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</S" +| <body> + +#data +<script></SC +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SC" +| <body> + +#data +<script></SCR +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCR" +| <body> + +#data +<script></SCRI +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRI" +| <body> + +#data +<script></SCRIP +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIP" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</SCRIPT" +| <body> + +#data +<script></SCRIPT +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script></s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</s" +| <body> + +#data +<script></sc +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. 
+Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</sc" +| <body> + +#data +<script></scr +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scr" +| <body> + +#data +<script></scri +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scri" +| <body> + +#data +<script></scrip +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 15 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</scrip" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 16 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</script" +| <body> + +#data +<script></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 17 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| <body> + +#data +<script><! +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 10 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!" +| <body> + +#data +<script><!a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!a" +| <body> + +#data +<script><!- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-" +| <body> + +#data +<script><!-a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!-a" +| <body> + +#data +<script><!-- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 12 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--a" +| <body> + +#data +<script><!--< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 13 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<" +| <body> + +#data +<script><!--<a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<a" +| <body> + +#data +<script><!--</ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--</" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--</script" +| <body> + +#data +<script><!--</script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--" +| <body> + +#data +<script><!--<s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 14 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<s" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 19 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script" +| <body> + +#data +<script><!--<script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 20 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script " +| <body> + +#data +<script><!--<script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <" +| <body> + +#data +<script><!--<script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script <a" +| <body> + +#data +<script><!--<script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </" +| <body> + +#data +<script><!--<script </s +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </s" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script" +| <body> + +#data +<script><!--<script </scripta +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </scripta" +| <body> + +#data +<script><!--<script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script>" +| <body> + +#data +<script><!--<script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script/" +| <body> + +#data +<script><!--<script </script < +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 30 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script </script <" +| <body> + +#data +<script><!--<script </script <a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script <a" +| <body> + +#data +<script><!--<script </script </ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script </script" +| <body> + +#data +<script><!--<script </script </script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 38 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script </script </script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script </script " +| <body> + +#data +<script><!--<script - +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -" +| <body> + +#data +<script><!--<script -a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -a" +| <body> + +#data +<script><!--<script -- +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --" +| <body> + +#data +<script><!--<script --a +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --a" +| <body> + +#data +<script><!--<script --> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script -->< +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 24 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --><" +| <body> + +#data +<script><!--<script --></ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 25 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 31 Unexpected end of file. 
Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script --></script" +| <body> + +#data +<script><!--<script --></script +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script/ +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 32 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script --></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script -->" +| <body> + +#data +<script><!--<script><\/script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script><\/script>-->" +| <body> + +#data +<script><!--<script></scr'+'ipt>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt>-->" +| <body> + +#data +<script><!--<script></script><script></script></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>" +| <body> + +#data +<script><!--<script></script><script></script>--><!--</script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>--><!--" +| <body> + +#data +<script><!--<script></script><script></script>-- ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>-- >" +| <body> + +#data +<script><!--<script></script><script></script>- -></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- ->" +| <body> + +#data +<script><!--<script></script><script></script>- - ></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>- - >" +| <body> + +#data +<script><!--<script></script><script></script>-></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +#document +| <html> +| <head> +| <script> +| "<!--<script></script><script></script>->" +| <body> + +#data +<script><!--<script>--!></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 34 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<script>--!></script>X" +| <body> + +#data +<script><!--<scr'+'ipt></script>--></script> +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 44 Unexpected end tag (script). +#document +| <html> +| <head> +| <script> +| "<!--<scr'+'ipt>" +| <body> +| "-->" + +#data +<script><!--<script></scr'+'ipt></script>X +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 42 Unexpected end of file. Expected end tag (script). 
+#document +| <html> +| <head> +| <script> +| "<!--<script></scr'+'ipt></script>X" +| <body> + +#data +<style><!--<style></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--<style>" +| <body> +| "-->" + +#data +<style><!--</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--" +| <body> +| "X" + +#data +<style><!--...</style>...--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 36 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--..." +| <body> +| "...-->" + +#data +<style><!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style></style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--<br><html xmlns:v="urn:schemas-microsoft-com:vml"><!--[if !mso]><style>" +| <body> +| "X" + +#data +<style><!--...<style><!--...--!></style>--></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 51 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "<!--...<style><!--...--!>" +| <body> +| "-->" + +#data +<style><!--...</style><!-- --><style>@import ...</style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "<!--..." +| <!-- --> +| <style> +| "@import ..." +| <body> + +#data +<style>...<style><!--...</style><!-- --></style> +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 48 Unexpected end tag (style). +#document +| <html> +| <head> +| <style> +| "...<style><!--..." +| <!-- --> +| <body> + +#data +<style>...<!--[if IE]><style>...</style>X +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| <html> +| <head> +| <style> +| "...<!--[if IE]><style>..." +| <body> +| "X" + +#data +<title><!--<title>--> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end tag (title). +#document +| +| +| +| "<!--<title>" +| <body> +| "-->" + +#data +<title></title> +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +#document +| +| +| +| "" +| + +#data +foo/title><link></head><body>X +#errors +Line: 1 Col: 7 Unexpected start tag (title). Expected DOCTYPE. +Line: 1 Col: 37 Unexpected end of file. Expected end tag (title). +#document +| <html> +| <head> +| <title> +| "foo/title><link></head><body>X" +| <body> + +#data +<noscript><!--<noscript></noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noscript). +#document +| <html> +| <head> +| <noscript> +| "<!--<noscript>" +| <body> +| "-->" + +#data +<noscript><!--</noscript>X<noscript>--></noscript> +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<!--" +| <body> +| "X" +| <noscript> +| "-->" + +#data +<noscript><iframe></noscript>X +#errors +Line: 1 Col: 10 Unexpected start tag (noscript). Expected DOCTYPE. +#document +| <html> +| <head> +| <noscript> +| "<iframe>" +| <body> +| "X" + +#data +<noframes><!--<noframes></noframes>--></noframes> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (noframes). 
+#document +| <html> +| <head> +| <noframes> +| "<!--<noframes>" +| <body> +| "-->" + +#data +<noframes><body><script><!--...</script></body></noframes></html> +#errors +Line: 1 Col: 10 Unexpected start tag (noframes). Expected DOCTYPE. +#document +| <html> +| <head> +| <noframes> +| "<body><script><!--...</script></body>" +| <body> + +#data +<textarea><!--<textarea></textarea>--></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 49 Unexpected end tag (textarea). +#document +| <html> +| <head> +| <body> +| <textarea> +| "<!--<textarea>" +| "-->" + +#data +<textarea></textarea></textarea> +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <textarea> +| "</textarea>" + +#data +<iframe><!--<iframe></iframe>--></iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +Line: 1 Col: 41 Unexpected end tag (iframe). +#document +| <html> +| <head> +| <body> +| <iframe> +| "<!--<iframe>" +| "-->" + +#data +<iframe>...<!--X->...<!--/X->...</iframe> +#errors +Line: 1 Col: 8 Unexpected start tag (iframe). Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| <iframe> +| "...<!--X->...<!--/X->..." + +#data +<xmp><!--<xmp></xmp>--></xmp> +#errors +Line: 1 Col: 5 Unexpected start tag (xmp). Expected DOCTYPE. +Line: 1 Col: 29 Unexpected end tag (xmp). +#document +| <html> +| <head> +| <body> +| <xmp> +| "<!--<xmp>" +| "-->" + +#data +<noembed><!--<noembed></noembed>--></noembed> +#errors +Line: 1 Col: 9 Unexpected start tag (noembed). Expected DOCTYPE. +Line: 1 Col: 45 Unexpected end tag (noembed). +#document +| <html> +| <head> +| <body> +| <noembed> +| "<!--<noembed>" +| "-->" + +#data +<!doctype html><table> + +#errors +Line 2 Col 0 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " +" + +#data +<!doctype html><table><td><span><font></span><span> +#errors +Line 1 Col 26 Unexpected table cell start tag (td) in the table body phase. +Line 1 Col 45 Unexpected end tag (span). +Line 1 Col 51 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <span> +| <font> +| <font> +| <span> + +#data +<!doctype html><form><table></form><form></table></form> +#errors +35: Stray end tag “form”. +41: Start tag “form” seen in “table”. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <table> +| <form> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat new file mode 100644 index 0000000..7b555f8 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests17.dat @@ -0,0 +1,153 @@ +#data +<!doctype html><table><tbody><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tr><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<!doctype html><table><tr><td><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <select> +| <td> + +#data +<!doctype html><table><tr><th><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <th> +| <select> +| <td> + +#data +<!doctype html><table><caption><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <select> +| <tbody> +| <tr> + +#data +<!doctype html><select><tr> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><th> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tbody> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><thead> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><select><caption> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><table><tr></table>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| "a" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat new file mode 100644 index 0000000..680e1f0 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests18.dat @@ -0,0 +1,269 @@ +#data +<!doctype html><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> + +#data +<!doctype html><table><tbody><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><tbody><tr><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> 
+| <td> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><caption><plaintext></plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <plaintext> +| "</plaintext>" + +#data +<!doctype html><table><tr><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <style> +| "</script>" + +#data +<!doctype html><table><tr><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <tbody> +| <tr> +| <script> +| "</style>" + +#data +<!doctype html><table><caption><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><table><td><style></script></style>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <style> +| "</script>" +| "abc" + +#data +<!doctype html><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" + +#data +<!doctype html><table><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> + +#data +<!doctype html><table><tr><select><script></style></script>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <script> +| "</style>" +| "abc" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><frameset></frameset><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><frameset></frameset></html><noframes>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" + +#data +<!doctype html><frameset></frameset></html><noframes>abc</noframes><!--abc--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <noframes> +| "abc" +| <!-- abc --> + +#data +<!doctype html><table><tr></tbody><tfoot> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <tfoot> + +#data +<!doctype html><table><td><svg></svg>abc<td> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <svg svg> +| "abc" +| <td> diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat new file mode 100644 index 0000000..0d62f5a --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests19.dat @@ -0,0 +1,1237 @@ +#data +<!doctype html><math><mn DefinitionUrl="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <math mn> +| definitionURL="foo" + +#data +<!doctype html><html></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <!-- foo --> +| <head> +| <body> + +#data +<!doctype html><head></head></p><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- foo --> +| <body> + +#data +<!doctype html><body><p><pre> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <pre> + +#data +<!doctype 
html><body><p><listing> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <listing> + +#data +<!doctype html><p><plaintext> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <plaintext> + +#data +<!doctype html><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <h1> + +#data +<!doctype html><form><isindex> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> + +#data +<!doctype html><isindex action="POST"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| action="POST" +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex prompt="this is isindex"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "this is isindex" +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><isindex type="hidden"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| type="hidden" +| <hr> + +#data +<!doctype html><isindex name="foo"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| <hr> + +#data +<!doctype html><ruby><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rp> + +#data +<!doctype html><ruby><div><span><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rp> + +#data +<!doctype html><ruby><div><p><rp> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rp> + +#data +<!doctype html><ruby><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <p> +| <rt> + +#data +<!doctype html><ruby><div><span><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <span> +| <rt> + +#data +<!doctype html><ruby><div><p><rt> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <ruby> +| <div> +| <p> +| <rt> + +#data +<!doctype html><math/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> +| <foo> + +#data +<!doctype html><svg/><foo> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| <foo> + +#data +<!doctype html><div></body><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| <!-- foo --> + +#data +<!doctype html><h1><div><h3><span></h1>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h1> +| <div> +| <h3> +| <span> +| "foo" + +#data +<!doctype html><p></h3>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "foo" + +#data +<!doctype html><h3><li>abc</h2>foo +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <h3> +| <li> +| "abc" +| "foo" + +#data +<!doctype html><table>abc<!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "abc" +| <table> +| <!-- foo --> + +#data +<!doctype html><table> <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| " " +| <!-- foo --> + +#data +<!doctype html><table> b <!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| 
<head> +| <body> +| " b " +| <table> +| <!-- foo --> + +#data +<!doctype html><select><option><option> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><select><option></optgroup> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> + +#data +<!doctype html><p><math><mi><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mi> +| <p> +| <h1> + +#data +<!doctype html><p><math><mo><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mo> +| <p> +| <h1> + +#data +<!doctype html><p><math><mn><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <p> +| <h1> + +#data +<!doctype html><p><math><ms><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math ms> +| <p> +| <h1> + +#data +<!doctype html><p><math><mtext><p><h1> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mtext> +| <p> +| <h1> + +#data +<!doctype html><frameset></noframes> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html c=d><body></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><html c=d><frameset></frameset></html><html a=b> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><!--foo--> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <!-- foo --> + +#data +<!doctype html><html><frameset></frameset></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| " " + +#data +<!doctype html><html><frameset></frameset></html>abc +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html><p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype html><html><frameset></frameset></html></p> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<html><frameset></frameset></html><!doctype html> +#errors +#document +| <html> +| <head> +| <frameset> + +#data +<!doctype html><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!doctype html><p><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><p>a<frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "a" + +#data +<!doctype html><p> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><pre><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <pre> + +#data +<!doctype html><listing><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <listing> + +#data +<!doctype html><li><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <li> + +#data +<!doctype html><dd><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| 
<dd> + +#data +<!doctype html><dt><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> + +#data +<!doctype html><button><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <button> + +#data +<!doctype html><applet><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <applet> + +#data +<!doctype html><marquee><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <marquee> + +#data +<!doctype html><object><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <object> + +#data +<!doctype html><table><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> + +#data +<!doctype html><area><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <area> + +#data +<!doctype html><basefont><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <basefont> +| <frameset> + +#data +<!doctype html><bgsound><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <bgsound> +| <frameset> + +#data +<!doctype html><br><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <br> + +#data +<!doctype html><embed><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <embed> + +#data +<!doctype html><img><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html><input><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <input> + +#data +<!doctype html><keygen><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <keygen> + +#data +<!doctype html><wbr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <wbr> + +#data +<!doctype html><hr><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <hr> + +#data +<!doctype html><textarea></textarea><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <textarea> + +#data +<!doctype html><xmp></xmp><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <xmp> + +#data +<!doctype html><iframe></iframe><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <iframe> + +#data +<!doctype html><select></select><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> + +#data +<!doctype html><svg></svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><math></math><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg><foreignObject><div> <frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<!doctype html><svg>a</svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <svg svg> +| "a" + +#data +<!doctype html><svg> </svg><frameset><frame> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> +| <frame> + +#data +<html>aaa<frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "aaa" + +#data +<html> a <frameset></frameset> +#errors +#document +| <html> +| <head> +| <body> +| "a " + +#data +<!doctype html><div><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!doctype 
html><div><body><frameset> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> + +#data +<!doctype html><p><math></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| "a" + +#data +<!doctype html><p><math><mn><span></p>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <math math> +| <math mn> +| <span> +| <p> +| "a" + +#data +<!doctype html><math></html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <math math> + +#data +<!doctype html><meta charset="ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| charset="ascii" +| <body> + +#data +<!doctype html><meta http-equiv="content-type" content="text/html;charset=ascii"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <meta> +| content="text/html;charset=ascii" +| http-equiv="content-type" +| <body> + +#data +<!doctype html><head><!--aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa--><meta charset="utf8"> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <!-- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa --> +| <meta> +| charset="utf8" +| <body> + +#data +<!doctype html><html a=b><head></head><html c=d> +#errors +#document +| <!DOCTYPE html> +| <html> +| a="b" +| c="d" +| <head> +| <body> + +#data +<!doctype html><image/> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <img> + +#data +<!doctype html>a<i>b<table>c<b>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "a" +| <i> +| "bc" +| <b> +| "de" +| "f" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" + +#data +<!doctype html><table><i>a<b>b<div>c</i> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <i> +| "c" +| <table> + +#data +<!doctype html><table><i>a<b>b<div>c<a>d</i>e</b>f +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <b> +| "b" +| <b> +| <div> +| <b> +| <i> +| "c" +| <a> +| "d" +| <a> +| "e" +| <a> +| "f" +| <table> + +#data +<!doctype html><table><i>a<div>b<tr>c<b>d</i>e +#errors +#document +| 
<!DOCTYPE html> +| <html> +| <head> +| <body> +| <i> +| "a" +| <div> +| "b" +| <i> +| "c" +| <b> +| "d" +| <b> +| "e" +| <table> +| <tbody> +| <tr> + +#data +<!doctype html><table><td><table><i>a<div>b<b>c</i>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| <i> +| "a" +| <div> +| <i> +| "b" +| <b> +| "c" +| <b> +| "d" +| <table> + +#data +<!doctype html><body><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <bgsound> + +#data +<!doctype html><body><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <basefont> + +#data +<!doctype html><a><b></a><basefont> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <basefont> + +#data +<!doctype html><a><b></a><bgsound> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <a> +| <b> +| <bgsound> + +#data +<!doctype html><figcaption><article></figcaption>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <figcaption> +| <article> +| "a" + +#data +<!doctype html><summary><article></summary>a +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <summary> +| <article> +| "a" + +#data +<!doctype html><p><a><plaintext>b +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <a> +| <plaintext> +| <a> +| "b" + +#data +<!DOCTYPE html><div>a<a></div>b<p>c</p>d +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <div> +| "a" +| <a> +| <a> +| "b" +| <p> +| "c" +| "d" diff --git a/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat new file mode 100644 index 0000000..60d8592 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/tests2.dat @@ -0,0 +1,763 @@ +#data +<!DOCTYPE html>Test +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "Test" + +#data +<textarea>test</div>test +#errors +Line: 1 Col: 10 Unexpected start tag (textarea). Expected DOCTYPE. +Line: 1 Col: 24 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <textarea> +| "test</div>test" + +#data +<table><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 11 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> + +#data +<table><td>test</tbody></table> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected table cell start tag (td) in the table body phase. +#document +| <html> +| <head> +| <body> +| <table> +| <tbody> +| <tr> +| <td> +| "test" + +#data +<frame>test +#errors +Line: 1 Col: 7 Unexpected start tag (frame). Expected DOCTYPE. +Line: 1 Col: 7 Unexpected start tag frame. Ignored. +#document +| <html> +| <head> +| <body> +| "test" + +#data +<!DOCTYPE html><frameset>test +#errors +Line: 1 Col: 29 Unepxected characters in the frameset phase. Characters ignored. +Line: 1 Col: 29 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><frameset><!DOCTYPE html> +#errors +Line: 1 Col: 40 Unexpected DOCTYPE. Ignored. +Line: 1 Col: 40 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <frameset> + +#data +<!DOCTYPE html><font><p><b>test</font> +#errors +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +Line: 1 Col: 38 End tag (font) violates step 1, paragraph 3 of the adoption agency algorithm. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <p> +| <font> +| <b> +| "test" + +#data +<!DOCTYPE html><dt><div><dd> +#errors +Line: 1 Col: 28 Missing end tag (div, dt). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <dt> +| <div> +| <dd> + +#data +<script></x +#errors +Line: 1 Col: 8 Unexpected start tag (script). Expected DOCTYPE. +Line: 1 Col: 11 Unexpected end of file. Expected end tag (script). +#document +| <html> +| <head> +| <script> +| "</x" +| <body> + +#data +<table><plaintext><td> +#errors +Line: 1 Col: 7 Unexpected start tag (table). Expected DOCTYPE. +Line: 1 Col: 18 Unexpected start tag (plaintext) in table context caused voodoo mode. +Line: 1 Col: 22 Unexpected end of file. Expected table content. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "<td>" +| <table> + +#data +<plaintext></plaintext> +#errors +Line: 1 Col: 11 Unexpected start tag (plaintext). Expected DOCTYPE. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <html> +| <head> +| <body> +| <plaintext> +| "</plaintext>" + +#data +<!DOCTYPE html><table><tr>TEST +#errors +Line: 1 Col: 30 Unexpected non-space characters in table context caused voodoo mode. +Line: 1 Col: 30 Unexpected end of file. Expected table content. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "TEST" +| <table> +| <tbody> +| <tr> + +#data +<!DOCTYPE html><body t1=1><body t2=2><body t3=3 t4=4> +#errors +Line: 1 Col: 37 Unexpected start tag (body). +Line: 1 Col: 53 Unexpected start tag (body). +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| t1="1" +| t2="2" +| t3="3" +| t4="4" + +#data +</b test +#errors +Line: 1 Col: 8 Unexpected end of file in attribute name. +Line: 1 Col: 8 End tag contains unexpected attributes. +Line: 1 Col: 8 Unexpected end tag (b). Expected DOCTYPE. +Line: 1 Col: 8 Unexpected end tag (b) after the (implied) root element. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html></b test<b &=&>X +#errors +Line: 1 Col: 32 Named entity didn't end with ';'. +Line: 1 Col: 33 End tag contains unexpected attributes. +Line: 1 Col: 33 Unexpected end tag (b) after the (implied) root element. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "X" + +#data +<!doctypehtml><scrIPt type=text/x-foobar;baz>X</SCRipt +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 54 Unexpected end of file in the tag name. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <script> +| type="text/x-foobar;baz" +| "X</SCRipt" +| <body> + +#data +& +#errors +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&# +#errors +Line: 1 Col: 1 Numeric entity expected. Got end of file instead. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#" + +#data +&#X +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#X" + +#data +&#x +#errors +Line: 1 Col: 3 Numeric entity expected but none found. +Line: 1 Col: 3 Unexpected non-space characters. 
Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&#x" + +#data +- +#errors +Line: 1 Col: 4 Numeric entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "-" + +#data +&x-test +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&x-test" + +#data +<!doctypehtml><p><li> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <li> + +#data +<!doctypehtml><p><dt> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dt> + +#data +<!doctypehtml><p><dd> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <dd> + +#data +<!doctypehtml><p><form> +#errors +Line: 1 Col: 9 No space after literal string 'DOCTYPE'. +Line: 1 Col: 23 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| <form> + +#data +<!DOCTYPE html><p></P>X +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <p> +| "X" + +#data +& +#errors +Line: 1 Col: 4 Named entity didn't end with ';'. +Line: 1 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&" + +#data +&AMp; +#errors +Line: 1 Col: 1 Named entity expected. Got none. +Line: 1 Col: 1 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "&AMp;" + +#data +<!DOCTYPE html><html><head></head><body><thisISasillyTESTelementNameToMakeSureCrazyTagNamesArePARSEDcorrectLY> +#errors +Line: 1 Col: 110 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <thisisasillytestelementnametomakesurecrazytagnamesareparsedcorrectly> + +#data +<!DOCTYPE html>X</body>X +#errors +Line: 1 Col: 24 Unexpected non-space characters in the after body phase. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| "XX" + +#data +<!DOCTYPE html><!-- X +#errors +Line: 1 Col: 21 Unexpected end of file in comment. +#document +| <!DOCTYPE html> +| <!-- X --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><table><caption>test TEST</caption><td>test +#errors +Line: 1 Col: 54 Unexpected table cell start tag (td) in the table body phase. +Line: 1 Col: 58 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <table> +| <caption> +| "test TEST" +| <tbody> +| <tr> +| <td> +| "test" + +#data +<!DOCTYPE html><select><option><optgroup> +#errors +Line: 1 Col: 41 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><select><optgroup><option></optgroup><option><select><option> +#errors +Line: 1 Col: 68 Unexpected select start tag in the select phase treated as select end tag. +Line: 1 Col: 76 Expected closing tag. Unexpected end of file. +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <option> +| <option> + +#data +<!DOCTYPE html><select><optgroup><option><optgroup> +#errors +Line: 1 Col: 51 Expected closing tag. Unexpected end of file. 
+#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> +| <option> +| <optgroup> + +#data +<!DOCTYPE html><datalist><option>foo</datalist>bar +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <datalist> +| <option> +| "foo" +| "bar" + +#data +<!DOCTYPE html><font><input><input></font> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <font> +| <input> +| <input> + +#data +<!DOCTYPE html><!-- XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX +#errors +Line: 1 Col: 29 Unexpected end of file in comment (-) +#document +| <!DOCTYPE html> +| <!-- XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><!-- XXX - XXX - XXX --> +#errors +#document +| <!DOCTYPE html> +| <!-- XXX - XXX - XXX --> +| <html> +| <head> +| <body> + +#data +<isindex test=x name=x> +#errors +Line: 1 Col: 23 Unexpected start tag (isindex). Expected DOCTYPE. +Line: 1 Col: 23 Unexpected start tag isindex. Don't use it! +#document +| <html> +| <head> +| <body> +| <form> +| <hr> +| <label> +| "This is a searchable index. Enter search keywords: " +| <input> +| name="isindex" +| test="x" +| <hr> + +#data +test +test +#errors +Line: 2 Col: 4 Unexpected non-space characters. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> +| "test +test" + +#data +<!DOCTYPE html><body><title>test</body> +#errors +#document +| +| +| +| +| +| "test</body>" + +#data +<!DOCTYPE html><body><title>X +#errors +#document +| +| +| +| +| +| "X" +| <meta> +| name="z" +| <link> +| rel="foo" +| <style> +| " +x { content:"</style" } " + +#data +<!DOCTYPE html><select><optgroup></optgroup></select> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> +| <select> +| <optgroup> + +#data + + +#errors +Line: 2 Col: 1 Unexpected End of file. Expected DOCTYPE. +#document +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html> <html> +#errors +#document +| <!DOCTYPE html> +| <html> +| <head> +| <body> + +#data +<!DOCTYPE html><script> +</script> <title>x +#errors +#document +| +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 21 Unexpected start tag (script) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +Line: 1 Col: 28 Unexpected start tag (style) that can be in head. Moved. +#document +| +| +| +#errors +Line: 1 Col: 6 Unexpected start tag (head). Expected DOCTYPE. +#document +| +| +| +| +| "x" +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +Line: 1 Col: 22 Unexpected end of file. Expected end tag (style). +#document +| +| +| --> x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +| x +#errors +Line: 1 Col: 7 Unexpected start tag (style). Expected DOCTYPE. +#document +| +| +|

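For orientation: the vendored webkit .dat files above are html5lib tree-construction fixtures. Each case pairs a "#data" input with the parse "#errors" the reference tokenizer reports and a "#document" tree written in indented "| " notation. The following is a minimal sketch (not part of this diff) of how one such case can be replayed against golang.org/x/net/html; the dump helper and the sample input are illustrative only, and the package's real harness (html/parse_test.go, which also checks attributes and foreign-content namespaces) is not shown here.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

// dump prints a parsed node tree in the "| <tag>" notation used by the
// #document sections of the .dat files. Attributes are omitted for brevity.
func dump(n *html.Node, depth int) {
	indent := strings.Repeat("  ", depth)
	switch n.Type {
	case html.DoctypeNode:
		fmt.Printf("| %s<!DOCTYPE %s>\n", indent, n.Data)
	case html.ElementNode:
		fmt.Printf("| %s<%s>\n", indent, n.Data)
	case html.TextNode:
		fmt.Printf("| %s%q\n", indent, n.Data)
	case html.CommentNode:
		fmt.Printf("| %s<!-- %s -->\n", indent, n.Data)
	}
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		dump(c, depth+1)
	}
}

func main() {
	// Replays one of the tests17.dat cases above; the printed tree
	// should match that case's #document section.
	doc, err := html.Parse(strings.NewReader("<!doctype html><table><tr></table>a"))
	if err != nil {
		panic(err)
	}
	for c := doc.FirstChild; c != nil; c = c.NextSibling {
		dump(c, 0)
	}
}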
    << Back to Go HTTP/2 demo server`) + }) +} + +func httpsHost() string { + if *hostHTTPS != "" { + return *hostHTTPS + } + if v := *httpsAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func httpHost() string { + if *hostHTTP != "" { + return *hostHTTP + } + if v := *httpAddr; strings.HasPrefix(v, ":") { + return "localhost" + v + } else { + return v + } +} + +func serveProdTLS() error { + const cacheDir = "/var/cache/autocert" + if err := os.MkdirAll(cacheDir, 0700); err != nil { + return err + } + m := autocert.Manager{ + Cache: autocert.DirCache(cacheDir), + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("http2.golang.org"), + } + srv := &http.Server{ + TLSConfig: &tls.Config{ + GetCertificate: m.GetCertificate, + }, + } + http2.ConfigureServer(srv, &http2.Server{ + NewWriteScheduler: func() http2.WriteScheduler { + return http2.NewPriorityWriteScheduler(nil) + }, + }) + ln, err := net.Listen("tcp", ":443") + if err != nil { + return err + } + return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)) +} + +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func serveProd() error { + errc := make(chan error, 2) + go func() { errc <- http.ListenAndServe(":80", nil) }() + go func() { errc <- serveProdTLS() }() + return <-errc +} + +const idleTimeout = 5 * time.Minute +const activeTimeout = 10 * time.Minute + +// TODO: put this into the standard library and actually send +// PING frames and GOAWAY, etc: golang.org/issue/14204 +func idleTimeoutHook() func(net.Conn, http.ConnState) { + var mu sync.Mutex + m := map[net.Conn]*time.Timer{} + return func(c net.Conn, cs http.ConnState) { + mu.Lock() + defer mu.Unlock() + if t, ok := m[c]; ok { + delete(m, c) + t.Stop() + } + var d time.Duration + switch cs { + case http.StateNew, http.StateIdle: + d = idleTimeout + case http.StateActive: + d = activeTimeout + default: + return + } + m[c] = time.AfterFunc(d, func() { + log.Printf("closing idle conn %v after %v", c.RemoteAddr(), d) + go c.Close() + }) + } +} + +func main() { + var srv http.Server + flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.") + flag.Parse() + srv.Addr = *httpsAddr + srv.ConnState = idleTimeoutHook() + + registerHandlers() + + if *prod { + *hostHTTP = "http2.golang.org" + *hostHTTPS = "http2.golang.org" + log.Fatal(serveProd()) + } + + url := "https://" + httpsHost() + "/" + log.Printf("Listening on " + url) + http2.ConfigureServer(&srv, &http2.Server{}) + + if *httpAddr != "" { + go func() { + log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)") + log.Fatal(http.ListenAndServe(*httpAddr, nil)) + }() + } + + go func() { + log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key")) + }() + select {} +} diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go new file mode 100644 index 0000000..df0866a --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/launch.go @@ -0,0 +1,302 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +var ( + proj = flag.String("project", "symbolic-datum-552", "name of Project") + zone = flag.String("zone", "us-central1-a", "GCE zone") + mach = flag.String("machinetype", "n1-standard-1", "Machine type") + instName = flag.String("instance_name", "http2-demo", "Name of VM instance.") + sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.") + staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.") + + writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.") + publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.") +) + +func readFile(v string) string { + slurp, err := ioutil.ReadFile(v) + if err != nil { + log.Fatalf("Error reading %s: %v", v, err) + } + return strings.TrimSpace(string(slurp)) +} + +var config = &oauth2.Config{ + // The client-id and secret should be for an "Installed Application" when using + // the CLI. Later we'll use a web application with a callback. + ClientID: readFile("client-id.dat"), + ClientSecret: readFile("client-secret.dat"), + Endpoint: google.Endpoint, + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + "https://www.googleapis.com/auth/sqlservice", + "https://www.googleapis.com/auth/sqlservice.admin", + }, + RedirectURL: "urn:ietf:wg:oauth:2.0:oob", +} + +const baseConfig = `#cloud-config +coreos: + units: + - name: h2demo.service + command: start + content: | + [Unit] + Description=HTTP2 Demo + + [Service] + ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo' + ExecStart=/opt/bin/h2demo --prod + RestartSec=5s + Restart=always + Type=simple + + [Install] + WantedBy=multi-user.target +` + +func main() { + flag.Parse() + if *proj == "" { + log.Fatalf("Missing --project flag") + } + prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj + machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach + + const tokenFileName = "token.dat" + tokenFile := tokenCacheFile(tokenFileName) + tokenSource := oauth2.ReuseTokenSource(nil, tokenFile) + token, err := tokenSource.Token() + if err != nil { + if *writeObject != "" { + log.Fatalf("Can't use --write_object without a valid token.dat file already cached.") + } + log.Printf("Error getting token from %s: %v", tokenFileName, err) + log.Printf("Get auth code from %v", config.AuthCodeURL("my-state")) + fmt.Print("\nEnter auth code: ") + sc := bufio.NewScanner(os.Stdin) + sc.Scan() + authCode := strings.TrimSpace(sc.Text()) + token, err = config.Exchange(oauth2.NoContext, authCode) + if err != nil { + log.Fatalf("Error exchanging auth code for a token: %v", err) + } + if err := tokenFile.WriteToken(token); err != nil { + log.Fatalf("Error writing to %s: %v", tokenFileName, err) + } + tokenSource = oauth2.ReuseTokenSource(token, nil) + } + + oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource) + + if *writeObject != "" { + writeCloudStorageObject(oauthClient) + return + } + + computeService, 
_ := compute.New(oauthClient) + + natIP := *staticIP + if natIP == "" { + // Try to find it by name. + aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do() + if err != nil { + log.Fatal(err) + } + // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList + IPLoop: + for _, asl := range aggAddrList.Items { + for _, addr := range asl.Addresses { + if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" { + natIP = addr.Address + break IPLoop + } + } + } + } + + cloudConfig := baseConfig + if *sshPub != "" { + key := strings.TrimSpace(readFile(*sshPub)) + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key) + } + if os.Getenv("USER") == "bradfitz" { + cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com") + } + const maxCloudConfig = 32 << 10 // per compute API docs + if len(cloudConfig) > maxCloudConfig { + log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig) + } + + instance := &compute.Instance{ + Name: *instName, + Description: "Go Builder", + MachineType: machType, + Disks: []*compute.AttachedDisk{instanceDisk(computeService)}, + Tags: &compute.Tags{ + Items: []string{"http-server", "https-server"}, + }, + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: &cloudConfig, + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + AccessConfigs: []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + NatIP: natIP, + }, + }, + Network: prefix + "/global/networks/default", + }, + }, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{ + compute.DevstorageFullControlScope, + compute.ComputeScope, + }, + }, + }, + } + + log.Printf("Creating instance...") + op, err := computeService.Instances.Insert(*proj, *zone, instance).Do() + if err != nil { + log.Fatalf("Failed to create instance: %v", err) + } + opName := op.Name + log.Printf("Created. Waiting on operation %v", opName) +OpLoop: + for { + time.Sleep(2 * time.Second) + op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do() + if err != nil { + log.Fatalf("Failed to get op %s: %v", opName, err) + } + switch op.Status { + case "PENDING", "RUNNING": + log.Printf("Waiting on operation %v", opName) + continue + case "DONE": + if op.Error != nil { + for _, operr := range op.Error.Errors { + log.Printf("Error: %+v", operr) + } + log.Fatalf("Failed to start.") + } + log.Printf("Success. 
%+v", op) + break OpLoop + default: + log.Fatalf("Unknown status %q: %+v", op.Status, op) + } + } + + inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do() + if err != nil { + log.Fatalf("Error getting instance after creation: %v", err) + } + ij, _ := json.MarshalIndent(inst, "", " ") + log.Printf("Instance: %s", ij) +} + +func instanceDisk(svc *compute.Service) *compute.AttachedDisk { + const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016" + diskName := *instName + "-disk" + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + Type: "PERSISTENT", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskName: diskName, + SourceImage: imageURL, + DiskSizeGb: 50, + }, + } +} + +func writeCloudStorageObject(httpClient *http.Client) { + content := os.Stdin + const maxSlurp = 1 << 20 + var buf bytes.Buffer + n, err := io.CopyN(&buf, content, maxSlurp) + if err != nil && err != io.EOF { + log.Fatalf("Error reading from stdin: %v, %v", n, err) + } + contentType := http.DetectContentType(buf.Bytes()) + + req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content)) + if err != nil { + log.Fatal(err) + } + req.Header.Set("x-goog-api-version", "2") + if *publicObject { + req.Header.Set("x-goog-acl", "public-read") + } + req.Header.Set("Content-Type", contentType) + res, err := httpClient.Do(req) + if err != nil { + log.Fatal(err) + } + if res.StatusCode != 200 { + res.Write(os.Stderr) + log.Fatalf("Failed.") + } + log.Printf("Success.") + os.Exit(0) +} + +type tokenCacheFile string + +func (f tokenCacheFile) Token() (*oauth2.Token, error) { + slurp, err := ioutil.ReadFile(string(f)) + if err != nil { + return nil, err + } + t := new(oauth2.Token) + if err := json.Unmarshal(slurp, t); err != nil { + return nil, err + } + return t, nil +} + +func (f tokenCacheFile) WriteToken(t *oauth2.Token) error { + jt, err := json.Marshal(t) + if err != nil { + return err + } + return ioutil.WriteFile(string(f), jt, 0600) +} diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key new file mode 100644 index 0000000..a15a6ab --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q +62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby +XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV +mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ +JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ +SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA +nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e +/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx +qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser +hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j +NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E +LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7 +8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c +0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws +K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd +bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo +QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt 
+Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1 +nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy +b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7 +gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev +WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr +C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj +x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA +hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem new file mode 100644 index 0000000..3a323e7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl new file mode 100644 index 0000000..6db3891 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl @@ -0,0 +1 @@ +E2CE26BF3285059C diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt new file mode 100644 index 0000000..c59059b --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT +C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW +DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow +RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE +ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l +gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2 +dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL 
+A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws +/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88 +F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB +AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R +rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD +EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19 +KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI +dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU +90p6/CbU71bGbfpM2PHot2fm +-----END CERTIFICATE----- diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key new file mode 100644 index 0000000..f329c14 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi +fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm +J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef +b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55 +mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/ +fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p +3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3 +qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4 +NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80 +LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN +a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+ +Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL +W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO +gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm +S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS +Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp +V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4 +KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4 +yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5 +drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e +ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R +48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5 +c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY +nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl +IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd +-----END RSA PRIVATE KEY----- diff --git a/vendor/golang.org/x/net/http2/h2demo/tmpl.go b/vendor/golang.org/x/net/http2/h2demo/tmpl.go new file mode 100644 index 0000000..504d6a7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2demo/tmpl.go @@ -0,0 +1,1991 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build h2demo + +package main + +import "html/template" + +var pushTmpl = template.Must(template.New("serverpush").Parse(` + + + + + + + + + HTTP/2 Server Push Demo + + + + + + + + + +
    +Note: This page exists for demonstration purposes. For the actual cmd/go docs, go to golang.org/cmd/go. +

    Command go


    +Go is a tool for managing Go source code. +

    +

    +Usage: +

    +
    go command [arguments]
    +
    +

    +The commands are: +

    +
    build       compile packages and dependencies
    +clean       remove object files
    +doc         show documentation for package or symbol
    +env         print Go environment information
    +bug         start a bug report
    +fix         run go tool fix on packages
    +fmt         run gofmt on package sources
    +generate    generate Go files by processing source
    +get         download and install packages and dependencies
    +install     compile and install packages and dependencies
    +list        list packages
    +run         compile and run Go program
    +test        test packages
    +tool        run specified go tool
    +version     print Go version
    +vet         run go tool vet on packages
    +
    +

    +Use "go help [command]" for more information about a command. +

    +

    +Additional help topics: +

    +
    c           calling between Go and C
    +buildmode   description of build modes
    +filetype    file types
    +gopath      GOPATH environment variable
    +environment environment variables
    +importpath  import path syntax
    +packages    description of package lists
    +testflag    description of testing flags
    +testfunc    description of testing functions
    +
    +

    +Use "go help [topic]" for more information about that topic. +

    +

    Compile packages and dependencies

    +

    +Usage: +

    +
    go build [-o output] [-i] [build flags] [packages]
    +
    +

    +Build compiles the packages named by the import paths, +along with their dependencies, but it does not install the results. +

    +

    +If the arguments to build are a list of .go files, build treats +them as a list of source files specifying a single package. +

    +

    +When compiling a single main package, build writes +the resulting executable to an output file named after +the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') +or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). +The '.exe' suffix is added when writing a Windows executable. +

    +

    +When compiling multiple packages or a single non-main package, +build compiles the packages but discards the resulting object, +serving only as a check that the packages can be built. +

    +

    +When compiling packages, build ignores files that end in '_test.go'. +

    +

    +The -o flag, only allowed when compiling a single package, +forces build to write the resulting executable or object +to the named output file, instead of the default behavior described +in the last two paragraphs. +

    +

    +The -i flag installs the packages that are dependencies of the target. +

    +

    +The build flags are shared by the build, clean, get, install, list, run, +and test commands: +

    +
    -a
    +	force rebuilding of packages that are already up-to-date.
    +-n
    +	print the commands but do not run them.
    +-p n
    +	the number of programs, such as build commands or
    +	test binaries, that can be run in parallel.
    +	The default is the number of CPUs available.
    +-race
    +	enable data race detection.
    +	Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
    +-msan
    +	enable interoperation with memory sanitizer.
    +	Supported only on linux/amd64,
    +	and only with Clang/LLVM as the host C compiler.
    +-v
    +	print the names of packages as they are compiled.
    +-work
    +	print the name of the temporary work directory and
    +	do not delete it when exiting.
    +-x
    +	print the commands.
    +
    +-asmflags 'flag list'
    +	arguments to pass on each go tool asm invocation.
    +-buildmode mode
    +	build mode to use. See 'go help buildmode' for more.
    +-compiler name
    +	name of compiler to use, as in runtime.Compiler (gccgo or gc).
    +-gccgoflags 'arg list'
    +	arguments to pass on each gccgo compiler/linker invocation.
    +-gcflags 'arg list'
    +	arguments to pass on each go tool compile invocation.
    +-installsuffix suffix
    +	a suffix to use in the name of the package installation directory,
    +	in order to keep output separate from default builds.
    +	If using the -race flag, the install suffix is automatically set to race
    +	or, if set explicitly, has _race appended to it.  Likewise for the -msan
    +	flag.  Using a -buildmode option that requires non-default compile flags
    +	has a similar effect.
    +-ldflags 'flag list'
    +	arguments to pass on each go tool link invocation.
    +-linkshared
    +	link against shared libraries previously created with
    +	-buildmode=shared.
    +-pkgdir dir
    +	install and load all packages from dir instead of the usual locations.
    +	For example, when building with a non-standard configuration,
    +	use -pkgdir to keep generated packages in a separate location.
    +-tags 'tag list'
    +	a list of build tags to consider satisfied during the build.
    +	For more information about build tags, see the description of
    +	build constraints in the documentation for the go/build package.
    +-toolexec 'cmd args'
    +	a program to use to invoke toolchain programs like vet and asm.
    +	For example, instead of running asm, the go command will run
    +	'cmd args /path/to/asm <arguments for asm>'.
    +
    +

    +The list flags accept a space-separated list of strings. To embed spaces +in an element in the list, surround it with either single or double quotes. +

    +

    +For more about specifying packages, see 'go help packages'. +For more about where packages and binaries are installed, +run 'go help gopath'. +For more about calling between Go and C/C++, run 'go help c'. +

    +

    +Note: Build adheres to certain conventions such as those described +by 'go help gopath'. Not all projects can follow these conventions, +however. Installations that have their own conventions or that use +a separate software build system may choose to use lower-level +invocations such as 'go tool compile' and 'go tool link' to avoid +some of the overheads and design decisions of the build tool. +

    +

    +See also: go install, go get, go clean. +

    +

    Remove object files

    +

    +Usage: +

    +
    go clean [-i] [-r] [-n] [-x] [build flags] [packages]
    +
    +

    +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. +

    +

    +Specifically, clean removes the following files from each of the +source directories corresponding to the import paths: +

    +
    _obj/            old object directory, left from Makefiles
    +_test/           old test directory, left from Makefiles
    +_testmain.go     old gotest file, left from Makefiles
    +test.out         old test log, left from Makefiles
    +build.out        old test log, left from Makefiles
    +*.[568ao]        object files, left from Makefiles
    +
    +DIR(.exe)        from go build
    +DIR.test(.exe)   from go test -c
    +MAINFILE(.exe)   from go build MAINFILE.go
    +*.so             from SWIG
    +
    +

    +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. +

    +

    +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). +

    +

    +The -n flag causes clean to print the remove commands it would execute, +but not run them. +

    +

    +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. +

    +

    +The -x flag causes clean to print remove commands as it executes them. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Show documentation for package or symbol

    +

    +Usage: +

    +
    go doc [-u] [-c] [package|[package.]symbol[.method]]
    +
    +

    +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, or method) followed by a one-line +summary of each of the first-level items "under" that item (package-level +declarations for a package, methods for a type, etc.). +

    +

    +Doc accepts zero, one, or two arguments. +

    +

    +Given no arguments, that is, when run as +

    +
    go doc
    +
    +

    +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. +

    +

    +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: +

    +
    go doc <pkg>
    +go doc <sym>[.<method>]
    +go doc [<pkg>.]<sym>[.<method>]
    +go doc [<pkg>.][<sym>.]<method>
    +
    +

    +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. +

    +

    +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. +

    +

    +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. +

    +

    +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. +

    +

    +When run with two arguments, the first must be a full package path (not just a +suffix), and the second is a symbol or symbol and method; this is similar to the +syntax accepted by godoc: +

    +
    go doc <pkg> <sym>[.<method>]
    +
    +

    +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. +

    +

    +Examples: +

    +
    go doc
    +	Show documentation for current package.
    +go doc Foo
    +	Show documentation for Foo in the current package.
    +	(Foo starts with a capital letter so it cannot match
    +	a package path.)
    +go doc encoding/json
    +	Show documentation for the encoding/json package.
    +go doc json
    +	Shorthand for encoding/json.
    +go doc json.Number (or go doc json.number)
    +	Show documentation and method summary for json.Number.
    +go doc json.Number.Int64 (or go doc json.number.int64)
    +	Show documentation for json.Number's Int64 method.
    +go doc cmd/doc
    +	Show package docs for the doc command.
    +go doc -cmd cmd/doc
    +	Show package docs and exported symbols within the doc command.
    +go doc template.new
    +	Show documentation for html/template's New function.
    +	(html/template is lexically before text/template)
    +go doc text/template.new # One argument
    +	Show documentation for text/template's New function.
    +go doc text/template new # Two arguments
    +	Show documentation for text/template's New function.
    +
    +At least in the current tree, these invocations all print the
    +documentation for json.Decoder's Decode method:
    +
    +go doc json.Decoder.Decode
    +go doc json.decoder.decode
    +go doc json.decode
    +cd go/src/encoding/json; go doc decode
    +
    +

    +Flags: +

    +
    -c
    +	Respect case when matching symbols.
    +-cmd
    +	Treat a command (package main) like a regular package.
    +	Otherwise package main's exported symbols are hidden
    +	when showing the package's top-level documentation.
    +-u
    +	Show documentation for unexported as well as exported
    +	symbols and methods.
    +
    +

    Print Go environment information

    +

    +Usage: +

    +
    go env [var ...]
    +
    +

    +Env prints Go environment information. +

    +

+By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names are given as arguments, env prints the value of +each named variable on its own line. +

    +

    Start a bug report

    +

    +Usage: +

    +
    go bug
    +
    +

    +Bug opens the default browser and starts a new bug report. +The report includes useful system information. +

    +

    Run go tool fix on packages

    +

    +Usage: +

    +
    go fix [packages]
    +
    +

    +Fix runs the Go fix command on the packages named by the import paths. +

    +

    +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run fix with specific options, run 'go tool fix'. +

    +

    +See also: go fmt, go vet. +

    +

    Run gofmt on package sources

    +

    +Usage: +

    +
    go fmt [-n] [-x] [packages]
    +
    +

    +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. +

    +

    +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +To run gofmt with specific options, run gofmt itself. +

    +

    +See also: go fix, go vet. +

    +

    Generate Go files by processing source

    +

    +Usage: +

    +
    go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]
    +
    +

    +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. +

    +

    +Go generate is never run automatically by go build, go get, go test, +and so on. It must be run explicitly. +

    +

    +Go generate scans the file for directives, which are lines of +the form, +

    +
    //go:generate command argument...
    +
    +

    +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. +
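+For concreteness, a minimal sketch of a directive in context (the package
+and type names are hypothetical; stringer is a commonly used generator):
+
+	package painkiller
+
+	//go:generate stringer -type=Pill
+	type Pill int
+
+	const (
+		Placebo Pill = iota
+		Aspirin
+	)
+
+Running 'go generate' in that package invokes stringer, which writes a
+pill_string.go file giving Pill a String method.
+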

    +

    +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. +

    +

    +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. +

    +

    +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. +

    +

    +Go generate sets several variables when it runs the generator: +

    +
    $GOARCH
    +	The execution architecture (arm, amd64, etc.)
    +$GOOS
    +	The execution operating system (linux, windows, etc.)
    +$GOFILE
    +	The base name of the file.
    +$GOLINE
    +	The line number of the directive in the source file.
    +$GOPACKAGE
    +	The name of the package of the file containing the directive.
    +$DOLLAR
    +	A dollar sign.
    +
    +
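+As a small illustration, a directive that relies on one of these
+variables (echo stands in for a real generator here):
+
+	package mypkg
+
+	// $GOFILE expands to this file's base name before the
+	// generator runs.
+	//go:generate echo regenerating $GOFILE
+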

    +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. +

    +

    +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. +

    +

    +A directive of the form, +

    +
    //go:generate -command xxx args...
    +
    +

    +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, +

    +
    //go:generate -command foo go tool foo
    +
    +

    +specifies that the command "foo" represents the generator +"go tool foo". +

    +

    +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files, they are treated +as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. +

    +

    +If any generator returns an error exit status, "go generate" skips +all further processing for that package. +

    +

    +The generator is run in the package's source directory. +

    +

    +Go generate accepts one specific flag: +

    +
    -run=""
    +	if non-empty, specifies a regular expression to select
    +	directives whose full original source text (excluding
    +	any trailing spaces and final newline) matches the
    +	expression.
    +
    +

    +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Download and install packages and dependencies

    +

    +Usage: +

    +
    go get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]
    +
    +

    +Get downloads the packages named by the import paths, along with their +dependencies. It then installs the named packages, like 'go install'. +

    +

    +The -d flag instructs get to stop after downloading the packages; that is, +it instructs get not to install the packages. +

    +

    +The -f flag, valid only when -u is set, forces get -u not to verify that +each package has been checked out from the source control repository +implied by its import path. This can be useful if the source is a local fork +of the original. +

    +

    +The -fix flag instructs get to run the fix tool on the downloaded packages +before resolving dependencies or building the code. +

    +

    +The -insecure flag permits fetching from repositories and resolving +custom domains using insecure schemes such as HTTP. Use with caution. +

    +

    +The -t flag instructs get to also download the packages required to build +the tests for the specified packages. +

    +

    +The -u flag instructs get to use the network to update the named packages +and their dependencies. By default, get uses the network to check out +missing packages but does not use it to look for updates to existing packages. +

    +

    +The -v flag enables verbose progress and debug output. +

    +

    +Get also accepts build flags to control the installation. See 'go help build'. +

    +

    +When checking out a new package, get creates the target directory +GOPATH/src/<import-path>. If the GOPATH contains multiple entries, +get uses the first one. For more details see: 'go help gopath'. +

    +

    +When checking out or updating a package, get looks for a branch or tag +that matches the locally installed version of Go. The most important +rule is that if the local installation is running version "go1", get +searches for a branch or tag named "go1". If no such version exists it +retrieves the most recent version of the package. +

    +

    +When go get checks out or updates a Git repository, +it also updates any git submodules referenced by the repository. +

    +

    +Get never checks out or updates code stored in vendor directories. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    +For more about how 'go get' finds source code to +download, see 'go help importpath'. +

    +

    +See also: go build, go install, go clean. +

    +

    Compile and install packages and dependencies

    +

    +Usage: +

    +
    go install [build flags] [packages]
    +
    +

    +Install compiles and installs the packages named by the import paths, +along with their dependencies. +

    +

    +For more about the build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go get, go clean. +

    +

    List packages

    +

    +Usage: +

    +
    go list [-e] [-f format] [-json] [build flags] [packages]
    +
    +

    +List lists the packages named by the import paths, one per line. +

    +

    +The default output shows the package import path: +

    +
    bytes
    +encoding/json
    +github.com/gorilla/mux
    +golang.org/x/net/html
    +
    +

    +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent to -f +''. The struct being passed to the template is: +

    +
    type Package struct {
    +    Dir           string // directory containing package sources
    +    ImportPath    string // import path of package in dir
    +    ImportComment string // path in import comment on package statement
    +    Name          string // package name
    +    Doc           string // package documentation string
    +    Target        string // install path
    +    Shlib         string // the shared library that contains this package (only set when -linkshared)
    +    Goroot        bool   // is this package in the Go root?
    +    Standard      bool   // is this package part of the standard Go library?
    +    Stale         bool   // would 'go install' do anything for this package?
    +    StaleReason   string // explanation for Stale==true
    +    Root          string // Go root or Go path dir containing this package
    +    ConflictDir   string // this directory shadows Dir in $GOPATH
    +    BinaryOnly    bool   // binary-only package: cannot be recompiled from sources
    +
    +    // Source files
    +    GoFiles        []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+    CgoFiles       []string // .go source files that import "C"
    +    IgnoredGoFiles []string // .go sources ignored due to build constraints
    +    CFiles         []string // .c source files
    +    CXXFiles       []string // .cc, .cxx and .cpp source files
    +    MFiles         []string // .m source files
    +    HFiles         []string // .h, .hh, .hpp and .hxx source files
    +    FFiles         []string // .f, .F, .for and .f90 Fortran source files
    +    SFiles         []string // .s source files
    +    SwigFiles      []string // .swig files
    +    SwigCXXFiles   []string // .swigcxx files
    +    SysoFiles      []string // .syso object files to add to archive
    +    TestGoFiles    []string // _test.go files in package
    +    XTestGoFiles   []string // _test.go files outside package
    +
    +    // Cgo directives
    +    CgoCFLAGS    []string // cgo: flags for C compiler
    +    CgoCPPFLAGS  []string // cgo: flags for C preprocessor
    +    CgoCXXFLAGS  []string // cgo: flags for C++ compiler
    +    CgoFFLAGS    []string // cgo: flags for Fortran compiler
    +    CgoLDFLAGS   []string // cgo: flags for linker
    +    CgoPkgConfig []string // cgo: pkg-config names
    +
    +    // Dependency information
    +    Imports      []string // import paths used by this package
    +    Deps         []string // all (recursively) imported dependencies
    +    TestImports  []string // imports from TestGoFiles
    +    XTestImports []string // imports from XTestGoFiles
    +
    +    // Error information
    +    Incomplete bool            // this package or a dependency has an error
    +    Error      *PackageError   // error loading package
    +    DepsErrors []*PackageError // errors loading dependencies
    +}
    +
    +

+Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded import paths. See golang.org/s/go15vendor for more about vendoring. +

    +

    +The error information, if any, is +

    +
    type PackageError struct {
    +    ImportStack   []string // shortest path from package named on command line to this one
    +    Pos           string   // position of error (if present, file:line:col)
    +    Err           string   // the error itself
    +}
    +
    +

    +The template function "join" calls strings.Join. +
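+As an illustrative sketch (not the go tool's own implementation), the -f
+argument behaves like a text/template executed against a Package value,
+with "join" bound to strings.Join:
+
+	package main
+
+	import (
+		"os"
+		"strings"
+		"text/template"
+	)
+
+	// Pkg mirrors a small slice of the Package struct above.
+	type Pkg struct {
+		ImportPath string
+		Imports    []string
+	}
+
+	func main() {
+		t := template.Must(template.New("list").
+			Funcs(template.FuncMap{"join": strings.Join}).
+			Parse("{{.ImportPath}}: {{join .Imports \", \"}}"))
+		t.Execute(os.Stdout, Pkg{"example.com/demo", []string{"fmt", "os"}})
+	}
+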

    +

    +The template function "context" returns the build context, defined as: +

    +
    type Context struct {
    +	GOARCH        string   // target architecture
    +	GOOS          string   // target operating system
    +	GOROOT        string   // Go root
    +	GOPATH        string   // Go path
    +	CgoEnabled    bool     // whether cgo can be used
    +	UseAllFiles   bool     // use files regardless of +build lines, file names
    +	Compiler      string   // compiler to assume when computing target paths
    +	BuildTags     []string // build constraints to match in +build lines
    +	ReleaseTags   []string // releases the current release is compatible with
    +	InstallSuffix string   // suffix to use in the name of the install dir
    +}
    +
    +

    +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. +

    +

    +The -json flag causes the package data to be printed in JSON format +instead of using the template format. +

    +

    +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). +

    +

    +For more about build flags, see 'go help build'. +

    +

    +For more about specifying packages, see 'go help packages'. +

    +

    Compile and run Go program

    +

    +Usage: +

    +
    go run [build flags] [-exec xprog] gofiles... [arguments...]
    +
    +

    +Run compiles and runs the main package comprising the named Go source files. +A Go source file is defined to be a file ending in a literal ".go" suffix. +

    +

    +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: +

    +
    'xprog a.out arguments...'.
    +
    +

    +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_nacl_386_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go build. +

    +

    Test packages

    +

    +Usage: +

    +
    go test [build/test flags] [packages] [build/test flags & test binary flags]
    +
    +

    +'Go test' automates testing the packages named by the import paths. +It prints a summary of the test results in the format: +

    +
    ok   archive/tar   0.011s
    +FAIL archive/zip   0.022s
    +ok   compress/gzip 0.033s
    +...
    +
    +

    +followed by detailed output for each failed package. +

    +

    +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +Files whose names begin with "_" (including "_test.go") or "." are ignored. +These additional files can contain test functions, benchmark functions, and +example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +
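+A minimal sketch of such a file (the names are hypothetical; see
+'go help testfunc' for the exact conventions):
+
+	package mypkg_test
+
+	import (
+		"fmt"
+		"testing"
+	)
+
+	func TestSum(t *testing.T) {
+		if 1+1 != 2 {
+			t.Fatal("unexpected sum")
+		}
+	}
+
+	func BenchmarkSum(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			_ = 1 + 1
+		}
+	}
+
+	func ExampleHello() {
+		fmt.Println("hello")
+		// Output: hello
+	}
+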

    +

    +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. +

    +

    +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. +

    +

    +By default, go test needs no arguments. It compiles and tests the package +with source in the current directory, including tests, and runs the tests. +

    +

    +The package is built in a temporary directory so it does not interfere with the +non-test installation. +

    +

    +In addition to the build flags, the flags handled by 'go test' itself are: +

    +
    -args
    +    Pass the remainder of the command line (everything after -args)
    +    to the test binary, uninterpreted and unchanged.
    +    Because this flag consumes the remainder of the command line,
    +    the package list (if present) must appear before this flag.
    +
    +-c
    +    Compile the test binary to pkg.test but do not run it
    +    (where pkg is the last element of the package's import path).
    +    The file name can be changed with the -o flag.
    +
    +-exec xprog
    +    Run the test binary using xprog. The behavior is the same as
    +    in 'go run'. See 'go help run' for details.
    +
    +-i
    +    Install packages that are dependencies of the test.
    +    Do not run the test.
    +
    +-o file
    +    Compile the test binary to the named file.
    +    The test still runs (unless -c or -i is specified).
    +
    +

    +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. +

    +

    +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. +

    +

    +See also: go build, go vet. +

    +

    Run specified go tool

    +

    +Usage: +

    +
    go tool [-n] command [args...]
    +
    +

    +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. +

    +

    +The -n flag causes tool to print the command that would be +executed but not execute it. +

    +

    +For more about each tool command, see 'go tool command -h'. +

    +

    Print Go version

    +

    +Usage: +

    +
    go version
    +
    +

    +Version prints the Go version, as reported by runtime.Version. +
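+A program can read the same value for the toolchain that built it;
+a minimal sketch:
+
+	package main
+
+	import (
+		"fmt"
+		"runtime"
+	)
+
+	func main() {
+		// runtime.Version reports the Go release used to build this binary.
+		fmt.Println(runtime.Version())
+	}
+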

    +

    Run go tool vet on packages

    +

    +Usage: +

    +
    go vet [-n] [-x] [build flags] [packages]
    +
    +

    +Vet runs the Go vet command on the packages named by the import paths. +

    +

    +For more about vet, see 'go doc cmd/vet'. +For more about specifying packages, see 'go help packages'. +

    +

    +To run the vet tool with specific options, run 'go tool vet'. +

    +

    +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. +

    +

    +For more about build flags, see 'go help build'. +

    +

    +See also: go fmt, go fix. +

    +

    Calling between Go and C

    +

    +There are two different ways to call between Go and C/C++ code. +

    +

    +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). +

    +

    +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. +

    +

    +When either cgo or SWIG is used, go build will pass any .c, .m, .s, +or .S files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. +

    +

    Description of build modes

    +

    +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: +

    +
    -buildmode=archive
    +	Build the listed non-main packages into .a files. Packages named
    +	main are ignored.
    +
    +-buildmode=c-archive
    +	Build the listed main package, plus all packages it imports,
    +	into a C archive file. The only callable symbols will be those
    +	functions exported using a cgo //export comment. Requires
    +	exactly one main package to be listed.
    +
    +-buildmode=c-shared
    +	Build the listed main packages, plus all packages that they
    +	import, into C shared libraries. The only callable symbols will
    +	be those functions exported using a cgo //export comment.
+	Non-main packages are ignored. (See the sketch after this list.)
    +
    +-buildmode=default
    +	Listed main packages are built into executables and listed
    +	non-main packages are built into .a files (the default
    +	behavior).
    +
    +-buildmode=shared
    +	Combine all the listed non-main packages into a single shared
    +	library that will be used when building with the -linkshared
    +	option. Packages named main are ignored.
    +
    +-buildmode=exe
    +	Build the listed main packages and everything they import into
    +	executables. Packages not named main are ignored.
    +
    +-buildmode=pie
    +	Build the listed main packages and everything they import into
    +	position independent executables (PIE). Packages not named
    +	main are ignored.
    +
    +-buildmode=plugin
    +	Build the listed main packages, plus all packages that they
    +	import, into a Go plugin. Packages not named main are ignored.
    +
    +
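+A minimal sketch of a main package suitable for -buildmode=c-shared
+(the function name is hypothetical; only functions carrying a cgo
+//export comment become callable from C):
+
+	package main
+
+	import "C"
+
+	//export Add
+	func Add(a, b C.int) C.int { return a + b }
+
+	// main is required in a main package but is not run when the
+	// result is loaded as a shared library.
+	func main() {}
+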

    File types

    +

    +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: +

    +
    .go
    +	Go source files.
    +.c, .h
    +	C source files.
    +	If the package uses cgo or SWIG, these will be compiled with the
    +	OS-native compiler (typically gcc); otherwise they will
    +	trigger an error.
    +.cc, .cpp, .cxx, .hh, .hpp, .hxx
    +	C++ source files. Only useful with cgo or SWIG, and always
    +	compiled with the OS-native compiler.
    +.m
    +	Objective-C source files. Only useful with cgo, and always
    +	compiled with the OS-native compiler.
    +.s, .S
    +	Assembler source files.
    +	If the package uses cgo or SWIG, these will be assembled with the
    +	OS-native assembler (typically gcc (sic)); otherwise they
    +	will be assembled with the Go assembler.
    +.swig, .swigcxx
    +	SWIG definition files.
    +.syso
    +	System object files.
    +
    +

    +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. +
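+For example, a minimal sketch of a constrained file; the +build lines
+must precede the package clause and be followed by a blank line:
+
+	// +build linux darwin
+	// +build !cgo
+
+	package mypkg
+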

    +

    +Non-test Go source files can also include a //go:binary-only-package +comment, indicating that the package sources are included +for documentation only and must not be used to build the +package binary. This enables distribution of Go packages in +their compiled form alone. See the go/build package documentation +for more details. +
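+A hedged sketch of such a stub (the package name is hypothetical);
+the compiled .a file must be supplied separately:
+
+	//go:binary-only-package
+
+	package cloudlib
+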

    +

    GOPATH environment variable

    +

    +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. +

    +

    +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. +

    +

    +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. +

    +

    +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. +

    +

    +Each directory listed in GOPATH must have a prescribed structure: +

    +

    +The src directory holds source code. The path below src +determines the import path or executable name. +

    +

    +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). +

    +

    +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". +

    +

    +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. +

    +

    +Here's an example directory layout: +

    +
    GOPATH=/home/user/go
    +
    +/home/user/go/
    +    src/
    +        foo/
    +            bar/               (go code in package bar)
    +                x.go
    +            quux/              (go code in package main)
    +                y.go
    +    bin/
    +        quux                   (installed command)
    +    pkg/
    +        linux_amd64/
    +            foo/
    +                bar.a          (installed package object)
    +
    +

    +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. +

    +

    +See https://golang.org/doc/code.html for an example. +

    +

    Internal Directories

    +

    +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". +Here's an extended version of the directory layout above: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            internal/
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. +

    +

    +See https://golang.org/s/go14internal for details. +

    +

    Vendor Directories

    +

    +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. +

    +

    +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. +

    +

    +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: +

    +
    /home/user/go/
    +    src/
    +        crash/
    +            bang/              (go code in package bang)
    +                b.go
    +        foo/                   (go code in package foo)
    +            f.go
    +            bar/               (go code in package bar)
    +                x.go
    +            vendor/
    +                crash/
    +                    bang/      (go code in package bang)
    +                        b.go
    +                baz/           (go code in package baz)
    +                    z.go
    +            quux/              (go code in package main)
    +                y.go
    +
    +

    +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". +

    +

    +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". +
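A sketch of that shadowing, again using the layout above (bang.Boom is a hypothetical function, for illustration only):

```go
// foo/quux/y.go - within foo's subtree, so "crash/bang" resolves to
// the vendored copy in foo/vendor/crash/bang, not to src/crash/bang.
package main

import "crash/bang"

func main() {
	bang.Boom() // calls the vendored implementation
}
```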

    +

    +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). +

    +

    +When 'go get' checks out or updates a git repository, it now also +updates submodules. +

    +

    +Vendor directories do not affect the placement of new repositories +being checked out for the first time by 'go get': those are always +placed in the main GOPATH, never in a vendor subtree. +

    +

    +See https://golang.org/s/go15vendor for details. +

    +

    Environment variables

    +

+The go command, and the tools it invokes, examine a few different +environment variables. For many of these, you can see the default +value on your system by running 'go env NAME', where NAME is the +name of the variable. +

    +

    +General-purpose environment variables: +

    +
    GCCGO
    +	The gccgo command to run for 'go build -compiler=gccgo'.
    +GOARCH
    +	The architecture, or processor, for which to compile code.
    +	Examples are amd64, 386, arm, ppc64.
    +GOBIN
    +	The directory where 'go install' will install a command.
    +GOOS
    +	The operating system for which to compile code.
    +	Examples are linux, darwin, windows, netbsd.
    +GOPATH
    +	For more details see: 'go help gopath'.
    +GORACE
    +	Options for the race detector.
    +	See https://golang.org/doc/articles/race_detector.html.
    +GOROOT
    +	The root of the go tree.
    +
    +
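Rather than re-implementing the defaults, tools usually ask the go command itself. A minimal sketch that shells out to 'go env', assuming the go binary is on PATH:

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	// 'go env NAME' prints the effective value of NAME, whether it
	// comes from the environment or from a built-in default.
	out, err := exec.Command("go", "env", "GOPATH").Output()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("GOPATH =", strings.TrimSpace(string(out)))
}
```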

    +Environment variables for use with cgo: +

    +
    CC
    +	The command to use to compile C code.
    +CGO_ENABLED
    +	Whether the cgo command is supported.  Either 0 or 1.
    +CGO_CFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C code.
    +CGO_CPPFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C or C++ code.
    +CGO_CXXFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	C++ code.
    +CGO_FFLAGS
    +	Flags that cgo will pass to the compiler when compiling
    +	Fortran code.
    +CGO_LDFLAGS
    +	Flags that cgo will pass to the compiler when linking.
    +CXX
    +	The command to use to compile C++ code.
    +PKG_CONFIG
    +	Path to pkg-config tool.
    +
    +

    +Architecture-specific environment variables: +

    +
    GOARM
    +	For GOARCH=arm, the ARM architecture for which to compile.
    +	Valid values are 5, 6, 7.
    +GO386
    +	For GOARCH=386, the floating point instruction set.
    +	Valid values are 387, sse2.
    +
    +

    +Special-purpose environment variables: +

    +
    GOROOT_FINAL
    +	The root of the installed Go tree, when it is
    +	installed in a location other than where it is built.
    +	File names in stack traces are rewritten from GOROOT to
    +	GOROOT_FINAL.
    +GO_EXTLINK_ENABLED
    +	Whether the linker should use external linking mode
    +	when using -linkmode=auto with code that uses cgo.
    +	Set to 0 to disable external linking mode, 1 to enable it.
    +GIT_ALLOW_PROTOCOL
    +	Defined by Git. A colon-separated list of schemes that are allowed to be used
    +	with git fetch/clone. If set, any scheme not explicitly mentioned will be
    +	considered insecure by 'go get'.
    +
    +

    Import path syntax

    +

+An import path (see 'go help packages') denotes a package stored in the local +file system. In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (see +'go help gopath' for more details). +

    +

    Relative import paths

    +

    +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. +

    +

    +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. +

    +

    +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. +
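A sketch of such a program (the /tmp/scratch path and the greeting package are hypothetical):

```go
// /tmp/scratch/main.go - a directory outside any work space.
package main

import "./greeting" // relative import; source lives in /tmp/scratch/greeting

func main() {
	greeting.Hello() // hypothetical function, for illustration only
}
```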

    +

    Remote import paths

    +

    +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. +

    +

    +A few common code hosting sites have special syntax: +

    +
    Bitbucket (Git, Mercurial)
    +
    +	import "bitbucket.org/user/project"
    +	import "bitbucket.org/user/project/sub/directory"
    +
    +GitHub (Git)
    +
    +	import "github.com/user/project"
    +	import "github.com/user/project/sub/directory"
    +
    +Launchpad (Bazaar)
    +
    +	import "launchpad.net/project"
    +	import "launchpad.net/project/series"
    +	import "launchpad.net/project/series/sub/directory"
    +
    +	import "launchpad.net/~user/project/branch"
    +	import "launchpad.net/~user/project/branch/sub/directory"
    +
    +IBM DevOps Services (Git)
    +
    +	import "hub.jazz.net/git/user/project"
    +	import "hub.jazz.net/git/user/project/sub/directory"
    +
    +

    +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a <meta> tag in the HTML. +

    +

    +To declare the code location, an import path of the form +

    +
    repository.vcs/path
    +
    +

    +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: +

    +
    Bazaar      .bzr
    +Git         .git
    +Mercurial   .hg
    +Subversion  .svn
    +
    +

    +For example, +

    +
    import "example.org/user/foo.hg"
    +
    +

    +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and +

    +
    import "example.org/repo.git/foo/bar"
    +
    +

    +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. +

    +

    +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. +

    +

+By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (see +'go help environment' for more details). +

    +

    +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a <meta> tag in the document's HTML +<head>. +

    +

    +The meta tag has the form: +

    +
    <meta name="go-import" content="import-prefix vcs repo-root">
    +
    +

    +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the <meta> tags match. +

    +

    +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. +

    +

+The vcs is one of "git", "hg", "svn", etc. +

    +

+The repo-root is the root URL of the repository. It must include +a scheme and must not contain a .vcs qualifier. +

    +

    +For example, +

    +
    import "example.org/pkg/foo"
    +
    +

    +will result in the following requests: +

    +
    https://example.org/pkg/foo?go-get=1 (preferred)
    +http://example.org/pkg/foo?go-get=1  (fallback, only with -insecure)
    +
    +

    +If that page contains the meta tag +

    +
    <meta name="go-import" content="example.org git https://code.org/r/p/exproj">
    +
    +

    +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. +
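The discovery step can be approximated in a few lines. This is a simplified sketch, not the go tool's actual implementation (which parses the HTML more carefully and handles character sets), and it assumes the meta tag's attributes appear in exactly this order; example.org stands in for a real host:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"regexp"
)

// metaImport matches <meta name="go-import" content="prefix vcs repo-root">.
var metaImport = regexp.MustCompile(
	`<meta\s+name="go-import"\s+content="([^"\s]+)\s+(\S+)\s+([^"]+)"`)

func main() {
	resp, err := http.Get("https://example.org/pkg/foo?go-get=1")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	if m := metaImport.FindSubmatch(body); m != nil {
		fmt.Printf("prefix=%s vcs=%s repo-root=%s\n", m[1], m[2], m[3])
	} else {
		fmt.Println("no go-import meta tag found")
	}
}
```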

    +

+Newly downloaded packages are written to the first directory listed in the GOPATH +environment variable (see 'go help gopath' for more details). +

    +

    +The go command attempts to download the version of the +package appropriate for the Go release being used. +Run 'go help get' for more. +

    +

    Import path checking

    +

    +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. +

    +

    +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: +

    +
    package math // import "path"
    +package math /* import "path" */
    +
    +

    +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. +

    +

    +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. +

    +

    +See https://golang.org/s/go14customimport for details. +

    +

    Description of package lists

    +

    +Many commands apply to a set of packages: +

    +
    go action [packages]
    +
    +

    +Usually, [packages] is a list of import paths. +

    +

    +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. +

    +

+Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (see 'go help gopath' for more details). +

    +

    +If no import paths are given, the action applies to the +package in the current directory. +

    +

    +There are four reserved names for paths that should not be used +for packages to be built with the go tool: +

    +

    +- "main" denotes the top-level package in a stand-alone executable. +

    +

    +- "all" expands to all package directories found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. +

    +

    +- "std" is like all but expands to just the packages in the standard +Go library. +

    +

    +- "cmd" expands to the Go repository's commands and their +internal libraries. +

    +

    +Import paths beginning with "cmd/" only match source code in +the Go repository. +

    +

    +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. As a special case, x/... matches x as well as x's subdirectories. +For example, net/... expands to net and packages in its subdirectories. +
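The wildcard rule is easy to approximate with a regular expression. The sketch below mimics, but does not exactly reproduce, the matcher inside the go tool:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchPattern reports whether name matches pattern, where each "..."
// in pattern matches any string, including the empty string and
// strings containing slashes. The trailing-"/..." special case makes
// x/... match x itself, as described above.
func matchPattern(pattern string) func(name string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.Replace(re, `\.\.\.`, `.*`, -1)
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	match := regexp.MustCompile(`^` + re + `$`)
	return match.MatchString
}

func main() {
	m := matchPattern("net/...")
	fmt.Println(m("net"), m("net/http"), m("strings")) // true true false
}
```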

    +

    +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. +

    +

    +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. +

    +

    +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. +

    +

    +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. +

    +

    +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". +

    +

    Description of testing flags

    +

    +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. +

    +

    +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. +

    +

    +The following flags are recognized by the 'go test' command and +control the execution of any test: +

    +
    -bench regexp
    +    Run (sub)benchmarks matching a regular expression.
    +    The given regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
    +    benchmark's identifier.
    +    By default, no benchmarks run. To run all benchmarks,
    +    use '-bench .' or '-bench=.'.
    +
    +-benchtime t
    +    Run enough iterations of each benchmark to take t, specified
    +    as a time.Duration (for example, -benchtime 1h30s).
    +    The default is 1 second (1s).
    +
    +-count n
    +    Run each test and benchmark n times (default 1).
    +    If -cpu is set, run n times for each GOMAXPROCS value.
    +    Examples are always run once.
    +
    +-cover
    +    Enable coverage analysis.
    +
    +-covermode set,count,atomic
    +    Set the mode for coverage analysis for the package[s]
    +    being tested. The default is "set" unless -race is enabled,
    +    in which case it is "atomic".
    +    The values:
    +	set: bool: does this statement run?
    +	count: int: how many times does this statement run?
    +	atomic: int: count, but correct in multithreaded tests;
    +		significantly more expensive.
    +    Sets -cover.
    +
    +-coverpkg pkg1,pkg2,pkg3
    +    Apply coverage analysis in each test to the given list of packages.
    +    The default is for each test to analyze only the package being tested.
    +    Packages are specified as import paths.
    +    Sets -cover.
    +
    +-cpu 1,2,4
    +    Specify a list of GOMAXPROCS values for which the tests or
    +    benchmarks should be executed.  The default is the current value
    +    of GOMAXPROCS.
    +
    +-parallel n
    +    Allow parallel execution of test functions that call t.Parallel.
    +    The value of this flag is the maximum number of tests to run
    +    simultaneously; by default, it is set to the value of GOMAXPROCS.
    +    Note that -parallel only applies within a single test binary.
    +    The 'go test' command may run tests for different packages
    +    in parallel as well, according to the setting of the -p flag
    +    (see 'go help build').
    +
    +-run regexp
    +    Run only those tests and examples matching the regular expression.
    +    For tests the regular expression is split into smaller ones by
    +    top-level '/', where each must match the corresponding part of a
+    test's identifier (see the subtest sketch after this list).
    +
    +-short
    +    Tell long-running tests to shorten their run time.
    +    It is off by default but set during all.bash so that installing
    +    the Go tree can run a sanity check but not spend time running
    +    exhaustive tests.
    +
    +-timeout t
    +    If a test runs longer than t, panic.
    +    The default is 10 minutes (10m).
    +
    +-v
    +    Verbose output: log all tests as they are run. Also print all
    +    text from Log and Logf calls even if the test succeeds.
    +
    +
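Here is the subtest sketch referenced in the -run entry above. The names are illustrative; with it, '-run TestSizes/large' selects only the "large" case, and t.Parallel interacts with the -parallel flag as described:

```go
package demo

import "testing"

func TestSizes(t *testing.T) {
	for _, size := range []string{"small", "large"} {
		size := size // capture the loop variable for the parallel closure
		t.Run(size, func(t *testing.T) {
			t.Parallel() // bounded by -parallel, as documented above
			if size == "" {
				t.Error("empty size")
			}
		})
	}
}
```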

    +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: +

    +
    -benchmem
    +    Print memory allocation statistics for benchmarks.
    +
    +-blockprofile block.out
    +    Write a goroutine blocking profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-blockprofilerate n
    +    Control the detail provided in goroutine blocking profiles by
    +    calling runtime.SetBlockProfileRate with n.
    +    See 'go doc runtime.SetBlockProfileRate'.
    +    The profiler aims to sample, on average, one blocking event every
    +    n nanoseconds the program spends blocked.  By default,
    +    if -test.blockprofile is set without this flag, all blocking events
    +    are recorded, equivalent to -test.blockprofilerate=1.
    +
    +-coverprofile cover.out
    +    Write a coverage profile to the file after all tests have passed.
    +    Sets -cover.
    +
    +-cpuprofile cpu.out
    +    Write a CPU profile to the specified file before exiting.
    +    Writes test binary as -c would.
    +
    +-memprofile mem.out
    +    Write a memory profile to the file after all tests have passed.
    +    Writes test binary as -c would.
    +
    +-memprofilerate n
    +    Enable more precise (and expensive) memory profiles by setting
    +    runtime.MemProfileRate.  See 'go doc runtime.MemProfileRate'.
    +    To profile all memory allocations, use -test.memprofilerate=1
    +    and pass --alloc_space flag to the pprof tool.
    +
    +-mutexprofile mutex.out
    +    Write a mutex contention profile to the specified file
    +    when all tests are complete.
    +    Writes test binary as -c would.
    +
    +-mutexprofilefraction n
    +    Sample 1 in n stack traces of goroutines holding a
    +    contended mutex.
    +
    +-outputdir directory
    +    Place output files from profiling in the specified directory,
    +    by default the directory in which "go test" is running.
    +
    +-trace trace.out
    +    Write an execution trace to the specified file before exiting.
    +
    +

    +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. +

    +

    +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. +

    +

    +For instance, the command +

    +
    go test -v -myflag testdata -cpuprofile=prof.out -x
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out
    +
    +

    +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) +

    +

    +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. +

    +

    +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. +

    +

    +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. +

    +

    +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. +

    +

    +For instance, the command +

    +
    go test -v -args -x -v
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test -test.v -x -v
    +
    +

    +Similarly, +

    +
    go test -args math
    +
    +

    +will compile the test binary and then run it as +

    +
    pkg.test math
    +
    +

    +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. +

    +

    Description of testing functions

    +

    +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. +

    +

    +A test function is one named TestXXX (where XXX is any alphanumeric string +not starting with a lower case letter) and should have the signature, +

    +
    func TestXXX(t *testing.T) { ... }
    +
    +

    +A benchmark function is one named BenchmarkXXX and should have the signature, +

    +
    func BenchmarkXXX(b *testing.B) { ... }
    +
    +
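A concrete, self-contained pair matching these signatures. Abs is defined here only to keep the sketch runnable; normally it would live in the package under test:

```go
package abs

import "testing"

func Abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

func TestAbs(t *testing.T) {
	if got := Abs(-2); got != 2 {
		t.Errorf("Abs(-2) = %d; want 2", got)
	}
}

func BenchmarkAbs(b *testing.B) {
	// The testing package grows b.N until the loop runs long enough
	// to time reliably (see the -benchtime flag above).
	for i := 0; i < b.N; i++ {
		Abs(-2)
	}
}
```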

+An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment; however, the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. +

    +

    +Godoc displays the body of ExampleXXX to demonstrate the use +of the function, constant, or variable XXX. An example of a method M with +receiver type T or *T is named ExampleT_M. There may be multiple examples +for a given function, constant, or variable, distinguished by a trailing _xxx, +where xxx is a suffix not beginning with an upper case letter. +

    +

    +Here is an example of an example: +

    +
    func ExamplePrintln() {
    +	Println("The output of\nthis example.")
    +	// Output: The output of
    +	// this example.
    +}
    +
    +

    +Here is another example where the ordering of the output is ignored: +

    +
    func ExamplePerm() {
    +	for _, value := range Perm(4) {
    +		fmt.Println(value)
    +	}
    +
    +	// Unordered output: 4
    +	// 2
    +	// 1
    +	// 3
    +	// 0
    +}
    +
    +

    +The entire test file is presented as the example when it contains a single +example function, at least one other function, type, variable, or constant +declaration, and no test or benchmark functions. +

    +

    +See the documentation of the testing package for more information. +

    + + + +
    +
    + + + + + + + + +`)) diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md new file mode 100644 index 0000000..fb5c5ef --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/README.md @@ -0,0 +1,97 @@ +# h2i + +**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol' +days of telnetting to your HTTP/1.n servers? We're bringing you +back. + +Features: +- send raw HTTP/2 frames + - PING + - SETTINGS + - HEADERS + - etc +- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2 +- pretty print all received HTTP/2 frames from the peer (including HPACK decoding) +- tab completion of commands, options + +Not yet features, but soon: +- unnecessary CONTINUATION frames on short boundaries, to test peer implementations +- request bodies (DATA frames) +- send invalid frames for testing server implementations (supported by underlying Framer) + +Later: +- act like a server + +## Installation + +``` +$ go get golang.org/x/net/http2/h2i +$ h2i +``` + +## Demo + +``` +$ h2i +Usage: h2i + + -insecure + Whether to skip TLS cert validation + -nextproto string + Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14") + +$ h2i google.com +Connecting to google.com:443 ... +Connected to 74.125.224.41:443 +Negotiated protocol "h2-14" +[FrameHeader SETTINGS len=18] + [MAX_CONCURRENT_STREAMS = 100] + [INITIAL_WINDOW_SIZE = 1048576] + [MAX_FRAME_SIZE = 16384] +[FrameHeader WINDOW_UPDATE len=4] + Window-Increment = 983041 + +h2i> PING h2iSayHI +[FrameHeader PING flags=ACK len=8] + Data = "h2iSayHI" +h2i> headers +(as HTTP/1.1)> GET / HTTP/1.1 +(as HTTP/1.1)> Host: ip.appspot.com +(as HTTP/1.1)> User-Agent: h2i/brad-n-blake +(as HTTP/1.1)> +Opening Stream-ID 1: + :authority = ip.appspot.com + :method = GET + :path = / + :scheme = https + user-agent = h2i/brad-n-blake +[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77] + :status = "200" + alternate-protocol = "443:quic,p=1" + content-length = "15" + content-type = "text/html" + date = "Fri, 01 May 2015 23:06:56 GMT" + server = "Google Frontend" +[FrameHeader DATA flags=END_STREAM stream=1 len=15] + "173.164.155.78\n" +[FrameHeader PING len=8] + Data = "\x00\x00\x00\x00\x00\x00\x00\x00" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader PING flags=ACK len=8] + Data = "h2i_ping" +h2i> ping +[FrameHeader GOAWAY len=22] + Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1) + +ReadFrame: EOF +``` + +## Status + +Quick few hour hack. So much yet to do. Feel free to file issues for +bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/) +and I aren't yet accepting pull requests until things settle down. + diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go new file mode 100644 index 0000000..62e5752 --- /dev/null +++ b/vendor/golang.org/x/net/http2/h2i/h2i.go @@ -0,0 +1,522 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9,!solaris + +/* +The h2i command is an interactive HTTP/2 console. 
+ +Usage: + $ h2i [flags] + +Interactive commands in the console: (all parts case-insensitive) + + ping [data] + settings ack + settings FOO=n BAR=z + headers (open a new stream by typing HTTP/1.1) +*/ +package main + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/crypto/ssh/terminal" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +// Flags +var ( + flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.") + flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation") + flagSettings = flag.String("settings", "empty", "comma-separated list of KEY=value settings for the initial SETTINGS frame. The magic value 'empty' sends an empty initial settings frame, and the magic value 'omit' causes no initial settings frame to be sent.") + flagDial = flag.String("dial", "", "optional ip:port to dial, to connect to a host:port but use a different SNI name (including a SNI name without DNS)") +) + +type command struct { + run func(*h2i, []string) error // required + + // complete optionally specifies tokens (case-insensitive) which are + // valid for this subcommand. + complete func() []string +} + +var commands = map[string]command{ + "ping": {run: (*h2i).cmdPing}, + "settings": { + run: (*h2i).cmdSettings, + complete: func() []string { + return []string{ + "ACK", + http2.SettingHeaderTableSize.String(), + http2.SettingEnablePush.String(), + http2.SettingMaxConcurrentStreams.String(), + http2.SettingInitialWindowSize.String(), + http2.SettingMaxFrameSize.String(), + http2.SettingMaxHeaderListSize.String(), + } + }, + }, + "quit": {run: (*h2i).cmdQuit}, + "headers": {run: (*h2i).cmdHeaders}, +} + +func usage() { + fmt.Fprintf(os.Stderr, "Usage: h2i \n\n") + flag.PrintDefaults() +} + +// withPort adds ":443" if another port isn't already present. +func withPort(host string) string { + if _, _, err := net.SplitHostPort(host); err != nil { + return net.JoinHostPort(host, "443") + } + return host +} + +// withoutPort strips the port from addr if present. +func withoutPort(addr string) string { + if h, _, err := net.SplitHostPort(addr); err == nil { + return h + } + return addr +} + +// h2i is the app's state. 
+type h2i struct { + host string + tc *tls.Conn + framer *http2.Framer + term *terminal.Terminal + + // owned by the command loop: + streamID uint32 + hbuf bytes.Buffer + henc *hpack.Encoder + + // owned by the readFrames loop: + peerSetting map[http2.SettingID]uint32 + hdec *hpack.Decoder +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + os.Exit(2) + } + log.SetFlags(0) + + host := flag.Arg(0) + app := &h2i{ + host: host, + peerSetting: make(map[http2.SettingID]uint32), + } + app.henc = hpack.NewEncoder(&app.hbuf) + + if err := app.Main(); err != nil { + if app.term != nil { + app.logf("%v\n", err) + } else { + fmt.Fprintf(os.Stderr, "%v\n", err) + } + os.Exit(1) + } + fmt.Fprintf(os.Stdout, "\n") +} + +func (app *h2i) Main() error { + cfg := &tls.Config{ + ServerName: withoutPort(app.host), + NextProtos: strings.Split(*flagNextProto, ","), + InsecureSkipVerify: *flagInsecure, + } + + hostAndPort := *flagDial + if hostAndPort == "" { + hostAndPort = withPort(app.host) + } + log.Printf("Connecting to %s ...", hostAndPort) + tc, err := tls.Dial("tcp", hostAndPort, cfg) + if err != nil { + return fmt.Errorf("Error dialing %s: %v", hostAndPort, err) + } + log.Printf("Connected to %v", tc.RemoteAddr()) + defer tc.Close() + + if err := tc.Handshake(); err != nil { + return fmt.Errorf("TLS handshake: %v", err) + } + if !*flagInsecure { + if err := tc.VerifyHostname(app.host); err != nil { + return fmt.Errorf("VerifyHostname: %v", err) + } + } + state := tc.ConnectionState() + log.Printf("Negotiated protocol %q", state.NegotiatedProtocol) + if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" { + return fmt.Errorf("Could not negotiate protocol mutually") + } + + if _, err := io.WriteString(tc, http2.ClientPreface); err != nil { + return err + } + + app.framer = http2.NewFramer(tc, tc) + + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + return err + } + defer terminal.Restore(0, oldState) + + var screen = struct { + io.Reader + io.Writer + }{os.Stdin, os.Stdout} + + app.term = terminal.NewTerminal(screen, "h2i> ") + lastWord := regexp.MustCompile(`.+\W(\w+)$`) + app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) { + if key != '\t' { + return + } + if pos != len(line) { + // TODO: we're being lazy for now, only supporting tab completion at the end. + return + } + // Auto-complete for the command itself. 
+ if !strings.Contains(line, " ") { + var name string + name, _, ok = lookupCommand(line) + if !ok { + return + } + return name, len(name), true + } + _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')]) + if !ok || c.complete == nil { + return + } + if strings.HasSuffix(line, " ") { + app.logf("%s", strings.Join(c.complete(), " ")) + return line, pos, true + } + m := lastWord.FindStringSubmatch(line) + if m == nil { + return line, len(line), true + } + soFar := m[1] + var match []string + for _, cand := range c.complete() { + if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) { + continue + } + match = append(match, cand) + } + if len(match) == 0 { + return + } + if len(match) > 1 { + // TODO: auto-complete any common prefix + app.logf("%s", strings.Join(match, " ")) + return line, pos, true + } + newLine = line[:len(line)-len(soFar)] + match[0] + return newLine, len(newLine), true + + } + + errc := make(chan error, 2) + go func() { errc <- app.readFrames() }() + go func() { errc <- app.readConsole() }() + return <-errc +} + +func (app *h2i) logf(format string, args ...interface{}) { + fmt.Fprintf(app.term, format+"\r\n", args...) +} + +func (app *h2i) readConsole() error { + if s := *flagSettings; s != "omit" { + var args []string + if s != "empty" { + args = strings.Split(s, ",") + } + _, c, ok := lookupCommand("settings") + if !ok { + panic("settings command not found") + } + c.run(app, args) + } + + for { + line, err := app.term.ReadLine() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("terminal.ReadLine: %v", err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + cmd, args := f[0], f[1:] + if _, c, ok := lookupCommand(cmd); ok { + err = c.run(app, args) + } else { + app.logf("Unknown command %q", line) + } + if err == errExitApp { + return nil + } + if err != nil { + return err + } + } +} + +func lookupCommand(prefix string) (name string, c command, ok bool) { + prefix = strings.ToLower(prefix) + if c, ok = commands[prefix]; ok { + return prefix, c, ok + } + + for full, candidate := range commands { + if strings.HasPrefix(full, prefix) { + if c.run != nil { + return "", command{}, false // ambiguous + } + c = candidate + name = full + } + } + return name, c, c.run != nil +} + +var errExitApp = errors.New("internal sentinel error value to quit the console reading loop") + +func (a *h2i) cmdQuit(args []string) error { + if len(args) > 0 { + a.logf("the QUIT command takes no argument") + return nil + } + return errExitApp +} + +func (a *h2i) cmdSettings(args []string) error { + if len(args) == 1 && strings.EqualFold(args[0], "ACK") { + return a.framer.WriteSettingsAck() + } + var settings []http2.Setting + for _, arg := range args { + if strings.EqualFold(arg, "ACK") { + a.logf("Error: ACK must be only argument with the SETTINGS command") + return nil + } + eq := strings.Index(arg, "=") + if eq == -1 { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + sid, ok := settingByName(arg[:eq]) + if !ok { + a.logf("Error: unknown setting name %q", arg[:eq]) + return nil + } + val, err := strconv.ParseUint(arg[eq+1:], 10, 32) + if err != nil { + a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg) + return nil + } + settings = append(settings, http2.Setting{ + ID: sid, + Val: uint32(val), + }) + } + a.logf("Sending: %v", settings) + return a.framer.WriteSettings(settings...) 
+} + +func settingByName(name string) (http2.SettingID, bool) { + for _, sid := range [...]http2.SettingID{ + http2.SettingHeaderTableSize, + http2.SettingEnablePush, + http2.SettingMaxConcurrentStreams, + http2.SettingInitialWindowSize, + http2.SettingMaxFrameSize, + http2.SettingMaxHeaderListSize, + } { + if strings.EqualFold(sid.String(), name) { + return sid, true + } + } + return 0, false +} + +func (app *h2i) cmdPing(args []string) error { + if len(args) > 1 { + app.logf("invalid PING usage: only accepts 0 or 1 args") + return nil // nil means don't end the program + } + var data [8]byte + if len(args) == 1 { + copy(data[:], args[0]) + } else { + copy(data[:], "h2i_ping") + } + return app.framer.WritePing(false, data) +} + +func (app *h2i) cmdHeaders(args []string) error { + if len(args) > 0 { + app.logf("Error: HEADERS doesn't yet take arguments.") + // TODO: flags for restricting window size, to force CONTINUATION + // frames. + return nil + } + var h1req bytes.Buffer + app.term.SetPrompt("(as HTTP/1.1)> ") + defer app.term.SetPrompt("h2i> ") + for { + line, err := app.term.ReadLine() + if err != nil { + return err + } + h1req.WriteString(line) + h1req.WriteString("\r\n") + if line == "" { + break + } + } + req, err := http.ReadRequest(bufio.NewReader(&h1req)) + if err != nil { + app.logf("Invalid HTTP/1.1 request: %v", err) + return nil + } + if app.streamID == 0 { + app.streamID = 1 + } else { + app.streamID += 2 + } + app.logf("Opening Stream-ID %d:", app.streamID) + hbf := app.encodeHeaders(req) + if len(hbf) > 16<<10 { + app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go") + return nil + } + return app.framer.WriteHeaders(http2.HeadersFrameParam{ + StreamID: app.streamID, + BlockFragment: hbf, + EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now + EndHeaders: true, // for now + }) +} + +func (app *h2i) readFrames() error { + for { + f, err := app.framer.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame: %v", err) + } + app.logf("%v", f) + switch f := f.(type) { + case *http2.PingFrame: + app.logf(" Data = %q", f.Data) + case *http2.SettingsFrame: + f.ForeachSetting(func(s http2.Setting) error { + app.logf(" %v", s) + app.peerSetting[s.ID] = s.Val + return nil + }) + case *http2.WindowUpdateFrame: + app.logf(" Window-Increment = %v", f.Increment) + case *http2.GoAwayFrame: + app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)", f.LastStreamID, f.ErrCode, f.ErrCode) + case *http2.DataFrame: + app.logf(" %q", f.Data()) + case *http2.HeadersFrame: + if f.HasPriority() { + app.logf(" PRIORITY = %v", f.Priority) + } + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + case *http2.PushPromiseFrame: + if app.hdec == nil { + // TODO: if the user uses h2i to send a SETTINGS frame advertising + // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE + // and stuff here instead of using the 4k default. 
But for now: + tableSize := uint32(4 << 10) + app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField) + } + app.hdec.Write(f.HeaderBlockFragment()) + } + } +} + +// called from readLoop +func (app *h2i) onNewHeaderField(f hpack.HeaderField) { + if f.Sensitive { + app.logf(" %s = %q (SENSITIVE)", f.Name, f.Value) + } + app.logf(" %s = %q", f.Name, f.Value) +} + +func (app *h2i) encodeHeaders(req *http.Request) []byte { + app.hbuf.Reset() + + // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go + host := req.Host + if host == "" { + host = req.URL.Host + } + + path := req.RequestURI + if path == "" { + path = "/" + } + + app.writeHeader(":authority", host) // probably not right for all sites + app.writeHeader(":method", req.Method) + app.writeHeader(":path", path) + app.writeHeader(":scheme", "https") + + for k, vv := range req.Header { + lowKey := strings.ToLower(k) + if lowKey == "host" { + continue + } + for _, v := range vv { + app.writeHeader(lowKey, v) + } + } + return app.hbuf.Bytes() +} + +func (app *h2i) writeHeader(name, value string) { + app.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) + app.logf(" %s = %s", name, value) +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 0000000..c2805f6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,78 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "net/http" + "strings" +) + +var ( + commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case + commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case +) + +func init() { + for _, v := range []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 0000000..54726c2 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This protects the encoder from too large + // a size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. The +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. +func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both the static and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same as the default dynamic header table size +// described in the HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the extended buffer. +// +// s will be encoded in Huffman codes only when it produces a strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode_test.go b/vendor/golang.org/x/net/http2/hpack/encode_test.go new file mode 100644 index 0000000..05f12db --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode_test.go @@ -0,0 +1,386 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" +) + +func TestEncoderTableSizeUpdate(t *testing.T) { + tests := []struct { + size1, size2 uint32 + wantHex string + }{ + // Should emit 2 table size updates (2048 and 4096) + {2048, 4096, "3fe10f 3fe11f 82"}, + + // Should emit 1 table size update (2048) + {16384, 2048, "3fe10f 82"}, + } + for _, tt := range tests { + var buf bytes.Buffer + e := NewEncoder(&buf) + e.SetMaxDynamicTableSize(tt.size1) + e.SetMaxDynamicTableSize(tt.size2) + if err := e.WriteField(pair(":method", "GET")); err != nil { + t.Fatal(err) + } + want := removeSpace(tt.wantHex) + if got := hex.EncodeToString(buf.Bytes()); got != want { + t.Errorf("e.SetDynamicTableSize %v, %v = %q; want %q", tt.size1, tt.size2, got, want) + } + } +} + +func TestEncoderWriteField(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + var got []HeaderField + d := NewDecoder(4<<10, func(f HeaderField) { + got = append(got, f) + }) + + tests := []struct { + hdrs []HeaderField + }{ + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }}, + {[]HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }}, + } + for i, tt := range tests { + buf.Reset() + got = got[:0] + for _, hf := range tt.hdrs { + if err := e.WriteField(hf); err != nil { + t.Fatal(err) + } + } + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Errorf("%d. Decoder Write = %v", i, err) + } + if !reflect.DeepEqual(got, tt.hdrs) { + t.Errorf("%d. Decoded %+v; want %+v", i, got, tt.hdrs) + } + } +} + +func TestEncoderSearchTable(t *testing.T) { + e := NewEncoder(nil) + + e.dynTab.add(pair("foo", "bar")) + e.dynTab.add(pair("blake", "miz")) + e.dynTab.add(pair(":method", "GET")) + + tests := []struct { + hf HeaderField + wantI uint64 + wantMatch bool + }{ + // Name and Value match + {pair("foo", "bar"), uint64(staticTable.len()) + 3, true}, + {pair("blake", "miz"), uint64(staticTable.len()) + 2, true}, + {pair(":method", "GET"), 2, true}, + + // Only name match because Sensitive == true. This is allowed to match + // any ":method" entry. The current implementation uses the last entry + // added in newStaticTable. + {HeaderField{":method", "GET", true}, 3, false}, + + // Only Name matches + {pair("foo", "..."), uint64(staticTable.len()) + 3, false}, + {pair("blake", "..."), uint64(staticTable.len()) + 2, false}, + // As before, this is allowed to match any ":method" entry. 
+ {pair(":method", "..."), 3, false}, + + // None match + {pair("foo-", "bar"), 0, false}, + } + for _, tt := range tests { + if gotI, gotMatch := e.searchTable(tt.hf); gotI != tt.wantI || gotMatch != tt.wantMatch { + t.Errorf("d.search(%+v) = %v, %v; want %v, %v", tt.hf, gotI, gotMatch, tt.wantI, tt.wantMatch) + } + } +} + +func TestAppendVarInt(t *testing.T) { + tests := []struct { + n byte + i uint64 + want []byte + }{ + // Fits in a byte: + {1, 0, []byte{0}}, + {2, 2, []byte{2}}, + {3, 6, []byte{6}}, + {4, 14, []byte{14}}, + {5, 30, []byte{30}}, + {6, 62, []byte{62}}, + {7, 126, []byte{126}}, + {8, 254, []byte{254}}, + + // Multiple bytes: + {5, 1337, []byte{31, 154, 10}}, + } + for _, tt := range tests { + got := appendVarInt(nil, tt.n, tt.i) + if !bytes.Equal(got, tt.want) { + t.Errorf("appendVarInt(nil, %v, %v) = %v; want %v", tt.n, tt.i, got, tt.want) + } + } +} + +func TestAppendHpackString(t *testing.T) { + tests := []struct { + s, wantHex string + }{ + // Huffman encoded + {"www.example.com", "8c f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + + // Not Huffman encoded + {"a", "01 61"}, + + // zero length + {"", "00"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendHpackString(nil, tt.s) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendHpackString(nil, %q) = %q; want %q", tt.s, got, want) + } + } +} + +func TestAppendIndexed(t *testing.T) { + tests := []struct { + i uint64 + wantHex string + }{ + // 1 byte + {1, "81"}, + {126, "fe"}, + + // 2 bytes + {127, "ff00"}, + {128, "ff01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexed(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndex(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestAppendNewName(t *testing.T) { + tests := []struct { + f HeaderField + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{"custom-key", "custom-value", false}, true, "40 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Without indexing + {HeaderField{"custom-key", "custom-value", false}, false, "00 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + + // Never indexed + {HeaderField{"custom-key", "custom-value", true}, true, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + {HeaderField{"custom-key", "custom-value", true}, false, "10 88 25a8 49e9 5ba9 7d7f 89 25a8 49e9 5bb8 e8b4 bf"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendNewName(nil, tt.f, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendNewName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendIndexedName(t *testing.T) { + tests := []struct { + f HeaderField + i uint64 + indexing bool + wantHex string + }{ + // Incremental indexing + {HeaderField{":status", "302", false}, 8, true, "48 82 6402"}, + + // Without indexing + {HeaderField{":status", "302", false}, 8, false, "08 82 6402"}, + + // Never indexed + {HeaderField{":status", "302", true}, 8, true, "18 82 6402"}, + {HeaderField{":status", "302", true}, 8, false, "18 82 6402"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendIndexedName(nil, tt.f, tt.i, tt.indexing) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendIndexedName(nil, %+v, %v) = %q; want %q", tt.f, tt.indexing, got, want) + } + } +} + +func TestAppendTableSize(t *testing.T) { + tests := []struct { + i uint32 + wantHex string + }{ + // Fits into 1 
byte + {30, "3e"}, + + // Extra byte + {31, "3f00"}, + {32, "3f01"}, + } + for _, tt := range tests { + want := removeSpace(tt.wantHex) + buf := appendTableSize(nil, tt.i) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("appendTableSize(nil, %v) = %q; want %q", tt.i, got, want) + } + } +} + +func TestEncoderSetMaxDynamicTableSize(t *testing.T) { + var buf bytes.Buffer + e := NewEncoder(&buf) + tests := []struct { + v uint32 + wantUpdate bool + wantMinSize uint32 + wantMaxSize uint32 + }{ + // Set new table size to 2048 + {2048, true, 2048, 2048}, + + // Set new table size to 16384, but still limited to + // 4096 + {16384, true, 2048, 4096}, + } + for _, tt := range tests { + e.SetMaxDynamicTableSize(tt.v) + if got := e.tableSizeUpdate; tt.wantUpdate != got { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, tt.wantUpdate) + } + if got := e.minSize; tt.wantMinSize != got { + t.Errorf("e.minSize = %v; want %v", got, tt.wantMinSize) + } + if got := e.dynTab.maxSize; tt.wantMaxSize != got { + t.Errorf("e.maxSize = %v; want %v", got, tt.wantMaxSize) + } + } +} + +func TestEncoderSetMaxDynamicTableSizeLimit(t *testing.T) { + e := NewEncoder(nil) + // 4095 < initialHeaderTableSize means maxSize is truncated to + // 4095. + e.SetMaxDynamicTableSizeLimit(4095) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(4095); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } + if got, want := e.tableSizeUpdate, true; got != want { + t.Errorf("e.tableSizeUpdate = %v; want %v", got, want) + } + // maxSize will be truncated to maxSizeLimit + e.SetMaxDynamicTableSize(16384) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + // 8192 > current maxSizeLimit, so maxSize does not change. + e.SetMaxDynamicTableSizeLimit(8192) + if got, want := e.dynTab.maxSize, uint32(4095); got != want { + t.Errorf("e.dynTab.maxSize = %v; want %v", got, want) + } + if got, want := e.maxSizeLimit, uint32(8192); got != want { + t.Errorf("e.maxSizeLimit = %v; want %v", got, want) + } +} + +func removeSpace(s string) string { + return strings.Replace(s, " ", "", -1) +} + +func BenchmarkEncoderSearchTable(b *testing.B) { + e := NewEncoder(nil) + + // A sample of possible header fields. + // This is not based on any actual data from HTTP/2 traces. + var possible []HeaderField + for _, f := range staticTable.ents { + if f.Value == "" { + possible = append(possible, f) + continue + } + // Generate 5 random values, except for cookie and set-cookie, + // which we know can have many values in practice. + num := 5 + if f.Name == "cookie" || f.Name == "set-cookie" { + num = 25 + } + for i := 0; i < num; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + for k := 0; k < 10; k++ { + f := HeaderField{ + Name: fmt.Sprintf("x-header-%d", k), + Sensitive: rand.Int()%2 == 0, + } + for i := 0; i < 5; i++ { + f.Value = fmt.Sprintf("%s-%d", f.Name, i) + possible = append(possible, f) + } + } + + // Add a random sample to the dynamic table. This very loosely simulates + // a history of 100 requests with 20 header fields per request. + for r := 0; r < 100*20; r++ { + f := possible[rand.Int31n(int32(len(possible)))] + // Skip if this is in the staticTable verbatim. 
+ if _, has := staticTable.search(f); !has { + e.dynTab.add(f) + } + } + + b.ResetTimer() + for n := 0; n < b.N; n++ { + for _, f := range possible { + e.searchTable(f) + } + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 0000000..176644a --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. 
+ saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. 
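+	// The index space is unified and 1-based: indices 1 through
+	// staticTable.len() address the static table, and higher indices
+	// address the dynamic table, newest entry first. For example (see
+	// TestDynamicTableAt), after adding "foo: bar" and then "blake: miz",
+	// index staticTable.len()+1 is ("blake", "miz") and
+	// staticTable.len()+2 is ("foo", "bar").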
+	if i == 0 {
+		return
+	}
+	if i <= uint64(staticTable.len()) {
+		return staticTable.ents[i-1], true
+	}
+	if i > uint64(d.maxTableIndex()) {
+		return
+	}
+	// In the dynamic table, newer entries have lower indices.
+	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
+	// the reversed dynamic table.
+	dt := d.dynTab.table
+	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
+}
+
+// DecodeFull decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+	var hf []HeaderField
+	saveFunc := d.emit
+	defer func() { d.emit = saveFunc }()
+	d.emit = func(f HeaderField) { hf = append(hf, f) }
+	if _, err := d.Write(p); err != nil {
+		return nil, err
+	}
+	if err := d.Close(); err != nil {
+		return nil, err
+	}
+	return hf, nil
+}
+
+func (d *Decoder) Close() error {
+	if d.saveBuf.Len() > 0 {
+		d.saveBuf.Reset()
+		return DecodingError{errors.New("truncated headers")}
+	}
+	return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		// Prevent state machine CPU attacks (making us redo
+		// work up to the point of finding out we don't have
+		// enough data)
+		return
+	}
+	// Only copy the data if we have to. Optimistically assume
+	// that p will contain a complete header block.
+	if d.saveBuf.Len() == 0 {
+		d.buf = p
+	} else {
+		d.saveBuf.Write(p)
+		d.buf = d.saveBuf.Bytes()
+		d.saveBuf.Reset()
+	}
+
+	for len(d.buf) > 0 {
+		err = d.parseHeaderFieldRepr()
+		if err == errNeedMore {
+			// Extra paranoia, making sure saveBuf won't
+			// get too large. All the varint and string
+			// reading code earlier should already catch
+			// overlong things and return ErrStringLength,
+			// but keep this as a last resort.
+			const varIntOverhead = 8 // conservative
+			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+				return 0, ErrStringLength
+			}
+			d.saveBuf.Write(d.buf)
+			return len(p), nil
+		}
+		if err != nil {
+			break
+		}
+	}
+	return len(p), err
+}
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+	indexedTrue indexType = iota
+	indexedFalse
+	indexedNever
+)
+
+func (v indexType) indexed() bool   { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+	b := d.buf[0]
+	switch {
+	case b&128 != 0:
+		// Indexed representation.
+		// High bit set?
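+		// 0b1xxxxxxx: top bit is 1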
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
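+//
+// As a worked example (matching TestReadVarInt and TestAppendVarInt):
+// encoding 1337 with n=5 first fills the 5-bit prefix with 2^5-1 = 31,
+// leaving 1337-31 = 1306; 1306 is then emitted in 7-bit groups, least
+// significant first, with the high bit as a continuation flag:
+// 1306 = 26 + 10*128, so the encoded bytes are 31, 154 (128|26), 10.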
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+		return "", nil, ErrStringLength
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		if wantStr {
+			s = string(p[:strLen])
+		}
+		return s, p[strLen:], nil
+	}
+
+	if wantStr {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset() // don't trust others
+		defer bufPool.Put(buf)
+		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+			buf.Reset()
+			return "", nil, err
+		}
+		s = buf.String()
+		buf.Reset() // be nice to GC
+	}
+	return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack_test.go b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
new file mode 100644
index 0000000..bc7f476
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack_test.go
@@ -0,0 +1,722 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package hpack + +import ( + "bytes" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" +) + +func (d *Decoder) mustAt(idx int) HeaderField { + if hf, ok := d.at(uint64(idx)); !ok { + panic(fmt.Sprintf("bogus index %d", idx)) + } else { + return hf + } +} + +func TestDynamicTableAt(t *testing.T) { + d := NewDecoder(4096, nil) + at := d.mustAt + if got, want := at(2), (pair(":method", "GET")); got != want { + t.Errorf("at(2) = %v; want %v", got, want) + } + d.dynTab.add(pair("foo", "bar")) + d.dynTab.add(pair("blake", "miz")) + if got, want := at(staticTable.len()+1), (pair("blake", "miz")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + if got, want := at(staticTable.len()+2), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 2) = %v; want %v", got, want) + } + if got, want := at(3), (pair(":method", "POST")); got != want { + t.Errorf("at(3) = %v; want %v", got, want) + } +} + +func TestDynamicTableSizeEvict(t *testing.T) { + d := NewDecoder(4096, nil) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("size = %d; want %d", d.dynTab.size, want) + } + add := d.dynTab.add + add(pair("blake", "eats pizza")) + if want := uint32(15 + 32); d.dynTab.size != want { + t.Fatalf("after pizza, size = %d; want %d", d.dynTab.size, want) + } + add(pair("foo", "bar")) + if want := uint32(15 + 32 + 6 + 32); d.dynTab.size != want { + t.Fatalf("after foo bar, size = %d; want %d", d.dynTab.size, want) + } + d.dynTab.setMaxSize(15 + 32 + 1 /* slop */) + if want := uint32(6 + 32); d.dynTab.size != want { + t.Fatalf("after setMaxSize, size = %d; want %d", d.dynTab.size, want) + } + if got, want := d.mustAt(staticTable.len()+1), (pair("foo", "bar")); got != want { + t.Errorf("at(dyn 1) = %v; want %v", got, want) + } + add(pair("long", strings.Repeat("x", 500))) + if want := uint32(0); d.dynTab.size != want { + t.Fatalf("after big one, size = %d; want %d", d.dynTab.size, want) + } +} + +func TestDecoderDecode(t *testing.T) { + tests := []struct { + name string + in []byte + want []HeaderField + wantDynTab []HeaderField // newest entry first + }{ + // C.2.1 Literal Header Field with Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.1 + {"C.2.1", dehex("400a 6375 7374 6f6d 2d6b 6579 0d63 7573 746f 6d2d 6865 6164 6572"), + []HeaderField{pair("custom-key", "custom-header")}, + []HeaderField{pair("custom-key", "custom-header")}, + }, + + // C.2.2 Literal Header Field without Indexing + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.2 + {"C.2.2", dehex("040c 2f73 616d 706c 652f 7061 7468"), + []HeaderField{pair(":path", "/sample/path")}, + []HeaderField{}}, + + // C.2.3 Literal Header Field never Indexed + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.3 + {"C.2.3", dehex("1008 7061 7373 776f 7264 0673 6563 7265 74"), + []HeaderField{{"password", "secret", true}}, + []HeaderField{}}, + + // C.2.4 Indexed Header Field + // http://http2.github.io/http2-spec/compression.html#rfc.section.C.2.4 + {"C.2.4", []byte("\x82"), + []HeaderField{pair(":method", "GET")}, + []HeaderField{}}, + } + for _, tt := range tests { + d := NewDecoder(4096, nil) + hf, err := d.DecodeFull(tt.in) + if err != nil { + t.Errorf("%s: %v", tt.name, err) + continue + } + if !reflect.DeepEqual(hf, tt.want) { + t.Errorf("%s: Got %v; want %v", tt.name, hf, tt.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, tt.wantDynTab) { + t.Errorf("%s: dynamic table after = %v; want %v", 
tt.name, gotDynTab, tt.wantDynTab) + } + } +} + +func (dt *dynamicTable) reverseCopy() (hf []HeaderField) { + hf = make([]HeaderField, len(dt.table.ents)) + for i := range hf { + hf[i] = dt.table.ents[len(dt.table.ents)-1-i] + } + return +} + +type encAndWant struct { + enc []byte + want []HeaderField + wantDynTab []HeaderField + wantDynSize uint32 +} + +// C.3 Request Examples without Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.3 +func TestDecodeC3_NoHuffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5808 6e6f 2d63 6163 6865"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// C.4 Request Examples with Huffman Coding +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.4 +func TestDecodeC4_Huffman(t *testing.T) { + testDecodeSeries(t, 4096, []encAndWant{ + {dehex("8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + }, + []HeaderField{ + pair(":authority", "www.example.com"), + }, + 57, + }, + {dehex("8286 84be 5886 a8eb 1064 9cbf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "http"), + pair(":path", "/"), + pair(":authority", "www.example.com"), + pair("cache-control", "no-cache"), + }, + []HeaderField{ + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 110, + }, + {dehex("8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf"), + []HeaderField{ + pair(":method", "GET"), + pair(":scheme", "https"), + pair(":path", "/index.html"), + pair(":authority", "www.example.com"), + pair("custom-key", "custom-value"), + }, + []HeaderField{ + pair("custom-key", "custom-value"), + pair("cache-control", "no-cache"), + pair(":authority", "www.example.com"), + }, + 164, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.5 +// "This section shows several consecutive header lists, corresponding +// to HTTP responses, on the same connection. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur." 
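+//
+// The expected dynamic table sizes below follow from HeaderField.Size()
+// (name length + value length + 32 octets per entry): after the first
+// response, (7+3+32) + (13+7+32) + (4+29+32) + (8+23+32) = 222 octets,
+// just under the 256-octet limit, so nothing has been evicted yet.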
+func TestDecodeC5_ResponsesNoHuff(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4803 3330 3258 0770 7269 7661 7465 611d +4d6f 6e2c 2032 3120 4f63 7420 3230 3133 +2032 303a 3133 3a32 3120 474d 546e 1768 +7474 7073 3a2f 2f77 7777 2e65 7861 6d70 +6c65 2e63 6f6d +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4803 3330 37c1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 +3230 3133 2032 303a 3133 3a32 3220 474d +54c0 5a04 677a 6970 7738 666f 6f3d 4153 +444a 4b48 514b 425a 584f 5157 454f 5049 +5541 5851 5745 4f49 553b 206d 6178 2d61 +6765 3d33 3630 303b 2076 6572 7369 6f6e +3d31 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +// http://http2.github.io/http2-spec/compression.html#rfc.section.C.6 +// "This section shows the same examples as the previous section, but +// using Huffman encoding for the literal values. The HTTP/2 setting +// parameter SETTINGS_HEADER_TABLE_SIZE is set to the value of 256 +// octets, causing some evictions to occur. The eviction mechanism +// uses the length of the decoded literal values, so the same +// evictions occurs as in the previous section." 
+func TestDecodeC6_ResponsesHuffman(t *testing.T) { + testDecodeSeries(t, 256, []encAndWant{ + {dehex(` +4882 6402 5885 aec3 771a 4b61 96d0 7abe +9410 54d4 44a8 2005 9504 0b81 66e0 82a6 +2d1b ff6e 919d 29ad 1718 63c7 8f0b 97c8 +e9ae 82ae 43d3 +`), + []HeaderField{ + pair(":status", "302"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + pair(":status", "302"), + }, + 222, + }, + {dehex("4883 640e ffc1 c0bf"), + []HeaderField{ + pair(":status", "307"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("location", "https://www.example.com"), + }, + []HeaderField{ + pair(":status", "307"), + pair("location", "https://www.example.com"), + pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"), + pair("cache-control", "private"), + }, + 222, + }, + {dehex(` +88c1 6196 d07a be94 1054 d444 a820 0595 +040b 8166 e084 a62d 1bff c05a 839b d9ab +77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b +3960 d5af 2708 7f36 72c1 ab27 0fb5 291f +9587 3160 65c0 03ed 4ee5 b106 3d50 07 +`), + []HeaderField{ + pair(":status", "200"), + pair("cache-control", "private"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + pair("location", "https://www.example.com"), + pair("content-encoding", "gzip"), + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + }, + []HeaderField{ + pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"), + pair("content-encoding", "gzip"), + pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"), + }, + 215, + }, + }) +} + +func testDecodeSeries(t *testing.T, size uint32, steps []encAndWant) { + d := NewDecoder(size, nil) + for i, step := range steps { + hf, err := d.DecodeFull(step.enc) + if err != nil { + t.Fatalf("Error at step index %d: %v", i, err) + } + if !reflect.DeepEqual(hf, step.want) { + t.Fatalf("At step index %d: Got headers %v; want %v", i, hf, step.want) + } + gotDynTab := d.dynTab.reverseCopy() + if !reflect.DeepEqual(gotDynTab, step.wantDynTab) { + t.Errorf("After step index %d, dynamic table = %v; want %v", i, gotDynTab, step.wantDynTab) + } + if d.dynTab.size != step.wantDynSize { + t.Errorf("After step index %d, dynamic table size = %v; want %v", i, d.dynTab.size, step.wantDynSize) + } + } +} + +func TestHuffmanDecodeExcessPadding(t *testing.T) { + tests := [][]byte{ + {0xff}, // Padding Exceeds 7 bits + {0x1f, 0xff}, // {"a", 1 byte excess padding} + {0x1f, 0xff, 0xff}, // {"a", 2 byte excess padding} + {0x1f, 0xff, 0xff, 0xff}, // {"a", 3 byte excess padding} + {0xff, 0x9f, 0xff, 0xff, 0xff}, // {"a", 29 bit excess padding} + {'R', 0xbc, '0', 0xff, 0xff, 0xff, 0xff}, // Padding ends on partial symbol. 
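+		// Each case must fail: RFC 7541 section 5.2 allows at most
+		// 7 bits of padding, and the padding must be the most
+		// significant bits of the EOS code (all ones).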
+ } + for i, in := range tests { + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("test-%d: decode(%q) = %v; want ErrInvalidHuffman", i, in, err) + } + } +} + +func TestHuffmanDecodeEOS(t *testing.T) { + in := []byte{0xff, 0xff, 0xff, 0xff, 0xfc} // {EOS, "?"} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecodeMaxLengthOnTrailingByte(t *testing.T) { + in := []byte{0x00, 0x01} // {"0", "0", "0"} + var buf bytes.Buffer + if err := huffmanDecode(&buf, 2, in); err != ErrStringLength { + t.Errorf("error = %v; want ErrStringLength", err) + } +} + +func TestHuffmanDecodeCorruptPadding(t *testing.T) { + in := []byte{0x00} + var buf bytes.Buffer + if _, err := HuffmanDecode(&buf, in); err != ErrInvalidHuffman { + t.Errorf("error = %v; want ErrInvalidHuffman", err) + } +} + +func TestHuffmanDecode(t *testing.T) { + tests := []struct { + inHex, want string + }{ + {"f1e3 c2e5 f23a 6ba0 ab90 f4ff", "www.example.com"}, + {"a8eb 1064 9cbf", "no-cache"}, + {"25a8 49e9 5ba9 7d7f", "custom-key"}, + {"25a8 49e9 5bb8 e8b4 bf", "custom-value"}, + {"6402", "302"}, + {"aec3 771a 4b", "private"}, + {"d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff", "Mon, 21 Oct 2013 20:13:21 GMT"}, + {"9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3", "https://www.example.com"}, + {"9bd9 ab", "gzip"}, + {"94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07", + "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1"}, + } + for i, tt := range tests { + var buf bytes.Buffer + in, err := hex.DecodeString(strings.Replace(tt.inHex, " ", "", -1)) + if err != nil { + t.Errorf("%d. hex input error: %v", i, err) + continue + } + if _, err := HuffmanDecode(&buf, in); err != nil { + t.Errorf("%d. decode error: %v", i, err) + continue + } + if got := buf.String(); tt.want != got { + t.Errorf("%d. decode = %q; want %q", i, got, tt.want) + } + } +} + +func TestAppendHuffmanString(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.example.com", "f1e3 c2e5 f23a 6ba0 ab90 f4ff"}, + {"no-cache", "a8eb 1064 9cbf"}, + {"custom-key", "25a8 49e9 5ba9 7d7f"}, + {"custom-value", "25a8 49e9 5bb8 e8b4 bf"}, + {"302", "6402"}, + {"private", "aec3 771a 4b"}, + {"Mon, 21 Oct 2013 20:13:21 GMT", "d07a be94 1054 d444 a820 0595 040b 8166 e082 a62d 1bff"}, + {"https://www.example.com", "9d29 ad17 1863 c78f 0b97 c8e9 ae82 ae43 d3"}, + {"gzip", "9bd9 ab"}, + {"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", + "94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07"}, + } + for i, tt := range tests { + buf := []byte{} + want := strings.Replace(tt.want, " ", "", -1) + buf = AppendHuffmanString(buf, tt.in) + if got := hex.EncodeToString(buf); want != got { + t.Errorf("%d. 
encode = %q; want %q", i, got, want) + } + } +} + +func TestHuffmanMaxStrLen(t *testing.T) { + const msg = "Some string" + huff := AppendHuffmanString(nil, msg) + + testGood := func(max int) { + var out bytes.Buffer + if err := huffmanDecode(&out, max, huff); err != nil { + t.Errorf("For maxLen=%d, unexpected error: %v", max, err) + } + if out.String() != msg { + t.Errorf("For maxLen=%d, out = %q; want %q", max, out.String(), msg) + } + } + testGood(0) + testGood(len(msg)) + testGood(len(msg) + 1) + + var out bytes.Buffer + if err := huffmanDecode(&out, len(msg)-1, huff); err != ErrStringLength { + t.Errorf("err = %v; want ErrStringLength", err) + } +} + +func TestHuffmanRoundtripStress(t *testing.T) { + const Len = 50 // of uncompressed string + input := make([]byte, Len) + var output bytes.Buffer + var huff []byte + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + var encSize int64 + for i := 0; i < n; i++ { + for l := range input { + input[l] = byte(src.Intn(256)) + } + huff = AppendHuffmanString(huff[:0], string(input)) + encSize += int64(len(huff)) + output.Reset() + if err := huffmanDecode(&output, 0, huff); err != nil { + t.Errorf("Failed to decode %q -> %q -> error %v", input, huff, err) + continue + } + if !bytes.Equal(output.Bytes(), input) { + t.Errorf("Roundtrip failure on %q -> %q -> %q", input, huff, output.Bytes()) + } + } + t.Logf("Compressed size of original: %0.02f%% (%v -> %v)", 100*(float64(encSize)/(Len*float64(n))), Len*n, encSize) +} + +func TestHuffmanDecodeFuzz(t *testing.T) { + const Len = 50 // of compressed + var buf, zbuf bytes.Buffer + + n := 5000 + if testing.Short() { + n = 100 + } + seed := time.Now().UnixNano() + t.Logf("Seed = %v", seed) + src := rand.New(rand.NewSource(seed)) + numFail := 0 + for i := 0; i < n; i++ { + zbuf.Reset() + if i == 0 { + // Start with at least one invalid one. 
+ zbuf.WriteString("00\x91\xff\xff\xff\xff\xc8") + } else { + for l := 0; l < Len; l++ { + zbuf.WriteByte(byte(src.Intn(256))) + } + } + + buf.Reset() + if err := huffmanDecode(&buf, 0, zbuf.Bytes()); err != nil { + if err == ErrInvalidHuffman { + numFail++ + continue + } + t.Errorf("Failed to decode %q: %v", zbuf.Bytes(), err) + continue + } + } + t.Logf("%0.02f%% are invalid (%d / %d)", 100*float64(numFail)/float64(n), numFail, n) + if numFail < 1 { + t.Error("expected at least one invalid huffman encoding (test starts with one)") + } +} + +func TestReadVarInt(t *testing.T) { + type res struct { + i uint64 + consumed int + err error + } + tests := []struct { + n byte + p []byte + want res + }{ + // Fits in a byte: + {1, []byte{0}, res{0, 1, nil}}, + {2, []byte{2}, res{2, 1, nil}}, + {3, []byte{6}, res{6, 1, nil}}, + {4, []byte{14}, res{14, 1, nil}}, + {5, []byte{30}, res{30, 1, nil}}, + {6, []byte{62}, res{62, 1, nil}}, + {7, []byte{126}, res{126, 1, nil}}, + {8, []byte{254}, res{254, 1, nil}}, + + // Doesn't fit in a byte: + {1, []byte{1}, res{0, 0, errNeedMore}}, + {2, []byte{3}, res{0, 0, errNeedMore}}, + {3, []byte{7}, res{0, 0, errNeedMore}}, + {4, []byte{15}, res{0, 0, errNeedMore}}, + {5, []byte{31}, res{0, 0, errNeedMore}}, + {6, []byte{63}, res{0, 0, errNeedMore}}, + {7, []byte{127}, res{0, 0, errNeedMore}}, + {8, []byte{255}, res{0, 0, errNeedMore}}, + + // Ignoring top bits: + {5, []byte{255, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 111 + {5, []byte{159, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 100 + {5, []byte{191, 154, 10}, res{1337, 3, nil}}, // high dummy three bits: 101 + + // Extra byte: + {5, []byte{191, 154, 10, 2}, res{1337, 3, nil}}, // extra byte + + // Short a byte: + {5, []byte{191, 154}, res{0, 0, errNeedMore}}, + + // integer overflow: + {1, []byte{255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, res{0, 0, errVarintOverflow}}, + } + for _, tt := range tests { + i, remain, err := readVarInt(tt.n, tt.p) + consumed := len(tt.p) - len(remain) + got := res{i, consumed, err} + if got != tt.want { + t.Errorf("readVarInt(%d, %v ~ %x) = %+v; want %+v", tt.n, tt.p, tt.p, got, tt.want) + } + } +} + +// Fuzz crash, originally reported at https://github.com/bradfitz/http2/issues/56 +func TestHuffmanFuzzCrash(t *testing.T) { + got, err := HuffmanDecodeToString([]byte("00\x91\xff\xff\xff\xff\xc8")) + if got != "" { + t.Errorf("Got %q; want empty string", got) + } + if err != ErrInvalidHuffman { + t.Errorf("Err = %v; want ErrInvalidHuffman", err) + } +} + +func pair(name, value string) HeaderField { + return HeaderField{Name: name, Value: value} +} + +func dehex(s string) []byte { + s = strings.Replace(s, " ", "", -1) + s = strings.Replace(s, "\n", "", -1) + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } + return b +} + +func TestEmitEnabled(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) + + numCallback := 0 + var dec *Decoder + dec = NewDecoder(8<<20, func(HeaderField) { + numCallback++ + dec.SetEmitEnabled(false) + }) + if !dec.EmitEnabled() { + t.Errorf("initial emit enabled = false; want true") + } + if _, err := dec.Write(buf.Bytes()); err != nil { + t.Error(err) + } + if numCallback != 1 { + t.Errorf("num callbacks = %d; want 1", numCallback) + } + if dec.EmitEnabled() { + t.Errorf("emit enabled = true; want false") + } +} + +func TestSaveBufLimit(t *testing.T) { 
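+	// Exercises Write's last-resort guard on saveBuf growth: once a
+	// declared string length exceeds the SetMaxStringLength limit, Write
+	// must fail with ErrStringLength rather than buffer the fragment.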
+ const maxStr = 1 << 10 + var got []HeaderField + dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) { + got = append(got, hf) + }) + dec.SetMaxStringLength(maxStr) + var frag []byte + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "foo"...) + frag = appendVarInt(frag, 7, 3) + frag = append(frag, "bar"...) + + if _, err := dec.Write(frag); err != nil { + t.Fatal(err) + } + + want := []HeaderField{{Name: "foo", Value: "bar"}} + if !reflect.DeepEqual(got, want) { + t.Errorf("After small writes, got %v; want %v", got, want) + } + + frag = append(frag[:0], encodeTypeByte(false, false)) + frag = appendVarInt(frag, 7, maxStr*3) + frag = append(frag, make([]byte, maxStr*3)...) + + _, err := dec.Write(frag) + if err != ErrStringLength { + t.Fatalf("Write error = %v; want ErrStringLength", err) + } +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 0000000..8850e39 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. +func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. 
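+	// The walk consumes up to a byte per step: the 8 oldest unconsumed
+	// bits of cur index children; an internal node consumes the full
+	// byte, while a leaf (children == nil) emits sym and consumes only
+	// the codeLen bits its final level actually used.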
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children []*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := rootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), 1<<shift
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst are given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..a66cfbe
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+	// For static tables, entries are never evicted.
+	//
+	// For dynamic tables, entries are evicted from ents[0] and added to the end.
+	// Each entry has a unique id that starts at one and increments for each
+	// entry that is added. This unique id is stable across evictions, meaning
+	// it can be used as a pointer to a specific entry. As in hpack, unique ids
+	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
+	//
+	// Zero is not a valid unique id.
+	//
+	// evictCount should not overflow in any remotely practical situation. In
+	// practice, we will have one dynamic table per HTTP/2 connection. If we
+	// assume a very powerful server that handles 1M QPS per connection and each
+	// request adds (then evicts) 100 entries from the table, it would still take
+	// 2M years for evictCount to overflow.
+	ents       []HeaderField
+	evictCount uint64
+
+	// byName maps a HeaderField name to the unique id of the newest entry with
+	// the same name. See above for a definition of "unique id".
+	byName map[string]uint64
+
+	// byNameValue maps a HeaderField name/value pair to the unique id of the newest
+	// entry with the same name and value. See above for a definition of "unique id".
+	byNameValue map[pairNameValue]uint64
+}
+
+type pairNameValue struct {
+	name, value string
+}
+
+func (t *headerFieldTable) init() {
+	t.byName = make(map[string]uint64)
+	t.byNameValue = make(map[pairNameValue]uint64)
+}
+
+// len reports the number of entries in the table.
+func (t *headerFieldTable) len() int {
+	return len(t.ents)
+}
+
+// addEntry adds a new entry.
+func (t *headerFieldTable) addEntry(f HeaderField) {
+	id := uint64(t.len()) + t.evictCount + 1
+	t.byName[f.Name] = id
+	t.byNameValue[pairNameValue{f.Name, f.Value}] = id
+	t.ents = append(t.ents, f)
+}
+
+// evictOldest evicts the n oldest entries in the table.
+func (t *headerFieldTable) evictOldest(n int) {
+	if n > t.len() {
+		panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
+	}
+	for k := 0; k < n; k++ {
+		f := t.ents[k]
+		id := t.evictCount + uint64(k) + 1
+		if t.byName[f.Name] == id {
+			delete(t.byName, f.Name)
+		}
+		if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
+			delete(t.byNameValue, p)
+		}
+	}
+	copy(t.ents, t.ents[n:])
+	for k := t.len() - n; k < t.len(); k++ {
+		t.ents[k] = HeaderField{} // so strings can be garbage collected
+	}
+	t.ents = t.ents[:t.len()-n]
+	if t.evictCount+uint64(n) < t.evictCount {
+		panic("evictCount overflow")
+	}
+	t.evictCount += uint64(n)
+}
+
+// search finds f in the table. If there is no match, i is 0.
+// If both name and value match, i is the matched index and nameValueMatch
+// becomes true. If only name matches, i points to that index and
+// nameValueMatch becomes false.
+//
+// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
+// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
+// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
+// table, the return value i actually refers to the entry t.ents[t.len()-i].
+//
+// All tables are assumed to be dynamic tables except for the global
+// staticTable pointer.
+//
+// See Section 2.3.3.
+func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+	if !f.Sensitive {
+		if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
+			return t.idToIndex(id), true
+		}
+	}
+	if id := t.byName[f.Name]; id != 0 {
+		return t.idToIndex(id), false
+	}
+	return 0, false
+}
+
+// idToIndex converts a unique id to an HPACK index.
+// See Section 2.3.3.
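+//
+// For instance, under the id scheme above, with evictCount = 3 and five
+// live entries the valid ids are 4..8: in a dynamic table id 8 (the
+// newest entry) maps to HPACK index 1 and id 4 to index 5, while in the
+// static table (which never evicts) id k is simply index k.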
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables_test.go b/vendor/golang.org/x/net/http2/hpack/tables_test.go new file mode 100644 index 0000000..d963f36 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables_test.go @@ -0,0 +1,214 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hpack + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestHeaderFieldTable(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // Tests will be run twice: once before evicting anything, and + // again after evicting the three oldest entries. + tests := []struct { + f HeaderField + beforeWantStaticI uint64 + beforeWantMatch bool + afterWantStaticI uint64 + afterWantMatch bool + }{ + {HeaderField{"key1", "value1-1", false}, 1, true, 0, false}, + {HeaderField{"key1", "value1-2", false}, 3, true, 0, false}, + {HeaderField{"key1", "value1-3", false}, 3, false, 0, false}, + {HeaderField{"key2", "value2-1", false}, 2, true, 3, false}, + {HeaderField{"key2", "value2-2", false}, 6, true, 3, true}, + {HeaderField{"key2", "value2-3", false}, 6, false, 3, false}, + {HeaderField{"key4", "value4-1", false}, 5, true, 2, true}, + // Name match only, because sensitive. + {HeaderField{"key4", "value4-1", true}, 5, false, 2, false}, + // Key not found. + {HeaderField{"key5", "value5-x", false}, 0, false, 0, false}, + } + + staticToDynamic := func(i uint64) uint64 { + if i == 0 { + return 0 + } + return uint64(table.len()) - i + 1 // dynamic is the reversed table + } + + searchStatic := func(f HeaderField) (uint64, bool) { + old := staticTable + staticTable = table + defer func() { staticTable = old }() + return staticTable.search(f) + } + + searchDynamic := func(f HeaderField) (uint64, bool) { + return table.search(f) + } + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.beforeWantStaticI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.beforeWantStaticI) + if wantI, wantMatch := wantDynamicI, test.beforeWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("before evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } + + table.evictOldest(3) + + for _, test := range tests { + gotI, gotMatch := searchStatic(test.f) + if wantI, wantMatch := test.afterWantStaticI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchStatic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + gotI, gotMatch = searchDynamic(test.f) + wantDynamicI := staticToDynamic(test.afterWantStaticI) + if wantI, wantMatch := wantDynamicI, test.afterWantMatch; gotI != wantI || gotMatch != wantMatch { + t.Errorf("after evictions: searchDynamic(%+v)=%v,%v want %v,%v", test.f, gotI, gotMatch, wantI, wantMatch) + } + } +} + +func TestHeaderFieldTable_LookupMapEviction(t *testing.T) { + table := &headerFieldTable{} + table.init() + table.addEntry(pair("key1", "value1-1")) + table.addEntry(pair("key2", "value2-1")) + table.addEntry(pair("key1", "value1-2")) + table.addEntry(pair("key3", "value3-1")) + table.addEntry(pair("key4", "value4-1")) + table.addEntry(pair("key2", "value2-2")) + + // evict all pairs + table.evictOldest(table.len()) + + if l := table.len(); l > 0 { + t.Errorf("table.len() = %d, want 0", l) + } + + if l := len(table.byName); l > 0 { + 
t.Errorf("len(table.byName) = %d, want 0", l) + } + + if l := len(table.byNameValue); l > 0 { + t.Errorf("len(table.byNameValue) = %d, want 0", l) + } +} + +func TestStaticTable(t *testing.T) { + fromSpec := ` + +-------+-----------------------------+---------------+ + | 1 | :authority | | + | 2 | :method | GET | + | 3 | :method | POST | + | 4 | :path | / | + | 5 | :path | /index.html | + | 6 | :scheme | http | + | 7 | :scheme | https | + | 8 | :status | 200 | + | 9 | :status | 204 | + | 10 | :status | 206 | + | 11 | :status | 304 | + | 12 | :status | 400 | + | 13 | :status | 404 | + | 14 | :status | 500 | + | 15 | accept-charset | | + | 16 | accept-encoding | gzip, deflate | + | 17 | accept-language | | + | 18 | accept-ranges | | + | 19 | accept | | + | 20 | access-control-allow-origin | | + | 21 | age | | + | 22 | allow | | + | 23 | authorization | | + | 24 | cache-control | | + | 25 | content-disposition | | + | 26 | content-encoding | | + | 27 | content-language | | + | 28 | content-length | | + | 29 | content-location | | + | 30 | content-range | | + | 31 | content-type | | + | 32 | cookie | | + | 33 | date | | + | 34 | etag | | + | 35 | expect | | + | 36 | expires | | + | 37 | from | | + | 38 | host | | + | 39 | if-match | | + | 40 | if-modified-since | | + | 41 | if-none-match | | + | 42 | if-range | | + | 43 | if-unmodified-since | | + | 44 | last-modified | | + | 45 | link | | + | 46 | location | | + | 47 | max-forwards | | + | 48 | proxy-authenticate | | + | 49 | proxy-authorization | | + | 50 | range | | + | 51 | referer | | + | 52 | refresh | | + | 53 | retry-after | | + | 54 | server | | + | 55 | set-cookie | | + | 56 | strict-transport-security | | + | 57 | transfer-encoding | | + | 58 | user-agent | | + | 59 | vary | | + | 60 | via | | + | 61 | www-authenticate | | + +-------+-----------------------------+---------------+ +` + bs := bufio.NewScanner(strings.NewReader(fromSpec)) + re := regexp.MustCompile(`\| (\d+)\s+\| (\S+)\s*\| (\S(.*\S)?)?\s+\|`) + for bs.Scan() { + l := bs.Text() + if !strings.Contains(l, "|") { + continue + } + m := re.FindStringSubmatch(l) + if m == nil { + continue + } + i, err := strconv.Atoi(m[1]) + if err != nil { + t.Errorf("Bogus integer on line %q", l) + continue + } + if i < 1 || i > staticTable.len() { + t.Errorf("Bogus index %d on line %q", i, l) + continue + } + if got, want := staticTable.ents[i-1].Name, m[2]; got != want { + t.Errorf("header index %d name = %q; want %q", i, got, want) + } + if got, want := staticTable.ents[i-1].Value, m[3]; got != want { + t.Errorf("header index %d value = %q; want %q", i, got, want) + } + } + if err := bs.Err(); err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 0000000..d565f40 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,391 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. 
+// +// See https://http2.golang.org/ for a test server running this code. +// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/lex/httplex" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
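+// For example, under the RFC 7540 Section 6.5.2 limits enforced just
+// below (a sketch, not part of the upstream doc comment):
+//
+//	Setting{SettingEnablePush, 2}.Valid()            // error: must be 0 or 1
+//	Setting{SettingMaxFrameSize, 1 << 30}.Valid()    // error: above 1<<24-1
+//	Setting{SettingInitialWindowSize, 65535}.Valid() // nil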
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httplex.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httplex.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) + +func init() { + for i := 100; i <= 999; i++ { + if v := http.StatusText(i); v != "" { + httpCodeStringCommon[i] = strconv.Itoa(i) + } + } +} + +func httpCodeString(code int) string { + if s, ok := httpCodeStringCommon[code]; ok { + return s + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
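The gate and closeWaiter types above are deliberately tiny coordination primitives. A standalone sketch of how a gate hands control between two goroutines (handleFrame is a hypothetical stand-in; readFrames, later in this diff, uses exactly this shape to block until the consumer is done with each frame):

+	g := make(gate)
+	go func() {
+		handleFrame() // consumer works on the shared data...
+		g.Done()      // ...then releases the producer
+	}()
+	g.Wait() // producer blocks here until Done is called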
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
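+// (A usage sketch for the pooled sorter above: deterministic iteration
+// over an http.Header. Here h is any header map and the fmt call is
+// illustrative:
+//
+//	s := sorterPool.Get().(*sorter)
+//	for _, k := range s.Keys(h) {
+//		fmt.Println(k) // keys in sorted order
+//	}
+//	sorterPool.Put(s) // the slice from Keys is invalid after this
+// )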
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/http2_test.go b/vendor/golang.org/x/net/http2/http2_test.go new file mode 100644 index 0000000..5248776 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2_test.go @@ -0,0 +1,199 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "flag" + "fmt" + "net/http" + "os/exec" + "strconv" + "strings" + "testing" + + "golang.org/x/net/http2/hpack" +) + +var knownFailing = flag.Bool("known_failing", false, "Run known-failing tests.") + +func condSkipFailingTest(t *testing.T) { + if !*knownFailing { + t.Skip("Skipping known-failing test without --known_failing") + } +} + +func init() { + inTests = true + DebugGoroutines = true + flag.BoolVar(&VerboseLogs, "verboseh2", VerboseLogs, "Verbose HTTP/2 debug logging") +} + +func TestSettingString(t *testing.T) { + tests := []struct { + s Setting + want string + }{ + {Setting{SettingMaxFrameSize, 123}, "[MAX_FRAME_SIZE = 123]"}, + {Setting{1<<16 - 1, 123}, "[UNKNOWN_SETTING_65535 = 123]"}, + } + for i, tt := range tests { + got := fmt.Sprint(tt.s) + if got != tt.want { + t.Errorf("%d. for %#v, string = %q; want %q", i, tt.s, got, tt.want) + } + } +} + +type twriter struct { + t testing.TB + st *serverTester // optional +} + +func (w twriter) Write(p []byte) (n int, err error) { + if w.st != nil { + ps := string(p) + for _, phrase := range w.st.logFilter { + if strings.Contains(ps, phrase) { + return len(p), nil // no logging + } + } + } + w.t.Logf("%s", p) + return len(p), nil +} + +// like encodeHeader, but don't add implicit pseudo headers. +func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if err := enc.WriteField(hpack.HeaderField{Name: k, Value: v}); err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + } + return buf.Bytes() +} + +// Verify that curl has http2. +func requireCurl(t *testing.T) { + out, err := dockerLogs(curl(t, "--version")) + if err != nil { + t.Skipf("failed to determine curl features; skipping test") + } + if !strings.Contains(string(out), "HTTP2") { + t.Skip("curl doesn't support HTTP2; skipping test") + } +} + +func curl(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run curl in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +// Verify that h2load exists. +func requireH2load(t *testing.T) { + out, err := dockerLogs(h2load(t, "--version")) + if err != nil { + t.Skipf("failed to probe h2load; skipping test: %s", out) + } + if !strings.Contains(string(out), "h2load nghttp2/") { + t.Skipf("h2load not present; skipping test. 
(Output=%q)", out) + } +} + +func h2load(t *testing.T, args ...string) (container string) { + out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output() + if err != nil { + t.Skipf("Failed to run h2load in docker: %v, %s", err, out) + } + return strings.TrimSpace(string(out)) +} + +type puppetCommand struct { + fn func(w http.ResponseWriter, r *http.Request) + done chan<- bool +} + +type handlerPuppet struct { + ch chan puppetCommand +} + +func newHandlerPuppet() *handlerPuppet { + return &handlerPuppet{ + ch: make(chan puppetCommand), + } +} + +func (p *handlerPuppet) act(w http.ResponseWriter, r *http.Request) { + for cmd := range p.ch { + cmd.fn(w, r) + cmd.done <- true + } +} + +func (p *handlerPuppet) done() { close(p.ch) } +func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) { + done := make(chan bool) + p.ch <- puppetCommand{fn, done} + <-done +} +func dockerLogs(container string) ([]byte, error) { + out, err := exec.Command("docker", "wait", container).CombinedOutput() + if err != nil { + return out, err + } + exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + return out, errors.New("unexpected exit status from docker wait") + } + out, err = exec.Command("docker", "logs", container).CombinedOutput() + exec.Command("docker", "rm", container).Run() + if err == nil && exitStatus != 0 { + err = fmt.Errorf("exit status %d: %s", exitStatus, out) + } + return out, err +} + +func kill(container string) { + exec.Command("docker", "kill", container).Run() + exec.Command("docker", "rm", container).Run() +} + +func cleanDate(res *http.Response) { + if d := res.Header["Date"]; len(d) == 1 { + d[0] = "XXX" + } +} + +func TestSorterPoolAllocs(t *testing.T) { + ss := []string{"a", "b", "c"} + h := http.Header{ + "a": nil, + "b": nil, + "c": nil, + } + sorter := new(sorter) + + if allocs := testing.AllocsPerRun(100, func() { + sorter.SortStrings(ss) + }); allocs >= 1 { + t.Logf("SortStrings allocs = %v; want <1", allocs) + } + + if allocs := testing.AllocsPerRun(5, func() { + if len(sorter.Keys(h)) != 3 { + t.Fatal("wrong result") + } + }); allocs > 0 { + t.Logf("Keys allocs = %v; want <1", allocs) + } +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go new file mode 100644 index 0000000..508cebc --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go16.go @@ -0,0 +1,21 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.6 + +package http2 + +import ( + "net/http" + "time" +) + +func configureTransport(t1 *http.Transport) (*Transport, error) { + return nil, errTransportVersion +} + +func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { + return 0 + +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go new file mode 100644 index 0000000..140434a --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.7 + +package http2 + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +type contextContext interface { + Done() <-chan struct{} + Err() error +} + +type fakeContext struct{} + +func (fakeContext) Done() <-chan struct{} { return nil } +func (fakeContext) Err() error { panic("should not be called") } + +func reqContext(r *http.Request) fakeContext { + return fakeContext{} +} + +func setResponseUncompressed(res *http.Response) { + // Nothing. +} + +type clientTrace struct{} + +func requestTrace(*http.Request) *clientTrace { return nil } +func traceGotConn(*http.Request, *ClientConn) {} +func traceFirstResponseByte(*clientTrace) {} +func traceWroteHeaders(*clientTrace) {} +func traceWroteRequest(*clientTrace, error) {} +func traceGot100Continue(trace *clientTrace) {} +func traceWait100Continue(trace *clientTrace) {} + +func nop() {} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { + return nil, nop +} + +func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { + return ctx, nop +} + +func requestWithContext(req *http.Request, ctx contextContext) *http.Request { + return req +} + +// temporary copy of Go 1.6's private tls.Config.clone: +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} + +func (cc *ClientConn) Ping(ctx contextContext) error { + return cc.ping(ctx) +} + +func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go new file mode 100644 index 0000000..6f8d3f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go18.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package http2 + +import ( + "io" + "net/http" +) + +func configureServer18(h1 *http.Server, h2 *Server) error { + // No IdleTimeout to sync prior to Go 1.8. + return nil +} + +func shouldLogPanic(panicValue interface{}) bool { + return panicValue != nil +} + +func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { + return nil +} + +func reqBodyIsNoBody(io.ReadCloser) bool { return false } + +func go18httpNoBody() io.ReadCloser { return nil } // for tests only diff --git a/vendor/golang.org/x/net/http2/not_go19.go b/vendor/golang.org/x/net/http2/not_go19.go new file mode 100644 index 0000000..5ae0772 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go19.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
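These not_goNN.go files are the fallback half of x/net's version-gating pattern: a file guarded by "// +build !go1.N" supplies stubs, while a paired go1N.go file (not part of this diff) carries the real implementation behind "// +build go1.N". A minimal sketch of the pattern, with a hypothetical package and helper:

+// ctx_go17.go
+// +build go1.7
+
+package demo
+
+import (
+	"context"
+	"net/http"
+)
+
+func reqCtx(r *http.Request) context.Context { return r.Context() }
+
+// ctx_pre17.go
+// +build !go1.7
+
+package demo
+
+import "net/http"
+
+type emptyCtx struct{}
+
+func (emptyCtx) Done() <-chan struct{} { return nil }
+func (emptyCtx) Err() error            { return nil }
+
+func reqCtx(r *http.Request) emptyCtx { return emptyCtx{} }

The two reqCtx variants return different types on purpose; as with contextContext and fakeContext above, callers only touch the shared Done/Err surface, so either return type satisfies them.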
+ +// +build !go1.9 + +package http2 + +import ( + "net/http" +) + +func configureServer19(s *http.Server, conf *Server) error { + // not supported prior to go1.9 + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 0000000..a614009 --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. +func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. 
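+// (An illustration of the close semantics implemented above; pipe is
+// unexported, so this is an in-package sketch:
+//
+//	var p pipe
+//	p.b = new(bytes.Buffer)
+//	io.WriteString(&p, "tail")
+//	p.CloseWithError(errors.New("done"))
+//	data, err := ioutil.ReadAll(&p)
+//	// data == "tail", err == "done": buffered bytes drain before the
+//	// error surfaces. BreakWithError would instead discard "tail" and
+//	// fail the next Read immediately.
+// )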
+func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/pipe_test.go b/vendor/golang.org/x/net/http2/pipe_test.go new file mode 100644 index 0000000..1bf351f --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe_test.go @@ -0,0 +1,130 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "testing" +) + +func TestPipeClose(t *testing.T) { + var p pipe + p.b = new(bytes.Buffer) + a := errors.New("a") + b := errors.New("b") + p.CloseWithError(a) + p.CloseWithError(b) + _, err := p.Read(make([]byte, 1)) + if err != a { + t.Errorf("err = %v want %v", err, a) + } +} + +func TestPipeDoneChan(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.CloseWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_ErrFirst(t *testing.T) { + var p pipe + p.CloseWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break(t *testing.T) { + var p pipe + done := p.Done() + select { + case <-done: + t.Fatal("done too soon") + default: + } + p.BreakWithError(io.EOF) + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeDoneChan_Break_ErrFirst(t *testing.T) { + var p pipe + p.BreakWithError(io.EOF) + done := p.Done() + select { + case <-done: + default: + t.Fatal("should be done") + } +} + +func TestPipeCloseWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + const body = "foo" + io.WriteString(p, body) + a := errors.New("test error") + p.CloseWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != body { + t.Errorf("read bytes = %q; want %q", all, body) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + // Read and Write should fail. + if n, err := p.Write([]byte("abc")); err != errClosedPipeWrite || n != 0 { + t.Errorf("Write(abc) after close\ngot %v, %v\nwant 0, %v", n, err, errClosedPipeWrite) + } + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, %v", n, errClosedPipeWrite) + } +} + +func TestPipeBreakWithError(t *testing.T) { + p := &pipe{b: new(bytes.Buffer)} + io.WriteString(p, "foo") + a := errors.New("test err") + p.BreakWithError(a) + all, err := ioutil.ReadAll(p) + if string(all) != "" { + t.Errorf("read bytes = %q; want empty string", all) + } + if err != a { + t.Logf("read error = %v, %v", err, a) + } + if p.b != nil { + t.Errorf("buffer should be nil after BreakWithError") + } + // Write should succeed silently. 
+ if n, err := p.Write([]byte("abc")); err != nil || n != 3 { + t.Errorf("Write(abc) after break\ngot %v, %v\nwant 0, nil", n, err) + } + if p.b != nil { + t.Errorf("buffer should be nil after Write") + } + // Read should fail. + if n, err := p.Read(make([]byte, 1)); err == nil || n != 0 { + t.Errorf("Read() after close\ngot %v, nil\nwant 0, not nil", n) + } +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 0000000..7a50226 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2888 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. 
This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. + state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. 
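+// A typical call site looks like the following (a sketch: mux and the
+// certificate paths are placeholders, and callers outside this package
+// qualify the names as http2.Server and http2.ConfigureServer):
+//
+//	srv := &http.Server{Addr: ":8443", Handler: mux}
+//	if err := ConfigureServer(srv, &Server{
+//		MaxConcurrentStreams: 250,
+//		IdleTimeout:          5 * time.Minute,
+//	}); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))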
+func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if err := configureServer18(s, conf); err != nil { + return err + } + if err := configureServer19(s, conf); err != nil { + return err + } + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + conf.ServeConn(c, &ServeConnOpts{ + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. 
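+// (As the h2c note below implies, "prior knowledge" h2c amounts to
+// handing ServeConn an accepted plaintext net.Conn directly. A sketch,
+// with h2srv, mux, and all error handling as placeholders:
+//
+//	ln, _ := net.Listen("tcp", ":8080")
+//	for {
+//		c, _ := ln.Accept()
+//		go h2srv.ServeConn(c, &ServeConnOpts{Handler: mux})
+//	}
+// )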
It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. +func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. 
+ if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. + } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx contextContext + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. 
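To put numbers on maxHeaderListSize above: RFC 7541 Section 4.1 charges 32 octets of overhead per header entry, so the advertised limit pads net/http's byte limit by an assumed ten headers' worth. A worked sketch with the defaults:

+	const perFieldOverhead = 32 // RFC 7541 Section 4.1 overhead per entry
+	const typicalHeaders = 10
+	limit := uint32(http.DefaultMaxHeaderBytes + typicalHeaders*perFieldOverhead)
+	// 1048576 + 320 = 1048896 bytes, sent as SETTINGS_MAX_HEADER_LIST_SIZE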
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx contextContext + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. 
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
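+	// (A note on the readFrameResult contract above: res.f is only valid
+	// until res.readMore is called, so a consumer must finish with the
+	// frame first. Sketch, with handle as a hypothetical consumer:
+	//
+	//	res := <-sc.readFrameCh
+	//	handle(res.f)  // must not retain res.f beyond this point
+	//	res.readMore() // lets readFrames decode the next frame
+	// )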
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. 
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. 
(We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
+ }
+ case stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ wr.replyToWriter(err)
+ return
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(frameWriteResult{wr, err})
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr)
+ }
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in
+// the main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+ sc.writingFrameAsync = false
+
+ wr := res.wr
+
+ if writeEndsStream(wr.write) {
+ st := wr.stream
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = stateHalfClosedLocal
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(streamError(st.id, ErrCodeNo))
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ } else {
+ switch v := wr.write.(type) {
+ case StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case handlerPanicRST:
+ sc.closeStream(wr.stream, errHandlerPanicked)
+ }
+ }
+
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
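+//
+// In outline, the loop below prefers, in order: a pending GOAWAY, then
+// a SETTINGS ack, then the next frame from the write scheduler, and
+// finally a flush of any buffered bytes.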
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame || sc.inFrameScheduleLoop {
+ return
+ }
+ sc.inFrameScheduleLoop = true
+ for !sc.writingFrameAsync {
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(FrameWriteRequest{
+ write: &writeGoAway{
+ maxStreamID: sc.maxClientStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ continue
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
+ continue
+ }
+ if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
+ if wr, ok := sc.writeSched.Pop(); ok {
+ sc.startFrameWrite(wr)
+ continue
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ continue
+ }
+ break
+ }
+ sc.inFrameScheduleLoop = false
+}
+
+// startGracefulShutdown gracefully shuts down a connection. This
+// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
+// shutting down. The connection isn't closed until all current
+// streams are done.
+//
+// startGracefulShutdown returns immediately; it does not wait until
+// the connection has shut down.
+func (sc *serverConn) startGracefulShutdown() {
+ sc.serveG.checkNotOn() // NOT
+ sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
+}
+
+// After sending GOAWAY, the connection will close after goAwayTimeout.
+// If we close the connection immediately after sending GOAWAY, there may
+// be unsent data in our kernel receive buffer, which will cause the kernel
+// to send a TCP RST on close() instead of a FIN. This RST will abort the
+// connection immediately, whether or not the client had received the GOAWAY.
+//
+// Ideally we should delay for at least 1 RTT + epsilon so the client has
+// a chance to read the GOAWAY and stop sending messages. Measuring RTT
+// is hard, so we approximate with 1 second. See golang.org/issue/18701.
+//
+// This is a var so it can be shorter in tests, where all requests use the
+// loopback interface, making the expected RTT very small.
+//
+// TODO: configurable?
+var goAwayTimeout = 1 * time.Second
+
+func (sc *serverConn) startGracefulShutdownInternal() {
+ sc.goAway(ErrCodeNo)
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(FrameWriteRequest{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.resetQueued = true
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g.
CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. 
When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + return streamError(id, ErrCodeStreamClosed) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. 
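+ // For example (illustrative numbers): a padded DATA frame with
+ // Length 1000 that carries only 900 bytes of data refunds the other
+ // 100 flow-control tokens immediately, at both the connection and
+ // the stream level, via the two updates below.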
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
+ }
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
+ sc.serveG.check()
+ if f.ErrCode != ErrCodeNo {
+ sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ } else {
+ sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ }
+ sc.startGracefulShutdownInternal()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
+ sc.pushEnabled = false
+ return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+func (st *stream) isPushed() bool {
+ return st.id%2 == 0
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
+
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired.
+func (st *stream) onWriteTimeout() {
+ st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
+}
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ if st := sc.streams[f.StreamID]; st != nil {
+ if st.resetQueued {
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
+ return nil
+ }
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxClientStreamID {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.maxClientStreamID = id
+
+ if sc.idleTimer != nil {
+ sc.idleTimer.Stop()
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer.
An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. 
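+ // For example, a PRIORITY frame on stream 5 naming stream 5 itself
+ // as its dependency is rejected here with a stream-level
+ // PROTOCOL_ERROR; depending on any other stream is allowed.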
+ return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := contextWithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = requestWithContext(req, st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
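+// If the handler panics, the deferred function below resets just the
+// affected stream (via handlerPanicRST) and logs the stack, as net/http
+// does: one panicking request does not bring down the whole connection.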
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if shouldLogPanic(e) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

    HTTP Error 431

    Request Header Field(s) Too Large

    ") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !ValidTrailerHeader(k) { + // Forbidden by RFC 2616 14.40. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. 
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + endStream := rws.handlerDone && !rws.hasTrailers() + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. + if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && rws.hasTrailers() { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 2616 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. 
When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Flush called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk with zero bytes), so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ cw := rws.stream.cw
+ go func() {
+ cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at http://httpwg.org/specs/rfc7231.html#status.codes)
+ // and we might block under 200 (once we have more mature 1xx support).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
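+ // For example, WriteHeader(42) or WriteHeader(1000) panics here,
+ // while any three-digit code, even an unregistered one like 299,
+ // is currently allowed through.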
+ if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + checkWriteHeaderCode(code) + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +// pushOptions is the internal version of http.PushOptions, which we +// cannot include here because it's only defined in Go 1.8 and later. +type pushOptions struct { + Method string + Header http.Header +} + +func (w *responseWriter) push(target string, opts pushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. 
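+ // For example (illustrative): from a handler serving
+ // https://example.com/index.html, pushing "/app.js" resolves against
+ // the request's scheme and host and is accepted, while pushing
+ // "http://example.com/app.js" fails the scheme check below, because
+ // the promised URL's scheme must match the request's.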
+ u, err := url.Parse(target)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ if !strings.HasPrefix(target, "/") {
+ return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+ }
+ u.Scheme = wantScheme
+ u.Host = w.rws.req.Host
+ } else {
+ if u.Scheme != wantScheme {
+ return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+ }
+ if u.Host == "" {
+ return errors.New("URL must have a host")
+ }
+ }
+ for k := range opts.Header {
+ if strings.HasPrefix(k, ":") {
+ return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+ }
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
+ switch strings.ToLower(k) {
+ case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+ return fmt.Errorf("promised request headers cannot include %q", k)
+ }
+ }
+ if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+ return err
+ }
+
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ if opts.Method != "GET" && opts.Method != "HEAD" {
+ return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+ }
+
+ msg := &startPushRequest{
+ parent: st,
+ method: opts.Method,
+ url: u,
+ header: cloneHeader(opts.Header),
+ done: errChanPool.Get().(chan error),
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case sc.serveMsgCh <- msg:
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case err := <-msg.done:
+ errChanPool.Put(msg.done)
+ return err
+ }
+}
+
+type startPushRequest struct {
+ parent *stream
+ method string
+ url *url.URL
+ header http.Header
+ done chan error
+}
+
+func (sc *serverConn) startPush(msg *startPushRequest) {
+ sc.serveG.check()
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
+ if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+ // responseWriter.Push checks that the stream is peer-initiated.
+ msg.done <- errStreamClosed
+ return
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ if !sc.pushEnabled {
+ msg.done <- http.ErrNotSupported
+ return
+ }
+
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
+ allocatePromisedID := func() (uint32, error) {
+ sc.serveG.check()
+
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
+ if !sc.pushEnabled {
+ return 0, http.ErrNotSupported
+ }
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
+ if sc.curPushedStreams+1 > sc.clientMaxStreams {
+ return 0, ErrPushLimitReached
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 2616 section 2.1 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 +func ValidTrailerHeader(name string) bool { + name = http.CanonicalHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/server_push_test.go b/vendor/golang.org/x/net/http2/server_push_test.go new file mode 100644 index 0000000..918fd30 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_push_test.go @@ -0,0 +1,521 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package http2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "sync" + "testing" + "time" +) + +func TestServer_Push_Success(t *testing.T) { + const ( + mainBody = "index page" + pushedBody = "pushed page" + userAgent = "testagent" + cookie = "testcookie" + ) + + var stURL string + checkPromisedReq := func(r *http.Request, wantMethod string, wantH http.Header) error { + if got, want := r.Method, wantMethod; got != want { + return fmt.Errorf("promised Req.Method=%q, want %q", got, want) + } + if got, want := r.Header, wantH; !reflect.DeepEqual(got, want) { + return fmt.Errorf("promised Req.Header=%q, want %q", got, want) + } + if got, want := "https://"+r.Host, stURL; got != want { + return fmt.Errorf("promised Req.Host=%q, want %q", got, want) + } + if r.Body == nil { + return fmt.Errorf("nil Body") + } + if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { + return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) + } + return nil + } + + errc := make(chan error, 3) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + // Push "/pushed?get" as a GET request, using an absolute URL. + opt := &http.PushOptions{ + Header: http.Header{ + "User-Agent": {userAgent}, + }, + } + if err := w.(http.Pusher).Push(stURL+"/pushed?get", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?get: %v", err) + return + } + // Push "/pushed?head" as a HEAD request, using a path. 
+ opt = &http.PushOptions{ + Method: "HEAD", + Header: http.Header{ + "User-Agent": {userAgent}, + "Cookie": {cookie}, + }, + } + if err := w.(http.Pusher).Push("/pushed?head", opt); err != nil { + errc <- fmt.Errorf("error pushing /pushed?head: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(mainBody))) + w.WriteHeader(200) + io.WriteString(w, mainBody) + errc <- nil + + case "/pushed?get": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + if err := checkPromisedReq(r, "GET", wantH); err != nil { + errc <- fmt.Errorf("/pushed?get: %v", err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(pushedBody))) + w.WriteHeader(200) + io.WriteString(w, pushedBody) + errc <- nil + + case "/pushed?head": + wantH := http.Header{} + wantH.Set("User-Agent", userAgent) + wantH.Set("Cookie", cookie) + if err := checkPromisedReq(r, "HEAD", wantH); err != nil { + errc <- fmt.Errorf("/pushed?head: %v", err) + return + } + w.WriteHeader(204) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + stURL = st.ts.URL + + // Send one request, which should push two responses. + st.greet() + getSlash(st) + for k := 0; k < 3; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } + + checkPushPromise := func(f Frame, promiseID uint32, wantH [][2]string) error { + pp, ok := f.(*PushPromiseFrame) + if !ok { + return fmt.Errorf("got a %T; want *PushPromiseFrame", f) + } + if !pp.HeadersEnded() { + return fmt.Errorf("want END_HEADERS flag in PushPromiseFrame") + } + if got, want := pp.PromiseID, promiseID; got != want { + return fmt.Errorf("got PromiseID %v; want %v", got, want) + } + gotH := st.decodeHeader(pp.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got promised headers %v; want %v", gotH, wantH) + } + return nil + } + checkHeaders := func(f Frame, wantH [][2]string) error { + hf, ok := f.(*HeadersFrame) + if !ok { + return fmt.Errorf("got a %T; want *HeadersFrame", f) + } + gotH := st.decodeHeader(hf.HeaderBlockFragment()) + if !reflect.DeepEqual(gotH, wantH) { + return fmt.Errorf("got response headers %v; want %v", gotH, wantH) + } + return nil + } + checkData := func(f Frame, wantData string) error { + df, ok := f.(*DataFrame) + if !ok { + return fmt.Errorf("got a %T; want *DataFrame", f) + } + if gotData := string(df.Data()); gotData != wantData { + return fmt.Errorf("got response data %q; want %q", gotData, wantData) + } + return nil + } + + // Stream 1 has 2 PUSH_PROMISE + HEADERS + DATA + // Stream 2 has HEADERS + DATA + // Stream 4 has HEADERS + expected := map[uint32][]func(Frame) error{ + 1: { + func(f Frame) error { + return checkPushPromise(f, 2, [][2]string{ + {":method", "GET"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?get"}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkPushPromise(f, 4, [][2]string{ + {":method", "HEAD"}, + {":scheme", "https"}, + {":authority", st.ts.Listener.Addr().String()}, + {":path", "/pushed?head"}, + {"cookie", cookie}, + {"user-agent", userAgent}, + }) + }, + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(mainBody))}, + }) + }, + 
func(f Frame) error { + return checkData(f, mainBody) + }, + }, + 2: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "200"}, + {"content-type", "text/html"}, + {"content-length", strconv.Itoa(len(pushedBody))}, + }) + }, + func(f Frame) error { + return checkData(f, pushedBody) + }, + }, + 4: { + func(f Frame) error { + return checkHeaders(f, [][2]string{ + {":status", "204"}, + }) + }, + }, + } + + consumed := map[uint32]int{} + for k := 0; len(expected) > 0; k++ { + f, err := st.readFrame() + if err != nil { + for id, left := range expected { + t.Errorf("stream %d: missing %d frames", id, len(left)) + } + t.Fatalf("readFrame %d: %v", k, err) + } + id := f.Header().StreamID + label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) + if len(expected[id]) == 0 { + t.Fatalf("%s: unexpected frame %#+v", label, f) + } + check := expected[id][0] + expected[id] = expected[id][1:] + if len(expected[id]) == 0 { + delete(expected, id) + } + if err := check(f); err != nil { + t.Fatalf("%s: %v", label, err) + } + consumed[id]++ + } +} + +func TestServer_Push_SuccessNoRace(t *testing.T) { + // Regression test for issue #18326. Ensure the request handler can mutate + // pushed request headers without racing with the PUSH_PROMISE write. + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + opt := &http.PushOptions{ + Header: http.Header{"User-Agent": {"testagent"}}, + } + if err := w.(http.Pusher).Push("/pushed", opt); err != nil { + errc <- fmt.Errorf("error pushing: %v", err) + return + } + w.WriteHeader(200) + errc <- nil + + case "/pushed": + // Update request header, ensure there is no race. + r.Header.Set("User-Agent", "newagent") + r.Header.Set("Cookie", "cookie") + w.WriteHeader(200) + errc <- nil + + default: + errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) + } + }) + + // Send one request, which should push one response. + st.greet() + getSlash(st) + for k := 0; k < 2; k++ { + select { + case <-time.After(2 * time.Second): + t.Errorf("timeout waiting for handler %d to finish", k) + case err := <-errc: + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestServer_Push_RejectRecursivePush(t *testing.T) { + // Expect two requests, but might get three if there's a bug and the second push succeeds. + errc := make(chan error, 3) + handler := func(w http.ResponseWriter, r *http.Request) error { + baseURL := "https://" + r.Host + switch r.URL.Path { + case "/": + if err := w.(http.Pusher).Push(baseURL+"/push1", nil); err != nil { + return fmt.Errorf("first Push()=%v, want nil", err) + } + return nil + + case "/push1": + if got, want := w.(http.Pusher).Push(baseURL+"/push2", nil), ErrRecursivePush; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + + default: + return fmt.Errorf("unexpected path: %q", r.URL.Path) + } + } + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + getSlash(st) + if err := <-errc; err != nil { + t.Errorf("First request failed: %v", err) + } + if err := <-errc; err != nil { + t.Errorf("Second request failed: %v", err) + } +} + +func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, *http.Request) error, settings ...Setting) { + // Expect one request, but might get two if there's a bug and the push succeeds. 
+ errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + errc <- doPush(w.(http.Pusher), r) + }) + defer st.Close() + st.greet() + if err := st.fr.WriteSettings(settings...); err != nil { + st.t.Fatalf("WriteSettings: %v", err) + } + st.wantSettingsAck() + getSlash(st) + if err := <-errc; err != nil { + t.Error(err) + } + // Should not get a PUSH_PROMISE frame. + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("stream should end after headers") + } +} + +func TestServer_Push_RejectIfDisabled(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingEnablePush, 0}) +} + +func TestServer_Push_RejectWhenNoConcurrentStreams(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if got, want := p.Push("https://"+r.Host+"/pushed", nil), ErrPushLimitReached; got != want { + return fmt.Errorf("Push()=%v, want %v", got, want) + } + return nil + }, + Setting{SettingMaxConcurrentStreams, 0}) +} + +func TestServer_Push_RejectWrongScheme(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("http://"+r.Host+"/pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL is http)") + } + return nil + }) +} + +func TestServer_Push_RejectMissingHost(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https:pushed", nil); err == nil { + return errors.New("Push() should have failed (push target URL missing host)") + } + return nil + }) +} + +func TestServer_Push_RejectRelativePath(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("../test", nil); err == nil { + return errors.New("Push() should have failed (push target is a relative path)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenMethod(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Method: "POST"}); err == nil { + return errors.New("Push() should have failed (cannot promise a POST)") + } + return nil + }) +} + +func TestServer_Push_RejectForbiddenHeader(t *testing.T) { + testServer_Push_RejectSingleRequest(t, + func(p http.Pusher, r *http.Request) error { + header := http.Header{ + "Content-Length": {"10"}, + "Content-Encoding": {"gzip"}, + "Trailer": {"Foo"}, + "Te": {"trailers"}, + "Host": {"test.com"}, + ":authority": {"test.com"}, + } + if err := p.Push("https://"+r.Host+"/pushed", &http.PushOptions{Header: header}); err == nil { + return errors.New("Push() should have failed (forbidden headers)") + } + return nil + }) +} + +func TestServer_Push_StateTransitions(t *testing.T) { + const body = "foo" + + gotPromise := make(chan bool) + finishedPush := make(chan bool) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + switch r.URL.RequestURI() { + case "/": + if err := w.(http.Pusher).Push("/pushed", nil); err != nil { + t.Errorf("Push error: %v", err) + } + // Don't finish this request until the push finishes so we don't + // nondeterministically interleave output frames with the push. 
+ <-finishedPush + case "/pushed": + <-gotPromise + } + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(body))) + w.WriteHeader(200) + io.WriteString(w, body) + }) + defer st.Close() + + st.greet() + if st.stream(2) != nil { + t.Fatal("stream 2 should be empty") + } + if got, want := st.streamState(2), stateIdle; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + getSlash(st) + // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. + st.wantPushPromise() + if got, want := st.streamState(2), stateHalfClosedRemote; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + // We stall the HTTP handler for "/pushed" until the above check. If we don't + // stall the handler, then the handler might write HEADERS and DATA and finish + // the stream before we check st.streamState(2) -- should that happen, we'll + // see stateClosed and fail the above check. + close(gotPromise) + st.wantHeaders() + if df := st.wantData(); !df.StreamEnded() { + t.Fatal("expected END_STREAM flag on DATA") + } + if got, want := st.streamState(2), stateClosed; got != want { + t.Fatalf("streamState(2)=%v, want %v", got, want) + } + close(finishedPush) +} + +func TestServer_Push_RejectAfterGoAway(t *testing.T) { + var readyOnce sync.Once + ready := make(chan struct{}) + errc := make(chan error, 2) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + select { + case <-ready: + case <-time.After(5 * time.Second): + errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed") + } + if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want { + errc <- fmt.Errorf("Push()=%v, want %v", got, want) + } + errc <- nil + }) + defer st.Close() + st.greet() + getSlash(st) + + // Send GOAWAY and wait for it to be processed. + st.fr.WriteGoAway(1, ErrCodeNo, nil) + go func() { + for { + select { + case <-ready: + return + default: + } + st.sc.serveMsgCh <- func(loopNum int) { + if !st.sc.pushEnabled { + readyOnce.Do(func() { close(ready) }) + } + } + } + }() + if err := <-errc; err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/net/http2/server_test.go b/vendor/golang.org/x/net/http2/server_test.go new file mode 100644 index 0000000..bd1ba20 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server_test.go @@ -0,0 +1,3725 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var stderrVerbose = flag.Bool("stderr_verbose", false, "Mirror verbosity to stderr, unbuffered") + +func stderrv() io.Writer { + if *stderrVerbose { + return os.Stderr + } + + return ioutil.Discard +} + +type serverTester struct { + cc net.Conn // client conn + t testing.TB + ts *httptest.Server + fr *Framer + serverLogBuf bytes.Buffer // logger for httptest.Server + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + hpackDec *hpack.Decoder + decodedHeaders [][2]string + + // If http2debug!=2, then we capture Frame debug logs that will be written + // to t.Log after a test fails. 
The read and write logs use separate locks + // and buffers so we don't accidentally introduce synchronization between + // the read and write goroutines, which may hide data races. + frameReadLogMu sync.Mutex + frameReadLogBuf bytes.Buffer + frameWriteLogMu sync.Mutex + frameWriteLogBuf bytes.Buffer + + // writing headers: + headerBuf bytes.Buffer + hpackEnc *hpack.Encoder +} + +func init() { + testHookOnPanicMu = new(sync.Mutex) + goAwayTimeout = 25 * time.Millisecond +} + +func resetHooks() { + testHookOnPanicMu.Lock() + testHookOnPanic = nil + testHookOnPanicMu.Unlock() +} + +type serverTesterOpt string + +var optOnlyServer = serverTesterOpt("only_server") +var optQuiet = serverTesterOpt("quiet_logging") +var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") + +func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + resetHooks() + + ts := httptest.NewUnstartedServer(handler) + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{NextProtoTLS}, + } + + var onlyServer, quiet, framerReuseFrames bool + h2server := new(Server) + for _, opt := range opts { + switch v := opt.(type) { + case func(*tls.Config): + v(tlsConfig) + case func(*httptest.Server): + v(ts) + case func(*Server): + v(h2server) + case serverTesterOpt: + switch v { + case optOnlyServer: + onlyServer = true + case optQuiet: + quiet = true + case optFramerReuseFrames: + framerReuseFrames = true + } + case func(net.Conn, http.ConnState): + ts.Config.ConnState = v + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + + ConfigureServer(ts.Config, h2server) + + st := &serverTester{ + t: t, + ts: ts, + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) + + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + if quiet { + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + } else { + ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) + } + ts.StartTLS() + + if VerboseLogs { + t.Logf("Running test server at: %s", ts.URL) + } + testHookGetServerConn = func(v *serverConn) { + st.scMu.Lock() + defer st.scMu.Unlock() + st.sc = v + } + log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) + if !onlyServer { + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.fr = NewFramer(cc, cc) + if framerReuseFrames { + st.fr.SetReuseFrames() + } + if !logFrameReads && !logFrameWrites { + st.fr.debugReadLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameReadLogMu.Lock() + fmt.Fprintf(&st.frameReadLogBuf, m, v...) + st.frameReadLogMu.Unlock() + } + st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameWriteLogMu.Lock() + fmt.Fprintf(&st.frameWriteLogBuf, m, v...) 
+ st.frameWriteLogMu.Unlock()
+ }
+ st.fr.logReads = true
+ st.fr.logWrites = true
+ }
+ }
+ return st
+}
+
+func (st *serverTester) closeConn() {
+ st.scMu.Lock()
+ defer st.scMu.Unlock()
+ st.sc.conn.Close()
+}
+
+func (st *serverTester) addLogFilter(phrase string) {
+ st.logFilter = append(st.logFilter, phrase)
+}
+
+func (st *serverTester) stream(id uint32) *stream {
+ ch := make(chan *stream, 1)
+ st.sc.serveMsgCh <- func(int) {
+ ch <- st.sc.streams[id]
+ }
+ return <-ch
+}
+
+func (st *serverTester) streamState(id uint32) streamState {
+ ch := make(chan streamState, 1)
+ st.sc.serveMsgCh <- func(int) {
+ state, _ := st.sc.state(id)
+ ch <- state
+ }
+ return <-ch
+}
+
+// loopNum reports how many times this conn's select loop has gone around.
+func (st *serverTester) loopNum() int {
+ lastc := make(chan int, 1)
+ st.sc.serveMsgCh <- func(loopNum int) {
+ lastc <- loopNum
+ }
+ return <-lastc
+}
+
+// awaitIdle heuristically waits for the server conn's select loop to be idle.
+// The heuristic is that the server connection's serve loop must schedule
+// 50 times in a row without any channel sends or receives occurring.
+func (st *serverTester) awaitIdle() {
+ remain := 50
+ last := st.loopNum()
+ for remain > 0 {
+ n := st.loopNum()
+ if n == last+1 {
+ remain--
+ } else {
+ remain = 50
+ }
+ last = n
+ }
+}
+
+func (st *serverTester) Close() {
+ if st.t.Failed() {
+ st.frameReadLogMu.Lock()
+ if st.frameReadLogBuf.Len() > 0 {
+ st.t.Logf("Framer read log:\n%s", st.frameReadLogBuf.String())
+ }
+ st.frameReadLogMu.Unlock()
+
+ st.frameWriteLogMu.Lock()
+ if st.frameWriteLogBuf.Len() > 0 {
+ st.t.Logf("Framer write log:\n%s", st.frameWriteLogBuf.String())
+ }
+ st.frameWriteLogMu.Unlock()
+
+ // If we failed already (and are likely in a Fatal,
+ // unwinding), force close the connection, so the
+ // httptest.Server doesn't wait forever for the conn
+ // to close.
+ if st.cc != nil {
+ st.cc.Close()
+ }
+ }
+ st.ts.Close()
+ if st.cc != nil {
+ st.cc.Close()
+ }
+ log.SetOutput(os.Stderr)
+}
+
+// greet initiates the client's HTTP/2 connection into a state where
+// frames may be sent.
+func (st *serverTester) greet() {
+ st.greetAndCheckSettings(func(Setting) error { return nil })
+}
+
+func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) {
+ st.writePreface()
+ st.writeInitialSettings()
+ st.wantSettings().ForeachSetting(checkSetting)
+ st.writeSettingsAck()
+
+ // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order.
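+ //
+ // Recap (sketch) of the exchange up to this point: client preface plus
+ // SETTINGS, server SETTINGS, client SETTINGS ACK; the loop below then
+ // accepts the server's SETTINGS ACK and its connection-level
+ // WINDOW_UPDATE in whichever order they arrive.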
+ var gotSettingsAck bool + var gotWindowUpdate bool + + for i := 0; i < 2; i++ { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + switch f := f.(type) { + case *SettingsFrame: + if !f.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } + gotSettingsAck = true + + case *WindowUpdateFrame: + if f.FrameHeader.StreamID != 0 { + st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID) + } + incr := uint32((&Server{}).initialConnRecvWindowSize() - initialWindowSize) + if f.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) + } + gotWindowUpdate = true + + default: + st.t.Fatalf("Wanting a settings ACK or window update, received a %T", f) + } + } + + if !gotSettingsAck { + st.t.Fatalf("Didn't get a settings ACK") + } + if !gotWindowUpdate { + st.t.Fatalf("Didn't get a window update") + } +} + +func (st *serverTester) writePreface() { + n, err := st.cc.Write(clientPreface) + if err != nil { + st.t.Fatalf("Error writing client preface: %v", err) + } + if n != len(clientPreface) { + st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(clientPreface)) + } +} + +func (st *serverTester) writeInitialSettings() { + if err := st.fr.WriteSettings(); err != nil { + st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (st *serverTester) writeSettingsAck() { + if err := st.fr.WriteSettingsAck(); err != nil { + st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) + } +} + +func (st *serverTester) writeHeaders(p HeadersFrameParam) { + if err := st.fr.WriteHeaders(p); err != nil { + st.t.Fatalf("Error writing HEADERS: %v", err) + } +} + +func (st *serverTester) writePriority(id uint32, p PriorityParam) { + if err := st.fr.WritePriority(id, p); err != nil { + st.t.Fatalf("Error writing PRIORITY: %v", err) + } +} + +func (st *serverTester) encodeHeaderField(k, v string) { + err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } +} + +// encodeHeaderRaw is the magic-free version of encodeHeader. +// It takes 0 or more (k, v) pairs and encodes them. +func (st *serverTester) encodeHeaderRaw(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + st.headerBuf.Reset() + for len(headers) > 0 { + k, v := headers[0], headers[1] + st.encodeHeaderField(k, v) + headers = headers[2:] + } + return st.headerBuf.Bytes() +} + +// encodeHeader encodes headers and returns their HPACK bytes. headers +// must contain an even number of key/value pairs. There may be +// multiple pairs for keys (e.g. "cookie"). The :method, :path, and +// :scheme headers default to GET, / and https. The :authority header +// defaults to st.ts.Listener.Addr(). +func (st *serverTester) encodeHeader(headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + + st.headerBuf.Reset() + defaultAuthority := st.ts.Listener.Addr().String() + + if len(headers) == 0 { + // Fast path, mostly for benchmarks, so test code doesn't pollute + // profiles when we're looking to improve server allocations. + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + if len(headers) == 2 && headers[0] == ":method" { + // Another fast path for benchmarks. 
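+ // Example (sketch): st.encodeHeader(":method", "POST") takes this branch
+ // and encodes {:method: POST, :scheme: https, :authority: <listener addr>,
+ // :path: /}.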
+ st.encodeHeaderField(":method", headers[1]) + st.encodeHeaderField(":scheme", "https") + st.encodeHeaderField(":authority", defaultAuthority) + st.encodeHeaderField(":path", "/") + return st.headerBuf.Bytes() + } + + pseudoCount := map[string]int{} + keys := []string{":method", ":scheme", ":authority", ":path"} + vals := map[string][]string{ + ":method": {"GET"}, + ":scheme": {"https"}, + ":authority": {defaultAuthority}, + ":path": {"/"}, + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + headers = headers[2:] + if _, ok := vals[k]; !ok { + keys = append(keys, k) + } + if strings.HasPrefix(k, ":") { + pseudoCount[k]++ + if pseudoCount[k] == 1 { + vals[k] = []string{v} + } else { + // Allows testing of invalid headers w/ dup pseudo fields. + vals[k] = append(vals[k], v) + } + } else { + vals[k] = append(vals[k], v) + } + } + for _, k := range keys { + for _, v := range vals[k] { + st.encodeHeaderField(k, v) + } + } + return st.headerBuf.Bytes() +} + +// bodylessReq1 writes a HEADERS frames with StreamID 1 and EndStream and EndHeaders set. +func (st *serverTester) bodylessReq1(headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) +} + +func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { + if err := st.fr.WriteData(streamID, endStream, data); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { + if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { + st.t.Fatalf("Error writing DATA: %v", err) + } +} + +func readFrameTimeout(fr *Framer, wait time.Duration) (Frame, error) { + ch := make(chan interface{}, 1) + go func() { + fr, err := fr.ReadFrame() + if err != nil { + ch <- err + } else { + ch <- fr + } + }() + t := time.NewTimer(wait) + select { + case v := <-ch: + t.Stop() + if fr, ok := v.(Frame); ok { + return fr, nil + } + return nil, v.(error) + case <-t.C: + return nil, errors.New("timeout waiting for frame") + } +} + +func (st *serverTester) readFrame() (Frame, error) { + return readFrameTimeout(st.fr, 2*time.Second) +} + +func (st *serverTester) wantHeaders() *HeadersFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) + } + hf, ok := f.(*HeadersFrame) + if !ok { + st.t.Fatalf("got a %T; want *HeadersFrame", f) + } + return hf +} + +func (st *serverTester) wantContinuation() *ContinuationFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) + } + cf, ok := f.(*ContinuationFrame) + if !ok { + st.t.Fatalf("got a %T; want *ContinuationFrame", f) + } + return cf +} + +func (st *serverTester) wantData() *DataFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a DATA frame: %v", err) + } + df, ok := f.(*DataFrame) + if !ok { + st.t.Fatalf("got a %T; want *DataFrame", f) + } + return df +} + +func (st *serverTester) wantSettings() *SettingsFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("got a %T; want *SettingsFrame", f) + } + return sf +} + +func (st *serverTester) wantPing() *PingFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a PING frame: 
%v", err) + } + pf, ok := f.(*PingFrame) + if !ok { + st.t.Fatalf("got a %T; want *PingFrame", f) + } + return pf +} + +func (st *serverTester) wantGoAway() *GoAwayFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) + } + gf, ok := f.(*GoAwayFrame) + if !ok { + st.t.Fatalf("got a %T; want *GoAwayFrame", f) + } + return gf +} + +func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) + } + rs, ok := f.(*RSTStreamFrame) + if !ok { + st.t.Fatalf("got a %T; want *RSTStreamFrame", f) + } + if rs.FrameHeader.StreamID != streamID { + st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) + } + if rs.ErrCode != errCode { + st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) + } +} + +func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { + f, err := st.readFrame() + if err != nil { + st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) + } + wu, ok := f.(*WindowUpdateFrame) + if !ok { + st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) + } + if wu.FrameHeader.StreamID != streamID { + st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (st *serverTester) wantSettingsAck() { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + sf, ok := f.(*SettingsFrame) + if !ok { + st.t.Fatalf("Wanting a settings ACK, received a %T", f) + } + if !sf.Header().Flags.Has(FlagSettingsAck) { + st.t.Fatal("Settings Frame didn't have ACK set") + } +} + +func (st *serverTester) wantPushPromise() *PushPromiseFrame { + f, err := st.readFrame() + if err != nil { + st.t.Fatal(err) + } + ppf, ok := f.(*PushPromiseFrame) + if !ok { + st.t.Fatalf("Wanted PushPromise, received %T", ppf) + } + return ppf +} + +func TestServer(t *testing.T) { + gotReq := make(chan bool, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + gotReq <- true + }) + defer st.Close() + + covers("3.5", ` + The server connection preface consists of a potentially empty + SETTINGS frame ([SETTINGS]) that MUST be the first frame the + server sends in the HTTP/2 connection. 
+ `) + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + + select { + case <-gotReq: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for request") + } +} + +func TestServer_Request_Get(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader("foo-bar", "some-value"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "GET" { + t.Errorf("Method = %q; want GET", r.Method) + } + if r.URL.Path != "/" { + t.Errorf("URL.Path = %q; want /", r.URL.Path) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if r.Close { + t.Error("Close = true; want false") + } + if !strings.Contains(r.RemoteAddr, ":") { + t.Errorf("RemoteAddr = %q; want something with a colon", r.RemoteAddr) + } + if r.Proto != "HTTP/2.0" || r.ProtoMajor != 2 || r.ProtoMinor != 0 { + t.Errorf("Proto = %q Major=%v,Minor=%v; want HTTP/2.0", r.Proto, r.ProtoMajor, r.ProtoMinor) + } + wantHeader := http.Header{ + "Foo-Bar": []string{"some-value"}, + } + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Get_PathSlashes(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":path", "/%2f/"), + EndStream: true, // no DATA frames + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.RequestURI != "/%2f/" { + t.Errorf("RequestURI = %q; want /%%2f/", r.RequestURI) + } + if r.URL.Path != "///" { + t.Errorf("URL.Path = %q; want ///", r.URL.Path) + } + }) +} + +// TODO: add a test with EndStream=true on the HEADERS but setting a +// Content-Length anyway. Should we just omit it and force it to +// zero? + +func TestServer_Request_Post_NoContentLength_EndStream(t *testing.T) { + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != 0 { + t.Errorf("ContentLength = %v; want 0", r.ContentLength) + } + if n, err := r.Body.Read([]byte(" ")); err != io.EOF || n != 0 { + t.Errorf("Read = %d, %v; want 0, EOF", n, err) + } + }) +} + +func TestServer_Request_Post_Body_ImmediateEOF(t *testing.T) { + testBodyContents(t, -1, "", func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, nil) // just kidding. empty body. 
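+ // (A zero-length DATA frame with END_STREAM set is a valid way to end a
+ // request that carries no body bytes; the handler should see an
+ // immediate EOF, as testBodyContents asserts above.)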
+ }) +} + +func TestServer_Request_Post_Body_OneData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_TwoData(t *testing.T) { + const content = "Some content" + testBodyContents(t, -1, content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, false, []byte(content[:5])) + st.writeData(1, true, []byte(content[5:])) + }) +} + +func TestServer_Request_Post_Body_ContentLength_Correct(t *testing.T) { + const content = "Some content" + testBodyContents(t, int64(len(content)), content, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", strconv.Itoa(len(content)), + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte(content)) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooLarge(t *testing.T) { + testBodyContentsFail(t, 3, "request declared a Content-Length of 3 but only wrote 2 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "3", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12")) + }) +} + +func TestServer_Request_Post_Body_ContentLength_TooSmall(t *testing.T) { + testBodyContentsFail(t, 4, "sender tried to send more than declared Content-Length of 4 bytes", + func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader( + ":method", "POST", + "content-length", "4", + ), + EndStream: false, // to say DATA frames are coming + EndHeaders: true, + }) + st.writeData(1, true, []byte("12345")) + }) +} + +func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(all) != wantBody { + t.Errorf("Read = %q; want %q", all, wantBody) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError string, write func(st *serverTester)) { + testServerRequest(t, write, func(r *http.Request) { + if r.Method != "POST" { + t.Errorf("Method = %q; want POST", r.Method) + } + if r.ContentLength != wantContentLength { + t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) + } + all, err := ioutil.ReadAll(r.Body) + if err == nil { + t.Fatalf("expected an error (%q) reading from the body. 
Successfully read %q instead.", + wantReadError, all) + } + if !strings.Contains(err.Error(), wantReadError) { + t.Fatalf("Body.Read = %v; want substring %q", err, wantReadError) + } + if err := r.Body.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + }) +} + +// Using a Host header, instead of :authority +func TestServer_Request_Get_Host(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", "", "host", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +// Using an :authority pseudo-header, instead of Host +func TestServer_Request_Get_Authority(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":authority", host), + EndStream: true, + EndHeaders: true, + }) + }, func(r *http.Request) { + if r.Host != host { + t.Errorf("Host = %q; want %q", r.Host, host) + } + }) +} + +func TestServer_Request_WithContinuation(t *testing.T) { + wantHeader := http.Header{ + "Foo-One": []string{"value-one"}, + "Foo-Two": []string{"value-two"}, + "Foo-Three": []string{"value-three"}, + } + testServerRequest(t, func(st *serverTester) { + fullHeaders := st.encodeHeader( + "foo-one", "value-one", + "foo-two", "value-two", + "foo-three", "value-three", + ) + remain := fullHeaders + chunks := 0 + for len(remain) > 0 { + const maxChunkSize = 5 + chunk := remain + if len(chunk) > maxChunkSize { + chunk = chunk[:maxChunkSize] + } + remain = remain[len(chunk):] + + if chunks == 0 { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: chunk, + EndStream: true, // no DATA frames + EndHeaders: false, // we'll have continuation frames + }) + } else { + err := st.fr.WriteContinuation(1, len(remain) == 0, chunk) + if err != nil { + t.Fatal(err) + } + } + chunks++ + } + if chunks < 2 { + t.Fatal("too few chunks") + } + }, func(r *http.Request) { + if !reflect.DeepEqual(r.Header, wantHeader) { + t.Errorf("Header = %#v; want %#v", r.Header, wantHeader) + } + }) +} + +// Concatenated cookie headers. 
("8.1.2.5 Compressing the Cookie Header Field") +func TestServer_Request_CookieConcat(t *testing.T) { + const host = "example.com" + testServerRequest(t, func(st *serverTester) { + st.bodylessReq1( + ":authority", host, + "cookie", "a=b", + "cookie", "c=d", + "cookie", "e=f", + ) + }, func(r *http.Request) { + const want = "a=b; c=d; e=f" + if got := r.Header.Get("Cookie"); got != want { + t.Errorf("Cookie = %q; want %q", got, want) + } + }) +} + +func TestServer_Request_Reject_CapitalHeader(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("UPPER", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameColon(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has:colon", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameNULL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("has\x00null", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldNameEmpty(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("", "v") }) +} + +func TestServer_Request_Reject_HeaderFieldValueNewline(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\nnewline") }) +} + +func TestServer_Request_Reject_HeaderFieldValueCR(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\rcarriage") }) +} + +func TestServer_Request_Reject_HeaderFieldValueDEL(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1("foo", "has\x7fdel") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_method(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":method", "") }) +} + +func TestServer_Request_Reject_Pseudo_ExactlyOne(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid value" ... + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("duplicate pseudo-header") + st.bodylessReq1(":method", "GET", ":method", "POST") + }) +} + +func TestServer_Request_Reject_Pseudo_AfterRegular(t *testing.T) { + // 8.1.2.3 Request Pseudo-Header Fields + // "All pseudo-header fields MUST appear in the header block + // before regular header fields. Any request or response that + // contains a pseudo-header field that appears in a header + // block after a regular header field MUST be treated as + // malformed (Section 8.1.2.6)." 
+ testRejectRequest(t, func(st *serverTester) { + st.addLogFilter("pseudo-header after regular header") + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}) + enc.WriteField(hpack.HeaderField{Name: "regular", Value: "foobar"}) + enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"}) + enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"}) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: buf.Bytes(), + EndStream: true, + EndHeaders: true, + }) + }) +} + +func TestServer_Request_Reject_Pseudo_Missing_path(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":path", "") }) +} + +func TestServer_Request_Reject_Pseudo_Missing_scheme(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "") }) +} + +func TestServer_Request_Reject_Pseudo_scheme_invalid(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { st.bodylessReq1(":scheme", "bogus") }) +} + +func TestServer_Request_Reject_Pseudo_Unknown(t *testing.T) { + testRejectRequest(t, func(st *serverTester) { + st.addLogFilter(`invalid pseudo-header ":unknown_thing"`) + st.bodylessReq1(":unknown_thing", "") + }) +} + +func testRejectRequest(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }) + defer st.Close() + + st.greet() + send(st) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("server request made it to handler; should've been rejected") + }, optQuiet) + defer st.Close() + + st.greet() + send(st) + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeProtocol { + t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) + } +} + +// Section 5.1, on idle connections: "Receiving any frame other than +// HEADERS or PRIORITY on a stream in this state MUST be treated as a +// connection error (Section 5.4.1) of type PROTOCOL_ERROR." 
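+ //
+ // For example (sketch): stream 123 below was never opened by the client,
+ // so each of these frames on it should be answered with a GOAWAY carrying
+ // ErrCodeProtocol, as testRejectRequestWithProtocolError asserts.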
+func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) {
+ testRejectRequestWithProtocolError(t, func(st *serverTester) {
+ st.fr.WriteWindowUpdate(123, 456)
+ })
+}
+func TestRejectFrameOnIdle_Data(t *testing.T) {
+ testRejectRequestWithProtocolError(t, func(st *serverTester) {
+ st.fr.WriteData(123, true, nil)
+ })
+}
+func TestRejectFrameOnIdle_RSTStream(t *testing.T) {
+ testRejectRequestWithProtocolError(t, func(st *serverTester) {
+ st.fr.WriteRSTStream(123, ErrCodeCancel)
+ })
+}
+
+func TestServer_Request_Connect(t *testing.T) {
+ testServerRequest(t, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeaderRaw(
+ ":method", "CONNECT",
+ ":authority", "example.com:123",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }, func(r *http.Request) {
+ if g, w := r.Method, "CONNECT"; g != w {
+ t.Errorf("Method = %q; want %q", g, w)
+ }
+ if g, w := r.RequestURI, "example.com:123"; g != w {
+ t.Errorf("RequestURI = %q; want %q", g, w)
+ }
+ if g, w := r.URL.Host, "example.com:123"; g != w {
+ t.Errorf("URL.Host = %q; want %q", g, w)
+ }
+ })
+}
+
+func TestServer_Request_Connect_InvalidPath(t *testing.T) {
+ testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeaderRaw(
+ ":method", "CONNECT",
+ ":authority", "example.com:123",
+ ":path", "/bogus",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+func TestServer_Request_Connect_InvalidScheme(t *testing.T) {
+ testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeaderRaw(
+ ":method", "CONNECT",
+ ":authority", "example.com:123",
+ ":scheme", "https",
+ ),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ })
+}
+
+func TestServer_Ping(t *testing.T) {
+ st := newServerTester(t, nil)
+ defer st.Close()
+ st.greet()
+
+ // Server should ignore this one, since it has ACK set.
+ ackPingData := [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+ if err := st.fr.WritePing(true, ackPingData); err != nil {
+ t.Fatal(err)
+ }
+
+ // But the server should reply to this one, since ACK is false.
+ pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+ if err := st.fr.WritePing(false, pingData); err != nil {
+ t.Fatal(err)
+ }
+
+ pf := st.wantPing()
+ if !pf.Flags.Has(FlagPingAck) {
+ t.Error("response ping doesn't have ACK set")
+ }
+ if pf.Data != pingData {
+ t.Errorf("response ping has data %q; want %q", pf.Data, pingData)
+ }
+}
+
+func TestServer_RejectsLargeFrames(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("see golang.org/issue/13434")
+ }
+
+ st := newServerTester(t, nil)
+ defer st.Close()
+ st.greet()
+
+ // Write a frame that is too large (too large by one byte).
+ // We ignore the return value because it's expected that the server
+ // will only read the first 9 bytes (the header) and then disconnect.
+ st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1))
+
+ gf := st.wantGoAway()
+ if gf.ErrCode != ErrCodeFrameSize {
+ t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize)
+ }
+ if st.serverLogBuf.Len() != 0 {
+ // Previously we spun here for a bit until the GOAWAY disconnect
+ // timer fired, logging while we fired.
+ t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) + } +} + +func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // data coming + EndHeaders: true, + }) + st.writeData(1, false, []byte("abcdef")) + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + st.writeData(1, true, []byte("ghijkl")) // END_STREAM here + puppet.do(readBodyHandler(t, "ghi")) + puppet.do(readBodyHandler(t, "jkl")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(0, 3) // no more stream-level, since END_STREAM +} + +// the version of the TestServer_Handler_Sends_WindowUpdate with padding. +// See golang.org/issue/16556 +func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { + puppet := newHandlerPuppet() + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + puppet.act(w, r) + }) + defer st.Close() + defer puppet.done() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + st.writeDataPadded(1, false, []byte("abcdef"), []byte{0, 0, 0, 0}) + + // Expect to immediately get our 5 bytes of padding back for + // both the connection and stream (4 bytes of padding + 1 byte of length) + st.wantWindowUpdate(0, 5) + st.wantWindowUpdate(1, 5) + + puppet.do(readBodyHandler(t, "abc")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) + + puppet.do(readBodyHandler(t, "def")) + st.wantWindowUpdate(0, 3) + st.wantWindowUpdate(1, 3) +} + +func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { + st := newServerTester(t, nil) + defer st.Close() + st.greet() + if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { + t.Fatal(err) + } + gf := st.wantGoAway() + if gf.ErrCode != ErrCodeFlowControl { + t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) + } + if gf.LastStreamID != 0 { + t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) + } +} + +func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { + inHandler := make(chan bool) + blockHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-blockHandler + }) + defer st.Close() + defer close(blockHandler) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + // Send a bogus window update: + if err := st.fr.WriteWindowUpdate(1, 1<<31-1); err != nil { + t.Fatal(err) + } + st.wantRSTStream(1, ErrCodeFlowControl) +} + +// testServerPostUnblock sends a hanging POST with unsent data to handler, +// then runs fn once in the handler, and verifies that the error returned from +// handler is acceptable. It fails if takes over 5 seconds for handler to exit. 
+func testServerPostUnblock(t *testing.T, + handler func(http.ResponseWriter, *http.Request) error, + fn func(*serverTester), + checkErr func(error), + otherHeaders ...string) { + inHandler := make(chan bool) + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + errc <- handler(w, r) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(append([]string{":method", "POST"}, otherHeaders...)...), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + fn(st) + select { + case err := <-errc: + if checkErr != nil { + checkErr(err) + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for Handler to return") + } +} + +func TestServer_RSTStream_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, + func(err error) { + want := StreamError{StreamID: 0x1, Code: 0x8} + if !reflect.DeepEqual(err, want) { + t.Errorf("Read error = %v; want %v", err, want) + } + }, + ) +} + +func TestServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + // Run this test a bunch, because it doesn't always + // deadlock. But with a bunch, it did. + n := 50 + if testing.Short() { + n = 5 + } + for i := 0; i < n; i++ { + testServer_RSTStream_Unblocks_Header_Write(t) + } +} + +func testServer_RSTStream_Unblocks_Header_Write(t *testing.T) { + inHandler := make(chan bool, 1) + unblockHandler := make(chan bool, 1) + headerWritten := make(chan bool, 1) + wroteRST := make(chan bool, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + <-wroteRST + w.Header().Set("foo", "bar") + w.WriteHeader(200) + w.(http.Flusher).Flush() + headerWritten <- true + <-unblockHandler + }) + defer st.Close() + + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + wroteRST <- true + st.awaitIdle() + select { + case <-headerWritten: + case <-time.After(2 * time.Second): + t.Error("timeout waiting for header write") + } + unblockHandler <- true +} + +func TestServer_DeadConn_Unblocks_Read(t *testing.T) { + testServerPostUnblock(t, + func(w http.ResponseWriter, r *http.Request) (err error) { + _, err = r.Body.Read(make([]byte, 1)) + return + }, + func(st *serverTester) { st.cc.Close() }, + func(err error) { + if err == nil { + t.Error("unexpected nil error from Request.Body.Read") + } + }, + ) +} + +var blockUntilClosed = func(w http.ResponseWriter, r *http.Request) error { + <-w.(http.CloseNotifier).CloseNotify() + return nil +} + +func TestServer_CloseNotify_After_RSTStream(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }, nil) +} + +func TestServer_CloseNotify_After_ConnClose(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { st.cc.Close() }, nil) +} + +// that CloseNotify unblocks after a stream error due to the client's +// problem that's unrelated to them explicitly canceling it (which is +// 
TestServer_CloseNotify_After_RSTStream above) +func TestServer_CloseNotify_After_StreamError(t *testing.T) { + testServerPostUnblock(t, blockUntilClosed, func(st *serverTester) { + // data longer than declared Content-Length => stream error + st.writeData(1, true, []byte("1234")) + }, nil, "content-length", "3") +} + +func TestServer_StateTransitions(t *testing.T) { + var st *serverTester + inHandler := make(chan bool) + writeData := make(chan bool) + leaveHandler := make(chan bool) + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + if st.stream(1) == nil { + t.Errorf("nil stream 1 in handler") + } + if got, want := st.streamState(1), stateOpen; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + writeData <- true + if n, err := r.Body.Read(make([]byte, 1)); n != 0 || err != io.EOF { + t.Errorf("body read = %d, %v; want 0, EOF", n, err) + } + if got, want := st.streamState(1), stateHalfClosedRemote; got != want { + t.Errorf("in handler, state is %v; want %v", got, want) + } + + <-leaveHandler + }) + st.greet() + if st.stream(1) != nil { + t.Fatal("stream 1 should be empty") + } + if got := st.streamState(1); got != stateIdle { + t.Fatalf("stream 1 should be idle; got %v", got) + } + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, // keep it open + EndHeaders: true, + }) + <-inHandler + <-writeData + st.writeData(1, true, nil) + + leaveHandler <- true + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Fatal("expected END_STREAM flag") + } + + if got, want := st.streamState(1), stateClosed; got != want { + t.Errorf("at end, state is %v; want %v", got, want) + } + if st.stream(1) != nil { + t.Fatal("at end, stream 1 should be gone") + } +} + +// test HEADERS w/o EndHeaders + another HEADERS (should get rejected) +func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { + testServerRejectsConn(t, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, + }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. 
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+// test HEADERS w/o EndHeaders + PING (should get rejected)
+func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		if err := st.fr.WritePing(false, [8]byte{}); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected)
+func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+		st.wantHeaders()
+		if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID
+func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    false,
+		})
+		if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// No HEADERS on stream 0.
+func TestServer_Rejects_Headers0(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      0,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+	})
+}
+
+// No CONTINUATION on stream 0.
+func TestServer_Rejects_Continuation0(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// No PRIORITY on stream 0.
+func TestServer_Rejects_Priority0(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writePriority(0, PriorityParam{StreamDep: 1})
+	})
+}
+
+// No HEADERS frame with a self-dependence.
+func TestServer_Rejects_HeadersSelfDependence(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+			Priority:      PriorityParam{StreamDep: 1},
+		})
+	})
+}
+
+// No PRIORITY frame with a self-dependence.
+func TestServer_Rejects_PrioritySelfDependence(t *testing.T) {
+	testServerRejectsStream(t, ErrCodeProtocol, func(st *serverTester) {
+		st.fr.AllowIllegalWrites = true
+		st.writePriority(1, PriorityParam{StreamDep: 1})
+	})
+}
+
+func TestServer_Rejects_PushPromise(t *testing.T) {
+	testServerRejectsConn(t, func(st *serverTester) {
+		pp := PushPromiseParam{
+			StreamID:  1,
+			PromiseID: 3,
+		}
+		if err := st.fr.WritePushPromise(pp); err != nil {
+			t.Fatal(err)
+		}
+	})
+}
+
+// testServerRejectsConn tests that the server hangs up with a GOAWAY
+// frame and a server close after the client does something
+// deserving a CONNECTION_ERROR.
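+//
+// (A note on the assertion below: per RFC 7540 section 5.4.1, a connection
+// error is handled by sending a GOAWAY carrying the last successfully
+// processed stream ID and an error code, then closing the connection; the
+// reader goroutine in this helper checks the conn really does reach io.EOF.)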
+func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+	st.addLogFilter("connection error: PROTOCOL_ERROR")
+	defer st.Close()
+	st.greet()
+	writeReq(st)
+
+	st.wantGoAway()
+	errc := make(chan error, 1)
+	go func() {
+		fr, err := st.fr.ReadFrame()
+		if err == nil {
+			err = fmt.Errorf("got frame of type %T", fr)
+		}
+		errc <- err
+	}()
+	select {
+	case err := <-errc:
+		if err != io.EOF {
+			t.Errorf("ReadFrame = %v; want io.EOF", err)
+		}
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for disconnect")
+	}
+}
+
+// testServerRejectsStream tests that the server sends a RST_STREAM with the provided
+// error code after a client sends a bogus request.
+func testServerRejectsStream(t *testing.T, code ErrCode, writeReq func(*serverTester)) {
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {})
+	defer st.Close()
+	st.greet()
+	writeReq(st)
+	st.wantRSTStream(1, code)
+}
+
+// testServerRequest sets up an idle HTTP/2 connection, lets you
+// write a single request with writeReq, and then verifies that the
+// *http.Request is built correctly in checkReq.
+func testServerRequest(t *testing.T, writeReq func(*serverTester), checkReq func(*http.Request)) {
+	gotReq := make(chan bool, 1)
+	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+		if r.Body == nil {
+			t.Fatal("nil Body")
+		}
+		checkReq(r)
+		gotReq <- true
+	})
+	defer st.Close()
+
+	st.greet()
+	writeReq(st)
+
+	select {
+	case <-gotReq:
+	case <-time.After(2 * time.Second):
+		t.Error("timeout waiting for request")
+	}
+}
+
+func getSlash(st *serverTester) { st.bodylessReq1() }
+
+func TestServer_Response_NoData(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// Nothing.
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if !hf.StreamEnded() {
+			t.Fatal("want END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+	})
+}
+
+func TestServer_Response_NoData_Header_FooBar(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Foo-Bar", "some-value")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if !hf.StreamEnded() {
+			t.Fatal("want END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo-bar", "some-value"},
+			{"content-length", "0"},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) {
+	const msg = "<html>this is HTML."
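+	// When a handler writes without setting Content-Type, the response
+	// writer sniffs one from the first 512 bytes via http.DetectContentType;
+	// the explicit "foo/bar" set below must win even though msg would
+	// otherwise sniff as text/html.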
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Content-Type", "foo/bar")
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "foo/bar"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+func TestServer_Response_TransferEncoding_chunked(t *testing.T) {
+	const msg = "hi"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Transfer-Encoding", "chunked") // should be stripped
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed only after the initial write.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+// Header accessed before the initial write and later mutated.
+func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("foo", "proper value")
+		io.WriteString(w, msg)
+		w.Header().Set("foo", "should be ignored")
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"foo", "proper value"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+	})
+}
+
+func TestServer_Response_Data_SniffLenType(t *testing.T) {
+	const msg = "<html>this is HTML."
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("don't want END_STREAM, expecting data")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"},
+			{"content-length", strconv.Itoa(len(msg))},
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		df := st.wantData()
+		if !df.StreamEnded() {
+			t.Error("expected DATA to have END_STREAM flag")
+		}
+		if got := string(df.Data()); got != msg {
+			t.Errorf("got DATA %q; want %q", got, msg)
+		}
+	})
+}
+
+func TestServer_Response_Header_Flush_MidWrite(t *testing.T) {
+	const msg = "<html>this is HTML"
+	const msg2 = ", and this is the next chunk"
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		io.WriteString(w, msg)
+		w.(http.Flusher).Flush()
+		io.WriteString(w, msg2)
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/html; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		{
+			df := st.wantData()
+			if df.StreamEnded() {
+				t.Error("unexpected END_STREAM flag")
+			}
+			if got := string(df.Data()); got != msg {
+				t.Errorf("got DATA %q; want %q", got, msg)
+			}
+		}
+		{
+			df := st.wantData()
+			if !df.StreamEnded() {
+				t.Error("wanted END_STREAM flag on last data chunk")
+			}
+			if got := string(df.Data()); got != msg2 {
+				t.Errorf("got DATA %q; want %q", got, msg2)
+			}
+		}
+	})
+}
+
+func TestServer_Response_LargeWrite(t *testing.T) {
+	const size = 1 << 20
+	const maxFrameSize = 16 << 10
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		if err := st.fr.WriteSettings(
+			Setting{SettingInitialWindowSize, 0},
+			Setting{SettingMaxFrameSize, maxFrameSize},
+		); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		// Give the handler quota to write:
+		if err := st.fr.WriteWindowUpdate(1, size); err != nil {
+			t.Fatal(err)
+		}
+		// Give the handler quota to write to connection-level
+		// window as well
+		if err := st.fr.WriteWindowUpdate(0, size); err != nil {
+			t.Fatal(err)
+		}
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+		goth := st.decodeHeader(hf.HeaderBlockFragment())
+		wanth := [][2]string{
+			{":status", "200"},
+			{"content-type", "text/plain; charset=utf-8"}, // sniffed
+			// and no content-length
+		}
+		if !reflect.DeepEqual(goth, wanth) {
+			t.Errorf("Got headers %v; want %v", goth, wanth)
+		}
+		var bytes, frames int
+		for {
+			df := st.wantData()
+			bytes += len(df.Data())
+			frames++
+			for _, b := range df.Data() {
+				if b != 'a' {
+					t.Fatal("non-'a' byte seen in DATA")
+				}
+			}
+			if df.StreamEnded() {
+				break
+			}
+		}
+		if bytes != size {
+			t.Errorf("Got %d bytes; want %d", bytes, size)
+		}
+		if want := int(size / maxFrameSize); frames < want || frames > want*2 {
+			t.Errorf("Got %d frames; want %d", frames, want)
+		}
+	})
+}
+
+// Test that the handler can't write more than the client allows
+func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) {
+	// Make these reads. Before each read, the client adds exactly enough
+	// flow-control to satisfy the read. Numbers chosen arbitrarily.
+	reads := []int{123, 1, 13, 127}
+	size := 0
+	for _, n := range reads {
+		size += n
+	}
+
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.(http.Flusher).Flush()
+		n, err := w.Write(bytes.Repeat([]byte("a"), size))
+		if err != nil {
+			return fmt.Errorf("Write error: %v", err)
+		}
+		if n != size {
+			return fmt.Errorf("wrong size %d from Write", n)
+		}
+		return nil
+	}, func(st *serverTester) {
+		// Set the window size to something explicit for this test.
+		// It's also how much initial data we expect.
+		if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, uint32(reads[0])}); err != nil {
+			t.Fatal(err)
+		}
+		st.wantSettingsAck()
+
+		getSlash(st) // make the single request
+
+		hf := st.wantHeaders()
+		if hf.StreamEnded() {
+			t.Fatal("unexpected END_STREAM flag")
+		}
+		if !hf.HeadersEnded() {
+			t.Fatal("want END_HEADERS flag")
+		}
+
+		df := st.wantData()
+		if got := len(df.Data()); got != reads[0] {
+			t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got)
+		}
+
+		for _, quota := range reads[1:] {
+			if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil {
+				t.Fatal(err)
+			}
+			df := st.wantData()
+			if int(quota) != len(df.Data()) {
+				t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota)
+			}
+		}
+	})
+}
+
+// Test that the handler blocked in a Write is unblocked if the client sends a RST_STREAM.
+func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { + const size = 1 << 20 + const maxFrameSize = 16 << 10 + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + errc := make(chan error, 1) + go func() { + _, err := w.Write(bytes.Repeat([]byte("a"), size)) + errc <- err + }() + select { + case err := <-errc: + if err == nil { + return errors.New("unexpected nil error from Write in handler") + } + return nil + case <-time.After(2 * time.Second): + return errors.New("timeout waiting for Write in handler") + } + }, func(st *serverTester) { + if err := st.fr.WriteSettings( + Setting{SettingInitialWindowSize, 0}, + Setting{SettingMaxFrameSize, maxFrameSize}, + ); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { + t.Fatal(err) + } + }) +} + +func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.(http.Flusher).Flush() + // Nothing; send empty DATA + return nil + }, func(st *serverTester) { + // Handler gets no data quota: + if err := st.fr.WriteSettings(Setting{SettingInitialWindowSize, 0}); err != nil { + t.Fatal(err) + } + st.wantSettingsAck() + + getSlash(st) // make the single request + + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + + df := st.wantData() + if got := len(df.Data()); got != 0 { + t.Fatalf("unexpected %d DATA bytes; want 0", got) + } + if !df.StreamEnded() { + t.Fatal("DATA didn't have END_STREAM") + } + }) +} + +func TestServer_Response_Automatic100Continue(t *testing.T) { + const msg = "foo" + const reply = "bar" + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + if v := r.Header.Get("Expect"); v != "" { + t.Errorf("Expect header = %q; want empty", v) + } + buf := make([]byte, len(msg)) + // This read should trigger the 100-continue being sent. + if n, err := io.ReadFull(r.Body, buf); err != nil || n != len(msg) || string(buf) != msg { + return fmt.Errorf("ReadFull = %q, %v; want %q, nil", buf[:n], err, msg) + } + _, err := io.WriteString(w, reply) + return err + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "POST", "expect", "100-continue"), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "100"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Fatalf("Got headers %v; want %v", goth, wanth) + } + + // Okay, they sent status 100, so we can send our + // gigantic and/or sensitive "foo" payload now. 
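+		// (In HTTP/2, that interim 100 is simply a HEADERS frame carrying
+		// ":status 100" on the same stream, ahead of the final response
+		// HEADERS; that's what the wanth check above just matched.)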
+ st.writeData(1, true, []byte(msg)) + + st.wantWindowUpdate(0, uint32(len(msg))) + + hf = st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("expected data to follow") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + goth = st.decodeHeader(hf.HeaderBlockFragment()) + wanth = [][2]string{ + {":status", "200"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", strconv.Itoa(len(reply))}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } + + df := st.wantData() + if string(df.Data()) != reply { + t.Errorf("Client read %q; want %q", df.Data(), reply) + } + if !df.StreamEnded() { + t.Errorf("expect data stream end") + } + }) +} + +func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { + errc := make(chan error, 1) + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + p := []byte("some data.\n") + for { + _, err := w.Write(p) + if err != nil { + errc <- err + return nil + } + } + }, func(st *serverTester) { + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: false, + EndHeaders: true, + }) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("unexpected END_STREAM flag") + } + if !hf.HeadersEnded() { + t.Fatal("want END_HEADERS flag") + } + // Close the connection and wait for the handler to (hopefully) notice. + st.cc.Close() + select { + case <-errc: + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_Too_Many_Streams(t *testing.T) { + const testPath = "/some/path" + + inHandler := make(chan uint32) + leaveHandler := make(chan bool) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + id := w.(*responseWriter).rws.stream.id + inHandler <- id + if id == 1+(defaultMaxStreams+1)*2 && r.URL.Path != testPath { + t.Errorf("decoded final path as %q; want %q", r.URL.Path, testPath) + } + <-leaveHandler + }) + defer st.Close() + st.greet() + nextStreamID := uint32(1) + streamID := func() uint32 { + defer func() { nextStreamID += 2 }() + return nextStreamID + } + sendReq := func(id uint32, headers ...string) { + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(headers...), + EndStream: true, + EndHeaders: true, + }) + } + for i := 0; i < defaultMaxStreams; i++ { + sendReq(streamID()) + <-inHandler + } + defer func() { + for i := 0; i < defaultMaxStreams; i++ { + leaveHandler <- true + } + }() + + // And this one should cross the limit: + // (It's also sent as a CONTINUATION, to verify we still track the decoder context, + // even if we're rejecting it) + rejectID := streamID() + headerBlock := st.encodeHeader(":path", testPath) + frag1, frag2 := headerBlock[:3], headerBlock[3:] + st.writeHeaders(HeadersFrameParam{ + StreamID: rejectID, + BlockFragment: frag1, + EndStream: true, + EndHeaders: false, // CONTINUATION coming + }) + if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { + t.Fatal(err) + } + st.wantRSTStream(rejectID, ErrCodeProtocol) + + // But let a handler finish: + leaveHandler <- true + st.wantHeaders() + + // And now another stream should be able to start: + goodID := streamID() + sendReq(goodID, ":path", testPath) + select { + case got := <-inHandler: + if got != goodID { + t.Errorf("Got stream %d; want %d", got, goodID) + } + case <-time.After(3 * time.Second): + t.Error("timeout waiting for handler") + } +} + +// So many response headers that the server needs to use CONTINUATION frames: +func 
TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		h := w.Header()
+		for i := 0; i < 5000; i++ {
+			h.Set(fmt.Sprintf("x-header-%d", i), fmt.Sprintf("x-value-%d", i))
+		}
+		return nil
+	}, func(st *serverTester) {
+		getSlash(st)
+		hf := st.wantHeaders()
+		if hf.HeadersEnded() {
+			t.Fatal("got unwanted END_HEADERS flag")
+		}
+		n := 0
+		for {
+			n++
+			cf := st.wantContinuation()
+			if cf.HeadersEnded() {
+				break
+			}
+		}
+		if n < 5 {
+			t.Errorf("Only got %d CONTINUATION frames; expected 5+ (currently 6)", n)
+		}
+	})
+}
+
+// This previously crashed (reported by Mathieu Lonjaret as observed
+// while using Camlistore) because we got a DATA frame from the client
+// after the handler exited and our logic at the time was wrong,
+// keeping a stream in the map in stateClosed, which tickled an
+// invariant check later when we tried to remove that stream (via
+// defer sc.closeAllStreamsOnConnClose) when the serverConn serve loop
+// ended.
+func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) {
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		// nothing
+		return nil
+	}, func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     false, // DATA is coming
+			EndHeaders:    true,
+		})
+		hf := st.wantHeaders()
+		if !hf.HeadersEnded() || !hf.StreamEnded() {
+			t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf)
+		}
+
+		// Sent when a Handler closes while a client has
+		// indicated it's still sending DATA:
+		st.wantRSTStream(1, ErrCodeNo)
+
+		// Now the handler has ended, so it's ended its
+		// stream, but the client hasn't closed its side
+		// (stateClosedLocal). So send more data and verify
+		// it doesn't crash with an internal invariant panic, like
+		// it did before.
+		st.writeData(1, true, []byte("foo"))
+
+		// Get our flow control bytes back, since the handler didn't get them.
+		st.wantWindowUpdate(0, uint32(len("foo")))
+
+		// Sent after a peer sends data anyway (admittedly the
+		// previous RST_STREAM might've still been in-flight),
+		// but they'll get the more friendly 'cancel' code
+		// first.
+		st.wantRSTStream(1, ErrCodeStreamClosed)
+
+		// Set up a bunch of machinery to record the panic we saw
+		// previously.
+		var (
+			panMu    sync.Mutex
+			panicVal interface{}
+		)
+
+		testHookOnPanicMu.Lock()
+		testHookOnPanic = func(sc *serverConn, pv interface{}) bool {
+			panMu.Lock()
+			panicVal = pv
+			panMu.Unlock()
+			return true
+		}
+		testHookOnPanicMu.Unlock()
+
+		// Now force the serve loop to end, via closing the connection.
+		st.cc.Close()
+		select {
+		case <-st.sc.doneServing:
+			// Loop has exited.
+ panMu.Lock() + got := panicVal + panMu.Unlock() + if got != nil { + t.Errorf("Got panic: %v", got) + } + case <-time.After(5 * time.Second): + t.Error("timeout") + } + }) +} + +func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } +func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } + +func testRejectTLS(t *testing.T, max uint16) { + st := newServerTester(t, nil, func(c *tls.Config) { + c.MaxVersion = max + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Rejects_TLSBadCipher(t *testing.T) { + st := newServerTester(t, nil, func(c *tls.Config) { + // Only list bad ones: + c.CipherSuites = []uint16{ + tls.TLS_RSA_WITH_RC4_128_SHA, + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + } + }) + defer st.Close() + gf := st.wantGoAway() + if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { + t.Errorf("Got error code %v; want %v", got, want) + } +} + +func TestServer_Advertises_Common_Cipher(t *testing.T) { + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + st := newServerTester(t, nil, func(c *tls.Config) { + // Have the client only support the one required by the spec. + c.CipherSuites = []uint16{requiredSuite} + }, func(ts *httptest.Server) { + var srv *http.Server = ts.Config + // Have the server configured with no specific cipher suites. + // This tests that Go's defaults include the required one. + srv.TLSConfig = nil + }) + defer st.Close() + st.greet() +} + +func (st *serverTester) onHeaderField(f hpack.HeaderField) { + if f.Name == "date" { + return + } + st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) +} + +func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { + st.decodedHeaders = nil + if _, err := st.hpackDec.Write(headerBlock); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + if err := st.hpackDec.Close(); err != nil { + st.t.Fatalf("hpack decoding error: %v", err) + } + return st.decodedHeaders +} + +// testServerResponse sets up an idle HTTP/2 connection. The client function should +// write a single request that must be handled by the handler. This waits up to 5s +// for client to return, then up to an additional 2s for the handler to return. 
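+// Handler errors are funneled through a channel and turned into fatal test
+// failures here, so a misbehaving handler can't fail silently in its own
+// goroutine.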
+func testServerResponse(t testing.TB, + handler func(http.ResponseWriter, *http.Request) error, + client func(*serverTester), +) { + errc := make(chan error, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if r.Body == nil { + t.Fatal("nil Body") + } + errc <- handler(w, r) + }) + defer st.Close() + + donec := make(chan bool) + go func() { + defer close(donec) + st.greet() + client(st) + }() + + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout in client") + } + + select { + case err := <-errc: + if err != nil { + t.Fatalf("Error in handler: %v", err) + } + case <-time.After(2 * time.Second): + t.Fatal("timeout in handler") + } +} + +// readBodyHandler returns an http Handler func that reads len(want) +// bytes from r.Body and fails t if the contents read were not +// the value of want. +func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + buf := make([]byte, len(want)) + _, err := io.ReadFull(r.Body, buf) + if err != nil { + t.Error(err) + return + } + if string(buf) != want { + t.Errorf("read %q; want %q", buf, want) + } + } +} + +// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See: +// https://github.com/tatsuhiro-t/nghttp2/issues/140 & +// http://sourceforge.net/p/curl/bugs/1472/ +func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) } +func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) } + +func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) { + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + if testing.Short() { + t.Skip("skipping curl test in short mode") + } + requireCurl(t) + var gotConn int32 + testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) } + + const msg = "Hello from curl!\n" + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Foo", "Bar") + w.Header().Set("Client-Proto", r.Proto) + io.WriteString(w, msg) + })) + ConfigureServer(ts.Config, &Server{ + PermitProhibitedCipherSuites: permitProhibitedCipherSuites, + }) + ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config + ts.StartTLS() + defer ts.Close() + + t.Logf("Running test server for curl to hit at: %s", ts.URL) + container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL) + defer kill(container) + resc := make(chan interface{}, 1) + go func() { + res, err := dockerLogs(container) + if err != nil { + resc <- err + } else { + resc <- res + } + }() + select { + case res := <-resc: + if err, ok := res.(error); ok { + t.Fatal(err) + } + body := string(res.([]byte)) + // Search for both "key: value" and "key:value", since curl changed their format + // Our Dockerfile contains the latest version (no space), but just in case people + // didn't rebuild, check both. 
+ if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") { + t.Errorf("didn't see foo: Bar header") + t.Logf("Got: %s", body) + } + if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") { + t.Errorf("didn't see client-proto: HTTP/2 header") + t.Logf("Got: %s", res) + } + if !strings.Contains(string(res.([]byte)), msg) { + t.Errorf("didn't see %q content", msg) + t.Logf("Got: %s", res) + } + case <-time.After(3 * time.Second): + t.Errorf("timeout waiting for curl") + } + + if atomic.LoadInt32(&gotConn) == 0 { + t.Error("never saw an http2 connection") + } +} + +var doh2load = flag.Bool("h2load", false, "Run h2load test") + +func TestServerWithH2Load(t *testing.T) { + if !*doh2load { + t.Skip("Skipping without --h2load flag.") + } + if runtime.GOOS != "linux" { + t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway") + } + requireH2load(t) + + msg := strings.Repeat("Hello, h2load!\n", 5000) + ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + io.WriteString(w, msg) + })) + ts.StartTLS() + defer ts.Close() + + cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl", + "-n100000", "-c100", "-m100", ts.URL) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + t.Fatal(err) + } +} + +// Issue 12843 +func TestServerDoS_MaxHeaderListSize(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) + defer st.Close() + + // shake hands + frameSize := defaultMaxReadFrameSize + var advHeaderListSize *uint32 + st.greetAndCheckSettings(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + if s.Val < minMaxFrameSize { + frameSize = minMaxFrameSize + } else if s.Val > maxFrameSize { + frameSize = maxFrameSize + } else { + frameSize = int(s.Val) + } + case SettingMaxHeaderListSize: + advHeaderListSize = &s.Val + } + return nil + }) + + if advHeaderListSize == nil { + t.Errorf("server didn't advertise a max header list size") + } else if *advHeaderListSize == 0 { + t.Errorf("server advertised a max header list size of 0") + } + + st.encodeHeaderField(":method", "GET") + st.encodeHeaderField(":path", "/") + st.encodeHeaderField(":scheme", "https") + cookie := strings.Repeat("*", 4058) + st.encodeHeaderField("cookie", cookie) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.headerBuf.Bytes(), + EndStream: true, + EndHeaders: false, + }) + + // Capture the short encoding of a duplicate ~4K cookie, now + // that we've already sent it once. + st.headerBuf.Reset() + st.encodeHeaderField("cookie", cookie) + + // Now send 1MB of it. 
+ const size = 1 << 20 + b := bytes.Repeat(st.headerBuf.Bytes(), size/st.headerBuf.Len()) + for len(b) > 0 { + chunk := b + if len(chunk) > frameSize { + chunk = chunk[:frameSize] + } + b = b[len(chunk):] + st.fr.WriteContinuation(1, len(b) == 0, chunk) + } + + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func TestCompressionErrorOnWrite(t *testing.T) { + const maxStrLen = 8 << 10 + var serverConfig *http.Server + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }, func(ts *httptest.Server) { + serverConfig = ts.Config + serverConfig.MaxHeaderBytes = maxStrLen + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + maxAllowed := st.sc.framer.maxHeaderStringLen() + + // Crank this up, now that we have a conn connected with the + // hpack.Decoder's max string length set has been initialized + // from the earlier low ~8K value. We want this higher so don't + // hit the max header list size. We only want to test hitting + // the max string size. + serverConfig.MaxHeaderBytes = 1 << 20 + + // First a request with a header that's exactly the max allowed size + // for the hpack compression. It's still too long for the header list + // size, so we'll get the 431 error, but that keeps the compression + // context still valid. + hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed)) + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + if !h.HeadersEnded() { + t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) + } + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "431"}, + {"content-type", "text/html; charset=utf-8"}, + {"content-length", "63"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } + df := st.wantData() + if !strings.Contains(string(df.Data()), "HTTP Error 431") { + t.Errorf("Unexpected data body: %q", df.Data()) + } + if !df.StreamEnded() { + t.Fatalf("expect data stream end") + } + + // And now send one that's just one byte too big. + hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)) + st.writeHeaders(HeadersFrameParam{ + StreamID: 3, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeCompression { + t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) + } +} + +func TestCompressionErrorOnClose(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + st.addLogFilter("connection error: COMPRESSION_ERROR") + defer st.Close() + st.greet() + + hbf := st.encodeHeader("foo", "bar") + hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails. 
+	st.writeHeaders(HeadersFrameParam{
+		StreamID:      1,
+		BlockFragment: hbf,
+		EndStream:     true,
+		EndHeaders:    true,
+	})
+	ga := st.wantGoAway()
+	if ga.ErrCode != ErrCodeCompression {
+		t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
+	}
+}
+
+// test that a server handler can read trailers from a client
+func TestServerReadsTrailers(t *testing.T) {
+	const testBody = "some test body"
+	writeReq := func(st *serverTester) {
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      1, // clients send odd numbers
+			BlockFragment: st.encodeHeader("trailer", "Foo, Bar", "trailer", "Baz"),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		st.writeData(1, false, []byte(testBody))
+		st.writeHeaders(HeadersFrameParam{
+			StreamID: 1, // clients send odd numbers
+			BlockFragment: st.encodeHeaderRaw(
+				"foo", "foov",
+				"bar", "barv",
+				"baz", "bazv",
+				"surprise", "wasn't declared; shouldn't show up",
+			),
+			EndStream:  true,
+			EndHeaders: true,
+		})
+	}
+	checkReq := func(r *http.Request) {
+		wantTrailer := http.Header{
+			"Foo": nil,
+			"Bar": nil,
+			"Baz": nil,
+		}
+		if !reflect.DeepEqual(r.Trailer, wantTrailer) {
+			t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer)
+		}
+		slurp, err := ioutil.ReadAll(r.Body)
+		if string(slurp) != testBody {
+			t.Errorf("read body %q; want %q", slurp, testBody)
+		}
+		if err != nil {
+			t.Fatalf("Body slurp: %v", err)
+		}
+		wantTrailerAfter := http.Header{
+			"Foo": {"foov"},
+			"Bar": {"barv"},
+			"Baz": {"bazv"},
+		}
+		if !reflect.DeepEqual(r.Trailer, wantTrailerAfter) {
+			t.Errorf("final Trailer = %v; want %v", r.Trailer, wantTrailerAfter)
+		}
+	}
+	testServerRequest(t, writeReq, checkReq)
+}
+
+// test that a server handler can send trailers
+func TestServerWritesTrailers_WithFlush(t *testing.T)    { testServerWritesTrailers(t, true) }
+func TestServerWritesTrailers_WithoutFlush(t *testing.T) { testServerWritesTrailers(t, false) }
+
+func testServerWritesTrailers(t *testing.T, withFlush bool) {
+	// See https://httpwg.github.io/specs/rfc7540.html#rfc.section.8.1.3
+	testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B")
+		w.Header().Add("Trailer", "Server-Trailer-C")
+		w.Header().Add("Trailer", "Transfer-Encoding, Content-Length, Trailer") // filtered
+
+		// Regular headers:
+		w.Header().Set("Foo", "Bar")
+		w.Header().Set("Content-Length", "5") // len("Hello")
+
+		io.WriteString(w, "Hello")
+		if withFlush {
+			w.(http.Flusher).Flush()
+		}
+		w.Header().Set("Server-Trailer-A", "valuea")
+		w.Header().Set("Server-Trailer-C", "valuec") // skipping B
+		// After a flush, random keys like Server-Surprise shouldn't show up:
+		w.Header().Set("Server-Surprise", "surprise! 
this isn't predeclared!") + // But we do permit promoting keys to trailers after a + // flush if they start with the magic + // otherwise-invalid "Trailer:" prefix: + w.Header().Set("Trailer:Post-Header-Trailer", "hi1") + w.Header().Set("Trailer:post-header-trailer2", "hi2") + w.Header().Set("Trailer:Range", "invalid") + w.Header().Set("Trailer:Foo\x01Bogus", "invalid") + w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40") + w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40") + w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40") + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if hf.StreamEnded() { + t.Fatal("response HEADERS had END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"foo", "Bar"}, + {"trailer", "Server-Trailer-A, Server-Trailer-B"}, + {"trailer", "Server-Trailer-C"}, + {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, + {"content-type", "text/plain; charset=utf-8"}, + {"content-length", "5"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + df := st.wantData() + if string(df.Data()) != "Hello" { + t.Fatalf("Client read %q; want Hello", df.Data()) + } + if df.StreamEnded() { + t.Fatalf("data frame had STREAM_ENDED") + } + tf := st.wantHeaders() // for the trailers + if !tf.StreamEnded() { + t.Fatalf("trailers HEADERS lacked END_STREAM") + } + if !tf.HeadersEnded() { + t.Fatalf("trailers HEADERS lacked END_HEADERS") + } + wanth = [][2]string{ + {"post-header-trailer", "hi1"}, + {"post-header-trailer2", "hi2"}, + {"server-trailer-a", "valuea"}, + {"server-trailer-c", "valuec"}, + } + goth = st.decodeHeader(tf.HeaderBlockFragment()) + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +// validate transmitted header field names & values +// golang.org/issue/14048 +func TestServerDoesntWriteInvalidHeaders(t *testing.T) { + testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { + w.Header().Add("OK1", "x") + w.Header().Add("Bad:Colon", "x") // colon (non-token byte) in key + w.Header().Add("Bad1\x00", "x") // null in key + w.Header().Add("Bad2", "x\x00y") // null in value + return nil + }, func(st *serverTester) { + getSlash(st) + hf := st.wantHeaders() + if !hf.StreamEnded() { + t.Error("response HEADERS lacked END_STREAM") + } + if !hf.HeadersEnded() { + t.Fatal("response HEADERS didn't have END_HEADERS") + } + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "200"}, + {"ok1", "x"}, + {"content-length", "0"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) + } + }) +} + +func BenchmarkServerGets(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + + const msg = "Hello, world" + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + + // Give the server quota to reply. 
(plus it has the 64KB)
+	if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		id := 1 + uint32(i)*2
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(),
+			EndStream:     true,
+			EndHeaders:    true,
+		})
+		st.wantHeaders()
+		df := st.wantData()
+		if !df.StreamEnded() {
+			b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+		}
+	}
+}
+
+func BenchmarkServerPosts(b *testing.B) {
+	defer disableGoroutineTracking()()
+	b.ReportAllocs()
+
+	const msg = "Hello, world"
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		// Consume the (empty) body from the peer before replying, otherwise
+		// the server will sometimes (depending on scheduling) send the peer
+		// a RST_STREAM with the CANCEL error code.
+		if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+			b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+		}
+		io.WriteString(w, msg)
+	})
+	defer st.Close()
+	st.greet()
+
+	// Give the server quota to reply. (plus it has the 64KB)
+	if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		id := 1 + uint32(i)*2
+		st.writeHeaders(HeadersFrameParam{
+			StreamID:      id,
+			BlockFragment: st.encodeHeader(":method", "POST"),
+			EndStream:     false,
+			EndHeaders:    true,
+		})
+		st.writeData(id, true, nil)
+		st.wantHeaders()
+		df := st.wantData()
+		if !df.StreamEnded() {
+			b.Fatalf("DATA didn't have END_STREAM; got %v", df)
+		}
+	}
+}
+
+// Send a stream of messages from server to client in separate data frames.
+// Brings up performance issues seen in long streams.
+// Created to show problem in go issue #18502
+func BenchmarkServerToClientStreamDefaultOptions(b *testing.B) {
+	benchmarkServerToClientStream(b)
+}
+
+// Justification for Change-Id: Iad93420ef6c3918f54249d867098f1dadfa324d8
+// Expect to see memory/alloc reduction by opting in to Frame reuse with the Framer.
+func BenchmarkServerToClientStreamReuseFrames(b *testing.B) {
+	benchmarkServerToClientStream(b, optFramerReuseFrames)
+}
+
+func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) {
+	defer disableGoroutineTracking()()
+	b.ReportAllocs()
+	const msgLen = 1
+	// default window size
+	const windowSize = 1<<16 - 1
+
+	// next message to send from the server and for the client to expect
+	nextMsg := func(i int) []byte {
+		msg := make([]byte, msgLen)
+		msg[0] = byte(i)
+		if len(msg) != msgLen {
+			panic("invalid test setup msg length")
+		}
+		return msg
+	}
+
+	st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) {
+		// Consume the (empty) body from the peer before replying, otherwise
+		// the server will sometimes (depending on scheduling) send the peer
+		// a RST_STREAM with the CANCEL error code.
+		if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil {
+			b.Errorf("Copy error; got %v, %v; want 0, nil", n, err)
+		}
+		for i := 0; i < b.N; i += 1 {
+			w.Write(nextMsg(i))
+			w.(http.Flusher).Flush()
+		}
+	}, newServerOpts...)
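+	// (For the ReuseFrames variant above, the extra server option is expected
+	// to enable the Framer's SetReuseFrames mode, letting the read loop
+	// recycle Frame values instead of allocating a new one per frame.)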
+ defer st.Close() + st.greet() + + const id = uint32(1) + + st.writeHeaders(HeadersFrameParam{ + StreamID: id, + BlockFragment: st.encodeHeader(":method", "POST"), + EndStream: false, + EndHeaders: true, + }) + + st.writeData(id, true, nil) + st.wantHeaders() + + var pendingWindowUpdate = uint32(0) + + for i := 0; i < b.N; i += 1 { + expected := nextMsg(i) + df := st.wantData() + if bytes.Compare(expected, df.data) != 0 { + b.Fatalf("Bad message received; want %v; got %v", expected, df.data) + } + // try to send infrequent but large window updates so they don't overwhelm the test + pendingWindowUpdate += uint32(len(df.data)) + if pendingWindowUpdate >= windowSize/2 { + if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + if err := st.fr.WriteWindowUpdate(id, pendingWindowUpdate); err != nil { + b.Fatal(err) + } + pendingWindowUpdate = 0 + } + } + df := st.wantData() + if !df.StreamEnded() { + b.Fatalf("DATA didn't have END_STREAM; got %v", df) + } +} + +// go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 +// Verify we don't hang. +func TestIssue53(t *testing.T) { + const data = "PRI * HTTP/2.0\r\n\r\nSM" + + "\r\n\r\n\x00\x00\x00\x01\ainfinfin\ad" + s := &http.Server{ + ErrorLog: log.New(io.MultiWriter(stderrv(), twriter{t: t}), "", log.LstdFlags), + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("hello")) + }), + } + s2 := &Server{ + MaxReadFrameSize: 1 << 16, + PermitProhibitedCipherSuites: true, + } + c := &issue53Conn{[]byte(data), false, false} + s2.ServeConn(c, &ServeConnOpts{BaseConfig: s}) + if !c.closed { + t.Fatal("connection is not closed") + } +} + +type issue53Conn struct { + data []byte + closed bool + written bool +} + +func (c *issue53Conn) Read(b []byte) (n int, err error) { + if len(c.data) == 0 { + return 0, io.EOF + } + n = copy(b, c.data) + c.data = c.data[n:] + return +} + +func (c *issue53Conn) Write(b []byte) (n int, err error) { + c.written = true + return len(b), nil +} + +func (c *issue53Conn) Close() error { + c.closed = true + return nil +} + +func (c *issue53Conn) LocalAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) RemoteAddr() net.Addr { + return &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 49706} +} +func (c *issue53Conn) SetDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetReadDeadline(t time.Time) error { return nil } +func (c *issue53Conn) SetWriteDeadline(t time.Time) error { return nil } + +// golang.org/issue/12895 +func TestConfigureServer(t *testing.T) { + tests := []struct { + name string + tlsConfig *tls.Config + wantErr string + }{ + { + name: "empty server", + }, + { + name: "just the required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + }, + { + name: "just the alternative required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, + }, + }, + { + name: "missing required cipher suite", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384}, + }, + wantErr: "is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.", + }, + { + name: "required after bad", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + }, + wantErr: "contains an HTTP/2-approved cipher suite (0xc02f), but it comes after", + }, + { + name: 
"bad after required", + tlsConfig: &tls.Config{ + CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_RC4_128_SHA}, + }, + }, + } + for _, tt := range tests { + srv := &http.Server{TLSConfig: tt.tlsConfig} + err := ConfigureServer(srv, nil) + if (err != nil) != (tt.wantErr != "") { + if tt.wantErr != "" { + t.Errorf("%s: success, but want error", tt.name) + } else { + t.Errorf("%s: unexpected error: %v", tt.name, err) + } + } + if err != nil && tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("%s: err = %v; want substring %q", tt.name, err, tt.wantErr) + } + if err == nil && !srv.TLSConfig.PreferServerCipherSuites { + t.Errorf("%s: PreferServerCipherSuite is false; want true", tt.name) + } + } +} + +func TestServerRejectHeadWithBody(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: false, // what we're testing, a bogus HEAD request with body + EndHeaders: true, + }) + st.wantRSTStream(1, ErrCodeProtocol) +} + +func TestServerNoAutoContentLengthOnHead(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // No response body. (or smaller than one frame) + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, // clients send odd numbers + BlockFragment: st.encodeHeader(":method", "HEAD"), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +// golang.org/issue/13495 +func TestServerNoDuplicateContentType(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Header()["Content-Type"] = []string{""} + fmt.Fprintf(w, "hi") + }) + defer st.Close() + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + h := st.wantHeaders() + headers := st.decodeHeader(h.HeaderBlockFragment()) + want := [][2]string{ + {":status", "200"}, + {"content-type", ""}, + {"content-length", "41"}, + } + if !reflect.DeepEqual(headers, want) { + t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) + } +} + +func disableGoroutineTracking() (restore func()) { + old := DebugGoroutines + DebugGoroutines = false + return func() { DebugGoroutines = old } +} + +func BenchmarkServer_GetRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + + st.greet() + // Give the server quota to reply. 
(plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "GET") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders() + st.wantData() + } +} + +func BenchmarkServer_PostRequest(b *testing.B) { + defer disableGoroutineTracking()() + b.ReportAllocs() + const msg = "Hello, world." + st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(ioutil.Discard, r.Body) + if err != nil || n > 0 { + b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) + } + io.WriteString(w, msg) + }) + defer st.Close() + st.greet() + // Give the server quota to reply. (plus it has the the 64KB) + if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { + b.Fatal(err) + } + hbf := st.encodeHeader(":method", "POST") + for i := 0; i < b.N; i++ { + streamID := uint32(1 + 2*i) + st.writeHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: hbf, + EndStream: false, + EndHeaders: true, + }) + st.writeData(streamID, true, nil) + st.wantHeaders() + st.wantData() + } +} + +type connStateConn struct { + net.Conn + cs tls.ConnectionState +} + +func (c connStateConn) ConnectionState() tls.ConnectionState { return c.cs } + +// golang.org/issue/12737 -- handle any net.Conn, not just +// *tls.Conn. +func TestServerHandleCustomConn(t *testing.T) { + var s Server + c1, c2 := net.Pipe() + clientDone := make(chan struct{}) + handlerDone := make(chan struct{}) + var req *http.Request + go func() { + defer close(clientDone) + defer c2.Close() + fr := NewFramer(c2, c2) + io.WriteString(c2, ClientPreface) + fr.WriteSettings() + fr.WriteSettingsAck() + f, err := fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || sf.IsAck() { + t.Errorf("Got %v; want non-ACK SettingsFrame", summarizeFrame(f)) + return + } + f, err = fr.ReadFrame() + if err != nil { + t.Error(err) + return + } + if sf, ok := f.(*SettingsFrame); !ok || !sf.IsAck() { + t.Errorf("Got %v; want ACK SettingsFrame", summarizeFrame(f)) + return + } + var henc hpackEncoder + fr.WriteHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: henc.encodeHeaderRaw(t, ":method", "GET", ":path", "/", ":scheme", "https", ":authority", "foo.com"), + EndStream: true, + EndHeaders: true, + }) + go io.Copy(ioutil.Discard, c2) + <-handlerDone + }() + const testString = "my custom ConnectionState" + fakeConnState := tls.ConnectionState{ + ServerName: testString, + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } + go s.ServeConn(connStateConn{c1, fakeConnState}, &ServeConnOpts{ + BaseConfig: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer close(handlerDone) + req = r + }), + }}) + select { + case <-clientDone: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for handler") + } + if req.TLS == nil { + t.Fatalf("Request.TLS is nil. 
Got: %#v", req) + } + if req.TLS.ServerName != testString { + t.Fatalf("Request.TLS = %+v; want ServerName of %q", req.TLS, testString) + } +} + +// golang.org/issue/14214 +func TestServer_Rejects_ConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + t.Error("should not get to Handler") + }) + defer st.Close() + st.greet() + st.bodylessReq1("connection", "foo") + hf := st.wantHeaders() + goth := st.decodeHeader(hf.HeaderBlockFragment()) + wanth := [][2]string{ + {":status", "400"}, + {"content-type", "text/plain; charset=utf-8"}, + {"x-content-type-options", "nosniff"}, + {"content-length", "51"}, + } + if !reflect.DeepEqual(goth, wanth) { + t.Errorf("Got headers %v; want %v", goth, wanth) + } +} + +type hpackEncoder struct { + enc *hpack.Encoder + buf bytes.Buffer +} + +func (he *hpackEncoder) encodeHeaderRaw(t *testing.T, headers ...string) []byte { + if len(headers)%2 == 1 { + panic("odd number of kv args") + } + he.buf.Reset() + if he.enc == nil { + he.enc = hpack.NewEncoder(&he.buf) + } + for len(headers) > 0 { + k, v := headers[0], headers[1] + err := he.enc.WriteField(hpack.HeaderField{Name: k, Value: v}) + if err != nil { + t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) + } + headers = headers[2:] + } + return he.buf.Bytes() +} + +func TestCheckValidHTTP2Request(t *testing.T) { + tests := []struct { + h http.Header + want error + }{ + { + h: http.Header{"Te": {"trailers"}}, + want: nil, + }, + { + h: http.Header{"Te": {"trailers", "bogus"}}, + want: errors.New(`request header "TE" may only be "trailers" in HTTP/2`), + }, + { + h: http.Header{"Foo": {""}}, + want: nil, + }, + { + h: http.Header{"Connection": {""}}, + want: errors.New(`request header "Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Proxy-Connection": {""}}, + want: errors.New(`request header "Proxy-Connection" is not valid in HTTP/2`), + }, + { + h: http.Header{"Keep-Alive": {""}}, + want: errors.New(`request header "Keep-Alive" is not valid in HTTP/2`), + }, + { + h: http.Header{"Upgrade": {""}}, + want: errors.New(`request header "Upgrade" is not valid in HTTP/2`), + }, + } + for i, tt := range tests { + got := checkValidHTTP2RequestHeaders(tt.h) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d. checkValidHTTP2Request = %v; want %v", i, got, tt.want) + } + } +} + +// golang.org/issue/14030 +func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { + const msg = "Hello" + const msg2 = "World" + + doRead := make(chan bool, 1) + defer close(doRead) // fallback cleanup + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, msg) + w.(http.Flusher).Flush() + + // Do a read, which might force a 100-continue status to be sent. 
+ <-doRead + r.Body.Read(make([]byte, 10)) + + io.WriteString(w, msg2) + + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req.Header.Set("Expect", "100-continue") + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + buf := make([]byte, len(msg)) + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg { + t.Fatalf("msg = %q; want %q", buf, msg) + } + + doRead <- true + + if _, err := io.ReadFull(res.Body, buf); err != nil { + t.Fatal(err) + } + if string(buf) != msg2 { + t.Fatalf("second msg = %q; want %q", buf, msg2) + } +} + +type funcReader func([]byte) (n int, err error) + +func (f funcReader) Read(p []byte) (n int, err error) { return f(p) } + +// golang.org/issue/16481 -- return flow control when streams close with unread data. +// (The Server version of the bug. See also TestUnreadFlowControlReturned_Transport) +func TestUnreadFlowControlReturned_Server(t *testing.T) { + unblock := make(chan bool, 1) + defer close(unblock) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Don't read the 16KB request body. Wait until the client's + // done sending it and then return. This should cause the Server + // to then return those 16KB of flow control to the client. + <-unblock + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // This previously hung on the 4th iteration. + for i := 0; i < 6; i++ { + body := io.MultiReader( + io.LimitReader(neverEnding('A'), 16<<10), + funcReader(func([]byte) (n int, err error) { + unblock <- true + return 0, io.EOF + }), + ) + req, _ := http.NewRequest("POST", st.ts.URL, body) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } + +} + +func TestServerIdleTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(h2s *Server) { + h2s.IdleTimeout = 500 * time.Millisecond + }) + defer st.Close() + + st.greet() + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +func TestServerIdleTimeout_AfterRequest(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + const timeout = 250 * time.Millisecond + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + time.Sleep(timeout * 2) + }, func(h2s *Server) { + h2s.IdleTimeout = timeout + }) + defer st.Close() + + st.greet() + + // Send a request which takes twice the timeout. Verifies the + // idle timeout doesn't fire while we're in a request: + st.bodylessReq1() + st.wantHeaders() + + // But the idle timeout should be rearmed after the request + // is done: + ga := st.wantGoAway() + if ga.ErrCode != ErrCodeNo { + t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) + } +} + +// grpc-go closes the Request.Body currently with a Read. +// Verify that it doesn't race. 
+// See https://github.com/grpc/grpc-go/pull/938 +func TestRequestBodyReadCloseRace(t *testing.T) { + for i := 0; i < 100; i++ { + body := &requestBody{ + pipe: &pipe{ + b: new(bytes.Buffer), + }, + } + body.pipe.CloseWithError(io.EOF) + + done := make(chan bool, 1) + buf := make([]byte, 10) + go func() { + time.Sleep(1 * time.Millisecond) + body.Close() + done <- true + }() + body.Read(buf) + <-done + } +} + +func TestIssue20704Race(t *testing.T) { + if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" { + t.Skip("skipping in short mode") + } + const ( + itemSize = 1 << 10 + itemCount = 100 + ) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < itemCount; i++ { + _, err := w.Write(make([]byte, itemSize)) + if err != nil { + return + } + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cl := &http.Client{Transport: tr} + + for i := 0; i < 1000; i++ { + resp, err := cl.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + // Force a RST stream to the server by closing without + // reading the body: + resp.Body.Close() + } +} diff --git a/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml new file mode 100644 index 0000000..31a84be --- /dev/null +++ b/vendor/golang.org/x/net/http2/testdata/draft-ietf-httpbis-http2.xml @@ -0,0 +1,5021 @@ + + + + + + + + + + + + + + + + + + + Hypertext Transfer Protocol version 2 + + + Twist +
    + mbelshe@chromium.org +
    +
    + + + Google, Inc +
    + fenix@google.com +
    +
    + + + Mozilla +
    + + 331 E Evelyn Street + Mountain View + CA + 94041 + US + + martin.thomson@gmail.com +
    +
    + + + Applications + HTTPbis + HTTP + SPDY + Web + + + + This specification describes an optimized expression of the semantics of the Hypertext + Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a + reduced perception of latency by introducing header field compression and allowing multiple + concurrent messages on the same connection. It also introduces unsolicited push of + representations from servers to clients. + + + This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. + HTTP's existing semantics remain unchanged. + + + + + + Discussion of this draft takes place on the HTTPBIS working group mailing list + (ietf-http-wg@w3.org), which is archived at . + + + Working Group information can be found at ; that specific to HTTP/2 are at . + + + The changes in this draft are summarized in . + + + +
    + + +
    + + + The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the + HTTP/1.1 message format () has + several characteristics that have a negative overall effect on application performance + today. + + + In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given + TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed + request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 + clients that need to make many requests typically use multiple connections to a server in + order to achieve concurrency and thereby reduce latency. + + + Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary + network traffic, as well as causing the initial TCP congestion + window to quickly fill. This can result in excessive latency when multiple requests are + made on a new TCP connection. + + + HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an + underlying connection. Specifically, it allows interleaving of request and response + messages on the same connection and uses an efficient coding for HTTP header fields. It + also allows prioritization of requests, letting more important requests complete more + quickly, further improving performance. + + + The resulting protocol is more friendly to the network, because fewer TCP connections can + be used in comparison to HTTP/1.x. This means less competition with other flows, and + longer-lived connections, which in turn leads to better utilization of available network + capacity. + + + Finally, HTTP/2 also enables more efficient processing of messages through use of binary + message framing. + +
    + +
    + + HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core + features of HTTP/1.1, but aims to be more efficient in several ways. + + + The basic protocol unit in HTTP/2 is a frame. Each frame + type serves a different purpose. For example, HEADERS and + DATA frames form the basis of HTTP requests and + responses; other frame types like SETTINGS, + WINDOW_UPDATE, and PUSH_PROMISE are used in support of other + HTTP/2 features. + + + Multiplexing of requests is achieved by having each HTTP request-response exchange + associated with its own stream. Streams are largely + independent of each other, so a blocked or stalled request or response does not prevent + progress on other streams. + + + Flow control and prioritization ensure that it is possible to efficiently use multiplexed + streams. Flow control helps to ensure that only data that + can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed + to the most important streams first. + + + HTTP/2 adds a new interaction mode, whereby a server can push + responses to a client. Server push allows a server to speculatively send a client + data that the server anticipates the client will need, trading off some network usage + against a potential latency gain. The server does this by synthesizing a request, which it + sends as a PUSH_PROMISE frame. The server is then able to send a response to + the synthetic request on a separate stream. + + + Frames that contain HTTP header fields are compressed. + HTTP requests can be highly redundant, so compression can reduce the size of requests and + responses significantly. + + +
    + + The HTTP/2 specification is split into four parts: + + + Starting HTTP/2 covers how an HTTP/2 connection is + initiated. + + + The framing and streams layers describe the way HTTP/2 frames are + structured and formed into multiplexed streams. + + + Frame and error + definitions include details of the frame and error types used in HTTP/2. + + + HTTP mappings and additional + requirements describe how HTTP semantics are expressed using frames and + streams. + + + + + While some of the frame and stream layer concepts are isolated from HTTP, this + specification does not define a completely generic framing layer. The framing and streams + layers are tailored to the needs of the HTTP protocol and server push. + +
    + +
    + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD + NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as + described in RFC 2119. + + + All numeric values are in network byte order. Values are unsigned unless otherwise + indicated. Literal values are provided in decimal or hexadecimal as appropriate. + Hexadecimal literals are prefixed with 0x to distinguish them + from decimal literals. + + + The following terms are used: + + + The endpoint initiating the HTTP/2 connection. + + + A transport-layer connection between two endpoints. + + + An error that affects the entire HTTP/2 connection. + + + Either the client or server of the connection. + + + The smallest unit of communication within an HTTP/2 connection, consisting of a header + and a variable-length sequence of octets structured according to the frame type. + + + An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint + that is remote to the primary subject of discussion. + + + An endpoint that is receiving frames. + + + An endpoint that is transmitting frames. + + + The endpoint which did not initiate the HTTP/2 connection. + + + A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. + + + An error on the individual HTTP/2 stream. + + + + + Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined + in . + +
    +
    + +
    + + An HTTP/2 connection is an application layer protocol running on top of a TCP connection + (). The client is the TCP connection initiator. + + + HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same + default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, + implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the + upstream server (the immediate peer to which the client wishes to establish a connection) + supports HTTP/2. + + + + The means by which support for HTTP/2 is determined is different for "http" and "https" + URIs. Discovery for "http" URIs is described in . Discovery + for "https" URIs is described in . + + +
    + + The protocol defined in this document has two identifiers. + + + + The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) + field and any place that HTTP/2 over TLS is identified. + + + The "h2" string is serialized into an ALPN protocol identifier as the two octet + sequence: 0x68, 0x32. + + + + + The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. + This identifier is used in the HTTP/1.1 Upgrade header field and any place that + HTTP/2 over TCP is identified. + + + + + + Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message + semantics described in this document. + + + RFC Editor's Note: please remove the remainder of this section prior to the + publication of a final version of this document. + + + Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". + Until such an RFC exists, implementations MUST NOT identify themselves using these + strings. + + + Examples and text throughout the rest of this document use "h2" as a matter of + editorial convenience only. Implementations of draft versions MUST NOT identify using + this string. + + + Implementations of draft versions of the protocol MUST add the string "-" and the + corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 + over TLS is identified using the string "h2-11". + + + Non-compatible experiments that are based on these draft versions MUST append the string + "-" and an experiment name to the identifier. For example, an experimental implementation + of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself + as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in + . Experimenters are + encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. + +
    + +
    + + A client that makes a request for an "http" URI without prior knowledge about support for + HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade + header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include + exactly one HTTP2-Settings header field. + +
    + For example: + + +]]> +
    + + Requests that contain an entity body MUST be sent in their entirety before the client can + send HTTP/2 frames. This means that a large request entity can block the use of the + connection until it is completely sent. + + + If concurrency of an initial request with subsequent requests is important, an OPTIONS + request can be used to perform the upgrade to HTTP/2, at the cost of an additional + round-trip. + + + A server that does not support HTTP/2 can respond to the request as though the Upgrade + header field were absent: + +
    + +HTTP/1.1 200 OK +Content-Length: 243 +Content-Type: text/html + +... + +
    + + A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with + "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . + + + A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) + response. After the empty line that terminates the 101 response, the server can begin + sending HTTP/2 frames. These frames MUST include a response to the request that initiated + the Upgrade. + + +
    + + For example: + + +HTTP/1.1 101 Switching Protocols +Connection: Upgrade +Upgrade: h2c + +[ HTTP/2 connection ... + +
    + + The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a + SETTINGS frame. + + + The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is + assigned default priority values. Stream 1 is + implicitly half closed from the client toward the server, since the request is completed + as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the + response. + + +
    + + A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field + that includes parameters that govern the HTTP/2 connection, provided in anticipation of + the server accepting the request to upgrade. + +
    + +
    + + A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, + or if more than one is present. A server MUST NOT send this header field. + + + + The content of the HTTP2-Settings header field is the + payload of a SETTINGS frame (), encoded as a + base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The + ABNF production for token68 is + defined in . + + + Since the upgrade is only intended to apply to the immediate connection, a client + sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded + downstream. + + + A server decodes and interprets these values as it would any other + SETTINGS frame. Acknowledgement of the + SETTINGS parameters is not necessary, since a 101 response serves as implicit + acknowledgment. Providing these values in the Upgrade request gives a client an + opportunity to provide parameters prior to receiving any frames from the server. + +
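A minimal Go sketch of the encoding described above, assuming a single hypothetical SETTINGS_MAX_CONCURRENT_STREAMS parameter (identifier 0x3; the value 100 is arbitrary); note the use of base64url with trailing '=' omitted:

    package main

    import (
        "encoding/base64"
        "encoding/binary"
        "fmt"
    )

    func main() {
        // One SETTINGS parameter is 6 octets: 16-bit identifier, 32-bit value.
        payload := make([]byte, 6)
        binary.BigEndian.PutUint16(payload[0:2], 0x3) // SETTINGS_MAX_CONCURRENT_STREAMS
        binary.BigEndian.PutUint32(payload[2:6], 100)

        // base64url with any trailing '=' characters omitted, as required above.
        fmt.Println("HTTP2-Settings: " + base64.RawURLEncoding.EncodeToString(payload))
    }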
    +
    + +
    + + A client that makes a request to an "https" URI uses TLS + with the application layer protocol negotiation extension. + + + HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a + client or selected by a server. + + + Once TLS negotiation is complete, both the client and the server send a connection preface. + +
    + +
    + + A client can learn that a particular server supports HTTP/2 by other means. For example, + describes a mechanism for advertising this capability. + + + A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, + after the connection preface; a server can + identify such a connection by the presence of the connection preface. This only affects + the establishment of HTTP/2 connections over cleartext TCP; implementations that support + HTTP/2 over TLS MUST use protocol negotiation in TLS. + + + Without additional information, prior support for HTTP/2 is not a strong signal that a + given server will support HTTP/2 for future connections. For example, it is possible for + server configurations to change, for configurations to differ between instances in + clustered servers, or for network conditions to change. + +
    + +
    + + Upon establishment of a TCP connection and determination that HTTP/2 will be used by both + peers, each endpoint MUST send a connection preface as a final confirmation and to + establish the initial SETTINGS parameters for the HTTP/2 connection. The client and + server each send a different connection preface. + + + The client connection preface starts with a sequence of 24 octets, which in hex notation + are: + +
+ 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
    + + (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence + is followed by a SETTINGS frame (). The + SETTINGS frame MAY be empty. The client sends the client connection + preface immediately upon receipt of a 101 Switching Protocols response (indicating a + successful upgrade), or as the first application data octets of a TLS connection. If + starting an HTTP/2 connection with prior knowledge of server support for the protocol, the + client connection preface is sent upon connection establishment. + + + + + The client connection preface is selected so that a large proportion of HTTP/1.1 or + HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note + that this does not address the concerns raised in . + + + + + The server connection preface consists of a potentially empty SETTINGS + frame () that MUST be the first frame the server sends in the + HTTP/2 connection. + + + The SETTINGS frames received from a peer as part of the connection preface + MUST be acknowledged (see ) after sending the connection + preface. + + + To avoid unnecessary latency, clients are permitted to send additional frames to the + server immediately after sending the client connection preface, without waiting to receive + the server connection preface. It is important to note, however, that the server + connection preface SETTINGS frame might include parameters that necessarily + alter how a client is expected to communicate with the server. Upon receiving the + SETTINGS frame, the client is expected to honor any parameters established. + In some configurations, it is possible for the server to transmit SETTINGS + before the client sends additional frames, providing an opportunity to avoid this issue. + + + Clients and servers MUST treat an invalid connection preface as a connection error of type + PROTOCOL_ERROR. A GOAWAY frame () + MAY be omitted in this case, since an invalid preface indicates that the peer is not using + HTTP/2. + +
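The tests in server_test.go above already perform exactly this sequence (io.WriteString(c2, ClientPreface) followed by fr.WriteSettings()). A standalone sketch of the client side, assuming a peer already known to support h2c (the address is a placeholder):

    package main

    import (
        "io"
        "log"
        "net"

        "golang.org/x/net/http2"
    )

    func main() {
        // Placeholder address; assumes prior knowledge of h2c support.
        conn, err := net.Dial("tcp", "127.0.0.1:8080")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // The fixed 24-octet magic ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")...
        if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
            log.Fatal(err)
        }
        // ...followed by a (possibly empty) SETTINGS frame.
        fr := http2.NewFramer(conn, conn)
        if err := fr.WriteSettings(); err != nil {
            log.Fatal(err)
        }
    }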
    +
    + +
    + + Once the HTTP/2 connection is established, endpoints can begin exchanging frames. + + +
    + + All frames begin with a fixed 9-octet header followed by a variable-length payload. + +
+  +-----------------------------------------------+
+  |                 Length (24)                   |
+  +---------------+---------------+---------------+
+  |   Type (8)    |   Flags (8)   |
+  +-+-------------+---------------+-------------------------------+
+  |R|                 Stream Identifier (31)                      |
+  +=+=============================================================+
+  |                   Frame Payload (0...)                      ...
+  +---------------------------------------------------------------+
+
+ The fields of the frame header are defined as:
+
+
+ The length of the frame payload expressed as an unsigned 24-bit integer. Values
+ greater than 2^14 (16,384) MUST NOT be sent unless the receiver has
+ set a larger value for SETTINGS_MAX_FRAME_SIZE.
+
+
+ The 9 octets of the frame header are not included in this value.
+
+
+
+
+ The 8-bit type of the frame. The frame type determines the format and semantics of
+ the frame. Implementations MUST ignore and discard any frame that has a type that
+ is unknown.
+
+
+
+
+ An 8-bit field reserved for frame-type specific boolean flags.
+
+
+ Flags are assigned semantics specific to the indicated frame type. Flags that have
+ no defined semantics for a particular frame type MUST be ignored, and MUST be left
+ unset (0) when sending.
+
+
+
+
+ A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST
+ remain unset (0) when sending and MUST be ignored when receiving.
+
+
+
+
+ A 31-bit stream identifier (see ). The value 0 is
+ reserved for frames that are associated with the connection as a whole as opposed to
+ an individual stream.
+
+
+
+
+
+ The structure and content of the frame payload is dependent entirely on the frame type.
+
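A sketch of decoding this 9-octet header in Go; the frameHeader type and readFrameHeader helper are illustrative names, not part of the vendored package:

    import (
        "encoding/binary"
        "io"
    )

    // frameHeader mirrors the 9-octet layout above.
    type frameHeader struct {
        Length   uint32 // 24-bit frame payload length
        Type     uint8  // 8-bit frame type
        Flags    uint8  // 8-bit flags
        StreamID uint32 // 31-bit stream identifier (reserved R bit masked off)
    }

    func readFrameHeader(r io.Reader) (frameHeader, error) {
        var buf [9]byte
        if _, err := io.ReadFull(r, buf[:]); err != nil {
            return frameHeader{}, err
        }
        return frameHeader{
            Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
            Type:     buf[3],
            Flags:    buf[4],
            StreamID: binary.BigEndian.Uint32(buf[5:9]) & (1<<31 - 1), // ignore reserved bit
        }, nil
    }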
    + +
+
+ The size of a frame payload is limited by the maximum size that a receiver advertises in
+ the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value
+ between 2^14 (16,384) and 2^24-1 (16,777,215) octets,
+ inclusive.
+
+ All implementations MUST be capable of receiving and minimally processing frames up to
+ 2^14 octets in length, plus the 9 octet frame
+ header. The size of the frame header is not included when describing frame sizes.
+
+ Certain frame types, such as PING, impose additional limits
+ on the amount of payload data allowed.
+
+
+
+ If a frame size exceeds any defined limit, or is too small to contain mandatory frame
+ data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error
+ in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying
+ a header block (that is, HEADERS,
+ PUSH_PROMISE, and CONTINUATION), SETTINGS,
+ and any WINDOW_UPDATE frame with a stream identifier of 0.
+
+
+ Endpoints are not obligated to use all available space in a frame. Responsiveness can be
+ improved by using frames that are smaller than the permitted maximum size. Sending large
+ frames can result in delays in sending time-sensitive frames (such as
+ RST_STREAM, WINDOW_UPDATE, or PRIORITY),
+ which, if blocked by the transmission of a large frame, could affect performance.
+
    + +
+
+ Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values.
+ They are used within HTTP request and response messages as well as server push operations
+ (see ).
+
+
+ Header lists are collections of zero or more header fields. When transmitted over a
+ connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then
+ divided into one or more octet sequences, called header block fragments, and transmitted
+ within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames.
+
+
+ The Cookie header field is treated specially by the HTTP
+ mapping (see ).
+
+
+ A receiving endpoint reassembles the header block by concatenating its fragments, then
+ decompresses the block to reconstruct the header list.
+
+
+ A complete header block consists of either:
+
+
+ a single HEADERS or PUSH_PROMISE frame,
+ with the END_HEADERS flag set, or
+
+
+ a HEADERS or PUSH_PROMISE frame with the END_HEADERS
+ flag cleared and one or more CONTINUATION frames,
+ where the last CONTINUATION frame has the END_HEADERS flag set.
+
+
+
+
+ Header compression is stateful. One compression context and one decompression context are
+ used for the entire connection. Each header block is processed as a discrete unit.
+ Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved
+ frames of any other type or from any other stream. The last frame in a sequence of
+ HEADERS or CONTINUATION frames MUST have the END_HEADERS
+ flag set. The last frame in a sequence of PUSH_PROMISE or
+ CONTINUATION frames MUST have the END_HEADERS flag set. This allows a
+ header block to be logically equivalent to a single frame.
+
+
+ Header block fragments can only be sent as the payload of HEADERS,
+ PUSH_PROMISE or CONTINUATION frames, because these frames
+ carry data that can modify the compression context maintained by a receiver. An endpoint
+ receiving HEADERS, PUSH_PROMISE or
+ CONTINUATION frames MUST reassemble header blocks and perform decompression
+ even if the frames are to be discarded. A receiver MUST terminate the connection with a
+ connection error of type
+ COMPRESSION_ERROR if it does not decompress a header block.
+
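Header blocks are produced with HPACK, which is exactly what the hpackEncoder helper in server_test.go above does. A trimmed sketch producing one header block fragment (exampleHeaderBlock is an invented name):

    import (
        "bytes"

        "golang.org/x/net/http2/hpack"
    )

    func exampleHeaderBlock() []byte {
        var buf bytes.Buffer
        enc := hpack.NewEncoder(&buf)
        enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
        enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
        enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
        enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})
        // Because compression is stateful, the same encoder must be reused for
        // every header block sent on a connection, never recreated per block.
        return buf.Bytes()
    }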
    +
    + +
    + + A "stream" is an independent, bi-directional sequence of frames exchanged between the client + and server within an HTTP/2 connection. Streams have several important characteristics: + + + A single HTTP/2 connection can contain multiple concurrently open streams, with either + endpoint interleaving frames from multiple streams. + + + Streams can be established and used unilaterally or shared by either the client or + server. + + + Streams can be closed by either endpoint. + + + The order in which frames are sent on a stream is significant. Recipients process frames + in the order they are received. In particular, the order of HEADERS, + and DATA frames is semantically significant. + + + Streams are identified by an integer. Stream identifiers are assigned to streams by the + endpoint initiating the stream. + + + + +
    + + The lifecycle of a stream is shown in . + + +
+
+                                +--------+
+                        send PP |        | recv PP
+                       ,--------|  idle  |--------.
+                      /         |        |         \
+                     v          +--------+          v
+              +----------+          |           +----------+
+              |          |          | send H /  |          |
+       ,------| reserved |          | recv H    | reserved |------.
+       |      | (local)  |          |           | (remote) |      |
+       |      +----------+          v           +----------+      |
+       |          |             +--------+             |          |
+       |          |     recv ES |        | send ES     |          |
+       |   send H |     ,-------|  open  |-------.     | recv H   |
+       |          |    /        |        |        \    |          |
+       |          v   v         +--------+         v   v          |
+       |      +----------+          |           +----------+      |
+       |      |   half   |          |           |   half   |      |
+       |      |  closed  |          | send R /  |  closed  |      |
+       |      | (remote) |          | recv R    | (local)  |      |
+       |      +----------+          |           +----------+      |
+       |           |                |                 |           |
+       |           | send ES /      |       recv ES / |           |
+       |           | send R /       v        send R / |           |
+       |           | recv R     +--------+   recv R   |           |
+       | send R /  `----------->|        |<-----------'  send R / |
+       | recv R                 | closed |               recv R   |
+       `----------------------->|        |<----------------------'
+                                +--------+
+
+ H: HEADERS frame (with implied CONTINUATIONs)
+ PP: PUSH_PROMISE frame (with implied CONTINUATIONs)
+ ES: END_STREAM flag
+ R: RST_STREAM frame
+]]>
    + + + Note that this diagram shows stream state transitions and the frames and flags that affect + those transitions only. In this regard, CONTINUATION frames do not result + in state transitions; they are effectively part of the HEADERS or + PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is + processed as a separate event to the frame that bears it; a HEADERS frame + with the END_STREAM flag set can cause two state transitions. + + + Both endpoints have a subjective view of the state of a stream that could be different + when frames are in transit. Endpoints do not coordinate the creation of streams; they are + created unilaterally by either endpoint. The negative consequences of a mismatch in + states are limited to the "closed" state after sending RST_STREAM, where + frames might be received for some time after closing. + + + Streams have the following states: + + + + + + All streams start in the "idle" state. In this state, no frames have been + exchanged. + + + The following transitions are valid from this state: + + + Sending or receiving a HEADERS frame causes the stream to become + "open". The stream identifier is selected as described in . The same HEADERS frame can also + cause a stream to immediately become "half closed". + + + Sending a PUSH_PROMISE frame marks the associated stream for + later use. The stream state for the reserved stream transitions to "reserved + (local)". + + + Receiving a PUSH_PROMISE frame marks the associated stream as + reserved by the remote peer. The state of the stream becomes "reserved + (remote)". + + + + + Receiving any frames other than HEADERS or + PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (local)" state is one that has been promised by sending a + PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an + idle stream by associating the stream with an open stream that was initiated by the + remote peer (see ). + + + In this state, only the following transitions are possible: + + + The endpoint can send a HEADERS frame. This causes the stream to + open in a "half closed (remote)" state. + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MUST NOT send any type of frame other than HEADERS or + RST_STREAM in this state. + + + A PRIORITY frame MAY be received in this state. Receiving any type + of frame other than RST_STREAM or PRIORITY on a stream + in this state MUST be treated as a connection + error of type PROTOCOL_ERROR. + + + + + + + A stream in the "reserved (remote)" state has been reserved by a remote peer. + + + In this state, only the following transitions are possible: + + + Receiving a HEADERS frame causes the stream to transition to + "half closed (local)". + + + Either endpoint can send a RST_STREAM frame to cause the stream + to become "closed". This releases the stream reservation. + + + + + An endpoint MAY send a PRIORITY frame in this state to reprioritize + the reserved stream. An endpoint MUST NOT send any type of frame other than + RST_STREAM, WINDOW_UPDATE, or PRIORITY + in this state. + + + Receiving any type of frame other than HEADERS or + RST_STREAM on a stream in this state MUST be treated as a connection error of type + PROTOCOL_ERROR. + + + + + + + A stream in the "open" state may be used by both peers to send frames of any type. 
+ In this state, sending peers observe advertised stream
+ level flow control limits.
+
+
+ From this state either endpoint can send a frame with an END_STREAM flag set, which
+ causes the stream to transition into one of the "half closed" states: an endpoint
+ sending an END_STREAM flag causes the stream state to become "half closed (local)";
+ an endpoint receiving an END_STREAM flag causes the stream state to become "half
+ closed (remote)".
+
+
+ Either endpoint can send a RST_STREAM frame from this state, causing
+ it to transition immediately to "closed".
+
+
+
+
+
+ A stream that is in the "half closed (local)" state cannot be used for sending
+ frames. Only WINDOW_UPDATE, PRIORITY and
+ RST_STREAM frames can be sent in this state.
+
+
+ A stream transitions from this state to "closed" when a frame that contains an
+ END_STREAM flag is received, or when either peer sends a RST_STREAM
+ frame.
+
+
+ A receiver can ignore WINDOW_UPDATE frames in this state, which might
+ arrive for a short period after a frame bearing the END_STREAM flag is sent.
+
+
+ PRIORITY frames received in this state are used to reprioritize
+ streams that depend on the current stream.
+
+
+
+
+
+ A stream that is "half closed (remote)" is no longer being used by the peer to send
+ frames. In this state, an endpoint is no longer obligated to maintain a receiver
+ flow control window if it performs flow control.
+
+
+ If an endpoint receives additional frames for a stream that is in this state, other
+ than WINDOW_UPDATE, PRIORITY or
+ RST_STREAM, it MUST respond with a stream error of type
+ STREAM_CLOSED.
+
+
+ A stream that is "half closed (remote)" can be used by the endpoint to send frames
+ of any type. In this state, the endpoint continues to observe advertised stream level flow control limits.
+
+
+ A stream can transition from this state to "closed" by sending a frame that contains
+ an END_STREAM flag, or when either peer sends a RST_STREAM frame.
+
+
+
+
+
+ The "closed" state is the terminal state.
+
+
+ An endpoint MUST NOT send frames other than PRIORITY on a closed
+ stream. An endpoint that receives any frame other than PRIORITY
+ after receiving a RST_STREAM MUST treat that as a stream error of type
+ STREAM_CLOSED. Similarly, an endpoint that receives any frames after
+ receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type
+ STREAM_CLOSED, unless the frame is permitted as described below.
+
+
+ WINDOW_UPDATE or RST_STREAM frames can be received in
+ this state for a short period after a DATA or HEADERS
+ frame containing an END_STREAM flag is sent. Until the remote peer receives and
+ processes RST_STREAM or the frame bearing the END_STREAM flag, it
+ might send frames of these types. Endpoints MUST ignore
+ WINDOW_UPDATE or RST_STREAM frames received in this
+ state, though endpoints MAY choose to treat frames that arrive a significant time
+ after sending END_STREAM as a connection
+ error of type PROTOCOL_ERROR.
+
+
+ PRIORITY frames can be sent on closed streams to prioritize streams
+ that are dependent on the closed stream. Endpoints SHOULD process
+ PRIORITY frames, though they can be ignored if the stream has been
+ removed from the dependency tree (see ).
+
+
+ If this state is reached as a result of sending a RST_STREAM frame,
+ the peer that receives the RST_STREAM might have already sent - or
+ enqueued for sending - frames on the stream that cannot be withdrawn.
+ An endpoint
+ MUST ignore frames that it receives on closed streams after it has sent a
+ RST_STREAM frame. An endpoint MAY choose to limit the period over
+ which it ignores frames and treat frames that arrive after this time as being in
+ error.
+
+
+ Flow controlled frames (i.e., DATA) received after sending
+ RST_STREAM are counted toward the connection flow control window.
+ Even though these frames might be ignored, because they are sent before the sender
+ receives the RST_STREAM, the sender will consider the frames to count
+ against the flow control window.
+
+
+ An endpoint might receive a PUSH_PROMISE frame after it sends
+ RST_STREAM. PUSH_PROMISE causes a stream to become
+ "reserved" even if the associated stream has been reset. Therefore, a
+ RST_STREAM is needed to close an unwanted promised stream.
+
+
+
+
+ In the absence of more specific guidance elsewhere in this document, implementations
+ SHOULD treat the receipt of a frame that is not expressly permitted in the description of
+ a state as a connection error of type
+ PROTOCOL_ERROR. Frames of unknown types are ignored.
+
+
+ An example of the state transitions for an HTTP request/response exchange can be found in
+ . An example of the state transitions for server push can be
+ found in and .
+
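An illustrative, non-normative Go sketch of the END_STREAM transitions in the diagram above (the type and method names are invented):

    type streamState int

    const (
        stateIdle streamState = iota
        stateOpen
        stateHalfClosedLocal
        stateHalfClosedRemote
        stateClosed
    )

    // onEndStream applies the END_STREAM (ES) edges of the diagram: "open"
    // moves to one of the "half closed" states, and a "half closed" stream
    // moves to "closed".
    func (s streamState) onEndStream(sentByUs bool) streamState {
        switch s {
        case stateOpen:
            if sentByUs {
                return stateHalfClosedLocal
            }
            return stateHalfClosedRemote
        case stateHalfClosedLocal, stateHalfClosedRemote:
            return stateClosed
        }
        return s
    }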
    + + Streams are identified with an unsigned 31-bit integer. Streams initiated by a client + MUST use odd-numbered stream identifiers; those initiated by the server MUST use + even-numbered stream identifiers. A stream identifier of zero (0x0) is used for + connection control messages; the stream identifier zero cannot be used to establish a + new stream. + + + HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are + responded to with a stream identifier of one (0x1). After the upgrade + completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 + cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. + + + The identifier of a newly established stream MUST be numerically greater than all + streams that the initiating endpoint has opened or reserved. This governs streams that + are opened using a HEADERS frame and streams that are reserved using + PUSH_PROMISE. An endpoint that receives an unexpected stream identifier + MUST respond with a connection error of + type PROTOCOL_ERROR. + + + The first use of a new stream identifier implicitly closes all streams in the "idle" + state that might have been initiated by that peer with a lower-valued stream identifier. + For example, if a client sends a HEADERS frame on stream 7 without ever + sending a frame on stream 5, then stream 5 transitions to the "closed" state when the + first frame for stream 7 is sent or received. + + + Stream identifiers cannot be reused. Long-lived connections can result in an endpoint + exhausting the available range of stream identifiers. A client that is unable to + establish a new stream identifier can establish a new connection for new streams. A + server that is unable to establish a new stream identifier can send a + GOAWAY frame so that the client is forced to open a new connection for + new streams. + +
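The identifier rules above reduce to a small receiving-side check; a sketch (validNewStreamID is a hypothetical helper):

    // validNewStreamID reports whether id may open a new stream, given the
    // highest identifier the peer has opened or reserved so far.
    func validNewStreamID(id, lastOpened uint32, fromClient bool) bool {
        if id == 0 { // stream 0 is reserved for connection control messages
            return false
        }
        if fromClient != (id%2 == 1) { // clients use odd, servers even
            return false
        }
        return id > lastOpened // must exceed all previously used identifiers
    }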
    + +
    + + A peer can limit the number of concurrently active streams using the + SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent + streams setting is specific to each endpoint and applies only to the peer that receives + the setting. That is, clients specify the maximum number of concurrent streams the + server can initiate, and servers specify the maximum number of concurrent streams the + client can initiate. + + + Streams that are in the "open" state, or either of the "half closed" states count toward + the maximum number of streams that an endpoint is permitted to open. Streams in any of + these three states count toward the limit advertised in the + SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the + "reserved" states do not count toward the stream limit. + + + Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a + HEADERS frame that causes their advertised concurrent stream limit to be + exceeded MUST treat this as a stream error. An + endpoint that wishes to reduce the value of + SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current + number of open streams can either close streams that exceed the new value or allow + streams to complete. + +
    +
    + +
    + + Using streams for multiplexing introduces contention over use of the TCP connection, + resulting in blocked streams. A flow control scheme ensures that streams on the same + connection do not destructively interfere with each other. Flow control is used for both + individual streams and for the connection as a whole. + + + HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. + + +
    + + HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be + used without requiring protocol changes. Flow control in HTTP/2 has the following + characteristics: + + + Flow control is specific to a connection; i.e., it is "hop-by-hop", not + "end-to-end". + + + Flow control is based on window update frames. Receivers advertise how many octets + they are prepared to receive on a stream and for the entire connection. This is a + credit-based scheme. + + + Flow control is directional with overall control provided by the receiver. A + receiver MAY choose to set any window size that it desires for each stream and for + the entire connection. A sender MUST respect flow control limits imposed by a + receiver. Clients, servers and intermediaries all independently advertise their + flow control window as a receiver and abide by the flow control limits set by + their peer when sending. + + + The initial value for the flow control window is 65,535 octets for both new streams + and the overall connection. + + + The frame type determines whether flow control applies to a frame. Of the frames + specified in this document, only DATA frames are subject to flow + control; all other frame types do not consume space in the advertised flow control + window. This ensures that important control frames are not blocked by flow control. + + + Flow control cannot be disabled. + + + HTTP/2 defines only the format and semantics of the WINDOW_UPDATE + frame (). This document does not stipulate how a + receiver decides when to send this frame or the value that it sends, nor does it + specify how a sender chooses to send packets. Implementations are able to select + any algorithm that suits their needs. + + + + + Implementations are also responsible for managing how requests and responses are sent + based on priority; choosing how to avoid head of line blocking for requests; and + managing the creation of new streams. Algorithm choices for these could interact with + any flow control algorithm. + +
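A sketch of the sender side of this credit-based scheme (flowWindow is an invented type; DATA consumes window, WINDOW_UPDATE replenishes it):

    // flowWindow tracks one flow control window; one instance per stream plus
    // one for the connection, each starting at the initial 65,535 octets.
    type flowWindow struct {
        avail int64
    }

    // consume reports whether n octets of DATA may be sent, deducting them if so.
    func (w *flowWindow) consume(n int64) bool {
        if n > w.avail {
            return false // must wait for a WINDOW_UPDATE before sending
        }
        w.avail -= n
        return true
    }

    // windowUpdate credits the window when a WINDOW_UPDATE frame arrives.
    func (w *flowWindow) windowUpdate(increment uint32) {
        w.avail += int64(increment)
    }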
    + +
+
+ Flow control is defined to protect endpoints that are operating under resource
+ constraints. For example, a proxy needs to share memory between many connections, and
+ also might have a slow upstream connection and a fast downstream one. Flow control
+ addresses cases where the receiver is unable to process data on one stream, yet wants to
+ continue to process other streams in the same connection.
+
+
+ Deployments that do not require this capability can advertise a flow control window of
+ the maximum size, incrementing the available space when new data is received. This
+ effectively disables flow control for that receiver. Conversely, a sender is always
+ subject to the flow control window advertised by the receiver.
+
+
+ Deployments with constrained resources (for example, memory) can employ flow control to
+ limit the amount of memory a peer can consume. Note, however, that this can lead to
+ suboptimal use of available network resources if flow control is enabled without
+ knowledge of the bandwidth-delay product (see ).
+
+
+ Even with full awareness of the current bandwidth-delay product, implementation of flow
+ control can be difficult. When using flow control, the receiver MUST read from the TCP
+ receive buffer in a timely fashion. Failure to do so could lead to a deadlock when
+ critical frames, such as WINDOW_UPDATE, are not read and acted upon.
+
    +
    + +
    + + A client can assign a priority for a new stream by including prioritization information in + the HEADERS frame that opens the stream. For an existing + stream, the PRIORITY frame can be used to change the + priority. + + + The purpose of prioritization is to allow an endpoint to express how it would prefer its + peer allocate resources when managing concurrent streams. Most importantly, priority can + be used to select streams for transmitting frames when there is limited capacity for + sending. + + + Streams can be prioritized by marking them as dependent on the completion of other streams + (). Each dependency is assigned a relative weight, a number + that is used to determine the relative proportion of available resources that are assigned + to streams dependent on the same stream. + + + + Explicitly setting the priority for a stream is input to a prioritization process. It + does not guarantee any particular processing or transmission order for the stream relative + to any other stream. An endpoint cannot force a peer to process concurrent streams in a + particular order using priority. Expressing priority is therefore only ever a suggestion. + + + Providing prioritization information is optional, so default values are used if no + explicit indicator is provided (). + + +
    + + Each stream can be given an explicit dependency on another stream. Including a + dependency expresses a preference to allocate resources to the identified stream rather + than to the dependent stream. + + + A stream that is not dependent on any other stream is given a stream dependency of 0x0. + In other words, the non-existent stream 0 forms the root of the tree. + + + A stream that depends on another stream is a dependent stream. The stream upon which a + stream is dependent is a parent stream. A dependency on a stream that is not currently + in the tree - such as a stream in the "idle" state - results in that stream being given + a default priority. + + + When assigning a dependency on another stream, the stream is added as a new dependency + of the parent stream. Dependent streams that share the same parent are not ordered with + respect to each other. For example, if streams B and C are dependent on stream A, and + if stream D is created with a dependency on stream A, this results in a dependency order + of A followed by B, C, and D in any order. + +
+    A                 A
+   / \      ==>      /|\
+  B   C             B D C
+]]>
    + + An exclusive flag allows for the insertion of a new level of dependencies. The + exclusive flag causes the stream to become the sole dependency of its parent stream, + causing other dependencies to become dependent on the exclusive stream. In the + previous example, if stream D is created with an exclusive dependency on stream A, this + results in D becoming the dependency parent of B and C. + +
+                      A
+    A                 |
+   / \      ==>       D
+  B   C              / \
+                    B   C
+]]>
    + + Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all + of the streams that it depends on (the chain of parent streams up to 0x0) are either + closed, or it is not possible to make progress on them. + + + A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. + +
    + +
    + + All dependent streams are allocated an integer weight between 1 and 256 (inclusive). + + + Streams with the same parent SHOULD be allocated resources proportionally based on their + weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A + with weight 12, and if no progress can be made on A, stream B ideally receives one third + of the resources allocated to stream C. + +
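Restating the weighting rule numerically: with B at weight 4 and C at weight 12 under a common parent, B should ideally receive 4/16 and C 12/16 of the available resources. A sketch (shares is a hypothetical helper):

    // shares computes the proportional resource share of each sibling stream
    // from its weight, per the rule above.
    func shares(weights map[string]int) map[string]float64 {
        total := 0
        for _, w := range weights {
            total += w
        }
        out := make(map[string]float64, len(weights))
        for name, w := range weights {
            out[name] = float64(w) / float64(total)
        }
        return out
    }

    // shares(map[string]int{"B": 4, "C": 12}) => B: 0.25, C: 0.75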
    + +
    + + Stream priorities are changed using the PRIORITY frame. Setting a + dependency causes a stream to become dependent on the identified parent stream. + + + Dependent streams move with their parent stream if the parent is reprioritized. Setting + a dependency with the exclusive flag for a reprioritized stream moves all the + dependencies of the new parent stream to become dependent on the reprioritized stream. + + + If a stream is made dependent on one of its own dependencies, the formerly dependent + stream is first moved to be dependent on the reprioritized stream's previous parent. + The moved dependency retains its weight. + +
+
+ For example, consider an original dependency tree where B and C depend on A, D and E
+ depend on C, and F depends on D. If A is made dependent on D, then D takes the place
+ of A. All other dependency relationships stay the same, except for F, which becomes
+ dependent on A if the reprioritization is exclusive.
+
+    x                x                x                 x
+    |               / \               |                 |
+    A              D   A              D                 D
+   / \            /   / \            / \               / \
+  B   C     ==>  F   B   C   ==>    F   A    OR       A   F
+     / \              |                / \           /|\
+    D   E             E               B   C         B C F
+    |                                     |           |
+    F                                     E           E
+               (intermediate)   (non-exclusive)    (exclusive)
+]]>
    +
    + +
    + + When a stream is removed from the dependency tree, its dependencies can be moved to + become dependent on the parent of the closed stream. The weights of new dependencies + are recalculated by distributing the weight of the dependency of the closed stream + proportionally based on the weights of its dependencies. + + + Streams that are removed from the dependency tree cause some prioritization information + to be lost. Resources are shared between streams with the same parent stream, which + means that if a stream in that set closes or becomes blocked, any spare capacity + allocated to a stream is distributed to the immediate neighbors of the stream. However, + if the common dependency is removed from the tree, those streams share resources with + streams at the next highest level. + + + For example, assume streams A and B share a parent, and streams C and D both depend on + stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, + then stream C receives all the resources dedicated to stream A. If stream A is removed + from the tree, the weight of stream A is divided between streams C and D. If stream D + is still unable to proceed, this results in stream C receiving a reduced proportion of + resources. For equal starting weights, C receives one third, rather than one half, of + available resources. + + + It is possible for a stream to become closed while prioritization information that + creates a dependency on that stream is in transit. If a stream identified in a + dependency has no associated priority information, then the dependent stream is instead + assigned a default priority. This potentially creates + suboptimal prioritization, since the stream could be given a priority that is different + to what is intended. + + + To avoid these problems, an endpoint SHOULD retain stream prioritization state for a + period after streams become closed. The longer state is retained, the lower the chance + that streams are assigned incorrect or default priority values. + + + This could create a large state burden for an endpoint, so this state MAY be limited. + An endpoint MAY apply a fixed upper limit on the number of closed streams for which + prioritization state is tracked to limit state exposure. The amount of additional state + an endpoint maintains could be dependent on load; under high load, prioritization state + can be discarded to limit resource commitments. In extreme cases, an endpoint could + even discard prioritization state for active or reserved streams. If a fixed limit is + applied, endpoints SHOULD maintain state for at least as many streams as allowed by + their setting for SETTINGS_MAX_CONCURRENT_STREAMS. + + + An endpoint receiving a PRIORITY frame that changes the priority of a + closed stream SHOULD alter the dependencies of the streams that depend on it, if it has + retained enough state to do so. + +
    + +
    + + Providing priority information is optional. Streams are assigned a non-exclusive + dependency on stream 0x0 by default. Pushed streams + initially depend on their associated stream. In both cases, streams are assigned a + default weight of 16. + +
    +
    + +
    + + HTTP/2 framing permits two classes of error: + + + An error condition that renders the entire connection unusable is a connection error. + + + An error in an individual stream is a stream error. + + + + + A list of error codes is included in . + + +
    + + A connection error is any error which prevents further processing of the framing layer, + or which corrupts any connection state. + + + An endpoint that encounters a connection error SHOULD first send a GOAWAY + frame () with the stream identifier of the last stream that it + successfully received from its peer. The GOAWAY frame includes an error + code that indicates why the connection is terminating. After sending the + GOAWAY frame, the endpoint MUST close the TCP connection. + + + It is possible that the GOAWAY will not be reliably received by the + receiving endpoint (see ). In the event of a connection error, + GOAWAY only provides a best effort attempt to communicate with the peer + about why the connection is being terminated. + + + An endpoint can end a connection at any time. In particular, an endpoint MAY choose to + treat a stream error as a connection error. Endpoints SHOULD send a + GOAWAY frame when ending a connection, providing that circumstances + permit it. + +
    + +
    + + A stream error is an error related to a specific stream that does not affect processing + of other streams. + + + An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error + occurred. The RST_STREAM frame includes an error code that indicates the + type of error. + + + A RST_STREAM is the last frame that an endpoint can send on a stream. + The peer that sends the RST_STREAM frame MUST be prepared to receive any + frames that were sent or enqueued for sending by the remote peer. These frames can be + ignored, except where they modify connection state (such as the state maintained for + header compression, or flow control). + + + Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for + any stream. However, an endpoint MAY send additional RST_STREAM frames if + it receives frames on a closed stream after more than a round-trip time. This behavior + is permitted to deal with misbehaving implementations. + + + An endpoint MUST NOT send a RST_STREAM in response to an + RST_STREAM frame, to avoid looping. + +
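With the Framer used by the tests above, the two error classes might surface roughly as follows (resetStream and abortConnection are invented wrappers; WriteRSTStream and WriteGoAway are the package's real Framer methods):

    import (
        "net"

        "golang.org/x/net/http2"
    )

    // A stream error: reset just the affected stream with an error code;
    // other streams continue unaffected.
    func resetStream(fr *http2.Framer, streamID uint32) error {
        return fr.WriteRSTStream(streamID, http2.ErrCodeProtocol)
    }

    // A connection error: send GOAWAY carrying the last stream successfully
    // received from the peer, then close the TCP connection.
    func abortConnection(fr *http2.Framer, conn net.Conn, lastStreamID uint32) error {
        if err := fr.WriteGoAway(lastStreamID, http2.ErrCodeProtocol, nil); err != nil {
            return err
        }
        return conn.Close()
    }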
    + +
    + + If the TCP connection is closed or reset while streams remain in open or half closed + states, then the endpoint MUST assume that those streams were abnormally interrupted and + could be incomplete. + +
    +
    + +
    + + HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide + additional services or alter any aspect of the protocol, within the limitations described + in this section. Extensions are effective only within the scope of a single HTTP/2 + connection. + + + Extensions are permitted to use new frame types, new + settings, or new error + codes. Registries are established for managing these extension points: frame types, settings and + error codes. + + + Implementations MUST ignore unknown or unsupported values in all extensible protocol + elements. Implementations MUST discard frames that have unknown or unsupported types. + This means that any of these extension points can be safely used by extensions without + prior arrangement or negotiation. However, extension frames that appear in the middle of + a header block are not permitted; these MUST be treated + as a connection error of type + PROTOCOL_ERROR. + + + However, extensions that could change the semantics of existing protocol components MUST + be negotiated before being used. For example, an extension that changes the layout of the + HEADERS frame cannot be used until the peer has given a positive signal + that this is acceptable. In this case, it could also be necessary to coordinate when the + revised layout comes into effect. Note that treating any frame other than + DATA frames as flow controlled is such a change in semantics, and can only + be done through negotiation. + + + This document doesn't mandate a specific method for negotiating the use of an extension, + but notes that a setting could be used for that + purpose. If both peers set a value that indicates willingness to use the extension, then + the extension can be used. If a setting is used for extension negotiation, the initial + value MUST be defined so that the extension is initially disabled. + +
    +
    + +
+
+  This specification defines a number of frame types, each identified by a unique 8-bit type
+  code. Each frame type serves a distinct purpose either in the establishment and management
+  of the connection as a whole, or of individual streams.
+
+  The transmission of specific frame types can alter the state of a connection. If endpoints
+  fail to maintain a synchronized view of the connection state, successful communication
+  within the connection will no longer be possible. Therefore, it is important that endpoints
+  have a shared comprehension of how the state is affected by the use of any given frame.
+
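+
+  As a non-normative aid, the fixed 9-octet frame header that precedes every frame
+  payload can be decoded as in the following TypeScript sketch (assuming the Node.js
+  Buffer API; the layout assumed here is a 24-bit length, 8-bit type, 8-bit flags, and
+  a reserved bit followed by a 31-bit stream identifier):
+
+    interface FrameHeader {
+      length: number;   // 24-bit payload length
+      type: number;     // 8-bit frame type code (e.g. 0x0 = DATA, 0x1 = HEADERS)
+      flags: number;    // 8-bit flags field
+      streamId: number; // 31-bit stream identifier (the high bit is reserved)
+    }
+
+    function readFrameHeader(buf: Buffer): FrameHeader {
+      return {
+        length: buf.readUIntBE(0, 3),
+        type: buf.readUInt8(3),
+        flags: buf.readUInt8(4),
+        streamId: buf.readUInt32BE(5) & 0x7fffffff, // mask the reserved bit
+      };
+    }
+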
    + + DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated + with a stream. One or more DATA frames are used, for instance, to carry HTTP request or + response payloads. + + + DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to + obscure the size of messages. + +
    + +
+
+  The DATA frame contains the following fields:
+
+    Pad Length:
+    An 8-bit field containing the length of the frame padding in units of octets. This
+    field is optional and is only present if the PADDED flag is set.
+
+    Data:
+    Application data. The amount of data is the remainder of the frame payload after
+    subtracting the length of the other fields that are present.
+
+    Padding:
+    Padding octets that contain no application semantic value. Padding octets MUST be set
+    to zero when sending and ignored when receiving.
+
+  The DATA frame defines the following flags:
+
+    END_STREAM (0x1):
+    Bit 1 being set indicates that this frame is the last that the endpoint will send for
+    the identified stream. Setting this flag causes the stream to enter one of the "half
+    closed" states or the "closed" state.
+
+    PADDED (0x8):
+    Bit 4 being set indicates that the Pad Length field and any padding that it describes
+    is present.
+
+  DATA frames MUST be associated with a stream. If a DATA frame is received whose stream
+  identifier field is 0x0, the recipient MUST respond with a connection error of type
+  PROTOCOL_ERROR.
+
+  DATA frames are subject to flow control and can only be sent when a stream is in the
+  "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow
+  control, including Pad Length and Padding fields if present. If a DATA frame is received
+  whose stream is not in "open" or "half closed (local)" state, the recipient MUST respond
+  with a stream error of type STREAM_CLOSED.
+
+  The total number of padding octets is determined by the value of the Pad Length field. If
+  the length of the padding is the length of the frame payload or greater, the recipient
+  MUST treat this as a connection error of type PROTOCOL_ERROR.
+
+  A frame can be increased in size by one octet by including a Pad Length field with a
+  value of zero.
+
+  Padding is a security feature; see Security Considerations.
+
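+
+  A non-normative TypeScript sketch of the padding rules above (the PADDED flag value
+  0x8 corresponds to "Bit 4"; Buffer is the Node.js byte-array type):
+
+    const FLAG_PADDED = 0x8;
+
+    function parseDataPayload(flags: number, payload: Buffer): Buffer {
+      if ((flags & FLAG_PADDED) === 0) {
+        return payload; // no Pad Length field, the whole payload is data
+      }
+      const padLength = payload.readUInt8(0);
+      // Padding that is the length of the payload or greater is a connection
+      // error of type PROTOCOL_ERROR.
+      if (padLength >= payload.length) {
+        throw new Error('PROTOCOL_ERROR: padding exceeds frame payload');
+      }
+      // Data is what remains after the Pad Length octet and trailing padding.
+      return payload.subarray(1, payload.length - padLength);
+    }
+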
    + +
    + + The HEADERS frame (type=0x1) is used to open a stream, + and additionally carries a header block fragment. HEADERS frames can be sent on a stream + in the "open" or "half closed (remote)" states. + +
    + +
+
+  The HEADERS frame payload has the following fields:
+
+    Pad Length:
+    An 8-bit field containing the length of the frame padding in units of octets. This
+    field is only present if the PADDED flag is set.
+
+    E:
+    A single bit flag indicating that the stream dependency is exclusive. This field is
+    only present if the PRIORITY flag is set.
+
+    Stream Dependency:
+    A 31-bit stream identifier for the stream that this stream depends on. This field is
+    only present if the PRIORITY flag is set.
+
+    Weight:
+    An 8-bit weight for the stream. Add one to the value to obtain a weight between 1 and
+    256. This field is only present if the PRIORITY flag is set.
+
+    Header Block Fragment:
+    A header block fragment.
+
+    Padding:
+    Padding octets that contain no application semantic value. Padding octets MUST be set
+    to zero when sending and ignored when receiving.
+
+  The HEADERS frame defines the following flags:
+
+    END_STREAM (0x1):
+    Bit 1 being set indicates that the header block is the last that the endpoint will
+    send for the identified stream. Setting this flag causes the stream to enter one of
+    the "half closed" states.
+
+    A HEADERS frame carries the END_STREAM flag that signals the end of a stream.
+    However, a HEADERS frame with the END_STREAM flag set can be followed by
+    CONTINUATION frames on the same stream. Logically, the CONTINUATION frames are part
+    of the HEADERS frame.
+
+    END_HEADERS (0x4):
+    Bit 3 being set indicates that this frame contains an entire header block and is not
+    followed by any CONTINUATION frames.
+
+    A HEADERS frame without the END_HEADERS flag set MUST be followed by a CONTINUATION
+    frame for the same stream. A receiver MUST treat the receipt of any other type of
+    frame or a frame on a different stream as a connection error of type PROTOCOL_ERROR.
+
+    PADDED (0x8):
+    Bit 4 being set indicates that the Pad Length field and any padding that it
+    describes is present.
+
+    PRIORITY (0x20):
+    Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight
+    fields are present.
+
+  The payload of a HEADERS frame contains a header block fragment. A header block that
+  does not fit within a HEADERS frame is continued in a CONTINUATION frame.
+
+  HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose
+  stream identifier field is 0x0, the recipient MUST respond with a connection error of
+  type PROTOCOL_ERROR.
+
+  The HEADERS frame changes the connection state as described in the section on header
+  compression.
+
+  The HEADERS frame includes optional padding. Padding fields and flags are identical to
+  those defined for DATA frames.
+
+  Prioritization information in a HEADERS frame is logically equivalent to a separate
+  PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in stream
+  prioritization when new streams are created. Prioritization fields in HEADERS frames
+  subsequent to the first on a stream reprioritize the stream.
+
    + +
    + + The PRIORITY frame (type=0x2) specifies the sender-advised + priority of a stream. It can be sent at any time for an existing stream, including + closed streams. This enables reprioritization of existing streams. + +
    + +
+
+  The payload of a PRIORITY frame contains the following fields:
+
+    E:
+    A single bit flag indicating that the stream dependency is exclusive.
+
+    Stream Dependency:
+    A 31-bit stream identifier for the stream that this stream depends on.
+
+    Weight:
+    An 8-bit weight for the identified stream dependency. Add one to the value to obtain
+    a weight between 1 and 256.
+
+  The PRIORITY frame does not define any flags.
+
+  The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received
+  with a stream identifier of 0x0, the recipient MUST respond with a connection error of type
+  PROTOCOL_ERROR.
+
+  The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open",
+  "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be
+  sent between consecutive frames that comprise a single header block. Note that this
+  frame could arrive after processing or frame sending has completed, which would cause
+  it to have no effect on the current stream. For a stream that is in the "half closed
+  (remote)" or "closed" state, this frame can only affect processing of the current
+  stream and not frame transmission.
+
+  The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state.
+  This allows for the reprioritization of a group of dependent streams by altering the
+  priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a
+  closed stream risks being ignored due to the peer having discarded priority state
+  information for that stream.
+
    + +
    + + The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by + the initiator of a stream, it indicates that they wish to cancel the stream or that an + error condition has occurred. When sent by the receiver of a stream, it indicates that + either the receiver is rejecting the stream, requesting that the stream be cancelled, or + that an error condition has occurred. + +
    + +
    + + + The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being + terminated. + + + + The RST_STREAM frame does not define any flags. + + + + The RST_STREAM frame fully terminates the referenced stream and causes it to enter the + closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send + additional frames for that stream, with the exception of PRIORITY. However, + after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process + additional frames sent on the stream that might have been sent by the peer prior to the + arrival of the RST_STREAM. + + + + RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received + with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + + + RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM + frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type + PROTOCOL_ERROR. + + +
    + +
+
+  The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+  communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is
+  also used to acknowledge the receipt of those parameters. Individually, a SETTINGS
+  parameter can also be referred to as a "setting".
+
+  SETTINGS parameters are not negotiated; they describe characteristics of the sending peer,
+  which are used by the receiving peer. Different values for the same parameter can be
+  advertised by each peer. For example, a client might set a high initial flow control
+  window, whereas a server might set a lower value to conserve resources.
+
+  A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be
+  sent at any other time by either endpoint over the lifetime of the connection.
+  Implementations MUST support all of the parameters defined by this specification.
+
+  Each parameter in a SETTINGS frame replaces any existing value for that parameter.
+  Parameters are processed in the order in which they appear, and a receiver of a SETTINGS
+  frame does not need to maintain any state other than the current value of its
+  parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by
+  a receiver.
+
+  SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS
+  frame defines the following flag:
+
+    ACK (0x1):
+    Bit 1 being set indicates that this frame acknowledges receipt and application of the
+    peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST
+    be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value
+    other than 0 MUST be treated as a connection error of type FRAME_SIZE_ERROR. For more
+    information, see Settings Synchronization.
+
+  SETTINGS frames always apply to a connection, never a single stream. The stream
+  identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS
+  frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond
+  with a connection error of type PROTOCOL_ERROR.
+
+  The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame
+  MUST be treated as a connection error of type PROTOCOL_ERROR.
+
    + + The payload of a SETTINGS frame consists of zero or more parameters, each consisting of + an unsigned 16-bit setting identifier and an unsigned 32-bit value. + + +
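+
+  A non-normative decoding sketch of this payload layout in TypeScript (assuming the
+  Node.js Buffer API; per the text above, a badly formed payload is a connection error
+  of type PROTOCOL_ERROR):
+
+    // Each setting is an unsigned 16-bit identifier followed by an unsigned
+    // 32-bit value; later occurrences of an identifier replace earlier ones.
+    function parseSettingsPayload(payload: Buffer): Map<number, number> {
+      if (payload.length % 6 !== 0) {
+        // A badly formed or incomplete SETTINGS frame is a connection error.
+        throw new Error('PROTOCOL_ERROR: SETTINGS payload not a multiple of 6 octets');
+      }
+      const settings = new Map<number, number>();
+      for (let offset = 0; offset < payload.length; offset += 6) {
+        settings.set(payload.readUInt16BE(offset), payload.readUInt32BE(offset + 2));
+      }
+      return settings;
+    }
+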
    + +
    +
    + +
+
+  The following parameters are defined:
+
+    SETTINGS_HEADER_TABLE_SIZE (0x1):
+    Allows the sender to inform the remote endpoint of the maximum size of the header
+    compression table used to decode header blocks, in octets. The encoder can select
+    any size equal to or less than this value by using signaling specific to the
+    header compression format inside a header block. The initial value is 4,096
+    octets.
+
+    SETTINGS_ENABLE_PUSH (0x2):
+    This setting can be used to disable server push. An endpoint MUST NOT send a
+    PUSH_PROMISE frame if it receives this parameter set to a value of 0. An endpoint
+    that has both set this parameter to 0 and had it acknowledged MUST treat the
+    receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR.
+
+    The initial value is 1, which indicates that server push is permitted. Any value
+    other than 0 or 1 MUST be treated as a connection error of type PROTOCOL_ERROR.
+
+    SETTINGS_MAX_CONCURRENT_STREAMS (0x3):
+    Indicates the maximum number of concurrent streams that the sender will allow.
+    This limit is directional: it applies to the number of streams that the sender
+    permits the receiver to create. Initially there is no limit to this value. It is
+    recommended that this value be no smaller than 100, so as to not unnecessarily
+    limit parallelism.
+
+    A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special
+    by endpoints. A zero value does prevent the creation of new streams, however this
+    can also happen for any limit that is exhausted with active streams. Servers
+    SHOULD only set a zero value for short durations; if a server does not wish to
+    accept requests, closing the connection could be preferable.
+
+    SETTINGS_INITIAL_WINDOW_SIZE (0x4):
+    Indicates the sender's initial window size (in octets) for stream level flow
+    control. The initial value is 2^16-1 (65,535) octets.
+
+    This setting affects the window size of all streams, including existing streams.
+
+    Values above the maximum flow control window size of 2^31-1 MUST
+    be treated as a connection error of type FLOW_CONTROL_ERROR.
+
+    SETTINGS_MAX_FRAME_SIZE (0x5):
+    Indicates the size of the largest frame payload that the sender is willing to
+    receive, in octets.
+
+    The initial value is 2^14 (16,384) octets. The value advertised by
+    an endpoint MUST be between this initial value and the maximum allowed frame size
+    (2^24-1 or 16,777,215 octets), inclusive. Values outside this range
+    MUST be treated as a connection error of type PROTOCOL_ERROR.
+
+    SETTINGS_MAX_HEADER_LIST_SIZE (0x6):
+    This advisory setting informs a peer of the maximum size of header list that the
+    sender is prepared to accept, in octets. The value is based on the uncompressed
+    size of header fields, including the length of the name and value in octets plus
+    an overhead of 32 octets for each header field.
+
+    For any given request, a lower limit than what is advertised MAY be enforced. The
+    initial value of this setting is unlimited.
+
+  An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier
+  MUST ignore that setting.
+
    + +
    + + Most values in SETTINGS benefit from or require an understanding of when the peer has + received and applied the changed parameter values. In order to provide + such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag + is not set MUST apply the updated parameters as soon as possible upon receipt. + + + The values in the SETTINGS frame MUST be processed in the order they appear, with no + other frame processing between values. Unsupported parameters MUST be ignored. Once + all values have been processed, the recipient MUST immediately emit a SETTINGS frame + with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender + of the altered parameters can rely on the setting having been applied. + + + If the sender of a SETTINGS frame does not receive an acknowledgement within a + reasonable amount of time, it MAY issue a connection error of type + SETTINGS_TIMEOUT. + +
    +
    + +
+
+  The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of
+  streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned
+  31-bit identifier of the stream the endpoint plans to create along with a set of headers
+  that provide additional context for the stream. The section on server push contains a
+  thorough description of the use of PUSH_PROMISE frames.
+
    + +
    + + The PUSH_PROMISE frame payload has the following fields: + + + An 8-bit field containing the length of the frame padding in units of octets. This + field is only present if the PADDED flag is set. + + + A single reserved bit. + + + An unsigned 31-bit integer that identifies the stream that is reserved by the + PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next + stream sent by the sender (see new stream + identifier). + + + A header block fragment containing request header + fields. + + + Padding octets. + + + + + + The PUSH_PROMISE frame defines the following flags: + + + + Bit 3 being set indicates that this frame contains an entire header block and is not followed by any + CONTINUATION frames. + + + A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a + CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any + other type of frame or a frame on a different stream as a connection error of type + PROTOCOL_ERROR. + + + + + Bit 4 being set indicates that the Pad Length field and any padding that it + describes is present. + + + + + + + PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream + identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the + stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type + PROTOCOL_ERROR. + + + + Promised streams are not required to be used in the order they are promised. The + PUSH_PROMISE only reserves stream identifiers for later use. + + + + PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the + peer endpoint is set to 0. An endpoint that has set this setting and has received + acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type + PROTOCOL_ERROR. + + + Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a + RST_STREAM referencing the promised stream identifier back to the sender of + the PUSH_PROMISE. + + + + A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for + header compression. PUSH_PROMISE also reserves a stream for later use, causing the + promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a + stream unless that stream is either "open" or "half closed (remote)"; the sender MUST + ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST + be in the "idle" state). + + + Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream + state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a + stream that is neither "open" nor "half closed (local)" as a connection error of type + PROTOCOL_ERROR. However, an endpoint that has sent + RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that + might have been created before the RST_STREAM frame is received and + processed. + + + A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a + stream that is not currently in the "idle" state) as a connection error of type + PROTOCOL_ERROR. + + + + The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical + to those defined for DATA frames. + +
    + +
    + + The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the + sender, as well as determining whether an idle connection is still functional. PING + frames can be sent from any endpoint. + +
    + +
    + + + In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. + A sender can include any value it chooses and use those bytes in any fashion. + + + Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with + the ACK flag set in response, with an identical payload. PING responses SHOULD be given + higher priority than any other frame. + + + + The PING frame defines the following flags: + + + Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST + set this flag in PING responses. An endpoint MUST NOT respond to PING frames + containing this flag. + + + + + PING frames are not associated with any individual stream. If a PING frame is received + with a stream identifier field value other than 0x0, the recipient MUST respond with a + connection error of type + PROTOCOL_ERROR. + + + Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type + FRAME_SIZE_ERROR. + + +
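+
+  A non-normative sketch of the receive-side rules in TypeScript (the sendFrame helper
+  and the frame-type constant are assumptions of the sketch):
+
+    const PING = 0x6;      // frame type code, as defined above
+    const FLAG_ACK = 0x1;  // "Bit 1" above
+
+    function onPing(conn: { sendFrame(type: number, flags: number,
+                                      streamId: number, payload: Buffer): void },
+                    flags: number, payload: Buffer): void {
+      if (payload.length !== 8) {
+        throw new Error('FRAME_SIZE_ERROR: PING payload MUST be 8 octets');
+      }
+      if (flags & FLAG_ACK) {
+        return; // a PING response; an endpoint MUST NOT respond to it
+      }
+      conn.sendFrame(PING, FLAG_ACK, 0x0, payload); // echo the identical payload
+    }
+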
    + +
+
+  The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this
+  connection. GOAWAY can be sent by either the client or the server. Once sent, the sender
+  will ignore frames sent on any new streams with identifiers higher than the included last
+  stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the
+  connection, although a new connection can be established for new streams.
+
+  The purpose of this frame is to allow an endpoint to gracefully stop accepting new
+  streams, while still finishing processing of previously established streams. This enables
+  administrative actions, like server maintenance.
+
+  There is an inherent race condition between an endpoint starting new streams and the
+  remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream
+  identifier of the last peer-initiated stream which was or might be processed on the
+  sending endpoint in this connection. For instance, if the server sends a GOAWAY frame,
+  the identified stream is the highest numbered stream initiated by the client.
+
+  If the receiver of the GOAWAY has sent data on streams with a higher stream identifier
+  than what is indicated in the GOAWAY frame, those streams are not or will not be
+  processed. The receiver of the GOAWAY frame can treat the streams as though they had
+  never been created at all, thereby allowing those streams to be retried later on a new
+  connection.
+
+  Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote
+  can know whether a stream has been partially processed or not. For example, if an HTTP
+  client sends a POST at the same time that a server closes a connection, the client cannot
+  know if the server started to process that POST request if the server does not send a
+  GOAWAY frame to indicate what streams it might have acted on.
+
+  An endpoint might choose to close a connection without sending GOAWAY for misbehaving
+  peers.
+
    + +
+
+  The GOAWAY frame does not define any flags.
+
+  The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat
+  a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type
+  PROTOCOL_ERROR.
+
+  The last stream identifier in the GOAWAY frame contains the highest numbered stream
+  identifier on which the sender of the GOAWAY frame might have taken some action, or
+  might yet take action. All streams up to and including the identified stream might
+  have been processed in some way. The last stream identifier can be set to 0 if no streams
+  were processed.
+
+  In this context, "processed" means that some data from the stream was passed to some
+  higher layer of software that might have taken some action as a result.
+
+  If a connection terminates without a GOAWAY frame, the last stream identifier is
+  effectively the highest possible stream identifier.
+
+  On streams with lower or equal numbered identifiers that were not closed completely prior
+  to the connection being closed, re-attempting requests, transactions, or any protocol
+  activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or
+  DELETE. Any protocol activity that uses higher numbered streams can be safely retried
+  using a new connection.
+
+  Activity on streams numbered lower or equal to the last stream identifier might still
+  complete successfully. The sender of a GOAWAY frame might gracefully shut down a
+  connection by sending a GOAWAY frame, maintaining the connection in an open state until
+  all in-progress streams complete.
+
+  An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an
+  endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could
+  subsequently encounter a condition that requires immediate termination of the connection.
+  The last stream identifier from the last GOAWAY frame received indicates which streams
+  could have been acted upon. Endpoints MUST NOT increase the value they send in the last
+  stream identifier, since the peers might already have retried unprocessed requests on
+  another connection.
+
+  A client that is unable to retry requests loses all requests that are in flight when the
+  server closes the connection. This is especially true for intermediaries that might
+  not be serving clients using HTTP/2. A server that is attempting to gracefully shut down
+  a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to
+  2^31-1 and a NO_ERROR code. This signals to the client that
+  a shutdown is imminent and that no further requests can be initiated. After waiting at
+  least one round trip time, the server can send another GOAWAY frame with an updated last
+  stream identifier. This ensures that a connection can be cleanly shut down without losing
+  requests.
+
+  After sending a GOAWAY frame, the sender can discard frames for streams with identifiers
+  higher than the identified last stream. However, any frames that alter connection state
+  cannot be completely ignored. For instance, HEADERS,
+  PUSH_PROMISE and CONTINUATION frames MUST be minimally
+  processed to ensure the state maintained for header compression is consistent;
+  similarly DATA frames MUST be counted toward the connection flow
+  control window. Failure to process these frames can cause flow control or header
+  compression state to become unsynchronized.
+ + + + The GOAWAY frame also contains a 32-bit error code that + contains the reason for closing the connection. + + + Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug + data is intended for diagnostic purposes only and carries no semantic value. Debug + information could contain security- or privacy-sensitive data. Logged or otherwise + persistently stored debug data MUST have adequate safeguards to prevent unauthorized + access. + +
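+
+  The graceful shutdown pattern recommended above can be sketched as follows
+  (non-normative TypeScript; the Connection shape and the NO_ERROR value are
+  assumptions of the sketch):
+
+    const NO_ERROR = 0x0;
+
+    async function gracefulShutdown(
+      conn: { lastProcessedStreamId: number;
+              sendGoaway(lastStreamId: number, errorCode: number): void },
+      roundTripMs: number,
+    ): Promise<void> {
+      // First GOAWAY: last stream identifier 2^31-1 signals imminent shutdown
+      // without committing to what has been processed.
+      conn.sendGoaway(2 ** 31 - 1, NO_ERROR);
+      // Wait at least one round trip so in-flight requests can arrive.
+      await new Promise<void>((resolve) => setTimeout(resolve, roundTripMs));
+      // Second GOAWAY: the definitive, never-increasing last stream identifier.
+      conn.sendGoaway(conn.lastProcessedStreamId, NO_ERROR);
+    }
+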
    + +
+
+  The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see the Flow
+  Control section for an overview.
+
+  Flow control operates at two levels: on each individual stream and on the entire
+  connection.
+
+  Both types of flow control are hop-by-hop; that is, only between the two endpoints.
+  Intermediaries do not forward WINDOW_UPDATE frames between dependent connections.
+  However, throttling of data transfer by any receiver can indirectly cause the propagation
+  of flow control information toward the original sender.
+
+  Flow control only applies to frames that are identified as being subject to flow control.
+  Of the frame types defined in this document, this includes only DATA frames.
+  Frames that are exempt from flow control MUST be accepted and processed, unless the
+  receiver is unable to assign resources to handling the frame. A receiver MAY respond with
+  a stream error or connection error of type
+  FLOW_CONTROL_ERROR if it is unable to accept a frame.
+
    + +
+
+  The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer
+  indicating the number of octets that the sender can transmit in addition to the existing
+  flow control window. The legal range for the increment to the flow control window is 1 to
+  2^31-1 (0x7fffffff) octets.
+
+  The WINDOW_UPDATE frame does not define any flags.
+
+  The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the
+  former case, the frame's stream identifier indicates the affected stream; in the latter,
+  the value "0" indicates that the entire connection is the subject of the frame.
+
+  A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window
+  increment of 0 as a stream error of type
+  PROTOCOL_ERROR; errors on the connection flow control window MUST be
+  treated as a connection error.
+
+  WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+  This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)"
+  or "closed" stream. A receiver MUST NOT treat this as an error.
+
+  A receiver that receives a flow controlled frame MUST always account for its contribution
+  against the connection flow control window, unless the receiver treats this as a
+  connection error. This is necessary even if the
+  frame is in error. Since the sender counts the frame toward the flow control window, if
+  the receiver does not, the flow control window at sender and receiver can become
+  different.
+
+
+  Flow control in HTTP/2 is implemented using a window kept by each sender on every
+  stream. The flow control window is a simple integer value that indicates how many octets
+  of data the sender is permitted to transmit; as such, its size is a measure of the
+  buffering capacity of the receiver.
+
+  Two flow control windows are applicable: the stream flow control window and the
+  connection flow control window. The sender MUST NOT send a flow controlled frame with a
+  length that exceeds the space available in either of the flow control windows advertised
+  by the receiver. Frames with zero length with the END_STREAM flag set (that is, an
+  empty DATA frame) MAY be sent if there is no available space in either
+  flow control window.
+
+  For flow control calculations, the 9 octet frame header is not counted.
+
+  After sending a flow controlled frame, the sender reduces the space available in both
+  windows by the length of the transmitted frame.
+
+  The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up
+  space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream
+  and connection level flow control windows.
+
+  A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the
+  amount specified in the frame.
+
+  A sender MUST NOT allow a flow control window to exceed 2^31-1 octets.
+  If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this
+  maximum it MUST terminate either the stream or the connection, as appropriate. For
+  streams, the sender sends a RST_STREAM with an error code of
+  FLOW_CONTROL_ERROR; for the connection, a GOAWAY
+  frame with a FLOW_CONTROL_ERROR code.
+
+  Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are
+  completely asynchronous with respect to each other. This property allows a receiver to
+  aggressively update the window size kept by the sender to prevent streams from stalling.
+
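+
+  A non-normative sketch of sender-side window accounting (a real sender keeps one such
+  window per stream plus one for the connection, and checks both before sending):
+
+    class SendWindow {
+      // 65,535 octets is the default initial window size.
+      constructor(private available: number = 65535) {}
+
+      // Called before transmitting a flow controlled frame; the 9 octet frame
+      // header is not counted, only the payload length.
+      consume(payloadLength: number): void {
+        if (payloadLength > this.available) {
+          throw new Error('cannot send: frame exceeds the advertised window');
+        }
+        this.available -= payloadLength;
+      }
+
+      // Called on receipt of a WINDOW_UPDATE for this window.
+      update(increment: number): void {
+        this.available += increment;
+        if (this.available > 2 ** 31 - 1) {
+          // RST_STREAM for a stream window, GOAWAY for the connection window,
+          // in both cases with FLOW_CONTROL_ERROR.
+          throw new Error('FLOW_CONTROL_ERROR: window exceeds 2^31-1');
+        }
+      }
+    }
+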
    + +
    + + When an HTTP/2 connection is first established, new streams are created with an initial + flow control window size of 65,535 octets. The connection flow control window is 65,535 + octets. Both endpoints can adjust the initial window size for new streams by including + a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS + frame that forms part of the connection preface. The connection flow control window can + only be changed using WINDOW_UPDATE frames. + + + Prior to receiving a SETTINGS frame that sets a value for + SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default + initial window size when sending flow controlled frames. Similarly, the connection flow + control window is set to the default initial window size until a WINDOW_UPDATE frame is + received. + + + A SETTINGS frame can alter the initial flow control window size for all + current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, + a receiver MUST adjust the size of all stream flow control windows that it maintains by + the difference between the new value and the old value. + + + A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in + a flow control window to become negative. A sender MUST track the negative flow control + window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE + frames that cause the flow control window to become positive. + + + For example, if the client sends 60KB immediately on connection establishment, and the + server sets the initial window size to be 16KB, the client will recalculate the + available flow control window to be -44KB on receipt of the SETTINGS + frame. The client retains a negative flow control window until WINDOW_UPDATE frames + restore the window to being positive, after which the client can resume sending. + + + A SETTINGS frame cannot alter the connection flow control window. + + + An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that + causes any flow control window to exceed the maximum size as a connection error of type + FLOW_CONTROL_ERROR. + +
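+
+  The required adjustment can be sketched as follows (non-normative TypeScript;
+  streamWindows maps stream identifiers to their current send windows):
+
+    function applyInitialWindowSizeChange(
+      streamWindows: Map<number, number>,
+      oldValue: number, newValue: number,
+    ): void {
+      const delta = newValue - oldValue;
+      for (const [streamId, window] of streamWindows) {
+        const adjusted = window + delta; // can legally become negative
+        if (adjusted > 2 ** 31 - 1) {
+          throw new Error('FLOW_CONTROL_ERROR: window exceeds the maximum size');
+        }
+        // A negative window blocks sending until WINDOW_UPDATE frames make it
+        // positive again.
+        streamWindows.set(streamId, adjusted);
+      }
+    }
+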
    + +
    + + A receiver that wishes to use a smaller flow control window than the current size can + send a new SETTINGS frame. However, the receiver MUST be prepared to + receive data that exceeds this window size, since the sender might send data that + exceeds the lower limit prior to processing the SETTINGS frame. + + + After sending a SETTINGS frame that reduces the initial flow control window size, a + receiver has two options for handling streams that exceed flow control limits: + + + The receiver can immediately send RST_STREAM with + FLOW_CONTROL_ERROR error code for the affected streams. + + + The receiver can accept the streams and tolerate the resulting head of line + blocking, sending WINDOW_UPDATE frames as it consumes data. + + + +
    +
    + +
    + + The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can + be sent on an existing stream, as long as the preceding frame is on the same stream and is + a HEADERS, PUSH_PROMISE or CONTINUATION frame without the + END_HEADERS flag set. + + +
    + +
    + + The CONTINUATION frame payload contains a header block + fragment. + + + + The CONTINUATION frame defines the following flag: + + + + Bit 3 being set indicates that this frame ends a header + block. + + + If the END_HEADERS bit is not set, this frame MUST be followed by another + CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or + a frame on a different stream as a connection + error of type PROTOCOL_ERROR. + + + + + + + The CONTINUATION frame changes the connection state as defined in . + + + + CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received + whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. + + + + A CONTINUATION frame MUST be preceded by a HEADERS, + PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A + recipient that observes violation of this rule MUST respond with a connection error of type + PROTOCOL_ERROR. + +
    +
    + +
+
+  Error codes are 32-bit fields that are used in RST_STREAM and
+  GOAWAY frames to convey the reasons for the stream or connection error.
+
+  Error codes share a common code space. Some error codes apply only to either streams or the
+  entire connection and have no defined semantics in the other context.
+
+  The following error codes are defined:
+
+    NO_ERROR (0x0):
+    The associated condition is not a result of an error. For example, a
+    GOAWAY might include this code to indicate graceful shutdown of a connection.
+
+    PROTOCOL_ERROR (0x1):
+    The endpoint detected an unspecific protocol error. This error is for use when a more
+    specific error code is not available.
+
+    INTERNAL_ERROR (0x2):
+    The endpoint encountered an unexpected internal error.
+
+    FLOW_CONTROL_ERROR (0x3):
+    The endpoint detected that its peer violated the flow control protocol.
+
+    SETTINGS_TIMEOUT (0x4):
+    The endpoint sent a SETTINGS frame, but did not receive a response in a
+    timely manner. See Settings Synchronization.
+
+    STREAM_CLOSED (0x5):
+    The endpoint received a frame after a stream was half closed.
+
+    FRAME_SIZE_ERROR (0x6):
+    The endpoint received a frame with an invalid size.
+
+    REFUSED_STREAM (0x7):
+    The endpoint refuses the stream prior to performing any application processing.
+
+    CANCEL (0x8):
+    Used by the endpoint to indicate that the stream is no longer needed.
+
+    COMPRESSION_ERROR (0x9):
+    The endpoint is unable to maintain the header compression context for the connection.
+
+    CONNECT_ERROR (0xa):
+    The connection established in response to a CONNECT
+    request was reset or abnormally closed.
+
+    ENHANCE_YOUR_CALM (0xb):
+    The endpoint detected that its peer is exhibiting a behavior that might be generating
+    excessive load.
+
+    INADEQUATE_SECURITY (0xc):
+    The underlying transport has properties that do not meet minimum security
+    requirements.
+
+  Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be
+  treated by an implementation as being equivalent to INTERNAL_ERROR.
+
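+
+  For implementations, the codes above can be captured directly; the following
+  TypeScript enum lists them in the order defined above, with the numeric values
+  registered for HTTP/2:
+
+    enum ErrorCode {
+      NO_ERROR            = 0x0,
+      PROTOCOL_ERROR      = 0x1,
+      INTERNAL_ERROR      = 0x2,
+      FLOW_CONTROL_ERROR  = 0x3,
+      SETTINGS_TIMEOUT    = 0x4,
+      STREAM_CLOSED       = 0x5,
+      FRAME_SIZE_ERROR    = 0x6,
+      REFUSED_STREAM      = 0x7,
+      CANCEL              = 0x8,
+      COMPRESSION_ERROR   = 0x9,
+      CONNECT_ERROR       = 0xa,
+      ENHANCE_YOUR_CALM   = 0xb,
+      INADEQUATE_SECURITY = 0xc,
+    }
+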
    + +
+
+  HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means
+  that, from the application perspective, the features of the protocol are largely
+  unchanged. To achieve this, all request and response semantics are preserved, although the
+  syntax of conveying those semantics has changed.
+
+  Thus, the specification and requirements of HTTP/1.1 Semantics and Content, Conditional
+  Requests, Range Requests, Caching, and Authentication are applicable to HTTP/2. Selected
+  portions of HTTP/1.1 Message Syntax and Routing, such as the HTTP and HTTPS URI schemes,
+  are also applicable in HTTP/2, but the expression of those semantics for this protocol is
+  defined in the sections below.
+
    + + A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on + the same stream as the request. + + + An HTTP message (request or response) consists of: + + + for a response only, zero or more HEADERS frames (each followed by zero + or more CONTINUATION frames) containing the message headers of + informational (1xx) HTTP responses (see and ), + and + + + one HEADERS frame (followed by zero or more CONTINUATION + frames) containing the message headers (see ), and + + + zero or more DATA frames containing the message payload (see ), and + + + optionally, one HEADERS frame, followed by zero or more + CONTINUATION frames containing the trailer-part, if present (see ). + + + The last frame in the sequence bears an END_STREAM flag, noting that a + HEADERS frame bearing the END_STREAM flag can be followed by + CONTINUATION frames that carry any remaining portions of the header block. + + + Other frames (from any stream) MUST NOT occur between either HEADERS frame + and any CONTINUATION frames that might follow. + + + + Trailing header fields are carried in a header block that also terminates the stream. + That is, a sequence starting with a HEADERS frame, followed by zero or more + CONTINUATION frames, where the HEADERS frame bears an + END_STREAM flag. Header blocks after the first that do not terminate the stream are not + part of an HTTP request or response. + + + A HEADERS frame (and associated CONTINUATION frames) can + only appear at the start or end of a stream. An endpoint that receives a + HEADERS frame without the END_STREAM flag set after receiving a final + (non-informational) status code MUST treat the corresponding request or response as malformed. + + + + An HTTP request/response exchange fully consumes a single stream. A request starts with + the HEADERS frame that puts the stream into an "open" state. The request + ends with a frame bearing END_STREAM, which causes the stream to become "half closed + (local)" for the client and "half closed (remote)" for the server. A response starts with + a HEADERS frame and ends with a frame bearing END_STREAM, which places the + stream in the "closed" state. + + + +
+
+  HTTP/2 removes support for the 101 (Switching Protocols) informational status code.
+
+  The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol.
+  Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate
+  their use.
+
    + +
+
+  HTTP header fields carry information as a series of key-value pairs. For a listing of
+  registered HTTP headers, see the Message Header Field Registry maintained by IANA.
+
+
+  While HTTP/1.x used the message start-line to convey the target URI and method of the
+  request, and the status code for the response, HTTP/2 uses special pseudo-header fields
+  beginning with the ':' character (ASCII 0x3a) for this purpose.
+
+  Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate
+  pseudo-header fields other than those defined in this document.
+
+  Pseudo-header fields are only valid in the context in which they are defined.
+  Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header
+  fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST
+  NOT appear in trailers. Endpoints MUST treat a request or response that contains
+  undefined or invalid pseudo-header fields as malformed.
+
+  Just as in HTTP/1.x, header field names are strings of ASCII characters that are
+  compared in a case-insensitive fashion. However, header field names MUST be converted
+  to lowercase prior to their encoding in HTTP/2. A request or response containing
+  uppercase header field names MUST be treated as malformed.
+
+  All pseudo-header fields MUST appear in the header block before regular header fields.
+  Any request or response that contains a pseudo-header field that appears in a header
+  block after a regular header field MUST be treated as malformed.
+
    + +
+
+  HTTP/2 does not use the Connection header field to
+  indicate connection-specific header fields; in this protocol, connection-specific
+  metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message
+  containing connection-specific header fields; any message containing
+  connection-specific header fields MUST be treated as malformed.
+
+  This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need
+  to remove any header fields nominated by the Connection header field, along with the
+  Connection header field itself. Such intermediaries SHOULD also remove other
+  connection-specific header fields, such as Keep-Alive, Proxy-Connection,
+  Transfer-Encoding and Upgrade, even if they are not nominated by Connection.
+
+  One exception to this is the TE header field, which MAY be present in an HTTP/2
+  request, but when it is MUST NOT contain any value other than "trailers".
+
+  HTTP/2 purposefully does not support upgrade to another protocol. The handshake
+  methods described earlier in this document are believed sufficient to
+  negotiate the use of alternative protocols.
+
    + +
    + + The following pseudo-header fields are defined for HTTP/2 requests: + + + + The :method pseudo-header field includes the HTTP + method (). + + + + + The :scheme pseudo-header field includes the scheme + portion of the target URI (). + + + :scheme is not restricted to http and https schemed URIs. A + proxy or gateway can translate requests for non-HTTP schemes, enabling the use + of HTTP to interact with non-HTTP services. + + + + + The :authority pseudo-header field includes the + authority portion of the target URI (). The authority MUST NOT include the deprecated userinfo subcomponent for http + or https schemed URIs. + + + To ensure that the HTTP/1.1 request line can be reproduced accurately, this + pseudo-header field MUST be omitted when translating from an HTTP/1.1 request + that has a request target in origin or asterisk form (see ). Clients that generate + HTTP/2 requests directly SHOULD use the :authority pseudo-header + field instead of the Host header field. An + intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by + copying the value of the :authority pseudo-header + field. + + + + + The :path pseudo-header field includes the path and + query parts of the target URI (the path-absolute + production from and optionally a '?' character + followed by the query production, see and ). A request in asterisk form includes the value '*' for the + :path pseudo-header field. + + + This pseudo-header field MUST NOT be empty for http + or https URIs; http or + https URIs that do not contain a path component + MUST include a value of '/'. The exception to this rule is an OPTIONS request + for an http or https + URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*' (see ). + + + + + + All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory + pseudo-header fields is malformed. + + + HTTP/2 does not define a way to carry the version identifier that is included in the + HTTP/1.1 request line. + +
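+
+  A non-normative sketch of deriving these pseudo-header fields from a target URI,
+  using the WHATWG URL parser available in Node.js and browsers (origin-form and
+  asterisk-form corner cases are omitted):
+
+    function requestPseudoHeaders(method: string, target: string): Array<[string, string]> {
+      const url = new URL(target);
+      return [
+        [':method', method],
+        [':scheme', url.protocol.replace(/:$/, '')],
+        [':authority', url.host], // host and optional port; never userinfo
+        // An empty path component becomes '/'.
+        [':path', (url.pathname || '/') + url.search],
+      ];
+    }
+
+    // requestPseudoHeaders('GET', 'https://example.org/resource') yields
+    // :method = GET, :scheme = https, :authority = example.org, :path = /resource
+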
    + +
    + + For HTTP/2 responses, a single :status pseudo-header + field is defined that carries the HTTP status code field (see ). This pseudo-header field MUST be included in all + responses, otherwise the response is malformed. + + + HTTP/2 does not define a way to carry the version or reason phrase that is included in + an HTTP/1.1 status line. + +
    + +
    + + The Cookie header field can carry a significant amount of + redundant data. + + + The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). + This header field doesn't follow the list construction rules in HTTP (see ), which prevents cookie-pairs from + being separated into different name-value pairs. This can significantly reduce + compression efficiency as individual cookie-pairs are updated. + + + To allow for better compression efficiency, the Cookie header field MAY be split into + separate header fields, each with one or more cookie-pairs. If there are multiple + Cookie header fields after decompression, these MUST be concatenated into a single + octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") + before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a + generic HTTP server application. + +
    + + Therefore, the following two lists of Cookie header fields are semantically + equivalent. + + +
+
+    cookie: a=b; c=d; e=f
+
+    cookie: a=b
+    cookie: c=d
+    cookie: e=f
+
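+
+  A non-normative sketch of the split and the mandatory recombination (the two octet
+  delimiter 0x3B, 0x20 is the ASCII string "; "):
+
+    // Split one Cookie header field into crumbs for better compression efficiency.
+    function splitCookieHeader(cookie: string): string[] {
+      return cookie.split('; ');
+    }
+
+    // Recombine crumbs before passing the message to a non-HTTP/2 context.
+    function joinCookieCrumbs(crumbs: string[]): string {
+      return crumbs.join('; ');
+    }
+
+    // joinCookieCrumbs(['a=b', 'c=d', 'e=f']) === 'a=b; c=d; e=f'
+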
    + +
+
+  A malformed request or response is one that is an otherwise valid sequence of HTTP/2
+  frames, but is invalid due to the presence of extraneous frames, prohibited
+  header fields, the absence of mandatory header fields, or the inclusion of uppercase
+  header field names.
+
+  A request or response that includes an entity body can include a content-length header
+  field. A request or response is also malformed if the value of a content-length header
+  field does not equal the sum of the DATA frame payload lengths that form the body. A
+  response that is defined to have no payload can have a non-zero content-length header
+  field, even though no content is included in DATA frames.
+
+  Intermediaries that process HTTP requests or responses (i.e., any intermediary not
+  acting as a tunnel) MUST NOT forward a malformed request or response. Malformed
+  requests or responses that are detected MUST be treated as a stream error of type
+  PROTOCOL_ERROR.
+
+  For malformed requests, a server MAY send an HTTP response prior to closing or
+  resetting the stream. Clients MUST NOT accept a malformed response. Note that these
+  requirements are intended to protect against several types of common attacks against
+  HTTP; they are deliberately strict, because being permissive can expose
+  implementations to these vulnerabilities.
+
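+
+  A non-normative sketch of the content-length check described above (it assumes DATA
+  payload lengths with padding already removed, and ignores the exception for responses
+  defined to have no payload):
+
+    function checkContentLength(
+      headers: Map<string, string>,
+      dataPayloadLengths: number[],
+    ): void {
+      const declared = headers.get('content-length');
+      if (declared === undefined) return; // the header field is optional
+      const body = dataPayloadLengths.reduce((sum, len) => sum + len, 0);
+      if (Number(declared) !== body) {
+        // Malformed: treated as a stream error of type PROTOCOL_ERROR.
+        throw new Error('PROTOCOL_ERROR: content-length does not equal DATA total');
+      }
+    }
+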
    +
    + +
    + + This section shows HTTP/1.1 requests and responses, with illustrations of equivalent + HTTP/2 requests and responses. + + + An HTTP GET request includes request header fields and no body and is therefore + transmitted as a single HEADERS frame, followed by zero or more + CONTINUATION frames containing the serialized block of request header + fields. The HEADERS frame in the following has both the END_HEADERS and + END_STREAM flags set; no CONTINUATION frames are sent: + + +
+   GET /resource HTTP/1.1           HEADERS
+   Host: example.org          ==>     + END_STREAM
+   Accept: image/jpeg                 + END_HEADERS
+                                        :method = GET
+                                        :scheme = https
+                                        :path = /resource
+                                        host = example.org
+                                        accept = image/jpeg
    + + + Similarly, a response that includes only response header fields is transmitted as a + HEADERS frame (again, followed by zero or more + CONTINUATION frames) containing the serialized block of response header + fields. + + +
+   HTTP/1.1 304 Not Modified        HEADERS
+   ETag: "xyzzy"              ==>     + END_STREAM
+   Expires: Thu, 23 Jan ...           + END_HEADERS
+                                        :status = 304
+                                        etag = "xyzzy"
+                                        expires = Thu, 23 Jan ...
    + + + An HTTP POST request that includes request header fields and payload data is transmitted + as one HEADERS frame, followed by zero or more + CONTINUATION frames containing the request header fields, followed by one + or more DATA frames, with the last CONTINUATION (or + HEADERS) frame having the END_HEADERS flag set and the final + DATA frame having the END_STREAM flag set: + + +
+   POST /resource HTTP/1.1          HEADERS
+   Host: example.org          ==>     - END_STREAM
+   Content-Type: image/jpeg           - END_HEADERS
+   Content-Length: 123                  :method = POST
+                                        :path = /resource
+   {binary data}                        :scheme = https
+
+                                    CONTINUATION
+                                      + END_HEADERS
+                                        content-type = image/jpeg
+                                        host = example.org
+                                        content-length = 123
+
+                                    DATA
+                                      + END_STREAM
+                                    {binary data}
+
+  Note that data contributing to any given header field could be spread between header
+  block fragments. The allocation of header fields to frames in this example is
+  illustrative only.
+
    + + + A response that includes header fields and payload data is transmitted as a + HEADERS frame, followed by zero or more CONTINUATION + frames, followed by one or more DATA frames, with the last + DATA frame in the sequence having the END_STREAM flag set: + + +
+   HTTP/1.1 200 OK                  HEADERS
+   Content-Type: image/jpeg   ==>     - END_STREAM
+   Content-Length: 123                + END_HEADERS
+                                        :status = 200
+   {binary data}                        content-type = image/jpeg
+                                        content-length = 123
+
+                                    DATA
+                                      + END_STREAM
+                                    {binary data}
    + + + Trailing header fields are sent as a header block after both the request or response + header block and all the DATA frames have been sent. The + HEADERS frame starting the trailers header block has the END_STREAM flag + set. + + +
+   HTTP/1.1 200 OK                  HEADERS
+   Content-Type: image/jpeg   ==>     - END_STREAM
+   Transfer-Encoding: chunked         + END_HEADERS
+   Trailer: Foo                         :status = 200
+                                        content-length = 123
+   123                                  content-type = image/jpeg
+   {binary data}                        trailer = Foo
+   0
+   Foo: bar                         DATA
+                                      - END_STREAM
+                                      {binary data}
+
+                                    HEADERS
+                                      + END_STREAM
+                                      + END_HEADERS
+                                        foo = bar
    + + +
+
+  An informational response using a 1xx status code other than 101 is transmitted as a
+  HEADERS frame, followed by zero or more CONTINUATION frames:
+
+                                    HEADERS
+                                      - END_STREAM
+                                      + END_HEADERS
+                                        :status = 103
+                                        extension-field = bar
+
    +
    + +
    + + In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error + occurs, because there is no means to determine the nature of the error. It is possible + that some server processing occurred prior to the error, which could result in + undesirable effects if the request were reattempted. + + + HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has + not been processed: + + + The GOAWAY frame indicates the highest stream number that might have + been processed. Requests on streams with higher numbers are therefore guaranteed to + be safe to retry. + + + The REFUSED_STREAM error code can be included in a + RST_STREAM frame to indicate that the stream is being closed prior to + any processing having occurred. Any request that was sent on the reset stream can + be safely retried. + + + + + Requests that have not been processed have not failed; clients MAY automatically retry + them, even those with non-idempotent methods. + + + A server MUST NOT indicate that a stream has not been processed unless it can guarantee + that fact. If frames that are on a stream are passed to the application layer for any + stream, then REFUSED_STREAM MUST NOT be used for that stream, and a + GOAWAY frame MUST include a stream identifier that is greater than or + equal to the given stream identifier. + + + In addition to these mechanisms, the PING frame provides a way for a + client to easily test a connection. Connections that remain idle can become broken as + some middleboxes (for instance, network address translators, or load balancers) silently + discard connection bindings. The PING frame allows a client to safely + test whether a connection is still active without sending a request. + +
    +
    + +
+
+  HTTP/2 allows a server to pre-emptively send (or "push") responses (along with
+  corresponding "promised" requests) to a client in association with a previous
+  client-initiated request. This can be useful when the server knows the client will need
+  to have those responses available in order to fully process the response to the original
+  request.
+
+  Pushing additional message exchanges in this fashion is optional, and is negotiated
+  between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set
+  to 0 to indicate that server push is disabled.
+
+  Promised requests MUST be cacheable, MUST be safe, and MUST NOT include a request body.
+  Clients that receive a promised request that is not cacheable, that is not safe, or
+  that includes a request body MUST reset the stream with a stream error of type
+  PROTOCOL_ERROR.
+
+  Pushed responses that are cacheable can be stored by the client, if it implements an
+  HTTP cache. Pushed responses are considered successfully validated on the origin server
+  (e.g., if the "no-cache" cache response directive is present) while the stream
+  identified by the promised stream ID is still open.
+
+  Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY
+  be made available to the application separately.
+
+  An intermediary can receive pushes from the server and choose not to forward them on to
+  the client. In other words, how to make use of the pushed information is up to that
+  intermediary. Equally, the intermediary might choose to make additional pushes to the
+  client, without any action taken by the server.
+
+  A client cannot push. Thus, servers MUST treat the receipt of a
+  PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR. Clients MUST reject
+  any attempt to change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by
+  treating the message as a connection error of type PROTOCOL_ERROR.
+
+
+  Server push is semantically equivalent to a server responding to a request; however, in
+  this case that request is also sent by the server, as a PUSH_PROMISE frame.
+
+  The PUSH_PROMISE frame includes a header block that contains a complete
+  set of request header fields that the server attributes to the request. It is not
+  possible to push a response to a request that includes a request body.
+
+  Pushed responses are always associated with an explicit request from the client. The
+  PUSH_PROMISE frames sent by the server are sent on that explicit
+  request's stream. The PUSH_PROMISE frame also includes a promised stream
+  identifier, chosen from the stream identifiers available to the server.
+
+  The header fields in PUSH_PROMISE and any subsequent
+  CONTINUATION frames MUST be a valid and complete set of request header fields. The
+  server MUST include a method in the :method header field that is safe and cacheable.
+  If a client receives a PUSH_PROMISE that does not include a complete and valid set of
+  header fields, or the :method header field identifies a method that is not safe, it
+  MUST respond with a stream error of type PROTOCOL_ERROR.
+
+  The server SHOULD send PUSH_PROMISE frames prior to sending any frames that reference
+  the promised responses. This avoids a race where clients issue requests prior to
+  receiving any PUSH_PROMISE frames.
+
+  For example, if the server receives a request for a document containing embedded links
+  to multiple image files, and the server chooses to push those additional images to the
+  client, sending push promises before the DATA frames that contain the
+  image links ensures that the client is able to see the promises before discovering
+  embedded links. Similarly, if the server pushes responses referenced by the header block
+  (for instance, in Link header fields), sending the push promises before sending the
+  header block ensures that clients do not request them.
+
+  PUSH_PROMISE frames MUST NOT be sent by the client.
+
+  PUSH_PROMISE frames can be sent by the server in response to any
+  client-initiated stream, but the stream MUST be in either the "open" or "half closed
+  (remote)" state with respect to the server. PUSH_PROMISE frames are
+  interspersed with the frames that comprise a response, though they cannot be
+  interspersed with HEADERS and CONTINUATION frames that
+  comprise a single header block.
+
+  Sending a PUSH_PROMISE frame creates a new stream and puts the stream
+  into the "reserved (local)" state for the server and the "reserved (remote)" state for
+  the client.
+
    + +
    + + After sending the PUSH_PROMISE frame, the server can begin delivering the + pushed response as a response on a server-initiated + stream that uses the promised stream identifier. The server uses this stream to + transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" + to the client after the initial HEADERS frame is sent. + + + + Once a client receives a PUSH_PROMISE frame and chooses to accept the + pushed response, the client SHOULD NOT issue any requests for the promised response + until after the promised stream has closed. + + + + If the client determines, for any reason, that it does not wish to receive the pushed + response from the server, or if the server takes too long to begin sending the promised + response, the client can send an RST_STREAM frame, using either the + CANCEL or REFUSED_STREAM codes, and referencing the pushed + stream's identifier. + + + A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the + number of responses that can be concurrently pushed by a server. Advertising a + SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by + preventing the server from creating the necessary streams. This does not prohibit a + server from sending PUSH_PROMISE frames; clients need to reset any + promised streams that are not wanted. + + + + Clients receiving a pushed response MUST validate that either the server is + authoritative (see ), or the proxy that provided the pushed + response is configured for the corresponding request. For example, a server that offers + a certificate for only the example.com DNS-ID or Common Name + is not permitted to push a response for https://www.example.org/doc. + + + The response for a PUSH_PROMISE stream begins with a + HEADERS frame, which immediately puts the stream into the “half closed + (remote)” state for the server and “half closed (local)” state for the client, and ends + with a frame bearing END_STREAM, which places the stream in the "closed" state. + + + The client never sends a frame with the END_STREAM flag for a server push. + + + +
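+ A non-normative sketch of refusing a push, using the vendored Framer (the vendored
+ transport below does the same in cancelStream via writeStreamReset):
+
+package sketch
+
+import "golang.org/x/net/http2"
+
+// refusePush resets a promised stream that the client does not want.
+// promisedID is the promised stream identifier from the PUSH_PROMISE
+// frame; REFUSED_STREAM could be used in place of CANCEL.
+func refusePush(fr *http2.Framer, promisedID uint32) error {
+	return fr.WriteRSTStream(promisedID, http2.ErrCodeCancel)
+}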
    + +
    + +
    + + In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. + CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin + server for the purposes of interacting with https resources. + + + In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to + a remote host, for similar purposes. The HTTP header field mapping works as defined in + Request Header Fields, with a few + differences. Specifically: + + + The :method header field is set to CONNECT. + + + The :scheme and :path header + fields MUST be omitted. + + + The :authority header field contains the host and port to + connect to (equivalent to the authority-form of the request-target of CONNECT + requests, see ). + + + + + A proxy that supports CONNECT establishes a TCP connection to + the server identified in the :authority header field. Once + this connection is successfully established, the proxy sends a HEADERS + frame containing a 2xx series status code to the client, as defined in . + + + After the initial HEADERS frame sent by each peer, all subsequent + DATA frames correspond to data sent on the TCP connection. The payload of + any DATA frames sent by the client is transmitted by the proxy to the TCP + server; data received from the TCP server is assembled into DATA frames by + the proxy. Frame types other than DATA or stream management frames + (RST_STREAM, WINDOW_UPDATE, and PRIORITY) + MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. + + + The TCP connection can be closed by either peer. The END_STREAM flag on a + DATA frame is treated as being equivalent to the TCP FIN bit. A client is + expected to send a DATA frame with the END_STREAM flag set after receiving + a frame bearing the END_STREAM flag. A proxy that receives a DATA frame + with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP + segment. A proxy that receives a TCP segment with the FIN bit set sends a + DATA frame with the END_STREAM flag set. Note that the final TCP segment + or DATA frame could be empty. + + + A TCP connection error is signaled with RST_STREAM. A proxy treats any + error in the TCP connection, which includes receiving a TCP segment with the RST bit set, + as a stream error of type + CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the + RST bit set if it detects an error with the stream or the HTTP/2 connection. + +
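+ A non-normative sketch of the header block for such a CONNECT request, encoded with
+ the vendored hpack package; the target host is an assumed example:
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"golang.org/x/net/http2/hpack"
+)
+
+func main() {
+	var buf bytes.Buffer
+	enc := hpack.NewEncoder(&buf)
+	// Only :method and :authority appear; :scheme and :path are omitted.
+	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "CONNECT"})
+	enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com:443"})
+	fmt.Printf("CONNECT header block: % x\n", buf.Bytes())
+}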
    +
    + +
    + + This section outlines attributes of the HTTP protocol that improve interoperability, reduce + exposure to known security vulnerabilities, or reduce the potential for implementation + variation. + + +
+ HTTP/2 connections are persistent. For best performance, it is expected that clients
+ will not close connections until it is determined that no further communication with a
+ server is necessary (for example, when a user navigates away from a particular web
+ page), or until the server closes the connection.
+
+ Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair,
+ where host is derived from a URI, a selected alternative service, or a configured
+ proxy.
+
+ A client can create additional connections as replacements, either to replace
+ connections that are near to exhausting the available stream identifier space, to
+ refresh the keying material for a TLS connection, or to replace connections that have
+ encountered errors.
+
+ A client MAY open multiple connections to the same IP address and TCP port using
+ different Server Name Indication values or to provide different TLS client
+ certificates, but SHOULD avoid creating multiple connections with the same
+ configuration.
+
+ Servers are encouraged to maintain open connections for as long as possible, but are
+ permitted to terminate idle connections if necessary. When either endpoint chooses to
+ close the transport-layer TCP connection, the terminating endpoint SHOULD first send a
+ GOAWAY () frame so that both endpoints can reliably determine whether previously sent
+ frames have been processed and gracefully complete or terminate any necessary remaining
+ tasks.
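+ A non-normative sketch of the graceful close described above, again using the vendored
+ Framer; lastProcessedStreamID is whatever stream the sender last acted on:
+
+package sketch
+
+import "golang.org/x/net/http2"
+
+// gracefulClose announces shutdown before the TCP connection is
+// closed, so the peer can tell which of its frames were processed.
+func gracefulClose(fr *http2.Framer, lastProcessedStreamID uint32) error {
+	// NO_ERROR marks this as a graceful shutdown rather than a fault.
+	return fr.WriteGoAway(lastProcessedStreamID, http2.ErrCodeNo, nil)
+}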
+ Connections that are made to an origin server, either directly or through a tunnel
+ created using the CONNECT method, MAY be reused for requests with multiple different
+ URI authority components. A connection can be reused as long as the origin server is
+ authoritative. For http resources, this depends on the host having resolved to the
+ same IP address.
+
+ For https resources, connection reuse additionally depends on having a certificate
+ that is valid for the host in the URI. An origin server might offer a certificate with
+ multiple subjectAltName attributes, or names with wildcards, one of which is valid for
+ the authority in the URI. For example, a certificate with a subjectAltName of
+ *.example.com might permit the use of the same connection for requests to URIs
+ starting with https://a.example.com/ and https://b.example.com/.
+
+ In some deployments, reusing a connection for multiple origins can result in requests
+ being directed to the wrong origin server. For example, TLS termination might be
+ performed by a middlebox that uses the TLS Server Name Indication (SNI) extension to
+ select an origin server. This means that it is possible for clients to send
+ confidential information to servers that might not be the intended target for the
+ request, even though the server is otherwise authoritative.
+
+ A server that does not wish clients to reuse connections can indicate that it is not
+ authoritative for a request by sending a 421 (Misdirected Request) status code in
+ response to the request (see ).
+
+ A client that is configured to use a proxy over HTTP/2 directs requests to that proxy
+ through a single connection. That is, all requests sent via a proxy reuse the
+ connection to the proxy.
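+ A non-normative sketch of the https reuse check, using Go's crypto/tls and
+ crypto/x509; canCoalesce is a hypothetical helper:
+
+package sketch
+
+import "crypto/tls"
+
+// canCoalesce reports whether an existing TLS connection may be reused
+// for a request to newHost: the certificate the server presented must
+// also be valid for that host. VerifyHostname understands
+// subjectAltName entries and wildcard names such as *.example.com.
+func canCoalesce(state tls.ConnectionState, newHost string) bool {
+	if len(state.PeerCertificates) == 0 {
+		return false
+	}
+	return state.PeerCertificates[0].VerifyHostname(newHost) == nil
+}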
    + +
    + + The 421 (Misdirected Request) status code indicates that the request was directed at a + server that is not able to produce a response. This can be sent by a server that is not + configured to produce responses for the combination of scheme and authority that are + included in the request URI. + + + Clients receiving a 421 (Misdirected Request) response from a server MAY retry the + request - whether the request method is idempotent or not - over a different connection. + This is possible if a connection is reused () or if an alternative + service is selected (). + + + This status code MUST NOT be generated by proxies. + + + A 421 response is cacheable by default; i.e., unless otherwise indicated by the method + definition or explicit cache controls (see ). + +
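+ A non-normative net/http sketch of a server emitting 421; the SNI/Host comparison is
+ deliberately simplified (it ignores ports and proxies):
+
+package sketch
+
+import "net/http"
+
+// misdirectedGuard rejects requests whose Host does not match the SNI
+// name the TLS connection was established for.
+func misdirectedGuard(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.TLS != nil && r.TLS.ServerName != "" && r.TLS.ServerName != r.Host {
+			http.Error(w, "421 Misdirected Request", 421)
+			return
+		}
+		next.ServeHTTP(w, r)
+	})
+}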
    +
    + +
    + + Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over + TLS. The general TLS usage guidance in SHOULD be followed, with + some additional restrictions that are specific to HTTP/2. + + + + An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on + feature set and cipher suite described in this section. Due to implementation + limitations, it might not be possible to fail TLS negotiation. An endpoint MUST + immediately terminate an HTTP/2 connection that does not meet these minimum requirements + with a connection error of type + INADEQUATE_SECURITY. + + +
    + + The TLS implementation MUST support the Server Name Indication + (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when + negotiating TLS. + + + The TLS implementation MUST disable compression. TLS compression can lead to the + exposure of information that would not otherwise be revealed . + Generic compression is unnecessary since HTTP/2 provides compression features that are + more aware of context and therefore likely to be more appropriate for use for + performance, security or other reasons. + + + The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS + renegotiation as a connection error of type + PROTOCOL_ERROR. Note that disabling renegotiation can result in + long-lived connections becoming unusable due to limits on the number of messages the + underlying cipher suite can encipher. + + + A client MAY use renegotiation to provide confidentiality protection for client + credentials offered in the handshake, but any renegotiation MUST occur prior to sending + the connection preface. A server SHOULD request a client certificate if it sees a + renegotiation request immediately after establishing a connection. + + + This effectively prevents the use of renegotiation in response to a request for a + specific protected resource. A future specification might provide a way to support this + use case. + +
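+ A non-normative sketch of a conforming client configuration with Go's crypto/tls,
+ which sends SNI via ServerName, implements no TLS-level compression, and by default
+ refuses renegotiation:
+
+package sketch
+
+import "crypto/tls"
+
+// h2ClientTLSConfig returns a TLS configuration meeting the
+// requirements above for a connection to the named host.
+func h2ClientTLSConfig(host string) *tls.Config {
+	return &tls.Config{
+		ServerName: host,             // Server Name Indication
+		MinVersion: tls.VersionTLS12, // TLS 1.2 or higher
+		NextProtos: []string{"h2"},   // ALPN identifier for HTTP/2 over TLS
+	}
+}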
    + +
+ The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST
+ only be used with cipher suites that have ephemeral key exchange, such as the ephemeral
+ Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST
+ have a minimum size of 2048 bits for DHE or security level of 128 bits for ECDHE.
+ Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher
+ suites that use stream or block ciphers. Authenticated Encryption with Additional Data
+ (AEAD) modes, such as the Galois/Counter Mode (GCM) of AES, are acceptable.
+
+ The effect of these restrictions is that TLS 1.2 implementations could have
+ non-intersecting sets of available cipher suites, since these prevent the use of the
+ cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of
+ HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with the
+ P-256 elliptic curve.
+
+ Clients MAY advertise support of cipher suites that are prohibited by the above
+ restrictions in order to allow for connection to servers that do not support HTTP/2.
+ This enables a fallback to protocols without these constraints without the additional
+ latency imposed by using a separate connection for fallback.
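+ A non-normative crypto/tls sketch restricting a deployment to the profile above:
+ ECDHE key exchange with AES-GCM (AEAD) suites, including the mandatory
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, and the P-256 curve preferred:
+
+package sketch
+
+import "crypto/tls"
+
+// restrictedCipherConfig pins the cipher suite profile described above.
+func restrictedCipherConfig() *tls.Config {
+	return &tls.Config{
+		MinVersion: tls.VersionTLS12,
+		CipherSuites: []uint16{
+			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // mandatory suite
+			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+		},
+		CurvePreferences: []tls.CurveID{tls.CurveP256},
+	}
+}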
    +
    +
    + +
    +
    + + HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is + authoritative in providing a given response, see . This relies on local name resolution for the "http" + URI scheme, and the authenticated server identity for the "https" scheme (see ). + +
    + +
+ In a cross-protocol attack, an attacker causes a client to initiate a transaction in
+ one protocol toward a server that understands a different protocol. An attacker might
+ be able to cause the transaction to appear as a valid transaction in the second
+ protocol. In combination with the capabilities of the web context, this can be used to
+ interact with poorly protected servers in private networks.
+
+ Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered
+ sufficient protection against cross-protocol attacks. ALPN provides a positive
+ indication that a server is willing to proceed with HTTP/2, which prevents attacks on
+ other TLS-based protocols.
+
+ The encryption in TLS makes it difficult for attackers to control the data which could
+ be used in a cross-protocol attack on a cleartext protocol.
+
+ The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks.
+ The connection preface contains a string that is designed to confuse HTTP/1.1 servers,
+ but no special protection is offered for other protocols. A server that is willing to
+ ignore parts of an HTTP/1.1 request containing an Upgrade header field in addition to
+ the client connection preface could be exposed to a cross-protocol attack.
    + +
+ HTTP/2 header field names and values are encoded as sequences of octets with a length
+ prefix. This enables HTTP/2 to carry any string of octets as the name or value of a
+ header field. An intermediary that translates HTTP/2 requests or responses into
+ HTTP/1.1 directly could permit the creation of corrupted HTTP/1.1 messages. An
+ attacker might exploit this behavior to cause the intermediary to create HTTP/1.1
+ messages with illegal header fields, extra header fields, or even new messages that
+ are entirely falsified.
+
+ Header field names or values that contain characters not permitted by HTTP/1.1,
+ including carriage return (ASCII 0xd) or line feed (ASCII 0xa), MUST NOT be translated
+ verbatim by an intermediary, as stipulated in .
+
+ Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity for an
+ attacker. Intermediaries that perform translation to HTTP/2 MUST remove any instances
+ of the obs-fold production from header field values.
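+ A non-normative sketch of the intermediary check, using the httplex package that the
+ vendored transport below already imports; safeToForward is a hypothetical helper:
+
+package sketch
+
+import "golang.org/x/net/lex/httplex"
+
+// safeToForward reports whether a header field can be serialized into
+// HTTP/1.1 without corruption; values containing CR, LF, or other
+// control octets are rejected.
+func safeToForward(name, value string) bool {
+	return httplex.ValidHeaderFieldName(name) &&
+		httplex.ValidHeaderFieldValue(value)
+}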
    + +
    + + Pushed responses do not have an explicit request from the client; the request + is provided by the server in the PUSH_PROMISE frame. + + + Caching responses that are pushed is possible based on the guidance provided by the origin + server in the Cache-Control header field. However, this can cause issues if a single + server hosts more than one tenant. For example, a server might offer multiple users each + a small portion of its URI space. + + + Where multiple tenants share space on the same server, that server MUST ensure that + tenants are not able to push representations of resources that they do not have authority + over. Failure to enforce this would allow a tenant to provide a representation that would + be served out of cache, overriding the actual representation that the authoritative tenant + provides. + + + Pushed responses for which an origin server is not authoritative (see + ) are never cached or used. + +
    + +
+ An HTTP/2 connection can demand a greater commitment of resources to operate than an
+ HTTP/1.1 connection. The use of header compression and flow control depends on a
+ commitment of resources for storing a greater amount of state. Settings for these
+ features ensure that memory commitments for these features are strictly bounded.
+
+ The number of PUSH_PROMISE frames is not constrained in the same fashion. A client
+ that accepts server push SHOULD limit the number of streams it allows to be in the
+ "reserved (remote)" state. An excessive number of server push streams can be treated
+ as a stream error of type ENHANCE_YOUR_CALM.
+
+ Processing capacity cannot be guarded as effectively as state capacity.
+
+ The SETTINGS frame can be abused to cause a peer to expend additional processing time.
+ This might be done by pointlessly changing SETTINGS parameters, setting multiple
+ undefined parameters, or changing the same setting multiple times in the same frame.
+ WINDOW_UPDATE or PRIORITY frames can be abused to cause an unnecessary waste of
+ resources.
+
+ Large numbers of small or empty frames can be abused to cause a peer to expend time
+ processing frame headers. Note however that some uses are entirely legitimate, such as
+ the sending of an empty DATA frame to end a stream.
+
+ Header compression also offers some opportunities to waste processing resources; see
+ for more details on potential abuses.
+
+ Limits in SETTINGS parameters cannot be reduced instantaneously, which leaves an
+ endpoint exposed to behavior from a peer that could exceed the new limits. In
+ particular, immediately after establishing a connection, limits set by a server are
+ not known to clients and could be exceeded without being an obvious protocol
+ violation.
+
+ All these features (SETTINGS changes, small frames, header compression) have
+ legitimate uses. These features become a burden only when they are used unnecessarily
+ or to excess.
+
+ An endpoint that doesn't monitor this behavior exposes itself to a risk of
+ denial-of-service attack. Implementations SHOULD track the use of these features and
+ set limits on their use. An endpoint MAY treat activity that is suspicious as a
+ connection error of type ENHANCE_YOUR_CALM.
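+ A non-normative sketch of such tracking; the per-second budget is an arbitrary,
+ illustrative number, not a recommendation of this document:
+
+package sketch
+
+import "time"
+
+// calmMeter counts low-value control frames (for example SETTINGS,
+// PING, or empty frames) per one-second window. When allow returns
+// false, the endpoint may respond with ENHANCE_YOUR_CALM.
+type calmMeter struct {
+	windowStart time.Time
+	count       int
+}
+
+func (m *calmMeter) allow(now time.Time) bool {
+	const budget = 100 // illustrative threshold
+	if now.Sub(m.windowStart) >= time.Second {
+		m.windowStart, m.count = now, 0
+	}
+	m.count++
+	return m.count <= budget
+}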
+ A large header block can cause an implementation to commit a large amount of state.
+ Header fields that are critical for routing can appear toward the end of a header
+ block, which prevents streaming of header fields to their ultimate destination. For
+ this and other reasons, such as ensuring cache correctness, an endpoint might need to
+ buffer the entire header block. Since there is no hard limit to the size of a header
+ block, some endpoints could be forced to commit a large amount of available memory for
+ header fields.
+
+ An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of
+ limits that might apply on the size of header blocks. This setting is only advisory,
+ so endpoints MAY choose to send header blocks that exceed this limit and risk having
+ the request or response being treated as malformed. This setting is specific to a
+ connection, so any request or response could encounter a hop with a lower, unknown
+ limit. An intermediary can attempt to avoid this problem by passing on values
+ presented by different peers, but they are not obligated to do so.
+
+ A server that receives a larger header block than it is willing to handle can send an
+ HTTP 431 (Request Header Fields Too Large) status code . A client can discard
+ responses that it cannot process. The header block MUST be processed to ensure a
+ consistent connection state, unless the connection is closed.
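+ A non-normative sketch using the Transport vendored below: MaxHeaderListSize is
+ advertised as SETTINGS_MAX_HEADER_LIST_SIZE, and a zero value falls back to the
+ vendored default of 10MB (see maxHeaderListSize in transport.go):
+
+package sketch
+
+import (
+	"net/http"
+
+	"golang.org/x/net/http2"
+)
+
+// newLimitedClient accepts at most 1MB of response header bytes.
+func newLimitedClient() *http.Client {
+	return &http.Client{
+		Transport: &http2.Transport{
+			MaxHeaderListSize: 1 << 20,
+		},
+	}
+}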
    +
    + +
+ HTTP/2 enables greater use of compression for both header fields () and entity bodies.
+ Compression can allow an attacker to recover secret data when it is compressed in the
+ same context as data under attacker control.
+
+ There are demonstrable attacks on compression that exploit the characteristics of the
+ web (e.g., ). The attacker induces multiple requests containing varying plaintext,
+ observing the length of the resulting ciphertext in each, which reveals a shorter
+ length when a guess about the secret is correct.
+
+ Implementations communicating on a secure channel MUST NOT compress content that
+ includes both confidential and attacker-controlled data unless separate compression
+ dictionaries are used for each source of data. Compression MUST NOT be used if the
+ source of data cannot be reliably determined. Generic stream compression, such as that
+ provided by TLS, MUST NOT be used with HTTP/2 ().
+
+ Further considerations regarding the compression of header fields are described in .
    + +
+ Padding within HTTP/2 is not intended as a replacement for general purpose padding,
+ such as might be provided by TLS. Redundant padding could even be counterproductive.
+ Correct application of padding can depend on having specific knowledge of the data
+ that is being padded.
+
+ To mitigate attacks that rely on compression, disabling or limiting compression might
+ be preferable to padding as a countermeasure.
+
+ Padding can be used to obscure the exact size of frame content, and is provided to
+ mitigate specific attacks within HTTP. For example, attacks where compressed content
+ includes both attacker-controlled plaintext and secret data (see, for example, ).
+
+ Use of padding can result in less protection than might seem immediately obvious. At
+ best, padding only makes it more difficult for an attacker to infer length information
+ by increasing the number of frames an attacker has to observe. Incorrectly implemented
+ padding schemes can be easily defeated. In particular, randomized padding with a
+ predictable distribution provides very little protection; similarly, padding payloads
+ to a fixed size exposes information as payload sizes cross the fixed size boundary,
+ which could be possible if an attacker can control plaintext.
+
+ Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding for
+ HEADERS and PUSH_PROMISE frames. A valid reason for an intermediary to change the
+ amount of padding of frames is to improve the protections that padding provides.
    + +
+ Several characteristics of HTTP/2 provide an observer an opportunity to correlate
+ actions of a single client or server over time. These include the value of settings,
+ the manner in which flow control windows are managed, the way priorities are allocated
+ to streams, the timing of reactions to stimulus, and the handling of any optional
+ features.
+
+ To the extent that these create observable differences in behavior, they could be used
+ as a basis for fingerprinting a specific client, as defined in .
    +
    + +
+ A string for identifying HTTP/2 is entered into the "Application Layer Protocol
+ Negotiation (ALPN) Protocol IDs" registry established in .
+
+ This document establishes a registry for frame types, settings, and error codes. These
+ new registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2
+ Parameters" section.
+
+ This document registers the HTTP2-Settings header field for use in HTTP, as well as
+ the 421 (Misdirected Request) status code.
+
+ This document registers the PRI method for use in HTTP, to avoid collisions with the
+ connection preface.
    + + This document creates two registrations for the identification of HTTP/2 in the + "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . + + + The "h2" string identifies HTTP/2 when used over TLS: + + HTTP/2 over TLS + 0x68 0x32 ("h2") + This document + + + + The "h2c" string identifies HTTP/2 when used over cleartext TCP: + + HTTP/2 over TCP + 0x68 0x32 0x63 ("h2c") + This document + + +
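+ A non-normative sketch of offering these identifiers during the TLS handshake with
+ Go's crypto/tls; certificate setup is omitted:
+
+package sketch
+
+import "crypto/tls"
+
+// alpnConfig offers "h2" and falls back to HTTP/1.1 if the peer does
+// not speak HTTP/2. After the handshake,
+// conn.ConnectionState().NegotiatedProtocol reports the chosen one.
+func alpnConfig() *tls.Config {
+	return &tls.Config{
+		NextProtos: []string{"h2", "http/1.1"},
+	}
+}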
    + +
+ This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame
+ Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under
+ either of the "IETF Review" or "IESG Approval" policies for values between 0x00 and
+ 0xef, with values between 0xf0 and 0xff being reserved for experimental use.
+
+ New entries in this registry require the following information:
+
+ A name or label for the frame type.
+
+ The 8-bit code assigned to the frame type.
+
+ A reference to a specification that includes a description of the frame layout, its
+ semantics, and the flags that the frame type uses, including any parts of the frame
+ that are conditionally present based on the value of flags.
+
+ The entries in the following table are registered by this document.
+
+ Frame Type       Code
+ -------------    ----
+ DATA             0x0
+ HEADERS          0x1
+ PRIORITY         0x2
+ RST_STREAM       0x3
+ SETTINGS         0x4
+ PUSH_PROMISE     0x5
+ PING             0x6
+ GOAWAY           0x7
+ WINDOW_UPDATE    0x8
+ CONTINUATION     0x9
    + +
+ This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings"
+ registry manages a 16-bit space. The "HTTP/2 Settings" registry operates under the
+ "Expert Review" policy for values in the range from 0x0000 to 0xefff, with values
+ between 0xf000 and 0xffff being reserved for experimental use.
+
+ New registrations are advised to provide the following information:
+
+ A symbolic name for the setting. Specifying a setting name is optional.
+
+ The 16-bit code assigned to the setting.
+
+ An initial value for the setting.
+
+ An optional reference to a specification that describes the use of the setting.
+
+ An initial set of setting registrations can be found in .
+
+ Name                      Code    Initial Value
+ ----------------------    ----    -------------
+ HEADER_TABLE_SIZE         0x1     4096
+ ENABLE_PUSH               0x2     1
+ MAX_CONCURRENT_STREAMS    0x3     (infinite)
+ INITIAL_WINDOW_SIZE       0x4     65535
+ MAX_FRAME_SIZE            0x5     16384
+ MAX_HEADER_LIST_SIZE      0x6     (infinite)
    + +
+ This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code"
+ registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the
+ "Expert Review" policy.
+
+ Registrations for error codes are required to include a description of the error code.
+ An expert reviewer is advised to examine new registrations for possible duplication
+ with existing error codes. Use of existing registrations is to be encouraged, but not
+ mandated.
+
+ New registrations are advised to provide the following information:
+
+ A name for the error code. Specifying an error code name is optional.
+
+ The 32-bit error code value.
+
+ A brief description of the error code semantics, longer if no detailed specification
+ is provided.
+
+ An optional reference for a specification that defines the error code.
+
+ The entries in the following table are registered by this document.
+
+ Name                   Code    Description
+ -------------------    ----    -----------
+ NO_ERROR               0x0     Graceful shutdown
+ PROTOCOL_ERROR         0x1     Protocol error detected
+ INTERNAL_ERROR         0x2     Implementation fault
+ FLOW_CONTROL_ERROR     0x3     Flow control limits exceeded
+ SETTINGS_TIMEOUT       0x4     Settings not acknowledged
+ STREAM_CLOSED          0x5     Frame received for closed stream
+ FRAME_SIZE_ERROR       0x6     Frame size incorrect
+ REFUSED_STREAM         0x7     Stream not processed
+ CANCEL                 0x8     Stream cancelled
+ COMPRESSION_ERROR      0x9     Compression state not updated
+ CONNECT_ERROR          0xa     TCP connection error for CONNECT method
+ ENHANCE_YOUR_CALM      0xb     Processing capacity exceeded
+ INADEQUATE_SECURITY    0xc     Negotiated TLS parameters not acceptable
    + +
    + + This section registers the HTTP2-Settings header field in the + Permanent Message Header Field Registry. + + + HTTP2-Settings + + + http + + + standard + + + IETF + + + of this document + + + This header field is only used by an HTTP/2 client for Upgrade-based negotiation. + + + +
    + +
    + + This section registers the PRI method in the HTTP Method + Registry (). + + + PRI + + + No + + + No + + + of this document + + + This method is never used by an actual client. This method will appear to be used + when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection + preface. + + + +
    + +
    + + This document registers the 421 (Misdirected Request) HTTP Status code in the Hypertext + Transfer Protocol (HTTP) Status Code Registry (). + + + + + 421 + + + Misdirected Request + + + of this document + + + +
    + +
    + +
    + + This document includes substantial input from the following individuals: + + + Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin + Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin + Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY + contributors). + + + Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). + + + William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto + Peon, Rob Trace (Flow control). + + + Mike Bishop (Extensibility). + + + Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan + (Substantial editorial contributions). + + + Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. + + + Alexey Melnikov was an editor of this document during 2013. + + + A substantial proportion of Martin's contribution was supported by Microsoft during his + employment there. + + + +
    +
+ Normative References
+
+ [HPACK]      "HPACK - Header Compression for HTTP/2" (companion document to this
+              specification).
+ [TCP]        "Transmission Control Protocol", University of Southern California
+              (USC)/Information Sciences Institute, RFC 793.
+ [RFC2119]    Bradner, S., "Key words for use in RFCs to Indicate Requirement Levels",
+              RFC 2119.
+ [RFC2818]    "HTTP Over TLS", RFC 2818.
+ [RFC3986]    "Uniform Resource Identifier (URI): Generic Syntax", RFC 3986.
+ [RFC4648]    "The Base16, Base32, and Base64 Data Encodings", RFC 4648.
+ [RFC5226]    "Guidelines for Writing an IANA Considerations Section in RFCs",
+              RFC 5226.
+ [RFC5234]    "Augmented BNF for Syntax Specifications: ABNF", RFC 5234.
+ [TLS12]      "The Transport Layer Security (TLS) Protocol Version 1.2", RFC 5246.
+ [TLS-EXT]    "Transport Layer Security (TLS) Extensions: Extension Definitions",
+              RFC 6066.
+ [TLS-ALPN]   "Transport Layer Security (TLS) Application-Layer Protocol Negotiation
+              Extension", RFC 7301.
+ [TLS-ECDHE]  "TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois
+              Counter Mode (GCM)", RFC 5289.
+ [FIPS186]    "Digital Signature Standard (DSS)", NIST.
+ [RFC7230]    Fielding, R. and J. Reschke, "Hypertext Transfer Protocol (HTTP/1.1):
+              Message Syntax and Routing", RFC 7230.
+ [RFC7231]    Fielding, R. and J. Reschke, "Hypertext Transfer Protocol (HTTP/1.1):
+              Semantics and Content", RFC 7231.
+ [RFC7232]    Fielding, R. and J. Reschke, "Hypertext Transfer Protocol (HTTP/1.1):
+              Conditional Requests", RFC 7232.
+ [RFC7233]    Fielding, R., Lafon, Y., and J. Reschke, "Hypertext Transfer Protocol
+              (HTTP/1.1): Range Requests", RFC 7233.
+ [RFC7234]    Fielding, R., Nottingham, M., and J. Reschke, "Hypertext Transfer
+              Protocol (HTTP/1.1): Caching", RFC 7234.
+ [RFC7235]    Fielding, R. and J. Reschke, "Hypertext Transfer Protocol (HTTP/1.1):
+              Authentication", RFC 7235.
+ [COOKIE]     "HTTP State Management Mechanism", RFC 6265.
+
+ Informative References
+
+ [RFC1323]    "TCP Extensions for High Performance", RFC 1323.
+ [RFC3749]    "Transport Layer Security Protocol Compression Methods", RFC 3749.
+ [RFC6585]    "Additional HTTP Status Codes", RFC 6585.
+ [RFC4492]    "Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer
+              Security (TLS)", RFC 4492.
+ [RFC5288]    "AES Galois Counter Mode (GCM) Cipher Suites for TLS", RFC 5288.
+ [HTML5]      "HTML5", W3C Recommendation.
+ [TALKING]    "Talking to Yourself for Fun and Profit".
+ [BREACH]     "BREACH: Reviving the CRIME Attack".
+ [RFC3864]    Klyne, G., Nottingham, M., and J. Mogul, "Registration Procedures for
+              Message Header Fields", RFC 3864.
+ [TLSBCP]     "Recommendations for Secure Use of TLS and DTLS".
+ [ALT-SVC]    Nottingham, M. (Akamai), McManus, P. (Mozilla), and J. Reschke
+              (greenbytes), "HTTP Alternative Services", Work in Progress.
    + +
    + + This section is to be removed by RFC Editor before publication. + + +
    + + Renamed Not Authoritative status code to Misdirected Request. + +
    + +
+ Pseudo-header fields are now required to appear strictly before regular ones.
+
+ Restored 1xx series status codes, except 101.
+
+ Changed frame length field to 24 bits. Expanded frame header to 9 octets. Added a
+ setting to limit the damage.
+
+ Added a setting to advise peers of header set size limits.
+
+ Removed segments.
+
+ Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping.
    + +
    + + Restored extensibility options. + + + Restricting TLS cipher suites to AEAD only. + + + Removing Content-Encoding requirements. + + + Permitting the use of PRIORITY after stream close. + + + Removed ALTSVC frame. + + + Removed BLOCKED frame. + + + Reducing the maximum padding size to 256 octets; removing padding from + CONTINUATION frames. + + + Removed per-frame GZIP compression. + +
    + +
    + + Added BLOCKED frame (at risk). + + + Simplified priority scheme. + + + Added DATA per-frame GZIP compression. + +
    + +
    + + Changed "connection header" to "connection preface" to avoid confusion. + + + Added dependency-based stream prioritization. + + + Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. + + + Adding missing padding to PUSH_PROMISE. + + + Integrate ALTSVC frame and supporting text. + + + Dropping requirement on "deflate" Content-Encoding. + + + Improving security considerations around use of compression. + +
    + +
    + + Adding padding for data frames. + + + Renumbering frame types, error codes, and settings. + + + Adding INADEQUATE_SECURITY error code. + + + Updating TLS usage requirements to 1.2; forbidding TLS compression. + + + Removing extensibility for frames and settings. + + + Changing setting identifier size. + + + Removing the ability to disable flow control. + + + Changing the protocol identification token to "h2". + + + Changing the use of :authority to make it optional and to allow userinfo in non-HTTP + cases. + + + Allowing split on 0x0 for Cookie. + + + Reserved PRI method in HTTP/1.1 to avoid possible future collisions. + +
    + +
    + + Added cookie crumbling for more efficient header compression. + + + Added header field ordering with the value-concatenation mechanism. + +
    + +
    + + Marked draft for implementation. + +
    + +
    + + Adding definition for CONNECT method. + + + Constraining the use of push to safe, cacheable methods with no request body. + + + Changing from :host to :authority to remove any potential confusion. + + + Adding setting for header compression table size. + + + Adding settings acknowledgement. + + + Removing unnecessary and potentially problematic flags from CONTINUATION. + + + Added denial of service considerations. + +
    +
    + + Marking the draft ready for implementation. + + + Renumbering END_PUSH_PROMISE flag. + + + Editorial clarifications and changes. + +
    + +
    + + Added CONTINUATION frame for HEADERS and PUSH_PROMISE. + + + PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is + zero. + + + Push expanded to allow all safe methods without a request body. + + + Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 + hop-by-hop header fields. + + + Requiring that intermediaries not forward requests with missing or illegal routing + :-headers. + + + Clarified requirements around handling different frames after stream close, stream reset + and GOAWAY. + + + Added more specific prohibitions for sending of different frame types in various stream + states. + + + Making the last received setting value the effective value. + + + Clarified requirements on TLS version, extension and ciphers. + +
    + +
    + + Committed major restructuring atrocities. + + + Added reference to first header compression draft. + + + Added more formal description of frame lifecycle. + + + Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. + + + Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. + + + Added PRIORITY frame. + +
    + +
    + + Added continuations to frames carrying header blocks. + + + Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful + concepts, like cookies. + + + Removed "message". + + + Switched to TLS ALPN from NPN. + + + Editorial changes. + +
    + +
    + + Added IANA considerations section for frame types, error codes and settings. + + + Removed data frame compression. + + + Added PUSH_PROMISE. + + + Added globally applicable flags to framing. + + + Removed zlib-based header compression mechanism. + + + Updated references. + + + Clarified stream identifier reuse. + + + Removed CREDENTIALS frame and associated mechanisms. + + + Added advice against naive implementation of flow control. + + + Added session header section. + + + Restructured frame header. Removed distinction between data and control frames. + + + Altered flow control properties to include session-level limits. + + + Added note on cacheability of pushed resources and multiple tenant servers. + + + Changed protocol label form based on discussions. + +
    + +
    + + Changed title throughout. + + + Removed section on Incompatibilities with SPDY draft#2. + + + Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . + + + Replaced abstract and introduction. + + + Added section on starting HTTP/2.0, including upgrade mechanism. + + + Removed unused references. + + + Added flow control principles based on . + +
    + +
    + + Adopted as base for draft-ietf-httpbis-http2. + + + Updated authors/editors list. + + + Added status note. + +
    +
    + +
    +
    + diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 0000000..c65f1a3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2284 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + "golang.org/x/net/lex/httplex" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. + transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). 
+ t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It requires Go 1.6 or later and returns an error if the net/http package is too old +// or if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + return err +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. +type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. 
+type clientStream struct { + cc *ClientConn + req *http.Request + trace *clientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. +func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := reqContext(req) + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +var ErrNoCachedConn = errors.New("http2: no cached connection was available") + +// RoundTripOpt are options for the Transport.RoundTripOpt method. 
+type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. + OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + traceGotConn(req, cc) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-reqContext(req).Done(): + return nil, reqContext(req).Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + if !afterBodyWrite { + return req, nil + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return req, nil + } + // Otherwise we depend on the Request having its GetBody + // func defined. 
+ getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody + if getBody == nil { + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + } + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *cloneTLSConfig(t.TLSClientConfig) + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return transportExpectContinueTimeout(t.t1) +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + if cc.singleUse && cc.nextStreamID > 1 { + return false + } + return cc.goAway == nil && !cc.closed && + int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? 
Just Close:
+	cc.mu.Unlock()
+
+	if VerboseLogs {
+		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+	}
+	cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size or 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+	cc.mu.Lock()
+	size := cc.maxFrameSize
+	if size > maxAllocFrameSize {
+		size = maxAllocFrameSize
+	}
+	for i, buf := range cc.freeBuf {
+		if len(buf) >= int(size) {
+			cc.freeBuf[i] = nil
+			cc.mu.Unlock()
+			return buf[:size]
+		}
+	}
+	cc.mu.Unlock()
+	return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+	if len(cc.freeBuf) < maxBufs {
+		cc.freeBuf = append(cc.freeBuf, buf)
+		return
+	}
+	for i, old := range cc.freeBuf {
+		if old == nil {
+			cc.freeBuf[i] = buf
+			return
+		}
+	}
+	// forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+	keys := make([]string, 0, len(req.Trailer))
+	for k := range req.Trailer {
+		k = http.CanonicalHeaderKey(k)
+		switch k {
+		case "Transfer-Encoding", "Trailer", "Content-Length":
+			return "", &badStringError{"invalid Trailer key", k}
+		}
+		keys = append(keys, k)
+	}
+	if len(keys) > 0 {
+		sort.Strings(keys)
+		return strings.Join(keys, ","), nil
+	}
+	return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+	if cc.t.t1 != nil {
+		return cc.t.t1.ResponseHeaderTimeout
+	}
+	// No way to do this (yet?) with just an http2.Transport. Probably
+	// no need. Request.Cancel is the new way. We only need to support
+	// this for compatibility with the old http.Transport fields when
+	// we're doing transparent http2.
+	return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// Per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+	if v := req.Header.Get("Upgrade"); v != "" {
+		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+	}
+	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+	}
+	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
+		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+	}
+	return nil
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
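+// For example (values illustrative):
+//
+//	Body == nil                     => 0  (definitely empty)
+//	Body == http.NoBody             => 0  (Go 1.8+, via reqBodyIsNoBody)
+//	Body != nil, ContentLength == 5 => 5
+//	Body != nil, ContentLength == 0 => -1 (length unknown until EOF)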
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || reqBodyIsNoBody(req.Body) { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = requestTrace(req) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := reqContext(req) + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). 
If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
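+		// That goroutine records the cancellation error in
+		// waitingForConnErr and broadcasts on cc.cond, so the
+		// cc.cond.Wait() below wakes up and returns the error
+		// instead of blocking forever.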
+		if waitingForConn == nil {
+			waitingForConn = make(chan struct{})
+			go func() {
+				if err := awaitRequestCancel(req, waitingForConn); err != nil {
+					cc.mu.Lock()
+					waitingForConnErr = err
+					cc.cond.Broadcast()
+					cc.mu.Unlock()
+				}
+			}()
+		}
+		cc.pendingRequests++
+		cc.cond.Wait()
+		cc.pendingRequests--
+		if waitingForConnErr != nil {
+			return waitingForConnErr
+		}
+	}
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
+	first := true // first frame written (HEADERS is first, then CONTINUATION)
+	for len(hdrs) > 0 && cc.werr == nil {
+		chunk := hdrs
+		if len(chunk) > maxFrameSize {
+			chunk = chunk[:maxFrameSize]
+		}
+		hdrs = hdrs[len(chunk):]
+		endHeaders := len(hdrs) == 0
+		if first {
+			cc.fr.WriteHeaders(HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: chunk,
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+			first = false
+		} else {
+			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+		}
+	}
+	// TODO(bradfitz): this Flush could potentially block (as
+	// could the WriteHeaders call(s) above), which means they
+	// wouldn't respond to Request.Cancel being readable. That's
+	// rare, but this should probably be in a goroutine.
+	cc.bw.Flush()
+	return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+	// abort request body write; don't send cancel
+	errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+	// abort request body write, but send a RST_STREAM of CANCEL.
+	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+	cc := cs.cc
+	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+	buf := cc.frameScratchBuffer()
+	defer cc.putFrameScratchBuffer(buf)
+
+	defer func() {
+		traceWroteRequest(cs.trace, err)
+		// TODO: write h12Compare test showing whether
+		// Request.Body is closed by the Transport,
+		// and in multiple cases: server replies <=299 and >299
+		// while still writing request body
+		cerr := bodyCloser.Close()
+		if err == nil {
+			err = cerr
+		}
+	}()
+
+	req := cs.req
+	hasTrailers := req.Trailer != nil
+
+	var sawEOF bool
+	for !sawEOF {
+		n, err := body.Read(buf)
+		if err == io.EOF {
+			sawEOF = true
+			err = nil
+		} else if err != nil {
+			return err
+		}
+
+		remain := buf[:n]
+		for len(remain) > 0 && err == nil {
+			var allowed int32
+			allowed, err = cs.awaitFlowControl(len(remain))
+			switch {
+			case err == errStopReqBodyWrite:
+				return err
+			case err == errStopReqBodyWriteAndCancel:
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+				return err
+			case err != nil:
+				return err
+			}
+			cc.wmu.Lock()
+			data := remain[:allowed]
+			remain = remain[allowed:]
+			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+			err = cc.fr.WriteData(cs.ID, sentEnd, data)
+			if err == nil {
+				// TODO(bradfitz): this flush is for latency, not bandwidth.
+				// Most requests won't need this. Make this opt-in or
+				// opt-out? Use some heuristic on the body type? Nagle-like
+				// timers? Based on 'n'? Only last chunk of this for loop,
+				// unless flow control tokens are low? For now, always.
+				// If we change this, see comment below.
+				err = cc.bw.Flush()
+			}
+			cc.wmu.Unlock()
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	if sentEnd {
+		// Already sent END_STREAM (which implies we have no
+		// trailers) and flushed, because currently all
+		// WriteData frames above get a flush. So we're done.
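+		// (The code below the early return handles the remaining
+		// cases: trailers are pending, or the body ended on a
+		// zero-byte read, e.g. an empty body, so END_STREAM has
+		// not been sent yet.)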
+ return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httplex.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httplex.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). 
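+		// For example, a GET of "https://example.com/search?q=go"
+		// (an illustrative URL) would enumerate roughly:
+		//
+		//	:authority -> example.com
+		//	:method    -> GET
+		//	:path      -> /search?q=go
+		//	:scheme    -> https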
+ f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + cc.writeHeader(strings.ToLower(name), value) + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. 
have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
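+	// Below, the reader's exit error is upgraded to a GoAwayError
+	// if the server sent GOAWAY before hanging up, and then every
+	// stream still tracked in cc.streams is failed with that error
+	// so pending RoundTrip calls can return.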
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. 
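+	// So: if the request has a body, defer the close until this
+	// function returns (after the response is sent on cs.resc);
+	// for bodyless requests, close the stream right away.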
+ if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 100 expect continue). This special +// case is going away after Issue 13851 is fixed. +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. 
+ } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + setResponseUncompressed(res) + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
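+		// In other words: once the available window plus buffered
+		// data dips below the default stream window by at least the
+		// min-refresh threshold, top the window back up with a
+		// single WINDOW_UPDATE rather than one tiny update per Read.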
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. 
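+		// A stream we already reset will never be read from, so its
+		// tokens are returned on the connection window only; the
+		// stream-level window no longer matters.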
+ didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
+			cc.vlogf("Unhandled Setting: %v", s)
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+
+	cc.fr.WriteSettingsAck()
+	cc.bw.Flush()
+	return cc.werr
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+	cc := rl.cc
+	cs := cc.streamByID(f.StreamID, false)
+	if f.StreamID != 0 && cs == nil {
+		return nil
+	}
+
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+
+	fl := &cc.flow
+	if cs != nil {
+		fl = &cs.flow
+	}
+	if !fl.add(int32(f.Increment)) {
+		return ConnectionError(ErrCodeFlowControl)
+	}
+	cc.cond.Broadcast()
+	return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+	cs := rl.cc.streamByID(f.StreamID, true)
+	if cs == nil {
+		// TODO: return error if server tries to RST_STREAM an idle stream
+		return nil
+	}
+	select {
+	case <-cs.peerReset:
+		// Already reset.
+		// This is the only goroutine
+		// which closes this, so there
+		// isn't a race.
+	default:
+		err := streamError(cs.ID, f.ErrCode)
+		cs.resetErr = err
+		close(cs.peerReset)
+		cs.bufPipe.CloseWithError(err)
+		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+	}
+	return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+// Public implementation is in go17.go and not_go17.go
+func (cc *ClientConn) ping(ctx contextContext) error {
+	c := make(chan struct{})
+	// Generate a random payload
+	var p [8]byte
+	for {
+		if _, err := rand.Read(p[:]); err != nil {
+			return err
+		}
+		cc.mu.Lock()
+		// check for dup before insert
+		if _, found := cc.pings[p]; !found {
+			cc.pings[p] = c
+			cc.mu.Unlock()
+			break
+		}
+		cc.mu.Unlock()
+	}
+	cc.wmu.Lock()
+	if err := cc.fr.WritePing(false, p); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		cc.wmu.Unlock()
+		return err
+	}
+	cc.wmu.Unlock()
+	select {
+	case <-c:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-cc.readerDone:
+		// connection closed
+		return cc.readerErr
+	}
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+	if f.IsAck() {
+		cc := rl.cc
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		// If ack, notify listener if any
+		if c, ok := cc.pings[f.Data]; ok {
+			close(c)
+			delete(cc.pings, f.Data)
+		}
+		return nil
+	}
+	cc := rl.cc
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if err := cc.fr.WritePing(true, f.Data); err != nil {
+		return err
+	}
+	return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+	// We told the peer we don't want them.
+	// Spec says:
+	// "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+	// setting of the peer endpoint is set to 0. An endpoint that
+	// has set this setting and has received acknowledgement MUST
+	// treat the receipt of a PUSH_PROMISE frame as a connection
+	// error (Section 5.4.1) of type PROTOCOL_ERROR."
+	return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+	// TODO: map err to more interesting error codes, once the
+	// HTTP community comes up with some. But currently for
+	// RST_STREAM there's no equivalent to GOAWAY frame's debug
+	// data, and the error codes are all pretty vague ("cancel").
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httplex.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. 
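+	// scheduleBodyWrite re-arms the timer with the real s.delay
+	// once the headers have been written; on100 instead fires the
+	// body write immediately.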
+ const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. +func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") +} diff --git a/vendor/golang.org/x/net/http2/transport_test.go b/vendor/golang.org/x/net/http2/transport_test.go new file mode 100644 index 0000000..adee48c --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport_test.go @@ -0,0 +1,3847 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +var ( + extNet = flag.Bool("extnet", false, "do external network tests") + transportHost = flag.String("transporthost", "http2.golang.org", "hostname to use for TestTransport") + insecure = flag.Bool("insecure", false, "insecure TLS dials") // TODO: dead code. remove? 
+) + +var tlsConfigInsecure = &tls.Config{InsecureSkipVerify: true} + +type testContext struct{} + +func (testContext) Done() <-chan struct{} { return make(chan struct{}) } +func (testContext) Err() error { panic("should not be called") } +func (testContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } +func (testContext) Value(key interface{}) interface{} { return nil } + +func TestTransportExternal(t *testing.T) { + if !*extNet { + t.Skip("skipping external network test") + } + req, _ := http.NewRequest("GET", "https://"+*transportHost+"/", nil) + rt := &Transport{TLSClientConfig: tlsConfigInsecure} + res, err := rt.RoundTrip(req) + if err != nil { + t.Fatalf("%v", err) + } + res.Write(os.Stdout) +} + +type fakeTLSConn struct { + net.Conn +} + +func (c *fakeTLSConn) ConnectionState() tls.ConnectionState { + return tls.ConnectionState{ + Version: tls.VersionTLS12, + CipherSuite: cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + } +} + +func startH2cServer(t *testing.T) net.Listener { + h2Server := &Server{} + l := newLocalListener(t) + go func() { + conn, err := l.Accept() + if err != nil { + t.Error(err) + return + } + h2Server.ServeConn(&fakeTLSConn{conn}, &ServeConnOpts{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %v, http: %v", r.URL.Path, r.TLS == nil) + })}) + }() + return l +} + +func TestTransportH2c(t *testing.T) { + l := startH2cServer(t) + defer l.Close() + req, err := http.NewRequest("GET", "http://"+l.Addr().String()+"/foobar", nil) + if err != nil { + t.Fatal(err) + } + tr := &Transport{ + AllowHTTP: true, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + return net.Dial(network, addr) + }, + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.ProtoMajor != 2 { + t.Fatal("proto not h2c") + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(body), "Hello, /foobar, http: true"; got != want { + t.Fatalf("response got %v, want %v", got, want) + } +} + +func TestTransport(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, body) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + t.Logf("Got res: %+v", res) + if g, w := res.StatusCode, 200; g != w { + t.Errorf("StatusCode = %v; want %v", g, w) + } + if g, w := res.Status, "200 OK"; g != w { + t.Errorf("Status = %q; want %q", g, w) + } + wantHeader := http.Header{ + "Content-Length": []string{"3"}, + "Content-Type": []string{"text/plain; charset=utf-8"}, + "Date": []string{"XXX"}, // see cleanDate + } + cleanDate(res) + if !reflect.DeepEqual(res.Header, wantHeader) { + t.Errorf("res Header = %v; want %v", res.Header, wantHeader) + } + if res.Request != req { + t.Errorf("Response.Request = %p; want %p", res.Request, req) + } + if res.TLS == nil { + t.Error("Response.TLS = nil; want non-nil") + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } else if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func onSameConn(t *testing.T, modReq func(*http.Request)) bool { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) 
{ + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer, func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + get := func() string { + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + modReq(req) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Fatalf("didn't get an addr in response") + } + return addr + } + first := get() + second := get() + return first == second +} + +func TestTransportReusesConns(t *testing.T) { + if !onSameConn(t, func(*http.Request) {}) { + t.Errorf("first and second responses were on different connections") + } +} + +func TestTransportReusesConn_RequestClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Close = true }) { + t.Errorf("first and second responses were not on different connections") + } +} + +func TestTransportReusesConn_ConnClose(t *testing.T) { + if onSameConn(t, func(r *http.Request) { r.Header.Set("Connection", "close") }) { + t.Errorf("first and second responses were not on different connections") + } +} + +// Tests that the Transport only keeps one pending dial open per destination address. +// https://golang.org/issue/13397 +func TestTransportGroupsPendingDials(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.RemoteAddr) + }, optOnlyServer) + defer st.Close() + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + defer tr.CloseIdleConnections() + var ( + mu sync.Mutex + dials = map[string]int{} + ) + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Error(err) + return + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("Body read: %v", err) + } + addr := strings.TrimSpace(string(slurp)) + if addr == "" { + t.Errorf("didn't get an addr in response") + } + mu.Lock() + dials[addr]++ + mu.Unlock() + }() + } + wg.Wait() + if len(dials) != 1 { + t.Errorf("saw %d dials; want 1: %v", len(dials), dials) + } + tr.CloseIdleConnections() + if err := retry(50, 10*time.Millisecond, func() error { + cp, ok := tr.connPool().(*clientConnPool) + if !ok { + return fmt.Errorf("Conn pool is %T; want *clientConnPool", tr.connPool()) + } + cp.mu.Lock() + defer cp.mu.Unlock() + if len(cp.dialing) != 0 { + return fmt.Errorf("dialing map = %v; want empty", cp.dialing) + } + if len(cp.conns) != 0 { + return fmt.Errorf("conns = %v; want empty", cp.conns) + } + if len(cp.keys) != 0 { + return fmt.Errorf("keys = %v; want empty", cp.keys) + } + return nil + }); err != nil { + t.Errorf("State of pool after CloseIdleConnections: %v", err) + } +} + +func retry(tries int, delay time.Duration, fn func() error) error { + var err error + for i := 0; i < tries; i++ { + err = fn() + if err == nil { + return nil + } + time.Sleep(delay) + } + return err +} + +func TestTransportAbortClosesPipes(t *testing.T) { + shutdown := make(chan struct{}) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + <-shutdown + }, + 
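+		// optOnlyServer: start just the server half of the tester;
+		// this test supplies its own Transport as the client.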
optOnlyServer, + ) + defer st.Close() + defer close(shutdown) // we must shutdown before st.Close() to avoid hanging + + done := make(chan struct{}) + requestMade := make(chan struct{}) + go func() { + defer close(done) + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + close(requestMade) + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Error("expected error from res.Body.Read") + } + }() + + <-requestMade + // Now force the serve loop to end, via closing the connection. + st.closeConn() + // deadlock? that's a bug. + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("timeout") + } +} + +// TODO: merge this with TestTransportBody to make TestTransportRequest? This +// could be a table-driven test with extra goodies. +func TestTransportPath(t *testing.T) { + gotc := make(chan *url.URL, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + gotc <- r.URL + }, + optOnlyServer, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + const ( + path = "/testpath" + query = "q=1" + ) + surl := st.ts.URL + path + "?" + query + req, err := http.NewRequest("POST", surl, nil) + if err != nil { + t.Fatal(err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + got := <-gotc + if got.Path != path { + t.Errorf("Read Path = %q; want %q", got.Path, path) + } + if got.RawQuery != query { + t.Errorf("Read RawQuery = %q; want %q", got.RawQuery, query) + } +} + +func randString(n int) string { + rnd := rand.New(rand.NewSource(int64(n))) + b := make([]byte, n) + for i := range b { + b[i] = byte(rnd.Intn(256)) + } + return string(b) +} + +type panicReader struct{} + +func (panicReader) Read([]byte) (int, error) { panic("unexpected Read") } +func (panicReader) Close() error { panic("unexpected Close") } + +func TestActualContentLength(t *testing.T) { + tests := []struct { + req *http.Request + want int64 + }{ + // Verify we don't read from Body: + 0: { + req: &http.Request{Body: panicReader{}}, + want: -1, + }, + // nil Body means 0, regardless of ContentLength: + 1: { + req: &http.Request{Body: nil, ContentLength: 5}, + want: 0, + }, + // ContentLength is used if set. + 2: { + req: &http.Request{Body: panicReader{}, ContentLength: 5}, + want: 5, + }, + // http.NoBody means 0, not -1. 
+ 3: { + req: &http.Request{Body: go18httpNoBody()}, + want: 0, + }, + } + for i, tt := range tests { + got := actualContentLength(tt.req) + if got != tt.want { + t.Errorf("test[%d]: got %d; want %d", i, got, tt.want) + } + } +} + +func TestTransportBody(t *testing.T) { + bodyTests := []struct { + body string + noContentLen bool + }{ + {body: "some message"}, + {body: "some message", noContentLen: true}, + {body: strings.Repeat("a", 1<<20), noContentLen: true}, + {body: strings.Repeat("a", 1<<20)}, + {body: randString(16<<10 - 1)}, + {body: randString(16 << 10)}, + {body: randString(16<<10 + 1)}, + {body: randString(512<<10 - 1)}, + {body: randString(512 << 10)}, + {body: randString(512<<10 + 1)}, + {body: randString(1<<20 - 1)}, + {body: randString(1 << 20)}, + {body: randString(1<<20 + 2)}, + } + + type reqInfo struct { + req *http.Request + slurp []byte + err error + } + gotc := make(chan reqInfo, 1) + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + gotc <- reqInfo{err: err} + } else { + gotc <- reqInfo{req: r, slurp: slurp} + } + }, + optOnlyServer, + ) + defer st.Close() + + for i, tt := range bodyTests { + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + var body io.Reader = strings.NewReader(tt.body) + if tt.noContentLen { + body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods + } + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + c := &http.Client{Transport: tr} + res, err := c.Do(req) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + defer res.Body.Close() + ri := <-gotc + if ri.err != nil { + t.Errorf("#%d: read error: %v", i, ri.err) + continue + } + if got := string(ri.slurp); got != tt.body { + t.Errorf("#%d: Read body mismatch.\n got: %q (len %d)\nwant: %q (len %d)", i, shortString(got), len(got), shortString(tt.body), len(tt.body)) + } + wantLen := int64(len(tt.body)) + if tt.noContentLen && tt.body != "" { + wantLen = -1 + } + if ri.req.ContentLength != wantLen { + t.Errorf("#%d. 
handler got ContentLength = %v; want %v", i, ri.req.ContentLength, wantLen) + } + } +} + +func shortString(v string) string { + const maxLen = 100 + if len(v) <= maxLen { + return v + } + return fmt.Sprintf("%v[...%d bytes omitted...]%v", v[:maxLen/2], len(v)-maxLen, v[len(v)-maxLen/2:]) +} + +func TestTransportDialTLS(t *testing.T) { + var mu sync.Mutex // guards following + var gotReq, didDial bool + + ts := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + gotReq = true + mu.Unlock() + }, + optOnlyServer, + ) + defer ts.Close() + tr := &Transport{ + DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { + mu.Lock() + didDial = true + mu.Unlock() + cfg.InsecureSkipVerify = true + c, err := tls.Dial(netw, addr, cfg) + if err != nil { + return nil, err + } + return c, c.Handshake() + }, + } + defer tr.CloseIdleConnections() + client := &http.Client{Transport: tr} + res, err := client.Get(ts.ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + mu.Lock() + if !gotReq { + t.Error("didn't get request") + } + if !didDial { + t.Error("didn't use dial hook") + } +} + +func TestConfigureTransport(t *testing.T) { + t1 := &http.Transport{} + err := ConfigureTransport(t1) + if err == errTransportVersion { + t.Skip(err) + } + if err != nil { + t.Fatal(err) + } + if got := fmt.Sprintf("%#v", t1); !strings.Contains(got, `"h2"`) { + // Laziness, to avoid buildtags. + t.Errorf("stringification of HTTP/1 transport didn't contain \"h2\": %v", got) + } + wantNextProtos := []string{"h2", "http/1.1"} + if t1.TLSClientConfig == nil { + t.Errorf("nil t1.TLSClientConfig") + } else if !reflect.DeepEqual(t1.TLSClientConfig.NextProtos, wantNextProtos) { + t.Errorf("TLSClientConfig.NextProtos = %q; want %q", t1.TLSClientConfig.NextProtos, wantNextProtos) + } + if err := ConfigureTransport(t1); err == nil { + t.Error("unexpected success on second call to ConfigureTransport") + } + + // And does it work? 
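+	// (For context, the caller-side wiring this enables — a sketch, with
+	// the http2 package qualifier assumed for external callers:
+	//	t1 := &http.Transport{}
+	//	if err := http2.ConfigureTransport(t1); err != nil {
+	//		log.Fatal(err)
+	//	}
+	//	c := &http.Client{Transport: t1}
+	// ConfigureTransport upgrades the HTTP/1 transport in place by
+	// adding "h2" to TLSClientConfig.NextProtos, as verified above.)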
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, r.Proto) + }, optOnlyServer) + defer st.Close() + + t1.TLSClientConfig.InsecureSkipVerify = true + c := &http.Client{Transport: t1} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(slurp), "HTTP/2.0"; got != want { + t.Errorf("body = %q; want %q", got, want) + } +} + +type capitalizeReader struct { + r io.Reader +} + +func (cr capitalizeReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + for i, b := range p[:n] { + if b >= 'a' && b <= 'z' { + p[i] = b - ('a' - 'A') + } + } + return +} + +type flushWriter struct { + w io.Writer +} + +func (fw flushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if f, ok := fw.w.(http.Flusher); ok { + f.Flush() + } + return +} + +type clientTester struct { + t *testing.T + tr *Transport + sc, cc net.Conn // server and client conn + fr *Framer // server's framer + client func() error + server func() error +} + +func newClientTester(t *testing.T) *clientTester { + var dialOnce struct { + sync.Mutex + dialed bool + } + ct := &clientTester{ + t: t, + } + ct.tr = &Transport{ + TLSClientConfig: tlsConfigInsecure, + DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialOnce.Lock() + defer dialOnce.Unlock() + if dialOnce.dialed { + return nil, errors.New("only one dial allowed in test mode") + } + dialOnce.dialed = true + return ct.cc, nil + }, + } + + ln := newLocalListener(t) + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + + } + sc, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + ln.Close() + ct.cc = cc + ct.sc = sc + ct.fr = NewFramer(sc, sc) + return ct +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err == nil { + return ln + } + ln, err = net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + return ln +} + +func (ct *clientTester) greet(settings ...Setting) { + buf := make([]byte, len(ClientPreface)) + _, err := io.ReadFull(ct.sc, buf) + if err != nil { + ct.t.Fatalf("reading client preface: %v", err) + } + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Fatalf("Reading client settings frame: %v", err) + } + if sf, ok := f.(*SettingsFrame); !ok { + ct.t.Fatalf("Wanted client settings frame; got %v", f) + _ = sf // stash it away? 
+ } + if err := ct.fr.WriteSettings(settings...); err != nil { + ct.t.Fatal(err) + } + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatal(err) + } +} + +func (ct *clientTester) readNonSettingsFrame() (Frame, error) { + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return nil, err + } + if _, ok := f.(*SettingsFrame); ok { + continue + } + return f, nil + } +} + +func (ct *clientTester) cleanup() { + ct.tr.CloseIdleConnections() +} + +func (ct *clientTester) run() { + errc := make(chan error, 2) + ct.start("client", errc, ct.client) + ct.start("server", errc, ct.server) + defer ct.cleanup() + for i := 0; i < 2; i++ { + if err := <-errc; err != nil { + ct.t.Error(err) + return + } + } +} + +func (ct *clientTester) start(which string, errc chan<- error, fn func() error) { + go func() { + finished := false + var err error + defer func() { + if !finished { + err = fmt.Errorf("%s goroutine didn't finish.", which) + } else if err != nil { + err = fmt.Errorf("%s: %v", which, err) + } + errc <- err + }() + err = fn() + finished = true + }() +} + +func (ct *clientTester) readFrame() (Frame, error) { + return readFrameTimeout(ct.fr, 2*time.Second) +} + +func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { + for { + f, err := ct.readFrame() + if err != nil { + return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + hf, ok := f.(*HeadersFrame) + if !ok { + return nil, fmt.Errorf("Got %T; want HeadersFrame", f) + } + return hf, nil + } +} + +type countingReader struct { + n *int64 +} + +func (r countingReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(i) + } + atomic.AddInt64(r.n, int64(len(p))) + return len(p), err +} + +func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } +func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } + +func testTransportReqBodyAfterResponse(t *testing.T, status int) { + const bodySize = 10 << 20 + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + var n int64 // atomic + req, err := http.NewRequest("PUT", "https://dummy.tld/", io.LimitReader(countingReader{&n}, bodySize)) + if err != nil { + return err + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != status { + return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("unexpected body: %q", slurp) + } + if status == 200 { + if got := atomic.LoadInt64(&n); got != bodySize { + return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) + } + } else { + if got := atomic.LoadInt64(&n); got == 0 || got >= bodySize { + return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + var dataRecv int64 + var closed bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. 
+ return nil + default: + return err + } + } + //println(fmt.Sprintf("server got frame: %v", f)) + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + if f.StreamEnded() { + return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) + } + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if dataRecv == 0 { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + dataRecv += int64(dataLen) + + if !closed && ((status != 200 && dataRecv > 0) || + (status == 200 && dataRecv == bodySize)) { + closed = true + if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { + return err + } + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +// See golang.org/issue/13444 +func TestTransportFullDuplex(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) // redundant but for clarity + w.(http.Flusher).Flush() + io.Copy(flushWriter{w}, capitalizeReader{r.Body}) + fmt.Fprintf(w, "bye.\n") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + pr, pw := io.Pipe() + req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = -1 + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + t.Fatalf("StatusCode = %v; want %v", res.StatusCode, 200) + } + bs := bufio.NewScanner(res.Body) + want := func(v string) { + if !bs.Scan() { + t.Fatalf("wanted to read %q but Scan() = false, err = %v", v, bs.Err()) + } + } + write := func(v string) { + _, err := io.WriteString(pw, v) + if err != nil { + t.Fatalf("pipe write: %v", err) + } + } + write("foo\n") + want("FOO") + write("bar\n") + want("BAR") + pw.Close() + want("bye.") + if err := bs.Err(); err != nil { + t.Fatal(err) + } +} + +func TestTransportConnectRequest(t *testing.T) { + gotc := make(chan *http.Request, 1) + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + gotc <- r + }, optOnlyServer) + defer st.Close() + + u, err := url.Parse(st.ts.URL) + if err != nil { + t.Fatal(err) + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + + tests := []struct { + req *http.Request + want string + }{ + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + }, + want: u.Host, + }, + { + req: &http.Request{ + Method: "CONNECT", + Header: http.Header{}, + URL: u, + Host: "example.com:123", + }, + want: "example.com:123", + }, + } + + for i, tt := range tests { + res, err := c.Do(tt.req) + if err != nil { + t.Errorf("%d. 
RoundTrip = %v", i, err) + continue + } + res.Body.Close() + req := <-gotc + if req.Method != "CONNECT" { + t.Errorf("method = %q; want CONNECT", req.Method) + } + if req.Host != tt.want { + t.Errorf("Host = %q; want %q", req.Host, tt.want) + } + if req.URL.Host != tt.want { + t.Errorf("URL.Host = %q; want %q", req.URL.Host, tt.want) + } + } +} + +type headerType int + +const ( + noHeader headerType = iota // omitted + oneHeader + splitHeader // broken into continuation on purpose +) + +const ( + f0 = noHeader + f1 = oneHeader + f2 = splitHeader + d0 = false + d1 = true +) + +// Test all 36 combinations of response frame orders: +// (3 ways of 100-continue) * (2 ways of headers) * (2 ways of data) * (3 ways of trailers):func TestTransportResponsePattern_00f0(t *testing.T) { testTransportResponsePattern(h0, h1, false, h0) } +// Generated by http://play.golang.org/p/SScqYKJYXd +func TestTransportResPattern_c0h1d0t0(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f0) } +func TestTransportResPattern_c0h1d0t1(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f1) } +func TestTransportResPattern_c0h1d0t2(t *testing.T) { testTransportResPattern(t, f0, f1, d0, f2) } +func TestTransportResPattern_c0h1d1t0(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f0) } +func TestTransportResPattern_c0h1d1t1(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f1) } +func TestTransportResPattern_c0h1d1t2(t *testing.T) { testTransportResPattern(t, f0, f1, d1, f2) } +func TestTransportResPattern_c0h2d0t0(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f0) } +func TestTransportResPattern_c0h2d0t1(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f1) } +func TestTransportResPattern_c0h2d0t2(t *testing.T) { testTransportResPattern(t, f0, f2, d0, f2) } +func TestTransportResPattern_c0h2d1t0(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f0) } +func TestTransportResPattern_c0h2d1t1(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f1) } +func TestTransportResPattern_c0h2d1t2(t *testing.T) { testTransportResPattern(t, f0, f2, d1, f2) } +func TestTransportResPattern_c1h1d0t0(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f0) } +func TestTransportResPattern_c1h1d0t1(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f1) } +func TestTransportResPattern_c1h1d0t2(t *testing.T) { testTransportResPattern(t, f1, f1, d0, f2) } +func TestTransportResPattern_c1h1d1t0(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f0) } +func TestTransportResPattern_c1h1d1t1(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f1) } +func TestTransportResPattern_c1h1d1t2(t *testing.T) { testTransportResPattern(t, f1, f1, d1, f2) } +func TestTransportResPattern_c1h2d0t0(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f0) } +func TestTransportResPattern_c1h2d0t1(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f1) } +func TestTransportResPattern_c1h2d0t2(t *testing.T) { testTransportResPattern(t, f1, f2, d0, f2) } +func TestTransportResPattern_c1h2d1t0(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f0) } +func TestTransportResPattern_c1h2d1t1(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f1) } +func TestTransportResPattern_c1h2d1t2(t *testing.T) { testTransportResPattern(t, f1, f2, d1, f2) } +func TestTransportResPattern_c2h1d0t0(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f0) } +func TestTransportResPattern_c2h1d0t1(t *testing.T) { testTransportResPattern(t, f2, f1, d0, f1) } +func TestTransportResPattern_c2h1d0t2(t *testing.T) { 
testTransportResPattern(t, f2, f1, d0, f2) } +func TestTransportResPattern_c2h1d1t0(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f0) } +func TestTransportResPattern_c2h1d1t1(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f1) } +func TestTransportResPattern_c2h1d1t2(t *testing.T) { testTransportResPattern(t, f2, f1, d1, f2) } +func TestTransportResPattern_c2h2d0t0(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f0) } +func TestTransportResPattern_c2h2d0t1(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f1) } +func TestTransportResPattern_c2h2d0t2(t *testing.T) { testTransportResPattern(t, f2, f2, d0, f2) } +func TestTransportResPattern_c2h2d1t0(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f0) } +func TestTransportResPattern_c2h2d1t1(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f1) } +func TestTransportResPattern_c2h2d1t2(t *testing.T) { testTransportResPattern(t, f2, f2, d1, f2) } + +func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerType, withData bool, trailers headerType) { + const reqBody = "some request body" + const resBody = "some response body" + + if resHeader == noHeader { + // TODO: test 100-continue followed by immediate + // server stream reset, without headers in the middle? + panic("invalid combination") + } + + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) + if expect100Continue != noHeader { + req.Header.Set("Expect", "100-continue") + } + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("Slurp: %v", err) + } + wantBody := resBody + if !withData { + wantBody = "" + } + if string(slurp) != wantBody { + return fmt.Errorf("body = %q; want %q", slurp, wantBody) + } + if trailers == noHeader { + if len(res.Trailer) > 0 { + t.Errorf("Trailer = %v; want none", res.Trailer) + } + } else { + want := http.Header{"Some-Trailer": {"some-value"}} + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Trailer = %v; want %v", res.Trailer, want) + } + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + endStream := false + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.Header().StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *DataFrame: + if !f.StreamEnded() { + // No need to send flow control tokens. The test request body is tiny. 
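+				// (Had the body been large enough to exhaust the
+				// window, the server side would need to extend
+				// credit, e.g. — a sketch mirroring the PUT test
+				// earlier in this file, with n the DATA length:
+				//	ct.fr.WriteWindowUpdate(0, uint32(n))          // connection window
+				//	ct.fr.WriteWindowUpdate(f.StreamID, uint32(n)) // stream window
+				// )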
+ continue + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) + enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) + if trailers != noHeader { + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) + } + endStream = withData == false && trailers == noHeader + send(resHeader) + } + if withData { + endStream = trailers == noHeader + ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) + } + if trailers != noHeader { + endStream = true + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) + send(trailers) + } + if endStream { + return nil + } + case *HeadersFrame: + if expect100Continue != noHeader { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) + send(expect100Continue) + } + } + } + } + ct.run() +} + +func TestTransportReceiveUndeclaredTrailer(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + if _, ok := res.Trailer["Some-Trailer"]; !ok { + return fmt.Errorf("expected Some-Trailer") + } + return nil + } + ct.server = func() error { + ct.greet() + + var n int + var hf *HeadersFrame + for hf == nil && n < 10 { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + hf, _ = f.(*HeadersFrame) + n++ + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + // send headers without Trailer header + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + // send trailers + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + return nil + } + ct.run() +} + +func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, oneHeader) +} +func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { + testTransportInvalidTrailer_Pseudo(t, splitHeader) +} +func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + }) +} + +func TestTransportInvalidTrailer_Capital1(t *testing.T) { + testTransportInvalidTrailer_Capital(t, oneHeader) +} +func TestTransportInvalidTrailer_Capital2(t *testing.T) { + testTransportInvalidTrailer_Capital(t, splitHeader) +} +func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + 
enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"}) + }) +} +func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { + testInvalidTrailer(t, oneHeader, headerFieldValueError("has\nnewline"), func(enc *hpack.Encoder) { + enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) + }) +} + +func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + return fmt.Errorf("status code = %v; want 200", res.StatusCode) + } + slurp, err := ioutil.ReadAll(res.Body) + se, ok := err.(StreamError) + if !ok || se.Cause != wantErr { + return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) + } + if len(slurp) > 0 { + return fmt.Errorf("body = %q; want nothing", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + var endStream bool + send := func(mode headerType) { + hbf := buf.Bytes() + switch mode { + case oneHeader: + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: endStream, + BlockFragment: hbf, + }) + case splitHeader: + if len(hbf) < 2 { + panic("too small") + } + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: false, + EndStream: endStream, + BlockFragment: hbf[:1], + }) + ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) + default: + panic("bogus mode") + } + } + // Response headers (1+ frames; 1 or 2 in this test, but never 0) + { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) + endStream = false + send(oneHeader) + } + // Trailers: + { + endStream = true + buf.Reset() + writeTrailer(enc) + send(trailers) + } + return nil + } + } + } + ct.run() +} + +// headerListSize returns the HTTP2 header list size of h. +// http://httpwg.org/specs/rfc7540.html#SETTINGS_MAX_HEADER_LIST_SIZE +// http://httpwg.org/specs/rfc7540.html#MaxHeaderBlock +func headerListSize(h http.Header) (size uint32) { + for k, vv := range h { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + size += hf.Size() + } + } + return size +} + +// padHeaders adds data to an http.Header until headerListSize(h) == +// limit. Due to the way header list sizes are calculated, padHeaders +// cannot add fewer than len("Pad-Headers") + 32 bytes to h, and will +// call t.Fatal if asked to do so. PadHeaders first reserves enough +// space for an empty "Pad-Headers" key, then adds as many copies of +// filler as possible. Any remaining bytes necessary to push the +// header list size up to limit are added to h["Pad-Headers"]. +func padHeaders(t *testing.T, h http.Header, limit uint64, filler string) { + if limit > 0xffffffff { + t.Fatalf("padHeaders: refusing to pad to more than 2^32-1 bytes. 
limit = %v", limit) + } + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minPadding := uint64(hf.Size()) + size := uint64(headerListSize(h)) + + minlimit := size + minPadding + if limit < minlimit { + t.Fatalf("padHeaders: limit %v < %v", limit, minlimit) + } + + // Use a fixed-width format for name so that fieldSize + // remains constant. + nameFmt := "Pad-Headers-%06d" + hf = hpack.HeaderField{Name: fmt.Sprintf(nameFmt, 1), Value: filler} + fieldSize := uint64(hf.Size()) + + // Add as many complete filler values as possible, leaving + // room for at least one empty "Pad-Headers" key. + limit = limit - minPadding + for i := 0; size+fieldSize < limit; i++ { + name := fmt.Sprintf(nameFmt, i) + h.Add(name, filler) + size += fieldSize + } + + // Add enough bytes to reach limit. + remain := limit - size + lastValue := strings.Repeat("*", int(remain)) + h.Add("Pad-Headers", lastValue) +} + +func TestPadHeaders(t *testing.T) { + check := func(h http.Header, limit uint32, fillerLen int) { + if h == nil { + h = make(http.Header) + } + filler := strings.Repeat("f", fillerLen) + padHeaders(t, h, uint64(limit), filler) + gotSize := headerListSize(h) + if gotSize != limit { + t.Errorf("Got size = %v; want %v", gotSize, limit) + } + } + // Try all possible combinations for small fillerLen and limit. + hf := hpack.HeaderField{Name: "Pad-Headers", Value: ""} + minLimit := hf.Size() + for limit := minLimit; limit <= 128; limit++ { + for fillerLen := 0; uint32(fillerLen) <= limit; fillerLen++ { + check(nil, limit, fillerLen) + } + } + + // Try a few tests with larger limits, plus cumulative + // tests. Since these tests are cumulative, tests[i+1].limit + // must be >= tests[i].limit + minLimit. See the comment on + // padHeaders for more info on why the limit arg has this + // restriction. + tests := []struct { + fillerLen int + limit uint32 + }{ + { + fillerLen: 64, + limit: 1024, + }, + { + fillerLen: 1024, + limit: 1286, + }, + { + fillerLen: 256, + limit: 2048, + }, + { + fillerLen: 1024, + limit: 10 * 1024, + }, + { + fillerLen: 1023, + limit: 11 * 1024, + }, + } + h := make(http.Header) + for _, tc := range tests { + check(nil, tc.limit, tc.fillerLen) + check(h, tc.limit, tc.fillerLen) + } +} + +func TestTransportChecksRequestHeaderListSize(t *testing.T) { + st := newServerTester(t, + func(w http.ResponseWriter, r *http.Request) { + // Consume body & force client to send + // trailers before writing response. + // ioutil.ReadAll returns non-nil err for + // requests that attempt to send greater than + // maxHeaderListSize bytes of trailers, since + // those requests generate a stream reset. 
+ ioutil.ReadAll(r.Body) + r.Body.Close() + }, + func(ts *httptest.Server) { + ts.Config.MaxHeaderBytes = 16 << 10 + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + checkRoundTrip := func(req *http.Request, wantErr error, desc string) { + res, err := tr.RoundTrip(req) + if err != wantErr { + if res != nil { + res.Body.Close() + } + t.Errorf("%v: RoundTrip err = %v; want %v", desc, err, wantErr) + return + } + if err == nil { + if res == nil { + t.Errorf("%v: response nil; want non-nil.", desc) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + t.Errorf("%v: response status = %v; want %v", desc, res.StatusCode, http.StatusOK) + } + return + } + if res != nil { + t.Errorf("%v: RoundTrip err = %v but response non-nil", desc, err) + } + } + headerListSizeForRequest := func(req *http.Request) (size uint64) { + contentLen := actualContentLength(req) + trailers, err := commaSeparatedTrailers(req) + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(req, true, trailers, contentLen) + cc.mu.Unlock() + if err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(hf hpack.HeaderField) { + size += uint64(hf.Size()) + }) + if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Fatalf("headerListSizeForRequest: %v", err) + } + } + return size + } + // Create a new Request for each test, rather than reusing the + // same Request, to avoid a race when modifying req.Headers. + // See https://github.com/golang/go/issues/21316 + newRequest := func() *http.Request { + // Body must be non-nil to enable writing trailers. + body := strings.NewReader("hello") + req, err := http.NewRequest("POST", st.ts.URL, body) + if err != nil { + t.Fatalf("newRequest: NewRequest: %v", err) + } + return req + } + + // Make an arbitrary request to ensure we get the server's + // settings frame and initialize peerMaxHeaderListSize. + req := newRequest() + checkRoundTrip(req, nil, "Initial request") + + // Get the ClientConn associated with the request and validate + // peerMaxHeaderListSize. + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + cc, err := tr.connPool().GetClientConn(req, addr) + if err != nil { + t.Fatalf("GetClientConn: %v", err) + } + cc.mu.Lock() + peerSize := cc.peerMaxHeaderListSize + cc.mu.Unlock() + st.scMu.Lock() + wantSize := uint64(st.sc.maxHeaderListSize()) + st.scMu.Unlock() + if peerSize != wantSize { + t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize) + } + + // Sanity check peerSize. (*serverConn) maxHeaderListSize adds + // 320 bytes of padding. + wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320 + if peerSize != wantHeaderBytes { + t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes) + } + + // Pad headers & trailers, but stay under peerSize. + req = newRequest() + req.Header = make(http.Header) + req.Trailer = make(http.Header) + filler := strings.Repeat("*", 1024) + padHeaders(t, req.Trailer, peerSize, filler) + // cc.encodeHeaders adds some default headers to the request, + // so we need to leave room for those. 
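+	// (Those defaults include the :method/:path/:scheme/:authority
+	// pseudo-headers plus fields like User-Agent and Accept-Encoding
+	// that the Transport fills in when unset, which is why the budget
+	// is measured with headerListSizeForRequest rather than assumed.)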
+	defaultBytes := headerListSizeForRequest(req)
+	padHeaders(t, req.Header, peerSize-defaultBytes, filler)
+	checkRoundTrip(req, nil, "Headers & Trailers under limit")
+
+	// Add enough header bytes to push us over peerSize.
+	req = newRequest()
+	req.Header = make(http.Header)
+	padHeaders(t, req.Header, peerSize, filler)
+	checkRoundTrip(req, errRequestHeaderListSize, "Headers over limit")
+
+	// Push trailers over the limit.
+	req = newRequest()
+	req.Trailer = make(http.Header)
+	padHeaders(t, req.Trailer, peerSize+1, filler)
+	checkRoundTrip(req, errRequestHeaderListSize, "Trailers over limit")
+
+	// Send headers with a single large value.
+	req = newRequest()
+	filler = strings.Repeat("*", int(peerSize))
+	req.Header = make(http.Header)
+	req.Header.Set("Big", filler)
+	checkRoundTrip(req, errRequestHeaderListSize, "Single large header")
+
+	// Send trailers with a single large value.
+	req = newRequest()
+	req.Trailer = make(http.Header)
+	req.Trailer.Set("Big", filler)
+	checkRoundTrip(req, errRequestHeaderListSize, "Single large trailer")
+}
+
+func TestTransportChecksResponseHeaderListSize(t *testing.T) {
+	ct := newClientTester(t)
+	ct.client = func() error {
+		req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+		res, err := ct.tr.RoundTrip(req)
+		if err != errResponseHeaderListSize {
+			size := int64(0)
+			if res != nil {
+				res.Body.Close()
+				for k, vv := range res.Header {
+					for _, v := range vv {
+						size += int64(len(k)) + int64(len(v)) + 32
+					}
+				}
+			}
+			return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size)
+		}
+		return nil
+	}
+	ct.server = func() error {
+		ct.greet()
+		var buf bytes.Buffer
+		enc := hpack.NewEncoder(&buf)
+
+		for {
+			f, err := ct.fr.ReadFrame()
+			if err != nil {
+				return err
+			}
+			switch f := f.(type) {
+			case *HeadersFrame:
+				enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+				large := strings.Repeat("a", 1<<10)
+				for i := 0; i < 5042; i++ {
+					enc.WriteField(hpack.HeaderField{Name: large, Value: large})
+				}
+				if size, want := buf.Len(), 6329; size != want {
+					// Note: this number might change if
+					// our hpack implementation
+					// changes. That's fine. This is
+					// just a sanity check that our
+					// response can fit in a single
+					// header block fragment frame.
+					return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want)
+				}
+				ct.fr.WriteHeaders(HeadersFrameParam{
+					StreamID:      f.StreamID,
+					EndHeaders:    true,
+					EndStream:     true,
+					BlockFragment: buf.Bytes(),
+				})
+				return nil
+			}
+		}
+	}
+	ct.run()
+}
+
+// Test that the Transport returns a typed error from Response.Body.Read calls
+// when the server sends an error. (here we use a panic, since that should generate
+// a stream error, but others like cancel should be similar)
+func TestTransportBodyReadErrorType(t *testing.T) {
+	doPanic := make(chan bool, 1)
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			w.(http.Flusher).Flush() // force headers out
+			<-doPanic
+			panic("boom")
+		},
+		optOnlyServer,
+		optQuiet,
+	)
+	defer st.Close()
+
+	tr := &Transport{TLSClientConfig: tlsConfigInsecure}
+	defer tr.CloseIdleConnections()
+	c := &http.Client{Transport: tr}
+
+	res, err := c.Get(st.ts.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+	doPanic <- true
+	buf := make([]byte, 100)
+	n, err := res.Body.Read(buf)
+	want := StreamError{StreamID: 0x1, Code: 0x2}
+	if !reflect.DeepEqual(want, err) {
+		t.Errorf("Read = %v, %#v; want error %#v", n, err, want)
+	}
+}
+
+// golang.org/issue/13924
+// This used to fail after many iterations, especially with -race:
+// go test -v -run=TestTransportDoubleCloseOnWriteError -count=500 -race
+func TestTransportDoubleCloseOnWriteError(t *testing.T) {
+	var (
+		mu   sync.Mutex
+		conn net.Conn // to close if set
+	)
+
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			mu.Lock()
+			defer mu.Unlock()
+			if conn != nil {
+				conn.Close()
+			}
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+
+	tr := &Transport{
+		TLSClientConfig: tlsConfigInsecure,
+		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+			tc, err := tls.Dial(network, addr, cfg)
+			if err != nil {
+				return nil, err
+			}
+			mu.Lock()
+			defer mu.Unlock()
+			conn = tc
+			return tc, nil
+		},
+	}
+	defer tr.CloseIdleConnections()
+	c := &http.Client{Transport: tr}
+	c.Get(st.ts.URL)
+}
+
+// Test that the http1 Transport.DisableKeepAlives option is respected
+// and connections are closed as soon as idle.
+// See golang.org/issue/14008
+func TestTransportDisableKeepAlives(t *testing.T) {
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			io.WriteString(w, "hi")
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+
+	connClosed := make(chan struct{}) // closed on tls.Conn.Close
+	tr := &Transport{
+		t1: &http.Transport{
+			DisableKeepAlives: true,
+		},
+		TLSClientConfig: tlsConfigInsecure,
+		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+			tc, err := tls.Dial(network, addr, cfg)
+			if err != nil {
+				return nil, err
+			}
+			return &noteCloseConn{Conn: tc, closefn: func() { close(connClosed) }}, nil
+		},
+	}
+	c := &http.Client{Transport: tr}
+	res, err := c.Get(st.ts.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ioutil.ReadAll(res.Body); err != nil {
+		t.Fatal(err)
+	}
+	defer res.Body.Close()
+
+	select {
+	case <-connClosed:
+	case <-time.After(1 * time.Second):
+		t.Errorf("timeout")
+	}
+}
+
+// Test concurrent requests with Transport.DisableKeepAlives. We can share connections,
+// but when things are totally idle, it still needs to close.
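+// (DisableKeepAlives comes from the wrapped HTTP/1 transport — sketch:
+//	tr := &Transport{t1: &http.Transport{DisableKeepAlives: true}}
+// Overlapping requests may still share the one connection; only once the
+// last active stream finishes should the conn be closed, which the
+// staggered requests below are meant to exercise.)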
+func TestTransportDisableKeepAlives_Concurrency(t *testing.T) {
+	const D = 25 * time.Millisecond
+	st := newServerTester(t,
+		func(w http.ResponseWriter, r *http.Request) {
+			time.Sleep(D)
+			io.WriteString(w, "hi")
+		},
+		optOnlyServer,
+	)
+	defer st.Close()
+
+	var dials int32
+	var conns sync.WaitGroup
+	tr := &Transport{
+		t1: &http.Transport{
+			DisableKeepAlives: true,
+		},
+		TLSClientConfig: tlsConfigInsecure,
+		DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
+			tc, err := tls.Dial(network, addr, cfg)
+			if err != nil {
+				return nil, err
+			}
+			atomic.AddInt32(&dials, 1)
+			conns.Add(1)
+			return &noteCloseConn{Conn: tc, closefn: func() { conns.Done() }}, nil
+		},
+	}
+	c := &http.Client{Transport: tr}
+	var reqs sync.WaitGroup
+	const N = 20
+	for i := 0; i < N; i++ {
+		reqs.Add(1)
+		if i == N-1 {
+			// For the final request, try to make all the
+			// others close. This isn't verified in the
+			// count, other than the Log statement, since
+			// it's so timing dependent. This test is
+			// really to make sure we don't interrupt a
+			// valid request.
+			time.Sleep(D * 2)
+		}
+		go func() {
+			defer reqs.Done()
+			res, err := c.Get(st.ts.URL)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+			if _, err := ioutil.ReadAll(res.Body); err != nil {
+				t.Error(err)
+				return
+			}
+			res.Body.Close()
+		}()
+	}
+	reqs.Wait()
+	conns.Wait()
+	t.Logf("did %d dials, %d requests", atomic.LoadInt32(&dials), N)
+}
+
+type noteCloseConn struct {
+	net.Conn
+	onceClose sync.Once
+	closefn   func()
+}
+
+func (c *noteCloseConn) Close() error {
+	c.onceClose.Do(c.closefn)
+	return c.Conn.Close()
+}
+
+func isTimeout(err error) bool {
+	switch err := err.(type) {
+	case nil:
+		return false
+	case *url.Error:
+		return isTimeout(err.Err)
+	case net.Error:
+		return err.Timeout()
+	}
+	return false
+}
+
+// Test that the http1 Transport.ResponseHeaderTimeout option is respected
+// and that a cancel (RST_STREAM with ErrCodeCancel) is sent when it fires.
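+// (The timeout is read from the wrapped HTTP/1 transport — a sketch
+// mirroring the setup in testTransportResponseHeaderTimeout below:
+//	ct.tr.t1 = &http.Transport{ResponseHeaderTimeout: 5 * time.Millisecond}
+// The _NoBody and _Body variants cover requests with and without a
+// request body in flight when the timer fires.)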
+func TestTransportResponseHeaderTimeout_NoBody(t *testing.T) { + testTransportResponseHeaderTimeout(t, false) +} +func TestTransportResponseHeaderTimeout_Body(t *testing.T) { + testTransportResponseHeaderTimeout(t, true) +} + +func testTransportResponseHeaderTimeout(t *testing.T, body bool) { + ct := newClientTester(t) + ct.tr.t1 = &http.Transport{ + ResponseHeaderTimeout: 5 * time.Millisecond, + } + ct.client = func() error { + c := &http.Client{Transport: ct.tr} + var err error + var n int64 + const bodySize = 4 << 20 + if body { + _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) + } else { + _, err = c.Get("https://dummy.tld/") + } + if !isTimeout(err) { + t.Errorf("client expected timeout error; got %#v", err) + } + if body && n != bodySize { + t.Errorf("only read %d bytes of body; want %d", n, bodySize) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + switch f := f.(type) { + case *DataFrame: + dataLen := len(f.Data()) + if dataLen > 0 { + if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { + return err + } + if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { + return err + } + } + case *RSTStreamFrame: + if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { + return nil + } + } + } + } + ct.run() +} + +func TestTransportDisableCompression(t *testing.T) { + const body = "sup" + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + want := http.Header{ + "User-Agent": []string{"Go-http-client/2.0"}, + } + if !reflect.DeepEqual(r.Header, want) { + t.Errorf("request headers = %v; want %v", r.Header, want) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + t1: &http.Transport{ + DisableCompression: true, + }, + } + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + +// RFC 7540 section 8.1.2.2 +func TestTransportRejectsConnHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + tests := []struct { + key string + value []string + want string + }{ + { + key: "Upgrade", + value: []string{"anything"}, + want: "ERROR: http2: invalid Upgrade request header: [\"anything\"]", + }, + { + key: "Connection", + value: []string{"foo"}, + want: "ERROR: http2: invalid Connection request header: [\"foo\"]", + }, + { + key: "Connection", + value: []string{"close"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Connection", + value: []string{"close", "something-else"}, + want: "ERROR: http2: invalid Connection request header: [\"close\" \"something-else\"]", + }, + { + key: "Connection", + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Proxy-Connection", // just deleted and ignored + value: []string{"keep-alive"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{""}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: 
[]string{"foo"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"foo\"]", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Transfer-Encoding", + value: []string{"chunked", "other"}, + want: "ERROR: http2: invalid Transfer-Encoding request header: [\"chunked\" \"other\"]", + }, + { + key: "Content-Length", + value: []string{"123"}, + want: "Accept-Encoding,User-Agent", + }, + { + key: "Keep-Alive", + value: []string{"doop"}, + want: "Accept-Encoding,User-Agent", + }, + } + + for _, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header[tt.key] = tt.value + res, err := tr.RoundTrip(req) + var got string + if err != nil { + got = fmt.Sprintf("ERROR: %v", err) + } else { + got = res.Header.Get("Got-Header") + res.Body.Close() + } + if got != tt.want { + t.Errorf("For key %q, value %q, got = %q; want %q", tt.key, tt.value, got, tt.want) + } + } +} + +// golang.org/issue/14048 +func TestTransportFailsOnInvalidHeaders(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + var got []string + for k := range r.Header { + got = append(got, k) + } + sort.Strings(got) + w.Header().Set("Got-Header", strings.Join(got, ",")) + }, optOnlyServer) + defer st.Close() + + tests := [...]struct { + h http.Header + wantErr string + }{ + 0: { + h: http.Header{"with space": {"foo"}}, + wantErr: `invalid HTTP header name "with space"`, + }, + 1: { + h: http.Header{"name": {"Брэд"}}, + wantErr: "", // okay + }, + 2: { + h: http.Header{"имя": {"Brad"}}, + wantErr: `invalid HTTP header name "имя"`, + }, + 3: { + h: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP header value "foo\x01bar" for header "foo"`, + }, + } + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i, tt := range tests { + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Header = tt.h + res, err := tr.RoundTrip(req) + var bad bool + if tt.wantErr == "" { + if err != nil { + bad = true + t.Errorf("case %d: error = %v; want no error", i, err) + } + } else { + if !strings.Contains(fmt.Sprint(err), tt.wantErr) { + bad = true + t.Errorf("case %d: error = %v; want error %q", i, err, tt.wantErr) + } + } + if err == nil { + if bad { + t.Logf("case %d: server got headers %q", i, res.Header.Get("Got-Header")) + } + res.Body.Close() + } + } +} + +// Tests that gzipReader doesn't crash on a second Read call following +// the first Read call's gzip.NewReader returning an error. +func TestGzipReader_DoubleReadCrash(t *testing.T) { + gz := &gzipReader{ + body: ioutil.NopCloser(strings.NewReader("0123456789")), + } + var buf [1]byte + n, err1 := gz.Read(buf[:]) + if n != 0 || !strings.Contains(fmt.Sprint(err1), "invalid header") { + t.Fatalf("Read = %v, %v; want 0, invalid header", n, err1) + } + n, err2 := gz.Read(buf[:]) + if n != 0 || err2 != err1 { + t.Fatalf("second Read = %v, %v; want 0, %v", n, err2, err1) + } +} + +func TestTransportNewTLSConfig(t *testing.T) { + tests := [...]struct { + conf *tls.Config + host string + want *tls.Config + }{ + // Normal case. 
+		0: {
+			conf: nil,
+			host: "foo.com",
+			want: &tls.Config{
+				ServerName: "foo.com",
+				NextProtos: []string{NextProtoTLS},
+			},
+		},
+
+		// User-provided name (bar.com) takes precedence:
+		1: {
+			conf: &tls.Config{
+				ServerName: "bar.com",
+			},
+			host: "foo.com",
+			want: &tls.Config{
+				ServerName: "bar.com",
+				NextProtos: []string{NextProtoTLS},
+			},
+		},
+
+		// NextProto is prepended:
+		2: {
+			conf: &tls.Config{
+				NextProtos: []string{"foo", "bar"},
+			},
+			host: "example.com",
+			want: &tls.Config{
+				ServerName: "example.com",
+				NextProtos: []string{NextProtoTLS, "foo", "bar"},
+			},
+		},
+
+		// NextProto is not duplicated:
+		3: {
+			conf: &tls.Config{
+				NextProtos: []string{"foo", "bar", NextProtoTLS},
+			},
+			host: "example.com",
+			want: &tls.Config{
+				ServerName: "example.com",
+				NextProtos: []string{"foo", "bar", NextProtoTLS},
+			},
+		},
+	}
+	for i, tt := range tests {
+		// Ignore the session ticket keys part, which ends up populating
+		// unexported fields in the Config:
+		if tt.conf != nil {
+			tt.conf.SessionTicketsDisabled = true
+		}
+
+		tr := &Transport{TLSClientConfig: tt.conf}
+		got := tr.newTLSConfig(tt.host)
+
+		got.SessionTicketsDisabled = false
+
+		if !reflect.DeepEqual(got, tt.want) {
+			t.Errorf("%d. got %#v; want %#v", i, got, tt.want)
+		}
+	}
+}
+
+// The Google GFE responds to HEAD requests with a HEADERS frame
+// without END_STREAM, followed by a 0-length DATA frame with
+// END_STREAM. Make sure we don't get confused by that. (We did.)
+func TestTransportReadHeadResponse(t *testing.T) {
+	ct := newClientTester(t)
+	clientDone := make(chan struct{})
+	ct.client = func() error {
+		defer close(clientDone)
+		req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
+		res, err := ct.tr.RoundTrip(req)
+		if err != nil {
+			return err
+		}
+		if res.ContentLength != 123 {
+			return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength)
+		}
+		slurp, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return fmt.Errorf("ReadAll: %v", err)
+		}
+		if len(slurp) > 0 {
+			return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
+		}
+		return nil
+	}
+	ct.server = func() error {
+		ct.greet()
+		for {
+			f, err := ct.fr.ReadFrame()
+			if err != nil {
+				t.Logf("ReadFrame: %v", err)
+				return nil
+			}
+			hf, ok := f.(*HeadersFrame)
+			if !ok {
+				continue
+			}
+			var buf bytes.Buffer
+			enc := hpack.NewEncoder(&buf)
+			enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+			enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
+			ct.fr.WriteHeaders(HeadersFrameParam{
+				StreamID:      hf.StreamID,
+				EndHeaders:    true,
+				EndStream:     false, // as the GFE does
+				BlockFragment: buf.Bytes(),
+			})
+			ct.fr.WriteData(hf.StreamID, true, nil)
+
+			<-clientDone
+			return nil
+		}
+	}
+	ct.run()
+}
+
+func TestTransportReadHeadResponseWithBody(t *testing.T) {
+	// This test uses an invalid response format.
+	// Discard logger output to avoid spamming the test output.
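+	// (Swapping the global logger below assumes this test doesn't run in
+	// parallel with other tests that log; output is restored to os.Stderr
+	// on return.)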
+ log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + response := "redirecting to /elsewhere" + ct := newClientTester(t) + clientDone := make(chan struct{}) + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + if res.ContentLength != int64(len(response)) { + return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response)) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("ReadAll: %v", err) + } + if len(slurp) > 0 { + return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(hf.StreamID, true, []byte(response)) + + <-clientDone + return nil + } + } + ct.run() +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (int, error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// golang.org/issue/15425: test that a handler closing the request +// body doesn't terminate the stream to the peer. (It just stops +// readability from the handler's side, and eventually the client +// runs out of flow control tokens) +func TestTransportHandlerBodyClose(t *testing.T) { + const bodySize = 10 << 20 + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + r.Body.Close() + io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + g0 := runtime.NumGoroutine() + + const numReq = 10 + for i := 0; i < numReq; i++ { + req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + if err != nil { + t.Fatal(err) + } + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if n != bodySize || err != nil { + t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) + } + } + tr.CloseIdleConnections() + + gd := runtime.NumGoroutine() - g0 + if gd > numReq/2 { + t.Errorf("appeared to leak goroutines") + } + +} + +// https://golang.org/issue/15930 +func TestTransportFlowControl(t *testing.T) { + const bufLen = 64 << 10 + var total int64 = 100 << 20 // 100MB + if testing.Short() { + total = 10 << 20 + } + + var wrote int64 // updated atomically + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, bufLen) + for wrote < total { + n, err := w.Write(b) + atomic.AddInt64(&wrote, int64(n)) + if err != nil { + t.Errorf("ResponseWriter.Write error: %v", err) + break + } + w.(http.Flusher).Flush() + } + }, optOnlyServer) + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + t.Fatal("NewRequest error:", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + 
t.Fatal("RoundTrip error:", err) + } + defer resp.Body.Close() + + var read int64 + b := make([]byte, bufLen) + for { + n, err := resp.Body.Read(b) + if err == io.EOF { + break + } + if err != nil { + t.Fatal("Read error:", err) + } + read += int64(n) + + const max = transportDefaultStreamFlow + if w := atomic.LoadInt64(&wrote); -max > read-w || read-w > max { + t.Fatalf("Too much data inflight: server wrote %v bytes but client only received %v", w, read) + } + + // Let the server get ahead of the client. + time.Sleep(1 * time.Millisecond) + } +} + +// golang.org/issue/14627 -- if the server sends a GOAWAY frame, make +// the Transport remember it and return it back to users (via +// RoundTrip or request body reads) if needed (e.g. if the server +// proceeds to close the TCP connection before the client gets its +// response) +func TestTransportUsesGoAwayDebugError_RoundTrip(t *testing.T) { + testTransportUsesGoAwayDebugError(t, false) +} + +func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { + testTransportUsesGoAwayDebugError(t, true) +} + +func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const goAwayErrCode = ErrCodeHTTP11Required // arbitrary + const goAwayDebugData = "some debug data" + + ct.client = func() error { + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if failMidBody { + if err != nil { + return fmt.Errorf("unexpected client RoundTrip error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + } + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + t.Logf("ReadFrame: %v", err) + return nil + } + hf, ok := f.(*HeadersFrame) + if !ok { + continue + } + if failMidBody { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + } + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. 
+			ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
+			ct.fr.WriteGoAway(5, goAwayErrCode, nil)
+			ct.sc.(*net.TCPConn).CloseWrite()
+			<-clientDone
+			return nil
+		}
+	}
+	ct.run()
+}
+
+func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) {
+	ct := newClientTester(t)
+
+	clientClosed := make(chan struct{})
+	serverWroteFirstByte := make(chan struct{})
+
+	ct.client = func() error {
+		req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+		res, err := ct.tr.RoundTrip(req)
+		if err != nil {
+			return err
+		}
+		<-serverWroteFirstByte
+
+		if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {
+			return fmt.Errorf("body read = %v, %v; want 1, nil", n, err)
+		}
+		res.Body.Close() // leaving 4999 bytes unread
+		close(clientClosed)
+
+		return nil
+	}
+	ct.server = func() error {
+		ct.greet()
+
+		var hf *HeadersFrame
+		for {
+			f, err := ct.fr.ReadFrame()
+			if err != nil {
+				return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
+			}
+			switch f.(type) {
+			case *WindowUpdateFrame, *SettingsFrame:
+				continue
+			}
+			var ok bool
+			hf, ok = f.(*HeadersFrame)
+			if !ok {
+				return fmt.Errorf("Got %T; want HeadersFrame", f)
+			}
+			break
+		}
+
+		var buf bytes.Buffer
+		enc := hpack.NewEncoder(&buf)
+		enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+		enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
+		ct.fr.WriteHeaders(HeadersFrameParam{
+			StreamID:      hf.StreamID,
+			EndHeaders:    true,
+			EndStream:     false,
+			BlockFragment: buf.Bytes(),
+		})
+
+		// Two cases:
+		// - Send one DATA frame with 5000 bytes.
+		// - Send two DATA frames with 1 and 4999 bytes each.
+		//
+		// In both cases, the client should consume one byte of data,
+		// refund that byte, then refund the following 4999 bytes.
+		//
+		// In the second case, the server waits for the client connection to
+		// close before sending the second DATA frame. This tests the case
+		// where the client receives a DATA frame after it has reset the stream.
+		if oneDataFrame {
+			ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000))
+			close(serverWroteFirstByte)
+			<-clientClosed
+		} else {
+			ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1))
+			close(serverWroteFirstByte)
+			<-clientClosed
+			ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999))
+		}
+
+		waitingFor := "RSTStreamFrame"
+		for {
+			f, err := ct.fr.ReadFrame()
+			if err != nil {
+				return fmt.Errorf("ReadFrame while waiting for %s: %v", waitingFor, err)
+			}
+			if _, ok := f.(*SettingsFrame); ok {
+				continue
+			}
+			switch waitingFor {
+			case "RSTStreamFrame":
+				if rf, ok := f.(*RSTStreamFrame); !ok || rf.ErrCode != ErrCodeCancel {
+					return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f))
+				}
+				waitingFor = "WindowUpdateFrame"
+			case "WindowUpdateFrame":
+				if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != 4999 {
+					return fmt.Errorf("Expected WindowUpdateFrame for 4999 bytes; got %v", summarizeFrame(f))
+				}
+				return nil
+			}
+		}
+	}
+	ct.run()
+}
+
+// See golang.org/issue/16481
+func TestTransportReturnsUnusedFlowControlSingleWrite(t *testing.T) {
+	testTransportReturnsUnusedFlowControl(t, true)
+}
+
+// See golang.org/issue/20469
+func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) {
+	testTransportReturnsUnusedFlowControl(t, false)
+}
+
+// Issue 16612: adjust flow control on open streams when transport
+// receives SETTINGS with INITIAL_WINDOW_SIZE from server.
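+// (Server-side sequence under test, roughly — mirroring the frame calls
+// in the test body below, where newSize is the enlarged window:
+//	ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: newSize})
+//	ct.fr.WriteWindowUpdate(0, newSize) // the connection window is separate
+//	ct.fr.WriteSettingsAck()
+// Per RFC 7540 §6.9.2, streams already open must have their send windows
+// adjusted by the delta between old and new INITIAL_WINDOW_SIZE.)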
+func TestTransportAdjustsFlowControl(t *testing.T) { + ct := newClientTester(t) + clientDone := make(chan struct{}) + + const bodySize = 1 << 20 + + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + + req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + res.Body.Close() + return nil + } + ct.server = func() error { + _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) + if err != nil { + return fmt.Errorf("reading client preface: %v", err) + } + + var gotBytes int64 + var sentSettings bool + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + return nil + default: + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + } + switch f := f.(type) { + case *DataFrame: + gotBytes += int64(len(f.Data())) + // After we've got half the client's + // initial flow control window's worth + // of request body data, give it just + // enough flow control to finish. + if gotBytes >= initialWindowSize/2 && !sentSettings { + sentSettings = true + + ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) + ct.fr.WriteWindowUpdate(0, bodySize) + ct.fr.WriteSettingsAck() + } + + if f.StreamEnded() { + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + } + } + } + ct.run() +} + +// See golang.org/issue/16556 +func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool, 1) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err != nil { + return err + } + defer res.Body.Close() + <-unblockClient + return nil + } + ct.server = func() error { + ct.greet() + + var hf *HeadersFrame + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + continue + } + var ok bool + hf, ok = f.(*HeadersFrame) + if !ok { + return fmt.Errorf("Got %T; want HeadersFrame", f) + } + break + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + pad := make([]byte, 5) + ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream + + f, err := ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for first WindowUpdateFrame: %v", err) + } + wantBack := uint32(len(pad)) + 1 // one byte for the length of the padding + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || wuf.StreamID != 0 { + return fmt.Errorf("Expected conn WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + + f, err = ct.readNonSettingsFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for second WindowUpdateFrame: %v", err) + } + if wuf, ok := f.(*WindowUpdateFrame); !ok || wuf.Increment != wantBack || 
wuf.StreamID == 0 { + return fmt.Errorf("Expected stream WindowUpdateFrame for %d bytes; got %v", wantBack, summarizeFrame(f)) + } + unblockClient <- true + return nil + } + ct.run() +} + +// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a +// StreamError as a result of the response HEADERS +func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { + ct := newClientTester(t) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := ct.tr.RoundTrip(req) + if err == nil { + res.Body.Close() + return errors.New("unexpected successful GET") + } + want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} + if !reflect.DeepEqual(want, err) { + t.Errorf("RoundTrip error = %#v; want %#v", err, want) + } + return nil + } + ct.server = func() error { + ct.greet() + + hf, err := ct.firstHeaders() + if err != nil { + return err + } + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + + for { + fr, err := ct.readFrame() + if err != nil { + return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) + } + if _, ok := fr.(*SettingsFrame); ok { + continue + } + if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol { + t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) + } + break + } + + return nil + } + ct.run() +} + +// byteAndEOFReader returns is in an io.Reader which reads one byte +// (the underlying byte) and io.EOF at once in its Read call. +type byteAndEOFReader byte + +func (b byteAndEOFReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + panic("unexpected useless call") + } + p[0] = byte(b) + return 1, io.EOF +} + +// Issue 16788: the Transport had a regression where it started +// sending a spurious DATA frame with a duplicate END_STREAM bit after +// the request body writer goroutine had already read an EOF from the +// Request.Body and included the END_STREAM on a data-carrying DATA +// frame. +// +// Notably, to trigger this, the requests need to use a Request.Body +// which returns (non-0, io.EOF) and also needs to set the ContentLength +// explicitly. +func TestTransportBodyDoubleEndStream(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + // Nothing. + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + for i := 0; i < 2; i++ { + req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) + req.ContentLength = 1 + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatalf("failure on req %d: %v", i+1, err) + } + defer res.Body.Close() + } +} + +// golang.org/issue/16847, golang.org/issue/19103 +func TestTransportRequestPathPseudo(t *testing.T) { + type result struct { + path string + err string + } + tests := []struct { + req *http.Request + want result + }{ + 0: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "/foo", + }, + }, + want: result{path: "/foo"}, + }, + // In Go 1.7, we accepted paths of "//foo". + // In Go 1.8, we rejected it (issue 16847). + // In Go 1.9, we accepted it again (issue 19103). 
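+ // The case below locks in the Go 1.9 behavior: the double slash is
+ // sent through verbatim as the :path pseudo-header.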
+ 1: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Host: "foo.com", + Path: "//foo", + }, + }, + want: result{path: "//foo"}, + }, + + // Opaque with //$Matching_Hostname/path + 2: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//foo.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque with some other Request.Host instead: + 3: { + req: &http.Request{ + Method: "GET", + Host: "bar.com", + URL: &url.URL{ + Scheme: "https", + Opaque: "//bar.com/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque without the leading "//": + 4: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Opaque: "/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{path: "/path"}, + }, + + // Opaque we can't handle: + 5: { + req: &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "https", + Opaque: "//unknown_host/path", + Host: "foo.com", + Path: "/ignored", + }, + }, + want: result{err: `invalid request :path "https://unknown_host/path" from URL.Opaque = "//unknown_host/path"`}, + }, + + // A CONNECT request: + 6: { + req: &http.Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "foo.com", + }, + }, + want: result{}, + }, + } + for i, tt := range tests { + cc := &ClientConn{peerMaxHeaderListSize: 0xffffffffffffffff} + cc.henc = hpack.NewEncoder(&cc.hbuf) + cc.mu.Lock() + hdrs, err := cc.encodeHeaders(tt.req, false, "", -1) + cc.mu.Unlock() + var got result + hpackDec := hpack.NewDecoder(initialHeaderTableSize, func(f hpack.HeaderField) { + if f.Name == ":path" { + got.path = f.Value + } + }) + if err != nil { + got.err = err.Error() + } else if len(hdrs) > 0 { + if _, err := hpackDec.Write(hdrs); err != nil { + t.Errorf("%d. bogus hpack: %v", i, err) + continue + } + } + if got != tt.want { + t.Errorf("%d. got %+v; want %+v", i, got, tt.want) + } + + } + +} + +// golang.org/issue/17071 -- don't sniff the first byte of the request body +// before we've determined that the ClientConn is usable. +func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { + const body = "foo" + req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) + cc := &ClientConn{ + closed: true, + } + _, err := cc.RoundTrip(req) + if err != errClientConnUnusable { + t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("ReadAll = %v", err) + } + if string(slurp) != body { + t.Errorf("Body = %q; want %q", slurp, body) + } +} + +func TestClientConnPing(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) + defer st.Close() + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + cc, err := tr.dialClientConn(st.ts.Listener.Addr().String(), false) + if err != nil { + t.Fatal(err) + } + if err = cc.Ping(testContext{}); err != nil { + t.Fatal(err) + } +} + +// Issue 16974: if the server sent a DATA frame after the user +// canceled the Transport's Request, the Transport previously wrote to a +// closed pipe, got an error, and ended up closing the whole TCP +// connection. +func TestTransportCancelDataResponseRace(t *testing.T) { + cancel := make(chan struct{}) + clientGotError := make(chan bool, 1) + + const msg = "Hello." 
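+ // The non-/hello path streams DATA frames until the client cancels
+ // mid-body; the follow-up GET /hello then checks that the Transport
+ // didn't tear down the whole TCP connection in response.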
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/hello") { + time.Sleep(50 * time.Millisecond) + io.WriteString(w, msg) + return + } + for i := 0; i < 50; i++ { + io.WriteString(w, "Some data.") + w.(http.Flusher).Flush() + if i == 2 { + close(cancel) + <-clientGotError + } + time.Sleep(10 * time.Millisecond) + } + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", st.ts.URL, nil) + req.Cancel = cancel + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { + t.Fatal("unexpected success") + } + clientGotError <- true + + res, err = c.Get(st.ts.URL + "/hello") + if err != nil { + t.Fatal(err) + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(slurp) != msg { + t.Errorf("Got = %q; want %q", slurp, msg) + } +} + +// Issue 21316: It should be safe to reuse an http.Request after the +// request has completed. +func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + io.WriteString(w, "body") + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, _ := http.NewRequest("GET", st.ts.URL, nil) + resp, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + t.Fatalf("error reading response body: %v", err) + } + if err := resp.Body.Close(); err != nil { + t.Fatalf("error closing response body: %v", err) + } + + // This access of req.Header should not race with code in the transport. + req.Header = http.Header{} +} + +func TestTransportRetryAfterGOAWAY(t *testing.T) { + var dialer struct { + sync.Mutex + count int + } + ct1 := make(chan *clientTester) + ct2 := make(chan *clientTester) + + ln := newLocalListener(t) + defer ln.Close() + + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + } + tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer.Lock() + defer dialer.Unlock() + dialer.count++ + if dialer.count == 3 { + return nil, errors.New("unexpected number of dials") + } + cc, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return nil, fmt.Errorf("dial error: %v", err) + } + sc, err := ln.Accept() + if err != nil { + return nil, fmt.Errorf("accept error: %v", err) + } + ct := &clientTester{ + t: t, + tr: tr, + cc: cc, + sc: sc, + fr: NewFramer(sc, sc), + } + switch dialer.count { + case 1: + ct1 <- ct + case 2: + ct2 <- ct + } + return cc, nil + } + + errs := make(chan error, 3) + done := make(chan struct{}) + defer close(done) + + // Client. + go func() { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + res, err := tr.RoundTrip(req) + if res != nil { + res.Body.Close() + if got := res.Header.Get("Foo"); got != "bar" { + err = fmt.Errorf("foo header = %q; want bar", got) + } + } + if err != nil { + err = fmt.Errorf("RoundTrip: %v", err) + } + errs <- err + }() + + connToClose := make(chan io.Closer, 2) + + // Server for the first request. 
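+ // It replies with an immediate GOAWAY (last stream ID 0, meaning no
+ // streams were processed), so the Transport should retry the request
+ // on a new connection, which is handled by the second server below.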
+ go func() { + var ct *clientTester + select { + case ct = <-ct1: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server1 failed reading HEADERS: %v", err) + return + } + t.Logf("server1 got %v", hf) + if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { + errs <- fmt.Errorf("server1 failed writing GOAWAY: %v", err) + return + } + errs <- nil + }() + + // Server for the second request. + go func() { + var ct *clientTester + select { + case ct = <-ct2: + case <-done: + return + } + + connToClose <- ct.cc + ct.greet() + hf, err := ct.firstHeaders() + if err != nil { + errs <- fmt.Errorf("server2 failed reading HEADERS: %v", err) + return + } + t.Logf("server2 got %v", hf) + + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) + err = ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: false, + BlockFragment: buf.Bytes(), + }) + if err != nil { + errs <- fmt.Errorf("server2 failed writing response HEADERS: %v", err) + } else { + errs <- nil + } + }() + + for k := 0; k < 3; k++ { + select { + case err := <-errs: + if err != nil { + t.Error(err) + } + case <-time.After(1 * time.Second): + t.Errorf("timed out") + } + } + + for { + select { + case c := <-connToClose: + c.Close() + default: + return + } + } +} + +func TestTransportRetryAfterRefusedStream(t *testing.T) { + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip: %v", err) + } + resp.Body.Close() + if resp.StatusCode != 204 { + return fmt.Errorf("Status = %v; want 204", resp.StatusCode) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + nreq := 0 + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + nreq++ + if nreq == 1 { + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + } else { + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportRetryHasLimit(t *testing.T) { + // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s. 
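+ // Every attempt is refused below with RST_STREAM(REFUSED_STREAM), so
+ // the Transport keeps retrying with a doubling backoff until it gives up.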
+ if testing.Short() { + t.Skip("skipping long test in short mode") + } + clientDone := make(chan struct{}) + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + defer close(clientDone) + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + t.Logf("expected error, got: %v", err) + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it + // will have reported any + // errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} + +func TestTransportResponseDataBeforeHeaders(t *testing.T) { + // This test use not valid response format. + // Discarding logger output to not spam tests output. + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + + ct := newClientTester(t) + ct.client = func() error { + defer ct.cc.(*net.TCPConn).CloseWrite() + req := httptest.NewRequest("GET", "https://dummy.tld/", nil) + // First request is normal to ensure the check is per stream and not per connection. + _, err := ct.tr.RoundTrip(req) + if err != nil { + return fmt.Errorf("RoundTrip expected no error, got: %v", err) + } + // Second request returns a DATA frame with no HEADERS. + resp, err := ct.tr.RoundTrip(req) + if err == nil { + return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + } + if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { + return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err) + } + return nil + } + ct.server = func() error { + ct.greet() + for { + f, err := ct.fr.ReadFrame() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + switch f := f.(type) { + case *WindowUpdateFrame, *SettingsFrame: + case *HeadersFrame: + switch f.StreamID { + case 1: + // Send a valid response to first request. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + case 3: + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + ct.run() +} +func TestTransportRequestsStallAtServerLimit(t *testing.T) { + const maxConcurrent = 2 + + greet := make(chan struct{}) // server sends initial SETTINGS frame + gotRequest := make(chan struct{}) // server received a request + clientDone := make(chan struct{}) + + // Collect errors from goroutines. + var wg sync.WaitGroup + errs := make(chan error, 100) + defer func() { + wg.Wait() + close(errs) + for err := range errs { + t.Error(err) + } + }() + + // We will send maxConcurrent+2 requests. This checker goroutine waits for the + // following stages: + // 1. The first maxConcurrent requests are received by the server. + // 2. The client will cancel the next request + // 3. The server is unblocked so it can service the first maxConcurrent requests + // 4. 
The client will send the final request + wg.Add(1) + unblockClient := make(chan struct{}) + clientRequestCancelled := make(chan struct{}) + unblockServer := make(chan struct{}) + go func() { + defer wg.Done() + // Stage 1. + for k := 0; k < maxConcurrent; k++ { + <-gotRequest + } + // Stage 2. + close(unblockClient) + <-clientRequestCancelled + // Stage 3: give some time for the final RoundTrip call to be scheduled and + // verify that the final request is not sent. + time.Sleep(50 * time.Millisecond) + select { + case <-gotRequest: + errs <- errors.New("last request did not stall") + close(unblockServer) + return + default: + } + close(unblockServer) + // Stage 4. + <-gotRequest + }() + + ct := newClientTester(t) + ct.client = func() error { + var wg sync.WaitGroup + defer func() { + wg.Wait() + close(clientDone) + ct.cc.(*net.TCPConn).CloseWrite() + }() + for k := 0; k < maxConcurrent+2; k++ { + wg.Add(1) + go func(k int) { + defer wg.Done() + // Don't send the second request until after receiving SETTINGS from the server + // to avoid a race where we use the default SettingMaxConcurrentStreams, which + // is much larger than maxConcurrent. We have to send the first request before + // waiting because the first request triggers the dial and greet. + if k > 0 { + <-greet + } + // Block until maxConcurrent requests are sent before sending any more. + if k >= maxConcurrent { + <-unblockClient + } + req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil) + if k == maxConcurrent { + // This request will be canceled. + cancel := make(chan struct{}) + req.Cancel = cancel + close(cancel) + _, err := ct.tr.RoundTrip(req) + close(clientRequestCancelled) + if err == nil { + errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k) + return + } + } else { + resp, err := ct.tr.RoundTrip(req) + if err != nil { + errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) + return + } + ioutil.ReadAll(resp.Body) + resp.Body.Close() + if resp.StatusCode != 204 { + errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode) + return + } + } + }(k) + } + return nil + } + + ct.server = func() error { + var wg sync.WaitGroup + defer wg.Wait() + + ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + + // Server write loop. + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + writeResp := make(chan uint32, maxConcurrent+1) + + wg.Add(1) + go func() { + defer wg.Done() + <-unblockServer + for id := range writeResp { + buf.Reset() + enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: id, + EndHeaders: true, + EndStream: true, + BlockFragment: buf.Bytes(), + }) + } + }() + + // Server read loop. + var nreq int + for { + f, err := ct.fr.ReadFrame() + if err != nil { + select { + case <-clientDone: + // If the client's done, it will have reported any errors on its side. + return nil + default: + return err + } + } + switch f := f.(type) { + case *WindowUpdateFrame: + case *SettingsFrame: + // Wait for the client SETTINGS ack until ending the greet. 
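+ // Closing greet releases the client goroutines parked above, which
+ // were waiting for the server's SETTINGS exchange before sending
+ // their requests.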
+ close(greet) + case *HeadersFrame: + if !f.HeadersEnded() { + return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) + } + gotRequest <- struct{}{} + nreq++ + writeResp <- f.StreamID + if nreq == maxConcurrent+1 { + close(writeResp) + } + default: + return fmt.Errorf("Unexpected client frame %v", f) + } + } + } + + ct.run() +} + +func TestAuthorityAddr(t *testing.T) { + tests := []struct { + scheme, authority string + want string + }{ + {"http", "foo.com", "foo.com:80"}, + {"https", "foo.com", "foo.com:443"}, + {"https", "foo.com:1234", "foo.com:1234"}, + {"https", "1.2.3.4:1234", "1.2.3.4:1234"}, + {"https", "1.2.3.4", "1.2.3.4:443"}, + {"https", "[::1]:1234", "[::1]:1234"}, + {"https", "[::1]", "[::1]:443"}, + } + for _, tt := range tests { + got := authorityAddr(tt.scheme, tt.authority) + if got != tt.want { + t.Errorf("authorityAddr(%q, %q) = %q; want %q", tt.scheme, tt.authority, got, tt.want) + } + } +} + +// Issue 20448: stop allocating for DATA frames' payload after +// Response.Body.Close is called. +func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { + megabyteZero := make([]byte, 1<<20) + + writeErr := make(chan error, 1) + + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() + var sum int64 + for i := 0; i < 100; i++ { + n, err := w.Write(megabyteZero) + sum += int64(n) + if err != nil { + writeErr <- err + return + } + } + t.Logf("wrote all %d bytes", sum) + writeErr <- nil + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(st.ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [1]byte + if _, err := res.Body.Read(buf[:]); err != nil { + t.Error(err) + } + if err := res.Body.Close(); err != nil { + t.Error(err) + } + + trb, ok := res.Body.(transportResponseBody) + if !ok { + t.Fatalf("res.Body = %T; want transportResponseBody", res.Body) + } + if trb.cs.bufPipe.b != nil { + t.Errorf("response body pipe is still open") + } + + gotErr := <-writeErr + if gotErr == nil { + t.Errorf("Handler unexpectedly managed to write its entire response without getting an error") + } else if gotErr != errStreamClosed { + t.Errorf("Handler Write err = %v; want errStreamClosed", gotErr) + } +} + +// Issue 18891: make sure Request.Body == NoBody means no DATA frame +// is ever sent, even if empty. 
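+// The request should instead carry END_STREAM directly on its HEADERS frame,
+// which is what the server below asserts via f.StreamEnded().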
+func TestTransportNoBodyMeansNoDATA(t *testing.T) { + ct := newClientTester(t) + + unblockClient := make(chan bool) + + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", go18httpNoBody()) + ct.tr.RoundTrip(req) + <-unblockClient + return nil + } + ct.server = func() error { + defer close(unblockClient) + defer ct.cc.(*net.TCPConn).Close() + ct.greet() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) + } + switch f := f.(type) { + default: + return fmt.Errorf("Got %T; want HeadersFrame", f) + case *WindowUpdateFrame, *SettingsFrame: + continue + case *HeadersFrame: + if !f.StreamEnded() { + return fmt.Errorf("got headers frame without END_STREAM") + } + return nil + } + } + } + ct.run() +} + +func benchSimpleRoundTrip(b *testing.B, nHeaders int) { + defer disableGoroutineTracking()() + b.ReportAllocs() + st := newServerTester(b, + func(w http.ResponseWriter, r *http.Request) { + }, + optOnlyServer, + optQuiet, + ) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + req, err := http.NewRequest("GET", st.ts.URL, nil) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < nHeaders; i++ { + name := fmt.Sprint("A-", i) + req.Header.Set(name, "*") + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + res, err := tr.RoundTrip(req) + if err != nil { + if res != nil { + res.Body.Close() + } + b.Fatalf("RoundTrip err = %v; want nil", err) + } + res.Body.Close() + if res.StatusCode != http.StatusOK { + b.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } + } +} + +type infiniteReader struct{} + +func (r infiniteReader) Read(b []byte) (int, error) { + return len(b), nil +} + +// Issue 20521: it is not an error to receive a response and end stream +// from the server without the body being consumed. +func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }, optOnlyServer) + defer st.Close() + + tr := &Transport{TLSClientConfig: tlsConfigInsecure} + defer tr.CloseIdleConnections() + + // The request body needs to be big enough to trigger flow control. + req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{}) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != http.StatusOK { + t.Fatalf("Response code = %v; want %v", res.StatusCode, http.StatusOK) + } +} + +// Verify transport doesn't crash when receiving bogus response lacking a :status header. +// Issue 22880. 
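+// The expected outcome is a per-stream "malformed response" error from
+// RoundTrip, not a panic or a connection-level error.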
+func TestTransportHandlesInvalidStatuslessResponse(t *testing.T) { + ct := newClientTester(t) + ct.client = func() error { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + _, err := ct.tr.RoundTrip(req) + const substr = "malformed response from server: missing status pseudo header" + if !strings.Contains(fmt.Sprint(err), substr) { + return fmt.Errorf("RoundTrip error = %v; want substring %q", err, substr) + } + return nil + } + ct.server = func() error { + ct.greet() + var buf bytes.Buffer + enc := hpack.NewEncoder(&buf) + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + return err + } + switch f := f.(type) { + case *HeadersFrame: + enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"}) // no :status header + ct.fr.WriteHeaders(HeadersFrameParam{ + StreamID: f.StreamID, + EndHeaders: true, + EndStream: false, // we'll send some DATA to try to crash the transport + BlockFragment: buf.Bytes(), + }) + ct.fr.WriteData(f.StreamID, true, []byte("payload")) + return nil + } + } + } + ct.run() +} + +func BenchmarkClientRequestHeaders(b *testing.B) { + b.Run(" 0 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 0) }) + b.Run(" 10 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 10) }) + b.Run(" 100 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 100) }) + b.Run("1000 Headers", func(b *testing.B) { benchSimpleRoundTrip(b, 1000) }) +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 0000000..54ab4a8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http2/hpack" + "golang.org/x/net/lex/httplex" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. 
Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. 
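+ // For example, a 40,000-byte header block is split into one 16,384-byte
+ // HEADERS fragment, one 16,384-byte CONTINUATION fragment, and a final
+ // 7,232-byte CONTINUATION fragment.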
+ const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // uppper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httplex.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 0000000..4fe3073 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
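+//
+// For example, with the default 16K max frame size, consuming n=6000 from a
+// 10000-byte DATA frame on a stream whose window allows it returns a
+// 6000-byte 'consumed' frame plus a 4000-byte 'rest'.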
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
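+// It reuses a pooled queue when one is available and allocates a new one
+// only when the pool is empty.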
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 0000000..848fed6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
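+//
+// A minimal usage sketch (hypothetical server wiring; mux is assumed):
+//
+//	srv := &http.Server{Addr: ":443", Handler: mux}
+//	http2.ConfigureServer(srv, &http2.Server{
+//		NewWriteScheduler: func() http2.WriteScheduler {
+//			return http2.NewPriorityWriteScheduler(nil) // nil selects the defaults below
+//		},
+//	})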
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. + if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this funcion returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. 
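+ // If every sibling shares one weight there is nothing to sort, and the
+ // kids list can be walked in place.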
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
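+ // Note the off-by-one wire encoding: priorityDefaultWeight is 15 because
+ // a node's effective weight is the stored value plus 1 (see the comment
+ // on priorityNode.weight).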
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
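+		// A DATA frame here would be a caller bug: the scheduler only
+		// learns about writable streams via OpenStream.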
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority_test.go b/vendor/golang.org/x/net/http2/writesched_priority_test.go new file mode 100644 index 0000000..f2b535a --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_test.go @@ -0,0 +1,541 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
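The tests that follow drive the scheduler through its exported surface. For orientation, here is a minimal sketch (editorial, not part of the patch) of the intended call pattern; frameFor is a hypothetical helper standing in for whatever builds a FrameWriteRequest:

    ws := NewPriorityWriteScheduler(nil)
    ws.OpenStream(1, OpenStreamOptions{})
    ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) // pushed stream: depends on stream 1
    ws.Push(frameFor(2))
    ws.Push(frameFor(1))
    for {
        wr, ok := ws.Pop() // highest-priority ready frame, or ok=false
        if !ok {
            break
        }
        _ = wr // a real server writes wr to the connection here
    }
    ws.CloseStream(2)
    ws.CloseStream(1)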
+ +package http2 + +import ( + "bytes" + "fmt" + "sort" + "testing" +) + +func defaultPriorityWriteScheduler() *priorityWriteScheduler { + return NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) +} + +func checkPriorityWellFormed(ws *priorityWriteScheduler) error { + for id, n := range ws.nodes { + if id != n.id { + return fmt.Errorf("bad ws.nodes: ws.nodes[%d] = %d", id, n.id) + } + if n.parent == nil { + if n.next != nil || n.prev != nil { + return fmt.Errorf("bad node %d: nil parent but prev/next not nil", id) + } + continue + } + found := false + for k := n.parent.kids; k != nil; k = k.next { + if k.id == id { + found = true + break + } + } + if !found { + return fmt.Errorf("bad node %d: not found in parent %d kids list", id, n.parent.id) + } + } + return nil +} + +func fmtTree(ws *priorityWriteScheduler, fmtNode func(*priorityNode) string) string { + var ids []int + for _, n := range ws.nodes { + ids = append(ids, int(n.id)) + } + sort.Ints(ids) + + var buf bytes.Buffer + for _, id := range ids { + if buf.Len() != 0 { + buf.WriteString(" ") + } + if id == 0 { + buf.WriteString(fmtNode(&ws.root)) + } else { + buf.WriteString(fmtNode(ws.nodes[uint32(id)])) + } + } + return buf.String() +} + +func fmtNodeParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{parent:nil}", n.id) + default: + return fmt.Sprintf("%d{parent:%d}", n.id, n.parent.id) + } +} + +func fmtNodeWeightParentSkipRoot(n *priorityNode) string { + switch { + case n.id == 0: + return "" + case n.parent == nil: + return fmt.Sprintf("%d{weight:%d,parent:nil}", n.id, n.weight) + default: + return fmt.Sprintf("%d{weight:%d,parent:%d}", n.id, n.weight, n.parent.id) + } +} + +func TestPriorityTwoStreams(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + // Move 1's parent to 2. + ws.AdjustStream(1, PriorityParam{ + StreamDep: 2, + Weight: 32, + Exclusive: false, + }) + want = "1{weight:32,parent:2} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustExclusiveZero(t *testing.T) { + // 1, 2, and 3 are all children of the 0 stream. + // Exclusive reprioritization to any of the streams should bring + // the rest of the streams under the reprioritized stream. 
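+	// (This exercises the Exclusive branch at the end of AdjustStream:
+	// every other kid of the new parent is reparented under the
+	// reprioritized stream.)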
+ ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.OpenStream(3, OpenStreamOptions{}) + + want := "1{weight:15,parent:0} 2{weight:15,parent:0} 3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + + ws.AdjustStream(2, PriorityParam{ + StreamDep: 0, + Weight: 20, + Exclusive: true, + }) + want = "1{weight:15,parent:2} 2{weight:20,parent:0} 3{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityAdjustOwnParent(t *testing.T) { + // Assigning a node as its own parent should have no effect. + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{}) + ws.AdjustStream(2, PriorityParam{ + StreamDep: 2, + Weight: 20, + Exclusive: true, + }) + want := "1{weight:15,parent:0} 2{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxClosedNodesInTree: 2}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + // Close the first three streams. We lose 1, but keep 2 and 3. + ws.CloseStream(1) + ws.CloseStream(2) + ws.CloseStream(3) + + want := "2{weight:15,parent:0} 3{weight:15,parent:2} 4{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } + + // Adding a stream as an exclusive child of 1 gives it default + // priorities, since 1 is gone. + ws.OpenStream(5, OpenStreamOptions{}) + ws.AdjustStream(5, PriorityParam{StreamDep: 1, Weight: 15, Exclusive: true}) + + // Adding a stream as an exclusive child of 2 should work, since 2 is not gone. + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(6, PriorityParam{StreamDep: 2, Weight: 15, Exclusive: true}) + + want = "2{weight:15,parent:0} 3{weight:15,parent:6} 4{weight:15,parent:3} 5{weight:15,parent:0} 6{weight:15,parent:2}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After add streams\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityClosedStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + + // Close the first two streams. We keep only 3. 
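+	// With MaxClosedNodesInTree left at zero, CloseStream removes nodes
+	// immediately rather than parking them in ws.closedNodes, so stream 3
+	// is reparented to the root as its ancestors disappear.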
+ ws.CloseStream(1) + ws.CloseStream(2) + + want := "3{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After close\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreams(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{MaxIdleNodesInTree: 2}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + ws.OpenStream(5, OpenStreamOptions{}) + ws.OpenStream(6, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{StreamDep: 1, Weight: 15}) + ws.AdjustStream(5, PriorityParam{StreamDep: 2, Weight: 15}) + ws.AdjustStream(6, PriorityParam{StreamDep: 3, Weight: 15}) + + want := "2{weight:15,parent:0} 3{weight:20,parent:2} 4{weight:15,parent:0} 5{weight:15,parent:2} 6{weight:15,parent:3}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPriorityIdleStreamsDisabled(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{}).(*priorityWriteScheduler) + ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 15}) // idle + ws.AdjustStream(3, PriorityParam{StreamDep: 2, Weight: 20}) // idle + ws.OpenStream(4, OpenStreamOptions{}) + + want := "4{weight:15,parent:0}" + if got := fmtTree(ws, fmtNodeWeightParentSkipRoot); got != want { + t.Errorf("After open\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:0} 2{parent:1} 3{parent:1} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection531Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.1. + // A,B,C,D = 1,2,3,4 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{}) + ws.AdjustStream(4, PriorityParam{ + StreamDep: 1, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:0} 2{parent:4} 3{parent:4} 4{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func makeSection533Tree() *priorityWriteScheduler { + // Initial tree from RFC 7540 Section 5.3.3. 
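+	// Shape of the tree built below (stream number, RFC letter):
+	//
+	//          1 (A)
+	//         /     \
+	//      2 (B)   3 (C)
+	//             /     \
+	//          4 (D)   5 (E)
+	//            |
+	//          6 (F)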
+ // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + return ws +} + +func TestPrioritySection533NonExclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: false, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:4}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func TestPrioritySection533Exclusive(t *testing.T) { + // Example from RFC 7540 Section 5.3.3. + // A,B,C,D,E,F = 1,2,3,4,5,6 + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(5, OpenStreamOptions{PusherID: 3}) + ws.OpenStream(6, OpenStreamOptions{PusherID: 4}) + ws.AdjustStream(1, PriorityParam{ + StreamDep: 4, + Weight: 15, + Exclusive: true, + }) + want := "1{parent:4} 2{parent:1} 3{parent:1} 4{parent:0} 5{parent:3} 6{parent:1}" + if got := fmtTree(ws, fmtNodeParentSkipRoot); got != want { + t.Errorf("After adjust\ngot %q\nwant %q", got, want) + } + if err := checkPriorityWellFormed(ws); err != nil { + t.Error(err) + } +} + +func checkPopAll(ws WriteScheduler, order []uint32) error { + for k, id := range order { + wr, ok := ws.Pop() + if !ok { + return fmt.Errorf("Pop[%d]: got ok=false, want %d (order=%v)", k, id, order) + } + if got := wr.StreamID(); got != id { + return fmt.Errorf("Pop[%d]: got %v, want %d (order=%v)", k, got, id, order) + } + } + wr, ok := ws.Pop() + if ok { + return fmt.Errorf("Pop[%d]: got %v, want ok=false (order=%v)", len(order), wr.StreamID(), order) + } + return nil +} + +func TestPriorityPopFrom533Tree(t *testing.T) { + ws := makeSection533Tree() + + ws.Push(makeWriteHeadersRequest(3 /*C*/)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteHeadersRequest(5 /*E*/)) + ws.Push(makeWriteHeadersRequest(1 /*A*/)) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, []uint32{0 /*NonStream*/, 1, 3, 5}); err != nil { + t.Error(err) + } +} + +func TestPriorityPopFromLinearTree(t *testing.T) { + ws := defaultPriorityWriteScheduler() + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + ws.OpenStream(3, OpenStreamOptions{PusherID: 2}) + ws.OpenStream(4, OpenStreamOptions{PusherID: 3}) + + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + t.Log("tree:", fmtTree(ws, fmtNodeParentSkipRoot)) + + if err := checkPopAll(ws, 
[]uint32{0, 0 /*NonStreams*/, 1, 2, 3, 4}); err != nil { + t.Error(err) + } +} + +func TestPriorityFlowControl(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: false}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 16} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 16), false}, st1, nil}) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 16), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // No flow-control bytes available. + if wr, ok := ws.Pop(); ok { + t.Fatalf("Pop(limited by flow control)=%v,true, want false", wr) + } + + // Add enough flow-control bytes to write st2 in two Pop calls. + // Should write data from st2 even though it's lower priority than st1. + for i := 1; i <= 2; i++ { + st2.flow.add(8) + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(%d)=false, want true", i) + } + if got, want := wr.DataSize(), 8; got != want { + t.Fatalf("Pop(%d)=%d bytes, want %d bytes", i, got, want) + } + } +} + +func TestPriorityThrottleOutOfOrderWrites(t *testing.T) { + ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{ThrottleOutOfOrderWrites: true}) + ws.OpenStream(1, OpenStreamOptions{}) + ws.OpenStream(2, OpenStreamOptions{PusherID: 1}) + + sc := &serverConn{maxFrameSize: 4096} + st1 := &stream{id: 1, sc: sc} + st2 := &stream{id: 2, sc: sc} + st1.flow.add(4096) + st2.flow.add(4096) + ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 4096), false}, st2, nil}) + ws.AdjustStream(2, PriorityParam{StreamDep: 1}) + + // We have enough flow-control bytes to write st2 in a single Pop call. + // However, due to out-of-order write throttling, the first call should + // only write 1KB. + wr, ok := ws.Pop() + if !ok { + t.Fatalf("Pop(st2.first)=false, want true") + } + if got, want := wr.StreamID(), uint32(2); got != want { + t.Fatalf("Pop(st2.first)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 1024; got != want { + t.Fatalf("Pop(st2.first)=%d bytes, want %d bytes", got, want) + } + + // Now add data on st1. This should take precedence. + ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 4096), false}, st1, nil}) + wr, ok = ws.Pop() + if !ok { + t.Fatalf("Pop(st1)=false, want true") + } + if got, want := wr.StreamID(), uint32(1); got != want { + t.Fatalf("Pop(st1)=stream %d, want stream %d", got, want) + } + if got, want := wr.DataSize(), 4096; got != want { + t.Fatalf("Pop(st1)=%d bytes, want %d bytes", got, want) + } + + // Should go back to writing 1KB from st2. 
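+	// (Consuming from st1 was an in-order write, so Pop reset
+	// writeThrottleLimit to 1024 and st2 is throttled again.)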
+	wr, ok = ws.Pop()
+	if !ok {
+		t.Fatalf("Pop(st2.last)=false, want true")
+	}
+	if got, want := wr.StreamID(), uint32(2); got != want {
+		t.Fatalf("Pop(st2.last)=stream %d, want stream %d", got, want)
+	}
+	if got, want := wr.DataSize(), 1024; got != want {
+		t.Fatalf("Pop(st2.last)=%d bytes, want %d bytes", got, want)
+	}
+}
+
+func TestPriorityWeights(t *testing.T) {
+	ws := defaultPriorityWriteScheduler()
+	ws.OpenStream(1, OpenStreamOptions{})
+	ws.OpenStream(2, OpenStreamOptions{})
+
+	sc := &serverConn{maxFrameSize: 8}
+	st1 := &stream{id: 1, sc: sc}
+	st2 := &stream{id: 2, sc: sc}
+	st1.flow.add(40)
+	st2.flow.add(40)
+
+	ws.Push(FrameWriteRequest{&writeData{1, make([]byte, 40), false}, st1, nil})
+	ws.Push(FrameWriteRequest{&writeData{2, make([]byte, 40), false}, st2, nil})
+	ws.AdjustStream(1, PriorityParam{StreamDep: 0, Weight: 34})
+	ws.AdjustStream(2, PriorityParam{StreamDep: 0, Weight: 9})
+
+	// st1 gets 3.5x the bandwidth of st2 (3.5 = (34+1)/(9+1)).
+	// The maximum frame size is 8 bytes. The write sequence should be:
+	//   st1, total bytes so far is (st1=8, st2=0)
+	//   st2, total bytes so far is (st1=8, st2=8)
+	//   st1, total bytes so far is (st1=16, st2=8)
+	//   st1, total bytes so far is (st1=24, st2=8) // 3x bandwidth
+	//   st1, total bytes so far is (st1=32, st2=8) // 4x bandwidth
+	//   st2, total bytes so far is (st1=32, st2=16) // 2x bandwidth
+	//   st1, total bytes so far is (st1=40, st2=16)
+	//   st2, total bytes so far is (st1=40, st2=24)
+	//   st2, total bytes so far is (st1=40, st2=32)
+	//   st2, total bytes so far is (st1=40, st2=40)
+	if err := checkPopAll(ws, []uint32{1, 2, 1, 1, 1, 2, 1, 2, 2, 2}); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) {
+	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
+		MaxClosedNodesInTree: 0,
+		MaxIdleNodesInTree:   0,
+	})
+	ws.OpenStream(1, OpenStreamOptions{})
+	ws.CloseStream(1)
+	ws.Push(FrameWriteRequest{write: streamError(1, ErrCodeProtocol)})
+	ws.Push(FrameWriteRequest{write: streamError(2, ErrCodeProtocol)})
+
+	if err := checkPopAll(ws, []uint32{1, 2}); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 0000000..36d7919
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+	return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+	// zero are frames not associated with a specific stream.
+	zero writeQueue
+
+	// sq contains the stream-specific queues, keyed by stream ID.
+	// When a stream is idle or closed, it's deleted from the map.
+	sq map[uint32]*writeQueue
+
+	// pool of empty queues for reuse.
+ queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for _, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_random_test.go b/vendor/golang.org/x/net/http2/writesched_random_test.go new file mode 100644 index 0000000..3bf4aa3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random_test.go @@ -0,0 +1,44 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "testing" + +func TestRandomScheduler(t *testing.T) { + ws := NewRandomWriteScheduler() + ws.Push(makeWriteHeadersRequest(3)) + ws.Push(makeWriteHeadersRequest(4)) + ws.Push(makeWriteHeadersRequest(1)) + ws.Push(makeWriteHeadersRequest(2)) + ws.Push(makeWriteNonStreamRequest()) + ws.Push(makeWriteNonStreamRequest()) + + // Pop all frames. Should get the non-stream requests first, + // followed by the stream requests in any order. + var order []FrameWriteRequest + for { + wr, ok := ws.Pop() + if !ok { + break + } + order = append(order, wr) + } + t.Logf("got frames: %v", order) + if len(order) != 6 { + t.Fatalf("got %d frames, expected 6", len(order)) + } + if order[0].StreamID() != 0 || order[1].StreamID() != 0 { + t.Fatal("expected non-stream frames first", order[0], order[1]) + } + got := make(map[uint32]bool) + for _, wr := range order[2:] { + got[wr.StreamID()] = true + } + for id := uint32(1); id <= 4; id++ { + if !got[id] { + t.Errorf("frame not found for stream %d", id) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched_test.go b/vendor/golang.org/x/net/http2/writesched_test.go new file mode 100644 index 0000000..0807056 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
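One note on TestRandomScheduler above: it checks only that all four stream frames come out, not their order, because randomWriteScheduler.Pop ranges over a Go map and map iteration order is unspecified (and deliberately randomized between runs). A tiny illustration of that property, as an editorial aside rather than part of the patch:

    m := map[uint32]bool{1: true, 2: true, 3: true}
    for id := range m {
        fmt.Println(id) // prints 1, 2, 3 in varying order across runs
    }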
+ +package http2 + +import ( + "fmt" + "math" + "reflect" + "testing" +) + +func makeWriteNonStreamRequest() FrameWriteRequest { + return FrameWriteRequest{writeSettingsAck{}, nil, nil} +} + +func makeWriteHeadersRequest(streamID uint32) FrameWriteRequest { + st := &stream{id: streamID} + return FrameWriteRequest{&writeResHeaders{streamID: streamID, httpResCode: 200}, st, nil} +} + +func checkConsume(wr FrameWriteRequest, nbytes int32, want []FrameWriteRequest) error { + consumed, rest, n := wr.Consume(nbytes) + var wantConsumed, wantRest FrameWriteRequest + switch len(want) { + case 0: + case 1: + wantConsumed = want[0] + case 2: + wantConsumed = want[0] + wantRest = want[1] + } + if !reflect.DeepEqual(consumed, wantConsumed) || !reflect.DeepEqual(rest, wantRest) || n != len(want) { + return fmt.Errorf("got %v, %v, %v\nwant %v, %v, %v", consumed, rest, n, wantConsumed, wantRest, len(want)) + } + return nil +} + +func TestFrameWriteRequestNonData(t *testing.T) { + wr := makeWriteNonStreamRequest() + if got, want := wr.DataSize(), 0; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // Non-DATA frames are always consumed whole. + if err := checkConsume(wr, 0, []FrameWriteRequest{wr}); err != nil { + t.Errorf("Consume:\n%v", err) + } +} + +func TestFrameWriteRequestData(t *testing.T) { + st := &stream{ + id: 1, + sc: &serverConn{maxFrameSize: 16}, + } + const size = 32 + wr := FrameWriteRequest{&writeData{st.id, make([]byte, size), true}, st, make(chan error)} + if got, want := wr.DataSize(), size; got != want { + t.Errorf("DataSize: got %v, want %v", got, want) + } + + // No flow-control bytes available: cannot consume anything. + if err := checkConsume(wr, math.MaxInt32, []FrameWriteRequest{}); err != nil { + t.Errorf("Consume(limited by flow control):\n%v", err) + } + + // Add enough flow-control bytes to consume the entire frame, + // but we're now restricted by st.sc.maxFrameSize. + st.flow.add(size) + want := []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, st.sc.maxFrameSize), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(wr, math.MaxInt32, want); err != nil { + t.Errorf("Consume(limited by maxFrameSize):\n%v", err) + } + rest := want[1] + + // Consume 8 bytes from the remaining frame. + want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, 8), false}, + stream: st, + done: nil, + }, + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, 8, want); err != nil { + t.Errorf("Consume(8):\n%v", err) + } + rest = want[1] + + // Consume all remaining bytes. 
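+	// 32-16-8 = 8 bytes remain, which fits within both maxFrameSize and the
+	// remaining flow-control window, so this Consume yields a single final
+	// request (n == 1).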
+ want = []FrameWriteRequest{ + { + write: &writeData{st.id, make([]byte, size-st.sc.maxFrameSize-8), true}, + stream: st, + done: wr.done, + }, + } + if err := checkConsume(rest, math.MaxInt32, want); err != nil { + t.Errorf("Consume(remainder):\n%v", err) + } +} + +func TestFrameWriteRequest_StreamID(t *testing.T) { + const streamID = 123 + wr := FrameWriteRequest{write: streamError(streamID, ErrCodeNo)} + if got := wr.StreamID(); got != streamID { + t.Errorf("FrameWriteRequest(StreamError) = %v; want %v", got, streamID) + } +} diff --git a/vendor/golang.org/x/net/http2/z_spec_test.go b/vendor/golang.org/x/net/http2/z_spec_test.go new file mode 100644 index 0000000..610b2cd --- /dev/null +++ b/vendor/golang.org/x/net/http2/z_spec_test.go @@ -0,0 +1,356 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/xml" + "flag" + "fmt" + "io" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "testing" +) + +var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") + +// The global map of sentence coverage for the http2 spec. +var defaultSpecCoverage specCoverage + +var loadSpecOnce sync.Once + +func loadSpec() { + if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { + panic(err) + } else { + defaultSpecCoverage = readSpecCov(f) + f.Close() + } +} + +// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not +// "covered" will be included in report outputted by TestSpecCoverage. +func covers(sec, sentences string) { + loadSpecOnce.Do(loadSpec) + defaultSpecCoverage.cover(sec, sentences) +} + +type specPart struct { + section string + sentence string +} + +func (ss specPart) Less(oo specPart) bool { + atoi := func(s string) int { + n, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return n + } + a := strings.Split(ss.section, ".") + b := strings.Split(oo.section, ".") + for len(a) > 0 { + if len(b) == 0 { + return false + } + x, y := atoi(a[0]), atoi(b[0]) + if x == y { + a, b = a[1:], b[1:] + continue + } + return x < y + } + if len(b) > 0 { + return true + } + return false +} + +type bySpecSection []specPart + +func (a bySpecSection) Len() int { return len(a) } +func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } +func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type specCoverage struct { + coverage map[specPart]bool + d *xml.Decoder +} + +func joinSection(sec []int) string { + s := fmt.Sprintf("%d", sec[0]) + for _, n := range sec[1:] { + s = fmt.Sprintf("%s.%d", s, n) + } + return s +} + +func (sc specCoverage) readSection(sec []int) { + var ( + buf = new(bytes.Buffer) + sub = 0 + ) + for { + tk, err := sc.d.Token() + if err != nil { + if err == io.EOF { + return + } + panic(err) + } + switch v := tk.(type) { + case xml.StartElement: + if skipElement(v) { + if err := sc.d.Skip(); err != nil { + panic(err) + } + if v.Name.Local == "section" { + sub++ + } + break + } + switch v.Name.Local { + case "section": + sub++ + sc.readSection(append(sec, sub)) + case "xref": + buf.Write(sc.readXRef(v)) + } + case xml.CharData: + if len(sec) == 0 { + break + } + buf.Write(v) + case xml.EndElement: + if v.Name.Local == "section" { + sc.addSentences(joinSection(sec), buf.String()) + return + } + } + } +} + +func (sc specCoverage) readXRef(se xml.StartElement) []byte { + var b []byte + for { + tk, err := 
sc.d.Token() + if err != nil { + panic(err) + } + switch v := tk.(type) { + case xml.CharData: + if b != nil { + panic("unexpected CharData") + } + b = []byte(string(v)) + case xml.EndElement: + if v.Name.Local != "xref" { + panic("expected ") + } + if b != nil { + return b + } + sig := attrSig(se) + switch sig { + case "target": + return []byte(fmt.Sprintf("[%s]", attrValue(se, "target"))) + case "fmt-of,rel,target", "fmt-,,rel,target": + return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) + case "fmt-of,sec,target", "fmt-,,sec,target": + return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) + case "fmt-of,rel,sec,target": + return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) + default: + panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) + } + default: + panic(fmt.Sprintf("unexpected tag %q", v)) + } + } +} + +var skipAnchor = map[string]bool{ + "intro": true, + "Overview": true, +} + +var skipTitle = map[string]bool{ + "Acknowledgements": true, + "Change Log": true, + "Document Organization": true, + "Conventions and Terminology": true, +} + +func skipElement(s xml.StartElement) bool { + switch s.Name.Local { + case "artwork": + return true + case "section": + for _, attr := range s.Attr { + switch attr.Name.Local { + case "anchor": + if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { + return true + } + case "title": + if skipTitle[attr.Value] { + return true + } + } + } + } + return false +} + +func readSpecCov(r io.Reader) specCoverage { + sc := specCoverage{ + coverage: map[specPart]bool{}, + d: xml.NewDecoder(r)} + sc.readSection(nil) + return sc +} + +func (sc specCoverage) addSentences(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + sc.coverage[specPart{sec, s}] = false + } +} + +func (sc specCoverage) cover(sec string, sentence string) { + for _, s := range parseSentences(sentence) { + p := specPart{sec, s} + if _, ok := sc.coverage[p]; !ok { + panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) + } + sc.coverage[specPart{sec, s}] = true + } + +} + +var whitespaceRx = regexp.MustCompile(`\s+`) + +func parseSentences(sens string) []string { + sens = strings.TrimSpace(sens) + if sens == "" { + return nil + } + ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") + for i, s := range ss { + s = strings.TrimSpace(s) + if !strings.HasSuffix(s, ".") { + s += "." + } + ss[i] = s + } + return ss +} + +func TestSpecParseSentences(t *testing.T) { + tests := []struct { + ss string + want []string + }{ + {"Sentence 1. Sentence 2.", + []string{ + "Sentence 1.", + "Sentence 2.", + }}, + {"Sentence 1. 
\nSentence 2.\tSentence 3.", + []string{ + "Sentence 1.", + "Sentence 2.", + "Sentence 3.", + }}, + } + + for i, tt := range tests { + got := parseSentences(tt.ss) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("%d: got = %q, want %q", i, got, tt.want) + } + } +} + +func TestSpecCoverage(t *testing.T) { + if !*coverSpec { + t.Skip() + } + + loadSpecOnce.Do(loadSpec) + + var ( + list []specPart + cv = defaultSpecCoverage.coverage + total = len(cv) + complete = 0 + ) + + for sp, touched := range defaultSpecCoverage.coverage { + if touched { + complete++ + } else { + list = append(list, sp) + } + } + sort.Stable(bySpecSection(list)) + + if testing.Short() && len(list) > 5 { + list = list[:5] + } + + for _, p := range list { + t.Errorf("\tSECTION %s: %s", p.section, p.sentence) + } + + t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100) +} + +func attrSig(se xml.StartElement) string { + var names []string + for _, attr := range se.Attr { + if attr.Name.Local == "fmt" { + names = append(names, "fmt-"+attr.Value) + } else { + names = append(names, attr.Name.Local) + } + } + sort.Strings(names) + return strings.Join(names, ",") +} + +func attrValue(se xml.StartElement, attr string) string { + for _, a := range se.Attr { + if a.Name.Local == attr { + return a.Value + } + } + panic("unknown attribute " + attr) +} + +func TestSpecPartLess(t *testing.T) { + tests := []struct { + sec1, sec2 string + want bool + }{ + {"6.2.1", "6.2", false}, + {"6.2", "6.2.1", true}, + {"6.10", "6.10.1", true}, + {"6.10", "6.1.1", false}, // 10, not 1 + {"6.1", "6.1", false}, // equal, so not less + } + for _, tt := range tests { + got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) + if got != tt.want { + t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go new file mode 100644 index 0000000..75db991 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -0,0 +1,41 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A DstUnreach represents an ICMP destination unreachable message +// body. +type DstUnreach struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *DstUnreach) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DstUnreach) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseDstUnreach parses b as an ICMP destination unreachable message +// body. +func parseDstUnreach(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &DstUnreach{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go new file mode 100644 index 0000000..e6f15ef --- /dev/null +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -0,0 +1,45 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Echo represents an ICMP echo request or reply message body. +type Echo struct { + ID int // identifier + Seq int // sequence number + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *Echo) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *Echo) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq)) + copy(b[4:], p.Data) + return b, nil +} + +// parseEcho parses b as an ICMP echo request or reply message body. +func parseEcho(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/endpoint.go b/vendor/golang.org/x/net/icmp/endpoint.go new file mode 100644 index 0000000..a68bfb0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/endpoint.go @@ -0,0 +1,113 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + "runtime" + "syscall" + "time" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var _ net.PacketConn = &PacketConn{} + +// A PacketConn represents a packet network endpoint that uses either +// ICMPv4 or ICMPv6. +type PacketConn struct { + c net.PacketConn + p4 *ipv4.PacketConn + p6 *ipv6.PacketConn +} + +func (c *PacketConn) ok() bool { return c != nil && c.c != nil } + +// IPv4PacketConn returns the ipv4.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv4. +func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn { + if !c.ok() { + return nil + } + return c.p4 +} + +// IPv6PacketConn returns the ipv6.PacketConn of c. +// It returns nil when c is not created as the endpoint for ICMPv6. +func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn { + if !c.ok() { + return nil + } + return c.p6 +} + +// ReadFrom reads an ICMP message from the connection. +func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) { + if !c.ok() { + return 0, nil, syscall.EINVAL + } + // Please be informed that ipv4.NewPacketConn enables + // IP_STRIPHDR option by default on Darwin. + // See golang.org/issue/9395 for further information. + if runtime.GOOS == "darwin" && c.p4 != nil { + n, _, peer, err := c.p4.ReadFrom(b) + return n, peer, err + } + return c.c.ReadFrom(b) +} + +// WriteTo writes the ICMP message b to dst. +// Dst must be net.UDPAddr when c is a non-privileged +// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr. +func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.c.WriteTo(b, dst) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.Close() +} + +// LocalAddr returns the local network address. 
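+// For a non-privileged datagram-oriented endpoint the result is a
+// *net.UDPAddr; otherwise it is a *net.IPAddr, mirroring the dst
+// convention documented on WriteTo.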
+func (c *PacketConn) LocalAddr() net.Addr { + if !c.ok() { + return nil + } + return c.c.LocalAddr() +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.ok() { + return syscall.EINVAL + } + return c.c.SetWriteDeadline(t) +} diff --git a/vendor/golang.org/x/net/icmp/example_test.go b/vendor/golang.org/x/net/icmp/example_test.go new file mode 100644 index 0000000..1df4cec --- /dev/null +++ b/vendor/golang.org/x/net/icmp/example_test.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "log" + "net" + "os" + "runtime" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") + if err != nil { + log.Fatal(err) + } + defer c.Close() + + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1, + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("ff02::1"), Zone: "en0"}); err != nil { + log.Fatal(err) + } + + rb := make([]byte, 1500) + n, peer, err := c.ReadFrom(rb) + if err != nil { + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + switch rm.Type { + case ipv6.ICMPTypeEchoReply: + log.Printf("got reflection from %v", peer) + default: + log.Printf("got %+v; want echo reply", rm) + } +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go new file mode 100644 index 0000000..402a751 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// An Extension represents an ICMP extension. +type Extension interface { + // Len returns the length of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP extension. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +const extensionVersion = 2 + +func validExtensionHeader(b []byte) bool { + v := int(b[0]&0xf0) >> 4 + s := binary.BigEndian.Uint16(b[2:4]) + if s != 0 { + s = checksum(b) + } + if v != extensionVersion || s != 0 { + return false + } + return true +} + +// parseExtensions parses b as a list of ICMP extensions. +// The length attribute l must be the length attribute field in +// received icmp messages. 
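+// (RFC 4884 encodes that attribute in 32-bit words for ICMPv4 and
+// 64-bit words for ICMPv6; the comparisons against len(b) below assume
+// the caller has already scaled it to a byte count.)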
+// +// It will return a list of ICMP extensions and an adjusted length +// attribute that represents the length of the padded original +// datagram field. Otherwise, it returns an error. +func parseExtensions(b []byte, l int) ([]Extension, int, error) { + // Still a lot of non-RFC 4884 compliant implementations are + // out there. Set the length attribute l to 128 when it looks + // inappropriate for backwards compatibility. + // + // A minimal extension at least requires 8 octets; 4 octets + // for an extension header, and 4 octets for a single object + // header. + // + // See RFC 4884 for further information. + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { + return nil, -1, errNoExtension + } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } + var exts []Extension + for b = b[l+4:]; len(b) >= 4; { + ol := int(binary.BigEndian.Uint16(b[:2])) + if 4 > ol || ol > len(b) { + break + } + switch b[2] { + case classMPLSLabelStack: + ext, err := parseMPLSLabelStack(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + case classInterfaceInfo: + ext, err := parseInterfaceInfo(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) + } + b = b[ol:] + } + return exts, l, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go new file mode 100644 index 0000000..0b3f7b9 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -0,0 +1,259 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
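Before the test vectors below, a small editorial sketch (not part of the patch) of the wire layout they exercise: an RFC 4884 body is the padded original datagram, then a 4-octet extension header, then one or more objects. The smallest valid case from the first vector would parse as:

    pad := make([]byte, 128)              // padded original datagram
    hdr := []byte{0x20, 0x00, 0x00, 0x00} // version 2, checksum 0 (left unverified)
    obj := []byte{0x00, 0x04, 0x01, 0x01} // one 4-octet MPLS label stack object, no labels
    body := append(pad, append(hdr, obj...)...)
    exts, l, err := parseExtensions(body, 128)
    // err == nil, l == 128, and exts holds a single empty *MPLSLabelStack.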
+ +package icmp + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/internal/iana" +) + +var marshalAndParseExtensionTests = []struct { + proto int + hdr []byte + obj []byte + exts []Extension +}{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + exts: []Extension{ + &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + exts: []Extension{ + &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, +} + +func TestMarshalAndParseExtension(t *testing.T) { + for i, tt := range marshalAndParseExtensionTests { + for j, ext := range tt.exts { + var err error + var b []byte + switch ext := ext.(type) { + case *MPLSLabelStack: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + case *InterfaceInfo: + b, err = ext.Marshal(tt.proto) + if err != nil { + t.Errorf("#%v/%v: %v", i, j, err) + continue + } + } + if !reflect.DeepEqual(b, tt.obj) { + t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) + continue + } + } + + for j, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, 
-1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil}, + + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(wire.data, wire.inlattr) + if err != wire.err { + t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) + continue + } + if wire.err != nil { + continue + } + if l != wire.outlattr { + t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, tt.exts) { + for j, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + want := tt.exts[j].(*MPLSLabelStack) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + case *InterfaceInfo: + want := tt.exts[j].(*InterfaceInfo) + t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) + } + } + continue + } + } + } +} + +var parseInterfaceNameTests = []struct { + b []byte + error +}{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range parseInterfaceNameTests { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/helper_posix.go b/vendor/golang.org/x/net/icmp/helper_posix.go new file mode 100644 index 0000000..398fd38 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/helper_posix.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "strconv" + "syscall" +) + +func sockaddr(family int, address string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + a, err := net.ResolveIPAddr("ip4", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv4zero + } + if a.IP = a.IP.To4(); a.IP == nil { + return nil, net.InvalidAddrError("non-ipv4 address") + } + sa := &syscall.SockaddrInet4{} + copy(sa.Addr[:], a.IP) + return sa, nil + case syscall.AF_INET6: + a, err := net.ResolveIPAddr("ip6", address) + if err != nil { + return nil, err + } + if len(a.IP) == 0 { + a.IP = net.IPv6unspecified + } + if a.IP.Equal(net.IPv4zero) { + a.IP = net.IPv6unspecified + } + if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil { + return nil, net.InvalidAddrError("non-ipv6 address") + } + sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)} + copy(sa.Addr[:], a.IP) + return sa, nil + default: + return nil, net.InvalidAddrError("unexpected family") + } +} + +func zoneToUint32(zone string) uint32 { + if zone == "" { + return 0 + } + if ifi, err := net.InterfaceByName(zone); err == nil { + return uint32(ifi.Index) + } + n, err := strconv.Atoi(zone) + if err != nil { + return 0 + } + return uint32(n) +} + +func last(s string, b byte) int { + i := len(s) + for i--; i >= 0; i-- { + if s[i] == b { + break + } + } + return i +} diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go new file mode 100644 index 0000000..78b5b98 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -0,0 +1,236 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "strings" + + "golang.org/x/net/internal/iana" +) + +const ( + classInterfaceInfo = 2 + + afiIPv4 = 1 + afiIPv6 = 2 +) + +const ( + attrMTU = 1 << iota + attrName + attrIPAddr + attrIfIndex +) + +// An InterfaceInfo represents interface and next-hop identification. +type InterfaceInfo struct { + Class int // extension object class number + Type int // extension object sub-type + Interface *net.Interface + Addr *net.IPAddr +} + +func (ifi *InterfaceInfo) nameLen() int { + if len(ifi.Interface.Name) > 63 { + return 64 + } + l := 1 + len(ifi.Interface.Name) + return (l + 3) &^ 3 +} + +func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) { + l = 4 + if ifi.Interface != nil && ifi.Interface.Index > 0 { + attrs |= attrIfIndex + l += 4 + if len(ifi.Interface.Name) > 0 { + attrs |= attrName + l += ifi.nameLen() + } + if ifi.Interface.MTU > 0 { + attrs |= attrMTU + l += 4 + } + } + if ifi.Addr != nil { + switch proto { + case iana.ProtocolICMP: + if ifi.Addr.IP.To4() != nil { + attrs |= attrIPAddr + l += 4 + net.IPv4len + } + case iana.ProtocolIPv6ICMP: + if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + attrs |= attrIPAddr + l += 4 + net.IPv6len + } + } + } + return +} + +// Len implements the Len method of Extension interface. +func (ifi *InterfaceInfo) Len(proto int) int { + _, l := ifi.attrsAndLen(proto) + return l +} + +// Marshal implements the Marshal method of Extension interface. 
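+// The encoding is a 4-octet object header followed by whichever of the
+// ifIndex, IPAddr, name, and MTU attributes attrsAndLen selected, emitted
+// in that order by marshal below.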
+func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) { + attrs, l := ifi.attrsAndLen(proto) + b := make([]byte, l) + if err := ifi.marshal(proto, b, attrs, l); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error { + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceInfo, byte(ifi.Type) + for b = b[4:]; len(b) > 0 && attrs != 0; { + switch { + case attrs&attrIfIndex != 0: + b = ifi.marshalIfIndex(proto, b) + attrs &^= attrIfIndex + case attrs&attrIPAddr != 0: + b = ifi.marshalIPAddr(proto, b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b = ifi.marshalName(proto, b) + attrs &^= attrName + case attrs&attrMTU != 0: + b = ifi.marshalMTU(proto, b) + attrs &^= attrMTU + } + } + return nil +} + +func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { + switch proto { + case iana.ProtocolICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) + copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) + b = b[4+net.IPv4len:] + case iana.ProtocolIPv6ICMP: + binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) + copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) + b = b[4+net.IPv6len:] + } + return b +} + +func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + afi := int(binary.BigEndian.Uint16(b[:2])) + b = b[4:] + switch afi { + case afiIPv4: + if len(b) < net.IPv4len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv4len) + copy(ifi.Addr.IP, b[:net.IPv4len]) + b = b[net.IPv4len:] + case afiIPv6: + if len(b) < net.IPv6len { + return nil, errMessageTooShort + } + ifi.Addr.IP = make(net.IP, net.IPv6len) + copy(ifi.Addr.IP, b[:net.IPv6len]) + b = b[net.IPv6len:] + } + return b, nil +} + +func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte { + l := byte(ifi.nameLen()) + b[0] = l + copy(b[1:], []byte(ifi.Interface.Name)) + return b[l:] +} + +func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) { + if 4 > len(b) || len(b) < int(b[0]) { + return nil, errMessageTooShort + } + l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } + var name [63]byte + copy(name[:], b[1:l]) + ifi.Interface.Name = strings.Trim(string(name[:]), "\000") + return b[l:], nil +} + +func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte { + binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU)) + return b[4:] +} + +func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4])) + return b[4:], nil +} + +func parseInterfaceInfo(b []byte) (Extension, error) { + ifi := &InterfaceInfo{ + Class: int(b[2]), + Type: int(b[3]), + } + if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 { + ifi.Interface = &net.Interface{} + } + if ifi.Type&attrIPAddr != 0 { + ifi.Addr = &net.IPAddr{} + } + attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU) + for b = b[4:]; len(b) > 0 && attrs != 0; { + var err error + switch { + case attrs&attrIfIndex != 0: + b, err = ifi.parseIfIndex(b) + attrs &^= 
attrIfIndex + case attrs&attrIPAddr != 0: + b, err = ifi.parseIPAddr(b) + attrs &^= attrIPAddr + case attrs&attrName != 0: + b, err = ifi.parseName(b) + attrs &^= attrName + case attrs&attrMTU != 0: + b, err = ifi.parseMTU(b) + attrs &^= attrMTU + } + if err != nil { + return nil, err + } + } + if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil { + ifi.Addr.Zone = ifi.Interface.Name + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4.go b/vendor/golang.org/x/net/icmp/ipv4.go new file mode 100644 index 0000000..ffc66ed --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "runtime" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +// freebsdVersion is set in sys_freebsd.go. +// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. +var freebsdVersion uint32 + +// ParseIPv4Header parses b as an IPv4 header of ICMP error message +// invoking packet, which is contained in ICMP error message. +func ParseIPv4Header(b []byte) (*ipv4.Header, error) { + if len(b) < ipv4.HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return nil, errBufferTooShort + } + h := &ipv4.Header{ + Version: int(b[0] >> 4), + Len: hdrlen, + TOS: int(b[1]), + ID: int(binary.BigEndian.Uint16(b[4:6])), + FragOff: int(binary.BigEndian.Uint16(b[6:8])), + TTL: int(b[8]), + Protocol: int(b[9]), + Checksum: int(binary.BigEndian.Uint16(b[10:12])), + Src: net.IPv4(b[12], b[13], b[14], b[15]), + Dst: net.IPv4(b[16], b[17], b[18], b[19]), + } + switch runtime.GOOS { + case "darwin": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + case "freebsd": + if freebsdVersion >= 1000000 { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } else { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + } + h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + if hdrlen-ipv4.HeaderLen > 0 { + h.Options = make([]byte, hdrlen-ipv4.HeaderLen) + copy(h.Options, b[ipv4.HeaderLen:]) + } + return h, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go new file mode 100644 index 0000000..058953f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -0,0 +1,83 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/socket" + "golang.org/x/net/ipv4" +) + +type ipv4HeaderTest struct { + wireHeaderFromKernel [ipv4.HeaderLen]byte + wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte + Header *ipv4.Header +} + +var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
+ wireHeaderFromKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, +} + +func TestParseIPv4Header(t *testing.T) { + tt := &ipv4HeaderLittleEndianTest + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = tt.wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = tt.wireHeaderFromKernel[:] + } else { + wh = tt.wireHeaderFromTradBSDKernel[:] + } + default: + wh = tt.wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } +} diff --git a/vendor/golang.org/x/net/icmp/ipv6.go b/vendor/golang.org/x/net/icmp/ipv6.go new file mode 100644 index 0000000..2e8cfeb --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ipv6.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "net" + + "golang.org/x/net/internal/iana" +) + +const ipv6PseudoHeaderLen = 2*net.IPv6len + 8 + +// IPv6PseudoHeader returns an IPv6 pseudo header for checksum +// calculation. +func IPv6PseudoHeader(src, dst net.IP) []byte { + b := make([]byte, ipv6PseudoHeaderLen) + copy(b, src.To16()) + copy(b[net.IPv6len:], dst.To16()) + b[len(b)-1] = byte(iana.ProtocolIPv6ICMP) + return b +} diff --git a/vendor/golang.org/x/net/icmp/listen_posix.go b/vendor/golang.org/x/net/icmp/listen_posix.go new file mode 100644 index 0000000..7fac4f9 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_posix.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package icmp + +import ( + "net" + "os" + "runtime" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. 
+// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + var family, proto int + switch network { + case "udp4": + family, proto = syscall.AF_INET, iana.ProtocolICMP + case "udp6": + family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP + default: + i := last(network, ':') + switch network[:i] { + case "ip4": + proto = iana.ProtocolICMP + case "ip6": + proto = iana.ProtocolIPv6ICMP + } + } + var cerr error + var c net.PacketConn + switch family { + case syscall.AF_INET, syscall.AF_INET6: + s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + if runtime.GOOS == "darwin" && family == syscall.AF_INET { + if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("setsockopt", err) + } + } + sa, err := sockaddr(family, address) + if err != nil { + syscall.Close(s) + return nil, err + } + if err := syscall.Bind(s, sa); err != nil { + syscall.Close(s) + return nil, os.NewSyscallError("bind", err) + } + f := os.NewFile(uintptr(s), "datagram-oriented icmp") + c, cerr = net.FilePacketConn(f) + f.Close() + default: + c, cerr = net.ListenPacket(network, address) + } + if cerr != nil { + return nil, cerr + } + switch proto { + case iana.ProtocolICMP: + return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil + case iana.ProtocolIPv6ICMP: + return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil + default: + return &PacketConn{c: c}, nil + } +} diff --git a/vendor/golang.org/x/net/icmp/listen_stub.go b/vendor/golang.org/x/net/icmp/listen_stub.go new file mode 100644 index 0000000..668728d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/listen_stub.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 + +package icmp + +// ListenPacket listens for incoming ICMP packets addressed to +// address. See net.Dial for the syntax of address. +// +// For non-privileged datagram-oriented ICMP endpoints, network must +// be "udp4" or "udp6". The endpoint allows to read, write a few +// limited ICMP messages such as echo request and echo reply. +// Currently only Darwin and Linux support this. +// +// Examples: +// ListenPacket("udp4", "192.168.0.1") +// ListenPacket("udp4", "0.0.0.0") +// ListenPacket("udp6", "fe80::1%en0") +// ListenPacket("udp6", "::") +// +// For privileged raw ICMP endpoints, network must be "ip4" or "ip6" +// followed by a colon and an ICMP protocol number or name. +// +// Examples: +// ListenPacket("ip4:icmp", "192.168.0.1") +// ListenPacket("ip4:1", "0.0.0.0") +// ListenPacket("ip6:ipv6-icmp", "fe80::1%en0") +// ListenPacket("ip6:58", "::") +func ListenPacket(network, address string) (*PacketConn, error) { + return nil, errOpNoSupport +} diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go new file mode 100644 index 0000000..81140b0 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message.go @@ -0,0 +1,152 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
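To tie the ListenPacket variants above to the message API defined next, here is a minimal non-privileged echo sender, sketched along the lines of the ping test later in this diff (the destination 192.0.2.1 is a TEST-NET placeholder):

    package main

    import (
        "log"
        "net"
        "os"

        "golang.org/x/net/icmp"
        "golang.org/x/net/ipv4"
    )

    func main() {
        // "udp4" gives a non-privileged datagram-oriented endpoint (Darwin/Linux only).
        c, err := icmp.ListenPacket("udp4", "0.0.0.0")
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        wm := icmp.Message{
            Type: ipv4.ICMPTypeEcho, Code: 0,
            Body: &icmp.Echo{
                ID: os.Getpid() & 0xffff, Seq: 1,
                Data: []byte("HELLO-R-U-THERE"),
            },
        }
        wb, err := wm.Marshal(nil)
        if err != nil {
            log.Fatal(err)
        }
        // With a "udp4" endpoint the peer address must be a *net.UDPAddr.
        if _, err := c.WriteTo(wb, &net.UDPAddr{IP: net.ParseIP("192.0.2.1")}); err != nil {
            log.Fatal(err)
        }
    }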
+ +// Package icmp provides basic functions for the manipulation of +// messages used in the Internet Control Message Protocols, +// ICMPv4 and ICMPv6. +// +// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443. +// Multi-part message support for ICMP is defined in RFC 4884. +// ICMP extensions for MPLS are defined in RFC 4950. +// ICMP extensions for interface and next-hop identification are +// defined in RFC 5837. +package icmp // import "golang.org/x/net/icmp" + +import ( + "encoding/binary" + "errors" + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. + +var ( + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") +) + +func checksum(b []byte) uint16 { + csumcv := len(b) - 1 // checksum coverage + s := uint32(0) + for i := 0; i < csumcv; i += 2 { + s += uint32(b[i+1])<<8 | uint32(b[i]) + } + if csumcv&1 == 0 { + s += uint32(b[csumcv]) + } + s = s>>16 + s&0xffff + s = s + s>>16 + return ^uint16(s) +} + +// A Type represents an ICMP message type. +type Type interface { + Protocol() int +} + +// A Message represents an ICMP message. +type Message struct { + Type Type // type, either ipv4.ICMPType or ipv6.ICMPType + Code int // code + Checksum int // checksum + Body MessageBody // body +} + +// Marshal returns the binary encoding of the ICMP message m. +// +// For an ICMPv4 message, the returned message always contains the +// calculated checksum field. +// +// For an ICMPv6 message, the returned message contains the calculated +// checksum field when psh is not nil, otherwise the kernel will +// compute the checksum field during the message transmission. +// When psh is not nil, it must be the pseudo header for IPv6. +func (m *Message) Marshal(psh []byte) ([]byte, error) { + var mtype int + switch typ := m.Type.(type) { + case ipv4.ICMPType: + mtype = int(typ) + case ipv6.ICMPType: + mtype = int(typ) + default: + return nil, syscall.EINVAL + } + b := []byte{byte(mtype), byte(m.Code), 0, 0} + if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil { + b = append(psh, b...) + } + if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 { + mb, err := m.Body.Marshal(m.Type.Protocol()) + if err != nil { + return nil, err + } + b = append(b, mb...) + } + if m.Type.Protocol() == iana.ProtocolIPv6ICMP { + if psh == nil { // cannot calculate checksum here + return b, nil + } + off, l := 2*net.IPv6len, len(b)-len(psh) + binary.BigEndian.PutUint32(b[off:off+4], uint32(l)) + } + s := checksum(b) + // Place checksum back in header; using ^= avoids the + // assumption the checksum bytes are zero. 
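    // (checksum() above folds the buffer into 16-bit words as b[i] | b[i+1]<<8
    // and returns the complement of their ones-complement sum, so the low byte
    // of s belongs at the lower address, b[len(psh)+2], and the high byte at
    // b[len(psh)+3].)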
+ b[len(psh)+2] ^= byte(s) + b[len(psh)+3] ^= byte(s >> 8) + return b[len(psh):], nil +} + +var parseFns = map[Type]func(int, []byte) (MessageBody, error){ + ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv4.ICMPTypeParameterProblem: parseParamProb, + + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + + ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, + ipv6.ICMPTypePacketTooBig: parsePacketTooBig, + ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, + ipv6.ICMPTypeParameterProblem: parseParamProb, + + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, +} + +// ParseMessage parses b as an ICMP message. +// Proto must be either the ICMPv4 or ICMPv6 protocol number. +func ParseMessage(proto int, b []byte) (*Message, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + var err error + m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))} + switch proto { + case iana.ProtocolICMP: + m.Type = ipv4.ICMPType(b[0]) + case iana.ProtocolIPv6ICMP: + m.Type = ipv6.ICMPType(b[0]) + default: + return nil, syscall.EINVAL + } + if fn, ok := parseFns[m.Type]; !ok { + m.Body, err = parseDefaultMessageBody(proto, b[4:]) + } else { + m.Body, err = fn(proto, b[4:]) + } + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go new file mode 100644 index 0000000..5d2605f --- /dev/null +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp_test + +import ( + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } +} + +var marshalAndParseMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: 
[]byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, +} + +func TestMarshalAndParseMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + if !reflect.DeepEqual(m.Body, tt.Body) { + t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + } + } +} diff --git a/vendor/golang.org/x/net/icmp/messagebody.go b/vendor/golang.org/x/net/icmp/messagebody.go new file mode 100644 index 0000000..2463730 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/messagebody.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A MessageBody represents an ICMP message body. +type MessageBody interface { + // Len returns the length of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Len(proto int) int + + // Marshal returns the binary encoding of ICMP message body. + // Proto must be either the ICMPv4 or ICMPv6 protocol number. + Marshal(proto int) ([]byte, error) +} + +// A DefaultMessageBody represents the default message body. +type DefaultMessageBody struct { + Data []byte // data +} + +// Len implements the Len method of MessageBody interface. +func (p *DefaultMessageBody) Len(proto int) int { + if p == nil { + return 0 + } + return len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) { + return p.Data, nil +} + +// parseDefaultMessageBody parses b as an ICMP message body. +func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) { + p := &DefaultMessageBody{Data: make([]byte, len(b))} + copy(p.Data, b) + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/mpls.go b/vendor/golang.org/x/net/icmp/mpls.go new file mode 100644 index 0000000..c314917 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/mpls.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A MPLSLabel represents a MPLS label stack entry. +type MPLSLabel struct { + Label int // label value + TC int // traffic class; formerly experimental use + S bool // bottom of stack + TTL int // time to live +} + +const ( + classMPLSLabelStack = 1 + typeIncomingMPLSLabelStack = 1 +) + +// A MPLSLabelStack represents a MPLS label stack. +type MPLSLabelStack struct { + Class int // extension object class number + Type int // extension object sub-type + Labels []MPLSLabel +} + +// Len implements the Len method of Extension interface. 
+func (ls *MPLSLabelStack) Len(proto int) int { + return 4 + (4 * len(ls.Labels)) +} + +// Marshal implements the Marshal method of Extension interface. +func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) { + b := make([]byte, ls.Len(proto)) + if err := ls.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ls *MPLSLabelStack) marshal(proto int, b []byte) error { + l := ls.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack + off := 4 + for _, ll := range ls.Labels { + b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0) + b[off+2] |= byte(ll.TC << 1 & 0x0e) + if ll.S { + b[off+2] |= 0x1 + } + b[off+3] = byte(ll.TTL) + off += 4 + } + return nil +} + +func parseMPLSLabelStack(b []byte) (Extension, error) { + ls := &MPLSLabelStack{ + Class: int(b[2]), + Type: int(b[3]), + } + for b = b[4:]; len(b) >= 4; b = b[4:] { + ll := MPLSLabel{ + Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4, + TC: int(b[2]&0x0e) >> 1, + TTL: int(b[3]), + } + if b[2]&0x1 != 0 { + ll.S = true + } + ls.Labels = append(ls.Labels, ll) + } + return ls, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go new file mode 100644 index 0000000..f271356 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "golang.org/x/net/internal/iana" + +// multipartMessageBodyDataLen takes b as an original datagram and +// exts as extensions, and returns a required length for message body +// and a required length for a padded original datagram in wire +// format. +func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { + for _, ext := range exts { + bodyLen += ext.Len(proto) + } + if bodyLen > 0 { + dataLen = multipartMessageOrigDatagramLen(proto, b) + bodyLen += 4 // length of extension header + } else { + dataLen = len(b) + } + bodyLen += dataLen + return bodyLen, dataLen +} + +// multipartMessageOrigDatagramLen takes b as an original datagram, +// and returns a required length for a padded orignal datagram in wire +// format. +func multipartMessageOrigDatagramLen(proto int, b []byte) int { + roundup := func(b []byte, align int) int { + // According to RFC 4884, the padded original datagram + // field must contain at least 128 octets. + if len(b) < 128 { + return 128 + } + r := len(b) + return (r + align - 1) & ^(align - 1) + } + switch proto { + case iana.ProtocolICMP: + return roundup(b, 4) + case iana.ProtocolIPv6ICMP: + return roundup(b, 8) + default: + return len(b) + } +} + +// marshalMultipartMessageBody takes data as an original datagram and +// exts as extesnsions, and returns a binary encoding of message body. +// It can be used for non-multipart message bodies when exts is nil. 
+func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) + b := make([]byte, 4+bodyLen) + copy(b[4:], data) + off := dataLen + 4 + if len(exts) > 0 { + b[dataLen+4] = byte(extensionVersion << 4) + off += 4 // length of object header + for _, ext := range exts { + switch ext := ext.(type) { + case *MPLSLabelStack: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) + case *InterfaceInfo: + attrs, l := ext.attrsAndLen(proto) + if err := ext.marshal(proto, b[off:], attrs, l); err != nil { + return nil, err + } + off += ext.Len(proto) + } + } + s := checksum(b[dataLen+4:]) + b[dataLen+4+2] ^= byte(s) + b[dataLen+4+3] ^= byte(s >> 8) + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } + } + return b, nil +} + +// parseMultipartMessageBody parses b as either a non-multipart +// message body or a multipart message body. +func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { + var l int + switch proto { + case iana.ProtocolICMP: + l = 4 * int(b[1]) + case iana.ProtocolIPv6ICMP: + l = 8 * int(b[0]) + } + if len(b) == 4 { + return nil, nil, nil + } + exts, l, err := parseExtensions(b[4:], l) + if err != nil { + l = len(b) - 4 + } + data := make([]byte, l) + copy(data, b[4:]) + return data, exts, nil +} diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go new file mode 100644 index 0000000..966ccb8 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -0,0 +1,442 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
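The tests below pin down the RFC 4884 arithmetic implemented by multipartMessageOrigDatagramLen above: the padded original datagram is at least 128 octets, then rounded up to a 4-octet (ICMPv4) or 8-octet (ICMPv6) boundary. Restated as a standalone sketch (paddedOrigDatagramLen is an illustrative name), with the values the tests expect:

    func paddedOrigDatagramLen(n, align int) int {
        if n < 128 {
            return 128 // RFC 4884: the padded original datagram is at least 128 octets
        }
        return (n + align - 1) &^ (align - 1) // round up to the alignment
    }

    // paddedOrigDatagramLen(21, 4)  == 128  (short datagrams are padded out)
    // paddedOrigDatagramLen(129, 4) == 132  (ICMPv4, 4-octet alignment)
    // paddedOrigDatagramLen(129, 8) == 136  (ICMPv6, 8-octet alignment)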
+ +package icmp_test + +import ( + "fmt" + "net" + "reflect" + "testing" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { + for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { + b, err := tt.Marshal(nil) + if err != nil { + t.Fatal(err) + } + if b[5] != 32 { + t.Errorf("#%v: got %v; want 32", i, b[5]) + } + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv4.ICMPTypeParameterProblem: + got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } +} + +var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, 
+ Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, +} + +func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { + pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) + for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { + for _, psh := range [][]byte{pshicmp, nil} { + b, err := tt.Marshal(psh) + if err != nil { + t.Fatal(err) + } + if b[4] != 16 { + t.Errorf("#%v: got %v; want 16", i, b[4]) + } + m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tt.Type || m.Code != tt.Code { + t.Errorf("#%v: got %v; want %v", i, m, &tt) + } + switch m.Type { + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + } + } + } + } +} + +func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { + var s string + for j, got := range gotExts { + switch got := got.(type) { + case *icmp.MPLSLabelStack: + want := wantExts[j].(*icmp.MPLSLabelStack) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) + } + case *icmp.InterfaceInfo: + want := wantExts[j].(*icmp.InterfaceInfo) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + } + } + return s[:len(s)-1] +} + +var multipartMessageBodyLenTests = []struct { + proto int + in icmp.MessageBody + out int +}{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + 
ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, +} + +func TestMultipartMessageBodyLen(t *testing.T) { + for i, tt := range multipartMessageBodyLenTests { + if out := tt.in.Len(tt.proto); out != tt.out { + t.Errorf("#%d: got %d; want %d", i, out, tt.out) + } + } +} diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go new file mode 100644 index 0000000..a1c9df7 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "encoding/binary" + +// A PacketTooBig represents an ICMP packet too big message body. +type PacketTooBig struct { + MTU int // maximum transmission unit of the nexthop link + Data []byte // data, known as original datagram field +} + +// Len implements the Len method of MessageBody interface. +func (p *PacketTooBig) Len(proto int) int { + if p == nil { + return 0 + } + return 4 + len(p.Data) +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4+len(p.Data)) + binary.BigEndian.PutUint32(b[:4], uint32(p.MTU)) + copy(b[4:], p.Data) + return b, nil +} + +// parsePacketTooBig parses b as an ICMP packet too big message body. +func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { + bodyLen := len(b) + if bodyLen < 4 { + return nil, errMessageTooShort + } + p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))} + if bodyLen > 4 { + p.Data = make([]byte, bodyLen-4) + copy(p.Data, b[4:]) + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go new file mode 100644 index 0000000..0a2548d --- /dev/null +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -0,0 +1,63 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import ( + "encoding/binary" + "golang.org/x/net/internal/iana" +) + +// A ParamProb represents an ICMP parameter problem message body. +type ParamProb struct { + Pointer uintptr // offset within the data where the error was detected + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ParamProb) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ParamProb) Marshal(proto int) ([]byte, error) { + if proto == iana.ProtocolIPv6ICMP { + b := make([]byte, p.Len(proto)) + binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer)) + copy(b[4:], p.Data) + return b, nil + } + b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) + if err != nil { + return nil, err + } + b[0] = byte(p.Pointer) + return b, nil +} + +// parseParamProb parses b as an ICMP parameter problem message body. +func parseParamProb(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ParamProb{} + if proto == iana.ProtocolIPv6ICMP { + p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4])) + p.Data = make([]byte, len(b)-4) + copy(p.Data, b[4:]) + return p, nil + } + p.Pointer = uintptr(b[0]) + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go new file mode 100644 index 0000000..3171dad --- /dev/null +++ b/vendor/golang.org/x/net/icmp/ping_test.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
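For completeness, a sketch of a marshal/parse round trip through the fixed-layout bodies above, using packet-too-big as the example:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/net/icmp"
        "golang.org/x/net/ipv6"
    )

    func main() {
        wm := icmp.Message{
            Type: ipv6.ICMPTypePacketTooBig, Code: 0,
            Body: &icmp.PacketTooBig{MTU: 1280, Data: []byte("ERROR-INVOKING-PACKET")},
        }
        b, err := wm.Marshal(nil) // no pseudo header: the kernel fills the ICMPv6 checksum
        if err != nil {
            log.Fatal(err)
        }
        // 58 is the ICMPv6 protocol number (iana.ProtocolIPv6ICMP inside this
        // tree; the internal/iana package is not importable from outside x/net).
        m, err := icmp.ParseMessage(58, b)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(m.Body.(*icmp.PacketTooBig).MTU) // prints 1280
    }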
+ +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + for _, ip := range ips { + switch protocol { + case iana.ProtocolICMP: + if ip.To4() != nil { + return netaddr(ip) + } + case iana.ProtocolIPv6ICMP: + if ip.To16() != nil && ip.To4() == nil { + return netaddr(ip) + } + } + } + return nil, errors.New("no A or AAAA record") +} + +type pingTest struct { + network, address string + protocol int + mtype icmp.Type +} + +var nonPrivilegedPingTests = []pingTest{ + {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestNonPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + for i, tt := range nonPrivilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +var privilegedPingTests = []pingTest{ + {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestPrivilegedPing(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + for i, tt := range privilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +func doPing(tt pingTest, seq int) error { + c, err := icmp.ListenPacket(tt.network, tt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, tt.protocol) + if err != nil { + return err + } + + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + wm := icmp.Message{ + Type: tt.mtype, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) + if err != nil { + return err + } + switch rm.Type { + case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want 
echo reply", rm, peer) + } +} + +func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + network, address := "udp4", "127.0.0.1" + if !nettest.SupportsIPv4() { + network, address = "udp6", "::1" + } + const N = 1000 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + c, err := icmp.ListenPacket(network, address) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/icmp/sys_freebsd.go b/vendor/golang.org/x/net/icmp/sys_freebsd.go new file mode 100644 index 0000000..c75f3dd --- /dev/null +++ b/vendor/golang.org/x/net/icmp/sys_freebsd.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +import "syscall" + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") +} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go new file mode 100644 index 0000000..344e158 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package icmp + +// A TimeExceeded represents an ICMP time exceeded message body. +type TimeExceeded struct { + Data []byte // data, known as original datagram field + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *TimeExceeded) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { + return marshalMultipartMessageBody(proto, p.Data, p.Extensions) +} + +// parseTimeExceeded parses b as an ICMP time exceeded message body. +func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &TimeExceeded{} + var err error + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + if err != nil { + return nil, err + } + return p, nil +} diff --git a/vendor/golang.org/x/net/idna/example_test.go b/vendor/golang.org/x/net/idna/example_test.go new file mode 100644 index 0000000..948f6eb --- /dev/null +++ b/vendor/golang.org/x/net/idna/example_test.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna_test + +import ( + "fmt" + + "golang.org/x/net/idna" +) + +func ExampleProfile() { + // Raw Punycode has no restrictions and does no mappings. + fmt.Println(idna.ToASCII("")) + fmt.Println(idna.ToASCII("*.faß.com")) + fmt.Println(idna.Punycode.ToASCII("*.faß.com")) + + // Rewrite IDN for lookup. This (currently) uses transitional mappings to + // find a balance between IDNA2003 and IDNA2008 compatibility. 
+ fmt.Println(idna.Lookup.ToASCII("")) + fmt.Println(idna.Lookup.ToASCII("www.faß.com")) + + // Convert an IDN to ASCII for registration purposes. This changes the + // encoding, but reports an error if the input was illformed. + fmt.Println(idna.Registration.ToASCII("")) + fmt.Println(idna.Registration.ToASCII("www.faß.com")) + + // Output: + // + // *.xn--fa-hia.com + // *.xn--fa-hia.com + // + // www.fass.com + // idna: invalid label "" + // www.xn--fa-hia.com +} + +func ExampleNew() { + var p *idna.Profile + + // Raw Punycode has no restrictions and does no mappings. + p = idna.New() + fmt.Println(p.ToASCII("*.faß.com")) + + // Do mappings. Note that star is not allowed in a DNS lookup. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true)) // Map ß -> ss + fmt.Println(p.ToASCII("*.faß.com")) + + // Lookup for registration. Also does not allow '*'. + p = idna.New(idna.ValidateForRegistration()) + fmt.Println(p.ToUnicode("*.faß.com")) + + // Set up a profile maps for lookup, but allows wild cards. + p = idna.New( + idna.MapForLookup(), + idna.Transitional(true), // Map ß -> ss + idna.StrictDomainName(false)) // Set more permissive ASCII rules. + fmt.Println(p.ToASCII("*.faß.com")) + + // Output: + // *.xn--fa-hia.com + // *.fass.com idna: disallowed rune U+002A + // *.faß.com idna: disallowed rune U+002A + // *.fass.com +} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 0000000..346fe44 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,732 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in http://www.unicode.org/reports/tr46. +// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. 
+func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. 
The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. 
+	Display *Profile = display
+
+	// Registration is the recommended profile for checking whether a given
+	// IDN is valid for registration, according to Section 4 of RFC 5891.
+	Registration *Profile = registration
+
+	punycode = &Profile{}
+	lookup = &Profile{options{
+		transitional:   true,
+		useSTD3Rules:   true,
+		validateLabels: true,
+		trie:           trie,
+		fromPuny:       validateFromPunycode,
+		mapping:        validateAndMap,
+		bidirule:       bidirule.ValidString,
+	}}
+	display = &Profile{options{
+		useSTD3Rules:   true,
+		validateLabels: true,
+		trie:           trie,
+		fromPuny:       validateFromPunycode,
+		mapping:        validateAndMap,
+		bidirule:       bidirule.ValidString,
+	}}
+	registration = &Profile{options{
+		useSTD3Rules:    true,
+		validateLabels:  true,
+		verifyDNSLength: true,
+		trie:            trie,
+		fromPuny:        validateFromPunycode,
+		mapping:         validateRegistration,
+		bidirule:        bidirule.ValidString,
+	}}
+
+	// TODO: profiles
+	// Register: recommended for approving domain names: don't do any mappings
+	// but rather reject on invalid input. Bundle or block deviation characters.
+)
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+	return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+	return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see http://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+	var err error
+	var isBidi bool
+	if p.mapping != nil {
+		s, isBidi, err = p.mapping(p, s)
+	}
+	// Remove leading empty labels.
+	if p.removeLeadingDots {
+		for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+		}
+	}
+	// TODO: allow for a quick check of the tables data.
+	// It seems like we should only create this error on ToASCII, but the
+	// UTS 46 conformance tests suggest we should always check this.
+	if err == nil && p.verifyDNSLength && s == "" {
+		err = &labelError{s, "A4"}
+	}
+	labels := labelIter{orig: s}
+	for ; !labels.done(); labels.next() {
+		label := labels.label()
+		if label == "" {
+			// Empty labels are not okay. The label iterator skips the last
+			// label if it is empty.
+			if err == nil && p.verifyDNSLength {
+				err = &labelError{s, "A4"}
+			}
+			continue
+		}
+		if strings.HasPrefix(label, acePrefix) {
+			u, err2 := decode(label[len(acePrefix):])
+			if err2 != nil {
+				if err == nil {
+					err = err2
+				}
+				// Spec says keep the old label.
+				continue
+			}
+			isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
+			labels.set(u)
+			if err == nil && p.validateLabels {
+				err = p.fromPuny(p, u)
+			}
+			if err == nil {
+				// This should be called on NonTransitional, according to the
+				// spec, but that currently does not have any effect. Use the
+				// original profile to preserve options.
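+				// (validateLabel, defined below, enforces the hyphen rules
+				// (V2/V3), rejects a leading modifier rune (V5), and applies
+				// the RFC 5892 joiner context rules (C).)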
+ err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. + if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) 
+ // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. 
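+	// Walk the decoded label rune by rune via the trie; any rune whose
+	// simplified category is neither valid nor deviation is rejected with V6.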
+ for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna_test.go b/vendor/golang.org/x/net/idna/idna_test.go new file mode 100644 index 0000000..0b067ca --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna_test.go @@ -0,0 +1,108 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "testing" +) + +var idnaTestCases = [...]struct { + ascii, unicode string +}{ + // Labels. + {"books", "books"}, + {"xn--bcher-kva", "bücher"}, + + // Domains. 
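+	// The "foo--xn--bar.org" case below checks that an interior "xn--"
+	// (one that does not prefix its label) is left undecoded.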
+ {"foo--xn--bar.org", "foo--xn--bar.org"}, + {"golang.org", "golang.org"}, + {"example.xn--p1ai", "example.рф"}, + {"xn--czrw28b.tw", "商業.tw"}, + {"www.xn--mller-kva.de", "www.müller.de"}, +} + +func TestIDNA(t *testing.T) { + for _, tc := range idnaTestCases { + if a, err := ToASCII(tc.unicode); err != nil { + t.Errorf("ToASCII(%q): %v", tc.unicode, err) + } else if a != tc.ascii { + t.Errorf("ToASCII(%q): got %q, want %q", tc.unicode, a, tc.ascii) + } + + if u, err := ToUnicode(tc.ascii); err != nil { + t.Errorf("ToUnicode(%q): %v", tc.ascii, err) + } else if u != tc.unicode { + t.Errorf("ToUnicode(%q): got %q, want %q", tc.ascii, u, tc.unicode) + } + } +} + +func TestIDNASeparators(t *testing.T) { + type subCase struct { + unicode string + wantASCII string + wantErr bool + } + + testCases := []struct { + name string + profile *Profile + subCases []subCase + }{ + { + name: "Punycode", profile: Punycode, + subCases: []subCase{ + {"example\u3002jp", "xn--examplejp-ck3h", false}, + {"東京\uFF0Ejp", "xn--jp-l92cn98g071o", false}, + {"大阪\uFF61jp", "xn--jp-ku9cz72u463f", false}, + }, + }, + { + name: "Lookup", profile: Lookup, + subCases: []subCase{ + {"example\u3002jp", "example.jp", false}, + {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, + {"大阪\uFF61jp", "xn--pssu33l.jp", false}, + }, + }, + { + name: "Display", profile: Display, + subCases: []subCase{ + {"example\u3002jp", "example.jp", false}, + {"東京\uFF0Ejp", "xn--1lqs71d.jp", false}, + {"大阪\uFF61jp", "xn--pssu33l.jp", false}, + }, + }, + { + name: "Registration", profile: Registration, + subCases: []subCase{ + {"example\u3002jp", "", true}, + {"東京\uFF0Ejp", "", true}, + {"大阪\uFF61jp", "", true}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, c := range tc.subCases { + gotA, err := tc.profile.ToASCII(c.unicode) + if c.wantErr { + if err == nil { + t.Errorf("ToASCII(%q): got no error, but an error expected", c.unicode) + } + } else { + if err != nil { + t.Errorf("ToASCII(%q): got err=%v, but no error expected", c.unicode, err) + } else if gotA != c.wantASCII { + t.Errorf("ToASCII(%q): got %q, want %q", c.unicode, gotA, c.wantASCII) + } + } + } + }) + } +} + +// TODO(nigeltao): test errors, once we've specified when ToASCII and ToUnicode +// return errors. diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 0000000..02c7d59 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. 
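+// For example, decode("bcher-kva") yields "bücher"; this pair, along with the
+// RFC 3492 sample strings, is exercised by the punycode tests in this package.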
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
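+//
+// For instance, adapt(0, numPoints, firstTime) is 0 for any numPoints >= 1:
+// delta remains 0 after the scaling steps, the loop never runs, and
+// k + (base-tmin+1)*0/(0+skew) evaluates to 0.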
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/punycode_test.go b/vendor/golang.org/x/net/idna/punycode_test.go new file mode 100644 index 0000000..bfec81d --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode_test.go @@ -0,0 +1,198 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +import ( + "strings" + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. + { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. + "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. 
+ "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. + "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := decode(tc.encoded); err != nil { + t.Errorf("decode(%q): %v", tc.encoded, err) + } else if got != tc.s { + t.Errorf("decode(%q): got %q, want %q", tc.encoded, got, tc.s) + } + + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} + +var punycodeErrorTestCases = [...]string{ + "decode -", // A sole '-' is invalid. + "decode foo\x00bar", // '\x00' is not in [0-9A-Za-z]. + "decode foo#bar", // '#' is not in [0-9A-Za-z]. + "decode foo\u00A3bar", // '\u00A3' is not in [0-9A-Za-z]. + "decode 9", // "9a" decodes to codepoint \u00A3; "9" is truncated. + "decode 99999a", // "99999a" decodes to codepoint \U0048A3C1, which is > \U0010FFFF. + "decode 9999999999a", // "9999999999a" overflows the int32 calculation. + + "encode " + strings.Repeat("x", 65536) + "\uff00", // int32 overflow. +} + +func TestPunycodeErrors(t *testing.T) { + for _, tc := range punycodeErrorTestCases { + var err error + switch { + case strings.HasPrefix(tc, "decode "): + _, err = decode(tc[7:]) + case strings.HasPrefix(tc, "encode "): + _, err = encode("", tc[7:]) + } + if err == nil { + if len(tc) > 256 { + tc = tc[:100] + "..." + tc[len(tc)-100:] + } + t.Errorf("no error for %s", tc) + } + } +} diff --git a/vendor/golang.org/x/net/idna/tables.go b/vendor/golang.org/x/net/idna/tables.go new file mode 100644 index 0000000..f910b26 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables.go @@ -0,0 +1,4557 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8176 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
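+// Entries with value zero are omitted from the literal below (Go
+// zero-initializes arrays), which is why the all-zero block 0x2 lists none.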
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04},
+	{value: 0x0018, lo: 0x80, hi: 0x80},
+	{value: 0x0040, lo: 0x81, hi: 0x8f},
+	{value: 0x0018, lo: 0x90, hi: 0xa6},
+	{value: 0x0040, lo: 0xa7, hi: 0xbf},
+	// Block 0xfd, offset 0x73d
+	{value: 0x0000, lo: 0x02},
+	{value: 0x0008, lo: 0x80, hi: 0x96},
+	{value: 0x0040, lo: 0x97, hi: 0xbf},
+	// Block 0xfe, offset 0x740
+	{value: 0x0000, lo: 0x02},
+	{value: 0x0008, lo: 0x80, hi: 0xb4},
+	{value: 0x0040, lo: 0xb5, hi: 0xbf},
+	// Block 0xff, offset 0x743
+	{value: 0x0000, lo: 0x03},
+	{value: 0x0008, lo: 0x80, hi: 0x9d},
+	{value: 0x0040, lo: 0x9e, hi: 0x9f},
+	{value: 0x0008, lo: 0xa0, hi: 0xbf},
+	// Block 0x100, offset 0x747
+	{value: 0x0000, lo: 0x03},
+	{value: 0x0008, lo: 0x80, hi: 0xa1},
+	{value: 0x0040, lo: 0xa2, hi: 0xaf},
+	{value: 0x0008, lo: 0xb0, hi: 0xbf},
+	// Block 0x101, offset 0x74b
+	{value: 0x0000, lo: 0x02},
+	{value: 0x0008, lo: 0x80, hi: 0xa0},
+	{value: 0x0040, lo: 0xa1, hi: 0xbf},
+	// Block 0x102, offset 0x74e
+	{value: 0x0020, lo: 0x0f},
+	{value: 0xdeb9, lo: 0x80, hi: 0x89},
+	{value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+	{value: 0xdff9, lo: 0x8b, hi: 0x9c},
+	{value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+	{value: 0xe239, lo: 0x9e, hi: 0xa2},
+	{value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+	{value: 0xe2d9, lo: 0xa4, hi: 0xab},
+	{value: 0x7ed5, lo: 0xac, hi: 0xac},
+	{value: 0xe3d9, lo: 0xad, hi: 0xaf},
+	{value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+	{value: 0xe439, lo: 0xb1, hi: 0xb6},
+	{value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+	{value: 0xe4f9, lo: 0xba, hi: 0xba},
+	{value: 0x8edd, lo: 0xbb, hi: 0xbb},
+	{value: 0xe519, lo: 0xbc, hi: 0xbf},
+	// Block 0x103, offset 0x75e
+	{value: 0x0020, lo: 0x10},
+	{value: 0x937d, lo: 0x80, hi: 0x80},
+	{value: 0xf099, lo: 0x81, hi: 0x86},
+	{value: 0x939d, lo: 0x87, hi: 0x8a},
+	{value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+	{value: 0xf159, lo: 0x8c, hi: 0x96},
+	{value: 0x941d, lo: 0x97, hi: 0x97},
+	{value: 0xf2b9, lo: 0x98, hi: 0xa3},
+	{value: 0x943d, lo: 0xa4, hi: 0xa6},
+	{value: 0xf439, lo: 0xa7, hi: 0xaa},
+	{value: 0x949d, lo: 0xab, hi: 0xab},
+	{value: 0xf4b9, lo: 0xac, hi: 0xac},
+	{value: 0x94bd, lo: 0xad, hi: 0xad},
+	{value: 0xf4d9, lo: 0xae, hi: 0xaf},
+	{value: 0x94dd, lo: 0xb0, hi: 0xb1},
+	{value: 0xf519, lo: 0xb2, hi: 0xbe},
+	{value: 0x2040, lo: 0xbf, hi: 0xbf},
+	// Block 0x104, offset 0x76f
+	{value: 0x0000, lo: 0x04},
+	{value: 0x0040, lo: 0x80, hi: 0x80},
+	{value: 0x0340, lo: 0x81, hi: 0x81},
+	{value: 0x0040, lo: 0x82, hi: 0x9f},
+	{value: 0x0340, lo: 0xa0, hi: 0xbf},
+	// Block 0x105, offset 0x774
+	{value: 0x0000, lo: 0x01},
+	{value: 0x0340, lo: 0x80, hi: 0xbf},
+	// Block 0x106, offset 0x776
+	{value: 0x0000, lo: 0x01},
+	{value: 0x33c0, lo: 0x80, hi: 0xbf},
+	// Block 0x107, offset 0x778
+	{value: 0x0000, lo: 0x02},
+	{value: 0x33c0, lo: 0x80, hi: 0xaf},
+	{value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E
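For orientation, every block in the sparse table above follows the same convention: the first entry is a header whose lo field holds the number of ranges in the block and whose value field holds a stride, and the remaining entries are ranges over the UTF-8 continuation bytes 0x80..0xbf. The value for a byte b that falls in a range r is r.value + (b - r.lo) * stride, so one range with a nonzero stride can encode a whole run of consecutive values; 0x0040, for instance, decodes to the disallowed category defined in trieval.go further down. A minimal sketch decoding two blocks copied from the table above (the names and the linear scan are illustrative only; the real lookup is the binary search in trie.go below):

```go
package main

import "fmt"

// valueRange mirrors the struct declared in trie.go below.
type valueRange struct {
	value  uint16 // in the header entry: the stride
	lo, hi byte   // in the header entry: lo holds the range count n
}

// Two blocks copied verbatim from idnaSparseValues above. Block 0x107 has
// stride 0, so every byte in a range shares one value; block 0x5d (offset
// 0x305) has stride 0x20, so each successive byte maps 0x20 higher.
var (
	block0x107 = []valueRange{
		{value: 0x0000, lo: 0x02},
		{value: 0x33c0, lo: 0x80, hi: 0xaf},
		{value: 0x0040, lo: 0xb0, hi: 0xbf},
	}
	block0x5d = []valueRange{
		{value: 0x0020, lo: 0x01},
		{value: 0x0f1d, lo: 0x80, hi: 0xbf},
	}
)

// decode is a naive linear version of sparseBlocks.lookup in trie.go.
func decode(block []valueRange, b byte) uint16 {
	header := block[0]
	for _, r := range block[1 : 1+int(header.lo)] {
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
	}
	return 0
}

func main() {
	fmt.Printf("%#04x\n", decode(block0x107, 0x85)) // 0x33c0
	fmt.Printf("%#04x\n", decode(block0x107, 0xb3)) // 0x0040 (disallowed)
	fmt.Printf("%#04x\n", decode(block0x5d, 0x82))  // 0x0f5d = 0x0f1d + 2*0x20
}
```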
+func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 0000000..7a8cf88 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. +// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. 
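+
+	// Worked example (hypothetical info values): 0x0849&catSmallMask == 1,
+	// i.e. mapped, with xorBit clear and mapping index
+	// 0x0849>>indexShift == 0x109; a non-mapped value such as 0x0108
+	// decodes to category 0x0108&catBigMask == valid with joining type
+	// (0x0108>>joinShift)&joinMask == joiningL.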
+ + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 0000000..c9df24d --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,180 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2016-06-22 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + 
ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP 
Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. + ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 0000000..86c78b3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,293 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` 
+ RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) + fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_bsd.go b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go new file mode 100644 index 0000000..a6e433b --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_bsd.go @@ -0,0 +1,53 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package nettest + +import ( + "runtime" + "strconv" + "strings" + "syscall" +) + +var darwinVersion int + +func init() { + if runtime.GOOS == "darwin" { + // See http://support.apple.com/kb/HT1633. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + darwinVersion, _ = strconv.Atoi(ss[0]) + } +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + switch runtime.GOOS { + case "freebsd": + // See http://www.freebsd.org/cgi/query-pr.cgi?pr=180065. + // Even after the fix, it looks like the latest + // kernels don't deliver link-local scoped multicast + // packets correctly. + return false + case "darwin": + return !causesIPv6Crash() + default: + return true + } +} + +func causesIPv6Crash() bool { + // We see some kernel crash when running IPv6 with IP-level + // options on Darwin kernel version 12 or below. + // See golang.org/issues/17015. + return darwinVersion < 13 +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go new file mode 100644 index 0000000..bc7da5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_nobsd.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux solaris + +package nettest + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_posix.go b/vendor/golang.org/x/net/internal/nettest/helper_posix.go new file mode 100644 index 0000000..963ed99 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_posix.go @@ -0,0 +1,31 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package nettest + +import ( + "os" + "syscall" +) + +func protocolNotSupported(err error) bool { + switch err := err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + case *os.SyscallError: + switch err := err.Err.(type) { + case syscall.Errno: + switch err { + case syscall.EPROTONOSUPPORT, syscall.ENOPROTOOPT: + return true + } + } + } + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_stub.go b/vendor/golang.org/x/net/internal/nettest/helper_stub.go new file mode 100644 index 0000000..ea61b6f --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_stub.go @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 + +package nettest + +import ( + "fmt" + "runtime" +) + +func maxOpenFiles() int { + return defaultMaxOpenFiles +} + +func supportsRawIPSocket() (string, bool) { + return fmt.Sprintf("not supported on %s", runtime.GOOS), false +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return false +} + +func causesIPv6Crash() bool { + return false +} + +func protocolNotSupported(err error) bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_unix.go b/vendor/golang.org/x/net/internal/nettest/helper_unix.go new file mode 100644 index 0000000..ed13e44 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_unix.go @@ -0,0 +1,29 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import ( + "fmt" + "os" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} + +func supportsRawIPSocket() (string, bool) { + if os.Getuid() != 0 { + return fmt.Sprintf("must be root on %s", runtime.GOOS), false + } + return "", true +} diff --git a/vendor/golang.org/x/net/internal/nettest/helper_windows.go b/vendor/golang.org/x/net/internal/nettest/helper_windows.go new file mode 100644 index 0000000..3dcb727 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/helper_windows.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "fmt" + "runtime" + "syscall" +) + +func maxOpenFiles() int { + return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ +} + +func supportsRawIPSocket() (string, bool) { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, 0) + if err == syscall.WSAEACCES { + return fmt.Sprintf("no access to raw socket allowed on %s", runtime.GOOS), false + } + if err != nil { + return err.Error(), false + } + syscall.Closesocket(s) + return "", true +} + +func supportsIPv6MulticastDeliveryOnLoopback() bool { + return true +} + +func causesIPv6Crash() bool { + return false +} diff --git a/vendor/golang.org/x/net/internal/nettest/interface.go b/vendor/golang.org/x/net/internal/nettest/interface.go new file mode 100644 index 0000000..8e6333a --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/interface.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import "net" + +// IsMulticastCapable reports whether ifi is an IP multicast-capable +// network interface. 
Network must be "ip", "ip4" or "ip6". +func IsMulticastCapable(network string, ifi *net.Interface) (net.IP, bool) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, false + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, false + } + return hasRoutableIP(network, ifi) +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. It returns nil when an appropriate +// network interface is not found. Network must be "ip", "ip4" or +// "ip6". +func RoutedInterface(network string, flags net.Flags) *net.Interface { + switch network { + case "ip", "ip4", "ip6": + default: + return nil + } + ift, err := net.Interfaces() + if err != nil { + return nil + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi + } + return nil +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + case *net.IPNet: + if ip := routableIP(network, ifa.IP); ip != nil { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) net.IP { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip + } + default: + if ip := ip.To4(); ip != nil { + return ip + } + if ip := ip.To16(); ip != nil { + return ip + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/nettest/rlimit.go b/vendor/golang.org/x/net/internal/nettest/rlimit.go new file mode 100644 index 0000000..bb34aec --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/rlimit.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } diff --git a/vendor/golang.org/x/net/internal/nettest/stack.go b/vendor/golang.org/x/net/internal/nettest/stack.go new file mode 100644 index 0000000..06f4e09 --- /dev/null +++ b/vendor/golang.org/x/net/internal/nettest/stack.go @@ -0,0 +1,152 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest // import "golang.org/x/net/internal/nettest" + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "runtime" +) + +var ( + supportsIPv4 bool + supportsIPv6 bool +) + +func init() { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + ln.Close() + supportsIPv4 = true + } + if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { + ln.Close() + supportsIPv6 = true + } +} + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. 
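+//
+// The result is computed once by the probe in init above: if
+//
+//	net.Listen("tcp4", "127.0.0.1:0")
+//
+// succeeds, the IPv4 stack is considered available; likewise "tcp6" on
+// "[::1]:0" for SupportsIPv6 below.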
+func SupportsIPv4() bool { return supportsIPv4 } + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { return supportsIPv6 } + +// SupportsRawIPSocket reports whether the platform supports raw IP +// sockets. +func SupportsRawIPSocket() (string, bool) { + return supportsRawIPSocket() +} + +// SupportsIPv6MulticastDeliveryOnLoopback reports whether the +// platform supports IPv6 multicast packet delivery on software +// loopback interface. +func SupportsIPv6MulticastDeliveryOnLoopback() bool { + return supportsIPv6MulticastDeliveryOnLoopback() +} + +// ProtocolNotSupported reports whether err is a protocol not +// supported error. +func ProtocolNotSupported(err error) bool { + return protocolNotSupported(err) +} + +// TestableNetwork reports whether network is testable on the current +// platform configuration. +func TestableNetwork(network string) bool { + // This is based on logic from standard library's + // net/platform_test.go. + switch network { + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "nacl", "plan9", "windows": + return false + } + if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + return false + } + case "unixpacket": + switch runtime.GOOS { + case "android", "darwin", "freebsd", "nacl", "plan9", "windows": + return false + case "netbsd": + // It passes on amd64 at least. 386 fails (Issue 22927). arm is unknown. + if runtime.GOARCH == "386" { + return false + } + } + } + return true +} + +// NewLocalListener returns a listener which listens to a loopback IP +// address or local file system path. +// Network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket". +func NewLocalListener(network string) (net.Listener, error) { + switch network { + case "tcp": + if supportsIPv4 { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln, nil + } + } + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "tcp4": + if supportsIPv4 { + return net.Listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if supportsIPv6 { + return net.Listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + return net.Listen(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +// NewLocalPacketListener returns a packet listener which listens to a +// loopback IP address or local file system path. +// Network must be "udp", "udp4", "udp6" or "unixgram". +func NewLocalPacketListener(network string) (net.PacketConn, error) { + switch network { + case "udp": + if supportsIPv4 { + if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { + return c, nil + } + } + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "udp4": + if supportsIPv4 { + return net.ListenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if supportsIPv6 { + return net.ListenPacket("udp6", "[::1]:0") + } + case "unixgram": + return net.ListenPacket(network, localPath()) + } + return nil, fmt.Errorf("%s is not supported", network) +} + +func localPath() string { + f, err := ioutil.TempFile("", "nettest") + if err != nil { + panic(err) + } + path := f.Name() + f.Close() + os.Remove(path) + return path +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 0000000..1eb07d2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 0000000..d1d0c2d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 0000000..bac6681 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 0000000..63f0534 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 0000000..7dedd43 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 0000000..a4e7122 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 0000000..ce9ec2f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include +#include + +#define _GNU_SOURCE +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 0000000..3f84335 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 0000000..14e28c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 0000000..93dff91 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 0000000..6a6379a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
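+
+// As in error_unix.go above, errnoErr below returns pre-boxed error
+// values for the common errno cases: converting a syscall.Errno to the
+// error interface on every call would allocate, whereas returning the
+// package-level variables does not.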
+ +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 0000000..05d6082 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 0000000..afb34ad --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 0000000..8d17a40 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 0000000..c87d2a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 0000000..2e80a9c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 0000000..3c42ea7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 0000000..5567afc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 0000000..b8c87b7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 0000000..5a38798 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 0000000..a7a5987 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 0000000..610fc4f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 0000000..71a69e2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 0000000..6465b20 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 0000000..64e8173 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 0000000..d6871d5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. 
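+// A minimal usage sketch (the address is a placeholder chosen for
+// illustration):
+//
+//	c, err := net.Dial("udp", "127.0.0.1:53")
+//	if err != nil { /* handle the dial error */ }
+//	cc, err := socket.NewConn(c) // cc wraps c's syscall.RawConn
+//
+// Only *net.TCPConn, *net.UDPConn and *net.IPConn are supported; any
+// other net.Conn makes NewConn return an "unknown connection type"
+// error.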
+func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 0000000..499164a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 0000000..b21d2e6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
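NewConn above only stashes the network name and the syscall.RawConn; all option I/O then funnels through RawConn.Control so the runtime keeps the descriptor alive for the duration of the call. A hedged usage sketch of the Conn/Option pairing (note the package is internal to golang.org/x/net, so this only compiles from within that module; SOL_SOCKET and SO_RCVBUF are standard syscall constants):

package main

import (
	"fmt"
	"net"
	"syscall"

	"golang.org/x/net/internal/socket" // internal: importable only inside x/net
)

func main() {
	c, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	cc, err := socket.NewConn(c) // *net.UDPConn matches the switch in NewConn
	if err != nil {
		panic(err)
	}

	// A sticky 4-byte integer option: SO_RCVBUF at SOL_SOCKET.
	so := &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}
	if err := so.SetInt(cc, 64<<10); err != nil {
		panic(err)
	}
	n, err := so.GetInt(cc)
	if err != nil {
		panic(err)
	}
	fmt.Println("SO_RCVBUF:", n) // kernels are free to round the value up
}

This is the same pattern testSocketOption exercises later in socket_test.go.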
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 0000000..f78832a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 0000000..96733cb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 0000000..d2add1a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
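recvMsg above reports the payload byte count in Message.N and the received control length in Message.NN, and fills Message.Addr for non-TCP sockets. A sketch of a datagram read with room for ancillary data; NewControlMessage (defined in socket.go later in this diff) sizes the OOB buffer, and whether any control data actually arrives depends on options set on the socket:

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/internal/socket" // internal: importable only inside x/net
)

func main() {
	c, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	cc, err := socket.NewConn(c)
	if err != nil {
		panic(err)
	}

	b := make([]byte, 1500)
	m := socket.Message{
		Buffers: [][]byte{b},
		OOB:     socket.NewControlMessage([]int{4}), // space for one 4-byte item
	}
	// Blocks until a datagram arrives; N, NN and Addr are set on return.
	if err := cc.RecvMsg(&m, 0); err != nil {
		panic(err)
	}
	fmt.Printf("%d payload bytes, %d OOB bytes from %v\n", m.N, m.NN, m.Addr)
}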
+ +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 0000000..bb179f1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 0000000..5f9730e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. 
+func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. 
+func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. 
+func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go new file mode 100644 index 0000000..c4edd4a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_go1_9_test.go @@ -0,0 +1,259 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +type mockControl struct { + Level int + Type int + Data []byte +} + +func TestControlMessage(t *testing.T) { + for _, tt := range []struct { + cs []mockControl + }{ + { + []mockControl{ + {Level: 1, Type: 1}, + }, + }, + { + []mockControl{ + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + { + []mockControl{ + {Level: 3, Type: 3, Data: []byte{0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + }, + }, + { + []mockControl{ + {Level: 4, Type: 4, Data: []byte{0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe}}, + {Level: 2, Type: 2, Data: []byte{0xfe}}, + }, + }, + } { + var w []byte + var tailPadLen int + mm := socket.NewControlMessage([]int{0}) + for i, c := range tt.cs { + m := socket.NewControlMessage([]int{len(c.Data)}) + l := len(m) - len(mm) + if i == len(tt.cs)-1 && l > len(c.Data) { + tailPadLen = l - len(c.Data) + } + w = append(w, m...) + } + + var err error + ww := make([]byte, len(w)) + copy(ww, w) + m := socket.ControlMessage(ww) + for _, c := range tt.cs { + if err = m.MarshalHeader(c.Level, c.Type, len(c.Data)); err != nil { + t.Fatalf("(%v).MarshalHeader() = %v", tt.cs, err) + } + copy(m.Data(len(c.Data)), c.Data) + m = m.Next(len(c.Data)) + } + m = socket.ControlMessage(w) + for _, c := range tt.cs { + m, err = m.Marshal(c.Level, c.Type, c.Data) + if err != nil { + t.Fatalf("(%v).Marshal() = %v", tt.cs, err) + } + } + if !bytes.Equal(ww, w) { + t.Fatalf("got %#v; want %#v", ww, w) + } + + ws := [][]byte{w} + if tailPadLen > 0 { + // Test a message with no tail padding. + nopad := w[:len(w)-tailPadLen] + ws = append(ws, [][]byte{nopad}...) 
+ } + for _, w := range ws { + ms, err := socket.ControlMessage(w).Parse() + if err != nil { + t.Fatalf("(%v).Parse() = %v", tt.cs, err) + } + for i, m := range ms { + lvl, typ, dataLen, err := m.ParseHeader() + if err != nil { + t.Fatalf("(%v).ParseHeader() = %v", tt.cs, err) + } + if lvl != tt.cs[i].Level || typ != tt.cs[i].Type || dataLen != len(tt.cs[i].Data) { + t.Fatalf("%v: got %d, %d, %d; want %d, %d, %d", tt.cs[i], lvl, typ, dataLen, tt.cs[i].Level, tt.cs[i].Type, len(tt.cs[i].Data)) + } + } + } + } +} + +func TestUDP(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + + t.Run("Message", func(t *testing.T) { + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: bytes.SplitAfter(data, []byte("-")), + Addr: c.LocalAddr(), + } + if err := cc.SendMsg(&wm, 0); err != nil { + t.Fatal(err) + } + b := make([]byte, 32) + rm := socket.Message{ + Buffers: [][]byte{b[:1], b[1:3], b[3:7], b[7:11], b[11:]}, + } + if err := cc.RecvMsg(&rm, 0); err != nil { + t.Fatal(err) + } + if !bytes.Equal(b[:rm.N], data) { + t.Fatalf("got %#v; want %#v", b[:rm.N], data) + } + }) + switch runtime.GOOS { + case "android", "linux": + t.Run("Messages", func(t *testing.T) { + data := []byte("HELLO-R-U-THERE") + wmbs := bytes.SplitAfter(data, []byte("-")) + wms := []socket.Message{ + {Buffers: wmbs[:1], Addr: c.LocalAddr()}, + {Buffers: wmbs[1:], Addr: c.LocalAddr()}, + } + n, err := cc.SendMsgs(wms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(wms) { + t.Fatalf("got %d; want %d", n, len(wms)) + } + b := make([]byte, 32) + rmbs := [][][]byte{{b[:len(wmbs[0])]}, {b[len(wmbs[0]):]}} + rms := []socket.Message{ + {Buffers: rmbs[0]}, + {Buffers: rmbs[1]}, + } + n, err = cc.RecvMsgs(rms, 0) + if err != nil { + t.Fatal(err) + } + if n != len(rms) { + t.Fatalf("got %d; want %d", n, len(rms)) + } + nn := 0 + for i := 0; i < n; i++ { + nn += rms[i].N + } + if !bytes.Equal(b[:nn], data) { + t.Fatalf("got %#v; want %#v", b[:nn], data) + } + }) + } + + // The behavior of transmission for a zero-byte payload depends + // on each platform implementation. Some may transmit only the + // protocol header and options, others may transmit nothing. + // We test only that SendMsg and SendMsgs will not crash with + // empty buffers.
+ wm := socket.Message{ + Buffers: [][]byte{{}}, + Addr: c.LocalAddr(), + } + cc.SendMsg(&wm, 0) + wms := []socket.Message{ + {Buffers: [][]byte{{}}, Addr: c.LocalAddr()}, + } + cc.SendMsgs(wms, 0) +} + +func BenchmarkUDP(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + b.Fatal(err) + } + data := []byte("HELLO-R-U-THERE") + wm := socket.Message{ + Buffers: [][]byte{data}, + Addr: c.LocalAddr(), + } + rm := socket.Message{ + Buffers: [][]byte{make([]byte, 128)}, + OOB: make([]byte, 128), + } + + for M := 1; M <= 1<<9; M = M << 1 { + b.Run(fmt.Sprintf("Iter-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + for j := 0; j < M; j++ { + if err := cc.SendMsg(&wm, 0); err != nil { + b.Fatal(err) + } + if err := cc.RecvMsg(&rm, 0); err != nil { + b.Fatal(err) + } + } + } + }) + switch runtime.GOOS { + case "android", "linux": + wms := make([]socket.Message, M) + for i := range wms { + wms[i].Buffers = [][]byte{data} + wms[i].Addr = c.LocalAddr() + } + rms := make([]socket.Message, M) + for i := range rms { + rms[i].Buffers = [][]byte{make([]byte, 128)} + rms[i].OOB = make([]byte, 128) + } + b.Run(fmt.Sprintf("Batch-%d", M), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := cc.SendMsgs(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := cc.RecvMsgs(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + } + } +} diff --git a/vendor/golang.org/x/net/internal/socket/socket_test.go b/vendor/golang.org/x/net/internal/socket/socket_test.go new file mode 100644 index 0000000..bf3751b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket_test.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket_test + +import ( + "net" + "runtime" + "syscall" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/internal/socket" +) + +func TestSocket(t *testing.T) { + t.Run("Option", func(t *testing.T) { + testSocketOption(t, &socket.Option{Level: syscall.SOL_SOCKET, Name: syscall.SO_RCVBUF, Len: 4}) + }) +} + +func testSocketOption(t *testing.T, so *socket.Option) { + c, err := nettest.NewLocalPacketListener("udp") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + t.Fatal(err) + } + const N = 2048 + if err := so.SetInt(cc, N); err != nil { + t.Fatal(err) + } + n, err := so.GetInt(cc) + if err != nil { + t.Fatal(err) + } + if n < N { + t.Fatalf("got %d; want greater than or equal to %d", n, N) + } +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 0000000..4f0eead --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. 
+ NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 0000000..f13e14f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 0000000..f723fa3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 0000000..b17d223 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 0000000..b17d223 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 0000000..1559521 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
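roundup in sys.go above is the standard power-of-two alignment trick: (l + a - 1) & ^(a - 1) rounds l up to the next multiple of the kernel alignment probed in init. ControlMessageSpace in socket.go combines two such roundings, aligned header plus aligned data. A small sketch, assuming an alignment of 8 as probed on typical 64-bit platforms:

package main

import "fmt"

const kernelAlign = 8 // assumed; the package derives it via probeProtocolStack

func roundup(l int) int {
	return (l + kernelAlign - 1) & ^(kernelAlign - 1)
}

func main() {
	const sizeofCmsghdr = 0x10 // 16 bytes on 64-bit Linux
	for _, dataLen := range []int{0, 1, 4, 8, 12} {
		// mirrors ControlMessageSpace: aligned header + aligned data
		fmt.Printf("dataLen=%2d -> space=%d\n", dataLen, roundup(sizeofCmsghdr)+roundup(dataLen))
	}
	// prints 16, 24, 24, 24 and 32
}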
+ +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 0000000..235b2cc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 0000000..93e7d75 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 0000000..9decee2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 0000000..d753b43 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 0000000..b670894 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 0000000..9c0d740 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 0000000..071a4ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 0000000..071a4ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 0000000..9c0d740 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 0000000..21c1e3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 0000000..21c1e3f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 0000000..327979e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 
0000000..06d7562 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 0000000..431851c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 0000000..dc130c2 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
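The sys_posix.go file that follows hand-marshals sockaddr bytes instead of going through syscall.Sockaddr: the address family lands in the first two bytes (native-endian on Linux-style platforms; a length byte plus a family byte on the BSDs), the port sits big-endian at offset 2, and the IPv4 address at offset 4. A sketch of the Linux-style IPv4 branch, assuming a little-endian machine:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

const (
	afINET             = 0x2  // the package's sysAF_INET on Linux
	sizeofSockaddrInet = 0x10 // matches the generated constant
)

// marshal mimics the Linux-style branch of marshalSockaddr below.
func marshal(ip net.IP, port int) []byte {
	b := make([]byte, sizeofSockaddrInet)
	binary.LittleEndian.PutUint16(b[:2], afINET) // stands in for NativeEndian
	binary.BigEndian.PutUint16(b[2:4], uint16(port))
	copy(b[4:8], ip.To4())
	return b
}

func main() {
	fmt.Printf("% x\n", marshal(net.IPv4(127, 0, 0, 1), 5353))
	// 02 00 14 e9 7f 00 00 01 00 00 00 00 00 00 00 00
}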
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. 
+type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if !ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 0000000..cced74e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return 
int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 0000000..a18ac5e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 0000000..d9f06d0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 0000000..18eba30 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 0000000..54a470e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 0000000..26f8fef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + 
Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 0000000..e2987f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 0000000..26f8fef --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 0000000..e2987f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 
+ Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 0000000..c582abd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 0000000..04a2488 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 0000000..35c7cb9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct 
{ + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 0000000..04a2488 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + 
Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + 
Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 0000000..4302069 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr 
[4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 0000000..1502f6c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + 
Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 0000000..db60491 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 0000000..2a1a799 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 0000000..206ea2d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + 
Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 0000000..1c83636 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 0000000..a6c0bf4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 0000000..1c83636 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family 
uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 0000000..327c632 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000..685f0e7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. 
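
Float is the simplest Observable: Add is plain addition and Multiply a scaling. A minimal in-package sketch of the interface round trip (hypothetical helper; the package is internal, so it cannot be imported from outside x/net):

    package timeseries

    import "fmt"

    func exampleFloatObservable() {
        a, b := Float(2), Float(3)
        var o Observable = &a
        o.Add(&b)       // a is now 5
        o.Multiply(0.5) // a is now 2.5
        fmt.Println(o)  // prints 2.5 via (*Float).String
    }
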
+func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
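
Bucketing follows the Clock's notion of now, so deterministic callers (like this package's own tests further down) inject a fake clock. A sketch, with fixedClock as a hypothetical stand-in:

    package timeseries

    import "time"

    // fixedClock is a Clock whose time is advanced by hand.
    type fixedClock struct{ t time.Time }

    func (c *fixedClock) Time() time.Time { return c.t }

    func exampleDeterministicAdds() {
        clk := &fixedClock{t: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)}
        ts := NewTimeSeriesWithClock(NewFloat, clk)
        for i := 0; i < 10; i++ {
            v := Float(1)
            ts.AddWithTime(&v, clk.t) // lands in the pending bucket, then the levels
            clk.t = clk.t.Add(time.Second)
        }
        _ = ts.Recent(10 * time.Second) // sums the ten observations
    }
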
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
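
Latest and LatestBuckets walk the circular buffer backward from the newest bucket, wrapping at index zero. An in-package sketch of inspecting recent buckets, assuming Float observations:

    package timeseries

    func exampleLatestBuckets(ts *timeSeries) {
        // Newest-first copies of the five most recent buckets of the
        // finest level (one-second resolution for a TimeSeries).
        for _, b := range ts.LatestBuckets(0, 5) {
            _ = b.(*Float).Value() // per-bucket sum
        }
    }
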
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
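	// If the requested window begins after this level's oldest bucket,
	// whole source buckets are skipped up front; the integer division
	// floors, so srcStart stays on a bucket boundary at or before dstStart.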
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
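
An in-package sketch of the two-level series (hypothetical helper):

    package timeseries

    func exampleMinuteHour() {
        s := NewMinuteHourSeries(NewFloat) // 60 buckets at 1s and 1m resolution
        v := Float(1)
        s.Add(&v)
        _ = s.Minute() // sum of the newest 60 one-second buckets
        _ = s.Hour()   // sum of the newest 60 one-minute buckets
    }
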
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go new file mode 100644 index 0000000..66325a9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + 
checkApproximate(t, l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 0000000..b445499 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. 
+// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. 
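
A receive-side usage sketch for these batch methods over loopback UDP (go1.9+, where Message aliases socket.Message; addresses and buffer sizes are arbitrary):

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        c, err := net.ListenPacket("udp4", "127.0.0.1:0")
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        p := ipv4.NewPacketConn(c)

        // One fixed-size buffer per message slot.
        ms := make([]ipv4.Message, 8)
        for i := range ms {
            ms[i].Buffers = [][]byte{make([]byte, 1500)}
        }
        // Blocks until at least one datagram arrives; a single
        // recvmmsg(2) on Linux, one message per call elsewhere.
        n, err := p.ReadBatch(ms, 0)
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range ms[:n] {
            log.Printf("%d bytes from %v", m.N, m.Addr)
        }
    }
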
+// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/bpf_test.go b/vendor/golang.org/x/net/ipv4/bpf_test.go new file mode 100644 index 0000000..b44da90 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/bpf_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv4" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + + l, err := net.ListenPacket("udp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv4.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. + bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp4", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 0000000..a2b02ca --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
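
Callers rarely build these buffers by hand; the usual path is to enable flags on the connection and receive an already-parsed *ControlMessage, as in this sketch:

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        c, err := net.ListenPacket("udp4", "127.0.0.1:0")
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        p := ipv4.NewPacketConn(c)

        // Ask the stack to deliver TTL, destination and interface index.
        cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
        if err := p.SetControlMessage(cf, true); err != nil {
            log.Fatal(err) // not supported on every platform
        }
        b := make([]byte, 1500)
        _, cm, src, err := p.ReadFrom(b)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("from %v: %v", src, cm) // "ttl=... src=... dst=... ifindex=..."
    }
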
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 0000000..77e7ad5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 0000000..425338f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 0000000..5a2f7d8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/control_test.go b/vendor/golang.org/x/net/ipv4/control_test.go new file mode 100644 index 0000000..f87fe12 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "testing" + + "golang.org/x/net/ipv4" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv4.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00\x00\x00\x00\x00\x14\x00\x00\x00", + "\f\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 0000000..e1ae816 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 0000000..ce55c66 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 0000000..c8f2e05 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_STRIPHDR = C.IP_STRIPHDR
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_BOUND_IF = C.IP_BOUND_IF
+ sysIP_PKTINFO = C.IP_PKTINFO
+ sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
+ sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+ sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+ sizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sizeofGroupReq = C.sizeof_struct_group_req
+ sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type inetPktinfo C.struct_in_pktinfo
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqn C.struct_ip_mreqn
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go
new file mode 100644
index 0000000..f30544e
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type ipMreq C.struct_ip_mreq
diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go
new file mode 100644
index 0000000..4dd57d8
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/param.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_SENDSRCADDR = C.IP_SENDSRCADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_ONESBCAST = C.IP_ONESBCAST
+ sysIP_BINDANY = C.IP_BINDANY
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_MINTTL = C.IP_MINTTL
+ sysIP_DONTFRAG = C.IP_DONTFRAG
+ sysIP_RECVTOS = C.IP_RECVTOS
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+
+ sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+ sizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sizeofGroupReq = C.sizeof_struct_group_req
+ sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqn C.struct_ip_mreqn
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go
new file mode 100644
index 0000000..beb1107
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_linux.go
@@ -0,0 +1,122 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <time.h>
+
+#include <linux/errqueue.h>
+#include <linux/icmp.h>
+#include <linux/in.h>
+#include <linux/filter.h>
+#include <sys/socket.h>
+*/
+import "C"
+
+const (
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_PKTINFO = C.IP_PKTINFO
+ sysIP_PKTOPTIONS = C.IP_PKTOPTIONS
+ sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER
+ sysIP_RECVERR = C.IP_RECVERR
+ sysIP_RECVTTL = C.IP_RECVTTL
+ sysIP_RECVTOS = C.IP_RECVTOS
+ sysIP_MTU = C.IP_MTU
+ sysIP_FREEBIND = C.IP_FREEBIND
+ sysIP_TRANSPARENT = C.IP_TRANSPARENT
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR
+ sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR
+ sysIP_MINTTL = C.IP_MINTTL
+ sysIP_NODEFRAG = C.IP_NODEFRAG
+ sysIP_UNICAST_IF = C.IP_UNICAST_IF
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_MSFILTER = C.IP_MSFILTER
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+ sysMCAST_MSFILTER = C.MCAST_MSFILTER
+ sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL
+
+ //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT
+ //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT
+ //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO
+ //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE
+ //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE
+ //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT
+
+ sysICMP_FILTER = C.ICMP_FILTER
+
+ sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE
+ sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL
+ sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP
+ sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6
+ sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS
+ sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING
+
+ sysSOL_SOCKET = C.SOL_SOCKET
+ sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
+
+ sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
+ sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+ sizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sizeofGroupReq = C.sizeof_struct_group_req
+ sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+ sizeofICMPFilter = C.sizeof_struct_icmp_filter
+
+ sizeofSockFprog = C.sizeof_struct_sock_fprog
+)
+
+type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type inetPktinfo C.struct_in_pktinfo
+
+type sockExtendedErr C.struct_sock_extended_err
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqn C.struct_ip_mreqn
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpFilter C.struct_icmp_filter
+
+type sockFProg C.struct_sock_fprog
+
+type sockFilter C.struct_sock_filter
diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go
new file mode 100644
index 0000000..8f8af1b
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type ipMreq C.struct_ip_mreq
diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go
new file mode 100644
index 0000000..8f8af1b
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type ipMreq C.struct_ip_mreq
diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go
new file mode 100644
index 0000000..aeb33e9
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go
@@ -0,0 +1,84 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+ sysIP_OPTIONS = C.IP_OPTIONS
+ sysIP_HDRINCL = C.IP_HDRINCL
+ sysIP_TOS = C.IP_TOS
+ sysIP_TTL = C.IP_TTL
+ sysIP_RECVOPTS = C.IP_RECVOPTS
+ sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+ sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+ sysIP_RETOPTS = C.IP_RETOPTS
+ sysIP_RECVIF = C.IP_RECVIF
+ sysIP_RECVSLLA = C.IP_RECVSLLA
+ sysIP_RECVTTL = C.IP_RECVTTL
+
+ sysIP_MULTICAST_IF = C.IP_MULTICAST_IF
+ sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL
+ sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP
+ sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP
+ sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+ sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE
+ sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE
+ sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP
+ sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+ sysIP_NEXTHOP = C.IP_NEXTHOP
+
+ sysIP_PKTINFO = C.IP_PKTINFO
+ sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
+ sysIP_DONTFRAG = C.IP_DONTFRAG
+
+ sysIP_BOUND_IF = C.IP_BOUND_IF
+ sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC
+ sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL
+ sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF
+
+ sysIP_REUSEADDR = C.IP_REUSEADDR
+ sysIP_DONTROUTE = C.IP_DONTROUTE
+ sysIP_BROADCAST = C.IP_BROADCAST
+
+ sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP
+ sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP
+ sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE
+ sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE
+ sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP
+ sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+
+ sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+ sizeofSockaddrInet = C.sizeof_struct_sockaddr_in
+ sizeofInetPktinfo = C.sizeof_struct_in_pktinfo
+
+ sizeofIPMreq = C.sizeof_struct_ip_mreq
+ sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source
+ sizeofGroupReq = C.sizeof_struct_group_req
+ sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type inetPktinfo C.struct_in_pktinfo
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go
new file mode 100644
index 0000000..54d77d5
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/dgramopt.go
@@ -0,0 +1,265 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+
+ "golang.org/x/net/bpf"
+)
+
+// MulticastTTL returns the time-to-live field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastTTL() (int, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoMulticastTTL]
+ if !ok {
+ return 0, errOpNoSupport
+ }
+ return so.GetInt(c.Conn)
+}
+
+// SetMulticastTTL sets the time-to-live field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastTTL(ttl int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoMulticastTTL]
+ if !ok {
+ return errOpNoSupport
+ }
+ return so.SetInt(c.Conn, ttl)
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoMulticastInterface]
+ if !ok {
+ return nil, errOpNoSupport
+ }
+ return so.getMulticastInterface(c.Conn)
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoMulticastInterface]
+ if !ok {
+ return errOpNoSupport
+ }
+ return so.setMulticastInterface(c.Conn, ifi)
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+ if !c.ok() {
+ return false, syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoMulticastLoopback]
+ if !ok {
+ return false, errOpNoSupport
+ }
+ on, err := so.GetInt(c.Conn)
+ if err != nil {
+ return false, err
+ }
+ return on == 1, nil
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoMulticastLoopback]
+ if !ok {
+ return errOpNoSupport
+ }
+ return so.SetInt(c.Conn, boolint(on))
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system-assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on platforms and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoJoinGroup]
+ if !ok {
+ return errOpNoSupport
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ return so.setGroup(c.Conn, ifi, grp)
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is an any-source group or a
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoLeaveGroup]
+ if !ok {
+ return errOpNoSupport
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ return so.setGroup(c.Conn, ifi, grp)
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system-assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on platforms and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ so, ok := sockOpts[ssoJoinSourceGroup]
+ if !ok {
+ return errOpNoSupport
+ }
+ grp := netAddrToIP4(group)
+ if grp == nil {
+ return errMissingAddress
+ }
+ src := netAddrToIP4(source)
+ if src == nil {
+ return errMissingAddress
+ }
+ return so.setSourceGroup(c.Conn, ifi, grp, src)
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 0000000..b43935a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. 
+//
+//
+// Unicasting
+//
+// The options for unicasting are available for net.TCPConn,
+// net.UDPConn and net.IPConn which are created as network connections
+// that use the IPv4 transport. When a single TCP connection carrying
+// a data flow of multiple packets needs to indicate the flow is
+// important, Conn is used to set the type-of-service field on the
+// IPv4 header for each packet.
+//
+// ln, err := net.Listen("tcp4", "0.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer ln.Close()
+// for {
+// c, err := ln.Accept()
+// if err != nil {
+// // error handling
+// }
+// go func(c net.Conn) {
+// defer c.Close()
+//
+// The outgoing packets will be labeled DiffServ assured forwarding
+// class 1 low drop precedence, known as AF11 packets.
+//
+// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil {
+// // error handling
+// }
+// if _, err := c.Write(data); err != nil {
+// // error handling
+// }
+// }(c)
+// }
+//
+//
+// Multicasting
+//
+// The options for multicasting are available for net.UDPConn and
+// net.IPConn which are created as network connections that use the
+// IPv4 transport. A few network facilities must be prepared before
+// you begin multicasting, at a minimum joining network interfaces and
+// multicast groups.
+//
+// en0, err := net.InterfaceByName("en0")
+// if err != nil {
+// // error handling
+// }
+// en1, err := net.InterfaceByIndex(911)
+// if err != nil {
+// // error handling
+// }
+// group := net.IPv4(224, 0, 0, 250)
+//
+// First, an application listens to an appropriate address with an
+// appropriate service port.
+//
+// c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c.Close()
+//
+// Second, the application joins multicast groups and starts listening
+// to the groups on the specified network interfaces. Note that the
+// service port for the transport layer protocol does not matter with
+// this operation as joining groups affects only network and link
+// layer protocols, such as IPv4 and Ethernet.
+//
+// p := ipv4.NewPacketConn(c)
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {
+// // error handling
+// }
+//
+// The application might exchange per packet control messages with the
+// protocol stack within the kernel. When the application needs a
+// destination address on an incoming packet,
+// SetControlMessage of PacketConn is used to enable control message
+// transmissions.
+//
+// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {
+// // error handling
+// }
+//
+// The application could identify whether the received packets are
+// of interest by using the control message that contains the
+// destination address of the received packet.
+//
+// b := make([]byte, 1500)
+// for {
+// n, cm, src, err := p.ReadFrom(b)
+// if err != nil {
+// // error handling
+// }
+// if cm.Dst.IsMulticast() {
+// if cm.Dst.Equal(group) {
+// // joined group, do something
+// } else {
+// // unknown group, discard
+// continue
+// }
+// }
+//
+// The application can also send both unicast and multicast packets.
+//
+// p.SetTOS(0x0)
+// p.SetTTL(16)
+// if _, err := p.WriteTo(data, nil, src); err != nil {
+// // error handling
+// }
+// dst := &net.UDPAddr{IP: group, Port: 1024}
+// for _, ifi := range []*net.Interface{en0, en1} {
+// if err := p.SetMulticastInterface(ifi); err != nil {
+// // error handling
+// }
+// p.SetMulticastTTL(2)
+// if _, err := p.WriteTo(data, nil, dst); err != nil {
+// // error handling
+// }
+// }
+// }
+//
+//
+// More multicasting
+//
+// An application that uses PacketConn or RawConn may join multiple
+// multicast groups. For example, a UDP listener with port 1024 might
+// join two different groups across two different network
+// interfaces by using:
+//
+// c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c.Close()
+// p := ipv4.NewPacketConn(c)
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {
+// // error handling
+// }
+//
+// It is possible for multiple UDP listeners that listen on the same
+// UDP port to join the same multicast group. The net package will
+// provide a socket that listens to a wildcard address with a reusable
+// UDP port when an appropriate multicast address prefix is passed to
+// net.ListenPacket or net.ListenUDP.
+//
+// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c1.Close()
+// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024")
+// if err != nil {
+// // error handling
+// }
+// defer c2.Close()
+// p1 := ipv4.NewPacketConn(c1)
+// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+// p2 := ipv4.NewPacketConn(c2)
+// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+//
+// It is also possible for the application to leave or rejoin a
+// multicast group on the network interface.
+//
+// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+// // error handling
+// }
+// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil {
+// // error handling
+// }
+//
+//
+// Source-specific multicasting
+//
+// An application that uses PacketConn or RawConn on an IGMPv3
+// supported platform is able to join source-specific multicast
+// groups.
+// The application may use JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup for the operation known as "include" mode,
+//
+// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)}
+// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}
+// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+// // error handling
+// }
+// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+// // error handling
+// }
+//
+// or JoinGroup, ExcludeSourceSpecificGroup,
+// IncludeSourceSpecificGroup and LeaveGroup for the operation known
+// as "exclude" mode.
+//
+// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)}
+// if err := p.JoinGroup(en0, &ssmgroup); err != nil {
+// // error handling
+// }
+// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {
+// // error handling
+// }
+// if err := p.LeaveGroup(en0, &ssmgroup); err != nil {
+// // error handling
+// }
+//
+// Note that what happens when an application running on a platform
+// without IGMPv3 support uses JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup depends on the platform implementation.
+// In general the platform tries to fall back to conversations using
+// IGMPv1 or IGMPv2 and starts listening to multicast traffic.
+// In the fallback case, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup may return an error.
+package ipv4 // import "golang.org/x/net/ipv4"
+
+// BUG(mikio): This package is not implemented on NaCl and Plan 9.
diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go
new file mode 100644
index 0000000..2ab8773
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/endpoint.go
@@ -0,0 +1,187 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/net/internal/socket"
+)
+
+// BUG(mikio): On Windows, the JoinSourceSpecificGroup,
+// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup methods of PacketConn and RawConn are
+// not implemented.
+
+// A Conn represents a network endpoint that uses the IPv4 transport.
+// It is used to control basic IP-level socket options such as TOS and
+// TTL.
+type Conn struct {
+ genericOpt
+}
+
+type genericOpt struct {
+ *socket.Conn
+}
+
+func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }
+
+// NewConn returns a new Conn.
+func NewConn(c net.Conn) *Conn {
+ cc, _ := socket.NewConn(c)
+ return &Conn{
+ genericOpt: genericOpt{Conn: cc},
+ }
+}
+
+// A PacketConn represents a packet network endpoint that uses the
+// IPv4 transport. It is used to control several IP-level socket
+// options including multicasting. It also provides datagram based
+// network I/O methods specific to the IPv4 and higher layer protocols
+// such as UDP.
+type PacketConn struct {
+ genericOpt
+ dgramOpt
+ payloadHandler
+}
+
+type dgramOpt struct {
+ *socket.Conn
+}
+
+func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil }
+
+// SetControlMessage sets the per packet IP-level socket options.
+func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.PacketConn.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+ if !c.payloadHandler.ok() {
+ return syscall.EINVAL
+ }
+ return c.payloadHandler.PacketConn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/example_test.go b/vendor/golang.org/x/net/ipv4/example_test.go new file mode 100644 index 0000000..ddc7577 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/example_test.go @@ -0,0 +1,224 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "0.0.0.0:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To4() != nil { + p := ipv4.NewConn(c) + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetTTL(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp4", "0.0.0.0:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { + log.Fatal(err) + } + + b := make([]byte, 1500) + for { + _, cm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !cm.Dst.IsMulticast() || !cm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, nil, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To4() != nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no A record found") + } + + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + + if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagSrc|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + if err := p.SetTTL(i); err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // multiple traffic-engineered paths for each hop. + // You may need to probe a few times to each hop. 
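+ // Time each probe from just before the echo request is
+ // written until the reply is read back below, so the
+ // measured RTT includes local send and receive overhead.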
+ begin := time.Now() + if _, err := p.WriteTo(wb, nil, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, cm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(1, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMessage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv4.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + case ipv4.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, cm) + return + default: + log.Printf("unknown ICMP message: %+v\n", rm) + } + } +} + +func ExampleRawConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 + if err != nil { + log.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + log.Fatal(err) + } + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.IPv4(224, 0, 0, 5)} + if err := r.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer r.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 24) // fake ospf header, you need to implement this + ospf[0] = 2 // version 2 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + iph := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 0xc0, // DSCP CS6 + TotalLen: ipv4.HeaderLen + len(ospf), + TTL: 1, + Protocol: 89, + Dst: allSPFRouters.IP.To4(), + } + + var cm *ipv4.ControlMessage + switch runtime.GOOS { + case "darwin", "linux": + cm = &ipv4.ControlMessage{IfIndex: en0.Index} + default: + if err := r.SetMulticastInterface(en0); err != nil { + log.Fatal(err) + } + } + if err := r.WriteTo(iph, ospf, cm); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 0000000..ffb44fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
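+//
+// Its outputs are the platform's zsys_*.go file (produced via
+// go tool cgo -godefs) and iana.go (derived from the IANA ICMP
+// parameters registry), as the genzsys and geniana functions below
+// show.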
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + 
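+ // The replacer strips filler words and punctuation so that
+ // each IANA description collapses into a Go identifier; for
+ // example, "Destination Unreachable" becomes
+ // "DestinationUnreachable".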
for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 0000000..119bf84 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 0000000..8bb0f0f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. 
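+//
+// A minimal construction sketch for use with Marshal (payloadLen
+// stands in for the actual payload size):
+//
+// h := &Header{
+// Version: Version,
+// Len: HeaderLen,
+// TotalLen: HeaderLen + payloadLen,
+// TTL: 64,
+// Protocol: 1, // ICMP for IPv4
+// Src: net.IPv4(192, 0, 2, 1),
+// Dst: net.IPv4(198, 51, 100, 1),
+// }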
+type Header struct {
+ Version int // protocol version
+ Len int // header length
+ TOS int // type-of-service
+ TotalLen int // packet total length
+ ID int // identification
+ Flags HeaderFlags // flags
+ FragOff int // fragment offset
+ TTL int // time-to-live
+ Protocol int // next protocol
+ Checksum int // checksum
+ Src net.IP // source address
+ Dst net.IP // destination address
+ Options []byte // options, extension headers
+}
+
+func (h *Header) String() string {
+ if h == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst)
+}
+
+// Marshal returns the binary encoding of h.
+func (h *Header) Marshal() ([]byte, error) {
+ if h == nil {
+ return nil, syscall.EINVAL
+ }
+ if h.Len < HeaderLen {
+ return nil, errHeaderTooShort
+ }
+ hdrlen := HeaderLen + len(h.Options)
+ b := make([]byte, hdrlen)
+ b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f))
+ b[1] = byte(h.TOS)
+ flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13)
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "netbsd":
+ socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+ socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+ case "freebsd":
+ if freebsdVersion < 1100000 {
+ socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+ socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+ } else {
+ binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+ binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+ }
+ default:
+ binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+ binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+ }
+ binary.BigEndian.PutUint16(b[4:6], uint16(h.ID))
+ b[8] = byte(h.TTL)
+ b[9] = byte(h.Protocol)
+ binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum))
+ if ip := h.Src.To4(); ip != nil {
+ copy(b[12:16], ip[:net.IPv4len])
+ }
+ if ip := h.Dst.To4(); ip != nil {
+ copy(b[16:20], ip[:net.IPv4len])
+ } else {
+ return nil, errMissingAddress
+ }
+ if len(h.Options) > 0 {
+ copy(b[HeaderLen:], h.Options)
+ }
+ return b, nil
+}
+
+// Parse parses b as an IPv4 header and stores the result in h.
+func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return errBufferTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/header_test.go b/vendor/golang.org/x/net/ipv4/header_test.go new file mode 100644 index 0000000..a246aee --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header_test.go @@ -0,0 +1,228 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "bytes" + "encoding/binary" + "net" + "reflect" + "runtime" + "strings" + "testing" + + "golang.org/x/net/internal/socket" +) + +type headerTest struct { + wireHeaderFromKernel []byte + wireHeaderToKernel []byte + wireHeaderFromTradBSDKernel []byte + wireHeaderToTradBSDKernel []byte + wireHeaderFromFreeBSD10Kernel []byte + wireHeaderToFreeBSD10Kernel []byte + *Header +} + +var headerLittleEndianTests = []headerTest{ + // TODO(mikio): Add platform dependent wire header formats when + // we support new platforms. 
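+ // Each case encodes one logical header in the three layouts the package must round-trip on little-endian hosts: the RFC 791 big-endian form (wireHeaderFrom/ToKernel) and two BSD forms that keep TotalLen and FragOff in host byte order; the traditional BSD kernel additionally reports TotalLen with the header length already subtracted (0xbedb rather than 0xbeef here). The trailing Header value is what every variant must parse back to.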
+ { + wireHeaderFromKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToKernel: []byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x45, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + }, + }, + + // with option headers + { + wireHeaderFromKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToKernel: []byte{ + 0x46, 0x01, 0xbe, 0xf3, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromTradBSDKernel: []byte{ + 0x46, 0x01, 0xdb, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToTradBSDKernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderFromFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + wireHeaderToFreeBSD10Kernel: []byte{ + 0x46, 0x01, 0xf3, 0xbe, + 0xca, 0xfe, 0xdc, 0x45, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + 0xff, 0xfe, 0xfe, 0xff, + }, + Header: &Header{ + Version: Version, + Len: HeaderLen + 4, + TOS: 1, + TotalLen: 0xbef3, + ID: 0xcafe, + Flags: DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + Options: []byte{0xff, 0xfe, 0xfe, 0xff}, + }, + }, +} + +func TestMarshalHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for non-little endian machine yet") + } + + for _, tt := range headerLittleEndianTests { + b, err := tt.Header.Marshal() + if err != nil { + t.Fatal(err) + } + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderToTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderToTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderToFreeBSD10Kernel + default: + wh = tt.wireHeaderToKernel + } + default: + wh = tt.wireHeaderToKernel + } + if !bytes.Equal(b, wh) { + t.Fatalf("got %#v; want %#v", b, wh) + } + } +} + +func TestParseHeader(t *testing.T) { + if socket.NativeEndian != binary.LittleEndian { + t.Skip("no test for big endian machine 
yet") + } + + for _, tt := range headerLittleEndianTests { + var wh []byte + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + wh = tt.wireHeaderFromTradBSDKernel + case "freebsd": + switch { + case freebsdVersion < 1000000: + wh = tt.wireHeaderFromTradBSDKernel + case 1000000 <= freebsdVersion && freebsdVersion < 1100000: + wh = tt.wireHeaderFromFreeBSD10Kernel + default: + wh = tt.wireHeaderFromKernel + } + default: + wh = tt.wireHeaderFromKernel + } + h, err := ParseHeader(wh) + if err != nil { + t.Fatal(err) + } + if err := h.Parse(wh); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tt.Header) { + t.Fatalf("got %#v; want %#v", h, tt.Header) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 0000000..a5052e3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,63 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 0000000..be10c94 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,34 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router 
solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 0000000..9902bb3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + icmpFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 0000000..6e1c5c8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 0000000..21bb29a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_test.go b/vendor/golang.org/x/net/ipv4/icmp_test.go new file mode 100644 index 0000000..3324b54 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_test.go @@ -0,0 +1,95 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var icmpStringTests = []struct { + in ipv4.ICMPType + out string +}{ + {ipv4.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, "<nil>"}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv4.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv4.ICMPType{ + ipv4.ICMPTypeDestinationUnreachable, + ipv4.ICMPTypeEchoReply, + ipv4.ICMPTypeTimeExceeded, + ipv4.ICMPTypeParameterProblem, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv4.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + + var f ipv4.ICMPFilter + f.SetAll(true) + f.Accept(ipv4.ICMPTypeEcho) + f.Accept(ipv4.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicast_test.go b/vendor/golang.org/x/net/ipv4/multicast_test.go new file mode 100644 index 0000000..bcf4973 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicast_test.go @@ -0,0 +1,334 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {"232.0.1.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp4", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv4.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "solaris", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + defer p.Close() + 
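+ // Per-case setup differs only in the kind of membership: an any-source JoinGroup for the RFC 4727 group, or a source-specific JoinSourceSpecificGroup for the RFC 5771 case, which requires IGMPv3 and is treated as fatal only on freebsd and linux.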
if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. + cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + p.SetMulticastTTL(i + 1) + if n, err := p.WriteTo(wb, nil, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + switch { + case m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} + +var rawConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 254)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 254)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnReadWriteMulticastICMP(t *testing.T) { + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + if tt.src == nil { + if err := r.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer r.LeaveGroup(ifi, tt.grp) + } else { + if err := r.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer r.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := r.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := 
r.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := r.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := r.MulticastLoopback(); err != nil { + t.Fatal(err) + } + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + Protocol: 1, + Dst: tt.grp.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + r.SetMulticastTTL(i + 1) + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + if rh, b, _, err := r.ReadFrom(rb); err != nil { + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } + switch { + case (rh.Dst.IsLoopback() || rh.Dst.IsLinkLocalUnicast() || rh.Dst.IsGlobalUnicast()) && m.Type == ipv4.ICMPTypeEchoReply && m.Code == 0: // net.inet.icmp.bmcastecho=1 + case rh.Dst.IsMulticast() && m.Type == ipv4.ICMPTypeEcho && m.Code == 0: // net.inet.icmp.bmcastecho=0 + default: + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastlistener_test.go b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go new file mode 100644 index 0000000..e43fbbe --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastlistener_test.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, // see RFC 4727 + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}, + &net.UDPAddr{IP: net.IPv4(224, 0, 0, 254)}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp4", "0.0.0.0:0") // wildcard address with no reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv4.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp4", "224.0.0.0:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp4", net.JoinHostPort("224.0.0.0", port)) // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv4.PacketConn + ps[0] = ipv4.NewPacketConn(c1) + ps[1] = ipv4.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + port := "0" + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp4", net.JoinHostPort(ip.String(), port)) // unicast address with non-reusable port + if err != nil { + // The listen may fail when the service is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel.
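+ // Log the error and continue with the next interface rather than failing the whole test.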
+ t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv4.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip4", &ifi); !ok { + continue + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := r.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSingleRawConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if testing.Short() { + t.Skip("to avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.IPv4(224, 0, 0, 254)} // see RFC 4727 + type ml struct { + c *ipv4.RawConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip4", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip4:253", ip.String()) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + if err := r.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{r, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go new file mode 100644 index 0000000..f7efac2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/multicastsockopt_test.go @@ -0,0 +1,195 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp4", "", "224.0.0.0:0", &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}, nil}, // see RFC 4727 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {"udp4", "", "232.0.0.0:0", &net.UDPAddr{IP: net.IPv4(232, 0, 1, 249)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 + {"ip4", ":icmp", "0.0.0.0", &net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +var rawConnMulticastSocketOptionTests = []struct { + grp, src net.Addr +}{ + {&net.IPAddr{IP: net.IPv4(224, 0, 0, 250)}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.IPv4(232, 0, 1, 250)}, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}}, // see RFC 5771 +} + +func TestRawConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range rawConnMulticastSocketOptionTests { + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, r, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, r, ifi, tt.grp, tt.src) + } + } +} + +type testIPv4MulticastConn interface { + MulticastTTL() (int, error) + SetMulticastTTL(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp net.Addr) { + const ttl = 255 + if err := c.SetMulticastTTL(ttl); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastTTL(); err != nil { + t.Error(err) + return + } else if v != ttl { + t.Errorf("got %v; want %v", v, ttl) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + 
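+ // Unlike TTL above, loopback is toggled both ways to verify that the option round-trips through the kernel.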
t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv4MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support IGMPv2/3 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 0000000..f00f5b0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,69 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. +func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. 
+// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = <must be specified> +// Len = <must be specified> +// TOS = <must be specified> +// TotalLen = <must be specified> +// ID = platform sets an appropriate value if ID is zero +// FragOff = <must be specified> +// TTL = <must be specified> +// Protocol = <must be specified> +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = <must be specified> +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return syscall.EINVAL + } + return c.writeTo(h, p, cm) +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/golang.org/x/net/ipv4/packet_go1_8.go new file mode 100644 index 0000000..b47d186 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_8.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4 + +import "net" + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + n, nn, _, src, err := c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if nn > 0 { + cm = new(ControlMessage) + if err := cm.Parse(oob[:nn]); err != nil { + return nil, nil, nil, err + } + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + oob := cm.Marshal() + wh, err := h.Marshal() + if err != nil { + return err + } + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/golang.org/x/net/ipv4/packet_go1_9.go new file mode 100644 index 0000000..082c36d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
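+// +// The go1.9 variant below funnels each read and write through a single socket.Message, so the IPv4 header, payload and control data travel in one RecvMsg or SendMsg call.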
+ +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 0000000..f95f811 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 0000000..3f06d76 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. 
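+// A typical receive loop over an ipv4.PacketConn looks like the sketch below (illustrative; cm is nil when no control flags were enabled via SetControlMessage): +// +// b := make([]byte, 1500) +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // handle error +// } +// payload := b[:n] // cm carries TTL, destination and interface when requested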
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 0000000..d26ccd9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 0000000..2f19311 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 0000000..3926de7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. 
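+// On this build (nacl, plan9 and windows) the implementation below ignores cm entirely and forwards the write to the embedded net.PacketConn.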
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go new file mode 100644 index 0000000..1cd926e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_8_test.go @@ -0,0 +1,248 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func 
TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go new file mode 100644 index 0000000..365de02 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_go1_9_test.go @@ -0,0 +1,388 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv4_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + b.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv4.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv4.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv4.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err 
:= p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph, err := (&ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TotalLen: ipv4.HeaderLen + len(payload), + TTL: 1, + Protocol: iana.ProtocolReserved, + Src: net.IPv4(192, 0, 2, 1), + Dst: net.IPv4(192, 0, 2, 254), + }).Marshal() + if err != nil { + t.Fatal(err) + } + greh := []byte{0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolGRE), "127.0.0.1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv4.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv4.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv4.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv4.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + var b []byte + if _, ok := dst.(*net.IPAddr); ok { + var h ipv4.Header + if err := h.Parse(ms[0].Buffers[0][:ms[0].N]); err != nil { + t.Error(err) + return + } + b = ms[0].Buffers[0][h.Len:ms[0].N] + } else { + b = ms[0].Buffers[0][:ms[0].N] + } + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if 
strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv4.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/readwrite_test.go b/vendor/golang.org/x/net/ipv4/readwrite_test.go new file mode 100644 index 0000000..3896a8a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/readwrite_test.go @@ -0,0 +1,140 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
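The go1.9 file above benchmarks the batch Message API alongside the per-packet calls. A hedged sketch of driving that API outside a benchmark, over loopback UDP; note that on platforms other than Linux, ReadBatch and WriteBatch process one message per call:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	cf := ipv4.FlagTTL | ipv4.FlagInterface
	if err := p.SetControlMessage(cf, true); err != nil {
		log.Printf("control messages unavailable: %v", err) // the tests skip in this case
	}
	wms := []ipv4.Message{{
		Buffers: [][]byte{[]byte("HELLO-R-U-THERE")},
		Addr:    c.LocalAddr(),
	}}
	if _, err := p.WriteBatch(wms, 0); err != nil {
		log.Fatal(err)
	}
	rms := []ipv4.Message{{
		Buffers: [][]byte{make([]byte, 128)},
		OOB:     ipv4.NewControlMessage(cf),
	}}
	n, err := p.ReadBatch(rms, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d message(s); first payload %q", n, rms[0].Buffers[0][:rms[0].N])
}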
+ +package ipv4_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv4UDP", func(b *testing.B) { + p := ipv4.NewPacketConn(c) + cf := ipv4.FlagTTL | ipv4.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv4.ControlMessage{TTL: 1} + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv4.ControlMessage{ + Src: net.IPv4(127, 0, 0, 1), + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 0000000..22e90c0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
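readwrite_test.go above exercises the per-packet control-message path. The same round trip in plain form, as a sketch over loopback UDP; SetControlMessage can fail on platforms missing the ancillary-data options registered in the sockOpts tables that follow:

package main

import (
	"log"
	"net"
	"time"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface
	if err := p.SetControlMessage(cf, true); err != nil {
		log.Printf("control messages unavailable: %v", err)
	}
	if _, err := p.WriteTo([]byte("HELLO-R-U-THERE"), nil, c.LocalAddr()); err != nil {
		log.Fatal(err)
	}
	rb := make([]byte, 128)
	if err := p.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
		log.Fatal(err)
	}
	n, cm, src, err := p.ReadFrom(rb)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v, control message: %v", n, src, cm)
}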
+ +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // inbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for a sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 0000000..e96955b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 
0000000..23249b7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 0000000..0388cba --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func 
netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 0000000..f391920 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 0000000..1f24f69 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 0000000..0711d3d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 0000000..9f30b73 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 0000000..9a21320 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 0000000..58256dd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 0000000..e8fb191 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
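sys_bpf.go above wires ssoAttachFilter to SO_ATTACH_FILTER on Linux, with a no-op stub elsewhere. A sketch of the public entry point, using a deliberately trivial classic-BPF program so the assembly is unambiguous:

package main

import (
	"log"
	"net"

	"golang.org/x/net/bpf"
	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	// Trivial filter: accept up to 128 bytes of every inbound packet.
	prog, err := bpf.Assemble([]bpf.Instruction{
		bpf.RetConstant{Val: 128},
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := p.SetBPF(prog); err != nil {
		// Non-Linux platforms hit the sys_bpf_stub.go path and report no support.
		log.Printf("SetBPF: %v", err)
	}
}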
+ +package ipv4 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 0000000..859764f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
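The darwin/bsd group-membership plumbing above surfaces through JoinGroup/LeaveGroup on PacketConn. A sketch; the interface name "en0" is purely illustrative:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	ifi, err := net.InterfaceByName("en0") // hypothetical interface name
	if err != nil {
		log.Fatal(err)
	}
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	grp := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}
	if err := p.JoinGroup(ifi, grp); err != nil { // IP_ADD_MEMBERSHIP or MCAST_JOIN_GROUP, per the tables above
		log.Fatal(err)
	}
	defer p.LeaveGroup(ifi, grp)
	// ... read datagrams sent to 224.0.0.250 here ...
}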
+ +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 0000000..b800324 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
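The TOS/TTL entries recurring in these per-OS tables back the sticky option setters on ipv4.Conn, exercised by unicastsockopt_test.go later in this diff. A sketch against a placeholder peer address:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.Dial("tcp4", "192.0.2.1:80") // placeholder peer address
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	cc := ipv4.NewConn(c)
	if err := cc.SetTOS(0x28); err != nil { // DSCP AF11; maps to sysIP_TOS above
		log.Fatal(err)
	}
	if err := cc.SetTTL(64); err != nil { // maps to sysIP_TTL above
		log.Fatal(err)
	}
	if v, err := cc.TTL(); err == nil {
		log.Printf("TTL is now %d", v)
	}
}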
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 0000000..60defe1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 0000000..832fef1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
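sys_linux.go above registers the MCAST_JOIN_SOURCE_GROUP family of options; the corresponding public helpers are JoinSourceSpecificGroup and friends, defined elsewhere in the package (not shown in this diff). A hedged sketch, with "eth0" and both addresses illustrative:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	ifi, err := net.InterfaceByName("eth0") // hypothetical interface name
	if err != nil {
		log.Fatal(err)
	}
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	grp := &net.UDPAddr{IP: net.IPv4(232, 0, 1, 1)} // 232/8 is the SSM block
	src := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 7)} // illustrative source address
	if err := p.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
		log.Fatal(err) // errOpNoSupport on platforms using sys_ssmreq_stub.go
	}
	defer p.LeaveSourceSpecificGroup(ifi, grp, src)
}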
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 0000000..ae5704e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 0000000..e6b7623 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 0000000..4f07647 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 0000000..b0913d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/unicast_test.go b/vendor/golang.org/x/net/ipv4/unicast_test.go new file mode 100644 index 0000000..02c089f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicast_test.go @@ -0,0 +1,247 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
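unicast_test.go below drives an ICMP echo round trip over a raw socket. The same flow outside a test, sketched with golang.org/x/net/icmp; icmp.ListenPacket is that package's helper (not shown in this diff), and raw sockets require elevated privileges:

package main

import (
	"log"
	"net"
	"os"
	"time"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

func main() {
	c, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		log.Fatal(err) // typically a permission error without CAP_NET_RAW or root
	}
	defer c.Close()

	wm := icmp.Message{
		Type: ipv4.ICMPTypeEcho, Code: 0,
		Body: &icmp.Echo{
			ID: os.Getpid() & 0xffff, Seq: 1,
			Data: []byte("HELLO-R-U-THERE"),
		},
	}
	wb, err := wm.Marshal(nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := c.WriteTo(wb, &net.IPAddr{IP: net.IPv4(127, 0, 0, 1)}); err != nil {
		log.Fatal(err)
	}
	rb := make([]byte, 1500)
	if err := c.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
		log.Fatal(err)
	}
	n, peer, err := c.ReadFrom(rb)
	if err != nil {
		log.Fatal(err)
	}
	rm, err := icmp.ParseMessage(1, rb[:n]) // 1 == iana.ProtocolICMP
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %v from %v", rm.Type, peer)
}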
+ +package ipv4_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := nettest.NewLocalPacketListener("udp4") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv4.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + p := ipv4.NewPacketConn(c) + defer p.Close() + cf := ipv4.FlagDst | ipv4.FlagInterface + if runtime.GOOS != "solaris" { + // Solaris never allows to modify ICMP properties. 
+ cf |= ipv4.FlagTTL + } + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + p.SetTTL(i + 1) + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, nil, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + loop: + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + if err != nil { + t.Fatal(err) + } + if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} + +func TestRawConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + dst, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + defer r.Close() + cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface + + for i, toggle := range []bool{true, false, true} { + wb, err := (&icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(nil) + if err != nil { + t.Fatal(err) + } + wh := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: i + 1, + TotalLen: ipv4.HeaderLen + len(wb), + TTL: i + 1, + Protocol: 1, + Dst: dst.IP, + } + if err := r.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := r.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if err := r.WriteTo(wh, wb, nil); err != nil { + t.Fatal(err) + } + rb := make([]byte, ipv4.HeaderLen+128) + loop: + if err := r.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if _, b, _, err := r.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + if err != nil { + t.Fatal(err) + } 
+ if runtime.GOOS == "linux" && m.Type == ipv4.ICMPTypeEcho { + // On Linux we must handle own sent packets. + goto loop + } + if m.Type != ipv4.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv4.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go new file mode 100644 index 0000000..db5213b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/unicastsockopt_test.go @@ -0,0 +1,148 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + ln, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp4", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp4", "", "127.0.0.1:0"}, + {"ip4", ":icmp", "127.0.0.1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip4" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv4.NewPacketConn(c)) + } +} + +func TestRawConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip4", net.FlagUp|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + c, err := net.ListenPacket("ip4:icmp", "127.0.0.1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + r, err := ipv4.NewRawConn(c) + if err != nil { + t.Fatal(err) + } + + testUnicastSocketOptions(t, r) +} + +type testIPv4UnicastConn interface { + TOS() (int, error) + SetTOS(int) error + TTL() (int, error) + SetTTL(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv4UnicastConn) { + tos := iana.DiffServCS0 | iana.NotECNTransport + switch runtime.GOOS { + case "windows": + // IP_TOS option is supported on Windows 8 and beyond. 
+ t.Skipf("not supported on %s", runtime.GOOS) + } + + if err := c.SetTOS(tos); err != nil { + t.Fatal(err) + } + if v, err := c.TOS(); err != nil { + t.Fatal(err) + } else if v != tos { + t.Fatalf("got %v; want %v", v, tos) + } + const ttl = 255 + if err := c.SetTTL(ttl); err != nil { + t.Fatal(err) + } + if v, err := c.TTL(); err != nil { + t.Fatal(err) + } else if v != ttl { + t.Fatalf("got %v; want %v", v, ttl) + } +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 0000000..c07cc88 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 0000000..c4365e9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff 
--git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 0000000..8c4aec9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 0000000..4b10b7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 
0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 0000000..4b10b7c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + 
sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + 
sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER 
= 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + 
sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + 
Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type 
ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct 
{ + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 0000000..c0260f0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 0000000..f65bd9a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + 
sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + 
sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 0000000..9c967ea --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 
0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 0000000..fd3624d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 0000000..12f36be --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 0000000..0a3875c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Created by cgo -godefs - 
DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 0000000..4f5fe68 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. 
+// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/bpf_test.go b/vendor/golang.org/x/net/ipv6/bpf_test.go new file mode 100644 index 0000000..8253e1f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/bpf_test.go @@ -0,0 +1,96 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "runtime" + "testing" + "time" + + "golang.org/x/net/bpf" + "golang.org/x/net/ipv6" +) + +func TestBPF(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + l, err := net.ListenPacket("udp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + + p := ipv6.NewPacketConn(l) + + // This filter accepts UDP packets whose first payload byte is + // even. + prog, err := bpf.Assemble([]bpf.Instruction{ + // Load the first byte of the payload (skipping UDP header). + bpf.LoadAbsolute{Off: 8, Size: 1}, + // Select LSB of the byte. + bpf.ALUOpConstant{Op: bpf.ALUOpAnd, Val: 1}, + // Byte is even? + bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0, SkipFalse: 1}, + // Accept. + bpf.RetConstant{Val: 4096}, + // Ignore. 
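+ // A zero return drops the packet, so datagrams with an odd first
+ // byte never reach the reads below.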
+ bpf.RetConstant{Val: 0}, + }) + if err != nil { + t.Fatalf("compiling BPF: %s", err) + } + + if err = p.SetBPF(prog); err != nil { + t.Fatalf("attaching filter to Conn: %s", err) + } + + s, err := net.Dial("udp6", l.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer s.Close() + go func() { + for i := byte(0); i < 10; i++ { + s.Write([]byte{i}) + } + }() + + l.SetDeadline(time.Now().Add(2 * time.Second)) + seen := make([]bool, 5) + for { + var b [512]byte + n, _, err := l.ReadFrom(b[:]) + if err != nil { + t.Fatalf("reading from listener: %s", err) + } + if n != 1 { + t.Fatalf("unexpected packet length, want 1, got %d", n) + } + if b[0] >= 10 { + t.Fatalf("unexpected byte, want 0-9, got %d", b[0]) + } + if b[0]%2 != 0 { + t.Fatalf("got odd byte %d, wanted only even bytes", b[0]) + } + seen[b[0]/2] = true + + seenAll := true + for _, v := range seen { + if !v { + seenAll = false + break + } + } + if seenAll { + break + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 0000000..2da6444 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. 
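+ // Fields left at their zero values are omitted by Marshal, so a
+ // partially populated ControlMessage is always safe to pass.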
+ // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
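+// +// For example (a sketch), a buffer for receiving the hop limit and +// interface index out of band might be allocated with: +// +// oob := NewControlMessage(FlagHopLimit | FlagInterface)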
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 0000000..9fd9eb1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 0000000..eec529c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 0000000..a045f28 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/control_test.go b/vendor/golang.org/x/net/ipv6/control_test.go new file mode 100644 index 0000000..c186ca9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "testing" + + "golang.org/x/net/ipv6" +) + +func TestControlMessageParseWithFuzz(t *testing.T) { + var cm ipv6.ControlMessage + for _, fuzz := range []string{ + "\f\x00\x00\x00)\x00\x00\x00.\x00\x00\x00", + "\f\x00\x00\x00)\x00\x00\x00,\x00\x00\x00", + } { + cm.Parse([]byte(fuzz)) + } +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 0000000..6651506 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 0000000..ef2563b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 0000000..55ddc11 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 0000000..a4c383a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 0000000..53e6253 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 0000000..3308cb2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSOL_SOCKET = 
C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6FlowlabelReq C.struct_in6_flowlabel_req + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 0000000..be9ceb9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go new 
file mode 100644 index 0000000..177ddf8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 0000000..0f8ce2b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,114 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include + +#include +#include +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 0000000..703dafe --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,302 @@ +// 
Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group.
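+// +// A typical call site (sketch, mirroring the JoinGroup examples in +// the package documentation; p is a PacketConn that joined group on +// interface en0): +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// }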
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errOpNoSupport + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is true, +// the offset should be an offset in bytes into the data of where the +// checksum field is located.
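+// +// For example, OSPFv3 carries its checksum 12 bytes into the message, +// so the OSPF example in this package enables kernel checksumming +// with: +// +// if err := p.SetChecksum(true, 12); err != nil { +// // error handling +// }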
+func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errOpNoSupport + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 0000000..664a97d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups.
+// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP.
+// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 0000000..0624c17 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. 
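+// +// A minimal sketch of use, marking an accepted TCP connection c with +// DSCP AF11 as in the package examples: +// +// p := ipv6.NewConn(c) +// if err := p.SetTrafficClass(0x28); err != nil { +// // error handling +// }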
+type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/example_test.go b/vendor/golang.org/x/net/ipv6/example_test.go new file mode 100644 index 0000000..e761aa2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/example_test.go @@ -0,0 +1,216 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "fmt" + "log" + "net" + "os" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/ipv6" +) + +func ExampleConn_markingTCP() { + ln, err := net.Listen("tcp", "[::]:1024") + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + for { + c, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + if c.RemoteAddr().(*net.TCPAddr).IP.To16() != nil && c.RemoteAddr().(*net.TCPAddr).IP.To4() == nil { + p := ipv6.NewConn(c) + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 + log.Fatal(err) + } + if err := p.SetHopLimit(128); err != nil { + log.Fatal(err) + } + } + if _, err := c.Write([]byte("HELLO-R-U-THERE-ACK")); err != nil { + log.Fatal(err) + } + }(c) + } +} + +func ExamplePacketConn_servingOneShotMulticastDNS() { + c, err := net.ListenPacket("udp6", "[::]:5353") // mDNS over UDP + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + mDNSLinkLocal := net.UDPAddr{IP: net.ParseIP("ff02::fb")} + if err := p.JoinGroup(en0, &mDNSLinkLocal); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &mDNSLinkLocal) + if err := p.SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + b := make([]byte, 1500) + for { + _, rcm, peer, err := p.ReadFrom(b) + if err != nil { + log.Fatal(err) + } + if !rcm.Dst.IsMulticast() || !rcm.Dst.Equal(mDNSLinkLocal.IP) { + continue + } + wcm.IfIndex = rcm.IfIndex + answers := []byte("FAKE-MDNS-ANSWERS") // fake mDNS answers, you need to implement this + if _, err := p.WriteTo(answers, &wcm, peer); err != nil { + log.Fatal(err) + } + } +} + +func ExamplePacketConn_tracingIPPacketRoute() { + // Tracing an IP packet route to www.google.com. + + const host = "www.google.com" + ips, err := net.LookupIP(host) + if err != nil { + log.Fatal(err) + } + var dst net.IPAddr + for _, ip := range ips { + if ip.To16() != nil && ip.To4() == nil { + dst.IP = ip + fmt.Printf("using %v for tracing an IP packet route to %s\n", dst.IP, host) + break + } + } + if dst.IP == nil { + log.Fatal("no AAAA record found") + } + + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil { + log.Fatal(err) + } + wm := icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + } + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + log.Fatal(err) + } + + var wcm ipv6.ControlMessage + rb := make([]byte, 1500) + for i := 1; i <= 64; i++ { // up to 64 hops + wm.Body.(*icmp.Echo).Seq = i + wb, err := wm.Marshal(nil) + if err != nil { + log.Fatal(err) + } + + // In the real world usually there are several + // traffic-engineered paths for each hop. + // You may need to probe a few times to each hop.
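+ // Each iteration raises the outgoing hop limit by one via the + // control message below, so the router i hops away answers with + // an ICMPv6 time-exceeded error until the echo reply arrives.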
+ begin := time.Now() + wcm.HopLimit = i + if _, err := p.WriteTo(wb, &wcm, &dst); err != nil { + log.Fatal(err) + } + if err := p.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + log.Fatal(err) + } + n, rcm, peer, err := p.ReadFrom(rb) + if err != nil { + if err, ok := err.(net.Error); ok && err.Timeout() { + fmt.Printf("%v\t*\n", i) + continue + } + log.Fatal(err) + } + rm, err := icmp.ParseMessage(58, rb[:n]) + if err != nil { + log.Fatal(err) + } + rtt := time.Since(begin) + + // In the real world you need to determine whether the + // received message is yours using ControlMessage.Src, + // ControlMesage.Dst, icmp.Echo.ID and icmp.Echo.Seq. + switch rm.Type { + case ipv6.ICMPTypeTimeExceeded: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + case ipv6.ICMPTypeEchoReply: + names, _ := net.LookupAddr(peer.String()) + fmt.Printf("%d\t%v %+v %v\n\t%+v\n", i, peer, names, rtt, rcm) + return + } + } +} + +func ExamplePacketConn_advertisingOSPFHello() { + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 + if err != nil { + log.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + + en0, err := net.InterfaceByName("en0") + if err != nil { + log.Fatal(err) + } + allSPFRouters := net.IPAddr{IP: net.ParseIP("ff02::5")} + if err := p.JoinGroup(en0, &allSPFRouters); err != nil { + log.Fatal(err) + } + defer p.LeaveGroup(en0, &allSPFRouters) + + hello := make([]byte, 24) // fake hello data, you need to implement this + ospf := make([]byte, 16) // fake ospf header, you need to implement this + ospf[0] = 3 // version 3 + ospf[1] = 1 // hello packet + ospf = append(ospf, hello...) + if err := p.SetChecksum(true, 12); err != nil { + log.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: 0xc0, // DSCP CS6 + HopLimit: 1, + IfIndex: en0.Index, + } + if _, err := p.WriteTo(ospf, &cm, &allSPFRouters); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 0000000..41886ec --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
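+//
+// Typical usage, from a checkout of this package's source directory:
+//
+//	go generate
+//
+// which runs the cgo -godefs pass to refresh the zsys_* files and
+// fetches the IANA ICMPv6 registry to regenerate iana.go.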
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr 
:= range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 0000000..e9dbc2e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "syscall" + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 0000000..e05cb08 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. 
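+// The base header is always HeaderLen (40) bytes: 4 bits of version,
+// 8 bits of traffic class and 20 bits of flow label packed into the
+// first four bytes, followed by the 16-bit payload length, the next
+// header and hop limit octets, and the 16-byte source and
+// destination addresses.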
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/header_test.go b/vendor/golang.org/x/net/ipv6/header_test.go new file mode 100644 index 0000000..ca11dc2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "strings" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv6" +) + +var ( + wireHeaderFromKernel = [ipv6.HeaderLen]byte{ + 0x69, 0x8b, 0xee, 0xf1, + 0xca, 0xfe, 0x2c, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, + 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + } + + testHeader = &ipv6.Header{ + Version: ipv6.Version, + TrafficClass: iana.DiffServAF43, + FlowLabel: 0xbeef1, + PayloadLen: 0xcafe, + NextHeader: iana.ProtocolIPv6Frag, + HopLimit: 1, + Src: net.ParseIP("2001:db8:1::1"), + Dst: net.ParseIP("2001:db8:2::1"), + } +) + +func TestParseHeader(t *testing.T) { + h, err := ipv6.ParseHeader(wireHeaderFromKernel[:]) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, testHeader) { + t.Fatalf("got %#v; want %#v", h, testHeader) + } + s := h.String() + if strings.Contains(s, ",") { + t.Fatalf("should be space-separated values: %s", s) + } +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 0000000..2597401 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 0000000..3c6214f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,82 @@ +// go generate gen.go +// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // 
Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 0000000..b7f48e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. 
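+// Passing true blocks every ICMP type until individual types are
+// re-enabled with Accept; passing false passes every type until
+// individual types are disabled with Block. A common pattern is to
+// block everything and then accept only the types of interest
+// (here p is an *ipv6.PacketConn):
+//
+//	var f ipv6.ICMPFilter
+//	f.SetAll(true)
+//	f.Accept(ipv6.ICMPTypeEchoReply)
+//	if err := p.SetICMPFilter(&f); err != nil {
+//		// handle error
+//	}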
+func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 0000000..e1a791d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 0000000..647f6b4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 0000000..7c23bb1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 0000000..c4b9be6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_test.go b/vendor/golang.org/x/net/ipv6/icmp_test.go new file mode 100644 index 0000000..d8e9675 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_test.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "reflect" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var icmpStringTests = []struct { + in ipv6.ICMPType + out string +}{ + {ipv6.ICMPTypeDestinationUnreachable, "destination unreachable"}, + + {256, ""}, +} + +func TestICMPString(t *testing.T) { + for _, tt := range icmpStringTests { + s := tt.in.String() + if s != tt.out { + t.Errorf("got %s; want %s", s, tt.out) + } + } +} + +func TestICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + var f ipv6.ICMPFilter + for _, toggle := range []bool{false, true} { + f.SetAll(toggle) + for _, typ := range []ipv6.ICMPType{ + ipv6.ICMPTypeDestinationUnreachable, + ipv6.ICMPTypeEchoReply, + ipv6.ICMPTypeNeighborSolicitation, + ipv6.ICMPTypeDuplicateAddressConfirmation, + } { + f.Accept(typ) + if f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, false) failed", typ) + } + f.Block(typ) + if !f.WillBlock(typ) { + t.Errorf("ipv6.ICMPFilter.Set(%v, true) failed", typ) + } + } + } +} + +func TestSetICMPFilter(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoRequest) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + kf, err := p.ICMPFilter() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(kf, &f) { + t.Fatalf("got %#v; want %#v", kf, f) + } +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 0000000..443cd07 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/mocktransponder_test.go b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go new file mode 100644 index 0000000..6efe56c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/mocktransponder_test.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "net" + "testing" +) + +func connector(t *testing.T, network, addr string, done chan<- bool) { + defer func() { done <- true }() + + c, err := net.Dial(network, addr) + if err != nil { + t.Error(err) + return + } + c.Close() +} + +func acceptor(t *testing.T, ln net.Listener, done chan<- bool) { + defer func() { done <- true }() + + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + c.Close() +} diff --git a/vendor/golang.org/x/net/ipv6/multicast_test.go b/vendor/golang.org/x/net/ipv6/multicast_test.go new file mode 100644 index 0000000..69a21cd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicast_test.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnReadWriteMulticastUDPTests = []struct { + addr string + grp, src *net.UDPAddr +}{ + {"[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {"[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastUDPTests { + c, err := net.ListenPacket("udp6", tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + grp := *tt.grp + grp.Port = c.LocalAddr().(*net.UDPAddr).Port + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, &grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, &grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, &grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, &grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + 
t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, &grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatal(err) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } + } +} + +var packetConnReadWriteMulticastICMPTests = []struct { + grp, src *net.IPAddr +}{ + {&net.IPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + + {&net.IPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnReadWriteMulticastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if !nettest.SupportsIPv6MulticastDeliveryOnLoopback() { + t.Skipf("multicast delivery doesn't work correctly on %s", runtime.GOOS) + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + for _, tt := range packetConnReadWriteMulticastICMPTests { + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") + if err != nil { + t.Fatal(err) + } + defer c.Close() + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, tt.grp.IP) + p := ipv6.NewPacketConn(c) + defer p.Close() + if tt.src == nil { + if err := p.JoinGroup(ifi, tt.grp); err != nil { + t.Fatal(err) + } + defer p.LeaveGroup(ifi, tt.grp) + } else { + if err := p.JoinSourceSpecificGroup(ifi, tt.grp, tt.src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + defer p.LeaveSourceSpecificGroup(ifi, tt.grp, tt.src) + } + if err := p.SetMulticastInterface(ifi); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastInterface(); err != nil { + t.Fatal(err) + } + if err := p.SetMulticastLoopback(true); err != nil { + t.Fatal(err) + } + if _, err := p.MulticastLoopback(); err != nil { + t.Fatal(err) + } + + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + IfIndex: ifi.Index, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); 
err != nil { + // Solaris never allows to + // modify ICMP properties. + if runtime.GOOS != "solaris" { + t.Fatal(err) + } + } + } else { + psh = pshicmp + // Some platforms never allow to + // disable the kernel checksum + // processing. + p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + if err := p.SetDeadline(time.Now().Add(200 * time.Millisecond)); err != nil { + t.Fatal(err) + } + cm.HopLimit = i + 1 + if n, err := p.WriteTo(wb, &cm, tt.grp); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have some limitation on receiving icmp packet through raw socket + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastlistener_test.go b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go new file mode 100644 index 0000000..b27713e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastlistener_test.go @@ -0,0 +1,261 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
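+
+// The tests below exercise group membership bookkeeping: joining the
+// same link-local group from a single socket on every multicast
+// capable interface, from multiple sockets sharing a port, and from
+// per-interface unicast listeners.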
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var udpMultipleGroupListenerTests = []net.Addr{ + &net.UDPAddr{IP: net.ParseIP("ff02::114")}, // see RFC 4727 + &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}, + &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}, +} + +func TestUDPSinglePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c, err := net.ListenPacket("udp6", "[::]:0") // wildcard address with non-reusable port + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } +} + +func TestUDPMultiplePacketConnWithMultipleGroupListeners(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + for _, gaddr := range udpMultipleGroupListenerTests { + c1, err := net.ListenPacket("udp6", "[ff02::]:0") // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c1.Close() + _, port, err := net.SplitHostPort(c1.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + c2, err := net.ListenPacket("udp6", net.JoinHostPort("ff02::", port)) // wildcard address with reusable port + if err != nil { + t.Fatal(err) + } + defer c2.Close() + + var ps [2]*ipv6.PacketConn + ps[0] = ipv6.NewPacketConn(c1) + ps[1] = ipv6.NewPacketConn(c2) + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + for _, p := range ps { + if err := p.JoinGroup(&ifi, gaddr); err != nil { + t.Fatal(err) + } + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + for _, p := range ps { + if err := p.LeaveGroup(ifi, gaddr); err != nil { + t.Fatal(err) + } + } + } + } +} + +func TestUDPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + port := "0" + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("udp6", net.JoinHostPort(ip.String()+"%"+ifi.Name, port)) // unicast address with non-reusable port + if err != nil { + // The listen may fail when the serivce is + // already in use, but it's fine because the + // purpose of this is not to test the + // bookkeeping of IP control block inside the + // kernel. 
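+			// Logging and skipping the interface keeps
+			// the test meaningful on hosts where some of
+			// the candidate addresses are already bound.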
+ t.Log(err) + continue + } + defer c.Close() + if port == "0" { + _, port, err = net.SplitHostPort(c.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + } + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // wildcard address + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + var mift []*net.Interface + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + if _, ok := nettest.IsMulticastCapable("ip6", &ifi); !ok { + continue + } + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mift = append(mift, &ift[i]) + } + for _, ifi := range mift { + if err := p.LeaveGroup(ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} + +func TestIPPerInterfaceSinglePacketConnWithSingleGroupListener(t *testing.T) { + switch runtime.GOOS { + case "darwin", "dragonfly", "openbsd": // platforms that return fe80::1%lo0: bind: can't assign requested address + t.Skipf("not supported on %s", runtime.GOOS) + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + gaddr := net.IPAddr{IP: net.ParseIP("ff02::114")} // see RFC 4727 + type ml struct { + c *ipv6.PacketConn + ifi *net.Interface + } + var mlt []*ml + + ift, err := net.Interfaces() + if err != nil { + t.Fatal(err) + } + for i, ifi := range ift { + ip, ok := nettest.IsMulticastCapable("ip6", &ifi) + if !ok { + continue + } + c, err := net.ListenPacket("ip6:ipv6-icmp", ip.String()+"%"+ifi.Name) // unicast address + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + if err := p.JoinGroup(&ifi, &gaddr); err != nil { + t.Fatal(err) + } + mlt = append(mlt, &ml{p, &ift[i]}) + } + for _, m := range mlt { + if err := m.c.LeaveGroup(m.ifi, &gaddr); err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go new file mode 100644 index 0000000..9e6b902 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/multicastsockopt_test.go @@ -0,0 +1,157 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
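+
+// The tests below walk the socket-option transitions used for
+// source-specific multicast: MCAST_JOIN_GROUP followed by
+// MCAST_BLOCK_SOURCE/MCAST_UNBLOCK_SOURCE, and
+// MCAST_JOIN_SOURCE_GROUP followed by either
+// MCAST_LEAVE_SOURCE_GROUP or MCAST_LEAVE_GROUP.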
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var packetConnMulticastSocketOptionTests = []struct { + net, proto, addr string + grp, src net.Addr +}{ + {"udp6", "", "[ff02::]:0", &net.UDPAddr{IP: net.ParseIP("ff02::114")}, nil}, // see RFC 4727 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff02::115")}, nil}, // see RFC 4727 + + {"udp6", "", "[ff30::8000:0]:0", &net.UDPAddr{IP: net.ParseIP("ff30::8000:1")}, &net.UDPAddr{IP: net.IPv6loopback}}, // see RFC 5771 + {"ip6", ":ipv6-icmp", "::", &net.IPAddr{IP: net.ParseIP("ff30::8000:2")}, &net.IPAddr{IP: net.IPv6loopback}}, // see RFC 5771 +} + +func TestPacketConnMulticastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagMulticast|net.FlagLoopback) + if ifi == nil { + t.Skipf("not available on %s", runtime.GOOS) + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnMulticastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + if tt.src == nil { + testMulticastSocketOptions(t, p, ifi, tt.grp) + } else { + testSourceSpecificMulticastSocketOptions(t, p, ifi, tt.grp, tt.src) + } + } +} + +type testIPv6MulticastConn interface { + MulticastHopLimit() (int, error) + SetMulticastHopLimit(ttl int) error + MulticastLoopback() (bool, error) + SetMulticastLoopback(bool) error + JoinGroup(*net.Interface, net.Addr) error + LeaveGroup(*net.Interface, net.Addr) error + JoinSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + LeaveSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + ExcludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error + IncludeSourceSpecificGroup(*net.Interface, net.Addr, net.Addr) error +} + +func testMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp net.Addr) { + const hoplim = 255 + if err := c.SetMulticastHopLimit(hoplim); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastHopLimit(); err != nil { + t.Error(err) + return + } else if v != hoplim { + t.Errorf("got %v; want %v", v, hoplim) + return + } + + for _, toggle := range []bool{true, false} { + if err := c.SetMulticastLoopback(toggle); err != nil { + t.Error(err) + return + } + if v, err := c.MulticastLoopback(); err != nil { + t.Error(err) + return + } else if v != toggle { + t.Errorf("got %v; want %v", v, toggle) + return + } + } + + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} + +func testSourceSpecificMulticastSocketOptions(t *testing.T, c testIPv6MulticastConn, ifi *net.Interface, grp, src net.Addr) { + // MCAST_JOIN_GROUP -> MCAST_BLOCK_SOURCE -> MCAST_UNBLOCK_SOURCE -> MCAST_LEAVE_GROUP + if err := c.JoinGroup(ifi, grp); err != nil { + t.Error(err) + return + } + if err := c.ExcludeSourceSpecificGroup(ifi, grp, src); err != nil { + switch runtime.GOOS { + case "freebsd", "linux": + default: // platforms that don't support MLDv2 fail here + t.Logf("not supported on %s", runtime.GOOS) + return + } + t.Error(err) + return + } + if err := c.IncludeSourceSpecificGroup(ifi, grp, 
src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_SOURCE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + + // MCAST_JOIN_SOURCE_GROUP -> MCAST_LEAVE_GROUP + if err := c.JoinSourceSpecificGroup(ifi, grp, src); err != nil { + t.Error(err) + return + } + if err := c.LeaveGroup(ifi, grp); err != nil { + t.Error(err) + return + } +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 0000000..a8197f1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 0000000..4ee4b06 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,35 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 0000000..fdc6c39 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 0000000..8f6d02e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
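+
+// This variant relies on the socket.Message-based RecvMsg/SendMsg
+// API from golang.org/x/net/internal/socket; the pre-Go 1.9 variant
+// in payload_cmsg_go1_8.go calls ReadMsgUDP and ReadMsgIP directly.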
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 0000000..99a4354 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go new file mode 100644 index 0000000..c11d92a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_8_test.go @@ -0,0 +1,242 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr()) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr()) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go new file mode 100644 index 0000000..e2fd733 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_go1_9_test.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
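+
+// In addition to the WriteTo/ReadFrom round trips benchmarked for
+// earlier Go releases, the benchmarks below also cover the batch
+// WriteBatch/ReadBatch paths using preconstructed ipv6.Message
+// slices.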
+ +// +build go1.9 + +package ipv6_test + +import ( + "bytes" + "fmt" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkPacketConnReadWriteUnicast(b *testing.B) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + b.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + bb := make([]byte, 128) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback); ifi != nil { + cm.IfIndex = ifi.Index + } + + b.Run("UDP", func(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagHopLimit | ipv6.FlagInterface + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{payload}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(payload, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(payload, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) + b.Run("IP", func(b *testing.B) { + switch runtime.GOOS { + case "netbsd": + b.Skip("need to configure gre on netbsd") + case "openbsd": + b.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + dst := c.LocalAddr() + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + wms := []ipv6.Message{ + { + Buffers: [][]byte{datagram}, + Addr: dst, + OOB: cm.Marshal(), + }, + } + rms := []ipv6.Message{ + { + Buffers: [][]byte{bb}, + OOB: ipv6.NewControlMessage(cf), + }, + } + b.Run("Net", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(datagram, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("ToFrom", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(datagram, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err 
:= p.ReadFrom(bb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("Batch", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := p.WriteBatch(wms, 0); err != nil { + b.Fatal(err) + } + if _, err := p.ReadBatch(rms, 0); err != nil { + b.Fatal(err) + } + } + }) + }) +} + +func TestPacketConnConcurrentReadWriteUnicast(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + + payload := []byte("HELLO-R-U-THERE") + iph := []byte{ + 0x69, 0x8b, 0xee, 0xf1, 0xca, 0xfe, 0xff, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + } + greh := []byte{0x00, 0x00, 0x86, 0xdd, 0x00, 0x00, 0x00, 0x00} + datagram := append(greh, append(iph, payload...)...) + + t.Run("UDP", func(t *testing.T) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, payload, c.LocalAddr(), true) + }) + }) + t.Run("IP", func(t *testing.T) { + switch runtime.GOOS { + case "netbsd": + t.Skip("need to configure gre on netbsd") + case "openbsd": + t.Skip("net.inet.gre.allow=0 by default on openbsd") + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolGRE), "::1") + if err != nil { + t.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + t.Run("ToFrom", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), false) + }) + t.Run("Batch", func(t *testing.T) { + testPacketConnConcurrentReadWriteUnicast(t, p, datagram, c.LocalAddr(), true) + }) + }) +} + +func testPacketConnConcurrentReadWriteUnicast(t *testing.T, p *ipv6.PacketConn, data []byte, dst net.Addr, batch bool) { + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + b := make([]byte, 128) + n, cm, _, err := p.ReadFrom(b) + if err != nil { + t.Error(err) + return + } + if !bytes.Equal(b[:n], data) { + t.Errorf("got %#v; want %#v", b[:n], data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + return + } + } + batchReader := func() { + defer wg.Done() + ms := []ipv6.Message{ + { + Buffers: [][]byte{make([]byte, 128)}, + OOB: ipv6.NewControlMessage(cf), + }, + } + n, err := p.ReadBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + var cm ipv6.ControlMessage + if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil { + t.Error(err) + return + } + b := ms[0].Buffers[0][:ms[0].N] + if !bytes.Equal(b, data) { + t.Errorf("got %#v; want %#v", b, data) + return + } + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be 
space-separated values: %s", s) + return + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + n, err := p.WriteTo(data, &cm, dst) + if err != nil { + t.Error(err) + return + } + if n != len(data) { + t.Errorf("got %d; want %d", n, len(data)) + return + } + } + batchWriter := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + ms := []ipv6.Message{ + { + Buffers: [][]byte{data}, + OOB: cm.Marshal(), + Addr: dst, + }, + } + n, err := p.WriteBatch(ms, 0) + if err != nil { + t.Error(err) + return + } + if n != len(ms) { + t.Errorf("got %d; want %d", n, len(ms)) + return + } + if ms[0].N != len(data) { + t.Errorf("got %d; want %d", ms[0].N, len(data)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + if batch { + go batchWriter(i%2 != 0) + } else { + go writer(i%2 != 0) + } + } + wg.Add(N) + for i := 0; i < N; i++ { + if batch { + go batchReader() + } else { + go reader() + } + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/readwrite_test.go b/vendor/golang.org/x/net/ipv6/readwrite_test.go new file mode 100644 index 0000000..206b915 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/readwrite_test.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "runtime" + "strings" + "sync" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func BenchmarkReadWriteUnicast(b *testing.B) { + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + b.Skipf("not supported on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + } + defer c.Close() + + dst := c.LocalAddr() + wb, rb := []byte("HELLO-R-U-THERE"), make([]byte, 128) + + b.Run("NetUDP", func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := c.WriteTo(wb, dst); err != nil { + b.Fatal(err) + } + if _, _, err := c.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) + b.Run("IPv6UDP", func(b *testing.B) { + p := ipv6.NewPacketConn(c) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + if err := p.SetControlMessage(cf, true); err != nil { + b.Fatal(err) + } + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + HopLimit: 1, + } + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + for i := 0; i < b.N; i++ { + if _, err := p.WriteTo(wb, &cm, dst); err != nil { + b.Fatal(err) + } + if _, _, _, err := p.ReadFrom(rb); err != nil { + b.Fatal(err) + } + } + }) +} + +func TestPacketConnConcurrentReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + wb := []byte("HELLO-R-U-THERE") + + if err := p.SetControlMessage(cf, true); err != nil { // probe before test + if nettest.ProtocolNotSupported(err) { + t.Skipf("not supported on %s", runtime.GOOS) + } + t.Fatal(err) + } + + var wg sync.WaitGroup + reader := func() { + defer wg.Done() + rb := make([]byte, 128) + if n, cm, _, err := p.ReadFrom(rb); err != nil { + t.Error(err) + return + } else if !bytes.Equal(rb[:n], wb) { + t.Errorf("got %v; want %v", rb[:n], wb) + return + } else { + s := cm.String() + if strings.Contains(s, ",") { + t.Errorf("should be space-separated values: %s", s) + } + } + } + writer := func(toggle bool) { + defer wg.Done() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + if ifi != nil { + cm.IfIndex = ifi.Index + } + if err := p.SetControlMessage(cf, toggle); err != nil { + t.Error(err) + return + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Error(err) + return + } else if n != len(wb) { + t.Errorf("got %d; want %d", n, len(wb)) + return + } + } + + const N = 10 + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Add(2 * N) + for i := 0; i < 2*N; i++ { + go writer(i%2 != 0) + } + wg.Add(N) + for i := 0; i < N; i++ { + go reader() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 0000000..cc3907d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // inbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for a sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 0000000..0eac86e --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 0000000..1f4a273 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_test.go b/vendor/golang.org/x/net/ipv6/sockopt_test.go new file mode 100644 index 0000000..774338d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_test.go @@ -0,0 +1,133 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6_test + +import ( + "fmt" + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +var supportsIPv6 bool = nettest.SupportsIPv6() + +func TestConnInitiatorPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go acceptor(t, ln, done) + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestConnResponderPathMTU(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + done := make(chan bool) + go connector(t, "tcp6", ln.Addr().String(), done) + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if pmtu, err := ipv6.NewConn(c).PathMTU(); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_PATHMTU option + t.Logf("not supported on %s", runtime.GOOS) + default: + t.Fatal(err) + } + } else { + t.Logf("path mtu for %v: %v", c.RemoteAddr(), pmtu) + } + + <-done +} + +func TestPacketConnChecksum(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := 
nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + if err != nil { + t.Fatal(err) + } + defer c.Close() + + p := ipv6.NewPacketConn(c) + offset := 12 // see RFC 5340 + + for _, toggle := range []bool{false, true} { + if err := p.SetChecksum(toggle, offset); err != nil { + if toggle { + t.Fatalf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } else { + // Some platforms never allow disabling the kernel + // checksum processing. + t.Logf("ipv6.PacketConn.SetChecksum(%v, %v) failed: %v", toggle, offset, err) + } + } + if on, offset, err := p.Checksum(); err != nil { + t.Fatal(err) + } else { + t.Logf("kernel checksum processing enabled=%v, offset=%v", on, offset) + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 0000000..b0510c0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 0000000..eece961 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 0000000..b2dbcb2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 0000000..676bea5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 0000000..e416eaa --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 0000000..e3d0443 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 0000000..e9349dc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface
= uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 0000000..bc21810 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + 
ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 0000000..d348b5f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 0000000..add8ccc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 0000000..581ee49 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 0000000..b845388 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 0000000..fc36b01 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/unicast_test.go b/vendor/golang.org/x/net/ipv6/unicast_test.go new file mode 100644 index 0000000..a0b7d95 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicast_test.go @@ -0,0 +1,184 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6_test + +import ( + "bytes" + "net" + "os" + "runtime" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestPacketConnReadWriteUnicastUDP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + c, err := nettest.NewLocalPacketListener("udp6") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst := c.LocalAddr() + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + wb := []byte("HELLO-R-U-THERE") + + for i, toggle := range []bool{true, false, true} { + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + t.Fatal(err) + } else if !bytes.Equal(rb[:n], wb) { + t.Fatalf("got %v; want %v", rb[:n], wb) + } + } +} + +func TestPacketConnReadWriteUnicastICMP(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + c, err := net.ListenPacket("ip6:ipv6-icmp", "::1") + if err != nil { + t.Fatal(err) + } + defer c.Close() + p := ipv6.NewPacketConn(c) + defer p.Close() + + dst, err := net.ResolveIPAddr("ip6", "::1") + if err != nil { + t.Fatal(err) + } + + pshicmp := icmp.IPv6PseudoHeader(c.LocalAddr().(*net.IPAddr).IP, dst.IP) + cm := ipv6.ControlMessage{ + TrafficClass: iana.DiffServAF11 | iana.CongestionExperienced, + Src: net.IPv6loopback, + } + cf := ipv6.FlagTrafficClass | ipv6.FlagHopLimit | ipv6.FlagSrc | ipv6.FlagDst | ipv6.FlagInterface | ipv6.FlagPathMTU + ifi := nettest.RoutedInterface("ip6", net.FlagUp|net.FlagLoopback) + if ifi != nil { + cm.IfIndex = ifi.Index + } + + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := p.SetICMPFilter(&f); err != nil { + t.Fatal(err) + } + + var psh []byte + for i, toggle := range []bool{true, false, true} { + if toggle { + psh = nil + if err := p.SetChecksum(true, 2); err != nil { + // Solaris never allows modifying + // ICMP properties. + if runtime.GOOS != "solaris" { + t.Fatal(err) + } + } + } else { + psh = pshicmp + // Some platforms never allow disabling the + // kernel checksum processing.
+ p.SetChecksum(false, -1) + } + wb, err := (&icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: i + 1, + Data: []byte("HELLO-R-U-THERE"), + }, + }).Marshal(psh) + if err != nil { + t.Fatal(err) + } + if err := p.SetControlMessage(cf, toggle); err != nil { + if nettest.ProtocolNotSupported(err) { + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } + cm.HopLimit = i + 1 + if err := p.SetWriteDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, err := p.WriteTo(wb, &cm, dst); err != nil { + t.Fatal(err) + } else if n != len(wb) { + t.Fatalf("got %v; want %v", n, len(wb)) + } + rb := make([]byte, 128) + if err := p.SetReadDeadline(time.Now().Add(100 * time.Millisecond)); err != nil { + t.Fatal(err) + } + if n, _, _, err := p.ReadFrom(rb); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels have limitations on receiving icmp packets through raw sockets + t.Logf("not supported on %s", runtime.GOOS) + continue + } + t.Fatal(err) + } else { + if m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]); err != nil { + t.Fatal(err) + } else if m.Type != ipv6.ICMPTypeEchoReply || m.Code != 0 { + t.Fatalf("got type=%v, code=%v; want type=%v, code=%v", m.Type, m.Code, ipv6.ICMPTypeEchoReply, 0) + } + } + } +} diff --git a/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go new file mode 100644 index 0000000..e175dcc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/unicastsockopt_test.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package ipv6_test + +import ( + "net" + "runtime" + "testing" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv6" +) + +func TestConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + ln, err := net.Listen("tcp6", "[::1]:0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + errc <- c.Close() + }() + + c, err := net.Dial("tcp6", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewConn(c)) + + if err := <-errc; err != nil { + t.Errorf("server: %v", err) + } +} + +var packetConnUnicastSocketOptionTests = []struct { + net, proto, addr string +}{ + {"udp6", "", "[::1]:0"}, + {"ip6", ":ipv6-icmp", "::1"}, +} + +func TestPacketConnUnicastSocketOptions(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + t.Skipf("not supported on %s", runtime.GOOS) + } + if !supportsIPv6 { + t.Skip("ipv6 is not supported") + } + + m, ok := nettest.SupportsRawIPSocket() + for _, tt := range packetConnUnicastSocketOptionTests { + if tt.net == "ip6" && !ok { + t.Log(m) + continue + } + c, err := net.ListenPacket(tt.net+tt.proto, tt.addr) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + testUnicastSocketOptions(t, ipv6.NewPacketConn(c)) + } +} + +type testIPv6UnicastConn interface { + TrafficClass() (int, error) + SetTrafficClass(int) error + HopLimit() (int, error) + SetHopLimit(int) error +} + +func testUnicastSocketOptions(t *testing.T, c testIPv6UnicastConn) { + tclass := iana.DiffServCS0 | iana.NotECNTransport + if err := c.SetTrafficClass(tclass); err != nil { + switch runtime.GOOS { + case "darwin": // older darwin kernels don't support IPV6_TCLASS option + t.Logf("not supported on %s", runtime.GOOS) + goto next + } + t.Fatal(err) + } + if v, err := c.TrafficClass(); err != nil { + t.Fatal(err) + } else if v != tclass { + t.Fatalf("got %v; want %v", v, tclass) + } + +next: + hoplim := 255 + if err := c.SetHopLimit(hoplim); err != nil { + t.Fatal(err) + } + if v, err := c.HopLimit(); err != nil { + t.Fatal(err) + } else if v != hoplim { + t.Fatalf("got %v; want %v", v, hoplim) + } +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 0000000..6aab1df --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + 
sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 0000000..d2de804 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go 
b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 0000000..919e572 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 0000000..cb8141f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + 
sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 0000000..cb8141f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id 
uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 
+ Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 
+ Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + 
+ sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + 
sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + 
sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 
+ + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + 
sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 0000000..73aa8c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + 
sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 0000000..c9bf6a8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + 
sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + 
+ sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE 
= 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 0000000..b64f015 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 
+ sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 0000000..bcada13 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 
0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 0000000..86cf3c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 0000000..cf1837d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs 
defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go new file mode 100644 index 0000000..20f2b89 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex.go @@ -0,0 +1,351 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httplex contains rules around lexical matters of various +// HTTP-related specifications. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. 
+package httplex
+
+import (
+ "net"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/idna"
+)
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'W': true,
+ 'V': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ i := int(r)
+ return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+ return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !IsTokenRune(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the ValidHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value and consisting
+// of either *TEXT or combinations of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs, but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+//
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex_test.go b/vendor/golang.org/x/net/lex/httplex/httplex_test.go new file mode 100644 index 0000000..f47adc9 --- /dev/null +++ b/vendor/golang.org/x/net/lex/httplex/httplex_test.go @@ -0,0 +1,119 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
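Editor's note: the helpers above form the package's public surface, namely token checks, header-name validation, and IDNA host conversion, and the test file that follows exercises the same entry points. A minimal usage sketch (the sample header strings are invented; the import path is the vendored one):

    package main

    import (
    	"fmt"

    	"golang.org/x/net/lex/httplex"
    )

    func main() {
    	// Token matching is ASCII case-insensitive and tolerates optional whitespace.
    	vals := []string{"keep-alive, Upgrade"}
    	fmt.Println(httplex.HeaderValuesContainsToken(vals, "upgrade")) // true

    	// Header names must be RFC 7230 tokens; a colon is a separator, not a tchar.
    	fmt.Println(httplex.ValidHeaderFieldName("Content-Type")) // true
    	fmt.Println(httplex.ValidHeaderFieldName("Content:Type")) // false

    	// Non-ASCII hosts are converted to Punycode; ASCII input is returned as-is.
    	host, err := httplex.PunycodeHostPort("bücher.de:8080")
    	fmt.Println(host, err) // xn--bcher-kva.de:8080 <nil>
    }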
+ +package httplex + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if IsTokenRune(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} + +func TestHeaderValuesContainsToken(t *testing.T) { + tests := []struct { + vals []string + token string + want bool + }{ + { + vals: []string{"foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"bar", "foo"}, + token: "foo", + want: true, + }, + { + vals: []string{"foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo"}, + token: "bar", + want: false, + }, + { + vals: []string{" foo "}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar,foo,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + { + vals: []string{"foo ,bar "}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar, foo ,bar"}, + token: "FOO", + want: true, + }, + { + vals: []string{"bar , foo"}, + token: "FOO", + want: true, + }, + } + for _, tt := range tests { + got := HeaderValuesContainsToken(tt.vals, tt.token) + if got != tt.want { + t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want) + } + } +} + +func TestPunycodeHostPort(t *testing.T) { + tests := []struct { + in, want string + }{ + {"www.google.com", "www.google.com"}, + {"гофер.рф", "xn--c1ae0ajs.xn--p1ai"}, + {"bücher.de", "xn--bcher-kva.de"}, + {"bücher.de:8080", "xn--bcher-kva.de:8080"}, + {"[1::6]:8080", "[1::6]:8080"}, + } + for _, tt := range tests { + got, err := PunycodeHostPort(tt.in) + if tt.want != got || err != nil { + t.Errorf("PunycodeHostPort(%q) = %q, %v, want %q, nil", tt.in, got, err, tt.want) + } + } +} diff --git a/vendor/golang.org/x/net/lif/address.go b/vendor/golang.org/x/net/lif/address.go new file mode 100644 index 0000000..afb957f --- /dev/null +++ b/vendor/golang.org/x/net/lif/address.go @@ -0,0 +1,105 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "errors" + "unsafe" +) + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// An Inet4Addr represents an internet address for IPv4. +type Inet4Addr struct { + IP [4]byte // IP address + PrefixLen int // address prefix length +} + +// Family implements the Family method of Addr interface. +func (a *Inet4Addr) Family() int { return sysAF_INET } + +// An Inet6Addr represents an internet address for IPv6. +type Inet6Addr struct { + IP [16]byte // IP address + PrefixLen int // address prefix length + ZoneID int // zone identifier +} + +// Family implements the Family method of Addr interface. +func (a *Inet6Addr) Family() int { return sysAF_INET6 } + +// Addrs returns a list of interface addresses. +// +// The provided af must be an address family and name must be a data +// link name. The zero value of af or name means a wildcard. 
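Editor's note: the Addrs function that follows returns its results behind the Addr interface, so callers recover the concrete address types with a type switch. A consumer sketch, buildable only on Solaris where this package compiles (all names outside the lif import are local to the sketch):

    package main

    import (
    	"fmt"

    	"golang.org/x/net/lif"
    )

    func main() {
    	// Address family 0 (AF_UNSPEC) and an empty name act as wildcards.
    	as, err := lif.Addrs(0, "")
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	for _, a := range as {
    		switch a := a.(type) {
    		case *lif.Inet4Addr:
    			fmt.Printf("inet4 %v/%d\n", a.IP, a.PrefixLen)
    		case *lif.Inet6Addr:
    			fmt.Printf("inet6 %v/%d zone=%d\n", a.IP, a.PrefixLen, a.ZoneID)
    		}
    	}
    }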
+func Addrs(af int, name string) ([]Addr, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + lls, err := links(eps, name) + if len(lls) == 0 { + return nil, err + } + var as []Addr + for _, ll := range lls { + var lifr lifreq + for i := 0; i < len(ll.Name); i++ { + lifr.Name[i] = int8(ll.Name[i]) + } + for _, ep := range eps { + ioc := int64(sysSIOCGLIFADDR) + err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifr)) + if err != nil { + continue + } + sa := (*sockaddrStorage)(unsafe.Pointer(&lifr.Lifru[0])) + l := int(nativeEndian.Uint32(lifr.Lifru1[:4])) + if l == 0 { + continue + } + switch sa.Family { + case sysAF_INET: + a := &Inet4Addr{PrefixLen: l} + copy(a.IP[:], lifr.Lifru[4:8]) + as = append(as, a) + case sysAF_INET6: + a := &Inet6Addr{PrefixLen: l, ZoneID: int(nativeEndian.Uint32(lifr.Lifru[24:28]))} + copy(a.IP[:], lifr.Lifru[8:24]) + as = append(as, a) + } + } + } + return as, nil +} + +func parseLinkAddr(b []byte) ([]byte, error) { + nlen, alen, slen := int(b[1]), int(b[2]), int(b[3]) + l := 4 + nlen + alen + slen + if len(b) < l { + return nil, errors.New("invalid address") + } + b = b[4:] + var addr []byte + if nlen > 0 { + b = b[nlen:] + } + if alen > 0 { + addr = make([]byte, alen) + copy(addr, b[:alen]) + } + return addr, nil +} diff --git a/vendor/golang.org/x/net/lif/address_test.go b/vendor/golang.org/x/net/lif/address_test.go new file mode 100644 index 0000000..a25f10b --- /dev/null +++ b/vendor/golang.org/x/net/lif/address_test.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +type addrFamily int + +func (af addrFamily) String() string { + switch af { + case sysAF_UNSPEC: + return "unspec" + case sysAF_INET: + return "inet4" + case sysAF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +const hexDigit = "0123456789abcdef" + +type llAddr []byte + +func (a llAddr) String() string { + if len(a) == 0 { + return "" + } + buf := make([]byte, 0, len(a)*3-1) + for i, b := range a { + if i > 0 { + buf = append(buf, ':') + } + buf = append(buf, hexDigit[b>>4]) + buf = append(buf, hexDigit[b&0xF]) + } + return string(buf) +} + +type ipAddr []byte + +func (a ipAddr) String() string { + if len(a) == 0 { + return "" + } + if len(a) == 4 { + return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3]) + } + if len(a) == 16 { + return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]) + } + s := make([]byte, len(a)*2) + for i, tn := range a { + s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf] + } + return string(s) +} + +func (a *Inet4Addr) String() string { + return fmt.Sprintf("(%s %s %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen) +} + +func (a *Inet6Addr) String() string { + return fmt.Sprintf("(%s %s %d %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.PrefixLen, a.ZoneID) +} + +type addrPack struct { + af int + as []Addr +} + +func addrPacks() ([]addrPack, error) { + var lastErr error + var aps []addrPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + as, err := Addrs(af, "") + if err != nil { + lastErr = err + continue + } + aps = append(aps, addrPack{af: af, as: as}) + } + 
return aps, lastErr
+}
+
+func TestAddrs(t *testing.T) {
+ aps, err := addrPacks()
+ if len(aps) == 0 && err != nil {
+ t.Fatal(err)
+ }
+ lps, err := linkPacks()
+ if len(lps) == 0 && err != nil {
+ t.Fatal(err)
+ }
+ for _, lp := range lps {
+ n := 0
+ for _, ll := range lp.lls {
+ as, err := Addrs(lp.af, ll.Name)
+ if err != nil {
+ t.Fatal(lp.af, ll.Name, err)
+ }
+ t.Logf("af=%s name=%s %v", addrFamily(lp.af), ll.Name, as)
+ n += len(as)
+ }
+ for _, ap := range aps {
+ if ap.af != lp.af {
+ continue
+ }
+ if n != len(ap.as) {
+ t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(ap.as))
+ continue
+ }
+ }
+ }
+} diff --git a/vendor/golang.org/x/net/lif/binary.go b/vendor/golang.org/x/net/lif/binary.go new file mode 100644 index 0000000..738a94f --- /dev/null +++ b/vendor/golang.org/x/net/lif/binary.go @@ -0,0 +1,115 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package lif
+
+// This file contains duplicates of the encoding/binary package.
+//
+// This package is supposed to be used by the net package of the
+// standard library. Therefore the package set used in this package
+// must be the same as in the net package.
+
+var (
+ littleEndian binaryLittleEndian
+ bigEndian binaryBigEndian
+)
+
+type binaryByteOrder interface {
+ Uint16([]byte) uint16
+ Uint32([]byte) uint32
+ Uint64([]byte) uint64
+ PutUint16([]byte, uint16)
+ PutUint32([]byte, uint32)
+ PutUint64([]byte, uint64)
+}
+
+type binaryLittleEndian struct{}
+
+func (binaryLittleEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func (binaryLittleEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+}
+
+func (binaryLittleEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func (binaryLittleEndian) PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}
+
+func (binaryLittleEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func (binaryLittleEndian) PutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+}
+
+type binaryBigEndian struct{}
+
+func (binaryBigEndian) Uint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[1]) | uint16(b[0])<<8
+}
+
+func (binaryBigEndian) PutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 8)
+ b[1] = byte(v)
+}
+
+func (binaryBigEndian) Uint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func (binaryBigEndian)
PutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 24)
+ b[1] = byte(v >> 16)
+ b[2] = byte(v >> 8)
+ b[3] = byte(v)
+}
+
+func (binaryBigEndian) Uint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+func (binaryBigEndian) PutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 56)
+ b[1] = byte(v >> 48)
+ b[2] = byte(v >> 40)
+ b[3] = byte(v >> 32)
+ b[4] = byte(v >> 24)
+ b[5] = byte(v >> 16)
+ b[6] = byte(v >> 8)
+ b[7] = byte(v)
+} diff --git a/vendor/golang.org/x/net/lif/defs_solaris.go b/vendor/golang.org/x/net/lif/defs_solaris.go new file mode 100644 index 0000000..02c1998 --- /dev/null +++ b/vendor/golang.org/x/net/lif/defs_solaris.go @@ -0,0 +1,90 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package lif
+
+/*
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_types.h>
+*/
+import "C"
+
+const (
+ sysAF_UNSPEC = C.AF_UNSPEC
+ sysAF_INET = C.AF_INET
+ sysAF_INET6 = C.AF_INET6
+
+ sysSOCK_DGRAM = C.SOCK_DGRAM
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+const (
+ sysLIFC_NOXMIT = C.LIFC_NOXMIT
+ sysLIFC_EXTERNAL_SOURCE = C.LIFC_EXTERNAL_SOURCE
+ sysLIFC_TEMPORARY = C.LIFC_TEMPORARY
+ sysLIFC_ALLZONES = C.LIFC_ALLZONES
+ sysLIFC_UNDER_IPMP = C.LIFC_UNDER_IPMP
+ sysLIFC_ENABLED = C.LIFC_ENABLED
+
+ sysSIOCGLIFADDR = C.SIOCGLIFADDR
+ sysSIOCGLIFDSTADDR = C.SIOCGLIFDSTADDR
+ sysSIOCGLIFFLAGS = C.SIOCGLIFFLAGS
+ sysSIOCGLIFMTU = C.SIOCGLIFMTU
+ sysSIOCGLIFNETMASK = C.SIOCGLIFNETMASK
+ sysSIOCGLIFMETRIC = C.SIOCGLIFMETRIC
+ sysSIOCGLIFNUM = C.SIOCGLIFNUM
+ sysSIOCGLIFINDEX = C.SIOCGLIFINDEX
+ sysSIOCGLIFSUBNET = C.SIOCGLIFSUBNET
+ sysSIOCGLIFLNKINFO = C.SIOCGLIFLNKINFO
+ sysSIOCGLIFCONF = C.SIOCGLIFCONF
+ sysSIOCGLIFHWADDR = C.SIOCGLIFHWADDR
+)
+
+const (
+ sysIFF_UP = C.IFF_UP
+ sysIFF_BROADCAST = C.IFF_BROADCAST
+ sysIFF_DEBUG = C.IFF_DEBUG
+ sysIFF_LOOPBACK = C.IFF_LOOPBACK
+ sysIFF_POINTOPOINT = C.IFF_POINTOPOINT
+ sysIFF_NOTRAILERS = C.IFF_NOTRAILERS
+ sysIFF_RUNNING = C.IFF_RUNNING
+ sysIFF_NOARP = C.IFF_NOARP
+ sysIFF_PROMISC = C.IFF_PROMISC
+ sysIFF_ALLMULTI = C.IFF_ALLMULTI
+ sysIFF_INTELLIGENT = C.IFF_INTELLIGENT
+ sysIFF_MULTICAST = C.IFF_MULTICAST
+ sysIFF_MULTI_BCAST = C.IFF_MULTI_BCAST
+ sysIFF_UNNUMBERED = C.IFF_UNNUMBERED
+ sysIFF_PRIVATE = C.IFF_PRIVATE
+)
+
+const (
+ sizeofLifnum = C.sizeof_struct_lifnum
+ sizeofLifreq = C.sizeof_struct_lifreq
+ sizeofLifconf = C.sizeof_struct_lifconf
+ sizeofLifIfinfoReq = C.sizeof_struct_lif_ifinfo_req
+)
+
+type lifnum C.struct_lifnum
+
+type lifreq C.struct_lifreq
+
+type lifconf C.struct_lifconf
+
+type lifIfinfoReq C.struct_lif_ifinfo_req
+
+const (
+ sysIFT_IPV4 = C.IFT_IPV4
+ sysIFT_IPV6 = C.IFT_IPV6
+ sysIFT_6TO4 = C.IFT_6TO4
+)
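Editor's note: the byte-order helpers in binary.go above behave exactly like encoding/binary's LittleEndian and BigEndian; the copy exists only to keep the package's dependency set identical to the standard net package. Their semantics, demonstrated with the standard library in a self-contained sketch:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	b := make([]byte, 4)

    	// Little endian: least significant byte at the lowest index.
    	binary.LittleEndian.PutUint32(b, 0x01020304)
    	fmt.Printf("% x\n", b) // 04 03 02 01

    	// Big endian: most significant byte at the lowest index.
    	binary.BigEndian.PutUint32(b, 0x01020304)
    	fmt.Printf("% x\n", b) // 01 02 03 04

    	// Round trip recovers the original value.
    	fmt.Println(binary.BigEndian.Uint32(b) == 0x01020304) // true
    }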
diff --git a/vendor/golang.org/x/net/lif/lif.go b/vendor/golang.org/x/net/lif/lif.go new file mode 100644 index 0000000..6e81f81 --- /dev/null +++ b/vendor/golang.org/x/net/lif/lif.go @@ -0,0 +1,43 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+// Package lif provides basic functions for the manipulation of
+// logical network interfaces and interface addresses on Solaris.
+//
+// The package supports Solaris 11 or above.
+package lif
+
+import "syscall"
+
+type endpoint struct {
+ af int
+ s uintptr
+}
+
+func (ep *endpoint) close() error {
+ return syscall.Close(int(ep.s))
+}
+
+func newEndpoints(af int) ([]endpoint, error) {
+ var lastErr error
+ var eps []endpoint
+ afs := []int{sysAF_INET, sysAF_INET6}
+ if af != sysAF_UNSPEC {
+ afs = []int{af}
+ }
+ for _, af := range afs {
+ s, err := syscall.Socket(af, sysSOCK_DGRAM, 0)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ eps = append(eps, endpoint{af: af, s: uintptr(s)})
+ }
+ if len(eps) == 0 {
+ return nil, lastErr
+ }
+ return eps, nil
+} diff --git a/vendor/golang.org/x/net/lif/link.go b/vendor/golang.org/x/net/lif/link.go new file mode 100644 index 0000000..913a53e --- /dev/null +++ b/vendor/golang.org/x/net/lif/link.go @@ -0,0 +1,126 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package lif
+
+import "unsafe"
+
+// A Link represents logical data link information.
+//
+// It also represents base information for a logical network interface.
+// On Solaris, each logical network interface represents network layer
+// adjacency information and the interface has only a single network
+// address or address pair for tunneling. It's usual that multiple
+// logical network interfaces share the same logical data link.
+type Link struct {
+ Name string // name, equivalent to IP interface name
+ Index int // index, equivalent to IP interface index
+ Type int // type
+ Flags int // flags
+ MTU int // maximum transmission unit, basically link MTU but may differ between IP address families
+ Addr []byte // address
+}
+
+func (ll *Link) fetch(s uintptr) {
+ var lifr lifreq
+ for i := 0; i < len(ll.Name); i++ {
+ lifr.Name[i] = int8(ll.Name[i])
+ }
+ ioc := int64(sysSIOCGLIFINDEX)
+ if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {
+ ll.Index = int(nativeEndian.Uint32(lifr.Lifru[:4]))
+ }
+ ioc = int64(sysSIOCGLIFFLAGS)
+ if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {
+ ll.Flags = int(nativeEndian.Uint64(lifr.Lifru[:8]))
+ }
+ ioc = int64(sysSIOCGLIFMTU)
+ if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {
+ ll.MTU = int(nativeEndian.Uint32(lifr.Lifru[:4]))
+ }
+ switch ll.Type {
+ case sysIFT_IPV4, sysIFT_IPV6, sysIFT_6TO4:
+ default:
+ ioc = int64(sysSIOCGLIFHWADDR)
+ if err := ioctl(s, uintptr(ioc), unsafe.Pointer(&lifr)); err == nil {
+ ll.Addr, _ = parseLinkAddr(lifr.Lifru[4:])
+ }
+ }
+}
+
+// Links returns a list of logical data links.
+//
+// The provided af must be an address family and name must be a data
+// link name. The zero value of af or name means a wildcard.
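Editor's note: internally, the links function below performs the classic two-step ioctl dance (SIOCGLIFNUM to size the buffer, then SIOCGLIFCONF to fill it). From the caller's side, Links is a simple enumeration; a short Solaris-only sketch:

    package main

    import (
    	"fmt"

    	"golang.org/x/net/lif"
    )

    func main() {
    	// Zero af and an empty name mean no filtering.
    	lls, err := lif.Links(0, "")
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	for _, ll := range lls {
    		fmt.Printf("%s: index=%d mtu=%d flags=%#x\n", ll.Name, ll.Index, ll.MTU, ll.Flags)
    	}
    }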
+func Links(af int, name string) ([]Link, error) { + eps, err := newEndpoints(af) + if len(eps) == 0 { + return nil, err + } + defer func() { + for _, ep := range eps { + ep.close() + } + }() + return links(eps, name) +} + +func links(eps []endpoint, name string) ([]Link, error) { + var lls []Link + lifn := lifnum{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + lifc := lifconf{Flags: sysLIFC_NOXMIT | sysLIFC_TEMPORARY | sysLIFC_ALLZONES | sysLIFC_UNDER_IPMP} + for _, ep := range eps { + lifn.Family = uint16(ep.af) + ioc := int64(sysSIOCGLIFNUM) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifn)); err != nil { + continue + } + if lifn.Count == 0 { + continue + } + b := make([]byte, lifn.Count*sizeofLifreq) + lifc.Family = uint16(ep.af) + lifc.Len = lifn.Count * sizeofLifreq + if len(lifc.Lifcu) == 8 { + nativeEndian.PutUint64(lifc.Lifcu[:], uint64(uintptr(unsafe.Pointer(&b[0])))) + } else { + nativeEndian.PutUint32(lifc.Lifcu[:], uint32(uintptr(unsafe.Pointer(&b[0])))) + } + ioc = int64(sysSIOCGLIFCONF) + if err := ioctl(ep.s, uintptr(ioc), unsafe.Pointer(&lifc)); err != nil { + continue + } + nb := make([]byte, 32) // see LIFNAMSIZ in net/if.h + for i := 0; i < int(lifn.Count); i++ { + lifr := (*lifreq)(unsafe.Pointer(&b[i*sizeofLifreq])) + for i := 0; i < 32; i++ { + if lifr.Name[i] == 0 { + nb = nb[:i] + break + } + nb[i] = byte(lifr.Name[i]) + } + llname := string(nb) + nb = nb[:32] + if isDupLink(lls, llname) || name != "" && name != llname { + continue + } + ll := Link{Name: llname, Type: int(lifr.Type)} + ll.fetch(ep.s) + lls = append(lls, ll) + } + } + return lls, nil +} + +func isDupLink(lls []Link, name string) bool { + for _, ll := range lls { + if ll.Name == name { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/lif/link_test.go b/vendor/golang.org/x/net/lif/link_test.go new file mode 100644 index 0000000..0cb9b95 --- /dev/null +++ b/vendor/golang.org/x/net/lif/link_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package lif + +import ( + "fmt" + "testing" +) + +func (ll *Link) String() string { + return fmt.Sprintf("name=%s index=%d type=%d flags=%#x mtu=%d addr=%v", ll.Name, ll.Index, ll.Type, ll.Flags, ll.MTU, llAddr(ll.Addr)) +} + +type linkPack struct { + af int + lls []Link +} + +func linkPacks() ([]linkPack, error) { + var lastErr error + var lps []linkPack + for _, af := range [...]int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + lls, err := Links(af, "") + if err != nil { + lastErr = err + continue + } + lps = append(lps, linkPack{af: af, lls: lls}) + } + return lps, lastErr +} + +func TestLinks(t *testing.T) { + lps, err := linkPacks() + if len(lps) == 0 && err != nil { + t.Fatal(err) + } + for _, lp := range lps { + n := 0 + for _, sll := range lp.lls { + lls, err := Links(lp.af, sll.Name) + if err != nil { + t.Fatal(lp.af, sll.Name, err) + } + for _, ll := range lls { + if ll.Name != sll.Name || ll.Index != sll.Index { + t.Errorf("af=%s got %v; want %v", addrFamily(lp.af), &ll, &sll) + continue + } + t.Logf("af=%s name=%s %v", addrFamily(lp.af), sll.Name, &ll) + n++ + } + } + if n != len(lp.lls) { + t.Errorf("af=%s got %d; want %d", addrFamily(lp.af), n, len(lp.lls)) + continue + } + } +} diff --git a/vendor/golang.org/x/net/lif/sys.go b/vendor/golang.org/x/net/lif/sys.go new file mode 100644 index 0000000..c896041 --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package lif + +import "unsafe" + +var nativeEndian binaryByteOrder + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } +} diff --git a/vendor/golang.org/x/net/lif/sys_solaris_amd64.s b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s new file mode 100644 index 0000000..39d76af --- /dev/null +++ b/vendor/golang.org/x/net/lif/sys_solaris_amd64.s @@ -0,0 +1,8 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) diff --git a/vendor/golang.org/x/net/lif/syscall.go b/vendor/golang.org/x/net/lif/syscall.go new file mode 100644 index 0000000..aadab2e --- /dev/null +++ b/vendor/golang.org/x/net/lif/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
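Editor's note: sys.go above detects the host byte order by storing a uint32 and inspecting its lowest-addressed byte through unsafe. The same probe in isolation, with the logic spelled out:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	i := uint32(1)
    	// View the four-byte representation of i in memory.
    	b := (*[4]byte)(unsafe.Pointer(&i))
    	// On a little-endian machine the least significant byte comes
    	// first, so the byte at the lowest address is 1.
    	if b[0] == 1 {
    		fmt.Println("little endian")
    	} else {
    		fmt.Println("big endian")
    	}
    }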
+ +// +build solaris + +package lif + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +//go:linkname procIoctl libc_ioctl + +var procIoctl uintptr + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func ioctl(s, ioc uintptr, arg unsafe.Pointer) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procIoctl)), 3, s, ioc, uintptr(arg), 0, 0, 0) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go new file mode 100644 index 0000000..b5e999b --- /dev/null +++ b/vendor/golang.org/x/net/lif/zsys_solaris_amd64.go @@ -0,0 +1,103 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package lif + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_DGRAM = 0x1 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +const ( + sysLIFC_NOXMIT = 0x1 + sysLIFC_EXTERNAL_SOURCE = 0x2 + sysLIFC_TEMPORARY = 0x4 + sysLIFC_ALLZONES = 0x8 + sysLIFC_UNDER_IPMP = 0x10 + sysLIFC_ENABLED = 0x20 + + sysSIOCGLIFADDR = -0x3f87968f + sysSIOCGLIFDSTADDR = -0x3f87968d + sysSIOCGLIFFLAGS = -0x3f87968b + sysSIOCGLIFMTU = -0x3f879686 + sysSIOCGLIFNETMASK = -0x3f879683 + sysSIOCGLIFMETRIC = -0x3f879681 + sysSIOCGLIFNUM = -0x3ff3967e + sysSIOCGLIFINDEX = -0x3f87967b + sysSIOCGLIFSUBNET = -0x3f879676 + sysSIOCGLIFLNKINFO = -0x3f879674 + sysSIOCGLIFCONF = -0x3fef965b + sysSIOCGLIFHWADDR = -0x3f879640 +) + +const ( + sysIFF_UP = 0x1 + sysIFF_BROADCAST = 0x2 + sysIFF_DEBUG = 0x4 + sysIFF_LOOPBACK = 0x8 + sysIFF_POINTOPOINT = 0x10 + sysIFF_NOTRAILERS = 0x20 + sysIFF_RUNNING = 0x40 + sysIFF_NOARP = 0x80 + sysIFF_PROMISC = 0x100 + sysIFF_ALLMULTI = 0x200 + sysIFF_INTELLIGENT = 0x400 + sysIFF_MULTICAST = 0x800 + sysIFF_MULTI_BCAST = 0x1000 + sysIFF_UNNUMBERED = 0x2000 + sysIFF_PRIVATE = 0x8000 +) + +const ( + sizeofLifnum = 0xc + sizeofLifreq = 0x178 + sizeofLifconf = 0x18 + sizeofLifIfinfoReq = 0x10 +) + +type lifnum struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Count int32 +} + +type lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} + +type lifconf struct { + Family uint16 + Pad_cgo_0 [2]byte + Flags int32 + Len int32 + Pad_cgo_1 [4]byte + Lifcu [8]byte +} + +type lifIfinfoReq struct { + Maxhops uint8 + Pad_cgo_0 [3]byte + Reachtime uint32 + Reachretrans uint32 + Maxmtu uint32 +} + +const ( + sysIFT_IPV4 = 0xc8 + sysIFT_IPV6 = 0xc9 + sysIFT_6TO4 = 0xca +) diff --git a/vendor/golang.org/x/net/nettest/conntest.go b/vendor/golang.org/x/net/nettest/conntest.go new file mode 100644 index 0000000..5bd3a8c --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest.go @@ -0,0 +1,456 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. +package nettest + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "math/rand" + "net" + "runtime" + "sync" + "testing" + "time" +) + +var ( + aLongTimeAgo = time.Unix(233431200, 0) + neverTimeout = time.Time{} +) + +// MakePipe creates a connection between two endpoints and returns the pair +// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. 
+// The stop function closes all resources, including c1, c2, and the underlying +// net.Listener (if there is one), and should not be nil. +type MakePipe func() (c1, c2 net.Conn, stop func(), err error) + +// TestConn tests that a net.Conn implementation properly satisfies the interface. +// The tests should not produce any false positives, but may experience +// false negatives. Thus, some issues may only be detected when the test is +// run multiple times. For maximal effectiveness, run the tests under the +// race detector. +func TestConn(t *testing.T, mp MakePipe) { + testConn(t, mp) +} + +type connTester func(t *testing.T, c1, c2 net.Conn) + +func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { + c1, c2, stop, err := mp() + if err != nil { + t.Fatalf("unable to make pipe: %v", err) + } + var once sync.Once + defer once.Do(func() { stop() }) + timer := time.AfterFunc(time.Minute, func() { + once.Do(func() { + t.Error("test timed out; terminating pipe") + stop() + }) + }) + defer timer.Stop() + f(t, c1, c2) +} + +// testBasicIO tests that the data sent on c1 is properly received on c2. +func testBasicIO(t *testing.T, c1, c2 net.Conn) { + want := make([]byte, 1<<20) + rand.New(rand.NewSource(0)).Read(want) + + dataCh := make(chan []byte) + go func() { + rd := bytes.NewReader(want) + if err := chunkedCopy(c1, rd); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } + if err := c1.Close(); err != nil { + t.Errorf("unexpected c1.Close error: %v", err) + } + }() + + go func() { + wr := new(bytes.Buffer) + if err := chunkedCopy(wr, c2); err != nil { + t.Errorf("unexpected c2.Read error: %v", err) + } + if err := c2.Close(); err != nil { + t.Errorf("unexpected c2.Close error: %v", err) + } + dataCh <- wr.Bytes() + }() + + if got := <-dataCh; !bytes.Equal(got, want) { + t.Errorf("transmitted data differs") + } +} + +// testPingPong tests that the two endpoints can synchronously send data to +// each other in a typical request-response pattern. +func testPingPong(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + + pingPonger := func(c net.Conn) { + defer wg.Done() + buf := make([]byte, 8) + var prev uint64 + for { + if _, err := io.ReadFull(c, buf); err != nil { + if err == io.EOF { + break + } + t.Errorf("unexpected Read error: %v", err) + } + + v := binary.LittleEndian.Uint64(buf) + binary.LittleEndian.PutUint64(buf, v+1) + if prev != 0 && prev+2 != v { + t.Errorf("mismatching value: got %d, want %d", v, prev+2) + } + prev = v + if v == 1000 { + break + } + + if _, err := c.Write(buf); err != nil { + t.Errorf("unexpected Write error: %v", err) + break + } + } + if err := c.Close(); err != nil { + t.Errorf("unexpected Close error: %v", err) + } + } + + wg.Add(2) + go pingPonger(c1) + go pingPonger(c2) + + // Start off the chain reaction. + if _, err := c1.Write(make([]byte, 8)); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } +} + +// testRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancelation has occurred. 
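Editor's note: every tester in this harness, including the racy-read tester that follows, is driven through a MakePipe. The simplest conformant implementation wraps net.Pipe; a sketch of a hypothetical test file (note that net.Pipe only gained deadline support in Go 1.10, so the timeout-oriented testers need a newer runtime or a real socket pair):

    package pipe_test

    import (
    	"net"
    	"testing"

    	"golang.org/x/net/nettest"
    )

    func TestNetPipeConn(t *testing.T) {
    	nettest.TestConn(t, func() (c1, c2 net.Conn, stop func(), err error) {
    		// net.Pipe is synchronous and in-memory; no listener to clean up.
    		c1, c2 = net.Pipe()
    		stop = func() {
    			c1.Close()
    			c2.Close()
    		}
    		return c1, c2, stop, nil
    	})
    }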
+func testRacyRead(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancelation has occurred. +func testRacyWrite(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testReadTimeout tests that Read timeouts do not affect Write. +func testReadTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + c1.SetReadDeadline(aLongTimeAgo) + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Write(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// testWriteTimeout tests that Write timeouts do not affect Read. +func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + c1.SetWriteDeadline(aLongTimeAgo) + _, err := c1.Write(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Read(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Read error: %v", err) + } +} + +// testPastTimeout tests that a deadline set in the past immediately times out +// Read and Write requests. +func testPastTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + testRoundtrip(t, c1) + + c1.SetDeadline(aLongTimeAgo) + n, err := c1.Write(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Write count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + n, err = c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + + testRoundtrip(t, c1) +} + +// testPresentTimeout tests that a deadline set while there are pending +// Read and Write operations immediately times out those operations. 
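Editor's note: the deadline semantics these testers rely on can be seen outside the harness. Moving a connection's read deadline into the past from another goroutine unblocks a pending Read with a timeout error; a self-contained TCP sketch (all names local):

    package main

    import (
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	ln, err := net.Listen("tcp", "127.0.0.1:0")
    	if err != nil {
    		panic(err)
    	}
    	defer ln.Close()
    	go func() {
    		c, _ := ln.Accept()
    		if c != nil {
    			defer c.Close()
    			time.Sleep(time.Second) // hold the peer open; send nothing
    		}
    	}()

    	c, err := net.Dial("tcp", ln.Addr().String())
    	if err != nil {
    		panic(err)
    	}
    	defer c.Close()

    	// Unblock the pending Read by moving the deadline into the past.
    	go func() {
    		time.Sleep(50 * time.Millisecond)
    		c.SetReadDeadline(time.Unix(1, 0))
    	}()

    	_, err = c.Read(make([]byte, 1))
    	if ne, ok := err.(net.Error); ok && ne.Timeout() {
    		fmt.Println("read timed out as expected")
    	}
    }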
+func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + deadlineSet := make(chan bool, 1) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + deadlineSet <- true + c1.SetReadDeadline(aLongTimeAgo) + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + n, err := c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Read timed out before deadline is set") + } + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Write timed out before deadline is set") + } + }() +} + +// testFutureTimeout tests that a future deadline will eventually time out +// Read and Write operations. +func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + wg.Add(2) + + c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) + go func() { + defer wg.Done() + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + }() + wg.Wait() + + go chunkedCopy(c2, c2) + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// testCloseTimeout tests that calling Close immediately times out pending +// Read and Write operations. +func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + // Test for cancelation upon connection closure. + c1.SetDeadline(neverTimeout) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + c1.Close() + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Read(buf) + } + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Write(buf) + } + }() +} + +// testConcurrentMethods tests that the methods of net.Conn can safely +// be called concurrently. +func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/20489") + } + go chunkedCopy(c2, c2) + + // The results of the calls may be nonsensical, but this should + // not trigger a race detector warning. + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(7) + go func() { + defer wg.Done() + c1.Read(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.Write(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) + }() + go func() { + defer wg.Done() + c1.SetReadDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.LocalAddr() + }() + go func() { + defer wg.Done() + c1.RemoteAddr() + }() + } + wg.Wait() // At worst, the deadline is set 10ms into the future + + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// checkForTimeoutError checks that the error satisfies the Error interface +// and that Timeout returns true. 
+func checkForTimeoutError(t *testing.T, err error) { + if nerr, ok := err.(net.Error); ok { + if !nerr.Timeout() { + t.Errorf("err.Timeout() = false, want true") + } + } else { + t.Errorf("got %T, want net.Error", err) + } +} + +// testRoundtrip writes something into c and reads it back. +// It assumes that everything written into c is echoed back to itself. +func testRoundtrip(t *testing.T, c net.Conn) { + if err := c.SetDeadline(neverTimeout); err != nil { + t.Errorf("roundtrip SetDeadline error: %v", err) + } + + const s = "Hello, world!" + buf := []byte(s) + if _, err := c.Write(buf); err != nil { + t.Errorf("roundtrip Write error: %v", err) + } + if _, err := io.ReadFull(c, buf); err != nil { + t.Errorf("roundtrip Read error: %v", err) + } + if string(buf) != s { + t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) + } +} + +// resyncConn resynchronizes the connection into a sane state. +// It assumes that everything written into c is echoed back to itself. +// It assumes that 0xff is not currently on the wire or in the read buffer. +func resyncConn(t *testing.T, c net.Conn) { + c.SetDeadline(neverTimeout) + errCh := make(chan error) + go func() { + _, err := c.Write([]byte{0xff}) + errCh <- err + }() + buf := make([]byte, 1024) + for { + n, err := c.Read(buf) + if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { + break + } + if err != nil { + t.Errorf("unexpected Read error: %v", err) + break + } + } + if err := <-errCh; err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// chunkedCopy copies from r to w in fixed-width chunks to avoid +// causing a Write that exceeds the maximum packet size for packet-based +// connections like "unixpacket". +// We assume that the maximum packet size is at least 1024. +func chunkedCopy(w io.Writer, r io.Reader) error { + b := make([]byte, 1024) + _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) + return err +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go16.go b/vendor/golang.org/x/net/nettest/conntest_go16.go new file mode 100644 index 0000000..4cbf48e --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go16.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Avoid using subtests on Go 1.6 and below. + timeoutWrapper(t, mp, testBasicIO) + timeoutWrapper(t, mp, testPingPong) + timeoutWrapper(t, mp, testRacyRead) + timeoutWrapper(t, mp, testRacyWrite) + timeoutWrapper(t, mp, testReadTimeout) + timeoutWrapper(t, mp, testWriteTimeout) + timeoutWrapper(t, mp, testPastTimeout) + timeoutWrapper(t, mp, testPresentTimeout) + timeoutWrapper(t, mp, testFutureTimeout) + timeoutWrapper(t, mp, testCloseTimeout) + timeoutWrapper(t, mp, testConcurrentMethods) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_go17.go b/vendor/golang.org/x/net/nettest/conntest_go17.go new file mode 100644 index 0000000..fa039f0 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_go17.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package nettest + +import "testing" + +func testConn(t *testing.T, mp MakePipe) { + // Use subtests on Go 1.7 and above since it is better organized. 
+ t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) + t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) + t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) + t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) + t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) + t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) + t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) + t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) + t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) + t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) + t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) +} diff --git a/vendor/golang.org/x/net/nettest/conntest_test.go b/vendor/golang.org/x/net/nettest/conntest_test.go new file mode 100644 index 0000000..9f9453f --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest_test.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package nettest + +import ( + "net" + "os" + "runtime" + "testing" + + "golang.org/x/net/internal/nettest" +) + +func TestTestConn(t *testing.T) { + tests := []struct{ name, network string }{ + {"TCP", "tcp"}, + {"UnixPipe", "unix"}, + {"UnixPacketPipe", "unixpacket"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if !nettest.TestableNetwork(tt.network) { + t.Skipf("not supported on %s", runtime.GOOS) + } + + mp := func() (c1, c2 net.Conn, stop func(), err error) { + ln, err := nettest.NewLocalListener(tt.network) + if err != nil { + return nil, nil, nil, err + } + + // Start a connection between two endpoints. + var err1, err2 error + done := make(chan bool) + go func() { + c2, err2 = ln.Accept() + close(done) + }() + c1, err1 = net.Dial(ln.Addr().Network(), ln.Addr().String()) + <-done + + stop = func() { + if err1 == nil { + c1.Close() + } + if err2 == nil { + c2.Close() + } + ln.Close() + switch tt.network { + case "unix", "unixpacket": + os.Remove(ln.Addr().String()) + } + } + + switch { + case err1 != nil: + stop() + return nil, nil, nil, err1 + case err2 != nil: + stop() + return nil, nil, nil, err2 + default: + return c1, c2, stop, nil + } + } + + TestConn(t, mp) + }) + } +} diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go new file mode 100644 index 0000000..56f43bf --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil // import "golang.org/x/net/netutil" + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/vendor/golang.org/x/net/netutil/listen_test.go b/vendor/golang.org/x/net/netutil/listen_test.go new file mode 100644 index 0000000..5e07d7b --- /dev/null +++ b/vendor/golang.org/x/net/netutil/listen_test.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/internal/nettest" +) + +func TestLimitListener(t *testing.T) { + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Log(err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. +func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. deadlock?") + } +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000..4c5ad88 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
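Editor's note: the limiter just shown is the classic buffered-channel semaphore: a send acquires a slot, a receive releases it, and the channel's capacity is the concurrency bound. The same idiom detached from net.Listener:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	sem := make(chan struct{}, 3) // capacity = max concurrent workers

    	var wg sync.WaitGroup
    	for i := 0; i < 10; i++ {
    		wg.Add(1)
    		go func(i int) {
    			defer wg.Done()
    			sem <- struct{}{}        // acquire: blocks while 3 workers run
    			defer func() { <-sem }() // release
    			fmt.Println("worker", i)
    		}(i)
    	}
    	wg.Wait()
    }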
+ +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000..0689bb6 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. 
A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 0000000..a7d8095 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 0000000..553ead7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,134 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
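Concretely, ALL_PROXY selects the dialer and NO_PROXY carves out per-host exceptions via PerHost, as the implementation below shows. A minimal caller sketch (the target address is illustrative):

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// Honors ALL_PROXY (e.g. "socks5://127.0.0.1:1080") and NO_PROXY;
	// falls back to proxy.Direct when they are unset or unparseable.
	d := proxy.FromEnvironment()
	conn, err := d.Dial("tcp", "example.com:80") // illustrative target
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// conn now goes through the configured proxy, or directly.
}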
+func FromEnvironment() Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 0000000..0f31e21 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,215 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
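RegisterDialerType above is the extension point FromURL consults for unknown schemes; TestFromEnvironment below registers an "irc" scheme the same way. A standalone sketch with a hypothetical "logging" scheme (loggingDialer and the scheme name are invented for illustration):

package main

import (
	"log"
	"net"
	"net/url"

	"golang.org/x/net/proxy"
)

// loggingDialer is a hypothetical Dialer that logs and then forwards.
type loggingDialer struct{ forward proxy.Dialer }

func (d loggingDialer) Dial(network, addr string) (net.Conn, error) {
	log.Printf("dial %s %s", network, addr)
	return d.forward.Dial(network, addr)
}

func main() {
	proxy.RegisterDialerType("logging", func(u *url.URL, forward proxy.Dialer) (proxy.Dialer, error) {
		return loggingDialer{forward: forward}, nil
	})
	u, err := url.Parse("logging://unused-host") // only the scheme matters here
	if err != nil {
		log.Fatal(err)
	}
	d, err := proxy.FromURL(u, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	_ = d // d logs each Dial, then hands off to proxy.Direct
}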
+ +package proxy + +import ( + "bytes" + "fmt" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" + "testing" +) + +type proxyFromEnvTest struct { + allProxyEnv string + noProxyEnv string + wantTypeOf Dialer +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + space := func() { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + } + if t.allProxyEnv != "" { + fmt.Fprintf(&buf, "all_proxy=%q", t.allProxyEnv) + } + if t.noProxyEnv != "" { + space() + fmt.Fprintf(&buf, "no_proxy=%q", t.noProxyEnv) + } + return strings.TrimSpace(buf.String()) +} + +func TestFromEnvironment(t *testing.T) { + ResetProxyEnv() + + type dummyDialer struct { + direct + } + + RegisterDialerType("irc", func(_ *url.URL, _ Dialer) (Dialer, error) { + return dummyDialer{}, nil + }) + + proxyFromEnvTests := []proxyFromEnvTest{ + {allProxyEnv: "127.0.0.1:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "ftp://example.com:8000", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {allProxyEnv: "socks5://example.com:8080", noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: &PerHost{}}, + {allProxyEnv: "irc://example.com:8000", wantTypeOf: dummyDialer{}}, + {noProxyEnv: "localhost, 127.0.0.1", wantTypeOf: direct{}}, + {wantTypeOf: direct{}}, + } + + for _, tt := range proxyFromEnvTests { + os.Setenv("ALL_PROXY", tt.allProxyEnv) + os.Setenv("NO_PROXY", tt.noProxyEnv) + ResetCachedEnvironment() + + d := FromEnvironment() + if got, want := fmt.Sprintf("%T", d), fmt.Sprintf("%T", tt.wantTypeOf); got != want { + t.Errorf("%v: got type = %T, want %T", tt, d, tt.wantTypeOf) + } + } +} + +func TestFromURL(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) + + url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) + if err != nil { + t.Fatalf("url.Parse failed: %v", err) + } + proxy, err := FromURL(url, Direct) + if err != nil { + t.Fatalf("FromURL failed: %v", err) + } + _, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Fatalf("net.SplitHostPort failed: %v", err) + } + if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { + t.Fatalf("FromURL.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func TestSOCKS5(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg) + + proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) + if err != nil { + t.Fatalf("SOCKS5 failed: %v", err) + } + if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { + t.Fatalf("SOCKS5.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { + defer wg.Done() + + c, err := gateway.Accept() + if err != nil { + t.Errorf("net.Listener.Accept failed: %v", err) + return + } + defer c.Close() + + b := make([]byte, 32) 
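+	// The client greeting is VER, NMETHODS, then one byte per offered
+	// method: 3 bytes when only no-auth is offered, 4 when username/password
+	// is offered as well (the socks5Domain case, used by the authenticated
+	// FromURL test).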
+ var n int + if typ == socks5Domain { + n = 4 + } else { + n = 3 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } + if typ == socks5Domain { + n = 16 + } else { + n = 10 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { + t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) + return + } + if typ == socks5Domain { + copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) + b = append(b, []byte("localhost")...) + } else { + copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) + } + host, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Errorf("net.SplitHostPort failed: %v", err) + return + } + b = append(b, []byte(net.ParseIP(host).To4())...) + p, err := strconv.Atoi(port) + if err != nil { + t.Errorf("strconv.Atoi failed: %v", err) + return + } + b = append(b, []byte{byte(p >> 8), byte(p)}...) + if _, err := c.Write(b); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } +} + +func ResetProxyEnv() { + for _, env := range []*envOnce{allProxyEnv, noProxyEnv} { + for _, v := range env.names { + os.Setenv(v, "") + } + } + ResetCachedEnvironment() +} + +func ResetCachedEnvironment() { + allProxyEnv.reset() + noProxyEnv.reset() +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000..3fed38e --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,214 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. +func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. 
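Before the implementation of Dial: a typical construction, shown as a sketch. The proxy address is illustrative, nil Auth means no username/password, and proxy.Direct makes the underlying connection to the proxy itself:

package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	d, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := d.Dial("tcp", "example.com:443") // dialed via the proxy
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}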
+func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 0000000..f85a3c3 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,713 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go based on the authoritative +// public suffix list at https://publicsuffix.org/list/effective_tld_names.dat +// +// The version is derived from +// https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat +// and a human-readable form is at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" +// and -version "an explicit version string". + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. 
+ childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +const ( + defaultURL = "https://publicsuffix.org/list/effective_tld_names.dat" + gitCommitURL = "https://api.github.com/repos/publicsuffix/list/commits?path=public_suffix_list.dat" +) + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffixRE is used to check that the entries in the public suffix + // list are in canonical form (after Punycode encoding). Specifically, + // capital letters are not allowed. + validSuffixRE = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + shaRE = regexp.MustCompile(`"sha":"([^"]+)"`) + dateRE = regexp.MustCompile(`"committer":{[^{]+"date":"([^"]+)"`) + + comments = flag.Bool("comments", false, "generate table.go comments, for debugging") + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", defaultURL, "URL of the publicsuffix.org list. If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + if *url != defaultURL { + return fmt.Errorf("-version was not specified, and the -url is not the default one") + } + sha, date, err := gitCommit() + if err != nil { + return err + } + *version = fmt.Sprintf("publicsuffix.org's public_suffix_list.dat, git revision %s (%s)", sha, date) + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffixRE.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): 
+ case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". + default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + if err := generate(printReal, &root, "table.go"); err != nil { + return err + } + if err := generate(printTest, &root, "table_test.go"); err != nil { + return err + } + return nil +} + +func generate(p func(io.Writer, *node) error, root *node, filename string) error { + buf := new(bytes.Buffer) + if err := p(buf, root); err != nil { + return err + } + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + return ioutil.WriteFile(filename, b, 0644) +} + +func gitCommit() (sha, date string, retErr error) { + res, err := http.Get(gitCommitURL) + if err != nil { + return "", "", err + } + if res.StatusCode != http.StatusOK { + return "", "", fmt.Errorf("bad GET status for %s: %d", gitCommitURL, res.Status) + } + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if m := shaRE.FindSubmatch(b); m != nil { + sha = string(m[1]) + } + if m := dateRE.FindSubmatch(b); m != nil { + date = string(m[1]) + } + if sha == "" || date == "" { + retErr = fmt.Errorf("could not find commit SHA and date in %s", gitCommitURL) + } + return sha, date, retErr +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. 
+const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + if *comments { + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } else { + fmt.Fprintf(w, "0x%x,\n", c) + } + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. 
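A worked instance of the doc comment above: for input {"abc", "bcdef"}, the length-2 suffix/prefix overlap "bc" lets crush emit "abcdef", which still contains both inputs as substrings. A toy two-string version of the same merge, for illustration only (overlap2 is not part of the generator):

package main

import (
	"fmt"
	"strings"
)

// overlap2 appends b to a, collapsing the longest suffix-of-a /
// prefix-of-b overlap.
func overlap2(a, b string) string {
	for n := len(b); n > 0; n-- {
		if n <= len(a) && strings.HasSuffix(a, b[:n]) {
			return a + b[n:]
		}
	}
	return a + b
}

func main() {
	fmt.Println(overlap2("abc", "bcdef")) // abcdef
}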
+func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 0000000..8bbf3bc --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,135 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. 
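For orientation, typical use of this package, including the cookie-jar hookup via the List variable defined below. The outputs noted in comments assume the standard compiled table, in line with the eTLD+1 test cases later in this diff:

package main

import (
	"fmt"
	"net/http"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	ps, icann := publicsuffix.PublicSuffix("www.example.co.uk")
	fmt.Println(ps, icann) // co.uk true

	etld1, err := publicsuffix.EffectiveTLDPlusOne("www.example.co.uk")
	if err == nil {
		fmt.Println(etld1) // example.co.uk
	}

	// The List variable lets net/http cookie jars apply the same rules.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err == nil {
		_ = &http.Client{Jar: jar}
	}
}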
+ +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1< len(b[j]) +} + +// eTLDPlusOneTestCases come from +// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt +var eTLDPlusOneTestCases = []struct { + domain, want string +}{ + // Empty input. + {"", ""}, + // Unlisted TLD. + {"example", ""}, + {"example.example", "example.example"}, + {"b.example.example", "example.example"}, + {"a.b.example.example", "example.example"}, + // TLD with only 1 rule. + {"biz", ""}, + {"domain.biz", "domain.biz"}, + {"b.domain.biz", "domain.biz"}, + {"a.b.domain.biz", "domain.biz"}, + // TLD with some 2-level rules. + {"com", ""}, + {"example.com", "example.com"}, + {"b.example.com", "example.com"}, + {"a.b.example.com", "example.com"}, + {"uk.com", ""}, + {"example.uk.com", "example.uk.com"}, + {"b.example.uk.com", "example.uk.com"}, + {"a.b.example.uk.com", "example.uk.com"}, + {"test.ac", "test.ac"}, + // TLD with only 1 (wildcard) rule. + {"mm", ""}, + {"c.mm", ""}, + {"b.c.mm", "b.c.mm"}, + {"a.b.c.mm", "b.c.mm"}, + // More complex TLD. + {"jp", ""}, + {"test.jp", "test.jp"}, + {"www.test.jp", "test.jp"}, + {"ac.jp", ""}, + {"test.ac.jp", "test.ac.jp"}, + {"www.test.ac.jp", "test.ac.jp"}, + {"kyoto.jp", ""}, + {"test.kyoto.jp", "test.kyoto.jp"}, + {"ide.kyoto.jp", ""}, + {"b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"a.b.ide.kyoto.jp", "b.ide.kyoto.jp"}, + {"c.kobe.jp", ""}, + {"b.c.kobe.jp", "b.c.kobe.jp"}, + {"a.b.c.kobe.jp", "b.c.kobe.jp"}, + {"city.kobe.jp", "city.kobe.jp"}, + {"www.city.kobe.jp", "city.kobe.jp"}, + // TLD with a wildcard rule and exceptions. + {"ck", ""}, + {"test.ck", ""}, + {"b.test.ck", "b.test.ck"}, + {"a.b.test.ck", "b.test.ck"}, + {"www.ck", "www.ck"}, + {"www.www.ck", "www.ck"}, + // US K12. 
+ {"us", ""}, + {"test.us", "test.us"}, + {"www.test.us", "test.us"}, + {"ak.us", ""}, + {"test.ak.us", "test.ak.us"}, + {"www.test.ak.us", "test.ak.us"}, + {"k12.ak.us", ""}, + {"test.k12.ak.us", "test.k12.ak.us"}, + {"www.test.k12.ak.us", "test.k12.ak.us"}, + // Punycoded IDN labels + {"xn--85x722f.com.cn", "xn--85x722f.com.cn"}, + {"xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"www.xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn"}, + {"shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn"}, + {"xn--55qx5d.cn", ""}, + {"xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"www.xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s"}, + {"shishi.xn--fiqs8s", "shishi.xn--fiqs8s"}, + {"xn--fiqs8s", ""}, +} + +func TestEffectiveTLDPlusOne(t *testing.T) { + for _, tc := range eTLDPlusOneTestCases { + got, _ := EffectiveTLDPlusOne(tc.domain) + if got != tc.want { + t.Errorf("%q: got %q, want %q", tc.domain, got, tc.want) + } + } +} diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go new file mode 100644 index 0000000..549511c --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -0,0 +1,9419 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = "publicsuffix.org's public_suffix_list.dat, git revision 38b238d6324042f2c2e6270459d1f4ccfe789fba (2017-08-28T20:09:01Z)" + +const ( + nodesBitsChildren = 10 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 +) + +// numTLD is the number of top level domains. +const numTLD = 1557 + +// Text is the combined text of all labels. 
+const text = "bifukagawalterbihorologyukuhashimoichinosekigaharaxastronomy-gat" + + "ewaybomloans3-ca-central-1bikedagestangeorgeorgiabilbaogakihokum" + + "akogengerdalces3-website-us-west-1billustrationikinuyamashinashi" + + "kitchenikkoebenhavnikolaevents3-website-us-west-2bioddabirdartce" + + "nterprisesakikugawarszawashingtondclkariyameldalindesnesakurainv" + + "estmentsakyotanabellunord-odalivornomutashinainzais-a-candidateb" + + "irkenesoddtangenovaraumalopolskanlandrayddnsfreebox-oslocus-3bir" + + "thplacebitballooningladefinimakanegasakindlegokasells-for-lessal" + + "angenikonantankarlsoyurihonjoyentattoolsztynsettlersalondonetska" + + "rmoyusuharabjarkoyusuisserveexchangebjerkreimbalsfjordgcahcesuol" + + "ocalhostrodawaraugustowadaegubalsanagochihayaakasakawaharanzanne" + + "frankfurtarumizusawabkhaziamallamagazineat-url-o-g-i-naturalhist" + + "orymuseumcentereviewskrakowebredirectmeteorappaleobihirosakikami" + + "jimabogadocscbgdyniabruzzoologicalvinklein-addrammenuernberggfar" + + "merseinebinagisochildrensgardenaturalsciencesnaturelles3-ap-nort" + + "heast-2ixboxenapponazure-mobileastcoastaldefenceatonsberg12000em" + + "mafanconagawakayamadridvagsoyericssonyoursidealerimo-i-ranaamesj" + + "evuemielno-ip6bjugninohekinannestadraydnsaltdalombardiamondsalva" + + "dordalibabalatinord-frontierblockbustermezjavald-aostaplesalzbur" + + "glassassinationalheritagematsubarakawagoebloombergbauerninomiyak" + + "onojosoyrorosamegawabloxcmsamnangerbluedancebmoattachmentsamsclu" + + "bindalombardynamisches-dnsamsungleezebmsandvikcoromantovalle-d-a" + + "ostathellebmwedeployuufcfanirasakis-a-catererbnpparibaselburgliw" + + "icebnrwegroweibolzanorddalomzaporizhzheguris-a-celticsfanishiaza" + + "is-a-chefarmsteadrivelandrobaknoluoktachikawalbrzycharternidrudu" + + "nsanfranciscofreakunedre-eikerbonnishigoppdalorenskoglobalashovh" + + "achinohedmarkarpaczeladzlglobodoes-itvedestrandupontariobookingl" + + "ogoweirboomladbrokesangobootsanjournalismailillesandefjordurbana" + + "mexnetlifyis-a-conservativefsnillfjordurhamburgloppenzaogashimad" + + "achicagoboatsannanishiharaboschaefflerdalotenkawabostikaruizawab" + + "ostonakijinsekikogentingmbhartiffanyuzawabotanicalgardenishiizun" + + "azukis-a-cpadualstackspace-to-rentalstomakomaibarabotanicgardeni" + + "shikatakayamatta-varjjataxihuanishikatsuragit-repostfoldnavybota" + + "nybouncemerckmsdnipropetrovskjervoyagebounty-fullensakerryproper" + + "tiesannohelplfinancialotteboutiquebecngminakamichiharabozentsuji" + + "iebplacedekagaminordkappgafanpachigasakievennodesashibetsukumiya" + + "mazonawsaarlandyndns-at-workinggroupalmspringsakerbrandywinevall" + + "eybrasiliabresciabrindisibenikebristoloseyouripirangapartmentsan" + + "okarumaifarsundyndns-blogdnsantabarbarabritishcolumbialowiezachp" + + "omorskienishikawazukamitsuebroadcastlefrakkestadyndns-freeboxost" + + "rowwlkpmgmodenakatombetsumitakagiizebroadwaybroke-itgorybrokerbr" + + "onnoysundyndns-homednsantacruzsantafedjeffersonishimerabrotherme" + + "saverdeatnurembergmxfinitybrowsersafetymarketsanukis-a-cubicle-s" + + "lavellinotteroybrumunddalottokonamegatakasugais-a-democratjeldsu" + + "ndyndns-ipamperedchefashionishinomiyashironobrunelasticbeanstalk" + + "asaokaminoyamaxunusualpersonishinoomotegobrusselsaotomeloyalistj" + + "ordalshalsenishinoshimattelefonicarbonia-iglesias-carboniaiglesi" + + "ascarboniabruxellesapodlasiellaktyubinskiptveterinairealtorlandy" + + "ndns-mailouvrehabmerbryanskleppanamabrynewjerseybuskerudinewport" + + 
"lligatjmaxxxjaworznowtv-infoodnetworkshoppingrimstadyndns-office" + + "-on-the-webcambulancebuzenishiokoppegardyndns-picsapporobuzzpana" + + "sonicateringebugattipschlesischesardegnamsskoganeis-a-designerim" + + "arumorimachidabwfastlylbaltimore-og-romsdalillyokozehimejibigawa" + + "ukraanghkeymachinewhampshirebungoonord-aurdalpha-myqnapcloudacce" + + "sscambridgestonemurorangeiseiyoichippubetsubetsugaruhrhcloudns3-" + + "eu-central-1bzhitomirumalselvendrellowiczest-le-patronishitosash" + + "imizunaminamiashigaracompute-1computerhistoryofscience-fictionco" + + "msecuritytacticsaseboknowsitallvivano-frankivskasuyanagawacondos" + + "hichinohealth-carereformitakeharaconferenceconstructionconsulado" + + "esntexistanbullensvanguardyndns-workisboringrueconsultanthropolo" + + "gyconsultingvollcontactoyonocontemporaryarteducationalchikugodoh" + + "aruovatoyookannamifunecontractorskenconventureshinodearthdfcbank" + + "aszubycookingchannelsdvrdnsdojoetsuwanouchikujogaszczytnordreisa" + + "-geekatowicecoolkuszkolahppiacenzaganquannakadomarineustarhubsas" + + "katchewancooperaunitemp-dnsassaris-a-gurulsandoycopenhagencyclop" + + "edichernihivanovodkagoshimalvikashibatakashimaseratis-a-financia" + + "ladvisor-aurdalucaniacorsicagliaridagawashtenawdev-myqnapcloudap" + + "plebtimnetzwhoswhokksundyndns1corvettenrightathomeftparliamentoy" + + "osatoyakokonoecosenzakopanerairguardiann-arboretumbriacosidnsfor" + + "-better-thanawatchesatxn--12c1fe0bradescorporationcostumedio-cam" + + "pidano-mediocampidanomediocouchpotatofriesaudacouncilcouponsauhe" + + "radynnsavannahgacoursesaves-the-whalessandria-trani-barletta-and" + + "riatranibarlettaandriacqhachiojiyahoooshikamaishimodatecranbrook" + + "uwanalyticsavonaplesaxocreditcardynulvikatsushikabeeldengeluidyn" + + "v6creditunioncremonashgabadaddjambylcrewiiheyakagecricketrzyncri" + + "meast-kazakhstanangercrotonexus-2crownprovidercrsvparmacruisesbs" + + "chokoladencryptonomichigangwoncuisinellair-traffic-controlleycul" + + "turalcentertainmentoyotaris-a-hard-workercuneocupcakecxn--12cfi8" + + "ixb8lcyberlevagangaviikanonjis-a-huntercymrussiacyonabarunzencyo" + + "utheworkpccwildlifedorainfracloudcontrolledogawarabikomaezakirun" + + "orfolkebibleikangerfidonnakaniikawatanagurafieldfiguerestauranto" + + "yotsukaidownloadfilateliafilegearfilminamiechizenfinalfinancefin" + + "eartscientistockholmestrandfinlandfinnoyfirebaseapparscjohnsonfi" + + "renzefirestonefirmdaleirvikatsuyamasfjordenfishingolffanscotland" + + "fitjarfitnessettlementoyourafjalerflesbergulenflickragerotikakeg" + + "awaflightscrapper-siteflirflogintogurafloraflorencefloridavvesii" + + "dazaifudaigojomedizinhistorischescrappingunmarburguovdageaidnusl" + + "ivinghistoryfloripaderbornfloristanohatakahamangyshlakasamatsudo" + + "ntexisteingeekaufenflorogerserveftpartis-a-landscaperflowerserve" + + "game-serversicherungushikamifuranortonflynnhostingxn--1ck2e1bamb" + + "leclercasadelamonedatingjerstadotsuruokakudamatsuemrflynnhubanan" + + "arepublicaseihichisobetsuitainairforcechirealmetlifeinsuranceu-1" + + "fndfor-ourfor-someethnologyfor-theaterforexrothachirogatakahatak" + + "aishimogosenforgotdnservehalflifestyleforli-cesena-forlicesenafo" + + "rlikescandynamic-dnservehttpartnerservehumourforsaleitungsenfors" + + "andasuolodingenfortmissoulancashireggio-calabriafortworthadanose" + + "gawaforuminamifuranofosneserveirchernovtsykkylvenetogakushimotog" + + "anewyorkshirecipesaro-urbino-pesarourbinopesaromasvuotnaharimamu" + + "rogawassamukawataricohdatsunanjoburgriwataraidyndns-remotewdyndn" + + 
"s-serverdaluccapitalonewspaperfotaruis-a-lawyerfoxfordebianfredr" + + "ikstadtvserveminecraftoystre-slidrettozawafreeddnsgeekgalaxyfree" + + "masonryfreesitexascolipicenogiftservemp3freetlservep2partservepi" + + "cservequakefreiburgfreightcminamiiselectozsdeloittevadsoccertifi" + + "cationfresenius-4fribourgfriuli-v-giuliafriuli-ve-giuliafriuli-v" + + "egiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafr" + + "iuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafri" + + "uliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesign" + + "frognfrolandfrom-akrehamnfrom-alfrom-arfrom-azfrom-capebretonami" + + "astalowa-wolayangroupartyfrom-coguchikuzenfrom-ctrani-andria-bar" + + "letta-trani-andriafrom-dchirurgiens-dentistes-en-francefrom-dedy" + + "n-ip24from-flanderservicesettsurgeonshalloffamemergencyachtsevas" + + "topolefrom-gausdalfrom-higashiagatsumagoizumizakirkenesevenassis" + + "icilyfrom-iafrom-idfrom-ilfrom-incheonfrom-ksewilliamhillfrom-ky" + + "owariasahikawafrom-lancasterfrom-maniwakuratextileksvikautokeino" + + "from-mdfrom-megurokunohealthcareersharis-a-liberalfrom-microsoft" + + "bankazofrom-mnfrom-modellingfrom-msharpasadenamsosnowiechiryukyu" + + "ragifuchungbukharafrom-mtnfrom-nchitachinakagawatchandclockashih" + + "arafrom-ndfrom-nefrom-nhktraniandriabarlettatraniandriafrom-njcb" + + "nlfrom-nminamiizukamishihoronobeauxartsandcraftshawaiijimarugame" + + "-hostrolekamikitayamatsuris-a-libertarianfrom-nvalled-aostatoilf" + + "rom-nyfrom-ohkurafrom-oketohmannorth-kazakhstanfrom-orfrom-padov" + + "aksdalfrom-pratohnoshooguyfrom-rivnefrom-schoenbrunnfrom-sdfrom-" + + "tnfrom-txn--1ctwolominamatakkokamiokamiminershellaspeziafrom-uta" + + "zuerichardlillehammerfeste-ipassagenshimojis-a-linux-useranishia" + + "ritabashijonawatefrom-val-daostavalleyfrom-vtranoyfrom-wafrom-wi" + + "elunnerfrom-wvalledaostavangerfrom-wyfrosinonefrostalbanshimokaw" + + "afroyahikobeardubaiduckdnshimokitayamafstavernfujiiderafujikawag" + + "uchikonefujiminohtawaramotoineppubolognakanotoddenfujinomiyadafu" + + "jiokayamansionshimonitayanagithubusercontentransportransurlfujis" + + "atoshonairtelecitychyattorneyagawakuyabukidsmynasushiobaragusart" + + "shimonosekikawafujisawafujishiroishidakabiratoridefenseljordfuji" + + "tsurugashimaritimekeepingfujixeroxn--1lqs03nfujiyoshidafukayabea" + + "tshimosuwalkis-a-llamarylandfukuchiyamadafukudominichitosetogits" + + "uldalucernefukuis-a-musicianfukumitsubishigakirovogradoyfukuokaz" + + "akiryuohadselfipassenger-associationfukuroishikarikaturindalfuku" + + "sakisarazurewebsiteshikagamiishibukawafukuyamagatakaharufunabash" + + "iriuchinadafunagatakahashimamakishiwadafunahashikamiamakusatsuma" + + "sendaisennangonohejis-a-nascarfanfundaciofuoiskujukuriyamanxn--1" + + "lqs71dfuosskoczowinbarcelonagasakikonaikawachinaganoharamcoacham" + + "pionshiphoptobishimaizurugbydgoszczecinemakeupowiathletajimabari" + + "akembuchikumagayagawakkanaibetsubamericanfamilydscloudcontrolapp" + + "spotagerfurnitureggio-emilia-romagnakasatsunairtrafficplexus-1fu" + + "rubiraquarellebesbyenglandfurudonostiaarpaviancarrierfurukawais-" + + "a-nurservebbshimotsukefusodegaurafussagamiharafutabayamaguchinom" + + "igawafutboldlygoingnowhere-for-moregontrailroadfuttsurugimperiaf" + + "uturecmshimotsumafuturehostingfuturemailingfvgfylkesbiblackfrida" + + "yfyresdalhangglidinghangoutsystemscloudfunctionshinichinanhannan" + + "mokuizumodernhannotaireshinjournalisteinkjerusalembroideryhanyuz" + + "enhapmirhareidsbergenharstadharvestcelebrationhasamarcheapgfoggi" + + 
"ahasaminami-alpssells-itrapaniimimatakatoris-a-playerhashbanghas" + + "udahasura-appharmacienshinjukumanohasvikazunohatogayaitakamoriok" + + "aluganskolevangerhatoyamazakitahiroshimarnardalhatsukaichikaisei" + + "s-a-republicancerresearchaeologicaliforniahattfjelldalhayashimam" + + "otobungotakadapliernewmexicodyn-vpnplusterhazuminobusellsyourhom" + + "egoodshinkamigotoyohashimotoshimahboehringerikehelsinkitakamiizu" + + "misanofidelityhembygdsforbundhemneshinshinotsurgeryhemsedalhepfo" + + "rgeherokussldheroyhgtvallee-aosteroyhigashichichibunkyonanaoshim" + + "ageandsoundandvisionhigashihiroshimanehigashiizumozakitakatakana" + + "beautysfjordhigashikagawahigashikagurasoedahigashikawakitaaikita" + + "kyushuaiahigashikurumeiwamarriottravelchannelhigashimatsushimars" + + "hallstatebankddielddanuorrikuzentakataiwanairlinebraskaunjargals" + + "aceohigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycle" + + "shinshirohigashinarusembokukitamidoris-a-rockstarachowicehigashi" + + "nehigashiomihachimanchesterhigashiosakasayamanakakogawahigashish" + + "irakawamatakanezawahigashisumiyoshikawaminamiaikitamotosumy-rout" + + "erhigashitsunotogawahigashiurausukitanakagusukumoduminamiminowah" + + "igashiyamatokoriyamanashifteditchyouripharmacyshintokushimahigas" + + "hiyodogawahigashiyoshinogaris-a-socialistmein-vigorgehiraizumisa" + + "tohobby-sitehirakatashinagawahiranais-a-soxfanhirarahiratsukagaw" + + "ahirayaizuwakamatsubushikusakadogawahistorichouseshintomikasahar" + + "ahitachiomiyagildeskaliszhitachiotagooglecodespotravelersinsuran" + + "cehitraeumtgeradellogliastradinghjartdalhjelmelandholeckobierzyc" + + "eholidayhomeiphdhomelinkfhappouhomelinuxn--1qqw23ahomeofficehome" + + "securitymaceratakaokamakurazakitashiobarahomesecuritypchloehomes" + + "enseminehomeunixn--2m4a15ehondahoneywellbeingzonehongopocznorthw" + + "esternmutualhonjyoitakarazukameokameyamatotakadahornindalhorseou" + + "lminamiogunicomcastresistancehortendofinternet-dnshinyoshitomiok" + + "amogawahospitalhoteleshiojirishirifujiedahotmailhoyangerhoylande" + + "troitskydivinghumanitieshioyanaizuhurdalhurumajis-a-studentalhyl" + + "lestadhyogoris-a-teacherkassymantechnologyhyugawarahyundaiwafune" + + "hzchocolatemasekashiwarajewishartgalleryjfkharkovalleeaosteigenj" + + "gorajlcube-serverrankoshigayakumoldelmenhorstagejlljmphilipsynol" + + "ogy-diskstationjnjcphilatelyjoyokaichibahccavuotnagareyamalborkd" + + "alwaysdatabaseballangenoamishirasatochigiessensiositelemarkherso" + + "njpmorganjpnjprshiraokananporovigotpantheonsitejuniperjurkoshuna" + + "ntokigawakosugekotohiradomainshiratakahagitlaborkotourakouhokuta" + + "makis-an-artistcgrouphiladelphiaareadmyblogsitekounosupplieshish" + + "ikuis-an-engineeringkouyamashikokuchuokouzushimasoykozagawakozak" + + "is-an-entertainerkozowindmillkpnkppspdnshisognekrasnodarkredston" + + "ekristiansandcatshisuifuelblagdenesnaaseralingenkainanaejrietisa" + + "latinabenonichoshibuyachiyodavvenjargaulardalutskasukabedzin-the" + + "-bandaioiraseeklogest-mon-blogueurovisionisshingugekristiansundk" + + "rodsheradkrokstadelvaldaostarnbergkryminamisanrikubetsupportrent" + + "ino-alto-adigekumatorinokumejimasudakumenanyokkaichiropractichoy" + + "odobashichikashukujitawarakunisakis-bykunitachiarailwaykunitomig" + + "usukumamotoyamassa-carrara-massacarraramassabusinessebyklegalloc" + + "alhistoryggeelvinckhmelnytskyivanylvenicekunneppulawykunstsammlu" + + "ngkunstunddesignkuokgrouphoenixn--30rr7ykureggioemiliaromagnakay" + + "amatsumaebashikshacknetrentino-altoadigekurgankurobelaudiblebork" + + 
"angerkurogimilanokuroisoftwarendalenugkuromatsunais-certifieduca" + + "torahimeshimamateramochizukirakurotakikawasakis-foundationkushir" + + "ogawakustanais-gonekusupplykutchanelkutnokuzumakis-into-animelbo" + + "urnekvafjordkvalsundkvamlidlugolekafjordkvanangenkvinesdalkvinnh" + + "eradkviteseidskogkvitsoykwpspiegelkzmisugitokorozawamitourismola" + + "ngevagrarchaeologyeongbuknx-serveronakatsugawamitoyoakemiuramiya" + + "zumiyotamanomjondalenmlbfanmonstermonticellolmontrealestatefarme" + + "quipmentrentino-s-tirollagrigentomologyeonggiehtavuoatnagaivuotn" + + "agaokakyotambabia-goracleaningatlantabusebastopologyeongnamegawa" + + "keisenbahnmonza-brianzaporizhzhiamonza-e-della-brianzapposhitara" + + "mamonzabrianzaptokuyamatsusakahoginankokubunjis-leetnedalmonzaeb" + + "rianzaramonzaedellabrianzamoonscalezajskolobrzegersundmoparachut" + + "ingmordoviajessheiminamitanemoriyamatsushigemoriyoshimilitarymor" + + "monmouthagakhanamigawamoroyamatsuuramortgagemoscowindowshizukuis" + + "himofusaintlouis-a-bruinsfanmoseushistorymosjoenmoskeneshizuokan" + + "azawamosshoujis-lostre-toteneis-an-accountantshirahamatonbetsurn" + + "adalmosvikomaganemoteginowaniihamatamakawajimaoris-not-certified" + + "unetbankhakassiamoviemovistargardmtpchristiansburgrondarmtranbym" + + "uenstermuginozawaonsenmuikamisunagawamukochikushinonsenergymulho" + + "uservebeermunakatanemuncieszynmuosattemuphonefosshowamurmanskoma" + + "kiyosunndalmurotorcraftrentino-stirolmusashimurayamatsuzakis-sav" + + "edmusashinoharamuseetrentino-sud-tirolmuseumverenigingmusicargod" + + "addynaliascoli-picenogataijis-slickharkivgucciprianiigataishinom" + + "akinderoymutsuzawamy-vigorlicemy-wanggouvicenzamyactivedirectory" + + "myasustor-elvdalmycdn77-securecifedexhibitionmyddnskingmydissent" + + "rentino-sudtirolmydrobofagemydshowtimemorialmyeffectrentino-sued" + + "-tirolmyfirewallonieruchomoscienceandindustrynmyfritzmyftpaccess" + + "hriramsterdamnserverbaniamyfusionmyhome-serversaillesienarashino" + + "mykolaivaolbia-tempio-olbiatempioolbialystokkepnoduminamiuonumat" + + "sumotofukemymailermymediapchristmasakimobetsuliguriamyokohamamat" + + "sudamypephotographysiomypetsigdalmyphotoshibajddarchitecturealty" + + "dalipaymypsxn--32vp30hagebostadmysecuritycamerakermyshopblocksil" + + "komatsushimashikizunokunimihoboleslawiechonanbuilderschmidtre-ga" + + "uldalukowhalingroks-thisayamanobeokalmykiamytis-a-bloggermytulea" + + "piagetmyipictetrentino-suedtirolmyvnchromedicaltanissettairamywi" + + "reitrentinoa-adigepinkomforbarclays3-us-east-2pioneerpippupictur" + + "esimple-urlpiszpittsburghofauskedsmokorsetagayasells-for-usgarde" + + "npiwatepixolinopizzapkommunalforbundplanetariuminamiyamashirokaw" + + "anabelembetsukubanklabudhabikinokawabarthaebaruminamimakis-a-pai" + + "nteractivegarsheis-a-patsfanplantationplantslingplatformshangril" + + "anslupskommuneplaystationplazaplchryslerplumbingopmnpodzonepohlp" + + "oivronpokerpokrovskomonopolitiendapolkowicepoltavalle-aostarostw" + + "odzislawinnersnoasaitamatsukuris-uberleetrdpomorzeszowiosokaneya" + + "mazoepordenonepornporsangerporsanguidell-ogliastraderporsgrunnan" + + "poznanpraxis-a-bookkeeperugiaprdpreservationpresidioprgmrprimelh" + + "uscultureisenprincipeprivatizehealthinsuranceprochowiceproductio" + + "nsokndalprofbsbxn--12co0c3b4evalleaostaticschuleprogressivegasia" + + "promombetsurfbx-oschwarzgwangjuifminamidaitomangotsukisofukushim" + + "aparocherkasyno-dschweizpropertyprotectionprotonetrentinoaadigep" + + "rudentialpruszkowitdkomorotsukamisatokamachintaifun-dnsaliasdabu" + + 
"rprzeworskogptplusdecorativeartsolarssonpvtrentinoalto-adigepwch" + + "ungnamdalseidfjordyndns-weberlincolniyodogawapzqldqponqslgbtrent" + + "inoaltoadigequicksytesolognequipelementsolundbeckomvuxn--2scrj9c" + + "hoseiroumuenchenissandnessjoenissayokoshibahikariwanumatakazakis" + + "-a-greenissedaluroyqvchurchaseljeepsongdalenviknagatorodoystufft" + + "oread-booksnesomnaritakurashikis-very-badajozorastuttgartrentino" + + "sudtirolsusakis-very-evillagesusonosuzakaniepcesuzukanmakiwakuni" + + "gamidsundsuzukis-very-goodhandsonsvalbardunloppacificirclegnicaf" + + "ederationsveiosvelvikongsvingersvizzerasvn-reposooswedenswidnica" + + "rtierswiebodzindianapolis-a-anarchistoireggiocalabriaswiftcovers" + + "winoujscienceandhistoryswisshikis-very-nicesynology-dsopotrentin" + + "os-tirolturystykanoyaltakasakiwientuscanytushuissier-justicetuva" + + "lle-daostatic-accessorreisahayakawakamiichikawamisatotaltuxfamil" + + "ytwmailvbargainstitutelevisionaustdalimanowarudaustevollavangena" + + "turbruksgymnaturhistorisches3-eu-west-1venneslaskerrylogisticsor" + + "tlandvestfoldvestnesoruminanovestre-slidreamhostersouthcarolinaz" + + "awavestre-totennishiawakuravestvagoyvevelstadvibo-valentiavibova" + + "lentiavideovillaskimitsubatamicable-modemoneyvinnicartoonartdeco" + + "ffeedbackplaneapplinzis-very-sweetpeppervinnytsiavipsinaappilots" + + "irdalvirginiavirtualvirtueeldomeindianmarketingvirtuelvisakataki" + + "nouevistaprinternationalfirearmsouthwestfalenviterboltrevisohugh" + + "esor-odalvivoldavixn--3bst00mincommbankmpspbarclaycards3-sa-east" + + "-1vlaanderenvladikavkazimierz-dolnyvladimirvlogoipimientaketomis" + + "atolgavolkswagentsowavologdanskonskowolawavolvolkenkundenvolyngd" + + "alvossevangenvotevotingvotoyonakagyokutourspjelkavikongsbergwloc" + + "lawekonsulatrobeepilepsydneywmflabspreadbettingworldworse-thanda" + + "wowithgoogleapisa-hockeynutsiracusakakinokiawpdevcloudwritesthis" + + "blogsytewroclawithyoutubeneventoeidsvollwtcircustomerwtfbxoscien" + + "cecentersciencehistorywuozuwwwiwatsukiyonowruzhgorodeowzmiuwajim" + + "axn--42c2d9axn--45br5cylxn--45brj9citadeliveryxn--45q11citicatho" + + "licheltenham-radio-opencraftrainingripescaravantaaxn--4gbriminin" + + "gxn--4it168dxn--4it797kooris-an-actorxn--4pvxs4allxn--54b7fta0cc" + + "ivilaviationxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49civilisati" + + "onxn--5rtq34kopervikhmelnitskiyamashikexn--5su34j936bgsgxn--5tzm" + + "5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civili" + + "zationxn--80adxhkspydebergxn--80ao21axn--80aqecdr1axn--80asehdba" + + "rreauctionaval-d-aosta-valleyolasiteu-2xn--80aswgxn--80audnedaln" + + "xn--8ltr62koryokamikawanehonbetsurutaharaxn--8pvr4uxn--8y0a063ax" + + "n--90a3academy-firewall-gatewayxn--90aeroportalaheadjudaicaaarbo" + + "rteaches-yogasawaracingroks-theatreexn--90aishobaraomoriguchihar" + + "ahkkeravjuedischesapeakebayernrtritonxn--90azhytomyrxn--9dbhblg6" + + "dietcimdbarrel-of-knowledgemologicallimitediscountysvardolls3-us" + + "-gov-west-1xn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aropor" + + "t-byandexn--3ds443gxn--asky-iraxn--aurskog-hland-jnbarrell-of-kn" + + "owledgeologyombondiscoveryomitanobninskarasjohkaminokawanishiaiz" + + "ubangeu-3utilitiesquare7xn--avery-yuasakegawaxn--b-5gaxn--b4w605" + + "ferdxn--bck1b9a5dre4civilwarmanagementjxn--0trq7p7nnxn--bdddj-mr" + + "abdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccav" + + "uotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsu" + + "rreyxn--bjddar-ptamayufuettertdasnetzxn--blt-elabourxn--bmlo-gra" + + 
"ingerxn--bod-2naroyxn--brnny-wuaccident-investigation-aptiblease" + + "ating-organicbcn-north-1xn--brnnysund-m8accident-prevention-webh" + + "openairbusantiquest-a-la-maisondre-landebudapest-a-la-masionionj" + + "ukudoyamagentositelekommunikationthewifiat-band-campaniaxn--brum" + + "-voagatroandinosaurepbodynathomebuiltrentinosued-tirolxn--btsfjo" + + "rd-9zaxn--c1avgxn--c2br7gxn--c3s14minnesotaketakatsukis-into-car" + + "shiranukanagawaxn--cck2b3barsyonlinewhollandishakotanavigationav" + + "oibmdisrechtranakaiwamizawaweddingjesdalimoliserniaustinnatuurwe" + + "tenschappenaumburgjerdrumckinseyokosukanzakiyokawaragrocerybnika" + + "hokutobamaintenancebetsuikicks-assedic66xn--cg4bkis-with-theband" + + "ovre-eikerxn--ciqpnxn--clchc0ea0b2g2a9gcdn77-sslattumintelligenc" + + "exn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr" + + "694bashkiriaustraliaisondriodejaneirochesterxn--czrs0trogstadxn-" + + "-czru2dxn--czrw28basilicataniaustrheimatunduhrennesoyokotebinore" + + "-og-uvdalaziobiraskvolloabathsbcasacamdvrcampobassociatestingjem" + + "nes3-ap-southeast-1xn--d1acj3basketballyngenavuotnaklodzkodairau" + + "thordalandroiddnss3-eu-west-2xn--d1alfaromeoxn--d1atromsaitomobe" + + "llevuelosangelesjaguarmeniaxn--d5qv7z876claimsardiniaxn--davvenj" + + "rga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluw" + + "erxn--drbak-wuaxn--dyry-iraxn--e1a4clanbibaidarq-axn--eckvdtc9dx" + + "n--efvn9srlxn--efvy88haibarakisosakitagawaxn--ehqz56nxn--elqq16h" + + "air-surveillancexn--estv75gxn--eveni-0qa01gaxn--f6qx53axn--fct42" + + "9kosakaerodromegallupinbarefootballfinanzgoraurskog-holandroverh" + + "alla-speziaetnagahamaroygardenebakkeshibechambagriculturennebude" + + "jjudygarlandd-dnshome-webservercellikes-piedmontblancomeeres3-ap" + + "-south-1kappchizippodhaleangaviikadenadexetereport3l3p0rtargets-" + + "itargivestbytomaritimobaravennagasuke12hpalace164lima-cityeatsel" + + "inogradultarnobrzegyptianativeamericanantiques3-ap-northeast-133" + + "7xn--fhbeiarnxn--finny-yuaxn--fiq228c5hsrtrentinostirolxn--fiq64" + + "batodayonagoyautomotivecoalvdalaskanittedallasalleasinglesurance" + + "rtmgretagajoboji234xn--fiqs8srvaporcloudxn--fiqz9storagexn--fjor" + + "d-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--fpcrj9c3dxn" + + "--frde-grandrapidstordalxn--frna-woaraisaijotromsojampagefrontap" + + "piemontexn--frya-hraxn--fzc2c9e2cldmailuxembourgrongaxn--fzys8d6" + + "9uvgmailxn--g2xx48clickasumigaurawa-mazowszextraspacekitagatajir" + + "issagaeroclubmedecincinnationwidealstahaugesunderseaportsinfolld" + + "alabamagasakishimabarackmazerbaijan-mayendoftheinternetflixilove" + + "collegefantasyleaguernseyxn--gckr3f0fedorapeopleirfjordynvpncher" + + "nivtsiciliaxn--gecrj9clinichernigovernmentjometacentruminamiawaj" + + "ikis-a-doctorayxn--ggaviika-8ya47hakatanoshiroomuraxn--gildeskl-" + + "g0axn--givuotna-8yasakaiminatoyonezawaxn--gjvik-wuaxn--gk3at1exn" + + "--gls-elacaixaxn--gmq050isleofmandalxn--gmqw5axn--h-2failxn--h1a" + + "eghakodatexn--h2breg3evenestorepaircraftrentinosud-tirolxn--h2br" + + "j9c8cliniquenoharaxn--h3cuzk1digitalxn--hbmer-xqaxn--hcesuolo-7y" + + "a35batsfjordivtasvuodnakamagayahababyglandivttasvuotnakamurataji" + + "mibuildingjovikarasjokarasuyamarylhurstjohnayorovnoceanographics" + + "3-us-west-1xn--hery-iraxn--hgebostad-g3axn--hmmrfeasta-s4acctrus" + + "teexn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hx" + + "t814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn" + + "--indery-fyasugivingxn--io0a7issmarterthanyouxn--j1aefedoraproje" + + 
"ctoyotomiyazakis-a-knightpointtokaizukamikoaniikappugliaxn--j1am" + + "hakonexn--j6w193gxn--jlq61u9w7bauhausposts-and-telecommunication" + + "sncfdiyonaguniversityoriikarateu-4xn--jlster-byasuokanraxn--jrpe" + + "land-54axn--jvr189misakis-into-cartoonshiraois-a-techietis-a-the" + + "rapistoiaxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kf" + + "jord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--" + + "3e0b707exn--koluokta-7ya57hakubaghdadxn--kprw13dxn--kpry57dxn--k" + + "pu716fermodalenxn--kput3iwchofunatoriginsurecreationishiwakis-a-" + + "geekashiwazakiyosatokashikiyosemitexn--krager-gyatomitamamuraxn-" + + "-kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49j" + + "elenia-goraxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanumazu" + + "ryxn--kvnangen-k0axn--l-1fairwindstorfjordxn--l1accentureklambor" + + "ghiniizaxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ld" + + "ingen-q1axn--leagaviika-52bbcasertaipeiheijiitatebayashiibahcavu" + + "otnagaraholtalenvironmentalconservationflfanfshostrowiecasinordl" + + "andnpalermomahachijorpelandrangedalindashorokanaieverbankaratsug" + + "inamikatagamiharuconnectashkentatamotors3-us-west-2xn--lesund-hu" + + "axn--lgbbat1ad8jeonnamerikawauexn--lgrd-poaclintonoshoesarluxury" + + "xn--lhppi-xqaxn--linds-pramericanartrvareserveblogspotrentinosue" + + "dtirolxn--lns-qlapyatigorskypexn--loabt-0qaxn--lrdal-sraxn--lren" + + "skog-54axn--lt-liaclothingdustkakamigaharaxn--lten-granexn--lury" + + "-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddestorjdevclo" + + "udfrontdoorxn--mgb9awbferraraxn--mgba3a3ejtrysiljanxn--mgba3a4f1" + + "6axn--mgba3a4franamizuholdingsmilelverumisasaguris-into-gamessin" + + "atsukigatakasagotembaixadaxn--mgba7c0bbn0axn--mgbaakc7dvferrarit" + + "togoldpoint2thisamitsukexn--mgbaam7a8hakuis-a-personaltrainerxn-" + + "-mgbab2bdxn--mgbai9a5eva00bbtatarantottoriiyamanouchikuhokuryuga" + + "sakitaurayasudautoscanadaejeonbukaragandasnesoddenmarkhangelskja" + + "kdnepropetrovskiervaapsteiermark12xn--mgbai9azgqp6jetztrentino-a" + + "-adigexn--mgbayh7gpagespeedmobilizeroxn--mgbb9fbpobanazawaxn--mg" + + "bbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp4a5d4a87gxn--mgbe" + + "rp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskodjejuegosh" + + "ikiminokamoenairportland-4-salernoboribetsuckstpetersburgxn--mgb" + + "qly7c0a67fbcnsarpsborgrossetouchijiwadegreexn--mgbqly7cvafranzis" + + "kanerdpolicexn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bbvacations" + + "watch-and-clockerxn--mgbx4cd0abbottulanxessor-varangerxn--mix082" + + "ferreroticanonoichinomiyakexn--mix891fetsundyroyrvikinguitarscho" + + "larshipschoolxn--mjndalen-64axn--mk0axindustriesteamfamberkeleyx" + + "n--mk1bu44cntkmaxxn--11b4c3dyndns-wikinkobayashikaoirminamibosog" + + "ndaluzernxn--mkru45ixn--mlatvuopmi-s4axn--mli-tlaquilanciaxn--ml" + + "selv-iuaxn--moreke-juaxn--mori-qsakuhokkaidoomdnsiskinkyotobetsu" + + "midatlanticolognextdirectmparaglidingroundhandlingroznyxn--mosje" + + "n-eyawaraxn--mot-tlarvikoseis-an-actresshirakofuefukihaboromskog" + + "xn--mre-og-romsdal-qqbentleyoshiokaracoldwarmiamihamadaveroykeni" + + "waizumiotsukuibestadds3-external-1xn--msy-ula0hakusandiegoodyear" + + "xn--mtta-vrjjat-k7afamilycompanycolonialwilliamsburgrparisor-fro" + + "nxn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn-" + + "-3hcrj9cistrondheimmobilienxn--nit225kosherbrookegawaxn--nmesjev" + + "uemie-tcbalestrandabergamoarekexn--nnx388axn--nodessakuragawaxn-" + + "-nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-bya" + + 
"eservecounterstrikexn--nvuotna-hwaxn--nyqy26axn--o1achattanoogan" + + "ordre-landxn--o3cw4haldenxn--o3cyx2axn--od0algxn--od0aq3beppubli" + + "shproxyzgorzeleccollectionhlfanhs3-website-ap-northeast-1xn--ogb" + + "pf8flekkefjordxn--oppegrd-ixaxn--ostery-fyawatahamaxn--osyro-wua" + + "xn--p1acfgujolsterxn--p1aixn--pbt977coloradoplateaudioxn--pgbs0d" + + "hlxn--porsgu-sta26fhvalerxn--pssu33lxn--pssy2uxn--q9jyb4columbus" + + "heyxn--qcka1pmcdonaldstreamuneuesolutionsomaxn--qqqt11misconfuse" + + "dxn--qxamusementunesorfoldxn--rady-iraxn--rdal-poaxn--rde-ulavag" + + "iskexn--rdy-0nabarixn--rennesy-v1axn--rhkkervju-01aflakstadaokag" + + "akibichuoxn--rholt-mragowoodsideltaitogliattirestudioxn--rhqv96g" + + "xn--rht27zxn--rht3dxn--rht61exn--risa-5narusawaxn--risr-iraxn--r" + + "land-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31halsaikitahatakama" + + "tsukawaxn--rovu88bernuorockartuzyukinfinitintuitateshinanomachim" + + "kentateyamavocatanzarowebspacebizenakanojohanamakinoharassnasaba" + + "erobatickets3-ap-southeast-2xn--rros-granvindafjordxn--rskog-uua" + + "xn--rst-0narutokyotangovtunkoninjamisonxn--rsta-francaiseharaxn-" + + "-rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithruheredumbrell" + + "ajollamericanexpressexyxn--s9brj9communitysnesarufutsunomiyawaka" + + "saikaitakoelnxn--sandnessjen-ogbizxn--sandy-yuaxn--seral-lraxn--" + + "ses554gxn--sgne-gratangenxn--skierv-utazaskoyabearalvahkijobserv" + + "erisignieznoipifonymishimatsunoxn--skjervy-v1axn--skjk-soaxn--sk" + + "nit-yqaxn--sknland-fxaxn--slat-5narviikamitondabayashiogamagoriz" + + "iaxn--slt-elabbvieeexn--smla-hraxn--smna-gratis-a-bulls-fanxn--s" + + "nase-nraxn--sndre-land-0cbremangerxn--snes-poaxn--snsa-roaxn--sr" + + "-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeski" + + "dyn-o-saurlandes3-website-ap-southeast-1xn--srfold-byaxn--srreis" + + "a-q1axn--srum-grazxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalse" + + "n-sqbestbuyshouses3-website-ap-southeast-2xn--stre-toten-zcbstud" + + "yndns-at-homedepotenzamamicrolightingxn--t60b56axn--tckweatherch" + + "annelxn--tiq49xqyjevnakershuscountryestateofdelawarezzoologyxn--" + + "tjme-hraxn--tn0agrinet-freakstuff-4-salexn--tnsberg-q1axn--tor13" + + "1oxn--trany-yuaxn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr" + + "-vraxn--uc0atvarggatrentoyokawaxn--uc0ay4axn--uist22hammarfeasta" + + "fricapetownnews-stagingxn--uisz3gxn--unjrga-rtaobaokinawashirosa" + + "tochiokinoshimalatvuopmiasakuchinotsuchiurakawalesundxn--unup4yx" + + "n--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--vermgensbe" + + "rater-ctbetainaboxfusejnynysadodgeometre-experts-comptables3-web" + + "site-eu-west-1xn--vermgensberatung-pwbieigersundray-dnsupdaterno" + + "pilawavoues3-fips-us-gov-west-1xn--vestvgy-ixa6oxn--vg-yiabcgxn-" + + "-vgan-qoaxn--vgsy-qoa0jewelryxn--vgu402comobilyxn--vhquvaroyxn--" + + "vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bi" + + "elawalmartatsunoceanographiquevje-og-hornnes3-website-sa-east-1x" + + "n--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1comparemarkerr" + + "yhotelsasayamaxn--wgbl6axn--xhq521biellaakesvuemieleccexn--xkc2a" + + "l3hye2axn--xkc2dl3a5ee0hamurakamigoris-a-photographerokuappfizer" + + "xn--y9a3aquariumissilewismillerxn--yer-znarvikoshimizumakis-an-a" + + "narchistoricalsocietyxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn-" + + "-3oq18vl8pn36axn--ystre-slidre-ujbieszczadygeyachimataikikuchiku" + + "seikarugamvikareliancexn--zbx025dxn--zf0ao64axn--zf0avxn--3pxu8k" + + "onyveloftrentino-aadigexn--zfr164bievatmallorcadaques3-website-u" + + 
"s-east-1xperiaxz" + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// If the table was generated with the -comments flag, there is a //-comment +// after each node's data. In it is the nodes-array indexes of the children, +// formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. +// +// The layout within the uint32, from MSB to LSB, is: +// [ 0 bits] unused +// [10 bits] children index +// [ 1 bits] ICANN bit +// [15 bits] text index +// [ 6 bits] text length +var nodes = [...]uint32{ + 0x31fe83, + 0x28e944, + 0x2ed8c6, + 0x380743, + 0x380746, + 0x3a5306, + 0x3b5e43, + 0x30a7c4, + 0x20d0c7, + 0x2ed508, + 0x1a07102, + 0x31f1c7, + 0x368c09, + 0x2d68ca, + 0x2d68cb, + 0x238503, + 0x2dec46, + 0x23d6c5, + 0x1e07542, + 0x21cf84, + 0x266d03, + 0x346145, + 0x22035c2, + 0x20a643, + 0x271f944, + 0x342285, + 0x2a10042, + 0x38a48e, + 0x255083, + 0x3affc6, + 0x2e00142, + 0x2d4207, + 0x240d86, + 0x3204f02, + 0x22ee43, + 0x256204, + 0x32d106, + 0x25b788, + 0x2811c6, + 0x378fc4, + 0x3600242, + 0x33b8c9, + 0x212107, + 0x2e6046, + 0x341809, + 0x2a0048, + 0x33a904, + 0x2a0f46, + 0x21f886, + 0x3a02d42, + 0x3a014f, + 0x28c84e, + 0x21bfc4, + 0x382c85, + 0x30a6c5, + 0x2e2109, + 0x249089, + 0x33b1c7, + 0x23f8c6, + 0x20ae43, + 0x3e01d42, + 0x2e3203, + 0x225d0a, + 0x20cac3, + 0x242f85, + 0x28e142, + 0x28e149, + 0x4200bc2, + 0x209204, + 0x28ad46, + 0x2e5c05, + 0x361644, + 0x4a1a344, + 0x203ec3, + 0x218d04, + 0x4e00702, + 0x2f8e84, + 0x52f5f04, + 0x339bca, + 0x5600f82, + 0x28bc47, + 0x281548, + 0x6206502, + 0x31d0c7, + 0x2c6d44, + 0x2c6d47, + 0x393c45, + 0x35e887, + 0x33af86, + 0x271dc4, + 0x378385, + 0x28ea47, + 0x72001c2, + 0x224143, + 0x200c42, + 0x200c43, + 0x760b5c2, + 0x20f4c5, + 0x7a01d02, + 0x357844, + 0x27e405, + 0x21bf07, + 0x25aece, + 0x2bf044, + 0x23df04, + 0x211c43, + 0x28a4c9, + 0x30eacb, + 0x2ea6c8, + 0x3415c8, + 0x306208, + 0x2b7288, + 0x33a74a, + 0x35e787, + 0x321606, + 0x7e8f282, + 0x36a683, + 0x377683, + 0x37fd44, + 0x3b5e83, + 0x32c343, + 0x1727e02, + 0x8203302, + 0x283f45, + 0x29e006, + 0x2da184, + 0x388547, + 0x2fa686, + 0x389384, + 0x3aa107, + 0x223d43, + 0x86cd5c2, + 0x8a0d342, + 0x8e1e642, + 0x21e646, + 0x9200002, + 0x2501c5, + 0x329343, + 0x201684, + 0x2efb04, + 0x2efb05, + 0x203c43, + 0x979c783, + 0x9a092c2, + 0x291d85, + 0x291d8b, + 0x343c06, + 0x21270b, + 0x226544, + 0x213a49, + 0x2148c4, + 0x9e14b02, + 0x215943, + 0x216283, + 0x1616b42, + 0x275fc3, + 0x216b4a, + 0xa201102, + 0x21d205, + 0x29a88a, + 0x2e0544, + 0x201103, + 0x325384, + 0x21ae03, + 0x21ae04, + 0x21ae07, + 0x21b605, + 0x21d685, + 0x21dc46, + 0x21dfc6, + 0x21ea43, + 0x222688, + 0x206c03, + 0xa60c702, + 0x245848, + 0x23614b, + 0x228908, + 0x228e06, + 0x229dc7, + 0x22da48, + 0xb6024c2, + 0xba430c2, + 0x32da08, + 0x233347, + 0x2e7b45, + 0x2e7b48, + 0x2c3b08, + 0x2be483, + 0x232e04, + 0x37fd82, + 0xbe34382, + 0xc23e102, + 0xca37302, + 0x237303, + 0xce01382, + 0x30a783, + 0x300f44, + 0x20a043, + 0x322844, + 0x20d7cb, + 0x2322c3, + 0x2e6a46, + 0x245f44, + 0x2982ce, + 0x381245, + 0x3b00c8, + 0x263347, + 0x26334a, + 0x22e803, + 0x317a07, + 0x30ec85, + 0x23a384, + 0x272706, + 0x272707, + 0x330f44, + 0x301f87, + 0x25a184, + 0x25b204, + 0x25b206, + 0x25f704, + 0x36bdc6, + 
0x216983, + 0x233108, + 0x316ec8, + 0x23dec3, + 0x275f83, + 0x3a6604, + 0x3aae83, + 0xd235f42, + 0xd6df482, + 0x207143, + 0x203f86, + 0x2a1043, + 0x285184, + 0xda165c2, + 0x2165c3, + 0x35f083, + 0x21fe02, + 0xde008c2, + 0x2c9786, + 0x23e347, + 0x2fd645, + 0x38fd04, + 0x294d45, + 0x2f8a47, + 0x2add85, + 0x2e4689, + 0x2e9906, + 0x2ef808, + 0x2fd546, + 0xe20e982, + 0x2ddb08, + 0x300d06, + 0x219205, + 0x316887, + 0x316dc4, + 0x316dc5, + 0x281384, + 0x345d88, + 0xe6127c2, + 0xea04882, + 0x33ca06, + 0x2cf588, + 0x34d485, + 0x351546, + 0x356108, + 0x371488, + 0xee35dc5, + 0xf214f44, + 0x34e247, + 0xf614602, + 0xfa22902, + 0x10e0f882, + 0x28ae45, + 0x2aaa45, + 0x30af86, + 0x350007, + 0x386287, + 0x11638543, + 0x2b0307, + 0x30e7c8, + 0x3a0849, + 0x38a647, + 0x3b9c87, + 0x238788, + 0x238f86, + 0x239e86, + 0x23aacc, + 0x23c08a, + 0x23c407, + 0x23d58b, + 0x23e187, + 0x23e18e, + 0x19a3f304, + 0x240244, + 0x242547, + 0x3ac747, + 0x246d46, + 0x246d47, + 0x247407, + 0x19e29682, + 0x2495c6, + 0x2495ca, + 0x24a08b, + 0x24ac87, + 0x24b845, + 0x24bb83, + 0x24bdc6, + 0x24bdc7, + 0x20d283, + 0x1a206e02, + 0x24c78a, + 0x1a769d02, + 0x1aa4f282, + 0x1ae4dd42, + 0x1b240e82, + 0x24e9c5, + 0x24ef44, + 0x1ba1a442, + 0x2f8f05, + 0x24a683, + 0x2149c5, + 0x2b7184, + 0x205ec4, + 0x25a486, + 0x262586, + 0x291f83, + 0x204844, + 0x3894c3, + 0x1c204c82, + 0x210ac4, + 0x210ac6, + 0x34e7c5, + 0x37e946, + 0x316988, + 0x273544, + 0x266ac8, + 0x398785, + 0x22bc88, + 0x2b2dc6, + 0x26d907, + 0x233d84, + 0x233d86, + 0x242bc3, + 0x393fc3, + 0x211d08, + 0x322004, + 0x356747, + 0x20c7c6, + 0x2dedc9, + 0x322a88, + 0x325448, + 0x331ac4, + 0x35f103, + 0x229942, + 0x1d2234c2, + 0x1d61a202, + 0x36c083, + 0x1da08e02, + 0x20d204, + 0x3521c6, + 0x3b3745, + 0x24fa83, + 0x23cf44, + 0x2b95c7, + 0x25a783, + 0x251208, + 0x218405, + 0x264143, + 0x27e385, + 0x27e4c4, + 0x300a06, + 0x218f84, + 0x21ab86, + 0x21be46, + 0x210584, + 0x23e543, + 0x1de1a582, + 0x23dd05, + 0x20b9c3, + 0x1e20c882, + 0x23aa83, + 0x2231c5, + 0x23cac3, + 0x23cac9, + 0x1e606b82, + 0x1ee07842, + 0x2918c5, + 0x2211c6, + 0x2d9d46, + 0x2bb248, + 0x2bb24b, + 0x203fcb, + 0x220bc5, + 0x2fd845, + 0x2cdfc9, + 0x1600302, + 0x210748, + 0x213d44, + 0x1f601842, + 0x326403, + 0x1fecdd46, + 0x348e08, + 0x20208b42, + 0x2bdec8, + 0x2060c182, + 0x2bf7ca, + 0x20a3fd03, + 0x203606, + 0x36cc48, + 0x209708, + 0x3b3a46, + 0x37c807, + 0x3a0347, + 0x34daca, + 0x2e05c4, + 0x354d44, + 0x368649, + 0x2139fb45, + 0x28ca46, + 0x210083, + 0x253d44, + 0x2160df44, + 0x20df47, + 0x22c507, + 0x234404, + 0x2df805, + 0x30b048, + 0x375e07, + 0x381007, + 0x21a07602, + 0x32e984, + 0x29b188, + 0x2504c4, + 0x251844, + 0x251c45, + 0x251d87, + 0x222349, + 0x252a04, + 0x253149, + 0x253388, + 0x253ac4, + 0x253ac7, + 0x21e54003, + 0x254187, + 0x1609c42, + 0x16b4a42, + 0x254b86, + 0x2550c7, + 0x255584, + 0x257687, + 0x258d47, + 0x259983, + 0x2f6802, + 0x207d82, + 0x231683, + 0x231684, + 0x23168b, + 0x3416c8, + 0x263c84, + 0x25c985, + 0x25eb47, + 0x260105, + 0x2c8c0a, + 0x263bc3, + 0x22206b02, + 0x206b04, + 0x267189, + 0x26a743, + 0x26a807, + 0x373089, + 0x212508, + 0x2db543, + 0x282f07, + 0x283649, + 0x23d483, + 0x289844, + 0x28d209, + 0x290146, + 0x21c203, + 0x200182, + 0x264d83, + 0x2b4847, + 0x2c3e85, + 0x3413c6, + 0x259004, + 0x374e05, + 0x225cc3, + 0x20e646, + 0x213c42, + 0x3a1784, + 0x2260d382, + 0x226603, + 0x22a01802, + 0x251743, + 0x21e444, + 0x21e447, + 0x201986, + 0x20df02, + 0x22e0dec2, + 0x2c4244, + 0x23235182, + 0x23601b82, + 0x265704, + 0x265705, + 0x345105, + 0x35c386, + 0x23a074c2, + 0x2074c5, + 0x213005, + 
0x2157c3, + 0x219d06, + 0x21a645, + 0x21e5c2, + 0x34d0c5, + 0x21e5c4, + 0x228203, + 0x22a443, + 0x23e11442, + 0x2dcf47, + 0x376084, + 0x376089, + 0x253c44, + 0x2357c3, + 0x300589, + 0x389e08, + 0x242aa8c4, + 0x2aa8c6, + 0x219983, + 0x25d3c3, + 0x323043, + 0x246eebc2, + 0x379b82, + 0x24a17202, + 0x32af48, + 0x358e08, + 0x3a5a46, + 0x2fd0c5, + 0x317885, + 0x333d07, + 0x2247c5, + 0x210642, + 0x24e04742, + 0x160a442, + 0x2447c8, + 0x2dda45, + 0x2bfbc4, + 0x2f2845, + 0x381d87, + 0x240944, + 0x24c682, + 0x25200582, + 0x33ffc4, + 0x21ca07, + 0x292507, + 0x35e844, + 0x29a843, + 0x23de04, + 0x23de08, + 0x23a1c6, + 0x27258a, + 0x222204, + 0x29abc8, + 0x290584, + 0x229ec6, + 0x29c484, + 0x28b146, + 0x376349, + 0x274847, + 0x241243, + 0x256351c2, + 0x2755c3, + 0x214d02, + 0x25a52e42, + 0x313486, + 0x374588, + 0x2ac047, + 0x3ab249, + 0x299f49, + 0x2acf05, + 0x2adec9, + 0x2ae685, + 0x2ae7c9, + 0x2afe45, + 0x2b11c8, + 0x25e0a104, + 0x26259ac7, + 0x2b13c3, + 0x2b13c7, + 0x3ba046, + 0x2b1a47, + 0x2a9b05, + 0x2a2cc3, + 0x26636d02, + 0x339704, + 0x26a42a42, + 0x266603, + 0x26e206c2, + 0x30df06, + 0x2814c5, + 0x2b3cc7, + 0x332043, + 0x32c2c4, + 0x217003, + 0x342c43, + 0x27205e82, + 0x27a0c442, + 0x3a5404, + 0x2f67c3, + 0x24e545, + 0x27e01c82, + 0x286007c2, + 0x2c8286, + 0x322144, + 0x38c444, + 0x38c44a, + 0x28e00942, + 0x38298a, + 0x39b8c8, + 0x29231604, + 0x2046c3, + 0x20d8c3, + 0x306349, + 0x25bd09, + 0x364986, + 0x29655783, + 0x335d45, + 0x30d2cd, + 0x39ba86, + 0x204f4b, + 0x29a02b02, + 0x225b48, + 0x2be22782, + 0x2c203e02, + 0x2b1685, + 0x2c604182, + 0x266847, + 0x21b987, + 0x20bf43, + 0x23b188, + 0x2ca02542, + 0x3780c4, + 0x21a8c3, + 0x348505, + 0x364603, + 0x33c406, + 0x212a84, + 0x275f43, + 0x2b6443, + 0x2ce09942, + 0x2fd7c4, + 0x379c85, + 0x3b6587, + 0x280003, + 0x2b5103, + 0x2b5c03, + 0x1631182, + 0x2b5cc3, + 0x2b63c3, + 0x2d2086c2, + 0x3a2e44, + 0x262786, + 0x34ba83, + 0x2086c3, + 0x2d6b8042, + 0x2b8048, + 0x2b8304, + 0x37ce46, + 0x2b8bc7, + 0x258346, + 0x2a0304, + 0x3b201702, + 0x3b9f0b, + 0x307c0e, + 0x221d4f, + 0x2ac5c3, + 0x3ba64d42, + 0x160b542, + 0x3be00a82, + 0x2e89c3, + 0x2e4903, + 0x2de046, + 0x207986, + 0x203007, + 0x304704, + 0x3c221302, + 0x3c618742, + 0x3a1205, + 0x2e7007, + 0x38c946, + 0x3ca28142, + 0x228144, + 0x2bc743, + 0x3ce09a02, + 0x3d366443, + 0x2bce04, + 0x2c5409, + 0x16cb602, + 0x3d605242, + 0x385d85, + 0x3dacb882, + 0x3de03582, + 0x3541c7, + 0x21b2c9, + 0x368e8b, + 0x3a0105, + 0x2714c9, + 0x384d06, + 0x343c47, + 0x3e206844, + 0x341d89, + 0x380907, + 0x348ac7, + 0x2122c3, + 0x2122c6, + 0x312247, + 0x263a43, + 0x263a46, + 0x3ea01cc2, + 0x3ee022c2, + 0x22bf03, + 0x32bec5, + 0x25a007, + 0x227906, + 0x2c3e05, + 0x207a84, + 0x28ddc5, + 0x2fae04, + 0x3f204bc2, + 0x337447, + 0x2ca604, + 0x24f3c4, + 0x25bc0d, + 0x25d749, + 0x3ab748, + 0x25e044, + 0x234a85, + 0x322907, + 0x3329c4, + 0x2fa747, + 0x204bc5, + 0x3f6ac504, + 0x2b5e05, + 0x269404, + 0x256fc6, + 0x34fe05, + 0x3fa048c2, + 0x2011c4, + 0x2011c5, + 0x3802c6, + 0x206d85, + 0x3c0144, + 0x2cda83, + 0x208d46, + 0x222545, + 0x22b605, + 0x34ff04, + 0x222283, + 0x22228c, + 0x3fe90a82, + 0x40206702, + 0x40600282, + 0x211a83, + 0x211a84, + 0x40a02942, + 0x2fba48, + 0x341485, + 0x34c984, + 0x36ee86, + 0x40e0d842, + 0x41234502, + 0x41601fc2, + 0x2a6a85, + 0x210446, + 0x226144, + 0x32d646, + 0x28ba06, + 0x215c83, + 0x41b2770a, + 0x2f6b05, + 0x2f6fc3, + 0x22a9c6, + 0x30c989, + 0x22a9c7, + 0x29f648, + 0x29ff09, + 0x241b08, + 0x22e546, + 0x209b03, + 0x41e0c202, + 0x395343, + 0x395349, + 0x333608, + 0x42253442, + 0x42604a82, + 0x229443, + 
0x2e4505, + 0x25c404, + 0x2c9ec9, + 0x26eb44, + 0x2e0908, + 0x2050c3, + 0x20dc44, + 0x2acd03, + 0x221208, + 0x25bb47, + 0x42e281c2, + 0x270d02, + 0x388b05, + 0x272dc9, + 0x28cac3, + 0x284bc4, + 0x335d04, + 0x227543, + 0x28580a, + 0x43382842, + 0x43601182, + 0x2cd543, + 0x384f83, + 0x160dc02, + 0x20ffc3, + 0x43a14702, + 0x43e00802, + 0x4420f644, + 0x20f646, + 0x3b6a46, + 0x248c44, + 0x37d243, + 0x200803, + 0x2f60c3, + 0x24a406, + 0x30aa05, + 0x2cd6c7, + 0x343b09, + 0x2d2d85, + 0x2d3f46, + 0x2d4908, + 0x2d4b06, + 0x260ec4, + 0x2a1d8b, + 0x2d8403, + 0x2d8405, + 0x2d8548, + 0x22c2c2, + 0x3544c2, + 0x4464ea42, + 0x44a14642, + 0x221343, + 0x44e745c2, + 0x2745c3, + 0x2d8844, + 0x2d8e03, + 0x45605902, + 0x45a0c0c6, + 0x2af186, + 0x45edcac2, + 0x462162c2, + 0x4662a482, + 0x46a00e82, + 0x46e176c2, + 0x47202ec2, + 0x205383, + 0x344905, + 0x348206, + 0x4761bf84, + 0x34e5ca, + 0x20bd46, + 0x220e04, + 0x28a483, + 0x4820ea42, + 0x204d42, + 0x23d503, + 0x48608e83, + 0x2d8047, + 0x34fd07, + 0x49e31787, + 0x23fcc7, + 0x2309c3, + 0x33188a, + 0x263544, + 0x3863c4, + 0x3863ca, + 0x24b685, + 0x4a2190c2, + 0x254b43, + 0x4a601942, + 0x21b543, + 0x275583, + 0x4ae02b82, + 0x2b0284, + 0x2256c4, + 0x208105, + 0x39e745, + 0x2fc3c6, + 0x2fc746, + 0x4b206802, + 0x4b600982, + 0x3139c5, + 0x2aee92, + 0x259806, + 0x231483, + 0x315a06, + 0x231485, + 0x1616b82, + 0x53a17102, + 0x35fd43, + 0x217103, + 0x35d703, + 0x53e02c82, + 0x38a783, + 0x54205b82, + 0x20cc43, + 0x3a2e88, + 0x231e83, + 0x231e86, + 0x3b0c87, + 0x26c286, + 0x26c28b, + 0x220d47, + 0x339504, + 0x54a00e42, + 0x341305, + 0x54e08e43, + 0x2aec83, + 0x32de85, + 0x331783, + 0x55331786, + 0x2108ca, + 0x2488c3, + 0x240c44, + 0x2cf4c6, + 0x2364c6, + 0x55601a03, + 0x32c187, + 0x364887, + 0x2a3885, + 0x251046, + 0x222583, + 0x57619f43, + 0x57a0cb42, + 0x34bd44, + 0x22c24c, + 0x232f09, + 0x2445c7, + 0x38ad45, + 0x252c84, + 0x25e6c8, + 0x265d45, + 0x57e6c505, + 0x27b709, + 0x2e6103, + 0x24f204, + 0x5821cc82, + 0x221543, + 0x5869bf42, + 0x3bbe86, + 0x16235c2, + 0x58a35b42, + 0x2a6988, + 0x2ac343, + 0x2b5d47, + 0x2daa05, + 0x2e5205, + 0x2e520b, + 0x2e58c6, + 0x2e5406, + 0x2e9006, + 0x232b84, + 0x2e9246, + 0x58eeae88, + 0x246003, + 0x231a43, + 0x231a44, + 0x2ea484, + 0x2eab87, + 0x2ec3c5, + 0x592ec502, + 0x59607082, + 0x207085, + 0x295bc4, + 0x2ef38b, + 0x2efa08, + 0x2998c4, + 0x228182, + 0x59e99842, + 0x350e83, + 0x2efec4, + 0x2f0185, + 0x2f0607, + 0x2f2384, + 0x220c04, + 0x5a204102, + 0x36f5c9, + 0x2f3185, + 0x3a03c5, + 0x2f3e45, + 0x5a621483, + 0x2f4dc4, + 0x2f4dcb, + 0x2f5204, + 0x2f5c0b, + 0x2f6005, + 0x221e8a, + 0x2f7608, + 0x2f780a, + 0x2f7fc3, + 0x2f7fca, + 0x5aa33502, + 0x5ae2fa42, + 0x236903, + 0x5b2f9f02, + 0x2f9f03, + 0x5b71c482, + 0x5bb29ac2, + 0x2fac84, + 0x2227c6, + 0x32d385, + 0x2fd4c3, + 0x320446, + 0x317345, + 0x262a84, + 0x5be06b42, + 0x2ba844, + 0x2cdc4a, + 0x22fd07, + 0x2e5e86, + 0x2612c7, + 0x20c743, + 0x2bce48, + 0x39fd8b, + 0x230305, + 0x2f41c5, + 0x2f41c6, + 0x2ea004, + 0x3bf388, + 0x20e543, + 0x21f784, + 0x21f787, + 0x355746, + 0x344b06, + 0x29810a, + 0x250d44, + 0x250d4a, + 0x5c20c386, + 0x20c387, + 0x25ca07, + 0x27b0c4, + 0x27b0c9, + 0x262445, + 0x2439cb, + 0x2eef43, + 0x21ad43, + 0x5c625b03, + 0x23a584, + 0x5ca00482, + 0x2f70c6, + 0x5cea2a45, + 0x315c45, + 0x258586, + 0x352b04, + 0x5d2044c2, + 0x24bbc4, + 0x5d60b282, + 0x28b5c5, + 0x236c84, + 0x22cb43, + 0x5de17142, + 0x217143, + 0x273e86, + 0x5e204242, + 0x2241c8, + 0x22a844, + 0x22a846, + 0x204dc6, + 0x25ec04, + 0x208cc5, + 0x214e48, + 0x215647, + 0x2159c7, + 0x2159cf, + 0x29b086, + 0x22f483, + 
0x22f484, + 0x36edc4, + 0x213103, + 0x22a004, + 0x2494c4, + 0x5e60fd02, + 0x291cc3, + 0x24bf43, + 0x5ea0d2c2, + 0x22f043, + 0x20d2c3, + 0x21d70a, + 0x2e7d07, + 0x381f0c, + 0x3821c6, + 0x2f5a86, + 0x2f6447, + 0x5ee0e947, + 0x252d49, + 0x245984, + 0x253e04, + 0x5f221382, + 0x5f600a02, + 0x2984c6, + 0x32bf84, + 0x2df606, + 0x239048, + 0x2bf2c4, + 0x266886, + 0x2d9d05, + 0x26e488, + 0x2041c3, + 0x26fd85, + 0x270b03, + 0x3a04c3, + 0x3a04c4, + 0x206ac3, + 0x5fa0e602, + 0x5fe00742, + 0x2eee09, + 0x273885, + 0x276bc4, + 0x27ab05, + 0x217e84, + 0x2c62c7, + 0x36ecc5, + 0x231944, + 0x231948, + 0x2d6206, + 0x2dac04, + 0x2e0788, + 0x2e1fc7, + 0x60202502, + 0x2e6f44, + 0x2131c4, + 0x348cc7, + 0x60602504, + 0x210f82, + 0x60a06742, + 0x227103, + 0x2dfc84, + 0x2b2143, + 0x370645, + 0x60e06d42, + 0x2eeac5, + 0x21b9c2, + 0x35c7c5, + 0x374745, + 0x61204d02, + 0x35f004, + 0x61606182, + 0x266d86, + 0x2a7806, + 0x272f08, + 0x2c7588, + 0x30de84, + 0x2f97c5, + 0x395809, + 0x2fd8c4, + 0x210884, + 0x208483, + 0x61a1f545, + 0x2cb6c7, + 0x28d004, + 0x31288d, + 0x332182, + 0x33f203, + 0x3479c3, + 0x61e00d02, + 0x397dc5, + 0x212cc7, + 0x23fd84, + 0x23fd87, + 0x2a0109, + 0x2cdd89, + 0x277e07, + 0x20f803, + 0x2ba348, + 0x2522c9, + 0x349c47, + 0x355685, + 0x395546, + 0x398bc6, + 0x3aaf05, + 0x25d845, + 0x62209142, + 0x37da45, + 0x2bad08, + 0x2c9546, + 0x626c0d47, + 0x2f6244, + 0x29bb07, + 0x300246, + 0x62a3b442, + 0x37ffc6, + 0x302d4a, + 0x3035c5, + 0x62ee6282, + 0x63260a02, + 0x312586, + 0x2b36c8, + 0x636926c7, + 0x63a04502, + 0x226783, + 0x36a846, + 0x22cf04, + 0x3b0b46, + 0x344e06, + 0x36d78a, + 0x377705, + 0x208806, + 0x2205c3, + 0x2205c4, + 0x203082, + 0x314a43, + 0x63e11ac2, + 0x2f8483, + 0x382c04, + 0x2b3804, + 0x2b380a, + 0x22e603, + 0x281288, + 0x22e60a, + 0x2b4247, + 0x309306, + 0x266c44, + 0x220cc2, + 0x228cc2, + 0x64207002, + 0x23ddc3, + 0x25c7c7, + 0x320707, + 0x28e8c4, + 0x39d147, + 0x2f0706, + 0x21e747, + 0x233484, + 0x398ac5, + 0x2ce485, + 0x6462be42, + 0x231146, + 0x327943, + 0x371742, + 0x383306, + 0x64a08bc2, + 0x64e05082, + 0x3c0985, + 0x6522a202, + 0x65604782, + 0x348085, + 0x39e345, + 0x2088c5, + 0x26f003, + 0x352285, + 0x2e5987, + 0x305cc5, + 0x311985, + 0x3b01c4, + 0x24d486, + 0x264544, + 0x65a00d42, + 0x666f2bc5, + 0x2ab647, + 0x3176c8, + 0x29f806, + 0x29f80d, + 0x2aac09, + 0x2aac12, + 0x359f05, + 0x36f8c3, + 0x66a08882, + 0x314544, + 0x39bb03, + 0x3963c5, + 0x304a45, + 0x66e1a902, + 0x264183, + 0x67231802, + 0x67a43242, + 0x67e1f342, + 0x2ed385, + 0x23fec3, + 0x36d408, + 0x68204382, + 0x686000c2, + 0x2b0246, + 0x35f2ca, + 0x205503, + 0x209f43, + 0x2ef103, + 0x69202642, + 0x77602cc2, + 0x77e0d582, + 0x206442, + 0x37fdc9, + 0x2caa44, + 0x23b488, + 0x782fd502, + 0x78603642, + 0x2f5e45, + 0x23d9c8, + 0x3a2fc8, + 0x25920c, + 0x22fac3, + 0x78a68dc2, + 0x78e0c402, + 0x2d3206, + 0x30a185, + 0x2a7b83, + 0x381c46, + 0x30a2c6, + 0x20d883, + 0x30bc43, + 0x30c146, + 0x30cd84, + 0x29d386, + 0x2d85c5, + 0x30d10a, + 0x2397c4, + 0x30e244, + 0x30f08a, + 0x79203442, + 0x2413c5, + 0x31018a, + 0x310a85, + 0x311344, + 0x311446, + 0x3115c4, + 0x221806, + 0x79611042, + 0x33c0c6, + 0x3b1b45, + 0x3b80c7, + 0x200206, + 0x2de844, + 0x2de847, + 0x327646, + 0x245345, + 0x245347, + 0x3abdc7, + 0x3abdce, + 0x232206, + 0x2fa605, + 0x202447, + 0x216303, + 0x3326c7, + 0x2172c5, + 0x21b0c4, + 0x2343c2, + 0x2432c7, + 0x304784, + 0x383884, + 0x270b8b, + 0x224e03, + 0x2d4c47, + 0x224e04, + 0x2f11c7, + 0x299543, + 0x33dd4d, + 0x398608, + 0x224604, + 0x231845, + 0x312bc5, + 0x313003, + 0x79a0c4c2, + 0x314a03, + 0x314d43, + 0x20f204, + 
0x283745, + 0x22a4c7, + 0x220646, + 0x382943, + 0x38344b, + 0x259c8b, + 0x2ac9cb, + 0x2fbd4b, + 0x2c578a, + 0x30e48b, + 0x32420b, + 0x362f0c, + 0x38bf4b, + 0x3bdf51, + 0x3bfd8a, + 0x31604b, + 0x31630c, + 0x31660b, + 0x316b8a, + 0x317c8a, + 0x318c8e, + 0x31930b, + 0x3195ca, + 0x31a9d1, + 0x31ae0a, + 0x31b30b, + 0x31b84e, + 0x31c18c, + 0x31c68b, + 0x31c94e, + 0x31cccc, + 0x31d9ca, + 0x31eccc, + 0x79f1efca, + 0x31f7c8, + 0x320909, + 0x3232ca, + 0x32354a, + 0x3237cb, + 0x326d8e, + 0x327111, + 0x330189, + 0x3303ca, + 0x3313cb, + 0x334a0a, + 0x3354d6, + 0x336e4b, + 0x337b0a, + 0x337f4a, + 0x33a4cb, + 0x33b749, + 0x33e6c9, + 0x33ec8d, + 0x33f2cb, + 0x34040b, + 0x340dcb, + 0x347049, + 0x34768e, + 0x347dca, + 0x3494ca, + 0x349a0a, + 0x34a14b, + 0x34a98b, + 0x34ac4d, + 0x34c50d, + 0x34cd50, + 0x34d20b, + 0x35064c, + 0x3512cb, + 0x353ccb, + 0x35528e, + 0x355e0b, + 0x355e0d, + 0x35ae8b, + 0x35b90f, + 0x35bccb, + 0x35c50a, + 0x35cb49, + 0x35de09, + 0x35e18b, + 0x35e44e, + 0x36020b, + 0x361acf, + 0x36394b, + 0x363c0b, + 0x363ecb, + 0x3643ca, + 0x368a89, + 0x36e04f, + 0x372a8c, + 0x3732cc, + 0x37374e, + 0x373ccf, + 0x37408e, + 0x375690, + 0x375a8f, + 0x37660e, + 0x376f4c, + 0x377252, + 0x379891, + 0x37a18e, + 0x37a94e, + 0x37ae8e, + 0x37b20f, + 0x37b5ce, + 0x37b953, + 0x37be11, + 0x37c24c, + 0x37c54e, + 0x37c9cc, + 0x37de53, + 0x37ead0, + 0x37f30c, + 0x37f60c, + 0x37facb, + 0x38044e, + 0x380d8b, + 0x3816cb, + 0x382fcc, + 0x38b38a, + 0x38b74c, + 0x38ba4c, + 0x38bd49, + 0x38d7cb, + 0x38da88, + 0x38df49, + 0x38df4f, + 0x38f88b, + 0x7a39028a, + 0x391e4c, + 0x393009, + 0x393488, + 0x39368b, + 0x393d8b, + 0x39490a, + 0x394b8b, + 0x3950cc, + 0x396048, + 0x398d4b, + 0x39b1cb, + 0x39ef4e, + 0x3a05cb, + 0x3a1f0b, + 0x3ab94b, + 0x3abc09, + 0x3ac14d, + 0x3b1d4a, + 0x3b2c97, + 0x3b4398, + 0x3b6bc9, + 0x3b7d0b, + 0x3b8fd4, + 0x3b94cb, + 0x3b9a4a, + 0x3ba38a, + 0x3ba60b, + 0x3badd0, + 0x3bb1d1, + 0x3bc00a, + 0x3bd54d, + 0x3bdc4d, + 0x3c05cb, + 0x3c1206, + 0x231243, + 0x7a791143, + 0x26ed86, + 0x248805, + 0x22d287, + 0x3240c6, + 0x1608742, + 0x2c1fc9, + 0x320244, + 0x2e4d48, + 0x210943, + 0x314487, + 0x239202, + 0x2b3d03, + 0x7aa04542, + 0x2d0d06, + 0x2d2104, + 0x37a844, + 0x3443c3, + 0x3443c5, + 0x7b2cb8c2, + 0x7b6aeb44, + 0x27b007, + 0x7ba43282, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x200e03, + 0x207102, + 0x16fb88, + 0x20f882, + 0x323043, + 0x28cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x215443, + 0x32b7d6, + 0x32ca13, + 0x39cfc9, + 0x34e148, + 0x341189, + 0x310306, + 0x340010, + 0x24c9d3, + 0x355808, + 0x2a0a87, + 0x37d347, + 0x28db0a, + 0x232309, + 0x3961c9, + 0x28664b, + 0x33af86, + 0x20728a, + 0x228e06, + 0x31fe43, + 0x2dce85, + 0x233108, + 0x266e4d, + 0x28af0c, + 0x218c87, + 0x318fcd, + 0x214f44, + 0x23a84a, + 0x23bbca, + 0x23c08a, + 0x24ccc7, + 0x246b87, + 0x24a904, + 0x233d86, + 0x209d44, + 0x2c7ec8, + 0x26eb89, + 0x2bb246, + 0x2bb248, + 0x24d18d, + 0x2cdfc9, + 0x209708, + 0x3a0347, + 0x300fca, + 0x2550c6, + 0x2664c7, + 0x2bd584, + 0x292347, + 0x35180a, + 0x38690e, + 0x2247c5, + 0x29224b, + 0x32f709, + 0x25bd09, + 0x21b7c7, + 0x2936ca, + 0x348c07, + 0x307d49, + 0x20b808, + 0x33420b, + 0x2e4505, + 0x3ab60a, + 0x2734c9, + 0x331d0a, + 0x2d2e0b, + 0x38668b, + 0x2863d5, + 0x30be85, + 0x3a03c5, + 0x2f4dca, + 0x364a8a, + 0x32f487, + 0x2252c3, + 0x298448, + 0x2db34a, + 0x22a846, + 0x252109, + 0x26e488, + 0x2dac04, + 0x2b2149, + 0x2c7588, + 0x2b2d07, + 0x2f2bc6, + 0x2ab647, + 0x376d87, + 0x24a205, + 0x22460c, + 0x231845, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 
0x20f882, + 0x238543, + 0x208e83, + 0x200e03, + 0x201a03, + 0x238543, + 0x208e83, + 0xe03, + 0x231e83, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x16fb88, + 0x20f882, + 0x201742, + 0x23c2c2, + 0x202542, + 0x200542, + 0x2e6dc2, + 0x4638543, + 0x23cac3, + 0x21b583, + 0x323043, + 0x255783, + 0x28cac3, + 0x2dcd86, + 0x208e83, + 0x201a03, + 0x20bdc3, + 0x16fb88, + 0x345b44, + 0x20da07, + 0x2112c3, + 0x2b1684, + 0x208543, + 0x21b843, + 0x323043, + 0x36dc7, + 0x145944, + 0xf183, + 0x145c05, + 0x207102, + 0x19c783, + 0x5a0f882, + 0x1490fc9, + 0x9144d, + 0x9178d, + 0x23c2c2, + 0x31604, + 0x145c49, + 0x200442, + 0x5f4ed48, + 0xf4544, + 0x16fb88, + 0x1409702, + 0x1510cc6, + 0x239283, + 0x2bcc43, + 0x6638543, + 0x23a844, + 0x6a3cac3, + 0x6f23043, + 0x205e82, + 0x231604, + 0x208e83, + 0x301dc3, + 0x2014c2, + 0x201a03, + 0x222dc2, + 0x2fabc3, + 0x204242, + 0x205983, + 0x26e543, + 0x200202, + 0x16fb88, + 0x239283, + 0x301dc3, + 0x2014c2, + 0x2fabc3, + 0x204242, + 0x205983, + 0x26e543, + 0x200202, + 0x2fabc3, + 0x204242, + 0x205983, + 0x26e543, + 0x200202, + 0x238543, + 0x39c783, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x255783, + 0x28cac3, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x20cb02, + 0x221483, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x39c783, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x208e83, + 0x201a03, + 0x355685, + 0x21a902, + 0x207102, + 0x16fb88, + 0x1480cc8, + 0x323043, + 0x20fec1, + 0x201641, + 0x203c01, + 0x201301, + 0x267401, + 0x2ae601, + 0x211341, + 0x28a0c1, + 0x24dfc1, + 0x2fbf81, + 0x200141, + 0x200001, + 0x131645, + 0x16fb88, + 0x2008c1, + 0x201781, + 0x200301, + 0x200081, + 0x200181, + 0x200401, + 0x200041, + 0x2086c1, + 0x200101, + 0x200281, + 0x200801, + 0x200981, + 0x200441, + 0x204101, + 0x2227c1, + 0x200341, + 0x200741, + 0x2002c1, + 0x2000c1, + 0x203441, + 0x200201, + 0x200c81, + 0x2005c1, + 0x204541, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x20f882, + 0x238543, + 0x23cac3, + 0x200442, + 0x201a03, + 0x36dc7, + 0x8cbc7, + 0x24386, + 0x44f4a, + 0x906c8, + 0x5c288, + 0x5c6c7, + 0xffc6, + 0xe1d45, + 0x11205, + 0x86286, + 0x12cf06, + 0x286644, + 0x31cf87, + 0x16fb88, + 0x2de944, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x21b583, + 0x323043, + 0x255783, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x21a902, + 0x2ba8c3, + 0x242043, + 0x2cc103, + 0x202d42, + 0x33eb43, + 0x203ec3, + 0x20fc03, + 0x200001, + 0x2ed0c5, + 0x203c43, + 0x226544, + 0x332083, + 0x322103, + 0x222903, + 0x383283, + 0xaa38543, + 0x240244, + 0x24ac83, + 0x207583, + 0x2228c3, + 0x23aa83, + 0x23cac3, + 0x23c803, + 0x202103, + 0x2aab03, + 0x322083, + 0x2bdec3, + 0x20df43, + 0x255684, + 0x257307, + 0x2f6802, + 0x25c003, + 0x263783, + 0x27e983, + 0x20fe03, + 0x20dec3, + 0xaf23043, + 0x209ac3, + 0x204c03, + 0x231603, + 0x34bc85, + 0x209c83, + 0x304d43, + 0xb207a83, + 0x374803, + 0x213643, + 0x229443, + 0x28cac3, + 0x22c2c2, + 0x20c0c3, + 0x208e83, + 0x1600e03, + 0x22b1c3, + 0x2014c3, + 0x21a743, + 0x201a03, + 0x36ea03, + 0x223583, + 0x221483, + 0x233503, + 0x30bcc3, + 0x2fad83, + 0x317345, + 0x20c843, + 0x2df706, + 0x2fadc3, + 0x349703, + 0x2205c4, + 0x20c9c3, + 0x386603, + 0x2f1a03, + 0x20bdc3, + 0x21a902, + 0x22fac3, + 0x30e403, + 0x30fac4, + 0x383884, + 0x21a5c3, + 0x16fb88, + 0x207102, + 0x200242, + 0x202d42, + 0x20cac2, + 0x201d02, + 0x201442, + 0x23de42, + 0x201842, + 0x207b02, + 0x201fc2, + 0x2281c2, + 0x214642, + 
0x2745c2, + 0x20cb42, + 0x2e6dc2, + 0x21cc82, + 0x225b82, + 0x204102, + 0x2204c2, + 0x205842, + 0x200482, + 0x221dc2, + 0x2044c2, + 0x20d2c2, + 0x200a02, + 0x21f542, + 0x204782, + 0x7102, + 0x242, + 0x2d42, + 0xcac2, + 0x1d02, + 0x1442, + 0x3de42, + 0x1842, + 0x7b02, + 0x1fc2, + 0x281c2, + 0x14642, + 0x745c2, + 0xcb42, + 0xe6dc2, + 0x1cc82, + 0x25b82, + 0x4102, + 0x204c2, + 0x5842, + 0x482, + 0x21dc2, + 0x44c2, + 0xd2c2, + 0xa02, + 0x1f542, + 0x4782, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x2442, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x20f882, + 0x201a03, + 0xc638543, + 0x323043, + 0x28cac3, + 0x1a3443, + 0x219302, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x1a3443, + 0x201a03, + 0x4542, + 0x201c02, + 0x1442b45, + 0x232282, + 0x16fb88, + 0xf882, + 0x209d82, + 0x209b02, + 0x20ddc2, + 0x2190c2, + 0x206802, + 0x11205, + 0x201282, + 0x2014c2, + 0x202c82, + 0x200dc2, + 0x21cc82, + 0x3951c2, + 0x206742, + 0x260a42, + 0x36dc7, + 0x1501cd, + 0xe1dc9, + 0x5900b, + 0xe5848, + 0x56809, + 0x106046, + 0x323043, + 0x16fb88, + 0x145944, + 0xf183, + 0x145c05, + 0x16fb88, + 0x5d3c6, + 0x145c49, + 0x126447, + 0x207102, + 0x286644, + 0x20f882, + 0x238543, + 0x201742, + 0x23cac3, + 0x207b02, + 0x2de944, + 0x255783, + 0x253442, + 0x208e83, + 0x200442, + 0x201a03, + 0x3a03c6, + 0x323d8f, + 0x7156c3, + 0x16fb88, + 0x20f882, + 0x21b583, + 0x323043, + 0x28cac3, + 0xe03, + 0x152e1cb, + 0xe2648, + 0x14b7aca, + 0x14f5907, + 0x8dbcb, + 0x149785, + 0x36dc7, + 0x20f882, + 0x238543, + 0x323043, + 0x208e83, + 0x207102, + 0x200b42, + 0x2092c2, + 0xfe38543, + 0x248582, + 0x23cac3, + 0x209c42, + 0x20d382, + 0x323043, + 0x210642, + 0x259c42, + 0x2aeb02, + 0x2006c2, + 0x295e02, + 0x203102, + 0x200782, + 0x2351c2, + 0x2335c2, + 0x252e42, + 0x2b5102, + 0x2d2942, + 0x327982, + 0x2111c2, + 0x28cac3, + 0x200802, + 0x208e83, + 0x24d382, + 0x289e82, + 0x201a03, + 0x2485c2, + 0x20d2c2, + 0x221382, + 0x200742, + 0x204d02, + 0x2e6282, + 0x22be42, + 0x231802, + 0x2312c2, + 0x3195ca, + 0x35c50a, + 0x39090a, + 0x3c1382, + 0x208a82, + 0x212a42, + 0x10223fc9, + 0x1072c38a, + 0x1438547, + 0x10a02482, + 0x1416dc3, + 0x12c2, + 0x12c38a, + 0x252044, + 0x11238543, + 0x23cac3, + 0x253384, + 0x323043, + 0x231604, + 0x255783, + 0x28cac3, + 0x208e83, + 0xe3bc5, + 0x200e03, + 0x201a03, + 0x20c843, + 0x202443, + 0x16fb88, + 0x140ff44, + 0x1441c5, + 0x12620a, + 0x11ec42, + 0x1affc6, + 0x35ad1, + 0x11a23fc9, + 0x144248, + 0x10b388, + 0x8cf47, + 0xbc2, + 0x13164b, + 0x1b320a, + 0x71ca, + 0x26547, + 0x16fb88, + 0x114008, + 0x14507, + 0x17c2198b, + 0x23087, + 0xc702, + 0x5b907, + 0x1920a, + 0x8cc4f, + 0x4f70f, + 0x22902, + 0xf882, + 0xaaa48, + 0xe228a, + 0x6a08, + 0x64b88, + 0xdfbc8, + 0x4c82, + 0x42bcf, + 0xa670b, + 0xf8d08, + 0x3e607, + 0x185b8a, + 0x3af8b, + 0x57f89, + 0x185a87, + 0x6908, + 0x1089cc, + 0x81a87, + 0x1a800a, + 0xdd088, + 0x1aafce, + 0x2438e, + 0x2638b, + 0x27bcb, + 0x2920b, + 0x2c049, + 0x2ff8b, + 0x31ccd, + 0x329cb, + 0x62b4d, + 0x62ecd, + 0xfa44a, + 0x1836cb, + 0x3b64b, + 0x47085, + 0x1802cc10, + 0x12d40f, + 0x12db4f, + 0x37a4d, + 0xbf490, + 0xc182, + 0x18623a08, + 0x8ca48, + 0x18af52c5, + 0x52a0b, + 0x11f3d0, + 0x5ad08, + 0x6b0a, + 0x27d89, + 0x6b307, + 0x6b647, + 0x6b807, + 0x6bb87, + 0x6ca87, + 0x6d487, + 0x6ddc7, + 0x6e187, + 0x6f187, + 0x6f487, + 0x70147, + 0x70307, + 0x704c7, + 0x70687, + 0x70987, + 0x70e47, + 0x71707, + 0x72007, + 0x72c87, + 0x731c7, + 0x73387, + 0x73707, + 0x74487, + 0x74687, + 0x750c7, + 0x75287, + 0x75447, + 0x75dc7, + 0x76087, + 0x77a47, + 
0x78187, + 0x78447, + 0x78bc7, + 0x78d87, + 0x79187, + 0x79687, + 0x79907, + 0x79d07, + 0x79ec7, + 0x7a087, + 0x7ae07, + 0x7c447, + 0x7c987, + 0x7cc87, + 0x7ce47, + 0x7d1c7, + 0x7d787, + 0x13c42, + 0x64c8a, + 0xe90c7, + 0x287c5, + 0x806d1, + 0x157c6, + 0x11318a, + 0xaa8ca, + 0x5d3c6, + 0xb880b, + 0x17202, + 0x3a1d1, + 0x1bbc89, + 0x9c0c9, + 0x351c2, + 0xa808a, + 0xac7c9, + 0xacf0f, + 0xada4e, + 0xae208, + 0x206c2, + 0xb649, + 0x1025ce, + 0xe8b4c, + 0xf328f, + 0x1a5b4e, + 0x1684c, + 0x18009, + 0x1c291, + 0x1f108, + 0x2ac92, + 0x2bb4d, + 0x33c4d, + 0x15208b, + 0x41cd5, + 0x164ec9, + 0xfcf8a, + 0x40809, + 0x4d650, + 0x4e70b, + 0x5898f, + 0x6390b, + 0x7298c, + 0x77650, + 0x8430a, + 0x853cd, + 0x894ce, + 0x8ef4a, + 0xede0c, + 0x176a54, + 0x1bb911, + 0x95a8b, + 0x97fcf, + 0xa290d, + 0xa76ce, + 0xb2bcc, + 0xb330c, + 0x160b0b, + 0x160e0e, + 0xd6750, + 0x11868b, + 0x1876cd, + 0x1bce4f, + 0xba0cc, + 0xbb0ce, + 0xbc011, + 0xc7c4c, + 0xc9307, + 0xc9c0d, + 0x130d4c, + 0x1605d0, + 0x174c0d, + 0xd1b47, + 0xd7c10, + 0xdd6c8, + 0xf178b, + 0x134c4f, + 0x3ef48, + 0x11338d, + 0x15c750, + 0x172e49, + 0x18e086c6, + 0xb8243, + 0xbc445, + 0x9a02, + 0x143889, + 0x5e04a, + 0x10fb06, + 0x2594a, + 0x1900c949, + 0x1c003, + 0xdebd1, + 0xdf009, + 0xe0407, + 0x35c4b, + 0xe67d0, + 0xe6c8c, + 0xe8e48, + 0xe9805, + 0xb988, + 0x1ad4ca, + 0x1c0c7, + 0x16bac7, + 0x982, + 0x12bcca, + 0x12e7c9, + 0x79545, + 0x402ca, + 0x9260f, + 0x4b8cb, + 0x14bd4c, + 0x17a492, + 0x94e45, + 0xec1c8, + 0x17618a, + 0x196f3d05, + 0x190ecc, + 0x129ac3, + 0x1951c2, + 0xfb30a, + 0x14fb70c, + 0x14f508, + 0x62d08, + 0x36d47, + 0xb282, + 0x4242, + 0x47590, + 0xa02, + 0x3904f, + 0x86286, + 0x7c0e, + 0xebbcb, + 0x8f148, + 0xda049, + 0x18f052, + 0x95cd, + 0x586c8, + 0x58ec9, + 0x5d50d, + 0x5e4c9, + 0x5e88b, + 0x60648, + 0x65808, + 0x65b88, + 0x65e49, + 0x6604a, + 0x6a98c, + 0xeb04a, + 0x10bd07, + 0x1f54d, + 0xfde8b, + 0x12004c, + 0x404c8, + 0x4f049, + 0x1b01d0, + 0xc2, + 0x2d3cd, + 0x2642, + 0x2cc2, + 0x10bc4a, + 0x11308a, + 0x11438b, + 0x3b80c, + 0x113b0a, + 0x113d8e, + 0xf2cd, + 0x11d708, + 0x4542, + 0x11f46c0e, + 0x1260ee4e, + 0x12f43f8a, + 0x1373a14e, + 0x13f9d38e, + 0x1460138c, + 0x1438547, + 0x1438549, + 0x1416dc3, + 0x14e3700c, + 0x15707789, + 0x15f3b509, + 0x12c2, + 0x146b51, + 0xed91, + 0x143ecd, + 0x13a091, + 0x19d2d1, + 0x12cf, + 0x36f4f, + 0x1076cc, + 0x13b44c, + 0x18954d, + 0x1b5295, + 0x10ed8c, + 0xea88c, + 0x122ed0, + 0x158fcc, + 0x16d9cc, + 0x191819, + 0x1a83d9, + 0x1aa459, + 0x1b3e94, + 0x1b8ad4, + 0x1c0d14, + 0x2394, + 0x3754, + 0x1670ee49, + 0x16dc0fc9, + 0x176ea949, + 0x1221f309, + 0x12c2, + 0x12a1f309, + 0x12c2, + 0x238a, + 0x12c2, + 0x1321f309, + 0x12c2, + 0x238a, + 0x12c2, + 0x13a1f309, + 0x12c2, + 0x1421f309, + 0x12c2, + 0x14a1f309, + 0x12c2, + 0x238a, + 0x12c2, + 0x1521f309, + 0x12c2, + 0x238a, + 0x12c2, + 0x15a1f309, + 0x12c2, + 0x1621f309, + 0x12c2, + 0x238a, + 0x12c2, + 0x16a1f309, + 0x12c2, + 0x1721f309, + 0x12c2, + 0x17a1f309, + 0x12c2, + 0x238a, + 0x12c2, + 0x35ac5, + 0x1b3204, + 0x146c0e, + 0xee4e, + 0x143f8a, + 0x13a14e, + 0x19d38e, + 0x138c, + 0x3700c, + 0x107789, + 0x13b509, + 0x10ee49, + 0x1c0fc9, + 0xea949, + 0x122f8d, + 0x2649, + 0x3a09, + 0x5bf04, + 0x11d8c4, + 0x126144, + 0x15f784, + 0x8de84, + 0x4b744, + 0x6e44, + 0x67344, + 0x8cf44, + 0x157e2c3, + 0xc182, + 0xf2c3, + 0x4c82, + 0x207102, + 0x20f882, + 0x201742, + 0x207602, + 0x207b02, + 0x200442, + 0x204242, + 0x238543, + 0x23cac3, + 0x323043, + 0x231603, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x208e83, + 0x201a03, + 0x160c3, + 0x323043, + 
0x31604, + 0x207102, + 0x39c783, + 0x1b638543, + 0x2bf347, + 0x323043, + 0x211a83, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x243d0a, + 0x3a03c5, + 0x221483, + 0x205082, + 0x16fb88, + 0x16fb88, + 0xf882, + 0x127482, + 0x1bf51b0b, + 0x5ba45, + 0x35dc5, + 0x114b46, + 0x145944, + 0xf183, + 0x145c05, + 0x131645, + 0x16fb88, + 0x23087, + 0x38543, + 0x1c644d87, + 0x1432c6, + 0x1c93b345, + 0x143387, + 0x1b4d0a, + 0x1b4bc8, + 0x11887, + 0x6df88, + 0x99707, + 0x152cf, + 0x435c7, + 0x150d86, + 0x11f3d0, + 0x12a58f, + 0x20a89, + 0x10fb84, + 0x1cd4344e, + 0xb098c, + 0x5810a, + 0xa7987, + 0x3520a, + 0xbb49, + 0xb514c, + 0x4304a, + 0x5ec8a, + 0x145c49, + 0x10fb06, + 0xa7a4a, + 0xe8a, + 0xa4e49, + 0xde488, + 0xde786, + 0xe284d, + 0xbc8c5, + 0x126447, + 0x1019c9, + 0xf72c7, + 0xb5ed4, + 0x103acb, + 0xf8b4a, + 0xab10d, + 0xd3c3, + 0xd3c3, + 0x24386, + 0xd3c3, + 0x19c783, + 0x16fb88, + 0xf882, + 0x53384, + 0x5f843, + 0x155685, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x203ec3, + 0x238543, + 0x23cac3, + 0x21b583, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x29c283, + 0x202443, + 0x203ec3, + 0x286644, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x206683, + 0x238543, + 0x23cac3, + 0x207603, + 0x21b583, + 0x323043, + 0x231604, + 0x3797c3, + 0x229443, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x221483, + 0x36a883, + 0x1ea38543, + 0x23cac3, + 0x250ac3, + 0x323043, + 0x212143, + 0x229443, + 0x201a03, + 0x204103, + 0x35f584, + 0x16fb88, + 0x1f238543, + 0x23cac3, + 0x2ae2c3, + 0x323043, + 0x28cac3, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x20e943, + 0x16fb88, + 0x1fa38543, + 0x23cac3, + 0x21b583, + 0x200e03, + 0x201a03, + 0x16fb88, + 0x1438547, + 0x39c783, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x131645, + 0x36dc7, + 0xb610b, + 0xdf404, + 0xbc8c5, + 0x1480cc8, + 0xae90d, + 0x20e6c505, + 0x7bd44, + 0x10c3, + 0x172d45, + 0x33b145, + 0x16fb88, + 0xd3c2, + 0x2bc3, + 0xf9306, + 0x31f948, + 0x3347c7, + 0x286644, + 0x39c286, + 0x3b5146, + 0x16fb88, + 0x2ddac3, + 0x342a49, + 0x26d615, + 0x6d61f, + 0x238543, + 0x3b3a52, + 0xf6306, + 0x114dc5, + 0x6b0a, + 0x27d89, + 0x3b380f, + 0x2de944, + 0x3490c5, + 0x304b10, + 0x34e347, + 0x200e03, + 0x293408, + 0x12ce46, + 0x29630a, + 0x230f04, + 0x2f3743, + 0x3a03c6, + 0x205082, + 0x22facb, + 0xe03, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x2f9a03, + 0x20f882, + 0x6ed43, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x211a83, + 0x228243, + 0x201a03, + 0x20f882, + 0x238543, + 0x23cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x207102, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x35dc5, + 0x286644, + 0x238543, + 0x23cac3, + 0x20f644, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x1a3443, + 0x201a03, + 0x238543, + 0x23cac3, + 0x21b583, + 0x204c03, + 0x28cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x210543, + 0x707c3, + 0x11a83, + 0x208e83, + 0x201a03, + 0x3195ca, + 0x335289, + 0x35438b, + 0x35490a, + 0x35c50a, + 0x369bcb, + 0x38274a, + 0x38b38a, + 0x39090a, + 0x390b8b, + 0x3ad209, + 0x3af10a, + 0x3af7cb, + 0x3b978b, + 0x3bfb4a, + 0x238543, + 0x23cac3, + 0x21b583, + 0x28cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x35dcb, + 0x651c8, + 0x1174c9, + 0x16fb88, + 0x238543, + 0x26b304, + 0x20b342, + 0x21bf84, + 0x346145, + 0x203ec3, + 
0x286644, + 0x238543, + 0x240244, + 0x23cac3, + 0x253384, + 0x2de944, + 0x231604, + 0x229443, + 0x208e83, + 0x201a03, + 0x22d585, + 0x206683, + 0x221483, + 0x20ec43, + 0x231944, + 0x20fe84, + 0x2cc105, + 0x16fb88, + 0x30dc84, + 0x36bdc6, + 0x281384, + 0x20f882, + 0x381107, + 0x254d87, + 0x251844, + 0x260105, + 0x374e05, + 0x2b13c5, + 0x231604, + 0x2cf6c8, + 0x23eb46, + 0x3bffc8, + 0x257cc5, + 0x2e4505, + 0x263544, + 0x201a03, + 0x2f4544, + 0x368dc6, + 0x3a04c3, + 0x231944, + 0x280bc5, + 0x2e4ac4, + 0x34da44, + 0x205082, + 0x2669c6, + 0x3a2906, + 0x30a185, + 0x207102, + 0x39c783, + 0x2760f882, + 0x223b84, + 0x207b02, + 0x28cac3, + 0x200e82, + 0x208e83, + 0x200442, + 0x215443, + 0x202443, + 0x16fb88, + 0x16fb88, + 0x323043, + 0x207102, + 0x2820f882, + 0x323043, + 0x270443, + 0x3797c3, + 0x32e5c4, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x207102, + 0x28a0f882, + 0x238543, + 0x208e83, + 0xe03, + 0x201a03, + 0x482, + 0x208882, + 0x21a902, + 0x211a83, + 0x2ef783, + 0x207102, + 0x131645, + 0x16fb88, + 0x36dc7, + 0x20f882, + 0x23cac3, + 0x253384, + 0x2020c3, + 0x323043, + 0x204c03, + 0x28cac3, + 0x208e83, + 0x21eb43, + 0x201a03, + 0x2252c3, + 0x122213, + 0x124cd4, + 0x36dc7, + 0x139986, + 0x5e24b, + 0x24386, + 0x5c0c7, + 0x120589, + 0xe838a, + 0x9058d, + 0x14fecc, + 0x3954a, + 0x11205, + 0x1b4d48, + 0x86286, + 0x31586, + 0x12cf06, + 0x20c182, + 0x10b14c, + 0x1b33c7, + 0x2a691, + 0x238543, + 0x6df05, + 0x7588, + 0x18ec4, + 0x29cbe1c6, + 0x806c6, + 0xb9a06, + 0x960ca, + 0xb4003, + 0x2a24c984, + 0xe8345, + 0x18e43, + 0x2a63dc47, + 0xe3bc5, + 0xb88cc, + 0xf7a88, + 0xbd248, + 0xa6589, + 0x14dc08, + 0x1425886, + 0x2ab71549, + 0x14978a, + 0x16308, + 0x114b48, + 0x8cf44, + 0xb5ac5, + 0x2ae42bc3, + 0x2b332106, + 0x2b6f4dc4, + 0x2bb39d87, + 0x114b44, + 0x114b44, + 0x114b44, + 0x114b44, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x207102, + 0x20f882, + 0x323043, + 0x205e82, + 0x208e83, + 0x201a03, + 0x215443, + 0x373ccf, + 0x37408e, + 0x16fb88, + 0x238543, + 0x4db87, + 0x23cac3, + 0x323043, + 0x255783, + 0x208e83, + 0x201a03, + 0x20d4c3, + 0x20d4c7, + 0x200142, + 0x2ce609, + 0x200242, + 0x24788b, + 0x2c110a, + 0x2c67c9, + 0x201242, + 0x2100c6, + 0x26cd95, + 0x2479d5, + 0x275793, + 0x247f53, + 0x201d42, + 0x212c45, + 0x31d44c, + 0x27c6cb, + 0x29c705, + 0x20cac2, + 0x28e142, + 0x384c06, + 0x200bc2, + 0x3acc46, + 0x2dd20d, + 0x26540c, + 0x22cc84, + 0x200f82, + 0x203402, + 0x22b048, + 0x201d02, + 0x20a746, + 0x28bf04, + 0x26cf55, + 0x275913, + 0x216d03, + 0x33844a, + 0x205407, + 0x3145c9, + 0x38d4c7, + 0x20d342, + 0x200002, + 0x3ba886, + 0x212702, + 0x16fb88, + 0x216b42, + 0x201102, + 0x27f847, + 0x217387, + 0x222d85, + 0x20c702, + 0x225287, + 0x225448, + 0x2024c2, + 0x2430c2, + 0x237302, + 0x201382, + 0x242688, + 0x20a043, + 0x25fa08, + 0x2e9b0d, + 0x2322c3, + 0x32ec08, + 0x245f4f, + 0x24630e, + 0x339a4a, + 0x22e811, + 0x22ec90, + 0x2c34cd, + 0x2c380c, + 0x36a707, + 0x3385c7, + 0x39c349, + 0x20d302, + 0x201442, + 0x25db0c, + 0x25de0b, + 0x2008c2, + 0x360cc6, + 0x20e982, + 0x204882, + 0x222902, + 0x20f882, + 0x3b69c4, + 0x244387, + 0x229682, + 0x24a347, + 0x24b547, + 0x20d282, + 0x20c8c2, + 0x24da45, + 0x21a442, + 0x2f290e, + 0x2ab3cd, + 0x23cac3, + 0x28d58e, + 0x2c5c0d, + 0x25ac43, + 0x201482, + 0x2891c4, + 0x216582, + 0x20fac2, + 0x364145, + 0x373587, + 0x393202, + 0x207602, + 0x252f87, + 0x255ac8, + 0x2f6802, + 0x294ec6, + 0x25d98c, + 0x25dccb, + 0x206b02, + 0x26764f, + 0x267a10, + 0x267e0f, + 0x2681d5, + 0x268714, + 0x268c0e, + 0x268f8e, + 0x26930f, + 0x2696ce, + 0x269a54, + 
0x269f53, + 0x26a40d, + 0x27d949, + 0x291ac3, + 0x201802, + 0x2b7505, + 0x206346, + 0x207b02, + 0x3a4ec7, + 0x323043, + 0x217202, + 0x37e548, + 0x22ea51, + 0x22ee90, + 0x2007c2, + 0x290e07, + 0x204182, + 0x332b07, + 0x209a02, + 0x342089, + 0x384bc7, + 0x27ac08, + 0x2be006, + 0x2ef683, + 0x339205, + 0x2022c2, + 0x207a82, + 0x3bac85, + 0x391345, + 0x204bc2, + 0x231043, + 0x2e4b47, + 0x205747, + 0x200502, + 0x25f1c4, + 0x211b83, + 0x211b89, + 0x215148, + 0x200282, + 0x202942, + 0x242387, + 0x263285, + 0x2ad208, + 0x215c87, + 0x21a243, + 0x294c86, + 0x2c334d, + 0x2c36cc, + 0x2c8346, + 0x209b02, + 0x20c202, + 0x204a82, + 0x245dcf, + 0x2461ce, + 0x374e87, + 0x20b302, + 0x2c72c5, + 0x2c72c6, + 0x214702, + 0x200802, + 0x228246, + 0x2b57c3, + 0x332a46, + 0x2d0285, + 0x2d028d, + 0x2d0855, + 0x2d108c, + 0x2d1e4d, + 0x2d2212, + 0x214642, + 0x2745c2, + 0x202ec2, + 0x249386, + 0x302486, + 0x200982, + 0x2063c6, + 0x202c82, + 0x39b505, + 0x200542, + 0x2ab4c9, + 0x2e324c, + 0x2e358b, + 0x200442, + 0x257708, + 0x2052c2, + 0x20cb42, + 0x278ec6, + 0x21f285, + 0x36c107, + 0x24bc85, + 0x28ea05, + 0x235d82, + 0x219a42, + 0x21cc82, + 0x2f3587, + 0x2613cd, + 0x26174c, + 0x317947, + 0x2235c2, + 0x225b82, + 0x23f688, + 0x343a08, + 0x34c008, + 0x313344, + 0x361087, + 0x2efc43, + 0x299842, + 0x206682, + 0x2f2149, + 0x3ab3c7, + 0x204102, + 0x2792c5, + 0x22fa42, + 0x236902, + 0x35dc83, + 0x35dc86, + 0x2f9a02, + 0x2fab42, + 0x200c02, + 0x281e06, + 0x345607, + 0x221282, + 0x206b42, + 0x25f84f, + 0x28d3cd, + 0x3029ce, + 0x2c5a8c, + 0x201a42, + 0x204142, + 0x2bde45, + 0x317e46, + 0x209002, + 0x205842, + 0x200482, + 0x215c04, + 0x2e9984, + 0x2b8706, + 0x204242, + 0x37d6c7, + 0x233803, + 0x233808, + 0x33cb48, + 0x240687, + 0x249286, + 0x202502, + 0x242603, + 0x351107, + 0x26ffc6, + 0x2e2d05, + 0x3136c8, + 0x206182, + 0x337547, + 0x21f542, + 0x332182, + 0x207f02, + 0x2e95c9, + 0x23b442, + 0x2018c2, + 0x248383, + 0x377787, + 0x2002c2, + 0x2e33cc, + 0x2e36cb, + 0x2c83c6, + 0x218d85, + 0x22a202, + 0x204782, + 0x2c1486, + 0x237e83, + 0x378407, + 0x243cc2, + 0x200d42, + 0x26cc15, + 0x247b95, + 0x275653, + 0x2480d3, + 0x2955c7, + 0x2c0ec8, + 0x379d90, + 0x3c020f, + 0x2c0ed3, + 0x2c6592, + 0x2ce1d0, + 0x2db58f, + 0x2dc512, + 0x2dffd1, + 0x2e0cd3, + 0x2e9392, + 0x2ea0cf, + 0x2f7c4e, + 0x2f9a92, + 0x2faed1, + 0x303e4f, + 0x347a4e, + 0x3559d1, + 0x2fee10, + 0x32f912, + 0x36fd51, + 0x3af4c6, + 0x30dd47, + 0x382ac7, + 0x203702, + 0x286d05, + 0x304887, + 0x21a902, + 0x218f42, + 0x230d85, + 0x226c43, + 0x244c06, + 0x26158d, + 0x2618cc, + 0x206442, + 0x31d2cb, + 0x27c58a, + 0x212b0a, + 0x2c04c9, + 0x2f0c0b, + 0x215dcd, + 0x304f8c, + 0x2f574a, + 0x277bcc, + 0x27d34b, + 0x29c54c, + 0x2b4c0b, + 0x2e31c3, + 0x36f946, + 0x3061c2, + 0x2fd502, + 0x256d03, + 0x203642, + 0x203643, + 0x260b86, + 0x268387, + 0x2c48c6, + 0x2e2448, + 0x343708, + 0x2cc7c6, + 0x20c402, + 0x309b4d, + 0x309e8c, + 0x2dea07, + 0x30db47, + 0x2302c2, + 0x221682, + 0x260982, + 0x255e82, + 0x20f882, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x215443, + 0x207102, + 0x207542, + 0x2da97d45, + 0x2de97685, + 0x2e320c86, + 0x16fb88, + 0x2e6b68c5, + 0x20f882, + 0x201742, + 0x2ea34cc5, + 0x2ee852c5, + 0x2f285e07, + 0x2f6f6e09, + 0x2fa74084, + 0x207b02, + 0x217202, + 0x2fe56a05, + 0x302977c9, + 0x30785908, + 0x30ab3185, + 0x30f3f5c7, + 0x31227248, + 0x316ec085, + 0x31a00106, + 0x31e41489, + 0x323311c8, + 0x326c8988, + 0x32a9ef0a, + 0x32e7e204, + 0x332d99c5, + 0x336c30c8, + 0x33b85d85, + 0x21a602, + 0x33e11103, + 
0x342aa246, + 0x3475d1c8, + 0x34a8ab86, + 0x34e8a688, + 0x35348206, + 0x356e2dc4, + 0x204d42, + 0x35addc87, + 0x35eaf444, + 0x36280087, + 0x367b0c87, + 0x200442, + 0x36aa3885, + 0x36e8f904, + 0x372f1447, + 0x37632c47, + 0x37a89006, + 0x37e38385, + 0x3829d7c7, + 0x386d5dc8, + 0x38ab7887, + 0x38ea6c89, + 0x3939e345, + 0x397778c7, + 0x39a974c6, + 0x39e102c8, + 0x3279cd, + 0x27a209, + 0x28384b, + 0x289ecb, + 0x2ae3cb, + 0x2e62cb, + 0x31804b, + 0x31830b, + 0x318949, + 0x31984b, + 0x319b0b, + 0x31a08b, + 0x31b08a, + 0x31b5ca, + 0x31bbcc, + 0x31e00b, + 0x31ea4a, + 0x33064a, + 0x33c6ce, + 0x33d1ce, + 0x33d54a, + 0x33efca, + 0x33fa8b, + 0x33fd4b, + 0x340b0b, + 0x36124b, + 0x36184a, + 0x36250b, + 0x3627ca, + 0x362a4a, + 0x362cca, + 0x38424b, + 0x38c6cb, + 0x38e64e, + 0x38e9cb, + 0x39464b, + 0x395b0b, + 0x39900a, + 0x399289, + 0x3994ca, + 0x39a94a, + 0x3addcb, + 0x3afa8b, + 0x3b05ca, + 0x3b1fcb, + 0x3b674b, + 0x3bf58b, + 0x3a287a88, + 0x3a68fd09, + 0x3aaa6409, + 0x3aee4d48, + 0x34b945, + 0x202d43, + 0x21b744, + 0x345805, + 0x273dc6, + 0x274805, + 0x28f584, + 0x3a4dc8, + 0x312ec5, + 0x299a84, + 0x211587, + 0x2a550a, + 0x3813ca, + 0x308f07, + 0x202c47, + 0x303647, + 0x271907, + 0x2ff9c5, + 0x204906, + 0x22b9c7, + 0x2c8684, + 0x2db006, + 0x2daf06, + 0x208185, + 0x331c04, + 0x388bc6, + 0x2a4707, + 0x232646, + 0x2bfa07, + 0x232dc3, + 0x26c7c6, + 0x23cf85, + 0x285f07, + 0x27100a, + 0x284e04, + 0x220808, + 0x2a2009, + 0x2d0e47, + 0x31e8c6, + 0x257988, + 0x28b2c9, + 0x314784, + 0x376004, + 0x35d785, + 0x22b6c8, + 0x2ccc07, + 0x29a3c9, + 0x3af5c8, + 0x353706, + 0x24d486, + 0x29fd88, + 0x365bc6, + 0x297685, + 0x2890c6, + 0x280ec8, + 0x256286, + 0x25cb8b, + 0x2ac646, + 0x2a224d, + 0x208605, + 0x2af306, + 0x218a05, + 0x35d949, + 0x27a787, + 0x36d148, + 0x2969c6, + 0x2a1509, + 0x341046, + 0x270f85, + 0x2a7f06, + 0x2d3586, + 0x2d3b09, + 0x333f06, + 0x3529c7, + 0x248c85, + 0x201d83, + 0x25cd05, + 0x2a2507, + 0x338d06, + 0x208509, + 0x320c86, + 0x289306, + 0x219fc9, + 0x288ac9, + 0x2a8747, + 0x20cd08, + 0x280509, + 0x286988, + 0x38b5c6, + 0x2de245, + 0x23fa4a, + 0x289386, + 0x2bf1c6, + 0x2d7605, + 0x272408, + 0x2220c7, + 0x239fca, + 0x253b46, + 0x27a645, + 0x20a506, + 0x236b47, + 0x31e787, + 0x24fc45, + 0x271145, + 0x2e79c6, + 0x2fbfc6, + 0x2be306, + 0x2bb884, + 0x287e09, + 0x290bc6, + 0x2d430a, + 0x222b88, + 0x3059c8, + 0x3813ca, + 0x205b45, + 0x2a4645, + 0x3575c8, + 0x2b0fc8, + 0x2b43c7, + 0x295946, + 0x329608, + 0x30a447, + 0x287088, + 0x2bbec6, + 0x289b88, + 0x29cd06, + 0x257e47, + 0x2a27c6, + 0x388bc6, + 0x383d4a, + 0x345506, + 0x2de249, + 0x36b086, + 0x2b6c0a, + 0x2e2dc9, + 0x2fe406, + 0x2bccc4, + 0x2b75cd, + 0x28ff87, + 0x32df46, + 0x2c8845, + 0x3410c5, + 0x204dc6, + 0x2d4fc9, + 0x3879c7, + 0x2826c6, + 0x2bd406, + 0x28f609, + 0x33f784, + 0x3a1184, + 0x39c0c8, + 0x260f46, + 0x279388, + 0x30fec8, + 0x378187, + 0x3beb49, + 0x2be507, + 0x2b678a, + 0x2fc88f, + 0x25100a, + 0x2bdc45, + 0x281105, + 0x220085, + 0x28be47, + 0x236703, + 0x20cf08, + 0x201e46, + 0x201f49, + 0x2e4806, + 0x3a3607, + 0x2a12c9, + 0x36d048, + 0x2d76c7, + 0x315603, + 0x34b9c5, + 0x236685, + 0x2bb6cb, + 0x385e44, + 0x30ad44, + 0x27f006, + 0x315e87, + 0x392a4a, + 0x251a87, + 0x36a947, + 0x2852c5, + 0x2016c5, + 0x253689, + 0x388bc6, + 0x25190d, + 0x334145, + 0x2a10c3, + 0x200dc3, + 0x39cf05, + 0x3534c5, + 0x257988, + 0x283007, + 0x3a0f06, + 0x2a6086, + 0x232545, + 0x23cd87, + 0x377c87, + 0x23ea07, + 0x2d9a4a, + 0x26c888, + 0x2bb884, + 0x256007, + 0x284707, + 0x352846, + 0x26f5c7, + 0x2ece48, + 0x2e8548, + 0x276346, + 0x374f88, + 0x2d1704, + 
0x22b9c6, + 0x239b86, + 0x333b86, + 0x2d0006, + 0x233ac4, + 0x2719c6, + 0x2c7146, + 0x29f406, + 0x2381c6, + 0x213ec6, + 0x223f06, + 0x3a0e08, + 0x3bcc88, + 0x2da288, + 0x274a08, + 0x357546, + 0x217e05, + 0x2dd4c6, + 0x2b3205, + 0x397f07, + 0x27df05, + 0x21ae83, + 0x2058c5, + 0x34cc44, + 0x214005, + 0x22dc83, + 0x33d807, + 0x374a48, + 0x2bfac6, + 0x2b0c4d, + 0x2810c6, + 0x29e985, + 0x227603, + 0x2c2a89, + 0x33f906, + 0x29dd86, + 0x2a8004, + 0x250f87, + 0x334546, + 0x387c85, + 0x20b2c3, + 0x209484, + 0x2848c6, + 0x204a04, + 0x239c88, + 0x2005c9, + 0x325f49, + 0x2a7e0a, + 0x2a918d, + 0x20abc7, + 0x2bf046, + 0x205ec4, + 0x2f6e09, + 0x28e688, + 0x28fb86, + 0x245246, + 0x26f5c7, + 0x2b9786, + 0x22c986, + 0x36aac6, + 0x3b0d0a, + 0x227248, + 0x364dc5, + 0x26fa09, + 0x28758a, + 0x2f1e88, + 0x2a40c8, + 0x29dd08, + 0x2ad74c, + 0x318585, + 0x2a6308, + 0x2e7546, + 0x36d2c6, + 0x3a34c7, + 0x251985, + 0x289245, + 0x325e09, + 0x219847, + 0x201f05, + 0x22d887, + 0x200dc3, + 0x2cd145, + 0x214308, + 0x25d087, + 0x2a3f89, + 0x2dac05, + 0x395a04, + 0x2a8e48, + 0x2dddc7, + 0x2d7888, + 0x2508c8, + 0x2d6645, + 0x281906, + 0x2a6186, + 0x277449, + 0x2b26c7, + 0x2b3ac6, + 0x2236c7, + 0x20e743, + 0x274084, + 0x2d1805, + 0x23cec4, + 0x393244, + 0x288547, + 0x25b347, + 0x234284, + 0x2a3dd0, + 0x234e47, + 0x2016c5, + 0x37178c, + 0x250684, + 0x2a9e48, + 0x257d49, + 0x36e646, + 0x34dd48, + 0x223384, + 0x37d0c8, + 0x23a5c6, + 0x238048, + 0x2a4cc6, + 0x2cc8cb, + 0x201d85, + 0x2d1688, + 0x200a04, + 0x200a0a, + 0x2a3f89, + 0x357f06, + 0x220148, + 0x263805, + 0x2b9044, + 0x2a9d46, + 0x23e8c8, + 0x287a88, + 0x329e86, + 0x358b04, + 0x23f9c6, + 0x2be587, + 0x27ff87, + 0x26f5cf, + 0x204187, + 0x2fe4c7, + 0x23d2c5, + 0x35fcc5, + 0x2a8409, + 0x2ed806, + 0x286045, + 0x288dc7, + 0x2c6188, + 0x29f505, + 0x2a27c6, + 0x2229c8, + 0x28ab8a, + 0x39c888, + 0x292f47, + 0x2fccc6, + 0x26f9c6, + 0x20ca43, + 0x2052c3, + 0x287749, + 0x280389, + 0x2a6b86, + 0x2dac05, + 0x304588, + 0x220148, + 0x365d48, + 0x36ab4b, + 0x2b0e87, + 0x315849, + 0x26f848, + 0x356284, + 0x3886c8, + 0x295089, + 0x2b3dc5, + 0x28bd47, + 0x274105, + 0x287988, + 0x297bcb, + 0x29d510, + 0x2aec45, + 0x21e20c, + 0x3a10c5, + 0x285343, + 0x296706, + 0x2c5a04, + 0x28fa06, + 0x2a4707, + 0x222a44, + 0x24c3c8, + 0x20cdcd, + 0x330a05, + 0x20ac04, + 0x241b84, + 0x27bd89, + 0x292bc8, + 0x320b07, + 0x23a648, + 0x287ec8, + 0x2829c5, + 0x28c647, + 0x282947, + 0x342807, + 0x271149, + 0x223c49, + 0x36c986, + 0x2c3a06, + 0x26f806, + 0x33e9c5, + 0x3b4944, + 0x200006, + 0x200386, + 0x282a08, + 0x23680b, + 0x284cc7, + 0x205ec4, + 0x334486, + 0x2ed187, + 0x388f45, + 0x210bc5, + 0x21b484, + 0x223bc6, + 0x200088, + 0x2f6e09, + 0x259706, + 0x28df88, + 0x387d46, + 0x355088, + 0x2d6c8c, + 0x282886, + 0x29e64d, + 0x29eacb, + 0x352a85, + 0x377dc7, + 0x334006, + 0x31e648, + 0x36ca09, + 0x276608, + 0x2016c5, + 0x2076c7, + 0x286a88, + 0x332489, + 0x2a0986, + 0x25960a, + 0x31e3c8, + 0x27644b, + 0x2d964c, + 0x37d1c8, + 0x283e46, + 0x28c048, + 0x28a807, + 0x2e4909, + 0x2976cd, + 0x2a26c6, + 0x365308, + 0x3bcb49, + 0x2c4a48, + 0x289c88, + 0x2c798c, + 0x2c8e87, + 0x2c96c7, + 0x270f85, + 0x31a807, + 0x2c6048, + 0x2a9dc6, + 0x26020c, + 0x2f60c8, + 0x2d5708, + 0x262246, + 0x236407, + 0x36cb84, + 0x274a08, + 0x28d88c, + 0x22834c, + 0x2bdcc5, + 0x2b85c7, + 0x358a86, + 0x236386, + 0x35db08, + 0x202b84, + 0x23264b, + 0x37d80b, + 0x2fccc6, + 0x20cc47, + 0x339305, + 0x278585, + 0x232786, + 0x2637c5, + 0x385e05, + 0x2e40c7, + 0x27f609, + 0x2fc184, + 0x2feac5, + 0x2ead45, + 0x2b5448, + 0x235685, + 0x2c0b89, + 0x2b16c7, + 
0x2b16cb, + 0x261ac6, + 0x3a0b49, + 0x331b48, + 0x272885, + 0x342908, + 0x223c88, + 0x249b07, + 0x383b47, + 0x2885c9, + 0x237f87, + 0x27de09, + 0x29b88c, + 0x2a6b88, + 0x331009, + 0x360987, + 0x287f89, + 0x25b487, + 0x2d9748, + 0x3bed05, + 0x22b946, + 0x2c8888, + 0x30cf08, + 0x287449, + 0x385e47, + 0x278645, + 0x21f949, + 0x345306, + 0x2440c4, + 0x2440c6, + 0x35d048, + 0x254547, + 0x236a08, + 0x375049, + 0x3b1a07, + 0x2a56c6, + 0x377e84, + 0x205949, + 0x28c4c8, + 0x262107, + 0x2b56c6, + 0x236746, + 0x2bf144, + 0x241986, + 0x202003, + 0x34f109, + 0x201d46, + 0x3752c5, + 0x2a6086, + 0x2d79c5, + 0x286f08, + 0x37cf07, + 0x261e06, + 0x234d06, + 0x3059c8, + 0x2a8587, + 0x2a2705, + 0x2a3bc8, + 0x3bb748, + 0x31e3c8, + 0x3a0f85, + 0x22b9c6, + 0x325d09, + 0x2772c4, + 0x351d8b, + 0x22c68b, + 0x364cc9, + 0x200dc3, + 0x25efc5, + 0x21d306, + 0x3ba188, + 0x2fc804, + 0x2bfac6, + 0x2d9b89, + 0x2bc9c5, + 0x2e4006, + 0x2dddc6, + 0x220144, + 0x2af4ca, + 0x375208, + 0x30cf06, + 0x2cf245, + 0x3b8247, + 0x23d187, + 0x281904, + 0x22c8c7, + 0x2b6784, + 0x333b06, + 0x20cf43, + 0x271145, + 0x334f05, + 0x3beec8, + 0x2561c5, + 0x2825c9, + 0x274847, + 0x27484b, + 0x2aa04c, + 0x2aa64a, + 0x33f5c7, + 0x202e83, + 0x202e88, + 0x3a1145, + 0x29f585, + 0x2140c4, + 0x2d9646, + 0x257d46, + 0x2419c7, + 0x34d58b, + 0x233ac4, + 0x2e7644, + 0x2cbd04, + 0x2d3706, + 0x222a44, + 0x22b7c8, + 0x34b885, + 0x24fac5, + 0x365c87, + 0x377ec9, + 0x3534c5, + 0x38dcca, + 0x248b89, + 0x2911ca, + 0x3b0e49, + 0x310444, + 0x2bd4c5, + 0x2b9888, + 0x2f150b, + 0x35d785, + 0x33be86, + 0x236304, + 0x282b06, + 0x3b1889, + 0x2ed287, + 0x320e48, + 0x2a9506, + 0x2be507, + 0x287a88, + 0x3870c6, + 0x39b804, + 0x3743c7, + 0x376945, + 0x389b87, + 0x200104, + 0x333f86, + 0x2d5f48, + 0x29ec88, + 0x2e7007, + 0x27f988, + 0x29cdc5, + 0x213e44, + 0x3812c8, + 0x27fa84, + 0x220005, + 0x2ffbc4, + 0x30a547, + 0x290c87, + 0x2880c8, + 0x2d7a06, + 0x256145, + 0x2823c8, + 0x39ca88, + 0x2a7d49, + 0x22c986, + 0x23a048, + 0x20088a, + 0x388fc8, + 0x2ec085, + 0x349286, + 0x248a48, + 0x20778a, + 0x226047, + 0x28ee45, + 0x29ad48, + 0x2c2404, + 0x272486, + 0x2c9a48, + 0x213ec6, + 0x20b308, + 0x296e87, + 0x211486, + 0x2bccc4, + 0x364707, + 0x2b8e84, + 0x3b1847, + 0x2a064d, + 0x288805, + 0x2d4dcb, + 0x2285c6, + 0x257808, + 0x24c384, + 0x357746, + 0x2848c6, + 0x28c387, + 0x29e30d, + 0x24e587, + 0x2b93c8, + 0x278705, + 0x276e08, + 0x2ccb86, + 0x29ce48, + 0x22ab46, + 0x25a707, + 0x39ae89, + 0x36ebc7, + 0x28fe48, + 0x27af45, + 0x222e08, + 0x219405, + 0x3ab545, + 0x3b10c5, + 0x23ef43, + 0x289144, + 0x26fa05, + 0x241489, + 0x3043c6, + 0x2ecf48, + 0x383905, + 0x2bb507, + 0x2ad54a, + 0x2e3f49, + 0x2d348a, + 0x2da308, + 0x22d6cc, + 0x288e4d, + 0x301bc3, + 0x20b208, + 0x209445, + 0x28a946, + 0x36cec6, + 0x2ebb05, + 0x2237c9, + 0x20e1c5, + 0x2823c8, + 0x25fe06, + 0x35e006, + 0x2a8d09, + 0x39ed87, + 0x297e86, + 0x2ad4c8, + 0x333a88, + 0x2e4f47, + 0x2381ce, + 0x2ccdc5, + 0x332385, + 0x213dc8, + 0x20a247, + 0x200842, + 0x2c7504, + 0x28f90a, + 0x2621c8, + 0x389206, + 0x2a1408, + 0x2a6186, + 0x3337c8, + 0x2b3ac8, + 0x3ab504, + 0x2bba45, + 0x681384, + 0x681384, + 0x681384, + 0x201e03, + 0x2365c6, + 0x282886, + 0x2a508c, + 0x200943, + 0x223286, + 0x20cf04, + 0x33f888, + 0x2d99c5, + 0x28fa06, + 0x2c31c8, + 0x2db2c6, + 0x261d86, + 0x357d08, + 0x2d1887, + 0x237d49, + 0x2fa8ca, + 0x20a944, + 0x27df05, + 0x29a385, + 0x2f6c06, + 0x20ac06, + 0x2a5ac6, + 0x2ff206, + 0x237e84, + 0x237e8b, + 0x23c584, + 0x2a5245, + 0x2b2ac5, + 0x378246, + 0x2090c8, + 0x288d07, + 0x320c04, + 0x232fc3, + 0x2c1f05, + 0x311847, + 
0x288c0b, + 0x3bedc7, + 0x2c30c8, + 0x2e7287, + 0x23d406, + 0x27a4c8, + 0x2b004b, + 0x345746, + 0x21d449, + 0x2b01c5, + 0x315603, + 0x2e4006, + 0x296d88, + 0x21f083, + 0x271e03, + 0x287a86, + 0x2a6186, + 0x36958a, + 0x283e85, + 0x28470b, + 0x2a5fcb, + 0x210a83, + 0x20b943, + 0x2b6704, + 0x2af6c7, + 0x296e04, + 0x277344, + 0x2e73c4, + 0x223e88, + 0x2cf188, + 0x205249, + 0x39e3c8, + 0x28b487, + 0x2381c6, + 0x2ecb8f, + 0x2ccf06, + 0x2d9944, + 0x2cefca, + 0x311747, + 0x208206, + 0x297509, + 0x2051c5, + 0x3bf005, + 0x205306, + 0x222f43, + 0x2c2449, + 0x2273c6, + 0x202d09, + 0x392a46, + 0x271145, + 0x2be0c5, + 0x204183, + 0x2af808, + 0x213887, + 0x201e44, + 0x33f708, + 0x2ffe04, + 0x2f0486, + 0x296706, + 0x248fc6, + 0x2d1549, + 0x29f505, + 0x388bc6, + 0x2666c9, + 0x2cb906, + 0x223f06, + 0x397346, + 0x21ce85, + 0x2ffbc6, + 0x25a704, + 0x3bed05, + 0x2c8884, + 0x2b9f86, + 0x334104, + 0x2136c3, + 0x28e745, + 0x23dac8, + 0x262987, + 0x2c1ac9, + 0x28ed48, + 0x29fb51, + 0x2dde4a, + 0x2fcc07, + 0x25a986, + 0x20cf04, + 0x2c8988, + 0x233fc8, + 0x29fd0a, + 0x2c094d, + 0x2a7f06, + 0x357e06, + 0x3647c6, + 0x24fac7, + 0x2b9485, + 0x210187, + 0x20cdc5, + 0x2b1804, + 0x2ae086, + 0x241807, + 0x2c214d, + 0x248987, + 0x3a4cc8, + 0x2826c9, + 0x349186, + 0x2a0905, + 0x22dcc4, + 0x35d146, + 0x281806, + 0x262346, + 0x2a1c88, + 0x21cd43, + 0x20aa83, + 0x338e45, + 0x207b06, + 0x2b3a85, + 0x2a9708, + 0x2a48ca, + 0x3a2dc4, + 0x33f888, + 0x29dd08, + 0x378087, + 0x3839c9, + 0x2c2dc8, + 0x2a6d07, + 0x2957c6, + 0x213eca, + 0x35d1c8, + 0x2f8589, + 0x292c88, + 0x229b89, + 0x2e8747, + 0x33bdc5, + 0x36ad46, + 0x2a9c48, + 0x287c08, + 0x29de88, + 0x2fcdc8, + 0x2a5245, + 0x218944, + 0x213588, + 0x24b384, + 0x3b0c44, + 0x271145, + 0x299ac7, + 0x377c89, + 0x28c187, + 0x2008c5, + 0x27f206, + 0x363686, + 0x200b84, + 0x2a9046, + 0x255f84, + 0x276d06, + 0x377a46, + 0x21eec6, + 0x2016c5, + 0x2a95c7, + 0x202e83, + 0x21dd89, + 0x3057c8, + 0x2f6d04, + 0x2f6d0d, + 0x29ed88, + 0x2d7248, + 0x2f8506, + 0x39af89, + 0x2e3f49, + 0x3b1585, + 0x2a49ca, + 0x2edbca, + 0x2a5ccc, + 0x2a5e46, + 0x27fe06, + 0x2cd086, + 0x2c84c9, + 0x28ab86, + 0x2101c6, + 0x20e286, + 0x274a08, + 0x27f986, + 0x2d92cb, + 0x299c45, + 0x24fac5, + 0x280085, + 0x39be46, + 0x213e83, + 0x248f46, + 0x248907, + 0x2c8845, + 0x24d545, + 0x3410c5, + 0x313846, + 0x204dc4, + 0x385806, + 0x284049, + 0x39bccc, + 0x2b1548, + 0x23e844, + 0x2ff8c6, + 0x2286c6, + 0x296d88, + 0x220148, + 0x39bbc9, + 0x3b8247, + 0x260c89, + 0x255806, + 0x237404, + 0x214944, + 0x20a584, + 0x287a88, + 0x377aca, + 0x353446, + 0x35fb87, + 0x37e787, + 0x3a0c45, + 0x29a344, + 0x295046, + 0x2b94c6, + 0x202bc3, + 0x305607, + 0x2507c8, + 0x3b16ca, + 0x2d4708, + 0x28a688, + 0x334145, + 0x352b85, + 0x284dc5, + 0x3a1006, + 0x2393c6, + 0x25b285, + 0x34f349, + 0x29a14c, + 0x284e87, + 0x29fd88, + 0x24ee05, + 0x681384, + 0x240ac4, + 0x25d1c4, + 0x217946, + 0x2a728e, + 0x3bf087, + 0x24fcc5, + 0x27724c, + 0x2ffcc7, + 0x241787, + 0x274e89, + 0x2208c9, + 0x28ee45, + 0x3057c8, + 0x325d09, + 0x31e285, + 0x2c8788, + 0x227546, + 0x381546, + 0x2e2dc4, + 0x25ff08, + 0x248743, + 0x235e44, + 0x2c1f85, + 0x204dc7, + 0x21b4c5, + 0x200749, + 0x27e64d, + 0x2935c6, + 0x229b04, + 0x2958c8, + 0x27f44a, + 0x21da87, + 0x243905, + 0x235e83, + 0x2a618e, + 0x2af90c, + 0x2f1f87, + 0x2a7447, + 0x200143, + 0x28abc5, + 0x25d1c5, + 0x2a17c8, + 0x29db49, + 0x23e746, + 0x296e04, + 0x2fcb46, + 0x3650cb, + 0x2e3ccc, + 0x376447, + 0x2d9585, + 0x3bb648, + 0x2e4d05, + 0x2cefc7, + 0x2ddc87, + 0x248745, + 0x213e83, + 0x3b36c4, + 0x21b705, + 0x2fc085, + 0x2fc086, + 
0x2821c8, + 0x241807, + 0x36d1c6, + 0x25b686, + 0x3b1006, + 0x2f88c9, + 0x28c747, + 0x262606, + 0x2e3e46, + 0x27e106, + 0x2af405, + 0x21e8c6, + 0x390e05, + 0x235708, + 0x2990cb, + 0x294b86, + 0x37e7c4, + 0x2c8109, + 0x274844, + 0x2274c8, + 0x2441c7, + 0x289b84, + 0x2c2688, + 0x2c94c4, + 0x2af444, + 0x39ac45, + 0x330a46, + 0x223dc7, + 0x20b3c3, + 0x2a5785, + 0x32a504, + 0x3323c6, + 0x3b1608, + 0x39c785, + 0x298d89, + 0x21fb45, + 0x223288, + 0x22cfc7, + 0x398048, + 0x2c1907, + 0x2fe589, + 0x271846, + 0x360486, + 0x20e284, + 0x295705, + 0x3093cc, + 0x280087, + 0x280fc7, + 0x37e648, + 0x2935c6, + 0x2794c4, + 0x34bc04, + 0x288449, + 0x2cd186, + 0x253707, + 0x2cff84, + 0x24ab06, + 0x35f245, + 0x2d7547, + 0x2d9246, + 0x2594c9, + 0x2eda07, + 0x26f5c7, + 0x2a8b86, + 0x24aa45, + 0x285988, + 0x227248, + 0x2f6a46, + 0x39c7c5, + 0x344806, + 0x202c03, + 0x2a1649, + 0x2a584e, + 0x2c1608, + 0x2fff08, + 0x2f684b, + 0x298fc6, + 0x20a884, + 0x261d84, + 0x2a594a, + 0x21e107, + 0x2626c5, + 0x21d449, + 0x2c7205, + 0x3b0c87, + 0x250584, + 0x27b907, + 0x30fdc8, + 0x2d0f06, + 0x365489, + 0x2c2eca, + 0x21e086, + 0x29e8c6, + 0x2b2a45, + 0x38ef85, + 0x325647, + 0x24ec48, + 0x35f188, + 0x3ab506, + 0x2be145, + 0x20a98e, + 0x2bb884, + 0x2a1745, + 0x27eb89, + 0x2ed608, + 0x292e86, + 0x2a36cc, + 0x2a44d0, + 0x2a6ecf, + 0x2a8308, + 0x33f5c7, + 0x2016c5, + 0x26fa05, + 0x389089, + 0x29af49, + 0x23fac6, + 0x35d807, + 0x2b8545, + 0x2b43c9, + 0x3528c6, + 0x28a9cd, + 0x288789, + 0x277344, + 0x2c1388, + 0x213649, + 0x353606, + 0x27f305, + 0x360486, + 0x320d09, + 0x281688, + 0x217e05, + 0x200984, + 0x2a388b, + 0x3534c5, + 0x2a39c6, + 0x289186, + 0x26e646, + 0x27c18b, + 0x298e89, + 0x25b5c5, + 0x397e07, + 0x2dddc6, + 0x34dec6, + 0x25cf48, + 0x330b49, + 0x3a4a8c, + 0x311648, + 0x23c586, + 0x329e83, + 0x28bf46, + 0x27bfc5, + 0x284a48, + 0x2bdb46, + 0x2d7788, + 0x251b05, + 0x283245, + 0x27a8c8, + 0x333947, + 0x36ce07, + 0x2419c7, + 0x34dd48, + 0x39ad08, + 0x31a706, + 0x2b9dc7, + 0x273f47, + 0x27be8a, + 0x20d703, + 0x39be46, + 0x23e985, + 0x28f904, + 0x2826c9, + 0x2fe504, + 0x262a04, + 0x2a4d44, + 0x2a744b, + 0x2137c7, + 0x20abc5, + 0x29cac8, + 0x27f206, + 0x27f208, + 0x283dc6, + 0x293345, + 0x293e85, + 0x295f46, + 0x296b48, + 0x297448, + 0x282886, + 0x29c90f, + 0x2a1110, + 0x208605, + 0x202e83, + 0x2374c5, + 0x315788, + 0x29ae49, + 0x31e3c8, + 0x2f8748, + 0x2bec08, + 0x213887, + 0x27eec9, + 0x2d7988, + 0x2730c4, + 0x2a4bc8, + 0x2b5509, + 0x2babc7, + 0x2a2644, + 0x28c248, + 0x2a938a, + 0x3085c6, + 0x2a7f06, + 0x22c849, + 0x2a4707, + 0x2d4588, + 0x2fdbc8, + 0x2cfe08, + 0x3690c5, + 0x38ff05, + 0x24fac5, + 0x25d185, + 0x38cb87, + 0x213e85, + 0x2c8845, + 0x20ae06, + 0x31e307, + 0x2f1447, + 0x2a9686, + 0x2da845, + 0x2a39c6, + 0x202f45, + 0x2b83c8, + 0x2f1e04, + 0x2cb986, + 0x348084, + 0x2b9048, + 0x2cba8a, + 0x28300c, + 0x34d785, + 0x24fb86, + 0x3a4c46, + 0x234b86, + 0x23c604, + 0x35f505, + 0x283c07, + 0x2a4789, + 0x2d3c07, + 0x681384, + 0x681384, + 0x320a85, + 0x38d584, + 0x2a308a, + 0x27f086, + 0x27a704, + 0x208185, + 0x3875c5, + 0x2b93c4, + 0x288dc7, + 0x21fac7, + 0x2d3708, + 0x342348, + 0x217e09, + 0x2a5308, + 0x2a324b, + 0x251044, + 0x375f45, + 0x2860c5, + 0x241949, + 0x330b49, + 0x2c8008, + 0x243f48, + 0x2df044, + 0x228705, + 0x202d43, + 0x2f6bc5, + 0x388c46, + 0x29d98c, + 0x2189c6, + 0x37cfc6, + 0x293105, + 0x3138c8, + 0x2c1786, + 0x25ab06, + 0x2a7f06, + 0x22e2cc, + 0x262504, + 0x3b114a, + 0x293048, + 0x29d7c7, + 0x32a406, + 0x23e807, + 0x2f2ec5, + 0x2b56c6, + 0x35c286, + 0x367cc7, + 0x262a44, + 0x30a645, + 0x27eb84, + 0x2b1887, + 
0x27edc8, + 0x27fc8a, + 0x286907, + 0x375387, + 0x33f547, + 0x2e4e49, + 0x29d98a, + 0x2373c3, + 0x262945, + 0x20b343, + 0x2e7409, + 0x254ec8, + 0x23d2c7, + 0x31e4c9, + 0x227346, + 0x2042c8, + 0x33d785, + 0x39cb8a, + 0x2dbc89, + 0x276209, + 0x3a34c7, + 0x2340c9, + 0x21edc8, + 0x367e86, + 0x24fd48, + 0x21ce87, + 0x237f87, + 0x248b87, + 0x2d5dc8, + 0x2ff746, + 0x2a9145, + 0x283c07, + 0x29e3c8, + 0x348004, + 0x2d41c4, + 0x297d87, + 0x2b3e47, + 0x325b8a, + 0x367e06, + 0x35854a, + 0x2c7447, + 0x2bb647, + 0x358004, + 0x27dec4, + 0x2d7446, + 0x281b84, + 0x281b8c, + 0x203185, + 0x21ff89, + 0x265684, + 0x2b9485, + 0x27f3c8, + 0x22d245, + 0x204dc6, + 0x225f44, + 0x28f30a, + 0x2b25c6, + 0x2a424a, + 0x2b7887, + 0x236b45, + 0x222f45, + 0x3a0c8a, + 0x296cc5, + 0x2a7e06, + 0x24b384, + 0x2b6886, + 0x325705, + 0x2bdc06, + 0x2e700c, + 0x2d388a, + 0x2957c4, + 0x2381c6, + 0x2a4707, + 0x2d91c4, + 0x274a08, + 0x39e246, + 0x20a809, + 0x2baec9, + 0x2a6c89, + 0x351f46, + 0x21cf86, + 0x24fe87, + 0x34f288, + 0x21cd89, + 0x2137c7, + 0x29cc46, + 0x2be587, + 0x364685, + 0x2bb884, + 0x24fa47, + 0x274105, + 0x28f845, + 0x36c347, + 0x248608, + 0x3bb5c6, + 0x29f24d, + 0x2a19cf, + 0x2a5fcd, + 0x200904, + 0x23dbc6, + 0x2dc1c8, + 0x20e245, + 0x27c048, + 0x2499ca, + 0x277344, + 0x365646, + 0x33ae07, + 0x233ac7, + 0x2d1949, + 0x24fd05, + 0x2b93c4, + 0x2bb98a, + 0x2c2989, + 0x2341c7, + 0x272306, + 0x353606, + 0x228646, + 0x374486, + 0x2db94f, + 0x2dc089, + 0x27f986, + 0x233ec6, + 0x320289, + 0x2b9ec7, + 0x229403, + 0x22e446, + 0x2052c3, + 0x2eb9c8, + 0x2be3c7, + 0x2a8509, + 0x296588, + 0x36cf48, + 0x385f86, + 0x218909, + 0x398845, + 0x2b9f84, + 0x29a687, + 0x2c8545, + 0x200904, + 0x20ac88, + 0x202044, + 0x2b9c07, + 0x3749c6, + 0x2e7a85, + 0x292c88, + 0x3534cb, + 0x3778c7, + 0x3a0f06, + 0x2ccf84, + 0x348186, + 0x271145, + 0x274105, + 0x285709, + 0x2889c9, + 0x237fc4, + 0x238005, + 0x238205, + 0x39ca06, + 0x3058c8, + 0x2c6b86, + 0x25060b, + 0x36e4ca, + 0x2b8f85, + 0x293f06, + 0x3a2ac5, + 0x2e9dc5, + 0x2ad387, + 0x39c0c8, + 0x260c84, + 0x26be86, + 0x2974c6, + 0x21ef87, + 0x3155c4, + 0x2848c6, + 0x2427c5, + 0x2427c9, + 0x21b584, + 0x29a4c9, + 0x282886, + 0x2c8f48, + 0x238205, + 0x37e885, + 0x2bdc06, + 0x3a4989, + 0x2208c9, + 0x37d046, + 0x2ed708, + 0x277348, + 0x3a2a84, + 0x2bbcc4, + 0x2bbcc8, + 0x32e048, + 0x260d89, + 0x388bc6, + 0x2a7f06, + 0x3294cd, + 0x2bfac6, + 0x2d6b49, + 0x2dd5c5, + 0x205306, + 0x2102c8, + 0x326885, + 0x273f84, + 0x271145, + 0x2882c8, + 0x2a2e49, + 0x27ec44, + 0x333f86, + 0x22d10a, + 0x2f1e88, + 0x325d09, + 0x261f0a, + 0x31e446, + 0x2a1b88, + 0x2ced85, + 0x2c5ec8, + 0x2c1a05, + 0x227209, + 0x37ac49, + 0x203282, + 0x2b01c5, + 0x2782c6, + 0x2827c7, + 0x34e085, + 0x30ce06, + 0x326948, + 0x2935c6, + 0x2b9749, + 0x2810c6, + 0x25cdc8, + 0x2b0805, + 0x264906, + 0x25a808, + 0x287a88, + 0x2e8648, + 0x353788, + 0x21e8c4, + 0x281943, + 0x2b9984, + 0x286b06, + 0x3646c4, + 0x2ffe47, + 0x25aa09, + 0x2cbd05, + 0x2fdbc6, + 0x22e446, + 0x28200b, + 0x2b8ec6, + 0x2cf8c6, + 0x2d13c8, + 0x24d486, + 0x236943, + 0x2164c3, + 0x2bb884, + 0x239f45, + 0x387b87, + 0x27edc8, + 0x27edcf, + 0x283b0b, + 0x3056c8, + 0x334006, + 0x3059ce, + 0x251143, + 0x387b04, + 0x2b8e45, + 0x2b9246, + 0x29514b, + 0x299b86, + 0x222a49, + 0x2e7a85, + 0x3999c8, + 0x216688, + 0x22078c, + 0x2a7486, + 0x2f6c06, + 0x2dac05, + 0x28fc08, + 0x25a805, + 0x356288, + 0x2a3a4a, + 0x2a6409, + 0x681384, + 0x3b60f882, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x39c783, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x208e83, + 
0x201a03, + 0x213083, + 0x286644, + 0x238543, + 0x240244, + 0x23cac3, + 0x2de944, + 0x323043, + 0x34e347, + 0x28cac3, + 0x200e03, + 0x293408, + 0x201a03, + 0x29630b, + 0x2f3743, + 0x3a03c6, + 0x205082, + 0x22facb, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x201a03, + 0x220b83, + 0x201503, + 0x207102, + 0x16fb88, + 0x32d1c5, + 0x274188, + 0x2f9f88, + 0x20f882, + 0x20a605, + 0x3785c7, + 0x201842, + 0x24c5c7, + 0x207b02, + 0x2f6607, + 0x2cc409, + 0x2ce948, + 0x2cfc89, + 0x24b2c2, + 0x2707c7, + 0x37cdc4, + 0x378687, + 0x36e3c7, + 0x264d42, + 0x28cac3, + 0x214642, + 0x204d42, + 0x200442, + 0x21cc82, + 0x206b42, + 0x20d2c2, + 0x2aff05, + 0x240a05, + 0xf882, + 0x3cac3, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x1a3443, + 0x201a03, + 0x170c3, + 0x8c1, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x255783, + 0x208e83, + 0x1a3443, + 0x201a03, + 0x221f43, + 0x3e4f5906, + 0x42bc3, + 0x873c5, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x84c2, + 0x16fb88, + 0xe03, + 0x1a3443, + 0x4ec04, + 0xe5105, + 0x207102, + 0x39cdc4, + 0x238543, + 0x23cac3, + 0x323043, + 0x38acc3, + 0x2b13c5, + 0x255783, + 0x211a83, + 0x208e83, + 0x21b543, + 0x201a03, + 0x215443, + 0x20e383, + 0x202443, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x20f882, + 0x201a03, + 0x16fb88, + 0x323043, + 0x1a3443, + 0x16fb88, + 0x1a3443, + 0x2bcc43, + 0x238543, + 0x23a844, + 0x23cac3, + 0x323043, + 0x205e82, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x205e82, + 0x229443, + 0x208e83, + 0x201a03, + 0x2ef783, + 0x215443, + 0x207102, + 0x20f882, + 0x323043, + 0x208e83, + 0x201a03, + 0x3a03c5, + 0xa4f06, + 0x286644, + 0x205082, + 0x16fb88, + 0x207102, + 0x25088, + 0x134943, + 0x20f882, + 0x42899306, + 0x6a04, + 0xb610b, + 0x44e86, + 0x8cbc7, + 0x23cac3, + 0x51648, + 0x323043, + 0x8b205, + 0x1493c4, + 0x227583, + 0x556c7, + 0xe06c4, + 0x208e83, + 0x1a3284, + 0x1a3443, + 0x201a03, + 0x2f4544, + 0xb5ec8, + 0x12cf06, + 0x16308, + 0x1252c5, + 0x9fc9, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x200e03, + 0x201a03, + 0x2f3743, + 0x205082, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x231603, + 0x21bf84, + 0x208e83, + 0xe03, + 0x201a03, + 0x238543, + 0x23cac3, + 0x2de944, + 0x323043, + 0x208e83, + 0x201a03, + 0x3a03c6, + 0x23cac3, + 0x323043, + 0x18a783, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x8cbc7, + 0x16fb88, + 0x323043, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x45238543, + 0x23cac3, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x207102, + 0x20f882, + 0x238543, + 0x323043, + 0x208e83, + 0x200442, + 0x201a03, + 0x31f1c7, + 0x342b8b, + 0x22fc83, + 0x244708, + 0x34f007, + 0x348746, + 0x382d45, + 0x232309, + 0x28c848, + 0x346789, + 0x346790, + 0x36f64b, + 0x2e2109, + 0x205dc3, + 0x20af09, + 0x23bd86, + 0x23bd8c, + 0x32d288, + 0x3bc208, + 0x244a49, + 0x29854e, + 0x2cc1cb, + 0x2e5c0c, + 0x203ec3, + 0x26ad0c, + 0x203ec9, + 0x30ae47, + 0x23ca0c, + 0x2b478a, + 0x252044, + 0x2768cd, + 0x26abc8, + 0x21308d, + 0x26fec6, + 0x28664b, + 0x200cc9, + 0x2cf787, + 0x332c86, + 0x3372c9, + 0x34834a, + 0x319108, + 0x2f3204, + 0x2fe987, + 0x363787, + 0x2d0184, + 0x38d204, + 0x2345c9, + 0x28a4c9, + 0x2b7288, + 0x216d05, + 0x339645, + 0x213c86, + 0x276789, + 0x249c4d, + 0x33bf88, + 0x213b87, + 0x382dc8, + 0x2fa686, + 0x39b444, + 
0x2501c5, + 0x201c46, + 0x202884, + 0x203dc7, + 0x206f4a, + 0x219784, + 0x21dfc6, + 0x21ea49, + 0x21ea4f, + 0x21fc8d, + 0x220f06, + 0x224c90, + 0x225086, + 0x2257c7, + 0x2269c7, + 0x2269cf, + 0x2276c9, + 0x22cb06, + 0x22da47, + 0x22da48, + 0x22f289, + 0x358088, + 0x2eb507, + 0x212843, + 0x394f46, + 0x3c0b48, + 0x29880a, + 0x236089, + 0x205d83, + 0x3784c6, + 0x26bcca, + 0x28eb87, + 0x30ac8a, + 0x25a18e, + 0x227806, + 0x2b03c7, + 0x217bc6, + 0x203f86, + 0x38fd0b, + 0x31708a, + 0x32138d, + 0x21d047, + 0x20e408, + 0x20e409, + 0x20e40f, + 0x2c1c4c, + 0x2b4089, + 0x2d890e, + 0x34e44a, + 0x28b906, + 0x314a86, + 0x319d8c, + 0x31be8c, + 0x327508, + 0x36eac7, + 0x274d85, + 0x3485c4, + 0x20f88e, + 0x299684, + 0x388947, + 0x39140a, + 0x38a814, + 0x39390f, + 0x226b88, + 0x394e08, + 0x35eccd, + 0x35ecce, + 0x3a0849, + 0x238788, + 0x23878f, + 0x23c70c, + 0x23c70f, + 0x23d907, + 0x240c0a, + 0x2459cb, + 0x243788, + 0x245c87, + 0x3ac74d, + 0x322b46, + 0x276a86, + 0x248dc9, + 0x364b08, + 0x24cf48, + 0x24cf4e, + 0x2f4087, + 0x24e145, + 0x24e9c5, + 0x204b44, + 0x348a06, + 0x2b7188, + 0x20db03, + 0x2f948e, + 0x3acb08, + 0x2b588b, + 0x378bc7, + 0x3ab345, + 0x233d86, + 0x2b1f87, + 0x32f2c8, + 0x325449, + 0x322dc5, + 0x28e788, + 0x21c946, + 0x3afeca, + 0x20f789, + 0x23cac9, + 0x23cacb, + 0x346448, + 0x2d0049, + 0x216dc6, + 0x23768a, + 0x293c0a, + 0x240e0c, + 0x28e4c7, + 0x2ce74a, + 0x36b38b, + 0x36b399, + 0x312408, + 0x3a0445, + 0x2cdd46, + 0x25c489, + 0x3449c6, + 0x2df8ca, + 0x28ca46, + 0x20df44, + 0x2cdecd, + 0x20df47, + 0x218209, + 0x250ac5, + 0x250c08, + 0x251409, + 0x251844, + 0x251f47, + 0x251f48, + 0x2526c7, + 0x26e2c8, + 0x255cc7, + 0x25b845, + 0x25f3cc, + 0x25fc09, + 0x2c8c0a, + 0x39ec09, + 0x20b009, + 0x37ee4c, + 0x264f0b, + 0x2662c8, + 0x267448, + 0x26a804, + 0x289848, + 0x28d209, + 0x2b4847, + 0x20e646, + 0x200f47, + 0x2c4289, + 0x32264b, + 0x325147, + 0x201a87, + 0x2b79c7, + 0x213004, + 0x213005, + 0x2a7c05, + 0x34b1cb, + 0x3a9384, + 0x350448, + 0x26e94a, + 0x21ca07, + 0x300687, + 0x294712, + 0x276c06, + 0x23a1c6, + 0x33888e, + 0x27ab46, + 0x29abc8, + 0x29b38f, + 0x213448, + 0x302848, + 0x3bd10a, + 0x3bd111, + 0x2a990e, + 0x25654a, + 0x25654c, + 0x20bf07, + 0x238990, + 0x200408, + 0x2a9b05, + 0x2b238a, + 0x2028cc, + 0x29cf8d, + 0x302346, + 0x302347, + 0x30234c, + 0x30c80c, + 0x335d4c, + 0x2edfcb, + 0x28e0c4, + 0x22c9c4, + 0x354609, + 0x39e807, + 0x229989, + 0x293a49, + 0x3b6587, + 0x2b4606, + 0x2b4609, + 0x2b4a03, + 0x21b7ca, + 0x31fd07, + 0x34304b, + 0x32120a, + 0x2f6744, + 0x35f646, + 0x286b89, + 0x281a04, + 0x20324a, + 0x3a1205, + 0x2c4d45, + 0x2c4d4d, + 0x2c508e, + 0x2b9ac5, + 0x32ab86, + 0x39ffc7, + 0x25f64a, + 0x3a8286, + 0x2eefc4, + 0x2f9847, + 0x3bc50b, + 0x2fa747, + 0x30b444, + 0x256fc6, + 0x256fcd, + 0x2c3f4c, + 0x208d46, + 0x33c18a, + 0x230206, + 0x22ddc8, + 0x285107, + 0x34c98a, + 0x3840c6, + 0x210443, + 0x210446, + 0x3c09c8, + 0x2a344a, + 0x2801c7, + 0x2801c8, + 0x289e04, + 0x256ac7, + 0x283288, + 0x345388, + 0x284508, + 0x35874a, + 0x2e4505, + 0x2e9a07, + 0x256393, + 0x343d86, + 0x2e0908, + 0x229f89, + 0x24c488, + 0x38600b, + 0x2d3d48, + 0x2bc644, + 0x27a9c6, + 0x317ec6, + 0x330889, + 0x3bc3c7, + 0x25f4c8, + 0x2931c6, + 0x36c244, + 0x30aa05, + 0x2d4008, + 0x2cd88a, + 0x2cdb48, + 0x2d4b06, + 0x2a1d8a, + 0x2fc208, + 0x2d8fc8, + 0x2d9ec8, + 0x2da506, + 0x2dc3c6, + 0x20c0cc, + 0x2dc990, + 0x285505, + 0x213248, + 0x30d410, + 0x213250, + 0x34660e, + 0x20bd4e, + 0x20bd54, + 0x20e78f, + 0x20eb46, + 0x3072d1, + 0x332e13, + 0x333288, + 0x31d245, + 0x2a0bc8, + 0x395705, + 0x23540c, + 0x2309c9, + 
0x2994c9, + 0x230e47, + 0x263549, + 0x261047, + 0x2ffa46, + 0x24ffc7, + 0x20ef05, + 0x217103, + 0x20dcc9, + 0x22a249, + 0x38a783, + 0x3b35c4, + 0x358c8d, + 0x3b83cf, + 0x36c285, + 0x331786, + 0x21ac47, + 0x32d007, + 0x290806, + 0x29080b, + 0x2aa805, + 0x263c06, + 0x300b87, + 0x257449, + 0x345a06, + 0x20cb45, + 0x2248cb, + 0x230786, + 0x38ad45, + 0x273988, + 0x2a6988, + 0x2ba50c, + 0x2ba510, + 0x2b64c9, + 0x2c5607, + 0x2e520b, + 0x30be86, + 0x2eb3ca, + 0x2ec90b, + 0x2ee70a, + 0x2ee986, + 0x2ef645, + 0x31fa46, + 0x37d408, + 0x230f0a, + 0x35e95c, + 0x2f380c, + 0x2f3b08, + 0x3a03c5, + 0x35cec7, + 0x25b0c6, + 0x27f7c5, + 0x2227c6, + 0x2909c8, + 0x2c2c07, + 0x298448, + 0x2b04ca, + 0x33764c, + 0x3378c9, + 0x39b5c7, + 0x215c04, + 0x24ea86, + 0x2d518a, + 0x293b45, + 0x211ecc, + 0x212e48, + 0x389c88, + 0x21904c, + 0x2266cc, + 0x229549, + 0x229787, + 0x23ff4c, + 0x2454c4, + 0x24718a, + 0x23354c, + 0x279a4b, + 0x24bfcb, + 0x3821c6, + 0x2f7447, + 0x20e947, + 0x238bcf, + 0x303191, + 0x2e16d2, + 0x314ecd, + 0x314ece, + 0x31520e, + 0x20e948, + 0x20e952, + 0x253e08, + 0x34ec47, + 0x25430a, + 0x208b08, + 0x27ab05, + 0x38c9ca, + 0x2255c7, + 0x2e6f44, + 0x227103, + 0x297185, + 0x3bd387, + 0x2fb547, + 0x29d18e, + 0x308c8d, + 0x30d7c9, + 0x21f545, + 0x31c443, + 0x326446, + 0x264085, + 0x27dc48, + 0x2c0649, + 0x2a0105, + 0x3ac94f, + 0x2b6207, + 0x382bc5, + 0x37958a, + 0x358946, + 0x2522c9, + 0x37db4c, + 0x2fec09, + 0x2094c6, + 0x26e74c, + 0x329f86, + 0x3017c8, + 0x301c86, + 0x312586, + 0x2082c4, + 0x266643, + 0x2b380a, + 0x32e411, + 0x30650a, + 0x265345, + 0x271ac7, + 0x25c7c7, + 0x283384, + 0x28338b, + 0x2cfb08, + 0x2c1486, + 0x37e6c5, + 0x3b01c4, + 0x280ac9, + 0x320804, + 0x24cd87, + 0x359f05, + 0x359f07, + 0x338ac5, + 0x2affc3, + 0x34eb08, + 0x35f2ca, + 0x20b3c3, + 0x32d20a, + 0x281ec6, + 0x3ac6cf, + 0x2f4009, + 0x2f9410, + 0x2ebe48, + 0x2d5809, + 0x29f087, + 0x256f4f, + 0x31e884, + 0x2de9c4, + 0x224f06, + 0x317b06, + 0x2e2aca, + 0x381c46, + 0x2ff587, + 0x30c148, + 0x30c347, + 0x30cbc7, + 0x30f08a, + 0x310b4b, + 0x3b1b45, + 0x2e1308, + 0x204443, + 0x2045cc, + 0x38000f, + 0x274b8d, + 0x2aefc7, + 0x30d909, + 0x2e8207, + 0x24f2c8, + 0x38aa0c, + 0x2bc548, + 0x231848, + 0x321d0e, + 0x336054, + 0x336564, + 0x354e4a, + 0x37018b, + 0x261104, + 0x261109, + 0x3656c8, + 0x24ef85, + 0x20d60a, + 0x3acd47, + 0x31f944, + 0x39c783, + 0x238543, + 0x240244, + 0x23cac3, + 0x323043, + 0x231604, + 0x255783, + 0x28cac3, + 0x20c0c6, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x221483, + 0x207102, + 0x39c783, + 0x20f882, + 0x238543, + 0x240244, + 0x23cac3, + 0x323043, + 0x255783, + 0x20c0c6, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x21b583, + 0x208e83, + 0x1a3443, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x207102, + 0x242043, + 0x20f882, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x201382, + 0x235f42, + 0x20f882, + 0x238543, + 0x206902, + 0x200942, + 0x231604, + 0x20f644, + 0x22a482, + 0x21bf84, + 0x200442, + 0x201a03, + 0x221483, + 0x3821c6, + 0x21a902, + 0x202642, + 0x20c4c2, + 0x47a13443, + 0x47e0bf03, + 0x5d306, + 0x5d306, + 0x286644, + 0x200e03, + 0x14b700a, + 0x12ea0c, + 0xf4cc, + 0x871cd, + 0x131645, + 0x26547, + 0x1b1c6, + 0x21088, + 0x23087, + 0x28b08, + 0x1aa20a, + 0x1397c7, + 0x48adf485, + 0x1359c9, + 0x3e34b, + 0x35dcb, + 0x42e48, + 0x172f4a, + 0x9288e, + 0x144c28b, + 0x6a04, + 0x63d46, + 0x7588, + 0xf8d08, + 0x3e607, + 0x1a787, + 0x57f89, + 0x81a87, + 0xdd088, + 0x12f5c9, + 0x49804, + 0x49f45, + 0x12bfce, + 0xb084d, 
+ 0x8ca48, + 0x48e34406, + 0x49834408, + 0x7b548, + 0x11f3d0, + 0x5998c, + 0x6b9c7, + 0x6c647, + 0x71387, + 0x77fc7, + 0x13c42, + 0x144ec7, + 0x11724c, + 0x43b87, + 0xac206, + 0xac7c9, + 0xae208, + 0x206c2, + 0x942, + 0xbee8b, + 0x1a3307, + 0x18009, + 0x164ec9, + 0x3ef48, + 0xb8042, + 0x134649, + 0xcc60a, + 0xd2689, + 0xdfdc9, + 0xe0b08, + 0xe1b87, + 0xe4489, + 0xe61c5, + 0xe67d0, + 0x191646, + 0x11205, + 0x31e8d, + 0x235c6, + 0xefd07, + 0xf4558, + 0x14f508, + 0xc74a, + 0xb282, + 0x5524d, + 0xa02, + 0x86286, + 0x95408, + 0x8f148, + 0x16fa49, + 0x586c8, + 0x6420e, + 0x126447, + 0x1051cd, + 0xfb445, + 0x144c48, + 0x19fc08, + 0x106046, + 0xc2, + 0x12cf06, + 0x4542, + 0x341, + 0x65a07, + 0xf6fc3, + 0x492f4dc4, + 0x4969c243, + 0x141, + 0x19d06, + 0x141, + 0x1, + 0x19d06, + 0xf6fc3, + 0x1402285, + 0x252044, + 0x238543, + 0x253384, + 0x231604, + 0x208e83, + 0x229e45, + 0x221f43, + 0x20c843, + 0x355685, + 0x202443, + 0x4aa38543, + 0x23cac3, + 0x323043, + 0x200041, + 0x28cac3, + 0x20f644, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x215443, + 0x16fb88, + 0x207102, + 0x39c783, + 0x20f882, + 0x238543, + 0x23cac3, + 0x21b583, + 0x200942, + 0x231604, + 0x255783, + 0x28cac3, + 0x208e83, + 0x200e03, + 0x201a03, + 0x202443, + 0x16fb88, + 0x37fd82, + 0x18c1c7, + 0xf882, + 0x10a985, + 0x1480cc8, + 0x10c50e, + 0x4ba0ab02, + 0x31fec8, + 0x2bdd86, + 0x2ca186, + 0x2bd707, + 0x4be00b42, + 0x4c3ac548, + 0x21870a, + 0x26b448, + 0x200242, + 0x31fb49, + 0x3b1b87, + 0x21ec06, + 0x34e849, + 0x2e9b44, + 0x348646, + 0x2ca584, + 0x27f584, + 0x25f009, + 0x32d906, + 0x240ac5, + 0x297a85, + 0x3b9d87, + 0x2c76c7, + 0x2979c4, + 0x2bd946, + 0x307b85, + 0x30a3c5, + 0x3a2a05, + 0x339407, + 0x378a05, + 0x31ddc9, + 0x234fc5, + 0x32f404, + 0x3a81c7, + 0x341b0e, + 0x306bc9, + 0x338749, + 0x388d86, + 0x24a608, + 0x36ae4b, + 0x2b698c, + 0x33ea46, + 0x2e5ac7, + 0x212245, + 0x38d20a, + 0x2b7389, + 0x209b49, + 0x259f06, + 0x300945, + 0x2edac5, + 0x3570c9, + 0x3a2b8b, + 0x27e286, + 0x3471c6, + 0x20de04, + 0x2943c6, + 0x24e1c8, + 0x3c0846, + 0x215006, + 0x205fc8, + 0x2092c7, + 0x209909, + 0x211385, + 0x16fb88, + 0x21a704, + 0x2394c4, + 0x201105, + 0x3a6649, + 0x228f87, + 0x228f8b, + 0x22b3ca, + 0x230905, + 0x4c612842, + 0x342f07, + 0x4ca30c08, + 0x3578c7, + 0x2c3d45, + 0x209dca, + 0xf882, + 0x2be6cb, + 0x255e0a, + 0x22a146, + 0x216383, + 0x2a038d, + 0x3572cc, + 0x357a4d, + 0x250545, + 0x334fc5, + 0x20db47, + 0x36c689, + 0x218606, + 0x381ac5, + 0x2d2b88, + 0x2942c3, + 0x2fa288, + 0x2942c8, + 0x2cb287, + 0x314808, + 0x3b49c9, + 0x374847, + 0x342707, + 0x202108, + 0x2d1c84, + 0x2d1c87, + 0x26fdc8, + 0x355546, + 0x3b874f, + 0x226207, + 0x2eb686, + 0x2298c5, + 0x22a8c3, + 0x381947, + 0x37cc43, + 0x252886, + 0x254006, + 0x254706, + 0x298b85, + 0x26e2c3, + 0x397cc8, + 0x37f889, + 0x3920cb, + 0x254888, + 0x255985, + 0x2584c5, + 0x4cef6802, + 0x250089, + 0x34eec7, + 0x263c85, + 0x25ef07, + 0x260506, + 0x374345, + 0x263ecb, + 0x2662c4, + 0x26b005, + 0x26b147, + 0x27db86, + 0x27e045, + 0x289a47, + 0x28a187, + 0x2d5104, + 0x291b8a, + 0x292048, + 0x2cee09, + 0x2a0f05, + 0x3bf1c6, + 0x24e38a, + 0x2be906, + 0x26f2c7, + 0x2ceacd, + 0x2aa349, + 0x396fc5, + 0x339f07, + 0x333448, + 0x25a5c8, + 0x332847, + 0x358246, + 0x21cb87, + 0x253c43, + 0x34b1c4, + 0x371cc5, + 0x39d947, + 0x3a2409, + 0x231b08, + 0x34cbc5, + 0x23bac4, + 0x254a45, + 0x256c4d, + 0x2006c2, + 0x230386, + 0x2861c6, + 0x2e654a, + 0x3904c6, + 0x39ab85, + 0x342445, + 0x342447, + 0x3afd0c, + 0x27b3ca, + 0x294086, + 0x28ad05, + 0x294206, + 0x294547, + 0x296886, + 0x298a8c, + 0x34e989, + 0x4d21a187, + 
0x29b745, + 0x29b746, + 0x29bcc8, + 0x246f85, + 0x2ab085, + 0x2ab808, + 0x2aba0a, + 0x4d6335c2, + 0x4da14d02, + 0x2e76c5, + 0x2eb603, + 0x243408, + 0x252403, + 0x2abc84, + 0x25240b, + 0x36b208, + 0x2daa48, + 0x4df3b049, + 0x2afc09, + 0x2b0746, + 0x2b1c08, + 0x2b1e09, + 0x2b2886, + 0x2b2a05, + 0x3944c6, + 0x2b2f49, + 0x389347, + 0x2647c6, + 0x2de087, + 0x218487, + 0x2dd9c4, + 0x4e34f809, + 0x2d32c8, + 0x3ac448, + 0x3932c7, + 0x2cd346, + 0x36c489, + 0x2ca847, + 0x32598a, + 0x358388, + 0x208387, + 0x208f86, + 0x271d8a, + 0x26fbc8, + 0x2ed485, + 0x230685, + 0x2ef1c7, + 0x311cc9, + 0x30150b, + 0x31a308, + 0x235049, + 0x254c87, + 0x2bd04c, + 0x2bfccc, + 0x2bffca, + 0x2c024c, + 0x2ca108, + 0x2ca308, + 0x2ca504, + 0x2caa09, + 0x2cac49, + 0x2cae8a, + 0x2cb109, + 0x2cb447, + 0x3ba98c, + 0x23f586, + 0x2cbf88, + 0x2be9c6, + 0x387486, + 0x396ec7, + 0x306dc8, + 0x3445cb, + 0x28e307, + 0x250289, + 0x350b89, + 0x253507, + 0x2771c4, + 0x271c07, + 0x2fda46, + 0x21d8c6, + 0x33c345, + 0x297248, + 0x2993c4, + 0x2993c6, + 0x27b28b, + 0x21bac9, + 0x36c886, + 0x204bc9, + 0x339586, + 0x25f1c8, + 0x211b83, + 0x300ac5, + 0x219b09, + 0x21da05, + 0x2fba44, + 0x27d046, + 0x2fd385, + 0x299906, + 0x310ec7, + 0x33a986, + 0x3b134b, + 0x237587, + 0x241646, + 0x354786, + 0x3b9e46, + 0x297989, + 0x25384a, + 0x2bbb85, + 0x2202cd, + 0x2abb06, + 0x204a86, + 0x2f3f06, + 0x22dd45, + 0x2e6ac7, + 0x300087, + 0x2e7dce, + 0x28cac3, + 0x2cd309, + 0x210c89, + 0x38d607, + 0x364207, + 0x2a5bc5, + 0x2b57c5, + 0x4e63470f, + 0x2d5a47, + 0x2d5c08, + 0x2d6144, + 0x2d7106, + 0x4ea4ea42, + 0x2da786, + 0x20c0c6, + 0x210e4e, + 0x2fa0ca, + 0x273b06, + 0x23398a, + 0x211689, + 0x32b385, + 0x3a4808, + 0x3bca06, + 0x306748, + 0x33aac8, + 0x2194cb, + 0x2bd805, + 0x378a88, + 0x20610c, + 0x2c3c07, + 0x254246, + 0x2fd1c8, + 0x3488c8, + 0x4ee06802, + 0x23588b, + 0x2123c9, + 0x205549, + 0x2174c7, + 0x223408, + 0x4f36bec8, + 0x38ffcb, + 0x23edc9, + 0x338f0d, + 0x27fa88, + 0x22b1c8, + 0x4f6014c2, + 0x203cc4, + 0x4fa19302, + 0x2fe206, + 0x4fe004c2, + 0x261b8a, + 0x2199c6, + 0x232808, + 0x2c6f48, + 0x2b6f06, + 0x22fe46, + 0x2f9186, + 0x2b5a45, + 0x2443c4, + 0x50206d04, + 0x214106, + 0x29c747, + 0x50620c47, + 0x2d644b, + 0x341ec9, + 0x33500a, + 0x2106c4, + 0x342588, + 0x26458d, + 0x2f2489, + 0x2f26c8, + 0x2f2d49, + 0x2f4544, + 0x245884, + 0x285cc5, + 0x320fcb, + 0x36b186, + 0x34b905, + 0x2279c9, + 0x2bda08, + 0x210dc4, + 0x38d389, + 0x2064c5, + 0x2c7708, + 0x342dc7, + 0x338b48, + 0x286d86, + 0x233207, + 0x29a989, + 0x224a49, + 0x38adc5, + 0x34dfc5, + 0x50a08402, + 0x32f1c4, + 0x2fdd45, + 0x2ce506, + 0x33bd05, + 0x387e47, + 0x214205, + 0x27dbc4, + 0x388e46, + 0x381b47, + 0x23d046, + 0x2c41c5, + 0x207f48, + 0x2bdf85, + 0x211a07, + 0x214689, + 0x21bc0a, + 0x2fc487, + 0x2fc48c, + 0x240a86, + 0x37e349, + 0x246a45, + 0x246ec8, + 0x207c03, + 0x216d85, + 0x2fd705, + 0x282d47, + 0x50e06ac2, + 0x22f647, + 0x2e56c6, + 0x373b46, + 0x30bfc6, + 0x348806, + 0x206748, + 0x2a0d05, + 0x2eb747, + 0x2eb74d, + 0x227103, + 0x227105, + 0x379347, + 0x22f988, + 0x378f05, + 0x2216c8, + 0x37ccc6, + 0x335b87, + 0x2cbec5, + 0x2bd886, + 0x39ce45, + 0x21c70a, + 0x2f1346, + 0x383f47, + 0x2bca85, + 0x2f5047, + 0x2f97c4, + 0x2fb9c6, + 0x2fe345, + 0x32d70b, + 0x2fd8c9, + 0x24214a, + 0x38ae48, + 0x30e048, + 0x380a8c, + 0x3964c7, + 0x3054c8, + 0x307f48, + 0x3084c5, + 0x311a8a, + 0x31c449, + 0x51200d02, + 0x201886, + 0x216044, + 0x216049, + 0x27d549, + 0x27e9c7, + 0x2b4e07, + 0x2938c9, + 0x22df48, + 0x22df4f, + 0x2e3a06, + 0x2df14b, + 0x34b445, + 0x34b447, + 0x368849, + 0x21aa46, + 0x38d307, + 
0x2e1a45, + 0x23ae84, + 0x284fc6, + 0x2262c4, + 0x2db107, + 0x2d6f08, + 0x51700848, + 0x301245, + 0x301387, + 0x260a09, + 0x205304, + 0x24b348, + 0x51ab7cc8, + 0x283384, + 0x23c208, + 0x332d44, + 0x22be49, + 0x351a45, + 0x51e05082, + 0x2e3a45, + 0x310045, + 0x20fc48, + 0x23d747, + 0x52200d42, + 0x3322c5, + 0x2d8e46, + 0x27cb06, + 0x32f188, + 0x337d48, + 0x33bcc6, + 0x34bb06, + 0x38c289, + 0x373a86, + 0x21a90b, + 0x2e5f85, + 0x208a46, + 0x29e108, + 0x3a0a06, + 0x322c46, + 0x221b8a, + 0x23b30a, + 0x2498c5, + 0x2a0dc7, + 0x313646, + 0x52606442, + 0x379487, + 0x266cc5, + 0x24e304, + 0x24e305, + 0x2105c6, + 0x278fc7, + 0x215dc5, + 0x23b484, + 0x2c4788, + 0x322d05, + 0x3af347, + 0x3b6dc5, + 0x21c645, + 0x258f84, + 0x2ee209, + 0x3079c8, + 0x263146, + 0x2b5386, + 0x345186, + 0x52b08148, + 0x308347, + 0x30874d, + 0x3090cc, + 0x3096c9, + 0x309909, + 0x52f67742, + 0x3b6343, + 0x215ac3, + 0x2fdb05, + 0x39da4a, + 0x32f046, + 0x30e2c5, + 0x311084, + 0x31108b, + 0x323a8c, + 0x3244cc, + 0x3247d5, + 0x32660d, + 0x327d0f, + 0x3280d2, + 0x32854f, + 0x328912, + 0x328d93, + 0x32924d, + 0x32980d, + 0x329b8e, + 0x32a10e, + 0x32a94c, + 0x32ad0c, + 0x32b14b, + 0x32b4ce, + 0x32c612, + 0x32ee0c, + 0x32fd90, + 0x33cd52, + 0x33d9cc, + 0x33e08d, + 0x33e3cc, + 0x3406d1, + 0x34734d, + 0x349e0d, + 0x34a40a, + 0x34a68c, + 0x34af8c, + 0x34b60c, + 0x34c20c, + 0x3523d3, + 0x352cd0, + 0x3530d0, + 0x35398d, + 0x353f8c, + 0x354b89, + 0x35690d, + 0x356c53, + 0x3595d1, + 0x359a13, + 0x35a0cf, + 0x35a48c, + 0x35a78f, + 0x35ab4d, + 0x35b14f, + 0x35b510, + 0x35bf8e, + 0x35f88e, + 0x35fe10, + 0x36150d, + 0x361e8e, + 0x36220c, + 0x363213, + 0x3658ce, + 0x365f50, + 0x366351, + 0x36678f, + 0x366b53, + 0x3672cd, + 0x36760f, + 0x3679ce, + 0x368090, + 0x368489, + 0x369210, + 0x36980f, + 0x369e8f, + 0x36a252, + 0x36dcce, + 0x36e7cd, + 0x36f00d, + 0x36f34d, + 0x37078d, + 0x370acd, + 0x370e10, + 0x37120b, + 0x371a8c, + 0x371e0c, + 0x37240c, + 0x37270e, + 0x382350, + 0x384512, + 0x38498b, + 0x384e8e, + 0x38520e, + 0x386dce, + 0x38724b, + 0x53388016, + 0x38988d, + 0x38a014, + 0x38b04d, + 0x38cd55, + 0x38e30d, + 0x38ec8f, + 0x38f4cf, + 0x39238f, + 0x39274e, + 0x392ccd, + 0x394091, + 0x39668c, + 0x39698c, + 0x396c8b, + 0x39710c, + 0x3974cf, + 0x397892, + 0x39824d, + 0x39974c, + 0x399bcc, + 0x399ecd, + 0x39a20f, + 0x39a5ce, + 0x39d70c, + 0x39dccd, + 0x39e00b, + 0x39e9cc, + 0x39f2cd, + 0x39f60e, + 0x39f989, + 0x3a1353, + 0x3a188d, + 0x3a1bcd, + 0x3a21cc, + 0x3a264e, + 0x3a37cf, + 0x3a3b8c, + 0x3a3e8d, + 0x3a41cf, + 0x3a458c, + 0x3a508c, + 0x3a550c, + 0x3a580c, + 0x3a5ecd, + 0x3a6212, + 0x3a688c, + 0x3a6b8c, + 0x3a6e91, + 0x3a72cf, + 0x3a768f, + 0x3a7a53, + 0x3a8a0e, + 0x3a8d8f, + 0x3a914c, + 0x537a948e, + 0x3a980f, + 0x3a9bd6, + 0x3aaa92, + 0x3acf0c, + 0x3ada0f, + 0x3ae08d, + 0x3ae3cf, + 0x3ae78c, + 0x3aea8d, + 0x3aedcd, + 0x3b084e, + 0x3b228c, + 0x3b258c, + 0x3b2890, + 0x3b57d1, + 0x3b5c0b, + 0x3b5f4c, + 0x3b624e, + 0x3b7211, + 0x3b764e, + 0x3b79cd, + 0x3bc7cb, + 0x3bd88f, + 0x3be394, + 0x210642, + 0x210642, + 0x204d43, + 0x210642, + 0x204d43, + 0x210642, + 0x2009c2, + 0x394505, + 0x3b6f0c, + 0x210642, + 0x210642, + 0x2009c2, + 0x210642, + 0x29c345, + 0x21bc05, + 0x210642, + 0x210642, + 0x201102, + 0x29c345, + 0x326b49, + 0x3592cc, + 0x210642, + 0x210642, + 0x210642, + 0x210642, + 0x394505, + 0x210642, + 0x210642, + 0x210642, + 0x210642, + 0x201102, + 0x326b49, + 0x210642, + 0x210642, + 0x210642, + 0x21bc05, + 0x210642, + 0x21bc05, + 0x3592cc, + 0x3b6f0c, + 0x39c783, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x208e83, + 0x201a03, + 0xe008, 
+ 0x64344, + 0xe03, + 0xc63c8, + 0x207102, + 0x5460f882, + 0x24ac83, + 0x23f044, + 0x2020c3, + 0x39e544, + 0x23a1c6, + 0x216f83, + 0x304704, + 0x2d7b05, + 0x28cac3, + 0x208e83, + 0x1a3443, + 0x201a03, + 0x243d0a, + 0x3821c6, + 0x38558c, + 0x16fb88, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x229443, + 0x20c0c6, + 0x208e83, + 0x201a03, + 0x221483, + 0xac408, + 0x131645, + 0x35f09, + 0x35c2, + 0x55b95645, + 0x26547, + 0xba9c8, + 0x14b0e, + 0x90212, + 0x10a78b, + 0x1398c6, + 0x55edf485, + 0x562df48c, + 0x148f87, + 0x36dc7, + 0x15000a, + 0x46690, + 0x13b345, + 0xb610b, + 0xf8d08, + 0x3e607, + 0x3af8b, + 0x57f89, + 0x185a87, + 0x81a87, + 0x7e4c7, + 0x3e546, + 0xdd088, + 0x56824386, + 0xb084d, + 0x14f9d0, + 0x56c0c182, + 0x8ca48, + 0x4f450, + 0x15090c, + 0x5735cd4d, + 0x64a88, + 0x721c7, + 0x76f09, + 0x5d3c6, + 0x9bec8, + 0x351c2, + 0xa808a, + 0x293c7, + 0x43b87, + 0xac7c9, + 0xae208, + 0x8b205, + 0xd538e, + 0x5c4e, + 0x17a8f, + 0x18009, + 0x164ec9, + 0x15d38b, + 0x7ba8f, + 0xee40c, + 0xa88cb, + 0xc8b48, + 0xd6347, + 0xdbe88, + 0xfe78b, + 0xff34c, + 0x10038c, + 0x1037cc, + 0x10b54d, + 0x3ef48, + 0xd2942, + 0x134649, + 0x195d8b, + 0xcd546, + 0x11f30b, + 0xe118a, + 0xe1d45, + 0xe67d0, + 0xe9f06, + 0x16b986, + 0x11205, + 0x10fc48, + 0xefd07, + 0xeffc7, + 0x8d047, + 0xfe04a, + 0xba84a, + 0x86286, + 0x99d0d, + 0x8f148, + 0x586c8, + 0x58ec9, + 0xbc8c5, + 0x1ad70c, + 0x10b74b, + 0x19e604, + 0x105e09, + 0x106046, + 0x16546, + 0x2642, + 0x12cf06, + 0xc68b, + 0x112707, + 0x4542, + 0xd1305, + 0x2e604, + 0x8c1, + 0x52d03, + 0x56764886, + 0x9c243, + 0x7b02, + 0x293c4, + 0x242, + 0x86644, + 0xf82, + 0x6502, + 0x3302, + 0xd342, + 0x1382, + 0xdf482, + 0x8c2, + 0x22902, + 0x40e82, + 0x1a442, + 0x4c82, + 0x234c2, + 0x3cac3, + 0x6b82, + 0x1842, + 0x7602, + 0x6b02, + 0x17202, + 0x36d02, + 0x206c2, + 0xc442, + 0x1c82, + 0x942, + 0x55783, + 0x4182, + 0x2542, + 0xb8042, + 0x9a02, + 0x282, + 0x2942, + 0xd842, + 0xc202, + 0x4a82, + 0x182842, + 0x745c2, + 0xe82, + 0x8e83, + 0x1942, + 0x6802, + 0x982, + 0x5b82, + 0x18ad45, + 0x7082, + 0x2fa42, + 0x13ebc3, + 0x482, + 0xb282, + 0xa02, + 0x2502, + 0x6742, + 0xd42, + 0xc2, + 0x2642, + 0x35dc5, + 0x17f087, + 0x20d0c3, + 0x207102, + 0x238543, + 0x23cac3, + 0x21b583, + 0x2046c3, + 0x229443, + 0x208e83, + 0x200e03, + 0x201a03, + 0x29c283, + 0x10c3, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x21b583, + 0x28cac3, + 0x208e83, + 0x200e03, + 0x1a3443, + 0x201a03, + 0x238543, + 0x23cac3, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x200041, + 0x28cac3, + 0x208e83, + 0x21b543, + 0x201a03, + 0x146f44, + 0x39c783, + 0x238543, + 0x23cac3, + 0x26eac3, + 0x21b583, + 0x207b03, + 0x289303, + 0x219983, + 0x241503, + 0x323043, + 0x231604, + 0x208e83, + 0x201a03, + 0x202443, + 0x333cc4, + 0x251183, + 0x3ec3, + 0x3c0943, + 0x20a3c8, + 0x271dc4, + 0x2cf30a, + 0x2bed86, + 0x112384, + 0x3a7ec7, + 0x226cca, + 0x2e38c9, + 0x3b7f87, + 0x3be84a, + 0x39c783, + 0x2e774b, + 0x28b689, + 0x345285, + 0x2da5c7, + 0xf882, + 0x238543, + 0x21a447, + 0x2379c5, + 0x2ca689, + 0x23cac3, + 0x2bd606, + 0x2c9883, + 0xe5743, + 0x110646, + 0xd386, + 0x16f07, + 0x21af86, + 0x222985, + 0x3a3147, + 0x2de5c7, + 0x59b23043, + 0x33dc07, + 0x374703, + 0x3b5045, + 0x231604, + 0x231308, + 0x366fcc, + 0x2b4fc5, + 0x2aa4c6, + 0x21a307, + 0x39b687, + 0x23dfc7, + 0x23f108, + 0x30f50f, + 0x2e3b05, + 0x24ad87, + 0x33acc7, + 0x2abdca, + 0x2d29c9, + 0x39e6c5, + 0x31078a, + 0xc546, + 0x2c9905, + 0x3703c4, + 0x2c6e86, + 0x300e07, + 0x2d2847, + 0x306908, + 0x217645, + 0x2378c6, + 0x214f85, + 0x2e8105, + 0x21ba04, + 0x2b6e07, + 
0x20658a, + 0x34d908, + 0x367f06, + 0x29443, + 0x2e4505, + 0x26bf86, + 0x3babc6, + 0x211106, + 0x28cac3, + 0x3984c7, + 0x33ac45, + 0x208e83, + 0x2e144d, + 0x200e03, + 0x306a08, + 0x3b3644, + 0x310945, + 0x2abcc6, + 0x23f386, + 0x208947, + 0x2aed47, + 0x26f045, + 0x201a03, + 0x20a147, + 0x277089, + 0x36bbc9, + 0x227f4a, + 0x235d82, + 0x3b5004, + 0x2eb2c4, + 0x344487, + 0x22f508, + 0x2f0889, + 0x226fc9, + 0x2f1ac7, + 0x28bb46, + 0xf3006, + 0x2f4544, + 0x2f4b4a, + 0x2f8248, + 0x2f9049, + 0x2c4bc6, + 0x2b9545, + 0x34d7c8, + 0x2cdc4a, + 0x20ec43, + 0x333e46, + 0x2f1bc7, + 0x225f45, + 0x3b3505, + 0x3a04c3, + 0x231944, + 0x230645, + 0x28a287, + 0x307b05, + 0x2ef086, + 0x103d45, + 0x273bc3, + 0x273bc9, + 0x26c04c, + 0x2a2b4c, + 0x2d8648, + 0x284187, + 0x301e08, + 0x30214a, + 0x302fcb, + 0x28b7c8, + 0x23ec48, + 0x23f486, + 0x345045, + 0x34624a, + 0x228cc5, + 0x205082, + 0x2cbd87, + 0x29f806, + 0x368d45, + 0x304209, + 0x281405, + 0x3716c5, + 0x218ac9, + 0x388a46, + 0x204448, + 0x332643, + 0x217186, + 0x27cf86, + 0x311f05, + 0x311f09, + 0x2f0fc9, + 0x27a3c7, + 0x114204, + 0x314207, + 0x226ec9, + 0x23f805, + 0x444c8, + 0x39c485, + 0x341a05, + 0x3911c9, + 0x20cac2, + 0x2628c4, + 0x200882, + 0x204182, + 0x30e985, + 0x312108, + 0x2bc805, + 0x2cb603, + 0x2cb605, + 0x2da983, + 0x2162c2, + 0x383c84, + 0x2fc183, + 0x20cb42, + 0x341504, + 0x2ec043, + 0x206682, + 0x28cfc3, + 0x295384, + 0x2eae03, + 0x2f6584, + 0x204242, + 0x221383, + 0x219c43, + 0x206182, + 0x332182, + 0x2f0e09, + 0x204382, + 0x290d84, + 0x201f82, + 0x34d644, + 0x28bb04, + 0x2c0d84, + 0x202642, + 0x23e882, + 0x229703, + 0x302d83, + 0x24a9c4, + 0x28a404, + 0x2f1d44, + 0x2f8404, + 0x315743, + 0x224183, + 0x20c4c4, + 0x315584, + 0x315d86, + 0x232ec2, + 0x20f882, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x207102, + 0x39c783, + 0x238543, + 0x23cac3, + 0x201843, + 0x323043, + 0x231604, + 0x2f10c4, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x221483, + 0x2f5204, + 0x31fe83, + 0x2c37c3, + 0x359e44, + 0x39c286, + 0x211c43, + 0x36dc7, + 0x21f243, + 0x202103, + 0x2b8d83, + 0x263a43, + 0x229443, + 0x3321c5, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x216403, + 0x239043, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x255783, + 0x208e83, + 0x2464c4, + 0x1a3443, + 0x201a03, + 0x25b0c4, + 0x2c6c85, + 0x36dc7, + 0x20f882, + 0x201742, + 0x207b02, + 0x204d42, + 0xe03, + 0x200442, + 0x238543, + 0x240244, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x21bf84, + 0x208e83, + 0xe03, + 0x201a03, + 0x215443, + 0x286644, + 0x16fb88, + 0x238543, + 0x200e03, + 0x10c3, + 0x13e8c4, + 0x252044, + 0x16fb88, + 0x238543, + 0x253384, + 0x231604, + 0x200e03, + 0x2014c2, + 0x201a03, + 0x20c843, + 0x31944, + 0x355685, + 0x205082, + 0x3156c3, + 0x145c49, + 0xdfb46, + 0x19c588, + 0x207102, + 0x16fb88, + 0x20f882, + 0x23cac3, + 0x323043, + 0x200942, + 0xe03, + 0x201a03, + 0x207102, + 0x1bea07, + 0x1370c9, + 0x3dc3, + 0x16fb88, + 0xd303, + 0x5db4c807, + 0x38543, + 0x1788, + 0x23cac3, + 0x323043, + 0x186c46, + 0x255783, + 0xe8888, + 0xc9148, + 0x3fbc6, + 0x28cac3, + 0xd30c8, + 0x187ec3, + 0xe8a85, + 0x3ccc7, + 0x8e83, + 0x63c3, + 0x1a03, + 0xcb02, + 0x17044a, + 0x10ea43, + 0x313e44, + 0x10f30b, + 0x10f8c8, + 0x95e02, + 0x207102, + 0x20f882, + 0x238543, + 0x23cac3, + 0x2de944, + 0x323043, + 0x255783, + 0x28cac3, + 0x208e83, + 0x238543, + 0x23cac3, + 0x323043, + 0x229443, + 0x208e83, + 0x201a03, + 0x236903, + 0x215443, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 
0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x10c3, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x229443, + 0x208e83, + 0x201a03, + 0x21a902, + 0x200141, + 0x207102, + 0x200001, + 0x327e02, + 0x16fb88, + 0x224c85, + 0x2008c1, + 0x38543, + 0x201781, + 0x200301, + 0x200081, + 0x2ac602, + 0x37cc44, + 0x394483, + 0x200181, + 0x200401, + 0x200041, + 0x200101, + 0x2ea547, + 0x2ec54f, + 0x2fbc06, + 0x200281, + 0x33e906, + 0x200801, + 0x200981, + 0x306f8e, + 0x200441, + 0x201a03, + 0x204101, + 0x258885, + 0x20cb02, + 0x3a03c5, + 0x200341, + 0x200741, + 0x2002c1, + 0x205082, + 0x2000c1, + 0x200201, + 0x200c81, + 0x2005c1, + 0x204541, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x221f43, + 0x238543, + 0x323043, + 0x95d48, + 0x28cac3, + 0x208e83, + 0x31483, + 0x201a03, + 0x14eec08, + 0x16308, + 0x16fb88, + 0xe03, + 0x8e444, + 0x4ec04, + 0x14eec0a, + 0x16fb88, + 0x1a3443, + 0x238543, + 0x23cac3, + 0x323043, + 0x208e83, + 0x201a03, + 0x203ec3, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x2de944, + 0x201a03, + 0x22d585, + 0x35f2c4, + 0x238543, + 0x208e83, + 0x201a03, + 0x1f40a, + 0xf1844, + 0x118b06, + 0x20f882, + 0x238543, + 0x23adc9, + 0x23cac3, + 0x375449, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x2f4348, + 0x22dc07, + 0x355685, + 0xb4c8, + 0x1bea07, + 0x2f78a, + 0x178ccb, + 0x13c507, + 0x4a4c8, + 0x14f64a, + 0x19dc8, + 0x1370c9, + 0x30507, + 0x742c7, + 0x19bf08, + 0x1788, + 0x4b04f, + 0x1c045, + 0x1a87, + 0x186c46, + 0x41287, + 0x4a786, + 0xe8888, + 0x96fc6, + 0x188847, + 0x178809, + 0x1bf307, + 0xd81c9, + 0xbcbc9, + 0xc6a06, + 0xc9148, + 0xc7845, + 0x57b0a, + 0xd30c8, + 0x187ec3, + 0xdad48, + 0x3ccc7, + 0x131f45, + 0x787d0, + 0x63c3, + 0x1a3443, + 0x125807, + 0x1cc85, + 0xf02c8, + 0xe385, + 0x10ea43, + 0x16d5c8, + 0x12906, + 0x198909, + 0xb2007, + 0x145f0b, + 0x180884, + 0x104f04, + 0x10f30b, + 0x10f8c8, + 0x110547, + 0x131645, + 0x238543, + 0x23cac3, + 0x21b583, + 0x201a03, + 0x20c743, + 0x323043, + 0x1a3443, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x15d4cb, + 0x207102, + 0x20f882, + 0x201a03, + 0x16fb88, + 0x207102, + 0x20f882, + 0x207b02, + 0x200942, + 0x20b302, + 0x208e83, + 0x200442, + 0x207102, + 0x39c783, + 0x20f882, + 0x238543, + 0x23cac3, + 0x207b02, + 0x323043, + 0x255783, + 0x28cac3, + 0x21bf84, + 0x208e83, + 0x21eb43, + 0x201a03, + 0x313e44, + 0x202443, + 0x323043, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x200e03, + 0x201a03, + 0x3ad3c7, + 0x238543, + 0x282c07, + 0x2d7f86, + 0x20e583, + 0x207603, + 0x323043, + 0x204c03, + 0x231604, + 0x2d5204, + 0x30e706, + 0x20bd43, + 0x208e83, + 0x201a03, + 0x22d585, + 0x321704, + 0x350503, + 0x39b4c3, + 0x2cbd87, + 0x342d45, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x99807, + 0x203402, + 0x28f283, + 0x205403, + 0x39c783, + 0x65e38543, + 0x206902, + 0x23cac3, + 0x2020c3, + 0x323043, + 0x231604, + 0x3797c3, + 0x2e3b03, + 0x28cac3, + 0x21bf84, + 0x6620ea42, + 0x208e83, + 0x201a03, + 0x206683, + 0x22e603, + 0x21a902, + 0x202443, + 0x16fb88, + 0x323043, + 0x10c3, + 0x31f944, + 0x39c783, + 0x20f882, + 0x238543, + 0x240244, + 0x23cac3, + 0x323043, + 0x231604, + 0x255783, + 0x3a2e44, + 0x20f644, + 0x20c0c6, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x221483, + 0x29f806, + 0x4504b, + 0x24386, + 0x3204a, + 0x112d0a, + 0x16fb88, + 0x214f44, + 0x67638543, + 0x39c744, + 0x23cac3, + 0x259004, + 0x323043, + 0x210543, + 0x28cac3, + 0x208e83, + 0x1a3443, + 0x201a03, + 0xbac3, + 0x3381cb, + 0x3af10a, + 0x3bf84c, + 
0xe4288, + 0x207102, + 0x20f882, + 0x207b02, + 0x2b13c5, + 0x231604, + 0x204a82, + 0x28cac3, + 0x20f644, + 0x204d42, + 0x200442, + 0x20d2c2, + 0x21a902, + 0x19c783, + 0x35f42, + 0x2b3509, + 0x2f7148, + 0x351689, + 0x2410c9, + 0x350f0a, + 0x26080a, + 0x2127c2, + 0x222902, + 0xf882, + 0x238543, + 0x229682, + 0x24af46, + 0x369d02, + 0x206a42, + 0x37904e, + 0x2213ce, + 0x284b47, + 0x208e07, + 0x2ec8c2, + 0x23cac3, + 0x323043, + 0x200042, + 0x200942, + 0x31603, + 0x23980f, + 0x20b542, + 0x2dd887, + 0x2b4a87, + 0x2b7e87, + 0x31a4cc, + 0x2c448c, + 0x223984, + 0x285b0a, + 0x221302, + 0x209a02, + 0x2c0884, + 0x21f502, + 0x2ca102, + 0x2c46c4, + 0x21a602, + 0x200282, + 0x11a83, + 0x297047, + 0x2beb05, + 0x20d842, + 0x239784, + 0x382842, + 0x2e3008, + 0x208e83, + 0x203488, + 0x203cc2, + 0x223b45, + 0x38dbc6, + 0x201a03, + 0x207082, + 0x2f0ac7, + 0xcb02, + 0x2797c5, + 0x358b85, + 0x209642, + 0x20fd02, + 0x2cf9ca, + 0x26eeca, + 0x21b9c2, + 0x2a4dc4, + 0x2002c2, + 0x3b4ec8, + 0x20d582, + 0x315b08, + 0x30ab47, + 0x30ba09, + 0x203442, + 0x310e45, + 0x3044c5, + 0x21770b, + 0x2d054c, + 0x237348, + 0x321b08, + 0x232ec2, + 0x208a02, + 0x207102, + 0x16fb88, + 0x20f882, + 0x238543, + 0x207b02, + 0x204d42, + 0xe03, + 0x200442, + 0x201a03, + 0x20d2c2, + 0x207102, + 0x68a0f882, + 0x68f23043, + 0x211a83, + 0x204a82, + 0x208e83, + 0x391783, + 0x201a03, + 0x2ef783, + 0x37f186, + 0x1615443, + 0x16fb88, + 0x11205, + 0xae90d, + 0xacc8a, + 0x6e487, + 0x69601e02, + 0x69a00242, + 0x69e00bc2, + 0x6a200702, + 0x6a60b5c2, + 0x6aa01382, + 0x36dc7, + 0x6ae0f882, + 0x6b20c8c2, + 0x6b604842, + 0x6ba04c82, + 0x2213c3, + 0x18ec4, + 0x2298c3, + 0x6be1d882, + 0x6c200182, + 0x53c47, + 0x6c60a442, + 0x6ca00782, + 0x6ce01bc2, + 0x6d205e82, + 0x6d601c82, + 0x6da00942, + 0xc2845, + 0x23ef43, + 0x281a04, + 0x6de1f502, + 0x6e205242, + 0x6e603582, + 0x17d50b, + 0x6ea01fc2, + 0x6f253442, + 0x6f604a82, + 0x6fa0b302, + 0x6fe14702, + 0x70200802, + 0x70614642, + 0x70a745c2, + 0x70e0ea42, + 0x71204802, + 0x71604d42, + 0x71a03382, + 0x71e08682, + 0x7224d382, + 0x1a3284, + 0x35efc3, + 0x72604f82, + 0x72a10902, + 0x72e11542, + 0x73201f02, + 0x73600442, + 0x73a0cb42, + 0x15d647, + 0x73e04102, + 0x74204142, + 0x7460d2c2, + 0x74a21382, + 0x1ad70c, + 0x74e2a202, + 0x75245542, + 0x75605942, + 0x75a06442, + 0x75e0c402, + 0x76260982, + 0x76600202, + 0x76a16fc2, + 0x76e7d302, + 0x772610c2, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x12143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x6ef797c3, + 0x212143, + 0x332244, + 0x2f7046, + 0x2f9a03, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x244949, + 0x235f42, + 0x26c783, + 0x2bcec3, + 0x20fbc5, + 0x2020c3, + 0x3797c3, + 0x212143, + 0x20c0c3, + 0x248d43, + 0x242989, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 
0x212143, + 0x235f42, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x3797c3, + 0x212143, + 0x235f42, + 0x235f42, + 0x3797c3, + 0x212143, + 0x77a38543, + 0x23cac3, + 0x20a6c3, + 0x28cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x16fb88, + 0x20f882, + 0x238543, + 0x208e83, + 0x201a03, + 0x238543, + 0x23cac3, + 0x323043, + 0x28cac3, + 0x208e83, + 0xe03, + 0x201a03, + 0x252044, + 0x20f882, + 0x238543, + 0x345903, + 0x23cac3, + 0x253384, + 0x21b583, + 0x323043, + 0x231604, + 0x255783, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x20c843, + 0x355685, + 0x248d43, + 0x202443, + 0xe03, + 0x20f882, + 0x238543, + 0x3797c3, + 0x208e83, + 0x201a03, + 0x207102, + 0x39c783, + 0x16fb88, + 0x238543, + 0x23cac3, + 0x323043, + 0x23a1c6, + 0x231604, + 0x255783, + 0x21bf84, + 0x208e83, + 0x201a03, + 0x221483, + 0x238543, + 0x23cac3, + 0x208e83, + 0x201a03, + 0x1442047, + 0x238543, + 0x24386, + 0x23cac3, + 0x323043, + 0xe5586, + 0x208e83, + 0x201a03, + 0x31dc48, + 0x321949, + 0x330189, + 0x33bb08, + 0x38fb48, + 0x38fb49, + 0x24558d, + 0x24dd8f, + 0x2f53d0, + 0x35648d, + 0x37210c, + 0x39064b, + 0xba9c8, + 0xac605, + 0x207102, + 0x342b85, + 0x200243, + 0x7ae0f882, + 0x23cac3, + 0x323043, + 0x2d8c47, + 0x263a43, + 0x28cac3, + 0x208e83, + 0x21b543, + 0x217e03, + 0x200e03, + 0x201a03, + 0x3821c6, + 0x205082, + 0x202443, + 0x16fb88, + 0x207102, + 0x39c783, + 0x20f882, + 0x238543, + 0x23cac3, + 0x323043, + 0x231604, + 0x28cac3, + 0x208e83, + 0x201a03, + 0x215443, + 0x106904, + 0x15217c6, + 0x207102, + 0x20f882, + 0x323043, + 0x28cac3, + 0x201a03, +} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. 
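+// A minimal decoding sketch (the variable names here are illustrative
+// placeholders, not part of the generated table): given the field widths in
+// the layout spelled out below, a packed entry u from children can be
+// unpacked as
+//
+//	lo := u & (1<<14 - 1)          // low nodes index (inclusive)
+//	hi := (u >> 14) & (1<<14 - 1)  // high nodes index (exclusive)
+//	nodeType := (u >> 28) & 0x3    // 2-bit node type
+//	wildcard := (u>>30)&1 == 1     // parent's wildcard bit
+//
+// The shifts follow directly from the MSB-to-LSB layout: 14 bits of lo at the
+// bottom, 14 bits of hi above it, then the 2-bit node type and the wildcard bit.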
+// +// The layout within the uint32, from MSB to LSB, is: +// [ 1 bits] unused +// [ 1 bits] wildcard bit +// [ 2 bits] node type +// [14 bits] high nodes index (exclusive) of children +// [14 bits] low nodes index (inclusive) of children +var children = [...]uint32{ + 0x0, + 0x10000000, + 0x20000000, + 0x40000000, + 0x50000000, + 0x60000000, + 0x186c615, + 0x187061b, + 0x189461c, + 0x19f0625, + 0x1a0467c, + 0x1a18681, + 0x1a2c686, + 0x1a4c68b, + 0x1a50693, + 0x1a68694, + 0x1a9069a, + 0x1a946a4, + 0x1aac6a5, + 0x1ab06ab, + 0x1ab46ac, + 0x1af06ad, + 0x1af46bc, + 0x21afc6bd, + 0x1b446bf, + 0x1b486d1, + 0x1b686d2, + 0x1b7c6da, + 0x1b806df, + 0x1bb06e0, + 0x1bcc6ec, + 0x1bf46f3, + 0x1c006fd, + 0x1c04700, + 0x1c9c701, + 0x1cb0727, + 0x1cc472c, + 0x1cf4731, + 0x1d0473d, + 0x1d18741, + 0x1d3c746, + 0x1e7474f, + 0x1e7879d, + 0x1ee479e, + 0x1f507b9, + 0x1f687d4, + 0x1f7c7da, + 0x1f847df, + 0x1f987e1, + 0x1f9c7e6, + 0x1fb87e7, + 0x20047ee, + 0x2020801, + 0x2024808, + 0x2028809, + 0x204480a, + 0x2080811, + 0x62084820, + 0x209c821, + 0x20b4827, + 0x20b882d, + 0x20c882e, + 0x2178832, + 0x217c85e, + 0x2218c85f, + 0x22190863, + 0x22194864, + 0x21cc865, + 0x21d0873, + 0x2658874, + 0x226f8996, + 0x226fc9be, + 0x227009bf, + 0x2270c9c0, + 0x227109c3, + 0x2271c9c4, + 0x227209c7, + 0x227249c8, + 0x227289c9, + 0x2272c9ca, + 0x227309cb, + 0x2273c9cc, + 0x227409cf, + 0x2274c9d0, + 0x227509d3, + 0x227549d4, + 0x227589d5, + 0x227649d6, + 0x227689d9, + 0x2276c9da, + 0x227709db, + 0x27749dc, + 0x227789dd, + 0x227849de, + 0x227889e1, + 0x27909e2, + 0x27cc9e4, + 0x227ec9f3, + 0x227f09fb, + 0x227f49fc, + 0x27f89fd, + 0x227fc9fe, + 0x28009ff, + 0x281ca00, + 0x2834a07, + 0x2838a0d, + 0x2848a0e, + 0x2854a12, + 0x2888a15, + 0x288ca22, + 0x28a0a23, + 0x228a8a28, + 0x2968a2a, + 0x2296ca5a, + 0x2974a5b, + 0x2978a5d, + 0x2990a5e, + 0x29a4a64, + 0x29cca69, + 0x29eca73, + 0x2a1ca7b, + 0x2a44a87, + 0x2a48a91, + 0x2a6ca92, + 0x2a70a9b, + 0x2a84a9c, + 0x2a88aa1, + 0x2a8caa2, + 0x2aacaa3, + 0x2ac8aab, + 0x2accab2, + 0x22ad0ab3, + 0x2ad4ab4, + 0x2ad8ab5, + 0x2ae8ab6, + 0x2aecaba, + 0x2b64abb, + 0x2b68ad9, + 0x2b84ada, + 0x2b94ae1, + 0x2ba8ae5, + 0x2bc0aea, + 0x2bd8af0, + 0x2bf0af6, + 0x2bf4afc, + 0x2c0cafd, + 0x2c28b03, + 0x2c48b0a, + 0x2c60b12, + 0x2cc0b18, + 0x2cdcb30, + 0x2ce4b37, + 0x2ce8b39, + 0x2cfcb3a, + 0x2d40b3f, + 0x2dc0b50, + 0x2decb70, + 0x2df0b7b, + 0x2df8b7c, + 0x2e18b7e, + 0x2e1cb86, + 0x2e40b87, + 0x2e48b90, + 0x2e84b92, + 0x2ec8ba1, + 0x2eccbb2, + 0x2f34bb3, + 0x2f38bcd, + 0x22f3cbce, + 0x22f40bcf, + 0x22f50bd0, + 0x22f54bd4, + 0x22f58bd5, + 0x22f5cbd6, + 0x22f60bd7, + 0x2f78bd8, + 0x2f9cbde, + 0x2fbcbe7, + 0x3580bef, + 0x358cd60, + 0x35acd63, + 0x3768d6b, + 0x3838dda, + 0x38a8e0e, + 0x3900e2a, + 0x39e8e40, + 0x3a40e7a, + 0x3a7ce90, + 0x3b78e9f, + 0x3c44ede, + 0x3cdcf11, + 0x3d6cf37, + 0x3dd0f5b, + 0x4008f74, + 0x40c1002, + 0x418d030, + 0x41d9063, + 0x4261076, + 0x429d098, + 0x42ed0a7, + 0x43650bb, + 0x643690d9, + 0x6436d0da, + 0x643710db, + 0x43ed0dc, + 0x44490fb, + 0x44c5112, + 0x453d131, + 0x45bd14f, + 0x462916f, + 0x475518a, + 0x47ad1d5, + 0x647b11eb, + 0x48491ec, + 0x48d1212, + 0x491d234, + 0x4985247, + 0x4a2d261, + 0x4af528b, + 0x4b5d2bd, + 0x4c712d7, + 0x64c7531c, + 0x64c7931d, + 0x4cd531e, + 0x4d31335, + 0x4dc134c, + 0x4e3d370, + 0x4e8138f, + 0x4f653a0, + 0x4f993d9, + 0x4ff93e6, + 0x506d3fe, + 0x50f541b, + 0x513543d, + 0x51a544d, + 0x651a9469, + 0x651ad46a, + 0x251b146b, + 0x51c946c, + 0x51e5472, + 0x5229479, + 0x523948a, + 0x525148e, + 0x52c9494, + 0x52d14b2, + 0x52e54b4, + 0x53014b9, + 0x532d4c0, + 
0x53314cb, + 0x53394cc, + 0x534d4ce, + 0x53694d3, + 0x53754da, + 0x537d4dd, + 0x53b94df, + 0x53cd4ee, + 0x53d54f3, + 0x53e14f5, + 0x53e94f8, + 0x540d4fa, + 0x5431503, + 0x544950c, + 0x544d512, + 0x5455513, + 0x5459515, + 0x54c1516, + 0x54c5530, + 0x54e9531, + 0x550d53a, + 0x5529543, + 0x553954a, + 0x554d54e, + 0x5551553, + 0x5559554, + 0x556d556, + 0x557d55b, + 0x558155f, + 0x559d560, + 0x5e2d567, + 0x5e6578b, + 0x5e91799, + 0x5ead7a4, + 0x5ecd7ab, + 0x5eed7b3, + 0x5f317bb, + 0x5f397cc, + 0x25f3d7ce, + 0x25f417cf, + 0x5f497d0, + 0x60c17d2, + 0x260c5830, + 0x260d5831, + 0x260dd835, + 0x260e9837, + 0x60ed83a, + 0x60f183b, + 0x611983c, + 0x6141846, + 0x6145850, + 0x617d851, + 0x619985f, + 0x6cf1866, + 0x6cf5b3c, + 0x6cf9b3d, + 0x26cfdb3e, + 0x6d01b3f, + 0x26d05b40, + 0x6d09b41, + 0x26d15b42, + 0x6d19b45, + 0x6d1db46, + 0x26d21b47, + 0x6d25b48, + 0x26d2db49, + 0x6d31b4b, + 0x6d35b4c, + 0x26d45b4d, + 0x6d49b51, + 0x6d4db52, + 0x6d51b53, + 0x6d55b54, + 0x26d59b55, + 0x6d5db56, + 0x6d61b57, + 0x6d65b58, + 0x6d69b59, + 0x26d71b5a, + 0x6d75b5c, + 0x6d79b5d, + 0x6d7db5e, + 0x26d81b5f, + 0x6d85b60, + 0x26d8db61, + 0x26d91b63, + 0x6dadb64, + 0x6dbdb6b, + 0x6e01b6f, + 0x6e05b80, + 0x6e29b81, + 0x6e2db8a, + 0x6e31b8b, + 0x6fbdb8c, + 0x26fc1bef, + 0x26fc9bf0, + 0x26fcdbf2, + 0x26fd1bf3, + 0x6fd9bf4, + 0x70b5bf6, + 0x270b9c2d, + 0x70bdc2e, + 0x70e9c2f, + 0x70edc3a, + 0x7111c3b, + 0x711dc44, + 0x713dc47, + 0x7141c4f, + 0x7179c50, + 0x7411c5e, + 0x74cdd04, + 0x74e1d33, + 0x7515d38, + 0x7545d45, + 0x7561d51, + 0x7589d58, + 0x75a9d62, + 0x75c5d6a, + 0x75edd71, + 0x75fdd7b, + 0x7601d7f, + 0x7605d80, + 0x7639d81, + 0x7645d8e, + 0x7665d91, + 0x76ddd99, + 0x276e1db7, + 0x7705db8, + 0x7725dc1, + 0x7739dc9, + 0x774ddce, + 0x7751dd3, + 0x7771dd4, + 0x7815ddc, + 0x7831e05, + 0x7855e0c, + 0x785de15, + 0x7869e17, + 0x7871e1a, + 0x7885e1c, + 0x78a5e21, + 0x78b1e29, + 0x78bde2c, + 0x78ede2f, + 0x79c1e3b, + 0x79c5e70, + 0x79d9e71, + 0x79e1e76, + 0x79f9e78, + 0x79fde7e, + 0x7a09e7f, + 0x7a0de82, + 0x7a29e83, + 0x7a65e8a, + 0x7a69e99, + 0x7a89e9a, + 0x7ad9ea2, + 0x7af5eb6, + 0x7b49ebd, + 0x7b4ded2, + 0x7b51ed3, + 0x7b55ed4, + 0x7b99ed5, + 0x7ba9ee6, + 0x7be9eea, + 0x7bedefa, + 0x7c1defb, + 0x7d65f07, + 0x7d8df59, + 0x7db9f63, + 0x7dc5f6e, + 0x7dcdf71, + 0x7eddf73, + 0x7ee9fb7, + 0x7ef5fba, + 0x7f01fbd, + 0x7f0dfc0, + 0x7f19fc3, + 0x7f25fc6, + 0x7f31fc9, + 0x7f3dfcc, + 0x7f49fcf, + 0x7f55fd2, + 0x7f61fd5, + 0x7f6dfd8, + 0x7f79fdb, + 0x7f81fde, + 0x7f8dfe0, + 0x7f99fe3, + 0x7fa5fe6, + 0x7fb1fe9, + 0x7fbdfec, + 0x7fc9fef, + 0x7fd5ff2, + 0x7fe1ff5, + 0x7fedff8, + 0x7ff9ffb, + 0x8005ffe, + 0x8032001, + 0x803e00c, + 0x804a00f, + 0x8056012, + 0x8062015, + 0x806e018, + 0x807601b, + 0x808201d, + 0x808e020, + 0x809a023, + 0x80a6026, + 0x80b2029, + 0x80be02c, + 0x80ca02f, + 0x80d6032, + 0x80e2035, + 0x80ee038, + 0x80fa03b, + 0x810603e, + 0x8112041, + 0x811a044, + 0x8126046, + 0x8132049, + 0x813e04c, + 0x814a04f, + 0x8156052, + 0x8162055, + 0x816e058, + 0x817a05b, + 0x817e05e, + 0x818a05f, + 0x81a6062, + 0x81aa069, + 0x81ba06a, + 0x81d606e, + 0x821a075, + 0x821e086, + 0x8232087, + 0x826608c, + 0x8276099, + 0x829609d, + 0x82ae0a5, + 0x82c60ab, + 0x82ce0b1, + 0x283120b3, + 0x83160c4, + 0x83420c5, + 0x834a0d0, + 0x835e0d2, +} + +// max children 494 (capacity 1023) +// max text offset 28750 (capacity 32767) +// max text length 36 (capacity 63) +// max hi 8407 (capacity 16383) +// max lo 8402 (capacity 16383) diff --git a/vendor/golang.org/x/net/publicsuffix/table_test.go b/vendor/golang.org/x/net/publicsuffix/table_test.go new file mode 
100644 index 0000000..6261018 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/table_test.go @@ -0,0 +1,16756 @@ +// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +var rules = [...]string{ + "ac", + "com.ac", + "edu.ac", + "gov.ac", + "net.ac", + "mil.ac", + "org.ac", + "ad", + "nom.ad", + "ae", + "co.ae", + "net.ae", + "org.ae", + "sch.ae", + "ac.ae", + "gov.ae", + "mil.ae", + "aero", + "accident-investigation.aero", + "accident-prevention.aero", + "aerobatic.aero", + "aeroclub.aero", + "aerodrome.aero", + "agents.aero", + "aircraft.aero", + "airline.aero", + "airport.aero", + "air-surveillance.aero", + "airtraffic.aero", + "air-traffic-control.aero", + "ambulance.aero", + "amusement.aero", + "association.aero", + "author.aero", + "ballooning.aero", + "broker.aero", + "caa.aero", + "cargo.aero", + "catering.aero", + "certification.aero", + "championship.aero", + "charter.aero", + "civilaviation.aero", + "club.aero", + "conference.aero", + "consultant.aero", + "consulting.aero", + "control.aero", + "council.aero", + "crew.aero", + "design.aero", + "dgca.aero", + "educator.aero", + "emergency.aero", + "engine.aero", + "engineer.aero", + "entertainment.aero", + "equipment.aero", + "exchange.aero", + "express.aero", + "federation.aero", + "flight.aero", + "freight.aero", + "fuel.aero", + "gliding.aero", + "government.aero", + "groundhandling.aero", + "group.aero", + "hanggliding.aero", + "homebuilt.aero", + "insurance.aero", + "journal.aero", + "journalist.aero", + "leasing.aero", + "logistics.aero", + "magazine.aero", + "maintenance.aero", + "media.aero", + "microlight.aero", + "modelling.aero", + "navigation.aero", + "parachuting.aero", + "paragliding.aero", + "passenger-association.aero", + "pilot.aero", + "press.aero", + "production.aero", + "recreation.aero", + "repbody.aero", + "res.aero", + "research.aero", + "rotorcraft.aero", + "safety.aero", + "scientist.aero", + "services.aero", + "show.aero", + "skydiving.aero", + "software.aero", + "student.aero", + "trader.aero", + "trading.aero", + "trainer.aero", + "union.aero", + "workinggroup.aero", + "works.aero", + "af", + "gov.af", + "com.af", + "org.af", + "net.af", + "edu.af", + "ag", + "com.ag", + "org.ag", + "net.ag", + "co.ag", + "nom.ag", + "ai", + "off.ai", + "com.ai", + "net.ai", + "org.ai", + "al", + "com.al", + "edu.al", + "gov.al", + "mil.al", + "net.al", + "org.al", + "am", + "ao", + "ed.ao", + "gv.ao", + "og.ao", + "co.ao", + "pb.ao", + "it.ao", + "aq", + "ar", + "com.ar", + "edu.ar", + "gob.ar", + "gov.ar", + "int.ar", + "mil.ar", + "musica.ar", + "net.ar", + "org.ar", + "tur.ar", + "arpa", + "e164.arpa", + "in-addr.arpa", + "ip6.arpa", + "iris.arpa", + "uri.arpa", + "urn.arpa", + "as", + "gov.as", + "asia", + "at", + "ac.at", + "co.at", + "gv.at", + "or.at", + "au", + "com.au", + "net.au", + "org.au", + "edu.au", + "gov.au", + "asn.au", + "id.au", + "info.au", + "conf.au", + "oz.au", + "act.au", + "nsw.au", + "nt.au", + "qld.au", + "sa.au", + "tas.au", + "vic.au", + "wa.au", + "act.edu.au", + "nsw.edu.au", + "nt.edu.au", + "qld.edu.au", + "sa.edu.au", + "tas.edu.au", + "vic.edu.au", + "wa.edu.au", + "qld.gov.au", + "sa.gov.au", + "tas.gov.au", + "vic.gov.au", + "wa.gov.au", + "aw", + "com.aw", + "ax", + "az", + "com.az", + "net.az", + "int.az", + "gov.az", + "org.az", + "edu.az", + "info.az", + "pp.az", + "mil.az", + "name.az", + "pro.az", + "biz.az", + "ba", + "com.ba", + "edu.ba", + "gov.ba", + "mil.ba", + "net.ba", + "org.ba", + "bb", + "biz.bb", + "co.bb", + "com.bb", + "edu.bb", + "gov.bb", 
+ "info.bb", + "net.bb", + "org.bb", + "store.bb", + "tv.bb", + "*.bd", + "be", + "ac.be", + "bf", + "gov.bf", + "bg", + "a.bg", + "b.bg", + "c.bg", + "d.bg", + "e.bg", + "f.bg", + "g.bg", + "h.bg", + "i.bg", + "j.bg", + "k.bg", + "l.bg", + "m.bg", + "n.bg", + "o.bg", + "p.bg", + "q.bg", + "r.bg", + "s.bg", + "t.bg", + "u.bg", + "v.bg", + "w.bg", + "x.bg", + "y.bg", + "z.bg", + "0.bg", + "1.bg", + "2.bg", + "3.bg", + "4.bg", + "5.bg", + "6.bg", + "7.bg", + "8.bg", + "9.bg", + "bh", + "com.bh", + "edu.bh", + "net.bh", + "org.bh", + "gov.bh", + "bi", + "co.bi", + "com.bi", + "edu.bi", + "or.bi", + "org.bi", + "biz", + "bj", + "asso.bj", + "barreau.bj", + "gouv.bj", + "bm", + "com.bm", + "edu.bm", + "gov.bm", + "net.bm", + "org.bm", + "*.bn", + "bo", + "com.bo", + "edu.bo", + "gov.bo", + "gob.bo", + "int.bo", + "org.bo", + "net.bo", + "mil.bo", + "tv.bo", + "br", + "adm.br", + "adv.br", + "agr.br", + "am.br", + "arq.br", + "art.br", + "ato.br", + "b.br", + "belem.br", + "bio.br", + "blog.br", + "bmd.br", + "cim.br", + "cng.br", + "cnt.br", + "com.br", + "coop.br", + "cri.br", + "def.br", + "ecn.br", + "eco.br", + "edu.br", + "emp.br", + "eng.br", + "esp.br", + "etc.br", + "eti.br", + "far.br", + "flog.br", + "floripa.br", + "fm.br", + "fnd.br", + "fot.br", + "fst.br", + "g12.br", + "ggf.br", + "gov.br", + "ac.gov.br", + "al.gov.br", + "am.gov.br", + "ap.gov.br", + "ba.gov.br", + "ce.gov.br", + "df.gov.br", + "es.gov.br", + "go.gov.br", + "ma.gov.br", + "mg.gov.br", + "ms.gov.br", + "mt.gov.br", + "pa.gov.br", + "pb.gov.br", + "pe.gov.br", + "pi.gov.br", + "pr.gov.br", + "rj.gov.br", + "rn.gov.br", + "ro.gov.br", + "rr.gov.br", + "rs.gov.br", + "sc.gov.br", + "se.gov.br", + "sp.gov.br", + "to.gov.br", + "imb.br", + "ind.br", + "inf.br", + "jampa.br", + "jor.br", + "jus.br", + "leg.br", + "lel.br", + "mat.br", + "med.br", + "mil.br", + "mp.br", + "mus.br", + "net.br", + "*.nom.br", + "not.br", + "ntr.br", + "odo.br", + "org.br", + "poa.br", + "ppg.br", + "pro.br", + "psc.br", + "psi.br", + "qsl.br", + "radio.br", + "rec.br", + "recife.br", + "slg.br", + "srv.br", + "taxi.br", + "teo.br", + "tmp.br", + "trd.br", + "tur.br", + "tv.br", + "vet.br", + "vix.br", + "vlog.br", + "wiki.br", + "zlg.br", + "bs", + "com.bs", + "net.bs", + "org.bs", + "edu.bs", + "gov.bs", + "bt", + "com.bt", + "edu.bt", + "gov.bt", + "net.bt", + "org.bt", + "bv", + "bw", + "co.bw", + "org.bw", + "by", + "gov.by", + "mil.by", + "com.by", + "of.by", + "bz", + "com.bz", + "net.bz", + "org.bz", + "edu.bz", + "gov.bz", + "ca", + "ab.ca", + "bc.ca", + "mb.ca", + "nb.ca", + "nf.ca", + "nl.ca", + "ns.ca", + "nt.ca", + "nu.ca", + "on.ca", + "pe.ca", + "qc.ca", + "sk.ca", + "yk.ca", + "gc.ca", + "cat", + "cc", + "cd", + "gov.cd", + "cf", + "cg", + "ch", + "ci", + "org.ci", + "or.ci", + "com.ci", + "co.ci", + "edu.ci", + "ed.ci", + "ac.ci", + "net.ci", + "go.ci", + "asso.ci", + "xn--aroport-bya.ci", + "int.ci", + "presse.ci", + "md.ci", + "gouv.ci", + "*.ck", + "!www.ck", + "cl", + "gov.cl", + "gob.cl", + "co.cl", + "mil.cl", + "cm", + "co.cm", + "com.cm", + "gov.cm", + "net.cm", + "cn", + "ac.cn", + "com.cn", + "edu.cn", + "gov.cn", + "net.cn", + "org.cn", + "mil.cn", + "xn--55qx5d.cn", + "xn--io0a7i.cn", + "xn--od0alg.cn", + "ah.cn", + "bj.cn", + "cq.cn", + "fj.cn", + "gd.cn", + "gs.cn", + "gz.cn", + "gx.cn", + "ha.cn", + "hb.cn", + "he.cn", + "hi.cn", + "hl.cn", + "hn.cn", + "jl.cn", + "js.cn", + "jx.cn", + "ln.cn", + "nm.cn", + "nx.cn", + "qh.cn", + "sc.cn", + "sd.cn", + "sh.cn", + "sn.cn", + "sx.cn", + "tj.cn", + "xj.cn", + 
"xz.cn", + "yn.cn", + "zj.cn", + "hk.cn", + "mo.cn", + "tw.cn", + "co", + "arts.co", + "com.co", + "edu.co", + "firm.co", + "gov.co", + "info.co", + "int.co", + "mil.co", + "net.co", + "nom.co", + "org.co", + "rec.co", + "web.co", + "com", + "coop", + "cr", + "ac.cr", + "co.cr", + "ed.cr", + "fi.cr", + "go.cr", + "or.cr", + "sa.cr", + "cu", + "com.cu", + "edu.cu", + "org.cu", + "net.cu", + "gov.cu", + "inf.cu", + "cv", + "cw", + "com.cw", + "edu.cw", + "net.cw", + "org.cw", + "cx", + "gov.cx", + "cy", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", + "cz", + "de", + "dj", + "dk", + "dm", + "com.dm", + "net.dm", + "org.dm", + "edu.dm", + "gov.dm", + "do", + "art.do", + "com.do", + "edu.do", + "gob.do", + "gov.do", + "mil.do", + "net.do", + "org.do", + "sld.do", + "web.do", + "dz", + "com.dz", + "org.dz", + "net.dz", + "gov.dz", + "edu.dz", + "asso.dz", + "pol.dz", + "art.dz", + "ec", + "com.ec", + "info.ec", + "net.ec", + "fin.ec", + "k12.ec", + "med.ec", + "pro.ec", + "org.ec", + "edu.ec", + "gov.ec", + "gob.ec", + "mil.ec", + "edu", + "ee", + "edu.ee", + "gov.ee", + "riik.ee", + "lib.ee", + "med.ee", + "com.ee", + "pri.ee", + "aip.ee", + "org.ee", + "fie.ee", + "eg", + "com.eg", + "edu.eg", + "eun.eg", + "gov.eg", + "mil.eg", + "name.eg", + "net.eg", + "org.eg", + "sci.eg", + "*.er", + "es", + "com.es", + "nom.es", + "org.es", + "gob.es", + "edu.es", + "et", + "com.et", + "gov.et", + "org.et", + "edu.et", + "biz.et", + "name.et", + "info.et", + "net.et", + "eu", + "fi", + "aland.fi", + "*.fj", + "*.fk", + "fm", + "fo", + "fr", + "com.fr", + "asso.fr", + "nom.fr", + "prd.fr", + "presse.fr", + "tm.fr", + "aeroport.fr", + "assedic.fr", + "avocat.fr", + "avoues.fr", + "cci.fr", + "chambagri.fr", + "chirurgiens-dentistes.fr", + "experts-comptables.fr", + "geometre-expert.fr", + "gouv.fr", + "greta.fr", + "huissier-justice.fr", + "medecin.fr", + "notaires.fr", + "pharmacien.fr", + "port.fr", + "veterinaire.fr", + "ga", + "gb", + "gd", + "ge", + "com.ge", + "edu.ge", + "gov.ge", + "org.ge", + "mil.ge", + "net.ge", + "pvt.ge", + "gf", + "gg", + "co.gg", + "net.gg", + "org.gg", + "gh", + "com.gh", + "edu.gh", + "gov.gh", + "org.gh", + "mil.gh", + "gi", + "com.gi", + "ltd.gi", + "gov.gi", + "mod.gi", + "edu.gi", + "org.gi", + "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", + "gm", + "gn", + "ac.gn", + "com.gn", + "edu.gn", + "gov.gn", + "org.gn", + "net.gn", + "gov", + "gp", + "com.gp", + "net.gp", + "mobi.gp", + "edu.gp", + "org.gp", + "asso.gp", + "gq", + "gr", + "com.gr", + "edu.gr", + "net.gr", + "org.gr", + "gov.gr", + "gs", + "gt", + "com.gt", + "edu.gt", + "gob.gt", + "ind.gt", + "mil.gt", + "net.gt", + "org.gt", + "*.gu", + "gw", + "gy", + "co.gy", + "com.gy", + "edu.gy", + "gov.gy", + "net.gy", + "org.gy", + "hk", + "com.hk", + "edu.hk", + "gov.hk", + "idv.hk", + "net.hk", + "org.hk", + "xn--55qx5d.hk", + "xn--wcvs22d.hk", + "xn--lcvr32d.hk", + "xn--mxtq1m.hk", + "xn--gmqw5a.hk", + "xn--ciqpn.hk", + "xn--gmq050i.hk", + "xn--zf0avx.hk", + "xn--io0a7i.hk", + "xn--mk0axi.hk", + "xn--od0alg.hk", + "xn--od0aq3b.hk", + "xn--tn0ag.hk", + "xn--uc0atv.hk", + "xn--uc0ay4a.hk", + "hm", + "hn", + "com.hn", + "edu.hn", + "org.hn", + "net.hn", + "mil.hn", + "gob.hn", + "hr", + "iz.hr", + "from.hr", + "name.hr", + "com.hr", + "ht", + "com.ht", + "shop.ht", + "firm.ht", + "info.ht", + "adult.ht", + "net.ht", + "pro.ht", + "org.ht", + "med.ht", + "art.ht", + "coop.ht", + 
"pol.ht", + "asso.ht", + "edu.ht", + "rel.ht", + "gouv.ht", + "perso.ht", + "hu", + "co.hu", + "info.hu", + "org.hu", + "priv.hu", + "sport.hu", + "tm.hu", + "2000.hu", + "agrar.hu", + "bolt.hu", + "casino.hu", + "city.hu", + "erotica.hu", + "erotika.hu", + "film.hu", + "forum.hu", + "games.hu", + "hotel.hu", + "ingatlan.hu", + "jogasz.hu", + "konyvelo.hu", + "lakas.hu", + "media.hu", + "news.hu", + "reklam.hu", + "sex.hu", + "shop.hu", + "suli.hu", + "szex.hu", + "tozsde.hu", + "utazas.hu", + "video.hu", + "id", + "ac.id", + "biz.id", + "co.id", + "desa.id", + "go.id", + "mil.id", + "my.id", + "net.id", + "or.id", + "sch.id", + "web.id", + "ie", + "gov.ie", + "il", + "ac.il", + "co.il", + "gov.il", + "idf.il", + "k12.il", + "muni.il", + "net.il", + "org.il", + "im", + "ac.im", + "co.im", + "com.im", + "ltd.co.im", + "net.im", + "org.im", + "plc.co.im", + "tt.im", + "tv.im", + "in", + "co.in", + "firm.in", + "net.in", + "org.in", + "gen.in", + "ind.in", + "nic.in", + "ac.in", + "edu.in", + "res.in", + "gov.in", + "mil.in", + "info", + "int", + "eu.int", + "io", + "com.io", + "iq", + "gov.iq", + "edu.iq", + "mil.iq", + "com.iq", + "org.iq", + "net.iq", + "ir", + "ac.ir", + "co.ir", + "gov.ir", + "id.ir", + "net.ir", + "org.ir", + "sch.ir", + "xn--mgba3a4f16a.ir", + "xn--mgba3a4fra.ir", + "is", + "net.is", + "com.is", + "edu.is", + "gov.is", + "org.is", + "int.is", + "it", + "gov.it", + "edu.it", + "abr.it", + "abruzzo.it", + "aosta-valley.it", + "aostavalley.it", + "bas.it", + "basilicata.it", + "cal.it", + "calabria.it", + "cam.it", + "campania.it", + "emilia-romagna.it", + "emiliaromagna.it", + "emr.it", + "friuli-v-giulia.it", + "friuli-ve-giulia.it", + "friuli-vegiulia.it", + "friuli-venezia-giulia.it", + "friuli-veneziagiulia.it", + "friuli-vgiulia.it", + "friuliv-giulia.it", + "friulive-giulia.it", + "friulivegiulia.it", + "friulivenezia-giulia.it", + "friuliveneziagiulia.it", + "friulivgiulia.it", + "fvg.it", + "laz.it", + "lazio.it", + "lig.it", + "liguria.it", + "lom.it", + "lombardia.it", + "lombardy.it", + "lucania.it", + "mar.it", + "marche.it", + "mol.it", + "molise.it", + "piedmont.it", + "piemonte.it", + "pmn.it", + "pug.it", + "puglia.it", + "sar.it", + "sardegna.it", + "sardinia.it", + "sic.it", + "sicilia.it", + "sicily.it", + "taa.it", + "tos.it", + "toscana.it", + "trentino-a-adige.it", + "trentino-aadige.it", + "trentino-alto-adige.it", + "trentino-altoadige.it", + "trentino-s-tirol.it", + "trentino-stirol.it", + "trentino-sud-tirol.it", + "trentino-sudtirol.it", + "trentino-sued-tirol.it", + "trentino-suedtirol.it", + "trentinoa-adige.it", + "trentinoaadige.it", + "trentinoalto-adige.it", + "trentinoaltoadige.it", + "trentinos-tirol.it", + "trentinostirol.it", + "trentinosud-tirol.it", + "trentinosudtirol.it", + "trentinosued-tirol.it", + "trentinosuedtirol.it", + "tuscany.it", + "umb.it", + "umbria.it", + "val-d-aosta.it", + "val-daosta.it", + "vald-aosta.it", + "valdaosta.it", + "valle-aosta.it", + "valle-d-aosta.it", + "valle-daosta.it", + "valleaosta.it", + "valled-aosta.it", + "valledaosta.it", + "vallee-aoste.it", + "valleeaoste.it", + "vao.it", + "vda.it", + "ven.it", + "veneto.it", + "ag.it", + "agrigento.it", + "al.it", + "alessandria.it", + "alto-adige.it", + "altoadige.it", + "an.it", + "ancona.it", + "andria-barletta-trani.it", + "andria-trani-barletta.it", + "andriabarlettatrani.it", + "andriatranibarletta.it", + "ao.it", + "aosta.it", + "aoste.it", + "ap.it", + "aq.it", + "aquila.it", + "ar.it", + "arezzo.it", + "ascoli-piceno.it", + "ascolipiceno.it", + 
"asti.it", + "at.it", + "av.it", + "avellino.it", + "ba.it", + "balsan.it", + "bari.it", + "barletta-trani-andria.it", + "barlettatraniandria.it", + "belluno.it", + "benevento.it", + "bergamo.it", + "bg.it", + "bi.it", + "biella.it", + "bl.it", + "bn.it", + "bo.it", + "bologna.it", + "bolzano.it", + "bozen.it", + "br.it", + "brescia.it", + "brindisi.it", + "bs.it", + "bt.it", + "bz.it", + "ca.it", + "cagliari.it", + "caltanissetta.it", + "campidano-medio.it", + "campidanomedio.it", + "campobasso.it", + "carbonia-iglesias.it", + "carboniaiglesias.it", + "carrara-massa.it", + "carraramassa.it", + "caserta.it", + "catania.it", + "catanzaro.it", + "cb.it", + "ce.it", + "cesena-forli.it", + "cesenaforli.it", + "ch.it", + "chieti.it", + "ci.it", + "cl.it", + "cn.it", + "co.it", + "como.it", + "cosenza.it", + "cr.it", + "cremona.it", + "crotone.it", + "cs.it", + "ct.it", + "cuneo.it", + "cz.it", + "dell-ogliastra.it", + "dellogliastra.it", + "en.it", + "enna.it", + "fc.it", + "fe.it", + "fermo.it", + "ferrara.it", + "fg.it", + "fi.it", + "firenze.it", + "florence.it", + "fm.it", + "foggia.it", + "forli-cesena.it", + "forlicesena.it", + "fr.it", + "frosinone.it", + "ge.it", + "genoa.it", + "genova.it", + "go.it", + "gorizia.it", + "gr.it", + "grosseto.it", + "iglesias-carbonia.it", + "iglesiascarbonia.it", + "im.it", + "imperia.it", + "is.it", + "isernia.it", + "kr.it", + "la-spezia.it", + "laquila.it", + "laspezia.it", + "latina.it", + "lc.it", + "le.it", + "lecce.it", + "lecco.it", + "li.it", + "livorno.it", + "lo.it", + "lodi.it", + "lt.it", + "lu.it", + "lucca.it", + "macerata.it", + "mantova.it", + "massa-carrara.it", + "massacarrara.it", + "matera.it", + "mb.it", + "mc.it", + "me.it", + "medio-campidano.it", + "mediocampidano.it", + "messina.it", + "mi.it", + "milan.it", + "milano.it", + "mn.it", + "mo.it", + "modena.it", + "monza-brianza.it", + "monza-e-della-brianza.it", + "monza.it", + "monzabrianza.it", + "monzaebrianza.it", + "monzaedellabrianza.it", + "ms.it", + "mt.it", + "na.it", + "naples.it", + "napoli.it", + "no.it", + "novara.it", + "nu.it", + "nuoro.it", + "og.it", + "ogliastra.it", + "olbia-tempio.it", + "olbiatempio.it", + "or.it", + "oristano.it", + "ot.it", + "pa.it", + "padova.it", + "padua.it", + "palermo.it", + "parma.it", + "pavia.it", + "pc.it", + "pd.it", + "pe.it", + "perugia.it", + "pesaro-urbino.it", + "pesarourbino.it", + "pescara.it", + "pg.it", + "pi.it", + "piacenza.it", + "pisa.it", + "pistoia.it", + "pn.it", + "po.it", + "pordenone.it", + "potenza.it", + "pr.it", + "prato.it", + "pt.it", + "pu.it", + "pv.it", + "pz.it", + "ra.it", + "ragusa.it", + "ravenna.it", + "rc.it", + "re.it", + "reggio-calabria.it", + "reggio-emilia.it", + "reggiocalabria.it", + "reggioemilia.it", + "rg.it", + "ri.it", + "rieti.it", + "rimini.it", + "rm.it", + "rn.it", + "ro.it", + "roma.it", + "rome.it", + "rovigo.it", + "sa.it", + "salerno.it", + "sassari.it", + "savona.it", + "si.it", + "siena.it", + "siracusa.it", + "so.it", + "sondrio.it", + "sp.it", + "sr.it", + "ss.it", + "suedtirol.it", + "sv.it", + "ta.it", + "taranto.it", + "te.it", + "tempio-olbia.it", + "tempioolbia.it", + "teramo.it", + "terni.it", + "tn.it", + "to.it", + "torino.it", + "tp.it", + "tr.it", + "trani-andria-barletta.it", + "trani-barletta-andria.it", + "traniandriabarletta.it", + "tranibarlettaandria.it", + "trapani.it", + "trentino.it", + "trento.it", + "treviso.it", + "trieste.it", + "ts.it", + "turin.it", + "tv.it", + "ud.it", + "udine.it", + "urbino-pesaro.it", + "urbinopesaro.it", + "va.it", + 
"varese.it", + "vb.it", + "vc.it", + "ve.it", + "venezia.it", + "venice.it", + "verbania.it", + "vercelli.it", + "verona.it", + "vi.it", + "vibo-valentia.it", + "vibovalentia.it", + "vicenza.it", + "viterbo.it", + "vr.it", + "vs.it", + "vt.it", + "vv.it", + "je", + "co.je", + "net.je", + "org.je", + "*.jm", + "jo", + "com.jo", + "org.jo", + "net.jo", + "edu.jo", + "sch.jo", + "gov.jo", + "mil.jo", + "name.jo", + "jobs", + "jp", + "ac.jp", + "ad.jp", + "co.jp", + "ed.jp", + "go.jp", + "gr.jp", + "lg.jp", + "ne.jp", + "or.jp", + "aichi.jp", + "akita.jp", + "aomori.jp", + "chiba.jp", + "ehime.jp", + "fukui.jp", + "fukuoka.jp", + "fukushima.jp", + "gifu.jp", + "gunma.jp", + "hiroshima.jp", + "hokkaido.jp", + "hyogo.jp", + "ibaraki.jp", + "ishikawa.jp", + "iwate.jp", + "kagawa.jp", + "kagoshima.jp", + "kanagawa.jp", + "kochi.jp", + "kumamoto.jp", + "kyoto.jp", + "mie.jp", + "miyagi.jp", + "miyazaki.jp", + "nagano.jp", + "nagasaki.jp", + "nara.jp", + "niigata.jp", + "oita.jp", + "okayama.jp", + "okinawa.jp", + "osaka.jp", + "saga.jp", + "saitama.jp", + "shiga.jp", + "shimane.jp", + "shizuoka.jp", + "tochigi.jp", + "tokushima.jp", + "tokyo.jp", + "tottori.jp", + "toyama.jp", + "wakayama.jp", + "yamagata.jp", + "yamaguchi.jp", + "yamanashi.jp", + "xn--4pvxs.jp", + "xn--vgu402c.jp", + "xn--c3s14m.jp", + "xn--f6qx53a.jp", + "xn--8pvr4u.jp", + "xn--uist22h.jp", + "xn--djrs72d6uy.jp", + "xn--mkru45i.jp", + "xn--0trq7p7nn.jp", + "xn--8ltr62k.jp", + "xn--2m4a15e.jp", + "xn--efvn9s.jp", + "xn--32vp30h.jp", + "xn--4it797k.jp", + "xn--1lqs71d.jp", + "xn--5rtp49c.jp", + "xn--5js045d.jp", + "xn--ehqz56n.jp", + "xn--1lqs03n.jp", + "xn--qqqt11m.jp", + "xn--kbrq7o.jp", + "xn--pssu33l.jp", + "xn--ntsq17g.jp", + "xn--uisz3g.jp", + "xn--6btw5a.jp", + "xn--1ctwo.jp", + "xn--6orx2r.jp", + "xn--rht61e.jp", + "xn--rht27z.jp", + "xn--djty4k.jp", + "xn--nit225k.jp", + "xn--rht3d.jp", + "xn--klty5x.jp", + "xn--kltx9a.jp", + "xn--kltp7d.jp", + "xn--uuwu58a.jp", + "xn--zbx025d.jp", + "xn--ntso0iqx3a.jp", + "xn--elqq16h.jp", + "xn--4it168d.jp", + "xn--klt787d.jp", + "xn--rny31h.jp", + "xn--7t0a264c.jp", + "xn--5rtq34k.jp", + "xn--k7yn95e.jp", + "xn--tor131o.jp", + "xn--d5qv7z876c.jp", + "*.kawasaki.jp", + "*.kitakyushu.jp", + "*.kobe.jp", + "*.nagoya.jp", + "*.sapporo.jp", + "*.sendai.jp", + "*.yokohama.jp", + "!city.kawasaki.jp", + "!city.kitakyushu.jp", + "!city.kobe.jp", + "!city.nagoya.jp", + "!city.sapporo.jp", + "!city.sendai.jp", + "!city.yokohama.jp", + "aisai.aichi.jp", + "ama.aichi.jp", + "anjo.aichi.jp", + "asuke.aichi.jp", + "chiryu.aichi.jp", + "chita.aichi.jp", + "fuso.aichi.jp", + "gamagori.aichi.jp", + "handa.aichi.jp", + "hazu.aichi.jp", + "hekinan.aichi.jp", + "higashiura.aichi.jp", + "ichinomiya.aichi.jp", + "inazawa.aichi.jp", + "inuyama.aichi.jp", + "isshiki.aichi.jp", + "iwakura.aichi.jp", + "kanie.aichi.jp", + "kariya.aichi.jp", + "kasugai.aichi.jp", + "kira.aichi.jp", + "kiyosu.aichi.jp", + "komaki.aichi.jp", + "konan.aichi.jp", + "kota.aichi.jp", + "mihama.aichi.jp", + "miyoshi.aichi.jp", + "nishio.aichi.jp", + "nisshin.aichi.jp", + "obu.aichi.jp", + "oguchi.aichi.jp", + "oharu.aichi.jp", + "okazaki.aichi.jp", + "owariasahi.aichi.jp", + "seto.aichi.jp", + "shikatsu.aichi.jp", + "shinshiro.aichi.jp", + "shitara.aichi.jp", + "tahara.aichi.jp", + "takahama.aichi.jp", + "tobishima.aichi.jp", + "toei.aichi.jp", + "togo.aichi.jp", + "tokai.aichi.jp", + "tokoname.aichi.jp", + "toyoake.aichi.jp", + "toyohashi.aichi.jp", + "toyokawa.aichi.jp", + "toyone.aichi.jp", + "toyota.aichi.jp", + "tsushima.aichi.jp", 
+ "yatomi.aichi.jp", + "akita.akita.jp", + "daisen.akita.jp", + "fujisato.akita.jp", + "gojome.akita.jp", + "hachirogata.akita.jp", + "happou.akita.jp", + "higashinaruse.akita.jp", + "honjo.akita.jp", + "honjyo.akita.jp", + "ikawa.akita.jp", + "kamikoani.akita.jp", + "kamioka.akita.jp", + "katagami.akita.jp", + "kazuno.akita.jp", + "kitaakita.akita.jp", + "kosaka.akita.jp", + "kyowa.akita.jp", + "misato.akita.jp", + "mitane.akita.jp", + "moriyoshi.akita.jp", + "nikaho.akita.jp", + "noshiro.akita.jp", + "odate.akita.jp", + "oga.akita.jp", + "ogata.akita.jp", + "semboku.akita.jp", + "yokote.akita.jp", + "yurihonjo.akita.jp", + "aomori.aomori.jp", + "gonohe.aomori.jp", + "hachinohe.aomori.jp", + "hashikami.aomori.jp", + "hiranai.aomori.jp", + "hirosaki.aomori.jp", + "itayanagi.aomori.jp", + "kuroishi.aomori.jp", + "misawa.aomori.jp", + "mutsu.aomori.jp", + "nakadomari.aomori.jp", + "noheji.aomori.jp", + "oirase.aomori.jp", + "owani.aomori.jp", + "rokunohe.aomori.jp", + "sannohe.aomori.jp", + "shichinohe.aomori.jp", + "shingo.aomori.jp", + "takko.aomori.jp", + "towada.aomori.jp", + "tsugaru.aomori.jp", + "tsuruta.aomori.jp", + "abiko.chiba.jp", + "asahi.chiba.jp", + "chonan.chiba.jp", + "chosei.chiba.jp", + "choshi.chiba.jp", + "chuo.chiba.jp", + "funabashi.chiba.jp", + "futtsu.chiba.jp", + "hanamigawa.chiba.jp", + "ichihara.chiba.jp", + "ichikawa.chiba.jp", + "ichinomiya.chiba.jp", + "inzai.chiba.jp", + "isumi.chiba.jp", + "kamagaya.chiba.jp", + "kamogawa.chiba.jp", + "kashiwa.chiba.jp", + "katori.chiba.jp", + "katsuura.chiba.jp", + "kimitsu.chiba.jp", + "kisarazu.chiba.jp", + "kozaki.chiba.jp", + "kujukuri.chiba.jp", + "kyonan.chiba.jp", + "matsudo.chiba.jp", + "midori.chiba.jp", + "mihama.chiba.jp", + "minamiboso.chiba.jp", + "mobara.chiba.jp", + "mutsuzawa.chiba.jp", + "nagara.chiba.jp", + "nagareyama.chiba.jp", + "narashino.chiba.jp", + "narita.chiba.jp", + "noda.chiba.jp", + "oamishirasato.chiba.jp", + "omigawa.chiba.jp", + "onjuku.chiba.jp", + "otaki.chiba.jp", + "sakae.chiba.jp", + "sakura.chiba.jp", + "shimofusa.chiba.jp", + "shirako.chiba.jp", + "shiroi.chiba.jp", + "shisui.chiba.jp", + "sodegaura.chiba.jp", + "sosa.chiba.jp", + "tako.chiba.jp", + "tateyama.chiba.jp", + "togane.chiba.jp", + "tohnosho.chiba.jp", + "tomisato.chiba.jp", + "urayasu.chiba.jp", + "yachimata.chiba.jp", + "yachiyo.chiba.jp", + "yokaichiba.chiba.jp", + "yokoshibahikari.chiba.jp", + "yotsukaido.chiba.jp", + "ainan.ehime.jp", + "honai.ehime.jp", + "ikata.ehime.jp", + "imabari.ehime.jp", + "iyo.ehime.jp", + "kamijima.ehime.jp", + "kihoku.ehime.jp", + "kumakogen.ehime.jp", + "masaki.ehime.jp", + "matsuno.ehime.jp", + "matsuyama.ehime.jp", + "namikata.ehime.jp", + "niihama.ehime.jp", + "ozu.ehime.jp", + "saijo.ehime.jp", + "seiyo.ehime.jp", + "shikokuchuo.ehime.jp", + "tobe.ehime.jp", + "toon.ehime.jp", + "uchiko.ehime.jp", + "uwajima.ehime.jp", + "yawatahama.ehime.jp", + "echizen.fukui.jp", + "eiheiji.fukui.jp", + "fukui.fukui.jp", + "ikeda.fukui.jp", + "katsuyama.fukui.jp", + "mihama.fukui.jp", + "minamiechizen.fukui.jp", + "obama.fukui.jp", + "ohi.fukui.jp", + "ono.fukui.jp", + "sabae.fukui.jp", + "sakai.fukui.jp", + "takahama.fukui.jp", + "tsuruga.fukui.jp", + "wakasa.fukui.jp", + "ashiya.fukuoka.jp", + "buzen.fukuoka.jp", + "chikugo.fukuoka.jp", + "chikuho.fukuoka.jp", + "chikujo.fukuoka.jp", + "chikushino.fukuoka.jp", + "chikuzen.fukuoka.jp", + "chuo.fukuoka.jp", + "dazaifu.fukuoka.jp", + "fukuchi.fukuoka.jp", + "hakata.fukuoka.jp", + "higashi.fukuoka.jp", + "hirokawa.fukuoka.jp", + "hisayama.fukuoka.jp", + 
"iizuka.fukuoka.jp", + "inatsuki.fukuoka.jp", + "kaho.fukuoka.jp", + "kasuga.fukuoka.jp", + "kasuya.fukuoka.jp", + "kawara.fukuoka.jp", + "keisen.fukuoka.jp", + "koga.fukuoka.jp", + "kurate.fukuoka.jp", + "kurogi.fukuoka.jp", + "kurume.fukuoka.jp", + "minami.fukuoka.jp", + "miyako.fukuoka.jp", + "miyama.fukuoka.jp", + "miyawaka.fukuoka.jp", + "mizumaki.fukuoka.jp", + "munakata.fukuoka.jp", + "nakagawa.fukuoka.jp", + "nakama.fukuoka.jp", + "nishi.fukuoka.jp", + "nogata.fukuoka.jp", + "ogori.fukuoka.jp", + "okagaki.fukuoka.jp", + "okawa.fukuoka.jp", + "oki.fukuoka.jp", + "omuta.fukuoka.jp", + "onga.fukuoka.jp", + "onojo.fukuoka.jp", + "oto.fukuoka.jp", + "saigawa.fukuoka.jp", + "sasaguri.fukuoka.jp", + "shingu.fukuoka.jp", + "shinyoshitomi.fukuoka.jp", + "shonai.fukuoka.jp", + "soeda.fukuoka.jp", + "sue.fukuoka.jp", + "tachiarai.fukuoka.jp", + "tagawa.fukuoka.jp", + "takata.fukuoka.jp", + "toho.fukuoka.jp", + "toyotsu.fukuoka.jp", + "tsuiki.fukuoka.jp", + "ukiha.fukuoka.jp", + "umi.fukuoka.jp", + "usui.fukuoka.jp", + "yamada.fukuoka.jp", + "yame.fukuoka.jp", + "yanagawa.fukuoka.jp", + "yukuhashi.fukuoka.jp", + "aizubange.fukushima.jp", + "aizumisato.fukushima.jp", + "aizuwakamatsu.fukushima.jp", + "asakawa.fukushima.jp", + "bandai.fukushima.jp", + "date.fukushima.jp", + "fukushima.fukushima.jp", + "furudono.fukushima.jp", + "futaba.fukushima.jp", + "hanawa.fukushima.jp", + "higashi.fukushima.jp", + "hirata.fukushima.jp", + "hirono.fukushima.jp", + "iitate.fukushima.jp", + "inawashiro.fukushima.jp", + "ishikawa.fukushima.jp", + "iwaki.fukushima.jp", + "izumizaki.fukushima.jp", + "kagamiishi.fukushima.jp", + "kaneyama.fukushima.jp", + "kawamata.fukushima.jp", + "kitakata.fukushima.jp", + "kitashiobara.fukushima.jp", + "koori.fukushima.jp", + "koriyama.fukushima.jp", + "kunimi.fukushima.jp", + "miharu.fukushima.jp", + "mishima.fukushima.jp", + "namie.fukushima.jp", + "nango.fukushima.jp", + "nishiaizu.fukushima.jp", + "nishigo.fukushima.jp", + "okuma.fukushima.jp", + "omotego.fukushima.jp", + "ono.fukushima.jp", + "otama.fukushima.jp", + "samegawa.fukushima.jp", + "shimogo.fukushima.jp", + "shirakawa.fukushima.jp", + "showa.fukushima.jp", + "soma.fukushima.jp", + "sukagawa.fukushima.jp", + "taishin.fukushima.jp", + "tamakawa.fukushima.jp", + "tanagura.fukushima.jp", + "tenei.fukushima.jp", + "yabuki.fukushima.jp", + "yamato.fukushima.jp", + "yamatsuri.fukushima.jp", + "yanaizu.fukushima.jp", + "yugawa.fukushima.jp", + "anpachi.gifu.jp", + "ena.gifu.jp", + "gifu.gifu.jp", + "ginan.gifu.jp", + "godo.gifu.jp", + "gujo.gifu.jp", + "hashima.gifu.jp", + "hichiso.gifu.jp", + "hida.gifu.jp", + "higashishirakawa.gifu.jp", + "ibigawa.gifu.jp", + "ikeda.gifu.jp", + "kakamigahara.gifu.jp", + "kani.gifu.jp", + "kasahara.gifu.jp", + "kasamatsu.gifu.jp", + "kawaue.gifu.jp", + "kitagata.gifu.jp", + "mino.gifu.jp", + "minokamo.gifu.jp", + "mitake.gifu.jp", + "mizunami.gifu.jp", + "motosu.gifu.jp", + "nakatsugawa.gifu.jp", + "ogaki.gifu.jp", + "sakahogi.gifu.jp", + "seki.gifu.jp", + "sekigahara.gifu.jp", + "shirakawa.gifu.jp", + "tajimi.gifu.jp", + "takayama.gifu.jp", + "tarui.gifu.jp", + "toki.gifu.jp", + "tomika.gifu.jp", + "wanouchi.gifu.jp", + "yamagata.gifu.jp", + "yaotsu.gifu.jp", + "yoro.gifu.jp", + "annaka.gunma.jp", + "chiyoda.gunma.jp", + "fujioka.gunma.jp", + "higashiagatsuma.gunma.jp", + "isesaki.gunma.jp", + "itakura.gunma.jp", + "kanna.gunma.jp", + "kanra.gunma.jp", + "katashina.gunma.jp", + "kawaba.gunma.jp", + "kiryu.gunma.jp", + "kusatsu.gunma.jp", + "maebashi.gunma.jp", + "meiwa.gunma.jp", + 
"midori.gunma.jp", + "minakami.gunma.jp", + "naganohara.gunma.jp", + "nakanojo.gunma.jp", + "nanmoku.gunma.jp", + "numata.gunma.jp", + "oizumi.gunma.jp", + "ora.gunma.jp", + "ota.gunma.jp", + "shibukawa.gunma.jp", + "shimonita.gunma.jp", + "shinto.gunma.jp", + "showa.gunma.jp", + "takasaki.gunma.jp", + "takayama.gunma.jp", + "tamamura.gunma.jp", + "tatebayashi.gunma.jp", + "tomioka.gunma.jp", + "tsukiyono.gunma.jp", + "tsumagoi.gunma.jp", + "ueno.gunma.jp", + "yoshioka.gunma.jp", + "asaminami.hiroshima.jp", + "daiwa.hiroshima.jp", + "etajima.hiroshima.jp", + "fuchu.hiroshima.jp", + "fukuyama.hiroshima.jp", + "hatsukaichi.hiroshima.jp", + "higashihiroshima.hiroshima.jp", + "hongo.hiroshima.jp", + "jinsekikogen.hiroshima.jp", + "kaita.hiroshima.jp", + "kui.hiroshima.jp", + "kumano.hiroshima.jp", + "kure.hiroshima.jp", + "mihara.hiroshima.jp", + "miyoshi.hiroshima.jp", + "naka.hiroshima.jp", + "onomichi.hiroshima.jp", + "osakikamijima.hiroshima.jp", + "otake.hiroshima.jp", + "saka.hiroshima.jp", + "sera.hiroshima.jp", + "seranishi.hiroshima.jp", + "shinichi.hiroshima.jp", + "shobara.hiroshima.jp", + "takehara.hiroshima.jp", + "abashiri.hokkaido.jp", + "abira.hokkaido.jp", + "aibetsu.hokkaido.jp", + "akabira.hokkaido.jp", + "akkeshi.hokkaido.jp", + "asahikawa.hokkaido.jp", + "ashibetsu.hokkaido.jp", + "ashoro.hokkaido.jp", + "assabu.hokkaido.jp", + "atsuma.hokkaido.jp", + "bibai.hokkaido.jp", + "biei.hokkaido.jp", + "bifuka.hokkaido.jp", + "bihoro.hokkaido.jp", + "biratori.hokkaido.jp", + "chippubetsu.hokkaido.jp", + "chitose.hokkaido.jp", + "date.hokkaido.jp", + "ebetsu.hokkaido.jp", + "embetsu.hokkaido.jp", + "eniwa.hokkaido.jp", + "erimo.hokkaido.jp", + "esan.hokkaido.jp", + "esashi.hokkaido.jp", + "fukagawa.hokkaido.jp", + "fukushima.hokkaido.jp", + "furano.hokkaido.jp", + "furubira.hokkaido.jp", + "haboro.hokkaido.jp", + "hakodate.hokkaido.jp", + "hamatonbetsu.hokkaido.jp", + "hidaka.hokkaido.jp", + "higashikagura.hokkaido.jp", + "higashikawa.hokkaido.jp", + "hiroo.hokkaido.jp", + "hokuryu.hokkaido.jp", + "hokuto.hokkaido.jp", + "honbetsu.hokkaido.jp", + "horokanai.hokkaido.jp", + "horonobe.hokkaido.jp", + "ikeda.hokkaido.jp", + "imakane.hokkaido.jp", + "ishikari.hokkaido.jp", + "iwamizawa.hokkaido.jp", + "iwanai.hokkaido.jp", + "kamifurano.hokkaido.jp", + "kamikawa.hokkaido.jp", + "kamishihoro.hokkaido.jp", + "kamisunagawa.hokkaido.jp", + "kamoenai.hokkaido.jp", + "kayabe.hokkaido.jp", + "kembuchi.hokkaido.jp", + "kikonai.hokkaido.jp", + "kimobetsu.hokkaido.jp", + "kitahiroshima.hokkaido.jp", + "kitami.hokkaido.jp", + "kiyosato.hokkaido.jp", + "koshimizu.hokkaido.jp", + "kunneppu.hokkaido.jp", + "kuriyama.hokkaido.jp", + "kuromatsunai.hokkaido.jp", + "kushiro.hokkaido.jp", + "kutchan.hokkaido.jp", + "kyowa.hokkaido.jp", + "mashike.hokkaido.jp", + "matsumae.hokkaido.jp", + "mikasa.hokkaido.jp", + "minamifurano.hokkaido.jp", + "mombetsu.hokkaido.jp", + "moseushi.hokkaido.jp", + "mukawa.hokkaido.jp", + "muroran.hokkaido.jp", + "naie.hokkaido.jp", + "nakagawa.hokkaido.jp", + "nakasatsunai.hokkaido.jp", + "nakatombetsu.hokkaido.jp", + "nanae.hokkaido.jp", + "nanporo.hokkaido.jp", + "nayoro.hokkaido.jp", + "nemuro.hokkaido.jp", + "niikappu.hokkaido.jp", + "niki.hokkaido.jp", + "nishiokoppe.hokkaido.jp", + "noboribetsu.hokkaido.jp", + "numata.hokkaido.jp", + "obihiro.hokkaido.jp", + "obira.hokkaido.jp", + "oketo.hokkaido.jp", + "okoppe.hokkaido.jp", + "otaru.hokkaido.jp", + "otobe.hokkaido.jp", + "otofuke.hokkaido.jp", + "otoineppu.hokkaido.jp", + "oumu.hokkaido.jp", + "ozora.hokkaido.jp", + 
"pippu.hokkaido.jp", + "rankoshi.hokkaido.jp", + "rebun.hokkaido.jp", + "rikubetsu.hokkaido.jp", + "rishiri.hokkaido.jp", + "rishirifuji.hokkaido.jp", + "saroma.hokkaido.jp", + "sarufutsu.hokkaido.jp", + "shakotan.hokkaido.jp", + "shari.hokkaido.jp", + "shibecha.hokkaido.jp", + "shibetsu.hokkaido.jp", + "shikabe.hokkaido.jp", + "shikaoi.hokkaido.jp", + "shimamaki.hokkaido.jp", + "shimizu.hokkaido.jp", + "shimokawa.hokkaido.jp", + "shinshinotsu.hokkaido.jp", + "shintoku.hokkaido.jp", + "shiranuka.hokkaido.jp", + "shiraoi.hokkaido.jp", + "shiriuchi.hokkaido.jp", + "sobetsu.hokkaido.jp", + "sunagawa.hokkaido.jp", + "taiki.hokkaido.jp", + "takasu.hokkaido.jp", + "takikawa.hokkaido.jp", + "takinoue.hokkaido.jp", + "teshikaga.hokkaido.jp", + "tobetsu.hokkaido.jp", + "tohma.hokkaido.jp", + "tomakomai.hokkaido.jp", + "tomari.hokkaido.jp", + "toya.hokkaido.jp", + "toyako.hokkaido.jp", + "toyotomi.hokkaido.jp", + "toyoura.hokkaido.jp", + "tsubetsu.hokkaido.jp", + "tsukigata.hokkaido.jp", + "urakawa.hokkaido.jp", + "urausu.hokkaido.jp", + "uryu.hokkaido.jp", + "utashinai.hokkaido.jp", + "wakkanai.hokkaido.jp", + "wassamu.hokkaido.jp", + "yakumo.hokkaido.jp", + "yoichi.hokkaido.jp", + "aioi.hyogo.jp", + "akashi.hyogo.jp", + "ako.hyogo.jp", + "amagasaki.hyogo.jp", + "aogaki.hyogo.jp", + "asago.hyogo.jp", + "ashiya.hyogo.jp", + "awaji.hyogo.jp", + "fukusaki.hyogo.jp", + "goshiki.hyogo.jp", + "harima.hyogo.jp", + "himeji.hyogo.jp", + "ichikawa.hyogo.jp", + "inagawa.hyogo.jp", + "itami.hyogo.jp", + "kakogawa.hyogo.jp", + "kamigori.hyogo.jp", + "kamikawa.hyogo.jp", + "kasai.hyogo.jp", + "kasuga.hyogo.jp", + "kawanishi.hyogo.jp", + "miki.hyogo.jp", + "minamiawaji.hyogo.jp", + "nishinomiya.hyogo.jp", + "nishiwaki.hyogo.jp", + "ono.hyogo.jp", + "sanda.hyogo.jp", + "sannan.hyogo.jp", + "sasayama.hyogo.jp", + "sayo.hyogo.jp", + "shingu.hyogo.jp", + "shinonsen.hyogo.jp", + "shiso.hyogo.jp", + "sumoto.hyogo.jp", + "taishi.hyogo.jp", + "taka.hyogo.jp", + "takarazuka.hyogo.jp", + "takasago.hyogo.jp", + "takino.hyogo.jp", + "tamba.hyogo.jp", + "tatsuno.hyogo.jp", + "toyooka.hyogo.jp", + "yabu.hyogo.jp", + "yashiro.hyogo.jp", + "yoka.hyogo.jp", + "yokawa.hyogo.jp", + "ami.ibaraki.jp", + "asahi.ibaraki.jp", + "bando.ibaraki.jp", + "chikusei.ibaraki.jp", + "daigo.ibaraki.jp", + "fujishiro.ibaraki.jp", + "hitachi.ibaraki.jp", + "hitachinaka.ibaraki.jp", + "hitachiomiya.ibaraki.jp", + "hitachiota.ibaraki.jp", + "ibaraki.ibaraki.jp", + "ina.ibaraki.jp", + "inashiki.ibaraki.jp", + "itako.ibaraki.jp", + "iwama.ibaraki.jp", + "joso.ibaraki.jp", + "kamisu.ibaraki.jp", + "kasama.ibaraki.jp", + "kashima.ibaraki.jp", + "kasumigaura.ibaraki.jp", + "koga.ibaraki.jp", + "miho.ibaraki.jp", + "mito.ibaraki.jp", + "moriya.ibaraki.jp", + "naka.ibaraki.jp", + "namegata.ibaraki.jp", + "oarai.ibaraki.jp", + "ogawa.ibaraki.jp", + "omitama.ibaraki.jp", + "ryugasaki.ibaraki.jp", + "sakai.ibaraki.jp", + "sakuragawa.ibaraki.jp", + "shimodate.ibaraki.jp", + "shimotsuma.ibaraki.jp", + "shirosato.ibaraki.jp", + "sowa.ibaraki.jp", + "suifu.ibaraki.jp", + "takahagi.ibaraki.jp", + "tamatsukuri.ibaraki.jp", + "tokai.ibaraki.jp", + "tomobe.ibaraki.jp", + "tone.ibaraki.jp", + "toride.ibaraki.jp", + "tsuchiura.ibaraki.jp", + "tsukuba.ibaraki.jp", + "uchihara.ibaraki.jp", + "ushiku.ibaraki.jp", + "yachiyo.ibaraki.jp", + "yamagata.ibaraki.jp", + "yawara.ibaraki.jp", + "yuki.ibaraki.jp", + "anamizu.ishikawa.jp", + "hakui.ishikawa.jp", + "hakusan.ishikawa.jp", + "kaga.ishikawa.jp", + "kahoku.ishikawa.jp", + "kanazawa.ishikawa.jp", + "kawakita.ishikawa.jp", 
+ "komatsu.ishikawa.jp", + "nakanoto.ishikawa.jp", + "nanao.ishikawa.jp", + "nomi.ishikawa.jp", + "nonoichi.ishikawa.jp", + "noto.ishikawa.jp", + "shika.ishikawa.jp", + "suzu.ishikawa.jp", + "tsubata.ishikawa.jp", + "tsurugi.ishikawa.jp", + "uchinada.ishikawa.jp", + "wajima.ishikawa.jp", + "fudai.iwate.jp", + "fujisawa.iwate.jp", + "hanamaki.iwate.jp", + "hiraizumi.iwate.jp", + "hirono.iwate.jp", + "ichinohe.iwate.jp", + "ichinoseki.iwate.jp", + "iwaizumi.iwate.jp", + "iwate.iwate.jp", + "joboji.iwate.jp", + "kamaishi.iwate.jp", + "kanegasaki.iwate.jp", + "karumai.iwate.jp", + "kawai.iwate.jp", + "kitakami.iwate.jp", + "kuji.iwate.jp", + "kunohe.iwate.jp", + "kuzumaki.iwate.jp", + "miyako.iwate.jp", + "mizusawa.iwate.jp", + "morioka.iwate.jp", + "ninohe.iwate.jp", + "noda.iwate.jp", + "ofunato.iwate.jp", + "oshu.iwate.jp", + "otsuchi.iwate.jp", + "rikuzentakata.iwate.jp", + "shiwa.iwate.jp", + "shizukuishi.iwate.jp", + "sumita.iwate.jp", + "tanohata.iwate.jp", + "tono.iwate.jp", + "yahaba.iwate.jp", + "yamada.iwate.jp", + "ayagawa.kagawa.jp", + "higashikagawa.kagawa.jp", + "kanonji.kagawa.jp", + "kotohira.kagawa.jp", + "manno.kagawa.jp", + "marugame.kagawa.jp", + "mitoyo.kagawa.jp", + "naoshima.kagawa.jp", + "sanuki.kagawa.jp", + "tadotsu.kagawa.jp", + "takamatsu.kagawa.jp", + "tonosho.kagawa.jp", + "uchinomi.kagawa.jp", + "utazu.kagawa.jp", + "zentsuji.kagawa.jp", + "akune.kagoshima.jp", + "amami.kagoshima.jp", + "hioki.kagoshima.jp", + "isa.kagoshima.jp", + "isen.kagoshima.jp", + "izumi.kagoshima.jp", + "kagoshima.kagoshima.jp", + "kanoya.kagoshima.jp", + "kawanabe.kagoshima.jp", + "kinko.kagoshima.jp", + "kouyama.kagoshima.jp", + "makurazaki.kagoshima.jp", + "matsumoto.kagoshima.jp", + "minamitane.kagoshima.jp", + "nakatane.kagoshima.jp", + "nishinoomote.kagoshima.jp", + "satsumasendai.kagoshima.jp", + "soo.kagoshima.jp", + "tarumizu.kagoshima.jp", + "yusui.kagoshima.jp", + "aikawa.kanagawa.jp", + "atsugi.kanagawa.jp", + "ayase.kanagawa.jp", + "chigasaki.kanagawa.jp", + "ebina.kanagawa.jp", + "fujisawa.kanagawa.jp", + "hadano.kanagawa.jp", + "hakone.kanagawa.jp", + "hiratsuka.kanagawa.jp", + "isehara.kanagawa.jp", + "kaisei.kanagawa.jp", + "kamakura.kanagawa.jp", + "kiyokawa.kanagawa.jp", + "matsuda.kanagawa.jp", + "minamiashigara.kanagawa.jp", + "miura.kanagawa.jp", + "nakai.kanagawa.jp", + "ninomiya.kanagawa.jp", + "odawara.kanagawa.jp", + "oi.kanagawa.jp", + "oiso.kanagawa.jp", + "sagamihara.kanagawa.jp", + "samukawa.kanagawa.jp", + "tsukui.kanagawa.jp", + "yamakita.kanagawa.jp", + "yamato.kanagawa.jp", + "yokosuka.kanagawa.jp", + "yugawara.kanagawa.jp", + "zama.kanagawa.jp", + "zushi.kanagawa.jp", + "aki.kochi.jp", + "geisei.kochi.jp", + "hidaka.kochi.jp", + "higashitsuno.kochi.jp", + "ino.kochi.jp", + "kagami.kochi.jp", + "kami.kochi.jp", + "kitagawa.kochi.jp", + "kochi.kochi.jp", + "mihara.kochi.jp", + "motoyama.kochi.jp", + "muroto.kochi.jp", + "nahari.kochi.jp", + "nakamura.kochi.jp", + "nankoku.kochi.jp", + "nishitosa.kochi.jp", + "niyodogawa.kochi.jp", + "ochi.kochi.jp", + "okawa.kochi.jp", + "otoyo.kochi.jp", + "otsuki.kochi.jp", + "sakawa.kochi.jp", + "sukumo.kochi.jp", + "susaki.kochi.jp", + "tosa.kochi.jp", + "tosashimizu.kochi.jp", + "toyo.kochi.jp", + "tsuno.kochi.jp", + "umaji.kochi.jp", + "yasuda.kochi.jp", + "yusuhara.kochi.jp", + "amakusa.kumamoto.jp", + "arao.kumamoto.jp", + "aso.kumamoto.jp", + "choyo.kumamoto.jp", + "gyokuto.kumamoto.jp", + "kamiamakusa.kumamoto.jp", + "kikuchi.kumamoto.jp", + "kumamoto.kumamoto.jp", + "mashiki.kumamoto.jp", + 
"mifune.kumamoto.jp", + "minamata.kumamoto.jp", + "minamioguni.kumamoto.jp", + "nagasu.kumamoto.jp", + "nishihara.kumamoto.jp", + "oguni.kumamoto.jp", + "ozu.kumamoto.jp", + "sumoto.kumamoto.jp", + "takamori.kumamoto.jp", + "uki.kumamoto.jp", + "uto.kumamoto.jp", + "yamaga.kumamoto.jp", + "yamato.kumamoto.jp", + "yatsushiro.kumamoto.jp", + "ayabe.kyoto.jp", + "fukuchiyama.kyoto.jp", + "higashiyama.kyoto.jp", + "ide.kyoto.jp", + "ine.kyoto.jp", + "joyo.kyoto.jp", + "kameoka.kyoto.jp", + "kamo.kyoto.jp", + "kita.kyoto.jp", + "kizu.kyoto.jp", + "kumiyama.kyoto.jp", + "kyotamba.kyoto.jp", + "kyotanabe.kyoto.jp", + "kyotango.kyoto.jp", + "maizuru.kyoto.jp", + "minami.kyoto.jp", + "minamiyamashiro.kyoto.jp", + "miyazu.kyoto.jp", + "muko.kyoto.jp", + "nagaokakyo.kyoto.jp", + "nakagyo.kyoto.jp", + "nantan.kyoto.jp", + "oyamazaki.kyoto.jp", + "sakyo.kyoto.jp", + "seika.kyoto.jp", + "tanabe.kyoto.jp", + "uji.kyoto.jp", + "ujitawara.kyoto.jp", + "wazuka.kyoto.jp", + "yamashina.kyoto.jp", + "yawata.kyoto.jp", + "asahi.mie.jp", + "inabe.mie.jp", + "ise.mie.jp", + "kameyama.mie.jp", + "kawagoe.mie.jp", + "kiho.mie.jp", + "kisosaki.mie.jp", + "kiwa.mie.jp", + "komono.mie.jp", + "kumano.mie.jp", + "kuwana.mie.jp", + "matsusaka.mie.jp", + "meiwa.mie.jp", + "mihama.mie.jp", + "minamiise.mie.jp", + "misugi.mie.jp", + "miyama.mie.jp", + "nabari.mie.jp", + "shima.mie.jp", + "suzuka.mie.jp", + "tado.mie.jp", + "taiki.mie.jp", + "taki.mie.jp", + "tamaki.mie.jp", + "toba.mie.jp", + "tsu.mie.jp", + "udono.mie.jp", + "ureshino.mie.jp", + "watarai.mie.jp", + "yokkaichi.mie.jp", + "furukawa.miyagi.jp", + "higashimatsushima.miyagi.jp", + "ishinomaki.miyagi.jp", + "iwanuma.miyagi.jp", + "kakuda.miyagi.jp", + "kami.miyagi.jp", + "kawasaki.miyagi.jp", + "marumori.miyagi.jp", + "matsushima.miyagi.jp", + "minamisanriku.miyagi.jp", + "misato.miyagi.jp", + "murata.miyagi.jp", + "natori.miyagi.jp", + "ogawara.miyagi.jp", + "ohira.miyagi.jp", + "onagawa.miyagi.jp", + "osaki.miyagi.jp", + "rifu.miyagi.jp", + "semine.miyagi.jp", + "shibata.miyagi.jp", + "shichikashuku.miyagi.jp", + "shikama.miyagi.jp", + "shiogama.miyagi.jp", + "shiroishi.miyagi.jp", + "tagajo.miyagi.jp", + "taiwa.miyagi.jp", + "tome.miyagi.jp", + "tomiya.miyagi.jp", + "wakuya.miyagi.jp", + "watari.miyagi.jp", + "yamamoto.miyagi.jp", + "zao.miyagi.jp", + "aya.miyazaki.jp", + "ebino.miyazaki.jp", + "gokase.miyazaki.jp", + "hyuga.miyazaki.jp", + "kadogawa.miyazaki.jp", + "kawaminami.miyazaki.jp", + "kijo.miyazaki.jp", + "kitagawa.miyazaki.jp", + "kitakata.miyazaki.jp", + "kitaura.miyazaki.jp", + "kobayashi.miyazaki.jp", + "kunitomi.miyazaki.jp", + "kushima.miyazaki.jp", + "mimata.miyazaki.jp", + "miyakonojo.miyazaki.jp", + "miyazaki.miyazaki.jp", + "morotsuka.miyazaki.jp", + "nichinan.miyazaki.jp", + "nishimera.miyazaki.jp", + "nobeoka.miyazaki.jp", + "saito.miyazaki.jp", + "shiiba.miyazaki.jp", + "shintomi.miyazaki.jp", + "takaharu.miyazaki.jp", + "takanabe.miyazaki.jp", + "takazaki.miyazaki.jp", + "tsuno.miyazaki.jp", + "achi.nagano.jp", + "agematsu.nagano.jp", + "anan.nagano.jp", + "aoki.nagano.jp", + "asahi.nagano.jp", + "azumino.nagano.jp", + "chikuhoku.nagano.jp", + "chikuma.nagano.jp", + "chino.nagano.jp", + "fujimi.nagano.jp", + "hakuba.nagano.jp", + "hara.nagano.jp", + "hiraya.nagano.jp", + "iida.nagano.jp", + "iijima.nagano.jp", + "iiyama.nagano.jp", + "iizuna.nagano.jp", + "ikeda.nagano.jp", + "ikusaka.nagano.jp", + "ina.nagano.jp", + "karuizawa.nagano.jp", + "kawakami.nagano.jp", + "kiso.nagano.jp", + "kisofukushima.nagano.jp", + "kitaaiki.nagano.jp", 
+ "komagane.nagano.jp", + "komoro.nagano.jp", + "matsukawa.nagano.jp", + "matsumoto.nagano.jp", + "miasa.nagano.jp", + "minamiaiki.nagano.jp", + "minamimaki.nagano.jp", + "minamiminowa.nagano.jp", + "minowa.nagano.jp", + "miyada.nagano.jp", + "miyota.nagano.jp", + "mochizuki.nagano.jp", + "nagano.nagano.jp", + "nagawa.nagano.jp", + "nagiso.nagano.jp", + "nakagawa.nagano.jp", + "nakano.nagano.jp", + "nozawaonsen.nagano.jp", + "obuse.nagano.jp", + "ogawa.nagano.jp", + "okaya.nagano.jp", + "omachi.nagano.jp", + "omi.nagano.jp", + "ookuwa.nagano.jp", + "ooshika.nagano.jp", + "otaki.nagano.jp", + "otari.nagano.jp", + "sakae.nagano.jp", + "sakaki.nagano.jp", + "saku.nagano.jp", + "sakuho.nagano.jp", + "shimosuwa.nagano.jp", + "shinanomachi.nagano.jp", + "shiojiri.nagano.jp", + "suwa.nagano.jp", + "suzaka.nagano.jp", + "takagi.nagano.jp", + "takamori.nagano.jp", + "takayama.nagano.jp", + "tateshina.nagano.jp", + "tatsuno.nagano.jp", + "togakushi.nagano.jp", + "togura.nagano.jp", + "tomi.nagano.jp", + "ueda.nagano.jp", + "wada.nagano.jp", + "yamagata.nagano.jp", + "yamanouchi.nagano.jp", + "yasaka.nagano.jp", + "yasuoka.nagano.jp", + "chijiwa.nagasaki.jp", + "futsu.nagasaki.jp", + "goto.nagasaki.jp", + "hasami.nagasaki.jp", + "hirado.nagasaki.jp", + "iki.nagasaki.jp", + "isahaya.nagasaki.jp", + "kawatana.nagasaki.jp", + "kuchinotsu.nagasaki.jp", + "matsuura.nagasaki.jp", + "nagasaki.nagasaki.jp", + "obama.nagasaki.jp", + "omura.nagasaki.jp", + "oseto.nagasaki.jp", + "saikai.nagasaki.jp", + "sasebo.nagasaki.jp", + "seihi.nagasaki.jp", + "shimabara.nagasaki.jp", + "shinkamigoto.nagasaki.jp", + "togitsu.nagasaki.jp", + "tsushima.nagasaki.jp", + "unzen.nagasaki.jp", + "ando.nara.jp", + "gose.nara.jp", + "heguri.nara.jp", + "higashiyoshino.nara.jp", + "ikaruga.nara.jp", + "ikoma.nara.jp", + "kamikitayama.nara.jp", + "kanmaki.nara.jp", + "kashiba.nara.jp", + "kashihara.nara.jp", + "katsuragi.nara.jp", + "kawai.nara.jp", + "kawakami.nara.jp", + "kawanishi.nara.jp", + "koryo.nara.jp", + "kurotaki.nara.jp", + "mitsue.nara.jp", + "miyake.nara.jp", + "nara.nara.jp", + "nosegawa.nara.jp", + "oji.nara.jp", + "ouda.nara.jp", + "oyodo.nara.jp", + "sakurai.nara.jp", + "sango.nara.jp", + "shimoichi.nara.jp", + "shimokitayama.nara.jp", + "shinjo.nara.jp", + "soni.nara.jp", + "takatori.nara.jp", + "tawaramoto.nara.jp", + "tenkawa.nara.jp", + "tenri.nara.jp", + "uda.nara.jp", + "yamatokoriyama.nara.jp", + "yamatotakada.nara.jp", + "yamazoe.nara.jp", + "yoshino.nara.jp", + "aga.niigata.jp", + "agano.niigata.jp", + "gosen.niigata.jp", + "itoigawa.niigata.jp", + "izumozaki.niigata.jp", + "joetsu.niigata.jp", + "kamo.niigata.jp", + "kariwa.niigata.jp", + "kashiwazaki.niigata.jp", + "minamiuonuma.niigata.jp", + "mitsuke.niigata.jp", + "muika.niigata.jp", + "murakami.niigata.jp", + "myoko.niigata.jp", + "nagaoka.niigata.jp", + "niigata.niigata.jp", + "ojiya.niigata.jp", + "omi.niigata.jp", + "sado.niigata.jp", + "sanjo.niigata.jp", + "seiro.niigata.jp", + "seirou.niigata.jp", + "sekikawa.niigata.jp", + "shibata.niigata.jp", + "tagami.niigata.jp", + "tainai.niigata.jp", + "tochio.niigata.jp", + "tokamachi.niigata.jp", + "tsubame.niigata.jp", + "tsunan.niigata.jp", + "uonuma.niigata.jp", + "yahiko.niigata.jp", + "yoita.niigata.jp", + "yuzawa.niigata.jp", + "beppu.oita.jp", + "bungoono.oita.jp", + "bungotakada.oita.jp", + "hasama.oita.jp", + "hiji.oita.jp", + "himeshima.oita.jp", + "hita.oita.jp", + "kamitsue.oita.jp", + "kokonoe.oita.jp", + "kuju.oita.jp", + "kunisaki.oita.jp", + "kusu.oita.jp", + "oita.oita.jp", + 
"saiki.oita.jp", + "taketa.oita.jp", + "tsukumi.oita.jp", + "usa.oita.jp", + "usuki.oita.jp", + "yufu.oita.jp", + "akaiwa.okayama.jp", + "asakuchi.okayama.jp", + "bizen.okayama.jp", + "hayashima.okayama.jp", + "ibara.okayama.jp", + "kagamino.okayama.jp", + "kasaoka.okayama.jp", + "kibichuo.okayama.jp", + "kumenan.okayama.jp", + "kurashiki.okayama.jp", + "maniwa.okayama.jp", + "misaki.okayama.jp", + "nagi.okayama.jp", + "niimi.okayama.jp", + "nishiawakura.okayama.jp", + "okayama.okayama.jp", + "satosho.okayama.jp", + "setouchi.okayama.jp", + "shinjo.okayama.jp", + "shoo.okayama.jp", + "soja.okayama.jp", + "takahashi.okayama.jp", + "tamano.okayama.jp", + "tsuyama.okayama.jp", + "wake.okayama.jp", + "yakage.okayama.jp", + "aguni.okinawa.jp", + "ginowan.okinawa.jp", + "ginoza.okinawa.jp", + "gushikami.okinawa.jp", + "haebaru.okinawa.jp", + "higashi.okinawa.jp", + "hirara.okinawa.jp", + "iheya.okinawa.jp", + "ishigaki.okinawa.jp", + "ishikawa.okinawa.jp", + "itoman.okinawa.jp", + "izena.okinawa.jp", + "kadena.okinawa.jp", + "kin.okinawa.jp", + "kitadaito.okinawa.jp", + "kitanakagusuku.okinawa.jp", + "kumejima.okinawa.jp", + "kunigami.okinawa.jp", + "minamidaito.okinawa.jp", + "motobu.okinawa.jp", + "nago.okinawa.jp", + "naha.okinawa.jp", + "nakagusuku.okinawa.jp", + "nakijin.okinawa.jp", + "nanjo.okinawa.jp", + "nishihara.okinawa.jp", + "ogimi.okinawa.jp", + "okinawa.okinawa.jp", + "onna.okinawa.jp", + "shimoji.okinawa.jp", + "taketomi.okinawa.jp", + "tarama.okinawa.jp", + "tokashiki.okinawa.jp", + "tomigusuku.okinawa.jp", + "tonaki.okinawa.jp", + "urasoe.okinawa.jp", + "uruma.okinawa.jp", + "yaese.okinawa.jp", + "yomitan.okinawa.jp", + "yonabaru.okinawa.jp", + "yonaguni.okinawa.jp", + "zamami.okinawa.jp", + "abeno.osaka.jp", + "chihayaakasaka.osaka.jp", + "chuo.osaka.jp", + "daito.osaka.jp", + "fujiidera.osaka.jp", + "habikino.osaka.jp", + "hannan.osaka.jp", + "higashiosaka.osaka.jp", + "higashisumiyoshi.osaka.jp", + "higashiyodogawa.osaka.jp", + "hirakata.osaka.jp", + "ibaraki.osaka.jp", + "ikeda.osaka.jp", + "izumi.osaka.jp", + "izumiotsu.osaka.jp", + "izumisano.osaka.jp", + "kadoma.osaka.jp", + "kaizuka.osaka.jp", + "kanan.osaka.jp", + "kashiwara.osaka.jp", + "katano.osaka.jp", + "kawachinagano.osaka.jp", + "kishiwada.osaka.jp", + "kita.osaka.jp", + "kumatori.osaka.jp", + "matsubara.osaka.jp", + "minato.osaka.jp", + "minoh.osaka.jp", + "misaki.osaka.jp", + "moriguchi.osaka.jp", + "neyagawa.osaka.jp", + "nishi.osaka.jp", + "nose.osaka.jp", + "osakasayama.osaka.jp", + "sakai.osaka.jp", + "sayama.osaka.jp", + "sennan.osaka.jp", + "settsu.osaka.jp", + "shijonawate.osaka.jp", + "shimamoto.osaka.jp", + "suita.osaka.jp", + "tadaoka.osaka.jp", + "taishi.osaka.jp", + "tajiri.osaka.jp", + "takaishi.osaka.jp", + "takatsuki.osaka.jp", + "tondabayashi.osaka.jp", + "toyonaka.osaka.jp", + "toyono.osaka.jp", + "yao.osaka.jp", + "ariake.saga.jp", + "arita.saga.jp", + "fukudomi.saga.jp", + "genkai.saga.jp", + "hamatama.saga.jp", + "hizen.saga.jp", + "imari.saga.jp", + "kamimine.saga.jp", + "kanzaki.saga.jp", + "karatsu.saga.jp", + "kashima.saga.jp", + "kitagata.saga.jp", + "kitahata.saga.jp", + "kiyama.saga.jp", + "kouhoku.saga.jp", + "kyuragi.saga.jp", + "nishiarita.saga.jp", + "ogi.saga.jp", + "omachi.saga.jp", + "ouchi.saga.jp", + "saga.saga.jp", + "shiroishi.saga.jp", + "taku.saga.jp", + "tara.saga.jp", + "tosu.saga.jp", + "yoshinogari.saga.jp", + "arakawa.saitama.jp", + "asaka.saitama.jp", + "chichibu.saitama.jp", + "fujimi.saitama.jp", + "fujimino.saitama.jp", + "fukaya.saitama.jp", + 
"hanno.saitama.jp", + "hanyu.saitama.jp", + "hasuda.saitama.jp", + "hatogaya.saitama.jp", + "hatoyama.saitama.jp", + "hidaka.saitama.jp", + "higashichichibu.saitama.jp", + "higashimatsuyama.saitama.jp", + "honjo.saitama.jp", + "ina.saitama.jp", + "iruma.saitama.jp", + "iwatsuki.saitama.jp", + "kamiizumi.saitama.jp", + "kamikawa.saitama.jp", + "kamisato.saitama.jp", + "kasukabe.saitama.jp", + "kawagoe.saitama.jp", + "kawaguchi.saitama.jp", + "kawajima.saitama.jp", + "kazo.saitama.jp", + "kitamoto.saitama.jp", + "koshigaya.saitama.jp", + "kounosu.saitama.jp", + "kuki.saitama.jp", + "kumagaya.saitama.jp", + "matsubushi.saitama.jp", + "minano.saitama.jp", + "misato.saitama.jp", + "miyashiro.saitama.jp", + "miyoshi.saitama.jp", + "moroyama.saitama.jp", + "nagatoro.saitama.jp", + "namegawa.saitama.jp", + "niiza.saitama.jp", + "ogano.saitama.jp", + "ogawa.saitama.jp", + "ogose.saitama.jp", + "okegawa.saitama.jp", + "omiya.saitama.jp", + "otaki.saitama.jp", + "ranzan.saitama.jp", + "ryokami.saitama.jp", + "saitama.saitama.jp", + "sakado.saitama.jp", + "satte.saitama.jp", + "sayama.saitama.jp", + "shiki.saitama.jp", + "shiraoka.saitama.jp", + "soka.saitama.jp", + "sugito.saitama.jp", + "toda.saitama.jp", + "tokigawa.saitama.jp", + "tokorozawa.saitama.jp", + "tsurugashima.saitama.jp", + "urawa.saitama.jp", + "warabi.saitama.jp", + "yashio.saitama.jp", + "yokoze.saitama.jp", + "yono.saitama.jp", + "yorii.saitama.jp", + "yoshida.saitama.jp", + "yoshikawa.saitama.jp", + "yoshimi.saitama.jp", + "aisho.shiga.jp", + "gamo.shiga.jp", + "higashiomi.shiga.jp", + "hikone.shiga.jp", + "koka.shiga.jp", + "konan.shiga.jp", + "kosei.shiga.jp", + "koto.shiga.jp", + "kusatsu.shiga.jp", + "maibara.shiga.jp", + "moriyama.shiga.jp", + "nagahama.shiga.jp", + "nishiazai.shiga.jp", + "notogawa.shiga.jp", + "omihachiman.shiga.jp", + "otsu.shiga.jp", + "ritto.shiga.jp", + "ryuoh.shiga.jp", + "takashima.shiga.jp", + "takatsuki.shiga.jp", + "torahime.shiga.jp", + "toyosato.shiga.jp", + "yasu.shiga.jp", + "akagi.shimane.jp", + "ama.shimane.jp", + "gotsu.shimane.jp", + "hamada.shimane.jp", + "higashiizumo.shimane.jp", + "hikawa.shimane.jp", + "hikimi.shimane.jp", + "izumo.shimane.jp", + "kakinoki.shimane.jp", + "masuda.shimane.jp", + "matsue.shimane.jp", + "misato.shimane.jp", + "nishinoshima.shimane.jp", + "ohda.shimane.jp", + "okinoshima.shimane.jp", + "okuizumo.shimane.jp", + "shimane.shimane.jp", + "tamayu.shimane.jp", + "tsuwano.shimane.jp", + "unnan.shimane.jp", + "yakumo.shimane.jp", + "yasugi.shimane.jp", + "yatsuka.shimane.jp", + "arai.shizuoka.jp", + "atami.shizuoka.jp", + "fuji.shizuoka.jp", + "fujieda.shizuoka.jp", + "fujikawa.shizuoka.jp", + "fujinomiya.shizuoka.jp", + "fukuroi.shizuoka.jp", + "gotemba.shizuoka.jp", + "haibara.shizuoka.jp", + "hamamatsu.shizuoka.jp", + "higashiizu.shizuoka.jp", + "ito.shizuoka.jp", + "iwata.shizuoka.jp", + "izu.shizuoka.jp", + "izunokuni.shizuoka.jp", + "kakegawa.shizuoka.jp", + "kannami.shizuoka.jp", + "kawanehon.shizuoka.jp", + "kawazu.shizuoka.jp", + "kikugawa.shizuoka.jp", + "kosai.shizuoka.jp", + "makinohara.shizuoka.jp", + "matsuzaki.shizuoka.jp", + "minamiizu.shizuoka.jp", + "mishima.shizuoka.jp", + "morimachi.shizuoka.jp", + "nishiizu.shizuoka.jp", + "numazu.shizuoka.jp", + "omaezaki.shizuoka.jp", + "shimada.shizuoka.jp", + "shimizu.shizuoka.jp", + "shimoda.shizuoka.jp", + "shizuoka.shizuoka.jp", + "susono.shizuoka.jp", + "yaizu.shizuoka.jp", + "yoshida.shizuoka.jp", + "ashikaga.tochigi.jp", + "bato.tochigi.jp", + "haga.tochigi.jp", + "ichikai.tochigi.jp", + 
"iwafune.tochigi.jp", + "kaminokawa.tochigi.jp", + "kanuma.tochigi.jp", + "karasuyama.tochigi.jp", + "kuroiso.tochigi.jp", + "mashiko.tochigi.jp", + "mibu.tochigi.jp", + "moka.tochigi.jp", + "motegi.tochigi.jp", + "nasu.tochigi.jp", + "nasushiobara.tochigi.jp", + "nikko.tochigi.jp", + "nishikata.tochigi.jp", + "nogi.tochigi.jp", + "ohira.tochigi.jp", + "ohtawara.tochigi.jp", + "oyama.tochigi.jp", + "sakura.tochigi.jp", + "sano.tochigi.jp", + "shimotsuke.tochigi.jp", + "shioya.tochigi.jp", + "takanezawa.tochigi.jp", + "tochigi.tochigi.jp", + "tsuga.tochigi.jp", + "ujiie.tochigi.jp", + "utsunomiya.tochigi.jp", + "yaita.tochigi.jp", + "aizumi.tokushima.jp", + "anan.tokushima.jp", + "ichiba.tokushima.jp", + "itano.tokushima.jp", + "kainan.tokushima.jp", + "komatsushima.tokushima.jp", + "matsushige.tokushima.jp", + "mima.tokushima.jp", + "minami.tokushima.jp", + "miyoshi.tokushima.jp", + "mugi.tokushima.jp", + "nakagawa.tokushima.jp", + "naruto.tokushima.jp", + "sanagochi.tokushima.jp", + "shishikui.tokushima.jp", + "tokushima.tokushima.jp", + "wajiki.tokushima.jp", + "adachi.tokyo.jp", + "akiruno.tokyo.jp", + "akishima.tokyo.jp", + "aogashima.tokyo.jp", + "arakawa.tokyo.jp", + "bunkyo.tokyo.jp", + "chiyoda.tokyo.jp", + "chofu.tokyo.jp", + "chuo.tokyo.jp", + "edogawa.tokyo.jp", + "fuchu.tokyo.jp", + "fussa.tokyo.jp", + "hachijo.tokyo.jp", + "hachioji.tokyo.jp", + "hamura.tokyo.jp", + "higashikurume.tokyo.jp", + "higashimurayama.tokyo.jp", + "higashiyamato.tokyo.jp", + "hino.tokyo.jp", + "hinode.tokyo.jp", + "hinohara.tokyo.jp", + "inagi.tokyo.jp", + "itabashi.tokyo.jp", + "katsushika.tokyo.jp", + "kita.tokyo.jp", + "kiyose.tokyo.jp", + "kodaira.tokyo.jp", + "koganei.tokyo.jp", + "kokubunji.tokyo.jp", + "komae.tokyo.jp", + "koto.tokyo.jp", + "kouzushima.tokyo.jp", + "kunitachi.tokyo.jp", + "machida.tokyo.jp", + "meguro.tokyo.jp", + "minato.tokyo.jp", + "mitaka.tokyo.jp", + "mizuho.tokyo.jp", + "musashimurayama.tokyo.jp", + "musashino.tokyo.jp", + "nakano.tokyo.jp", + "nerima.tokyo.jp", + "ogasawara.tokyo.jp", + "okutama.tokyo.jp", + "ome.tokyo.jp", + "oshima.tokyo.jp", + "ota.tokyo.jp", + "setagaya.tokyo.jp", + "shibuya.tokyo.jp", + "shinagawa.tokyo.jp", + "shinjuku.tokyo.jp", + "suginami.tokyo.jp", + "sumida.tokyo.jp", + "tachikawa.tokyo.jp", + "taito.tokyo.jp", + "tama.tokyo.jp", + "toshima.tokyo.jp", + "chizu.tottori.jp", + "hino.tottori.jp", + "kawahara.tottori.jp", + "koge.tottori.jp", + "kotoura.tottori.jp", + "misasa.tottori.jp", + "nanbu.tottori.jp", + "nichinan.tottori.jp", + "sakaiminato.tottori.jp", + "tottori.tottori.jp", + "wakasa.tottori.jp", + "yazu.tottori.jp", + "yonago.tottori.jp", + "asahi.toyama.jp", + "fuchu.toyama.jp", + "fukumitsu.toyama.jp", + "funahashi.toyama.jp", + "himi.toyama.jp", + "imizu.toyama.jp", + "inami.toyama.jp", + "johana.toyama.jp", + "kamiichi.toyama.jp", + "kurobe.toyama.jp", + "nakaniikawa.toyama.jp", + "namerikawa.toyama.jp", + "nanto.toyama.jp", + "nyuzen.toyama.jp", + "oyabe.toyama.jp", + "taira.toyama.jp", + "takaoka.toyama.jp", + "tateyama.toyama.jp", + "toga.toyama.jp", + "tonami.toyama.jp", + "toyama.toyama.jp", + "unazuki.toyama.jp", + "uozu.toyama.jp", + "yamada.toyama.jp", + "arida.wakayama.jp", + "aridagawa.wakayama.jp", + "gobo.wakayama.jp", + "hashimoto.wakayama.jp", + "hidaka.wakayama.jp", + "hirogawa.wakayama.jp", + "inami.wakayama.jp", + "iwade.wakayama.jp", + "kainan.wakayama.jp", + "kamitonda.wakayama.jp", + "katsuragi.wakayama.jp", + "kimino.wakayama.jp", + "kinokawa.wakayama.jp", + "kitayama.wakayama.jp", + "koya.wakayama.jp", + 
"koza.wakayama.jp", + "kozagawa.wakayama.jp", + "kudoyama.wakayama.jp", + "kushimoto.wakayama.jp", + "mihama.wakayama.jp", + "misato.wakayama.jp", + "nachikatsuura.wakayama.jp", + "shingu.wakayama.jp", + "shirahama.wakayama.jp", + "taiji.wakayama.jp", + "tanabe.wakayama.jp", + "wakayama.wakayama.jp", + "yuasa.wakayama.jp", + "yura.wakayama.jp", + "asahi.yamagata.jp", + "funagata.yamagata.jp", + "higashine.yamagata.jp", + "iide.yamagata.jp", + "kahoku.yamagata.jp", + "kaminoyama.yamagata.jp", + "kaneyama.yamagata.jp", + "kawanishi.yamagata.jp", + "mamurogawa.yamagata.jp", + "mikawa.yamagata.jp", + "murayama.yamagata.jp", + "nagai.yamagata.jp", + "nakayama.yamagata.jp", + "nanyo.yamagata.jp", + "nishikawa.yamagata.jp", + "obanazawa.yamagata.jp", + "oe.yamagata.jp", + "oguni.yamagata.jp", + "ohkura.yamagata.jp", + "oishida.yamagata.jp", + "sagae.yamagata.jp", + "sakata.yamagata.jp", + "sakegawa.yamagata.jp", + "shinjo.yamagata.jp", + "shirataka.yamagata.jp", + "shonai.yamagata.jp", + "takahata.yamagata.jp", + "tendo.yamagata.jp", + "tozawa.yamagata.jp", + "tsuruoka.yamagata.jp", + "yamagata.yamagata.jp", + "yamanobe.yamagata.jp", + "yonezawa.yamagata.jp", + "yuza.yamagata.jp", + "abu.yamaguchi.jp", + "hagi.yamaguchi.jp", + "hikari.yamaguchi.jp", + "hofu.yamaguchi.jp", + "iwakuni.yamaguchi.jp", + "kudamatsu.yamaguchi.jp", + "mitou.yamaguchi.jp", + "nagato.yamaguchi.jp", + "oshima.yamaguchi.jp", + "shimonoseki.yamaguchi.jp", + "shunan.yamaguchi.jp", + "tabuse.yamaguchi.jp", + "tokuyama.yamaguchi.jp", + "toyota.yamaguchi.jp", + "ube.yamaguchi.jp", + "yuu.yamaguchi.jp", + "chuo.yamanashi.jp", + "doshi.yamanashi.jp", + "fuefuki.yamanashi.jp", + "fujikawa.yamanashi.jp", + "fujikawaguchiko.yamanashi.jp", + "fujiyoshida.yamanashi.jp", + "hayakawa.yamanashi.jp", + "hokuto.yamanashi.jp", + "ichikawamisato.yamanashi.jp", + "kai.yamanashi.jp", + "kofu.yamanashi.jp", + "koshu.yamanashi.jp", + "kosuge.yamanashi.jp", + "minami-alps.yamanashi.jp", + "minobu.yamanashi.jp", + "nakamichi.yamanashi.jp", + "nanbu.yamanashi.jp", + "narusawa.yamanashi.jp", + "nirasaki.yamanashi.jp", + "nishikatsura.yamanashi.jp", + "oshino.yamanashi.jp", + "otsuki.yamanashi.jp", + "showa.yamanashi.jp", + "tabayama.yamanashi.jp", + "tsuru.yamanashi.jp", + "uenohara.yamanashi.jp", + "yamanakako.yamanashi.jp", + "yamanashi.yamanashi.jp", + "*.ke", + "kg", + "org.kg", + "net.kg", + "com.kg", + "edu.kg", + "gov.kg", + "mil.kg", + "*.kh", + "ki", + "edu.ki", + "biz.ki", + "net.ki", + "org.ki", + "gov.ki", + "info.ki", + "com.ki", + "km", + "org.km", + "nom.km", + "gov.km", + "prd.km", + "tm.km", + "edu.km", + "mil.km", + "ass.km", + "com.km", + "coop.km", + "asso.km", + "presse.km", + "medecin.km", + "notaires.km", + "pharmaciens.km", + "veterinaire.km", + "gouv.km", + "kn", + "net.kn", + "org.kn", + "edu.kn", + "gov.kn", + "kp", + "com.kp", + "edu.kp", + "gov.kp", + "org.kp", + "rep.kp", + "tra.kp", + "kr", + "ac.kr", + "co.kr", + "es.kr", + "go.kr", + "hs.kr", + "kg.kr", + "mil.kr", + "ms.kr", + "ne.kr", + "or.kr", + "pe.kr", + "re.kr", + "sc.kr", + "busan.kr", + "chungbuk.kr", + "chungnam.kr", + "daegu.kr", + "daejeon.kr", + "gangwon.kr", + "gwangju.kr", + "gyeongbuk.kr", + "gyeonggi.kr", + "gyeongnam.kr", + "incheon.kr", + "jeju.kr", + "jeonbuk.kr", + "jeonnam.kr", + "seoul.kr", + "ulsan.kr", + "*.kw", + "ky", + "edu.ky", + "gov.ky", + "com.ky", + "org.ky", + "net.ky", + "kz", + "org.kz", + "edu.kz", + "net.kz", + "gov.kz", + "mil.kz", + "com.kz", + "la", + "int.la", + "net.la", + "info.la", + "edu.la", + "gov.la", + "per.la", + 
"com.la", + "org.la", + "lb", + "com.lb", + "edu.lb", + "gov.lb", + "net.lb", + "org.lb", + "lc", + "com.lc", + "net.lc", + "co.lc", + "org.lc", + "edu.lc", + "gov.lc", + "li", + "lk", + "gov.lk", + "sch.lk", + "net.lk", + "int.lk", + "com.lk", + "org.lk", + "edu.lk", + "ngo.lk", + "soc.lk", + "web.lk", + "ltd.lk", + "assn.lk", + "grp.lk", + "hotel.lk", + "ac.lk", + "lr", + "com.lr", + "edu.lr", + "gov.lr", + "org.lr", + "net.lr", + "ls", + "co.ls", + "org.ls", + "lt", + "gov.lt", + "lu", + "lv", + "com.lv", + "edu.lv", + "gov.lv", + "org.lv", + "mil.lv", + "id.lv", + "net.lv", + "asn.lv", + "conf.lv", + "ly", + "com.ly", + "net.ly", + "gov.ly", + "plc.ly", + "edu.ly", + "sch.ly", + "med.ly", + "org.ly", + "id.ly", + "ma", + "co.ma", + "net.ma", + "gov.ma", + "org.ma", + "ac.ma", + "press.ma", + "mc", + "tm.mc", + "asso.mc", + "md", + "me", + "co.me", + "net.me", + "org.me", + "edu.me", + "ac.me", + "gov.me", + "its.me", + "priv.me", + "mg", + "org.mg", + "nom.mg", + "gov.mg", + "prd.mg", + "tm.mg", + "edu.mg", + "mil.mg", + "com.mg", + "co.mg", + "mh", + "mil", + "mk", + "com.mk", + "org.mk", + "net.mk", + "edu.mk", + "gov.mk", + "inf.mk", + "name.mk", + "ml", + "com.ml", + "edu.ml", + "gouv.ml", + "gov.ml", + "net.ml", + "org.ml", + "presse.ml", + "*.mm", + "mn", + "gov.mn", + "edu.mn", + "org.mn", + "mo", + "com.mo", + "net.mo", + "org.mo", + "edu.mo", + "gov.mo", + "mobi", + "mp", + "mq", + "mr", + "gov.mr", + "ms", + "com.ms", + "edu.ms", + "gov.ms", + "net.ms", + "org.ms", + "mt", + "com.mt", + "edu.mt", + "net.mt", + "org.mt", + "mu", + "com.mu", + "net.mu", + "org.mu", + "gov.mu", + "ac.mu", + "co.mu", + "or.mu", + "museum", + "academy.museum", + "agriculture.museum", + "air.museum", + "airguard.museum", + "alabama.museum", + "alaska.museum", + "amber.museum", + "ambulance.museum", + "american.museum", + "americana.museum", + "americanantiques.museum", + "americanart.museum", + "amsterdam.museum", + "and.museum", + "annefrank.museum", + "anthro.museum", + "anthropology.museum", + "antiques.museum", + "aquarium.museum", + "arboretum.museum", + "archaeological.museum", + "archaeology.museum", + "architecture.museum", + "art.museum", + "artanddesign.museum", + "artcenter.museum", + "artdeco.museum", + "arteducation.museum", + "artgallery.museum", + "arts.museum", + "artsandcrafts.museum", + "asmatart.museum", + "assassination.museum", + "assisi.museum", + "association.museum", + "astronomy.museum", + "atlanta.museum", + "austin.museum", + "australia.museum", + "automotive.museum", + "aviation.museum", + "axis.museum", + "badajoz.museum", + "baghdad.museum", + "bahn.museum", + "bale.museum", + "baltimore.museum", + "barcelona.museum", + "baseball.museum", + "basel.museum", + "baths.museum", + "bauern.museum", + "beauxarts.museum", + "beeldengeluid.museum", + "bellevue.museum", + "bergbau.museum", + "berkeley.museum", + "berlin.museum", + "bern.museum", + "bible.museum", + "bilbao.museum", + "bill.museum", + "birdart.museum", + "birthplace.museum", + "bonn.museum", + "boston.museum", + "botanical.museum", + "botanicalgarden.museum", + "botanicgarden.museum", + "botany.museum", + "brandywinevalley.museum", + "brasil.museum", + "bristol.museum", + "british.museum", + "britishcolumbia.museum", + "broadcast.museum", + "brunel.museum", + "brussel.museum", + "brussels.museum", + "bruxelles.museum", + "building.museum", + "burghof.museum", + "bus.museum", + "bushey.museum", + "cadaques.museum", + "california.museum", + "cambridge.museum", + "can.museum", + "canada.museum", + 
"capebreton.museum", + "carrier.museum", + "cartoonart.museum", + "casadelamoneda.museum", + "castle.museum", + "castres.museum", + "celtic.museum", + "center.museum", + "chattanooga.museum", + "cheltenham.museum", + "chesapeakebay.museum", + "chicago.museum", + "children.museum", + "childrens.museum", + "childrensgarden.museum", + "chiropractic.museum", + "chocolate.museum", + "christiansburg.museum", + "cincinnati.museum", + "cinema.museum", + "circus.museum", + "civilisation.museum", + "civilization.museum", + "civilwar.museum", + "clinton.museum", + "clock.museum", + "coal.museum", + "coastaldefence.museum", + "cody.museum", + "coldwar.museum", + "collection.museum", + "colonialwilliamsburg.museum", + "coloradoplateau.museum", + "columbia.museum", + "columbus.museum", + "communication.museum", + "communications.museum", + "community.museum", + "computer.museum", + "computerhistory.museum", + "xn--comunicaes-v6a2o.museum", + "contemporary.museum", + "contemporaryart.museum", + "convent.museum", + "copenhagen.museum", + "corporation.museum", + "xn--correios-e-telecomunicaes-ghc29a.museum", + "corvette.museum", + "costume.museum", + "countryestate.museum", + "county.museum", + "crafts.museum", + "cranbrook.museum", + "creation.museum", + "cultural.museum", + "culturalcenter.museum", + "culture.museum", + "cyber.museum", + "cymru.museum", + "dali.museum", + "dallas.museum", + "database.museum", + "ddr.museum", + "decorativearts.museum", + "delaware.museum", + "delmenhorst.museum", + "denmark.museum", + "depot.museum", + "design.museum", + "detroit.museum", + "dinosaur.museum", + "discovery.museum", + "dolls.museum", + "donostia.museum", + "durham.museum", + "eastafrica.museum", + "eastcoast.museum", + "education.museum", + "educational.museum", + "egyptian.museum", + "eisenbahn.museum", + "elburg.museum", + "elvendrell.museum", + "embroidery.museum", + "encyclopedic.museum", + "england.museum", + "entomology.museum", + "environment.museum", + "environmentalconservation.museum", + "epilepsy.museum", + "essex.museum", + "estate.museum", + "ethnology.museum", + "exeter.museum", + "exhibition.museum", + "family.museum", + "farm.museum", + "farmequipment.museum", + "farmers.museum", + "farmstead.museum", + "field.museum", + "figueres.museum", + "filatelia.museum", + "film.museum", + "fineart.museum", + "finearts.museum", + "finland.museum", + "flanders.museum", + "florida.museum", + "force.museum", + "fortmissoula.museum", + "fortworth.museum", + "foundation.museum", + "francaise.museum", + "frankfurt.museum", + "franziskaner.museum", + "freemasonry.museum", + "freiburg.museum", + "fribourg.museum", + "frog.museum", + "fundacio.museum", + "furniture.museum", + "gallery.museum", + "garden.museum", + "gateway.museum", + "geelvinck.museum", + "gemological.museum", + "geology.museum", + "georgia.museum", + "giessen.museum", + "glas.museum", + "glass.museum", + "gorge.museum", + "grandrapids.museum", + "graz.museum", + "guernsey.museum", + "halloffame.museum", + "hamburg.museum", + "handson.museum", + "harvestcelebration.museum", + "hawaii.museum", + "health.museum", + "heimatunduhren.museum", + "hellas.museum", + "helsinki.museum", + "hembygdsforbund.museum", + "heritage.museum", + "histoire.museum", + "historical.museum", + "historicalsociety.museum", + "historichouses.museum", + "historisch.museum", + "historisches.museum", + "history.museum", + "historyofscience.museum", + "horology.museum", + "house.museum", + "humanities.museum", + "illustration.museum", + "imageandsound.museum", + 
"indian.museum", + "indiana.museum", + "indianapolis.museum", + "indianmarket.museum", + "intelligence.museum", + "interactive.museum", + "iraq.museum", + "iron.museum", + "isleofman.museum", + "jamison.museum", + "jefferson.museum", + "jerusalem.museum", + "jewelry.museum", + "jewish.museum", + "jewishart.museum", + "jfk.museum", + "journalism.museum", + "judaica.museum", + "judygarland.museum", + "juedisches.museum", + "juif.museum", + "karate.museum", + "karikatur.museum", + "kids.museum", + "koebenhavn.museum", + "koeln.museum", + "kunst.museum", + "kunstsammlung.museum", + "kunstunddesign.museum", + "labor.museum", + "labour.museum", + "lajolla.museum", + "lancashire.museum", + "landes.museum", + "lans.museum", + "xn--lns-qla.museum", + "larsson.museum", + "lewismiller.museum", + "lincoln.museum", + "linz.museum", + "living.museum", + "livinghistory.museum", + "localhistory.museum", + "london.museum", + "losangeles.museum", + "louvre.museum", + "loyalist.museum", + "lucerne.museum", + "luxembourg.museum", + "luzern.museum", + "mad.museum", + "madrid.museum", + "mallorca.museum", + "manchester.museum", + "mansion.museum", + "mansions.museum", + "manx.museum", + "marburg.museum", + "maritime.museum", + "maritimo.museum", + "maryland.museum", + "marylhurst.museum", + "media.museum", + "medical.museum", + "medizinhistorisches.museum", + "meeres.museum", + "memorial.museum", + "mesaverde.museum", + "michigan.museum", + "midatlantic.museum", + "military.museum", + "mill.museum", + "miners.museum", + "mining.museum", + "minnesota.museum", + "missile.museum", + "missoula.museum", + "modern.museum", + "moma.museum", + "money.museum", + "monmouth.museum", + "monticello.museum", + "montreal.museum", + "moscow.museum", + "motorcycle.museum", + "muenchen.museum", + "muenster.museum", + "mulhouse.museum", + "muncie.museum", + "museet.museum", + "museumcenter.museum", + "museumvereniging.museum", + "music.museum", + "national.museum", + "nationalfirearms.museum", + "nationalheritage.museum", + "nativeamerican.museum", + "naturalhistory.museum", + "naturalhistorymuseum.museum", + "naturalsciences.museum", + "nature.museum", + "naturhistorisches.museum", + "natuurwetenschappen.museum", + "naumburg.museum", + "naval.museum", + "nebraska.museum", + "neues.museum", + "newhampshire.museum", + "newjersey.museum", + "newmexico.museum", + "newport.museum", + "newspaper.museum", + "newyork.museum", + "niepce.museum", + "norfolk.museum", + "north.museum", + "nrw.museum", + "nuernberg.museum", + "nuremberg.museum", + "nyc.museum", + "nyny.museum", + "oceanographic.museum", + "oceanographique.museum", + "omaha.museum", + "online.museum", + "ontario.museum", + "openair.museum", + "oregon.museum", + "oregontrail.museum", + "otago.museum", + "oxford.museum", + "pacific.museum", + "paderborn.museum", + "palace.museum", + "paleo.museum", + "palmsprings.museum", + "panama.museum", + "paris.museum", + "pasadena.museum", + "pharmacy.museum", + "philadelphia.museum", + "philadelphiaarea.museum", + "philately.museum", + "phoenix.museum", + "photography.museum", + "pilots.museum", + "pittsburgh.museum", + "planetarium.museum", + "plantation.museum", + "plants.museum", + "plaza.museum", + "portal.museum", + "portland.museum", + "portlligat.museum", + "posts-and-telecommunications.museum", + "preservation.museum", + "presidio.museum", + "press.museum", + "project.museum", + "public.museum", + "pubol.museum", + "quebec.museum", + "railroad.museum", + "railway.museum", + "research.museum", + "resistance.museum", + 
"riodejaneiro.museum", + "rochester.museum", + "rockart.museum", + "roma.museum", + "russia.museum", + "saintlouis.museum", + "salem.museum", + "salvadordali.museum", + "salzburg.museum", + "sandiego.museum", + "sanfrancisco.museum", + "santabarbara.museum", + "santacruz.museum", + "santafe.museum", + "saskatchewan.museum", + "satx.museum", + "savannahga.museum", + "schlesisches.museum", + "schoenbrunn.museum", + "schokoladen.museum", + "school.museum", + "schweiz.museum", + "science.museum", + "scienceandhistory.museum", + "scienceandindustry.museum", + "sciencecenter.museum", + "sciencecenters.museum", + "science-fiction.museum", + "sciencehistory.museum", + "sciences.museum", + "sciencesnaturelles.museum", + "scotland.museum", + "seaport.museum", + "settlement.museum", + "settlers.museum", + "shell.museum", + "sherbrooke.museum", + "sibenik.museum", + "silk.museum", + "ski.museum", + "skole.museum", + "society.museum", + "sologne.museum", + "soundandvision.museum", + "southcarolina.museum", + "southwest.museum", + "space.museum", + "spy.museum", + "square.museum", + "stadt.museum", + "stalbans.museum", + "starnberg.museum", + "state.museum", + "stateofdelaware.museum", + "station.museum", + "steam.museum", + "steiermark.museum", + "stjohn.museum", + "stockholm.museum", + "stpetersburg.museum", + "stuttgart.museum", + "suisse.museum", + "surgeonshall.museum", + "surrey.museum", + "svizzera.museum", + "sweden.museum", + "sydney.museum", + "tank.museum", + "tcm.museum", + "technology.museum", + "telekommunikation.museum", + "television.museum", + "texas.museum", + "textile.museum", + "theater.museum", + "time.museum", + "timekeeping.museum", + "topology.museum", + "torino.museum", + "touch.museum", + "town.museum", + "transport.museum", + "tree.museum", + "trolley.museum", + "trust.museum", + "trustee.museum", + "uhren.museum", + "ulm.museum", + "undersea.museum", + "university.museum", + "usa.museum", + "usantiques.museum", + "usarts.museum", + "uscountryestate.museum", + "usculture.museum", + "usdecorativearts.museum", + "usgarden.museum", + "ushistory.museum", + "ushuaia.museum", + "uslivinghistory.museum", + "utah.museum", + "uvic.museum", + "valley.museum", + "vantaa.museum", + "versailles.museum", + "viking.museum", + "village.museum", + "virginia.museum", + "virtual.museum", + "virtuel.museum", + "vlaanderen.museum", + "volkenkunde.museum", + "wales.museum", + "wallonie.museum", + "war.museum", + "washingtondc.museum", + "watchandclock.museum", + "watch-and-clock.museum", + "western.museum", + "westfalen.museum", + "whaling.museum", + "wildlife.museum", + "williamsburg.museum", + "windmill.museum", + "workshop.museum", + "york.museum", + "yorkshire.museum", + "yosemite.museum", + "youth.museum", + "zoological.museum", + "zoology.museum", + "xn--9dbhblg6di.museum", + "xn--h1aegh.museum", + "mv", + "aero.mv", + "biz.mv", + "com.mv", + "coop.mv", + "edu.mv", + "gov.mv", + "info.mv", + "int.mv", + "mil.mv", + "museum.mv", + "name.mv", + "net.mv", + "org.mv", + "pro.mv", + "mw", + "ac.mw", + "biz.mw", + "co.mw", + "com.mw", + "coop.mw", + "edu.mw", + "gov.mw", + "int.mw", + "museum.mw", + "net.mw", + "org.mw", + "mx", + "com.mx", + "org.mx", + "gob.mx", + "edu.mx", + "net.mx", + "my", + "com.my", + "net.my", + "org.my", + "gov.my", + "edu.my", + "mil.my", + "name.my", + "mz", + "ac.mz", + "adv.mz", + "co.mz", + "edu.mz", + "gov.mz", + "mil.mz", + "net.mz", + "org.mz", + "na", + "info.na", + "pro.na", + "name.na", + "school.na", + "or.na", + "dr.na", + "us.na", + "mx.na", + "ca.na", + 
"in.na", + "cc.na", + "tv.na", + "ws.na", + "mobi.na", + "co.na", + "com.na", + "org.na", + "name", + "nc", + "asso.nc", + "nom.nc", + "ne", + "net", + "nf", + "com.nf", + "net.nf", + "per.nf", + "rec.nf", + "web.nf", + "arts.nf", + "firm.nf", + "info.nf", + "other.nf", + "store.nf", + "ng", + "com.ng", + "edu.ng", + "gov.ng", + "i.ng", + "mil.ng", + "mobi.ng", + "name.ng", + "net.ng", + "org.ng", + "sch.ng", + "ni", + "ac.ni", + "biz.ni", + "co.ni", + "com.ni", + "edu.ni", + "gob.ni", + "in.ni", + "info.ni", + "int.ni", + "mil.ni", + "net.ni", + "nom.ni", + "org.ni", + "web.ni", + "nl", + "bv.nl", + "no", + "fhs.no", + "vgs.no", + "fylkesbibl.no", + "folkebibl.no", + "museum.no", + "idrett.no", + "priv.no", + "mil.no", + "stat.no", + "dep.no", + "kommune.no", + "herad.no", + "aa.no", + "ah.no", + "bu.no", + "fm.no", + "hl.no", + "hm.no", + "jan-mayen.no", + "mr.no", + "nl.no", + "nt.no", + "of.no", + "ol.no", + "oslo.no", + "rl.no", + "sf.no", + "st.no", + "svalbard.no", + "tm.no", + "tr.no", + "va.no", + "vf.no", + "gs.aa.no", + "gs.ah.no", + "gs.bu.no", + "gs.fm.no", + "gs.hl.no", + "gs.hm.no", + "gs.jan-mayen.no", + "gs.mr.no", + "gs.nl.no", + "gs.nt.no", + "gs.of.no", + "gs.ol.no", + "gs.oslo.no", + "gs.rl.no", + "gs.sf.no", + "gs.st.no", + "gs.svalbard.no", + "gs.tm.no", + "gs.tr.no", + "gs.va.no", + "gs.vf.no", + "akrehamn.no", + "xn--krehamn-dxa.no", + "algard.no", + "xn--lgrd-poac.no", + "arna.no", + "brumunddal.no", + "bryne.no", + "bronnoysund.no", + "xn--brnnysund-m8ac.no", + "drobak.no", + "xn--drbak-wua.no", + "egersund.no", + "fetsund.no", + "floro.no", + "xn--flor-jra.no", + "fredrikstad.no", + "hokksund.no", + "honefoss.no", + "xn--hnefoss-q1a.no", + "jessheim.no", + "jorpeland.no", + "xn--jrpeland-54a.no", + "kirkenes.no", + "kopervik.no", + "krokstadelva.no", + "langevag.no", + "xn--langevg-jxa.no", + "leirvik.no", + "mjondalen.no", + "xn--mjndalen-64a.no", + "mo-i-rana.no", + "mosjoen.no", + "xn--mosjen-eya.no", + "nesoddtangen.no", + "orkanger.no", + "osoyro.no", + "xn--osyro-wua.no", + "raholt.no", + "xn--rholt-mra.no", + "sandnessjoen.no", + "xn--sandnessjen-ogb.no", + "skedsmokorset.no", + "slattum.no", + "spjelkavik.no", + "stathelle.no", + "stavern.no", + "stjordalshalsen.no", + "xn--stjrdalshalsen-sqb.no", + "tananger.no", + "tranby.no", + "vossevangen.no", + "afjord.no", + "xn--fjord-lra.no", + "agdenes.no", + "al.no", + "xn--l-1fa.no", + "alesund.no", + "xn--lesund-hua.no", + "alstahaug.no", + "alta.no", + "xn--lt-liac.no", + "alaheadju.no", + "xn--laheadju-7ya.no", + "alvdal.no", + "amli.no", + "xn--mli-tla.no", + "amot.no", + "xn--mot-tla.no", + "andebu.no", + "andoy.no", + "xn--andy-ira.no", + "andasuolo.no", + "ardal.no", + "xn--rdal-poa.no", + "aremark.no", + "arendal.no", + "xn--s-1fa.no", + "aseral.no", + "xn--seral-lra.no", + "asker.no", + "askim.no", + "askvoll.no", + "askoy.no", + "xn--asky-ira.no", + "asnes.no", + "xn--snes-poa.no", + "audnedaln.no", + "aukra.no", + "aure.no", + "aurland.no", + "aurskog-holand.no", + "xn--aurskog-hland-jnb.no", + "austevoll.no", + "austrheim.no", + "averoy.no", + "xn--avery-yua.no", + "balestrand.no", + "ballangen.no", + "balat.no", + "xn--blt-elab.no", + "balsfjord.no", + "bahccavuotna.no", + "xn--bhccavuotna-k7a.no", + "bamble.no", + "bardu.no", + "beardu.no", + "beiarn.no", + "bajddar.no", + "xn--bjddar-pta.no", + "baidar.no", + "xn--bidr-5nac.no", + "berg.no", + "bergen.no", + "berlevag.no", + "xn--berlevg-jxa.no", + "bearalvahki.no", + "xn--bearalvhki-y4a.no", + "bindal.no", + "birkenes.no", + "bjarkoy.no", + 
"xn--bjarky-fya.no", + "bjerkreim.no", + "bjugn.no", + "bodo.no", + "xn--bod-2na.no", + "badaddja.no", + "xn--bdddj-mrabd.no", + "budejju.no", + "bokn.no", + "bremanger.no", + "bronnoy.no", + "xn--brnny-wuac.no", + "bygland.no", + "bykle.no", + "barum.no", + "xn--brum-voa.no", + "bo.telemark.no", + "xn--b-5ga.telemark.no", + "bo.nordland.no", + "xn--b-5ga.nordland.no", + "bievat.no", + "xn--bievt-0qa.no", + "bomlo.no", + "xn--bmlo-gra.no", + "batsfjord.no", + "xn--btsfjord-9za.no", + "bahcavuotna.no", + "xn--bhcavuotna-s4a.no", + "dovre.no", + "drammen.no", + "drangedal.no", + "dyroy.no", + "xn--dyry-ira.no", + "donna.no", + "xn--dnna-gra.no", + "eid.no", + "eidfjord.no", + "eidsberg.no", + "eidskog.no", + "eidsvoll.no", + "eigersund.no", + "elverum.no", + "enebakk.no", + "engerdal.no", + "etne.no", + "etnedal.no", + "evenes.no", + "evenassi.no", + "xn--eveni-0qa01ga.no", + "evje-og-hornnes.no", + "farsund.no", + "fauske.no", + "fuossko.no", + "fuoisku.no", + "fedje.no", + "fet.no", + "finnoy.no", + "xn--finny-yua.no", + "fitjar.no", + "fjaler.no", + "fjell.no", + "flakstad.no", + "flatanger.no", + "flekkefjord.no", + "flesberg.no", + "flora.no", + "fla.no", + "xn--fl-zia.no", + "folldal.no", + "forsand.no", + "fosnes.no", + "frei.no", + "frogn.no", + "froland.no", + "frosta.no", + "frana.no", + "xn--frna-woa.no", + "froya.no", + "xn--frya-hra.no", + "fusa.no", + "fyresdal.no", + "forde.no", + "xn--frde-gra.no", + "gamvik.no", + "gangaviika.no", + "xn--ggaviika-8ya47h.no", + "gaular.no", + "gausdal.no", + "gildeskal.no", + "xn--gildeskl-g0a.no", + "giske.no", + "gjemnes.no", + "gjerdrum.no", + "gjerstad.no", + "gjesdal.no", + "gjovik.no", + "xn--gjvik-wua.no", + "gloppen.no", + "gol.no", + "gran.no", + "grane.no", + "granvin.no", + "gratangen.no", + "grimstad.no", + "grong.no", + "kraanghke.no", + "xn--kranghke-b0a.no", + "grue.no", + "gulen.no", + "hadsel.no", + "halden.no", + "halsa.no", + "hamar.no", + "hamaroy.no", + "habmer.no", + "xn--hbmer-xqa.no", + "hapmir.no", + "xn--hpmir-xqa.no", + "hammerfest.no", + "hammarfeasta.no", + "xn--hmmrfeasta-s4ac.no", + "haram.no", + "hareid.no", + "harstad.no", + "hasvik.no", + "aknoluokta.no", + "xn--koluokta-7ya57h.no", + "hattfjelldal.no", + "aarborte.no", + "haugesund.no", + "hemne.no", + "hemnes.no", + "hemsedal.no", + "heroy.more-og-romsdal.no", + "xn--hery-ira.xn--mre-og-romsdal-qqb.no", + "heroy.nordland.no", + "xn--hery-ira.nordland.no", + "hitra.no", + "hjartdal.no", + "hjelmeland.no", + "hobol.no", + "xn--hobl-ira.no", + "hof.no", + "hol.no", + "hole.no", + "holmestrand.no", + "holtalen.no", + "xn--holtlen-hxa.no", + "hornindal.no", + "horten.no", + "hurdal.no", + "hurum.no", + "hvaler.no", + "hyllestad.no", + "hagebostad.no", + "xn--hgebostad-g3a.no", + "hoyanger.no", + "xn--hyanger-q1a.no", + "hoylandet.no", + "xn--hylandet-54a.no", + "ha.no", + "xn--h-2fa.no", + "ibestad.no", + "inderoy.no", + "xn--indery-fya.no", + "iveland.no", + "jevnaker.no", + "jondal.no", + "jolster.no", + "xn--jlster-bya.no", + "karasjok.no", + "karasjohka.no", + "xn--krjohka-hwab49j.no", + "karlsoy.no", + "galsa.no", + "xn--gls-elac.no", + "karmoy.no", + "xn--karmy-yua.no", + "kautokeino.no", + "guovdageaidnu.no", + "klepp.no", + "klabu.no", + "xn--klbu-woa.no", + "kongsberg.no", + "kongsvinger.no", + "kragero.no", + "xn--krager-gya.no", + "kristiansand.no", + "kristiansund.no", + "krodsherad.no", + "xn--krdsherad-m8a.no", + "kvalsund.no", + "rahkkeravju.no", + "xn--rhkkervju-01af.no", + "kvam.no", + "kvinesdal.no", + "kvinnherad.no", + "kviteseid.no", + 
"kvitsoy.no", + "xn--kvitsy-fya.no", + "kvafjord.no", + "xn--kvfjord-nxa.no", + "giehtavuoatna.no", + "kvanangen.no", + "xn--kvnangen-k0a.no", + "navuotna.no", + "xn--nvuotna-hwa.no", + "kafjord.no", + "xn--kfjord-iua.no", + "gaivuotna.no", + "xn--givuotna-8ya.no", + "larvik.no", + "lavangen.no", + "lavagis.no", + "loabat.no", + "xn--loabt-0qa.no", + "lebesby.no", + "davvesiida.no", + "leikanger.no", + "leirfjord.no", + "leka.no", + "leksvik.no", + "lenvik.no", + "leangaviika.no", + "xn--leagaviika-52b.no", + "lesja.no", + "levanger.no", + "lier.no", + "lierne.no", + "lillehammer.no", + "lillesand.no", + "lindesnes.no", + "lindas.no", + "xn--linds-pra.no", + "lom.no", + "loppa.no", + "lahppi.no", + "xn--lhppi-xqa.no", + "lund.no", + "lunner.no", + "luroy.no", + "xn--lury-ira.no", + "luster.no", + "lyngdal.no", + "lyngen.no", + "ivgu.no", + "lardal.no", + "lerdal.no", + "xn--lrdal-sra.no", + "lodingen.no", + "xn--ldingen-q1a.no", + "lorenskog.no", + "xn--lrenskog-54a.no", + "loten.no", + "xn--lten-gra.no", + "malvik.no", + "masoy.no", + "xn--msy-ula0h.no", + "muosat.no", + "xn--muost-0qa.no", + "mandal.no", + "marker.no", + "marnardal.no", + "masfjorden.no", + "meland.no", + "meldal.no", + "melhus.no", + "meloy.no", + "xn--mely-ira.no", + "meraker.no", + "xn--merker-kua.no", + "moareke.no", + "xn--moreke-jua.no", + "midsund.no", + "midtre-gauldal.no", + "modalen.no", + "modum.no", + "molde.no", + "moskenes.no", + "moss.no", + "mosvik.no", + "malselv.no", + "xn--mlselv-iua.no", + "malatvuopmi.no", + "xn--mlatvuopmi-s4a.no", + "namdalseid.no", + "aejrie.no", + "namsos.no", + "namsskogan.no", + "naamesjevuemie.no", + "xn--nmesjevuemie-tcba.no", + "laakesvuemie.no", + "nannestad.no", + "narvik.no", + "narviika.no", + "naustdal.no", + "nedre-eiker.no", + "nes.akershus.no", + "nes.buskerud.no", + "nesna.no", + "nesodden.no", + "nesseby.no", + "unjarga.no", + "xn--unjrga-rta.no", + "nesset.no", + "nissedal.no", + "nittedal.no", + "nord-aurdal.no", + "nord-fron.no", + "nord-odal.no", + "norddal.no", + "nordkapp.no", + "davvenjarga.no", + "xn--davvenjrga-y4a.no", + "nordre-land.no", + "nordreisa.no", + "raisa.no", + "xn--risa-5na.no", + "nore-og-uvdal.no", + "notodden.no", + "naroy.no", + "xn--nry-yla5g.no", + "notteroy.no", + "xn--nttery-byae.no", + "odda.no", + "oksnes.no", + "xn--ksnes-uua.no", + "oppdal.no", + "oppegard.no", + "xn--oppegrd-ixa.no", + "orkdal.no", + "orland.no", + "xn--rland-uua.no", + "orskog.no", + "xn--rskog-uua.no", + "orsta.no", + "xn--rsta-fra.no", + "os.hedmark.no", + "os.hordaland.no", + "osen.no", + "osteroy.no", + "xn--ostery-fya.no", + "ostre-toten.no", + "xn--stre-toten-zcb.no", + "overhalla.no", + "ovre-eiker.no", + "xn--vre-eiker-k8a.no", + "oyer.no", + "xn--yer-zna.no", + "oygarden.no", + "xn--ygarden-p1a.no", + "oystre-slidre.no", + "xn--ystre-slidre-ujb.no", + "porsanger.no", + "porsangu.no", + "xn--porsgu-sta26f.no", + "porsgrunn.no", + "radoy.no", + "xn--rady-ira.no", + "rakkestad.no", + "rana.no", + "ruovat.no", + "randaberg.no", + "rauma.no", + "rendalen.no", + "rennebu.no", + "rennesoy.no", + "xn--rennesy-v1a.no", + "rindal.no", + "ringebu.no", + "ringerike.no", + "ringsaker.no", + "rissa.no", + "risor.no", + "xn--risr-ira.no", + "roan.no", + "rollag.no", + "rygge.no", + "ralingen.no", + "xn--rlingen-mxa.no", + "rodoy.no", + "xn--rdy-0nab.no", + "romskog.no", + "xn--rmskog-bya.no", + "roros.no", + "xn--rros-gra.no", + "rost.no", + "xn--rst-0na.no", + "royken.no", + "xn--ryken-vua.no", + "royrvik.no", + "xn--ryrvik-bya.no", + "rade.no", + "xn--rde-ula.no", + 
"salangen.no", + "siellak.no", + "saltdal.no", + "salat.no", + "xn--slt-elab.no", + "xn--slat-5na.no", + "samnanger.no", + "sande.more-og-romsdal.no", + "sande.xn--mre-og-romsdal-qqb.no", + "sande.vestfold.no", + "sandefjord.no", + "sandnes.no", + "sandoy.no", + "xn--sandy-yua.no", + "sarpsborg.no", + "sauda.no", + "sauherad.no", + "sel.no", + "selbu.no", + "selje.no", + "seljord.no", + "sigdal.no", + "siljan.no", + "sirdal.no", + "skaun.no", + "skedsmo.no", + "ski.no", + "skien.no", + "skiptvet.no", + "skjervoy.no", + "xn--skjervy-v1a.no", + "skierva.no", + "xn--skierv-uta.no", + "skjak.no", + "xn--skjk-soa.no", + "skodje.no", + "skanland.no", + "xn--sknland-fxa.no", + "skanit.no", + "xn--sknit-yqa.no", + "smola.no", + "xn--smla-hra.no", + "snillfjord.no", + "snasa.no", + "xn--snsa-roa.no", + "snoasa.no", + "snaase.no", + "xn--snase-nra.no", + "sogndal.no", + "sokndal.no", + "sola.no", + "solund.no", + "songdalen.no", + "sortland.no", + "spydeberg.no", + "stange.no", + "stavanger.no", + "steigen.no", + "steinkjer.no", + "stjordal.no", + "xn--stjrdal-s1a.no", + "stokke.no", + "stor-elvdal.no", + "stord.no", + "stordal.no", + "storfjord.no", + "omasvuotna.no", + "strand.no", + "stranda.no", + "stryn.no", + "sula.no", + "suldal.no", + "sund.no", + "sunndal.no", + "surnadal.no", + "sveio.no", + "svelvik.no", + "sykkylven.no", + "sogne.no", + "xn--sgne-gra.no", + "somna.no", + "xn--smna-gra.no", + "sondre-land.no", + "xn--sndre-land-0cb.no", + "sor-aurdal.no", + "xn--sr-aurdal-l8a.no", + "sor-fron.no", + "xn--sr-fron-q1a.no", + "sor-odal.no", + "xn--sr-odal-q1a.no", + "sor-varanger.no", + "xn--sr-varanger-ggb.no", + "matta-varjjat.no", + "xn--mtta-vrjjat-k7af.no", + "sorfold.no", + "xn--srfold-bya.no", + "sorreisa.no", + "xn--srreisa-q1a.no", + "sorum.no", + "xn--srum-gra.no", + "tana.no", + "deatnu.no", + "time.no", + "tingvoll.no", + "tinn.no", + "tjeldsund.no", + "dielddanuorri.no", + "tjome.no", + "xn--tjme-hra.no", + "tokke.no", + "tolga.no", + "torsken.no", + "tranoy.no", + "xn--trany-yua.no", + "tromso.no", + "xn--troms-zua.no", + "tromsa.no", + "romsa.no", + "trondheim.no", + "troandin.no", + "trysil.no", + "trana.no", + "xn--trna-woa.no", + "trogstad.no", + "xn--trgstad-r1a.no", + "tvedestrand.no", + "tydal.no", + "tynset.no", + "tysfjord.no", + "divtasvuodna.no", + "divttasvuotna.no", + "tysnes.no", + "tysvar.no", + "xn--tysvr-vra.no", + "tonsberg.no", + "xn--tnsberg-q1a.no", + "ullensaker.no", + "ullensvang.no", + "ulvik.no", + "utsira.no", + "vadso.no", + "xn--vads-jra.no", + "cahcesuolo.no", + "xn--hcesuolo-7ya35b.no", + "vaksdal.no", + "valle.no", + "vang.no", + "vanylven.no", + "vardo.no", + "xn--vard-jra.no", + "varggat.no", + "xn--vrggt-xqad.no", + "vefsn.no", + "vaapste.no", + "vega.no", + "vegarshei.no", + "xn--vegrshei-c0a.no", + "vennesla.no", + "verdal.no", + "verran.no", + "vestby.no", + "vestnes.no", + "vestre-slidre.no", + "vestre-toten.no", + "vestvagoy.no", + "xn--vestvgy-ixa6o.no", + "vevelstad.no", + "vik.no", + "vikna.no", + "vindafjord.no", + "volda.no", + "voss.no", + "varoy.no", + "xn--vry-yla5g.no", + "vagan.no", + "xn--vgan-qoa.no", + "voagat.no", + "vagsoy.no", + "xn--vgsy-qoa0j.no", + "vaga.no", + "xn--vg-yiab.no", + "valer.ostfold.no", + "xn--vler-qoa.xn--stfold-9xa.no", + "valer.hedmark.no", + "xn--vler-qoa.hedmark.no", + "*.np", + "nr", + "biz.nr", + "info.nr", + "gov.nr", + "edu.nr", + "org.nr", + "net.nr", + "com.nr", + "nu", + "nz", + "ac.nz", + "co.nz", + "cri.nz", + "geek.nz", + "gen.nz", + "govt.nz", + "health.nz", + "iwi.nz", + "kiwi.nz", + 
"maori.nz", + "mil.nz", + "xn--mori-qsa.nz", + "net.nz", + "org.nz", + "parliament.nz", + "school.nz", + "om", + "co.om", + "com.om", + "edu.om", + "gov.om", + "med.om", + "museum.om", + "net.om", + "org.om", + "pro.om", + "onion", + "org", + "pa", + "ac.pa", + "gob.pa", + "com.pa", + "org.pa", + "sld.pa", + "edu.pa", + "net.pa", + "ing.pa", + "abo.pa", + "med.pa", + "nom.pa", + "pe", + "edu.pe", + "gob.pe", + "nom.pe", + "mil.pe", + "org.pe", + "com.pe", + "net.pe", + "pf", + "com.pf", + "org.pf", + "edu.pf", + "*.pg", + "ph", + "com.ph", + "net.ph", + "org.ph", + "gov.ph", + "edu.ph", + "ngo.ph", + "mil.ph", + "i.ph", + "pk", + "com.pk", + "net.pk", + "edu.pk", + "org.pk", + "fam.pk", + "biz.pk", + "web.pk", + "gov.pk", + "gob.pk", + "gok.pk", + "gon.pk", + "gop.pk", + "gos.pk", + "info.pk", + "pl", + "com.pl", + "net.pl", + "org.pl", + "aid.pl", + "agro.pl", + "atm.pl", + "auto.pl", + "biz.pl", + "edu.pl", + "gmina.pl", + "gsm.pl", + "info.pl", + "mail.pl", + "miasta.pl", + "media.pl", + "mil.pl", + "nieruchomosci.pl", + "nom.pl", + "pc.pl", + "powiat.pl", + "priv.pl", + "realestate.pl", + "rel.pl", + "sex.pl", + "shop.pl", + "sklep.pl", + "sos.pl", + "szkola.pl", + "targi.pl", + "tm.pl", + "tourism.pl", + "travel.pl", + "turystyka.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", + "um.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", + "upow.gov.pl", + "uw.gov.pl", + "starostwo.gov.pl", + "pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", + "so.gov.pl", + "sr.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", + "augustow.pl", + "babia-gora.pl", + "bedzin.pl", + "beskidy.pl", + "bialowieza.pl", + "bialystok.pl", + "bielawa.pl", + "bieszczady.pl", + "boleslawiec.pl", + "bydgoszcz.pl", + "bytom.pl", + "cieszyn.pl", + "czeladz.pl", + "czest.pl", + "dlugoleka.pl", + "elblag.pl", + "elk.pl", + "glogow.pl", + "gniezno.pl", + "gorlice.pl", + "grajewo.pl", + "ilawa.pl", + "jaworzno.pl", + "jelenia-gora.pl", + "jgora.pl", + "kalisz.pl", + "kazimierz-dolny.pl", + "karpacz.pl", + "kartuzy.pl", + "kaszuby.pl", + "katowice.pl", + "kepno.pl", + "ketrzyn.pl", + "klodzko.pl", + "kobierzyce.pl", + "kolobrzeg.pl", + "konin.pl", + "konskowola.pl", + "kutno.pl", + "lapy.pl", + "lebork.pl", + "legnica.pl", + "lezajsk.pl", + "limanowa.pl", + "lomza.pl", + "lowicz.pl", + "lubin.pl", + "lukow.pl", + "malbork.pl", + "malopolska.pl", + "mazowsze.pl", + "mazury.pl", + "mielec.pl", + "mielno.pl", + "mragowo.pl", + "naklo.pl", + "nowaruda.pl", + "nysa.pl", + "olawa.pl", + "olecko.pl", + "olkusz.pl", + "olsztyn.pl", + "opoczno.pl", + "opole.pl", + "ostroda.pl", + "ostroleka.pl", + "ostrowiec.pl", + "ostrowwlkp.pl", + "pila.pl", + "pisz.pl", + "podhale.pl", + "podlasie.pl", + "polkowice.pl", + "pomorze.pl", + "pomorskie.pl", + "prochowice.pl", + "pruszkow.pl", + "przeworsk.pl", + "pulawy.pl", + "radom.pl", + "rawa-maz.pl", + "rybnik.pl", + "rzeszow.pl", + "sanok.pl", + "sejny.pl", + "slask.pl", + "slupsk.pl", + "sosnowiec.pl", + "stalowa-wola.pl", + "skoczow.pl", + "starachowice.pl", + "stargard.pl", + "suwalki.pl", + 
"swidnica.pl", + "swiebodzin.pl", + "swinoujscie.pl", + "szczecin.pl", + "szczytno.pl", + "tarnobrzeg.pl", + "tgory.pl", + "turek.pl", + "tychy.pl", + "ustka.pl", + "walbrzych.pl", + "warmia.pl", + "warszawa.pl", + "waw.pl", + "wegrow.pl", + "wielun.pl", + "wlocl.pl", + "wloclawek.pl", + "wodzislaw.pl", + "wolomin.pl", + "wroclaw.pl", + "zachpomor.pl", + "zagan.pl", + "zarow.pl", + "zgora.pl", + "zgorzelec.pl", + "pm", + "pn", + "gov.pn", + "co.pn", + "org.pn", + "edu.pn", + "net.pn", + "post", + "pr", + "com.pr", + "net.pr", + "org.pr", + "gov.pr", + "edu.pr", + "isla.pr", + "pro.pr", + "biz.pr", + "info.pr", + "name.pr", + "est.pr", + "prof.pr", + "ac.pr", + "pro", + "aaa.pro", + "aca.pro", + "acct.pro", + "avocat.pro", + "bar.pro", + "cpa.pro", + "eng.pro", + "jur.pro", + "law.pro", + "med.pro", + "recht.pro", + "ps", + "edu.ps", + "gov.ps", + "sec.ps", + "plo.ps", + "com.ps", + "org.ps", + "net.ps", + "pt", + "net.pt", + "gov.pt", + "org.pt", + "edu.pt", + "int.pt", + "publ.pt", + "com.pt", + "nome.pt", + "pw", + "co.pw", + "ne.pw", + "or.pw", + "ed.pw", + "go.pw", + "belau.pw", + "py", + "com.py", + "coop.py", + "edu.py", + "gov.py", + "mil.py", + "net.py", + "org.py", + "qa", + "com.qa", + "edu.qa", + "gov.qa", + "mil.qa", + "name.qa", + "net.qa", + "org.qa", + "sch.qa", + "re", + "asso.re", + "com.re", + "nom.re", + "ro", + "arts.ro", + "com.ro", + "firm.ro", + "info.ro", + "nom.ro", + "nt.ro", + "org.ro", + "rec.ro", + "store.ro", + "tm.ro", + "www.ro", + "rs", + "ac.rs", + "co.rs", + "edu.rs", + "gov.rs", + "in.rs", + "org.rs", + "ru", + "ac.ru", + "edu.ru", + "gov.ru", + "int.ru", + "mil.ru", + "test.ru", + "rw", + "gov.rw", + "net.rw", + "edu.rw", + "ac.rw", + "com.rw", + "co.rw", + "int.rw", + "mil.rw", + "gouv.rw", + "sa", + "com.sa", + "net.sa", + "org.sa", + "gov.sa", + "med.sa", + "pub.sa", + "edu.sa", + "sch.sa", + "sb", + "com.sb", + "edu.sb", + "gov.sb", + "net.sb", + "org.sb", + "sc", + "com.sc", + "gov.sc", + "net.sc", + "org.sc", + "edu.sc", + "sd", + "com.sd", + "net.sd", + "org.sd", + "edu.sd", + "med.sd", + "tv.sd", + "gov.sd", + "info.sd", + "se", + "a.se", + "ac.se", + "b.se", + "bd.se", + "brand.se", + "c.se", + "d.se", + "e.se", + "f.se", + "fh.se", + "fhsk.se", + "fhv.se", + "g.se", + "h.se", + "i.se", + "k.se", + "komforb.se", + "kommunalforbund.se", + "komvux.se", + "l.se", + "lanbib.se", + "m.se", + "n.se", + "naturbruksgymn.se", + "o.se", + "org.se", + "p.se", + "parti.se", + "pp.se", + "press.se", + "r.se", + "s.se", + "t.se", + "tm.se", + "u.se", + "w.se", + "x.se", + "y.se", + "z.se", + "sg", + "com.sg", + "net.sg", + "org.sg", + "gov.sg", + "edu.sg", + "per.sg", + "sh", + "com.sh", + "net.sh", + "gov.sh", + "org.sh", + "mil.sh", + "si", + "sj", + "sk", + "sl", + "com.sl", + "net.sl", + "edu.sl", + "gov.sl", + "org.sl", + "sm", + "sn", + "art.sn", + "com.sn", + "edu.sn", + "gouv.sn", + "org.sn", + "perso.sn", + "univ.sn", + "so", + "com.so", + "net.so", + "org.so", + "sr", + "st", + "co.st", + "com.st", + "consulado.st", + "edu.st", + "embaixada.st", + "gov.st", + "mil.st", + "net.st", + "org.st", + "principe.st", + "saotome.st", + "store.st", + "su", + "sv", + "com.sv", + "edu.sv", + "gob.sv", + "org.sv", + "red.sv", + "sx", + "gov.sx", + "sy", + "edu.sy", + "gov.sy", + "net.sy", + "mil.sy", + "com.sy", + "org.sy", + "sz", + "co.sz", + "ac.sz", + "org.sz", + "tc", + "td", + "tel", + "tf", + "tg", + "th", + "ac.th", + "co.th", + "go.th", + "in.th", + "mi.th", + "net.th", + "or.th", + "tj", + "ac.tj", + "biz.tj", + "co.tj", + "com.tj", + "edu.tj", + 
"go.tj", + "gov.tj", + "int.tj", + "mil.tj", + "name.tj", + "net.tj", + "nic.tj", + "org.tj", + "test.tj", + "web.tj", + "tk", + "tl", + "gov.tl", + "tm", + "com.tm", + "co.tm", + "org.tm", + "net.tm", + "nom.tm", + "gov.tm", + "mil.tm", + "edu.tm", + "tn", + "com.tn", + "ens.tn", + "fin.tn", + "gov.tn", + "ind.tn", + "intl.tn", + "nat.tn", + "net.tn", + "org.tn", + "info.tn", + "perso.tn", + "tourism.tn", + "edunet.tn", + "rnrt.tn", + "rns.tn", + "rnu.tn", + "mincom.tn", + "agrinet.tn", + "defense.tn", + "turen.tn", + "to", + "com.to", + "gov.to", + "net.to", + "org.to", + "edu.to", + "mil.to", + "tr", + "com.tr", + "info.tr", + "biz.tr", + "net.tr", + "org.tr", + "web.tr", + "gen.tr", + "tv.tr", + "av.tr", + "dr.tr", + "bbs.tr", + "name.tr", + "tel.tr", + "gov.tr", + "bel.tr", + "pol.tr", + "mil.tr", + "k12.tr", + "edu.tr", + "kep.tr", + "nc.tr", + "gov.nc.tr", + "travel", + "tt", + "co.tt", + "com.tt", + "org.tt", + "net.tt", + "biz.tt", + "info.tt", + "pro.tt", + "int.tt", + "coop.tt", + "jobs.tt", + "mobi.tt", + "travel.tt", + "museum.tt", + "aero.tt", + "name.tt", + "gov.tt", + "edu.tt", + "tv", + "tw", + "edu.tw", + "gov.tw", + "mil.tw", + "com.tw", + "net.tw", + "org.tw", + "idv.tw", + "game.tw", + "ebiz.tw", + "club.tw", + "xn--zf0ao64a.tw", + "xn--uc0atv.tw", + "xn--czrw28b.tw", + "tz", + "ac.tz", + "co.tz", + "go.tz", + "hotel.tz", + "info.tz", + "me.tz", + "mil.tz", + "mobi.tz", + "ne.tz", + "or.tz", + "sc.tz", + "tv.tz", + "ua", + "com.ua", + "edu.ua", + "gov.ua", + "in.ua", + "net.ua", + "org.ua", + "cherkassy.ua", + "cherkasy.ua", + "chernigov.ua", + "chernihiv.ua", + "chernivtsi.ua", + "chernovtsy.ua", + "ck.ua", + "cn.ua", + "cr.ua", + "crimea.ua", + "cv.ua", + "dn.ua", + "dnepropetrovsk.ua", + "dnipropetrovsk.ua", + "dominic.ua", + "donetsk.ua", + "dp.ua", + "if.ua", + "ivano-frankivsk.ua", + "kh.ua", + "kharkiv.ua", + "kharkov.ua", + "kherson.ua", + "khmelnitskiy.ua", + "khmelnytskyi.ua", + "kiev.ua", + "kirovograd.ua", + "km.ua", + "kr.ua", + "krym.ua", + "ks.ua", + "kv.ua", + "kyiv.ua", + "lg.ua", + "lt.ua", + "lugansk.ua", + "lutsk.ua", + "lv.ua", + "lviv.ua", + "mk.ua", + "mykolaiv.ua", + "nikolaev.ua", + "od.ua", + "odesa.ua", + "odessa.ua", + "pl.ua", + "poltava.ua", + "rivne.ua", + "rovno.ua", + "rv.ua", + "sb.ua", + "sebastopol.ua", + "sevastopol.ua", + "sm.ua", + "sumy.ua", + "te.ua", + "ternopil.ua", + "uz.ua", + "uzhgorod.ua", + "vinnica.ua", + "vinnytsia.ua", + "vn.ua", + "volyn.ua", + "yalta.ua", + "zaporizhzhe.ua", + "zaporizhzhia.ua", + "zhitomir.ua", + "zhytomyr.ua", + "zp.ua", + "zt.ua", + "ug", + "co.ug", + "or.ug", + "ac.ug", + "sc.ug", + "go.ug", + "ne.ug", + "com.ug", + "org.ug", + "uk", + "ac.uk", + "co.uk", + "gov.uk", + "ltd.uk", + "me.uk", + "net.uk", + "nhs.uk", + "org.uk", + "plc.uk", + "police.uk", + "*.sch.uk", + "us", + "dni.us", + "fed.us", + "isa.us", + "kids.us", + "nsn.us", + "ak.us", + "al.us", + "ar.us", + "as.us", + "az.us", + "ca.us", + "co.us", + "ct.us", + "dc.us", + "de.us", + "fl.us", + "ga.us", + "gu.us", + "hi.us", + "ia.us", + "id.us", + "il.us", + "in.us", + "ks.us", + "ky.us", + "la.us", + "ma.us", + "md.us", + "me.us", + "mi.us", + "mn.us", + "mo.us", + "ms.us", + "mt.us", + "nc.us", + "nd.us", + "ne.us", + "nh.us", + "nj.us", + "nm.us", + "nv.us", + "ny.us", + "oh.us", + "ok.us", + "or.us", + "pa.us", + "pr.us", + "ri.us", + "sc.us", + "sd.us", + "tn.us", + "tx.us", + "ut.us", + "vi.us", + "vt.us", + "va.us", + "wa.us", + "wi.us", + "wv.us", + "wy.us", + "k12.ak.us", + "k12.al.us", + "k12.ar.us", + "k12.as.us", + 
"k12.az.us", + "k12.ca.us", + "k12.co.us", + "k12.ct.us", + "k12.dc.us", + "k12.de.us", + "k12.fl.us", + "k12.ga.us", + "k12.gu.us", + "k12.ia.us", + "k12.id.us", + "k12.il.us", + "k12.in.us", + "k12.ks.us", + "k12.ky.us", + "k12.la.us", + "k12.ma.us", + "k12.md.us", + "k12.me.us", + "k12.mi.us", + "k12.mn.us", + "k12.mo.us", + "k12.ms.us", + "k12.mt.us", + "k12.nc.us", + "k12.ne.us", + "k12.nh.us", + "k12.nj.us", + "k12.nm.us", + "k12.nv.us", + "k12.ny.us", + "k12.oh.us", + "k12.ok.us", + "k12.or.us", + "k12.pa.us", + "k12.pr.us", + "k12.ri.us", + "k12.sc.us", + "k12.tn.us", + "k12.tx.us", + "k12.ut.us", + "k12.vi.us", + "k12.vt.us", + "k12.va.us", + "k12.wa.us", + "k12.wi.us", + "k12.wy.us", + "cc.ak.us", + "cc.al.us", + "cc.ar.us", + "cc.as.us", + "cc.az.us", + "cc.ca.us", + "cc.co.us", + "cc.ct.us", + "cc.dc.us", + "cc.de.us", + "cc.fl.us", + "cc.ga.us", + "cc.gu.us", + "cc.hi.us", + "cc.ia.us", + "cc.id.us", + "cc.il.us", + "cc.in.us", + "cc.ks.us", + "cc.ky.us", + "cc.la.us", + "cc.ma.us", + "cc.md.us", + "cc.me.us", + "cc.mi.us", + "cc.mn.us", + "cc.mo.us", + "cc.ms.us", + "cc.mt.us", + "cc.nc.us", + "cc.nd.us", + "cc.ne.us", + "cc.nh.us", + "cc.nj.us", + "cc.nm.us", + "cc.nv.us", + "cc.ny.us", + "cc.oh.us", + "cc.ok.us", + "cc.or.us", + "cc.pa.us", + "cc.pr.us", + "cc.ri.us", + "cc.sc.us", + "cc.sd.us", + "cc.tn.us", + "cc.tx.us", + "cc.ut.us", + "cc.vi.us", + "cc.vt.us", + "cc.va.us", + "cc.wa.us", + "cc.wi.us", + "cc.wv.us", + "cc.wy.us", + "lib.ak.us", + "lib.al.us", + "lib.ar.us", + "lib.as.us", + "lib.az.us", + "lib.ca.us", + "lib.co.us", + "lib.ct.us", + "lib.dc.us", + "lib.fl.us", + "lib.ga.us", + "lib.gu.us", + "lib.hi.us", + "lib.ia.us", + "lib.id.us", + "lib.il.us", + "lib.in.us", + "lib.ks.us", + "lib.ky.us", + "lib.la.us", + "lib.ma.us", + "lib.md.us", + "lib.me.us", + "lib.mi.us", + "lib.mn.us", + "lib.mo.us", + "lib.ms.us", + "lib.mt.us", + "lib.nc.us", + "lib.nd.us", + "lib.ne.us", + "lib.nh.us", + "lib.nj.us", + "lib.nm.us", + "lib.nv.us", + "lib.ny.us", + "lib.oh.us", + "lib.ok.us", + "lib.or.us", + "lib.pa.us", + "lib.pr.us", + "lib.ri.us", + "lib.sc.us", + "lib.sd.us", + "lib.tn.us", + "lib.tx.us", + "lib.ut.us", + "lib.vi.us", + "lib.vt.us", + "lib.va.us", + "lib.wa.us", + "lib.wi.us", + "lib.wy.us", + "pvt.k12.ma.us", + "chtr.k12.ma.us", + "paroch.k12.ma.us", + "ann-arbor.mi.us", + "cog.mi.us", + "dst.mi.us", + "eaton.mi.us", + "gen.mi.us", + "mus.mi.us", + "tec.mi.us", + "washtenaw.mi.us", + "uy", + "com.uy", + "edu.uy", + "gub.uy", + "mil.uy", + "net.uy", + "org.uy", + "uz", + "co.uz", + "com.uz", + "net.uz", + "org.uz", + "va", + "vc", + "com.vc", + "net.vc", + "org.vc", + "gov.vc", + "mil.vc", + "edu.vc", + "ve", + "arts.ve", + "co.ve", + "com.ve", + "e12.ve", + "edu.ve", + "firm.ve", + "gob.ve", + "gov.ve", + "info.ve", + "int.ve", + "mil.ve", + "net.ve", + "org.ve", + "rec.ve", + "store.ve", + "tec.ve", + "web.ve", + "vg", + "vi", + "co.vi", + "com.vi", + "k12.vi", + "net.vi", + "org.vi", + "vn", + "com.vn", + "net.vn", + "org.vn", + "edu.vn", + "gov.vn", + "int.vn", + "ac.vn", + "biz.vn", + "info.vn", + "name.vn", + "pro.vn", + "health.vn", + "vu", + "com.vu", + "edu.vu", + "net.vu", + "org.vu", + "wf", + "ws", + "com.ws", + "net.ws", + "org.ws", + "gov.ws", + "edu.ws", + "yt", + "xn--mgbaam7a8h", + "xn--y9a3aq", + "xn--54b7fta0cc", + "xn--90ae", + "xn--90ais", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--lgbbat1ad8j", + "xn--wgbh1c", + "xn--e1a4c", + "xn--node", + "xn--qxam", + "xn--j6w193g", + "xn--2scrj9c", + "xn--3hcrj9c", + "xn--45br5cyl", + 
"xn--h2breg3eve", + "xn--h2brj9c8c", + "xn--mgbgu82a", + "xn--rvc1e0am3e", + "xn--h2brj9c", + "xn--mgbbh1a71e", + "xn--fpcrj9c3d", + "xn--gecrj9c", + "xn--s9brj9c", + "xn--45brj9c", + "xn--xkc2dl3a5ee0h", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgbtx2b", + "xn--mgbayh7gpa", + "xn--3e0b707e", + "xn--80ao21a", + "xn--fzc2c9e2c", + "xn--xkc2al3hye2a", + "xn--mgbc0a9azcg", + "xn--d1alf", + "xn--l1acc", + "xn--mix891f", + "xn--mix082f", + "xn--mgbx4cd0ab", + "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", + "xn--ygbi2ammx", + "xn--90a3ac", + "xn--o1ac.xn--90a3ac", + "xn--c1avg.xn--90a3ac", + "xn--90azh.xn--90a3ac", + "xn--d1at.xn--90a3ac", + "xn--o1ach.xn--90a3ac", + "xn--80au.xn--90a3ac", + "xn--p1ai", + "xn--wgbl6a", + "xn--mgberp4a5d4ar", + "xn--mgberp4a5d4a87g", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", + "xn--ogbpf8fl", + "xn--mgbtf8fl", + "xn--o3cw4h", + "xn--12c1fe0br.xn--o3cw4h", + "xn--12co0c3b4eva.xn--o3cw4h", + "xn--h3cuzk1di.xn--o3cw4h", + "xn--o3cyx2a.xn--o3cw4h", + "xn--m3ch0j3a.xn--o3cw4h", + "xn--12cfi8ixb8l.xn--o3cw4h", + "xn--pgbs0dh", + "xn--kpry57d", + "xn--kprw13d", + "xn--nnx388a", + "xn--j1amh", + "xn--mgb2ddes", + "xxx", + "*.ye", + "ac.za", + "agric.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", + "zm", + "ac.zm", + "biz.zm", + "co.zm", + "com.zm", + "edu.zm", + "gov.zm", + "info.zm", + "mil.zm", + "net.zm", + "org.zm", + "sch.zm", + "zw", + "ac.zw", + "co.zw", + "gov.zw", + "mil.zw", + "org.zw", + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "adac", + "ads", + "adult", + "aeg", + "aetna", + "afamilycompany", + "afl", + "africa", + "agakhan", + "agency", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "alfaromeo", + "alibaba", + "alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "aol", + "apartments", + "app", + "apple", + "aquarelle", + "arab", + "aramco", + "archi", + "army", + "art", + "arte", + "asda", + "associates", + "athleta", + "attorney", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aws", + "axa", + "azure", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bharti", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bms", + "bmw", + "bnl", + "bnpparibas", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bzh", + "cab", + "cafe", + "cal", + "call", + 
"calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "ceb", + "center", + "ceo", + "cern", + "cfa", + "cfd", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "chloe", + "christmas", + "chrome", + "chrysler", + "church", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "coach", + "codes", + "coffee", + "college", + "cologne", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cuisinella", + "cymru", + "cyou", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dnp", + "docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "earth", + "eat", + "eco", + "edeka", + "education", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "ericsson", + "erni", + "esq", + "estate", + "esurance", + "etisalat", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gbiz", + "gdn", + "gea", + "gent", + "genting", + "george", + "ggee", + "gift", + "gifts", + "gives", + "giving", + "glade", + "glass", + "gle", + "global", + "globo", + "gmail", + "gmbh", + "gmo", + "gmx", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", 
+ "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hkt", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hsbc", + "htc", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "ieee", + "ifm", + "ikano", + "imamat", + "imdb", + "immo", + "immobilien", + "industries", + "infiniti", + "ing", + "ink", + "institute", + "insurance", + "insure", + "intel", + "international", + "intuit", + "investments", + "ipiranga", + "irish", + "iselect", + "ismaili", + "ist", + "istanbul", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jmp", + "jnj", + "joburg", + "jot", + "joy", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kia", + "kim", + "kinder", + "kindle", + "kitchen", + "kiwi", + "koeln", + "komatsu", + "kosher", + "kpmg", + "kpn", + "krd", + "kred", + "kuokgroup", + "kyoto", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "ltd", + "ltda", + "lundbeck", + "lupin", + "luxe", + "luxury", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mcd", + "mcdonalds", + "mckinsey", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "miami", + "microsoft", + "mini", + "mint", + "mit", + "mitsubishi", + "mlb", + "mls", + "mma", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "montblanc", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "msd", + "mtn", + "mtpc", + "mtr", + "mutual", + "nab", + "nadex", + "nagoya", + "nationwide", + "natura", + "navy", + "nba", + "nec", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nfl", + "ngo", + "nhk", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "nra", + "nrw", + "ntt", + "nyc", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "omega", + "one", + "ong", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "page", + "pamperedchef", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pet", + "pfizer", + "pharmacy", + 
"phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "pramerica", + "praxi", + "press", + "prime", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "pub", + "pwc", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rsvp", + "rugby", + "ruhr", + "run", + "rwe", + "ryukyu", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sbi", + "sbs", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "silk", + "sina", + "singles", + "site", + "ski", + "skin", + "sky", + "skype", + "sling", + "smart", + "smile", + "sncf", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "srl", + "srt", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "swatch", + "swiftcover", + "swiss", + "sydney", + "symantec", + "systems", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tci", + "tdk", + "team", + "tech", + "technology", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tjmaxx", + "tjx", + "tkmaxx", + "tmall", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "trade", + "trading", + "training", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tube", + "tui", + "tunes", + "tushu", + "tvs", + "ubank", + "ubs", + "uconnect", + "unicom", + "university", + "uno", + "uol", + "ups", + "vacations", + "vana", + "vanguard", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + 
"voyage", + "vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45q11c", + "xn--4gbrim", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fjq720a", + "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gk3at1e", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kpu716f", + "xn--kput3i", + "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbab2bd", + "xn--mgbb9fbpob", + "xn--mgbca7dzdo", + "xn--mgbi4ecexp", + "xn--mgbt3dhd", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--p1acf", + "xn--pbt977c", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--rhqv96g", + "xn--rovu88b", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--xhq521b", + "xn--zfr164b", + "xperia", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yun", + "zappos", + "zara", + "zero", + "zip", + "zippo", + "zone", + "zuerich", + "cc.ua", + "inf.ua", + "ltd.ua", + "beep.pl", + "*.compute.estate", + "*.alces.network", + "*.alwaysdata.net", + "cloudfront.net", + "*.compute.amazonaws.com", + "*.compute-1.amazonaws.com", + "*.compute.amazonaws.com.cn", + "us-east-1.amazonaws.com", + "cn-north-1.eb.amazonaws.com.cn", + "elasticbeanstalk.com", + "ap-northeast-1.elasticbeanstalk.com", + "ap-northeast-2.elasticbeanstalk.com", + "ap-south-1.elasticbeanstalk.com", + "ap-southeast-1.elasticbeanstalk.com", + "ap-southeast-2.elasticbeanstalk.com", + "ca-central-1.elasticbeanstalk.com", + "eu-central-1.elasticbeanstalk.com", + "eu-west-1.elasticbeanstalk.com", + "eu-west-2.elasticbeanstalk.com", + "sa-east-1.elasticbeanstalk.com", + "us-east-1.elasticbeanstalk.com", + "us-east-2.elasticbeanstalk.com", + "us-gov-west-1.elasticbeanstalk.com", + "us-west-1.elasticbeanstalk.com", + "us-west-2.elasticbeanstalk.com", + "*.elb.amazonaws.com", + "*.elb.amazonaws.com.cn", + "s3.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", + "s3-ap-northeast-2.amazonaws.com", + "s3-ap-south-1.amazonaws.com", + "s3-ap-southeast-1.amazonaws.com", + "s3-ap-southeast-2.amazonaws.com", + "s3-ca-central-1.amazonaws.com", 
+ "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", + "s3-eu-west-2.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-sa-east-1.amazonaws.com", + "s3-us-gov-west-1.amazonaws.com", + "s3-us-east-2.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.ap-northeast-2.amazonaws.com", + "s3.ap-south-1.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.ca-central-1.amazonaws.com", + "s3.eu-central-1.amazonaws.com", + "s3.eu-west-2.amazonaws.com", + "s3.us-east-2.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + "s3-website-us-east-1.amazonaws.com", + "s3-website-us-west-1.amazonaws.com", + "s3-website-us-west-2.amazonaws.com", + "s3-website-ap-northeast-1.amazonaws.com", + "s3-website-ap-southeast-1.amazonaws.com", + "s3-website-ap-southeast-2.amazonaws.com", + "s3-website-eu-west-1.amazonaws.com", + "s3-website-sa-east-1.amazonaws.com", + "s3-website.ap-northeast-2.amazonaws.com", + "s3-website.ap-south-1.amazonaws.com", + "s3-website.ca-central-1.amazonaws.com", + "s3-website.eu-central-1.amazonaws.com", + "s3-website.eu-west-2.amazonaws.com", + "s3-website.us-east-2.amazonaws.com", + "t3l3p0rt.net", + "tele.amune.org", + "on-aptible.com", + "user.party.eus", + "pimienta.org", + "poivron.org", + "potager.org", + "sweetpepper.org", + "myasustor.com", + "myfritz.net", + "*.awdev.ca", + "*.advisor.ws", + "backplaneapp.io", + "betainabox.com", + "bnr.la", + "boomla.net", + "boxfuse.io", + "square7.ch", + "bplaced.com", + "bplaced.de", + "square7.de", + "bplaced.net", + "square7.net", + "browsersafetymark.io", + "mycd.eu", + "ae.org", + "ar.com", + "br.com", + "cn.com", + "com.de", + "com.se", + "de.com", + "eu.com", + "gb.com", + "gb.net", + "hu.com", + "hu.net", + "jp.net", + "jpn.com", + "kr.com", + "mex.com", + "no.com", + "qc.com", + "ru.com", + "sa.com", + "se.com", + "se.net", + "uk.com", + "uk.net", + "us.com", + "uy.com", + "za.bz", + "za.com", + "africa.com", + "gr.com", + "in.net", + "us.org", + "co.com", + "c.la", + "certmgr.org", + "xenapponazure.com", + "virtueeldomein.nl", + "c66.me", + "jdevcloud.com", + "wpdevcloud.com", + "cloudaccess.host", + "freesite.host", + "cloudaccess.net", + "cloudcontrolled.com", + "cloudcontrolapp.com", + "co.ca", + "co.cz", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", + "cloudns.asia", + "cloudns.biz", + "cloudns.club", + "cloudns.cc", + "cloudns.eu", + "cloudns.in", + "cloudns.info", + "cloudns.org", + "cloudns.pro", + "cloudns.pw", + "cloudns.us", + "co.nl", + "co.no", + "dyn.cosidns.de", + "dynamisches-dns.de", + "dnsupdater.de", + "internet-dns.de", + "l-o-g-i-n.de", + "dynamic-dns.info", + "feste-ip.net", + "knx-server.net", + "static-access.net", + "realm.cz", + "*.cryptonomic.net", + "cupcake.is", + "cyon.link", + "cyon.site", + "daplie.me", + "localhost.daplie.me", + "biz.dk", + "co.dk", + "firm.dk", + "reg.dk", + "store.dk", + "debian.net", + "dedyn.io", + "dnshome.de", + "drayddns.com", + 
"dreamhosters.com", + "mydrobo.com", + "drud.io", + "drud.us", + "duckdns.org", + "dy.fi", + "tunk.org", + "dyndns-at-home.com", + "dyndns-at-work.com", + "dyndns-blog.com", + "dyndns-free.com", + "dyndns-home.com", + "dyndns-ip.com", + "dyndns-mail.com", + "dyndns-office.com", + "dyndns-pics.com", + "dyndns-remote.com", + "dyndns-server.com", + "dyndns-web.com", + "dyndns-wiki.com", + "dyndns-work.com", + "dyndns.biz", + "dyndns.info", + "dyndns.org", + "dyndns.tv", + "at-band-camp.net", + "ath.cx", + "barrel-of-knowledge.info", + "barrell-of-knowledge.info", + "better-than.tv", + "blogdns.com", + "blogdns.net", + "blogdns.org", + "blogsite.org", + "boldlygoingnowhere.org", + "broke-it.net", + "buyshouses.net", + "cechire.com", + "dnsalias.com", + "dnsalias.net", + "dnsalias.org", + "dnsdojo.com", + "dnsdojo.net", + "dnsdojo.org", + "does-it.net", + "doesntexist.com", + "doesntexist.org", + "dontexist.com", + "dontexist.net", + "dontexist.org", + "doomdns.com", + "doomdns.org", + "dvrdns.org", + "dyn-o-saur.com", + "dynalias.com", + "dynalias.net", + "dynalias.org", + "dynathome.net", + "dyndns.ws", + "endofinternet.net", + "endofinternet.org", + "endoftheinternet.org", + "est-a-la-maison.com", + "est-a-la-masion.com", + "est-le-patron.com", + "est-mon-blogueur.com", + "for-better.biz", + "for-more.biz", + "for-our.info", + "for-some.biz", + "for-the.biz", + "forgot.her.name", + "forgot.his.name", + "from-ak.com", + "from-al.com", + "from-ar.com", + "from-az.net", + "from-ca.com", + "from-co.net", + "from-ct.com", + "from-dc.com", + "from-de.com", + "from-fl.com", + "from-ga.com", + "from-hi.com", + "from-ia.com", + "from-id.com", + "from-il.com", + "from-in.com", + "from-ks.com", + "from-ky.com", + "from-la.net", + "from-ma.com", + "from-md.com", + "from-me.org", + "from-mi.com", + "from-mn.com", + "from-mo.com", + "from-ms.com", + "from-mt.com", + "from-nc.com", + "from-nd.com", + "from-ne.com", + "from-nh.com", + "from-nj.com", + "from-nm.com", + "from-nv.com", + "from-ny.net", + "from-oh.com", + "from-ok.com", + "from-or.com", + "from-pa.com", + "from-pr.com", + "from-ri.com", + "from-sc.com", + "from-sd.com", + "from-tn.com", + "from-tx.com", + "from-ut.com", + "from-va.com", + "from-vt.com", + "from-wa.com", + "from-wi.com", + "from-wv.com", + "from-wy.com", + "ftpaccess.cc", + "fuettertdasnetz.de", + "game-host.org", + "game-server.cc", + "getmyip.com", + "gets-it.net", + "go.dyndns.org", + "gotdns.com", + "gotdns.org", + "groks-the.info", + "groks-this.info", + "ham-radio-op.net", + "here-for-more.info", + "hobby-site.com", + "hobby-site.org", + "home.dyndns.org", + "homedns.org", + "homeftp.net", + "homeftp.org", + "homeip.net", + "homelinux.com", + "homelinux.net", + "homelinux.org", + "homeunix.com", + "homeunix.net", + "homeunix.org", + "iamallama.com", + "in-the-band.net", + "is-a-anarchist.com", + "is-a-blogger.com", + "is-a-bookkeeper.com", + "is-a-bruinsfan.org", + "is-a-bulls-fan.com", + "is-a-candidate.org", + "is-a-caterer.com", + "is-a-celticsfan.org", + "is-a-chef.com", + "is-a-chef.net", + "is-a-chef.org", + "is-a-conservative.com", + "is-a-cpa.com", + "is-a-cubicle-slave.com", + "is-a-democrat.com", + "is-a-designer.com", + "is-a-doctor.com", + "is-a-financialadvisor.com", + "is-a-geek.com", + "is-a-geek.net", + "is-a-geek.org", + "is-a-green.com", + "is-a-guru.com", + "is-a-hard-worker.com", + "is-a-hunter.com", + "is-a-knight.org", + "is-a-landscaper.com", + "is-a-lawyer.com", + "is-a-liberal.com", + "is-a-libertarian.com", + "is-a-linux-user.org", + 
"is-a-llama.com", + "is-a-musician.com", + "is-a-nascarfan.com", + "is-a-nurse.com", + "is-a-painter.com", + "is-a-patsfan.org", + "is-a-personaltrainer.com", + "is-a-photographer.com", + "is-a-player.com", + "is-a-republican.com", + "is-a-rockstar.com", + "is-a-socialist.com", + "is-a-soxfan.org", + "is-a-student.com", + "is-a-teacher.com", + "is-a-techie.com", + "is-a-therapist.com", + "is-an-accountant.com", + "is-an-actor.com", + "is-an-actress.com", + "is-an-anarchist.com", + "is-an-artist.com", + "is-an-engineer.com", + "is-an-entertainer.com", + "is-by.us", + "is-certified.com", + "is-found.org", + "is-gone.com", + "is-into-anime.com", + "is-into-cars.com", + "is-into-cartoons.com", + "is-into-games.com", + "is-leet.com", + "is-lost.org", + "is-not-certified.com", + "is-saved.org", + "is-slick.com", + "is-uberleet.com", + "is-very-bad.org", + "is-very-evil.org", + "is-very-good.org", + "is-very-nice.org", + "is-very-sweet.org", + "is-with-theband.com", + "isa-geek.com", + "isa-geek.net", + "isa-geek.org", + "isa-hockeynut.com", + "issmarterthanyou.com", + "isteingeek.de", + "istmein.de", + "kicks-ass.net", + "kicks-ass.org", + "knowsitall.info", + "land-4-sale.us", + "lebtimnetz.de", + "leitungsen.de", + "likes-pie.com", + "likescandy.com", + "merseine.nu", + "mine.nu", + "misconfused.org", + "mypets.ws", + "myphotos.cc", + "neat-url.com", + "office-on-the.net", + "on-the-web.tv", + "podzone.net", + "podzone.org", + "readmyblog.org", + "saves-the-whales.com", + "scrapper-site.net", + "scrapping.cc", + "selfip.biz", + "selfip.com", + "selfip.info", + "selfip.net", + "selfip.org", + "sells-for-less.com", + "sells-for-u.com", + "sells-it.net", + "sellsyourhome.org", + "servebbs.com", + "servebbs.net", + "servebbs.org", + "serveftp.net", + "serveftp.org", + "servegame.org", + "shacknet.nu", + "simple-url.com", + "space-to-rent.com", + "stuff-4-sale.org", + "stuff-4-sale.us", + "teaches-yoga.com", + "thruhere.net", + "traeumtgerade.de", + "webhop.biz", + "webhop.info", + "webhop.net", + "webhop.org", + "worse-than.tv", + "writesthisblog.com", + "ddnss.de", + "dyn.ddnss.de", + "dyndns.ddnss.de", + "dyndns1.de", + "dyn-ip24.de", + "home-webserver.de", + "dyn.home-webserver.de", + "myhome-server.de", + "ddnss.org", + "definima.net", + "definima.io", + "ddnsfree.com", + "ddnsgeek.com", + "giize.com", + "gleeze.com", + "kozow.com", + "loseyourip.com", + "ooguy.com", + "theworkpc.com", + "casacam.net", + "dynu.net", + "accesscam.org", + "camdvr.org", + "freeddns.org", + "mywire.org", + "webredirect.org", + "myddns.rocks", + "blogsite.xyz", + "dynv6.net", + "e4.cz", + "mytuleap.com", + "enonic.io", + "customer.enonic.io", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + "bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", + "eu-1.evennode.com", + 
"eu-2.evennode.com", + "eu-3.evennode.com", + "eu-4.evennode.com", + "us-1.evennode.com", + "us-2.evennode.com", + "us-3.evennode.com", + "us-4.evennode.com", + "twmail.cc", + "twmail.net", + "twmail.org", + "mymailer.com.tw", + "url.tw", + "apps.fbsbx.com", + "ru.net", + "adygeya.ru", + "bashkiria.ru", + "bir.ru", + "cbg.ru", + "com.ru", + "dagestan.ru", + "grozny.ru", + "kalmykia.ru", + "kustanai.ru", + "marine.ru", + "mordovia.ru", + "msk.ru", + "mytis.ru", + "nalchik.ru", + "nov.ru", + "pyatigorsk.ru", + "spb.ru", + "vladikavkaz.ru", + "vladimir.ru", + "abkhazia.su", + "adygeya.su", + "aktyubinsk.su", + "arkhangelsk.su", + "armenia.su", + "ashgabad.su", + "azerbaijan.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "bukhara.su", + "chimkent.su", + "dagestan.su", + "east-kazakhstan.su", + "exnet.su", + "georgia.su", + "grozny.su", + "ivanovo.su", + "jambyl.su", + "kalmykia.su", + "kaluga.su", + "karacol.su", + "karaganda.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "kustanai.su", + "lenug.su", + "mangyshlak.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "navoi.su", + "north-kazakhstan.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "tashkent.su", + "termez.su", + "togliatti.su", + "troitsk.su", + "tselinograd.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + "vladimir.su", + "vologda.su", + "channelsdvr.net", + "fastlylb.net", + "map.fastlylb.net", + "freetls.fastly.net", + "map.fastly.net", + "a.prod.fastly.net", + "global.prod.fastly.net", + "a.ssl.fastly.net", + "b.ssl.fastly.net", + "global.ssl.fastly.net", + "fhapp.xyz", + "fedorainfracloud.org", + "fedorapeople.org", + "cloud.fedoraproject.org", + "filegear.me", + "firebaseapp.com", + "flynnhub.com", + "flynnhosting.net", + "freebox-os.com", + "freeboxos.com", + "fbx-os.fr", + "fbxos.fr", + "freebox-os.fr", + "freeboxos.fr", + "myfusion.cloud", + "*.futurecms.at", + "futurehosting.at", + "futuremailing.at", + "*.ex.ortsinfo.at", + "*.kunden.ortsinfo.at", + "*.statics.cloud", + "service.gov.uk", + "github.io", + "githubusercontent.com", + "gitlab.io", + "homeoffice.gov.uk", + "ro.im", + "shop.ro", + "goip.de", + "*.0emm.com", + "appspot.com", + "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", + "blogspot.be", + "blogspot.bg", + "blogspot.bj", + "blogspot.ca", + "blogspot.cf", + "blogspot.ch", + "blogspot.cl", + "blogspot.co.at", + "blogspot.co.id", + "blogspot.co.il", + "blogspot.co.ke", + "blogspot.co.nz", + "blogspot.co.uk", + "blogspot.co.za", + "blogspot.com", + "blogspot.com.ar", + "blogspot.com.au", + "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", + "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", + "blogspot.com.tr", + "blogspot.com.uy", + "blogspot.cv", + "blogspot.cz", + "blogspot.de", + "blogspot.dk", + "blogspot.fi", + "blogspot.fr", + "blogspot.gr", + "blogspot.hk", + "blogspot.hr", + "blogspot.hu", + "blogspot.ie", + "blogspot.in", + "blogspot.is", + "blogspot.it", + "blogspot.jp", + "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", + "blogspot.mr", + "blogspot.mx", + "blogspot.my", + "blogspot.nl", + "blogspot.no", + "blogspot.pe", + "blogspot.pt", + "blogspot.qa", + "blogspot.re", + "blogspot.ro", + "blogspot.rs", + "blogspot.ru", + "blogspot.se", + "blogspot.sg", + "blogspot.si", + "blogspot.sk", + "blogspot.sn", + "blogspot.td", + "blogspot.tw", + 
"blogspot.ug", + "blogspot.vn", + "cloudfunctions.net", + "cloud.goog", + "codespot.com", + "googleapis.com", + "googlecode.com", + "pagespeedmobilizer.com", + "publishproxy.com", + "withgoogle.com", + "withyoutube.com", + "hashbang.sh", + "hasura-app.io", + "hepforge.org", + "herokuapp.com", + "herokussl.com", + "moonscale.net", + "iki.fi", + "biz.at", + "info.at", + "info.cx", + "ac.leg.br", + "al.leg.br", + "am.leg.br", + "ap.leg.br", + "ba.leg.br", + "ce.leg.br", + "df.leg.br", + "es.leg.br", + "go.leg.br", + "ma.leg.br", + "mg.leg.br", + "ms.leg.br", + "mt.leg.br", + "pa.leg.br", + "pb.leg.br", + "pe.leg.br", + "pi.leg.br", + "pr.leg.br", + "rj.leg.br", + "rn.leg.br", + "ro.leg.br", + "rr.leg.br", + "rs.leg.br", + "sc.leg.br", + "se.leg.br", + "sp.leg.br", + "to.leg.br", + "pixolino.com", + "ipifony.net", + "*.triton.zone", + "*.cns.joyent.com", + "js.org", + "keymachine.de", + "knightpoint.systems", + "co.krd", + "edu.krd", + "git-repos.de", + "lcube-server.de", + "svn-repos.de", + "we.bs", + "barsy.bg", + "barsyonline.com", + "barsy.de", + "barsy.eu", + "barsy.in", + "barsy.net", + "barsy.online", + "barsy.support", + "*.magentosite.cloud", + "hb.cldmail.ru", + "cloud.metacentrum.cz", + "custom.metacentrum.cz", + "meteorapp.com", + "eu.meteorapp.com", + "co.pl", + "azurewebsites.net", + "azure-mobile.net", + "cloudapp.net", + "bmoattachments.org", + "net.ru", + "org.ru", + "pp.ru", + "bitballoon.com", + "netlify.com", + "4u.com", + "ngrok.io", + "nfshost.com", + "nsupdate.info", + "nerdpol.ovh", + "blogsyte.com", + "brasilia.me", + "cable-modem.org", + "ciscofreak.com", + "collegefan.org", + "couchpotatofries.org", + "damnserver.com", + "ddns.me", + "ditchyourip.com", + "dnsfor.me", + "dnsiskinky.com", + "dvrcam.info", + "dynns.com", + "eating-organic.net", + "fantasyleague.cc", + "geekgalaxy.com", + "golffan.us", + "health-carereform.com", + "homesecuritymac.com", + "homesecuritypc.com", + "hopto.me", + "ilovecollege.info", + "loginto.me", + "mlbfan.org", + "mmafan.biz", + "myactivedirectory.com", + "mydissent.net", + "myeffect.net", + "mymediapc.net", + "mypsx.net", + "mysecuritycamera.com", + "mysecuritycamera.net", + "mysecuritycamera.org", + "net-freaks.com", + "nflfan.org", + "nhlfan.net", + "no-ip.ca", + "no-ip.co.uk", + "no-ip.net", + "noip.us", + "onthewifi.com", + "pgafan.net", + "point2this.com", + "pointto.us", + "privatizehealthinsurance.net", + "quicksytes.com", + "read-books.org", + "securitytactics.com", + "serveexchange.com", + "servehumour.com", + "servep2p.com", + "servesarcasm.com", + "stufftoread.com", + "ufcfan.org", + "unusualperson.com", + "workisboring.com", + "3utilities.com", + "bounceme.net", + "ddns.net", + "ddnsking.com", + "gotdns.ch", + "hopto.org", + "myftp.biz", + "myftp.org", + "myvnc.com", + "no-ip.biz", + "no-ip.info", + "no-ip.org", + "noip.me", + "redirectme.net", + "servebeer.com", + "serveblog.net", + "servecounterstrike.com", + "serveftp.com", + "servegame.com", + "servehalflife.com", + "servehttp.com", + "serveirc.com", + "serveminecraft.net", + "servemp3.com", + "servepics.com", + "servequake.com", + "sytes.net", + "webhop.me", + "zapto.org", + "stage.nodeart.io", + "nodum.co", + "nodum.io", + "nyc.mn", + "nom.ae", + "nom.ai", + "nom.al", + "nym.by", + "nym.bz", + "nom.cl", + "nom.gd", + "nom.gl", + "nym.gr", + "nom.gt", + "nom.hn", + "nom.im", + "nym.kz", + "nym.la", + "nom.li", + "nym.li", + "nym.lt", + "nym.lu", + "nym.me", + "nom.mk", + "nym.mx", + "nom.nu", + "nym.nz", + "nym.pe", + "nym.pt", + "nom.pw", + "nom.qa", + "nom.rs", + 
"nom.si", + "nym.sk", + "nym.su", + "nym.sx", + "nym.tw", + "nom.ug", + "nom.uy", + "nom.vc", + "nom.vg", + "cya.gg", + "nid.io", + "opencraft.hosting", + "operaunite.com", + "outsystemscloud.com", + "ownprovider.com", + "oy.lc", + "pgfog.com", + "pagefrontapp.com", + "art.pl", + "gliwice.pl", + "krakow.pl", + "poznan.pl", + "wroc.pl", + "zakopane.pl", + "pantheonsite.io", + "gotpantheon.com", + "mypep.link", + "on-web.fr", + "*.platform.sh", + "*.platformsh.site", + "xen.prgmr.com", + "priv.at", + "protonet.io", + "chirurgiens-dentistes-en-france.fr", + "byen.site", + "qa2.com", + "dev-myqnapcloud.com", + "alpha-myqnapcloud.com", + "myqnapcloud.com", + "*.quipelements.com", + "vapor.cloud", + "vaporcloud.io", + "rackmaze.com", + "rackmaze.net", + "rhcloud.com", + "hzc.io", + "wellbeingzone.eu", + "ptplus.fit", + "wellbeingzone.co.uk", + "sandcats.io", + "logoip.de", + "logoip.com", + "firewall-gateway.com", + "firewall-gateway.de", + "my-gateway.de", + "my-router.de", + "spdns.de", + "spdns.eu", + "firewall-gateway.net", + "my-firewall.org", + "myfirewall.org", + "spdns.org", + "*.sensiosite.cloud", + "biz.ua", + "co.ua", + "pp.ua", + "shiftedit.io", + "myshopblocks.com", + "1kapp.com", + "appchizi.com", + "applinzi.com", + "sinaapp.com", + "vipsinaapp.com", + "bounty-full.com", + "alpha.bounty-full.com", + "beta.bounty-full.com", + "static.land", + "dev.static.land", + "sites.static.land", + "apps.lair.io", + "*.stolos.io", + "spacekit.io", + "stackspace.space", + "storj.farm", + "temp-dns.com", + "diskstation.me", + "dscloud.biz", + "dscloud.me", + "dscloud.mobi", + "dsmynas.com", + "dsmynas.net", + "dsmynas.org", + "familyds.com", + "familyds.net", + "familyds.org", + "i234.me", + "myds.me", + "synology.me", + "vpnplus.to", + "taifun-dns.de", + "gda.pl", + "gdansk.pl", + "gdynia.pl", + "med.pl", + "sopot.pl", + "cust.dev.thingdust.io", + "cust.disrec.thingdust.io", + "cust.prod.thingdust.io", + "cust.testing.thingdust.io", + "bloxcms.com", + "townnews-staging.com", + "12hp.at", + "2ix.at", + "4lima.at", + "lima-city.at", + "12hp.ch", + "2ix.ch", + "4lima.ch", + "lima-city.ch", + "trafficplex.cloud", + "de.cool", + "12hp.de", + "2ix.de", + "4lima.de", + "lima-city.de", + "1337.pictures", + "clan.rip", + "lima-city.rocks", + "webspace.rocks", + "lima.zone", + "*.transurl.be", + "*.transurl.eu", + "*.transurl.nl", + "tuxfamily.org", + "dd-dns.de", + "diskstation.eu", + "diskstation.org", + "dray-dns.de", + "draydns.de", + "dyn-vpn.de", + "dynvpn.de", + "mein-vigor.de", + "my-vigor.de", + "my-wan.de", + "syno-ds.de", + "synology-diskstation.de", + "synology-ds.de", + "uber.space", + "hk.com", + "hk.org", + "ltd.hk", + "inc.hk", + "lib.de.us", + "router.management", + "v-info.info", + "wedeploy.io", + "wedeploy.me", + "wedeploy.sh", + "remotewd.com", + "wmflabs.org", + "cistron.nl", + "demon.nl", + "xs4all.space", + "yolasite.com", + "ybo.faith", + "yombo.me", + "homelink.one", + "ybo.party", + "ybo.review", + "ybo.science", + "ybo.trade", + "za.net", + "za.org", + "now.sh", +} + +var nodeLabels = [...]string{ + "aaa", + "aarp", + "abarth", + "abb", + "abbott", + "abbvie", + "abc", + "able", + "abogado", + "abudhabi", + "ac", + "academy", + "accenture", + "accountant", + "accountants", + "aco", + "active", + "actor", + "ad", + "adac", + "ads", + "adult", + "ae", + "aeg", + "aero", + "aetna", + "af", + "afamilycompany", + "afl", + "africa", + "ag", + "agakhan", + "agency", + "ai", + "aig", + "aigo", + "airbus", + "airforce", + "airtel", + "akdn", + "al", + "alfaromeo", + "alibaba", + 
"alipay", + "allfinanz", + "allstate", + "ally", + "alsace", + "alstom", + "am", + "americanexpress", + "americanfamily", + "amex", + "amfam", + "amica", + "amsterdam", + "analytics", + "android", + "anquan", + "anz", + "ao", + "aol", + "apartments", + "app", + "apple", + "aq", + "aquarelle", + "ar", + "arab", + "aramco", + "archi", + "army", + "arpa", + "art", + "arte", + "as", + "asda", + "asia", + "associates", + "at", + "athleta", + "attorney", + "au", + "auction", + "audi", + "audible", + "audio", + "auspost", + "author", + "auto", + "autos", + "avianca", + "aw", + "aws", + "ax", + "axa", + "az", + "azure", + "ba", + "baby", + "baidu", + "banamex", + "bananarepublic", + "band", + "bank", + "bar", + "barcelona", + "barclaycard", + "barclays", + "barefoot", + "bargains", + "baseball", + "basketball", + "bauhaus", + "bayern", + "bb", + "bbc", + "bbt", + "bbva", + "bcg", + "bcn", + "bd", + "be", + "beats", + "beauty", + "beer", + "bentley", + "berlin", + "best", + "bestbuy", + "bet", + "bf", + "bg", + "bh", + "bharti", + "bi", + "bible", + "bid", + "bike", + "bing", + "bingo", + "bio", + "biz", + "bj", + "black", + "blackfriday", + "blanco", + "blockbuster", + "blog", + "bloomberg", + "blue", + "bm", + "bms", + "bmw", + "bn", + "bnl", + "bnpparibas", + "bo", + "boats", + "boehringer", + "bofa", + "bom", + "bond", + "boo", + "book", + "booking", + "boots", + "bosch", + "bostik", + "boston", + "bot", + "boutique", + "box", + "br", + "bradesco", + "bridgestone", + "broadway", + "broker", + "brother", + "brussels", + "bs", + "bt", + "budapest", + "bugatti", + "build", + "builders", + "business", + "buy", + "buzz", + "bv", + "bw", + "by", + "bz", + "bzh", + "ca", + "cab", + "cafe", + "cal", + "call", + "calvinklein", + "cam", + "camera", + "camp", + "cancerresearch", + "canon", + "capetown", + "capital", + "capitalone", + "car", + "caravan", + "cards", + "care", + "career", + "careers", + "cars", + "cartier", + "casa", + "case", + "caseih", + "cash", + "casino", + "cat", + "catering", + "catholic", + "cba", + "cbn", + "cbre", + "cbs", + "cc", + "cd", + "ceb", + "center", + "ceo", + "cern", + "cf", + "cfa", + "cfd", + "cg", + "ch", + "chanel", + "channel", + "chase", + "chat", + "cheap", + "chintai", + "chloe", + "christmas", + "chrome", + "chrysler", + "church", + "ci", + "cipriani", + "circle", + "cisco", + "citadel", + "citi", + "citic", + "city", + "cityeats", + "ck", + "cl", + "claims", + "cleaning", + "click", + "clinic", + "clinique", + "clothing", + "cloud", + "club", + "clubmed", + "cm", + "cn", + "co", + "coach", + "codes", + "coffee", + "college", + "cologne", + "com", + "comcast", + "commbank", + "community", + "company", + "compare", + "computer", + "comsec", + "condos", + "construction", + "consulting", + "contact", + "contractors", + "cooking", + "cookingchannel", + "cool", + "coop", + "corsica", + "country", + "coupon", + "coupons", + "courses", + "cr", + "credit", + "creditcard", + "creditunion", + "cricket", + "crown", + "crs", + "cruise", + "cruises", + "csc", + "cu", + "cuisinella", + "cv", + "cw", + "cx", + "cy", + "cymru", + "cyou", + "cz", + "dabur", + "dad", + "dance", + "data", + "date", + "dating", + "datsun", + "day", + "dclk", + "dds", + "de", + "deal", + "dealer", + "deals", + "degree", + "delivery", + "dell", + "deloitte", + "delta", + "democrat", + "dental", + "dentist", + "desi", + "design", + "dev", + "dhl", + "diamonds", + "diet", + "digital", + "direct", + "directory", + "discount", + "discover", + "dish", + "diy", + "dj", + "dk", + "dm", + "dnp", + "do", + 
"docs", + "doctor", + "dodge", + "dog", + "doha", + "domains", + "dot", + "download", + "drive", + "dtv", + "dubai", + "duck", + "dunlop", + "duns", + "dupont", + "durban", + "dvag", + "dvr", + "dz", + "earth", + "eat", + "ec", + "eco", + "edeka", + "edu", + "education", + "ee", + "eg", + "email", + "emerck", + "energy", + "engineer", + "engineering", + "enterprises", + "epost", + "epson", + "equipment", + "er", + "ericsson", + "erni", + "es", + "esq", + "estate", + "esurance", + "et", + "etisalat", + "eu", + "eurovision", + "eus", + "events", + "everbank", + "exchange", + "expert", + "exposed", + "express", + "extraspace", + "fage", + "fail", + "fairwinds", + "faith", + "family", + "fan", + "fans", + "farm", + "farmers", + "fashion", + "fast", + "fedex", + "feedback", + "ferrari", + "ferrero", + "fi", + "fiat", + "fidelity", + "fido", + "film", + "final", + "finance", + "financial", + "fire", + "firestone", + "firmdale", + "fish", + "fishing", + "fit", + "fitness", + "fj", + "fk", + "flickr", + "flights", + "flir", + "florist", + "flowers", + "fly", + "fm", + "fo", + "foo", + "food", + "foodnetwork", + "football", + "ford", + "forex", + "forsale", + "forum", + "foundation", + "fox", + "fr", + "free", + "fresenius", + "frl", + "frogans", + "frontdoor", + "frontier", + "ftr", + "fujitsu", + "fujixerox", + "fun", + "fund", + "furniture", + "futbol", + "fyi", + "ga", + "gal", + "gallery", + "gallo", + "gallup", + "game", + "games", + "gap", + "garden", + "gb", + "gbiz", + "gd", + "gdn", + "ge", + "gea", + "gent", + "genting", + "george", + "gf", + "gg", + "ggee", + "gh", + "gi", + "gift", + "gifts", + "gives", + "giving", + "gl", + "glade", + "glass", + "gle", + "global", + "globo", + "gm", + "gmail", + "gmbh", + "gmo", + "gmx", + "gn", + "godaddy", + "gold", + "goldpoint", + "golf", + "goo", + "goodhands", + "goodyear", + "goog", + "google", + "gop", + "got", + "gov", + "gp", + "gq", + "gr", + "grainger", + "graphics", + "gratis", + "green", + "gripe", + "grocery", + "group", + "gs", + "gt", + "gu", + "guardian", + "gucci", + "guge", + "guide", + "guitars", + "guru", + "gw", + "gy", + "hair", + "hamburg", + "hangout", + "haus", + "hbo", + "hdfc", + "hdfcbank", + "health", + "healthcare", + "help", + "helsinki", + "here", + "hermes", + "hgtv", + "hiphop", + "hisamitsu", + "hitachi", + "hiv", + "hk", + "hkt", + "hm", + "hn", + "hockey", + "holdings", + "holiday", + "homedepot", + "homegoods", + "homes", + "homesense", + "honda", + "honeywell", + "horse", + "hospital", + "host", + "hosting", + "hot", + "hoteles", + "hotels", + "hotmail", + "house", + "how", + "hr", + "hsbc", + "ht", + "htc", + "hu", + "hughes", + "hyatt", + "hyundai", + "ibm", + "icbc", + "ice", + "icu", + "id", + "ie", + "ieee", + "ifm", + "ikano", + "il", + "im", + "imamat", + "imdb", + "immo", + "immobilien", + "in", + "industries", + "infiniti", + "info", + "ing", + "ink", + "institute", + "insurance", + "insure", + "int", + "intel", + "international", + "intuit", + "investments", + "io", + "ipiranga", + "iq", + "ir", + "irish", + "is", + "iselect", + "ismaili", + "ist", + "istanbul", + "it", + "itau", + "itv", + "iveco", + "iwc", + "jaguar", + "java", + "jcb", + "jcp", + "je", + "jeep", + "jetzt", + "jewelry", + "jio", + "jlc", + "jll", + "jm", + "jmp", + "jnj", + "jo", + "jobs", + "joburg", + "jot", + "joy", + "jp", + "jpmorgan", + "jprs", + "juegos", + "juniper", + "kaufen", + "kddi", + "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", + "kfh", + "kg", + "kh", + "ki", + "kia", + "kim", + "kinder", + 
"kindle", + "kitchen", + "kiwi", + "km", + "kn", + "koeln", + "komatsu", + "kosher", + "kp", + "kpmg", + "kpn", + "kr", + "krd", + "kred", + "kuokgroup", + "kw", + "ky", + "kyoto", + "kz", + "la", + "lacaixa", + "ladbrokes", + "lamborghini", + "lamer", + "lancaster", + "lancia", + "lancome", + "land", + "landrover", + "lanxess", + "lasalle", + "lat", + "latino", + "latrobe", + "law", + "lawyer", + "lb", + "lc", + "lds", + "lease", + "leclerc", + "lefrak", + "legal", + "lego", + "lexus", + "lgbt", + "li", + "liaison", + "lidl", + "life", + "lifeinsurance", + "lifestyle", + "lighting", + "like", + "lilly", + "limited", + "limo", + "lincoln", + "linde", + "link", + "lipsy", + "live", + "living", + "lixil", + "lk", + "loan", + "loans", + "locker", + "locus", + "loft", + "lol", + "london", + "lotte", + "lotto", + "love", + "lpl", + "lplfinancial", + "lr", + "ls", + "lt", + "ltd", + "ltda", + "lu", + "lundbeck", + "lupin", + "luxe", + "luxury", + "lv", + "ly", + "ma", + "macys", + "madrid", + "maif", + "maison", + "makeup", + "man", + "management", + "mango", + "map", + "market", + "marketing", + "markets", + "marriott", + "marshalls", + "maserati", + "mattel", + "mba", + "mc", + "mcd", + "mcdonalds", + "mckinsey", + "md", + "me", + "med", + "media", + "meet", + "melbourne", + "meme", + "memorial", + "men", + "menu", + "meo", + "merckmsd", + "metlife", + "mg", + "mh", + "miami", + "microsoft", + "mil", + "mini", + "mint", + "mit", + "mitsubishi", + "mk", + "ml", + "mlb", + "mls", + "mm", + "mma", + "mn", + "mo", + "mobi", + "mobile", + "mobily", + "moda", + "moe", + "moi", + "mom", + "monash", + "money", + "monster", + "montblanc", + "mopar", + "mormon", + "mortgage", + "moscow", + "moto", + "motorcycles", + "mov", + "movie", + "movistar", + "mp", + "mq", + "mr", + "ms", + "msd", + "mt", + "mtn", + "mtpc", + "mtr", + "mu", + "museum", + "mutual", + "mv", + "mw", + "mx", + "my", + "mz", + "na", + "nab", + "nadex", + "nagoya", + "name", + "nationwide", + "natura", + "navy", + "nba", + "nc", + "ne", + "nec", + "net", + "netbank", + "netflix", + "network", + "neustar", + "new", + "newholland", + "news", + "next", + "nextdirect", + "nexus", + "nf", + "nfl", + "ng", + "ngo", + "nhk", + "ni", + "nico", + "nike", + "nikon", + "ninja", + "nissan", + "nissay", + "nl", + "no", + "nokia", + "northwesternmutual", + "norton", + "now", + "nowruz", + "nowtv", + "np", + "nr", + "nra", + "nrw", + "ntt", + "nu", + "nyc", + "nz", + "obi", + "observer", + "off", + "office", + "okinawa", + "olayan", + "olayangroup", + "oldnavy", + "ollo", + "om", + "omega", + "one", + "ong", + "onion", + "onl", + "online", + "onyourside", + "ooo", + "open", + "oracle", + "orange", + "org", + "organic", + "origins", + "osaka", + "otsuka", + "ott", + "ovh", + "pa", + "page", + "pamperedchef", + "panasonic", + "panerai", + "paris", + "pars", + "partners", + "parts", + "party", + "passagens", + "pay", + "pccw", + "pe", + "pet", + "pf", + "pfizer", + "pg", + "ph", + "pharmacy", + "phd", + "philips", + "phone", + "photo", + "photography", + "photos", + "physio", + "piaget", + "pics", + "pictet", + "pictures", + "pid", + "pin", + "ping", + "pink", + "pioneer", + "pizza", + "pk", + "pl", + "place", + "play", + "playstation", + "plumbing", + "plus", + "pm", + "pn", + "pnc", + "pohl", + "poker", + "politie", + "porn", + "post", + "pr", + "pramerica", + "praxi", + "press", + "prime", + "pro", + "prod", + "productions", + "prof", + "progressive", + "promo", + "properties", + "property", + "protection", + "pru", + "prudential", + "ps", + "pt", + 
"pub", + "pw", + "pwc", + "py", + "qa", + "qpon", + "quebec", + "quest", + "qvc", + "racing", + "radio", + "raid", + "re", + "read", + "realestate", + "realtor", + "realty", + "recipes", + "red", + "redstone", + "redumbrella", + "rehab", + "reise", + "reisen", + "reit", + "reliance", + "ren", + "rent", + "rentals", + "repair", + "report", + "republican", + "rest", + "restaurant", + "review", + "reviews", + "rexroth", + "rich", + "richardli", + "ricoh", + "rightathome", + "ril", + "rio", + "rip", + "rmit", + "ro", + "rocher", + "rocks", + "rodeo", + "rogers", + "room", + "rs", + "rsvp", + "ru", + "rugby", + "ruhr", + "run", + "rw", + "rwe", + "ryukyu", + "sa", + "saarland", + "safe", + "safety", + "sakura", + "sale", + "salon", + "samsclub", + "samsung", + "sandvik", + "sandvikcoromant", + "sanofi", + "sap", + "sapo", + "sarl", + "sas", + "save", + "saxo", + "sb", + "sbi", + "sbs", + "sc", + "sca", + "scb", + "schaeffler", + "schmidt", + "scholarships", + "school", + "schule", + "schwarz", + "science", + "scjohnson", + "scor", + "scot", + "sd", + "se", + "search", + "seat", + "secure", + "security", + "seek", + "select", + "sener", + "services", + "ses", + "seven", + "sew", + "sex", + "sexy", + "sfr", + "sg", + "sh", + "shangrila", + "sharp", + "shaw", + "shell", + "shia", + "shiksha", + "shoes", + "shop", + "shopping", + "shouji", + "show", + "showtime", + "shriram", + "si", + "silk", + "sina", + "singles", + "site", + "sj", + "sk", + "ski", + "skin", + "sky", + "skype", + "sl", + "sling", + "sm", + "smart", + "smile", + "sn", + "sncf", + "so", + "soccer", + "social", + "softbank", + "software", + "sohu", + "solar", + "solutions", + "song", + "sony", + "soy", + "space", + "spiegel", + "spot", + "spreadbetting", + "sr", + "srl", + "srt", + "st", + "stada", + "staples", + "star", + "starhub", + "statebank", + "statefarm", + "statoil", + "stc", + "stcgroup", + "stockholm", + "storage", + "store", + "stream", + "studio", + "study", + "style", + "su", + "sucks", + "supplies", + "supply", + "support", + "surf", + "surgery", + "suzuki", + "sv", + "swatch", + "swiftcover", + "swiss", + "sx", + "sy", + "sydney", + "symantec", + "systems", + "sz", + "tab", + "taipei", + "talk", + "taobao", + "target", + "tatamotors", + "tatar", + "tattoo", + "tax", + "taxi", + "tc", + "tci", + "td", + "tdk", + "team", + "tech", + "technology", + "tel", + "telecity", + "telefonica", + "temasek", + "tennis", + "teva", + "tf", + "tg", + "th", + "thd", + "theater", + "theatre", + "tiaa", + "tickets", + "tienda", + "tiffany", + "tips", + "tires", + "tirol", + "tj", + "tjmaxx", + "tjx", + "tk", + "tkmaxx", + "tl", + "tm", + "tmall", + "tn", + "to", + "today", + "tokyo", + "tools", + "top", + "toray", + "toshiba", + "total", + "tours", + "town", + "toyota", + "toys", + "tr", + "trade", + "trading", + "training", + "travel", + "travelchannel", + "travelers", + "travelersinsurance", + "trust", + "trv", + "tt", + "tube", + "tui", + "tunes", + "tushu", + "tv", + "tvs", + "tw", + "tz", + "ua", + "ubank", + "ubs", + "uconnect", + "ug", + "uk", + "unicom", + "university", + "uno", + "uol", + "ups", + "us", + "uy", + "uz", + "va", + "vacations", + "vana", + "vanguard", + "vc", + "ve", + "vegas", + "ventures", + "verisign", + "versicherung", + "vet", + "vg", + "vi", + "viajes", + "video", + "vig", + "viking", + "villas", + "vin", + "vip", + "virgin", + "visa", + "vision", + "vista", + "vistaprint", + "viva", + "vivo", + "vlaanderen", + "vn", + "vodka", + "volkswagen", + "volvo", + "vote", + "voting", + "voto", + "voyage", + "vu", + 
"vuelos", + "wales", + "walmart", + "walter", + "wang", + "wanggou", + "warman", + "watch", + "watches", + "weather", + "weatherchannel", + "webcam", + "weber", + "website", + "wed", + "wedding", + "weibo", + "weir", + "wf", + "whoswho", + "wien", + "wiki", + "williamhill", + "win", + "windows", + "wine", + "winners", + "wme", + "wolterskluwer", + "woodside", + "work", + "works", + "world", + "wow", + "ws", + "wtc", + "wtf", + "xbox", + "xerox", + "xfinity", + "xihuan", + "xin", + "xn--11b4c3d", + "xn--1ck2e1b", + "xn--1qqw23a", + "xn--2scrj9c", + "xn--30rr7y", + "xn--3bst00m", + "xn--3ds443g", + "xn--3e0b707e", + "xn--3hcrj9c", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", + "xn--45br5cyl", + "xn--45brj9c", + "xn--45q11c", + "xn--4gbrim", + "xn--54b7fta0cc", + "xn--55qw42g", + "xn--55qx5d", + "xn--5su34j936bgsg", + "xn--5tzm5g", + "xn--6frz82g", + "xn--6qq986b3xl", + "xn--80adxhks", + "xn--80ao21a", + "xn--80aqecdr1a", + "xn--80asehdb", + "xn--80aswg", + "xn--8y0a063a", + "xn--90a3ac", + "xn--90ae", + "xn--90ais", + "xn--9dbq2a", + "xn--9et52u", + "xn--9krt00a", + "xn--b4w605ferd", + "xn--bck1b9a5dre4c", + "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", + "xn--cg4bki", + "xn--clchc0ea0b2g2a9gcd", + "xn--czr694b", + "xn--czrs0t", + "xn--czru2d", + "xn--d1acj3b", + "xn--d1alf", + "xn--e1a4c", + "xn--eckvdtc9d", + "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", + "xn--fiq228c5hs", + "xn--fiq64b", + "xn--fiqs8s", + "xn--fiqz9s", + "xn--fjq720a", + "xn--flw351e", + "xn--fpcrj9c3d", + "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", + "xn--gecrj9c", + "xn--gk3at1e", + "xn--h2breg3eve", + "xn--h2brj9c", + "xn--h2brj9c8c", + "xn--hxt814e", + "xn--i1b6b1a6a2e", + "xn--imr513n", + "xn--io0a7i", + "xn--j1aef", + "xn--j1amh", + "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", + "xn--kcrx77d1x4a", + "xn--kprw13d", + "xn--kpry57d", + "xn--kpu716f", + "xn--kput3i", + "xn--l1acc", + "xn--lgbbat1ad8j", + "xn--mgb2ddes", + "xn--mgb9awbf", + "xn--mgba3a3ejt", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", + "xn--mgbaakc7dvf", + "xn--mgbaam7a8h", + "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", + "xn--mgbayh7gpa", + "xn--mgbb9fbpob", + "xn--mgbbh1a71e", + "xn--mgbc0a9azcg", + "xn--mgbca7dzdo", + "xn--mgberp4a5d4a87g", + "xn--mgberp4a5d4ar", + "xn--mgbgu82a", + "xn--mgbi4ecexp", + "xn--mgbpl2fh", + "xn--mgbqly7c0a67fbc", + "xn--mgbqly7cvafr", + "xn--mgbt3dhd", + "xn--mgbtf8fl", + "xn--mgbtx2b", + "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", + "xn--mxtq1m", + "xn--ngbc5azd", + "xn--ngbe9e0a", + "xn--ngbrx", + "xn--nnx388a", + "xn--node", + "xn--nqv7f", + "xn--nqv7fs00ema", + "xn--nyqy26a", + "xn--o3cw4h", + "xn--ogbpf8fl", + "xn--p1acf", + "xn--p1ai", + "xn--pbt977c", + "xn--pgbs0dh", + "xn--pssy2u", + "xn--q9jyb4c", + "xn--qcka1pmc", + "xn--qxam", + "xn--rhqv96g", + "xn--rovu88b", + "xn--rvc1e0am3e", + "xn--s9brj9c", + "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", + "xn--tiq49xqyj", + "xn--unup4y", + "xn--vermgensberater-ctb", + "xn--vermgensberatung-pwb", + "xn--vhquv", + "xn--vuq861b", + "xn--w4r85el8fhu5dnra", + "xn--w4rs40l", + "xn--wgbh1c", + "xn--wgbl6a", + "xn--xhq521b", + "xn--xkc2al3hye2a", + "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", + "xn--yfro4i67o", + "xn--ygbi2ammx", + "xn--zfr164b", + "xperia", + "xxx", + "xyz", + "yachts", + "yahoo", + "yamaxun", + "yandex", + "ye", + "yodobashi", + "yoga", + "yokohama", + "you", + "youtube", + "yt", + "yun", + "za", + "zappos", + "zara", + "zero", + "zip", 
+ "zippo", + "zm", + "zone", + "zuerich", + "zw", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "nom", + "ac", + "blogspot", + "co", + "gov", + "mil", + "net", + "nom", + "org", + "sch", + "accident-investigation", + "accident-prevention", + "aerobatic", + "aeroclub", + "aerodrome", + "agents", + "air-surveillance", + "air-traffic-control", + "aircraft", + "airline", + "airport", + "airtraffic", + "ambulance", + "amusement", + "association", + "author", + "ballooning", + "broker", + "caa", + "cargo", + "catering", + "certification", + "championship", + "charter", + "civilaviation", + "club", + "conference", + "consultant", + "consulting", + "control", + "council", + "crew", + "design", + "dgca", + "educator", + "emergency", + "engine", + "engineer", + "entertainment", + "equipment", + "exchange", + "express", + "federation", + "flight", + "freight", + "fuel", + "gliding", + "government", + "groundhandling", + "group", + "hanggliding", + "homebuilt", + "insurance", + "journal", + "journalist", + "leasing", + "logistics", + "magazine", + "maintenance", + "media", + "microlight", + "modelling", + "navigation", + "parachuting", + "paragliding", + "passenger-association", + "pilot", + "press", + "production", + "recreation", + "repbody", + "res", + "research", + "rotorcraft", + "safety", + "scientist", + "services", + "show", + "skydiving", + "software", + "student", + "trader", + "trading", + "trainer", + "union", + "workinggroup", + "works", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "net", + "nom", + "org", + "com", + "net", + "nom", + "off", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "ed", + "gv", + "it", + "og", + "pb", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "musica", + "net", + "org", + "tur", + "blogspot", + "e164", + "in-addr", + "ip6", + "iris", + "uri", + "urn", + "gov", + "cloudns", + "12hp", + "2ix", + "4lima", + "ac", + "biz", + "co", + "futurecms", + "futurehosting", + "futuremailing", + "gv", + "info", + "lima-city", + "or", + "ortsinfo", + "priv", + "blogspot", + "ex", + "kunden", + "act", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "info", + "net", + "nsw", + "nt", + "org", + "oz", + "qld", + "sa", + "tas", + "vic", + "wa", + "blogspot", + "act", + "nsw", + "nt", + "qld", + "sa", + "tas", + "vic", + "wa", + "qld", + "sa", + "tas", + "vic", + "wa", + "com", + "biz", + "com", + "edu", + "gov", + "info", + "int", + "mil", + "name", + "net", + "org", + "pp", + "pro", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "net", + "org", + "store", + "tv", + "ac", + "blogspot", + "transurl", + "gov", + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "a", + "b", + "barsy", + "blogspot", + "c", + "d", + "e", + "f", + "g", + "h", + "i", + "j", + "k", + "l", + "m", + "n", + "o", + "p", + "q", + "r", + "s", + "t", + "u", + "v", + "w", + "x", + "y", + "z", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "or", + "org", + "cloudns", + "dscloud", + "dyndns", + "for-better", + "for-more", + "for-some", + "for-the", + "mmafan", + "myftp", + "no-ip", + "selfip", + "webhop", + "asso", + "barreau", + "blogspot", + "gouv", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gob", + "gov", + "int", + "mil", + "net", + "org", + "tv", + "adm", + "adv", + "agr", + "am", + "arq", + "art", + "ato", + "b", + "belem", + "bio", + "blog", + "bmd", + 
"cim", + "cng", + "cnt", + "com", + "coop", + "cri", + "def", + "ecn", + "eco", + "edu", + "emp", + "eng", + "esp", + "etc", + "eti", + "far", + "flog", + "floripa", + "fm", + "fnd", + "fot", + "fst", + "g12", + "ggf", + "gov", + "imb", + "ind", + "inf", + "jampa", + "jor", + "jus", + "leg", + "lel", + "mat", + "med", + "mil", + "mp", + "mus", + "net", + "nom", + "not", + "ntr", + "odo", + "org", + "poa", + "ppg", + "pro", + "psc", + "psi", + "qsl", + "radio", + "rec", + "recife", + "slg", + "srv", + "taxi", + "teo", + "tmp", + "trd", + "tur", + "tv", + "vet", + "vix", + "vlog", + "wiki", + "zlg", + "blogspot", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "ac", + "al", + "am", + "ap", + "ba", + "ce", + "df", + "es", + "go", + "ma", + "mg", + "ms", + "mt", + "pa", + "pb", + "pe", + "pi", + "pr", + "rj", + "rn", + "ro", + "rr", + "rs", + "sc", + "se", + "sp", + "to", + "com", + "edu", + "gov", + "net", + "org", + "we", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "com", + "gov", + "mil", + "nym", + "of", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "za", + "ab", + "awdev", + "bc", + "blogspot", + "co", + "gc", + "mb", + "nb", + "nf", + "nl", + "no-ip", + "ns", + "nt", + "nu", + "on", + "pe", + "qc", + "sk", + "yk", + "cloudns", + "fantasyleague", + "ftpaccess", + "game-server", + "myphotos", + "scrapping", + "twmail", + "gov", + "blogspot", + "12hp", + "2ix", + "4lima", + "blogspot", + "gotdns", + "lima-city", + "square7", + "ac", + "asso", + "co", + "com", + "ed", + "edu", + "go", + "gouv", + "int", + "md", + "net", + "or", + "org", + "presse", + "xn--aroport-bya", + "www", + "blogspot", + "co", + "gob", + "gov", + "mil", + "nom", + "magentosite", + "myfusion", + "sensiosite", + "statics", + "trafficplex", + "vapor", + "cloudns", + "co", + "com", + "gov", + "net", + "ac", + "ah", + "bj", + "com", + "cq", + "edu", + "fj", + "gd", + "gov", + "gs", + "gx", + "gz", + "ha", + "hb", + "he", + "hi", + "hk", + "hl", + "hn", + "jl", + "js", + "jx", + "ln", + "mil", + "mo", + "net", + "nm", + "nx", + "org", + "qh", + "sc", + "sd", + "sh", + "sn", + "sx", + "tj", + "tw", + "xj", + "xn--55qx5d", + "xn--io0a7i", + "xn--od0alg", + "xz", + "yn", + "zj", + "amazonaws", + "cn-north-1", + "compute", + "eb", + "elb", + "s3", + "cn-north-1", + "arts", + "com", + "edu", + "firm", + "gov", + "info", + "int", + "mil", + "net", + "nodum", + "nom", + "org", + "rec", + "web", + "blogspot", + "0emm", + "1kapp", + "3utilities", + "4u", + "africa", + "alpha-myqnapcloud", + "amazonaws", + "appchizi", + "applinzi", + "appspot", + "ar", + "barsyonline", + "betainabox", + "bitballoon", + "blogdns", + "blogspot", + "blogsyte", + "bloxcms", + "bounty-full", + "bplaced", + "br", + "cechire", + "ciscofreak", + "cloudcontrolapp", + "cloudcontrolled", + "cn", + "co", + "codespot", + "damnserver", + "ddnsfree", + "ddnsgeek", + "ddnsking", + "de", + "dev-myqnapcloud", + "ditchyourip", + "dnsalias", + "dnsdojo", + "dnsiskinky", + "doesntexist", + "dontexist", + "doomdns", + "drayddns", + "dreamhosters", + "dsmynas", + "dyn-o-saur", + "dynalias", + "dyndns-at-home", + "dyndns-at-work", + "dyndns-blog", + "dyndns-free", + "dyndns-home", + "dyndns-ip", + "dyndns-mail", + "dyndns-office", + "dyndns-pics", + "dyndns-remote", + "dyndns-server", + "dyndns-web", + "dyndns-wiki", + "dyndns-work", + "dynns", + 
"elasticbeanstalk", + "est-a-la-maison", + "est-a-la-masion", + "est-le-patron", + "est-mon-blogueur", + "eu", + "evennode", + "familyds", + "fbsbx", + "firebaseapp", + "firewall-gateway", + "flynnhub", + "freebox-os", + "freeboxos", + "from-ak", + "from-al", + "from-ar", + "from-ca", + "from-ct", + "from-dc", + "from-de", + "from-fl", + "from-ga", + "from-hi", + "from-ia", + "from-id", + "from-il", + "from-in", + "from-ks", + "from-ky", + "from-ma", + "from-md", + "from-mi", + "from-mn", + "from-mo", + "from-ms", + "from-mt", + "from-nc", + "from-nd", + "from-ne", + "from-nh", + "from-nj", + "from-nm", + "from-nv", + "from-oh", + "from-ok", + "from-or", + "from-pa", + "from-pr", + "from-ri", + "from-sc", + "from-sd", + "from-tn", + "from-tx", + "from-ut", + "from-va", + "from-vt", + "from-wa", + "from-wi", + "from-wv", + "from-wy", + "gb", + "geekgalaxy", + "getmyip", + "giize", + "githubusercontent", + "gleeze", + "googleapis", + "googlecode", + "gotdns", + "gotpantheon", + "gr", + "health-carereform", + "herokuapp", + "herokussl", + "hk", + "hobby-site", + "homelinux", + "homesecuritymac", + "homesecuritypc", + "homeunix", + "hu", + "iamallama", + "is-a-anarchist", + "is-a-blogger", + "is-a-bookkeeper", + "is-a-bulls-fan", + "is-a-caterer", + "is-a-chef", + "is-a-conservative", + "is-a-cpa", + "is-a-cubicle-slave", + "is-a-democrat", + "is-a-designer", + "is-a-doctor", + "is-a-financialadvisor", + "is-a-geek", + "is-a-green", + "is-a-guru", + "is-a-hard-worker", + "is-a-hunter", + "is-a-landscaper", + "is-a-lawyer", + "is-a-liberal", + "is-a-libertarian", + "is-a-llama", + "is-a-musician", + "is-a-nascarfan", + "is-a-nurse", + "is-a-painter", + "is-a-personaltrainer", + "is-a-photographer", + "is-a-player", + "is-a-republican", + "is-a-rockstar", + "is-a-socialist", + "is-a-student", + "is-a-teacher", + "is-a-techie", + "is-a-therapist", + "is-an-accountant", + "is-an-actor", + "is-an-actress", + "is-an-anarchist", + "is-an-artist", + "is-an-engineer", + "is-an-entertainer", + "is-certified", + "is-gone", + "is-into-anime", + "is-into-cars", + "is-into-cartoons", + "is-into-games", + "is-leet", + "is-not-certified", + "is-slick", + "is-uberleet", + "is-with-theband", + "isa-geek", + "isa-hockeynut", + "issmarterthanyou", + "jdevcloud", + "joyent", + "jpn", + "kozow", + "kr", + "likes-pie", + "likescandy", + "logoip", + "loseyourip", + "meteorapp", + "mex", + "myactivedirectory", + "myasustor", + "mydrobo", + "myqnapcloud", + "mysecuritycamera", + "myshopblocks", + "mytuleap", + "myvnc", + "neat-url", + "net-freaks", + "netlify", + "nfshost", + "no", + "on-aptible", + "onthewifi", + "ooguy", + "operaunite", + "outsystemscloud", + "ownprovider", + "pagefrontapp", + "pagespeedmobilizer", + "pgfog", + "pixolino", + "point2this", + "prgmr", + "publishproxy", + "qa2", + "qc", + "quicksytes", + "quipelements", + "rackmaze", + "remotewd", + "rhcloud", + "ru", + "sa", + "saves-the-whales", + "se", + "securitytactics", + "selfip", + "sells-for-less", + "sells-for-u", + "servebbs", + "servebeer", + "servecounterstrike", + "serveexchange", + "serveftp", + "servegame", + "servehalflife", + "servehttp", + "servehumour", + "serveirc", + "servemp3", + "servep2p", + "servepics", + "servequake", + "servesarcasm", + "simple-url", + "sinaapp", + "space-to-rent", + "stufftoread", + "teaches-yoga", + "temp-dns", + "theworkpc", + "townnews-staging", + "uk", + "unusualperson", + "us", + "uy", + "vipsinaapp", + "withgoogle", + "withyoutube", + "workisboring", + "wpdevcloud", + "writesthisblog", + 
"xenapponazure", + "yolasite", + "za", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "compute", + "compute-1", + "elb", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "s3", + "s3-ap-northeast-1", + "s3-ap-northeast-2", + "s3-ap-south-1", + "s3-ap-southeast-1", + "s3-ap-southeast-2", + "s3-ca-central-1", + "s3-eu-central-1", + "s3-eu-west-1", + "s3-eu-west-2", + "s3-external-1", + "s3-fips-us-gov-west-1", + "s3-sa-east-1", + "s3-us-east-2", + "s3-us-gov-west-1", + "s3-us-west-1", + "s3-us-west-2", + "s3-website-ap-northeast-1", + "s3-website-ap-southeast-1", + "s3-website-ap-southeast-2", + "s3-website-eu-west-1", + "s3-website-sa-east-1", + "s3-website-us-east-1", + "s3-website-us-west-1", + "s3-website-us-west-2", + "sa-east-1", + "us-east-1", + "us-east-2", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "dualstack", + "s3", + "s3-website", + "s3", + "alpha", + "beta", + "ap-northeast-1", + "ap-northeast-2", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-gov-west-1", + "us-west-1", + "us-west-2", + "eu-1", + "eu-2", + "eu-3", + "eu-4", + "us-1", + "us-2", + "us-3", + "us-4", + "apps", + "cns", + "eu", + "xen", + "de", + "ac", + "co", + "ed", + "fi", + "go", + "or", + "sa", + "com", + "edu", + "gov", + "inf", + "net", + "org", + "blogspot", + "com", + "edu", + "net", + "org", + "ath", + "gov", + "info", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", + "blogspot", + "co", + "e4", + "metacentrum", + "realm", + "cloud", + "custom", + "12hp", + "2ix", + "4lima", + "barsy", + "blogspot", + "bplaced", + "com", + "cosidns", + "dd-dns", + "ddnss", + "dnshome", + "dnsupdater", + "dray-dns", + "draydns", + "dyn-ip24", + "dyn-vpn", + "dynamisches-dns", + "dyndns1", + "dynvpn", + "firewall-gateway", + "fuettertdasnetz", + "git-repos", + "goip", + "home-webserver", + "internet-dns", + "isteingeek", + "istmein", + "keymachine", + "l-o-g-i-n", + "lcube-server", + "lebtimnetz", + "leitungsen", + "lima-city", + "logoip", + "mein-vigor", + "my-gateway", + "my-router", + "my-vigor", + "my-wan", + "myhome-server", + "spdns", + "square7", + "svn-repos", + "syno-ds", + "synology-diskstation", + "synology-ds", + "taifun-dns", + "traeumtgerade", + "dyn", + "dyn", + "dyndns", + "dyn", + "biz", + "blogspot", + "co", + "firm", + "reg", + "store", + "com", + "edu", + "gov", + "net", + "org", + "art", + "com", + "edu", + "gob", + "gov", + "mil", + "net", + "org", + "sld", + "web", + "art", + "asso", + "com", + "edu", + "gov", + "net", + "org", + "pol", + "com", + "edu", + "fin", + "gob", + "gov", + "info", + "k12", + "med", + "mil", + "net", + "org", + "pro", + "aip", + "com", + "edu", + "fie", + "gov", + "lib", + "med", + "org", + "pri", + "riik", + "blogspot", + "com", + "edu", + "eun", + "gov", + "mil", + "name", + "net", + "org", + "sci", + "blogspot", + "com", + "edu", + "gob", + "nom", + "org", + "blogspot", + "compute", + "biz", + "com", + "edu", + "gov", + "info", + "name", + "net", + "org", + 
"barsy", + "cloudns", + "diskstation", + "mycd", + "spdns", + "transurl", + "wellbeingzone", + "party", + "user", + "ybo", + "storj", + "aland", + "blogspot", + "dy", + "iki", + "ptplus", + "aeroport", + "assedic", + "asso", + "avocat", + "avoues", + "blogspot", + "cci", + "chambagri", + "chirurgiens-dentistes", + "chirurgiens-dentistes-en-france", + "com", + "experts-comptables", + "fbx-os", + "fbxos", + "freebox-os", + "freeboxos", + "geometre-expert", + "gouv", + "greta", + "huissier-justice", + "medecin", + "nom", + "notaires", + "on-web", + "pharmacien", + "port", + "prd", + "presse", + "tm", + "veterinaire", + "nom", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "pvt", + "co", + "cya", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "org", + "com", + "edu", + "gov", + "ltd", + "mod", + "org", + "co", + "com", + "edu", + "net", + "nom", + "org", + "ac", + "com", + "edu", + "gov", + "net", + "org", + "cloud", + "asso", + "com", + "edu", + "mobi", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "net", + "nym", + "org", + "com", + "edu", + "gob", + "ind", + "mil", + "net", + "nom", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "idv", + "inc", + "ltd", + "net", + "org", + "xn--55qx5d", + "xn--ciqpn", + "xn--gmq050i", + "xn--gmqw5a", + "xn--io0a7i", + "xn--lcvr32d", + "xn--mk0axi", + "xn--mxtq1m", + "xn--od0alg", + "xn--od0aq3b", + "xn--tn0ag", + "xn--uc0atv", + "xn--uc0ay4a", + "xn--wcvs22d", + "xn--zf0avx", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "org", + "cloudaccess", + "freesite", + "opencraft", + "blogspot", + "com", + "from", + "iz", + "name", + "adult", + "art", + "asso", + "com", + "coop", + "edu", + "firm", + "gouv", + "info", + "med", + "net", + "org", + "perso", + "pol", + "pro", + "rel", + "shop", + "2000", + "agrar", + "blogspot", + "bolt", + "casino", + "city", + "co", + "erotica", + "erotika", + "film", + "forum", + "games", + "hotel", + "info", + "ingatlan", + "jogasz", + "konyvelo", + "lakas", + "media", + "news", + "org", + "priv", + "reklam", + "sex", + "shop", + "sport", + "suli", + "szex", + "tm", + "tozsde", + "utazas", + "video", + "ac", + "biz", + "co", + "desa", + "go", + "mil", + "my", + "net", + "or", + "sch", + "web", + "blogspot", + "blogspot", + "gov", + "ac", + "co", + "gov", + "idf", + "k12", + "muni", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "net", + "nom", + "org", + "ro", + "tt", + "tv", + "ltd", + "plc", + "ac", + "barsy", + "blogspot", + "cloudns", + "co", + "edu", + "firm", + "gen", + "gov", + "ind", + "mil", + "net", + "nic", + "org", + "res", + "barrel-of-knowledge", + "barrell-of-knowledge", + "cloudns", + "dvrcam", + "dynamic-dns", + "dyndns", + "for-our", + "groks-the", + "groks-this", + "here-for-more", + "ilovecollege", + "knowsitall", + "no-ip", + "nsupdate", + "selfip", + "v-info", + "webhop", + "eu", + "backplaneapp", + "boxfuse", + "browsersafetymark", + "com", + "dedyn", + "definima", + "drud", + "enonic", + "github", + "gitlab", + "hasura-app", + "hzc", + "lair", + "ngrok", + "nid", + "nodeart", + "nodum", + "pantheonsite", + "protonet", + "sandcats", + "shiftedit", + "spacekit", + "stolos", + "thingdust", + "vaporcloud", + "wedeploy", + "customer", + "apps", + "stage", + "dev", + "disrec", + "prod", + "testing", + "cust", + "cust", + "cust", + "cust", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "ac", + "co", + "gov", + "id", + "net", + "org", + "sch", + "xn--mgba3a4f16a", + "xn--mgba3a4fra", + 
"blogspot", + "com", + "cupcake", + "edu", + "gov", + "int", + "net", + "org", + "abr", + "abruzzo", + "ag", + "agrigento", + "al", + "alessandria", + "alto-adige", + "altoadige", + "an", + "ancona", + "andria-barletta-trani", + "andria-trani-barletta", + "andriabarlettatrani", + "andriatranibarletta", + "ao", + "aosta", + "aosta-valley", + "aostavalley", + "aoste", + "ap", + "aq", + "aquila", + "ar", + "arezzo", + "ascoli-piceno", + "ascolipiceno", + "asti", + "at", + "av", + "avellino", + "ba", + "balsan", + "bari", + "barletta-trani-andria", + "barlettatraniandria", + "bas", + "basilicata", + "belluno", + "benevento", + "bergamo", + "bg", + "bi", + "biella", + "bl", + "blogspot", + "bn", + "bo", + "bologna", + "bolzano", + "bozen", + "br", + "brescia", + "brindisi", + "bs", + "bt", + "bz", + "ca", + "cagliari", + "cal", + "calabria", + "caltanissetta", + "cam", + "campania", + "campidano-medio", + "campidanomedio", + "campobasso", + "carbonia-iglesias", + "carboniaiglesias", + "carrara-massa", + "carraramassa", + "caserta", + "catania", + "catanzaro", + "cb", + "ce", + "cesena-forli", + "cesenaforli", + "ch", + "chieti", + "ci", + "cl", + "cn", + "co", + "como", + "cosenza", + "cr", + "cremona", + "crotone", + "cs", + "ct", + "cuneo", + "cz", + "dell-ogliastra", + "dellogliastra", + "edu", + "emilia-romagna", + "emiliaromagna", + "emr", + "en", + "enna", + "fc", + "fe", + "fermo", + "ferrara", + "fg", + "fi", + "firenze", + "florence", + "fm", + "foggia", + "forli-cesena", + "forlicesena", + "fr", + "friuli-v-giulia", + "friuli-ve-giulia", + "friuli-vegiulia", + "friuli-venezia-giulia", + "friuli-veneziagiulia", + "friuli-vgiulia", + "friuliv-giulia", + "friulive-giulia", + "friulivegiulia", + "friulivenezia-giulia", + "friuliveneziagiulia", + "friulivgiulia", + "frosinone", + "fvg", + "ge", + "genoa", + "genova", + "go", + "gorizia", + "gov", + "gr", + "grosseto", + "iglesias-carbonia", + "iglesiascarbonia", + "im", + "imperia", + "is", + "isernia", + "kr", + "la-spezia", + "laquila", + "laspezia", + "latina", + "laz", + "lazio", + "lc", + "le", + "lecce", + "lecco", + "li", + "lig", + "liguria", + "livorno", + "lo", + "lodi", + "lom", + "lombardia", + "lombardy", + "lt", + "lu", + "lucania", + "lucca", + "macerata", + "mantova", + "mar", + "marche", + "massa-carrara", + "massacarrara", + "matera", + "mb", + "mc", + "me", + "medio-campidano", + "mediocampidano", + "messina", + "mi", + "milan", + "milano", + "mn", + "mo", + "modena", + "mol", + "molise", + "monza", + "monza-brianza", + "monza-e-della-brianza", + "monzabrianza", + "monzaebrianza", + "monzaedellabrianza", + "ms", + "mt", + "na", + "naples", + "napoli", + "no", + "novara", + "nu", + "nuoro", + "og", + "ogliastra", + "olbia-tempio", + "olbiatempio", + "or", + "oristano", + "ot", + "pa", + "padova", + "padua", + "palermo", + "parma", + "pavia", + "pc", + "pd", + "pe", + "perugia", + "pesaro-urbino", + "pesarourbino", + "pescara", + "pg", + "pi", + "piacenza", + "piedmont", + "piemonte", + "pisa", + "pistoia", + "pmn", + "pn", + "po", + "pordenone", + "potenza", + "pr", + "prato", + "pt", + "pu", + "pug", + "puglia", + "pv", + "pz", + "ra", + "ragusa", + "ravenna", + "rc", + "re", + "reggio-calabria", + "reggio-emilia", + "reggiocalabria", + "reggioemilia", + "rg", + "ri", + "rieti", + "rimini", + "rm", + "rn", + "ro", + "roma", + "rome", + "rovigo", + "sa", + "salerno", + "sar", + "sardegna", + "sardinia", + "sassari", + "savona", + "si", + "sic", + "sicilia", + "sicily", + "siena", + "siracusa", + "so", + "sondrio", + "sp", 
+ "sr", + "ss", + "suedtirol", + "sv", + "ta", + "taa", + "taranto", + "te", + "tempio-olbia", + "tempioolbia", + "teramo", + "terni", + "tn", + "to", + "torino", + "tos", + "toscana", + "tp", + "tr", + "trani-andria-barletta", + "trani-barletta-andria", + "traniandriabarletta", + "tranibarlettaandria", + "trapani", + "trentino", + "trentino-a-adige", + "trentino-aadige", + "trentino-alto-adige", + "trentino-altoadige", + "trentino-s-tirol", + "trentino-stirol", + "trentino-sud-tirol", + "trentino-sudtirol", + "trentino-sued-tirol", + "trentino-suedtirol", + "trentinoa-adige", + "trentinoaadige", + "trentinoalto-adige", + "trentinoaltoadige", + "trentinos-tirol", + "trentinostirol", + "trentinosud-tirol", + "trentinosudtirol", + "trentinosued-tirol", + "trentinosuedtirol", + "trento", + "treviso", + "trieste", + "ts", + "turin", + "tuscany", + "tv", + "ud", + "udine", + "umb", + "umbria", + "urbino-pesaro", + "urbinopesaro", + "va", + "val-d-aosta", + "val-daosta", + "vald-aosta", + "valdaosta", + "valle-aosta", + "valle-d-aosta", + "valle-daosta", + "valleaosta", + "valled-aosta", + "valledaosta", + "vallee-aoste", + "valleeaoste", + "vao", + "varese", + "vb", + "vc", + "vda", + "ve", + "ven", + "veneto", + "venezia", + "venice", + "verbania", + "vercelli", + "verona", + "vi", + "vibo-valentia", + "vibovalentia", + "vicenza", + "viterbo", + "vr", + "vs", + "vt", + "vv", + "co", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "sch", + "ac", + "ad", + "aichi", + "akita", + "aomori", + "blogspot", + "chiba", + "co", + "ed", + "ehime", + "fukui", + "fukuoka", + "fukushima", + "gifu", + "go", + "gr", + "gunma", + "hiroshima", + "hokkaido", + "hyogo", + "ibaraki", + "ishikawa", + "iwate", + "kagawa", + "kagoshima", + "kanagawa", + "kawasaki", + "kitakyushu", + "kobe", + "kochi", + "kumamoto", + "kyoto", + "lg", + "mie", + "miyagi", + "miyazaki", + "nagano", + "nagasaki", + "nagoya", + "nara", + "ne", + "niigata", + "oita", + "okayama", + "okinawa", + "or", + "osaka", + "saga", + "saitama", + "sapporo", + "sendai", + "shiga", + "shimane", + "shizuoka", + "tochigi", + "tokushima", + "tokyo", + "tottori", + "toyama", + "wakayama", + "xn--0trq7p7nn", + "xn--1ctwo", + "xn--1lqs03n", + "xn--1lqs71d", + "xn--2m4a15e", + "xn--32vp30h", + "xn--4it168d", + "xn--4it797k", + "xn--4pvxs", + "xn--5js045d", + "xn--5rtp49c", + "xn--5rtq34k", + "xn--6btw5a", + "xn--6orx2r", + "xn--7t0a264c", + "xn--8ltr62k", + "xn--8pvr4u", + "xn--c3s14m", + "xn--d5qv7z876c", + "xn--djrs72d6uy", + "xn--djty4k", + "xn--efvn9s", + "xn--ehqz56n", + "xn--elqq16h", + "xn--f6qx53a", + "xn--k7yn95e", + "xn--kbrq7o", + "xn--klt787d", + "xn--kltp7d", + "xn--kltx9a", + "xn--klty5x", + "xn--mkru45i", + "xn--nit225k", + "xn--ntso0iqx3a", + "xn--ntsq17g", + "xn--pssu33l", + "xn--qqqt11m", + "xn--rht27z", + "xn--rht3d", + "xn--rht61e", + "xn--rny31h", + "xn--tor131o", + "xn--uist22h", + "xn--uisz3g", + "xn--uuwu58a", + "xn--vgu402c", + "xn--zbx025d", + "yamagata", + "yamaguchi", + "yamanashi", + "yokohama", + "aisai", + "ama", + "anjo", + "asuke", + "chiryu", + "chita", + "fuso", + "gamagori", + "handa", + "hazu", + "hekinan", + "higashiura", + "ichinomiya", + "inazawa", + "inuyama", + "isshiki", + "iwakura", + "kanie", + "kariya", + "kasugai", + "kira", + "kiyosu", + "komaki", + "konan", + "kota", + "mihama", + "miyoshi", + "nishio", + "nisshin", + "obu", + "oguchi", + "oharu", + "okazaki", + "owariasahi", + "seto", + "shikatsu", + "shinshiro", + "shitara", + "tahara", + "takahama", + "tobishima", + 
"toei", + "togo", + "tokai", + "tokoname", + "toyoake", + "toyohashi", + "toyokawa", + "toyone", + "toyota", + "tsushima", + "yatomi", + "akita", + "daisen", + "fujisato", + "gojome", + "hachirogata", + "happou", + "higashinaruse", + "honjo", + "honjyo", + "ikawa", + "kamikoani", + "kamioka", + "katagami", + "kazuno", + "kitaakita", + "kosaka", + "kyowa", + "misato", + "mitane", + "moriyoshi", + "nikaho", + "noshiro", + "odate", + "oga", + "ogata", + "semboku", + "yokote", + "yurihonjo", + "aomori", + "gonohe", + "hachinohe", + "hashikami", + "hiranai", + "hirosaki", + "itayanagi", + "kuroishi", + "misawa", + "mutsu", + "nakadomari", + "noheji", + "oirase", + "owani", + "rokunohe", + "sannohe", + "shichinohe", + "shingo", + "takko", + "towada", + "tsugaru", + "tsuruta", + "abiko", + "asahi", + "chonan", + "chosei", + "choshi", + "chuo", + "funabashi", + "futtsu", + "hanamigawa", + "ichihara", + "ichikawa", + "ichinomiya", + "inzai", + "isumi", + "kamagaya", + "kamogawa", + "kashiwa", + "katori", + "katsuura", + "kimitsu", + "kisarazu", + "kozaki", + "kujukuri", + "kyonan", + "matsudo", + "midori", + "mihama", + "minamiboso", + "mobara", + "mutsuzawa", + "nagara", + "nagareyama", + "narashino", + "narita", + "noda", + "oamishirasato", + "omigawa", + "onjuku", + "otaki", + "sakae", + "sakura", + "shimofusa", + "shirako", + "shiroi", + "shisui", + "sodegaura", + "sosa", + "tako", + "tateyama", + "togane", + "tohnosho", + "tomisato", + "urayasu", + "yachimata", + "yachiyo", + "yokaichiba", + "yokoshibahikari", + "yotsukaido", + "ainan", + "honai", + "ikata", + "imabari", + "iyo", + "kamijima", + "kihoku", + "kumakogen", + "masaki", + "matsuno", + "matsuyama", + "namikata", + "niihama", + "ozu", + "saijo", + "seiyo", + "shikokuchuo", + "tobe", + "toon", + "uchiko", + "uwajima", + "yawatahama", + "echizen", + "eiheiji", + "fukui", + "ikeda", + "katsuyama", + "mihama", + "minamiechizen", + "obama", + "ohi", + "ono", + "sabae", + "sakai", + "takahama", + "tsuruga", + "wakasa", + "ashiya", + "buzen", + "chikugo", + "chikuho", + "chikujo", + "chikushino", + "chikuzen", + "chuo", + "dazaifu", + "fukuchi", + "hakata", + "higashi", + "hirokawa", + "hisayama", + "iizuka", + "inatsuki", + "kaho", + "kasuga", + "kasuya", + "kawara", + "keisen", + "koga", + "kurate", + "kurogi", + "kurume", + "minami", + "miyako", + "miyama", + "miyawaka", + "mizumaki", + "munakata", + "nakagawa", + "nakama", + "nishi", + "nogata", + "ogori", + "okagaki", + "okawa", + "oki", + "omuta", + "onga", + "onojo", + "oto", + "saigawa", + "sasaguri", + "shingu", + "shinyoshitomi", + "shonai", + "soeda", + "sue", + "tachiarai", + "tagawa", + "takata", + "toho", + "toyotsu", + "tsuiki", + "ukiha", + "umi", + "usui", + "yamada", + "yame", + "yanagawa", + "yukuhashi", + "aizubange", + "aizumisato", + "aizuwakamatsu", + "asakawa", + "bandai", + "date", + "fukushima", + "furudono", + "futaba", + "hanawa", + "higashi", + "hirata", + "hirono", + "iitate", + "inawashiro", + "ishikawa", + "iwaki", + "izumizaki", + "kagamiishi", + "kaneyama", + "kawamata", + "kitakata", + "kitashiobara", + "koori", + "koriyama", + "kunimi", + "miharu", + "mishima", + "namie", + "nango", + "nishiaizu", + "nishigo", + "okuma", + "omotego", + "ono", + "otama", + "samegawa", + "shimogo", + "shirakawa", + "showa", + "soma", + "sukagawa", + "taishin", + "tamakawa", + "tanagura", + "tenei", + "yabuki", + "yamato", + "yamatsuri", + "yanaizu", + "yugawa", + "anpachi", + "ena", + "gifu", + "ginan", + "godo", + "gujo", + "hashima", + "hichiso", + "hida", + 
"higashishirakawa", + "ibigawa", + "ikeda", + "kakamigahara", + "kani", + "kasahara", + "kasamatsu", + "kawaue", + "kitagata", + "mino", + "minokamo", + "mitake", + "mizunami", + "motosu", + "nakatsugawa", + "ogaki", + "sakahogi", + "seki", + "sekigahara", + "shirakawa", + "tajimi", + "takayama", + "tarui", + "toki", + "tomika", + "wanouchi", + "yamagata", + "yaotsu", + "yoro", + "annaka", + "chiyoda", + "fujioka", + "higashiagatsuma", + "isesaki", + "itakura", + "kanna", + "kanra", + "katashina", + "kawaba", + "kiryu", + "kusatsu", + "maebashi", + "meiwa", + "midori", + "minakami", + "naganohara", + "nakanojo", + "nanmoku", + "numata", + "oizumi", + "ora", + "ota", + "shibukawa", + "shimonita", + "shinto", + "showa", + "takasaki", + "takayama", + "tamamura", + "tatebayashi", + "tomioka", + "tsukiyono", + "tsumagoi", + "ueno", + "yoshioka", + "asaminami", + "daiwa", + "etajima", + "fuchu", + "fukuyama", + "hatsukaichi", + "higashihiroshima", + "hongo", + "jinsekikogen", + "kaita", + "kui", + "kumano", + "kure", + "mihara", + "miyoshi", + "naka", + "onomichi", + "osakikamijima", + "otake", + "saka", + "sera", + "seranishi", + "shinichi", + "shobara", + "takehara", + "abashiri", + "abira", + "aibetsu", + "akabira", + "akkeshi", + "asahikawa", + "ashibetsu", + "ashoro", + "assabu", + "atsuma", + "bibai", + "biei", + "bifuka", + "bihoro", + "biratori", + "chippubetsu", + "chitose", + "date", + "ebetsu", + "embetsu", + "eniwa", + "erimo", + "esan", + "esashi", + "fukagawa", + "fukushima", + "furano", + "furubira", + "haboro", + "hakodate", + "hamatonbetsu", + "hidaka", + "higashikagura", + "higashikawa", + "hiroo", + "hokuryu", + "hokuto", + "honbetsu", + "horokanai", + "horonobe", + "ikeda", + "imakane", + "ishikari", + "iwamizawa", + "iwanai", + "kamifurano", + "kamikawa", + "kamishihoro", + "kamisunagawa", + "kamoenai", + "kayabe", + "kembuchi", + "kikonai", + "kimobetsu", + "kitahiroshima", + "kitami", + "kiyosato", + "koshimizu", + "kunneppu", + "kuriyama", + "kuromatsunai", + "kushiro", + "kutchan", + "kyowa", + "mashike", + "matsumae", + "mikasa", + "minamifurano", + "mombetsu", + "moseushi", + "mukawa", + "muroran", + "naie", + "nakagawa", + "nakasatsunai", + "nakatombetsu", + "nanae", + "nanporo", + "nayoro", + "nemuro", + "niikappu", + "niki", + "nishiokoppe", + "noboribetsu", + "numata", + "obihiro", + "obira", + "oketo", + "okoppe", + "otaru", + "otobe", + "otofuke", + "otoineppu", + "oumu", + "ozora", + "pippu", + "rankoshi", + "rebun", + "rikubetsu", + "rishiri", + "rishirifuji", + "saroma", + "sarufutsu", + "shakotan", + "shari", + "shibecha", + "shibetsu", + "shikabe", + "shikaoi", + "shimamaki", + "shimizu", + "shimokawa", + "shinshinotsu", + "shintoku", + "shiranuka", + "shiraoi", + "shiriuchi", + "sobetsu", + "sunagawa", + "taiki", + "takasu", + "takikawa", + "takinoue", + "teshikaga", + "tobetsu", + "tohma", + "tomakomai", + "tomari", + "toya", + "toyako", + "toyotomi", + "toyoura", + "tsubetsu", + "tsukigata", + "urakawa", + "urausu", + "uryu", + "utashinai", + "wakkanai", + "wassamu", + "yakumo", + "yoichi", + "aioi", + "akashi", + "ako", + "amagasaki", + "aogaki", + "asago", + "ashiya", + "awaji", + "fukusaki", + "goshiki", + "harima", + "himeji", + "ichikawa", + "inagawa", + "itami", + "kakogawa", + "kamigori", + "kamikawa", + "kasai", + "kasuga", + "kawanishi", + "miki", + "minamiawaji", + "nishinomiya", + "nishiwaki", + "ono", + "sanda", + "sannan", + "sasayama", + "sayo", + "shingu", + "shinonsen", + "shiso", + "sumoto", + "taishi", + "taka", + "takarazuka", + 
"takasago", + "takino", + "tamba", + "tatsuno", + "toyooka", + "yabu", + "yashiro", + "yoka", + "yokawa", + "ami", + "asahi", + "bando", + "chikusei", + "daigo", + "fujishiro", + "hitachi", + "hitachinaka", + "hitachiomiya", + "hitachiota", + "ibaraki", + "ina", + "inashiki", + "itako", + "iwama", + "joso", + "kamisu", + "kasama", + "kashima", + "kasumigaura", + "koga", + "miho", + "mito", + "moriya", + "naka", + "namegata", + "oarai", + "ogawa", + "omitama", + "ryugasaki", + "sakai", + "sakuragawa", + "shimodate", + "shimotsuma", + "shirosato", + "sowa", + "suifu", + "takahagi", + "tamatsukuri", + "tokai", + "tomobe", + "tone", + "toride", + "tsuchiura", + "tsukuba", + "uchihara", + "ushiku", + "yachiyo", + "yamagata", + "yawara", + "yuki", + "anamizu", + "hakui", + "hakusan", + "kaga", + "kahoku", + "kanazawa", + "kawakita", + "komatsu", + "nakanoto", + "nanao", + "nomi", + "nonoichi", + "noto", + "shika", + "suzu", + "tsubata", + "tsurugi", + "uchinada", + "wajima", + "fudai", + "fujisawa", + "hanamaki", + "hiraizumi", + "hirono", + "ichinohe", + "ichinoseki", + "iwaizumi", + "iwate", + "joboji", + "kamaishi", + "kanegasaki", + "karumai", + "kawai", + "kitakami", + "kuji", + "kunohe", + "kuzumaki", + "miyako", + "mizusawa", + "morioka", + "ninohe", + "noda", + "ofunato", + "oshu", + "otsuchi", + "rikuzentakata", + "shiwa", + "shizukuishi", + "sumita", + "tanohata", + "tono", + "yahaba", + "yamada", + "ayagawa", + "higashikagawa", + "kanonji", + "kotohira", + "manno", + "marugame", + "mitoyo", + "naoshima", + "sanuki", + "tadotsu", + "takamatsu", + "tonosho", + "uchinomi", + "utazu", + "zentsuji", + "akune", + "amami", + "hioki", + "isa", + "isen", + "izumi", + "kagoshima", + "kanoya", + "kawanabe", + "kinko", + "kouyama", + "makurazaki", + "matsumoto", + "minamitane", + "nakatane", + "nishinoomote", + "satsumasendai", + "soo", + "tarumizu", + "yusui", + "aikawa", + "atsugi", + "ayase", + "chigasaki", + "ebina", + "fujisawa", + "hadano", + "hakone", + "hiratsuka", + "isehara", + "kaisei", + "kamakura", + "kiyokawa", + "matsuda", + "minamiashigara", + "miura", + "nakai", + "ninomiya", + "odawara", + "oi", + "oiso", + "sagamihara", + "samukawa", + "tsukui", + "yamakita", + "yamato", + "yokosuka", + "yugawara", + "zama", + "zushi", + "city", + "city", + "city", + "aki", + "geisei", + "hidaka", + "higashitsuno", + "ino", + "kagami", + "kami", + "kitagawa", + "kochi", + "mihara", + "motoyama", + "muroto", + "nahari", + "nakamura", + "nankoku", + "nishitosa", + "niyodogawa", + "ochi", + "okawa", + "otoyo", + "otsuki", + "sakawa", + "sukumo", + "susaki", + "tosa", + "tosashimizu", + "toyo", + "tsuno", + "umaji", + "yasuda", + "yusuhara", + "amakusa", + "arao", + "aso", + "choyo", + "gyokuto", + "kamiamakusa", + "kikuchi", + "kumamoto", + "mashiki", + "mifune", + "minamata", + "minamioguni", + "nagasu", + "nishihara", + "oguni", + "ozu", + "sumoto", + "takamori", + "uki", + "uto", + "yamaga", + "yamato", + "yatsushiro", + "ayabe", + "fukuchiyama", + "higashiyama", + "ide", + "ine", + "joyo", + "kameoka", + "kamo", + "kita", + "kizu", + "kumiyama", + "kyotamba", + "kyotanabe", + "kyotango", + "maizuru", + "minami", + "minamiyamashiro", + "miyazu", + "muko", + "nagaokakyo", + "nakagyo", + "nantan", + "oyamazaki", + "sakyo", + "seika", + "tanabe", + "uji", + "ujitawara", + "wazuka", + "yamashina", + "yawata", + "asahi", + "inabe", + "ise", + "kameyama", + "kawagoe", + "kiho", + "kisosaki", + "kiwa", + "komono", + "kumano", + "kuwana", + "matsusaka", + "meiwa", + "mihama", + "minamiise", + "misugi", 
+ "miyama", + "nabari", + "shima", + "suzuka", + "tado", + "taiki", + "taki", + "tamaki", + "toba", + "tsu", + "udono", + "ureshino", + "watarai", + "yokkaichi", + "furukawa", + "higashimatsushima", + "ishinomaki", + "iwanuma", + "kakuda", + "kami", + "kawasaki", + "marumori", + "matsushima", + "minamisanriku", + "misato", + "murata", + "natori", + "ogawara", + "ohira", + "onagawa", + "osaki", + "rifu", + "semine", + "shibata", + "shichikashuku", + "shikama", + "shiogama", + "shiroishi", + "tagajo", + "taiwa", + "tome", + "tomiya", + "wakuya", + "watari", + "yamamoto", + "zao", + "aya", + "ebino", + "gokase", + "hyuga", + "kadogawa", + "kawaminami", + "kijo", + "kitagawa", + "kitakata", + "kitaura", + "kobayashi", + "kunitomi", + "kushima", + "mimata", + "miyakonojo", + "miyazaki", + "morotsuka", + "nichinan", + "nishimera", + "nobeoka", + "saito", + "shiiba", + "shintomi", + "takaharu", + "takanabe", + "takazaki", + "tsuno", + "achi", + "agematsu", + "anan", + "aoki", + "asahi", + "azumino", + "chikuhoku", + "chikuma", + "chino", + "fujimi", + "hakuba", + "hara", + "hiraya", + "iida", + "iijima", + "iiyama", + "iizuna", + "ikeda", + "ikusaka", + "ina", + "karuizawa", + "kawakami", + "kiso", + "kisofukushima", + "kitaaiki", + "komagane", + "komoro", + "matsukawa", + "matsumoto", + "miasa", + "minamiaiki", + "minamimaki", + "minamiminowa", + "minowa", + "miyada", + "miyota", + "mochizuki", + "nagano", + "nagawa", + "nagiso", + "nakagawa", + "nakano", + "nozawaonsen", + "obuse", + "ogawa", + "okaya", + "omachi", + "omi", + "ookuwa", + "ooshika", + "otaki", + "otari", + "sakae", + "sakaki", + "saku", + "sakuho", + "shimosuwa", + "shinanomachi", + "shiojiri", + "suwa", + "suzaka", + "takagi", + "takamori", + "takayama", + "tateshina", + "tatsuno", + "togakushi", + "togura", + "tomi", + "ueda", + "wada", + "yamagata", + "yamanouchi", + "yasaka", + "yasuoka", + "chijiwa", + "futsu", + "goto", + "hasami", + "hirado", + "iki", + "isahaya", + "kawatana", + "kuchinotsu", + "matsuura", + "nagasaki", + "obama", + "omura", + "oseto", + "saikai", + "sasebo", + "seihi", + "shimabara", + "shinkamigoto", + "togitsu", + "tsushima", + "unzen", + "city", + "ando", + "gose", + "heguri", + "higashiyoshino", + "ikaruga", + "ikoma", + "kamikitayama", + "kanmaki", + "kashiba", + "kashihara", + "katsuragi", + "kawai", + "kawakami", + "kawanishi", + "koryo", + "kurotaki", + "mitsue", + "miyake", + "nara", + "nosegawa", + "oji", + "ouda", + "oyodo", + "sakurai", + "sango", + "shimoichi", + "shimokitayama", + "shinjo", + "soni", + "takatori", + "tawaramoto", + "tenkawa", + "tenri", + "uda", + "yamatokoriyama", + "yamatotakada", + "yamazoe", + "yoshino", + "aga", + "agano", + "gosen", + "itoigawa", + "izumozaki", + "joetsu", + "kamo", + "kariwa", + "kashiwazaki", + "minamiuonuma", + "mitsuke", + "muika", + "murakami", + "myoko", + "nagaoka", + "niigata", + "ojiya", + "omi", + "sado", + "sanjo", + "seiro", + "seirou", + "sekikawa", + "shibata", + "tagami", + "tainai", + "tochio", + "tokamachi", + "tsubame", + "tsunan", + "uonuma", + "yahiko", + "yoita", + "yuzawa", + "beppu", + "bungoono", + "bungotakada", + "hasama", + "hiji", + "himeshima", + "hita", + "kamitsue", + "kokonoe", + "kuju", + "kunisaki", + "kusu", + "oita", + "saiki", + "taketa", + "tsukumi", + "usa", + "usuki", + "yufu", + "akaiwa", + "asakuchi", + "bizen", + "hayashima", + "ibara", + "kagamino", + "kasaoka", + "kibichuo", + "kumenan", + "kurashiki", + "maniwa", + "misaki", + "nagi", + "niimi", + "nishiawakura", + "okayama", + "satosho", + "setouchi", + 
"shinjo", + "shoo", + "soja", + "takahashi", + "tamano", + "tsuyama", + "wake", + "yakage", + "aguni", + "ginowan", + "ginoza", + "gushikami", + "haebaru", + "higashi", + "hirara", + "iheya", + "ishigaki", + "ishikawa", + "itoman", + "izena", + "kadena", + "kin", + "kitadaito", + "kitanakagusuku", + "kumejima", + "kunigami", + "minamidaito", + "motobu", + "nago", + "naha", + "nakagusuku", + "nakijin", + "nanjo", + "nishihara", + "ogimi", + "okinawa", + "onna", + "shimoji", + "taketomi", + "tarama", + "tokashiki", + "tomigusuku", + "tonaki", + "urasoe", + "uruma", + "yaese", + "yomitan", + "yonabaru", + "yonaguni", + "zamami", + "abeno", + "chihayaakasaka", + "chuo", + "daito", + "fujiidera", + "habikino", + "hannan", + "higashiosaka", + "higashisumiyoshi", + "higashiyodogawa", + "hirakata", + "ibaraki", + "ikeda", + "izumi", + "izumiotsu", + "izumisano", + "kadoma", + "kaizuka", + "kanan", + "kashiwara", + "katano", + "kawachinagano", + "kishiwada", + "kita", + "kumatori", + "matsubara", + "minato", + "minoh", + "misaki", + "moriguchi", + "neyagawa", + "nishi", + "nose", + "osakasayama", + "sakai", + "sayama", + "sennan", + "settsu", + "shijonawate", + "shimamoto", + "suita", + "tadaoka", + "taishi", + "tajiri", + "takaishi", + "takatsuki", + "tondabayashi", + "toyonaka", + "toyono", + "yao", + "ariake", + "arita", + "fukudomi", + "genkai", + "hamatama", + "hizen", + "imari", + "kamimine", + "kanzaki", + "karatsu", + "kashima", + "kitagata", + "kitahata", + "kiyama", + "kouhoku", + "kyuragi", + "nishiarita", + "ogi", + "omachi", + "ouchi", + "saga", + "shiroishi", + "taku", + "tara", + "tosu", + "yoshinogari", + "arakawa", + "asaka", + "chichibu", + "fujimi", + "fujimino", + "fukaya", + "hanno", + "hanyu", + "hasuda", + "hatogaya", + "hatoyama", + "hidaka", + "higashichichibu", + "higashimatsuyama", + "honjo", + "ina", + "iruma", + "iwatsuki", + "kamiizumi", + "kamikawa", + "kamisato", + "kasukabe", + "kawagoe", + "kawaguchi", + "kawajima", + "kazo", + "kitamoto", + "koshigaya", + "kounosu", + "kuki", + "kumagaya", + "matsubushi", + "minano", + "misato", + "miyashiro", + "miyoshi", + "moroyama", + "nagatoro", + "namegawa", + "niiza", + "ogano", + "ogawa", + "ogose", + "okegawa", + "omiya", + "otaki", + "ranzan", + "ryokami", + "saitama", + "sakado", + "satte", + "sayama", + "shiki", + "shiraoka", + "soka", + "sugito", + "toda", + "tokigawa", + "tokorozawa", + "tsurugashima", + "urawa", + "warabi", + "yashio", + "yokoze", + "yono", + "yorii", + "yoshida", + "yoshikawa", + "yoshimi", + "city", + "city", + "aisho", + "gamo", + "higashiomi", + "hikone", + "koka", + "konan", + "kosei", + "koto", + "kusatsu", + "maibara", + "moriyama", + "nagahama", + "nishiazai", + "notogawa", + "omihachiman", + "otsu", + "ritto", + "ryuoh", + "takashima", + "takatsuki", + "torahime", + "toyosato", + "yasu", + "akagi", + "ama", + "gotsu", + "hamada", + "higashiizumo", + "hikawa", + "hikimi", + "izumo", + "kakinoki", + "masuda", + "matsue", + "misato", + "nishinoshima", + "ohda", + "okinoshima", + "okuizumo", + "shimane", + "tamayu", + "tsuwano", + "unnan", + "yakumo", + "yasugi", + "yatsuka", + "arai", + "atami", + "fuji", + "fujieda", + "fujikawa", + "fujinomiya", + "fukuroi", + "gotemba", + "haibara", + "hamamatsu", + "higashiizu", + "ito", + "iwata", + "izu", + "izunokuni", + "kakegawa", + "kannami", + "kawanehon", + "kawazu", + "kikugawa", + "kosai", + "makinohara", + "matsuzaki", + "minamiizu", + "mishima", + "morimachi", + "nishiizu", + "numazu", + "omaezaki", + "shimada", + "shimizu", + "shimoda", + 
"shizuoka", + "susono", + "yaizu", + "yoshida", + "ashikaga", + "bato", + "haga", + "ichikai", + "iwafune", + "kaminokawa", + "kanuma", + "karasuyama", + "kuroiso", + "mashiko", + "mibu", + "moka", + "motegi", + "nasu", + "nasushiobara", + "nikko", + "nishikata", + "nogi", + "ohira", + "ohtawara", + "oyama", + "sakura", + "sano", + "shimotsuke", + "shioya", + "takanezawa", + "tochigi", + "tsuga", + "ujiie", + "utsunomiya", + "yaita", + "aizumi", + "anan", + "ichiba", + "itano", + "kainan", + "komatsushima", + "matsushige", + "mima", + "minami", + "miyoshi", + "mugi", + "nakagawa", + "naruto", + "sanagochi", + "shishikui", + "tokushima", + "wajiki", + "adachi", + "akiruno", + "akishima", + "aogashima", + "arakawa", + "bunkyo", + "chiyoda", + "chofu", + "chuo", + "edogawa", + "fuchu", + "fussa", + "hachijo", + "hachioji", + "hamura", + "higashikurume", + "higashimurayama", + "higashiyamato", + "hino", + "hinode", + "hinohara", + "inagi", + "itabashi", + "katsushika", + "kita", + "kiyose", + "kodaira", + "koganei", + "kokubunji", + "komae", + "koto", + "kouzushima", + "kunitachi", + "machida", + "meguro", + "minato", + "mitaka", + "mizuho", + "musashimurayama", + "musashino", + "nakano", + "nerima", + "ogasawara", + "okutama", + "ome", + "oshima", + "ota", + "setagaya", + "shibuya", + "shinagawa", + "shinjuku", + "suginami", + "sumida", + "tachikawa", + "taito", + "tama", + "toshima", + "chizu", + "hino", + "kawahara", + "koge", + "kotoura", + "misasa", + "nanbu", + "nichinan", + "sakaiminato", + "tottori", + "wakasa", + "yazu", + "yonago", + "asahi", + "fuchu", + "fukumitsu", + "funahashi", + "himi", + "imizu", + "inami", + "johana", + "kamiichi", + "kurobe", + "nakaniikawa", + "namerikawa", + "nanto", + "nyuzen", + "oyabe", + "taira", + "takaoka", + "tateyama", + "toga", + "tonami", + "toyama", + "unazuki", + "uozu", + "yamada", + "arida", + "aridagawa", + "gobo", + "hashimoto", + "hidaka", + "hirogawa", + "inami", + "iwade", + "kainan", + "kamitonda", + "katsuragi", + "kimino", + "kinokawa", + "kitayama", + "koya", + "koza", + "kozagawa", + "kudoyama", + "kushimoto", + "mihama", + "misato", + "nachikatsuura", + "shingu", + "shirahama", + "taiji", + "tanabe", + "wakayama", + "yuasa", + "yura", + "asahi", + "funagata", + "higashine", + "iide", + "kahoku", + "kaminoyama", + "kaneyama", + "kawanishi", + "mamurogawa", + "mikawa", + "murayama", + "nagai", + "nakayama", + "nanyo", + "nishikawa", + "obanazawa", + "oe", + "oguni", + "ohkura", + "oishida", + "sagae", + "sakata", + "sakegawa", + "shinjo", + "shirataka", + "shonai", + "takahata", + "tendo", + "tozawa", + "tsuruoka", + "yamagata", + "yamanobe", + "yonezawa", + "yuza", + "abu", + "hagi", + "hikari", + "hofu", + "iwakuni", + "kudamatsu", + "mitou", + "nagato", + "oshima", + "shimonoseki", + "shunan", + "tabuse", + "tokuyama", + "toyota", + "ube", + "yuu", + "chuo", + "doshi", + "fuefuki", + "fujikawa", + "fujikawaguchiko", + "fujiyoshida", + "hayakawa", + "hokuto", + "ichikawamisato", + "kai", + "kofu", + "koshu", + "kosuge", + "minami-alps", + "minobu", + "nakamichi", + "nanbu", + "narusawa", + "nirasaki", + "nishikatsura", + "oshino", + "otsuki", + "showa", + "tabayama", + "tsuru", + "uenohara", + "yamanakako", + "yamanashi", + "city", + "co", + "blogspot", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "ass", + "asso", + "com", + "coop", + "edu", + "gouv", + "gov", + "medecin", + "mil", + "nom", + "notaires", + "org", + "pharmaciens", + "prd", + "presse", + "tm", 
+ "veterinaire", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "org", + "rep", + "tra", + "ac", + "blogspot", + "busan", + "chungbuk", + "chungnam", + "co", + "daegu", + "daejeon", + "es", + "gangwon", + "go", + "gwangju", + "gyeongbuk", + "gyeonggi", + "gyeongnam", + "hs", + "incheon", + "jeju", + "jeonbuk", + "jeonnam", + "kg", + "mil", + "ms", + "ne", + "or", + "pe", + "re", + "sc", + "seoul", + "ulsan", + "co", + "edu", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nym", + "org", + "bnr", + "c", + "com", + "edu", + "gov", + "info", + "int", + "net", + "nym", + "org", + "per", + "static", + "dev", + "sites", + "com", + "edu", + "gov", + "net", + "org", + "co", + "com", + "edu", + "gov", + "net", + "org", + "oy", + "blogspot", + "nom", + "nym", + "cyon", + "mypep", + "ac", + "assn", + "com", + "edu", + "gov", + "grp", + "hotel", + "int", + "ltd", + "net", + "ngo", + "org", + "sch", + "soc", + "web", + "com", + "edu", + "gov", + "net", + "org", + "co", + "org", + "blogspot", + "gov", + "nym", + "blogspot", + "nym", + "asn", + "com", + "conf", + "edu", + "gov", + "id", + "mil", + "net", + "org", + "com", + "edu", + "gov", + "id", + "med", + "net", + "org", + "plc", + "sch", + "ac", + "co", + "gov", + "net", + "org", + "press", + "router", + "asso", + "tm", + "blogspot", + "ac", + "brasilia", + "c66", + "co", + "daplie", + "ddns", + "diskstation", + "dnsfor", + "dscloud", + "edu", + "filegear", + "gov", + "hopto", + "i234", + "its", + "loginto", + "myds", + "net", + "noip", + "nym", + "org", + "priv", + "synology", + "webhop", + "wedeploy", + "yombo", + "localhost", + "co", + "com", + "edu", + "gov", + "mil", + "nom", + "org", + "prd", + "tm", + "blogspot", + "com", + "edu", + "gov", + "inf", + "name", + "net", + "nom", + "org", + "com", + "edu", + "gouv", + "gov", + "net", + "org", + "presse", + "edu", + "gov", + "nyc", + "org", + "com", + "edu", + "gov", + "net", + "org", + "dscloud", + "blogspot", + "gov", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "net", + "org", + "blogspot", + "ac", + "co", + "com", + "gov", + "net", + "or", + "org", + "academy", + "agriculture", + "air", + "airguard", + "alabama", + "alaska", + "amber", + "ambulance", + "american", + "americana", + "americanantiques", + "americanart", + "amsterdam", + "and", + "annefrank", + "anthro", + "anthropology", + "antiques", + "aquarium", + "arboretum", + "archaeological", + "archaeology", + "architecture", + "art", + "artanddesign", + "artcenter", + "artdeco", + "arteducation", + "artgallery", + "arts", + "artsandcrafts", + "asmatart", + "assassination", + "assisi", + "association", + "astronomy", + "atlanta", + "austin", + "australia", + "automotive", + "aviation", + "axis", + "badajoz", + "baghdad", + "bahn", + "bale", + "baltimore", + "barcelona", + "baseball", + "basel", + "baths", + "bauern", + "beauxarts", + "beeldengeluid", + "bellevue", + "bergbau", + "berkeley", + "berlin", + "bern", + "bible", + "bilbao", + "bill", + "birdart", + "birthplace", + "bonn", + "boston", + "botanical", + "botanicalgarden", + "botanicgarden", + "botany", + "brandywinevalley", + "brasil", + "bristol", + "british", + "britishcolumbia", + "broadcast", + "brunel", + "brussel", + "brussels", + "bruxelles", + "building", + "burghof", + "bus", + "bushey", + "cadaques", + "california", + "cambridge", + "can", + "canada", + "capebreton", + "carrier", + "cartoonart", + "casadelamoneda", + "castle", + "castres", + "celtic", + "center", + "chattanooga", + 
"cheltenham", + "chesapeakebay", + "chicago", + "children", + "childrens", + "childrensgarden", + "chiropractic", + "chocolate", + "christiansburg", + "cincinnati", + "cinema", + "circus", + "civilisation", + "civilization", + "civilwar", + "clinton", + "clock", + "coal", + "coastaldefence", + "cody", + "coldwar", + "collection", + "colonialwilliamsburg", + "coloradoplateau", + "columbia", + "columbus", + "communication", + "communications", + "community", + "computer", + "computerhistory", + "contemporary", + "contemporaryart", + "convent", + "copenhagen", + "corporation", + "corvette", + "costume", + "countryestate", + "county", + "crafts", + "cranbrook", + "creation", + "cultural", + "culturalcenter", + "culture", + "cyber", + "cymru", + "dali", + "dallas", + "database", + "ddr", + "decorativearts", + "delaware", + "delmenhorst", + "denmark", + "depot", + "design", + "detroit", + "dinosaur", + "discovery", + "dolls", + "donostia", + "durham", + "eastafrica", + "eastcoast", + "education", + "educational", + "egyptian", + "eisenbahn", + "elburg", + "elvendrell", + "embroidery", + "encyclopedic", + "england", + "entomology", + "environment", + "environmentalconservation", + "epilepsy", + "essex", + "estate", + "ethnology", + "exeter", + "exhibition", + "family", + "farm", + "farmequipment", + "farmers", + "farmstead", + "field", + "figueres", + "filatelia", + "film", + "fineart", + "finearts", + "finland", + "flanders", + "florida", + "force", + "fortmissoula", + "fortworth", + "foundation", + "francaise", + "frankfurt", + "franziskaner", + "freemasonry", + "freiburg", + "fribourg", + "frog", + "fundacio", + "furniture", + "gallery", + "garden", + "gateway", + "geelvinck", + "gemological", + "geology", + "georgia", + "giessen", + "glas", + "glass", + "gorge", + "grandrapids", + "graz", + "guernsey", + "halloffame", + "hamburg", + "handson", + "harvestcelebration", + "hawaii", + "health", + "heimatunduhren", + "hellas", + "helsinki", + "hembygdsforbund", + "heritage", + "histoire", + "historical", + "historicalsociety", + "historichouses", + "historisch", + "historisches", + "history", + "historyofscience", + "horology", + "house", + "humanities", + "illustration", + "imageandsound", + "indian", + "indiana", + "indianapolis", + "indianmarket", + "intelligence", + "interactive", + "iraq", + "iron", + "isleofman", + "jamison", + "jefferson", + "jerusalem", + "jewelry", + "jewish", + "jewishart", + "jfk", + "journalism", + "judaica", + "judygarland", + "juedisches", + "juif", + "karate", + "karikatur", + "kids", + "koebenhavn", + "koeln", + "kunst", + "kunstsammlung", + "kunstunddesign", + "labor", + "labour", + "lajolla", + "lancashire", + "landes", + "lans", + "larsson", + "lewismiller", + "lincoln", + "linz", + "living", + "livinghistory", + "localhistory", + "london", + "losangeles", + "louvre", + "loyalist", + "lucerne", + "luxembourg", + "luzern", + "mad", + "madrid", + "mallorca", + "manchester", + "mansion", + "mansions", + "manx", + "marburg", + "maritime", + "maritimo", + "maryland", + "marylhurst", + "media", + "medical", + "medizinhistorisches", + "meeres", + "memorial", + "mesaverde", + "michigan", + "midatlantic", + "military", + "mill", + "miners", + "mining", + "minnesota", + "missile", + "missoula", + "modern", + "moma", + "money", + "monmouth", + "monticello", + "montreal", + "moscow", + "motorcycle", + "muenchen", + "muenster", + "mulhouse", + "muncie", + "museet", + "museumcenter", + "museumvereniging", + "music", + "national", + "nationalfirearms", + "nationalheritage", + 
"nativeamerican", + "naturalhistory", + "naturalhistorymuseum", + "naturalsciences", + "nature", + "naturhistorisches", + "natuurwetenschappen", + "naumburg", + "naval", + "nebraska", + "neues", + "newhampshire", + "newjersey", + "newmexico", + "newport", + "newspaper", + "newyork", + "niepce", + "norfolk", + "north", + "nrw", + "nuernberg", + "nuremberg", + "nyc", + "nyny", + "oceanographic", + "oceanographique", + "omaha", + "online", + "ontario", + "openair", + "oregon", + "oregontrail", + "otago", + "oxford", + "pacific", + "paderborn", + "palace", + "paleo", + "palmsprings", + "panama", + "paris", + "pasadena", + "pharmacy", + "philadelphia", + "philadelphiaarea", + "philately", + "phoenix", + "photography", + "pilots", + "pittsburgh", + "planetarium", + "plantation", + "plants", + "plaza", + "portal", + "portland", + "portlligat", + "posts-and-telecommunications", + "preservation", + "presidio", + "press", + "project", + "public", + "pubol", + "quebec", + "railroad", + "railway", + "research", + "resistance", + "riodejaneiro", + "rochester", + "rockart", + "roma", + "russia", + "saintlouis", + "salem", + "salvadordali", + "salzburg", + "sandiego", + "sanfrancisco", + "santabarbara", + "santacruz", + "santafe", + "saskatchewan", + "satx", + "savannahga", + "schlesisches", + "schoenbrunn", + "schokoladen", + "school", + "schweiz", + "science", + "science-fiction", + "scienceandhistory", + "scienceandindustry", + "sciencecenter", + "sciencecenters", + "sciencehistory", + "sciences", + "sciencesnaturelles", + "scotland", + "seaport", + "settlement", + "settlers", + "shell", + "sherbrooke", + "sibenik", + "silk", + "ski", + "skole", + "society", + "sologne", + "soundandvision", + "southcarolina", + "southwest", + "space", + "spy", + "square", + "stadt", + "stalbans", + "starnberg", + "state", + "stateofdelaware", + "station", + "steam", + "steiermark", + "stjohn", + "stockholm", + "stpetersburg", + "stuttgart", + "suisse", + "surgeonshall", + "surrey", + "svizzera", + "sweden", + "sydney", + "tank", + "tcm", + "technology", + "telekommunikation", + "television", + "texas", + "textile", + "theater", + "time", + "timekeeping", + "topology", + "torino", + "touch", + "town", + "transport", + "tree", + "trolley", + "trust", + "trustee", + "uhren", + "ulm", + "undersea", + "university", + "usa", + "usantiques", + "usarts", + "uscountryestate", + "usculture", + "usdecorativearts", + "usgarden", + "ushistory", + "ushuaia", + "uslivinghistory", + "utah", + "uvic", + "valley", + "vantaa", + "versailles", + "viking", + "village", + "virginia", + "virtual", + "virtuel", + "vlaanderen", + "volkenkunde", + "wales", + "wallonie", + "war", + "washingtondc", + "watch-and-clock", + "watchandclock", + "western", + "westfalen", + "whaling", + "wildlife", + "williamsburg", + "windmill", + "workshop", + "xn--9dbhblg6di", + "xn--comunicaes-v6a2o", + "xn--correios-e-telecomunicaes-ghc29a", + "xn--h1aegh", + "xn--lns-qla", + "york", + "yorkshire", + "yosemite", + "youth", + "zoological", + "zoology", + "aero", + "biz", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "mil", + "museum", + "name", + "net", + "org", + "pro", + "ac", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "int", + "museum", + "net", + "org", + "blogspot", + "com", + "edu", + "gob", + "net", + "nym", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "org", + "ac", + "adv", + "co", + "edu", + "gov", + "mil", + "net", + "org", + "ca", + "cc", + "co", + "com", + "dr", + "in", + "info", + "mobi", + 
"mx", + "name", + "or", + "org", + "pro", + "school", + "tv", + "us", + "ws", + "her", + "his", + "forgot", + "forgot", + "asso", + "nom", + "alwaysdata", + "at-band-camp", + "azure-mobile", + "azurewebsites", + "barsy", + "blogdns", + "boomla", + "bounceme", + "bplaced", + "broke-it", + "buyshouses", + "casacam", + "cdn77", + "cdn77-ssl", + "channelsdvr", + "cloudaccess", + "cloudapp", + "cloudfront", + "cloudfunctions", + "cryptonomic", + "ddns", + "debian", + "definima", + "dnsalias", + "dnsdojo", + "does-it", + "dontexist", + "dsmynas", + "dynalias", + "dynathome", + "dynu", + "dynv6", + "eating-organic", + "endofinternet", + "familyds", + "fastly", + "fastlylb", + "feste-ip", + "firewall-gateway", + "flynnhosting", + "from-az", + "from-co", + "from-la", + "from-ny", + "gb", + "gets-it", + "ham-radio-op", + "homeftp", + "homeip", + "homelinux", + "homeunix", + "hu", + "in", + "in-the-band", + "ipifony", + "is-a-chef", + "is-a-geek", + "isa-geek", + "jp", + "kicks-ass", + "knx-server", + "moonscale", + "mydissent", + "myeffect", + "myfritz", + "mymediapc", + "mypsx", + "mysecuritycamera", + "nhlfan", + "no-ip", + "office-on-the", + "pgafan", + "podzone", + "privatizehealthinsurance", + "rackmaze", + "redirectme", + "ru", + "scrapper-site", + "se", + "selfip", + "sells-it", + "servebbs", + "serveblog", + "serveftp", + "serveminecraft", + "square7", + "static-access", + "sytes", + "t3l3p0rt", + "thruhere", + "twmail", + "uk", + "webhop", + "za", + "r", + "freetls", + "map", + "prod", + "ssl", + "a", + "global", + "a", + "b", + "global", + "map", + "alces", + "arts", + "com", + "firm", + "info", + "net", + "other", + "per", + "rec", + "store", + "web", + "com", + "edu", + "gov", + "i", + "mil", + "mobi", + "name", + "net", + "org", + "sch", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gob", + "in", + "info", + "int", + "mil", + "net", + "nom", + "org", + "web", + "blogspot", + "bv", + "cistron", + "co", + "demon", + "transurl", + "virtueeldomein", + "aa", + "aarborte", + "aejrie", + "afjord", + "agdenes", + "ah", + "akershus", + "aknoluokta", + "akrehamn", + "al", + "alaheadju", + "alesund", + "algard", + "alstahaug", + "alta", + "alvdal", + "amli", + "amot", + "andasuolo", + "andebu", + "andoy", + "ardal", + "aremark", + "arendal", + "arna", + "aseral", + "asker", + "askim", + "askoy", + "askvoll", + "asnes", + "audnedaln", + "aukra", + "aure", + "aurland", + "aurskog-holand", + "austevoll", + "austrheim", + "averoy", + "badaddja", + "bahcavuotna", + "bahccavuotna", + "baidar", + "bajddar", + "balat", + "balestrand", + "ballangen", + "balsfjord", + "bamble", + "bardu", + "barum", + "batsfjord", + "bearalvahki", + "beardu", + "beiarn", + "berg", + "bergen", + "berlevag", + "bievat", + "bindal", + "birkenes", + "bjarkoy", + "bjerkreim", + "bjugn", + "blogspot", + "bodo", + "bokn", + "bomlo", + "bremanger", + "bronnoy", + "bronnoysund", + "brumunddal", + "bryne", + "bu", + "budejju", + "buskerud", + "bygland", + "bykle", + "cahcesuolo", + "co", + "davvenjarga", + "davvesiida", + "deatnu", + "dep", + "dielddanuorri", + "divtasvuodna", + "divttasvuotna", + "donna", + "dovre", + "drammen", + "drangedal", + "drobak", + "dyroy", + "egersund", + "eid", + "eidfjord", + "eidsberg", + "eidskog", + "eidsvoll", + "eigersund", + "elverum", + "enebakk", + "engerdal", + "etne", + "etnedal", + "evenassi", + "evenes", + "evje-og-hornnes", + "farsund", + "fauske", + "fedje", + "fet", + "fetsund", + "fhs", + "finnoy", + "fitjar", + "fjaler", + "fjell", + "fla", + "flakstad", + "flatanger", + 
"flekkefjord", + "flesberg", + "flora", + "floro", + "fm", + "folkebibl", + "folldal", + "forde", + "forsand", + "fosnes", + "frana", + "fredrikstad", + "frei", + "frogn", + "froland", + "frosta", + "froya", + "fuoisku", + "fuossko", + "fusa", + "fylkesbibl", + "fyresdal", + "gaivuotna", + "galsa", + "gamvik", + "gangaviika", + "gaular", + "gausdal", + "giehtavuoatna", + "gildeskal", + "giske", + "gjemnes", + "gjerdrum", + "gjerstad", + "gjesdal", + "gjovik", + "gloppen", + "gol", + "gran", + "grane", + "granvin", + "gratangen", + "grimstad", + "grong", + "grue", + "gulen", + "guovdageaidnu", + "ha", + "habmer", + "hadsel", + "hagebostad", + "halden", + "halsa", + "hamar", + "hamaroy", + "hammarfeasta", + "hammerfest", + "hapmir", + "haram", + "hareid", + "harstad", + "hasvik", + "hattfjelldal", + "haugesund", + "hedmark", + "hemne", + "hemnes", + "hemsedal", + "herad", + "hitra", + "hjartdal", + "hjelmeland", + "hl", + "hm", + "hobol", + "hof", + "hokksund", + "hol", + "hole", + "holmestrand", + "holtalen", + "honefoss", + "hordaland", + "hornindal", + "horten", + "hoyanger", + "hoylandet", + "hurdal", + "hurum", + "hvaler", + "hyllestad", + "ibestad", + "idrett", + "inderoy", + "iveland", + "ivgu", + "jan-mayen", + "jessheim", + "jevnaker", + "jolster", + "jondal", + "jorpeland", + "kafjord", + "karasjohka", + "karasjok", + "karlsoy", + "karmoy", + "kautokeino", + "kirkenes", + "klabu", + "klepp", + "kommune", + "kongsberg", + "kongsvinger", + "kopervik", + "kraanghke", + "kragero", + "kristiansand", + "kristiansund", + "krodsherad", + "krokstadelva", + "kvafjord", + "kvalsund", + "kvam", + "kvanangen", + "kvinesdal", + "kvinnherad", + "kviteseid", + "kvitsoy", + "laakesvuemie", + "lahppi", + "langevag", + "lardal", + "larvik", + "lavagis", + "lavangen", + "leangaviika", + "lebesby", + "leikanger", + "leirfjord", + "leirvik", + "leka", + "leksvik", + "lenvik", + "lerdal", + "lesja", + "levanger", + "lier", + "lierne", + "lillehammer", + "lillesand", + "lindas", + "lindesnes", + "loabat", + "lodingen", + "lom", + "loppa", + "lorenskog", + "loten", + "lund", + "lunner", + "luroy", + "luster", + "lyngdal", + "lyngen", + "malatvuopmi", + "malselv", + "malvik", + "mandal", + "marker", + "marnardal", + "masfjorden", + "masoy", + "matta-varjjat", + "meland", + "meldal", + "melhus", + "meloy", + "meraker", + "midsund", + "midtre-gauldal", + "mil", + "mjondalen", + "mo-i-rana", + "moareke", + "modalen", + "modum", + "molde", + "more-og-romsdal", + "mosjoen", + "moskenes", + "moss", + "mosvik", + "mr", + "muosat", + "museum", + "naamesjevuemie", + "namdalseid", + "namsos", + "namsskogan", + "nannestad", + "naroy", + "narviika", + "narvik", + "naustdal", + "navuotna", + "nedre-eiker", + "nesna", + "nesodden", + "nesoddtangen", + "nesseby", + "nesset", + "nissedal", + "nittedal", + "nl", + "nord-aurdal", + "nord-fron", + "nord-odal", + "norddal", + "nordkapp", + "nordland", + "nordre-land", + "nordreisa", + "nore-og-uvdal", + "notodden", + "notteroy", + "nt", + "odda", + "of", + "oksnes", + "ol", + "omasvuotna", + "oppdal", + "oppegard", + "orkanger", + "orkdal", + "orland", + "orskog", + "orsta", + "osen", + "oslo", + "osoyro", + "osteroy", + "ostfold", + "ostre-toten", + "overhalla", + "ovre-eiker", + "oyer", + "oygarden", + "oystre-slidre", + "porsanger", + "porsangu", + "porsgrunn", + "priv", + "rade", + "radoy", + "rahkkeravju", + "raholt", + "raisa", + "rakkestad", + "ralingen", + "rana", + "randaberg", + "rauma", + "rendalen", + "rennebu", + "rennesoy", + "rindal", + "ringebu", + "ringerike", 
+ "ringsaker", + "risor", + "rissa", + "rl", + "roan", + "rodoy", + "rollag", + "romsa", + "romskog", + "roros", + "rost", + "royken", + "royrvik", + "ruovat", + "rygge", + "salangen", + "salat", + "saltdal", + "samnanger", + "sandefjord", + "sandnes", + "sandnessjoen", + "sandoy", + "sarpsborg", + "sauda", + "sauherad", + "sel", + "selbu", + "selje", + "seljord", + "sf", + "siellak", + "sigdal", + "siljan", + "sirdal", + "skanit", + "skanland", + "skaun", + "skedsmo", + "skedsmokorset", + "ski", + "skien", + "skierva", + "skiptvet", + "skjak", + "skjervoy", + "skodje", + "slattum", + "smola", + "snaase", + "snasa", + "snillfjord", + "snoasa", + "sogndal", + "sogne", + "sokndal", + "sola", + "solund", + "somna", + "sondre-land", + "songdalen", + "sor-aurdal", + "sor-fron", + "sor-odal", + "sor-varanger", + "sorfold", + "sorreisa", + "sortland", + "sorum", + "spjelkavik", + "spydeberg", + "st", + "stange", + "stat", + "stathelle", + "stavanger", + "stavern", + "steigen", + "steinkjer", + "stjordal", + "stjordalshalsen", + "stokke", + "stor-elvdal", + "stord", + "stordal", + "storfjord", + "strand", + "stranda", + "stryn", + "sula", + "suldal", + "sund", + "sunndal", + "surnadal", + "svalbard", + "sveio", + "svelvik", + "sykkylven", + "tana", + "tananger", + "telemark", + "time", + "tingvoll", + "tinn", + "tjeldsund", + "tjome", + "tm", + "tokke", + "tolga", + "tonsberg", + "torsken", + "tr", + "trana", + "tranby", + "tranoy", + "troandin", + "trogstad", + "tromsa", + "tromso", + "trondheim", + "trysil", + "tvedestrand", + "tydal", + "tynset", + "tysfjord", + "tysnes", + "tysvar", + "ullensaker", + "ullensvang", + "ulvik", + "unjarga", + "utsira", + "va", + "vaapste", + "vadso", + "vaga", + "vagan", + "vagsoy", + "vaksdal", + "valle", + "vang", + "vanylven", + "vardo", + "varggat", + "varoy", + "vefsn", + "vega", + "vegarshei", + "vennesla", + "verdal", + "verran", + "vestby", + "vestfold", + "vestnes", + "vestre-slidre", + "vestre-toten", + "vestvagoy", + "vevelstad", + "vf", + "vgs", + "vik", + "vikna", + "vindafjord", + "voagat", + "volda", + "voss", + "vossevangen", + "xn--andy-ira", + "xn--asky-ira", + "xn--aurskog-hland-jnb", + "xn--avery-yua", + "xn--bdddj-mrabd", + "xn--bearalvhki-y4a", + "xn--berlevg-jxa", + "xn--bhcavuotna-s4a", + "xn--bhccavuotna-k7a", + "xn--bidr-5nac", + "xn--bievt-0qa", + "xn--bjarky-fya", + "xn--bjddar-pta", + "xn--blt-elab", + "xn--bmlo-gra", + "xn--bod-2na", + "xn--brnny-wuac", + "xn--brnnysund-m8ac", + "xn--brum-voa", + "xn--btsfjord-9za", + "xn--davvenjrga-y4a", + "xn--dnna-gra", + "xn--drbak-wua", + "xn--dyry-ira", + "xn--eveni-0qa01ga", + "xn--finny-yua", + "xn--fjord-lra", + "xn--fl-zia", + "xn--flor-jra", + "xn--frde-gra", + "xn--frna-woa", + "xn--frya-hra", + "xn--ggaviika-8ya47h", + "xn--gildeskl-g0a", + "xn--givuotna-8ya", + "xn--gjvik-wua", + "xn--gls-elac", + "xn--h-2fa", + "xn--hbmer-xqa", + "xn--hcesuolo-7ya35b", + "xn--hgebostad-g3a", + "xn--hmmrfeasta-s4ac", + "xn--hnefoss-q1a", + "xn--hobl-ira", + "xn--holtlen-hxa", + "xn--hpmir-xqa", + "xn--hyanger-q1a", + "xn--hylandet-54a", + "xn--indery-fya", + "xn--jlster-bya", + "xn--jrpeland-54a", + "xn--karmy-yua", + "xn--kfjord-iua", + "xn--klbu-woa", + "xn--koluokta-7ya57h", + "xn--krager-gya", + "xn--kranghke-b0a", + "xn--krdsherad-m8a", + "xn--krehamn-dxa", + "xn--krjohka-hwab49j", + "xn--ksnes-uua", + "xn--kvfjord-nxa", + "xn--kvitsy-fya", + "xn--kvnangen-k0a", + "xn--l-1fa", + "xn--laheadju-7ya", + "xn--langevg-jxa", + "xn--ldingen-q1a", + "xn--leagaviika-52b", + "xn--lesund-hua", + 
"xn--lgrd-poac", + "xn--lhppi-xqa", + "xn--linds-pra", + "xn--loabt-0qa", + "xn--lrdal-sra", + "xn--lrenskog-54a", + "xn--lt-liac", + "xn--lten-gra", + "xn--lury-ira", + "xn--mely-ira", + "xn--merker-kua", + "xn--mjndalen-64a", + "xn--mlatvuopmi-s4a", + "xn--mli-tla", + "xn--mlselv-iua", + "xn--moreke-jua", + "xn--mosjen-eya", + "xn--mot-tla", + "xn--mre-og-romsdal-qqb", + "xn--msy-ula0h", + "xn--mtta-vrjjat-k7af", + "xn--muost-0qa", + "xn--nmesjevuemie-tcba", + "xn--nry-yla5g", + "xn--nttery-byae", + "xn--nvuotna-hwa", + "xn--oppegrd-ixa", + "xn--ostery-fya", + "xn--osyro-wua", + "xn--porsgu-sta26f", + "xn--rady-ira", + "xn--rdal-poa", + "xn--rde-ula", + "xn--rdy-0nab", + "xn--rennesy-v1a", + "xn--rhkkervju-01af", + "xn--rholt-mra", + "xn--risa-5na", + "xn--risr-ira", + "xn--rland-uua", + "xn--rlingen-mxa", + "xn--rmskog-bya", + "xn--rros-gra", + "xn--rskog-uua", + "xn--rst-0na", + "xn--rsta-fra", + "xn--ryken-vua", + "xn--ryrvik-bya", + "xn--s-1fa", + "xn--sandnessjen-ogb", + "xn--sandy-yua", + "xn--seral-lra", + "xn--sgne-gra", + "xn--skierv-uta", + "xn--skjervy-v1a", + "xn--skjk-soa", + "xn--sknit-yqa", + "xn--sknland-fxa", + "xn--slat-5na", + "xn--slt-elab", + "xn--smla-hra", + "xn--smna-gra", + "xn--snase-nra", + "xn--sndre-land-0cb", + "xn--snes-poa", + "xn--snsa-roa", + "xn--sr-aurdal-l8a", + "xn--sr-fron-q1a", + "xn--sr-odal-q1a", + "xn--sr-varanger-ggb", + "xn--srfold-bya", + "xn--srreisa-q1a", + "xn--srum-gra", + "xn--stfold-9xa", + "xn--stjrdal-s1a", + "xn--stjrdalshalsen-sqb", + "xn--stre-toten-zcb", + "xn--tjme-hra", + "xn--tnsberg-q1a", + "xn--trany-yua", + "xn--trgstad-r1a", + "xn--trna-woa", + "xn--troms-zua", + "xn--tysvr-vra", + "xn--unjrga-rta", + "xn--vads-jra", + "xn--vard-jra", + "xn--vegrshei-c0a", + "xn--vestvgy-ixa6o", + "xn--vg-yiab", + "xn--vgan-qoa", + "xn--vgsy-qoa0j", + "xn--vre-eiker-k8a", + "xn--vrggt-xqad", + "xn--vry-yla5g", + "xn--yer-zna", + "xn--ygarden-p1a", + "xn--ystre-slidre-ujb", + "gs", + "gs", + "nes", + "gs", + "nes", + "gs", + "os", + "valer", + "xn--vler-qoa", + "gs", + "gs", + "os", + "gs", + "heroy", + "sande", + "gs", + "gs", + "bo", + "heroy", + "xn--b-5ga", + "xn--hery-ira", + "gs", + "gs", + "gs", + "gs", + "valer", + "gs", + "gs", + "gs", + "gs", + "bo", + "xn--b-5ga", + "gs", + "gs", + "gs", + "sande", + "gs", + "sande", + "xn--hery-ira", + "xn--vler-qoa", + "biz", + "com", + "edu", + "gov", + "info", + "net", + "org", + "merseine", + "mine", + "nom", + "shacknet", + "ac", + "co", + "cri", + "geek", + "gen", + "govt", + "health", + "iwi", + "kiwi", + "maori", + "mil", + "net", + "nym", + "org", + "parliament", + "school", + "xn--mori-qsa", + "blogspot", + "co", + "com", + "edu", + "gov", + "med", + "museum", + "net", + "org", + "pro", + "homelink", + "barsy", + "accesscam", + "ae", + "amune", + "blogdns", + "blogsite", + "bmoattachments", + "boldlygoingnowhere", + "cable-modem", + "camdvr", + "cdn77", + "cdn77-secure", + "certmgr", + "cloudns", + "collegefan", + "couchpotatofries", + "ddnss", + "diskstation", + "dnsalias", + "dnsdojo", + "doesntexist", + "dontexist", + "doomdns", + "dsmynas", + "duckdns", + "dvrdns", + "dynalias", + "dyndns", + "endofinternet", + "endoftheinternet", + "eu", + "familyds", + "fedorainfracloud", + "fedorapeople", + "fedoraproject", + "freeddns", + "from-me", + "game-host", + "gotdns", + "hepforge", + "hk", + "hobby-site", + "homedns", + "homeftp", + "homelinux", + "homeunix", + "hopto", + "is-a-bruinsfan", + "is-a-candidate", + "is-a-celticsfan", + "is-a-chef", + "is-a-geek", + "is-a-knight", + 
"is-a-linux-user", + "is-a-patsfan", + "is-a-soxfan", + "is-found", + "is-lost", + "is-saved", + "is-very-bad", + "is-very-evil", + "is-very-good", + "is-very-nice", + "is-very-sweet", + "isa-geek", + "js", + "kicks-ass", + "misconfused", + "mlbfan", + "my-firewall", + "myfirewall", + "myftp", + "mysecuritycamera", + "mywire", + "nflfan", + "no-ip", + "pimienta", + "podzone", + "poivron", + "potager", + "read-books", + "readmyblog", + "selfip", + "sellsyourhome", + "servebbs", + "serveftp", + "servegame", + "spdns", + "stuff-4-sale", + "sweetpepper", + "tunk", + "tuxfamily", + "twmail", + "ufcfan", + "us", + "webhop", + "webredirect", + "wmflabs", + "za", + "zapto", + "tele", + "c", + "rsc", + "origin", + "ssl", + "go", + "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", + "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", + "cloud", + "nerdpol", + "abo", + "ac", + "com", + "edu", + "gob", + "ing", + "med", + "net", + "nom", + "org", + "sld", + "ybo", + "blogspot", + "com", + "edu", + "gob", + "mil", + "net", + "nom", + "nym", + "org", + "com", + "edu", + "org", + "com", + "edu", + "gov", + "i", + "mil", + "net", + "ngo", + "org", + "1337", + "biz", + "com", + "edu", + "fam", + "gob", + "gok", + "gon", + "gop", + "gos", + "gov", + "info", + "net", + "org", + "web", + "agro", + "aid", + "art", + "atm", + "augustow", + "auto", + "babia-gora", + "bedzin", + "beep", + "beskidy", + "bialowieza", + "bialystok", + "bielawa", + "bieszczady", + "biz", + "boleslawiec", + "bydgoszcz", + "bytom", + "cieszyn", + "co", + "com", + "czeladz", + "czest", + "dlugoleka", + "edu", + "elblag", + "elk", + "gda", + "gdansk", + "gdynia", + "gliwice", + "glogow", + "gmina", + "gniezno", + "gorlice", + "gov", + "grajewo", + "gsm", + "ilawa", + "info", + "jaworzno", + "jelenia-gora", + "jgora", + "kalisz", + "karpacz", + "kartuzy", + "kaszuby", + "katowice", + "kazimierz-dolny", + "kepno", + "ketrzyn", + "klodzko", + "kobierzyce", + "kolobrzeg", + "konin", + "konskowola", + "krakow", + "kutno", + "lapy", + "lebork", + "legnica", + "lezajsk", + "limanowa", + "lomza", + "lowicz", + "lubin", + "lukow", + "mail", + "malbork", + "malopolska", + "mazowsze", + "mazury", + "med", + "media", + "miasta", + "mielec", + "mielno", + "mil", + "mragowo", + "naklo", + "net", + "nieruchomosci", + "nom", + "nowaruda", + "nysa", + "olawa", + "olecko", + "olkusz", + "olsztyn", + "opoczno", + "opole", + "org", + "ostroda", + "ostroleka", + "ostrowiec", + "ostrowwlkp", + "pc", + "pila", + "pisz", + "podhale", + "podlasie", + "polkowice", + "pomorskie", + "pomorze", + "powiat", + "poznan", + "priv", + "prochowice", + "pruszkow", + "przeworsk", + "pulawy", + "radom", + "rawa-maz", + "realestate", + "rel", + "rybnik", + "rzeszow", + "sanok", + "sejny", + "sex", + "shop", + "sklep", + "skoczow", + "slask", + "slupsk", + "sopot", + "sos", + "sosnowiec", + "stalowa-wola", + "starachowice", + "stargard", + "suwalki", + "swidnica", + "swiebodzin", + "swinoujscie", + "szczecin", + "szczytno", + "szkola", + "targi", + "tarnobrzeg", + "tgory", + "tm", + "tourism", + "travel", + "turek", + "turystyka", + "tychy", + "ustka", + "walbrzych", + "warmia", + "warszawa", + "waw", + "wegrow", + "wielun", + 
"wlocl", + "wloclawek", + "wodzislaw", + "wolomin", + "wroc", + "wroclaw", + "zachpomor", + "zagan", + "zakopane", + "zarow", + "zgora", + "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", + "pa", + "pinb", + "piw", + "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", + "so", + "sr", + "starostwo", + "ug", + "ugim", + "um", + "umig", + "upow", + "uppo", + "us", + "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", + "co", + "edu", + "gov", + "net", + "org", + "ac", + "biz", + "com", + "edu", + "est", + "gov", + "info", + "isla", + "name", + "net", + "org", + "pro", + "prof", + "aaa", + "aca", + "acct", + "avocat", + "bar", + "cloudns", + "cpa", + "eng", + "jur", + "law", + "med", + "recht", + "com", + "edu", + "gov", + "net", + "org", + "plo", + "sec", + "blogspot", + "com", + "edu", + "gov", + "int", + "net", + "nome", + "nym", + "org", + "publ", + "belau", + "cloudns", + "co", + "ed", + "go", + "ne", + "nom", + "or", + "com", + "coop", + "edu", + "gov", + "mil", + "net", + "org", + "blogspot", + "com", + "edu", + "gov", + "mil", + "name", + "net", + "nom", + "org", + "sch", + "asso", + "blogspot", + "com", + "nom", + "ybo", + "clan", + "arts", + "blogspot", + "com", + "firm", + "info", + "nom", + "nt", + "org", + "rec", + "shop", + "store", + "tm", + "www", + "lima-city", + "myddns", + "webspace", + "ac", + "blogspot", + "co", + "edu", + "gov", + "in", + "nom", + "org", + "ac", + "adygeya", + "bashkiria", + "bir", + "blogspot", + "cbg", + "cldmail", + "com", + "dagestan", + "edu", + "gov", + "grozny", + "int", + "kalmykia", + "kustanai", + "marine", + "mil", + "mordovia", + "msk", + "mytis", + "nalchik", + "net", + "nov", + "org", + "pp", + "pyatigorsk", + "spb", + "test", + "vladikavkaz", + "vladimir", + "hb", + "ac", + "co", + "com", + "edu", + "gouv", + "gov", + "int", + "mil", + "net", + "com", + "edu", + "gov", + "med", + "net", + "org", + "pub", + "sch", + "com", + "edu", + "gov", + "net", + "org", + "com", + "edu", + "gov", + "net", + "org", + "ybo", + "com", + "edu", + "gov", + "info", + "med", + "net", + "org", + "tv", + "a", + "ac", + "b", + "bd", + "blogspot", + "brand", + "c", + "com", + "d", + "e", + "f", + "fh", + "fhsk", + "fhv", + "g", + "h", + "i", + "k", + "komforb", + "kommunalforbund", + "komvux", + "l", + "lanbib", + "m", + "n", + "naturbruksgymn", + "o", + "org", + "p", + "parti", + "pp", + "press", + "r", + "s", + "t", + "tm", + "u", + "w", + "x", + "y", + "z", + "blogspot", + "com", + "edu", + "gov", + "net", + "org", + "per", + "com", + "gov", + "hashbang", + "mil", + "net", + "now", + "org", + "platform", + "wedeploy", + "blogspot", + "nom", + "byen", + "cyon", + "platformsh", + "blogspot", + "nym", + "com", + "edu", + "gov", + "net", + "org", + "art", + "blogspot", + "com", + "edu", + "gouv", + "org", + "perso", + "univ", + "com", + "net", + "org", + "stackspace", + "uber", + "xs4all", + "co", + "com", + "consulado", + "edu", + "embaixada", + "gov", + "mil", + "net", + "org", + "principe", + "saotome", + "store", + "abkhazia", + "adygeya", + "aktyubinsk", + "arkhangelsk", + "armenia", + "ashgabad", + "azerbaijan", + "balashov", + "bashkiria", + "bryansk", + "bukhara", + "chimkent", + "dagestan", + "east-kazakhstan", + "exnet", + "georgia", + "grozny", + "ivanovo", + "jambyl", + "kalmykia", + "kaluga", + "karacol", + "karaganda", + "karelia", + "khakassia", + "krasnodar", + "kurgan", + 
"kustanai", + "lenug", + "mangyshlak", + "mordovia", + "msk", + "murmansk", + "nalchik", + "navoi", + "north-kazakhstan", + "nov", + "nym", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "tashkent", + "termez", + "togliatti", + "troitsk", + "tselinograd", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", + "barsy", + "com", + "edu", + "gob", + "org", + "red", + "gov", + "nym", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "knightpoint", + "ac", + "co", + "org", + "blogspot", + "ac", + "co", + "go", + "in", + "mi", + "net", + "or", + "ac", + "biz", + "co", + "com", + "edu", + "go", + "gov", + "int", + "mil", + "name", + "net", + "nic", + "org", + "test", + "web", + "gov", + "co", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "agrinet", + "com", + "defense", + "edunet", + "ens", + "fin", + "gov", + "ind", + "info", + "intl", + "mincom", + "nat", + "net", + "org", + "perso", + "rnrt", + "rns", + "rnu", + "tourism", + "turen", + "com", + "edu", + "gov", + "mil", + "net", + "org", + "vpnplus", + "av", + "bbs", + "bel", + "biz", + "com", + "dr", + "edu", + "gen", + "gov", + "info", + "k12", + "kep", + "mil", + "name", + "nc", + "net", + "org", + "pol", + "tel", + "tv", + "web", + "blogspot", + "gov", + "ybo", + "aero", + "biz", + "co", + "com", + "coop", + "edu", + "gov", + "info", + "int", + "jobs", + "mobi", + "museum", + "name", + "net", + "org", + "pro", + "travel", + "better-than", + "dyndns", + "on-the-web", + "worse-than", + "blogspot", + "club", + "com", + "ebiz", + "edu", + "game", + "gov", + "idv", + "mil", + "net", + "nym", + "org", + "url", + "xn--czrw28b", + "xn--uc0atv", + "xn--zf0ao64a", + "mymailer", + "ac", + "co", + "go", + "hotel", + "info", + "me", + "mil", + "mobi", + "ne", + "or", + "sc", + "tv", + "biz", + "cc", + "cherkassy", + "cherkasy", + "chernigov", + "chernihiv", + "chernivtsi", + "chernovtsy", + "ck", + "cn", + "co", + "com", + "cr", + "crimea", + "cv", + "dn", + "dnepropetrovsk", + "dnipropetrovsk", + "dominic", + "donetsk", + "dp", + "edu", + "gov", + "if", + "in", + "inf", + "ivano-frankivsk", + "kh", + "kharkiv", + "kharkov", + "kherson", + "khmelnitskiy", + "khmelnytskyi", + "kiev", + "kirovograd", + "km", + "kr", + "krym", + "ks", + "kv", + "kyiv", + "lg", + "lt", + "ltd", + "lugansk", + "lutsk", + "lv", + "lviv", + "mk", + "mykolaiv", + "net", + "nikolaev", + "od", + "odesa", + "odessa", + "org", + "pl", + "poltava", + "pp", + "rivne", + "rovno", + "rv", + "sb", + "sebastopol", + "sevastopol", + "sm", + "sumy", + "te", + "ternopil", + "uz", + "uzhgorod", + "vinnica", + "vinnytsia", + "vn", + "volyn", + "yalta", + "zaporizhzhe", + "zaporizhzhia", + "zhitomir", + "zhytomyr", + "zp", + "zt", + "ac", + "blogspot", + "co", + "com", + "go", + "ne", + "nom", + "or", + "org", + "sc", + "ac", + "co", + "gov", + "ltd", + "me", + "net", + "nhs", + "org", + "plc", + "police", + "sch", + "blogspot", + "no-ip", + "wellbeingzone", + "homeoffice", + "service", + "ak", + "al", + "ar", + "as", + "az", + "ca", + "cloudns", + "co", + "ct", + "dc", + "de", + "dni", + "drud", + "fed", + "fl", + "ga", + "golffan", + "gu", + "hi", + "ia", + "id", + "il", + "in", + "is-by", + "isa", + "kids", + "ks", + "ky", + "la", + "land-4-sale", + "ma", + "md", + "me", + "mi", + "mn", + "mo", + "ms", + "mt", + "nc", + "nd", + "ne", + "nh", + "nj", + "nm", + "noip", + "nsn", + "nv", + "ny", + "oh", + "ok", + "or", + "pa", + "pointto", + "pr", + "ri", + "sc", + "sd", + "stuff-4-sale", + "tn", + "tx", + "ut", + "va", + "vi", + "vt", + "wa", + 
"wi", + "wv", + "wy", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "chtr", + "paroch", + "pvt", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "ann-arbor", + "cc", + "cog", + "dst", + "eaton", + "gen", + "k12", + "lib", + "mus", + "tec", + "washtenaw", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "k12", + "lib", + "cc", + "cc", + "k12", + "lib", + "com", + "edu", + "gub", + "mil", + "net", + "nom", + "org", + "blogspot", + "co", + "com", + "net", + "org", + "com", + "edu", + "gov", + "mil", + "net", + "nom", + "org", + "arts", + "co", + "com", + "e12", + "edu", + "firm", + "gob", + "gov", + "info", + "int", + "mil", + "net", + "org", + "rec", + "store", + "tec", + "web", + "nom", + "co", + "com", + "k12", + "net", + "org", + "ac", + "biz", + "blogspot", + "com", + "edu", + "gov", + "health", + "info", + "int", + "name", + "net", + "org", + "pro", + "com", + "edu", + "net", + "org", + "advisor", + "com", + "dyndns", + "edu", + "gov", + "mypets", + "net", + "org", + "xn--80au", + "xn--90azh", + "xn--c1avg", + "xn--d1at", + "xn--o1ac", + "xn--o1ach", + "xn--12c1fe0br", + "xn--12cfi8ixb8l", + "xn--12co0c3b4eva", + "xn--h3cuzk1di", + "xn--m3ch0j3a", + "xn--o3cyx2a", + "blogsite", + "fhapp", + "ac", + "agric", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", + "ac", + "biz", + "co", + "com", + "edu", + "gov", + "info", + "mil", + "net", + "org", + "sch", + "lima", + "triton", + "ac", + "co", + "gov", + "mil", + "org", +} diff --git a/vendor/golang.org/x/net/route/address.go b/vendor/golang.org/x/net/route/address.go new file mode 100644 index 0000000..e6bfa39 --- /dev/null +++ b/vendor/golang.org/x/net/route/address.go @@ -0,0 +1,425 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "runtime" + +// An Addr represents an address associated with packet routing. +type Addr interface { + // Family returns an address family. + Family() int +} + +// A LinkAddr represents a link-layer address. 
+type LinkAddr struct {
+	Index int    // interface index when attached
+	Name  string // interface name when attached
+	Addr  []byte // link-layer address when attached
+}
+
+// Family implements the Family method of Addr interface.
+func (a *LinkAddr) Family() int { return sysAF_LINK }
+
+func (a *LinkAddr) lenAndSpace() (int, int) {
+	l := 8 + len(a.Name) + len(a.Addr)
+	return l, roundup(l)
+}
+
+func (a *LinkAddr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	nlen, alen := len(a.Name), len(a.Addr)
+	if nlen > 255 || alen > 255 {
+		return 0, errInvalidAddr
+	}
+	b[0] = byte(l)
+	b[1] = sysAF_LINK
+	if a.Index > 0 {
+		nativeEndian.PutUint16(b[2:4], uint16(a.Index))
+	}
+	data := b[8:]
+	if nlen > 0 {
+		b[5] = byte(nlen)
+		// The kernel form carries the interface name first and the
+		// link-layer address second (see parseKernelLinkAddr below),
+		// so the name must be written here, not the address.
+		copy(data[:nlen], a.Name)
+		data = data[nlen:]
+	}
+	if alen > 0 {
+		b[6] = byte(alen)
+		copy(data[:alen], a.Addr)
+		data = data[alen:]
+	}
+	return ll, nil
+}
+
+func parseLinkAddr(b []byte) (Addr, error) {
+	if len(b) < 8 {
+		return nil, errInvalidAddr
+	}
+	_, a, err := parseKernelLinkAddr(sysAF_LINK, b[4:])
+	if err != nil {
+		return nil, err
+	}
+	a.(*LinkAddr).Index = int(nativeEndian.Uint16(b[2:4]))
+	return a, nil
+}
+
+// parseKernelLinkAddr parses b as a link-layer address in
+// conventional BSD kernel form.
+func parseKernelLinkAddr(_ int, b []byte) (int, Addr, error) {
+	// The encoding looks like the following:
+	// +----------------------------+
+	// | Type            (1 octet)  |
+	// +----------------------------+
+	// | Name length     (1 octet)  |
+	// +----------------------------+
+	// | Address length  (1 octet)  |
+	// +----------------------------+
+	// | Selector length (1 octet)  |
+	// +----------------------------+
+	// | Data            (variable) |
+	// +----------------------------+
+	//
+	// On some platforms, all-bit-one of length field means "don't
+	// care".
+	nlen, alen, slen := int(b[1]), int(b[2]), int(b[3])
+	if nlen == 0xff {
+		nlen = 0
+	}
+	if alen == 0xff {
+		alen = 0
+	}
+	if slen == 0xff {
+		slen = 0
+	}
+	l := 4 + nlen + alen + slen
+	if len(b) < l {
+		return 0, nil, errInvalidAddr
+	}
+	data := b[4:]
+	var name string
+	var addr []byte
+	if nlen > 0 {
+		name = string(data[:nlen])
+		data = data[nlen:]
+	}
+	if alen > 0 {
+		addr = data[:alen]
+		data = data[alen:]
+	}
+	return l, &LinkAddr{Name: name, Addr: addr}, nil
+}
+
+// An Inet4Addr represents an internet address for IPv4.
+type Inet4Addr struct {
+	IP [4]byte // IP address
+}
+
+// Family implements the Family method of Addr interface.
+func (a *Inet4Addr) Family() int { return sysAF_INET }
+
+func (a *Inet4Addr) lenAndSpace() (int, int) {
+	return sizeofSockaddrInet, roundup(sizeofSockaddrInet)
+}
+
+func (a *Inet4Addr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	b[0] = byte(l)
+	b[1] = sysAF_INET
+	copy(b[4:8], a.IP[:])
+	return ll, nil
+}
+
+// An Inet6Addr represents an internet address for IPv6.
+type Inet6Addr struct {
+	IP     [16]byte // IP address
+	ZoneID int      // zone identifier
+}
+
+// Family implements the Family method of Addr interface.
+func (a *Inet6Addr) Family() int { return sysAF_INET6 }
+
+func (a *Inet6Addr) lenAndSpace() (int, int) {
+	return sizeofSockaddrInet6, roundup(sizeofSockaddrInet6)
+}
+
+func (a *Inet6Addr) marshal(b []byte) (int, error) {
+	l, ll := a.lenAndSpace()
+	if len(b) < ll {
+		return 0, errShortBuffer
+	}
+	b[0] = byte(l)
+	b[1] = sysAF_INET6
+	copy(b[8:24], a.IP[:])
+	if a.ZoneID > 0 {
+		nativeEndian.PutUint32(b[24:28], uint32(a.ZoneID))
+	}
+	return ll, nil
+}
+
+// parseInetAddr parses b as an internet address for IPv4 or IPv6.
+func parseInetAddr(af int, b []byte) (Addr, error) {
+	switch af {
+	case sysAF_INET:
+		if len(b) < sizeofSockaddrInet {
+			return nil, errInvalidAddr
+		}
+		a := &Inet4Addr{}
+		copy(a.IP[:], b[4:8])
+		return a, nil
+	case sysAF_INET6:
+		if len(b) < sizeofSockaddrInet6 {
+			return nil, errInvalidAddr
+		}
+		a := &Inet6Addr{ZoneID: int(nativeEndian.Uint32(b[24:28]))}
+		copy(a.IP[:], b[8:24])
+		if a.IP[0] == 0xfe && a.IP[1]&0xc0 == 0x80 || a.IP[0] == 0xff && (a.IP[1]&0x0f == 0x01 || a.IP[1]&0x0f == 0x02) {
+			// KAME based IPv6 protocol stack usually
+			// embeds the interface index in the
+			// interface-local or link-local address as
+			// the kernel-internal form.
+			id := int(bigEndian.Uint16(a.IP[2:4]))
+			if id != 0 {
+				a.ZoneID = id
+				a.IP[2], a.IP[3] = 0, 0
+			}
+		}
+		return a, nil
+	default:
+		return nil, errInvalidAddr
+	}
+}
+
+// parseKernelInetAddr parses b as an internet address in conventional
+// BSD kernel form.
+func parseKernelInetAddr(af int, b []byte) (int, Addr, error) {
+	// The encoding looks similar to the NLRI encoding.
+	// +----------------------------+
+	// | Length          (1 octet)  |
+	// +----------------------------+
+	// | Address prefix  (variable) |
+	// +----------------------------+
+	//
+	// The differences between the kernel form and the NLRI
+	// encoding are:
+	//
+	// - The length field of the kernel form indicates the prefix
+	//   length in bytes, not in bits
+	//
+	// - In the kernel form, zero value of the length field
+	//   doesn't mean 0.0.0.0/0 or ::/0
+	//
+	// - The kernel form appends leading bytes to the prefix field
+	//   to make the <length, prefix> tuple conform to the routing
+	//   message boundary
+	l := int(b[0])
+	if runtime.GOOS == "darwin" {
+		// On Darwin, an address in the kernel form is also
+		// used as a message filler.
+		if l == 0 || len(b) > roundup(l) {
+			l = roundup(l)
+		}
+	} else {
+		l = roundup(l)
+	}
+	if len(b) < l {
+		return 0, nil, errInvalidAddr
+	}
+	// Don't reorder case expressions.
+	// The case expressions for IPv6 must come first.
+	const (
+		off4 = 4 // offset of in_addr
+		off6 = 8 // offset of in6_addr
+	)
+	switch {
+	case b[0] == sizeofSockaddrInet6:
+		a := &Inet6Addr{}
+		copy(a.IP[:], b[off6:off6+16])
+		return int(b[0]), a, nil
+	case af == sysAF_INET6:
+		a := &Inet6Addr{}
+		if l-1 < off6 {
+			copy(a.IP[:], b[1:l])
+		} else {
+			copy(a.IP[:], b[l-off6:l])
+		}
+		return int(b[0]), a, nil
+	case b[0] == sizeofSockaddrInet:
+		a := &Inet4Addr{}
+		copy(a.IP[:], b[off4:off4+4])
+		return int(b[0]), a, nil
+	default: // an old fashion; AF_UNSPEC or unknown means AF_INET
+		a := &Inet4Addr{}
+		if l-1 < off4 {
+			copy(a.IP[:], b[1:l])
+		} else {
+			copy(a.IP[:], b[l-off4:l])
+		}
+		return int(b[0]), a, nil
+	}
+}
+
+// A DefaultAddr represents an address of various operating
+// system-specific features.
+type DefaultAddr struct {
+	af  int
+	Raw []byte // raw format of address
+}
+
+// Family implements the Family method of Addr interface.
+func (a *DefaultAddr) Family() int { return a.af } + +func (a *DefaultAddr) lenAndSpace() (int, int) { + l := len(a.Raw) + return l, roundup(l) +} + +func (a *DefaultAddr) marshal(b []byte) (int, error) { + l, ll := a.lenAndSpace() + if len(b) < ll { + return 0, errShortBuffer + } + if l > 255 { + return 0, errInvalidAddr + } + b[1] = byte(l) + copy(b[:l], a.Raw) + return ll, nil +} + +func parseDefaultAddr(b []byte) (Addr, error) { + if len(b) < 2 || len(b) < int(b[0]) { + return nil, errInvalidAddr + } + a := &DefaultAddr{af: int(b[1]), Raw: b[:b[0]]} + return a, nil +} + +func addrsSpace(as []Addr) int { + var l int + for _, a := range as { + switch a := a.(type) { + case *LinkAddr: + _, ll := a.lenAndSpace() + l += ll + case *Inet4Addr: + _, ll := a.lenAndSpace() + l += ll + case *Inet6Addr: + _, ll := a.lenAndSpace() + l += ll + case *DefaultAddr: + _, ll := a.lenAndSpace() + l += ll + } + } + return l +} + +// marshalAddrs marshals as and returns a bitmap indicating which +// address is stored in b. +func marshalAddrs(b []byte, as []Addr) (uint, error) { + var attrs uint + for i, a := range as { + switch a := a.(type) { + case *LinkAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet4Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *Inet6Addr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + case *DefaultAddr: + l, err := a.marshal(b) + if err != nil { + return 0, err + } + b = b[l:] + attrs |= 1 << uint(i) + } + } + return attrs, nil +} + +func parseAddrs(attrs uint, fn func(int, []byte) (int, Addr, error), b []byte) ([]Addr, error) { + var as [sysRTAX_MAX]Addr + af := int(sysAF_UNSPEC) + for i := uint(0); i < sysRTAX_MAX && len(b) >= roundup(0); i++ { + if attrs&(1<> 8) +} + +func (binaryLittleEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (binaryLittleEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (binaryLittleEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type binaryBigEndian struct{} + +func (binaryBigEndian) Uint16(b []byte) uint16 { + _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808 + return uint16(b[1]) | uint16(b[0])<<8 +} + +func (binaryBigEndian) PutUint16(b []byte, v uint16) { + _ = b[1] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (binaryBigEndian) Uint32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (binaryBigEndian) PutUint32(b []byte, v uint32) { + _ = b[3] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (binaryBigEndian) Uint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | 
uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} diff --git a/vendor/golang.org/x/net/route/defs_darwin.go b/vendor/golang.org/x/net/route/defs_darwin.go new file mode 100644 index 0000000..e771644 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_darwin.go @@ -0,0 +1,114 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STAT = C.NET_RT_STAT + sysNET_RT_TRASH = C.NET_RT_TRASH + sysNET_RT_IFLIST2 = C.NET_RT_IFLIST2 + sysNET_RT_DUMP2 = C.NET_RT_DUMP2 + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFINFO2 = C.RTM_IFINFO2 + sysRTM_NEWMADDR2 = C.RTM_NEWMADDR2 + sysRTM_GET2 = C.RTM_GET2 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDarwin15 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDarwin15 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDarwin15 = C.sizeof_struct_ifma_msghdr + sizeofIfMsghdr2Darwin15 = C.sizeof_struct_if_msghdr2 + sizeofIfmaMsghdr2Darwin15 = C.sizeof_struct_ifma_msghdr2 + sizeofIfDataDarwin15 = C.sizeof_struct_if_data + sizeofIfData64Darwin15 = C.sizeof_struct_if_data64 + + sizeofRtMsghdrDarwin15 = C.sizeof_struct_rt_msghdr + sizeofRtMsghdr2Darwin15 = C.sizeof_struct_rt_msghdr2 + sizeofRtMetricsDarwin15 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_dragonfly.go b/vendor/golang.org/x/net/route/defs_dragonfly.go new file mode 100644 index 0000000..dd31de2 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_dragonfly.go @@ -0,0 +1,113 @@ +// 
Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B + sysCTL_LWKT = C.CTL_LWKT + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_MPLS1 = C.RTA_MPLS1 + sysRTA_MPLS2 = C.RTA_MPLS2 + sysRTA_MPLS3 = C.RTA_MPLS3 + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MPLS1 = C.RTAX_MPLS1 + sysRTAX_MPLS2 = C.RTAX_MPLS2 + sysRTAX_MPLS3 = C.RTAX_MPLS3 + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifa_msghdr + sizeofIfmaMsghdrDragonFlyBSD4 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrDragonFlyBSD4 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrDragonFlyBSD4 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsDragonFlyBSD4 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_freebsd.go b/vendor/golang.org/x/net/route/defs_freebsd.go new file mode 100644 index 0000000..d95594d --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_freebsd.go @@ -0,0 +1,337 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include + +struct if_data_freebsd7 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd9 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd10 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_vhid; + u_char ifi_baudrate_pf; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + uint64_t ifi_hwassist; + time_t __ifi_epoch; + struct timeval __ifi_lastchange; +}; + +struct if_data_freebsd11 { + uint8_t ifi_type; + uint8_t ifi_physical; + uint8_t ifi_addrlen; + uint8_t ifi_hdrlen; + uint8_t ifi_link_state; + uint8_t ifi_vhid; + uint16_t ifi_datalen; + uint32_t ifi_mtu; + uint32_t ifi_metric; + uint64_t ifi_baudrate; + uint64_t ifi_ipackets; + uint64_t ifi_ierrors; + uint64_t ifi_opackets; + uint64_t ifi_oerrors; + uint64_t ifi_collisions; + uint64_t ifi_ibytes; + uint64_t ifi_obytes; + uint64_t ifi_imcasts; + uint64_t ifi_omcasts; + uint64_t ifi_iqdrops; + uint64_t ifi_oqdrops; + uint64_t ifi_noproto; + uint64_t ifi_hwassist; + union { + time_t tt; + uint64_t ph; + } __ifi_epoch; + union { + struct timeval tv; + struct { + uint64_t ph1; + uint64_t ph2; + } ph; + } __ifi_lastchange; +}; + +struct if_msghdr_freebsd7 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd7 ifm_data; +}; + +struct if_msghdr_freebsd8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; 
+ int ifm_flags; + u_short ifm_index; + struct if_data_freebsd8 ifm_data; +}; + +struct if_msghdr_freebsd9 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd9 ifm_data; +}; + +struct if_msghdr_freebsd10 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd10 ifm_data; +}; + +struct if_msghdr_freebsd11 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data_freebsd11 ifm_data; +}; +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_IFMALIST = C.NET_RT_IFMALIST + sysNET_RT_IFLISTL = C.NET_RT_IFLISTL +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_P1003_1B = C.CTL_P1003_1B +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_NEWMADDR = C.RTM_NEWMADDR + sysRTM_DELMADDR = C.RTM_DELMADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrlFreeBSD10 = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10 = C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10 = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10 = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10 = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7 = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8 = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9 = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10 = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11 = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7 = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8 = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9 = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10 = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11 = C.sizeof_struct_if_data_freebsd11 + + sizeofIfMsghdrlFreeBSD10Emu = C.sizeof_struct_if_msghdrl + sizeofIfaMsghdrFreeBSD10Emu = 
C.sizeof_struct_ifa_msghdr + sizeofIfaMsghdrlFreeBSD10Emu = C.sizeof_struct_ifa_msghdrl + sizeofIfmaMsghdrFreeBSD10Emu = C.sizeof_struct_ifma_msghdr + sizeofIfAnnouncemsghdrFreeBSD10Emu = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrFreeBSD10Emu = C.sizeof_struct_rt_msghdr + sizeofRtMetricsFreeBSD10Emu = C.sizeof_struct_rt_metrics + + sizeofIfMsghdrFreeBSD7Emu = C.sizeof_struct_if_msghdr_freebsd7 + sizeofIfMsghdrFreeBSD8Emu = C.sizeof_struct_if_msghdr_freebsd8 + sizeofIfMsghdrFreeBSD9Emu = C.sizeof_struct_if_msghdr_freebsd9 + sizeofIfMsghdrFreeBSD10Emu = C.sizeof_struct_if_msghdr_freebsd10 + sizeofIfMsghdrFreeBSD11Emu = C.sizeof_struct_if_msghdr_freebsd11 + + sizeofIfDataFreeBSD7Emu = C.sizeof_struct_if_data_freebsd7 + sizeofIfDataFreeBSD8Emu = C.sizeof_struct_if_data_freebsd8 + sizeofIfDataFreeBSD9Emu = C.sizeof_struct_if_data_freebsd9 + sizeofIfDataFreeBSD10Emu = C.sizeof_struct_if_data_freebsd10 + sizeofIfDataFreeBSD11Emu = C.sizeof_struct_if_data_freebsd11 + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_netbsd.go b/vendor/golang.org/x/net/route/defs_netbsd.go new file mode 100644 index 0000000..b0abd54 --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_netbsd.go @@ -0,0 +1,112 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_VFS = C.CTL_VFS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_USER = C.CTL_USER + sysCTL_DDB = C.CTL_DDB + sysCTL_PROC = C.CTL_PROC + sysCTL_VENDOR = C.CTL_VENDOR + sysCTL_EMUL = C.CTL_EMUL + sysCTL_SECURITY = C.CTL_SECURITY + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_OLDADD = C.RTM_OLDADD + sysRTM_OLDDEL = C.RTM_OLDDEL + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_IEEE80211 = C.RTM_IEEE80211 + sysRTM_SETGATE = C.RTM_SETGATE + sysRTM_LLINFO_UPD = C.RTM_LLINFO_UPD + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_CHGADDR = C.RTM_CHGADDR + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_TAG = C.RTA_TAG + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + 
sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_TAG = C.RTAX_TAG + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofIfMsghdrNetBSD7 = C.sizeof_struct_if_msghdr + sizeofIfaMsghdrNetBSD7 = C.sizeof_struct_ifa_msghdr + sizeofIfAnnouncemsghdrNetBSD7 = C.sizeof_struct_if_announcemsghdr + + sizeofRtMsghdrNetBSD7 = C.sizeof_struct_rt_msghdr + sizeofRtMetricsNetBSD7 = C.sizeof_struct_rt_metrics + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/route/defs_openbsd.go b/vendor/golang.org/x/net/route/defs_openbsd.go new file mode 100644 index 0000000..173bb5d --- /dev/null +++ b/vendor/golang.org/x/net/route/defs_openbsd.go @@ -0,0 +1,116 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package route + +/* +#include +#include + +#include +#include +#include + +#include +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_ROUTE = C.AF_ROUTE + sysAF_LINK = C.AF_LINK + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW + + sysNET_RT_DUMP = C.NET_RT_DUMP + sysNET_RT_FLAGS = C.NET_RT_FLAGS + sysNET_RT_IFLIST = C.NET_RT_IFLIST + sysNET_RT_STATS = C.NET_RT_STATS + sysNET_RT_TABLE = C.NET_RT_TABLE + sysNET_RT_IFNAMES = C.NET_RT_IFNAMES + sysNET_RT_MAXID = C.NET_RT_MAXID +) + +const ( + sysCTL_MAXNAME = C.CTL_MAXNAME + + sysCTL_UNSPEC = C.CTL_UNSPEC + sysCTL_KERN = C.CTL_KERN + sysCTL_VM = C.CTL_VM + sysCTL_FS = C.CTL_FS + sysCTL_NET = C.CTL_NET + sysCTL_DEBUG = C.CTL_DEBUG + sysCTL_HW = C.CTL_HW + sysCTL_MACHDEP = C.CTL_MACHDEP + sysCTL_DDB = C.CTL_DDB + sysCTL_VFS = C.CTL_VFS + sysCTL_MAXID = C.CTL_MAXID +) + +const ( + sysRTM_VERSION = C.RTM_VERSION + + sysRTM_ADD = C.RTM_ADD + sysRTM_DELETE = C.RTM_DELETE + sysRTM_CHANGE = C.RTM_CHANGE + sysRTM_GET = C.RTM_GET + sysRTM_LOSING = C.RTM_LOSING + sysRTM_REDIRECT = C.RTM_REDIRECT + sysRTM_MISS = C.RTM_MISS + sysRTM_LOCK = C.RTM_LOCK + sysRTM_RESOLVE = C.RTM_RESOLVE + sysRTM_NEWADDR = C.RTM_NEWADDR + sysRTM_DELADDR = C.RTM_DELADDR + sysRTM_IFINFO = C.RTM_IFINFO + sysRTM_IFANNOUNCE = C.RTM_IFANNOUNCE + sysRTM_DESYNC = C.RTM_DESYNC + sysRTM_INVALIDATE = C.RTM_INVALIDATE + sysRTM_BFD = C.RTM_BFD + sysRTM_PROPOSAL = C.RTM_PROPOSAL + + sysRTA_DST = C.RTA_DST + sysRTA_GATEWAY = C.RTA_GATEWAY + sysRTA_NETMASK = C.RTA_NETMASK + sysRTA_GENMASK = C.RTA_GENMASK + sysRTA_IFP = C.RTA_IFP + sysRTA_IFA = C.RTA_IFA + sysRTA_AUTHOR = C.RTA_AUTHOR + sysRTA_BRD = C.RTA_BRD + sysRTA_SRC = C.RTA_SRC + sysRTA_SRCMASK = C.RTA_SRCMASK + sysRTA_LABEL = C.RTA_LABEL + sysRTA_BFD = C.RTA_BFD + sysRTA_DNS = C.RTA_DNS + sysRTA_STATIC = C.RTA_STATIC + sysRTA_SEARCH = C.RTA_SEARCH + + sysRTAX_DST = C.RTAX_DST + sysRTAX_GATEWAY = C.RTAX_GATEWAY + sysRTAX_NETMASK = C.RTAX_NETMASK + sysRTAX_GENMASK = C.RTAX_GENMASK + sysRTAX_IFP = C.RTAX_IFP + sysRTAX_IFA = C.RTAX_IFA + sysRTAX_AUTHOR = C.RTAX_AUTHOR + sysRTAX_BRD = C.RTAX_BRD + sysRTAX_SRC = C.RTAX_SRC + sysRTAX_SRCMASK = C.RTAX_SRCMASK + sysRTAX_LABEL = C.RTAX_LABEL + sysRTAX_BFD = C.RTAX_BFD + sysRTAX_DNS = C.RTAX_DNS + sysRTAX_STATIC = C.RTAX_STATIC + sysRTAX_SEARCH = C.RTAX_SEARCH + sysRTAX_MAX = C.RTAX_MAX +) + +const ( + sizeofRtMsghdr = C.sizeof_struct_rt_msghdr + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 
= C.sizeof_struct_sockaddr_in6
+)
diff --git a/vendor/golang.org/x/net/route/interface.go b/vendor/golang.org/x/net/route/interface.go
new file mode 100644
index 0000000..854906d
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// An InterfaceMessage represents an interface message.
+type InterfaceMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Flags   int    // interface flags
+	Index   int    // interface index
+	Name    string // interface name
+	Addrs   []Addr // addresses
+
+	extOff int    // offset of header extension
+	raw    []byte // raw message
+}
+
+// An InterfaceAddrMessage represents an interface address message.
+type InterfaceAddrMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Flags   int    // interface flags
+	Index   int    // interface index
+	Addrs   []Addr // addresses
+
+	raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceAddrMessage) Sys() []Sys { return nil }
+
+// An InterfaceMulticastAddrMessage represents an interface multicast
+// address message.
+type InterfaceMulticastAddrMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Flags   int    // interface flags
+	Index   int    // interface index
+	Addrs   []Addr // addresses
+
+	raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMulticastAddrMessage) Sys() []Sys { return nil }
+
+// An InterfaceAnnounceMessage represents an interface announcement
+// message.
+type InterfaceAnnounceMessage struct {
+	Version int    // message version
+	Type    int    // message type
+	Index   int    // interface index
+	Name    string // interface name
+	What    int    // what type of announcement
+
+	raw []byte // raw message
+}
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceAnnounceMessage) Sys() []Sys { return nil }
diff --git a/vendor/golang.org/x/net/route/interface_announce.go b/vendor/golang.org/x/net/route/interface_announce.go
new file mode 100644
index 0000000..520d657
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_announce.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd netbsd
+
+package route
+
+func (w *wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < w.bodyOff {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	m := &InterfaceAnnounceMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Index:   int(nativeEndian.Uint16(b[4:6])),
+		What:    int(nativeEndian.Uint16(b[22:24])),
+		raw:     b[:l],
+	}
+	for i := 0; i < 16; i++ {
+		if b[6+i] != 0 {
+			continue
+		}
+		m.Name = string(b[6 : 6+i])
+		break
+	}
+	return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/interface_classic.go b/vendor/golang.org/x/net/route/interface_classic.go
new file mode 100644
index 0000000..ac4e7a6
--- /dev/null
+++ b/vendor/golang.org/x/net/route/interface_classic.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly netbsd + +package route + +import "runtime" + +func (w *wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Addrs: make([]Addr, sysRTAX_MAX), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + extOff: w.extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[w.bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + raw: b[:l], + } + if runtime.GOOS == "netbsd" { + m.Index = int(nativeEndian.Uint16(b[16:18])) + } else { + m.Index = int(nativeEndian.Uint16(b[12:14])) + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_freebsd.go b/vendor/golang.org/x/net/route/interface_freebsd.go new file mode 100644 index 0000000..9f6f50c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_freebsd.go @@ -0,0 +1,78 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package route + +func (w *wireFormat) parseInterfaceMessage(typ RIBType, b []byte) (Message, error) { + var extOff, bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 20 { + return nil, errMessageTooShort + } + extOff = int(nativeEndian.Uint16(b[18:20])) + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + extOff = w.extOff + bodyOff = w.bodyOff + } + if len(b) < extOff || len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + attrs := uint(nativeEndian.Uint32(b[4:8])) + if attrs&sysRTA_IFP == 0 { + return nil, nil + } + m := &InterfaceMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + Addrs: make([]Addr, sysRTAX_MAX), + extOff: extOff, + raw: b[:l], + } + a, err := parseLinkAddr(b[bodyOff:]) + if err != nil { + return nil, err + } + m.Addrs[sysRTAX_IFP] = a + m.Name = a.(*LinkAddr).Name + return m, nil +} + +func (w *wireFormat) parseInterfaceAddrMessage(typ RIBType, b []byte) (Message, error) { + var bodyOff int + if typ == sysNET_RT_IFLISTL { + if len(b) < 24 { + return nil, errMessageTooShort + } + bodyOff = int(nativeEndian.Uint16(b[16:18])) + } else { + bodyOff = w.bodyOff + } + if len(b) < bodyOff { + return nil, errInvalidMessage + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_multicast.go b/vendor/golang.org/x/net/route/interface_multicast.go new file mode 100644 index 0000000..1e99a9c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_multicast.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd + +package route + +func (w *wireFormat) parseInterfaceMulticastAddrMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &InterfaceMulticastAddrMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[12:14])), + raw: b[:l], + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[4:8])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/interface_openbsd.go b/vendor/golang.org/x/net/route/interface_openbsd.go new file mode 100644 index 0000000..e4a143c --- /dev/null +++ b/vendor/golang.org/x/net/route/interface_openbsd.go @@ -0,0 +1,90 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package route
+
+func (*wireFormat) parseInterfaceMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < 32 {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	attrs := uint(nativeEndian.Uint32(b[12:16]))
+	if attrs&sysRTA_IFP == 0 {
+		return nil, nil
+	}
+	m := &InterfaceMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Flags:   int(nativeEndian.Uint32(b[16:20])),
+		Index:   int(nativeEndian.Uint16(b[6:8])),
+		Addrs:   make([]Addr, sysRTAX_MAX),
+		raw:     b[:l],
+	}
+	ll := int(nativeEndian.Uint16(b[4:6]))
+	if len(b) < ll {
+		return nil, errInvalidMessage
+	}
+	a, err := parseLinkAddr(b[ll:])
+	if err != nil {
+		return nil, err
+	}
+	m.Addrs[sysRTAX_IFP] = a
+	m.Name = a.(*LinkAddr).Name
+	return m, nil
+}
+
+func (*wireFormat) parseInterfaceAddrMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < 24 {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	bodyOff := int(nativeEndian.Uint16(b[4:6]))
+	if len(b) < bodyOff {
+		return nil, errInvalidMessage
+	}
+	m := &InterfaceAddrMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Flags:   int(nativeEndian.Uint32(b[12:16])),
+		Index:   int(nativeEndian.Uint16(b[6:8])),
+		raw:     b[:l],
+	}
+	var err error
+	m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[bodyOff:])
+	if err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (*wireFormat) parseInterfaceAnnounceMessage(_ RIBType, b []byte) (Message, error) {
+	if len(b) < 26 {
+		return nil, errMessageTooShort
+	}
+	l := int(nativeEndian.Uint16(b[:2]))
+	if len(b) < l {
+		return nil, errInvalidMessage
+	}
+	m := &InterfaceAnnounceMessage{
+		Version: int(b[2]),
+		Type:    int(b[3]),
+		Index:   int(nativeEndian.Uint16(b[6:8])),
+		What:    int(nativeEndian.Uint16(b[8:10])),
+		raw:     b[:l],
+	}
+	for i := 0; i < 16; i++ {
+		if b[10+i] != 0 {
+			continue
+		}
+		m.Name = string(b[10 : 10+i])
+		break
+	}
+	return m, nil
+}
diff --git a/vendor/golang.org/x/net/route/message.go b/vendor/golang.org/x/net/route/message.go
new file mode 100644
index 0000000..0fa7e09
--- /dev/null
+++ b/vendor/golang.org/x/net/route/message.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+// A Message represents a routing message.
+type Message interface {
+	// Sys returns operating system-specific information.
+	Sys() []Sys
+}
+
+// A Sys represents operating system-specific information.
+type Sys interface {
+	// SysType returns a type of operating system-specific
+	// information.
+	SysType() SysType
+}
+
+// A SysType represents a type of operating system-specific
+// information.
+type SysType int
+
+const (
+	SysMetrics SysType = iota
+	SysStats
+)
+
+// ParseRIB parses b as a routing information base and returns a list
+// of routing messages.
+func ParseRIB(typ RIBType, b []byte) ([]Message, error) { + if !typ.parseable() { + return nil, errUnsupportedMessage + } + var msgs []Message + nmsgs, nskips := 0, 0 + for len(b) > 4 { + nmsgs++ + l := int(nativeEndian.Uint16(b[:2])) + if l == 0 { + return nil, errInvalidMessage + } + if len(b) < l { + return nil, errMessageTooShort + } + if b[2] != sysRTM_VERSION { + b = b[l:] + continue + } + if w, ok := wireFormats[int(b[3])]; !ok { + nskips++ + } else { + m, err := w.parse(typ, b) + if err != nil { + return nil, err + } + if m == nil { + nskips++ + } else { + msgs = append(msgs, m) + } + } + b = b[l:] + } + // We failed to parse any of the messages - version mismatch? + if nmsgs != len(msgs)+nskips { + return nil, errMessageMismatch + } + return msgs, nil +} diff --git a/vendor/golang.org/x/net/route/message_darwin_test.go b/vendor/golang.org/x/net/route/message_darwin_test.go new file mode 100644 index 0000000..316aa75 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_darwin_test.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "testing" + +func TestFetchAndParseRIBOnDarwin(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_FLAGS, sysNET_RT_DUMP2, sysNET_RT_IFLIST2} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/message_freebsd_test.go b/vendor/golang.org/x/net/route/message_freebsd_test.go new file mode 100644 index 0000000..db4b567 --- /dev/null +++ b/vendor/golang.org/x/net/route/message_freebsd_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "testing" + "unsafe" +) + +func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_IFMALIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(s) + } + } +} + +func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { + if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { + t.Skip("NET_RT_IFLISTL not supported") + } + var p uintptr + if kernelAlign != int(unsafe.Sizeof(p)) { + t.Skip("NET_RT_IFLIST vs. 
NET_RT_IFLISTL doesn't work for 386 emulation on amd64") + } + + var tests = [2]struct { + typ RIBType + b []byte + msgs []Message + ss []string + }{ + {typ: sysNET_RT_IFLIST}, + {typ: sysNET_RT_IFLISTL}, + } + for i := range tests { + var lastErr error + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, tests[i].typ) + if err != nil { + lastErr = err + continue + } + tests[i].msgs = append(tests[i].msgs, rs...) + } + if len(tests[i].msgs) == 0 && lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + tests[i].ss, lastErr = msgs(tests[i].msgs).validate() + if lastErr != nil { + t.Error(tests[i].typ, lastErr) + continue + } + for _, s := range tests[i].ss { + t.Log(s) + } + } + for i := len(tests) - 1; i > 0; i-- { + if len(tests[i].ss) != len(tests[i-1].ss) { + t.Errorf("got %v; want %v", tests[i].ss, tests[i-1].ss) + continue + } + for j, s1 := range tests[i].ss { + s0 := tests[i-1].ss[j] + if s1 != s0 { + t.Errorf("got %s; want %s", s1, s0) + } + } + } +} diff --git a/vendor/golang.org/x/net/route/message_test.go b/vendor/golang.org/x/net/route/message_test.go new file mode 100644 index 0000000..e848dab --- /dev/null +++ b/vendor/golang.org/x/net/route/message_test.go @@ -0,0 +1,239 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "os" + "syscall" + "testing" + "time" +) + +func TestFetchAndParseRIB(t *testing.T) { + for _, typ := range []RIBType{sysNET_RT_DUMP, sysNET_RT_IFLIST} { + var lastErr error + var ms []Message + for _, af := range []int{sysAF_UNSPEC, sysAF_INET, sysAF_INET6} { + rs, err := fetchAndParseRIB(af, typ) + if err != nil { + lastErr = err + continue + } + ms = append(ms, rs...) + } + if len(ms) == 0 && lastErr != nil { + t.Error(typ, lastErr) + continue + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(typ, err) + continue + } + for _, s := range ss { + t.Log(typ, s) + } + } +} + +var ( + rtmonSock int + rtmonErr error +) + +func init() { + // We need to keep rtmonSock alive to avoid treading on + // recycled socket descriptors. + rtmonSock, rtmonErr = syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) +} + +// TestMonitorAndParseRIB leaks a worker goroutine and a socket +// descriptor but that's intentional. +func TestMonitorAndParseRIB(t *testing.T) { + if testing.Short() || os.Getuid() != 0 { + t.Skip("must be root") + } + + if rtmonErr != nil { + t.Fatal(rtmonErr) + } + + // We suppose that using an IPv4 link-local address and the + // dot1Q ID for Token Ring and FDDI doesn't harm anyone. + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(1002); err != nil { + t.Skip(err) + } + if err := pv.setup(); err != nil { + t.Skip(err) + } + pv.teardown() + + go func() { + b := make([]byte, os.Getpagesize()) + for { + // There's no easy way to unblock this read + // call because the routing message exchange + // over routing socket is a connectionless + // message-oriented protocol, no control plane + // for signaling connectivity, and we cannot + // use the net package of standard library due + // to the lack of support for routing socket + // and circular dependency. 
+ n, err := syscall.Read(rtmonSock, b) + if err != nil { + return + } + ms, err := ParseRIB(0, b[:n]) + if err != nil { + t.Error(err) + return + } + ss, err := msgs(ms).validate() + if err != nil { + t.Error(err) + return + } + for _, s := range ss { + t.Log(s) + } + } + }() + + for _, vid := range []int{1002, 1003, 1004, 1005} { + pv := &propVirtual{addr: "169.254.0.1", mask: "255.255.255.0"} + if err := pv.configure(vid); err != nil { + t.Fatal(err) + } + if err := pv.setup(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + if err := pv.teardown(); err != nil { + t.Fatal(err) + } + time.Sleep(200 * time.Millisecond) + } +} + +func TestParseRIBWithFuzz(t *testing.T) { + for _, fuzz := range []string{ + "0\x00\x05\x050000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "00000000000000000000" + + "0000000000000\x02000000" + + "00000000", + "\x02\x00\x05\f0000000000000000" + + "0\x0200000000000000", + "\x02\x00\x05\x100000000000000\x1200" + + "0\x00\xff\x00", + "\x02\x00\x05\f0000000000000000" + + "0\x12000\x00\x02\x0000", + "\x00\x00\x00\x01\x00", + "00000", + } { + for typ := RIBType(0); typ < 256; typ++ { + ParseRIB(typ, []byte(fuzz)) + } + } +} + +func TestRouteMessage(t *testing.T) { + s, err := syscall.Socket(sysAF_ROUTE, sysSOCK_RAW, sysAF_UNSPEC) + if err != nil { + t.Fatal(err) + } + defer syscall.Close(s) + + var ms []RouteMessage + for _, af := range []int{sysAF_INET, sysAF_INET6} { + if _, err := fetchAndParseRIB(af, sysNET_RT_DUMP); err != nil { + t.Log(err) + continue + } + switch af { + case sysAF_INET: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet4Addr{}, + nil, + &Inet4Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet4Addr{IP: [4]byte{127, 0, 0, 1}}, + }, + }, + }...) + case sysAF_INET6: + ms = append(ms, []RouteMessage{ + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + nil, + nil, + nil, + &LinkAddr{}, + &Inet6Addr{}, + nil, + &Inet6Addr{}, + }, + }, + { + Type: sysRTM_GET, + Addrs: []Addr{ + &Inet6Addr{IP: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}, + }, + }, + }...) + } + } + for i, m := range ms { + m.ID = uintptr(os.Getpid()) + m.Seq = i + 1 + wb, err := m.Marshal() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + if _, err := syscall.Write(s, wb); err != nil { + t.Fatalf("%v: %v", m, err) + } + rb := make([]byte, os.Getpagesize()) + n, err := syscall.Read(s, rb) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + rms, err := ParseRIB(0, rb[:n]) + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, rm := range rms { + err := rm.(*RouteMessage).Err + if err != nil { + t.Errorf("%v: %v", m, err) + } + } + ss, err := msgs(rms).validate() + if err != nil { + t.Fatalf("%v: %v", m, err) + } + for _, s := range ss { + t.Log(s) + } + } +} diff --git a/vendor/golang.org/x/net/route/route.go b/vendor/golang.org/x/net/route/route.go new file mode 100644 index 0000000..081da0d --- /dev/null +++ b/vendor/golang.org/x/net/route/route.go @@ -0,0 +1,123 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +// Package route provides basic functions for the manipulation of +// packet routing facilities on BSD variants. 
+//
+// The package supports any version of Darwin, any version of
+// DragonFly BSD, FreeBSD 7 through 11, NetBSD 6 and above, and
+// OpenBSD 5.6 and above.
+package route
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+var (
+	errUnsupportedMessage = errors.New("unsupported message")
+	errMessageMismatch    = errors.New("message mismatch")
+	errMessageTooShort    = errors.New("message too short")
+	errInvalidMessage     = errors.New("invalid message")
+	errInvalidAddr        = errors.New("invalid address")
+	errShortBuffer        = errors.New("short buffer")
+)
+
+// A RouteMessage represents a message conveying an address prefix, a
+// nexthop address and an output interface.
+//
+// Unlike other messages, this message can be used to query adjacency
+// information for the given address prefix, to add a new route, and
+// to delete or modify the existing route from the routing information
+// base inside the kernel by writing and reading route messages on a
+// routing socket.
+//
+// For the manipulation of routing information, the route message must
+// contain appropriate fields that include:
+//
+//	Version = <must be specified>
+//	Type    = <must be specified>
+//	Flags   = <must be specified>
+//	Index   = <must be specified if necessary>
+//	ID      = <must be specified>
+//	Seq     = <must be specified>
+//	Addrs   = <must be specified>
+//
+// The Type field specifies a type of manipulation, the Flags field
+// specifies a class of target information and the Addrs field
+// specifies target information like the following:
+//
+//	route.RouteMessage{
+//		Version: RTM_VERSION,
+//		Type: RTM_GET,
+//		Flags: RTF_UP | RTF_HOST,
+//		ID: uintptr(os.Getpid()),
+//		Seq: 1,
+//		Addrs: []route.Addr{
+//			RTAX_DST: &route.Inet4Addr{ ... },
+//			RTAX_IFP: &route.LinkAddr{ ... },
+//			RTAX_BRD: &route.Inet4Addr{ ... },
+//		},
+//	}
+//
+// The values for the above fields depend on the implementation of
+// each operating system.
+//
+// The Err field on a response message contains an error value on the
+// requested operation. If non-nil, the requested operation failed.
+type RouteMessage struct {
+	Version int     // message version
+	Type    int     // message type
+	Flags   int     // route flags
+	Index   int     // interface index when attached
+	ID      uintptr // sender's identifier; usually process ID
+	Seq     int     // sequence number
+	Err     error   // error on requested operation
+	Addrs   []Addr  // addresses
+
+	extOff int    // offset of header extension
+	raw    []byte // raw message
+}
+
+// Marshal returns the binary encoding of m.
+func (m *RouteMessage) Marshal() ([]byte, error) {
+	return m.marshal()
+}
+
+// A RIBType represents a type of routing information base.
+type RIBType int
+
+const (
+	RIBTypeRoute     RIBType = syscall.NET_RT_DUMP
+	RIBTypeInterface RIBType = syscall.NET_RT_IFLIST
+)
+
+// FetchRIB fetches a routing information base from the operating
+// system.
+//
+// The provided af must be an address family.
+//
+// The provided arg must be a RIBType-specific argument.
+// When RIBType is related to routes, arg might be a set of route
+// flags. When RIBType is related to network interfaces, arg might be
+// an interface index or a set of interface flags. In most cases, zero
+// means a wildcard.
+func FetchRIB(af int, typ RIBType, arg int) ([]byte, error) { + mib := [6]int32{sysCTL_NET, sysAF_ROUTE, 0, int32(af), int32(typ), int32(arg)} + n := uintptr(0) + if err := sysctl(mib[:], nil, &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + if n == 0 { + return nil, nil + } + b := make([]byte, n) + if err := sysctl(mib[:], &b[0], &n, nil, 0); err != nil { + return nil, os.NewSyscallError("sysctl", err) + } + return b[:n], nil +} diff --git a/vendor/golang.org/x/net/route/route_classic.go b/vendor/golang.org/x/net/route/route_classic.go new file mode 100644 index 0000000..02fa688 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_classic.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package route + +import ( + "runtime" + "syscall" +) + +func (m *RouteMessage) marshal() ([]byte, error) { + w, ok := wireFormats[m.Type] + if !ok { + return nil, errUnsupportedMessage + } + l := w.bodyOff + addrsSpace(m.Addrs) + if runtime.GOOS == "darwin" { + // Fix stray pointer writes on macOS. + // See golang.org/issue/22456. + l += 1024 + } + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint32(b[8:12], uint32(m.Flags)) + nativeEndian.PutUint16(b[4:6], uint16(m.Index)) + nativeEndian.PutUint32(b[16:20], uint32(m.ID)) + nativeEndian.PutUint32(b[20:24], uint32(m.Seq)) + attrs, err := marshalAddrs(b[w.bodyOff:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (w *wireFormat) parseRouteMessage(typ RIBType, b []byte) (Message, error) { + if len(b) < w.bodyOff { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[8:12])), + Index: int(nativeEndian.Uint16(b[4:6])), + ID: uintptr(nativeEndian.Uint32(b[16:20])), + Seq: int(nativeEndian.Uint32(b[20:24])), + extOff: w.extOff, + raw: b[:l], + } + errno := syscall.Errno(nativeEndian.Uint32(b[28:32])) + if errno != 0 { + m.Err = errno + } + var err error + m.Addrs, err = parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[w.bodyOff:]) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_openbsd.go b/vendor/golang.org/x/net/route/route_openbsd.go new file mode 100644 index 0000000..daf2e90 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_openbsd.go @@ -0,0 +1,65 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
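+
+// Unlike the classic BSDs, OpenBSD carries an explicit header length
+// inside each route message, so the parser below trusts that embedded
+// length rather than offsets probed at run time.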
+ +package route + +import "syscall" + +func (m *RouteMessage) marshal() ([]byte, error) { + l := sizeofRtMsghdr + addrsSpace(m.Addrs) + b := make([]byte, l) + nativeEndian.PutUint16(b[:2], uint16(l)) + if m.Version == 0 { + b[2] = sysRTM_VERSION + } else { + b[2] = byte(m.Version) + } + b[3] = byte(m.Type) + nativeEndian.PutUint16(b[4:6], uint16(sizeofRtMsghdr)) + nativeEndian.PutUint32(b[16:20], uint32(m.Flags)) + nativeEndian.PutUint16(b[6:8], uint16(m.Index)) + nativeEndian.PutUint32(b[24:28], uint32(m.ID)) + nativeEndian.PutUint32(b[28:32], uint32(m.Seq)) + attrs, err := marshalAddrs(b[sizeofRtMsghdr:], m.Addrs) + if err != nil { + return nil, err + } + if attrs > 0 { + nativeEndian.PutUint32(b[12:16], uint32(attrs)) + } + return b, nil +} + +func (*wireFormat) parseRouteMessage(_ RIBType, b []byte) (Message, error) { + if len(b) < sizeofRtMsghdr { + return nil, errMessageTooShort + } + l := int(nativeEndian.Uint16(b[:2])) + if len(b) < l { + return nil, errInvalidMessage + } + m := &RouteMessage{ + Version: int(b[2]), + Type: int(b[3]), + Flags: int(nativeEndian.Uint32(b[16:20])), + Index: int(nativeEndian.Uint16(b[6:8])), + ID: uintptr(nativeEndian.Uint32(b[24:28])), + Seq: int(nativeEndian.Uint32(b[28:32])), + raw: b[:l], + } + ll := int(nativeEndian.Uint16(b[4:6])) + if len(b) < ll { + return nil, errInvalidMessage + } + errno := syscall.Errno(nativeEndian.Uint32(b[32:36])) + if errno != 0 { + m.Err = errno + } + as, err := parseAddrs(uint(nativeEndian.Uint32(b[12:16])), parseKernelInetAddr, b[ll:]) + if err != nil { + return nil, err + } + m.Addrs = as + return m, nil +} diff --git a/vendor/golang.org/x/net/route/route_test.go b/vendor/golang.org/x/net/route/route_test.go new file mode 100644 index 0000000..61bd174 --- /dev/null +++ b/vendor/golang.org/x/net/route/route_test.go @@ -0,0 +1,390 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package route
+
+import (
+	"fmt"
+	"os/exec"
+	"runtime"
+	"time"
+)
+
+func (m *RouteMessage) String() string {
+	return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[12:16])))
+}
+
+func (m *InterfaceMessage) String() string {
+	var attrs addrAttrs
+	if runtime.GOOS == "openbsd" {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+	} else {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+	}
+	return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceAddrMessage) String() string {
+	var attrs addrAttrs
+	if runtime.GOOS == "openbsd" {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+	} else {
+		attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+	}
+	return fmt.Sprintf("%s", attrs)
+}
+
+func (m *InterfaceMulticastAddrMessage) String() string {
+	return fmt.Sprintf("%s", addrAttrs(nativeEndian.Uint32(m.raw[4:8])))
+}
+
+func (m *InterfaceAnnounceMessage) String() string {
+	what := "<nil>"
+	switch m.What {
+	case 0:
+		what = "arrival"
+	case 1:
+		what = "departure"
+	}
+	return fmt.Sprintf("(%d %s %s)", m.Index, m.Name, what)
+}
+
+func (m *InterfaceMetrics) String() string {
+	return fmt.Sprintf("(type=%d mtu=%d)", m.Type, m.MTU)
+}
+
+func (m *RouteMetrics) String() string {
+	return fmt.Sprintf("(pmtu=%d)", m.PathMTU)
+}
+
+type addrAttrs uint
+
+var addrAttrNames = [...]string{
+	"dst",
+	"gateway",
+	"netmask",
+	"genmask",
+	"ifp",
+	"ifa",
+	"author",
+	"brd",
+	"df:mpls1-n:tag-o:src", // mpls1 for dragonfly, tag for netbsd, src for openbsd
+	"df:mpls2-o:srcmask",   // mpls2 for dragonfly, srcmask for openbsd
+	"df:mpls3-o:label",     // mpls3 for dragonfly, label for openbsd
+	"o:bfd",                // bfd for openbsd
+	"o:dns",                // dns for openbsd
+	"o:static",             // static for openbsd
+	"o:search",             // search for openbsd
+}
+
+func (attrs addrAttrs) String() string {
+	var s string
+	for i, name := range addrAttrNames {
+		if attrs&(1<<uint(i)) != 0 {
+			if s != "" {
+				s += "|"
+			}
+			s += name
+		}
+	}
+	if s == "" {
+		return "<nil>"
+	}
+	return s
+}
+
+type msgs []Message
+
+func (ms msgs) validate() ([]string, error) {
+	var ss []string
+	for _, m := range ms {
+		switch m := m.(type) {
+		case *RouteMessage:
+			if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[12:16]))); err != nil {
+				return nil, err
+			}
+			sys := m.Sys()
+			if sys == nil {
+				return nil, fmt.Errorf("no sys for %s", m.String())
+			}
+			ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+		case *InterfaceMessage:
+			var attrs addrAttrs
+			if runtime.GOOS == "openbsd" {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+			} else {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+			}
+			if err := addrs(m.Addrs).match(attrs); err != nil {
+				return nil, err
+			}
+			sys := m.Sys()
+			if sys == nil {
+				return nil, fmt.Errorf("no sys for %s", m.String())
+			}
+			ss = append(ss, m.String()+" "+syss(sys).String()+" "+addrs(m.Addrs).String())
+		case *InterfaceAddrMessage:
+			var attrs addrAttrs
+			if runtime.GOOS == "openbsd" {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[12:16]))
+			} else {
+				attrs = addrAttrs(nativeEndian.Uint32(m.raw[4:8]))
+			}
+			if err := addrs(m.Addrs).match(attrs); err != nil {
+				return nil, err
+			}
+			ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+		case *InterfaceMulticastAddrMessage:
+			if err := addrs(m.Addrs).match(addrAttrs(nativeEndian.Uint32(m.raw[4:8]))); err != nil {
+				return nil, err
+			}
+			ss = append(ss, m.String()+" "+addrs(m.Addrs).String())
+		case *InterfaceAnnounceMessage:
+			ss = append(ss, m.String())
+		default:
+			ss = append(ss, fmt.Sprintf("%+v", m))
+		}
+	}
+	return ss, nil
+}
+
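The helpers above drive the package's public surface. As a minimal sketch of how that surface composes outside the tests, on a BSD system, enumerating network interfaces might look like this (assuming the package is imported as golang.org/x/net/route; the example is illustrative and not part of the vendored code):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/route"
	)

	func main() {
		// 0 is AF_UNSPEC: fetch entries for every address family.
		rib, err := route.FetchRIB(0, route.RIBTypeInterface, 0)
		if err != nil {
			log.Fatal(err)
		}
		// Parse the raw RIB snapshot into typed Messages.
		msgs, err := route.ParseRIB(route.RIBTypeInterface, rib)
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range msgs {
			if ifm, ok := m.(*route.InterfaceMessage); ok {
				fmt.Println(ifm.Index, ifm.Name)
			}
		}
	}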
+type syss []Sys
+
+func (sys syss) String() string {
+	var s string
+	for _, sy := range sys {
+		switch sy := sy.(type) {
+		case *InterfaceMetrics:
+			if len(s) > 0 {
+				s += " "
+			}
+			s += sy.String()
+		case *RouteMetrics:
+			if len(s) > 0 {
+				s += " "
+			}
+			s += sy.String()
+		}
+	}
+	return s
+}
+
+type addrFamily int
+
+func (af addrFamily) String() string {
+	switch af {
+	case sysAF_UNSPEC:
+		return "unspec"
+	case sysAF_LINK:
+		return "link"
+	case sysAF_INET:
+		return "inet4"
+	case sysAF_INET6:
+		return "inet6"
+	default:
+		return fmt.Sprintf("%d", af)
+	}
+}
+
+const hexDigit = "0123456789abcdef"
+
+type llAddr []byte
+
+func (a llAddr) String() string {
+	if len(a) == 0 {
+		return ""
+	}
+	buf := make([]byte, 0, len(a)*3-1)
+	for i, b := range a {
+		if i > 0 {
+			buf = append(buf, ':')
+		}
+		buf = append(buf, hexDigit[b>>4])
+		buf = append(buf, hexDigit[b&0xF])
+	}
+	return string(buf)
+}
+
+type ipAddr []byte
+
+func (a ipAddr) String() string {
+	if len(a) == 0 {
+		return ""
+	}
+	if len(a) == 4 {
+		return fmt.Sprintf("%d.%d.%d.%d", a[0], a[1], a[2], a[3])
+	}
+	if len(a) == 16 {
+		return fmt.Sprintf("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15])
+	}
+	s := make([]byte, len(a)*2)
+	for i, tn := range a {
+		s[i*2], s[i*2+1] = hexDigit[tn>>4], hexDigit[tn&0xf]
+	}
+	return string(s)
+}
+
+func (a *LinkAddr) String() string {
+	name := a.Name
+	if name == "" {
+		name = "<nil>"
+	}
+	lla := llAddr(a.Addr).String()
+	if lla == "" {
+		lla = "<nil>"
+	}
+	return fmt.Sprintf("(%v %d %s %s)", addrFamily(a.Family()), a.Index, name, lla)
+}
+
+func (a *Inet4Addr) String() string {
+	return fmt.Sprintf("(%v %v)", addrFamily(a.Family()), ipAddr(a.IP[:]))
+}
+
+func (a *Inet6Addr) String() string {
+	return fmt.Sprintf("(%v %v %d)", addrFamily(a.Family()), ipAddr(a.IP[:]), a.ZoneID)
+}
+
+func (a *DefaultAddr) String() string {
+	return fmt.Sprintf("(%v %s)", addrFamily(a.Family()), ipAddr(a.Raw[2:]).String())
+}
+
+type addrs []Addr
+
+func (as addrs) String() string {
+	var s string
+	for _, a := range as {
+		if a == nil {
+			continue
+		}
+		if len(s) > 0 {
+			s += " "
+		}
+		switch a := a.(type) {
+		case *LinkAddr:
+			s += a.String()
+		case *Inet4Addr:
+			s += a.String()
+		case *Inet6Addr:
+			s += a.String()
+		case *DefaultAddr:
+			s += a.String()
+		}
+	}
+	if s == "" {
+		return "<nil>"
+	}
+	return s
+}
+
+func (as addrs) match(attrs addrAttrs) error {
+	var ts addrAttrs
+	af := sysAF_UNSPEC
+	for i := range as {
+		if as[i] != nil {
+			ts |= 1 << uint(i)
+		}
+		switch as[i].(type) {
+		case *Inet4Addr:
+			if af == sysAF_UNSPEC {
+				af = sysAF_INET
+			}
+			if af != sysAF_INET {
+				return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+			}
+		case *Inet6Addr:
+			if af == sysAF_UNSPEC {
+				af = sysAF_INET6
+			}
+			if af != sysAF_INET6 {
+				return fmt.Errorf("got %v; want %v", addrs(as), addrFamily(af))
+			}
+		}
+	}
+	if ts != attrs && ts > attrs {
+		return fmt.Errorf("%v not included in %v", ts, attrs)
+	}
+	return nil
+}
+
+func fetchAndParseRIB(af int, typ RIBType) ([]Message, error) {
+	var err error
+	var b []byte
+	for i := 0; i < 3; i++ {
+		if b, err = FetchRIB(af, typ, 0); err != nil {
+			time.Sleep(10 * time.Millisecond)
+			continue
+		}
+		break
+	}
+	if err != nil {
+		return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+	}
+	ms, err := ParseRIB(typ, b)
+	if err != nil {
+		return nil, fmt.Errorf("%v %d %v", addrFamily(af), typ, err)
+	}
+	return ms, nil
+}
+
+// propVirtual is a
proprietary virtual network interface. +type propVirtual struct { + name string + addr, mask string + setupCmds []*exec.Cmd + teardownCmds []*exec.Cmd +} + +func (pv *propVirtual) setup() error { + for _, cmd := range pv.setupCmds { + if err := cmd.Run(); err != nil { + pv.teardown() + return err + } + } + return nil +} + +func (pv *propVirtual) teardown() error { + for _, cmd := range pv.teardownCmds { + if err := cmd.Run(); err != nil { + return err + } + } + return nil +} + +func (pv *propVirtual) configure(suffix int) error { + if runtime.GOOS == "openbsd" { + pv.name = fmt.Sprintf("vether%d", suffix) + } else { + pv.name = fmt.Sprintf("vlan%d", suffix) + } + xname, err := exec.LookPath("ifconfig") + if err != nil { + return err + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "create"}, + }) + if runtime.GOOS == "netbsd" { + // NetBSD requires an underlying dot1Q-capable network + // interface. + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "vlan", fmt.Sprintf("%d", suffix&0xfff), "vlanif", "wm0"}, + }) + } + pv.setupCmds = append(pv.setupCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "inet", pv.addr, "netmask", pv.mask}, + }) + pv.teardownCmds = append(pv.teardownCmds, &exec.Cmd{ + Path: xname, + Args: []string{"ifconfig", pv.name, "destroy"}, + }) + return nil +} diff --git a/vendor/golang.org/x/net/route/sys.go b/vendor/golang.org/x/net/route/sys.go new file mode 100644 index 0000000..3d0ee9b --- /dev/null +++ b/vendor/golang.org/x/net/route/sys.go @@ -0,0 +1,39 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import "unsafe" + +var ( + nativeEndian binaryByteOrder + kernelAlign int + wireFormats map[int]*wireFormat +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + nativeEndian = littleEndian + } else { + nativeEndian = bigEndian + } + kernelAlign, wireFormats = probeRoutingStack() +} + +func roundup(l int) int { + if l == 0 { + return kernelAlign + } + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} + +type wireFormat struct { + extOff int // offset of header extension + bodyOff int // offset of message body + parse func(RIBType, []byte) (Message, error) +} diff --git a/vendor/golang.org/x/net/route/sys_darwin.go b/vendor/golang.org/x/net/route/sys_darwin.go new file mode 100644 index 0000000..d2daf5c --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_darwin.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { + switch typ { + case sysNET_RT_STAT, sysNET_RT_TRASH: + return false + default: + return true + } +} + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. 
+type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + rtm := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdrDarwin15} + rtm.parse = rtm.parseRouteMessage + rtm2 := &wireFormat{extOff: 36, bodyOff: sizeofRtMsghdr2Darwin15} + rtm2.parse = rtm2.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDarwin15} + ifm.parse = ifm.parseInterfaceMessage + ifm2 := &wireFormat{extOff: 32, bodyOff: sizeofIfMsghdr2Darwin15} + ifm2.parse = ifm2.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDarwin15, bodyOff: sizeofIfaMsghdrDarwin15} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDarwin15, bodyOff: sizeofIfmaMsghdrDarwin15} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifmam2 := &wireFormat{extOff: sizeofIfmaMsghdr2Darwin15, bodyOff: sizeofIfmaMsghdr2Darwin15} + ifmam2.parse = ifmam2.parseInterfaceMulticastAddrMessage + // Darwin kernels require 32-bit aligned access to routing facilities. + return 4, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFINFO2: ifm2, + sysRTM_NEWMADDR2: ifmam2, + sysRTM_GET2: rtm2, + } +} diff --git a/vendor/golang.org/x/net/route/sys_dragonfly.go b/vendor/golang.org/x/net/route/sys_dragonfly.go new file mode 100644 index 0000000..0c14bc2 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_dragonfly.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import "unsafe" + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. 
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrDragonFlyBSD4} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrDragonFlyBSD4} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: sizeofIfaMsghdrDragonFlyBSD4, bodyOff: sizeofIfaMsghdrDragonFlyBSD4} + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam := &wireFormat{extOff: sizeofIfmaMsghdrDragonFlyBSD4, bodyOff: sizeofIfmaMsghdrDragonFlyBSD4} + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrDragonFlyBSD4, bodyOff: sizeofIfAnnouncemsghdrDragonFlyBSD4} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_freebsd.go b/vendor/golang.org/x/net/route/sys_freebsd.go new file mode 100644 index 0000000..89ba1c4 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_freebsd.go @@ -0,0 +1,155 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +import ( + "syscall" + "unsafe" +) + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. +type RouteMetrics struct { + PathMTU int // path maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (rmx *RouteMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *RouteMessage) Sys() []Sys { + if kernelAlign == 8 { + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])), + }, + } + } + return []Sys{ + &RouteMetrics{ + PathMTU: int(nativeEndian.Uint32(m.raw[m.extOff+4 : m.extOff+8])), + }, + } +} + +// InterfaceMetrics represents interface metrics. +type InterfaceMetrics struct { + Type int // interface type + MTU int // maximum transmission unit +} + +// SysType implements the SysType method of Sys interface. +func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics } + +// Sys implements the Sys method of Message interface. +func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[m.extOff]), + MTU: int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + wordSize := int(unsafe.Sizeof(p)) + align := int(unsafe.Sizeof(p)) + // In the case of kern.supported_archs="amd64 i386", we need + // to know the underlying kernel's architecture because the + // alignment for routing facilities are set at the build time + // of the kernel. 
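+	// kern.conftxt is the kernel's build configuration, one directive
+	// per line; the loop below scans it for the "machine <arch>" line.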
+ conf, _ := syscall.Sysctl("kern.conftxt") + for i, j := 0, 0; j < len(conf); j++ { + if conf[j] != '\n' { + continue + } + s := conf[i:j] + i = j + 1 + if len(s) > len("machine") && s[:len("machine")] == "machine" { + s = s[len("machine"):] + for k := 0; k < len(s); k++ { + if s[k] == ' ' || s[k] == '\t' { + s = s[1:] + } + break + } + if s == "amd64" { + align = 8 + } + break + } + } + var rtm, ifm, ifam, ifmam, ifanm *wireFormat + if align != wordSize { // 386 emulation on amd64 + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10Emu, bodyOff: sizeofIfmaMsghdrFreeBSD10Emu} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10Emu, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10Emu} + } else { + rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10 - sizeofRtMetricsFreeBSD10, bodyOff: sizeofRtMsghdrFreeBSD10} + ifm = &wireFormat{extOff: 16} + ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10, bodyOff: sizeofIfaMsghdrFreeBSD10} + ifmam = &wireFormat{extOff: sizeofIfmaMsghdrFreeBSD10, bodyOff: sizeofIfmaMsghdrFreeBSD10} + ifanm = &wireFormat{extOff: sizeofIfAnnouncemsghdrFreeBSD10, bodyOff: sizeofIfAnnouncemsghdrFreeBSD10} + } + rel, _ := syscall.SysctlUint32("kern.osreldate") + switch { + case rel < 800000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD7 + } + case 800000 <= rel && rel < 900000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD8 + } + case 900000 <= rel && rel < 1000000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD9 + } + case 1000000 <= rel && rel < 1100000: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD10 + } + default: + if align != wordSize { // 386 emulation on amd64 + ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu + } else { + ifm.bodyOff = sizeofIfMsghdrFreeBSD11 + } + } + rtm.parse = rtm.parseRouteMessage + ifm.parse = ifm.parseInterfaceMessage + ifam.parse = ifam.parseInterfaceAddrMessage + ifmam.parse = ifmam.parseInterfaceMulticastAddrMessage + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return align, map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_NEWMADDR: ifmam, + sysRTM_DELMADDR: ifmam, + sysRTM_IFANNOUNCE: ifanm, + } +} diff --git a/vendor/golang.org/x/net/route/sys_netbsd.go b/vendor/golang.org/x/net/route/sys_netbsd.go new file mode 100644 index 0000000..02f71d5 --- /dev/null +++ b/vendor/golang.org/x/net/route/sys_netbsd.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package route + +func (typ RIBType) parseable() bool { return true } + +// RouteMetrics represents route metrics. 
+type RouteMetrics struct {
+	PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+	return []Sys{
+		&RouteMetrics{
+			PathMTU: int(nativeEndian.Uint64(m.raw[m.extOff+8 : m.extOff+16])),
+		},
+	}
+}
+
+// InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+	Type int // interface type
+	MTU  int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys {
+	return []Sys{
+		&InterfaceMetrics{
+			Type: int(m.raw[m.extOff]),
+			MTU:  int(nativeEndian.Uint32(m.raw[m.extOff+8 : m.extOff+12])),
+		},
+	}
+}
+
+func probeRoutingStack() (int, map[int]*wireFormat) {
+	rtm := &wireFormat{extOff: 40, bodyOff: sizeofRtMsghdrNetBSD7}
+	rtm.parse = rtm.parseRouteMessage
+	ifm := &wireFormat{extOff: 16, bodyOff: sizeofIfMsghdrNetBSD7}
+	ifm.parse = ifm.parseInterfaceMessage
+	ifam := &wireFormat{extOff: sizeofIfaMsghdrNetBSD7, bodyOff: sizeofIfaMsghdrNetBSD7}
+	ifam.parse = ifam.parseInterfaceAddrMessage
+	ifanm := &wireFormat{extOff: sizeofIfAnnouncemsghdrNetBSD7, bodyOff: sizeofIfAnnouncemsghdrNetBSD7}
+	ifanm.parse = ifanm.parseInterfaceAnnounceMessage
+	// NetBSD 6 and above kernels require 64-bit aligned access to
+	// routing facilities.
+	return 8, map[int]*wireFormat{
+		sysRTM_ADD:        rtm,
+		sysRTM_DELETE:     rtm,
+		sysRTM_CHANGE:     rtm,
+		sysRTM_GET:        rtm,
+		sysRTM_LOSING:     rtm,
+		sysRTM_REDIRECT:   rtm,
+		sysRTM_MISS:       rtm,
+		sysRTM_LOCK:       rtm,
+		sysRTM_RESOLVE:    rtm,
+		sysRTM_NEWADDR:    ifam,
+		sysRTM_DELADDR:    ifam,
+		sysRTM_IFANNOUNCE: ifanm,
+		sysRTM_IFINFO:     ifm,
+	}
+}
diff --git a/vendor/golang.org/x/net/route/sys_openbsd.go b/vendor/golang.org/x/net/route/sys_openbsd.go
new file mode 100644
index 0000000..c5674e8
--- /dev/null
+++ b/vendor/golang.org/x/net/route/sys_openbsd.go
@@ -0,0 +1,80 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package route
+
+import "unsafe"
+
+func (typ RIBType) parseable() bool {
+	switch typ {
+	case sysNET_RT_STATS, sysNET_RT_TABLE:
+		return false
+	default:
+		return true
+	}
+}
+
+// RouteMetrics represents route metrics.
+type RouteMetrics struct {
+	PathMTU int // path maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (rmx *RouteMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *RouteMessage) Sys() []Sys {
+	return []Sys{
+		&RouteMetrics{
+			PathMTU: int(nativeEndian.Uint32(m.raw[60:64])),
+		},
+	}
+}
+
+// InterfaceMetrics represents interface metrics.
+type InterfaceMetrics struct {
+	Type int // interface type
+	MTU  int // maximum transmission unit
+}
+
+// SysType implements the SysType method of Sys interface.
+func (imx *InterfaceMetrics) SysType() SysType { return SysMetrics }
+
+// Sys implements the Sys method of Message interface.
+func (m *InterfaceMessage) Sys() []Sys { + return []Sys{ + &InterfaceMetrics{ + Type: int(m.raw[24]), + MTU: int(nativeEndian.Uint32(m.raw[28:32])), + }, + } +} + +func probeRoutingStack() (int, map[int]*wireFormat) { + var p uintptr + rtm := &wireFormat{extOff: -1, bodyOff: -1} + rtm.parse = rtm.parseRouteMessage + ifm := &wireFormat{extOff: -1, bodyOff: -1} + ifm.parse = ifm.parseInterfaceMessage + ifam := &wireFormat{extOff: -1, bodyOff: -1} + ifam.parse = ifam.parseInterfaceAddrMessage + ifanm := &wireFormat{extOff: -1, bodyOff: -1} + ifanm.parse = ifanm.parseInterfaceAnnounceMessage + return int(unsafe.Sizeof(p)), map[int]*wireFormat{ + sysRTM_ADD: rtm, + sysRTM_DELETE: rtm, + sysRTM_CHANGE: rtm, + sysRTM_GET: rtm, + sysRTM_LOSING: rtm, + sysRTM_REDIRECT: rtm, + sysRTM_MISS: rtm, + sysRTM_LOCK: rtm, + sysRTM_RESOLVE: rtm, + sysRTM_NEWADDR: ifam, + sysRTM_DELADDR: ifam, + sysRTM_IFINFO: ifm, + sysRTM_IFANNOUNCE: ifanm, + sysRTM_DESYNC: rtm, + } +} diff --git a/vendor/golang.org/x/net/route/syscall.go b/vendor/golang.org/x/net/route/syscall.go new file mode 100644 index 0000000..c211188 --- /dev/null +++ b/vendor/golang.org/x/net/route/syscall.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package route + +import ( + "syscall" + "unsafe" +) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/vendor/golang.org/x/net/route/zsys_darwin.go b/vendor/golang.org/x/net/route/zsys_darwin.go new file mode 100644 index 0000000..4e2e1ab --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STAT = 0x4 + sysNET_RT_TRASH = 0x5 + sysNET_RT_IFLIST2 = 0x6 + sysNET_RT_DUMP2 = 0x7 + sysNET_RT_MAXID = 0xa +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_MAXID = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFINFO2 = 0x12 + sysRTM_NEWMADDR2 = 0x13 + sysRTM_GET2 = 0x14 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 
0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrDarwin15 = 0x70 + sizeofIfaMsghdrDarwin15 = 0x14 + sizeofIfmaMsghdrDarwin15 = 0x10 + sizeofIfMsghdr2Darwin15 = 0xa0 + sizeofIfmaMsghdr2Darwin15 = 0x14 + sizeofIfDataDarwin15 = 0x60 + sizeofIfData64Darwin15 = 0x80 + + sizeofRtMsghdrDarwin15 = 0x5c + sizeofRtMsghdr2Darwin15 = 0x5c + sizeofRtMetricsDarwin15 = 0x38 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_dragonfly.go b/vendor/golang.org/x/net/route/zsys_dragonfly.go new file mode 100644 index 0000000..719c88d --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_dragonfly.go @@ -0,0 +1,98 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_MAXID = 0x4 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 + sysCTL_LWKT = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x6 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_MPLS1 = 0x100 + sysRTA_MPLS2 = 0x200 + sysRTA_MPLS3 = 0x400 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MPLS1 = 0x8 + sysRTAX_MPLS2 = 0x9 + sysRTAX_MPLS3 = 0xa + sysRTAX_MAX = 0xb +) + +const ( + sizeofIfMsghdrDragonFlyBSD4 = 0xb0 + sizeofIfaMsghdrDragonFlyBSD4 = 0x14 + sizeofIfmaMsghdrDragonFlyBSD4 = 0x10 + sizeofIfAnnouncemsghdrDragonFlyBSD4 = 0x18 + + sizeofRtMsghdrDragonFlyBSD4 = 0x98 + sizeofRtMetricsDragonFlyBSD4 = 0x70 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_386.go b/vendor/golang.org/x/net/route/zsys_freebsd_386.go new file mode 100644 index 0000000..b03bc01 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_386.go @@ -0,0 +1,126 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + 
sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x60 + sizeofIfMsghdrFreeBSD8 = 0x60 + sizeofIfMsghdrFreeBSD9 = 0x60 + sizeofIfMsghdrFreeBSD10 = 0x64 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x50 + sizeofIfDataFreeBSD8 = 0x50 + sizeofIfDataFreeBSD9 = 0x50 + sizeofIfDataFreeBSD10 = 0x54 + sizeofIfDataFreeBSD11 = 0x98 + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go new file mode 100644 index 0000000..0b675b3 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_amd64.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 
+ sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0xb0 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0xb0 + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x98 + sizeofRtMetricsFreeBSD10 = 0x70 + + sizeofIfMsghdrFreeBSD7 = 0xa8 + sizeofIfMsghdrFreeBSD8 = 0xa8 + sizeofIfMsghdrFreeBSD9 = 0xa8 + sizeofIfMsghdrFreeBSD10 = 0xa8 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + sizeofIfDataFreeBSD7 = 0x98 + sizeofIfDataFreeBSD8 = 0x98 + sizeofIfDataFreeBSD9 = 0x98 + sizeofIfDataFreeBSD10 = 0x98 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0xb0 + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x98 + sizeofRtMetricsFreeBSD10Emu = 0x70 + + sizeofIfMsghdrFreeBSD7Emu = 0xa8 + sizeofIfMsghdrFreeBSD8Emu = 0xa8 + sizeofIfMsghdrFreeBSD9Emu = 0xa8 + sizeofIfMsghdrFreeBSD10Emu = 0xa8 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x98 + sizeofIfDataFreeBSD8Emu = 0x98 + sizeofIfDataFreeBSD9Emu = 0x98 + sizeofIfDataFreeBSD10Emu = 0x98 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_freebsd_arm.go b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go new file mode 100644 index 0000000..58f8ea1 --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_freebsd_arm.go @@ -0,0 +1,123 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_IFMALIST = 0x4 + sysNET_RT_IFLISTL = 0x5 +) + +const ( + sysCTL_MAXNAME = 0x18 + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_P1003_1B = 0x9 +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_NEWMADDR = 0xf + sysRTM_DELMADDR = 0x10 + sysRTM_IFANNOUNCE = 0x11 + sysRTM_IEEE80211 = 0x12 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_MAX = 0x8 +) + +const ( + sizeofIfMsghdrlFreeBSD10 = 0x68 + sizeofIfaMsghdrFreeBSD10 = 0x14 + sizeofIfaMsghdrlFreeBSD10 = 0x6c + sizeofIfmaMsghdrFreeBSD10 = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10 = 0x18 + + sizeofRtMsghdrFreeBSD10 = 0x5c + sizeofRtMetricsFreeBSD10 = 0x38 + + sizeofIfMsghdrFreeBSD7 = 0x70 + sizeofIfMsghdrFreeBSD8 = 0x70 + sizeofIfMsghdrFreeBSD9 = 0x70 + sizeofIfMsghdrFreeBSD10 = 0x70 + sizeofIfMsghdrFreeBSD11 = 0xa8 + + 
sizeofIfDataFreeBSD7 = 0x60 + sizeofIfDataFreeBSD8 = 0x60 + sizeofIfDataFreeBSD9 = 0x60 + sizeofIfDataFreeBSD10 = 0x60 + sizeofIfDataFreeBSD11 = 0x98 + + sizeofIfMsghdrlFreeBSD10Emu = 0x68 + sizeofIfaMsghdrFreeBSD10Emu = 0x14 + sizeofIfaMsghdrlFreeBSD10Emu = 0x6c + sizeofIfmaMsghdrFreeBSD10Emu = 0x10 + sizeofIfAnnouncemsghdrFreeBSD10Emu = 0x18 + + sizeofRtMsghdrFreeBSD10Emu = 0x5c + sizeofRtMetricsFreeBSD10Emu = 0x38 + + sizeofIfMsghdrFreeBSD7Emu = 0x70 + sizeofIfMsghdrFreeBSD8Emu = 0x70 + sizeofIfMsghdrFreeBSD9Emu = 0x70 + sizeofIfMsghdrFreeBSD10Emu = 0x70 + sizeofIfMsghdrFreeBSD11Emu = 0xa8 + + sizeofIfDataFreeBSD7Emu = 0x60 + sizeofIfDataFreeBSD8Emu = 0x60 + sizeofIfDataFreeBSD9Emu = 0x60 + sizeofIfDataFreeBSD10Emu = 0x60 + sizeofIfDataFreeBSD11Emu = 0x98 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_netbsd.go b/vendor/golang.org/x/net/route/zsys_netbsd.go new file mode 100644 index 0000000..e0df45e --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_netbsd.go @@ -0,0 +1,97 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x22 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x5 + sysNET_RT_MAXID = 0x6 +) + +const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_VFS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_USER = 0x8 + sysCTL_DDB = 0x9 + sysCTL_PROC = 0xa + sysCTL_VENDOR = 0xb + sysCTL_EMUL = 0xc + sysCTL_SECURITY = 0xd + sysCTL_MAXID = 0xe +) + +const ( + sysRTM_VERSION = 0x4 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_OLDADD = 0x9 + sysRTM_OLDDEL = 0xa + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFANNOUNCE = 0x10 + sysRTM_IEEE80211 = 0x11 + sysRTM_SETGATE = 0x12 + sysRTM_LLINFO_UPD = 0x13 + sysRTM_IFINFO = 0x14 + sysRTM_CHGADDR = 0x15 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_TAG = 0x100 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_TAG = 0x8 + sysRTAX_MAX = 0x9 +) + +const ( + sizeofIfMsghdrNetBSD7 = 0x98 + sizeofIfaMsghdrNetBSD7 = 0x18 + sizeofIfAnnouncemsghdrNetBSD7 = 0x18 + + sizeofRtMsghdrNetBSD7 = 0x78 + sizeofRtMetricsNetBSD7 = 0x50 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/route/zsys_openbsd.go b/vendor/golang.org/x/net/route/zsys_openbsd.go new file mode 100644 index 0000000..db8c8ef --- /dev/null +++ b/vendor/golang.org/x/net/route/zsys_openbsd.go @@ -0,0 +1,101 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package route + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_ROUTE = 0x11 + sysAF_LINK = 0x12 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 + + sysNET_RT_DUMP = 0x1 + sysNET_RT_FLAGS = 0x2 + sysNET_RT_IFLIST = 0x3 + sysNET_RT_STATS = 0x4 + sysNET_RT_TABLE = 0x5 + sysNET_RT_IFNAMES = 0x6 + sysNET_RT_MAXID = 0x7 +) + 
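+// A RIB fetch composes its sysctl MIB vector as
+// [sysCTL_NET, sysAF_ROUTE, 0, af, typ, arg] from the constants in
+// this file; see FetchRIB in route.go.
+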
+const ( + sysCTL_MAXNAME = 0xc + + sysCTL_UNSPEC = 0x0 + sysCTL_KERN = 0x1 + sysCTL_VM = 0x2 + sysCTL_FS = 0x3 + sysCTL_NET = 0x4 + sysCTL_DEBUG = 0x5 + sysCTL_HW = 0x6 + sysCTL_MACHDEP = 0x7 + sysCTL_DDB = 0x9 + sysCTL_VFS = 0xa + sysCTL_MAXID = 0xb +) + +const ( + sysRTM_VERSION = 0x5 + + sysRTM_ADD = 0x1 + sysRTM_DELETE = 0x2 + sysRTM_CHANGE = 0x3 + sysRTM_GET = 0x4 + sysRTM_LOSING = 0x5 + sysRTM_REDIRECT = 0x6 + sysRTM_MISS = 0x7 + sysRTM_LOCK = 0x8 + sysRTM_RESOLVE = 0xb + sysRTM_NEWADDR = 0xc + sysRTM_DELADDR = 0xd + sysRTM_IFINFO = 0xe + sysRTM_IFANNOUNCE = 0xf + sysRTM_DESYNC = 0x10 + sysRTM_INVALIDATE = 0x11 + sysRTM_BFD = 0x12 + sysRTM_PROPOSAL = 0x13 + + sysRTA_DST = 0x1 + sysRTA_GATEWAY = 0x2 + sysRTA_NETMASK = 0x4 + sysRTA_GENMASK = 0x8 + sysRTA_IFP = 0x10 + sysRTA_IFA = 0x20 + sysRTA_AUTHOR = 0x40 + sysRTA_BRD = 0x80 + sysRTA_SRC = 0x100 + sysRTA_SRCMASK = 0x200 + sysRTA_LABEL = 0x400 + sysRTA_BFD = 0x800 + sysRTA_DNS = 0x1000 + sysRTA_STATIC = 0x2000 + sysRTA_SEARCH = 0x4000 + + sysRTAX_DST = 0x0 + sysRTAX_GATEWAY = 0x1 + sysRTAX_NETMASK = 0x2 + sysRTAX_GENMASK = 0x3 + sysRTAX_IFP = 0x4 + sysRTAX_IFA = 0x5 + sysRTAX_AUTHOR = 0x6 + sysRTAX_BRD = 0x7 + sysRTAX_SRC = 0x8 + sysRTAX_SRCMASK = 0x9 + sysRTAX_LABEL = 0xa + sysRTAX_BFD = 0xb + sysRTAX_DNS = 0xc + sysRTAX_STATIC = 0xd + sysRTAX_SEARCH = 0xe + sysRTAX_MAX = 0xf +) + +const ( + sizeofRtMsghdr = 0x60 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000..c646a69 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
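+		// The three statements below timestamp the meta-event, slide
+		// the remaining events left, and store the new event in the
+		// freed last slot.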
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

+<h1>/debug/events</h1>

    + +
  • + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
    {{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
    + +{{if $.EventLogs}} +


    +

+<h2>Family: {{$.Family}}</h2>

    + +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
    WhenElapsed
    {{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
    {{$el.Stack|trimSpace}}
    {{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
    +{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000..9bf4286 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
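+// Bars are scaled so that the fullest bucket spans exactly this width. For
+// example (illustrative), if the largest bucket holds 700 observations, a
+// bucket holding 175 renders as int(175 * 350.0/700) = 87 pixels.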
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
    Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
    +
    + +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
    [{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
    +`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/histogram_test.go b/vendor/golang.org/x/net/trace/histogram_test.go new file mode 100644 index 0000000..d384b93 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram_test.go @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if !isApproximate(stdDev, 0) { + 
t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 1, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 10, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } +} + +func TestPercentileBoundary(t *testing.T) { + a := new(histogram) + add(a, 5, 1<<4) + add(a, 10, 1<<6) + add(a, 5, 1<<7) + + for _, test := range percentileTests { + percentile := a.percentileBoundary(test.fraction) + if percentile != test.expected { + t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) + } + } +} + +func TestCopyFrom(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} + + a.CopyFrom(&b) + + if a.String() != b.String() { + t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) + } +} + +func TestClear(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + + a.Clear() + + expected := "0, 0.000000, 0, 0, []" + if a.String() != expected { + t.Errorf("a.String = %s WANT %s", a.String(), expected) + } +} + +func TestNew(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := "0, 0.000000, 0, 0, []" + if b.(*histogram).String() != expected { + t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) + } +} + +func TestAdd(t *testing.T) { + // The tests here depend on the associativity of addMeasurement and Add. 
+ // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000..bb72a52 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1082 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. 
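+
+Both endpoints are guarded by the AuthRequest function declared below,
+which by default only admits requests from localhost. A program may
+replace it; for example (an illustrative sketch):
+
+	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+		return true, false // render pages for everyone, hide sensitive events
+	}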
+ +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. 
+// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. 
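+// A typical handler (mirroring the package example above) uses one as:
+//
+//	tr := trace.New("mypkg.Foo", req.URL.Path)
+//	defer tr.Finish()
+//	tr.LazyPrintf("some event %q happened", str)
+//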
+type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. 
+ completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. 
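+// For example, the bucket built by newFamily above with
+// minCond(1 * time.Second) holds completed traces whose Elapsed is at least
+// one second, and the errorCond{} bucket holds traces that called SetError.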
+type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. 
+ Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + maxEvents int + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
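+	// For example (illustrative), with the default maxEvents of 10 the
+	// marker sits at index 4, so the first four events and the five most
+	// recent ones are kept around "(N events discarded)".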
+ tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

+<h1>/debug/requests</h1>

    +{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
    {{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
    +{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
    +

+<h2>Family: {{$.Family}}</h2>

    + +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

    Showing {{len $.Traces}} of {{$.Total}} traces.

    +{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
    + {{if $.Active}}Active{{else}}Completed{{end}} Requests +
    WhenElapsed (s)
    {{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
    {{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
    +{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

    Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

    +{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 0000000..d608191 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 0000000..df6e1fb --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_test.go b/vendor/golang.org/x/net/trace/trace_test.go new file mode 100644 index 0000000..bfd9dfe --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_test.go @@ -0,0 +1,178 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "net/http" + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. 
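+// NewContext and FromContext (defined in trace_go16.go and trace_go17.go
+// above) carry a Trace through a call tree; e.g. (an illustrative sketch):
+//
+//	ctx := trace.NewContext(ctx, tr)
+//	if tr, ok := trace.FromContext(ctx); ok {
+//		tr.LazyPrintf("deep in the call stack")
+//	}
+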
+func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} + +func TestAuthRequest(t *testing.T) { + testCases := []struct { + host string + want bool + }{ + {host: "192.168.23.1", want: false}, + {host: "192.168.23.1:8080", want: false}, + {host: "malformed remote addr", want: false}, + {host: "localhost", want: true}, + {host: "localhost:8080", want: true}, + {host: "127.0.0.1", want: true}, + {host: "127.0.0.1:8080", want: true}, + {host: "::1", want: true}, + {host: "[::1]:8080", want: true}, + } + for _, tt := range testCases { + req := &http.Request{RemoteAddr: tt.host} + any, sensitive := AuthRequest(req) + if any != tt.want || sensitive != tt.want { + t.Errorf("AuthRequest(%q) = %t, %t; want %t, %t", tt.host, any, sensitive, tt.want, tt.want) + } + } +} + +// TestParseTemplate checks that all templates used by this package are valid +// as they are parsed on first usage +func TestParseTemplate(t *testing.T) { + if tmpl := distTmpl(); tmpl == nil { + t.Error("invalid template returned from distTmpl()") + } + if tmpl := pageTmpl(); tmpl == nil { + t.Error("invalid template returned from pageTmpl()") + } + if tmpl := eventsTmpl(); tmpl == nil { + t.Error("invalid template returned from eventsTmpl()") + } +} + +func benchmarkTrace(b *testing.B, maxEvents, numEvents int) { + numSpans := (b.N + numEvents + 1) / numEvents + + for i := 0; i < numSpans; i++ { + tr := New("test", "test") + tr.SetMaxEvents(maxEvents) + for j := 0; j < numEvents; j++ { + tr.LazyPrintf("%d", j) + } + tr.Finish() + } +} + +func BenchmarkTrace_Default_2(b *testing.B) { + benchmarkTrace(b, 0, 2) +} + +func BenchmarkTrace_Default_10(b *testing.B) { + benchmarkTrace(b, 0, 10) +} + +func BenchmarkTrace_Default_100(b *testing.B) { + benchmarkTrace(b, 0, 100) +} + +func BenchmarkTrace_Default_1000(b *testing.B) { + benchmarkTrace(b, 0, 1000) +} + +func BenchmarkTrace_Default_10000(b *testing.B) { + benchmarkTrace(b, 0, 10000) +} + +func BenchmarkTrace_10_2(b *testing.B) { + benchmarkTrace(b, 10, 2) +} + +func BenchmarkTrace_10_10(b *testing.B) { + benchmarkTrace(b, 10, 10) +} + +func BenchmarkTrace_10_100(b *testing.B) { + benchmarkTrace(b, 10, 100) +} + +func BenchmarkTrace_10_1000(b *testing.B) { + benchmarkTrace(b, 10, 1000) +} + +func BenchmarkTrace_10_10000(b *testing.B) { + benchmarkTrace(b, 10, 10000) +} + +func BenchmarkTrace_100_2(b *testing.B) { + benchmarkTrace(b, 100, 2) +} + +func BenchmarkTrace_100_10(b *testing.B) { + benchmarkTrace(b, 100, 10) +} + +func BenchmarkTrace_100_100(b *testing.B) { + benchmarkTrace(b, 100, 100) +} + +func BenchmarkTrace_100_1000(b *testing.B) { + benchmarkTrace(b, 100, 1000) +} + +func BenchmarkTrace_100_10000(b *testing.B) { + benchmarkTrace(b, 100, 10000) +} + +func BenchmarkTrace_1000_2(b *testing.B) { + benchmarkTrace(b, 1000, 2) +} + +func BenchmarkTrace_1000_10(b *testing.B) { + benchmarkTrace(b, 1000, 10) +} + +func BenchmarkTrace_1000_100(b *testing.B) { + benchmarkTrace(b, 1000, 100) +} + +func BenchmarkTrace_1000_1000(b *testing.B) { + benchmarkTrace(b, 1000, 1000) +} + +func BenchmarkTrace_1000_10000(b *testing.B) { + benchmarkTrace(b, 1000, 10000) +} diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go new file mode 100644 index 0000000..748118d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file.go @@ -0,0 +1,796 @@ +// 
Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "encoding/xml" + "io" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. +type FileSystem interface { + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +func (d Dir) RemoveAll(ctx context.Context, name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. 
+ return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. + if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. 
The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. 
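+		// (find reports a nil parent exactly when the name resolves to
+		// the root, which is also how the oDir check above fires.)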
+ return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. + children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. 
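+// The blank-identifier declaration below is a compile-time assertion: the
+// build breaks if *memFile ever stops satisfying DeadPropsHolder. The same
+// idiom could assert, for example, that *memFS satisfies FileSystem:
+//
+//	var _ FileSystem = (*memFS)(nil)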
+var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? + switch whence { + case os.SEEK_SET: + npos = int(offset) + case os.SEEK_CUR: + npos += int(offset) + case os.SEEK_END: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. 
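+		// (Note that moveFiles takes no Depth parameter: validating the
+		// Depth header of a MOVE request is left to the HTTP handler,
+		// not to this helper.)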
+ if err := fs.RemoveAll(ctx, dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(ctx, src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." + + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. 
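+//
+// A hypothetical caller (not part of this package) could collect the
+// starting point and everything at most one level below it like this, where
+// rootInfo is assumed to come from fs.Stat(ctx, "/"):
+//
+//	var names []string
+//	err := walkFS(ctx, fs, 1, "/", rootInfo, func(p string, _ os.FileInfo, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		names = append(names, p)
+//		return nil
+//	})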
+func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(ctx, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go new file mode 100644 index 0000000..fa38770 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.6.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package webdav + +import ( + "net/http" + + "golang.org/x/net/context" +) + +func getContext(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go new file mode 100644 index 0000000..d1c3de8 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_go1.7.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package webdav + +import ( + "context" + "net/http" +) + +func getContext(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/golang.org/x/net/webdav/file_test.go b/vendor/golang.org/x/net/webdav/file_test.go new file mode 100644 index 0000000..bfd96e1 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/file_test.go @@ -0,0 +1,1184 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + 
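+	// Each case walks tc.dir with memFS.walk and checks the sequence of
+	// (name, frag, final) steps observed against tc.want.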
type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + {"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + ctx := context.Background() + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(ctx, d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. +func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(ctx, name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. The + // indentation of the "find"s and "stat"s helps distinguish such test cases. 
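+	// Each test case is one line in a tiny command language:
+	//
+	//	create <name> <data>               want <result>
+	//	mk-dir <name>                      want <result>
+	//	rm-all <name>                      want <result>
+	//	stat   <name>                      want <dir|size|result>
+	//	move__ o=<T|F> <src> <dst>         want <result>
+	//	copy__ o=<T|F> d=<0|∞> <src> <dst> want <result>
+	//	find   <name>...
+	//
+	// where <result> is one of ok, err, errExist or errNotExist, o= sets
+	// the Overwrite flag and d= the copy Depth.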
+ testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F d=0 /a /b want ok", + "copy__ o=T d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 
5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + ctx := context.Background() + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(ctx, parts[0], 0777) + case "move__": + _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(ctx, parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. 
+ } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat(ctx, "/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", 
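+		// In the wantData strings above and below, '.' stands for a NUL
+		// byte: seeking past the end of file and writing creates a hole
+		// that reads back as zeros (the test rewrites '\x00' to '.').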
+ "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + ctx := context.Background() + + const filename = "/foo" + fs := NewMemFS() + f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = os.SEEK_SET + case "cur": + whence = os.SEEK_CUR + case "end": + whence = os.SEEK_END + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' 
+ } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(ctx, filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. +func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + ctx := context.Background() + fs := NewMemFS() + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + ctx := context.Background() + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll(ctx, "/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", 
err) + } + if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + }{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + ctx := context.Background() + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + var got []string + traceFn := 
func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, path) + return nil + } + fi, err := fs.Stat(ctx, tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + ctx := context.Background() + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go new file mode 100644 index 0000000..416e81c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. 
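+//
+// A hypothetical example (parseIfHeader is unexported, so this is
+// illustrative only):
+//
+//	h, ok := parseIfHeader(`(<urn:x> ["foo"]) (Not <DAV:no-lock>)`)
+//	// ok is true; h.lists has two entries: the first ANDs a Token
+//	// condition with an ETag condition, the second holds a single
+//	// negated Token condition.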
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/vendor/golang.org/x/net/webdav/if_test.go b/vendor/golang.org/x/net/webdav/if_test.go new file mode 100644 index 0000000..aad61a4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + ``, + ifHeader{}, + }, { + "bad: no list after resource #2", + ` (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + ` (a) (b) `, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `() + ()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `( + ["I am an ETag"]) + (["I am another ETag"])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: 
`urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not + )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `() + (Not )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + ` + ( + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + ` (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + ` (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) + if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README new file mode 100644 index 0000000..89656f4 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 diff --git a/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go new file mode 100644 index 0000000..a712843 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/atom_test.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/example_test.go b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go new file mode 100644 index 0000000..21b48de --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. 
+// This example demonstrates unmarshaling an XML excerpt into a value with
+// some preset fields. Note that the Phone field isn't modified and that
+// the XML <Company> element is ignored. Also, the Groups field is assigned
+// considering the element path provided in its tag.
+func ExampleUnmarshal() {
+	type Email struct {
+		Where string `xml:"where,attr"`
+		Addr  string
+	}
+	type Address struct {
+		City, State string
+	}
+	type Result struct {
+		XMLName xml.Name `xml:"Person"`
+		Name    string   `xml:"FullName"`
+		Phone   string
+		Email   []Email
+		Groups  []string `xml:"Group>Value"`
+		Address
+	}
+	v := Result{Name: "none", Phone: "none"}
+
+	data := `
+		<Person>
+			<FullName>Grace R. Emlin</FullName>
+			<Company>Example Inc.</Company>
+			<Email where="home">
+				<Addr>gre@example.com</Addr>
+			</Email>
+			<Email where='work'>
+				<Addr>gre@work.com</Addr>
+			</Email>
+			<Group>
+				<Value>Friends</Value>
+				<Value>Squash</Value>
+			</Group>
+			<City>Hanga Roa</City>
+			<State>Easter Island</State>
+		</Person>
+	`
+	err := xml.Unmarshal([]byte(data), &v)
+	if err != nil {
+		fmt.Printf("error: %v", err)
+		return
+	}
+	fmt.Printf("XMLName: %#v\n", v.XMLName)
+	fmt.Printf("Name: %q\n", v.Name)
+	fmt.Printf("Phone: %q\n", v.Phone)
+	fmt.Printf("Email: %v\n", v.Email)
+	fmt.Printf("Groups: %v\n", v.Groups)
+	fmt.Printf("Address: %v\n", v.Address)
+	// Output:
+	// XMLName: xml.Name{Space:"", Local:"Person"}
+	// Name: "Grace R. Emlin"
+	// Phone: "none"
+	// Email: [{home gre@example.com} {work gre@work.com}]
+	// Groups: [Friends Squash]
+	// Address: {Hanga Roa Easter Island}
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
new file mode 100644
index 0000000..cb82ec2
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
@@ -0,0 +1,1223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+const (
+	// A generic XML header suitable for use with the output of Marshal.
+	// This is not automatically added to any output of this package,
+	// it is provided as a convenience.
+	Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+//     - the tag on the XMLName field, if the data is a struct
+//     - the value of the XMLName field of type xml.Name
+//     - the tag of the struct field used to obtain the data
+//     - the name of the struct field used to obtain the data
+//     - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+//     - the XMLName field, described above, is omitted.
+//     - a field with tag "-" is omitted.
+//     - a field with tag "name,attr" becomes an attribute with
+//       the given name in the XML element.
+//     - a field with tag ",attr" becomes an attribute with the
+//       field name in the XML element.
+//     - a field with tag ",chardata" is written as character data,
+//       not as an XML element.
+//     - a field with tag ",innerxml" is written verbatim, not subject
+//       to the usual marshalling procedure.
+//     - a field with tag ",comment" is written as an XML comment, not
+//       subject to the usual marshalling procedure. It must not contain
+//       the "--" string within it.
+//     - a field with a tag including the "omitempty" option is omitted
+//       if the field value is empty.
The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. 
+func (enc *Encoder) Encode(v interface{}) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+// EncodeElement writes the XML encoding of v to the stream,
+// using start as the outermost tag in the encoding.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// EncodeElement calls Flush before returning.
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+	err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+	if err != nil {
+		return err
+	}
+	return enc.p.Flush()
+}
+
+var (
+	begComment   = []byte("<!--")
+	endComment   = []byte("-->")
+	endProcInst  = []byte("?>")
+	endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not
+// properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a
+// larger operation such as Encode or EncodeElement (or a custom
+// Marshaler's MarshalXML invoked during those), and those will call
+// Flush when finished. Callers that create an Encoder and then invoke
+// EncodeToken directly, without using Encode or EncodeElement, need to
+// call Flush when finished to ensure that the XML is written to the
+// underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only
+// as the first token in the stream.
+//
+// When encoding a StartElement holding an XML namespace prefix
+// declaration for a prefix that is not already declared, contained
+// elements (including the StartElement itself) will use the declared
+// prefix when encoding names with matching namespace URIs.
+func (enc *Encoder) EncodeToken(t Token) error {
+
+	p := &enc.p
+	switch t := t.(type) {
+	case StartElement:
+		if err := p.writeStart(&t); err != nil {
+			return err
+		}
+	case EndElement:
+		if err := p.writeEnd(t.Name); err != nil {
+			return err
+		}
+	case CharData:
+		escapeText(p, t, false)
+	case Comment:
+		if bytes.Contains(t, endComment) {
+			return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+		}
+		p.WriteString("<!--")
+		p.Write(t)
+		p.WriteString("-->")
+		return p.cachedWriteError()
+	case ProcInst:
+		// First token to be encoded which is also a ProcInst with target of xml
+		// is the xml declaration. The only ProcInst where target of xml is allowed.
+		if t.Target == "xml" && p.Buffered() != 0 {
+			return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+		}
+		if !isNameString(t.Target) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+		}
+		if bytes.Contains(t.Inst, endProcInst) {
+			return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+		}
+		p.WriteString("<?")
+		p.WriteString(t.Target)
+		if len(t.Inst) > 0 {
+			p.WriteByte(' ')
+			p.Write(t.Inst)
+		}
+		p.WriteString("?>")
+	case Directive:
+		if !isValidDirective(t) {
+			return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
+		}
+		p.WriteString("<!")
+		p.Write(t)
+		p.WriteString(">")
+	default:
+		return fmt.Errorf("xml: EncodeToken of invalid token type")
+
+	}
+	return p.cachedWriteError()
+}
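+
+// encodeTokenSketch is an editorial sketch, not part of the upstream file: it
+// illustrates the EncodeToken/Flush contract documented above. EncodeToken
+// only buffers output, so nothing reaches the underlying writer until the
+// explicit Flush call; the greeting element is purely illustrative.
+func encodeTokenSketch() (string, error) {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+	start := StartElement{Name: Name{Local: "greeting"}}
+	for _, tok := range []Token{start, CharData("hi"), start.End()} {
+		if err := enc.EncodeToken(tok); err != nil {
+			return "", err
+		}
+	}
+	// Without this Flush, buf would still be empty.
+	if err := enc.Flush(); err != nil {
+		return "", err
+	}
+	return buf.String(), nil // "<greeting>hi</greeting>"
+}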
+// isValidDirective reports whether dir is a valid directive text,
+// meaning angle brackets are matched, ignoring comments and strings.
+func isValidDirective(dir Directive) bool {
+	var (
+		depth     int
+		inquote   uint8
+		incomment bool
+	)
+	for i, c := range dir {
+		switch {
+		case incomment:
+			if c == '>' {
+				if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
+					incomment = false
+				}
+			}
+			// Just ignore anything in comment
+		case inquote != 0:
+			if c == inquote {
+				inquote = 0
+			}
+			// Just ignore anything within quotes
+		case c == '\'' || c == '"':
+			inquote = c
+		case c == '<':
+			if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
+				incomment = true
+			} else {
+				depth++
+			}
+		case c == '>':
+			if depth == 0 {
+				return false
+			}
+			depth--
+		}
+	}
+	return depth == 0 && inquote == 0 && !incomment
+}
+
+// Flush flushes any buffered XML to the underlying writer.
+// See the EncodeToken documentation for details about when it is necessary.
+func (enc *Encoder) Flush() error {
+	return enc.p.Flush()
+}
+
+type printer struct {
+	*bufio.Writer
+	encoder    *Encoder
+	seq        int
+	indent     string
+	prefix     string
+	depth      int
+	indentedIn bool
+	putNewline bool
+	defaultNS  string
+	attrNS     map[string]string // map prefix -> name space
+	attrPrefix map[string]string // map name space -> prefix
+	prefixes   []printerPrefix
+	tags       []Name
+}
+
+// printerPrefix holds a namespace undo record.
+// When an element is popped, the prefix record
+// is set back to the recorded URL. The empty
+// prefix records the URL for the default name space.
+//
+// The start of an element is recorded with an element
+// that has mark=true.
+type printerPrefix struct {
+	prefix string
+	url    string
+	mark   bool
+}
+
+func (p *printer) prefixForNS(url string, isAttr bool) string {
+	// The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
+	// and must be referred to that way.
+	// (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
+	// but users should not be trying to use that one directly - that's our job.)
+	if url == xmlURL {
+		return "xml"
+	}
+	if !isAttr && url == p.defaultNS {
+		// We can use the default name space.
+		return ""
+	}
+	return p.attrPrefix[url]
+}
+
+// defineNS pushes any namespace definition found in the given attribute.
+// If ignoreNonEmptyDefault is true, an xmlns="nonempty"
+// attribute will be ignored.
+func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error {
+	var prefix string
+	if attr.Name.Local == "xmlns" {
+		if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL {
+			return fmt.Errorf("xml: cannot redefine xmlns attribute prefix")
+		}
+	} else if attr.Name.Space == "xmlns" && attr.Name.Local != "" {
+		prefix = attr.Name.Local
+		if attr.Value == "" {
+			// Technically, an empty XML namespace is allowed for an attribute.
+			// From http://www.w3.org/TR/xml-names11/#scoping-defaulting:
+			//
+			//     The attribute value in a namespace declaration for a prefix may be
+			//     empty. This has the effect, within the scope of the declaration, of removing
+			//     any association of the prefix with a namespace name.
+			//
+			// However our namespace prefixes here are used only as hints. There's
+			// no need to respect the removal of a namespace prefix, so we ignore it.
+			return nil
+		}
+	} else {
+		// Ignore: it's not a namespace definition
+		return nil
+	}
+	if prefix == "" {
+		if attr.Value == p.defaultNS {
+			// No need for redefinition.
+			return nil
+		}
+		if attr.Value != "" && ignoreNonEmptyDefault {
+			// We have an xmlns="..."
value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. 
+func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. 
+ // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. 
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
+func (p *printer) writeName(name Name, isAttr bool) {
+	if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+		p.WriteString(prefix)
+		p.WriteByte(':')
+	}
+	p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+	if name.Local == "" {
+		return fmt.Errorf("xml: end tag with no name")
+	}
+	if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+	}
+	if top := p.tags[len(p.tags)-1]; top != name {
+		if top.Local != name.Local {
+			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+		}
+		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+	}
+	p.tags = p.tags[:len(p.tags)-1]
+
+	p.writeIndent(-1)
+	p.WriteByte('<')
+	p.WriteByte('/')
+	p.writeName(name, false)
+	p.WriteByte('>')
+	p.popPrefix()
+	return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil, nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+	case reflect.String:
+		return val.String(), nil, nil
+	case reflect.Bool:
+		return strconv.FormatBool(val.Bool()), nil, nil
+	case reflect.Array:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// [...]byte
+		var bytes []byte
+		if val.CanAddr() {
+			bytes = val.Slice(0, val.Len()).Bytes()
+		} else {
+			bytes = make([]byte, val.Len())
+			reflect.Copy(reflect.ValueOf(bytes), val)
+		}
+		return "", bytes, nil
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// []byte
+		return "", val.Bytes(), nil
+	}
+	return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
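+
+// simpleKindsSketch is an editorial sketch, not part of the upstream file: it
+// records one non-obvious mapping implemented by marshalSimple above, namely
+// that []byte (and [N]byte) fields marshal as raw character data rather than
+// as a list of numeric elements. The V/B names are illustrative only.
+func simpleKindsSketch() {
+	type V struct{ B []byte }
+	out, _ := Marshal(V{B: []byte("ab")})
+	fmt.Println(string(out)) // prints <V><B>ab</B></V>
+}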
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+	s := parentStack{p: p}
+	for i := range tinfo.fields {
+		finfo := &tinfo.fields[i]
+		if finfo.flags&fAttr != 0 {
+			continue
+		}
+		vf := finfo.value(val)
+
+		// Dereference or skip nil pointer, interface values.
+		switch vf.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			if !vf.IsNil() {
+				vf = vf.Elem()
+			}
+		}
+
+		switch finfo.flags & fMode {
+		case fCharData:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+				data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+				if err != nil {
+					return err
+				}
+				Escape(p, data)
+				continue
+			}
+			if vf.CanAddr() {
+				pv := vf.Addr()
+				if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+					data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+					if err != nil {
+						return err
+					}
+					Escape(p, data)
+					continue
+				}
+			}
+			var scratch [64]byte
+			switch vf.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+			case reflect.Float32, reflect.Float64:
+				Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+			case reflect.Bool:
+				Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+			case reflect.String:
+				if err := EscapeText(p, []byte(vf.String())); err != nil {
+					return err
+				}
+			case reflect.Slice:
+				if elem, ok := vf.Interface().([]byte); ok {
+					if err := EscapeText(p, elem); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+
+		case fComment:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			k := vf.Kind()
+			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+			}
+			if vf.Len() == 0 {
+				continue
+			}
+			p.writeIndent(0)
+			p.WriteString("<!--")
+			dashDash := false
+			dashLast := false
+			switch k {
+			case reflect.String:
+				s := vf.String()
+				dashDash = strings.Index(s, "--") >= 0
+				dashLast = s[len(s)-1] == '-'
+				if !dashDash {
+					p.WriteString(s)
+				}
+			case reflect.Slice:
+				b := vf.Bytes()
+				dashDash = bytes.Index(b, ddBytes) >= 0
+				dashLast = b[len(b)-1] == '-'
+				if !dashDash {
+					p.Write(b)
+				}
+			default:
+				panic("can't happen")
+			}
+			if dashDash {
+				return fmt.Errorf(`xml: comments must not contain "--"`)
+			}
+			if dashLast {
+				// "--->" is invalid grammar. Make it "- -->"
+				p.WriteByte(' ')
+			}
+			p.WriteString("-->")
+			continue
+
+		case fInnerXml:
+			iface := vf.Interface()
+			switch raw := iface.(type) {
+			case []byte:
+				p.Write(raw)
+				continue
+			case string:
+				p.WriteString(raw)
+				continue
+			}
+
+		case fElement, fElement | fAny:
+			if err := s.setParents(finfo, vf); err != nil {
+				return err
+			}
+		}
+		if err := p.marshalValue(vf, finfo, nil); err != nil {
+			return err
+		}
+	}
+	if err := s.setParents(&noField, reflect.Value{}); err != nil {
+		return err
+	}
+	return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+	_, err := p.Write(nil)
+	return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+	if len(p.prefix) == 0 && len(p.indent) == 0 {
+		return
+	}
+	if depthDelta < 0 {
+		p.depth--
+		if p.indentedIn {
+			p.indentedIn = false
+			return
+		}
+		p.indentedIn = false
+	}
+	if p.putNewline {
+		p.WriteByte('\n')
+	} else {
+		p.putNewline = true
+	}
+	if len(p.prefix) > 0 {
+		p.WriteString(p.prefix)
+	}
+	if len(p.indent) > 0 {
+		for i := 0; i < p.depth; i++ {
+			p.WriteString(p.indent)
+		}
+	}
+	if depthDelta > 0 {
+		p.depth++
+		p.indentedIn = true
+	}
+}
+
+type parentStack struct {
+	p       *printer
+	xmlns   string
+	parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { + xmlns := s.p.defaultNS + if finfo.xmlns != "" { + xmlns = finfo.xmlns + } + commonParents := 0 + if xmlns == s.xmlns { + for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { + if finfo.parents[commonParents] != s.parents[commonParents] { + break + } + } + } + // Pop off any parents that aren't in common with the previous field. + for i := len(s.parents) - 1; i >= commonParents; i-- { + if err := s.p.writeEnd(Name{ + Space: s.xmlns, + Local: s.parents[i], + }); err != nil { + return err + } + } + s.parents = finfo.parents + s.xmlns = xmlns + if commonParents >= len(s.parents) { + // No new elements to push. + return nil + } + if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { + // The element is nil, so no need for the start elements. + s.parents = s.parents[:commonParents] + return nil + } + // Push any new parents required. + for _, name := range s.parents[commonParents:] { + start := &StartElement{ + Name: Name{ + Space: s.xmlns, + Local: name, + }, + } + // Set the default name space for parent elements + // to match what we do with other elements. + if s.xmlns != s.p.defaultNS { + start.setDefaultNamespace() + } + if err := s.p.writeStart(start); err != nil { + return err + } + } + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go new file mode 100644 index 0000000..226cfd0 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal_test.go @@ -0,0 +1,1939 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + 
Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment 
string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + 
ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. 
+ { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := 
enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, 
nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + 
}, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go new file mode 100644 index 0000000..4089056 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. 
+// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. 
+type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. 
+ parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read_test.go b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go new file mode 100644 index 0000000..02f1e10 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) 
+ +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { 
+ BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
</table>` +
+			`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+			`</Tables>`,
+		tab: Tables{"hello", "world"},
+	},
+	{
+		xml: `<Tables>` +
+			`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+			`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
+			`</Tables>`,
+		tab: Tables{"hello", "world"},
+	},
+	{
+		xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
+			`<f:table>world</f:table>` +
+			`<h:table>hello</h:table>` +
+			`</Tables>`,
+		tab: Tables{"hello", "world"},
+	},
+	{
+		xml: `<Tables>` +
+			`<table>bogus</table>` +
+			`</Tables>`,
+		tab: Tables{},
+	},
+	{
+		xml: `<Tables>` +
+			`<table>only</table>` +
+			`</Tables>`,
+		tab: Tables{HTable: "only"},
+		ns:  "http://www.w3.org/TR/html4/",
+	},
+	{
+		xml: `<Tables>` +
+			`<table>only</table>` +
+			`</Tables>`,
+		tab: Tables{FTable: "only"},
+		ns:  "http://www.w3schools.com/furniture",
+	},
+	{
+		xml: `<Tables>` +
+			`<table>only</table>` +
+			`</Tables>
    `, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>
    ` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go new file mode 100644 index 0000000..fdde288 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. 
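+// The tag syntax handled here is the standard encoding/xml form: an optional
+// "namespace-URL " prefix, a name or a>b>c parent chain, then comma-separated
+// flags. Illustrative sketch of tags this function accepts (field and
+// namespace names are hypothetical):
+//
+//	type Example struct {
+//		ID    int    `xml:"id,attr"`                   // attribute on the element
+//		Body  string `xml:",chardata"`                 // character data, no name
+//		Note  string `xml:"meta>note"`                 // nested under a meta child
+//		Title string `xml:"http://example.com/ns title"` // namespaced element
+//	}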
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
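+// For example (a sketch, not from the original source), given
+//
+//	type response struct {
+//		XMLName Name `xml:"DAV: response"`
+//	}
+//
+// lookupXMLName reports xmlns "DAV:" and name "response" for that type.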
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go new file mode 100644 index 0000000..5b79cbe --- /dev/null +++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. 
+ for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. 
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
    +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. 
+ d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // 
unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&なまえ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&なまえ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + 
swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want 
%+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"
<p nowrap>", "p", "nowrap"},
+ {"<p nowrap >
    ", "p", "nowrap"}, + {"", "input", "checked"}, + {"", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "
<P>Foo<P>\n\n<P>
    Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. 
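+// For example, an input like <!DOCTYPE doc [<!-- a > b -->]> must come back
+// as one Directive token, with the > inside the comment not ending the
+// directive.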
+ +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go new file mode 100644 index 0000000..514db5d --- /dev/null +++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/webdav" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." + } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... 
+ // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go new file mode 100644 index 0000000..344ac5c --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). 
If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. +func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. 
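+ // It is kept as a min-heap ordered by expiry time (see the byExpiry
+ // methods below), so collectExpiredNodes only ever inspects index 0.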
+ byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. + for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, 
func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. + token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. 
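+// For example (illustrative): "Second-3600" parses to time.Hour, and
+// "Infinite, Second-4100000000" parses to infiniteTimeout, because only the
+// first comma-separated entry is considered.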
+func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/vendor/golang.org/x/net/webdav/lock_test.go b/vendor/golang.org/x/net/webdav/lock_test.go new file mode 100644 index 0000000..5cf14cd --- /dev/null +++ b/vendor/golang.org/x/net/webdav/lock_test.go @@ -0,0 +1,731 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. +// - i means an infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. 
+ if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) + for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. 
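+	// The sequence below confirms /tweedle/dee (holding the shared lock),
+	// checks that a second Confirm on /tweedle/dum fails until the first is
+	// released, and that Unlock fails while any confirmation is outstanding.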
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + 
ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + } + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. + token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. 
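+				// Forget the token so that a later iteration treats this
+				// name as unlocked and calls Create rather than
+				// Confirm, Refresh or Unlock.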
+ tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. + if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. + if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. 
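+		// (The converse need not hold: m.byName also contains unlocked
+		// ancestor nodes, kept alive only by their refCounts.)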
+ if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. + if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. + "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go new file mode 100644 index 0000000..e36a3b3 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/prop.go @@ -0,0 +1,418 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" + + "golang.org/x/net/context" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. 
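+// A Multi-Status response carries one propstat element per distinct status
+// code, grouping the properties to which that status applies.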
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. + Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. + XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. 
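+	// For example, the getcontentlength entry below sets dir to false, as a
+	// content length is meaningless for a collection.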
+ dir bool +}{ + {Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + {Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + {Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + {Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, + }, + {Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + {Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + {Space: "DAV:", Local: "lockdiscovery"}: {}, + {Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(ctx, fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// Propnames returns the property names defined for resource name. 
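+//
+// The result mixes the names of the applicable live properties with any
+// dead-property names held by the file, in no particular order.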
+func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
+	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	fi, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	isDir := fi.IsDir()
+
+	var deadProps map[xml.Name]Property
+	if dph, ok := f.(DeadPropsHolder); ok {
+		deadProps, err = dph.DeadProps()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
+	for pn, prop := range liveProps {
+		if prop.findFn != nil && (prop.dir || !isDir) {
+			pnames = append(pnames, pn)
+		}
+	}
+	for pn := range deadProps {
+		pnames = append(pnames, pn)
+	}
+	return pnames, nil
+}
+
+// Allprop returns the properties defined for resource name and the properties
+// named in include.
+//
+// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
+// within the RFC plus dead properties. Other live properties should only be
+// returned if they are named in 'include'.
+//
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
+	pnames, err := propnames(ctx, fs, ls, name)
+	if err != nil {
+		return nil, err
+	}
+	// Add names from include if they are not already covered in pnames.
+	nameset := make(map[xml.Name]bool)
+	for _, pn := range pnames {
+		nameset[pn] = true
+	}
+	for _, pn := range include {
+		if !nameset[pn] {
+			pnames = append(pnames, pn)
+		}
+	}
+	return props(ctx, fs, ls, name, pnames)
+}
+
+// Patch patches the properties of resource name. The return values are
+// constrained in the same manner as DeadPropsHolder.Patch.
+func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
+	conflict := false
+loop:
+	for _, patch := range patches {
+		for _, p := range patch.Props {
+			if _, ok := liveProps[p.XMLName]; ok {
+				conflict = true
+				break loop
+			}
+		}
+	}
+	if conflict {
+		pstatForbidden := Propstat{
+			Status:   http.StatusForbidden,
+			XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
+		}
+		pstatFailedDep := Propstat{
+			Status: StatusFailedDependency,
+		}
+		for _, patch := range patches {
+			for _, p := range patch.Props {
+				if _, ok := liveProps[p.XMLName]; ok {
+					pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
+				} else {
+					pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
+				}
+			}
+		}
+		return makePropstats(pstatForbidden, pstatFailedDep), nil
+	}
+
+	f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	if dph, ok := f.(DeadPropsHolder); ok {
+		ret, err := dph.Patch(patches)
+		if err != nil {
+			return nil, err
+		}
+		// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
+		// "The contents of the prop XML element must only list the names of
+		// properties to which the result in the status element applies."
+		for _, pstat := range ret {
+			for i, p := range pstat.Props {
+				pstat.Props[i] = Property{XMLName: p.XMLName}
+			}
+		}
+		return ret, nil
+	}
+	// The file doesn't implement the optional DeadPropsHolder interface, so
+	// all patches are forbidden.
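+	// For example, a FileSystem built on this package's Dir type hands back
+	// plain *os.File values, which do not implement DeadPropsHolder.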
+	pstat := Propstat{Status: http.StatusForbidden}
+	for _, patch := range patches {
+		for _, p := range patch.Props {
+			pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+		}
+	}
+	return []Propstat{pstat}, nil
+}
+
+func escapeXML(s string) string {
+	for i := 0; i < len(s); i++ {
+		// As an optimization, if s contains only ASCII letters, digits or a
+		// few special characters, the escaped value is s itself and we don't
+		// need to allocate a buffer and convert between string and []byte.
+		switch c := s[i]; {
+		case c == ' ' || c == '_' ||
+			('+' <= c && c <= '9') || // Digits as well as + , - . and /
+			('A' <= c && c <= 'Z') ||
+			('a' <= c && c <= 'z'):
+			continue
+		}
+		// Otherwise, go through the full escaping process.
+		var buf bytes.Buffer
+		xml.EscapeText(&buf, []byte(s))
+		return buf.String()
+	}
+	return s
+}
+
+func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	if fi.IsDir() {
+		return `<D:collection xmlns:D="DAV:"/>`, nil
+	}
+	return "", nil
+}
+
+func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	if slashClean(name) == "/" {
+		// Hide the real name of a possibly prefixed root directory.
+		return "", nil
+	}
+	return escapeXML(fi.Name()), nil
+}
+
+func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	return strconv.FormatInt(fi.Size(), 10), nil
+}
+
+func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	return fi.ModTime().Format(http.TimeFormat), nil
+}
+
+func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	// This implementation is based on serveContent's code in the standard
+	// net/http package.
+	ctype := mime.TypeByExtension(filepath.Ext(name))
+	if ctype != "" {
+		return ctype, nil
+	}
+	// Read a chunk to decide between utf-8 text and binary.
+	var buf [512]byte
+	n, err := io.ReadFull(f, buf[:])
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		return "", err
+	}
+	ctype = http.DetectContentType(buf[:n])
+	// Rewind file.
+	_, err = f.Seek(0, os.SEEK_SET)
+	return ctype, err
+}
+
+func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	// The Apache http 2.4 web server by default concatenates the
+	// modification time and size of a file. We replicate the heuristic
+	// with nanosecond granularity.
+	return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
+}
+
+func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+	return `` +
+		`<D:lockentry xmlns:D="DAV:">` +
+		`<D:lockscope><D:exclusive/></D:lockscope>` +
+		`<D:locktype><D:write/></D:locktype>` +
+		`</D:lockentry>`, nil
+}
diff --git a/vendor/golang.org/x/net/webdav/prop_test.go b/vendor/golang.org/x/net/webdav/prop_test.go
new file mode 100644
index 0000000..57d0e82
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/prop_test.go
@@ -0,0 +1,613 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"os"
+	"reflect"
+	"sort"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestMemPS(t *testing.T) {
+	ctx := context.Background()
+	// calcProps calculates the getlastmodified and getetag DAV: property
+	// values in pstats for resource name in file-system fs.
+	calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {
+		fi, err := fs.Stat(ctx, name)
+		if err != nil {
+			return err
+		}
+		for _, pst := range pstats {
+			for i, p := range pst.Props {
+				switch p.XMLName {
+				case xml.Name{Space: "DAV:", Local: "getlastmodified"}:
+					p.InnerXML = []byte(fi.ModTime().Format(http.TimeFormat))
+					pst.Props[i] = p
+				case xml.Name{Space: "DAV:", Local: "getetag"}:
+					if fi.IsDir() {
+						continue
+					}
+					etag, err := findETag(ctx, fs, ls, name, fi)
+					if err != nil {
+						return err
+					}
+					p.InnerXML = []byte(etag)
+					pst.Props[i] = p
+				}
+			}
+		}
+		return nil
+	}
+
+	const (
+		lockEntry = `` +
+			`<D:lockentry xmlns:D="DAV:">` +
+			`<D:lockscope><D:exclusive/></D:lockscope>` +
+			`<D:locktype><D:write/></D:locktype>` +
+			`</D:lockentry>`
+		statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`
+	)
+
+	type propOp struct {
+		op            string
+		name          string
+		pnames        []xml.Name
+		patches       []Proppatch
+		wantPnames    []xml.Name
+		wantPropstats []Propstat
+	}
+
+	testCases := []struct {
+		desc        string
+		noDeadProps bool
+		buildfs     []string
+		propOp      []propOp
+	}{{
+		desc:    "propname",
+		buildfs: []string{"mkdir /dir", "touch /file"},
+		propOp: []propOp{{
+			op:   "propname",
+			name: "/dir",
+			wantPnames: []xml.Name{
+				{Space: "DAV:", Local: "resourcetype"},
+				{Space: "DAV:", Local: "displayname"},
+				{Space: "DAV:", Local: "supportedlock"},
+				{Space: "DAV:", Local: "getlastmodified"},
+			},
+		}, {
+			op:   "propname",
+			name: "/file",
+			wantPnames: []xml.Name{
+				{Space: "DAV:", Local: "resourcetype"},
+				{Space: "DAV:", Local: "displayname"},
+				{Space: "DAV:", Local: "getcontentlength"},
+				{Space: "DAV:", Local: "getlastmodified"},
+				{Space: "DAV:", Local: "getcontenttype"},
+				{Space: "DAV:", Local: "getetag"},
+				{Space: "DAV:", Local: "supportedlock"},
+			},
+		}},
+	}, {
+		desc:    "allprop dir and file",
+		buildfs: []string{"mkdir /dir", "write /file foobarbaz"},
+		propOp: []propOp{{
+			op:   "allprop",
+			name: "/dir",
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
+					InnerXML: []byte("dir"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getlastmodified"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "supportedlock"},
+					InnerXML: []byte(lockEntry),
+				}},
+			}},
+		}, {
+			op:   "allprop",
+			name: "/file",
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(""),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
+					InnerXML: []byte("file"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontentlength"},
+					InnerXML: []byte("9"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getlastmodified"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontenttype"},
+					InnerXML: []byte("text/plain; charset=utf-8"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getetag"},
+					InnerXML: nil, // Calculated during test.
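+					// (getetag and getlastmodified depend on the test
+					// file's mtime, so calcProps fills them in at run time.)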
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "supportedlock"},
+					InnerXML: []byte(lockEntry),
+				}},
+			}},
+		}, {
+			op:   "allprop",
+			name: "/file",
+			pnames: []xml.Name{
+				{"DAV:", "resourcetype"},
+				{"foo", "bar"},
+			},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(""),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
+					InnerXML: []byte("file"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontentlength"},
+					InnerXML: []byte("9"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getlastmodified"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getcontenttype"},
+					InnerXML: []byte("text/plain; charset=utf-8"),
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "getetag"},
+					InnerXML: nil, // Calculated during test.
+				}, {
+					XMLName:  xml.Name{Space: "DAV:", Local: "supportedlock"},
+					InnerXML: []byte(lockEntry),
+				}}}, {
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "foo", Local: "bar"},
+				}}},
+			},
+		}},
+	}, {
+		desc:    "propfind DAV:resourcetype",
+		buildfs: []string{"mkdir /dir", "touch /file"},
+		propOp: []propOp{{
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "resourcetype"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
+				}},
+			}},
+		}, {
+			op:     "propfind",
+			name:   "/file",
+			pnames: []xml.Name{{"DAV:", "resourcetype"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "resourcetype"},
+					InnerXML: []byte(""),
+				}},
+			}},
+		}},
+	}, {
+		desc:    "propfind unsupported DAV properties",
+		buildfs: []string{"mkdir /dir"},
+		propOp: []propOp{{
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "getcontentlanguage"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"},
+				}},
+			}},
+		}, {
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "creationdate"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "DAV:", Local: "creationdate"},
+				}},
+			}},
+		}},
+	}, {
+		desc:    "propfind getetag for files but not for directories",
+		buildfs: []string{"mkdir /dir", "touch /file"},
+		propOp: []propOp{{
+			op:     "propfind",
+			name:   "/dir",
+			pnames: []xml.Name{{"DAV:", "getetag"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusNotFound,
+				Props: []Property{{
+					XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
+				}},
+			}},
+		}, {
+			op:     "propfind",
+			name:   "/file",
+			pnames: []xml.Name{{"DAV:", "getetag"}},
+			wantPropstats: []Propstat{{
+				Status: http.StatusOK,
+				Props: []Property{{
+					XMLName:  xml.Name{Space: "DAV:", Local: "getetag"},
+					InnerXML: nil, // Calculated during test.
+ }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: 
[]Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(ctx, fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(ctx, fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(ctx, fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. 
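+			// Sorting both sides first makes reflect.DeepEqual insensitive
+			// to map-iteration order: properties sort by XML name and
+			// propstats by status.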
+ for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go new file mode 100644 index 0000000..7b56687 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav.go @@ -0,0 +1,702 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package webdav provides a WebDAV server implementation. +package webdav // import "golang.org/x/net/webdav" + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. 
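+	// The error argument may be nil, indicating that the request was
+	// handled without error.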
+ Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
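+		// A failed confirmation only rules out this particular list; the If
+		// header is a disjunction, so keep trying the remaining lists.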
+ if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). + return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + ctx := getContext(r) + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + // TODO: return MultiStatus where appropriate. + + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. 
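+	// The resource is created or truncated, the request body is copied in,
+	// and an ETag derived from the resulting FileInfo is returned.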
+ ctx := getContext(r) + + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. + if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + ctx := getContext(r) + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." + return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. 
A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." + if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + ctx := getContext(r) + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". + w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. 
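+	// For example, a header of the (hypothetical) form
+	//
+	//	Lock-Token: <urn:example:lock-token>
+	//
+	// yields the token "urn:example:lock-token".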
+ t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := getContext(r) + fi, err := h.FileSystem.Stat(ctx, reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + var pstats []Propstat + if pf.Propname != nil { + pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return err + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return err + } + return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats)) + } + + walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := getContext(r) + + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() + if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: 
[]byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// These constraints are enforced by the handleXxx methods. +func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/vendor/golang.org/x/net/webdav/webdav_test.go b/vendor/golang.org/x/net/webdav/webdav_test.go new file mode 100644 index 0000000..25e0d54 --- /dev/null +++ b/vendor/golang.org/x/net/webdav/webdav_test.go @@ -0,0 +1,344 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package webdav + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "golang.org/x/net/context" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + // createLockBody comes from the example in Section 9.10.7. + const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) + if err != nil { + return nil, err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return res.Header, nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + ctx := context.Background() + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. 
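+	//
+	// The 301s expected below come from net/http.ServeMux, not the WebDAV
+	// handler: when only the subtree pattern "/a/" is registered, a request
+	// for "/a" is redirected to "/a/" with StatusMovedPermanently. The 404s
+	// are requests that fall entirely outside the registered prefix.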
+ + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + var lockToken string + wantG := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil { + t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) + continue + } else { + lockToken = h.Get("Lock-Token") + } + + ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) + wantH := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) + continue + } + + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, + "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, + "/a/b/c/": {"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} + +func TestEscapeXML(t *testing.T) { + // These test cases aren't exhaustive, and there is more than one way to + // escape e.g. a quot (as """ or """) or an apos. We presume that + // the encoding/xml package tests xml.EscapeText more thoroughly. 
This test + // here is just a sanity check for this package's escapeXML function, and + // its attempt to provide a fast path (and avoid a bytes.Buffer allocation) + // when escaping filenames is obviously a no-op. + testCases := map[string]string{ + "": "", + " ": " ", + "&": "&", + "*": "*", + "+": "+", + ",": ",", + "-": "-", + ".": ".", + "/": "/", + "0": "0", + "9": "9", + ":": ":", + "<": "<", + ">": ">", + "A": "A", + "_": "_", + "a": "a", + "~": "~", + "\u0201": "\u0201", + "&": "&amp;", + "foo&baz": "foo&<b/ar>baz", + } + + for in, want := range testCases { + if got := escapeXML(in); got != want { + t.Errorf("in=%q: got %q, want %q", in, got, want) + } + } +} + +func TestFilenameEscape(t *testing.T) { + hrefRe := regexp.MustCompile(`([^<]*)`) + displayNameRe := regexp.MustCompile(`([^<]*)`) + do := func(method, urlStr string) (string, string, error) { + req, err := http.NewRequest(method, urlStr, nil) + if err != nil { + return "", "", err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + hrefMatch := hrefRe.FindStringSubmatch(string(b)) + if len(hrefMatch) != 2 { + return "", "", errors.New("D:href not found") + } + displayNameMatch := displayNameRe.FindStringSubmatch(string(b)) + if len(displayNameMatch) != 2 { + return "", "", errors.New("D:displayname not found") + } + + return hrefMatch[1], displayNameMatch[1], nil + } + + testCases := []struct { + name, wantHref, wantDisplayName string + }{{ + name: `/foo%bar`, + wantHref: `/foo%25bar`, + wantDisplayName: `foo%bar`, + }, { + name: `/こんにちわ世界`, + wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + wantDisplayName: `こんにちわ世界`, + }, { + name: `/Program Files/`, + wantHref: `/Program%20Files`, + wantDisplayName: `Program Files`, + }, { + name: `/go+lang`, + wantHref: `/go+lang`, + wantDisplayName: `go+lang`, + }, { + name: `/go&lang`, + wantHref: `/go&lang`, + wantDisplayName: `go&lang`, + }, { + name: `/goexclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = ixml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = errInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. 
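+	// A conforming request body therefore carries an exclusive scope and a
+	// write type, as in the section 9.10.7 example:
+	//
+	//	<D:lockinfo xmlns:D="DAV:">
+	//	  <D:lockscope><D:exclusive/></D:lockscope>
+	//	  <D:locktype><D:write/></D:locktype>
+	//	  <D:owner><D:href>http://example.org/~ejw/contact.html</D:href></D:owner>
+	//	</D:lockinfo>
+	//
+	// Anything else is rejected with 501 Not Implemented.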
+ if li.Exclusive == nil || li.Shared != nil || li.Write == nil { + return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo + } + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + timeout := ld.Duration / time.Second + return fmt.Fprintf(w, "\n"+ + "\n"+ + " \n"+ + " \n"+ + " %s\n"+ + " %s\n"+ + " Second-%d\n"+ + " %s\n"+ + " %s\n"+ + "", + depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), + ) +} + +func escape(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '&', '\'', '<', '>': + b := bytes.NewBuffer(nil) + ixml.EscapeText(b, []byte(s)) + return b.String() + } + } + return s +} + +// Next returns the next token, if any, in the XML stream of d. +// RFC 4918 requires to ignore comments, processing instructions +// and directives. +// http://www.webdav.org/specs/rfc4918.html#property_values +// http://www.webdav.org/specs/rfc4918.html#xml-extensibility +func next(d *ixml.Decoder) (ixml.Token, error) { + for { + t, err := d.Token() + if err != nil { + return t, err + } + switch t.(type) { + case ixml.Comment, ixml.Directive, ixml.ProcInst: + continue + default: + return t, nil + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type propfindProps []xml.Name + +// UnmarshalXML appends the property names enclosed within start to pn. +// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + for { + t, err := next(d) + if err != nil { + return err + } + switch t.(type) { + case ixml.EndElement: + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + name := t.(ixml.StartElement).Name + t, err = next(d) + if err != nil { + return err + } + if _, ok := t.(ixml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, xml.Name(name)) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type propfind struct { + XMLName ixml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop propfindProps `xml:"DAV: prop"` + Include propfindProps `xml:"DAV: include"` +} + +func readPropfind(r io.Reader) (pf propfind, status int, err error) { + c := countingReader{r: r} + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to propfind allprop. 
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. +type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. +type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. 
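+	// (Conversion to a locally defined type drops the method set, so
+	// EncodeElement applies the default struct encoding instead of calling
+	// back into a custom MarshalXML.)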
+ type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. +type response struct { + XMLName ixml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MultistatusWriter marshals one or more Responses into a XML +// multistatus response. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 +type multistatusWriter struct { + // ResponseDescription contains the optional responsedescription + // of the multistatus XML element. Only the latest content before + // close will be emitted. Empty response descriptions are not + // written. + responseDescription string + + w http.ResponseWriter + enc *ixml.Encoder +} + +// Write validates and emits a DAV response as part of a multistatus response +// element. +// +// It sets the HTTP status code of its underlying http.ResponseWriter to 207 +// (Multi-Status) and populates the Content-Type header. If r is the +// first, valid response to be written, Write prepends the XML representation +// of r with a multistatus tag. Callers must call close after the last response +// has been written. +func (w *multistatusWriter) write(r *response) error { + switch len(r.Href) { + case 0: + return errInvalidResponse + case 1: + if len(r.Propstat) > 0 != (r.Status == "") { + return errInvalidResponse + } + default: + if len(r.Propstat) > 0 || r.Status == "" { + return errInvalidResponse + } + } + err := w.writeHeader() + if err != nil { + return err + } + return w.enc.Encode(r) +} + +// writeHeader writes a XML multistatus start element on w's underlying +// http.ResponseWriter and returns the result of the write operation. +// After the first write attempt, writeHeader becomes a no-op. +func (w *multistatusWriter) writeHeader() error { + if w.enc != nil { + return nil + } + w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") + w.w.WriteHeader(StatusMulti) + _, err := fmt.Fprintf(w.w, ``) + if err != nil { + return err + } + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ + Space: "DAV:", + Local: "multistatus", + }, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, + Value: "DAV:", + }}, + }) +} + +// Close completes the marshalling of the multistatus response. It returns +// an error if the multistatus response could not be completed. If both the +// return value and field enc of w are nil, then no multistatus response has +// been written. 
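+//
+// A typical call sequence, as in handlePropfind and handleProppatch above:
+//
+//	mw := multistatusWriter{w: w}
+//	writeErr := mw.write(makePropstatResponse(href, pstats)) // once per resource
+//	closeErr := mw.close()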
+func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. 
+ case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/vendor/golang.org/x/net/webdav/xml_test.go b/vendor/golang.org/x/net/webdav/xml_test.go new file mode 100644 index 0000000..a3d9e1e --- /dev/null +++ b/vendor/golang.org/x/net/webdav/xml_test.go @@ -0,0 +1,906 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "golang.org/x/net/webdav/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " no end tag \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \xff \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "\n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "\n" + + " \n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "\n" + + " \n" + + " \n" + + " gopher\n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " http://example.org/~ejw/contact.html\n" + + " \n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n http://example.org/~ejw/contact.html\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, 
+ Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored whitespace", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "\n" + + " foobar\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + " *boss*\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "\n" + + " " + + " " + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "\n" + + " foo\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "\n" + + " bar\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if 
err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses []response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 424 Failed Dependency` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 409 Conflict` + + ` ` + + ` Copyright Owner cannot be deleted or altered.` + + `` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(``), + }, + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` HTTP/1.1 423 Locked` + + ` ` + + ` ` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `` + + `Box type A` + + ``), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `` + + `J.J. Johnson` + + ``), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` Box type A` + + ` J.J. 
Johnson` + + ` ` + + ` HTTP/1.1 200 OK` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 403 Forbidden` + + ` The user does not have access to the DingALing property.` + + ` ` + + ` ` + + ` There has been an access violation error.` + + ``, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: ``, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: "HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, 
fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `` + + `` + + ` ` + + ` somevalue` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` Jim Whitehead` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + 
t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. +// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. +func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 0000000..69a4ac7 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. 
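+// Both arguments must be absolute URLs, for example:
+//
+//	config, err := NewConfig("ws://example.com/echo", "http://example.com/")
+//
+// The dial functions below reject location schemes other than "ws" and
+// "wss" with ErrBadScheme.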
+func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 0000000..2dab943 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/dial_test.go b/vendor/golang.org/x/net/websocket/dial_test.go new file mode 100644 index 0000000..aa03e30 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial_test.go @@ -0,0 +1,43 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http/httptest" + "testing" + "time" +) + +// This test depend on Go 1.3+ because in earlier versions the Dialer won't be +// used in TLS connections and a timeout won't be triggered. 
+func TestDialConfigTLSWithDialer(t *testing.T) { + tlsServer := httptest.NewTLSServer(nil) + tlsServerAddr := tlsServer.Listener.Addr().String() + log.Print("Test TLS WebSocket server listening on ", tlsServerAddr) + defer tlsServer.Close() + config, _ := NewConfig(fmt.Sprintf("wss://%s/echo", tlsServerAddr), "http://localhost") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + config.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} diff --git a/vendor/golang.org/x/net/websocket/exampledial_test.go b/vendor/golang.org/x/net/websocket/exampledial_test.go new file mode 100644 index 0000000..72bb9d4 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/exampledial_test.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "fmt" + "log" + + "golang.org/x/net/websocket" +) + +// This example demonstrates a trivial client. +func ExampleDial() { + origin := "http://localhost/" + url := "ws://localhost:12345/ws" + ws, err := websocket.Dial(url, "", origin) + if err != nil { + log.Fatal(err) + } + if _, err := ws.Write([]byte("hello, world!\n")); err != nil { + log.Fatal(err) + } + var msg = make([]byte, 512) + var n int + if n, err = ws.Read(msg); err != nil { + log.Fatal(err) + } + fmt.Printf("Received: %s.\n", msg[:n]) +} diff --git a/vendor/golang.org/x/net/websocket/examplehandler_test.go b/vendor/golang.org/x/net/websocket/examplehandler_test.go new file mode 100644 index 0000000..f22a98f --- /dev/null +++ b/vendor/golang.org/x/net/websocket/examplehandler_test.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket_test + +import ( + "io" + "net/http" + + "golang.org/x/net/websocket" +) + +// Echo the data received on the WebSocket. +func EchoServer(ws *websocket.Conn) { + io.Copy(ws, ws) +} + +// This example demonstrates a trivial echo server. +func ExampleHandler() { + http.Handle("/echo", websocket.Handler(EchoServer)) + err := http.ListenAndServe(":12345", nil) + if err != nil { + panic("ListenAndServe: " + err.Error()) + } +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 0000000..8cffdd1 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. 
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. 
+ hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) + frame.writer.Write(header) + data := make([]byte, length) + for i := range data { + data[i] = msg[i] ^ frame.header.MaskingKey[i%4] + } + frame.writer.Write(data) + err = frame.writer.Flush() + return length, err + } + frame.writer.Write(header) + frame.writer.Write(msg) + err = frame.writer.Flush() + return length, err +} + +func (frame *hybiFrameWriter) Close() error { return nil } + +type hybiFrameWriterFactory struct { + *bufio.Writer + needMaskingKey bool +} + +func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType} + if buf.needMaskingKey { + frameHeader.MaskingKey, err = generateMaskingKey() + if err != nil { + return nil, err + } + } + return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil +} + +type hybiFrameHandler struct { + conn *Conn + payloadType byte +} + +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { + if handler.conn.IsServerConn() { + // The client MUST mask all frames sent to the server. + if frame.(*hybiFrameReader).header.MaskingKey == nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } else { + // The server MUST NOT mask all frames. 
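// Illustration — a minimal sketch, not from the vendored source, of how the
// writer above assembles the first two header bytes of a short unmasked text
// frame. For the payload "hello" this yields {0x81, 0x05}: 0x80 (FIN bit)
// ORed with 0x01 (TextFrame opcode), then the 5-byte payload length. This
// matches the expected headers asserted in hybi_test.go later in this diff.
package main

import "fmt"

func main() {
	const (
		finBit    = 0x80
		textFrame = 0x01
	)
	payload := []byte("hello")
	header := []byte{finBit | textFrame, byte(len(payload))}
	fmt.Printf("% x\n", header) // 81 05
}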
+ if frame.(*hybiFrameReader).header.MaskingKey != nil { + handler.WriteClose(closeStatusProtocolError) + return nil, io.EOF + } + } + if header := frame.HeaderReader(); header != nil { + io.Copy(ioutil.Discard, header) + } + switch frame.PayloadType() { + case ContinuationFrame: + frame.(*hybiFrameReader).header.OpCode = handler.payloadType + case TextFrame, BinaryFrame: + handler.payloadType = frame.PayloadType() + case CloseFrame: + return nil, io.EOF + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return nil, err + } + io.Copy(ioutil.Discard, frame) + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } + } + return nil, nil + } + return frame, nil +} + +func (handler *hybiFrameHandler) WriteClose(status int) (err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame) + if err != nil { + return err + } + msg := make([]byte, 2) + binary.BigEndian.PutUint16(msg, uint16(status)) + _, err = w.Write(msg) + w.Close() + return err +} + +func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) { + handler.conn.wio.Lock() + defer handler.conn.wio.Unlock() + w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// newHybiConn creates a new WebSocket connection speaking hybi draft protocol. +func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + if buf == nil { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + buf = bufio.NewReadWriter(br, bw) + } + ws := &Conn{config: config, request: request, buf: buf, rwc: rwc, + frameReaderFactory: hybiFrameReaderFactory{buf.Reader}, + frameWriterFactory: hybiFrameWriterFactory{ + buf.Writer, request == nil}, + PayloadType: TextFrame, + defaultCloseStatus: closeStatusNormal} + ws.frameHandler = &hybiFrameHandler{conn: ws} + return ws +} + +// generateMaskingKey generates a masking key for a frame. +func generateMaskingKey() (maskingKey []byte, err error) { + maskingKey = make([]byte, 4) + if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil { + return + } + return +} + +// generateNonce generates a nonce consisting of a randomly selected 16-byte +// value that has been base64-encoded. +func generateNonce() (nonce []byte) { + key := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + panic(err) + } + nonce = make([]byte, 24) + base64.StdEncoding.Encode(nonce, key) + return +} + +// removeZone removes IPv6 zone identifer from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + +// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of +// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. 
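// Illustration — a standalone sketch, not part of the vendored file, of the
// nonce scheme generateNonce above uses for the Sec-WebSocket-Key header:
// 16 random bytes, base64-encoded into a 24-byte key. Only the standard
// library is used.
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"log"
)

func main() {
	key := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		log.Fatal(err)
	}
	nonce := base64.StdEncoding.EncodeToString(key)
	fmt.Println(len(nonce), nonce) // always 24 characters
}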
+func getNonceAccept(nonce []byte) (expected []byte, err error) { + h := sha1.New() + if _, err = h.Write(nonce); err != nil { + return + } + if _, err = h.Write([]byte(websocketGUID)); err != nil { + return + } + expected = make([]byte, 28) + base64.StdEncoding.Encode(expected, h.Sum(nil)) + return +} + +// Client handshake described in draft-ietf-hybi-thewebsocket-protocol-17 +func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { + bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") + + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") + bw.WriteString("Upgrade: websocket\r\n") + bw.WriteString("Connection: Upgrade\r\n") + nonce := generateNonce() + if config.handshakeData != nil { + nonce = []byte(config.handshakeData["key"]) + } + bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n") + bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n") + + if config.Version != ProtocolVersionHybi13 { + return ErrBadProtocolVersion + } + + bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n") + if len(config.Protocol) > 0 { + bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + err = config.Header.WriteSubset(bw, handshakeHeader) + if err != nil { + return err + } + + bw.WriteString("\r\n") + if err = bw.Flush(); err != nil { + return err + } + + resp, err := http.ReadResponse(br, &http.Request{Method: "GET"}) + if err != nil { + return err + } + if resp.StatusCode != 101 { + return ErrBadStatus + } + if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" || + strings.ToLower(resp.Header.Get("Connection")) != "upgrade" { + return ErrBadUpgrade + } + expectedAccept, err := getNonceAccept(nonce) + if err != nil { + return err + } + if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) { + return ErrChallengeResponse + } + if resp.Header.Get("Sec-WebSocket-Extensions") != "" { + return ErrUnsupportedExtensions + } + offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol") + if offeredProtocol != "" { + protocolMatched := false + for i := 0; i < len(config.Protocol); i++ { + if config.Protocol[i] == offeredProtocol { + protocolMatched = true + break + } + } + if !protocolMatched { + return ErrBadWebSocketProtocol + } + config.Protocol = []string{offeredProtocol} + } + + return nil +} + +// newHybiClientConn creates a client WebSocket connection after handshake. +func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn { + return newHybiConn(config, buf, rwc, nil) +} + +// A HybiServerHandshaker performs a server handshake using hybi draft protocol. +type hybiServerHandshaker struct { + *Config + accept []byte +} + +func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) { + c.Version = ProtocolVersionHybi13 + if req.Method != "GET" { + return http.StatusMethodNotAllowed, ErrBadRequestMethod + } + // HTTP version can be safely ignored. 
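// Illustration — a minimal sketch, not from the vendored source, of the
// Sec-WebSocket-Accept computation that getNonceAccept performs and
// hybiClientHandshake verifies: SHA-1 over the key concatenated with the
// fixed GUID, then base64. The input is the sample nonce from the protocol
// draft, reused by the tests later in this diff, so the expected output is
// s3pPLMBiTxaQ9kYGzzhZRbK+xOo=.
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	const guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
	nonce := "dGhlIHNhbXBsZSBub25jZQ=="
	sum := sha1.Sum([]byte(nonce + guid))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
	// Output: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}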
+ + if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" || + !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") { + return http.StatusBadRequest, ErrNotWebSocket + } + + key := req.Header.Get("Sec-Websocket-Key") + if key == "" { + return http.StatusBadRequest, ErrChallengeResponse + } + version := req.Header.Get("Sec-Websocket-Version") + switch version { + case "13": + c.Version = ProtocolVersionHybi13 + default: + return http.StatusBadRequest, ErrBadWebSocketVersion + } + var scheme string + if req.TLS != nil { + scheme = "wss" + } else { + scheme = "ws" + } + c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI()) + if err != nil { + return http.StatusBadRequest, err + } + protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol")) + if protocol != "" { + protocols := strings.Split(protocol, ",") + for i := 0; i < len(protocols); i++ { + c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i])) + } + } + c.accept, err = getNonceAccept([]byte(key)) + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusSwitchingProtocols, nil +} + +// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. +func Origin(config *Config, req *http.Request) (*url.URL, error) { + var origin string + switch config.Version { + case ProtocolVersionHybi13: + origin = req.Header.Get("Origin") + } + if origin == "" { + return nil, nil + } + return url.ParseRequestURI(origin) +} + +func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) { + if len(c.Protocol) > 0 { + if len(c.Protocol) != 1 { + // You need choose a Protocol in Handshake func in Server. + return ErrBadWebSocketProtocol + } + } + buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n") + buf.WriteString("Upgrade: websocket\r\n") + buf.WriteString("Connection: Upgrade\r\n") + buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n") + if len(c.Protocol) > 0 { + buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n") + } + // TODO(ukai): send Sec-WebSocket-Extensions. + if c.Header != nil { + err := c.Header.WriteSubset(buf, handshakeHeader) + if err != nil { + return err + } + } + buf.WriteString("\r\n") + return buf.Flush() +} + +func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiServerConn(c.Config, buf, rwc, request) +} + +// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol. +func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn { + return newHybiConn(config, buf, rwc, request) +} diff --git a/vendor/golang.org/x/net/websocket/hybi_test.go b/vendor/golang.org/x/net/websocket/hybi_test.go new file mode 100644 index 0000000..9504aa2 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi_test.go @@ -0,0 +1,608 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
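// Illustration — a minimal sketch, not from the vendored source, of what the
// AcceptHandshake comment above alludes to: choosing exactly one subprotocol
// inside a Server's Handshake func, and validating the Origin header with the
// exported Origin function. The "chat" protocol name and "/chat" route are
// assumptions for this sketch.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func main() {
	srv := websocket.Server{
		Handshake: func(config *websocket.Config, req *http.Request) error {
			origin, err := websocket.Origin(config, req)
			if err != nil || origin == nil {
				return fmt.Errorf("bad origin")
			}
			for _, p := range config.Protocol {
				if p == "chat" {
					config.Protocol = []string{p} // pin exactly one subprotocol
					return nil
				}
			}
			return fmt.Errorf("no acceptable subprotocol")
		},
		Handler: func(ws *websocket.Conn) { io.Copy(ws, ws) },
	}
	http.Handle("/chat", srv)
	log.Fatal(http.ListenAndServe(":12345", nil))
}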
+ +package websocket + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "testing" +) + +// Test the getNonceAccept function with values in +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 +func TestSecWebSocketAccept(t *testing.T) { + nonce := []byte("dGhlIHNhbXBsZSBub25jZQ==") + expected := []byte("s3pPLMBiTxaQ9kYGzzhZRbK+xOo=") + accept, err := getNonceAccept(nonce) + if err != nil { + t.Errorf("getNonceAccept: returned error %v", err) + return + } + if !bytes.Equal(expected, accept) { + t.Errorf("getNonceAccept: expected %q got %q", expected, accept) + } +} + +func TestHybiClientHandshake(t *testing.T) { + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } + } + } +} + +func TestHybiClientHandshakeWithHeader(t *testing.T) { + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= +Sec-WebSocket-Protocol: chat + +`)) + var err error + config := new(Config) + config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + 
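// Illustration — a sketch, not from the vendored source, of the client-side
// counterpart of the handshake test in progress here: building a Config with
// NewConfig, attaching an extra request header, and dialing with DialConfig.
// The URL and header value are placeholders; Header is set explicitly so the
// sketch does not depend on how NewConfig initializes it.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func main() {
	config, err := websocket.NewConfig("ws://server.example.com/chat", "http://example.com")
	if err != nil {
		log.Fatal(err)
	}
	config.Protocol = []string{"chat", "superchat"}
	config.Header = http.Header{}
	config.Header.Add("User-Agent", "test")
	ws, err := websocket.DialConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()
}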
config.Version = ProtocolVersionHybi13 + config.Header = http.Header(make(map[string][]string)) + config.Header.Add("User-Agent", "test") + + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + err = hybiClientHandshake(config, br, bw) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + req, err := http.ReadRequest(bufio.NewReader(b)) + if err != nil { + t.Fatalf("read request: %v", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %q", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %q", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) + } + if req.Host != "server.example.com" { + t.Errorf("request Host expected server.example.com, but got %v", req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + "User-Agent": "test", + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + } + } +} + +func TestHybiServerHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + expectedProtocols := []string{"chat", "superchat"} + if fmt.Sprintf("%v", config.Protocol) != fmt.Sprintf("%v", expectedProtocols) { + t.Errorf("protocol expected %q but got %q", expectedProtocols, config.Protocol) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = config.Protocol[:1] + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeNoSubProtocol(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", 
http.StatusSwitchingProtocols, code) + } + if len(config.Protocol) != 0 { + t.Errorf("len(config.Protocol) expected 0, but got %q", len(config.Protocol)) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} + +func TestHybiServerHandshakeHybiBadVersion(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Sec-WebSocket-Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 9 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != ErrBadWebSocketVersion { + t.Errorf("handshake expected err %q but got %q", ErrBadWebSocketVersion, err) + } + if code != http.StatusBadRequest { + t.Errorf("status expected %q but got %q", http.StatusBadRequest, code) + } +} + +func testHybiFrame(t *testing.T, testHeader, testPayload, testMaskedPayload []byte, frameHeader *hybiFrameHeader) { + b := bytes.NewBuffer([]byte{}) + frameWriterFactory := &hybiFrameWriterFactory{bufio.NewWriter(b), false} + w, _ := frameWriterFactory.NewFrameWriter(TextFrame) + w.(*hybiFrameWriter).header = frameHeader + _, err := w.Write(testPayload) + w.Close() + if err != nil { + t.Errorf("Write error %q", err) + } + var expectedFrame []byte + expectedFrame = append(expectedFrame, testHeader...) + expectedFrame = append(expectedFrame, testMaskedPayload...) 
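// Illustration — a standalone sketch, not part of the vendored file, of the
// masking arithmetic testHybiFrame exercises: each payload byte is XORed
// with the masking key at index i%4, and applying the same operation twice
// restores the original. The key and payload below are the exact vectors
// used by the masked-frame test that follows.
package main

import (
	"bytes"
	"fmt"
)

// mask applies the RFC 6455 masking transform to p with the 4-byte key.
func mask(key [4]byte, p []byte) []byte {
	out := make([]byte, len(p))
	for i, b := range p {
		out[i] = b ^ key[i%4]
	}
	return out
}

func main() {
	key := [4]byte{0xcc, 0x55, 0x80, 0x20}
	masked := mask(key, []byte("hello"))
	fmt.Printf("% x\n", masked)                                  // a4 30 ec 4c a3
	fmt.Println(bytes.Equal(mask(key, masked), []byte("hello"))) // true
}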
+ if !bytes.Equal(expectedFrame, b.Bytes()) { + t.Errorf("frame expected %q got %q", expectedFrame, b.Bytes()) + } + frameReaderFactory := &hybiFrameReaderFactory{bufio.NewReader(b)} + r, err := frameReaderFactory.NewFrameReader() + if err != nil { + t.Errorf("Read error %q", err) + } + if header := r.HeaderReader(); header == nil { + t.Errorf("no header") + } else { + actualHeader := make([]byte, r.Len()) + n, err := header.Read(actualHeader) + if err != nil { + t.Errorf("Read header error %q", err) + } else { + if n < len(testHeader) { + t.Errorf("header too short %q got %q", testHeader, actualHeader[:n]) + } + if !bytes.Equal(testHeader, actualHeader[:n]) { + t.Errorf("header expected %q got %q", testHeader, actualHeader[:n]) + } + } + } + if trailer := r.TrailerReader(); trailer != nil { + t.Errorf("unexpected trailer %q", trailer) + } + frame := r.(*hybiFrameReader) + if frameHeader.Fin != frame.header.Fin || + frameHeader.OpCode != frame.header.OpCode || + len(testPayload) != int(frame.header.Length) { + t.Errorf("mismatch %v (%d) vs %v", frameHeader, len(testPayload), frame) + } + payload := make([]byte, len(testPayload)) + _, err = r.Read(payload) + if err != nil && err != io.EOF { + t.Errorf("read %v", err) + } + if !bytes.Equal(testPayload, payload) { + t.Errorf("payload %q vs %q", testPayload, payload) + } +} + +func TestHybiShortTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x81, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x81, 125}, payload, payload, frameHeader) +} + +func TestHybiShortMaskedTextFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame, + MaskingKey: []byte{0xcc, 0x55, 0x80, 0x20}} + payload := []byte("hello") + maskedPayload := []byte{0xa4, 0x30, 0xec, 0x4c, 0xa3} + header := []byte{0x81, 0x85} + header = append(header, frameHeader.MaskingKey...) 
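// Illustration — a minimal sketch, not from the vendored source, of the
// close-frame payload encoding used by WriteClose in hybi.go and checked by
// the control-frame test below: the status code as a big-endian uint16, so
// status 1000 (normal closure) becomes {0x03, 0xe8}.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	payload := make([]byte, 2)
	binary.BigEndian.PutUint16(payload, 1000)
	fmt.Printf("% x\n", payload) // 03 e8
}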
+ testHybiFrame(t, header, payload, maskedPayload, frameHeader) +} + +func TestHybiShortBinaryFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: BinaryFrame} + payload := []byte("hello") + testHybiFrame(t, []byte{0x82, 0x05}, payload, payload, frameHeader) + + payload = make([]byte, 125) + testHybiFrame(t, []byte{0x82, 125}, payload, payload, frameHeader) +} + +func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} + payload = []byte{0x03, 0xe8} // 1000 + testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) +} + +func TestHybiLongFrame(t *testing.T) { + frameHeader := &hybiFrameHeader{Fin: true, OpCode: TextFrame} + payload := make([]byte, 126) + testHybiFrame(t, []byte{0x81, 126, 0x00, 126}, payload, payload, frameHeader) + + payload = make([]byte, 65535) + testHybiFrame(t, []byte{0x81, 126, 0xff, 0xff}, payload, payload, frameHeader) + + payload = make([]byte, 65536) + testHybiFrame(t, []byte{0x81, 127, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00}, payload, payload, frameHeader) +} + +func TestHybiClientRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[2:7], msg[:n]) { + t.Errorf("read 1st frame %v, got %v", wireData[2:7], msg[:n]) + } + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(wireData[16:21], msg[:n]) { + t.Errorf("read 2nd frame %v, got %v", wireData[16:21], msg[:n]) + } + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiShortRead(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o', + 0x89, 0x05, 'h', 'e', 'l', 'l', 'o', // ping + 0x81, 0x05, 'w', 'o', 'r', 'l', 'd'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + step := 0 + pos := 0 + expectedPos := []int{2, 5, 16, 19} + expectedLen := []int{3, 2, 3, 2} + for { + msg := make([]byte, 3) + n, err := conn.Read(msg) + if step >= len(expectedPos) { + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } + return + } + pos = expectedPos[step] + endPos := pos + expectedLen[step] + if err != nil { + t.Errorf("read from %d, got error %q", pos, err) + return + } + if 
n != endPos-pos { + t.Errorf("read from %d, expect %d, got %d", pos, endPos-pos, n) + } + if !bytes.Equal(wireData[pos:endPos], msg[:n]) { + t.Errorf("read from %d, frame %v, got %v", pos, wireData[pos:endPos], msg[:n]) + } + step++ + } +} + +func TestHybiServerRead(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + 0x89, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // ping: hello + 0x81, 0x85, 0xed, 0x83, 0xb4, 0x24, + 0x9a, 0xec, 0xc6, 0x48, 0x89, // world + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + + expected := [][]byte{[]byte("hello"), []byte("world")} + + msg := make([]byte, 512) + n, err := conn.Read(msg) + if err != nil { + t.Errorf("read 1st frame, error %q", err) + } + if n != 5 { + t.Errorf("read 1st frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[0], msg[:n]) { + t.Errorf("read 1st frame %q, got %q", expected[0], msg[:n]) + } + + n, err = conn.Read(msg) + if err != nil { + t.Errorf("read 2nd frame, error %q", err) + } + if n != 5 { + t.Errorf("read 2nd frame, expect 5, got %d", n) + } + if !bytes.Equal(expected[1], msg[:n]) { + t.Errorf("read 2nd frame %q, got %q", expected[1], msg[:n]) + } + + n, err = conn.Read(msg) + if err == nil { + t.Errorf("read not EOF") + } + if n != 0 { + t.Errorf("expect read 0, got %d", n) + } +} + +func TestHybiServerReadWithoutMasking(t *testing.T) { + wireData := []byte{0x81, 0x05, 'h', 'e', 'l', 'l', 'o'} + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, new(http.Request)) + // server MUST close the connection upon receiving a non-masked frame. + msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +func TestHybiClientReadWithMasking(t *testing.T) { + wireData := []byte{0x81, 0x85, 0xcc, 0x55, 0x80, 0x20, + 0xa4, 0x30, 0xec, 0x4c, 0xa3, // hello + } + br := bufio.NewReader(bytes.NewBuffer(wireData)) + bw := bufio.NewWriter(bytes.NewBuffer([]byte{})) + conn := newHybiConn(newConfig(t, "/"), bufio.NewReadWriter(br, bw), nil, nil) + + // client MUST close the connection upon receiving a masked frame. 
+ msg := make([]byte, 512) + _, err := conn.Read(msg) + if err != io.EOF { + t.Errorf("read 1st frame, expect %q, but got %q", io.EOF, err) + } +} + +// Test the hybiServerHandshaker supports firefox implementation and +// checks Connection request header include (but it's not necessary +// equal to) "upgrade" +func TestHybiServerFirefoxHandshake(t *testing.T) { + config := new(Config) + handshaker := &hybiServerHandshaker{Config: config} + br := bufio.NewReader(strings.NewReader(`GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: keep-alive, upgrade +Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== +Origin: http://example.com +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 + +`)) + req, err := http.ReadRequest(br) + if err != nil { + t.Fatal("request", err) + } + code, err := handshaker.ReadHandshake(br, req) + if err != nil { + t.Errorf("handshake failed: %v", err) + } + if code != http.StatusSwitchingProtocols { + t.Errorf("status expected %q but got %q", http.StatusSwitchingProtocols, code) + } + b := bytes.NewBuffer([]byte{}) + bw := bufio.NewWriter(b) + + config.Protocol = []string{"chat"} + + err = handshaker.AcceptHandshake(bw) + if err != nil { + t.Errorf("handshake response failed: %v", err) + } + expectedResponse := strings.Join([]string{ + "HTTP/1.1 101 Switching Protocols", + "Upgrade: websocket", + "Connection: Upgrade", + "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", + "Sec-WebSocket-Protocol: chat", + "", ""}, "\r\n") + + if b.String() != expectedResponse { + t.Errorf("handshake expected %q but got %q", expectedResponse, b.String()) + } +} diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go new file mode 100644 index 0000000..0895dea --- /dev/null +++ b/vendor/golang.org/x/net/websocket/server.go @@ -0,0 +1,113 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "fmt" + "io" + "net/http" +) + +func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) { + var hs serverHandshaker = &hybiServerHandshaker{Config: config} + code, err := hs.ReadHandshake(buf.Reader, req) + if err == ErrBadWebSocketVersion { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if err != nil { + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.WriteString(err.Error()) + buf.Flush() + return + } + if handshake != nil { + err = handshake(config, req) + if err != nil { + code = http.StatusForbidden + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + } + err = hs.AcceptHandshake(buf.Writer) + if err != nil { + code = http.StatusBadRequest + fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code)) + buf.WriteString("\r\n") + buf.Flush() + return + } + conn = hs.NewServerConn(buf, rwc, req) + return +} + +// Server represents a server of a WebSocket. +type Server struct { + // Config is a WebSocket configuration for new WebSocket connection. + Config + + // Handshake is an optional function in WebSocket handshake. 
+ // For example, you can check, or don't check Origin header. + // Another example, you can select config.Protocol. + Handshake func(*Config, *http.Request) error + + // Handler handles a WebSocket connection. + Handler +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.serveWebSocket(w, req) +} + +func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) { + rwc, buf, err := w.(http.Hijacker).Hijack() + if err != nil { + panic("Hijack failed: " + err.Error()) + } + // The server should abort the WebSocket connection if it finds + // the client did not send a handshake that matches with protocol + // specification. + defer rwc.Close() + conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake) + if err != nil { + return + } + if conn == nil { + panic("unexpected nil conn") + } + s.Handler(conn) +} + +// Handler is a simple interface to a WebSocket browser client. +// It checks if Origin header is valid URL by default. +// You might want to verify websocket.Conn.Config().Origin in the func. +// If you use Server instead of Handler, you could call websocket.Origin and +// check the origin in your Handshake func. So, if you want to accept +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. +type Handler func(*Conn) + +func checkOrigin(config *Config, req *http.Request) (err error) { + config.Origin, err = Origin(config, req) + if err == nil && config.Origin == nil { + return fmt.Errorf("null origin") + } + return err +} + +// ServeHTTP implements the http.Handler interface for a WebSocket +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s := Server{Handler: h, Handshake: checkOrigin} + s.serveWebSocket(w, req) +} diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go new file mode 100644 index 0000000..e242c89 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -0,0 +1,448 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements a client and server for the WebSocket protocol +// as specified in RFC 6455. +// +// This package currently lacks some features found in an alternative +// and more actively maintained WebSocket package: +// +// https://godoc.org/github.com/gorilla/websocket +// +package websocket // import "golang.org/x/net/websocket" + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "sync" + "time" +) + +const ( + ProtocolVersionHybi13 = 13 + ProtocolVersionHybi = ProtocolVersionHybi13 + SupportedProtocolVersion = "13" + + ContinuationFrame = 0 + TextFrame = 1 + BinaryFrame = 2 + CloseFrame = 8 + PingFrame = 9 + PongFrame = 10 + UnknownFrame = 255 + + DefaultMaxPayloadBytes = 32 << 20 // 32MB +) + +// ProtocolError represents WebSocket protocol errors. 
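// Illustration — a sketch, not from the vendored source, of how the dial
// tests elsewhere in this diff distinguish failure causes: Dial wraps
// failures in *DialError, whose exported Err field holds the underlying
// error (for example a *net.OpError timeout or a protocol error). The URL
// below is a placeholder chosen to fail quickly.
package main

import (
	"fmt"
	"net"

	"golang.org/x/net/websocket"
)

func main() {
	_, err := websocket.Dial("ws://127.0.0.1:1/", "", "http://localhost/")
	if dialErr, ok := err.(*websocket.DialError); ok {
		if opErr, ok := dialErr.Err.(*net.OpError); ok {
			fmt.Println("network error, timeout:", opErr.Timeout())
			return
		}
		fmt.Println("handshake error:", dialErr.Err)
		return
	}
	fmt.Println("err:", err)
}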
+type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrBadProtocolVersion = &ProtocolError{"bad protocol version"} + ErrBadScheme = &ProtocolError{"bad scheme"} + ErrBadStatus = &ProtocolError{"bad status"} + ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"} + ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"} + ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"} + ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"} + ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"} + ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"} + ErrBadFrame = &ProtocolError{"bad frame"} + ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"} + ErrNotWebSocket = &ProtocolError{"not websocket protocol"} + ErrBadRequestMethod = &ProtocolError{"bad method"} + ErrNotSupported = &ProtocolError{"not supported"} +) + +// ErrFrameTooLarge is returned by Codec's Receive method if payload size +// exceeds limit set by Conn.MaxPayloadBytes +var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit") + +// Addr is an implementation of net.Addr for WebSocket. +type Addr struct { + *url.URL +} + +// Network returns the network type for a WebSocket, "websocket". +func (addr *Addr) Network() string { return "websocket" } + +// Config is a WebSocket configuration +type Config struct { + // A WebSocket server address. + Location *url.URL + + // A Websocket client origin. + Origin *url.URL + + // WebSocket subprotocols. + Protocol []string + + // WebSocket protocol version. + Version int + + // TLS config for secure WebSocket (wss). + TlsConfig *tls.Config + + // Additional header fields to be sent in WebSocket opening handshake. + Header http.Header + + // Dialer used when opening websocket connections. + Dialer *net.Dialer + + handshakeData map[string]string +} + +// serverHandshaker is an interface to handle WebSocket server side handshake. +type serverHandshaker interface { + // ReadHandshake reads handshake request message from client. + // Returns http response code and error if any. + ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) + + // AcceptHandshake accepts the client handshake request and sends + // handshake response back to client. + AcceptHandshake(buf *bufio.Writer) (err error) + + // NewServerConn creates a new WebSocket connection. + NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn) +} + +// frameReader is an interface to read a WebSocket frame. +type frameReader interface { + // Reader is to read payload of the frame. + io.Reader + + // PayloadType returns payload type. + PayloadType() byte + + // HeaderReader returns a reader to read header of the frame. + HeaderReader() io.Reader + + // TrailerReader returns a reader to read trailer of the frame. + // If it returns nil, there is no trailer in the frame. + TrailerReader() io.Reader + + // Len returns total length of the frame, including header and trailer. + Len() int +} + +// frameReaderFactory is an interface to creates new frame reader. +type frameReaderFactory interface { + NewFrameReader() (r frameReader, err error) +} + +// frameWriter is an interface to write a WebSocket frame. +type frameWriter interface { + // Writer is to write payload of the frame. + io.WriteCloser +} + +// frameWriterFactory is an interface to create new frame writer. 
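// Illustration — a sketch, not from the vendored source, of filling in the
// Config fields defined above for a secure (wss) connection: a TLS config
// and a custom net.Dialer, mirroring TestDialConfigTLSWithDialer at the top
// of this diff. The address is a placeholder, and InsecureSkipVerify is only
// reasonable against a local test server.
package main

import (
	"crypto/tls"
	"log"
	"net"
	"time"

	"golang.org/x/net/websocket"
)

func main() {
	config, err := websocket.NewConfig("wss://127.0.0.1:8443/echo", "http://localhost")
	if err != nil {
		log.Fatal(err)
	}
	config.TlsConfig = &tls.Config{InsecureSkipVerify: true} // test-only
	config.Dialer = &net.Dialer{Timeout: 5 * time.Second}
	ws, err := websocket.DialConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()
}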
+type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. 
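// Illustration — a minimal sketch, not from the vendored source, of using
// the deadline methods just defined to bound a blocking Read. Conn delegates
// to the underlying net.Conn, so a deadline makes the next Read fail with a
// timeout once it expires. The echo URL is a placeholder for any reachable
// server.
package main

import (
	"log"
	"time"

	"golang.org/x/net/websocket"
)

func main() {
	ws, err := websocket.Dial("ws://localhost:12345/echo", "", "http://localhost/")
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()

	// Allow at most two seconds for the next message to arrive.
	if err := ws.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 512)
	if _, err := ws.Read(buf); err != nil {
		log.Printf("read failed (likely timeout): %v", err)
	}
}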
+func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/net/websocket/websocket_test.go b/vendor/golang.org/x/net/websocket/websocket_test.go new file mode 100644 index 0000000..2054ce8 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/websocket_test.go @@ -0,0 +1,665 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +var serverAddr string +var once sync.Once + +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} + +type Count struct { + S string + N int +} + +func countServer(ws *Conn) { + defer ws.Close() + for { + var count Count + err := JSON.Receive(ws, &count) + if err != nil { + return + } + count.N++ + count.S = strings.Repeat(count.S, count.N) + err = JSON.Send(ws, count) + if err != nil { + return + } + } +} + +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + +func subProtocolHandshake(config *Config, req *http.Request) error { + for _, proto := range config.Protocol { + if proto == "chat" { + config.Protocol = []string{proto} + return nil + } + } + return ErrBadWebSocketProtocol +} + +func subProtoServer(ws *Conn) { + for _, proto := range ws.Config().Protocol { + io.WriteString(ws, proto) + } +} + +func startServer() { + http.Handle("/echo", Handler(echoServer)) + 
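// Illustration — a sketch, not part of the vendored file: Codec is just a
// pair of functions, so callers can define their own wire formats alongside
// the Message and JSON codecs above. Here is an assumption-driven example of
// a gob-based codec sent as binary frames; the package and variable names
// are inventions for this sketch.
package wsgob

import (
	"bytes"
	"encoding/gob"

	"golang.org/x/net/websocket"
)

func gobMarshal(v interface{}) ([]byte, byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(v); err != nil {
		return nil, websocket.UnknownFrame, err
	}
	return buf.Bytes(), websocket.BinaryFrame, nil
}

func gobUnmarshal(data []byte, payloadType byte, v interface{}) error {
	return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
}

// Gob sends and receives gob-encoded values over a websocket.Conn,
// e.g. Gob.Send(ws, value) and Gob.Receive(ws, &value).
var Gob = websocket.Codec{Marshal: gobMarshal, Unmarshal: gobUnmarshal}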
http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) + subproto := Server{ + Handshake: subProtocolHandshake, + Handler: Handler(subProtoServer), + } + http.Handle("/subproto", subproto) + server := httptest.NewServer(nil) + serverAddr = server.Listener.Addr().String() + log.Print("Test WebSocket server listening on ", serverAddr) +} + +func newConfig(t *testing.T, path string) *Config { + config, _ := NewConfig(fmt.Sprintf("ws://%s%s", serverAddr, path), "http://localhost") + return config +} + +func TestEcho(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var actual_msg = make([]byte, 512) + n, err := conn.Read(actual_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + actual_msg = actual_msg[0:n] + if !bytes.Equal(msg, actual_msg) { + t.Errorf("Echo: expected %q got %q", msg, actual_msg) + } + conn.Close() +} + +func TestAddr(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + ra := conn.RemoteAddr().String() + if !strings.HasPrefix(ra, "ws://") || !strings.HasSuffix(ra, "/echo") { + t.Errorf("Bad remote addr: %v", ra) + } + la := conn.LocalAddr().String() + if !strings.HasPrefix(la, "http://") { + t.Errorf("Bad local addr: %v", la) + } + conn.Close() +} + +func TestCount(t *testing.T) { + once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/count"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + var count Count + count.S = "hello" + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 1 { + t.Errorf("count: expected %d got %d", 1, count.N) + } + if count.S != "hello" { + t.Errorf("count: expected %q got %q", "hello", count.S) + } + if err := JSON.Send(conn, count); err != nil { + t.Errorf("Write: %v", err) + } + if err := JSON.Receive(conn, &count); err != nil { + t.Errorf("Read: %v", err) + } + if count.N != 2 { + t.Errorf("count: expected %d got %d", 2, count.N) + } + if count.S != "hellohello" { + t.Errorf("count: expected %q got %q", "hellohello", count.S) + } + conn.Close() +} + +func TestWithQuery(t *testing.T) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := newConfig(t, "/echo") + config.Location, err = url.ParseRequestURI(fmt.Sprintf("ws://%s/echo?q=v", serverAddr)) + if err != nil { + t.Fatal("location url", err) + } + + ws, err := NewClient(config, client) + if err != nil { + t.Errorf("WebSocket handshake: %v", err) + return + } + ws.Close() +} + +func testWithProtocol(t *testing.T, subproto []string) (string, error) { + once.Do(startServer) + + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + config := 
newConfig(t, "/subproto") + config.Protocol = subproto + + ws, err := NewClient(config, client) + if err != nil { + return "", err + } + msg := make([]byte, 16) + n, err := ws.Read(msg) + if err != nil { + return "", err + } + ws.Close() + return string(msg[:n]), nil +} + +func TestWithProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithTwoProtocol(t *testing.T) { + proto, err := testWithProtocol(t, []string{"test", "chat"}) + if err != nil { + t.Errorf("SubProto: unexpected error: %v", err) + } + if proto != "chat" { + t.Errorf("SubProto: expected %q, got %q", "chat", proto) + } +} + +func TestWithBadProtocol(t *testing.T) { + _, err := testWithProtocol(t, []string{"test"}) + if err != ErrBadStatus { + t.Errorf("SubProto: expected %v, got %v", ErrBadStatus, err) + } +} + +func TestHTTP(t *testing.T) { + once.Do(startServer) + + // If the client did not send a handshake that matches the protocol + // specification, the server MUST return an HTTP response with an + // appropriate error code (such as 400 Bad Request) + resp, err := http.Get(fmt.Sprintf("http://%s/echo", serverAddr)) + if err != nil { + t.Errorf("Get: error %#v", err) + return + } + if resp == nil { + t.Error("Get: resp is null") + return + } + if resp.StatusCode != http.StatusBadRequest { + t.Errorf("Get: expected %q got %q", http.StatusBadRequest, resp.StatusCode) + } +} + +func TestTrailingSpaces(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=955 + // The last runs of this create keys with trailing spaces that should not be + // generated by the client. + once.Do(startServer) + config := newConfig(t, "/echo") + for i := 0; i < 30; i++ { + // body + ws, err := DialConfig(config) + if err != nil { + t.Errorf("Dial #%d failed: %v", i, err) + break + } + ws.Close() + } +} + +func TestDialConfigBadVersion(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Version = 1234 + + _, err := DialConfig(config) + + if dialerr, ok := err.(*DialError); ok { + if dialerr.Err != ErrBadProtocolVersion { + t.Errorf("dial expected err %q but got %q", ErrBadProtocolVersion, dialerr.Err) + } + } +} + +func TestDialConfigWithDialer(t *testing.T) { + once.Do(startServer) + config := newConfig(t, "/echo") + config.Dialer = &net.Dialer{ + Deadline: time.Now().Add(-time.Minute), + } + _, err := DialConfig(config) + dialerr, ok := err.(*DialError) + if !ok { + t.Fatalf("DialError expected, got %#v", err) + } + neterr, ok := dialerr.Err.(*net.OpError) + if !ok { + t.Fatalf("net.OpError error expected, got %#v", dialerr.Err) + } + if !neterr.Timeout() { + t.Fatalf("expected timeout error, got %#v", neterr) + } +} + +func TestSmallBuffer(t *testing.T) { + // http://code.google.com/p/go/issues/detail?id=1145 + // Read should be able to handle reading a fragment of a frame. 
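// Illustration — a self-contained sketch, not from the vendored source, of
// the behavior TestHTTP above relies on: a plain HTTP GET with no Upgrade
// handshake against a websocket.Handler endpoint is answered with
// 400 Bad Request rather than being hijacked.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/http/httptest"

	"golang.org/x/net/websocket"
)

func main() {
	srv := httptest.NewServer(websocket.Handler(func(ws *websocket.Conn) {
		io.Copy(ws, ws)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/echo")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode == http.StatusBadRequest) // true
}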
+ once.Do(startServer) + + // websocket.Dial() + client, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + conn, err := NewClient(newConfig(t, "/echo"), client) + if err != nil { + t.Errorf("WebSocket handshake error: %v", err) + return + } + + msg := []byte("hello, world\n") + if _, err := conn.Write(msg); err != nil { + t.Errorf("Write: %v", err) + } + var small_msg = make([]byte, 8) + n, err := conn.Read(small_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + if !bytes.Equal(msg[:len(small_msg)], small_msg) { + t.Errorf("Echo: expected %q got %q", msg[:len(small_msg)], small_msg) + } + var second_msg = make([]byte, len(msg)) + n, err = conn.Read(second_msg) + if err != nil { + t.Errorf("Read: %v", err) + } + second_msg = second_msg[0:n] + if !bytes.Equal(msg[len(small_msg):], second_msg) { + t.Errorf("Echo: expected %q got %q", msg[len(small_msg):], second_msg) + } + conn.Close() +} + +var parseAuthorityTests = []struct { + in *url.URL + out string +}{ + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com", + }, + "www.google.com:443", + }, + { + &url.URL{ + Scheme: "ws", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "wss", + Host: "www.google.com:443", + }, + "www.google.com:443", + }, + // some invalid ones for parseAuthority. parseAuthority doesn't + // concern itself with the scheme unless it actually knows about it + { + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "www.google.com", + }, + { + &url.URL{ + Scheme: "http", + Host: "www.google.com:80", + }, + "www.google.com:80", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "127.0.0.1", + }, + "127.0.0.1", + }, + { + &url.URL{ + Scheme: "asdf", + Host: "www.google.com", + }, + "www.google.com", + }, +} + +func TestParseAuthority(t *testing.T) { + for _, tt := range parseAuthorityTests { + out := parseAuthority(tt.in) + if out != tt.out { + t.Errorf("got %v; want %v", out, tt.out) + } + } +} + +type closerConn struct { + net.Conn + closed int // count of the number of times Close was called +} + +func (c *closerConn) Close() error { + c.closed++ + return c.Conn.Close() +} + +func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + + once.Do(startServer) + + conn, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal("dialing", err) + } + + cc := closerConn{Conn: conn} + + client, err := NewClient(newConfig(t, "/echo"), &cc) + if err != nil { + t.Fatalf("WebSocket handshake: %v", err) + } + + // set the deadline to ten minutes ago, which will have expired by the time + // client.Close sends the close status frame. 
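+	// With the deadline already in the past, the write of the close frame
+	// fails immediately; Close must still close the underlying net.Conn and
+	// surface the write error, which the two checks below verify.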
+ conn.SetDeadline(time.Now().Add(-10 * time.Minute)) + + if err := client.Close(); err == nil { + t.Errorf("ws.Close(): expected error, got %v", err) + } + if cc.closed < 1 { + t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) + } +} + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} + +func TestCodec_ReceiveLimited(t *testing.T) { + const limit = 2048 + var payloads [][]byte + for _, size := range []int{ + 1024, + 2048, + 4096, // receive of this message would be interrupted due to limit + 2048, // this one is to make sure next receive recovers discarding leftovers + } { + b := make([]byte, size) + rand.Read(b) + payloads = append(payloads, b) + } + handlerDone := make(chan struct{}) + limitedHandler := func(ws *Conn) { + defer close(handlerDone) + ws.MaxPayloadBytes = limit + defer ws.Close() + for i, p := range payloads { + t.Logf("payload #%d (size %d, exceeds limit: %v)", i, len(p), len(p) > limit) + var recv []byte + err := Message.Receive(ws, &recv) + switch err { + case nil: + case ErrFrameTooLarge: + if len(p) <= limit { + t.Fatalf("unexpected frame size limit: expected %d bytes of payload having limit at %d", len(p), limit) + } + continue + default: + t.Fatalf("unexpected error: %v (want either nil or ErrFrameTooLarge)", err) + } + if len(recv) > limit { + t.Fatalf("received %d bytes of payload having limit at %d", len(recv), limit) + } + if !bytes.Equal(p, recv) { + t.Fatalf("received payload differs:\ngot:\t%v\nwant:\t%v", recv, p) + } + } + } + server := httptest.NewServer(Handler(limitedHandler)) + defer server.CloseClientConnections() + defer server.Close() + addr := server.Listener.Addr().String() + ws, err := Dial("ws://"+addr+"/", "", "http://localhost/") + if err != nil { + t.Fatal(err) + } + defer ws.Close() + for i, p := range payloads { + if err := Message.Send(ws, p); err != nil { + t.Fatalf("payload #%d (size %d): %v", i, len(p), err) + } + } + <-handlerDone +} diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf.go 
b/vendor/golang.org/x/net/xsrftoken/xsrf.go new file mode 100644 index 0000000..bc861e1 --- /dev/null +++ b/vendor/golang.org/x/net/xsrftoken/xsrf.go @@ -0,0 +1,94 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xsrftoken provides methods for generating and validating secure XSRF tokens. +package xsrftoken // import "golang.org/x/net/xsrftoken" + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +// Timeout is the duration for which XSRF tokens are valid. +// It is exported so clients may set cookie timeouts that match generated tokens. +const Timeout = 24 * time.Hour + +// clean sanitizes a string for inclusion in a token by replacing all ":"s. +func clean(s string) string { + return strings.Replace(s, ":", "_", -1) +} + +// Generate returns a URL-safe secure XSRF token that expires in 24 hours. +// +// key is a secret key for your application; it must be non-empty. +// userID is an optional unique identifier for the user. +// actionID is an optional action the user is taking (e.g. POSTing to a particular path). +func Generate(key, userID, actionID string) string { + return generateTokenAtTime(key, userID, actionID, time.Now()) +} + +// generateTokenAtTime is like Generate, but returns a token that expires 24 hours from now. +func generateTokenAtTime(key, userID, actionID string, now time.Time) string { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Round time up and convert to milliseconds. + milliTime := (now.UnixNano() + 1e6 - 1) / 1e6 + + h := hmac.New(sha1.New, []byte(key)) + fmt.Fprintf(h, "%s:%s:%d", clean(userID), clean(actionID), milliTime) + + // Get the padded base64 string then removing the padding. + tok := string(h.Sum(nil)) + tok = base64.URLEncoding.EncodeToString([]byte(tok)) + tok = strings.TrimRight(tok, "=") + + return fmt.Sprintf("%s:%d", tok, milliTime) +} + +// Valid reports whether a token is a valid, unexpired token returned by Generate. +func Valid(token, key, userID, actionID string) bool { + return validTokenAtTime(token, key, userID, actionID, time.Now()) +} + +// validTokenAtTime reports whether a token is valid at the given time. +func validTokenAtTime(token, key, userID, actionID string, now time.Time) bool { + if len(key) == 0 { + panic("zero length xsrf secret key") + } + // Extract the issue time of the token. + sep := strings.LastIndex(token, ":") + if sep < 0 { + return false + } + millis, err := strconv.ParseInt(token[sep+1:], 10, 64) + if err != nil { + return false + } + issueTime := time.Unix(0, millis*1e6) + + // Check that the token is not expired. + if now.Sub(issueTime) >= Timeout { + return false + } + + // Check that the token is not from the future. + // Allow 1 minute grace period in case the token is being verified on a + // machine whose clock is behind the machine that issued the token. + if issueTime.After(now.Add(1 * time.Minute)) { + return false + } + + expected := generateTokenAtTime(key, userID, actionID, issueTime) + + // Check that the token matches the expected value. + // Use constant time comparison to avoid timing attacks. 
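+	// Validation is stateless: the expected value is recomputed from the key
+	// and the timestamp carried in the token itself, so the server keeps no
+	// per-token state. A short-circuiting comparison could leak how many
+	// leading bytes matched, which is why subtle.ConstantTimeCompare is used.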
+	return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1
+}
diff --git a/vendor/golang.org/x/net/xsrftoken/xsrf_test.go b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go
new file mode 100644
index 0000000..6c8e7d9
--- /dev/null
+++ b/vendor/golang.org/x/net/xsrftoken/xsrf_test.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xsrftoken
+
+import (
+	"encoding/base64"
+	"testing"
+	"time"
+)
+
+const (
+	key      = "quay"
+	userID   = "12345678"
+	actionID = "POST /form"
+)
+
+var (
+	now              = time.Now()
+	oneMinuteFromNow = now.Add(1 * time.Minute)
+)
+
+func TestValidToken(t *testing.T) {
+	tok := generateTokenAtTime(key, userID, actionID, now)
+	if !validTokenAtTime(tok, key, userID, actionID, oneMinuteFromNow) {
+		t.Error("One minute later: Expected token to be valid")
+	}
+	if !validTokenAtTime(tok, key, userID, actionID, now.Add(Timeout-1*time.Nanosecond)) {
+		t.Error("Just before timeout: Expected token to be valid")
+	}
+	if !validTokenAtTime(tok, key, userID, actionID, now.Add(-1*time.Minute+1*time.Millisecond)) {
+		t.Error("One minute in the past: Expected token to be valid")
+	}
+}
+
+// TestSeparatorReplacement tests that separators are being correctly substituted.
+func TestSeparatorReplacement(t *testing.T) {
+	tok := generateTokenAtTime("foo:bar", "baz", "wah", now)
+	tok2 := generateTokenAtTime("foo", "bar:baz", "wah", now)
+	if tok == tok2 {
+		t.Errorf("Expected generated tokens to be different")
+	}
+}
+
+func TestInvalidToken(t *testing.T) {
+	invalidTokenTests := []struct {
+		name, key, userID, actionID string
+		t                           time.Time
+	}{
+		{"Bad key", "foobar", userID, actionID, oneMinuteFromNow},
+		{"Bad userID", key, "foobar", actionID, oneMinuteFromNow},
+		{"Bad actionID", key, userID, "foobar", oneMinuteFromNow},
+		{"Expired", key, userID, actionID, now.Add(Timeout + 1*time.Millisecond)},
+		{"More than 1 minute from the future", key, userID, actionID, now.Add(-1*time.Nanosecond - 1*time.Minute)},
+	}
+
+	tok := generateTokenAtTime(key, userID, actionID, now)
+	for _, itt := range invalidTokenTests {
+		if validTokenAtTime(tok, itt.key, itt.userID, itt.actionID, itt.t) {
+			t.Errorf("%v: Expected token to be invalid", itt.name)
+		}
+	}
+}
+
+// TestValidateBadData primarily tests that no unexpected panics are triggered
+// during parsing.
+func TestValidateBadData(t *testing.T) {
+	badDataTests := []struct {
+		name, tok string
+	}{
+		{"Invalid Base64", "ASDab24(@)$*=="},
+		{"No delimiter", base64.URLEncoding.EncodeToString([]byte("foobar12345678"))},
+		{"Invalid time", base64.URLEncoding.EncodeToString([]byte("foobar:foobar"))},
+		{"Wrong length", "1234" + generateTokenAtTime(key, userID, actionID, now)},
+	}
+
+	for _, bdt := range badDataTests {
+		if validTokenAtTime(bdt.tok, key, userID, actionID, oneMinuteFromNow) {
+			t.Errorf("%v: Expected token to be invalid", bdt.name)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 0000000..fa139db
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - tip
+
+install:
+  - export GOPATH="$HOME/gopath"
+  - mkdir -p "$GOPATH/src/golang.org/x"
+  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+  - go get -v -t -d golang.org/x/oauth2/...
+
+script:
+  - go test -v golang.org/x/oauth2/...
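The xsrftoken helpers above are small enough that a single handler shows the whole round trip: generate a token when rendering a form, validate it on the POST. A minimal sketch, assuming the user ID comes from a session; the key, handler, and route names are invented for illustration:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/xsrftoken"
)

// xsrfKey is a hypothetical application secret; use a long random value.
const xsrfKey = "replace-with-a-long-random-secret"

func formHandler(w http.ResponseWriter, r *http.Request) {
	userID := "user-42" // assumed to come from the session
	if r.Method == http.MethodPost {
		if !xsrftoken.Valid(r.FormValue("xsrf"), xsrfKey, userID, "POST /form") {
			http.Error(w, "invalid XSRF token", http.StatusForbidden)
			return
		}
		fmt.Fprintln(w, "form accepted")
		return
	}
	// Embed a fresh token in the form; it expires after xsrftoken.Timeout.
	tok := xsrftoken.Generate(xsrfKey, userID, "POST /form")
	fmt.Fprintf(w, `<form method="POST"><input type="hidden" name="xsrf" value="%s"><input type="submit"></form>`, tok)
}

func main() {
	http.HandleFunc("/form", formHandler)
	http.ListenAndServe(":8080", nil)
}
```

Because validation recomputes the HMAC from the timestamp embedded in the token, the server needs no per-token storage.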
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 0000000..46aa2b1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 0000000..eb8dcee
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,77 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+Or you can manually git clone the repository to
+`$(go env GOPATH)/src/golang.org/x/oauth2`.
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+```go
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	newappengine "google.golang.org/appengine"
+	newurlfetch "google.golang.org/appengine/urlfetch"
+
+	"appengine"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	var c appengine.Context = appengine.NewContext(r)
+	c.Infof("Logging a message with the old package")
+
+	var ctx context.Context = newappengine.NewContext(r)
+	client := &http.Client{
+		Transport: &oauth2.Transport{
+			Source: google.AppEngineTokenSource(ctx, "scope"),
+			Base:   &newurlfetch.Transport{Context: ctx},
+		},
+	}
+	client.Get("...")
+}
+```
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the oauth2 repository is located at
+https://github.com/golang/oauth2/issues.
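The README's App Engine example covers the config-driven flow; for callers that already hold a bearer token obtained out of band, the same TokenSource plumbing works with no browser interaction at all. A minimal sketch; the token value and URL are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Wrap an existing token in a static source; NewClient returns an
	// *http.Client that attaches it to the Authorization header of every
	// request.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "YOUR_TOKEN"})
	client := oauth2.NewClient(ctx, ts)

	resp, err := client.Get("https://api.example.com/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body))
}
```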
diff --git a/vendor/golang.org/x/oauth2/amazon/amazon.go b/vendor/golang.org/x/oauth2/amazon/amazon.go new file mode 100644 index 0000000..d21da11 --- /dev/null +++ b/vendor/golang.org/x/oauth2/amazon/amazon.go @@ -0,0 +1,16 @@ +// Copyright 2017 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package amazon provides constants for using OAuth2 to access Amazon. +package amazon + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Amazon's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.amazon.com/ap/oa", + TokenURL: "https://api.amazon.com/auth/o2/token", +} diff --git a/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go new file mode 100644 index 0000000..44af1f1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/bitbucket/bitbucket.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bitbucket provides constants for using OAuth2 to access Bitbucket. +package bitbucket + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Bitbucket's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://bitbucket.org/site/oauth2/authorize", + TokenURL: "https://bitbucket.org/site/oauth2/access_token", +} diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go new file mode 100644 index 0000000..8962c49 --- /dev/null +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// App Engine hooks. + +package oauth2 + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" +) + +func init() { + internal.RegisterContextClientFunc(contextClientAppEngine) +} + +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 0000000..4afb631 --- /dev/null +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,107 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth2.0 "client credentials" token flow, +// also known as the "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. +// +// See https://tools.ietf.org/html/rfc6749#section-4.4 +package clientcredentials // import "golang.org/x/oauth2/clientcredentials" + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +// Config describes a 2-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. 
+type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // TokenURL is the resource server's token endpoint + // URL. This is a constant specific to each server. + TokenURL string + + // Scope specifies optional requested permissions. + Scopes []string + + // EndpointParams specifies additional parameters for requests to the token endpoint. + EndpointParams url.Values +} + +// Token uses client credentials to retrieve a token. +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { + return c.TokenSource(ctx).Token() +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context and the +// client ID and client secret. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + source := &tokenSource{ + ctx: ctx, + conf: c, + } + return oauth2.ReuseTokenSource(nil, source) +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token refreshes the token by using a new client credentials request. +// tokens received this way do not include a refresh token +func (c *tokenSource) Token() (*oauth2.Token, error) { + v := url.Values{ + "grant_type": {"client_credentials"}, + "scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")), + } + for k, p := range c.conf.EndpointParams { + if _, ok := v[k]; ok { + return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k) + } + v[k] = p + } + tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*oauth2.RetrieveError)(rErr) + } + return nil, err + } + t := &oauth2.Token{ + AccessToken: tk.AccessToken, + TokenType: tk.TokenType, + RefreshToken: tk.RefreshToken, + Expiry: tk.Expiry, + } + return t.WithExtra(tk.Raw), nil +} diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go new file mode 100644 index 0000000..108520c --- /dev/null +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
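Typical use of the clientcredentials package above collapses to constructing a Config and asking it for an *http.Client; the transport then fetches (and later re-fetches) tokens with the client-credentials grant as needed. A minimal sketch; the credentials and URLs are placeholders for your provider's values:

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	conf := &clientcredentials.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		TokenURL:     "https://provider.example.com/oauth2/token",
		Scopes:       []string{"scope1"},
	}

	// Every request made through this client carries a token obtained via
	// the two-legged flow; no user ever sees a consent page.
	client := conf.Client(context.Background())
	if _, err := client.Get("https://provider.example.com/api/resource"); err != nil {
		log.Fatal(err)
	}
}
```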
+ +package clientcredentials + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func newConf(serverURL string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + Scopes: []string{"scope1", "scope2"}, + TokenURL: serverURL + "/token", + EndpointParams: url.Values{"audience": {"audience1"}}, + } +} + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func TestTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token") + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want { + t.Errorf("Content-Type header = %q; want %q", got, want) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + r.Body.Close() + } + if err != nil { + t.Errorf("failed reading request body: %s.", err) + } + if string(body) != "audience=audience1&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("payload = %q; want %q", string(body), "grant_type=client_credentials&scope=scope1+scope2") + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Token(context.Background()) + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("token invalid. got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c") + } + if tok.TokenType != "bearer" { + t.Errorf("token type = %q; want %q", tok.TokenType, "bearer") + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "audience=audience1&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background()) + c.Get(ts.URL + "/somethingelse") +} diff --git a/vendor/golang.org/x/oauth2/example_test.go b/vendor/golang.org/x/oauth2/example_test.go new file mode 100644 index 0000000..fc2f793 --- /dev/null +++ b/vendor/golang.org/x/oauth2/example_test.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package oauth2_test + +import ( + "context" + "fmt" + "log" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +func ExampleConfig() { + ctx := context.Background() + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://provider.com/o/oauth2/auth", + TokenURL: "https://provider.com/o/oauth2/token", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect + // URL. Exchange will do the handshake to retrieve the + // initial access token. The HTTP Client returned by + // conf.Client will refresh the token as necessary. + var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + tok, err := conf.Exchange(ctx, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(ctx, tok) + client.Get("...") +} + +func ExampleConfig_customHTTP() { + ctx := context.Background() + + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + Scopes: []string{"SCOPE1", "SCOPE2"}, + Endpoint: oauth2.Endpoint{ + TokenURL: "https://provider.com/o/oauth2/token", + AuthURL: "https://provider.com/o/oauth2/auth", + }, + } + + // Redirect user to consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Use the authorization code that is pushed to the redirect + // URL. Exchange will do the handshake to retrieve the + // initial access token. The HTTP Client returned by + // conf.Client will refresh the token as necessary. + var code string + if _, err := fmt.Scan(&code); err != nil { + log.Fatal(err) + } + + // Use the custom HTTP client when requesting a token. + httpClient := &http.Client{Timeout: 2 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + tok, err := conf.Exchange(ctx, code) + if err != nil { + log.Fatal(err) + } + + client := conf.Client(ctx, tok) + _ = client +} diff --git a/vendor/golang.org/x/oauth2/facebook/facebook.go b/vendor/golang.org/x/oauth2/facebook/facebook.go new file mode 100644 index 0000000..14c801a --- /dev/null +++ b/vendor/golang.org/x/oauth2/facebook/facebook.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facebook provides constants for using OAuth2 to access Facebook. +package facebook // import "golang.org/x/oauth2/facebook" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Facebook's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.facebook.com/dialog/oauth", + TokenURL: "https://graph.facebook.com/oauth/access_token", +} diff --git a/vendor/golang.org/x/oauth2/fitbit/fitbit.go b/vendor/golang.org/x/oauth2/fitbit/fitbit.go new file mode 100644 index 0000000..b31b82a --- /dev/null +++ b/vendor/golang.org/x/oauth2/fitbit/fitbit.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fitbit provides constants for using OAuth2 to access the Fitbit API. 
+package fitbit // import "golang.org/x/oauth2/fitbit" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is the Fitbit API's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.fitbit.com/oauth2/authorize", + TokenURL: "https://api.fitbit.com/oauth2/token", +} diff --git a/vendor/golang.org/x/oauth2/foursquare/foursquare.go b/vendor/golang.org/x/oauth2/foursquare/foursquare.go new file mode 100644 index 0000000..d2fa099 --- /dev/null +++ b/vendor/golang.org/x/oauth2/foursquare/foursquare.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package foursquare provides constants for using OAuth2 to access Foursquare. +package foursquare // import "golang.org/x/oauth2/foursquare" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Foursquare's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://foursquare.com/oauth2/authorize", + TokenURL: "https://foursquare.com/oauth2/access_token", +} diff --git a/vendor/golang.org/x/oauth2/github/github.go b/vendor/golang.org/x/oauth2/github/github.go new file mode 100644 index 0000000..f297801 --- /dev/null +++ b/vendor/golang.org/x/oauth2/github/github.go @@ -0,0 +1,16 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package github provides constants for using OAuth2 to access Github. +package github // import "golang.org/x/oauth2/github" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Github's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://github.com/login/oauth/authorize", + TokenURL: "https://github.com/login/oauth/access_token", +} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 0000000..50d918b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "sort" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex. +var appengineFlex bool + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens +// issued to the current App Engine application's service account. +// If you are implementing a 3-legged OAuth 2.0 flow on App Engine +// that involves user accounts, see oauth2.Config instead. +// +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) 
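+	// Copying and sorting canonicalizes the scope list, so the same set of
+	// scopes maps to a single aeTokens cache key regardless of the order in
+	// which the caller passed them.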
+ sort.Strings(scopes) + return &appEngineTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 0000000..56669ea --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go new file mode 100644 index 0000000..5d0231a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm + +package google + +func init() { + appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server. +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 0000000..b4b6274 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultCredentials holds "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. 
+ JSON []byte +} + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scope); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil && !appengineFlex { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scope...), + }, nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource(""), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var f credentialsFile + if err := json.Unmarshal(b, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: b, + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/example_test.go b/vendor/golang.org/x/oauth2/google/example_test.go new file mode 100644 index 0000000..92bc3b4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/example_test.go @@ -0,0 +1,150 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appenginevm appengine + +package google_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "google.golang.org/appengine" + "google.golang.org/appengine/urlfetch" +) + +func ExampleDefaultClient() { + client, err := google.DefaultClient(oauth2.NoContext, + "https://www.googleapis.com/auth/devstorage.full_control") + if err != nil { + log.Fatal(err) + } + client.Get("...") +} + +func Example_webServer() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &oauth2.Config{ + ClientID: "YOUR_CLIENT_ID", + ClientSecret: "YOUR_CLIENT_SECRET", + RedirectURL: "YOUR_REDIRECT_URL", + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + Endpoint: google.Endpoint, + } + // Redirect user to Google's consent page to ask for permission + // for the scopes specified above. + url := conf.AuthCodeURL("state") + fmt.Printf("Visit the URL for the auth dialog: %v", url) + + // Handle the exchange code to initiate a transport. + tok, err := conf.Exchange(oauth2.NoContext, "authorization-code") + if err != nil { + log.Fatal(err) + } + client := conf.Client(oauth2.NoContext, tok) + client.Get("...") +} + +func ExampleJWTConfigFromJSON() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + // Navigate to your project, then see the "Credentials" page + // under "APIs & Auth". + // To create a service account client, click "Create new Client ID", + // select "Service Account", and click "Create Client ID". A JSON + // key file will then be downloaded to your computer. + data, err := ioutil.ReadFile("/path/to/your-project-key.json") + if err != nil { + log.Fatal(err) + } + conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of + // your service account. 
+ client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleSDKConfig() { + // The credentials will be obtained from the first account that + // has been authorized with `gcloud auth login`. + conf, err := google.NewSDKConfig("") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of the SDK user. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func Example_serviceAccount() { + // Your credentials should be obtained from the Google + // Developer Console (https://console.developers.google.com). + conf := &jwt.Config{ + Email: "xxx@developer.gserviceaccount.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // The field only supports PEM containers with no passphrase. + // The openssl command will convert p12 keys to passphrase-less PEM containers. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Scopes: []string{ + "https://www.googleapis.com/auth/bigquery", + "https://www.googleapis.com/auth/blogger", + }, + TokenURL: google.JWTTokenURL, + // If you would like to impersonate a user, you can + // create a transport with a subject. The following GET + // request will be made on the behalf of user@example.com. + // Optional. + Subject: "user@example.com", + } + // Initiate an http.Client, the following GET request will be + // authorized and authenticated on the behalf of user@example.com. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + +func ExampleAppEngineTokenSource() { + var req *http.Request // from the ServeHTTP handler + ctx := appengine.NewContext(req) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"), + Base: &urlfetch.Transport{ + Context: ctx, + }, + }, + } + client.Get("...") +} + +func ExampleComputeTokenSource() { + client := &http.Client{ + Transport: &oauth2.Transport{ + // Fetch from Google Compute Engine's metadata server to retrieve + // an access token for the provided account. + // If no account is specified, "default" is used. + Source: google.ComputeTokenSource(""), + }, + } + client.Get("...") +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 0000000..66a8b0e --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,202 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. +// +// For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. 
+package google // import "golang.org/x/oauth2/google" + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://accounts.google.com/o/oauth2/token", +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) 
+ ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account}) +} + +type computeSource struct { + account string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + return &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/google_test.go b/vendor/golang.org/x/oauth2/google/google_test.go new file mode 100644 index 0000000..287c699 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google_test.go @@ -0,0 +1,116 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
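Seen from a caller, the credential search order implemented in default.go above reduces to a single call. A minimal sketch; the scope is a placeholder:

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Tries GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known file,
	// App Engine, then the GCE metadata server, in that order.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("project %q, JSON key present: %v", creds.ProjectID, creds.JSON != nil)

	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token type:", tok.TokenType)
}
```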
+ +package google + +import ( + "strings" + "testing" +) + +var webJSONKey = []byte(` +{ + "web": { + "auth_uri": "https://google.com/o/oauth2/auth", + "client_secret": "3Oknc4jS_wA2r9i", + "token_uri": "https://google.com/o/oauth2/token", + "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com", + "redirect_uris": ["https://www.example.com/oauth2callback"], + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com", + "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "javascript_origins": ["https://www.example.com"] + } +}`) + +var installedJSONKey = []byte(`{ + "installed": { + "client_id": "222-installed.apps.googleusercontent.com", + "redirect_uris": ["https://www.example.com/oauth2callback"] + } +}`) + +var jwtJSONKey = []byte(`{ + "private_key_id": "268f54e43a1af97cfc71731688434f45aca15c8b", + "private_key": "super secret key", + "client_email": "gopher@developer.gserviceaccount.com", + "client_id": "gopher.apps.googleusercontent.com", + "token_uri": "https://accounts.google.com/o/gophers/token", + "type": "service_account" +}`) + +var jwtJSONKeyNoTokenURL = []byte(`{ + "private_key_id": "268f54e43a1af97cfc71731688434f45aca15c8b", + "private_key": "super secret key", + "client_email": "gopher@developer.gserviceaccount.com", + "client_id": "gopher.apps.googleusercontent.com", + "type": "service_account" +}`) + +func TestConfigFromJSON(t *testing.T) { + conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2") + if err != nil { + t.Error(err) + } + if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want { + t.Errorf("ClientID = %q; want %q", got, want) + } + if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want { + t.Errorf("ClientSecret = %q; want %q", got, want) + } + if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want { + t.Errorf("RedictURL = %q; want %q", got, want) + } + if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want { + t.Errorf("Scopes = %q; want %q", got, want) + } + if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want { + t.Errorf("AuthURL = %q; want %q", got, want) + } + if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want { + t.Errorf("TokenURL = %q; want %q", got, want) + } +} + +func TestConfigFromJSON_Installed(t *testing.T) { + conf, err := ConfigFromJSON(installedJSONKey) + if err != nil { + t.Error(err) + } + if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want { + t.Errorf("ClientID = %q; want %q", got, want) + } +} + +func TestJWTConfigFromJSON(t *testing.T) { + conf, err := JWTConfigFromJSON(jwtJSONKey, "scope1", "scope2") + if err != nil { + t.Fatal(err) + } + if got, want := conf.Email, "gopher@developer.gserviceaccount.com"; got != want { + t.Errorf("Email = %q, want %q", got, want) + } + if got, want := string(conf.PrivateKey), "super secret key"; got != want { + t.Errorf("PrivateKey = %q, want %q", got, want) + } + if got, want := conf.PrivateKeyID, "268f54e43a1af97cfc71731688434f45aca15c8b"; got != want { + t.Errorf("PrivateKeyID = %q, want %q", got, want) + } + if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want { + t.Errorf("Scopes = %q; want %q", got, want) + } + if got, want 
:= conf.TokenURL, "https://accounts.google.com/o/gophers/token"; got != want { + t.Errorf("TokenURL = %q; want %q", got, want) + } +} + +func TestJWTConfigFromJSONNoTokenURL(t *testing.T) { + conf, err := JWTConfigFromJSON(jwtJSONKeyNoTokenURL, "scope1", "scope2") + if err != nil { + t.Fatal(err) + } + if got, want := conf.TokenURL, "https://accounts.google.com/o/oauth2/token"; got != want { + t.Errorf("TokenURL = %q; want %q", got, want) + } +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 0000000..b0fdb3a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt_test.go b/vendor/golang.org/x/oauth2/google/jwt_test.go new file mode 100644 index 0000000..f844436 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt_test.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "strings" + "testing" + "time" + + "golang.org/x/oauth2/jws" +) + +func TestJWTAccessTokenSourceFromJSON(t *testing.T) { + // Generate a key we can use in the test data. 
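+ // The PEM-encoded key is JSON-escaped with json.Marshal below and spliced
+ // into jwtJSONKey in place of its "super secret key" placeholder, yielding
+ // a well-formed service account key file for the test to parse.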
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + // Encode the key and substitute into our example JSON. + enc := pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + }) + enc, err = json.Marshal(string(enc)) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + jsonKey := bytes.Replace(jwtJSONKey, []byte(`"super secret key"`), enc, 1) + + ts, err := JWTAccessTokenSourceFromJSON(jsonKey, "audience") + if err != nil { + t.Fatalf("JWTAccessTokenSourceFromJSON: %v\nJSON: %s", err, string(jsonKey)) + } + + tok, err := ts.Token() + if err != nil { + t.Fatalf("Token: %v", err) + } + + if got, want := tok.TokenType, "Bearer"; got != want { + t.Errorf("TokenType = %q, want %q", got, want) + } + if got := tok.Expiry; tok.Expiry.Before(time.Now()) { + t.Errorf("Expiry = %v, should not be expired", got) + } + + err = jws.Verify(tok.AccessToken, &privateKey.PublicKey) + if err != nil { + t.Errorf("jws.Verify on AccessToken: %v", err) + } + + claim, err := jws.Decode(tok.AccessToken) + if err != nil { + t.Fatalf("jws.Decode on AccessToken: %v", err) + } + + if got, want := claim.Iss, "gopher@developer.gserviceaccount.com"; got != want { + t.Errorf("Iss = %q, want %q", got, want) + } + if got, want := claim.Sub, "gopher@developer.gserviceaccount.com"; got != want { + t.Errorf("Sub = %q, want %q", got, want) + } + if got, want := claim.Aud, "audience"; got != want { + t.Errorf("Aud = %q, want %q", got, want) + } + + // Finally, check the header private key. + parts := strings.Split(tok.AccessToken, ".") + hdrJSON, err := base64.RawURLEncoding.DecodeString(parts[0]) + if err != nil { + t.Fatalf("base64 DecodeString: %v\nString: %q", err, parts[0]) + } + var hdr jws.Header + if err := json.Unmarshal([]byte(hdrJSON), &hdr); err != nil { + t.Fatalf("json.Unmarshal: %v (%q)", err, hdrJSON) + } + + if got, want := hdr.KeyID, "268f54e43a1af97cfc71731688434f45aca15c8b"; got != want { + t.Errorf("Header KeyID = %q, want %q", got, want) + } +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 0000000..bdc1808 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. 
+// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. +func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieves tokens from +// Google Cloud SDK credentials using the provided context. +// It will return the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +// sdkConfigPath tries to guess where the gcloud config is located.
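+// On Windows this resolves to %APPDATA%\gcloud; on other systems it is
+// $HOME/.config/gcloud (with guessUnixHomeDir supplying the home directory).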
+// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/google/sdk_test.go b/vendor/golang.org/x/oauth2/google/sdk_test.go new file mode 100644 index 0000000..4489bb9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk_test.go @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import "testing" + +func TestSDKConfig(t *testing.T) { + sdkConfigPath = func() (string, error) { + return "testdata/gcloud", nil + } + + tests := []struct { + account string + accessToken string + err bool + }{ + {"", "bar_access_token", false}, + {"foo@example.com", "foo_access_token", false}, + {"bar@example.com", "bar_access_token", false}, + {"baz@serviceaccount.example.com", "", true}, + } + for _, tt := range tests { + c, err := NewSDKConfig(tt.account) + if got, want := err != nil, tt.err; got != want { + if !tt.err { + t.Errorf("got %v, want nil", err) + } else { + t.Errorf("got nil, want error") + } + continue + } + if err != nil { + continue + } + tok := c.initialToken + if tok == nil { + t.Errorf("got nil, want %q", tt.accessToken) + continue + } + if tok.AccessToken != tt.accessToken { + t.Errorf("got %q, want %q", tok.AccessToken, tt.accessToken) + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials new file mode 100644 index 0000000..ff5eefb --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials @@ -0,0 +1,122 @@ +{ + "data": [ + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", + "access_token": "foo_access_token", + "client_id": "foo_client_id", + "client_secret": "foo_client_secret", + "id_token": { + "at_hash": "foo_at_hash", + "aud": "foo_aud", + "azp": "foo_azp", + "cid": "foo_cid", + "email": "foo@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "foo_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "foo_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "foo_access_token", + "expires_in": 3600, + "id_token": "foo_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "foo@example.com", + "clientId": "foo_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control 
https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", + "access_token": "bar_access_token", + "client_id": "bar_client_id", + "client_secret": "bar_client_secret", + "id_token": { + "at_hash": "bar_at_hash", + "aud": "bar_aud", + "azp": "bar_azp", + "cid": "bar_cid", + "email": "bar@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "bar_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "bar_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "bar_access_token", + "expires_in": 3600, + "id_token": "bar_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "bar@example.com", + "clientId": "bar_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "ServiceAccountCredentials", + "_kwargs": {}, + "_module": "oauth2client.client", + "_private_key_id": "00000000000000000000000000000000", + "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", + "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "_service_account_email": 
"baz@serviceaccount.example.com", + "_service_account_id": "baz.serviceaccount.example.com", + "_token_uri": "https://accounts.google.com/o/oauth2/token", + "_user_agent": "Cloud SDK Command Line Tool", + "access_token": null, + "assertion_type": null, + "client_id": null, + "client_secret": null, + "id_token": null, + "invalid": false, + "refresh_token": null, + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "service_account_name": "baz@serviceaccount.example.com", + "token_expiry": null, + "token_response": null, + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "baz@serviceaccount.example.com", + "clientId": "baz_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + } + ], + "file_version": 1 +} diff --git a/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties new file mode 100644 index 0000000..025de88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/testdata/gcloud/properties @@ -0,0 +1,2 @@ +[core] +account = bar@example.com \ No newline at end of file diff --git a/vendor/golang.org/x/oauth2/heroku/heroku.go b/vendor/golang.org/x/oauth2/heroku/heroku.go new file mode 100644 index 0000000..5b4fdb8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/heroku/heroku.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package heroku provides constants for using OAuth2 to access Heroku. +package heroku // import "golang.org/x/oauth2/heroku" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Heroku's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://id.heroku.com/oauth/authorize", + TokenURL: "https://id.heroku.com/oauth/token", +} diff --git a/vendor/golang.org/x/oauth2/hipchat/hipchat.go b/vendor/golang.org/x/oauth2/hipchat/hipchat.go new file mode 100644 index 0000000..594fe07 --- /dev/null +++ b/vendor/golang.org/x/oauth2/hipchat/hipchat.go @@ -0,0 +1,60 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hipchat provides constants for using OAuth2 to access HipChat. +package hipchat // import "golang.org/x/oauth2/hipchat" + +import ( + "encoding/json" + "errors" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +// Endpoint is HipChat's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.hipchat.com/users/authorize", + TokenURL: "https://api.hipchat.com/v2/oauth/token", +} + +// ServerEndpoint returns a new oauth2.Endpoint for a HipChat Server instance +// running on the given domain or host. +func ServerEndpoint(host string) oauth2.Endpoint { + return oauth2.Endpoint{ + AuthURL: "https://" + host + "/users/authorize", + TokenURL: "https://" + host + "/v2/oauth/token", + } +} + +// ClientCredentialsConfigFromCaps generates a Config from a HipChat API +// capabilities descriptor. 
It does not verify the scopes against the +// capabilities document at this time. + // + // For more information see: https://www.hipchat.com/docs/apiv2/method/get_capabilities +func ClientCredentialsConfigFromCaps(capsJSON []byte, clientID, clientSecret string, scopes ...string) (*clientcredentials.Config, error) { + var caps struct { + Caps struct { + Endpoint struct { + TokenURL string `json:"tokenUrl"` + } `json:"oauth2Provider"` + } `json:"capabilities"` + } + + if err := json.Unmarshal(capsJSON, &caps); err != nil { + return nil, err + } + + // Verify required fields. + if caps.Caps.Endpoint.TokenURL == "" { + return nil, errors.New("oauth2/hipchat: missing OAuth2 token URL in the capabilities descriptor JSON") + } + + return &clientcredentials.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + Scopes: scopes, + TokenURL: caps.Caps.Endpoint.TokenURL, + }, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go new file mode 100644 index 0000000..03265e8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -0,0 +1,6 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for the oauth2 package. +package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 0000000..6978192 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "bufio" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "strings" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the private key +// from the PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment.
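+ // Note that only lines beginning with ';' are recognized as comments here;
+ // '#'-prefixed lines get no special treatment from this parser.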
+ continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2_test.go b/vendor/golang.org/x/oauth2/internal/oauth2_test.go new file mode 100644 index 0000000..07d51c4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2_test.go @@ -0,0 +1,61 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseINI(t *testing.T) { + tests := []struct { + ini string + want map[string]map[string]string + }{ + { + `root = toor +[foo] +bar = hop +ini = nin +`, + map[string]map[string]string{ + "": {"root": "toor"}, + "foo": {"bar": "hop", "ini": "nin"}, + }, + }, + { + `[empty] +[section] +empty= +`, + map[string]map[string]string{ + "": {}, + "empty": {}, + "section": {"empty": ""}, + }, + }, + { + `ignore +[invalid +=stuff +;comment=true +`, + map[string]map[string]string{ + "": {}, + }, + }, + } + for _, tt := range tests { + result, err := ParseINI(strings.NewReader(tt.ini)) + if err != nil { + t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err) + continue + } + if !reflect.DeepEqual(result, tt.want) { + t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 0000000..8a10204 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,263 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. 
+ Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://api.codeswholesale.com/oauth/token", + "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", + "https://api.soundcloud.com/", + "https://api.twitch.tv/", + "https://app.box.com/", + "https://connect.stripe.com/", + "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214 + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", + "https://login.windows.net", + "https://login.live.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", + "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", + "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", + "https://sandbox.codeswholesale.com/oauth/token", + "https://api.sipgate.com/v1/authorization/oauth", +} + +// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. +var brokenAuthHeaderDomains = []string{ + ".force.com", + ".myshopify.com", + ".okta.com", + ".oktapreview.com", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts either it in URL param or Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. 
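+ // Because this is a prefix match, any URL beneath a listed entry is
+ // also treated as broken.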
+ return false + } + } + + if u, err := url.Parse(tokenURL); err == nil { + for _, s := range brokenAuthHeaderDomains { + if strings.HasSuffix(u.Host, s) { + return false + } + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs. + return true +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth { + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + r, err := ctxhttp.Do(ctx, hc, req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} + +type RetrieveError struct { + Response *http.Response + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/internal/token_test.go b/vendor/golang.org/x/oauth2/internal/token_test.go new file mode 100644 index 0000000..a52bb81 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token_test.go @@ -0,0 +1,108 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "golang.org/x/net/context" +) + +func TestRegisterBrokenAuthHeaderProvider(t *testing.T) { + RegisterBrokenAuthHeaderProvider("https://aaa.com/") + tokenURL := "https://aaa.com/token" + if providerAuthHeaderWorks(tokenURL) { + t.Errorf("got %q as unbroken; want broken", tokenURL) + } +} + +func TestRetrieveTokenBustedNoSecret(t *testing.T) { + const clientID = "client-id" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.FormValue("client_id"), clientID; got != want { + t.Errorf("client_id = %q; want %q", got, want) + } + if got, want := r.FormValue("client_secret"), ""; got != want { + t.Errorf("client_secret = %q; want empty", got) + } + io.WriteString(w, "{}") // something non-empty, required to set a Content-Type in Go 1.10 + })) + defer ts.Close() + + RegisterBrokenAuthHeaderProvider(ts.URL) + _, err := RetrieveToken(context.Background(), clientID, "", ts.URL, url.Values{}) + if err != nil { + t.Errorf("RetrieveToken = %v; want no error", err) + } +} + +func Test_providerAuthHeaderWorks(t *testing.T) { + for _, p := range brokenAuthHeaderProviders { + if providerAuthHeaderWorks(p) { + t.Errorf("got %q as unbroken; want broken", p) + } + p := fmt.Sprintf("%ssomesuffix", p) + if providerAuthHeaderWorks(p) { + t.Errorf("got %q as unbroken; want broken", p) + } + } + p := "https://api.not-in-the-list-example.com/" + if !providerAuthHeaderWorks(p) { + t.Errorf("got %q as unbroken; want broken", p) + } +} + +func TestProviderAuthHeaderWorksDomain(t *testing.T) { + tests := []struct { + tokenURL string + wantWorks bool + }{ + {"https://dev-12345.okta.com/token-url", false}, + {"https://dev-12345.oktapreview.com/token-url", false}, + {"https://dev-12345.okta.org/token-url", true}, + {"https://foo.bar.force.com/token-url", false}, + {"https://foo.force.com/token-url", false}, + {"https://force.com/token-url", true}, + } + + for _, test := range tests { + got := providerAuthHeaderWorks(test.tokenURL) + if got != test.wantWorks { + t.Errorf("providerAuthHeaderWorks(%q) = %v; want %v", test.tokenURL, got, test.wantWorks) + } + } +} + +func TestRetrieveTokenWithContexts(t *testing.T) { + const clientID = "client-id" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "{}") // something non-empty, required to set a Content-Type in Go 1.10 + })) + defer ts.Close() + + _, err := RetrieveToken(context.Background(), clientID, "", ts.URL, url.Values{}) + if err != nil { + t.Errorf("RetrieveToken (with background context) = %v; want no error", err) + } + + ctx, cancelfunc := context.WithCancel(context.Background()) + + cancellingts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cancelfunc() + })) + defer cancellingts.Close() + + _, err = RetrieveToken(ctx, clientID, "", cancellingts.URL, url.Values{}) + if err == nil { + t.Errorf("RetrieveToken (with cancelled context) = nil; want error") + } +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 0000000..783bd98 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,68 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/vendor/golang.org/x/oauth2/internal/transport_test.go b/vendor/golang.org/x/oauth2/internal/transport_test.go new file mode 100644 index 0000000..8772ec5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport_test.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "net/http" + "testing" + + "golang.org/x/net/context" +) + +func TestContextClient(t *testing.T) { + rc := &http.Client{} + RegisterContextClientFunc(func(context.Context) (*http.Client, error) { + return rc, nil + }) + + c := &http.Client{} + ctx := context.WithValue(context.Background(), HTTPClient, c) + + hc, err := ContextClient(ctx) + if err != nil { + t.Fatalf("want valid client; got err = %v", err) + } + if hc != c { + t.Fatalf("want context client = %p; got = %p", c, hc) + } + + hc, err = ContextClient(context.TODO()) + if err != nil { + t.Fatalf("want valid client; got err = %v", err) + } + if hc != rc { + t.Fatalf("want registered client = %p; got = %p", c, hc) + } +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 0000000..683d2d2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. 
+// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. +package jws // import "golang.org/x/oauth2/jws" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // compliant with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This map is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used.
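+ // It is serialized as the "kid" header parameter (RFC 7515, section 4.1.4).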
+ KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jws/jws_test.go b/vendor/golang.org/x/oauth2/jws/jws_test.go new file mode 100644 index 0000000..39a136a --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package jws + +import ( + "crypto/rand" + "crypto/rsa" + "testing" +) + +func TestSignAndVerify(t *testing.T) { + header := &Header{ + Algorithm: "RS256", + Typ: "JWT", + } + payload := &ClaimSet{ + Iss: "http://google.com/", + Aud: "", + Exp: 3610, + Iat: 10, + } + + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + token, err := Encode(header, payload, privateKey) + if err != nil { + t.Fatal(err) + } + + err = Verify(token, &privateKey.PublicKey) + if err != nil { + t.Fatal(err) + } +} + +func TestVerifyFailsOnMalformedClaim(t *testing.T) { + err := Verify("abc.def", nil) + if err == nil { + t.Error("got no errors; want improperly formed JWT not to be verified") + } +} diff --git a/vendor/golang.org/x/oauth2/jwt/example_test.go b/vendor/golang.org/x/oauth2/jwt/example_test.go new file mode 100644 index 0000000..58503d8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/example_test.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jwt_test + +import ( + "context" + + "golang.org/x/oauth2/jwt" +) + +func ExampleJWTConfig() { + ctx := context.Background() + conf := &jwt.Config{ + Email: "xxx@developer.com", + // The contents of your RSA private key or your PEM file + // that contains a private key. + // If you have a p12 file instead, you + // can use `openssl` to export the private key into a pem file. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + // It only supports PEM containers with no passphrase. + PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."), + Subject: "user@example.com", + TokenURL: "https://provider.com/o/oauth2/token", + } + // Initiate an http.Client; the following GET request will be + // authorized and authenticated on behalf of user@example.com. + client := conf.Client(ctx) + client.Get("...") +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 0000000..e08f315 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM.
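+ // The -nodes flag writes the exported key without a passphrase.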
+ // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
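+ // A typical success body, mirroring the fixtures in jwt_test.go, looks like:
+ // {"access_token": "...", "token_type": "Bearer", "expires_in": 3600}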
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt_test.go b/vendor/golang.org/x/oauth2/jwt/jwt_test.go new file mode 100644 index 0000000..1fbb9aa --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt_test.go @@ -0,0 +1,221 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jwt + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/jws" +) + +var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE +DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd +mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ +5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== +-----END RSA PRIVATE KEY-----`) + +func TestJWTFetch_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": 
"90d64460d14870c08c81352a05dedd3465940a7c", + "scope": "user", + "token_type": "bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatal(err) + } + if !tok.Valid() { + t.Errorf("got invalid token: %v", tok) + } + if got, want := tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c"; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + if got, want := tok.TokenType, "bearer"; got != want { + t.Errorf("token type = %q; want %q", got, want) + } + if got := tok.Expiry.IsZero(); got { + t.Errorf("token expiry = %v, want none", got) + } + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("scope = %q; want %q", got, want) + } +} + +func TestJWTFetch_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatal(err) + } + if tok == nil { + t.Fatalf("got nil token; want token") + } + if tok.Valid() { + t.Errorf("got invalid token: %v", tok) + } + if got, want := tok.AccessToken, ""; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + if got, want := tok.TokenType, "bearer"; got != want { + t.Errorf("token type = %q; want %q", got, want) + } + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("token scope = %q; want %q", got, want) + } +} + +func TestJWTFetch_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + tok, err := conf.TokenSource(context.Background()).Token() + if err == nil { + t.Error("got a token; expected error") + if got, want := tok.AccessToken, ""; got != want { + t.Errorf("access token = %q; want %q", got, want) + } + } +} + +func TestJWTFetch_Assertion(t *testing.T) { + var assertion string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + assertion = r.Form.Get("assertion") + + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{ + "access_token": "90d64460d14870c08c81352a05dedd3465940a7c", + "scope": "user", + "token_type": "bearer", + "expires_in": 3600 + }`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + PrivateKeyID: "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + TokenURL: ts.URL, + } + + _, err := conf.TokenSource(context.Background()).Token() + if err != nil { + t.Fatalf("Failed to fetch token: %v", err) + } + + parts := strings.Split(assertion, ".") + if len(parts) != 3 { + t.Fatalf("assertion = %q; want 3 parts", assertion) + } + gotjson, err := base64.RawURLEncoding.DecodeString(parts[0]) + if err != nil { + t.Fatalf("invalid token header; err = %v", err) + } + + got := jws.Header{} + if err := json.Unmarshal(gotjson, &got); err != nil { + 
t.Errorf("failed to unmarshal json token header = %q; err = %v", gotjson, err) + } + + want := jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + } + if got != want { + t.Errorf("access token header = %q; want %q", got, want) + } +} + +func TestTokenRetrieveError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-type", "application/json") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error": "invalid_grant"}`)) + })) + defer ts.Close() + + conf := &Config{ + Email: "aaa@xxx.com", + PrivateKey: dummyPrivateKey, + TokenURL: ts.URL, + } + + _, err := conf.TokenSource(context.Background()).Token() + if err == nil { + t.Fatalf("got no error, expected one") + } + _, ok := err.(*oauth2.RetrieveError) + if !ok { + t.Fatalf("got %T error, expected *RetrieveError", err) + } + // Test error string for backwards compatibility + expected := fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", "400 Bad Request", `{"error": "invalid_grant"}`) + if errStr := err.Error(); errStr != expected { + t.Fatalf("got %#v, expected %#v", errStr, expected) + } +} diff --git a/vendor/golang.org/x/oauth2/linkedin/linkedin.go b/vendor/golang.org/x/oauth2/linkedin/linkedin.go new file mode 100644 index 0000000..b619f93 --- /dev/null +++ b/vendor/golang.org/x/oauth2/linkedin/linkedin.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linkedin provides constants for using OAuth2 to access LinkedIn. +package linkedin // import "golang.org/x/oauth2/linkedin" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is LinkedIn's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", + TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", +} diff --git a/vendor/golang.org/x/oauth2/mailru/mailru.go b/vendor/golang.org/x/oauth2/mailru/mailru.go new file mode 100644 index 0000000..dddd9dd --- /dev/null +++ b/vendor/golang.org/x/oauth2/mailru/mailru.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mailru provides constants for using OAuth2 to access Mail.Ru. +package mailru // import "golang.org/x/oauth2/mailru" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Mail.Ru's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://o2.mail.ru/login", + TokenURL: "https://o2.mail.ru/token", +} diff --git a/vendor/golang.org/x/oauth2/mediamath/mediamath.go b/vendor/golang.org/x/oauth2/mediamath/mediamath.go new file mode 100644 index 0000000..3ebce5d --- /dev/null +++ b/vendor/golang.org/x/oauth2/mediamath/mediamath.go @@ -0,0 +1,22 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mediamath provides constants for using OAuth2 to access MediaMath. +package mediamath // import "golang.org/x/oauth2/mediamath" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is MediaMath's OAuth 2.0 endpoint for production. 
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://api.mediamath.com/oauth2/v1.0/authorize", + TokenURL: "https://api.mediamath.com/oauth2/v1.0/token", +} + +// SandboxEndpoint is MediaMath's OAuth 2.0 endpoint for sandbox. +var SandboxEndpoint = oauth2.Endpoint{ + AuthURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/authorize", + TokenURL: "https://t1sandbox.mediamath.com/oauth2/v1.0/token", +} diff --git a/vendor/golang.org/x/oauth2/microsoft/microsoft.go b/vendor/golang.org/x/oauth2/microsoft/microsoft.go new file mode 100644 index 0000000..3ffbc57 --- /dev/null +++ b/vendor/golang.org/x/oauth2/microsoft/microsoft.go @@ -0,0 +1,31 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package microsoft provides constants for using OAuth2 to access Windows Live ID. +package microsoft // import "golang.org/x/oauth2/microsoft" + +import ( + "golang.org/x/oauth2" +) + +// LiveConnectEndpoint is Windows's Live ID OAuth 2.0 endpoint. +var LiveConnectEndpoint = oauth2.Endpoint{ + AuthURL: "https://login.live.com/oauth20_authorize.srf", + TokenURL: "https://login.live.com/oauth20_token.srf", +} + +// AzureADEndpoint returns a new oauth2.Endpoint for the given tenant at Azure Active Directory. +// If tenant is empty, it uses the tenant called `common`. +// +// For more information see: +// https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-v2-protocols#endpoints +func AzureADEndpoint(tenant string) oauth2.Endpoint { + if tenant == "" { + tenant = "common" + } + return oauth2.Endpoint{ + AuthURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/authorize", + TokenURL: "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/token", + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 0000000..4bafe87 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,344 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. 
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint contains the OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string +} + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-zero string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. 
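Before the implementation below, a sketch of the intended call pattern; the handler shape and the state value are illustrative, and real code must use a random, session-bound state:

```go
package example

import (
	"net/http"

	"golang.org/x/oauth2"
)

// loginHandler redirects the user to the provider's consent page.
func loginHandler(conf *oauth2.Config) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		url := conf.AuthCodeURL("random-state",
			oauth2.AccessTypeOffline, // request a refresh token
			oauth2.ApprovalForce)     // always show the consent dialog
		http.Redirect(w, r, url, http.StatusFound)
	}
}
```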
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + "redirect_uri": internal.CondVal(c.RedirectURL), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. 
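The warning above is why callers go through Config.TokenSource, which wraps this refresher in a synchronized reuseTokenSource; a sketch of resuming from a stored token (names are illustrative, and saved is assumed to be a non-nil, previously persisted token):

```go
package example

import (
	"context"
	"log"

	"golang.org/x/oauth2"
)

// resumeSession wraps a token restored from storage so that an expired
// access token is renewed with the saved refresh token.
func resumeSession(conf *oauth2.Config, saved *oauth2.Token) {
	src := conf.TokenSource(context.Background(), saved)
	tok, err := src.Token() // refreshes via tokenRefresher if saved expired
	if err != nil {
		log.Fatal(err)
	}
	if tok.RefreshToken != saved.RefreshToken {
		// Persist the replacement: some providers rotate refresh tokens.
	}
}
```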
+func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + c, err := internal.ContextClient(ctx) + if err != nil { + return &http.Client{Transport: internal.ErrorTransport{Err: err}} + } + return c + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextTransport(ctx), + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. 
+ // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/oauth2_test.go b/vendor/golang.org/x/oauth2/oauth2_test.go new file mode 100644 index 0000000..14f041d --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2_test.go @@ -0,0 +1,516 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "golang.org/x/net/context" +) + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + RedirectURL: "REDIRECT_URL", + Scopes: []string{"scope1", "scope2"}, + Endpoint: Endpoint{ + AuthURL: url + "/auth", + TokenURL: url + "/token", + }, + } +} + +func TestAuthCodeURL(t *testing.T) { + conf := newConf("server") + url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce) + const want = "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" + if got := url; got != want { + t.Errorf("got auth code URL = %q; want %q", got, want) + } +} + +func TestAuthCodeURL_CustomParam(t *testing.T) { + conf := newConf("server") + param := SetAuthURLParam("foo", "bar") + url := conf.AuthCodeURL("baz", param) + const want = "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" + if got := url; got != want { + t.Errorf("got auth code = %q; want %q", got, want) + } +} + +func TestAuthCodeURL_Optional(t *testing.T) { + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "/auth-url", + TokenURL: "/token-url", + }, + } + url := conf.AuthCodeURL("") + const want = "/auth-url?client_id=CLIENT_ID&response_type=code" + if got := url; got != want { + t.Fatalf("got auth code = %q; want %q", got, want) + } +} + +func TestURLUnsafeClientConfig(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), "Basic Q0xJRU5UX0lEJTNGJTNGOkNMSUVOVF9TRUNSRVQlM0YlM0Y="; got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + conf.ClientID = "CLIENT_ID??" + conf.ClientSecret = "CLIENT_SECRET??" 
+ _, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } +} + +func TestExchangeRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } +} + +func TestExchangeRequest_JSONResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected exchange request URL, %v is found.", r.URL) + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + if string(body) != "code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL" { + t.Errorf("Unexpected exchange payload, %v is found.", string(body)) + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "exchange-code") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } + if tok.TokenType != "bearer" { + t.Errorf("Unexpected token type, %#v.", tok.TokenType) + } + scope := tok.Extra("scope") + if scope != "user" { + t.Errorf("Unexpected value for scope: %v", scope) + } + expiresIn := tok.Extra("expires_in") + if expiresIn != float64(86400) { + t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn) + } +} + +func TestExtraValueRetrieval(t *testing.T) { + values := url.Values{} + kvmap := map[string]string{ + "scope": "user", "token_type": "bearer", "expires_in": "86400.92", + "server_time": "1443571905.5606415", "referer_ip": "10.0.0.1", + "etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400", + "untrimmed": " untrimmed ", + } + for key, value := range kvmap { + values.Set(key, value) + } + + tok := Token{raw: values} + scope := tok.Extra("scope") + if got, want := scope, "user"; got != want { + t.Errorf("got scope = %q; want %q", got, want) + } + serverTime := tok.Extra("server_time") + if got, want := serverTime, 1443571905.5606415; got != want { + t.Errorf("got server_time value = %v; want %v", got, want) + } + refererIP := tok.Extra("referer_ip") + if got, want := refererIP, "10.0.0.1"; got != want { + t.Errorf("got referer_ip value = %v, want %v", got, want) + } + expiresIn := tok.Extra("expires_in") + if got, want := expiresIn, 86400.92; got != want { + t.Errorf("got expires_in value = %v, want %v", got, want) + } + requestID := tok.Extra("request_id") + if got, want := requestID, int64(86400); got != want { + t.Errorf("got request_id value = %v, want %v", got, want) + } + untrimmed := tok.Extra("untrimmed") + if got, want := untrimmed, " untrimmed "; got != want { + t.Errorf("got untrimmed = %q; want %q", got, want) + } +} + +const day = 24 * time.Hour + +func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { + seconds := int32(day.Seconds()) + for _, c := range []struct { + expires string + want bool + }{ + {fmt.Sprintf(`"expires_in": %d`, seconds), true}, + {fmt.Sprintf(`"expires_in": "%d"`, seconds), true}, // PayPal case + {fmt.Sprintf(`"expires": %d`, seconds), true}, // Facebook case + {`"expires": false`, false}, // wrong type + {`"expires": {}`, false}, // wrong type + {`"expires": "zzz"`, false}, // wrong value + } { + testExchangeRequest_JSONResponse_expiry(t, c.expires, c.want) + } +} + +func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, want bool) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) + })) + defer ts.Close() + conf := newConf(ts.URL) + t1 := time.Now().Add(day) + tok, err := conf.Exchange(context.Background(), "exchange-code") + t2 := time.Now().Add(day) + + if got := (err == nil); got != want { + if want { + t.Errorf("unexpected error: got %v", err) + } else { + t.Errorf("unexpected success") + } + } + if !want { + return + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + expiry := tok.Expiry + if expiry.Before(t1) || expiry.After(t2) { + t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2) + } +} + +func TestExchangeRequest_BadResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Exchange(context.Background(), "code") + if err != nil { + t.Fatal(err) + } + if tok.AccessToken != "" { + t.Errorf("Unexpected access token, %#v.", tok.AccessToken) + } +} + +func TestExchangeRequest_BadResponseType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + _, err := conf.Exchange(context.Background(), "exchange-code") + if err == nil { + t.Error("expected error from invalid access_token type") + } +} + +func TestExchangeRequest_NonBasicAuth(t *testing.T) { + tr := &mockTransport{ + rt: func(r *http.Request) (w *http.Response, err error) { + headerAuth := r.Header.Get("Authorization") + if headerAuth != "" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + return nil, errors.New("no response") + }, + } + c := &http.Client{Transport: tr} + conf := &Config{ + ClientID: "CLIENT_ID", + Endpoint: Endpoint{ + AuthURL: "https://accounts.google.com/auth", + TokenURL: "https://accounts.google.com/token", + }, + } + + ctx := context.WithValue(context.Background(), HTTPClient, c) + conf.Exchange(ctx, "code") +} + +func TestPasswordCredentialsTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + expected := "/token" + if r.URL.String() != expected { + t.Errorf("URL = %q; want %q", r.URL, expected) + } + headerAuth := r.Header.Get("Authorization") + expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" + if headerAuth != expected { + t.Errorf("Authorization header = %q; want %q", headerAuth, expected) + } + headerContentType := r.Header.Get("Content-Type") + expected = "application/x-www-form-urlencoded" + if headerContentType != expected { + t.Errorf("Content-Type header = %q; want %q", headerContentType, expected) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + expected = "grant_type=password&password=password1&scope=scope1+scope2&username=user1" + if string(body) != expected { + t.Errorf("res.Body = %q; want %q", string(body), expected) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.PasswordCredentialsToken(context.Background(), "user1", "password1") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid. 
Got: %#v", tok) + } + expected := "90d64460d14870c08c81352a05dedd3465940a7c" + if tok.AccessToken != expected { + t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) + } + expected = "bearer" + if tok.TokenType != expected { + t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background(), &Token{RefreshToken: "REFRESH_TOKEN"}) + c.Get(ts.URL + "/somethingelse") +} + +func TestFetchWithNoRefreshToken(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(context.Background(), nil) + _, err := c.Get(ts.URL + "/somethingelse") + if err == nil { + t.Errorf("Fetch should return an error if no refresh token is set") + } +} + +func TestTokenRetrieveError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + w.Header().Set("Content-type", "application/json") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error": "invalid_grant"}`)) + })) + defer ts.Close() + conf := newConf(ts.URL) + _, err := conf.Exchange(context.Background(), "exchange-code") + if err == nil { + t.Fatalf("got no error, expected one") + } + _, ok := err.(*RetrieveError) + if !ok { + t.Fatalf("got %T error, expected *RetrieveError", err) + } + // Test error string for backwards compatibility + expected := fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", "400 Bad Request", `{"error": "invalid_grant"}`) + if errStr := err.Error(); errStr != expected { + t.Fatalf("got %#v, expected %#v", errStr, expected) + } +} + +func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS_TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW_REFRESH_TOKEN"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + tkr := &tokenRefresher{ + conf: conf, + ctx: context.Background(), + 
refreshToken: "OLD_REFRESH_TOKEN", + } + tk, err := tkr.Token() + if err != nil { + t.Errorf("got err = %v; want none", err) + return + } + if tk.RefreshToken != tkr.refreshToken { + t.Errorf("tokenRefresher.refresh_token = %q; want %q", tkr.refreshToken, tk.RefreshToken) + } +} + +func TestRefreshToken_RefreshTokenPreservation(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS_TOKEN", "scope": "user", "token_type": "bearer"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + const oldRefreshToken = "OLD_REFRESH_TOKEN" + tkr := &tokenRefresher{ + conf: conf, + ctx: context.Background(), + refreshToken: oldRefreshToken, + } + _, err := tkr.Token() + if err != nil { + t.Fatalf("got err = %v; want none", err) + } + if tkr.refreshToken != oldRefreshToken { + t.Errorf("tokenRefresher.refreshToken = %q; want %q", tkr.refreshToken, oldRefreshToken) + } +} + +func TestConfigClientWithToken(t *testing.T) { + tok := &Token{ + AccessToken: "abc123", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + return + })) + defer ts.Close() + conf := newConf(ts.URL) + + c := conf.Client(context.Background(), tok) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Error(err) + } + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git a/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go new file mode 100644 index 0000000..c0d093c --- /dev/null +++ b/vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. +package odnoklassniki // import "golang.org/x/oauth2/odnoklassniki" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Odnoklassniki's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", + TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", +} diff --git a/vendor/golang.org/x/oauth2/paypal/paypal.go b/vendor/golang.org/x/oauth2/paypal/paypal.go new file mode 100644 index 0000000..2e713c5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/paypal/paypal.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package paypal provides constants for using OAuth2 to access PayPal. +package paypal // import "golang.org/x/oauth2/paypal" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", +} + +// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. 
+var SandboxEndpoint = oauth2.Endpoint{ + AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice", +} diff --git a/vendor/golang.org/x/oauth2/slack/slack.go b/vendor/golang.org/x/oauth2/slack/slack.go new file mode 100644 index 0000000..593d2f6 --- /dev/null +++ b/vendor/golang.org/x/oauth2/slack/slack.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slack provides constants for using OAuth2 to access Slack. +package slack // import "golang.org/x/oauth2/slack" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Slack's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://slack.com/oauth/authorize", + TokenURL: "https://slack.com/api/oauth.access", +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 0000000..34db8cd --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,175 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. 
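For code that builds its own requests instead of going through Transport or Config.Client, a sketch with a placeholder token:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "abc123", TokenType: "bearer"} // placeholder
	req, err := http.NewRequest("GET", "https://api.example.com/v1/me", nil)
	if err != nil {
		panic(err)
	}
	tok.SetAuthHeader(req) // sets "Authorization: Bearer abc123"
	fmt.Println(req.Header.Get("Authorization"))
}
```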
+func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*RetrieveError)(rErr) + } + return nil, err + } + return tokenFromInternal(tk), nil +} + +// RetrieveError is the error returned when the token endpoint returns a +// non-2XX HTTP status code. +type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/token_test.go b/vendor/golang.org/x/oauth2/token_test.go new file mode 100644 index 0000000..80db83c --- /dev/null +++ b/vendor/golang.org/x/oauth2/token_test.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
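The RetrieveError type above keeps the endpoint's raw response available; a sketch of inspecting it at a call site (conf and code are assumed to come from the three-legged flow sketched earlier):

```go
package example

import (
	"context"
	"log"

	"golang.org/x/oauth2"
)

// exchangeWithDiagnostics distinguishes token-endpoint rejections from
// transport-level failures using the RetrieveError type defined above.
func exchangeWithDiagnostics(conf *oauth2.Config, code string) (*oauth2.Token, error) {
	tok, err := conf.Exchange(context.Background(), code)
	if rErr, ok := err.(*oauth2.RetrieveError); ok {
		// Non-2xx from the token endpoint: the status line and the
		// (possibly truncated) response body survive for logging.
		log.Printf("token endpoint returned %s: %s", rErr.Response.Status, rErr.Body)
	}
	return tok, err
}
```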
+ +package oauth2 + +import ( + "testing" + "time" +) + +func TestTokenExtra(t *testing.T) { + type testCase struct { + key string + val interface{} + want interface{} + } + const key = "extra-key" + cases := []testCase{ + {key: key, val: "abc", want: "abc"}, + {key: key, val: 123, want: 123}, + {key: key, val: "", want: ""}, + {key: "other-key", val: "def", want: nil}, + } + for _, tc := range cases { + extra := make(map[string]interface{}) + extra[tc.key] = tc.val + tok := &Token{raw: extra} + if got, want := tok.Extra(key), tc.want; got != want { + t.Errorf("Extra(%q) = %q; want %q", key, got, want) + } + } +} + +func TestTokenExpiry(t *testing.T) { + now := time.Now() + cases := []struct { + name string + tok *Token + want bool + }{ + {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false}, + {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true}, + {name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true}, + } + for _, tc := range cases { + if got, want := tc.tok.expired(), tc.want; got != want { + t.Errorf("expired (%q) = %v; want %v", tc.name, got, want) + } + } +} + +func TestTokenTypeMethod(t *testing.T) { + cases := []struct { + name string + tok *Token + want string + }{ + {name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"}, + {name: "default-bearer", tok: &Token{}, want: "Bearer"}, + {name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"}, + {name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"}, + {name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"}, + {name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"}, + {name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"}, + } + for _, tc := range cases { + if got, want := tc.tok.Type(), tc.want; got != want { + t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want) + } + } +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 0000000..92ac7e2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,132 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. 
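Before the implementation, a sketch of driving Transport directly with a fixed token; the token and URL are placeholders, and most code should prefer Config.Client:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "abc"}) // placeholder token
	client := &http.Client{
		// Base is left nil, so http.DefaultTransport is used.
		Transport: &oauth2.Transport{Source: src},
	}
	resp, err := client.Get("https://api.example.com/resource") // assumed URL
	if err != nil {
		fmt.Println(err)
		return
	}
	resp.Body.Close()
	fmt.Println(resp.Status)
}
```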
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/oauth2/transport_test.go b/vendor/golang.org/x/oauth2/transport_test.go new file mode 100644 index 0000000..d6e8087 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport_test.go @@ -0,0 +1,108 @@ +package oauth2 + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" +) + +type tokenSource struct{ token *Token } + +func (t *tokenSource) Token() (*Token, error) { + return t.token, nil +} + +func TestTransportNilTokenSource(t *testing.T) { + tr := &Transport{} + server := newMockServer(func(w http.ResponseWriter, r *http.Request) {}) + defer server.Close() + client := &http.Client{Transport: tr} + resp, err := client.Get(server.URL) + if err == nil { + t.Errorf("got no errors, want an error with nil token source") + } + if resp != nil { + t.Errorf("Response = %v; want nil", resp) + } +} + +func TestTransportTokenSource(t *testing.T) { + ts := &tokenSource{ + token: &Token{ + AccessToken: "abc", + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), "Bearer abc"; got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} + +// Test for case-sensitive token types, per 
https://github.com/golang/oauth2/issues/113 +func TestTransportTokenSourceTypes(t *testing.T) { + const val = "abc" + tests := []struct { + key string + val string + want string + }{ + {key: "bearer", val: val, want: "Bearer abc"}, + {key: "mac", val: val, want: "MAC abc"}, + {key: "basic", val: val, want: "Basic abc"}, + } + for _, tc := range tests { + ts := &tokenSource{ + token: &Token{ + AccessToken: tc.val, + TokenType: tc.key, + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), tc.want; got != want { + t.Errorf("Authorization header (%q) = %q; want %q", val, got, want) + } + }) + defer server.Close() + client := &http.Client{Transport: tr} + res, err := client.Get(server.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } +} + +func TestTokenValidNoAccessToken(t *testing.T) { + token := &Token{} + if token.Valid() { + t.Errorf("got valid with no access token; want invalid") + } +} + +func TestExpiredWithExpiry(t *testing.T) { + token := &Token{ + Expiry: time.Now().Add(-5 * time.Hour), + } + if token.Valid() { + t.Errorf("got valid with expired token; want invalid") + } +} + +func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server { + return httptest.NewServer(http.HandlerFunc(handler)) +} diff --git a/vendor/golang.org/x/oauth2/twitch/twitch.go b/vendor/golang.org/x/oauth2/twitch/twitch.go new file mode 100644 index 0000000..8c5f06a --- /dev/null +++ b/vendor/golang.org/x/oauth2/twitch/twitch.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package twitch provides constants for using OAuth2 to access Twitch. +package twitch // import "golang.org/x/oauth2/twitch" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Twitch's OAuth 2.0 endpoint. +// +// For more information see: +// https://dev.twitch.tv/docs/authentication +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://api.twitch.tv/kraken/oauth2/authorize", + TokenURL: "https://api.twitch.tv/kraken/oauth2/token", +} diff --git a/vendor/golang.org/x/oauth2/uber/uber.go b/vendor/golang.org/x/oauth2/uber/uber.go new file mode 100644 index 0000000..5520a64 --- /dev/null +++ b/vendor/golang.org/x/oauth2/uber/uber.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uber provides constants for using OAuth2 to access Uber. +package uber // import "golang.org/x/oauth2/uber" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Uber's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://login.uber.com/oauth/v2/authorize", + TokenURL: "https://login.uber.com/oauth/v2/token", +} diff --git a/vendor/golang.org/x/oauth2/vk/vk.go b/vendor/golang.org/x/oauth2/vk/vk.go new file mode 100644 index 0000000..bd8e159 --- /dev/null +++ b/vendor/golang.org/x/oauth2/vk/vk.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vk provides constants for using OAuth2 to access VK.com. +package vk // import "golang.org/x/oauth2/vk" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is VK's OAuth 2.0 endpoint. 
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.vk.com/authorize", + TokenURL: "https://oauth.vk.com/access_token", +} diff --git a/vendor/golang.org/x/oauth2/yahoo/yahoo.go b/vendor/golang.org/x/oauth2/yahoo/yahoo.go new file mode 100644 index 0000000..9fa78a2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/yahoo/yahoo.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package yahoo provides constants for using OAuth2 to access Yahoo. +package yahoo // import "golang.org/x/oauth2/yahoo" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Yahoo's OAuth 2.0 endpoint. +// See https://developer.yahoo.com/oauth2/guide/ +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://api.login.yahoo.com/oauth2/request_auth", + TokenURL: "https://api.login.yahoo.com/oauth2/get_token", +} diff --git a/vendor/golang.org/x/oauth2/yandex/yandex.go b/vendor/golang.org/x/oauth2/yandex/yandex.go new file mode 100644 index 0000000..5ebf666 --- /dev/null +++ b/vendor/golang.org/x/oauth2/yandex/yandex.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package yandex provides constants for using OAuth2 to access Yandex APIs. +package yandex // import "golang.org/x/oauth2/yandex" + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is the Yandex OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.yandex.com/authorize", + TokenURL: "https://oauth.yandex.com/token", +} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml new file mode 100644 index 0000000..0762cb9 --- /dev/null +++ b/vendor/google.golang.org/appengine/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6.3 + - 1.7.1 + +install: + - go get -v -t -d google.golang.org/appengine/... + - mkdir sdk + - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.40.zip" + - unzip -q sdk.zip -d sdk + - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py + +script: + - go version + - go test -v google.golang.org/appengine/... + - go test -v -race google.golang.org/appengine/... + - sdk/go_appengine/goapp test -v google.golang.org/appengine/... diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md new file mode 100644 index 0000000..b6b11d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/README.md @@ -0,0 +1,73 @@ +# Go App Engine packages + +[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) + +This repository supports the Go runtime on App Engine, +including both the standard App Engine and the +"App Engine flexible environment" (formerly known as "Managed VMs"). +It provides APIs for interacting with App Engine services. +Its canonical import path is `google.golang.org/appengine`. + +See https://cloud.google.com/appengine/docs/go/ +for more information. + +File issue reports and feature requests on the [Google App Engine issue +tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). + +## Directory structure +The top level directory of this repository is the `appengine` package. It +contains the +basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API +packages are in subdirectories (e.g. `datastore`). + +There is an `internal` subdirectory that contains service protocol buffers, +plus packages required for connectivity to make API calls. App Engine apps +should not directly import any package under `internal`. + +## Updating a Go App Engine app + +This section describes how to update an older Go App Engine app to use +these packages. A provided tool, `aefix`, can help automate steps 2 and 3 +(run `go get google.golang.org/appengine/cmd/aefix` to install it), but +read the details below since `aefix` can't perform all the changes. + +### 1. Update YAML files (App Engine flexible environment / Managed VMs only) + +The `app.yaml` file (and YAML files for modules) should have these new lines added: +``` +vm: true +``` +See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details. + +### 2. Update import paths + +The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. +You will need to update your code to use import paths starting with that; for instance, +code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. + +### 3. Update code using deprecated, removed or modified APIs + +Most App Engine services are available with exactly the same API. +A few APIs were cleaned up, and some are not available yet. +This list summarises the differences: + +* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. +* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. +* `appengine.Datacenter` now takes a `context.Context` argument. +* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. +* `delay.Call` now returns an error. +* `search.FieldLoadSaver` now handles document metadata. +* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the + `context.Context` instead. +* `aetest` no longer declares its own Context type, and uses the standard one instead. +* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been + deprecated and unused for a long time. +* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. 
+ Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. +* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. + Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the + feature you require is not present in the new + [blobstore package](https://google.golang.org/appengine/blobstore). +* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. + Use the standard `net` package instead. diff --git a/vendor/google.golang.org/appengine/aetest/doc.go b/vendor/google.golang.org/appengine/aetest/doc.go new file mode 100644 index 0000000..86ce8c2 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/doc.go @@ -0,0 +1,42 @@ +/* +Package aetest provides an API for running dev_appserver for use in tests. + +An example test file: + + package foo_test + + import ( + "testing" + + "google.golang.org/appengine/memcache" + "google.golang.org/appengine/aetest" + ) + + func TestFoo(t *testing.T) { + ctx, done, err := aetest.NewContext() + if err != nil { + t.Fatal(err) + } + defer done() + + it := &memcache.Item{ + Key: "some-key", + Value: []byte("some-value"), + } + err = memcache.Set(ctx, it) + if err != nil { + t.Fatalf("Set err: %v", err) + } + it, err = memcache.Get(ctx, "some-key") + if err != nil { + t.Fatalf("Get err: %v; want no error", err) + } + if g, w := string(it.Value), "some-value" ; g != w { + t.Errorf("retrieved Item.Value = %q, want %q", g, w) + } + } + +The environment variable APPENGINE_DEV_APPSERVER specifies the location of the +dev_appserver.py executable to use. If unset, the system PATH is consulted. +*/ +package aetest diff --git a/vendor/google.golang.org/appengine/aetest/instance.go b/vendor/google.golang.org/appengine/aetest/instance.go new file mode 100644 index 0000000..a8f99d8 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance.go @@ -0,0 +1,51 @@ +package aetest + +import ( + "io" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/appengine" +) + +// Instance represents a running instance of the development API Server. +type Instance interface { + // Close kills the child api_server.py process, releasing its resources. + io.Closer + // NewRequest returns an *http.Request associated with this instance. + NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) +} + +// Options is used to specify options when creating an Instance. +type Options struct { + // AppID specifies the App ID to use during tests. + // By default, "testapp". + AppID string + // StronglyConsistentDatastore is whether the local datastore should be + // strongly consistent. This will diverge from production behaviour. + StronglyConsistentDatastore bool +} + +// NewContext starts an instance of the development API server, and returns +// a context that will route all API calls to that server, as well as a +// closure that must be called when the Context is no longer required. +func NewContext() (context.Context, func(), error) { + inst, err := NewInstance(nil) + if err != nil { + return nil, nil, err + } + req, err := inst.NewRequest("GET", "/", nil) + if err != nil { + inst.Close() + return nil, nil, err + } + ctx := appengine.NewContext(req) + return ctx, func() { + inst.Close() + }, nil +} + +// PrepareDevAppserver is a hook which, if set, will be called before the +// dev_appserver.py is started, each time it is started. If aetest.NewContext +// is invoked from the goapp test tool, this hook is unnecessary. 
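For a test binary run outside the goapp tool, the hook can point the instance at a locally installed SDK. A minimal sketch, assuming a hypothetical SDK path:

```go
// Hypothetical SDK location; adjust for your environment.
aetest.PrepareDevAppserver = func() error {
	return os.Setenv("APPENGINE_DEV_APPSERVER",
		"/opt/google_appengine/dev_appserver.py")
}
```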
+var PrepareDevAppserver func() error diff --git a/vendor/google.golang.org/appengine/aetest/instance_classic.go b/vendor/google.golang.org/appengine/aetest/instance_classic.go new file mode 100644 index 0000000..fbceaa5 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance_classic.go @@ -0,0 +1,21 @@ +// +build appengine + +package aetest + +import "appengine/aetest" + +// NewInstance launches a running instance of api_server.py which can be used +// for multiple test Contexts that delegate all App Engine API calls to that +// instance. +// If opts is nil the default values are used. +func NewInstance(opts *Options) (Instance, error) { + aetest.PrepareDevAppserver = PrepareDevAppserver + var aeOpts *aetest.Options + if opts != nil { + aeOpts = &aetest.Options{ + AppID: opts.AppID, + StronglyConsistentDatastore: opts.StronglyConsistentDatastore, + } + } + return aetest.NewInstance(aeOpts) +} diff --git a/vendor/google.golang.org/appengine/aetest/instance_test.go b/vendor/google.golang.org/appengine/aetest/instance_test.go new file mode 100644 index 0000000..edc3ecd --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance_test.go @@ -0,0 +1,116 @@ +package aetest + +import ( + "os" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/datastore" + "google.golang.org/appengine/memcache" + "google.golang.org/appengine/user" +) + +func TestBasicAPICalls(t *testing.T) { + // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set. + if os.Getenv("APPENGINE_DEV_APPSERVER") == "" { + t.Skip("APPENGINE_DEV_APPSERVER not set") + } + + inst, err := NewInstance(nil) + if err != nil { + t.Fatalf("NewInstance: %v", err) + } + defer inst.Close() + + req, err := inst.NewRequest("GET", "http://example.com/page", nil) + if err != nil { + t.Fatalf("NewRequest: %v", err) + } + ctx := appengine.NewContext(req) + + it := &memcache.Item{ + Key: "some-key", + Value: []byte("some-value"), + } + err = memcache.Set(ctx, it) + if err != nil { + t.Fatalf("Set err: %v", err) + } + it, err = memcache.Get(ctx, "some-key") + if err != nil { + t.Fatalf("Get err: %v; want no error", err) + } + if g, w := string(it.Value), "some-value"; g != w { + t.Errorf("retrieved Item.Value = %q, want %q", g, w) + } + + type Entity struct{ Value string } + e := &Entity{Value: "foo"} + k := datastore.NewIncompleteKey(ctx, "Entity", nil) + k, err = datastore.Put(ctx, k, e) + if err != nil { + t.Fatalf("datastore.Put: %v", err) + } + e = new(Entity) + if err := datastore.Get(ctx, k, e); err != nil { + t.Fatalf("datastore.Get: %v", err) + } + if g, w := e.Value, "foo"; g != w { + t.Errorf("retrieved Entity.Value = %q, want %q", g, w) + } +} + +func TestContext(t *testing.T) { + // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set. + if os.Getenv("APPENGINE_DEV_APPSERVER") == "" { + t.Skip("APPENGINE_DEV_APPSERVER not set") + } + + // Check that the context methods work. + _, done, err := NewContext() + if err != nil { + t.Fatalf("NewContext: %v", err) + } + done() +} + +func TestUsers(t *testing.T) { + // Only run the test if APPENGINE_DEV_APPSERVER is explicitly set. 
+ if os.Getenv("APPENGINE_DEV_APPSERVER") == "" { + t.Skip("APPENGINE_DEV_APPSERVER not set") + } + + inst, err := NewInstance(nil) + if err != nil { + t.Fatalf("NewInstance: %v", err) + } + defer inst.Close() + + req, err := inst.NewRequest("GET", "http://example.com/page", nil) + if err != nil { + t.Fatalf("NewRequest: %v", err) + } + ctx := appengine.NewContext(req) + + if user := user.Current(ctx); user != nil { + t.Errorf("user.Current initially %v, want nil", user) + } + + u := &user.User{ + Email: "gopher@example.com", + Admin: true, + } + Login(u, req) + + if got := user.Current(ctx); got.Email != u.Email { + t.Errorf("user.Current: %v, want %v", got, u) + } + if admin := user.IsAdmin(ctx); !admin { + t.Errorf("user.IsAdmin: %t, want true", admin) + } + + Logout(req) + if user := user.Current(ctx); user != nil { + t.Errorf("user.Current after logout %v, want nil", user) + } +} diff --git a/vendor/google.golang.org/appengine/aetest/instance_vm.go b/vendor/google.golang.org/appengine/aetest/instance_vm.go new file mode 100644 index 0000000..ee81480 --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/instance_vm.go @@ -0,0 +1,276 @@ +// +build !appengine + +package aetest + +import ( + "bufio" + "crypto/rand" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "time" + + "golang.org/x/net/context" + "google.golang.org/appengine/internal" +) + +// NewInstance launches a running instance of api_server.py which can be used +// for multiple test Contexts that delegate all App Engine API calls to that +// instance. +// If opts is nil the default values are used. +func NewInstance(opts *Options) (Instance, error) { + i := &instance{ + opts: opts, + appID: "testapp", + } + if opts != nil && opts.AppID != "" { + i.appID = opts.AppID + } + if err := i.startChild(); err != nil { + return nil, err + } + return i, nil +} + +func newSessionID() string { + var buf [16]byte + io.ReadFull(rand.Reader, buf[:]) + return fmt.Sprintf("%x", buf[:]) +} + +// instance implements the Instance interface. +type instance struct { + opts *Options + child *exec.Cmd + apiURL *url.URL // base URL of API HTTP server + adminURL string // base URL of admin HTTP server + appDir string + appID string + relFuncs []func() // funcs to release any associated contexts +} + +// NewRequest returns an *http.Request associated with this instance. +func (i *instance) NewRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + + // Associate this request. + release := internal.RegisterTestRequest(req, i.apiURL, func(ctx context.Context) context.Context { + ctx = internal.WithAppIDOverride(ctx, "dev~"+i.appID) + return ctx + }) + i.relFuncs = append(i.relFuncs, release) + + return req, nil +} + +// Close kills the child api_server.py process, releasing its resources. +func (i *instance) Close() (err error) { + for _, rel := range i.relFuncs { + rel() + } + i.relFuncs = nil + if i.child == nil { + return nil + } + defer func() { + i.child = nil + err1 := os.RemoveAll(i.appDir) + if err == nil { + err = err1 + } + }() + + if p := i.child.Process; p != nil { + errc := make(chan error, 1) + go func() { + errc <- i.child.Wait() + }() + + // Call the quit handler on the admin server. 
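+ // Hitting /quit on the dev_appserver admin interface asks the child to
+ // exit cleanly; the goroutine above then delivers its exit status on errc.
+ // Kill is only the fallback when the request fails or the child has not
+ // exited within 15 seconds (see the select below).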
+ res, err := http.Get(i.adminURL + "/quit") + if err != nil { + p.Kill() + return fmt.Errorf("unable to call /quit handler: %v", err) + } + res.Body.Close() + + select { + case <-time.After(15 * time.Second): + p.Kill() + return errors.New("timeout killing child process") + case err = <-errc: + // Do nothing. + } + } + return +} + +func fileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func findPython() (path string, err error) { + for _, name := range []string{"python2.7", "python"} { + path, err = exec.LookPath(name) + if err == nil { + return + } + } + return +} + +func findDevAppserver() (string, error) { + if p := os.Getenv("APPENGINE_DEV_APPSERVER"); p != "" { + if fileExists(p) { + return p, nil + } + return "", fmt.Errorf("invalid APPENGINE_DEV_APPSERVER environment variable; path %q doesn't exist", p) + } + return exec.LookPath("dev_appserver.py") +} + +var apiServerAddrRE = regexp.MustCompile(`Starting API server at: (\S+)`) +var adminServerAddrRE = regexp.MustCompile(`Starting admin server at: (\S+)`) + +func (i *instance) startChild() (err error) { + if PrepareDevAppserver != nil { + if err := PrepareDevAppserver(); err != nil { + return err + } + } + python, err := findPython() + if err != nil { + return fmt.Errorf("Could not find python interpreter: %v", err) + } + devAppserver, err := findDevAppserver() + if err != nil { + return fmt.Errorf("Could not find dev_appserver.py: %v", err) + } + + i.appDir, err = ioutil.TempDir("", "appengine-aetest") + if err != nil { + return err + } + defer func() { + if err != nil { + os.RemoveAll(i.appDir) + } + }() + err = os.Mkdir(filepath.Join(i.appDir, "app"), 0755) + if err != nil { + return err + } + err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "app.yaml"), []byte(i.appYAML()), 0644) + if err != nil { + return err + } + err = ioutil.WriteFile(filepath.Join(i.appDir, "app", "stubapp.go"), []byte(appSource), 0644) + if err != nil { + return err + } + + appserverArgs := []string{ + devAppserver, + "--port=0", + "--api_port=0", + "--admin_port=0", + "--automatic_restart=false", + "--skip_sdk_update_check=true", + "--clear_datastore=true", + "--clear_search_indexes=true", + "--datastore_path", filepath.Join(i.appDir, "datastore"), + } + if i.opts != nil && i.opts.StronglyConsistentDatastore { + appserverArgs = append(appserverArgs, "--datastore_consistency_policy=consistent") + } + appserverArgs = append(appserverArgs, filepath.Join(i.appDir, "app")) + + i.child = exec.Command(python, + appserverArgs..., + ) + i.child.Stdout = os.Stdout + var stderr io.Reader + stderr, err = i.child.StderrPipe() + if err != nil { + return err + } + stderr = io.TeeReader(stderr, os.Stderr) + if err = i.child.Start(); err != nil { + return err + } + + // Read stderr until we have read the URLs of the API server and admin interface. 
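+ // dev_appserver.py announces "Starting API server at: <addr>" and
+ // "Starting admin server at: <addr>" on stderr; the regexps above capture
+ // both addresses, and the select below bounds startup at 15 seconds.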
+ errc := make(chan error, 1) + go func() { + s := bufio.NewScanner(stderr) + for s.Scan() { + if match := apiServerAddrRE.FindStringSubmatch(s.Text()); match != nil { + u, err := url.Parse(match[1]) + if err != nil { + errc <- fmt.Errorf("failed to parse API URL %q: %v", match[1], err) + return + } + i.apiURL = u + } + if match := adminServerAddrRE.FindStringSubmatch(s.Text()); match != nil { + i.adminURL = match[1] + } + if i.adminURL != "" && i.apiURL != nil { + break + } + } + errc <- s.Err() + }() + + select { + case <-time.After(15 * time.Second): + if p := i.child.Process; p != nil { + p.Kill() + } + return errors.New("timeout starting child process") + case err := <-errc: + if err != nil { + return fmt.Errorf("error reading child process stderr: %v", err) + } + } + if i.adminURL == "" { + return errors.New("unable to find admin server URL") + } + if i.apiURL == nil { + return errors.New("unable to find API server URL") + } + return nil +} + +func (i *instance) appYAML() string { + return fmt.Sprintf(appYAMLTemplate, i.appID) +} + +const appYAMLTemplate = ` +application: %s +version: 1 +runtime: go +api_version: go1 +vm: true + +handlers: +- url: /.* + script: _go_app +` + +const appSource = ` +package main +import "google.golang.org/appengine" +func main() { appengine.Main() } +` diff --git a/vendor/google.golang.org/appengine/aetest/user.go b/vendor/google.golang.org/appengine/aetest/user.go new file mode 100644 index 0000000..bf9266f --- /dev/null +++ b/vendor/google.golang.org/appengine/aetest/user.go @@ -0,0 +1,36 @@ +package aetest + +import ( + "hash/crc32" + "net/http" + "strconv" + + "google.golang.org/appengine/user" +) + +// Login causes the provided Request to act as though issued by the given user. +func Login(u *user.User, req *http.Request) { + req.Header.Set("X-AppEngine-User-Email", u.Email) + id := u.ID + if id == "" { + id = strconv.Itoa(int(crc32.Checksum([]byte(u.Email), crc32.IEEETable))) + } + req.Header.Set("X-AppEngine-User-Id", id) + req.Header.Set("X-AppEngine-User-Federated-Identity", u.Email) + req.Header.Set("X-AppEngine-User-Federated-Provider", u.FederatedProvider) + if u.Admin { + req.Header.Set("X-AppEngine-User-Is-Admin", "1") + } else { + req.Header.Set("X-AppEngine-User-Is-Admin", "0") + } +} + +// Logout causes the provided Request to act as though issued by a logged-out +// user. +func Logout(req *http.Request) { + req.Header.Del("X-AppEngine-User-Email") + req.Header.Del("X-AppEngine-User-Id") + req.Header.Del("X-AppEngine-User-Is-Admin") + req.Header.Del("X-AppEngine-User-Federated-Identity") + req.Header.Del("X-AppEngine-User-Federated-Provider") +} diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go new file mode 100644 index 0000000..475cf2e --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine.go @@ -0,0 +1,112 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package appengine provides basic functionality for Google App Engine. +// +// For more information on how to write Go apps for Google App Engine, see: +// https://cloud.google.com/appengine/docs/go/ +package appengine // import "google.golang.org/appengine" + +import ( + "net/http" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// The gophers party all night; the rabbits provide the beats. 
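The package's basic pattern is request-scoped: each incoming request yields a context that carries App Engine API calls and logs. A hedged sketch of a typical app, assuming only the entry points documented below and the `google.golang.org/appengine/log` package:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// NewContext ties App Engine API calls and logs to this request.
		ctx := appengine.NewContext(r)
		log.Infof(ctx, "serving %s", r.URL.Path)
		fmt.Fprintln(w, "hello")
	})
	appengine.Main() // never returns
}
```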
+ +// Main is the principal entry point for an app running in App Engine. +// +// On App Engine Flexible it installs a trivial health checker if one isn't +// already registered, and starts listening on port 8080 (overridden by the +// $PORT environment variable). +// +// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests +// for details on how to do your own health checking. +// +// Main is not yet supported on App Engine Standard. +// +// Main never returns. +// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. +func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. +func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/appengine_test.go b/vendor/google.golang.org/appengine/appengine_test.go new file mode 100644 index 0000000..f1cf0a1 --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_test.go @@ -0,0 +1,49 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
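`WithAPICallFunc` above is the hook tests and instrumentation can use to intercept RPCs. A hedged sketch, assuming an existing context `ctx` and the `golang.org/x/net/context` and `github.com/golang/protobuf/proto` imports this package already uses:

```go
// Sketch: record which App Engine RPCs a code path makes.
var seen []string
ctx = appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
	seen = append(seen, service+"."+method)
	return nil // fake success; out is left as the zero response
})
```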
+ +package appengine + +import ( + "testing" +) + +func TestValidGeoPoint(t *testing.T) { + testCases := []struct { + desc string + pt GeoPoint + want bool + }{ + { + "valid", + GeoPoint{67.21, 13.37}, + true, + }, + { + "high lat", + GeoPoint{-90.01, 13.37}, + false, + }, + { + "low lat", + GeoPoint{90.01, 13.37}, + false, + }, + { + "high lng", + GeoPoint{67.21, 182}, + false, + }, + { + "low lng", + GeoPoint{67.21, -181}, + false, + }, + } + + for _, tc := range testCases { + if got := tc.pt.Valid(); got != tc.want { + t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want) + } + } +} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 0000000..f4b645a --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,20 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package appengine + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// BackgroundContext returns a context not associated with a request. +// This should only be used when not servicing a request. +// This only works in App Engine "flexible environment". +func BackgroundContext() context.Context { + return internal.BackgroundContext() +} diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore.go b/vendor/google.golang.org/appengine/blobstore/blobstore.go new file mode 100644 index 0000000..1c8087b --- /dev/null +++ b/vendor/google.golang.org/appengine/blobstore/blobstore.go @@ -0,0 +1,276 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package blobstore provides a client for App Engine's persistent blob +// storage service. +package blobstore // import "google.golang.org/appengine/blobstore" + +import ( + "bufio" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/datastore" + "google.golang.org/appengine/internal" + + basepb "google.golang.org/appengine/internal/base" + blobpb "google.golang.org/appengine/internal/blobstore" +) + +const ( + blobInfoKind = "__BlobInfo__" + blobFileIndexKind = "__BlobFileIndex__" + zeroKey = appengine.BlobKey("") +) + +// BlobInfo is the blob metadata that is stored in the datastore. +// Filename may be empty. +type BlobInfo struct { + BlobKey appengine.BlobKey + ContentType string `datastore:"content_type"` + CreationTime time.Time `datastore:"creation"` + Filename string `datastore:"filename"` + Size int64 `datastore:"size"` + MD5 string `datastore:"md5_hash"` + + // ObjectName is the Google Cloud Storage name for this blob. + ObjectName string `datastore:"gs_object_name"` +} + +// isErrFieldMismatch returns whether err is a datastore.ErrFieldMismatch. +// +// The blobstore stores blob metadata in the datastore. When loading that +// metadata, it may contain fields that we don't care about. datastore.Get will +// return datastore.ErrFieldMismatch in that case, so we ignore that specific +// error. 
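Putting the `Stat` and `Send` helpers defined below together, a hedged sketch of serving a stored blob — the query parameter name is illustrative:

```go
// Sketch: serve a blob whose key arrives as ?blobKey=...
func serveBlob(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	key := appengine.BlobKey(r.FormValue("blobKey"))
	if _, err := blobstore.Stat(ctx, key); err != nil {
		http.Error(w, "no such blob", http.StatusNotFound)
		return
	}
	blobstore.Send(w, key)
}
```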
+func isErrFieldMismatch(err error) bool { + _, ok := err.(*datastore.ErrFieldMismatch) + return ok +} + +// Stat returns the BlobInfo for a provided blobKey. If no blob was found for +// that key, Stat returns datastore.ErrNoSuchEntity. +func Stat(c context.Context, blobKey appengine.BlobKey) (*BlobInfo, error) { + c, _ = appengine.Namespace(c, "") // Blobstore is always in the empty string namespace + dskey := datastore.NewKey(c, blobInfoKind, string(blobKey), 0, nil) + bi := &BlobInfo{ + BlobKey: blobKey, + } + if err := datastore.Get(c, dskey, bi); err != nil && !isErrFieldMismatch(err) { + return nil, err + } + return bi, nil +} + +// Send sets the headers on response to instruct App Engine to send a blob as +// the response body. This is more efficient than reading and writing it out +// manually and isn't subject to normal response size limits. +func Send(response http.ResponseWriter, blobKey appengine.BlobKey) { + hdr := response.Header() + hdr.Set("X-AppEngine-BlobKey", string(blobKey)) + + if hdr.Get("Content-Type") == "" { + // This value is known to dev_appserver to mean automatic. + // In production this is remapped to the empty value which + // means automatic. + hdr.Set("Content-Type", "application/vnd.google.appengine.auto") + } +} + +// UploadURL creates an upload URL for the form that the user will +// fill out, passing the application path to load when the POST of the +// form is completed. These URLs expire and should not be reused. The +// opts parameter may be nil. +func UploadURL(c context.Context, successPath string, opts *UploadURLOptions) (*url.URL, error) { + req := &blobpb.CreateUploadURLRequest{ + SuccessPath: proto.String(successPath), + } + if opts != nil { + if n := opts.MaxUploadBytes; n != 0 { + req.MaxUploadSizeBytes = &n + } + if n := opts.MaxUploadBytesPerBlob; n != 0 { + req.MaxUploadSizePerBlobBytes = &n + } + if s := opts.StorageBucket; s != "" { + req.GsBucketName = &s + } + } + res := &blobpb.CreateUploadURLResponse{} + if err := internal.Call(c, "blobstore", "CreateUploadURL", req, res); err != nil { + return nil, err + } + return url.Parse(*res.Url) +} + +// UploadURLOptions are the options to create an upload URL. +type UploadURLOptions struct { + MaxUploadBytes int64 // optional + MaxUploadBytesPerBlob int64 // optional + + // StorageBucket specifies the Google Cloud Storage bucket in which + // to store the blob. + // This is required if you use Cloud Storage instead of Blobstore. + // Your application must have permission to write to the bucket. + // You may optionally specify a bucket name and path in the format + // "bucket_name/path", in which case the included path will be the + // prefix of the uploaded object's name. + StorageBucket string +} + +// Delete deletes a blob. +func Delete(c context.Context, blobKey appengine.BlobKey) error { + return DeleteMulti(c, []appengine.BlobKey{blobKey}) +} + +// DeleteMulti deletes multiple blobs. +func DeleteMulti(c context.Context, blobKey []appengine.BlobKey) error { + s := make([]string, len(blobKey)) + for i, b := range blobKey { + s[i] = string(b) + } + req := &blobpb.DeleteBlobRequest{ + BlobKey: s, + } + res := &basepb.VoidProto{} + if err := internal.Call(c, "blobstore", "DeleteBlob", req, res); err != nil { + return err + } + return nil +} + +func errorf(format string, args ...interface{}) error { + return fmt.Errorf("blobstore: "+format, args...) +} + +// ParseUpload parses the synthetic POST request that your app gets from +// App Engine after a user's successful upload of blobs. 
Given the request, +// ParseUpload returns a map of the blobs received (keyed by HTML form +// element name) and other non-blob POST parameters. +func ParseUpload(req *http.Request) (blobs map[string][]*BlobInfo, other url.Values, err error) { + _, params, err := mime.ParseMediaType(req.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } + boundary := params["boundary"] + if boundary == "" { + return nil, nil, errorf("did not find MIME multipart boundary") + } + + blobs = make(map[string][]*BlobInfo) + other = make(url.Values) + + mreader := multipart.NewReader(io.MultiReader(req.Body, strings.NewReader("\r\n\r\n")), boundary) + for { + part, perr := mreader.NextPart() + if perr == io.EOF { + break + } + if perr != nil { + return nil, nil, errorf("error reading next mime part with boundary %q (len=%d): %v", + boundary, len(boundary), perr) + } + + bi := &BlobInfo{} + ctype, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition")) + if err != nil { + return nil, nil, err + } + bi.Filename = params["filename"] + formKey := params["name"] + + ctype, params, err = mime.ParseMediaType(part.Header.Get("Content-Type")) + if err != nil { + return nil, nil, err + } + bi.BlobKey = appengine.BlobKey(params["blob-key"]) + if ctype != "message/external-body" || bi.BlobKey == "" { + if formKey != "" { + slurp, serr := ioutil.ReadAll(part) + if serr != nil { + return nil, nil, errorf("error reading %q MIME part", formKey) + } + other[formKey] = append(other[formKey], string(slurp)) + } + continue + } + + // App Engine sends a MIME header as the body of each MIME part. + tp := textproto.NewReader(bufio.NewReader(part)) + header, mimeerr := tp.ReadMIMEHeader() + if mimeerr != nil { + return nil, nil, mimeerr + } + bi.Size, err = strconv.ParseInt(header.Get("Content-Length"), 10, 64) + if err != nil { + return nil, nil, err + } + bi.ContentType = header.Get("Content-Type") + + // Parse the time from the MIME header like: + // X-AppEngine-Upload-Creation: 2011-03-15 21:38:34.712136 + createDate := header.Get("X-AppEngine-Upload-Creation") + if createDate == "" { + return nil, nil, errorf("expected to find an X-AppEngine-Upload-Creation header") + } + bi.CreationTime, err = time.Parse("2006-01-02 15:04:05.000000", createDate) + if err != nil { + return nil, nil, errorf("error parsing X-AppEngine-Upload-Creation: %s", err) + } + + if hdr := header.Get("Content-MD5"); hdr != "" { + md5, err := base64.URLEncoding.DecodeString(hdr) + if err != nil { + return nil, nil, errorf("bad Content-MD5 %q: %v", hdr, err) + } + bi.MD5 = string(md5) + } + + // If the GCS object name was provided, record it. + bi.ObjectName = header.Get("X-AppEngine-Cloud-Storage-Object") + + blobs[formKey] = append(blobs[formKey], bi) + } + return +} + +// Reader is a blob reader. +type Reader interface { + io.Reader + io.ReaderAt + io.Seeker +} + +// NewReader returns a reader for a blob. It always succeeds; if the blob does +// not exist then an error will be reported upon first read. +func NewReader(c context.Context, blobKey appengine.BlobKey) Reader { + return openBlob(c, blobKey) +} + +// BlobKeyForFile returns a BlobKey for a Google Storage file. +// The filename should be of the form "/gs/bucket_name/object_name". 
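The `UploadURL` and `ParseUpload` helpers above combine into a round trip: render a form that posts to the generated URL, then handle App Engine's synthetic callback. A hedged sketch; the paths and the form field name ("file") are illustrative:

```go
func uploadFormHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	u, err := blobstore.UploadURL(ctx, "/upload-done", nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, `<form action="%s" method="POST" enctype="multipart/form-data">`+
		`<input type="file" name="file"><input type="submit"></form>`, u)
}

func uploadDoneHandler(w http.ResponseWriter, r *http.Request) {
	blobs, _, err := blobstore.ParseUpload(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if len(blobs["file"]) == 0 {
		http.Error(w, "no file uploaded", http.StatusBadRequest)
		return
	}
	blobstore.Send(w, blobs["file"][0].BlobKey)
}
```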
+func BlobKeyForFile(c context.Context, filename string) (appengine.BlobKey, error) { + req := &blobpb.CreateEncodedGoogleStorageKeyRequest{ + Filename: &filename, + } + res := &blobpb.CreateEncodedGoogleStorageKeyResponse{} + if err := internal.Call(c, "blobstore", "CreateEncodedGoogleStorageKey", req, res); err != nil { + return "", err + } + return appengine.BlobKey(*res.BlobKey), nil +} diff --git a/vendor/google.golang.org/appengine/blobstore/blobstore_test.go b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go new file mode 100644 index 0000000..c2be7ef --- /dev/null +++ b/vendor/google.golang.org/appengine/blobstore/blobstore_test.go @@ -0,0 +1,183 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package blobstore + +import ( + "io" + "os" + "strconv" + "strings" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + + pb "google.golang.org/appengine/internal/blobstore" +) + +const rbs = readBufferSize + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func fakeFetchData(req *pb.FetchDataRequest, res *pb.FetchDataResponse) error { + i0 := int(*req.StartIndex) + i1 := int(*req.EndIndex + 1) // Blobstore's end-indices are inclusive; Go's are exclusive. + bk := *req.BlobKey + if i := strings.Index(bk, "."); i != -1 { + // Strip everything past the ".". + bk = bk[:i] + } + switch bk { + case "a14p": + const s = "abcdefghijklmnop" + i0 := min(len(s), i0) + i1 := min(len(s), i1) + res.Data = []byte(s[i0:i1]) + case "longBlob": + res.Data = make([]byte, i1-i0) + for i := range res.Data { + res.Data[i] = 'A' + uint8(i0/rbs) + i0++ + } + } + return nil +} + +// step is one step of a readerTest. +// It consists of a Reader method to call, the method arguments +// (lenp, offset, whence) and the expected results. +type step struct { + method string + lenp int + offset int64 + whence int + want string + wantErr error +} + +var readerTest = []struct { + blobKey string + step []step +}{ + {"noSuchBlobKey", []step{ + {"Read", 8, 0, 0, "", io.EOF}, + }}, + {"a14p.0", []step{ + // Test basic reads. + {"Read", 1, 0, 0, "a", nil}, + {"Read", 3, 0, 0, "bcd", nil}, + {"Read", 1, 0, 0, "e", nil}, + {"Read", 2, 0, 0, "fg", nil}, + // Test Seek. + {"Seek", 0, 2, os.SEEK_SET, "2", nil}, + {"Read", 5, 0, 0, "cdefg", nil}, + {"Seek", 0, 2, os.SEEK_CUR, "9", nil}, + {"Read", 1, 0, 0, "j", nil}, + // Test reads up to and past EOF. + {"Read", 5, 0, 0, "klmno", nil}, + {"Read", 5, 0, 0, "p", nil}, + {"Read", 5, 0, 0, "", io.EOF}, + // Test ReadAt. + {"ReadAt", 4, 0, 0, "abcd", nil}, + {"ReadAt", 4, 3, 0, "defg", nil}, + {"ReadAt", 4, 12, 0, "mnop", nil}, + {"ReadAt", 4, 13, 0, "nop", io.EOF}, + {"ReadAt", 4, 99, 0, "", io.EOF}, + }}, + {"a14p.1", []step{ + // Test Seek before any reads. + {"Seek", 0, 2, os.SEEK_SET, "2", nil}, + {"Read", 1, 0, 0, "c", nil}, + // Test that ReadAt doesn't affect the Read offset. + {"ReadAt", 3, 9, 0, "jkl", nil}, + {"Read", 3, 0, 0, "def", nil}, + }}, + {"a14p.2", []step{ + // Test ReadAt before any reads or seeks. + {"ReadAt", 2, 14, 0, "op", nil}, + }}, + {"longBlob.0", []step{ + // Test basic read. + {"Read", 1, 0, 0, "A", nil}, + // Test that Read returns early when the buffer is exhausted. + {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil}, + {"Read", 5, 0, 0, "AA", nil}, + {"Read", 3, 0, 0, "BBB", nil}, + // Test that what we just read is still in the buffer. 
+ {"Seek", 0, rbs - 2, os.SEEK_SET, strconv.Itoa(rbs - 2), nil}, + {"Read", 5, 0, 0, "AABBB", nil}, + // Test ReadAt. + {"ReadAt", 3, rbs - 4, 0, "AAA", nil}, + {"ReadAt", 6, rbs - 4, 0, "AAAABB", nil}, + {"ReadAt", 8, rbs - 4, 0, "AAAABBBB", nil}, + {"ReadAt", 5, rbs - 4, 0, "AAAAB", nil}, + {"ReadAt", 2, rbs - 4, 0, "AA", nil}, + // Test seeking backwards from the Read offset. + {"Seek", 0, 2*rbs - 8, os.SEEK_SET, strconv.Itoa(2*rbs - 8), nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 1, 0, 0, "B", nil}, + {"Read", 8, 0, 0, "BBBBCCCC", nil}, + }}, + {"longBlob.1", []step{ + // Test ReadAt with a slice larger than the buffer size. + {"LargeReadAt", 2*rbs - 2, 0, 0, strconv.Itoa(2*rbs - 2), nil}, + {"LargeReadAt", 2*rbs - 1, 0, 0, strconv.Itoa(2*rbs - 1), nil}, + {"LargeReadAt", 2*rbs + 0, 0, 0, strconv.Itoa(2*rbs + 0), nil}, + {"LargeReadAt", 2*rbs + 1, 0, 0, strconv.Itoa(2*rbs + 1), nil}, + {"LargeReadAt", 2*rbs + 2, 0, 0, strconv.Itoa(2*rbs + 2), nil}, + {"LargeReadAt", 2*rbs - 2, 1, 0, strconv.Itoa(2*rbs - 2), nil}, + {"LargeReadAt", 2*rbs - 1, 1, 0, strconv.Itoa(2*rbs - 1), nil}, + {"LargeReadAt", 2*rbs + 0, 1, 0, strconv.Itoa(2*rbs + 0), nil}, + {"LargeReadAt", 2*rbs + 1, 1, 0, strconv.Itoa(2*rbs + 1), nil}, + {"LargeReadAt", 2*rbs + 2, 1, 0, strconv.Itoa(2*rbs + 2), nil}, + }}, +} + +func TestReader(t *testing.T) { + for _, rt := range readerTest { + c := aetesting.FakeSingleContext(t, "blobstore", "FetchData", fakeFetchData) + r := NewReader(c, appengine.BlobKey(rt.blobKey)) + for i, step := range rt.step { + var ( + got string + gotErr error + n int + offset int64 + ) + switch step.method { + case "LargeReadAt": + p := make([]byte, step.lenp) + n, gotErr = r.ReadAt(p, step.offset) + got = strconv.Itoa(n) + case "Read": + p := make([]byte, step.lenp) + n, gotErr = r.Read(p) + got = string(p[:n]) + case "ReadAt": + p := make([]byte, step.lenp) + n, gotErr = r.ReadAt(p, step.offset) + got = string(p[:n]) + case "Seek": + offset, gotErr = r.Seek(step.offset, step.whence) + got = strconv.FormatInt(offset, 10) + default: + t.Fatalf("unknown method: %s", step.method) + } + if gotErr != step.wantErr { + t.Fatalf("%s step %d: got error %v want %v", rt.blobKey, i, gotErr, step.wantErr) + } + if got != step.want { + t.Fatalf("%s step %d: got %q want %q", rt.blobKey, i, got, step.want) + } + } + } +} diff --git a/vendor/google.golang.org/appengine/blobstore/read.go b/vendor/google.golang.org/appengine/blobstore/read.go new file mode 100644 index 0000000..578b1f5 --- /dev/null +++ b/vendor/google.golang.org/appengine/blobstore/read.go @@ -0,0 +1,160 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package blobstore + +import ( + "errors" + "fmt" + "io" + "os" + "sync" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + + blobpb "google.golang.org/appengine/internal/blobstore" +) + +// openBlob returns a reader for a blob. It always succeeds; if the blob does +// not exist then an error will be reported upon first read. +func openBlob(c context.Context, blobKey appengine.BlobKey) Reader { + return &reader{ + c: c, + blobKey: blobKey, + } +} + +const readBufferSize = 256 * 1024 + +// reader is a blob reader. It implements the Reader interface. 
+type reader struct { + c context.Context + + // Either blobKey or filename is set: + blobKey appengine.BlobKey + filename string + + closeFunc func() // is nil if unavailable or already closed. + + // buf is the read buffer. r is how much of buf has been read. + // off is the offset of buf[0] relative to the start of the blob. + // An invariant is 0 <= r && r <= len(buf). + // Reads that don't require an RPC call will increment r but not off. + // Seeks may modify r without discarding the buffer, but only if the + // invariant can be maintained. + mu sync.Mutex + buf []byte + r int + off int64 +} + +func (r *reader) Close() error { + if f := r.closeFunc; f != nil { + f() + } + r.closeFunc = nil + return nil +} + +func (r *reader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + r.mu.Lock() + defer r.mu.Unlock() + if r.r == len(r.buf) { + if err := r.fetch(r.off + int64(r.r)); err != nil { + return 0, err + } + } + n := copy(p, r.buf[r.r:]) + r.r += n + return n, nil +} + +func (r *reader) ReadAt(p []byte, off int64) (int, error) { + if len(p) == 0 { + return 0, nil + } + r.mu.Lock() + defer r.mu.Unlock() + // Convert relative offsets to absolute offsets. + ab0 := r.off + int64(r.r) + ab1 := r.off + int64(len(r.buf)) + ap0 := off + ap1 := off + int64(len(p)) + // Check if we can satisfy the read entirely out of the existing buffer. + if r.off <= ap0 && ap1 <= ab1 { + // Convert off from an absolute offset to a relative offset. + rp0 := int(ap0 - r.off) + return copy(p, r.buf[rp0:]), nil + } + // Restore the original Read/Seek offset after ReadAt completes. + defer r.seek(ab0) + // Repeatedly fetch and copy until we have filled p. + n := 0 + for len(p) > 0 { + if err := r.fetch(off + int64(n)); err != nil { + return n, err + } + r.r = copy(p, r.buf) + n += r.r + p = p[r.r:] + } + return n, nil +} + +func (r *reader) Seek(offset int64, whence int) (ret int64, err error) { + r.mu.Lock() + defer r.mu.Unlock() + switch whence { + case os.SEEK_SET: + ret = offset + case os.SEEK_CUR: + ret = r.off + int64(r.r) + offset + case os.SEEK_END: + return 0, errors.New("seeking relative to the end of a blob isn't supported") + default: + return 0, fmt.Errorf("invalid Seek whence value: %d", whence) + } + if ret < 0 { + return 0, errors.New("negative Seek offset") + } + return r.seek(ret) +} + +// fetch fetches readBufferSize bytes starting at the given offset. On success, +// the data is saved as r.buf. +func (r *reader) fetch(off int64) error { + req := &blobpb.FetchDataRequest{ + BlobKey: proto.String(string(r.blobKey)), + StartIndex: proto.Int64(off), + EndIndex: proto.Int64(off + readBufferSize - 1), // EndIndex is inclusive. + } + res := &blobpb.FetchDataResponse{} + if err := internal.Call(r.c, "blobstore", "FetchData", req, res); err != nil { + return err + } + if len(res.Data) == 0 { + return io.EOF + } + r.buf, r.r, r.off = res.Data, 0, off + return nil +} + +// seek seeks to the given offset with an effective whence equal to SEEK_SET. +// It discards the read buffer if the invariant cannot be maintained. 
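+ // Example: with r.off == 0 and a full buffer, seek(1024) just sets r.r to
+ // 1024; seeking past len(r.buf) discards the buffer, and the next Read
+ // fetches from the new offset.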
+func (r *reader) seek(off int64) (int64, error) { + delta := off - r.off + if delta >= 0 && delta < int64(len(r.buf)) { + r.r = int(delta) + return off, nil + } + r.buf, r.r, r.off = nil, 0, off + return off, nil +} diff --git a/vendor/google.golang.org/appengine/capability/capability.go b/vendor/google.golang.org/appengine/capability/capability.go new file mode 100644 index 0000000..3a60bd5 --- /dev/null +++ b/vendor/google.golang.org/appengine/capability/capability.go @@ -0,0 +1,52 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package capability exposes information about outages and scheduled downtime +for specific API capabilities. + +This package does not work in App Engine "flexible environment". + +Example: + if !capability.Enabled(c, "datastore_v3", "write") { + // show user a different page + } +*/ +package capability // import "google.golang.org/appengine/capability" + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/log" + + pb "google.golang.org/appengine/internal/capability" +) + +// Enabled returns whether an API's capabilities are enabled. +// The wildcard "*" capability matches every capability of an API. +// If the underlying RPC fails (if the package is unknown, for example), +// false is returned and information is written to the application log. +func Enabled(ctx context.Context, api, capability string) bool { + req := &pb.IsEnabledRequest{ + Package: &api, + Capability: []string{capability}, + } + res := &pb.IsEnabledResponse{} + if err := internal.Call(ctx, "capability_service", "IsEnabled", req, res); err != nil { + log.Warningf(ctx, "capability.Enabled: RPC failed: %v", err) + return false + } + switch *res.SummaryStatus { + case pb.IsEnabledResponse_ENABLED, + pb.IsEnabledResponse_SCHEDULED_FUTURE, + pb.IsEnabledResponse_SCHEDULED_NOW: + return true + case pb.IsEnabledResponse_UNKNOWN: + log.Errorf(ctx, "capability.Enabled: unknown API capability %s/%s", api, capability) + return false + default: + return false + } +} diff --git a/vendor/google.golang.org/appengine/channel/channel.go b/vendor/google.golang.org/appengine/channel/channel.go new file mode 100644 index 0000000..dfe0a3f --- /dev/null +++ b/vendor/google.golang.org/appengine/channel/channel.go @@ -0,0 +1,83 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package channel implements the server side of App Engine's Channel API. + +Create creates a new channel associated with the given clientID, +which must be unique to the client that will use the returned token. + + token, err := channel.Create(c, "player1") + if err != nil { + // handle error + } + // return token to the client in an HTTP response + +Send sends a message to the client over the channel identified by clientID. + + channel.Send(c, "player1", "Game over!") +*/ +package channel // import "google.golang.org/appengine/channel" + +import ( + "encoding/json" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/channel" +) + +// Create creates a channel and returns a token for use by the client. +// The clientID is an application-provided string used to identify the client. 
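For structured payloads, the `SendJSON` helper below marshals a value before calling `Send`. A hedged sketch, assuming an existing context `ctx` and the "player1" client from the package example:

```go
// Sketch: push a structured update over the channel.
type scoreUpdate struct {
	Score int `json:"score"`
}
err := channel.SendJSON(ctx, "player1", scoreUpdate{Score: 42})
```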
+func Create(c context.Context, clientID string) (token string, err error) { + req := &pb.CreateChannelRequest{ + ApplicationKey: &clientID, + } + resp := &pb.CreateChannelResponse{} + err = internal.Call(c, service, "CreateChannel", req, resp) + token = resp.GetToken() + return token, remapError(err) +} + +// Send sends a message on the channel associated with clientID. +func Send(c context.Context, clientID, message string) error { + req := &pb.SendMessageRequest{ + ApplicationKey: &clientID, + Message: &message, + } + resp := &basepb.VoidProto{} + return remapError(internal.Call(c, service, "SendChannelMessage", req, resp)) +} + +// SendJSON is a helper function that sends a JSON-encoded value +// on the channel associated with clientID. +func SendJSON(c context.Context, clientID string, value interface{}) error { + m, err := json.Marshal(value) + if err != nil { + return err + } + return Send(c, clientID, string(m)) +} + +// remapError fixes any APIError referencing "xmpp" into one referencing "channel". +func remapError(err error) error { + if e, ok := err.(*internal.APIError); ok { + if e.Service == "xmpp" { + e.Service = "channel" + } + } + return err +} + +var service = "xmpp" // prod + +func init() { + if appengine.IsDevAppServer() { + service = "channel" // dev + } + internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/channel/channel_test.go b/vendor/google.golang.org/appengine/channel/channel_test.go new file mode 100644 index 0000000..c7498eb --- /dev/null +++ b/vendor/google.golang.org/appengine/channel/channel_test.go @@ -0,0 +1,21 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package channel + +import ( + "testing" + + "google.golang.org/appengine/internal" +) + +func TestRemapError(t *testing.T) { + err := &internal.APIError{ + Service: "xmpp", + } + err = remapError(err).(*internal.APIError) + if err.Service != "channel" { + t.Errorf("err.Service = %q, want %q", err.Service, "channel") + } +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go new file mode 100644 index 0000000..7b27e6b --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql.go @@ -0,0 +1,62 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package cloudsql exposes access to Google Cloud SQL databases. + +This package does not work in App Engine "flexible environment". + +This package is intended for MySQL drivers to make App Engine-specific +connections. Applications should use this package through database/sql: +Select a pure Go MySQL driver that supports this package, and use sql.Open +with protocol "cloudsql" and an address of the Cloud SQL instance. + +A Go MySQL driver that has been tested to work well with Cloud SQL +is the go-sql-driver: + import "database/sql" + import _ "github.com/go-sql-driver/mysql" + + db, err := sql.Open("mysql", "user@cloudsql(project-id:instance-name)/dbname") + + +Another driver that works well with Cloud SQL is the mymysql driver: + import "database/sql" + import _ "github.com/ziutek/mymysql/godrv" + + db, err := sql.Open("mymysql", "cloudsql:instance-name*dbname/user/password") + + +Using either of these drivers, you can perform a standard SQL query. 
+This example assumes there is a table named 'users' with +columns 'first_name' and 'last_name': + + rows, err := db.Query("SELECT first_name, last_name FROM users") + if err != nil { + log.Errorf(ctx, "db.Query: %v", err) + } + defer rows.Close() + + for rows.Next() { + var firstName string + var lastName string + if err := rows.Scan(&firstName, &lastName); err != nil { + log.Errorf(ctx, "rows.Scan: %v", err) + continue + } + log.Infof(ctx, "First: %v - Last: %v", firstName, lastName) + } + if err := rows.Err(); err != nil { + log.Errorf(ctx, "Row error: %v", err) + } +*/ +package cloudsql + +import ( + "net" +) + +// Dial connects to the named Cloud SQL instance. +func Dial(instance string) (net.Conn, error) { + return connect(instance) +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go new file mode 100644 index 0000000..af62dba --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go @@ -0,0 +1,17 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package cloudsql + +import ( + "net" + + "appengine/cloudsql" +) + +func connect(instance string) (net.Conn, error) { + return cloudsql.Dial(instance) +} diff --git a/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go new file mode 100644 index 0000000..90fa7b3 --- /dev/null +++ b/vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go @@ -0,0 +1,16 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package cloudsql + +import ( + "errors" + "net" +) + +func connect(instance string) (net.Conn, error) { + return nil, errors.New(`cloudsql: not supported in App Engine "flexible environment"`) +} diff --git a/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go new file mode 100644 index 0000000..e317cdd --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go @@ -0,0 +1,342 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Program aebundler turns a Go app into a fully self-contained tar file. +// The app and its subdirectories (if any) are placed under "." +// and the dependencies from $GOPATH are placed under ./_gopath/src. +// A main func is synthesized if one does not exist. +// +// A sample Dockerfile to be used with this bundler could look like this: +// FROM gcr.io/google_appengine/go-compat +// ADD . 
/app +// RUN GOPATH=/app/_gopath go build -tags appenginevm -o /app/_ah/exe +package main + +import ( + "archive/tar" + "flag" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +var ( + output = flag.String("o", "", "name of output tar file or '-' for stdout") + rootDir = flag.String("root", ".", "directory name of application root") + vm = flag.Bool("vm", true, `bundle an app for App Engine "flexible environment"`) + + skipFiles = map[string]bool{ + ".git": true, + ".gitconfig": true, + ".hg": true, + ".travis.yml": true, + } +) + +const ( + newMain = `package main +import "google.golang.org/appengine" +func main() { + appengine.Main() +} +` +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\t%s -o \tBundle app to named tar file or stdout\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\noptional arguments:\n") + flag.PrintDefaults() +} + +func main() { + flag.Usage = usage + flag.Parse() + + var tags []string + if *vm { + tags = append(tags, "appenginevm") + } else { + tags = append(tags, "appengine") + } + + tarFile := *output + if tarFile == "" { + usage() + errorf("Required -o flag not specified.") + } + + app, err := analyze(tags) + if err != nil { + errorf("Error analyzing app: %v", err) + } + if err := app.bundle(tarFile); err != nil { + errorf("Unable to bundle app: %v", err) + } +} + +// errorf prints the error message and exits. +func errorf(format string, a ...interface{}) { + fmt.Fprintf(os.Stderr, "aebundler: "+format+"\n", a...) + os.Exit(1) +} + +type app struct { + hasMain bool + appFiles []string + imports map[string]string +} + +// analyze checks the app for building with the given build tags and returns hasMain, +// app files, and a map of full directory import names to original import names. +func analyze(tags []string) (*app, error) { + ctxt := buildContext(tags) + hasMain, appFiles, err := checkMain(ctxt) + if err != nil { + return nil, err + } + gopath := filepath.SplitList(ctxt.GOPATH) + im, err := imports(ctxt, *rootDir, gopath) + return &app{ + hasMain: hasMain, + appFiles: appFiles, + imports: im, + }, err +} + +// buildContext returns the context for building the source. +func buildContext(tags []string) *build.Context { + return &build.Context{ + GOARCH: build.Default.GOARCH, + GOOS: build.Default.GOOS, + GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + Compiler: build.Default.Compiler, + BuildTags: append(build.Default.BuildTags, tags...), + } +} + +// bundle bundles the app into the named tarFile ("-"==stdout). 
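+// The app's own files are placed under "." and each dependency under
+// "_gopath/src/<import path>", matching the GOPATH layout assumed by the
+// sample Dockerfile above.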
+func (s *app) bundle(tarFile string) (err error) { + var out io.Writer + if tarFile == "-" { + out = os.Stdout + } else { + f, err := os.Create(tarFile) + if err != nil { + return err + } + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + out = f + } + tw := tar.NewWriter(out) + + for srcDir, importName := range s.imports { + dstDir := "_gopath/src/" + importName + if err = copyTree(tw, dstDir, srcDir); err != nil { + return fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err) + } + } + if err := copyTree(tw, ".", *rootDir); err != nil { + return fmt.Errorf("unable to copy root directory to /app: %v", err) + } + if !s.hasMain { + if err := synthesizeMain(tw, s.appFiles); err != nil { + return fmt.Errorf("unable to synthesize new main func: %v", err) + } + } + + if err := tw.Close(); err != nil { + return fmt.Errorf("unable to close tar file %v: %v", tarFile, err) + } + return nil +} + +// synthesizeMain generates a new main func and writes it to the tarball. +func synthesizeMain(tw *tar.Writer, appFiles []string) error { + appMap := make(map[string]bool) + for _, f := range appFiles { + appMap[f] = true + } + var f string + for i := 0; i < 100; i++ { + f = fmt.Sprintf("app_main%d.go", i) + if !appMap[filepath.Join(*rootDir, f)] { + break + } + } + if appMap[filepath.Join(*rootDir, f)] { + return fmt.Errorf("unable to find unique name for %v", f) + } + hdr := &tar.Header{ + Name: f, + Mode: 0644, + Size: int64(len(newMain)), + } + if err := tw.WriteHeader(hdr); err != nil { + return fmt.Errorf("unable to write header for %v: %v", f, err) + } + if _, err := tw.Write([]byte(newMain)); err != nil { + return fmt.Errorf("unable to write %v to tar file: %v", f, err) + } + return nil +} + +// imports returns a map of all import directories (recursively) used by the app. +// The return value maps full directory names to original import names. +func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) { + pkg, err := ctxt.ImportDir(srcDir, 0) + if err != nil { + return nil, fmt.Errorf("unable to analyze source: %v", err) + } + + // Resolve all non-standard-library imports + result := make(map[string]string) + for _, v := range pkg.Imports { + if !strings.Contains(v, ".") { + continue + } + src, err := findInGopath(v, gopath) + if err != nil { + return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err) + } + result[src] = v + im, err := imports(ctxt, src, gopath) + if err != nil { + return nil, fmt.Errorf("unable to parse package %v: %v", src, err) + } + for k, v := range im { + result[k] = v + } + } + return result, nil +} + +// findInGopath searches the gopath for the named import directory. +func findInGopath(dir string, gopath []string) (string, error) { + for _, v := range gopath { + dst := filepath.Join(v, "src", dir) + if _, err := os.Stat(dst); err == nil { + return dst, nil + } + } + return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath) +} + +// copyTree copies srcDir to tar file dstDir, ignoring skipFiles. 
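+// Subdirectories are copied recursively; entries named in skipFiles
+// (VCS metadata and CI configuration) are omitted.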
+func copyTree(tw *tar.Writer, dstDir, srcDir string) error { + entries, err := ioutil.ReadDir(srcDir) + if err != nil { + return fmt.Errorf("unable to read dir %v: %v", srcDir, err) + } + for _, entry := range entries { + n := entry.Name() + if skipFiles[n] { + continue + } + s := filepath.Join(srcDir, n) + d := filepath.Join(dstDir, n) + if entry.IsDir() { + if err := copyTree(tw, d, s); err != nil { + return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err) + } + continue + } + if err := copyFile(tw, d, s); err != nil { + return fmt.Errorf("unable to copy dir %v to %v: %v", s, d, err) + } + } + return nil +} + +// copyFile copies src to tar file dst. +func copyFile(tw *tar.Writer, dst, src string) error { + s, err := os.Open(src) + if err != nil { + return fmt.Errorf("unable to open %v: %v", src, err) + } + defer s.Close() + fi, err := s.Stat() + if err != nil { + return fmt.Errorf("unable to stat %v: %v", src, err) + } + + hdr, err := tar.FileInfoHeader(fi, dst) + if err != nil { + return fmt.Errorf("unable to create tar header for %v: %v", dst, err) + } + hdr.Name = dst + if err := tw.WriteHeader(hdr); err != nil { + return fmt.Errorf("unable to write header for %v: %v", dst, err) + } + _, err = io.Copy(tw, s) + if err != nil { + return fmt.Errorf("unable to copy %v to %v: %v", src, dst, err) + } + return nil +} + +// checkMain verifies that there is a single "main" function. +// It also returns a list of all Go source files in the app. +func checkMain(ctxt *build.Context) (bool, []string, error) { + pkg, err := ctxt.ImportDir(*rootDir, 0) + if err != nil { + return false, nil, fmt.Errorf("unable to analyze source: %v", err) + } + if !pkg.IsCommand() { + errorf("Your app's package needs to be changed from %q to \"main\".\n", pkg.Name) + } + // Search for a "func main" + var hasMain bool + var appFiles []string + for _, f := range pkg.GoFiles { + n := filepath.Join(*rootDir, f) + appFiles = append(appFiles, n) + if hasMain, err = readFile(n); err != nil { + return false, nil, fmt.Errorf("error parsing %q: %v", n, err) + } + } + return hasMain, appFiles, nil +} + +// isMain returns whether the given function declaration is a main function. +// Such a function must be called "main", not have a receiver, and have no arguments or return types. +func isMain(f *ast.FuncDecl) bool { + ft := f.Type + return f.Name.Name == "main" && f.Recv == nil && ft.Params.NumFields() == 0 && ft.Results.NumFields() == 0 +} + +// readFile reads and parses the Go source code file and returns whether it has a main function. +func readFile(filename string) (hasMain bool, err error) { + var src []byte + src, err = ioutil.ReadFile(filename) + if err != nil { + return + } + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, filename, src, 0) + for _, decl := range file.Decls { + funcDecl, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if !isMain(funcDecl) { + continue + } + hasMain = true + break + } + return +} diff --git a/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go new file mode 100644 index 0000000..155fc1c --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go @@ -0,0 +1,268 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Program aedeploy assists with deploying App Engine "flexible environment" Go apps to production. 
+// A temporary directory is created; the app, its subdirectories, and all its +// dependencies from $GOPATH are copied into the directory; then the app +// is deployed to production with the provided command. +// +// The app must be in "package main". +// +// This command must be issued from within the root directory of the app +// (where the app.yaml file is located). +package main + +import ( + "flag" + "fmt" + "go/build" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" +) + +var ( + skipFiles = map[string]bool{ + ".git": true, + ".gitconfig": true, + ".hg": true, + ".travis.yml": true, + } + + gopathCache = map[string]string{} +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\t%s gcloud --verbosity debug preview app deploy --version myversion ./app.yaml\tDeploy app to production\n", os.Args[0]) +} + +func main() { + flag.Usage = usage + flag.Parse() + if flag.NArg() < 1 { + usage() + os.Exit(1) + } + + if err := aedeploy(); err != nil { + fmt.Fprintf(os.Stderr, os.Args[0]+": Error: %v\n", err) + os.Exit(1) + } +} + +func aedeploy() error { + tags := []string{"appenginevm"} + app, err := analyze(tags) + if err != nil { + return err + } + + tmpDir, err := app.bundle() + if tmpDir != "" { + defer os.RemoveAll(tmpDir) + } + if err != nil { + return err + } + + if err := os.Chdir(tmpDir); err != nil { + return fmt.Errorf("unable to chdir to %v: %v", tmpDir, err) + } + return deploy() +} + +// deploy calls the provided command to deploy the app from the temporary directory. +func deploy() error { + cmd := exec.Command(flag.Arg(0), flag.Args()[1:]...) + cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("unable to run %q: %v", strings.Join(flag.Args(), " "), err) + } + return nil +} + +type app struct { + appFiles []string + imports map[string]string +} + +// analyze checks the app for building with the given build tags and returns +// app files, and a map of full directory import names to original import names. +func analyze(tags []string) (*app, error) { + ctxt := buildContext(tags) + appFiles, err := appFiles(ctxt) + if err != nil { + return nil, err + } + gopath := filepath.SplitList(ctxt.GOPATH) + im, err := imports(ctxt, ".", gopath) + return &app{ + appFiles: appFiles, + imports: im, + }, err +} + +// buildContext returns the context for building the source. +func buildContext(tags []string) *build.Context { + return &build.Context{ + GOARCH: "amd64", + GOOS: "linux", + GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + Compiler: build.Default.Compiler, + BuildTags: append(defaultBuildTags, tags...), + } +} + +// All build tags except go1.7, since Go 1.6 is the runtime version. +var defaultBuildTags = []string{ + "go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6"} + +// bundle bundles the app into a temporary directory. 
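+// The layout matches the one produced by aebundler: the app at the
+// directory root and each dependency under "_gopath/src/<import path>".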
+func (s *app) bundle() (tmpdir string, err error) { + workDir, err := ioutil.TempDir("", "aedeploy") + if err != nil { + return "", fmt.Errorf("unable to create tmpdir: %v", err) + } + + for srcDir, importName := range s.imports { + dstDir := "_gopath/src/" + importName + if err := copyTree(workDir, dstDir, srcDir); err != nil { + return workDir, fmt.Errorf("unable to copy directory %v to %v: %v", srcDir, dstDir, err) + } + } + if err := copyTree(workDir, ".", "."); err != nil { + return workDir, fmt.Errorf("unable to copy root directory to /app: %v", err) + } + return workDir, nil +} + +// imports returns a map of all import directories (recursively) used by the app. +// The return value maps full directory names to original import names. +func imports(ctxt *build.Context, srcDir string, gopath []string) (map[string]string, error) { + pkg, err := ctxt.ImportDir(srcDir, 0) + if err != nil { + return nil, err + } + + // Resolve all non-standard-library imports + result := make(map[string]string) + for _, v := range pkg.Imports { + if !strings.Contains(v, ".") { + continue + } + src, err := findInGopath(v, gopath) + if err != nil { + return nil, fmt.Errorf("unable to find import %v in gopath %v: %v", v, gopath, err) + } + if _, ok := result[src]; ok { // Already processed + continue + } + result[src] = v + im, err := imports(ctxt, src, gopath) + if err != nil { + return nil, fmt.Errorf("unable to parse package %v: %v", src, err) + } + for k, v := range im { + result[k] = v + } + } + return result, nil +} + +// findInGopath searches the gopath for the named import directory. +func findInGopath(dir string, gopath []string) (string, error) { + if v, ok := gopathCache[dir]; ok { + return v, nil + } + for _, v := range gopath { + dst := filepath.Join(v, "src", dir) + if _, err := os.Stat(dst); err == nil { + gopathCache[dir] = dst + return dst, nil + } + } + return "", fmt.Errorf("unable to find package %v in gopath %v", dir, gopath) +} + +// copyTree copies srcDir to dstDir relative to dstRoot, ignoring skipFiles. +func copyTree(dstRoot, dstDir, srcDir string) error { + d := filepath.Join(dstRoot, dstDir) + if err := os.MkdirAll(d, 0755); err != nil { + return fmt.Errorf("unable to create directory %q: %v", d, err) + } + + entries, err := ioutil.ReadDir(srcDir) + if err != nil { + return fmt.Errorf("unable to read dir %q: %v", srcDir, err) + } + for _, entry := range entries { + n := entry.Name() + if skipFiles[n] { + continue + } + s := filepath.Join(srcDir, n) + if entry.Mode()&os.ModeSymlink == os.ModeSymlink { + if entry, err = os.Stat(s); err != nil { + return fmt.Errorf("unable to stat %v: %v", s, err) + } + } + d := filepath.Join(dstDir, n) + if entry.IsDir() { + if err := copyTree(dstRoot, d, s); err != nil { + return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err) + } + continue + } + if err := copyFile(dstRoot, d, s); err != nil { + return fmt.Errorf("unable to copy dir %q to %q: %v", s, d, err) + } + } + return nil +} + +// copyFile copies src to dst relative to dstRoot. +func copyFile(dstRoot, dst, src string) error { + s, err := os.Open(src) + if err != nil { + return fmt.Errorf("unable to open %q: %v", src, err) + } + defer s.Close() + + dst = filepath.Join(dstRoot, dst) + d, err := os.Create(dst) + if err != nil { + return fmt.Errorf("unable to create %q: %v", dst, err) + } + _, err = io.Copy(d, s) + if err != nil { + d.Close() // ignore error, copy already failed. 
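+ // The partially written destination is discarded along with the
+ // temporary directory when the deploy aborts.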
+ return fmt.Errorf("unable to copy %q to %q: %v", src, dst, err) + } + if err := d.Close(); err != nil { + return fmt.Errorf("unable to close %q: %v", dst, err) + } + return nil +} + +// appFiles returns a list of all Go source files in the app. +func appFiles(ctxt *build.Context) ([]string, error) { + pkg, err := ctxt.ImportDir(".", 0) + if err != nil { + return nil, err + } + if !pkg.IsCommand() { + return nil, fmt.Errorf(`the root of your app needs to be package "main" (currently %q). Please see https://cloud.google.com/appengine/docs/flexible/go/ for more details on structuring your app.`, pkg.Name) + } + var appFiles []string + for _, f := range pkg.GoFiles { + n := filepath.Join(".", f) + appFiles = append(appFiles, n) + } + return appFiles, nil +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae.go b/vendor/google.golang.org/appengine/cmd/aefix/ae.go new file mode 100644 index 0000000..0fe2d4a --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/ae.go @@ -0,0 +1,185 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "path" + "strconv" + "strings" +) + +const ( + ctxPackage = "golang.org/x/net/context" + + newPackageBase = "google.golang.org/" + stutterPackage = false +) + +func init() { + register(fix{ + "ae", + "2016-04-15", + aeFn, + `Update old App Engine APIs to new App Engine APIs`, + }) +} + +// logMethod is the set of methods on appengine.Context used for logging. +var logMethod = map[string]bool{ + "Debugf": true, + "Infof": true, + "Warningf": true, + "Errorf": true, + "Criticalf": true, +} + +// mapPackage turns "appengine" into "google.golang.org/appengine", etc. +func mapPackage(s string) string { + if stutterPackage { + s += "/" + path.Base(s) + } + return newPackageBase + s +} + +func aeFn(f *ast.File) bool { + // During the walk, we track the last thing seen that looks like + // an appengine.Context, and reset it once the walk leaves a func. + var lastContext *ast.Ident + + fixed := false + + // Update imports. + mainImp := "appengine" + for _, imp := range f.Imports { + pth, _ := strconv.Unquote(imp.Path.Value) + if pth == "appengine" || strings.HasPrefix(pth, "appengine/") { + newPth := mapPackage(pth) + imp.Path.Value = strconv.Quote(newPth) + fixed = true + + if pth == "appengine" { + mainImp = newPth + } + } + } + + // Update any API changes. + walk(f, func(n interface{}) { + if ft, ok := n.(*ast.FuncType); ok && ft.Params != nil { + // See if this func has an `appengine.Context arg`. + // If so, remember its identifier. + for _, param := range ft.Params.List { + if !isPkgDot(param.Type, "appengine", "Context") { + continue + } + if len(param.Names) == 1 { + lastContext = param.Names[0] + break + } + } + return + } + + if as, ok := n.(*ast.AssignStmt); ok { + if len(as.Lhs) == 1 && len(as.Rhs) == 1 { + // If this node is an assignment from an appengine.NewContext invocation, + // remember the identifier on the LHS. + if isCall(as.Rhs[0], "appengine", "NewContext") { + if ident, ok := as.Lhs[0].(*ast.Ident); ok { + lastContext = ident + return + } + } + // x (=|:=) appengine.Timeout(y, z) + // should become + // x, _ (=|:=) context.WithTimeout(y, z) + if isCall(as.Rhs[0], "appengine", "Timeout") { + addImport(f, ctxPackage) + as.Lhs = append(as.Lhs, ast.NewIdent("_")) + // isCall already did the type checking. 
+ sel := as.Rhs[0].(*ast.CallExpr).Fun.(*ast.SelectorExpr) + sel.X = ast.NewIdent("context") + sel.Sel = ast.NewIdent("WithTimeout") + fixed = true + return + } + } + return + } + + // If this node is a FuncDecl, we've finished the function, so reset lastContext. + if _, ok := n.(*ast.FuncDecl); ok { + lastContext = nil + return + } + + if call, ok := n.(*ast.CallExpr); ok { + if isPkgDot(call.Fun, "appengine", "Datacenter") && len(call.Args) == 0 { + insertContext(f, call, lastContext) + fixed = true + return + } + if isPkgDot(call.Fun, "taskqueue", "QueueStats") && len(call.Args) == 3 { + call.Args = call.Args[:2] // drop last arg + fixed = true + return + } + + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + if lastContext != nil && refersTo(sel.X, lastContext) && logMethod[sel.Sel.Name] { + // c.Errorf(...) + // should become + // log.Errorf(c, ...) + addImport(f, mapPackage("appengine/log")) + sel.X = &ast.Ident{ // ast.NewIdent doesn't preserve the position. + NamePos: sel.X.Pos(), + Name: "log", + } + insertContext(f, call, lastContext) + fixed = true + return + } + } + }) + + // Change any `appengine.Context` to `context.Context`. + // Do this in a separate walk because the previous walk + // wants to identify "appengine.Context". + walk(f, func(n interface{}) { + expr, ok := n.(ast.Expr) + if ok && isPkgDot(expr, "appengine", "Context") { + addImport(f, ctxPackage) + // isPkgDot did the type checking. + n.(*ast.SelectorExpr).X.(*ast.Ident).Name = "context" + fixed = true + return + } + }) + + // The changes above might remove the need to import "appengine". + // Check if it's used, and drop it if it isn't. + if fixed && !usesImport(f, mainImp) { + deleteImport(f, mainImp) + } + + return fixed +} + +// ctx may be nil. +func insertContext(f *ast.File, call *ast.CallExpr, ctx *ast.Ident) { + if ctx == nil { + // context is unknown, so use a plain "ctx". + ctx = ast.NewIdent("ctx") + } else { + // Create a fresh *ast.Ident so we drop the position information. + ctx = ast.NewIdent(ctx.Name) + } + + call.Args = append([]ast.Expr{ctx}, call.Args...) +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go new file mode 100644 index 0000000..21f5695 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/ae_test.go @@ -0,0 +1,144 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(aeTests, nil) +} + +var aeTests = []testCase{ + // Collection of fixes: + // - imports + // - appengine.Timeout -> context.WithTimeout + // - add ctx arg to appengine.Datacenter + // - logging API + { + Name: "ae.0", + In: `package foo + +import ( + "net/http" + "time" + + "appengine" + "appengine/datastore" +) + +func f(w http.ResponseWriter, r *http.Request) { + c := appengine.NewContext(r) + + c = appengine.Timeout(c, 5*time.Second) + err := datastore.ErrNoSuchEntity + c.Errorf("Something interesting happened: %v", err) + _ = appengine.Datacenter() +} +`, + Out: `package foo + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/appengine" + "google.golang.org/appengine/datastore" + "google.golang.org/appengine/log" +) + +func f(w http.ResponseWriter, r *http.Request) { + c := appengine.NewContext(r) + + c, _ = context.WithTimeout(c, 5*time.Second) + err := datastore.ErrNoSuchEntity + log.Errorf(c, "Something interesting happened: %v", err) + _ = appengine.Datacenter(c) +} +`, + }, + + // Updating a function that takes an appengine.Context arg. + { + Name: "ae.1", + In: `package foo + +import ( + "appengine" +) + +func LogSomething(c2 appengine.Context) { + c2.Warningf("Stand back! I'm going to try science!") +} +`, + Out: `package foo + +import ( + "golang.org/x/net/context" + "google.golang.org/appengine/log" +) + +func LogSomething(c2 context.Context) { + log.Warningf(c2, "Stand back! I'm going to try science!") +} +`, + }, + + // Less widely used API changes: + // - drop maxTasks arg to taskqueue.QueueStats + { + Name: "ae.2", + In: `package foo + +import ( + "appengine" + "appengine/taskqueue" +) + +func f(ctx appengine.Context) { + stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}, 0) +} +`, + Out: `package foo + +import ( + "golang.org/x/net/context" + "google.golang.org/appengine/taskqueue" +) + +func f(ctx context.Context) { + stats, err := taskqueue.QueueStats(ctx, []string{"one", "two"}) +} +`, + }, + + // Check that the main "appengine" import will not be dropped + // if an appengine.Context -> context.Context change happens + // but the appengine package is still referenced. + { + Name: "ae.3", + In: `package foo + +import ( + "appengine" + "io" +) + +func f(ctx appengine.Context, w io.Writer) { + _ = appengine.IsDevAppServer() +} +`, + Out: `package foo + +import ( + "golang.org/x/net/context" + "google.golang.org/appengine" + "io" +) + +func f(ctx context.Context, w io.Writer) { + _ = appengine.IsDevAppServer() +} +`, + }, +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/fix.go b/vendor/google.golang.org/appengine/cmd/aefix/fix.go new file mode 100644 index 0000000..a100be7 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/fix.go @@ -0,0 +1,848 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "path" + "reflect" + "strconv" + "strings" +) + +type fix struct { + name string + date string // date that fix was introduced, in YYYY-MM-DD format + f func(*ast.File) bool + desc string +} + +// main runs sort.Sort(byName(fixes)) before printing list of fixes. 
+type byName []fix + +func (f byName) Len() int { return len(f) } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f byName) Less(i, j int) bool { return f[i].name < f[j].name } + +// main runs sort.Sort(byDate(fixes)) before applying fixes. +type byDate []fix + +func (f byDate) Len() int { return len(f) } +func (f byDate) Swap(i, j int) { f[i], f[j] = f[j], f[i] } +func (f byDate) Less(i, j int) bool { return f[i].date < f[j].date } + +var fixes []fix + +func register(f fix) { + fixes = append(fixes, f) +} + +// walk traverses the AST x, calling visit(y) for each node y in the tree but +// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt, +// in a bottom-up traversal. +func walk(x interface{}, visit func(interface{})) { + walkBeforeAfter(x, nop, visit) +} + +func nop(interface{}) {} + +// walkBeforeAfter is like walk but calls before(x) before traversing +// x's children and after(x) afterward. +func walkBeforeAfter(x interface{}, before, after func(interface{})) { + before(x) + + switch n := x.(type) { + default: + panic(fmt.Errorf("unexpected type %T in walkBeforeAfter", x)) + + case nil: + + // pointers to interfaces + case *ast.Decl: + walkBeforeAfter(*n, before, after) + case *ast.Expr: + walkBeforeAfter(*n, before, after) + case *ast.Spec: + walkBeforeAfter(*n, before, after) + case *ast.Stmt: + walkBeforeAfter(*n, before, after) + + // pointers to struct pointers + case **ast.BlockStmt: + walkBeforeAfter(*n, before, after) + case **ast.CallExpr: + walkBeforeAfter(*n, before, after) + case **ast.FieldList: + walkBeforeAfter(*n, before, after) + case **ast.FuncType: + walkBeforeAfter(*n, before, after) + case **ast.Ident: + walkBeforeAfter(*n, before, after) + case **ast.BasicLit: + walkBeforeAfter(*n, before, after) + + // pointers to slices + case *[]ast.Decl: + walkBeforeAfter(*n, before, after) + case *[]ast.Expr: + walkBeforeAfter(*n, before, after) + case *[]*ast.File: + walkBeforeAfter(*n, before, after) + case *[]*ast.Ident: + walkBeforeAfter(*n, before, after) + case *[]ast.Spec: + walkBeforeAfter(*n, before, after) + case *[]ast.Stmt: + walkBeforeAfter(*n, before, after) + + // These are ordered and grouped to match ../../pkg/go/ast/ast.go + case *ast.Field: + walkBeforeAfter(&n.Names, before, after) + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Tag, before, after) + case *ast.FieldList: + for _, field := range n.List { + walkBeforeAfter(field, before, after) + } + case *ast.BadExpr: + case *ast.Ident: + case *ast.Ellipsis: + walkBeforeAfter(&n.Elt, before, after) + case *ast.BasicLit: + case *ast.FuncLit: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.CompositeLit: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Elts, before, after) + case *ast.ParenExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.SelectorExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.IndexExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Index, before, after) + case *ast.SliceExpr: + walkBeforeAfter(&n.X, before, after) + if n.Low != nil { + walkBeforeAfter(&n.Low, before, after) + } + if n.High != nil { + walkBeforeAfter(&n.High, before, after) + } + case *ast.TypeAssertExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Type, before, after) + case *ast.CallExpr: + walkBeforeAfter(&n.Fun, before, after) + walkBeforeAfter(&n.Args, before, after) + case *ast.StarExpr: + walkBeforeAfter(&n.X, before, after) + case *ast.UnaryExpr: + 
walkBeforeAfter(&n.X, before, after) + case *ast.BinaryExpr: + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Y, before, after) + case *ast.KeyValueExpr: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + + case *ast.ArrayType: + walkBeforeAfter(&n.Len, before, after) + walkBeforeAfter(&n.Elt, before, after) + case *ast.StructType: + walkBeforeAfter(&n.Fields, before, after) + case *ast.FuncType: + walkBeforeAfter(&n.Params, before, after) + if n.Results != nil { + walkBeforeAfter(&n.Results, before, after) + } + case *ast.InterfaceType: + walkBeforeAfter(&n.Methods, before, after) + case *ast.MapType: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + case *ast.ChanType: + walkBeforeAfter(&n.Value, before, after) + + case *ast.BadStmt: + case *ast.DeclStmt: + walkBeforeAfter(&n.Decl, before, after) + case *ast.EmptyStmt: + case *ast.LabeledStmt: + walkBeforeAfter(&n.Stmt, before, after) + case *ast.ExprStmt: + walkBeforeAfter(&n.X, before, after) + case *ast.SendStmt: + walkBeforeAfter(&n.Chan, before, after) + walkBeforeAfter(&n.Value, before, after) + case *ast.IncDecStmt: + walkBeforeAfter(&n.X, before, after) + case *ast.AssignStmt: + walkBeforeAfter(&n.Lhs, before, after) + walkBeforeAfter(&n.Rhs, before, after) + case *ast.GoStmt: + walkBeforeAfter(&n.Call, before, after) + case *ast.DeferStmt: + walkBeforeAfter(&n.Call, before, after) + case *ast.ReturnStmt: + walkBeforeAfter(&n.Results, before, after) + case *ast.BranchStmt: + case *ast.BlockStmt: + walkBeforeAfter(&n.List, before, after) + case *ast.IfStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Cond, before, after) + walkBeforeAfter(&n.Body, before, after) + walkBeforeAfter(&n.Else, before, after) + case *ast.CaseClause: + walkBeforeAfter(&n.List, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.SwitchStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Tag, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.TypeSwitchStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Assign, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.CommClause: + walkBeforeAfter(&n.Comm, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.SelectStmt: + walkBeforeAfter(&n.Body, before, after) + case *ast.ForStmt: + walkBeforeAfter(&n.Init, before, after) + walkBeforeAfter(&n.Cond, before, after) + walkBeforeAfter(&n.Post, before, after) + walkBeforeAfter(&n.Body, before, after) + case *ast.RangeStmt: + walkBeforeAfter(&n.Key, before, after) + walkBeforeAfter(&n.Value, before, after) + walkBeforeAfter(&n.X, before, after) + walkBeforeAfter(&n.Body, before, after) + + case *ast.ImportSpec: + case *ast.ValueSpec: + walkBeforeAfter(&n.Type, before, after) + walkBeforeAfter(&n.Values, before, after) + walkBeforeAfter(&n.Names, before, after) + case *ast.TypeSpec: + walkBeforeAfter(&n.Type, before, after) + + case *ast.BadDecl: + case *ast.GenDecl: + walkBeforeAfter(&n.Specs, before, after) + case *ast.FuncDecl: + if n.Recv != nil { + walkBeforeAfter(&n.Recv, before, after) + } + walkBeforeAfter(&n.Type, before, after) + if n.Body != nil { + walkBeforeAfter(&n.Body, before, after) + } + + case *ast.File: + walkBeforeAfter(&n.Decls, before, after) + + case *ast.Package: + walkBeforeAfter(&n.Files, before, after) + + case []*ast.File: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Decl: + for i := range n { + 
walkBeforeAfter(&n[i], before, after) + } + case []ast.Expr: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []*ast.Ident: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Stmt: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + case []ast.Spec: + for i := range n { + walkBeforeAfter(&n[i], before, after) + } + } + after(x) +} + +// imports returns true if f imports path. +func imports(f *ast.File, path string) bool { + return importSpec(f, path) != nil +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// isPkgDot returns true if t is the expression "pkg.name" +// where pkg is an imported identifier. +func isPkgDot(t ast.Expr, pkg, name string) bool { + sel, ok := t.(*ast.SelectorExpr) + return ok && isTopName(sel.X, pkg) && sel.Sel.String() == name +} + +// isPtrPkgDot returns true if f is the expression "*pkg.name" +// where pkg is an imported identifier. +func isPtrPkgDot(t ast.Expr, pkg, name string) bool { + ptr, ok := t.(*ast.StarExpr) + return ok && isPkgDot(ptr.X, pkg, name) +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// isName returns true if n is an identifier with the given name. +func isName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.String() == name +} + +// isCall returns true if t is a call to pkg.name. +func isCall(t ast.Expr, pkg, name string) bool { + call, ok := t.(*ast.CallExpr) + return ok && isPkgDot(call.Fun, pkg, name) +} + +// If n is an *ast.Ident, isIdent returns it; otherwise isIdent returns nil. +func isIdent(n interface{}) *ast.Ident { + id, _ := n.(*ast.Ident) + return id +} + +// refersTo returns true if n is a reference to the same object as x. +func refersTo(n ast.Node, x *ast.Ident) bool { + id, ok := n.(*ast.Ident) + // The test of id.Name == x.Name handles top-level unresolved + // identifiers, which all have Obj == nil. + return ok && id.Obj == x.Obj && id.Name == x.Name +} + +// isBlank returns true if n is the blank identifier. +func isBlank(n ast.Expr) bool { + return isName(n, "_") +} + +// isEmptyString returns true if n is an empty string literal. +func isEmptyString(n ast.Expr) bool { + lit, ok := n.(*ast.BasicLit) + return ok && lit.Kind == token.STRING && len(lit.Value) == 2 +} + +func warn(pos token.Pos, msg string, args ...interface{}) { + if pos.IsValid() { + msg = "%s: " + msg + arg1 := []interface{}{fset.Position(pos).String()} + args = append(arg1, args...) + } + fmt.Fprintf(os.Stderr, msg+"\n", args...) +} + +// countUses returns the number of uses of the identifier x in scope. 
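+// An identifier is counted only when refersTo reports that it denotes the
+// same object as x.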
+func countUses(x *ast.Ident, scope []ast.Stmt) int { + count := 0 + ff := func(n interface{}) { + if n, ok := n.(ast.Node); ok && refersTo(n, x) { + count++ + } + } + for _, n := range scope { + walk(n, ff) + } + return count +} + +// rewriteUses replaces all uses of the identifier x and !x in scope +// with f(x.Pos()) and fnot(x.Pos()). +func rewriteUses(x *ast.Ident, f, fnot func(token.Pos) ast.Expr, scope []ast.Stmt) { + var lastF ast.Expr + ff := func(n interface{}) { + ptr, ok := n.(*ast.Expr) + if !ok { + return + } + nn := *ptr + + // The child node was just walked and possibly replaced. + // If it was replaced and this is a negation, replace with fnot(p). + not, ok := nn.(*ast.UnaryExpr) + if ok && not.Op == token.NOT && not.X == lastF { + *ptr = fnot(nn.Pos()) + return + } + if refersTo(nn, x) { + lastF = f(nn.Pos()) + *ptr = lastF + } + } + for _, n := range scope { + walk(n, ff) + } +} + +// assignsTo returns true if any of the code in scope assigns to or takes the address of x. +func assignsTo(x *ast.Ident, scope []ast.Stmt) bool { + assigned := false + ff := func(n interface{}) { + if assigned { + return + } + switch n := n.(type) { + case *ast.UnaryExpr: + // use of &x + if n.Op == token.AND && refersTo(n.X, x) { + assigned = true + return + } + case *ast.AssignStmt: + for _, l := range n.Lhs { + if refersTo(l, x) { + assigned = true + return + } + } + } + } + for _, n := range scope { + if assigned { + break + } + walk(n, ff) + } + return assigned +} + +// newPkgDot returns an ast.Expr referring to "pkg.name" at position pos. +func newPkgDot(pos token.Pos, pkg, name string) ast.Expr { + return &ast.SelectorExpr{ + X: &ast.Ident{ + NamePos: pos, + Name: pkg, + }, + Sel: &ast.Ident{ + NamePos: pos, + Name: name, + }, + } +} + +// renameTop renames all references to the top-level name old. +// It returns true if it makes any changes. +func renameTop(f *ast.File, old, new string) bool { + var fixed bool + + // Rename any conflicting imports + // (assuming package name is last element of path). + for _, s := range f.Imports { + if s.Name != nil { + if s.Name.Name == old { + s.Name.Name = new + fixed = true + } + } else { + _, thisName := path.Split(importPath(s)) + if thisName == old { + s.Name = ast.NewIdent(new) + fixed = true + } + } + } + + // Rename any top-level declarations. + for _, d := range f.Decls { + switch d := d.(type) { + case *ast.FuncDecl: + if d.Recv == nil && d.Name.Name == old { + d.Name.Name = new + d.Name.Obj.Name = new + fixed = true + } + case *ast.GenDecl: + for _, s := range d.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if s.Name.Name == old { + s.Name.Name = new + s.Name.Obj.Name = new + fixed = true + } + case *ast.ValueSpec: + for _, n := range s.Names { + if n.Name == old { + n.Name = new + n.Obj.Name = new + fixed = true + } + } + } + } + } + } + + // Rename top-level old to new, both unresolved names + // (probably defined in another file) and names that resolve + // to a declaration we renamed. + walk(f, func(n interface{}) { + id, ok := n.(*ast.Ident) + if ok && isTopName(id, old) { + id.Name = new + fixed = true + } + if ok && id.Obj != nil && id.Name == old && id.Obj.Name == new { + id.Name = id.Obj.Name + fixed = true + } + }) + + return fixed +} + +// matchLen returns the length of the longest prefix shared by x and y. +func matchLen(x, y string) int { + i := 0 + for i < len(x) && i < len(y) && x[i] == y[i] { + i++ + } + return i +} + +// addImport adds the import path to the file f, if absent. 
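+// Any conflicting top-level reference named after the path's last element
+// is renamed with a trailing underscore, and the new spec is added to the
+// import block whose existing paths share the longest prefix with ipath.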
+func addImport(f *ast.File, ipath string) (added bool) { + if imports(f, ipath) { + return false + } + + // Determine name of import. + // Assume added imports follow convention of using last element. + _, name := path.Split(ipath) + + // Rename any conflicting top-level references from name to name_. + renameTop(f, name, name+"_") + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(ipath), + }, + } + + // Find an import decl to add to. + var ( + bestMatch = -1 + lastImport = -1 + impDecl *ast.GenDecl + impIndex = -1 + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Compute longest shared prefix with imports in this block. + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + n := matchLen(importPath(impspec), ipath) + if n > bestMatch { + bestMatch = n + impDecl = gen + impIndex = j + } + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Ensure the import decl has parentheses, if needed. + if len(impDecl.Specs) > 0 && !impDecl.Lparen.IsValid() { + impDecl.Lparen = impDecl.Pos() + } + + insertAt := impIndex + 1 + if insertAt == 0 { + insertAt = len(impDecl.Specs) + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + if insertAt > 0 { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + prev := impDecl.Specs[insertAt-1] + newImport.Path.ValuePos = prev.Pos() + newImport.EndPos = prev.Pos() + } + + f.Imports = append(f.Imports, newImport) + return true +} + +// deleteImport deletes the import path from the file f, if present. +func deleteImport(f *ast.File, path string) (deleted bool) { + oldImport := importSpec(f, path) + + // Find the import node that imports path, if any. + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if oldImport != impspec { + continue + } + + // We found an import spec that imports path. + // Delete it. + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + } else if len(gen.Specs) == 1 { + gen.Lparen = token.NoPos // drop parens + } + if j > 0 { + // We deleted an entry but now there will be + // a blank line-sized hole where the import was. + // Close the hole by making the previous + // import appear to "end" where this one did. + gen.Specs[j-1].(*ast.ImportSpec).EndPos = impspec.End() + } + break + } + } + + // Delete it from f.Imports. + for i, imp := range f.Imports { + if imp == oldImport { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + break + } + } + + return +} + +// rewriteImport rewrites any import of path oldPath to path newPath. 
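+// The spec's end position is recorded first so that changing the path's
+// length does not shift the positions of the surrounding source.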
+func rewriteImport(f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +func usesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + walk(f, func(n interface{}) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }) + + return +} + +func expr(s string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + panic("parsing " + s + ": " + err.Error()) + } + // Remove position information to avoid spurious newlines. + killPos(reflect.ValueOf(x)) + return x +} + +var posType = reflect.TypeOf(token.Pos(0)) + +func killPos(v reflect.Value) { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if !v.IsNil() { + killPos(v.Elem()) + } + case reflect.Slice: + n := v.Len() + for i := 0; i < n; i++ { + killPos(v.Index(i)) + } + case reflect.Struct: + n := v.NumField() + for i := 0; i < n; i++ { + f := v.Field(i) + if f.Type() == posType { + f.SetInt(0) + continue + } + killPos(f) + } + } +} + +// A Rename describes a single renaming. +type rename struct { + OldImport string // only apply rename if this import is present + NewImport string // add this import during rewrite + Old string // old name: p.T or *p.T + New string // new name: p.T or *p.T +} + +func renameFix(tab []rename) func(*ast.File) bool { + return func(f *ast.File) bool { + return renameFixTab(f, tab) + } +} + +func parseName(s string) (ptr bool, pkg, nam string) { + i := strings.Index(s, ".") + if i < 0 { + panic("parseName: invalid name " + s) + } + if strings.HasPrefix(s, "*") { + ptr = true + s = s[1:] + i-- + } + pkg = s[:i] + nam = s[i+1:] + return +} + +func renameFixTab(f *ast.File, tab []rename) bool { + fixed := false + added := map[string]bool{} + check := map[string]bool{} + for _, t := range tab { + if !imports(f, t.OldImport) { + continue + } + optr, opkg, onam := parseName(t.Old) + walk(f, func(n interface{}) { + np, ok := n.(*ast.Expr) + if !ok { + return + } + x := *np + if optr { + p, ok := x.(*ast.StarExpr) + if !ok { + return + } + x = p.X + } + if !isPkgDot(x, opkg, onam) { + return + } + if t.NewImport != "" && !added[t.NewImport] { + addImport(f, t.NewImport) + added[t.NewImport] = true + } + *np = expr(t.New) + check[t.OldImport] = true + fixed = true + }) + } + + for ipath := range check { + if !usesImport(f, ipath) { + deleteImport(f, ipath) + } + } + return fixed +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main.go b/vendor/google.golang.org/appengine/cmd/aefix/main.go new file mode 100644 index 0000000..8e193a6 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/main.go @@ -0,0 +1,258 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/scanner" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" +) + +var ( + fset = token.NewFileSet() + exitCode = 0 +) + +var allowedRewrites = flag.String("r", "", + "restrict the rewrites to this comma-separated list") + +var forceRewrites = flag.String("force", "", + "force these fixes to run even if the code looks updated") + +var allowed, force map[string]bool + +var doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files") + +// enable for debugging fix failures +const debug = false // display incorrectly reformatted source and exit + +func usage() { + fmt.Fprintf(os.Stderr, "usage: aefix [-diff] [-r fixname,...] [-force fixname,...] [path ...]\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, "\nAvailable rewrites are:\n") + sort.Sort(byName(fixes)) + for _, f := range fixes { + fmt.Fprintf(os.Stderr, "\n%s\n", f.name) + desc := strings.TrimSpace(f.desc) + desc = strings.Replace(desc, "\n", "\n\t", -1) + fmt.Fprintf(os.Stderr, "\t%s\n", desc) + } + os.Exit(2) +} + +func main() { + flag.Usage = usage + flag.Parse() + + sort.Sort(byDate(fixes)) + + if *allowedRewrites != "" { + allowed = make(map[string]bool) + for _, f := range strings.Split(*allowedRewrites, ",") { + allowed[f] = true + } + } + + if *forceRewrites != "" { + force = make(map[string]bool) + for _, f := range strings.Split(*forceRewrites, ",") { + force[f] = true + } + } + + if flag.NArg() == 0 { + if err := processFile("standard input", true); err != nil { + report(err) + } + os.Exit(exitCode) + } + + for i := 0; i < flag.NArg(); i++ { + path := flag.Arg(i) + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, false); err != nil { + report(err) + } + } + } + + os.Exit(exitCode) +} + +const parserMode = parser.ParseComments + +func gofmtFile(f *ast.File) ([]byte, error) { + var buf bytes.Buffer + if err := format.Node(&buf, fset, f); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func processFile(filename string, useStdin bool) error { + var f *os.File + var err error + var fixlog bytes.Buffer + + if useStdin { + f = os.Stdin + } else { + f, err = os.Open(filename) + if err != nil { + return err + } + defer f.Close() + } + + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + + file, err := parser.ParseFile(fset, filename, src, parserMode) + if err != nil { + return err + } + + // Apply all fixes to file. + newFile := file + fixed := false + for _, fix := range fixes { + if allowed != nil && !allowed[fix.name] { + continue + } + if fix.f(newFile) { + fixed = true + fmt.Fprintf(&fixlog, " %s", fix.name) + + // AST changed. + // Print and parse, to update any missing scoping + // or position information for subsequent fixers. + newSrc, err := gofmtFile(newFile) + if err != nil { + return err + } + newFile, err = parser.ParseFile(fset, filename, newSrc, parserMode) + if err != nil { + if debug { + fmt.Printf("%s", newSrc) + report(err) + os.Exit(exitCode) + } + return err + } + } + } + if !fixed { + return nil + } + fmt.Fprintf(os.Stderr, "%s: fixed %s\n", filename, fixlog.String()[1:]) + + // Print AST. We did that after each fix, so this appears + // redundant, but it is necessary to generate gofmt-compatible + // source code in a few cases. 
The official gofmt style is the + // output of the printer run on a standard AST generated by the parser, + // but the source we generated inside the loop above is the + // output of the printer run on a mangled AST generated by a fixer. + newSrc, err := gofmtFile(newFile) + if err != nil { + return err + } + + if *doDiff { + data, err := diff(src, newSrc) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff %s fixed/%s\n", filename, filename) + os.Stdout.Write(data) + return nil + } + + if useStdin { + os.Stdout.Write(newSrc) + return nil + } + + return ioutil.WriteFile(f.Name(), newSrc, 0) +} + +var gofmtBuf bytes.Buffer + +func gofmt(n interface{}) string { + gofmtBuf.Reset() + if err := format.Node(&gofmtBuf, fset, n); err != nil { + return "<" + err.Error() + ">" + } + return gofmtBuf.String() +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, false) + } + if err != nil { + report(err) + } + return nil +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "go-fix") + if err != nil { + return nil, err + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "go-fix") + if err != nil { + return nil, err + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/main_test.go b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go new file mode 100644 index 0000000..2151bf2 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/main_test.go @@ -0,0 +1,129 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "go/parser" + "strings" + "testing" +) + +type testCase struct { + Name string + Fn func(*ast.File) bool + In string + Out string +} + +var testCases []testCase + +func addTestCases(t []testCase, fn func(*ast.File) bool) { + // Fill in fn to avoid repetition in definitions. + if fn != nil { + for i := range t { + if t[i].Fn == nil { + t[i].Fn = fn + } + } + } + testCases = append(testCases, t...) 
+} + +func fnop(*ast.File) bool { return false } + +func parseFixPrint(t *testing.T, fn func(*ast.File) bool, desc, in string, mustBeGofmt bool) (out string, fixed, ok bool) { + file, err := parser.ParseFile(fset, desc, in, parserMode) + if err != nil { + t.Errorf("%s: parsing: %v", desc, err) + return + } + + outb, err := gofmtFile(file) + if err != nil { + t.Errorf("%s: printing: %v", desc, err) + return + } + if s := string(outb); in != s && mustBeGofmt { + t.Errorf("%s: not gofmt-formatted.\n--- %s\n%s\n--- %s | gofmt\n%s", + desc, desc, in, desc, s) + tdiff(t, in, s) + return + } + + if fn == nil { + for _, fix := range fixes { + if fix.f(file) { + fixed = true + } + } + } else { + fixed = fn(file) + } + + outb, err = gofmtFile(file) + if err != nil { + t.Errorf("%s: printing: %v", desc, err) + return + } + + return string(outb), fixed, true +} + +func TestRewrite(t *testing.T) { + for _, tt := range testCases { + // Apply fix: should get tt.Out. + out, fixed, ok := parseFixPrint(t, tt.Fn, tt.Name, tt.In, true) + if !ok { + continue + } + + // reformat to get printing right + out, _, ok = parseFixPrint(t, fnop, tt.Name, out, false) + if !ok { + continue + } + + if out != tt.Out { + t.Errorf("%s: incorrect output.\n", tt.Name) + if !strings.HasPrefix(tt.Name, "testdata/") { + t.Errorf("--- have\n%s\n--- want\n%s", out, tt.Out) + } + tdiff(t, out, tt.Out) + continue + } + + if changed := out != tt.In; changed != fixed { + t.Errorf("%s: changed=%v != fixed=%v", tt.Name, changed, fixed) + continue + } + + // Should not change if run again. + out2, fixed2, ok := parseFixPrint(t, tt.Fn, tt.Name+" output", out, true) + if !ok { + continue + } + + if fixed2 { + t.Errorf("%s: applied fixes during second round", tt.Name) + continue + } + + if out2 != out { + t.Errorf("%s: changed output after second round of fixes.\n--- output after first round\n%s\n--- output after second round\n%s", + tt.Name, out, out2) + tdiff(t, out, out2) + } + } +} + +func tdiff(t *testing.T, a, b string) { + data, err := diff([]byte(a), []byte(b)) + if err != nil { + t.Error(err) + return + } + t.Error(string(data)) +} diff --git a/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go new file mode 100644 index 0000000..d54d375 --- /dev/null +++ b/vendor/google.golang.org/appengine/cmd/aefix/typecheck.go @@ -0,0 +1,673 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "go/ast" + "go/token" + "os" + "reflect" + "strings" +) + +// Partial type checker. +// +// The fact that it is partial is very important: the input is +// an AST and a description of some type information to +// assume about one or more packages, but not all the +// packages that the program imports. The checker is +// expected to do as much as it can with what it has been +// given. There is not enough information supplied to do +// a full type check, but the type checker is expected to +// apply information that can be derived from variable +// declarations, function and method returns, and type switches +// as far as it can, so that the caller can still tell the types +// of expression relevant to a particular fix. +// +// TODO(rsc,gri): Replace with go/typechecker. +// Doing that could be an interesting test case for go/typechecker: +// the constraints about working with partial information will +// likely exercise it in interesting ways. 
The ideal interface would +// be to pass typecheck a map from importpath to package API text +// (Go source code), but for now we use data structures (TypeConfig, Type). +// +// The strings mostly use gofmt form. +// +// A Field or FieldList has as its type a comma-separated list +// of the types of the fields. For example, the field list +// x, y, z int +// has type "int, int, int". + +// The prefix "type " is the type of a type. +// For example, given +// var x int +// type T int +// x's type is "int" but T's type is "type int". +// mkType inserts the "type " prefix. +// getType removes it. +// isType tests for it. + +func mkType(t string) string { + return "type " + t +} + +func getType(t string) string { + if !isType(t) { + return "" + } + return t[len("type "):] +} + +func isType(t string) bool { + return strings.HasPrefix(t, "type ") +} + +// TypeConfig describes the universe of relevant types. +// For ease of creation, the types are all referred to by string +// name (e.g., "reflect.Value"). TypeByName is the only place +// where the strings are resolved. + +type TypeConfig struct { + Type map[string]*Type + Var map[string]string + Func map[string]string +} + +// typeof returns the type of the given name, which may be of +// the form "x" or "p.X". +func (cfg *TypeConfig) typeof(name string) string { + if cfg.Var != nil { + if t := cfg.Var[name]; t != "" { + return t + } + } + if cfg.Func != nil { + if t := cfg.Func[name]; t != "" { + return "func()" + t + } + } + return "" +} + +// Type describes the Fields and Methods of a type. +// If the field or method cannot be found there, it is next +// looked for in the Embed list. +type Type struct { + Field map[string]string // map field name to type + Method map[string]string // map method name to comma-separated return types (should start with "func ") + Embed []string // list of types this type embeds (for extra methods) + Def string // definition of named type +} + +// dot returns the type of "typ.name", making its decision +// using the type information in cfg. +func (typ *Type) dot(cfg *TypeConfig, name string) string { + if typ.Field != nil { + if t := typ.Field[name]; t != "" { + return t + } + } + if typ.Method != nil { + if t := typ.Method[name]; t != "" { + return t + } + } + + for _, e := range typ.Embed { + etyp := cfg.Type[e] + if etyp != nil { + if t := etyp.dot(cfg, name); t != "" { + return t + } + } + } + + return "" +} + +// typecheck type checks the AST f assuming the information in cfg. +// It returns two maps with type information: +// typeof maps AST nodes to type information in gofmt string form. +// assign maps type strings to lists of expressions that were assigned +// to values of another type that were assigned to that type. +func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) { + typeof = make(map[interface{}]string) + assign = make(map[string][]interface{}) + cfg1 := &TypeConfig{} + *cfg1 = *cfg // make copy so we can add locally + copied := false + + // gather function declarations + for _, decl := range f.Decls { + fn, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + typecheck1(cfg, fn.Type, typeof, assign) + t := typeof[fn.Type] + if fn.Recv != nil { + // The receiver must be a type. 
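+			// For a method func (t *T) M(), the receiver type is recorded as "*T"
+			// and the method type is indexed under "T.M".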
+ rcvr := typeof[fn.Recv] + if !isType(rcvr) { + if len(fn.Recv.List) != 1 { + continue + } + rcvr = mkType(gofmt(fn.Recv.List[0].Type)) + typeof[fn.Recv.List[0].Type] = rcvr + } + rcvr = getType(rcvr) + if rcvr != "" && rcvr[0] == '*' { + rcvr = rcvr[1:] + } + typeof[rcvr+"."+fn.Name.Name] = t + } else { + if isType(t) { + t = getType(t) + } else { + t = gofmt(fn.Type) + } + typeof[fn.Name] = t + + // Record typeof[fn.Name.Obj] for future references to fn.Name. + typeof[fn.Name.Obj] = t + } + } + + // gather struct declarations + for _, decl := range f.Decls { + d, ok := decl.(*ast.GenDecl) + if ok { + for _, s := range d.Specs { + switch s := s.(type) { + case *ast.TypeSpec: + if cfg1.Type[s.Name.Name] != nil { + break + } + if !copied { + copied = true + // Copy map lazily: it's time. + cfg1.Type = make(map[string]*Type) + for k, v := range cfg.Type { + cfg1.Type[k] = v + } + } + t := &Type{Field: map[string]string{}} + cfg1.Type[s.Name.Name] = t + switch st := s.Type.(type) { + case *ast.StructType: + for _, f := range st.Fields.List { + for _, n := range f.Names { + t.Field[n.Name] = gofmt(f.Type) + } + } + case *ast.ArrayType, *ast.StarExpr, *ast.MapType: + t.Def = gofmt(st) + } + } + } + } + } + + typecheck1(cfg1, f, typeof, assign) + return typeof, assign +} + +func makeExprList(a []*ast.Ident) []ast.Expr { + var b []ast.Expr + for _, x := range a { + b = append(b, x) + } + return b +} + +// Typecheck1 is the recursive form of typecheck. +// It is like typecheck but adds to the information in typeof +// instead of allocating a new map. +func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) { + // set sets the type of n to typ. + // If isDecl is true, n is being declared. + set := func(n ast.Expr, typ string, isDecl bool) { + if typeof[n] != "" || typ == "" { + if typeof[n] != typ { + assign[typ] = append(assign[typ], n) + } + return + } + typeof[n] = typ + + // If we obtained typ from the declaration of x + // propagate the type to all the uses. + // The !isDecl case is a cheat here, but it makes + // up in some cases for not paying attention to + // struct fields. The real type checker will be + // more accurate so we won't need the cheat. + if id, ok := n.(*ast.Ident); ok && id.Obj != nil && (isDecl || typeof[id.Obj] == "") { + typeof[id.Obj] = typ + } + } + + // Type-check an assignment lhs = rhs. + // If isDecl is true, this is := so we can update + // the types of the objects that lhs refers to. + typecheckAssign := func(lhs, rhs []ast.Expr, isDecl bool) { + if len(lhs) > 1 && len(rhs) == 1 { + if _, ok := rhs[0].(*ast.CallExpr); ok { + t := split(typeof[rhs[0]]) + // Lists should have same length but may not; pair what can be paired. + for i := 0; i < len(lhs) && i < len(t); i++ { + set(lhs[i], t[i], isDecl) + } + return + } + } + if len(lhs) == 1 && len(rhs) == 2 { + // x = y, ok + rhs = rhs[:1] + } else if len(lhs) == 2 && len(rhs) == 1 { + // x, ok = y + lhs = lhs[:1] + } + + // Match as much as we can. + for i := 0; i < len(lhs) && i < len(rhs); i++ { + x, y := lhs[i], rhs[i] + if typeof[y] != "" { + set(x, typeof[y], isDecl) + } else { + set(y, typeof[x], false) + } + } + } + + expand := func(s string) string { + typ := cfg.Type[s] + if typ != nil && typ.Def != "" { + return typ.Def + } + return s + } + + // The main type check is a recursive algorithm implemented + // by walkBeforeAfter(n, before, after). 
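+	// The before hook fires as each node is entered, after as it is left.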
+ // Most of it is bottom-up, but in a few places we need + // to know the type of the function we are checking. + // The before function records that information on + // the curfn stack. + var curfn []*ast.FuncType + + before := func(n interface{}) { + // push function type on stack + switch n := n.(type) { + case *ast.FuncDecl: + curfn = append(curfn, n.Type) + case *ast.FuncLit: + curfn = append(curfn, n.Type) + } + } + + // After is the real type checker. + after := func(n interface{}) { + if n == nil { + return + } + if false && reflect.TypeOf(n).Kind() == reflect.Ptr { // debugging trace + defer func() { + if t := typeof[n]; t != "" { + pos := fset.Position(n.(ast.Node).Pos()) + fmt.Fprintf(os.Stderr, "%s: typeof[%s] = %s\n", pos, gofmt(n), t) + } + }() + } + + switch n := n.(type) { + case *ast.FuncDecl, *ast.FuncLit: + // pop function type off stack + curfn = curfn[:len(curfn)-1] + + case *ast.FuncType: + typeof[n] = mkType(joinFunc(split(typeof[n.Params]), split(typeof[n.Results]))) + + case *ast.FieldList: + // Field list is concatenation of sub-lists. + t := "" + for _, field := range n.List { + if t != "" { + t += ", " + } + t += typeof[field] + } + typeof[n] = t + + case *ast.Field: + // Field is one instance of the type per name. + all := "" + t := typeof[n.Type] + if !isType(t) { + // Create a type, because it is typically *T or *p.T + // and we might care about that type. + t = mkType(gofmt(n.Type)) + typeof[n.Type] = t + } + t = getType(t) + if len(n.Names) == 0 { + all = t + } else { + for _, id := range n.Names { + if all != "" { + all += ", " + } + all += t + typeof[id.Obj] = t + typeof[id] = t + } + } + typeof[n] = all + + case *ast.ValueSpec: + // var declaration. Use type if present. + if n.Type != nil { + t := typeof[n.Type] + if !isType(t) { + t = mkType(gofmt(n.Type)) + typeof[n.Type] = t + } + t = getType(t) + for _, id := range n.Names { + set(id, t, true) + } + } + // Now treat same as assignment. + typecheckAssign(makeExprList(n.Names), n.Values, true) + + case *ast.AssignStmt: + typecheckAssign(n.Lhs, n.Rhs, n.Tok == token.DEFINE) + + case *ast.Ident: + // Identifier can take its type from underlying object. + if t := typeof[n.Obj]; t != "" { + typeof[n] = t + } + + case *ast.SelectorExpr: + // Field or method. + name := n.Sel.Name + if t := typeof[n.X]; t != "" { + if strings.HasPrefix(t, "*") { + t = t[1:] // implicit * + } + if typ := cfg.Type[t]; typ != nil { + if t := typ.dot(cfg, name); t != "" { + typeof[n] = t + return + } + } + tt := typeof[t+"."+name] + if isType(tt) { + typeof[n] = getType(tt) + return + } + } + // Package selector. + if x, ok := n.X.(*ast.Ident); ok && x.Obj == nil { + str := x.Name + "." + name + if cfg.Type[str] != nil { + typeof[n] = mkType(str) + return + } + if t := cfg.typeof(x.Name + "." + name); t != "" { + typeof[n] = t + return + } + } + + case *ast.CallExpr: + // make(T) has type T. + if isTopName(n.Fun, "make") && len(n.Args) >= 1 { + typeof[n] = gofmt(n.Args[0]) + return + } + // new(T) has type *T + if isTopName(n.Fun, "new") && len(n.Args) == 1 { + typeof[n] = "*" + gofmt(n.Args[0]) + return + } + // Otherwise, use type of function to determine arguments. + t := typeof[n.Fun] + in, out := splitFunc(t) + if in == nil && out == nil { + return + } + typeof[n] = join(out) + for i, arg := range n.Args { + if i >= len(in) { + break + } + if typeof[arg] == "" { + typeof[arg] = in[i] + } + } + + case *ast.TypeAssertExpr: + // x.(type) has type of x. 
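+			// (n.Type is nil for the x.(type) form inside a type switch.)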
+ if n.Type == nil { + typeof[n] = typeof[n.X] + return + } + // x.(T) has type T. + if t := typeof[n.Type]; isType(t) { + typeof[n] = getType(t) + } else { + typeof[n] = gofmt(n.Type) + } + + case *ast.SliceExpr: + // x[i:j] has type of x. + typeof[n] = typeof[n.X] + + case *ast.IndexExpr: + // x[i] has key type of x's type. + t := expand(typeof[n.X]) + if strings.HasPrefix(t, "[") || strings.HasPrefix(t, "map[") { + // Lazy: assume there are no nested [] in the array + // length or map key type. + if i := strings.Index(t, "]"); i >= 0 { + typeof[n] = t[i+1:] + } + } + + case *ast.StarExpr: + // *x for x of type *T has type T when x is an expr. + // We don't use the result when *x is a type, but + // compute it anyway. + t := expand(typeof[n.X]) + if isType(t) { + typeof[n] = "type *" + getType(t) + } else if strings.HasPrefix(t, "*") { + typeof[n] = t[len("*"):] + } + + case *ast.UnaryExpr: + // &x for x of type T has type *T. + t := typeof[n.X] + if t != "" && n.Op == token.AND { + typeof[n] = "*" + t + } + + case *ast.CompositeLit: + // T{...} has type T. + typeof[n] = gofmt(n.Type) + + case *ast.ParenExpr: + // (x) has type of x. + typeof[n] = typeof[n.X] + + case *ast.RangeStmt: + t := expand(typeof[n.X]) + if t == "" { + return + } + var key, value string + if t == "string" { + key, value = "int", "rune" + } else if strings.HasPrefix(t, "[") { + key = "int" + if i := strings.Index(t, "]"); i >= 0 { + value = t[i+1:] + } + } else if strings.HasPrefix(t, "map[") { + if i := strings.Index(t, "]"); i >= 0 { + key, value = t[4:i], t[i+1:] + } + } + changed := false + if n.Key != nil && key != "" { + changed = true + set(n.Key, key, n.Tok == token.DEFINE) + } + if n.Value != nil && value != "" { + changed = true + set(n.Value, value, n.Tok == token.DEFINE) + } + // Ugly failure of vision: already type-checked body. + // Do it again now that we have that type info. + if changed { + typecheck1(cfg, n.Body, typeof, assign) + } + + case *ast.TypeSwitchStmt: + // Type of variable changes for each case in type switch, + // but go/parser generates just one variable. + // Repeat type check for each case with more precise + // type information. + as, ok := n.Assign.(*ast.AssignStmt) + if !ok { + return + } + varx, ok := as.Lhs[0].(*ast.Ident) + if !ok { + return + } + t := typeof[varx] + for _, cas := range n.Body.List { + cas := cas.(*ast.CaseClause) + if len(cas.List) == 1 { + // Variable has specific type only when there is + // exactly one type in the case list. + if tt := typeof[cas.List[0]]; isType(tt) { + tt = getType(tt) + typeof[varx] = tt + typeof[varx.Obj] = tt + typecheck1(cfg, cas.Body, typeof, assign) + } + } + } + // Restore t. + typeof[varx] = t + typeof[varx.Obj] = t + + case *ast.ReturnStmt: + if len(curfn) == 0 { + // Probably can't happen. + return + } + f := curfn[len(curfn)-1] + res := n.Results + if f.Results != nil { + t := split(typeof[f.Results]) + for i := 0; i < len(res) && i < len(t); i++ { + set(res[i], t[i], false) + } + } + } + } + walkBeforeAfter(f, before, after) +} + +// Convert between function type strings and lists of types. +// Using strings makes this a little harder, but it makes +// a lot of the rest of the code easier. This will all go away +// when we can use go/typechecker directly. + +// splitFunc splits "func(x,y,z) (a,b,c)" into ["x", "y", "z"] and ["a", "b", "c"]. 
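+// For example, with a nested function type,
+// splitFunc("func(func(int) int, string) bool") returns
+// ["func(int) int", "string"] and ["bool"].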
+func splitFunc(s string) (in, out []string) { + if !strings.HasPrefix(s, "func(") { + return nil, nil + } + + i := len("func(") // index of beginning of 'in' arguments + nparen := 0 + for j := i; j < len(s); j++ { + switch s[j] { + case '(': + nparen++ + case ')': + nparen-- + if nparen < 0 { + // found end of parameter list + out := strings.TrimSpace(s[j+1:]) + if len(out) >= 2 && out[0] == '(' && out[len(out)-1] == ')' { + out = out[1 : len(out)-1] + } + return split(s[i:j]), split(out) + } + } + } + return nil, nil +} + +// joinFunc is the inverse of splitFunc. +func joinFunc(in, out []string) string { + outs := "" + if len(out) == 1 { + outs = " " + out[0] + } else if len(out) > 1 { + outs = " (" + join(out) + ")" + } + return "func(" + join(in) + ")" + outs +} + +// split splits "int, float" into ["int", "float"] and splits "" into []. +func split(s string) []string { + out := []string{} + i := 0 // current type being scanned is s[i:j]. + nparen := 0 + for j := 0; j < len(s); j++ { + switch s[j] { + case ' ': + if i == j { + i++ + } + case '(': + nparen++ + case ')': + nparen-- + if nparen < 0 { + // probably can't happen + return nil + } + case ',': + if nparen == 0 { + if i < j { + out = append(out, s[i:j]) + } + i = j + 1 + } + } + } + if nparen != 0 { + // probably can't happen + return nil + } + if i < len(s) { + out = append(out, s[i:]) + } + return out +} + +// join is the inverse of split. +func join(x []string) string { + return strings.Join(x, ", ") +} diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go new file mode 100644 index 0000000..9422e41 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/datastore.go @@ -0,0 +1,406 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/datastore" +) + +var ( + // ErrInvalidEntityType is returned when functions like Get or Next are + // passed a dst or src argument of invalid type. + ErrInvalidEntityType = errors.New("datastore: invalid entity type") + // ErrInvalidKey is returned when an invalid key is presented. + ErrInvalidKey = errors.New("datastore: invalid key") + // ErrNoSuchEntity is returned when no entity was found for a given key. + ErrNoSuchEntity = errors.New("datastore: no such entity") +) + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument +// passed to Get or to Iterator.Next. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +// protoToKey converts a Reference proto to a *Key. 
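+// The path elements are walked in order, so the returned key's parent
+// chain mirrors the proto's element hierarchy; any invalid element
+// yields ErrInvalidKey.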
+func protoToKey(r *pb.Reference) (k *Key, err error) { + appID := r.GetApp() + namespace := r.GetNameSpace() + for _, e := range r.Path.Element { + k = &Key{ + kind: e.GetType(), + stringID: e.GetName(), + intID: e.GetId(), + parent: k, + appID: appID, + namespace: namespace, + } + if !k.valid() { + return nil, ErrInvalidKey + } + } + return +} + +// keyToProto converts a *Key to a Reference proto. +func keyToProto(defaultAppID string, k *Key) *pb.Reference { + appID := k.appID + if appID == "" { + appID = defaultAppID + } + n := 0 + for i := k; i != nil; i = i.parent { + n++ + } + e := make([]*pb.Path_Element, n) + for i := k; i != nil; i = i.parent { + n-- + e[n] = &pb.Path_Element{ + Type: &i.kind, + } + // At most one of {Name,Id} should be set. + // Neither will be set for incomplete keys. + if i.stringID != "" { + e[n].Name = &i.stringID + } else if i.intID != 0 { + e[n].Id = &i.intID + } + } + var namespace *string + if k.namespace != "" { + namespace = proto.String(k.namespace) + } + return &pb.Reference{ + App: proto.String(appID), + NameSpace: namespace, + Path: &pb.Path{ + Element: e, + }, + } +} + +// multiKeyToProto is a batch version of keyToProto. +func multiKeyToProto(appID string, key []*Key) []*pb.Reference { + ret := make([]*pb.Reference, len(key)) + for i, k := range key { + ret[i] = keyToProto(appID, k) + } + return ret +} + +// multiValid is a batch version of Key.valid. It returns an error, not a +// []bool. +func multiValid(key []*Key) error { + invalid := false + for _, k := range key { + if !k.valid() { + invalid = true + break + } + } + if !invalid { + return nil + } + err := make(appengine.MultiError, len(key)) + for i, k := range key { + if !k.valid() { + err[i] = ErrInvalidKey + } + } + return err +} + +// It's unfortunate that the two semantically equivalent concepts pb.Reference +// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the +// two have different protobuf field numbers. + +// referenceValueToKey is the same as protoToKey except the input is a +// PropertyValue_ReferenceValue instead of a Reference. +func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) { + appID := r.GetApp() + namespace := r.GetNameSpace() + for _, e := range r.Pathelement { + k = &Key{ + kind: e.GetType(), + stringID: e.GetName(), + intID: e.GetId(), + parent: k, + appID: appID, + namespace: namespace, + } + if !k.valid() { + return nil, ErrInvalidKey + } + } + return +} + +// keyToReferenceValue is the same as keyToProto except the output is a +// PropertyValue_ReferenceValue instead of a Reference. +func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue { + ref := keyToProto(defaultAppID, k) + pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element)) + for i, e := range ref.Path.Element { + pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{ + Type: e.Type, + Id: e.Id, + Name: e.Name, + } + } + return &pb.PropertyValue_ReferenceValue{ + App: ref.App, + NameSpace: ref.NameSpace, + Pathelement: pe, + } +} + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypePropertyLoadSaver + multiArgTypeStruct + multiArgTypeStructPtr + multiArgTypeInterface +) + +// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct +// type S, for some interface type I, or some non-interface non-pointer type P +// such that P or *P implements PropertyLoadSaver. 
+// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S, I or P. +// +// As a special case, PropertyList is an invalid type for v. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + if v.Type() == typeOfPropertyList { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { + return multiArgTypePropertyLoadSaver, elemType + } + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Interface: + return multiArgTypeInterface, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +// Get loads the entity stored for k into dst, which must be a struct pointer +// or implement PropertyLoadSaver. If there is no such entity for the key, Get +// returns ErrNoSuchEntity. +// +// The values of dst's unmatched struct fields are not modified, and matching +// slice-typed fields are not reset before appending to them. In particular, it +// is recommended to pass a pointer to a zero valued struct on each Get call. +// +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. ErrFieldMismatch is only returned if +// dst is a struct pointer. +func Get(c context.Context, key *Key, dst interface{}) error { + if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here + return ErrInvalidEntityType + } + err := GetMulti(c, []*Key{key}, []interface{}{dst}) + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +// +// dst must be a []S, []*S, []I or []P, for some struct type S, some interface +// type I, or some non-interface non-pointer type P such that P or *P +// implements PropertyLoadSaver. If an []I, each element must be a valid dst +// for Get: it must be a struct pointer or implement PropertyLoadSaver. +// +// As a special case, PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when []PropertyList was intended. 
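+//
+// A minimal sketch of typical use (illustrative; ctx, keys and Entity are
+// placeholders, not part of this package):
+//
+//	dst := make([]Entity, len(keys))
+//	err := datastore.GetMulti(ctx, keys, dst)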
+func GetMulti(c context.Context, key []*Key, dst interface{}) error { + v := reflect.ValueOf(dst) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return errors.New("datastore: dst has invalid type") + } + if len(key) != v.Len() { + return errors.New("datastore: key and dst slices have different length") + } + if len(key) == 0 { + return nil + } + if err := multiValid(key); err != nil { + return err + } + req := &pb.GetRequest{ + Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), + } + res := &pb.GetResponse{} + if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil { + return err + } + if len(key) != len(res.Entity) { + return errors.New("datastore: internal error: server returned the wrong number of entities") + } + multiErr, any := make(appengine.MultiError, len(key)), false + for i, e := range res.Entity { + if e.Entity == nil { + multiErr[i] = ErrNoSuchEntity + } else { + elem := v.Index(i) + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + if multiArgType == multiArgTypeStructPtr && elem.IsNil() { + elem.Set(reflect.New(elem.Type().Elem())) + } + multiErr[i] = loadEntity(elem.Interface(), e.Entity) + } + if multiErr[i] != nil { + any = true + } + } + if any { + return multiErr + } + return nil +} + +// Put saves the entity src into the datastore with key k. src must be a struct +// pointer or implement PropertyLoadSaver; if a struct pointer then any +// unexported fields of that struct will be skipped. If k is an incomplete key, +// the returned key will be a unique key generated by the datastore. +func Put(c context.Context, key *Key, src interface{}) (*Key, error) { + k, err := PutMulti(c, []*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(appengine.MultiError); ok { + return nil, me[0] + } + return nil, err + } + return k[0], nil +} + +// PutMulti is a batch version of Put. +// +// src must satisfy the same conditions as the dst argument to GetMulti. +func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(key) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(key) == 0 { + return nil, nil + } + appID := internal.FullyQualifiedAppID(c) + if err := multiValid(key); err != nil { + return nil, err + } + req := &pb.PutRequest{} + for i := range key { + elem := v.Index(i) + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + sProto, err := saveEntity(appID, key[i], elem.Interface()) + if err != nil { + return nil, err + } + req.Entity = append(req.Entity, sProto) + } + res := &pb.PutResponse{} + if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil { + return nil, err + } + if len(key) != len(res.Key) { + return nil, errors.New("datastore: internal error: server returned the wrong number of keys") + } + ret := make([]*Key, len(key)) + for i := range ret { + var err error + ret[i], err = protoToKey(res.Key[i]) + if err != nil || ret[i].Incomplete() { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } + return ret, nil +} + +// Delete deletes the entity for the given key. 
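+// It is a thin wrapper around DeleteMulti that unwraps a single-element
+// MultiError into a plain error.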
+func Delete(c context.Context, key *Key) error { + err := DeleteMulti(c, []*Key{key}) + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func DeleteMulti(c context.Context, key []*Key) error { + if len(key) == 0 { + return nil + } + if err := multiValid(key); err != nil { + return err + } + req := &pb.DeleteRequest{ + Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), + } + res := &pb.DeleteResponse{} + return internal.Call(c, "datastore_v3", "Delete", req, res) +} + +func namespaceMod(m proto.Message, namespace string) { + // pb.Query is the only type that has a name_space field. + // All other namespace support in datastore is in the keys. + switch m := m.(type) { + case *pb.Query: + if m.NameSpace == nil { + m.NameSpace = &namespace + } + } +} + +func init() { + internal.NamespaceMods["datastore_v3"] = namespaceMod + internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name) + internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT)) +} diff --git a/vendor/google.golang.org/appengine/datastore/datastore_test.go b/vendor/google.golang.org/appengine/datastore/datastore_test.go new file mode 100644 index 0000000..b2856a9 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/datastore_test.go @@ -0,0 +1,1567 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "strings" + "testing" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/datastore" +) + +const testAppID = "testApp" + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return &Key{ + kind: "kind", + stringID: stringID, + intID: 0, + parent: parent, + appID: testAppID, + } +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) + testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4} + testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10} + testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34} +) + +type B0 struct { + B []byte +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob +} + +type B3 struct { + B []myByte +} + +type B4 struct { + B [][]byte +} + +type B5 struct { + B ByteString +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type E struct{} + +type G0 struct { + G appengine.GeoPoint +} + +type G1 struct { + G []appengine.GeoPoint +} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` + Green N1 
`datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct { + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + // The "flatten" option is parsed but ignored for now. + F int `datastore:",noindex,flatten"` + G int `datastore:",flatten"` + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-,"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S []struct { + J int + F []float64 + } +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type Doubler struct { + S string + I int64 + B bool +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). + props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. + list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I", but Multiple is false. 
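+	// saveEntity should reject this; the test expects a "Multiple is false" error.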
+ var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type BK struct { + Key appengine.BlobKey +} + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "geopoint", + &G0{G: testGeoPt0}, + &G0{G: testGeoPt0}, + "", + "", + }, + { + "geopoint invalid", + &G0{G: testBadGeoPt}, + &G0{}, + "invalid GeoPoint value", + "", + }, + { + "geopoint as props", + &G0{G: testGeoPt0}, + &PropertyList{ + Property{Name: "G", Value: testGeoPt0, NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "geopoint slice", + &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}}, + &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}}, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0).UTC(), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + "type mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed 
properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "short ByteString", + &B5{B: ByteString(makeUint8Slice(3))}, + &B5{B: ByteString(makeUint8Slice(3))}, + "", + "", + }, + { + "short ByteString as props", + &B5{B: ByteString(makeUint8Slice(3))}, + &PropertyList{ + Property{Name: "B", Value: ByteString(makeUint8Slice(3)), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "short ByteString into string", + &B5{B: ByteString("legacy")}, + &struct{ B string }{"legacy"}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(3), NoIndex: false}, + }, + nil, + "cannot index a []byte valued Property", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, F: 6, G: 7, I: 8, J: 9}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. + // Indexed properties are loaded before raw properties. Thus, the + // result is: b, b, b, D, E, a, c. 
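+			// (Including the F, G and J fields, the full order is:
+			// b, b, b, D, E, G, a, C, F, J.)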
+ Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true}, + Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true}, + Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true}, + Property{Name: "D", Value: int64(4), NoIndex: false, Multiple: false}, + Property{Name: "E", Value: int64(5), NoIndex: false, Multiple: false}, + Property{Name: "G", Value: int64(7), NoIndex: false, Multiple: false}, + Property{Name: "a", Value: int64(1), NoIndex: true, Multiple: false}, + Property{Name: "C", Value: int64(3), NoIndex: true, Multiple: false}, + Property{Name: "F", Value: int64(6), NoIndex: true, Multiple: false}, + Property{Name: "J", Value: int64(9), NoIndex: true, Multiple: false}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "save props load tagged", + &PropertyList{ + Property{Name: "A", Value: int64(11), NoIndex: true, Multiple: false}, + Property{Name: "a", Value: int64(12), NoIndex: true, Multiple: false}, + }, + &Tagged{A: 12}, + "", + `cannot load field "A"`, + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{}, + "struct tag has repeated property name", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "S", Value: "s", NoIndex: false, Multiple: false}, + Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "S", Value: "s", NoIndex: false, Multiple: false}, + Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "B", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "S", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "F", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "K", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "T", Value: nil, NoIndex: false, Multiple: false}, + Property{Name: "J", Value: nil, NoIndex: false, Multiple: true}, + Property{Name: "J", Value: int64(7), NoIndex: false, Multiple: true}, + Property{Name: "J", Value: nil, NoIndex: false, Multiple: true}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 7, 0}, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false}, + Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true}, + Property{Name: 
"J.Y", Value: float64(3.14), NoIndex: false, Multiple: false}, + Property{Name: "Z", Value: true, NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false}, + Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true}, + Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true}, + Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false}, + Property{Name: "Z", Value: true, NoIndex: false, Multiple: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "save outer-equivalent load outer", + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false, Multiple: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + "", + }, + { + "bad multi-prop entity", + &BadMultiPropEntity{}, + &BadMultiPropEntity{}, + "Multiple is false", + "", + }, + // Regression: CL 25062824 broke handling of appengine.BlobKey fields. 
+ { + "appengine.BlobKey", + &BK{Key: "blah"}, + &BK{Key: "blah"}, + "", + "", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "red.S", Value: "rouge", NoIndex: false, Multiple: false}, + Property{Name: "red.I", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true}, + Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "red.Other", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "green.S", Value: "vert", NoIndex: false, Multiple: false}, + Property{Name: "green.I", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true}, + Property{Name: 
"green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "green.Other", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "Blue.S", Value: "bleu", NoIndex: false, Multiple: false}, + Property{Name: "Blue.I", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Other", Value: "", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false, Multiple: false}, + Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false, Multiple: true}, + Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false, Multiple: true}, + Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false, Multiple: true}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + Y string + } + }{}, + &PropertyList{ + Property{Name: "B.Y", Value: "", NoIndex: false, Multiple: false}, + Property{Name: "A.X", Value: "", NoIndex: true, Multiple: false}, + Property{Name: "A.Y", Value: "", NoIndex: true, Multiple: false}, + Property{Name: "B.X", Value: "", NoIndex: true, Multiple: false}, + }, + "", + "", + }, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo.W", Value: int64(0), NoIndex: false, Multiple: false}, + Property{Name: "foo.X", Value: "", NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "slice of slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + &Recursive{}, + nil, + "recursive struct", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + nil, + "recursive struct", + "", + }, + { + "non-exported 
struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false, Multiple: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: true, Multiple: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, +} + +// checkErr returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. +func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testAppID, testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntity(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + equal := false + if gotT, ok := got.(*T); ok { + // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. + // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. + equal = gotT.T.Equal(tc.want.(*T).T) + } else { + equal = reflect.DeepEqual(got, tc.want) + } + if !equal { + t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want) + continue + } + } +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. + q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. + q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. + q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. 
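+			// (Only a leading "-" is recognized; a "+" prefix is an invalid order.)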
+ q: NewQuery("Foo").Order("+bar"), + err: "invalid order", + }, + } + for i, test := range tests { + if test.q.err != nil { + got := test.q.err.Error() + if !strings.Contains(got, test.err) { + t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) + } + continue + } + if !reflect.DeepEqual(test.q, test.exp) { + t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) + } + } +} + +func TestStringMeaning(t *testing.T) { + var xx [4]interface{} + xx[0] = &struct { + X string + }{"xx0"} + xx[1] = &struct { + X string `datastore:",noindex"` + }{"xx1"} + xx[2] = &struct { + X []byte + }{[]byte("xx2")} + xx[3] = &struct { + X []byte `datastore:",noindex"` + }{[]byte("xx3")} + + indexed := [4]bool{ + true, + false, + false, // A []byte is always no-index. + false, + } + want := [4]pb.Property_Meaning{ + pb.Property_NO_MEANING, + pb.Property_TEXT, + pb.Property_BLOB, + pb.Property_BLOB, + } + + for i, x := range xx { + props, err := SaveStruct(x) + if err != nil { + t.Errorf("i=%d: SaveStruct: %v", i, err) + continue + } + e, err := propertiesToProto("appID", testKey0, props) + if err != nil { + t.Errorf("i=%d: propertiesToProto: %v", i, err) + continue + } + var p *pb.Property + switch { + case indexed[i] && len(e.Property) == 1: + p = e.Property[0] + case !indexed[i] && len(e.RawProperty) == 1: + p = e.RawProperty[0] + default: + t.Errorf("i=%d: EntityProto did not have expected property slice", i) + continue + } + if got := p.GetMeaning(); got != want[i] { + t.Errorf("i=%d: meaning: got %v, want %v", i, got, want[i]) + continue + } + } +} + +func TestNamespaceResetting(t *testing.T) { + // These environment variables are necessary because *Query.Run will + // call internal.FullyQualifiedAppID which checks these variables or falls + // back to the Metadata service that is not available in tests. + environ := []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_PARTITION", "1"}, + } + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + defer func() { // Restore old environment after the test completes. + for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + }() + + namec := make(chan *string, 1) + c0 := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(req *pb.Query, res *pb.QueryResult) error { + namec <- req.NameSpace + return fmt.Errorf("RPC error") + }) + + // Check that wrapping c0 in a namespace twice works correctly. + c1, err := appengine.Namespace(c0, "A") + if err != nil { + t.Fatalf("appengine.Namespace: %v", err) + } + c2, err := appengine.Namespace(c1, "") // should act as the original context + if err != nil { + t.Fatalf("appengine.Namespace: %v", err) + } + + q := NewQuery("SomeKind") + + q.Run(c0) + if ns := <-namec; ns != nil { + t.Errorf(`RunQuery with c0: ns = %q, want nil`, *ns) + } + + q.Run(c1) + if ns := <-namec; ns == nil { + t.Error(`RunQuery with c1: ns = nil, want "A"`) + } else if *ns != "A" { + t.Errorf(`RunQuery with c1: ns = %q, want "A"`, *ns) + } + + q.Run(c2) + if ns := <-namec; ns != nil { + t.Errorf(`RunQuery with c2: ns = %q, want nil`, *ns) + } +} diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go new file mode 100644 index 0000000..92ffe6d --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/doc.go @@ -0,0 +1,351 @@ +// Copyright 2011 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package datastore provides a client for App Engine's datastore service. + + +Basic Operations + +Entities are the unit of storage and are associated with a key. A key +consists of an optional parent key, a string application ID, a string kind +(also known as an entity type), and either a StringID or an IntID. A +StringID is also known as an entity name or key name. + +It is valid to create a key with a zero StringID and a zero IntID; this is +called an incomplete key, and does not refer to any saved entity. Putting an +entity into the datastore under an incomplete key will cause a unique key +to be generated for that entity, with a non-zero IntID. + +An entity's contents are a mapping from case-sensitive field names to values. +Valid value types are: + - signed integers (int, int8, int16, int32 and int64), + - bool, + - string, + - float32 and float64, + - []byte (up to 1 megabyte in length), + - any type whose underlying type is one of the above predeclared types, + - ByteString, + - *Key, + - time.Time (stored with microsecond precision), + - appengine.BlobKey, + - appengine.GeoPoint, + - structs whose fields are all valid value types, + - slices of any of the above. + +Slices of structs are valid, as are structs that contain slices. However, if +one struct contains another, then at most one of those can be repeated. This +disqualifies recursively defined struct types: any struct T that (directly or +indirectly) contains a []T. + +The Get and Put functions load and save an entity's contents. An entity's +contents are typically represented by a struct pointer. + +Example code: + + type Entity struct { + Value string + } + + func handle(w http.ResponseWriter, r *http.Request) { + ctx := appengine.NewContext(r) + + k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil) + e := new(Entity) + if err := datastore.Get(ctx, k, e); err != nil { + http.Error(w, err.Error(), 500) + return + } + + old := e.Value + e.Value = r.URL.Path + + if _, err := datastore.Put(ctx, k, e); err != nil { + http.Error(w, err.Error(), 500) + return + } + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value) + } + +GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and +Delete functions. They take a []*Key instead of a *Key, and may return an +appengine.MultiError when encountering partial failure. + + +Properties + +An entity's contents can be represented by a variety of types. These are +typically struct pointers, but can also be any type that implements the +PropertyLoadSaver interface. If using a struct pointer, you do not have to +explicitly implement the PropertyLoadSaver interface; the datastore will +automatically convert via reflection. If a struct pointer does implement that +interface then those methods will be used in preference to the default +behavior for struct pointers. Struct pointers are more strongly typed and are +easier to use; PropertyLoadSavers are more flexible. + +The actual types passed do not have to match between Get and Put calls or even +across different App Engine requests. It is valid to put a *PropertyList and +get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. +Conceptually, any entity is saved as a sequence of properties, and is loaded +into the destination value on a property-by-property basis. 
When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error, but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If options is "" then the comma may be omitted. There are no other
+recognized options.
+
+Fields (except for []byte) are indexed by default. Strings longer than 1500
+bytes cannot be indexed; fields used to store long strings should be
+tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be
+indexed.
+
+Example code:
+
+	// A and B are renamed to a and b.
+	// A, C and J are not indexed.
+	// D's tag is equivalent to having no tag at all (E).
+	// I is ignored entirely by the datastore.
+	// J has tag information for both the datastore and json packages.
+	type TaggedStruct struct {
+		A int `datastore:"a,noindex"`
+		B int `datastore:"b"`
+		C int `datastore:",noindex"`
+		D int `datastore:""`
+		E int
+		I int `datastore:"-"`
+		J int `datastore:",noindex" json:"j"`
+	}
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+	type Inner1 struct {
+		W int32
+		X string
+	}
+
+	type Inner2 struct {
+		Y float64
+	}
+
+	type Inner3 struct {
+		Z bool
+	}
+
+	type Outer struct {
+		A int16
+		I []Inner1
+		J Inner2
+		Inner3
+	}
+
+then an Outer's properties would be equivalent to those of:
+
+	type OuterEquivalent struct {
+		A     int16
+		IDotW []int32  `datastore:"I.W"`
+		IDotX []string `datastore:"I.X"`
+		JDotY float64  `datastore:"J.Y"`
+		Z     bool
+	}
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+	type CustomPropsExample struct {
+		I, J int
+		// Sum is not stored, but should always be equal to I + J.
+		Sum int `datastore:"-"`
+	}
+
+	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
+		// Load I and J as usual.
+		if err := datastore.LoadStruct(x, ps); err != nil {
+			return err
+		}
+		// Derive the Sum field.
+		x.Sum = x.I + x.J
+		return nil
+	}
+
+	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
+		// Validate the Sum field.
+		if x.Sum != x.I + x.J {
+			return nil, errors.New("CustomPropsExample has inconsistent sum")
+		}
+		// Save I and J as usual. The code below is equivalent to calling
+		// "return datastore.SaveStruct(x)", but is done manually for
+		// demonstration purposes.
+		return []datastore.Property{
+			{
+				Name:  "I",
+				Value: int64(x.I),
+			},
+			{
+				Name:  "J",
+				Value: int64(x.J),
+			},
+		}, nil
+	}
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+  - Ancestor and Filter constrain the entities returned by running a query.
+  - Order affects the order in which they are returned.
+  - Project constrains the fields returned.
+  - Distinct de-duplicates projected entities.
+  - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+  - Start, End, Offset and Limit define which sub-sequence of matching entities
+    to return. Start and End take cursors, Offset and Limit take integers. Start
+    and Offset affect the first result, End and Limit affect the last result.
+    If both Start and Offset are set, then the offset is relative to Start.
+    If both End and Limit are set, then the earliest constraint wins. Limit is
+    relative to Start+Offset, not relative to End. As a special case, a
+    negative limit means unlimited.
+
+Example code:
+
+	type Widget struct {
+		Description string
+		Price       int
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+		q := datastore.NewQuery("Widget").
+			Filter("Price <", 1000).
+			Order("-Price")
+		b := new(bytes.Buffer)
+		for t := q.Run(ctx); ; {
+			var x Widget
+			key, err := t.Next(&x)
+			if err == datastore.Done {
+				break
+			}
+			if err != nil {
+				serveError(ctx, w, err)
+				return
+			}
+			fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+		}
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		io.Copy(w, b)
+	}
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+	type Counter struct {
+		Count int
+	}
+
+	func inc(ctx context.Context, key *datastore.Key) (int, error) {
+		var x Counter
+		if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+			return 0, err
+		}
+		x.Count++
+		if _, err := datastore.Put(ctx, key, &x); err != nil {
+			return 0, err
+		}
+		return x.Count, nil
+	}
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		ctx := appengine.NewContext(r)
+		var count int
+		err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
+			var err1 error
+			count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
+			return err1
+		}, nil)
+		if err != nil {
+			serveError(ctx, w, err)
+			return
+		}
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		fmt.Fprintf(w, "Count=%d", count)
+	}
+
+
+Metadata
+
+The datastore package provides access to some of App Engine's datastore
+metadata. This metadata includes information about the entity groups,
+namespaces, entity kinds, and properties in the datastore, as well as the
+property representations for each property.
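+
+As a minimal sketch (using the Namespaces function defined in this package's
+metadata.go), the datastore namespaces can be listed in the same way:
+
+	names, err := datastore.Namespaces(ctx)
+	if err != nil {
+		serveError(ctx, w, err)
+		return
+	}
+	fmt.Fprintln(w, strings.Join(names, "\n"))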
+
+Example code:
+
+	func handle(w http.ResponseWriter, r *http.Request) {
+		// Print all the kinds in the datastore, with all the indexed
+		// properties (and their representations) for each.
+		ctx := appengine.NewContext(r)
+
+		kinds, err := datastore.Kinds(ctx)
+		if err != nil {
+			serveError(ctx, w, err)
+			return
+		}
+
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		for _, kind := range kinds {
+			fmt.Fprintf(w, "%s:\n", kind)
+			props, err := datastore.KindProperties(ctx, kind)
+			if err != nil {
+				fmt.Fprintln(w, "\t(unable to retrieve properties)")
+				continue
+			}
+			for p, rep := range props {
+				fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
+			}
+		}
+	}
+*/
+package datastore // import "google.golang.org/appengine/datastore"
diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 0000000..ac1f002
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+	kind      string
+	stringID  string
+	intID     int64
+	parent    *Key
+	appID     string
+	namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+	return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+	return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+	return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+	return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+	return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+	return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+	return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+	if k == nil {
+		return false
+	}
+	for ; k != nil; k = k.parent {
+		if k.kind == "" || k.appID == "" {
+			return false
+		}
+		if k.stringID != "" && k.intID != 0 {
+			return false
+		}
+		if k.parent != nil {
+			if k.parent.Incomplete() {
+				return false
+			}
+			if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+	for k != nil && o != nil {
+		if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+			return false
+		}
+		k, o = k.parent, o.parent
+	}
+	return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
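+// For example (an illustrative sketch, assuming a context ctx): given
+//
+//	p := NewKey(ctx, "Album", "a", 0, nil)
+//	c := NewKey(ctx, "Photo", "", 7, p)
+//
+// c.root() returns p, and p.root() returns p itself.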
+func (k *Key) root() *Key { + for k.parent != nil { + k = k.parent + } + return k +} + +// marshal marshals the key's string representation to the buffer. +func (k *Key) marshal(b *bytes.Buffer) { + if k.parent != nil { + k.parent.marshal(b) + } + b.WriteByte('/') + b.WriteString(k.kind) + b.WriteByte(',') + if k.stringID != "" { + b.WriteString(k.stringID) + } else { + b.WriteString(strconv.FormatInt(k.intID, 10)) + } +} + +// String returns a string representation of the key. +func (k *Key) String() string { + if k == nil { + return "" + } + b := bytes.NewBuffer(make([]byte, 0, 512)) + k.marshal(b) + return b.String() +} + +type gobKey struct { + Kind string + StringID string + IntID int64 + Parent *gobKey + AppID string + Namespace string +} + +func keyToGobKey(k *Key) *gobKey { + if k == nil { + return nil + } + return &gobKey{ + Kind: k.kind, + StringID: k.stringID, + IntID: k.intID, + Parent: keyToGobKey(k.parent), + AppID: k.appID, + Namespace: k.namespace, + } +} + +func gobKeyToKey(gk *gobKey) *Key { + if gk == nil { + return nil + } + return &Key{ + kind: gk.Kind, + stringID: gk.StringID, + intID: gk.IntID, + parent: gobKeyToKey(gk.Parent), + appID: gk.AppID, + namespace: gk.Namespace, + } +} + +func (k *Key) GobEncode() ([]byte, error) { + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (k *Key) GobDecode(buf []byte) error { + gk := new(gobKey) + if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { + return err + } + *k = *gobKeyToKey(gk) + return nil +} + +func (k *Key) MarshalJSON() ([]byte, error) { + return []byte(`"` + k.Encode() + `"`), nil +} + +func (k *Key) UnmarshalJSON(buf []byte) error { + if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { + return errors.New("datastore: bad JSON key") + } + k2, err := DecodeKey(string(buf[1 : len(buf)-1])) + if err != nil { + return err + } + *k = *k2 + return nil +} + +// Encode returns an opaque representation of the key +// suitable for use in HTML and URLs. +// This is compatible with the Python and Java runtimes. +func (k *Key) Encode() string { + ref := keyToProto("", k) + + b, err := proto.Marshal(ref) + if err != nil { + panic(err) + } + + // Trailing padding is stripped. + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// DecodeKey decodes a key from the opaque representation returned by Encode. +func DecodeKey(encoded string) (*Key, error) { + // Re-add padding. + if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + ref := new(pb.Reference) + if err := proto.Unmarshal(b, ref); err != nil { + return nil, err + } + + return protoToKey(ref) +} + +// NewIncompleteKey creates a new incomplete key. +// kind cannot be empty. +func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key { + return NewKey(c, kind, "", 0, parent) +} + +// NewKey creates a new key. +// kind cannot be empty. +// Either one or both of stringID and intID must be zero. If both are zero, +// the key returned is incomplete. +// parent must either be a complete key or nil. +func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key { + // If there's a parent key, use its namespace. + // Otherwise, use any namespace attached to the context. 
+ var namespace string + if parent != nil { + namespace = parent.namespace + } else { + namespace = internal.NamespaceFromContext(c) + } + + return &Key{ + kind: kind, + stringID: stringID, + intID: intID, + parent: parent, + appID: internal.FullyQualifiedAppID(c), + namespace: namespace, + } +} + +// AllocateIDs returns a range of n integer IDs with the given kind and parent +// combination. kind cannot be empty; parent may be nil. The IDs in the range +// returned will not be used by the datastore's automatic ID sequence generator +// and may be used with NewKey without conflict. +// +// The range is inclusive at the low end and exclusive at the high end. In +// other words, valid intIDs x satisfy low <= x && x < high. +// +// If no error is returned, low + n == high. +func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) { + if kind == "" { + return 0, 0, errors.New("datastore: AllocateIDs given an empty kind") + } + if n < 0 { + return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n) + } + if n == 0 { + return 0, 0, nil + } + req := &pb.AllocateIdsRequest{ + ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)), + Size: proto.Int64(int64(n)), + } + res := &pb.AllocateIdsResponse{} + if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil { + return 0, 0, err + } + // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops) + // is inclusive at the low end and exclusive at the high end, so we add 1. + low = res.GetStart() + high = res.GetEnd() + 1 + if low+int64(n) != high { + return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n) + } + return low, high, nil +} diff --git a/vendor/google.golang.org/appengine/datastore/key_test.go b/vendor/google.golang.org/appengine/datastore/key_test.go new file mode 100644 index 0000000..1fb3e97 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/key_test.go @@ -0,0 +1,204 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
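+
+// (Illustrative sketch, not part of the vendored sources: AllocateIDs in
+// key.go above reserves an ID range that NewKey can then use without
+// colliding with the datastore's automatic ID generator.)
+//
+//	low, high, err := datastore.AllocateIDs(ctx, "Person", nil, 10)
+//	if err == nil {
+//		for id := low; id < high; id++ {
+//			k := datastore.NewKey(ctx, "Person", "", id, nil)
+//			_ = k // a complete key that is safe to Put under
+//		}
+//	}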
+ +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +func TestKeyEncoding(t *testing.T) { + testCases := []struct { + desc string + key *Key + exp string + }{ + { + desc: "A simple key with an int ID", + key: &Key{ + kind: "Person", + intID: 1, + appID: "glibrary", + }, + exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM", + }, + { + desc: "A simple key with a string ID", + key: &Key{ + kind: "Graph", + stringID: "graph:7-day-active", + appID: "glibrary", + }, + exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw", + }, + { + desc: "A key with a parent", + key: &Key{ + kind: "WordIndex", + intID: 1033, + parent: &Key{ + kind: "WordIndex", + intID: 1020032, + appID: "glibrary", + }, + appID: "glibrary", + }, + exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM", + }, + } + for _, tc := range testCases { + enc := tc.key.Encode() + if enc != tc.exp { + t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp) + } + + key, err := DecodeKey(tc.exp) + if err != nil { + t.Errorf("%s: failed decoding key: %v", tc.desc, err) + continue + } + if !key.Equal(tc.key) { + t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key) + } + } +} + +func TestKeyGob(t *testing.T) { + k := &Key{ + kind: "Gopher", + intID: 3, + parent: &Key{ + kind: "Mom", + stringID: "narwhal", + appID: "gopher-con", + }, + appID: "gopher-con", + } + + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(k); err != nil { + t.Fatalf("gob encode failed: %v", err) + } + + k2 := new(Key) + if err := gob.NewDecoder(buf).Decode(k2); err != nil { + t.Fatalf("gob decode failed: %v", err) + } + if !k2.Equal(k) { + t.Errorf("gob round trip of %v produced %v", k, k2) + } +} + +func TestNilKeyGob(t *testing.T) { + type S struct { + Key *Key + } + s1 := new(S) + + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(s1); err != nil { + t.Fatalf("gob encode failed: %v", err) + } + + s2 := new(S) + if err := gob.NewDecoder(buf).Decode(s2); err != nil { + t.Fatalf("gob decode failed: %v", err) + } + if s2.Key != nil { + t.Errorf("gob round trip of nil key produced %v", s2.Key) + } +} + +func TestKeyJSON(t *testing.T) { + k := &Key{ + kind: "Gopher", + intID: 2, + parent: &Key{ + kind: "Mom", + stringID: "narwhal", + appID: "gopher-con", + }, + appID: "gopher-con", + } + exp := `"` + k.Encode() + `"` + + buf, err := json.Marshal(k) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + if s := string(buf); s != exp { + t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp) + } + + k2 := new(Key) + if err := json.Unmarshal(buf, k2); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if !k2.Equal(k) { + t.Errorf("JSON round trip of %v produced %v", k, k2) + } +} + +func TestNilKeyJSON(t *testing.T) { + type S struct { + Key *Key + } + s1 := new(S) + + buf, err := json.Marshal(s1) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + + s2 := new(S) + if err := json.Unmarshal(buf, s2); err != nil { + t.Fatalf("json.Unmarshal failed: %v", err) + } + if s2.Key != nil { + t.Errorf("JSON round trip of nil key produced %v", s2.Key) + } +} + +func TestIncompleteKeyWithParent(t *testing.T) { + c := internal.WithAppIDOverride(context.Background(), "s~some-app") + + // fadduh is a complete key. + fadduh := NewKey(c, "Person", "", 1, nil) + if fadduh.Incomplete() { + t.Fatalf("fadduh is incomplete") + } + + // robert is an incomplete key with fadduh as a parent. 
+ robert := NewIncompleteKey(c, "Person", fadduh) + if !robert.Incomplete() { + t.Fatalf("robert is complete") + } + + // Both should be valid keys. + if !fadduh.valid() { + t.Errorf("fadduh is invalid: %v", fadduh) + } + if !robert.valid() { + t.Errorf("robert is invalid: %v", robert) + } +} + +func TestNamespace(t *testing.T) { + key := &Key{ + kind: "Person", + intID: 1, + appID: "s~some-app", + namespace: "mynamespace", + } + if g, w := key.Namespace(), "mynamespace"; g != w { + t.Errorf("key.Namespace() = %q, want %q", g, w) + } +} diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go new file mode 100644 index 0000000..3f3c80c --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/load.go @@ -0,0 +1,334 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "fmt" + "reflect" + "time" + + "google.golang.org/appengine" + pb "google.golang.org/appengine/internal/datastore" +) + +var ( + typeOfBlobKey = reflect.TypeOf(appengine.BlobKey("")) + typeOfByteSlice = reflect.TypeOf([]byte(nil)) + typeOfByteString = reflect.TypeOf(ByteString(nil)) + typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{}) + typeOfTime = reflect.TypeOf(time.Time{}) +) + +// typeMismatchReason returns a string explaining why the property p could not +// be stored in an entity field of type v.Type(). +func typeMismatchReason(p Property, v reflect.Value) string { + entityType := "empty" + switch p.Value.(type) { + case int64: + entityType = "int" + case bool: + entityType = "bool" + case string: + entityType = "string" + case float64: + entityType = "float" + case *Key: + entityType = "*datastore.Key" + case time.Time: + entityType = "time.Time" + case appengine.BlobKey: + entityType = "appengine.BlobKey" + case appengine.GeoPoint: + entityType = "appengine.GeoPoint" + case ByteString: + entityType = "datastore.ByteString" + case []byte: + entityType = "[]byte" + } + return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) +} + +type propertyLoader struct { + // m holds the number of times a substruct field like "Foo.Bar.Baz" has + // been seen so far. The map is constructed lazily. + m map[string]int +} + +func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string { + var v reflect.Value + // Traverse a struct's struct-typed fields. + for name := p.Name; ; { + decoder, ok := codec.byName[name] + if !ok { + return "no such struct field" + } + v = structValue.Field(decoder.index) + if !v.IsValid() { + return "no such struct field" + } + if !v.CanSet() { + return "cannot set struct field" + } + + if decoder.substructCodec == nil { + break + } + + if v.Kind() == reflect.Slice { + if l.m == nil { + l.m = make(map[string]int) + } + index := l.m[p.Name] + l.m[p.Name] = index + 1 + for v.Len() <= index { + v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) + } + structValue = v.Index(index) + requireSlice = false + } else { + structValue = v + } + // Strip the "I." from "I.X". 
+ name = name[len(codec.byIndex[decoder.index].name):] + codec = decoder.substructCodec + } + + var slice reflect.Value + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + slice = v + v = reflect.New(v.Type().Elem()).Elem() + } else if requireSlice { + return "multiple-valued property requires a slice field type" + } + + // Convert indexValues to a Go value with a meaning derived from the + // destination type. + pValue := p.Value + if iv, ok := pValue.(indexValue); ok { + meaning := pb.Property_NO_MEANING + switch v.Type() { + case typeOfBlobKey: + meaning = pb.Property_BLOBKEY + case typeOfByteSlice: + meaning = pb.Property_BLOB + case typeOfByteString: + meaning = pb.Property_BYTESTRING + case typeOfGeoPoint: + meaning = pb.Property_GEORSS_POINT + case typeOfTime: + meaning = pb.Property_GD_WHEN + } + var err error + pValue, err = propValue(iv.value, meaning) + if err != nil { + return err.Error() + } + } + + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x, ok := pValue.(int64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowInt(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetInt(x) + case reflect.Bool: + x, ok := pValue.(bool) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.SetBool(x) + case reflect.String: + switch x := pValue.(type) { + case appengine.BlobKey: + v.SetString(string(x)) + case ByteString: + v.SetString(string(x)) + case string: + v.SetString(x) + default: + if pValue != nil { + return typeMismatchReason(p, v) + } + } + case reflect.Float32, reflect.Float64: + x, ok := pValue.(float64) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.OverflowFloat(x) { + return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) + } + v.SetFloat(x) + case reflect.Ptr: + x, ok := pValue.(*Key) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if _, ok := v.Interface().(*Key); !ok { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case reflect.Struct: + switch v.Type() { + case typeOfTime: + x, ok := pValue.(time.Time) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + case typeOfGeoPoint: + x, ok := pValue.(appengine.GeoPoint) + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + v.Set(reflect.ValueOf(x)) + default: + return typeMismatchReason(p, v) + } + case reflect.Slice: + x, ok := pValue.([]byte) + if !ok { + if y, yok := pValue.(ByteString); yok { + x, ok = []byte(y), true + } + } + if !ok && pValue != nil { + return typeMismatchReason(p, v) + } + if v.Type().Elem().Kind() != reflect.Uint8 { + return typeMismatchReason(p, v) + } + v.SetBytes(x) + default: + return typeMismatchReason(p, v) + } + if slice.IsValid() { + slice.Set(reflect.Append(slice, v)) + } + return "" +} + +// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer. 
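+// A field that cannot be matched is reported without aborting the rest of the
+// load; for example (an illustrative sketch):
+//
+//	var dst struct{ Age string }
+//	err := LoadStruct(&dst, []Property{{Name: "Age", Value: int64(30)}})
+//	// err is an *ErrFieldMismatch ("type mismatch: int versus string"),
+//	// and dst.Age is left as "".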
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) { + props, err := protoToProperties(src) + if err != nil { + return err + } + if e, ok := dst.(PropertyLoadSaver); ok { + return e.Load(props) + } + return LoadStruct(dst, props) +} + +func (s structPLS) Load(props []Property) error { + var fieldName, reason string + var l propertyLoader + for _, p := range props { + if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" { + // We don't return early, as we try to load as many properties as possible. + // It is valid to load an entity into a struct that cannot fully represent it. + // That case returns an error, but the caller is free to ignore it. + fieldName, reason = p.Name, errStr + } + } + if reason != "" { + return &ErrFieldMismatch{ + StructType: s.v.Type(), + FieldName: fieldName, + Reason: reason, + } + } + return nil +} + +func protoToProperties(src *pb.EntityProto) ([]Property, error) { + props, rawProps := src.Property, src.RawProperty + out := make([]Property, 0, len(props)+len(rawProps)) + for { + var ( + x *pb.Property + noIndex bool + ) + if len(props) > 0 { + x, props = props[0], props[1:] + } else if len(rawProps) > 0 { + x, rawProps = rawProps[0], rawProps[1:] + noIndex = true + } else { + break + } + + var value interface{} + if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE { + value = indexValue{x.Value} + } else { + var err error + value, err = propValue(x.Value, x.GetMeaning()) + if err != nil { + return nil, err + } + } + out = append(out, Property{ + Name: x.GetName(), + Value: value, + NoIndex: noIndex, + Multiple: x.GetMultiple(), + }) + } + return out, nil +} + +// propValue returns a Go value that combines the raw PropertyValue with a +// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. +func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) { + switch { + case v.Int64Value != nil: + if m == pb.Property_GD_WHEN { + return fromUnixMicro(*v.Int64Value), nil + } else { + return *v.Int64Value, nil + } + case v.BooleanValue != nil: + return *v.BooleanValue, nil + case v.StringValue != nil: + if m == pb.Property_BLOB { + return []byte(*v.StringValue), nil + } else if m == pb.Property_BLOBKEY { + return appengine.BlobKey(*v.StringValue), nil + } else if m == pb.Property_BYTESTRING { + return ByteString(*v.StringValue), nil + } else { + return *v.StringValue, nil + } + case v.DoubleValue != nil: + return *v.DoubleValue, nil + case v.Referencevalue != nil: + key, err := referenceValueToKey(v.Referencevalue) + if err != nil { + return nil, err + } + return key, nil + case v.Pointvalue != nil: + // NOTE: Strangely, latitude maps to X, longitude to Y. + return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil + } + return nil, nil +} + +// indexValue is a Property value that is created when entities are loaded from +// an index, such as from a projection query. +// +// Such Property values do not contain all of the metadata required to be +// faithfully represented as a Go value, and are instead represented as an +// opaque indexValue. Load the properties into a concrete struct type (e.g. by +// passing a struct pointer to Iterator.Next) to reconstruct actual Go values +// of type int, string, time.Time, etc. 
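+// For example (an illustrative sketch, assuming a context ctx):
+//
+//	type Widget struct{ Price int }
+//	var w Widget
+//	q := NewQuery("Widget").Project("Price")
+//	_, err := q.Run(ctx).Next(&w) // w.Price is decoded into a plain int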
+type indexValue struct {
+	value *pb.PropertyValue
+}
diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go
new file mode 100644
index 0000000..67995f9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/metadata.go
@@ -0,0 +1,78 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import "golang.org/x/net/context"
+
+// Datastore kinds for the metadata entities.
+const (
+	namespaceKind = "__namespace__"
+	kindKind      = "__kind__"
+	propertyKind  = "__property__"
+)
+
+// Namespaces returns all the datastore namespaces.
+func Namespaces(ctx context.Context) ([]string, error) {
+	// TODO(djd): Support range queries.
+	q := NewQuery(namespaceKind).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	// The empty namespace key uses a numeric ID (==1), but luckily
+	// the string ID defaults to "" for numeric IDs anyway.
+	return keyNames(keys), nil
+}
+
+// Kinds returns the names of all the kinds in the current namespace.
+func Kinds(ctx context.Context) ([]string, error) {
+	// TODO(djd): Support range queries.
+	q := NewQuery(kindKind).KeysOnly()
+	keys, err := q.GetAll(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	return keyNames(keys), nil
+}
+
+// keyNames returns a slice of the provided keys' names (string IDs).
+func keyNames(keys []*Key) []string {
+	n := make([]string, 0, len(keys))
+	for _, k := range keys {
+		n = append(n, k.StringID())
+	}
+	return n
+}
+
+// KindProperties returns all the indexed properties for the given kind.
+// The properties are returned as a map of property names to a slice of the
+// representation types. The representation types for the supported Go property
+// types are:
+//   "INT64":     signed integers and time.Time
+//   "DOUBLE":    float32 and float64
+//   "BOOLEAN":   bool
+//   "STRING":    string, []byte and ByteString
+//   "POINT":     appengine.GeoPoint
+//   "REFERENCE": *Key
+//   "USER":      (not used in the Go runtime)
+func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
+	// TODO(djd): Support range queries.
+	kindKey := NewKey(ctx, kindKind, kind, 0, nil)
+	q := NewQuery(propertyKind).Ancestor(kindKey)
+
+	propMap := map[string][]string{}
+	props := []struct {
+		Repr []string `datastore:"property_representation"`
+	}{}
+
+	keys, err := q.GetAll(ctx, &props)
+	if err != nil {
+		return nil, err
+	}
+	for i, p := range props {
+		propMap[keys[i].StringID()] = p.Repr
+	}
+	return propMap, nil
+}
diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 0000000..1f50ac0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,296 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 20000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties.
An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+	// Name is the property name.
+	Name string
+	// Value is the property value. The valid types are:
+	//	- int64
+	//	- bool
+	//	- string
+	//	- float64
+	//	- ByteString
+	//	- *Key
+	//	- time.Time
+	//	- appengine.BlobKey
+	//	- appengine.GeoPoint
+	//	- []byte (up to 1 megabyte in length)
+	// This set is smaller than the set of valid struct field types that the
+	// datastore can load and save. A Property Value cannot be a slice (apart
+	// from []byte); use multiple Properties instead. Also, a Value's type
+	// must be explicitly on the list above; it is not sufficient for the
+	// underlying type to be on that list. For example, a Value of "type
+	// myInt64 int64" is invalid. Smaller-width integers and floats are also
+	// invalid. Again, this is more restrictive than the set of valid struct
+	// field types.
+	//
+	// A Value will have an opaque type when loading entities from an index,
+	// such as via a projection query. Load entities into a struct instead
+	// of a PropertyLoadSaver when using a projection query.
+	//
+	// A Value may also be the nil interface value; this is equivalent to
+	// Python's None but not directly representable by a Go struct. Loading
+	// a nil-valued property into a struct will set that field to the zero
+	// value.
+	Value interface{}
+	// NoIndex is whether the datastore cannot index this property.
+	NoIndex bool
+	// Multiple is whether the entity can have multiple properties with
+	// the same name. Even if a particular instance only has one property with
+	// a certain name, Multiple should be true if a struct would best represent
+	// it as a field of type []T instead of type T.
+	Multiple bool
+}
+
+// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+type PropertyLoadSaver interface {
+	Load([]Property) error
+	Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+var (
+	typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+	typeOfPropertyList      = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+	*l = append(*l, p...)
+	return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+func (l *PropertyList) Save() ([]Property, error) {
+	return *l, nil
+}
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".".
+func validPropertyName(name string) bool {
+	if name == "" {
+		return false
+	}
+	for _, s := range strings.Split(name, ".") {
+		if s == "" {
+			return false
+		}
+		first := true
+		for _, c := range s {
+			if first {
+				first = false
+				if c != '_' && !unicode.IsLetter(c) {
+					return false
+				}
+			} else {
+				if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// structTag is the parsed `datastore:"name,options"` tag of a struct field.
+// If a field has no tag, or the tag has an empty name, then the structTag's
+// name is just the field name. A "-" name means that the datastore ignores
+// that field.
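+// For example (illustrative), the field
+//
+//	Price int `datastore:"price,noindex"`
+//
+// is parsed as structTag{name: "price", noIndex: true}.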
+type structTag struct { + name string + noIndex bool +} + +// structCodec describes how to convert a struct to and from a sequence of +// properties. +type structCodec struct { + // byIndex gives the structTag for the i'th field. + byIndex []structTag + // byName gives the field codec for the structTag with the given name. + byName map[string]fieldCodec + // hasSlice is whether a struct or any of its nested or embedded structs + // has a slice-typed field (other than []byte). + hasSlice bool + // complete is whether the structCodec is complete. An incomplete + // structCodec may be encountered when walking a recursive struct. + complete bool +} + +// fieldCodec is a struct field's index and, if that struct field's type is +// itself a struct, that substruct's structCodec. +type fieldCodec struct { + index int + substructCodec *structCodec +} + +// structCodecs collects the structCodecs that have already been calculated. +var ( + structCodecsMutex sync.Mutex + structCodecs = make(map[reflect.Type]*structCodec) +) + +// getStructCodec returns the structCodec for the given struct type. +func getStructCodec(t reflect.Type) (*structCodec, error) { + structCodecsMutex.Lock() + defer structCodecsMutex.Unlock() + return getStructCodecLocked(t) +} + +// getStructCodecLocked implements getStructCodec. The structCodecsMutex must +// be held when calling this function. +func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { + c, ok := structCodecs[t] + if ok { + return c, nil + } + c = &structCodec{ + byIndex: make([]structTag, t.NumField()), + byName: make(map[string]fieldCodec), + } + + // Add c to the structCodecs map before we are sure it is good. If t is + // a recursive type, it needs to find the incomplete entry for itself in + // the map. + structCodecs[t] = c + defer func() { + if retErr != nil { + delete(structCodecs, t) + } + }() + + for i := range c.byIndex { + f := t.Field(i) + tags := strings.Split(f.Tag.Get("datastore"), ",") + name := tags[0] + opts := make(map[string]bool) + for _, t := range tags[1:] { + opts[t] = true + } + if name == "" { + if !f.Anonymous { + name = f.Name + } + } else if name == "-" { + c.byIndex[i] = structTag{name: name} + continue + } else if !validPropertyName(name) { + return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) + } + + substructType, fIsSlice := reflect.Type(nil), false + switch f.Type.Kind() { + case reflect.Struct: + substructType = f.Type + case reflect.Slice: + if f.Type.Elem().Kind() == reflect.Struct { + substructType = f.Type.Elem() + } + fIsSlice = f.Type != typeOfByteSlice + c.hasSlice = c.hasSlice || fIsSlice + } + + if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint { + if name != "" { + name = name + "." 
+ } + sub, err := getStructCodecLocked(substructType) + if err != nil { + return nil, err + } + if !sub.complete { + return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) + } + if fIsSlice && sub.hasSlice { + return nil, fmt.Errorf( + "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) + } + c.hasSlice = c.hasSlice || sub.hasSlice + for relName := range sub.byName { + absName := name + relName + if _, ok := c.byName[absName]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) + } + c.byName[absName] = fieldCodec{index: i, substructCodec: sub} + } + } else { + if _, ok := c.byName[name]; ok { + return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) + } + c.byName[name] = fieldCodec{index: i} + } + + c.byIndex[i] = structTag{ + name: name, + noIndex: opts["noindex"], + } + } + c.complete = true + return c, nil +} + +// structPLS adapts a struct to be a PropertyLoadSaver. +type structPLS struct { + v reflect.Value + codec *structCodec +} + +// newStructPLS returns a PropertyLoadSaver for the struct pointer p. +func newStructPLS(p interface{}) (PropertyLoadSaver, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + codec, err := getStructCodec(v.Type()) + if err != nil { + return nil, err + } + return structPLS{v, codec}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} diff --git a/vendor/google.golang.org/appengine/datastore/prop_test.go b/vendor/google.golang.org/appengine/datastore/prop_test.go new file mode 100644 index 0000000..6889521 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/prop_test.go @@ -0,0 +1,604 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "reflect" + "testing" + "time" + + "google.golang.org/appengine" +) + +func TestValidPropertyName(t *testing.T) { + testCases := []struct { + name string + want bool + }{ + // Invalid names. + {"", false}, + {"'", false}, + {".", false}, + {"..", false}, + {".foo", false}, + {"0", false}, + {"00", false}, + {"X.X.4.X.X", false}, + {"\n", false}, + {"\x00", false}, + {"abc\xffz", false}, + {"foo.", false}, + {"foo..", false}, + {"foo..bar", false}, + {"☃", false}, + {`"`, false}, + // Valid names. 
+ {"AB", true}, + {"Abc", true}, + {"X.X.X.X.X", true}, + {"_", true}, + {"_0", true}, + {"a", true}, + {"a_B", true}, + {"f00", true}, + {"f0o", true}, + {"fo0", true}, + {"foo", true}, + {"foo.bar", true}, + {"foo.bar.baz", true}, + {"世界", true}, + } + for _, tc := range testCases { + got := validPropertyName(tc.name) + if got != tc.want { + t.Errorf("%q: got %v, want %v", tc.name, got, tc.want) + } + } +} + +func TestStructCodec(t *testing.T) { + type oStruct struct { + O int + } + type pStruct struct { + P int + Q int + } + type rStruct struct { + R int + S pStruct + T oStruct + oStruct + } + type uStruct struct { + U int + v int + } + type vStruct struct { + V string `datastore:",noindex"` + } + oStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "O"}, + }, + byName: map[string]fieldCodec{ + "O": {index: 0}, + }, + complete: true, + } + pStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "P"}, + {name: "Q"}, + }, + byName: map[string]fieldCodec{ + "P": {index: 0}, + "Q": {index: 1}, + }, + complete: true, + } + rStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "R"}, + {name: "S."}, + {name: "T."}, + {name: ""}, + }, + byName: map[string]fieldCodec{ + "R": {index: 0}, + "S.P": {index: 1, substructCodec: pStructCodec}, + "S.Q": {index: 1, substructCodec: pStructCodec}, + "T.O": {index: 2, substructCodec: oStructCodec}, + "O": {index: 3, substructCodec: oStructCodec}, + }, + complete: true, + } + uStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "U"}, + {name: "v"}, + }, + byName: map[string]fieldCodec{ + "U": {index: 0}, + "v": {index: 1}, + }, + complete: true, + } + vStructCodec := &structCodec{ + byIndex: []structTag{ + {name: "V", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "V": {index: 0}, + }, + complete: true, + } + + testCases := []struct { + desc string + structValue interface{} + want *structCodec + }{ + { + "oStruct", + oStruct{}, + oStructCodec, + }, + { + "pStruct", + pStruct{}, + pStructCodec, + }, + { + "rStruct", + rStruct{}, + rStructCodec, + }, + { + "uStruct", + uStruct{}, + uStructCodec, + }, + { + "non-basic fields", + struct { + B appengine.BlobKey + K *Key + T time.Time + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "B"}, + {name: "K"}, + {name: "T"}, + }, + byName: map[string]fieldCodec{ + "B": {index: 0}, + "K": {index: 1}, + "T": {index: 2}, + }, + complete: true, + }, + }, + { + "struct tags with ignored embed", + struct { + A int `datastore:"a,noindex"` + B int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + oStruct `datastore:"-"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "a", noIndex: true}, + {name: "b", noIndex: false}, + {name: "C", noIndex: true}, + {name: "D", noIndex: false}, + {name: "E", noIndex: false}, + {name: "-", noIndex: false}, + {name: "J", noIndex: true}, + {name: "-", noIndex: false}, + }, + byName: map[string]fieldCodec{ + "a": {index: 0}, + "b": {index: 1}, + "C": {index: 2}, + "D": {index: 3}, + "E": {index: 4}, + "J": {index: 6}, + }, + complete: true, + }, + }, + { + "unexported fields", + struct { + A int + b int + C int `datastore:"x"` + d int `datastore:"Y"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A"}, + {name: "b"}, + {name: "x"}, + {name: "Y"}, + }, + byName: map[string]fieldCodec{ + "A": {index: 0}, + "b": {index: 1}, + "x": {index: 2}, + "Y": {index: 3}, + }, + complete: true, + }, + }, + { + "nested and embedded structs", + struct { + 
A int + B int + CC oStruct + DDD rStruct + oStruct + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A"}, + {name: "B"}, + {name: "CC."}, + {name: "DDD."}, + {name: ""}, + }, + byName: map[string]fieldCodec{ + "A": {index: 0}, + "B": {index: 1}, + "CC.O": {index: 2, substructCodec: oStructCodec}, + "DDD.R": {index: 3, substructCodec: rStructCodec}, + "DDD.S.P": {index: 3, substructCodec: rStructCodec}, + "DDD.S.Q": {index: 3, substructCodec: rStructCodec}, + "DDD.T.O": {index: 3, substructCodec: rStructCodec}, + "DDD.O": {index: 3, substructCodec: rStructCodec}, + "O": {index: 4, substructCodec: oStructCodec}, + }, + complete: true, + }, + }, + { + "struct tags with nested and embedded structs", + struct { + A int `datastore:"-"` + B int `datastore:"w"` + C oStruct `datastore:"xx"` + D rStruct `datastore:"y"` + oStruct `datastore:"z"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "-"}, + {name: "w"}, + {name: "xx."}, + {name: "y."}, + {name: "z."}, + }, + byName: map[string]fieldCodec{ + "w": {index: 1}, + "xx.O": {index: 2, substructCodec: oStructCodec}, + "y.R": {index: 3, substructCodec: rStructCodec}, + "y.S.P": {index: 3, substructCodec: rStructCodec}, + "y.S.Q": {index: 3, substructCodec: rStructCodec}, + "y.T.O": {index: 3, substructCodec: rStructCodec}, + "y.O": {index: 3, substructCodec: rStructCodec}, + "z.O": {index: 4, substructCodec: oStructCodec}, + }, + complete: true, + }, + }, + { + "unexported nested and embedded structs", + struct { + a int + B int + c uStruct + D uStruct + uStruct + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "a"}, + {name: "B"}, + {name: "c."}, + {name: "D."}, + {name: ""}, + }, + byName: map[string]fieldCodec{ + "a": {index: 0}, + "B": {index: 1}, + "c.U": {index: 2, substructCodec: uStructCodec}, + "c.v": {index: 2, substructCodec: uStructCodec}, + "D.U": {index: 3, substructCodec: uStructCodec}, + "D.v": {index: 3, substructCodec: uStructCodec}, + "U": {index: 4, substructCodec: uStructCodec}, + "v": {index: 4, substructCodec: uStructCodec}, + }, + complete: true, + }, + }, + { + "noindex nested struct", + struct { + A oStruct `datastore:",noindex"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A.", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "A.O": {index: 0, substructCodec: oStructCodec}, + }, + complete: true, + }, + }, + { + "noindex slice", + struct { + A []string `datastore:",noindex"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "A": {index: 0}, + }, + hasSlice: true, + complete: true, + }, + }, + { + "noindex embedded struct slice", + struct { + // vStruct has a single field, V, also with noindex. 
+ A []vStruct `datastore:",noindex"` + }{}, + &structCodec{ + byIndex: []structTag{ + {name: "A.", noIndex: true}, + }, + byName: map[string]fieldCodec{ + "A.V": {index: 0, substructCodec: vStructCodec}, + }, + hasSlice: true, + complete: true, + }, + }, + } + + for _, tc := range testCases { + got, err := getStructCodec(reflect.TypeOf(tc.structValue)) + if err != nil { + t.Errorf("%s: getStructCodec: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s\ngot %+v\nwant %+v\n", tc.desc, got, tc.want) + continue + } + } +} + +func TestRepeatedPropertyName(t *testing.T) { + good := []interface{}{ + struct { + A int `datastore:"-"` + }{}, + struct { + A int `datastore:"b"` + B int + }{}, + struct { + A int + B int `datastore:"B"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"-"` + }{}, + struct { + A int `datastore:"-"` + B int `datastore:"A"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"A"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"C"` + C int `datastore:"A"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"C"` + C int `datastore:"D"` + }{}, + } + bad := []interface{}{ + struct { + A int `datastore:"B"` + B int + }{}, + struct { + A int + B int `datastore:"A"` + }{}, + struct { + A int `datastore:"C"` + B int `datastore:"C"` + }{}, + struct { + A int `datastore:"B"` + B int `datastore:"C"` + C int `datastore:"B"` + }{}, + } + testGetStructCodec(t, good, bad) +} + +func TestFlatteningNestedStructs(t *testing.T) { + type deepGood struct { + A struct { + B []struct { + C struct { + D int + } + } + } + } + type deepBad struct { + A struct { + B []struct { + C struct { + D []int + } + } + } + } + type iSay struct { + Tomato int + } + type youSay struct { + Tomato int + } + type tweedledee struct { + Dee int `datastore:"D"` + } + type tweedledum struct { + Dum int `datastore:"D"` + } + + good := []interface{}{ + struct { + X []struct { + Y string + } + }{}, + struct { + X []struct { + Y []byte + } + }{}, + struct { + P []int + X struct { + Y []int + } + }{}, + struct { + X struct { + Y []int + } + Q []int + }{}, + struct { + P []int + X struct { + Y []int + } + Q []int + }{}, + struct { + deepGood + }{}, + struct { + DG deepGood + }{}, + struct { + Foo struct { + Z int `datastore:"X"` + } `datastore:"A"` + Bar struct { + Z int `datastore:"Y"` + } `datastore:"A"` + }{}, + } + bad := []interface{}{ + struct { + X []struct { + Y []string + } + }{}, + struct { + X []struct { + Y []int + } + }{}, + struct { + deepBad + }{}, + struct { + DB deepBad + }{}, + struct { + iSay + youSay + }{}, + struct { + tweedledee + tweedledum + }{}, + struct { + Foo struct { + Z int + } `datastore:"A"` + Bar struct { + Z int + } `datastore:"A"` + }{}, + } + testGetStructCodec(t, good, bad) +} + +func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) { + for _, x := range good { + if _, err := getStructCodec(reflect.TypeOf(x)); err != nil { + t.Errorf("type %T: got non-nil error (%s), want nil", x, err) + } + } + for _, x := range bad { + if _, err := getStructCodec(reflect.TypeOf(x)); err == nil { + t.Errorf("type %T: got nil error, want non-nil", x) + } + } +} + +func TestNilKeyIsStored(t *testing.T) { + x := struct { + K *Key + I int + }{} + p := PropertyList{} + // Save x as properties. + p1, _ := SaveStruct(&x) + p.Load(p1) + // Set x's fields to non-zero. + x.K = &Key{} + x.I = 2 + // Load x from properties. + p2, _ := p.Save() + LoadStruct(&x, p2) + // Check that x's fields were set to zero. 
+ if x.K != nil { + t.Errorf("K field was not zero") + } + if x.I != 0 { + t.Errorf("I field was not zero") + } +} diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go new file mode 100644 index 0000000..3847b0f --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/query.go @@ -0,0 +1,724 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/datastore" +) + +type operator int + +const ( + lessThan operator = iota + lessEq + equal + greaterEq + greaterThan +) + +var operatorToProto = map[operator]*pb.Query_Filter_Operator{ + lessThan: pb.Query_Filter_LESS_THAN.Enum(), + lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(), + equal: pb.Query_Filter_EQUAL.Enum(), + greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(), + greaterThan: pb.Query_Filter_GREATER_THAN.Enum(), +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection int + +const ( + ascending sortDirection = iota + descending +) + +var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{ + ascending: pb.Query_Order_ASCENDING.Enum(), + descending: pb.Query_Order_DESCENDING.Enum(), +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. +type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + keysOnly bool + eventual bool + limit int32 + offset int32 + start *pb.CompiledCursor + end *pb.CompiledCursor + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. +func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". 
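+// For example, "Height >=" and "Name=" (the space is optional) are both
+// valid filterStr values.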
+// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if len(filterStr) < 1 { + q.err = errors.New("datastore: invalid filter: " + filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName = strings.TrimSpace(fieldName) + o := order{ + Direction: ascending, + FieldName: fieldName, + } + if strings.HasPrefix(fieldName, "-") { + o.Direction = descending + o.FieldName = strings.TrimSpace(fieldName[1:]) + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + if len(o.FieldName) == 0 { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, o) + return q +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + if c.cc == nil { + q.err = errors.New("datastore: invalid cursor") + return q + } + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. 
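+// Together with Start, it can bound iteration to the results between two
+// cursors obtained from earlier runs of the same query.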
+func (q *Query) End(c Cursor) *Query { + q = q.clone() + if c.cc == nil { + q.err = errors.New("datastore: invalid cursor") + return q + } + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. +func (q *Query) toProto(dst *pb.Query, appID string) error { + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + dst.Reset() + dst.App = proto.String(appID) + if q.kind != "" { + dst.Kind = proto.String(q.kind) + } + if q.ancestor != nil { + dst.Ancestor = keyToProto(appID, q.ancestor) + if q.eventual { + dst.Strong = proto.Bool(false) + } + } + if q.projection != nil { + dst.PropertyName = q.projection + if q.distinct { + dst.GroupByPropertyName = q.projection + } + } + if q.keysOnly { + dst.KeysOnly = proto.Bool(true) + dst.RequirePerfectPlan = proto.Bool(true) + } + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false) + if errStr != "" { + return errors.New("datastore: bad query filter value type: " + errStr) + } + xf := &pb.Query_Filter{ + Op: operatorToProto[qf.Op], + Property: []*pb.Property{p}, + } + if xf.Op == nil { + return errors.New("datastore: unknown query filter operator") + } + dst.Filter = append(dst.Filter, xf) + } + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.Query_Order{ + Property: proto.String(qo.FieldName), + Direction: sortDirectionToProto[qo.Direction], + } + if xo.Direction == nil { + return errors.New("datastore: unknown query order direction") + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = proto.Int32(q.limit) + } + if q.offset != 0 { + dst.Offset = proto.Int32(q.offset) + } + dst.CompiledCursor = q.start + dst.EndCompiledCursor = q.end + dst.Compile = proto.Bool(true) + return nil +} + +// Count returns the number of results for the query. +// +// The running time and number of API calls made by Count scale linearly with +// the sum of the query's offset and limit. Unless the result count is +// expected to be small, it is best to specify a limit; otherwise Count will +// continue until it finishes counting or the provided context expires. +func (q *Query) Count(c context.Context) (int, error) { + // Check that the query is well-formed. + if q.err != nil { + return 0, q.err + } + + // Run a copy of the query, with keysOnly true (if we're not a projection, + // since the two are incompatible), and an adjusted offset. We also set the + // limit to zero, as we don't want any actual entity data, just the number + // of skipped results. + newQ := q.clone() + newQ.keysOnly = len(newQ.projection) == 0 + newQ.limit = 0 + if q.limit < 0 { + // If the original query was unlimited, set the new query's offset to maximum. + newQ.offset = math.MaxInt32 + } else { + newQ.offset = q.offset + q.limit + if newQ.offset < 0 { + // Do the best we can, in the presence of overflow. + newQ.offset = math.MaxInt32 + } + } + req := &pb.Query{} + if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil { + return 0, err + } + res := &pb.QueryResult{} + if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil { + return 0, err + } + + // n is the count we will return. 
For example, suppose that our original
+	// query had an offset of 4 and a limit of 2008: the count will be 2008,
+	// provided that there are at least 2012 matching entities. However, the
+	// RPCs will only skip 1000 results at a time. The RPC sequence is:
+	//   call RunQuery with (offset, limit) = (2012, 0)  // 2012 == newQ.offset
+	//   response has (skippedResults, moreResults) = (1000, true)
+	//   n += 1000  // n == 1000
+	//   call Next with (offset, limit) = (1012, 0)  // 1012 == newQ.offset - n
+	//   response has (skippedResults, moreResults) = (1000, true)
+	//   n += 1000  // n == 2000
+	//   call Next with (offset, limit) = (12, 0)  // 12 == newQ.offset - n
+	//   response has (skippedResults, moreResults) = (12, false)
+	//   n += 12  // n == 2012
+	//   // exit the loop
+	//   n -= 4  // n == 2008
+	var n int32
+	for {
+		// The QueryResult should have no actual entity data, just skipped results.
+		if len(res.Result) != 0 {
+			return 0, errors.New("datastore: internal error: Count request returned too much data")
+		}
+		n += res.GetSkippedResults()
+		if !res.GetMoreResults() {
+			break
+		}
+		if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+			return 0, err
+		}
+	}
+	n -= q.offset
+	if n < 0 {
+		// If the offset was greater than the number of matching entities,
+		// return 0 instead of negative.
+		n = 0
+	}
+	return int(n), nil
+}
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results.
+func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error {
+	if res.Cursor == nil {
+		return errors.New("datastore: internal error: server did not return a cursor")
+	}
+	req := &pb.NextRequest{
+		Cursor: res.Cursor,
+	}
+	if limit >= 0 {
+		req.Count = proto.Int32(limit)
+	}
+	if offset != 0 {
+		req.Offset = proto.Int32(offset)
+	}
+	if res.CompiledCursor != nil {
+		req.Compile = proto.Bool(true)
+	}
+	res.Reset()
+	return internal.Call(c, "datastore_v3", "Next", req, res)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+//
+// The running time and number of API calls made by GetAll scale linearly with
+// the sum of the query's offset and limit. Unless the result count is
+// expected to be small, it is best to specify a limit; otherwise GetAll will
+// continue until it finishes collecting results or the provided context
+// expires.
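+//
+// A minimal sketch, assuming a Gopher struct type like the one in this
+// package's tests:
+//	var gophers []Gopher
+//	keys, err := NewQuery("Gopher").Limit(10).GetAll(c, &gophers)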
+func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) { + var ( + dv reflect.Value + mat multiArgType + elemType reflect.Type + errFieldMismatch error + ) + if !q.keysOnly { + dv = reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return nil, ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType = checkMultiArg(dv) + if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { + return nil, ErrInvalidEntityType + } + } + + var keys []*Key + for t := q.Run(c); ; { + k, e, err := t.next() + if err == Done { + break + } + if err != nil { + return keys, err + } + if !q.keysOnly { + ev := reflect.New(elemType) + if elemType.Kind() == reflect.Map { + // This is a special case. The zero values of a map type are + // not immediately useful; they have to be make'd. + // + // Funcs and channels are similar, in that a zero value is not useful, + // but even a freshly make'd channel isn't useful: there's no fixed + // channel buffer size that is always going to be large enough, and + // there's no goroutine to drain the other end. Theoretically, these + // types could be supported, for example by sniffing for a constructor + // method or requiring prior registration, but for now it's not a + // frequent enough concern to be worth it. Programmers can work around + // it by explicitly using Iterator.Next instead of the Query.GetAll + // convenience method. + x := reflect.MakeMap(elemType) + ev.Elem().Set(x) + } + if err = loadEntity(ev.Interface(), e); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return keys, err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + } + keys = append(keys, k) + } + return keys, errFieldMismatch +} + +// Run runs the query in the given context. +func (q *Query) Run(c context.Context) *Iterator { + if q.err != nil { + return &Iterator{err: q.err} + } + t := &Iterator{ + c: c, + limit: q.limit, + q: q, + prevCC: q.start, + } + var req pb.Query + if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil { + t.err = err + return t + } + if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil { + t.err = err + return t + } + offset := q.offset - t.res.GetSkippedResults() + for offset > 0 && t.res.GetMoreResults() { + t.prevCC = t.res.CompiledCursor + if err := callNext(t.c, &t.res, offset, t.limit); err != nil { + t.err = err + break + } + skip := t.res.GetSkippedResults() + if skip < 0 { + t.err = errors.New("datastore: internal error: negative number of skipped_results") + break + } + offset -= skip + } + if offset < 0 { + t.err = errors.New("datastore: internal error: query offset was overshot") + } + return t +} + +// Iterator is the result of running a query. +type Iterator struct { + c context.Context + err error + // res is the result of the most recent RunQuery or Next API call. + res pb.QueryResult + // i is how many elements of res.Result we have iterated over. + i int + // limit is the limit on the number of results this iterator should return. + // A negative value means unlimited. + limit int32 + // q is the original query which yielded this iterator. + q *Query + // prevCC is the compiled cursor that marks the end of the previous batch + // of results. 
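+	// Cursor returns it when the iterator is still positioned at the start
+	// of the current batch.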
+ prevCC *pb.CompiledCursor +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("datastore: query has no more results") + +// Next returns the key of the next result. When there are no more results, +// Done is returned as the error. +// +// If the query is not keys only and dst is non-nil, it also loads the entity +// stored for that key into the struct pointer or PropertyLoadSaver dst, with +// the same semantics and possible errors as for the Get function. +func (t *Iterator) Next(dst interface{}) (*Key, error) { + k, e, err := t.next() + if err != nil { + return nil, err + } + if dst != nil && !t.q.keysOnly { + err = loadEntity(dst, e) + } + return k, err +} + +func (t *Iterator) next() (*Key, *pb.EntityProto, error) { + if t.err != nil { + return nil, nil, t.err + } + + // Issue datastore_v3/Next RPCs as necessary. + for t.i == len(t.res.Result) { + if !t.res.GetMoreResults() { + t.err = Done + return nil, nil, t.err + } + t.prevCC = t.res.CompiledCursor + if err := callNext(t.c, &t.res, 0, t.limit); err != nil { + t.err = err + return nil, nil, t.err + } + if t.res.GetSkippedResults() != 0 { + t.err = errors.New("datastore: internal error: iterator has skipped results") + return nil, nil, t.err + } + t.i = 0 + if t.limit >= 0 { + t.limit -= int32(len(t.res.Result)) + if t.limit < 0 { + t.err = errors.New("datastore: internal error: query returned more results than the limit") + return nil, nil, t.err + } + } + } + + // Extract the key from the t.i'th element of t.res.Result. + e := t.res.Result[t.i] + t.i++ + if e.Key == nil { + return nil, nil, errors.New("datastore: internal error: server did not return a key") + } + k, err := protoToKey(e.Key) + if err != nil || k.Incomplete() { + return nil, nil, errors.New("datastore: internal error: server returned an invalid key") + } + return k, e, nil +} + +// Cursor returns a cursor for the iterator's current location. +func (t *Iterator) Cursor() (Cursor, error) { + if t.err != nil && t.err != Done { + return Cursor{}, t.err + } + // If we are at either end of the current batch of results, + // return the compiled cursor at that end. + skipped := t.res.GetSkippedResults() + if t.i == 0 && skipped == 0 { + if t.prevCC == nil { + // A nil pointer (of type *pb.CompiledCursor) means no constraint: + // passing it as the end cursor of a new query means unlimited results + // (glossing over the integer limit parameter for now). + // A non-nil pointer to an empty pb.CompiledCursor means the start: + // passing it as the end cursor of a new query means 0 results. + // If prevCC was nil, then the original query had no start cursor, but + // Iterator.Cursor should return "the start" instead of unlimited. + return Cursor{&zeroCC}, nil + } + return Cursor{t.prevCC}, nil + } + if t.i == len(t.res.Result) { + return Cursor{t.res.CompiledCursor}, nil + } + // Otherwise, re-run the query offset to this iterator's position, starting from + // the most recent compiled cursor. This is done on a best-effort basis, as it + // is racy; if a concurrent process has added or removed entities, then the + // cursor returned may be inconsistent. 
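+	// The re-run uses a zero limit and (unless projecting) keys-only, so it
+	// transfers no entity data; it exists only to obtain a compiled cursor
+	// at this position.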
+ q := t.q.clone() + q.start = t.prevCC + q.offset = skipped + int32(t.i) + q.limit = 0 + q.keysOnly = len(q.projection) == 0 + t1 := q.Run(t.c) + _, _, err := t1.next() + if err != Done { + if err == nil { + err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results") + } + return Cursor{}, err + } + return Cursor{t1.res.CompiledCursor}, nil +} + +var zeroCC pb.CompiledCursor + +// Cursor is an iterator's position. It can be converted to and from an opaque +// string. A cursor can be used from different HTTP requests, but only with a +// query with the same kind, ancestor, filter and order constraints. +type Cursor struct { + cc *pb.CompiledCursor +} + +// String returns a base-64 string representation of a cursor. +func (c Cursor) String() string { + if c.cc == nil { + return "" + } + b, err := proto.Marshal(c.cc) + if err != nil { + // The only way to construct a Cursor with a non-nil cc field is to + // unmarshal from the byte representation. We panic if the unmarshal + // succeeds but the marshaling of the unchanged protobuf value fails. + panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err)) + } + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// Decode decodes a cursor from its base-64 string representation. +func DecodeCursor(s string) (Cursor, error) { + if s == "" { + return Cursor{&zeroCC}, nil + } + if n := len(s) % 4; n != 0 { + s += strings.Repeat("=", 4-n) + } + b, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return Cursor{}, err + } + cc := &pb.CompiledCursor{} + if err := proto.Unmarshal(b, cc); err != nil { + return Cursor{}, err + } + return Cursor{cc}, nil +} diff --git a/vendor/google.golang.org/appengine/datastore/query_test.go b/vendor/google.golang.org/appengine/datastore/query_test.go new file mode 100644 index 0000000..f1b9de8 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/query_test.go @@ -0,0 +1,583 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package datastore + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/datastore" +) + +var ( + path1 = &pb.Path{ + Element: []*pb.Path_Element{ + { + Type: proto.String("Gopher"), + Id: proto.Int64(6), + }, + }, + } + path2 = &pb.Path{ + Element: []*pb.Path_Element{ + { + Type: proto.String("Gopher"), + Id: proto.Int64(6), + }, + { + Type: proto.String("Gopher"), + Id: proto.Int64(8), + }, + }, + } +) + +func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error { + expectedIn := &pb.Query{ + App: proto.String("dev~fake-app"), + Kind: proto.String("Gopher"), + Compile: proto.Bool(true), + } + if !proto.Equal(in, expectedIn) { + return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + *out = pb.QueryResult{ + Result: []*pb.EntityProto{ + { + Key: &pb.Reference{ + App: proto.String("s~test-app"), + Path: path1, + }, + EntityGroup: path1, + Property: []*pb.Property{ + { + Meaning: pb.Property_TEXT.Enum(), + Name: proto.String("Name"), + Value: &pb.PropertyValue{ + StringValue: proto.String("George"), + }, + }, + { + Name: proto.String("Height"), + Value: &pb.PropertyValue{ + Int64Value: proto.Int64(32), + }, + }, + }, + }, + { + Key: &pb.Reference{ + App: proto.String("s~test-app"), + Path: path2, + }, + EntityGroup: path1, // ancestor is George + Property: []*pb.Property{ + { + Meaning: pb.Property_TEXT.Enum(), + Name: proto.String("Name"), + Value: &pb.PropertyValue{ + StringValue: proto.String("Rufus"), + }, + }, + // No height for Rufus. + }, + }, + }, + MoreResults: proto.Bool(false), + } + return nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + if p.Multiple { + return errors.New("PropertyMap does not support multiple properties") + } + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + if p.Multiple { + return nil, errors.New("PropertyMap does not support multiple properties") + } + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. + {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. 
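+		// PropertyList is itself a slice of structs, so it would otherwise
+		// qualify; it is rejected to avoid being mistakenly passed when
+		// []PropertyList was intended.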
+ {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. + { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Name", + Value: "George", + }, + { + Name: "Height", + Value: int64(32), + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. + {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. + {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. 
+ {new(PropertyMap), nil}, + {new([]int), nil}, + {new([]map[int]int), nil}, + {new([]map[string]Property), nil}, + {new([]map[string]interface{}), nil}, + {new([]*int), nil}, + {new([]*map[int]int), nil}, + {new([]*map[string]Property), nil}, + {new([]*map[string]interface{}), nil}, + {new([]**Gopher), nil}, + {new([]*PropertyList), nil}, + {new([]*PropertyMap), nil}, + } + for _, tc := range testCases { + nCall := 0 + c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error { + nCall++ + return fakeRunQuery(in, out) + }) + c = internal.WithAppIDOverride(c, "dev~fake-app") + + var ( + expectedErr error + expectedNCall int + ) + if tc.want == nil { + expectedErr = ErrInvalidEntityType + } else { + expectedNCall = 1 + } + keys, err := NewQuery("Gopher").GetAll(c, tc.dst) + if err != expectedErr { + t.Errorf("dst type %T: got error [%v], want [%v]", tc.dst, err, expectedErr) + continue + } + if nCall != expectedNCall { + t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) + continue + } + if err != nil { + continue + } + + key1 := NewKey(c, "Gopher", "", 6, nil) + expectedKeys := []*Key{ + key1, + NewKey(c, "Gopher", "", 8, key1), + } + if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { + t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) + continue + } + for i, key := range keys { + if key.AppID() != "s~test-app" { + t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID()) + continue + } + if !keysEqual(key, expectedKeys[i]) { + t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) + continue + } + } + + if !reflect.DeepEqual(tc.dst, tc.want) { + t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want) + continue + } + } +} + +// keysEqual is like (*Key).Equal, but ignores the App ID. +func keysEqual(a, b *Key) bool { + for a != nil && b != nil { + if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() { + return false + } + a, b = a.Parent(), b.Parent() + } + return a == b +} + +func TestQueriesAreImmutable(t *testing.T) { + // Test that deriving q2 from q1 does not modify q1. + q0 := NewQuery("foo") + q1 := NewQuery("foo") + q2 := q1.Offset(2) + if !reflect.DeepEqual(q0, q1) { + t.Errorf("q0 and q1 were not equal") + } + if reflect.DeepEqual(q1, q2) { + t.Errorf("q1 and q2 were equal") + } + + // Test that deriving from q4 twice does not conflict, even though + // q4 has a long list of order clauses. This tests that the arrays + // backed by a query's slice of orders are not shared. + f := func() *Query { + q := NewQuery("bar") + // 47 is an ugly number that is unlikely to be near a re-allocation + // point in repeated append calls. For example, it's not near a power + // of 2 or a multiple of 10. + for i := 0; i < 47; i++ { + q = q.Order(fmt.Sprintf("x%d", i)) + } + return q + } + q3 := f().Order("y") + q4 := f() + q5 := q4.Order("y") + q6 := q4.Order("z") + if !reflect.DeepEqual(q3, q5) { + t.Errorf("q3 and q5 were not equal") + } + if reflect.DeepEqual(q5, q6) { + t.Errorf("q5 and q6 were equal") + } +} + +func TestFilterParser(t *testing.T) { + testCases := []struct { + filterStr string + wantOK bool + wantFieldName string + wantOp operator + }{ + // Supported ops. 
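+		// Whitespace around the field name and operator is insignificant:
+		// Filter trims it before splitting the two apart.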
+ {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestQueryToProto(t *testing.T) { + // The context is required to make Keys for the test cases. + var got *pb.Query + NoErr := errors.New("No error") + c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error { + got = in + return NoErr // return a non-nil error so Run doesn't keep going. + }) + c = internal.WithAppIDOverride(c, "dev~fake-app") + + testCases := []struct { + desc string + query *Query + want *pb.Query + err string + }{ + { + desc: "empty", + query: NewQuery(""), + want: &pb.Query{}, + }, + { + desc: "standard query", + query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42), + want: &pb.Query{ + Kind: proto.String("kind"), + Filter: []*pb.Query_Filter{ + { + Op: pb.Query_Filter_GREATER_THAN.Enum(), + Property: []*pb.Property{ + { + Name: proto.String("I"), + Value: &pb.PropertyValue{Int64Value: proto.Int64(17)}, + Multiple: proto.Bool(false), + }, + }, + }, + { + Op: pb.Query_Filter_EQUAL.Enum(), + Property: []*pb.Property{ + { + Name: proto.String("U"), + Value: &pb.PropertyValue{StringValue: proto.String("Dave")}, + Multiple: proto.Bool(false), + }, + }, + }, + }, + Order: []*pb.Query_Order{ + { + Property: proto.String("I"), + Direction: pb.Query_Order_DESCENDING.Enum(), + }, + }, + Limit: proto.Int32(7), + Offset: proto.Int32(42), + }, + }, + { + desc: "ancestor", + query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)), + want: &pb.Query{ + Ancestor: &pb.Reference{ + App: proto.String("dev~fake-app"), + Path: &pb.Path{ + Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}}, + }, + }, + }, + }, + { + desc: "projection", + query: NewQuery("").Project("A", "B"), + want: &pb.Query{ + PropertyName: []string{"A", "B"}, + }, + }, + { + desc: "projection with distinct", + query: NewQuery("").Project("A", "B").Distinct(), + want: &pb.Query{ + PropertyName: []string{"A", "B"}, + GroupByPropertyName: []string{"A", "B"}, + }, + }, + { + desc: "keys only", + query: NewQuery("").KeysOnly(), + want: &pb.Query{ + KeysOnly: proto.Bool(true), + RequirePerfectPlan: proto.Bool(true), + }, + }, + { + desc: "empty filter", + query: NewQuery("kind").Filter("=", 17), + err: "empty 
query filter field nam", + }, + { + desc: "bad filter type", + query: NewQuery("kind").Filter("M =", map[string]bool{}), + err: "bad query filter value type", + }, + { + desc: "bad filter operator", + query: NewQuery("kind").Filter("I <<=", 17), + err: `invalid operator "<<=" in filter "I <<="`, + }, + { + desc: "empty order", + query: NewQuery("kind").Order(""), + err: "empty order", + }, + { + desc: "bad order direction", + query: NewQuery("kind").Order("+I"), + err: `invalid order: "+I`, + }, + } + + for _, tt := range testCases { + got = nil + if _, err := tt.query.Run(c).Next(nil); err != NoErr { + if tt.err == "" || !strings.Contains(err.Error(), tt.err) { + t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err) + } + continue + } + if tt.err != "" { + t.Errorf("%s: no error, want %q", tt.desc, tt.err) + continue + } + // Fields that are common to all protos. + tt.want.App = proto.String("dev~fake-app") + tt.want.Compile = proto.Bool(true) + if !proto.Equal(got, tt.want) { + t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want) + } + } +} diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go new file mode 100644 index 0000000..b5f9592 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/save.go @@ -0,0 +1,300 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "errors" + "fmt" + "math" + "reflect" + "time" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine" + pb "google.golang.org/appengine/internal/datastore" +) + +func toUnixMicro(t time.Time) int64 { + // We cannot use t.UnixNano() / 1e3 because we want to handle times more than + // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot + // be represented in the numerator of a single int64 divide. + return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) +} + +func fromUnixMicro(t int64) time.Time { + return time.Unix(t/1e6, (t%1e6)*1e3).UTC() +} + +var ( + minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) + maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) +) + +// valueToProto converts a named value to a newly allocated Property. +// The returned error string is empty on success. +func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) { + var ( + pv pb.PropertyValue + unsupported bool + ) + switch v.Kind() { + case reflect.Invalid: + // No-op. + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + pv.Int64Value = proto.Int64(v.Int()) + case reflect.Bool: + pv.BooleanValue = proto.Bool(v.Bool()) + case reflect.String: + pv.StringValue = proto.String(v.String()) + case reflect.Float32, reflect.Float64: + pv.DoubleValue = proto.Float64(v.Float()) + case reflect.Ptr: + if k, ok := v.Interface().(*Key); ok { + if k != nil { + pv.Referencevalue = keyToReferenceValue(defaultAppID, k) + } + } else { + unsupported = true + } + case reflect.Struct: + switch t := v.Interface().(type) { + case time.Time: + if t.Before(minTime) || t.After(maxTime) { + return nil, "time value out of range" + } + pv.Int64Value = proto.Int64(toUnixMicro(t)) + case appengine.GeoPoint: + if !t.Valid() { + return nil, "invalid GeoPoint value" + } + // NOTE: Strangely, latitude maps to X, longitude to Y. 
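+			// propertiesToProto below applies the same mapping when saving
+			// GeoPoint-valued Properties.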
+ pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng} + default: + unsupported = true + } + case reflect.Slice: + if b, ok := v.Interface().([]byte); ok { + pv.StringValue = proto.String(string(b)) + } else { + // nvToProto should already catch slice values. + // If we get here, we have a slice of slice values. + unsupported = true + } + default: + unsupported = true + } + if unsupported { + return nil, "unsupported datastore value type: " + v.Type().String() + } + p = &pb.Property{ + Name: proto.String(name), + Value: &pv, + Multiple: proto.Bool(multiple), + } + if v.IsValid() { + switch v.Interface().(type) { + case []byte: + p.Meaning = pb.Property_BLOB.Enum() + case ByteString: + p.Meaning = pb.Property_BYTESTRING.Enum() + case appengine.BlobKey: + p.Meaning = pb.Property_BLOBKEY.Enum() + case time.Time: + p.Meaning = pb.Property_GD_WHEN.Enum() + case appengine.GeoPoint: + p.Meaning = pb.Property_GEORSS_POINT.Enum() + } + } + return p, "" +} + +// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. +func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) { + var err error + var props []Property + if e, ok := src.(PropertyLoadSaver); ok { + props, err = e.Save() + } else { + props, err = SaveStruct(src) + } + if err != nil { + return nil, err + } + return propertiesToProto(defaultAppID, key, props) +} + +func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { + p := Property{ + Name: name, + NoIndex: noIndex, + Multiple: multiple, + } + switch x := v.Interface().(type) { + case *Key: + p.Value = x + case time.Time: + p.Value = x + case appengine.BlobKey: + p.Value = x + case appengine.GeoPoint: + p.Value = x + case ByteString: + p.Value = x + default: + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.Value = v.Int() + case reflect.Bool: + p.Value = v.Bool() + case reflect.String: + p.Value = v.String() + case reflect.Float32, reflect.Float64: + p.Value = v.Float() + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + p.NoIndex = true + p.Value = v.Bytes() + } + case reflect.Struct: + if !v.CanAddr() { + return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") + } + sub, err := newStructPLS(v.Addr().Interface()) + if err != nil { + return fmt.Errorf("datastore: unsupported struct field: %v", err) + } + return sub.(structPLS).save(props, name, noIndex, multiple) + } + } + if p.Value == nil { + return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) + } + *props = append(*props, p) + return nil +} + +func (s structPLS) Save() ([]Property, error) { + var props []Property + if err := s.save(&props, "", false, false); err != nil { + return nil, err + } + return props, nil +} + +func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { + for i, t := range s.codec.byIndex { + if t.name == "-" { + continue + } + name := t.name + if prefix != "" { + name = prefix + name + } + v := s.v.Field(i) + if !v.IsValid() || !v.CanSet() { + continue + } + noIndex1 := noIndex || t.noIndex + // For slice fields that aren't []byte, save each element. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + for j := 0; j < v.Len(); j++ { + if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { + return err + } + } + continue + } + // Otherwise, save the field itself. 
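+		// Struct-valued fields recurse through saveStructProperty, which
+		// calls back into structPLS.save with this field's name as a prefix.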
+ if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { + return err + } + } + return nil +} + +func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) { + e := &pb.EntityProto{ + Key: keyToProto(defaultAppID, key), + } + if key.parent == nil { + e.EntityGroup = &pb.Path{} + } else { + e.EntityGroup = keyToProto(defaultAppID, key.root()).Path + } + prevMultiple := make(map[string]bool) + + for _, p := range props { + if pm, ok := prevMultiple[p.Name]; ok { + if !pm || !p.Multiple { + return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name) + } + } else { + prevMultiple[p.Name] = p.Multiple + } + + x := &pb.Property{ + Name: proto.String(p.Name), + Value: new(pb.PropertyValue), + Multiple: proto.Bool(p.Multiple), + } + switch v := p.Value.(type) { + case int64: + x.Value.Int64Value = proto.Int64(v) + case bool: + x.Value.BooleanValue = proto.Bool(v) + case string: + x.Value.StringValue = proto.String(v) + if p.NoIndex { + x.Meaning = pb.Property_TEXT.Enum() + } + case float64: + x.Value.DoubleValue = proto.Float64(v) + case *Key: + if v != nil { + x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v) + } + case time.Time: + if v.Before(minTime) || v.After(maxTime) { + return nil, fmt.Errorf("datastore: time value out of range") + } + x.Value.Int64Value = proto.Int64(toUnixMicro(v)) + x.Meaning = pb.Property_GD_WHEN.Enum() + case appengine.BlobKey: + x.Value.StringValue = proto.String(string(v)) + x.Meaning = pb.Property_BLOBKEY.Enum() + case appengine.GeoPoint: + if !v.Valid() { + return nil, fmt.Errorf("datastore: invalid GeoPoint value") + } + // NOTE: Strangely, latitude maps to X, longitude to Y. + x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng} + x.Meaning = pb.Property_GEORSS_POINT.Enum() + case []byte: + x.Value.StringValue = proto.String(string(v)) + x.Meaning = pb.Property_BLOB.Enum() + if !p.NoIndex { + return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name) + } + case ByteString: + x.Value.StringValue = proto.String(string(v)) + x.Meaning = pb.Property_BYTESTRING.Enum() + default: + if p.Value != nil { + return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name) + } + } + + if p.NoIndex { + e.RawProperty = append(e.RawProperty, x) + } else { + e.Property = append(e.Property, x) + if len(e.Property) > maxIndexedProperties { + return nil, errors.New("datastore: too many indexed properties") + } + } + } + return e, nil +} diff --git a/vendor/google.golang.org/appengine/datastore/time_test.go b/vendor/google.golang.org/appengine/datastore/time_test.go new file mode 100644 index 0000000..ba74b44 --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/time_test.go @@ -0,0 +1,65 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. 
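+	// Only whole microseconds survive: for example, toUnixMicro(time.Unix(0, 1500))
+	// is 1, and fromUnixMicro(1) is time.Unix(0, 1000).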
+ testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. + t0 := time.Unix(0, 123) + t1 := fromUnixMicro(toUnixMicro(t0)) + if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { + t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) + } +} diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go new file mode 100644 index 0000000..a7f3f2b --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/transaction.go @@ -0,0 +1,87 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "errors" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/datastore" +) + +func init() { + internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) { + x.Transaction = t + }) + internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) { + x.Transaction = t + }) + internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) { + x.Transaction = t + }) + internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) { + x.Transaction = t + }) +} + +// ErrConcurrentTransaction is returned when a transaction is rolled back due +// to a conflict with a concurrent transaction. +var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") + +// RunInTransaction runs f in a transaction. It calls f with a transaction +// context tc that f should use for all App Engine operations. +// +// If f returns nil, RunInTransaction attempts to commit the transaction, +// returning nil if it succeeds. If the commit fails due to a conflicting +// transaction, RunInTransaction retries f, each time with a new transaction +// context. It gives up and returns ErrConcurrentTransaction after three +// failed attempts. The number of attempts can be configured by specifying +// TransactionOptions.Attempts. 
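+//
+// A minimal sketch (the Counter type and key here are illustrative):
+//	err := RunInTransaction(c, func(tc context.Context) error {
+//		var ctr Counter
+//		if err := Get(tc, key, &ctr); err != nil && err != ErrNoSuchEntity {
+//			return err
+//		}
+//		ctr.Count++
+//		_, err := Put(tc, key, &ctr)
+//		return err
+//	}, nil)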
+// +// If f returns non-nil, then any datastore changes will not be applied and +// RunInTransaction returns that same error. The function f is not retried. +// +// Note that when f returns, the transaction is not yet committed. Calling code +// must be careful not to assume that any of f's changes have been committed +// until RunInTransaction returns nil. +// +// Since f may be called multiple times, f should usually be idempotent. +// datastore.Get is not idempotent when unmarshaling slice fields. +// +// Nested transactions are not supported; c may not be a transaction context. +func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error { + xg := false + if opts != nil { + xg = opts.XG + } + attempts := 3 + if opts != nil && opts.Attempts > 0 { + attempts = opts.Attempts + } + for i := 0; i < attempts; i++ { + if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction { + return err + } + } + return ErrConcurrentTransaction +} + +// TransactionOptions are the options for running a transaction. +type TransactionOptions struct { + // XG is whether the transaction can cross multiple entity groups. In + // comparison, a single group transaction is one where all datastore keys + // used have the same root key. Note that cross group transactions do not + // have the same behavior as single group transactions. In particular, it + // is much more likely to see partially applied transactions in different + // entity groups, in global queries. + // It is valid to set XG to true even if the transaction is within a + // single entity group. + XG bool + // Attempts controls the number of retries to perform when commits fail + // due to a conflicting transaction. If omitted, it defaults to 3. + Attempts int +} diff --git a/vendor/google.golang.org/appengine/delay/delay.go b/vendor/google.golang.org/appengine/delay/delay.go new file mode 100644 index 0000000..9e517ca --- /dev/null +++ b/vendor/google.golang.org/appengine/delay/delay.go @@ -0,0 +1,278 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package delay provides a way to execute code outside the scope of a +user request by using the taskqueue API. + +To declare a function that may be executed later, call Func +in a top-level assignment context, passing it an arbitrary string key +and a function whose first argument is of type context.Context. + var laterFunc = delay.Func("key", myFunc) +It is also possible to use a function literal. + var laterFunc = delay.Func("key", func(c context.Context, x string) { + // ... + }) + +To call a function, invoke its Call method. + laterFunc.Call(c, "something") +A function may be called any number of times. If the function has any +return arguments, and the last one is of type error, the function may +return a non-nil error to signal that the function should be retried. + +The arguments to functions may be of any type that is encodable by the gob +package. If an argument is of interface type, it is the client's responsibility +to register with the gob package whatever concrete type may be passed for that +argument; see http://golang.org/pkg/gob/#Register for details. + +Any errors during initialization or execution of a function will be +logged to the application logs. Error logs that occur during initialization will +be associated with the request that invoked the Call method. 
+
+The state of a function invocation that has not yet successfully
+executed is preserved by combining the file name in which it is declared
+with the string key that was passed to the Func function. Updating an app
+with pending function invocations is safe as long as the relevant
+functions have the (filename, key) combination preserved.
+
+The delay package uses the Task Queue API to create tasks that call the
+reserved application path "/_ah/queue/go/delay".
+This path must not be marked as "login: required" in app.yaml;
+it must be marked as "login: admin" or have no access restriction.
+*/
+package delay // import "google.golang.org/appengine/delay"
+
+import (
+	"bytes"
+	"encoding/gob"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"runtime"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/log"
+	"google.golang.org/appengine/taskqueue"
+)
+
+// Function represents a function that may have a delayed invocation.
+type Function struct {
+	fv  reflect.Value // Kind() == reflect.Func
+	key string
+	err error // any error during initialization
+}
+
+const (
+	// The HTTP path for invocations.
+	path = "/_ah/queue/go/delay"
+	// Use the default queue.
+	queue = ""
+)
+
+var (
+	// registry of all delayed functions
+	funcs = make(map[string]*Function)
+
+	// precomputed types
+	contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+	errorType   = reflect.TypeOf((*error)(nil)).Elem()
+
+	// errors
+	errFirstArg = errors.New("first argument must be context.Context")
+)
+
+// Func declares a new Function. The second argument must be a function with a
+// first argument of type context.Context.
+// This function must be called at program initialization time. That means it
+// must be called in a global variable declaration or from an init function.
+// This restriction is necessary because the instance that delays a function
+// call may not be the one that executes it. Only the code executed at program
+// initialization time is guaranteed to have been run by an instance before it
+// receives a request.
+func Func(key string, i interface{}) *Function {
+	f := &Function{fv: reflect.ValueOf(i)}
+
+	// Derive unique, somewhat stable key for this func.
+	_, file, _, _ := runtime.Caller(1)
+	f.key = file + ":" + key
+
+	t := f.fv.Type()
+	if t.Kind() != reflect.Func {
+		f.err = errors.New("not a function")
+		return f
+	}
+	if t.NumIn() == 0 || t.In(0) != contextType {
+		f.err = errFirstArg
+		return f
+	}
+
+	// Register the function's arguments with the gob package.
+	// This is required because they are marshaled inside a []interface{}.
+	// gob.Register only expects to be called during initialization;
+	// that's fine because this function expects the same.
+	for i := 0; i < t.NumIn(); i++ {
+		// Only concrete types may be registered. If the argument has
+		// interface type, the client is responsible for registering the
+		// concrete types it will hold.
+		if t.In(i).Kind() == reflect.Interface {
+			continue
+		}
+		gob.Register(reflect.Zero(t.In(i)).Interface())
+	}
+
+	if old := funcs[f.key]; old != nil {
+		old.err = fmt.Errorf("multiple functions registered for %s in %s", key, file)
+	}
+	funcs[f.key] = f
+	return f
+}
+
+type invocation struct {
+	Key  string
+	Args []interface{}
+}
+
+// Call invokes a delayed function.
+//   err := f.Call(c, ...)
+// is equivalent to
+//   t, _ := f.Task(...)
+//   _, err := taskqueue.Add(c, t, "")
+func (f *Function) Call(c context.Context, args ...interface{}) error {
+	t, err := f.Task(args...)
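+	// Task gob-encoded the function's key and arguments into the task
+	// payload; all that remains is to enqueue it on the default queue.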
+ if err != nil { + return err + } + _, err = taskqueueAdder(c, t, queue) + return err +} + +// Task creates a Task that will invoke the function. +// Its parameters may be tweaked before adding it to a queue. +// Users should not modify the Path or Payload fields of the returned Task. +func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) { + if f.err != nil { + return nil, fmt.Errorf("delay: func is invalid: %v", f.err) + } + + nArgs := len(args) + 1 // +1 for the context.Context + ft := f.fv.Type() + minArgs := ft.NumIn() + if ft.IsVariadic() { + minArgs-- + } + if nArgs < minArgs { + return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs) + } + if !ft.IsVariadic() && nArgs > minArgs { + return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs) + } + + // Check arg types. + for i := 1; i < nArgs; i++ { + at := reflect.TypeOf(args[i-1]) + var dt reflect.Type + if i < minArgs { + // not a variadic arg + dt = ft.In(i) + } else { + // a variadic arg + dt = ft.In(minArgs).Elem() + } + // nil arguments won't have a type, so they need special handling. + if at == nil { + // nil interface + switch dt.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + continue // may be nil + } + return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt) + } + switch at.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + av := reflect.ValueOf(args[i-1]) + if av.IsNil() { + // nil value in interface; not supported by gob, so we replace it + // with a nil interface value + args[i-1] = nil + } + } + if !at.AssignableTo(dt) { + return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt) + } + } + + inv := invocation{ + Key: f.key, + Args: args, + } + + buf := new(bytes.Buffer) + if err := gob.NewEncoder(buf).Encode(inv); err != nil { + return nil, fmt.Errorf("delay: gob encoding failed: %v", err) + } + + return &taskqueue.Task{ + Path: path, + Payload: buf.Bytes(), + }, nil +} + +var taskqueueAdder = taskqueue.Add // for testing + +func init() { + http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) { + runFunc(appengine.NewContext(req), w, req) + }) +} + +func runFunc(c context.Context, w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + + var inv invocation + if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil { + log.Errorf(c, "delay: failed decoding task payload: %v", err) + log.Warningf(c, "delay: dropping task") + return + } + + f := funcs[inv.Key] + if f == nil { + log.Errorf(c, "delay: no func with key %q found", inv.Key) + log.Warningf(c, "delay: dropping task") + return + } + + ft := f.fv.Type() + in := []reflect.Value{reflect.ValueOf(c)} + for _, arg := range inv.Args { + var v reflect.Value + if arg != nil { + v = reflect.ValueOf(arg) + } else { + // Task was passed a nil argument, so we must construct + // the zero value for the argument here. 
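+			// (Task replaced such nil values with untyped nils, since gob
+			// cannot encode them; see the nilable-kind check in Task.)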
+ n := len(in) // we're constructing the nth argument + var at reflect.Type + if !ft.IsVariadic() || n < ft.NumIn()-1 { + at = ft.In(n) + } else { + at = ft.In(ft.NumIn() - 1).Elem() + } + v = reflect.Zero(at) + } + in = append(in, v) + } + out := f.fv.Call(in) + + if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType { + if errv := out[n-1]; !errv.IsNil() { + log.Errorf(c, "delay: func failed (will retry): %v", errv.Interface()) + w.WriteHeader(http.StatusInternalServerError) + return + } + } +} diff --git a/vendor/google.golang.org/appengine/delay/delay_test.go b/vendor/google.golang.org/appengine/delay/delay_test.go new file mode 100644 index 0000000..1c37e79 --- /dev/null +++ b/vendor/google.golang.org/appengine/delay/delay_test.go @@ -0,0 +1,375 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package delay + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/taskqueue" +) + +type CustomType struct { + N int +} + +type CustomInterface interface { + N() int +} + +type CustomImpl int + +func (c CustomImpl) N() int { return int(c) } + +// CustomImpl needs to be registered with gob. +func init() { + gob.Register(CustomImpl(0)) +} + +var ( + invalidFunc = Func("invalid", func() {}) + + regFuncRuns = 0 + regFuncMsg = "" + regFunc = Func("reg", func(c context.Context, arg string) { + regFuncRuns++ + regFuncMsg = arg + }) + + custFuncTally = 0 + custFunc = Func("cust", func(c context.Context, ct *CustomType, ci CustomInterface) { + a, b := 2, 3 + if ct != nil { + a = ct.N + } + if ci != nil { + b = ci.N() + } + custFuncTally += a + b + }) + + anotherCustFunc = Func("cust2", func(c context.Context, n int, ct *CustomType, ci CustomInterface) { + }) + + varFuncMsg = "" + varFunc = Func("variadic", func(c context.Context, format string, args ...int) { + // convert []int to []interface{} for fmt.Sprintf. + as := make([]interface{}, len(args)) + for i, a := range args { + as[i] = a + } + varFuncMsg = fmt.Sprintf(format, as...) 
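+		// For example, varFunc.Call(ctx, "%d and %d", 1, 2) should leave
+		// varFuncMsg == "1 and 2" once the simulated task runs
+		// (exercised by TestRunningVariadic below).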
+ }) + + errFuncRuns = 0 + errFuncErr = errors.New("error!") + errFunc = Func("err", func(c context.Context) error { + errFuncRuns++ + if errFuncRuns == 1 { + return nil + } + return errFuncErr + }) + + dupeWhich = 0 + dupe1Func = Func("dupe", func(c context.Context) { + if dupeWhich == 0 { + dupeWhich = 1 + } + }) + dupe2Func = Func("dupe", func(c context.Context) { + if dupeWhich == 0 { + dupeWhich = 2 + } + }) +) + +type fakeContext struct { + ctx context.Context + logging [][]interface{} +} + +func newFakeContext() *fakeContext { + f := new(fakeContext) + f.ctx = internal.WithCallOverride(context.Background(), f.call) + f.ctx = internal.WithLogOverride(f.ctx, f.logf) + return f +} + +func (f *fakeContext) call(ctx context.Context, service, method string, in, out proto.Message) error { + panic("should never be called") +} + +var logLevels = map[int64]string{1: "INFO", 3: "ERROR"} + +func (f *fakeContext) logf(level int64, format string, args ...interface{}) { + f.logging = append(f.logging, append([]interface{}{logLevels[level], format}, args...)) +} + +func TestInvalidFunction(t *testing.T) { + c := newFakeContext() + + if got, want := invalidFunc.Call(c.ctx), fmt.Errorf("delay: func is invalid: %s", errFirstArg); got.Error() != want.Error() { + t.Errorf("Incorrect error: got %q, want %q", got, want) + } +} + +func TestVariadicFunctionArguments(t *testing.T) { + // Check the argument type validation for variadic functions. + + c := newFakeContext() + + calls := 0 + taskqueueAdder = func(c context.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) { + calls++ + return t, nil + } + + varFunc.Call(c.ctx, "hi") + varFunc.Call(c.ctx, "%d", 12) + varFunc.Call(c.ctx, "%d %d %d", 3, 1, 4) + if calls != 3 { + t.Errorf("Got %d calls to taskqueueAdder, want 3", calls) + } + + if got, want := varFunc.Call(c.ctx, "%d %s", 12, "a string is bad"), errors.New("delay: argument 3 has wrong type: string is not assignable to int"); got.Error() != want.Error() { + t.Errorf("Incorrect error: got %q, want %q", got, want) + } +} + +func TestBadArguments(t *testing.T) { + // Try running regFunc with different sets of inappropriate arguments. + + c := newFakeContext() + + tests := []struct { + args []interface{} // all except context + wantErr string + }{ + { + args: nil, + wantErr: "delay: too few arguments to func: 1 < 2", + }, + { + args: []interface{}{"lala", 53}, + wantErr: "delay: too many arguments to func: 3 > 2", + }, + { + args: []interface{}{53}, + wantErr: "delay: argument 1 has wrong type: int is not assignable to string", + }, + } + for i, tc := range tests { + got := regFunc.Call(c.ctx, tc.args...) + if got.Error() != tc.wantErr { + t.Errorf("Call %v: got %q, want %q", i, got, tc.wantErr) + } + } +} + +func TestRunningFunction(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + regFuncRuns, regFuncMsg = 0, "" // reset state + const msg = "Why, hello!" + regFunc.Call(c.ctx, msg) + + // Simulate the Task Queue service. 
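+	// In production the task queue service would POST task.Payload to
+	// "/_ah/queue/go/delay"; here the same gob payload is fed straight
+	// to runFunc.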
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if regFuncRuns != 1 { + t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns) + } + if regFuncMsg != msg { + t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg) + } +} + +func TestCustomType(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + custFuncTally = 0 // reset state + custFunc.Call(c.ctx, &CustomType{N: 11}, CustomImpl(13)) + + // Simulate the Task Queue service. + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if custFuncTally != 24 { + t.Errorf("custFuncTally = %d, want 24", custFuncTally) + } + + // Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value), + // and the other is a nil interface value. + custFuncTally = 0 // reset state + custFunc.Call(c.ctx, (*CustomType)(nil), nil) + + // Simulate the Task Queue service. + req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw = httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if custFuncTally != 5 { + t.Errorf("custFuncTally = %d, want 5", custFuncTally) + } +} + +func TestRunningVariadic(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + varFuncMsg = "" // reset state + varFunc.Call(c.ctx, "Amiga %d has %d KB RAM", 500, 512) + + // Simulate the Task Queue service. + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + const expected = "Amiga 500 has 512 KB RAM" + if varFuncMsg != expected { + t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected) + } +} + +func TestErrorFunction(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + errFunc.Call(c.ctx) + + // Simulate the Task Queue service. + // The first call should succeed; the second call should fail. 
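+	// (On failure runFunc responds with HTTP 500, the status that tells
+	// the task queue service to retry the task.)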
+ { + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + } + { + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + if rw.Code != http.StatusInternalServerError { + t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError) + } + + wantLogging := [][]interface{}{ + {"ERROR", "delay: func failed (will retry): %v", errFuncErr}, + } + if !reflect.DeepEqual(c.logging, wantLogging) { + t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging) + } + } +} + +func TestDuplicateFunction(t *testing.T) { + c := newFakeContext() + + // Fake out the adding of a task. + var task *taskqueue.Task + taskqueueAdder = func(_ context.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) { + if queue != "" { + t.Errorf(`Got queue %q, expected ""`, queue) + } + task = tk + return tk, nil + } + + if err := dupe1Func.Call(c.ctx); err == nil { + t.Error("dupe1Func.Call did not return error") + } + if task != nil { + t.Error("dupe1Func.Call posted a task") + } + if err := dupe2Func.Call(c.ctx); err != nil { + t.Errorf("dupe2Func.Call error: %v", err) + } + if task == nil { + t.Fatalf("dupe2Func.Call did not post a task") + } + + // Simulate the Task Queue service. + req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload)) + if err != nil { + t.Fatalf("Failed making http.Request: %v", err) + } + rw := httptest.NewRecorder() + runFunc(c.ctx, rw, req) + + if dupeWhich == 1 { + t.Error("dupe2Func.Call used old registered function") + } else if dupeWhich != 2 { + t.Errorf("dupeWhich = %d; want 2", dupeWhich) + } +} diff --git a/vendor/google.golang.org/appengine/demos/guestbook/app.yaml b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml new file mode 100644 index 0000000..3342503 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/guestbook/app.yaml @@ -0,0 +1,14 @@ +# Demo application for App Engine "flexible environment". +runtime: go +vm: true +api_version: go1 + +handlers: +# Favicon. Without this, the browser hits this once per page view. +- url: /favicon.ico + static_files: favicon.ico + upload: favicon.ico + +# Main app. All the real work is here. +- url: /.* + script: _go_app diff --git a/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico new file mode 100644 index 0000000..1a71ea7 Binary files /dev/null and b/vendor/google.golang.org/appengine/demos/guestbook/favicon.ico differ diff --git a/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go new file mode 100644 index 0000000..04a0432 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/guestbook/guestbook.go @@ -0,0 +1,109 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This example only works on App Engine "flexible environment". 
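+// The !appengine build tag below selects the flexible-environment code
+// path, in which the app serves HTTP itself via appengine.Main (see main).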
+// +build !appengine + +package main + +import ( + "html/template" + "net/http" + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/datastore" + "google.golang.org/appengine/log" + "google.golang.org/appengine/user" +) + +var initTime time.Time + +type Greeting struct { + Author string + Content string + Date time.Time +} + +func main() { + http.HandleFunc("/", handleMainPage) + http.HandleFunc("/sign", handleSign) + appengine.Main() +} + +// guestbookKey returns the key used for all guestbook entries. +func guestbookKey(ctx context.Context) *datastore.Key { + // The string "default_guestbook" here could be varied to have multiple guestbooks. + return datastore.NewKey(ctx, "Guestbook", "default_guestbook", 0, nil) +} + +var tpl = template.Must(template.ParseGlob("templates/*.html")) + +func handleMainPage(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET requests only", http.StatusMethodNotAllowed) + return + } + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + ctx := appengine.NewContext(r) + tic := time.Now() + q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(ctx)).Order("-Date").Limit(10) + var gg []*Greeting + if _, err := q.GetAll(ctx, &gg); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + log.Errorf(ctx, "GetAll: %v", err) + return + } + log.Infof(ctx, "Datastore lookup took %s", time.Since(tic).String()) + log.Infof(ctx, "Rendering %d greetings", len(gg)) + + var email, logout, login string + if u := user.Current(ctx); u != nil { + logout, _ = user.LogoutURL(ctx, "/") + email = u.Email + } else { + login, _ = user.LoginURL(ctx, "/") + } + data := struct { + Greetings []*Greeting + Login, Logout, Email string + }{ + Greetings: gg, + Login: login, + Logout: logout, + Email: email, + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil { + log.Errorf(ctx, "%v", err) + } +} + +func handleSign(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + ctx := appengine.NewContext(r) + g := &Greeting{ + Content: r.FormValue("content"), + Date: time.Now(), + } + if u := user.Current(ctx); u != nil { + g.Author = u.String() + } + key := datastore.NewIncompleteKey(ctx, "Greeting", guestbookKey(ctx)) + if _, err := datastore.Put(ctx, key, g); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + // Redirect with 303 which causes the subsequent request to use GET. + http.Redirect(w, r, "/", http.StatusSeeOther) +} diff --git a/vendor/google.golang.org/appengine/demos/guestbook/index.yaml b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml new file mode 100644 index 0000000..315ffeb --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/guestbook/index.yaml @@ -0,0 +1,7 @@ +indexes: + +- kind: Greeting + ancestor: yes + properties: + - name: Date + direction: desc diff --git a/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html new file mode 100644 index 0000000..322b7cf --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html @@ -0,0 +1,26 @@ + + + + Guestbook Demo + + +

+    <p>
+      {{with .Email}}You are currently logged in as {{.}}.{{end}}
+      {{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
+      {{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
+    </p>
+
+    {{range .Greetings }}
+      <p>
+        {{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
+        on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
+        wrote <blockquote>{{.Content}}</blockquote>
+      </p>
+    {{end}}
+
+    <form action="/sign" method="post">
+      <div><textarea name="content"></textarea></div>
+      <div><input type="submit" value="Sign Guestbook"></div>
+    </form>
    + + diff --git a/vendor/google.golang.org/appengine/demos/helloworld/app.yaml b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml new file mode 100644 index 0000000..1509119 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/helloworld/app.yaml @@ -0,0 +1,10 @@ +runtime: go +api_version: go1 +vm: true + +handlers: +- url: /favicon.ico + static_files: favicon.ico + upload: favicon.ico +- url: /.* + script: _go_app diff --git a/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico new file mode 100644 index 0000000..f19c04d Binary files /dev/null and b/vendor/google.golang.org/appengine/demos/helloworld/favicon.ico differ diff --git a/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go new file mode 100644 index 0000000..fbe9f56 --- /dev/null +++ b/vendor/google.golang.org/appengine/demos/helloworld/helloworld.go @@ -0,0 +1,50 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This example only works on App Engine "flexible environment". +// +build !appengine + +package main + +import ( + "html/template" + "net/http" + "time" + + "google.golang.org/appengine" + "google.golang.org/appengine/log" +) + +var initTime = time.Now() + +func main() { + http.HandleFunc("/", handle) + appengine.Main() +} + +func handle(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + ctx := appengine.NewContext(r) + log.Infof(ctx, "Serving the front page.") + + tmpl.Execute(w, time.Since(initTime)) +} + +var tmpl = template.Must(template.New("front").Parse(` + + +

+Hello, World! 세상아 안녕!
+</p>
+
+<p>
+This instance has been running for {{.}}.
+</p>
    + + +`)) diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 0000000..16d0772 --- /dev/null +++ b/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go new file mode 100644 index 0000000..c3cd58b --- /dev/null +++ b/vendor/google.golang.org/appengine/file/file.go @@ -0,0 +1,28 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package file provides helper functions for using Google Cloud Storage. +package file + +import ( + "fmt" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + aipb "google.golang.org/appengine/internal/app_identity" +) + +// DefaultBucketName returns the name of this application's +// default Google Cloud Storage bucket. +func DefaultBucketName(c context.Context) (string, error) { + req := &aipb.GetDefaultGcsBucketNameRequest{} + res := &aipb.GetDefaultGcsBucketNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res) + if err != nil { + return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res) + } + return res.GetDefaultGcsBucketName(), nil +} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 0000000..b8dcf8f --- /dev/null +++ b/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,142 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "time" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). +func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. 
"my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. +func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. +func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. +type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. 
+func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). +func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/image/image.go b/vendor/google.golang.org/appengine/image/image.go new file mode 100644 index 0000000..027a41b --- /dev/null +++ b/vendor/google.golang.org/appengine/image/image.go @@ -0,0 +1,67 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package image provides image services. +package image // import "google.golang.org/appengine/image" + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/image" +) + +type ServingURLOptions struct { + Secure bool // whether the URL should use HTTPS + + // Size must be between zero and 1600. + // If Size is non-zero, a resized version of the image is served, + // and Size is the served image's longest dimension. The aspect ratio is preserved. + // If Crop is true the image is cropped from the center instead of being resized. + Size int + Crop bool +} + +// ServingURL returns a URL that will serve an image from Blobstore. +func ServingURL(c context.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) { + req := &pb.ImagesGetUrlBaseRequest{ + BlobKey: (*string)(&key), + } + if opts != nil && opts.Secure { + req.CreateSecureUrl = &opts.Secure + } + res := &pb.ImagesGetUrlBaseResponse{} + if err := internal.Call(c, "images", "GetUrlBase", req, res); err != nil { + return nil, err + } + + // The URL may have suffixes added to dynamically resize or crop: + // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio. + // - adding "=s32-c" is the same as "=s32" except it will be cropped. 
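+	// For example, opts = &ServingURLOptions{Size: 400, Crop: true}
+	// turns ".../photo" into ".../photo=s400-c" below.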
+ u := *res.Url + if opts != nil && opts.Size > 0 { + u += fmt.Sprintf("=s%d", opts.Size) + if opts.Crop { + u += "-c" + } + } + return url.Parse(u) +} + +// DeleteServingURL deletes the serving URL for an image. +func DeleteServingURL(c context.Context, key appengine.BlobKey) error { + req := &pb.ImagesDeleteUrlBaseRequest{ + BlobKey: (*string)(&key), + } + res := &pb.ImagesDeleteUrlBaseResponse{} + return internal.Call(c, "images", "DeleteUrlBase", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/internal/aetesting/fake.go b/vendor/google.golang.org/appengine/internal/aetesting/fake.go new file mode 100644 index 0000000..eb5b2c6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/aetesting/fake.go @@ -0,0 +1,81 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package aetesting provides utilities for testing App Engine packages. +// This is not for testing user applications. +package aetesting + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// FakeSingleContext returns a context whose Call invocations will be serviced +// by f, which should be a function that has two arguments of the input and output +// protocol buffer type, and one error return. +func FakeSingleContext(t *testing.T, service, method string, f interface{}) context.Context { + fv := reflect.ValueOf(f) + if fv.Kind() != reflect.Func { + t.Fatal("not a function") + } + ft := fv.Type() + if ft.NumIn() != 2 || ft.NumOut() != 1 { + t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut()) + } + for i := 0; i < 2; i++ { + at := ft.In(i) + if !at.Implements(protoMessageType) { + t.Fatalf("arg %d does not implement proto.Message", i) + } + } + if ft.Out(0) != errorType { + t.Fatalf("f's return is %v, want error", ft.Out(0)) + } + s := &single{ + t: t, + service: service, + method: method, + f: fv, + } + return internal.WithCallOverride(internal.ContextForTesting(&http.Request{}), s.call) +} + +var ( + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() + errorType = reflect.TypeOf((*error)(nil)).Elem() +) + +type single struct { + t *testing.T + service, method string + f reflect.Value +} + +func (s *single) call(ctx context.Context, service, method string, in, out proto.Message) error { + if service == "__go__" { + if method == "GetNamespace" { + return nil // always yield an empty namespace + } + return fmt.Errorf("Unknown API call /%s.%s", service, method) + } + if service != s.service || method != s.method { + s.t.Fatalf("Unexpected call to /%s.%s", service, method) + } + ins := []reflect.Value{ + reflect.ValueOf(in), + reflect.ValueOf(out), + } + outs := s.f.Call(ins) + if outs[0].IsNil() { + return nil + } + return outs[0].Interface().(error) +} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go new file mode 100644 index 0000000..ec5aa59 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -0,0 +1,646 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
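+// This file implements the flexible-environment side of the API plumbing:
+// each user request is wrapped in a *context, and API calls are proxied
+// over HTTP to the service bridge (see Call and (*context).post below).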
+ +// +build !appengine + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. + apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + stopFlushing := make(chan int) + + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. 
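+	// The count set in the X-AppEngine-Log-Flush-Count header below
+	// includes the asynchronous flush started here, so it reflects every
+	// flush this request will have performed.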
+ c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. + const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. 
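+	// (This typically fires when appengine.NewContext is called with a
+	// request that never passed through handleHTTP, e.g. from a
+	// background goroutine that outlives its originating request.)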
+ log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) 
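+	// (The body is buffered rather than streamed; handleHTTP writes the
+	// accumulated bytes out once the user's handler returns.)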
+ return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. + if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. 
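+	// Callers can shorten this via a context deadline, e.g. (sketch):
+	//
+	//	ctx, cancel := netcontext.WithTimeout(parent, 5*time.Second)
+	//	defer cancel()
+	//	err := Call(ctx, "datastore_v3", "Get", req, res)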
+ timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. + const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
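+			// Prepending keeps the rescued lines in their original order,
+			// ahead of anything queued while the flush RPC was running;
+			// a later flush will retry them all.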
+ c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go new file mode 100644 index 0000000..597f66e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -0,0 +1,159 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "errors" + "fmt" + "net/http" + "time" + + "appengine" + "appengine_internal" + basepb "appengine_internal/base" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var contextKey = "holds an appengine.Context" + +func fromContext(ctx netcontext.Context) appengine.Context { + c, _ := ctx.Value(&contextKey).(appengine.Context) + return c +} + +// This is only for classic App Engine adapters. 
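+// A hypothetical bridge from netcontext-based code to an old-style API:
+//
+//	func doOld(ctx netcontext.Context) {
+//		c := internal.ClassicContextFromContext(ctx)
+//		// ... pass c to any API that still takes an appengine.Context ...
+//	}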
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { + return fromContext(ctx) +} + +func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + + s := &basepb.StringProto{} + c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) + if ns := s.GetValue(); ns != "" { + ctx = NamespacedContext(ctx, ns) + } + + return ctx +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + if req, ok := c.Request().(*http.Request); ok { + return req.Header + } + } + return nil +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + c := appengine.NewContext(req) + return withContext(parent, c) +} + +type testingContext struct { + appengine.Context + + req *http.Request +} + +func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } +func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { + if service == "__go__" && method == "GetNamespace" { + return nil + } + return fmt.Errorf("testingContext: unsupported Call") +} +func (t *testingContext) Request() interface{} { return t.req } + +func ContextForTesting(req *http.Request) netcontext.Context { + return withContext(netcontext.Background(), &testingContext{req: req}) +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errors.New("not an App Engine context") + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + var opts *appengine_internal.CallOptions + if d, ok := ctx.Deadline(); ok { + opts = &appengine_internal.CallOptions{ + Timeout: d.Sub(time.Now()), + } + } + + err := c.Call(service, method, in, out, opts) + switch v := err.(type) { + case *appengine_internal.APIError: + return &APIError{ + Service: v.Service, + Detail: v.Detail, + Code: v.Code, + } + case *appengine_internal.CallError: + return &CallError{ + Detail: v.Detail, + Code: v.Code, + Timeout: v.Timeout, + } + } + return err +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + panic("handleHTTP called; this should be impossible") +} + +func logf(c appengine.Context, level int64, format string, args ...interface{}) { + var fn func(format string, args ...interface{}) + switch level { + case 0: + fn = c.Debugf + case 1: + fn = c.Infof + case 2: + fn = c.Warningf + case 3: + fn = c.Errorf + case 4: + fn = c.Criticalf + default: + // This shouldn't happen. + fn = c.Criticalf + } + fn(format, args...) +} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 0000000..2db33a7 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,86 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. + var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. + f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. + ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + logf(fromContext(ctx), level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + return withNamespace(ctx, namespace) +} diff --git a/vendor/google.golang.org/appengine/internal/api_race_test.go b/vendor/google.golang.org/appengine/internal/api_race_test.go new file mode 100644 index 0000000..6cfe906 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_race_test.go @@ -0,0 +1,9 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
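+// This file is compiled only under "go test -race"; its init flips
+// raceDetector so that allocation-sensitive tests (see
+// TestAPICallAllocations) can skip themselves, since the race detector
+// would skew the allocation counts.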
+ +// +build race + +package internal + +func init() { raceDetector = true } diff --git a/vendor/google.golang.org/appengine/internal/api_test.go b/vendor/google.golang.org/appengine/internal/api_test.go new file mode 100644 index 0000000..386d7f6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_test.go @@ -0,0 +1,467 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const testTicketHeader = "X-Magic-Ticket-Header" + +func init() { + ticketHeader = testTicketHeader +} + +type fakeAPIHandler struct { + hang chan int // used for RunSlowly RPC + + LogFlushes int32 // atomic +} + +func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + writeResponse := func(res *remotepb.Response) { + hresBody, err := proto.Marshal(res) + if err != nil { + http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500) + return + } + w.Write(hresBody) + } + + if r.URL.Path != "/rpc_http" { + http.NotFound(w, r) + return + } + hreqBody, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, fmt.Sprintf("Bad body: %v", err), 500) + return + } + apiReq := &remotepb.Request{} + if err := proto.Unmarshal(hreqBody, apiReq); err != nil { + http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500) + return + } + if *apiReq.RequestId != "s3cr3t" { + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)), + Detail: proto.String("bad security ticket"), + }, + }) + return + } + if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want { + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)), + Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)), + }, + }) + return + } + + service, method := *apiReq.ServiceName, *apiReq.Method + var resOut proto.Message + if service == "actordb" && method == "LookupActor" { + req := &basepb.StringProto{} + res := &basepb.StringProto{} + if err := proto.Unmarshal(apiReq.Request, req); err != nil { + http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500) + return + } + if *req.Value == "Doctor Who" { + res.Value = proto.String("David Tennant") + } + resOut = res + } + if service == "errors" { + switch method { + case "Non200": + http.Error(w, "I'm a little teapot.", 418) + return + case "ShortResponse": + w.Header().Set("Content-Length", "100") + w.Write([]byte("way too short")) + return + case "OverQuota": + writeResponse(&remotepb.Response{ + RpcError: &remotepb.RpcError{ + Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)), + Detail: proto.String("you are hogging the resources!"), + }, + }) + return + case "RunSlowly": + // TestAPICallRPCFailure creates f.hang, but does not strobe it + // until Call returns with remotepb.RpcError_CANCELLED. + // This is here to force a happens-before relationship between + // the httptest server handler and shutdown. 
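+			// (The test's later send on f.hang cannot complete until this
+			// receive runs, so the handler is guaranteed to still be
+			// executing when the client-side deadline fires.)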
+ <-f.hang + resOut = &basepb.VoidProto{} + } + } + if service == "logservice" && method == "Flush" { + // Pretend log flushing is slow. + time.Sleep(50 * time.Millisecond) + atomic.AddInt32(&f.LogFlushes, 1) + resOut = &basepb.VoidProto{} + } + + encOut, err := proto.Marshal(resOut) + if err != nil { + http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500) + return + } + writeResponse(&remotepb.Response{ + Response: encOut, + }) +} + +func setup() (f *fakeAPIHandler, c *context, cleanup func()) { + f = &fakeAPIHandler{} + srv := httptest.NewServer(f) + u, err := url.Parse(srv.URL + apiPath) + if err != nil { + panic(fmt.Sprintf("url.Parse(%q): %v", srv.URL+apiPath, err)) + } + return f, &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{"s3cr3t"}, + dapperHeader: []string{"trace-001"}, + }, + }, + apiURL: u, + }, srv.Close +} + +func TestAPICall(t *testing.T) { + _, c, cleanup := setup() + defer cleanup() + + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + err := Call(toContext(c), "actordb", "LookupActor", req, res) + if err != nil { + t.Fatalf("API call failed: %v", err) + } + if got, want := *res.Value, "David Tennant"; got != want { + t.Errorf("Response is %q, want %q", got, want) + } +} + +func TestAPICallRPCFailure(t *testing.T) { + f, c, cleanup := setup() + defer cleanup() + + testCases := []struct { + method string + code remotepb.RpcError_ErrorCode + }{ + {"Non200", remotepb.RpcError_UNKNOWN}, + {"ShortResponse", remotepb.RpcError_UNKNOWN}, + {"OverQuota", remotepb.RpcError_OVER_QUOTA}, + {"RunSlowly", remotepb.RpcError_CANCELLED}, + } + f.hang = make(chan int) // only for RunSlowly + for _, tc := range testCases { + ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond) + err := Call(ctx, "errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{}) + ce, ok := err.(*CallError) + if !ok { + t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err) + continue + } + if ce.Code != int32(tc.code) { + t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code) + } + if tc.method == "RunSlowly" { + f.hang <- 1 // release the HTTP handler + } + } +} + +func TestAPICallDialFailure(t *testing.T) { + // See what happens if the API host is unresponsive. + // This should time out quickly, not hang forever. + _, c, cleanup := setup() + defer cleanup() + // Reset the URL to the production address so that dialing fails. + c.apiURL = apiURL() + + start := time.Now() + err := Call(toContext(c), "foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{}) + const max = 1 * time.Second + if taken := time.Since(start); taken > max { + t.Errorf("Dial hang took too long: %v > %v", taken, max) + } + if err == nil { + t.Error("Call did not fail") + } +} + +func TestDelayedLogFlushing(t *testing.T) { + f, c, cleanup := setup() + defer cleanup() + + http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) { + logC := WithContext(netcontext.Background(), r) + fromContext(logC).apiURL = c.apiURL // Otherwise it will try to use the default URL. + Logf(logC, 1, "It's a lovely day.") + w.WriteHeader(200) + w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush + }) + + r := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Path: "/quick_log", + }, + Header: c.req.Header, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + w := httptest.NewRecorder() + + // Check that log flushing does not hold up the HTTP response. 
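+ // handleHTTP should return as soon as the handler body does; the actual
+ // flush RPC fires later in the background, which the two LogFlushes
+ // checks below pin down (0 immediately after the response, 1 after ~100ms).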
+ start := time.Now() + handleHTTP(w, r) + if d := time.Since(start); d > 10*time.Millisecond { + t.Errorf("handleHTTP took %v, want under 10ms", d) + } + const hdr = "X-AppEngine-Log-Flush-Count" + if h := w.HeaderMap.Get(hdr); h != "1" { + t.Errorf("%s header = %q, want %q", hdr, h, "1") + } + if f := atomic.LoadInt32(&f.LogFlushes); f != 0 { + t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f) + } + + // Check that the log flush eventually comes in. + time.Sleep(100 * time.Millisecond) + if f := atomic.LoadInt32(&f.LogFlushes); f != 1 { + t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f) + } +} + +func TestRemoteAddr(t *testing.T) { + var addr string + http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) { + addr = r.RemoteAddr + }) + + testCases := []struct { + headers http.Header + addr string + }{ + {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"}, + {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"}, + {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"}, + { + http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}}, + "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80", + }, + { + http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}}, + "[::1]:http", + }, + {http.Header{}, "127.0.0.1:80"}, + } + + for _, tc := range testCases { + r := &http.Request{ + Method: "GET", + URL: &url.URL{Scheme: "http", Path: "/remote_addr"}, + Header: tc.headers, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + handleHTTP(httptest.NewRecorder(), r) + if addr != tc.addr { + t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr) + } + } +} + +func TestPanickingHandler(t *testing.T) { + http.HandleFunc("/panic", func(http.ResponseWriter, *http.Request) { + panic("whoops!") + }) + r := &http.Request{ + Method: "GET", + URL: &url.URL{Scheme: "http", Path: "/panic"}, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + } + rec := httptest.NewRecorder() + handleHTTP(rec, r) + if rec.Code != 500 { + t.Errorf("Panicking handler returned HTTP %d, want HTTP %d", rec.Code, 500) + } +} + +var raceDetector = false + +func TestAPICallAllocations(t *testing.T) { + if raceDetector { + t.Skip("not running under race detector") + } + + // Run the test API server in a subprocess so we aren't counting its allocations. + u, cleanup := launchHelperProcess(t) + defer cleanup() + c := &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{"s3cr3t"}, + dapperHeader: []string{"trace-001"}, + }, + }, + apiURL: u, + } + + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + var apiErr error + avg := testing.AllocsPerRun(100, func() { + ctx, _ := netcontext.WithTimeout(toContext(c), 100*time.Millisecond) + if err := Call(ctx, "actordb", "LookupActor", req, res); err != nil && apiErr == nil { + apiErr = err // get the first error only + } + }) + if apiErr != nil { + t.Errorf("API call failed: %v", apiErr) + } + + // Lots of room for improvement... + // TODO(djd): Reduce maximum to 85 once the App Engine SDK is based on 1.6. 
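+ // testing.AllocsPerRun reports the average allocations across the 100
+ // calls above, so the [min,max] window below tolerates run-to-run noise
+ // rather than pinning an exact count.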
+ const min, max float64 = 70, 90 + if avg < min || max < avg { + t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max) + } +} + +func launchHelperProcess(t *testing.T) (apiURL *url.URL, cleanup func()) { + cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess") + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + stdin, err := cmd.StdinPipe() + if err != nil { + t.Fatalf("StdinPipe: %v", err) + } + stdout, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("StdoutPipe: %v", err) + } + if err := cmd.Start(); err != nil { + t.Fatalf("Starting helper process: %v", err) + } + + scan := bufio.NewScanner(stdout) + var u *url.URL + for scan.Scan() { + line := scan.Text() + if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line { + var err error + u, err = url.Parse(hp) + if err != nil { + t.Fatalf("Failed to parse %q: %v", hp, err) + } + break + } + } + if err := scan.Err(); err != nil { + t.Fatalf("Scanning helper process stdout: %v", err) + } + if u == nil { + t.Fatal("Helper process never reported") + } + + return u, func() { + stdin.Close() + if err := cmd.Wait(); err != nil { + t.Errorf("Helper process did not exit cleanly: %v", err) + } + } +} + +const helperProcessMagic = "A lovely helper process is listening at " + +// This isn't a real test. It's used as a helper process. +func TestHelperProcess(*testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + defer os.Exit(0) + + f := &fakeAPIHandler{} + srv := httptest.NewServer(f) + defer srv.Close() + fmt.Println(helperProcessMagic + srv.URL + apiPath) + + // Wait for stdin to be closed. + io.Copy(ioutil.Discard, os.Stdin) +} + +func TestBackgroundContext(t *testing.T) { + environ := []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_MINOR_VERSION", "067924799508853122"}, + {"GAE_MODULE_INSTANCE", "0"}, + {"GAE_MODULE_NAME", "default"}, + {"GAE_MODULE_VERSION", "20150612t184001"}, + } + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + defer func() { // Restore old environment after the test completes. + for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + }() + + ctx, key := fromContext(BackgroundContext()), "X-Magic-Ticket-Header" + if g, w := ctx.req.Header.Get(key), "my-app-id/default.20150612t184001.0"; g != w { + t.Errorf("%v = %q, want %q", key, g, w) + } + + // Check that using the background context doesn't panic. + req := &basepb.StringProto{ + Value: proto.String("Doctor Who"), + } + res := &basepb.StringProto{} + Call(BackgroundContext(), "actordb", "LookupActor", req, res) // expected to fail +} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go new file mode 100644 index 0000000..11df8c0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id.go @@ -0,0 +1,28 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "strings" +) + +func parseFullAppID(appid string) (partition, domain, displayID string) { + if i := strings.Index(appid, "~"); i != -1 { + partition, appid = appid[:i], appid[i+1:] + } + if i := strings.Index(appid, ":"); i != -1 { + domain, appid = appid[:i], appid[i+1:] + } + return partition, domain, appid +} + +// appID returns "appid" or "domain.com:appid". 
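+// For example, mirroring the cases in app_id_test.go:
+//
+//	appID("simple-app-id")           = "simple-app-id"
+//	appID("part~partition-app-id")   = "partition-app-id"
+//	appID("part~domain.com:display") = "domain.com:display"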
+func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/google.golang.org/appengine/internal/app_id_test.go b/vendor/google.golang.org/appengine/internal/app_id_test.go new file mode 100644 index 0000000..e69195c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id_test.go @@ -0,0 +1,34 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "testing" +) + +func TestAppIDParsing(t *testing.T) { + testCases := []struct { + in string + partition, domain, displayID string + }{ + {"simple-app-id", "", "", "simple-app-id"}, + {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"}, + {"part~partition-app-id", "part", "", "partition-app-id"}, + {"part~domain.com:display", "part", "domain.com", "display"}, + } + + for _, tc := range testCases { + part, dom, dis := parseFullAppID(tc.in) + if part != tc.partition { + t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition) + } + if dom != tc.domain { + t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain) + } + if dis != tc.displayID { + t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 0000000..87d9701 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,296 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto +// DO NOT EDIT! + +/* +Package app_identity is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +It has these top-level messages: + AppIdentityServiceError + SignForAppRequest + SignForAppResponse + GetPublicCertificateForAppRequest + PublicCertificate + GetPublicCertificateForAppResponse + GetServiceAccountNameRequest + GetServiceAccountNameResponse + GetAccessTokenRequest + GetAccessTokenResponse + GetDefaultGcsBucketNameRequest + GetDefaultGcsBucketNameResponse +*/ +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
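+// (As in every generated file in this vendor tree, the blank assignments
+// below keep the proto, fmt and math imports referenced even when a given
+// file's messages happen not to use them.)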
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} + +type AppIdentityServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} + +func (m *GetAccessTokenRequest) 
GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto new file mode 100644 index 0000000..19610ca --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "app_identity"; + +package appengine; + +message AppIdentityServiceError { + enum ErrorCode { + SUCCESS = 0; + UNKNOWN_SCOPE = 9; + BLOB_TOO_LARGE = 1000; + DEADLINE_EXCEEDED = 1001; + NOT_A_VALID_APP = 1002; + UNKNOWN_ERROR = 1003; + NOT_ALLOWED = 1005; + NOT_IMPLEMENTED = 1006; + } +} + +message SignForAppRequest { + optional bytes bytes_to_sign = 1; +} + +message SignForAppResponse { + optional string key_name = 1; + optional bytes signature_bytes = 2; +} + +message GetPublicCertificateForAppRequest { +} + +message PublicCertificate { + optional string key_name = 1; + optional string x509_certificate_pem = 2; +} + +message GetPublicCertificateForAppResponse { + repeated PublicCertificate public_certificate_list = 1; + optional int64 max_client_cache_time_in_second = 2; +} + +message GetServiceAccountNameRequest { +} + +message GetServiceAccountNameResponse { + optional string service_account_name = 1; +} + +message GetAccessTokenRequest { + repeated 
string scope = 1; + optional int64 service_account_id = 2; + optional string service_account_name = 3; +} + +message GetAccessTokenResponse { + optional string access_token = 1; + optional int64 expiration_time = 2; +} + +message GetDefaultGcsBucketNameRequest { +} + +message GetDefaultGcsBucketNameResponse { + optional string default_gcs_bucket_name = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 0000000..36a1956 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,133 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/base/api_base.proto +// DO NOT EDIT! + +/* +Package base is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/base/api_base.proto + +It has these top-level messages: + StringProto + Integer32Proto + Integer64Proto + BoolProto + DoubleProto + BytesProto + VoidProto +*/ +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} + +func (m *Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } +func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte 
`protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto new file mode 100644 index 0000000..56cd7a3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto @@ -0,0 +1,33 @@ +// Built-in base types for API calls. Primarily useful as return types. + +syntax = "proto2"; +option go_package = "base"; + +package appengine.base; + +message StringProto { + required string value = 1; +} + +message Integer32Proto { + required int32 value = 1; +} + +message Integer64Proto { + required int64 value = 1; +} + +message BoolProto { + required bool value = 1; +} + +message DoubleProto { + required double value = 1; +} + +message BytesProto { + required bytes value = 1 [ctype=CORD]; +} + +message VoidProto { +} diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go new file mode 100644 index 0000000..8705ec3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go @@ -0,0 +1,347 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/blobstore/blobstore_service.proto +// DO NOT EDIT! + +/* +Package blobstore is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/blobstore/blobstore_service.proto + +It has these top-level messages: + BlobstoreServiceError + CreateUploadURLRequest + CreateUploadURLResponse + DeleteBlobRequest + FetchDataRequest + FetchDataResponse + CloneBlobRequest + CloneBlobResponse + DecodeBlobKeyRequest + DecodeBlobKeyResponse + CreateEncodedGoogleStorageKeyRequest + CreateEncodedGoogleStorageKeyResponse +*/ +package blobstore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type BlobstoreServiceError_ErrorCode int32 + +const ( + BlobstoreServiceError_OK BlobstoreServiceError_ErrorCode = 0 + BlobstoreServiceError_INTERNAL_ERROR BlobstoreServiceError_ErrorCode = 1 + BlobstoreServiceError_URL_TOO_LONG BlobstoreServiceError_ErrorCode = 2 + BlobstoreServiceError_PERMISSION_DENIED BlobstoreServiceError_ErrorCode = 3 + BlobstoreServiceError_BLOB_NOT_FOUND BlobstoreServiceError_ErrorCode = 4 + BlobstoreServiceError_DATA_INDEX_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 5 + BlobstoreServiceError_BLOB_FETCH_SIZE_TOO_LARGE BlobstoreServiceError_ErrorCode = 6 + BlobstoreServiceError_ARGUMENT_OUT_OF_RANGE BlobstoreServiceError_ErrorCode = 8 + BlobstoreServiceError_INVALID_BLOB_KEY BlobstoreServiceError_ErrorCode = 9 +) + +var BlobstoreServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "URL_TOO_LONG", + 3: "PERMISSION_DENIED", + 4: "BLOB_NOT_FOUND", + 5: "DATA_INDEX_OUT_OF_RANGE", + 6: "BLOB_FETCH_SIZE_TOO_LARGE", + 8: "ARGUMENT_OUT_OF_RANGE", + 9: "INVALID_BLOB_KEY", +} +var BlobstoreServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "URL_TOO_LONG": 2, + "PERMISSION_DENIED": 3, + "BLOB_NOT_FOUND": 4, + "DATA_INDEX_OUT_OF_RANGE": 5, + "BLOB_FETCH_SIZE_TOO_LARGE": 6, + "ARGUMENT_OUT_OF_RANGE": 8, + "INVALID_BLOB_KEY": 9, +} + +func (x BlobstoreServiceError_ErrorCode) Enum() *BlobstoreServiceError_ErrorCode { + p := new(BlobstoreServiceError_ErrorCode) + *p = x + return p +} +func (x BlobstoreServiceError_ErrorCode) String() string { + return proto.EnumName(BlobstoreServiceError_ErrorCode_name, int32(x)) +} +func (x *BlobstoreServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BlobstoreServiceError_ErrorCode_value, data, "BlobstoreServiceError_ErrorCode") + if err != nil { + return err + } + *x = BlobstoreServiceError_ErrorCode(value) + return nil +} + +type BlobstoreServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *BlobstoreServiceError) Reset() { *m = BlobstoreServiceError{} } +func (m *BlobstoreServiceError) String() string { return proto.CompactTextString(m) } +func (*BlobstoreServiceError) ProtoMessage() {} + +type CreateUploadURLRequest struct { + SuccessPath *string `protobuf:"bytes,1,req,name=success_path" json:"success_path,omitempty"` + MaxUploadSizeBytes *int64 `protobuf:"varint,2,opt,name=max_upload_size_bytes" json:"max_upload_size_bytes,omitempty"` + MaxUploadSizePerBlobBytes *int64 `protobuf:"varint,3,opt,name=max_upload_size_per_blob_bytes" json:"max_upload_size_per_blob_bytes,omitempty"` + GsBucketName *string `protobuf:"bytes,4,opt,name=gs_bucket_name" json:"gs_bucket_name,omitempty"` + UrlExpiryTimeSeconds *int32 `protobuf:"varint,5,opt,name=url_expiry_time_seconds" json:"url_expiry_time_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateUploadURLRequest) Reset() { *m = CreateUploadURLRequest{} } +func (m *CreateUploadURLRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUploadURLRequest) ProtoMessage() {} + +func (m *CreateUploadURLRequest) GetSuccessPath() string { + if m != nil && m.SuccessPath != nil { + return *m.SuccessPath + } + return "" +} + +func (m *CreateUploadURLRequest) GetMaxUploadSizeBytes() int64 { + if m != nil && m.MaxUploadSizeBytes != nil { + return *m.MaxUploadSizeBytes + } + return 0 +} + +func (m *CreateUploadURLRequest) GetMaxUploadSizePerBlobBytes() int64 { + if m != nil && 
m.MaxUploadSizePerBlobBytes != nil { + return *m.MaxUploadSizePerBlobBytes + } + return 0 +} + +func (m *CreateUploadURLRequest) GetGsBucketName() string { + if m != nil && m.GsBucketName != nil { + return *m.GsBucketName + } + return "" +} + +func (m *CreateUploadURLRequest) GetUrlExpiryTimeSeconds() int32 { + if m != nil && m.UrlExpiryTimeSeconds != nil { + return *m.UrlExpiryTimeSeconds + } + return 0 +} + +type CreateUploadURLResponse struct { + Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateUploadURLResponse) Reset() { *m = CreateUploadURLResponse{} } +func (m *CreateUploadURLResponse) String() string { return proto.CompactTextString(m) } +func (*CreateUploadURLResponse) ProtoMessage() {} + +func (m *CreateUploadURLResponse) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +type DeleteBlobRequest struct { + BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` + Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteBlobRequest) Reset() { *m = DeleteBlobRequest{} } +func (m *DeleteBlobRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteBlobRequest) ProtoMessage() {} + +func (m *DeleteBlobRequest) GetBlobKey() []string { + if m != nil { + return m.BlobKey + } + return nil +} + +func (m *DeleteBlobRequest) GetToken() string { + if m != nil && m.Token != nil { + return *m.Token + } + return "" +} + +type FetchDataRequest struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + StartIndex *int64 `protobuf:"varint,2,req,name=start_index" json:"start_index,omitempty"` + EndIndex *int64 `protobuf:"varint,3,req,name=end_index" json:"end_index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FetchDataRequest) Reset() { *m = FetchDataRequest{} } +func (m *FetchDataRequest) String() string { return proto.CompactTextString(m) } +func (*FetchDataRequest) ProtoMessage() {} + +func (m *FetchDataRequest) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func (m *FetchDataRequest) GetStartIndex() int64 { + if m != nil && m.StartIndex != nil { + return *m.StartIndex + } + return 0 +} + +func (m *FetchDataRequest) GetEndIndex() int64 { + if m != nil && m.EndIndex != nil { + return *m.EndIndex + } + return 0 +} + +type FetchDataResponse struct { + Data []byte `protobuf:"bytes,1000,req,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FetchDataResponse) Reset() { *m = FetchDataResponse{} } +func (m *FetchDataResponse) String() string { return proto.CompactTextString(m) } +func (*FetchDataResponse) ProtoMessage() {} + +func (m *FetchDataResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type CloneBlobRequest struct { + BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + MimeType []byte `protobuf:"bytes,2,req,name=mime_type" json:"mime_type,omitempty"` + TargetAppId []byte `protobuf:"bytes,3,req,name=target_app_id" json:"target_app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloneBlobRequest) Reset() { *m = CloneBlobRequest{} } +func (m *CloneBlobRequest) String() string { return proto.CompactTextString(m) } +func (*CloneBlobRequest) ProtoMessage() {} + +func (m *CloneBlobRequest) GetBlobKey() []byte { + if m != nil { + return m.BlobKey + } + 
return nil +} + +func (m *CloneBlobRequest) GetMimeType() []byte { + if m != nil { + return m.MimeType + } + return nil +} + +func (m *CloneBlobRequest) GetTargetAppId() []byte { + if m != nil { + return m.TargetAppId + } + return nil +} + +type CloneBlobResponse struct { + BlobKey []byte `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloneBlobResponse) Reset() { *m = CloneBlobResponse{} } +func (m *CloneBlobResponse) String() string { return proto.CompactTextString(m) } +func (*CloneBlobResponse) ProtoMessage() {} + +func (m *CloneBlobResponse) GetBlobKey() []byte { + if m != nil { + return m.BlobKey + } + return nil +} + +type DecodeBlobKeyRequest struct { + BlobKey []string `protobuf:"bytes,1,rep,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DecodeBlobKeyRequest) Reset() { *m = DecodeBlobKeyRequest{} } +func (m *DecodeBlobKeyRequest) String() string { return proto.CompactTextString(m) } +func (*DecodeBlobKeyRequest) ProtoMessage() {} + +func (m *DecodeBlobKeyRequest) GetBlobKey() []string { + if m != nil { + return m.BlobKey + } + return nil +} + +type DecodeBlobKeyResponse struct { + Decoded []string `protobuf:"bytes,1,rep,name=decoded" json:"decoded,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DecodeBlobKeyResponse) Reset() { *m = DecodeBlobKeyResponse{} } +func (m *DecodeBlobKeyResponse) String() string { return proto.CompactTextString(m) } +func (*DecodeBlobKeyResponse) ProtoMessage() {} + +func (m *DecodeBlobKeyResponse) GetDecoded() []string { + if m != nil { + return m.Decoded + } + return nil +} + +type CreateEncodedGoogleStorageKeyRequest struct { + Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateEncodedGoogleStorageKeyRequest) Reset() { *m = CreateEncodedGoogleStorageKeyRequest{} } +func (m *CreateEncodedGoogleStorageKeyRequest) String() string { return proto.CompactTextString(m) } +func (*CreateEncodedGoogleStorageKeyRequest) ProtoMessage() {} + +func (m *CreateEncodedGoogleStorageKeyRequest) GetFilename() string { + if m != nil && m.Filename != nil { + return *m.Filename + } + return "" +} + +type CreateEncodedGoogleStorageKeyResponse struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateEncodedGoogleStorageKeyResponse) Reset() { *m = CreateEncodedGoogleStorageKeyResponse{} } +func (m *CreateEncodedGoogleStorageKeyResponse) String() string { return proto.CompactTextString(m) } +func (*CreateEncodedGoogleStorageKeyResponse) ProtoMessage() {} + +func (m *CreateEncodedGoogleStorageKeyResponse) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto new file mode 100644 index 0000000..33b2650 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto @@ -0,0 +1,71 @@ +syntax = "proto2"; +option go_package = "blobstore"; + +package appengine; + +message BlobstoreServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + URL_TOO_LONG = 2; + PERMISSION_DENIED = 3; + BLOB_NOT_FOUND = 4; + DATA_INDEX_OUT_OF_RANGE = 5; + BLOB_FETCH_SIZE_TOO_LARGE = 6; + ARGUMENT_OUT_OF_RANGE = 8; + 
INVALID_BLOB_KEY = 9; + } +} + +message CreateUploadURLRequest { + required string success_path = 1; + optional int64 max_upload_size_bytes = 2; + optional int64 max_upload_size_per_blob_bytes = 3; + optional string gs_bucket_name = 4; + optional int32 url_expiry_time_seconds = 5; +} + +message CreateUploadURLResponse { + required string url = 1; +} + +message DeleteBlobRequest { + repeated string blob_key = 1; + optional string token = 2; +} + +message FetchDataRequest { + required string blob_key = 1; + required int64 start_index = 2; + required int64 end_index = 3; +} + +message FetchDataResponse { + required bytes data = 1000 [ctype = CORD]; +} + +message CloneBlobRequest { + required bytes blob_key = 1; + required bytes mime_type = 2; + required bytes target_app_id = 3; +} + +message CloneBlobResponse { + required bytes blob_key = 1; +} + +message DecodeBlobKeyRequest { + repeated string blob_key = 1; +} + +message DecodeBlobKeyResponse { + repeated string decoded = 1; +} + +message CreateEncodedGoogleStorageKeyRequest { + required string filename = 1; +} + +message CreateEncodedGoogleStorageKeyResponse { + required string blob_key = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go new file mode 100644 index 0000000..1736364 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/capability/capability_service.proto +// DO NOT EDIT! + +/* +Package capability is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/capability/capability_service.proto + +It has these top-level messages: + IsEnabledRequest + IsEnabledResponse +*/ +package capability + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
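+// Editor's note — an illustrative request for the IsEnabled RPC declared at
+// the bottom of capability_service.proto (the package and capability names
+// here are assumptions, not values taken from this repository):
+//
+//	req := &capability.IsEnabledRequest{
+//		Package:    proto.String("datastore_v3"),
+//		Capability: []string{"write"},
+//	}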
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type IsEnabledResponse_SummaryStatus int32 + +const ( + IsEnabledResponse_DEFAULT IsEnabledResponse_SummaryStatus = 0 + IsEnabledResponse_ENABLED IsEnabledResponse_SummaryStatus = 1 + IsEnabledResponse_SCHEDULED_FUTURE IsEnabledResponse_SummaryStatus = 2 + IsEnabledResponse_SCHEDULED_NOW IsEnabledResponse_SummaryStatus = 3 + IsEnabledResponse_DISABLED IsEnabledResponse_SummaryStatus = 4 + IsEnabledResponse_UNKNOWN IsEnabledResponse_SummaryStatus = 5 +) + +var IsEnabledResponse_SummaryStatus_name = map[int32]string{ + 0: "DEFAULT", + 1: "ENABLED", + 2: "SCHEDULED_FUTURE", + 3: "SCHEDULED_NOW", + 4: "DISABLED", + 5: "UNKNOWN", +} +var IsEnabledResponse_SummaryStatus_value = map[string]int32{ + "DEFAULT": 0, + "ENABLED": 1, + "SCHEDULED_FUTURE": 2, + "SCHEDULED_NOW": 3, + "DISABLED": 4, + "UNKNOWN": 5, +} + +func (x IsEnabledResponse_SummaryStatus) Enum() *IsEnabledResponse_SummaryStatus { + p := new(IsEnabledResponse_SummaryStatus) + *p = x + return p +} +func (x IsEnabledResponse_SummaryStatus) String() string { + return proto.EnumName(IsEnabledResponse_SummaryStatus_name, int32(x)) +} +func (x *IsEnabledResponse_SummaryStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IsEnabledResponse_SummaryStatus_value, data, "IsEnabledResponse_SummaryStatus") + if err != nil { + return err + } + *x = IsEnabledResponse_SummaryStatus(value) + return nil +} + +type IsEnabledRequest struct { + Package *string `protobuf:"bytes,1,req,name=package" json:"package,omitempty"` + Capability []string `protobuf:"bytes,2,rep,name=capability" json:"capability,omitempty"` + Call []string `protobuf:"bytes,3,rep,name=call" json:"call,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsEnabledRequest) Reset() { *m = IsEnabledRequest{} } +func (m *IsEnabledRequest) String() string { return proto.CompactTextString(m) } +func (*IsEnabledRequest) ProtoMessage() {} + +func (m *IsEnabledRequest) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *IsEnabledRequest) GetCapability() []string { + if m != nil { + return m.Capability + } + return nil +} + +func (m *IsEnabledRequest) GetCall() []string { + if m != nil { + return m.Call + } + return nil +} + +type IsEnabledResponse struct { + SummaryStatus *IsEnabledResponse_SummaryStatus `protobuf:"varint,1,opt,name=summary_status,enum=appengine.IsEnabledResponse_SummaryStatus" json:"summary_status,omitempty"` + TimeUntilScheduled *int64 `protobuf:"varint,2,opt,name=time_until_scheduled" json:"time_until_scheduled,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IsEnabledResponse) Reset() { *m = IsEnabledResponse{} } +func (m *IsEnabledResponse) String() string { return proto.CompactTextString(m) } +func (*IsEnabledResponse) ProtoMessage() {} + +func (m *IsEnabledResponse) GetSummaryStatus() IsEnabledResponse_SummaryStatus { + if m != nil && m.SummaryStatus != nil { + return *m.SummaryStatus + } + return IsEnabledResponse_DEFAULT +} + +func (m *IsEnabledResponse) GetTimeUntilScheduled() int64 { + if m != nil && m.TimeUntilScheduled != nil { + return *m.TimeUntilScheduled + } + return 0 +} diff --git a/vendor/google.golang.org/appengine/internal/capability/capability_service.proto b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto new file mode 100644 index 0000000..5660ab6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/capability/capability_service.proto @@ 
-0,0 +1,28 @@ +syntax = "proto2"; +option go_package = "capability"; + +package appengine; + +message IsEnabledRequest { + required string package = 1; + repeated string capability = 2; + repeated string call = 3; +} + +message IsEnabledResponse { + enum SummaryStatus { + DEFAULT = 0; + ENABLED = 1; + SCHEDULED_FUTURE = 2; + SCHEDULED_NOW = 3; + DISABLED = 4; + UNKNOWN = 5; + } + optional SummaryStatus summary_status = 1; + + optional int64 time_until_scheduled = 2; +} + +service CapabilityService { + rpc IsEnabled(IsEnabledRequest) returns (IsEnabledResponse) {}; +} diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go new file mode 100644 index 0000000..7b8d00c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/channel/channel_service.proto +// DO NOT EDIT! + +/* +Package channel is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/channel/channel_service.proto + +It has these top-level messages: + ChannelServiceError + CreateChannelRequest + CreateChannelResponse + SendMessageRequest +*/ +package channel + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ChannelServiceError_ErrorCode int32 + +const ( + ChannelServiceError_OK ChannelServiceError_ErrorCode = 0 + ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1 + ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2 + ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3 + ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4 + ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5 +) + +var ChannelServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "INVALID_CHANNEL_KEY", + 3: "BAD_MESSAGE", + 4: "INVALID_CHANNEL_TOKEN_DURATION", + 5: "APPID_ALIAS_REQUIRED", +} +var ChannelServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "INVALID_CHANNEL_KEY": 2, + "BAD_MESSAGE": 3, + "INVALID_CHANNEL_TOKEN_DURATION": 4, + "APPID_ALIAS_REQUIRED": 5, +} + +func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode { + p := new(ChannelServiceError_ErrorCode) + *p = x + return p +} +func (x ChannelServiceError_ErrorCode) String() string { + return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x)) +} +func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode") + if err != nil { + return err + } + *x = ChannelServiceError_ErrorCode(value) + return nil +} + +type ChannelServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} } +func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) } +func (*ChannelServiceError) ProtoMessage() {} + +type CreateChannelRequest struct { + ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` + DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" 
json:"duration_minutes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} } +func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) } +func (*CreateChannelRequest) ProtoMessage() {} + +func (m *CreateChannelRequest) GetApplicationKey() string { + if m != nil && m.ApplicationKey != nil { + return *m.ApplicationKey + } + return "" +} + +func (m *CreateChannelRequest) GetDurationMinutes() int32 { + if m != nil && m.DurationMinutes != nil { + return *m.DurationMinutes + } + return 0 +} + +type CreateChannelResponse struct { + Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` + DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} } +func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) } +func (*CreateChannelResponse) ProtoMessage() {} + +func (m *CreateChannelResponse) GetToken() string { + if m != nil && m.Token != nil { + return *m.Token + } + return "" +} + +func (m *CreateChannelResponse) GetDurationMinutes() int32 { + if m != nil && m.DurationMinutes != nil { + return *m.DurationMinutes + } + return 0 +} + +type SendMessageRequest struct { + ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"` + Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} } +func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) } +func (*SendMessageRequest) ProtoMessage() {} + +func (m *SendMessageRequest) GetApplicationKey() string { + if m != nil && m.ApplicationKey != nil { + return *m.ApplicationKey + } + return "" +} + +func (m *SendMessageRequest) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/channel/channel_service.proto b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto new file mode 100644 index 0000000..2b5a918 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/channel/channel_service.proto @@ -0,0 +1,30 @@ +syntax = "proto2"; +option go_package = "channel"; + +package appengine; + +message ChannelServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + INVALID_CHANNEL_KEY = 2; + BAD_MESSAGE = 3; + INVALID_CHANNEL_TOKEN_DURATION = 4; + APPID_ALIAS_REQUIRED = 5; + } +} + +message CreateChannelRequest { + required string application_key = 1; + optional int32 duration_minutes = 2; +} + +message CreateChannelResponse { + optional string token = 2; + optional int32 duration_minutes = 3; +} + +message SendMessageRequest { + required string application_key = 1; + required string message = 2; +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go new file mode 100644 index 0000000..8613cb7 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go @@ -0,0 +1,2778 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto +// DO NOT EDIT! + +/* +Package datastore is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/datastore/datastore_v3.proto + +It has these top-level messages: + Action + PropertyValue + Property + Path + Reference + User + EntityProto + CompositeProperty + Index + CompositeIndex + IndexPostfix + IndexPosition + Snapshot + InternalHeader + Transaction + Query + CompiledQuery + CompiledCursor + Cursor + Error + Cost + GetRequest + GetResponse + PutRequest + PutResponse + TouchRequest + TouchResponse + DeleteRequest + DeleteResponse + NextRequest + QueryResult + AllocateIdsRequest + AllocateIdsResponse + CompositeIndices + AddActionsRequest + AddActionsResponse + BeginTransactionRequest + CommitResponse +*/ +package datastore + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: "ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} +var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x 
Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} + +type EntityProto_Kind int32 + +const ( + EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + "GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State = 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} 
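+
+// Editor's note: every enum in this file carries the same generated trio of
+// helpers — Enum() returns a pointer to a copy (proto2 enum fields are
+// pointers), String() resolves through the _name table, and UnmarshalJSON()
+// accepts the symbolic name or its number. A small illustrative fragment:
+//
+//	s := datastore.Snapshot_ACTIVE.Enum() // *Snapshot_Status
+//	fmt.Println(s.String())               // prints "ACTIVE"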
+ +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + *p = x + return p +} +func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} + +type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() *Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + 
Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + "CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = PutRequest_AutoIdPolicy(value) + return nil +} + +type Action struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = PropertyValue{} } +func (m *PropertyValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} + +func (m *PropertyValue) GetInt64Value() int64 { + if m != nil && m.Int64Value != nil { + 
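+		// Both guards held: the receiver is non-nil and the optional
+		// field is set, so the dereference below cannot panic. A nil
+		// receiver or an unset field falls through to the zero value
+		// instead; every getter in this file follows the same pattern.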
return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() 
string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + 
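+// Editorial note: the fts_tokenization_option field declares no def= tag, so
+// the getter below falls back to Property_HTML when the field is unset,
+// whereas searchable and locale fall back to their declared defaults. A
+// minimal sketch of the nil-safe behaviour (hypothetical caller code, not
+// part of the generated output):
+//
+//	var p *Property                  // nil receiver is fine for getters
+//	_ = p.GetSearchable()            // false (Default_Property_Searchable)
+//	_ = p.GetLocale()                // "en"  (Default_Property_Locale)
+//	_ = p.GetFtsTokenizationOption() // Property_HTML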
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} + +func (m *Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) ProtoMessage() {} + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && 
m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EntityProto) Reset() { *m = EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} + +func (m *EntityProto) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + +type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction 
*Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} + +const Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() 
string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func (*InternalHeader) ProtoMessage() {} + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` + 
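+	// Editorial note: fields tagged protobuf:"group,..." (Filter here,
+	// Order below) use the legacy proto2 group wire format, which is why
+	// their element types are generated as nested Query_* messages rather
+	// than ordinary embedded messages.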
SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` + Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` + RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false +const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && 
m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && 
m.Property != nil { + return *m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"` + DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` + EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) 
Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return 
m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor) ProtoMessage() {} + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" 
json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) ProtoMessage() {} + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != 
nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return 
proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + return Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } 
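+	// A nil receiver or an unset repeated field yields a nil slice here,
+	// which still ranges safely at the call site.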
+ return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return 
proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" 
json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} + +type 
BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto new file mode 100755 index 0000000..e76f126 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto @@ -0,0 +1,541 @@ +syntax = "proto2"; +option go_package = "datastore"; + +package appengine; + +message Action{} + +message PropertyValue { + optional int64 int64Value = 1; + optional bool booleanValue = 2; + optional string stringValue = 3; + optional double doubleValue = 4; + + optional group PointValue = 5 { + required double x = 6; + required double y = 7; + } + + optional group UserValue = 8 { + required string email = 9; + required string auth_domain = 10; + optional string nickname = 11; + optional string federated_identity = 21; + optional string federated_provider = 22; + } + + optional group ReferenceValue = 12 { + required string app = 13; + optional string name_space = 20; + repeated group PathElement = 14 { + required string type = 15; + optional 
int64 id = 16; + optional string name = 17; + } + } +} + +message Property { + enum Meaning { + NO_MEANING = 0; + BLOB = 14; + TEXT = 15; + BYTESTRING = 16; + + ATOM_CATEGORY = 1; + ATOM_LINK = 2; + ATOM_TITLE = 3; + ATOM_CONTENT = 4; + ATOM_SUMMARY = 5; + ATOM_AUTHOR = 6; + + GD_WHEN = 7; + GD_EMAIL = 8; + GEORSS_POINT = 9; + GD_IM = 10; + + GD_PHONENUMBER = 11; + GD_POSTALADDRESS = 12; + + GD_RATING = 13; + + BLOBKEY = 17; + ENTITY_PROTO = 19; + + INDEX_VALUE = 18; + }; + + optional Meaning meaning = 1 [default = NO_MEANING]; + optional string meaning_uri = 2; + + required string name = 3; + + required PropertyValue value = 5; + + required bool multiple = 4; + + optional bool searchable = 6 [default=false]; + + enum FtsTokenizationOption { + HTML = 1; + ATOM = 2; + } + + optional FtsTokenizationOption fts_tokenization_option = 8; + + optional string locale = 9 [default = "en"]; +} + +message Path { + repeated group Element = 1 { + required string type = 2; + optional int64 id = 3; + optional string name = 4; + } +} + +message Reference { + required string app = 13; + optional string name_space = 20; + required Path path = 14; +} + +message User { + required string email = 1; + required string auth_domain = 2; + optional string nickname = 3; + optional string federated_identity = 6; + optional string federated_provider = 7; +} + +message EntityProto { + required Reference key = 13; + required Path entity_group = 16; + optional User owner = 17; + + enum Kind { + GD_CONTACT = 1; + GD_EVENT = 2; + GD_MESSAGE = 3; + } + optional Kind kind = 4; + optional string kind_uri = 5; + + repeated Property property = 14; + repeated Property raw_property = 15; + + optional int32 rank = 18; +} + +message CompositeProperty { + required int64 index_id = 1; + repeated string value = 2; +} + +message Index { + required string entity_type = 1; + required bool ancestor = 5; + repeated group Property = 2 { + required string name = 3; + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + optional Direction direction = 4 [default = ASCENDING]; + } +} + +message CompositeIndex { + required string app_id = 1; + required int64 id = 2; + required Index definition = 3; + + enum State { + WRITE_ONLY = 1; + READ_WRITE = 2; + DELETED = 3; + ERROR = 4; + } + required State state = 4; + + optional bool only_use_if_required = 6 [default = false]; +} + +message IndexPostfix { + message IndexValue { + required string property_name = 1; + required PropertyValue value = 2; + } + + repeated IndexValue index_value = 1; + + optional Reference key = 2; + + optional bool before = 3 [default=true]; +} + +message IndexPosition { + optional string key = 1; + + optional bool before = 2 [default=true]; +} + +message Snapshot { + enum Status { + INACTIVE = 0; + ACTIVE = 1; + } + + required int64 ts = 1; +} + +message InternalHeader { + optional string qos = 1; +} + +message Transaction { + optional InternalHeader header = 4; + required fixed64 handle = 1; + required string app = 2; + optional bool mark_changes = 3 [default = false]; +} + +message Query { + optional InternalHeader header = 39; + + required string app = 1; + optional string name_space = 29; + + optional string kind = 3; + optional Reference ancestor = 17; + + repeated group Filter = 4 { + enum Operator { + LESS_THAN = 1; + LESS_THAN_OR_EQUAL = 2; + GREATER_THAN = 3; + GREATER_THAN_OR_EQUAL = 4; + EQUAL = 5; + IN = 6; + EXISTS = 7; + } + + required Operator op = 6; + repeated Property property = 14; + } + + optional string search_query = 8; + + repeated group Order = 9 
{ + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + + required string property = 10; + optional Direction direction = 11 [default = ASCENDING]; + } + + enum Hint { + ORDER_FIRST = 1; + ANCESTOR_FIRST = 2; + FILTER_FIRST = 3; + } + optional Hint hint = 18; + + optional int32 count = 23; + + optional int32 offset = 12 [default = 0]; + + optional int32 limit = 16; + + optional CompiledCursor compiled_cursor = 30; + optional CompiledCursor end_compiled_cursor = 31; + + repeated CompositeIndex composite_index = 19; + + optional bool require_perfect_plan = 20 [default = false]; + + optional bool keys_only = 21 [default = false]; + + optional Transaction transaction = 22; + + optional bool compile = 25 [default = false]; + + optional int64 failover_ms = 26; + + optional bool strong = 32; + + repeated string property_name = 33; + + repeated string group_by_property_name = 34; + + optional bool distinct = 24; + + optional int64 min_safe_time_seconds = 35; + + repeated string safe_replica_name = 36; + + optional bool persist_offset = 37 [default=false]; +} + +message CompiledQuery { + required group PrimaryScan = 1 { + optional string index_name = 2; + + optional string start_key = 3; + optional bool start_inclusive = 4; + optional string end_key = 5; + optional bool end_inclusive = 6; + + repeated string start_postfix_value = 22; + repeated string end_postfix_value = 23; + + optional int64 end_unapplied_log_timestamp_us = 19; + } + + repeated group MergeJoinScan = 7 { + required string index_name = 8; + + repeated string prefix_value = 9; + + optional bool value_prefix = 20 [default=false]; + } + + optional Index index_def = 21; + + optional int32 offset = 10 [default = 0]; + + optional int32 limit = 11; + + required bool keys_only = 12; + + repeated string property_name = 24; + + optional int32 distinct_infix_size = 25; + + optional group EntityFilter = 13 { + optional bool distinct = 14 [default=false]; + + optional string kind = 17; + optional Reference ancestor = 18; + } +} + +message CompiledCursor { + optional group Position = 2 { + optional string start_key = 27; + + repeated group IndexValue = 29 { + optional string property = 30; + required PropertyValue value = 31; + } + + optional Reference key = 32; + + optional bool start_inclusive = 28 [default=true]; + } +} + +message Cursor { + required fixed64 cursor = 1; + + optional string app = 2; +} + +message Error { + enum ErrorCode { + BAD_REQUEST = 1; + CONCURRENT_TRANSACTION = 2; + INTERNAL_ERROR = 3; + NEED_INDEX = 4; + TIMEOUT = 5; + PERMISSION_DENIED = 6; + BIGTABLE_ERROR = 7; + COMMITTED_BUT_STILL_APPLYING = 8; + CAPABILITY_DISABLED = 9; + TRY_ALTERNATE_BACKEND = 10; + SAFE_TIME_TOO_OLD = 11; + } +} + +message Cost { + optional int32 index_writes = 1; + optional int32 index_write_bytes = 2; + optional int32 entity_writes = 3; + optional int32 entity_write_bytes = 4; + optional group CommitCost = 5 { + optional int32 requested_entity_puts = 6; + optional int32 requested_entity_deletes = 7; + }; + optional int32 approximate_storage_delta = 8; + optional int32 id_sequence_updates = 9; +} + +message GetRequest { + optional InternalHeader header = 6; + + repeated Reference key = 1; + optional Transaction transaction = 2; + + optional int64 failover_ms = 3; + + optional bool strong = 4; + + optional bool allow_deferred = 5 [default=false]; +} + +message GetResponse { + repeated group Entity = 1 { + optional EntityProto entity = 2; + optional Reference key = 4; + + optional int64 version = 3; + } + + repeated Reference deferred = 5; 
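+ // deferred (above) lists keys whose entities were not returned in this response; allow_deferred on the GetRequest opts in to this behaviour.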
+ + optional bool in_order = 6 [default=true]; +} + +message PutRequest { + optional InternalHeader header = 11; + + repeated EntityProto entity = 1; + optional Transaction transaction = 2; + repeated CompositeIndex composite_index = 3; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; + + enum AutoIdPolicy { + CURRENT = 0; + SEQUENTIAL = 1; + } + optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; +} + +message PutResponse { + repeated Reference key = 1; + optional Cost cost = 2; + repeated int64 version = 3; +} + +message TouchRequest { + optional InternalHeader header = 10; + + repeated Reference key = 1; + repeated CompositeIndex composite_index = 2; + optional bool force = 3 [default = false]; + repeated Snapshot snapshot = 9; +} + +message TouchResponse { + optional Cost cost = 1; +} + +message DeleteRequest { + optional InternalHeader header = 10; + + repeated Reference key = 6; + optional Transaction transaction = 5; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; +} + +message DeleteResponse { + optional Cost cost = 1; + repeated int64 version = 3; +} + +message NextRequest { + optional InternalHeader header = 5; + + required Cursor cursor = 1; + optional int32 count = 2; + + optional int32 offset = 4 [default = 0]; + + optional bool compile = 3 [default = false]; +} + +message QueryResult { + optional Cursor cursor = 1; + + repeated EntityProto result = 2; + + optional int32 skipped_results = 7; + + required bool more_results = 3; + + optional bool keys_only = 4; + + optional bool index_only = 9; + + optional bool small_ops = 10; + + optional CompiledQuery compiled_query = 5; + + optional CompiledCursor compiled_cursor = 6; + + repeated CompositeIndex index = 8; + + repeated int64 version = 11; +} + +message AllocateIdsRequest { + optional InternalHeader header = 4; + + optional Reference model_key = 1; + + optional int64 size = 2; + + optional int64 max = 3; + + repeated Reference reserve = 5; +} + +message AllocateIdsResponse { + required int64 start = 1; + required int64 end = 2; + optional Cost cost = 3; +} + +message CompositeIndices { + repeated CompositeIndex index = 1; +} + +message AddActionsRequest { + optional InternalHeader header = 3; + + required Transaction transaction = 1; + repeated Action action = 2; +} + +message AddActionsResponse { +} + +message BeginTransactionRequest { + optional InternalHeader header = 3; + + required string app = 1; + optional bool allow_multiple_eg = 2 [default = false]; +} + +message CommitResponse { + optional Cost cost = 1; + + repeated group Version = 3 { + required Reference root_entity_key = 4; + required int64 version = 5; + } +} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go new file mode 100644 index 0000000..d538701 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -0,0 +1,14 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import netcontext "golang.org/x/net/context" + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. 
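+// AppID below strips any partition prefix (such as "s~") from the fully-qualified app ID.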
+ +func AppID(c netcontext.Context) string { + return appID(FullyQualifiedAppID(c)) +} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go new file mode 100644 index 0000000..e6b9227 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -0,0 +1,27 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine" + + netcontext "golang.org/x/net/context" +) + +func DefaultVersionHostname(ctx netcontext.Context) string { + return appengine.DefaultVersionHostname(fromContext(ctx)) +} + +func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } +func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } + +func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go new file mode 100644 index 0000000..ebe68b7 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -0,0 +1,97 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "net/http" + "os" + + netcontext "golang.org/x/net/context" +) + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +const ( + hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" + hRequestLogId = "X-AppEngine-Request-Log-Id" + hDatacenter = "X-AppEngine-Datacenter" +) + +func ctxHeaders(ctx netcontext.Context) http.Header { + return fromContext(ctx).Request().Header +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDefaultVersionHostname) +} + +func RequestID(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hRequestLogId) +} + +func Datacenter(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDatacenter) +} + +func ServerSoftware() string { + // TODO(dsymonds): Remove fallback when we've verified this. + if s := os.Getenv("SERVER_SOFTWARE"); s != "" { + return s + } + return "Google App Engine/1.x.x" +} + +// TODO(dsymonds): Remove the metadata fetches. + +func ModuleName(_ netcontext.Context) string { + if s := os.Getenv("GAE_MODULE_NAME"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_name")) +} + +func VersionID(_ netcontext.Context) string { + if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." 
+ string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) +} + +func InstanceID() string { + if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_instance")) +} + +func partitionlessAppID() string { + // gae_project has everything except the partition prefix. + appID := os.Getenv("GAE_LONG_APP_ID") + if appID == "" { + appID = string(mustGetMetadata("instance/attributes/gae_project")) + } + return appID +} + +func fullyQualifiedAppID(_ netcontext.Context) string { + appID := partitionlessAppID() + + part := os.Getenv("GAE_PARTITION") + if part == "" { + part = string(mustGetMetadata("instance/attributes/gae_partition")) + } + + if part != "" { + appID = part + "~" + appID + } + return appID +} + +func IsDevAppServer() bool { + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" +} diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.pb.go b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go new file mode 100644 index 0000000..ba7c722 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/image/images_service.pb.go @@ -0,0 +1,845 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/image/images_service.proto +// DO NOT EDIT! + +/* +Package image is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/image/images_service.proto + +It has these top-level messages: + ImagesServiceError + ImagesServiceTransform + Transform + ImageData + InputSettings + OutputSettings + ImagesTransformRequest + ImagesTransformResponse + CompositeImageOptions + ImagesCanvas + ImagesCompositeRequest + ImagesCompositeResponse + ImagesHistogramRequest + ImagesHistogram + ImagesHistogramResponse + ImagesGetUrlBaseRequest + ImagesGetUrlBaseResponse + ImagesDeleteUrlBaseRequest + ImagesDeleteUrlBaseResponse +*/ +package image + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ImagesServiceError_ErrorCode int32 + +const ( + ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1 + ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2 + ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3 + ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4 + ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5 + ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6 + ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7 + ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8 +) + +var ImagesServiceError_ErrorCode_name = map[int32]string{ + 1: "UNSPECIFIED_ERROR", + 2: "BAD_TRANSFORM_DATA", + 3: "NOT_IMAGE", + 4: "BAD_IMAGE_DATA", + 5: "IMAGE_TOO_LARGE", + 6: "INVALID_BLOB_KEY", + 7: "ACCESS_DENIED", + 8: "OBJECT_NOT_FOUND", +} +var ImagesServiceError_ErrorCode_value = map[string]int32{ + "UNSPECIFIED_ERROR": 1, + "BAD_TRANSFORM_DATA": 2, + "NOT_IMAGE": 3, + "BAD_IMAGE_DATA": 4, + "IMAGE_TOO_LARGE": 5, + "INVALID_BLOB_KEY": 6, + "ACCESS_DENIED": 7, + "OBJECT_NOT_FOUND": 8, +} + +func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode { + p := new(ImagesServiceError_ErrorCode) + *p = x + return p +} +func (x ImagesServiceError_ErrorCode) String() string { + return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x)) +} +func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ImagesServiceError_ErrorCode(value) + return nil +} + +type ImagesServiceTransform_Type int32 + +const ( + ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1 + ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2 + ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3 + ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4 + ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5 + ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6 +) + +var ImagesServiceTransform_Type_name = map[int32]string{ + 1: "RESIZE", + 2: "ROTATE", + 3: "HORIZONTAL_FLIP", + 4: "VERTICAL_FLIP", + 5: "CROP", + 6: "IM_FEELING_LUCKY", +} +var ImagesServiceTransform_Type_value = map[string]int32{ + "RESIZE": 1, + "ROTATE": 2, + "HORIZONTAL_FLIP": 3, + "VERTICAL_FLIP": 4, + "CROP": 5, + "IM_FEELING_LUCKY": 6, +} + +func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type { + p := new(ImagesServiceTransform_Type) + *p = x + return p +} +func (x ImagesServiceTransform_Type) String() string { + return proto.EnumName(ImagesServiceTransform_Type_name, int32(x)) +} +func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type") + if err != nil { + return err + } + *x = ImagesServiceTransform_Type(value) + return nil +} + +type InputSettings_ORIENTATION_CORRECTION_TYPE int32 + +const ( + InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0 + InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1 +) + +var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{ + 0: "UNCHANGED_ORIENTATION", + 1: "CORRECT_ORIENTATION", +} +var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{ + 
"UNCHANGED_ORIENTATION": 0, + "CORRECT_ORIENTATION": 1, +} + +func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE { + p := new(InputSettings_ORIENTATION_CORRECTION_TYPE) + *p = x + return p +} +func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string { + return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x)) +} +func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE") + if err != nil { + return err + } + *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value) + return nil +} + +type OutputSettings_MIME_TYPE int32 + +const ( + OutputSettings_PNG OutputSettings_MIME_TYPE = 0 + OutputSettings_JPEG OutputSettings_MIME_TYPE = 1 + OutputSettings_WEBP OutputSettings_MIME_TYPE = 2 +) + +var OutputSettings_MIME_TYPE_name = map[int32]string{ + 0: "PNG", + 1: "JPEG", + 2: "WEBP", +} +var OutputSettings_MIME_TYPE_value = map[string]int32{ + "PNG": 0, + "JPEG": 1, + "WEBP": 2, +} + +func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE { + p := new(OutputSettings_MIME_TYPE) + *p = x + return p +} +func (x OutputSettings_MIME_TYPE) String() string { + return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x)) +} +func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE") + if err != nil { + return err + } + *x = OutputSettings_MIME_TYPE(value) + return nil +} + +type CompositeImageOptions_ANCHOR int32 + +const ( + CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0 + CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1 + CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2 + CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3 + CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4 + CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5 + CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6 + CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7 + CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8 +) + +var CompositeImageOptions_ANCHOR_name = map[int32]string{ + 0: "TOP_LEFT", + 1: "TOP", + 2: "TOP_RIGHT", + 3: "LEFT", + 4: "CENTER", + 5: "RIGHT", + 6: "BOTTOM_LEFT", + 7: "BOTTOM", + 8: "BOTTOM_RIGHT", +} +var CompositeImageOptions_ANCHOR_value = map[string]int32{ + "TOP_LEFT": 0, + "TOP": 1, + "TOP_RIGHT": 2, + "LEFT": 3, + "CENTER": 4, + "RIGHT": 5, + "BOTTOM_LEFT": 6, + "BOTTOM": 7, + "BOTTOM_RIGHT": 8, +} + +func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR { + p := new(CompositeImageOptions_ANCHOR) + *p = x + return p +} +func (x CompositeImageOptions_ANCHOR) String() string { + return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x)) +} +func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR") + if err != nil { + return err + } + *x = CompositeImageOptions_ANCHOR(value) + return nil +} + +type ImagesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} } +func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) } +func (*ImagesServiceError) ProtoMessage() {} + +type 
ImagesServiceTransform struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} } +func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) } +func (*ImagesServiceTransform) ProtoMessage() {} + +type Transform struct { + Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"` + Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` + CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"` + CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"` + CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"` + Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"` + HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"` + VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"` + CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"` + CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"` + CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"` + CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"` + Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"` + AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Transform) Reset() { *m = Transform{} } +func (m *Transform) String() string { return proto.CompactTextString(m) } +func (*Transform) ProtoMessage() {} + +const Default_Transform_CropToFit bool = false +const Default_Transform_CropOffsetX float32 = 0.5 +const Default_Transform_CropOffsetY float32 = 0.5 +const Default_Transform_Rotate int32 = 0 +const Default_Transform_HorizontalFlip bool = false +const Default_Transform_VerticalFlip bool = false +const Default_Transform_CropLeftX float32 = 0 +const Default_Transform_CropTopY float32 = 0 +const Default_Transform_CropRightX float32 = 1 +const Default_Transform_CropBottomY float32 = 1 +const Default_Transform_Autolevels bool = false +const Default_Transform_AllowStretch bool = false + +func (m *Transform) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func (m *Transform) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +func (m *Transform) GetCropToFit() bool { + if m != nil && m.CropToFit != nil { + return *m.CropToFit + } + return Default_Transform_CropToFit +} + +func (m *Transform) GetCropOffsetX() float32 { + if m != nil && m.CropOffsetX != nil { + return *m.CropOffsetX + } + return Default_Transform_CropOffsetX +} + +func (m *Transform) GetCropOffsetY() float32 { + if m != nil && m.CropOffsetY != nil { + return *m.CropOffsetY + } + return Default_Transform_CropOffsetY +} + +func (m *Transform) GetRotate() int32 { + if m != nil && m.Rotate != nil { + return *m.Rotate + } + return Default_Transform_Rotate +} + +func (m *Transform) GetHorizontalFlip() bool { + if m != nil && m.HorizontalFlip != nil { + return *m.HorizontalFlip + } + return Default_Transform_HorizontalFlip +} + +func (m *Transform) GetVerticalFlip() bool { + if m != nil && 
m.VerticalFlip != nil { + return *m.VerticalFlip + } + return Default_Transform_VerticalFlip +} + +func (m *Transform) GetCropLeftX() float32 { + if m != nil && m.CropLeftX != nil { + return *m.CropLeftX + } + return Default_Transform_CropLeftX +} + +func (m *Transform) GetCropTopY() float32 { + if m != nil && m.CropTopY != nil { + return *m.CropTopY + } + return Default_Transform_CropTopY +} + +func (m *Transform) GetCropRightX() float32 { + if m != nil && m.CropRightX != nil { + return *m.CropRightX + } + return Default_Transform_CropRightX +} + +func (m *Transform) GetCropBottomY() float32 { + if m != nil && m.CropBottomY != nil { + return *m.CropBottomY + } + return Default_Transform_CropBottomY +} + +func (m *Transform) GetAutolevels() bool { + if m != nil && m.Autolevels != nil { + return *m.Autolevels + } + return Default_Transform_Autolevels +} + +func (m *Transform) GetAllowStretch() bool { + if m != nil && m.AllowStretch != nil { + return *m.AllowStretch + } + return Default_Transform_AllowStretch +} + +type ImageData struct { + Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"` + BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"` + Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"` + Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImageData) Reset() { *m = ImageData{} } +func (m *ImageData) String() string { return proto.CompactTextString(m) } +func (*ImageData) ProtoMessage() {} + +func (m *ImageData) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *ImageData) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func (m *ImageData) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func (m *ImageData) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +type InputSettings struct { + CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"` + ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"` + TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *InputSettings) Reset() { *m = InputSettings{} } +func (m *InputSettings) String() string { return proto.CompactTextString(m) } +func (*InputSettings) ProtoMessage() {} + +const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION +const Default_InputSettings_ParseMetadata bool = false + +func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE { + if m != nil && m.CorrectExifOrientation != nil { + return *m.CorrectExifOrientation + } + return Default_InputSettings_CorrectExifOrientation +} + +func (m *InputSettings) GetParseMetadata() bool { + if m != nil && m.ParseMetadata != nil { + return *m.ParseMetadata + } + return Default_InputSettings_ParseMetadata +} + +func (m *InputSettings) GetTransparentSubstitutionRgb() int32 { + if m != nil && m.TransparentSubstitutionRgb != nil { + return *m.TransparentSubstitutionRgb + } + return 0 +} + +type 
OutputSettings struct { + MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"` + Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OutputSettings) Reset() { *m = OutputSettings{} } +func (m *OutputSettings) String() string { return proto.CompactTextString(m) } +func (*OutputSettings) ProtoMessage() {} + +const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG + +func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE { + if m != nil && m.MimeType != nil { + return *m.MimeType + } + return Default_OutputSettings_MimeType +} + +func (m *OutputSettings) GetQuality() int32 { + if m != nil && m.Quality != nil { + return *m.Quality + } + return 0 +} + +type ImagesTransformRequest struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"` + Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` + Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} } +func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesTransformRequest) ProtoMessage() {} + +func (m *ImagesTransformRequest) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImagesTransformRequest) GetTransform() []*Transform { + if m != nil { + return m.Transform + } + return nil +} + +func (m *ImagesTransformRequest) GetOutput() *OutputSettings { + if m != nil { + return m.Output + } + return nil +} + +func (m *ImagesTransformRequest) GetInput() *InputSettings { + if m != nil { + return m.Input + } + return nil +} + +type ImagesTransformResponse struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} } +func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesTransformResponse) ProtoMessage() {} + +func (m *ImagesTransformResponse) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImagesTransformResponse) GetSourceMetadata() string { + if m != nil && m.SourceMetadata != nil { + return *m.SourceMetadata + } + return "" +} + +type CompositeImageOptions struct { + SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"` + XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"` + YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"` + Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"` + Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} } +func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) } +func (*CompositeImageOptions) ProtoMessage() {} + +func (m *CompositeImageOptions) 
GetSourceIndex() int32 { + if m != nil && m.SourceIndex != nil { + return *m.SourceIndex + } + return 0 +} + +func (m *CompositeImageOptions) GetXOffset() int32 { + if m != nil && m.XOffset != nil { + return *m.XOffset + } + return 0 +} + +func (m *CompositeImageOptions) GetYOffset() int32 { + if m != nil && m.YOffset != nil { + return *m.YOffset + } + return 0 +} + +func (m *CompositeImageOptions) GetOpacity() float32 { + if m != nil && m.Opacity != nil { + return *m.Opacity + } + return 0 +} + +func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR { + if m != nil && m.Anchor != nil { + return *m.Anchor + } + return CompositeImageOptions_TOP_LEFT +} + +type ImagesCanvas struct { + Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"` + Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"` + Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"` + Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} } +func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) } +func (*ImagesCanvas) ProtoMessage() {} + +const Default_ImagesCanvas_Color int32 = -1 + +func (m *ImagesCanvas) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func (m *ImagesCanvas) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +func (m *ImagesCanvas) GetOutput() *OutputSettings { + if m != nil { + return m.Output + } + return nil +} + +func (m *ImagesCanvas) GetColor() int32 { + if m != nil && m.Color != nil { + return *m.Color + } + return Default_ImagesCanvas_Color +} + +type ImagesCompositeRequest struct { + Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"` + Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} } +func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesCompositeRequest) ProtoMessage() {} + +func (m *ImagesCompositeRequest) GetImage() []*ImageData { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas { + if m != nil { + return m.Canvas + } + return nil +} + +type ImagesCompositeResponse struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} } +func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesCompositeResponse) ProtoMessage() {} + +func (m *ImagesCompositeResponse) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +type ImagesHistogramRequest struct { + Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} } +func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesHistogramRequest) 
ProtoMessage() {} + +func (m *ImagesHistogramRequest) GetImage() *ImageData { + if m != nil { + return m.Image + } + return nil +} + +type ImagesHistogram struct { + Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"` + Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"` + Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} } +func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) } +func (*ImagesHistogram) ProtoMessage() {} + +func (m *ImagesHistogram) GetRed() []int32 { + if m != nil { + return m.Red + } + return nil +} + +func (m *ImagesHistogram) GetGreen() []int32 { + if m != nil { + return m.Green + } + return nil +} + +func (m *ImagesHistogram) GetBlue() []int32 { + if m != nil { + return m.Blue + } + return nil +} + +type ImagesHistogramResponse struct { + Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} } +func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesHistogramResponse) ProtoMessage() {} + +func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram { + if m != nil { + return m.Histogram + } + return nil +} + +type ImagesGetUrlBaseRequest struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} } +func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesGetUrlBaseRequest) ProtoMessage() {} + +const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false + +func (m *ImagesGetUrlBaseRequest) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool { + if m != nil && m.CreateSecureUrl != nil { + return *m.CreateSecureUrl + } + return Default_ImagesGetUrlBaseRequest_CreateSecureUrl +} + +type ImagesGetUrlBaseResponse struct { + Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} } +func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesGetUrlBaseResponse) ProtoMessage() {} + +func (m *ImagesGetUrlBaseResponse) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +type ImagesDeleteUrlBaseRequest struct { + BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} } +func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) } +func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {} + +func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string { + if m != nil && m.BlobKey != nil { + return *m.BlobKey + } + return "" +} + +type ImagesDeleteUrlBaseResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} } +func 
(m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) } +func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/image/images_service.proto b/vendor/google.golang.org/appengine/internal/image/images_service.proto new file mode 100644 index 0000000..f0d2ed5 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/image/images_service.proto @@ -0,0 +1,162 @@ +syntax = "proto2"; +option go_package = "image"; + +package appengine; + +message ImagesServiceError { + enum ErrorCode { + UNSPECIFIED_ERROR = 1; + BAD_TRANSFORM_DATA = 2; + NOT_IMAGE = 3; + BAD_IMAGE_DATA = 4; + IMAGE_TOO_LARGE = 5; + INVALID_BLOB_KEY = 6; + ACCESS_DENIED = 7; + OBJECT_NOT_FOUND = 8; + } +} + +message ImagesServiceTransform { + enum Type { + RESIZE = 1; + ROTATE = 2; + HORIZONTAL_FLIP = 3; + VERTICAL_FLIP = 4; + CROP = 5; + IM_FEELING_LUCKY = 6; + } +} + +message Transform { + optional int32 width = 1; + optional int32 height = 2; + optional bool crop_to_fit = 11 [default = false]; + optional float crop_offset_x = 12 [default = 0.5]; + optional float crop_offset_y = 13 [default = 0.5]; + + optional int32 rotate = 3 [default = 0]; + + optional bool horizontal_flip = 4 [default = false]; + + optional bool vertical_flip = 5 [default = false]; + + optional float crop_left_x = 6 [default = 0.0]; + optional float crop_top_y = 7 [default = 0.0]; + optional float crop_right_x = 8 [default = 1.0]; + optional float crop_bottom_y = 9 [default = 1.0]; + + optional bool autolevels = 10 [default = false]; + + optional bool allow_stretch = 14 [default = false]; +} + +message ImageData { + required bytes content = 1 [ctype=CORD]; + optional string blob_key = 2; + + optional int32 width = 3; + optional int32 height = 4; +} + +message InputSettings { + enum ORIENTATION_CORRECTION_TYPE { + UNCHANGED_ORIENTATION = 0; + CORRECT_ORIENTATION = 1; + } + optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1 + [default=UNCHANGED_ORIENTATION]; + optional bool parse_metadata = 2 [default=false]; + optional int32 transparent_substitution_rgb = 3; +} + +message OutputSettings { + enum MIME_TYPE { + PNG = 0; + JPEG = 1; + WEBP = 2; + } + + optional MIME_TYPE mime_type = 1 [default=PNG]; + optional int32 quality = 2; +} + +message ImagesTransformRequest { + required ImageData image = 1; + repeated Transform transform = 2; + required OutputSettings output = 3; + optional InputSettings input = 4; +} + +message ImagesTransformResponse { + required ImageData image = 1; + optional string source_metadata = 2; +} + +message CompositeImageOptions { + required int32 source_index = 1; + required int32 x_offset = 2; + required int32 y_offset = 3; + required float opacity = 4; + + enum ANCHOR { + TOP_LEFT = 0; + TOP = 1; + TOP_RIGHT = 2; + LEFT = 3; + CENTER = 4; + RIGHT = 5; + BOTTOM_LEFT = 6; + BOTTOM = 7; + BOTTOM_RIGHT = 8; + } + + required ANCHOR anchor = 5; +} + +message ImagesCanvas { + required int32 width = 1; + required int32 height = 2; + required OutputSettings output = 3; + optional int32 color = 4 [default=-1]; +} + +message ImagesCompositeRequest { + repeated ImageData image = 1; + repeated CompositeImageOptions options = 2; + required ImagesCanvas canvas = 3; +} + +message ImagesCompositeResponse { + required ImageData image = 1; +} + +message ImagesHistogramRequest { + required ImageData image = 1; +} + +message ImagesHistogram { + repeated int32 red = 1; + repeated int32 green = 2; + repeated int32 blue = 3; +} + 
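+// Each channel list in ImagesHistogram above carries 256 counts, one per 8-bit intensity value.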
+message ImagesHistogramResponse { + required ImagesHistogram histogram = 1; +} + +message ImagesGetUrlBaseRequest { + required string blob_key = 1; + + optional bool create_secure_url = 2 [default = false]; +} + +message ImagesGetUrlBaseResponse { + required string url = 1; +} + +message ImagesDeleteUrlBaseRequest { + required string blob_key = 1; +} + +message ImagesDeleteUrlBaseResponse { +} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 0000000..051ea39 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,110 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. +// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. +func RegisterErrorCodeMap(service string, m map[int32]string) { + errorCodeMaps[service] = m +} + +type timeoutCodeKey struct { + service string + code int32 +} + +// timeoutCodes is the set of service+code pairs that represent timeouts. +var timeoutCodes = make(map[timeoutCodeKey]bool) + +func RegisterTimeoutErrorCode(service string, code int32) { + timeoutCodes[timeoutCodeKey{service, code}] = true +} + +// APIError is the type returned by appengine.Context's Call method +// when an API call fails in an API-specific way. This may be, for instance, +// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. +type APIError struct { + Service string + Detail string + Code int32 // API-specific error code +} + +func (e *APIError) Error() string { + if e.Code == 0 { + if e.Detail == "" { + return "APIError <empty>" + } + return e.Detail + } + s := fmt.Sprintf("API error %d", e.Code) + if m, ok := errorCodeMaps[e.Service]; ok { + s += " (" + e.Service + ": " + m[e.Code] + ")" + } else { + // Shouldn't happen, but provide a bit more detail if it does. + s = e.Service + " " + s + } + if e.Detail != "" { + s += ": " + e.Detail + } + return s +} + +func (e *APIError) IsTimeout() bool { + return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] +} + +// CallError is the type returned by appengine.Context's Call method when an +// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. +type CallError struct { + Detail string + Code int32 + // TODO: Remove this if we get a distinguishable error code.
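+ // Timeout marks calls that failed by timing out; IsTimeout below returns it directly.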
+ Timeout bool +} + +func (e *CallError) Error() string { + var msg string + switch remotepb.RpcError_ErrorCode(e.Code) { + case remotepb.RpcError_UNKNOWN: + return e.Detail + case remotepb.RpcError_OVER_QUOTA: + msg = "Over quota" + case remotepb.RpcError_CAPABILITY_DISABLED: + msg = "Capability disabled" + case remotepb.RpcError_CANCELLED: + msg = "Canceled" + default: + msg = fmt.Sprintf("Call error %d", e.Code) + } + s := msg + ": " + e.Detail + if e.Timeout { + s += " (timeout)" + } + return s +} + +func (e *CallError) IsTimeout() bool { + return e.Timeout +} + +// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. +// The function should be prepared to be called on the same message more than once; it should only modify the +// RPC request the first time. +var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/internal_vm_test.go b/vendor/google.golang.org/appengine/internal/internal_vm_test.go new file mode 100644 index 0000000..f809761 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal_vm_test.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" +) + +func TestInstallingHealthChecker(t *testing.T) { + try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) { + installHealthChecker(mux) + srv := httptest.NewServer(mux) + defer srv.Close() + + resp, err := http.Get(srv.URL + "/_ah/health") + if err != nil { + t.Errorf("%s: http.Get: %v", desc, err) + return + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("%s: reading body: %v", desc, err) + return + } + + if resp.StatusCode != wantCode { + t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode) + return + } + if wantBody != "" && string(body) != wantBody { + t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody) + return + } + } + + // If there's no handlers, or only a root handler, a health checker should be installed. + try("empty mux", http.NewServeMux(), 200, "ok") + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "root handler") + }) + try("mux with root handler", mux, 200, "ok") + + // If there's a custom health check handler, one should not be installed. + mux = http.NewServeMux() + mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(418) + io.WriteString(w, "I'm short and stout!") + }) + try("mux with custom health checker", mux, 418, "I'm short and stout!") +} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go new file mode 100644 index 0000000..20c595b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go @@ -0,0 +1,899 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/log/log_service.proto +// DO NOT EDIT! + +/* +Package log is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/log/log_service.proto + +It has these top-level messages: + LogServiceError + UserAppLogLine + UserAppLogGroup + FlushRequest + SetStatusRequest + LogOffset + LogLine + RequestLog + LogModuleVersion + LogReadRequest + LogReadResponse + LogUsageRecord + LogUsageRequest + LogUsageResponse +*/ +package log + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} + +type LogServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) ProtoMessage() {} + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} + +func 
(m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 
`protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` + PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` + WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return *m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m 
*RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m != nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil && m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m *RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + +func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` + VersionId *string 
`protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` + IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func (*LogReadRequest) ProtoMessage() {} + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + 
return nil +} + +func (m *LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return "" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return 
*m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto new file mode 
100644 index 0000000..8981dc4 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto @@ -0,0 +1,150 @@ +syntax = "proto2"; +option go_package = "log"; + +package appengine; + +message LogServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + STORAGE_ERROR = 2; + } +} + +message UserAppLogLine { + required int64 timestamp_usec = 1; + required int64 level = 2; + required string message = 3; +} + +message UserAppLogGroup { + repeated UserAppLogLine log_line = 2; +} + +message FlushRequest { + optional bytes logs = 1; +} + +message SetStatusRequest { + required string status = 1; +} + + +message LogOffset { + optional bytes request_id = 1; +} + +message LogLine { + required int64 time = 1; + required int32 level = 2; + required string log_message = 3; +} + +message RequestLog { + required string app_id = 1; + optional string module_id = 37 [default="default"]; + required string version_id = 2; + required bytes request_id = 3; + optional LogOffset offset = 35; + required string ip = 4; + optional string nickname = 5; + required int64 start_time = 6; + required int64 end_time = 7; + required int64 latency = 8; + required int64 mcycles = 9; + required string method = 10; + required string resource = 11; + required string http_version = 12; + required int32 status = 13; + required int64 response_size = 14; + optional string referrer = 15; + optional string user_agent = 16; + required string url_map_entry = 17; + required string combined = 18; + optional int64 api_mcycles = 19; + optional string host = 20; + optional double cost = 21; + + optional string task_queue_name = 22; + optional string task_name = 23; + + optional bool was_loading_request = 24; + optional int64 pending_time = 25; + optional int32 replica_index = 26 [default = -1]; + optional bool finished = 27 [default = true]; + optional bytes clone_key = 28; + + repeated LogLine line = 29; + + optional bool lines_incomplete = 36; + optional bytes app_engine_release = 38; + + optional int32 exit_reason = 30; + optional bool was_throttled_for_time = 31; + optional bool was_throttled_for_requests = 32; + optional int64 throttled_time = 33; + + optional bytes server_name = 34; +} + +message LogModuleVersion { + optional string module_id = 1 [default="default"]; + optional string version_id = 2; +} + +message LogReadRequest { + required string app_id = 1; + repeated string version_id = 2; + repeated LogModuleVersion module_version = 19; + + optional int64 start_time = 3; + optional int64 end_time = 4; + optional LogOffset offset = 5; + repeated bytes request_id = 6; + + optional int32 minimum_log_level = 7; + optional bool include_incomplete = 8; + optional int64 count = 9; + + optional string combined_log_regex = 14; + optional string host_regex = 15; + optional int32 replica_index = 16; + + optional bool include_app_logs = 10; + optional int32 app_logs_per_request = 17; + optional bool include_host = 11; + optional bool include_all = 12; + optional bool cache_iterator = 13; + optional int32 num_shards = 18; +} + +message LogReadResponse { + repeated RequestLog log = 1; + optional LogOffset offset = 2; + optional int64 last_end_time = 3; +} + +message LogUsageRecord { + optional string version_id = 1; + optional int32 start_time = 2; + optional int32 end_time = 3; + optional int64 count = 4; + optional int64 total_size = 5; + optional int32 records = 6; +} + +message LogUsageRequest { + required string app_id = 1; + repeated string version_id = 2; + optional int32 start_time = 3; + optional int32 
end_time = 4; + optional uint32 resolution_hours = 5 [default = 1]; + optional bool combine_versions = 6; + optional int32 usage_version = 7; + optional bool versions_only = 8; +} + +message LogUsageResponse { + repeated LogUsageRecord usage = 1; + optional LogUsageRecord summary = 2; +} diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go new file mode 100644 index 0000000..b8d5f03 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go @@ -0,0 +1,229 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/mail/mail_service.proto +// DO NOT EDIT! + +/* +Package mail is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/mail/mail_service.proto + +It has these top-level messages: + MailServiceError + MailAttachment + MailHeader + MailMessage +*/ +package mail + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type MailServiceError_ErrorCode int32 + +const ( + MailServiceError_OK MailServiceError_ErrorCode = 0 + MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1 + MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2 + MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3 + MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4 + MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5 + MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6 +) + +var MailServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "BAD_REQUEST", + 3: "UNAUTHORIZED_SENDER", + 4: "INVALID_ATTACHMENT_TYPE", + 5: "INVALID_HEADER_NAME", + 6: "INVALID_CONTENT_ID", +} +var MailServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "BAD_REQUEST": 2, + "UNAUTHORIZED_SENDER": 3, + "INVALID_ATTACHMENT_TYPE": 4, + "INVALID_HEADER_NAME": 5, + "INVALID_CONTENT_ID": 6, +} + +func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode { + p := new(MailServiceError_ErrorCode) + *p = x + return p +} +func (x MailServiceError_ErrorCode) String() string { + return proto.EnumName(MailServiceError_ErrorCode_name, int32(x)) +} +func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode") + if err != nil { + return err + } + *x = MailServiceError_ErrorCode(value) + return nil +} + +type MailServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailServiceError) Reset() { *m = MailServiceError{} } +func (m *MailServiceError) String() string { return proto.CompactTextString(m) } +func (*MailServiceError) ProtoMessage() {} + +type MailAttachment struct { + FileName *string `protobuf:"bytes,1,req,name=FileName" json:"FileName,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"` + ContentID *string `protobuf:"bytes,3,opt,name=ContentID" json:"ContentID,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailAttachment) Reset() { *m = MailAttachment{} } +func (m *MailAttachment) String() string { return proto.CompactTextString(m) } +func (*MailAttachment) ProtoMessage() {} + +func (m *MailAttachment) GetFileName() string { + 
if m != nil && m.FileName != nil { + return *m.FileName + } + return "" +} + +func (m *MailAttachment) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *MailAttachment) GetContentID() string { + if m != nil && m.ContentID != nil { + return *m.ContentID + } + return "" +} + +type MailHeader struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailHeader) Reset() { *m = MailHeader{} } +func (m *MailHeader) String() string { return proto.CompactTextString(m) } +func (*MailHeader) ProtoMessage() {} + +func (m *MailHeader) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MailHeader) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type MailMessage struct { + Sender *string `protobuf:"bytes,1,req,name=Sender" json:"Sender,omitempty"` + ReplyTo *string `protobuf:"bytes,2,opt,name=ReplyTo" json:"ReplyTo,omitempty"` + To []string `protobuf:"bytes,3,rep,name=To" json:"To,omitempty"` + Cc []string `protobuf:"bytes,4,rep,name=Cc" json:"Cc,omitempty"` + Bcc []string `protobuf:"bytes,5,rep,name=Bcc" json:"Bcc,omitempty"` + Subject *string `protobuf:"bytes,6,req,name=Subject" json:"Subject,omitempty"` + TextBody *string `protobuf:"bytes,7,opt,name=TextBody" json:"TextBody,omitempty"` + HtmlBody *string `protobuf:"bytes,8,opt,name=HtmlBody" json:"HtmlBody,omitempty"` + Attachment []*MailAttachment `protobuf:"bytes,9,rep,name=Attachment" json:"Attachment,omitempty"` + Header []*MailHeader `protobuf:"bytes,10,rep,name=Header" json:"Header,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MailMessage) Reset() { *m = MailMessage{} } +func (m *MailMessage) String() string { return proto.CompactTextString(m) } +func (*MailMessage) ProtoMessage() {} + +func (m *MailMessage) GetSender() string { + if m != nil && m.Sender != nil { + return *m.Sender + } + return "" +} + +func (m *MailMessage) GetReplyTo() string { + if m != nil && m.ReplyTo != nil { + return *m.ReplyTo + } + return "" +} + +func (m *MailMessage) GetTo() []string { + if m != nil { + return m.To + } + return nil +} + +func (m *MailMessage) GetCc() []string { + if m != nil { + return m.Cc + } + return nil +} + +func (m *MailMessage) GetBcc() []string { + if m != nil { + return m.Bcc + } + return nil +} + +func (m *MailMessage) GetSubject() string { + if m != nil && m.Subject != nil { + return *m.Subject + } + return "" +} + +func (m *MailMessage) GetTextBody() string { + if m != nil && m.TextBody != nil { + return *m.TextBody + } + return "" +} + +func (m *MailMessage) GetHtmlBody() string { + if m != nil && m.HtmlBody != nil { + return *m.HtmlBody + } + return "" +} + +func (m *MailMessage) GetAttachment() []*MailAttachment { + if m != nil { + return m.Attachment + } + return nil +} + +func (m *MailMessage) GetHeader() []*MailHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/mail/mail_service.proto b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto new file mode 100644 index 0000000..4e57b7a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/mail/mail_service.proto @@ -0,0 +1,45 @@ +syntax = "proto2"; +option go_package = "mail"; + +package appengine; + +message MailServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + 
BAD_REQUEST = 2; + UNAUTHORIZED_SENDER = 3; + INVALID_ATTACHMENT_TYPE = 4; + INVALID_HEADER_NAME = 5; + INVALID_CONTENT_ID = 6; + } +} + +message MailAttachment { + required string FileName = 1; + required bytes Data = 2; + optional string ContentID = 3; +} + +message MailHeader { + required string name = 1; + required string value = 2; +} + +message MailMessage { + required string Sender = 1; + optional string ReplyTo = 2; + + repeated string To = 3; + repeated string Cc = 4; + repeated string Bcc = 5; + + required string Subject = 6; + + optional string TextBody = 7; + optional string HtmlBody = 8; + + repeated MailAttachment Attachment = 9; + + repeated MailHeader Header = 10; +} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go new file mode 100644 index 0000000..4903616 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -0,0 +1,15 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine_internal" +) + +func Main() { + appengine_internal.Main() +} diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go new file mode 100644 index 0000000..57331ad --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -0,0 +1,44 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "log" + "net/http" + "net/url" + "os" +) + +func Main() { + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go new file mode 100644 index 0000000..252fef8 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go @@ -0,0 +1,938 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/memcache/memcache_service.proto +// DO NOT EDIT! + +/* +Package memcache is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/memcache/memcache_service.proto + +It has these top-level messages: + MemcacheServiceError + AppOverride + MemcacheGetRequest + MemcacheGetResponse + MemcacheSetRequest + MemcacheSetResponse + MemcacheDeleteRequest + MemcacheDeleteResponse + MemcacheIncrementRequest + MemcacheIncrementResponse + MemcacheBatchIncrementRequest + MemcacheBatchIncrementResponse + MemcacheFlushRequest + MemcacheFlushResponse + MemcacheStatsRequest + MergedNamespaceStats + MemcacheStatsResponse + MemcacheGrabTailRequest + MemcacheGrabTailResponse +*/ +package memcache + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type MemcacheServiceError_ErrorCode int32 + +const ( + MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0 + MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1 + MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2 + MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3 + MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6 +) + +var MemcacheServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "UNSPECIFIED_ERROR", + 2: "NAMESPACE_NOT_SET", + 3: "PERMISSION_DENIED", + 6: "INVALID_VALUE", +} +var MemcacheServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "UNSPECIFIED_ERROR": 1, + "NAMESPACE_NOT_SET": 2, + "PERMISSION_DENIED": 3, + "INVALID_VALUE": 6, +} + +func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode { + p := new(MemcacheServiceError_ErrorCode) + *p = x + return p +} +func (x MemcacheServiceError_ErrorCode) String() string { + return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x)) +} +func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode") + if err != nil { + return err + } + *x = MemcacheServiceError_ErrorCode(value) + return nil +} + +type MemcacheSetRequest_SetPolicy int32 + +const ( + MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1 + MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2 + MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3 + MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4 +) + +var MemcacheSetRequest_SetPolicy_name = map[int32]string{ + 1: "SET", + 2: "ADD", + 3: "REPLACE", + 4: "CAS", +} +var MemcacheSetRequest_SetPolicy_value = map[string]int32{ + "SET": 1, + "ADD": 2, + "REPLACE": 3, + "CAS": 4, +} + +func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy { + p := new(MemcacheSetRequest_SetPolicy) + *p = x + return p +} +func (x MemcacheSetRequest_SetPolicy) String() string { + return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x)) +} +func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy") + if err != nil { + return err + } + *x = MemcacheSetRequest_SetPolicy(value) + return nil +} + +type MemcacheSetResponse_SetStatusCode int32 + +const ( + MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1 + MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2 + MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3 + 
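// Illustrative gloss, not in the generated source: EXISTS below reports a compare-and-swap conflict, + // i.e. the item changed after the for_cas get that captured its cas_id. +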
MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4 +) + +var MemcacheSetResponse_SetStatusCode_name = map[int32]string{ + 1: "STORED", + 2: "NOT_STORED", + 3: "ERROR", + 4: "EXISTS", +} +var MemcacheSetResponse_SetStatusCode_value = map[string]int32{ + "STORED": 1, + "NOT_STORED": 2, + "ERROR": 3, + "EXISTS": 4, +} + +func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode { + p := new(MemcacheSetResponse_SetStatusCode) + *p = x + return p +} +func (x MemcacheSetResponse_SetStatusCode) String() string { + return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x)) +} +func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode") + if err != nil { + return err + } + *x = MemcacheSetResponse_SetStatusCode(value) + return nil +} + +type MemcacheDeleteResponse_DeleteStatusCode int32 + +const ( + MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1 + MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2 +) + +var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{ + 1: "DELETED", + 2: "NOT_FOUND", +} +var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{ + "DELETED": 1, + "NOT_FOUND": 2, +} + +func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode { + p := new(MemcacheDeleteResponse_DeleteStatusCode) + *p = x + return p +} +func (x MemcacheDeleteResponse_DeleteStatusCode) String() string { + return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x)) +} +func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode") + if err != nil { + return err + } + *x = MemcacheDeleteResponse_DeleteStatusCode(value) + return nil +} + +type MemcacheIncrementRequest_Direction int32 + +const ( + MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1 + MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2 +) + +var MemcacheIncrementRequest_Direction_name = map[int32]string{ + 1: "INCREMENT", + 2: "DECREMENT", +} +var MemcacheIncrementRequest_Direction_value = map[string]int32{ + "INCREMENT": 1, + "DECREMENT": 2, +} + +func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction { + p := new(MemcacheIncrementRequest_Direction) + *p = x + return p +} +func (x MemcacheIncrementRequest_Direction) String() string { + return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x)) +} +func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction") + if err != nil { + return err + } + *x = MemcacheIncrementRequest_Direction(value) + return nil +} + +type MemcacheIncrementResponse_IncrementStatusCode int32 + +const ( + MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1 + MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2 + MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3 +) + +var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{ + 1: "OK", + 2: "NOT_CHANGED", + 3: "ERROR", +} +var 
MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{ + "OK": 1, + "NOT_CHANGED": 2, + "ERROR": 3, +} + +func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode { + p := new(MemcacheIncrementResponse_IncrementStatusCode) + *p = x + return p +} +func (x MemcacheIncrementResponse_IncrementStatusCode) String() string { + return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x)) +} +func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode") + if err != nil { + return err + } + *x = MemcacheIncrementResponse_IncrementStatusCode(value) + return nil +} + +type MemcacheServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} } +func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) } +func (*MemcacheServiceError) ProtoMessage() {} + +type AppOverride struct { + AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"` + IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"` + MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"` + MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AppOverride) Reset() { *m = AppOverride{} } +func (m *AppOverride) String() string { return proto.CompactTextString(m) } +func (*AppOverride) ProtoMessage() {} + +func (m *AppOverride) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *AppOverride) GetNumMemcachegBackends() int32 { + if m != nil && m.NumMemcachegBackends != nil { + return *m.NumMemcachegBackends + } + return 0 +} + +func (m *AppOverride) GetIgnoreShardlock() bool { + if m != nil && m.IgnoreShardlock != nil { + return *m.IgnoreShardlock + } + return false +} + +func (m *AppOverride) GetMemcachePoolHint() string { + if m != nil && m.MemcachePoolHint != nil { + return *m.MemcachePoolHint + } + return "" +} + +func (m *AppOverride) GetMemcacheShardingStrategy() []byte { + if m != nil { + return m.MemcacheShardingStrategy + } + return nil +} + +type MemcacheGetRequest struct { + Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` + ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"` + Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} } +func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheGetRequest) ProtoMessage() {} + +func (m *MemcacheGetRequest) GetKey() [][]byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheGetRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheGetRequest) GetForCas() bool { + if m != nil && m.ForCas != nil { + 
return *m.ForCas + } + return false +} + +func (m *MemcacheGetRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheGetResponse struct { + Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} } +func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheGetResponse) ProtoMessage() {} + +func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item { + if m != nil { + return m.Item + } + return nil +} + +type MemcacheGetResponse_Item struct { + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` + CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"` + ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} } +func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheGetResponse_Item) ProtoMessage() {} + +func (m *MemcacheGetResponse_Item) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheGetResponse_Item) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MemcacheGetResponse_Item) GetFlags() uint32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return 0 +} + +func (m *MemcacheGetResponse_Item) GetCasId() uint64 { + if m != nil && m.CasId != nil { + return *m.CasId + } + return 0 +} + +func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 { + if m != nil && m.ExpiresInSeconds != nil { + return *m.ExpiresInSeconds + } + return 0 +} + +type MemcacheSetRequest struct { + Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"` + Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} } +func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheSetRequest) ProtoMessage() {} + +func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *MemcacheSetRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheSetRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheSetRequest_Item struct { + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` + SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"` + ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"` + CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"` + ForCas 
*bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} } +func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheSetRequest_Item) ProtoMessage() {} + +const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET +const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0 + +func (m *MemcacheSetRequest_Item) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheSetRequest_Item) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MemcacheSetRequest_Item) GetFlags() uint32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return 0 +} + +func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy { + if m != nil && m.SetPolicy != nil { + return *m.SetPolicy + } + return Default_MemcacheSetRequest_Item_SetPolicy +} + +func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return Default_MemcacheSetRequest_Item_ExpirationTime +} + +func (m *MemcacheSetRequest_Item) GetCasId() uint64 { + if m != nil && m.CasId != nil { + return *m.CasId + } + return 0 +} + +func (m *MemcacheSetRequest_Item) GetForCas() bool { + if m != nil && m.ForCas != nil { + return *m.ForCas + } + return false +} + +type MemcacheSetResponse struct { + SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} } +func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheSetResponse) ProtoMessage() {} + +func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode { + if m != nil { + return m.SetStatus + } + return nil +} + +type MemcacheDeleteRequest struct { + Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` + Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} } +func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheDeleteRequest) ProtoMessage() {} + +func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *MemcacheDeleteRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheDeleteRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheDeleteRequest_Item struct { + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} } +func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheDeleteRequest_Item) ProtoMessage() {} + +const 
Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0 + +func (m *MemcacheDeleteRequest_Item) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 { + if m != nil && m.DeleteTime != nil { + return *m.DeleteTime + } + return Default_MemcacheDeleteRequest_Item_DeleteTime +} + +type MemcacheDeleteResponse struct { + DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} } +func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheDeleteResponse) ProtoMessage() {} + +func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode { + if m != nil { + return m.DeleteStatus + } + return nil +} + +type MemcacheIncrementRequest struct { + Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` + Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"` + Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"` + InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"` + InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"` + Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} } +func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheIncrementRequest) ProtoMessage() {} + +const Default_MemcacheIncrementRequest_Delta uint64 = 1 +const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT + +func (m *MemcacheIncrementRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MemcacheIncrementRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheIncrementRequest) GetDelta() uint64 { + if m != nil && m.Delta != nil { + return *m.Delta + } + return Default_MemcacheIncrementRequest_Delta +} + +func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_MemcacheIncrementRequest_Direction +} + +func (m *MemcacheIncrementRequest) GetInitialValue() uint64 { + if m != nil && m.InitialValue != nil { + return *m.InitialValue + } + return 0 +} + +func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 { + if m != nil && m.InitialFlags != nil { + return *m.InitialFlags + } + return 0 +} + +func (m *MemcacheIncrementRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheIncrementResponse struct { + NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"` + IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" 
json:"increment_status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} } +func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheIncrementResponse) ProtoMessage() {} + +func (m *MemcacheIncrementResponse) GetNewValue() uint64 { + if m != nil && m.NewValue != nil { + return *m.NewValue + } + return 0 +} + +func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode { + if m != nil && m.IncrementStatus != nil { + return *m.IncrementStatus + } + return MemcacheIncrementResponse_OK +} + +type MemcacheBatchIncrementRequest struct { + NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"` + Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"` + Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} } +func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheBatchIncrementRequest) ProtoMessage() {} + +func (m *MemcacheBatchIncrementRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest { + if m != nil { + return m.Item + } + return nil +} + +func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheBatchIncrementResponse struct { + Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} } +func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheBatchIncrementResponse) ProtoMessage() {} + +func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse { + if m != nil { + return m.Item + } + return nil +} + +type MemcacheFlushRequest struct { + Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} } +func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheFlushRequest) ProtoMessage() {} + +func (m *MemcacheFlushRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheFlushResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} } +func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheFlushResponse) ProtoMessage() {} + +type MemcacheStatsRequest struct { + Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} } +func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheStatsRequest) ProtoMessage() {} + +func (m *MemcacheStatsRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MergedNamespaceStats 
struct { + Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"` + Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"` + ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"` + Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"` + Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"` + OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} } +func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) } +func (*MergedNamespaceStats) ProtoMessage() {} + +func (m *MergedNamespaceStats) GetHits() uint64 { + if m != nil && m.Hits != nil { + return *m.Hits + } + return 0 +} + +func (m *MergedNamespaceStats) GetMisses() uint64 { + if m != nil && m.Misses != nil { + return *m.Misses + } + return 0 +} + +func (m *MergedNamespaceStats) GetByteHits() uint64 { + if m != nil && m.ByteHits != nil { + return *m.ByteHits + } + return 0 +} + +func (m *MergedNamespaceStats) GetItems() uint64 { + if m != nil && m.Items != nil { + return *m.Items + } + return 0 +} + +func (m *MergedNamespaceStats) GetBytes() uint64 { + if m != nil && m.Bytes != nil { + return *m.Bytes + } + return 0 +} + +func (m *MergedNamespaceStats) GetOldestItemAge() uint32 { + if m != nil && m.OldestItemAge != nil { + return *m.OldestItemAge + } + return 0 +} + +type MemcacheStatsResponse struct { + Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} } +func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheStatsResponse) ProtoMessage() {} + +func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats { + if m != nil { + return m.Stats + } + return nil +} + +type MemcacheGrabTailRequest struct { + ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"` + NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` + Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} } +func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) } +func (*MemcacheGrabTailRequest) ProtoMessage() {} + +func (m *MemcacheGrabTailRequest) GetItemCount() int32 { + if m != nil && m.ItemCount != nil { + return *m.ItemCount + } + return 0 +} + +func (m *MemcacheGrabTailRequest) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride { + if m != nil { + return m.Override + } + return nil +} + +type MemcacheGrabTailResponse struct { + Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} } +func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) } +func (*MemcacheGrabTailResponse) ProtoMessage() {} + +func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item { + if m != nil { + return m.Item + } + return nil +} + +type 
MemcacheGrabTailResponse_Item struct { + Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} } +func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) } +func (*MemcacheGrabTailResponse_Item) ProtoMessage() {} + +func (m *MemcacheGrabTailResponse_Item) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto new file mode 100644 index 0000000..5f0edcd --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto @@ -0,0 +1,165 @@ +syntax = "proto2"; +option go_package = "memcache"; + +package appengine; + +message MemcacheServiceError { + enum ErrorCode { + OK = 0; + UNSPECIFIED_ERROR = 1; + NAMESPACE_NOT_SET = 2; + PERMISSION_DENIED = 3; + INVALID_VALUE = 6; + } +} + +message AppOverride { + required string app_id = 1; + + optional int32 num_memcacheg_backends = 2 [deprecated=true]; + optional bool ignore_shardlock = 3 [deprecated=true]; + optional string memcache_pool_hint = 4 [deprecated=true]; + optional bytes memcache_sharding_strategy = 5 [deprecated=true]; +} + +message MemcacheGetRequest { + repeated bytes key = 1; + optional string name_space = 2 [default = ""]; + optional bool for_cas = 4; + optional AppOverride override = 5; +} + +message MemcacheGetResponse { + repeated group Item = 1 { + required bytes key = 2; + required bytes value = 3; + optional fixed32 flags = 4; + optional fixed64 cas_id = 5; + optional int32 expires_in_seconds = 6; + } +} + +message MemcacheSetRequest { + enum SetPolicy { + SET = 1; + ADD = 2; + REPLACE = 3; + CAS = 4; + } + repeated group Item = 1 { + required bytes key = 2; + required bytes value = 3; + + optional fixed32 flags = 4; + optional SetPolicy set_policy = 5 [default = SET]; + optional fixed32 expiration_time = 6 [default = 0]; + + optional fixed64 cas_id = 8; + optional bool for_cas = 9; + } + optional string name_space = 7 [default = ""]; + optional AppOverride override = 10; +} + +message MemcacheSetResponse { + enum SetStatusCode { + STORED = 1; + NOT_STORED = 2; + ERROR = 3; + EXISTS = 4; + } + repeated SetStatusCode set_status = 1; +} + +message MemcacheDeleteRequest { + repeated group Item = 1 { + required bytes key = 2; + optional fixed32 delete_time = 3 [default = 0]; + } + optional string name_space = 4 [default = ""]; + optional AppOverride override = 5; +} + +message MemcacheDeleteResponse { + enum DeleteStatusCode { + DELETED = 1; + NOT_FOUND = 2; + } + repeated DeleteStatusCode delete_status = 1; +} + +message MemcacheIncrementRequest { + enum Direction { + INCREMENT = 1; + DECREMENT = 2; + } + required bytes key = 1; + optional string name_space = 4 [default = ""]; + + optional uint64 delta = 2 [default = 1]; + optional Direction direction = 3 [default = INCREMENT]; + + optional uint64 initial_value = 5; + optional fixed32 initial_flags = 6; + optional AppOverride override = 7; +} + +message MemcacheIncrementResponse { + enum IncrementStatusCode { + OK = 1; + NOT_CHANGED = 2; + ERROR = 3; + } + + optional 
uint64 new_value = 1; + optional IncrementStatusCode increment_status = 2; +} + +message MemcacheBatchIncrementRequest { + optional string name_space = 1 [default = ""]; + repeated MemcacheIncrementRequest item = 2; + optional AppOverride override = 3; +} + +message MemcacheBatchIncrementResponse { + repeated MemcacheIncrementResponse item = 1; +} + +message MemcacheFlushRequest { + optional AppOverride override = 1; +} + +message MemcacheFlushResponse { +} + +message MemcacheStatsRequest { + optional AppOverride override = 1; +} + +message MergedNamespaceStats { + required uint64 hits = 1; + required uint64 misses = 2; + required uint64 byte_hits = 3; + + required uint64 items = 4; + required uint64 bytes = 5; + + required fixed32 oldest_item_age = 6; +} + +message MemcacheStatsResponse { + optional MergedNamespaceStats stats = 1; +} + +message MemcacheGrabTailRequest { + required int32 item_count = 1; + optional string name_space = 2 [default = ""]; + optional AppOverride override = 3; +} + +message MemcacheGrabTailResponse { + repeated group Item = 1 { + required bytes value = 2; + optional fixed32 flags = 3; + } +} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 0000000..9cc1f71 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,61 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? +func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + log.Fatalf("Metadata fetch failed: %v", err) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. + req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 0000000..a0145ed --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/modules/modules_service.proto +// DO NOT EDIT! + +/* +Package modules is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/modules/modules_service.proto + +It has these top-level messages: + ModulesServiceError + GetModulesRequest + GetModulesResponse + GetVersionsRequest + GetVersionsResponse + GetDefaultVersionRequest + GetDefaultVersionResponse + GetNumInstancesRequest + GetNumInstancesResponse + SetNumInstancesRequest + SetNumInstancesResponse + StartModuleRequest + StartModuleResponse + StopModuleRequest + StopModuleResponse + GetHostnameRequest + GetHostnameResponse +*/ +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} + +type ModulesServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} + +type GetModulesRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func 
(*GetVersionsRequest) ProtoMessage() {} + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module 
!= nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" 
json:"hostname,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto new file mode 100644 index 0000000..d29f006 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto @@ -0,0 +1,80 @@ +syntax = "proto2"; +option go_package = "modules"; + +package appengine; + +message ModulesServiceError { + enum ErrorCode { + OK = 0; + INVALID_MODULE = 1; + INVALID_VERSION = 2; + INVALID_INSTANCES = 3; + TRANSIENT_ERROR = 4; + UNEXPECTED_STATE = 5; + } +} + +message GetModulesRequest { +} + +message GetModulesResponse { + repeated string module = 1; +} + +message GetVersionsRequest { + optional string module = 1; +} + +message GetVersionsResponse { + repeated string version = 1; +} + +message GetDefaultVersionRequest { + optional string module = 1; +} + +message GetDefaultVersionResponse { + required string version = 1; +} + +message GetNumInstancesRequest { + optional string module = 1; + optional string version = 2; +} + +message GetNumInstancesResponse { + required int64 instances = 1; +} + +message SetNumInstancesRequest { + optional string module = 1; + optional string version = 2; + required int64 instances = 3; +} + +message SetNumInstancesResponse {} + +message StartModuleRequest { + required string module = 1; + required string version = 2; +} + +message StartModuleResponse {} + +message StopModuleRequest { + optional string module = 1; + optional string version = 2; +} + +message StopModuleResponse {} + +message GetHostnameRequest { + optional string module = 1; + optional string version = 2; + optional string instance = 3; +} + +message GetHostnameResponse { + required string hostname = 1; +} + diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 0000000..3b94cf0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. 
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/google.golang.org/appengine/internal/net_test.go b/vendor/google.golang.org/appengine/internal/net_test.go new file mode 100644 index 0000000..24da8bb --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net_test.go @@ -0,0 +1,58 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "sync" + "testing" + "time" + + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" +) + +func TestDialLimit(t *testing.T) { + // Fill up semaphore with false acquisitions to permit only two TCP connections at a time. + // We don't replace limitSem because that results in a data race when net/http lazily closes connections. + nFake := cap(limitSem) - 2 + for i := 0; i < nFake; i++ { + limitSem <- 1 + } + defer func() { + for i := 0; i < nFake; i++ { + <-limitSem + } + }() + + f, c, cleanup := setup() // setup is in api_test.go + defer cleanup() + f.hang = make(chan int) + + // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed), + // then the simple Non200 RPC should hang. + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + Call(toContext(c), "errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{}) + }() + } + time.Sleep(50 * time.Millisecond) // let those two RPCs start + + ctx, _ := netcontext.WithTimeout(toContext(c), 50*time.Millisecond) + err := Call(ctx, "errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{}) + if err != errTimeout { + t.Errorf("Non200 RPC returned with err %v, want errTimeout", err) + } + + // Drain the two RunSlowly calls. + f.hang <- 1 + f.hang <- 1 + wg.Wait() +} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh new file mode 100755 index 0000000..2fdb546 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/regen.sh @@ -0,0 +1,40 @@ +#!/bin/bash -e +# +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. + +PKG=google.golang.org/appengine + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +# Run protoc once per package. +for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done + +for f in $(find $PKG/internal -name '*.pb.go'); do + # Remove proto.RegisterEnum calls. + # These cause duplicate registration panics when these packages + # are used on classic App Engine. 
proto.RegisterEnum only affects + # parsing the text format; we don't care about that. + # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 + sed -i '/proto.RegisterEnum/d' $f +done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 0000000..526bd39 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto +// DO NOT EDIT! + +/* +Package remote_api is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/remote_api/remote_api.proto + +It has these top-level messages: + Request + ApplicationError + RpcError + Response +*/ +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} + +type Request struct { + ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) 
ProtoMessage() {} + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) ProtoMessage() {} + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto new file mode 100644 index 0000000..f21763a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto @@ -0,0 +1,44 @@ +syntax = "proto2"; +option go_package = "remote_api"; + +package remote_api; + +message Request { + required string service_name = 2; + required string method = 3; + required bytes request = 4; + optional string request_id = 5; +} 
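+
+// Informal usage sketch (inferred from the messages in this file, not part
+// of the generated API): a caller fills Request with the target service_name
+// and method plus the serialized request bytes, and reads back a Response
+// carrying either serialized response bytes or one of the error payloads
+// defined below.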
+ +message ApplicationError { + required int32 code = 1; + required string detail = 2; +} + +message RpcError { + enum ErrorCode { + UNKNOWN = 0; + CALL_NOT_FOUND = 1; + PARSE_ERROR = 2; + SECURITY_VIOLATION = 3; + OVER_QUOTA = 4; + REQUEST_TOO_LARGE = 5; + CAPABILITY_DISABLED = 6; + FEATURE_DISABLED = 7; + BAD_REQUEST = 8; + RESPONSE_TOO_LARGE = 9; + CANCELLED = 10; + REPLAY_ERROR = 11; + DEADLINE_EXCEEDED = 12; + } + required int32 code = 1; + optional string detail = 2; +} + +message Response { + optional bytes response = 1; + optional bytes exception = 2; + optional ApplicationError application_error = 3; + optional bytes java_exception = 4; + optional RpcError rpc_error = 5; +} diff --git a/vendor/google.golang.org/appengine/internal/search/search.pb.go b/vendor/google.golang.org/appengine/internal/search/search.pb.go new file mode 100644 index 0000000..7d8d11d --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/search/search.pb.go @@ -0,0 +1,2127 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/search/search.proto +// DO NOT EDIT! + +/* +Package search is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/search/search.proto + +It has these top-level messages: + Scope + Entry + AccessControlList + FieldValue + Field + FieldTypes + IndexShardSettings + FacetValue + Facet + DocumentMetadata + Document + SearchServiceError + RequestStatus + IndexSpec + IndexMetadata + IndexDocumentParams + IndexDocumentRequest + IndexDocumentResponse + DeleteDocumentParams + DeleteDocumentRequest + DeleteDocumentResponse + ListDocumentsParams + ListDocumentsRequest + ListDocumentsResponse + ListIndexesParams + ListIndexesRequest + ListIndexesResponse + DeleteSchemaParams + DeleteSchemaRequest + DeleteSchemaResponse + SortSpec + ScorerSpec + FieldSpec + FacetRange + FacetRequestParam + FacetAutoDetectParam + FacetRequest + FacetRefinement + SearchParams + SearchRequest + FacetResultValue + FacetResult + SearchResult + SearchResponse +*/ +package search + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
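+// (The blank-identifier assignments below count as uses, so the proto, fmt,
+// and math imports compile even when a generated file needs none of them.)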
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Scope_Type int32 + +const ( + Scope_USER_BY_CANONICAL_ID Scope_Type = 1 + Scope_USER_BY_EMAIL Scope_Type = 2 + Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3 + Scope_GROUP_BY_EMAIL Scope_Type = 4 + Scope_GROUP_BY_DOMAIN Scope_Type = 5 + Scope_ALL_USERS Scope_Type = 6 + Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7 +) + +var Scope_Type_name = map[int32]string{ + 1: "USER_BY_CANONICAL_ID", + 2: "USER_BY_EMAIL", + 3: "GROUP_BY_CANONICAL_ID", + 4: "GROUP_BY_EMAIL", + 5: "GROUP_BY_DOMAIN", + 6: "ALL_USERS", + 7: "ALL_AUTHENTICATED_USERS", +} +var Scope_Type_value = map[string]int32{ + "USER_BY_CANONICAL_ID": 1, + "USER_BY_EMAIL": 2, + "GROUP_BY_CANONICAL_ID": 3, + "GROUP_BY_EMAIL": 4, + "GROUP_BY_DOMAIN": 5, + "ALL_USERS": 6, + "ALL_AUTHENTICATED_USERS": 7, +} + +func (x Scope_Type) Enum() *Scope_Type { + p := new(Scope_Type) + *p = x + return p +} +func (x Scope_Type) String() string { + return proto.EnumName(Scope_Type_name, int32(x)) +} +func (x *Scope_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type") + if err != nil { + return err + } + *x = Scope_Type(value) + return nil +} + +type Entry_Permission int32 + +const ( + Entry_READ Entry_Permission = 1 + Entry_WRITE Entry_Permission = 2 + Entry_FULL_CONTROL Entry_Permission = 3 +) + +var Entry_Permission_name = map[int32]string{ + 1: "READ", + 2: "WRITE", + 3: "FULL_CONTROL", +} +var Entry_Permission_value = map[string]int32{ + "READ": 1, + "WRITE": 2, + "FULL_CONTROL": 3, +} + +func (x Entry_Permission) Enum() *Entry_Permission { + p := new(Entry_Permission) + *p = x + return p +} +func (x Entry_Permission) String() string { + return proto.EnumName(Entry_Permission_name, int32(x)) +} +func (x *Entry_Permission) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission") + if err != nil { + return err + } + *x = Entry_Permission(value) + return nil +} + +type FieldValue_ContentType int32 + +const ( + FieldValue_TEXT FieldValue_ContentType = 0 + FieldValue_HTML FieldValue_ContentType = 1 + FieldValue_ATOM FieldValue_ContentType = 2 + FieldValue_DATE FieldValue_ContentType = 3 + FieldValue_NUMBER FieldValue_ContentType = 4 + FieldValue_GEO FieldValue_ContentType = 5 +) + +var FieldValue_ContentType_name = map[int32]string{ + 0: "TEXT", + 1: "HTML", + 2: "ATOM", + 3: "DATE", + 4: "NUMBER", + 5: "GEO", +} +var FieldValue_ContentType_value = map[string]int32{ + "TEXT": 0, + "HTML": 1, + "ATOM": 2, + "DATE": 3, + "NUMBER": 4, + "GEO": 5, +} + +func (x FieldValue_ContentType) Enum() *FieldValue_ContentType { + p := new(FieldValue_ContentType) + *p = x + return p +} +func (x FieldValue_ContentType) String() string { + return proto.EnumName(FieldValue_ContentType_name, int32(x)) +} +func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType") + if err != nil { + return err + } + *x = FieldValue_ContentType(value) + return nil +} + +type FacetValue_ContentType int32 + +const ( + FacetValue_ATOM FacetValue_ContentType = 2 + FacetValue_NUMBER FacetValue_ContentType = 4 +) + +var FacetValue_ContentType_name = map[int32]string{ + 2: "ATOM", + 4: "NUMBER", +} +var FacetValue_ContentType_value = map[string]int32{ + "ATOM": 2, + "NUMBER": 4, +} + +func (x FacetValue_ContentType) Enum() *FacetValue_ContentType { + p := new(FacetValue_ContentType) + *p = x + 
return p +} +func (x FacetValue_ContentType) String() string { + return proto.EnumName(FacetValue_ContentType_name, int32(x)) +} +func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType") + if err != nil { + return err + } + *x = FacetValue_ContentType(value) + return nil +} + +type Document_Storage int32 + +const ( + Document_DISK Document_Storage = 0 +) + +var Document_Storage_name = map[int32]string{ + 0: "DISK", +} +var Document_Storage_value = map[string]int32{ + "DISK": 0, +} + +func (x Document_Storage) Enum() *Document_Storage { + p := new(Document_Storage) + *p = x + return p +} +func (x Document_Storage) String() string { + return proto.EnumName(Document_Storage_name, int32(x)) +} +func (x *Document_Storage) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage") + if err != nil { + return err + } + *x = Document_Storage(value) + return nil +} + +type SearchServiceError_ErrorCode int32 + +const ( + SearchServiceError_OK SearchServiceError_ErrorCode = 0 + SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1 + SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2 + SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3 + SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4 + SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5 + SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6 +) + +var SearchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "TRANSIENT_ERROR", + 3: "INTERNAL_ERROR", + 4: "PERMISSION_DENIED", + 5: "TIMEOUT", + 6: "CONCURRENT_TRANSACTION", +} +var SearchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "TRANSIENT_ERROR": 2, + "INTERNAL_ERROR": 3, + "PERMISSION_DENIED": 4, + "TIMEOUT": 5, + "CONCURRENT_TRANSACTION": 6, +} + +func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode { + p := new(SearchServiceError_ErrorCode) + *p = x + return p +} +func (x SearchServiceError_ErrorCode) String() string { + return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x)) +} +func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode") + if err != nil { + return err + } + *x = SearchServiceError_ErrorCode(value) + return nil +} + +type IndexSpec_Consistency int32 + +const ( + IndexSpec_GLOBAL IndexSpec_Consistency = 0 + IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1 +) + +var IndexSpec_Consistency_name = map[int32]string{ + 0: "GLOBAL", + 1: "PER_DOCUMENT", +} +var IndexSpec_Consistency_value = map[string]int32{ + "GLOBAL": 0, + "PER_DOCUMENT": 1, +} + +func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency { + p := new(IndexSpec_Consistency) + *p = x + return p +} +func (x IndexSpec_Consistency) String() string { + return proto.EnumName(IndexSpec_Consistency_name, int32(x)) +} +func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency") + if err != nil { + return err + } + *x = IndexSpec_Consistency(value) + return nil +} + +type IndexSpec_Source int32 + +const ( + IndexSpec_SEARCH IndexSpec_Source = 0 + IndexSpec_DATASTORE IndexSpec_Source = 1 + IndexSpec_CLOUD_STORAGE 
IndexSpec_Source = 2 +) + +var IndexSpec_Source_name = map[int32]string{ + 0: "SEARCH", + 1: "DATASTORE", + 2: "CLOUD_STORAGE", +} +var IndexSpec_Source_value = map[string]int32{ + "SEARCH": 0, + "DATASTORE": 1, + "CLOUD_STORAGE": 2, +} + +func (x IndexSpec_Source) Enum() *IndexSpec_Source { + p := new(IndexSpec_Source) + *p = x + return p +} +func (x IndexSpec_Source) String() string { + return proto.EnumName(IndexSpec_Source_name, int32(x)) +} +func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source") + if err != nil { + return err + } + *x = IndexSpec_Source(value) + return nil +} + +type IndexSpec_Mode int32 + +const ( + IndexSpec_PRIORITY IndexSpec_Mode = 0 + IndexSpec_BACKGROUND IndexSpec_Mode = 1 +) + +var IndexSpec_Mode_name = map[int32]string{ + 0: "PRIORITY", + 1: "BACKGROUND", +} +var IndexSpec_Mode_value = map[string]int32{ + "PRIORITY": 0, + "BACKGROUND": 1, +} + +func (x IndexSpec_Mode) Enum() *IndexSpec_Mode { + p := new(IndexSpec_Mode) + *p = x + return p +} +func (x IndexSpec_Mode) String() string { + return proto.EnumName(IndexSpec_Mode_name, int32(x)) +} +func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode") + if err != nil { + return err + } + *x = IndexSpec_Mode(value) + return nil +} + +type IndexDocumentParams_Freshness int32 + +const ( + IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0 + IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1 +) + +var IndexDocumentParams_Freshness_name = map[int32]string{ + 0: "SYNCHRONOUSLY", + 1: "WHEN_CONVENIENT", +} +var IndexDocumentParams_Freshness_value = map[string]int32{ + "SYNCHRONOUSLY": 0, + "WHEN_CONVENIENT": 1, +} + +func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness { + p := new(IndexDocumentParams_Freshness) + *p = x + return p +} +func (x IndexDocumentParams_Freshness) String() string { + return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x)) +} +func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness") + if err != nil { + return err + } + *x = IndexDocumentParams_Freshness(value) + return nil +} + +type ScorerSpec_Scorer int32 + +const ( + ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0 + ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2 +) + +var ScorerSpec_Scorer_name = map[int32]string{ + 0: "RESCORING_MATCH_SCORER", + 2: "MATCH_SCORER", +} +var ScorerSpec_Scorer_value = map[string]int32{ + "RESCORING_MATCH_SCORER": 0, + "MATCH_SCORER": 2, +} + +func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer { + p := new(ScorerSpec_Scorer) + *p = x + return p +} +func (x ScorerSpec_Scorer) String() string { + return proto.EnumName(ScorerSpec_Scorer_name, int32(x)) +} +func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer") + if err != nil { + return err + } + *x = ScorerSpec_Scorer(value) + return nil +} + +type SearchParams_CursorType int32 + +const ( + SearchParams_NONE SearchParams_CursorType = 0 + SearchParams_SINGLE SearchParams_CursorType = 1 + SearchParams_PER_RESULT SearchParams_CursorType = 2 +) + +var SearchParams_CursorType_name = map[int32]string{ + 0: "NONE", + 1: "SINGLE", + 2: "PER_RESULT", +} +var 
SearchParams_CursorType_value = map[string]int32{ + "NONE": 0, + "SINGLE": 1, + "PER_RESULT": 2, +} + +func (x SearchParams_CursorType) Enum() *SearchParams_CursorType { + p := new(SearchParams_CursorType) + *p = x + return p +} +func (x SearchParams_CursorType) String() string { + return proto.EnumName(SearchParams_CursorType_name, int32(x)) +} +func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType") + if err != nil { + return err + } + *x = SearchParams_CursorType(value) + return nil +} + +type SearchParams_ParsingMode int32 + +const ( + SearchParams_STRICT SearchParams_ParsingMode = 0 + SearchParams_RELAXED SearchParams_ParsingMode = 1 +) + +var SearchParams_ParsingMode_name = map[int32]string{ + 0: "STRICT", + 1: "RELAXED", +} +var SearchParams_ParsingMode_value = map[string]int32{ + "STRICT": 0, + "RELAXED": 1, +} + +func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode { + p := new(SearchParams_ParsingMode) + *p = x + return p +} +func (x SearchParams_ParsingMode) String() string { + return proto.EnumName(SearchParams_ParsingMode_name, int32(x)) +} +func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode") + if err != nil { + return err + } + *x = SearchParams_ParsingMode(value) + return nil +} + +type Scope struct { + Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Scope) Reset() { *m = Scope{} } +func (m *Scope) String() string { return proto.CompactTextString(m) } +func (*Scope) ProtoMessage() {} + +func (m *Scope) GetType() Scope_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return Scope_USER_BY_CANONICAL_ID +} + +func (m *Scope) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Entry struct { + Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` + Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"` + DisplayName *string `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} + +func (m *Entry) GetScope() *Scope { + if m != nil { + return m.Scope + } + return nil +} + +func (m *Entry) GetPermission() Entry_Permission { + if m != nil && m.Permission != nil { + return *m.Permission + } + return Entry_READ +} + +func (m *Entry) GetDisplayName() string { + if m != nil && m.DisplayName != nil { + return *m.DisplayName + } + return "" +} + +type AccessControlList struct { + Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"` + Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AccessControlList) Reset() { *m = AccessControlList{} } +func (m *AccessControlList) String() string { return proto.CompactTextString(m) } +func (*AccessControlList) ProtoMessage() {} + +func (m *AccessControlList) GetOwner() string { + if m != nil && m.Owner != nil { + return *m.Owner + } + return "" +} + +func (m *AccessControlList) 
GetEntries() []*Entry { + if m != nil { + return m.Entries + } + return nil +} + +type FieldValue struct { + Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"` + Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"` + Geo *FieldValue_Geo `protobuf:"group,4,opt,name=Geo" json:"geo,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldValue) Reset() { *m = FieldValue{} } +func (m *FieldValue) String() string { return proto.CompactTextString(m) } +func (*FieldValue) ProtoMessage() {} + +const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT +const Default_FieldValue_Language string = "en" + +func (m *FieldValue) GetType() FieldValue_ContentType { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_FieldValue_Type +} + +func (m *FieldValue) GetLanguage() string { + if m != nil && m.Language != nil { + return *m.Language + } + return Default_FieldValue_Language +} + +func (m *FieldValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *FieldValue) GetGeo() *FieldValue_Geo { + if m != nil { + return m.Geo + } + return nil +} + +type FieldValue_Geo struct { + Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"` + Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} } +func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) } +func (*FieldValue_Geo) ProtoMessage() {} + +func (m *FieldValue_Geo) GetLat() float64 { + if m != nil && m.Lat != nil { + return *m.Lat + } + return 0 +} + +func (m *FieldValue_Geo) GetLng() float64 { + if m != nil && m.Lng != nil { + return *m.Lng + } + return 0 +} + +type Field struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} + +func (m *Field) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Field) GetValue() *FieldValue { + if m != nil { + return m.Value + } + return nil +} + +type FieldTypes struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldTypes) Reset() { *m = FieldTypes{} } +func (m *FieldTypes) String() string { return proto.CompactTextString(m) } +func (*FieldTypes) ProtoMessage() {} + +func (m *FieldTypes) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldTypes) GetType() []FieldValue_ContentType { + if m != nil { + return m.Type + } + return nil +} + +type IndexShardSettings struct { + PrevNumShards []int32 `protobuf:"varint,1,rep,name=prev_num_shards" json:"prev_num_shards,omitempty"` + NumShards *int32 `protobuf:"varint,2,req,name=num_shards,def=1" json:"num_shards,omitempty"` + PrevNumShardsSearchFalse []int32 `protobuf:"varint,3,rep,name=prev_num_shards_search_false" 
json:"prev_num_shards_search_false,omitempty"` + LocalReplica *string `protobuf:"bytes,4,opt,name=local_replica,def=" json:"local_replica,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexShardSettings) Reset() { *m = IndexShardSettings{} } +func (m *IndexShardSettings) String() string { return proto.CompactTextString(m) } +func (*IndexShardSettings) ProtoMessage() {} + +const Default_IndexShardSettings_NumShards int32 = 1 + +func (m *IndexShardSettings) GetPrevNumShards() []int32 { + if m != nil { + return m.PrevNumShards + } + return nil +} + +func (m *IndexShardSettings) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return Default_IndexShardSettings_NumShards +} + +func (m *IndexShardSettings) GetPrevNumShardsSearchFalse() []int32 { + if m != nil { + return m.PrevNumShardsSearchFalse + } + return nil +} + +func (m *IndexShardSettings) GetLocalReplica() string { + if m != nil && m.LocalReplica != nil { + return *m.LocalReplica + } + return "" +} + +type FacetValue struct { + Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetValue) Reset() { *m = FacetValue{} } +func (m *FacetValue) String() string { return proto.CompactTextString(m) } +func (*FacetValue) ProtoMessage() {} + +const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM + +func (m *FacetValue) GetType() FacetValue_ContentType { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_FacetValue_Type +} + +func (m *FacetValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +type Facet struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Facet) Reset() { *m = Facet{} } +func (m *Facet) String() string { return proto.CompactTextString(m) } +func (*Facet) ProtoMessage() {} + +func (m *Facet) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Facet) GetValue() *FacetValue { + if m != nil { + return m.Value + } + return nil +} + +type DocumentMetadata struct { + Version *int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` + CommittedStVersion *int64 `protobuf:"varint,2,opt,name=committed_st_version" json:"committed_st_version,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DocumentMetadata) Reset() { *m = DocumentMetadata{} } +func (m *DocumentMetadata) String() string { return proto.CompactTextString(m) } +func (*DocumentMetadata) ProtoMessage() {} + +func (m *DocumentMetadata) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func (m *DocumentMetadata) GetCommittedStVersion() int64 { + if m != nil && m.CommittedStVersion != nil { + return *m.CommittedStVersion + } + return 0 +} + +type Document struct { + Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"` + Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"` + OrderId *int32 `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"` + Storage *Document_Storage 
`protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"` + Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} + +const Default_Document_Language string = "en" +const Default_Document_Storage Document_Storage = Document_DISK + +func (m *Document) GetId() string { + if m != nil && m.Id != nil { + return *m.Id + } + return "" +} + +func (m *Document) GetLanguage() string { + if m != nil && m.Language != nil { + return *m.Language + } + return Default_Document_Language +} + +func (m *Document) GetField() []*Field { + if m != nil { + return m.Field + } + return nil +} + +func (m *Document) GetOrderId() int32 { + if m != nil && m.OrderId != nil { + return *m.OrderId + } + return 0 +} + +func (m *Document) GetStorage() Document_Storage { + if m != nil && m.Storage != nil { + return *m.Storage + } + return Default_Document_Storage +} + +func (m *Document) GetFacet() []*Facet { + if m != nil { + return m.Facet + } + return nil +} + +type SearchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchServiceError) Reset() { *m = SearchServiceError{} } +func (m *SearchServiceError) String() string { return proto.CompactTextString(m) } +func (*SearchServiceError) ProtoMessage() {} + +type RequestStatus struct { + Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` + CanonicalCode *int32 `protobuf:"varint,3,opt,name=canonical_code" json:"canonical_code,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RequestStatus) Reset() { *m = RequestStatus{} } +func (m *RequestStatus) String() string { return proto.CompactTextString(m) } +func (*RequestStatus) ProtoMessage() {} + +func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode { + if m != nil && m.Code != nil { + return *m.Code + } + return SearchServiceError_OK +} + +func (m *RequestStatus) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +func (m *RequestStatus) GetCanonicalCode() int32 { + if m != nil && m.CanonicalCode != nil { + return *m.CanonicalCode + } + return 0 +} + +type IndexSpec struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"` + Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` + Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexSpec) Reset() { *m = IndexSpec{} } +func (m *IndexSpec) String() string { return proto.CompactTextString(m) } +func (*IndexSpec) ProtoMessage() {} + +const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT +const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH +const Default_IndexSpec_Mode IndexSpec_Mode = 
IndexSpec_PRIORITY + +func (m *IndexSpec) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *IndexSpec) GetConsistency() IndexSpec_Consistency { + if m != nil && m.Consistency != nil { + return *m.Consistency + } + return Default_IndexSpec_Consistency +} + +func (m *IndexSpec) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +func (m *IndexSpec) GetVersion() int32 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func (m *IndexSpec) GetSource() IndexSpec_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return Default_IndexSpec_Source +} + +func (m *IndexSpec) GetMode() IndexSpec_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_IndexSpec_Mode +} + +type IndexMetadata struct { + IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` + Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexMetadata) Reset() { *m = IndexMetadata{} } +func (m *IndexMetadata) String() string { return proto.CompactTextString(m) } +func (*IndexMetadata) ProtoMessage() {} + +func (m *IndexMetadata) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +func (m *IndexMetadata) GetField() []*FieldTypes { + if m != nil { + return m.Field + } + return nil +} + +func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage { + if m != nil { + return m.Storage + } + return nil +} + +type IndexMetadata_Storage struct { + AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"` + Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} } +func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) } +func (*IndexMetadata_Storage) ProtoMessage() {} + +func (m *IndexMetadata_Storage) GetAmountUsed() int64 { + if m != nil && m.AmountUsed != nil { + return *m.AmountUsed + } + return 0 +} + +func (m *IndexMetadata_Storage) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type IndexDocumentParams struct { + Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"` + Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"` + IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} } +func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentParams) ProtoMessage() {} + +const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY + +func (m *IndexDocumentParams) GetDocument() []*Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness { + if m != nil && m.Freshness != nil { + return *m.Freshness + } + return Default_IndexDocumentParams_Freshness +} + +func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec { + if m != nil { + return 
m.IndexSpec + } + return nil +} + +type IndexDocumentRequest struct { + Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } +func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentRequest) ProtoMessage() {} + +func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *IndexDocumentRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type IndexDocumentResponse struct { + Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` + DocId []string `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } +func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*IndexDocumentResponse) ProtoMessage() {} + +func (m *IndexDocumentResponse) GetStatus() []*RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *IndexDocumentResponse) GetDocId() []string { + if m != nil { + return m.DocId + } + return nil +} + +type DeleteDocumentParams struct { + DocId []string `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"` + IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} } +func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentParams) ProtoMessage() {} + +func (m *DeleteDocumentParams) GetDocId() []string { + if m != nil { + return m.DocId + } + return nil +} + +func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +type DeleteDocumentRequest struct { + Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } +func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentRequest) ProtoMessage() {} + +func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *DeleteDocumentRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type DeleteDocumentResponse struct { + Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} } +func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteDocumentResponse) ProtoMessage() {} + +func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +type ListDocumentsParams struct { + IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` + StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"` + IncludeStartDoc *bool 
`protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"` + Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} } +func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsParams) ProtoMessage() {} + +const Default_ListDocumentsParams_IncludeStartDoc bool = true +const Default_ListDocumentsParams_Limit int32 = 100 + +func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +func (m *ListDocumentsParams) GetStartDocId() string { + if m != nil && m.StartDocId != nil { + return *m.StartDocId + } + return "" +} + +func (m *ListDocumentsParams) GetIncludeStartDoc() bool { + if m != nil && m.IncludeStartDoc != nil { + return *m.IncludeStartDoc + } + return Default_ListDocumentsParams_IncludeStartDoc +} + +func (m *ListDocumentsParams) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_ListDocumentsParams_Limit +} + +func (m *ListDocumentsParams) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +type ListDocumentsRequest struct { + Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} } +func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsRequest) ProtoMessage() {} + +func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *ListDocumentsRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type ListDocumentsResponse struct { + Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} } +func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDocumentsResponse) ProtoMessage() {} + +func (m *ListDocumentsResponse) GetStatus() *RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ListDocumentsResponse) GetDocument() []*Document { + if m != nil { + return m.Document + } + return nil +} + +type ListIndexesParams struct { + FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"` + Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"` + IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"` + IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"` + Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"` + Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" 
json:"source,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} } +func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) } +func (*ListIndexesParams) ProtoMessage() {} + +const Default_ListIndexesParams_Limit int32 = 20 +const Default_ListIndexesParams_IncludeStartIndex bool = true +const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH + +func (m *ListIndexesParams) GetFetchSchema() bool { + if m != nil && m.FetchSchema != nil { + return *m.FetchSchema + } + return false +} + +func (m *ListIndexesParams) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_ListIndexesParams_Limit +} + +func (m *ListIndexesParams) GetNamespace() string { + if m != nil && m.Namespace != nil { + return *m.Namespace + } + return "" +} + +func (m *ListIndexesParams) GetStartIndexName() string { + if m != nil && m.StartIndexName != nil { + return *m.StartIndexName + } + return "" +} + +func (m *ListIndexesParams) GetIncludeStartIndex() bool { + if m != nil && m.IncludeStartIndex != nil { + return *m.IncludeStartIndex + } + return Default_ListIndexesParams_IncludeStartIndex +} + +func (m *ListIndexesParams) GetIndexNamePrefix() string { + if m != nil && m.IndexNamePrefix != nil { + return *m.IndexNamePrefix + } + return "" +} + +func (m *ListIndexesParams) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *ListIndexesParams) GetSource() IndexSpec_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return Default_ListIndexesParams_Source +} + +type ListIndexesRequest struct { + Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} } +func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) } +func (*ListIndexesRequest) ProtoMessage() {} + +func (m *ListIndexesRequest) GetParams() *ListIndexesParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *ListIndexesRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type ListIndexesResponse struct { + Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} } +func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) } +func (*ListIndexesResponse) ProtoMessage() {} + +func (m *ListIndexesResponse) GetStatus() *RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata { + if m != nil { + return m.IndexMetadata + } + return nil +} + +type DeleteSchemaParams struct { + Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"` + IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} } +func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) } +func (*DeleteSchemaParams) ProtoMessage() {} + +const 
Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH + +func (m *DeleteSchemaParams) GetSource() IndexSpec_Source { + if m != nil && m.Source != nil { + return *m.Source + } + return Default_DeleteSchemaParams_Source +} + +func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +type DeleteSchemaRequest struct { + Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} } +func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSchemaRequest) ProtoMessage() {} + +func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *DeleteSchemaRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type DeleteSchemaResponse struct { + Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} } +func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSchemaResponse) ProtoMessage() {} + +func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +type SortSpec struct { + SortExpression *string `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"` + SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"` + DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"` + DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SortSpec) Reset() { *m = SortSpec{} } +func (m *SortSpec) String() string { return proto.CompactTextString(m) } +func (*SortSpec) ProtoMessage() {} + +const Default_SortSpec_SortDescending bool = true + +func (m *SortSpec) GetSortExpression() string { + if m != nil && m.SortExpression != nil { + return *m.SortExpression + } + return "" +} + +func (m *SortSpec) GetSortDescending() bool { + if m != nil && m.SortDescending != nil { + return *m.SortDescending + } + return Default_SortSpec_SortDescending +} + +func (m *SortSpec) GetDefaultValueText() string { + if m != nil && m.DefaultValueText != nil { + return *m.DefaultValueText + } + return "" +} + +func (m *SortSpec) GetDefaultValueNumeric() float64 { + if m != nil && m.DefaultValueNumeric != nil { + return *m.DefaultValueNumeric + } + return 0 +} + +type ScorerSpec struct { + Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"` + MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScorerSpec) Reset() { *m = ScorerSpec{} } +func (m *ScorerSpec) String() string { return proto.CompactTextString(m) } +func (*ScorerSpec) ProtoMessage() {} + +const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER +const Default_ScorerSpec_Limit int32 = 1000 
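
A note on the accessor pattern above (illustrative, not part of the vendored file): throughout this generated package every optional proto2 field is a pointer, and each generated Get* method returns the dereferenced value when the field is set, or the declared Default_* constant when it is nil. A minimal sketch of that behavior, using only identifiers defined in this file plus the golang/protobuf helper proto.Int32; the wrapping function name is hypothetical:

func exampleScorerSpecDefaults() {
	s := &ScorerSpec{}
	s.GetScorer() // ScorerSpec_MATCH_SCORER: the field is nil, so the declared default applies
	s.GetLimit()  // 1000: Default_ScorerSpec_Limit

	// Setting the pointer fields overrides the defaults.
	s.Scorer = ScorerSpec_RESCORING_MATCH_SCORER.Enum()
	s.Limit = proto.Int32(50)
	s.GetScorer() // ScorerSpec_RESCORING_MATCH_SCORER
	s.GetLimit()  // 50
}
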
+ +func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer { + if m != nil && m.Scorer != nil { + return *m.Scorer + } + return Default_ScorerSpec_Scorer +} + +func (m *ScorerSpec) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_ScorerSpec_Limit +} + +func (m *ScorerSpec) GetMatchScorerParameters() string { + if m != nil && m.MatchScorerParameters != nil { + return *m.MatchScorerParameters + } + return "" +} + +type FieldSpec struct { + Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` + Expression []*FieldSpec_Expression `protobuf:"group,2,rep,name=Expression" json:"expression,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSpec) Reset() { *m = FieldSpec{} } +func (m *FieldSpec) String() string { return proto.CompactTextString(m) } +func (*FieldSpec) ProtoMessage() {} + +func (m *FieldSpec) GetName() []string { + if m != nil { + return m.Name + } + return nil +} + +func (m *FieldSpec) GetExpression() []*FieldSpec_Expression { + if m != nil { + return m.Expression + } + return nil +} + +type FieldSpec_Expression struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} } +func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) } +func (*FieldSpec_Expression) ProtoMessage() {} + +func (m *FieldSpec_Expression) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldSpec_Expression) GetExpression() string { + if m != nil && m.Expression != nil { + return *m.Expression + } + return "" +} + +type FacetRange struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"` + End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRange) Reset() { *m = FacetRange{} } +func (m *FacetRange) String() string { return proto.CompactTextString(m) } +func (*FacetRange) ProtoMessage() {} + +func (m *FacetRange) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetRange) GetStart() string { + if m != nil && m.Start != nil { + return *m.Start + } + return "" +} + +func (m *FacetRange) GetEnd() string { + if m != nil && m.End != nil { + return *m.End + } + return "" +} + +type FacetRequestParam struct { + ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"` + Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"` + ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} } +func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) } +func (*FacetRequestParam) ProtoMessage() {} + +func (m *FacetRequestParam) GetValueLimit() int32 { + if m != nil && m.ValueLimit != nil { + return *m.ValueLimit + } + return 0 +} + +func (m *FacetRequestParam) GetRange() []*FacetRange { + if m != nil { + return m.Range + } + return nil +} + +func (m *FacetRequestParam) GetValueConstraint() []string { + if m != nil { + return m.ValueConstraint + } + return nil +} + +type FacetAutoDetectParam struct { + ValueLimit 
*int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} } +func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) } +func (*FacetAutoDetectParam) ProtoMessage() {} + +const Default_FacetAutoDetectParam_ValueLimit int32 = 10 + +func (m *FacetAutoDetectParam) GetValueLimit() int32 { + if m != nil && m.ValueLimit != nil { + return *m.ValueLimit + } + return Default_FacetAutoDetectParam_ValueLimit +} + +type FacetRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Params *FacetRequestParam `protobuf:"bytes,2,opt,name=params" json:"params,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRequest) Reset() { *m = FacetRequest{} } +func (m *FacetRequest) String() string { return proto.CompactTextString(m) } +func (*FacetRequest) ProtoMessage() {} + +func (m *FacetRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetRequest) GetParams() *FacetRequestParam { + if m != nil { + return m.Params + } + return nil +} + +type FacetRefinement struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Range *FacetRefinement_Range `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRefinement) Reset() { *m = FacetRefinement{} } +func (m *FacetRefinement) String() string { return proto.CompactTextString(m) } +func (*FacetRefinement) ProtoMessage() {} + +func (m *FacetRefinement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetRefinement) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func (m *FacetRefinement) GetRange() *FacetRefinement_Range { + if m != nil { + return m.Range + } + return nil +} + +type FacetRefinement_Range struct { + Start *string `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"` + End *string `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetRefinement_Range) Reset() { *m = FacetRefinement_Range{} } +func (m *FacetRefinement_Range) String() string { return proto.CompactTextString(m) } +func (*FacetRefinement_Range) ProtoMessage() {} + +func (m *FacetRefinement_Range) GetStart() string { + if m != nil && m.Start != nil { + return *m.Start + } + return "" +} + +func (m *FacetRefinement_Range) GetEnd() string { + if m != nil && m.End != nil { + return *m.End + } + return "" +} + +type SearchParams struct { + IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"` + Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"` + Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"` + Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"` + CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"` + Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"` + MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"` + SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"` + ScorerSpec 
*ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"` + FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"` + KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"` + ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"` + AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"` + IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"` + FacetRefinement []*FacetRefinement `protobuf:"bytes,17,rep,name=facet_refinement" json:"facet_refinement,omitempty"` + FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"` + FacetDepth *int32 `protobuf:"varint,19,opt,name=facet_depth,def=1000" json:"facet_depth,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchParams) Reset() { *m = SearchParams{} } +func (m *SearchParams) String() string { return proto.CompactTextString(m) } +func (*SearchParams) ProtoMessage() {} + +const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE +const Default_SearchParams_Limit int32 = 20 +const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT +const Default_SearchParams_AutoDiscoverFacetCount int32 = 0 +const Default_SearchParams_FacetDepth int32 = 1000 + +func (m *SearchParams) GetIndexSpec() *IndexSpec { + if m != nil { + return m.IndexSpec + } + return nil +} + +func (m *SearchParams) GetQuery() string { + if m != nil && m.Query != nil { + return *m.Query + } + return "" +} + +func (m *SearchParams) GetCursor() string { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return "" +} + +func (m *SearchParams) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *SearchParams) GetCursorType() SearchParams_CursorType { + if m != nil && m.CursorType != nil { + return *m.CursorType + } + return Default_SearchParams_CursorType +} + +func (m *SearchParams) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return Default_SearchParams_Limit +} + +func (m *SearchParams) GetMatchedCountAccuracy() int32 { + if m != nil && m.MatchedCountAccuracy != nil { + return *m.MatchedCountAccuracy + } + return 0 +} + +func (m *SearchParams) GetSortSpec() []*SortSpec { + if m != nil { + return m.SortSpec + } + return nil +} + +func (m *SearchParams) GetScorerSpec() *ScorerSpec { + if m != nil { + return m.ScorerSpec + } + return nil +} + +func (m *SearchParams) GetFieldSpec() *FieldSpec { + if m != nil { + return m.FieldSpec + } + return nil +} + +func (m *SearchParams) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode { + if m != nil && m.ParsingMode != nil { + return *m.ParsingMode + } + return Default_SearchParams_ParsingMode +} + +func (m *SearchParams) GetAutoDiscoverFacetCount() int32 { + if m != nil && m.AutoDiscoverFacetCount != nil { + return *m.AutoDiscoverFacetCount + } + return Default_SearchParams_AutoDiscoverFacetCount +} + +func (m *SearchParams) GetIncludeFacet() []*FacetRequest { + if m != nil { + return m.IncludeFacet + } + return nil +} + +func (m *SearchParams) GetFacetRefinement() 
[]*FacetRefinement { + if m != nil { + return m.FacetRefinement + } + return nil +} + +func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam { + if m != nil { + return m.FacetAutoDetectParam + } + return nil +} + +func (m *SearchParams) GetFacetDepth() int32 { + if m != nil && m.FacetDepth != nil { + return *m.FacetDepth + } + return Default_SearchParams_FacetDepth +} + +type SearchRequest struct { + Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} + +func (m *SearchRequest) GetParams() *SearchParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *SearchRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type FacetResultValue struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"` + Refinement *FacetRefinement `protobuf:"bytes,3,req,name=refinement" json:"refinement,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetResultValue) Reset() { *m = FacetResultValue{} } +func (m *FacetResultValue) String() string { return proto.CompactTextString(m) } +func (*FacetResultValue) ProtoMessage() {} + +func (m *FacetResultValue) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetResultValue) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *FacetResultValue) GetRefinement() *FacetRefinement { + if m != nil { + return m.Refinement + } + return nil +} + +type FacetResult struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Value []*FacetResultValue `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FacetResult) Reset() { *m = FacetResult{} } +func (m *FacetResult) String() string { return proto.CompactTextString(m) } +func (*FacetResult) ProtoMessage() {} + +func (m *FacetResult) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FacetResult) GetValue() []*FacetResultValue { + if m != nil { + return m.Value + } + return nil +} + +type SearchResult struct { + Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"` + Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"` + Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"` + Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchResult) Reset() { *m = SearchResult{} } +func (m *SearchResult) String() string { return proto.CompactTextString(m) } +func (*SearchResult) ProtoMessage() {} + +func (m *SearchResult) GetDocument() *Document { + if m != nil { + return m.Document + } + return nil +} + +func (m *SearchResult) GetExpression() []*Field { + if m != nil { + return m.Expression + } + return nil +} + +func (m *SearchResult) GetScore() []float64 { + if m != nil { + return m.Score + } + return nil +} + +func (m *SearchResult) GetCursor() string { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return "" +} + +type SearchResponse struct 
{ + Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"` + MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"` + Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"` + Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"` + FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} + +var extRange_SearchResponse = []proto.ExtensionRange{ + {1000, 9999}, +} + +func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_SearchResponse +} +func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *SearchResponse) GetResult() []*SearchResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *SearchResponse) GetMatchedCount() int64 { + if m != nil && m.MatchedCount != nil { + return *m.MatchedCount + } + return 0 +} + +func (m *SearchResponse) GetStatus() *RequestStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *SearchResponse) GetCursor() string { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return "" +} + +func (m *SearchResponse) GetFacetResult() []*FacetResult { + if m != nil { + return m.FacetResult + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/search/search.proto b/vendor/google.golang.org/appengine/internal/search/search.proto new file mode 100644 index 0000000..219f4c3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/search/search.proto @@ -0,0 +1,388 @@ +syntax = "proto2"; +option go_package = "search"; + +package search; + +message Scope { + enum Type { + USER_BY_CANONICAL_ID = 1; + USER_BY_EMAIL = 2; + GROUP_BY_CANONICAL_ID = 3; + GROUP_BY_EMAIL = 4; + GROUP_BY_DOMAIN = 5; + ALL_USERS = 6; + ALL_AUTHENTICATED_USERS = 7; + } + + optional Type type = 1; + optional string value = 2; +} + +message Entry { + enum Permission { + READ = 1; + WRITE = 2; + FULL_CONTROL = 3; + } + + optional Scope scope = 1; + optional Permission permission = 2; + optional string display_name = 3; +} + +message AccessControlList { + optional string owner = 1; + repeated Entry entries = 2; +} + +message FieldValue { + enum ContentType { + TEXT = 0; + HTML = 1; + ATOM = 2; + DATE = 3; + NUMBER = 4; + GEO = 5; + } + + optional ContentType type = 1 [default = TEXT]; + + optional string language = 2 [default = "en"]; + + optional string string_value = 3; + + optional group Geo = 4 { + required double lat = 5; + required double lng = 6; + } +} + +message Field { + required string name = 1; + required FieldValue value = 2; +} + +message FieldTypes { + required string name = 1; + repeated FieldValue.ContentType type = 2; +} + +message IndexShardSettings { + repeated int32 prev_num_shards = 1; + required int32 num_shards = 2 [default=1]; + repeated int32 prev_num_shards_search_false = 3; + optional string local_replica = 4 [default = ""]; +} + +message FacetValue { + enum ContentType { + ATOM = 2; + NUMBER = 4; + } + + optional ContentType type = 1 [default = ATOM]; + optional string string_value = 
3; +} + +message Facet { + required string name = 1; + required FacetValue value = 2; +} + +message DocumentMetadata { + optional int64 version = 1; + optional int64 committed_st_version = 2; +} + +message Document { + optional string id = 1; + optional string language = 2 [default = "en"]; + repeated Field field = 3; + optional int32 order_id = 4; + + enum Storage { + DISK = 0; + } + + optional Storage storage = 5 [default = DISK]; + repeated Facet facet = 8; +} + +message SearchServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + TRANSIENT_ERROR = 2; + INTERNAL_ERROR = 3; + PERMISSION_DENIED = 4; + TIMEOUT = 5; + CONCURRENT_TRANSACTION = 6; + } +} + +message RequestStatus { + required SearchServiceError.ErrorCode code = 1; + optional string error_detail = 2; + optional int32 canonical_code = 3; +} + +message IndexSpec { + required string name = 1; + + enum Consistency { + GLOBAL = 0; + PER_DOCUMENT = 1; + } + optional Consistency consistency = 2 [default = PER_DOCUMENT]; + + optional string namespace = 3; + optional int32 version = 4; + + enum Source { + SEARCH = 0; + DATASTORE = 1; + CLOUD_STORAGE = 2; + } + optional Source source = 5 [default = SEARCH]; + + enum Mode { + PRIORITY = 0; + BACKGROUND = 1; + } + optional Mode mode = 6 [default = PRIORITY]; +} + +message IndexMetadata { + required IndexSpec index_spec = 1; + + repeated FieldTypes field = 2; + + message Storage { + optional int64 amount_used = 1; + optional int64 limit = 2; + } + optional Storage storage = 3; +} + +message IndexDocumentParams { + repeated Document document = 1; + + enum Freshness { + SYNCHRONOUSLY = 0; + WHEN_CONVENIENT = 1; + } + optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true]; + + required IndexSpec index_spec = 3; +} + +message IndexDocumentRequest { + required IndexDocumentParams params = 1; + + optional bytes app_id = 3; +} + +message IndexDocumentResponse { + repeated RequestStatus status = 1; + + repeated string doc_id = 2; +} + +message DeleteDocumentParams { + repeated string doc_id = 1; + + required IndexSpec index_spec = 2; +} + +message DeleteDocumentRequest { + required DeleteDocumentParams params = 1; + + optional bytes app_id = 3; +} + +message DeleteDocumentResponse { + repeated RequestStatus status = 1; +} + +message ListDocumentsParams { + required IndexSpec index_spec = 1; + optional string start_doc_id = 2; + optional bool include_start_doc = 3 [default = true]; + optional int32 limit = 4 [default = 100]; + optional bool keys_only = 5; +} + +message ListDocumentsRequest { + required ListDocumentsParams params = 1; + + optional bytes app_id = 2; +} + +message ListDocumentsResponse { + required RequestStatus status = 1; + + repeated Document document = 2; +} + +message ListIndexesParams { + optional bool fetch_schema = 1; + optional int32 limit = 2 [default = 20]; + optional string namespace = 3; + optional string start_index_name = 4; + optional bool include_start_index = 5 [default = true]; + optional string index_name_prefix = 6; + optional int32 offset = 7; + optional IndexSpec.Source source = 8 [default = SEARCH]; +} + +message ListIndexesRequest { + required ListIndexesParams params = 1; + + optional bytes app_id = 3; +} + +message ListIndexesResponse { + required RequestStatus status = 1; + repeated IndexMetadata index_metadata = 2; +} + +message DeleteSchemaParams { + optional IndexSpec.Source source = 1 [default = SEARCH]; + repeated IndexSpec index_spec = 2; +} + +message DeleteSchemaRequest { + required DeleteSchemaParams params = 1; + 
+ optional bytes app_id = 3; +} + +message DeleteSchemaResponse { + repeated RequestStatus status = 1; +} + +message SortSpec { + required string sort_expression = 1; + optional bool sort_descending = 2 [default = true]; + optional string default_value_text = 4; + optional double default_value_numeric = 5; +} + +message ScorerSpec { + enum Scorer { + RESCORING_MATCH_SCORER = 0; + MATCH_SCORER = 2; + } + optional Scorer scorer = 1 [default = MATCH_SCORER]; + + optional int32 limit = 2 [default = 1000]; + optional string match_scorer_parameters = 9; +} + +message FieldSpec { + repeated string name = 1; + + repeated group Expression = 2 { + required string name = 3; + required string expression = 4; + } +} + +message FacetRange { + optional string name = 1; + optional string start = 2; + optional string end = 3; +} + +message FacetRequestParam { + optional int32 value_limit = 1; + repeated FacetRange range = 2; + repeated string value_constraint = 3; +} + +message FacetAutoDetectParam { + optional int32 value_limit = 1 [default = 10]; +} + +message FacetRequest { + required string name = 1; + optional FacetRequestParam params = 2; +} + +message FacetRefinement { + required string name = 1; + optional string value = 2; + + message Range { + optional string start = 1; + optional string end = 2; + } + optional Range range = 3; +} + +message SearchParams { + required IndexSpec index_spec = 1; + required string query = 2; + optional string cursor = 4; + optional int32 offset = 11; + + enum CursorType { + NONE = 0; + SINGLE = 1; + PER_RESULT = 2; + } + optional CursorType cursor_type = 5 [default = NONE]; + + optional int32 limit = 6 [default = 20]; + optional int32 matched_count_accuracy = 7; + repeated SortSpec sort_spec = 8; + optional ScorerSpec scorer_spec = 9; + optional FieldSpec field_spec = 10; + optional bool keys_only = 12; + + enum ParsingMode { + STRICT = 0; + RELAXED = 1; + } + optional ParsingMode parsing_mode = 13 [default = STRICT]; + + optional int32 auto_discover_facet_count = 15 [default = 0]; + repeated FacetRequest include_facet = 16; + repeated FacetRefinement facet_refinement = 17; + optional FacetAutoDetectParam facet_auto_detect_param = 18; + optional int32 facet_depth = 19 [default=1000]; +} + +message SearchRequest { + required SearchParams params = 1; + + optional bytes app_id = 3; +} + +message FacetResultValue { + required string name = 1; + required int32 count = 2; + required FacetRefinement refinement = 3; +} + +message FacetResult { + required string name = 1; + repeated FacetResultValue value = 2; +} + +message SearchResult { + required Document document = 1; + repeated Field expression = 4; + repeated double score = 2; + optional string cursor = 3; +} + +message SearchResponse { + repeated SearchResult result = 1; + required int64 matched_count = 2; + required RequestStatus status = 3; + optional string cursor = 4; + repeated FacetResult facet_result = 5; + + extensions 1000 to 9999; +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go new file mode 100644 index 0000000..60628ec --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go @@ -0,0 +1,1858 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/socket/socket_service.proto +// DO NOT EDIT! + +/* +Package socket is a generated protocol buffer package. 
+ +It is generated from these files: + google.golang.org/appengine/internal/socket/socket_service.proto + +It has these top-level messages: + RemoteSocketServiceError + AddressPort + CreateSocketRequest + CreateSocketReply + BindRequest + BindReply + GetSocketNameRequest + GetSocketNameReply + GetPeerNameRequest + GetPeerNameReply + SocketOption + SetSocketOptionsRequest + SetSocketOptionsReply + GetSocketOptionsRequest + GetSocketOptionsReply + ConnectRequest + ConnectReply + ListenRequest + ListenReply + AcceptRequest + AcceptReply + ShutDownRequest + ShutDownReply + CloseRequest + CloseReply + SendRequest + SendReply + ReceiveRequest + ReceiveReply + PollEvent + PollRequest + PollReply + ResolveRequest + ResolveReply +*/ +package socket + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type RemoteSocketServiceError_ErrorCode int32 + +const ( + RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 + RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 + RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 + RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 + RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 + RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 +) + +var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ + 1: "SYSTEM_ERROR", + 2: "GAI_ERROR", + 4: "FAILURE", + 5: "PERMISSION_DENIED", + 6: "INVALID_REQUEST", + 7: "SOCKET_CLOSED", +} +var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ + "SYSTEM_ERROR": 1, + "GAI_ERROR": 2, + "FAILURE": 4, + "PERMISSION_DENIED": 5, + "INVALID_REQUEST": 6, + "SOCKET_CLOSED": 7, +} + +func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { + p := new(RemoteSocketServiceError_ErrorCode) + *p = x + return p +} +func (x RemoteSocketServiceError_ErrorCode) String() string { + return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) +} +func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") + if err != nil { + return err + } + *x = RemoteSocketServiceError_ErrorCode(value) + return nil +} + +type RemoteSocketServiceError_SystemError int32 + +const ( + RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 + RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 + RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 + RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 + RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 + RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 + RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 + RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 + RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 + RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 + RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 + RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 + 
RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 + RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 + RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 + RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 + RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 + RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 + RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 + RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 + RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 + RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 + RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 + RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 + RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 + RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 + RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 + RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 + RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 + RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 + RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 + RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 + RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 + RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 + RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 + RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 + RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 + RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 + RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 + RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 + RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 + RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 + RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 + RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 + RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 + RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 + RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 + RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 + RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 + RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 + RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 + RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 + RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 + RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 + RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 + 
RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 + RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 + RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 + RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 + RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 + RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 + RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 + RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 + RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 + RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 + RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 + RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 + RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 + RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 + RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 + RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 + RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 + RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 + RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 + RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 + RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 + RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 + RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 + RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 + RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 + RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 + RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 + RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 + RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 + RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 + RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 + RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 + RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 + RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 + RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 + RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 + RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 + RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 + RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 + RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 + RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 + RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 + RemoteSocketServiceError_SYS_EADDRINUSE 
RemoteSocketServiceError_SystemError = 98 + RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 + RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 + RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 + RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 + RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 + RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 + RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 + RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 + RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 + RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 + RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 + RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 + RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 + RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 + RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 + RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 + RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 + RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 + RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 + RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 + RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 + RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 + RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 + RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 + RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 + RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 + RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 + RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 + RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 + RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 + RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 + RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 + RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 + RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 +) + +var RemoteSocketServiceError_SystemError_name = map[int32]string{ + 0: "SYS_SUCCESS", + 1: "SYS_EPERM", + 2: "SYS_ENOENT", + 3: "SYS_ESRCH", + 4: "SYS_EINTR", + 5: "SYS_EIO", + 6: "SYS_ENXIO", + 7: "SYS_E2BIG", + 8: "SYS_ENOEXEC", + 9: "SYS_EBADF", + 10: "SYS_ECHILD", + 11: "SYS_EAGAIN", + // Duplicate value: 11: "SYS_EWOULDBLOCK", + 12: "SYS_ENOMEM", + 13: "SYS_EACCES", + 14: "SYS_EFAULT", + 15: "SYS_ENOTBLK", + 16: "SYS_EBUSY", + 17: "SYS_EEXIST", + 18: "SYS_EXDEV", + 19: "SYS_ENODEV", + 20: "SYS_ENOTDIR", + 21: "SYS_EISDIR", + 22: "SYS_EINVAL", + 23: "SYS_ENFILE", + 24: "SYS_EMFILE", + 25: "SYS_ENOTTY", + 26: "SYS_ETXTBSY", + 27: 
"SYS_EFBIG", + 28: "SYS_ENOSPC", + 29: "SYS_ESPIPE", + 30: "SYS_EROFS", + 31: "SYS_EMLINK", + 32: "SYS_EPIPE", + 33: "SYS_EDOM", + 34: "SYS_ERANGE", + 35: "SYS_EDEADLK", + // Duplicate value: 35: "SYS_EDEADLOCK", + 36: "SYS_ENAMETOOLONG", + 37: "SYS_ENOLCK", + 38: "SYS_ENOSYS", + 39: "SYS_ENOTEMPTY", + 40: "SYS_ELOOP", + 42: "SYS_ENOMSG", + 43: "SYS_EIDRM", + 44: "SYS_ECHRNG", + 45: "SYS_EL2NSYNC", + 46: "SYS_EL3HLT", + 47: "SYS_EL3RST", + 48: "SYS_ELNRNG", + 49: "SYS_EUNATCH", + 50: "SYS_ENOCSI", + 51: "SYS_EL2HLT", + 52: "SYS_EBADE", + 53: "SYS_EBADR", + 54: "SYS_EXFULL", + 55: "SYS_ENOANO", + 56: "SYS_EBADRQC", + 57: "SYS_EBADSLT", + 59: "SYS_EBFONT", + 60: "SYS_ENOSTR", + 61: "SYS_ENODATA", + 62: "SYS_ETIME", + 63: "SYS_ENOSR", + 64: "SYS_ENONET", + 65: "SYS_ENOPKG", + 66: "SYS_EREMOTE", + 67: "SYS_ENOLINK", + 68: "SYS_EADV", + 69: "SYS_ESRMNT", + 70: "SYS_ECOMM", + 71: "SYS_EPROTO", + 72: "SYS_EMULTIHOP", + 73: "SYS_EDOTDOT", + 74: "SYS_EBADMSG", + 75: "SYS_EOVERFLOW", + 76: "SYS_ENOTUNIQ", + 77: "SYS_EBADFD", + 78: "SYS_EREMCHG", + 79: "SYS_ELIBACC", + 80: "SYS_ELIBBAD", + 81: "SYS_ELIBSCN", + 82: "SYS_ELIBMAX", + 83: "SYS_ELIBEXEC", + 84: "SYS_EILSEQ", + 85: "SYS_ERESTART", + 86: "SYS_ESTRPIPE", + 87: "SYS_EUSERS", + 88: "SYS_ENOTSOCK", + 89: "SYS_EDESTADDRREQ", + 90: "SYS_EMSGSIZE", + 91: "SYS_EPROTOTYPE", + 92: "SYS_ENOPROTOOPT", + 93: "SYS_EPROTONOSUPPORT", + 94: "SYS_ESOCKTNOSUPPORT", + 95: "SYS_EOPNOTSUPP", + // Duplicate value: 95: "SYS_ENOTSUP", + 96: "SYS_EPFNOSUPPORT", + 97: "SYS_EAFNOSUPPORT", + 98: "SYS_EADDRINUSE", + 99: "SYS_EADDRNOTAVAIL", + 100: "SYS_ENETDOWN", + 101: "SYS_ENETUNREACH", + 102: "SYS_ENETRESET", + 103: "SYS_ECONNABORTED", + 104: "SYS_ECONNRESET", + 105: "SYS_ENOBUFS", + 106: "SYS_EISCONN", + 107: "SYS_ENOTCONN", + 108: "SYS_ESHUTDOWN", + 109: "SYS_ETOOMANYREFS", + 110: "SYS_ETIMEDOUT", + 111: "SYS_ECONNREFUSED", + 112: "SYS_EHOSTDOWN", + 113: "SYS_EHOSTUNREACH", + 114: "SYS_EALREADY", + 115: "SYS_EINPROGRESS", + 116: "SYS_ESTALE", + 117: "SYS_EUCLEAN", + 118: "SYS_ENOTNAM", + 119: "SYS_ENAVAIL", + 120: "SYS_EISNAM", + 121: "SYS_EREMOTEIO", + 122: "SYS_EDQUOT", + 123: "SYS_ENOMEDIUM", + 124: "SYS_EMEDIUMTYPE", + 125: "SYS_ECANCELED", + 126: "SYS_ENOKEY", + 127: "SYS_EKEYEXPIRED", + 128: "SYS_EKEYREVOKED", + 129: "SYS_EKEYREJECTED", + 130: "SYS_EOWNERDEAD", + 131: "SYS_ENOTRECOVERABLE", + 132: "SYS_ERFKILL", +} +var RemoteSocketServiceError_SystemError_value = map[string]int32{ + "SYS_SUCCESS": 0, + "SYS_EPERM": 1, + "SYS_ENOENT": 2, + "SYS_ESRCH": 3, + "SYS_EINTR": 4, + "SYS_EIO": 5, + "SYS_ENXIO": 6, + "SYS_E2BIG": 7, + "SYS_ENOEXEC": 8, + "SYS_EBADF": 9, + "SYS_ECHILD": 10, + "SYS_EAGAIN": 11, + "SYS_EWOULDBLOCK": 11, + "SYS_ENOMEM": 12, + "SYS_EACCES": 13, + "SYS_EFAULT": 14, + "SYS_ENOTBLK": 15, + "SYS_EBUSY": 16, + "SYS_EEXIST": 17, + "SYS_EXDEV": 18, + "SYS_ENODEV": 19, + "SYS_ENOTDIR": 20, + "SYS_EISDIR": 21, + "SYS_EINVAL": 22, + "SYS_ENFILE": 23, + "SYS_EMFILE": 24, + "SYS_ENOTTY": 25, + "SYS_ETXTBSY": 26, + "SYS_EFBIG": 27, + "SYS_ENOSPC": 28, + "SYS_ESPIPE": 29, + "SYS_EROFS": 30, + "SYS_EMLINK": 31, + "SYS_EPIPE": 32, + "SYS_EDOM": 33, + "SYS_ERANGE": 34, + "SYS_EDEADLK": 35, + "SYS_EDEADLOCK": 35, + "SYS_ENAMETOOLONG": 36, + "SYS_ENOLCK": 37, + "SYS_ENOSYS": 38, + "SYS_ENOTEMPTY": 39, + "SYS_ELOOP": 40, + "SYS_ENOMSG": 42, + "SYS_EIDRM": 43, + "SYS_ECHRNG": 44, + "SYS_EL2NSYNC": 45, + "SYS_EL3HLT": 46, + "SYS_EL3RST": 47, + "SYS_ELNRNG": 48, + "SYS_EUNATCH": 49, + "SYS_ENOCSI": 50, + "SYS_EL2HLT": 51, + "SYS_EBADE": 52, + "SYS_EBADR": 53, + 
"SYS_EXFULL": 54, + "SYS_ENOANO": 55, + "SYS_EBADRQC": 56, + "SYS_EBADSLT": 57, + "SYS_EBFONT": 59, + "SYS_ENOSTR": 60, + "SYS_ENODATA": 61, + "SYS_ETIME": 62, + "SYS_ENOSR": 63, + "SYS_ENONET": 64, + "SYS_ENOPKG": 65, + "SYS_EREMOTE": 66, + "SYS_ENOLINK": 67, + "SYS_EADV": 68, + "SYS_ESRMNT": 69, + "SYS_ECOMM": 70, + "SYS_EPROTO": 71, + "SYS_EMULTIHOP": 72, + "SYS_EDOTDOT": 73, + "SYS_EBADMSG": 74, + "SYS_EOVERFLOW": 75, + "SYS_ENOTUNIQ": 76, + "SYS_EBADFD": 77, + "SYS_EREMCHG": 78, + "SYS_ELIBACC": 79, + "SYS_ELIBBAD": 80, + "SYS_ELIBSCN": 81, + "SYS_ELIBMAX": 82, + "SYS_ELIBEXEC": 83, + "SYS_EILSEQ": 84, + "SYS_ERESTART": 85, + "SYS_ESTRPIPE": 86, + "SYS_EUSERS": 87, + "SYS_ENOTSOCK": 88, + "SYS_EDESTADDRREQ": 89, + "SYS_EMSGSIZE": 90, + "SYS_EPROTOTYPE": 91, + "SYS_ENOPROTOOPT": 92, + "SYS_EPROTONOSUPPORT": 93, + "SYS_ESOCKTNOSUPPORT": 94, + "SYS_EOPNOTSUPP": 95, + "SYS_ENOTSUP": 95, + "SYS_EPFNOSUPPORT": 96, + "SYS_EAFNOSUPPORT": 97, + "SYS_EADDRINUSE": 98, + "SYS_EADDRNOTAVAIL": 99, + "SYS_ENETDOWN": 100, + "SYS_ENETUNREACH": 101, + "SYS_ENETRESET": 102, + "SYS_ECONNABORTED": 103, + "SYS_ECONNRESET": 104, + "SYS_ENOBUFS": 105, + "SYS_EISCONN": 106, + "SYS_ENOTCONN": 107, + "SYS_ESHUTDOWN": 108, + "SYS_ETOOMANYREFS": 109, + "SYS_ETIMEDOUT": 110, + "SYS_ECONNREFUSED": 111, + "SYS_EHOSTDOWN": 112, + "SYS_EHOSTUNREACH": 113, + "SYS_EALREADY": 114, + "SYS_EINPROGRESS": 115, + "SYS_ESTALE": 116, + "SYS_EUCLEAN": 117, + "SYS_ENOTNAM": 118, + "SYS_ENAVAIL": 119, + "SYS_EISNAM": 120, + "SYS_EREMOTEIO": 121, + "SYS_EDQUOT": 122, + "SYS_ENOMEDIUM": 123, + "SYS_EMEDIUMTYPE": 124, + "SYS_ECANCELED": 125, + "SYS_ENOKEY": 126, + "SYS_EKEYEXPIRED": 127, + "SYS_EKEYREVOKED": 128, + "SYS_EKEYREJECTED": 129, + "SYS_EOWNERDEAD": 130, + "SYS_ENOTRECOVERABLE": 131, + "SYS_ERFKILL": 132, +} + +func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { + p := new(RemoteSocketServiceError_SystemError) + *p = x + return p +} +func (x RemoteSocketServiceError_SystemError) String() string { + return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) +} +func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") + if err != nil { + return err + } + *x = RemoteSocketServiceError_SystemError(value) + return nil +} + +type CreateSocketRequest_SocketFamily int32 + +const ( + CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 + CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 +) + +var CreateSocketRequest_SocketFamily_name = map[int32]string{ + 1: "IPv4", + 2: "IPv6", +} +var CreateSocketRequest_SocketFamily_value = map[string]int32{ + "IPv4": 1, + "IPv6": 2, +} + +func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { + p := new(CreateSocketRequest_SocketFamily) + *p = x + return p +} +func (x CreateSocketRequest_SocketFamily) String() string { + return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) +} +func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketFamily(value) + return nil +} + +type CreateSocketRequest_SocketProtocol int32 + +const ( + CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 + CreateSocketRequest_UDP 
CreateSocketRequest_SocketProtocol = 2 +) + +var CreateSocketRequest_SocketProtocol_name = map[int32]string{ + 1: "TCP", + 2: "UDP", +} +var CreateSocketRequest_SocketProtocol_value = map[string]int32{ + "TCP": 1, + "UDP": 2, +} + +func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { + p := new(CreateSocketRequest_SocketProtocol) + *p = x + return p +} +func (x CreateSocketRequest_SocketProtocol) String() string { + return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) +} +func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") + if err != nil { + return err + } + *x = CreateSocketRequest_SocketProtocol(value) + return nil +} + +type SocketOption_SocketOptionLevel int32 + +const ( + SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 + SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 + SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 + SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 +) + +var SocketOption_SocketOptionLevel_name = map[int32]string{ + 0: "SOCKET_SOL_IP", + 1: "SOCKET_SOL_SOCKET", + 6: "SOCKET_SOL_TCP", + 17: "SOCKET_SOL_UDP", +} +var SocketOption_SocketOptionLevel_value = map[string]int32{ + "SOCKET_SOL_IP": 0, + "SOCKET_SOL_SOCKET": 1, + "SOCKET_SOL_TCP": 6, + "SOCKET_SOL_UDP": 17, +} + +func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { + p := new(SocketOption_SocketOptionLevel) + *p = x + return p +} +func (x SocketOption_SocketOptionLevel) String() string { + return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) +} +func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") + if err != nil { + return err + } + *x = SocketOption_SocketOptionLevel(value) + return nil +} + +type SocketOption_SocketOptionName int32 + +const ( + SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 + SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 + SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 + SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 + SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 + SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 + SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 + SocketOption_SOCKET_TCP_KEEPINTVL 
SocketOption_SocketOptionName = 5 + SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 + SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 + SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 + SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 + SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 + SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 + SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 +) + +var SocketOption_SocketOptionName_name = map[int32]string{ + 1: "SOCKET_SO_DEBUG", + 2: "SOCKET_SO_REUSEADDR", + 3: "SOCKET_SO_TYPE", + 4: "SOCKET_SO_ERROR", + 5: "SOCKET_SO_DONTROUTE", + 6: "SOCKET_SO_BROADCAST", + 7: "SOCKET_SO_SNDBUF", + 8: "SOCKET_SO_RCVBUF", + 9: "SOCKET_SO_KEEPALIVE", + 10: "SOCKET_SO_OOBINLINE", + 13: "SOCKET_SO_LINGER", + 20: "SOCKET_SO_RCVTIMEO", + 21: "SOCKET_SO_SNDTIMEO", + // Duplicate value: 1: "SOCKET_IP_TOS", + // Duplicate value: 2: "SOCKET_IP_TTL", + // Duplicate value: 3: "SOCKET_IP_HDRINCL", + // Duplicate value: 4: "SOCKET_IP_OPTIONS", + // Duplicate value: 1: "SOCKET_TCP_NODELAY", + // Duplicate value: 2: "SOCKET_TCP_MAXSEG", + // Duplicate value: 3: "SOCKET_TCP_CORK", + // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", + // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", + // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", + // Duplicate value: 7: "SOCKET_TCP_SYNCNT", + // Duplicate value: 8: "SOCKET_TCP_LINGER2", + // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", + // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", + 11: "SOCKET_TCP_INFO", + 12: "SOCKET_TCP_QUICKACK", +} +var SocketOption_SocketOptionName_value = map[string]int32{ + "SOCKET_SO_DEBUG": 1, + "SOCKET_SO_REUSEADDR": 2, + "SOCKET_SO_TYPE": 3, + "SOCKET_SO_ERROR": 4, + "SOCKET_SO_DONTROUTE": 5, + "SOCKET_SO_BROADCAST": 6, + "SOCKET_SO_SNDBUF": 7, + "SOCKET_SO_RCVBUF": 8, + "SOCKET_SO_KEEPALIVE": 9, + "SOCKET_SO_OOBINLINE": 10, + "SOCKET_SO_LINGER": 13, + "SOCKET_SO_RCVTIMEO": 20, + "SOCKET_SO_SNDTIMEO": 21, + "SOCKET_IP_TOS": 1, + "SOCKET_IP_TTL": 2, + "SOCKET_IP_HDRINCL": 3, + "SOCKET_IP_OPTIONS": 4, + "SOCKET_TCP_NODELAY": 1, + "SOCKET_TCP_MAXSEG": 2, + "SOCKET_TCP_CORK": 3, + "SOCKET_TCP_KEEPIDLE": 4, + "SOCKET_TCP_KEEPINTVL": 5, + "SOCKET_TCP_KEEPCNT": 6, + "SOCKET_TCP_SYNCNT": 7, + "SOCKET_TCP_LINGER2": 8, + "SOCKET_TCP_DEFER_ACCEPT": 9, + "SOCKET_TCP_WINDOW_CLAMP": 10, + "SOCKET_TCP_INFO": 11, + "SOCKET_TCP_QUICKACK": 12, +} + +func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { + p := new(SocketOption_SocketOptionName) + *p = x + return p +} +func (x SocketOption_SocketOptionName) String() string { + return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) +} +func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") + if err != nil { + return err + } + *x = SocketOption_SocketOptionName(value) + return nil +} + +type ShutDownRequest_How int32 + +const ( + ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 + ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 + ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 +) + +var ShutDownRequest_How_name = map[int32]string{ + 1: "SOCKET_SHUT_RD", + 2: "SOCKET_SHUT_WR", + 3: "SOCKET_SHUT_RDWR", +} +var ShutDownRequest_How_value = map[string]int32{ + "SOCKET_SHUT_RD": 1, + "SOCKET_SHUT_WR": 2, + "SOCKET_SHUT_RDWR": 3, +} + +func (x ShutDownRequest_How) Enum() 
*ShutDownRequest_How { + p := new(ShutDownRequest_How) + *p = x + return p +} +func (x ShutDownRequest_How) String() string { + return proto.EnumName(ShutDownRequest_How_name, int32(x)) +} +func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") + if err != nil { + return err + } + *x = ShutDownRequest_How(value) + return nil +} + +type ReceiveRequest_Flags int32 + +const ( + ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 + ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 +) + +var ReceiveRequest_Flags_name = map[int32]string{ + 1: "MSG_OOB", + 2: "MSG_PEEK", +} +var ReceiveRequest_Flags_value = map[string]int32{ + "MSG_OOB": 1, + "MSG_PEEK": 2, +} + +func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { + p := new(ReceiveRequest_Flags) + *p = x + return p +} +func (x ReceiveRequest_Flags) String() string { + return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) +} +func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") + if err != nil { + return err + } + *x = ReceiveRequest_Flags(value) + return nil +} + +type PollEvent_PollEventFlag int32 + +const ( + PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 + PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 + PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 + PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 + PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 + PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 + PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 + PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 + PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 + PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 + PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 + PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 + PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 + PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 +) + +var PollEvent_PollEventFlag_name = map[int32]string{ + 0: "SOCKET_POLLNONE", + 1: "SOCKET_POLLIN", + 2: "SOCKET_POLLPRI", + 4: "SOCKET_POLLOUT", + 8: "SOCKET_POLLERR", + 16: "SOCKET_POLLHUP", + 32: "SOCKET_POLLNVAL", + 64: "SOCKET_POLLRDNORM", + 128: "SOCKET_POLLRDBAND", + 256: "SOCKET_POLLWRNORM", + 512: "SOCKET_POLLWRBAND", + 1024: "SOCKET_POLLMSG", + 4096: "SOCKET_POLLREMOVE", + 8192: "SOCKET_POLLRDHUP", +} +var PollEvent_PollEventFlag_value = map[string]int32{ + "SOCKET_POLLNONE": 0, + "SOCKET_POLLIN": 1, + "SOCKET_POLLPRI": 2, + "SOCKET_POLLOUT": 4, + "SOCKET_POLLERR": 8, + "SOCKET_POLLHUP": 16, + "SOCKET_POLLNVAL": 32, + "SOCKET_POLLRDNORM": 64, + "SOCKET_POLLRDBAND": 128, + "SOCKET_POLLWRNORM": 256, + "SOCKET_POLLWRBAND": 512, + "SOCKET_POLLMSG": 1024, + "SOCKET_POLLREMOVE": 4096, + "SOCKET_POLLRDHUP": 8192, +} + +func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { + p := new(PollEvent_PollEventFlag) + *p = x + return p +} +func (x PollEvent_PollEventFlag) String() string { + return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) +} +func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") + if err != nil { + return err + } + *x = PollEvent_PollEventFlag(value) + return nil +} + +type ResolveReply_ErrorCode int32 + +const ( + ResolveReply_SOCKET_EAI_ADDRFAMILY 
ResolveReply_ErrorCode = 1 + ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 + ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 + ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 + ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 + ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 + ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 + ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 + ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 + ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 + ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 + ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 + ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 + ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 + ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 +) + +var ResolveReply_ErrorCode_name = map[int32]string{ + 1: "SOCKET_EAI_ADDRFAMILY", + 2: "SOCKET_EAI_AGAIN", + 3: "SOCKET_EAI_BADFLAGS", + 4: "SOCKET_EAI_FAIL", + 5: "SOCKET_EAI_FAMILY", + 6: "SOCKET_EAI_MEMORY", + 7: "SOCKET_EAI_NODATA", + 8: "SOCKET_EAI_NONAME", + 9: "SOCKET_EAI_SERVICE", + 10: "SOCKET_EAI_SOCKTYPE", + 11: "SOCKET_EAI_SYSTEM", + 12: "SOCKET_EAI_BADHINTS", + 13: "SOCKET_EAI_PROTOCOL", + 14: "SOCKET_EAI_OVERFLOW", + 15: "SOCKET_EAI_MAX", +} +var ResolveReply_ErrorCode_value = map[string]int32{ + "SOCKET_EAI_ADDRFAMILY": 1, + "SOCKET_EAI_AGAIN": 2, + "SOCKET_EAI_BADFLAGS": 3, + "SOCKET_EAI_FAIL": 4, + "SOCKET_EAI_FAMILY": 5, + "SOCKET_EAI_MEMORY": 6, + "SOCKET_EAI_NODATA": 7, + "SOCKET_EAI_NONAME": 8, + "SOCKET_EAI_SERVICE": 9, + "SOCKET_EAI_SOCKTYPE": 10, + "SOCKET_EAI_SYSTEM": 11, + "SOCKET_EAI_BADHINTS": 12, + "SOCKET_EAI_PROTOCOL": 13, + "SOCKET_EAI_OVERFLOW": 14, + "SOCKET_EAI_MAX": 15, +} + +func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { + p := new(ResolveReply_ErrorCode) + *p = x + return p +} +func (x ResolveReply_ErrorCode) String() string { + return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) +} +func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") + if err != nil { + return err + } + *x = ResolveReply_ErrorCode(value) + return nil +} + +type RemoteSocketServiceError struct { + SystemError *int32 `protobuf:"varint,1,opt,name=system_error,def=0" json:"system_error,omitempty"` + ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } +func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } +func (*RemoteSocketServiceError) ProtoMessage() {} + +const Default_RemoteSocketServiceError_SystemError int32 = 0 + +func (m *RemoteSocketServiceError) GetSystemError() int32 { + if m != nil && m.SystemError != nil { + return *m.SystemError + } + return Default_RemoteSocketServiceError_SystemError +} + +func (m *RemoteSocketServiceError) GetErrorDetail() string { + if m != nil && m.ErrorDetail != nil { + return *m.ErrorDetail + } + return "" +} + +type AddressPort struct { + Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` + PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address" json:"packed_address,omitempty"` + HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint" json:"hostname_hint,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + 
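+ // NOTE (editor): a minimal usage sketch for the generated proto2 API above.
+ // The values are hypothetical, not part of the generated file. Scalar fields
+ // are pointers, so they are set via the proto helper constructors and read
+ // through the nil-safe Get* accessors:
+ //
+ //	ap := &AddressPort{
+ //		Port:         proto.Int32(8080),           // required field
+ //		HostnameHint: proto.String("example.com"), // optional hint
+ //	}
+ //	wire, err := proto.Marshal(ap) // encode to the protobuf wire format
+ //	_, _ = wire, err
+ //	port := ap.GetPort() // 8080; returns 0 if ap or ap.Port is nil
+ //	_ = port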
+func (m *AddressPort) Reset() { *m = AddressPort{} } +func (m *AddressPort) String() string { return proto.CompactTextString(m) } +func (*AddressPort) ProtoMessage() {} + +func (m *AddressPort) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return 0 +} + +func (m *AddressPort) GetPackedAddress() []byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *AddressPort) GetHostnameHint() string { + if m != nil && m.HostnameHint != nil { + return *m.HostnameHint + } + return "" +} + +type CreateSocketRequest struct { + Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` + Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` + SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options" json:"socket_options,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,def=0" json:"listen_backlog,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip" json:"remote_ip,omitempty"` + AppId *string `protobuf:"bytes,9,opt,name=app_id" json:"app_id,omitempty"` + ProjectId *int64 `protobuf:"varint,10,opt,name=project_id" json:"project_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } +func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSocketRequest) ProtoMessage() {} + +const Default_CreateSocketRequest_ListenBacklog int32 = 0 + +func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { + if m != nil && m.Family != nil { + return *m.Family + } + return CreateSocketRequest_IPv4 +} + +func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { + if m != nil && m.Protocol != nil { + return *m.Protocol + } + return CreateSocketRequest_TCP +} + +func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { + if m != nil { + return m.SocketOptions + } + return nil +} + +func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +func (m *CreateSocketRequest) GetListenBacklog() int32 { + if m != nil && m.ListenBacklog != nil { + return *m.ListenBacklog + } + return Default_CreateSocketRequest_ListenBacklog +} + +func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *CreateSocketRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CreateSocketRequest) GetProjectId() int64 { + if m != nil && m.ProjectId != nil { + return *m.ProjectId + } + return 0 +} + +type CreateSocketReply struct { + SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor" json:"socket_descriptor,omitempty"` + ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address" json:"server_address,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } +func (m *CreateSocketReply) String() string { return 
proto.CompactTextString(m) } +func (*CreateSocketReply) ProtoMessage() {} + +var extRange_CreateSocketReply = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_CreateSocketReply +} +func (m *CreateSocketReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *CreateSocketReply) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CreateSocketReply) GetServerAddress() *AddressPort { + if m != nil { + return m.ServerAddress + } + return nil +} + +func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BindRequest) Reset() { *m = BindRequest{} } +func (m *BindRequest) String() string { return proto.CompactTextString(m) } +func (*BindRequest) ProtoMessage() {} + +func (m *BindRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *BindRequest) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type BindReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BindReply) Reset() { *m = BindReply{} } +func (m *BindReply) String() string { return proto.CompactTextString(m) } +func (*BindReply) ProtoMessage() {} + +func (m *BindReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetSocketNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } +func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameRequest) ProtoMessage() {} + +func (m *GetSocketNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetSocketNameReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } +func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketNameReply) ProtoMessage() {} + +func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type GetPeerNameRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } +func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameRequest) ProtoMessage() {} + +func (m 
*GetPeerNameRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +type GetPeerNameReply struct { + PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip" json:"peer_ip,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } +func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } +func (*GetPeerNameReply) ProtoMessage() {} + +func (m *GetPeerNameReply) GetPeerIp() *AddressPort { + if m != nil { + return m.PeerIp + } + return nil +} + +type SocketOption struct { + Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` + Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` + Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SocketOption) Reset() { *m = SocketOption{} } +func (m *SocketOption) String() string { return proto.CompactTextString(m) } +func (*SocketOption) ProtoMessage() {} + +func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { + if m != nil && m.Level != nil { + return *m.Level + } + return SocketOption_SOCKET_SOL_IP +} + +func (m *SocketOption) GetOption() SocketOption_SocketOptionName { + if m != nil && m.Option != nil { + return *m.Option + } + return SocketOption_SOCKET_SO_DEBUG +} + +func (m *SocketOption) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type SetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } +func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsRequest) ProtoMessage() {} + +func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type SetSocketOptionsReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } +func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*SetSocketOptionsReply) ProtoMessage() {} + +type GetSocketOptionsRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } +func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsRequest) ProtoMessage() {} + +func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type GetSocketOptionsReply struct { + 
Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } +func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } +func (*GetSocketOptionsReply) ProtoMessage() {} + +func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { + if m != nil { + return m.Options + } + return nil +} + +type ConnectRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip" json:"remote_ip,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } +func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } +func (*ConnectRequest) ProtoMessage() {} + +const Default_ConnectRequest_TimeoutSeconds float64 = -1 + +func (m *ConnectRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ConnectRequest) GetRemoteIp() *AddressPort { + if m != nil { + return m.RemoteIp + } + return nil +} + +func (m *ConnectRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ConnectRequest_TimeoutSeconds +} + +type ConnectReply struct { + ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip" json:"proxy_external_ip,omitempty"` + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConnectReply) Reset() { *m = ConnectReply{} } +func (m *ConnectReply) String() string { return proto.CompactTextString(m) } +func (*ConnectReply) ProtoMessage() {} + +var extRange_ConnectReply = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ConnectReply +} +func (m *ConnectReply) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +func (m *ConnectReply) GetProxyExternalIp() *AddressPort { + if m != nil { + return m.ProxyExternalIp + } + return nil +} + +type ListenRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListenRequest) Reset() { *m = ListenRequest{} } +func (m *ListenRequest) String() string { return proto.CompactTextString(m) } +func (*ListenRequest) ProtoMessage() {} + +func (m *ListenRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ListenRequest) GetBacklog() int32 { + if m != nil && m.Backlog != nil { + return *m.Backlog + } + return 0 +} + +type ListenReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListenReply) Reset() { *m = ListenReply{} } +func (m *ListenReply) String() string { return proto.CompactTextString(m) } +func (*ListenReply) ProtoMessage() {} + +type AcceptRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + 
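+ // NOTE (editor): the def=-1 in the protobuf tag below is surfaced as
+ // Default_AcceptRequest_TimeoutSeconds, which GetTimeoutSeconds() returns
+ // whenever the field is unset; -1 presumably means "no timeout".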
TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } +func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } +func (*AcceptRequest) ProtoMessage() {} + +const Default_AcceptRequest_TimeoutSeconds float64 = -1 + +func (m *AcceptRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *AcceptRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_AcceptRequest_TimeoutSeconds +} + +type AcceptReply struct { + NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor" json:"new_socket_descriptor,omitempty"` + RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address" json:"remote_address,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AcceptReply) Reset() { *m = AcceptReply{} } +func (m *AcceptReply) String() string { return proto.CompactTextString(m) } +func (*AcceptReply) ProtoMessage() {} + +func (m *AcceptReply) GetNewSocketDescriptor() []byte { + if m != nil { + return m.NewSocketDescriptor + } + return nil +} + +func (m *AcceptReply) GetRemoteAddress() *AddressPort { + if m != nil { + return m.RemoteAddress + } + return nil +} + +type ShutDownRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` + SendOffset *int64 `protobuf:"varint,3,req,name=send_offset" json:"send_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } +func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutDownRequest) ProtoMessage() {} + +func (m *ShutDownRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ShutDownRequest) GetHow() ShutDownRequest_How { + if m != nil && m.How != nil { + return *m.How + } + return ShutDownRequest_SOCKET_SHUT_RD +} + +func (m *ShutDownRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return 0 +} + +type ShutDownReply struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } +func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } +func (*ShutDownReply) ProtoMessage() {} + +type CloseRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,def=-1" json:"send_offset,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} + +const Default_CloseRequest_SendOffset int64 = -1 + +func (m *CloseRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *CloseRequest) GetSendOffset() int64 { + if m != nil && m.SendOffset != nil { + return *m.SendOffset + } + return Default_CloseRequest_SendOffset +} + +type CloseReply struct { + 
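+ // NOTE (editor): XXX_unrecognized holds any field bytes the decoder did not
+ // recognize, so unknown fields survive an Unmarshal/Marshal round trip.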
XXX_unrecognized []byte `json:"-"` +} + +func (m *CloseReply) Reset() { *m = CloseReply{} } +func (m *CloseReply) String() string { return proto.CompactTextString(m) } +func (*CloseReply) ProtoMessage() {} + +type SendRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` + StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset" json:"stream_offset,omitempty"` + Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` + SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to" json:"send_to,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendRequest) Reset() { *m = SendRequest{} } +func (m *SendRequest) String() string { return proto.CompactTextString(m) } +func (*SendRequest) ProtoMessage() {} + +const Default_SendRequest_Flags int32 = 0 +const Default_SendRequest_TimeoutSeconds float64 = -1 + +func (m *SendRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *SendRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *SendRequest) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *SendRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_SendRequest_Flags +} + +func (m *SendRequest) GetSendTo() *AddressPort { + if m != nil { + return m.SendTo + } + return nil +} + +func (m *SendRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_SendRequest_TimeoutSeconds +} + +type SendReply struct { + DataSent *int32 `protobuf:"varint,1,opt,name=data_sent" json:"data_sent,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SendReply) Reset() { *m = SendReply{} } +func (m *SendReply) String() string { return proto.CompactTextString(m) } +func (*SendReply) ProtoMessage() {} + +func (m *SendReply) GetDataSent() int32 { + if m != nil && m.DataSent != nil { + return *m.DataSent + } + return 0 +} + +type ReceiveRequest struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + DataSize *int32 `protobuf:"varint,2,req,name=data_size" json:"data_size,omitempty"` + Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } +func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } +func (*ReceiveRequest) ProtoMessage() {} + +const Default_ReceiveRequest_Flags int32 = 0 +const Default_ReceiveRequest_TimeoutSeconds float64 = -1 + +func (m *ReceiveRequest) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *ReceiveRequest) GetDataSize() int32 { + if m != nil && m.DataSize != nil { + return *m.DataSize + } + return 0 +} + +func (m *ReceiveRequest) GetFlags() int32 { + if m != nil && m.Flags != nil { + return *m.Flags + } + return Default_ReceiveRequest_Flags +} + +func (m *ReceiveRequest) 
GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_ReceiveRequest_TimeoutSeconds +} + +type ReceiveReply struct { + StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset" json:"stream_offset,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from" json:"received_from,omitempty"` + BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size" json:"buffer_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } +func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } +func (*ReceiveReply) ProtoMessage() {} + +func (m *ReceiveReply) GetStreamOffset() int64 { + if m != nil && m.StreamOffset != nil { + return *m.StreamOffset + } + return 0 +} + +func (m *ReceiveReply) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReceiveReply) GetReceivedFrom() *AddressPort { + if m != nil { + return m.ReceivedFrom + } + return nil +} + +func (m *ReceiveReply) GetBufferSize() int32 { + if m != nil && m.BufferSize != nil { + return *m.BufferSize + } + return 0 +} + +type PollEvent struct { + SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor" json:"socket_descriptor,omitempty"` + RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events" json:"requested_events,omitempty"` + ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events" json:"observed_events,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollEvent) Reset() { *m = PollEvent{} } +func (m *PollEvent) String() string { return proto.CompactTextString(m) } +func (*PollEvent) ProtoMessage() {} + +func (m *PollEvent) GetSocketDescriptor() string { + if m != nil && m.SocketDescriptor != nil { + return *m.SocketDescriptor + } + return "" +} + +func (m *PollEvent) GetRequestedEvents() int32 { + if m != nil && m.RequestedEvents != nil { + return *m.RequestedEvents + } + return 0 +} + +func (m *PollEvent) GetObservedEvents() int32 { + if m != nil && m.ObservedEvents != nil { + return *m.ObservedEvents + } + return 0 +} + +type PollRequest struct { + Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,def=-1" json:"timeout_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollRequest) Reset() { *m = PollRequest{} } +func (m *PollRequest) String() string { return proto.CompactTextString(m) } +func (*PollRequest) ProtoMessage() {} + +const Default_PollRequest_TimeoutSeconds float64 = -1 + +func (m *PollRequest) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +func (m *PollRequest) GetTimeoutSeconds() float64 { + if m != nil && m.TimeoutSeconds != nil { + return *m.TimeoutSeconds + } + return Default_PollRequest_TimeoutSeconds +} + +type PollReply struct { + Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PollReply) Reset() { *m = PollReply{} } +func (m *PollReply) String() string { return proto.CompactTextString(m) } +func (*PollReply) ProtoMessage() {} + +func (m *PollReply) GetEvents() []*PollEvent { + if m != nil { + return m.Events + } + return nil +} + +type ResolveRequest struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + AddressFamilies 
[]CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } +func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveRequest) ProtoMessage() {} + +func (m *ResolveRequest) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { + if m != nil { + return m.AddressFamilies + } + return nil +} + +type ResolveReply struct { + PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address" json:"packed_address,omitempty"` + CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name" json:"canonical_name,omitempty"` + Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ResolveReply) Reset() { *m = ResolveReply{} } +func (m *ResolveReply) String() string { return proto.CompactTextString(m) } +func (*ResolveReply) ProtoMessage() {} + +func (m *ResolveReply) GetPackedAddress() [][]byte { + if m != nil { + return m.PackedAddress + } + return nil +} + +func (m *ResolveReply) GetCanonicalName() string { + if m != nil && m.CanonicalName != nil { + return *m.CanonicalName + } + return "" +} + +func (m *ResolveReply) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto new file mode 100644 index 0000000..2fcc795 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto @@ -0,0 +1,460 @@ +syntax = "proto2"; +option go_package = "socket"; + +package appengine; + +message RemoteSocketServiceError { + enum ErrorCode { + SYSTEM_ERROR = 1; + GAI_ERROR = 2; + FAILURE = 4; + PERMISSION_DENIED = 5; + INVALID_REQUEST = 6; + SOCKET_CLOSED = 7; + } + + enum SystemError { + option allow_alias = true; + + SYS_SUCCESS = 0; + SYS_EPERM = 1; + SYS_ENOENT = 2; + SYS_ESRCH = 3; + SYS_EINTR = 4; + SYS_EIO = 5; + SYS_ENXIO = 6; + SYS_E2BIG = 7; + SYS_ENOEXEC = 8; + SYS_EBADF = 9; + SYS_ECHILD = 10; + SYS_EAGAIN = 11; + SYS_EWOULDBLOCK = 11; + SYS_ENOMEM = 12; + SYS_EACCES = 13; + SYS_EFAULT = 14; + SYS_ENOTBLK = 15; + SYS_EBUSY = 16; + SYS_EEXIST = 17; + SYS_EXDEV = 18; + SYS_ENODEV = 19; + SYS_ENOTDIR = 20; + SYS_EISDIR = 21; + SYS_EINVAL = 22; + SYS_ENFILE = 23; + SYS_EMFILE = 24; + SYS_ENOTTY = 25; + SYS_ETXTBSY = 26; + SYS_EFBIG = 27; + SYS_ENOSPC = 28; + SYS_ESPIPE = 29; + SYS_EROFS = 30; + SYS_EMLINK = 31; + SYS_EPIPE = 32; + SYS_EDOM = 33; + SYS_ERANGE = 34; + SYS_EDEADLK = 35; + SYS_EDEADLOCK = 35; + SYS_ENAMETOOLONG = 36; + SYS_ENOLCK = 37; + SYS_ENOSYS = 38; + SYS_ENOTEMPTY = 39; + SYS_ELOOP = 40; + SYS_ENOMSG = 42; + SYS_EIDRM = 43; + SYS_ECHRNG = 44; + SYS_EL2NSYNC = 45; + SYS_EL3HLT = 46; + SYS_EL3RST = 47; + SYS_ELNRNG = 48; + SYS_EUNATCH = 49; + SYS_ENOCSI = 50; + SYS_EL2HLT = 51; + SYS_EBADE = 52; + SYS_EBADR = 53; + SYS_EXFULL = 54; + SYS_ENOANO = 55; + SYS_EBADRQC = 56; + SYS_EBADSLT = 57; + SYS_EBFONT = 59; + SYS_ENOSTR = 60; + SYS_ENODATA = 61; + SYS_ETIME = 62; + SYS_ENOSR = 63; + SYS_ENONET = 64; + SYS_ENOPKG = 65; + SYS_EREMOTE = 66; + SYS_ENOLINK = 67; + SYS_EADV = 68; + SYS_ESRMNT = 69; + SYS_ECOMM = 70; + SYS_EPROTO = 71; 
+ SYS_EMULTIHOP = 72; + SYS_EDOTDOT = 73; + SYS_EBADMSG = 74; + SYS_EOVERFLOW = 75; + SYS_ENOTUNIQ = 76; + SYS_EBADFD = 77; + SYS_EREMCHG = 78; + SYS_ELIBACC = 79; + SYS_ELIBBAD = 80; + SYS_ELIBSCN = 81; + SYS_ELIBMAX = 82; + SYS_ELIBEXEC = 83; + SYS_EILSEQ = 84; + SYS_ERESTART = 85; + SYS_ESTRPIPE = 86; + SYS_EUSERS = 87; + SYS_ENOTSOCK = 88; + SYS_EDESTADDRREQ = 89; + SYS_EMSGSIZE = 90; + SYS_EPROTOTYPE = 91; + SYS_ENOPROTOOPT = 92; + SYS_EPROTONOSUPPORT = 93; + SYS_ESOCKTNOSUPPORT = 94; + SYS_EOPNOTSUPP = 95; + SYS_ENOTSUP = 95; + SYS_EPFNOSUPPORT = 96; + SYS_EAFNOSUPPORT = 97; + SYS_EADDRINUSE = 98; + SYS_EADDRNOTAVAIL = 99; + SYS_ENETDOWN = 100; + SYS_ENETUNREACH = 101; + SYS_ENETRESET = 102; + SYS_ECONNABORTED = 103; + SYS_ECONNRESET = 104; + SYS_ENOBUFS = 105; + SYS_EISCONN = 106; + SYS_ENOTCONN = 107; + SYS_ESHUTDOWN = 108; + SYS_ETOOMANYREFS = 109; + SYS_ETIMEDOUT = 110; + SYS_ECONNREFUSED = 111; + SYS_EHOSTDOWN = 112; + SYS_EHOSTUNREACH = 113; + SYS_EALREADY = 114; + SYS_EINPROGRESS = 115; + SYS_ESTALE = 116; + SYS_EUCLEAN = 117; + SYS_ENOTNAM = 118; + SYS_ENAVAIL = 119; + SYS_EISNAM = 120; + SYS_EREMOTEIO = 121; + SYS_EDQUOT = 122; + SYS_ENOMEDIUM = 123; + SYS_EMEDIUMTYPE = 124; + SYS_ECANCELED = 125; + SYS_ENOKEY = 126; + SYS_EKEYEXPIRED = 127; + SYS_EKEYREVOKED = 128; + SYS_EKEYREJECTED = 129; + SYS_EOWNERDEAD = 130; + SYS_ENOTRECOVERABLE = 131; + SYS_ERFKILL = 132; + } + + optional int32 system_error = 1 [default=0]; + optional string error_detail = 2; +} + +message AddressPort { + required int32 port = 1; + optional bytes packed_address = 2; + + optional string hostname_hint = 3; +} + + + +message CreateSocketRequest { + enum SocketFamily { + IPv4 = 1; + IPv6 = 2; + } + + enum SocketProtocol { + TCP = 1; + UDP = 2; + } + + required SocketFamily family = 1; + required SocketProtocol protocol = 2; + + repeated SocketOption socket_options = 3; + + optional AddressPort proxy_external_ip = 4; + + optional int32 listen_backlog = 5 [default=0]; + + optional AddressPort remote_ip = 6; + + optional string app_id = 9; + + optional int64 project_id = 10; +} + +message CreateSocketReply { + optional string socket_descriptor = 1; + + optional AddressPort server_address = 3; + + optional AddressPort proxy_external_ip = 4; + + extensions 1000 to max; +} + + + +message BindRequest { + required string socket_descriptor = 1; + required AddressPort proxy_external_ip = 2; +} + +message BindReply { + optional AddressPort proxy_external_ip = 1; +} + + + +message GetSocketNameRequest { + required string socket_descriptor = 1; +} + +message GetSocketNameReply { + optional AddressPort proxy_external_ip = 2; +} + + + +message GetPeerNameRequest { + required string socket_descriptor = 1; +} + +message GetPeerNameReply { + optional AddressPort peer_ip = 2; +} + + +message SocketOption { + + enum SocketOptionLevel { + SOCKET_SOL_IP = 0; + SOCKET_SOL_SOCKET = 1; + SOCKET_SOL_TCP = 6; + SOCKET_SOL_UDP = 17; + } + + enum SocketOptionName { + option allow_alias = true; + + SOCKET_SO_DEBUG = 1; + SOCKET_SO_REUSEADDR = 2; + SOCKET_SO_TYPE = 3; + SOCKET_SO_ERROR = 4; + SOCKET_SO_DONTROUTE = 5; + SOCKET_SO_BROADCAST = 6; + SOCKET_SO_SNDBUF = 7; + SOCKET_SO_RCVBUF = 8; + SOCKET_SO_KEEPALIVE = 9; + SOCKET_SO_OOBINLINE = 10; + SOCKET_SO_LINGER = 13; + SOCKET_SO_RCVTIMEO = 20; + SOCKET_SO_SNDTIMEO = 21; + + SOCKET_IP_TOS = 1; + SOCKET_IP_TTL = 2; + SOCKET_IP_HDRINCL = 3; + SOCKET_IP_OPTIONS = 4; + + SOCKET_TCP_NODELAY = 1; + SOCKET_TCP_MAXSEG = 2; + SOCKET_TCP_CORK = 3; + SOCKET_TCP_KEEPIDLE = 4; + 
SOCKET_TCP_KEEPINTVL = 5; + SOCKET_TCP_KEEPCNT = 6; + SOCKET_TCP_SYNCNT = 7; + SOCKET_TCP_LINGER2 = 8; + SOCKET_TCP_DEFER_ACCEPT = 9; + SOCKET_TCP_WINDOW_CLAMP = 10; + SOCKET_TCP_INFO = 11; + SOCKET_TCP_QUICKACK = 12; + } + + required SocketOptionLevel level = 1; + required SocketOptionName option = 2; + required bytes value = 3; +} + + +message SetSocketOptionsRequest { + required string socket_descriptor = 1; + repeated SocketOption options = 2; +} + +message SetSocketOptionsReply { +} + +message GetSocketOptionsRequest { + required string socket_descriptor = 1; + repeated SocketOption options = 2; +} + +message GetSocketOptionsReply { + repeated SocketOption options = 2; +} + + +message ConnectRequest { + required string socket_descriptor = 1; + required AddressPort remote_ip = 2; + optional double timeout_seconds = 3 [default=-1]; +} + +message ConnectReply { + optional AddressPort proxy_external_ip = 1; + + extensions 1000 to max; +} + + +message ListenRequest { + required string socket_descriptor = 1; + required int32 backlog = 2; +} + +message ListenReply { +} + + +message AcceptRequest { + required string socket_descriptor = 1; + optional double timeout_seconds = 2 [default=-1]; +} + +message AcceptReply { + optional bytes new_socket_descriptor = 2; + optional AddressPort remote_address = 3; +} + + + +message ShutDownRequest { + enum How { + SOCKET_SHUT_RD = 1; + SOCKET_SHUT_WR = 2; + SOCKET_SHUT_RDWR = 3; + } + required string socket_descriptor = 1; + required How how = 2; + required int64 send_offset = 3; +} + +message ShutDownReply { +} + + + +message CloseRequest { + required string socket_descriptor = 1; + optional int64 send_offset = 2 [default=-1]; +} + +message CloseReply { +} + + + +message SendRequest { + required string socket_descriptor = 1; + required bytes data = 2 [ctype=CORD]; + required int64 stream_offset = 3; + optional int32 flags = 4 [default=0]; + optional AddressPort send_to = 5; + optional double timeout_seconds = 6 [default=-1]; +} + +message SendReply { + optional int32 data_sent = 1; +} + + +message ReceiveRequest { + enum Flags { + MSG_OOB = 1; + MSG_PEEK = 2; + } + required string socket_descriptor = 1; + required int32 data_size = 2; + optional int32 flags = 3 [default=0]; + optional double timeout_seconds = 5 [default=-1]; +} + +message ReceiveReply { + optional int64 stream_offset = 2; + optional bytes data = 3 [ctype=CORD]; + optional AddressPort received_from = 4; + optional int32 buffer_size = 5; +} + + + +message PollEvent { + + enum PollEventFlag { + SOCKET_POLLNONE = 0; + SOCKET_POLLIN = 1; + SOCKET_POLLPRI = 2; + SOCKET_POLLOUT = 4; + SOCKET_POLLERR = 8; + SOCKET_POLLHUP = 16; + SOCKET_POLLNVAL = 32; + SOCKET_POLLRDNORM = 64; + SOCKET_POLLRDBAND = 128; + SOCKET_POLLWRNORM = 256; + SOCKET_POLLWRBAND = 512; + SOCKET_POLLMSG = 1024; + SOCKET_POLLREMOVE = 4096; + SOCKET_POLLRDHUP = 8192; + }; + + required string socket_descriptor = 1; + required int32 requested_events = 2; + required int32 observed_events = 3; +} + +message PollRequest { + repeated PollEvent events = 1; + optional double timeout_seconds = 2 [default=-1]; +} + +message PollReply { + repeated PollEvent events = 2; +} + +message ResolveRequest { + required string name = 1; + repeated CreateSocketRequest.SocketFamily address_families = 2; +} + +message ResolveReply { + enum ErrorCode { + SOCKET_EAI_ADDRFAMILY = 1; + SOCKET_EAI_AGAIN = 2; + SOCKET_EAI_BADFLAGS = 3; + SOCKET_EAI_FAIL = 4; + SOCKET_EAI_FAMILY = 5; + SOCKET_EAI_MEMORY = 6; + SOCKET_EAI_NODATA = 7; + SOCKET_EAI_NONAME = 8; 
+ SOCKET_EAI_SERVICE = 9; + SOCKET_EAI_SOCKTYPE = 10; + SOCKET_EAI_SYSTEM = 11; + SOCKET_EAI_BADHINTS = 12; + SOCKET_EAI_PROTOCOL = 13; + SOCKET_EAI_OVERFLOW = 14; + SOCKET_EAI_MAX = 15; + }; + + repeated bytes packed_address = 2; + optional string canonical_name = 3; + repeated string aliases = 4; +} diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.pb.go b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go new file mode 100644 index 0000000..56cc3f8 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/system/system_service.pb.go @@ -0,0 +1,198 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/system/system_service.proto +// DO NOT EDIT! + +/* +Package system is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/system/system_service.proto + +It has these top-level messages: + SystemServiceError + SystemStat + GetSystemStatsRequest + GetSystemStatsResponse + StartBackgroundRequestRequest + StartBackgroundRequestResponse +*/ +package system + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SystemServiceError_ErrorCode int32 + +const ( + SystemServiceError_OK SystemServiceError_ErrorCode = 0 + SystemServiceError_INTERNAL_ERROR SystemServiceError_ErrorCode = 1 + SystemServiceError_BACKEND_REQUIRED SystemServiceError_ErrorCode = 2 + SystemServiceError_LIMIT_REACHED SystemServiceError_ErrorCode = 3 +) + +var SystemServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INTERNAL_ERROR", + 2: "BACKEND_REQUIRED", + 3: "LIMIT_REACHED", +} +var SystemServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INTERNAL_ERROR": 1, + "BACKEND_REQUIRED": 2, + "LIMIT_REACHED": 3, +} + +func (x SystemServiceError_ErrorCode) Enum() *SystemServiceError_ErrorCode { + p := new(SystemServiceError_ErrorCode) + *p = x + return p +} +func (x SystemServiceError_ErrorCode) String() string { + return proto.EnumName(SystemServiceError_ErrorCode_name, int32(x)) +} +func (x *SystemServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SystemServiceError_ErrorCode_value, data, "SystemServiceError_ErrorCode") + if err != nil { + return err + } + *x = SystemServiceError_ErrorCode(value) + return nil +} + +type SystemServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *SystemServiceError) Reset() { *m = SystemServiceError{} } +func (m *SystemServiceError) String() string { return proto.CompactTextString(m) } +func (*SystemServiceError) ProtoMessage() {} + +type SystemStat struct { + // Instantaneous value of this stat. + Current *float64 `protobuf:"fixed64,1,opt,name=current" json:"current,omitempty"` + // Average over time, if this stat has an instantaneous value. + Average1M *float64 `protobuf:"fixed64,3,opt,name=average1m" json:"average1m,omitempty"` + Average10M *float64 `protobuf:"fixed64,4,opt,name=average10m" json:"average10m,omitempty"` + // Total value, if the stat accumulates over time. + Total *float64 `protobuf:"fixed64,2,opt,name=total" json:"total,omitempty"` + // Rate over time, if this stat accumulates.
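+ // (By analogy with average1m and average10m above, the 1m/10m suffixes presumably denote one-minute and ten-minute windows.)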
+ Rate1M *float64 `protobuf:"fixed64,5,opt,name=rate1m" json:"rate1m,omitempty"` + Rate10M *float64 `protobuf:"fixed64,6,opt,name=rate10m" json:"rate10m,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SystemStat) Reset() { *m = SystemStat{} } +func (m *SystemStat) String() string { return proto.CompactTextString(m) } +func (*SystemStat) ProtoMessage() {} + +func (m *SystemStat) GetCurrent() float64 { + if m != nil && m.Current != nil { + return *m.Current + } + return 0 +} + +func (m *SystemStat) GetAverage1M() float64 { + if m != nil && m.Average1M != nil { + return *m.Average1M + } + return 0 +} + +func (m *SystemStat) GetAverage10M() float64 { + if m != nil && m.Average10M != nil { + return *m.Average10M + } + return 0 +} + +func (m *SystemStat) GetTotal() float64 { + if m != nil && m.Total != nil { + return *m.Total + } + return 0 +} + +func (m *SystemStat) GetRate1M() float64 { + if m != nil && m.Rate1M != nil { + return *m.Rate1M + } + return 0 +} + +func (m *SystemStat) GetRate10M() float64 { + if m != nil && m.Rate10M != nil { + return *m.Rate10M + } + return 0 +} + +type GetSystemStatsRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSystemStatsRequest) Reset() { *m = GetSystemStatsRequest{} } +func (m *GetSystemStatsRequest) String() string { return proto.CompactTextString(m) } +func (*GetSystemStatsRequest) ProtoMessage() {} + +type GetSystemStatsResponse struct { + // CPU used by this instance, in mcycles. + Cpu *SystemStat `protobuf:"bytes,1,opt,name=cpu" json:"cpu,omitempty"` + // Physical memory (RAM) used by this instance, in megabytes. + Memory *SystemStat `protobuf:"bytes,2,opt,name=memory" json:"memory,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetSystemStatsResponse) Reset() { *m = GetSystemStatsResponse{} } +func (m *GetSystemStatsResponse) String() string { return proto.CompactTextString(m) } +func (*GetSystemStatsResponse) ProtoMessage() {} + +func (m *GetSystemStatsResponse) GetCpu() *SystemStat { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *GetSystemStatsResponse) GetMemory() *SystemStat { + if m != nil { + return m.Memory + } + return nil +} + +type StartBackgroundRequestRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartBackgroundRequestRequest) Reset() { *m = StartBackgroundRequestRequest{} } +func (m *StartBackgroundRequestRequest) String() string { return proto.CompactTextString(m) } +func (*StartBackgroundRequestRequest) ProtoMessage() {} + +type StartBackgroundRequestResponse struct { + // Every /_ah/background request will have an X-AppEngine-BackgroundRequest + // header, whose value will be equal to this parameter, the request_id. 
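+ // This lets the code serving an incoming /_ah/background request match it back to the StartBackgroundRequest call that spawned it.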
+ RequestId *string `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartBackgroundRequestResponse) Reset() { *m = StartBackgroundRequestResponse{} } +func (m *StartBackgroundRequestResponse) String() string { return proto.CompactTextString(m) } +func (*StartBackgroundRequestResponse) ProtoMessage() {} + +func (m *StartBackgroundRequestResponse) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/system/system_service.proto b/vendor/google.golang.org/appengine/internal/system/system_service.proto new file mode 100644 index 0000000..32c0bf8 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/system/system_service.proto @@ -0,0 +1,49 @@ +syntax = "proto2"; +option go_package = "system"; + +package appengine; + +message SystemServiceError { + enum ErrorCode { + OK = 0; + INTERNAL_ERROR = 1; + BACKEND_REQUIRED = 2; + LIMIT_REACHED = 3; + } +} + +message SystemStat { + // Instantaneous value of this stat. + optional double current = 1; + + // Average over time, if this stat has an instantaneous value. + optional double average1m = 3; + optional double average10m = 4; + + // Total value, if the stat accumulates over time. + optional double total = 2; + + // Rate over time, if this stat accumulates. + optional double rate1m = 5; + optional double rate10m = 6; +} + +message GetSystemStatsRequest { +} + +message GetSystemStatsResponse { + // CPU used by this instance, in mcycles. + optional SystemStat cpu = 1; + + // Physical memory (RAM) used by this instance, in megabytes. + optional SystemStat memory = 2; +} + +message StartBackgroundRequestRequest { +} + +message StartBackgroundRequestResponse { + // Every /_ah/background request will have an X-AppEngine-BackgroundRequest + // header, whose value will be equal to this parameter, the request_id. + optional string request_id = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go new file mode 100644 index 0000000..c3d428e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go @@ -0,0 +1,1888 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto +// DO NOT EDIT! + +/* +Package taskqueue is a generated protocol buffer package.
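+Taken together, the request/response pairs listed below cover the queue API surface: adding and bulk-adding tasks, deleting and force-running them, queue configuration and stats, pause/purge/delete operations, and task leasing for pull queues.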
+ +It is generated from these files: + google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto + +It has these top-level messages: + TaskQueueServiceError + TaskPayload + TaskQueueRetryParameters + TaskQueueAcl + TaskQueueHttpHeader + TaskQueueMode + TaskQueueAddRequest + TaskQueueAddResponse + TaskQueueBulkAddRequest + TaskQueueBulkAddResponse + TaskQueueDeleteRequest + TaskQueueDeleteResponse + TaskQueueForceRunRequest + TaskQueueForceRunResponse + TaskQueueUpdateQueueRequest + TaskQueueUpdateQueueResponse + TaskQueueFetchQueuesRequest + TaskQueueFetchQueuesResponse + TaskQueueFetchQueueStatsRequest + TaskQueueScannerQueueInfo + TaskQueueFetchQueueStatsResponse + TaskQueuePauseQueueRequest + TaskQueuePauseQueueResponse + TaskQueuePurgeQueueRequest + TaskQueuePurgeQueueResponse + TaskQueueDeleteQueueRequest + TaskQueueDeleteQueueResponse + TaskQueueDeleteGroupRequest + TaskQueueDeleteGroupResponse + TaskQueueQueryTasksRequest + TaskQueueQueryTasksResponse + TaskQueueFetchTaskRequest + TaskQueueFetchTaskResponse + TaskQueueUpdateStorageLimitRequest + TaskQueueUpdateStorageLimitResponse + TaskQueueQueryAndOwnTasksRequest + TaskQueueQueryAndOwnTasksResponse + TaskQueueModifyTaskLeaseRequest + TaskQueueModifyTaskLeaseResponse +*/ +package taskqueue + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import appengine "google.golang.org/appengine/internal/datastore" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type TaskQueueServiceError_ErrorCode int32 + +const ( + TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0 + TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1 + TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2 + TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3 + TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4 + TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5 + TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6 + TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7 + TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8 + TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9 + TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10 + TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11 + TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12 + TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13 + TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14 + TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15 + TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16 + TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17 + TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18 + TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19 + TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20 + TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21 + TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22 + TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23 + TaskQueueServiceError_INCORRECT_CREATOR_NAME 
TaskQueueServiceError_ErrorCode = 24 + TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25 + TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26 + TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27 + // Reserved range for the Datastore error codes. + // Original Datastore error code is shifted by DATASTORE_ERROR offset. + TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000 +) + +var TaskQueueServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "UNKNOWN_QUEUE", + 2: "TRANSIENT_ERROR", + 3: "INTERNAL_ERROR", + 4: "TASK_TOO_LARGE", + 5: "INVALID_TASK_NAME", + 6: "INVALID_QUEUE_NAME", + 7: "INVALID_URL", + 8: "INVALID_QUEUE_RATE", + 9: "PERMISSION_DENIED", + 10: "TASK_ALREADY_EXISTS", + 11: "TOMBSTONED_TASK", + 12: "INVALID_ETA", + 13: "INVALID_REQUEST", + 14: "UNKNOWN_TASK", + 15: "TOMBSTONED_QUEUE", + 16: "DUPLICATE_TASK_NAME", + 17: "SKIPPED", + 18: "TOO_MANY_TASKS", + 19: "INVALID_PAYLOAD", + 20: "INVALID_RETRY_PARAMETERS", + 21: "INVALID_QUEUE_MODE", + 22: "ACL_LOOKUP_ERROR", + 23: "TRANSACTIONAL_REQUEST_TOO_LARGE", + 24: "INCORRECT_CREATOR_NAME", + 25: "TASK_LEASE_EXPIRED", + 26: "QUEUE_PAUSED", + 27: "INVALID_TAG", + 10000: "DATASTORE_ERROR", +} +var TaskQueueServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "UNKNOWN_QUEUE": 1, + "TRANSIENT_ERROR": 2, + "INTERNAL_ERROR": 3, + "TASK_TOO_LARGE": 4, + "INVALID_TASK_NAME": 5, + "INVALID_QUEUE_NAME": 6, + "INVALID_URL": 7, + "INVALID_QUEUE_RATE": 8, + "PERMISSION_DENIED": 9, + "TASK_ALREADY_EXISTS": 10, + "TOMBSTONED_TASK": 11, + "INVALID_ETA": 12, + "INVALID_REQUEST": 13, + "UNKNOWN_TASK": 14, + "TOMBSTONED_QUEUE": 15, + "DUPLICATE_TASK_NAME": 16, + "SKIPPED": 17, + "TOO_MANY_TASKS": 18, + "INVALID_PAYLOAD": 19, + "INVALID_RETRY_PARAMETERS": 20, + "INVALID_QUEUE_MODE": 21, + "ACL_LOOKUP_ERROR": 22, + "TRANSACTIONAL_REQUEST_TOO_LARGE": 23, + "INCORRECT_CREATOR_NAME": 24, + "TASK_LEASE_EXPIRED": 25, + "QUEUE_PAUSED": 26, + "INVALID_TAG": 27, + "DATASTORE_ERROR": 10000, +} + +func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode { + p := new(TaskQueueServiceError_ErrorCode) + *p = x + return p +} +func (x TaskQueueServiceError_ErrorCode) String() string { + return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x)) +} +func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode") + if err != nil { + return err + } + *x = TaskQueueServiceError_ErrorCode(value) + return nil +} + +type TaskQueueMode_Mode int32 + +const ( + TaskQueueMode_PUSH TaskQueueMode_Mode = 0 + TaskQueueMode_PULL TaskQueueMode_Mode = 1 +) + +var TaskQueueMode_Mode_name = map[int32]string{ + 0: "PUSH", + 1: "PULL", +} +var TaskQueueMode_Mode_value = map[string]int32{ + "PUSH": 0, + "PULL": 1, +} + +func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode { + p := new(TaskQueueMode_Mode) + *p = x + return p +} +func (x TaskQueueMode_Mode) String() string { + return proto.EnumName(TaskQueueMode_Mode_name, int32(x)) +} +func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode") + if err != nil { + return err + } + *x = TaskQueueMode_Mode(value) + return nil +} + +type TaskQueueAddRequest_RequestMethod int32 + +const ( + TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1 + 
TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2 + TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3 + TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4 + TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5 +) + +var TaskQueueAddRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", +} +var TaskQueueAddRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, +} + +func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod { + p := new(TaskQueueAddRequest_RequestMethod) + *p = x + return p +} +func (x TaskQueueAddRequest_RequestMethod) String() string { + return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x)) +} +func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod") + if err != nil { + return err + } + *x = TaskQueueAddRequest_RequestMethod(value) + return nil +} + +type TaskQueueQueryTasksResponse_Task_RequestMethod int32 + +const ( + TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1 + TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2 + TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3 + TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4 + TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5 +) + +var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", +} +var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, +} + +func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod { + p := new(TaskQueueQueryTasksResponse_Task_RequestMethod) + *p = x + return p +} +func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string { + return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x)) +} +func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod") + if err != nil { + return err + } + *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value) + return nil +} + +type TaskQueueServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} } +func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) } +func (*TaskQueueServiceError) ProtoMessage() {} + +type TaskPayload struct { + XXX_extensions map[int32]proto.Extension `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskPayload) Reset() { *m = TaskPayload{} } +func (m *TaskPayload) String() string { return proto.CompactTextString(m) } +func (*TaskPayload) ProtoMessage() {} + +func (m *TaskPayload) Marshal() ([]byte, error) { + return proto.MarshalMessageSet(m.ExtensionMap()) +} +func (m *TaskPayload) Unmarshal(buf []byte) error { + return proto.UnmarshalMessageSet(buf, m.ExtensionMap()) +} +func (m *TaskPayload) MarshalJSON() ([]byte, error) { + return 
proto.MarshalMessageSetJSON(m.XXX_extensions) +} +func (m *TaskPayload) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions) +} + +// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler +var _ proto.Marshaler = (*TaskPayload)(nil) +var _ proto.Unmarshaler = (*TaskPayload)(nil) + +var extRange_TaskPayload = []proto.ExtensionRange{ + {10, 2147483646}, +} + +func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_TaskPayload +} +func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension { + if m.XXX_extensions == nil { + m.XXX_extensions = make(map[int32]proto.Extension) + } + return m.XXX_extensions +} + +type TaskQueueRetryParameters struct { + RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"` + AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"` + MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"` + MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"` + MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} } +func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) } +func (*TaskQueueRetryParameters) ProtoMessage() {} + +const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1 +const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600 +const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16 + +func (m *TaskQueueRetryParameters) GetRetryLimit() int32 { + if m != nil && m.RetryLimit != nil { + return *m.RetryLimit + } + return 0 +} + +func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 { + if m != nil && m.AgeLimitSec != nil { + return *m.AgeLimitSec + } + return 0 +} + +func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 { + if m != nil && m.MinBackoffSec != nil { + return *m.MinBackoffSec + } + return Default_TaskQueueRetryParameters_MinBackoffSec +} + +func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 { + if m != nil && m.MaxBackoffSec != nil { + return *m.MaxBackoffSec + } + return Default_TaskQueueRetryParameters_MaxBackoffSec +} + +func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 { + if m != nil && m.MaxDoublings != nil { + return *m.MaxDoublings + } + return Default_TaskQueueRetryParameters_MaxDoublings +} + +type TaskQueueAcl struct { + UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"` + WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} } +func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAcl) ProtoMessage() {} + +func (m *TaskQueueAcl) GetUserEmail() [][]byte { + if m != nil { + return m.UserEmail + } + return nil +} + +func (m *TaskQueueAcl) GetWriterEmail() [][]byte { + if m != nil { + return m.WriterEmail + } + return nil +} + +type TaskQueueHttpHeader struct { + Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} } 
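A minimal sketch of how the def= tags above surface in the generated API (an editorial illustration against this package's types; exampleRetryDefaults is hypothetical and not part of the vendored file): getters on an unset optional field fall back to the Default_* constants declared alongside the message, rather than returning Go zero values.

// exampleRetryDefaults is illustrative only; it shows that the generated
// getters resolve nil optional fields to the declared proto2 defaults.
func exampleRetryDefaults() (float64, float64, int32) {
	// An empty message: none of the optional fields are set.
	p := &TaskQueueRetryParameters{}
	// Each getter returns its declared default when the field is nil:
	// GetMinBackoffSec -> 0.1, GetMaxBackoffSec -> 3600, GetMaxDoublings -> 16.
	return p.GetMinBackoffSec(), p.GetMaxBackoffSec(), p.GetMaxDoublings()
}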
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) } +func (*TaskQueueHttpHeader) ProtoMessage() {} + +func (m *TaskQueueHttpHeader) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *TaskQueueHttpHeader) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type TaskQueueMode struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} } +func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) } +func (*TaskQueueMode) ProtoMessage() {} + +type TaskQueueAddRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"` + Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"` + Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep,name=Header" json:"header,omitempty"` + Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"` + Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"` + AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"` + Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"` + Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"` + Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` + Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} } +func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddRequest) ProtoMessage() {} + +const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST +const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH + +func (m *TaskQueueAddRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueAddRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueAddRequest) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return Default_TaskQueueAddRequest_Method +} + +func (m *TaskQueueAddRequest) GetUrl() []byte { + if m != nil { + return m.Url + } + return nil +} + +func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *TaskQueueAddRequest) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m 
*TaskQueueAddRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable { + if m != nil { + return m.Crontimetable + } + return nil +} + +func (m *TaskQueueAddRequest) GetDescription() []byte { + if m != nil { + return m.Description + } + return nil +} + +func (m *TaskQueueAddRequest) GetPayload() *TaskPayload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_TaskQueueAddRequest_Mode +} + +func (m *TaskQueueAddRequest) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +type TaskQueueAddRequest_Header struct { + Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} } +func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddRequest_Header) ProtoMessage() {} + +func (m *TaskQueueAddRequest_Header) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *TaskQueueAddRequest_Header) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type TaskQueueAddRequest_CronTimetable struct { + Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"` + Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} } +func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {} + +func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte { + if m != nil { + return m.Schedule + } + return nil +} + +func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte { + if m != nil { + return m.Timezone + } + return nil +} + +type TaskQueueAddResponse struct { + ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} } +func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueAddResponse) ProtoMessage() {} + +func (m *TaskQueueAddResponse) GetChosenTaskName() []byte { + if m != nil { + return m.ChosenTaskName + } + return nil +} + +type TaskQueueBulkAddRequest struct { + AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} } +func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueBulkAddRequest) ProtoMessage() {} + +func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest { + if m != nil { + return m.AddRequest + } + return nil +} + +type TaskQueueBulkAddResponse struct { + Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" 
json:"taskresult,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} } +func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueBulkAddResponse) ProtoMessage() {} + +func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult { + if m != nil { + return m.Taskresult + } + return nil +} + +type TaskQueueBulkAddResponse_TaskResult struct { + Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` + ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} } +func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) } +func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {} + +func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode { + if m != nil && m.Result != nil { + return *m.Result + } + return TaskQueueServiceError_OK +} + +func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte { + if m != nil { + return m.ChosenTaskName + } + return nil +} + +type TaskQueueDeleteRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"` + AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} } +func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteRequest) ProtoMessage() {} + +func (m *TaskQueueDeleteRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueDeleteRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type TaskQueueDeleteResponse struct { + Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} } +func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteResponse) ProtoMessage() {} + +func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode { + if m != nil { + return m.Result + } + return nil +} + +type TaskQueueForceRunRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} } +func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueForceRunRequest) ProtoMessage() {} + +func (m *TaskQueueForceRunRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m 
*TaskQueueForceRunRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueForceRunRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +type TaskQueueForceRunResponse struct { + Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} } +func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueForceRunResponse) ProtoMessage() {} + +func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode { + if m != nil && m.Result != nil { + return *m.Result + } + return TaskQueueServiceError_OK +} + +type TaskQueueUpdateQueueRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"` + BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"` + UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"` + Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` + Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"` + HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} } +func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateQueueRequest) ProtoMessage() {} + +const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH + +func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 { + if m != nil && m.BucketRefillPerSecond != nil { + return *m.BucketRefillPerSecond + } + return 0 +} + +func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 { + if m != nil && m.BucketCapacity != nil { + return *m.BucketCapacity + } + return 0 +} + +func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string { + if m != nil && m.UserSpecifiedRate != nil { + return *m.UserSpecifiedRate + } + return "" +} + +func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 { + if m != nil && m.MaxConcurrentRequests != nil { + return *m.MaxConcurrentRequests + } + return 0 +} + +func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode { + if m != nil && m.Mode != nil { + return 
*m.Mode + } + return Default_TaskQueueUpdateQueueRequest_Mode +} + +func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl { + if m != nil { + return m.Acl + } + return nil +} + +func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader { + if m != nil { + return m.HeaderOverride + } + return nil +} + +type TaskQueueUpdateQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} } +func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateQueueResponse) ProtoMessage() {} + +type TaskQueueFetchQueuesRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} } +func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueuesRequest) ProtoMessage() {} + +func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 { + if m != nil && m.MaxRows != nil { + return *m.MaxRows + } + return 0 +} + +type TaskQueueFetchQueuesResponse struct { + Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep,name=Queue" json:"queue,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} } +func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueuesResponse) ProtoMessage() {} + +func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue { + if m != nil { + return m.Queue + } + return nil +} + +type TaskQueueFetchQueuesResponse_Queue struct { + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"` + BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"` + UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"` + Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"` + Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"` + Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"` + HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"` + CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} } +func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {} + +const 
Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false +const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH +const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting" + +func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 { + if m != nil && m.BucketRefillPerSecond != nil { + return *m.BucketRefillPerSecond + } + return 0 +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 { + if m != nil && m.BucketCapacity != nil { + return *m.BucketCapacity + } + return 0 +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string { + if m != nil && m.UserSpecifiedRate != nil { + return *m.UserSpecifiedRate + } + return "" +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool { + if m != nil && m.Paused != nil { + return *m.Paused + } + return Default_TaskQueueFetchQueuesResponse_Queue_Paused +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 { + if m != nil && m.MaxConcurrentRequests != nil { + return *m.MaxConcurrentRequests + } + return 0 +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_TaskQueueFetchQueuesResponse_Queue_Mode +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl { + if m != nil { + return m.Acl + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader { + if m != nil { + return m.HeaderOverride + } + return nil +} + +func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string { + if m != nil && m.CreatorName != nil { + return *m.CreatorName + } + return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName +} + +type TaskQueueFetchQueueStatsRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"` + MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} } +func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {} + +const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0 + +func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 { + if m != nil && m.MaxNumTasks != nil { + return *m.MaxNumTasks + } + return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks +} + +type TaskQueueScannerQueueInfo struct { + ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"` + ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"` + SamplingDurationSeconds *float64 
`protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"` + RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"` + EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} } +func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) } +func (*TaskQueueScannerQueueInfo) ProtoMessage() {} + +func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 { + if m != nil && m.ExecutedLastMinute != nil { + return *m.ExecutedLastMinute + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 { + if m != nil && m.ExecutedLastHour != nil { + return *m.ExecutedLastHour + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 { + if m != nil && m.SamplingDurationSeconds != nil { + return *m.SamplingDurationSeconds + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 { + if m != nil && m.RequestsInFlight != nil { + return *m.RequestsInFlight + } + return 0 +} + +func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 { + if m != nil && m.EnforcedRate != nil { + return *m.EnforcedRate + } + return 0 +} + +type TaskQueueFetchQueueStatsResponse struct { + Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} } +func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {} + +func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats { + if m != nil { + return m.Queuestats + } + return nil +} + +type TaskQueueFetchQueueStatsResponse_QueueStats struct { + NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"` + OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"` + ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() { + *m = TaskQueueFetchQueueStatsResponse_QueueStats{} +} +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string { + return proto.CompactTextString(m) +} +func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 { + if m != nil && m.NumTasks != nil { + return *m.NumTasks + } + return 0 +} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 { + if m != nil && m.OldestEtaUsec != nil { + return *m.OldestEtaUsec + } + return 0 +} + +func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo { + if m != nil { + return m.ScannerInfo + } + return nil +} + +type TaskQueuePauseQueueRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"` + XXX_unrecognized []byte 
`json:"-"` +} + +func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} } +func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePauseQueueRequest) ProtoMessage() {} + +func (m *TaskQueuePauseQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueuePauseQueueRequest) GetPause() bool { + if m != nil && m.Pause != nil { + return *m.Pause + } + return false +} + +type TaskQueuePauseQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} } +func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePauseQueueResponse) ProtoMessage() {} + +type TaskQueuePurgeQueueRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} } +func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePurgeQueueRequest) ProtoMessage() {} + +func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +type TaskQueuePurgeQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} } +func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueuePurgeQueueResponse) ProtoMessage() {} + +type TaskQueueDeleteQueueRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} } +func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteQueueRequest) ProtoMessage() {} + +func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +type TaskQueueDeleteQueueResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} } +func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteQueueResponse) ProtoMessage() {} + +type TaskQueueDeleteGroupRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} } +func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteGroupRequest) ProtoMessage() {} + +func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +type 
TaskQueueDeleteGroupResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} } +func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueDeleteGroupResponse) ProtoMessage() {} + +type TaskQueueQueryTasksRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"` + StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"` + StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"` + MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} } +func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksRequest) ProtoMessage() {} + +const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1 + +func (m *TaskQueueQueryTasksRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte { + if m != nil { + return m.StartTaskName + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 { + if m != nil && m.StartEtaUsec != nil { + return *m.StartEtaUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte { + if m != nil { + return m.StartTag + } + return nil +} + +func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 { + if m != nil && m.MaxRows != nil { + return *m.MaxRows + } + return Default_TaskQueueQueryTasksRequest_MaxRows +} + +type TaskQueueQueryTasksResponse struct { + Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} } +func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task { + if m != nil { + return m.Task + } + return nil +} + +type TaskQueueQueryTasksResponse_Task struct { + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"` + Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"` + RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"` + Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep,name=Header" json:"header,omitempty"` + BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"` + Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"` + CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" 
json:"creation_time_usec,omitempty"` + Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"` + Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"` + Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"` + Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"` + RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"` + FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"` + Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"` + ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} } +func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {} + +const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0 +const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0 + +func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte { + if m != nil { + return m.Url + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return TaskQueueQueryTasksResponse_Task_GET +} + +func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 { + if m != nil && m.RetryCount != nil { + return *m.RetryCount + } + return Default_TaskQueueQueryTasksResponse_Task_RetryCount +} + +func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 { + if m != nil && m.BodySize != nil { + return *m.BodySize + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 { + if m != nil && m.CreationTimeUsec != nil { + return *m.CreationTimeUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable { + if m != nil { + return m.Crontimetable + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog { + if m != nil { + return m.Runlog + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte { + if m != nil { + return m.Description + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters { + if m != nil { + return m.RetryParameters + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 { + if m != nil && m.FirstTryUsec != 
nil { + return *m.FirstTryUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 { + if m != nil && m.ExecutionCount != nil { + return *m.ExecutionCount + } + return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount +} + +type TaskQueueQueryTasksResponse_Task_Header struct { + Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() { + *m = TaskQueueQueryTasksResponse_Task_Header{} +} +func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type TaskQueueQueryTasksResponse_Task_CronTimetable struct { + Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"` + Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() { + *m = TaskQueueQueryTasksResponse_Task_CronTimetable{} +} +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string { + return proto.CompactTextString(m) +} +func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte { + if m != nil { + return m.Schedule + } + return nil +} + +func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte { + if m != nil { + return m.Timezone + } + return nil +} + +type TaskQueueQueryTasksResponse_Task_RunLog struct { + DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"` + LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"` + ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"` + ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"` + RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() { + *m = TaskQueueQueryTasksResponse_Task_RunLog{} +} +func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 { + if m != nil && m.DispatchedUsec != nil { + return *m.DispatchedUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 { + if m != nil && m.LagUsec != nil { + return *m.LagUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 { + if m != nil && m.ElapsedUsec != nil { + return *m.ElapsedUsec + } + return 0 +} + +func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 { + if m != nil && m.ResponseCode != nil { + return *m.ResponseCode + } + return 0 +} + +func (m 
*TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string { + if m != nil && m.RetryReason != nil { + return *m.RetryReason + } + return "" +} + +type TaskQueueFetchTaskRequest struct { + AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"` + QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} } +func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchTaskRequest) ProtoMessage() {} + +func (m *TaskQueueFetchTaskRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +type TaskQueueFetchTaskResponse struct { + Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} } +func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueFetchTaskResponse) ProtoMessage() {} + +func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse { + if m != nil { + return m.Task + } + return nil +} + +type TaskQueueUpdateStorageLimitRequest struct { + AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` + Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} } +func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {} + +func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte { + if m != nil { + return m.AppId + } + return nil +} + +func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type TaskQueueUpdateStorageLimitResponse struct { + NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} } +func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {} + +func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 { + if m != nil && m.NewLimit != nil { + return *m.NewLimit + } + return 0 +} + +type TaskQueueQueryAndOwnTasksRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"` + MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"` + GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"` + Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = 
TaskQueueQueryAndOwnTasksRequest{} } +func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {} + +const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false + +func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 { + if m != nil && m.LeaseSeconds != nil { + return *m.LeaseSeconds + } + return 0 +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 { + if m != nil && m.MaxTasks != nil { + return *m.MaxTasks + } + return 0 +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool { + if m != nil && m.GroupByTag != nil { + return *m.GroupByTag + } + return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag +} + +func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +type TaskQueueQueryAndOwnTasksResponse struct { + Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep,name=Task" json:"task,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} } +func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {} + +func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task { + if m != nil { + return m.Task + } + return nil +} + +type TaskQueueQueryAndOwnTasksResponse_Task struct { + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"` + Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"` + Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() { + *m = TaskQueueQueryAndOwnTasksResponse_Task{} +} +func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) } +func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {} + +const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0 + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 { + if m != nil && m.RetryCount != nil { + return *m.RetryCount + } + return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte { + if m != nil { + return m.Body + } + return nil +} + +func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +type TaskQueueModifyTaskLeaseRequest struct { + QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"` + TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"` + EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"` + LeaseSeconds *float64 
`protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} } +func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) } +func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {} + +func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte { + if m != nil { + return m.QueueName + } + return nil +} + +func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte { + if m != nil { + return m.TaskName + } + return nil +} + +func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 { + if m != nil && m.EtaUsec != nil { + return *m.EtaUsec + } + return 0 +} + +func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 { + if m != nil && m.LeaseSeconds != nil { + return *m.LeaseSeconds + } + return 0 +} + +type TaskQueueModifyTaskLeaseResponse struct { + UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} } +func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {} + +func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 { + if m != nil && m.UpdatedEtaUsec != nil { + return *m.UpdatedEtaUsec + } + return 0 +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto new file mode 100644 index 0000000..419aaf5 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto @@ -0,0 +1,342 @@ +syntax = "proto2"; +option go_package = "taskqueue"; + +import "google.golang.org/appengine/internal/datastore/datastore_v3.proto"; + +package appengine; + +message TaskQueueServiceError { + enum ErrorCode { + OK = 0; + UNKNOWN_QUEUE = 1; + TRANSIENT_ERROR = 2; + INTERNAL_ERROR = 3; + TASK_TOO_LARGE = 4; + INVALID_TASK_NAME = 5; + INVALID_QUEUE_NAME = 6; + INVALID_URL = 7; + INVALID_QUEUE_RATE = 8; + PERMISSION_DENIED = 9; + TASK_ALREADY_EXISTS = 10; + TOMBSTONED_TASK = 11; + INVALID_ETA = 12; + INVALID_REQUEST = 13; + UNKNOWN_TASK = 14; + TOMBSTONED_QUEUE = 15; + DUPLICATE_TASK_NAME = 16; + SKIPPED = 17; + TOO_MANY_TASKS = 18; + INVALID_PAYLOAD = 19; + INVALID_RETRY_PARAMETERS = 20; + INVALID_QUEUE_MODE = 21; + ACL_LOOKUP_ERROR = 22; + TRANSACTIONAL_REQUEST_TOO_LARGE = 23; + INCORRECT_CREATOR_NAME = 24; + TASK_LEASE_EXPIRED = 25; + QUEUE_PAUSED = 26; + INVALID_TAG = 27; + + // Reserved range for the Datastore error codes. + // Original Datastore error code is shifted by DATASTORE_ERROR offset. 
+ DATASTORE_ERROR = 10000; + } +} + +message TaskPayload { + extensions 10 to max; + option message_set_wire_format = true; +} + +message TaskQueueRetryParameters { + optional int32 retry_limit = 1; + optional int64 age_limit_sec = 2; + + optional double min_backoff_sec = 3 [default = 0.1]; + optional double max_backoff_sec = 4 [default = 3600]; + optional int32 max_doublings = 5 [default = 16]; +} + +message TaskQueueAcl { + repeated bytes user_email = 1; + repeated bytes writer_email = 2; +} + +message TaskQueueHttpHeader { + required bytes key = 1; + required bytes value = 2; +} + +message TaskQueueMode { + enum Mode { + PUSH = 0; + PULL = 1; + } +} + +message TaskQueueAddRequest { + required bytes queue_name = 1; + required bytes task_name = 2; + required int64 eta_usec = 3; + + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + } + optional RequestMethod method = 5 [default=POST]; + + optional bytes url = 4; + + repeated group Header = 6 { + required bytes key = 7; + required bytes value = 8; + } + + optional bytes body = 9 [ctype=CORD]; + optional Transaction transaction = 10; + optional bytes app_id = 11; + + optional group CronTimetable = 12 { + required bytes schedule = 13; + required bytes timezone = 14; + } + + optional bytes description = 15; + optional TaskPayload payload = 16; + optional TaskQueueRetryParameters retry_parameters = 17; + optional TaskQueueMode.Mode mode = 18 [default=PUSH]; + optional bytes tag = 19; +} + +message TaskQueueAddResponse { + optional bytes chosen_task_name = 1; +} + +message TaskQueueBulkAddRequest { + repeated TaskQueueAddRequest add_request = 1; +} + +message TaskQueueBulkAddResponse { + repeated group TaskResult = 1 { + required TaskQueueServiceError.ErrorCode result = 2; + optional bytes chosen_task_name = 3; + } +} + +message TaskQueueDeleteRequest { + required bytes queue_name = 1; + repeated bytes task_name = 2; + optional bytes app_id = 3; +} + +message TaskQueueDeleteResponse { + repeated TaskQueueServiceError.ErrorCode result = 3; +} + +message TaskQueueForceRunRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + required bytes task_name = 3; +} + +message TaskQueueForceRunResponse { + required TaskQueueServiceError.ErrorCode result = 3; +} + +message TaskQueueUpdateQueueRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + required double bucket_refill_per_second = 3; + required int32 bucket_capacity = 4; + optional string user_specified_rate = 5; + optional TaskQueueRetryParameters retry_parameters = 6; + optional int32 max_concurrent_requests = 7; + optional TaskQueueMode.Mode mode = 8 [default = PUSH]; + optional TaskQueueAcl acl = 9; + repeated TaskQueueHttpHeader header_override = 10; +} + +message TaskQueueUpdateQueueResponse { +} + +message TaskQueueFetchQueuesRequest { + optional bytes app_id = 1; + required int32 max_rows = 2; +} + +message TaskQueueFetchQueuesResponse { + repeated group Queue = 1 { + required bytes queue_name = 2; + required double bucket_refill_per_second = 3; + required double bucket_capacity = 4; + optional string user_specified_rate = 5; + required bool paused = 6 [default=false]; + optional TaskQueueRetryParameters retry_parameters = 7; + optional int32 max_concurrent_requests = 8; + optional TaskQueueMode.Mode mode = 9 [default = PUSH]; + optional TaskQueueAcl acl = 10; + repeated TaskQueueHttpHeader header_override = 11; + optional string creator_name = 12 [ctype=CORD, default="apphosting"]; + } +} + +message 
TaskQueueFetchQueueStatsRequest { + optional bytes app_id = 1; + repeated bytes queue_name = 2; + optional int32 max_num_tasks = 3 [default = 0]; +} + +message TaskQueueScannerQueueInfo { + required int64 executed_last_minute = 1; + required int64 executed_last_hour = 2; + required double sampling_duration_seconds = 3; + optional int32 requests_in_flight = 4; + optional double enforced_rate = 5; +} + +message TaskQueueFetchQueueStatsResponse { + repeated group QueueStats = 1 { + required int32 num_tasks = 2; + required int64 oldest_eta_usec = 3; + optional TaskQueueScannerQueueInfo scanner_info = 4; + } +} +message TaskQueuePauseQueueRequest { + required bytes app_id = 1; + required bytes queue_name = 2; + required bool pause = 3; +} + +message TaskQueuePauseQueueResponse { +} + +message TaskQueuePurgeQueueRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; +} + +message TaskQueuePurgeQueueResponse { +} + +message TaskQueueDeleteQueueRequest { + required bytes app_id = 1; + required bytes queue_name = 2; +} + +message TaskQueueDeleteQueueResponse { +} + +message TaskQueueDeleteGroupRequest { + required bytes app_id = 1; +} + +message TaskQueueDeleteGroupResponse { +} + +message TaskQueueQueryTasksRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + + optional bytes start_task_name = 3; + optional int64 start_eta_usec = 4; + optional bytes start_tag = 6; + optional int32 max_rows = 5 [default = 1]; +} + +message TaskQueueQueryTasksResponse { + repeated group Task = 1 { + required bytes task_name = 2; + required int64 eta_usec = 3; + optional bytes url = 4; + + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + } + optional RequestMethod method = 5; + + optional int32 retry_count = 6 [default=0]; + + repeated group Header = 7 { + required bytes key = 8; + required bytes value = 9; + } + + optional int32 body_size = 10; + optional bytes body = 11 [ctype=CORD]; + required int64 creation_time_usec = 12; + + optional group CronTimetable = 13 { + required bytes schedule = 14; + required bytes timezone = 15; + } + + optional group RunLog = 16 { + required int64 dispatched_usec = 17; + required int64 lag_usec = 18; + required int64 elapsed_usec = 19; + optional int64 response_code = 20; + optional string retry_reason = 27; + } + + optional bytes description = 21; + optional TaskPayload payload = 22; + optional TaskQueueRetryParameters retry_parameters = 23; + optional int64 first_try_usec = 24; + optional bytes tag = 25; + optional int32 execution_count = 26 [default=0]; + } +} + +message TaskQueueFetchTaskRequest { + optional bytes app_id = 1; + required bytes queue_name = 2; + required bytes task_name = 3; +} + +message TaskQueueFetchTaskResponse { + required TaskQueueQueryTasksResponse task = 1; +} + +message TaskQueueUpdateStorageLimitRequest { + required bytes app_id = 1; + required int64 limit = 2; +} + +message TaskQueueUpdateStorageLimitResponse { + required int64 new_limit = 1; +} + +message TaskQueueQueryAndOwnTasksRequest { + required bytes queue_name = 1; + required double lease_seconds = 2; + required int64 max_tasks = 3; + optional bool group_by_tag = 4 [default=false]; + optional bytes tag = 5; +} + +message TaskQueueQueryAndOwnTasksResponse { + repeated group Task = 1 { + required bytes task_name = 2; + required int64 eta_usec = 3; + optional int32 retry_count = 4 [default=0]; + optional bytes body = 5 [ctype=CORD]; + optional bytes tag = 6; + } +} + +message TaskQueueModifyTaskLeaseRequest { + required bytes 
queue_name = 1; + required bytes task_name = 2; + required int64 eta_usec = 3; + required double lease_seconds = 4; +} + +message TaskQueueModifyTaskLeaseResponse { + required int64 updated_eta_usec = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 0000000..28a6d18 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,107 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { + if transactionFromContext(c) != nil { + return errors.New("nested transactions are not supported") + } + + // Begin the transaction. + t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return err + } + t.finished = true + + // Commit the transaction. 
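+ // (Annotation, not in the vendored source: this helper runs f exactly once; + // retry loops live in the callers, e.g. datastore.RunInTransaction, which + // typically re-invoke RunTransactionOnce when ErrConcurrentTransaction is + // returned below.)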
+ res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { + return ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return ErrConcurrentTransaction + } + } + return err +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 0000000..af463fb --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +// DO NOT EDIT! + +/* +Package urlfetch is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +It has these top-level messages: + URLFetchServiceError + URLFetchRequest + URLFetchResponse +*/ +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + "RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() 
string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + *x = URLFetchRequest_RequestMethod(value) + return nil +} + +type URLFetchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} + +const Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} 
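+ +// (Annotation, not generated output: for proto2 optional fields that declare a +// default, a nil pointer means the field was never set, so accessors such as +// GetFollowRedirects below return the declared default, true, rather than the +// Go zero value.)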
+ +func (m *URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) ProtoMessage() {} + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` + ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) 
GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto new file mode 100644 index 0000000..f695edf --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "urlfetch"; + +package appengine; + +message URLFetchServiceError { + enum ErrorCode { + OK = 0; + INVALID_URL = 1; + FETCH_ERROR = 2; + UNSPECIFIED_ERROR = 3; + RESPONSE_TOO_LARGE = 4; + DEADLINE_EXCEEDED = 5; + SSL_CERTIFICATE_ERROR = 6; + DNS_ERROR = 7; + CLOSED = 8; + INTERNAL_TRANSIENT_ERROR = 9; + TOO_MANY_REDIRECTS = 10; + MALFORMED_REPLY = 11; + CONNECTION_ERROR = 12; + } +} + +message URLFetchRequest { + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + PATCH = 6; + } + required RequestMethod Method = 1; + required string Url = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bytes Payload = 6 [ctype=CORD]; + + optional bool FollowRedirects = 7 [default=true]; + + optional double Deadline = 8; + + optional bool MustValidateServerCertificate = 9 [default=true]; +} + +message URLFetchResponse { + optional bytes Content = 1; + required int32 StatusCode = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bool ContentWasTruncated = 6 [default=false]; + optional int64 ExternalBytesSent = 7; + optional int64 ExternalBytesReceived = 8; + + optional string FinalUrl = 9; + + optional int64 ApiCpuMilliseconds = 10 [default=0]; + optional int64 ApiBytesSent = 11 [default=0]; + optional int64 ApiBytesReceived = 12 [default=0]; +} diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go new file mode 100644 index 0000000..6b52ffc --- /dev/null +++ 
b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go @@ -0,0 +1,289 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/user/user_service.proto +// DO NOT EDIT! + +/* +Package user is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/user/user_service.proto + +It has these top-level messages: + UserServiceError + CreateLoginURLRequest + CreateLoginURLResponse + CreateLogoutURLRequest + CreateLogoutURLResponse + GetOAuthUserRequest + GetOAuthUserResponse + CheckOAuthSignatureRequest + CheckOAuthSignatureResponse +*/ +package user + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type UserServiceError_ErrorCode int32 + +const ( + UserServiceError_OK UserServiceError_ErrorCode = 0 + UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1 + UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2 + UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3 + UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4 + UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5 +) + +var UserServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "REDIRECT_URL_TOO_LONG", + 2: "NOT_ALLOWED", + 3: "OAUTH_INVALID_TOKEN", + 4: "OAUTH_INVALID_REQUEST", + 5: "OAUTH_ERROR", +} +var UserServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "REDIRECT_URL_TOO_LONG": 1, + "NOT_ALLOWED": 2, + "OAUTH_INVALID_TOKEN": 3, + "OAUTH_INVALID_REQUEST": 4, + "OAUTH_ERROR": 5, +} + +func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode { + p := new(UserServiceError_ErrorCode) + *p = x + return p +} +func (x UserServiceError_ErrorCode) String() string { + return proto.EnumName(UserServiceError_ErrorCode_name, int32(x)) +} +func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode") + if err != nil { + return err + } + *x = UserServiceError_ErrorCode(value) + return nil +} + +type UserServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *UserServiceError) Reset() { *m = UserServiceError{} } +func (m *UserServiceError) String() string { return proto.CompactTextString(m) } +func (*UserServiceError) ProtoMessage() {} + +type CreateLoginURLRequest struct { + DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` + AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` + FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} } +func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) } +func (*CreateLoginURLRequest) ProtoMessage() {} + +func (m *CreateLoginURLRequest) GetDestinationUrl() string { + if m != nil && m.DestinationUrl != nil { + return *m.DestinationUrl + } + return "" +} + +func (m *CreateLoginURLRequest) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *CreateLoginURLRequest) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return 
*m.FederatedIdentity + } + return "" +} + +type CreateLoginURLResponse struct { + LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} } +func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) } +func (*CreateLoginURLResponse) ProtoMessage() {} + +func (m *CreateLoginURLResponse) GetLoginUrl() string { + if m != nil && m.LoginUrl != nil { + return *m.LoginUrl + } + return "" +} + +type CreateLogoutURLRequest struct { + DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` + AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} } +func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) } +func (*CreateLogoutURLRequest) ProtoMessage() {} + +func (m *CreateLogoutURLRequest) GetDestinationUrl() string { + if m != nil && m.DestinationUrl != nil { + return *m.DestinationUrl + } + return "" +} + +func (m *CreateLogoutURLRequest) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +type CreateLogoutURLResponse struct { + LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} } +func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) } +func (*CreateLogoutURLResponse) ProtoMessage() {} + +func (m *CreateLogoutURLResponse) GetLogoutUrl() string { + if m != nil && m.LogoutUrl != nil { + return *m.LogoutUrl + } + return "" +} + +type GetOAuthUserRequest struct { + Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` + Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} } +func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) } +func (*GetOAuthUserRequest) ProtoMessage() {} + +func (m *GetOAuthUserRequest) GetScope() string { + if m != nil && m.Scope != nil { + return *m.Scope + } + return "" +} + +func (m *GetOAuthUserRequest) GetScopes() []string { + if m != nil { + return m.Scopes + } + return nil +} + +type GetOAuthUserResponse struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"` + AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"` + UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"` + IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"` + ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"` + Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} } +func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) } +func (*GetOAuthUserResponse) ProtoMessage() {} + +const Default_GetOAuthUserResponse_IsAdmin bool = false + +func (m *GetOAuthUserResponse) GetEmail() 
string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *GetOAuthUserResponse) GetUserId() string { + if m != nil && m.UserId != nil { + return *m.UserId + } + return "" +} + +func (m *GetOAuthUserResponse) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *GetOAuthUserResponse) GetUserOrganization() string { + if m != nil && m.UserOrganization != nil { + return *m.UserOrganization + } + return "" +} + +func (m *GetOAuthUserResponse) GetIsAdmin() bool { + if m != nil && m.IsAdmin != nil { + return *m.IsAdmin + } + return Default_GetOAuthUserResponse_IsAdmin +} + +func (m *GetOAuthUserResponse) GetClientId() string { + if m != nil && m.ClientId != nil { + return *m.ClientId + } + return "" +} + +func (m *GetOAuthUserResponse) GetScopes() []string { + if m != nil { + return m.Scopes + } + return nil +} + +type CheckOAuthSignatureRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} } +func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) } +func (*CheckOAuthSignatureRequest) ProtoMessage() {} + +type CheckOAuthSignatureResponse struct { + OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} } +func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) } +func (*CheckOAuthSignatureResponse) ProtoMessage() {} + +func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string { + if m != nil && m.OauthConsumerKey != nil { + return *m.OauthConsumerKey + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto new file mode 100644 index 0000000..f3e9693 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/user/user_service.proto @@ -0,0 +1,58 @@ +syntax = "proto2"; +option go_package = "user"; + +package appengine; + +message UserServiceError { + enum ErrorCode { + OK = 0; + REDIRECT_URL_TOO_LONG = 1; + NOT_ALLOWED = 2; + OAUTH_INVALID_TOKEN = 3; + OAUTH_INVALID_REQUEST = 4; + OAUTH_ERROR = 5; + } +} + +message CreateLoginURLRequest { + required string destination_url = 1; + optional string auth_domain = 2; + optional string federated_identity = 3 [default = ""]; +} + +message CreateLoginURLResponse { + required string login_url = 1; +} + +message CreateLogoutURLRequest { + required string destination_url = 1; + optional string auth_domain = 2; +} + +message CreateLogoutURLResponse { + required string logout_url = 1; +} + +message GetOAuthUserRequest { + optional string scope = 1; + + repeated string scopes = 2; +} + +message GetOAuthUserResponse { + required string email = 1; + required string user_id = 2; + required string auth_domain = 3; + optional string user_organization = 4 [default = ""]; + optional bool is_admin = 5 [default = false]; + optional string client_id = 6 [default = ""]; + + repeated string scopes = 7; +} + +message CheckOAuthSignatureRequest { +} + +message CheckOAuthSignatureResponse { + required string oauth_consumer_key = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go new file mode 100644 
index 0000000..6d5b0ae --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go @@ -0,0 +1,427 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto +// DO NOT EDIT! + +/* +Package xmpp is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/xmpp/xmpp_service.proto + +It has these top-level messages: + XmppServiceError + PresenceRequest + PresenceResponse + BulkPresenceRequest + BulkPresenceResponse + XmppMessageRequest + XmppMessageResponse + XmppSendPresenceRequest + XmppSendPresenceResponse + XmppInviteRequest + XmppInviteResponse +*/ +package xmpp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type XmppServiceError_ErrorCode int32 + +const ( + XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1 + XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2 + XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3 + XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4 + XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5 + XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6 + XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7 + XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8 + XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9 +) + +var XmppServiceError_ErrorCode_name = map[int32]string{ + 1: "UNSPECIFIED_ERROR", + 2: "INVALID_JID", + 3: "NO_BODY", + 4: "INVALID_XML", + 5: "INVALID_TYPE", + 6: "INVALID_SHOW", + 7: "EXCEEDED_MAX_SIZE", + 8: "APPID_ALIAS_REQUIRED", + 9: "NONDEFAULT_MODULE", +} +var XmppServiceError_ErrorCode_value = map[string]int32{ + "UNSPECIFIED_ERROR": 1, + "INVALID_JID": 2, + "NO_BODY": 3, + "INVALID_XML": 4, + "INVALID_TYPE": 5, + "INVALID_SHOW": 6, + "EXCEEDED_MAX_SIZE": 7, + "APPID_ALIAS_REQUIRED": 8, + "NONDEFAULT_MODULE": 9, +} + +func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode { + p := new(XmppServiceError_ErrorCode) + *p = x + return p +} +func (x XmppServiceError_ErrorCode) String() string { + return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x)) +} +func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode") + if err != nil { + return err + } + *x = XmppServiceError_ErrorCode(value) + return nil +} + +type PresenceResponse_SHOW int32 + +const ( + PresenceResponse_NORMAL PresenceResponse_SHOW = 0 + PresenceResponse_AWAY PresenceResponse_SHOW = 1 + PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2 + PresenceResponse_CHAT PresenceResponse_SHOW = 3 + PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4 +) + +var PresenceResponse_SHOW_name = map[int32]string{ + 0: "NORMAL", + 1: "AWAY", + 2: "DO_NOT_DISTURB", + 3: "CHAT", + 4: "EXTENDED_AWAY", +} +var PresenceResponse_SHOW_value = map[string]int32{ + "NORMAL": 0, + "AWAY": 1, + "DO_NOT_DISTURB": 2, + "CHAT": 3, + "EXTENDED_AWAY": 4, +} + +func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW { + p := new(PresenceResponse_SHOW) + *p = x + return p +} +func (x PresenceResponse_SHOW) String() string { + return proto.EnumName(PresenceResponse_SHOW_name, int32(x)) +} +func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error { 
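+ // (Annotation, not generated output: proto.UnmarshalJSONEnum accepts either + // the quoted enum name, e.g. "AWAY", or the numeric enum value.)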
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW") + if err != nil { + return err + } + *x = PresenceResponse_SHOW(value) + return nil +} + +type XmppMessageResponse_XmppMessageStatus int32 + +const ( + XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0 + XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1 + XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2 +) + +var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{ + 0: "NO_ERROR", + 1: "INVALID_JID", + 2: "OTHER_ERROR", +} +var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{ + "NO_ERROR": 0, + "INVALID_JID": 1, + "OTHER_ERROR": 2, +} + +func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus { + p := new(XmppMessageResponse_XmppMessageStatus) + *p = x + return p +} +func (x XmppMessageResponse_XmppMessageStatus) String() string { + return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x)) +} +func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus") + if err != nil { + return err + } + *x = XmppMessageResponse_XmppMessageStatus(value) + return nil +} + +type XmppServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppServiceError) Reset() { *m = XmppServiceError{} } +func (m *XmppServiceError) String() string { return proto.CompactTextString(m) } +func (*XmppServiceError) ProtoMessage() {} + +type PresenceRequest struct { + Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` + FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PresenceRequest) Reset() { *m = PresenceRequest{} } +func (m *PresenceRequest) String() string { return proto.CompactTextString(m) } +func (*PresenceRequest) ProtoMessage() {} + +func (m *PresenceRequest) GetJid() string { + if m != nil && m.Jid != nil { + return *m.Jid + } + return "" +} + +func (m *PresenceRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type PresenceResponse struct { + IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"` + Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"` + Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PresenceResponse) Reset() { *m = PresenceResponse{} } +func (m *PresenceResponse) String() string { return proto.CompactTextString(m) } +func (*PresenceResponse) ProtoMessage() {} + +func (m *PresenceResponse) GetIsAvailable() bool { + if m != nil && m.IsAvailable != nil { + return *m.IsAvailable + } + return false +} + +func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW { + if m != nil && m.Presence != nil { + return *m.Presence + } + return PresenceResponse_NORMAL +} + +func (m *PresenceResponse) GetValid() bool { + if m != nil && m.Valid != nil { + return *m.Valid + } + return false +} + +type BulkPresenceRequest struct { + Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"` + FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkPresenceRequest) 
Reset() { *m = BulkPresenceRequest{} } +func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) } +func (*BulkPresenceRequest) ProtoMessage() {} + +func (m *BulkPresenceRequest) GetJid() []string { + if m != nil { + return m.Jid + } + return nil +} + +func (m *BulkPresenceRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type BulkPresenceResponse struct { + PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} } +func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) } +func (*BulkPresenceResponse) ProtoMessage() {} + +func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse { + if m != nil { + return m.PresenceResponse + } + return nil +} + +type XmppMessageRequest struct { + Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"` + Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"` + RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"` + Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"` + FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} } +func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) } +func (*XmppMessageRequest) ProtoMessage() {} + +const Default_XmppMessageRequest_RawXml bool = false +const Default_XmppMessageRequest_Type string = "chat" + +func (m *XmppMessageRequest) GetJid() []string { + if m != nil { + return m.Jid + } + return nil +} + +func (m *XmppMessageRequest) GetBody() string { + if m != nil && m.Body != nil { + return *m.Body + } + return "" +} + +func (m *XmppMessageRequest) GetRawXml() bool { + if m != nil && m.RawXml != nil { + return *m.RawXml + } + return Default_XmppMessageRequest_RawXml +} + +func (m *XmppMessageRequest) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_XmppMessageRequest_Type +} + +func (m *XmppMessageRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type XmppMessageResponse struct { + Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} } +func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) } +func (*XmppMessageResponse) ProtoMessage() {} + +func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus { + if m != nil { + return m.Status + } + return nil +} + +type XmppSendPresenceRequest struct { + Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` + Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` + Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"` + Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} } +func (m *XmppSendPresenceRequest) String() 
string { return proto.CompactTextString(m) } +func (*XmppSendPresenceRequest) ProtoMessage() {} + +func (m *XmppSendPresenceRequest) GetJid() string { + if m != nil && m.Jid != nil { + return *m.Jid + } + return "" +} + +func (m *XmppSendPresenceRequest) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *XmppSendPresenceRequest) GetShow() string { + if m != nil && m.Show != nil { + return *m.Show + } + return "" +} + +func (m *XmppSendPresenceRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +func (m *XmppSendPresenceRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type XmppSendPresenceResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} } +func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) } +func (*XmppSendPresenceResponse) ProtoMessage() {} + +type XmppInviteRequest struct { + Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"` + FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} } +func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) } +func (*XmppInviteRequest) ProtoMessage() {} + +func (m *XmppInviteRequest) GetJid() string { + if m != nil && m.Jid != nil { + return *m.Jid + } + return "" +} + +func (m *XmppInviteRequest) GetFromJid() string { + if m != nil && m.FromJid != nil { + return *m.FromJid + } + return "" +} + +type XmppInviteResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} } +func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) } +func (*XmppInviteResponse) ProtoMessage() {} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto new file mode 100644 index 0000000..472d52e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto @@ -0,0 +1,83 @@ +syntax = "proto2"; +option go_package = "xmpp"; + +package appengine; + +message XmppServiceError { + enum ErrorCode { + UNSPECIFIED_ERROR = 1; + INVALID_JID = 2; + NO_BODY = 3; + INVALID_XML = 4; + INVALID_TYPE = 5; + INVALID_SHOW = 6; + EXCEEDED_MAX_SIZE = 7; + APPID_ALIAS_REQUIRED = 8; + NONDEFAULT_MODULE = 9; + } +} + +message PresenceRequest { + required string jid = 1; + optional string from_jid = 2; +} + +message PresenceResponse { + enum SHOW { + NORMAL = 0; + AWAY = 1; + DO_NOT_DISTURB = 2; + CHAT = 3; + EXTENDED_AWAY = 4; + } + + required bool is_available = 1; + optional SHOW presence = 2; + optional bool valid = 3; +} + +message BulkPresenceRequest { + repeated string jid = 1; + optional string from_jid = 2; +} + +message BulkPresenceResponse { + repeated PresenceResponse presence_response = 1; +} + +message XmppMessageRequest { + repeated string jid = 1; + required string body = 2; + optional bool raw_xml = 3 [ default = false ]; + optional string type = 4 [ default = "chat" ]; + optional string from_jid = 5; +} + +message XmppMessageResponse { + enum XmppMessageStatus { + NO_ERROR = 0; + INVALID_JID = 1; + OTHER_ERROR = 2; + } + + repeated XmppMessageStatus status = 1; +} + +message XmppSendPresenceRequest { + 
required string jid = 1; + optional string type = 2; + optional string show = 3; + optional string status = 4; + optional string from_jid = 5; +} + +message XmppSendPresenceResponse { +} + +message XmppInviteRequest { + required string jid = 1; + optional string from_jid = 2; +} + +message XmppInviteResponse { +} diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go new file mode 100644 index 0000000..24d5860 --- /dev/null +++ b/vendor/google.golang.org/appengine/log/api.go @@ -0,0 +1,40 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package log + +// This file implements the logging API. + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Debugf formats its arguments according to the format, analogous to fmt.Printf, +// and records the text as a log message at Debug level. The message will be associated +// with the request linked with the provided context. +func Debugf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 0, format, args...) +} + +// Infof is like Debugf, but at Info level. +func Infof(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 1, format, args...) +} + +// Warningf is like Debugf, but at Warning level. +func Warningf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 2, format, args...) +} + +// Errorf is like Debugf, but at Error level. +func Errorf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 3, format, args...) +} + +// Criticalf is like Debugf, but at Critical level. +func Criticalf(ctx context.Context, format string, args ...interface{}) { + internal.Logf(ctx, 4, format, args...) +} diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go new file mode 100644 index 0000000..b54fe47 --- /dev/null +++ b/vendor/google.golang.org/appengine/log/log.go @@ -0,0 +1,323 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package log provides the means of querying an application's logs from +within an App Engine application. + +Example: + c := appengine.NewContext(r) + query := &log.Query{ + AppLogs: true, + Versions: []string{"1"}, + } + + for results := query.Run(c); ; { + record, err := results.Next() + if err == log.Done { + log.Infof(c, "Done processing results") + break + } + if err != nil { + log.Errorf(c, "Failed to retrieve next log: %v", err) + break + } + log.Infof(c, "Saw record %v", record) + } +*/ +package log // import "google.golang.org/appengine/log" + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/log" +) + +// Query defines a logs query. +type Query struct { + // Start time specifies the earliest log to return (inclusive). + StartTime time.Time + + // End time specifies the latest log to return (exclusive). + EndTime time.Time + + // Offset specifies a position within the log stream to resume reading from, + // and should come from a previously returned Record's field of the same name. 
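+	// (Editor's note, inferred from the Result implementation below: to
+	// resume a query, copy the Offset of the last Record you processed
+	// into a fresh Query and call Run again.)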
+ Offset []byte + + // Incomplete controls whether active (incomplete) requests should be included. + Incomplete bool + + // AppLogs indicates if application-level logs should be included. + AppLogs bool + + // ApplyMinLevel indicates if MinLevel should be used to filter results. + ApplyMinLevel bool + + // If ApplyMinLevel is true, only logs for requests with at least one + // application log of MinLevel or higher will be returned. + MinLevel int + + // Versions is the major version IDs whose logs should be retrieved. + // Logs for specific modules can be retrieved by the specifying versions + // in the form "module:version"; the default module is used if no module + // is specified. + Versions []string + + // A list of requests to search for instead of a time-based scan. Cannot be + // combined with filtering options such as StartTime, EndTime, Offset, + // Incomplete, ApplyMinLevel, or Versions. + RequestIDs []string +} + +// AppLog represents a single application-level log. +type AppLog struct { + Time time.Time + Level int + Message string +} + +// Record contains all the information for a single web request. +type Record struct { + AppID string + ModuleID string + VersionID string + RequestID []byte + IP string + Nickname string + AppEngineRelease string + + // The time when this request started. + StartTime time.Time + + // The time when this request finished. + EndTime time.Time + + // Opaque cursor into the result stream. + Offset []byte + + // The time required to process the request. + Latency time.Duration + MCycles int64 + Method string + Resource string + HTTPVersion string + Status int32 + + // The size of the request sent back to the client, in bytes. + ResponseSize int64 + Referrer string + UserAgent string + URLMapEntry string + Combined string + Host string + + // The estimated cost of this request, in dollars. + Cost float64 + TaskQueueName string + TaskName string + WasLoadingRequest bool + PendingTime time.Duration + Finished bool + AppLogs []AppLog + + // Mostly-unique identifier for the instance that handled the request if available. + InstanceID string +} + +// Result represents the result of a query. +type Result struct { + logs []*Record + context context.Context + request *pb.LogReadRequest + resultsSeen bool + err error +} + +// Next returns the next log record, +func (qr *Result) Next() (*Record, error) { + if qr.err != nil { + return nil, qr.err + } + if len(qr.logs) > 0 { + lr := qr.logs[0] + qr.logs = qr.logs[1:] + return lr, nil + } + + if qr.request.Offset == nil && qr.resultsSeen { + return nil, Done + } + + if err := qr.run(); err != nil { + // Errors here may be retried, so don't store the error. + return nil, err + } + + return qr.Next() +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("log: query has no more results") + +// protoToAppLogs takes as input an array of pointers to LogLines, the internal +// Protocol Buffer representation of a single application-level log, +// and converts it to an array of AppLogs, the external representation +// of an application-level log. 
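+//
+// (Editor's note: LogLine.Time holds microseconds since the Unix epoch,
+// which is why the conversion below multiplies by 1e3 to produce the
+// nanoseconds that time.Unix expects.)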
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog { + appLogs := make([]AppLog, len(logLines)) + + for i, line := range logLines { + appLogs[i] = AppLog{ + Time: time.Unix(0, *line.Time*1e3), + Level: int(*line.Level), + Message: *line.LogMessage, + } + } + + return appLogs +} + +// protoToRecord converts a RequestLog, the internal Protocol Buffer +// representation of a single request-level log, to a Record, its +// corresponding external representation. +func protoToRecord(rl *pb.RequestLog) *Record { + offset, err := proto.Marshal(rl.Offset) + if err != nil { + offset = nil + } + return &Record{ + AppID: *rl.AppId, + ModuleID: rl.GetModuleId(), + VersionID: *rl.VersionId, + RequestID: rl.RequestId, + Offset: offset, + IP: *rl.Ip, + Nickname: rl.GetNickname(), + AppEngineRelease: string(rl.GetAppEngineRelease()), + StartTime: time.Unix(0, *rl.StartTime*1e3), + EndTime: time.Unix(0, *rl.EndTime*1e3), + Latency: time.Duration(*rl.Latency) * time.Microsecond, + MCycles: *rl.Mcycles, + Method: *rl.Method, + Resource: *rl.Resource, + HTTPVersion: *rl.HttpVersion, + Status: *rl.Status, + ResponseSize: *rl.ResponseSize, + Referrer: rl.GetReferrer(), + UserAgent: rl.GetUserAgent(), + URLMapEntry: *rl.UrlMapEntry, + Combined: *rl.Combined, + Host: rl.GetHost(), + Cost: rl.GetCost(), + TaskQueueName: rl.GetTaskQueueName(), + TaskName: rl.GetTaskName(), + WasLoadingRequest: rl.GetWasLoadingRequest(), + PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond, + Finished: rl.GetFinished(), + AppLogs: protoToAppLogs(rl.Line), + InstanceID: string(rl.GetCloneKey()), + } +} + +// Run starts a query for log records, which contain request and application +// level log information. +func (params *Query) Run(c context.Context) *Result { + req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c)) + return &Result{ + context: c, + request: req, + err: err, + } +} + +func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) { + req := &pb.LogReadRequest{} + req.AppId = &appID + if !params.StartTime.IsZero() { + req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3) + } + if !params.EndTime.IsZero() { + req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3) + } + if len(params.Offset) > 0 { + var offset pb.LogOffset + if err := proto.Unmarshal(params.Offset, &offset); err != nil { + return nil, fmt.Errorf("bad Offset: %v", err) + } + req.Offset = &offset + } + if params.Incomplete { + req.IncludeIncomplete = ¶ms.Incomplete + } + if params.AppLogs { + req.IncludeAppLogs = ¶ms.AppLogs + } + if params.ApplyMinLevel { + req.MinimumLogLevel = proto.Int32(int32(params.MinLevel)) + } + if params.Versions == nil { + // If no versions were specified, default to the default module at + // the major version being used by this module. 
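+		// (Editor's example: a versionID of "1.123456789" is truncated
+		// to its major version "1" before being sent.)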
+ if i := strings.Index(versionID, "."); i >= 0 { + versionID = versionID[:i] + } + req.VersionId = []string{versionID} + } else { + req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions)) + for _, v := range params.Versions { + var m *string + if i := strings.Index(v, ":"); i >= 0 { + m, v = proto.String(v[:i]), v[i+1:] + } + req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{ + ModuleId: m, + VersionId: proto.String(v), + }) + } + } + if params.RequestIDs != nil { + ids := make([][]byte, len(params.RequestIDs)) + for i, v := range params.RequestIDs { + ids[i] = []byte(v) + } + req.RequestId = ids + } + + return req, nil +} + +// run takes the query Result produced by a call to Run and updates it with +// more Records. The updated Result contains a new set of logs as well as an +// offset to where more logs can be found. We also convert the items in the +// response from their internal representations to external versions of the +// same structs. +func (r *Result) run() error { + res := &pb.LogReadResponse{} + if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil { + return err + } + + r.logs = make([]*Record, len(res.Log)) + r.request.Offset = res.Offset + r.resultsSeen = true + + for i, log := range res.Log { + r.logs[i] = protoToRecord(log) + } + + return nil +} + +func init() { + internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/log/log_test.go b/vendor/google.golang.org/appengine/log/log_test.go new file mode 100644 index 0000000..726468e --- /dev/null +++ b/vendor/google.golang.org/appengine/log/log_test.go @@ -0,0 +1,112 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package log + +import ( + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + + pb "google.golang.org/appengine/internal/log" +) + +func TestQueryToRequest(t *testing.T) { + testCases := []struct { + desc string + query *Query + want *pb.LogReadRequest + }{ + { + desc: "Empty", + query: &Query{}, + want: &pb.LogReadRequest{ + AppId: proto.String("s~fake"), + VersionId: []string{"v12"}, + }, + }, + { + desc: "Versions", + query: &Query{ + Versions: []string{"alpha", "backend:beta"}, + }, + want: &pb.LogReadRequest{ + AppId: proto.String("s~fake"), + ModuleVersion: []*pb.LogModuleVersion{ + { + VersionId: proto.String("alpha"), + }, { + ModuleId: proto.String("backend"), + VersionId: proto.String("beta"), + }, + }, + }, + }, + } + + for _, tt := range testCases { + req, err := makeRequest(tt.query, "s~fake", "v12") + + if err != nil { + t.Errorf("%s: got err %v, want nil", tt.desc, err) + continue + } + if !proto.Equal(req, tt.want) { + t.Errorf("%s request:\ngot %v\nwant %v", tt.desc, req, tt.want) + } + } +} + +func TestProtoToRecord(t *testing.T) { + // We deliberately leave ModuleId and other optional fields unset. 
+ p := &pb.RequestLog{ + AppId: proto.String("s~fake"), + VersionId: proto.String("1"), + RequestId: []byte("deadbeef"), + Ip: proto.String("127.0.0.1"), + StartTime: proto.Int64(431044244000000), + EndTime: proto.Int64(431044724000000), + Latency: proto.Int64(480000000), + Mcycles: proto.Int64(7), + Method: proto.String("GET"), + Resource: proto.String("/app"), + HttpVersion: proto.String("1.1"), + Status: proto.Int32(418), + ResponseSize: proto.Int64(1337), + UrlMapEntry: proto.String("_go_app"), + Combined: proto.String("apache log"), + } + // Sanity check that all required fields are set. + if _, err := proto.Marshal(p); err != nil { + t.Fatalf("proto.Marshal: %v", err) + } + want := &Record{ + AppID: "s~fake", + ModuleID: "default", + VersionID: "1", + RequestID: []byte("deadbeef"), + IP: "127.0.0.1", + StartTime: time.Date(1983, 8, 29, 22, 30, 44, 0, time.UTC), + EndTime: time.Date(1983, 8, 29, 22, 38, 44, 0, time.UTC), + Latency: 8 * time.Minute, + MCycles: 7, + Method: "GET", + Resource: "/app", + HTTPVersion: "1.1", + Status: 418, + ResponseSize: 1337, + URLMapEntry: "_go_app", + Combined: "apache log", + Finished: true, + AppLogs: []AppLog{}, + } + got := protoToRecord(p) + // Coerce locations to UTC since otherwise they will be in local. + got.StartTime, got.EndTime = got.StartTime.UTC(), got.EndTime.UTC() + if !reflect.DeepEqual(got, want) { + t.Errorf("protoToRecord:\ngot: %v\nwant: %v", got, want) + } +} diff --git a/vendor/google.golang.org/appengine/mail/mail.go b/vendor/google.golang.org/appengine/mail/mail.go new file mode 100644 index 0000000..f7955aa --- /dev/null +++ b/vendor/google.golang.org/appengine/mail/mail.go @@ -0,0 +1,123 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package mail provides the means of sending email from an +App Engine application. + +Example: + msg := &mail.Message{ + Sender: "romeo@montague.com", + To: []string{"Juliet "}, + Subject: "See you tonight", + Body: "Don't forget our plans. Hark, 'til later.", + } + if err := mail.Send(c, msg); err != nil { + log.Errorf(c, "Alas, my user, the email failed to sendeth: %v", err) + } +*/ +package mail // import "google.golang.org/appengine/mail" + +import ( + "net/mail" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + bpb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/mail" +) + +// A Message represents an email message. +// Addresses may be of any form permitted by RFC 822. +type Message struct { + // Sender must be set, and must be either an application admin + // or the currently signed-in user. + Sender string + ReplyTo string // may be empty + + // At least one of these slices must have a non-zero length, + // except when calling SendToAdmins. + To, Cc, Bcc []string + + Subject string + + // At least one of Body or HTMLBody must be non-empty. + Body string + HTMLBody string + + Attachments []Attachment + + // Extra mail headers. + // See https://cloud.google.com/appengine/docs/go/mail/ + // for permissible headers. + Headers mail.Header +} + +// An Attachment represents an email attachment. +type Attachment struct { + // Name must be set to a valid file name. + Name string + Data []byte + ContentID string +} + +// Send sends an email message. 
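+//
+// A sketch of sending a message with an attachment (an editor's addition,
+// not upstream documentation; the addresses and file contents are
+// illustrative):
+//
+//	msg := &mail.Message{
+//		Sender:  "admin@example.com",
+//		To:      []string{"user@example.com"},
+//		Subject: "Monthly report",
+//		Body:    "The report is attached.",
+//		Attachments: []mail.Attachment{
+//			{Name: "report.csv", Data: []byte("a,b\n1,2\n")},
+//		},
+//	}
+//	if err := mail.Send(c, msg); err != nil {
+//		log.Errorf(c, "mail.Send: %v", err)
+//	}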
+func Send(c context.Context, msg *Message) error { + return send(c, "Send", msg) +} + +// SendToAdmins sends an email message to the application's administrators. +func SendToAdmins(c context.Context, msg *Message) error { + return send(c, "SendToAdmins", msg) +} + +func send(c context.Context, method string, msg *Message) error { + req := &pb.MailMessage{ + Sender: &msg.Sender, + To: msg.To, + Cc: msg.Cc, + Bcc: msg.Bcc, + Subject: &msg.Subject, + } + if msg.ReplyTo != "" { + req.ReplyTo = &msg.ReplyTo + } + if msg.Body != "" { + req.TextBody = &msg.Body + } + if msg.HTMLBody != "" { + req.HtmlBody = &msg.HTMLBody + } + if len(msg.Attachments) > 0 { + req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments)) + for i, att := range msg.Attachments { + req.Attachment[i] = &pb.MailAttachment{ + FileName: proto.String(att.Name), + Data: att.Data, + } + if att.ContentID != "" { + req.Attachment[i].ContentID = proto.String(att.ContentID) + } + } + } + for key, vs := range msg.Headers { + for _, v := range vs { + req.Header = append(req.Header, &pb.MailHeader{ + Name: proto.String(key), + Value: proto.String(v), + }) + } + } + res := &bpb.VoidProto{} + if err := internal.Call(c, "mail", method, req, res); err != nil { + return err + } + return nil +} + +func init() { + internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/mail/mail_test.go b/vendor/google.golang.org/appengine/mail/mail_test.go new file mode 100644 index 0000000..7502c59 --- /dev/null +++ b/vendor/google.golang.org/appengine/mail/mail_test.go @@ -0,0 +1,65 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package mail + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal/aetesting" + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/mail" +) + +func TestMessageConstruction(t *testing.T) { + var got *pb.MailMessage + c := aetesting.FakeSingleContext(t, "mail", "Send", func(in *pb.MailMessage, out *basepb.VoidProto) error { + got = in + return nil + }) + + msg := &Message{ + Sender: "dsymonds@example.com", + To: []string{"nigeltao@example.com"}, + Body: "Hey, lunch time?", + Attachments: []Attachment{ + // Regression test for a prod bug. The address of a range variable was used when + // constructing the outgoing proto, so multiple attachments used the same name. 
+ { + Name: "att1.txt", + Data: []byte("data1"), + ContentID: "", + }, + { + Name: "att2.txt", + Data: []byte("data2"), + }, + }, + } + if err := Send(c, msg); err != nil { + t.Fatalf("Send: %v", err) + } + want := &pb.MailMessage{ + Sender: proto.String("dsymonds@example.com"), + To: []string{"nigeltao@example.com"}, + Subject: proto.String(""), + TextBody: proto.String("Hey, lunch time?"), + Attachment: []*pb.MailAttachment{ + { + FileName: proto.String("att1.txt"), + Data: []byte("data1"), + ContentID: proto.String(""), + }, + { + FileName: proto.String("att2.txt"), + Data: []byte("data2"), + }, + }, + } + if !proto.Equal(got, want) { + t.Errorf("Bad proto for %+v\n got %v\nwant %v", msg, got, want) + } +} diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go new file mode 100644 index 0000000..d8eed4b --- /dev/null +++ b/vendor/google.golang.org/appengine/memcache/memcache.go @@ -0,0 +1,526 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package memcache provides a client for App Engine's distributed in-memory +// key-value store for small chunks of arbitrary data. +// +// The fundamental operations get and set items, keyed by a string. +// +// item0, err := memcache.Get(c, "key") +// if err != nil && err != memcache.ErrCacheMiss { +// return err +// } +// if err == nil { +// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value) +// } else { +// fmt.Fprintf(w, "memcache miss\n") +// } +// +// and +// +// item1 := &memcache.Item{ +// Key: "foo", +// Value: []byte("bar"), +// } +// if err := memcache.Set(c, item1); err != nil { +// return err +// } +package memcache // import "google.golang.org/appengine/memcache" + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "errors" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/memcache" +) + +var ( + // ErrCacheMiss means that an operation failed + // because the item wasn't present. + ErrCacheMiss = errors.New("memcache: cache miss") + // ErrCASConflict means that a CompareAndSwap call failed due to the + // cached value being modified between the Get and the CompareAndSwap. + // If the cached value was simply evicted rather than replaced, + // ErrNotStored will be returned instead. + ErrCASConflict = errors.New("memcache: compare-and-swap conflict") + // ErrNoStats means that no statistics were available. + ErrNoStats = errors.New("memcache: no statistics available") + // ErrNotStored means that a conditional write operation (i.e. Add or + // CompareAndSwap) failed because the condition was not satisfied. + ErrNotStored = errors.New("memcache: item not stored") + // ErrServerError means that a server error occurred. + ErrServerError = errors.New("memcache: server error") +) + +// Item is the unit of memcache gets and sets. +type Item struct { + // Key is the Item's key (250 bytes maximum). + Key string + // Value is the Item's value. + Value []byte + // Object is the Item's value for use with a Codec. + Object interface{} + // Flags are server-opaque flags whose semantics are entirely up to the + // App Engine app. + Flags uint32 + // Expiration is the maximum duration that the item will stay + // in the cache. + // The zero value means the Item has no expiration time. 
+ // Subsecond precision is ignored. + // This is not set when getting items. + Expiration time.Duration + // casID is a client-opaque value used for compare-and-swap operations. + // Zero means that compare-and-swap is not used. + casID uint64 +} + +const ( + secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code + thirtyYears = time.Duration(secondsIn30Years) * time.Second +) + +// protoToItem converts a protocol buffer item to a Go struct. +func protoToItem(p *pb.MemcacheGetResponse_Item) *Item { + return &Item{ + Key: string(p.Key), + Value: p.Value, + Flags: p.GetFlags(), + casID: p.GetCasId(), + } +} + +// If err is an appengine.MultiError, return its first element. Otherwise, return err. +func singleError(err error) error { + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a memcache +// cache miss. The key must be at most 250 bytes in length. +func Get(c context.Context, key string) (*Item, error) { + m, err := GetMulti(c, []string{key}) + if err != nil { + return nil, err + } + if _, ok := m[key]; !ok { + return nil, ErrCacheMiss + } + return m[key], nil +} + +// GetMulti is a batch version of Get. The returned map from keys to items may +// have fewer elements than the input slice, due to memcache cache misses. +// Each key must be at most 250 bytes in length. +func GetMulti(c context.Context, key []string) (map[string]*Item, error) { + if len(key) == 0 { + return nil, nil + } + keyAsBytes := make([][]byte, len(key)) + for i, k := range key { + keyAsBytes[i] = []byte(k) + } + req := &pb.MemcacheGetRequest{ + Key: keyAsBytes, + ForCas: proto.Bool(true), + } + res := &pb.MemcacheGetResponse{} + if err := internal.Call(c, "memcache", "Get", req, res); err != nil { + return nil, err + } + m := make(map[string]*Item, len(res.Item)) + for _, p := range res.Item { + t := protoToItem(p) + m[t.Key] = t + } + return m, nil +} + +// Delete deletes the item for the given key. +// ErrCacheMiss is returned if the specified item can not be found. +// The key must be at most 250 bytes in length. +func Delete(c context.Context, key string) error { + return singleError(DeleteMulti(c, []string{key})) +} + +// DeleteMulti is a batch version of Delete. +// If any keys cannot be found, an appengine.MultiError is returned. +// Each key must be at most 250 bytes in length. +func DeleteMulti(c context.Context, key []string) error { + if len(key) == 0 { + return nil + } + req := &pb.MemcacheDeleteRequest{ + Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)), + } + for i, k := range key { + req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)} + } + res := &pb.MemcacheDeleteResponse{} + if err := internal.Call(c, "memcache", "Delete", req, res); err != nil { + return err + } + if len(res.DeleteStatus) != len(key) { + return ErrServerError + } + me, any := make(appengine.MultiError, len(key)), false + for i, s := range res.DeleteStatus { + switch s { + case pb.MemcacheDeleteResponse_DELETED: + // OK + case pb.MemcacheDeleteResponse_NOT_FOUND: + me[i] = ErrCacheMiss + any = true + default: + me[i] = ErrServerError + any = true + } + } + if any { + return me + } + return nil +} + +// Increment atomically increments the decimal value in the given key +// by delta and returns the new value. The value must fit in a uint64. +// Overflow wraps around, and underflow is capped to zero. The +// provided delta may be negative. 
If the key doesn't exist in +// memcache, the provided initial value is used to atomically +// populate it before the delta is applied. +// The key must be at most 250 bytes in length. +func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) { + return incr(c, key, delta, &initialValue) +} + +// IncrementExisting works like Increment but assumes that the key +// already exists in memcache and doesn't take an initial value. +// IncrementExisting can save work if calculating the initial value is +// expensive. +// An error is returned if the specified item can not be found. +func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) { + return incr(c, key, delta, nil) +} + +func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) { + req := &pb.MemcacheIncrementRequest{ + Key: []byte(key), + InitialValue: initialValue, + } + if delta >= 0 { + req.Delta = proto.Uint64(uint64(delta)) + } else { + req.Delta = proto.Uint64(uint64(-delta)) + req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum() + } + res := &pb.MemcacheIncrementResponse{} + err = internal.Call(c, "memcache", "Increment", req, res) + if err != nil { + return + } + if res.NewValue == nil { + return 0, ErrCacheMiss + } + return *res.NewValue, nil +} + +// set sets the given items using the given conflict resolution policy. +// appengine.MultiError may be returned. +func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error { + if len(item) == 0 { + return nil + } + req := &pb.MemcacheSetRequest{ + Item: make([]*pb.MemcacheSetRequest_Item, len(item)), + } + for i, t := range item { + p := &pb.MemcacheSetRequest_Item{ + Key: []byte(t.Key), + } + if value == nil { + p.Value = t.Value + } else { + p.Value = value[i] + } + if t.Flags != 0 { + p.Flags = proto.Uint32(t.Flags) + } + if t.Expiration != 0 { + // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned) + // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed). + // Throughout this .go file, we use int32. + // Also, in the proto, the expiration value is either a duration (in seconds) + // or an absolute Unix timestamp (in seconds), depending on whether the + // value is less than or greater than or equal to 30 years, respectively. + if t.Expiration < time.Second { + // Because an Expiration of 0 means no expiration, we take + // care here to translate an item with an expiration + // Duration between 0-1 seconds as immediately expiring + // (saying it expired a few seconds ago), rather than + // rounding it down to 0 and making it live forever. 
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5) + } else if t.Expiration >= thirtyYears { + p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second)) + } else { + p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second)) + } + } + if t.casID != 0 { + p.CasId = proto.Uint64(t.casID) + p.ForCas = proto.Bool(true) + } + p.SetPolicy = policy.Enum() + req.Item[i] = p + } + res := &pb.MemcacheSetResponse{} + if err := internal.Call(c, "memcache", "Set", req, res); err != nil { + return err + } + if len(res.SetStatus) != len(item) { + return ErrServerError + } + me, any := make(appengine.MultiError, len(item)), false + for i, st := range res.SetStatus { + var err error + switch st { + case pb.MemcacheSetResponse_STORED: + // OK + case pb.MemcacheSetResponse_NOT_STORED: + err = ErrNotStored + case pb.MemcacheSetResponse_EXISTS: + err = ErrCASConflict + default: + err = ErrServerError + } + if err != nil { + me[i] = err + any = true + } + } + if any { + return me + } + return nil +} + +// Set writes the given item, unconditionally. +func Set(c context.Context, item *Item) error { + return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET)) +} + +// SetMulti is a batch version of Set. +// appengine.MultiError may be returned. +func SetMulti(c context.Context, item []*Item) error { + return set(c, item, nil, pb.MemcacheSetRequest_SET) +} + +// Add writes the given item, if no value already exists for its key. +// ErrNotStored is returned if that condition is not met. +func Add(c context.Context, item *Item) error { + return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD)) +} + +// AddMulti is a batch version of Add. +// appengine.MultiError may be returned. +func AddMulti(c context.Context, item []*Item) error { + return set(c, item, nil, pb.MemcacheSetRequest_ADD) +} + +// CompareAndSwap writes the given item that was previously returned by Get, +// if the value was neither modified or evicted between the Get and the +// CompareAndSwap calls. The item's Key should not change between calls but +// all other item fields may differ. +// ErrCASConflict is returned if the value was modified in between the calls. +// ErrNotStored is returned if the value was evicted in between the calls. +func CompareAndSwap(c context.Context, item *Item) error { + return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS)) +} + +// CompareAndSwapMulti is a batch version of CompareAndSwap. +// appengine.MultiError may be returned. +func CompareAndSwapMulti(c context.Context, item []*Item) error { + return set(c, item, nil, pb.MemcacheSetRequest_CAS) +} + +// Codec represents a symmetric pair of functions that implement a codec. +// Items stored into or retrieved from memcache using a Codec have their +// values marshaled or unmarshaled. +// +// All the methods provided for Codec behave analogously to the package level +// function with same name. +type Codec struct { + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +} + +// Get gets the item for the given key and decodes the obtained value into v. +// ErrCacheMiss is returned for a memcache cache miss. +// The key must be at most 250 bytes in length. 
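+//
+// A round-trip sketch using the JSON codec (an editor's addition; the
+// Greeting type and key are illustrative):
+//
+//	type Greeting struct{ Text string }
+//
+//	item := &memcache.Item{Key: "greeting", Object: &Greeting{Text: "hello"}}
+//	if err := memcache.JSON.Set(c, item); err != nil {
+//		return err
+//	}
+//	var g Greeting
+//	if _, err := memcache.JSON.Get(c, "greeting", &g); err != nil {
+//		return err
+//	}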
+func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) { + i, err := Get(c, key) + if err != nil { + return nil, err + } + if err := cd.Unmarshal(i.Value, v); err != nil { + return nil, err + } + return i, nil +} + +func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error { + var vs [][]byte + var me appengine.MultiError + for i, item := range items { + v, err := cd.Marshal(item.Object) + if err != nil { + if me == nil { + me = make(appengine.MultiError, len(items)) + } + me[i] = err + continue + } + if me == nil { + vs = append(vs, v) + } + } + if me != nil { + return me + } + + return set(c, items, vs, policy) +} + +// Set writes the given item, unconditionally. +func (cd Codec) Set(c context.Context, item *Item) error { + return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET)) +} + +// SetMulti is a batch version of Set. +// appengine.MultiError may be returned. +func (cd Codec) SetMulti(c context.Context, items []*Item) error { + return cd.set(c, items, pb.MemcacheSetRequest_SET) +} + +// Add writes the given item, if no value already exists for its key. +// ErrNotStored is returned if that condition is not met. +func (cd Codec) Add(c context.Context, item *Item) error { + return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD)) +} + +// AddMulti is a batch version of Add. +// appengine.MultiError may be returned. +func (cd Codec) AddMulti(c context.Context, items []*Item) error { + return cd.set(c, items, pb.MemcacheSetRequest_ADD) +} + +// CompareAndSwap writes the given item that was previously returned by Get, +// if the value was neither modified or evicted between the Get and the +// CompareAndSwap calls. The item's Key should not change between calls but +// all other item fields may differ. +// ErrCASConflict is returned if the value was modified in between the calls. +// ErrNotStored is returned if the value was evicted in between the calls. +func (cd Codec) CompareAndSwap(c context.Context, item *Item) error { + return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS)) +} + +// CompareAndSwapMulti is a batch version of CompareAndSwap. +// appengine.MultiError may be returned. +func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error { + return cd.set(c, items, pb.MemcacheSetRequest_CAS) +} + +var ( + // Gob is a Codec that uses the gob package. + Gob = Codec{gobMarshal, gobUnmarshal} + // JSON is a Codec that uses the json package. + JSON = Codec{json.Marshal, json.Unmarshal} +) + +func gobMarshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func gobUnmarshal(data []byte, v interface{}) error { + return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v) +} + +// Statistics represents a set of statistics about the memcache cache. +// This may include items that have expired but have not yet been removed from the cache. +type Statistics struct { + Hits uint64 // Counter of cache hits + Misses uint64 // Counter of cache misses + ByteHits uint64 // Counter of bytes transferred for gets + + Items uint64 // Items currently in the cache + Bytes uint64 // Size of all items currently in the cache + + Oldest int64 // Age of access of the oldest item, in seconds +} + +// Stats retrieves the current memcache statistics. 
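+//
+// For example (editor's sketch):
+//
+//	stats, err := memcache.Stats(c)
+//	if err != nil {
+//		return err
+//	}
+//	log.Infof(c, "memcache: %d hits, %d misses, %d items", stats.Hits, stats.Misses, stats.Items)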
+func Stats(c context.Context) (*Statistics, error) { + req := &pb.MemcacheStatsRequest{} + res := &pb.MemcacheStatsResponse{} + if err := internal.Call(c, "memcache", "Stats", req, res); err != nil { + return nil, err + } + if res.Stats == nil { + return nil, ErrNoStats + } + return &Statistics{ + Hits: *res.Stats.Hits, + Misses: *res.Stats.Misses, + ByteHits: *res.Stats.ByteHits, + Items: *res.Stats.Items, + Bytes: *res.Stats.Bytes, + Oldest: int64(*res.Stats.OldestItemAge), + }, nil +} + +// Flush flushes all items from memcache. +func Flush(c context.Context) error { + req := &pb.MemcacheFlushRequest{} + res := &pb.MemcacheFlushResponse{} + return internal.Call(c, "memcache", "FlushAll", req, res) +} + +func namespaceMod(m proto.Message, namespace string) { + switch m := m.(type) { + case *pb.MemcacheDeleteRequest: + if m.NameSpace == nil { + m.NameSpace = &namespace + } + case *pb.MemcacheGetRequest: + if m.NameSpace == nil { + m.NameSpace = &namespace + } + case *pb.MemcacheIncrementRequest: + if m.NameSpace == nil { + m.NameSpace = &namespace + } + case *pb.MemcacheSetRequest: + if m.NameSpace == nil { + m.NameSpace = &namespace + } + // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace. + } +} + +func init() { + internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name) + internal.NamespaceMods["memcache"] = namespaceMod +} diff --git a/vendor/google.golang.org/appengine/memcache/memcache_test.go b/vendor/google.golang.org/appengine/memcache/memcache_test.go new file mode 100644 index 0000000..1dc7da4 --- /dev/null +++ b/vendor/google.golang.org/appengine/memcache/memcache_test.go @@ -0,0 +1,263 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package memcache + +import ( + "fmt" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/memcache" +) + +var errRPC = fmt.Errorf("RPC error") + +func TestGetRequest(t *testing.T) { + serviceCalled := false + apiKey := "lyric" + + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error { + // Test request. + if n := len(req.Key); n != 1 { + t.Errorf("got %d want 1", n) + return nil + } + if k := string(req.Key[0]); k != apiKey { + t.Errorf("got %q want %q", k, apiKey) + } + + serviceCalled = true + return nil + }) + + // Test the "forward" path from the API call parameters to the + // protobuf request object. (The "backward" path from the + // protobuf response object to the API call response, + // including the error response, are handled in the next few + // tests). 
+ Get(c, apiKey) + if !serviceCalled { + t.Error("Service was not called as expected") + } +} + +func TestGetResponseHit(t *testing.T) { + key := "lyric" + value := "Where the buffalo roam" + + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + res.Item = []*pb.MemcacheGetResponse_Item{ + {Key: []byte(key), Value: []byte(value)}, + } + return nil + }) + apiItem, err := Get(c, key) + if apiItem == nil || apiItem.Key != key || string(apiItem.Value) != value { + t.Errorf("got %q, %q want {%q,%q}, nil", apiItem, err, key, value) + } +} + +func TestGetResponseMiss(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + // don't fill in any of the response + return nil + }) + _, err := Get(c, "something") + if err != ErrCacheMiss { + t.Errorf("got %v want ErrCacheMiss", err) + } +} + +func TestGetResponseRPCError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + return errRPC + }) + + if _, err := Get(c, "something"); err != errRPC { + t.Errorf("got %v want errRPC", err) + } +} + +func TestAddRequest(t *testing.T) { + var apiItem = &Item{ + Key: "lyric", + Value: []byte("Oh, give me a home"), + } + + serviceCalled := false + + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error { + // Test request. + pbItem := req.Item[0] + if k := string(pbItem.Key); k != apiItem.Key { + t.Errorf("got %q want %q", k, apiItem.Key) + } + if v := string(apiItem.Value); v != string(pbItem.Value) { + t.Errorf("got %q want %q", v, string(pbItem.Value)) + } + if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_ADD { + t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_ADD) + } + + serviceCalled = true + return nil + }) + + Add(c, apiItem) + if !serviceCalled { + t.Error("Service was not called as expected") + } +} + +func TestAddResponseStored(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED} + return nil + }) + + if err := Add(c, &Item{}); err != nil { + t.Errorf("got %v want nil", err) + } +} + +func TestAddResponseNotStored(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_NOT_STORED} + return nil + }) + + if err := Add(c, &Item{}); err != ErrNotStored { + t.Errorf("got %v want ErrNotStored", err) + } +} + +func TestAddResponseError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR} + return nil + }) + + if err := Add(c, &Item{}); err != ErrServerError { + t.Errorf("got %v want ErrServerError", err) + } +} + +func TestAddResponseRPCError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + return errRPC + }) + + if err := Add(c, &Item{}); err != errRPC { + t.Errorf("got %v want errRPC", err) + } +} + +func TestSetRequest(t *testing.T) { + var apiItem = &Item{ + Key: "lyric", + Value: 
[]byte("Where the buffalo roam"), + } + + serviceCalled := false + + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error { + // Test request. + if n := len(req.Item); n != 1 { + t.Errorf("got %d want 1", n) + return nil + } + pbItem := req.Item[0] + if k := string(pbItem.Key); k != apiItem.Key { + t.Errorf("got %q want %q", k, apiItem.Key) + } + if v := string(pbItem.Value); v != string(apiItem.Value) { + t.Errorf("got %q want %q", v, string(apiItem.Value)) + } + if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_SET { + t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_SET) + } + + serviceCalled = true + return nil + }) + + Set(c, apiItem) + if !serviceCalled { + t.Error("Service was not called as expected") + } +} + +func TestSetResponse(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED} + return nil + }) + + if err := Set(c, &Item{}); err != nil { + t.Errorf("got %v want nil", err) + } +} + +func TestSetResponseError(t *testing.T) { + c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error { + res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR} + return nil + }) + + if err := Set(c, &Item{}); err != ErrServerError { + t.Errorf("got %v want ErrServerError", err) + } +} + +func TestNamespaceResetting(t *testing.T) { + namec := make(chan *string, 1) + c0 := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error { + namec <- req.NameSpace + return errRPC + }) + + // Check that wrapping c0 in a namespace twice works correctly. + c1, err := appengine.Namespace(c0, "A") + if err != nil { + t.Fatalf("appengine.Namespace: %v", err) + } + c2, err := appengine.Namespace(c1, "") // should act as the original context + if err != nil { + t.Fatalf("appengine.Namespace: %v", err) + } + + Get(c0, "key") + if ns := <-namec; ns != nil { + t.Errorf(`Get with c0: ns = %q, want nil`, *ns) + } + + Get(c1, "key") + if ns := <-namec; ns == nil { + t.Error(`Get with c1: ns = nil, want "A"`) + } else if *ns != "A" { + t.Errorf(`Get with c1: ns = %q, want "A"`, *ns) + } + + Get(c2, "key") + if ns := <-namec; ns != nil { + t.Errorf(`Get with c2: ns = %q, want nil`, *ns) + } +} + +func TestGetMultiEmpty(t *testing.T) { + serviceCalled := false + c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error { + serviceCalled = true + return nil + }) + + // Test that the Memcache service is not called when + // GetMulti is passed an empty slice of keys. + GetMulti(c, []string{}) + if serviceCalled { + t.Error("Service was called but should not have been") + } +} diff --git a/vendor/google.golang.org/appengine/module/module.go b/vendor/google.golang.org/appengine/module/module.go new file mode 100644 index 0000000..88e6629 --- /dev/null +++ b/vendor/google.golang.org/appengine/module/module.go @@ -0,0 +1,113 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package module provides functions for interacting with modules. + +The appengine package contains functions that report the identity of the app, +including the module name. 
+*/ +package module // import "google.golang.org/appengine/module" + +import ( + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/modules" +) + +// List returns the names of modules belonging to this application. +func List(c context.Context) ([]string, error) { + req := &pb.GetModulesRequest{} + res := &pb.GetModulesResponse{} + err := internal.Call(c, "modules", "GetModules", req, res) + return res.Module, err +} + +// NumInstances returns the number of instances of the given module/version. +// If either argument is the empty string it means the default. +func NumInstances(c context.Context, module, version string) (int, error) { + req := &pb.GetNumInstancesRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + res := &pb.GetNumInstancesResponse{} + + if err := internal.Call(c, "modules", "GetNumInstances", req, res); err != nil { + return 0, err + } + return int(*res.Instances), nil +} + +// SetNumInstances sets the number of instances of the given module.version to the +// specified value. If either module or version are the empty string it means the +// default. +func SetNumInstances(c context.Context, module, version string, instances int) error { + req := &pb.SetNumInstancesRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + req.Instances = proto.Int64(int64(instances)) + res := &pb.SetNumInstancesResponse{} + return internal.Call(c, "modules", "SetNumInstances", req, res) +} + +// Versions returns the names of the versions that belong to the specified module. +// If module is the empty string, it means the default module. +func Versions(c context.Context, module string) ([]string, error) { + req := &pb.GetVersionsRequest{} + if module != "" { + req.Module = &module + } + res := &pb.GetVersionsResponse{} + err := internal.Call(c, "modules", "GetVersions", req, res) + return res.GetVersion(), err +} + +// DefaultVersion returns the default version of the specified module. +// If module is the empty string, it means the default module. +func DefaultVersion(c context.Context, module string) (string, error) { + req := &pb.GetDefaultVersionRequest{} + if module != "" { + req.Module = &module + } + res := &pb.GetDefaultVersionResponse{} + err := internal.Call(c, "modules", "GetDefaultVersion", req, res) + return res.GetVersion(), err +} + +// Start starts the specified version of the specified module. +// If either module or version are the empty string, it means the default. +func Start(c context.Context, module, version string) error { + req := &pb.StartModuleRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + res := &pb.StartModuleResponse{} + return internal.Call(c, "modules", "StartModule", req, res) +} + +// Stop stops the specified version of the specified module. +// If either module or version are the empty string, it means the default. 
+func Stop(c context.Context, module, version string) error { + req := &pb.StopModuleRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + res := &pb.StopModuleResponse{} + return internal.Call(c, "modules", "StopModule", req, res) +} diff --git a/vendor/google.golang.org/appengine/module/module_test.go b/vendor/google.golang.org/appengine/module/module_test.go new file mode 100644 index 0000000..73e8971 --- /dev/null +++ b/vendor/google.golang.org/appengine/module/module_test.go @@ -0,0 +1,124 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package module + +import ( + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/modules" +) + +const version = "test-version" +const module = "test-module" +const instances = 3 + +func TestList(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "GetModules", func(req *pb.GetModulesRequest, res *pb.GetModulesResponse) error { + res.Module = []string{"default", "mod1"} + return nil + }) + got, err := List(c) + if err != nil { + t.Fatalf("List: %v", err) + } + want := []string{"default", "mod1"} + if !reflect.DeepEqual(got, want) { + t.Errorf("List = %v, want %v", got, want) + } +} + +func TestSetNumInstances(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "SetNumInstances", func(req *pb.SetNumInstancesRequest, res *pb.SetNumInstancesResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + if *req.Version != version { + t.Errorf("Version = %v, want %v", req.Version, version) + } + if *req.Instances != instances { + t.Errorf("Instances = %v, want %d", req.Instances, instances) + } + return nil + }) + err := SetNumInstances(c, module, version, instances) + if err != nil { + t.Fatalf("SetNumInstances: %v", err) + } +} + +func TestVersions(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "GetVersions", func(req *pb.GetVersionsRequest, res *pb.GetVersionsResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + res.Version = []string{"v1", "v2", "v3"} + return nil + }) + got, err := Versions(c, module) + if err != nil { + t.Fatalf("Versions: %v", err) + } + want := []string{"v1", "v2", "v3"} + if !reflect.DeepEqual(got, want) { + t.Errorf("Versions = %v, want %v", got, want) + } +} + +func TestDefaultVersion(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "GetDefaultVersion", func(req *pb.GetDefaultVersionRequest, res *pb.GetDefaultVersionResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + res.Version = proto.String(version) + return nil + }) + got, err := DefaultVersion(c, module) + if err != nil { + t.Fatalf("DefaultVersion: %v", err) + } + if got != version { + t.Errorf("Version = %v, want %v", got, version) + } +} + +func TestStart(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "StartModule", func(req *pb.StartModuleRequest, res *pb.StartModuleResponse) error { + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + if *req.Version != version { + t.Errorf("Version = %v, want %v", req.Version, version) + } + return nil + }) + + err := Start(c, module, version) + if err != nil { + t.Fatalf("Start: %v", err) + } +} + 
+func TestStop(t *testing.T) { + c := aetesting.FakeSingleContext(t, "modules", "StopModule", func(req *pb.StopModuleRequest, res *pb.StopModuleResponse) error { + version := "test-version" + module := "test-module" + if *req.Module != module { + t.Errorf("Module = %v, want %v", req.Module, module) + } + if *req.Version != version { + t.Errorf("Version = %v, want %v", req.Version, version) + } + return nil + }) + + err := Stop(c, module, version) + if err != nil { + t.Fatalf("Stop: %v", err) + } +} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 0000000..21860ca --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,25 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "fmt" + "regexp" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/namespace_test.go b/vendor/google.golang.org/appengine/namespace_test.go new file mode 100644 index 0000000..847f640 --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace_test.go @@ -0,0 +1,39 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "testing" + + "golang.org/x/net/context" +) + +func TestNamespaceValidity(t *testing.T) { + testCases := []struct { + namespace string + ok bool + }{ + // data from Python's namespace_manager_test.py + {"", true}, + {"__a.namespace.123__", true}, + {"-_A....NAMESPACE-_", true}, + {"-", true}, + {".", true}, + {".-", true}, + + {"?", false}, + {"+", false}, + {"!", false}, + {" ", false}, + } + for _, tc := range testCases { + _, err := Namespace(context.Background(), tc.namespace) + if err == nil && !tc.ok { + t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace) + } else if err != nil && tc.ok { + t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace) + } + } +} diff --git a/vendor/google.golang.org/appengine/remote_api/client.go b/vendor/google.golang.org/appengine/remote_api/client.go new file mode 100644 index 0000000..dbe219d --- /dev/null +++ b/vendor/google.golang.org/appengine/remote_api/client.go @@ -0,0 +1,174 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package remote_api + +// This file provides the client for connecting remotely to a user's production +// application. 
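+//
+// A sketch of typical usage from an offline tool (the host name and the
+// credential helper are illustrative; any *http.Client that attaches
+// credentials the app accepts will do):
+//
+//	hc, err := google.DefaultClient(context.Background(),
+//		"https://www.googleapis.com/auth/appengine.apis")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ctx, err := remote_api.NewRemoteContext("my-app.appspot.com", hc)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// ctx can now be passed to other appengine packages, e.g. datastore.Get.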
+ +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/remote_api" +) + +// NewRemoteContext returns a context that gives access to the production +// APIs for the application at the given host. All communication will be +// performed over SSL unless the host is localhost. +func NewRemoteContext(host string, client *http.Client) (context.Context, error) { + // Add an appcfg header to outgoing requests. + t := client.Transport + if t == nil { + t = http.DefaultTransport + } + client.Transport = &headerAddingRoundTripper{t} + + url := url.URL{ + Scheme: "https", + Host: host, + Path: "/_ah/remote_api", + } + if host == "localhost" || strings.HasPrefix(host, "localhost:") { + url.Scheme = "http" + } + u := url.String() + appID, err := getAppID(client, u) + if err != nil { + return nil, fmt.Errorf("unable to contact server: %v", err) + } + rc := &remoteContext{ + client: client, + url: u, + } + ctx := internal.WithCallOverride(context.Background(), rc.call) + ctx = internal.WithLogOverride(ctx, rc.logf) + ctx = internal.WithAppIDOverride(ctx, appID) + return ctx, nil +} + +type remoteContext struct { + client *http.Client + url string +} + +var logLevels = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func (c *remoteContext) logf(level int64, format string, args ...interface{}) { + log.Printf(logLevels[level]+": "+format, args...) +} + +func (c *remoteContext) call(ctx context.Context, service, method string, in, out proto.Message) error { + req, err := proto.Marshal(in) + if err != nil { + return fmt.Errorf("error marshalling request: %v", err) + } + + remReq := &pb.Request{ + ServiceName: proto.String(service), + Method: proto.String(method), + Request: req, + // NOTE(djd): RequestId is unused in the server. + } + + req, err = proto.Marshal(remReq) + if err != nil { + return fmt.Errorf("proto.Marshal: %v", err) + } + + // TODO(djd): Respect ctx.Deadline()? + resp, err := c.client.Post(c.url, "application/octet-stream", bytes.NewReader(req)) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body) + } + if err != nil { + return fmt.Errorf("failed reading response: %v", err) + } + remResp := &pb.Response{} + if err := proto.Unmarshal(body, remResp); err != nil { + return fmt.Errorf("error unmarshalling response: %v", err) + } + + if ae := remResp.GetApplicationError(); ae != nil { + return &internal.APIError{ + Code: ae.GetCode(), + Detail: ae.GetDetail(), + Service: service, + } + } + + if remResp.Response == nil { + return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp)) + } + + return proto.Unmarshal(remResp.Response, out) +} + +// This is a forgiving regexp designed to parse the app ID from YAML. +var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`) + +func getAppID(client *http.Client, url string) (string, error) { + // Generate a pseudo-random token for handshaking. 
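+	// The endpoint echoes rtok back in its YAML response; the check below
+	// confirms we reached a real remote_api handler rather than, say, a
+	// login redirect page.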
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int()) + + resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body) + } + if err != nil { + return "", fmt.Errorf("failed reading response: %v", err) + } + + // Check the token is present in response. + if !bytes.Contains(body, []byte(token)) { + return "", fmt.Errorf("token not found: want %q; body %q", token, body) + } + + match := appIDRE.FindSubmatch(body) + if match == nil { + return "", fmt.Errorf("app ID not found: body %q", body) + } + + return string(match[1]), nil +} + +type headerAddingRoundTripper struct { + Wrapped http.RoundTripper +} + +func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + r.Header.Set("X-Appcfg-Api-Version", "1") + return t.Wrapped.RoundTrip(r) +} diff --git a/vendor/google.golang.org/appengine/remote_api/client_test.go b/vendor/google.golang.org/appengine/remote_api/client_test.go new file mode 100644 index 0000000..2e892a0 --- /dev/null +++ b/vendor/google.golang.org/appengine/remote_api/client_test.go @@ -0,0 +1,24 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package remote_api + +import ( + "testing" +) + +func TestAppIDRE(t *testing.T) { + appID := "s~my-appid-539" + tests := []string{ + "{rtok: 8306111115908860449, app_id: s~my-appid-539}\n", + "{rtok: 8306111115908860449, app_id: 's~my-appid-539'}\n", + `{rtok: 8306111115908860449, app_id: "s~my-appid-539"}`, + `{rtok: 8306111115908860449, "app_id":"s~my-appid-539"}`, + } + for _, v := range tests { + if g := appIDRE.FindStringSubmatch(v); g == nil || g[1] != appID { + t.Errorf("appIDRE.FindStringSubmatch(%s) got %q, want %q", v, g, appID) + } + } +} diff --git a/vendor/google.golang.org/appengine/remote_api/remote_api.go b/vendor/google.golang.org/appengine/remote_api/remote_api.go new file mode 100644 index 0000000..68cd7d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/remote_api/remote_api.go @@ -0,0 +1,152 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package remote_api implements the /_ah/remote_api endpoint. +This endpoint is used by offline tools such as the bulk loader. 
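+
+To expose the endpoint, an app imports this package solely for the side
+effect of its init function, which registers the handler:
+
+	import _ "google.golang.org/appengine/remote_api"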
+*/ +package remote_api // import "google.golang.org/appengine/remote_api" + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/remote_api" + "google.golang.org/appengine/log" + "google.golang.org/appengine/user" +) + +func init() { + http.HandleFunc("/_ah/remote_api", handle) +} + +func handle(w http.ResponseWriter, req *http.Request) { + c := appengine.NewContext(req) + + u := user.Current(c) + if u == nil { + u, _ = user.CurrentOAuth(c, + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/appengine.apis", + ) + } + + if u == nil || !u.Admin { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusUnauthorized) + io.WriteString(w, "You must be logged in as an administrator to access this.\n") + return + } + if req.Header.Get("X-Appcfg-Api-Version") == "" { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + io.WriteString(w, "This request did not contain a necessary header.\n") + return + } + + if req.Method != "POST" { + // Response must be YAML. + rtok := req.FormValue("rtok") + if rtok == "" { + rtok = "0" + } + w.Header().Set("Content-Type", "text/yaml; charset=utf-8") + fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, internal.FullyQualifiedAppID(c), rtok) + return + } + + defer req.Body.Close() + body, err := ioutil.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + log.Errorf(c, "Failed reading body: %v", err) + return + } + remReq := &pb.Request{} + if err := proto.Unmarshal(body, remReq); err != nil { + w.WriteHeader(http.StatusBadRequest) + log.Errorf(c, "Bad body: %v", err) + return + } + + service, method := *remReq.ServiceName, *remReq.Method + if !requestSupported(service, method) { + w.WriteHeader(http.StatusBadRequest) + log.Errorf(c, "Unsupported RPC /%s.%s", service, method) + return + } + + rawReq := &rawMessage{remReq.Request} + rawRes := &rawMessage{} + err = internal.Call(c, service, method, rawReq, rawRes) + + remRes := &pb.Response{} + if err == nil { + remRes.Response = rawRes.buf + } else if ae, ok := err.(*internal.APIError); ok { + remRes.ApplicationError = &pb.ApplicationError{ + Code: &ae.Code, + Detail: &ae.Detail, + } + } else { + // This shouldn't normally happen. + log.Errorf(c, "appengine/remote_api: Unexpected error of type %T: %v", err, err) + remRes.ApplicationError = &pb.ApplicationError{ + Code: proto.Int32(0), + Detail: proto.String(err.Error()), + } + } + out, err := proto.Marshal(remRes) + if err != nil { + // This should not be possible. + w.WriteHeader(500) + log.Errorf(c, "proto.Marshal: %v", err) + return + } + + log.Infof(c, "Spooling %d bytes of response to /%s.%s", len(out), service, method) + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", strconv.Itoa(len(out))) + w.Write(out) +} + +// rawMessage is a protocol buffer type that is already serialised. +// This allows the remote_api code here to handle messages +// without having to know the real type. 
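+// Marshal hands back the stored bytes unchanged and Unmarshal copies them in,
+// so the client's serialised payload passes through internal.Call verbatim.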
+type rawMessage struct { + buf []byte +} + +func (rm *rawMessage) Marshal() ([]byte, error) { + return rm.buf, nil +} + +func (rm *rawMessage) Unmarshal(buf []byte) error { + rm.buf = make([]byte, len(buf)) + copy(rm.buf, buf) + return nil +} + +func requestSupported(service, method string) bool { + // This list of supported services is taken from SERVICE_PB_MAP in remote_api_services.py + switch service { + case "app_identity_service", "blobstore", "capability_service", "channel", "datastore_v3", + "datastore_v4", "file", "images", "logservice", "mail", "matcher", "memcache", "remote_datastore", + "remote_socket", "search", "modules", "system", "taskqueue", "urlfetch", "user", "xmpp": + return true + } + return false +} + +// Methods to satisfy proto.Message. +func (rm *rawMessage) Reset() { rm.buf = nil } +func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) } +func (*rawMessage) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/runtime/runtime.go b/vendor/google.golang.org/appengine/runtime/runtime.go new file mode 100644 index 0000000..fa6c12b --- /dev/null +++ b/vendor/google.golang.org/appengine/runtime/runtime.go @@ -0,0 +1,148 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package runtime exposes information about the resource usage of the application. +It also provides a way to run code in a new background context of a module. + +This package does not work on App Engine "flexible environment". +*/ +package runtime // import "google.golang.org/appengine/runtime" + +import ( + "net/http" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/system" +) + +// Statistics represents the system's statistics. +type Statistics struct { + // CPU records the CPU consumed by this instance, in megacycles. + CPU struct { + Total float64 + Rate1M float64 // consumption rate over one minute + Rate10M float64 // consumption rate over ten minutes + } + // RAM records the memory used by the instance, in megabytes. + RAM struct { + Current float64 + Average1M float64 // average usage over one minute + Average10M float64 // average usage over ten minutes + } +} + +func Stats(c context.Context) (*Statistics, error) { + req := &pb.GetSystemStatsRequest{} + res := &pb.GetSystemStatsResponse{} + if err := internal.Call(c, "system", "GetSystemStats", req, res); err != nil { + return nil, err + } + s := &Statistics{} + if res.Cpu != nil { + s.CPU.Total = res.Cpu.GetTotal() + s.CPU.Rate1M = res.Cpu.GetRate1M() + s.CPU.Rate10M = res.Cpu.GetRate10M() + } + if res.Memory != nil { + s.RAM.Current = res.Memory.GetCurrent() + s.RAM.Average1M = res.Memory.GetAverage1M() + s.RAM.Average10M = res.Memory.GetAverage10M() + } + return s, nil +} + +/* +RunInBackground makes an API call that triggers an /_ah/background request. + +There are two independent code paths that need to make contact: +the RunInBackground code, and the /_ah/background handler. The matchmaker +loop arranges for the two paths to meet. The RunInBackground code passes +a send to the matchmaker, the /_ah/background passes a recv to the matchmaker, +and the matchmaker hooks them up. 
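+
+Concretely: RunInBackground calls the system service to start a background
+request and learns its request ID, then passes {id, f} to the matchmaker;
+the /_ah/background handler reads the same ID from the
+X-AppEngine-BackgroundRequest header and passes {id, ch} to the matchmaker,
+which pairs the two and delivers f to ch, where the handler invokes it with
+the background request's context.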
+*/ + +func init() { + http.HandleFunc("/_ah/background", handleBackground) + + sc := make(chan send) + rc := make(chan recv) + sendc, recvc = sc, rc + go matchmaker(sc, rc) +} + +var ( + sendc chan<- send // RunInBackground sends to this + recvc chan<- recv // handleBackground sends to this +) + +type send struct { + id string + f func(context.Context) +} + +type recv struct { + id string + ch chan<- func(context.Context) +} + +func matchmaker(sendc <-chan send, recvc <-chan recv) { + // When one side of the match arrives before the other + // it is inserted in the corresponding map. + waitSend := make(map[string]send) + waitRecv := make(map[string]recv) + + for { + select { + case s := <-sendc: + if r, ok := waitRecv[s.id]; ok { + // meet! + delete(waitRecv, s.id) + r.ch <- s.f + } else { + // waiting for r + waitSend[s.id] = s + } + case r := <-recvc: + if s, ok := waitSend[r.id]; ok { + // meet! + delete(waitSend, r.id) + r.ch <- s.f + } else { + // waiting for s + waitRecv[r.id] = r + } + } + } +} + +var newContext = appengine.NewContext // for testing + +func handleBackground(w http.ResponseWriter, req *http.Request) { + id := req.Header.Get("X-AppEngine-BackgroundRequest") + + ch := make(chan func(context.Context)) + recvc <- recv{id, ch} + (<-ch)(newContext(req)) +} + +// RunInBackground runs f in a background goroutine in this process. +// f is provided a context that may outlast the context provided to RunInBackground. +// This is only valid to invoke from a service set to basic or manual scaling. +func RunInBackground(c context.Context, f func(c context.Context)) error { + req := &pb.StartBackgroundRequestRequest{} + res := &pb.StartBackgroundRequestResponse{} + if err := internal.Call(c, "system", "StartBackgroundRequest", req, res); err != nil { + return err + } + sendc <- send{res.GetRequestId(), f} + return nil +} + +func init() { + internal.RegisterErrorCodeMap("system", pb.SystemServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/runtime/runtime_test.go b/vendor/google.golang.org/appengine/runtime/runtime_test.go new file mode 100644 index 0000000..8f3a124 --- /dev/null +++ b/vendor/google.golang.org/appengine/runtime/runtime_test.go @@ -0,0 +1,101 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
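+
+// These tests drive both halves of the handshake described in runtime.go: a
+// faked system.StartBackgroundRequest supplies the request ID, while a real
+// HTTP request against an httptest server plays the /_ah/background side.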
+ +package runtime + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/system" +) + +func TestRunInBackgroundSendFirst(t *testing.T) { testRunInBackground(t, true) } +func TestRunInBackgroundRecvFirst(t *testing.T) { testRunInBackground(t, false) } + +func testRunInBackground(t *testing.T, sendFirst bool) { + srv := httptest.NewServer(nil) + defer srv.Close() + + const id = "f00bar" + sendWait, recvWait := make(chan bool), make(chan bool) + sbr := make(chan bool) // strobed when system.StartBackgroundRequest has started + + calls := 0 + c := aetesting.FakeSingleContext(t, "system", "StartBackgroundRequest", func(req *pb.StartBackgroundRequestRequest, res *pb.StartBackgroundRequestResponse) error { + calls++ + if calls > 1 { + t.Errorf("Too many calls to system.StartBackgroundRequest") + } + sbr <- true + res.RequestId = proto.String(id) + <-sendWait + return nil + }) + + var c2 context.Context // a fake + newContext = func(*http.Request) context.Context { + return c2 + } + + var fRun int + f := func(c3 context.Context) { + fRun++ + if c3 != c2 { + t.Errorf("f got a different context than expected") + } + } + + ribErrc := make(chan error) + go func() { + ribErrc <- RunInBackground(c, f) + }() + + brErrc := make(chan error) + go func() { + <-sbr + req, err := http.NewRequest("GET", srv.URL+"/_ah/background", nil) + if err != nil { + brErrc <- fmt.Errorf("http.NewRequest: %v", err) + return + } + req.Header.Set("X-AppEngine-BackgroundRequest", id) + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + } + + <-recvWait + _, err = client.Do(req) + brErrc <- err + }() + + // Send and receive are both waiting at this point. + waits := [2]chan bool{sendWait, recvWait} + if !sendFirst { + waits[0], waits[1] = waits[1], waits[0] + } + waits[0] <- true + time.Sleep(100 * time.Millisecond) + waits[1] <- true + + if err := <-ribErrc; err != nil { + t.Fatalf("RunInBackground: %v", err) + } + if err := <-brErrc; err != nil { + t.Fatalf("background request: %v", err) + } + + if fRun != 1 { + t.Errorf("Got %d runs of f, want 1", fRun) + } +} diff --git a/vendor/google.golang.org/appengine/search/doc.go b/vendor/google.golang.org/appengine/search/doc.go new file mode 100644 index 0000000..da331ce --- /dev/null +++ b/vendor/google.golang.org/appengine/search/doc.go @@ -0,0 +1,209 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package search provides a client for App Engine's search service. + + +Basic Operations + +Indexes contain documents. Each index is identified by its name: a +human-readable ASCII string. + +Within an index, documents are associated with an ID, which is also +a human-readable ASCII string. A document's contents are a mapping from +case-sensitive field names to values. Valid types for field values are: + - string, + - search.Atom, + - search.HTML, + - time.Time (stored with millisecond precision), + - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive), + - appengine.GeoPoint. + +The Get and Put methods on an Index load and save a document. +A document's contents are typically represented by a struct pointer. 
+ +Example code: + + type Doc struct { + Author string + Comment string + Creation time.Time + } + + index, err := search.Open("comments") + if err != nil { + return err + } + newID, err := index.Put(ctx, "", &Doc{ + Author: "gopher", + Comment: "the truth of the matter", + Creation: time.Now(), + }) + if err != nil { + return err + } + +A single document can be retrieved by its ID. Pass a destination struct +to Get to hold the resulting document. + + var doc Doc + err := index.Get(ctx, id, &doc) + if err != nil { + return err + } + + +Search and Listing Documents + +Indexes have two methods for retrieving multiple documents at once: Search and +List. + +Searching an index for a query will result in an iterator. As with an iterator +from package datastore, pass a destination struct to Next to decode the next +result. Next will return Done when the iterator is exhausted. + + for t := index.Search(ctx, "Comment:truth", nil); ; { + var doc Doc + id, err := t.Next(&doc) + if err == search.Done { + break + } + if err != nil { + return err + } + fmt.Fprintf(w, "%s -> %#v\n", id, doc) + } + +Search takes a string query to determine which documents to return. The query +can be simple, such as a single word to match, or complex. The query +language is described at +https://cloud.google.com/appengine/docs/go/search/query_strings + +Search also takes an optional SearchOptions struct which gives much more +control over how results are calculated and returned. + +Call List to iterate over all documents in an index. + + for t := index.List(ctx, nil); ; { + var doc Doc + id, err := t.Next(&doc) + if err == search.Done { + break + } + if err != nil { + return err + } + fmt.Fprintf(w, "%s -> %#v\n", id, doc) + } + + +Fields and Facets + +A document's contents can be represented by a variety of types. These are +typically struct pointers, but they can also be represented by any type +implementing the FieldLoadSaver interface. The FieldLoadSaver allows metadata +to be set for the document with the DocumentMetadata type. Struct pointers are +more strongly typed and are easier to use; FieldLoadSavers are more flexible. + +A document's contents can be expressed in two ways: fields and facets. + +Fields are the most common way of providing content for documents. Fields can +store data in multiple types and can be matched in searches using query +strings. + +Facets provide a way to attach categorical information to a document. The only +valid types for facets are search.Atom and float64. Facets allow search +results to contain summaries of the categories matched in a search, and to +restrict searches to only match against specific categories. + +By default, for struct pointers, all of the struct fields are used as document +fields, and the field name used is the same as on the struct (and hence must +start with an upper case letter). Struct fields may have a +`search:"name,options"` tag. The name must start with a letter and be +composed only of word characters. A "-" tag name means that the field will be +ignored. If options is "facet" then the struct field will be used as a +document facet. If options is "" then the comma may be omitted. There are no +other recognized options. + +Example code: + + // A and B are renamed to a and b. + // A, C and I are facets. + // D's tag is equivalent to having no tag at all (E). + // F and G are ignored entirely by the search package. + // I has tag information for both the search and json packages. 
+	type TaggedStruct struct {
+		A float64 `search:"a,facet"`
+		B float64 `search:"b"`
+		C float64 `search:",facet"`
+		D float64 `search:""`
+		E float64
+		F float64 `search:"-"`
+		G float64 `search:"-,facet"`
+		I float64 `search:",facet" json:"i"`
+	}
+
+
+The FieldLoadSaver Interface
+
+A document's contents can also be represented by any type that implements the
+FieldLoadSaver interface. This type may be a struct pointer, but it
+does not have to be. The search package will call Load when loading the
+document's contents, and Save when saving them. In addition to a slice of
+Fields, the Load and Save methods also use the DocumentMetadata type to
+provide additional information about a document (such as its Rank, or set of
+Facets). Possible uses for this interface include deriving non-stored fields,
+verifying fields or setting specific languages for string and HTML fields.
+
+Example code:
+
+	type CustomFieldsExample struct {
+		// Item's title and which language it is in.
+		Title string
+		Lang  string
+		// Mass, in grams.
+		Mass int
+	}
+
+	func (x *CustomFieldsExample) Load(fields []search.Field, meta *search.DocumentMetadata) error {
+		// Load the title field, failing if any other field is found.
+		for _, f := range fields {
+			if f.Name != "title" {
+				return fmt.Errorf("unknown field %q", f.Name)
+			}
+			s, ok := f.Value.(string)
+			if !ok {
+				return fmt.Errorf("unsupported type %T for field %q", f.Value, f.Name)
+			}
+			x.Title = s
+			x.Lang = f.Language
+		}
+		// Load the mass facet, failing if any other facet is found.
+		for _, f := range meta.Facets {
+			if f.Name != "mass" {
+				return fmt.Errorf("unknown facet %q", f.Name)
+			}
+			m, ok := f.Value.(float64)
+			if !ok {
+				return fmt.Errorf("unsupported type %T for facet %q", f.Value, f.Name)
+			}
+			x.Mass = int(m)
+		}
+		return nil
+	}
+
+	func (x *CustomFieldsExample) Save() ([]search.Field, *search.DocumentMetadata, error) {
+		fields := []search.Field{
+			{Name: "title", Value: x.Title, Language: x.Lang},
+		}
+		meta := &search.DocumentMetadata{
+			Facets: []search.Facet{
+				{Name: "mass", Value: float64(x.Mass)},
+			},
+		}
+		return fields, meta, nil
+	}
+*/
+package search
diff --git a/vendor/google.golang.org/appengine/search/field.go b/vendor/google.golang.org/appengine/search/field.go
new file mode 100644
index 0000000..707c2d8
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/field.go
@@ -0,0 +1,82 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+// Field is a name/value pair. A search index's document can be loaded and
+// saved as a sequence of Fields.
+type Field struct {
+	// Name is the field name. A valid field name matches /[A-Za-z][A-Za-z0-9_]*/.
+	Name string
+	// Value is the field value. The valid types are:
+	//  - string,
+	//  - search.Atom,
+	//  - search.HTML,
+	//  - time.Time (stored with millisecond precision),
+	//  - float64,
+	//  - GeoPoint.
+	Value interface{}
+	// Language is a two-letter ISO 639-1 code for the field's language,
+	// defaulting to "en" if nothing is specified. It may only be specified for
+	// fields of type string and search.HTML.
+	Language string
+	// Derived marks fields that were calculated as a result of a
+	// FieldExpression provided to Search. This field is ignored when saving a
+	// document.
+	Derived bool
+}
+
+// Facet is a name/value pair which is used to add categorical information to a
+// document.
+type Facet struct {
+	// Name is the facet name. A valid facet name matches /[A-Za-z][A-Za-z0-9_]*/.
+	// A facet name cannot be longer than 500 characters.
+	Name string
+	// Value is the facet value.
+	//
+	// When being used in documents (for example, in
+	// DocumentMetadata.Facets), the valid types are:
+	//  - search.Atom,
+	//  - float64.
+	//
+	// When being used in SearchOptions.Refinements or being returned
+	// in FacetResult, the valid types are:
+	//  - search.Atom,
+	//  - search.Range.
+	Value interface{}
+}
+
+// DocumentMetadata is a struct containing information describing a given document.
+type DocumentMetadata struct {
+	// Rank is an integer specifying the order the document will be returned in
+	// search results. If zero, the rank will be set to the number of seconds since
+	// 2011-01-01 00:00:00 UTC when being Put into an index.
+	Rank int
+	// Facets is the set of facets for this document.
+	Facets []Facet
+}
+
+// FieldLoadSaver can be converted from and to a slice of Fields
+// with additional document metadata.
+type FieldLoadSaver interface {
+	Load([]Field, *DocumentMetadata) error
+	Save() ([]Field, *DocumentMetadata, error)
+}
+
+// FieldList converts a []Field to implement FieldLoadSaver.
+type FieldList []Field
+
+// Load loads all of the provided fields into l.
+// It does not first reset *l to an empty slice.
+func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
+	*l = append(*l, f...)
+	return nil
+}
+
+// Save returns all of l's fields as a slice of Fields.
+func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
+	return *l, nil, nil
+}
+
+var _ FieldLoadSaver = (*FieldList)(nil)
diff --git a/vendor/google.golang.org/appengine/search/search.go b/vendor/google.golang.org/appengine/search/search.go
new file mode 100644
index 0000000..774b051
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/search.go
@@ -0,0 +1,1121 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search // import "google.golang.org/appengine/search"
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine"
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/search"
+)
+
+var (
+	// ErrInvalidDocumentType is returned when methods like Put, Get or Next
+	// are passed a dst or src argument of invalid type.
+	ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+	// ErrNoSuchDocument is returned when no document was found for a given ID.
+	ErrNoSuchDocument = errors.New("search: no such document")
+)
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foo<b>bar" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved.
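+// It accepts only visible printable ASCII (0x21 through 0x7e) and rejects
+// names reserved by the service, i.e. those beginning with "!".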
+func validIndexNameOrDocID(s string) bool { + if strings.HasPrefix(s, "!") { + return false + } + for _, c := range s { + if c < 0x21 || 0x7f <= c { + return false + } + } + return true +} + +var ( + fieldNameRE = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_]*$`) + languageRE = regexp.MustCompile(`^[a-z]{2}$`) +) + +// validFieldName is the Go equivalent of Python's _CheckFieldName. It checks +// the validity of both field and facet names. +func validFieldName(s string) bool { + return len(s) <= 500 && fieldNameRE.MatchString(s) +} + +// validDocRank checks that the ranks is in the range [0, 2^31). +func validDocRank(r int) bool { + return 0 <= r && r <= (1<<31-1) +} + +// validLanguage checks that a language looks like ISO 639-1. +func validLanguage(s string) bool { + return languageRE.MatchString(s) +} + +// validFloat checks that f is in the range [-2147483647, 2147483647]. +func validFloat(f float64) bool { + return -(1<<31-1) <= f && f <= (1<<31-1) +} + +// Index is an index of documents. +type Index struct { + spec pb.IndexSpec +} + +// orderIDEpoch forms the basis for populating OrderId on documents. +var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC) + +// Open opens the index with the given name. The index is created if it does +// not already exist. +// +// The name is a human-readable ASCII string. It must contain no whitespace +// characters and not start with "!". +func Open(name string) (*Index, error) { + if !validIndexNameOrDocID(name) { + return nil, fmt.Errorf("search: invalid index name %q", name) + } + return &Index{ + spec: pb.IndexSpec{ + Name: &name, + }, + }, nil +} + +// Put saves src to the index. If id is empty, a new ID is allocated by the +// service and returned. If id is not empty, any existing index entry for that +// ID is replaced. +// +// The ID is a human-readable ASCII string. It must contain no whitespace +// characters and not start with "!". +// +// src must be a non-nil struct pointer or implement the FieldLoadSaver +// interface. +func (x *Index) Put(c context.Context, id string, src interface{}) (string, error) { + d, err := saveDoc(src) + if err != nil { + return "", err + } + if id != "" { + if !validIndexNameOrDocID(id) { + return "", fmt.Errorf("search: invalid ID %q", id) + } + d.Id = proto.String(id) + } + // spec is modified by Call when applying the current Namespace, so copy it to + // avoid retaining the namespace beyond the scope of the Call. + spec := x.spec + req := &pb.IndexDocumentRequest{ + Params: &pb.IndexDocumentParams{ + Document: []*pb.Document{d}, + IndexSpec: &spec, + }, + } + res := &pb.IndexDocumentResponse{} + if err := internal.Call(c, "search", "IndexDocument", req, res); err != nil { + return "", err + } + if len(res.Status) > 0 { + if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK { + return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail()) + } + } + if len(res.Status) != 1 || len(res.DocId) != 1 { + return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)", + len(res.Status), len(res.DocId)) + } + return res.DocId[0], nil +} + +// Get loads the document with the given ID into dst. +// +// The ID is a human-readable ASCII string. It must be non-empty, contain no +// whitespace characters and not start with "!". +// +// dst must be a non-nil struct pointer or implement the FieldLoadSaver +// interface. 
+// +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. ErrFieldMismatch is only returned if +// dst is a struct pointer. It is up to the callee to decide whether this error +// is fatal, recoverable, or ignorable. +func (x *Index) Get(c context.Context, id string, dst interface{}) error { + if id == "" || !validIndexNameOrDocID(id) { + return fmt.Errorf("search: invalid ID %q", id) + } + req := &pb.ListDocumentsRequest{ + Params: &pb.ListDocumentsParams{ + IndexSpec: &x.spec, + StartDocId: proto.String(id), + Limit: proto.Int32(1), + }, + } + res := &pb.ListDocumentsResponse{} + if err := internal.Call(c, "search", "ListDocuments", req, res); err != nil { + return err + } + if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) + } + if len(res.Document) != 1 || res.Document[0].GetId() != id { + return ErrNoSuchDocument + } + return loadDoc(dst, res.Document[0], nil) +} + +// Delete deletes a document from the index. +func (x *Index) Delete(c context.Context, id string) error { + req := &pb.DeleteDocumentRequest{ + Params: &pb.DeleteDocumentParams{ + DocId: []string{id}, + IndexSpec: &x.spec, + }, + } + res := &pb.DeleteDocumentResponse{} + if err := internal.Call(c, "search", "DeleteDocument", req, res); err != nil { + return err + } + if len(res.Status) != 1 { + return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status)) + } + if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail()) + } + return nil +} + +// List lists all of the documents in an index. The documents are returned in +// increasing ID order. +func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { + t := &Iterator{ + c: c, + index: x, + count: -1, + listInclusive: true, + more: moreList, + } + if opts != nil { + t.listStartID = opts.StartID + t.limit = opts.Limit + t.idsOnly = opts.IDsOnly + } + return t +} + +func moreList(t *Iterator) error { + req := &pb.ListDocumentsRequest{ + Params: &pb.ListDocumentsParams{ + IndexSpec: &t.index.spec, + }, + } + if t.listStartID != "" { + req.Params.StartDocId = &t.listStartID + req.Params.IncludeStartDoc = &t.listInclusive + } + if t.limit > 0 { + req.Params.Limit = proto.Int32(int32(t.limit)) + } + if t.idsOnly { + req.Params.KeysOnly = &t.idsOnly + } + + res := &pb.ListDocumentsResponse{} + if err := internal.Call(t.c, "search", "ListDocuments", req, res); err != nil { + return err + } + if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) + } + t.listRes = res.Document + t.listStartID, t.listInclusive, t.more = "", false, nil + if len(res.Document) != 0 && t.limit <= 0 { + if id := res.Document[len(res.Document)-1].GetId(); id != "" { + t.listStartID, t.more = id, moreList + } + } + return nil +} + +// ListOptions are the options for listing documents in an index. Passing a nil +// *ListOptions is equivalent to using the default values. +type ListOptions struct { + // StartID is the inclusive lower bound for the ID of the returned + // documents. The zero value means all documents will be returned. + StartID string + + // Limit is the maximum number of documents to return. 
The zero value + // indicates no limit. + Limit int + + // IDsOnly indicates that only document IDs should be returned for the list + // operation; no document fields are populated. + IDsOnly bool +} + +// Search searches the index for the given query. +func (x *Index) Search(c context.Context, query string, opts *SearchOptions) *Iterator { + t := &Iterator{ + c: c, + index: x, + searchQuery: query, + more: moreSearch, + } + if opts != nil { + if opts.Cursor != "" { + if opts.Offset != 0 { + return errIter("at most one of Cursor and Offset may be specified") + } + t.searchCursor = proto.String(string(opts.Cursor)) + } + t.limit = opts.Limit + t.fields = opts.Fields + t.idsOnly = opts.IDsOnly + t.sort = opts.Sort + t.exprs = opts.Expressions + t.refinements = opts.Refinements + t.facetOpts = opts.Facets + t.searchOffset = opts.Offset + t.countAccuracy = opts.CountAccuracy + } + return t +} + +func moreSearch(t *Iterator) error { + // We use per-result (rather than single/per-page) cursors since this + // lets us return a Cursor for every iterator document. The two cursor + // types are largely interchangeable: a page cursor is the same as the + // last per-result cursor in a given search response. + req := &pb.SearchRequest{ + Params: &pb.SearchParams{ + IndexSpec: &t.index.spec, + Query: &t.searchQuery, + Cursor: t.searchCursor, + CursorType: pb.SearchParams_PER_RESULT.Enum(), + FieldSpec: &pb.FieldSpec{ + Name: t.fields, + }, + }, + } + if t.limit > 0 { + req.Params.Limit = proto.Int32(int32(t.limit)) + } + if t.searchOffset > 0 { + req.Params.Offset = proto.Int32(int32(t.searchOffset)) + t.searchOffset = 0 + } + if t.countAccuracy > 0 { + req.Params.MatchedCountAccuracy = proto.Int32(int32(t.countAccuracy)) + } + if t.idsOnly { + req.Params.KeysOnly = &t.idsOnly + } + if t.sort != nil { + if err := sortToProto(t.sort, req.Params); err != nil { + return err + } + } + if t.refinements != nil { + if err := refinementsToProto(t.refinements, req.Params); err != nil { + return err + } + } + for _, e := range t.exprs { + req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{ + Name: proto.String(e.Name), + Expression: proto.String(e.Expr), + }) + } + for _, f := range t.facetOpts { + if err := f.setParams(req.Params); err != nil { + return fmt.Errorf("bad FacetSearchOption: %v", err) + } + } + // Don't repeat facet search. + t.facetOpts = nil + + res := &pb.SearchResponse{} + if err := internal.Call(t.c, "search", "Search", req, res); err != nil { + return err + } + if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK { + return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail()) + } + t.searchRes = res.Result + if len(res.FacetResult) > 0 { + t.facetRes = res.FacetResult + } + t.count = int(*res.MatchedCount) + if t.limit > 0 { + t.more = nil + } else { + t.more = moreSearch + } + return nil +} + +// SearchOptions are the options for searching an index. Passing a nil +// *SearchOptions is equivalent to using the default values. +type SearchOptions struct { + // Limit is the maximum number of documents to return. The zero value + // indicates no limit. + Limit int + + // IDsOnly indicates that only document IDs should be returned for the search + // operation; no document fields are populated. + IDsOnly bool + + // Sort controls the ordering of search results. + Sort *SortOptions + + // Fields specifies which document fields to include in the results. If omitted, + // all document fields are returned. 
No more than 100 fields may be specified.
+	Fields []string
+
+	// Expressions specifies additional computed fields to add to each returned
+	// document.
+	Expressions []FieldExpression
+
+	// Facets controls what facet information is returned for these search results.
+	// If no options are specified, no facet results will be returned.
+	Facets []FacetSearchOption
+
+	// Refinements filters the returned documents by requiring them to contain facets
+	// with specific values. Refinements are applied in conjunction for facets with
+	// different names, and in disjunction otherwise.
+	Refinements []Facet
+
+	// Cursor causes the results to commence with the first document after
+	// the document associated with the cursor.
+	Cursor Cursor
+
+	// Offset specifies the number of documents to skip over before returning results.
+	// When specified, Cursor must be empty.
+	Offset int
+
+	// CountAccuracy specifies the maximum result count that can be expected to
+	// be accurate. If zero, the count accuracy defaults to 20.
+	CountAccuracy int
+}
+
+// Cursor represents an iterator's position.
+//
+// The string value of a cursor is web-safe. It can be saved and restored
+// for later use.
+type Cursor string
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+	// Name is the name to use for the computed field.
+	Name string
+
+	// Expr is evaluated to provide a custom content snippet for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+}
+
+// FacetSearchOption controls what facet information is returned in search results.
+type FacetSearchOption interface {
+	setParams(*pb.SearchParams) error
+}
+
+// AutoFacetDiscovery returns a FacetSearchOption which enables automatic facet
+// discovery for the search. Automatic facet discovery looks for the facets
+// which appear the most often in the aggregate in the matched documents.
+//
+// The maximum number of facets returned is controlled by facetLimit, and the
+// maximum number of values per facet by valueLimit. A limit of zero indicates
+// a default limit should be used.
+func AutoFacetDiscovery(facetLimit, valueLimit int) FacetSearchOption {
+	return &autoFacetOpt{facetLimit, valueLimit}
+}
+
+type autoFacetOpt struct {
+	facetLimit, valueLimit int
+}
+
+const defaultAutoFacetLimit = 10 // As per python runtime search.py.
+
+func (o *autoFacetOpt) setParams(params *pb.SearchParams) error {
+	lim := int32(o.facetLimit)
+	if lim == 0 {
+		lim = defaultAutoFacetLimit
+	}
+	params.AutoDiscoverFacetCount = &lim
+	if o.valueLimit > 0 {
+		params.FacetAutoDetectParam = &pb.FacetAutoDetectParam{
+			ValueLimit: proto.Int32(int32(o.valueLimit)),
+		}
+	}
+	return nil
+}
+
+// FacetDiscovery returns a FacetSearchOption which selects a facet to be
+// returned with the search results. By default, the most frequently
+// occurring values for that facet will be returned. However, you can also
+// specify a list of particular Atoms or specific Ranges to return.
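+//
+// A sketch of both forms (the facet names are illustrative; the values in a
+// single call must be all Atoms or all Ranges):
+//
+//	FacetDiscovery("genre")                              // most frequent values
+//	FacetDiscovery("genre", Atom("jazz"), Atom("swing")) // specific values
+//	FacetDiscovery("price", LessThan(10), AtLeast(10))   // specific ranges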
+func FacetDiscovery(name string, value ...interface{}) FacetSearchOption {
+	return &facetOpt{name, value}
+}
+
+type facetOpt struct {
+	name   string
+	values []interface{}
+}
+
+func (o *facetOpt) setParams(params *pb.SearchParams) error {
+	req := &pb.FacetRequest{Name: &o.name}
+	params.IncludeFacet = append(params.IncludeFacet, req)
+	if len(o.values) == 0 {
+		return nil
+	}
+	vtype := reflect.TypeOf(o.values[0])
+	reqParam := &pb.FacetRequestParam{}
+	for _, v := range o.values {
+		if reflect.TypeOf(v) != vtype {
+			return errors.New("values must all be Atom, or must all be Range")
+		}
+		switch v := v.(type) {
+		case Atom:
+			reqParam.ValueConstraint = append(reqParam.ValueConstraint, string(v))
+		case Range:
+			rng, err := rangeToProto(v)
+			if err != nil {
+				return fmt.Errorf("invalid range: %v", err)
+			}
+			reqParam.Range = append(reqParam.Range, rng)
+		default:
+			return fmt.Errorf("unsupported value type %T", v)
+		}
+	}
+	req.Params = reqParam
+	return nil
+}
+
+// FacetDocumentDepth returns a FacetSearchOption which controls the number of
+// documents to be evaluated when preparing facet results.
+func FacetDocumentDepth(depth int) FacetSearchOption {
+	return facetDepthOpt(depth)
+}
+
+type facetDepthOpt int
+
+func (o facetDepthOpt) setParams(params *pb.SearchParams) error {
+	params.FacetDepth = proto.Int32(int32(o))
+	return nil
+}
+
+// FacetResult represents the number of times a particular facet and value
+// appeared in the documents matching a search request.
+type FacetResult struct {
+	Facet
+
+	// Count is the number of times this specific facet and value appeared in the
+	// matching documents.
+	Count int
+}
+
+// Range represents a numeric range with inclusive start and exclusive end.
+// Start may be specified as math.Inf(-1) to indicate there is no minimum
+// value, and End may similarly be specified as math.Inf(1); at least one of
+// Start or End must be a finite number.
+type Range struct {
+	Start, End float64
+}
+
+var (
+	negInf = math.Inf(-1)
+	posInf = math.Inf(1)
+)
+
+// AtLeast returns a Range matching any value greater than, or equal to, min.
+func AtLeast(min float64) Range {
+	return Range{Start: min, End: posInf}
+}
+
+// LessThan returns a Range matching any value less than max.
+func LessThan(max float64) Range {
+	return Range{Start: negInf, End: max}
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+	// Expressions is a slice of expressions representing a multi-dimensional
+	// sort.
+	Expressions []SortExpression
+
+	// Scorer, when specified, will cause the documents to be scored according to
+	// search term frequency.
+	Scorer Scorer
+
+	// Limit is the maximum number of objects to score and/or sort. Limit cannot
+	// be more than 10,000. The zero value indicates a default limit.
+	Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+	// Expr is evaluated to provide a sorting value for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+
+	// Reverse causes the documents to be sorted in ascending order.
+	Reverse bool
+
+	// The default value to use when no field is present or the expression
+	// cannot be calculated for a document. For text sorts, Default must
+	// be of type string; for numeric sorts, float64.
+	Default interface{}
+}
+
+// A Scorer defines how a document is scored.
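+// Two implementations are provided below, MatchScorer and
+// RescoringMatchScorer; one is selected via SortOptions.Scorer.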
+type Scorer interface {
+	toProto(*pb.ScorerSpec)
+}
+
+type enumScorer struct {
+	enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+	spec.Scorer = e.enum.Enum()
+}
+
+var (
+	// MatchScorer assigns a score based on term frequency in a document.
+	MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+	// RescoringMatchScorer assigns a score based on the quality of the query
+	// match. It is similar to a MatchScorer but uses a more complex scoring
+	// algorithm based on match term frequency and other factors like field type.
+	// Please be aware that this algorithm is continually refined and can change
+	// over time without notice. This means that the ordering of search results
+	// that use this scorer can also change without notice.
+	RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+	for _, e := range sort.Expressions {
+		spec := &pb.SortSpec{
+			SortExpression: proto.String(e.Expr),
+		}
+		if e.Reverse {
+			spec.SortDescending = proto.Bool(false)
+		}
+		if e.Default != nil {
+			switch d := e.Default.(type) {
+			case float64:
+				spec.DefaultValueNumeric = &d
+			case string:
+				spec.DefaultValueText = &d
+			default:
+				return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+			}
+		}
+		params.SortSpec = append(params.SortSpec, spec)
+	}
+
+	spec := &pb.ScorerSpec{}
+	if sort.Limit > 0 {
+		spec.Limit = proto.Int32(int32(sort.Limit))
+		params.ScorerSpec = spec
+	}
+	if sort.Scorer != nil {
+		sort.Scorer.toProto(spec)
+		params.ScorerSpec = spec
+	}
+
+	return nil
+}
+
+func refinementsToProto(refinements []Facet, params *pb.SearchParams) error {
+	for _, r := range refinements {
+		ref := &pb.FacetRefinement{
+			Name: proto.String(r.Name),
+		}
+		switch v := r.Value.(type) {
+		case Atom:
+			ref.Value = proto.String(string(v))
+		case Range:
+			rng, err := rangeToProto(v)
+			if err != nil {
+				return fmt.Errorf("search: refinement for facet %q: %v", r.Name, err)
+			}
+			// Unfortunately there are two identical messages for identifying Facet ranges.
+			ref.Range = &pb.FacetRefinement_Range{Start: rng.Start, End: rng.End}
+		default:
+			return fmt.Errorf("search: unsupported refinement for facet %q of type %T", r.Name, v)
+		}
+		params.FacetRefinement = append(params.FacetRefinement, ref)
+	}
+	return nil
+}
+
+func rangeToProto(r Range) (*pb.FacetRange, error) {
+	rng := &pb.FacetRange{}
+	if r.Start != negInf {
+		if !validFloat(r.Start) {
+			return nil, errors.New("invalid value for Start")
+		}
+		rng.Start = proto.String(strconv.FormatFloat(r.Start, 'e', -1, 64))
+	} else if r.End == posInf {
+		return nil, errors.New("either Start or End must be finite")
+	}
+	if r.End != posInf {
+		if !validFloat(r.End) {
+			return nil, errors.New("invalid value for End")
+		}
+		rng.End = proto.String(strconv.FormatFloat(r.End, 'e', -1, 64))
+	}
+	return rng, nil
+}
+
+func protoToRange(rng *pb.FacetRefinement_Range) Range {
+	r := Range{Start: negInf, End: posInf}
+	if x, err := strconv.ParseFloat(rng.GetStart(), 64); err == nil {
+		r.Start = x
+	}
+	if x, err := strconv.ParseFloat(rng.GetEnd(), 64); err == nil {
+		r.End = x
+	}
+	return r
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
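+// Results are consumed by calling Next until it returns Done, as in the
+// package-level examples in doc.go.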
+type Iterator struct { + c context.Context + index *Index + err error + + listRes []*pb.Document + listStartID string + listInclusive bool + + searchRes []*pb.SearchResult + facetRes []*pb.FacetResult + searchQuery string + searchCursor *string + searchOffset int + sort *SortOptions + + fields []string + exprs []FieldExpression + refinements []Facet + facetOpts []FacetSearchOption + + more func(*Iterator) error + + count int + countAccuracy int + limit int // items left to return; 0 for unlimited. + idsOnly bool +} + +// errIter returns an iterator that only returns the given error. +func errIter(err string) *Iterator { + return &Iterator{ + err: errors.New(err), + } +} + +// Done is returned when a query iteration has completed. +var Done = errors.New("search: query has no more results") + +// Count returns an approximation of the number of documents matched by the +// query. It is only valid to call for iterators returned by Search. +func (t *Iterator) Count() int { return t.count } + +// fetchMore retrieves more results, if there are no errors or pending results. +func (t *Iterator) fetchMore() { + if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil { + t.err = t.more(t) + } +} + +// Next returns the ID of the next result. When there are no more results, +// Done is returned as the error. +// +// dst must be a non-nil struct pointer, implement the FieldLoadSaver +// interface, or be a nil interface value. If a non-nil dst is provided, it +// will be filled with the indexed fields. dst is ignored if this iterator was +// created with an IDsOnly option. +func (t *Iterator) Next(dst interface{}) (string, error) { + t.fetchMore() + if t.err != nil { + return "", t.err + } + + var doc *pb.Document + var exprs []*pb.Field + switch { + case len(t.listRes) != 0: + doc = t.listRes[0] + t.listRes = t.listRes[1:] + case len(t.searchRes) != 0: + doc = t.searchRes[0].Document + exprs = t.searchRes[0].Expression + t.searchCursor = t.searchRes[0].Cursor + t.searchRes = t.searchRes[1:] + default: + return "", Done + } + if doc == nil { + return "", errors.New("search: internal error: no document returned") + } + if !t.idsOnly && dst != nil { + if err := loadDoc(dst, doc, exprs); err != nil { + return "", err + } + } + return doc.GetId(), nil +} + +// Cursor returns the cursor associated with the current document (that is, +// the document most recently returned by a call to Next). +// +// Passing this cursor in a future call to Search will cause those results +// to commence with the first document after the current document. +func (t *Iterator) Cursor() Cursor { + if t.searchCursor == nil { + return "" + } + return Cursor(*t.searchCursor) +} + +// Facets returns the facets found within the search results, if any facets +// were requested in the SearchOptions. +func (t *Iterator) Facets() ([][]FacetResult, error) { + t.fetchMore() + if t.err != nil && t.err != Done { + return nil, t.err + } + + var facets [][]FacetResult + for _, f := range t.facetRes { + fres := make([]FacetResult, 0, len(f.Value)) + for _, v := range f.Value { + ref := v.Refinement + facet := FacetResult{ + Facet: Facet{Name: ref.GetName()}, + Count: int(v.GetCount()), + } + if ref.Value != nil { + facet.Value = Atom(*ref.Value) + } else { + facet.Value = protoToRange(ref.Range) + } + fres = append(fres, facet) + } + facets = append(facets, fres) + } + return facets, nil +} + +// saveDoc converts from a struct pointer or +// FieldLoadSaver/FieldMetadataLoadSaver to the Document protobuf. 
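+// Unless the metadata carries an explicit Rank, the document's OrderId is
+// populated with the number of seconds elapsed since orderIDEpoch.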
+func saveDoc(src interface{}) (*pb.Document, error) { + var err error + var fields []Field + var meta *DocumentMetadata + switch x := src.(type) { + case FieldLoadSaver: + fields, meta, err = x.Save() + default: + fields, meta, err = saveStructWithMeta(src) + } + if err != nil { + return nil, err + } + + fieldsProto, err := fieldsToProto(fields) + if err != nil { + return nil, err + } + d := &pb.Document{ + Field: fieldsProto, + OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())), + } + if meta != nil { + if meta.Rank != 0 { + if !validDocRank(meta.Rank) { + return nil, fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank) + } + *d.OrderId = int32(meta.Rank) + } + if len(meta.Facets) > 0 { + facets, err := facetsToProto(meta.Facets) + if err != nil { + return nil, err + } + d.Facet = facets + } + } + return d, nil +} + +func fieldsToProto(src []Field) ([]*pb.Field, error) { + // Maps to catch duplicate time or numeric fields. + timeFields, numericFields := make(map[string]bool), make(map[string]bool) + dst := make([]*pb.Field, 0, len(src)) + for _, f := range src { + if !validFieldName(f.Name) { + return nil, fmt.Errorf("search: invalid field name %q", f.Name) + } + fieldValue := &pb.FieldValue{} + switch x := f.Value.(type) { + case string: + fieldValue.Type = pb.FieldValue_TEXT.Enum() + fieldValue.StringValue = proto.String(x) + case Atom: + fieldValue.Type = pb.FieldValue_ATOM.Enum() + fieldValue.StringValue = proto.String(string(x)) + case HTML: + fieldValue.Type = pb.FieldValue_HTML.Enum() + fieldValue.StringValue = proto.String(string(x)) + case time.Time: + if timeFields[f.Name] { + return nil, fmt.Errorf("search: duplicate time field %q", f.Name) + } + timeFields[f.Name] = true + fieldValue.Type = pb.FieldValue_DATE.Enum() + fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10)) + case float64: + if numericFields[f.Name] { + return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name) + } + if !validFloat(x) { + return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x) + } + numericFields[f.Name] = true + fieldValue.Type = pb.FieldValue_NUMBER.Enum() + fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64)) + case appengine.GeoPoint: + if !x.Valid() { + return nil, fmt.Errorf( + "search: GeoPoint field %q with invalid value %v", + f.Name, x) + } + fieldValue.Type = pb.FieldValue_GEO.Enum() + fieldValue.Geo = &pb.FieldValue_Geo{ + Lat: proto.Float64(x.Lat), + Lng: proto.Float64(x.Lng), + } + default: + return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value)) + } + if f.Language != "" { + switch f.Value.(type) { + case string, HTML: + if !validLanguage(f.Language) { + return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language) + } + fieldValue.Language = proto.String(f.Language) + default: + return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value) + } + } + if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) { + return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p) + } + dst = append(dst, &pb.Field{ + Name: proto.String(f.Name), + Value: fieldValue, + }) + } + return dst, nil +} + +func facetsToProto(src []Facet) ([]*pb.Facet, error) { + dst := make([]*pb.Facet, 0, len(src)) + for _, f := range src { + if !validFieldName(f.Name) { + return nil, fmt.Errorf("search: invalid facet name %q", f.Name) + } + facetValue := &pb.FacetValue{} + switch x 
:= f.Value.(type) { + case Atom: + if !utf8.ValidString(string(x)) { + return nil, fmt.Errorf("search: %q facet is invalid UTF-8: %q", f.Name, x) + } + facetValue.Type = pb.FacetValue_ATOM.Enum() + facetValue.StringValue = proto.String(string(x)) + case float64: + if !validFloat(x) { + return nil, fmt.Errorf("search: numeric facet %q with invalid value %f", f.Name, x) + } + facetValue.Type = pb.FacetValue_NUMBER.Enum() + facetValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64)) + default: + return nil, fmt.Errorf("search: unsupported facet type: %v", reflect.TypeOf(f.Value)) + } + dst = append(dst, &pb.Facet{ + Name: proto.String(f.Name), + Value: facetValue, + }) + } + return dst, nil +} + +// loadDoc converts from protobufs to a struct pointer or +// FieldLoadSaver/FieldMetadataLoadSaver. The src param provides the document's +// stored fields and facets, and any document metadata. An additional slice of +// fields, exprs, may optionally be provided to contain any derived expressions +// requested by the developer. +func loadDoc(dst interface{}, src *pb.Document, exprs []*pb.Field) (err error) { + fields, err := protoToFields(src.Field) + if err != nil { + return err + } + facets, err := protoToFacets(src.Facet) + if err != nil { + return err + } + if len(exprs) > 0 { + exprFields, err := protoToFields(exprs) + if err != nil { + return err + } + // Mark each field as derived. + for i := range exprFields { + exprFields[i].Derived = true + } + fields = append(fields, exprFields...) + } + meta := &DocumentMetadata{ + Rank: int(src.GetOrderId()), + Facets: facets, + } + switch x := dst.(type) { + case FieldLoadSaver: + return x.Load(fields, meta) + default: + return loadStructWithMeta(dst, fields, meta) + } +} + +func protoToFields(fields []*pb.Field) ([]Field, error) { + dst := make([]Field, 0, len(fields)) + for _, field := range fields { + fieldValue := field.GetValue() + f := Field{ + Name: field.GetName(), + } + switch fieldValue.GetType() { + case pb.FieldValue_TEXT: + f.Value = fieldValue.GetStringValue() + f.Language = fieldValue.GetLanguage() + case pb.FieldValue_ATOM: + f.Value = Atom(fieldValue.GetStringValue()) + case pb.FieldValue_HTML: + f.Value = HTML(fieldValue.GetStringValue()) + f.Language = fieldValue.GetLanguage() + case pb.FieldValue_DATE: + sv := fieldValue.GetStringValue() + millis, err := strconv.ParseInt(sv, 10, 64) + if err != nil { + return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err) + } + f.Value = time.Unix(0, millis*1e6) + case pb.FieldValue_NUMBER: + sv := fieldValue.GetStringValue() + x, err := strconv.ParseFloat(sv, 64) + if err != nil { + return nil, err + } + f.Value = x + case pb.FieldValue_GEO: + geoValue := fieldValue.GetGeo() + geoPoint := appengine.GeoPoint{geoValue.GetLat(), geoValue.GetLng()} + if !geoPoint.Valid() { + return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint) + } + f.Value = geoPoint + default: + return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType()) + } + dst = append(dst, f) + } + return dst, nil +} + +func protoToFacets(facets []*pb.Facet) ([]Facet, error) { + if len(facets) == 0 { + return nil, nil + } + dst := make([]Facet, 0, len(facets)) + for _, facet := range facets { + facetValue := facet.GetValue() + f := Facet{ + Name: facet.GetName(), + } + switch facetValue.GetType() { + case pb.FacetValue_ATOM: + f.Value = Atom(facetValue.GetStringValue()) + case pb.FacetValue_NUMBER: + sv := 
facetValue.GetStringValue() + x, err := strconv.ParseFloat(sv, 64) + if err != nil { + return nil, err + } + f.Value = x + default: + return nil, fmt.Errorf("search: internal error: unknown data type %s", facetValue.GetType()) + } + dst = append(dst, f) + } + return dst, nil +} + +func namespaceMod(m proto.Message, namespace string) { + set := func(s **string) { + if *s == nil { + *s = &namespace + } + } + switch m := m.(type) { + case *pb.IndexDocumentRequest: + set(&m.Params.IndexSpec.Namespace) + case *pb.ListDocumentsRequest: + set(&m.Params.IndexSpec.Namespace) + case *pb.DeleteDocumentRequest: + set(&m.Params.IndexSpec.Namespace) + case *pb.SearchRequest: + set(&m.Params.IndexSpec.Namespace) + } +} + +func init() { + internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name) + internal.NamespaceMods["search"] = namespaceMod +} diff --git a/vendor/google.golang.org/appengine/search/search_test.go b/vendor/google.golang.org/appengine/search/search_test.go new file mode 100644 index 0000000..f7c339b --- /dev/null +++ b/vendor/google.golang.org/appengine/search/search_test.go @@ -0,0 +1,1000 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package search + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/search" +) + +type TestDoc struct { + String string + Atom Atom + HTML HTML + Float float64 + Location appengine.GeoPoint + Time time.Time +} + +type FieldListWithMeta struct { + Fields FieldList + Meta *DocumentMetadata +} + +func (f *FieldListWithMeta) Load(fields []Field, meta *DocumentMetadata) error { + f.Meta = meta + return f.Fields.Load(fields, nil) +} + +func (f *FieldListWithMeta) Save() ([]Field, *DocumentMetadata, error) { + fields, _, err := f.Fields.Save() + return fields, f.Meta, err +} + +// Assert that FieldListWithMeta satisfies FieldLoadSaver +var _ FieldLoadSaver = &FieldListWithMeta{} + +var ( + float = 3.14159 + floatOut = "3.14159e+00" + latitude = 37.3894 + longitude = 122.0819 + testGeo = appengine.GeoPoint{latitude, longitude} + testString = "foobar" + testTime = time.Unix(1337324400, 0) + testTimeOut = "1337324400000" + searchMeta = &DocumentMetadata{ + Rank: 42, + } + searchDoc = TestDoc{ + String: testString, + Atom: Atom(testString), + HTML: HTML(testString), + Float: float, + Location: testGeo, + Time: testTime, + } + searchFields = FieldList{ + Field{Name: "String", Value: testString}, + Field{Name: "Atom", Value: Atom(testString)}, + Field{Name: "HTML", Value: HTML(testString)}, + Field{Name: "Float", Value: float}, + Field{Name: "Location", Value: testGeo}, + Field{Name: "Time", Value: testTime}, + } + // searchFieldsWithLang is a copy of the searchFields with the Language field + // set on text/HTML Fields. 
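+	// It is populated by the init function below, not at declaration time.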
+ searchFieldsWithLang = FieldList{} + protoFields = []*pb.Field{ + newStringValueField("String", testString, pb.FieldValue_TEXT), + newStringValueField("Atom", testString, pb.FieldValue_ATOM), + newStringValueField("HTML", testString, pb.FieldValue_HTML), + newStringValueField("Float", floatOut, pb.FieldValue_NUMBER), + { + Name: proto.String("Location"), + Value: &pb.FieldValue{ + Geo: &pb.FieldValue_Geo{ + Lat: proto.Float64(latitude), + Lng: proto.Float64(longitude), + }, + Type: pb.FieldValue_GEO.Enum(), + }, + }, + newStringValueField("Time", testTimeOut, pb.FieldValue_DATE), + } +) + +func init() { + for _, f := range searchFields { + if f.Name == "String" || f.Name == "HTML" { + f.Language = "en" + } + searchFieldsWithLang = append(searchFieldsWithLang, f) + } +} + +func newStringValueField(name, value string, valueType pb.FieldValue_ContentType) *pb.Field { + return &pb.Field{ + Name: proto.String(name), + Value: &pb.FieldValue{ + StringValue: proto.String(value), + Type: valueType.Enum(), + }, + } +} + +func newFacet(name, value string, valueType pb.FacetValue_ContentType) *pb.Facet { + return &pb.Facet{ + Name: proto.String(name), + Value: &pb.FacetValue{ + StringValue: proto.String(value), + Type: valueType.Enum(), + }, + } +} + +func TestValidIndexNameOrDocID(t *testing.T) { + testCases := []struct { + s string + want bool + }{ + {"", true}, + {"!", false}, + {"$", true}, + {"!bad", false}, + {"good!", true}, + {"alsoGood", true}, + {"has spaces", false}, + {"is_inva\xffid_UTF-8", false}, + {"is_non-ASCïI", false}, + {"underscores_are_ok", true}, + } + for _, tc := range testCases { + if got := validIndexNameOrDocID(tc.s); got != tc.want { + t.Errorf("%q: got %v, want %v", tc.s, got, tc.want) + } + } +} + +func TestLoadDoc(t *testing.T) { + got, want := TestDoc{}, searchDoc + if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if got != want { + t.Errorf("loadDoc: got %v, wanted %v", got, want) + } +} + +func TestSaveDoc(t *testing.T) { + got, err := saveDoc(&searchDoc) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + want := protoFields + if !reflect.DeepEqual(got.Field, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestLoadFieldList(t *testing.T) { + var got FieldList + want := searchFieldsWithLang + if err := loadDoc(&got, &pb.Document{Field: protoFields}, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestLangFields(t *testing.T) { + fl := &FieldList{ + {Name: "Foo", Value: "I am English", Language: "en"}, + {Name: "Bar", Value: "私は日本人だ", Language: "jp"}, + } + var got FieldList + doc, err := saveDoc(fl) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + if err := loadDoc(&got, doc, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if want := fl; !reflect.DeepEqual(&got, want) { + t.Errorf("got %v\nwant %v", got, want) + } +} + +func TestSaveFieldList(t *testing.T) { + got, err := saveDoc(&searchFields) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + want := protoFields + if !reflect.DeepEqual(got.Field, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestLoadFieldAndExprList(t *testing.T) { + var got, want FieldList + for i, f := range searchFieldsWithLang { + f.Derived = (i >= 2) // First 2 elements are "fields", next are "expressions". 
+ want = append(want, f) + } + doc, expr := &pb.Document{Field: protoFields[:2]}, protoFields[2:] + if err := loadDoc(&got, doc, expr); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v\nwant %v", got, want) + } +} + +func TestLoadMeta(t *testing.T) { + var got FieldListWithMeta + want := FieldListWithMeta{ + Meta: searchMeta, + Fields: searchFieldsWithLang, + } + doc := &pb.Document{ + Field: protoFields, + OrderId: proto.Int32(42), + } + if err := loadDoc(&got, doc, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestSaveMeta(t *testing.T) { + got, err := saveDoc(&FieldListWithMeta{ + Meta: searchMeta, + Fields: searchFields, + }) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + want := &pb.Document{ + Field: protoFields, + OrderId: proto.Int32(42), + } + if !proto.Equal(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestLoadSaveWithStruct(t *testing.T) { + type gopher struct { + Name string + Info string `search:"about"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"Fur,facet"` + } + + doc := gopher{"Gopher", "Likes slide rules.", 4, Atom("furry")} + pb := &pb.Document{ + Field: []*pb.Field{ + newStringValueField("Name", "Gopher", pb.FieldValue_TEXT), + newStringValueField("about", "Likes slide rules.", pb.FieldValue_TEXT), + }, + Facet: []*pb.Facet{ + newFacet("Legs", "4e+00", pb.FacetValue_NUMBER), + newFacet("Fur", "furry", pb.FacetValue_ATOM), + }, + } + + var gotDoc gopher + if err := loadDoc(&gotDoc, pb, nil); err != nil { + t.Fatalf("loadDoc: %v", err) + } + if !reflect.DeepEqual(gotDoc, doc) { + t.Errorf("loading doc\ngot %v\nwant %v", gotDoc, doc) + } + + gotPB, err := saveDoc(&doc) + if err != nil { + t.Fatalf("saveDoc: %v", err) + } + gotPB.OrderId = nil // Don't test: it's time dependent. 
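+	// (saveDoc stamps OrderId from the current time when no Rank is set,
+	// so its exact value cannot be asserted here.)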
+ if !proto.Equal(gotPB, pb) { + t.Errorf("saving doc\ngot %v\nwant %v", gotPB, pb) + } +} + +func TestValidFieldNames(t *testing.T) { + testCases := []struct { + name string + valid bool + }{ + {"Normal", true}, + {"Also_OK_123", true}, + {"Not so great", false}, + {"lower_case", true}, + {"Exclaim!", false}, + {"Hello세상아 안녕", false}, + {"", false}, + {"Hεllo", false}, + {strings.Repeat("A", 500), true}, + {strings.Repeat("A", 501), false}, + } + + for _, tc := range testCases { + _, err := saveDoc(&FieldList{ + Field{Name: tc.name, Value: "val"}, + }) + if err != nil && !strings.Contains(err.Error(), "invalid field name") { + t.Errorf("unexpected err %q for field name %q", err, tc.name) + } + if (err == nil) != tc.valid { + t.Errorf("field %q: expected valid %t, received err %v", tc.name, tc.valid, err) + } + } +} + +func TestValidLangs(t *testing.T) { + testCases := []struct { + field Field + valid bool + }{ + {Field{Name: "Foo", Value: "String", Language: ""}, true}, + {Field{Name: "Foo", Value: "String", Language: "en"}, true}, + {Field{Name: "Foo", Value: "String", Language: "aussie"}, false}, + {Field{Name: "Foo", Value: "String", Language: "12"}, false}, + {Field{Name: "Foo", Value: HTML("String"), Language: "en"}, true}, + {Field{Name: "Foo", Value: Atom("String"), Language: "en"}, false}, + {Field{Name: "Foo", Value: 42, Language: "en"}, false}, + } + + for _, tt := range testCases { + _, err := saveDoc(&FieldList{tt.field}) + if err == nil != tt.valid { + t.Errorf("Field %v, got error %v, wanted valid %t", tt.field, err, tt.valid) + } + } +} + +func TestDuplicateFields(t *testing.T) { + testCases := []struct { + desc string + fields FieldList + errMsg string // Non-empty if we expect an error + }{ + { + desc: "multi string", + fields: FieldList{{Name: "FieldA", Value: "val1"}, {Name: "FieldA", Value: "val2"}, {Name: "FieldA", Value: "val3"}}, + }, + { + desc: "multi atom", + fields: FieldList{{Name: "FieldA", Value: Atom("val1")}, {Name: "FieldA", Value: Atom("val2")}, {Name: "FieldA", Value: Atom("val3")}}, + }, + { + desc: "mixed", + fields: FieldList{{Name: "FieldA", Value: testString}, {Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: float}}, + }, + { + desc: "multi time", + fields: FieldList{{Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: testTime}}, + errMsg: `duplicate time field "FieldA"`, + }, + { + desc: "multi num", + fields: FieldList{{Name: "FieldA", Value: float}, {Name: "FieldA", Value: float}}, + errMsg: `duplicate numeric field "FieldA"`, + }, + } + for _, tc := range testCases { + _, err := saveDoc(&tc.fields) + if (err == nil) != (tc.errMsg == "") || (err != nil && !strings.Contains(err.Error(), tc.errMsg)) { + t.Errorf("%s: got err %v, wanted %q", tc.desc, err, tc.errMsg) + } + } +} + +func TestLoadErrFieldMismatch(t *testing.T) { + testCases := []struct { + desc string + dst interface{} + src []*pb.Field + err error + }{ + { + desc: "missing", + dst: &struct{ One string }{}, + src: []*pb.Field{newStringValueField("Two", "woop!", pb.FieldValue_TEXT)}, + err: &ErrFieldMismatch{ + FieldName: "Two", + Reason: "no such struct field", + }, + }, + { + desc: "wrong type", + dst: &struct{ Num float64 }{}, + src: []*pb.Field{newStringValueField("Num", "woop!", pb.FieldValue_TEXT)}, + err: &ErrFieldMismatch{ + FieldName: "Num", + Reason: "type mismatch: float64 for string data", + }, + }, + { + desc: "unsettable", + dst: &struct{ lower string }{}, + src: []*pb.Field{newStringValueField("lower", "woop!", pb.FieldValue_TEXT)}, + err: 
&ErrFieldMismatch{ + FieldName: "lower", + Reason: "cannot set struct field", + }, + }, + } + for _, tc := range testCases { + err := loadDoc(tc.dst, &pb.Document{Field: tc.src}, nil) + if !reflect.DeepEqual(err, tc.err) { + t.Errorf("%s, got err %v, wanted %v", tc.desc, err, tc.err) + } + } +} + +func TestLimit(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, res *pb.SearchResponse) error { + limit := 20 // Default per page. + if req.Params.Limit != nil { + limit = int(*req.Params.Limit) + } + res.Status = &pb.RequestStatus{Code: pb.SearchServiceError_OK.Enum()} + res.MatchedCount = proto.Int64(int64(limit)) + for i := 0; i < limit; i++ { + res.Result = append(res.Result, &pb.SearchResult{Document: &pb.Document{}}) + res.Cursor = proto.String("moreresults") + } + return nil + }) + + const maxDocs = 500 // Limit maximum number of docs. + testCases := []struct { + limit, want int + }{ + {limit: 0, want: maxDocs}, + {limit: 42, want: 42}, + {limit: 100, want: 100}, + {limit: 1000, want: maxDocs}, + } + + for _, tt := range testCases { + it := index.Search(c, "gopher", &SearchOptions{Limit: tt.limit, IDsOnly: true}) + count := 0 + for ; count < maxDocs; count++ { + _, err := it.Next(nil) + if err == Done { + break + } + if err != nil { + t.Fatalf("err after %d: %v", count, err) + } + } + if count != tt.want { + t.Errorf("got %d results, expected %d", count, tt.want) + } + } +} + +func TestPut(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error { + expectedIn := &pb.IndexDocumentRequest{ + Params: &pb.IndexDocumentParams{ + Document: []*pb.Document{ + {Field: protoFields, OrderId: proto.Int32(42)}, + }, + IndexSpec: &pb.IndexSpec{ + Name: proto.String("Doc"), + }, + }, + } + if !proto.Equal(in, expectedIn) { + return fmt.Errorf("unsupported argument:\ngot %v\nwant %v", in, expectedIn) + } + *out = pb.IndexDocumentResponse{ + Status: []*pb.RequestStatus{ + {Code: pb.SearchServiceError_OK.Enum()}, + }, + DocId: []string{ + "doc_id", + }, + } + return nil + }) + + id, err := index.Put(c, "", &FieldListWithMeta{ + Meta: searchMeta, + Fields: searchFields, + }) + if err != nil { + t.Fatal(err) + } + if want := "doc_id"; id != want { + t.Errorf("Got doc ID %q, want %q", id, want) + } +} + +func TestPutAutoOrderID(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error { + if len(in.Params.GetDocument()) < 1 { + return fmt.Errorf("expected at least one Document, got %v", in) + } + got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds()) + if d := got - want; -5 > d || d > 5 { + return fmt.Errorf("got OrderId %d, want near %d", got, want) + } + *out = pb.IndexDocumentResponse{ + Status: []*pb.RequestStatus{ + {Code: pb.SearchServiceError_OK.Enum()}, + }, + DocId: []string{ + "doc_id", + }, + } + return nil + }) + + if _, err := index.Put(c, "", &searchFields); err != nil { + t.Fatal(err) + } +} + +func TestPutBadStatus(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + c := aetesting.FakeSingleContext(t, 
"search", "IndexDocument", func(_ *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error { + *out = pb.IndexDocumentResponse{ + Status: []*pb.RequestStatus{ + { + Code: pb.SearchServiceError_INVALID_REQUEST.Enum(), + ErrorDetail: proto.String("insufficient gophers"), + }, + }, + } + return nil + }) + + wantErr := "search: INVALID_REQUEST: insufficient gophers" + if _, err := index.Put(c, "", &searchFields); err == nil || err.Error() != wantErr { + t.Fatalf("Put: got %v error, want %q", err, wantErr) + } +} + +func TestSortOptions(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + noErr := errors.New("") // Sentinel err to return to prevent sending request. + + testCases := []struct { + desc string + sort *SortOptions + wantSort []*pb.SortSpec + wantScorer *pb.ScorerSpec + wantErr string + }{ + { + desc: "No SortOptions", + }, + { + desc: "Basic", + sort: &SortOptions{ + Expressions: []SortExpression{ + {Expr: "dog"}, + {Expr: "cat", Reverse: true}, + {Expr: "gopher", Default: "blue"}, + {Expr: "fish", Default: 2.0}, + }, + Limit: 42, + Scorer: MatchScorer, + }, + wantSort: []*pb.SortSpec{ + {SortExpression: proto.String("dog")}, + {SortExpression: proto.String("cat"), SortDescending: proto.Bool(false)}, + {SortExpression: proto.String("gopher"), DefaultValueText: proto.String("blue")}, + {SortExpression: proto.String("fish"), DefaultValueNumeric: proto.Float64(2)}, + }, + wantScorer: &pb.ScorerSpec{ + Limit: proto.Int32(42), + Scorer: pb.ScorerSpec_MATCH_SCORER.Enum(), + }, + }, + { + desc: "Bad expression default", + sort: &SortOptions{ + Expressions: []SortExpression{ + {Expr: "dog", Default: true}, + }, + }, + wantErr: `search: invalid Default type bool for expression "dog"`, + }, + { + desc: "RescoringMatchScorer", + sort: &SortOptions{Scorer: RescoringMatchScorer}, + wantScorer: &pb.ScorerSpec{Scorer: pb.ScorerSpec_RESCORING_MATCH_SCORER.Enum()}, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + params := req.Params + if !reflect.DeepEqual(params.SortSpec, tt.wantSort) { + t.Errorf("%s: params.SortSpec=%v; want %v", tt.desc, params.SortSpec, tt.wantSort) + } + if !reflect.DeepEqual(params.ScorerSpec, tt.wantScorer) { + t.Errorf("%s: params.ScorerSpec=%v; want %v", tt.desc, params.ScorerSpec, tt.wantScorer) + } + return noErr // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", &SearchOptions{Sort: tt.sort}) + _, err := it.Next(nil) + if err == nil { + t.Fatalf("%s: err==nil; should not happen", tt.desc) + } + if err.Error() != tt.wantErr { + t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr) + } + } +} + +func TestFieldSpec(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + errFoo := errors.New("foo") // sentinel error when there isn't one. 
+ + testCases := []struct { + desc string + opts *SearchOptions + want *pb.FieldSpec + }{ + { + desc: "No options", + want: &pb.FieldSpec{}, + }, + { + desc: "Fields", + opts: &SearchOptions{ + Fields: []string{"one", "two"}, + }, + want: &pb.FieldSpec{ + Name: []string{"one", "two"}, + }, + }, + { + desc: "Expressions", + opts: &SearchOptions{ + Expressions: []FieldExpression{ + {Name: "one", Expr: "price * quantity"}, + {Name: "two", Expr: "min(daily_use, 10) * rate"}, + }, + }, + want: &pb.FieldSpec{ + Expression: []*pb.FieldSpec_Expression{ + {Name: proto.String("one"), Expression: proto.String("price * quantity")}, + {Name: proto.String("two"), Expression: proto.String("min(daily_use, 10) * rate")}, + }, + }, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + params := req.Params + if !reflect.DeepEqual(params.FieldSpec, tt.want) { + t.Errorf("%s: params.FieldSpec=%v; want %v", tt.desc, params.FieldSpec, tt.want) + } + return errFoo // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", tt.opts) + if _, err := it.Next(nil); err != errFoo { + t.Fatalf("%s: got error %v; want %v", tt.desc, err, errFoo) + } + } +} + +func TestBasicSearchOpts(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + noErr := errors.New("") // Sentinel err to return to prevent sending request. + + testCases := []struct { + desc string + facetOpts []FacetSearchOption + cursor Cursor + offset int + countAccuracy int + want *pb.SearchParams + wantErr string + }{ + { + desc: "No options", + want: &pb.SearchParams{}, + }, + { + desc: "Default auto discovery", + facetOpts: []FacetSearchOption{ + AutoFacetDiscovery(0, 0), + }, + want: &pb.SearchParams{ + AutoDiscoverFacetCount: proto.Int32(10), + }, + }, + { + desc: "Auto discovery", + facetOpts: []FacetSearchOption{ + AutoFacetDiscovery(7, 12), + }, + want: &pb.SearchParams{ + AutoDiscoverFacetCount: proto.Int32(7), + FacetAutoDetectParam: &pb.FacetAutoDetectParam{ + ValueLimit: proto.Int32(12), + }, + }, + }, + { + desc: "Param Depth", + facetOpts: []FacetSearchOption{ + AutoFacetDiscovery(7, 12), + }, + want: &pb.SearchParams{ + AutoDiscoverFacetCount: proto.Int32(7), + FacetAutoDetectParam: &pb.FacetAutoDetectParam{ + ValueLimit: proto.Int32(12), + }, + }, + }, + { + desc: "Doc depth", + facetOpts: []FacetSearchOption{ + FacetDocumentDepth(123), + }, + want: &pb.SearchParams{ + FacetDepth: proto.Int32(123), + }, + }, + { + desc: "Facet discovery", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour"), + FacetDiscovery("size", Atom("M"), Atom("L")), + FacetDiscovery("price", LessThan(7), Range{7, 14}, AtLeast(14)), + }, + want: &pb.SearchParams{ + IncludeFacet: []*pb.FacetRequest{ + {Name: proto.String("colour")}, + {Name: proto.String("size"), Params: &pb.FacetRequestParam{ + ValueConstraint: []string{"M", "L"}, + }}, + {Name: proto.String("price"), Params: &pb.FacetRequestParam{ + Range: []*pb.FacetRange{ + {End: proto.String("7e+00")}, + {Start: proto.String("7e+00"), End: proto.String("1.4e+01")}, + {Start: proto.String("1.4e+01")}, + }, + }}, + }, + }, + }, + { + desc: "Facet discovery - bad value", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour", true), + }, + wantErr: "bad FacetSearchOption: unsupported value type bool", + }, + { + desc: "Facet discovery - mix value types", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour", 
Atom("blue"), AtLeast(7)), + }, + wantErr: "bad FacetSearchOption: values must all be Atom, or must all be Range", + }, + { + desc: "Facet discovery - invalid range", + facetOpts: []FacetSearchOption{ + FacetDiscovery("colour", Range{negInf, posInf}), + }, + wantErr: "bad FacetSearchOption: invalid range: either Start or End must be finite", + }, + { + desc: "Cursor", + cursor: Cursor("mycursor"), + want: &pb.SearchParams{ + Cursor: proto.String("mycursor"), + }, + }, + { + desc: "Offset", + offset: 121, + want: &pb.SearchParams{ + Offset: proto.Int32(121), + }, + }, + { + desc: "Cursor and Offset set", + cursor: Cursor("mycursor"), + offset: 121, + wantErr: "at most one of Cursor and Offset may be specified", + }, + { + desc: "Count accuracy", + countAccuracy: 100, + want: &pb.SearchParams{ + MatchedCountAccuracy: proto.Int32(100), + }, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + if tt.want == nil { + t.Errorf("%s: expected call to fail", tt.desc) + return nil + } + // Set default fields. + tt.want.Query = proto.String("gopher") + tt.want.IndexSpec = &pb.IndexSpec{Name: proto.String("Doc")} + tt.want.CursorType = pb.SearchParams_PER_RESULT.Enum() + tt.want.FieldSpec = &pb.FieldSpec{} + if got := req.Params; !reflect.DeepEqual(got, tt.want) { + t.Errorf("%s: params=%v; want %v", tt.desc, got, tt.want) + } + return noErr // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", &SearchOptions{ + Facets: tt.facetOpts, + Cursor: tt.cursor, + Offset: tt.offset, + CountAccuracy: tt.countAccuracy, + }) + _, err := it.Next(nil) + if err == nil { + t.Fatalf("%s: err==nil; should not happen", tt.desc) + } + if err.Error() != tt.wantErr { + t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr) + } + } +} + +func TestFacetRefinements(t *testing.T) { + index, err := Open("Doc") + if err != nil { + t.Fatalf("err from Open: %v", err) + } + + noErr := errors.New("") // Sentinel err to return to prevent sending request. 
+ + testCases := []struct { + desc string + refine []Facet + want []*pb.FacetRefinement + wantErr string + }{ + { + desc: "No refinements", + }, + { + desc: "Basic", + refine: []Facet{ + {Name: "fur", Value: Atom("fluffy")}, + {Name: "age", Value: LessThan(123)}, + {Name: "age", Value: AtLeast(0)}, + {Name: "legs", Value: Range{Start: 3, End: 5}}, + }, + want: []*pb.FacetRefinement{ + {Name: proto.String("fur"), Value: proto.String("fluffy")}, + {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{End: proto.String("1.23e+02")}}, + {Name: proto.String("age"), Range: &pb.FacetRefinement_Range{Start: proto.String("0e+00")}}, + {Name: proto.String("legs"), Range: &pb.FacetRefinement_Range{Start: proto.String("3e+00"), End: proto.String("5e+00")}}, + }, + }, + { + desc: "Infinite range", + refine: []Facet{ + {Name: "age", Value: Range{Start: negInf, End: posInf}}, + }, + wantErr: `search: refinement for facet "age": either Start or End must be finite`, + }, + { + desc: "Bad End value in range", + refine: []Facet{ + {Name: "age", Value: LessThan(2147483648)}, + }, + wantErr: `search: refinement for facet "age": invalid value for End`, + }, + { + desc: "Bad Start value in range", + refine: []Facet{ + {Name: "age", Value: AtLeast(-2147483649)}, + }, + wantErr: `search: refinement for facet "age": invalid value for Start`, + }, + { + desc: "Unknown value type", + refine: []Facet{ + {Name: "age", Value: "you can't use strings!"}, + }, + wantErr: `search: unsupported refinement for facet "age" of type string`, + }, + } + + for _, tt := range testCases { + c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error { + if got := req.Params.FacetRefinement; !reflect.DeepEqual(got, tt.want) { + t.Errorf("%s: params.FacetRefinement=%v; want %v", tt.desc, got, tt.want) + } + return noErr // Always return some error to prevent response parsing. + }) + + it := index.Search(c, "gopher", &SearchOptions{Refinements: tt.refine}) + _, err := it.Next(nil) + if err == nil { + t.Fatalf("%s: err==nil; should not happen", tt.desc) + } + if err.Error() != tt.wantErr { + t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr) + } + } +} + +func TestNamespaceResetting(t *testing.T) { + namec := make(chan *string, 1) + c0 := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(req *pb.IndexDocumentRequest, res *pb.IndexDocumentResponse) error { + namec <- req.Params.IndexSpec.Namespace + return fmt.Errorf("RPC error") + }) + + // Check that wrapping c0 in a namespace twice works correctly. 
+	c1, err := appengine.Namespace(c0, "A")
+	if err != nil {
+		t.Fatalf("appengine.Namespace: %v", err)
+	}
+	c2, err := appengine.Namespace(c1, "") // should act as the original context
+	if err != nil {
+		t.Fatalf("appengine.Namespace: %v", err)
+	}
+
+	i := &Index{}
+
+	i.Put(c0, "something", &searchDoc)
+	if ns := <-namec; ns != nil {
+		t.Errorf(`Put with c0: ns = %q, want nil`, *ns)
+	}
+
+	i.Put(c1, "something", &searchDoc)
+	if ns := <-namec; ns == nil {
+		t.Error(`Put with c1: ns = nil, want "A"`)
+	} else if *ns != "A" {
+		t.Errorf(`Put with c1: ns = %q, want "A"`, *ns)
+	}
+
+	i.Put(c2, "something", &searchDoc)
+	if ns := <-namec; ns != nil {
+		t.Errorf(`Put with c2: ns = %q, want nil`, *ns)
+	}
+}
diff --git a/vendor/google.golang.org/appengine/search/struct.go b/vendor/google.golang.org/appengine/search/struct.go
new file mode 100644
index 0000000..e73d2f2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/search/struct.go
@@ -0,0 +1,251 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+type ErrFieldMismatch struct {
+	FieldName string
+	Reason    string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// ErrFacetMismatch is returned when a facet is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. StructType is the type of the struct
+// pointed to by the destination argument passed to Iterator.Next.
+type ErrFacetMismatch struct {
+	StructType reflect.Type
+	FacetName  string
+	Reason     string
+}
+
+func (e *ErrFacetMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load facet %q into a %q: %s", e.FacetName, e.StructType, e.Reason)
+}
+
+// structCodec defines how to convert a given struct to/from a search document.
+type structCodec struct {
+	// byIndex returns the struct tag for the i'th struct field.
+	byIndex []structTag
+
+	// fieldByName returns the index of the struct field for the given field name.
+	fieldByName map[string]int
+
+	// facetByName returns the index of the struct field for the given facet name.
+	facetByName map[string]int
+}
+
+// structTag holds a structured version of each struct field's parsed tag.
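+// For example, the tag `search:"about"` parses to structTag{name: "about"},
+// and `search:"Fur,facet"` parses to structTag{name: "Fur", facet: true}.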
+type structTag struct {
+	name   string
+	facet  bool
+	ignore bool
+}
+
+var (
+	codecsMu sync.RWMutex
+	codecs   = map[reflect.Type]*structCodec{}
+)
+
+func loadCodec(t reflect.Type) (*structCodec, error) {
+	codecsMu.RLock()
+	codec, ok := codecs[t]
+	codecsMu.RUnlock()
+	if ok {
+		return codec, nil
+	}
+
+	codecsMu.Lock()
+	defer codecsMu.Unlock()
+	if codec, ok := codecs[t]; ok {
+		return codec, nil
+	}
+
+	codec = &structCodec{
+		fieldByName: make(map[string]int),
+		facetByName: make(map[string]int),
+	}
+
+	for i, I := 0, t.NumField(); i < I; i++ {
+		f := t.Field(i)
+		name, opts := f.Tag.Get("search"), ""
+		if i := strings.Index(name, ","); i != -1 {
+			name, opts = name[:i], name[i+1:]
+		}
+		ignore := false
+		if name == "-" {
+			ignore = true
+		} else if name == "" {
+			name = f.Name
+		} else if !validFieldName(name) {
+			return nil, fmt.Errorf("search: struct tag has invalid field name: %q", name)
+		}
+		facet := opts == "facet"
+		codec.byIndex = append(codec.byIndex, structTag{name: name, facet: facet, ignore: ignore})
+		if facet {
+			codec.facetByName[name] = i
+		} else {
+			codec.fieldByName[name] = i
+		}
+	}
+
+	codecs[t] = codec
+	return codec, nil
+}
+
+// structFLS adapts a struct to be a FieldLoadSaver.
+type structFLS struct {
+	v     reflect.Value
+	codec *structCodec
+}
+
+func (s structFLS) Load(fields []Field, meta *DocumentMetadata) error {
+	var err error
+	for _, field := range fields {
+		i, ok := s.codec.fieldByName[field.Name]
+		if !ok {
+			// Note the error, but keep going.
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    "no such struct field",
+			}
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			// Note the error, but keep going.
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    "cannot set struct field",
+			}
+			continue
+		}
+		v := reflect.ValueOf(field.Value)
+		if ft, vt := f.Type(), v.Type(); ft != vt {
+			err = &ErrFieldMismatch{
+				FieldName: field.Name,
+				Reason:    fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+			}
+			continue
+		}
+		f.Set(v)
+	}
+	if meta == nil {
+		return err
+	}
+	for _, facet := range meta.Facets {
+		i, ok := s.codec.facetByName[facet.Name]
+		if !ok {
+			// Note the error, but keep going.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     "no matching field found",
+				}
+			}
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			// Note the error, but keep going.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     "unable to set unexported field of struct",
+				}
+			}
+			continue
+		}
+		v := reflect.ValueOf(facet.Value)
+		if ft, vt := f.Type(), v.Type(); ft != vt {
+			// Note the error, but keep going: calling Set with a
+			// mismatched type would panic.
+			if err == nil {
+				err = &ErrFacetMismatch{
+					StructType: s.v.Type(),
+					FacetName:  facet.Name,
+					Reason:     fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
+				}
+			}
+			continue
+		}
+		f.Set(v)
+	}
+	return err
+}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+	fields := make([]Field, 0, len(s.codec.fieldByName))
+	var facets []Facet
+	for i, tag := range s.codec.byIndex {
+		if tag.ignore {
+			continue
+		}
+		f := s.v.Field(i)
+		if !f.CanSet() {
+			continue
+		}
+		if tag.facet {
+			facets = append(facets, Facet{Name: tag.name, Value: f.Interface()})
+		} else {
+			fields = append(fields, Field{Name: tag.name, Value: f.Interface()})
+		}
+	}
+	return fields, &DocumentMetadata{Facets: facets}, nil
+}
+
+// newStructFLS returns a FieldLoadSaver for the struct pointer p.
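+// A minimal sketch of its use (hypothetical values):
+//	var g struct{ Name string }
+//	fls, err := newStructFLS(&g) // errors unless given a non-nil struct pointer
+//	if err == nil {
+//		err = fls.Load([]Field{{Name: "Name", Value: "Gopher"}}, nil)
+//	}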
+func newStructFLS(p interface{}) (FieldLoadSaver, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidDocumentType + } + codec, err := loadCodec(v.Elem().Type()) + if err != nil { + return nil, err + } + return structFLS{v.Elem(), codec}, nil +} + +func loadStructWithMeta(dst interface{}, f []Field, meta *DocumentMetadata) error { + x, err := newStructFLS(dst) + if err != nil { + return err + } + return x.Load(f, meta) +} + +func saveStructWithMeta(src interface{}) ([]Field, *DocumentMetadata, error) { + x, err := newStructFLS(src) + if err != nil { + return nil, nil, err + } + return x.Save() +} + +// LoadStruct loads the fields from f to dst. dst must be a struct pointer. +func LoadStruct(dst interface{}, f []Field) error { + return loadStructWithMeta(dst, f, nil) +} + +// SaveStruct returns the fields from src as a slice of Field. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Field, error) { + f, _, err := saveStructWithMeta(src) + return f, err +} diff --git a/vendor/google.golang.org/appengine/search/struct_test.go b/vendor/google.golang.org/appengine/search/struct_test.go new file mode 100644 index 0000000..4e5b5d1 --- /dev/null +++ b/vendor/google.golang.org/appengine/search/struct_test.go @@ -0,0 +1,213 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package search + +import ( + "reflect" + "testing" +) + +func TestLoadingStruct(t *testing.T) { + testCases := []struct { + desc string + fields []Field + meta *DocumentMetadata + want interface{} + wantErr bool + }{ + { + desc: "Basic struct", + fields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "Legs", Value: float64(4)}, + }, + want: &struct { + Name string + Legs float64 + }{"Gopher", 4}, + }, + { + desc: "Struct with tags", + fields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "about", Value: "Likes slide rules."}, + }, + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Legs", Value: float64(4)}, + {Name: "Fur", Value: Atom("furry")}, + }}, + want: &struct { + Name string + Info string `search:"about"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"Fur,facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + }, + { + desc: "Bad field from tag", + want: &struct { + AlphaBeta string `search:"αβ"` + }{}, + wantErr: true, + }, + { + desc: "Ignore missing field", + fields: []Field{ + {Name: "Meaning", Value: float64(42)}, + }, + want: &struct{}{}, + wantErr: true, + }, + { + desc: "Ignore unsettable field", + fields: []Field{ + {Name: "meaning", Value: float64(42)}, + }, + want: &struct{ meaning float64 }{}, // field not populated. 
+ wantErr: true, + }, + { + desc: "Error on missing facet", + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Set", Value: Atom("yes")}, + {Name: "Missing", Value: Atom("no")}, + }}, + want: &struct { + Set Atom `search:",facet"` + }{Atom("yes")}, + wantErr: true, + }, + { + desc: "Error on unsettable facet", + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Set", Value: Atom("yes")}, + {Name: "unset", Value: Atom("no")}, + }}, + want: &struct { + Set Atom `search:",facet"` + }{Atom("yes")}, + wantErr: true, + }, + { + desc: "Error setting ignored field", + fields: []Field{ + {Name: "Set", Value: "yes"}, + {Name: "Ignored", Value: "no"}, + }, + want: &struct { + Set string + Ignored string `search:"-"` + }{Set: "yes"}, + wantErr: true, + }, + { + desc: "Error setting ignored facet", + meta: &DocumentMetadata{Facets: []Facet{ + {Name: "Set", Value: Atom("yes")}, + {Name: "Ignored", Value: Atom("no")}, + }}, + want: &struct { + Set Atom `search:",facet"` + Ignored Atom `search:"-,facet"` + }{Set: Atom("yes")}, + wantErr: true, + }, + } + + for _, tt := range testCases { + // Make a pointer to an empty version of what want points to. + dst := reflect.New(reflect.TypeOf(tt.want).Elem()).Interface() + err := loadStructWithMeta(dst, tt.fields, tt.meta) + if err != nil != tt.wantErr { + t.Errorf("%s: got err %v; want err %t", tt.desc, err, tt.wantErr) + continue + } + if !reflect.DeepEqual(dst, tt.want) { + t.Errorf("%s: doesn't match\ngot: %v\nwant: %v", tt.desc, dst, tt.want) + } + } +} + +func TestSavingStruct(t *testing.T) { + testCases := []struct { + desc string + doc interface{} + wantFields []Field + wantFacets []Facet + }{ + { + desc: "Basic struct", + doc: &struct { + Name string + Legs float64 + }{"Gopher", 4}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "Legs", Value: float64(4)}, + }, + }, + { + desc: "Struct with tags", + doc: &struct { + Name string + Info string `search:"about"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"Fur,facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + {Name: "about", Value: "Likes slide rules."}, + }, + wantFacets: []Facet{ + {Name: "Legs", Value: float64(4)}, + {Name: "Fur", Value: Atom("furry")}, + }, + }, + { + desc: "Ignore unexported struct fields", + doc: &struct { + Name string + info string + Legs float64 `search:",facet"` + fuzz Atom `search:",facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + }, + wantFacets: []Facet{ + {Name: "Legs", Value: float64(4)}, + }, + }, + { + desc: "Ignore fields marked -", + doc: &struct { + Name string + Info string `search:"-"` + Legs float64 `search:",facet"` + Fuzz Atom `search:"-,facet"` + }{"Gopher", "Likes slide rules.", 4, Atom("furry")}, + wantFields: []Field{ + {Name: "Name", Value: "Gopher"}, + }, + wantFacets: []Facet{ + {Name: "Legs", Value: float64(4)}, + }, + }, + } + + for _, tt := range testCases { + fields, meta, err := saveStructWithMeta(tt.doc) + if err != nil { + t.Errorf("%s: got err %v; want nil", tt.desc, err) + continue + } + if !reflect.DeepEqual(fields, tt.wantFields) { + t.Errorf("%s: fields don't match\ngot: %v\nwant: %v", tt.desc, fields, tt.wantFields) + } + if facets := meta.Facets; !reflect.DeepEqual(facets, tt.wantFacets) { + t.Errorf("%s: facets don't match\ngot: %v\nwant: %v", tt.desc, facets, tt.wantFacets) + } + } +} diff --git a/vendor/google.golang.org/appengine/socket/doc.go 
b/vendor/google.golang.org/appengine/socket/doc.go new file mode 100644 index 0000000..3de46df --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/doc.go @@ -0,0 +1,10 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package socket provides outbound network sockets. +// +// This package is only required in the classic App Engine environment. +// Applications running only in App Engine "flexible environment" should +// use the standard library's net package. +package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go new file mode 100644 index 0000000..0ad50e2 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -0,0 +1,290 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package socket + +import ( + "fmt" + "io" + "net" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/appengine/internal" + + pb "google.golang.org/appengine/internal/socket" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + return DialTimeout(ctx, protocol, addr, 0) +} + +var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ + pb.CreateSocketRequest_IPv4, + pb.CreateSocketRequest_IPv6, +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. 
+ if timeout > 0 { + var cancel context.CancelFunc + dialCtx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) + } + + var prot pb.CreateSocketRequest_SocketProtocol + switch protocol { + case "tcp": + prot = pb.CreateSocketRequest_TCP + case "udp": + prot = pb.CreateSocketRequest_UDP + default: + return nil, fmt.Errorf("socket: unknown protocol %q", protocol) + } + + packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + if len(packedAddrs) == 0 { + return nil, fmt.Errorf("no addresses for %q", host) + } + + packedAddr := packedAddrs[0] // use first address + fam := pb.CreateSocketRequest_IPv4 + if len(packedAddr) == net.IPv6len { + fam = pb.CreateSocketRequest_IPv6 + } + + req := &pb.CreateSocketRequest{ + Family: fam.Enum(), + Protocol: prot.Enum(), + RemoteIp: &pb.AddressPort{ + Port: proto.Int32(int32(port)), + PackedAddress: packedAddr, + }, + } + if resolved { + req.RemoteIp.HostnameHint = &host + } + res := &pb.CreateSocketReply{} + if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { + return nil, err + } + + return &Conn{ + ctx: ctx, + desc: res.GetSocketDescriptor(), + prot: prot, + local: res.ProxyExternalIp, + remote: req.RemoteIp, + }, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + packedAddrs, _, err := resolve(ctx, ipFamilies, host) + if err != nil { + return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) + } + addrs = make([]net.IP, len(packedAddrs)) + for i, pa := range packedAddrs { + addrs[i] = net.IP(pa) + } + return addrs, nil +} + +func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { + // Check if it's an IP address. + if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. 
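+//
+// A hypothetical sketch (warmupCtx and userCtx are placeholder contexts):
+//	conn, err := socket.Dial(warmupCtx, "tcp", "example.com:443")
+//	// ... later, while servicing the user request:
+//	conn.SetContext(userCtx)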
+func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 0000000..c804169 --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". + return nil +} diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go new file mode 100644 index 0000000..9b62fac --- /dev/null +++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue.go @@ -0,0 +1,496 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package taskqueue provides a client for App Engine's taskqueue service. +Using this service, applications may perform work outside a user's request. + +A Task may be constructed manually; alternatively, since the most common +taskqueue operation is to add a single POST task, NewPOSTTask makes it easy. + + t := taskqueue.NewPOSTTask("/worker", url.Values{ + "key": {key}, + }) + taskqueue.Add(c, t, "") // add t to the default queue +*/ +package taskqueue // import "google.golang.org/appengine/taskqueue" + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + dspb "google.golang.org/appengine/internal/datastore" + pb "google.golang.org/appengine/internal/taskqueue" +) + +var ( + // ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name. 
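+	// Callers adding named tasks idempotently may treat it as success, e.g.:
+	//	if _, err := taskqueue.Add(c, t, ""); err != nil && err != taskqueue.ErrTaskAlreadyAdded {
+	//		// handle err
+	//	}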
+	ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions lets you control whether to retry a task and the backoff intervals between tries.
+type RetryOptions struct {
+	// Number of tries/leases after which the task fails permanently and is deleted.
+	// If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+	RetryLimit int32
+
+	// Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+	// If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+	AgeLimit time.Duration
+
+	// Minimum time between successive tries (only for push tasks).
+	MinBackoff time.Duration
+
+	// Maximum time between successive tries (only for push tasks).
+	MaxBackoff time.Duration
+
+	// Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+	MaxDoublings int32
+
+	// If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+	// Otherwise a zero MaxDoublings is ignored and the default is used.
+	ApplyZeroMaxDoublings bool
+}
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+	params := &pb.TaskQueueRetryParameters{}
+	if opt.RetryLimit > 0 {
+		params.RetryLimit = proto.Int32(opt.RetryLimit)
+	}
+	if opt.AgeLimit > 0 {
+		params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+	}
+	if opt.MinBackoff > 0 {
+		params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+	}
+	if opt.MaxBackoff > 0 {
+		params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+	}
+	if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+		params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+	}
+	return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+	// Path is the worker URL for the task.
+	// If unset, it will default to /_ah/queue/<queue_name>.
+	Path string
+
+	// Payload is the data for the task.
+	// This will be delivered as the HTTP request body.
+	// It is only used when Method is POST, PUT or PULL.
+	// url.Values' Encode method may be used to generate this for POST requests.
+	Payload []byte
+
+	// Additional HTTP headers to pass at the task's execution time.
+	// To schedule the task to be run with an alternate app version
+	// or backend, set the "Host" header.
+	Header http.Header
+
+	// Method is the HTTP method for the task ("GET", "POST", etc.),
+	// or "PULL" if this task is destined for a pull-based queue.
+	// If empty, this defaults to "POST".
+	Method string
+
+	// A name for the task.
+	// If empty, a name will be chosen.
+	Name string
+
+	// Delay specifies the duration the task queue service must wait
+	// before executing the task.
+	// Either Delay or ETA may be set, but not both.
+	Delay time.Duration
+
+	// ETA specifies the earliest time a task may be executed (push queues)
+	// or leased (pull queues).
+	// Either Delay or ETA may be set, but not both.
+	ETA time.Time
+
+	// The number of times the task has been dispatched or leased.
+	RetryCount int32
+
+	// Tag for the task. Only used when Method is PULL.
+	Tag string
+
+	// Retry options for this task. May be nil.
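+	// A hypothetical configuration: retry up to five times, waiting at
+	// least ten seconds between tries:
+	//	&RetryOptions{RetryLimit: 5, MinBackoff: 10 * time.Second}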
+ RetryOptions *RetryOptions +} + +func (t *Task) method() string { + if t.Method == "" { + return "POST" + } + return t.Method +} + +// NewPOSTTask creates a Task that will POST to a path with the given form data. +func NewPOSTTask(path string, params url.Values) *Task { + h := make(http.Header) + h.Set("Content-Type", "application/x-www-form-urlencoded") + return &Task{ + Path: path, + Payload: []byte(params.Encode()), + Header: h, + Method: "POST", + } +} + +var ( + currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace") +) + +func getDefaultNamespace(ctx context.Context) string { + return internal.IncomingHeaders(ctx).Get(defaultNamespace) +} + +func newAddReq(c context.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) { + if queueName == "" { + queueName = "default" + } + path := task.Path + if path == "" { + path = "/_ah/queue/" + queueName + } + eta := task.ETA + if eta.IsZero() { + eta = time.Now().Add(task.Delay) + } else if task.Delay != 0 { + panic("taskqueue: both Delay and ETA are set") + } + req := &pb.TaskQueueAddRequest{ + QueueName: []byte(queueName), + TaskName: []byte(task.Name), + EtaUsec: proto.Int64(eta.UnixNano() / 1e3), + } + method := task.method() + if method == "PULL" { + // Pull-based task + req.Body = task.Payload + req.Mode = pb.TaskQueueMode_PULL.Enum() + if task.Tag != "" { + req.Tag = []byte(task.Tag) + } + } else { + // HTTP-based task + if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok { + req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum() + } else { + return nil, fmt.Errorf("taskqueue: bad method %q", method) + } + req.Url = []byte(path) + for k, vs := range task.Header { + for _, v := range vs { + req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ + Key: []byte(k), + Value: []byte(v), + }) + } + } + if method == "POST" || method == "PUT" { + req.Body = task.Payload + } + + // Namespace headers. + if _, ok := task.Header[currentNamespace]; !ok { + // Fetch the current namespace of this request. + ns := internal.NamespaceFromContext(c) + req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ + Key: []byte(currentNamespace), + Value: []byte(ns), + }) + } + if _, ok := task.Header[defaultNamespace]; !ok { + // Fetch the X-AppEngine-Default-Namespace header of this request. + if ns := getDefaultNamespace(c); ns != "" { + req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{ + Key: []byte(defaultNamespace), + Value: []byte(ns), + }) + } + } + } + + if task.RetryOptions != nil { + req.RetryParameters = task.RetryOptions.toRetryParameters() + } + + return req, nil +} + +var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{ + pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true, + pb.TaskQueueServiceError_TOMBSTONED_TASK: true, +} + +// Add adds the task to a named queue. +// An empty queue name means that the default queue will be used. +// Add returns an equivalent Task with defaults filled in, including setting +// the task's Name field to the chosen name if the original was empty. 
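+//
+// A minimal usage sketch (editorial addition, not upstream documentation),
+// assuming c comes from appengine.NewContext(r) and the worker path and
+// task name are illustrative:
+//
+//	t := taskqueue.NewPOSTTask("/worker", url.Values{"key": {"12345"}})
+//	t.Name = "import-12345" // optional; a name enables de-duplication
+//	if _, err := taskqueue.Add(c, t, ""); err != nil && err != taskqueue.ErrTaskAlreadyAdded {
+//		return err // ErrTaskAlreadyAdded alone is safe to ignore here
+//	}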
+func Add(c context.Context, task *Task, queueName string) (*Task, error) { + req, err := newAddReq(c, task, queueName) + if err != nil { + return nil, err + } + res := &pb.TaskQueueAddResponse{} + if err := internal.Call(c, "taskqueue", "Add", req, res); err != nil { + apiErr, ok := err.(*internal.APIError) + if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] { + return nil, ErrTaskAlreadyAdded + } + return nil, err + } + resultTask := *task + resultTask.Method = task.method() + if task.Name == "" { + resultTask.Name = string(res.ChosenTaskName) + } + return &resultTask, nil +} + +// AddMulti adds multiple tasks to a named queue. +// An empty queue name means that the default queue will be used. +// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting +// each task's Name field to the chosen name if the original was empty. +// If a given task is badly formed or could not be added, an appengine.MultiError is returned. +func AddMulti(c context.Context, tasks []*Task, queueName string) ([]*Task, error) { + req := &pb.TaskQueueBulkAddRequest{ + AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)), + } + me, any := make(appengine.MultiError, len(tasks)), false + for i, t := range tasks { + req.AddRequest[i], me[i] = newAddReq(c, t, queueName) + any = any || me[i] != nil + } + if any { + return nil, me + } + res := &pb.TaskQueueBulkAddResponse{} + if err := internal.Call(c, "taskqueue", "BulkAdd", req, res); err != nil { + return nil, err + } + if len(res.Taskresult) != len(tasks) { + return nil, errors.New("taskqueue: server error") + } + tasksOut := make([]*Task, len(tasks)) + for i, tr := range res.Taskresult { + tasksOut[i] = new(Task) + *tasksOut[i] = *tasks[i] + tasksOut[i].Method = tasksOut[i].method() + if tasksOut[i].Name == "" { + tasksOut[i].Name = string(tr.ChosenTaskName) + } + if *tr.Result != pb.TaskQueueServiceError_OK { + if alreadyAddedErrors[*tr.Result] { + me[i] = ErrTaskAlreadyAdded + } else { + me[i] = &internal.APIError{ + Service: "taskqueue", + Code: int32(*tr.Result), + } + } + any = true + } + } + if any { + return tasksOut, me + } + return tasksOut, nil +} + +// Delete deletes a task from a named queue. +func Delete(c context.Context, task *Task, queueName string) error { + err := DeleteMulti(c, []*Task{task}, queueName) + if me, ok := err.(appengine.MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti deletes multiple tasks from a named queue. +// If a given task could not be deleted, an appengine.MultiError is returned. 
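+//
+// Editorial sketch: inspecting per-task failures, assuming the tasks were
+// leased from a queue named "pull-queue" and log is
+// google.golang.org/appengine/log:
+//
+//	if err := taskqueue.DeleteMulti(c, tasks, "pull-queue"); err != nil {
+//		if me, ok := err.(appengine.MultiError); ok {
+//			for i, e := range me {
+//				if e != nil {
+//					log.Errorf(c, "task %q not deleted: %v", tasks[i].Name, e)
+//				}
+//			}
+//		}
+//	}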
+func DeleteMulti(c context.Context, tasks []*Task, queueName string) error { + taskNames := make([][]byte, len(tasks)) + for i, t := range tasks { + taskNames[i] = []byte(t.Name) + } + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueueDeleteRequest{ + QueueName: []byte(queueName), + TaskName: taskNames, + } + res := &pb.TaskQueueDeleteResponse{} + if err := internal.Call(c, "taskqueue", "Delete", req, res); err != nil { + return err + } + if a, b := len(req.TaskName), len(res.Result); a != b { + return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b) + } + me, any := make(appengine.MultiError, len(res.Result)), false + for i, ec := range res.Result { + if ec != pb.TaskQueueServiceError_OK { + me[i] = &internal.APIError{ + Service: "taskqueue", + Code: int32(ec), + } + any = true + } + } + if any { + return me + } + return nil +} + +func lease(c context.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) { + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueueQueryAndOwnTasksRequest{ + QueueName: []byte(queueName), + LeaseSeconds: proto.Float64(float64(leaseTime)), + MaxTasks: proto.Int64(int64(maxTasks)), + GroupByTag: proto.Bool(groupByTag), + Tag: tag, + } + res := &pb.TaskQueueQueryAndOwnTasksResponse{} + if err := internal.Call(c, "taskqueue", "QueryAndOwnTasks", req, res); err != nil { + return nil, err + } + tasks := make([]*Task, len(res.Task)) + for i, t := range res.Task { + tasks[i] = &Task{ + Payload: t.Body, + Name: string(t.TaskName), + Method: "PULL", + ETA: time.Unix(0, *t.EtaUsec*1e3), + RetryCount: *t.RetryCount, + Tag: string(t.Tag), + } + } + return tasks, nil +} + +// Lease leases tasks from a queue. +// leaseTime is in seconds. +// The number of tasks fetched will be at most maxTasks. +func Lease(c context.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) { + return lease(c, maxTasks, queueName, leaseTime, false, nil) +} + +// LeaseByTag leases tasks from a queue, grouped by tag. +// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA. +// leaseTime is in seconds. +// The number of tasks fetched will be at most maxTasks. +func LeaseByTag(c context.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) { + return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag)) +} + +// Purge removes all tasks from a queue. +func Purge(c context.Context, queueName string) error { + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueuePurgeQueueRequest{ + QueueName: []byte(queueName), + } + res := &pb.TaskQueuePurgeQueueResponse{} + return internal.Call(c, "taskqueue", "PurgeQueue", req, res) +} + +// ModifyLease modifies the lease of a task. +// Used to request more processing time, or to abandon processing. +// leaseTime is in seconds and must not be negative. +func ModifyLease(c context.Context, task *Task, queueName string, leaseTime int) error { + if queueName == "" { + queueName = "default" + } + req := &pb.TaskQueueModifyTaskLeaseRequest{ + QueueName: []byte(queueName), + TaskName: []byte(task.Name), + EtaUsec: proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership. 
+ LeaseSeconds: proto.Float64(float64(leaseTime)), + } + res := &pb.TaskQueueModifyTaskLeaseResponse{} + if err := internal.Call(c, "taskqueue", "ModifyTaskLease", req, res); err != nil { + return err + } + task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3) + return nil +} + +// QueueStatistics represents statistics about a single task queue. +type QueueStatistics struct { + Tasks int // may be an approximation + OldestETA time.Time // zero if there are no pending tasks + + Executed1Minute int // tasks executed in the last minute + InFlight int // tasks executing now + EnforcedRate float64 // requests per second +} + +// QueueStats retrieves statistics about queues. +func QueueStats(c context.Context, queueNames []string) ([]QueueStatistics, error) { + req := &pb.TaskQueueFetchQueueStatsRequest{ + QueueName: make([][]byte, len(queueNames)), + } + for i, q := range queueNames { + if q == "" { + q = "default" + } + req.QueueName[i] = []byte(q) + } + res := &pb.TaskQueueFetchQueueStatsResponse{} + if err := internal.Call(c, "taskqueue", "FetchQueueStats", req, res); err != nil { + return nil, err + } + qs := make([]QueueStatistics, len(res.Queuestats)) + for i, qsg := range res.Queuestats { + qs[i] = QueueStatistics{ + Tasks: int(*qsg.NumTasks), + } + if eta := *qsg.OldestEtaUsec; eta > -1 { + qs[i].OldestETA = time.Unix(0, eta*1e3) + } + if si := qsg.ScannerInfo; si != nil { + qs[i].Executed1Minute = int(*si.ExecutedLastMinute) + qs[i].InFlight = int(si.GetRequestsInFlight()) + qs[i].EnforcedRate = si.GetEnforcedRate() + } + } + return qs, nil +} + +func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) { + x.Transaction = t +} + +func init() { + internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name) + + // Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue. + dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT) + internal.RegisterTimeoutErrorCode("taskqueue", dsCode) + + // Transaction registration. + internal.RegisterTransactionSetter(setTransaction) + internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) { + for _, req := range x.AddRequest { + setTransaction(req, t) + } + }) +} diff --git a/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go new file mode 100644 index 0000000..0c14015 --- /dev/null +++ b/vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go @@ -0,0 +1,116 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
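+
+// Editorial sketch, kept as a comment and not part of the vendored tests:
+// a pull-queue worker combining the Lease, ModifyLease and Delete functions
+// above. The queue name and the process function are hypothetical.
+//
+//	tasks, err := taskqueue.Lease(c, 100, "pull-queue", 60)
+//	if err != nil {
+//		return err
+//	}
+//	for _, t := range tasks {
+//		if err := process(t.Payload); err != nil {
+//			taskqueue.ModifyLease(c, t, "pull-queue", 0) // abandon the lease
+//			continue
+//		}
+//		taskqueue.Delete(c, t, "pull-queue")
+//	}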
+ +package taskqueue + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/taskqueue" +) + +func TestAddErrors(t *testing.T) { + var tests = []struct { + err, want error + sameErr bool // if true, should return err exactly + }{ + { + err: &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_TASK_ALREADY_EXISTS), + }, + want: ErrTaskAlreadyAdded, + }, + { + err: &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_TOMBSTONED_TASK), + }, + want: ErrTaskAlreadyAdded, + }, + { + err: &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_UNKNOWN_QUEUE), + }, + want: errors.New("not used"), + sameErr: true, + }, + } + for _, tc := range tests { + c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error { + // don't fill in any of the response + return tc.err + }) + task := &Task{Path: "/worker", Method: "PULL"} + _, err := Add(c, task, "a-queue") + want := tc.want + if tc.sameErr { + want = tc.err + } + if err != want { + t.Errorf("Add with tc.err = %v, got %#v, want = %#v", tc.err, err, want) + } + } +} + +func TestAddMulti(t *testing.T) { + c := aetesting.FakeSingleContext(t, "taskqueue", "BulkAdd", func(req *pb.TaskQueueBulkAddRequest, res *pb.TaskQueueBulkAddResponse) error { + res.Taskresult = []*pb.TaskQueueBulkAddResponse_TaskResult{ + { + Result: pb.TaskQueueServiceError_OK.Enum(), + }, + { + Result: pb.TaskQueueServiceError_TASK_ALREADY_EXISTS.Enum(), + }, + { + Result: pb.TaskQueueServiceError_TOMBSTONED_TASK.Enum(), + }, + { + Result: pb.TaskQueueServiceError_INTERNAL_ERROR.Enum(), + }, + } + return nil + }) + tasks := []*Task{ + {Path: "/worker", Method: "PULL"}, + {Path: "/worker", Method: "PULL"}, + {Path: "/worker", Method: "PULL"}, + {Path: "/worker", Method: "PULL"}, + } + r, err := AddMulti(c, tasks, "a-queue") + if len(r) != len(tasks) { + t.Fatalf("AddMulti returned %d tasks, want %d", len(r), len(tasks)) + } + want := appengine.MultiError{ + nil, + ErrTaskAlreadyAdded, + ErrTaskAlreadyAdded, + &internal.APIError{ + Service: "taskqueue", + Code: int32(pb.TaskQueueServiceError_INTERNAL_ERROR), + }, + } + if !reflect.DeepEqual(err, want) { + t.Errorf("AddMulti got %v, wanted %v", err, want) + } +} + +func TestAddWithEmptyPath(t *testing.T) { + c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error { + if got, want := string(req.Url), "/_ah/queue/a-queue"; got != want { + return fmt.Errorf("req.Url = %q; want %q", got, want) + } + return nil + }) + if _, err := Add(c, &Task{}, "a-queue"); err != nil { + t.Fatalf("Add: %v", err) + } +} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 0000000..05642a9 --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import "golang.org/x/net/context" + +// IsTimeoutError reports whether err is a timeout error. 
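+//
+// Editorial sketch: callers can use this to decide whether to retry, where
+// callService stands in for any App Engine API call:
+//
+//	if err := callService(c); appengine.IsTimeoutError(err) {
+//		// The deadline expired; back off and retry instead of failing.
+//	}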
+func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 100644 index 0000000..6ffe1e6 --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. +package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. +var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. +// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. +var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. 
This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. +func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. +func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. + switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. + if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go new file mode 100644 index 0000000..ffad571 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/oauth.go @@ -0,0 +1,52 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
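+
+// Editorial sketch for the urlfetch package above (a comment only, not part
+// of this file): issuing an outbound request from a handler, assuming
+// c := appengine.NewContext(r):
+//
+//	client := urlfetch.Client(c)
+//	resp, err := client.Get("https://www.example.com/")
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()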
+ +package user + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/user" +) + +// CurrentOAuth returns the user associated with the OAuth consumer making this +// request. If the OAuth consumer did not make a valid OAuth request, or the +// scopes is non-empty and the current user does not have at least one of the +// scopes, this method will return an error. +func CurrentOAuth(c context.Context, scopes ...string) (*User, error) { + req := &pb.GetOAuthUserRequest{} + if len(scopes) != 1 || scopes[0] != "" { + // The signature for this function used to be CurrentOAuth(Context, string). + // Ignore the singular "" scope to preserve existing behavior. + req.Scopes = scopes + } + + res := &pb.GetOAuthUserResponse{} + + err := internal.Call(c, "user", "GetOAuthUser", req, res) + if err != nil { + return nil, err + } + return &User{ + Email: *res.Email, + AuthDomain: *res.AuthDomain, + Admin: res.GetIsAdmin(), + ID: *res.UserId, + ClientID: res.GetClientId(), + }, nil +} + +// OAuthConsumerKey returns the OAuth consumer key provided with the current +// request. This method will return an error if the OAuth request was invalid. +func OAuthConsumerKey(c context.Context) (string, error) { + req := &pb.CheckOAuthSignatureRequest{} + res := &pb.CheckOAuthSignatureResponse{} + + err := internal.Call(c, "user", "CheckOAuthSignature", req, res) + if err != nil { + return "", err + } + return *res.OauthConsumerKey, err +} diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go new file mode 100644 index 0000000..eb76f59 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package user provides a client for App Engine's user authentication service. +package user // import "google.golang.org/appengine/user" + +import ( + "strings" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/user" +) + +// User represents a user of the application. +type User struct { + Email string + AuthDomain string + Admin bool + + // ID is the unique permanent ID of the user. + // It is populated if the Email is associated + // with a Google account, or empty otherwise. + ID string + + // ClientID is the ID of the pre-registered client so its identity can be verified. + // See https://developers.google.com/console/help/#generatingoauth2 for more information. + ClientID string + + FederatedIdentity string + FederatedProvider string +} + +// String returns a displayable name for the user. +func (u *User) String() string { + if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) { + return u.Email[:len(u.Email)-len("@"+u.AuthDomain)] + } + if u.FederatedIdentity != "" { + return u.FederatedIdentity + } + return u.Email +} + +// LoginURL returns a URL that, when visited, prompts the user to sign in, +// then redirects the user to the URL specified by dest. +func LoginURL(c context.Context, dest string) (string, error) { + return LoginURLFederated(c, dest, "") +} + +// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier. 
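+//
+// Editorial sketch: building a sign-in link in a handler, assuming w is an
+// http.ResponseWriter and the destination path is illustrative:
+//
+//	url, err := user.LoginURL(c, "/after-login")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Fprintf(w, `<a href="%s">Sign in</a>`, url)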
+func LoginURLFederated(c context.Context, dest, identity string) (string, error) { + req := &pb.CreateLoginURLRequest{ + DestinationUrl: proto.String(dest), + } + if identity != "" { + req.FederatedIdentity = proto.String(identity) + } + res := &pb.CreateLoginURLResponse{} + if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil { + return "", err + } + return *res.LoginUrl, nil +} + +// LogoutURL returns a URL that, when visited, signs the user out, +// then redirects the user to the URL specified by dest. +func LogoutURL(c context.Context, dest string) (string, error) { + req := &pb.CreateLogoutURLRequest{ + DestinationUrl: proto.String(dest), + } + res := &pb.CreateLogoutURLResponse{} + if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil { + return "", err + } + return *res.LogoutUrl, nil +} + +func init() { + internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go new file mode 100644 index 0000000..a747ef3 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user_classic.go @@ -0,0 +1,35 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package user + +import ( + "appengine/user" + + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +func Current(ctx context.Context) *User { + u := user.Current(internal.ClassicContextFromContext(ctx)) + if u == nil { + return nil + } + // Map appengine/user.User to this package's User type. + return &User{ + Email: u.Email, + AuthDomain: u.AuthDomain, + Admin: u.Admin, + ID: u.ID, + FederatedIdentity: u.FederatedIdentity, + FederatedProvider: u.FederatedProvider, + } +} + +func IsAdmin(ctx context.Context) bool { + return user.IsAdmin(internal.ClassicContextFromContext(ctx)) +} diff --git a/vendor/google.golang.org/appengine/user/user_test.go b/vendor/google.golang.org/appengine/user/user_test.go new file mode 100644 index 0000000..5fc5957 --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user_test.go @@ -0,0 +1,99 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
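+
+// Editorial sketch for the OAuth helpers above (a comment only, not part of
+// these tests): guarding an endpoint with CurrentOAuth; the scope string is
+// illustrative:
+//
+//	u, err := user.CurrentOAuth(c, "https://www.googleapis.com/auth/userinfo.email")
+//	if err != nil {
+//		http.Error(w, "OAuth authorization required", http.StatusUnauthorized)
+//		return
+//	}
+//	log.Infof(c, "OAuth consumer acting for %s", u.Email)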
+ +// +build !appengine + +package user + +import ( + "fmt" + "net/http" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine/internal" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/user" +) + +func baseReq() *http.Request { + return &http.Request{ + Header: http.Header{}, + } +} + +type basicUserTest struct { + nickname, email, authDomain, admin string + // expectations + isNil, isAdmin bool + displayName string +} + +var basicUserTests = []basicUserTest{ + {"", "", "", "0", true, false, ""}, + {"ken", "ken@example.com", "example.com", "0", false, false, "ken"}, + {"ken", "ken@example.com", "auth_domain.com", "1", false, true, "ken@example.com"}, +} + +func TestBasicUserAPI(t *testing.T) { + for i, tc := range basicUserTests { + req := baseReq() + req.Header.Set("X-AppEngine-User-Nickname", tc.nickname) + req.Header.Set("X-AppEngine-User-Email", tc.email) + req.Header.Set("X-AppEngine-Auth-Domain", tc.authDomain) + req.Header.Set("X-AppEngine-User-Is-Admin", tc.admin) + + c := internal.ContextForTesting(req) + + if ga := IsAdmin(c); ga != tc.isAdmin { + t.Errorf("test %d: expected IsAdmin(c) = %v, got %v", i, tc.isAdmin, ga) + } + + u := Current(c) + if tc.isNil { + if u != nil { + t.Errorf("test %d: expected u == nil, got %+v", i, u) + } + continue + } + if u == nil { + t.Errorf("test %d: expected u != nil, got nil", i) + continue + } + if u.Email != tc.email { + t.Errorf("test %d: expected u.Email = %q, got %q", i, tc.email, u.Email) + } + if gs := u.String(); gs != tc.displayName { + t.Errorf("test %d: expected u.String() = %q, got %q", i, tc.displayName, gs) + } + if u.Admin != tc.isAdmin { + t.Errorf("test %d: expected u.Admin = %v, got %v", i, tc.isAdmin, u.Admin) + } + } +} + +func TestLoginURL(t *testing.T) { + expectedQuery := &pb.CreateLoginURLRequest{ + DestinationUrl: proto.String("/destination"), + } + const expectedDest = "/redir/dest" + c := aetesting.FakeSingleContext(t, "user", "CreateLoginURL", func(req *pb.CreateLoginURLRequest, res *pb.CreateLoginURLResponse) error { + if !proto.Equal(req, expectedQuery) { + return fmt.Errorf("got %v, want %v", req, expectedQuery) + } + res.LoginUrl = proto.String(expectedDest) + return nil + }) + + url, err := LoginURL(c, "/destination") + if err != nil { + t.Fatalf("LoginURL failed: %v", err) + } + if url != expectedDest { + t.Errorf("got %v, want %v", url, expectedDest) + } +} + +// TODO(dsymonds): Add test for LogoutURL. diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go new file mode 100644 index 0000000..8dc672e --- /dev/null +++ b/vendor/google.golang.org/appengine/user/user_vm.go @@ -0,0 +1,38 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package user + +import ( + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" +) + +// Current returns the currently logged-in user, +// or nil if the user is not signed in. 
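+//
+// Editorial sketch: the common sign-in gate in a handler, assuming
+// c := appengine.NewContext(r); error handling is elided for brevity:
+//
+//	u := user.Current(c)
+//	if u == nil {
+//		url, _ := user.LoginURL(c, r.URL.String())
+//		http.Redirect(w, r, url, http.StatusFound)
+//		return
+//	}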
+func Current(c context.Context) *User { + h := internal.IncomingHeaders(c) + u := &User{ + Email: h.Get("X-AppEngine-User-Email"), + AuthDomain: h.Get("X-AppEngine-Auth-Domain"), + ID: h.Get("X-AppEngine-User-Id"), + Admin: h.Get("X-AppEngine-User-Is-Admin") == "1", + FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"), + FederatedProvider: h.Get("X-AppEngine-Federated-Provider"), + } + if u.Email == "" && u.FederatedIdentity == "" { + return nil + } + return u +} + +// IsAdmin returns true if the current user is signed in and +// is currently registered as an administrator of the application. +func IsAdmin(c context.Context) bool { + h := internal.IncomingHeaders(c) + return h.Get("X-AppEngine-User-Is-Admin") == "1" +} diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp.go b/vendor/google.golang.org/appengine/xmpp/xmpp.go new file mode 100644 index 0000000..3a561fd --- /dev/null +++ b/vendor/google.golang.org/appengine/xmpp/xmpp.go @@ -0,0 +1,253 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +/* +Package xmpp provides the means to send and receive instant messages +to and from users of XMPP-compatible services. + +To send a message, + m := &xmpp.Message{ + To: []string{"kaylee@example.com"}, + Body: `Hi! How's the carrot?`, + } + err := m.Send(c) + +To receive messages, + func init() { + xmpp.Handle(handleChat) + } + + func handleChat(c context.Context, m *xmpp.Message) { + // ... + } +*/ +package xmpp // import "google.golang.org/appengine/xmpp" + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/context" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/xmpp" +) + +// Message represents an incoming chat message. +type Message struct { + // Sender is the JID of the sender. + // Optional for outgoing messages. + Sender string + + // To is the intended recipients of the message. + // Incoming messages will have exactly one element. + To []string + + // Body is the body of the message. + Body string + + // Type is the message type, per RFC 3921. + // It defaults to "chat". + Type string + + // RawXML is whether the body contains raw XML. + RawXML bool +} + +// Presence represents an outgoing presence update. +type Presence struct { + // Sender is the JID (optional). + Sender string + + // The intended recipient of the presence update. + To string + + // Type, per RFC 3921 (optional). Defaults to "available". + Type string + + // State of presence (optional). + // Valid values: "away", "chat", "xa", "dnd" (RFC 3921). + State string + + // Free text status message (optional). + Status string +} + +var ( + ErrPresenceUnavailable = errors.New("xmpp: presence unavailable") + ErrInvalidJID = errors.New("xmpp: invalid JID") +) + +// Handle arranges for f to be called for incoming XMPP messages. +// Only messages of type "chat" or "normal" will be handled. +func Handle(f func(c context.Context, m *Message)) { + http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) { + f(appengine.NewContext(r), &Message{ + Sender: r.FormValue("from"), + To: []string{r.FormValue("to")}, + Body: r.FormValue("body"), + }) + }) +} + +// Send sends a message. +// If any failures occur with specific recipients, the error will be an appengine.MultiError. 
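+//
+// Editorial sketch: sending to several recipients and checking the
+// per-recipient failures, assuming c is a request context and log is
+// google.golang.org/appengine/log:
+//
+//	m := &xmpp.Message{
+//		To:   []string{"kaylee@example.com", "mal@example.com"},
+//		Body: "status update",
+//	}
+//	if err := m.Send(c); err != nil {
+//		if me, ok := err.(appengine.MultiError); ok {
+//			log.Errorf(c, "per-recipient failures: %v", me) // me[i] matches To[i]
+//		}
+//	}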
+func (m *Message) Send(c context.Context) error { + req := &pb.XmppMessageRequest{ + Jid: m.To, + Body: &m.Body, + RawXml: &m.RawXML, + } + if m.Type != "" && m.Type != "chat" { + req.Type = &m.Type + } + if m.Sender != "" { + req.FromJid = &m.Sender + } + res := &pb.XmppMessageResponse{} + if err := internal.Call(c, "xmpp", "SendMessage", req, res); err != nil { + return err + } + + if len(res.Status) != len(req.Jid) { + return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status)) + } + me, any := make(appengine.MultiError, len(req.Jid)), false + for i, st := range res.Status { + if st != pb.XmppMessageResponse_NO_ERROR { + me[i] = errors.New(st.String()) + any = true + } + } + if any { + return me + } + return nil +} + +// Invite sends an invitation. If the from address is an empty string +// the default (yourapp@appspot.com/bot) will be used. +func Invite(c context.Context, to, from string) error { + req := &pb.XmppInviteRequest{ + Jid: &to, + } + if from != "" { + req.FromJid = &from + } + res := &pb.XmppInviteResponse{} + return internal.Call(c, "xmpp", "SendInvite", req, res) +} + +// Send sends a presence update. +func (p *Presence) Send(c context.Context) error { + req := &pb.XmppSendPresenceRequest{ + Jid: &p.To, + } + if p.State != "" { + req.Show = &p.State + } + if p.Type != "" { + req.Type = &p.Type + } + if p.Sender != "" { + req.FromJid = &p.Sender + } + if p.Status != "" { + req.Status = &p.Status + } + res := &pb.XmppSendPresenceResponse{} + return internal.Call(c, "xmpp", "SendPresence", req, res) +} + +var presenceMap = map[pb.PresenceResponse_SHOW]string{ + pb.PresenceResponse_NORMAL: "", + pb.PresenceResponse_AWAY: "away", + pb.PresenceResponse_DO_NOT_DISTURB: "dnd", + pb.PresenceResponse_CHAT: "chat", + pb.PresenceResponse_EXTENDED_AWAY: "xa", +} + +// GetPresence retrieves a user's presence. +// If the from address is an empty string the default +// (yourapp@appspot.com/bot) will be used. +// Possible return values are "", "away", "dnd", "chat", "xa". +// ErrPresenceUnavailable is returned if the presence is unavailable. +func GetPresence(c context.Context, to string, from string) (string, error) { + req := &pb.PresenceRequest{ + Jid: &to, + } + if from != "" { + req.FromJid = &from + } + res := &pb.PresenceResponse{} + if err := internal.Call(c, "xmpp", "GetPresence", req, res); err != nil { + return "", err + } + if !*res.IsAvailable || res.Presence == nil { + return "", ErrPresenceUnavailable + } + presence, ok := presenceMap[*res.Presence] + if ok { + return presence, nil + } + return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence) +} + +// GetPresenceMulti retrieves multiple users' presence. +// If the from address is an empty string the default +// (yourapp@appspot.com/bot) will be used. +// Possible return values are "", "away", "dnd", "chat", "xa". 
+// If any presence is unavailable, an appengine.MultiError is returned +func GetPresenceMulti(c context.Context, to []string, from string) ([]string, error) { + req := &pb.BulkPresenceRequest{ + Jid: to, + } + if from != "" { + req.FromJid = &from + } + res := &pb.BulkPresenceResponse{} + + if err := internal.Call(c, "xmpp", "BulkGetPresence", req, res); err != nil { + return nil, err + } + + presences := make([]string, 0, len(res.PresenceResponse)) + errs := appengine.MultiError{} + + addResult := func(presence string, err error) { + presences = append(presences, presence) + errs = append(errs, err) + } + + anyErr := false + for _, subres := range res.PresenceResponse { + if !subres.GetValid() { + anyErr = true + addResult("", ErrInvalidJID) + continue + } + if !*subres.IsAvailable || subres.Presence == nil { + anyErr = true + addResult("", ErrPresenceUnavailable) + continue + } + presence, ok := presenceMap[*subres.Presence] + if ok { + addResult(presence, nil) + } else { + anyErr = true + addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence)) + } + } + if anyErr { + return presences, errs + } + return presences, nil +} + +func init() { + internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/xmpp/xmpp_test.go b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go new file mode 100644 index 0000000..c3030d3 --- /dev/null +++ b/vendor/google.golang.org/appengine/xmpp/xmpp_test.go @@ -0,0 +1,173 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package xmpp + +import ( + "fmt" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + + "google.golang.org/appengine" + "google.golang.org/appengine/internal/aetesting" + pb "google.golang.org/appengine/internal/xmpp" +) + +func newPresenceResponse(isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) *pb.PresenceResponse { + return &pb.PresenceResponse{ + IsAvailable: proto.Bool(isAvailable), + Presence: presence.Enum(), + Valid: proto.Bool(valid), + } +} + +func setPresenceResponse(m *pb.PresenceResponse, isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) { + m.IsAvailable = &isAvailable + m.Presence = presence.Enum() + m.Valid = &valid +} + +func TestGetPresence(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "GetPresence", func(in *pb.PresenceRequest, out *pb.PresenceResponse) error { + if jid := in.GetJid(); jid != "user@example.com" { + return fmt.Errorf("bad jid %q", jid) + } + setPresenceResponse(out, true, pb.PresenceResponse_CHAT, true) + return nil + }) + + presence, err := GetPresence(c, "user@example.com", "") + if err != nil { + t.Fatalf("GetPresence: %v", err) + } + + if presence != "chat" { + t.Errorf("GetPresence: got %#v, want %#v", presence, pb.PresenceResponse_CHAT) + } +} + +func TestGetPresenceMultiSingleJID(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_NORMAL, true), + } + return nil + }) + + presence, err := GetPresenceMulti(c, []string{"user@example.com"}, "") + if err != nil { + t.Fatalf("GetPresenceMulti: %v", err) + } + if !reflect.DeepEqual(presence, 
[]string{""}) { + t.Errorf("GetPresenceMulti: got %s, want %s", presence, []string{""}) + } +} + +func TestGetPresenceMultiJID(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_NORMAL, true), + newPresenceResponse(true, pb.PresenceResponse_AWAY, true), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "") + if err != nil { + t.Fatalf("GetPresenceMulti: %v", err) + } + want := []string{"", "away"} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %v, want %v", presence, want) + } +} + +func TestGetPresenceMultiFromJID(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + if jid := in.GetFromJid(); jid != "bot@appspot.com" { + return fmt.Errorf("bad from jid %q", jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_NORMAL, true), + newPresenceResponse(true, pb.PresenceResponse_CHAT, true), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "bot@appspot.com") + if err != nil { + t.Fatalf("GetPresenceMulti: %v", err) + } + want := []string{"", "chat"} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %v, want %v", presence, want) + } +} + +func TestGetPresenceMultiInvalid(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(true, pb.PresenceResponse_EXTENDED_AWAY, true), + newPresenceResponse(true, pb.PresenceResponse_CHAT, false), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "") + + wantErr := appengine.MultiError{nil, ErrInvalidJID} + if !reflect.DeepEqual(err, wantErr) { + t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr) + } + + want := []string{"xa", ""} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want) + } +} + +func TestGetPresenceMultiUnavailable(t *testing.T) { + c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error { + if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) { + return fmt.Errorf("bad request jids %#v", in.Jid) + } + out.PresenceResponse = []*pb.PresenceResponse{ + newPresenceResponse(false, pb.PresenceResponse_AWAY, true), + newPresenceResponse(false, pb.PresenceResponse_DO_NOT_DISTURB, true), + } + return nil + }) + + jids := []string{"user@example.com", "user2@example.com"} + presence, err := GetPresenceMulti(c, jids, "") + + wantErr := appengine.MultiError{ + 
ErrPresenceUnavailable, + ErrPresenceUnavailable, + } + if !reflect.DeepEqual(err, wantErr) { + t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr) + } + want := []string{"", ""} + if !reflect.DeepEqual(presence, want) { + t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want) + } +}
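+
+// Editorial sketch appended after the vendored tests: consuming
+// GetPresenceMulti's paired results and MultiError, assuming jids is a
+// []string of targets and c is a request context:
+//
+//	presences, err := xmpp.GetPresenceMulti(c, jids, "")
+//	me, _ := err.(appengine.MultiError)
+//	for i, p := range presences {
+//		if me != nil && me[i] != nil {
+//			log.Errorf(c, "%s: %v", jids[i], me[i])
+//			continue
+//		}
+//		log.Infof(c, "%s has presence %q", jids[i], p)
+//	}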